| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
| tests/it/test_docker_image_tag.py | gfi-centre-ouest/docker-devbox-ddb | max_stars_count: 4 | id: 11700 |
<filename>tests/it/test_docker_image_tag.py
import os
import zipfile
import yaml
from dotty_dict import Dotty
from pytest_mock import MockerFixture
from ddb.__main__ import main
from ddb.config import Config
from ddb.feature.version import is_git_repository
class TestDockerImageTag:
def test_image_tag_from_git_tag_jsonnet(self, project_loader, mocker: MockerFixture):
Config.defaults = None
mocker.patch('ddb.feature.version.is_git_repository', is_git_repository)
project_loader("image_tag_from_git_tag")
if os.path.exists("repo.zip"):
with zipfile.ZipFile("repo.zip", 'r') as zip_ref:
zip_ref.extractall(".")
main(["configure"])
assert os.path.exists("docker-compose.yml")
with open("docker-compose.yml") as f:
docker_compose = yaml.load(f, yaml.SafeLoader)
assert Dotty(docker_compose).get('services.node.image') == 'some-registry/node:some-tag'
def test_image_tag_from_git_branch_jsonnet(self, project_loader, mocker: MockerFixture):
Config.defaults = None
mocker.patch('ddb.feature.version.is_git_repository', is_git_repository)
project_loader("image_tag_from_git_branch")
if os.path.exists("repo.zip"):
with zipfile.ZipFile("repo.zip", 'r') as zip_ref:
zip_ref.extractall(".")
main(["configure"])
with open("docker-compose.yml") as f:
docker_compose = yaml.load(f, yaml.SafeLoader)
assert Dotty(docker_compose).get('services.node.image') == 'some-registry/node:some-branch'
def test_image_tag_from_git_disabled_jsonnet(self, project_loader, mocker: MockerFixture):
Config.defaults = None
mocker.patch('ddb.feature.version.is_git_repository', is_git_repository)
project_loader("image_tag_from_git_disabled")
if os.path.exists("repo.zip"):
with zipfile.ZipFile("repo.zip", 'r') as zip_ref:
zip_ref.extractall(".")
main(["configure"])
with open("docker-compose.yml") as f:
docker_compose = yaml.load(f, yaml.SafeLoader)
assert Dotty(docker_compose).get('services.node.image') == 'some-registry/node'
| score: 2.15625 | int_score: 2 |

| bin/zeisel.py | bendemeo/ample | max_stars_count: 0 | id: 11701 |
import numpy as np
import os
from scanorama import *
from scipy.sparse import vstack
from process import load_names
from experiments import *
from utils import *
NAMESPACE = 'zeisel'
METHOD = 'svd'
DIMRED = 100
data_names = [
'data/mouse_brain/zeisel/amygdala',
'data/mouse_brain/zeisel/cerebellum',
'data/mouse_brain/zeisel/cortex1',
'data/mouse_brain/zeisel/cortex2',
'data/mouse_brain/zeisel/cortex3',
'data/mouse_brain/zeisel/enteric',
'data/mouse_brain/zeisel/hippocampus',
'data/mouse_brain/zeisel/hypothalamus',
'data/mouse_brain/zeisel/medulla',
'data/mouse_brain/zeisel/midbraindorsal',
'data/mouse_brain/zeisel/midbrainventral',
'data/mouse_brain/zeisel/olfactory',
'data/mouse_brain/zeisel/pons',
'data/mouse_brain/zeisel/spinalcord',
'data/mouse_brain/zeisel/striatumdorsal',
'data/mouse_brain/zeisel/striatumventral',
'data/mouse_brain/zeisel/sympathetic',
'data/mouse_brain/zeisel/thalamus',
]
if __name__ == '__main__':
datasets, genes_list, n_cells = load_names(data_names, norm=False)
datasets, genes = merge_datasets(datasets, genes_list)
X = vstack(datasets)
if not os.path.isfile('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE)):
log('Dimension reduction with {}...'.format(METHOD))
X_dimred = reduce_dimensionality(
normalize(X), method=METHOD, dimred=DIMRED
)
log('Dimensionality = {}'.format(X_dimred.shape[1]))
np.savetxt('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE), X_dimred)
else:
X_dimred = np.loadtxt('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE))
from ample import gs, uniform, srs
#samp_idx = gs(X_dimred, 20000, replace=False)
#samp_idx = uniform(X_dimred, 20000, replace=False)
samp_idx = srs(X_dimred, 20000, replace=False)
#from anndata import AnnData
#import scanpy.api as sc
#adata = AnnData(X=X_dimred[samp_idx, :])
#sc.pp.neighbors(adata, use_rep='X')
#sc.tl.louvain(adata, resolution=1.5, key_added='louvain')
#
#louv_labels = np.array(adata.obs['louvain'].tolist())
#le = LabelEncoder().fit(louv_labels)
#cell_labels = le.transform(louv_labels)
#
#np.savetxt('data/cell_labels/zeisel_louvain.txt', cell_labels)
labels = (
open('data/cell_labels/zeisel_cluster.txt')
.read().rstrip().split('\n')
)
le = LabelEncoder().fit(labels)
cell_labels = le.transform(labels)
experiments(
X_dimred, NAMESPACE, n_seeds=2,
cell_labels=cell_labels,
kmeans_ami=True, louvain_ami=True,
rare=True,
rare_label=le.transform(['Ependymal'])[0],
)
exit()
embedding = visualize(
[ X_dimred[samp_idx, :] ], cell_labels[samp_idx],
NAMESPACE + '_srs{}'.format(len(samp_idx)),
[ str(ct) for ct in sorted(set(cell_labels)) ],
perplexity=100, n_iter=500, image_suffix='.png',
viz_cluster=True
)
exit()
cell_labels = (
open('data/cell_labels/zeisel_louvain.txt')
.read().rstrip().split('\n')
)
le = LabelEncoder().fit(cell_labels)
cell_labels = le.transform(cell_labels)
astro = set([ 32, 38, 40, ])
oligo = set([ 2, 5, 12, 20, 23, 33, 37, ])
focus = set([ 15, 36, 41 ])
labels = []
aob_labels = []
for cl in cell_labels:
if cl in focus:
labels.append(0)
aob_labels.append('both')
elif cl in astro or cl in oligo:
labels.append(1)
if cl in astro:
aob_labels.append('astro')
else:
aob_labels.append('oligo')
else:
labels.append(2)
aob_labels.append('none')
labels = np.array(labels)
aob_labels = np.array(aob_labels)
X = np.log1p(normalize(X[samp_idx, :]))
from mouse_brain_astrocyte import astro_oligo_joint, astro_oligo_violin
#astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'astro', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'oligo', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'both', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'astro', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'oligo', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'both', NAMESPACE)
astro_oligo_violin(X, genes, 'GJA1', aob_labels, NAMESPACE)
astro_oligo_violin(X, genes, 'MBP', aob_labels, NAMESPACE)
astro_oligo_violin(X, genes, 'PLP1', aob_labels, NAMESPACE)
viz_genes = [
#'GJA1', 'MBP', 'PLP1', 'TRF',
#'CST3', 'CPE', 'FTH1', 'APOE', 'MT1', 'NDRG2', 'TSPAN7',
#'PLP1', 'MAL', 'PTGDS', 'CLDN11', 'APOD', 'QDPR', 'MAG', 'ERMN',
#'PLP1', 'MAL', 'PTGDS', 'MAG', 'CLDN11', 'APOD', 'FTH1',
#'ERMN', 'MBP', 'ENPP2', 'QDPR', 'MOBP', 'TRF',
#'CST3', 'SPARCL1', 'PTN', 'CD81', 'APOE', 'ATP1A2', 'ITM2B'
]
cell_labels = (
open('data/cell_labels/zeisel_cluster.txt')
.read().rstrip().split('\n')
)
le = LabelEncoder().fit(cell_labels)
cell_labels = le.transform(cell_labels)
embedding = visualize(
[ X_dimred[samp_idx, :] ], cell_labels[samp_idx],
NAMESPACE + '_astro{}'.format(len(samp_idx)),
[ str(ct) for ct in sorted(set(cell_labels)) ],
gene_names=viz_genes, gene_expr=X, genes=genes,
perplexity=100, n_iter=500, image_suffix='.png',
viz_cluster=True
)
#visualize_dropout(X, embedding, image_suffix='.png',
# viz_prefix=NAMESPACE + '_dropout')
from differential_entropies import differential_entropies
differential_entropies(X_dimred, labels)
| score: 2.078125 | int_score: 2 |

| cogitare/monitor/workers/system_usage.py | cogitare-ai/cogitare | max_stars_count: 90 | id: 11702 |
<filename>cogitare/monitor/workers/system_usage.py
from threading import Thread
import psutil
import time
class SystemUsage(Thread):
def __init__(self, callback, *args, **kwargs):
super(SystemUsage, self).__init__(daemon=True)
self.interval = 1
self.enabled = False
self.callback = callback
self.p = psutil.Process()
self.start()
def run(self):
while True:
if self.enabled:
self.callback(self.get_usage())
time.sleep(self.interval)
def get_usage(self):
usage = {}
usage['Ram (GB)'] = round(psutil.virtual_memory().used * 1.0 / 2 ** 30, 2)
usage['CPU (%)'] = dict(('CPU %d' % idx, pct) for idx, pct in enumerate(psutil.cpu_percent(percpu=True), 1))
usage['CPU used by the Process (%)'] = round(self.p.cpu_percent(), 2)
usage['RAM used by the Process (MB)'] = round(self.p.memory_info().rss * 1.0 / 2 ** 20, 2)
usage['Number of Threads'] = self.p.num_threads()
return usage
| score: 2.703125 | int_score: 3 |

| test/worker/net.py | ameserole/Naumachia | max_stars_count: 0 | id: 11703 |
import fcntl
import os
import socket
import struct
import warnings
import subprocess
import logging
import base64
logger = logging.getLogger(__name__)
# Dummy socket used for fcntl functions
_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class AddrMeta(type):
@property
def maxvalue(cls):
return (0x1 << (cls.bytelen * 8)) - 1
class Addr(metaclass=AddrMeta):
bytelen = 0
def __init__(self, addr):
self._str = None
self._int = None
self._bytes = None
if isinstance(addr, type(self)):
self._str = addr._str
self._bytes = addr._bytes
self._int = addr._int
elif isinstance(addr, str):
self._str = addr
elif isinstance(addr, int):
self._int = addr
elif isinstance(addr, bytes):
if len(addr) == self.bytelen:
self._bytes = addr
else:
self._str = addr.decode('utf-8')
else:
raise ValueError('Cannot create {!s} from {!s}'.format(type(self), type(addr)))
# Operations
def __and__(self, other):
return type(self)(int(self) & int(other))
def __or__(self, other):
return type(self)(int(self) | int(other))
def __xor__(self, other):
return type(self)(int(self) ^ int(other))
def __invert__(self):
return type(self)(int(self) ^ self.maxvalue)
# Conversions
def __str__(self):
if self._str is None:
self._str = self.bytes_to_str(bytes(self))
return self._str
def __int__(self):
return int.from_bytes(bytes(self), byteorder='big')
def __bytes__(self):
if self._bytes is None:
if self._str is not None:
self._bytes = self.str_to_bytes(self._str)
elif self._int is not None:
self._bytes = self._int.to_bytes(self.bytelen, byteorder='big')
return self._bytes
def __repr__(self):
return '<{0}.{1} {2!s}>'.format(__name__, type(self).__name__, self)
class Ip(Addr):
bytelen = 4
@staticmethod
def bytes_to_str(b):
return socket.inet_ntoa(b)
@staticmethod
def str_to_bytes(s):
return socket.inet_aton(s)
def slash(self):
x, i = int(self), 0
while x & 0x1 == 0:
x >>= 1
i += 1
return 32 - i
class Mac(Addr):
bytelen = 6
@staticmethod
def bytes_to_str(b):
return ':'.join('%02x' % byte for byte in b)
@staticmethod
def str_to_bytes(s):
return bytes.fromhex(s.replace(':', ''))
def _ifctl(ifname, code):
if isinstance(ifname, str):
ifname = ifname.encode('utf-8')
return fcntl.ioctl(
_socket.fileno(),
code,
struct.pack('256s', ifname[:15])
)
def ifaddr(ifname):
return Ip(_ifctl(ifname, 0x8915)[20:24]) # SIOCGIFADDR
def ifmask(ifname):
return Ip(_ifctl(ifname, 0x891b)[20:24]) # SIOCGIFNETMASK
def ifhwaddr(ifname):
return Mac(_ifctl(ifname, 0x8927)[18:24]) # SIOCGIFHWADDR
def cidr(ip, mask):
return "{!s}/{:d}".format(ip, mask.slash())
def parsecidr(ipnet):
ipstr, maskstr = ipnet.split('/')
ip = Ip(ipstr)
mask = Ip(0xffffffff ^ ((0x00000001 << (32-int(maskstr)))-1))
return ip, mask
def ifcidr(ifname):
return cidr(ifaddr(ifname), ifmask(ifname))
class OpenVpnError(Exception):
def __init__(self, instance, msg):
self.instance = instance
super().__init__(msg)
class OpenVpn:
exe = 'openvpn'
initmsg = b'Initialization Sequence Completed'
def __init__(self, **kwargs):
if 'daemonize' in kwargs:
warnings.warn("This class will not be able to close a daemonized tunnel", warnings.Warning)
self.options = kwargs
self.initialized = False
self._process = None
def args(self):
result = []
for name, value in self.options.items():
result.append('--{!s}'.format(name))
# None is special to indicate the option has no value
if value is not None:
result.append(str(value))
return result
def check(self):
if self._process is not None:
self._process.poll()
code = self._process.returncode
if code is not None and code != 0:
raise OpenVpnError(self, "`openvpn {:s}` exited with error code: {:d}".format(" ".join(self.args()), code))
def running(self):
return self._process is not None and self._process.poll() is None
@staticmethod
def maketun():
os.makedirs('/dev/net', exist_ok=True)
subprocess.run(['mknod', '/dev/net/tun', 'c', '10', '200'], check=True)
def connect(self):
if not os.path.exists('/dev/net/tun'):
self.maketun()
if not self.running():
self.initialized = False
self._process = subprocess.Popen(
[self.exe] + self.args(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
self.check()
def disconnect(self):
if self.running():
self._process.terminate()
os.waitpid(self._process.pid, 0)
def waitforinit(self):
if not self.initialized:
for line in self._process.stdout:
logger.debug("openvpn: %s", line.decode('utf-8').strip())
if self.initmsg in line:
self.initialized = True
break
else:
self.check()
raise OpenVpnError(self, "OpenVPN exited with code 0, but did not display init msg")
def __enter__(self):
self.connect()
return self
def __exit__(self, *args, **kwargs):
self.disconnect()
| score: 2.46875 | int_score: 2 |

| kunrei.py | kosugi/alfred.romanizer | max_stars_count: 0 | id: 11704 |
# -*- coding: utf-8 -*-
basic_table = dict(map(lambda s: s.split(u'\t'), u'''
あ a
い i
う u
え e
お o
か ka
き ki
く ku
け ke
こ ko
さ sa
し si
す su
せ se
そ so
た ta
ち ti
つ tu
て te
と to
な na
に ni
ぬ nu
ね ne
の no
は ha
ひ hi
ふ hu
へ he
ほ ho
ま ma
み mi
む mu
め me
も mo
や ya
ゆ yu
よ yo
ら ra
り ri
る ru
れ re
ろ ro
わ wa
を wo
ぁ a
ぃ i
ぅ u
ぇ e
ぉ o
が ga
ぎ gi
ぐ gu
げ ge
ご go
ざ za
じ zi
ず zu
ぜ ze
ぞ zo
だ da
ぢ di
づ du
で de
ど do
ば ba
び bi
ぶ bu
べ be
ぼ bo
ぱ pa
ぴ pi
ぷ pu
ぺ pe
ぽ po
きゃ kya
きゅ kyu
きょ kyo
しゃ sya
しゅ syu
しょ syo
ちゃ tya
ちゅ tyu
ちょ tyo
にゃ nya
にゅ nyu
にょ nyo
ひゃ hya
ひゅ hyu
ひょ hyo
みゃ mya
みゅ myu
みょ myo
りゃ rya
りゅ ryu
りょ ryo
ぎゃ gya
ぎゅ gyu
ぎょ gyo
じゃ zya
じゅ zyu
じょ zyo
でゃ dya
でゅ dyu
でょ dyo
びゃ bya
びゅ byu
びょ byo
ぴゃ pya
ぴゅ pyu
ぴょ pyo
クヮ kwa
グヮ gwa
ア a
イ i
ウ u
エ e
オ o
カ ka
キ ki
ク ku
ケ ke
コ ko
サ sa
シ si
ス su
セ se
ソ so
タ ta
チ ti
ツ tu
テ te
ト to
ナ na
ニ ni
ヌ nu
ネ ne
ノ no
ハ ha
ヒ hi
フ hu
ヘ he
ホ ho
マ ma
ミ mi
ム mu
メ me
モ mo
ヤ ya
ユ yu
ヨ yo
ラ ra
リ ri
ル ru
レ re
ロ ro
ワ wa
ヲ wo
ァ a
ィ i
ゥ u
ェ e
ォ o
ガ ga
ギ gi
グ gu
ゲ ge
ゴ go
ザ za
ジ zi
ズ zu
ゼ ze
ゾ zo
ダ da
ヂ di
ヅ du
デ de
ド do
バ ba
ビ bi
ブ bu
ベ be
ボ bo
パ pa
ピ pi
プ pu
ペ pe
ポ po
キャ kya
キュ kyu
キョ kyo
シャ sya
シュ syu
ショ syo
チャ tya
チュ tyu
チョ tyo
ニャ nya
ニュ nyu
ニョ nyo
ヒャ hya
ヒュ hyu
ヒョ hyo
ミャ mya
ミュ myu
ミョ myo
リャ rya
リュ ryu
リョ ryo
ギャ gya
ギュ gyu
ギョ gyo
ジャ zya
ジュ zyu
ジョ zyo
デャ dya
デュ dyu
デョ dyo
ビャ bya
ビュ byu
ビョ byo
ピャ pya
ピュ pyu
ピョ pyo
くゎ kwa
ぐゎ gwa
'''.strip(u'\n').split(u'\n')))
long_sound_table = dict(u'aâ iî uû eê oô'.split())
long_sounds = u'aa ii uu ee oo ou'.split()
def normalize(s):
roman = u''
l = len(s)
n = 0
while n < l:
c1 = s[n]
c2 = s[n:n+2]
c3 = s[n+1:n+2]
if roman and c1 == u'ー':
c1 = u''
if roman[-1] in u'aiueo':
roman = roman[:-1] + long_sound_table[roman[-1]]
elif c2 in long_sounds:
c1 = long_sound_table[c1]
n += 1
elif c1 in u'んン':
c1 = u'n'
if c3 and c3 in u'aiueoy':
c1 += u"'"
elif c1 in u'っッ':
if c3 in u'bcdfghjklmnpqrstvwxyz':
c1 = c3
else:
c1 = u''
roman += c1
n += 1
return roman
| score: 2.46875 | int_score: 2 |

| tests/test_utils.py | loganthomas/turkey-bowl | max_stars_count: 0 | id: 11705 |
<reponame>loganthomas/turkey-bowl<filename>tests/test_utils.py
"""
Unit tests for utils.py
"""
# Standard libraries
import json
from pathlib import Path
# Third-party libraries
import pytest
# Local libraries
from turkey_bowl import utils
@pytest.mark.freeze_time
@pytest.mark.parametrize(
"frozen_date, expected",
[
("2010-09-09", 2010),
("2011-09-08", 2011),
("2012-09-05", 2012),
("2018-09-06", 2018),
("2019-09-05", 2019),
],
)
def test_get_current_year(freezer, frozen_date, expected):
""" Use pytest-freezegun to freeze dates and check year."""
# Setup
freezer.move_to(frozen_date)
# Exercise
result = utils.get_current_year()
# Verify
assert result == expected
# Cleanup - none necessary
def test_write_to_json(tmp_path):
# Setup
tmp_file_path = tmp_path.joinpath("test.json")
json_data = {"test": "test", "test2": "test2"}
# Exercise
assert tmp_file_path.exists() is False # non-existent prior to write
utils.write_to_json(json_data, tmp_file_path)
# Verify
assert tmp_file_path.exists() is True
with open(tmp_file_path, "r") as written_file:
result = json.load(written_file)
assert result == json_data
# Cleanup - none necessary
def test_load_from_json(tmp_path):
# Setup
tmp_file_path = tmp_path.joinpath("test.json")
json_data = {"test": "test", "test2": "test2"}
with open(tmp_file_path, "w") as written_file:
json.dump(json_data, written_file)
# Exercise
assert tmp_file_path.exists() is True # existent prior to load
result = utils.load_from_json(tmp_file_path)
# Verify
assert result == json_data
# Cleanup - none necessary
def test_write_to_and_load_from(tmp_path):
# Setup
tmp_file_path = tmp_path.joinpath("test.json")
json_data = {"test": "test", "test2": "test2"}
# Exercise
assert tmp_file_path.exists() is False # non-existent prior to write
utils.write_to_json(json_data, tmp_file_path)
result = utils.load_from_json(tmp_file_path)
# Verify
assert tmp_file_path.exists() is True
assert result == json_data
# Cleanup - none necessary
def test_load_stat_ids():
# Setup
file_loc = Path(__file__)
stat_ids_json_path = file_loc.parent.parent.joinpath("assets/stat_ids.json")
# Exercise
result = utils.load_from_json(stat_ids_json_path)
# Verify
assert len(result) == 91
for k, v in result.items():
assert int(k) == v["id"]
assert list(v.keys()) == ["id", "abbr", "name", "shortName"]
# Cleanup - none necessary
def test_load_player_ids():
# Setup
file_loc = Path(__file__)
stat_ids_json_path = file_loc.parent.parent.joinpath("assets/player_ids.json")
# Exercise
result = utils.load_from_json(stat_ids_json_path)
# Verify
assert "year" in result
for k, v in result.items():
if k == "year":
assert isinstance(v, int)
else:
assert list(v.keys()) == ["name", "position", "team", "injury"]
# Cleanup - none necessary
| score: 2.421875 | int_score: 2 |

| regtests/webclgl/call_external_method.py | bpmbank/PythonJS | max_stars_count: 319 | id: 11706 |
<filename>regtests/webclgl/call_external_method.py
"""external method"""
class myclass:
def __init__(self, i):
self.index = i
def get_index(self):
return self.index
def run(self, n):
self.intarray = new(Int16Array(n))
self.intarray[ self.index ] = 99
@returns( array=n )
@gpu.main
def gpufunc():
int* A = self.intarray
## GLSL compile error: `Index expression must be constant`
#int idx = self.get_index()
#return float( A[idx] )
return float( A[self.get_index()] )
return gpufunc()
def main():
m = myclass(10)
r = m.run(64)
print(r)
TestError( int(r[10])==99 )
| score: 2.578125 | int_score: 3 |

| pages/views.py | Total-Conversion/eco4coin | max_stars_count: 0 | id: 11707 |
<gh_stars>0
from django.contrib.admin.views.decorators import staff_member_required
from django.views.generic import TemplateView, ListView
import csv
from django.http import HttpResponse
from backend.models import CustomUser
from django.contrib.auth.mixins import LoginRequiredMixin
class HomePageView(TemplateView):
template_name = 'pages/home.html'
class UsersListView(LoginRequiredMixin, ListView):
model = CustomUser
context_object_name = 'user'
template_name = 'pages/users_list.html'
paginate_by = 100
def get_queryset(self):
return CustomUser.objects.all().order_by('id')
def get_context_data(self, **kwargs):
context = super(UsersListView, self).get_context_data(**kwargs)
context['user'] = CustomUser.objects.get(id=self.request.user.id)
return context
@staff_member_required(login_url="/accounts/login/")
def users_to_csv(request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="users_list.csv"'
writer = csv.writer(response)
writer.writerow(
[
"id ", "cash_balance ", "cash_locked ", "coin_balance ", "coin_locked ", "wallet_id ", " date_joined"
]
)
users_list = CustomUser.objects.all().order_by("id")
for user in users_list:
writer.writerow(
[
user.id,
user.cash_balance,
user.cash_locked,
user.coin_balance,
user.coin_locked,
user.wallet_id,
user.date_joined
]
)
return response
| score: 2.21875 | int_score: 2 |

| 18.part2.py | elp2/advent_of_code_2018 | max_stars_count: 1 | id: 11708 |
from collections import defaultdict
def return_default():
return 0
REAL=open("18.txt").readlines()
SAMPLE=open("18.sample").readlines()
OPEN="."
TREE="|"
LUMBERYARD="#"
import copy
def safe_grid_get(grid, x, y, missing=None):
if x < 0 or y < 0:
return missing
if y >= len(grid):
return missing
if x >= len(grid[y]):
return missing
return grid[y][x]
def parse_lines(lines):
return list(map(lambda l: list(l.strip()), lines))
def next_sq(grid, x, y):
around = defaultdict(return_default)
for dy in [-1, 0, 1]:
for dx in [-1, 0, 1]:
if dx == 0 and dy == 0:
continue
a = safe_grid_get(grid, x + dx, y + dy)
if a is not None:
around[a] += 1
here = grid[y][x]
if here == OPEN:
if around[TREE] >= 3:
return TREE
else:
return OPEN
elif here == TREE:
if around[LUMBERYARD] >= 3:
return LUMBERYARD
else:
return TREE
else:
assert here == LUMBERYARD
if around[LUMBERYARD] >= 1 and around[TREE] >= 1:
return LUMBERYARD
else:
return OPEN
def resource_value(board):
lands = defaultdict(return_default)
for y in range(len(board)):
for x in range(len(board[0])):
lands[board[y][x]] += 1
return lands[TREE] * lands[LUMBERYARD]
def solve(lines, minutes):
cache = {}
old_board = parse_lines(lines)
for minute in range(minutes):
board = copy.deepcopy(old_board)
for y in range(len(board)):
for x in range(len(board[0])):
board[y][x] = next_sq(old_board, x, y)
old_board = board
key = "\n".join(map(lambda r: "".join(r), board))
# print(key)
if key in cache:
print(minute, cache[key])
else:
cache[key] = (minute, resource_value(board))
return resource_value(board)
sample = solve(SAMPLE, 10)
assert sample == 1147
print("*** SAMPLE PASSED ***")
# print(solve(REAL, 10000))
loop = """598 570 191420
599 571 189168
600 572 185082
601 573 185227
602 574 185320
603 575 185790
604 576 186120
605 577 189956
606 578 190068
607 579 191080
608 580 190405 # too low
609 581 193795
610 582 190950
611 583 193569
612 584 194350
613 585 196308
614 586 195364
615 587 197911
616 588 199755
617 589 201144
618 590 201607
619 591 203580
620 592 201260
621 593 201950
622 594 200675 # TOO HIGH
623 595 202208
624 596 200151
625 597 198948
626 570 191420
627 571 189168
628 572 185082
629 573 185227
630 574 185320
631 575 185790
632 576 186120
633 577 189956
634 578 190068
635 579 191080
636 580 190405
637 581 193795"""
num = 1000000000
nmod = 28
for num in range(570, 638):
print(num, (num - 570) % nmod + 570)
num = 1000000000 - 1
print(num, (num - 570) % nmod + 570 + nmod)
| score: 3.1875 | int_score: 3 |

| PythonExercicios/ex031.py | Caio-Moretti/115.Exercicios-Python | max_stars_count: 0 | id: 11709 |
<gh_stars>0
dis = float(input('Digite a distância da sua viagem em Km: '))
if dis <= 200:
print('O valor da sua passagem será {:.2f} reais'.format(dis * 0.5))
else:
print('O valor da sua passagem será {:.2f} reais'.format(dis * 0.45))
| score: 3.5625 | int_score: 4 |

| admin/collection_providers/forms.py | rdm-dev12/RDM-osf.io | max_stars_count: 0 | id: 11710 |
<filename>admin/collection_providers/forms.py
import bleach
import json
from django import forms
from osf.models import CollectionProvider, CollectionSubmission
from admin.base.utils import get_nodelicense_choices, get_defaultlicense_choices
class CollectionProviderForm(forms.ModelForm):
collected_type_choices = forms.CharField(widget=forms.HiddenInput(), required=False)
status_choices = forms.CharField(widget=forms.HiddenInput(), required=False)
class Meta:
model = CollectionProvider
exclude = ['primary_identifier_name', 'primary_collection', 'type', 'allow_commenting', 'advisory_board',
'example', 'domain', 'domain_redirect_enabled', 'reviews_comments_anonymous',
'reviews_comments_private', 'reviews_workflow']
widgets = {
'licenses_acceptable': forms.CheckboxSelectMultiple(),
}
def __init__(self, *args, **kwargs):
nodelicense_choices = get_nodelicense_choices()
defaultlicense_choices = get_defaultlicense_choices()
super(CollectionProviderForm, self).__init__(*args, **kwargs)
self.fields['licenses_acceptable'].choices = nodelicense_choices
self.fields['default_license'].choices = defaultlicense_choices
def clean_description(self, *args, **kwargs):
if not self.data.get('description'):
return u''
return bleach.clean(
self.data.get('description'),
tags=['a', 'br', 'em', 'p', 'span', 'strong'],
attributes=['class', 'style', 'href', 'title', 'target'],
styles=['text-align', 'vertical-align'],
strip=True
)
def clean_footer_links(self, *args, **kwargs):
if not self.data.get('footer_links'):
return u''
return bleach.clean(
self.data.get('footer_links'),
tags=['a', 'br', 'div', 'em', 'p', 'span', 'strong'],
attributes=['class', 'style', 'href', 'title', 'target'],
styles=['text-align', 'vertical-align'],
strip=True
)
def clean_collected_type_choices(self):
collection_provider = self.instance
# if this is to modify an existing CollectionProvider
if collection_provider.primary_collection:
type_choices_old = set(collection_provider.primary_collection.collected_type_choices)
type_choices_new = set(json.loads(self.data.get('collected_type_choices')))
type_choices_added = type_choices_new - type_choices_old
type_choices_removed = type_choices_old - type_choices_new
for item in type_choices_removed:
if CollectionSubmission.objects.filter(collection=collection_provider.primary_collection,
collected_type=item).exists():
raise forms.ValidationError(
'Cannot delete "{}" because it is used as metadata on objects.'.format(item)
)
else:
# if this is creating a CollectionProvider
type_choices_added = json.loads(self.data.get('collected_type_choices'))
type_choices_removed = []
return {
'added': type_choices_added,
'removed': type_choices_removed,
}
def clean_status_choices(self):
collection_provider = self.instance
# if this is to modify an existing CollectionProvider
if collection_provider.primary_collection:
status_choices_old = set(collection_provider.primary_collection.status_choices)
status_choices_new = set(json.loads(self.data.get('status_choices')))
status_choices_added = status_choices_new - status_choices_old
status_choices_removed = status_choices_old - status_choices_new
for item in status_choices_removed:
if CollectionSubmission.objects.filter(collection=collection_provider.primary_collection,
status=item).exists():
raise forms.ValidationError(
'Cannot delete "{}" because it is used as metadata on objects.'.format(item)
)
else:
# if this is creating a CollectionProvider
status_choices_added = json.loads(self.data.get('status_choices'))
status_choices_removed = []
return {
'added': status_choices_added,
'removed': status_choices_removed,
}
| score: 1.90625 | int_score: 2 |

| src/validate_model.py | mareklinka/esk-form-scanner-model | max_stars_count: 0 | id: 11711 |
<filename>src/validate_model.py
import data_providers as gen
import model_storage as storage
import numpy as np
import data_visualizer
import time
def evaluate(model_name):
"""
Evaluates the model stored in the specified file.
Parameters
----------
model_name : string
The name of the file to read the model from
"""
model = storage.load_model(model_name)
model.summary()
start = time.clock()
score = model.evaluate_generator(gen.finite_generator("data\\validation"), steps=30)
end = time.clock()
print("Time per image: {} ".format((end-start)/300))
print (model.metrics_names)
print (score)
predictions = model.predict_generator(gen.finite_generator("data\\validation"), steps=30)
data_visualizer.draw_bounding_boxes("data\\validation", predictions, "data\\results")
| score: 3.203125 | int_score: 3 |

| sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/v3_0_preview_1/models/_form_recognizer_client_enums.py | rsdoherty/azure-sdk-for-python | max_stars_count: 0 | id: 11712 |
<gh_stars>0
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AnalyzeResultOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Operation status.
"""
NOT_STARTED = "notStarted"
RUNNING = "running"
FAILED = "failed"
SUCCEEDED = "succeeded"
class ApiVersion(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""API version.
"""
TWO_THOUSAND_TWENTY_ONE09_30_PREVIEW = "2021-09-30-preview"
class ContentType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Content type for upload
"""
#: Content Type 'application/octet-stream'.
APPLICATION_OCTET_STREAM = "application/octet-stream"
#: Content Type 'application/pdf'.
APPLICATION_PDF = "application/pdf"
#: Content Type 'image/bmp'.
IMAGE_BMP = "image/bmp"
#: Content Type 'image/jpeg'.
IMAGE_JPEG = "image/jpeg"
#: Content Type 'image/png'.
IMAGE_PNG = "image/png"
#: Content Type 'image/tiff'.
IMAGE_TIFF = "image/tiff"
class DocumentFieldType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Semantic data type of the field value.
"""
STRING = "string"
DATE = "date"
TIME = "time"
PHONE_NUMBER = "phoneNumber"
NUMBER = "number"
INTEGER = "integer"
SELECTION_MARK = "selectionMark"
COUNTRY_REGION = "countryRegion"
CURRENCY = "currency"
SIGNATURE = "signature"
ARRAY = "array"
OBJECT = "object"
class DocumentSignatureType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Presence of signature.
"""
SIGNED = "signed"
UNSIGNED = "unsigned"
class DocumentTableCellKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Table cell kind.
"""
CONTENT = "content"
ROW_HEADER = "rowHeader"
COLUMN_HEADER = "columnHeader"
STUB_HEAD = "stubHead"
DESCRIPTION = "description"
class LengthUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The unit used by the width, height, and boundingBox properties. For images, the unit is
"pixel". For PDF, the unit is "inch".
"""
PIXEL = "pixel"
INCH = "inch"
class OperationKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Type of operation.
"""
DOCUMENT_MODEL_BUILD = "documentModelBuild"
DOCUMENT_MODEL_COMPOSE = "documentModelCompose"
DOCUMENT_MODEL_COPY_TO = "documentModelCopyTo"
class OperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Operation status.
"""
NOT_STARTED = "notStarted"
RUNNING = "running"
FAILED = "failed"
SUCCEEDED = "succeeded"
CANCELED = "canceled"
class SelectionMarkState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""State of the selection mark.
"""
SELECTED = "selected"
UNSELECTED = "unselected"
class StringIndexType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Method used to compute string offset and length.
"""
TEXT_ELEMENTS = "textElements"
UNICODE_CODE_POINT = "unicodeCodePoint"
UTF16_CODE_UNIT = "utf16CodeUnit"
| score: 2.15625 | int_score: 2 |

| activity_log/migrations/0004_auto_20170309_0929.py | farezsaputra/BandwidthControllingSystem | max_stars_count: 0 | id: 11713 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('activity_log', '0003_activitylog_extra_data'),
]
operations = [
migrations.AlterField(
model_name='activitylog',
name='datetime',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='datetime', db_index=True),
),
migrations.AlterField(
model_name='activitylog',
name='ip_address',
field=models.GenericIPAddressField(blank=True, null=True, verbose_name='user IP', db_index=True),
),
migrations.AlterField(
model_name='activitylog',
name='request_url',
field=models.CharField(db_index=True, verbose_name='url', max_length=256),
),
]
| score: 1.765625 | int_score: 2 |

| bin/read_oogeso_data.py | oogeso/oogeso | max_stars_count: 2 | id: 11714 |
<filename>bin/read_oogeso_data.py<gh_stars>1-10
import json
import oogeso.io.file_io
import oogeso.dto.oogeso_input_data_objects
# Read in data, validate it, etc. with methods from io
test_data_file = "examples/test case2.yaml"
json_data = oogeso.io.file_io.read_data_from_yaml(test_data_file)
json_formatted_str = json.dumps(json_data, indent=2)
print("Type json formatted str=", type(json_formatted_str))
# deserialize json data to objects
# encoder = oogeso.dto.oogeso_input_data_objects.DataclassJSONEncoder
decoder = oogeso.dto.oogeso_input_data_objects.DataclassJSONDecoder
# decoder = json.JSONDecoder()
with open("examples/energysystem.json", "r") as jsonfile:
energy_system = json.load(jsonfile, cls=decoder)
es_str = oogeso.dto.oogeso_input_data_objects.serialize_oogeso_data(energy_system)
print("Type seriealised=", type(es_str))
mydecoder = decoder()
energy_system = mydecoder.decode(json_formatted_str)
print("Type energysystem=", type(energy_system))
# energy_system = json.loads(
# json_formatted_str, cls=encoder
# )
# energy_system: oogeso.dto.oogeso_input_data_objects.EnergySystem = (
# oogeso.dto.oogeso_input_data_objects.deserialize_oogeso_data(json_data)
# )
print("========================")
print("Energy system:")
# print("Energy system type=", type(energy_system))
# print("Nodes: ", energy_system.nodes)
# print("Node1: ", energy_system.nodes["node1"])
# print("Parameters: ", energy_system.parameters)
# print("Parameters type=", type(energy_system.parameters))
# print("planning horizon: ", energy_system.parameters.planning_horizon)
# print("Carriers: ", energy_system.carriers)
print(energy_system)
| score: 2.953125 | int_score: 3 |

| TWITOFF/__init__.py | DSPT3/Twitoff | max_stars_count: 0 | id: 11715 |
<filename>TWITOFF/__init__.py
""" Entry Point for Our Twitoff Flask App """
from .app import create_app
APP = create_app()
| score: 1.242188 | int_score: 1 |

| fgivenx/test/test_mass.py | ejhigson/fgivenx | max_stars_count: 11 | id: 11716 |
import numpy
import pytest
import os
from shutil import rmtree
from numpy.testing import assert_allclose
import scipy.stats
import scipy.integrate
import scipy.special
from fgivenx.mass import PMF, compute_pmf
def gaussian_pmf(y, mu=0, sigma=1):
return scipy.special.erfc(numpy.abs(y-mu)/numpy.sqrt(2)/sigma)
def test_gaussian():
numpy.random.seed(0)
nsamp = 5000
samples = numpy.random.randn(nsamp)
y = numpy.random.uniform(-3, 3, 10)
m = PMF(samples, y)
m_ = gaussian_pmf(y)
assert_allclose(m, m_, rtol=3e-1)
def test_PMF():
# Compute samples
numpy.random.seed(0)
nsamp = 100
samples = numpy.concatenate((-5+numpy.random.randn(nsamp//2),
5+numpy.random.randn(nsamp//2)))
# Compute PMF
y = numpy.random.uniform(-10, 10, 10)
m = PMF(samples, y)
# Compute PMF via monte carlo
N = 100000
kernel = scipy.stats.gaussian_kde(samples)
s = kernel.resample(N)[0]
m_ = [sum(kernel(s) <= kernel(y_i))/float(N) for y_i in y]
assert_allclose(m, m_, atol=3*N**-0.5)
# Compute PMF via quadrature
m_ = [scipy.integrate.quad(lambda x: kernel(x)*(kernel(x) <= kernel(y_i)),
-numpy.inf, numpy.inf, limit=500)[0]
for y_i in y]
assert_allclose(m, m_, atol=1e-4)
assert_allclose([0, 0], PMF(samples, [-1e3, 1e3]))
samples = [0, 0]
m = PMF(samples, y)
assert_allclose(m, numpy.zeros_like(y))
def test_compute_pmf():
with pytest.raises(TypeError):
compute_pmf(None, None, wrong_argument=None)
cache = '.test_cache/test'
numpy.random.seed(0)
nsamp = 5000
a, b, e, f = 0, 1, 0, 1
m = numpy.random.normal(a, b, nsamp)
c = numpy.random.normal(e, f, nsamp)
nx = 100
x = numpy.linspace(-1, 1, nx)
fsamps = (numpy.outer(x, m) + c)
ny = 100
y = numpy.linspace(-3, 3, ny)
assert(not os.path.isfile(cache + '_masses.pkl'))
m = compute_pmf(fsamps, y, cache=cache)
assert(os.path.isfile(cache + '_masses.pkl'))
m_ = [gaussian_pmf(y, a*xi+e, numpy.sqrt(b**2*xi**2+f**2)) for xi in x]
assert_allclose(m.transpose(), m_, atol=3e-1)
m = compute_pmf(fsamps, y, cache=cache)
assert_allclose(m.transpose(), m_, atol=3e-1)
rmtree('.test_cache')
| score: 2.234375 | int_score: 2 |

| homeassistant/components/vera/config_flow.py | liangleslie/core | max_stars_count: 2 | id: 11717 |
"""Config flow for Vera."""
from __future__ import annotations
from collections.abc import Mapping
import logging
import re
from typing import Any
import pyvera as pv
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_EXCLUDE, CONF_LIGHTS, CONF_SOURCE
from homeassistant.core import callback
from homeassistant.helpers import entity_registry as er
from .const import CONF_CONTROLLER, CONF_LEGACY_UNIQUE_ID, DOMAIN
LIST_REGEX = re.compile("[^0-9]+")
_LOGGER = logging.getLogger(__name__)
def fix_device_id_list(data: list[Any]) -> list[int]:
"""Fix the id list by converting it to a supported int list."""
return str_to_int_list(list_to_str(data))
def str_to_int_list(data: str) -> list[int]:
"""Convert a string to an int list."""
return [int(s) for s in LIST_REGEX.split(data) if len(s) > 0]
def list_to_str(data: list[Any]) -> str:
"""Convert an int list to a string."""
return " ".join([str(i) for i in data])
def new_options(lights: list[int], exclude: list[int]) -> dict:
"""Create a standard options object."""
return {CONF_LIGHTS: lights, CONF_EXCLUDE: exclude}
def options_schema(options: Mapping[str, Any] = None) -> dict:
"""Return options schema."""
options = options or {}
return {
vol.Optional(
CONF_LIGHTS,
default=list_to_str(options.get(CONF_LIGHTS, [])),
): str,
vol.Optional(
CONF_EXCLUDE,
default=list_to_str(options.get(CONF_EXCLUDE, [])),
): str,
}
def options_data(user_input: dict) -> dict:
"""Return options dict."""
return new_options(
str_to_int_list(user_input.get(CONF_LIGHTS, "")),
str_to_int_list(user_input.get(CONF_EXCLUDE, "")),
)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options for the component."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Init object."""
self.config_entry = config_entry
async def async_step_init(self, user_input: dict = None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(
title="",
data=options_data(user_input),
)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(options_schema(self.config_entry.options)),
)
class VeraFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Vera config flow."""
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlowHandler:
"""Get the options flow."""
return OptionsFlowHandler(config_entry)
async def async_step_user(self, user_input: dict = None):
"""Handle user initiated flow."""
if user_input is not None:
return await self.async_step_finish(
{
**user_input,
**options_data(user_input),
**{CONF_SOURCE: config_entries.SOURCE_USER},
**{CONF_LEGACY_UNIQUE_ID: False},
}
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{**{vol.Required(CONF_CONTROLLER): str}, **options_schema()}
),
)
async def async_step_import(self, config: dict):
"""Handle a flow initialized by import."""
# If there are entities with the legacy unique_id, then this imported config
# should also use the legacy unique_id for entity creation.
entity_registry = er.async_get(self.hass)
use_legacy_unique_id = (
len(
[
entry
for entry in entity_registry.entities.values()
if entry.platform == DOMAIN and entry.unique_id.isdigit()
]
)
> 0
)
return await self.async_step_finish(
{
**config,
**{CONF_SOURCE: config_entries.SOURCE_IMPORT},
**{CONF_LEGACY_UNIQUE_ID: use_legacy_unique_id},
}
)
async def async_step_finish(self, config: dict):
"""Validate and create config entry."""
base_url = config[CONF_CONTROLLER] = config[CONF_CONTROLLER].rstrip("/")
controller = pv.VeraController(base_url)
# Verify the controller is online and get the serial number.
try:
await self.hass.async_add_executor_job(controller.refresh_data)
except RequestException:
_LOGGER.error("Failed to connect to vera controller %s", base_url)
return self.async_abort(
reason="cannot_connect", description_placeholders={"base_url": base_url}
)
await self.async_set_unique_id(controller.serial_number)
self._abort_if_unique_id_configured(config)
return self.async_create_entry(title=base_url, data=config)
| score: 2.15625 | int_score: 2 |

| bin/setup_spectrum.py | MFSJMenger/pysurf | max_stars_count: 7 | id: 11718 |
<filename>bin/setup_spectrum.py<gh_stars>1-10
import os
from shutil import copy2 as copy
#
from pysurf.logger import get_logger
from pysurf.sampling import Sampling
from pysurf.setup import SetupBase
from pysurf.utils import exists_and_isfile
from pysurf.spp import SurfacePointProvider
from colt import Colt
from sp_calc import SinglePointCalculation
class SetupSpectrum(SetupBase):
folder = 'spectrum'
subfolder = 'condition'
_questions = """
# Number of conditions
n_cond = :: int
# Number of states
nstates = :: int
#Properties that should be calculated
properties = ['energy', 'fosc'] :: list
# Database containing all the initial conditions
sampling_db = sampling.db :: existing_file
# Filepath for the inputfile of the Surface Point Provider
spp = spp.inp :: file
# Filepath for the inputfile of the Single Point Calculation
sp_calc = sp_calc.inp :: file
"""
def __init__(self, config):
""" Class to create initial conditions due to user input. Initial conditions are saved
in a file for further usage.
"""
logger = get_logger('setup_spectrum.log', 'setup_spectrum')
logger.header('SETUP SPECTRUM', config)
SetupBase.__init__(self, logger)
#
logger.info(f"Opening sampling database {config['sampling_db']}")
sampling = Sampling.from_db(config['sampling_db'], logger=logger)
if not exists_and_isfile(config['spp']):
presets="""
use_db = no
"""
logger.info(f"Setting up SPP inputfile: {config['spp']}")
SurfacePointProvider.generate_input(config['spp'], config=None, presets=presets)
else:
logger.info(f"Using SPP inputfile as it is")
if not exists_and_isfile(config['sp_calc']):
presets=f"""
properties = {config['properties']}
nstates = {config['nstates']}
init_db = init.db
"""
logger.info(f"Setting up inputfile for the single point calculations")
SinglePointCalculation.generate_input(config['sp_calc'], config=None, presets=presets)
else:
logger.info(f"Using inputfile for the single point calculations as it is")
logger.info("Starting to prepare the folders...")
self.setup_folders(range(config['n_cond']), config, sampling)
@classmethod
def from_config(cls, config):
return cls(config)
def setup_folder(self, number, foldername, config, sampling):
copy(config['spp'], foldername)
copy(config['sp_calc'], foldername)
#name of new database
initname = os.path.join(foldername, 'init.db')
#get info from old db and adjust
variables = sampling.info['variables']
variables += config['properties']
dimensions = sampling.info['dimensions']
dimensions['nstates'] = config['nstates']
dimensions['nactive'] = config['nstates']
#setup new database
new_sampling = Sampling.create_db(initname, variables, dimensions, sampling.system, sampling.modes, model=sampling.model, sp=True)
#copy condition to new db
condition = sampling.get_condition(number)
new_sampling.write_condition(condition, 0)
if __name__=="__main__":
SetupSpectrum.from_commandline()
| score: 2.4375 | int_score: 2 |

| CeV - Gustavo Guanabara/exerc033.py | us19861229c/Meu-aprendizado-Python | max_stars_count: 1 | id: 11719 |
<gh_stars>1-10
#033: read three numbers and report which is the largest and which is the smallest:
print("Digite 3 numeros:")
n = int(input("Numero 1: "))
maiorn = n
menorn = n
n = int(input("Numero 2: "))
if n > maiorn:
maiorn = n
if n < menorn:
menorn = n
n = int(input("Numero 3: "))
if n > maiorn:
maiorn = n
if n < menorn:
menorn = n
print(f"o maior numero foi {maiorn} e o menor foi {menorn}")
| score: 3.875 | int_score: 4 |

| test/test_dot.py | croqaz/dot | max_stars_count: 0 | id: 11720 |
<reponame>croqaz/dot
import pytest
from prop import strict_get
from prop import get as dot_get
class A:
def __init__(self, val):
self.val = val
def test_dot_get_list():
assert dot_get(['asd'], '0') == dot_get(['asd'], ['0']) == 'asd'
data = {'nested': [0, False, 'foo']}
assert dot_get(data, 'nested.0') == 0
assert dot_get(data, 'nested.1') is False
assert dot_get(data, 'nested.2') == 'foo'
assert dot_get(data, ['nested', '0']) == 0
assert dot_get(data, ['nested', '1']) is False
assert dot_get(data, ['nested', b'1']) is False
assert dot_get(data, ('nested', '2')) == 'foo'
assert dot_get(data, ('nested', b'2')) == 'foo'
assert dot_get(data, ['nested', 1]) is False
assert dot_get(data, ('nested', 2)) == 'foo'
# inexistent
assert dot_get(data, 'nested.9') is None
assert dot_get(data, 'nested.9', 'default') == 'default'
assert dot_get(data, ('nested', 9)) is None
assert dot_get(data, ['nested', '9']) is None
assert dot_get(data, ['nested', b'9']) is None
assert dot_get(data, ['nested', 9], 'default') == 'default'
assert dot_get(data, ('nested', '9'), 'default') == 'default'
assert dot_get(data, ('nested', b'9'), 'default') == 'default'
def test_dot_get_dict():
data = {'a': 'a', 'nested': {'x': 'y', 'int': 0, 'null': None}}
assert dot_get(data, 'a') == 'a'
assert dot_get(data, 'nested.x') == 'y'
assert dot_get(data, 'nested.int') == 0
assert dot_get(data, 'nested.null') is None
assert dot_get(data, ('nested', 'x')) == 'y'
assert dot_get(data, ['nested', 'int']) == 0
assert dot_get(data, ['nested', 'null']) is None
# inexistent
assert dot_get(data, 'nope') is None
assert dot_get(data, 'nested.9') is None
assert dot_get(data, 'nope', 'default') == 'default'
assert dot_get(data, ['nope']) is None
assert dot_get(data, ['nope'], 'default') == 'default'
assert dot_get(data, ('nested', 9)) is None
def test_str_dot_get_obj():
a = A(1)
assert dot_get(a, 'val') == 1
assert dot_get(a, 'nope') is None
assert dot_get(a, 'nope', 'default') == 'default'
a = A([0, False, 'foo'])
assert dot_get(a, 'val.0') == 0
assert dot_get(a, 'val.1') is False
assert dot_get(a, 'val.2') == 'foo'
assert dot_get(a, 'nope') is None
assert dot_get(a, 'nope', 'default') == 'default'
def test_dot_get_mixed():
data = {
'nested': {
1: '1',
'x': 'y',
None: 'null',
},
'list': [[[None, True, 9]]],
b'byte': b'this',
}
assert dot_get(data, 'list.0.0.1') is True
assert dot_get(data, 'list.0.0.2') == 9
assert dot_get(data, ('list', 0, 0, 1)) is True
assert dot_get(data, ['list', 0, 0, 2]) == 9
# String paths can only access string keys, so this won't work:
# assert dot_get(data, 'nested.1') == '1'
# assert dot_get(data, 'nested.None') == 'null'
# But this works:
assert dot_get(data, [b'byte']) == b'this'
assert dot_get(data, ['nested', 1]) == '1'
assert dot_get(data, ['nested', None]) == 'null'
a = A(data)
assert dot_get(a, 'val.nested.x') == 'y'
assert dot_get(a, 'val.list.0.0.1') is True
assert dot_get(a, ['val', 'list', 0, 0, 1]) is True
assert dot_get(a, ('val', 'list', 0, 0, 2)) == 9
def test_circular_refs():
c = A(1)
b = A(c)
a = A(b)
assert dot_get(c, 'val') == 1
assert dot_get(b, 'val') is c
assert dot_get(a, 'val') is b
assert dot_get(a, 'val.val.val') == 1
assert dot_get(a, ['val', 'val', 'val']) == 1
# Create cyclic ref
c.val = a
assert dot_get(c, 'val') == a
assert dot_get(c, 'val.val.val.val') == a
assert dot_get(c, ['val', 'val', 'val', 'val']) == a
def test_str_dot_strict_get():
data = {
'1': 1,
'a': A(7),
'nested': {
'x': 'y',
'int': 0,
'null': None,
},
'list': [[[None, True, 9]]],
}
assert strict_get(data, '1') == 1
assert strict_get(data, 'a.val') == 7
assert strict_get(data, 'nested.x') == 'y'
assert strict_get(data, 'nested.int') == 0
assert strict_get(data, 'nested.null') is None
assert strict_get(data, 'list.0.0.1') is True
assert strict_get(data, 'list.0.0.-1') == 9
with pytest.raises(KeyError):
assert strict_get(data, 'nope') is None
with pytest.raises(IndexError):
assert strict_get(data, 'list.9') is None
def test_str_dot_set_mix():
data = {
'a': 'a',
'nested': {
'x': 'x',
'int': 0,
'list': ['y', 'n'],
},
}
assert strict_get(data, 'nested.x') == 'x'
assert strict_get(data, 'nested.list.0') == 'y'
nested = dot_get(data, 'nested')
nested['x'] = 'yyy'
li = strict_get(data, 'nested.list')
li.insert(0, 'z')
assert strict_get(data, 'nested.x') == 'yyy'
assert strict_get(data, 'nested.list.0') == 'z'
def test_crappy_path():
with pytest.raises(TypeError):
assert dot_get(['asd'], True)
with pytest.raises(TypeError):
assert dot_get(['asd'], None)
with pytest.raises(TypeError):
assert dot_get(['asd'], 0)
| score: 2.453125 | int_score: 2 |

| site_scons/site_tools/mplabx_nbproject/__init__.py | kbhomes/ps2plus | max_stars_count: 0 | id: 11721 |
from pprint import pprint
import SCons.Builder
from SCons.Script import *
import json
import os
import copy
import collections
import xml.etree.ElementTree as ET
from mplabx import MPLABXProperties
MAKEFILE_TEXT = '''
MKDIR=mkdir
CP=cp
CCADMIN=CCadmin
RANLIB=ranlib
build: .build-post
.build-pre:
.build-post: .build-impl
clean: .clean-post
.clean-pre:
.clean-post: .clean-impl
clobber: .clobber-post
.clobber-pre:
.clobber-post: .clobber-impl
all: .all-post
.all-pre:
.all-post: .all-impl
help: .help-post
.help-pre:
.help-post: .help-impl
include nbproject/Makefile-impl.mk
include nbproject/Makefile-variables.mk
'''
PROJECT_XML_TEXT = '''
<project>
<type>com.microchip.mplab.nbide.embedded.makeproject</type>
<configuration>
<data>
<name />
<sourceRootList />
<confList />
</data>
</configuration>
</project>
'''
CONFIGURATIONS_XML_TEXT = '''
<configurationDescriptor version="65">
<logicalFolder name="root" displayName="root" projectFiles="true" />
<sourceRootList />
<projectmakefile>Makefile</projectmakefile>
<confs />
</configurationDescriptor>
'''
CONFIGURATION_ELEMENT_TEXT = '''
<conf type="2">
<toolsSet>
<targetDevice />
<languageToolchain />
<languageToolchainVersion />
</toolsSet>
<HI-TECH-COMP />
<HI-TECH-LINK />
<XC8-config-global />
</conf>
'''
def nested_dict():
return collections.defaultdict(nested_dict)
def merge(destination, source):
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
merge(node, value)
else:
destination[key] = value
return destination
def build_mplabx_nbproject_configuration(
env,
name: str,
properties: MPLABXProperties,
additional_compiler_properties: dict[str, str] = {},
additional_linker_properties: dict[str, str] = {},
additional_xc8_properties: dict[str, str] = {},
):
defines_str = ';'.join(env['CPPDEFINES'])
includes_str = ';'.join([env.Dir(path).abspath for path in env['CPPPATH']])
default_compiler_properties = {
'define-macros': f'{defines_str}',
'extra-include-directories': f'{includes_str}',
}
root = ET.fromstring(CONFIGURATION_ELEMENT_TEXT)
root.set('name', name)
root.find('./toolsSet/targetDevice').text = properties.device
root.find('./toolsSet/languageToolchain').text = properties.toolchain
root.find('./toolsSet/languageToolchainVersion').text = properties.toolchain_version
group_properties_mapping = {
'HI-TECH-COMP': default_compiler_properties | properties.compiler_properties | additional_compiler_properties,
'HI-TECH-LINK': properties.linker_properties | additional_linker_properties,
'XC8-config-global': properties.xc8_properties | additional_xc8_properties,
}
for group_name, group_properties in group_properties_mapping.items():
for key, value in group_properties.items():
root.find(group_name).append(ET.Element('property', key=key, value=value))
# ET.dump(root)
return env.Value(root)
def _create_file_hierarchy(source_relpaths: list[str]):
hierarchy = nested_dict()
# Put all entries into the hierarchy, keyed from dirname to basename
for source_relpath in sorted(source_relpaths):
dirname, basename = os.path.split(source_relpath)
hierarchy[dirname][basename] = source_relpath
# Split all directory keys further
while True:
found_nested = False
modified_hierarchy = nested_dict()
for parent_key, entries in hierarchy.items():
dirname, basename = os.path.split(parent_key)
if dirname:
merge(modified_hierarchy[dirname][basename], entries)
found_nested = True
else:
merge(modified_hierarchy[parent_key], entries)
hierarchy = modified_hierarchy
if not found_nested:
break
return hierarchy
def _build_xml_files(project_name: str, project_dir, confs: list, source_files: list[str]):
# Create the `configurations.xml` and `project.xml` file
configurations_xml_root = ET.fromstring(CONFIGURATIONS_XML_TEXT)
project_xml_root = ET.fromstring(PROJECT_XML_TEXT)
project_xml_root.set('xmlns', 'http://www.netbeans.org/ns/project/1')
project_xml_root.find('./configuration/data').set('xmlns', 'http://www.netbeans.org/ns/make-project/1')
project_xml_root.find('./configuration/data/name').text = project_name
# Add each configuration to the two XML files
for configuration_node in confs:
# Modify each configuration to make absolute paths relative to the project directory
modified_node = copy.deepcopy(configuration_node.read())
for includes_element in modified_node.findall('.//property[@key="extra-include-directories"]'):
includes_value = includes_element.get('value')
includes_relative = ';'.join([os.path.relpath(abspath, project_dir.abspath) for abspath in includes_value.split(';')])
includes_element.set('value', includes_relative)
configurations_xml_root.find('./confs').append(modified_node)
# Update the `project.xml` configuration list
project_conf_list_element = project_xml_root.find('./configuration/data/confList')
project_conf_elem_element = ET.SubElement(project_conf_list_element, 'confElem')
project_conf_name_element = ET.SubElement(project_conf_elem_element, 'name')
project_conf_name_element.text = configuration_node.read().get('name')
project_conf_text_element = ET.SubElement(project_conf_elem_element, 'text')
project_conf_text_element.text = '2'
# Generate the source root list, which will have a single root (common path for all sources)
common_root_path = os.path.commonpath([os.path.abspath(path) for path in source_files])
source_root_relpath = os.path.relpath(common_root_path, project_dir.abspath)
configurations_source_root_element = ET.Element('Elem')
configurations_source_root_element.text = source_root_relpath
configurations_xml_root.find('./sourceRootList').append(configurations_source_root_element)
project_source_root_element = ET.Element('sourceRootElem')
project_source_root_element.text = os.path.relpath(common_root_path, project_dir.abspath)
project_xml_root.find('./configuration/data/sourceRootList').append(project_source_root_element)
# Generate all logical folders and private files
root_logical_folder = configurations_xml_root.find('./logicalFolder[@name="root"]')
source_relpaths = [os.path.relpath(source_path, common_root_path) for source_path in source_files]
source_hierarchy = _create_file_hierarchy(source_relpaths)
def _walk_tree(parent_element: ET.Element, tree: dict):
for key, data in tree.items():
if isinstance(data, dict):
folder_element = ET.SubElement(parent_element, 'logicalFolder', name=key, displayName=key, projectFiles="true")
_walk_tree(folder_element, data)
elif isinstance(data, str):
item_element = ET.SubElement(parent_element, 'itemPath')
item_element.text = os.path.relpath(data, project_dir.abspath)
_walk_tree(root_logical_folder, source_hierarchy)
# Generate an item for the build Makefile
ET.SubElement(root_logical_folder, 'itemPath').text = 'Makefile'
return (configurations_xml_root, project_xml_root)
def build_mplabx_nbproject(target, source, env):
'''
target - (singleton list) - Directory node to the project folder
source - (list) - XML value nodes for each project configuration
'''
project_dir = target[0]
nbproject_dir = project_dir.Dir('nbproject')
configurations_xml_file = nbproject_dir.File('configurations.xml')
project_xml_file = nbproject_dir.File('project.xml')
makefile_file = project_dir.File('Makefile')
# Make the directories
env.Execute(Mkdir(project_dir))
env.Execute(Mkdir(nbproject_dir))
# Generate the XML files
confs = source
configurations_xml_root, project_xml_root = _build_xml_files(
project_name=os.path.basename(str(project_dir)),
project_dir=project_dir,
confs=confs,
source_files=env['source_files'])
with open(str(configurations_xml_file), 'w') as f:
ET.indent(configurations_xml_root, space=' ')
ET.ElementTree(configurations_xml_root).write(f, encoding='unicode')
with open(str(project_xml_file), 'w') as f:
ET.indent(project_xml_root, space=' ')
ET.ElementTree(project_xml_root).write(f, encoding='unicode')
with open(str(makefile_file), 'w') as f:
f.write(MAKEFILE_TEXT)
_mplabx_nbproject_builder = SCons.Builder.Builder(action=build_mplabx_nbproject)
def generate(env):
env.AddMethod(build_mplabx_nbproject_configuration, 'MplabxNbprojectConfiguration')
env['BUILDERS']['MplabxNbproject'] = _mplabx_nbproject_builder
def exists(env):
return 1
| score: 2.28125 | int_score: 2 |

| lib/autoconnect/example/test_server.py | simotek/autoconnect | max_stars_count: 0 | id: 11722 |
<gh_stars>0
#
# test_server.py
#
# Copyright (C) 2001-2007 <NAME>.
# Email: <EMAIL>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library (see the file LICENSE.TXT); if not,
# write to the Free Software Foundation, Inc., 59 Temple Place,
# Suite 330, Boston, MA 02111-1307 USA.
#
# Date: 2001/12/06 15:54:30
#
import sys
import socket
import xmlrpclib
import autoconnect
from SimpleXMLRPCServer import SimpleXMLRPCServer
class Person:
def greet(self, name=''):
msg = "Hello, nice to meet you"
if name:
msg = "%s %s" % (msg, name)
return msg
class Server:
"""This server runs a simple XML-RPC server and its clients
automatically find it. Its magic ;)
"""
def __init__(self):
self.server = None
self.broadcaster = None
def main(self):
print "Starting XML-RPC server http://localhost:8000"
self.server = SimpleXMLRPCServer(("localhost", 8000))
self.server.register_instance(Person())
        # Start the beacon to tell clients the server's XML-RPC URI:
print "Homing beacon running. Press Ctrl-C to exit."
self.broadcaster = autoconnect.beacon("http://localhost:8000")
try:
self.server.serve_forever()
        except KeyboardInterrupt:
pass
self.server.server_close()
if __name__ == '__main__':
server = Server()
server.main()
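
# A minimal client sketch for the server above (run from a separate Python 2
# process once the server is up; "Alice" is just an illustrative argument):
#
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy("http://localhost:8000")
#   print proxy.greet("Alice")   # -> "Hello, nice to meet you Alice"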
| 2.59375
| 3
|
tests/test_loop_seer.py
|
Kyle-Kyle/angr
| 6,132
|
11723
|
<gh_stars>1000+
import os
import sys
import angr
import nose.tools
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
def test_various_loops():
p = angr.Project(os.path.join(test_location, 'x86_64', 'various_loops'), auto_load_libs=False)
cfg = p.analyses.CFGFast(normalize=True)
state = p.factory.entry_state()
state.register_plugin('loop_data', angr.state_plugins.SimStateLoopData())
dummy = p.loader.main_object.get_symbol('dummy')
bvs = state.solver.BVS(dummy.name, 8 * dummy.size)
state.memory.store(dummy.rebased_addr, bvs, endness='Iend_LE')
simgr = p.factory.simulation_manager(state)
simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg, functions=None, bound=None))
simgr.run()
nose.tools.assert_equal(len(simgr.deadended), 10)
nose.tools.assert_equal(len(simgr.deadended[0].loop_data.back_edge_trip_counts), 14)
for i, d in enumerate(simgr.deadended):
f = p.kb.functions.function(name='symbolic_loop')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0], i)
f = p.kb.functions.function(name='for_loop')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0], 9)
f = p.kb.functions.function(name='while_loop')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0], 9)
f = p.kb.functions.function(name='do_while_loop')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(d.loop_data.header_trip_counts[l.entry.addr][0], 9)
f = p.kb.functions.function(name='nullify')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(len(d.loop_data.back_edge_trip_counts[l.entry.addr]), 8)
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0], 9)
f = p.kb.functions.function(name='nested_for_loop')
ol = p.analyses.LoopFinder(functions=[f]).loops[0]
il = ol.subloops[0]
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[ol.entry.addr][0], 3)
nose.tools.assert_equal(len(d.loop_data.back_edge_trip_counts[il.entry.addr]), 3)
nose.tools.assert_true(all(s == 3 for s in d.loop_data.back_edge_trip_counts[il.entry.addr]))
f = p.kb.functions.function(name='nested_while_loop')
ol = p.analyses.LoopFinder(functions=[f]).loops[0]
il = ol.subloops[0]
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[ol.entry.addr][0], 3)
nose.tools.assert_equal(len(d.loop_data.back_edge_trip_counts[il.entry.addr]), 3)
nose.tools.assert_true(all(s == 3 for s in d.loop_data.back_edge_trip_counts[il.entry.addr]))
f = p.kb.functions.function(name='nested_do_while_loop')
ol = p.analyses.LoopFinder(functions=[f]).loops[0]
il = ol.subloops[0]
nose.tools.assert_equal(d.loop_data.header_trip_counts[ol.entry.addr][0], 3)
nose.tools.assert_equal(len(d.loop_data.header_trip_counts[il.entry.addr]), 3)
nose.tools.assert_true(all(s == 3 for s in d.loop_data.header_trip_counts[il.entry.addr]))
f = p.kb.functions.function(name='break_for_loop')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0], 9)
f = p.kb.functions.function(name='break_do_while_loop')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(d.loop_data.header_trip_counts[l.entry.addr][0], 9)
def test_loops_with_invalid_parameter():
p = angr.Project(os.path.join(test_location, 'x86_64', 'test_loops'), auto_load_libs=False)
state = p.factory.entry_state()
state.register_plugin('loop_data', angr.state_plugins.SimStateLoopData())
simgr = p.factory.simulation_manager(state)
simgr.use_technique(angr.exploration_techniques.LoopSeer(functions=['main', 0x1234], bound=None))
simgr.run()
nose.tools.assert_equal(len(simgr.deadended[0].loop_data.back_edge_trip_counts), 3)
nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x400665][0], 10)
nose.tools.assert_equal(len(simgr.deadended[0].loop_data.back_edge_trip_counts[0x400665]), 10)
nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x400675][0], 10)
nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x4006b2][0], 100)
def test_arrays():
p = angr.Project(os.path.join(test_location, 'x86_64', 'test_arrays'), auto_load_libs=False)
cfg = p.analyses.CFGFast(normalize=True)
state = p.factory.entry_state()
state.register_plugin('loop_data', angr.state_plugins.SimStateLoopData())
simgr = p.factory.simulation_manager(state)
simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg, functions='main', bound=None))
simgr.run()
nose.tools.assert_equal(len(simgr.deadended[0].loop_data.back_edge_trip_counts), 2)
nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x400636][0], 26)
nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x4005fd][0], 26)
def test_loop_limiter():
p = angr.Project(os.path.join(test_location, 'x86_64', 'test_arrays'), auto_load_libs=False)
cfg = p.analyses.CFGFast(normalize=True)
state = p.factory.entry_state()
state.register_plugin('loop_data', angr.state_plugins.SimStateLoopData())
simgr = p.factory.simulation_manager(state)
simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg, functions='main', bound=5))
simgr.run()
nose.tools.assert_true('spinning' in simgr.stashes)
nose.tools.assert_equal(simgr.spinning[0].loop_data.back_edge_trip_counts[0x4005fd][0], 6)
def test_loop_limiter_constant_loop():
p = angr.Project(os.path.join(test_location, 'x86_64', 'constant_loopseer'), auto_load_libs=False)
cfg = p.analyses.CFGFast(normalize=True)
state = p.factory.entry_state()
simgr = p.factory.simulation_manager(state)
simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg, functions='main', bound=5, limit_concrete_loops=False))
simgr.run()
nose.tools.assert_true(simgr.deadended[0].regs.eax.concrete)
val = simgr.deadended[0].solver.eval_one(simgr.deadended[0].regs.eax)
nose.tools.assert_equal(val, 420)
if __name__ == "__main__":
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
g = globals().copy()
for k, v in g.items():
if k.startswith("test_") and hasattr(v, '__call__'):
print(k)
v()
| 1.867188
| 2
|
shongololo/Imet_serial.py
|
swyngaard/shongololo
| 0
|
11724
|
import serial , time , os
import serial.tools.list_ports as port
import logging
sho_logger = logging.getLogger("shongololo_logger")
def open_imets(devices):
"""Tries to open as many imet device serial ports as there are
:return:
a list of socket handles
"""
imet_sockets = []
for d in range(len(devices)): # Create list of imet open ports
port = str(devices["Imet" + str(d)])
try:
ser = serial.Serial(port, baudrate=57600, parity=serial.PARITY_NONE, bytesize=serial.EIGHTBITS,stopbits=serial.STOPBITS_ONE, timeout=3.0, xonxoff=False)
imet_sockets.append(ser)
sho_logger.info("\n Successfully opened Imet device on port {}".format(devices["Imet" + str(d)]))
except serial.SerialException as e:
sho_logger.error(e)
sho_logger.critical("\nFailed to open imet on port {}".format(devices["Imet" + str(d)]))
return imet_sockets
def find_imets():
"""
Finds available imet serial ports and determines which device is attached to which /dev/ path
:rtype: object
:return:
A dictionary of devices labled as" imet<number starting from 0>
"""
device_dict = {}
imets = 0
portlist = list(port.comports())
for p in portlist:
sp = str(p)
if "FT230" in sp:
path = sp.split('-')[0]
device_dict["Imet" + str(imets)] = path[:-1]
imets = imets + 1
sho_logger.info("Found an Imet device on port: %s",path)
status=0
else:
pass
if imets==0:
sho_logger.error("No Imet devices found.")
else:
sho_logger.info("Found {} Imet devices".format(imets))
return device_dict
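
# Minimal usage sketch: discover attached Imet devices, then open a serial port
# for each one. Assumes handlers for "shongololo_logger" are configured
# elsewhere; otherwise log output is simply dropped.
if __name__ == "__main__":
    found_devices = find_imets()
    sockets = open_imets(found_devices)
    for sock in sockets:
        sock.close()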
| 2.875
| 3
|
libs/external_libs/docutils-0.4/test/test_transforms/test_peps.py
|
google-code-export/django-hotclub
| 3
|
11725
|
#! /usr/bin/env python
# Author: <NAME>
# Contact: <EMAIL>
# Revision: $Revision: 3915 $
# Date: $Date: 2005-10-02 03:06:42 +0200 (Sun, 02 Oct 2005) $
# Copyright: This module has been placed in the public domain.
"""
Tests for docutils.transforms.peps.
"""
from __init__ import DocutilsTestSupport
from docutils.transforms.peps import TargetNotes
from docutils.parsers.rst import Parser
def suite():
parser = Parser()
s = DocutilsTestSupport.TransformTestSuite(parser)
s.generateTests(totest)
return s
totest = {}
totest['target_notes'] = ((TargetNotes,), [
["""\
No references or targets exist, therefore
no "References" section should be generated.
""",
"""\
<document source="test data">
<paragraph>
No references or targets exist, therefore
no "References" section should be generated.
"""],
["""\
A target exists, here's the reference_.
A "References" section should be generated.
.. _reference: http://www.example.org
""",
"""\
<document source="test data">
<paragraph>
A target exists, here's the \n\
<reference name="reference" refname="reference">
reference
\n\
<footnote_reference auto="1" ids="id3" refname="TARGET_NOTE: id2">
.
A "References" section should be generated.
<target ids="reference" names="reference" refuri="http://www.example.org">
<section ids="id1">
<title>
References
<footnote auto="1" ids="id2" names="TARGET_NOTE:\ id2">
<paragraph>
<reference refuri="http://www.example.org">
http://www.example.org
"""],
])
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| 2.484375
| 2
|
rental_property/migrations/0011_alter_rentalunit_options.py
|
shumwe/rental-house-management-system
| 1
|
11726
|
# Generated by Django 4.0.2 on 2022-03-15 22:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('rental_property', '0010_alter_rentalunit_status'),
]
operations = [
migrations.AlterModelOptions(
name='rentalunit',
options={'verbose_name_plural': 'Rental Houses'},
),
]
| 1.484375
| 1
|
AxesFrame.py
|
Toyuri453/RSSP-Python-demo
| 1
|
11727
|
<filename>AxesFrame.py
import Terminal
class Axes():
def __init__(self, weak_terminal : 'Terminal.CartesianPoint'):
# self._initiator_x = weak_terminal._x
# self._initiator_y = weak_terminal._y
self._initiator = Terminal.CartesianPoint(0.0, 0.0, "UWB", "initiator")
self._weak_terminal = weak_terminal
self._terminal_set = {self._initiator._terminal_name : self._initiator, self._weak_terminal._terminal_name : self._weak_terminal}
self._terminal_measuring_point_set = {'Set' : {}} #Fill Later
print(self._terminal_set)
def add_terminal(self, terminal : 'Terminal.CartesianPoint'):
print("[DATA] Add Terminal {0} ".format(terminal))
self._terminal_set[terminal._terminal_name] = terminal
def show_terminal_names(self):
for key in self._terminal_set:
print("[DATA] Terminal Name: {0}, Color: {1}".format(key, self._terminal_set[key]._terminal_color))
| 2.703125
| 3
|
api/generate.py
|
almeida-matheus/playlist-reader
| 0
|
11728
|
<reponame>almeida-matheus/playlist-reader
import re
from bs4 import BeautifulSoup # beautifulsoup4
import requests # requests
HEADER = {
"User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
}
def catch_info(base,pattern,str_add=''):
    '''base: parsed page to search, pattern: compiled regex, str_add: string prepended to each match if necessary'''
array = []
for match in pattern.finditer(base.prettify()):
array.append(str_add+match.group(1))
    return list(dict.fromkeys(array))  # de-duplicate while preserving order
def generate(playlist_param):
try:
link = 'https://www.youtube.com/playlist?list=' + playlist_param
response = requests.get(link, headers=HEADER)
soup = BeautifulSoup(response.text, "html.parser")
pattern_title = re.compile(r'"title":{"runs":\[{"text":"(.*?)"}\],"accessibility"')
pattern_img = re.compile(r'{"url":"https:\/\/i(.*?)?sqp=')
pattern_video = re.compile(r'{"url":"\/watch(.*?)\\')
array_title = catch_info(soup,pattern_title)
array_img = catch_info(soup,pattern_img,'https://i')
array_video = catch_info(soup,pattern_video,'https://www.youtube.com/watch')
list_array_yt = list(zip(array_title,array_img,array_video))
response = []
for i, info in enumerate(list_array_yt):
response.append({"id": i, "title": info[0], "link_img": info[1], "link_video": info[2]})
return response
except Exception as e:
print(e)
return False
# response = generate('PLMKi-ss_sEoOZw9TB4iCrevTK60uY8wg0')
# print(response)
| 3.109375
| 3
|
homeassistant/components/deconz/const.py
|
hoverduck/core
| 1
|
11729
|
<filename>homeassistant/components/deconz/const.py
"""Constants for the deCONZ component."""
import logging
LOGGER = logging.getLogger(__package__)
DOMAIN = "deconz"
CONF_BRIDGE_ID = "bridgeid"
CONF_GROUP_ID_BASE = "group_id_base"
DEFAULT_PORT = 80
DEFAULT_ALLOW_CLIP_SENSOR = False
DEFAULT_ALLOW_DECONZ_GROUPS = True
DEFAULT_ALLOW_NEW_DEVICES = True
CONF_ALLOW_CLIP_SENSOR = "allow_clip_sensor"
CONF_ALLOW_DECONZ_GROUPS = "allow_deconz_groups"
CONF_ALLOW_NEW_DEVICES = "allow_new_devices"
CONF_MASTER_GATEWAY = "master"
SUPPORTED_PLATFORMS = [
"binary_sensor",
"climate",
"cover",
"light",
"scene",
"sensor",
"switch",
]
NEW_GROUP = "groups"
NEW_LIGHT = "lights"
NEW_SCENE = "scenes"
NEW_SENSOR = "sensors"
ATTR_DARK = "dark"
ATTR_OFFSET = "offset"
ATTR_ON = "on"
ATTR_VALVE = "valve"
DAMPERS = ["Level controllable output"]
WINDOW_COVERS = ["Window covering device", "Window covering controller"]
COVER_TYPES = DAMPERS + WINDOW_COVERS
POWER_PLUGS = ["On/Off light", "On/Off plug-in unit", "Smart plug"]
SIRENS = ["Warning device"]
SWITCH_TYPES = POWER_PLUGS + SIRENS
CONF_ANGLE = "angle"
CONF_GESTURE = "gesture"
CONF_XY = "xy"
| 1.679688
| 2
|
course_app/api/views.py
|
maks-nurgazy/diploma-project
| 0
|
11730
|
<reponame>maks-nurgazy/diploma-project
import json
from rest_framework.generics import ListAPIView, get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from course_app.api.serializers import CourseSerializer
from course_app.models import Course, Enrolled
from users.api.serializers import StudentSerializer
from users.models import Student
class CourseViewSet(ModelViewSet):
queryset = Course.objects.all()
serializer_class = CourseSerializer
class StudentCourseView(ListAPIView):
serializer_class = CourseSerializer
def get_queryset(self):
user = self.request.user
enrolls = user.enrolls
courses = []
for enroll in list(enrolls.all()):
courses.append(enroll.course)
return courses
class TeacherCourseView(ListAPIView):
serializer_class = CourseSerializer
def get_queryset(self):
teacher = self.request.user
return teacher.course_list
class CourseStudentsView(ListAPIView):
serializer_class = StudentSerializer
def get_queryset(self):
course_id = self.kwargs['course_id']
course = get_object_or_404(Course, id=course_id)
students = course.students
return students
class EnrollmentView(APIView):
def get(self, request, *args, **kwargs):
student = request.user
courses = Course.objects.filter(co_class=student.profile.st_class)
response = CourseSerializer(courses, many=True).data
return Response(response)
def post(self, request, *args, **kwargs):
courses = json.loads(request.body)['courses']
student = request.user
for course_id in courses:
Enrolled.objects.create(student=student, course_id=course_id)
return Response({"detail": "Enrolled"})
def put(self, request, *args, **kwargs):
pass
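
# A hypothetical urls.py for wiring these views (the route paths and basename
# are assumptions, not taken from the project; only <int:course_id> is required
# by CourseStudentsView above):
#
#   from django.urls import path
#   from rest_framework.routers import DefaultRouter
#   from course_app.api.views import (CourseViewSet, StudentCourseView, TeacherCourseView,
#                                      CourseStudentsView, EnrollmentView)
#
#   router = DefaultRouter()
#   router.register('courses', CourseViewSet, basename='course')
#
#   urlpatterns = router.urls + [
#       path('my-courses/', StudentCourseView.as_view()),
#       path('teaching/', TeacherCourseView.as_view()),
#       path('courses/<int:course_id>/students/', CourseStudentsView.as_view()),
#       path('enrollment/', EnrollmentView.as_view()),
#   ]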
| 2.203125
| 2
|
service/repository/repository_controller.py
|
yutiansut/cilantro
| 3
|
11731
|
<gh_stars>1-10
import os
import json
import logging
import yaml
from flask import Blueprint, jsonify, send_file, request, redirect
from service.errors import ApiError
from utils.repository import generate_repository_path, \
list_objects_in_repository
from utils.list_dir import list_dir
repository_controller = Blueprint('repository', __name__)
repository_dir = os.environ['REPOSITORY_DIR']
metadata_file = 'meta.json'
representation_dir = 'data'
sub_object_dir = 'parts'
viewers_config = os.path.join(os.environ['CONFIG_DIR'], "viewers.yml")
with open(viewers_config, 'r', encoding="utf-8") as viewers_file:
viewers = yaml.safe_load(viewers_file)
@repository_controller.route('', methods=['GET'], strict_slashes=False)
def list_repository():
"""
List the ids of all cilantro objects in the repository.
Returns a list of the object_ids
.. :quickref: Repository Controller; List IDs of objects in the repository
**Example request**:
.. sourcecode:: http
GET /repository/ HTTP/1.1
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
["foo", "bar"]
:reqheader Accept: application/json
:resheader Content-Type: application/json
:status 200: OK
:return: JSON array containing the ids of all cilantro objects in the
repository
"""
return jsonify(list_objects_in_repository())
@repository_controller.route('/object/<path:object_id>', methods=['GET'],
strict_slashes=False)
def get_object(object_id):
"""
    Retrieve a cilantro (sub)object in the repository folder.
Returns A JSON object containing metadata, representations and sub_objects
of the cilantro object. This can be a subobject as well.
.. :quickref: Repository Controller; Retrieve (sub)object in the repository
**Example request**:
.. sourcecode:: http
GET /repository/object/<object_id> HTTP/1.1
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
{
"metadata": {
"description": "[PDFs teilweise verfugbar]",
"identification": "year",
"number": "",
"ojs_id": "issue-test-188",
"volume": "",
"year": 2018
},
"representations": [
"origin"
],
"sub_objects": [
"part_0001",
"part_0002"
]
}
**Example response ERROR**:
.. sourcecode:: http
HTTP/1.1 404 NOT FOUND
{
"error": {
"code": "object_not_found",
"message": "No object with id test_object was found"
},
"success": false
}
:reqheader Accept: application/json
:param str object_id: The id of the object
:resheader Content-Type: application/json
:status 200: OK
:status 404: cilantro object was not found
:return: JSON object containing metadata, representations and sub_objects
of the cilantro (sub)object
"""
path = os.path.join(repository_dir, generate_repository_path(object_id))
if os.path.isdir(path):
with open(os.path.join(path, metadata_file)) as json_data:
metadata = json.load(json_data)
representations = list_dir(os.path.join(path, representation_dir),
sorted=True, ignore_not_found=True)
sub_objects = list_dir(os.path.join(path, sub_object_dir), sorted=True,
ignore_not_found=True)
return jsonify({
'metadata': metadata,
'representations': representations,
'sub_objects': sub_objects})
else:
raise ApiError("object_not_found",
f"No object with id {object_id} was found", 404)
@repository_controller.route('/representation/<path:object_id>/<rep_name>',
methods=['GET'], strict_slashes=False)
def get_representation(object_id, rep_name):
"""
Retrieve a representation of a cilantro (sub)object.
Returns A JSON array containing all files of the representation.
.. :quickref: Repository Controller; Retrieve a (sub)object representation
**Example request**:
.. sourcecode:: http
GET /repository/representation/<object_id>/<rep_name> HTTP/1.1
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
[
"merged.pdf"
]
**Example response ERROR**:
.. sourcecode:: http
HTTP/1.1 404 NOT FOUND
{
"error": {
"code": "representation_not_found",
"message": "No representation jpg for object with id
test_object was found"
},
"success": false
}
:reqheader Accept: application/json
:param str object_id: The id of the (sub) object
:param str rep_name: The name of the representation
:resheader Content-Type: application/json
:status 200: OK
:status 404: representation was not found
:return: JSON array containing all files of the representation
"""
path = os.path.join(repository_dir, generate_repository_path(object_id),
representation_dir, rep_name)
if os.path.isdir(path):
files = list_dir(path, sorted=True, ignore_not_found=True)
return jsonify(files)
else:
raise ApiError("representation_not_found",
f"No representation {rep_name} for object with "
f"id {object_id} was found", 404)
@repository_controller.route(
'/file/<path:object_id>/data/<path:rep_name>/<file>', methods=['GET'],
strict_slashes=False)
def get_file(object_id, rep_name, file):
"""
Retrieve a file from a representation of a cilantro (sub)object.
Returns the file's content
.. :quickref: Repository Controller; Retrieve a file from a representation
**Example request**:
.. sourcecode:: http
GET /repository/file/<object_id>/data/<rep_name>/<file> HTTP/1.1
    Note that for sub-objects the 'object_id' looks like:
"<parent-object_id>/part_0001"
**Example response ERROR**:
.. sourcecode:: http
HTTP/1.1 404 NOT FOUND
{
"error": {
"code": "file_not_found",
"message": "No file test_file.jpg was found in representation
jpg of object test_object"
},
"success": false
}
:reqheader Accept: *
:param str object_id: The id of the object
:param str rep_name: The name of the representation
:param str file: The name of the file
:resheader Content-Type: *
:status 200: OK
:status 404: file was not found
:return: Downloadable file
"""
path = os.path.join(repository_dir, generate_repository_path(object_id),
representation_dir, rep_name, file)
if os.path.isfile(path):
return handle_file_request(path)
else:
raise ApiError("file_not_found",
f"No file {file} was found in representation {rep_name}"
f" of object {object_id}", 404)
@repository_controller.route('/file/<path:object_id>/<file>',
methods=['GET'], strict_slashes=False)
def get_meta_file(object_id, file):
"""
Retrieve a file from the root of a cilantro (sub)object.
    Returns the file's content. Files on root level are normally metadata files.
    .. :quickref: Repository Controller; Retrieve metadata file of (sub)object
**Example request**:
.. sourcecode:: http
GET /repository/file/<object_id>/<file> HTTP/1.1
**Example response ERROR**:
.. sourcecode:: http
HTTP/1.1 404 NOT FOUND
{
"error": {
"code": "file_not_found",
"message": "No file test_file.jpg was found in object
test_object"
},
"success": false
}
:reqheader Accept: application/json
:param str object_id: The id of the object
:param str file: Name of the file
:resheader Content-Type: application/json
:status 200: OK
:status 404: file was not found
:return: Downloadable file
"""
path = os.path.join(repository_dir, generate_repository_path(object_id),
file)
if os.path.isfile(path):
return send_file(path)
else:
raise ApiError("file_not_found",
f"No file {file} was found in object {object_id}", 404)
def handle_file_request(path):
if request.headers.get('Accept') == '*/*':
return send_file(path)
elif request.accept_mimetypes.accept_html:
ext = os.path.splitext(path)[1][1:]
if ext in viewers:
url = viewers[ext] + path[len(repository_dir):]
return redirect(url, code=303)
return send_file(path)
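
# A hypothetical client-side sketch for the endpoints above. The "/repository"
# prefix follows the example requests in the docstrings; host, port and the
# object id "issue-test-188" are assumptions for illustration only.
#
#   import requests
#   base = "http://localhost:5000/repository"
#   object_ids = requests.get(base).json()
#   obj = requests.get(base + "/object/issue-test-188").json()
#   files = requests.get(base + "/representation/issue-test-188/origin").json()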
| 2.65625
| 3
|
src/python_import/C/cc.py
|
matiastang/matias-python
| 0
|
11732
|
#!/usr/bin/python3
#coding=utf-8
def cc_debug():
print(__name__)
| 1.234375
| 1
|
plugin.audio.podcasts/addon.py
|
stobb3s/kodi-addon-podcast
| 0
|
11733
|
from datetime import datetime
import base64
import os
import re
import requests
import sys
import urllib.parse
import xmltodict
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import xbmcvfs
__PLUGIN_ID__ = "plugin.audio.podcasts"
# see https://forum.kodi.tv/showthread.php?tid=112916
_MONTHS = ["Jan", "Feb", "Mar", "Apr", "May",
"Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
GPODDER_API = {
"login": "%s/api/2/auth/%s/login.json",
"subscriptions": "%s/subscriptions/%s.%s"
}
settings = xbmcaddon.Addon(id=__PLUGIN_ID__)
addon_dir = xbmcvfs.translatePath(settings.getAddonInfo('path'))
class HttpStatusError(Exception):
message = ""
def __init__(self, msg):
self.message = msg
class Mediathek:
_GROUPS = 10
_ENTRIES = 10
addon_handle = None
def __init__(self):
pass
def _parse_outlines_from_opml(self, outline):
if type(outline) is not list:
outline = [outline]
entries = []
for i, o in enumerate(outline):
name = o["@title"] if "@title" in o else o["@text"]
if not name and "@xmlUrl" in o:
m = re.match(
"^https?:\/\/([^\/]+).*\/?.*\/([^\/]+)\/?$", o["@xmlUrl"])
if m:
name = "%s %s...%s" % (settings.getLocalizedString(
32053), m.groups()[0][:20], m.groups()[1][-40:])
entry = {
"path": str(i),
"name": name,
"node": []
}
if "@type" in o and o["@type"] == "rss" and "@xmlUrl" in o:
entry["params"] = [{
"rss": o["@xmlUrl"]
}]
entries.append(entry)
elif "outline" in o:
entry["node"] = self._parse_outlines_from_opml(
o["outline"])
entries.append(entry)
return entries
def _play_latest(self, url):
try:
title, description, image, items = self._load_rss(url)
item = items[0]
li = self._create_list_item(item)
xbmcplugin.setResolvedUrl(self.addon_handle, True, li)
except HttpStatusError as error:
xbmcgui.Dialog().notification(settings.getLocalizedString(32090), error.message)
def _create_list_item(self, item):
li = xbmcgui.ListItem(label=item["name"])
if "description" in item:
li.setProperty("label2", item["description"])
if "stream_url" in item:
li.setPath(item["stream_url"])
if "type" in item:
if item["type"] == "video":
li.setInfo(item["type"], {
"title": item["name"],
"plot": item["description"] if "description" in item else ""
})
elif item["type"] == "music":
li.setInfo(item["type"], {
"title": item["name"]
})
if "icon" in item and item["icon"]:
li.setArt({"icon": item["icon"]})
else:
li.setArt({"icon": os.path.join(
addon_dir, "resources", "assets", "icon.png")}
)
if "date" in item and item["date"]:
if "setDateTime" in dir(li): # available since Kodi v20
li.setDateTime(item["date"].strftime("%Y-%m-%dT%H:%M:%SZ"))
else:
pass
if "specialsort" in item:
li.setProperty("SpecialSort", item["specialsort"])
if "duration" in item and item["duration"] >= 0:
li.setInfo("music", {"duration": item["duration"]})
li.setInfo("video", {"duration": item["duration"]})
return li
def _add_list_item(self, entry, path):
def _build_param_string(params, current=""):
            if params is None:
return current
for obj in params:
for name in obj:
enc_value = base64.urlsafe_b64encode(
obj[name].encode("utf-8"))
current += "?" if len(current) == 0 else "&"
current += name + "=" + str(enc_value, "utf-8")
return current
if path == "/":
path = ""
item_path = path + "/" + entry["path"]
param_string = ""
if "params" in entry:
param_string = _build_param_string(entry["params"],
current=param_string)
li = self._create_list_item(entry)
if "stream_url" in entry:
url = entry["stream_url"]
else:
url = "".join(
["plugin://", __PLUGIN_ID__, item_path, param_string])
is_folder = "node" in entry
li.setProperty("IsPlayable", "false" if is_folder else "true")
xbmcplugin.addDirectoryItem(handle=self.addon_handle,
listitem=li,
url=url,
isFolder=is_folder)
def _http_request(self, url, headers={}, method="GET"):
useragent = f"{settings.getAddonInfo('id')}/{settings.getAddonInfo('version')} (Kodi/{xbmc.getInfoLabel('System.BuildVersionShort')})"
headers["User-Agent"] = useragent
if method == "GET":
req = requests.get
elif method == "POST":
req = requests.post
else:
raise HttpStatusError(settings.getLocalizedString(32091) % method)
try:
res = req(url, headers=headers)
except requests.exceptions.RequestException as error:
xbmc.log("Request Exception: %s" % str(error), xbmc.LOGERROR)
raise HttpStatusError(settings.getLocalizedString(32092))
if res.status_code == 200:
return res.text, res.cookies
else:
raise HttpStatusError(settings.getLocalizedString(
32093) % (res.status_code, url))
def _load_rss(self, url):
def _parse_item(_ci):
if "enclosure" in _ci and "@url" in _ci["enclosure"]:
stream_url = _ci["enclosure"]["@url"]
if _ci["enclosure"]["@type"].split("/")[0] == "video":
_type = "video"
else:
_type = "music"
elif "guid" in _ci and _ci["guid"]:
# not supported yet
return None
else:
return None
if "itunes:image" in _ci and "@href" in _ci["itunes:image"]:
item_image = _ci["itunes:image"]["@href"]
else:
item_image = image
if "pubDate" in _ci:
_f = re.findall(
"(\d{1,2}) (\w{3}) (\d{4}) (\d{2}):(\d{2}):(\d{2})", _ci["pubDate"])
if _f:
_m = _MONTHS.index(_f[0][1]) + 1
pubDate = datetime(year=int(_f[0][2]), month=_m, day=int(_f[0][0]), hour=int(
_f[0][3]), minute=int(_f[0][4]), second=int(_f[0][5]))
else:
pubDate = None
if "itunes:duration" in _ci:
try:
duration = int(_ci["itunes:duration"]) #if duration is already in seconds
except:
try: #try converting HH:MM:SS or MM:SS string to integer seconds
durationList = _ci["itunes:duration"].split(":")
if len(durationList) == 3: #HH:MM:SS
duration = int(durationList[0]) * 3600 + int(durationList[1]) * 60 + int(durationList[2])
elif len(durationList) == 2: #MM:SS
duration = int(durationList[0]) * 60 + int(durationList[1])
else:
duration = -1
except:
duration = -1
else:
duration = -1
return {
"name": _ci["title"],
"description": _ci["description"] if "description" in _ci else "",
"date": pubDate,
"icon": item_image,
"stream_url": stream_url,
"type": _type,
"duration": duration
}
res, cookies = self._http_request(url)
if not res.startswith("<?xml"):
raise HttpStatusError("%s %s" % (
settings.getLocalizedString(32094), url))
else:
rss_feed = xmltodict.parse(res)
channel = rss_feed["rss"]["channel"]
title = channel["title"] if "title" in channel else ""
description = channel["description"] if "description" in channel else ""
if "image" in channel and "url" in channel["image"]:
image = channel["image"]["url"]
elif "itunes:image" in channel:
image = channel["itunes:image"]["@href"]
else:
image = None
items = []
if type(channel["item"]) is list:
for _ci in channel["item"]:
item = _parse_item(_ci)
if item is not None:
items += [item]
else:
item = _parse_item(channel["item"])
if item is not None:
items += [item]
return title, description, image, items
def _render_rss(self, path, url):
def _update_Image(path, image):
if path.startswith("/pod-"):
_p = path[5:].split("/")
settings.setSetting("group_%i_rss_%i_icon" %
(int(_p[0]), int(_p[1])), image)
try:
title, description, image, items = self._load_rss(url)
if image:
_update_Image(path, image)
except HttpStatusError as error:
xbmc.log("HTTP Status Error: %s, path=%s" %
(error.message, path), xbmc.LOGERROR)
xbmcgui.Dialog().notification(settings.getLocalizedString(32090), error.message)
else:
if len(items) > 0 and settings.getSetting("anchor") == "true":
entry = {
"path": "latest",
"name": "%s (%s)" % (title, settings.getLocalizedString(32052)),
"description": description,
"icon": image,
"date": datetime.now(),
"specialsort": "top",
"type": items[0]["type"],
"params": [
{
"play_latest": url
}
]
}
self._add_list_item(entry, path)
for item in items:
li = self._create_list_item(item)
xbmcplugin.addDirectoryItem(handle=self.addon_handle,
listitem=li,
url=item["stream_url"],
isFolder=False)
if "setDateTime" in dir(li): # available since Kodi v20
xbmcplugin.addSortMethod(
self.addon_handle, xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.endOfDirectory(self.addon_handle)
def _browse(self, dir_structure, path, updateListing=False):
def _get_node_by_path(path):
if path == "/":
return dir_structure[0]
tokens = path.split("/")[1:]
node = dir_structure[0]
while len(tokens) > 0:
path = tokens.pop(0)
for n in node["node"]:
if n["path"] == path:
node = n
break
return node
node = _get_node_by_path(path)
for entry in node["node"]:
self._add_list_item(entry, path)
xbmcplugin.addSortMethod(
self.addon_handle, xbmcplugin.SORT_METHOD_FULLPATH)
xbmcplugin.addSortMethod(
self.addon_handle, xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.endOfDirectory(
self.addon_handle, updateListing=updateListing)
def _parse_opml(self, data):
opml_data = xmltodict.parse(data)
entries = self._parse_outlines_from_opml(
opml_data["opml"]["body"]["outline"])
return opml_data["opml"]["head"]["title"], entries
def _open_opml_file(self, path):
with open(path) as _opml_file:
return _opml_file.read()
def _build_dir_structure(self):
groups = []
# opml files / podcasts lists
for g in range(self._GROUPS):
if settings.getSetting("opml_file_%i" % g) == "":
continue
path = os.path.join(
addon_dir, settings.getSetting("opml_file_%i" % g))
try:
name, nodes = self._parse_opml(self._open_opml_file(path))
groups.append({
"path": "opml-%i" % g,
"name": name,
"node": nodes
})
except:
xbmc.log("Cannot read opml file %s" % path, xbmc.LOGERROR)
# rss feeds from settings
for g in range(self._GROUPS):
if settings.getSetting("group_%i_enable" % g) == "false":
continue
entries = []
for e in range(self._ENTRIES):
if settings.getSetting("group_%i_rss_%i_enable" % (g, e)) == "false":
continue
icon = settings.getSetting("group_%i_rss_%i_icon"
% (g, e))
entries += [{
"path": "%i" % e,
"name": settings.getSetting("group_%i_rss_%i_name"
% (g, e)),
"params": [
{
"rss": settings.getSetting("group_%i_rss_%i_url" % (g, e))
}
],
"icon": icon,
"node": []
}]
groups += [{
"path": "pod-%i" % g,
"name": settings.getSetting("group_%i_name" % g),
"node": entries
}]
return [
{ # root
"path": "",
"node": groups
}
]
def handle(self, argv):
def decode_param(encoded_param):
return base64.urlsafe_b64decode(encoded_param).decode("utf-8")
self.addon_handle = int(argv[1])
path = urllib.parse.urlparse(argv[0]).path.replace("//", "/")
url_params = urllib.parse.parse_qs(argv[2][1:])
if "rss" in url_params:
url = decode_param(url_params["rss"][0])
self._render_rss(path, url)
elif "play_latest" in url_params:
url = decode_param(url_params["play_latest"][0])
self._play_latest(url)
else:
_dir_structure = self._build_dir_structure()
self._browse(dir_structure=_dir_structure, path=path)
def _login_at_gpodder(self):
auth_string = "%s:%s" % (settings.getSetting(
"gpodder_username"), settings.getSetting("gpodder_password"))
b64auth = {
"Authorization": "Basic %s" % base64.urlsafe_b64encode(auth_string.encode("utf-8")).decode("utf-8")
}
response, cookies = self._http_request(
GPODDER_API["login"] % (settings.getSetting("gpodder_hostname"),
settings.getSetting("gpodder_username")), b64auth, "POST")
if "sessionid" not in cookies:
raise HttpStatusError(settings.getLocalizedString(32095))
return cookies["sessionid"]
def _load_gpodder_subscriptions(self, sessionid):
session_cookie = {
"Cookie": "%s=%s" % ("sessionid", sessionid)
}
response, cookies = self._http_request(
GPODDER_API["subscriptions"] % (settings.getSetting("gpodder_hostname"),
settings.getSetting(
"gpodder_username"),
"opml"), session_cookie)
return response
def _select_opml_file(self):
path = xbmcgui.Dialog().browse(
type=1, heading=settings.getLocalizedString(32070), shares="", mask=".xml|.opml")
if path == "":
return None, None
try:
return self._parse_opml(self._open_opml_file(path))
except:
xbmc.log("Cannot read opml file %s" % path, xbmc.LOGERROR)
return None, None
def _select_feeds(self, name, entries, freeslots):
selection = [e["name"]
for e in entries if "params" in e and len(e["params"]) == 1 and "rss" in e["params"][0]]
ok = False
while not ok:
feeds = xbmcgui.Dialog().multiselect(
settings.getLocalizedString(32071), selection)
            if feeds is None:
ok = True
elif len(feeds) == 0:
xbmcgui.Dialog().ok(settings.getLocalizedString(32072),
settings.getLocalizedString(32073))
elif len(feeds) > freeslots:
xbmcgui.Dialog().ok(settings.getLocalizedString(32074),
settings.getLocalizedString(32075) % freeslots)
else:
ok = True
return feeds
def _select_target_group(self):
names = list()
freeslots = list()
for g in range(self._GROUPS):
free = sum("false" == settings.getSetting(
"group_%i_rss_%i_enable" % (g, r)) for r in range(self._ENTRIES))
freeslots.append(free)
names.append("%s %i: %s (%i %s)" %
(
settings.getLocalizedString(32000),
g + 1,
settings.getSetting("group_%i_name" % g),
free,
settings.getLocalizedString(32077)
))
selected = xbmcgui.Dialog().select(settings.getLocalizedString(32076), names)
if selected > -1 and freeslots[selected] == 0:
xbmcgui.Dialog().ok(heading=settings.getLocalizedString(32078),
message=settings.getLocalizedString(32084))
return -1, 0
elif selected == -1:
return -1, 0
else:
return selected, freeslots[selected]
def _apply_to_group(self, entries, group, feeds):
settings.setSetting("group_%i_enable" % group, "True")
i, j = 0, 0
        while i < self._ENTRIES:
if j < len(feeds) and "false" == settings.getSetting("group_%i_rss_%i_enable" % (group, i)):
settings.setSetting("group_%i_rss_%i_enable" %
(group, i), "True")
settings.setSetting("group_%i_rss_%i_name" %
(group, i), entries[feeds[j]]["name"])
settings.setSetting("group_%i_rss_%i_url" % (
group, i), entries[feeds[j]]["params"][0]["rss"])
settings.setSetting("group_%i_rss_%i_icon" % (group, i), "")
j += 1
i += 1
def _save_opml_file(self, data):
opml = xmltodict.parse(data)
filename = "%s.opml" % re.sub(
"[^A-Za-z0-9']", " ", opml["opml"]["head"]["title"])
path = xbmcgui.Dialog().browse(
type=3, heading=settings.getLocalizedString(32080), shares="")
if not path:
return None, None
try:
fullpath = "%s%s" % (path, filename)
with open(fullpath, "w") as _file:
_file.write(data)
return fullpath, filename
except:
xbmcgui.Dialog().ok(heading=settings.getLocalizedString(
32081), message=settings.getLocalizedString(32082))
return None, None
def _select_target_opml_slot(self, heading, multi=False):
selection = list()
for g in range(self._GROUPS):
filename = settings.getSetting("opml_file_%i" % g)
selection.append("%s %i%s" % (settings.getLocalizedString(
32023), g + 1, ": %s" % filename if filename else ""))
dialog = xbmcgui.Dialog().multiselect if multi else xbmcgui.Dialog().select
return dialog(heading, selection)
def import_opml(self):
# Step 1: Select target group
group, freeslots = self._select_target_group()
if group == -1:
return
# Step 2: Select file
name, entries = self._select_opml_file()
        if name is None:
return
# Step 3: Select feeds
feeds = self._select_feeds(name, entries, freeslots)
        if feeds is None:
return
# Step 4: Confirm
self._apply_to_group(entries, group, feeds)
# Success
xbmcgui.Dialog().notification(settings.getLocalizedString(
32085), settings.getLocalizedString(32086))
def import_gpodder_subscriptions(self):
# Step 1: Select target group
group, freeslots = self._select_target_group()
if group == -1:
return
# Step 2: query subscriptions from gPodder
try:
sessionid = self._login_at_gpodder()
name, entries = self._parse_opml(
self._load_gpodder_subscriptions(sessionid))
except HttpStatusError as error:
xbmcgui.Dialog().ok(settings.getLocalizedString(32090), error.message)
return
# Step 3: Select feeds
feeds = self._select_feeds(name, entries, freeslots)
        if feeds is None:
return
# Step 4: Apply to group
self._apply_to_group(entries, group, feeds)
# Success
xbmcgui.Dialog().notification(settings.getLocalizedString(
32085), settings.getLocalizedString(32086))
def download_gpodder_subscriptions(self):
# Step 1: download subscriptions from gPodder
try:
sessionid = self._login_at_gpodder()
opml_data = self._load_gpodder_subscriptions(sessionid)
except HttpStatusError as error:
xbmcgui.Dialog().ok(settings.getLocalizedString(32090), error.message)
return
# Step 2: Save file in folder
path, filename = self._save_opml_file(opml_data)
if not path:
return
# Success
xbmcgui.Dialog().notification(settings.getLocalizedString(
32085), "%s %s" % (settings.getLocalizedString(32083), filename))
# Step 3: Select target opml slot
slot = self._select_target_opml_slot(
settings.getLocalizedString(32079))
if slot == -1:
return
settings.setSetting("opml_file_%i" % slot, path)
# Success
xbmcgui.Dialog().notification(settings.getLocalizedString(
32085), settings.getLocalizedString(32086))
def unassign_opml(self):
# Step 1: Select slots
slots = self._select_target_opml_slot(
settings.getLocalizedString(32087), multi=True)
        if slots is None or len(slots) == 0:
return
# Step 2: empty slots
for slot in slots:
settings.setSetting("opml_file_%i" % slot, " ")
# Success
xbmcgui.Dialog().notification(settings.getLocalizedString(
32085), settings.getLocalizedString(32086))
if __name__ == '__main__':
mediathek = Mediathek()
if sys.argv[1] == "import_gpodder_subscriptions":
mediathek.import_gpodder_subscriptions()
elif sys.argv[1] == "import_opml":
mediathek.import_opml()
elif sys.argv[1] == "download_gpodder_subscriptions":
mediathek.download_gpodder_subscriptions()
elif sys.argv[1] == "unassign_opml":
mediathek.unassign_opml()
else:
mediathek.handle(sys.argv)
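
# Sketch of the parameter round trip used by _build_param_string/decode_param
# above: feed URLs are carried inside plugin:// URLs as urlsafe base64 so they
# survive Kodi's query-string handling (the feed URL below is hypothetical).
#
#   raw = "https://example.org/feed.xml"
#   encoded = str(base64.urlsafe_b64encode(raw.encode("utf-8")), "utf-8")
#   assert base64.urlsafe_b64decode(encoded).decode("utf-8") == raw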
| 2.3125
| 2
|
.venv/Lib/site-packages/lemoncheesecake/reporting/savingstrategy.py
|
yadavdeepa365/HUDL_PYTHON
| 34
|
11734
|
import re
import time
from lemoncheesecake.events import TestSessionSetupEndEvent, TestSessionTeardownEndEvent, \
TestEndEvent, SuiteSetupEndEvent, SuiteTeardownEndEvent, SuiteEndEvent, SteppedEvent
from lemoncheesecake.reporting.report import ReportLocation
DEFAULT_REPORT_SAVING_STRATEGY = "at_each_failed_test"
def _is_end_of_result_event(event):
if isinstance(event, TestEndEvent):
return ReportLocation.in_test(event.test)
if isinstance(event, SuiteSetupEndEvent):
return ReportLocation.in_suite_setup(event.suite)
if isinstance(event, SuiteTeardownEndEvent):
return ReportLocation.in_suite_teardown(event.suite)
if isinstance(event, TestSessionSetupEndEvent):
return ReportLocation.in_test_session_setup()
if isinstance(event, TestSessionTeardownEndEvent):
return ReportLocation.in_test_session_teardown()
return None
def save_at_each_suite_strategy(event, _):
return isinstance(event, SuiteEndEvent)
def save_at_each_test_strategy(event, _):
return _is_end_of_result_event(event) is not None
def save_at_each_failed_test_strategy(event, report):
location = _is_end_of_result_event(event)
if location:
result = report.get(location)
return result and result.status == "failed"
else:
return False
def save_at_each_log_strategy(event, _):
return isinstance(event, SteppedEvent)
class SaveAtInterval(object):
def __init__(self, interval):
self.interval = interval
self.last_saving = None
def __call__(self, event, report):
now = time.time()
if self.last_saving:
must_be_saved = now > self.last_saving + self.interval
if must_be_saved:
self.last_saving = now
return must_be_saved
else:
self.last_saving = now # not a saving but an initialization
return False
def make_report_saving_strategy(expression):
# first, try with a static expression
static_expressions = {
"at_end_of_tests": None, # no need to an intermediate report saving in this case
"at_each_suite": save_at_each_suite_strategy,
"at_each_test": save_at_each_test_strategy,
"at_each_failed_test": save_at_each_failed_test_strategy,
"at_each_log": save_at_each_log_strategy,
"at_each_event": save_at_each_log_strategy # deprecated since 1.4.5, "at_each_log" must be used instead
}
try:
return static_expressions[expression]
except KeyError:
pass
# second, try with "every_Ns"
m = re.compile(r"^every[_ ](\d+)s$").match(expression)
if m:
return SaveAtInterval(int(m.group(1)))
# ok... nothing we know about
raise ValueError("Invalid expression '%s' for report saving strategy" % expression)
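
# Usage sketch: the factory accepts either one of the named strategies or an
# interval expression (the values below are illustrative).
#
#   strategy = make_report_saving_strategy("at_each_failed_test")
#   periodic = make_report_saving_strategy("every_60s")   # returns SaveAtInterval(60)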
| 2.234375
| 2
|
genshin/models/genshin/chronicle/notes.py
|
thesadru/genshin.py
| 63
|
11735
|
"""Genshin chronicle notes."""
import datetime
import typing
import pydantic
from genshin.models.genshin import character
from genshin.models.model import Aliased, APIModel
__all__ = ["Expedition", "ExpeditionCharacter", "Notes"]
def _process_timedelta(time: typing.Union[int, datetime.timedelta, datetime.datetime]) -> datetime.datetime:
if isinstance(time, int):
time = datetime.datetime.fromtimestamp(time).astimezone()
if isinstance(time, datetime.timedelta):
time = datetime.datetime.now().astimezone() + time
if time < datetime.datetime(2000, 1, 1).astimezone():
delta = datetime.timedelta(seconds=int(time.timestamp()))
time = datetime.datetime.now().astimezone() + delta
time = time.replace(second=0, microsecond=0)
return time
class ExpeditionCharacter(character.BaseCharacter):
"""Expedition character."""
class Expedition(APIModel):
"""Real-Time note expedition."""
character: ExpeditionCharacter = Aliased("avatar_side_icon")
status: typing.Literal["Ongoing", "Finished"]
remaining_time: datetime.timedelta = Aliased("remained_time")
@property
def finished(self) -> bool:
"""Whether the expedition has finished."""
return self.remaining_time <= datetime.timedelta(0)
@property
def completion_time(self) -> datetime.datetime:
return datetime.datetime.now().astimezone() + self.remaining_time
@pydantic.validator("character", pre=True)
def __complete_character(cls, v: typing.Any) -> ExpeditionCharacter:
if isinstance(v, str):
return ExpeditionCharacter(icon=v) # type: ignore
return v
class TransformerTimedelta(datetime.timedelta):
"""Transformer recovery time."""
@property
def timedata(self) -> typing.Tuple[int, int, int, int]:
seconds: int = super().seconds
days: int = super().days
hour, second = divmod(seconds, 3600)
minute, second = divmod(second, 60)
return days, hour, minute, second
@property
def hours(self) -> int:
return self.timedata[1]
@property
def minutes(self) -> int:
return self.timedata[2]
@property
def seconds(self) -> int:
return self.timedata[3]
class Notes(APIModel):
"""Real-Time notes."""
current_resin: int
max_resin: int
remaining_resin_recovery_time: datetime.timedelta = Aliased("resin_recovery_time")
current_realm_currency: int = Aliased("current_home_coin")
max_realm_currency: int = Aliased("max_home_coin")
remaining_realm_currency_recovery_time: datetime.timedelta = Aliased("home_coin_recovery_time")
completed_commissions: int = Aliased("finished_task_num")
max_commissions: int = Aliased("total_task_num")
claimed_commission_reward: bool = Aliased("is_extra_task_reward_received")
remaining_resin_discounts: int = Aliased("remain_resin_discount_num")
max_resin_discounts: int = Aliased("resin_discount_num_limit")
remaining_transformer_recovery_time: typing.Optional[TransformerTimedelta]
expeditions: typing.Sequence[Expedition]
max_expeditions: int = Aliased("max_expedition_num")
@property
def resin_recovery_time(self) -> datetime.datetime:
"""The remaining time until resin recovery in seconds."""
return datetime.datetime.now().astimezone() + self.remaining_resin_recovery_time
@property
def realm_currency_recovery_time(self) -> datetime.datetime:
"""The remaining time until realm currency recovery in seconds."""
return datetime.datetime.now().astimezone() + self.remaining_realm_currency_recovery_time
@property
def transformer_recovery_time(self) -> typing.Optional[datetime.datetime]:
"""The remaining time until realm currency recovery in seconds."""
if self.remaining_transformer_recovery_time is None:
return None
remaining = datetime.datetime.now().astimezone() + self.remaining_transformer_recovery_time
return remaining
@pydantic.root_validator(pre=True)
def __flatten_transformer(cls, values: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:
if "transformer_recovery_time" in values:
return values
if values.get("transformer") and values["transformer"]["obtained"]:
t = values["transformer"]["recovery_time"]
delta = TransformerTimedelta(days=t["Day"], hours=t["Hour"], minutes=t["Minute"], seconds=t["Second"])
values["remaining_transformer_recovery_time"] = delta
else:
values["remaining_transformer_recovery_time"] = None
return values
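
# Sketch of how _process_timedelta above normalises its input (values are
# illustrative): an int is read as a unix timestamp, a timedelta as an offset
# from now, and datetimes before 2000 are re-interpreted as second offsets.
#
#   _process_timedelta(1700000000)                       # absolute timestamp
#   _process_timedelta(datetime.timedelta(minutes=90))   # now + 90 minutes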
| 2.453125
| 2
|
src/the_tale/the_tale/common/bbcode/renderer.py
|
al-arz/the-tale
| 85
|
11736
|
<reponame>al-arz/the-tale
import smart_imports
smart_imports.all()
class Renderer:
__slots__ = ('tags', '_renderer')
def __init__(self, tags):
self.tags = tags
self._renderer = postmarkup.create(include=[],
use_pygments=False,
annotate_links=False)
for tag in tags:
self._renderer.tag_factory.add_tag(tag.tag_class, tag.value, *tag.args, **tag.kwargs)
def render(self, *args, **kwargs):
try:
kwargs['cosmetic_replace'] = False
kwargs['encoding'] = 'utf-8'
return self._renderer.render_to_html(*args, **kwargs)
except Exception:
            return 'Текст нельзя отформатировать. Возможно Вы ошиблись при вводе тегов.'  # "The text cannot be formatted. You may have made a mistake entering the tags."
def html_command_line(self):
lines = ['<div class="pgf-bb-command-line command-line">']
for tag in self.tags:
single = 'data-single="true"' if tag.single else ''
line = f'<a class="pgf-bb-command" href="#" data-tag="{tag.value}" {single} rel="tooltip" title=\'{tag.example}\'>[{tag.value}]</a>'
lines.append(line)
lines.append('</div>')
return '\n'.join(lines)
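
# Usage sketch: each tag passed to Renderer needs the attributes read above
# (tag_class, value, args, kwargs, plus single/example for the command line).
# postmarkup.SimpleTag and the 'b' -> 'strong' mapping are assumptions here.
#
#   from dataclasses import dataclass, field
#
#   @dataclass
#   class Tag:
#       tag_class: type
#       value: str
#       args: tuple = ()
#       kwargs: dict = field(default_factory=dict)
#       single: bool = False
#       example: str = ''
#
#   renderer = Renderer(tags=[Tag(postmarkup.SimpleTag, 'b', ('strong',))])
#   html = renderer.render('[b]bold[/b]')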
| 2.203125
| 2
|
tests/python/unittest/test_meta_schedule_custom_rule_winograd_cpu.py
|
psrivas2/relax
| 11
|
11737
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import tvm
from tvm import meta_schedule as ms
from tvm.ir import IRModule
from tvm.meta_schedule.testing.conv2d_winograd_cpu import conv2d_winograd_cpu
from tvm.target import Target
from tvm.tir.schedule import Schedule, Trace
def _get_mod():
# pylint: disable=invalid-name
def inline(sch: Schedule):
b1 = sch.get_block(name="A")
b2 = sch.get_block(name="B")
sch.compute_inline(block=b1)
sch.compute_inline(block=b2)
def input_tile_data_pad(sch: Schedule):
b78 = sch.get_block(name="input_tile")
l80 = sch.sample_compute_location(block=b78, decision=4)
sch.compute_at(block=b78, loop=l80, preserve_unit_loops=True)
b81 = sch.get_block(name="data_pad")
l83 = sch.sample_compute_location(block=b81, decision=-2)
sch.compute_at(block=b81, loop=l83, preserve_unit_loops=True)
def data_pack(sch: Schedule):
b18 = sch.get_block(name="data_pack")
l19, l20, l21, l22, l23, l24 = sch.get_loops(block=b18)
sch.unroll(loop=l19)
sch.unroll(loop=l20)
v25, v26 = sch.sample_perfect_tile(
n=2,
loop=l21,
max_innermost_factor=64,
decision=[9, 1],
)
l27, l28 = sch.split(loop=l21, factors=[v25, v26])
v29, v30 = sch.sample_perfect_tile(
n=2,
loop=l22,
max_innermost_factor=64,
decision=[32, 4],
)
l31, l32 = sch.split(loop=l22, factors=[v29, v30])
sch.unroll(loop=l23)
sch.unroll(loop=l24)
sch.reorder(l27, l31, l28, l32, l19, l20, l23, l24)
def bgemm(sch: Schedule):
bgemm = sch.get_block(name="bgemm")
write_cache = sch.cache_write(
block=bgemm,
write_buffer_index=0,
storage_scope="global",
)
sch.annotate(
block_or_loop=bgemm,
ann_key="meta_schedule.tiling_structure",
ann_val="SSRSRS",
)
# b33, b34 = b34, b33
l35, l36, l37, l38, l39 = sch.get_loops(block=bgemm)
v40, v41, v42, v43 = sch.sample_perfect_tile(
n=4,
loop=l35,
max_innermost_factor=64,
decision=[1, 2, 3, 1],
)
l44, l45, l46, l47 = sch.split(loop=l35, factors=[v40, v41, v42, v43])
v48, v49, v50, v51 = sch.sample_perfect_tile(
n=4,
loop=l36,
max_innermost_factor=64,
decision=[1, 1, 1, 6],
)
l52, l53, l54, l55 = sch.split(loop=l36, factors=[v48, v49, v50, v51])
v56, v57, v58, v59 = sch.sample_perfect_tile(
n=4,
loop=l37,
max_innermost_factor=64,
decision=[1, 1, 1, 9],
)
l60, l61, l62, l63 = sch.split(loop=l37, factors=[v56, v57, v58, v59])
v64, v65, v66, v67 = sch.sample_perfect_tile(
n=4,
loop=l38,
max_innermost_factor=64,
decision=[2, 1, 16, 4],
)
l68, l69, l70, l71 = sch.split(loop=l38, factors=[v64, v65, v66, v67])
v72, v73 = sch.sample_perfect_tile(
n=2,
loop=l39,
max_innermost_factor=64,
decision=[16, 8],
)
l74, l75 = sch.split(loop=l39, factors=[v72, v73])
sch.reorder(
# fmt: off
l44, l52, l60, l68,
l45, l53, l61, l69,
l74,
l46, l54, l62, l70,
l75,
l47, l55, l63, l71,
# fmt: on
)
sch.reverse_compute_at(block=write_cache, loop=l69, preserve_unit_loops=True)
def inverse(sch: Schedule):
b3 = sch.get_block(name="inverse")
l4, l5, l6, l7, l8, l9 = sch.get_loops(block=b3)
sch.unroll(loop=l4)
sch.unroll(loop=l5)
v10, v11 = sch.sample_perfect_tile(
n=2,
loop=l6,
max_innermost_factor=64,
decision=[1, 9],
)
l12, l13 = sch.split(loop=l6, factors=[v10, v11])
v14, v15 = sch.sample_perfect_tile(
n=2,
loop=l7,
max_innermost_factor=64,
decision=[2, 64],
)
l16, l17 = sch.split(loop=l7, factors=[v14, v15])
sch.unroll(loop=l8)
sch.unroll(loop=l9)
sch.reorder(l12, l16, l13, l17, l4, l5, l8, l9)
# pylint: enable=invalid-name
sch = Schedule(mod=conv2d_winograd_cpu)
inline(sch)
data_pack(sch)
input_tile_data_pad(sch)
bgemm(sch)
inverse(sch)
return sch.mod
def test_conv2d_winograd_cpu():
mod = conv2d_winograd_cpu
mod = IRModule({"main": mod})
target = Target("llvm --num-cores=16")
context = ms.TuneContext(
mod=mod,
target=target,
task_name="Custom Search Space Task",
space_generator=ms.space_generator.PostOrderApply(),
sch_rules=ms.default_config.schedule_rules(
None,
target,
),
)
context.initialize()
post_order_apply = context.space_generator
(sch,) = post_order_apply.generate_design_space(mod)
decisions = dict(
zip(
[i for i in sch.trace.insts[:-4] if i.kind.name.startswith("Sample")],
[
# data_pack
[9, 1],
[32, 4],
# input_tile
4,
# data_pad
-2,
# inverse
[1, 9],
[2, 64],
# bgemm
[1, 2, 3, 1],
[1, 1, 1, 6],
[1, 1, 1, 9],
[2, 1, 16, 4],
[16, 8],
],
)
)
trace = Trace(sch.trace.insts[:-4], decisions=decisions)
sch = Schedule(mod=mod)
trace.apply_to_schedule(sch, remove_postproc=False)
answer = sch.mod
expected = _get_mod()
tvm.ir.assert_structural_equal(answer, expected)
if __name__ == "__main__":
test_conv2d_winograd_cpu()
| 1.882813
| 2
|
model_zoo/official/cv/FCN8s/src/nets/FCN8s.py
|
LottieWang/mindspore
| 0
|
11738
|
<filename>model_zoo/official/cv/FCN8s/src/nets/FCN8s.py<gh_stars>0
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.nn as nn
from mindspore.ops import operations as P
class FCN8s(nn.Cell):
def __init__(self, n_class):
super().__init__()
self.n_class = n_class
self.conv1 = nn.SequentialCell(
nn.Conv2d(in_channels=3, out_channels=64,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(64),
nn.ReLU()
)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.SequentialCell(
nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(128),
nn.ReLU()
)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3 = nn.SequentialCell(
nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(256),
nn.ReLU()
)
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv4 = nn.SequentialCell(
nn.Conv2d(in_channels=256, out_channels=512,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(in_channels=512, out_channels=512,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(in_channels=512, out_channels=512,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(512),
nn.ReLU()
)
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv5 = nn.SequentialCell(
nn.Conv2d(in_channels=512, out_channels=512,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(in_channels=512, out_channels=512,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(in_channels=512, out_channels=512,
kernel_size=3, weight_init='xavier_uniform'),
nn.BatchNorm2d(512),
nn.ReLU()
)
self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv6 = nn.SequentialCell(
nn.Conv2d(in_channels=512, out_channels=4096,
kernel_size=7, weight_init='xavier_uniform'),
nn.BatchNorm2d(4096),
nn.ReLU(),
)
self.conv7 = nn.SequentialCell(
nn.Conv2d(in_channels=4096, out_channels=4096,
kernel_size=1, weight_init='xavier_uniform'),
nn.BatchNorm2d(4096),
nn.ReLU(),
)
self.score_fr = nn.Conv2d(in_channels=4096, out_channels=self.n_class,
kernel_size=1, weight_init='xavier_uniform')
self.upscore2 = nn.Conv2dTranspose(in_channels=self.n_class, out_channels=self.n_class,
kernel_size=4, stride=2, weight_init='xavier_uniform')
self.score_pool4 = nn.Conv2d(in_channels=512, out_channels=self.n_class,
kernel_size=1, weight_init='xavier_uniform')
self.upscore_pool4 = nn.Conv2dTranspose(in_channels=self.n_class, out_channels=self.n_class,
kernel_size=4, stride=2, weight_init='xavier_uniform')
self.score_pool3 = nn.Conv2d(in_channels=256, out_channels=self.n_class,
kernel_size=1, weight_init='xavier_uniform')
self.upscore8 = nn.Conv2dTranspose(in_channels=self.n_class, out_channels=self.n_class,
kernel_size=16, stride=8, weight_init='xavier_uniform')
        self.shape = P.Shape()
        self.cast = P.Cast()
        # explicit Add ops so their shard strategies can be configured in
        # set_model_parallel_shard_strategy (referenced there as self.add1 / self.add2)
        self.add1 = P.Add()
        self.add2 = P.Add()
def set_model_parallel_shard_strategy(self, device_num):
self.conv2d_strategy = ((1, 1, 1, device_num), (1, 1, 1, 1))
self.bn_strategy = ((1, 1, 1, device_num), (1,), (1,), (1,), (1,))
self.relu_strategy = ((1, 1, 1, device_num),)
self.maxpool_strategy = ((1, 1, 1, device_num),)
self.add_strategy = ((1, 1, 1, device_num), (1, 1, 1, device_num))
self.conv1.cell_list[0].conv2d.shard(self.conv2d_strategy)
self.conv1.cell_list[1].bn_train.shard(self.bn_strategy)
self.conv1.cell_list[2].relu.shard(self.relu_strategy)
self.conv1.cell_list[3].conv2d.shard(self.conv2d_strategy)
self.conv1.cell_list[4].bn_train.shard(self.bn_strategy)
self.conv1.cell_list[5].relu.shard(self.relu_strategy)
self.pool1.max_pool.shard(self.maxpool_strategy)
self.conv2.cell_list[0].conv2d.shard(self.conv2d_strategy)
self.conv2.cell_list[1].bn_train.shard(self.bn_strategy)
self.conv2.cell_list[2].relu.shard(self.relu_strategy)
self.conv2.cell_list[3].conv2d.shard(self.conv2d_strategy)
self.conv2.cell_list[4].bn_train.shard(self.bn_strategy)
self.conv2.cell_list[5].relu.shard(self.relu_strategy)
self.pool2.max_pool.shard(self.maxpool_strategy)
self.conv3.cell_list[0].conv2d.shard(self.conv2d_strategy)
self.conv3.cell_list[1].bn_train.shard(self.bn_strategy)
self.conv3.cell_list[2].relu.shard(self.relu_strategy)
self.conv3.cell_list[3].conv2d.shard(self.conv2d_strategy)
self.conv3.cell_list[4].bn_train.shard(self.bn_strategy)
self.conv3.cell_list[5].relu.shard(self.relu_strategy)
self.conv3.cell_list[6].conv2d.shard(self.conv2d_strategy)
self.conv3.cell_list[7].bn_train.shard(self.bn_strategy)
self.conv3.cell_list[8].relu.shard(self.relu_strategy)
self.pool3.max_pool.shard(self.maxpool_strategy)
self.conv4.cell_list[0].conv2d.shard(self.conv2d_strategy)
self.conv4.cell_list[1].bn_train.shard(self.bn_strategy)
self.conv4.cell_list[2].relu.shard(self.relu_strategy)
self.conv4.cell_list[3].conv2d.shard(self.conv2d_strategy)
self.conv4.cell_list[4].bn_train.shard(self.bn_strategy)
self.conv4.cell_list[5].relu.shard(self.relu_strategy)
self.conv4.cell_list[6].conv2d.shard(self.conv2d_strategy)
self.conv4.cell_list[7].bn_train.shard(self.bn_strategy)
self.conv4.cell_list[8].relu.shard(self.relu_strategy)
self.pool4.max_pool.shard(self.maxpool_strategy)
self.conv5.cell_list[0].conv2d.shard(self.conv2d_strategy)
self.conv5.cell_list[1].bn_train.shard(self.bn_strategy)
self.conv5.cell_list[2].relu.shard(self.relu_strategy)
self.conv5.cell_list[3].conv2d.shard(self.conv2d_strategy)
self.conv5.cell_list[4].bn_train.shard(self.bn_strategy)
self.conv5.cell_list[5].relu.shard(self.relu_strategy)
self.conv5.cell_list[6].conv2d.shard(self.conv2d_strategy)
self.conv5.cell_list[7].bn_train.shard(self.bn_strategy)
self.conv5.cell_list[8].relu.shard(self.relu_strategy)
self.pool5.max_pool.shard(((1, 1, 1, device_num),))
self.conv6.cell_list[0].conv2d.shard(self.conv2d_strategy)
self.conv6.cell_list[1].bn_train.shard(self.bn_strategy)
self.conv6.cell_list[2].relu.shard(self.relu_strategy)
self.conv7.cell_list[0].conv2d.shard(self.conv2d_strategy)
self.conv7.cell_list[1].bn_train.shard(self.bn_strategy)
self.conv7.cell_list[2].relu.shard(self.relu_strategy)
self.score_fr.conv2d.shard(self.conv2d_strategy)
self.upscore2.conv2d_transpose.shard(self.conv2d_strategy)
self.score_pool4.conv2d.shard(self.conv2d_strategy)
self.upscore_pool4.conv2d_transpose.shard(self.conv2d_strategy)
self.score_pool3.conv2d.shard(self.conv2d_strategy)
self.upscore8.conv2d_transpose.shard(self.conv2d_strategy)
self.add1.shard(self.add_strategy)
self.add2.shard(self.add_strategy)
def construct(self, x):
x1 = self.conv1(x)
p1 = self.pool1(x1)
x2 = self.conv2(p1)
p2 = self.pool2(x2)
x3 = self.conv3(p2)
p3 = self.pool3(x3)
x4 = self.conv4(p3)
p4 = self.pool4(x4)
x5 = self.conv5(p4)
p5 = self.pool5(x5)
x6 = self.conv6(p5)
x7 = self.conv7(x6)
sf = self.score_fr(x7)
u2 = self.upscore2(sf)
s4 = self.score_pool4(p4)
        f4 = self.add1(s4, u2)
u4 = self.upscore_pool4(f4)
s3 = self.score_pool3(p3)
        f3 = self.add2(s3, u4)
out = self.upscore8(f3)
return out
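

if __name__ == "__main__":
    # Editor-added minimal smoke-test sketch (not part of the original file). The input size
    # (512x512) and n_class=21 are illustrative assumptions; MindSpore context/device setup is
    # omitted. With five 2x2 poolings followed by the x2, x2 and x8 transposed convolutions,
    # a 512x512 input maps back to 512x512 logits.
    import numpy as np
    from mindspore import Tensor
    net = FCN8s(n_class=21)
    dummy = Tensor(np.zeros((1, 3, 512, 512), dtype=np.float32))
    print(net(dummy).shape)  # expected: (1, 21, 512, 512)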
| 2.171875
| 2
|
scripts/seqrun_processing/sync_seqrun_data_from_remote.py
|
imperial-genomics-facility/data-management-python
| 7
|
11739
|
#!/usr/bin/env python
import argparse
from igf_data.task_tracking.igf_slack import IGF_slack
from igf_data.process.data_transfer.sync_seqrun_data_on_remote import Sync_seqrun_data_from_remote
parser = argparse.ArgumentParser()
parser.add_argument('-r','--remote_server', required=True, help='Remote server address')
parser.add_argument('-p','--remote_base_path', required=True, help='Seqrun directory path in remote dir')
parser.add_argument('-d','--dbconfig', required=True, help='Database configuration file path')
parser.add_argument('-o','--output_dir', required=True, help='Local output directory path')
parser.add_argument('-n','--slack_config', required=True, help='Slack configuration file path')
args = parser.parse_args()
remote_server = args.remote_server
remote_base_path = args.remote_base_path
dbconfig = args.dbconfig
output_dir = args.output_dir
slack_config = args.slack_config
if __name__=='__main__':
try:
slack_obj=IGF_slack(slack_config=slack_config)
## FIX ME
except Exception as e:
message = 'Error while syncing sequencing run directory from remote server: {0}'.format(e)
slack_obj.post_message_to_channel(message,reaction='fail')
raise ValueError(message)
| 2.109375
| 2
|
pydashlite/arrays/sum_by.py
|
glowlex/pydashlite
| 0
|
11740
|
from typing import Callable, Iterable, TypeVar
T = TypeVar('T')
Num = TypeVar('Num', int, float)
def sumBy(array: Iterable[T], iteratee: Callable[[T], Num] = None, start: Num = 0) -> Num:
if iteratee is None:
return sum([y for y in array], start)
return sum([iteratee(y) for y in array], start)
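

if __name__ == "__main__":
    # Editor-added usage sketch (not part of the original module); the dict layout below is
    # purely illustrative.
    items = [{"price": 3}, {"price": 4.5}]
    assert sumBy([1, 2, 3]) == 6
    assert sumBy(items, lambda d: d["price"]) == 7.5
    assert sumBy([], start=10) == 10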
| 3.40625
| 3
|
Ten_Most_Common_Words.py
|
mcjohnchristopher/Python_Samples
| 0
|
11741
|
<reponame>mcjohnchristopher/Python_Samples
fhand = open('romeo.txt')
counts = dict()
for line in fhand:
    words = line.split()
    for word in words:
        counts[word] = counts.get(word, 0) + 1

st = list()
for key, val in counts.items():
    st.append((val, key))
st.sort(reverse=True)
for val, key in st[:10]:
    print(key, val)

# Equivalent ranking using the sorted() built-in
print(sorted([(val, key) for key, val in counts.items()], reverse=True)[:10])
| 3.25
| 3
|
python/testData/intentions/convertLambdaToFunction.py
|
jnthn/intellij-community
| 2
|
11742
|
newlist = lambda x<caret>, y: (x+y)/y
x = 1
| 1.554688
| 2
|
src/grpc_client.py
|
thealphadollar/py-grpcio-pg
| 0
|
11743
|
import grpc
from consts import PORT, SERVER_CERT
from grpc_generated_files import api_pb2, api_pb2_grpc
def main(stub):
request = api_pb2.ApiRequest(
name="Shivam",
message="Hey there!"
)
response = stub.ApiEndpoint(request)
print(response)
if __name__ == "__main__":
with open(SERVER_CERT, 'rb') as f:
server_cert = f.read()
creds = grpc.ssl_channel_credentials(server_cert)
# the server IP should be in the common name of the certificate
channel = grpc.secure_channel(f'localhost:{PORT}', creds)
stub = api_pb2_grpc.ApiStub(channel)
main(stub)
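
# Editor's note (assumption, not part of the original file): api_pb2 / api_pb2_grpc are
# typically generated from the service's .proto definition with grpcio-tools, e.g.:
#   python -m grpc_tools.protoc -I protos --python_out=grpc_generated_files \
#       --grpc_python_out=grpc_generated_files protos/api.proto
# The proto path and package layout shown above are illustrative guesses for this repository.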
| 2.78125
| 3
|
0188.Best Time to Buy and Sell Stock IV/solution.py
|
zhlinh/leetcode
| 0
|
11744
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: <EMAIL>
Version: 0.0.1
Created Time: 2016-03-23
Last_modify: 2016-03-23
******************************************
'''
'''
Say you have an array for which the ith element is
the price of a given stock on day i.
Design an algorithm to find the maximum profit.
You may complete at most k transactions.
Note:
You may not engage in multiple transactions at the same time
(ie, you must sell the stock before you buy again).
Credits:
Special thanks to @Freezen for adding this problem and creating all test cases.
'''
class Solution(object):
def maxProfit(self, k, prices):
"""
:type k: int
:type prices: List[int]
:rtype: int
"""
n = len(prices)
if n == 0:
return 0
if k > n // 2:
return self.quickSolve(prices)
hold = [-2 ** 31] * (k + 1)
release = [0] * (k + 1)
for p in prices:
for i in range(k):
hold[i+1] = max(hold[i+1], release[i] - p)
release[i+1] = max(release[i+1], hold[i+1] + p)
return release[k]
def quickSolve(self, prices):
res = 0
for i in range(1, len(prices)):
if prices[i] - prices[i-1] > 0:
res += prices[i] - prices[i-1]
return res
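

if __name__ == "__main__":
    # Editor-added sanity checks using the official examples for this problem
    # (k=2, prices=[2,4,1] -> 2 and k=2, prices=[3,2,6,5,0,3] -> 7).
    sol = Solution()
    assert sol.maxProfit(2, [2, 4, 1]) == 2
    assert sol.maxProfit(2, [3, 2, 6, 5, 0, 3]) == 7
    print("all checks passed")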
| 3.90625
| 4
|
gips/gistmodel/post_processing.py
|
accsc/gips
| 1
|
11745
|
<filename>gips/gistmodel/post_processing.py
import numpy as np
import copy
from gips import FLOAT
from gips import DOUBLE
class post_processing(object):
def __init__(self, fitter, x, pairs=False, prefix=None):
self.fitter = fitter
self.x = x
self.pairs = pairs
self.case = 0
score_dict = { 4 : self.parms4,
5 : self.parms5,
6 : self.parms6
}
mode_dict = { 0 : self.mode0,
1 : self.mode1,
3 : self.mode3,
4 : self.mode4,
5 : self.mode5,
6 : self.mode6,
7 : self.mode7
}
self.score = score_dict[self.fitter.parms]
self.process = mode_dict[self.fitter.mode]
self.prefix = prefix
        if self.prefix is None or self.prefix == "":
            self.prefix = ""
        else:
            self.prefix = "%s" % self.prefix
self.set_x(self.x)
self.set_case(0)
self.process_rec = False
self.process_cplx = False
self.process_lig = False
def set_x(self, x):
self.x = copy.copy(x)
### Apply the solution to the scoring function
self.fitter.gist_functional(self.x)
self.fitter._f_process(self.x)
def set_case(self, case):
self.case = case
self.name = self.fitter.name[case]
### |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|
### |OVERVIEW OF THE DATA STRUCTURE IN THE FITTER OBJECT|
### |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|
###
### Experimental data stored with gdat_fit_lib
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### self.dg = np.zeros(self.N_case, dtype=DOUBLE)
### self.dh = np.zeros(self.N_case, dtype=DOUBLE)
### self.ds = np.zeros(self.N_case, dtype=DOUBLE)
###
###
### GIST data generated with gdat_fit_lib (receptor)
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### self.E = np.zeros((self.N_rec, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.S = np.zeros((self.N_rec, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.g = np.zeros((self.N_rec, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.w = np.zeros(self.N_pos, dtype=DOUBLE)
### self.vol = np.zeros((self.N_pos, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### Which pose belongs to which receptor/gistdata
### self.ind_rec = np.zeros(self.N_pos, dtype=np.int32)
### Which pose belongs to which case
### self.ind_case = np.zeros(self.N_pos, dtype=np.int32)
###
###
### GIST data generated with gdat_fit_lib (complex)
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### self.E_cplx = np.zeros((self.N_cplx, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.S_cplx = np.zeros((self.N_cplx, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.g_cplx = np.zeros((self.N_cplx, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.w_cplx = np.zeros(self.N_cplx, dtype=DOUBLE)
### self.vol_cplx = np.zeros((self.N_cplx, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.ind_rec_cplx = np.arange(self.N_cplx, dtype=np.int32)
### self.ind_case_cplx = np.zeros(self.N_cplx, dtype=np.int32)
###
###
### GIST data generated with gdat_fit_lib (ligand)
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### self.E_lig = np.zeros((self.N_lig, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.S_lig = np.zeros((self.N_lig, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.g_lig = np.zeros((self.N_lig, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.w_lig = np.zeros(self.N_lig, dtype=DOUBLE)
### self.vol_lig = np.zeros((self.N_lig, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.ind_rec_lig = np.arange(self.N_lig, dtype=np.int32)
### self.ind_case_lig = np.zeros(self.N_lig, dtype=np.int32)
###
def mode0(self, callback=None):
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
self.x)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
def mode1(self, callback=None):
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
self.x)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : self.x[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
def mode2(self, callback=None):
pass
def mode3(self, callback=None):
if not self.pairs:
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
self.x)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
### The complex
### ~~~~~~~~~~~
if self.process_cplx:
valid_poses_cplx = np.where(self.fitter.ind_case_cplx==self.case)[0]
valid_recep_cplx = self.fitter.ind_rec_cplx[valid_poses_cplx]
i=0
for pose, recep in zip(valid_poses_cplx, valid_recep_cplx):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_cplx[recep],
self.fitter.S_cplx[recep],
self.fitter.g_cplx[recep],
self.fitter.vol_cplx[pose],
self.x)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_cplx[recep],
self.fitter.pdat_cplx[pose],
prefix="%s.%d.%s" %(self.name, i, "cplx"),
**kwargs)
i += 1
### The ligand
### ~~~~~~~~~~
if self.process_lig:
valid_poses_lig = np.where(self.fitter.ind_case_lig==self.case)[0]
valid_recep_lig = self.fitter.ind_rec_lig[valid_poses_lig]
i=0
for pose, recep in zip(valid_poses_lig, valid_recep_lig):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_lig[recep],
self.fitter.S_lig[recep],
self.fitter.g_lig[recep],
self.fitter.vol_lig[pose],
self.x)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_lig[recep],
self.fitter.pdat_lig[pose],
prefix="%s.%d.%s" %(self.name, i, "lig"),
**kwargs)
i += 1
def mode4(self, callback=None):
if not self.pairs:
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
self.x)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : self.x[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
### The complex
### ~~~~~~~~~~~
if self.process_cplx:
valid_poses_cplx = np.where(self.fitter.ind_case_cplx==self.case)[0]
valid_recep_cplx = self.fitter.ind_rec_cplx[valid_poses_cplx]
i=0
for pose, recep in zip(valid_poses_cplx, valid_recep_cplx):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_cplx[recep],
self.fitter.S_cplx[recep],
self.fitter.g_cplx[recep],
self.fitter.vol_cplx[pose],
self.x)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : self.x[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_cplx[recep],
self.fitter.pdat_cplx[pose],
prefix="%s.%d.%s" %(self.name, i, "cplx"),
**kwargs)
i += 1
### The ligand
### ~~~~~~~~~~
if self.process_lig:
valid_poses_lig = np.where(self.fitter.ind_case_lig==self.case)[0]
valid_recep_lig = self.fitter.ind_rec_lig[valid_poses_lig]
i=0
for pose, recep in zip(valid_poses_lig, valid_recep_lig):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_lig[recep],
self.fitter.S_lig[recep],
self.fitter.g_lig[recep],
self.fitter.vol_lig[pose],
self.x)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : self.x[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_lig[recep],
self.fitter.pdat_lig[pose],
prefix="%s.%d.%s" %(self.name, i, "lig"),
**kwargs)
i += 1
def mode5(self, callback=None):
if not self.pairs:
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
_xr = np.zeros(self.fitter.parms, dtype=DOUBLE)
_xr[:-2] = self.x[:-4]
_xr[-2] = self.x[-4]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
_xr)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
### The complex
### ~~~~~~~~~~~
if self.process_cplx:
valid_poses_cplx = np.where(self.fitter.ind_case_cplx==self.case)[0]
valid_recep_cplx = self.fitter.ind_rec_cplx[valid_poses_cplx]
_xc = np.zeros(self.fitter.parms, dtype=DOUBLE)
_xc[:-2] = self.x[:-4]
_xc[-2] = self.x[-3]
i=0
for pose, recep in zip(valid_poses_cplx, valid_recep_cplx):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_cplx[recep],
self.fitter.S_cplx[recep],
self.fitter.g_cplx[recep],
self.fitter.vol_cplx[pose],
_xc)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_cplx[recep],
self.fitter.pdat_cplx[pose],
prefix="%s.%d.%s" %(self.name, i, "cplx"),
**kwargs)
i += 1
### The ligand
### ~~~~~~~~~~
if self.process_lig:
_xl = np.zeros(self.fitter.parms, dtype=DOUBLE)
_xl[:-2] = self.x[:-4]
_xl[-2] = self.x[-2]
valid_poses_lig = np.where(self.fitter.ind_case_lig==self.case)[0]
valid_recep_lig = self.fitter.ind_rec_lig[valid_poses_lig]
i=0
for pose, recep in zip(valid_poses_lig, valid_recep_lig):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_lig[recep],
self.fitter.S_lig[recep],
self.fitter.g_lig[recep],
self.fitter.vol_lig[pose],
_xl)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_lig[recep],
self.fitter.pdat_lig[pose],
prefix="%s.%d.%s" %(self.name, i, "lig"),
**kwargs)
i += 1
def mode6(self, callback=None):
if not self.pairs:
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
_xr = np.zeros(self.fitter.parms+1, dtype=DOUBLE)
_xr[:-3] = self.x[:-5]
_xr[-3] = self.x[-5]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
_xr)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : _xr[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
### The complex
### ~~~~~~~~~~~
if self.process_cplx:
valid_poses_cplx = np.where(self.fitter.ind_case_cplx==self.case)[0]
valid_recep_cplx = self.fitter.ind_rec_cplx[valid_poses_cplx]
_xc = np.zeros(self.fitter.parms+1, dtype=DOUBLE)
_xc[:-3] = self.x[:-5]
_xc[-3] = self.x[-4]
i=0
for pose, recep in zip(valid_poses_cplx, valid_recep_cplx):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_cplx[recep],
self.fitter.S_cplx[recep],
self.fitter.g_cplx[recep],
self.fitter.vol_cplx[pose],
_xc)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : _xc[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_cplx[recep],
self.fitter.pdat_cplx[pose],
prefix="%s.%d.%s" %(self.name, i, "cplx"),
**kwargs)
i += 1
### The ligand
### ~~~~~~~~~~
if self.process_lig:
valid_poses_lig = np.where(self.fitter.ind_case_lig==self.case)[0]
valid_recep_lig = self.fitter.ind_rec_lig[valid_poses_lig]
_xl = np.zeros(self.fitter.parms+1, dtype=DOUBLE)
_xl[:-3] = self.x[:-5]
_xl[-3] = self.x[-3]
i=0
for pose, recep in zip(valid_poses_lig, valid_recep_lig):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_lig[recep],
self.fitter.S_lig[recep],
self.fitter.g_lig[recep],
self.fitter.vol_lig[pose],
_xl)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : _xl[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_lig[recep],
self.fitter.pdat_lig[pose],
prefix="%s.%d.%s" %(self.name, i, "lig"),
**kwargs)
i += 1
def mode7(self, callback=None):
if self.process_rec and not self.pairs:
_xr = np.zeros(self.fitter.parms+1, dtype=DOUBLE)
if self.process_cplx:
_xc = np.zeros(self.fitter.parms+1, dtype=DOUBLE)
if self.process_lig:
_xl = np.zeros(self.fitter.parms+1, dtype=DOUBLE)
###
### For parms=4:
###
### with pairs:
### -----------
### x[0] = e_co (Cplx)
### x[1] = e_co (Lig)
### x[2] = s_co (Cplx)
### x[3] = s_co (Lig)
### x[4] = g_co (Cplx)
### x[5] = g_co (Lig)
### x[6] = C_E
### x[7] = C_S
###
### without pairs:
### --------------
### x[0] = e_co (Rec)
### x[1] = e_co (Cplx)
### x[2] = e_co (Lig)
### x[3] = s_co (Rec)
### x[4] = s_co (Cplx)
### x[5] = s_co (Lig)
### x[6] = g_co (Rec)
### x[7] = g_co (Cplx)
### x[8] = g_co (Lig)
### x[9] = C_E
### x[10] = C_S
if self.fitter.parms==4:
if self.pairs:
if self.process_cplx:
_xc[:-2] = self.x[[0,2,4]]
if self.process_lig:
_xl[:-2] = self.x[[1,3,5]]
else:
if self.process_rec:
_xr[:-2] = self.x[[0,3,6]]
if self.process_cplx:
_xc[:-2] = self.x[[1,4,7]]
if self.process_lig:
_xl[:-2] = self.x[[2,5,8]]
###
### For parms=5:
###
### with pairs:
### -----------
### x[0] = A
### x[1] = e_co (Cplx)
### x[2] = e_co (Lig)
### x[3] = s_co (Cplx)
### x[4] = s_co (Lig)
### x[5] = g_co (Cplx)
### x[6] = g_co (Lig)
### x[7] = C_E
### x[8] = C_S
###
### without pairs:
### --------------
### x[0] = A
### x[1] = e_co (Rec)
### x[2] = e_co (Cplx)
### x[3] = e_co (Lig)
### x[4] = s_co (Rec)
### x[5] = s_co (Cplx)
### x[6] = s_co (Lig)
### x[7] = g_co (Rec)
### x[8] = g_co (Cplx)
### x[9] = g_co (Lig)
### x[10] = C_E
### x[11] = C_S
elif self.fitter.parms==5:
if self.pairs:
if self.process_cplx:
_xc[:-2] = self.x[[0,1,3,5]]
if self.process_lig:
_xl[:-2] = self.x[[0,2,4,6]]
else:
if self.process_rec:
_xr[:-2] = self.x[[0,1,4,7]]
if self.process_cplx:
_xc[:-2] = self.x[[0,2,5,8]]
if self.process_lig:
_xl[:-2] = self.x[[0,3,6,9]]
###
### For parms=6:
###
### with pairs:
### -----------
### x[0] = E_aff
### x[1] = e_co (Cplx)
### x[2] = e_co (Lig)
### x[3] = S_aff
### x[4] = s_co (Cplx)
### x[5] = s_co (Lig)
### x[6] = g_co (Cplx)
### x[7] = g_co (Lig)
### x[8] = C_E
### x[9] = C_S
###
### without pairs:
### --------------
### x[0] = E_aff
### x[1] = e_co (Rec)
### x[2] = e_co (Cplx)
### x[3] = e_co (Lig)
### x[4] = S_aff
### x[5] = s_co (Rec)
### x[6] = s_co (Cplx)
### x[7] = s_co (Lig)
### x[8] = g_co (Rec)
### x[9] = g_co (Cplx)
### x[10] = g_co (Lig)
### x[11] = C_E
### x[12] = C_S
elif self.fitter.parms==6:
if self.pairs:
if self.process_cplx:
_xc[:-2] = self.x[[0,1,3,4,6]]
if self.process_lig:
_xl[:-2] = self.x[[0,2,3,5,7]]
else:
if self.process_rec:
_xr[:-2] = self.x[[0,1,4,5,8]]
if self.process_cplx:
_xc[:-2] = self.x[[0,2,4,6,9]]
if self.process_lig:
_xl[:-2] = self.x[[0,3,4,7,10]]
if not self.pairs:
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
_xr)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : _xr[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
### The complex
### ~~~~~~~~~~~
if self.process_cplx:
valid_poses_cplx = np.where(self.fitter.ind_case_cplx==self.case)[0]
valid_recep_cplx = self.fitter.ind_rec_cplx[valid_poses_cplx]
i=0
for pose, recep in zip(valid_poses_cplx, valid_recep_cplx):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_cplx[recep],
self.fitter.S_cplx[recep],
self.fitter.g_cplx[recep],
self.fitter.vol_cplx[pose],
_xc)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : _xc[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_cplx[recep],
self.fitter.pdat_cplx[pose],
prefix="%s.%d.%s" %(self.name, i, "cplx"),
**kwargs)
i += 1
### The ligand
### ~~~~~~~~~~
if self.process_lig:
valid_poses_lig = np.where(self.fitter.ind_case_lig==self.case)[0]
valid_recep_lig = self.fitter.ind_rec_lig[valid_poses_lig]
i=0
for pose, recep in zip(valid_poses_lig, valid_recep_lig):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_lig[recep],
self.fitter.S_lig[recep],
self.fitter.g_lig[recep],
self.fitter.vol_lig[pose],
_xl)
if callback != None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : _xl[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_lig[recep],
self.fitter.pdat_lig[pose],
prefix="%s.%d.%s" %(self.name, i, "lig"),
**kwargs)
i += 1
def parms4(self, E_grid, S_grid, g_grid, vol_grid, x):
E = np.zeros_like(E_grid)
S = np.zeros_like(S_grid)
g = np.zeros_like(g_grid)
valids_E = np.where(E_grid>x[0])
valids_S = np.where(S_grid>x[1])
valids_g = np.where(g_grid>x[2])
E[valids_E] = np.copy(E_grid[valids_E])
S[valids_S] = np.copy(S_grid[valids_S])
g[valids_g] = np.copy(g_grid[valids_g])
E_grid_val = np.zeros_like(E)
S_grid_val = np.zeros_like(S)
gv_grid_val = np.zeros_like(g)
### This is probably wrong:
#E_grid_val[valids_g] = E[valids_g] * vol_grid[valids_g] / g[valids_g] * 0.0332
#S_grid_val[valids_g] = S[valids_g] * vol_grid[valids_g] / g[valids_g] * 0.0332 * -1.
### This is how it should be:
### Note: 0.125 is the volume of one voxel
E_grid_val[valids_g] = E[valids_g] * vol_grid[valids_g] * g[valids_g] * 0.0332 * 0.125
S_grid_val[valids_g] = S[valids_g] * vol_grid[valids_g] * g[valids_g] * 0.0332 * 0.125
gv_grid_val[valids_g] = vol_grid[valids_g]*g[valids_g]
return E_grid_val, S_grid_val, gv_grid_val
def parms5(self, E_grid, S_grid, g_grid, vol_grid, x):
E = np.zeros_like(E_grid)
S = np.zeros_like(S_grid)
g = np.zeros_like(g_grid)
E[np.where(E_grid>x[1])] = 1.
S[np.where(S_grid>x[2])] = 1.
g[np.where(g_grid>x[3])] = 1.
E_grid_val = E*g*vol_grid*x[0]
S_grid_val = S*g*vol_grid*x[0]
gv_grid_val = vol_grid*g
return E_grid_val, S_grid_val, gv_grid_val
def parms6(self, E_grid, S_grid, g_grid, vol_grid, x):
E = np.zeros_like(E_grid)
S = np.zeros_like(S_grid)
g = np.zeros_like(g_grid)
E[np.where(E_grid>x[1])] = 1.
S[np.where(S_grid>x[3])] = 1.
g[np.where(g_grid>x[4])] = 1.
E_grid_val = E*g*vol_grid*x[0]
S_grid_val = S*g*vol_grid*x[2]
gv_grid_val = vol_grid*g
return E_grid_val, S_grid_val, gv_grid_val
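

if __name__ == "__main__":
    # Editor-added illustrative check (not part of the original module): parms5 zeroes out
    # voxels below the cutoffs x[1:4] and scales the surviving voxels by the amplitude x[0].
    # parms5 never touches self, so it can be exercised here without building a fitter.
    _E = np.array([0.5, 2.0]); _S = np.array([1.0, 3.0])
    _g = np.array([0.1, 1.5]); _v = np.ones(2)
    _x = np.array([2.0, 1.0, 2.0, 1.0, 0.0])
    print(post_processing.parms5(None, _E, _S, _g, _v, _x))
    # expected: (array([0., 2.]), array([0., 2.]), array([0., 1.]))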
| 2.625
| 3
|
src/Cogs/InfoCog.py
|
kodyVS/Discord-Bot-Development
| 5
|
11746
|
<filename>src/Cogs/InfoCog.py
from discord.ext import commands
import discord
import requests
from bs4 import BeautifulSoup
# work in progress! more languages welcome!
class InfoCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name = 'docs', brief = 'programming language documentation', description = 'documentation for languages, access by calling `.docs <language> <query>`', aliases = ['documentation', 'info'])
async def docs(self, ctx, language: str, query):
# access docs based on language
if language == 'python' or language == 'python3':
full_link = 'https://docs.python.org/3/genindex-all.html'
page = requests.get(full_link).content
soup = BeautifulSoup(page, 'html.parser')
link_descriptions = []
for link in soup.findAll('a'):
if query in link.contents[0]:
link_descriptions.append(f"[{link.contents[0]}](https://docs.python.org/3/{link['href']})")
link_descriptions = list(dict.fromkeys(link_descriptions))
link_descriptions = link_descriptions[:10]
### TODO: multi-lingual docs support (devdocs.io?)
### TODO: faster searching (current 4-5 secs)
### TODO: filter results -> currently only pick top ten, and there are some odd results as well
embed = discord.Embed(title="Python 3 Docs", color = 0x00ff00)
embed.add_field(name=f'{len(link_descriptions)} results found for `{query}` :', value='\n'.join(
link_descriptions), inline=False)
embed.set_thumbnail(url=
'https://upload.wikimedia.org/wikipedia/commons/thumb/c/c3/Python-logo-notext.svg/240px-Python-logo-notext.svg.png')
await ctx.send(embed=embed)
@commands.command(name='github', brief = 'view top 10 daily github repos', description = 'see the names and descriptions of the top x github repos today with `.github x` (default 10)', aliases=['gh'])
async def github(self, ctx, amount: int = 10):
'''Gets the GitHub first < amount > repositories without embeds'''
page = requests.get(
'https://github-trending-api.now.sh/repositories?q=sort=stars&order=desc&since=daily')
response = [
f"{entry['description']}: {'<' + entry['url'] + '>'}\n" for entry in page.json()[:amount]]
embed = discord.Embed(
title=f"**GitHub's top {str(amount)} today**", description='\n'.join(response), color=0x00ff00)
await ctx.send(embed=embed)
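
# Editor's note (assumption based on standard discord.py conventions, not in the original
# file): if this cog is loaded with bot.load_extension, the module usually also needs a
# setup hook along these lines:
# def setup(bot):
#     bot.add_cog(InfoCog(bot))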
| 2.765625
| 3
|
Python/List/37.drop.py
|
angelmpalomares/ModelAndLanguagesForBioInformatics
| 0
|
11747
|
def drop(i_list: list,n:int) -> list:
"""
    Drop every n-th element from the list.
    :param n: drop every n-th element of i_list
    :param i_list: the source list
    :return: the filtered list
"""
assert(n>0)
_shallow_list = []
k=1
for element in i_list:
if k % n != 0:
_shallow_list.append(element)
k+=1
return _shallow_list
if __name__ == "__main__":
print(drop([1,2,3,4,5],6))
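    # Editor-added illustrative check: dropping every 2nd element
    print(drop([1, 2, 3, 4, 5, 6], 2))  # expected: [1, 3, 5]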
| 3.921875
| 4
|
cil/balanced_experience_replay.py
|
itaicaspi-intel/advanced-coach
| 1
|
11748
|
<gh_stars>1-10
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
import random
from enum import Enum
from typing import List, Tuple, Any, Union
import numpy as np
from rl_coach.core_types import Transition
from rl_coach.memories.memory import MemoryGranularity
from rl_coach.memories.non_episodic.experience_replay import ExperienceReplayParameters, ExperienceReplay
from rl_coach.schedules import Schedule, ConstantSchedule
class BalancedExperienceReplayParameters(ExperienceReplayParameters):
def __init__(self):
super().__init__()
self.max_size = (MemoryGranularity.Transitions, 1000000)
self.allow_duplicates_in_batch_sampling = False
self.num_classes = 0
self.state_key_with_the_class_index = 'class'
@property
def path(self):
return 'cil.balanced_experience_replay:BalancedExperienceReplay'
"""
A replay buffer which allows sampling batches which are balanced in terms of the classes that are sampled
"""
class BalancedExperienceReplay(ExperienceReplay):
def __init__(self, max_size: Tuple[MemoryGranularity, int], allow_duplicates_in_batch_sampling: bool=True,
num_classes: int=0, state_key_with_the_class_index: Any='class'):
"""
:param max_size: the maximum number of transitions or episodes to hold in the memory
:param allow_duplicates_in_batch_sampling: allow having the same transition multiple times in a batch
:param num_classes: the number of classes in the replayed data
:param state_key_with_the_class_index: the class index is assumed to be a value in the state dictionary.
this parameter determines the key to retrieve the class index value
"""
super().__init__(max_size, allow_duplicates_in_batch_sampling)
self.current_class_to_sample_from = 0
self.num_classes = num_classes
self.state_key_with_the_class_index = state_key_with_the_class_index
self.transitions = [[] for _ in range(self.num_classes)]
self.transitions_order = []
if self.num_classes < 2:
raise ValueError("The number of classes for a balanced replay buffer should be at least 2. "
"The number of classes that were defined are: {}".format(self.num_classes))
def store(self, transition: Transition, lock: bool=True) -> None:
"""
Store a new transition in the memory.
:param transition: a transition to store
:param lock: if true, will lock the readers writers lock. this can cause a deadlock if an inheriting class
locks and then calls store with lock = True
:return: None
"""
if lock:
self.reader_writer_lock.lock_writing_and_reading()
self._num_transitions += 1
if self.state_key_with_the_class_index not in transition.state.keys():
raise ValueError("The class index was not present in the state of the transition under the given key ({})"
.format(self.state_key_with_the_class_index))
class_idx = transition.state[self.state_key_with_the_class_index]
if class_idx >= self.num_classes:
raise ValueError("The given class index is outside the defined number of classes for the replay buffer. "
"The given class was: {} and the number of classes defined is: {}"
.format(class_idx, self.num_classes))
self.transitions[class_idx].append(transition)
self.transitions_order.append(class_idx)
self._enforce_max_length()
if lock:
self.reader_writer_lock.release_writing_and_reading()
def sample(self, size: int) -> List[Transition]:
"""
        Sample a batch of transitions from the replay buffer. If the requested size is larger than the number
of samples available in the replay buffer then the batch will return empty.
:param size: the size of the batch to sample
:return: a batch (list) of selected transitions from the replay buffer
"""
self.reader_writer_lock.lock_writing()
if size % self.num_classes != 0:
raise ValueError("Sampling batches from a balanced replay buffer should be done only using batch sizes "
"which are a multiple of the number of classes. The number of classes defined is: {} "
"and the batch size requested is: {}".format(self.num_classes, size))
batch_size_from_each_class = size // self.num_classes
if self.allow_duplicates_in_batch_sampling:
transitions_idx = [np.random.randint(len(class_transitions), size=batch_size_from_each_class)
for class_transitions in self.transitions]
else:
for class_idx, class_transitions in enumerate(self.transitions):
                if len(class_transitions) < batch_size_from_each_class:
raise ValueError("The replay buffer cannot be sampled since there are not enough transitions yet. "
"There are currently {} transitions for class {}"
.format(len(class_transitions), class_idx))
transitions_idx = [np.random.choice(len(class_transitions), size=batch_size_from_each_class, replace=False)
for class_transitions in self.transitions]
batch = []
for class_idx, class_transitions_idx in enumerate(transitions_idx):
batch += [self.transitions[class_idx][i] for i in class_transitions_idx]
self.reader_writer_lock.release_writing()
return batch
def remove_transition(self, transition_index: int, lock: bool=True) -> None:
raise ValueError("It is not possible to remove specific transitions with a balanced replay buffer")
def get_transition(self, transition_index: int, lock: bool=True) -> Union[None, Transition]:
raise ValueError("It is not possible to access specific transitions with a balanced replay buffer")
def _enforce_max_length(self) -> None:
"""
Make sure that the size of the replay buffer does not pass the maximum size allowed.
If it passes the max size, the oldest transition in the replay buffer will be removed.
This function does not use locks since it is only called internally
:return: None
"""
granularity, size = self.max_size
if granularity == MemoryGranularity.Transitions:
while size != 0 and self.num_transitions() > size:
self._num_transitions -= 1
del self.transitions[self.transitions_order[0]][0]
del self.transitions_order[0]
else:
raise ValueError("The granularity of the replay buffer can only be set in terms of transitions")
def clean(self, lock: bool=True) -> None:
"""
Clean the memory by removing all the episodes
:return: None
"""
if lock:
self.reader_writer_lock.lock_writing_and_reading()
self.transitions = [[] for _ in range(self.num_classes)]
self.transitions_order = []
self._num_transitions = 0
if lock:
self.reader_writer_lock.release_writing_and_reading()
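

def _usage_sketch():
    """Editor-added usage sketch (not part of the original module). The Transition
    constructor call below is an assumption; only the replay-buffer calls mirror the
    class defined above."""
    memory = BalancedExperienceReplay(
        max_size=(MemoryGranularity.Transitions, 10000),
        allow_duplicates_in_batch_sampling=True,
        num_classes=2,
        state_key_with_the_class_index='class')
    for class_idx in (0, 1):
        # assumed Transition signature: the class index lives in the state dictionary
        memory.store(Transition(state={'class': class_idx}))
    return memory.sample(2)  # batch size must be a multiple of num_classes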
| 2.0625
| 2
|
tools/schedprof/schedprof/mutex.py
|
ivochkin/dfk
| 1
|
11749
|
<reponame>ivochkin/dfk
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from schedprof.enumerated_instance import EnumeratedInstance
class Mutex(EnumeratedInstance):
def __init__(self):
super(Mutex, self).__init__(Mutex)
self.acquired_by = None
self.wait_queue = []
| 2.25
| 2
|
openpype/hosts/houdini/plugins/publish/increment_current_file.py
|
jonclothcat/OpenPype
| 87
|
11750
|
import pyblish.api
import avalon.api
from openpype.api import version_up
from openpype.action import get_errored_plugins_from_data
class IncrementCurrentFile(pyblish.api.InstancePlugin):
"""Increment the current file.
Saves the current scene with an increased version number.
"""
label = "Increment current file"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["houdini"]
families = ["colorbleed.usdrender", "redshift_rop"]
targets = ["local"]
def process(self, instance):
# This should be a ContextPlugin, but this is a workaround
# for a bug in pyblish to run once for a family: issue #250
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
if context.data.get(key, False):
return
else:
context.data[key] = True
context = instance.context
errored_plugins = get_errored_plugins_from_data(context)
if any(
plugin.__name__ == "HoudiniSubmitPublishDeadline"
for plugin in errored_plugins
):
raise RuntimeError(
"Skipping incrementing current file because "
"submission to deadline failed."
)
# Filename must not have changed since collecting
host = avalon.api.registered_host()
current_file = host.current_file()
assert (
context.data["currentFile"] == current_file
), "Collected filename from current scene name."
new_filepath = version_up(current_file)
host.save(new_filepath)
| 2.21875
| 2
|
968 Binary Tree Cameras.py
|
krishna13052001/LeetCode
| 872
|
11751
|
#!/usr/bin/python3
"""
Given a binary tree, we install cameras on the nodes of the tree.
Each camera at a node can monitor its parent, itself, and its immediate children.
Calculate the minimum number of cameras needed to monitor all nodes of the tree.
Example 1:
Input: [0,0,null,0,0]
Output: 1
Explanation: One camera is enough to monitor all nodes if placed as shown.
Example 2:
Input: [0,0,null,0,null,0,null,null,0]
Output: 2
Explanation: At least two cameras are needed to monitor all nodes of the tree.
The above image shows one of the valid configurations of camera placement.
Note:
The number of nodes in the given tree will be in the range [1, 1000].
Every node has value 0.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def __init__(self):
self.covered = {None}
self.cnt = 0
def minCameraCover(self, root: TreeNode) -> int:
"""
        Greedy, bottom-up: covering a leaf's parent is strictly better
        than covering the leaf itself.
"""
self.dfs(root, None)
if root not in self.covered:
self.covered.add(root)
self.cnt += 1
return self.cnt
def dfs(self, node, pi):
"""
post order
rely on the parents to cover it
"""
if not node:
return
self.dfs(node.left, node)
self.dfs(node.right, node)
if node.left not in self.covered or node.right not in self.covered:
self.cnt += 1
self.covered.add(node.left)
self.covered.add(node.right)
self.covered.add(node)
self.covered.add(pi)
class SolutionError:
def __init__(self):
self.covered = set()
def minCameraCover(self, root: TreeNode) -> int:
"""
        Greedy. Top-down does not work here; bottom-up does, since covering a
        leaf's parent is strictly better than covering the leaf itself.
"""
dummy = TreeNode(0)
dummy.left = root
self.dfs(root, dummy)
self.covered.discard(dummy) # swallow KeyError
return len(self.covered)
def dfs(self, node, pi):
"""
post order
"""
if not node:
return
self.dfs(node.left, node)
self.dfs(node.right, node)
        # post order
if (
(not node.left or node.left in self.covered) and
(not node.right or node.right in self.covered)
):
self.covered.add(pi)
return
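

if __name__ == "__main__":
    # Editor-added sanity check using Example 1 from the problem statement
    # ([0,0,null,0,0] -> 1 camera, placed at the root's left child).
    root = TreeNode(0)
    root.left = TreeNode(0)
    root.left.left = TreeNode(0)
    root.left.right = TreeNode(0)
    assert Solution().minCameraCover(root) == 1
    print("ok")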
| 4.09375
| 4
|
examples/get_tiktoks_by_sound.py
|
twitter-79/TikTok-Api
| 2,095
|
11752
|
<gh_stars>1000+
from TikTokApi import TikTokApi
api = TikTokApi.get_instance()
count = 30
# You can find a sound_id from any TikTok-fetching method, or discover songs via the discoverMusic method.
sound_id = "6601861313180207878"
tiktoks = api.by_sound(sound_id, count=count)
for tiktok in tiktoks:
print(tiktok)
| 2.515625
| 3
|
code/SimPleAC_pof_paperplots.py
|
1ozturkbe/robustSPpaper
| 0
|
11753
|
from builtins import str
from builtins import range
from robust.simulations.simulate import filter_gamma_result_dict
from SimPleAC_save import load_obj
import pickle as pickle
import numpy as np
import matplotlib.pyplot as plt
from SimPleAC_pof_simulate import pof_parameters
if __name__ == "__main__":
# Retrieving pof parameters
[model, methods, gammas, number_of_iterations,
min_num_of_linear_sections, max_num_of_linear_sections, verbosity, linearization_tolerance,
number_of_time_average_solves, uncertainty_sets, nominal_solution, directly_uncertain_vars_subs, parallel,
nominal_number_of_constraints, nominal_solve_time] = pof_parameters()
method = methods[0] # only care about Best Pairs
# Loading results
margin = {}
nGammas = nmargins = len(gammas)
margins = gammas
margin['solutions'] = {}
for i in range(nmargins):
margin['solutions'][margins[i]] = pickle.load(open("marginResults/" +
str(margins[i]), 'rb'))
margin['number_of_constraints'] = load_obj('marginnumber_of_constraints', 'marginResults')
margin['simulation_results'] = load_obj('marginsimulation_results', 'marginResults')
gamma = {}
gamma['solutions'] = {}
for i in range(nGammas):
for j in range(len(methods)):
for k in range((len(uncertainty_sets))):
gamma['solutions'][gammas[i], methods[j]['name'], uncertainty_sets[k]] = pickle.load(open(
"gammaResults\\" + str((gammas[i], methods[j]['name'], uncertainty_sets[k])), 'rb'))
gamma['solve_times'] = load_obj('gammasolve_times', 'gammaResults')
gamma['simulation_results'] = load_obj('gammasimulation_results', 'gammaResults')
gamma['number_of_constraints'] = load_obj('gammanumber_of_constraints', 'gammaResults')
# Plotting of cost and probability of failure
objective_name = 'Total fuel weight'
objective_units = 'N'
title = ''
filteredResults = [margin['solutions'],
filter_gamma_result_dict(gamma['solutions'], 1, method['name'], 2, 'box'),
filter_gamma_result_dict(gamma['solutions'], 1, method['name'], 2, 'ellipsoidal')]
filteredSimulations = [margin['simulation_results'],
filter_gamma_result_dict(gamma['simulation_results'], 1, method['name'], 2, 'box'),
filter_gamma_result_dict(gamma['simulation_results'], 1, method['name'], 2, 'ellipsoidal')]
objective_varkey = 'W_{f_m}'
legend_keys = ['margins', 'box', 'ellipsoidal']
edgecolors = ['#FFBF00', '#CC0000', '#008000']
facecolors = ['#FFE135','#FF2052', '#8DB600']
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
lines = []
mincost = 1e10
maxcost = 0
for i in range(len(legend_keys)):
sims = list(filteredSimulations[i].items())
pofs = []
objective_costs = []
objective_stddev = []
for j in sims:
pofs.append(j[1][0])
objective_costs.append(j[1][1])
objective_stddev.append(j[1][2])
mincost = np.min([mincost] + objective_costs)
maxcost = np.max([maxcost] + objective_costs)
lines.append(ax1.plot(gammas, objective_costs, color=edgecolors[i], label=legend_keys[i] + ', cost'))
inds = np.nonzero(np.ones(len(gammas)) - pofs)[0]
uppers = [objective_costs[ind] + objective_stddev[ind] for ind in inds]
lowers = [objective_costs[ind] - objective_stddev[ind] for ind in inds]
x = [gammas[ind] for ind in inds]
ax1.fill_between(x, lowers, uppers,
alpha=0.5, edgecolor = edgecolors[i], facecolor = facecolors[i])
lines.append(ax2.plot(gammas, pofs, color=edgecolors[i], label=legend_keys[i] + ', PoF'))
ax1.set_xlabel(r'Uncertainty Set Scaling Factor $\Gamma$', fontsize=12)
ax1.set_ylabel('Cost [' + objective_name + ' (' + objective_units.capitalize() + ')]', fontsize=12)
ax2.set_ylabel("Probability of Failure", fontsize=12)
ax1.set_ylim([mincost, maxcost])
ax2.set_ylim([0, 1])
plt.title(title, fontsize=12)
labs = [lines[l][0].get_label() for l in [1,3,5,0,2,4]]
ax1.legend(labs, loc="lower right", fontsize=9, numpoints=1)
# ax1.legend(loc="lower right", fontsize=10, numpoints=1)
# fig.legend(loc="lower right", fontsize=10, numpoints=1)
plt.show()
| 2.21875
| 2
|
test/show-cifar10.py
|
tom01h/deep-learning-from-scratch
| 3
|
11754
|
<filename>test/show-cifar10.py<gh_stars>1-10
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # setting to allow importing files from the parent directory
import numpy as np
from dataset.cifar10 import load_cifar10
from PIL import Image
np.set_printoptions(threshold=100)
(x_train, t_train), (x_test, t_test) = load_cifar10(flatten=False)
sample_image = x_test[0:100].reshape((10, 10, 3, 32, 32)).transpose((0, 3, 1, 4, 2)).reshape((320, 320, 3))  # rearrange the first 100 images into a 10x10 tile
Image.fromarray(np.uint8(sample_image*255)).save('sample.png')
print(t_test[0:100].reshape(10,10))
#pil_img = Image.fromarray(np.uint8(sample_image*255))
#pil_img.show()
| 2.859375
| 3
|
vertex-server/signals/__init__.py
|
aoswalt/greenlite-hardware
| 0
|
11755
|
from . import lights
from . import schedule
| 0.9375
| 1
|
PWWS/fool.py
|
ForeverZyh/ASCC
| 21
|
11756
|
<reponame>ForeverZyh/ASCC
# coding: utf-8
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import argparse
import os
import numpy as np
from read_files import split_imdb_files, split_yahoo_files, split_agnews_files
from word_level_process import word_process, get_tokenizer
from char_level_process import char_process
from neural_networks import word_cnn, char_cnn, bd_lstm, lstm
from adversarial_tools import ForwardGradWrapper, adversarial_paraphrase
import tensorflow as tf
from keras import backend as K
import time
from unbuffered import Unbuffered
sys.stdout = Unbuffered(sys.stdout)
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
parser = argparse.ArgumentParser(
description='Craft adversarial examples for a text classifier.')
parser.add_argument('--clean_samples_cap',
help='Amount of clean(test) samples to fool',
type=int, default=1000)
parser.add_argument('-m', '--model',
help='The model of text classifier',
choices=['word_cnn', 'char_cnn', 'word_lstm', 'word_bdlstm'],
default='word_cnn')
parser.add_argument('-d', '--dataset',
help='Data set',
choices=['imdb', 'agnews', 'yahoo'],
default='imdb')
parser.add_argument('-l', '--level',
help='The level of process dataset',
choices=['word', 'char'],
default='word')
def write_origin_input_texts(origin_input_texts_path, test_texts, test_samples_cap=None):
if test_samples_cap is None:
test_samples_cap = len(test_texts)
with open(origin_input_texts_path, 'a') as f:
for i in range(test_samples_cap):
f.write(test_texts[i] + '\n')
def fool_text_classifier():
clean_samples_cap = args.clean_samples_cap # 1000
print('clean_samples_cap:', clean_samples_cap)
# get tokenizer
dataset = args.dataset
tokenizer = get_tokenizer(opt)
# Read data set
x_test = y_test = None
test_texts = None
if dataset == 'imdb':
train_texts, train_labels, dev_texts, dev_labels, test_texts, test_labels = split_imdb_files(opt)
if args.level == 'word':
x_train, y_train, x_test, y_test = word_process(train_texts, train_labels, test_texts, test_labels, dataset)
elif args.level == 'char':
x_train, y_train, x_test, y_test = char_process(train_texts, train_labels, test_texts, test_labels, dataset)
elif dataset == 'agnews':
train_texts, train_labels, test_texts, test_labels = split_agnews_files()
if args.level == 'word':
x_train, y_train, x_test, y_test = word_process(train_texts, train_labels, test_texts, test_labels, dataset)
elif args.level == 'char':
x_train, y_train, x_test, y_test = char_process(train_texts, train_labels, test_texts, test_labels, dataset)
elif dataset == 'yahoo':
train_texts, train_labels, test_texts, test_labels = split_yahoo_files()
if args.level == 'word':
x_train, y_train, x_test, y_test = word_process(train_texts, train_labels, test_texts, test_labels, dataset)
elif args.level == 'char':
x_train, y_train, x_test, y_test = char_process(train_texts, train_labels, test_texts, test_labels, dataset)
# Write clean examples into a txt file
clean_texts_path = r'./fool_result/{}/clean_{}.txt'.format(dataset, str(clean_samples_cap))
if not os.path.isfile(clean_texts_path):
write_origin_input_texts(clean_texts_path, test_texts)
# Select the model and load the trained weights
assert args.model[:4] == args.level
model = None
if args.model == "word_cnn":
model = word_cnn(dataset)
elif args.model == "word_bdlstm":
model = bd_lstm(dataset)
elif args.model == "char_cnn":
model = char_cnn(dataset)
elif args.model == "word_lstm":
model = lstm(dataset)
model_path = r'./runs/{}/{}.dat'.format(dataset, args.model)
model.load_weights(model_path)
print('model path:', model_path)
# evaluate classification accuracy of model on clean samples
scores_origin = model.evaluate(x_test[:clean_samples_cap], y_test[:clean_samples_cap])
print('clean samples origin test_loss: %f, accuracy: %f' % (scores_origin[0], scores_origin[1]))
all_scores_origin = model.evaluate(x_test, y_test)
print('all origin test_loss: %f, accuracy: %f' % (all_scores_origin[0], all_scores_origin[1]))
grad_guide = ForwardGradWrapper(model)
classes_prediction = grad_guide.predict_classes(x_test[: clean_samples_cap])
print('Crafting adversarial examples...')
successful_perturbations = 0
failed_perturbations = 0
sub_rate_list = []
NE_rate_list = []
start_cpu = time.clock()
adv_text_path = r'./fool_result/{}/{}/adv_{}.txt'.format(dataset, args.model, str(clean_samples_cap))
change_tuple_path = r'./fool_result/{}/{}/change_tuple_{}.txt'.format(dataset, args.model, str(clean_samples_cap))
file_1 = open(adv_text_path, "a")
file_2 = open(change_tuple_path, "a")
for index, text in enumerate(test_texts[: clean_samples_cap]):
sub_rate = 0
NE_rate = 0
if np.argmax(y_test[index]) == classes_prediction[index]:
# If the ground_true label is the same as the predicted label
adv_doc, adv_y, sub_rate, NE_rate, change_tuple_list = adversarial_paraphrase(input_text=text,
true_y=np.argmax(y_test[index]),
grad_guide=grad_guide,
tokenizer=tokenizer,
dataset=dataset,
level=args.level)
if adv_y != np.argmax(y_test[index]):
successful_perturbations += 1
print('{}. Successful example crafted.'.format(index))
else:
failed_perturbations += 1
print('{}. Failure.'.format(index))
text = adv_doc
sub_rate_list.append(sub_rate)
NE_rate_list.append(NE_rate)
file_2.write(str(index) + str(change_tuple_list) + '\n')
file_1.write(text + " sub_rate: " + str(sub_rate) + "; NE_rate: " + str(NE_rate) + "\n")
end_cpu = time.clock()
print('CPU second:', end_cpu - start_cpu)
mean_sub_rate = sum(sub_rate_list) / len(sub_rate_list)
mean_NE_rate = sum(NE_rate_list) / len(NE_rate_list)
print('mean substitution rate:', mean_sub_rate)
print('mean NE rate:', mean_NE_rate)
file_1.close()
file_2.close()
def fool_text_classifier_pytorch(model, dataset='imdb'):
clean_samples_cap = 100
print('clean_samples_cap:', clean_samples_cap)
# get tokenizer
tokenizer = get_tokenizer(opt)
# Read data set
x_test = y_test = None
test_texts = None
if dataset == 'imdb':
train_texts, train_labels, dev_texts, dev_labels, test_texts, test_labels = split_imdb_files(opt)
x_train, y_train, x_test, y_test = word_process(train_texts, train_labels, test_texts, test_labels, dataset)
elif dataset == 'agnews':
train_texts, train_labels, test_texts, test_labels = split_agnews_files()
x_train, y_train, x_test, y_test = word_process(train_texts, train_labels, test_texts, test_labels, dataset)
elif dataset == 'yahoo':
train_texts, train_labels, test_texts, test_labels = split_yahoo_files()
x_train, y_train, x_test, y_test = word_process(train_texts, train_labels, test_texts, test_labels, dataset)
grad_guide = ForwardGradWrapper_pytorch(model)
classes_prediction = grad_guide.predict_classes(x_test[: clean_samples_cap])
print('Crafting adversarial examples...')
successful_perturbations = 0
failed_perturbations = 0
sub_rate_list = []
NE_rate_list = []
start_cpu = time.clock()
adv_text_path = r'./fool_result/{}/adv_{}.txt'.format(dataset, str(clean_samples_cap))
change_tuple_path = r'./fool_result/{}/change_tuple_{}.txt'.format(dataset, str(clean_samples_cap))
file_1 = open(adv_text_path, "a")
file_2 = open(change_tuple_path, "a")
for index, text in enumerate(test_texts[: clean_samples_cap]):
sub_rate = 0
NE_rate = 0
if np.argmax(y_test[index]) == classes_prediction[index]:
# If the ground_true label is the same as the predicted label
adv_doc, adv_y, sub_rate, NE_rate, change_tuple_list = adversarial_paraphrase(input_text=text,
true_y=np.argmax(y_test[index]),
grad_guide=grad_guide,
tokenizer=tokenizer,
dataset=dataset,
level='word')
if adv_y != np.argmax(y_test[index]):
successful_perturbations += 1
print('{}. Successful example crafted.'.format(index))
else:
failed_perturbations += 1
print('{}. Failure.'.format(index))
text = adv_doc
sub_rate_list.append(sub_rate)
NE_rate_list.append(NE_rate)
file_2.write(str(index) + str(change_tuple_list) + '\n')
file_1.write(text + " sub_rate: " + str(sub_rate) + "; NE_rate: " + str(NE_rate) + "\n")
end_cpu = time.clock()
print('CPU second:', end_cpu - start_cpu)
mean_sub_rate = sum(sub_rate_list) / len(sub_rate_list)
mean_NE_rate = sum(NE_rate_list) / len(NE_rate_list)
print('mean substitution rate:', mean_sub_rate)
print('mean NE rate:', mean_NE_rate)
file_1.close()
file_2.close()
if __name__ == '__main__':
args = parser.parse_args()
fool_text_classifier()
| 1.96875
| 2
|
test_training_data.py
|
miermans/gym-2048
| 0
|
11757
|
#!/usr/bin/env python
from __future__ import absolute_import
import numpy as np
import os
import pytest
import tempfile
import training_data
class TestTrainingData():
def test_add(self):
td = training_data.training_data()
assert np.array_equal(td.get_x(), np.empty([0, 4, 4], dtype=np.int))
assert np.array_equal(td.get_y_digit(), np.empty([0, 1], dtype=np.int))
assert np.allclose(td.get_reward(), np.empty([0, 1], dtype=np.float))
assert np.array_equal(td.get_next_x(), np.empty([0, 4, 4], dtype=np.int))
assert np.array_equal(td.get_done(), np.empty([0, 1], dtype=np.bool))
td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4]), True)
assert np.array_equal(td.get_x(), np.ones([1, 4, 4], dtype=np.int))
assert np.array_equal(td.get_y_digit(), np.array([[1]], dtype=np.int))
assert np.allclose(td.get_reward(), np.array([[4]], dtype=np.float))
assert np.array_equal(td.get_next_x(), np.zeros([1, 4, 4], dtype=np.int))
assert np.array_equal(td.get_done(), np.array([[1]], dtype=np.bool))
def test_get_x_stacked(self):
td = training_data.training_data()
td.add(np.full([4, 4], 2), 0, 4, np.zeros([4, 4]))
td.add(np.full([4, 4], 8), 1, 8, np.ones([4, 4]))
td.add(np.full([4, 4], 2048), 1, 8, np.ones([4, 4]))
expected_x_stacked = np.array([
[
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
],
[
[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
],
[
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]
]
], dtype=np.int)
assert np.array_equal(td.get_x_stacked(), expected_x_stacked)
def test_get_y_one_hot(self):
td = training_data.training_data()
td.add(np.ones([4, 4]), 0, 4, np.zeros([4, 4]))
td.add(np.zeros([4, 4]), 1, 8, np.ones([4, 4]))
td.add(np.zeros([4, 4]), 3, 8, np.ones([4, 4]))
td.add(np.zeros([4, 4]), 2, 8, np.ones([4, 4]))
expected_y_one_hot = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]
], dtype=np.int)
assert np.array_equal(td.get_y_one_hot(), expected_y_one_hot)
def test_get_total_reward(self):
td = training_data.training_data()
td.add(np.ones([4, 4]), 0, 4, np.zeros([4, 4]))
td.add(np.zeros([4, 4]), 1, 8, np.ones([4, 4]))
td.add(np.zeros([4, 4]), 3, 16, np.ones([4, 4]))
td.add(np.zeros([4, 4]), 2, 32, np.ones([4, 4]))
assert td.get_total_reward() == 60
def test_get_highest_tile(self):
td = training_data.training_data()
td.add(np.full((4, 4), 1), 0, 4, np.full((4, 4), 2))
td.add(np.full((4, 4), 2), 0, 4, np.full((4, 4), 4))
assert td.get_highest_tile() == 4
def test_get_n(self):
td = training_data.training_data()
td.add(np.ones([4, 4]), 1, 4, np.zeros([4, 4]))
td.add(np.zeros([4, 4]), 2, 8, np.ones([4, 4]))
(state, action, reward, next_state, done) = td.get_n(1)
assert np.array_equal(state, np.zeros([4, 4], dtype=np.int))
assert action == 2
assert reward == pytest.approx(8.)
assert np.array_equal(next_state, np.ones([4, 4], dtype=np.int))
def test_hflip(self):
td = training_data.training_data()
board1 = np.array([[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
board2 = np.array([[0, 0, 0, 0],
[2, 4, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
td.add(board1, 1, 2, board2)
td.add(board2, 2, 0, board1)
td.hflip()
expected_x = np.array([
[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]]
], dtype=np.int)
expected_y_digit = np.array([
[3],
[2]
], dtype=np.int)
expected_reward = np.array([
[2],
[0],
], dtype=np.float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
], dtype=np.int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.allclose(td.get_next_x(), expected_next_x)
def test_rotate(self):
td = training_data.training_data()
board1 = np.array([[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
board2 = np.array([[0, 0, 0, 0],
[2, 4, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
td.add(board1, 1, 2, board2)
td.add(board2, 2, 0, board1)
td.rotate(3)
expected_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]]
], dtype=np.int)
expected_y_digit = np.array([
[0],
[1]
], dtype=np.int)
expected_reward = np.array([
[2],
[0],
], dtype=np.float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]]
], dtype=np.int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.array_equal(td.get_next_x(), expected_next_x)
def test_augment(self):
td = training_data.training_data()
initial_board = np.array([[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
next_board = np.array([[0, 0, 0, 2],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
td.add(initial_board, 1, 4, next_board)
td.augment()
assert td.size() == 8
expected_x = np.array([
[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]],
[[1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
], dtype=np.int)
expected_y_digit = np.array([
[1],
[3],
[2],
[0],
[3],
[1],
[0],
[2]
], dtype=np.int)
expected_reward = np.array([
[4],
[4],
[4],
[4],
[4],
[4],
[4],
[4]
], dtype=np.float)
expected_next_x = np.array([
[[0, 0, 0, 2], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], # Original
[[2, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0], [0, 0, 0, 0]], # Hflip'd
[[0, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0], [0, 0, 0, 2]], # Original, rotated 90 degrees
[[0, 0, 0, 2], [0, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]], # Hflip, rotated 90 degrees
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 2, 0], [2, 0, 0, 0]], # Original, rotated 180 degrees
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 2]], # Hflip, rotated 180 degrees
[[2, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0]], # Original, rotate 270 degrees
[[0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [2, 0, 0, 0]] # Hflip, rotated 270 degrees
], dtype=np.int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.array_equal(td.get_next_x(), expected_next_x)
def test_merge(self):
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 1, 16, np.zeros([1, 4, 4]))
td2 = training_data.training_data()
td2.add(np.zeros([1, 4, 4]), 2, 0, np.ones([1, 4, 4]))
td.merge(td2)
expected_x = np.array([
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
], dtype=np.int)
expected_y_digit = np.array([
[1],
[2]
], dtype=np.int)
expected_reward = np.array([
[16],
[0]
], dtype=np.float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
], dtype=np.int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.array_equal(td.get_next_x(), expected_next_x)
def test_split(self):
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 1, 16, np.zeros([1, 4, 4]))
td2 = training_data.training_data()
td2.add(np.zeros([1, 4, 4]), 2, 0, np.ones([1, 4, 4]))
td.merge(td2)
a, b = td.split()
assert np.array_equal(a.get_x(), np.ones([1, 4, 4]))
assert np.array_equal(a.get_y_digit(), [[1]])
assert np.array_equal(a.get_reward(), [[16]])
assert np.array_equal(a.get_next_x(), np.zeros([1, 4, 4]))
assert np.array_equal(b.get_x(), np.zeros([1, 4, 4]))
assert np.array_equal(b.get_y_digit(), [[2]])
assert np.array_equal(b.get_reward(), [[0]])
assert np.array_equal(b.get_next_x(), np.ones([1, 4, 4]))
def test_sample(self):
td = training_data.training_data()
td.add(np.zeros([1, 4, 4]), 0, 0, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 1, 1, np.ones([1, 4, 4]))
sample = td.sample([1])
assert sample.size() == 1
assert sample.get_y_digit() in [[[0]], [[1]]]
if sample.get_y_digit() == 0:
assert np.array_equal(sample.get_x(), np.zeros([1, 4, 4]))
if sample.get_y_digit() == 1:
assert np.array_equal(sample.get_x(), np.ones([1, 4, 4]))
def test_size(self):
td = training_data.training_data()
assert td.size() == 0
td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]))
assert td.size() == 1
def test_log2_rewards(self):
# Set up training data
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 0, 0, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 1, 2, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 3, 16, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 0, 75, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 1, 2048, np.zeros([1, 4, 4]))
td.log2_rewards()
expected_reward = np.array([
[0], [1], [2], [4], [6.2288], [11]
], dtype=np.float)
assert np.allclose(td.get_reward(), expected_reward)
expected_action = np.array([
[0], [1], [2], [3], [0], [1]
], dtype=np.int)
assert np.allclose(td.get_y_digit(), expected_action)
def test_get_discounted_return(self):
# Set up training data
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 1, 2, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 3, 2, np.zeros([1, 4, 4]))
# Test using default gamma value of 0.9
td2 = td.copy()
discounted_return = td2.get_discounted_return()
expected_return = np.array([
[20.218], [18.02], [17.8], [2.0]
], dtype=np.float)
assert np.allclose(discounted_return, expected_return)
# Test using gamma value of 0, should have no effect on rewards
td2 = td.copy()
discounted_return = td2.get_discounted_return(gamma=0.0)
expected_return = np.array([
[4], [2], [16], [2]
], dtype=np.float)
assert np.allclose(discounted_return, expected_return)
# Test end of episode
td3 = training_data.training_data()
td3.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]), False)
td3.add(np.ones([1, 4, 4]), 1, 2, np.zeros([1, 4, 4]), True)
td3.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]), False)
td3.add(np.ones([1, 4, 4]), 3, 2, np.zeros([1, 4, 4]), True)
discounted_return = td3.get_discounted_return()
expected_return = np.array([
[5.8], [2.0], [17.8], [2.0]
], dtype=np.float)
assert np.allclose(discounted_return, expected_return)
def test_normalize_rewards(self):
# Test calculating mean and standard deviation
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 3, 8, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 0, 16, np.zeros([1, 4, 4]))
td.normalize_rewards()
expected_reward = np.array([
[-0.8165], [-0.8165], [0.], [1.633],
], dtype=np.float)
assert np.allclose(td.get_reward(), expected_reward)
# Test specifying mean and standard deviation
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 3, 8, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 0, 16, np.zeros([1, 4, 4]))
td.normalize_rewards(mean=8, sd=1)
expected_reward = np.array([
[-4.], [-4.], [0.], [8.],
], dtype=np.float)
assert np.allclose(td.get_reward(), expected_reward)
def test_normalize_boards(self):
# Test calculating mean and standard deviation
td = training_data.training_data()
td.add(np.full((1, 4, 4), 4), 1, 4, np.full((1, 4, 4), 8))
td.add(np.full((1, 4, 4), 8), 2, 4, np.full((1, 4, 4), 16))
td.add(np.full((1, 4, 4), 16), 3, 4, np.full((1, 4, 4), 32))
td.add(np.full((1, 4, 4), 32), 4, 4, np.full((1, 4, 4), 64))
td.normalize_boards()
mean = 15.
sd = 10.7238052947636
a = (4. - mean) / sd
b = (8. - mean) / sd
c = (16. - mean) / sd
d = (32. - mean) / sd
e = (64. - mean) / sd
expected_x = np.array([
[[a, a, a, a], [a, a, a, a], [a, a, a, a], [a, a, a, a]],
[[b, b, b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]],
[[c, c, c, c], [c, c, c, c], [c, c, c, c], [c, c, c, c]],
[[d, d, d, d], [d, d, d, d], [d, d, d, d], [d, d, d, d]]
], dtype=np.float)
assert np.allclose(td.get_x(), expected_x)
expected_next_x = np.array([
[[b, b, b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]],
[[c, c, c, c], [c, c, c, c], [c, c, c, c], [c, c, c, c]],
[[d, d, d, d], [d, d, d, d], [d, d, d, d], [d, d, d, d]],
[[e, e, e, e], [e, e, e, e], [e, e, e, e], [e, e, e, e]]
], dtype=np.float)
assert np.allclose(td.get_next_x(), expected_next_x)
def test_save_restore(self):
# Set up training data
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]))
td.add(np.zeros([1, 4, 4]), 1, 2, np.ones([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]))
td.add(np.zeros([1, 4, 4]), 3, 2, np.ones([1, 4, 4]))
temp_dir = tempfile.mkdtemp()
temp_filename = os.path.join(temp_dir, 'data.csv')
td.export_csv(temp_filename)
td2 = training_data.training_data()
td2.import_csv(temp_filename)
expected_x = np.array([
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
], dtype=np.int)
expected_y_digit = np.array([
[0],
[1],
[2],
[3]
], dtype=np.int)
expected_reward = np.array([
[4],
[2],
[16],
[2]
], dtype=np.float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
], dtype=np.int)
assert np.array_equal(td2.get_x(), expected_x)
assert np.array_equal(td2.get_y_digit(), expected_y_digit)
assert np.allclose(td2.get_reward(), expected_reward)
assert np.array_equal(td2.get_next_x(), expected_next_x)
os.remove(temp_filename)
os.rmdir(temp_dir)
def test_shuffle(self):
td = training_data.training_data()
n = 5
for i in range(n):
# Use "is odd" for done
td.add(np.full((1, 4, 4), i), i, i, np.full((1, 4, 4), i), (i % 2) == 1)
td.shuffle()
for i in range(n):
# Find where this has been shuffled too
index_of_val = np.where(td.get_y_digit() == i)[0].item(0)
# Check that all parts of this equal i
arrays = td.get_n(index_of_val)
for a in arrays:
if a.dtype is np.dtype(np.bool):
assert((a == ((i % 2) == 1)).all())
else:
assert((a == i).all())
def test_make_boards_unique(self):
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]))
td.add(np.zeros([1, 4, 4]), 1, 2, np.ones([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]))
td.add(np.zeros([1, 4, 4]), 3, 2, np.ones([1, 4, 4]))
td.make_boards_unique()
expected_x = np.array([
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
], dtype=np.int)
expected_y_digit = np.array([
[0],
[1]
], dtype=np.int)
expected_reward = np.array([
[4],
[2]
], dtype=np.float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
], dtype=np.int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.array_equal(td.get_next_x(), expected_next_x)
if __name__ == '__main__':
import pytest
pytest.main()
| 2.546875
| 3
|
tests/test_db_exam_psd.py
|
awenhaowenchao/bee
| 4
|
11758
|
<reponame>awenhaowenchao/bee
from datetime import datetime
from bee import Psd, CX, On, T
from bee import Model, IntegerField, StringField, DateTimeField, Equal, W, C
db_exam = Psd.open("exam")
# 1) single-table count query: SELECT COUNT(*) AS COUNT FROM t_teacher
with db_exam.connection() as conn:
teacher_count = db_exam.Select(*CX("COUNT(*)", "COUNT")).From("t_teacher").int()
print("total techer count is %s" % teacher_count)
# 2) single-table query: SELECT * FROM t_teacher
with db_exam.connection() as conn:
teachers = db_exam.Select(*CX("*")).From("t_teacher").list()
print(teachers)
# 3) single-table query: SELECT * FROM t_teacher, converting the rows to Teacher model instances
class Teacher(Model):
__table__ = 't_teacher'
id = IntegerField(primary_key=True)
name = StringField()
with db_exam.connection() as conn:
teachers = db_exam.Select(*CX("*")).From("t_teacher").list(Teacher)
print(teachers)
# 4) single-table query: SELECT * FROM t_teacher WHERE id=?, converting the rows to Teacher model instances
with db_exam.connection() as conn:
teachers = db_exam.Select(*CX("*")).From("t_teacher").Where(W().equal("id", 1004)).list(Teacher)
print(teachers)
# 5) two-table join query: SELECT DISTINCT id,cid,score FROM t_student JOIN t_sc ON id=sid WHERE id=?
with db_exam.connection() as conn:
result = db_exam.Query(C("id", "cid", "score"), True)\
.From("t_student")\
.Join("t_sc", On("id", "sid"))\
.Where(Equal("id", 1001))\
.list()
print(result)
#or use alias mode like 'SELECT DISTINCT s.id,sc.cid,sc.score FROM t_student AS s JOIN t_sc AS sc ON s.id=sc.sid WHERE s.id=?'
with db_exam.connection() as conn:
result = db_exam.Query(C("s.id", "sc.cid", "sc.score"), True)\
.From(T("t_student", "s"))\
.Join(T("t_sc", "sc"), On("s.id", "sc.sid"))\
.Where(Equal("s.id", 1001))\
.list()
print(result)
# 6) with transaction
with db_exam.transaction():
# insert sql
# update sql
# raise exception
# update Sql
pass
# 7) single-table query: SELECT * FROM t_student limit 0, 5
with db_exam.connection() as conn:
students = db_exam.Select(*CX("*")).From("t_student").limit(1, 5).list()
print(students)
| 2.78125
| 3
|
slybot/slybot/plugins/scrapely_annotations/builder.py
|
coolkunal64/ht
| 0
|
11759
|
<gh_stars>0
import json
from scrapy import Selector
from scrapy.utils.spider import arg_to_iter
from scrapely.htmlpage import parse_html, HtmlTag, HtmlDataFragment
from collections import defaultdict
from itertools import tee, count, groupby
from operator import itemgetter
from slybot.utils import (serialize_tag, add_tagids, remove_tagids, TAGID,
OPEN_TAG, CLOSE_TAG, UNPAIRED_TAG, GENERATEDTAGID)
from .migration import _get_parent, short_guid
class Annotations(object):
def save_extraction_data(self, data, template, **options):
"""
data = {
extracts: [
{
annotations: {"content": "Title"},
id: "id-string",
required: [],
tagid: 12,
# All keys below are optional
variant: 0,
text-content: "name-of-text-content-field",
ignore: True,
ignore_beneath: True,
insert_after: True,
slice: [2, 16],
item_container: True,
container_id: "parent-id-string",
schema_id: "schema-id-string",
repeated: true,
siblings: 2,
field: "field-id-to-be-added-to-in-parent-container"
}
]
}
"""
annotation_data = _clean_annotation_data(data.get('extracts', []))
data['extracts'] = annotation_data
body = template.get('body') or 'original_body'
if body not in template:
if 'original_body' in template:
body = 'original_body'
else:
bodies = [k for k, v in template.items()
if v and k.endswith('_body')]
if bodies:
body = bodies[0]
html = template[body]
template['annotated_body'] = apply_annotations(
annotation_data, html, bool(options.get('legacy')))
return data
def _clean_annotation_data(data):
result = []
sticky_count, stickies = count(1), set()
for ann in data:
if ann.get('item_container'):
ann['annotations'] = {'#portia-content': '#dummy'}
ann['text-content'] = '#portia-content'
elif 'data' in ann:
modified_annotations = {}
grp = itemgetter('attribute')
for _id, value in ann['data'].items():
value['id'] = '%s|%s' % (ann['id'], _id)
sorted_annotations = sorted(ann['data'].values(), key=grp)
for attribute, annotations in groupby(sorted_annotations, grp):
modified_annotations[attribute] = list(annotations)
ann['annotations'] = modified_annotations
elif 'annotations' in ann:
filtered_annotations = {}
for k, v in ann['annotations'].items():
if not v:
continue
if v == '#sticky':
next_sticky = '_sticky%s' % next(sticky_count)
stickies.add(next_sticky)
v = next_sticky
filtered_annotations[k] = v
ann['annotations'] = filtered_annotations
ann['required'] = list((set(ann.get('required', [])) | stickies) &
set(filtered_annotations.values()))
elif "ignore" in ann or "ignore_beneath" in ann:
pass
else:
continue
result.append(ann)
return result
def _get_data_id(annotation):
"""Get id (a str) of an annotation."""
if isinstance(annotation, HtmlTag):
return annotation.attributes[TAGID]
def _gen_annotation_info(annotations, legacy=False):
data = {}
annotation_data = []
for annotation in arg_to_iter(annotations):
if 'annotations' in annotation:
annotation_data.append({
'id': annotation.get('id', short_guid()),
'annotations': annotation.get('annotations', {}),
'required': annotation.get('required', []),
'required_fields': annotation.get('required', []),
'variant': int(annotation.get('variant', 0)),
'generated': annotation.get('generated', False),
'text-content': annotation.get('text-content', 'content'),
'item_container': annotation.get('item_container', False),
'container_id': annotation.get('container_id'),
'schema_id': annotation.get('schema_id'),
'repeated': annotation.get('repeated'),
'siblings': annotation.get('siblings'),
'field': annotation.get('field'),
'selector': annotation.get('selector'),
'selection_mode': annotation.get('selection_mode'),
'min_jump': annotation.get('min_jump', -1),
'max_separator': annotation.get('max_separator', -1),
'xpath': annotation.get('xpath')
})
if 'ignore' in annotation or 'ignore_beneath' in annotation:
if annotation.get('ignore_beneath'):
data['data-scrapy-ignore-beneath'] = 'true'
elif annotation.get('ignore'):
data['data-scrapy-ignore'] = 'true'
if annotation_data:
if legacy:
annotation_data = annotation_data[0]
        serialized = json.dumps(annotation_data).replace('"', '&quot;')
data['data-scrapy-annotate'] = serialized
return data
def _get_generated_annotation(element, annotations, nodes, html_body, inserts,
legacy=False):
eid = insert_after_tag = _get_data_id(element)
text_strings = _get_text_nodes(nodes, html_body)
text_content = ''.join((s.lstrip() for s in text_strings))
pre_selected = []
for annotation in annotations:
start, end = _get_generated_slice(annotation)
pre_selected.append((text_content[0:start], text_content[start:end],
annotation))
tag_stack = [insert_after_tag]
next_text_node = ''
for i, node in enumerate(nodes):
if isinstance(node, HtmlTag):
if node.tag_type == OPEN_TAG:
tagid = node.attributes.get(TAGID, '').strip()
if tagid:
tag_stack.append(tagid)
elif node.tag_type == CLOSE_TAG and tag_stack:
insert_after_tag = tag_stack.pop()
elif (isinstance(node, HtmlDataFragment) and len(tag_stack) == 1):
text = html_body[node.start:node.end]
# This allows for a clean way to insert fragments up until the
# next tag in apply_annotations if we have already inserted a new
# generated tag
if not node.is_text_content and inserts.get(insert_after_tag):
inserts[insert_after_tag].append(text)
continue
removed = 0
inserted = False
for j, (pre, selected, annotation) in enumerate(pre_selected[:]):
if selected and selected in text:
previous, post = text.split(selected, 1)
if previous.strip() in pre:
pre_selected.pop(j - removed)
removed += 1
generated = _generate_elem(
annotation, selected, legacy)
# Next immediate text node will be returned and added
# to the new document. Other text nodes within this
# node will be added after other child nodes have been
# closed.
if (insert_after_tag == eid and
not annotation.get('insert_after')):
next_text_node += previous + generated
inserted = True
else:
inserts[insert_after_tag].extend([previous,
generated])
text = post
if inserted:
next_text_node += text
else:
inserts[insert_after_tag].append(text)
return next_text_node
def _get_text_nodes(nodes, html_body):
text = []
open_tags = 0
for node in nodes:
if isinstance(node, HtmlTag):
if node.tag_type == OPEN_TAG:
open_tags += 1
elif node.tag_type == CLOSE_TAG:
open_tags -= 1
elif (isinstance(node, HtmlDataFragment) and
node.is_text_content and open_tags == 0):
text.append(html_body[node.start:node.end])
return text
def _get_generated_slice(annotation):
annotation_slice = annotation.get('slice', [0])[:2]
if not annotation_slice:
annotation_slice = [0, 0]
elif len(annotation_slice) < 2:
annotation_slice.append(annotation_slice[0])
return annotation_slice
def _generate_elem(annotation, text, legacy=False):
sections = ['<ins']
annotation_info = _gen_annotation_info(annotation, legacy)
annotation_info[GENERATEDTAGID] = annotation.get('id')
attributes = []
for key, value in annotation_info.items():
attributes.append('%s="%s"' % (key, value))
sections.append(' '.join(attributes))
if len(sections) > 1:
sections[0] += ' '
sections.extend(['>', text, '</ins>'])
return ''.join(sections)
def _get_inner_nodes(target, open_tags=1, insert_after=False,
stop_on_next=False):
nodes = []
    while open_tags > 0:
elem = next(target)
if isinstance(elem, HtmlTag):
if elem.tag_type == OPEN_TAG:
open_tags += 1
if stop_on_next and elem.attributes.get(TAGID) is not None:
return nodes
elif (stop_on_next and
elem.tag_type == UNPAIRED_TAG and
elem.attributes.get(TAGID) is not None):
return nodes
elif elem.tag_type == CLOSE_TAG:
open_tags -= 1
nodes.append(elem)
if insert_after:
return _get_inner_nodes(target, stop_on_next=True)
return nodes
def _add_element(element, output, html):
if '__added' not in element.attributes:
output.append(html[element.start:element.end])
element.attributes['__added'] = True
return element
def _annotation_key(a):
return a.get('generated', False) + sum(a.get('slice', []))
def _filter_annotations(annotations):
selector, tagid = [], []
for ann in annotations:
if ann:
if ann.get('selector'):
selector.append(ann)
elif ann.get('tagid') and (ann.get('annotations') or
ann.get('ignore')):
tagid.append(ann)
return selector, tagid
def _merge_annotations_by_selector(annotations):
def grouper(x):
return x.get('selector')
annotations.sort(key=grouper)
return [list(annos) for _, annos in groupby(annotations, key=grouper)]
def apply_selector_annotations(annotations, target_page):
page = Selector(text=target_page)
converted_annotations = []
tagid_selector_map = {}
added_repeated = {}
containers = {}
for annotation in annotations:
if annotation.get('item_container'):
containers[annotation['id']] = annotation
selector = annotation.get('selector')
tagid, elems = tagid_for_annotation(annotation, page)
if tagid is not None:
annotation['tagid'] = tagid
if selector:
tagid_selector_map[tagid] = selector
converted_annotations.append(annotation)
# Create container for repeated field annotation
if (annotation.get('repeated') and
not annotation.get('item_container') and
elems is not None and len(elems) and
len(annotation.get('annotations')) == 1):
repeated_parent = add_repeated_field(annotation, elems, page)
if repeated_parent:
converted_annotations.append(repeated_parent)
container_id = repeated_parent['container_id']
added_repeated[container_id] = repeated_parent
if added_repeated:
for container_id, child in added_repeated.items():
container = containers[container_id]
if container['tagid'] != child['tagid']:
continue
_, elems = tagid_for_annotation(container, page)
parent = elems[0].getparent()
container['tagid'] = int(parent.attrib.get('data-tagid', 1e9))
return _merge_annotations_by_selector(converted_annotations)
def tagid_for_annotation(annotation, page):
selector = annotation.get('selector')
if not selector:
return None, None
elems = []
while selector and not elems:
elems = [elem._root for elem in page.css(selector)]
selector = ' > '.join(selector.split(' > ')[1:])
if not elems:
return None, None
tagids = [int(e.attrib.get('data-tagid', 1e9)) for e in elems]
return min(tagids), elems
def add_repeated_field(annotation, elems, page):
parent = _get_parent(elems, page)
    field = list(annotation['annotations'].values())[0][0]['field']
container_id = '%s#parent' % annotation['id']
if len(parent):
tagid = int(parent.attrib.get('data-tagid', 1e9))
parent_annotation = {
'item_container': True,
'id': container_id,
'annotations': {'#portia-content': '#dummy'},
'text-content': '#portia-content',
'container_id': annotation['container_id'],
'field': field,
'tagid': tagid
}
annotation['item_container'] = True
annotation['field'] = field
annotation['container_id'] = container_id
return parent_annotation
def apply_annotations(annotations, target_page, legacy=False):
selector_annotations, tagid_annotations = _filter_annotations(annotations)
inserts = defaultdict(list)
numbered_html = add_tagids(target_page)
if selector_annotations:
converted_annotations = apply_selector_annotations(
selector_annotations, numbered_html)
tagid_annotations += converted_annotations
target = iter(parse_html(numbered_html))
output, tag_stack = [], []
element = next(target)
last_id = 0
# XXX: A dummy element is added to the end so if the last annotation is
# generated it will be added to the output
filtered = defaultdict(list)
for grouped in tagid_annotations:
for ann in arg_to_iter(grouped):
filtered[ann['tagid']].append(ann)
dummy = [(1e9, [{}])]
sorted_annotations = sorted([(int(k), v) for k, v in filtered.items()] +
dummy)
try:
for aid, annotation_data in sorted_annotations:
# Move target until replacement/insertion point
while True:
while not isinstance(element, HtmlTag) or element.tag == 'ins':
output.append(numbered_html[element.start:element.end])
element = next(target)
if element.tag_type in {OPEN_TAG, UNPAIRED_TAG}:
last_id = element.attributes.get(TAGID)
tag_stack.append(last_id)
if element.tag_type in {CLOSE_TAG, UNPAIRED_TAG} and tag_stack:
if ('__added' not in element.attributes and
last_id is not None and aid is not None and
int(last_id) < int(aid)):
output.append(numbered_html[element.start:element.end])
element.attributes['__added'] = True
last_inserted = tag_stack.pop()
to_insert = inserts.pop(last_inserted, None)
if to_insert:
output.extend(to_insert)
# Skip all nodes up to the next HtmlTag as these
# have already been added
while True:
element = next(target)
try:
last_id = element.attributes.get(TAGID,
last_id)
except AttributeError:
pass
if isinstance(element, HtmlTag):
break
continue
if (last_id is not None and aid is not None and
int(last_id) < int(aid)):
if '__added' not in element.attributes:
output.append(numbered_html[element.start:element.end])
element.attributes['__added'] = True
element = next(target)
else:
break
generated = []
next_generated = []
regular_annotations = []
# Place generated annotations at the end and sort by slice
for annotation in sorted(annotation_data, key=_annotation_key):
if annotation.get('generated'):
if annotation.get('insert_after'):
next_generated.append(annotation)
else:
generated.append(annotation)
else:
regular_annotations.append(annotation)
# Add annotations data as required
if regular_annotations:
annotation_info = _gen_annotation_info(regular_annotations,
legacy)
for key, val in annotation_info.items():
element.attributes[key] = val
next_text_section = ''
if generated:
inner_data, target = tee(target)
nodes = _get_inner_nodes(inner_data)
next_text_section = _get_generated_annotation(
element, generated, nodes, numbered_html, inserts,
legacy)
if next_generated:
inner_data, target = tee(target)
open_tags = 0 if element.tag_type == UNPAIRED_TAG else 1
nodes = _get_inner_nodes(inner_data, open_tags=open_tags,
insert_after=True)
next_text_section = _get_generated_annotation(
element, next_generated, nodes, numbered_html, inserts,
legacy)
if '__added' not in element.attributes:
output.append(serialize_tag(element))
element.attributes['__added'] = True
# If an <ins> tag has been inserted we need to move forward
if next_text_section:
while True:
elem = next(target)
if (isinstance(elem, HtmlDataFragment) and
elem.is_text_content):
break
output.append(numbered_html[elem.start:elem.end])
output.append(next_text_section)
# Reached the end of the document
except StopIteration:
output.append(numbered_html[element.start:element.end])
else:
for element in target:
output.append(numbered_html[element.start:element.end])
return remove_tagids(''.join(output))
| 2.21875
| 2
|
scripts/slave/recipes/mojo.py
|
bopopescu/chromium-build
| 0
|
11760
|
<filename>scripts/slave/recipes/mojo.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'adb',
'depot_tools/bot_update',
'depot_tools/gclient',
'goma',
'recipe_engine/context',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/step',
'recipe_engine/url',
'depot_tools/tryserver',
]
def _CheckoutSteps(api, buildername):
# Checkout mojo and its dependencies (specified in DEPS) using gclient
api.gclient.set_config('mojo')
if 'Android' in buildername:
api.gclient.apply_config('android')
api.bot_update.ensure_checkout()
api.gclient.runhooks()
def _BuildSteps(api, buildername, is_debug, is_official):
mojob_path = api.path['checkout'].join('mojo', 'tools', 'mojob.py')
args = []
gn_args = []
if 'Android' in buildername:
args += ['--android']
if 'ASan' in buildername:
args += ['--asan']
if api.tryserver.is_tryserver:
args += ['--dcheck_always_on']
env = {}
goma_dir = ''
if 'Win' not in buildername:
# Disable Goma on Windows as it makes the build much slower (> 1 hour vs
    # 15 minutes). Try re-enabling once we have trybots and the cache is
# warm.
goma_dir = api.goma.ensure_goma()
env['GOMA_SERVICE_ACCOUNT_JSON_FILE'] = api.goma.service_account_json_path
if is_debug:
build_type = "--debug"
elif is_official:
build_type = "--official"
else:
build_type = "--release"
if goma_dir:
env['GOMA_DIR'] = goma_dir
with api.context(env=env):
with api.context(cwd=api.path['checkout']):
api.python('mojob gn',
mojob_path,
args=['gn', build_type] + args + gn_args)
api.python('mojob build',
mojob_path,
args=['build', build_type] + args)
def _DeviceCheckStep(api):
known_devices_path = api.path.join(
api.path.expanduser('~'), '.android', 'known_devices.json')
# Device recovery.
args = [
'--known-devices-file', known_devices_path,
'--adb-path', api.adb.adb_path(),
'-v'
]
api.step(
'device_recovery',
[api.path['checkout'].join('third_party', 'catapult', 'devil',
'devil', 'android', 'tools',
'device_recovery.py')] + args,
infra_step=True)
# Device provisioning.
api.python(
'provision_device',
api.path['checkout'].join('third_party', 'catapult', 'devil',
'devil', 'android', 'tools',
'provision_devices.py'),
infra_step=True)
# Device Status.
try:
buildbot_file = '/home/chrome-bot/.adb_device_info'
args = [
'--json-output', api.json.output(),
'--known-devices-file', known_devices_path,
'--buildbot-path', buildbot_file,
'-v', '--overwrite-known-devices-files',
]
result = api.python(
'device_status',
api.path['checkout'].join('third_party', 'catapult', 'devil', 'devil',
'android', 'tools', 'device_status.py'),
args=args,
infra_step=True)
return result
except api.step.InfraFailure as f:
params = {
'summary': ('Device Offline on %s %s' %
(api.properties['mastername'], api.properties['bot_id'])),
'comment': ('Buildbot: %s\n(Please do not change any labels)' %
api.properties['buildername']),
'labels': 'Restrict-View-Google,OS-Android,Infra-Client,Infra-Labs',
}
link = ('https://code.google.com/p/chromium/issues/entry?%s' %
api.url.urlencode(params))
f.result.presentation.links.update({
'report a bug': link
})
raise
def _GetTestConfig(api):
buildername = api.properties.get('buildername')
test_config = {}
if 'Android' in buildername:
test_config['target_os'] = 'android'
elif 'Linux' in buildername:
test_config['target_os'] = 'linux'
elif 'Win' in buildername:
test_config['target_os'] = 'windows'
else:
raise NotImplementedError('Unknown platform') # pragma: no cover
test_config['is_debug'] = 'dbg' in buildername
if 'Official' in buildername:
# This is not reached, as we only have Android official builds.
raise NotImplementedError(
'Testing not supported for official builds') # pragma: no cover
if 'Perf' in buildername:
test_config['test_types'] = ['perf']
else:
test_config['test_types'] = ['default']
if 'ASan' in buildername:
test_config['sanitizer'] = 'asan'
test_config['master_name'] = api.properties.get('mastername')
test_config['builder_name'] = api.properties.get('buildername')
test_config['build_number'] = api.properties.get('buildnumber')
test_config['test_results_server'] = api.properties.get(
'test_results_server', 'test-results.appspot.com')
test_config['dcheck_always_on'] = api.tryserver.is_tryserver
return test_config
def _TestSteps(api):
get_test_list_path = api.path['checkout'].join('mojo', 'tools',
'get_test_list.py')
test_config = _GetTestConfig(api)
test_out = [{'name': u'Hello', 'command': ['world']}]
result = api.python('get_test_list', get_test_list_path,
args=[api.json.input(test_config), api.json.output()],
step_test_data=lambda: api.json.test_api.output(test_out))
test_list = result.json.output
with api.step.defer_results():
for entry in test_list:
name = str(entry['name']) # api.step() wants a non-Unicode string.
command = entry['command']
with api.context(cwd=api.path['checkout']):
api.step(name, command)
def _UploadShellAndApps(api, buildername):
upload_path = api.path['checkout'].join('mojo', 'tools', 'upload_binaries.py')
is_android = 'Android' in buildername
args = []
if is_android:
args.append('--android')
if 'Official' in buildername:
args.append('--official')
api.python('upload shell and app binaries', upload_path, args)
def RunSteps(api):
buildername = api.properties.get('buildername')
_CheckoutSteps(api, buildername)
is_debug = 'dbg' in buildername
is_official = 'Official' in buildername
_BuildSteps(api, buildername, is_debug, is_official)
is_linux = 'Linux' in buildername
is_win = 'Win' in buildername
is_android = 'Android' in buildername
is_tester = 'Tests' in buildername
is_try = api.tryserver.is_tryserver
is_asan = 'ASan' in buildername
is_perf = 'Perf' in buildername
if is_android and is_tester:
_DeviceCheckStep(api)
upload_binaries = ((is_linux or is_android)
and not is_debug and not is_try and not is_perf and not is_asan)
if not is_tester and not is_linux and not is_win:
# TODO(blundell): Eliminate this special case
# once there's an Android release tester bot.
if upload_binaries and is_android:
_UploadShellAndApps(api, buildername)
return
_TestSteps(api)
# TODO(blundell): Remove the "and not is_android" once there's an
# Android release tester bot and I've removed the logic uploading the
# shell on Android above.
if upload_binaries and not is_android:
_UploadShellAndApps(api, buildername)
def GenTests(api):
tests = [
['mojo_linux', 'Mojo Linux'],
['mojo_linux_dbg', 'Mojo Linux (dbg)'],
['mojo_linux_asan', 'Mojo Linux ASan'],
['mojo_linux_asan_dbg', 'Mojo Linux ASan (dbg)'],
['mojo_android_builder', 'Mojo Android Builder'],
['mojo_android_official', 'Mojo Android Official Builder'],
['mojo_android_dbg', 'Mojo Android (dbg)'],
['mojo_android_builder_tests_dbg', 'Mojo Android Builder Tests (dbg)'],
['mojo_win_dbg', 'Mojo Win (dbg)'],
['mojo_linux_perf', 'Mojo Linux Perf']
]
for test_name, buildername in tests:
test = api.test(test_name) + api.properties.generic(buildername=buildername)
if 'Android' in buildername and 'Tests' in buildername:
test += api.step_data("device_status", api.json.output([
{
"battery": {
"status": "5",
"scale": "100",
"temperature": "249",
"level": "100",
"AC powered": "false",
"health": "2",
"voltage": "4286",
"Wireless powered": "false",
"USB powered": "true",
"technology": "Li-ion",
"present": "true"
},
"wifi_ip": "",
"imei_slice": "Unknown",
"ro.build.id": "LRX21O",
"build_detail":
"google/razor/flo:5.0/LRX21O/1570415:userdebug/dev-keys",
"serial": "07a00ca4",
"ro.build.product": "flo",
"adb_status": "device",
"blacklisted": False,
"usb_status": True,
},
{
"adb_status": "offline",
"blacklisted": True,
"serial": "03e0363a003c6ad4",
"usb_status": False,
},
{
"adb_status": "unauthorized",
"blacklisted": True,
"serial": "03e0363a003c6ad5",
"usb_status": True,
},
{
"adb_status": "device",
"blacklisted": True,
"serial": "03e0363a003c6ad6",
"usb_status": True,
},
{}
]))
yield test
yield(api.test('mojo_linux_try') +
api.properties.tryserver(buildername="Mojo Linux Try"))
yield(api.test('mojo_android_builder_tests_dbg_fail_device_check') +
api.properties.tryserver(buildername="Mojo Android Builder Tests (dbg)") +
api.step_data("device_status", retcode=1))
| 1.46875
| 1
|
lib/navigation/AreaFighting.py
|
sadnecc/pb
| 0
|
11761
|
<gh_stars>0
# -*- coding:utf8 -*-
import random
import time
from lib.navigation.PathFinding import Pathfinding
from lib.control.Control import Control
from lib.unit.Player import Player
from lib.struct.CoordiPoint import CoordiPoint
# Fight monsters within a bounded area
class AreaFighting(Pathfinding):
    # area_pos: coordinates of the area's four corners, in order: top-left, top-right, bottom-left, bottom-right
def __init__(self, control: Control, player: Player, area_pos, move_type=0):
Pathfinding.__init__(self, control=control, player=player)
self.area_pos = area_pos
self.hander_area = open("tmp/logs/" + self.getFormatTime(False) + "_areafighting.log", 'a+')
self.start_pos = self.getNowPos()
self.move_type = move_type
    # Walk back into the area
def goto_area(self):
nowPos = self.getNowPos()
if not AreaFighting.pos_in_area(nowPos,self.area_pos) and self.player.getStatus()['combat'] == 0:
print(nowPos.toString())
print("not in area #################################################################################")
print("not in area #################################################################################", file=self.hander_area)
from lib.navigation.EnemyFinder import EnemyFinder
            EnemyFinder(self.player,self.control).clear_target() # Deselect any monster targeted outside the area first, so the bot does not run back out to fight it after returning
            # Walk straight to the center of the area, with no extra waypoints
self.walk(
self.__get_center_of_area(),
move_type=self.move_type,
sleep=0.3,
precision=0.3,
last=3,
combat_exit=True
)
#self.player.not_combat_recover()
self.player.combat_recover()
return True
    # Get the coordinates of the area's center point
    # Method: apply the midpoint formula to one diagonal of the area
def __get_center_of_area(self):
left_top = self.area_pos["leftTop"]
right_bottom = self.area_pos["rightBottom"]
center = [(left_top[0] + right_bottom[0]) / 2, (left_top[1] + right_bottom[1]) / 2]
print("center:")
print(center)
return CoordiPoint(center[0], center[1])
    # Check whether the given coordinate lies inside the area
    # Uses vector cross products (corners taken in clockwise order)
    # A point inside the quadrilateral lies on the same side of every clockwise (or counter-clockwise) edge vector, i.e. all four cross products have the same sign
# a = (B.x - A.x)*(y - A.y) - (B.y - A.y)*(x - A.x);
# b = (C.x - B.x)*(y - B.y) - (C.y - B.y)*(x - B.x);
# c = (D.x - C.x)*(y - C.y) - (D.y - C.y)*(x - C.x);
# d = (A.x - D.x)*(y - D.y) - (A.y - D.y)*(x - D.x);
@staticmethod
def pos_in_area(pos: CoordiPoint,area):
A = CoordiPoint(area["leftTop"][0], area["leftTop"][1])
B = CoordiPoint(area["rightTop"][0], area["rightTop"][1])
C = CoordiPoint(area["rightBottom"][0], area["rightBottom"][1])
D = CoordiPoint(area["leftBottom"][0], area["leftBottom"][1])
a = (B.x - A.x) * (pos.y - A.y) - (B.y - A.y) * (pos.x - A.x)
b = (C.x - B.x) * (pos.y - B.y) - (C.y - B.y) * (pos.x - B.x)
c = (D.x - C.x) * (pos.y - C.y) - (D.y - C.y) * (pos.x - C.x)
d = (A.x - D.x) * (pos.y - D.y) - (A.y - D.y) * (pos.x - D.x)
if (a > 0 and b > 0 and c > 0 and d > 0) or (a < 0 and b < 0 and c < 0 and d < 0):
return True
return False
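# Minimal usage sketch (added for illustration; not part of the original module).
# pos_in_area only needs the four corner keys and a CoordiPoint-like object with
# .x / .y attributes, so it can be exercised on a unit square:
#
#     area = {"leftTop": [0, 1], "rightTop": [1, 1],
#             "rightBottom": [1, 0], "leftBottom": [0, 0]}
#     AreaFighting.pos_in_area(CoordiPoint(0.5, 0.5), area)  # True  (inside)
#     AreaFighting.pos_in_area(CoordiPoint(2.0, 2.0), area)  # False (outside)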
| 2.59375
| 3
|
compress.py
|
willemwouters/PhotoboothPi
| 0
|
11762
|
<filename>compress.py<gh_stars>0
import os
import time
import sys
if len(sys.argv) == 1:
path="/home/pi/storage/"
else:
path=sys.argv[1]
try:
arr=[]
for filename in os.listdir(path):
if("2018-09" in filename):
arr.append(filename)
for f in arr:
filen = os.path.splitext(f)[0]
if(("%s.h264" % filen) in arr) and (("%s.mp3" % filen) in arr and ("%s.mp4" % filen) not in arr):
if(("%s.h264" % filen) == f):
time.sleep(1)
os.system("ffmpeg -i %s -i %s -c:v copy -c:a aac -strict experimental %s" % (path + f, path + filen + ".mp3", path + filen + ".mp4"))
os.system("rm %s %s" % (path + filen + ".mp3", path + f))
except:
print "d"
| 2.59375
| 3
|
html/en/reference/graphs/sage/graphs/graph_plot-2.py
|
sagemath/documentation
| 10
|
11763
|
<reponame>sagemath/documentation
petersen_spring = Graph(':I`ES@obGkqegW~')
sphinx_plot(petersen_spring)
| 1.234375
| 1
|
module00/ex05/kata00.py
|
MedAymenF/42AI-Python-bootcamp
| 0
|
11764
|
<filename>module00/ex05/kata00.py<gh_stars>0
t = (19, 42, 21)
print(f"The {len(t)} numbers are: {t[0]}, {t[1]}, {t[2]}")
| 2.3125
| 2
|
examples/cochrane-simplification/log_regression/bow_newsela_lm_tokens.py
|
AshOlogn/transformers
| 0
|
11765
|
import json
import os
from os.path import join
from random import shuffle
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import MinMaxScaler, normalize
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split
from sklearn.metrics import accuracy_score
from transformers import BertTokenizer, BertConfig, BartTokenizer
def make_vector(text, tokenizer):
token_ids = tokenizer.encode(text)[1:-1]
count_vector = np.zeros(tokenizer.vocab_size, dtype=np.int16)
for ID in token_ids:
count_vector[ID] += 1
return count_vector
def dataloader(data_dir, batch_size=5000):
names = [x[:-6] for x in os.listdir(data_dir) if x[-5:] == '3.txt']
index = 0
while index < len(names):
cur_names = names[index:index+batch_size]
tuples = []
for name in cur_names:
hard = open(join(data_dir, f'{name}.0.txt')).read()
simple = open(join(data_dir, f'{name}.3.txt')).read()
tuples.append((hard, simple))
yield tuples
index += batch_size
def construct_dataset(tuples, tokenizer):
X = np.empty((2*len(tuples), tokenizer.vocab_size), dtype=np.int16)
y = np.empty(2*len(tuples), dtype=np.int16)
index = 0
for s,t in tuples:
X[index] = make_vector(s, tokenizer)
X[index+1] = make_vector(t, tokenizer)
y[index] = 0
y[index+1] = 1
index += 2
return X, y
def get_vocab(tokenizer):
tokens = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(tokenizer.vocab_size)]
return tokens
def simple_term_counts(data_dir='data/newsela/articles'):
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-xsum')
model = LogisticRegression(max_iter=100)
for batch in dataloader(data_dir):
X, y = construct_dataset(batch, tokenizer)
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
#apply feature scaling
#X_train = normalize(X_train)
#X_test = normalize(X_test)
#model.fit(X_train, y_train)
#predictions = model.predict(X_test)
#print(accuracy_score(y_test, predictions))
X = normalize(X)
model.fit(X, y)
vocab = get_vocab(tokenizer)
weights = np.squeeze(model.coef_, axis=0).tolist()
sorted_weights = filter(lambda x: len(x[1].strip()) > 0, zip(range(tokenizer.vocab_size), vocab, weights))
sorted_weights = list(sorted(sorted_weights, key=lambda x: x[2]))
with open('data/logr_weights/bart_freq_newsela_ids.txt', 'w') as f:
for ID, word, weight in sorted_weights:
f.write(f'{ID} {weight}\n')
with open('data/logr_weights/bart_freq_newsela_tokens.txt', 'w') as f:
for ID, word, weight in sorted_weights:
f.write(f'{word} {weight}\n')
print(simple_term_counts())
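# Note added as a reading aid: model.coef_ holds the weights of the "simple" class
# (label 1 in construct_dataset), so the ascending sort above writes tokens from the
# most "complex"-associated (most negative weight) to the most "simple"-associated
# (most positive weight).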
| 2.328125
| 2
|
fdrtd_server/exceptions.py
|
UNakade/server
| 0
|
11766
|
<filename>fdrtd_server/exceptions.py
import logging as _logging
def handle_exception(e):
if isinstance(e, ApiError):
_logging.exception(e.message)
return e.message, e.statuscode
_logging.exception(repr(e))
return None, 500
class ApiError(Exception):
def __init__(self, statuscode, message):
self.statuscode = statuscode
self.message = message
def __str__(self):
return self.message
class InternalServerError(ApiError):
def __init__(self, message):
super().__init__(500, f'internal server error: {message}')
class NotAvailable(ApiError):
def __init__(self, missing):
super().__init__(501, f'not implemented / not available: {missing}')
class MissingParameter(ApiError):
def __init__(self, missing):
super().__init__(400, f'missing parameter: {missing}')
class InvalidParameter(ApiError):
def __init__(self, parameter, invalid):
super().__init__(400, f'invalid parameter: {parameter} = {invalid}')
class InvalidIdentifier(ApiError):
def __init__(self, identifier, invalid):
super().__init__(404, f'invalid identifier: {identifier} = {invalid}')
class MicroserviceNotFound(ApiError):
def __init__(self, missing):
super().__init__(404, f'microservice not available: {missing}')
class FunctionNotFound(ApiError):
def __init__(self, missing):
super().__init__(404, f'function not available: {missing}')
class FunctionNotPublic(ApiError):
def __init__(self, missing):
super().__init__(403, f'function not public: {missing}')
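# Hypothetical usage sketch (not part of the original module): raise a typed API
# error inside a handler and convert it to an HTTP response tuple.
#
#     try:
#         raise MissingParameter("session_id")
#     except ApiError as err:
#         body, status = handle_exception(err)  # -> ('missing parameter: session_id', 400)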
| 2.421875
| 2
|
TCU/usageexample/automationexample.py
|
p--q/TCU
| 0
|
11767
|
<reponame>p--q/TCU
#!/opt/libreoffice5.4/program/python
# -*- coding: utf-8 -*-
import unohelper # Required for automation (strictly, only uno is required).
def macro():
    ctx = XSCRIPTCONTEXT.getComponentContext() # Get the component context.
    smgr = ctx.getServiceManager() # Get the service manager.
    tcu = smgr.createInstanceWithContext("pq.Tcu", ctx) # Instantiate by service name or implementation name.
    print("\n".join(tcu.treelines(ctx)))
g_exportedScripts = macro, # Tuple of the functions to show in the macro selector.
if __name__ == "__main__": # When executed via automation
    def automation(): # Only this function is exposed globally for automation.
        import officehelper
        from functools import wraps
        import sys
        from com.sun.star.beans import PropertyValue
        from com.sun.star.script.provider import XScriptContext
        def connectOffice(func): # Handle the Office connection around func.
            @wraps(func)
            def wrapper(): # Start LibreOffice in the background and get the component context and service manager.
                try:
                    ctx = officehelper.bootstrap() # Get the component context.
                except:
                    print("Could not establish a connection with a running office.", file=sys.stderr)
                    sys.exit()
                print("Connected to a running office ...")
                smgr = ctx.getServiceManager() # Get the service manager.
                print("Using {} {}".format(*_getLOVersion(ctx, smgr))) # Print the LibreOffice version.
                return func(ctx, smgr) # Run the wrapped function.
            def _getLOVersion(ctx, smgr): # Return the LibreOffice name and version.
                cp = smgr.createInstanceWithContext('com.sun.star.configuration.ConfigurationProvider', ctx)
                node = PropertyValue(Name = 'nodepath', Value = 'org.openoffice.Setup/Product' ) # Node path inside share/registry/main.xcd.
                ca = cp.createInstanceWithArguments('com.sun.star.configuration.ConfigurationAccess', (node,))
                return ca.getPropertyValues(('ooName', 'ooSetupVersion')) # Return the LibreOffice name and version as a tuple.
            return wrapper
        @connectOffice # Decorator that passes ctx and smgr as arguments to createXSCRIPTCONTEXT.
        def createXSCRIPTCONTEXT(ctx, smgr): # Create an XSCRIPTCONTEXT.
            class ScriptContext(unohelper.Base, XScriptContext):
                def __init__(self, ctx):
                    self.ctx = ctx
                def getComponentContext(self):
                    return self.ctx
                def getDesktop(self):
                    return ctx.getByName('/singletons/com.sun.star.frame.theDesktop') # com.sun.star.frame.Desktop is deprecated.
                def getDocument(self):
                    return self.getDesktop().getCurrentComponent()
            return ScriptContext(ctx)
        XSCRIPTCONTEXT = createXSCRIPTCONTEXT() # Get the XSCRIPTCONTEXT.
        doc = XSCRIPTCONTEXT.getDocument() # Get the currently open document.
        doctype = "scalc", "com.sun.star.sheet.SpreadsheetDocument" # When opening a Calc document.
        # doctype = "swriter", "com.sun.star.text.TextDocument" # When opening a Writer document.
        if (doc is None) or (not doc.supportsService(doctype[1])): # If no document could be obtained, or it is not a Calc document
            XSCRIPTCONTEXT.getDesktop().loadComponentFromURL("private:factory/{}".format(doctype[0]), "_blank", 0, ()) # Open a document. Assigning to doc here would still call macro() before the document has opened.
        flg = True
        while flg:
            doc = XSCRIPTCONTEXT.getDocument() # Get the currently open document.
            if doc is not None:
                flg = (not doc.supportsService(doctype[1])) # Leave the while loop once the document type has been confirmed.
        return XSCRIPTCONTEXT
    XSCRIPTCONTEXT = automation() # Get the XSCRIPTCONTEXT.
    macro() # Run the macro.
| 1.976563
| 2
|
last_char.py
|
AkhilaSaiBejjarapu/Python
| 0
|
11768
|
word=input()
last_letter=(len(word)-1)
result=word[last_letter]
print(result)
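# Note added for illustration: the idiomatic equivalent uses negative indexing,
# e.g. print(word[-1]), which also reads the last character of the string.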
| 3.828125
| 4
|
simple_retry/decorators.py
|
nicolasmota/retry_decorator
| 11
|
11769
|
<filename>simple_retry/decorators.py
import time
from functools import wraps
import asyncio
from simple_retry.simple_retry.helpers import (
format_retry_message,
has_retries_to_go,
log_message
)
def retry(Except, retries=5, delay=0, logger=None, level='info', multiple=1):
def deco_retry(function):
@wraps(function)
def f_retry(*args, **kwargs):
tries = 1
mdelay = delay
while has_retries_to_go(
tries_performed=tries,
retries_limit=retries
):
try:
return function(*args, **kwargs)
except Except as e:
log_message(
logger=logger,
level=level,
exception=e,
tries_performed=tries,
retries_limit=retries,
wait_delay_multiple=multiple
)
time.sleep(mdelay)
mdelay *= multiple
tries += 1
return function(*args, **kwargs)
return f_retry
return deco_retry
def coroutine_retry(
Except,
retries=5,
delay=0,
logger=None,
level='info',
multiple=1
):
def deco_retry(function):
@asyncio.coroutine
@wraps(function)
def f_retry(*args, **kwargs):
tries = 1
mdelay = delay
while has_retries_to_go(
tries_performed=tries,
retries_limit=retries
):
try:
return (yield from (function(*args, **kwargs)))
except Except as e:
log_message(
logger=logger,
level=level,
exception=e,
tries_performed=tries,
retries_limit=retries,
wait_delay_multiple=multiple
)
yield from (asyncio.sleep(mdelay))
mdelay *= multiple
tries += 1
return (yield from function(*args, **kwargs))
return f_retry
return deco_retry
def async_retry(
Except,
retries=5,
delay=0,
logger=None,
level='info',
multiple=1
):
def deco_retry(function):
@wraps(function)
async def f_retry(*args, **kwargs):
tries = 1
mdelay = delay
while has_retries_to_go(
tries_performed=tries,
retries_limit=retries
):
try:
return await (function(*args, **kwargs))
except Except as e:
log_message(
logger=logger,
level=level,
exception=e,
tries_performed=tries,
retries_limit=retries,
wait_delay_multiple=multiple
)
await (asyncio.sleep(mdelay))
mdelay *= multiple
tries += 1
return await (function(*args, **kwargs))
return f_retry
return deco_retry
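# Hypothetical usage sketch (not part of the original module): retry a flaky
# network call up to 3 times, starting with a 1 second delay and doubling it on
# each failure. The exception type, logger and delay values are illustrative.
#
#     import logging
#
#     @retry(ConnectionError, retries=3, delay=1,
#            logger=logging.getLogger(__name__), level='warning', multiple=2)
#     def fetch_remote_resource():
#         ...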
| 2.578125
| 3
|
environment.py
|
bopopescu/cbrc-devteam-blog
| 0
|
11770
|
<filename>environment.py
# application environment
import settings
import sys
sys.path.append(settings.app_home_dir)
sys.path.append(settings.app_settings["app_lib_dir"])
| 1.71875
| 2
|
cogeo_mosaic/backends/base.py
|
drnextgis/cogeo-mosaic
| 0
|
11771
|
<reponame>drnextgis/cogeo-mosaic<filename>cogeo_mosaic/backends/base.py<gh_stars>0
"""cogeo_mosaic.backend.base: base Backend class."""
import abc
import itertools
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
import attr
import mercantile
from cachetools import TTLCache, cached
from cachetools.keys import hashkey
from morecantile import TileMatrixSet
from rio_tiler.constants import WEB_MERCATOR_TMS
from rio_tiler.errors import PointOutsideBounds
from rio_tiler.io import BaseReader, COGReader
from rio_tiler.models import ImageData
from rio_tiler.mosaic import mosaic_reader
from rio_tiler.tasks import multi_values
from cogeo_mosaic.backends.utils import find_quadkeys, get_hash
from cogeo_mosaic.cache import cache_config
from cogeo_mosaic.errors import NoAssetFoundError
from cogeo_mosaic.models import Info, Metadata
from cogeo_mosaic.mosaic import MosaicJSON
from cogeo_mosaic.utils import bbox_union
def _convert_to_mosaicjson(value: Union[Dict, MosaicJSON]):
if value is not None:
return MosaicJSON(**dict(value))
@attr.s
class BaseBackend(BaseReader):
"""Base Class for cogeo-mosaic backend storage.
Attributes:
path (str): mosaic path.
mosaic_def (MosaicJSON, optional): mosaicJSON document.
reader (rio_tiler.io.BaseReader): Dataset reader. Defaults to `rio_tiler.io.COGReader`.
reader_options (dict): Options to forward to the reader config.
tms (morecantile.TileMatrixSet, optional): TileMatrixSet grid definition. **READ ONLY attribute**. Defaults to `WebMercatorQuad`.
bbox (tuple): mosaic bounds (left, bottom, right, top). **READ ONLY attribute**. Defaults to `(-180, -90, 180, 90)`.
minzoom (int): mosaic Min zoom level. **READ ONLY attribute**. Defaults to `0`.
maxzoom (int): mosaic Max zoom level. **READ ONLY attribute**. Defaults to `30`
"""
path: str = attr.ib()
mosaic_def: MosaicJSON = attr.ib(default=None, converter=_convert_to_mosaicjson)
reader: Type[BaseReader] = attr.ib(default=COGReader)
reader_options: Dict = attr.ib(factory=dict)
# TMS is outside the init because mosaicJSON and cogeo-mosaic only
# works with WebMercator (mercantile) for now.
tms: TileMatrixSet = attr.ib(init=False, default=WEB_MERCATOR_TMS)
# default values for bounds and zoom
bounds: Tuple[float, float, float, float] = attr.ib(
init=False, default=(-180, -90, 180, 90)
)
minzoom: int = attr.ib(init=False, default=0)
maxzoom: int = attr.ib(init=False, default=30)
_backend_name: str
_file_byte_size: Optional[int] = 0
def __attrs_post_init__(self):
"""Post Init: if not passed in init, try to read from self.path."""
self.mosaic_def = self.mosaic_def or self._read()
self.minzoom = self.mosaic_def.minzoom
self.maxzoom = self.mosaic_def.maxzoom
self.bounds = self.mosaic_def.bounds
@abc.abstractmethod
def _read(self) -> MosaicJSON:
"""Fetch mosaic definition"""
@abc.abstractmethod
def write(self, overwrite: bool = True):
"""Upload new MosaicJSON to backend."""
def update(
self,
features: Sequence[Dict],
add_first: bool = True,
quiet: bool = False,
**kwargs,
):
"""Update existing MosaicJSON on backend."""
new_mosaic = MosaicJSON.from_features(
features,
self.mosaic_def.minzoom,
self.mosaic_def.maxzoom,
quadkey_zoom=self.quadkey_zoom,
quiet=quiet,
**kwargs,
)
for quadkey, new_assets in new_mosaic.tiles.items():
tile = mercantile.quadkey_to_tile(quadkey)
assets = self.assets_for_tile(*tile)
assets = [*new_assets, *assets] if add_first else [*assets, *new_assets]
# add custom sorting algorithm (e.g based on path name)
self.mosaic_def.tiles[quadkey] = assets
bounds = bbox_union(new_mosaic.bounds, self.mosaic_def.bounds)
self.mosaic_def._increase_version()
self.mosaic_def.bounds = bounds
self.mosaic_def.center = (
(bounds[0] + bounds[2]) / 2,
(bounds[1] + bounds[3]) / 2,
self.mosaic_def.minzoom,
)
self.bounds = bounds
self.write(overwrite=True)
def assets_for_tile(self, x: int, y: int, z: int) -> List[str]:
"""Retrieve assets for tile."""
return self.get_assets(x, y, z)
def assets_for_point(self, lng: float, lat: float) -> List[str]:
"""Retrieve assets for point."""
tile = mercantile.tile(lng, lat, self.quadkey_zoom)
return self.get_assets(tile.x, tile.y, tile.z)
@cached(
TTLCache(maxsize=cache_config.maxsize, ttl=cache_config.ttl),
key=lambda self, x, y, z: hashkey(self.path, x, y, z, self.mosaicid),
)
def get_assets(self, x: int, y: int, z: int) -> List[str]:
"""Find assets."""
mercator_tile = mercantile.Tile(x=x, y=y, z=z)
quadkeys = find_quadkeys(mercator_tile, self.quadkey_zoom)
return list(
itertools.chain.from_iterable(
[self.mosaic_def.tiles.get(qk, []) for qk in quadkeys]
)
)
def tile( # type: ignore
self, x: int, y: int, z: int, reverse: bool = False, **kwargs: Any,
) -> Tuple[ImageData, List[str]]:
"""Get Tile from multiple observation."""
mosaic_assets = self.assets_for_tile(x, y, z)
if not mosaic_assets:
raise NoAssetFoundError(f"No assets found for tile {z}-{x}-{y}")
if reverse:
mosaic_assets = list(reversed(mosaic_assets))
def _reader(asset: str, x: int, y: int, z: int, **kwargs: Any) -> ImageData:
with self.reader(asset, **self.reader_options) as src_dst:
return src_dst.tile(x, y, z, **kwargs)
return mosaic_reader(mosaic_assets, _reader, x, y, z, **kwargs)
def point(
self, lon: float, lat: float, reverse: bool = False, **kwargs: Any,
) -> List:
"""Get Point value from multiple observation."""
mosaic_assets = self.assets_for_point(lon, lat)
if not mosaic_assets:
raise NoAssetFoundError(f"No assets found for point ({lon},{lat})")
if reverse:
mosaic_assets = list(reversed(mosaic_assets))
def _reader(asset: str, lon: float, lat: float, **kwargs) -> Dict:
with self.reader(asset, **self.reader_options) as src_dst:
return src_dst.point(lon, lat, **kwargs)
if "allowed_exceptions" not in kwargs:
kwargs.update({"allowed_exceptions": (PointOutsideBounds,)})
return list(multi_values(mosaic_assets, _reader, lon, lat, **kwargs).items())
def info(self, quadkeys: bool = False) -> Info: # type: ignore
"""Mosaic info."""
return Info(
bounds=self.mosaic_def.bounds,
center=self.mosaic_def.center,
maxzoom=self.mosaic_def.maxzoom,
minzoom=self.mosaic_def.minzoom,
name=self.mosaic_def.name if self.mosaic_def.name else "mosaic",
quadkeys=[] if not quadkeys else self._quadkeys,
)
@property
def metadata(self) -> Metadata: # type: ignore
"""Retrieve Mosaic metadata
Returns
-------
MosaicJSON as dict without `tiles` key.
"""
return Metadata(**self.mosaic_def.dict())
@property
def center(self):
"""Return center from the mosaic definition."""
return self.mosaic_def.center
@property
def mosaicid(self) -> str:
"""Return sha224 id of the mosaicjson document."""
return get_hash(**self.mosaic_def.dict(exclude_none=True))
@property
def _quadkeys(self) -> List[str]:
"""Return the list of quadkey tiles."""
return list(self.mosaic_def.tiles)
@property
def quadkey_zoom(self) -> int:
"""Return Quadkey zoom property."""
return self.mosaic_def.quadkey_zoom or self.mosaic_def.minzoom
############################################################################
# Not Implemented methods
# BaseReader required those method to be implemented
def stats(self):
"""PlaceHolder for BaseReader.stats."""
raise NotImplementedError
def preview(self):
"""PlaceHolder for BaseReader.preview."""
raise NotImplementedError
def part(self):
"""PlaceHolder for BaseReader.part."""
raise NotImplementedError
def feature(self):
"""PlaceHolder for BaseReader.feature."""
raise NotImplementedError
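# --- Hedged sketch (assumption, not part of cogeo-mosaic): a minimal concrete
# backend showing how the abstract BaseBackend above can be completed. The class
# name and the local-JSON-file storage are illustrative only.
import json
class LocalFileBackend(BaseBackend):
    """Read/write a MosaicJSON document from a local JSON file (illustrative)."""
    _backend_name = "LocalFile"
    def _read(self) -> MosaicJSON:
        # Called by __attrs_post_init__ when no mosaic_def was passed in.
        with open(self.path, "r") as f:
            return MosaicJSON(**json.load(f))
    def write(self, overwrite: bool = True):
        # Persist the current mosaic definition back to self.path.
        with open(self.path, "w") as f:
            json.dump(self.mosaic_def.dict(exclude_none=True), f)
# e.g. backend = LocalFileBackend("mosaic.json")  # reads mosaic.json on init (illustrative)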
| 2.140625
| 2
|
userge/core/methods/decorators/on_filters.py
|
wildyvpn-network/bot
| 0
|
11772
|
# pylint: disable=missing-module-docstring
#
# Copyright (C) 2020 by UsergeTeam@Github, < https://github.com/UsergeTeam >.
#
# This file is part of < https://github.com/UsergeTeam/Userge > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/uaudith/Userge/blob/master/LICENSE >
#
# All rights reserved.
__all__ = ['OnFilters']
from pyrogram.filters import Filter as RawFilter
from ... import types
from . import RawDecorator
class OnFilters(RawDecorator): # pylint: disable=missing-class-docstring
def on_filters(self, # pylint: disable=arguments-differ
filters: RawFilter,
group: int = 0,
allow_private: bool = True,
allow_bots: bool = True,
allow_groups: bool = True,
allow_channels: bool = True,
only_admins: bool = False,
allow_via_bot: bool = True,
check_client: bool = True,
check_downpath: bool = False,
check_change_info_perm: bool = False,
check_edit_perm: bool = False,
check_delete_perm: bool = False,
check_restrict_perm: bool = False,
check_promote_perm: bool = False,
check_invite_perm: bool = False,
check_pin_perm: bool = False) -> RawDecorator._PYRORETTYPE:
"""\nDecorator for handling filters
Parameters:
filters (:obj:`~pyrogram.filters`):
Pass one or more filters to allow only a subset of
messages to be passed in your function.
group (``int``, *optional*):
The group identifier, defaults to 0.
allow_private (``bool``, *optional*):
If ``False``, prohibit private chats, defaults to True.
allow_bots (``bool``, *optional*):
If ``False``, prohibit bot chats, defaults to True.
allow_groups (``bool``, *optional*):
If ``False``, prohibit group chats, defaults to True.
allow_channels (``bool``, *optional*):
If ``False``, prohibit channel chats, defaults to True.
only_admins (``bool``, *optional*):
If ``True``, client should be an admin, defaults to False.
allow_via_bot (``bool``, *optional*):
If ``True``, allow this via your bot, defaults to True.
check_client (``bool``, *optional*):
If ``True``, check client is bot or not before execute, defaults to True.
check_downpath (``bool``, *optional*):
If ``True``, check downpath and make if not exist, defaults to False.
check_change_info_perm (``bool``, *optional*):
If ``True``, check user has change_info permission before execute,
defaults to False.
check_edit_perm (``bool``, *optional*):
If ``True``, check user has edit permission before execute,
defaults to False.
check_delete_perm (``bool``, *optional*):
If ``True``, check user has delete permission before execute,
defaults to False.
check_restrict_perm (``bool``, *optional*):
If ``True``, check user has restrict permission before execute,
defaults to False.
check_promote_perm (``bool``, *optional*):
If ``True``, check user has promote permission before execute,
defaults to False.
check_invite_perm (``bool``, *optional*):
If ``True``, check user has invite permission before execute,
defaults to False.
check_pin_perm (``bool``, *optional*):
If ``True``, check user has pin permission before execute,
defaults to False.
"""
return self._build_decorator(
types.raw.Filter.parse(client=self,
filters=filters,
group=group,
allow_private=allow_private,
allow_bots=allow_bots,
allow_groups=allow_groups,
allow_channels=allow_channels,
only_admins=only_admins,
allow_via_bot=allow_via_bot,
check_client=check_client,
check_downpath=check_downpath,
check_change_info_perm=check_change_info_perm,
check_edit_perm=check_edit_perm,
check_delete_perm=check_delete_perm,
check_restrict_perm=check_restrict_perm,
check_promote_perm=check_promote_perm,
check_invite_perm=check_invite_perm,
check_pin_perm=check_pin_perm))
| 2.1875
| 2
|
pygromos/tests/test_submission/test_hpc_queuing_submission_scheduling.py
|
pultar/PyGromosTools
| 13
|
11773
|
import unittest, tempfile
from pygromos.simulations.hpc_queuing.job_scheduling.schedulers import simulation_scheduler
from pygromos.data.simulation_parameters_templates import template_md
from pygromos.data.topology_templates import blank_topo_template
from pygromos.simulations.hpc_queuing.submission_systems import DUMMY
from pygromos.files.gromos_system.gromos_system import Gromos_System
from pygromos.tests.in_testfiles import in_test_file_path
from pygromos.tests.test_files import out_test_root_dir
class test_MD_scheduler(unittest.TestCase):
submissionSystem = DUMMY
def setUp(self) -> None:
self.tmp_test_dir = tempfile.mkdtemp(dir=out_test_root_dir, prefix="scheduling_Dummy_")
def test_do(self):
in_cnf = in_test_file_path+"/cnf/in_cnf1.cnf"
out_dir_path = self.tmp_test_dir
in_simSystem = Gromos_System(system_name="test_do", work_folder=out_dir_path,
in_top_path=blank_topo_template, in_cnf_path=in_cnf, in_imd_path=template_md,
in_gromosXX_bin_dir=None, in_gromosPP_bin_dir=None)
submission_system = self.submissionSystem()
simulation_scheduler.do(in_simSystem=in_simSystem, out_dir_path=out_dir_path,
submission_system=submission_system,
simulation_run_num=2, verbose=True)
| 1.890625
| 2
|
tutorials.py
|
Xython/pattern-matching
| 20
|
11774
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 30 17:03:01 2017
@author: misakawa
"""
from pattern_matching import Match, when, var, T, t, _, overwrite
from numpy.random import randint
@overwrite(var[(t == int) | (t == float)], var[(t == int) | (t == float)])
def add(a, b):
return a + b
@when(var[t == str], var[t == str])
def add(a, b):
return a + b
class Bound1:
pass
class Bound2:
pass
class Bound3(Bound1, Bound2):
def __repr__(self):
return "bound3"
class Bound4(Bound3):
pass
@when(_[(t != Bound3) & (t < Bound4)])
def add():
return 2
@when(_)
def add():
return 3
assert add(1, 1) == 2
assert add(Bound2()) == 2
assert add(Bound3()) == 3
@when(_[int], _[Bound1], var)
def add(u):
return u
assert add(1, Bound1(), 'last') == 'last'
def is_type(x):
return isinstance(x, type)
m = Match(1, 2, (3, int))
[a, b, c] = m.case(var[int], var, *var[tuple]).get
assert a == 1 and b == 2 and c == ((3, int), )
[c2] = m.case((_, _, (_, var.when(is_type)))).get
assert c2 == int
@overwrite(_ == None)
def summary():
return 0
@when([var[int], *(_ == [])], var)
def summary(head, res):
return head + res
@when([var[int], *var[list]], var)
def summary(head, tail, res):
return summary(tail, res + head)
@when(var[list])
def summary(lst):
return summary(lst, 0)
assert summary(list(range(100))) == 4950
@overwrite([var, *var])
def qsort(head, tail):
lowers = [i for i in tail if i < head]
highers = [i for i in tail if i >= head]
return qsort(lowers) + [head] + qsort(highers)
@when(var)
def qsort(lst):
return lst
qsort(randint(0, 500, size=(1200, )))
@when(_[t.when(lambda _: _ == int)])
def trait_test():
return 1
assert trait_test(1) == 1
class Population:
num: int = 1000
@when(var[t.when(lambda _: hasattr(_, 'num'))])
def trait_test(x):
return x.num
assert trait_test(Population()) == 1000
| 3.0625
| 3
|
RepositoryBootstrap/EnvironmentDiffs.py
|
davidbrownell/v3-Common_Environment
| 0
|
11775
|
# ----------------------------------------------------------------------
# |
# | EnvironmentDiffs.py
# |
# | <NAME> <<EMAIL>>
# | 2018-06-02 22:19:34
# |
# ----------------------------------------------------------------------
# |
# | Copyright <NAME> 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Displays changes made by an environment during activation."""
import json
import os
import sys
import textwrap
import six
import CommonEnvironment
from CommonEnvironment import CommandLine
from CommonEnvironment.Shell.All import CurrentShell
from RepositoryBootstrap import Constants
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
@CommandLine.EntryPoint
@CommandLine.Constraints( output_stream=None,
)
def Before( decorate=False,
output_stream=sys.stdout,
):
_Display(GetOriginalEnvironment(), output_stream, decorate)
return 0
# ----------------------------------------------------------------------
@CommandLine.EntryPoint
@CommandLine.Constraints( output_stream=None,
)
def After( decorate=False,
output_stream=sys.stdout,
):
original_env = GetOriginalEnvironment()
# Compare to the current environment
this_env = dict(os.environ)
differences = {}
for k, v in six.iteritems(this_env):
if ( k not in original_env or
original_env[k] != v
):
differences[k] = v
_Display(differences, output_stream, decorate)
return 0
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def GetOriginalEnvironment():
# Get the original environment
generated_dir = os.getenv(Constants.DE_REPO_GENERATED_NAME)
assert os.path.isdir(generated_dir), generated_dir
original_environment_filename = os.path.join(generated_dir, Constants.GENERATED_ACTIVATION_ORIGINAL_ENVIRONMENT_FILENAME)
assert os.path.isfile(original_environment_filename), original_environment_filename
with open(original_environment_filename) as f:
return json.load(f)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def _Display(content, output_stream, decorate):
if not isinstance(content, six.string_types):
content = json.dumps(content)
if decorate:
output_stream.write(textwrap.dedent(
"""\
//--//--//--//--//--//--//--//--//--//--//--//--//--//--//--//
{}
//--//--//--//--//--//--//--//--//--//--//--//--//--//--//--//
""").format(content))
else:
output_stream.write(content)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
try: sys.exit(CommandLine.Main())
except KeyboardInterrupt: pass
| 1.609375
| 2
|
tests/model/test_ocrd_mets.py
|
wrznr/pyocrd
| 0
|
11776
|
from datetime import datetime
from os.path import join
from tests.base import TestCase, main, assets, copy_of_directory
from ocrd_utils import (
initLogging,
VERSION,
MIMETYPE_PAGE
)
from ocrd_models import OcrdMets
# pylint: disable=protected-access,deprecated-method,too-many-public-methods
class TestOcrdMets(TestCase):
def setUp(self):
self.mets = OcrdMets(filename=assets.url_of('SBB0000F29300010000/data/mets.xml'))
initLogging()
def test_unique_identifier(self):
self.assertEqual(self.mets.unique_identifier, 'http://resolver.staatsbibliothek-berlin.de/SBB0000F29300010000', 'Right identifier')
self.mets.unique_identifier = 'foo'
self.assertEqual(self.mets.unique_identifier, 'foo', 'Right identifier after change')
def test_unique_identifier_from_nothing(self):
mets = OcrdMets.empty_mets()
self.assertEqual(mets.unique_identifier, None, 'no identifier')
mets.unique_identifier = 'foo'
self.assertEqual(mets.unique_identifier, 'foo', 'Right identifier after change')
as_string = mets.to_xml().decode('utf-8')
self.assertIn('ocrd/core v%s' % VERSION, as_string)
self.assertIn('CREATEDATE="%d-%d-%02dT' % (
datetime.now().year,
datetime.now().month,
datetime.now().day,
), as_string)
def test_str(self):
mets = OcrdMets(content='<mets/>')
self.assertEqual(str(mets), 'OcrdMets[fileGrps=[],files=[]]')
def test_override_constructor_args(self):
id2file = {'foo': {}}
mets = OcrdMets(id2file, content='<mets/>')
self.assertEqual(mets._file_by_id, id2file)
def test_file_groups(self):
self.assertEqual(len(self.mets.file_groups), 17, '17 file groups')
def test_find_files(self):
self.assertEqual(len(self.mets.find_files()), 35, '35 files total')
self.assertEqual(len(self.mets.find_files(fileGrp='OCR-D-IMG')), 3, '3 files in "OCR-D-IMG"')
self.assertEqual(len(self.mets.find_files(pageId='PHYS_0001')), 17, '17 files for page "PHYS_0001"')
self.assertEqual(len(self.mets.find_files(pageId='PHYS_0001-NOTEXIST')), 0, '0 pages for "PHYS_0001-NOTEXIST"')
self.assertEqual(len(self.mets.find_files(mimetype='image/tiff')), 13, '13 image/tiff')
self.assertEqual(len(self.mets.find_files(mimetype=MIMETYPE_PAGE)), 20, '20 ' + MIMETYPE_PAGE)
self.assertEqual(len(self.mets.find_files(url='OCR-D-IMG/FILE_0005_IMAGE.tif')), 1, '1 xlink:href="OCR-D-IMG/FILE_0005_IMAGE.tif"')
def test_find_files_local_only(self):
self.assertEqual(len(self.mets.find_files(pageId='PHYS_0001', local_only=True)), 3, '3 local files for page "PHYS_0001"')
def test_physical_pages(self):
self.assertEqual(len(self.mets.physical_pages), 3, '3 physical pages')
def test_physical_pages_from_empty_mets(self):
mets = OcrdMets(content="<mets></mets>")
self.assertEqual(len(mets.physical_pages), 0, 'no physical page')
mets.add_file('OUTPUT', ID="foo123", pageId="foobar")
self.assertEqual(len(mets.physical_pages), 1, '1 physical page')
def test_add_group(self):
mets = OcrdMets.empty_mets()
self.assertEqual(len(mets.file_groups), 0, '0 file groups')
mets.add_file_group('TEST')
self.assertEqual(len(mets.file_groups), 1, '1 file groups')
mets.add_file_group('TEST')
self.assertEqual(len(mets.file_groups), 1, '1 file groups')
def test_add_file(self):
mets = OcrdMets.empty_mets()
self.assertEqual(len(mets.file_groups), 0, '0 file groups')
self.assertEqual(len(mets.find_files(fileGrp='OUTPUT')), 0, '0 files in "OUTPUT"')
f = mets.add_file('OUTPUT', ID="foo123", mimetype="bla/quux", pageId="foobar")
f2 = mets.add_file('OUTPUT', ID="foo1232", mimetype="bla/quux", pageId="foobar")
self.assertEqual(f.pageId, 'foobar', 'pageId set')
self.assertEqual(len(mets.file_groups), 1, '1 file groups')
self.assertEqual(len(mets.find_files(fileGrp='OUTPUT')), 2, '2 files in "OUTPUT"')
mets.set_physical_page_for_file('barfoo', f, order='300', orderlabel="page 300")
self.assertEqual(f.pageId, 'barfoo', 'pageId changed')
mets.set_physical_page_for_file('quux', f2, order='302', orderlabel="page 302")
self.assertEqual(f2.pageId, 'quux', 'pageId changed')
mets.set_physical_page_for_file('barfoo', f2, order='301', orderlabel="page 301")
self.assertEqual(f2.pageId, 'barfoo', 'pageId changed')
self.assertEqual(len(mets.file_groups), 1, '1 file group')
def test_add_file_ID_fail(self):
f = self.mets.add_file('OUTPUT', ID='best-id-ever', mimetype="beep/boop")
self.assertEqual(f.ID, 'best-id-ever', "ID kept")
with self.assertRaises(Exception) as cm:
self.mets.add_file('OUTPUT', ID='best-id-ever', mimetype="boop/beep")
self.assertEqual(str(cm.exception), "File with ID='best-id-ever' already exists")
f2 = self.mets.add_file('OUTPUT', ID='best-id-ever', mimetype="boop/beep", force=True)
self.assertEqual(f._el, f2._el)
def test_filegrp_from_file(self):
f = self.mets.find_files(fileGrp='OCR-D-IMG')[0]
self.assertEqual(f.fileGrp, 'OCR-D-IMG')
def test_add_file_no_id(self):
with self.assertRaisesRegex(Exception, "Must set ID of the mets:file"):
self.mets.add_file('FOO')
def test_add_file_no_pageid(self):
f = self.mets.add_file('OUTPUT', mimetype="bla/quux", ID="foo3")
self.assertEqual(f.pageId, None, 'No pageId')
def test_file_pageid(self):
f = self.mets.find_files()[0]
self.assertEqual(f.pageId, 'PHYS_0001')
f.pageId = 'foo'
self.assertEqual(f.pageId, 'foo')
def test_agent(self):
# Processor(workspace=self.workspace)
mets = self.mets
beforelen = len(mets.agents)
mets.add_agent('foo bar v0.0.1', 'OTHER', 'OTHER', 'YETOTHERSTILL')
# print(['%s'%x for x in mets.agents])
self.assertEqual(len(mets.agents), beforelen + 1)
def test_metshdr(self):
"""
Test whether metsHdr is created on-demand
"""
mets = OcrdMets(content="<mets></mets>")
self.assertFalse(mets._tree.getroot().getchildren())
mets.add_agent()
self.assertEqual(len(mets._tree.getroot().getchildren()), 1)
def test_nocontent_nofilename(self):
with self.assertRaisesRegex(Exception, "Must pass 'filename' or 'content' to"):
OcrdMets()
def test_encoding_entities(self):
mets = OcrdMets(content="""
<mets>
<metsHdr>
<agent>
<name>Őh śéé Áŕ</name>
<note>OCR-D</note>
</agent>
</metsHdr>
</mets>
""")
self.assertIn('Őh śéé Áŕ', mets.to_xml().decode('utf-8'))
def test_remove_file_group(self):
"""
Test removal of filegrp
"""
with copy_of_directory(assets.path_to('SBB0000F29300010000/data')) as tempdir:
mets = OcrdMets(filename=join(tempdir, 'mets.xml'))
self.assertEqual(len(mets.file_groups), 17)
self.assertEqual(len(mets.find_files()), 35)
# print()
# before = sorted([x.ID for x in mets.find_files()])
with self.assertRaisesRegex(Exception, "not empty"):
mets.remove_file_group('OCR-D-GT-ALTO')
mets.remove_file_group('OCR-D-GT-PAGE', recursive=True)
# print([x for x in before if x not in sorted([x.ID for x in mets.find_files()])])
self.assertEqual(len(mets.file_groups), 16)
self.assertEqual(len(mets.find_files()), 33)
if __name__ == '__main__':
main()
| 2.21875
| 2
|
robo_gym/envs/ur/ur_avoidance_basic.py
|
psFournier/robo-gym
| 236
|
11777
|
"""
Environment for basic obstacle avoidance controlling a robotic arm from UR.
In this environment the obstacle moves only up and down along a vertical line in front of the robot.
The goal is for the robot to keep at least a predefined minimum distance from the moving obstacle.
When feasible the robot should keep to its original joint configuration;
otherwise it should wait for the obstacle to move away before proceeding.
"""
import numpy as np
from typing import Tuple
from robo_gym_server_modules.robot_server.grpc_msgs.python import robot_server_pb2
from robo_gym.envs.simulation_wrapper import Simulation
from robo_gym.envs.ur.ur_base_avoidance_env import URBaseAvoidanceEnv
# base, shoulder, elbow, wrist_1, wrist_2, wrist_3
JOINT_POSITIONS = [-1.57, -1.31, -1.31, -2.18, 1.57, 0.0]
DEBUG = True
MINIMUM_DISTANCE = 0.3 # the minimum distance [m] the robot should keep from the obstacle
class BasicAvoidanceUR(URBaseAvoidanceEnv):
"""Universal Robots UR basic obstacle avoidance environment.
Args:
rs_address (str): Robot Server address. Formatted as 'ip:port'. Defaults to None.
fix_base (bool): Whether or not the base joint stays fixed or is moveable. Defaults to False.
fix_shoulder (bool): Whether or not the shoulder joint stays fixed or is moveable. Defaults to False.
fix_elbow (bool): Whether or not the elbow joint stays fixed or is moveable. Defaults to False.
fix_wrist_1 (bool): Whether or not the wrist 1 joint stays fixed or is moveable. Defaults to False.
fix_wrist_2 (bool): Whether or not the wrist 2 joint stays fixed or is moveable. Defaults to False.
fix_wrist_3 (bool): Whether or not the wrist 3 joint stays fixed or is moveable. Defaults to True.
ur_model (str): determines which ur model will be used in the environment. Defaults to 'ur5'.
include_polar_to_elbow (bool): determines whether or not the polar coordinates to the elbow joint are included in the state. Defaults to False.
Attributes:
ur (:obj:): Robot utilities object.
client (:obj:str): Robot Server client.
real_robot (bool): True if the environment is controlling a real robot.
"""
max_episode_steps = 1000
def _set_initial_robot_server_state(self, rs_state, fixed_object_position = None) -> robot_server_pb2.State:
if fixed_object_position:
state_msg = super()._set_initial_robot_server_state(rs_state=rs_state, fixed_object_position=fixed_object_position)
return state_msg
z_amplitude = np.random.default_rng().uniform(low=0.09, high=0.35)
z_frequency = 0.125
z_offset = np.random.default_rng().uniform(low=0.2, high=0.6)
string_params = {"object_0_function": "triangle_wave"}
float_params = {"object_0_x": 0.12,
"object_0_y": 0.34,
"object_0_z_amplitude": z_amplitude,
"object_0_z_frequency": z_frequency,
"object_0_z_offset": z_offset}
state = {}
state_msg = robot_server_pb2.State(state = state, float_params = float_params,
string_params = string_params, state_dict = rs_state)
return state_msg
def reset(self, joint_positions = JOINT_POSITIONS, fixed_object_position = None) -> np.array:
"""Environment reset.
Args:
joint_positions (list[6] or np.array[6]): robot joint positions in radians.
fixed_object_position (list[3]): x,y,z fixed position of object
"""
self.prev_action = np.zeros(6)
state = super().reset(joint_positions = joint_positions, fixed_object_position = fixed_object_position)
return state
def reward(self, rs_state, action) -> Tuple[float, bool, dict]:
env_state = self._robot_server_state_to_env_state(rs_state)
reward = 0
done = False
info = {}
# Reward weights
close_distance_weight = -2
delta_joint_weight = 1
action_usage_weight = 1
rapid_action_weight = -0.2
# Difference in joint position current vs. starting position
delta_joint_pos = env_state[9:15]
# Calculate distance to the obstacle
obstacle_coord = np.array([rs_state['object_0_to_ref_translation_x'], rs_state['object_0_to_ref_translation_y'], rs_state['object_0_to_ref_translation_z']])
ee_coord = np.array([rs_state['ee_to_ref_translation_x'], rs_state['ee_to_ref_translation_y'], rs_state['ee_to_ref_translation_z']])
forearm_coord = np.array([rs_state['forearm_to_ref_translation_x'], rs_state['forearm_to_ref_translation_y'], rs_state['forearm_to_ref_translation_z']])
distance_to_ee = np.linalg.norm(obstacle_coord - ee_coord)
distance_to_forearm = np.linalg.norm(obstacle_coord - forearm_coord)
distance_to_target = np.min([distance_to_ee, distance_to_forearm])
# Reward staying close to the predefined joint position
if abs(env_state[-6:]).sum() < 0.1 * action.size:
reward += delta_joint_weight * (1 - (abs(delta_joint_pos).sum()/(0.1 * action.size))) * (1/1000)
# Reward for not acting
if abs(action).sum() <= action.size:
reward += action_usage_weight * (1 - (np.square(action).sum()/action.size)) * (1/1000)
# Negative reward if actions change too rapidly between steps
for i in range(len(action)):
if abs(action[i] - self.prev_action[i]) > 0.5:
reward += rapid_action_weight * (1/1000)
# Negative reward if the obstacle is closer than the predefined minimum distance
if distance_to_target < MINIMUM_DISTANCE:
reward += close_distance_weight * (1/self.max_episode_steps)
# Check if there is a collision
collision = True if rs_state['in_collision'] == 1 else False
if collision:
done = True
info['final_status'] = 'collision'
info['target_coord'] = obstacle_coord
self.last_position_on_success = []
if self.elapsed_steps >= self.max_episode_steps:
done = True
info['final_status'] = 'success'
info['target_coord'] = obstacle_coord
self.last_position_on_success = []
return reward, done, info
def step(self, action) -> Tuple[np.array, float, bool, dict]:
if type(action) == list: action = np.array(action)
state, reward, done, info = super().step(action)
self.prev_action = self.add_fixed_joints(action)
return state, reward, done, info
class BasicAvoidanceURSim(BasicAvoidanceUR, Simulation):
cmd = "roslaunch ur_robot_server ur_robot_server.launch \
world_name:=tabletop_sphere50.world \
reference_frame:=base_link \
max_velocity_scale_factor:=0.2 \
action_cycle_rate:=20 \
rviz_gui:=false \
gazebo_gui:=true \
objects_controller:=true \
rs_mode:=1moving2points \
n_objects:=1.0 \
object_0_model_name:=sphere50 \
object_0_frame:=target"
def __init__(self, ip=None, lower_bound_port=None, upper_bound_port=None, gui=False, ur_model='ur5', **kwargs):
self.cmd = self.cmd + ' ' + 'ur_model:=' + ur_model
Simulation.__init__(self, self.cmd, ip, lower_bound_port, upper_bound_port, gui, **kwargs)
BasicAvoidanceUR.__init__(self, rs_address=self.robot_server_ip, ur_model=ur_model, **kwargs)
class BasicAvoidanceURRob(BasicAvoidanceUR):
real_robot = True
# roslaunch ur_robot_server ur_robot_server.launch ur_model:=ur5 real_robot:=true rviz_gui:=true gui:=true reference_frame:=base max_velocity_scale_factor:=0.2 action_cycle_rate:=20 rs_mode:=moving
| 3.6875
| 4
|
hear_me_django_app/accounts/management/commands/initial_users.py
|
kamil1marczak/hear_me_django_app
| 0
|
11778
|
<filename>hear_me_django_app/accounts/management/commands/initial_users.py
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import make_password
from django.core.management.base import BaseCommand
from ._private import populate_user
User = get_user_model()
class Command(BaseCommand):
help = 'admin deployment'
def add_arguments(self, parser):
parser.add_argument('total', type=int, help='Indicates the number of users to be created')
def handle(self, *args, **kwargs):
total = kwargs['total']
populate_user(number=total)
obj, created = User.objects.get_or_create(name="root", password=make_password('<PASSWORD>!'), is_superuser=True)
message = "Successfully populated database with initial users"
if created:
message += f" Superuser {obj.name} ha been created"
self.stdout.write(self.style.SUCCESS(message))
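# --- Hedged usage note (assumption, not part of the command): as a Django management
# command this would typically be run as `python manage.py initial_users 5`, or
# programmatically:
# from django.core.management import call_command
# call_command('initial_users', 5)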
| 2.28125
| 2
|
config/constants.py
|
flopezag/fiware-tsc-dashboard
| 0
|
11779
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
##
# Copyright 2017 FIWARE Foundation, e.V.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##
__author__ = 'fla'
GOOGLE_ACCOUNTS_BASE_URL = 'https://accounts.google.com'
APPLICATION_NAME = 'TSC Enablers Dashboard'
CREDENTIAL_DIR = '.credentials'
CREDENTIAL_FILE = 'sheets.googleapis.com.json'
DB_NAME = 'enablers-dashboard.db'
DB_FOLDER = 'dbase'
LOG_FILE = 'tsc-dashboard.log'
# We need to add 16 rows in the number of enablers list corresponding to:
# - Title
# - Report date
# - Data sources updated on
# - Source
# - Units
# - Enabler Impl
# - INCUBATED
# - DEVELOPMENT
# - SUPPORT
# - DEPRECATED
# - And 6 extra blank rows between them
FIXED_ROWS = 16
# We keep the firsts row without change in the sheet (sheet title)
INITIAL_ROW = 2
# The number of columns to delete corresponds to:
# Source, Catalogue, ReadTheDocs, Docker, GitHub, Coverall, Academy, HelpDesk, Backlog, GitHub_Open_Issues,
# GitHub_Closed_Issues, GitHub_Adopters, GitHub_Adopters_Open_Issues, GitHub_Adopters_Closed_Issues,
# GitHub_Comits, GitHub_Forks, GitHub_Watchers, GitHub_Stars, Jira_WorkItem_Not_Closed, Jira_WorkItem_Closed
# + Extra 2 = 22
FIXED_COLUMNS = 22
# We start to delete from the initial column
INITIAL_COLUMN = 1
| 1.304688
| 1
|
tensornetwork/backends/backend_test.py
|
ashoknar/TensorNetwork
| 0
|
11780
|
<gh_stars>0
"""Tests for graphmode_tensornetwork."""
import builtins
import sys
import pytest
import numpy as np
from tensornetwork import connect, contract, Node
from tensornetwork.backends.base_backend import BaseBackend
from tensornetwork.backends import backend_factory
def clean_tensornetwork_modules():
for mod in list(sys.modules.keys()):
if mod.startswith('tensornetwork'):
sys.modules.pop(mod, None)
@pytest.fixture(autouse=True)
def clean_backend_import():
#never do this outside testing
clean_tensornetwork_modules()
yield # use as teardown
clean_tensornetwork_modules()
@pytest.fixture
def no_backend_dependency(monkeypatch):
import_orig = builtins.__import__
# pylint: disable=redefined-builtin
def mocked_import(name, globals, locals, fromlist, level):
if name in ['torch', 'tensorflow', 'jax']:
raise ImportError()
return import_orig(name, globals, locals, fromlist, level)
monkeypatch.setattr(builtins, '__import__', mocked_import)
# Nuke the cache.
backend_factory._INSTANTIATED_BACKENDS = dict()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_pytorch_missing_cannot_initialize_backend():
#pylint: disable=import-outside-toplevel
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.pytorch.pytorch_backend import PyTorchBackend
PyTorchBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_tensorflow_missing_cannot_initialize_backend():
#pylint: disable=import-outside-toplevel
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.tensorflow.tensorflow_backend \
import TensorFlowBackend
TensorFlowBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_jax_missing_cannot_initialize_backend():
#pylint: disable=import-outside-toplevel
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.jax.jax_backend import JaxBackend
JaxBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_config_backend_missing_can_import_config():
#not sure why config is imported here?
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensornetwork.config
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import torch
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_import_tensornetwork_without_backends():
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
#pylint: disable=reimported
import tensornetwork
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.pytorch.pytorch_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.tensorflow.tensorflow_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.jax.jax_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.numpy.numpy_backend
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import torch
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_numpy_network_without_backends():
#pylint: disable=import-outside-toplevel
#pylint: disable=reimported
#pylint: disable=unused-variable
import tensornetwork
a = Node(np.ones((10,)), backend="numpy")
b = Node(np.ones((10,)), backend="numpy")
edge = connect(a[0], b[0])
final_node = contract(edge)
assert final_node.tensor == np.array(10.)
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import torch
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_network_without_backends_raises_error():
#pylint: disable=import-outside-toplevel
#pylint: disable=reimported
#pylint: disable=unused-variable
import tensornetwork
with pytest.raises(ImportError):
Node(np.ones((2, 2)), backend="jax")
with pytest.raises(ImportError):
Node(np.ones((2, 2)), backend="tensorflow")
with pytest.raises(ImportError):
Node(np.ones((2, 2)), backend="pytorch")
def test_base_backend_name():
backend = BaseBackend()
assert backend.name == "base backend"
def test_base_backend_tensordot_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.tensordot(np.ones((2, 2)), np.ones((2, 2)), axes=[[0], [0]])
def test_base_backend_reshape_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.reshape(np.ones((2, 2)), (4, 1))
def test_base_backend_transpose_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.transpose(np.ones((2, 2)), [0, 1])
def test_base_backend_slice_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.slice(np.ones((2, 2)), (0, 1), (1, 1))
def test_base_backend_svd_decompositon_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.svd_decomposition(np.ones((2, 2)), 0)
def test_base_backend_qr_decompositon_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.qr_decomposition(np.ones((2, 2)), 0)
def test_base_backend_rq_decompositon_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.rq_decomposition(np.ones((2, 2)), 0)
def test_base_backend_shape_concat_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_concat([np.ones((2, 2)), np.ones((2, 2))], 0)
def test_base_backend_shape_tensor_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_tensor(np.ones((2, 2)))
def test_base_backend_shape_tuple_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_tuple(np.ones((2, 2)))
def test_base_backend_shape_prod_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_prod(np.ones((2, 2)))
def test_base_backend_sqrt_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.sqrt(np.ones((2, 2)))
def test_base_backend_diag_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.diag(np.ones((2, 2)))
def test_base_backend_convert_to_tensor_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.convert_to_tensor(np.ones((2, 2)))
def test_base_backend_trace_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.trace(np.ones((2, 2)))
def test_base_backend_outer_product_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.outer_product(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_einsul_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.einsum("ii", np.ones((2, 2)))
def test_base_backend_norm_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.norm(np.ones((2, 2)))
def test_base_backend_eye_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eye(2, dtype=np.float64)
def test_base_backend_ones_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.ones((2, 2), dtype=np.float64)
def test_base_backend_zeros_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.zeros((2, 2), dtype=np.float64)
def test_base_backend_randn_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.randn((2, 2))
def test_base_backend_random_uniforl_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.random_uniform((2, 2))
def test_base_backend_conj_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.conj(np.ones((2, 2)))
def test_base_backend_eigh_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eigh(np.ones((2, 2)))
def test_base_backend_eigs_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eigs(np.ones((2, 2)))
def test_base_backend_eigs_lanczos_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eigsh_lanczos(lambda x: x, np.ones((2)))
def test_base_backend_addition_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.addition(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_subtraction_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.subtraction(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_multiply_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.multiply(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_divide_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.divide(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_index_update_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.index_update(np.ones((2, 2)), np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_inv_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.inv(np.ones((2, 2)))
def test_base_backend_sin_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.sin(np.ones((2, 2)))
def test_base_backend_cos_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.cos(np.ones((2, 2)))
def test_base_backend_exp_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.exp(np.ones((2, 2)))
def test_base_backend_log_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.log(np.ones((2, 2)))
def test_base_backend_expm_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.expm(np.ones((2, 2)))
def test_base_backend_sparse_shape_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.sparse_shape(np.ones((2, 2)))
def test_base_backend_broadcast_right_multiplication_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.broadcast_right_multiplication(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_broadcast_left_multiplication_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.broadcast_left_multiplication(np.ones((2, 2)), np.ones((2, 2)))
def test_backend_instantiation(backend):
backend1 = backend_factory.get_backend(backend)
backend2 = backend_factory.get_backend(backend)
assert backend1 is backend2
| 1.921875
| 2
|
src/GUI/Plotter.py
|
sbooeshaghi/pegasus
| 1
|
11781
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pyqtgraph as pg
import numpy as np
class CustomWidget(pg.GraphicsWindow):
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
def __init__(self, parent=None, **kargs):
pg.GraphicsWindow.__init__(self, **kargs)
self.setParent(parent)
self.setWindowTitle('pyqtgraph example: Scrolling Plots')
self.p = self.addPlot(labels = {'left':'Position', 'bottom':'Time'})
self.data = np.zeros(10)
self.curve = self.p.plot(self.data, pen='b')
if __name__ == '__main__':
w = CustomWidget()
w.show()
| 3.078125
| 3
|
tests/test_collapsible.py
|
TehMillhouse/sphinxawesome-theme
| 17
|
11782
|
"""Tests for collapsible definition lists.
When the option ``html_collapsible_definitions``
is ``True``, some HTML classes should be added
to some definition lists but not all of them.
"""
from pathlib import Path
import pytest
from sphinx.application import Sphinx
from .util import parse_html
@pytest.mark.sphinx(
"html",
testroot="collapsible",
confoverrides={"html_theme": "sphinxawesome_theme"},
freshenv=True,
)
def test_no_permalinks(app: Sphinx) -> None:
"""It tests that there are no permalinks."""
app.config.html_permalinks = False # type: ignore[attr-defined]
app.build()
tree = parse_html(Path(app.outdir) / "index.html")
dl = tree("dl")
assert len(dl) == 2
headerlinks = tree("a", class_="headerlink")
assert len(headerlinks) == 0
@pytest.mark.sphinx(
"html",
testroot="collapsible",
confoverrides={"html_theme": "sphinxawesome_theme"},
freshenv=True,
)
def test_no_collapsible_definitions(app: Sphinx) -> None:
"""By default, no classes should be added."""
app.build()
tree = parse_html(Path(app.outdir) / "index.html")
dl = tree("dl")
assert len(dl) == 2
assert str(dl[0]).replace("\n", "") == (
'<dl class="simple"><dt>term</dt><dd><p>definition</p></dd></dl>'
)
assert dl[1]["class"] == ["std", "option", "code-definition"]
dt, dd = (c for c in dl[1].children if c.strip is None)
assert dt.name == "dt"
assert "accordion" not in dt["class"]
assert dd.name == "dd"
assert "class" not in dd
expand_more_button = dt("button", class_="expand-more")
assert len(expand_more_button) == 0
@pytest.mark.sphinx(
"html",
testroot="collapsible",
confoverrides={"html_theme": "sphinxawesome_theme"},
freshenv=True,
)
def test_collapsible_definitions(app: Sphinx) -> None:
"""It tests the correct classes being added to the definition lists.
It should not add the classes to normal definition lists.
"""
# if specified in 'confoverrides', this returns a warning
app.config.html_collapsible_definitions = True # type: ignore[attr-defined]
app.build()
tree = parse_html(Path(app.outdir) / "index.html")
dl = tree("dl")
assert len(dl) == 2
assert str(dl[0]).replace("\n", "") == (
'<dl class="simple"><dt>term</dt><dd><p>definition</p></dd></dl>'
)
assert "code-definition" in dl[1]["class"]
dt, dd = (c for c in dl[1].children if c.strip is None)
assert dt.name == "dt"
assert dt["class"] == ["sig", "sig-object", "std", "accordion"]
assert dd.name == "dd"
assert dd["class"] == ["panel"]
expand_more_button = dt("button", class_="expand-more")
assert len(expand_more_button) == 1
| 2.625
| 3
|
Iris Network/Conclusion/task.py
|
jetbrains-academy/Machine-Learning-101
| 0
|
11783
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from network import NN
from evaluate import accuracy
def read_data(fpath):
iris = pd.read_csv(fpath)
iris.loc[iris['species'] == 'virginica', 'species'] = 0
iris.loc[iris['species'] == 'versicolor', 'species'] = 1
iris.loc[iris['species'] == 'setosa', 'species'] = 2
iris = iris[iris['species'] != 2]
return iris[['petal_length', 'petal_width']].values, iris[['species']].values.astype('uint8')
def plot_data(X, y):
plt.scatter(X[:, 0], X[:, 1], c=y[:, 0], s=40, cmap=plt.cm.Spectral)
plt.title("IRIS DATA | Blue - Versicolor, Red - Virginica ")
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
plt.show()
def train_test_split(X, y, ratio=0.8):
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
train_len = int(X.shape[0] * ratio)
return X[indices[:train_len]], y[indices[:train_len]], X[indices[train_len:]], y[indices[train_len:]]
if __name__ == '__main__':
X, y = read_data('iris.csv')
# comment the following line if you don't need the plot anymore
plot_data(X, y)
X_train, y_train, X_test, y_test = train_test_split(X, y, 0.7)
nn = NN(len(X[0]), 5, 1)
output = nn.feedforward(X_train)
print(output)
print(f'w1 before backward propagation: \n{nn.w1} \nw2 before backward propagation:\n{nn.w2}')
nn.backward(X_train, y_train, output)
print(f'w1 after backward propagation: \n{nn.w1} \nw2 after backward propagation:\n{nn.w2}')
nn.train(X_train, y_train)
print("Accuracy:")
print(accuracy(nn, X_test, y_test))
| 3.09375
| 3
|
agentless/crypto.py
|
tinyauth/agentless
| 0
|
11784
|
<gh_stars>0
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding, rsa
backend = default_backend()
def generate_private_key():
key = rsa.generate_private_key(
public_exponent=65537,
key_size=4096,
backend=backend,
)
return key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
).decode('utf-8')
def load_private_key(private_key_pem):
return serialization.load_pem_private_key(
private_key_pem,
password=<PASSWORD>,
backend=backend,
)
def public_key_from_private_key(private_key):
public_key = private_key.public_key()
foo = public_key.public_bytes(
encoding=serialization.Encoding.OpenSSH,
format=serialization.PublicFormat.OpenSSH,
).decode('utf-8')
return foo
def ssh_sign_data(key, data):
return key.sign(
data,
padding=padding.PKCS1v15(),
algorithm=hashes.SHA1(),
)
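# --- Hedged usage sketch (assumption, not part of this module): round-trip the
# helpers above. The generated key uses NoEncryption, so it is loaded here with
# password=None; the payload bytes are illustrative.
if __name__ == '__main__':
    pem = generate_private_key()
    key = serialization.load_pem_private_key(
        pem.encode('utf-8'), password=None, backend=backend,
    )
    print(public_key_from_private_key(key))
    signature = ssh_sign_data(key, b'example payload')
    print(len(signature))  # 512 bytes for a 4096-bit RSA key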
| 2.46875
| 2
|
test/programytest/storage/entities/test_nodes.py
|
cdoebler1/AIML2
| 345
|
11785
|
import unittest
import unittest.mock
from programy.storage.entities.nodes import NodesStore
class NodesStoreTest(unittest.TestCase):
def test_load(self):
store = NodesStore()
with self.assertRaises(NotImplementedError):
collector = unittest.mock.Mock()
store.load(collector)
| 2.65625
| 3
|
sphinx/source/tutorial/exercises/stocks.py
|
minrk/bokeh
| 0
|
11786
|
<reponame>minrk/bokeh
import numpy as np
import pandas as pd
from bokeh.plotting import *
# Here is some code to read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000",
parse_dates=['Date'])
GOOG = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=GOOG&a=0&b=1&c=2000",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000",
parse_dates=['Date'])
output_file("stocks.html", title="stocks.py example")
# EXERCISE: turn on plot hold
# EXERCISE: finish this line plot, and add more for the other stocks. Each one should
# have a legend, and its own color.
line(
AAPL['Date'], # x coordinates
AAPL['Adj Close'], # y coordinates
color='#A6CEE3', # set a color for the line
legend='AAPL', # attach a legend label
x_axis_type = "datetime", # NOTE: only needed on first
tools="pan,wheel_zoom,box_zoom,reset,previewsave" # NOTE: only needed on first
)
# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
# EXERCISE: start a new figure
# Here is some code to compute the 30-day moving average for AAPL
aapl = AAPL['Adj Close']
aapl_dates = AAPL['Date']
window_size = 30
window = np.ones(window_size)/float(window_size)
aapl_avg = np.convolve(aapl, window, 'same')
# EXERCISE: plot a scatter of circles for the individual AAPL prices with legend
# 'close'. Remember to set the x axis type and tools on the first renderer.
# EXERCISE: plot a line of the AAPL moving average data with the legend 'avg'
# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
show() # open a browser
| 3.375
| 3
|
nni/retiarii/converter/visualize.py
|
qfyin/nni
| 3
|
11787
|
import graphviz
def convert_to_visualize(graph_ir, vgraph):
for name, graph in graph_ir.items():
if name == '_training_config':
continue
with vgraph.subgraph(name='cluster'+name) as subgraph:
subgraph.attr(color='blue')
cell_node = {}
ioput = {'_inputs': '{}-{}'.format(name, '_'.join(graph['inputs'])),
'_outputs': '{}-{}'.format(name, '_'.join(graph['outputs']))}
subgraph.node(ioput['_inputs'])
subgraph.node(ioput['_outputs'])
for node_name, node_value in graph['nodes'].items():
value = node_value['operation']
if value['type'] == '_cell':
cell_input_name = '{}-{}'.format(value['cell_name'], '_'.join(graph_ir[value['cell_name']]['inputs']))
cell_output_name = '{}-{}'.format(value['cell_name'], '_'.join(graph_ir[value['cell_name']]['outputs']))
cell_node[node_name] = (cell_input_name, cell_output_name)
print('cell: ', node_name, cell_input_name, cell_output_name)
else:
subgraph.node(node_name)
for edge in graph['edges']:
src = edge['head'][0]
if src == '_inputs':
src = ioput['_inputs']
elif src in cell_node:
src = cell_node[src][1]
dst = edge['tail'][0]
if dst == '_outputs':
dst = ioput['_outputs']
elif dst in cell_node:
dst = cell_node[dst][0]
subgraph.edge(src, dst)
def visualize_model(graph_ir):
vgraph = graphviz.Digraph('G', filename='vgraph', format='jpg')
convert_to_visualize(graph_ir, vgraph)
vgraph.render()
| 2.765625
| 3
|
Python/Tests/TestData/TestDiscoverer/ConfigUnittest/Product/prefix_not_included.py
|
techkey/PTVS
| 404
|
11788
|
import unittest
class PrefixNotIncluded(unittest.TestCase):
def test_not_included(self):
pass
if __name__ == '__main__':
unittest.main()
| 1.820313
| 2
|
customBackground.py
|
VisweshK/Jashmup
| 0
|
11789
|
'''
This is the class to create a scrolling background.
Because the background was so large, it was made to be a .jpg.
'''
import pygame, os
class Background(pygame.sprite.Sprite):
# Initialize the sprite.
def __init__(self,disp):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(os.path.join("images", "spacebackground.jpg"))
self.image = self.image.convert()
self.rect = self.image.get_rect()
self.dx = 10
self.reset()
# Constantly have the sprite move to the left.
# If the right side of the image moves beyond the right side of the screen, reset the image.
def update(self):
self.rect.left -= self.dx
if self.rect.right <= 800:
self.reset()
# Reset the image's left side to the left side of the screen.
def reset(self):
self.rect.left = 0
| 3.921875
| 4
|
src/python/squarepants/file_utils.py
|
ericzundel/mvn2pants
| 8
|
11790
|
<filename>src/python/squarepants/file_utils.py
import os
import shutil
from contextlib import contextmanager
from tempfile import mkdtemp, mktemp
@contextmanager
def temporary_dir():
"""Returns a temporary directory that gets cleaned up when the context manager exits."""
tempdir = mkdtemp()
try:
yield tempdir
finally:
shutil.rmtree(tempdir)
@contextmanager
def temporary_file():
"""Returns a temporary file that gets cleaned up when the context manager exits."""
tempfile = mktemp()
try:
yield tempfile
finally:
os.remove(tempfile)
@contextmanager
def frozen_dir(path):
"""Ensures that the contents of the directory are the same after exiting as before entering."""
with temporary_dir() as tempdir:
backup = os.path.join(tempdir, 'backup')
shutil.copytree(path, backup)
try:
yield path
finally:
shutil.rmtree(path, ignore_errors=True)
shutil.move(backup, path)
def file_pattern_exists_in_subdir(subdir, pattern):
"""Search for a file pattern recursively in a subdirectory
:param subdir: directory to search recursively
:param re.RegexObject pattern: compiled regular expression object from re.compile()
:return: True if a file with the named pattern exists in the subdirectory
:rtype: bool
"""
for (dirpath, dirnames, filenames) in os.walk(subdir):
for filename in filenames:
if pattern.match(filename):
return True
return False
def touch(fname, times=None, makedirs=False):
"""Creates the specified file at the named path (and optionally sets the time)."""
if makedirs:
directory = os.path.dirname(fname)
if not os.path.exists(directory):
os.makedirs(directory)
with open(fname, 'a'):
os.utime(fname, times)
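# A short usage sketch of the helpers above (paths are made up for illustration):
if __name__ == '__main__':
  import re
  with temporary_dir() as tmp:
    touch(os.path.join(tmp, 'generated', 'BUILD.gen'), makedirs=True)
    print(file_pattern_exists_in_subdir(tmp, re.compile(r'BUILD.*')))  # True
    with frozen_dir(tmp):
      touch(os.path.join(tmp, 'scratch.txt'))
    print(os.path.exists(os.path.join(tmp, 'scratch.txt')))  # False: contents were restored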
| 3.21875
| 3
|
docs/DSDC/miniprez/miniprez/continuous_integration.py
|
thoppe/Presentation_Topics
| 2
|
11791
|
<filename>docs/DSDC/miniprez/miniprez/continuous_integration.py<gh_stars>1-10
import asyncio
import os
from parser import miniprez_markdown, build_body
import logging
logger = logging.getLogger("miniprez")
async def file_watcher(target_file, sleep_time=0.5):
"""
Watches a file. If it is modified, yield the filename.
Yield the filename once to start.
"""
# Yield the file first
yield target_file, 0
latest_modification_time = os.path.getmtime(target_file)
while True:
current_time = os.path.getmtime(target_file)
if current_time > latest_modification_time:
delta = current_time - latest_modification_time
latest_modification_time = current_time
yield target_file, delta
await asyncio.sleep(sleep_time)
async def parser_loop(f_markdown, sleep_time=0.5):
"""
Main event loop. If the target file is modified, or is new, start a build.
"""
async for f_target, dt in file_watcher(f_markdown, sleep_time):
build_html(f_target)
def build_html(f_target):
"""
Build the html from the markdown.
"""
f_html_output = f_target.replace(".md", ".html")
logger.info(f"Building {f_target} to {f_html_output}")
with open(f_target) as FIN:
markdown = FIN.read()
html = miniprez_markdown(markdown)
soup = build_body(html)
with open(f_html_output, "w") as FOUT:
FOUT.write(soup.prettify())
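# A minimal way to run the watcher loop (the markdown filename is illustrative; the
# relative `parser` import above assumes this module is run from its package directory):
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    asyncio.run(parser_loop("slides.md"))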
| 2.78125
| 3
|
rl_algorithms/dqn/linear.py
|
yonghangzhou/rl_algorithms
| 1
|
11792
|
# -*- coding: utf-8 -*-
"""Linear module for dqn algorithms
- Author: <NAME>
- Contact: <EMAIL>
"""
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from rl_algorithms.common.helper_functions import numpy2floattensor
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class NoisyLinear(nn.Module):
"""Noisy linear module for NoisyNet.
References:
https://github.com/higgsfield/RL-Adventure/blob/master/5.noisy%20dqn.ipynb
https://github.com/Kaixhin/Rainbow/blob/master/model.py
Attributes:
in_features (int): input size of linear module
out_features (int): output size of linear module
std_init (float): initial std value
weight_mu (nn.Parameter): mean value weight parameter
weight_sigma (nn.Parameter): std value weight parameter
bias_mu (nn.Parameter): mean value bias parameter
bias_sigma (nn.Parameter): std value bias parameter
"""
def __init__(self, in_features: int, out_features: int, std_init: float = 0.5):
"""Initialize."""
super(NoisyLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.std_init = std_init
self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features))
self.weight_sigma = nn.Parameter(torch.Tensor(out_features, in_features))
self.register_buffer("weight_epsilon", torch.Tensor(out_features, in_features))
self.bias_mu = nn.Parameter(torch.Tensor(out_features))
self.bias_sigma = nn.Parameter(torch.Tensor(out_features))
self.register_buffer("bias_epsilon", torch.Tensor(out_features))
self.reset_parameters()
self.reset_noise()
def reset_parameters(self):
"""Reset trainable network parameters (factorized gaussian noise)."""
mu_range = 1 / math.sqrt(self.in_features)
self.weight_mu.data.uniform_(-mu_range, mu_range)
self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
self.bias_mu.data.uniform_(-mu_range, mu_range)
self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))
@staticmethod
def scale_noise(size: int) -> torch.Tensor:
"""Set scale to make noise (factorized gaussian noise)."""
x = numpy2floattensor(np.random.normal(loc=0.0, scale=1.0, size=size), device)
return x.sign().mul(x.abs().sqrt())
def reset_noise(self):
"""Make new noise."""
epsilon_in = self.scale_noise(self.in_features)
epsilon_out = self.scale_noise(self.out_features)
# outer product
self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))
self.bias_epsilon.copy_(epsilon_out)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward method implementation.
We do not branch on train / eval mode here, since it makes no notable difference in performance.
"""
return F.linear(
x,
self.weight_mu + self.weight_sigma * self.weight_epsilon,
self.bias_mu + self.bias_sigma * self.bias_epsilon,
)
class NoisyLinearConstructor:
"""Constructor class for changing hyper parameters of NoisyLinear.
Attributes:
std_init (float): initial std value
"""
def __init__(self, std_init: float = 0.5):
"""Initialize."""
self.std_init = std_init
def __call__(self, in_features: int, out_features: int) -> NoisyLinear:
"""Return NoisyLinear instance set hyper parameters"""
return NoisyLinear(in_features, out_features, self.std_init)
class NoisyMLPHandler:
"""Includes methods to handle noisy linear."""
def reset_noise(self):
"""Re-sample noise"""
for _, module in self.named_children():
module.reset_noise()
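# A small usage sketch of the noisy layer (shapes and values are illustrative only):
if __name__ == "__main__":
    layer = NoisyLinearConstructor(std_init=0.5)(in_features=4, out_features=2)
    x = torch.randn(8, 4)
    out = layer(x)       # forward pass using the current noise sample
    layer.reset_noise()  # re-sample factorized gaussian noise between updates
    print(out.shape)     # torch.Size([8, 2])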
| 2.8125
| 3
|
2015/day7/2015-day7-part2.py
|
matt-the-ogre/advent-of-code
| 1
|
11793
|
# Advent of Code - 2015 - Day 7
# --- Day 7: Some Assembly Required ---
# This year, Santa brought little Bobby Tables a set of wires and bitwise logic gates! Unfortunately, little Bobby is a little under the recommended age range, and he needs help assembling the circuit.
# Each wire has an identifier (some lowercase letters) and can carry a 16-bit signal (a number from 0 to 65535). A signal is provided to each wire by a gate, another wire, or some specific value. Each wire can only get a signal from one source, but can provide its signal to multiple destinations. A gate provides no signal until all of its inputs have a signal.
# The included instructions booklet describes how to connect the parts together: x AND y -> z means to connect wires x and y to an AND gate, and then connect its output to wire z.
# For example:
# 123 -> x means that the signal 123 is provided to wire x.
# x AND y -> z means that the bitwise AND of wire x and wire y is provided to wire z.
# p LSHIFT 2 -> q means that the value from wire p is left-shifted by 2 and then provided to wire q.
# NOT e -> f means that the bitwise complement of the value from wire e is provided to wire f.
# Other possible gates include OR (bitwise OR) and RSHIFT (right-shift). If, for some reason, you'd like to emulate the circuit instead, almost all programming languages (for example, C, JavaScript, or Python) provide operators for these gates.
# For example, here is a simple circuit:
# 123 -> x
# 456 -> y
# x AND y -> d
# x OR y -> e
# x LSHIFT 2 -> f
# y RSHIFT 2 -> g
# NOT x -> h
# NOT y -> i
# After it is run, these are the signals on the wires:
# d: 72
# e: 507
# f: 492
# g: 114
# h: 65412
# i: 65079
# x: 123
# y: 456
# In little Bobby's kit's instructions booklet (provided as your puzzle input), what signal is ultimately provided to wire a?
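# A quick sanity check of the 16-bit semantics above using Python's bitwise operators
# (values taken from the example circuit): 123 & 456 == 72, 123 | 456 == 507,
# 123 << 2 == 492, 456 >> 2 == 114, and (~123) & 0xFFFF == 65412 -- which is why the
# code below corrects negative results from NOT back into the 0..65535 range.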
import time, math
def createCircuitDict():
global circuitStrings
global circuitDict
# this function takes the input strings (circuitStrings) and parses them into a dictionary (circuitDict)
for circuitLine in circuitStrings:
# the string "->" is the delimeter (sp?) between the left side (input) and the wire name (dictionary key)
leftSide = circuitLine[0 : circuitLine.find("->") - 1]
# if debug:
# print("leftSide:", leftSide)
rightSide = circuitLine[circuitLine.find("->") + 3 : ]
# if debug:
# print("rightSide:", rightSide)
# we set the outputValue to nan (not a number) as a way of checking if we have successfully evaluated the wire's inputs or not: default = nan, not evaluated
outputValue = math.nan
# check for numeric input string -- this is easy, just make it the output
if leftSide.isnumeric():
leftSide = int(leftSide)
outputValue = leftSide # simple -- the input to this wire is also its output
# check for duplicate wire names (dictionary keys) in the input string
if circuitDict.get(rightSide) != None:
print("Weird... dictionary key ", rightSide, "already exists. This shouldn't happen.")
circuitDict[rightSide] = {"input" : leftSide, "output" : outputValue}
def evaluateInput(circuit, operator):
global circuitDict
# if debug:
# print(circuit, operator)
# check left argument for circuit name or number
inputWire1 = circuitDict[circuit]["input"][: circuitDict[circuit]["input"].find(operator) - 1]
inputWire2 = circuitDict[circuit]["input"][circuitDict[circuit]["input"].find(operator) + len(operator) + 1 : ]
# if debug:
# print(circuit, "=", inputWire1, operator, inputWire2)
# look up the output of the inputWire
if inputWire1.isnumeric():
input1 = int(inputWire1)
else:
input1 = circuitDict[inputWire1]["output"]
if inputWire2.isnumeric():
input2 = int(inputWire2)
else:
input2 = circuitDict[inputWire2]["output"]
if math.isnan(input1):
# print("input wire 1 isn't calculated yet")
pass
elif math.isnan(input2):
# print("input wire 2 isn't calculated yet")
pass
else:
# apply the binary operator to the two inputs and assign the result to the output of this wire
if operator == "AND":
circuitDict[circuit]["output"] = input1 & input2
elif operator == "OR":
circuitDict[circuit]["output"] = input1 | input2
elif operator == "LSHIFT":
circuitDict[circuit]["output"] = input1 << input2
elif operator == "RSHIFT":
circuitDict[circuit]["output"] = input1 >> input2
else:
print("Unknown operator", operator)
# check for rollunder 0
# this occurs because we are using a signed integer for what should be an unsigned 16-bit integer
# TODO figure out if Python has an unsigned 16-bit integer type
if circuitDict[circuit]["output"] < 0:
# if debug:
# print("result under zero, fix it")
circuitDict[circuit]["output"] = 65535 + circuitDict[circuit]["output"]
def doConnection():
global circuitDict
unfinishedCount = len(circuitDict)
lowCount = unfinishedCount
while unfinishedCount:
unfinishedCount = len(circuitDict)
if debug:
print("lowCount", lowCount)
for circuit in circuitDict:
# if the output is not a number, evaluate the input
if math.isnan(circuitDict[circuit]["output"]):
# parse the left side
# we can have NOT, AND, OR, LSHIFT, and RSHIFT as possible commands
if "NOT" in circuitDict[circuit]["input"]:
# operation is bitwise NOT, complement the input value to produce the output
inputWire1 = circuitDict[circuit]["input"][circuitDict[circuit]["input"].find("NOT")+4 : ]
# if debug:
# print(circuit, "= NOT", inputWire1)
# look up the output of the inputWire
if inputWire1.isnumeric():
input1 = int(inputWire1)
else:
input1 = circuitDict[inputWire1]["output"]
if math.isnan(input1):
# print("input wire isn't calculated yet")
pass
else:
# do the bitwise complement on the input number and assign it to the output of this wire
circuitDict[circuit]["output"] = ~input1
# check for rollunder 0
if circuitDict[circuit]["output"] < 0:
# if debug:
# print("result under zero, fix it")
circuitDict[circuit]["output"] = 65536 + circuitDict[circuit]["output"]
elif "AND" in circuitDict[circuit]["input"]:
evaluateInput(circuit, "AND")
elif "OR" in circuitDict[circuit]["input"]:
evaluateInput(circuit, "OR")
elif "LSHIFT" in circuitDict[circuit]["input"]:
evaluateInput(circuit, "LSHIFT")
elif "RSHIFT" in circuitDict[circuit]["input"]:
evaluateInput(circuit, "RSHIFT")
else:
# simplest case -- one input only!
# copy the input wire
# this could be improved by doing it only if the inputWire is resolved
inputWire1 = circuitDict[circuit]["input"]
if debug:
print("simplest case circuit", circuit, " inputWire", inputWire1)
circuitDict[circuit]["output"] = circuitDict[inputWire1]["output"]
else:
# this circuit is done, move on
# if debug:
# print("circuit",circuit,"is done with output ", circuitDict[circuit]["output"], "Break.")
pass
if math.isnan(circuitDict[circuit]["output"]) is False:
# this output is calculated, decrement the unfinished counter
unfinishedCount -= 1
if unfinishedCount < lowCount:
lowCount = unfinishedCount
# if debug:
# print("unfinishedCount", unfinishedCount)
startTime = time.perf_counter() # time in seconds (float)
debug = False
timing = True
unitTesting = False
# maybe a dictionary again?
# circuitStrings = {"a" : {"input" : 1, "output" : NaN}}
# parse the input text file to set up the circuitStrings inputs, then just roll through the dictionary to calculate the outputs
# how will I be sure that the output has been calculated to be the input for the next circuitStrings?
# can I assume the input file is "in order"? Probably not.
# does this mean some sort of recursion algorithm?
# maybe if I populate the outputs with 'NaN' (or Python equivalent) then check that it's not that before using its output
# I can make it recurse through the inputs, calculating any that have fully realized inputs?
circuitStrings = []
circuitDict = {}
# unit tests, kind of
if unitTesting:
print("Unit Testing")
circuitStrings = ["123 -> x","456 -> y", "x AND y -> d", "x OR y -> e", "x LSHIFT 2 -> f", "y RSHIFT 2 -> g", "NOT x -> h", "NOT y -> i"]
else:
# read the input text file and split it into lines stored in circuitStrings
with open("2015/day7/input-part2.txt","r") as inputString:
circuitStrings = inputString.readlines()
# remove newlines
for i in range(0, len(circuitStrings)):
circuitStrings[i] = circuitStrings[i].rstrip()
# parse the input to create the dictionary
createCircuitDict()
doConnection()
# show the circuits
if debug:
for circuit in circuitDict:
print(circuit,":",circuitDict[circuit])
if unitTesting:
testPass = False
testPassOutput = {"d": {"output" : 72}, "e": {"output" : 507}, "f": {"output" : 492}, "g": {"output" : 114}, "h": {"output" : 65412}, "i": {"output" : 65079}, "x": {"output" : 123}, "y": {"output" : 456}}
for wire in testPassOutput:
testPassWire = testPassOutput[wire]["output"]
circuitWire = circuitDict[wire]["output"]
if debug:
print("wire", wire, "test:", testPassWire, "calc:", circuitWire)
testPass = testPassWire == circuitWire
if testPass is False:
break
print("testPass:", testPass)
else:
print(circuitDict["a"]["output"])
# this answer for my input is 46065 (part 1), 14134 (part 2)
endTime = time.perf_counter() # time in seconds (float)
if timing:
print("Execution took ", endTime - startTime, " seconds.")
| 3.484375
| 3
|
flink-ai-flow/examples/workflow_on_event/workflows/init/init.py
|
lisy09/flink-ai-extended
| 0
|
11794
|
<reponame>lisy09/flink-ai-extended
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ai_flow as af
hourly_data_dir = '/tmp/hourly_data'
process_result_base_path = '/tmp/hourly_processed'
daily_data_base_path = '/tmp/daily_data'
daily_result = '/tmp/daily_result'
def init():
af.register_dataset(name='hourly_data', uri=hourly_data_dir)
af.register_dataset(name='hourly_data_processed', uri=process_result_base_path)
af.register_dataset(name='daily_data', uri=daily_data_base_path)
af.register_dataset(name='daily_data_result', uri=daily_result)
if __name__ == '__main__':
af.init_ai_flow_context()
init()
| 1.453125
| 1
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/bobcat/profiles/Profile_WiSUN.py
|
SiliconLabs/Gecko_SDK
| 82
|
11795
|
<filename>platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/bobcat/profiles/Profile_WiSUN.py
from pyradioconfig.parts.ocelot.profiles.Profile_WiSUN import Profile_WiSUN_Ocelot
from pyradioconfig.parts.common.profiles.bobcat_regs import build_modem_regs_bobcat
from pyradioconfig.parts.common.profiles.profile_common import buildCrcOutputs, buildFecOutputs, buildFrameOutputs, \
buildWhiteOutputs
class Profile_WiSUN_Bobcat(Profile_WiSUN_Ocelot):
def __init__(self):
self._profileName = "WiSUN"
self._readable_name = "WiSUN Profile"
self._category = ""
self._description = "Profile used for WiSUN PHYs"
self._default = False
self._activation_logic = ""
self._family = "bobcat"
def build_register_profile_outputs(self, model, profile):
family = self._family
build_modem_regs_bobcat(model, profile, family)
buildFrameOutputs(model, profile, family)
buildCrcOutputs(model, profile, family)
buildWhiteOutputs(model, profile)
buildFecOutputs(model, profile)
| 1.96875
| 2
|
tests/dgds_functions_test.py
|
openearth/hydro-engine-service
| 4
|
11796
|
import logging
import pytest
from . import auth
from hydroengine_service import dgds_functions
logger = logging.getLogger(__name__)
class TestDGDSFunctions:
@pytest.mark.parametrize('source, start_date, end_date, limit',
[
('projects/dgds-gee/bathymetry/gebco/2019', None, None, 10),
('projects/dgds-gee/glossis/currents', None, None, None),
('projects/dgds-gee/glossis/waterlevel', '2020-11-01', '2020-12-01', None),
('projects/dgds-gee/glossis/wind', '2020-11-01', '2020-11-10', 10),
('projects/dgds-gee/glossis/waveheight', None, None, None),
('projects/dgds-gee/gloffis/weather', None, None, 5),
('projects/dgds-gee/gloffis/hydro', None, None, 5),
('projects/dgds-gee/metocean/waves/percentiles', None, None, 5),
('projects/dgds-gee/chasm/waves', None, None, None),
('projects/dgds-gee/chasm/wind', None, None, None),
('projects/dgds-gee/crucial/evaporation_deficit', None, None, None),
('projects/dgds-gee/crucial/groundwater_declining_trend', None, None, None),
('projects/dgds-gee/msfd/chlorophyll', None, None, None)
])
def test_get_image_collection_info(self, source, start_date, end_date, limit):
image_date_list = dgds_functions.get_image_collection_info(source, start_date, end_date, limit)
assert len(image_date_list) >= 1
assert "imageId" in image_date_list[0]
assert "date" in image_date_list[0]
| 2.171875
| 2
|
smol_opyt/logistic_problem.py
|
abelsiqueira/smol-opyt
| 0
|
11797
|
<filename>smol_opyt/logistic_problem.py
from math import log
import numpy as np
from numpy import linalg as la
class LogisticProblem:
"""Class for the logistic regression method for classification."""
def __init__(self, feat_mtx, y):
"""Create a Logistic Problem with matrix `feat_mtx` n by p and vector `y` of 0s and 1s with size n.
A bias is added to the model as the first variable."""
self._feat_mtx = feat_mtx
self._y = y
p = feat_mtx.shape[1]
self.beta = np.zeros(p + 1)
def sigmoid(self, v):
"""Compute sigmoid(v) = 1 / (1 + exp(-v)"""
return 1 / (1 + np.exp(-v))
def predict(self, feat_mtx=None, beta=None):
if feat_mtx is None:
feat_mtx = self._feat_mtx
if beta is None:
beta = self.beta
return self.sigmoid(beta[0] + np.dot(feat_mtx, beta[1:]))
def cross_entropy(self, yhat):
"""Compute the cross entropy, given by
sum y[i] * log(yhat[i]) + (1 - y[i]) * log(1 - yhat[i])"""
n = len(self._y)
c = 0.0
for i in range(0, n):
c += self._y[i] * log(
yhat[i]) + (1 - self._y[i]) * log(1 - yhat[i])
return c
def cross_entropy_gradient(self, yhat):
"""Assuming yhat_i = sigmoid(x_i^T beta), returns
sum (y[i] - yhat[i]) * [1, x_i]   (features augmented with a bias term)
"""
n = len(self._y)
p = len(self.beta)
g = np.zeros(p)
for i in range(0, n):
g = g + (self._y[i] - yhat[i]) * np.array(
[1.0, *self._feat_mtx[i, :]])
return g
def solve(self):
"""Solve the logistic regression problem"""
max_iter = 1000
iter_count = 0
yhat = self.predict()
loss = self.cross_entropy(yhat)
gradloss = self.cross_entropy_gradient(yhat)
while la.norm(gradloss) > 1e-6 and iter_count < max_iter:
alpha = 1.0
slope = la.norm(gradloss)**2
beta_new = self.beta + alpha * gradloss
yhat = self.predict(beta=beta_new)
loss_new = self.cross_entropy(yhat)
while loss_new < loss + 1e-4 * alpha * slope:
alpha = alpha / 2
beta_new = self.beta + alpha * gradloss
yhat = self.predict(beta=beta_new)
loss_new = self.cross_entropy(yhat)
self.beta = beta_new
loss = loss_new
gradloss = self.cross_entropy_gradient(yhat)
iter_count += 1
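# A tiny end-to-end sketch on hand-made 1-D data (illustrative only, not from the package's tests):
if __name__ == "__main__":
    feat_mtx = np.array([[0.1], [0.2], [0.3], [0.4], [0.6], [0.7], [0.8], [0.9]])
    y = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0])
    problem = LogisticProblem(feat_mtx, y)
    problem.solve()
    predictions = (problem.predict() > 0.5).astype(float)
    print("fitted beta:", problem.beta)
    print("train accuracy:", np.mean(predictions == y))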
| 3.46875
| 3
|
TeamClassificationUtils.py
|
Neerajj9/Computer-Vision-based-Offside-Detection-in-soccer
| 8
|
11798
|
<gh_stars>1-10
import numpy as np
# TODO : add code for referee
def get_team_classifications(teamColor1, teamColor2, refColor, keeper1Color, keeper2Color, pose_estimations):
for pose in pose_estimations:
if len(pose[1]) < 2:
pose.append('color not found')
continue
colorDiffs = {}
colorList = np.array(pose[1][0]) + np.array(pose[1][1])
colorList = np.divide(colorList, 2)
colorList = colorList.tolist()
diffTeam1 = list(abs(np.array(teamColor1) - np.array(colorList)))
colorDiffs['team1'] = diffTeam1
diffTeam2 = list(abs(np.array(teamColor2) - np.array(colorList)))
colorDiffs['team2'] = diffTeam2
diffRef = list(abs(np.array(refColor) - np.array(colorList)))
colorDiffs['ref'] = diffRef
diffKeep1 = list(abs(np.array(keeper1Color) - np.array(colorList)))
colorDiffs['keep1'] = diffKeep1
diffKeep2 = list(abs(np.array(keeper2Color) - np.array(colorList)))
colorDiffs['keep2'] = diffKeep2
for key in colorDiffs.keys():
colorDiffs[key] = sum(colorDiffs[key]) / len(colorDiffs[key])
colorDiffs = {k: v for k, v in sorted(colorDiffs.items(), key=lambda item: item[1])}
for key in colorDiffs.keys():
pose.append(key)
break
return pose_estimations
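# A minimal illustration with made-up RGB colours; the pose structure below is an
# assumption inferred from how pose[1] is indexed above, not the project's real
# pose-estimation output:
if __name__ == '__main__':
    poses = [['pose_0', [[250, 10, 10], [240, 20, 15]]]]
    labelled = get_team_classifications(
        teamColor1=[255, 0, 0], teamColor2=[0, 0, 255], refColor=[0, 0, 0],
        keeper1Color=[0, 255, 0], keeper2Color=[255, 255, 0], pose_estimations=poses)
    print(labelled[0][-1])  # expected to print 'team1' for this colour sample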
| 2.359375
| 2
|
examples/PTSD/mpi_tmp/PTSD_cognet.py
|
zeroknowledgediscovery/cognet
| 0
|
11799
|
<gh_stars>0
from mpi4py.futures import MPIPoolExecutor
import numpy as np
import pandas as pd
from quasinet.qnet import Qnet, qdistance, load_qnet, qdistance_matrix
from quasinet.qsampling import qsample, targeted_qsample
qnet=load_qnet('../results/PTSD_cognet_test.joblib')
w = 304
h = w
p_all = pd.read_csv("tmp_samples_as_strings.csv", header=None).values.astype(str)[:]
def distfunc(x,y):
d=qdistance(x,y,qnet,qnet)
return d
def dfunc_line(k):
line = np.zeros(w)
y = p_all[k]
for j in range(w):
if j > k:
x = p_all[j]
line[j] = distfunc(x, y)
return line
if __name__ == '__main__':
with MPIPoolExecutor() as executor:
result = executor.map(dfunc_line, range(h))
result = pd.DataFrame(result)
result = result.to_numpy()
result = pd.DataFrame(np.maximum(result, result.transpose()))
result.to_csv('tmp_distmatrix.csv',index=None,header=None)
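# This script is intended to be launched under MPI so that MPIPoolExecutor has worker ranks,
# e.g. (process count and filename are illustrative):
#   mpiexec -n 8 python -m mpi4py.futures PTSD_cognet.py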
| 2.296875
| 2
|