| column | dtype | lengths / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
7a00ecf5169810e7505addc750380ef02512919a | size 5,377 | ext py | lang Python | path python/jittor/test/test_grad.py | repo llehtahw/jittor | head d83389117fd026a0881dd713e658ce5ae2a75bcb | licenses ["Apache-2.0"] | max_stars_count 1 (2020-11-13T10:08:00.000Z to 2020-11-13T10:08:00.000Z) | max_issues_count null | max_forks_count null
# ***************************************************************
# Copyright (c) 2020 Jittor. Authors: Dun Liang <randonlang@gmail.com>. All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
from .test_core import expect_error
def equal_size(x, y):
return x.dtype == y.dtype and x.shape == y.shape
def ngrad(func, vars, eps):
out = func(vars)
dout = []
for i in range(len(vars)):
pvar = vars[i].astype("float64")
if type(pvar)==np.ndarray and pvar.size>1:
grad = []
var_f = pvar.flatten()
for j in range(len(var_f)):
var = pvar.flatten()
var[j] += eps
vars[i] = var.reshape(pvar.shape)
out2 = func(vars)
grad.append((out2-out)/eps)
dout.append(np.array(grad).reshape(pvar.shape))
else:
vars[i] = vars[i] + eps
out2 = func(vars)
dout.append((out2-out)/eps)
vars[i] = pvar
return out, dout
class TestGrad(unittest.TestCase):
def test_grad(self):
x = jt.array([1.0, 2.0])
y = jt.array([3.0, 4.0])
z = x*y
dx, dy, dz = jt.grad(z, [x,y,z])
assert equal_size(dx, x) and equal_size(dy, y), f"{x} {y} {dx} {dy}"
assert (dy.data == x.data).all(), f"{dy.data} {x.data}"
assert (dx.data == y.data).all(), f"{dx.data} {y.data}"
assert (dz.data == 1).all()
def test_check_float(self):
x = jt.array(1)
y = x*x
expect_error(lambda: jt.grad(y, [x]))
def test_grad2(self):
def test(n):
x = jt.array(2.0)
y = x
for _ in range(n-1): y = y*x
dx, = jt.grad(y, [x])
assert dx.data == n*2**(n-1), f"{dx.data} {x.data}, {y.data}"
test(5)
test(6)
test(7)
test(8)
def test_var_index(self):
x = jt.array(2.0)
y = x-x
dx, = jt.grad(y, [x])
assert dx.data == 0, dx.data
x = jt.array(2.0)
y = x/x
dx, = jt.grad(x, [y])
assert dx.data == 0
def test_random_graph(self):
def test(num_vars, num_ops, seed):
np.random.seed(seed)
vars = []
for _ in range(num_vars):
vars.append(np.random.rand(1))
def random_func(vars):
np.random.seed(seed+1)
vars = list(vars)
for i in range(num_ops):
v1 = len(vars)-1-np.random.randint(num_vars)
v2 = len(vars)-1-np.random.randint(num_vars)
rop = "+-*/"[np.random.randint(4)]
if (rop == '/' or rop == '-') and v1 is v2:
rop = '+'
vout = eval(f"vars[v1]{rop}vars[v2]")
vars.append(vout)
if type(vars[i]) == jt.Var:
for i in range(len(vars)):
vars[i].name("v"+str(i))
return vout
np_out, np_dout = ngrad(random_func, vars, 1e-7)
jt_vars = [ jt.array(v) for v in vars ]
jt_out = random_func(jt_vars)
assert (np.abs(jt_out.data-np_out) < 1e-5).all(), (jt_out.data, np_out)
jt_dout = jt.grad(jt_out, jt_vars)
jt_dout = [ v.data for v in jt_dout ]
for jt_d, np_d in zip(jt_dout, np_dout):
assert abs(jt_d - np_d) < 1e-3, f"{jt_d} {np_d}"
test(1,1,0)
# test(3,3,1)
test(3,6,0)
test(10,100,2)
test(30,100,4)
test(50,100,6)
def test_top_sort(self):
x = jt.array(2.0)
x.name('x')
y1 = x*x # 2
y1.name('y1')
y2 = x*x # 2
y2.name('y2')
y3 = y1*y2 # 4
y3.name('y3')
y4 = y3*y1 # 6
y4.name('y4')
y5 = y4*y1 # 8
y5.name('y5')
y6 = y5*y1 # 10
y6.name('y6')
vars = [x,y1,y2,y3,y4,y5,y6]
grads = [ g.data for g in jt.grad(y6, vars) ]
dx = grads[0]
assert dx == 10*2**9, f"{grads}"
def test_int_grad(self):
x = jt.array(2.0)
z = x*x*x*x*x
dx, = jt.grad(z, [x])
self.assertEqual(dx.data, 5*2**4)
y1 = jt.int(x)
y2 = jt.float(x)
z = x*x*y1*y1*y2
expect_error(lambda: jt.grad(z, [y1]))
dx, = jt.grad(z, [x])
self.assertEqual(dx.data, 48)
def test_nth_grad(self):
x = jt.array(2.0)
y = x*x*x*x
dx = jt.grad(y, x)
ddx = jt.grad(dx, x)
dddx = jt.grad(ddx, x)
self.assertEqual(y.data, 2**4)
self.assertEqual(dx.data, 4*2**3)
self.assertEqual(ddx.data, 4*3*2**2)
self.assertEqual(dddx.data, 4*3*2*2**1)
def test_no_grad(self):
a = jt.array(1.0)
with jt.no_grad():
b = a
for i in range(10):
b = b.clone() + 1
assert b.data == 11
jt.clean()
assert jt.liveness_info()["lived_vars"] == 2
if __name__ == "__main__":
unittest.main()
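The `ngrad` helper above approximates gradients with a forward finite difference and is what the random-graph test compares `jt.grad` against. A minimal NumPy-only sketch of the same check (no Jittor required; the function `f` and the tolerance are illustrative choices, not part of the test suite):

```python
import numpy as np

def numeric_grad(f, x, eps=1e-7):
    """Forward-difference gradient of scalar f at a 1-D point x."""
    base = f(x)
    grad = np.zeros_like(x)
    for j in range(x.size):
        bumped = x.copy()
        bumped[j] += eps
        grad[j] = (f(bumped) - base) / eps
    return grad

# f(x) = sum(x * y): the analytic gradient w.r.t. x is y, matching what
# TestGrad.test_grad asserts for jt.grad(z, [x, y]).
y = np.array([3.0, 4.0])
f = lambda x: np.sum(x * y)
x = np.array([1.0, 2.0])
assert np.allclose(numeric_grad(f, x), y, atol=1e-3)
```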
avg_line_length 32.197605 | max_line_length 92 | alphanum_fraction 0.455644 | count_classes 4,105 | score_classes 0.763437 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 598 | score_documentation 0.111214
7a012bf9cfedafb87b6096b3721323abb9371444 | size 846 | ext py | lang Python | path mre/helper/Range.py | repo alvarofpp/mre | head 025a5f10b92a0a4bf32d673509958b660871b2f6 | licenses ["MIT"] | max_stars_count 7 (2019-04-21T18:25:49.000Z to 2020-12-22T19:13:25.000Z) | max_issues_count 12 (2019-08-10T02:09:43.000Z to 2021-10-02T15:29:48.000Z) | max_forks_count 22 (2019-04-21T18:25:54.000Z to 2020-10-04T21:43:12.000Z)
from typing import Union
from mre.Regex import Regex
class Range(Regex):
def __init__(self, minimum: Union[str, int] = 0, maximum: Union[str, int] = 9):
super().__init__('{}-{}'.format(minimum, maximum))
@staticmethod
def digits(minimum: int = 0, maximum: int = 9):
return Range(minimum, maximum)
@staticmethod
def letters(
minimum: chr = 'A',
maximum: chr = 'z',
uppercase: bool = False,
lowercase: bool = False,
):
if lowercase and uppercase:
minimum = minimum.upper()
maximum = maximum.lower()
elif lowercase:
minimum = minimum.lower()
maximum = maximum.lower()
elif uppercase:
minimum = minimum.upper()
maximum = maximum.upper()
return Range(minimum, maximum)
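`Range` only builds the `"<min>-<max>"` fragment; it is meant to be dropped into a character class by the rest of the `mre` API. A standalone sketch with Python's `re` showing what such a fragment matches (the surrounding `[...]` and the sample strings are illustrative and not part of `mre`):

```python
import re

# Same strings Range.digits(0, 5) and Range.letters('a', 'f', lowercase=True)
# would produce: "<minimum>-<maximum>".
digit_range = "{}-{}".format(0, 5)        # "0-5"
letter_range = "{}-{}".format('a', 'f')   # "a-f"

# Wrapped in a character class, the fragment behaves as a regex range.
hex_like = re.compile("[{}{}]+".format(digit_range, letter_range))
assert hex_like.fullmatch("0a3f5")
assert hex_like.fullmatch("907") is None   # 9 and 7 fall outside 0-5
```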
avg_line_length 26.4375 | max_line_length 83 | alphanum_fraction 0.568558 | count_classes 789 | score_classes 0.932624 | count_generators 0 | score_generators 0 | count_decorators 615 | score_decorators 0.72695 | count_async_functions 0 | score_async_functions 0 | count_documentation 13 | score_documentation 0.015366
7a014283816fd43c5b99389dd4a3fcc4eb6396ff | size 3,463 | ext py | lang Python | path tests/python/unittest/test_tir_transform_remove_weight_layout_rewrite_block.py | repo driazati/tvm | head b76c817986040dc070d215cf32523d9b2adc8e8b | licenses ["Apache-2.0"] | max_stars_count 1 (2021-12-13T22:07:00.000Z to 2021-12-13T22:07:00.000Z) | max_issues_count 7 (2022-02-17T23:04:46.000Z to 2022-03-31T22:22:55.000Z) | max_forks_count null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import tvm
from tvm.ir.module import IRModule
from tvm.script import tir as T
from tvm.tir.function import PrimFunc
def _check(before, expect):
if isinstance(before, PrimFunc):
before = IRModule({"main": before})
if isinstance(expect, PrimFunc):
expect = IRModule({"main": expect})
mod = tvm.tir.transform.RemoveWeightLayoutRewriteBlock()(before)
tvm.ir.assert_structural_equal(mod, expect)
def test_matmul():
@T.prim_func
def before(
A: T.Buffer[(16, 16), "float32"],
B: T.Buffer[(16, 16), "float32"],
C: T.Buffer[(16, 16), "float32"],
) -> None:
T.func_attr({"layout_free_buffers": [1]})
B_ = T.alloc_buffer([16, 4, 4], dtype="float32")
for i0_o, i1_o in T.grid(16, 16):
with T.block("layout_rewrite"):
i0, i1 = T.axis.remap("SS", [i0_o, i1_o])
T.reads(B[i0, i1])
T.writes(B_[i1, i0 // 4, i0 % 4])
T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
B_[i1, i0 // 4, i0 % 4] = B[i0, i1]
for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
with T.block("matmul"):
vi = T.axis.spatial(16, i0 * 4 + i1)
vj = T.axis.spatial(16, j)
vk = T.axis.reduce(16, k0 * 4 + k1)
T.reads(A[vi, vk], B_[vj, vk // 4, vk % 4])
T.writes(C[vi, vj])
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B_[vj, vk // 4, vk % 4]
@T.prim_func
def after(
A: T.Buffer[(16, 16), "float32"],
B: T.Buffer[(16, 4, 4), "float32"],
C: T.Buffer[(16, 16), "float32"],
) -> None:
T.func_attr({"layout_free_buffers": [1]})
for i0_o, i1_o in T.grid(16, 16):
with T.block("layout_rewrite"):
i0, i1 = T.axis.remap("SS", [i0_o, i1_o])
T.reads()
T.writes()
T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
T.evaluate(0)
for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
with T.block("matmul"):
vi = T.axis.spatial(16, i0 * 4 + i1)
vj = T.axis.spatial(16, j)
vk = T.axis.reduce(16, k0 * 4 + k1)
T.reads(A[vi, vk], B[vj, vk // 4, vk % 4])
T.writes(C[vi, vj])
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk // 4, vk % 4]
_check(before, after)
if __name__ == "__main__":
test_matmul()
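The pass under test replaces the `layout_rewrite` block with a no-op and retypes `B` to the rewritten `(16, 4, 4)` layout. The index math that block encodes, `B_[i1, i0 // 4, i0 % 4] = B[i0, i1]`, is a transpose followed by a reshape; a NumPy sketch (independent of TVM) confirming that equivalence:

```python
import numpy as np

B = np.arange(16 * 16, dtype="float32").reshape(16, 16)

# Element-wise form used in the "layout_rewrite" block above.
B_rewritten = np.empty((16, 4, 4), dtype="float32")
for i0 in range(16):
    for i1 in range(16):
        B_rewritten[i1, i0 // 4, i0 % 4] = B[i0, i1]

# Closed form: transpose, then split the length-16 axis into 4 x 4.
assert np.array_equal(B_rewritten, B.T.reshape(16, 4, 4))

# The rewritten matmul reads B_rewritten[vj, vk // 4, vk % 4],
# which is exactly B[vk, vj] in the original layout.
vj, vk = 3, 10
assert B_rewritten[vj, vk // 4, vk % 4] == B[vk, vj]
```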
avg_line_length 37.641304 | max_line_length 76 | alphanum_fraction 0.547791 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 2,134 | score_decorators 0.616229 | count_async_functions 0 | score_async_functions 0 | count_documentation 1,028 | score_documentation 0.296852
7a0383028d6c513dd8786b4e28fcf20c534cff1a | size 341 | ext py | lang Python | path CS1/Ch11/Artwork.py | repo DoctorOac/SwosuCsPythonExamples | head 07476b9b4ef9a6f8bd68921aef19e8f00183b1e7 | licenses ["Apache-2.0"] | max_stars_count 1 (2022-03-28T18:27:10.000Z to 2022-03-28T18:27:10.000Z) | max_issues_count 1 (2022-01-11T16:27:40.000Z to 2022-01-11T16:27:40.000Z) | max_forks_count 7 (2022-03-25T21:01:42.000Z to 2022-03-28T18:51:24.000Z)
from Artist import Artist
class Artwork:
def __init__(self, title='None', year_created=0,\
artist=Artist()):
self.title = title
self.year_created = year_created
self.artist = artist
def print_info(self):
self.artist.print_info()
print('Title: %s, %d' % (self.title, self.year_created))
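A small usage sketch. `Artist` lives in a separate `Artist.py` that is not shown here, so a minimal stand-in with the `print_info` method `Artwork` relies on is defined purely for illustration, and `Artwork` is repeated so the sketch runs on its own:

```python
class Artist:
    """Hypothetical stand-in for the real Artist class from Artist.py."""
    def __init__(self, name='None', birth_year=0):
        self.name = name
        self.birth_year = birth_year

    def print_info(self):
        print('Artist: %s (born %d)' % (self.name, self.birth_year))


class Artwork:
    def __init__(self, title='None', year_created=0, artist=Artist()):
        self.title = title
        self.year_created = year_created
        self.artist = artist

    def print_info(self):
        self.artist.print_info()
        print('Title: %s, %d' % (self.title, self.year_created))


Artwork('Starry Night', 1889, Artist('Vincent van Gogh', 1853)).print_info()
# Artist: Vincent van Gogh (born 1853)
# Title: Starry Night, 1889
```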
avg_line_length 26.230769 | max_line_length 64 | alphanum_fraction 0.630499 | count_classes 313 | score_classes 0.917889 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 21 | score_documentation 0.061584
7a03cb031046f0f5a4ab04de791c5d2ae9f6699d | size 2,249 | ext py | lang Python | path nearproteins/__init__.py | repo audy/nearproteins | head ed426a98004c7608894a63c6b445ff60ae251d05 | licenses ["MIT"] | max_stars_count null | max_issues_count 1 (2019-07-10T05:47:01.000Z to 2019-07-10T17:23:52.000Z) | max_forks_count null
#!/usr/bin/env python
from collections import defaultdict
from itertools import product
import json
import random
import sys
from annoy import AnnoyIndex
from Bio import SeqIO
import numpy as np
class FeatureGenerator:
def __init__(self, k=2):
''' '''
self.k = k
self.alphabet = [ 'A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H', 'I', 'L', 'K',
'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', ]
assert len(self.alphabet) == 20
self.feature_space = list(''.join(i) for i in product(self.alphabet,
repeat=self.k))
self.n_features = len(self.feature_space)
def shingles(self, s, k):
''' return shingles of a given string given a k-mer size k '''
return [ s[i : i + k ] for i in range(0, len(s) - k + 1) ]
def vectorize(self, s):
''' convert shingles to features vector '''
d = defaultdict(lambda: 0)
for i in s:
d[i] += 1
# convert to counts in feature space
vector = np.array([ d[i] for i in self.feature_space ])
return vector
def transform(self, str):
return self.vectorize(self.shingles(str, self.k))
class SimilarStringStore:
def __init__(self, **kwargs):
self.transformer = FeatureGenerator(k=1)
print(self.transformer.n_features)
self.store = AnnoyIndex(self.transformer.n_features)
def vectorize(self, s):
return self.transformer.transform(s)
def add(self, id, s):
''' add a string to index '''
vector = self.transformer.transform(s)
self.store.add_item(int(id), vector)
return vector
def build(self):
self.store.build(500)
def save(self, filename='store.knn'):
self.store.save(filename)
def build_and_save(self, filename='store.knn'):
self.build()
self.save(filename)
def load(self, filename='store.knn'):
self.store.load(filename)
def query(self, s):
''' query index '''
vector = self.transformer.transform(s)
neighbors = self.store.get_nns_by_vector(vector, 40)
return neighbors
def remove(self, id):
''' remove a string from the index '''
pass
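`FeatureGenerator` turns a protein sequence into k-mer ("shingle") counts over the 20-letter amino-acid alphabet, and `SimilarStringStore` indexes those vectors with Annoy. A dependency-light sketch of the featurization step plus a cosine comparison between two short sequences (the sample sequences and the cosine helper are illustrative; Annoy is left out):

```python
from collections import defaultdict
from itertools import product
import numpy as np

ALPHABET = ['A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H', 'I',
            'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
K = 2
FEATURE_SPACE = [''.join(p) for p in product(ALPHABET, repeat=K)]

def shingles(s, k):
    # Overlapping k-mers, same idea as FeatureGenerator.shingles.
    return [s[i:i + k] for i in range(len(s) - k + 1)]

def vectorize(s):
    # Counts over the fixed k-mer feature space.
    counts = defaultdict(int)
    for kmer in shingles(s, K):
        counts[kmer] += 1
    return np.array([counts[f] for f in FEATURE_SPACE], dtype=float)

def cosine(u, v):
    return float(u @ v) / (np.linalg.norm(u) * np.linalg.norm(v))

a = vectorize("MKTAYIAKQR")
b = vectorize("MKTAYIAKQW")   # one substitution at the end
c = vectorize("GGGGGGGGGG")
assert cosine(a, b) > cosine(a, c)   # near-identical sequences score higher
```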
avg_line_length 22.267327 | max_line_length 85 | alphanum_fraction 0.578924 | count_classes 2,045 | score_classes 0.909293 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 350 | score_documentation 0.155625
7a042b77715a588fe196553691f390b7d45b469f | size 314 | ext py | lang Python | path arm_control/src/orientation.py | repo ALxander19/zobov_arm | head 8b5b322b53a7a0d9c91fcbc720473a2a6e6f5826 | licenses ["BSD-2-Clause"] | max_stars_count null | max_issues_count null | max_forks_count null
# tf.transformations alternative is not yet available in tf2
from tf.transformations import quaternion_from_euler
if __name__ == '__main__':
# RPY to convert: 90deg, 0, -90deg
q = quaternion_from_euler(1.5707, 0, -1.5707)
print "The quaternion representation is %s %s %s %s." % (q[0], q[1], q[2], q[3])
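The snippet prints the quaternion for roll = 90 deg, pitch = 0, yaw = -90 deg. A NumPy sketch of the underlying conversion, assuming the fixed-axis x-y-z (roll-pitch-yaw) convention and the `[x, y, z, w]` ordering that `tf` uses; treat it as an illustration of the math rather than a drop-in replacement for `tf.transformations`:

```python
import numpy as np

def quaternion_from_rpy(roll, pitch, yaw):
    """Quaternion [x, y, z, w] for R = Rz(yaw) @ Ry(pitch) @ Rx(roll)."""
    cr, sr = np.cos(roll / 2), np.sin(roll / 2)
    cp, sp = np.cos(pitch / 2), np.sin(pitch / 2)
    cy, sy = np.cos(yaw / 2), np.sin(yaw / 2)
    return np.array([
        cy * cp * sr - sy * sp * cr,   # x
        cy * sp * cr + sy * cp * sr,   # y
        sy * cp * cr - cy * sp * sr,   # z
        cy * cp * cr + sy * sp * sr,   # w
    ])

q = quaternion_from_rpy(1.5707, 0.0, -1.5707)
print("The quaternion representation is %s %s %s %s." % (q[0], q[1], q[2], q[3]))
# Roughly 0.5 -0.5 -0.5 0.5 for this particular roll/pitch/yaw.
```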
avg_line_length 31.4 | max_line_length 82 | alphanum_fraction 0.694268 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 151 | score_documentation 0.480892
7a05099cb4069ff152e86f9e7700bcfd829e2375 | size 2,997 | ext py | lang Python | path django_server/fvh_courier/rest/tests/base.py | repo ForumViriumHelsinki/CityLogistics | head df4efef49bdc740a1dc47d0bda49ce2b3833e9c1 | licenses ["MIT"] | max_stars_count 1 (2021-11-02T03:21:48.000Z to 2021-11-02T03:21:48.000Z) | max_issues_count 136 (2019-12-03T14:52:17.000Z to 2022-02-26T21:18:15.000Z) | max_forks_count 2 (2020-06-23T23:58:08.000Z to 2020-12-08T13:19:28.000Z)
import datetime
from django.contrib.auth.models import User, Group
from django.utils import timezone
from rest_framework.test import APITestCase
import fvh_courier.models.base
from fvh_courier import models
class FVHAPITestCase(APITestCase):
def assert_dict_contains(self, superset, subset, path=''):
for key, expected in subset.items():
full_path = path + key
received = superset.get(key, None)
if isinstance(expected, dict) and isinstance(received, dict):
self.assert_dict_contains(superset[key], expected, full_path + '.')
else:
assert received == expected, 'Value mismatch for key {}: {} != {}'.format(
full_path, expected, received
)
def create_courier(self):
courier = models.Courier.objects.create(
company=models.CourierCompany.objects.create(name='Couriers r us'),
user=User.objects.create(
username='courier', first_name='Coranne', last_name='Courier', email='coranne@couriersrus.com'),
phone_number='+358505436657')
courier.company.coordinator = courier
courier.company.save()
return courier
def create_and_login_courier(self):
courier = self.create_courier()
self.client.force_login(courier.user)
return courier
def create_package(self, sender, **kwargs):
now = timezone.now()
return models.Package.objects.create(
pickup_at=fvh_courier.models.base.Address.objects.create(
street_address='Paradisäppelvägen 123',
postal_code='00123',
city='Ankeborg',
country='Ankerige',
lat=64.04,
lon=80.65
),
deliver_to=fvh_courier.models.base.Address.objects.create(
street_address='Helvetesapelsinvägen 666',
postal_code='00321',
city='Ankeborg',
country='Ankerige',
lat=64.54,
lon=80.05
),
height=20, width=30, depth=20, weight=2,
sender=sender,
recipient='Reginald Receiver',
recipient_phone='+358505436657',
earliest_pickup_time=now,
latest_pickup_time=now + datetime.timedelta(hours=1),
earliest_delivery_time=now + datetime.timedelta(hours=1),
latest_delivery_time=now + datetime.timedelta(hours=2),
**kwargs
)
def create_sender(self, **kwargs):
return models.Sender.objects.create(
user=User.objects.create(username='sender', first_name='Cedrik', last_name='Sender'),
address=models.Address.objects.create(
street_address="Paradisäppelvägen 123",
postal_code="00123",
city="Ankeborg",
country="Ankerige"),
phone_number='+358505436657', **kwargs)
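`assert_dict_contains` checks that a (possibly nested) expected dict is a subset of an API response, which is handy when responses carry extra keys. A framework-free sketch of the same idea (the helper name and sample dicts here are illustrative, not taken from the project):

```python
def dict_contains(superset, subset, path=''):
    """Raise AssertionError unless every key in subset matches superset."""
    for key, expected in subset.items():
        full_path = path + key
        received = superset.get(key)
        if isinstance(expected, dict) and isinstance(received, dict):
            dict_contains(received, expected, full_path + '.')
        elif received != expected:
            raise AssertionError(
                'Value mismatch for key {}: {} != {}'.format(full_path, expected, received))

response = {'id': 7, 'sender': {'name': 'Cedrik', 'phone': '+358505436657'},
            'extra_field_we_do_not_care_about': True}
dict_contains(response, {'sender': {'name': 'Cedrik'}})   # passes
try:
    dict_contains(response, {'sender': {'name': 'Someone else'}})
except AssertionError as exc:
    print(exc)   # Value mismatch for key sender.name: Someone else != Cedrik
```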
avg_line_length 36.54878 | max_line_length 112 | alphanum_fraction 0.593594 | count_classes 2,790 | score_classes 0.92938 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 355 | score_documentation 0.118254
7a0915dbc8c3508d29e923526b1c9bacf3a1ca69 | size 12,039 | ext py | lang Python | path pynoorm/binder.py | repo jpeyret/pynoorm | head d6f7e0e102bb0eb4865beff75cf671b560ebc8b2 | licenses ["MIT"] | max_stars_count 2 (2016-04-14T23:11:06.000Z to 2016-06-04T22:39:10.000Z) | max_issues_count null | max_forks_count 1 (2022-01-16T15:19:16.000Z to 2022-01-16T15:19:16.000Z)
"""
Binder classes perform two functions through their format method
- given a query template with %(somevar)s python substitution
class MyClass(object):
pass
arg1 = MyClass()
arg1.customer = 101
default = MyClass()
default.customer = 201
default.country = "CAN"
qry, sub = format("
select *
from customer
where country = %(country)s
and custid = %(customer)s", arg1, default)
means that we will be fetching for country=CAN, custid=101
- the query template itself is transformed to a format
that fits the underlying database's bind variable
scheme which protects against sql injection attacks.
For example, assuming an Oracle database (paramstyle="named")
qry:
"select * from customer where country = :country and custid = :customer"
sub:
{"country":"CAN", "customer" : 101}
Postgres (paramstyle="pyformat"):
qry:
"select * from customer where country = %(country)s and custid = %(customer)s"
sub:
{"country":"CAN", "customer" : 101}
a positional database (paramstyle="numeric") (NotImplementedError)
would instead return
qry:
"select * from customer where country = :1 and custid = :2"
sub:
["CAN", 101]
"""
import re
class Binder(object):
"""query template and substitution management - generic
"""
def __init__(self, *args, **kwds):
pass
def format(self, tqry, *args):
"""
:param tqry: query with optional substitution variables
Python style i.e.
select * from orders where custid = %(custid)s
:param *args: zero or more arguments that will be checked
left-to-right, argument[<key>], getattr(argument,<key>)
"""
def __repr__(self):
msg = "%s paramstyle=%s" % (self.__class__.__name__, self.paramstyle)
if hasattr(self, "supports"):
msg += " supports: %s" % (self.supports)
return msg
def _case_sensitive(self, key):
return [key]
def _case_insensitive(self, key):
if key == key.upper():
return [key, key.lower()]
if key == key.lower():
return [key, key.upper()]
return [key]
_key_expand = _case_sensitive
def _get_from_args(self, key_in):
"""generic way to look for a key in the arg list"""
li_key = self._key_expand(key_in)
for key in li_key:
for arg in self.li_arg:
try:
got = arg[key]
return got
except (KeyError):
try:
# try getattr
got = getattr(arg, key)
return got
except AttributeError:
continue
except (AttributeError, TypeError):
# no __getitem__, try getattr
try:
got = getattr(arg, key)
return got
except AttributeError:
continue
try:
raise KeyError(key_in)
except Exception as e:
raise
@classmethod
def factory(cls, paramstyle, case_insensitive=False):
"""
return a Binder subclass instance appropriate
to the underlying db library paramstyle bind variable
:param paramstyle: parameter style string as per PEP-249
:case_insensitive: %(custid)s will match {"custid":1} or {"CUSTID":2}, with priority
going to the initial case. mixed-case keys (custId) will only match {"custId":3}
"""
try:
inst = cls._di_paramstyle[paramstyle]()
if case_insensitive:
inst._key_expand = inst._case_insensitive
return inst
except KeyError:
msg = """got:%s,
but expecting one of %s.
See
https://www.python.org/dev/peps/pep-0249/#paramstyle
for details""" % (
paramstyle,
"/".join(list(cls._di_paramstyle.keys())),
)
raise ValueError(msg)
except NotImplementedError:
msg = "%s is not implemented yet" % (paramstyle)
raise NotImplementedError(msg)
_di_paramstyle = {}
# the regular expression pattern that looks for list type binds
re_pattern_listsubstition = re.compile("%\([a-zA-Z0-9_]+\)l")
# leading '__' variable name makes name clashes more unlikely
T_LIST_KEYNAME = "%s_%03d__"
# def _pre_process(self):
# """do nothing for now - intended to support list substitutions"""
# pass
def _pre_process(self):
li_listsubstition = self.re_pattern_listsubstition.findall(self.tqry)
if li_listsubstition:
self.preprocess_listsubstitution(li_listsubstition)
def preprocess_listsubstitution(self, li_hit):
""" this will transform %(xxx)l into %(__xxx_000)s, %(__xxx_001)s """
di_list_sub = {}
self.li_arg.insert(0, di_list_sub)
for hit in li_hit:
key = hit[2:-2]
got = self._get_from_args(key)
if not isinstance(got, (list, set)):
raise ValueError(
"list substitutions require an iterable parameter: `%s` was of type `%s`"
% (key, type(got))
)
#
# self.tqry = self.tqry.replace(hit, hit[:-1] + "s")
else:
li = []
if not got:
# empty list or set
self.tqry = self.tqry.replace(hit, "NULL")
continue
for ix, val in enumerate(got):
ikeyname = self.T_LIST_KEYNAME % (key, ix)
ikeyname_sub = "%%(%s)s" % (ikeyname)
di_list_sub[ikeyname] = val
li.append(ikeyname_sub)
# replace the original bind %(xxx)l with
# %(__xxx_000)s, %(__xxx_001)s, ...
repval = ", ".join(li)
self.tqry = self.tqry.replace(hit, repval)
class Binder_pyformat(Binder):
"""support Postgresql
query template and substitution management for postgresql
query is unchanged because postgresql is happy
with %(somevar)s as a bind
"""
paramstyle = "pyformat"
supports = "Postgresql"
def _pre_process(self):
li_listsubstition = self.re_pattern_listsubstition.findall(self.tqry)
if li_listsubstition:
self.preprocess_listsubstitution(li_listsubstition)
def format(self, tqry, *args):
"""
looks up substitutions and sets them up in dictionary self.sub
postgresql accepts Python named variable so keeping the query as is
select * from foo where bar = %(somebar)s"
=>
select * from foo where bar = %(somebar)s
{"somebar" : value-found-for-somebar}
"""
self.sub = {}
self.li_arg = list(args)
self.tqry = tqry
self._pre_process()
try:
self.tqry % (self)
except (Exception,) as e:
raise
# Postgresql query format stays as %(foo)s
# so we just return the original query
# (which _pre_process may have altered)
return self.tqry, self.sub
__call__ = format
def __getitem__(self, key):
if key in self.sub:
return None
got = self._get_from_args(key)
self.sub[key] = got
return None
PARAMSTYLE_QMARK = PARAMSTYLE_SQLITE = PARAMSTYLE_SQLSERVER = "qmark"
class BinderQmark(Binder):
""" supports: sqlite3, SQL Server
query template and substitution management for sqlite3
query changes from %(somevar)s to ?
select * from foo where bar = %(somebar)s
=>
select * from foo where bar = ?,
(value-found-for-somebar,)
"""
paramstyle = PARAMSTYLE_QMARK
supports = "sqlite3, mssql"
qry_replace = "?"
def format(self, tqry, *args):
"""
looks up substitutions and sets them up in self.sub
Note:
Assuming both will be happy with a tuple.
Might be one SQL Server needs a list instead.
"""
self.tqry = tqry
self._di_sub = {}
self.sub = []
self.li_arg = list(args)
self._pre_process()
try:
qry = self.tqry % (self)
except (Exception,) as e:
raise
return qry, tuple(self.sub)
__call__ = format
def __getitem__(self, key):
"""
finds a substitution and append it to the bind list
but also transforms the variable in the query to ?
"""
qry_replace = self.qry_replace
try:
got = self._di_sub[key]
except KeyError:
got = self._di_sub[key] = self._get_from_args(key)
self.sub.append(got)
return qry_replace
class BinderFormat(BinderQmark):
"""supports: MySQL
query template and substitution management for MySQL
query changes from %(somevar)s to %s format
parameters are (<var1>,<var2>,)
Note: pretty much identical to BinderQmark/sqlite3
except for the placeholder being %s
"""
paramstyle = "format"
supports = "MySQL"
qry_replace = "%s"
class BinderNamed(Binder):
"""supports: Oracle
query template and substitution management for Oracle
query changes from %(somevar)s to :somevar format
list-based substitutions:
%(somelist)l :__somelist_000, :__somelist_001...
"""
paramstyle = "named"
supports = "Oracle"
t_qry_replace = ":%s"
def format(self, tqry, *args):
"""
looks up substitutions and sets them up in self.sub
but also transforms the query to Oracle named
format
"select * from foo where bar = %(somebar)s"
=>
"select * from foo where bar = :somebar "
{"somebar" : value-found-for-somebar}
"""
self.sub = {}
self.li_arg = list(args)
self.tqry = tqry
self._pre_process()
try:
qry = self.tqry % (self)
except (Exception,) as e:
raise
return qry, self.sub
__call__ = format
def __getitem__(self, key):
"""
finds a substitution
but also transforms the variable in the query to Oracle named
format :foo
"""
# already seen so already in the substitution dict
# replace the query's %(foo)s with :foo
if key in self.sub:
return self.t_qry_replace % (key)
got = self._get_from_args(key)
self.sub[key] = got
return self.t_qry_replace % (key)
"""
https://www.python.org/dev/peps/pep-0249/#paramstyle
paramstyle Meaning
qmark Question mark style, e.g. ...WHERE name=? sequence
numeric Numeric, positional style, e.g. ...WHERE name=:1
named Named style, e.g. ...WHERE name=:name
format ANSI C printf format codes, e.g. ...WHERE name=%s
pyformat Python extended format codes, e.g. ...WHERE name=%(name)s
"""
ExperimentalBinderNamed = BinderNamed
class Binder_NotImplementedError(Binder):
"""not implemented yet"""
paramstyle = "not implemented"
def __init__(self, *args, **kwds):
raise NotImplementedError()
# This is what decides how the Binder
# will process incoming template substitutions
Binder._di_paramstyle["pyformat"] = Binder_pyformat
Binder._di_paramstyle["named"] = BinderNamed
Binder._di_paramstyle[PARAMSTYLE_QMARK] = BinderQmark
Binder._di_paramstyle["format"] = BinderFormat
Binder._di_paramstyle["experimentalnamed"] = ExperimentalBinderNamed
# and these are not done yet
Binder._di_paramstyle["numeric"] = Binder_NotImplementedError
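A short usage sketch of the factory and the two most common paramstyles, following the classes defined above (assumes `pynoorm` is importable as `from pynoorm.binder import Binder`; the query and argument dicts are made up for illustration):

```python
from pynoorm.binder import Binder

tqry = "select * from customer where country = %(country)s and custid = %(custid)s"

# Arguments are searched left to right: custid comes from the first dict,
# country falls through to the second.
args = ({"custid": 101}, {"country": "CAN", "custid": 999})

qmark = Binder.factory("qmark")          # sqlite3, SQL Server
print(qmark(tqry, *args))
# ('select * from customer where country = ? and custid = ?', ('CAN', 101))

named = Binder.factory("named")          # Oracle
print(named(tqry, *args))
# ('select * from customer where country = :country and custid = :custid',
#  {'country': 'CAN', 'custid': 101})
```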
avg_line_length 27.675862 | max_line_length 93 | alphanum_fraction 0.572971 | count_classes 10,212 | score_classes 0.848243 | count_generators 0 | score_generators 0 | count_decorators 1,144 | score_decorators 0.095025 | count_async_functions 0 | score_async_functions 0 | count_documentation 5,941 | score_documentation 0.49348
7a0b66937d09d19c265c09560989c32e86648150 | size 4,313 | ext py | lang Python | path parselglossy/documentation.py | repo dev-cafe/parseltongue | head 834e78724bb90dfa19748d7f65f6af02d525e3f2 | licenses ["MIT"] | max_stars_count 5 (2019-03-11T18:42:26.000Z to 2021-08-24T18:24:05.000Z) | max_issues_count 105 (2018-12-04T03:07:22.000Z to 2022-03-24T13:04:48.000Z) | max_forks_count 1 (2019-02-08T09:54:49.000Z to 2019-02-08T09:54:49.000Z)
# -*- coding: utf-8 -*-
#
# parselglossy -- Generic input parsing library, speaking in tongues
# Copyright (C) 2020 Roberto Di Remigio, Radovan Bast, and contributors.
#
# This file is part of parselglossy.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# For information on the complete list of contributors to the
# parselglossy library, see: <http://parselglossy.readthedocs.io/>
#
"""Documentation generation."""
from typing import List # noqa: F401
from .utils import JSONDict
def documentation_generator(
template: JSONDict, *, header: str = "Input parameters"
) -> str:
"""Generates documentation from a valid template.
Parameters
----------
template : JSONDict
The template to generate documentation from.
We assume that the template is valid.
Returns
-------
documentation : str
"""
comment = (
".. raw:: html\n\n" # noqa: F541
" <style> .red {color:#aa0060; font-weight:bold; font-size:18px} </style>\n\n" # noqa: E501
".. role:: red\n\n"
f".. This documentation was autogenerated using parselglossy."
" Editing by hand is not recommended.\n"
)
header = (
f"{comment:s}\n{'=' * len(header):s}\n{header:s}\n{'=' * len(header):s}\n\n"
"- Keywords without a default value are **required**.\n"
"- Default values are either explicit or computed from the value of other keywords in the input.\n" # noqa: E501
"- Sections where all keywords have a default value can be omitted.\n"
"- Predicates, if present, are the functions run to validate user input.\n"
)
docs = _rec_documentation_generator(template=template)
documentation = header + docs
return documentation
def _document_keyword(keyword: JSONDict) -> str:
docstring = keyword["docstring"].replace("\n", " ")
doc = f"""
:{keyword['name']:s}: {docstring:s}
**Type** ``{keyword['type']:s}``
"""
if "default" in keyword.keys():
doc += f"""
**Default** ``{keyword['default']}``
"""
if "predicates" in keyword.keys():
preds = "\n ".join((f"- ``{x}``" for x in keyword["predicates"]))
doc += f"""
**Predicates**
{preds}
"""
return doc
def _rec_documentation_generator(template, *, level: int = 0) -> str:
"""Generates documentation from a valid template.
Parameters
----------
template : JSONDict
level : int
Returns
-------
docs : str
"""
docs = [] # type: List[str]
keywords = template["keywords"] if "keywords" in template.keys() else []
if keywords:
docs.append(_indent("\n:red:`Keywords`", level))
for k in keywords:
doc = _document_keyword(k)
docs.extend(_indent(doc, level))
sections = template["sections"] if "sections" in template.keys() else []
if sections:
docs.append(_indent("\n:red:`Sections`", level))
for s in sections:
docstring = s["docstring"].replace("\n", " ")
doc = f"\n :{s['name']:s}: {docstring:s}\n"
doc += _rec_documentation_generator(s, level=level + 1)
docs.extend(_indent(doc, level))
return "".join(docs)
def _indent(in_str: str, level: int = 0) -> str:
return in_str.replace("\n", "\n" + (" " * level))
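A minimal usage sketch of `documentation_generator` (assumes the `parselglossy` package is importable; the template dict is a made-up example whose keys follow what the functions above read: `keywords`, `sections`, and per-keyword `name`, `type`, `docstring`, optional `default` and `predicates`):

```python
from parselglossy.documentation import documentation_generator

template = {
    "keywords": [
        {
            "name": "title",
            "type": "str",
            "docstring": "Title of the calculation.",
            "default": "hello",
            "predicates": ["len(value) < 80"],
        }
    ],
    "sections": [],
}

# Returns reStructuredText: the header block followed by a ":title:" entry
# carrying its type, default, and predicates.
print(documentation_generator(template, header="Input parameters"))
```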
avg_line_length 31.713235 | max_line_length 121 | alphanum_fraction 0.644563 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 2,891 | score_documentation 0.670299
7a0b7b8522bbe2e3900e18756663a43a8ac174f7 | size 2,765 | ext py | lang Python | path functions/print_initial_values.py | repo CINPLA/edNEGmodel_analysis | head be8854c563376a14ee7d15e51d98d0d82be96a35 | licenses ["MIT"] | max_stars_count null | max_issues_count null | max_forks_count null
import numpy as np
def print_initial_values(init_cell):
phi_sn, phi_se, phi_sg, phi_dn, phi_de, phi_dg, phi_msn, phi_mdn, phi_msg, phi_mdg = init_cell.membrane_potentials()
E_Na_sn, E_Na_sg, E_Na_dn, E_Na_dg, E_K_sn, E_K_sg, E_K_dn, E_K_dg, E_Cl_sn, E_Cl_sg, E_Cl_dn, E_Cl_dg, E_Ca_sn, E_Ca_dn = init_cell.reversal_potentials()
q_sn = init_cell.total_charge(np.array([init_cell.Na_sn, init_cell.K_sn, init_cell.Cl_sn, init_cell.Ca_sn, init_cell.X_sn]))
q_se = init_cell.total_charge(np.array([init_cell.Na_se, init_cell.K_se, init_cell.Cl_se, init_cell.Ca_se, init_cell.X_se]))
q_sg = init_cell.total_charge(np.array([init_cell.Na_sg, init_cell.K_sg, init_cell.Cl_sg, 0, init_cell.X_sg]))
q_dn = init_cell.total_charge(np.array([init_cell.Na_dn, init_cell.K_dn, init_cell.Cl_dn, init_cell.Ca_dn, init_cell.X_dn]))
q_de = init_cell.total_charge(np.array([init_cell.Na_de, init_cell.K_de, init_cell.Cl_de, init_cell.Ca_de, init_cell.X_de]))
q_dg = init_cell.total_charge(np.array([init_cell.Na_dg, init_cell.K_dg, init_cell.Cl_dg, 0, init_cell.X_dg]))
print("----------------------------")
print("Initial values")
print("----------------------------")
print("initial total charge(C):", q_sn + q_se + q_sg + q_dn + q_de + q_dg)
print("Q_sn + Q_sg (C):", q_sn+q_sg)
print("Q_se (C):", q_se)
print("Q_dn + Q_sg (C):", q_dn+q_dg)
print("Q_de (C):", q_de)
print("----------------------------")
print('phi_sn: ', round(phi_sn*1000, 1))
print('phi_se: ', round(phi_se*1000, 1))
print('phi_sg: ', round(phi_sg*1000, 1))
print('phi_dn: ', round(phi_dn*1000, 1))
print('phi_de: ', round(phi_de*1000, 1))
print('phi_dg: ', round(phi_dg*1000, 1))
print('phi_msn: ', round(phi_msn*1000, 1))
print('phi_mdn: ', round(phi_mdn*1000, 1))
print('phi_msg: ', round(phi_msg*1000, 1))
print('phi_mdg: ', round(phi_mdg*1000, 1))
print('E_Na_sn: ', round(E_Na_sn*1000))
print('E_Na_sg: ', round(E_Na_sg*1000))
print('E_K_sn: ', round(E_K_sn*1000))
print('E_K_sg: ', round(E_K_sg*1000))
print('E_Cl_sn: ', round(E_Cl_sn*1000))
print('E_Cl_sg: ', round(E_Cl_sg*1000))
print('E_Ca_sn: ', round(E_Ca_sn*1000))
print("----------------------------")
print('psi_se-psi_sn', init_cell.psi_se-init_cell.psi_sn)
print('psi_se-psi_sg', init_cell.psi_se-init_cell.psi_sg)
print('psi_de-psi_dn', init_cell.psi_de-init_cell.psi_dn)
print('psi_de-psi_dg', init_cell.psi_de-init_cell.psi_dg)
print("----------------------------")
print('initial total volume (m^3):', init_cell.V_sn + init_cell.V_se + init_cell.V_sg + init_cell.V_dn + init_cell.V_de + init_cell.V_dg)
print("----------------------------")
avg_line_length 56.428571 | max_line_length 158 | alphanum_fraction 0.637975 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 548 | score_documentation 0.198192
7a0c0a5f5ecb615e0a6336ce27fac2621034f8ff | size 21,021 | ext py | lang Python | path anyway/parsers/cbs.py | repo edermon/anyway | head 3523b7871b7eebeca225e088af653ba074e5bee3 | licenses ["BSD-3-Clause"] | max_stars_count null | max_issues_count null | max_forks_count null
# -*- coding: utf-8 -*-
import glob
import os
import json
from collections import OrderedDict
import itertools
import re
from datetime import datetime
import six
from six import iteritems
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import or_
from .. import field_names, localization
from ..models import AccidentMarker, Involved, Vehicle
from .. import models
from ..utilities import ItmToWGS84, init_flask, CsvReader, time_delta, decode_hebrew,ImporterUI,truncate_tables
from functools import partial
import logging
failed_dirs = OrderedDict()
CONTENT_ENCODING = 'cp1255'
ACCIDENT_TYPE_REGEX = re.compile(r"Accidents Type (?P<type>\d)")
ACCIDENTS = 'accidents'
CITIES = 'cities'
STREETS = 'streets'
ROADS = "roads"
URBAN_INTERSECTION = 'urban_intersection'
NON_URBAN_INTERSECTION = 'non_urban_intersection'
DICTIONARY = "dictionary"
INVOLVED = "involved"
VEHICLES = "vehicles"
cbs_files = {
ACCIDENTS: "AccData.csv",
URBAN_INTERSECTION: "IntersectUrban.csv",
NON_URBAN_INTERSECTION: "IntersectNonUrban.csv",
STREETS: "DicStreets.csv",
DICTIONARY: "Dictionary.csv",
INVOLVED: "InvData.csv",
VEHICLES: "VehData.csv"
}
coordinates_converter = ItmToWGS84()
app = init_flask()
db = SQLAlchemy(app)
json_dumps = partial(json.dumps, encoding=models.db_encoding) if six.PY2 else json.dumps
def get_street(settlement_sign, street_sign, streets):
"""
extracts the street name using the settlement id and street id
"""
if settlement_sign not in streets:
# Changed to return blank string instead of None for correct presentation (Omer)
return u""
street_name = [decode_hebrew(x[field_names.street_name]) for x in streets[settlement_sign] if
x[field_names.street_sign] == street_sign]
# there should be only one street name, or none if it wasn't found.
return street_name[0] if len(street_name) == 1 else u""
def get_address(accident, streets):
"""
extracts the address of the main street.
tries to build the full address: <street_name> <street_number>, <settlement>,
but might return a partial one if unsuccessful.
"""
street = get_street(accident[field_names.settlement_sign], accident[field_names.street1], streets)
if not street:
return u""
# the home field is invalid if it's empty or if it contains 9999
home = accident[field_names.home] if accident[field_names.home] != 9999 else None
settlement = localization.get_city_name(accident[field_names.settlement_sign])
if not home and not settlement:
return street
if not home and settlement:
return u"{}, {}".format(street, settlement)
if home and not settlement:
return u"{} {}".format(street, home)
return u"{} {}, {}".format(street, home, settlement)
def get_streets(accident, streets):
"""
extracts the streets the accident occurred in.
every accident has a main street and a secondary street.
:return: a tuple containing both streets.
"""
main_street = get_address(accident, streets)
secondary_street = get_street(accident[field_names.settlement_sign], accident[field_names.street2], streets)
return main_street, secondary_street
def get_junction(accident, roads):
"""
extracts the junction from an accident
omerxx: added "km" parameter to the calculation to only show the right junction,
every non-urban accident shows nearest junction with distance and direction
:return: returns the junction or None if it wasn't found
"""
if accident["KM"] is not None and accident[field_names.non_urban_intersection] is None:
min_dist = 100000
key = (), ()
junc_km = 0
for option in roads:
if accident[field_names.road1] == option[0] and abs(accident["KM"]-option[2]) < min_dist:
min_dist = abs(accident["KM"]-option[2])
key = accident[field_names.road1], option[1], option[2]
junc_km = option[2]
junction = roads.get(key, None)
if junction:
if accident["KM"] - junc_km > 0:
direction = u"צפונית" if accident[field_names.road1] % 2 == 0 else u"מזרחית"
else:
direction = u"דרומית" if accident[field_names.road1] % 2 == 0 else u"מערבית"
if abs(float(accident["KM"] - junc_km)/10) >= 1:
string = str(abs(float(accident["KM"])-junc_km)/10) + u" ק״מ " + direction + u" ל" + \
decode_hebrew(junction)
elif 0 < abs(float(accident["KM"] - junc_km)/10) < 1:
string = str(int((abs(float(accident["KM"])-junc_km)/10)*1000)) + u" מטרים " + direction + u" ל" + \
decode_hebrew(junction)
else:
string = decode_hebrew(junction)
return string
else:
return u""
elif accident[field_names.non_urban_intersection] is not None:
key = accident[field_names.road1], accident[field_names.road2], accident["KM"]
junction = roads.get(key, None)
return decode_hebrew(junction) if junction else u""
else:
return u""
def parse_date(accident):
"""
parses an accident's date
"""
year = accident[field_names.accident_year]
month = accident[field_names.accident_month]
day = accident[field_names.accident_day]
'''
hours calculation explanation - The value of the hours is between 1 to 96.
These values represent 15 minutes each that start at 00:00:
1 equals 00:00, 2 equals 00:15, 3 equals 00:30 and so on.
'''
minutes = accident[field_names.accident_hour] * 15 - 15
hours = int(minutes // 60)
minutes %= 60
accident_date = datetime(year, month, day, hours, minutes, 0)
return accident_date
def load_extra_data(accident, streets, roads):
"""
loads more data about the accident
:return: a dictionary containing all the extra fields and their values
:rtype: dict
"""
extra_fields = {}
# if the accident occurred in an urban setting
if bool(accident[field_names.urban_intersection]):
main_street, secondary_street = get_streets(accident, streets)
if main_street:
extra_fields[field_names.street1] = main_street
if secondary_street:
extra_fields[field_names.street2] = secondary_street
# if the accident occurred in a non urban setting (highway, etc')
if bool(accident[field_names.non_urban_intersection]):
junction = get_junction(accident, roads)
if junction:
extra_fields[field_names.junction_name] = junction
# localize static accident values
for field in localization.get_supported_tables():
# if we have a localized field for that particular field, save the field value
# it will be fetched we deserialized
if accident[field] and localization.get_field(field, accident[field]):
extra_fields[field] = accident[field]
return extra_fields
def get_data_value(value):
"""
:returns: value for parameters which are not mandatory in an accident data
OR -1 if the parameter value does not exist
"""
return int(value) if value else -1
def import_accidents(provider_code, accidents, streets, roads, **kwargs):
logging.info("\tReading accident data from '%s'..." % os.path.basename(accidents.name()))
markers = []
for accident in accidents:
if field_names.x_coordinate not in accident or field_names.y_coordinate not in accident:
raise ValueError("Missing x and y coordinates")
if accident[field_names.x_coordinate] and accident[field_names.y_coordinate]:
lng, lat = coordinates_converter.convert(accident[field_names.x_coordinate],
accident[field_names.y_coordinate])
else:
lng, lat = None, None # Must insert everything to avoid foreign key failure
main_street, secondary_street = get_streets(accident, streets)
assert(int(provider_code) == int(accident[field_names.file_type]))
marker = {
"id": int(accident[field_names.id]),
"provider_code": int(provider_code),
"title": "Accident",
"description": json_dumps(load_extra_data(accident, streets, roads)),
"address": get_address(accident, streets),
"latitude": lat,
"longitude": lng,
"subtype": int(accident[field_names.accident_type]),
"severity": int(accident[field_names.accident_severity]),
"created": parse_date(accident),
"locationAccuracy": int(accident[field_names.igun]),
"roadType": int(accident[field_names.road_type]),
"roadShape": int(accident[field_names.road_shape]),
"dayType": int(accident[field_names.day_type]),
"unit": int(accident[field_names.unit]),
"mainStreet": main_street,
"secondaryStreet": secondary_street,
"junction": get_junction(accident, roads),
"one_lane": get_data_value(accident[field_names.one_lane]),
"multi_lane": get_data_value(accident[field_names.multi_lane]),
"speed_limit": get_data_value(accident[field_names.speed_limit]),
"intactness": get_data_value(accident[field_names.intactness]),
"road_width": get_data_value(accident[field_names.road_width]),
"road_sign": get_data_value(accident[field_names.road_sign]),
"road_light": get_data_value(accident[field_names.road_light]),
"road_control": get_data_value(accident[field_names.road_control]),
"weather": get_data_value(accident[field_names.weather]),
"road_surface": get_data_value(accident[field_names.road_surface]),
"road_object": get_data_value(accident[field_names.road_object]),
"object_distance": get_data_value(accident[field_names.object_distance]),
"didnt_cross": get_data_value(accident[field_names.didnt_cross]),
"cross_mode": get_data_value(accident[field_names.cross_mode]),
"cross_location": get_data_value(accident[field_names.cross_location]),
"cross_direction": get_data_value(accident[field_names.cross_direction]),
"road1": get_data_value(accident[field_names.road1]),
"road2": get_data_value(accident[field_names.road2]),
"km": float(accident[field_names.km]) if accident[field_names.km] else None,
"yishuv_symbol": get_data_value(accident[field_names.yishuv_symbol]),
"geo_area": get_data_value(accident[field_names.geo_area]),
"day_night": get_data_value(accident[field_names.day_night]),
"day_in_week": get_data_value(accident[field_names.day_in_week]),
"traffic_light": get_data_value(accident[field_names.traffic_light]),
"region": get_data_value(accident[field_names.region]),
"district": get_data_value(accident[field_names.district]),
"natural_area": get_data_value(accident[field_names.natural_area]),
"minizipali_status": get_data_value(accident[field_names.minizipali_status]),
"yishuv_shape": get_data_value(accident[field_names.yishuv_shape]),
}
markers.append(marker)
return markers
def import_involved(provider_code, involved, **kwargs):
logging.info("\tReading involved data from '%s'..." % os.path.basename(involved.name()))
involved_result = []
for involve in involved:
if not involve[field_names.id]: # skip lines with no accident id
continue
involved_result.append({
"accident_id": int(involve[field_names.id]),
"provider_code": int(provider_code),
"involved_type": int(involve[field_names.involved_type]),
"license_acquiring_date": int(involve[field_names.license_acquiring_date]),
"age_group": int(involve[field_names.age_group]),
"sex": get_data_value(involve[field_names.sex]),
"car_type": get_data_value(involve[field_names.car_type]),
"safety_measures": get_data_value(involve[field_names.safety_measures]),
"home_city": get_data_value(involve[field_names.home_city]),
"injury_severity": get_data_value(involve[field_names.injury_severity]),
"injured_type": get_data_value(involve[field_names.injured_type]),
"Injured_position": get_data_value(involve[field_names.injured_position]),
"population_type": get_data_value(involve[field_names.population_type]),
"home_district": get_data_value(involve[field_names.home_district]),
"home_nafa": get_data_value(involve[field_names.home_nafa]),
"home_area": get_data_value(involve[field_names.home_area]),
"home_municipal_status": get_data_value(involve[field_names.home_municipal_status]),
"home_residence_type": get_data_value(involve[field_names.home_residence_type]),
"hospital_time": get_data_value(involve[field_names.hospital_time]),
"medical_type": get_data_value(involve[field_names.medical_type]),
"release_dest": get_data_value(involve[field_names.release_dest]),
"safety_measures_use": get_data_value(involve[field_names.safety_measures_use]),
"late_deceased": get_data_value(involve[field_names.late_deceased]),
})
return involved_result
def import_vehicles(provider_code, vehicles, **kwargs):
logging.info("\tReading vehicles data from '%s'..." % os.path.basename(vehicles.name()))
vehicles_result = []
for vehicle in vehicles:
vehicles_result.append({
"accident_id": int(vehicle[field_names.id]),
"provider_code": int(provider_code),
"engine_volume": int(vehicle[field_names.engine_volume]),
"manufacturing_year": get_data_value(vehicle[field_names.manufacturing_year]),
"driving_directions": get_data_value(vehicle[field_names.driving_directions]),
"vehicle_status": get_data_value(vehicle[field_names.vehicle_status]),
"vehicle_attribution": get_data_value(vehicle[field_names.vehicle_attribution]),
"vehicle_type": get_data_value(vehicle[field_names.vehicle_type]),
"seats": get_data_value(vehicle[field_names.seats]),
"total_weight": get_data_value(vehicle[field_names.total_weight]),
})
return vehicles_result
def get_files(directory):
for name, filename in iteritems(cbs_files):
if name not in (STREETS, NON_URBAN_INTERSECTION, ACCIDENTS, INVOLVED, VEHICLES):
continue
files = [path for path in os.listdir(directory)
if filename.lower() in path.lower()]
amount = len(files)
if amount == 0:
raise ValueError("Not found: '%s'" % filename)
if amount > 1:
raise ValueError("Ambiguous: '%s'" % filename)
csv = CsvReader(os.path.join(directory, files[0]), encoding="cp1255")
if name == STREETS:
streets_map = {}
for settlement in itertools.groupby(csv, lambda street: street.get(field_names.settlement, "OTHER")):
key, val = tuple(settlement)
streets_map[key] = [{field_names.street_sign: x[field_names.street_sign],
field_names.street_name: x[field_names.street_name]} for x in val if
field_names.street_name in x and field_names.street_sign in x]
csv.close()
yield name, streets_map
elif name == NON_URBAN_INTERSECTION:
roads = {(x[field_names.road1], x[field_names.road2], x["KM"]): x[field_names.junction_name] for x in csv if
field_names.road1 in x and field_names.road2 in x}
csv.close()
yield ROADS, roads
elif name in (ACCIDENTS, INVOLVED, VEHICLES):
yield name, csv
def chunks(l, n, xrange):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i + n]
def import_to_datastore(directory, provider_code, batch_size):
"""
goes through all the files in a given directory, parses and commits them
"""
try: xrange
except NameError:
xrange = range
try:
assert batch_size > 0
files_from_cbs = dict(get_files(directory))
if len(files_from_cbs) == 0:
return 0
logging.info("Importing '{}'".format(directory))
started = datetime.now()
new_items = 0
all_existing_accidents_ids = set(map(lambda x: x[0], db.session.query(AccidentMarker.id).all()))
accidents = import_accidents(provider_code=provider_code, **files_from_cbs)
accidents = [accident for accident in accidents if accident['id'] not in all_existing_accidents_ids]
new_items += len(accidents)
for accidents_chunk in chunks(accidents, batch_size, xrange):
db.session.bulk_insert_mappings(AccidentMarker, accidents_chunk)
all_involved_accident_ids = set(map(lambda x: x[0], db.session.query(Involved.accident_id).all()))
involved = import_involved(provider_code=provider_code, **files_from_cbs)
involved = [x for x in involved if x['accident_id'] not in all_involved_accident_ids]
for involved_chunk in chunks(involved, batch_size, xrange):
db.session.bulk_insert_mappings(Involved, involved_chunk)
new_items += len(involved)
all_vehicles_accident_ids = set(map(lambda x: x[0], db.session.query(Vehicle.accident_id).all()))
vehicles = import_vehicles(provider_code=provider_code, **files_from_cbs)
vehicles = [x for x in vehicles if x['accident_id'] not in all_vehicles_accident_ids]
for vehicles_chunk in chunks(vehicles, batch_size, xrange):
db.session.bulk_insert_mappings(Vehicle, vehicles_chunk)
new_items += len(vehicles)
logging.info("\t{0} items in {1}".format(new_items, time_delta(started)))
return new_items
except ValueError as e:
failed_dirs[directory] = str(e)
return 0
def delete_invalid_entries():
"""
deletes all markers in the database with null latitude or longitude
first deletes from tables Involved and Vehicle, then from table AccidentMarker
"""
marker_ids_to_delete = db.session.query(AccidentMarker.id).filter(or_((AccidentMarker.longitude == None),
(AccidentMarker.latitude == None))).all()
marker_ids_to_delete = [acc_id[0] for acc_id in marker_ids_to_delete]
q = db.session.query(Involved).filter(Involved.accident_id.in_(marker_ids_to_delete))
if q.all():
print('deleting invalid entries from Involved')
q.delete(synchronize_session='fetch')
q = db.session.query(Vehicle).filter(Vehicle.accident_id.in_(marker_ids_to_delete))
if q.all():
print('deleting invalid entries from Vehicle')
q.delete(synchronize_session='fetch')
q = db.session.query(AccidentMarker).filter(AccidentMarker.id.in_(marker_ids_to_delete))
if q.all():
print('deleting invalid entries from AccidentMarker')
q.delete(synchronize_session='fetch')
db.session.commit()
def get_provider_code(directory_name=None):
if directory_name:
match = ACCIDENT_TYPE_REGEX.match(directory_name)
if match:
return int(match.groupdict()['type'])
ans = ""
while not ans.isdigit():
ans = six.moves.input("Directory provider code is invalid. Please enter a valid code: ")
if ans.isdigit():
return int(ans)
def main(specific_folder, delete_all, path, batch_size):
import_ui = ImporterUI(path, specific_folder, delete_all)
dir_name = import_ui.source_path()
if specific_folder:
dir_list = [dir_name]
else:
dir_list = glob.glob("{0}/*/*".format(dir_name))
# wipe all the AccidentMarker and Vehicle and Involved data first
if import_ui.is_delete_all():
truncate_tables(db, (Vehicle, Involved, AccidentMarker))
started = datetime.now()
total = 0
for directory in dir_list:
parent_directory = os.path.basename(os.path.dirname(os.path.join(os.pardir, directory)))
provider_code = get_provider_code(parent_directory)
total += import_to_datastore(directory, provider_code, batch_size)
delete_invalid_entries()
failed = ["\t'{0}' ({1})".format(directory, fail_reason) for directory, fail_reason in
iteritems(failed_dirs)]
logging.info("Finished processing all directories{0}{1}".format(", except:\n" if failed else "",
"\n".join(failed)))
logging.info("Total: {0} items in {1}".format(total, time_delta(started)))
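Two small details of the importer are worth a worked example: the CBS files encode the accident hour as a value from 1 to 96 (one slot per 15 minutes, hence `minutes = hour * 15 - 15` in `parse_date`), and `chunks` slices the parsed rows into fixed-size batches for `bulk_insert_mappings`. A standalone sketch of both (sample values only):

```python
def decode_quarter_hour(slot):
    """CBS hour slot (1..96) -> (hour, minute), as parse_date computes it."""
    minutes = slot * 15 - 15
    return minutes // 60, minutes % 60

assert decode_quarter_hour(1) == (0, 0)     # 00:00
assert decode_quarter_hour(5) == (1, 0)     # 01:00
assert decode_quarter_hour(96) == (23, 45)  # 23:45

def chunks(items, n):
    """Yield successive n-sized chunks, mirroring the helper above."""
    for i in range(0, len(items), n):
        yield items[i:i + n]

rows = list(range(7))
assert list(chunks(rows, 3)) == [[0, 1, 2], [3, 4, 5], [6]]
```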
avg_line_length 43.521739 | max_line_length 120 | alphanum_fraction 0.667999 | count_classes 0 | score_classes 0 | count_generators 1,650 | score_generators 0.078366 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 4,287 | score_documentation 0.20361
7a0d3a18b6c3bcab1db31cd7020fbecfa8d1cc2b | size 7,709 | ext py | lang Python | path src/tests/test_pagure_flask_api_project_delete_project.py | repo yifengyou/learn-pagure | head e54ba955368918c92ad2be6347b53bb2c24a228c | licenses ["Unlicense"] | max_stars_count null | max_issues_count null | max_forks_count null
# -*- coding: utf-8 -*-
"""
(c) 2020 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
from __future__ import unicode_literals, absolute_import
import datetime
import json
import unittest
import shutil
import sys
import tempfile
import os
import pygit2
from celery.result import EagerResult
from mock import patch, Mock
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import pagure.api
import pagure.flask_app
import pagure.lib.query
import tests
from pagure.lib.repo import PagureRepo
class PagureFlaskApiProjectDeleteProjecttests(tests.Modeltests):
""" Tests for the flask API of pagure for deleting projects """
maxDiff = None
def setUp(self):
super(PagureFlaskApiProjectDeleteProjecttests, self).setUp()
tests.create_projects(self.session)
tests.create_projects_git(os.path.join(self.path, "repos"), bare=True)
tests.create_projects_git(
os.path.join(self.path, "repos", "docs"), bare=True
)
tests.create_projects_git(
os.path.join(self.path, "repos", "tickets"), bare=True
)
tests.create_projects_git(
os.path.join(self.path, "repos", "requests"), bare=True
)
tests.add_readme_git_repo(os.path.join(self.path, "repos", "test.git"))
tests.create_tokens(self.session)
tests.create_tokens_acl(self.session)
tests.create_tokens(self.session, project_id=2, suffix="_test2")
tests.create_tokens_acl(self.session, token_id="aaabbbcccddd_test2")
tests.create_tokens(self.session, user_id=2, suffix="_foo")
tests.create_tokens_acl(self.session, token_id="aaabbbcccddd_foo")
project = pagure.lib.query.get_authorized_project(self.session, "test")
project.read_only = False
self.session.add(project)
self.session.commit()
def test_delete_project_no_header(self):
output = self.app.post("/api/0/invalid/delete")
self.assertEqual(output.status_code, 401)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
pagure.api.APIERROR.EINVALIDTOK.name, data["error_code"]
)
self.assertEqual(pagure.api.APIERROR.EINVALIDTOK.value, data["error"])
def test_delete_project_invalid_project(self):
headers = {"Authorization": "token aaabbbcccddd"}
output = self.app.post("/api/0/invalid/delete", headers=headers)
self.assertEqual(output.status_code, 404)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
pagure.api.APIERROR.ENOPROJECT.name, data["error_code"]
)
self.assertEqual(pagure.api.APIERROR.ENOPROJECT.value, data["error"])
def test_delete_project_invalid_token_project(self):
headers = {"Authorization": "token aaabbbcccddd"}
output = self.app.post("/api/0/test2/delete", headers=headers)
self.assertEqual(output.status_code, 401)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
pagure.api.APIERROR.EINVALIDTOK.name, data["error_code"]
)
self.assertEqual(pagure.api.APIERROR.EINVALIDTOK.value, data["error"])
def test_delete_project_read_only_project(self):
headers = {"Authorization": "token aaabbbcccddd_test2"}
output = self.app.post("/api/0/test2/delete", headers=headers)
self.assertEqual(output.status_code, 400)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(pagure.api.APIERROR.ENOCODE.name, data["error_code"])
error = "The ACLs of this project are being refreshed in the backend this prevents the project from being deleted. Please wait for this task to finish before trying again. Thanks!"
self.assertEqual(data["error"], error)
def test_delete_project_not_allowed(self):
headers = {"Authorization": "token aaabbbcccddd_foo"}
output = self.app.post("/api/0/test/delete", headers=headers)
self.assertEqual(output.status_code, 401)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
pagure.api.APIERROR.ENOTHIGHENOUGH.name, data["error_code"]
)
self.assertEqual(
pagure.api.APIERROR.ENOTHIGHENOUGH.value, data["error"]
)
@patch.dict("pagure.config.config", {"ENABLE_DEL_PROJECTS": False})
def test_delete_project_not_allowed_by_config(self):
headers = {"Authorization": "token aaabbbcccddd_test2"}
output = self.app.post("/api/0/test2/delete", headers=headers)
self.assertEqual(output.status_code, 404)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
pagure.api.APIERROR.ENOPROJECT.name, data["error_code"]
)
self.assertEqual(pagure.api.APIERROR.ENOPROJECT.value, data["error"])
def test_delete_project(self):
headers = {"Authorization": "token aaabbbcccddd"}
projects = pagure.lib.query.search_projects(session=self.session)
self.assertEqual(len(projects), 3)
for frag in [".", "docs", "tickets", "requests"]:
self.assertTrue(
os.path.exists(
os.path.join(self.path, "repos", frag, "test.git")
)
)
output = self.app.post("/api/0/test/delete", headers=headers)
self.assertEqual(output.status_code, 200)
data = json.loads(output.get_data(as_text=True))
for key in ["date_created", "date_modified"]:
data["project"][key] = "1595341690"
self.assertEqual(
data,
{
"message": "Project deleted",
"project": {
"access_groups": {
"admin": [],
"collaborator": [],
"commit": [],
"ticket": [],
},
"access_users": {
"admin": [],
"collaborator": [],
"commit": [],
"owner": ["pingou"],
"ticket": [],
},
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"custom_keys": [],
"date_created": "1595341690",
"date_modified": "1595341690",
"description": "test project #1",
"full_url": "http://localhost.localdomain/test",
"fullname": "test",
"id": 1,
"milestones": {},
"name": "test",
"namespace": None,
"parent": None,
"priorities": {},
"tags": [],
"url_path": "test",
"user": {
"fullname": "PY C",
"name": "pingou",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
},
},
)
projects = pagure.lib.query.search_projects(session=self.session)
self.assertEqual(len(projects), 2)
for frag in [".", "docs", "tickets", "requests"]:
self.assertFalse(
os.path.exists(
os.path.join(self.path, "repos", frag, "test.git")
)
)
| 37.42233
| 188
| 0.572578
| 7,138
| 0.925931
| 0
| 0
| 549
| 0.071215
| 0
| 0
| 1,721
| 0.223246
|
7a0ed4f58fe297f5e920c7a02179f8ba85d4d8b4
| 3,827
|
py
|
Python
|
06_reproducibility/workflow_pipeline/my_pipeline/pipeline/configs.py
|
fanchi/ml-design-patterns
|
6f686601d2385a11a517f8394324062ec6094e14
|
[
"Apache-2.0"
] | 1,149
|
2020-04-09T21:20:56.000Z
|
2022-03-31T02:41:53.000Z
|
06_reproducibility/workflow_pipeline/my_pipeline/pipeline/configs.py
|
dfinke/ml-design-patterns
|
6f686601d2385a11a517f8394324062ec6094e14
|
[
"Apache-2.0"
] | 28
|
2020-06-14T15:17:59.000Z
|
2022-02-17T10:13:08.000Z
|
06_reproducibility/workflow_pipeline/my_pipeline/pipeline/configs.py
|
dfinke/ml-design-patterns
|
6f686601d2385a11a517f8394324062ec6094e14
|
[
"Apache-2.0"
] | 296
|
2020-04-28T06:26:41.000Z
|
2022-03-31T06:52:33.000Z
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: this is adapted from the official TFX taxi pipeline sample
# You can find it here: https://github.com/tensorflow/tfx/tree/master/tfx/examples/chicago_taxi_pipeline
import os # pylint: disable=unused-import
# Pipeline name will be used to identify this pipeline
PIPELINE_NAME = 'my_pipeline'
# TODO: replace with your Google Cloud project
GOOGLE_CLOUD_PROJECT='your-cloud-project'
# TODO: replace with the GCS bucket where you'd like to store model artifacts
# Only include the bucket name here, without the 'gs://'
GCS_BUCKET_NAME = 'your-gcs-bucket'
# TODO: set your Google Cloud region below (or use us-central1)
GOOGLE_CLOUD_REGION = 'us-central1'
RUN_FN = 'pipeline.model.run_fn'
TRAIN_NUM_STEPS = 100
EVAL_NUM_STEPS = 100
BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS = [
'--project=' + GOOGLE_CLOUD_PROJECT,
'--temp_location=' + os.path.join('gs://', GCS_BUCKET_NAME, 'tmp'),
]
# The rate at which to sample rows from the Chicago Taxi dataset using BigQuery.
# The full taxi dataset is > 120M records. In the interest of resource
# savings and time, we've set the default for this example to be much smaller.
# Feel free to crank it up and process the full dataset!
_query_sample_rate = 0.0001 # Generate a 0.01% random sample.
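# NOTE (editorial, hedged): the sample rate above is not actually applied in the
# NOAA query defined below. A minimal sketch of how such a rate is usually applied
# in BigQuery -- the hashed key column (`sid`) is an assumption for illustration:
#
#   AND MOD(ABS(FARM_FINGERPRINT(sid)), 10000) < 10000 * 0.0001
#
# i.e. keep roughly `_query_sample_rate` of the rows by hashing a stable key column.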
# The query that extracts the examples from BigQuery. This sample uses
# a BigQuery public dataset from NOAA
BIG_QUERY_QUERY = """
SELECT
usa_wind,
usa_sshs
FROM
`bigquery-public-data.noaa_hurricanes.hurricanes`
WHERE
latitude > 19.5
AND latitude < 64.85
AND longitude > -161.755
AND longitude < -68.01
AND usa_wind IS NOT NULL
AND longitude IS NOT NULL
AND latitude IS NOT NULL
AND usa_sshs IS NOT NULL
AND usa_sshs > 0
"""
# A dict which contains the training job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
GCP_AI_PLATFORM_TRAINING_ARGS = {
'project': GOOGLE_CLOUD_PROJECT,
'region': 'us-central1',
# Starting from TFX 0.14, training on AI Platform uses custom containers:
# https://cloud.google.com/ml-engine/docs/containers-overview
# You can specify a custom container here. If not specified, TFX will use
# a public container image matching the installed version of TFX.
# Set your container name below.
'masterConfig': {
'imageUri': 'gcr.io/' + GOOGLE_CLOUD_PROJECT + '/tfx-pipeline'
},
# Note that if you do specify a custom container, ensure the entrypoint
# calls into TFX's run_executor script (tfx/scripts/run_executor.py)
}
# A dict which contains the serving job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
GCP_AI_PLATFORM_SERVING_ARGS = {
'model_name': PIPELINE_NAME,
'project_id': GOOGLE_CLOUD_PROJECT,
# The region to use when serving the model. See available regions here:
# https://cloud.google.com/ml-engine/docs/regions
'regions': [GOOGLE_CLOUD_REGION],
}
| 37.519608
| 104
| 0.736608
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,184
| 0.831983
|
7a0ee0d44c1b61902945942d2ba7e385c1519999
| 4,707
|
py
|
Python
|
tests/test_vcf_info_annotator.py
|
apaul7/VAtools
|
9e969cfdb605ec5e65a6aa60a416d7d74a8ff4fd
|
[
"MIT"
] | 15
|
2019-03-20T06:55:04.000Z
|
2022-02-22T06:16:56.000Z
|
tests/test_vcf_info_annotator.py
|
apaul7/VAtools
|
9e969cfdb605ec5e65a6aa60a416d7d74a8ff4fd
|
[
"MIT"
] | 27
|
2019-03-05T18:20:19.000Z
|
2022-03-04T14:58:36.000Z
|
tests/test_vcf_info_annotator.py
|
apaul7/VAtools
|
9e969cfdb605ec5e65a6aa60a416d7d74a8ff4fd
|
[
"MIT"
] | 4
|
2019-03-19T10:33:38.000Z
|
2022-02-23T13:40:33.000Z
|
import unittest
import sys
import os
import py_compile
from vatools import vcf_info_annotator
import tempfile
from filecmp import cmp
class VcfInfoEncoderTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
base_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
cls.executable = os.path.join(base_dir, 'vatools', 'vcf_info_annotator.py')
cls.test_data_dir = os.path.join(base_dir, 'tests', 'test_data')
def test_source_compiles(self):
self.assertTrue(py_compile.compile(self.executable))
def test_error_already_INFO_annotated(self):
with self.assertRaises(Exception) as context:
command = [
os.path.join(self.test_data_dir, 'input.vcf'),
os.path.join(self.test_data_dir, 'info.tsv'),
'CSQ',
'-d', "test",
'-f', 'Integer',
'-o', 'ztest.vcf'
]
vcf_info_annotator.main(command)
self.assertTrue('INFO already contains a CSQ field. Choose a different label, or use the --overwrite flag to retain this field and overwrite values' in str(context.exception))
def test_error_new_field_no_description(self):
with self.assertRaises(Exception) as context:
command = [
os.path.join(self.test_data_dir, 'input.vcf'),
os.path.join(self.test_data_dir, 'info.tsv'),
'TEST',
'-o', 'ztest.vcf'
]
vcf_info_annotator.main(command)
self.assertTrue("the --description and --value_format arguments are required unless updating/overwriting an existing field (with flag --overwrite)" in str(context.exception))
def test_overwrite_when_field_doesnt_exist(self):
with self.assertRaises(Exception) as context:
command = [
os.path.join(self.test_data_dir, 'input.vcf'),
os.path.join(self.test_data_dir, 'info.tsv'),
'TEST',
'-o', 'ztest.vcf',
'-w'
]
vcf_info_annotator.main(command)
self.assertTrue("INFO field TEST does not exist and thus cannot be overwritten!" in str(context.exception))
    def test_simple_case(self):
temp_path = tempfile.TemporaryDirectory()
print(temp_path)
command = [
os.path.join(self.test_data_dir, 'input.vcf'),
os.path.join(self.test_data_dir, 'info.tsv'),
'TEST',
'-d', "test",
'-f', 'Integer',
'-o', os.path.join(temp_path.name, 'info_annotation.vcf')
]
vcf_info_annotator.main(command)
self.assertTrue(cmp(os.path.join(self.test_data_dir, 'info_annotation.vcf'), os.path.join(temp_path.name, 'info_annotation.vcf')))
temp_path.cleanup()
def test_simple_string(self):
temp_path = tempfile.TemporaryDirectory()
print(temp_path)
command = [
os.path.join(self.test_data_dir, 'input.vcf'),
os.path.join(self.test_data_dir, 'info3.tsv'),
'TEST',
'-d', "test",
'-f', 'String',
'-o', os.path.join(temp_path.name, 'info3_output.vcf')
]
vcf_info_annotator.main(command)
self.assertTrue(cmp(os.path.join(self.test_data_dir, 'info3_output.vcf'), os.path.join(temp_path.name, 'info3_output.vcf')))
temp_path.cleanup()
def test_addwhile_overwriteset(self):
temp_path = tempfile.TemporaryDirectory()
print(temp_path)
command = [
os.path.join(self.test_data_dir, 'info2_input.vcf'),
os.path.join(self.test_data_dir, 'info2.tsv'),
'MQ0',
'-w',
'-o', os.path.join(temp_path.name, 'info2_output.vcf')
]
vcf_info_annotator.main(command)
self.assertTrue(cmp(os.path.join(self.test_data_dir, 'info2_output.vcf'), os.path.join(temp_path.name, 'info2_output.vcf')))
temp_path.cleanup()
def test_overwrite_existing_field(self):
temp_path = tempfile.TemporaryDirectory()
print(temp_path)
command = [
os.path.join(self.test_data_dir, 'input.vcf'),
os.path.join(self.test_data_dir, 'info.tsv'),
'CSQ',
'-d', "test",
'-f', 'Integer',
'-w',
'-o', os.path.join(temp_path.name, 'info_annotation.vcf')
]
vcf_info_annotator.main(command)
self.assertTrue(cmp(os.path.join(self.test_data_dir, 'info_overwrite.vcf'), os.path.join(temp_path.name, 'info_annotation.vcf')))
temp_path.cleanup()
| 40.930435
| 183
| 0.599745
| 4,571
| 0.971107
| 0
| 0
| 306
| 0.06501
| 0
| 0
| 972
| 0.206501
|
7a0f470f2ade1699e468a55aa0458f89b6b1d2f2
| 17,965
|
py
|
Python
|
bddtests/peer/admin_pb2.py
|
hacera-jonathan/fabric
|
3ba291e8fbb0246aa440e02cba54d16924649479
|
[
"Apache-2.0"
] | null | null | null |
bddtests/peer/admin_pb2.py
|
hacera-jonathan/fabric
|
3ba291e8fbb0246aa440e02cba54d16924649479
|
[
"Apache-2.0"
] | 1
|
2021-03-20T05:34:24.000Z
|
2021-03-20T05:34:24.000Z
|
bddtests/peer/admin_pb2.py
|
hacera-jonathan/fabric
|
3ba291e8fbb0246aa440e02cba54d16924649479
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: peer/admin.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='peer/admin.proto',
package='protos',
syntax='proto3',
serialized_pb=_b('\n\x10peer/admin.proto\x12\x06protos\x1a\x1bgoogle/protobuf/empty.proto\"\x9a\x01\n\x0cServerStatus\x12/\n\x06status\x18\x01 \x01(\x0e\x32\x1f.protos.ServerStatus.StatusCode\"Y\n\nStatusCode\x12\r\n\tUNDEFINED\x10\x00\x12\x0b\n\x07STARTED\x10\x01\x12\x0b\n\x07STOPPED\x10\x02\x12\n\n\x06PAUSED\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x12\x0b\n\x07UNKNOWN\x10\x05\"8\n\x0fLogLevelRequest\x12\x12\n\nlog_module\x18\x01 \x01(\t\x12\x11\n\tlog_level\x18\x02 \x01(\t\"9\n\x10LogLevelResponse\x12\x12\n\nlog_module\x18\x01 \x01(\t\x12\x11\n\tlog_level\x18\x02 \x01(\t2\xd5\x02\n\x05\x41\x64min\x12;\n\tGetStatus\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12=\n\x0bStartServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12<\n\nStopServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12H\n\x11GetModuleLogLevel\x12\x17.protos.LogLevelRequest\x1a\x18.protos.LogLevelResponse\"\x00\x12H\n\x11SetModuleLogLevel\x12\x17.protos.LogLevelRequest\x1a\x18.protos.LogLevelResponse\"\x00\x42+Z)github.com/hyperledger/fabric/protos/peerb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SERVERSTATUS_STATUSCODE = _descriptor.EnumDescriptor(
name='StatusCode',
full_name='protos.ServerStatus.StatusCode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNDEFINED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STARTED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOPPED', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PAUSED', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=123,
serialized_end=212,
)
_sym_db.RegisterEnumDescriptor(_SERVERSTATUS_STATUSCODE)
_SERVERSTATUS = _descriptor.Descriptor(
name='ServerStatus',
full_name='protos.ServerStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='protos.ServerStatus.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SERVERSTATUS_STATUSCODE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=212,
)
_LOGLEVELREQUEST = _descriptor.Descriptor(
name='LogLevelRequest',
full_name='protos.LogLevelRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='log_module', full_name='protos.LogLevelRequest.log_module', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_level', full_name='protos.LogLevelRequest.log_level', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=214,
serialized_end=270,
)
_LOGLEVELRESPONSE = _descriptor.Descriptor(
name='LogLevelResponse',
full_name='protos.LogLevelResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='log_module', full_name='protos.LogLevelResponse.log_module', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_level', full_name='protos.LogLevelResponse.log_level', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=272,
serialized_end=329,
)
_SERVERSTATUS.fields_by_name['status'].enum_type = _SERVERSTATUS_STATUSCODE
_SERVERSTATUS_STATUSCODE.containing_type = _SERVERSTATUS
DESCRIPTOR.message_types_by_name['ServerStatus'] = _SERVERSTATUS
DESCRIPTOR.message_types_by_name['LogLevelRequest'] = _LOGLEVELREQUEST
DESCRIPTOR.message_types_by_name['LogLevelResponse'] = _LOGLEVELRESPONSE
ServerStatus = _reflection.GeneratedProtocolMessageType('ServerStatus', (_message.Message,), dict(
DESCRIPTOR = _SERVERSTATUS,
__module__ = 'peer.admin_pb2'
# @@protoc_insertion_point(class_scope:protos.ServerStatus)
))
_sym_db.RegisterMessage(ServerStatus)
LogLevelRequest = _reflection.GeneratedProtocolMessageType('LogLevelRequest', (_message.Message,), dict(
DESCRIPTOR = _LOGLEVELREQUEST,
__module__ = 'peer.admin_pb2'
# @@protoc_insertion_point(class_scope:protos.LogLevelRequest)
))
_sym_db.RegisterMessage(LogLevelRequest)
LogLevelResponse = _reflection.GeneratedProtocolMessageType('LogLevelResponse', (_message.Message,), dict(
DESCRIPTOR = _LOGLEVELRESPONSE,
__module__ = 'peer.admin_pb2'
# @@protoc_insertion_point(class_scope:protos.LogLevelResponse)
))
_sym_db.RegisterMessage(LogLevelResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z)github.com/hyperledger/fabric/protos/peer'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
class AdminStub(object):
"""Interface exported by the server.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetStatus = channel.unary_unary(
'/protos.Admin/GetStatus',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=ServerStatus.FromString,
)
self.StartServer = channel.unary_unary(
'/protos.Admin/StartServer',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=ServerStatus.FromString,
)
self.StopServer = channel.unary_unary(
'/protos.Admin/StopServer',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=ServerStatus.FromString,
)
self.GetModuleLogLevel = channel.unary_unary(
'/protos.Admin/GetModuleLogLevel',
request_serializer=LogLevelRequest.SerializeToString,
response_deserializer=LogLevelResponse.FromString,
)
self.SetModuleLogLevel = channel.unary_unary(
'/protos.Admin/SetModuleLogLevel',
request_serializer=LogLevelRequest.SerializeToString,
response_deserializer=LogLevelResponse.FromString,
)
class AdminServicer(object):
"""Interface exported by the server.
"""
def GetStatus(self, request, context):
"""Return the serve status.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StartServer(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StopServer(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetModuleLogLevel(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetModuleLogLevel(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AdminServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetStatus,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=ServerStatus.SerializeToString,
),
'StartServer': grpc.unary_unary_rpc_method_handler(
servicer.StartServer,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=ServerStatus.SerializeToString,
),
'StopServer': grpc.unary_unary_rpc_method_handler(
servicer.StopServer,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=ServerStatus.SerializeToString,
),
'GetModuleLogLevel': grpc.unary_unary_rpc_method_handler(
servicer.GetModuleLogLevel,
request_deserializer=LogLevelRequest.FromString,
response_serializer=LogLevelResponse.SerializeToString,
),
'SetModuleLogLevel': grpc.unary_unary_rpc_method_handler(
servicer.SetModuleLogLevel,
request_deserializer=LogLevelRequest.FromString,
response_serializer=LogLevelResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'protos.Admin', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaAdminServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""Interface exported by the server.
"""
def GetStatus(self, request, context):
"""Return the serve status.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def StartServer(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def StopServer(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def GetModuleLogLevel(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def SetModuleLogLevel(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaAdminStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""Interface exported by the server.
"""
def GetStatus(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Return the serve status.
"""
raise NotImplementedError()
GetStatus.future = None
def StartServer(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
StartServer.future = None
def StopServer(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
StopServer.future = None
def GetModuleLogLevel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
GetModuleLogLevel.future = None
def SetModuleLogLevel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
SetModuleLogLevel.future = None
def beta_create_Admin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('protos.Admin', 'GetModuleLogLevel'): LogLevelRequest.FromString,
('protos.Admin', 'GetStatus'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
('protos.Admin', 'SetModuleLogLevel'): LogLevelRequest.FromString,
('protos.Admin', 'StartServer'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
('protos.Admin', 'StopServer'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
}
response_serializers = {
('protos.Admin', 'GetModuleLogLevel'): LogLevelResponse.SerializeToString,
('protos.Admin', 'GetStatus'): ServerStatus.SerializeToString,
('protos.Admin', 'SetModuleLogLevel'): LogLevelResponse.SerializeToString,
('protos.Admin', 'StartServer'): ServerStatus.SerializeToString,
('protos.Admin', 'StopServer'): ServerStatus.SerializeToString,
}
method_implementations = {
('protos.Admin', 'GetModuleLogLevel'): face_utilities.unary_unary_inline(servicer.GetModuleLogLevel),
('protos.Admin', 'GetStatus'): face_utilities.unary_unary_inline(servicer.GetStatus),
('protos.Admin', 'SetModuleLogLevel'): face_utilities.unary_unary_inline(servicer.SetModuleLogLevel),
('protos.Admin', 'StartServer'): face_utilities.unary_unary_inline(servicer.StartServer),
('protos.Admin', 'StopServer'): face_utilities.unary_unary_inline(servicer.StopServer),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Admin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('protos.Admin', 'GetModuleLogLevel'): LogLevelRequest.SerializeToString,
('protos.Admin', 'GetStatus'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('protos.Admin', 'SetModuleLogLevel'): LogLevelRequest.SerializeToString,
('protos.Admin', 'StartServer'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('protos.Admin', 'StopServer'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
}
response_deserializers = {
('protos.Admin', 'GetModuleLogLevel'): LogLevelResponse.FromString,
('protos.Admin', 'GetStatus'): ServerStatus.FromString,
('protos.Admin', 'SetModuleLogLevel'): LogLevelResponse.FromString,
('protos.Admin', 'StartServer'): ServerStatus.FromString,
('protos.Admin', 'StopServer'): ServerStatus.FromString,
}
cardinalities = {
'GetModuleLogLevel': cardinality.Cardinality.UNARY_UNARY,
'GetStatus': cardinality.Cardinality.UNARY_UNARY,
'SetModuleLogLevel': cardinality.Cardinality.UNARY_UNARY,
'StartServer': cardinality.Cardinality.UNARY_UNARY,
'StopServer': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'protos.Admin', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| 41.77907
| 1,109
| 0.74044
| 4,737
| 0.263679
| 0
| 0
| 0
| 0
| 0
| 0
| 4,990
| 0.277762
|
7a11c84dcc647f7a847b687bafc676e5c125037d
| 4,002
|
py
|
Python
|
tests/basic_test.py
|
c0fec0de/anycache
|
1848d9b85cd11c16c271284e0911ba5628391835
|
[
"Apache-2.0"
] | 13
|
2018-02-07T15:52:07.000Z
|
2022-02-18T12:37:40.000Z
|
tests/basic_test.py
|
c0fec0de/anycache
|
1848d9b85cd11c16c271284e0911ba5628391835
|
[
"Apache-2.0"
] | 2
|
2018-09-23T15:43:32.000Z
|
2021-09-21T00:34:55.000Z
|
tests/basic_test.py
|
c0fec0de/anycache
|
1848d9b85cd11c16c271284e0911ba5628391835
|
[
"Apache-2.0"
] | 1
|
2020-01-20T23:58:54.000Z
|
2020-01-20T23:58:54.000Z
|
from pathlib import Path
from tempfile import mkdtemp
from nose.tools import eq_
from anycache import AnyCache
from anycache import get_defaultcache
from anycache import anycache
def test_basic():
"""Basic functionality."""
@anycache()
def myfunc(posarg, kwarg=3):
# count the number of calls
myfunc.callcount += 1
return posarg + kwarg
myfunc.callcount = 0
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
eq_(myfunc(4, 2), 6)
eq_(myfunc.callcount, 2)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 2)
assert get_defaultcache().size > 0
def test_del():
ac = AnyCache()
@ac.anycache()
def myfunc(posarg, kwarg=3):
return posarg + kwarg
eq_(myfunc(4, 5), 9)
assert ac.size > 0
del ac
# we are not able to check anything here :-(
def test_cleanup():
"""Cleanup."""
ac = AnyCache()
cachedir = ac.cachedir
@ac.anycache()
def myfunc(posarg, kwarg=3):
# count the number of calls
myfunc.callcount += 1
return posarg + kwarg
myfunc.callcount = 0
# first use
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
eq_(myfunc(4, 2), 6)
eq_(myfunc.callcount, 2)
eq_(myfunc(4, 2), 6)
eq_(myfunc.callcount, 2)
assert ac.size > 0
# clear
ac.clear()
eq_(ac.size, 0)
eq_(tuple(cachedir.glob("*")), tuple())
# second use
eq_(myfunc(4, 4), 8)
eq_(myfunc.callcount, 3)
assert ac.size > 0
# clear twice
ac.clear()
eq_(ac.size, 0)
ac.clear()
eq_(ac.size, 0)
def test_size():
"""Size."""
ac = AnyCache()
@ac.anycache()
def myfunc(posarg, kwarg=3):
return posarg + kwarg
eq_(ac.size, 0)
eq_(len(tuple(ac.cachedir.glob("*.cache"))), 0)
eq_(myfunc(4, 5), 9)
eq_(len(tuple(ac.cachedir.glob("*.cache"))), 1)
size1 = ac.size
eq_(myfunc(4, 2), 6)
eq_(ac.size, 2 * size1)
eq_(len(tuple(ac.cachedir.glob("*.cache"))), 2)
def test_corrupt_cache():
"""Corrupted Cache."""
cachedir = Path(mkdtemp())
ac = AnyCache(cachedir=cachedir)
@ac.anycache()
def myfunc(posarg, kwarg=3):
myfunc.callcount += 1
return posarg + kwarg
myfunc.callcount = 0
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
# corrupt cache
cachefilepath = list(cachedir.glob("*.cache"))[0]
with open(str(cachefilepath), "w") as cachefile:
cachefile.write("foo")
# repair
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 2)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 2)
# corrupt dep
depfilepath = list(cachedir.glob("*.dep"))[0]
with open(str(depfilepath), "w") as depfile:
depfile.write("foo")
# repair
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 3)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 3)
ac.clear()
def test_cachedir():
    """Explicit cache directory."""
cachedir = Path(mkdtemp())
@anycache(cachedir=cachedir)
def myfunc(posarg, kwarg=3):
myfunc.callcount += 1
return posarg + kwarg
myfunc.callcount = 0
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
@anycache(cachedir=cachedir)
def myfunc(posarg, kwarg=3):
myfunc.callcount += 1
return posarg + kwarg
myfunc.callcount = 0
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 0)
def class_test():
class MyClass(object):
def __init__(self, posarg, kwarg=3):
self.posarg = posarg
self.kwarg = kwarg
@anycache()
def func(self, foo):
return self.posarg + self.kwarg + foo
a = MyClass(2, 4)
b = MyClass(1, 3)
eq_(a.func(6), 12)
eq_(a.func(6), 12)
eq_(b.func(6), 10)
eq_(b.func(6), 10)
eq_(a.func(6), 12)
| 21.063158
| 53
| 0.581709
| 232
| 0.057971
| 0
| 0
| 876
| 0.218891
| 0
| 0
| 342
| 0.085457
|
7a11f415ef1c8a456c66c6b816eed5e347dea42d
| 2,173
|
py
|
Python
|
self-paced-labs/vertex-ai/vertex-pipelines/tfx/tfx_taxifare_tips/model_training/model_runner.py
|
Glairly/introduction_to_tensorflow
|
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
|
[
"Apache-2.0"
] | 2
|
2022-01-06T11:52:57.000Z
|
2022-01-09T01:53:56.000Z
|
self-paced-labs/vertex-ai/vertex-pipelines/tfx/tfx_taxifare_tips/model_training/model_runner.py
|
Glairly/introduction_to_tensorflow
|
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
|
[
"Apache-2.0"
] | null | null | null |
self-paced-labs/vertex-ai/vertex-pipelines/tfx/tfx_taxifare_tips/model_training/model_runner.py
|
Glairly/introduction_to_tensorflow
|
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
|
[
"Apache-2.0"
] | null | null | null |
"""A run_fn method called by the TFX Trainer component."""
import os
import logging
from tfx import v1 as tfx
from tfx_taxifare_tips.model_training import defaults
from tfx_taxifare_tips.model_training import model_trainer
from tfx_taxifare_tips.model_training import model_exporter
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs. See
https://www.tensorflow.org/tfx/api_docs/python/tfx/v1/components/FnArgs.
"""
logging.info("Model Runner started...")
logging.info("fn_args: %s", fn_args)
logging.info("")
try:
log_dir = fn_args.model_run_dir
except KeyError:
log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), "logs")
hyperparameters = fn_args.hyperparameters
if not hyperparameters:
hyperparameters = {}
hyperparameters = defaults.update_hyperparameters(hyperparameters)
logging.info("Hyperparameter:")
logging.info(hyperparameters)
logging.info("")
logging.info("Model Runner executing model trainer...")
classifier = model_trainer.train(
data_accessor=fn_args.data_accessor,
train_data_dir=fn_args.train_files,
eval_data_dir=fn_args.eval_files,
tft_output_dir=fn_args.transform_output,
log_dir=log_dir,
hyperparameters=hyperparameters,
)
logging.info("Model Runner executing model evaluation...")
classifier = model_trainer.evaluate(
classifier=classifier,
data_accessor=fn_args.data_accessor,
eval_data_dir=fn_args.eval_files,
tft_output_dir=fn_args.transform_output,
hyperparameters=hyperparameters,
)
logging.info("Model Runner executing exporter...")
model_exporter.export_serving_model(
classifier=classifier,
serving_model_dir=fn_args.serving_model_dir,
raw_schema_location=fn_args.schema_path,
tft_output_dir=fn_args.transform_output,
)
logging.info("Model Runner completed.")
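# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# A minimal illustration of how a TFX pipeline might point its Trainer at this
# run_fn; the upstream component names (`transform`, `schema_gen`) and the step
# counts are assumptions for illustration only.
#
# trainer = tfx.components.Trainer(
#     run_fn="tfx_taxifare_tips.model_training.model_runner.run_fn",
#     examples=transform.outputs["transformed_examples"],
#     transform_graph=transform.outputs["transform_graph"],
#     schema=schema_gen.outputs["schema"],
#     train_args=tfx.proto.TrainArgs(num_steps=100),
#     eval_args=tfx.proto.EvalArgs(num_steps=100),
# )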
| 34.492063
| 83
| 0.703175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 523
| 0.240681
|
7a15cfeb891a079af5b1c667c60e264effefd0f3
| 4,602
|
py
|
Python
|
main.py
|
Lorn-Hukka/academy-record-sender
|
137ef9d1dff373662a046bc2a50d7dd5f4fad0ee
|
[
"MIT"
] | null | null | null |
main.py
|
Lorn-Hukka/academy-record-sender
|
137ef9d1dff373662a046bc2a50d7dd5f4fad0ee
|
[
"MIT"
] | null | null | null |
main.py
|
Lorn-Hukka/academy-record-sender
|
137ef9d1dff373662a046bc2a50d7dd5f4fad0ee
|
[
"MIT"
] | null | null | null |
import random, os, string, subprocess, shutil, requests
from discord import Webhook, RequestsWebhookAdapter, Embed
from dotenv import dotenv_values
import argparse, colorama
from colorama import Fore
class Settings():
def __init__(self):
for k, v in dotenv_values(".settings").items():
setattr(self, k, v)
class App():
def __init__(self, config):
self.config = config
self.webhook = Webhook.from_url(self.config.WEBHOOK, adapter=RequestsWebhookAdapter())
self.output_path = self.config.RECORDS_PATH + '\\output\\'
self.processed_path = self.config.RECORDS_PATH + '\\processed\\'
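    # Build a random password of the requested length from letters, digits, and punctuation.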
    def gen_pass(self, length):
        chars = string.ascii_letters + string.digits + "!#$%&()*+<=>?@[]^_|~"
        password = ''.join(random.choices(chars, k=length))
        return password
def _check_7zip(self):
if not os.path.isfile(self.config._7ZIP):
exit(f'{Fore.RED}WRONG path to 7ZIP executable. Program Exited.')
def _generate_dirs(self):
if not os.path.isdir(self.processed_path):
os.mkdir(self.processed_path)
            print(f'{Fore.YELLOW}Path for processed records not found. Created one for you.')
if not os.path.isdir(self.output_path):
os.mkdir(self.output_path)
print(f'{Fore.YELLOW}Output path not found. Created one for you.')
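    # Archive each new recording with 7-Zip under a fresh password, move the original
    # into processed/, and append the file/link passwords to the 'passwords' ledger.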
def process_files(self):
with open('passwords', 'a+', encoding="utf-8") as f:
for fn in os.listdir(self.config.RECORDS_PATH):
if fn.endswith(self.config.EXTENSION):
file_password, link_password = self.gen_pass(16), self.gen_pass(16)
command = [self.config._7ZIP, 'a -mx9 -mhe=on -y -r', f'-p"{file_password}"',
'--', f'"{self.output_path + fn[:-len(self.config.EXTENSION)]}.7z"', f'"{self.config.RECORDS_PATH}\\{fn}"']
subprocess.run(" ".join(command))
shutil.move(self.config.RECORDS_PATH + '\\' + fn, self.processed_path + fn)
f.write(f'F: {fn} | FP: {file_password} | LP: {link_password} | L: \n')
def send_2_discord(self):
data = None
with open('passwords', 'r', encoding="utf-8") as f:
data = [line.strip('\n').split(' | ') for line in f.readlines()]
with open('passwords', 'w+', encoding="utf-8") as f:
for line in data:
fn = line[0][2::].strip(' ')
file_password = line[1][3::].strip(' ')
link_password = line[2][3::].strip(' ')
link = line[3][2::].strip(' ')
if link == '':
print(f'{Fore.YELLOW}{fn} SKIPPED - No SHARE LINK specified.')
f.write(' | '.join(line) + '\n')
continue
if line[0][0] == '*':
f.write(' | '.join(line) + '\n')
continue
else:
f.write('*' + ' | '.join(line) + '\n')
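                # Discord embed (user-facing text is Polish): title = file name,
                # description = "If in doubt, ask on <#channel>", fields = recording
                # link, access password and file password, footer = sender name.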
msg = {
'title': f'{fn}',
'description': 'W razie wątpliwości pytać na <#809980920249319465>;',
'fields': [
{'name': 'Link do nagrania:', 'value': f'[Kliknij, aby się przenieść.]({link})', 'inline': False},
{'name': 'Hasło dostępu:', 'value': f'```{link_password}```', 'inline': True},
{'name': 'Hasło do pliku:', 'value': f'```{file_password}```', 'inline': True}
],
'footer': {
'text': f'~{self.config.NAME}', 'inline': True
}
}
self.webhook.send('Nowe nagranie zostało udostępnione.', username='Student.', embed=Embed().from_dict(msg),
avatar_url="https://cdn4.iconfinder.com/data/icons/science-131/64/265-512.png")
def run(self):
self._check_7zip()
self._generate_dirs()
self.process_files()
self.send_2_discord()
if __name__ == "__main__":
colorama.init(autoreset=True)
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="Display errors in console.", action="store_true", default=False)
args = parser.parse_args()
CONFIG = Settings()
app = App(CONFIG)
try:
app.run()
except Exception as e:
if args.verbose:
print(e)
        exit(f'{Fore.RED}An error occurred; the program will exit.')
| 40.368421
| 140
| 0.526945
| 3,950
| 0.856276
| 0
| 0
| 0
| 0
| 0
| 0
| 1,217
| 0.26382
|
7a1607febbd34072033d2922ea13752164e46320
| 357
|
py
|
Python
|
src/__init__.py
|
w9PcJLyb/GFootball
|
b271238bd0dc922787a0a9b984a8ae598cea2b2b
|
[
"Apache-2.0"
] | null | null | null |
src/__init__.py
|
w9PcJLyb/GFootball
|
b271238bd0dc922787a0a9b984a8ae598cea2b2b
|
[
"Apache-2.0"
] | null | null | null |
src/__init__.py
|
w9PcJLyb/GFootball
|
b271238bd0dc922787a0a9b984a8ae598cea2b2b
|
[
"Apache-2.0"
] | null | null | null |
from .board import Board
from .slide import slide_action
from .corner import corner_action
from .control import control_action
from .penalty import penalty_action
from .throwin import throwin_action
from .kickoff import kickoff_action
from .goalkick import goalkick_action
from .freekick import freekick_action
from .without_ball import without_ball_action
| 32.454545
| 45
| 0.859944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
7a1ab771a442031e1729dd19987c53780afb2187
| 3,447
|
py
|
Python
|
tests/bin/test_tcex_list.py
|
phuerta-tc/tcex
|
4a4e800e1a6114c1fde663f8c3ab7a1d58045c79
|
[
"Apache-2.0"
] | null | null | null |
tests/bin/test_tcex_list.py
|
phuerta-tc/tcex
|
4a4e800e1a6114c1fde663f8c3ab7a1d58045c79
|
[
"Apache-2.0"
] | null | null | null |
tests/bin/test_tcex_list.py
|
phuerta-tc/tcex
|
4a4e800e1a6114c1fde663f8c3ab7a1d58045c79
|
[
"Apache-2.0"
] | null | null | null |
"""Bin Testing"""
# standard library
from importlib.machinery import SourceFileLoader
from importlib.util import module_from_spec, spec_from_loader
from typing import List
# third-party
from typer.testing import CliRunner
# dynamically load bin/tcex file
spec = spec_from_loader('app', SourceFileLoader('app', 'bin/tcex'))
tcex_cli = module_from_spec(spec)
spec.loader.exec_module(tcex_cli)
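# (The CLI entry point has no .py extension, so it is loaded explicitly with
# SourceFileLoader instead of a normal import.)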
# get app from bin/tcex CLI script
app = tcex_cli.app
# get instance of typer CliRunner for test case
runner = CliRunner()
class TestTcexCliList:
"""Tcex CLI Testing."""
    def setup_method(self):
        """Configure setup before each test."""
    def teardown_method(self):
        """Configure teardown after each test."""
@staticmethod
def _run_command(args: List[str]) -> str:
"""Test Case"""
result = runner.invoke(app, args)
return result
def test_tcex_list(self) -> None:
"""Test Case"""
result = self._run_command(['list'])
assert result.exit_code == 0, result.stdout
# spot check a few lines of outputs
assert 'Organization Templates' in result.stdout
assert 'Playbook Templates' in result.stdout
# TODO: [med] update this once template is done
# assert 'API Service Templates' in result.stdout
# assert 'Trigger Service Templates' in result.stdout
# assert 'Webhook Trigger Service Templates' in result.stdout
# TODO: [med] update this once template is done
# def test_tcex_list_external_api_service(self) -> None:
# """Test Case"""
# result = self._run_command(['list', '--type', 'api_service'])
# assert result.exit_code == 0, result.stdout
# # spot check a few lines of outputs
# assert 'basic' in result.stdout
# TODO: [med] update this once template is done
# def test_tcex_list_external_basic(self) -> None:
# """Test Case"""
# result = self._run_command(['list', '--type', 'external'])
# assert result.exit_code == 0, result.stdout
# # spot check a few lines of outputs
# assert 'basic' in result.stdout
def test_tcex_list_organization_basic(self) -> None:
"""Test Case"""
result = self._run_command(['list', '--type', 'organization'])
assert result.exit_code == 0, result.stdout
# spot check a few lines of outputs
assert 'basic' in result.stdout
def test_tcex_list_playbook_basic(self) -> None:
"""Test Case"""
result = self._run_command(['list', '--type', 'playbook'])
assert result.exit_code == 0, f'{result.stdout}'
# spot check a few lines of outputs
assert 'basic' in result.stdout
# TODO: [med] update this once template is done
# def test_tcex_list_trigger_basic(self) -> None:
# """Test Case"""
# result = self._run_command(['list', '--type', 'trigger_service'])
# assert result.exit_code == 0, result.stdout
# # spot check a few lines of outputs
# assert 'basic' in result.stdout
# TODO: [med] update this once template is done
# def test_tcex_list_webhook_trigger_basic(self) -> None:
# """Test Case"""
# result = self._run_command(['list', '--type', 'webhook_trigger_service'])
# assert result.exit_code == 0, result.stdout
# # spot check a few lines of outputs
# assert 'basic' in result.stdout
| 33.794118
| 83
| 0.642878
| 2,926
| 0.848854
| 0
| 0
| 147
| 0.042646
| 0
| 0
| 2,070
| 0.600522
|
7a1abf4048e07e8bc9343e0dfe167284107c6c27
| 16,752
|
py
|
Python
|
sdk/python/pulumi_aws/ec2/managed_prefix_list.py
|
jen20/pulumi-aws
|
172e00c642adc03238f89cc9c5a16b914a77c2b1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/managed_prefix_list.py
|
jen20/pulumi-aws
|
172e00c642adc03238f89cc9c5a16b914a77c2b1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/managed_prefix_list.py
|
jen20/pulumi-aws
|
172e00c642adc03238f89cc9c5a16b914a77c2b1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ManagedPrefixListArgs', 'ManagedPrefixList']
@pulumi.input_type
class ManagedPrefixListArgs:
def __init__(__self__, *,
address_family: pulumi.Input[str],
max_entries: pulumi.Input[int],
entries: Optional[pulumi.Input[Sequence[pulumi.Input['ManagedPrefixListEntryArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ManagedPrefixList resource.
:param pulumi.Input[str] address_family: The address family (`IPv4` or `IPv6`) of
this prefix list.
:param pulumi.Input[int] max_entries: The maximum number of entries that
this prefix list can contain.
:param pulumi.Input[Sequence[pulumi.Input['ManagedPrefixListEntryArgs']]] entries: Can be specified multiple times for each prefix list entry.
Each entry block supports fields documented below. Different entries may have
overlapping CIDR blocks, but a particular CIDR should not be duplicated.
:param pulumi.Input[str] name: The name of this resource. The name must not start with `com.amazonaws`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to this resource.
"""
pulumi.set(__self__, "address_family", address_family)
pulumi.set(__self__, "max_entries", max_entries)
if entries is not None:
pulumi.set(__self__, "entries", entries)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="addressFamily")
def address_family(self) -> pulumi.Input[str]:
"""
The address family (`IPv4` or `IPv6`) of
this prefix list.
"""
return pulumi.get(self, "address_family")
@address_family.setter
def address_family(self, value: pulumi.Input[str]):
pulumi.set(self, "address_family", value)
@property
@pulumi.getter(name="maxEntries")
def max_entries(self) -> pulumi.Input[int]:
"""
The maximum number of entries that
this prefix list can contain.
"""
return pulumi.get(self, "max_entries")
@max_entries.setter
def max_entries(self, value: pulumi.Input[int]):
pulumi.set(self, "max_entries", value)
@property
@pulumi.getter
def entries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ManagedPrefixListEntryArgs']]]]:
"""
Can be specified multiple times for each prefix list entry.
Each entry block supports fields documented below. Different entries may have
overlapping CIDR blocks, but a particular CIDR should not be duplicated.
"""
return pulumi.get(self, "entries")
@entries.setter
def entries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ManagedPrefixListEntryArgs']]]]):
pulumi.set(self, "entries", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of this resource. The name must not start with `com.amazonaws`.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to this resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class ManagedPrefixList(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_family: Optional[pulumi.Input[str]] = None,
entries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedPrefixListEntryArgs']]]]] = None,
max_entries: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a managed prefix list resource.
> **NOTE on `max_entries`:** When you reference a Prefix List in a resource,
the maximum number of entries for the prefix lists counts as the same number of rules
or entries for the resource. For example, if you create a prefix list with a maximum
of 20 entries and you reference that prefix list in a security group rule, this counts
as 20 rules for the security group.
## Example Usage
Basic usage
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2.ManagedPrefixList("example",
address_family="IPv4",
max_entries=5,
entries=[
aws.ec2.ManagedPrefixListEntryArgs(
cidr=aws_vpc["example"]["cidr_block"],
description="Primary",
),
aws.ec2.ManagedPrefixListEntryArgs(
cidr=aws_vpc_ipv4_cidr_block_association["example"]["cidr_block"],
description="Secondary",
),
],
tags={
"Env": "live",
})
```
## Import
Prefix Lists can be imported using the `id`, e.g.
```sh
$ pulumi import aws:ec2/managedPrefixList:ManagedPrefixList default pl-0570a1d2d725c16be
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address_family: The address family (`IPv4` or `IPv6`) of
this prefix list.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedPrefixListEntryArgs']]]] entries: Can be specified multiple times for each prefix list entry.
Each entry block supports fields documented below. Different entries may have
overlapping CIDR blocks, but a particular CIDR should not be duplicated.
:param pulumi.Input[int] max_entries: The maximum number of entries that
this prefix list can contain.
:param pulumi.Input[str] name: The name of this resource. The name must not start with `com.amazonaws`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to this resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ManagedPrefixListArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a managed prefix list resource.
> **NOTE on `max_entries`:** When you reference a Prefix List in a resource,
the maximum number of entries for the prefix lists counts as the same number of rules
or entries for the resource. For example, if you create a prefix list with a maximum
of 20 entries and you reference that prefix list in a security group rule, this counts
as 20 rules for the security group.
## Example Usage
Basic usage
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2.ManagedPrefixList("example",
address_family="IPv4",
max_entries=5,
entries=[
aws.ec2.ManagedPrefixListEntryArgs(
cidr=aws_vpc["example"]["cidr_block"],
description="Primary",
),
aws.ec2.ManagedPrefixListEntryArgs(
cidr=aws_vpc_ipv4_cidr_block_association["example"]["cidr_block"],
description="Secondary",
),
],
tags={
"Env": "live",
})
```
## Import
Prefix Lists can be imported using the `id`, e.g.
```sh
$ pulumi import aws:ec2/managedPrefixList:ManagedPrefixList default pl-0570a1d2d725c16be
```
:param str resource_name: The name of the resource.
:param ManagedPrefixListArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ManagedPrefixListArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_family: Optional[pulumi.Input[str]] = None,
entries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedPrefixListEntryArgs']]]]] = None,
max_entries: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if address_family is None and not opts.urn:
raise TypeError("Missing required property 'address_family'")
__props__['address_family'] = address_family
__props__['entries'] = entries
if max_entries is None and not opts.urn:
raise TypeError("Missing required property 'max_entries'")
__props__['max_entries'] = max_entries
__props__['name'] = name
__props__['tags'] = tags
__props__['arn'] = None
__props__['owner_id'] = None
__props__['version'] = None
super(ManagedPrefixList, __self__).__init__(
'aws:ec2/managedPrefixList:ManagedPrefixList',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
address_family: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
entries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedPrefixListEntryArgs']]]]] = None,
max_entries: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[int]] = None) -> 'ManagedPrefixList':
"""
Get an existing ManagedPrefixList resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address_family: The address family (`IPv4` or `IPv6`) of
this prefix list.
:param pulumi.Input[str] arn: The ARN of the prefix list.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedPrefixListEntryArgs']]]] entries: Can be specified multiple times for each prefix list entry.
Each entry block supports fields documented below. Different entries may have
overlapping CIDR blocks, but a particular CIDR should not be duplicated.
:param pulumi.Input[int] max_entries: The maximum number of entries that
this prefix list can contain.
:param pulumi.Input[str] name: The name of this resource. The name must not start with `com.amazonaws`.
:param pulumi.Input[str] owner_id: The ID of the AWS account that owns this prefix list.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to this resource.
:param pulumi.Input[int] version: The latest version of this prefix list.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["address_family"] = address_family
__props__["arn"] = arn
__props__["entries"] = entries
__props__["max_entries"] = max_entries
__props__["name"] = name
__props__["owner_id"] = owner_id
__props__["tags"] = tags
__props__["version"] = version
return ManagedPrefixList(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressFamily")
def address_family(self) -> pulumi.Output[str]:
"""
The address family (`IPv4` or `IPv6`) of
this prefix list.
"""
return pulumi.get(self, "address_family")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN of the prefix list.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def entries(self) -> pulumi.Output[Optional[Sequence['outputs.ManagedPrefixListEntry']]]:
"""
Can be specified multiple times for each prefix list entry.
Each entry block supports fields documented below. Different entries may have
overlapping CIDR blocks, but a particular CIDR should not be duplicated.
"""
return pulumi.get(self, "entries")
@property
@pulumi.getter(name="maxEntries")
def max_entries(self) -> pulumi.Output[int]:
"""
The maximum number of entries that
this prefix list can contain.
"""
return pulumi.get(self, "max_entries")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of this resource. The name must not start with `com.amazonaws`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> pulumi.Output[str]:
"""
The ID of the AWS account that owns this prefix list.
"""
return pulumi.get(self, "owner_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to this resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def version(self) -> pulumi.Output[int]:
"""
The latest version of this prefix list.
"""
return pulumi.get(self, "version")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
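# Illustrative usage sketch (not part of the generated provider code above): a
# user program would normally import an existing prefix list via the get()
# classmethod documented above. The logical name and the "pl-..." ID below are
# hypothetical placeholders; this would live in a Pulumi program, not in the SDK.
def _example_lookup():
    import pulumi
    import pulumi_aws as aws
    existing = aws.ec2.ManagedPrefixList.get(
        "imported-prefix-list",      # hypothetical logical resource name
        id="pl-0123456789abcdef0",   # hypothetical provider-assigned ID
    )
    pulumi.export("prefix_list_arn", existing.arn)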
| 41.465347
| 168
| 0.621299
| 16,283
| 0.972003
| 0
| 0
| 13,278
| 0.792622
| 0
| 0
| 9,013
| 0.538025
|
7a1b3ef788466c80c3a4e53bf1538ad6b91df51a
| 1,847
|
py
|
Python
|
scripts/ann_architectures/mnist/lenet5.py
|
qian-liu/snn_toolbox
|
9693647f9b2421a4f1ab789a97cc19fd17781e87
|
[
"MIT"
] | null | null | null |
scripts/ann_architectures/mnist/lenet5.py
|
qian-liu/snn_toolbox
|
9693647f9b2421a4f1ab789a97cc19fd17781e87
|
[
"MIT"
] | null | null | null |
scripts/ann_architectures/mnist/lenet5.py
|
qian-liu/snn_toolbox
|
9693647f9b2421a4f1ab789a97cc19fd17781e87
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""LeNet for MNIST"""
import os
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint, TensorBoard
from snntoolbox.parsing.utils import \
get_quantized_activation_function_from_string
from snntoolbox.utils.utils import ClampedReLU
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32') / 255.
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32') / 255.
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
# nonlinearity = get_quantized_activation_function_from_string('relu_Q1.4')
# nonlinearity = ClampedReLU
nonlinearity = 'relu'
model = Sequential()
model.add(Conv2D(6, (5, 5), input_shape=(1, 28, 28), activation=nonlinearity))
model.add(MaxPooling2D())
model.add(Conv2D(16, (5, 5), activation=nonlinearity))
model.add(MaxPooling2D())
model.add(Dropout(0.5))
model.add(Conv2D(120, (5, 5), padding='same', activation=nonlinearity))
model.add(Flatten())
model.add(Dense(84, activation=nonlinearity))
model.add(Dense(10, activation='softmax'))
model.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])
path = '/home/rbodo/.snntoolbox/data/mnist/cnn/lenet5/keras/gradients'
checkpoint = ModelCheckpoint('weights.{epoch:02d}-{val_acc:.2f}.h5', 'val_acc')
gradients = TensorBoard(os.path.join(path, 'logs'), 2, write_grads=True)
model.fit(X_train, Y_train, validation_data=(X_test, Y_test),
callbacks=[checkpoint, gradients])
score = model.evaluate(X_test, Y_test)
print('Test score:', score[0])
print('Test accuracy:', score[1])
model.save(os.path.join(path, '{:2.2f}.h5'.format(score[1]*100)))
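# Optional follow-up sketch (not in the original script): reload the weights
# saved above and run inference on a few test digits. This assumes the same
# Keras version and that `path` still points at the directory used by model.save().
from keras.models import load_model
reloaded = load_model(os.path.join(path, '{:2.2f}.h5'.format(score[1] * 100)))
predictions = reloaded.predict(X_test[:5])
print('Predicted classes:', predictions.argmax(axis=-1))
print('True classes:', Y_test[:5].argmax(axis=-1))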
| 33.581818
| 79
| 0.750947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 376
| 0.203573
|
7a1eab82419109b15e6baf92f1df08cd9c6fa14b
| 856
|
py
|
Python
|
class_exercises/using_numpy.py
|
Eddz7/astr-19
|
380c6b45762e0207cd6c237fa28a4d796b1aef94
|
[
"MIT"
] | null | null | null |
class_exercises/using_numpy.py
|
Eddz7/astr-19
|
380c6b45762e0207cd6c237fa28a4d796b1aef94
|
[
"MIT"
] | 1
|
2022-03-31T17:57:17.000Z
|
2022-03-31T17:57:17.000Z
|
class_exercises/using_numpy.py
|
Eddz7/astr-19
|
380c6b45762e0207cd6c237fa28a4d796b1aef94
|
[
"MIT"
] | null | null | null |
import numpy as np
x = 1.0 #define a float
y = 2.0 #define another float
#trigonometry
print(f"np.sin({x}) = {np.sin(x)}") #sin(x)
print(f"np.cos({x}) = {np.cos(x)}") #cos(x)
print(f"np.tan({x}) = {np.tan(x)}") #tan(x)
print(f"np.arcsin({x}) = {np.arcsin(x)}") #arcsin(x)
print(f"np.arccos({x}) = {np.arccos(x)}") #arccos(x)
print(f"np.arctan({x}) = {np.arctan(x)}") #arctan(x)
print(f"np.arctan2({x}) = {np.arctan2(x,y)}") #arctan(x/y)
print(f"np.rad2deg({x}) = {np.rad2deg(x)}") #convert rad to degree
#hyperbolic functions
print(f"np.sinh({x}) = {np.sinh(x)}") #sinh(x)
print(f"np.cosh({x}) = {np.cosh(x)}") #cosh(x)
print(f"np.tanh({x}) = {np.tanh(x)}") #tanh(x)
print(f"np.arcsinh({x}) = {np.arcsinh(x)}") #arcsinh(x)
print(f"np.arccosh({x}) = {np.arccosh(x)}") #arccosh(x)
print(f"np.arctanh({x}) = {np.arctanh(x)}") #arctanh(x)
| 40.761905
| 67
| 0.580607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 687
| 0.80257
|
7a1ed1421848b1354b08c81026945785b3714d10
| 10,544
|
py
|
Python
|
amy/workshops/migrations/0158_curriculum_workshoprequest.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 53
|
2015-01-10T17:39:19.000Z
|
2019-06-12T17:36:34.000Z
|
amy/workshops/migrations/0158_curriculum_workshoprequest.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 1,176
|
2015-01-02T06:32:47.000Z
|
2019-06-18T11:57:47.000Z
|
amy/workshops/migrations/0158_curriculum_workshoprequest.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 44
|
2015-01-03T15:08:56.000Z
|
2019-06-09T05:33:08.000Z
|
# Generated by Django 2.1.2 on 2018-10-27 15:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('workshops', '0157_auto_20181006_0859'),
]
operations = [
migrations.CreateModel(
name='Curriculum',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=True)),
('slug', models.CharField(default='', help_text="Use computer-friendly text here, e.g. 'dc-ecology-r'.", max_length=40, unique=True, verbose_name='Curriculum ID')),
('name', models.CharField(default='', help_text="Use user-friendly language, e.g. 'Data Carpentry (Ecology with R)'.", max_length=200, unique=True, verbose_name='Curriculum name')),
('unknown', models.BooleanField(blank=True, default=False, help_text="Mark this curriculum record as 'I don't know yet', or 'Unknown', or 'Not sure yet'. There can be only one such record in the database.", verbose_name='Unknown entry')),
],
options={
'verbose_name': 'Curriculum',
'verbose_name_plural': 'Curricula',
},
),
migrations.CreateModel(
name='WorkshopRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('last_updated_at', models.DateTimeField(auto_now=True, null=True)),
('data_privacy_agreement', models.BooleanField(default=False, verbose_name='I have read and agree to <a href="https://docs.carpentries.org/topic_folders/policies/privacy.html", target="_blank">the data privacy policy</a> of The Carpentries.')),
                ('code_of_conduct_agreement', models.BooleanField(default=False, verbose_name='I agree to abide by The Carpentries\' <a target="_blank" href="https://docs.carpentries.org/topic_folders/policies/code-of-conduct.html">Code of Conduct</a>.')),
('host_responsibilities', models.BooleanField(default=False, verbose_name='I understand <a href="https://docs.carpentries.org/topic_folders/hosts_instructors/index.html">the responsibilities of the workshop host</a>, including recruiting local helpers to support the workshop (1 helper for every 8-10 learners).')),
('state', models.CharField(choices=[('p', 'Pending'), ('d', 'Discarded'), ('a', 'Accepted')], default='p', max_length=1)),
('personal', models.CharField(max_length=255, verbose_name='Personal (given) name')),
('family', models.CharField(max_length=255, verbose_name='Family name (surname)')),
('email', models.EmailField(max_length=254, verbose_name='Email address')),
('institution_name', models.CharField(blank=True, default='', max_length=255, verbose_name="Name of institution you're affiliated with")),
('institution_department', models.CharField(blank=True, default='', max_length=255, verbose_name='Department/school affiliation (if applicable)')),
('location', models.CharField(default='', help_text='City, province/state.', max_length=255, verbose_name='Location')),
('country', django_countries.fields.CountryField(max_length=2, verbose_name='Country')),
('part_of_conference', models.BooleanField(help_text='We can manage registration and other coordination for our workshop, but not other conference activities.', verbose_name='Is this workshop part of conference or larger event?')),
('conference_details', models.CharField(blank=True, default='', help_text='Name, description (if applicable).', max_length=255, verbose_name='Conference details')),
('preferred_dates', models.CharField(default='', help_text='Because we need to coordinate with instructors, a minimum of 2-3 months lead time is required for workshop planning.', max_length=255, verbose_name='Preferred dates or date range')),
('number_attendees', models.CharField(choices=[('10-40', '10-40 (one room, two instructors)'), ('40-80', '40-80 (two rooms, four instructors)'), ('80-120', '80-120 (three rooms, six instructors)')], default='10-40', help_text="This number doesn't need to be precise, but will help us decide how many instructors your workshop will need. Each workshop must have at least two instructors.", max_length=15, verbose_name='Number of attendees')),
('domains_other', models.CharField(blank=True, default='', max_length=255, verbose_name='Other domains')),
('audience_description', models.TextField(verbose_name='Please describe your anticipated audience, including their experience, background, and goals')),
('organization_type', models.CharField(choices=[('self', 'Self-organized'), ('central', 'Centrally-organized')], default=None, max_length=15, verbose_name='Will this be a self-organized or centrally-organized workshop?')),
('self_organized_github', models.CharField(blank=True, help_text='Please provide URL.', max_length=255, verbose_name='Link to workshop GitHub page')),
('centrally_organized_fee', models.CharField(blank=True, choices=[('', 'Not applicable.'), ('nonprofit', 'I am with a government site, university, or other nonprofit. I understand the workshop fee of US$2500, and agree to follow through on The Carpentries invoicing process.'), ('forprofit', 'I am with a corporate or for-profit site. I understand The Carpentries staff will contact me about workshop fees. I will follow through on The Carpentries invoicing process for the agreed upon fee.'), ('member', 'I am with a Member Organisation so the workshop fee does not apply (Instructor travel costs will still apply).'), ('waiver', 'I am requesting a waiver of the workshop fee (Instructor travel costs will still apply).')], default='', max_length=20, verbose_name='Which of the following applies to your payment for the administrative fee?')),
('waiver_circumstances', models.TextField(blank=True, help_text='Required only if you request a waiver.', verbose_name='Please explain the circumstances for your waiver request')),
('travel_expences_agreement', models.BooleanField(default=False, verbose_name='Regardless of the fee due to The Carpentries, I understand I am also responsible for travel costs for the Instructors which can include airfare, ground travel, hotel, and meals/incidentals. I understand local Instructors will be prioritized but not guaranteed. Instructor travel costs are managed directly between the host site and the Instructors, not through The Carpentries. I will share detailed information regarding policies and procedures for travel arrangements with instructors. All reimbursements will be completed within 60 days of the workshop.')),
('travel_expences_management', models.CharField(choices=[('booked', 'Hotel and airfare will be booked by site; ground travel and meals/incidentals will be reimbursed within 60 days.'), ('reimbursed', 'All expenses will be booked by instructors and reimbursed within 60 days.'), ('', 'Other:')], max_length=20, verbose_name='How will you manage travel expenses for Carpentries Instructors?')),
('travel_expences_management_other', models.CharField(blank=True, default='', max_length=255, verbose_name='Other travel expences management')),
('comment', models.TextField(blank=True, verbose_name='Is there anything else you would like to share with us?')),
                ('academic_levels', models.ManyToManyField(help_text="If you know the academic level(s) of your attendees, indicate them here.", to='workshops.AcademicLevel', verbose_name="Attendees' academic level / career stage")),
('assigned_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('computing_levels', models.ManyToManyField(help_text="Indicate the attendees' level of computing experience, if known. We will ask attendees to fill in a skills survey before the workshop, so this answer can be an approximation.", to='workshops.ComputingExperienceLevel', verbose_name="Attendees' level of computing experience")),
('domains', models.ManyToManyField(help_text="The attendees' academic field(s) of study, if known.", to='workshops.KnowledgeDomain', verbose_name='Domains or topic of interest for target audience')),
('institution', models.ForeignKey(blank=True, help_text="If your institution isn't on the list, enter its name in the field below.", null=True, on_delete=django.db.models.deletion.PROTECT, to='workshops.Organization', verbose_name='Institutional affiliation')),
('language', models.ForeignKey(help_text='Our workshops are offered primarily in English, with a few of our lessons available in Spanish. While materials are mainly in English, we know it can be valuable to have an instructor who speaks the native language of the learners. We will attempt to locate Instructors speaking a particular language, but cannot guarantee the availability of non-English speaking Instructors.', on_delete=django.db.models.deletion.PROTECT, to='workshops.Language', verbose_name='Language')),
('requested_workshop_types', models.ManyToManyField(help_text="If your learners are new to programming and primarily interested in working with data, Data Carpentry is likely the best choice. If your learners are interested in learning more about programming, including version control and automation, Software Carpentry is likely the best match. Please visit the <a href='https://software-carpentry.org/lessons/'>Software Carpentry lessons page</a> or the <a href='http://www.datacarpentry.org/lessons/'>Data Carpentry lessons page</a> for more information about any of our lessons. If you’re not sure and would like to discuss with us, please select the 'Not sure' option below.", limit_choices_to={'active': True}, to='workshops.Curriculum', verbose_name='Which Carpentry workshop are you requesting?')),
],
options={
'ordering': ['created_at'],
},
),
]
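# Illustrative sketch only (not part of the migration): once this migration is
# applied, the new Curriculum model can be used through the ORM roughly as
# below. The import path `workshops.models` is an assumption based on the app
# label above; code running inside migrations should use apps.get_model() instead.
def _example_usage():
    from workshops.models import Curriculum
    swc, created = Curriculum.objects.get_or_create(
        slug='swc-python',
        defaults={'name': 'Software Carpentry (Python)', 'active': True},
    )
    return swc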
| 142.486486
| 860
| 0.71434
| 10,356
| 0.981984
| 0
| 0
| 0
| 0
| 0
| 0
| 6,420
| 0.608762
|
7a1ef72332e8f8f0f2089763d5744f430bdbbf1f
| 2,365
|
py
|
Python
|
log_parser/single_hand_efficiency_training_data.py
|
xinranhe/mahjong
|
8cfc6234f9c80fd11267adf06b420b63f4c8d87d
|
[
"MIT"
] | null | null | null |
log_parser/single_hand_efficiency_training_data.py
|
xinranhe/mahjong
|
8cfc6234f9c80fd11267adf06b420b63f4c8d87d
|
[
"MIT"
] | null | null | null |
log_parser/single_hand_efficiency_training_data.py
|
xinranhe/mahjong
|
8cfc6234f9c80fd11267adf06b420b63f4c8d87d
|
[
"MIT"
] | null | null | null |
import argparse
from mahjong.shanten import Shanten
from multiprocessing import Pool
import os
import sys
from log_parser.discard_prediction_parser import parse_discard_prediction
SHANTEN = Shanten()
INPUT_DATA_FOLDER = "data/raw"
OUTPUT_DATA_DIR = "data/single_hand_efficiency"
def tiles34_to_list(tiles):
result = []
    for i in range(34):
for j in xrange(tiles[i]):
result.append(i)
return sorted(result)
def generate_data(folder):
folder_path = "%s/%s" % (INPUT_DATA_FOLDER, folder)
writer = open("%s/%s.txt" % (OUTPUT_DATA_DIR, folder), "w")
num_hands = [0] * 7
num_failed_files = 0
for i, file in enumerate(os.listdir(folder_path)):
print "processed %d files with %d failed: %s records" % (i, num_failed_files, ",".join([str(n) for n in num_hands]))
file_path = "%s/%s" % (folder_path, file)
try:
games = parse_discard_prediction(open(file_path, "r").read())
for game in games:
for one_round in game.one_round:
hais = one_round.center_player.hand
if len(hais) != 14:
continue
hand = [0] * 34
for hai in hais:
hand[hai.id] += 1
if hand[one_round.discarded_hai.id] <= 0:
continue
hand[one_round.discarded_hai.id] -= 1
shanten = int(SHANTEN.calculate_shanten(hand))
num_hands[shanten] += 1
writer.write("%d:%s\n" % (shanten, ",".join([str(i) for i in tiles34_to_list(hand)])))
        except Exception:
            num_failed_files += 1
            print("Failed in parsing:", file_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument('--start_date', default='')
parser.add_argument('--end_date', default='')
known_args, _ = parser.parse_known_args(sys.argv)
date_to_process = []
for date in os.listdir(INPUT_DATA_FOLDER):
if date >= known_args.start_date and date <= known_args.end_date:
date_to_process.append(date)
    print(date_to_process)
generate_data(date_to_process[0])
# multithread generate training data
#p = Pool(NUM_THREADS)
#p.map(generate_data, date_to_process)
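    # Hedged sketch (not in the original file): enabling the commented-out Pool
    # lines above to process every date folder in parallel instead of only the
    # first one. NUM_THREADS and USE_MULTIPROCESSING are hypothetical names.
    NUM_THREADS = 4
    USE_MULTIPROCESSING = False
    if USE_MULTIPROCESSING:
        pool = Pool(NUM_THREADS)
        pool.map(generate_data, date_to_process)
        pool.close()
        pool.join()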
| 35.833333
| 124
| 0.60296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 292
| 0.123467
|
e12e6ff3f71515946f2d758523bf5e5b716bfa6b
| 1,942
|
py
|
Python
|
apps/portalbase/system/system__alerts/methodclass/system_alerts.py
|
Jumpscale/jumpscale_portal8
|
3a4d56a1ba985b68fe9b525aed2486a54808332f
|
[
"Apache-2.0"
] | null | null | null |
apps/portalbase/system/system__alerts/methodclass/system_alerts.py
|
Jumpscale/jumpscale_portal8
|
3a4d56a1ba985b68fe9b525aed2486a54808332f
|
[
"Apache-2.0"
] | 74
|
2015-12-28T16:17:20.000Z
|
2021-09-08T12:28:59.000Z
|
apps/portalbase/system/system__alerts/methodclass/system_alerts.py
|
Jumpscale/jumpscale_portal8
|
3a4d56a1ba985b68fe9b525aed2486a54808332f
|
[
"Apache-2.0"
] | null | null | null |
from JumpScale import j
class system_alerts(j.tools.code.classGetBase()):
"""
Alerts handler
"""
def __init__(self):
self._te = {}
self.actorname = "alertshandler"
self.appname = "system"
self.alertmodel = j.data.models.system.Alert
def update(self, state, alert, comment=None, username=None, **kwargs):
        alert_obj = self._update(state, alert, comment=comment, username=username, **kwargs)
attrs = {'state': state, 'alert': alert,
'comment': comment, 'username': username}
attrs.update(**kwargs)
for attr in attrs:
if attr == 'ctx':
continue
            setattr(alert_obj, attr, attrs[attr])
alert_obj.save()
return True
def _update(self, state, alert, comment=None, username=None, **kwargs):
"""
process eco
first find duplicates for eco (errorcondition obj of style as used in this actor)
the store in db
"""
if not j.data.models.system.Alert.exists(alert):
raise RuntimeError('Invalid Alert')
alert_obj = j.data.models.system.Alert.get(alert)
if username and not j.data.models.system.Alert.find({'username': username})[0]:
raise RuntimeError('User %s does not exist' % username)
username = username or kwargs['ctx'].env['beaker.session']['user']
comment = comment or ''
epoch = j.data.time.getTimeEpoch()
history = {'user': username,
'state': state,
'comment': comment,
'epoch': epoch}
alert_obj.state = state
alert_obj.history.append(history)
return alert_obj
def escalate(self, alert, username=None, comment=None, **kwargs):
alert_obj = self._update('ALERT', alert, comment, username, **kwargs)
alert_obj.level += 1
alert_obj.save()
return True
| 30.825397
| 89
| 0.581874
| 1,915
| 0.986097
| 0
| 0
| 0
| 0
| 0
| 0
| 355
| 0.182801
|
e12ea6090b7a3fc25058fb7f99f94d6f336e2f07
| 17,628
|
py
|
Python
|
docs/pyqbdi.py
|
pbrunet/QBDI
|
39a936b2efd000f0c5def0a8ea27538d7d5fab47
|
[
"Apache-2.0"
] | 1
|
2019-10-01T08:32:41.000Z
|
2019-10-01T08:32:41.000Z
|
docs/pyqbdi.py
|
pbrunet/QBDI
|
39a936b2efd000f0c5def0a8ea27538d7d5fab47
|
[
"Apache-2.0"
] | null | null | null |
docs/pyqbdi.py
|
pbrunet/QBDI
|
39a936b2efd000f0c5def0a8ea27538d7d5fab47
|
[
"Apache-2.0"
] | null | null | null |
# This file is only used to generate documentation
# VM class
class vm():
def getGPRState():
"""Obtain the current general purpose register state.
:returns: GPRState (an object containing the GPR state).
"""
pass
def getFPRState():
"""Obtain the current floating point register state.
:returns: FPRState (an object containing the FPR state).
"""
pass
def setGPRState(gprState):
"""Set the general purpose register state.
:param grpState: An object containing the GPR state.
"""
pass
def setFPRState(fprState):
"""Set the current floating point register state.
:param fprState: An object containing the FPR state
"""
pass
def run(start, stop):
"""Start the execution by the DBI from a given address (and stop when another is reached).
:param start: Address of the first instruction to execute.
:param stop: Stop the execution when this instruction is reached.
:returns: True if at least one block has been executed.
"""
pass
def call(function, args):
"""Call a function using the DBI (and its current state).
:param function: Address of the function start instruction.
:param args: The arguments as a list [arg0, arg1, arg2, ...].
:returns: (True, retValue) if at least one block has been executed.
"""
pass
def addCodeCB(pos, cbk, data):
"""Register a callback event for a specific instruction event.
:param pos: Relative position of the event callback (:py:const:`pyqbdi.PREINST` / :py:const:`pyqbdi.POSTINST`).
:param cbk: A function to be called back.
:param data: User defined data passed to the callback.
:returns: The id of the registered instrumentation (or :py:const:`pyqbdi.INVALID_EVENTID` in case of failure).
"""
pass
def addCodeAddrCB(address, pos, cbk, data):
"""Register a callback for when a specific address is executed.
:param address: Code address which will trigger the callback.
:param pos: Relative position of the event callback (:py:const:`pyqbdi.PREINST` / :py:const:`pyqbdi.POSTINST`).
:param cbk: A function to be called back.
:param data: User defined data passed to the callback.
:returns: The id of the registered instrumentation (or :py:const:`pyqbdi.INVALID_EVENTID` in case of failure).
"""
pass
def addCodeRangeCB(start, end, pos, cbk, data):
"""Register a callback for when a specific address range is executed.
:param start: Start of the address range which will trigger the callback.
:param end: End of the address range which will trigger the callback.
:param pos: Relative position of the event callback (:py:const:`pyqbdi.PREINST` / :py:const:`pyqbdi.POSTINST`).
:param cbk: A function to be called back.
:param data: User defined data passed to the callback.
:returns: The id of the registered instrumentation (or :py:const:`pyqbdi.INVALID_EVENTID` in case of failure).
"""
pass
def addMnemonicCB(mnemonic, pos, cbk, data):
"""Register a callback event if the instruction matches the mnemonic.
:param mnemonic: Mnemonic to match.
:param pos: Relative position of the event callback (:py:const:`pyqbdi.PREINST` / :py:const:`pyqbdi.POSTINST`).
:param cbk: A function to be called back.
:param data: User defined data passed to the callback.
:returns: The id of the registered instrumentation (or :py:const:`pyqbdi.INVALID_EVENTID` in case of failure).
"""
pass
def deleteInstrumentation(id):
"""Remove an instrumentation.
:param id: The id of the instrumentation to remove.
:returns: True if instrumentation has been removed.
"""
pass
def deleteAllInstrumentations():
"""Remove all the registered instrumentations.
"""
pass
def addMemAddrCB(address, type, cbk, data):
"""Add a virtual callback which is triggered for any memory access at a specific address matching the access type. Virtual callbacks are called via callback forwarding by a gate callback triggered on every memory access. This incurs a high performance cost.
:param address: Code address which will trigger the callback.
:param type: A mode bitfield: either :py:const:`pyqbdi.MEMORY_READ`, :py:const:`pyqbdi.MEMORY_WRITE` or both (:py:const:`pyqbdi.MEMORY_READ_WRITE`).
:param cbk: A function to be called back.
:param data: User defined data passed to the callback.
:returns: The id of the registered instrumentation (or :py:const:`pyqbdi.INVALID_EVENTID` in case of failure).
"""
pass
def addMemRangeCB(start, end, type, cbk, data):
"""Add a virtual callback which is triggered for any memory access in a specific address range matching the access type. Virtual callbacks are called via callback forwarding by a gate callback triggered on every memory access. This incurs a high performance cost.
:param start: Start of the address range which will trigger the callback.
:param end: End of the address range which will trigger the callback.
:param type: A mode bitfield: either :py:const:`pyqbdi.MEMORY_READ`, :py:const:`pyqbdi.MEMORY_WRITE` or both (:py:const:`pyqbdi.MEMORY_READ_WRITE`).
:param cbk: A function to be called back.
:param data: User defined data passed to the callback.
:returns: The id of the registered instrumentation (or :py:const:`pyqbdi.INVALID_EVENTID` in case of failure).
"""
pass
def addMemAccessCB(type, cbk, data):
"""Register a callback event for every memory access matching the type bitfield made by an instruction.
:param type: A mode bitfield: either :py:const:`pyqbdi.MEMORY_READ`, :py:const:`pyqbdi.MEMORY_WRITE` or both (:py:const:`pyqbdi.MEMORY_READ_WRITE`).
:param cbk: A function to be called back.
:param data: User defined data passed to the callback.
:returns: The id of the registered instrumentation (or :py:const:`pyqbdi.INVALID_EVENTID` in case of failure).
"""
pass
def recordMemoryAccess(type):
"""Add instrumentation rules to log memory access using inline instrumentation and instruction shadows.
:param type: Memory mode bitfield to activate the logging for: either :py:const:`pyqbdi.MEMORY_READ`, :py:const:`pyqbdi.MEMORY_WRITE` or both (:py:const:`pyqbdi.MEMORY_READ_WRITE`).
:returns: True if inline memory logging is supported, False if not or in case of error.
"""
pass
def getInstAnalysis(type):
""" Obtain the analysis of an instruction metadata. Analysis results are cached in the VM. The validity of the returned object is only guaranteed until the end of the callback, else a deepcopy of the object is required.
:param type: Properties to retrieve during analysis (pyqbdi.ANALYSIS_INSTRUCTION, pyqbdi.ANALYSIS_DISASSEMBLY, pyqbdi.ANALYSIS_OPERANDS, pyqbdi.ANALYSIS_SYMBOL).
:returns: A :py:class:`InstAnalysis` object containing the analysis result.
"""
pass
def getInstMemoryAccess():
"""Obtain the memory accesses made by the last executed instruction.
:returns: A list of memory accesses (:py:class:`MemoryAccess`) made by the instruction.
"""
pass
def getBBMemoryAccess():
"""Obtain the memory accesses made by the last executed basic block.
:returns: A list of memory accesses (:py:class:`MemoryAccess`) made by the basic block.
"""
pass
def precacheBasicBlock(pc):
"""Pre-cache a known basic block
:param pc: Start address of a basic block
:returns: True if basic block has been inserted in cache.
"""
pass
def clearCache(start, end):
"""Clear a specific address range from the translation cache.
:param start: Start of the address range to clear from the cache.
:param end: End of the address range to clear from the cache.
"""
pass
def clearAllCache():
"""Clear the entire translation cache.
"""
pass
def addVMEventCB(mask, cbk, data):
"""Register a callback event for a specific VM event.
:param mask: A mask of VM event type which will trigger the callback.
:param cbk: A function to be called back.
:param data: User defined data passed to the callback.
:returns: The id of the registered instrumentation (or :py:const:`pyqbdi.INVALID_EVENTID` in case of failure).
"""
pass
def addInstrumentedModule(name):
"""Add the executable address ranges of a module to the set of instrumented address ranges.
:param name: The module's name.
:returns: True if at least one range was added to the instrumented ranges.
"""
pass
def addInstrumentedModuleFromAddr(addr):
""" Add the executable address ranges of a module to the set of instrumented address ranges using an address belonging to the module.
:param addr: An address contained by module's range.
:returns: True if at least one range was added to the instrumented ranges.
"""
pass
def addInstrumentedRange(start, end):
"""Add an address range to the set of instrumented address ranges.
:param start: Start address of the range (included).
:param end: End address of the range (excluded).
"""
pass
def instrumentAllExecutableMaps():
"""Adds all the executable memory maps to the instrumented range set.
:returns: True if at least one range was added to the instrumented ranges.
"""
pass
def removeAllInstrumentedRanges():
"""Remove all instrumented ranges.
"""
pass
def removeInstrumentedModule(name):
"""Remove the executable address ranges of a module from the set of instrumented address ranges.
:param name: The module's name.
:returns: True if at least one range was removed from the instrumented ranges.
"""
pass
def removeInstrumentedModuleFromAddr(addr):
"""Remove the executable address ranges of a module from the set of instrumented address ranges using an address belonging to the module.
:param addr: An address contained by module's range.
:returns: True if at least one range was removed from the instrumented ranges.
"""
pass
def removeInstrumentedRange(start, end):
"""Remove an address range from the set of instrumented address ranges.
:param start: Start address of the range (included).
:param end: End address of the range (excluded).
"""
pass
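# Illustrative usage sketch (not part of the generated reference above). The
# callback signature (vm, gpr, fpr, data) and the pyqbdi.PREINST /
# pyqbdi.CONTINUE constants follow upstream pyqbdi examples and are assumptions
# here rather than something defined in this stub.
def _print_instruction(vm, gpr, fpr, data):
    import pyqbdi
    inst = vm.getInstAnalysis(pyqbdi.ANALYSIS_INSTRUCTION | pyqbdi.ANALYSIS_DISASSEMBLY)
    print("0x{:x}: {}".format(inst.address, inst.disassembly))
    return pyqbdi.CONTINUE
# Registering it before every instruction would then look like:
#   vm.addCodeCB(pyqbdi.PREINST, _print_instruction, None)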
# PyQBDI module functions
def alignedAlloc(size, align):
"""Allocate a block of memory of a specified sized with an aligned base address.
:param size: Allocation size in bytes.
:param align: Base address alignement in bytes.
:returns: Pointer to the allocated memory (as a long) or NULL in case an error was encountered.
"""
pass
def alignedFree():
"""
"""
pass
def allocateVirtualStack(ctx, stackSize):
"""Allocate a new stack and setup the GPRState accordingly.
The allocated stack needs to be freed with alignedFree().
:param ctx: GPRState which will be setup to use the new stack.
:param stackSize: Size of the stack to be allocated.
:returns: A tuple (bool, stack) where 'bool' is true if stack allocation was successfull. And 'stack' the newly allocated stack pointer.
"""
pass
def simulateCall(ctx, returnAddress, args):
"""Simulate a call by modifying the stack and registers accordingly.
:param ctx: GPRState where the simulated call will be setup. The state needs to point to a valid stack for example setup with allocateVirtualStack().
:param returnAddress: Return address of the call to simulate.
:param args: A list of arguments.
"""
pass
def getModuleNames():
""" Get a list of all the module names loaded in the process memory.
:returns: A list of strings, each one containing the name of a loaded module.
"""
pass
def getCurrentProcessMaps():
""" Get a list of all the memory maps (regions) of the current process.
:returns: A list of :py:class:`MemoryMap` object.
"""
pass
def readMemory(address, size):
"""Read a memory content from a base address.
:param address: Base address
:param size: Read size
:returns: Bytes of content.
.. warning::
This API is hazardous as the whole process memory can be read.
"""
pass
def writeMemory(address, bytes):
"""Write a memory content to a base address.
:param address: Base address
:param bytes: Memory content
.. warning::
This API is hazardous as the whole process memory can be written.
"""
pass
def decodeFloat(val):
""" Decode a float stored as a long.
:param val: Long value.
"""
pass
def encodeFloat(val):
"""Encode a float as a long.
:param val: Float value
"""
pass
# Various objects
class MemoryMap:
""" Map of a memory area (region).
"""
range = (0, 0xffff)
""" A range of memory (region), delimited between a start and an (excluded) end address. """
permission = 0
""" Region access rights (PF_READ, PF_WRITE, PF_EXEC). """
name = ""
""" Region name (useful when a region is mapping a module). """
class InstAnalysis:
""" Object containing analysis results of an instruction provided by the VM.
"""
mnemonic = ""
""" LLVM mnemonic (warning: None if !ANALYSIS_INSTRUCTION) """
address = 0
""" Instruction address """
instSize = 0
""" Instruction size (in bytes) """
affectControlFlow = False
""" true if instruction affects control flow """
isBranch = False
""" true if instruction acts like a 'jump' """
isCall = False
""" true if instruction acts like a 'call' """
isReturn = False
""" true if instruction acts like a 'return' """
isCompare = False
""" true if instruction is a comparison """
isPredicable = False
""" true if instruction contains a predicate (~is conditional) """
mayLoad = False
""" true if instruction 'may' load data from memory """
mayStore = False
""" true if instruction 'may' store data to memory """
disassembly = ""
""" Instruction disassembly (warning: None if !ANALYSIS_DISASSEMBLY) """
numOperands = 0
""" Number of operands used by the instruction """
operands = []
""" A list of :py:class:`OperandAnalysis` objects.
(warning: empty if !ANALYSIS_OPERANDS) """
symbol = ""
""" Instruction symbol (warning: None if !ANALYSIS_SYMBOL or not found) """
symbolOffset = 0
""" Instruction symbol offset """
module = ""
""" Instruction module name (warning: None if !ANALYSIS_SYMBOL or not found) """
class OperandAnalysis:
""" Object containing analysis results of an operand provided by the VM.
"""
# Common fields
type = 0
""" Operand type (pyqbdi.OPERAND_IMM, pyqbdi.OPERAND_REG, pyqbdi.OPERAND_PRED) """
value = 0
""" Operand value (if immediate), or register Id """
size = 0
""" Operand size (in bytes) """
# Register specific fields
regOff = 0
""" Sub-register offset in register (in bits) """
regCtxIdx = 0
""" Register index in VM state """
regName = ""
""" Register name """
regAccess = 0
""" Register access type (pyqbdi.REGISTER_READ, pyqbdi.REGISTER_WRITE, pyqbdi.REGISTER_READ_WRITE) """
class VMState:
""" Object describing the current VM state.
"""
event = 0
""" The event(s) which triggered the callback (must be checked using a mask: event & pyqbdi.BASIC_BLOCK_ENTRY). """
basicBlockStart = 0
""" The current basic block start address which can also be the execution transfer destination. """
basicBlockEnd = 0
""" The current basic block end address which can also be the execution transfer destination. """
sequenceStart = 0
""" The current sequence start address which can also be the execution transfer destination. """
sequenceEnd = 0
""" The current sequence end address which can also be the execution transfer destination. """
class MemoryAccess:
""" Describe a memory access
"""
instAddress = 0
""" Address of instruction making the access. """
accessAddress = 0
""" Address of accessed memory. """
value = 0
""" Value read from / written to memory. """
size = 0
""" Size of memory access (in bytes). """
type = 0
""" Memory access type (pyqbdi.MEMORY_READ, pyqbdi.MEMORY_WRITE, pyqbdi.MEMORY_READ_WRITE). """
GPRState = None
""" GPRState object, a binding to :cpp:type:`QBDI::GPRState`
"""
FPRState = None
""" FPRState object, a binding to :cpp:type:`QBDI::FPRState`
"""
| 35.90224
| 271
| 0.645677
| 14,891
| 0.844736
| 0
| 0
| 0
| 0
| 0
| 0
| 14,255
| 0.808657
|
e13042781e2e380894da0aab1c6ec72861b3ce01
| 227
|
py
|
Python
|
krkbipscraper/settings.py
|
pawmar/krkbipscraper
|
f2629bede33930cf91378caa7f2ee5d683cf1616
|
[
"BSD-3-Clause"
] | null | null | null |
krkbipscraper/settings.py
|
pawmar/krkbipscraper
|
f2629bede33930cf91378caa7f2ee5d683cf1616
|
[
"BSD-3-Clause"
] | null | null | null |
krkbipscraper/settings.py
|
pawmar/krkbipscraper
|
f2629bede33930cf91378caa7f2ee5d683cf1616
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Scrapy settings."""
BOT_NAME = 'krkbipscraper'
SPIDER_MODULES = ['krkbipscraper.spiders']
NEWSPIDER_MODULE = 'krkbipscraper.spiders'
ITEM_PIPELINES = ['krkbipscraper.pipelines.JsonWriterPipeline']
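# The ITEM_PIPELINES entry above points at krkbipscraper.pipelines.JsonWriterPipeline,
# which is defined elsewhere in the project. The sketch below shows what such a
# pipeline typically looks like; the project's actual implementation may differ,
# and the 'items.jl' filename is a placeholder.
import json

class JsonWriterPipeline(object):
    """Minimal illustrative pipeline writing scraped items as JSON lines."""

    def open_spider(self, spider):
        self.file = open('items.jl', 'w')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        self.file.write(json.dumps(dict(item)) + '\n')
        return item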
| 22.7
| 63
| 0.744493
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 150
| 0.660793
|
e1311759e08a6c90f2dd14452c29543ae793ad35
| 1,797
|
py
|
Python
|
sap hana/connections and query execution with python/script.py
|
Phelipe-Sempreboni/databases
|
3be823db9029994d7b50d23d1830209276e5f40a
|
[
"MIT"
] | 1
|
2020-10-27T21:50:28.000Z
|
2020-10-27T21:50:28.000Z
|
sap hana/connections and query execution with python/script.py
|
Phelipe-Sempreboni/databases
|
3be823db9029994d7b50d23d1830209276e5f40a
|
[
"MIT"
] | null | null | null |
sap hana/connections and query execution with python/script.py
|
Phelipe-Sempreboni/databases
|
3be823db9029994d7b50d23d1830209276e5f40a
|
[
"MIT"
] | null | null | null |
# Import the library.
# Make sure the pyhdb library is installed.
import pyhdb
# This function reads the password from a separate file so it is not exposed in the application.
# If you prefer not to use this approach, remove this function and put the password directly in the `password` argument of connect() below.
def pass_location():
    pass_file = "" # Insert the path of the file containing the password.
    with open(pass_file, 'r') as file: # Read the file and return its contents.
        return file.read()
# Opens the connection to SAP HANA.
def connect():
    try:
        connection = pyhdb.connect(
            host="", # Insert the server host.
            port=None, # Insert the port (usually numeric). `None` is only a placeholder so the file parses; replace it with your instance's port.
            user="", # Insert the user name.
            password=pass_location() # Uses pass_location() to fetch the password from a separate file; remove it and insert the password directly if preferred.
        )
        return connection.cursor()
    except Exception:
        return 1
# Executes the query on SAP HANA.
def query_exec():
    # The example query below lists 10 installations from the installation master-data table.
    cursor = connect()
    cursor.execute("SET SCHEMA SAPBP1") # Insert the database schema.
    cursor.execute("SELECT top 10 "'"/BIC/EPINSTALA"'" \
        from "'"SAPBP1"'"."'"/BIC/PEPINSTALA"'" ") # Insert the query.
    result = cursor.fetchall() # Returns all results.
    cursor.close() # Closes the cursor.
    return result
if __name__ == '__main__':
    connect() # Opens the connection.
    resultado = query_exec() # Runs the query.
    print(resultado) # Prints the result to the terminal.
| 41.790698
| 171
| 0.670006
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,210
| 0.664105
|
e131340a4484b6722bf5a16704072d57bfdba8fe
| 2,418
|
py
|
Python
|
tests/mvae/distributions/test_von_mises_fisher.py
|
macio232/mvae
|
df3d5158ce29744e54b378ad663361e8b785632a
|
[
"Apache-2.0"
] | 53
|
2019-11-20T05:39:54.000Z
|
2022-02-05T06:36:43.000Z
|
tests/mvae/distributions/test_von_mises_fisher.py
|
macio232/mvae
|
df3d5158ce29744e54b378ad663361e8b785632a
|
[
"Apache-2.0"
] | 8
|
2020-03-14T20:25:08.000Z
|
2021-06-10T08:06:15.000Z
|
tests/mvae/distributions/test_von_mises_fisher.py
|
macio232/mvae
|
df3d5158ce29744e54b378ad663361e8b785632a
|
[
"Apache-2.0"
] | 10
|
2020-03-14T20:17:47.000Z
|
2021-12-01T14:08:06.000Z
|
# Copyright 2019 Ondrej Skopek.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pytest
import torch
from mt.mvae import utils
from mt.mvae.distributions.von_mises_fisher import VonMisesFisher
dims = [2, 3, 4]
scales = [1e9, 1e5, 1e1, 1e0, 1e-5, 1e-15]
def vmf_distribution(dim: int, scale: float) -> VonMisesFisher:
utils.set_seeds(42)
loc = torch.tensor([[1.] + [0.] * (dim - 1)], dtype=torch.get_default_dtype())
scale = torch.tensor([[scale]], dtype=torch.get_default_dtype())
vmf = VonMisesFisher(loc, scale)
return vmf
@pytest.mark.parametrize("dim", dims)
@pytest.mark.parametrize("scale", scales)
def test_vmf_sampling_nans(dim: int, scale: float) -> None:
vmf = vmf_distribution(dim, scale)
shape = torch.Size([10])
for i in range(100):
samples = vmf.sample(shape)
assert torch.isfinite(samples).all()
assert torch.norm(samples, p=2, dim=-1).allclose(torch.ones(samples.shape[:-1]))
log_prob = vmf.log_prob(samples)
assert torch.isfinite(log_prob).all()
# assert (log_prob <= 0).all() This does not hold, because it actually doesn't have to hold :) Math :)
assert (log_prob < 1e20).all()
assert (log_prob > -1e20).all()
# This does not depend on the mean (loc), just it's dimensionality.
@pytest.mark.parametrize("scale", scales)
def test_sampling_w3(scale: float) -> None:
vmf = vmf_distribution(3, scale)
w = vmf._sample_w3(shape=torch.Size([100]))
assert (w.abs() <= 1).all()
# This does not depend on the mean (loc), just it's dimensionality.
@pytest.mark.parametrize("dim", dims)
@pytest.mark.parametrize("scale", scales)
def test_sampling_w_rej(dim: int, scale: float) -> None:
vmf = vmf_distribution(dim, scale)
w = vmf._sample_w_rej(shape=torch.Size([100]))
assert (w.abs() <= 1).all()
| 37.2
| 110
| 0.669975
| 0
| 0
| 0
| 0
| 1,139
| 0.47105
| 0
| 0
| 911
| 0.376758
|
e13147c692ddf6997325ddaffddf29246eba0b66
| 1,033
|
py
|
Python
|
cello/download_resources.py
|
Ann-Holmes/CellO
|
bc2192a2d27e0859f6df885a6fc246e26e54a7b0
|
[
"MIT"
] | 42
|
2019-05-14T19:04:38.000Z
|
2022-03-06T12:57:00.000Z
|
cello/download_resources.py
|
Ann-Holmes/CellO
|
bc2192a2d27e0859f6df885a6fc246e26e54a7b0
|
[
"MIT"
] | 16
|
2020-08-04T12:34:08.000Z
|
2022-03-31T22:30:48.000Z
|
cello/download_resources.py
|
Ann-Holmes/CellO
|
bc2192a2d27e0859f6df885a6fc246e26e54a7b0
|
[
"MIT"
] | 6
|
2019-05-13T15:57:03.000Z
|
2022-03-18T02:17:05.000Z
|
"""
Download CellO's resources files. These files include CellO's pre-trained
models, gene ID-to-symbol mappings, and training sets for training CellO's
models on new gene sets.
Authors: Matthew Bernstein <mbernstein@morgridge.org>
"""
import subprocess
import sys
from os.path import join
from shutil import which
def download(dest):
if which('curl') is None:
sys.exit(
"""
Error. Could not find command, 'curl'. Please make sure that
curl is installed and available via the 'PATH' variable. For
details, see https://curl.se.
"""
)
cmds = [
'curl http://deweylab.biostat.wisc.edu/cell_type_classification/resources_v2.0.0.tar.gz > {}'.format(
join(dest, 'resources_v2.0.0.tar.gz')
),
'tar -C {} -zxf resources_v2.0.0.tar.gz'.format(dest),
'rm {}'.format(join(dest, 'resources_v2.0.0.tar.gz'))
]
for cmd in cmds:
print('Running command: {}'.format(cmd))
subprocess.run(cmd, shell=True)
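# Minimal usage sketch (an addition, not in the original module): fetch the
# resource bundle into the current working directory when the file is run
# directly. The destination path is arbitrary here.
if __name__ == '__main__':
    download('.')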
| 31.30303
| 109
| 0.621491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 662
| 0.640852
|
e1314b5fc0e2d4894cb7cecd74444fc00587afb1
| 190
|
py
|
Python
|
prob_020.py
|
tansly/euler
|
8b420cb05223cf60b6c01aac9bfe8ce5a3b96ddc
|
[
"MIT"
] | 1
|
2017-02-13T19:00:59.000Z
|
2017-02-13T19:00:59.000Z
|
prob_020.py
|
tansly/euler
|
8b420cb05223cf60b6c01aac9bfe8ce5a3b96ddc
|
[
"MIT"
] | null | null | null |
prob_020.py
|
tansly/euler
|
8b420cb05223cf60b6c01aac9bfe8ce5a3b96ddc
|
[
"MIT"
] | null | null | null |
def sum_digit(n):
total = 0
while n != 0:
total += n % 10
        n //= 10  # integer division keeps n an int so the loop terminates
return total
def factorial(n):
if n <= 0:
return 1
return n * factorial(n - 1)
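# Worked usage (an addition, not in the original solution file): Project Euler
# problem 20 asks for the digit sum of 100!, which the two helpers above compute.
if __name__ == '__main__':
    print(sum_digit(factorial(100)))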
| 15.833333
| 31
| 0.473684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e133fe625681b1837857d1c7c1998eeec6f05e88
| 7,755
|
py
|
Python
|
mmtrack/models/mot/trackers/base_tracker.py
|
sht47/mmtracking
|
5a25e418e9c598d1b576bce8702f5e156cbbefe7
|
[
"Apache-2.0"
] | 12
|
2021-09-05T20:47:16.000Z
|
2022-03-23T07:00:35.000Z
|
mmtrack/models/mot/trackers/base_tracker.py
|
hellock/mmtracking
|
a22a36b2055d80cf4a7a5ef3913849abb56defcb
|
[
"Apache-2.0"
] | 2
|
2021-09-06T13:20:09.000Z
|
2022-01-13T05:36:14.000Z
|
mmtrack/models/mot/trackers/base_tracker.py
|
hellock/mmtracking
|
a22a36b2055d80cf4a7a5ef3913849abb56defcb
|
[
"Apache-2.0"
] | 1
|
2022-02-28T19:33:49.000Z
|
2022-02-28T19:33:49.000Z
|
from abc import ABCMeta, abstractmethod
import torch
import torch.nn.functional as F
from addict import Dict
from mmtrack.models import TRACKERS
@TRACKERS.register_module()
class BaseTracker(metaclass=ABCMeta):
"""Base tracker model.
Args:
momentums (dict[str:float], optional): Momentums to update the buffers.
The `str` indicates the name of the buffer while the `float`
indicates the momentum. Default to None.
num_frames_retain (int, optional). If a track is disappeared more than
`num_frames_retain` frames, it will be deleted in the memo.
"""
def __init__(self, momentums=None, num_frames_retain=10):
super().__init__()
if momentums is not None:
assert isinstance(momentums, dict), 'momentums must be a dict'
self.momentums = momentums
self.num_frames_retain = num_frames_retain
self.reset()
def reset(self):
"""Reset the buffer of the tracker."""
self.num_tracks = 0
self.tracks = dict()
@property
def empty(self):
"""Whether the buffer is empty or not."""
return False if self.tracks else True
@property
def ids(self):
"""All ids in the tracker."""
return list(self.tracks.keys())
@property
def with_reid(self):
"""bool: whether the framework has a reid model"""
return hasattr(self, 'reid') and self.reid is not None
def update(self, **kwargs):
"""Update the tracker.
Args:
kwargs (dict[str: Tensor | int]): The `str` indicates the
name of the input variable. `ids` and `frame_ids` are
obligatory in the keys.
"""
memo_items = [k for k, v in kwargs.items() if v is not None]
rm_items = [k for k in kwargs.keys() if k not in memo_items]
for item in rm_items:
kwargs.pop(item)
if not hasattr(self, 'memo_items'):
self.memo_items = memo_items
else:
assert memo_items == self.memo_items
assert 'ids' in memo_items
num_objs = len(kwargs['ids'])
id_indice = memo_items.index('ids')
assert 'frame_ids' in memo_items
frame_id = int(kwargs['frame_ids'])
if isinstance(kwargs['frame_ids'], int):
kwargs['frame_ids'] = torch.tensor([kwargs['frame_ids']] *
num_objs)
# cur_frame_id = int(kwargs['frame_ids'][0])
for k, v in kwargs.items():
if len(v) != num_objs:
raise ValueError()
for obj in zip(*kwargs.values()):
id = int(obj[id_indice])
if id in self.tracks:
self.update_track(id, obj)
else:
self.init_track(id, obj)
self.pop_invalid_tracks(frame_id)
def pop_invalid_tracks(self, frame_id):
"""Pop out invalid tracks."""
invalid_ids = []
for k, v in self.tracks.items():
if frame_id - v['frame_ids'][-1] >= self.num_frames_retain:
invalid_ids.append(k)
for invalid_id in invalid_ids:
self.tracks.pop(invalid_id)
def update_track(self, id, obj):
"""Update a track."""
for k, v in zip(self.memo_items, obj):
v = v[None]
if self.momentums is not None and k in self.momentums:
m = self.momentums[k]
self.tracks[id][k] = (1 - m) * self.tracks[id][k] + m * v
else:
self.tracks[id][k].append(v)
def init_track(self, id, obj):
"""Initialize a track."""
self.tracks[id] = Dict()
for k, v in zip(self.memo_items, obj):
v = v[None]
if self.momentums is not None and k in self.momentums:
self.tracks[id][k] = v
else:
self.tracks[id][k] = [v]
@property
def memo(self):
"""Return all buffers in the tracker."""
outs = Dict()
for k in self.memo_items:
outs[k] = []
for id, objs in self.tracks.items():
for k, v in objs.items():
if k not in outs:
continue
if self.momentums is not None and k in self.momentums:
v = v
else:
v = v[-1]
outs[k].append(v)
for k, v in outs.items():
outs[k] = torch.cat(v, dim=0)
return outs
def get(self, item, ids=None, num_samples=None, behavior=None):
"""Get the buffer of a specific item.
Args:
item (str): The demanded item.
ids (list[int]): The demaned ids.
num_samples (int, optional): Number of samples to calculate the
results. Defaults to None.
behavior (str, optional): Behavior to calculate the results.
Options are `mean` | None. Defaults to None.
Returns:
Tensor: The results of the demanded item.
"""
if ids is None:
ids = self.ids
outs = []
for id in ids:
out = self.tracks[id][item]
if isinstance(out, list):
if num_samples is not None:
out = out[-num_samples:]
out = torch.cat(out, dim=0)
if behavior == 'mean':
out = out.mean(dim=0, keepdim=True)
elif behavior is None:
out = out[None]
else:
raise NotImplementedError()
else:
out = out[-1]
outs.append(out)
return torch.cat(outs, dim=0)
@abstractmethod
def track(self, *args, **kwargs):
"""Tracking forward function."""
pass
def crop_imgs(self, img, img_metas, bboxes, rescale=False):
"""Crop the images according to some bounding boxes. Typically for re-
identification sub-module.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
bboxes (Tensor): of shape (N, 4) or (N, 5).
rescale (bool, optional): If True, the bounding boxes should be
rescaled to fit the scale of the image. Defaults to False.
Returns:
Tensor: Image tensor of shape (N, C, H, W).
"""
h, w, _ = img_metas[0]['img_shape']
img = img[:, :, :h, :w]
if rescale:
bboxes[:, :4] *= torch.tensor(img_metas[0]['scale_factor']).to(
bboxes.device)
bboxes[:, 0::2] = torch.clamp(bboxes[:, 0::2], min=0, max=w)
bboxes[:, 1::2] = torch.clamp(bboxes[:, 1::2], min=0, max=h)
crop_imgs = []
for bbox in bboxes:
x1, y1, x2, y2 = map(int, bbox)
if x2 == x1:
x2 = x1 + 1
if y2 == y1:
y2 = y1 + 1
crop_img = img[:, :, y1:y2, x1:x2]
if self.reid.get('img_scale', False):
crop_img = F.interpolate(
crop_img,
size=self.reid['img_scale'],
mode='bilinear',
align_corners=False)
crop_imgs.append(crop_img)
if len(crop_imgs) > 0:
return torch.cat(crop_imgs, dim=0)
else:
return img.new_zeros((0, ))
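# Hedged sketch (not part of mmtracking): the minimal shape of a concrete
# tracker built on BaseTracker. The id-assignment logic is a placeholder; real
# trackers implement a proper matching/assignment step inside track().
@TRACKERS.register_module()
class NaiveTracker(BaseTracker):
    """Assigns a fresh id to every detection; for illustration only."""

    def track(self, bboxes, labels, frame_id, **kwargs):
        num = bboxes.size(0)
        ids = torch.arange(self.num_tracks, self.num_tracks + num)
        self.num_tracks += num
        self.update(ids=ids, bboxes=bboxes, labels=labels, frame_ids=frame_id)
        return bboxes, labels, ids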
| 34.466667
| 79
| 0.52392
| 7,577
| 0.977047
| 0
| 0
| 7,605
| 0.980658
| 0
| 0
| 2,427
| 0.312959
|
e134a13671522e1fa873cc9f15fcf37d47bcca9a
| 3,675
|
py
|
Python
|
test/conftest.py
|
pauldg/ro-crate-py
|
695004f18175ca70b439534adece9e2242dca778
|
[
"Apache-2.0"
] | null | null | null |
test/conftest.py
|
pauldg/ro-crate-py
|
695004f18175ca70b439534adece9e2242dca778
|
[
"Apache-2.0"
] | null | null | null |
test/conftest.py
|
pauldg/ro-crate-py
|
695004f18175ca70b439534adece9e2242dca778
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2022 The University of Manchester, UK
# Copyright 2020-2022 Vlaams Instituut voor Biotechnologie (VIB), BE
# Copyright 2020-2022 Barcelona Supercomputing Center (BSC), ES
# Copyright 2020-2022 Center for Advanced Studies, Research and Development in Sardinia (CRS4), IT
# Copyright 2022 École Polytechnique Fédérale de Lausanne, CH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pathlib
import shutil
import pytest
from rocrate.utils import get_norm_value
THIS_DIR = pathlib.Path(__file__).absolute().parent
TEST_DATA_NAME = 'test-data'
BASE_URL = 'https://w3id.org/ro/crate'
VERSION = '1.1'
LEGACY_VERSION = '1.0'
class Helpers:
PROFILE = f"{BASE_URL}/{VERSION}"
LEGACY_PROFILE = f"{BASE_URL}/{LEGACY_VERSION}"
WORKFLOW_PROFILE = "https://w3id.org/workflowhub/workflow-ro-crate/1.0"
METADATA_FILE_NAME = 'ro-crate-metadata.json'
LEGACY_METADATA_FILE_NAME = 'ro-crate-metadata.jsonld'
WORKFLOW_TYPES = {"File", "SoftwareSourceCode", "ComputationalWorkflow"}
WORKFLOW_DESC_TYPES = {"File", "SoftwareSourceCode", "HowTo"}
LEGACY_WORKFLOW_TYPES = {"File", "SoftwareSourceCode", "Workflow"}
PREVIEW_FILE_NAME = "ro-crate-preview.html"
@classmethod
def read_json_entities(cls, crate_base_path):
metadata_path = pathlib.Path(crate_base_path) / cls.METADATA_FILE_NAME
with open(metadata_path, "rt") as f:
json_data = json.load(f)
return {_["@id"]: _ for _ in json_data["@graph"]}
@classmethod
def check_crate(cls, json_entities, root_id="./", data_entity_ids=None):
assert root_id in json_entities
root = json_entities[root_id]
assert root["@type"] == "Dataset"
assert cls.METADATA_FILE_NAME in json_entities
metadata = json_entities[cls.METADATA_FILE_NAME]
assert metadata["@type"] == "CreativeWork"
assert cls.PROFILE in get_norm_value(metadata, "conformsTo")
assert metadata["about"] == {"@id": root_id}
if data_entity_ids:
data_entity_ids = set(data_entity_ids)
assert data_entity_ids.issubset(json_entities)
assert "hasPart" in root
assert data_entity_ids.issubset([_["@id"] for _ in root["hasPart"]])
@classmethod
def check_wf_crate(cls, json_entities, wf_file_name, root_id="./"):
cls.check_crate(json_entities, root_id=root_id)
assert json_entities[root_id]["mainEntity"]["@id"] == wf_file_name
assert wf_file_name in json_entities
wf_entity = json_entities[wf_file_name]
assert isinstance(wf_entity["@type"], list)
assert cls.WORKFLOW_TYPES.issubset(wf_entity["@type"])
assert "programmingLanguage" in wf_entity
metadata = json_entities[cls.METADATA_FILE_NAME]
assert cls.WORKFLOW_PROFILE in get_norm_value(metadata, "conformsTo")
@pytest.fixture
def helpers():
return Helpers
# pytest's default tmpdir returns a py.path object
@pytest.fixture
def tmpdir(tmpdir):
return pathlib.Path(tmpdir)
@pytest.fixture
def test_data_dir(tmpdir):
d = tmpdir / TEST_DATA_NAME
shutil.copytree(THIS_DIR / TEST_DATA_NAME, d)
return d
| 37.5
| 98
| 0.71619
| 2,209
| 0.600598
| 0
| 0
| 1,892
| 0.51441
| 0
| 0
| 1,444
| 0.392605
|
e134f405b60309ac638075a35a6b8ff83d2c5ab6
| 3,791
|
py
|
Python
|
tests/unit/test_marathon.py
|
seomoz/roger-mesos-tools
|
88b4cb3550a4b49d0187cfb5e6a22246ff6b9765
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_marathon.py
|
seomoz/roger-mesos-tools
|
88b4cb3550a4b49d0187cfb5e6a22246ff6b9765
|
[
"Apache-2.0"
] | 47
|
2016-05-26T22:09:56.000Z
|
2018-08-08T20:33:39.000Z
|
tests/unit/test_marathon.py
|
seomoz/roger-mesos-tools
|
88b4cb3550a4b49d0187cfb5e6a22246ff6b9765
|
[
"Apache-2.0"
] | 3
|
2017-09-20T22:39:03.000Z
|
2017-11-07T22:29:29.000Z
|
#!/usr/bin/python
from __future__ import print_function
import unittest
import json
import os
import sys
import requests
sys.path.insert(0, os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, "cli")))
from cli.marathon import Marathon
from cli.appconfig import AppConfig
from mockito import mock, when
# Test basic functionalities of MarathonValidator class
class TestMarathon(unittest.TestCase):
def setUp(self):
self.marathon = Marathon()
self.appconfig = AppConfig()
def test_validateGroupDetails(self):
group_details = {}
message_list = []
group_details['/test/app1'] = ('/http_prefix1', ['3000', '3001'], False)
group_details['/test/app2'] = ('/http_prefix2', ['9000', '9001'], False)
valid = self.marathon.validateGroupDetails(group_details, message_list)
assert valid is True
assert len(message_list) == 0
message_list = []
group_details['/test/app3'] = ('/http_prefix3', ['8000', '9001'], False)
valid = self.marathon.validateGroupDetails(group_details, message_list)
assert valid is False
assert len(message_list) == 1
message_list = []
group_details = {}
group_details['/test/app3'] = ('/http_prefix3', ['8000', '9001'], False)
group_details['/test/app4'] = ('/http_prefix3', ['7000', '7001'], False)
valid = self.marathon.validateGroupDetails(group_details, message_list)
assert valid is False
assert len(message_list) == 1
message_list = []
group_details = {}
# One of the conflicting apps have affinity = False, leads to a failed
# validation for HTTP_PREFIX
group_details['/test/app3'] = ('/http_prefix3', ['9000', '9001'], False)
group_details['/test/app5'] = ('/http_prefix3', ['8000', '8001'], True)
valid = self.marathon.validateGroupDetails(group_details, message_list)
assert valid is False
assert len(message_list) == 1
message_list = []
group_details = {}
group_details['/test/app3'] = ('/http_prefix3', ['9000', '9001'], True)
group_details['/test/app5'] = ('/http_prefix3', ['8000', '8001'], True)
valid = self.marathon.validateGroupDetails(group_details, message_list)
assert valid is True
assert len(message_list) == 1
for message in message_list:
print(message)
@property
def config_dir(self):
os.environ['ROGER_CONFIG_DIR'] = '/vagrant/config'
def test_get_image(self):
res = mock(requests.Response)
        url = 'https://marathon-example.com/v2/apps/welcome'
        data = {'environments': {'dev': {
            'marathon_endpoint': 'https://marathon-example.com'}}}
image_data = {
'app': {
'container': {
'docker': {
'image': "registry.example.com:6000/0dajdhkfa8ecb64/v0.72.0"}}}}
os.environ['ROGER_CONFIG_DIR'] = '/vagrant/config'
username = 'first.first'
password = 'last.last'
config_dir = '/vagrant/config'
config_file = 'test.yml'
app_config_object = mock(AppConfig)
when(app_config_object).getRogerEnv(config_dir).thenReturn(data)
when(requests).get(url, auth=(username, password)).thenReturn(res)
when(res).json().thenReturn(image_data)
m = Marathon()
img = m.get_image_name(
username,
password,
'dev',
'welcome',
config_dir,
config_file,
app_config_object
)
assert img == image_data['app']['container']['docker']['image']
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| 36.104762
| 80
| 0.613031
| 3,342
| 0.881562
| 0
| 0
| 94
| 0.024796
| 0
| 0
| 896
| 0.236349
|
e136e8225ad172a851846dc46f34389a3f760935
| 65
|
py
|
Python
|
1/0/10821/10821.py
|
chr0m3/boj-codes
|
d71d0a22d0a3ae62c225f382442461275f56fe8f
|
[
"MIT"
] | 3
|
2017-07-08T16:29:06.000Z
|
2020-07-20T00:17:45.000Z
|
1/0/10821/10821.py
|
chr0m3/boj-codes
|
d71d0a22d0a3ae62c225f382442461275f56fe8f
|
[
"MIT"
] | null | null | null |
1/0/10821/10821.py
|
chr0m3/boj-codes
|
d71d0a22d0a3ae62c225f382442461275f56fe8f
|
[
"MIT"
] | 2
|
2017-11-20T14:06:06.000Z
|
2020-07-20T00:17:47.000Z
|
numbers = list(map(int, input().split(',')))
print(len(numbers))
| 21.666667
| 44
| 0.646154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0.046154
|
e137881799720563759aa64b3e6bb8a63eb7afae
| 496
|
py
|
Python
|
Chapter13/server.py
|
Joustie/Mastering-GitLab-12
|
5ac4700791e4274ef3de825bc789c46142af403e
|
[
"MIT"
] | 40
|
2019-07-06T04:40:27.000Z
|
2022-03-31T09:25:07.000Z
|
Chapter13/server.py
|
Joustie/Mastering-GitLab-12
|
5ac4700791e4274ef3de825bc789c46142af403e
|
[
"MIT"
] | 1
|
2019-08-03T17:52:08.000Z
|
2020-12-16T06:31:53.000Z
|
Chapter13/server.py
|
Joustie/Mastering-GitLab-12
|
5ac4700791e4274ef3de825bc789c46142af403e
|
[
"MIT"
] | 50
|
2019-07-26T08:49:49.000Z
|
2022-03-17T21:01:03.000Z
|
from flask import Flask, request
import json
app = Flask(__name__)
def runsomething():
    print("This is triggered")
@app.route('/',methods=['POST'])
def trigger():
data = json.loads(request.data)
    print("New commit by: {}".format(data['commits'][0]['author']['name']))
    print("Author email: {}".format(data['commits'][0]['author']['email']))
    print("Commit message: {}".format(data['commits'][0]['message']))
runsomething()
return "OK"
if __name__ == '__main__':
app.run()
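# Illustrative sketch (not part of the original server.py): exercising the hook
# locally with the third-party ``requests`` library. Flask's default port 5000
# and the minimal payload shape below are assumptions for the example.
def send_test_push(url='http://127.0.0.1:5000/'):
    import requests
    payload = {'commits': [{'author': {'name': 'Jane', 'email': 'jane@example.com'},
                            'message': 'test commit'}]}
    # The handler parses request.data itself, so post the JSON body directly.
    return requests.post(url, data=json.dumps(payload))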
| 24.8
| 74
| 0.635081
| 0
| 0
| 0
| 0
| 335
| 0.675403
| 0
| 0
| 164
| 0.330645
|
e1380bef90ab2ac303d6b8ab31b603e3157ac287
| 4,349
|
py
|
Python
|
tests/test_nlp4e.py
|
EDTAKE/IA
|
2731e8ccb9d1b72f564c8c7a1c46a855760edfac
|
[
"MIT"
] | null | null | null |
tests/test_nlp4e.py
|
EDTAKE/IA
|
2731e8ccb9d1b72f564c8c7a1c46a855760edfac
|
[
"MIT"
] | null | null | null |
tests/test_nlp4e.py
|
EDTAKE/IA
|
2731e8ccb9d1b72f564c8c7a1c46a855760edfac
|
[
"MIT"
] | 1
|
2019-10-26T22:33:40.000Z
|
2019-10-26T22:33:40.000Z
|
import pytest
import nlp
from nlp4e import Rules, Lexicon, Grammar, ProbRules, ProbLexicon, ProbGrammar, E0
from nlp4e import Chart, CYK_parse, subspan, astar_search_parsing, beam_search_parsing
# Clumsy imports because we want to access certain nlp.py globals explicitly,
# since they are accessed by functions within nlp.py
def test_rules():
check = {'A': [['B', 'C'], ['D', 'E']], 'B': [['E'], ['a'], ['b', 'c']]}
assert Rules(A="B C | D E", B="E | a | b c") == check
def test_lexicon():
check = {'Article': ['the', 'a', 'an'], 'Pronoun': ['i', 'you', 'he']}
lexicon = Lexicon(Article="the | a | an", Pronoun="i | you | he")
assert lexicon == check
def test_grammar():
rules = Rules(A="B C | D E", B="E | a | b c")
lexicon = Lexicon(Article="the | a | an", Pronoun="i | you | he")
grammar = Grammar("Simplegram", rules, lexicon)
assert grammar.rewrites_for('A') == [['B', 'C'], ['D', 'E']]
assert grammar.isa('the', 'Article')
grammar = nlp.E_Chomsky
for rule in grammar.cnf_rules():
assert len(rule) == 3
def test_generation():
lexicon = Lexicon(Article="the | a | an",
Pronoun="i | you | he")
rules = Rules(
S="Article | More | Pronoun",
More="Article Pronoun | Pronoun Pronoun"
)
grammar = Grammar("Simplegram", rules, lexicon)
sentence = grammar.generate_random('S')
for token in sentence.split():
found = False
for non_terminal, terminals in grammar.lexicon.items():
if token in terminals:
found = True
assert found
def test_prob_rules():
check = {'A': [(['B', 'C'], 0.3), (['D', 'E'], 0.7)],
'B': [(['E'], 0.1), (['a'], 0.2), (['b', 'c'], 0.7)]}
rules = ProbRules(A="B C [0.3] | D E [0.7]", B="E [0.1] | a [0.2] | b c [0.7]")
assert rules == check
def test_prob_lexicon():
check = {'Article': [('the', 0.5), ('a', 0.25), ('an', 0.25)],
'Pronoun': [('i', 0.4), ('you', 0.3), ('he', 0.3)]}
lexicon = ProbLexicon(Article="the [0.5] | a [0.25] | an [0.25]",
Pronoun="i [0.4] | you [0.3] | he [0.3]")
assert lexicon == check
def test_prob_grammar():
rules = ProbRules(A="B C [0.3] | D E [0.7]", B="E [0.1] | a [0.2] | b c [0.7]")
lexicon = ProbLexicon(Article="the [0.5] | a [0.25] | an [0.25]",
Pronoun="i [0.4] | you [0.3] | he [0.3]")
grammar = ProbGrammar("Simplegram", rules, lexicon)
assert grammar.rewrites_for('A') == [(['B', 'C'], 0.3), (['D', 'E'], 0.7)]
assert grammar.isa('the', 'Article')
grammar = nlp.E_Prob_Chomsky
for rule in grammar.cnf_rules():
assert len(rule) == 4
def test_prob_generation():
lexicon = ProbLexicon(Verb="am [0.5] | are [0.25] | is [0.25]",
Pronoun="i [0.4] | you [0.3] | he [0.3]")
rules = ProbRules(
S="Verb [0.5] | More [0.3] | Pronoun [0.1] | nobody is here [0.1]",
More="Pronoun Verb [0.7] | Pronoun Pronoun [0.3]"
)
grammar = ProbGrammar("Simplegram", rules, lexicon)
sentence = grammar.generate_random('S')
assert len(sentence) == 2
def test_chart_parsing():
chart = Chart(nlp.E0)
parses = chart.parses('the stench is in 2 2')
assert len(parses) == 1
def test_CYK_parse():
grammar = nlp.E_Prob_Chomsky
words = ['the', 'robot', 'is', 'good']
P = CYK_parse(words, grammar)
assert len(P) == 5
grammar = nlp.E_Prob_Chomsky_
words = ['astronomers', 'saw', 'stars']
P = CYK_parse(words, grammar)
assert len(P) == 3
def test_subspan():
spans = subspan(3)
assert spans.__next__() == (1,1,2)
assert spans.__next__() == (2,2,3)
assert spans.__next__() == (1,1,3)
assert spans.__next__() == (1,2,3)
def test_text_parsing():
words = ["the", "wumpus", "is", "dead"]
    grammar = E0
    assert astar_search_parsing(words, grammar) == 'S'
    assert beam_search_parsing(words, grammar) == 'S'
    words = ["the", "is", "wupus", "dead"]
    assert astar_search_parsing(words, grammar) == False
    assert beam_search_parsing(words, grammar) == False
if __name__ == '__main__':
pytest.main()
| 31.977941
| 87
| 0.539894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,127
| 0.25914
|
e13a783a47008677ccb95568f58fe7dd6ad2e4f3
| 1,598
|
py
|
Python
|
integration_test/ESI/cosim/loopback.py
|
Patosga/circt
|
ebf06c9aa5a4e8ae2485b52fd3c564eec7df5754
|
[
"Apache-2.0"
] | null | null | null |
integration_test/ESI/cosim/loopback.py
|
Patosga/circt
|
ebf06c9aa5a4e8ae2485b52fd3c564eec7df5754
|
[
"Apache-2.0"
] | null | null | null |
integration_test/ESI/cosim/loopback.py
|
Patosga/circt
|
ebf06c9aa5a4e8ae2485b52fd3c564eec7df5754
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import binascii
import random
import cosim
class LoopbackTester(cosim.CosimBase):
"""Provides methods to test the loopback simulations."""
def test_list(self):
ifaces = self.cosim.list().wait().ifaces
assert len(ifaces) > 0
def test_open_close(self):
ifaces = self.cosim.list().wait().ifaces
openResp = self.cosim.open(ifaces[0]).wait()
assert openResp.iface is not None
ep = openResp.iface
ep.close().wait()
def test_i32(self, num_msgs):
ep = self.openEP(sendType=self.schema.I32, recvType=self.schema.I32)
for _ in range(num_msgs):
data = random.randint(0, 2**32)
print(f"Sending {data}")
ep.send(self.schema.I32.new_message(i=data))
result = self.readMsg(ep, self.schema.I32)
print(f"Got {result}")
assert (result.i == data)
def write_3bytes(self, ep):
r = random.randrange(0, 2**24)
data = r.to_bytes(3, 'big')
print(f'Sending: {binascii.hexlify(data)}')
ep.send(self.schema.UntypedData.new_message(data=data)).wait()
return data
def read_3bytes(self, ep):
dataMsg = self.readMsg(ep, self.schema.UntypedData)
data = dataMsg.data
print(binascii.hexlify(data))
return data
def test_3bytes(self, num_msgs=50):
ep = self.openEP()
print("Testing writes")
dataSent = list()
for _ in range(num_msgs):
dataSent.append(self.write_3bytes(ep))
print()
print("Testing reads")
dataRecv = list()
for _ in range(num_msgs):
dataRecv.append(self.read_3bytes(ep))
ep.close().wait()
assert dataSent == dataRecv
| 27.551724
| 72
| 0.6602
| 1,532
| 0.958698
| 0
| 0
| 0
| 0
| 0
| 0
| 178
| 0.111389
|
e13cf9268aca0f5ca5922030192f194f32c26039
| 48,282
|
py
|
Python
|
pcdsdevices/targets.py
|
christina-pino/pcdsdevices
|
c696093b33b252a5fe6ca020063216b0d062aa61
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2019-06-17T20:08:54.000Z
|
2022-01-11T17:55:21.000Z
|
pcdsdevices/targets.py
|
christina-pino/pcdsdevices
|
c696093b33b252a5fe6ca020063216b0d062aa61
|
[
"BSD-3-Clause-LBNL"
] | 757
|
2017-12-21T23:16:41.000Z
|
2022-03-31T22:56:06.000Z
|
pcdsdevices/targets.py
|
christina-pino/pcdsdevices
|
c696093b33b252a5fe6ca020063216b0d062aa61
|
[
"BSD-3-Clause-LBNL"
] | 38
|
2018-01-26T00:01:35.000Z
|
2022-02-17T00:48:55.000Z
|
"""
Module for common target stage stack configurations.
"""
import logging
import numpy as np
from datetime import datetime
import os
from ophyd.device import Device
import json
import jsonschema
import yaml
from itertools import chain
from pcdsdevices.epics_motor import _GetMotorClass
from .interface import tweak_base
logger = logging.getLogger(__name__)
def StageStack(mdict, name):
"""
    Convenience function for generating a stage stack device. Intended for
    bundling various motors into a single object. The function takes a
    dictionary of PVs and/or previously instantiated motor objects and bundles
    them together. If given a PV, the factory function attempts to determine
    the appropriate motor class from the given base PV; if this fails then it
    will attempt to create an EpicsMotor. Axes are given the same name as they
    are assigned in the provided dictionary. See examples below.
Parameters
----------
mdict : dictionary
        Dictionary of motor objects and/or base PVs.
name : str
Name for the stack. Used to make a class name. No whitespace.
Examples
--------
# Make a classic XYZ stack with two PVs and one motor object
d = {'x': 'TST:MMS:01', 'y': 'TST:MMS:02', 'z': z_motor}
xyz = StageStack(d, 'my_xyz')
"""
cpts = {}
for mname, mitem in mdict.items():
# Check if this is a PV or motor object
if issubclass(type(mitem), Device): # Motor object
cpts[mname] = mitem
elif isinstance(mitem, (str)): # PV
mcls = _GetMotorClass(mitem)
cpt = mcls(prefix=mitem, name=mname)
cpts[mname] = cpt
else: # Something is wrong
logger.warning("Unrecognized input {}. "
"Skipping axis {}.".format(mitem, mname))
cls_name = name + '_StageStack'
cls = type(cls_name, (object,), cpts)
dev = cls()
return dev
# Internal class
class GridAxis():
"""
Class for axes that move in regularly spaced intervals.
"""
def __init__(self, stage, spacing):
assert isinstance(spacing, float), "Specify a float target spacing"
self.stage = stage
self.spacing = spacing
def advance(self, nspaces, direction, wait=False):
"""
Move the grid axis in the specified direction by (n_spacings * spacing)
"""
assert direction in [1, -1], "Direction must be 1 or -1"
        assert nspaces >= 1, "nspaces must be >= 1"
curr_pos = self.stage.wm()
next_pos = curr_pos + direction*(nspaces*self.spacing)
self.stage.mv(next_pos, wait=wait)
class XYTargetGrid():
"""
Class methods for managing a target grid oriented normal to the beam, with
regular X-Y spacing between targets.
Parameters
----------
x : Ophyd motor object
        X stage (LUSI coords.) to be used in the stack
y : Ophyd motor object
        Y stage (LUSI coords.) to be used in the stack
x_init : float
The initial position for the x motor. Defines the x-position of target
'zero'.
x_spacing : float
The nominal spacing between targets in the x-direction.
x_comp : float (optional)
A parameter to account for skew in target position due to non-ideal
mounting. This skew is assumed to be identical between targets.
y_init : float
The initial position for the y motor. Defines the y-position of target
'zero'.
y_spacing : float
The nominal spacing between targets in the y-direction.
y_comp : float (optional)
A parameter to account for skew in target position due to non-ideal
mounting. This skew is assumed to be identical between targets.
Examples
--------
# Make a target stack with targets spaced in a 1.0mm square grid, starting
# at motor positions (0.0, 0.0), with no skew.
xygrid = XYTargetGrid(x=x_motor, y=y_motor, x_init=0.0, y_init=0.0,
x_spacing=1.0, y_spacing=1.0)
# Make a target stack as above, but with some skew in x and y.
xygrid = XYTargetGrid(x=x_motor, y=y_motor, x_init=0.0, y_init=0.0,
x_spacing=1.0, y_spacing=1.0, x_comp=0.05,
y_comp=0.01)
"""
def __init__(self, x=None, y=None, x_init=None, x_spacing=None,
x_comp=0.0, y_init=None, y_spacing=None, y_comp=0.0,
name=None):
self.x_init = x_init
self.x_spacing = x_spacing
self.x_comp = x_comp
self.y_init = y_init
self.y_spacing = y_spacing
self.y_comp = y_comp
d = {'x': x, 'y': y}
self._stack = StageStack(d, name)
self.x = self._stack.x
self._xgrid = GridAxis(self.x, self.x_spacing)
self.y = self._stack.y
self._ygrid = GridAxis(self.y, self.y_spacing)
# Treat compensation axes like additional grid axes
if self.x_comp:
self._x_comp_axis = GridAxis(self.x, self.x_comp)
if self.y_comp:
self._y_comp_axis = GridAxis(self.y, self.y_comp)
def wm(self):
"""
Return current position of X and Y axes as a dictionary, i.e.
{x: <x_position>, y: <y_position>}.
"""
return {'x': self.x.wm(), 'y': self.y.wm()}
def reset(self, wait=False):
"""
Return to the defined initial position (x_init, y_init). Should be
called during experiment setup prior to using other class methods to
initialize the position.
Parameters:
-----------
wait : bool (default = False)
Flag to wait for movement to complete before returning.
"""
self.x.mv(self.x_init, wait=wait)
self.y.mv(self.y_init, wait=wait)
def next(self, nspaces=1, wait=False):
"""
Move forward (in X) by specified integer number of targets.
Parameters:
-----------
wait : bool (default = False)
Flag to wait for movement to complete before returning.
nspaces : int (default = 1)
Number of spaces to move "forward" on x-axis.
"""
self._xgrid.advance(nspaces, 1, wait=wait)
if self.y_comp:
self._y_comp_axis.advance(nspaces, 1, wait=wait)
def back(self, nspaces=1, wait=False):
"""
Move backward (in X) by specified integer number of targets.
Parameters:
-----------
wait : bool (default = False)
Flag to wait for movement to complete before returning.
nspaces : int (default = 1)
Number of spaces to move "backward" on x-axis.
"""
self._xgrid.advance(nspaces, -1, wait=wait)
if self.y_comp:
self._y_comp_axis.advance(nspaces, -1, wait=wait)
def up(self, nspaces=1, wait=False):
"""
Move to higher target position by specified integer number of targets
(stage moves down).
Parameters:
-----------
wait : bool (default = False)
Flag to wait for movement to complete before returning.
nspaces : int (default = 1)
Number of spaces to move "up" on y-axis.
"""
self._ygrid.advance(nspaces, 1, wait=wait)
if self.x_comp:
self._x_comp_axis.advance(nspaces, 1, wait=wait)
def down(self, nspaces=1, wait=False):
"""
Move to lower target position by specified integer number of targets
(stage moves up).
Parameters:
-----------
wait : bool (default = False)
Flag to wait for movement to complete before returning.
nspaces : int (default = 1)
Number of spaces to move "down" on y-axis.
"""
self._ygrid.advance(nspaces, -1, wait=wait)
if self.x_comp:
self._x_comp_axis.advance(nspaces, -1, wait=wait)
def move(self, nxspaces, nyspaces, wait=False):
"""
Move to a specific target position given by (xspaces, yspaces)
from the defined initial position. Includes compensation if defined.
Parameters:
-----------
wait : bool (default = False)
Flag to wait for movement to complete before returning.
nxspaces : int (default = 1)
Number of spaces to move on x-axis.
nyspaces : int (default = 1)
Number of spaces to move on y-axis.
"""
xpos = self.x_init + self.x_spacing*nxspaces + self.x_comp*nyspaces
ypos = self.y_init + self.y_spacing*nyspaces + self.y_comp*nxspaces
self.x.mv(xpos, wait=wait)
self.y.mv(ypos, wait=wait)
class XYGridStage():
"""
Class that helps support multiple samples on a mount for an XY Grid setup.
We could have multiple samples mounted in a setup. This class helps
figuring out the samples xy coordinates for each target on the grid,
maps points accordingly, and saves those records in a file.
Parameters
----------
    name : str, optional
Name of the XYGridStage object.
x_motor : str or motor object
Epics PV prefix for x motor, or a motor object.
y_motor : str or motor object
Epics PV prefix for y motor, or a motor object.
m_points : int
Number of rows the grid has, used to determine the coordinate
points, where for e.g.: `(0, m_points)` would represent the top right
corner of the desired sample grid.
n_points : int
Number of columns the grid has, used to determine the coordinate
points, where for e.g.: `(n_points, 0)` would represent the bottom
left corner of the desired sample grid.
path : str
Path to an `yaml` file where to save the grid patterns for
different samples.
"""
    sample_schema = json.loads("""
    {
        "type": "object",
        "properties": {
            "time_created": {"type": "string"},
            "top_left": {"type": "array", "items": {"type": "number"}},
            "top_right": {"type": "array", "items": {"type": "number"}},
            "bottom_right": {"type": "array", "items": {"type": "number"}},
            "bottom_left": {"type": "array", "items": {"type": "number"}},
            "M": {"type": "number"},
            "N": {"type": "number"},
            "coefficients": {"type": "array", "items": {"type": "number"}},
            "xx" : {"type": "array", "items": {"type": "object"}},
            "yy" : {"type": "array", "items": {"type": "object"}}
        },
        "required": ["time_created", "top_left", "top_right", "bottom_right",
                     "bottom_left", "coefficients", "xx", "yy"],
        "additionalProperties": true
    }
    """)
def __init__(self, x_motor, y_motor, m_points, n_points, path):
self._path = path
self._m_points = m_points
self._n_points = n_points
d = {'x': x_motor, 'y': y_motor}
self._stack = StageStack(d, 'xy_stage_grid')
self.x = self._stack.x
self.y = self._stack.y
# TODO: assert here for a valid path, also valid yaml file
# assert os.path.exists(path)
self._coefficients = []
self._current_sample = ''
self._positions_x = []
self._positions_y = []
@property
def m_n_points(self):
"""
Get the current m and n points.
The m and n points represent the number of grid points on the current
grid, `m` -> representing the number of rows, and `n` -> representing
the number of columns.
Returns
-------
m_points, n_points : tuple
The number of grid points on the x and y axis.
E.g.: `10, 5` -> 10 rows by 5 columns grid.
"""
return self._m_points, self._n_points
@m_n_points.setter
def m_n_points(self, m_n_values):
"""
Set m and n points.
        Set the m value representing the number of rows of the grid, and the
        n value representing the number of columns.
Parameters
----------
m_n_values : tuple
The number of grid points on the x and y axis respectively.
Examples
--------
>>> xy_grid.m_n_points = 10, 10
"""
try:
self._m_points, self._n_points = m_n_values
except Exception:
err_msg = ("Please pass an iterable with two items for m points"
" and n points respectively.")
raise Exception(err_msg)
@property
def coefficients(self):
"""
Get the current coefficients if any.
These coefficients are calculated from the projective transformation.
        Knowing the coefficients, the x and y values can be determined.
Returns
-------
coefficients : list
Array of 8 projective transformation coefficients.
First 4 -> alpha, last 4 -> beta
"""
return self._coefficients
@coefficients.setter
def coefficients(self, coefficients):
"""
Set the current coefficients.
These coefficients are calculated from the projective transformation.
        Knowing the coefficients, the x and y values can be determined.
Parameters
----------
coefficients : array
Array of 8 projective transformation coefficients.
First 4 -> alpha, last 4 -> beta
"""
self._coefficients = coefficients
@property
def positions_x(self):
"""
Get the current mapped x positions if any.
These positions are set when `map_points` method is called.
Returns
-------
positions_x : list
List of all target x positions mapped for a sample.
"""
return self._positions_x
@property
def positions_y(self):
"""
Get the current mapped y positions if any.
These positions are set when `map_points` method is called.
Returns
-------
positions_y : list
List of all target y positions mapped for a sample.
"""
return self._positions_y
@positions_x.setter
def positions_x(self, x_positions):
"""
Set the x positions.
        These positions are saved in the sample file when the `save_grid`
        method is called.
Parameters
----------
x_positions : list
List of all the x positions.
"""
self._positions_x = x_positions
@positions_y.setter
def positions_y(self, y_positions):
"""
Set the y positions.
        These positions are saved in the sample file when the `save_grid`
        method is called.
Parameters
----------
y_positions : list
List of all the y positions.
"""
self._positions_y = y_positions
def tweak(self):
"""
Call the tweak function from `pcdsdevice.interface`.
Use the Left arrow to move x motor left.
Use the Right arrow to move x motor right.
Use the Down arrow to move y motor down.
Use the Up arrow to move y motor up.
Use Shift & Up arrow to scale*2.
Use Shift & Down arrow to scale/2.
Press q to quit.
"""
tweak_base(self.x, self.y)
def set_presets(self):
"""
Save four preset coordinate points.
These are the coordinates from the four corners of the
        wanted/defined grid. The points for these coordinates should be taken
        from the middle of the four targets that enclose the grid.
The user will be asked to define the coordinates using the `tweak`
method.
Examples
--------
# Press q when ready to save the coordinates
>>> xy.set_presets()
Setting coordinates for (0, 0) top left corner:
0.0000, : 0.0000, scale: 0.1
Setting coordinates for (0, M) top right corner:
10.0000, : 0.0000, scale: 0.1
Setting coordinates for (N, M) bottom right corner:
10.0000, : -10.0000, scale: 0.1
Setting coordinates for (N, 0) bottom left corner:
-0.0000, : -10.0000, scale: 0.1
"""
        # check to see that the presets are set up
if not hasattr(self.x.presets, 'add_hutch'):
raise AttributeError('No folder setup for motor presets. '
'Please add a location to save the positions '
'to, using setup_preset_paths from '
'pcdsdevices.interface to save the position.')
print('\nSetting coordinates for (0, 0) top left corner: \n')
self.tweak()
pos = [self.x.position, self.y.position]
print('\nSetting coordinates for (0, M) top right corner: \n')
self.tweak()
pos.extend([self.x.position, self.y.position])
print('\nSetting coordinates for (N, M) bottom right corner: \n')
self.tweak()
pos.extend([self.x.position, self.y.position])
print('\nSetting coordinates for (N, 0) bottom left corner: \n')
self.tweak()
pos.extend([self.x.position, self.y.position])
# create presets
# corner (0, 0)
self.x.presets.add_hutch(value=pos[0], name="x_top_left")
self.y.presets.add_hutch(value=pos[1], name="y_top_left")
# corner (0, M)
self.x.presets.add_hutch(value=pos[2], name="x_top_right")
self.y.presets.add_hutch(value=pos[3], name="y_top_right")
# corner (M, N)
self.x.presets.add_hutch(value=pos[4], name="x_bottom_right")
self.y.presets.add_hutch(value=pos[5], name="y_bottom_right")
# corner (N, 0)
self.x.presets.add_hutch(value=pos[6], name="x_bottom_left")
self.y.presets.add_hutch(value=pos[7], name="y_bottom_left")
def get_presets(self):
"""
Get the saved presets if any.
Examples
--------
>>> xy.get_presets()
((0, 0),
(9.99999999999998, 0),
(9.99999999999998, -9.99999999999998),
(-6.38378239159465e-16, -9.99999999999998))
Returns
-------
coord : tuple
Four coordinate positions.
(top_left, top_right, bottom_right, bottom_left)
"""
try:
top_left = (self.x.presets.positions.x_top_left.pos,
self.y.presets.positions.y_top_left.pos)
# corner (0, M)
top_right = (self.x.presets.positions.x_top_right.pos,
self.y.presets.positions.y_top_right.pos)
# corner (M, N)
bottom_right = (self.x.presets.positions.x_bottom_right.pos,
self.y.presets.positions.y_bottom_right.pos)
# corner (N, 0)
bottom_left = (self.x.presets.positions.x_bottom_left.pos,
self.y.presets.positions.y_bottom_left.pos)
return top_left, top_right, bottom_right, bottom_left
except Exception:
logger.warning('Could not get presets, try to set_presets.')
def get_samples(self, path=None):
"""
Get all the available sample grids names that are currently saved.
Returns
-------
samples : list
List of strings of all the sample names available.
"""
samples = []
path = path or self._path
with os.scandir(path) as entries:
for entry in entries:
if entry.is_file():
samples.append(entry.name.split('.yml')[0])
return samples
@property
def current_sample(self):
"""
Get the current sample that is loaded.
Returns
-------
sample : dict
Dictionary with current sample information.
"""
return self._current_sample
@current_sample.setter
def current_sample(self, sample_name):
"""
Set the current sample.
Parameters
----------
sample_name : str
The name of the sample to be set as current one.
"""
self._current_sample = str(sample_name)
@property
def status(self):
x_index = ''
y_index = ''
x_pos = self.x.position
y_pos = self.y.position
data = self.get_sample_data(self.current_sample)
try:
xx = data['xx']
yy = data['yy']
x_index = next((index for (index, d) in enumerate(xx)
if np.isclose(d["pos"], x_pos)))
y_index = next((index for (index, d) in enumerate(yy)
if np.isclose(d["pos"], y_pos)))
except Exception:
logger.warning('Could not determine the m n points from position.')
        n_points = self.m_n_points[1]
        if x_index != '':
            x_index += 1
            m = int(np.ceil(x_index / n_points))
            res = np.mod(x_index, 2 * n_points)
            if res == 0:
                n = 1
            elif res <= n_points:
                n = res
            else:
                n = 2 * n_points - (res + 1)
            # to start from 1 instead of 0
            x_index = n
            if y_index != '':
                y_index = m
lines = []
sample = f'current_sample: {self.current_sample}'
grid = f'grid M x N: {self.m_n_points}'
m_n = f'current m, n : {y_index, x_index}'
lines.extend([sample, grid, m_n])
print('\n'.join(lines))
@property
def current_sample_path(self):
"""
Get the current path for the sample that is loaded.
Returns
-------
sample: dict
Dictionary with current sample information.
"""
if self._current_sample != '':
return os.path.join(self._path, self._current_sample + '.yml')
raise ValueError('No current sample loaded, please use load() first.')
def load(self, sample_name, path=None):
"""
Get the sample information and populate these parameters.
        This function populates the parameters for the sample just loaded: it
        sets the current `coefficients` and the current `m, n` values, and
        makes the loaded sample the current one.
Parameters
----------
sample_name : str
Name of the sample to load.
path : str, optional
Path where the samples yaml file exists.
"""
path = path or self._path
entry = os.path.join(path, sample_name + '.yml')
m_points, n_points, coeffs = self.get_sample_map_info(
str(sample_name), path=entry)
self.m_n_points = m_points, n_points
self.coefficients = coeffs
# make this sample the current one
self.current_sample = str(sample_name)
def get_sample_data(self, sample_name, path=None):
"""
Get the information for a saved sample.
Parameters
----------
sample_name : str
            The sample name that we want the grid for. To see the currently
            available samples call the `get_samples()` method.
path : str, optional
Path to the `.yml` file. Defaults to the path defined when
creating this object.
Returns
-------
data : dictionary
Dictionary of all the information for a saved sample, or empty
dictionary if troubles getting the sample.
Examples
--------
>>> get_sample('sample1')
{'time_created': '2021-01-06 11:43:40.701095',
'top_left': [0, 0],
'top_right': [4.0, -1.0],
'bottom_right': [4.4, -3.5],
'bottom_left': [1.0, -3.0],
'M': 10,
'N': 10,
'coefficients': [1.1686746987951824,
-0.3855421686746996,
-9.730859023513261e-15,
-0.29216867469879476,
1.1566265060240974,
6.281563288265657e-16,
0.042168674698794054,
-0.05220883534136586],
xx:
...
yy:
...}
"""
path = path or os.path.join(self._path, sample_name + '.yml')
data = None
with open(path) as sample_file:
try:
data = yaml.safe_load(sample_file)
except yaml.YAMLError as err:
logger.error('Error when loading the samples yaml file: %s',
err)
raise err
if data is None:
logger.warning('The file is empty, no sample grid yet. '
'Please use `save_presets` to insert grids '
'in the file.')
return {}
try:
return data[str(sample_name)]
except Exception:
logger.error('The sample %s might not exist in the file.',
sample_name)
return {}
def get_sample_map_info(self, sample_name, path=None):
"""
Given a sample name, get the m and n points, as well as the coeffs.
Parameters
----------
sample_name : str
            The name of the sample to get the mapped points from. To see the
            available samples call the `get_samples()` method.
path : str, optional
Path to the samples yaml file.
"""
path = path or os.path.join(self._path, sample_name + '.yml')
sample = self.get_sample_data(str(sample_name), path=path)
coeffs = []
m_points, n_points = 0, 0
if sample:
try:
coeffs = sample["coefficients"]
m_points = sample['M']
n_points = sample['N']
except Exception as ex:
logger.error('Something went wrong when getting the '
'information for sample %s. %s', sample_name, ex)
raise ex
else:
err_msg = ('This sample probably does not exist. Please call'
' mapped_samples() to see which ones are available.')
logger.error(err_msg)
raise Exception(err_msg)
return m_points, n_points, coeffs
def save_grid(self, sample_name, path=None):
"""
Save a grid file of mapped points for a sample.
This will save the date it was created, along with the sample name,
the m and n points, the coordinates for the four corners, and the
coefficients that will help get the x and y position on the grid.
        If an existing sample name is saved again, the information in that
        sample file is overwritten, but the status of the targets is kept.
        Overwriting a sample assumes that a re-calibration was needed for that
        sample, so if targets from that sample have already been shot we want
        to keep track of that.
Parameters
----------
sample_name : str
A name to identify the sample grid, should be snake_case style.
path : str, optional
Path to the sample folder where this sample will be saved.
Defaults to the path defined when creating this object.
Examples
--------
>>> save_grid('sample_1')
"""
path = path or self._path
entry = os.path.join(path, sample_name + '.yml')
now = str(datetime.now())
top_left, top_right, bottom_right, bottom_left = [], [], [], []
if self.get_presets():
top_left, top_right, bottom_right, bottom_left = self.get_presets()
xx, yy = self.positions_x, self.positions_y
flat_xx, flat_yy = [], []
if xx and yy:
flat_xx = [float(x) for x in xx]
flat_yy = [float(y) for y in yy]
# add False to each target to indicate they
# have not been shot yet
flat_xx = [{"pos": x, "status": False} for x in flat_xx]
flat_yy = [{"pos": y, "status": False} for y in flat_yy]
m_points, n_points = self.m_n_points
coefficients = self.coefficients
data = {sample_name: {"time_created": now,
"top_left": list(top_left),
"top_right": list(top_right),
"bottom_right": list(bottom_right),
"bottom_left": list(bottom_left),
"M": m_points, # number of rows
"N": n_points, # number of columns
"coefficients": coefficients,
"xx": flat_xx,
"yy": flat_yy}}
try:
jsonschema.validate(data[sample_name], self.sample_schema)
except jsonschema.exceptions.ValidationError as err:
logger.warning('Invalid input: %s', err)
raise err
# entry = os.path.join(path, sample_name + '.yml')
        # if this is an existing file, overwrite the info but keep the statuses
if os.path.isfile(entry):
with open(entry) as sample_file:
yaml_dict = yaml.safe_load(sample_file)
sample = yaml_dict[sample_name]
# when overriding the same sample, this is assuming that a
# re-calibration was done - so keep the previous statuses.
temp_xx = sample['xx']
temp_yy = sample['yy']
temp_x_status = [i['status'] for i in temp_xx]
temp_y_status = [i['status'] for i in temp_yy]
# update the current data statuses with previous ones
for xd, status in zip(data[sample_name]['xx'], temp_x_status):
xd.update((k, status)
for k, v in xd.items() if k == 'status')
for yd, status in zip(data[sample_name]['yy'], temp_y_status):
yd.update((k, status)
for k, v in yd.items() if k == 'status')
yaml_dict.update(data)
with open(entry, 'w') as sample_file:
yaml.safe_dump(data, sample_file,
sort_keys=False, default_flow_style=False)
else:
# create a new file
with open(entry, 'w') as sample_file:
yaml.safe_dump(data, sample_file,
sort_keys=False, default_flow_style=False)
def reset_statuses(self, sample_name, path=None):
"""
Reset the statuses to `False` for the sample targets.
Parameters
----------
sample_name : str
A name to identify the sample grid, should be snake_case style.
path : str, optional
Path to the `.yml` file. Defaults to the path defined when
creating this object.
"""
path = path or os.path.join(self._path, sample_name + '.yml')
with open(path) as sample_file:
yaml_dict = yaml.safe_load(sample_file) or {}
sample = yaml_dict.get(sample_name)
if sample:
for xd in sample.get('xx'):
xd.update((k, False)
for k, v in xd.items() if k == 'status')
for yd in sample.get('yy'):
yd.update((k, False)
for k, v in yd.items() if k == 'status')
yaml_dict[sample_name].update(sample)
else:
raise ValueError('Could not find this sample name in the file:'
f' {sample}')
with open(path, 'w') as sample_file:
yaml.safe_dump(yaml_dict, sample_file,
sort_keys=False, default_flow_style=False)
def map_points(self, snake_like=True, top_left=None, top_right=None,
bottom_right=None, bottom_left=None, m_rows=None,
n_columns=None):
"""
Map the points of a quadrilateral.
Given the 4 corners coordinates of a grid, and the numbers of rows and
columns, map all the sample positions in 2-d coordinates.
Parameters
----------
snake_like : bool
Indicates if the points should be saved in a snake_like pattern.
top_left : tuple, optional
(x, y) coordinates of the top left corner
top_right : tuple, optional
(x, y) coordinates of the top right corner
bottom_right : tuple, optional
(x, y) coordinates of the bottom right corner
bottom_left : tuple, optional
(x, y) coordinates of the bottom left corner
m_rows : int, optional
Number of rows the grid has.
n_columns : int, optional
Number of columns the grid has.
Returns
-------
xx, yy : tuple
Tuple of two lists with all mapped points for x and y positions in
the grid.
"""
top_left = top_left or self.get_presets()[0]
top_right = top_right or self.get_presets()[1]
bottom_right = bottom_right or self.get_presets()[2]
bottom_left = bottom_left or self.get_presets()[3]
if any(v is None for v in [top_left, top_right, bottom_right,
bottom_left]):
raise ValueError('Could not get presets, make sure you set presets'
' first using the `set_presets` method.')
rows = m_rows or self.m_n_points[0]
columns = n_columns or self.m_n_points[1]
a_coeffs, b_coeffs = mesh_interpolation(top_left, top_right,
bottom_right, bottom_left)
self.coefficients = a_coeffs.tolist() + b_coeffs.tolist()
x_points, y_points = [], []
xx, yy = get_unit_meshgrid(m_rows=rows, n_columns=columns)
# return x_points, y_points
for rowx, rowy in zip(xx, yy):
for x, y in zip(rowx, rowy):
i, j = convert_to_physical(a_coeffs=a_coeffs,
b_coeffs=b_coeffs,
logic_x=x, logic_y=y)
x_points.append(i)
y_points.append(j)
if snake_like:
x_points = snake_grid_list(
np.array(x_points).reshape(rows, columns))
y_points = snake_grid_list(
np.array(y_points).reshape(rows, columns))
self.positions_x = x_points
self.positions_y = y_points
return x_points, y_points
def is_target_shot(self, m, n, sample=None, path=None):
"""
Check to see if the target position at MxN is shot.
Parameters
----------
        sample : str, optional
            The name of the sample to get the mapped points from. To see the
            available samples call the `get_samples()` method.
        m : int
            Represents the row value of the point we want the position for.
        n : int
            Represents the column value of the point we want the position for.
        path : str, optional
            Sample path.
Returns
-------
        is_shot : bool
            Indicates whether the target has been shot or not.
"""
sample = sample or self.current_sample
path = path or self.current_sample_path
x, y = self.compute_mapped_point(m_row=m,
n_column=n,
sample_name=sample, path=path)
data = self.get_sample_data(sample)
xx = data.get('xx')
x_status = None
        # checking the x value alone should be enough
        # TODO: this assumes that the x position uniquely identifies a target.
if xx is not None:
x_status = next((item['status']
for item in xx if item['pos'] == x), None)
return x_status
def compute_mapped_point(self, m_row, n_column, sample_name=None,
path=None, compute_all=False):
"""
        For a given sample, compute the x, y position for M and N respectively.
Parameters
----------
sample_name : str
            The name of the sample to get the mapped points from. To see the
            available samples call the `get_samples()` method.
        m_row : int
            Represents the row value of the point we want the position for.
        n_column : int
            Represents the column value of the point we want the position for.
compute_all : boolean, optional
If `True` all the point positions will be computed for this sample.
path : str, optional
Path to the samples yaml file.
Returns
-------
x, y : tuple
The x, y position for m n location.
"""
path = path or self._path
sample_name = sample_name or self.current_sample
if sample_name is None or sample_name == '':
raise ValueError(
'Please make sure you provide a sample name or use load()')
# if we have a current loaded sample, use the current M, N values and
# current coefficients
if self.current_sample != '':
m_points, n_points = self.m_n_points
coeffs = self.coefficients
else:
# try to get them from the sample_name file
entry = os.path.join(path, sample_name + '.yml')
m_points, n_points, coeffs = self.get_sample_map_info(
str(sample_name), path=entry)
if any(v is None for v in [m_points, n_points, coeffs]):
            raise ValueError('Some values are empty, please check that the '
                             f'sample {sample_name} file has the M and N '
                             'values as well as the coefficients saved.')
if (m_row > m_points) or (n_column > n_points):
raise IndexError('Index out of range, make sure the m and n values'
f' are between ({m_points, n_points})')
        if m_row == 0 or n_column == 0:
raise IndexError('Please start at 1, 1, as the initial points.')
xx_origin, yy_origin = get_unit_meshgrid(m_rows=m_points,
n_columns=n_points)
a_coeffs = coeffs[:4]
b_coeffs = coeffs[4:]
if not compute_all:
logic_x = xx_origin[m_row - 1][n_column - 1]
logic_y = yy_origin[m_row - 1][n_column - 1]
x, y = convert_to_physical(a_coeffs, b_coeffs, logic_x, logic_y)
return x, y
else:
# compute all points
x_points, y_points = [], []
for rowx, rowy in zip(xx_origin, yy_origin):
for x, y in zip(rowx, rowy):
i, j = convert_to_physical(a_coeffs=a_coeffs,
b_coeffs=b_coeffs,
logic_x=x, logic_y=y)
x_points.append(i)
y_points.append(j)
return x_points, y_points
def move_to_sample(self, m, n):
"""
Move x,y motors to the computed positions of n, m of current sample.
Given m (row) and n (column), compute the positions for x and y based
on the current sample's parameters. See `current_sample` and move
the x and y motor to those positions.
Parameters
----------
m : int
Indicates the row on the grid.
n : int
Indicates the column on the grid.
"""
sample_name = self.current_sample
if sample_name:
            x, y = self.compute_mapped_point(m_row=m, n_column=n)
            self.x.mv(x)
            self.y.mv(y)
def move_to(self, sample, m, n):
"""
Move x,y motors to the computed positions of n, m of given sample.
        Given m (row) and n (column), compute the positions for x and y based
        on the given sample's parameters.
        Parameters
        ----------
        sample : str
            The name of the sample whose saved grid is used.
        m : int
            Indicates the row on the grid.
        n : int
            Indicates the column on the grid.
"""
entry = os.path.join(self._path, sample + '.yml')
        x, y = self.compute_mapped_point(m_row=m, n_column=n,
                                         sample_name=sample, path=entry)
        self.x.mv(x)
        self.y.mv(y)
def set_status(self, m, n, status=False, sample_name=None, path=None):
"""
TODO not working properly yet
Set the status for a specific m and n point.
        Parameters
        ----------
m : int
Indicates the row number starting at 1.
n : int
Indicates the column number starting at 1.
        status : bool, optional
            `True` to indicate that the target has been shot, `False` to mark
            it as available.
"""
assert isinstance(status, bool)
sample_name = sample_name or self.current_sample
path = path or os.path.join(self._path, sample_name + '.yml')
m_points, n_points = self.m_n_points
if (m > m_points) or (n > n_points):
raise IndexError('Index out of range, make sure the m and n values'
f' are between ({m_points, n_points})')
        if m == 0 or n == 0:
raise IndexError('Please start at 1, 1, as the initial points.')
with open(path) as sample_file:
yaml_dict = yaml.safe_load(sample_file) or {}
sample = yaml_dict.get(sample_name)
if sample:
xx = sample['xx']
yy = sample['yy']
n_pos = next(d['pos'] for (index, d) in enumerate(xx)
if index == n - 1)
m_pos = next(d['pos'] for (index, d) in enumerate(yy)
if index == m - 1)
for xd in sample.get('xx'):
for k, v in xd.items():
if k == 'pos' and v == n_pos:
xd.update((st, status)
for st, vv in xd.items()
if st == 'status')
for yd in sample.get('yy'):
for k, v in yd.items():
if k == 'pos' and v == m_pos:
                        yd.update((st, status)
                                  for st, vv in yd.items()
                                  if st == 'status')
yaml_dict[sample_name].update(sample)
else:
raise ValueError('Could not find this sample name in the file:'
f' {sample}')
with open(path, 'w') as sample_file:
yaml.safe_dump(yaml_dict, sample_file,
sort_keys=False, default_flow_style=False)
def mesh_interpolation(top_left, top_right, bottom_right, bottom_left):
"""
Mapping functions for an arbitrary quadrilateral.
Reference: https://www.particleincell.com/2012/quad-interpolation/
In order to perform the interpolation on an arbitrary quad, we need to
obtain a mapping function. Our goal is to come up with a function such
as (x, y) = f(l, m) where l = [0, 1] and m = [0, 1] describes the
entire point space enclosed by the quadrilateral. In addition, we want
f(0, 0) = (x1, y1), f(1, 0) = (x2, y2) and so on to correspond to the
polygon vertices. This function forms a map that allows us to
transform the quad from the physical coordinates set to a logical
coordinate space. In the logical coordinates, the polygon morphs into
a square, regardless of its physical form. Once the logical
coordinates are obtained, we perform the scatter and find the
physical x, y values.
    To find the map, we assume a bilinear mapping function given by:
        x = alpha_1 + alpha_2*l + alpha_3*m + alpha_4*l*m
        y = beta_1 + beta_2*l + beta_3*m + beta_4*l*m
    Next we use these expressions to solve for the 4 coefficients:
        [x1]   [1 0 0 0] [alpha_1]
        [x2] = [1 1 0 0] [alpha_2]
        [x3]   [1 1 1 1] [alpha_3]
        [x4]   [1 0 1 0] [alpha_4]
    We do the same for the beta coefficients.
Parameters
----------
top_left : tuple
(x, y) coordinates of the top left corner
top_right : tuple
(x, y) coordinates of the top right corner
bottom_right : tuple
(x, y) coordinates of the bottom right corner
bottom_left : tuple
(x, y) coordinates of the bottom left corner
Returns
-------
a_coeffs, b_coeffs : tuple
List of tuples with the alpha and beta coefficients for projective
transformation. They are used to find x and y.
"""
# describes the entire point space enclosed by the quadrilateral
unit_grid = np.array([[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 1],
[1, 0, 1, 0]])
# x value coordinates for current grid (4 corners)
px = np.array([top_left[0],
top_right[0],
bottom_right[0],
bottom_left[0]])
# y value coordinates for current grid (4 corners)
py = np.array([top_left[1],
top_right[1],
bottom_right[1],
bottom_left[1]])
a_coeffs = np.linalg.solve(unit_grid, px)
b_coeffs = np.linalg.solve(unit_grid, py)
return a_coeffs, b_coeffs
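# Worked example (an added illustration, not part of the original module): for
# the unit square itself the mapping is the identity, so
#   top_left=(0, 0), top_right=(1, 0), bottom_right=(1, 1), bottom_left=(0, 1)
# yields a_coeffs = [0, 1, 0, 0] and b_coeffs = [0, 0, 1, 0], i.e. x = l, y = m.
def _mesh_interpolation_example():
    """Minimal sketch verifying the identity mapping on the unit square."""
    a, b = mesh_interpolation((0, 0), (1, 0), (1, 1), (0, 1))
    assert np.allclose(a, [0, 1, 0, 0])
    assert np.allclose(b, [0, 0, 1, 0])
    # a mid-grid logical point maps onto itself
    assert np.allclose(convert_to_physical(a, b, 0.5, 0.5), (0.5, 0.5))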
def get_unit_meshgrid(m_rows, n_columns):
"""
Based on the 4 coordinates and m and n points, find the meshgrid.
Regardless of the physical form of our polygon, we first need to morph
it into a unit square.
Parameters
----------
m_rows : int
Number of rows our grid has.
n_columns : int
Number of columns our grid has.
"""
px = [0, 1, 1, 0]
py = [0, 0, 1, 1]
x0 = min(px)
lx = max(px) - min(px)
y0 = min(py)
ly = max(py) - min(py)
ni = n_columns
nj = m_rows
dx = lx / (ni - 1)
dy = ly / (nj - 1)
xx = [x0 + (i - 1) * dx for i in range(1, ni + 1)]
yy = [y0 + (j - 1) * dy for j in range(1, nj + 1)]
return np.meshgrid(xx, yy)
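# Illustration (an added sketch, not part of the original module): for a 2-row
# by 3-column grid the logical coordinates are spread over the unit square,
# with columns along x and rows along y.
def _unit_meshgrid_example():
    """Minimal sketch of the logical grid for m_rows=2, n_columns=3."""
    xx, yy = get_unit_meshgrid(m_rows=2, n_columns=3)
    assert np.allclose(xx, [[0.0, 0.5, 1.0], [0.0, 0.5, 1.0]])
    assert np.allclose(yy, [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])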
def convert_to_physical(a_coeffs, b_coeffs, logic_x, logic_y):
"""
Convert to physical coordinates from logical coordinates.
Parameters
----------
a_coeffs : array
Perspective transformation coefficients for alpha.
b_coeffs : array
Perspective transformation coefficients for beta.
logic_x : float
Logical point in the x direction.
logic_y : float
Logical point in the y direction.
Returns
-------
x, y : tuple
The x and y physical values on the specified grid.
"""
# x = a(1) + a(2)*l + a(3)*m + a(4)*l*m
x = (a_coeffs[0] + a_coeffs[1] * logic_x + a_coeffs[2]
* logic_y + a_coeffs[3] * logic_x * logic_y)
# y = b(1) + b(2)*l + b(3)*m + b(4)*l*m
y = (b_coeffs[0] + b_coeffs[1] * logic_x +
b_coeffs[2] * logic_y + b_coeffs[3] * logic_x * logic_y)
return x, y
def snake_grid_list(points):
"""
    Flatten a 2-D array of grid points into a list following a snake-like
    pattern, e.g. [[1, 2], [3, 4]] => [1, 2, 4, 3].
Parameters
----------
points : array
Array containing the grid points for an axis with shape MxN.
Returns
-------
flat_points : list
        List of all the grid points following a snake-like pattern.
"""
temp_points = []
for i in range(points.shape[0]):
if i % 2 == 0:
temp_points.append(points[i])
else:
t = points[i]
tt = t[::-1]
temp_points.append(tt)
flat_points = list(chain.from_iterable(temp_points))
# convert the numpy.float64 to normal float to be able to easily
# save them in the yaml file
flat_points = [float(v) for v in flat_points]
return flat_points
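# Illustration (an added sketch, not part of the original module): every other
# row is reversed, so a stage visiting the targets in this order never has to
# jump back across a full row.
def _snake_grid_example():
    """Minimal sketch: a 3x3 grid flattened in snake order."""
    grid = np.arange(1, 10).reshape(3, 3)  # [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    assert snake_grid_list(grid) == [1.0, 2.0, 3.0, 6.0, 5.0, 4.0, 7.0, 8.0, 9.0]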
| 35.579956
| 79
| 0.558096
| 41,215
| 0.853631
| 0
| 0
| 5,893
| 0.122054
| 0
| 0
| 26,152
| 0.541651
|
e13d3df96caed4ad7bea9f68e21a31547457cf49
| 1,564
|
py
|
Python
|
release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/netcmd/time.py
|
zaion520/ATtomato
|
4d48bb79f8d147f89a568cf18da9e0edc41f93fb
|
[
"FSFAP"
] | 2
|
2019-01-13T09:16:31.000Z
|
2019-02-15T03:30:28.000Z
|
release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/netcmd/time.py
|
zaion520/ATtomato
|
4d48bb79f8d147f89a568cf18da9e0edc41f93fb
|
[
"FSFAP"
] | null | null | null |
release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/netcmd/time.py
|
zaion520/ATtomato
|
4d48bb79f8d147f89a568cf18da9e0edc41f93fb
|
[
"FSFAP"
] | 2
|
2020-03-08T01:58:25.000Z
|
2020-12-20T10:34:54.000Z
|
#!/usr/bin/env python
#
# time
#
# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import samba.getopt as options
import common
from samba.net import Net
from samba.netcmd import (
Command,
)
class cmd_time(Command):
"""Retrieve the time on a remote server [server connection needed]"""
synopsis = "%prog time <server-name>"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_args = ["server_name?"]
def run(self, server_name=None, credopts=None, sambaopts=None, versionopts=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)
net = Net(creds, lp, server=credopts.ipaddress)
if server_name is None:
server_name = common.netcmd_dnsname(lp)
print net.time(server_name)
| 32.583333
| 85
| 0.710997
| 720
| 0.460358
| 0
| 0
| 0
| 0
| 0
| 0
| 847
| 0.54156
|
e13edb5a04062ed656b823c80283871afa60af92
| 900
|
py
|
Python
|
tests/job/test_redis.py
|
ulule/bokchoy
|
58afaf325ce275edf5c4a955379afb1cc5eb5de3
|
[
"MIT"
] | null | null | null |
tests/job/test_redis.py
|
ulule/bokchoy
|
58afaf325ce275edf5c4a955379afb1cc5eb5de3
|
[
"MIT"
] | null | null | null |
tests/job/test_redis.py
|
ulule/bokchoy
|
58afaf325ce275edf5c4a955379afb1cc5eb5de3
|
[
"MIT"
] | null | null | null |
import unittest
import redis
import socket
import pytest
from bokchoy.conductors.dummy import DummyConductor
from bokchoy.results.redis import RedisResult
from bokchoy.serializers.json import JSONSerializer
from exam import fixture
from .base import JobTests
def redis_is_available():
try:
socket.create_connection(('127.0.0.1', 6379), 1.0)
except socket.error:
return False
else:
return True
requires_redis = pytest.mark.skipif(
not redis_is_available(),
    reason="requires redis server running")
@requires_redis
class RedisJobTests(JobTests, unittest.TestCase):
@fixture
def conductor(self):
return DummyConductor(serializer=self.serializer, result=self.result)
@fixture
def serializer(self):
return JSONSerializer()
@fixture
def result(self):
return RedisResult(client=redis.StrictRedis())
| 21.95122
| 77
| 0.728889
| 328
| 0.364444
| 0
| 0
| 344
| 0.382222
| 0
| 0
| 49
| 0.054444
|
e13fba4b45b4ccda568c26a9f752c38c0cf1cb17
| 97
|
py
|
Python
|
venv/lib/python3.8/site-packages/pip/_internal/network/__init__.py
|
realxwx/leetcode-solve
|
3a7d7d8e92a5fd5fecc347d141a1c532b92e763e
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/pip/_internal/network/__init__.py
|
realxwx/leetcode-solve
|
3a7d7d8e92a5fd5fecc347d141a1c532b92e763e
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/pip/_internal/network/__init__.py
|
realxwx/leetcode-solve
|
3a7d7d8e92a5fd5fecc347d141a1c532b92e763e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020
# Author: xiaoweixiang
"""Contains purely network-related utilities.
"""
| 16.166667
| 45
| 0.71134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 93
| 0.958763
|
e13fc219ca69c0c1e1bed3ebfc6ec504fbe94731
| 1,153
|
py
|
Python
|
server/global_config.py
|
CLG0125/elemesdk
|
344466398bad7cf026e082e47c77d3ca98621ef3
|
[
"MIT"
] | 1
|
2021-04-03T05:11:29.000Z
|
2021-04-03T05:11:29.000Z
|
server/global_config.py
|
CLG0125/elemesdk
|
344466398bad7cf026e082e47c77d3ca98621ef3
|
[
"MIT"
] | null | null | null |
server/global_config.py
|
CLG0125/elemesdk
|
344466398bad7cf026e082e47c77d3ca98621ef3
|
[
"MIT"
] | null | null | null |
class Global:
sand_box = True
app_key = None
# your secret
secret = None
callback_url = None
server_url = None
log = None
def __init__(self, config):
Global.sand_box = config.get_env()
Global.app_key = config.get_app_key()
Global.secret = config.get_secret()
Global.callback_url = config.get_callback_url()
Global.log = config.get_log()
@staticmethod
def get_env():
return Global.sand_box
@staticmethod
def get_app_key():
return Global.app_key
@staticmethod
def get_secret():
return Global.secret
@staticmethod
def get_callback_url():
return Global.callback_url
@staticmethod
def get_log():
return Global.log
@staticmethod
def get_server_url():
return Global.server_url
@staticmethod
def get_access_token_url():
return Global.get_server_url() + "/token"
@staticmethod
def get_api_server_url():
return Global.get_server_url() + "/api/v1/"
@staticmethod
def get_authorize_url():
return Global.get_server_url() + "/authorize"
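# Illustrative sketch (not part of the original module): any object exposing
# the five getters used in ``__init__`` can seed the class-level settings.
# ``DummyConfig`` and its values are assumptions made up for this example.
class DummyConfig:
    def get_env(self):
        return True
    def get_app_key(self):
        return "my-app-key"
    def get_secret(self):
        return "my-secret"
    def get_callback_url(self):
        return "https://example.com/callback"
    def get_log(self):
        return None
# Global(DummyConfig()) would then make Global.get_app_key() return "my-app-key".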
| 20.589286
| 55
| 0.632264
| 1,153
| 1
| 0
| 0
| 685
| 0.594102
| 0
| 0
| 43
| 0.037294
|
e13fcadccf45c68be598d453263bc3fd7d573b02
| 3,004
|
py
|
Python
|
Constants.py
|
micv-dev/DeepKubeGPUCluster
|
b1f674ea3c251a5287ee83d582b193248e04f9d6
|
[
"Apache-2.0"
] | 2
|
2021-01-22T05:56:40.000Z
|
2021-07-03T17:50:49.000Z
|
Constants.py
|
micv-dev/DeepKubeGPUCluster
|
b1f674ea3c251a5287ee83d582b193248e04f9d6
|
[
"Apache-2.0"
] | null | null | null |
Constants.py
|
micv-dev/DeepKubeGPUCluster
|
b1f674ea3c251a5287ee83d582b193248e04f9d6
|
[
"Apache-2.0"
] | null | null | null |
DEFAULT_KUBE_VERSION=1.14
KUBE_VERSION="kubeVersion"
USER_ID="userId"
DEFAULT_USER_ID=1
CLUSTER_NAME="clusterName"
CLUSTER_MASTER_IP="masterHostIP"
CLUSTER_WORKER_IP_LIST="workerIPList"
FRAMEWORK_TYPE= "frameworkType"
FRAMEWORK_VERSION="frameworkVersion"
FRAMEWORK_RESOURCES="frameworkResources"
FRAMEWORK_VOLUME_SIZE= "storageVolumeSizegb"
FRAMEWORK_ASSIGN_DPU_TYPE= "dpuType"
FRAMEWORK_ASSIGN_DPU_COUNT= "count"
FRAMEWORK_INSTANCE_COUNT="instanceCount"
FRAMEWORK_SPEC="spec"
FRAMEWORK_IMAGE_NAME="imageName"
FRAMEWORK_DPU_ID="dpuId"
FRAMEWORK_DPU_COUNT="count"
CLUSTER_ID="clusterId"
FRAMEWORK_DEFAULT_PVC="/home/user/"
DEFAULT_FRAMEWORK_TYPE="POLYAXON"
DEFAULT_FRAMEWORK_VERSION="0.4.4"
POLYAXON_TEMPLATE="templates/polyaxon_config"
POLYAXON_CONFIG_FILE="/home/user/polyaxonConfig.yaml"
POLYAXON_DEFAULT_NAMESPACE="polyaxon"
TENSORFLOW_TEMPLATE="templates/tensorflow-gpu"
DEFAULT_PATH="/home/user/"
##########Cluster Info####################
POD_IP="podIp"
POD_STATUS="podStatus"
POD_HOST_IP="hostIp"
##########End Of Cluster Info####################
PVC_MAX_ITERATIONS=50
SLEEP_TIME=5
GLUSTER_DEFAULT_MOUNT_PATH="/volume"
CONTAINER_VOLUME_PREFIX="volume"
MAX_RETRY_FOR_CLUSTER_FORM=10
##############Cluster Related ####################
CLUSTER_NODE_READY_COUNT=60
CLUSTER_NODE_READY_SLEEP=6
CLUSTER_NODE_NAME_PREFIX="worker"
NO_OF_GPUS_IN_GK210_K80=2
POLYAXON_NODE_PORT_RANGE_START=30000
POLYAXON_NODE_PORT_RANGE_END=32767
DEFAULT_CIDR="10.244.0.0/16"
GFS_STORAGE_CLASS="glusterfs"
GFS_STORAGE_REPLICATION="replicate:2"
HEKETI_REST_URL="http://10.138.0.2:8080"
DEFAULT_VOLUME_MOUNT_PATH="/volume"
GLUSTER_DEFAULT_REP_FACTOR=2
POLYAXON_DEFAULT_HTTP_PORT=80
POLYAXON_DEFAULT_WS_PORT=1337
SUCCESS_MESSAGE_STATUS="SUCCESS"
ERROR_MESSAGE_STATUS="ERROR"
ROLE="role"
IP_ADDRESS="ipAddress"
INTERNAL_IP_ADDRESS="internalIpAddress"
ADD_NODE_USER_ID="hostUserId"
ADD_NODE_PASSWORD="password"
####Polyaxon GetClusterInfo###
QUOTA_NAME="quotaName"
QUOTA_USED="used"
QUOTA_LIMIT="limit"
DEFAULT_QUOTA="default"
VOLUME_NAME="volumeName"
MOUNT_PATH_IN_POD="volumePodMountPath"
VOLUME_TOTAL_SIZE="totalSize"
VOLUME_FREE="free"
NVIDIA_GPU_RESOURCE_NAME="requests.nvidia.com/gpu"
EXECUTOR="executor"
MASTER_IP="masterIP"
GPU_COUNT="gpuCount"
NAME="name"
KUBE_CLUSTER_INFO="kubeClusterInfo"
ML_CLUSTER_INFO="mlClusterInfo"
POLYAXON_DEFAULT_USER_ID="root"
POLYAXON_DEFAULT_PASSWORD="rootpassword"
POLYAXON_USER_ID="polyaxonUserId"
POLYAXON_PASSWORD="polyaxonPassword"
DEFAULT_DATASET_VOLUME_NAME="vol_f37253d9f0f35868f8e3a1d63e5b1915"
DEFAULT_DATASET_MOUNT_PATH="/home/user/dataset"
DEFAULT_CLUSTER_VOLUME_MOUNT_PATH="/home/user/volume"
DEFAULT_GLUSTER_SERVER="10.138.0.2"
DEFAULT_DATASET_VOLUME_SIZE="10Gi"
CLUSTER_VOLUME_MOUNT_PATH="volumeHostMountPath"
DATASET_VOLUME_MOUNT_POINT="dataSetVolumemountPointOnHost"
DATASET_VOLUME_MOUNT_PATH_IN_POD_REST= "volumeDataSetPodMountPoint"
DATASET_VOLUME_MOUNT_PATH_IN_POD="/dataset"
DYNAMIC_GLUSTERFS_ENDPOINT_STARTS_WITH="glusterfs-dynamic-"
| 25.243697
| 67
| 0.831891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,153
| 0.383822
|
e13feb6e08fa5f3de107d84f4998b9cc0fdd3b93
| 1,582
|
py
|
Python
|
mpcontribs-portal/mpcontribs/portal/urls.py
|
fraricci/MPContribs
|
800e8fded594dce57807e7ef0ec8d3192ce54825
|
[
"MIT"
] | null | null | null |
mpcontribs-portal/mpcontribs/portal/urls.py
|
fraricci/MPContribs
|
800e8fded594dce57807e7ef0ec8d3192ce54825
|
[
"MIT"
] | null | null | null |
mpcontribs-portal/mpcontribs/portal/urls.py
|
fraricci/MPContribs
|
800e8fded594dce57807e7ef0ec8d3192ce54825
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.conf.urls import url
from django.views.generic.base import RedirectView
from mpcontribs.portal import views
app_name = "mpcontribs_portal"
urlpatterns = [
url(r"^$", views.index, name="index"),
url(r"^healthcheck/?$", views.healthcheck, name="healthcheck"),
url(
r"^notebooks/(?P<nb>[A-Za-z0-9_\/]{3,}).html$",
views.notebooks,
name="notebooks",
),
url(r"^(?P<cid>[a-f\d]{24})/?$", views.contribution, name="contribution"),
# downloads
url(
r"^component/(?P<oid>[a-f\d]{24})$",
views.download_component,
name="download_component",
),
url(
r"^(?P<cid>[a-f\d]{24}).json.gz$",
views.download_contribution,
name="download_contribution",
),
# TODO .(?P<fmt>[a-z]{3})
url(
r"^(?P<project>[a-zA-Z0-9_]{3,}).json.gz$",
views.download_project,
name="download_project",
),
# redirects
url(r"^fe-co-v/?$", RedirectView.as_view(url="/swf/", permanent=False)),
url(r"^fe-co-v/dataset-01/?$", RedirectView.as_view(url="/swf/", permanent=False)),
url(
r"^boltztrap/?$",
RedirectView.as_view(url="/carrier_transport/", permanent=True),
),
url(
r"^Screeninginorganicpv/?$",
RedirectView.as_view(url="/screening_inorganic_pv/", permanent=False),
),
url(
r"^ScreeningInorganicPV/?$",
RedirectView.as_view(url="/screening_inorganic_pv/", permanent=False),
),
# default view
url(r"^[a-zA-Z0-9_]{3,}/?$", views.landingpage),
]
| 31.019608
| 87
| 0.584071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 634
| 0.400759
|
e1404018df8652fa89529ce0d2a499530d166df6
| 3,363
|
py
|
Python
|
src/mp_api/dielectric/client.py
|
jmmshn/api
|
5254a453f6ec749793639e4ec08bea14628c7dc3
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
src/mp_api/dielectric/client.py
|
jmmshn/api
|
5254a453f6ec749793639e4ec08bea14628c7dc3
|
[
"BSD-3-Clause-LBNL"
] | 159
|
2020-11-16T16:02:31.000Z
|
2022-03-28T15:03:38.000Z
|
src/mp_api/dielectric/client.py
|
jmmshn/api
|
5254a453f6ec749793639e4ec08bea14628c7dc3
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from typing import List, Optional, Tuple
from collections import defaultdict
from mp_api.core.client import BaseRester, MPRestError
import warnings
class DielectricRester(BaseRester):
suffix = "dielectric"
def get_dielectric_from_material_id(self, material_id: str):
"""
Get dielectric data for a given Materials Project ID.
Arguments:
material_id (str): Materials project ID
Returns:
results (Dict): Dictionary containing dielectric data.
"""
result = self._make_request("{}/?all_fields=true".format(material_id))
if len(result.get("data", [])) > 0:
return result
else:
raise MPRestError("No document found")
def search_dielectric_docs(
self,
e_total: Optional[Tuple[float, float]] = None,
e_ionic: Optional[Tuple[float, float]] = None,
e_static: Optional[Tuple[float, float]] = None,
n: Optional[Tuple[float, float]] = None,
num_chunks: Optional[int] = None,
chunk_size: int = 100,
fields: Optional[List[str]] = None,
):
"""
        Query dielectric docs using a variety of search criteria.
Arguments:
e_total (Tuple[float,float]): Minimum and maximum total dielectric constant to consider.
e_ionic (Tuple[float,float]): Minimum and maximum ionic dielectric constant to consider.
e_static (Tuple[float,float]): Minimum and maximum electronic dielectric constant to consider.
n (Tuple[float,float]): Minimum and maximum refractive index to consider.
num_chunks (int): Maximum number of chunks of data to yield. None will yield all possible.
chunk_size (int): Number of data entries per chunk.
fields (List[str]): List of fields in EOSDoc to return data for.
Default is material_id only.
Yields:
([dict]) List of dictionaries containing data for entries defined in 'fields'.
Defaults to Materials Project IDs only.
"""
query_params = defaultdict(dict) # type: dict
if chunk_size <= 0 or chunk_size > 100:
warnings.warn("Improper chunk size given. Setting value to 100.")
chunk_size = 100
if e_total:
query_params.update({"e_total_min": e_total[0], "e_total_max": e_total[1]})
if e_ionic:
query_params.update({"e_ionic_min": e_ionic[0], "e_ionic_max": e_ionic[1]})
if e_static:
query_params.update(
{"e_static_min": e_static[0], "e_static_max": e_static[1]}
)
if n:
query_params.update({"n_min": n[0], "n_max": n[1]})
if fields:
query_params.update({"fields": ",".join(fields)})
query_params = {
entry: query_params[entry]
for entry in query_params
if query_params[entry] is not None
}
query_params.update({"limit": chunk_size, "skip": 0})
count = 0
while True:
query_params["skip"] = count * chunk_size
results = self.query(query_params).get("data", [])
if not any(results) or (num_chunks is not None and count == num_chunks):
break
count += 1
yield results
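    # --- Usage sketch (added for illustration; not part of the original file) ---
    # search_dielectric_docs() is a generator that yields chunks of documents, so a
    # caller typically loops over it. The keyword values below are made up, and the
    # exact constructor arguments depend on how BaseRester is configured:
    #
    #     rester = DielectricRester(api_key="YOUR_API_KEY")
    #     for chunk in rester.search_dielectric_docs(
    #         e_total=(10.0, 50.0), fields=["task_id", "e_total"]
    #     ):
    #         for doc in chunk:
    #             print(doc)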
| 33.969697
| 106
| 0.600059
| 3,210
| 0.954505
| 2,617
| 0.778174
| 0
| 0
| 0
| 0
| 1,441
| 0.428486
|
e1404a753371b136c19314c274ee0f8405dd2c32
| 1,598
|
py
|
Python
|
docs/example/advanced/view.py
|
Kozea/Pynuts
|
f2eb1839f59d2e8a4ec96175726186e67f85c4b0
|
[
"BSD-3-Clause"
] | 1
|
2016-06-16T15:31:30.000Z
|
2016-06-16T15:31:30.000Z
|
docs/example/advanced/view.py
|
Kozea/Pynuts
|
f2eb1839f59d2e8a4ec96175726186e67f85c4b0
|
[
"BSD-3-Clause"
] | null | null | null |
docs/example/advanced/view.py
|
Kozea/Pynuts
|
f2eb1839f59d2e8a4ec96175726186e67f85c4b0
|
[
"BSD-3-Clause"
] | null | null | null |
from wtforms import TextField, IntegerField, PasswordField
from wtforms.ext.sqlalchemy.fields import (
QuerySelectField, QuerySelectMultipleField)
from wtforms.validators import Required
from pynuts.view import BaseForm
import database
from application import nuts
class EmployeeView(nuts.ModelView):
model = database.Employee
list_column = 'fullname'
table_columns = ('fullname', )
create_columns = ('login', 'password', 'name', 'firstname', 'company')
read_columns = ('person_id', 'name', 'firstname', 'fullname', 'company')
update_columns = ('name', 'firstname')
class Form(BaseForm):
person_id = IntegerField('ID')
login = TextField(u'Login', validators=[Required()])
password = PasswordField(u'Password', validators=[Required()])
name = TextField(u'Surname', validators=[Required()])
firstname = TextField(u'Firstname', validators=[Required()])
fullname = TextField(u'Employee name')
company = QuerySelectField(
u'Company', get_label='name',
query_factory=lambda: database.Company.query, allow_blank=True)
class CompanyView(nuts.ModelView):
model = database.Company
list_column = 'name'
create_columns = ('name', 'employees')
read_columns = ('name', 'employees')
class Form(BaseForm):
company_id = IntegerField('Company')
name = TextField('Company name')
employees = QuerySelectMultipleField(
u'Employees', get_label='fullname', query_factory=
lambda: database.Employee.query.filter_by(company_id=None))
| 35.511111
| 76
| 0.682728
| 1,322
| 0.827284
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.180851
|
e1412f411269485acbe2ebcad67a9f18d2b335f9
| 330
|
py
|
Python
|
scripts/extract_hit_upstreams.py
|
waglecn/helD_search
|
2b77e81419b9929d5cf5ecc519f27cb381178b2c
|
[
"MIT"
] | null | null | null |
scripts/extract_hit_upstreams.py
|
waglecn/helD_search
|
2b77e81419b9929d5cf5ecc519f27cb381178b2c
|
[
"MIT"
] | null | null | null |
scripts/extract_hit_upstreams.py
|
waglecn/helD_search
|
2b77e81419b9929d5cf5ecc519f27cb381178b2c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
from Bio import SeqIO
import os
genome = sys.argv[1]
in_aa = f'hits/{genome}.hits'
in_up = f'fa/{genome}.upstream'
hits = SeqIO.to_dict(SeqIO.parse(in_aa, 'fasta'))
raes = SeqIO.to_dict(SeqIO.parse(in_up, 'fasta'))
for k in hits.keys():
i = k.split('|')[1]
print(raes[i].format('fasta'))
| 17.368421
| 49
| 0.672727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.272727
|
e14130d3b319054f84f8b96b0e660e7e60ab2e53
| 11,674
|
py
|
Python
|
homeassistant/components/airtouch4/climate.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 4
|
2021-07-11T09:11:00.000Z
|
2022-02-27T14:43:50.000Z
|
homeassistant/components/airtouch4/climate.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 277
|
2021-10-04T06:39:33.000Z
|
2021-12-28T22:04:17.000Z
|
homeassistant/components/airtouch4/climate.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 3
|
2022-01-02T18:49:54.000Z
|
2022-01-25T02:03:54.000Z
|
"""AirTouch 4 component to control of AirTouch 4 Climate Devices."""
from __future__ import annotations
import logging
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_DIFFUSE,
FAN_FOCUS,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
AT_TO_HA_STATE = {
"Heat": HVAC_MODE_HEAT,
"Cool": HVAC_MODE_COOL,
"AutoHeat": HVAC_MODE_AUTO, # airtouch reports either autoheat or autocool
"AutoCool": HVAC_MODE_AUTO,
"Auto": HVAC_MODE_AUTO,
"Dry": HVAC_MODE_DRY,
"Fan": HVAC_MODE_FAN_ONLY,
}
HA_STATE_TO_AT = {
HVAC_MODE_HEAT: "Heat",
HVAC_MODE_COOL: "Cool",
HVAC_MODE_AUTO: "Auto",
HVAC_MODE_DRY: "Dry",
HVAC_MODE_FAN_ONLY: "Fan",
HVAC_MODE_OFF: "Off",
}
AT_TO_HA_FAN_SPEED = {
"Quiet": FAN_DIFFUSE,
"Low": FAN_LOW,
"Medium": FAN_MEDIUM,
"High": FAN_HIGH,
"Powerful": FAN_FOCUS,
"Auto": FAN_AUTO,
"Turbo": "turbo",
}
AT_GROUP_MODES = [HVAC_MODE_OFF, HVAC_MODE_FAN_ONLY]
HA_FAN_SPEED_TO_AT = {value: key for key, value in AT_TO_HA_FAN_SPEED.items()}
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Airtouch 4."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
info = coordinator.data
entities: list[ClimateEntity] = [
AirtouchGroup(coordinator, group["group_number"], info)
for group in info["groups"]
]
entities.extend(
AirtouchAC(coordinator, ac["ac_number"], info) for ac in info["acs"]
)
_LOGGER.debug(" Found entities %s", entities)
async_add_entities(entities)
class AirtouchAC(CoordinatorEntity, ClimateEntity):
"""Representation of an AirTouch 4 ac."""
_attr_supported_features = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
_attr_temperature_unit = TEMP_CELSIUS
def __init__(self, coordinator, ac_number, info):
"""Initialize the climate device."""
super().__init__(coordinator)
self._ac_number = ac_number
self._airtouch = coordinator.airtouch
self._info = info
self._unit = self._airtouch.GetAcs()[self._ac_number]
@callback
def _handle_coordinator_update(self):
self._unit = self._airtouch.GetAcs()[self._ac_number]
return super()._handle_coordinator_update()
@property
def device_info(self) -> DeviceInfo:
"""Return device info for this device."""
return DeviceInfo(
identifiers={(DOMAIN, self.unique_id)},
name=self.name,
manufacturer="Airtouch",
model="Airtouch 4",
)
@property
def unique_id(self):
"""Return unique ID for this device."""
return f"ac_{self._ac_number}"
@property
def current_temperature(self):
"""Return the current temperature."""
return self._unit.Temperature
@property
def name(self):
"""Return the name of the climate device."""
return f"AC {self._ac_number}"
@property
def fan_mode(self):
"""Return fan mode of the AC this group belongs to."""
return AT_TO_HA_FAN_SPEED[self._airtouch.acs[self._ac_number].AcFanSpeed]
@property
def fan_modes(self):
"""Return the list of available fan modes."""
airtouch_fan_speeds = self._airtouch.GetSupportedFanSpeedsForAc(self._ac_number)
return [AT_TO_HA_FAN_SPEED[speed] for speed in airtouch_fan_speeds]
@property
def hvac_mode(self):
"""Return hvac target hvac state."""
is_off = self._unit.PowerState == "Off"
if is_off:
return HVAC_MODE_OFF
return AT_TO_HA_STATE[self._airtouch.acs[self._ac_number].AcMode]
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
airtouch_modes = self._airtouch.GetSupportedCoolingModesForAc(self._ac_number)
modes = [AT_TO_HA_STATE[mode] for mode in airtouch_modes]
modes.append(HVAC_MODE_OFF)
return modes
async def async_set_hvac_mode(self, hvac_mode):
"""Set new operation mode."""
if hvac_mode not in HA_STATE_TO_AT:
raise ValueError(f"Unsupported HVAC mode: {hvac_mode}")
if hvac_mode == HVAC_MODE_OFF:
return await self.async_turn_off()
await self._airtouch.SetCoolingModeForAc(
self._ac_number, HA_STATE_TO_AT[hvac_mode]
)
# in case it isn't already, unless the HVAC mode was off, then the ac should be on
await self.async_turn_on()
self._unit = self._airtouch.GetAcs()[self._ac_number]
_LOGGER.debug("Setting operation mode of %s to %s", self._ac_number, hvac_mode)
self.async_write_ha_state()
async def async_set_fan_mode(self, fan_mode):
"""Set new fan mode."""
if fan_mode not in self.fan_modes:
raise ValueError(f"Unsupported fan mode: {fan_mode}")
_LOGGER.debug("Setting fan mode of %s to %s", self._ac_number, fan_mode)
await self._airtouch.SetFanSpeedForAc(
self._ac_number, HA_FAN_SPEED_TO_AT[fan_mode]
)
self._unit = self._airtouch.GetAcs()[self._ac_number]
self.async_write_ha_state()
async def async_turn_on(self):
"""Turn on."""
_LOGGER.debug("Turning %s on", self.unique_id)
# in case ac is not on. Airtouch turns itself off if no groups are turned on
# (even if groups turned back on)
await self._airtouch.TurnAcOn(self._ac_number)
async def async_turn_off(self):
"""Turn off."""
_LOGGER.debug("Turning %s off", self.unique_id)
await self._airtouch.TurnAcOff(self._ac_number)
self.async_write_ha_state()
class AirtouchGroup(CoordinatorEntity, ClimateEntity):
"""Representation of an AirTouch 4 group."""
_attr_supported_features = SUPPORT_TARGET_TEMPERATURE
_attr_temperature_unit = TEMP_CELSIUS
_attr_hvac_modes = AT_GROUP_MODES
def __init__(self, coordinator, group_number, info):
"""Initialize the climate device."""
super().__init__(coordinator)
self._group_number = group_number
self._airtouch = coordinator.airtouch
self._info = info
self._unit = self._airtouch.GetGroupByGroupNumber(self._group_number)
@callback
def _handle_coordinator_update(self):
self._unit = self._airtouch.GetGroupByGroupNumber(self._group_number)
return super()._handle_coordinator_update()
@property
def device_info(self) -> DeviceInfo:
"""Return device info for this device."""
return DeviceInfo(
identifiers={(DOMAIN, self.unique_id)},
manufacturer="Airtouch",
model="Airtouch 4",
name=self.name,
)
@property
def unique_id(self):
"""Return unique ID for this device."""
return self._group_number
@property
def min_temp(self):
"""Return Minimum Temperature for AC of this group."""
return self._airtouch.acs[self._unit.BelongsToAc].MinSetpoint
@property
def max_temp(self):
"""Return Max Temperature for AC of this group."""
return self._airtouch.acs[self._unit.BelongsToAc].MaxSetpoint
@property
def name(self):
"""Return the name of the climate device."""
return self._unit.GroupName
@property
def current_temperature(self):
"""Return the current temperature."""
return self._unit.Temperature
@property
def target_temperature(self):
"""Return the temperature we are trying to reach."""
return self._unit.TargetSetpoint
@property
def hvac_mode(self):
"""Return hvac target hvac state."""
# there are other power states that aren't 'on' but still count as on (eg. 'Turbo')
is_off = self._unit.PowerState == "Off"
if is_off:
return HVAC_MODE_OFF
return HVAC_MODE_FAN_ONLY
async def async_set_hvac_mode(self, hvac_mode):
"""Set new operation mode."""
if hvac_mode not in HA_STATE_TO_AT:
raise ValueError(f"Unsupported HVAC mode: {hvac_mode}")
if hvac_mode == HVAC_MODE_OFF:
return await self.async_turn_off()
if self.hvac_mode == HVAC_MODE_OFF:
await self.async_turn_on()
self._unit = self._airtouch.GetGroups()[self._group_number]
_LOGGER.debug(
"Setting operation mode of %s to %s", self._group_number, hvac_mode
)
self.async_write_ha_state()
@property
def fan_mode(self):
"""Return fan mode of the AC this group belongs to."""
return AT_TO_HA_FAN_SPEED[self._airtouch.acs[self._unit.BelongsToAc].AcFanSpeed]
@property
def fan_modes(self):
"""Return the list of available fan modes."""
airtouch_fan_speeds = self._airtouch.GetSupportedFanSpeedsByGroup(
self._group_number
)
return [AT_TO_HA_FAN_SPEED[speed] for speed in airtouch_fan_speeds]
async def async_set_temperature(self, **kwargs):
"""Set new target temperatures."""
temp = kwargs.get(ATTR_TEMPERATURE)
_LOGGER.debug("Setting temp of %s to %s", self._group_number, str(temp))
self._unit = await self._airtouch.SetGroupToTemperature(
self._group_number, int(temp)
)
self.async_write_ha_state()
async def async_set_fan_mode(self, fan_mode):
"""Set new fan mode."""
if fan_mode not in self.fan_modes:
raise ValueError(f"Unsupported fan mode: {fan_mode}")
_LOGGER.debug("Setting fan mode of %s to %s", self._group_number, fan_mode)
self._unit = await self._airtouch.SetFanSpeedByGroup(
self._group_number, HA_FAN_SPEED_TO_AT[fan_mode]
)
self.async_write_ha_state()
async def async_turn_on(self):
"""Turn on."""
_LOGGER.debug("Turning %s on", self.unique_id)
await self._airtouch.TurnGroupOn(self._group_number)
# in case ac is not on. Airtouch turns itself off if no groups are turned on
# (even if groups turned back on)
await self._airtouch.TurnAcOn(
self._airtouch.GetGroupByGroupNumber(self._group_number).BelongsToAc
)
# this might cause the ac object to be wrong, so force the shared data
# store to update
await self.coordinator.async_request_refresh()
self.async_write_ha_state()
async def async_turn_off(self):
"""Turn off."""
_LOGGER.debug("Turning %s off", self.unique_id)
await self._airtouch.TurnGroupOff(self._group_number)
# this will cause the ac object to be wrong
# (ac turns off automatically if no groups are running)
# so force the shared data store to update
await self.coordinator.async_request_refresh()
self.async_write_ha_state()
| 33.642651
| 91
| 0.668837
| 9,291
| 0.795871
| 0
| 0
| 3,884
| 0.332705
| 4,726
| 0.404831
| 2,563
| 0.219548
|
e1414f639d12d9584079f8b303441fd98b73dfdd
| 772
|
py
|
Python
|
giosgappsdk/giosg_api.py
|
mentholi/giosgapp-python-sdk
|
2a5ea25e223dc4a88a32e917dd393cc9a07f9999
|
[
"MIT"
] | null | null | null |
giosgappsdk/giosg_api.py
|
mentholi/giosgapp-python-sdk
|
2a5ea25e223dc4a88a32e917dd393cc9a07f9999
|
[
"MIT"
] | null | null | null |
giosgappsdk/giosg_api.py
|
mentholi/giosgapp-python-sdk
|
2a5ea25e223dc4a88a32e917dd393cc9a07f9999
|
[
"MIT"
] | null | null | null |
import json
import requests
class GiosgApiMixin(object):
URL_USERS = '/api/v3/customer/personnel'
URL_CHATS = '/api/v3/chat/chatsessions'
def build_request_url(self, base, page_size=25, page=1):
domain = self.data.get('sub')
return '%s://%s%s?page_size=%s&page=%s' % (self._protocol, domain, base, page_size, page)
def get_users(self, page=1, page_size=25):
response = requests.get(self.build_request_url(self.URL_USERS, page_size, page), headers=self.get_auth_header())
return json.loads(response.content)
def get_chats(self, page=1, page_size=25):
response = requests.get(self.build_request_url(self.URL_CHATS, page_size, page), headers=self.get_auth_header())
return json.loads(response.content)
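# --- Usage sketch (added for illustration; not part of the original file) ---
# GiosgApiMixin assumes the consuming class provides `self.data` (with a 'sub'
# domain), `self._protocol` and `get_auth_header()`. A hypothetical consumer:
#
#     class GiosgClient(GiosgApiMixin):
#         _protocol = "https"
#
#         def __init__(self, data, token):
#             self.data = data          # e.g. a decoded JWT payload with a 'sub' claim
#             self._token = token
#
#         def get_auth_header(self):
#             return {"Authorization": "Token %s" % self._token}
#
#     client = GiosgClient({"sub": "service.giosg.com"}, "my-api-token")
#     users = client.get_users(page=1, page_size=25)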
| 38.6
| 120
| 0.700777
| 741
| 0.959845
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 0.119171
|
e1416e342916d61944b1391ba364f72736a6b340
| 1,415
|
py
|
Python
|
Pixelfonts/Delete duplicate components.py
|
NaN-xyz/Glyphs-Scripts
|
bdacf455babc72e0801d8d8db5dc10f8e88aa37b
|
[
"Apache-2.0"
] | 1
|
2022-01-09T04:28:36.000Z
|
2022-01-09T04:28:36.000Z
|
Pixelfonts/Delete duplicate components.py
|
NaN-xyz/Glyphs-Scripts
|
bdacf455babc72e0801d8d8db5dc10f8e88aa37b
|
[
"Apache-2.0"
] | null | null | null |
Pixelfonts/Delete duplicate components.py
|
NaN-xyz/Glyphs-Scripts
|
bdacf455babc72e0801d8d8db5dc10f8e88aa37b
|
[
"Apache-2.0"
] | null | null | null |
#MenuTitle: Delete Duplicate Components
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Looks for duplicate components (same component, same x/y values) and keeps only one of them.
"""
Font = Glyphs.font
selectedLayers = Font.selectedLayers
def getAttr( thisLayer, compNumber ):
return [thisLayer.components[compNumber].componentName, thisLayer.components[compNumber].x, thisLayer.components[compNumber].y]
def scanForDuplicates( thisLayer, compNumber ):
if compNumber == len( thisLayer.components ) - 1:
return []
else:
indexList = scanForDuplicates( thisLayer, compNumber + 1 )
currAttr = getAttr( thisLayer, compNumber )
for i in range( compNumber + 1, len( thisLayer.components ) ):
if currAttr == getAttr( thisLayer, i ):
indexList.append(i)
return sorted( set( indexList ) )
def process( thisLayer ):
if len( thisLayer.components ) != 0:
thisLayer.parent.beginUndo()
indexesToBeDeleted = scanForDuplicates( thisLayer, 0 )
for indexToBeDeleted in indexesToBeDeleted[::-1]:
del thisLayer.components[indexToBeDeleted]
		print(len( indexesToBeDeleted ))
thisLayer.parent.endUndo()
else:
# no components in this layer
		print("n/a")
Font.disableUpdateInterface()
for thisLayer in selectedLayers:
	print("Components deleted in %s:" % thisLayer.parent.name, end=" ")
process( thisLayer )
Font.enableUpdateInterface()
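# Worked example (added for illustration; the component names and coordinates are
# hypothetical). Two components count as duplicates only when getAttr() returns the
# same [name, x, y] triple:
#
#     getAttr(layer, 0)  ->  ["pixel", 100.0, 0.0]
#     getAttr(layer, 1)  ->  ["pixel", 100.0, 0.0]   # same triple -> index 1 is deleted
#     getAttr(layer, 2)  ->  ["pixel", 100.0, 50.0]  # different y  -> kept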
| 27.745098
| 128
| 0.743463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 223
| 0.157597
|
e141938b24307f066ff503fed7f111fa1bbefd00
| 3,317
|
py
|
Python
|
src/structures/Errors.py
|
Xiddoc/ComPy
|
7d26f95209d0615d7eb188fa02470ddae5311fca
|
[
"MIT"
] | null | null | null |
src/structures/Errors.py
|
Xiddoc/ComPy
|
7d26f95209d0615d7eb188fa02470ddae5311fca
|
[
"MIT"
] | 9
|
2022-02-23T10:32:44.000Z
|
2022-03-27T17:55:43.000Z
|
src/structures/Errors.py
|
Xiddoc/ComPy
|
7d26f95209d0615d7eb188fa02470ddae5311fca
|
[
"MIT"
] | null | null | null |
"""
Error classes, when needed for exceptions.
"""
from _ast import AST
from dataclasses import dataclass, field
from typing import Optional, Union
from src.compiler.Util import Util
@dataclass(frozen=True)
class ObjectAlreadyDefinedError(NameError):
"""
    For our compilation scheme, objects can only be defined once and must be given a type hint.
    If you try to type hint the same object twice, this should raise an error.
    From this, you should also realize that object types are immutable and cannot be freed.
"""
object_name: str
def __str__(self) -> str:
# Error text
return f"You cannot redefine object '{self.object_name}' as it is already initialized."
@dataclass(frozen=True)
class ObjectNotDefinedError(NameError):
"""
    As stated in ObjectAlreadyDefinedError, an object must have an explicit type hint the first time it is used.
    This is referred to as "defining" or "initializing".
    If an object is referenced without being defined, then the compiler should throw this error.
"""
object_name: str
def __str__(self) -> str:
# Error text
return f"Object '{self.object_name}' was not initialized yet."
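# --- Usage sketch (added for illustration; not part of the original file) ---
# The dataclass-based errors in this module are raised and caught like regular exceptions:
#
#     try:
#         raise ObjectNotDefinedError("my_var")
#     except ObjectNotDefinedError as err:
#         print(err)   # -> Object 'my_var' was not initialized yet.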
@dataclass(frozen=True)
class UnsupportedFeatureException(SyntaxError):
"""
An error to raise whenever a Python feature is used which is not implemented in the compiler.
    Examples currently include classes. (Boo hoo, no OOP for you)
"""
feature: Union[AST, str]
def __str__(self) -> str:
# Local import to avoid import error
# Error text
return "Python feature '" + \
(Util.get_name(self.feature) if isinstance(self.feature, AST) else self.feature) + \
"' is not supported by the compiler."
@dataclass(frozen=True)
class InvalidArgumentError(ValueError):
"""
An error to throw when the user inputted an invalid argument.
Specifically, to be used for command line arguments. Not for
syntax arguments / code that is currently being compiled.
"""
argument: Optional[str] = field(default=None)
def __str__(self) -> str:
# Error text
return f"Argument '{self.argument}' is not valid." \
if self.argument is not None else \
"Internal argument handling error encountered."
@dataclass(frozen=True)
class SyntaxSubsetError(SyntaxError):
"""
An error to throw when the user's code does
not match the syntax subset specifications.
"""
warning: str = field()
def __str__(self) -> str:
# Error text
return f"Invalid usage of '{self.warning}' caused a syntax error (the code must comply to the syntax subset)."
@dataclass(frozen=True)
class InvalidTypeError(TypeError):
"""
An error to throw when the user gave an invalid type or
value of a non-corresponding type (in their syntax/code).
"""
given_type: Optional[str] = field(default=None)
expected_type: Optional[str] = field(default=None)
def __str__(self) -> str:
# Error text
return f"Could not use type '{self.given_type}' when type '{self.expected_type}' was expected." \
if self.given_type is not None else \
"Invalid types (or value of conflicting type) found in code."
| 31.894231
| 118
| 0.67561
| 2,971
| 0.895689
| 0
| 0
| 3,115
| 0.939102
| 0
| 0
| 1,880
| 0.566777
|
e1419fb66f46497cc9f96ff1980d0c0ddc909d97
| 4,314
|
py
|
Python
|
github/recorders/github/github_user_info_recorder.py
|
zvtvz/play-github
|
30ad38ca88c1a57b2cec48b19ca31ffa28fa0154
|
[
"MIT"
] | 2
|
2019-09-21T04:31:01.000Z
|
2020-01-21T03:45:51.000Z
|
github/recorders/github/github_user_info_recorder.py
|
zvtvz/play-github
|
30ad38ca88c1a57b2cec48b19ca31ffa28fa0154
|
[
"MIT"
] | null | null | null |
github/recorders/github/github_user_info_recorder.py
|
zvtvz/play-github
|
30ad38ca88c1a57b2cec48b19ca31ffa28fa0154
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import argparse
from github.accounts.github_account import GithubAccount
from github.domain.github import GithubUser
from github.recorders.github.common import get_result
from zvdata.api import get_entities
from zvdata.domain import get_db_session
from zvdata.recorder import TimeSeriesDataRecorder
from zvdata.utils.time_utils import day_offset_today, now_pd_timestamp
class GithubUserInfoRecorder(TimeSeriesDataRecorder):
entity_provider = 'github'
entity_schema = GithubUser
provider = 'github'
data_schema = GithubUser
url = 'https://api.github.com/users/{}'
def __init__(self,
codes=None,
batch_size=50,
force_update=True,
sleeping_time=5,
default_size=2000,
one_shot=True,
fix_duplicate_way='ignore',
start_timestamp=None,
end_timestamp=None) -> None:
super().__init__('github_user', ['github'], None, codes, batch_size, force_update, sleeping_time,
default_size, one_shot, fix_duplicate_way, start_timestamp, end_timestamp)
self.seed = 0
def init_entities(self):
if self.entity_provider == self.provider and self.entity_schema == self.data_schema:
self.entity_session = self.session
else:
self.entity_session = get_db_session(provider=self.entity_provider, data_schema=self.entity_schema)
# init the entity list
self.entities = get_entities(session=self.entity_session,
entity_type=self.entity_type,
entity_ids=self.entity_ids,
codes=self.codes,
return_type='domain',
provider=self.entity_provider,
                                     # skip entities updated within the last 7 days
filters=[(GithubUser.updated_timestamp < day_offset_today(
-7)) | (GithubUser.updated_timestamp.is_(None))],
start_timestamp=self.start_timestamp,
end_timestamp=self.end_timestamp)
def record(self, entity_item, start, end, size, timestamps):
self.seed += 1
the_url = self.url.format(entity_item.code)
user_info = get_result(url=the_url, token=GithubAccount.get_token(seed=self.seed))
if user_info:
user_info['updated_timestamp'] = now_pd_timestamp()
return [user_info]
return []
def get_data_map(self):
return {
'site_admin': 'site_admin',
'name': 'name',
'avatar_url': 'avatar_url',
'gravatar_id': 'gravatar_id',
'company': 'company',
'blog': 'blog',
'location': 'location',
'email': 'email',
'hireable': 'hireable',
'bio': 'bio',
'public_repos': 'public_repos',
'public_gists': 'public_gists',
'followers': 'followers',
'following': 'following',
'updated_timestamp': 'updated_timestamp'
}
def generate_domain_id(self, security_item, original_data):
return security_item.id
def evaluate_start_end_size_timestamps(self, entity):
latest_record = self.get_latest_saved_record(entity=entity)
if latest_record:
latest_timestamp = latest_record.updated_timestamp
if latest_timestamp is not None:
if (now_pd_timestamp() - latest_timestamp).days < 7:
self.logger.info('entity_item:{},updated_timestamp:{},ignored'.format(entity.id, latest_timestamp))
return None, None, 0, None
return None, None, self.default_size, None
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--start', help='start_timestamp', default='2015-01-01')
parser.add_argument('--end', help='end_timestamp', default='2015-12-31')
args = parser.parse_args()
start = args.start
end = args.end
recorder = GithubUserInfoRecorder(start_timestamp=start, end_timestamp=end)
recorder.run()
| 38.176991
| 119
| 0.592721
| 3,535
| 0.815644
| 0
| 0
| 0
| 0
| 0
| 0
| 627
| 0.14467
|
e141a2ac84bf3c71baee17e1baf51d264eb93a13
| 94
|
py
|
Python
|
pyEDAA/OutputFilter/__init__.py
|
edaa-org/pyEDAA.OutputFilter
|
ca602c9992b40df7bd117968c0dc333a4f16d255
|
[
"Apache-2.0"
] | 1
|
2021-12-30T02:49:43.000Z
|
2021-12-30T02:49:43.000Z
|
pyEDAA/OutputFilter/__init__.py
|
edaa-org/pyEDAA.OutputFilter
|
ca602c9992b40df7bd117968c0dc333a4f16d255
|
[
"Apache-2.0"
] | null | null | null |
pyEDAA/OutputFilter/__init__.py
|
edaa-org/pyEDAA.OutputFilter
|
ca602c9992b40df7bd117968c0dc333a4f16d255
|
[
"Apache-2.0"
] | null | null | null |
from pyTooling.Decorators import export
__version__ = "0.1.0"
@export
class Filter:
pass
| 9.4
| 39
| 0.744681
| 19
| 0.202128
| 0
| 0
| 27
| 0.287234
| 0
| 0
| 7
| 0.074468
|
e141a89f1384646896cf35e7b57e68052818e1a7
| 1,766
|
py
|
Python
|
tut/app.py
|
Tyler9937/titanic-test
|
6a5200558caf203ed1dc3de71a6c9b5d488f847a
|
[
"MIT"
] | null | null | null |
tut/app.py
|
Tyler9937/titanic-test
|
6a5200558caf203ed1dc3de71a6c9b5d488f847a
|
[
"MIT"
] | null | null | null |
tut/app.py
|
Tyler9937/titanic-test
|
6a5200558caf203ed1dc3de71a6c9b5d488f847a
|
[
"MIT"
] | null | null | null |
# Importing needed libraries
import sys  # needed for reading the port from sys.argv below
import uuid
from decouple import config
from dotenv import load_dotenv
from flask import Flask, render_template, request, jsonify
from sklearn.externals import joblib
import traceback
import pandas as pd
import numpy as np
from flask_sqlalchemy import SQLAlchemy
# Saving DB var
DB = SQLAlchemy()
# Reads key value pair from .env
load_dotenv()
# Running function to create the app
def create_app():
'''
Used to initiate the app
'''
# saving flask(__name__) to var app
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = config('DATABASE_URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB.init_app(app)
@app.route('/predict', methods=['POST'])
def predict():
if lr:
try:
json_ = request.json
print(json_)
query = pd.get_dummies(pd.DataFrame(json_))
query = query.reindex(columns=model_columns, fill_value=0)
prediction = list(lr.predict(query))
return jsonify({'prediction': str(prediction)})
except:
return jsonify({'trace': traceback.format_exc()})
else:
print ('Train the model first')
return ('No model here to use')
    # hand the configured app back to the caller so it can be run or imported
    return app
if __name__ == '__main__':
try:
port = int(sys.argv[1]) # This is for a command-line input
except:
port = 12345 # If you don't provide any port the port will be set to 12345
lr = joblib.load("model.pkl") # Load "model.pkl"
print ('Model loaded')
model_columns = joblib.load("model_columns.pkl") # Load "model_columns.pkl"
print ('Model columns loaded')
    app = create_app()  # build the Flask app before running it
    app.run(port=port, debug=True)
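# --- Request sketch (added for illustration; the field names are hypothetical and
# must match the columns the model was trained on) ---
#
#     import requests
#     sample = [{"Age": 85, "Sex": "male", "Embarked": "S"}]
#     resp = requests.post("http://localhost:12345/predict", json=sample)
#     print(resp.json())   # e.g. {"prediction": "[0]"}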
| 28.95082
| 86
| 0.623443
| 0
| 0
| 0
| 0
| 601
| 0.340317
| 0
| 0
| 552
| 0.312571
|
e143b369aa9fc5500990d0521c4867296c4568dc
| 1,237
|
py
|
Python
|
trainer.py
|
thedesertm/leapmotion_training_svm
|
659a439be4209450b98d638e655ee025e5bd562b
|
[
"MIT"
] | null | null | null |
trainer.py
|
thedesertm/leapmotion_training_svm
|
659a439be4209450b98d638e655ee025e5bd562b
|
[
"MIT"
] | null | null | null |
trainer.py
|
thedesertm/leapmotion_training_svm
|
659a439be4209450b98d638e655ee025e5bd562b
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
import pickle
BASE_PATH = os.path.join(os.getcwd() , "dataset")
df = None
i = 0
for file_name in os.listdir(BASE_PATH):
file_path = os.path.join(BASE_PATH , file_name)
print(file_path)
data_frame = pd.read_csv(file_path , header=None)
data_frame.pop(178)
data_frame.pop(0)
    dat = pd.DataFrame({'result': [i for k in range(data_frame.shape[0])]})  # one label per row
data_frame = data_frame.join(dat)
    if df is not None:
df = df.append(data_frame , ignore_index=True)
else:
df = data_frame
i += 1
scaler = StandardScaler()
y = df.pop("result")
scalled_data = scaler.fit_transform(df)
X_train, X_test, y_train, y_test = train_test_split(scalled_data , y, test_size = 0.20)
svclassifier = SVC(kernel='linear')
svclassifier.fit(X_train, y_train)
y_pred = svclassifier.predict(X_test)
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
pickle.dump(svclassifier , open("classifier.pkl" , 'wb'))
pickle.dump(scaler , open("scaler.pkl" , 'wb'))
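# --- Quick sanity check (added for illustration; not part of the original script):
# reload the pickled artefacts and confirm the classifier reproduces its predictions.
# X_test is already in scaled space, so it can be fed to the reloaded model directly;
# reloaded_scaler would be applied to raw feature rows at inference time.
reloaded_clf = pickle.load(open("classifier.pkl", 'rb'))
reloaded_scaler = pickle.load(open("scaler.pkl", 'rb'))
reloaded_pred = reloaded_clf.predict(X_test)
print("reloaded classifier matches in-memory classifier:", (reloaded_pred == y_pred).all())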
| 31.717949
| 87
| 0.735651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 69
| 0.05578
|
e145c5c7a800878dc251c5025a3fb2b44ba71b0b
| 6,266
|
py
|
Python
|
1 - Data Analysis/2_Analysis - Data Exploration.py
|
dkim319/NFL_Predictive_Model_v2
|
5884e10a681e2e34f54a2280c94d2f42fc442d17
|
[
"CNRI-Python"
] | 1
|
2019-09-14T04:04:51.000Z
|
2019-09-14T04:04:51.000Z
|
1 - Data Analysis/2_Analysis - Data Exploration.py
|
dkim319/NFL_Predictive_Model_v2
|
5884e10a681e2e34f54a2280c94d2f42fc442d17
|
[
"CNRI-Python"
] | null | null | null |
1 - Data Analysis/2_Analysis - Data Exploration.py
|
dkim319/NFL_Predictive_Model_v2
|
5884e10a681e2e34f54a2280c94d2f42fc442d17
|
[
"CNRI-Python"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 14 20:21:23 2017
@author: DKIM
"""
# required libraries loaded
import pandas as pd
import numpy as np
import matplotlib
matplotlib.style.use('ggplot')
import matplotlib.pyplot as plt
seednumber = 319
data = pd.read_csv('Data.csv')
# Initial dataset
print('Initial dataset dimensions')
print(data.shape)
target_year = 2017
print('Filter to only the training data')
orig_data = data[data['season'] <= target_year]
# Data Preprocessing
# replace any null values with 0
data = data.fillna(0)
# use one-hot coding to replace the favorite and underdog categorical variables
fav_team = pd.get_dummies(data['favorite'])
und_team = pd.get_dummies(data['underdog'])
# use a prefix to distinguish the two categorical variables
fav_team = fav_team.add_prefix('fav_')
und_team = und_team.add_prefix('und_')
# remove the original fields
data = data.drop('favorite', axis = 1)
data = data.drop('underdog', axis = 1)
# add the one-hot coded fields
data = pd.concat([data, fav_team], axis = 1)
data = pd.concat([data, und_team], axis = 1)
#print data.head(5)
#print(data.describe())
# split the dataset into training and testing datasets
data = data[data['season'] <= target_year]
data.reset_index()
print('Final dataset dimensions')
print(data.shape)
#statistics = data.describe()
#statistics.to_csv('stats.csv')
print('Review the distribution of the target variable')
print('Target variable is evenly distributed and is not skewed')
spread_by_year = data.groupby(['season'])['spreadflag'].mean()
print(spread_by_year)
corr_data = data.corr(method = 'pearson')
print('Review the correlation between the variables and the target variable')
print('Top 10 correlated variables')
print(corr_data['spreadflag'].sort_values(ascending=False).head(11))
print('Top 10 negatively correlated variables')
print(corr_data['spreadflag'].sort_values(ascending=True).head(10))
years = [2010,2011,2012,2013,2014,2015,2016,2017]
for x in years:
year_data = data[data['season'] == x]
year_data_corr = year_data.corr(method = 'pearson')
print('Top 10 correlated variables for the target variable, spreadflag, for the year ' + str(x))
print(year_data_corr['spreadflag'].sort_values(ascending=False).head(11))
print('')
print('Top 10 negatively correlated variables for the target variable, spreadflag, for the year ' + str(x))
print(year_data_corr['spreadflag'].sort_values(ascending=True).head(10))
print('')
# Plot favorite win % over spread
spread_agg = data.groupby(['spread'])['spreadflag'].mean()
spread_count = data.groupby(['spread'])['spreadflag'].count() / data.shape[0]
fig, axes = plt.subplots(2,1)
spread_agg_ax = spread_agg.plot(ax = axes[0])
spread_agg_ax.set_ylabel('favorite win %')
spread_agg_ax.set_title('Figure 1 - Spread')
spread_agg_figure = spread_agg_ax.get_figure()
spread_count_ax = spread_count.plot(kind = 'line',ax = axes[1])
spread_count_ax.set_ylabel('spread %')
spread_count_figure = spread_count_ax.get_figure()
plt.show()
#plt.savefig('2b - fig 1 - spread_vis.png')
# Plot the favorite win % over total
total_agg = data.groupby(['total'])['spreadflag'].mean()
total_count = data.groupby(['total'])['spreadflag'].count() / data.shape[0]
fig, axes = plt.subplots(2,1)
total_agg_ax = total_agg.plot(ax = axes[0])
total_agg_ax.set_ylabel('favorite win %')
total_agg_ax.set_title('Figure 2 - Total')
total_agg_figure = total_agg_ax.get_figure()
total_count_ax = total_count.plot(kind = 'line',ax = axes[1])
total_count_ax.set_ylabel('total %')
total_count_figure = total_count_ax.get_figure()
plt.show()
#plt.savefig('2b - fig 2 - total_vis.png')
# Check the Team over winning %
favorite_win_percent = orig_data.groupby(['favorite'])['spreadflag'].mean()
underdog_win_percent = 1 - orig_data.groupby(['underdog'])['spreadflag'].mean()
print('Top 10 Favorites by ATS percent')
print(favorite_win_percent.sort_values(ascending=False).head(10))
print('')
print('Top 10 Underdogs by ATS percent')
print(underdog_win_percent.sort_values(ascending=False).head(10))
print('')
# Plot the favorite win % over favorite's win record over last 5 and 10 games
fav_last_5_percent_vis_agg = data.groupby(['fav_last_5_percent'])['spreadflag'].mean()
fav_last_10_percent_vis_agg = data.groupby(['fav_last_10_percent'])['spreadflag'].mean()
fig, axes = plt.subplots(2,1)
fav_last_5_percent_vis_agg_ax = fav_last_5_percent_vis_agg.plot(ax = axes[0])
fav_last_5_percent_vis_agg_ax.set_ylabel('favorite win %')
fav_last_5_percent_vis_agg_ax.set_title('Figure 3a - Favorite Win % Last 5 Games')
fav_last_5_percent_vis_agg_figure = fav_last_5_percent_vis_agg_ax.get_figure()
fav_last_5_percent_vis_agg_figure.subplots_adjust(hspace=0.75)
fav_last_10_percent_vis_agg_ax = fav_last_10_percent_vis_agg.plot(kind = 'line',ax = axes[1])
fav_last_10_percent_vis_agg_ax.set_ylabel('favorite win %')
fav_last_10_percent_vis_agg_ax.set_title('Figure 3b - Favorite Win % Last 10 Games')
fav_last_10_percent_vis_count_figure = fav_last_10_percent_vis_agg_ax.get_figure()
plt.show()
#plt.savefig('2b - fig 3 - fav_last_5_percent.png')
# Plot the favorite win % over underdog's win record over last 5 and 10 games
undlast_5_percent_vis_agg = data.groupby(['und_last_5_percent'])['spreadflag'].mean()#.sum()/ data.groupby(['spread'])['spreadflag'].count()
und_last_10_percent_vis_agg = data.groupby(['und_last_10_percent'])['spreadflag'].mean()
fig, axes = plt.subplots(2,1)
und_last_5_percent_vis_agg_ax = undlast_5_percent_vis_agg.plot(ax = axes[0])
und_last_5_percent_vis_agg_ax.set_ylabel('underdog win %')
und_last_5_percent_vis_agg_ax.set_title('Figure 4a - Underdog Win % Last 5 Games')
und_last_5_percent_vis_agg_figure = und_last_5_percent_vis_agg_ax.get_figure()
und_last_5_percent_vis_agg_figure.subplots_adjust(hspace=0.75)
und_last_10_percent_vis_agg_ax = und_last_10_percent_vis_agg.plot(kind = 'line',ax = axes[1])
und_last_10_percent_vis_agg_ax.set_ylabel('underdog win %')
und_last_10_percent_vis_agg_ax.set_title('Figure 4b - Underdog Win % Last 10 Games')
und_last_10_percent_vis_agg_figure = und_last_10_percent_vis_agg_ax.get_figure()
plt.show()
#plt.savefig('2b - fig 4 - und_last_5_percent.png')
| 34.811111
| 141
| 0.76157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,379
| 0.379668
|
e1473bb4e004b0d3642a2fee0b5a8667fbdf36d4
| 597
|
py
|
Python
|
tests/functional/testplan/test_plan_timeout.py
|
dobragab/testplan
|
407ac1dfd33d19753e41235a1f576aeb06118840
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/testplan/test_plan_timeout.py
|
dobragab/testplan
|
407ac1dfd33d19753e41235a1f576aeb06118840
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/testplan/test_plan_timeout.py
|
dobragab/testplan
|
407ac1dfd33d19753e41235a1f576aeb06118840
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Testplan that is expected to time out."""
import sys
import threading
import testplan
from testplan.testing import multitest
@multitest.testsuite
class TimeoutSuite(object):
@multitest.testcase
def blocks(self, env, result):
result.log('Blocking...')
threading.Event().wait()
@testplan.test_plan(name='Timeout example',
timeout=5)
def main(plan):
plan.add(multitest.MultiTest(name='Timeout MTest',
suites=[TimeoutSuite()]))
if __name__ == '__main__':
sys.exit(main().exit_code)
| 20.586207
| 58
| 0.643216
| 154
| 0.257956
| 0
| 0
| 379
| 0.634841
| 0
| 0
| 120
| 0.201005
|
e14841f80a1f905b5006c26969f6f10bf64c27b5
| 107
|
py
|
Python
|
Codefights/arcade/intro/level-2/6.Make-Array-Consecutive-2/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codefights/arcade/intro/level-2/6.Make-Array-Consecutive-2/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codefights/arcade/intro/level-2/6.Make-Array-Consecutive-2/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python3
def makeArrayConsecutive2(statues):
return (max(statues) - min(statues) + 1) - len(statues)
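# Quick check (added for illustration): with statues [6, 2, 3, 8] the range 2..8
# needs 7 values, 4 are present, so 3 statues are missing:
#
#     makeArrayConsecutive2([6, 2, 3, 8])  # -> 3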
| 21.4
| 59
| 0.700935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.084112
|
e14a89ff9896dc6d76ffe641bcbb01393e6b478d
| 1,127
|
py
|
Python
|
tests/classification_test.py
|
mjirik/lisa
|
06c5cb8f375f51302341e768512f02236774c8a3
|
[
"BSD-3-Clause"
] | 22
|
2015-01-26T12:58:54.000Z
|
2021-04-15T17:48:13.000Z
|
tests/classification_test.py
|
mjirik/lisa
|
06c5cb8f375f51302341e768512f02236774c8a3
|
[
"BSD-3-Clause"
] | 31
|
2015-01-23T14:46:13.000Z
|
2018-05-18T14:47:18.000Z
|
tests/classification_test.py
|
mjirik/lisa
|
06c5cb8f375f51302341e768512f02236774c8a3
|
[
"BSD-3-Clause"
] | 13
|
2015-06-30T08:54:27.000Z
|
2020-09-11T16:08:19.000Z
|
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# import functions from another directory
# import sys
import os.path
path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../extern/pyseg_base/src/"))
# sys.path.append(os.path.join(path_to_script, "../extern/sed3/"))
# sys.path.append(os.path.join(path_to_script, "../src/"))
import unittest
import numpy as np
import lisa.classification
class OrganSegmentationTest(unittest.TestCase):
def test_gmmclassifier(self):
X_tr = np.array([1, 2, 0, 1, 1, 0, 7, 8, 9, 8, 6, 7]).reshape(-1, 1)
y_tr = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]).reshape(-1)
X_te = np.array([1, 7, 8]).reshape(-1, 1)
y_te = np.array([0, 1, 1]).reshape(-1)
# cl = GMMClassifier(each_class_params=[{},{}])
cl = lisa.classification.GMMClassifier(each_class_params=[
{'covariance_type': 'full'},
{'n_components': 2}])
cl.fit(X_tr, y_tr)
y_te_pr = cl.predict(X_te)
self.assertTrue((y_te_pr == y_te).all())
if __name__ == "__main__":
unittest.main()
| 29.657895
| 76
| 0.615794
| 642
| 0.567639
| 0
| 0
| 0
| 0
| 0
| 0
| 385
| 0.340407
|
e14b23b0342f7644f668cb1aa04ae3158b4e1e5b
| 751
|
py
|
Python
|
application.py
|
milindvb/python-docs-hello-world
|
6d3c8b1936c10ee245cc7c4ffb448e94c8b4b9de
|
[
"MIT"
] | null | null | null |
application.py
|
milindvb/python-docs-hello-world
|
6d3c8b1936c10ee245cc7c4ffb448e94c8b4b9de
|
[
"MIT"
] | null | null | null |
application.py
|
milindvb/python-docs-hello-world
|
6d3c8b1936c10ee245cc7c4ffb448e94c8b4b9de
|
[
"MIT"
] | null | null | null |
from flask import Flask
# import pyodbc
app = Flask(__name__)
@app.route("/")
def hello():
# Some other example server values are
# server = 'localhost\sqlexpress' # for a named instance
# server = 'myserver,port' # to specify an alternate port
# server = 'tcp:mytest.centralus.cloudapp.azure.com'
# database = 'test'
# username = 'ndb'
# password = 'test1789###'
# cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+ password)
# cursor = cnxn.cursor()
# cursor.execute('SELECT * FROM dbo.Users')
# s = ' '
# for row in cursor:
# s += ''.join(row)
# print(row)
s = '!! Azure'
return "hello"+s
| 28.884615
| 141
| 0.600533
| 0
| 0
| 0
| 0
| 686
| 0.913449
| 0
| 0
| 576
| 0.766977
|
e14ca387e55877393570685f057c5e66f54b5ec5
| 3,906
|
py
|
Python
|
basefiles/sweeps/SMTBFsweep.py
|
hpec-2021-ccu-lanl/simulator
|
21a7cc0dd12feef5ad26668a3cc216854cc2dd40
|
[
"BSD-3-Clause"
] | null | null | null |
basefiles/sweeps/SMTBFsweep.py
|
hpec-2021-ccu-lanl/simulator
|
21a7cc0dd12feef5ad26668a3cc216854cc2dd40
|
[
"BSD-3-Clause"
] | null | null | null |
basefiles/sweeps/SMTBFsweep.py
|
hpec-2021-ccu-lanl/simulator
|
21a7cc0dd12feef5ad26668a3cc216854cc2dd40
|
[
"BSD-3-Clause"
] | null | null | null |
from sweeps.sweepFunctions import *
import numpy as np
import sys  # used for sys.exit() below; kept explicit in case the wildcard import above does not re-export it
def SMTBFSweep(SMTBFSweepInput,ourInput):
myRange = SMTBFSweepInput["range"] if dictHasKey(SMTBFSweepInput,"range") else False
myStickyRange=SMTBFSweepInput["sticky-range"] if dictHasKey(SMTBFSweepInput,"sticky-range") else False
sticky=False if type(myStickyRange) == bool else True
myFormula = SMTBFSweepInput["formula"] if dictHasKey(SMTBFSweepInput,"formula") else False
fixedToNode = SMTBFSweepInput["compute-SMTBF-from-NMTBF"] if dictHasKey(SMTBFSweepInput,"compute-SMTBF-from-NMTBF") else False
if type(myRange) == bool and type(myStickyRange) == bool:
#ok so we are going to have a min,max,step
minimum = float(SMTBFSweepInput["min"])
maximum = float(SMTBFSweepInput["max"])
step = float(SMTBFSweepInput["step"])
if myFormula:
#ok so we have a formula
formula_range = list(np.arange(minimum,maximum+step,step))
SMTBFRange = [eval(myFormula) for i in formula_range]
else:
SMTBFRange = list(np.arange(minimum,maximum+step,step))
elif myFormula:
if sticky:
formula_range = myStickyRange
else:
formula_range = myRange
SMTBFRange = [eval(myFormula) for i in formula_range]
else:
if sticky:
SMTBFRange = myStickyRange
else:
SMTBFRange = myRange
currentExperiments = len(ourInput.keys())
if sticky and not(len(SMTBFRange) == currentExperiments):
print("chose sticky-range for SMTBF but length of sticky-range does not match length of currentExperiments\n"+"SMTBFRange: "+str(len(SMTBFRange))
+" currentExperiments: "+ str(currentExperiments))
raise ValueError("chose sticky-range for SMTBF but length of sticky-range does not match length of currentExperiments\n"+"SMTBFRange: "+str(len(SMTBFRange))
+" currentExperiments: "+ str(currentExperiments))
#if there were no sweeps before. Notice compute-SMTBF-from-NMTBF doesn't make sense if this is the case since there will be no nodes
if currentExperiments == 0:
count = 1
for i in SMTBFRange:
ourInput["experiment_{count}".format(count=count)]={"SMTBF":i}
count+=1
#there were sweeps before
else:
tmpInput = ourInput.copy()
count = 1
# update the current experiments first, if sticky ONLY update the current experiments
for i in ourInput.keys():
data = ourInput[i]
if fixedToNode == True:
nodes = data["nodes"] if dictHasKey(data,"nodes") else False
if type(nodes) == bool:
print("compute-SMTBF-from-NMTBF set but no nodes set")
sys.exit(1)
if sticky:
data["SMTBF"] = SMTBFRange[count-1]/nodes
else:
data["SMTBF"] = SMTBFRange[0]/nodes
else:
data["SMTBF"] = SMTBFRange[0]
ourInput[i] = data
count+=1
if not sticky:
for i in SMTBFRange:
if not i == SMTBFRange[0]: #skip the first, we already did it
for j in tmpInput.keys():
data = tmpInput[j].copy()
if fixedToNode == True:
nodes = data["nodes"] if dictHasKey(data,"nodes") else False
if type(nodes) == bool:
print("compute-SMTBF-from-NMTBF set but no nodes set")
sys.exit(1)
data["SMTBF"] = i/nodes
else:
data["SMTBF"] = i
ourInput["experiment_{count}".format(count=count)] = data
count+=1
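# --- Shape sketch (added for illustration; the keys and values are hypothetical) ---
# SMTBFSweep mutates `ourInput` in place, one dict per experiment. For example,
# with no prior experiments and SMTBFSweepInput = {"min": 100, "max": 300, "step": 100}:
#
#     ourInput == {
#         "experiment_1": {"SMTBF": 100.0},
#         "experiment_2": {"SMTBF": 200.0},
#         "experiment_3": {"SMTBF": 300.0},
#     }
#
# If experiments already exist (e.g. from an earlier "nodes" sweep), each existing
# entry is updated with the first SMTBF value and, unless sticky-range is used,
# new experiments are appended for the remaining values.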
| 48.222222
| 164
| 0.575269
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 959
| 0.24552
|
e14ce3e30f3e8ef1bb113abf4b81672a5245be55
| 1,708
|
py
|
Python
|
tests/functional_pyecore/regressions/test_issue_34_resolving_pyecore.py
|
aranega/textX
|
abb04d272a1b74f937d43400be130cf7a3be3516
|
[
"MIT"
] | 4
|
2017-12-04T11:07:11.000Z
|
2021-06-21T20:54:09.000Z
|
tests/functional_pyecore/regressions/test_issue_34_resolving_pyecore.py
|
aranega/textX
|
abb04d272a1b74f937d43400be130cf7a3be3516
|
[
"MIT"
] | null | null | null |
tests/functional_pyecore/regressions/test_issue_34_resolving_pyecore.py
|
aranega/textX
|
abb04d272a1b74f937d43400be130cf7a3be3516
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import pytest # noqa
import sys
pytestmark = pytest.mark.skipif(sys.version_info[0] < 3,
reason="pyecore is not Python 2 compatible") # noqa
pyecore = pytest.importorskip("pyecore") # noqa
import textx
from textx.metamodel import metamodel_from_str
@pytest.fixture(scope="module")
def enable_pyecore_support():
textx.enable_pyecore_support()
yield
textx.enable_pyecore_support(enable=False)
pytestmark = [pytestmark, pytest.mark.usefixtures("enable_pyecore_support")]  # keep the skipif mark defined above as well
def test_issue_34_resolving():
"""An issue in resolving a list of objects of different types.
In the grammar below, attribute `values` in `FormulaExp` collect STRING
instances which leads textX to deduce the type of this attribute to be list
of STRING objects. Thus, object reference resolving does not consider the
`values` list.
In the new version textX will deduce type OBJECT if different types are
used in multiple assignments.
"""
grammar = """
Expression:
atts+=Attribute[','] 'formula' form=Formula
;
Formula:
value=FormulaExp
;
FormulaExp:
values=Cond
| ( values='(' values=Formula values=')' )
;
Cond:
attribute = [Attribute|attr_id] '<' values=STRING
;
attr_id:
/attr_[a-f0-9]+/
;
Attribute:
name = attr_id
;
"""
meta_model = metamodel_from_str(grammar)
model = meta_model.model_from_str(
"attr_123, attr_444 formula attr_123 < 'aa'")
assert type(model.form.value.values[0].attribute).__name__ == 'Attribute'
assert model.form.value.values[0].attribute.name == 'attr_123'
| 25.878788
| 84
| 0.67096
| 0
| 0
| 121
| 0.070843
| 153
| 0.089578
| 0
| 0
| 985
| 0.576698
|
e14d0acbede38071c9f51e6e3d4fd2359e4f607b
| 863
|
py
|
Python
|
pylbd/s3_object.py
|
MacHu-GWU/pylbd-project
|
d9be28d1f9f7679237e4d3c86f63ea06f43249dd
|
[
"MIT"
] | null | null | null |
pylbd/s3_object.py
|
MacHu-GWU/pylbd-project
|
d9be28d1f9f7679237e4d3c86f63ea06f43249dd
|
[
"MIT"
] | null | null | null |
pylbd/s3_object.py
|
MacHu-GWU/pylbd-project
|
d9be28d1f9f7679237e4d3c86f63ea06f43249dd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import boto3
from botocore.exceptions import ClientError
import attr
from attrs_mate import AttrsClass
import weakref
@attr.s
class S3Object(AttrsClass):
aws_profile = attr.ib()
bucket = attr.ib() # type: str
key = attr.ib() # type: str
_s3_client_cache = weakref.WeakValueDictionary()
def s3_client(self):
if self.aws_profile not in self._s3_client_cache:
client = boto3.session.Session(profile_name=self.aws_profile).client("s3")
self._s3_client_cache[self.aws_profile] = client
return self._s3_client_cache[self.aws_profile]
def exists_on_s3(self):
try:
self.s3_client().head_object(Bucket=self.bucket, Key=self.key)
return True
except ClientError:
return False
except Exception as e:
raise e
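# --- Usage sketch (added for illustration; profile, bucket and key are hypothetical) ---
#
#     obj = S3Object(aws_profile="my-profile", bucket="my-bucket", key="data/report.csv")
#     if obj.exists_on_s3():
#         print("object is present")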
| 26.151515
| 86
| 0.659328
| 708
| 0.820394
| 0
| 0
| 716
| 0.829664
| 0
| 0
| 49
| 0.056779
|
e14d1130b819743aa4189ff145d7b0695bac00b3
| 543
|
py
|
Python
|
android_toast/toast.py
|
ShareASmile/car-locator
|
765d26ad414ab86e4d93bc5338868769e8b3e90f
|
[
"MIT"
] | 21
|
2020-09-08T21:03:25.000Z
|
2022-02-15T07:08:04.000Z
|
android_toast/toast.py
|
ShareASmile/car-locator
|
765d26ad414ab86e4d93bc5338868769e8b3e90f
|
[
"MIT"
] | 3
|
2021-04-13T09:40:20.000Z
|
2021-05-28T20:53:07.000Z
|
android_toast/toast.py
|
ShareASmile/car-locator
|
765d26ad414ab86e4d93bc5338868769e8b3e90f
|
[
"MIT"
] | 9
|
2020-12-11T09:01:42.000Z
|
2022-03-28T00:55:59.000Z
|
from android.runnable import run_on_ui_thread
from jnius import autoclass, cast
mActivity = autoclass("org.kivy.android.PythonActivity").mActivity
Toast = autoclass("android.widget.Toast")
CharSequence = autoclass("java.lang.CharSequence")
String = autoclass("java.lang.String")
@run_on_ui_thread
def android_toast(text, long=False):
    duration = Toast.LENGTH_LONG if long else Toast.LENGTH_SHORT  # long=True should map to LENGTH_LONG
text = cast(CharSequence, String(text))
Toast.makeText(
mActivity.getApplicationContext(), text, duration
).show()
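# --- Usage sketch (added for illustration): call from any thread inside a Kivy app
# running on Android; the run_on_ui_thread decorator hops onto the UI thread for you.
#
#     android_toast("Car location saved")          # short toast
#     android_toast("Location saved", long=True)   # long toast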
| 28.578947
| 66
| 0.756906
| 0
| 0
| 0
| 0
| 259
| 0.47698
| 0
| 0
| 97
| 0.178637
|
e14d6f0551ebee50376c52df3cd3465b333386e1
| 9,395
|
py
|
Python
|
ktsp.py
|
lum4chi/mylearn
|
8a66fd5ebc32a70783132e185b4f5ce18ce14c5f
|
[
"MIT"
] | null | null | null |
ktsp.py
|
lum4chi/mylearn
|
8a66fd5ebc32a70783132e185b4f5ce18ce14c5f
|
[
"MIT"
] | null | null | null |
ktsp.py
|
lum4chi/mylearn
|
8a66fd5ebc32a70783132e185b4f5ce18ce14c5f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
# author: Francesco Lumachi <francesco.lumachi@gmail.com>
import pandas as pd
import numpy as np
from itertools import islice
from sklearn.utils.validation import check_X_y
class KTopScoringPair:
""" K-Top Scoring Pair classifier.
    This classifier evaluates the maximum-likelihood estimate of P(X_i < X_j | Y),
    with X_i < X_j a pair of features given a class Y. K determines how many pairs
    are evaluated. Pairs are then ranked by the primary score:
s = P(X_i < X_j | 0) - P(X_i < X_j | 1)
Further detail can be found in [1].
    By its nature this is a binary classifier, but it will not raise an error if
    more than two labels are found; the score will simply be computed between the
    first and second class. Multi-class classification can be achieved by using
    sklearn multiclass wrappers.
Parameters
----------
    pairs : list of tuples with the indices of the features to be considered.
        The features will be tested in order, that is, (X_i, X_j) will be counted
        for X_i < X_j.
K : int. How many pairs will contribute to classification.
It should be chosen as an odd int, to allow majority voting.
t : int, optional (default=0)
It can be used to adjust accuracy/specificity. By default it means that
score_{ij} = (P(X_i < X_j | 0) - P(X_i < X_j | 1)) > t
Attributes
----------
estimated_proba_ : 2d array of float
Estimated probability computed from training.
    rules_ : list of length K
        Human-readable rules found during training.
    References
    ----------
.. [1] AFSARI, Bahman, et al. Rank discriminants for predicting phenotypes
from RNA expression. The Annals of Applied Statistics, 2014, 8.3: 1469-1491.
"""
def __init__(self, pairs, K, t=0):
self.pairs = pairs
self.K = K
self.t = t
self._estimator_type = "classifier"
# Defined after fitting
self.estimated_proba_ = None
self.rules_ = []
self.classes_ = []
def fit(self, X, y):
""" Train the classifier.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
y : array-like of shape = [n_samples]
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y) # Assert input is safe
# Determine class and convert y accordingly
self.classes_, y = np.unique(y, return_inverse=True)
# Main statistics gathering
Frequencies, Sizes = self._fit(X, y, self.pairs)
# Compute likelihood probabilities
self._compute_proba(Frequencies, Sizes)
return self
def _fit(self, X, y, pairs):
        # Instantiate dictionary as counter for (X_i, X_j) = |{X_i < X_j | Y}|
pairs_dict = {l: dict() for l in range(len(self.classes_))}
class_size = {l: 0 for l in range(len(self.classes_))}
# Class loop
for label in pairs_dict.keys():
X_given_y = X[y==label]
class_size[label] = X_given_y.shape[0]
class_pairs = pairs_dict[label]
# Pairs loop
for X_i, X_j in pairs:
class_pairs[(X_i, X_j)] = sum(X_given_y[:, X_i] < X_given_y[:, X_j])
# Return statistics in a convenient format
Freq, Size = pd.DataFrame(pairs_dict), pd.Series(class_size)
return Freq, Size
def predict(self, X, K=None, t=None):
""" Predict the provided X.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
K : int, optional.
            Once estimated_proba_ has been computed, K can be varied freely to use
            a different number of rules than the __init__-time K.
t : int, optional
Same as above
Returns
-------
y : array-like of shape = [n_samples]
"""
P = self.predict_proba(X, K)
# Translate most probable class with its label
return self.classes_[np.argmax(P, axis=1)]
def predict_proba(self, X, K=None, t=None):
""" Predict the provided X with probabilities.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
K : int, optional.
            Once estimated_proba_ has been computed, K can safely be varied to
            use K rules different from the __init__-time K
t : int, optional
Same as above
Returns
-------
P : array of shape = [n_samples, n_class]
"""
def vote_for(x):
return [r['i<j'] if x[r['i']] < x[r['j']] else r['j<i'] for r in self.rules_]
# Rebuild rules if K or t is different from __init__ time K
if (K is not None and K != self.K) or (t is not None and t != self.t):
P = self.estimated_proba_
self.K = self.K if K is None else K
self.t = self.t if t is None else t
self.rules_ = self._scorer(P, self.K, self.t, P.columns[0], P.columns[1])
# Gather votes for every sample -> V = (n, k)
V = [vote_for(x) for _, x in X.iterrows()]
# Group votes by class -> P (n, c)
P = [{k: v for k, v in zip(*np.unique(v, return_counts=True))} for v in V]
P = pd.DataFrame(P, columns=self.classes_).fillna(0)
        # Normalize to emit probabilities
return (P / self.K).as_matrix()
def partial_fit(self, X_batch, y_batch, classes):
""" Train the classifier by chunk. This can take advantage of multiprocessing
computation. Choose chunk dimension it is your discretion.
Parameters
----------
X_batch : iterator for an {array-like, sparse matrix} of
shape = [n_samples, n_features]
y_batch : iterator for an array-like of shape = [n_samples]
classes : array-like, shape (n_classes,)
            Classes can't be inferred from a chunk, so they need to be passed as argument.
Returns
-------
self : returns an instance of self.
"""
from multiprocessing import Pool
self.classes_ = np.array(sorted(classes))
pool = Pool()
# Process mapping (zip is needed because map can handle only one argument)
Freq_chunks, Size_chunks = zip(*pool.map(self._chunk_worker, zip(X_batch, y_batch)))
        # Concatenate the per-chunk results, then group by pair (and by class
        # for the sizes) and aggregate the totals with a sum
F, S = pd.concat(Freq_chunks), pd.concat(Size_chunks)
Frequencies, Sizes = F.groupby(level=[0, 1]).sum(), S.groupby(S.index).sum()
# Now statistics are complete, compute as normal fit
self._compute_proba(Frequencies, Sizes)
return self
def _chunk_worker(self, X_y):
# Assert input safely
X, y = X_y
X, y = check_X_y(X, y)
# Translate y as label
d = {k:v for k,v in zip(self.classes_, range(len(self.classes_)))}
y = np.array(list(map(lambda x: d[x], y)))
# Count frequencies-sizes for this chunk
return self._fit(X, y, self.pairs)
def _scorer(self, P, K, t, minus, plus):
        # Not efficiency-friendly, but produces human-readable rules.
def formatted_rule(i, j, isPositive, score):
if isPositive:
return {"i":i, "j":j, "i<j":minus, "j<i":plus, "score":score}
else:
return {"i":i, "j":j, "i<j":plus, "j<i":minus, "score":score}
        # The sign of the score depends on what is subtracted from what
scores = P[minus] - P[plus]
ranked = scores.abs().sort_values(ascending=False)
# Compute rules, ranked by descending score
rules = [formatted_rule(k[0], k[1], scores[k] > t, scores[k])
for k in islice(iter(ranked.keys()), K)]
return rules
def _compute_proba(self, Frequencies, Sizes):
# Mainly for debugging purposes
self.frequencies_, self.sizes_ = Frequencies, Sizes
        # Compute P = |{X_i < X_j | Y}| / |Y|
P = Frequencies / Sizes
self.estimated_proba_ = P
# Build rules
self.rules_ = self._scorer(P, self.K, self.t, P.columns[0], P.columns[1])
def get_params(self, deep=True):
return {"pairs": self.pairs, "K": self.K, "t": self.t}
def set_params(self, **parameters):
for parameter, value in parameters.items():
            setattr(self, parameter, value)
return self
def human_rules(self, features):
""" Allow rules convertion for human reading.
Parameters
----------
features : list of feature name corresponding to i,j indexing
Returns
-------
hr_rules : list of rules, with label converted according to input
"""
import copy as cp
hr_rules = cp.deepcopy(self.rules_)
for d in hr_rules:
d['i'], d['j'] = features[d['i']], features[d['j']]
d['i<j'], d['j<i'] = self.classes_[d['i<j']], self.classes_[d['j<i']]
return hr_rules
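# --- Illustrative usage sketch (not part of the original module) ---
# Synthetic data, labels and pair indices below are made up for demonstration.
# It assumes an older pandas where DataFrame.as_matrix() (called inside
# predict_proba) is still available, and uses integer class labels so the
# vote columns line up with classes_.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.rand(40, 4)
    X[20:, 0] += 1.0  # make X_0 > X_1 more likely for class 1 only
    y = np.array([0] * 20 + [1] * 20)
    clf = KTopScoringPair(pairs=[(0, 1), (0, 2), (1, 2)], K=3)
    clf.fit(X, y)
    print(clf.human_rules(["f0", "f1", "f2", "f3"]))
    print(clf.predict(pd.DataFrame(X[:5])))  # predict_proba iterates DataFrame rows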
| 41.570796
| 92
| 0.571368
| 9,179
| 0.977009
| 0
| 0
| 0
| 0
| 0
| 0
| 5,119
| 0.544864
|
e14da54b265b1cbaa55d62627f5c4770644546b4
| 11,089
|
py
|
Python
|
picmodels/models/care_advisors/daily_metrics_models/services/read.py
|
bbcawodu/careadvisors-backend
|
5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838
|
[
"MIT"
] | null | null | null |
picmodels/models/care_advisors/daily_metrics_models/services/read.py
|
bbcawodu/careadvisors-backend
|
5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838
|
[
"MIT"
] | null | null | null |
picmodels/models/care_advisors/daily_metrics_models/services/read.py
|
bbcawodu/careadvisors-backend
|
5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838
|
[
"MIT"
] | null | null | null |
import datetime
import picmodels.models
from picmodels.models.utils import filter_db_queryset_by_id
from picmodels.models.care_advisors.navigator_models.services.read import filter_navigator_objs_by_f_and_l_name
from picmodels.models.care_advisors.navigator_models.services.read import filter_navigator_objs_by_first_name
from picmodels.models.care_advisors.navigator_models.services.read import filter_navigator_objs_by_last_name
from picmodels.models.care_advisors.navigator_models.services.read import filter_navigator_objs_by_email
from picmodels.models.care_advisors.navigator_models.services.read import filter_navigator_objs_by_mpn
def retrieve_metrics_data_by_staff_id(cls, rqst_staff_id, list_of_ids, search_params, rqst_errors, fields=None):
staff_instances = filter_db_queryset_by_id(picmodels.models.Navigators.objects.all(), rqst_staff_id, list_of_ids)
response_list = create_metrics_response_list_from_filtered_staff_objects_and_secondary_params(staff_instances, search_params, fields)
def check_response_data_for_requested_data():
missing_parameter_list = []
if not response_list:
rqst_errors.append("No metrics entries found in database for given staff id(s).")
if rqst_staff_id == 'all':
missing_parameter_list = ['all']
else:
missing_parameter_list = list_of_ids
else:
if list_of_ids:
for db_id in list_of_ids:
tuple_of_bools_if_id_in_data = (metrics_data_entry["Staff Information"]['id'] == db_id for metrics_data_entry in response_list)
if not any(tuple_of_bools_if_id_in_data):
rqst_errors.append('Metrics for staff Member with id: {} not found in database'.format(db_id))
missing_parameter_list.append(db_id)
return missing_parameter_list
missing_primary_parameters = check_response_data_for_requested_data()
return response_list, missing_primary_parameters
def retrieve_metrics_data_by_staff_f_and_l_name(cls, list_of_first_names, list_of_last_names, search_params, rqst_errors, fields=None):
response_list = []
missing_primary_parameters = []
if len(list_of_first_names) == len(list_of_last_names):
for i in range(len(list_of_first_names)):
first_name = list_of_first_names[i]
last_name = list_of_last_names[i]
staff_instances = filter_navigator_objs_by_f_and_l_name(picmodels.models.Navigators.objects.all(), first_name, last_name)
response_list_component = create_metrics_response_list_from_filtered_staff_objects_and_secondary_params(staff_instances, search_params, fields)
def check_response_data_for_requested_data():
if not response_list_component:
rqst_errors.append("No metrics entries found in database for {} {}".format(first_name, last_name))
missing_primary_parameters.append("{} {}".format(first_name, last_name))
check_response_data_for_requested_data()
def add_response_component_to_response_data():
if response_list_component:
response_list.append(response_list_component)
add_response_component_to_response_data()
else:
rqst_errors.append('Length of first name list must be equal to length of last name list')
return response_list, missing_primary_parameters
def retrieve_metrics_data_by_staff_first_name(cls, list_of_first_names, search_params, rqst_errors, fields=None):
response_list = []
missing_primary_parameters = []
for first_name in list_of_first_names:
staff_instances = filter_navigator_objs_by_first_name(picmodels.models.Navigators.objects.all(), first_name)
response_list_component = create_metrics_response_list_from_filtered_staff_objects_and_secondary_params(staff_instances,
search_params,
fields)
def check_response_data_for_requested_data():
if not response_list_component:
rqst_errors.append("No metrics entries found in database for {}".format(first_name))
missing_primary_parameters.append(first_name)
check_response_data_for_requested_data()
def add_response_component_to_response_data():
if response_list_component:
response_list.append(response_list_component)
add_response_component_to_response_data()
return response_list, missing_primary_parameters
def retrieve_metrics_data_by_staff_last_name(cls, list_of_last_names, search_params, rqst_errors, fields=None):
response_list = []
missing_primary_parameters = []
for last_name in list_of_last_names:
staff_instances = filter_navigator_objs_by_last_name(picmodels.models.Navigators.objects.all(), last_name)
response_list_component = create_metrics_response_list_from_filtered_staff_objects_and_secondary_params(staff_instances, search_params, fields)
def check_response_data_for_requested_data():
if not response_list_component:
rqst_errors.append("No metrics entries found in database for {}".format(last_name))
missing_primary_parameters.append(last_name)
check_response_data_for_requested_data()
def add_response_component_to_response_data():
if response_list_component:
response_list.append(response_list_component)
add_response_component_to_response_data()
return response_list, missing_primary_parameters
def retrieve_metrics_data_by_staff_email(cls, list_of_emails, search_params, rqst_errors, fields=None):
response_list = []
missing_primary_parameters = []
for email in list_of_emails:
staff_instances = filter_navigator_objs_by_email(picmodels.models.Navigators.objects.all(), email)
response_list_component = create_metrics_response_list_from_filtered_staff_objects_and_secondary_params(staff_instances, search_params, fields)
def check_response_data_for_requested_data():
if not response_list_component:
rqst_errors.append("No metrics entries found in database for {}".format(email))
missing_primary_parameters.append(email)
check_response_data_for_requested_data()
def add_response_component_to_response_data():
if response_list_component:
response_list.append(response_list_component)
add_response_component_to_response_data()
return response_list, missing_primary_parameters
def retrieve_metrics_data_by_staff_mpn(cls, list_of_staff_mpns, search_params, rqst_errors, fields=None):
response_list = []
missing_primary_parameters = []
for staff_mpn in list_of_staff_mpns:
staff_instances = filter_navigator_objs_by_mpn(picmodels.models.Navigators.objects.all(), staff_mpn)
response_list_component = create_metrics_response_list_from_filtered_staff_objects_and_secondary_params(
staff_instances, search_params, fields)
def check_response_data_for_requested_data():
if not response_list_component:
rqst_errors.append("No metrics entries found in database for {}".format(staff_mpn))
missing_primary_parameters.append(staff_mpn)
check_response_data_for_requested_data()
def add_response_component_to_response_data():
if response_list_component:
response_list.append(response_list_component)
add_response_component_to_response_data()
return response_list, missing_primary_parameters
def create_metrics_response_list_from_filtered_staff_objects_and_secondary_params(staff_objects, search_params, requested_fields):
response_list = []
for staff_instance in staff_objects:
response_list_entry = {"Staff Information": staff_instance.return_values_dict()}
metrics_db_objects_for_this_staff_instance = staff_instance.metricssubmission_set.all()
filtered_metrics_instances = filter_metrics_db_instances_by_secondary_params(search_params, metrics_db_objects_for_this_staff_instance)
metrics_data_list = []
for metrics_instance in filtered_metrics_instances:
metrics_data_entry = create_metrics_data_response_entry_including_requested_fields(metrics_instance, requested_fields)
metrics_data_list.append(metrics_data_entry)
if metrics_data_list:
response_list_entry["Metrics Data"] = metrics_data_list
response_list.append(response_list_entry)
return response_list
def create_metrics_data_response_entry_including_requested_fields(metrics_instance, requested_fields):
complete_metrics_data_entry = metrics_instance.return_values_dict()
filtered_metrics_data_entry = {}
if requested_fields:
for field in requested_fields:
filtered_metrics_data_entry[field] = complete_metrics_data_entry[field]
else:
filtered_metrics_data_entry = complete_metrics_data_entry
return filtered_metrics_data_entry
def prefetch_related_rows(db_queryset):
db_queryset = db_queryset.select_related(
'staff_member',
'location',
'location__address',
'location__address__country'
)
db_queryset = db_queryset.prefetch_related(
'planstat_set',
)
return db_queryset
def filter_metrics_db_instances_by_secondary_params(search_params, metrics_instances):
metrics_instances = prefetch_related_rows(metrics_instances)
if 'zipcode_list' in search_params:
list_of_zipcodes = search_params['zipcode_list']
metrics_instances = metrics_instances.filter(location__address__zipcode__in=list_of_zipcodes)
if 'time_delta_in_days' in search_params:
time_delta_in_days = search_params['time_delta_in_days']
begin_date = datetime.date.today() - time_delta_in_days
metrics_instances = metrics_instances.filter(submission_date__gte=begin_date)
if 'start_date' in search_params:
rqst_start_date = search_params['start_date']
metrics_instances = metrics_instances.filter(submission_date__gte=rqst_start_date)
if 'end_date' in search_params:
rqst_end_date = search_params['end_date']
metrics_instances = metrics_instances.filter(submission_date__lte=rqst_end_date)
if 'location' in search_params:
rqst_location = search_params['location']
metrics_instances = metrics_instances.filter(location__name__iexact=rqst_location)
if 'location_id_list' in search_params:
list_of_location_ids = search_params['location_id_list']
metrics_instances = metrics_instances.filter(location__id__in=list_of_location_ids)
return metrics_instances.order_by("submission_date")
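# --- Illustrative only (not part of the original module) ---
# The shape of the `search_params` dict the helpers above expect; every key is
# optional and all values below are placeholders.
EXAMPLE_SEARCH_PARAMS = {
    'zipcode_list': ['60601', '60602'],
    'time_delta_in_days': datetime.timedelta(days=30),
    'start_date': datetime.date(2018, 1, 1),
    'end_date': datetime.date(2018, 12, 31),
    'location': 'Example Location Name',  # hypothetical location name
    'location_id_list': [1, 2, 3],
}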
| 44.179283
| 155
| 0.742447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 761
| 0.068627
|
e14daa2e05a40d76224587c816432879876d552a
| 949
|
py
|
Python
|
utils.py
|
YaelMoshe/CSV-Compressor
|
82a72c1750a8d1fb4b6f3d312995a537edbda48c
|
[
"MIT"
] | null | null | null |
utils.py
|
YaelMoshe/CSV-Compressor
|
82a72c1750a8d1fb4b6f3d312995a537edbda48c
|
[
"MIT"
] | null | null | null |
utils.py
|
YaelMoshe/CSV-Compressor
|
82a72c1750a8d1fb4b6f3d312995a537edbda48c
|
[
"MIT"
] | null | null | null |
class CompressorUtils(object):
@staticmethod
def encode(_cell, _list):
if not _cell:
data = "-"
elif _cell not in _list:
data = str(len(_list))
_list.append(_cell)
else:
data = str(_list.index(_cell))
return data
@staticmethod
def decode(_cell, _list):
data = ""
        if _cell != "-":
            print(_cell)
data = _list[int(_cell)]
return data
@staticmethod
def get_lists_string(lists):
all_lists = ""
for element in lists:
all_lists += ','.join(element)
all_lists += "@"
return all_lists[:-1]
@staticmethod
def get_string_lists(str_to_lists):
list_of_lists = []
ll = str_to_lists.split("@")
for l in ll:
list_of_lists.append( l.split(","))
return list_of_lists
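# --- Illustrative round-trip (not part of the original module) ---
# The cell values are made up; they show how encode() shares a lookup list
# and how the lookup lists are packed into and rebuilt from a single string.
if __name__ == "__main__":
    lookup = []
    encoded = [CompressorUtils.encode(c, lookup) for c in ["red", "blue", "red", ""]]
    print(encoded)  # ['0', '1', '0', '-']
    print(lookup)   # ['red', 'blue']
    packed = CompressorUtils.get_lists_string([lookup])
    print(CompressorUtils.get_string_lists(packed))  # [['red', 'blue']]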
| 23.725
| 48
| 0.500527
| 945
| 0.995785
| 0
| 0
| 885
| 0.932561
| 0
| 0
| 22
| 0.023182
|
e14dbf37cac6b30fd02dacd5e179dc9f00f542ab
| 3,405
|
py
|
Python
|
beakerx/beakerx/commands.py
|
acq/beakerx
|
584023ce0fdb052713855d8a9455e6d7422e53da
|
[
"Apache-2.0"
] | null | null | null |
beakerx/beakerx/commands.py
|
acq/beakerx
|
584023ce0fdb052713855d8a9455e6d7422e53da
|
[
"Apache-2.0"
] | null | null | null |
beakerx/beakerx/commands.py
|
acq/beakerx
|
584023ce0fdb052713855d8a9455e6d7422e53da
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import beakerx
from notebook import notebookapp as app
from .install import install, uninstall
from .bkr2ipynb import main
from beakerx_magics import Py4JServer
def install_subparser(subparser):
install_parser = subparser.add_parser('install', help='installs BeakerX extensions')
install_parser.set_defaults(func=install)
install_parser.add_argument("--prefix",
help="location of the environment to install into",
default=sys.prefix)
install_parser.add_argument("--lab",
help="install lab extension",
action='store_true')
return subparser
def uninstall_subparser(subparser):
uninstall_parser = subparser.add_parser('uninstall', help='uninstalls BeakerX extensions')
uninstall_parser.set_defaults(func=uninstall)
uninstall_parser.add_argument("--prefix",
help="location of the environment to uninstall from",
default=sys.prefix)
uninstall_parser.add_argument("--lab",
help="uninstall lab extension",
action='store_true')
return subparser
def bkr2ipynb_subparser(subparser):
bkr2ipynb_parser = subparser.add_parser('bkr2ipynb', help='converts Beaker notebooks to ipynb format')
bkr2ipynb_parser.set_defaults(func=main)
bkr2ipynb_parser.add_argument('notebooks', nargs='+',
help="Beaker notebooks to be converted. Enter *.bkr in case you want to convert all notebooks at once.")
return subparser
def py4j_server_subparser(subparser):
py4j_server_parser = subparser.add_parser('py4j_server')
py4j_server_parser.set_defaults(func=start_py4j_server)
py4j_server_parser.add_argument("--port")
py4j_server_parser.add_argument("--pyport")
py4j_server_parser.add_argument("--kernel")
def start_py4j_server(args):
Py4JServer(args.port, args.pyport, args.kernel)
def run_jupyter(jupyter_commands):
app.launch_new_instance(jupyter_commands)
def init_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version=beakerx.__version__)
parser.set_defaults(func=run_jupyter)
subparsers = parser.add_subparsers()
install_subparser(subparsers)
uninstall_subparser(subparsers)
bkr2ipynb_subparser(subparsers)
py4j_server_subparser(subparsers)
return parser
def parse():
parser = init_parser()
args, jupyter_commands = parser.parse_known_args()
if args.func == run_jupyter:
args.func(jupyter_commands)
elif not jupyter_commands:
args.func(args)
else:
parser.parse_args(jupyter_commands)
| 35.842105
| 138
| 0.698678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,085
| 0.318649
|
e14e630e471443793b9fd29816d201fc888da13e
| 2,512
|
py
|
Python
|
attnganw/runner.py
|
cptanalatriste/AttnGAN
|
6b8641cd5eb9c3a0bba73904b5c639784d6c3ec8
|
[
"MIT"
] | null | null | null |
attnganw/runner.py
|
cptanalatriste/AttnGAN
|
6b8641cd5eb9c3a0bba73904b5c639784d6c3ec8
|
[
"MIT"
] | null | null | null |
attnganw/runner.py
|
cptanalatriste/AttnGAN
|
6b8641cd5eb9c3a0bba73904b5c639784d6c3ec8
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import List
import dateutil
from datasets import TextDataset
from miscc.config import cfg_from_file, cfg
from torchvision.transforms import transforms
from attnganw.train import GanTrainerWrapper, BirdGenerationFromCaption
def get_text_dataset(tree_base_size: int, tree_branch_number: int, dataset_split: str,
data_directory: str) -> TextDataset:
image_size = tree_base_size * (2 ** (tree_branch_number - 1))
image_transform = transforms.Compose([
transforms.Scale(int(image_size * 76 / 64)),
transforms.RandomCrop(image_size),
transforms.RandomHorizontalFlip()])
dataset = TextDataset(data_directory, dataset_split,
base_size=tree_base_size,
transform=image_transform)
return dataset
def get_output_directory(dataset_name: str, config_name: str) -> str:
now = datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
output_directory = '../output/%s_%s_%s' % \
(dataset_name, config_name, timestamp)
return output_directory
def generate_images(config_file: str, gpu_id: int, identifier: str,
caption_list: List[str]) -> List[BirdGenerationFromCaption]:
cfg_from_file(config_file)
cfg.GPU_ID = gpu_id
dataset_split: str = 'test'
shuffle_data_loader: bool = True
output_directory: str = get_output_directory(dataset_name=cfg.DATASET_NAME, config_name=cfg.CONFIG_NAME)
text_dataset: TextDataset = get_text_dataset(tree_base_size=cfg.TREE.BASE_SIZE,
tree_branch_number=cfg.TREE.BRANCH_NUM,
dataset_split=dataset_split, data_directory=cfg.DATA_DIR)
gan_trainer_wrapper: GanTrainerWrapper = GanTrainerWrapper(output_directory=output_directory,
text_data_set=text_dataset,
batch_size=cfg.TRAIN.BATCH_SIZE,
shuffle_data_loader=shuffle_data_loader,
data_loader_workers=int(cfg.WORKERS),
split_directory=dataset_split)
return gan_trainer_wrapper.generate_from_caption_list(identifier=identifier, caption_list=caption_list)
| 44.857143
| 108
| 0.624602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.017914
|
e150737ff1e7de27f34b49c4df0d1658c30b7b57
| 2,469
|
py
|
Python
|
Gds/src/fprime_gds/common/data_types/sys_data.py
|
m-aleem/fprime
|
ae8a2a43a39d0e8a1908a82b48106467357d6cba
|
[
"Apache-2.0"
] | 1
|
2020-05-12T03:43:36.000Z
|
2020-05-12T03:43:36.000Z
|
Gds/src/fprime_gds/common/data_types/sys_data.py
|
abcouwer-jpl/fprime
|
f28c92e31d58e7e44bff09ad57d574ca5d5e91c7
|
[
"Apache-2.0"
] | 5
|
2020-05-26T21:38:02.000Z
|
2020-05-26T21:43:33.000Z
|
Gds/src/fprime_gds/common/data_types/sys_data.py
|
abcouwer-jpl/fprime
|
f28c92e31d58e7e44bff09ad57d574ca5d5e91c7
|
[
"Apache-2.0"
] | 3
|
2020-09-05T18:17:21.000Z
|
2020-11-15T04:06:24.000Z
|
'''
@brief Base class for system data classes.
This class defines the interface for data classes which are intended to hold
a specific data item (packet, channel, event). This data item includes the time
of the data as well as data such as channel value or argument value.
@date Created July 2, 2018
@author R. Joseph Paetz (rpaetz@jpl.nasa.gov)
@bug No known bugs
'''
from fprime.common.models.serialize import time_type
from fprime_gds.common.templates import data_template
import fprime_gds.common.utils.jsonable
class SysData(object):
'''
The SysData class defines the interface for system data classes which are
for specific data readings/events
'''
def __init__(self):
'''
Constructor.
Each subclass will define new constructors with necessary arguments.
The necessary fields are time, id, and template.
Returns:
An initialized SysData object
'''
if not self.id:
self.id = 0
if not self.template:
self.template = data_template.DataTemplate()
if not self.time:
self.time = time_type.TimeType()
def get_id(self):
'''
Returns the id of the channel
Returns:
The id of the channel
'''
return self.id
def get_time(self):
'''
Returns the time of the channel data reading
Returns:
Time of the reading as a TimeType
'''
return self.time
def get_template(self):
'''
Returns the template class instance for the data stored
Returns:
An instance of a template class for this instance's data
'''
return self.template
def to_jsonable(self):
'''
        Converts to a JSONable object (primitives, anonymous objects, lists)
'''
return fprime_gds.common.utils.jsonable.fprime_to_jsonable(self)
@staticmethod
def compare(x, y):
'''
Compares two data items.
Returns:
            Negative, 0, or positive for x<y, x==y, x>y respectively
'''
# Compare by time first
time_comp = time_type.TimeType.compare(x.time, y.time)
if (time_comp != 0):
return time_comp
        # Compare by id second (just let multiple events at the same time with
        # the same id be counted as equal)
        # Equivalent to the removed Python 2 cmp() builtin
        return (x.id > y.id) - (x.id < y.id)
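# --- Illustrative only (not part of the original module) ---
# A minimal sketch of a subclass: the base constructor reads self.id,
# self.template and self.time, so a subclass assigns them before calling
# super().__init__(). The class and argument names here are made up.
class ExampleChData(SysData):
    def __init__(self, ch_id, ch_time, ch_template):
        self.id = ch_id
        self.time = ch_time
        self.template = ch_template
        super(ExampleChData, self).__init__()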
if __name__ == '__main__':
pass
| 24.939394
| 79
| 0.617254
| 1,908
| 0.772783
| 0
| 0
| 493
| 0.199676
| 0
| 0
| 1,496
| 0.605913
|
e15162de1790d30c48e2c7c4e83b30934c311fba
| 3,063
|
py
|
Python
|
txdav/carddav/datastore/query/test/test_filter.py
|
eventable/CalendarServer
|
384444edb1966b530bc391789afbe3fb9cd6fd3e
|
[
"Apache-2.0"
] | 1
|
2017-02-18T19:22:19.000Z
|
2017-02-18T19:22:19.000Z
|
txdav/carddav/datastore/query/test/test_filter.py
|
eventable/CalendarServer
|
384444edb1966b530bc391789afbe3fb9cd6fd3e
|
[
"Apache-2.0"
] | null | null | null |
txdav/carddav/datastore/query/test/test_filter.py
|
eventable/CalendarServer
|
384444edb1966b530bc391789afbe3fb9cd6fd3e
|
[
"Apache-2.0"
] | null | null | null |
##
# Copyright (c) 2011-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.enterprise.dal.syntax import SQLFragment
from twisted.trial.unittest import TestCase
from twistedcaldav import carddavxml
from txdav.carddav.datastore.query.filter import Filter, FilterBase
from txdav.common.datastore.sql_tables import schema
from txdav.carddav.datastore.query.builder import buildExpression
from txdav.common.datastore.query.generator import SQLQueryGenerator
from txdav.carddav.datastore.index_file import sqladdressbookquery
class TestQueryFilter(TestCase):
_objectSchema = schema.ADDRESSBOOK_OBJECT
_queryFields = {
"UID": _objectSchema.UID
}
def test_query(self):
"""
Basic query test - single term.
Only UID can be queried via sql.
"""
filter = carddavxml.Filter(
*[carddavxml.PropertyFilter(
carddavxml.TextMatch.fromString("Example"),
**{"name": "UID"}
)]
)
filter = Filter(filter)
expression = buildExpression(filter, self._queryFields)
sql = SQLQueryGenerator(expression, self, 1234)
select, args = sql.generate()
self.assertEqual(select.toSQL(), SQLFragment("select distinct RESOURCE_NAME, VCARD_UID from ADDRESSBOOK_OBJECT where ADDRESSBOOK_HOME_RESOURCE_ID = ? and VCARD_UID like (? || (? || ?))", [1234, "%", "Example", "%"]))
self.assertEqual(args, {})
def test_sqllite_query(self):
"""
Basic query test - single term.
Only UID can be queried via sql.
"""
filter = carddavxml.Filter(
*[carddavxml.PropertyFilter(
carddavxml.TextMatch.fromString("Example"),
**{"name": "UID"}
)]
)
filter = Filter(filter)
sql, args = sqladdressbookquery(filter, 1234)
self.assertEqual(sql, " from RESOURCE where RESOURCE.UID GLOB :1")
self.assertEqual(args, ["*Example*"])
class TestQueryFilterSerialize(TestCase):
def test_query(self):
"""
Basic query test - no time range
"""
filter = carddavxml.Filter(
*[carddavxml.PropertyFilter(
carddavxml.TextMatch.fromString("Example"),
**{"name": "UID"}
)]
)
filter = Filter(filter)
j = filter.serialize()
self.assertEqual(j["type"], "Filter")
f = FilterBase.deserialize(j)
self.assertTrue(isinstance(f, Filter))
| 31.57732
| 224
| 0.647405
| 1,991
| 0.650016
| 0
| 0
| 0
| 0
| 0
| 0
| 1,127
| 0.36794
|
e151f9085880a3aa0708d3748aa966d97630b3db
| 8,259
|
py
|
Python
|
train/erfnet_pspnet_hier33.py
|
elnino9ykl/Sequential-Hierarchical-ERF-PSPNet
|
c7fe42967894b15f6ed82608cd1836a17fec0260
|
[
"MIT"
] | null | null | null |
train/erfnet_pspnet_hier33.py
|
elnino9ykl/Sequential-Hierarchical-ERF-PSPNet
|
c7fe42967894b15f6ed82608cd1836a17fec0260
|
[
"MIT"
] | null | null | null |
train/erfnet_pspnet_hier33.py
|
elnino9ykl/Sequential-Hierarchical-ERF-PSPNet
|
c7fe42967894b15f6ed82608cd1836a17fec0260
|
[
"MIT"
] | null | null | null |
# ERFNet full model definition for Pytorch
# Sept 2017
# Eduardo Romera
#######################
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class DownsamplerBlock (nn.Module):
def __init__(self, ninput, noutput):
super().__init__()
self.conv = nn.Conv2d(ninput, noutput-ninput, (3, 3), stride=2, padding=1, bias=True)
self.pool = nn.MaxPool2d(2, stride=2)
self.bn = nn.BatchNorm2d(noutput, eps=1e-3)
def forward(self, input):
output = torch.cat([self.conv(input), self.pool(input)], 1)
output = self.bn(output)
return F.relu(output)
#TODO: 1Dv28 downsampler has dropout as well
class non_bottleneck_1d (nn.Module):
def __init__(self, chann, dropprob, dilated): #TODO: check if 3x1 is height in Torch
super().__init__()
self.conv3x1_1 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(1,0), bias=True)
self.conv1x3_1 = nn.Conv2d(chann, chann, (1,3), stride=1, padding=(0,1), bias=True)
self.bn1 = nn.BatchNorm2d(chann, eps=1e-03)
self.conv3x1_2 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(1*dilated,0), bias=True, dilation = (dilated,1))
self.conv1x3_2 = nn.Conv2d(chann, chann, (1,3), stride=1, padding=(0,1*dilated), bias=True, dilation = (1, dilated))
self.bn2 = nn.BatchNorm2d(chann, eps=1e-03)
self.dropout = nn.Dropout2d(dropprob)
def forward(self, input):
output = self.conv3x1_1(input)
output = F.relu(output)
output = self.conv1x3_1(output)
output = self.bn1(output)
output = F.relu(output)
output = self.conv3x1_2(output)
output = F.relu(output)
output = self.conv1x3_2(output)
output = self.bn2(output)
        #output = F.relu(output) #this was wrong
if (self.dropout.p != 0):
output = self.dropout(output)
return F.relu(output+input) #+input = identity (residual connection)
class non_bottleneck_1d_hier (nn.Module):
def __init__(self):
super().__init__()
self.conv3x1_1 = nn.Conv2d(128, 128, (3, 1), stride=1, padding=(1,0), bias=True)
self.conv1x3_1 = nn.Conv2d(128, 128, (1,3), stride=1, padding=(0,1), bias=True)
self.bn1 = nn.BatchNorm2d(128, eps=1e-03)
self.conv3x1_22 = nn.Conv2d(128, 128, (3, 1), stride=1, padding=(2,0), bias=True, dilation = (2,1))
self.conv1x3_22 = nn.Conv2d(128, 128, (1,3), stride=1, padding=(0,2), bias=True, dilation = (1, 2))
self.conv3x1_24 = nn.Conv2d(128, 128, (3, 1), stride=1, padding=(4,0), bias=True, dilation = (4,1))
self.conv1x3_24 = nn.Conv2d(128, 128, (1,3), stride=1, padding=(0,4), bias=True, dilation = (1, 4))
self.conv3x1_28 = nn.Conv2d(128, 128, (3, 1), stride=1, padding=(8,0), bias=True, dilation = (8,1))
self.conv1x3_28 = nn.Conv2d(128, 128, (1,3), stride=1, padding=(0,8), bias=True, dilation = (1, 8))
#self.conv3x1_216 = nn.Conv2d(128, 128, (3, 1), stride=1, padding=(16,0), bias=True, dilation = (16,1))
#self.conv1x3_216 = nn.Conv2d(128, 128, (1,3), stride=1, padding=(0,16), bias=True, dilation = (1, 16))
self.bn2 = nn.BatchNorm2d(128, eps=1e-03)
self.dropout = nn.Dropout2d(0.3)
def forward(self, input):
output = self.conv3x1_1(input)
output = F.relu(output)
output = self.conv1x3_1(output)
output = self.bn1(output)
output = F.relu(output)
output2 = self.conv3x1_22(output)
output2 = F.relu(output2)
output2 = self.conv1x3_22(output2)
output2 = self.bn2(output2)
if (self.dropout.p != 0):
output2 = self.dropout(output2)
output4 = self.conv3x1_24(output)
output4 = F.relu(output4)
output4 = self.conv1x3_24(output4)
output4 = self.bn2(output4)
if (self.dropout.p != 0):
output4 = self.dropout(output4)
output8 = self.conv3x1_28(output)
output8 = F.relu(output8)
output8 = self.conv1x3_28(output8)
output8 = self.bn2(output8)
if (self.dropout.p != 0):
output8 = self.dropout(output8)
#output16 = self.conv3x1_216(output)
#output16 = F.relu(output16)
#output16 = self.conv1x3_216(output16)
#output16 = self.bn2(output16)
#if (self.dropout.p != 0):
#output16 = self.dropout(output16)
return F.relu(output2+output4+output8+input)
class Encoder(nn.Module):
def __init__(self, num_classes):
super().__init__()
self.initial_block = DownsamplerBlock(3,16)
self.layers = nn.ModuleList()
self.layers.append(DownsamplerBlock(16,64))
for x in range(0, 5): #5 times
self.layers.append(non_bottleneck_1d(64, 0.03, 1)) #Dropout here was wrong in prev trainings
self.layers.append(DownsamplerBlock(64,128))
for x in range(0, 3): #3 times
self.layers.append(non_bottleneck_1d_hier())
#self.layers.append(non_bottleneck_1d(128, 0.3, 2))
#self.layers.append(non_bottleneck_1d(128, 0.3, 4))
#self.layers.append(non_bottleneck_1d(128, 0.3, 8))
#self.layers.append(non_bottleneck_1d(128, 0.3, 16))
        #TODO: uncomment for encoder
self.output_conv = nn.Conv2d(128, num_classes, 1, stride=1, padding=0, bias=True)
def forward(self, input, predict=False):
output = self.initial_block(input)
for layer in self.layers:
output = layer(output)
if predict:
output = self.output_conv(output)
return output
class UpsamplerBlock (nn.Module):
def __init__(self, ninput, noutput):
super().__init__()
self.conv = nn.ConvTranspose2d(ninput, noutput, 3, stride=2, padding=1, output_padding=1, bias=True)
self.bn = nn.BatchNorm2d(noutput, eps=1e-3)
def forward(self, input):
output = self.conv(input)
output = self.bn(output)
return F.relu(output)
class PSPDec(nn.Module):
def __init__(self, in_features, out_features, downsize, upsize=(30,40)):
super(PSPDec,self).__init__()
self.features = nn.Sequential(
nn.AvgPool2d(downsize, stride=downsize),
nn.Conv2d(in_features, out_features, 1, bias=False),
nn.BatchNorm2d(out_features, momentum=.95),
nn.ReLU(inplace=True),
#nn.UpsamplingBilinear2d(upsize)
nn.Upsample(size=upsize, mode='bilinear')
)
def forward(self, x):
return self.features(x)
class Decoder (nn.Module):
def __init__(self, num_classes):
super().__init__()
#H=480/8 240/8
#W=640/8 320/8
self.layer5a = PSPDec(128, 32, (30,40),(30,40))
self.layer5b = PSPDec(128, 32, (int(15),int(20)),(30,40))
self.layer5c = PSPDec(128, 32, (int(7.5),int(10)),(30,40))
self.layer5d = PSPDec(128, 32, (int(3.75),int(5)),(30,40))
self.final = nn.Sequential(
nn.Conv2d(256, 256, 3, padding=1, bias=False),
nn.BatchNorm2d(256, momentum=.95),
nn.ReLU(inplace=True),
nn.Dropout(.1),
nn.Conv2d(256, num_classes, 1),
)
def forward(self, x):
#x=x[0]
x = self.final(torch.cat([
x,
self.layer5a(x),
self.layer5b(x),
self.layer5c(x),
self.layer5d(x),
], 1))
#print('final', x.size())
return F.upsample(x,size=(240,320), mode='bilinear')
#ERFNet
class Net(nn.Module):
def __init__(self, num_classes, encoder=None): #use encoder to pass pretrained encoder
super().__init__()
if (encoder == None):
self.encoder = Encoder(num_classes)
else:
self.encoder = encoder
self.decoder = Decoder(num_classes)
def forward(self, input, only_encode=False):
if only_encode:
return self.encoder.forward(input, predict=True)
else:
output = self.encoder(input) #predict=False by default
return self.decoder.forward(output)
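# --- Illustrative smoke test (not part of the original module) ---
# The PSPDec pooling sizes in the decoder assume 30x40 encoder features,
# i.e. a 240x320 input downsampled three times, and the final upsample also
# targets 240x320. Batch size and class count below are arbitrary.
if __name__ == '__main__':
    net = Net(num_classes=20)
    dummy = torch.randn(1, 3, 240, 320)
    out = net(dummy)
    print(out.size())  # expected: torch.Size([1, 20, 240, 320])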
| 33.987654
| 124
| 0.593413
| 8,041
| 0.973605
| 0
| 0
| 0
| 0
| 0
| 0
| 1,133
| 0.137184
|
e15240f37f48e80c43aa04132d7d8be1877669db
| 1,009
|
py
|
Python
|
wsgi.py
|
nam4dev/evedom_demo
|
3cba0cf8e37f6fa75af006c4a99a0b3fab7c2e13
|
[
"MIT"
] | null | null | null |
wsgi.py
|
nam4dev/evedom_demo
|
3cba0cf8e37f6fa75af006c4a99a0b3fab7c2e13
|
[
"MIT"
] | null | null | null |
wsgi.py
|
nam4dev/evedom_demo
|
3cba0cf8e37f6fa75af006c4a99a0b3fab7c2e13
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
WSGI script
Setup Application, Authentication, ...
"""
import os
from eve import Eve
from evedom import loader
# from your_app.authentication.token import TokenBasedAuth
__author__ = "nam4dev"
__created__ = '08/11/2017'
ROOT_PATH = os.path.dirname(
os.path.abspath(__file__)
)
EVE_SETTINGS = os.path.join(ROOT_PATH, 'settings.py')
def runner(*_, **options):
"""
A simple runner
Args:
*_:
**options:
Returns:
Flask App run
"""
arguments = dict(
debug=1,
port=5000,
)
arguments.update(options)
if 'EVE_SETTINGS' not in os.environ:
os.environ['EVE_SETTINGS'] = EVE_SETTINGS
application = Eve(
settings=EVE_SETTINGS,
# auth=TokenBasedAuth,
)
application.root_path = ROOT_PATH
with application.app_context():
loader.init()
return application.run(**arguments)
if __name__ == "__main__":
exit(runner())
| 16.274194
| 58
| 0.620416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 365
| 0.361744
|
e152f4d6dde06ac4acdcd8bfa8623f41a066db20
| 1,654
|
py
|
Python
|
workflow/migrations/0027_tolausercountryroles_tolauserprogramroles.py
|
mercycorps/TolaWorkflow
|
59542132fafd611081adb0e8cfaa04abc5886d7a
|
[
"Apache-2.0"
] | null | null | null |
workflow/migrations/0027_tolausercountryroles_tolauserprogramroles.py
|
mercycorps/TolaWorkflow
|
59542132fafd611081adb0e8cfaa04abc5886d7a
|
[
"Apache-2.0"
] | 268
|
2020-03-31T15:46:59.000Z
|
2022-03-31T18:01:08.000Z
|
workflow/migrations/0027_tolausercountryroles_tolauserprogramroles.py
|
Falliatcom-sa/falliatcom
|
39fb926de072c296ed32d50cccfb8003ca870739
|
[
"Apache-2.0"
] | 1
|
2021-01-05T01:58:24.000Z
|
2021-01-05T01:58:24.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-01-18 17:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('workflow', '0026_auto_20190116_1357'),
]
operations = [
migrations.CreateModel(
name='TolaUserCountryRoles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('role', models.CharField(choices=[('user', 'User'), ('basic_admin', 'Basic Admin'), ('super_admin', 'Super Admin')], max_length=100)),
('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_roles', to='workflow.Country')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='country_roles', to='workflow.TolaUser')),
],
),
migrations.CreateModel(
name='TolaUserProgramRoles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('role', models.CharField(choices=[('low', 'Low'), ('medium', 'Medium'), ('high', 'High')], max_length=100)),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_roles', to='workflow.Program')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='program_roles', to='workflow.TolaUser')),
],
),
]
| 47.257143
| 151
| 0.628174
| 1,463
| 0.884522
| 0
| 0
| 0
| 0
| 0
| 0
| 438
| 0.264813
|
e15353b2bdb09ab7d8c47ead4ff13403eb177890
| 753
|
py
|
Python
|
set-2/challenge-11.py
|
natehouk/cryptopals-crypto-challenges-solutions
|
3b89a94d42a9b052b2f79d37ba3fa9e3ec17c869
|
[
"MIT"
] | null | null | null |
set-2/challenge-11.py
|
natehouk/cryptopals-crypto-challenges-solutions
|
3b89a94d42a9b052b2f79d37ba3fa9e3ec17c869
|
[
"MIT"
] | null | null | null |
set-2/challenge-11.py
|
natehouk/cryptopals-crypto-challenges-solutions
|
3b89a94d42a9b052b2f79d37ba3fa9e3ec17c869
|
[
"MIT"
] | null | null | null |
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import random
from util.util import pad, detect_aes_ecb, generate_key, ammend_plaintext, encrypt_random
# Chosen plaintext
plaintext = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
# Generate data and encrypt plaintext
key = generate_key()
plaintext = pad(ammend_plaintext(plaintext), 16)
ciphertext = encrypt_random(key, plaintext)
# Detect AES in ECB mode
detect = detect_aes_ecb(ciphertext)
# Print answer
print("Plaintext: " + str(plaintext, 'latin-1'))
print("Ciphertext: " + str(ciphertext, 'latin-1'))
if (detect[1] == 6):
print("Guess: ECB without CBC mode")
elif (detect[1] == 4):
print("Guess: ECB with CBC mode")
else:
raise Exception
| 30.12
| 89
| 0.749004
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 238
| 0.316069
|
e1570b21e8b27475e013ab8eb1dbd45fef7957ed
| 5,457
|
py
|
Python
|
lib/python3.6/site-packages/example/authorize_driver.py
|
venkyyPoojari/Smart-Mirror
|
256b7a870f8cda2965b848a66574ee38254274f5
|
[
"MIT"
] | 187
|
2015-10-02T13:47:33.000Z
|
2022-03-23T08:09:22.000Z
|
lib/python3.6/site-packages/example/authorize_driver.py
|
venkyyPoojari/Smart-Mirror
|
256b7a870f8cda2965b848a66574ee38254274f5
|
[
"MIT"
] | 44
|
2015-12-08T04:31:14.000Z
|
2022-03-14T17:33:11.000Z
|
lib/python3.6/site-packages/example/authorize_driver.py
|
venkyyPoojari/Smart-Mirror
|
256b7a870f8cda2965b848a66574ee38254274f5
|
[
"MIT"
] | 88
|
2015-10-11T03:09:01.000Z
|
2022-03-19T04:16:37.000Z
|
# Copyright (c) 2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Initializes an UberRidesClient with OAuth 2.0 Credentials.
This example demonstrates how to get an access token through the
OAuth 2.0 Authorization Code Grant and use credentials to create
an UberRidesClient.
To run this example:
(1) Set your app credentials in config.driver.yaml
(2) Run `python authorize_driver.py`
(3) A success message will print, 'Hello {YOUR_NAME}'
(4) User OAuth 2.0 credentials are recorded in
'oauth_driver_session_store.yaml'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import input
from yaml import safe_dump
from example import utils # NOQA
from example.utils import fail_print
from example.utils import response_print
from example.utils import success_print
from example.utils import import_app_credentials
from uber_rides.auth import AuthorizationCodeGrant
from uber_rides.client import UberRidesClient
from uber_rides.errors import ClientError
from uber_rides.errors import ServerError
from uber_rides.errors import UberIllegalState
def authorization_code_grant_flow(credentials, storage_filename):
"""Get an access token through Authorization Code Grant.
Parameters
credentials (dict)
All your app credentials and information
imported from the configuration file.
storage_filename (str)
Filename to store OAuth 2.0 Credentials.
Returns
(UberRidesClient)
An UberRidesClient with OAuth 2.0 Credentials.
"""
auth_flow = AuthorizationCodeGrant(
credentials.get('client_id'),
credentials.get('scopes'),
credentials.get('client_secret'),
credentials.get('redirect_url'),
)
auth_url = auth_flow.get_authorization_url()
login_message = 'Login as a driver and grant access by going to:\n\n{}\n'
login_message = login_message.format(auth_url)
response_print(login_message)
redirect_url = 'Copy the URL you are redirected to and paste here:\n\n'
result = input(redirect_url).strip()
try:
session = auth_flow.get_session(result)
except (ClientError, UberIllegalState) as error:
fail_print(error)
return
credential = session.oauth2credential
credential_data = {
'client_id': credential.client_id,
'redirect_url': credential.redirect_url,
'access_token': credential.access_token,
'expires_in_seconds': credential.expires_in_seconds,
'scopes': list(credential.scopes),
'grant_type': credential.grant_type,
'client_secret': credential.client_secret,
'refresh_token': credential.refresh_token,
}
with open(storage_filename, 'w') as yaml_file:
yaml_file.write(safe_dump(credential_data, default_flow_style=False))
return UberRidesClient(session, sandbox_mode=True)
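# Illustrative only (not part of the original example): the credential keys
# authorization_code_grant_flow() reads from the dict produced by
# import_app_credentials('config.driver.yaml'); all values are placeholders.
EXAMPLE_CREDENTIALS = {
    'client_id': 'YOUR_CLIENT_ID',
    'client_secret': 'YOUR_CLIENT_SECRET',
    'redirect_url': 'YOUR_REDIRECT_URL',
    'scopes': ['YOUR_SCOPES'],
}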
def hello_user(api_client):
"""Use an authorized client to fetch and print profile information.
Parameters
api_client (UberRidesClient)
An UberRidesClient with OAuth 2.0 credentials.
"""
try:
response = api_client.get_driver_profile()
except (ClientError, ServerError) as error:
fail_print(error)
return
else:
profile = response.json
first_name = profile.get('first_name')
last_name = profile.get('last_name')
email = profile.get('email')
message = 'Hello, {} {}. Successfully granted access token to {}.'
message = message.format(first_name, last_name, email)
success_print(message)
success_print(profile)
success_print('---')
response = api_client.get_driver_trips()
trips = response.json
success_print(trips)
success_print('---')
response = api_client.get_driver_payments()
payments = response.json
success_print(payments)
if __name__ == '__main__':
"""Run the example.
Get an access token through the OAuth 2.0 Authorization Code Grant
and use credentials to create an UberRidesClient.
"""
credentials = import_app_credentials('config.driver.yaml')
api_client = authorization_code_grant_flow(
credentials,
'oauth_driver_session_store.yaml',
)
hello_user(api_client)
| 33.478528
| 79
| 0.721092
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,739
| 0.501924
|
e1572ceb180b2c59c917246045f255f6d9bcd968
| 10,766
|
py
|
Python
|
orangeplus/OPTICS_w.py
|
panatronic-git/orange-plus
|
b51643d5edaa7de78388bc38a7695eec64331d27
|
[
"CC0-1.0"
] | null | null | null |
orangeplus/OPTICS_w.py
|
panatronic-git/orange-plus
|
b51643d5edaa7de78388bc38a7695eec64331d27
|
[
"CC0-1.0"
] | null | null | null |
orangeplus/OPTICS_w.py
|
panatronic-git/orange-plus
|
b51643d5edaa7de78388bc38a7695eec64331d27
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
""" A data clustering widget for the Orange3.
This is a data clustering widget for Orange3, that implements the OPTICS algorithm.
OPTICS stands for "Ordering Points To Identify the Clustering Structure".
This is a very useful algorithm for clustering data when the dataset is unlabeled with
Non-flat geometry or when it has uneven cluster sizes or variable cluster density.
The package used is called "sklearn". Source: https://scikit-learn.org/stable/index.html
To run the addon, just install it using 'pip install -e .' from its package folder.
Don't forget to first activate the orange environment.
__author__ = Panagiotis Papadopoulos
__date__ = Feb 2020
__version__ = 0.1.0
__type__ = Orange Addon
__platform__ = Windows (Orange environment)
__email__ = 'Panagiotis Papadopoulos' <panatronic@outlook.com>
__status__ = Dev
"""
import numpy as np
from AnyQt.QtCore import Qt
from AnyQt.QtGui import QColor
from Orange.widgets import widget, gui
from Orange.widgets import settings
from Orange.widgets.widget import Msg
from Orange.widgets.utils.signals import Input, Output
from Orange.widgets.utils.widgetpreview import WidgetPreview
from Orange.widgets.utils.slidergraph import SliderGraph
from Orange.data import Table, Domain, DiscreteVariable
from pyqtgraph import mkPen
from pyqtgraph.functions import intColor
from sklearn.cluster import OPTICS
from sklearn.neighbors import VALID_METRICS
""" OPTICS Parameters
class sklearn.cluster.OPTICS(
* min_samples=5, {default=5 or int > 1}, title: Min samples
max_eps=inf, {default=np.inf}, not changed
* metric='minkowski', {default='minkowski' or [1]}, title: Metric
p=2, {default=2}, not changed
cluster_method='xi', {default='xi'}, not changed
eps=None, {default=None}, not changed
* xi=0.05, {default=0.05 or float, between 0 and 1}, title: Minimum steepness
predecessor_correction=True, {default=True}, not changed
min_cluster_size=None, {default=None}, not changed
* algorithm='auto', {default=auto or ball_tree, kd_tree, brute, auto}, title: Algorithm for nearest neighbors:
leaf_size=30, {default=30}, not changed
n_jobs=None, {default=None}, not changed
)
[1] Valid values for metric are:
from scikit-learn: [‘cityblock’, ‘cosine’, ‘euclidean’, ‘l1’, ‘l2’, ‘manhattan’]
from scipy.spatial.distance: [‘braycurtis’, ‘canberra’, ‘chebyshev’, ‘correlation’, ‘dice’, ‘hamming’, ‘jaccard’,
‘kulsinski’, ‘mahalanobis’, ‘minkowski’, ‘rogerstanimoto’, ‘russellrao’, ‘seuclidean’, ‘sokalmichener’, ‘sokalsneath’, ‘sqeuclidean’, ‘yule’]
See the documentation for scipy.spatial.distance for details on these metrics.
"""
OPTICS_METRICS = [
("cityblock", "cityblock"),
("cosine", "cosine"),
("euclidean", "euclidean"),
("l1", "l1"),
("l2", "l2"),
("manhattan", "manhattan"),
("braycurtis", "braycurtis"),
("canberra", "canberra"),
("chebyshev", "chebyshev"),
("correlation", "correlation"),
("hamming", "hamming"),
("minkowski", "minkowski"),
("sqeuclidean", "sqeuclidean"),
]
OPTICS_ALGORITHM = [
("Auto","auto"),
("Ball Tree","ball_tree"),
("kd Tree","kd_tree"),
("Brute","brute"),
]
class OPTICS_w(widget.OWWidget):
name = "OPTICS"
description = "dynamicaly clustering unlabeled data by density"
icon = "icons/OPTICS.svg"
priority = 20
class Inputs:
data = Input("Data", Table)
class Outputs:
annotated_data = Output("Data", Table)
class Error(widget.OWWidget.Error):
not_enough_instances = Msg("Not enough unique data instances. "
"At least two are required.")
minimum_samples = settings.Setting(5)
metric_methode = settings.Setting(11)
xi_value = settings.Setting(0.05)
algorithm_base = settings.Setting(0)
auto_commit = settings.Setting(False)
cut_point = xi_value
want_main_area = True
def __init__(self):
super().__init__()
self.data = None
self.dataset = None
self.annotated_data = None
# GUI
infobox = gui.widgetBox(self.controlArea, "Info")
self.infoa = gui.widgetLabel(infobox, "No data on input yet, waiting to get something.")
self.infob = gui.widgetLabel(infobox, "")
self.infoc = gui.widgetLabel(infobox, "")
self.infod = gui.widgetLabel(infobox, "")
self.optionsBox = gui.widgetBox(self.controlArea, "OPTICS Options")
gui.spin(
self.optionsBox,
self,
"minimum_samples",
minv=1,
maxv=100,
step=1,
label="Core point neighbors ",
callback=self._min_samples_changed
)
gui.comboBox(
self.optionsBox,
self,
"metric_methode",
orientation=Qt.Horizontal,
label="Distance metric: ",
items=[d[0] for d in OPTICS_METRICS],
callback=self._metric_changed
)
gui.doubleSpin(
self.optionsBox,
self,
"xi_value",
minv=(0.000),
maxv=(0.999),
step=(0.001),
label="Minimum steepness: ",
callback=self._xi_changed
)
gui.comboBox(
self.optionsBox,
self,
"algorithm_base",
orientation=Qt.Horizontal,
label="neighborhood algorithm: ",
items=[d[0] for d in OPTICS_ALGORITHM],
callback=self._algorithm_changed
)
self.optionsBox.setDisabled(True)
gui.auto_apply(self.controlArea, self, "auto_commit")
gui.rubber(self.controlArea)
self.controlArea.layout().addStretch()
self.plot = SliderGraph(
x_axis_label="Ordering of the points as processed by OPTICS",
y_axis_label="Reachability distance (epsilon distance)",
callback=self._on_changed
)
self.mainArea.layout().addWidget(self.plot)
def check_data_size(self, data):
if data is None:
return False
if len(data) < 2:
self.Error.not_enough_instances()
return False
return True
def normalizing(self,model):
clusters = [c if c >= 0 else np.nan for c in model.labels_]
k = len(set(clusters) - {np.nan})
clusters = np.array(clusters).reshape(len(self.data), 1)
clust_var = DiscreteVariable("Cluster", values=["C%d" % (x + 1) for x in range(k)])
domain = self.data.domain
attributes, classes = domain.attributes, domain.class_vars
meta_attrs = domain.metas
x, y, metas = self.data.X, self.data.Y, self.data.metas
meta_attrs += (clust_var, )
metas = np.hstack((metas, clusters))
domain = Domain(attributes, classes, meta_attrs)
new_table = Table(domain, x, y, metas, self.data.W)
# self.Outputs.annotated_data.send(new_table)
return new_table
def commit(self):
self.cluster()
return
def cluster(self):
if not self.check_data_size(self.data):
return
model = OPTICS(min_samples=self.minimum_samples,
metric=OPTICS_METRICS[self.metric_methode][1],
xi=self.xi_value,
algorithm=OPTICS_ALGORITHM[self.algorithm_base][1],
)
model.fit(self.data.X)
self._plot_graph(model)
self.result_OPTICS = self.normalizing(model)
self.send_data()
def _plot_graph(self,model):
reachability = model.reachability_[model.ordering_]
space = np.arange(len(reachability))
reachability[reachability == np.inf] = np.nanmax(reachability[reachability != np.inf])
labels = model.labels_[model.ordering_]
cluster_count = (len(np.unique(labels[labels[:]>=0])))
        self.infoc.setText("%d clusters in the outcome" % cluster_count)
noisy_counter = len(space[labels==-1])
self.infod.setText("%d noisy samples in the leaf cluster" % noisy_counter)
x_plot = space
y_plot = reachability
self.plot.clear_plot()
colors = np.arange(150, (150+cluster_count))
for klaster, color in zip(range(0, cluster_count), colors):
Xk = space[labels == klaster]
Rk = reachability[labels == klaster]
self.plot.plot(Xk, Rk, pen=mkPen(intColor(color), width=2), antialias=True)
self.plot.plot(x_plot[labels==-1], y_plot[labels==-1], pen=mkPen(QColor('black'), width=2), antialias=True)
@Inputs.data
def set_data(self, dataset):
self.Error.clear()
if not self.check_data_size(dataset):
self.optionsBox.setDisabled(True)
self.plot.clear_plot()
self.infoa.setText(
"No data on input yet, waiting to get something.")
self.infob.setText('')
self.infoc.setText('')
self.infod.setText('')
self.dataset = None
self.annotated_data = None
self.Outputs.annotated_data.send(None)
return
self.data = dataset
self.optionsBox.setDisabled(False)
self.numberOfInputInstances = len(self.data)
self.infoa.setText("%d instances in input data set" % self.numberOfInputInstances)
numOfclasses = len(self.data.domain.class_var.values)
self.infob.setText("%d values in the categorical outcome" % numOfclasses)
self.commit()
def checkCommit(self):
if self.commitOnChange:
self.commit()
def send_data(self):
self.Outputs.annotated_data.send(self.result_OPTICS)
def _min_samples_changed(self):
if self.data is None:
return
self.commit()
def _metric_changed(self):
if self.data is None:
return
self.algorithm_base = 0
self.commit()
def _xi_changed(self):
self.commit()
def _algorithm_changed(self):
if self.data is None:
return
if self.algorithm_base != 0:
if OPTICS_METRICS[self.metric_methode][1] not in VALID_METRICS[OPTICS_ALGORITHM[self.algorithm_base][1]]:
self.algorithm_base = 0
self.commit()
def _on_changed(self, value):
self.cut_point = value
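# --- Illustrative only (not part of the widget) ---
# A bare sklearn call with the same defaults as the widget settings above,
# run on synthetic data; the numbers are arbitrary.
def _demo_sklearn_optics():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(0, 0.3, (50, 2)), rng.normal(3, 0.3, (50, 2))])
    model = OPTICS(min_samples=5, metric="minkowski", xi=0.05, algorithm="auto")
    model.fit(X)
    return set(model.labels_)  # cluster ids; -1 marks noise points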
if __name__ == "__main__":
WidgetPreview(OPTICS_w).run(Table("iris-imbalanced"))
| 34.396166
| 145
| 0.612205
| 7,285
| 0.670934
| 0
| 0
| 948
| 0.087309
| 0
| 0
| 3,468
| 0.319396
|
e157ca6b782a8b3accbd943a60246d4523efb4e1
| 2,590
|
py
|
Python
|
message_prototypes/base_message.py
|
agratoth/py-message-prototypes
|
a23527f8264631e947ed2a6657b325036005330f
|
[
"MIT"
] | 1
|
2021-02-26T04:40:00.000Z
|
2021-02-26T04:40:00.000Z
|
message_prototypes/base_message.py
|
agratoth/py-message-prototypes
|
a23527f8264631e947ed2a6657b325036005330f
|
[
"MIT"
] | null | null | null |
message_prototypes/base_message.py
|
agratoth/py-message-prototypes
|
a23527f8264631e947ed2a6657b325036005330f
|
[
"MIT"
] | null | null | null |
import json
from message_prototypes.exceptions import MissingModelException
class BaseMessage:
_serializable_fields = []
def pack(self, unpacking_info=True):
result = {
'_model': self.__class__.__name__,
} if unpacking_info else {}
def serialize_node(node=None):
if isinstance(node, (str, int, type(None))):
return node
elif isinstance(node, list):
return [
serialize_node(elem)
for elem
in node
]
elif isinstance(node, dict):
return {
elem_name: serialize_node(elem)
for elem_name, elem
in node.items()
}
elif isinstance(node, BaseMessage):
return node.pack(unpacking_info=unpacking_info)
for field_name in self._serializable_fields:
field_value = getattr(self, field_name)
result[field_name] = serialize_node(field_value)
return result
def json(self):
return json.dumps(self.pack())
@classmethod
def unpack(cls, data={}):
result = cls()
if '_model' not in data:
raise MissingModelException
def deserialize_node(node={}):
if isinstance(node, (str, int, type(None))):
return node
elif isinstance(node, list):
return [
deserialize_node(elem)
for elem
in node
]
elif isinstance(node, dict):
if '_model' in node:
subclass = cls.detect_model(node)
if subclass:
return subclass.unpack(node)
return None
else:
return {
elem_name: deserialize_node(elem)
for elem_name, elem
in node.items()
}
for field_name in cls._serializable_fields:
field_value = data.get(field_name, None)
setattr(result, field_name, deserialize_node(field_value))
return result
@classmethod
def detect_model(cls, data={}):
if '_model' not in data:
raise MissingModelException
subclasses = BaseMessage.__subclasses__()
for subclass in subclasses:
if subclass.__name__ == data['_model']:
return subclass
return None
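# --- Illustrative round-trip (not part of the original module) ---
# GreetingMessage and its fields are made up; they show how
# _serializable_fields drives pack()/unpack() and how detect_model()
# resolves the '_model' tag back to the right subclass.
class GreetingMessage(BaseMessage):
    _serializable_fields = ['text', 'tags']
    def __init__(self, text=None, tags=None):
        self.text = text
        self.tags = tags or []
if __name__ == '__main__':
    packed = GreetingMessage('hello', ['a', 'b']).pack()
    # -> {'_model': 'GreetingMessage', 'text': 'hello', 'tags': ['a', 'b']}
    restored = BaseMessage.detect_model(packed).unpack(packed)
    print(restored.text, restored.tags)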
| 30.116279
| 70
| 0.508108
| 2,510
| 0.969112
| 0
| 0
| 1,425
| 0.550193
| 0
| 0
| 40
| 0.015444
|
e1597f1ccc9fdad83bd98d093bad683b7edf4352
| 2,015
|
py
|
Python
|
Tests/Methods/Geometry/test_is_inside.py
|
Eomys/pyleecan
|
9a8cea3e62f63bc73417fe09770bd4d480021e35
|
[
"Apache-2.0"
] | 95
|
2019-01-23T04:19:45.000Z
|
2022-03-17T18:22:10.000Z
|
Tests/Methods/Geometry/test_is_inside.py
|
Eomys/pyleecan
|
9a8cea3e62f63bc73417fe09770bd4d480021e35
|
[
"Apache-2.0"
] | 366
|
2019-02-20T07:15:08.000Z
|
2022-03-31T13:37:23.000Z
|
Tests/Methods/Geometry/test_is_inside.py
|
Eomys/pyleecan
|
9a8cea3e62f63bc73417fe09770bd4d480021e35
|
[
"Apache-2.0"
] | 74
|
2019-01-24T01:47:31.000Z
|
2022-02-25T05:44:42.000Z
|
# -*- coding: utf-8 -*
import pytest
from pyleecan.Classes.Circle import Circle
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.SurfLine import SurfLine
# Configuring the test of is_inside
inside_test = list()
# Test 1 : checking if a point is inside a circle of radius 1 at 0 + 0j
C1 = Circle()
inside_test.append({"surf": C1, "Z": 0, "result": True}) # inside
inside_test.append({"surf": C1, "Z": 20, "result": False}) # outside
inside_test.append({"surf": C1, "Z": 1, "result": False}) # online not OK
inside_test.append({"surf": C1, "Z": 1, "if_online": True, "result": True}) # online OK
# Test 2 : checking if a point is inside a "C-shape" surface
A0 = 0
A1 = 0 + 4j
A2 = 3 + 4j
A3 = 3 + 3j
A4 = 1 + 3j
A5 = 1 + 1j
A6 = 3 + 1j
A7 = 3
line_list1 = list()
line_list1.append(Segment(A0, A1))
line_list1.append(Segment(A1, A2))
line_list1.append(Segment(A2, A3))
line_list1.append(Segment(A3, A4))
line_list1.append(Segment(A4, A5))
line_list1.append(Segment(A5, A6))
line_list1.append(Segment(A6, A7))
line_list1.append(Segment(A7, A0))
C2 = SurfLine(line_list=line_list1, point_ref=A0)
inside_test.append({"surf": C2, "Z": 0.5 + 2j, "result": True}) # inside
inside_test.append({"surf": C2, "Z": 2 + 2j, "result": False}) # outside
inside_test.append({"surf": C2, "Z": 2.03, "result": False}) # online not OK
inside_test.append(
{"surf": C2, "Z": 2.03, "if_online": True, "result": True}
) # online OK
@pytest.mark.parametrize("test_dict", inside_test)
def test_is_inside(test_dict):
"Check if the method is_inside is working correctly"
surf = test_dict["surf"]
Z = test_dict["Z"]
result = test_dict["result"]
if "if_online" in test_dict:
if_online = test_dict["if_online"]
assert result == surf.is_inside(Z, if_online)
else:
assert result == surf.is_inside(Z)
if __name__ == "__main__":
for test_dict in inside_test:
test_is_inside(test_dict)
| 31
| 89
| 0.651613
| 0
| 0
| 0
| 0
| 418
| 0.207444
| 0
| 0
| 556
| 0.275931
|
e15a7dcff0d33ea587927f028856b4941738c99e
| 542
|
py
|
Python
|
examples/polling_tweets_example.py
|
Cheetah97/nitter_scraper
|
2da2cf9dca66c7ff9b02c06fccf1cfad772f14a5
|
[
"MIT"
] | 21
|
2020-08-31T06:20:36.000Z
|
2022-01-10T19:22:00.000Z
|
examples/polling_tweets_example.py
|
Cheetah97/nitter_scraper
|
2da2cf9dca66c7ff9b02c06fccf1cfad772f14a5
|
[
"MIT"
] | 2
|
2021-02-09T18:19:51.000Z
|
2021-07-25T17:27:59.000Z
|
examples/polling_tweets_example.py
|
Cheetah97/nitter_scraper
|
2da2cf9dca66c7ff9b02c06fccf1cfad772f14a5
|
[
"MIT"
] | 4
|
2020-12-20T01:31:30.000Z
|
2022-01-24T14:22:13.000Z
|
import time
from nitter_scraper import NitterScraper
last_tweet_id = None
with NitterScraper(port=8008) as nitter:
while True:
for tweet in nitter.get_tweets("dgnsrekt", pages=1, break_on_tweet_id=last_tweet_id):
if tweet.is_pinned is True:
continue
if tweet.is_retweet is True:
continue
if tweet.tweet_id != last_tweet_id:
print(tweet.json(indent=4))
last_tweet_id = tweet.tweet_id
break
time.sleep(0.1)
| 21.68
| 93
| 0.605166
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.01845
|
e15ca3d18b760bf74faf9a038392f2b6d8bb59b6
| 11,697
|
py
|
Python
|
esse/mainapp/migrations/0003_auto_20210225_0350.py
|
alexeevivan/bookstore
|
d5698a5681a5d4f6b7616aacd3ac384d25b306e5
|
[
"Unlicense"
] | null | null | null |
esse/mainapp/migrations/0003_auto_20210225_0350.py
|
alexeevivan/bookstore
|
d5698a5681a5d4f6b7616aacd3ac384d25b306e5
|
[
"Unlicense"
] | null | null | null |
esse/mainapp/migrations/0003_auto_20210225_0350.py
|
alexeevivan/bookstore
|
d5698a5681a5d4f6b7616aacd3ac384d25b306e5
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.1.6 on 2021-02-25 00:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mainapp', '0002_auto_20210225_0349'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total_products', models.PositiveIntegerField(default=0)),
('final_price', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='Total cost')),
('in_order', models.BooleanField(default=False)),
('for_anonymous_user', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Name of category')),
('slug', models.SlugField(unique=True)),
],
),
migrations.CreateModel(
name='Novel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Book Title')),
('slug', models.SlugField(unique=True)),
('image', models.ImageField(upload_to='')),
('description', models.TextField(null=True, verbose_name='Annotation')),
('price', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='Price')),
('authorship', models.CharField(max_length=255, verbose_name='Author:')),
('book_format', models.CharField(max_length=255, verbose_name='Format:')),
('publisher', models.CharField(max_length=255, verbose_name='Publisher:')),
('the_year_of_publishing', models.CharField(max_length=10, verbose_name='Published:')),
('book_dimensions', models.CharField(max_length=10, verbose_name='Quantity of pages:')),
('language', models.CharField(max_length=40, verbose_name='Language:')),
('appropriate_for_ages', models.CharField(max_length=10, verbose_name='Appropriate for ages:')),
('ISBN_13', models.CharField(max_length=13, verbose_name='International Standard Book Number:')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.category', verbose_name='Category')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Medicine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Book Title')),
('slug', models.SlugField(unique=True)),
('image', models.ImageField(upload_to='')),
('description', models.TextField(null=True, verbose_name='Annotation')),
('price', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='Price')),
('authorship', models.CharField(max_length=255, verbose_name='Author:')),
('book_format', models.CharField(max_length=255, verbose_name='Format:')),
('theme', models.CharField(max_length=255, verbose_name='Part of science:')),
('publisher', models.CharField(max_length=255, verbose_name='Publisher:')),
('the_year_of_publishing', models.CharField(max_length=10, verbose_name='Published:')),
('book_dimensions', models.CharField(max_length=10, verbose_name='Quantity of pages:')),
('language', models.CharField(max_length=40, verbose_name='Language:')),
('appropriate_for_ages', models.CharField(max_length=10, verbose_name='Appropriate for ages:')),
('ISBN_13', models.CharField(max_length=13, verbose_name='International Standard Book Number:')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.category', verbose_name='Category')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='History',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Book Title')),
('slug', models.SlugField(unique=True)),
('image', models.ImageField(upload_to='')),
('description', models.TextField(null=True, verbose_name='Annotation')),
('price', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='Price')),
('authorship', models.CharField(max_length=255, verbose_name='Author:')),
('book_format', models.CharField(max_length=255, verbose_name='Format:')),
                ('period', models.CharField(max_length=255, verbose_name='Covers the period of time:')),
('publisher', models.CharField(max_length=255, verbose_name='Publisher:')),
('the_year_of_publishing', models.CharField(max_length=10, verbose_name='Published:')),
('book_dimensions', models.CharField(max_length=10, verbose_name='Quantity of pages:')),
('language', models.CharField(max_length=40, verbose_name='Language:')),
('appropriate_for_ages', models.CharField(max_length=10, verbose_name='Appropriate for ages:')),
('ISBN_13', models.CharField(max_length=13, verbose_name='International Standard Book Number:')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.category', verbose_name='Category')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Economics',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Book Title')),
('slug', models.SlugField(unique=True)),
('image', models.ImageField(upload_to='')),
('description', models.TextField(null=True, verbose_name='Annotation')),
('price', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='Price')),
('authorship', models.CharField(max_length=255, verbose_name='Author:')),
('book_format', models.CharField(max_length=255, verbose_name='Format:')),
('theme', models.CharField(max_length=255, verbose_name='Topic under consideration:')),
('publisher', models.CharField(max_length=255, verbose_name='Publisher:')),
('the_year_of_publishing', models.CharField(max_length=10, verbose_name='Published:')),
('book_dimensions', models.CharField(max_length=10, verbose_name='Quantity of pages:')),
('language', models.CharField(max_length=40, verbose_name='Language:')),
('appropriate_for_ages', models.CharField(max_length=10, verbose_name='Appropriate for ages:')),
('ISBN_13', models.CharField(max_length=13, verbose_name='International Standard Book Number:')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.category', verbose_name='Category')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone', models.CharField(max_length=20, verbose_name='Phone number')),
('address', models.CharField(max_length=255, verbose_name='Address')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
),
migrations.CreateModel(
name='CartProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('quantity', models.PositiveIntegerField(default=1)),
('final_price', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='Total cost')),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_products', to='mainapp.cart', verbose_name='Cart')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.customer', verbose_name='Customer')),
],
),
migrations.AddField(
model_name='cart',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.customer', verbose_name='Owner'),
),
migrations.AddField(
model_name='cart',
name='products',
field=models.ManyToManyField(blank=True, related_name='related_cart', to='mainapp.CartProduct'),
),
migrations.CreateModel(
name='Biography',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Book Title')),
('slug', models.SlugField(unique=True)),
('image', models.ImageField(upload_to='')),
('description', models.TextField(null=True, verbose_name='Annotation')),
('price', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='Price')),
('authorship', models.CharField(max_length=255, verbose_name='Author:')),
('book_format', models.CharField(max_length=255, verbose_name='Format:')),
('publisher', models.CharField(max_length=255, verbose_name='Publisher:')),
('the_year_of_publishing', models.CharField(max_length=10, verbose_name='Published:')),
('book_dimensions', models.CharField(max_length=10, verbose_name='Quantity of pages:')),
('language', models.CharField(max_length=40, verbose_name='Language:')),
('appropriate_for_ages', models.CharField(max_length=10, verbose_name='Appropriate for ages:')),
('ISBN_13', models.CharField(max_length=13, verbose_name='International Standard Book Number:')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.category', verbose_name='Category')),
],
options={
'abstract': False,
},
),
]
| 62.550802
| 162
| 0.608703
| 11,539
| 0.986408
| 0
| 0
| 0
| 0
| 0
| 0
| 2,647
| 0.226278
|
e15ca6e7927c7dfaebe88887cd584126de16a196
| 45
|
py
|
Python
|
mypo/sampler/__init__.py
|
sonesuke/my-portfolio
|
4fd19fdee8a0aa13194cab0df53c83218c5664e3
|
[
"MIT"
] | 2
|
2021-03-14T00:14:25.000Z
|
2021-09-04T16:26:02.000Z
|
mypo/sampler/__init__.py
|
sonesuke/my-portfolio
|
4fd19fdee8a0aa13194cab0df53c83218c5664e3
|
[
"MIT"
] | 104
|
2021-02-21T08:11:11.000Z
|
2021-09-26T03:02:27.000Z
|
mypo/sampler/__init__.py
|
sonesuke/mypo
|
4fd19fdee8a0aa13194cab0df53c83218c5664e3
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from .sampler import Sampler
| 11.25
| 28
| 0.755556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.311111
|
e15cf3b8a65ba6fdc8baa289bed190ee2b034ffe
| 295
|
py
|
Python
|
tools/com/test/test_alpha.py
|
AnthonyEdvalson/Machina
|
fefb058591dd7b62817c75277d5ca0eb6dbd8c3a
|
[
"MIT"
] | null | null | null |
tools/com/test/test_alpha.py
|
AnthonyEdvalson/Machina
|
fefb058591dd7b62817c75277d5ca0eb6dbd8c3a
|
[
"MIT"
] | null | null | null |
tools/com/test/test_alpha.py
|
AnthonyEdvalson/Machina
|
fefb058591dd7b62817c75277d5ca0eb6dbd8c3a
|
[
"MIT"
] | null | null | null |
from tools.com.alpha import Flow, Path
def test_flow():
p = Path("TEST", "layer", ["TASK", "SUB"])
f = Flow(content="abc123", path=p, format="text", a=1, b=7, c="aaaa")
s = str(p).encode() + b"""
text
a: 1
b: 7
c: aaaa
abc123"""
assert f.to_bytes()[5:] == s
| 17.352941
| 74
| 0.522034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 88
| 0.298305
|
e15d3dcf46973ac9dc62ab07128caefad49a8319
| 907
|
py
|
Python
|
compile.py
|
DrSuiunbek/pbc
|
6dd3dc4e480483a885d80cd9c01b80c4ae9fb076
|
[
"MIT"
] | null | null | null |
compile.py
|
DrSuiunbek/pbc
|
6dd3dc4e480483a885d80cd9c01b80c4ae9fb076
|
[
"MIT"
] | null | null | null |
compile.py
|
DrSuiunbek/pbc
|
6dd3dc4e480483a885d80cd9c01b80c4ae9fb076
|
[
"MIT"
] | null | null | null |
import argparse
import os
import subprocess
from subprocess import call
def compile_separate():
cmd = "docker run -v " + contractsdir + ":/data ethereum/solc:stable --bin --abi --overwrite -o /data /data/" + filename
print(cmd)
call(cmd, shell=True)
def compile_combined():
cmd = "docker run -v " + contractsdir + ":/data ethereum/solc:stable --optimize --combined-json abi,bin,interface /data/" + filename
print(cmd)
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
s = process.stdout.readline().rstrip()
print(s)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="the name of the solidity file")
args = parser.parse_args()
filename = args.filename
cwd = os.getcwd()
datadir = os.path.join(cwd, "data")
contractsdir = os.path.join(cwd, "contracts")
compile_separate()
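# --- Added usage note (not part of the original script) ---
# Run from the repository root with the contract placed under ./contracts,
# e.g. (the contract name is illustrative):
#   python compile.py MyContract.sol
# compile_separate() then writes the generated .bin and .abi files into
# ./contracts via the dockerized solc image.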
| 29.258065
| 136
| 0.680265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 250
| 0.275634
|
e15d5e4f994eb2a0d1d81f21279363ad0216ad9f
| 1,283
|
py
|
Python
|
app/machine_learning/utils.py
|
jonzxz/project-piscator
|
588c8b1ac9355f9a82ac449fdbeaa1ef7eb441ef
|
[
"MIT"
] | null | null | null |
app/machine_learning/utils.py
|
jonzxz/project-piscator
|
588c8b1ac9355f9a82ac449fdbeaa1ef7eb441ef
|
[
"MIT"
] | null | null | null |
app/machine_learning/utils.py
|
jonzxz/project-piscator
|
588c8b1ac9355f9a82ac449fdbeaa1ef7eb441ef
|
[
"MIT"
] | 1
|
2021-02-18T03:08:21.000Z
|
2021-02-18T03:08:21.000Z
|
import re
import joblib
from sklearn.ensemble import RandomForestClassifier
from typing import List, Tuple
# Cleans up a messed up HTML / tabbed raw content into space delimited content
def clean_up_raw_body(raw_text: str) -> str:
return ' '.join([line.strip() for line in raw_text.strip().splitlines() \
if line.strip()])
# Flattens a list of tuples for (Sender, SenderDomain) into [Sender, SenderDomain]
# By right there SHOULD only be a single pair but kept in list just in case!
# Even indexes are Sender and odd indexes are SenderDomains
def flatten_from_tuples(list_tupl: List[Tuple]) -> List:
return [item for tup in list_tupl for item in tup]
# Retrieves a list of [Sender, SenderDomain] and returns domain names only
# eg. ['Person', 'Person@Company.com']
# Returns [Company.com]
# By right there should only be one entry but kept in list just in case
# set list to remove duplicates
def identify_domains(list_of_sender_domain_pairs: List):
if isinstance(list_of_sender_domain_pairs, list):
return list(set([item.split(sep='@')[1] for item \
in list_of_sender_domain_pairs if '@' in item]))
return list_of_sender_domain_pairs.split(sep='@')[-1]
def load_model(MODEL_NAME) -> RandomForestClassifier:
return joblib.load(MODEL_NAME)
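# --- Added usage sketch (not part of the original module) ---
# Illustrates the helpers above with hypothetical values; the sender/domain
# pair mirrors the format described in the comments.
if __name__ == '__main__':
    raw = "  <p>Hello</p>\n\n\tplease verify your account  "
    print(clean_up_raw_body(raw))      # "<p>Hello</p> please verify your account"
    pairs = [('Person', 'Person@Company.com')]
    flat = flatten_from_tuples(pairs)  # ['Person', 'Person@Company.com']
    print(identify_domains(flat))      # ['Company.com']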
| 42.766667
| 82
| 0.747467
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 543
| 0.423227
|
e15e1ca2f5eb878425d154d27fca023b5942afb5
| 1,637
|
py
|
Python
|
sendgrid_email.py
|
ssiddhantsharma/open-source-library-data-collector
|
9def970707a3995239ef75958a9b03736da4a73e
|
[
"MIT"
] | null | null | null |
sendgrid_email.py
|
ssiddhantsharma/open-source-library-data-collector
|
9def970707a3995239ef75958a9b03736da4a73e
|
[
"MIT"
] | null | null | null |
sendgrid_email.py
|
ssiddhantsharma/open-source-library-data-collector
|
9def970707a3995239ef75958a9b03736da4a73e
|
[
"MIT"
] | null | null | null |
import os
import sendgrid
from sendgrid.helpers.mail import Content, Email, Mail
from bs4 import BeautifulSoup
class SendGrid(object):
"""Send an email through SendGrid"""
def __init__(self):
# Check if we are not in heroku
sendgrid_api_key = os.environ.get('SENDGRID_APY_KEY') if os.environ.get('ENV') != 'prod' else os.environ['SENDGRID_API_KEY']
self.sendgrid = sendgrid.SendGridAPIClient(apikey=sendgrid_api_key)
def send_email(self, to_email, from_email, subject, body):
"""Send the email
:param to_email: who the email is going to
(e.g. 'First Last <email@example.com>')
:param from_email: who the email is coming from
(e.g. 'First Last <email@example.com>')
:param subject: the email subject line
:param body: the email body in HTML format
:type to_email: string
:type from_email: string
:type subject: string
:type body: string
        :returns: HTTP status code and JSON message from SendGrid's API
:rtype: Integer, JSON
"""
from_email = Email(from_email)
subject = subject
to_email = Email(to_email)
soup = BeautifulSoup(body, "html.parser")
content = Content("text/plain", soup.get_text())
mail = Mail(from_email, subject, to_email, content)
response = self.sendgrid.client.mail.send.post(request_body=mail.get())
return response.status_code, response.body
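# --- Added usage sketch (not part of the original module) ---
# Assumes the SendGrid API key environment variable read above is set; the
# addresses below are placeholders.
if __name__ == '__main__':
    mailer = SendGrid()
    status_code, body = mailer.send_email(
        'First Last <to@example.com>',
        'First Last <from@example.com>',
        'Test subject',
        '<h1>Hello</h1><p>The plain-text body is extracted from this HTML.</p>')
    print(status_code)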
| 41.974359
| 132
| 0.592547
| 1,523
| 0.93036
| 0
| 0
| 0
| 0
| 0
| 0
| 854
| 0.521686
|
e15e7f996a734bfc0abd1bd0f37b1c7a308de458
| 2,141
|
py
|
Python
|
ckilpailija.py
|
bittikettu/JTimer
|
d0b4a6173e84c89286874865427741bd595cf955
|
[
"MIT"
] | null | null | null |
ckilpailija.py
|
bittikettu/JTimer
|
d0b4a6173e84c89286874865427741bd595cf955
|
[
"MIT"
] | null | null | null |
ckilpailija.py
|
bittikettu/JTimer
|
d0b4a6173e84c89286874865427741bd595cf955
|
[
"MIT"
] | null | null | null |
import json
class kilpailija:
def __init__(self,etunimi,sukunimi,puhelinnumero,seura,kilpasarja,bibnumber):
self.etunimi = etunimi
self.sukunimi = sukunimi
self.puhelinnumero = puhelinnumero
self.seura = seura
self.kilpasarja = kilpasarja
self.bibnumber = bibnumber
self.ajat = []
self.valiajat = []
self.valiaikamaara = 0
self.maaliintuloaika = 0
self.sijoitus = 9999
self.dnf = False
self.dns = False
self.lasttime = 0
self.totaltime = 9999999999
def __str__(self):
if(self.sijoitus == 9999):
return ('%s, %s %s, %s, %s' % (self.bibnumber, self.etunimi, self.sukunimi, self.seura, self.kilpasarja))
else:
return ('%s, %s %s, %s, %s, %d' % (self.bibnumber, self.etunimi, self.sukunimi, self.seura, self.kilpasarja,self.sijoitus))
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True,ensure_ascii=True)#return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def kirjaaAika(self,aika):
self.ajat.append(aika)
self.lasttime = aika
self.valiaikamaara = self.valiaikamaara + 1
#print(int((aika-int(aika))*100))
if len(self.valiajat) == 0:
self.valiajat.append(aika)
self.totaltime = self.valiajat[0]
else:
#print(len(self.ajat))
self.valiajat.append(aika - self.ajat[len(self.ajat)-2])
self.totaltime = self.totaltime + self.valiajat[len(self.valiajat)-1]
#print(self.ajat)
#print(self.valiajat)
#print(self.totaltime)
def Sijoitus(self,sijoitus):
self.sijoitus = sijoitus
def DNF(self):
if self.dnf == False:
self.dnf = True
else:
self.dnf = False
def DNS(self):
if self.dns == False:
self.dns = True
else:
self.dns = False
def GetTimeAmount(self):
return len(self.ajat)
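# --- Added usage sketch (not part of the original module) ---
# A hypothetical competitor with two recorded split times.
if __name__ == '__main__':
    k = kilpailija('Matti', 'Meikalainen', '0401234567', 'Example Club', 'M21', 7)
    k.kirjaaAika(125.4)   # first split: total time becomes 125.4
    k.kirjaaAika(251.0)   # second split: 125.6 is added, total 251.0
    print(k)              # "7, Matti Meikalainen, Example Club, M21"
    print(k.toJSON())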
| 33.984127
| 176
| 0.566558
| 2,126
| 0.992994
| 0
| 0
| 0
| 0
| 0
| 0
| 243
| 0.113498
|
e15f0f7648e65dbaf7aa7dffa649f9d29bce17dd
| 1,436
|
py
|
Python
|
hessen/frankfurt.py
|
risklayer/corona-landkreis-crawler
|
2e82448ff614240365de9493eafa0e6a620ac615
|
[
"Unlicense"
] | 12
|
2022-02-23T11:06:06.000Z
|
2022-03-04T17:21:44.000Z
|
hessen/frankfurt.py
|
risklayer/corona-landkreis-crawler
|
2e82448ff614240365de9493eafa0e6a620ac615
|
[
"Unlicense"
] | null | null | null |
hessen/frankfurt.py
|
risklayer/corona-landkreis-crawler
|
2e82448ff614240365de9493eafa0e6a620ac615
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
## Tommy
from botbase import *
_frankfurt_st = re.compile(r"Stand:\s*(\d\d?\. *\w+ 20\d\d, \d\d?(?::\d\d)?) Uhr")
def frankfurt(sheets):
import locale
locale.setlocale(locale.LC_TIME, "de_DE.UTF-8")
soup = get_soup("https://frankfurt.de/service-und-rathaus/verwaltung/aemter-und-institutionen/gesundheitsamt/informationen-zum-neuartigen-coronavirus-sars-cov-2/aktuelle-lage")
header = next(x for x in soup.find_all("h4") if "Aktuelle Infektionszahlen in Frankfurt" in x.get_text())
rows = [[x.text.strip() for x in row.findAll("td")] for row in header.findNext("table").findAll("tr")]
date_text = rows[0][0]
#print(date_text)
date = _frankfurt_st.search(date_text)
date = date.group(1) + (":00" if not ":" in date.group(1) else "")
#print(date)
#if not today().strftime("%d. %B %Y") in date_text: raise NotYetAvailableException("Frankfurt noch alt: " + date_text[:-93])
date = check_date(date, "Frankfurt", datetime.timedelta(hours=8))
assert "Gesamtzahl der COVID-19-Fälle in Frankfurt" in rows[1][0]
assert "Todesfälle" in rows[2][0]
assert "Genesene" in rows[3][0]
c = force_int(rows[1][1])
d = force_int(rows[2][1])
g = force_int(rows[3][1])
update(sheets, 6412, c=c, d=d, g=g, sig="Bot", ignore_delta=True)
return True
schedule.append(Task(8, 5, 12, 5, 360, frankfurt, 6412))
if __name__ == '__main__': frankfurt(googlesheets())
| 47.866667
| 180
| 0.66922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 569
| 0.395413
|
e15f87d69b9f385338407a9fb5c01c89ecaa7425
| 2,065
|
py
|
Python
|
lib/utils/timeout.py
|
kustodian/aerospike-admin
|
931ee55ccd65ba3e20e6611a0294c92b09e8cfcb
|
[
"Apache-2.0"
] | null | null | null |
lib/utils/timeout.py
|
kustodian/aerospike-admin
|
931ee55ccd65ba3e20e6611a0294c92b09e8cfcb
|
[
"Apache-2.0"
] | null | null | null |
lib/utils/timeout.py
|
kustodian/aerospike-admin
|
931ee55ccd65ba3e20e6611a0294c92b09e8cfcb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013-2018 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
import commands
DEFAULT_TIMEOUT = 5.0
class TimeoutException(Exception):
"""A timeout has occurred."""
pass
class call_with_timeout:
def __init__(self, function, timeout=DEFAULT_TIMEOUT):
self.timeout = timeout
self.function = function
def handler(self, signum, frame):
raise TimeoutException()
def __call__(self, *args):
# get the old SIGALRM handler
old = signal.signal(signal.SIGALRM, self.handler)
# set the alarm
signal.setitimer(signal.ITIMER_REAL, self.timeout)
try:
result = self.function(*args)
finally:
# restore existing SIGALRM handler
signal.signal(signal.SIGALRM, old)
signal.setitimer(signal.ITIMER_REAL, 0)
return result
def timeout(timeout):
"""This decorator takes a timeout parameter in seconds."""
def wrap_function(function):
return call_with_timeout(function, timeout)
return wrap_function
def default_timeout(function):
"""This simple decorator 'timesout' after DEFAULT_TIMEOUT seconds."""
return call_with_timeout(function)
def getstatusoutput(command, timeout=DEFAULT_TIMEOUT):
"""This is a timeout wrapper aroung getstatusoutput."""
_gso = call_with_timeout(commands.getstatusoutput, timeout)
try:
return _gso(command)
except TimeoutException:
return (-1, "The command '%s' timed-out after %i seconds." % (command, timeout))
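# --- Added usage sketch (not part of the original module) ---
# Demonstrates the timeout decorator on a deliberately slow function; the
# sleep duration and timeout value are illustrative. Requires a platform with
# SIGALRM (i.e. not Windows).
if __name__ == '__main__':
    import time
    @timeout(1.0)
    def slow_call():
        time.sleep(2)
        return 'done'
    try:
        slow_call()
    except TimeoutException:
        print('slow_call() timed out after 1 second')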
| 29.927536
| 88
| 0.701695
| 745
| 0.360775
| 0
| 0
| 0
| 0
| 0
| 0
| 902
| 0.436804
|
e15f893232695e92454619ed0274fe5e5ba282b5
| 101
|
py
|
Python
|
src/myapp/admin.py
|
anmquangw/viu-upload-file
|
bfbff413cc92e454226fced5fe504b7cebc6c102
|
[
"MIT"
] | null | null | null |
src/myapp/admin.py
|
anmquangw/viu-upload-file
|
bfbff413cc92e454226fced5fe504b7cebc6c102
|
[
"MIT"
] | 2
|
2020-06-21T01:47:59.000Z
|
2020-06-27T12:39:24.000Z
|
src/myapp/admin.py
|
sonnhfit/DocShare
|
50d9b8c333144780385f970197519ddda61bd502
|
[
"MIT"
] | null | null | null |
"""from django.contrib import admin
from .models import DemoModel
admin.site.register(DemoModel)"""
| 20.2
| 35
| 0.782178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 100
| 0.990099
|
e15fdd67f6de5e7d590eb91ddbbf06d3e3d45dea
| 2,015
|
py
|
Python
|
straightlanespipeline.py
|
semeniuta/CarND-AdvancedLaneLines
|
57fbdc6cc9596f299b4517514d487573f7c373b4
|
[
"MIT"
] | null | null | null |
straightlanespipeline.py
|
semeniuta/CarND-AdvancedLaneLines
|
57fbdc6cc9596f299b4517514d487573f7c373b4
|
[
"MIT"
] | null | null | null |
straightlanespipeline.py
|
semeniuta/CarND-AdvancedLaneLines
|
57fbdc6cc9596f299b4517514d487573f7c373b4
|
[
"MIT"
] | null | null | null |
import lanelines
from compgraph import CompGraph, CompGraphRunner
import numpy as np
import cv2
func_dict = {
'grayscale': lanelines.grayscale,
'get_image_shape': lambda im : im.shape,
'canny': lanelines.canny,
'define_lanes_region': lanelines.define_lanes_region,
'apply_region_mask': lanelines.apply_region_mask,
'gaussian_blur': lanelines.gaussian_blur,
'hough_lines': lanelines.find_hough_lines,
'compute_line_tangents': lanelines.compute_line_tangents,
'extend_lines': lanelines.extend_lane_lines_grouped_by_slopes,
'average_endpoints_left': lanelines.average_lines_endpoints,
'average_endpoints_right': lanelines.average_lines_endpoints
}
func_io = {
'grayscale': ('image', 'image_gray'),
'get_image_shape': ('image_gray', ('n_rows', 'n_cols')),
'define_lanes_region': (
('n_rows', 'n_cols', 'x_from', 'x_to', 'y_lim', 'left_offset', 'right_offset'),
'region_vertices'
),
'gaussian_blur': (('image_gray', 'blur_kernel'), 'blurred_image'),
'canny': (('blurred_image', 'canny_lo', 'canny_hi'), 'image_canny'),
'apply_region_mask': (('image_canny', 'region_vertices'), 'masked_image'),
'hough_lines': (('masked_image', 'rho', 'theta', 'hough_threshold', 'min_line_length', 'max_line_gap'), 'lines'),
'compute_line_tangents': ('lines', 'tangents'),
'extend_lines': (('lines', 'tangents', 'y_lim', 'n_rows', 'abs_slope_threshold'), ('extended_lines_left', 'extended_lines_right')),
'average_endpoints_left': ('extended_lines_left', 'avg_line_left'),
'average_endpoints_right': ('extended_lines_right', 'avg_line_right')
}
computational_graph = CompGraph(func_dict, func_io)
parameters = {
'x_from': 560,
'x_to': 710,
'y_lim': 450,
'left_offset': 50,
'right_offset': 0,
'blur_kernel': 11,
'canny_lo': 70,
'canny_hi': 200,
'rho': 1,
'theta': np.pi/180,
'hough_threshold': 20,
'min_line_length': 7,
'max_line_gap': 1,
'abs_slope_threshold': 0.2
}
| 35.350877
| 135
| 0.68536
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,068
| 0.530025
|
e1601ec501793267dab5b7a344de5c414ede0c73
| 2,904
|
py
|
Python
|
PA1/ArrayListTests/main_create_tests.py
|
tordisuna/SC-T-201-GSKI
|
1e89e5b31e7d74aeecae3dffe2df7ac9e8bb40f2
|
[
"MIT"
] | null | null | null |
PA1/ArrayListTests/main_create_tests.py
|
tordisuna/SC-T-201-GSKI
|
1e89e5b31e7d74aeecae3dffe2df7ac9e8bb40f2
|
[
"MIT"
] | null | null | null |
PA1/ArrayListTests/main_create_tests.py
|
tordisuna/SC-T-201-GSKI
|
1e89e5b31e7d74aeecae3dffe2df7ac9e8bb40f2
|
[
"MIT"
] | 1
|
2021-02-12T11:36:53.000Z
|
2021-02-12T11:36:53.000Z
|
import random
from random import Random
def write_test_line(f, r, c, clear, o = 0):
f.write("\n")
if(o == 0):
if clear == 0:
o = r.randint(1, 8)
elif clear == 1:
o = 9
else:
o = r.randint(1, 9)
if o == 1:
f.write("prepend")
f.write(" ")
f.write(str(r.randint(10, 99)))
c += 1
elif o == 2:
f.write("insert")
f.write(" ")
f.write(str(r.randint(10, 99)))
f.write(" ")
f.write(str(r.randint(0, c)))
c += 1
elif o == 3:
f.write("append")
f.write(" ")
f.write(str(r.randint(10, 99)))
c += 1
elif o == 4:
f.write("set_at")
f.write(" ")
f.write(str(r.randint(10, 99)))
f.write(" ")
f.write(str(r.randint(0, c)))
elif o == 5:
f.write("get_first")
elif o == 6:
f.write("get_at")
f.write(" ")
f.write(str(r.randint(0, c)))
elif o == 7:
f.write("get_last")
elif o == 8:
f.write("remove_at")
f.write(" ")
f.write(str(r.randint(0, c)))
c -= 1
elif o == 9:
if r.randint(1, clear) == 1:
f.write("clear")
c = 2
return c
def write_insert_ordered_line(f, r, c):
f.write("\n")
f.write("insert_ordered")
f.write(" ")
f.write(str(r.randint(10, 30)))
c += 1
return c
def write_sort_line(f, r, c):
f.write("\n")
f.write("sort")
return c
def write_find_line(f, r, c):
f.write("\n")
f.write("find")
f.write(" ")
f.write(str(r.randint(10, 30)))
return c
def write_remove_value_line(f, r, c):
f.write("\n")
f.write("remove_value")
f.write(" ")
f.write(str(r.randint(10, 30)))
c -= 1
return c
r = Random()
f = open("extra_tests.txt", "w+")
f.write("new int")
c = 2
for _ in range(64):
c = write_test_line(f, r, c, 0)
c = write_test_line(f, r, c, 1)
for _ in range(128):
c = write_test_line(f, r, c, 0)
c = write_test_line(f, r, c, 1)
for _ in range(512):
c = write_test_line(f, r, c, 5)
for _ in range(20):
c = write_insert_ordered_line(f, r, c)
c = write_test_line(f, r, c, 1)
for _ in range(20):
c = write_test_line(f, r, c, 2, 2)
c = write_insert_ordered_line(f, r, c)
for _ in range(32):
c = write_test_line(f, r, c, 2, 1)
for _ in range(10):
c = write_find_line(f, r, c)
for _ in range(10):
c = write_remove_value_line(f, r, c)
c = write_test_line(f, r, c, 1)
for _ in range(32):
c = write_insert_ordered_line(f, r, c)
for _ in range(10):
c = write_find_line(f, r, c)
for _ in range(10):
c = write_remove_value_line(f, r, c)
for _ in range(32):
c = write_test_line(f, r, c, 2, 2)
for _ in range(10):
c = write_find_line(f, r, c)
for _ in range(10):
c = write_remove_value_line(f, r, c)
f.close()
| 19.755102
| 43
| 0.512741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 205
| 0.070592
|
e160ba46a79b2de84010dbbe846ade7d792604fe
| 4,305
|
py
|
Python
|
stock deep learning/7-2.LSTM(stock).py
|
nosy0411/Deep-learning-project
|
b0864579ec1fef4c6224397e3c39e4fce051c93a
|
[
"MIT"
] | null | null | null |
stock deep learning/7-2.LSTM(stock).py
|
nosy0411/Deep-learning-project
|
b0864579ec1fef4c6224397e3c39e4fce051c93a
|
[
"MIT"
] | null | null | null |
stock deep learning/7-2.LSTM(stock).py
|
nosy0411/Deep-learning-project
|
b0864579ec1fef4c6224397e3c39e4fce051c93a
|
[
"MIT"
] | null | null | null |
# LSTM (GRU) example: try to predict the KODEX200 stock price (2010 to present).
# Using the KODEX200 closing price and its 10-day and 40-day moving averages, predict the closing price over the next 10 days.
# The model learns the closing-price and moving-average patterns of the past 20 days (step = 20) to make its predictions.
# Is prediction of daily stock prices even possible??
#
# 2018.11.22, 아마추어퀀트 (조성현)
# --------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from MyUtil import YahooData
nInput = 3
nOutput = 3
nStep = 20
nNeuron = 50
# Build training batches from 2-D time-series data.
# return : xBatch - RNN input
# yBatch - RNN output
#
# If step = 2 and n = 3, then
# xData = [[1,2,3], [4,5,6], [7,8,9], [10,11,12], ...]
# xBatch = [[[1,2,3], [4,5,6]], [[7,8,9], [10,11,12]], ...]
# yBatch = [[[4,5,6], [7,8,9]], [[10,11,12], [13,14,15]], ...]
def createTrainData(xData, step, n=nInput):
m = np.arange(len(xData) - step)
np.random.shuffle(m)
x, y = [], []
for i in m:
a = xData[i:(i+step)]
x.append(a)
xBatch = np.reshape(np.array(x), (len(m), step, n))
for i in m+1:
a = xData[i:(i+step)]
y.append(a)
yBatch = np.reshape(np.array(y), (len(m), step, n))
return xBatch, yBatch
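# --- Added illustration (not part of the original script) ---
# Mirrors the comment above: with step=2 and n=3 on a 5x3 toy array,
# createTrainData returns 3 windows of shape (2, 3) for both x and y.
_demo = np.arange(1, 16).reshape(5, 3)
_xb_demo, _yb_demo = createTrainData(_demo, step=2, n=3)
assert _xb_demo.shape == (3, 2, 3) and _yb_demo.shape == (3, 2, 3)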
# Stock price data
#df = YahooData.getStockDataYahoo('^KS11', start='2007-01-01')
df = pd.read_csv('StockData/^KS11.csv', index_col=0, parse_dates=True)
df = pd.DataFrame(df['Close'])
df['ma_10'] = pd.DataFrame(df['Close']).rolling(window=10).mean()
df['ma_40'] = pd.DataFrame(df['Close']).rolling(window=40).mean()
df = df.dropna()
df = (df - df.mean()) / df.std()
# Generate the training data.
data = np.array(df)
xBatch, yBatch = createTrainData(data, nStep)
# Build the RNN graph (Wx, Wh). xBatch is fed into the RNN.
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, nStep, nInput])
rnn = tf.nn.rnn_cell.LSTMCell(nNeuron)
#rnn = tf.nn.rnn_cell.GRUCell(nNeuron)
output, state = tf.nn.dynamic_rnn(rnn, x, dtype=tf.float32)
# Build a feed-forward network (Wy) that takes the RNN output and produces the 3 y outputs.
y = tf.placeholder(tf.float32, [None, nStep, nOutput])
inFC = tf.reshape(output, [-1, nNeuron])
fc1 = tf.contrib.layers.fully_connected(inputs=inFC, num_outputs=nNeuron)
predY = tf.contrib.layers.fully_connected(inputs=fc1, num_outputs=nOutput, activation_fn=None)
predY = tf.reshape(predY, [-1, nStep, nOutput])
# Define the loss as mean squared error (MSE): given xBatch as input, the output should be yBatch.
loss = tf.reduce_sum(tf.square(predY - y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
minLoss = optimizer.minimize(loss)
# Run the graph and train (updating Wx, Wh, Wy).
sess = tf.Session()
sess.run(tf.global_variables_initializer())
lossHist = []
for i in range(300):
sess.run(minLoss, feed_dict={x: xBatch, y: yBatch})
if i % 5 == 0:
ploss = sess.run(loss, feed_dict={x: xBatch, y: yBatch})
lossHist.append(ploss)
print(i, "\tLoss:", ploss)
# Predict the next 10 periods: predict 1 period ahead, feed that prediction back in to predict period 2,
# and repeat in this way up to 10 periods.
nFuture = 10
if len(data) > 100:
    lastData = np.copy(data[-100:]) # plot only the last 100 points of the original data
else:
lastData = np.copy(data)
dx = np.copy(lastData)
estimate = [dx[-1]]
for i in range(nFuture):
    # predict the next value from the last nStep inputs
px = dx[-nStep:,]
px = np.reshape(px, (1, nStep, nInput))
    # predict the next value
yHat = sess.run(predY, feed_dict={x: px})[0][-1]
    # store the prediction
estimate.append(yHat)
    # keep the prediction so it is included when predicting the following value
dx = np.vstack([dx, yHat])
# Plot the loss history
plt.figure(figsize=(8, 3))
plt.plot(lossHist, color='red')
plt.title("Loss History")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.show()
# Plot the stock price chart and moving averages.
plt.figure(figsize=(8, 3))
plt.plot(df['Close'], color='red')
plt.plot(df['ma_10'], color='blue')
plt.plot(df['ma_40'], color='green')
plt.title("KODEX-200 stock price")
plt.show()
# Plot the original and predicted time series
CLOSE = 0 # the closing price is predicted
estimate = np.array(estimate)
ax1 = np.arange(1, len(lastData[:, CLOSE]) + 1)
ax2 = np.arange(len(lastData), len(lastData) + len(estimate))
plt.figure(figsize=(8, 3))
plt.plot(ax1, lastData[:, CLOSE], 'b-o', color='blue', markersize=4, label='Stock price', linewidth=1)
plt.plot(ax2, estimate[:, CLOSE], 'b-o', color='red', markersize=4, label='Estimate')
plt.axvline(x=ax1[-1], linestyle='dashed', linewidth=1)
plt.legend()
plt.title("KODEX-200 prediction")
plt.show()
| 30.75
| 102
| 0.625784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,164
| 0.427922
|
e1627f07a28ba766216726e8582242703156a953
| 1,486
|
py
|
Python
|
media.py
|
wduncanfraser/movie_trailer_website
|
deaa134862772df09cf8af79a6990d140848fb80
|
[
"MIT"
] | null | null | null |
media.py
|
wduncanfraser/movie_trailer_website
|
deaa134862772df09cf8af79a6990d140848fb80
|
[
"MIT"
] | null | null | null |
media.py
|
wduncanfraser/movie_trailer_website
|
deaa134862772df09cf8af79a6990d140848fb80
|
[
"MIT"
] | null | null | null |
"""media.py: Module for movie_trailer_website, contains Movie class"""
import webbrowser
import urllib
import json
class Movie(object):
"""This class provides a way to store movie related information.
    The constructor takes a movie title, an imdb_id, and a URL for a YouTube trailer as input.
All other values are populated via OMDB API"""
def __init__(self, title, imdb_id, trailer_youtube_url):
# Initialize instance variables for passed parameters
self.title = title
self.imdb_id = imdb_id
self.trailer_youtube_url = trailer_youtube_url
# Query OMDB API for json response of movie data
response = urllib.urlopen("http://www.omdbapi.com/?i=" + self.imdb_id + "&plot=short&r=json")
movie_json = json.loads(response.read())
# Download movie posters locally
# IMDB does not allow hotlinking of images
f = open('posters/' + self.imdb_id + '.jpg', 'wb')
f.write(urllib.urlopen(movie_json['Poster']).read())
f.close()
# Populate remaining instance variables from json response and downloaded poster
self.plot = movie_json['Plot']
self.genre = movie_json['Genre']
self.year = movie_json['Year']
self.runtime = movie_json['Runtime']
self.rating = movie_json['Rated']
self.imdb_score = movie_json['imdbRating']
self.poster_image_url = f.name
def show_trailer(self):
webbrowser.open(self.trailer_youtube_url)
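# --- Added usage sketch (not part of the original module) ---
# Requires network access to the OMDB API and a writable posters/ directory;
# the IMDb id and trailer URL below are illustrative placeholders.
if __name__ == '__main__':
    toy_story = Movie('Toy Story', 'tt0114709',
                      'https://www.youtube.com/watch?v=vwyZH85NQC4')
    print(toy_story.title + ' (' + toy_story.year + '): ' + toy_story.genre)
    toy_story.show_trailer()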
| 37.15
| 101
| 0.672275
| 1,367
| 0.919919
| 0
| 0
| 0
| 0
| 0
| 0
| 648
| 0.43607
|
e163903fd0678839e9ef90435028e77dc1cbf097
| 103
|
py
|
Python
|
src/moredataframes/mdf_core.py
|
GlorifiedStatistics/MoreDataframes
|
147d5b8104d1cbd1cf2836220f43fb6c8ca099b7
|
[
"MIT"
] | null | null | null |
src/moredataframes/mdf_core.py
|
GlorifiedStatistics/MoreDataframes
|
147d5b8104d1cbd1cf2836220f43fb6c8ca099b7
|
[
"MIT"
] | null | null | null |
src/moredataframes/mdf_core.py
|
GlorifiedStatistics/MoreDataframes
|
147d5b8104d1cbd1cf2836220f43fb6c8ca099b7
|
[
"MIT"
] | null | null | null |
"""
A collection of useful functions for manipulating/encoding pandas dataframes for data science.
"""
| 25.75
| 94
| 0.786408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 102
| 0.990291
|
e163f95d62fc70e17e021921220d7ea02e910aa6
| 23,493
|
py
|
Python
|
superslomo/model.py
|
myungsub/meta-interpolation
|
f7afee9d1786f67e6f548c2734f91858f803c5dc
|
[
"MIT"
] | 74
|
2020-04-03T06:26:39.000Z
|
2022-03-25T16:51:28.000Z
|
superslomo/model.py
|
baiksung/meta-interpolation
|
72dd3b2e56054bb411ed20301583a0e67d9ea293
|
[
"MIT"
] | 6
|
2020-07-09T20:09:23.000Z
|
2021-09-20T11:12:24.000Z
|
superslomo/model.py
|
baiksung/meta-interpolation
|
72dd3b2e56054bb411ed20301583a0e67d9ea293
|
[
"MIT"
] | 19
|
2020-04-16T09:18:38.000Z
|
2021-12-28T08:25:12.000Z
|
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from model_utils import *
class down(nn.Module):
"""
A class for creating neural network blocks containing layers:
        Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet Class to create a UNet like NN architecture.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels, filterSize):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used as input and output channels for the
second convolutional layer.
filterSize : int
filter size for the convolution filter. input N would create
a N x N filter.
"""
super(down, self).__init__()
# Initialize convolutional layers.
# self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize, stride=1, padding=int((filterSize - 1) / 2))
# self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize, stride=1, padding=int((filterSize - 1) / 2))
self.conv1 = MetaConv2dLayer(in_channels=inChannels, out_channels=outChannels, kernel_size=filterSize, stride=1, padding=int((filterSize - 1) / 2))
self.conv2 = MetaConv2dLayer(in_channels=outChannels, out_channels=outChannels, kernel_size=filterSize, stride=1, padding=int((filterSize - 1) / 2))
def forward(self, x, params=None):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
# Average pooling with kernel size 2 (2 x 2).
x = F.avg_pool2d(x, 2)
# (Convolution + Leaky ReLU) x 2
param_dict = dict()
if params is not None:
param_dict = extract_top_level_dict(current_dict=params)
x = F.leaky_relu(self.conv1(x, params=param_dict['conv1']), negative_slope = 0.1)
x = F.leaky_relu(self.conv2(x, params=param_dict['conv2']), negative_slope = 0.1)
else:
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
x = F.leaky_relu(self.conv2(x), negative_slope = 0.1)
return x
class up(nn.Module):
"""
A class for creating neural network blocks containing layers:
        Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet Class to create a UNet like NN architecture.
...
Methods
-------
forward(x, skpCn)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used for setting input and output channels for
the second convolutional layer.
"""
super(up, self).__init__()
# Initialize convolutional layers.
# self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1)
self.conv1 = MetaConv2dLayer(in_channels=inChannels, out_channels=outChannels, kernel_size=3, stride=1, padding=1)
# (2 * outChannels) is used for accommodating skip connection.
# self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1, padding=1)
self.conv2 = MetaConv2dLayer(in_channels=2 * outChannels, out_channels=outChannels, kernel_size=3, stride=1, padding=1)
def forward(self, x, skpCn, params=None):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
skpCn : tensor
skip connection input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
# Bilinear interpolation with scaling 2.
x = F.interpolate(x, scale_factor=2, mode='bilinear')
param_dict = dict()
if params is not None:
param_dict = extract_top_level_dict(current_dict=params)
# Convolution + Leaky ReLU
x = F.leaky_relu(self.conv1(x, params=param_dict['conv1']), negative_slope = 0.1)
# Convolution + Leaky ReLU on (`x`, `skpCn`)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1), params=param_dict['conv2']), negative_slope = 0.1)
else:
# Convolution + Leaky ReLU
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
# Convolution + Leaky ReLU on (`x`, `skpCn`)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)), negative_slope = 0.1)
return x
class UNet(nn.Module):
"""
A class for creating UNet like architecture as specified by the
Super SloMo paper.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the UNet.
outChannels : int
number of output channels for the UNet.
"""
super(UNet, self).__init__()
# Initialize neural network blocks.
self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)
def forward(self, x):
"""
Returns output tensor after passing input `x` to the neural network.
Parameters
----------
x : tensor
input to the UNet.
Returns
-------
tensor
output of the UNet.
"""
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
s1 = F.leaky_relu(self.conv2(x), negative_slope = 0.1)
s2 = self.down1(s1)
s3 = self.down2(s2)
s4 = self.down3(s3)
s5 = self.down4(s4)
x = self.down5(s5)
x = self.up1(x, s5)
x = self.up2(x, s4)
x = self.up3(x, s3)
x = self.up4(x, s2)
x = self.up5(x, s1)
x = F.leaky_relu(self.conv3(x), negative_slope = 0.1)
return x
class backWarp(nn.Module):
"""
A class for creating a backwarping object.
This is used for backwarping to an image:
Given optical flow from frame I0 to I1 --> F_0_1 and frame I1,
it generates I0 <-- backwarp(F_0_1, I1).
...
Methods
-------
forward(x)
Returns output tensor after passing input `img` and `flow` to the backwarping
block.
"""
def __init__(self, W, H, device):
"""
Parameters
----------
W : int
width of the image.
H : int
height of the image.
device : device
computation device (cpu/cuda).
"""
super(backWarp, self).__init__()
# create a grid
gridX, gridY = np.meshgrid(np.arange(W), np.arange(H))
self.W = W
self.H = H
self.gridX = torch.tensor(gridX, requires_grad=False, device=device)
self.gridY = torch.tensor(gridY, requires_grad=False, device=device)
def forward(self, img, flow):
"""
Returns output tensor after passing input `img` and `flow` to the backwarping
block.
I0 = backwarp(I1, F_0_1)
Parameters
----------
img : tensor
frame I1.
flow : tensor
            optical flow from I0 to I1: F_0_1.
Returns
-------
tensor
frame I0.
"""
# Extract horizontal and vertical flows.
u = flow[:, 0, :, :]
v = flow[:, 1, :, :]
x = self.gridX.unsqueeze(0).expand_as(u).float() + u
y = self.gridY.unsqueeze(0).expand_as(v).float() + v
# range -1 to 1
x = 2*(x/self.W - 0.5)
y = 2*(y/self.H - 0.5)
# stacking X and Y
grid = torch.stack((x,y), dim=3)
# Sample pixels using bilinear interpolation.
imgOut = torch.nn.functional.grid_sample(img, grid)
return imgOut
# Creating an array of `t` values for the 7 intermediate frames between
# reference frames I0 and I1.
t = np.linspace(0.125, 0.875, 7)
def getFlowCoeff (indices, device):
"""
Gets flow coefficients used for calculating intermediate optical
flows from optical flows between I0 and I1: F_0_1 and F_1_0.
F_t_0 = C00 x F_0_1 + C01 x F_1_0
F_t_1 = C10 x F_0_1 + C11 x F_1_0
where,
C00 = -(1 - t) x t
C01 = t x t
C10 = (1 - t) x (1 - t)
C11 = -t x (1 - t)
Parameters
----------
indices : tensor
indices corresponding to the intermediate frame positions
of all samples in the batch.
device : device
computation device (cpu/cuda).
Returns
-------
tensor
coefficients C00, C01, C10, C11.
"""
# Convert indices tensor to numpy array
ind = indices.detach().numpy()
C11 = C00 = - (1 - (t[ind])) * (t[ind])
C01 = (t[ind]) * (t[ind])
C10 = (1 - (t[ind])) * (1 - (t[ind]))
return torch.Tensor(C00)[None, None, None, :].permute(3, 0, 1, 2).to(device), torch.Tensor(C01)[None, None, None, :].permute(3, 0, 1, 2).to(device), torch.Tensor(C10)[None, None, None, :].permute(3, 0, 1, 2).to(device), torch.Tensor(C11)[None, None, None, :].permute(3, 0, 1, 2).to(device)
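# --- Added worked example (not part of the original module) ---
# For the middle intermediate frame (ind = 3, so t[3] = 0.5) the coefficients are
# C00 = C11 = -(1 - 0.5) * 0.5 = -0.25 and C01 = C10 = 0.5 * 0.5 = 0.25, so
# F_t_0 and F_t_1 are symmetric combinations of F_0_1 and F_1_0.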
def getWarpCoeff (indices, device):
"""
Gets coefficients used for calculating final intermediate
frame `It_gen` from backwarped images using flows F_t_0 and F_t_1.
It_gen = (C0 x V_t_0 x g_I_0_F_t_0 + C1 x V_t_1 x g_I_1_F_t_1) / (C0 x V_t_0 + C1 x V_t_1)
where,
C0 = 1 - t
C1 = t
V_t_0, V_t_1 --> visibility maps
g_I_0_F_t_0, g_I_1_F_t_1 --> backwarped intermediate frames
Parameters
----------
indices : tensor
indices corresponding to the intermediate frame positions
of all samples in the batch.
device : device
computation device (cpu/cuda).
Returns
-------
tensor
coefficients C0 and C1.
"""
# Convert indices tensor to numpy array
ind = indices.detach().numpy()
C0 = 1 - t[ind]
C1 = t[ind]
return torch.Tensor(C0)[None, None, None, :].permute(3, 0, 1, 2).to(device), torch.Tensor(C1)[None, None, None, :].permute(3, 0, 1, 2).to(device)
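# --- Added worked example (not part of the original module) ---
# For ind = 3 (t = 0.5) this gives C0 = 1 - 0.5 = 0.5 and C1 = 0.5, i.e. the two
# backwarped frames are blended equally, weighted by their visibility maps.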
class SuperSloMoModel(nn.Module):
def __init__(self, device):
super(SuperSloMoModel, self).__init__()
self.device = device
self.flowComp = UNet(6, 4)
self.arbTimeFlowIntrp = UNet(20, 5)
self.backwarp = None
def forward(self, I0, I1, ind):
w, h = I0.size(3), I0.size(2)
s = 6 # bits to shift
padW, padH = 0, 0
if w != ((w >> s) << s):
padW = (((w >> s) + 1) << s) - w
if h != ((h >> s) << s):
padH = (((h >> s) + 1) << s) - h
paddingInput = nn.ReflectionPad2d(padding=[padW // 2, padW - padW // 2, padH // 2, padH - padH // 2])
paddingOutput = nn.ReflectionPad2d(padding=[0 - padW // 2, padW // 2 - padW, 0 - padH // 2, padH // 2 - padH])
I0 = paddingInput(I0)
I1 = paddingInput(I1)
flowOut = self.flowComp(torch.cat((I0, I1), dim=1))
F_0_1 = flowOut[:, :2, :, :]
F_1_0 = flowOut[:, 2:, :, :]
fCoeff = getFlowCoeff(ind, self.device)
F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
if self.backwarp is None or self.backwarp.W != I0.size(3) or self.backwarp.H != I0.size(2):
self.backwarp = backWarp(I0.size(3), I0.size(2), self.device) # make grid
g_I0_F_t_0 = self.backwarp(I0, F_t_0)
g_I1_F_t_1 = self.backwarp(I1, F_t_1)
intrpOut = self.arbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1))
F_t_0_f = intrpOut[:, :2, :, :] + F_t_0
F_t_1_f = intrpOut[:, 2:4, :, :] + F_t_1
V_t_0 = F.sigmoid(intrpOut[:, 4:5, :, :])
V_t_1 = 1 - V_t_0
g_I0_F_t_0_f = self.backwarp(I0, F_t_0_f)
g_I1_F_t_1_f = self.backwarp(I1, F_t_1_f)
wCoeff = getWarpCoeff(ind, self.device)
Ft_p = (wCoeff[0] * V_t_0 * g_I0_F_t_0_f + wCoeff[1] * V_t_1 * g_I1_F_t_1_f) / (wCoeff[0] * V_t_0 + wCoeff[1] * V_t_1)
warped_I0, warped_I1 = self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1)
Ft_p = paddingOutput(Ft_p)
F_0_1, F_1_0 = paddingOutput(F_0_1), paddingOutput(F_1_0)
g_I0_F_t_0, g_I1_F_t_1 = paddingOutput(g_I0_F_t_0), paddingOutput(g_I1_F_t_1)
warped_I0, warped_I1 = paddingOutput(warped_I0), paddingOutput(warped_I1)
#return Ft_p, # output image
# (F_0_1, F_1_0), # bidirectional flow maps
# (g_I0_F_t_0, g_I1_F_t_1), # warped intermediate images
# (self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1)) # warped input image (0-1, 1-0)
return Ft_p, \
(F_0_1, F_1_0), \
(g_I0_F_t_0, g_I1_F_t_1), \
(warped_I0, warped_I1)
# (self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1))
class MetaUNet(nn.Module):
"""
A class for creating UNet like architecture as specified by the
Super SloMo paper.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the UNet.
outChannels : int
number of output channels for the UNet.
"""
super(MetaUNet, self).__init__()
# Initialize neural network blocks.
self.conv1 = MetaConv2dLayer(in_channels=inChannels, out_channels=32, kernel_size=7, stride=1, padding=3)
self.conv2 = MetaConv2dLayer(in_channels=32, out_channels=32, kernel_size=7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = MetaConv2dLayer(in_channels=32, out_channels=outChannels, kernel_size=3, stride=1, padding=1)
def forward(self, x, params=None):
"""
Returns output tensor after passing input `x` to the neural network.
Parameters
----------
x : tensor
input to the UNet.
Returns
-------
tensor
output of the UNet.
"""
param_dict = dict()
if params is not None:
param_dict = extract_top_level_dict(current_dict=params)
x = F.leaky_relu(self.conv1(x, params=param_dict['conv1']), negative_slope = 0.1)
s1 = F.leaky_relu(self.conv2(x, params=param_dict['conv2']), negative_slope = 0.1)
s2 = self.down1(s1, params=param_dict['down1'])
s3 = self.down2(s2, params=param_dict['down2'])
s4 = self.down3(s3, params=param_dict['down3'])
s5 = self.down4(s4, params=param_dict['down4'])
x = self.down5(s5, params=param_dict['down5'])
x = self.up1(x, s5, params=param_dict['up1'])
x = self.up2(x, s4, params=param_dict['up2'])
x = self.up3(x, s3, params=param_dict['up3'])
x = self.up4(x, s2, params=param_dict['up4'])
x = self.up5(x, s1, params=param_dict['up5'])
x = F.leaky_relu(self.conv3(x, params=param_dict['conv3']), negative_slope = 0.1)
else:
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
s1 = F.leaky_relu(self.conv2(x), negative_slope = 0.1)
s2 = self.down1(s1)
s3 = self.down2(s2)
s4 = self.down3(s3)
s5 = self.down4(s4)
x = self.down5(s5)
x = self.up1(x, s5)
x = self.up2(x, s4)
x = self.up3(x, s3)
x = self.up4(x, s2)
x = self.up5(x, s1)
x = F.leaky_relu(self.conv3(x), negative_slope = 0.1)
return x
class MetaSuperSloMo(nn.Module):
def __init__(self, device, resume=False):
super(MetaSuperSloMo, self).__init__()
self.device = device
self.flowComp = MetaUNet(6, 4)
self.arbTimeFlowIntrp = MetaUNet(20, 5)
self.backwarp = None
if resume:
print('Loading model: pretrained_models/superslomo_base.pth')
# checkpoint = torch.load('pretrained_models/meta_superslomo.pth')
checkpoint = torch.load('pretrained_models/superslomo_base.pth')
self.flowComp.load_state_dict(checkpoint['state_dictFC'])
self.arbTimeFlowIntrp.load_state_dict(checkpoint['state_dictAT'])
def forward(self, I0, I1, ind=3, params=None, **kwargs):
ind = ind * torch.ones(I0.size(0), dtype=int)
w, h = I0.size(3), I0.size(2)
s = 6 # bits to shift
padW, padH = 0, 0
if w != ((w >> s) << s):
padW = (((w >> s) + 1) << s) - w
if h != ((h >> s) << s):
padH = (((h >> s) + 1) << s) - h
paddingInput = nn.ReflectionPad2d(padding=[padW // 2, padW - padW // 2, padH // 2, padH - padH // 2])
paddingOutput = nn.ReflectionPad2d(padding=[0 - padW // 2, padW // 2 - padW, 0 - padH // 2, padH // 2 - padH])
I0 = paddingInput(I0)
I1 = paddingInput(I1)
param_dict = dict()
if params is not None:
param_dict = extract_top_level_dict(current_dict=params)
flowOut = self.flowComp(torch.cat((I0, I1), dim=1), params=param_dict['flowComp'])
F_0_1 = flowOut[:, :2, :, :]
F_1_0 = flowOut[:, 2:, :, :]
fCoeff = getFlowCoeff(ind, self.device)
F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
if self.backwarp is None or self.backwarp.W != I0.size(3) or self.backwarp.H != I0.size(2):
self.backwarp = backWarp(I0.size(3), I0.size(2), self.device) # make grid
g_I0_F_t_0 = self.backwarp(I0, F_t_0)
g_I1_F_t_1 = self.backwarp(I1, F_t_1)
intrpOut = self.arbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1),
params=param_dict['arbTimeFlowIntrp'])
else:
flowOut = self.flowComp(torch.cat((I0, I1), dim=1))
F_0_1 = flowOut[:, :2, :, :]
F_1_0 = flowOut[:, 2:, :, :]
fCoeff = getFlowCoeff(ind, self.device)
F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
if self.backwarp is None or self.backwarp.W != I0.size(3) or self.backwarp.H != I0.size(2):
self.backwarp = backWarp(I0.size(3), I0.size(2), self.device) # make grid
g_I0_F_t_0 = self.backwarp(I0, F_t_0)
g_I1_F_t_1 = self.backwarp(I1, F_t_1)
intrpOut = self.arbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1))
F_t_0_f = intrpOut[:, :2, :, :] + F_t_0
F_t_1_f = intrpOut[:, 2:4, :, :] + F_t_1
V_t_0 = F.sigmoid(intrpOut[:, 4:5, :, :])
V_t_1 = 1 - V_t_0
g_I0_F_t_0_f = self.backwarp(I0, F_t_0_f)
g_I1_F_t_1_f = self.backwarp(I1, F_t_1_f)
wCoeff = getWarpCoeff(ind, self.device)
Ft_p = (wCoeff[0] * V_t_0 * g_I0_F_t_0_f + wCoeff[1] * V_t_1 * g_I1_F_t_1_f) / (wCoeff[0] * V_t_0 + wCoeff[1] * V_t_1)
warped_I0, warped_I1 = self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1)
Ft_p = paddingOutput(Ft_p)
F_0_1, F_1_0 = paddingOutput(F_0_1), paddingOutput(F_1_0)
g_I0_F_t_0, g_I1_F_t_1 = paddingOutput(g_I0_F_t_0), paddingOutput(g_I1_F_t_1)
warped_I0, warped_I1 = paddingOutput(warped_I0), paddingOutput(warped_I1)
#return Ft_p, # output image
# (F_0_1, F_1_0), # bidirectional flow maps
# (g_I0_F_t_0, g_I1_F_t_1), # warped intermediate images
# (self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1)) # warped input image (0-1, 1-0)
return Ft_p, {
'bidirectional_flow': (F_0_1, F_1_0),
'warped_intermediate_frames': (g_I0_F_t_0, g_I1_F_t_1),
'warped_input_frames': (warped_I0, warped_I1)}
# (self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1))
# return Ft_p
def zero_grad(self, params=None):
if params is None:
for param in self.parameters():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
print(param.grad)
param.grad.zero_()
else:
for name, param in params.items():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
print(param.grad)
param.grad.zero_()
params[name].grad = None
def restore_backup_stats(self):
"""
Reset stored batch statistics from the stored backup.
"""
pass # no batch statistics used
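# Minimal usage sketch (shapes, device and the ind value are illustrative assumptions, not taken
# from the original training script; getFlowCoeff/backWarp are assumed to be defined or imported
# earlier in this file):
#
#   model = MetaSuperSloMo(device='cuda').to('cuda')
#   I0 = torch.rand(1, 3, 256, 448, device='cuda')
#   I1 = torch.rand(1, 3, 256, 448, device='cuda')
#   Ft_p, aux = model(I0, I1, ind=3)              # interpolated frame + auxiliary outputs
#   F_0_1, F_1_0 = aux['bidirectional_flow']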
| 35.011923
| 293
| 0.548717
| 20,938
| 0.891244
| 0
| 0
| 0
| 0
| 0
| 0
| 8,753
| 0.372579
|
e164e5d3815dee52bdeb5d8a12184fbf1db5055b
| 11,970
|
py
|
Python
|
adaptfx/discrete_programs/updater_discrete.py
|
rmnldwg/Adaptive-fractionation
|
5525cbd635e9afdbf5556b2a95dd31bdb222db66
|
[
"MIT"
] | 1
|
2021-07-15T12:23:25.000Z
|
2021-07-15T12:23:25.000Z
|
adaptfx/discrete_programs/updater_discrete.py
|
rmnldwg/Adaptive-fractionation
|
5525cbd635e9afdbf5556b2a95dd31bdb222db66
|
[
"MIT"
] | 1
|
2021-11-29T18:50:05.000Z
|
2021-12-10T15:32:50.000Z
|
adaptfx/discrete_programs/updater_discrete.py
|
rmnldwg/Adaptive-fractionation
|
5525cbd635e9afdbf5556b2a95dd31bdb222db66
|
[
"MIT"
] | 1
|
2021-11-29T10:52:42.000Z
|
2021-11-29T10:52:42.000Z
|
# -*- coding: utf-8 -*-
"""
In this file are all the needed functions to calculate an adaptive fractionation treatment plan. The value_eval and the result_calc function are the only ones that should be used
This file requires all sparing factors to be known, therefore, it isnt suited to do active treatment planning but to analyze patient data.
value_eval and result_calc_BEDNT are the most essential codes. The results from value_eval can be used to calculate a treatment plan with result_calc_BEDNT.
The optimal policies for each fraction can be extracted manually(pol4 = first fraction, first index in pol is the last fraction and the last index is the first fraction). but one must know what index represents which sparing factor
Note: This file does not assume all sparing factors to be known at the start, but simulates the treatment planning as if we would get a new sparing factor at each fraction!
This program uses a discrete state space and does not interpolate between states. Therefore, it is less precise than the interpolation programs
"""
import numpy as np
from scipy.stats import truncnorm
import time
from scipy.stats import invgamma
def get_truncated_normal(mean=0, sd=1, low=0, upp=10):
'''produces a truncated normal distribution'''
return truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
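# Example (illustrative numbers): a sparing-factor distribution truncated to [0, 1.4]; the returned
# object is a frozen scipy distribution, so cdf/pdf/rvs are available.
#   X = get_truncated_normal(mean=0.9, sd=0.05, low=0, upp=1.4)
#   X.cdf(0.95) - X.cdf(0.85)   # probability mass within +/- one sd of the mean (~0.68)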
def std_calc(measured_data,alpha,beta):
    '''calculates the most likely standard deviation for a list of k sparing factors and an inverse-gamma conjugate prior
    measured_data: list/array with k sparing factors
    alpha: shape of the inverse-gamma distribution
    beta: scale of the inverse-gamma distribution
    return: most likely std based on the measured data and the inverse-gamma prior'''
n = len(measured_data)
var_values = np.arange(0.00001,0.25,0.00001)
likelihood_values = np.zeros(len(var_values))
for index,value in enumerate(var_values):
likelihood_values[index] = value**(-alpha-1)/value**(n/2)*np.exp(-beta/value)*np.exp(-np.var(measured_data)*n/(2*value))
std = (np.sqrt(var_values[np.argmax(likelihood_values)]))
return std
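# Illustrative check (hyperparameters and data are made up, not fitted from real patients): for
# five sparing factors clustered around 0.9, the grid search above returns the square root of the
# variance that maximizes the unnormalized posterior, here roughly 0.024.
#   std_calc([0.88, 0.91, 0.90, 0.93, 0.87], alpha=2, beta=0.002)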
def distribution_update(sparing_factors, alpha, beta):
    '''produces the updated probability distribution for each fraction based on the variance prior
    sparing_factors: list/array of k sparing factors
    alpha: shape of the inverse-gamma distribution
    beta: scale of the inverse-gamma distribution
    return: k-1 dimensional mean and std arrays starting from the second sparing factor (index 1)
    '''
means = np.zeros(len(sparing_factors))
stds = np.zeros(len(sparing_factors))
for i in range(len(sparing_factors)):
means[i] = np.mean(sparing_factors[:(i+1)])
stds[i] = std_calc(sparing_factors[:(i+1)],alpha,beta)
means = np.delete(means,0)
stds = np.delete(stds,0) #we get rid of the first value as it is only the planning value and not used in a fraction
return [means,stds]
def updated_distribution_calc(data,sparing_factors):
    '''calculates the updated distribution based on prior data that is used to set up an inverse-gamma distribution
    data shape: nxk, where n is the number of patients and k the number of sparing factors per patient
    sparing_factors: list/array with k entries, where the first sparing factor is the planning sparing factor and is therefore not included in the treatment
    return: updated means and stds for k-1 fractions.'''
variances = data.var(axis = 1)
alpha,loc,beta = invgamma.fit(variances, floc = 0) #here beta is the scale parameter
[means,stds] = distribution_update(sparing_factors,alpha,beta)
return[means,stds]
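# Sketch with synthetic prior data (all numbers are illustrative): 50 hypothetical patients with 6
# sparing factors each define the inverse-gamma prior on the variance, which is then combined with
# the current patient's observed sparing factors.
#   prior = np.random.normal(0.9, 0.04, size=(50, 6))
#   means, stds = updated_distribution_calc(prior, [0.92, 0.88, 0.90, 0.91, 0.89, 0.93])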
def probdistributions(means,stds):
    '''produces the truncated normal distributions for several means and standard deviations
    means: list/array of n means
    stds: list/array of n standard deviations
    return: n discretized probability distributions over the sparing-factor grid [0.00, 1.40] in steps of 0.01'''
distributions = np.zeros(141*len(means)).reshape(len(means),141)
for i in range(len(means)):
X = get_truncated_normal(means[i], stds[i], low=0, upp=1.4)
for index,value in enumerate(np.arange(0,1.41,0.01)):
distributions[i][index] = X.cdf(value+0.004999999999999999999)-X.cdf(value-0.005)
return distributions
def BED_calc0( dose, ab,sparing = 1):
BED = sparing*dose*(1+(sparing*dose)/ab)
return BED
def BED_calc( sf, ab,actionspace):
BED = np.outer(sf,actionspace)*(1+np.outer(sf,actionspace)/ab) #produces a sparing factors x actions space array
return BED
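# Worked example of the BED model above: a physical fraction dose of 6 Gy gives a tumor BED of
# BED_calc0(6, 10) = 6*(1 + 6/10) = 9.6 Gy, while an organ at risk with sparing factor 0.7 and
# alpha/beta = 3 receives BED_calc0(6, 3, 0.7) = 4.2*(1 + 4.2/3) = 10.08 Gy.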
def value_eval(sparing_factors,data,abt = 10,abn = 3,bound = 90,riskfactor = 0):
    '''calculates the best policy for a list of k sparing factors with k-1 fractions, based on a dynamic programming algorithm. The probability distribution is estimated from prior patient data.
    sparing_factors: list/array of k sparing factors. A planning sparing factor is necessary!
    data: nxk dimensional data of n prior patients with k sparing factors each
    abt: alpha-beta ratio of the tumor
    abn: alpha-beta ratio of the organ at risk
    bound: upper limit of BED in the OAR
    riskfactor: "risk reducing" factor. A risk factor of zero gives fully adaptive fractionation, a risk factor of 0.1 slightly forces the algorithm to stay close to the 6 Gy per fraction plan, and a risk factor of 1 results in the 6 Gy per fraction plan.
    return:
    Values: a (k-2) x BEDT x sf dimensional matrix with the value of each BEDT/sf state
    Values4: values of the first fraction
    policy: a (k-2) x BEDT x sf dimensional matrix with the policy of each BEDT/sf state. Fourth index = first fraction, first index = last fraction.
    policy4: policy of the first fraction'''
sf= np.arange(0,1.41,0.01) #list of all possible sparing factors
BEDT = np.arange(0,90.3,0.1) #list of all possible Biological effective doses
    Values = np.zeros(len(BEDT)*len(sf)*4).reshape(4,len(BEDT),len(sf)) #3d value array: first index = fraction state, second = accumulated BED, third = sparing factor
actionspace = np.arange(0,22.4,0.1) #list of all possible dose actions
[means,stds] =updated_distribution_calc(data,sparing_factors)
distributions = probdistributions(means,stds)
policy = np.zeros((4,len(BEDT),len(sf)))
upperbound = 90.2
start = time.time()
#here we add the calculation of the distance to the standard treatment
useless,calculator = np.meshgrid(np.zeros(len(actionspace)),sf) #calculator is matrix that has the correct sparing factors
actionspace_expand,useless = np.meshgrid(actionspace,sf)
risk_penalty = abs(6/calculator-actionspace_expand)
delivered_doses = np.round(BED_calc(sf,abn,actionspace),1)
    BEDT_rew = BED_calc(1, abt,actionspace) #reward: the BED delivered to the tumor (sparing factor 1) for each possible action
BEDT_transformed, meaningless = np.meshgrid(BEDT_rew,np.zeros(len(sf)))
    risk_penalty[0] = risk_penalty[1] #a sparing factor of 0 would make the 6/sf term infinite, so reuse the row of the smallest non-zero sparing factor
for update_loop in range (0,5):
prob = distributions[update_loop]
        for state in range(0,5-update_loop): #we have five fractions with 2 special cases: 0 and 4
print(str(state+1) +' loop done')
            if state == 4: #first state with no prior dose delivered, so we don't loop through BEDT
future_bed = delivered_doses
                future_bed[future_bed > upperbound] = upperbound #any dose surpassing the upper bound (90.2) is clipped to it; states at the upper bound are penalized so strongly that the program avoids them (the bound can be adapted)
future_values_prob = (Values[state-1][(future_bed*10).astype(int)]*prob).sum(axis = 2) #in this array are all future values multiplied with the probability of getting there. shape = sparing factors x actionspace
penalties = np.zeros(future_bed.shape)
penalties[future_bed > bound] = -(future_bed[future_bed > bound]-bound)*5
Vs = future_values_prob + BEDT_transformed + penalties - risk_penalty*riskfactor
policy4 = Vs.argmax(axis=1)
Values4 = Vs.max(axis=1)
else:
future_values_prob_all = (Values[state-1]*prob).sum(axis = 1)
for bed in range(len(BEDT)): #this and the next for loop allow us to loop through all states
future_bed = delivered_doses + bed/10
                    future_bed[future_bed > upperbound] = upperbound #any dose surpassing the upper bound (90.2) is clipped to it.
if state == 0: #last state no more further values to add
penalties = np.zeros(future_bed.shape)
penalties[future_bed > bound] = -(future_bed[future_bed > bound]-bound)*5
penalties[future_bed == upperbound] = -10000 #here we produced the penalties for all the values surpassing the limit
Vs = BEDT_transformed + penalties# Value of each sparing factor for each action
else:
penalties = np.zeros(future_bed.shape)
penalties[future_bed == upperbound] = -100
future_values_prob = (future_values_prob_all[(future_bed*10).astype(int)])#in this array are all future values multiplied with the probability of getting there. shape = sparing factors x actionspace
Vs = future_values_prob + BEDT_transformed + penalties - risk_penalty*riskfactor
best_action = Vs.argmax(axis=1)
valer = Vs.max(axis=1)
policy[state][bed] = best_action
Values[state][bed] = valer
end = time.time()
print('time elapsed = ' +str(end - start))
return [Values,policy,Values4,policy4]
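# Minimal usage sketch (synthetic numbers, not patient data): six sparing factors, of which the
# first is the planning factor, plus a 50x6 array of prior-patient sparing factors are enough to
# run the dynamic program over the 4 x 903 x 141 state space; expect a noticeable runtime.
#   prior_data = np.random.normal(0.9, 0.04, size=(50, 6))
#   sf_observed = [0.92, 0.88, 0.90, 0.91, 0.89, 0.93]
#   Values, policy, Values4, policy4 = value_eval(sf_observed, prior_data)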
def result_calc_BEDNT(pol4,pol,sparing_factors,abt = 10,abn = 3): #this function calculates the fractionation plan according to the policies computed by the dynamic programming (reinforcement learning) algorithm
    '''this function gives the treatment plan for a set of sparing factors, based on the sparing factors that have been used to calculate the optimal policy
    pol4 and pol are the matrices returned by the value_eval function
    pol4: first fraction policy
    pol: second - fifth fraction policy
    sparing_factors: sparing factors that should be used to make a plan, as a list starting from the first fraction'''
actionspace = np.arange(0,22.4,0.1) #list of all possible dose actions
total_bedt = BED_calc0(actionspace[pol4[round(sparing_factors[0]*100)]],abt)
total_bednt = BED_calc0(actionspace[pol4[round(sparing_factors[0]*100)]],abn,sparing_factors[0])
print('fraction 1 dose delivered: ',actionspace[pol4[round(sparing_factors[0]*100)]])
print('total accumulated biological effective dose in tumor; fraction 1 = ',round(total_bedt,1))
print('total accumulated biological effective dose in normal tissue; fraction 1 = ',round(total_bednt,1))
for index,fraction in enumerate(range(3,-1,-1)):
if fraction == 0:
dose_action = (-sparing_factors[index+1]+np.sqrt(sparing_factors[index+1]**2+4*sparing_factors[index+1]**2*(90-total_bednt)/abn))/(2*sparing_factors[index+1]**2/abn)
else:
            dose_action = actionspace[pol[fraction][int(np.round(total_bednt*10))][int(round(sparing_factors[index+1]*100))].astype(int)] #look up the policy at the nearest 0.1 Gy BED state and 0.01 sparing-factor bin
dose_delivered = BED_calc0(dose_action,abt)
total_bedt += dose_delivered
total_bednt += BED_calc0(dose_action,abn,sparing_factors[index+1])
print('fraction ', index+2, 'dose delivered: ', round(dose_action,1))
print('total accumulated dose in tumor; fraction ', index+2, '=', round(total_bedt,1))
print('total accumulated dose in normal tissue; fraction ', index+2, '=', round(total_bednt,1))
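# End-to-end sketch (continuing the synthetic example given after value_eval): feed the returned
# policies back into result_calc_BEDNT together with the five sparing factors observed during the
# fractions themselves (planning factor excluded) to print the dose per fraction and the
# accumulated tumor/OAR BED.
#   result_calc_BEDNT(policy4, policy, sf_observed[1:])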
| 68.4
| 250
| 0.693317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,895
| 0.492481
|
e16731d27b2926a6e8972922d05bae1f6e5d75bb
| 240
|
py
|
Python
|
ltc/base/admin.py
|
v0devil/jltom
|
b302a39a187b8e1154c6deda636a4db8b30bb40b
|
[
"MIT"
] | 4
|
2016-12-30T13:26:59.000Z
|
2017-04-26T12:07:36.000Z
|
ltc/base/admin.py
|
v0devil/jltom
|
b302a39a187b8e1154c6deda636a4db8b30bb40b
|
[
"MIT"
] | null | null | null |
ltc/base/admin.py
|
v0devil/jltom
|
b302a39a187b8e1154c6deda636a4db8b30bb40b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from ltc.base.models import Project, Test, Configuration
# Register your models here.
admin.site.register(Project)
admin.site.register(Test)
admin.site.register(Configuration)
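# Equivalent decorator-based registration (a sketch; the list_display fields are illustrative and
# may not match the actual model fields):
#
#   @admin.register(Project)
#   class ProjectAdmin(admin.ModelAdmin):
#       list_display = ('id',)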
| 24
| 56
| 0.808333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 56
| 0.233333
|