| column | type | min | max | nullable |
|---|---|---|---|---|
| hexsha | stringlengths | 40 | 40 | no |
| size | int64 | 5 | 2.06M | no |
| ext | stringclasses (10 values) | | | no |
| lang | stringclasses (1 value) | | | no |
| max_stars_repo_path | stringlengths | 3 | 248 | no |
| max_stars_repo_name | stringlengths | 5 | 125 | no |
| max_stars_repo_head_hexsha | stringlengths | 40 | 78 | no |
| max_stars_repo_licenses | listlengths | 1 | 10 | no |
| max_stars_count | int64 | 1 | 191k | yes (⌀) |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 | 24 | yes (⌀) |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 | 24 | yes (⌀) |
| max_issues_repo_path | stringlengths | 3 | 248 | no |
| max_issues_repo_name | stringlengths | 5 | 125 | no |
| max_issues_repo_head_hexsha | stringlengths | 40 | 78 | no |
| max_issues_repo_licenses | listlengths | 1 | 10 | no |
| max_issues_count | int64 | 1 | 67k | yes (⌀) |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 | 24 | yes (⌀) |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 | 24 | yes (⌀) |
| max_forks_repo_path | stringlengths | 3 | 248 | no |
| max_forks_repo_name | stringlengths | 5 | 125 | no |
| max_forks_repo_head_hexsha | stringlengths | 40 | 78 | no |
| max_forks_repo_licenses | listlengths | 1 | 10 | no |
| max_forks_count | int64 | 1 | 105k | yes (⌀) |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 | 24 | yes (⌀) |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 | 24 | yes (⌀) |
| content | stringlengths | 5 | 2.06M | no |
| avg_line_length | float64 | 1 | 1.02M | no |
| max_line_length | int64 | 3 | 1.03M | no |
| alphanum_fraction | float64 | 0 | 1 | no |
| count_classes | int64 | 0 | 1.6M | no |
| score_classes | float64 | 0 | 1 | no |
| count_generators | int64 | 0 | 651k | no |
| score_generators | float64 | 0 | 1 | no |
| count_decorators | int64 | 0 | 990k | no |
| score_decorators | float64 | 0 | 1 | no |
| count_async_functions | int64 | 0 | 235k | no |
| score_async_functions | float64 | 0 | 1 | no |
| count_documentation | int64 | 0 | 1.04M | no |
| score_documentation | float64 | 0 | 1 | no |

hexsha: `4daef32de5d0a4eada8ed86fdce977e9cb3a3093` | size: 117 | ext: py | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | mandaw/examples/window.py | mandaw2014/MandawEngineSDL | 597798e556751c57945b1ed6302f17cb6e9d8d22 | MIT | null | null | null |
| max_issues | mandaw/examples/window.py | mandaw2014/MandawEngineSDL | 597798e556751c57945b1ed6302f17cb6e9d8d22 | MIT | null | null | null |
| max_forks | mandaw/examples/window.py | mandaw2014/MandawEngineSDL | 597798e556751c57945b1ed6302f17cb6e9d8d22 | MIT | 1 | 2021-09-21T08:28:50.000Z | 2021-09-21T08:28:50.000Z |

content:
```python
from mandaw import *

mandaw = Mandaw("Window!", width = 800, height = 600, bg_color = (0, 0, 0, 255))
mandaw.loop()
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 23.4 | 80 | 0.641026 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 | 9 / 0.076923 |

hexsha: `4daf588bf7222a0428a4b569a5e2c8de42912a40` | size: 1,333 | ext: py | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | bio-info/bio-info5.py | kyamada101/Python | a9be850b1818fb4784cb84e86b20cf2c61784e38 | MIT | null | null | null |
| max_issues | bio-info/bio-info5.py | kyamada101/Python | a9be850b1818fb4784cb84e86b20cf2c61784e38 | MIT | null | null | null |
| max_forks | bio-info/bio-info5.py | kyamada101/Python | a9be850b1818fb4784cb84e86b20cf2c61784e38 | MIT | null | null | null |

content:
```python
import numpy as np

with open("./dice.txt", 'r') as f:
    input_str = f.read()
input_data = list(map(int, input_str.strip()))

inf = -float('inf')


class box():
    def __init__(self):
        self.v = inf
        self.root = -1

    def __repr__(self):
        return str(self.v)


def run_viterbi(n, k):
    # Memoized Viterbi recursion over the K hidden states.
    if X[n][k].v != inf:
        return X[n][k].v
    if n == 0:
        v = dice_t[k][input_data[n] - 1] + np.log(0.5)
        X[n][k].v = v
        X[n][k].root = 0
        return v
    scores = [run_viterbi(n - 1, l) + transition_t[k][l] for l in range(K)]
    v = dice_t[k][input_data[n] - 1] + np.max(scores)
    X[n][k].v = v
    X[n][k].root = np.argmax(scores)
    return v


N = len(input_data) - 1
K = 2
trans_p = np.array([[0.95, 0.1], [0.05, 0.9]])
dice_p = np.array([[1/6, 1/6, 1/6, 1/6, 1/6, 1/6],
                   [1/10, 1/10, 1/10, 1/10, 1/10, 1/2]])
transition_t = np.log(trans_p)
dice_t = np.log(dice_p)
X = np.array([[box() for l in range(K)] for k in range(N + 1)])
run_viterbi(N, 0)

with open('./dice_result.txt', 'w') as f:
    f.write("Eyes of dice:{}".format(input_str))
    f.write("\n")
    f.write("Anticipation is following: \n")

    def trace(n, k):
        if n > 0:
            trace(n - 1, X[n][k].root)
        if X[n][k].root == 0:
            f.write("F")
        else:
            f.write("L")
        return 0

    trace(N, 0)
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 25.634615 | 104 | 0.534134 | 131 / 0.097981 | 0 / 0 | 0 / 0 | 0 / 0 | 104 / 0.077786 |

hexsha: `4dafa7a5729cfadd647edbfafb8f9ae3d3c677d0` | size: 10,464 | ext: py | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | src/tests/scheduling/schedule_config_test.py | Ket3r/script-server | 919a2b7eb29c7bba7acba8e374a0a5cc696bd859 | Apache-2.0, CC0-1.0 | null | null | null |
| max_issues | src/tests/scheduling/schedule_config_test.py | Ket3r/script-server | 919a2b7eb29c7bba7acba8e374a0a5cc696bd859 | Apache-2.0, CC0-1.0 | null | null | null |
| max_forks | src/tests/scheduling/schedule_config_test.py | Ket3r/script-server | 919a2b7eb29c7bba7acba8e374a0a5cc696bd859 | Apache-2.0, CC0-1.0 | null | null | null |

content:
```python
from unittest import TestCase

from parameterized import parameterized

from scheduling.schedule_config import ScheduleConfig
from utils import date_utils


def to_datetime(short_datetime_string):
    dt_string = short_datetime_string + ':0.000000Z'
    return date_utils.parse_iso_datetime(dt_string.replace(' ', 'T'))


class TestGetNextTime(TestCase):
    @parameterized.expand([
        ('2020-03-19 11:30', '2020-03-15 16:13', 1, 'days', '2020-03-19 16:13'),
        ('2020-03-19 17:30', '2020-03-15 16:13', 1, 'days', '2020-03-20 16:13'),
        ('2020-03-15 11:30', '2020-03-15 16:13', 1, 'days', '2020-03-15 16:13'),
        ('2020-03-14 11:30', '2020-03-15 16:13', 1, 'days', '2020-03-15 16:13'),
        ('2020-03-15 16:13', '2020-03-15 16:13', 1, 'days', '2020-03-15 16:13'),
        ('2020-03-15 16:14', '2020-03-15 16:13', 1, 'days', '2020-03-16 16:13'),
        ('2020-03-19 11:30', '2020-03-15 16:13', 2, 'days', '2020-03-19 16:13'),
        ('2020-03-20 11:30', '2020-03-15 16:13', 2, 'days', '2020-03-21 16:13'),
        ('2020-03-19 16:13', '2020-03-15 16:13', 2, 'days', '2020-03-19 16:13'),
        ('2020-03-18 11:30', '2020-03-15 16:13', 5, 'days', '2020-03-20 16:13'),
        ('2020-03-20 11:30', '2020-03-15 16:13', 24, 'days', '2020-04-08 16:13'),
        ('2020-04-09 11:30', '2020-03-15 16:13', 24, 'days', '2020-05-02 16:13'),
        ('2020-03-19 11:30', '2020-03-15 16:13', 1, 'hours', '2020-03-19 12:13'),
        ('2020-03-19 17:30', '2020-03-15 16:13', 1, 'hours', '2020-03-19 18:13'),
        ('2020-03-15 11:30', '2020-03-15 16:13', 1, 'hours', '2020-03-15 16:13'),
        ('2020-03-14 11:30', '2020-03-15 16:13', 1, 'hours', '2020-03-15 16:13'),
        ('2020-03-15 16:13', '2020-03-15 16:13', 1, 'hours', '2020-03-15 16:13'),
        ('2020-03-15 16:14', '2020-03-15 16:13', 1, 'hours', '2020-03-15 17:13'),
        ('2022-01-02 06:30', '2022-01-01 04:14', 10, 'minutes', '2022-01-02 06:34'),
        # big difference between start and now
        ('2021-12-31 02:30', '2019-01-01 01:31', 10, 'minutes', '2021-12-31 02:31'),
        ('2023-08-29 16:14', '2020-03-15 16:13', 1, 'hours', '2023-08-29 17:13'),
        ('2020-03-19 10:30', '2020-03-15 16:13', 2, 'hours', '2020-03-19 12:13'),
        ('2020-03-19 11:30', '2020-03-15 16:13', 2, 'hours', '2020-03-19 12:13'),
        ('2020-03-19 16:13', '2020-03-15 16:13', 2, 'hours', '2020-03-19 16:13'),
        ('2020-03-18 11:30', '2020-03-15 16:13', 5, 'hours', '2020-03-18 14:13'),
        ('2020-03-20 11:30', '2020-03-15 16:13', 24, 'hours', '2020-03-20 16:13'),
        ('2020-04-09 17:30', '2020-03-15 16:13', 24, 'hours', '2020-04-10 16:13'),
        ('2020-03-19 11:30', '2020-03-15 16:13', 1, 'months', '2020-04-15 16:13'),
        ('2020-03-19 17:30', '2020-03-15 16:13', 1, 'months', '2020-04-15 16:13'),
        ('2020-03-15 11:30', '2020-03-15 16:13', 1, 'months', '2020-03-15 16:13'),
        ('2020-03-14 11:30', '2020-03-15 16:13', 1, 'months', '2020-03-15 16:13'),
        ('2020-03-15 16:13', '2020-03-15 16:13', 1, 'months', '2020-03-15 16:13'),
        ('2020-03-15 16:14', '2020-03-15 16:13', 1, 'months', '2020-04-15 16:13'),
        ('2020-04-01 16:11', '2020-03-31 16:13', 1, 'months', '2020-04-30 16:13'),
        ('2021-01-31 20:00', '2021-01-31 16:13', 1, 'months', '2021-02-28 16:13'),  # Roll to February
        ('2020-01-31 20:00', '2020-01-31 16:13', 1, 'months', '2020-02-29 16:13'),  # Roll to February leap year
        ('2020-03-19 10:30', '2020-03-15 16:13', 2, 'months', '2020-05-15 16:13'),
        ('2020-04-19 11:30', '2020-03-15 16:13', 2, 'months', '2020-05-15 16:13'),
        ('2020-03-15 16:13', '2020-03-15 16:13', 2, 'months', '2020-03-15 16:13'),
        ('2020-04-01 16:11', '2020-03-31 16:13', 2, 'months', '2020-05-31 16:13'),
        ('2020-03-18 11:30', '2020-03-15 16:13', 5, 'months', '2020-08-15 16:13'),
        ('2020-08-18 11:30', '2020-03-15 16:13', 5, 'months', '2021-01-15 16:13'),
        ('2021-01-18 11:30', '2020-03-15 16:13', 5, 'months', '2021-06-15 16:13'),
        ('2020-03-16 11:30', '2020-03-15 16:13', 13, 'months', '2021-04-15 16:13'),
        ('2020-03-19 11:30', '2020-03-15 16:13', 1, 'weeks', '2020-03-20 16:13', ['monday', 'friday']),
        ('2020-03-15 11:30', '2020-03-15 16:13', 1, 'weeks', '2020-03-16 16:13', ['monday', 'friday']),
        ('2020-03-16 11:30', '2020-03-15 16:13', 1, 'weeks', '2020-03-16 16:13', ['monday', 'friday']),
        ('2020-03-16 16:30', '2020-03-15 16:13', 1, 'weeks', '2020-03-20 16:13', ['monday', 'friday']),
        ('2020-03-20 11:30', '2020-03-15 16:13', 1, 'weeks', '2020-03-20 16:13', ['monday', 'friday']),
        ('2020-04-04 11:30', '2020-03-15 16:13', 1, 'weeks', '2020-04-06 16:13', ['monday', 'friday']),
        ('2020-04-07 11:30', '2020-03-15 16:13', 1, 'weeks', '2020-04-10 16:13', ['monday', 'friday']),
        ('2020-03-16 16:13', '2020-03-16 16:13', 1, 'weeks', '2020-03-16 16:13', ['monday', 'friday']),
        ('2020-03-16 16:14', '2020-03-16 16:13', 1, 'weeks', '2020-03-20 16:13', ['monday', 'friday']),
        # Test for testing start date on different weekdays, now tuesday
        ('2020-04-07 1:30', '2020-03-15 16:13', 1, 'weeks', '2020-04-08 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-07 2:30', '2020-03-16 16:13', 1, 'weeks', '2020-04-08 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-07 3:30', '2020-03-17 16:13', 1, 'weeks', '2020-04-08 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-07 4:30', '2020-03-18 16:13', 1, 'weeks', '2020-04-08 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-07 5:30', '2020-03-19 16:13', 1, 'weeks', '2020-04-08 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-07 6:30', '2020-03-20 16:13', 1, 'weeks', '2020-04-08 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-07 7:30', '2020-03-21 16:13', 1, 'weeks', '2020-04-08 16:13', ['monday', 'wednesday', 'friday']),
        # Test for testing start date on different weekdays, now thursday
        ('2020-04-09 1:30', '2020-03-15 16:13', 1, 'weeks', '2020-04-10 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-09 2:30', '2020-03-16 16:13', 1, 'weeks', '2020-04-10 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-09 3:30', '2020-03-17 16:13', 1, 'weeks', '2020-04-10 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-09 4:30', '2020-03-18 16:13', 1, 'weeks', '2020-04-10 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-09 5:30', '2020-03-19 16:13', 1, 'weeks', '2020-04-10 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-09 6:30', '2020-03-20 16:13', 1, 'weeks', '2020-04-10 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-09 7:30', '2020-03-21 16:13', 1, 'weeks', '2020-04-10 16:13', ['monday', 'wednesday', 'friday']),
        # Test for testing start date on different weekdays, now saturday
        ('2020-04-11 1:30', '2020-03-15 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-11 2:30', '2020-03-16 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-11 3:30', '2020-03-17 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-11 4:30', '2020-03-18 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-11 5:30', '2020-03-19 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-11 6:30', '2020-03-20 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-11 7:30', '2020-03-21 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        # Test for testing start date on different weekdays, now monday
        ('2020-04-13 1:30', '2020-03-15 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-13 2:30', '2020-03-16 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-13 3:30', '2020-03-17 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-13 4:30', '2020-03-18 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-13 5:30', '2020-03-19 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-13 6:30', '2020-03-20 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        ('2020-04-13 7:30', '2020-03-21 16:13', 1, 'weeks', '2020-04-13 16:13', ['monday', 'wednesday', 'friday']),
        # Test for testing start date on different weekdays, now wednesday, when larger interval
        ('2020-09-16 1:30', '2020-03-14 16:13', 1, 'weeks', '2020-09-19 16:13', ['tuesday', 'saturday']),
        ('2020-09-16 2:30', '2020-03-15 16:13', 1, 'weeks', '2020-09-19 16:13', ['tuesday', 'saturday']),
        ('2020-09-16 3:30', '2020-03-16 16:13', 1, 'weeks', '2020-09-19 16:13', ['tuesday', 'saturday']),
        ('2020-09-16 4:30', '2020-03-17 16:13', 1, 'weeks', '2020-09-19 16:13', ['tuesday', 'saturday']),
        ('2020-09-16 5:30', '2020-03-18 16:13', 1, 'weeks', '2020-09-19 16:13', ['tuesday', 'saturday']),
        ('2020-09-16 6:30', '2020-03-19 16:13', 1, 'weeks', '2020-09-19 16:13', ['tuesday', 'saturday']),
        ('2020-09-16 7:30', '2020-03-20 16:13', 1, 'weeks', '2020-09-19 16:13', ['tuesday', 'saturday']),
        ('2020-03-16 16:30', '2020-03-15 16:13', 1, 'weeks', '2020-03-18 16:13', ['wednesday']),
        ('2020-03-19 11:30', '2020-03-15 16:13', 2, 'weeks', '2020-03-23 16:13', ['monday', 'friday']),
        ('2020-03-24 11:30', '2020-03-15 16:13', 2, 'weeks', '2020-03-27 16:13', ['monday', 'friday']),
        ('2020-06-07 17:30', '2020-03-15 16:13', 2, 'weeks', '2020-06-15 16:13', ['monday', 'friday']),
        ('2020-06-07 17:30', '2020-03-15 16:13', 2, 'weeks', '2020-06-16 16:13', ['tuesday', 'wednesday']),
    ])
    def test_next_day_when_repeatable(self, now_dt, start, period, unit, expected, weekdays=None):
        date_utils._mocked_now = to_datetime(now_dt)

        config = ScheduleConfig(True, to_datetime(start))
        config.repeat_period = period
        config.repeat_unit = unit
        config.weekdays = weekdays

        next_time = config.get_next_time()
        self.assertEqual(to_datetime(expected), next_time)

    def tearDown(self) -> None:
        super().tearDown()
        date_utils._mocked_now = None
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 79.877863 | 115 | 0.549981 | 10,141 / 0.969132 | 0 / 0 | 10,005 / 0.956135 | 0 / 0 | 7,206 / 0.688647 |

hexsha: `4db08c1ad06a4f3c0a888874af940f73222f14eb` | size: 1,458 | ext: py | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | mojo/python/tests/bindings_constants_unittest.py | zbowling/mojo | 4d2ed40dc2390ca98a6fea0580e840535878f11c | BSD-3-Clause | 1 | 2020-04-28T14:35:10.000Z | 2020-04-28T14:35:10.000Z |
| max_issues | mojo/python/tests/bindings_constants_unittest.py | TribeMedia/sky_engine | 4a3894ed246327931b198a7d64652bd0b615b036 | BSD-3-Clause | null | null | null |
| max_forks | mojo/python/tests/bindings_constants_unittest.py | TribeMedia/sky_engine | 4a3894ed246327931b198a7d64652bd0b615b036 | BSD-3-Clause | 1 | 2020-04-28T14:35:11.000Z | 2020-04-28T14:35:11.000Z |

content:
```python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import math
import unittest

# Generated files
# pylint: disable=F0401
import sample_service_mojom
import test_constants_mojom


class ConstantBindingsTest(unittest.TestCase):

    def testConstantGeneration(self):
        self.assertEquals(test_constants_mojom.INT8_VALUE, -2)
        self.assertEquals(test_constants_mojom.UINT64_VALUE, 9999999999999999999)
        self.assertEquals(test_constants_mojom.DOUBLE_INFINITY,
                          float('inf'))
        self.assertEquals(test_constants_mojom.DOUBLE_NEGATIVE_INFINITY,
                          float('-inf'))
        self.assertTrue(math.isnan(test_constants_mojom.DOUBLE_NA_N))
        self.assertEquals(test_constants_mojom.FLOAT_INFINITY,
                          float('inf'))
        self.assertEquals(test_constants_mojom.FLOAT_NEGATIVE_INFINITY,
                          float('-inf'))
        self.assertTrue(math.isnan(test_constants_mojom.FLOAT_NA_N))

    def testConstantOnStructGeneration(self):
        self.assertEquals(test_constants_mojom.StructWithConstants.INT8_VALUE, 5)

    def testStructImmutability(self):
        with self.assertRaises(AttributeError):
            sample_service_mojom.Foo.FOOBY = 0
        with self.assertRaises(AttributeError):
            del sample_service_mojom.Foo.FOOBY
        with self.assertRaises(AttributeError):
            sample_service_mojom.Foo.BAR = 1
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 37.384615 | 77 | 0.752401 | 1,166 / 0.799726 | 0 / 0 | 0 / 0 | 0 / 0 | 221 / 0.151578 |

hexsha: `4db107eb3d9119fdeaf236399451aa583978436f` | size: 5,219 | ext: py | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | home/pi/blissflixx/lib/chanutils/chanutils.py | erick-guerra/Royalbox | 967dbbdddc94b9968e6eba873f0d20328fd86f66 | MIT | 1 | 2022-01-29T11:17:58.000Z | 2022-01-29T11:17:58.000Z |
| max_issues | home/pi/blissflixx/lib/chanutils/chanutils.py | erick-guerra/Royalbox | 967dbbdddc94b9968e6eba873f0d20328fd86f66 | MIT | null | null | null |
| max_forks | home/pi/blissflixx/lib/chanutils/chanutils.py | erick-guerra/Royalbox | 967dbbdddc94b9968e6eba873f0d20328fd86f66 | MIT | null | null | null |

content:
```python
import requests, lxml.html, lxml.etree, re
import htmlentitydefs, urllib, random
from lxml.cssselect import CSSSelector
from StringIO import StringIO
import cherrypy
from cachecontrol import CacheControl

_PROXY_LIST = None

_HEADERS = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'accept-language': 'en-GB,en-US;q=0.8,en;q=0.6',
    'cache-control': 'max-age=0',
    #'user-agent':'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36',
    'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A356 Safari/604.1',
    'Client-ID': 'tq6hq1srip0i37ipzuscegt7viex9fh'  # Just for Twitch API
}


def _get_proxy_url():
    global _PROXY_LIST
    if _PROXY_LIST is None:
        _PROXY_LIST = get_json("http://blissflixx.rocks/feeds/proxies.json")
    p = random.randint(0, len(_PROXY_LIST) - 1)
    return _PROXY_LIST[p]['url']


def _get_proxy_headers(headers):
    headers = headers.copy()
    headers['origin'] = 'blissflixx'
    return headers


def get(url, params=None, proxy=False, session=None):
    headers = _HEADERS
    if proxy:
        if params is not None:
            utfparams = {}
            for k, v in params.iteritems():
                utfparams[k] = unicode(v).encode('utf-8')
            url = url + "?" + urllib.urlencode(utfparams)
        params = {'url': url}
        url = _get_proxy_url()
        headers = _get_proxy_headers(headers)
    if session is None:
        session = new_session()
    cached_sess = CacheControl(session)
    #r = session.get(url, params=params, headers=headers, verify=False)
    r = cached_sess.get(url, params=params, headers=headers, verify=False)
    if r.status_code >= 300:
        raise Exception("Request : '" + url + "' returned: " + str(r.status_code))
    return r


def post(url, payload, proxy=False, session=None):
    headers = _HEADERS
    if proxy:
        payload['__url__'] = url
        url = _get_proxy_url()
        headers = _get_proxy_headers(headers)
    if session is None:
        session = new_session()
    r = session.post(url, data=payload, headers=headers, verify=False)
    if r.status_code >= 300:
        raise Exception("Request : '" + url + "' returned: " + str(r.status_code))
    return r


def post_doc(url, payload, **kwargs):
    r = post(url, payload, **kwargs)
    return lxml.html.fromstring(r.text)


def post_json(url, payload, **kwargs):
    r = post(url, payload, **kwargs)
    return r.json()


def get_doc(url, params=None, **kwargs):
    r = get(url, params=params, **kwargs)
    return lxml.html.fromstring(r.text)


def get_xml(url, params=None, **kwargs):
    r = requests.get(url)
    cherrypy.log(r.content)
    doc = lxml.etree.parse(StringIO(r.content), lxml.etree.XMLParser(encoding="utf-8", recover=True))
    #root = lxml.etree.fromstring(doc)
    return doc


def get_json(url, params=None, **kwargs):
    r = get(url, params=params, **kwargs)
    return r.json()


def new_session():
    return requests.session()


def select_one(tree, expr):
    sel = CSSSelector(expr)
    el = sel(tree)
    if isinstance(el, list) and len(el) > 0:
        return el[0]
    else:
        return None


def select_all(tree, expr):
    sel = CSSSelector(expr)
    return sel(tree)


def get_attr(el, name):
    if el is not None:
        return el.get(name)
    else:
        return None


def get_text(el):
    if el is not None and el.text is not None:
        return el.text.strip()
    else:
        return None


def get_text_content(el):
    if el is not None:
        return el.text_content().strip()
    else:
        return None


def byte_size(num, suffix='B'):
    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(num) < 1024.0:
            return "%3.1f %s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f %s%s" % (num, 'Y', suffix)


def replace_entity(text):
    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16))
                else:
                    return unichr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text  # leave as is
    return re.sub(r"&#?\w+;", fixup, text)


def number_commas(x):
    if type(x) not in [type(0), type(0L)]:
        return '0'
    if x < 0:
        return '-' + number_commas(-x)
    result = ''
    while x >= 1000:
        x, r = divmod(x, 1000)
        result = ",%03d%s" % (r, result)
    return "%d%s" % (x, result)


MOVIE_RE = re.compile(r'(.*)[\(\[]?([12][90]\d\d)[^pP][\(\[]?.*$')
SERIES_RE = re.compile(r'(.*)S(\d\d)E(\d\d).*$')


def movie_title_year(name):
    name = name.replace('.', ' ')
    m = MOVIE_RE.match(name)
    if m is None:
        return {'title': name}
    title = m.group(1)
    if title.endswith('(') or title.endswith('['):
        title = title[:-1]
    title = title.strip()
    year = int(m.group(2))
    return {'title': title, 'year': year}


def series_season_episode(name):
    name = name.replace('.', ' ')
    m = SERIES_RE.match(name)
    if m is None:
        return {'series': name}
    series = m.group(1).strip()
    season = int(m.group(2))
    episode = int(m.group(3))
    return {'series': series, 'season': season, 'episode': episode}
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 27.613757 | 153 | 0.637095 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 | 1,023 / 0.196015 |

hexsha: `4db11a5afb53d4558aa8f33a19f180a1ecbc8f9d` | size: 5,672 | ext: py | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | test/07-text-custom-field-list-test.py | hklarner/couchdb-mango | e519f224423ca4696a61d0065530103dd8c6651b | Apache-2.0 | 39 | 2015-02-04T09:48:20.000Z | 2021-11-09T22:07:45.000Z |
| max_issues | test/07-text-custom-field-list-test.py | hklarner/couchdb-mango | e519f224423ca4696a61d0065530103dd8c6651b | Apache-2.0 | 37 | 2015-02-24T17:59:26.000Z | 2021-05-25T12:20:54.000Z |
| max_forks | test/07-text-custom-field-list-test.py | hklarner/couchdb-mango | e519f224423ca4696a61d0065530103dd8c6651b | Apache-2.0 | 21 | 2015-04-26T05:53:44.000Z | 2021-11-09T22:06:58.000Z |

content:
```python
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

import mango
import unittest


@unittest.skipUnless(mango.has_text_service(), "requires text service")
class CustomFieldsTest(mango.UserDocsTextTests):
    FIELDS = [
        {"name": "favorites.[]", "type": "string"},
        {"name": "manager", "type": "boolean"},
        {"name": "age", "type": "number"},
        # These two are to test the default analyzer for
        # each field.
        {"name": "location.state", "type": "string"},
        {
            "name": "location.address.street",
            "type": "string"
        },
        {"name": "name\\.first", "type": "string"}
    ]

    def test_basic(self):
        docs = self.db.find({"age": 22})
        assert len(docs) == 1
        assert docs[0]["user_id"] == 9

    def test_multi_field(self):
        docs = self.db.find({"age": 22, "manager": True})
        assert len(docs) == 1
        assert docs[0]["user_id"] == 9
        docs = self.db.find({"age": 22, "manager": False})
        assert len(docs) == 0

    def test_element_acess(self):
        docs = self.db.find({"favorites.0": "Ruby"})
        assert len(docs) == 3
        for d in docs:
            assert "Ruby" in d["favorites"]

    # This should throw an exception because we only index the array
    # favorites.[], and not the string field favorites
    def test_index_selection(self):
        try:
            self.db.find({"selector": {"$or": [{"favorites": "Ruby"},
                                               {"favorites.0": "Ruby"}]}})
        except Exception as e:
            assert e.response.status_code == 400

    def test_in_with_array(self):
        vals = ["Lisp", "Python"]
        docs = self.db.find({"favorites": {"$in": vals}})
        assert len(docs) == 10

    # This should also throw an error because we only indexed
    # favorites.[] of type string. For the following query to work, the
    # user has to index favorites.[] of type number, and also
    # favorites.[].Versions.Alpha of type string.
    def test_in_different_types(self):
        vals = ["Random Garbage", 52, {"Versions": {"Alpha": "Beta"}}]
        try:
            self.db.find({"favorites": {"$in": vals}})
        except Exception as e:
            assert e.response.status_code == 400

    # This test differs from the situation where we index everything.
    # When we index everything the actual number of docs that gets
    # returned is 5. That's because of the special situation where we
    # have an array of an array, i.e: [["Lisp"]], because we're indexing
    # specifically favorites.[] of type string. So it does not count
    # the example and we only get 4 back.
    def test_nin_with_array(self):
        vals = ["Lisp", "Python"]
        docs = self.db.find({"favorites": {"$nin": vals}})
        assert len(docs) == 4

    def test_missing(self):
        self.db.find({"location.state": "Nevada"})

    def test_missing_type(self):
        # Raises an exception
        try:
            self.db.find({"age": "foo"})
            raise Exception("Should have thrown an HTTPError")
        except:
            return

    def test_field_analyzer_is_keyword(self):
        docs = self.db.find({"location.state": "New"})
        assert len(docs) == 0
        docs = self.db.find({"location.state": "New Hampshire"})
        assert len(docs) == 1
        assert docs[0]["user_id"] == 10

    # Since our FIELDS list only includes "name\\.first", we should
    # get an error when we try to search for "name.first", since the index
    # for that field does not exist.
    def test_escaped_field(self):
        docs = self.db.find({"name\\.first": "name dot first"})
        assert len(docs) == 1
        assert docs[0]["name.first"] == "name dot first"
        try:
            self.db.find({"name.first": "name dot first"})
            raise Exception("Should have thrown an HTTPError")
        except:
            return

    def test_filtered_search_fields(self):
        docs = self.db.find({"age": 22}, fields=["age", "location.state"])
        assert len(docs) == 1
        assert docs == [{"age": 22, "location": {"state": "Missouri"}}]
        docs = self.db.find({"age": 22}, fields=["age", "Random Garbage"])
        assert len(docs) == 1
        assert docs == [{"age": 22}]
        docs = self.db.find({"age": 22}, fields=["favorites"])
        assert len(docs) == 1
        assert docs == [{"favorites": ["Lisp", "Erlang", "Python"]}]
        docs = self.db.find({"age": 22}, fields=["favorites.[]"])
        assert len(docs) == 1
        assert docs == [{}]
        docs = self.db.find({"age": 22}, fields=["all_fields"])
        assert len(docs) == 1
        assert docs == [{}]

    def test_two_or(self):
        docs = self.db.find({"$or": [{"location.state": "New Hampshire"},
                                     {"location.state": "Don't Exist"}]})
        assert len(docs) == 1
        assert docs[0]["user_id"] == 10

    def test_all_match(self):
        docs = self.db.find({
            "favorites": {
                "$allMatch": {
                    "$eq": "Erlang"
                }
            }
        })
        assert len(docs) == 1
        assert docs[0]["user_id"] == 10
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 35.672956 | 79 | 0.571403 | 5,025 / 0.885931 | 0 / 0 | 5,097 / 0.898625 | 0 / 0 | 2,499 / 0.440585 |

hexsha: `4db2f85a35fd948e670a5be341c3efca737c01ed` | size: 6,534 | ext: py | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | tests/test_reviews_rest.py | miw-upm/betca-tpv-customer-support | e36946b934123a5c139924192a189c5ce8f3864c | MIT | 1 | 2021-05-04T01:33:00.000Z | 2021-05-04T01:33:00.000Z |
| max_issues | tests/test_reviews_rest.py | miw-upm/betca-tpv-customer-support | e36946b934123a5c139924192a189c5ce8f3864c | MIT | null | null | null |
| max_forks | tests/test_reviews_rest.py | miw-upm/betca-tpv-customer-support | e36946b934123a5c139924192a189c5ce8f3864c | MIT | 5 | 2021-04-02T15:42:31.000Z | 2022-03-07T09:02:16.000Z |

content:
```python
from http import HTTPStatus
from unittest import TestCase, mock

import jwt
from fastapi.testclient import TestClient

from src.api.review_resource import REVIEWS
from src.config import config
from src.main import app
from src.models.article import Article
from src.models.review import Review


def _bearer(**payload):
    payload.setdefault("user", "666666003")
    payload.setdefault("name", "customer")
    return "Bearer " + jwt.encode(payload, config.JWT_SECRET, algorithm="HS256")


def mock_articles(arg1) -> [Article]:
    return [Article(barcode="8400000000017", description="Mock most rated article", retailPrice=30),
            Article(barcode="8400000000018", description="Mock", retailPrice=30),
            Article(barcode="8400000000019", description="Mock 2", retailPrice=30)]


def mock_assert_article_existing_and_return(token, barcode) -> Article:
    switcher = {
        "8400000000017": Article(barcode="8400000000017", description="Mock most rated article", retailPrice=30),
        "8400000000024": Article(barcode="8400000000024",
                                 description="Mock second most rated article", retailPrice=5, stock=15),
        "8400000000031": Article(barcode="8400000000031", description="Mock third most rated article", retailPrice=305),
        "8400000000048": Article(barcode="8400000000048", description="Nothing", retailPrice=305),
        "8400000000055": Article(barcode="8400000000055", description="Another article", retailPrice=305),
        "8400000000079": Article(barcode="8400000000079", description="Another of another article", retailPrice=305),
        "8400000000086": Article(barcode="8400000000086", description="Look at this article", retailPrice=305)
    }
    default_article = Article(barcode="8400000000017", description="Mock most rated article", retailPrice=30)
    return switcher.get(barcode, default_article)


def mock_assert_article_existing_without_token(barcode) -> Article:
    return mock_assert_article_existing_and_return("", barcode)


class TestReviewResource(TestCase):
    @classmethod
    def setUpClass(cls):
        cls.bearer = _bearer(role="CUSTOMER")
        cls.client = TestClient(app)

    def test_search_not_token_forbidden_exception(self):
        response = self.client.get(REVIEWS + "/search")
        self.assertEqual(HTTPStatus.FORBIDDEN, response.status_code)

    def test_search_not_role_unauthorized_exception(self):
        bearer = _bearer(role="KK")
        response = self.client.get(REVIEWS + "/search", headers={"Authorization": bearer})
        self.assertEqual(HTTPStatus.FORBIDDEN, response.status_code)

    def test_search_invalid_token_unauthorized_exception(self):
        bearer = _bearer(role="CUSTOMER") + "kkkk"
        response = self.client.get(REVIEWS + "/search", headers={"Authorization": bearer})
        self.assertEqual(HTTPStatus.UNAUTHORIZED, response.status_code)

    def test_search_not_included_role_forbidden_exception(self):
        bearer = _bearer()
        response = self.client.get(REVIEWS + "/search", headers={"Authorization": bearer})
        self.assertEqual(HTTPStatus.UNAUTHORIZED, response.status_code)

    def test_search_expired_token_unauthorized_exception(self):
        bearer = _bearer(exp=1371720939, role="CUSTOMER")
        response = self.client.get(REVIEWS + "/search", headers={"Authorization": bearer})
        self.assertEqual(HTTPStatus.UNAUTHORIZED, response.status_code)

    @mock.patch('src.services.review_service.get_all_bought_articles', side_effect=mock_articles)
    @mock.patch('src.services.review_service.assert_article_existing_and_return',
                side_effect=mock_assert_article_existing_and_return)
    def __read_all(self, get_all_bought_articles, mock_article_existing_and_return):
        bearer = _bearer(user="66", role="CUSTOMER")
        response = self.client.get(REVIEWS + "/search", headers={"Authorization": bearer})
        mock_article_existing_and_return.assert_called()
        get_all_bought_articles.assert_called()
        return response.json()

    @mock.patch('src.services.review_service.assert_article_existing_and_return',
                side_effect=mock_assert_article_existing_and_return)
    def test_create(self, mock_article_existing_and_return):
        creation_review = Review(barcode="8400000000031", score=1.5)
        response = self.client.post(REVIEWS, json=creation_review.dict(), headers={"Authorization": self.bearer})
        self.assertEqual(HTTPStatus.OK, response.status_code)
        self.assertEqual(creation_review.barcode, response.json()['article']['barcode'])
        self.assertEqual(creation_review.score, response.json()['score'])
        mock_article_existing_and_return.assert_called()

    @mock.patch('src.services.review_service.assert_article_existing_and_return',
                side_effect=mock_assert_article_existing_and_return)
    def test_update(self, mock_article_existing_and_return):
        review = self.__read_all()[0]
        update_review = Review(**review, barcode=review['article']['barcode'])
        ide = update_review.id
        update_review.opinion = 'Changed'
        update_review.score = 4.5
        bearer = _bearer(user="66", role="CUSTOMER")
        response = self.client.put(REVIEWS + "/" + ide,
                                   json=update_review.dict(), headers={"Authorization": bearer})
        self.assertEqual(HTTPStatus.OK, response.status_code)
        self.assertIsNotNone(response.json()['article'])
        self.assertEqual('Changed', response.json()['opinion'])
        self.assertEqual(4.5, response.json()['score'])
        mock_article_existing_and_return.assert_called()

    def test_search(self):
        reviews = self.__read_all()
        for review in reviews:
            self.assertIsNotNone(review)

    @mock.patch('src.services.review_service.assert_article_existing_without_token',
                side_effect=mock_assert_article_existing_without_token)
    def test_top_articles(self, assert_article_existing_without_token):
        response = self.client.get(REVIEWS + "/topArticles")
        self.assertEqual(HTTPStatus.OK, response.status_code)
        articles = response.json()
        for article in articles:
            self.assertIsNotNone(article)
        self.assertEqual("8400000000024", articles[0]['barcode'])
        self.assertEqual("8400000000048", articles[1]['barcode'])
        self.assertEqual("8400000000017", articles[2]['barcode'])
        assert_article_existing_without_token.assert_called()
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 50.261538 | 120 | 0.718549 | 4,512 / 0.690542 | 0 / 0 | 3,051 / 0.466942 | 0 / 0 | 1,278 / 0.195592 |

hexsha: `4db7b0957c01b75e339ff138abd1c5327cd961ef` | size: 2,354 | ext: py | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | python/dset/write_dataset_W1BS.py | spongezhang/vlb | 52a6b2ab8608496182ac2a33c961344db4a84333 | BSD-2-Clause | 11 | 2017-09-08T16:32:46.000Z | 2022-02-02T15:28:22.000Z |
| max_issues | python/dset/write_dataset_W1BS.py | albutko/vlb | 437245c0991948eeb36a277937a7e67d389041e4 | BSD-2-Clause | 9 | 2017-09-13T20:22:51.000Z | 2019-03-13T02:38:25.000Z |
| max_forks | python/dset/write_dataset_W1BS.py | albutko/vlb | 437245c0991948eeb36a277937a7e67d389041e4 | BSD-2-Clause | 3 | 2017-09-08T21:07:14.000Z | 2021-02-17T17:42:43.000Z |

content:
```python
import json

sequence_name_list = ['A', 'G', 'L', 'map2photo', 'S']
description_list = ['Viewpoint Appearance', 'Viewpoint', 'ViewPoint Lighting', 'Map to Photo', 'Modality']
label_list = [
    ['arch', 'obama', 'vprice0', 'vprice1', 'vprice2', 'yosemite'],
    ['adam', 'boat', 'ExtremeZoomA', 'face', 'fox', 'graf', 'mag', 'shop', 'there', 'vin'],
    ['amos1', 'bdom', 'brugge_square', 'GC2', 'light', 'madrid',
     'notredame15', 'paintedladies', 'rushmore', 'trevi', 'vatican'],
    ['map1', 'map2', 'map3', 'map4', 'map5', 'map6'],
    ['angiogram', 'brain1', 'EO-IR-2',
     'maunaloa', 'mms68', 'mms75', 'treebranch']
]

#label_list = [
#    ['arch', 'obama', 'vprice0', 'vprice1', 'vprice2', 'yosemite']
#    ]

json_data = {}
json_data['Dataset Name'] = 'W1BS'
json_data['Description'] = 'Baseline Stereo Benchmark'
json_data['url'] = 'http://cmp.felk.cvut.cz/wbs/datasets/W1BS_with_patches.tar.gz'
json_data['Sequence Number'] = len(sequence_name_list)
json_data['Sequence Name List'] = sequence_name_list
json_data['Sequences'] = []

for idx, sequence_name in enumerate(sequence_name_list):
    sequence = {}
    sequence['Name'] = sequence_name
    sequence['Description'] = sequence_name
    sequence['Label'] = description_list[idx]
    sequence['Images'] = []
    sequence['Image Number'] = len(label_list[idx]) * 2
    sequence['Link Number'] = len(label_list[idx])
    sequence['Links'] = []
    for image_idx, image_label in enumerate(label_list[idx]):
        image = {}
        image['file'] = '{}/1/{}.bmp'.format(sequence_name, image_label)
        image['id'] = str(image_label) + '_1'
        image['label'] = str(image_label) + '_1'
        sequence['Images'].append(image)
        image = {}
        image['file'] = '{}/2/{}.bmp'.format(sequence_name, image_label)
        image['id'] = str(image_label) + '_2'
        image['label'] = str(image_label) + '_2'
        sequence['Images'].append(image)
        link = {}
        link['source'] = str(image_label) + '_1'
        link['target'] = str(image_label) + '_2'
        link['file'] = '{}/h/{}.txt'.format(sequence_name, image_label)
        sequence['Links'].append(link)
    json_data['Sequences'].append(sequence)

with open('./datasets/dataset_info/{}.json'.format('W1BS'), 'w') as json_file:
    json.dump(json_data, json_file, indent=2)
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 41.298246 | 102 | 0.607477 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 | 953 / 0.404843 |

hexsha: `4db85d3fc2b5c525eb2343fee5b61c2b7cad4134` | size: 27 | ext: py | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | salman.py | Fayad-hub/Fayad-hub | b132b8266da8a36b6162feb69c67639067a90b69 | BSD-2-Clause | null | null | null |
| max_issues | salman.py | Fayad-hub/Fayad-hub | b132b8266da8a36b6162feb69c67639067a90b69 | BSD-2-Clause | null | null | null |
| max_forks | salman.py | Fayad-hub/Fayad-hub | b132b8266da8a36b6162feb69c67639067a90b69 | BSD-2-Clause | 1 | 2021-05-26T06:06:38.000Z | 2021-05-26T06:06:38.000Z |

content:
```text
<html> salman.py
<html/>
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 6.75 | 16 | 0.592593 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 |
4db8e935817372c07e59b82af45086b871e6303e
| 579
|
py
|
Python
|
broadcast.py
|
InukaRanmira/Image-to-pdf
|
44f7e33b13aba44c03c3ec5c7e4efe4efe0b1911
|
[
"MIT"
] | 1
|
2021-12-24T18:11:49.000Z
|
2021-12-24T18:11:49.000Z
|
broadcast.py
|
InukaRanmira/Image-to-pdf
|
44f7e33b13aba44c03c3ec5c7e4efe4efe0b1911
|
[
"MIT"
] | null | null | null |
broadcast.py
|
InukaRanmira/Image-to-pdf
|
44f7e33b13aba44c03c3ec5c7e4efe4efe0b1911
|
[
"MIT"
] | null | null | null |
```python
from pyrogram import Client, filters
import os

from helper.database import getid

ADMIN = int(os.environ.get("ADMIN", 1696230986))


@Client.on_message(filters.private & filters.user(ADMIN) & filters.command(["broadcast"]))
async def broadcast(bot, message):
    if (message.reply_to_message):
        ms = await message.reply_text("Geting All ids from database ...........")
        ids = getid()
        tot = len(ids)
        await ms.edit(f"Starting Broadcast .... \n Sending Message To {tot} Users")
        for id in ids:
            try:
                await message.reply_to_message.copy(id)
            except:
                pass
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 30.473684 | 90 | 0.690846 | 0 / 0 | 0 / 0 | 446 / 0.770294 | 355 / 0.613126 | 120 / 0.207254 |

hexsha: `4db99233bd49c358c3fdefaa1fa9186de53680eb` | size: 11,351 | ext: py | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | scripts/calibration/cal_methods.py | jielyugt/calibration | 1b9be673fb7ff8cf481e875153b1a7649e3b6e67 | MIT | null | null | null |
| max_issues | scripts/calibration/cal_methods.py | jielyugt/calibration | 1b9be673fb7ff8cf481e875153b1a7649e3b6e67 | MIT | null | null | null |
| max_forks | scripts/calibration/cal_methods.py | jielyugt/calibration | 1b9be673fb7ff8cf481e875153b1a7649e3b6e67 | MIT | null | null | null |

content:
```python
# Calibration methods including Histogram Binning and Temperature Scaling

import numpy as np
from scipy.optimize import minimize
from sklearn.metrics import log_loss, brier_score_loss
import pandas as pd
import time
from tensorflow.keras.losses import categorical_crossentropy
from os.path import join
import sklearn.metrics as metrics

# Imports to get "utility" package
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath("utility"))))
from utility.unpickle_probs import unpickle_probs
from utility.evaluation import ECE, MCE


def softmax(x):
    """
    Compute softmax values for each sets of scores in x.

    Parameters:
        x (numpy.ndarray): array containing m samples with n-dimensions (m,n)
    Returns:
        x_softmax (numpy.ndarray) softmaxed values for initial (m,n) array
    """
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=1, keepdims=1)


class HistogramBinning():
    """
    Histogram Binning as a calibration method. The bins are divided into equal lengths.

    The class contains two methods:
        - fit(probs, true), that should be used with validation data to train the calibration model.
        - predict(probs), this method is used to calibrate the confidences.
    """

    def __init__(self, M=15):
        """
        M (int): the number of equal-length bins used
        """
        self.bin_size = 1. / M  # Calculate bin size
        self.conf = []  # Initiate confidence list
        self.upper_bounds = np.arange(self.bin_size, 1 + self.bin_size, self.bin_size)  # Set bin bounds for intervals

    def _get_conf(self, conf_thresh_lower, conf_thresh_upper, probs, true):
        """
        Inner method to calculate optimal confidence for certain probability range

        Params:
            - conf_thresh_lower (float): start of the interval (not included)
            - conf_thresh_upper (float): end of the interval (included)
            - probs : list of probabilities.
            - true : list with true labels, where 1 is positive class and 0 is negative).
        """
        # Filter labels within probability range
        filtered = [x[0] for x in zip(true, probs) if x[1] > conf_thresh_lower and x[1] <= conf_thresh_upper]
        nr_elems = len(filtered)  # Number of elements in the list.
        if nr_elems < 1:
            return 0
        else:
            # In essence the confidence equals to the average accuracy of a bin
            conf = sum(filtered) / nr_elems  # Sums positive classes
            return conf

    def fit(self, probs, true):
        """
        Fit the calibration model, finding optimal confidences for all the bins.

        Params:
            probs: probabilities of data
            true: true labels of data
        """
        conf = []
        # Go through intervals and add confidence to list
        for conf_thresh in self.upper_bounds:
            temp_conf = self._get_conf((conf_thresh - self.bin_size), conf_thresh, probs=probs, true=true)
            conf.append(temp_conf)
        self.conf = conf

    # Fit based on predicted confidence
    def predict(self, probs):
        """
        Calibrate the confidences

        Param:
            probs: probabilities of the data (shape [samples, classes])
        Returns:
            Calibrated probabilities (shape [samples, classes])
        """
        # Go through all the probs and check what confidence is suitable for it.
        for i, prob in enumerate(probs):
            idx = np.searchsorted(self.upper_bounds, prob)
            probs[i] = self.conf[idx]
        return probs


class TemperatureScaling():

    def __init__(self, temp=1, maxiter=50, solver="BFGS"):
        """
        Initialize class

        Params:
            temp (float): starting temperature, default 1
            maxiter (int): maximum iterations done by optimizer, however 8 iterations have been maximum.
        """
        self.temp = temp
        self.maxiter = maxiter
        self.solver = solver

    def _loss_fun(self, x, probs, true):
        # Calculates the loss using log-loss (cross-entropy loss)
        scaled_probs = self.predict(probs, x)
        loss = log_loss(y_true=true, y_pred=scaled_probs)
        return loss

    # Find the temperature
    def fit(self, logits, true):
        """
        Trains the model and finds optimal temperature

        Params:
            logits: the output from neural network for each class (shape [samples, classes])
            true: one-hot-encoding of true labels.
        Returns:
            the results of optimizer after minimizing is finished.
        """
        true = true.flatten()  # Flatten y_val
        opt = minimize(self._loss_fun, x0=1, args=(logits, true), options={'maxiter': self.maxiter}, method=self.solver)
        self.temp = opt.x[0]
        return opt

    def predict(self, logits, temp=None):
        """
        Scales logits based on the temperature and returns calibrated probabilities

        Params:
            logits: logits values of data (output from neural network) for each class (shape [samples, classes])
            temp: if not set use temperatures find by model or previously set.
        Returns:
            calibrated probabilities (nd.array with shape [samples, classes])
        """
        if not temp:
            return softmax(logits / self.temp)
        else:
            return softmax(logits / temp)


def evaluate(probs, y_true, verbose=False, normalize=False, bins=15):
    """
    Evaluate model using various scoring measures: Error Rate, ECE, MCE, NLL, Brier Score

    Params:
        probs: a list containing probabilities for all the classes with a shape of (samples, classes)
        y_true: a list containing the actual class labels
        verbose: (bool) are the scores printed out. (default = False)
        normalize: (bool) in case of 1-vs-K calibration, the probabilities need to be normalized.
        bins: (int) - into how many bins are probabilities divided (default = 15)
    Returns:
        (error, ece, mce, loss, brier), returns various scoring measures
    """
    preds = np.argmax(probs, axis=1)  # Take maximum confidence as prediction
    if normalize:
        confs = np.max(probs, axis=1) / np.sum(probs, axis=1)
        # Check if everything below or equal to 1?
    else:
        confs = np.max(probs, axis=1)  # Take only maximum confidence

    accuracy = metrics.accuracy_score(y_true, preds) * 100
    error = 100 - accuracy

    # Calculate ECE
    ece = ECE(confs, preds, y_true, bin_size=1 / bins)
    # Calculate MCE
    mce = MCE(confs, preds, y_true, bin_size=1 / bins)

    loss = log_loss(y_true=y_true, y_pred=probs)
    y_prob_true = np.array([probs[i, idx] for i, idx in enumerate(y_true)])  # Probability of positive class
    brier = brier_score_loss(y_true=y_true, y_prob=y_prob_true)  # Brier Score (MSE)

    if verbose:
        print("Accuracy:", accuracy)
        print("Error:", error)
        print("ECE:", ece)
        print("MCE:", mce)
        print("Loss:", loss)
        print("brier:", brier)

    return (error, ece, mce, loss, brier)


def cal_results(fn, path, files, m_kwargs={}, approach="all"):
    """
    Calibrate models scores, using output from logits files and given function (fn).
    There are implemented two different approaches, "all" and "1-vs-K", for calibration;
    the approach of calibration should match the function used for calibration.

    TODO: split calibration of single and all into separate functions for more use cases.

    Params:
        fn (class): class of the calibration method used. It must contain methods "fit" and "predict",
            where the first fits the model and the second outputs calibrated probabilities.
        path (string): path to the folder with logits files
        files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test))
        m_kwargs (dictionary): keyword arguments for the calibration class initialization
        approach (string): "all" for multiclass calibration and "1-vs-K" for 1-vs-K approach.
    Returns:
        df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.
    """
    df = pd.DataFrame(columns=["Name", "Error", "ECE", "MCE", "Loss", "Brier"])

    total_t1 = time.time()

    for i, f in enumerate(files):
        name = "_".join(f.split("_")[1:-1])
        print(name)
        t1 = time.time()

        FILE_PATH = join(path, f)
        (logits_val, y_val), (logits_test, y_test) = unpickle_probs(FILE_PATH)

        if approach == "all":
            y_val = y_val.flatten()

            model = fn(**m_kwargs)
            model.fit(logits_val, y_val)

            probs_val = model.predict(logits_val)
            probs_test = model.predict(logits_test)

            error, ece, mce, loss, brier = evaluate(softmax(logits_test), y_test, verbose=True)  # Test before scaling
            error2, ece2, mce2, loss2, brier2 = evaluate(probs_test, y_test, verbose=False)

            print("Error %f; ece %f; mce %f; loss %f, brier %f" % evaluate(probs_val, y_val, verbose=False, normalize=True))
        else:  # 1-vs-K models
            probs_val = softmax(logits_val)  # Softmax logits
            probs_test = softmax(logits_test)

            K = probs_test.shape[1]

            # Go through all the classes
            for k in range(K):
                # Prep class labels (1 fixed true class, 0 other classes)
                y_cal = np.array(y_val == k, dtype="int")[:, 0]

                # Train model
                model = fn(**m_kwargs)
                model.fit(probs_val[:, k], y_cal)  # Get only one column with probs for given class "k"

                probs_val[:, k] = model.predict(probs_val[:, k])  # Predict new values based on the fitting
                probs_test[:, k] = model.predict(probs_test[:, k])

            # Replace NaN with 0, as it should be close to zero  # TODO is it needed?
            idx_nan = np.where(np.isnan(probs_test))
            probs_test[idx_nan] = 0
            idx_nan = np.where(np.isnan(probs_val))
            probs_val[idx_nan] = 0

            # Get results for test set
            error, ece, mce, loss, brier = evaluate(softmax(logits_test), y_test, verbose=True, normalize=False)
            error2, ece2, mce2, loss2, brier2 = evaluate(probs_test, y_test, verbose=False, normalize=True)

            print("Error %f; ece %f; mce %f; loss %f, brier %f" % evaluate(probs_val, y_val, verbose=False, normalize=True))

        df.loc[i * 2] = [name, error, ece, mce, loss, brier]
        df.loc[i * 2 + 1] = [(name + "_calib"), error2, ece2, mce2, loss2, brier2]

        t2 = time.time()
        print("Time taken:", (t2 - t1), "\n")

    total_t2 = time.time()
    print("Total time taken:", (total_t2 - total_t1))

    return df
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 36.616129 | 124 | 0.602854 | 4,664 / 0.410889 | 0 / 0 | 0 / 0 | 0 / 0 | 5,541 / 0.488151 |

hexsha: `4db9d563f39ee0c9eaa0404dfef96153f8e1cbb5` | size: 2,259 | ext: py | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | neuralmonkey/nn/projection.py | Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | BSD-3-Clause | 5 | 2017-04-24T21:10:03.000Z | 2019-05-22T13:19:35.000Z |
| max_issues | neuralmonkey/nn/projection.py | Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | BSD-3-Clause | null | null | null |
| max_forks | neuralmonkey/nn/projection.py | Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | BSD-3-Clause | 5 | 2017-04-25T01:36:44.000Z | 2019-12-13T15:04:03.000Z |

content:
```python
"""Module which implements various types of projections."""

from typing import List, Callable

import tensorflow as tf

from neuralmonkey.nn.utils import dropout


def maxout(inputs: tf.Tensor,
           size: int,
           scope: str = "MaxoutProjection") -> tf.Tensor:
    """Apply a maxout operation.

    Implementation of Maxout layer (Goodfellow et al., 2013).
    http://arxiv.org/pdf/1302.4389.pdf

    z = Wx + b
    y_i = max(z_{2i-1}, z_{2i})

    Arguments:
        inputs: A tensor or list of tensors. It should be 2D tensors with
            equal length in the first dimension (batch size)
        size: The size of dimension 1 of the output tensor.
        scope: The name of the scope used for the variables

    Returns:
        A tensor of shape batch x size
    """
    with tf.variable_scope(scope):
        projected = tf.layers.dense(inputs, size * 2, name=scope)
        maxout_input = tf.reshape(projected, [-1, 1, 2, size])
        maxpooled = tf.nn.max_pool(
            maxout_input, [1, 1, 2, 1], [1, 1, 2, 1], "SAME")
        reshaped = tf.reshape(maxpooled, [-1, size])
        return reshaped


def multilayer_projection(
        input_: tf.Tensor,
        layer_sizes: List[int],
        train_mode: tf.Tensor,
        activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.relu,
        dropout_keep_prob: float = 1.0,
        scope: str = "mlp") -> tf.Tensor:
    mlp_input = input_

    with tf.variable_scope(scope):
        for i, size in enumerate(layer_sizes):
            mlp_input = tf.layers.dense(
                mlp_input,
                size,
                activation=activation,
                name="mlp_layer_{}".format(i))
            mlp_input = dropout(mlp_input, dropout_keep_prob, train_mode)

    return mlp_input


def glu(input_: tf.Tensor,
        gating_fn: Callable[[tf.Tensor], tf.Tensor] = tf.sigmoid) -> tf.Tensor:
    """Apply a Gated Linear Unit.

    Gated Linear Unit - Dauphin et al. (2016).
    http://arxiv.org/abs/1612.08083
    """
    dimensions = input_.get_shape().as_list()
    if dimensions[-1] % 2 != 0:
        raise ValueError("Input size should be an even number")

    lin, nonlin = tf.split(input_, 2, axis=len(dimensions) - 1)
    return lin * gating_fn(nonlin)
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 29.723684 | 79 | 0.611332 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 | 776 / 0.343515 |

hexsha: `4db9da7c1b63e7750aa55027e3ed9ae8620596ff` | size: 148 | ext: py | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | test_default_application/application.py | Ca11MeE/dophon | 6737b0f0dc9ec2c2229865940c3c6d6ee326fc28 | Apache-2.0 | 1 | 2018-08-13T09:57:34.000Z | 2018-08-13T09:57:34.000Z |
| max_issues | test_default_application/application.py | Ca11MeE/dophon | 6737b0f0dc9ec2c2229865940c3c6d6ee326fc28 | Apache-2.0 | null | null | null |
| max_forks | test_default_application/application.py | Ca11MeE/dophon | 6737b0f0dc9ec2c2229865940c3c6d6ee326fc28 | Apache-2.0 | null | null | null |

content:
```python
# properties detail please read \
# default_properties.py in dophon_properties module \
# inside your used dophon module package.

# author: CallMeE
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 29.6 | 53 | 0.790541 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 | 144 / 0.972973 |
4dbb39dfe98094fd9498b0f54ec487acfb06c3ae
| 5,989
|
py
|
Python
|
afk.py
|
bsoyka/sunset-bot
|
ea05000e52e1883ddba77ab754e5f733c8b3375c
|
[
"MIT"
] | 1
|
2021-06-21T16:58:48.000Z
|
2021-06-21T16:58:48.000Z
|
afk.py
|
bsoyka/sunset-bot
|
ea05000e52e1883ddba77ab754e5f733c8b3375c
|
[
"MIT"
] | 4
|
2021-08-13T16:52:51.000Z
|
2021-09-01T13:05:42.000Z
|
afk.py
|
sunset-vacation/bot
|
ea05000e52e1883ddba77ab754e5f733c8b3375c
|
[
"MIT"
] | 4
|
2021-06-21T22:16:12.000Z
|
2021-08-11T21:01:19.000Z
|
```python
from datetime import datetime
from textwrap import shorten
from typing import Optional, Union

import discord
from discord.abc import Messageable
from discord.errors import Forbidden
from discord.ext.commands import Bot, Cog, Context, check, command, is_owner
from discord.utils import get

from config import CONFIG
from database import Afk as DbAfk
from database import User as DbUser
from database import get_user


def afk_access(ctx: Context) -> bool:
    return any(
        role.id in (CONFIG.guild.roles.afk_access, CONFIG.guild.roles.staff)
        for role in ctx.author.roles
    )


class AfkCog(Cog, name='AFK'):
    def __init__(self, bot: Bot):
        self.bot = bot

    @command(name='afk')
    @check(afk_access)
    async def afk_command(self, ctx: Context, *, reason: str) -> None:
        """Marks you as AFK

        Shows the given reason (shortened to 75 characters) when someone mentions you in Sunset City and adds `[AFK]` to your nickname

        AFK status is automatically removed when you start typing or send a message in our server
        """
        reason = shorten(reason, 75, placeholder='...')

        account = get_user(ctx.author.id)
        afk = DbAfk(reason=reason, old_nick=ctx.author.nick)

        try:
            await ctx.author.edit(nick='[AFK] ' + ctx.author.display_name)
        except Forbidden:
            pass

        account.afk = afk
        account.save()

        embed = discord.Embed(
            title='You have been marked AFK', color=discord.Color.blurple()
        )
        embed.add_field(name='Reason', value=reason)
        embed.set_footer(
            text='This will be cleared automatically when you start typing in '
            'Sunset City'
        )

        await ctx.send(embed=embed, delete_after=5)
        await ctx.message.delete()

    @command(name='removeafk')
    @is_owner()
    async def remove_afk(self, ctx: Context, user: discord.Member) -> None:
        await self.remove_afk_if_needed(get_user(user.id), user, ctx.channel)
        await ctx.message.add_reaction('✅')

    async def remove_afk_if_needed(
        self,
        account: DbUser,
        user: discord.Member,
        channel: Optional[discord.TextChannel] = None,
    ) -> None:
        if account.afk is None:
            return

        if channel is None:
            channel = self.bot.get_channel(CONFIG.guild.channels.chat)

        try:
            await user.edit(nick=account.afk.old_nick)
        except Forbidden:
            pass

        await channel.send(
            user.mention,
            embed=discord.Embed(
                title='Your AFK status has been removed',
                color=discord.Color.blurple(),
            ),
            delete_after=4,
        )

        del account.afk
        account.save()

    @Cog.listener()
    async def on_message(self, message: discord.Message) -> None:
        if message.author.bot:
            return

        from_account = get_user(message.author.id)
        if from_account is not None:
            await self.remove_afk_if_needed(
                from_account, message.author, message.channel
            )

        for mentioned_user in message.mentions:
            if (
                mentioned_user.bot
                or self.bot.get_user(mentioned_user.id) is None
            ):
                return

            mentioned_account = get_user(mentioned_user.id)

            if (
                mentioned_account is not None
                and mentioned_account.afk is not None
            ):
                old_name = (
                    mentioned_account.afk.old_nick
                    if mentioned_account.afk.old_nick is not None
                    else mentioned_user.name
                )

                embed = discord.Embed(
                    title=f'{old_name} is currently AFK',
                    color=discord.Color.gold(),
                )
                embed.add_field(
                    name='Reason', value=mentioned_account.afk.reason
                )

                await message.reply(embed=embed, delete_after=8)

    @Cog.listener()
    async def on_typing(
        self,
        channel: Messageable,
        user: Union[discord.User, discord.Member],
        when: datetime,
    ) -> None:
        if user.bot or type(user) != discord.Member:
            return

        from_account = get_user(user.id)
        if from_account is not None:
            await self.remove_afk_if_needed(from_account, user, channel)

    @Cog.listener()
    async def on_raw_reaction_add(
        self, payload: discord.RawReactionActionEvent
    ) -> None:
        user = self.bot.get_user(payload.user_id)
        if user.bot or payload.member is None:
            return

        from_account = get_user(user.id)
        if from_account is not None:
            await self.remove_afk_if_needed(from_account, payload.member)

    @Cog.listener()
    async def on_user_update(
        self, before: discord.User, after: discord.User
    ) -> None:
        member = get(self.bot.guilds, id=CONFIG.guild.id).get_member(after.id)
        if member is None:
            return
        if member.bot:
            return

        from_account = get_user(member.id)
        if from_account is not None:
            await self.remove_afk_if_needed(from_account, member)

    @Cog.listener()
    async def on_invite_create(self, invite: discord.Invite) -> None:
        user = self.bot.guilds[0].get_member(invite.inviter.id)
        if user is None:
            return
        if user.bot:
            return

        from_account = get_user(user.id)
        if from_account is not None:
            await self.remove_afk_if_needed(from_account, user)


def setup(bot: Bot) -> None:
    bot.add_cog(AfkCog(bot))
```

| avg_line_length | max_line_length | alphanum_fraction | classes (count / score) | generators | decorators | async_functions | documentation |
|---|---|---|---|---|---|---|---|
| 29.79602 | 135 | 0.574553 | 5,307 / 0.885829 | 0 / 0 | 4,400 / 0.734435 | 4,950 / 0.826239 | 491 / 0.081956 |

hexsha: `4dbb6e20146ebd5e06052b663dfca81bbe6df5e3` | size: 2,176 | ext: pyde | lang: Python

| | repo path | repo name | head hexsha | licenses | count | event min datetime | event max datetime |
|---|---|---|---|---|---|---|---|
| max_stars | dice/dice.pyde | ahoefnagel/ProjectA-Digital-Components | 79d326f9beb433ded191187ef13d3b5a823057ef | MIT | null | null | null |
| max_issues | dice/dice.pyde | ahoefnagel/ProjectA-Digital-Components | 79d326f9beb433ded191187ef13d3b5a823057ef | MIT | null | null | null |
| max_forks | dice/dice.pyde | ahoefnagel/ProjectA-Digital-Components | 79d326f9beb433ded191187ef13d3b5a823057ef | MIT | null | null | null |

content:
# The shape of the numbers for the dice
dice = { 1: [[None, None, None], [None, "", None], [None, None, None]],
2: [["", None, None], [None, None, None], [None, None, ""]],
3: [["", None, None], [None, "", None], [None, None, ""]],
4: [["", None, ""], [None, None, None], ["", None, ""]],
5: [["", None, ""], [None, "", None], ["", None, ""]],
6: [["", None, ""], ["", None, ""], ["", None, ""]]}
dice_cnt = 3
def setup():
# Variable initialization
global dice_no, score, count, debug
dice_no = []
score = 0
count = 0
debug = True
size(130 * dice_cnt, 155)
def draw():
global dice_no, score, count
    textAlign(LEFT, TOP)
textSize(12)
# Clear the previous draws on the screen
clear()
fill(255)
rect(0, 0, width, height)
if dice_no == []:
pushMatrix()
fill(0)
textAlign(CENTER, CENTER)
textSize(8*dice_cnt)
text("Click anywhere to roll the dice!", width/2, height/2)
popMatrix()
basex = 35 + width/2 - (dice_cnt * 130) / 2
if debug:
basey = 60
else:
basey = 30
for i in range(len(dice_no)):
bx = (basex+i*130)
        # Fill the dice body with an off-white color
fill(232, 227, 218)
# Draw a rect, this will be the dice
rect(bx-30, basey-30, 120, 120, 12, 12, 12, 12)
if dice_no != []:
for y in range(len(dice[dice_no[i]])):
for x in range(len(dice[dice_no[i]][y])):
                    if dice[dice_no[i]][y][x] is not None:
                        # Draw the eyes (pips) of the dice in the desired color
fill(0)
ellipse(bx + (x * 30), basey + (y * 30), 20, 20)
if debug and dice_no != []:
text("score: " + str(score) + " - Count: " + str(count) + " - Dice_No: " + str(dice_no), 0, 20)
def mousePressed():
global dice_no, score, count
if mouseButton == LEFT:
score = 0
count = 0
# Get a random value for the dice
dice_no = [int(random(1,7)) for x in range(dice_cnt)]
score = sum(dice_no)
| 32
| 103
| 0.483456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 369
| 0.169577
|
4dbc315472764792a06e13d3e501c508bdc38cb4
| 769
|
py
|
Python
|
src/pdf/domain/encrypt.py
|
ichiro-kazusa/PDFCon
|
529c22145bfd20919b015b5ba70e8bab33feed01
|
[
"MIT"
] | null | null | null |
src/pdf/domain/encrypt.py
|
ichiro-kazusa/PDFCon
|
529c22145bfd20919b015b5ba70e8bab33feed01
|
[
"MIT"
] | null | null | null |
src/pdf/domain/encrypt.py
|
ichiro-kazusa/PDFCon
|
529c22145bfd20919b015b5ba70e8bab33feed01
|
[
"MIT"
] | null | null | null |
from ..command.encrypt import EncryptionCommand
class Encryption:
def __init__(self, owner: str = '', user: str = '') -> None:
self.__owner = owner
self.__user = user
self.__verify_entries()
@property
def owner(self):
return self.__owner
@property
def user(self):
return self.__user
def __verify_entries(self):
        # Verify encryption rules: when a user password is given, an empty
        # owner password is not allowed, so the user password is reused as
        # the owner password.
if self.__owner == '' and self.__user != '':
self.__owner = self.__user
@staticmethod
def create_encryption_from_CMD(cmd: EncryptionCommand) -> 'Encryption':
raise NotImplementedError()
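# A minimal usage sketch (hypothetical passwords) of the rule enforced by
# __verify_entries(): a user password given without an owner password is
# promoted to the owner password as well.
if __name__ == '__main__':
    enc = Encryption(owner='', user='s3cret')
    assert enc.owner == enc.user == 's3cret'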
| 24.806452
| 76
| 0.625488
| 716
| 0.931079
| 0
| 0
| 239
| 0.310793
| 0
| 0
| 156
| 0.202861
|
4dbd2fe8b25b1e5fbb2859a59ee51157048af1ed
| 709
|
py
|
Python
|
photos/migrations/0002_auto_20190616_1812.py
|
savannah8/The-gallery
|
ddb95d08e81e874fe8b24046a2acc40be0a68cbe
|
[
"Unlicense"
] | null | null | null |
photos/migrations/0002_auto_20190616_1812.py
|
savannah8/The-gallery
|
ddb95d08e81e874fe8b24046a2acc40be0a68cbe
|
[
"Unlicense"
] | 5
|
2020-06-05T21:16:21.000Z
|
2021-09-08T01:03:08.000Z
|
photos/migrations/0002_auto_20190616_1812.py
|
savannah8/The-gallery
|
ddb95d08e81e874fe8b24046a2acc40be0a68cbe
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-06-16 15:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('photos', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='image',
name='image_category',
),
migrations.RemoveField(
model_name='image',
name='image_location',
),
migrations.DeleteModel(
name='Category',
),
migrations.DeleteModel(
name='Image',
),
migrations.DeleteModel(
name='Location',
),
]
| 21.484848
| 46
| 0.543018
| 561
| 0.791255
| 0
| 0
| 0
| 0
| 0
| 0
| 164
| 0.231312
|
4dbd6eb36e7e0009be0cae3c766793da9b62afee
| 17,853
|
py
|
Python
|
scripts/old_scripts/test1.py
|
noambuckman/mpc-multiple-vehicles
|
a20949c335f1af97962569eed112e6cef46174d9
|
[
"MIT"
] | 1
|
2021-11-02T15:16:17.000Z
|
2021-11-02T15:16:17.000Z
|
scripts/old_scripts/test1.py
|
noambuckman/mpc-multiple-vehicles
|
a20949c335f1af97962569eed112e6cef46174d9
|
[
"MIT"
] | 5
|
2021-04-14T17:08:59.000Z
|
2021-05-27T21:41:02.000Z
|
scripts/old_scripts/test1.py
|
noambuckman/mpc-multiple-vehicles
|
a20949c335f1af97962569eed112e6cef46174d9
|
[
"MIT"
] | 2
|
2022-02-07T08:16:05.000Z
|
2022-03-09T23:30:17.000Z
|
import time, datetime, argparse
import os, sys
import numpy as np
np.set_printoptions(precision=2)
import matplotlib.pyplot as plt
import copy as cp
import pickle
PROJECT_PATH = '/home/nbuckman/Dropbox (MIT)/DRL/2020_01_cooperative_mpc/mpc-multiple-vehicles/'
sys.path.append(PROJECT_PATH)
import casadi as cas
import src.MPC_Casadi as mpc
import src.TrafficWorld as tw
import src.IterativeBestResponseMPCMultiple as mibr
import src.car_plotting_multiple as cmplot
##########################################################
svo_theta = np.pi/4.0
# random_seed = args.random_seed[0]
random_seed = 3
NEW = True
if NEW:
optional_suffix = "ellipses"
subdir_name = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + optional_suffix
folder = "results/" + subdir_name + "/"
os.makedirs(folder)
os.makedirs(folder+"imgs/")
os.makedirs(folder+"data/")
os.makedirs(folder+"vids/")
os.makedirs(folder+"plots/")
else:
subdir_name = "20200224-103456_real_dim_CA"
folder = "results/" + subdir_name + "/"
print(folder)
if random_seed > 0:
np.random.seed(random_seed)
#######################################################################
T = 3 # MPC Planning Horizon
dt = 0.3
N = int(T/dt) #Number of control intervals in MPC
n_rounds_mpc = 6
percent_mpc_executed = 0.5 ## This is the percent of MPC that is executed
number_ctrl_pts_executed = int(np.floor(N*percent_mpc_executed))
XAMB_ONLY = False
n_other = 2
n_rounds_ibr = 2
world = tw.TrafficWorld(2, 0, 1000)
# large_world = tw.TrafficWorld(2, 0, 1000, 5.0)
#########################################################################
actual_xamb = np.zeros((6, n_rounds_mpc*number_ctrl_pts_executed + 1))
actual_uamb = np.zeros((2, n_rounds_mpc*number_ctrl_pts_executed))
actual_xothers = [np.zeros((6, n_rounds_mpc*number_ctrl_pts_executed + 1)) for i in range(n_other)]
actual_uothers = [np.zeros((2, n_rounds_mpc*number_ctrl_pts_executed)) for i in range(n_other)]
actual_all_other_x0 = [np.zeros((6, 2*N)) for i in range(n_other)]
xamb = np.zeros(shape=(6, N+1))
t_start_time = time.time()
####################################################
## Create the Cars in this Problem
all_other_x0 = []
all_other_u = []
all_other_MPC = []
all_other_x = [np.zeros(shape=(6, N+1)) for i in range(n_other)]
next_x0 = 0
for i in range(n_other):
x1_MPC = mpc.MPC(dt)
x1_MPC.n_circles = 3
x1_MPC.theta_iamb = svo_theta
x1_MPC.N = N
x1_MPC.k_change_u_v = 0.001
x1_MPC.max_delta_u = 50 * np.pi/180 * x1_MPC.dt
x1_MPC.k_u_v = 0.01
x1_MPC.k_u_delta = .00001
x1_MPC.k_change_u_v = 0.01
x1_MPC.k_change_u_delta = 0.001
x1_MPC.k_s = 0
x1_MPC.k_x = 0
x1_MPC.k_x_dot = -1.0 / 100.0
x1_MPC.k_lat = 0.001
x1_MPC.k_lon = 0.0
x1_MPC.k_phi_error = 0.001
x1_MPC.k_phi_dot = 0.01
####Vehicle Initial Conditions
if i%2 == 0:
lane_number = 0
next_x0 += x1_MPC.L + 2*x1_MPC.min_dist
else:
lane_number = 1
initial_speed = 0.75*x1_MPC.max_v
traffic_world = world
x1_MPC.fd = x1_MPC.gen_f_desired_lane(traffic_world, lane_number, True)
x0 = np.array([next_x0, traffic_world.get_lane_centerline_y(lane_number), 0, 0, initial_speed, 0]).T
## Set the initial control of the other vehicles
u1 = np.zeros((2,N))
# u1[0,:] = np.clip(np.pi/180 *np.random.normal(size=(1,N)), -2 * np.pi/180, 2 * np.pi/180)
SAME_SIDE = False
if lane_number == 1 or SAME_SIDE:
u1[0,0] = 2 * np.pi/180
else:
u1[0,0] = -2 * np.pi/180
u1[0,0] = 0
all_other_MPC += [x1_MPC]
all_other_x0 += [x0]
all_other_u += [u1]
# Settings for Ambulance
amb_MPC = cp.deepcopy(x1_MPC)
amb_MPC.theta_iamb = 0.0
amb_MPC.k_u_v = 0.0000
amb_MPC.k_u_delta = .01
amb_MPC.k_change_u_v = 0.0000
amb_MPC.k_change_u_delta = 0
amb_MPC.k_s = 0
amb_MPC.k_x = 0
amb_MPC.k_x_dot = -1.0 / 100.0
amb_MPC.k_x = -1.0/100
amb_MPC.k_x_dot = 0
amb_MPC.k_lat = 0.00001
amb_MPC.k_lon = 0.0
# amb_MPC.min_v = 0.8*initial_speed
amb_MPC.max_v = 35 * 0.447 # m/s
amb_MPC.k_phi_error = 0.1
amb_MPC.k_phi_dot = 0.01
NO_GRASS = False
amb_MPC.min_y = world.y_min
amb_MPC.max_y = world.y_max
if NO_GRASS:
amb_MPC.min_y += world.grass_width
amb_MPC.max_y -= world.grass_width
amb_MPC.fd = amb_MPC.gen_f_desired_lane(world, 0, True)
x0_amb = np.array([0, 0, 0, 0, initial_speed , 0]).T
pickle.dump(x1_MPC, open(folder + "data/"+"mpc%d"%i + ".p",'wb'))
pickle.dump(amb_MPC, open(folder + "data/"+"mpcamb" + ".p",'wb'))
########################################################################
#### SOLVE THE MPC #####################################################
for i_mpc in range(n_rounds_mpc):
min_slack = np.infty
actual_t = i_mpc * number_ctrl_pts_executed
###### Update the initial conditions for all vehicles
if i_mpc > 0:
x0_amb = xamb[:, number_ctrl_pts_executed]
for i in range(len(all_other_x0)):
all_other_x0[i] = all_other_x[i][:, number_ctrl_pts_executed]
###### Initial guess for the other u. This will be updated once the other vehicles
###### solve the best response to the ambulance. Initial guess just looks at the last solution. This could also be a lange change
# Obtain a simulated trajectory from other vehicle control inputs
all_other_x = [np.zeros(shape=(6, N+1)) for i in range(n_other)]
all_other_x_des = [np.zeros(shape=(3, N+1)) for i in range(n_other)]
for i in range(n_other):
if i_mpc == 0:
            all_other_u[i] = np.zeros(shape=(2,N))
else:
all_other_u[i] = np.concatenate((all_other_u[i][:, number_ctrl_pts_executed:], np.tile(all_other_u[i][:,-1:],(1, number_ctrl_pts_executed))),axis=1) ##
x_mpci, u_all_i, x_0_i = all_other_MPC[i], all_other_u[i], all_other_x0[i]
all_other_x[i], all_other_x_des[i] = x_mpci.forward_simulate_all(x_0_i, u_all_i)
for i_rounds_ibr in range(n_rounds_ibr):
########## Solve the Ambulance MPC ##########
response_MPC = amb_MPC
response_x0 = x0_amb
nonresponse_MPC_list = all_other_MPC
nonresponse_x0_list = all_other_x0
nonresponse_u_list = all_other_u
nonresponse_x_list = all_other_x
nonresponse_xd_list = all_other_x_des
################# Generate the warm starts ###############################
u_warm_profiles = mibr.generate_warm_u(N, response_MPC)
### Ambulance Warm Start
if i_rounds_ibr > 0: # warm start with the solution from the last IBR round
u_warm_profiles["previous"] = uamb
else:
# take the control inputs of the last MPC and continue the ctrl
if i_mpc > 0:
u_warm_profiles["previous"] = np.concatenate((uamb[:, number_ctrl_pts_executed:], np.tile(uamb[:,-1:],(1, number_ctrl_pts_executed))),axis=1) ##
#######################################################################
min_response_cost = 99999999
for k_warm in u_warm_profiles.keys():
u_warm = u_warm_profiles[k_warm]
x_warm, x_des_warm = response_MPC.forward_simulate_all(response_x0.reshape(6,1), u_warm)
bri = mibr.IterativeBestResponseMPCMultiple(response_MPC, None, nonresponse_MPC_list )
k_slack = 10000.0
k_CA = 0.000000000000000
k_CA_power = 4
wall_CA = True
bri.k_slack = k_slack
bri.k_CA = k_CA
bri.k_CA_power = k_CA_power
bri.world = world
bri.wall_CA = wall_CA
# for slack_var in bri.slack_vars_list: ## Added to constrain slacks
# bri.opti.subject_to(cas.vec(slack_var) <= 1.0)
INFEASIBLE = True
bri.generate_optimization(N, T, response_x0, None, nonresponse_x0_list, 1, slack=False)
bri.opti.set_initial(bri.u_opt, u_warm)
bri.opti.set_initial(bri.x_opt, x_warm)
bri.opti.set_initial(bri.x_desired, x_des_warm)
### Set the trajectories of the nonresponse vehicles (as given)
for i in range(n_other):
bri.opti.set_value(bri.allother_x_opt[i], nonresponse_x_list[i])
bri.opti.set_value(bri.allother_x_desired[i], nonresponse_xd_list[i])
### Solve the Optimization
# Debugging
# plot_range = [N]
# bri.opti.callback(lambda i: bri.debug_callback(i, plot_range))
# bri.opti.callback(lambda i: print("J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost))))
try:
bri.solve(None, nonresponse_u_list)
x1, u1, x1_des, _, _, _, _, _, _ = bri.get_solution()
print("i_mpc %d n_round %d i %02d Cost %.02f Slack %.02f "%(i_mpc, i_rounds_ibr, i, bri.solution.value(bri.total_svo_cost), bri.solution.value(bri.slack_cost)))
print("J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost)))
print("Dir:", subdir_name)
print("k_warm", k_warm)
INFEASIBLE = False
if bri.solution.value(bri.slack_cost) < min_slack:
current_cost = bri.solution.value(bri.total_svo_cost)
if current_cost < min_response_cost:
uamb = u1
xamb = x1
xamb_des = x1_des
min_response_cost = current_cost
min_response_warm = k_warm
min_bri = bri
# file_name = folder + "data/"+'%03d'%ibr_sub_it
# mibr.save_state(file_name, xamb, uamb, xamb_des, all_other_x, all_other_u, all_other_x_des)
# mibr.save_costs(file_name, bri)
except RuntimeError:
print("Infeasibility: k_warm %s"%k_warm)
# ibr_sub_it +=1
########### SOLVE FOR THE OTHER VEHICLES ON THE ROAD
if not XAMB_ONLY:
for i in range(len(all_other_MPC)):
response_MPC = all_other_MPC[i]
response_x0 = all_other_x0[i]
nonresponse_MPC_list = all_other_MPC[:i] + all_other_MPC[i+1:]
nonresponse_x0_list = all_other_x0[:i] + all_other_x0[i+1:]
nonresponse_u_list = all_other_u[:i] + all_other_u[i+1:]
nonresponse_x_list = all_other_x[:i] + all_other_x[i+1:]
nonresponse_xd_list = all_other_x_des[:i] + all_other_x_des[i+1:]
################ Warm Start
u_warm_profiles = mibr.generate_warm_u(N, response_MPC)
if i_rounds_ibr > 0: # warm start with the solution from the last IBR round
u_warm_profiles["previous"] = all_other_u[i]
else:
# take the control inputs of the last MPC and continue the ctrl
if i_mpc > 0:
u_warm_profiles["previous"] = np.concatenate((all_other_u[i][:, number_ctrl_pts_executed:], np.tile(all_other_u[i][:,-1:],(1, number_ctrl_pts_executed))),axis=1) ##
min_response_cost = 99999999
for k_warm in u_warm_profiles.keys():
u_warm = u_warm_profiles[k_warm]
x_warm, x_des_warm = response_MPC.forward_simulate_all(response_x0.reshape(6,1), u_warm)
bri = mibr.IterativeBestResponseMPCMultiple(response_MPC, amb_MPC, nonresponse_MPC_list)
bri.k_slack = k_slack
bri.k_CA = k_CA
bri.k_CA_power = k_CA_power
bri.world = world
bri.wall_CA = wall_CA
INFEASIBLE = True
bri.generate_optimization(N, T, response_x0, x0_amb, nonresponse_x0_list, 1, slack=False)
# for slack_var in bri.slack_vars_list: ## Added to constrain slacks
# bri.opti.subject_to(cas.vec(slack_var) <= 1.0)
bri.opti.set_initial(bri.u_opt, u_warm)
bri.opti.set_initial(bri.x_opt, x_warm)
bri.opti.set_initial(bri.x_desired, x_des_warm)
### Set the trajectories of the nonresponse vehicles (as given)
bri.opti.set_value(bri.xamb_opt, xamb)
for i in range(len(nonresponse_x_list)):
bri.opti.set_value(bri.allother_x_opt[i], nonresponse_x_list[i])
bri.opti.set_value(bri.allother_x_desired[i], nonresponse_xd_list[i])
# Debugging
# bri.opti.callback(lambda i: bri.debug_callback(i, [N]))
# bri.opti.callback(lambda i: print("J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost))))
try: ### Solve the Optimization
bri.solve(uamb, nonresponse_u_list)
x1_nr, u1_nr, x1_des_nr, _, _, _, _, _, _ = bri.get_solution()
print(" i_mpc %d n_round %d i %02d Cost %.02f Slack %.02f "%(i_mpc, i_rounds_ibr, i, bri.solution.value(bri.total_svo_cost), bri.solution.value(bri.slack_cost)))
print(" J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost)))
print(" Dir:", subdir_name)
print(" k_warm", k_warm)
INFEASIBLE = False
if bri.solution.value(bri.slack_cost) < min_slack:
current_cost = bri.solution.value(bri.total_svo_cost)
if current_cost < min_response_cost:
                        all_other_x = all_other_x[:i] + [x1_nr] + all_other_x[i+1:]
                        all_other_u = all_other_u[:i] + [u1_nr] + all_other_u[i+1:]
                        all_other_x_des = all_other_x_des[:i] + [x1_des_nr] + all_other_x_des[i+1:]
min_response_cost = current_cost
min_response_warm = k_warm
min_bri = bri
# file_name = folder + "data/"+'%03d'%ibr_sub_it
# mibr.save_state(file_name, xamb, uamb, xamb_des, all_other_x, all_other_u, all_other_x_des)
# mibr.save_costs(file_name, bri)
except RuntimeError:
print(" Infeasibility: k_warm %s"%k_warm)
# ibr_sub_it +=1
#
print(" IBR Done: Rd %02d / %02d"%(i_rounds_ibr, n_rounds_ibr))
file_name = folder + "data/"+'r%02d%03d'%(i_mpc, i_rounds_ibr)
if not INFEASIBLE:
        mibr.save_state(file_name, xamb, uamb, xamb_des, all_other_x, all_other_u, all_other_x_des)
mibr.save_costs(file_name, bri)
actual_t = i_mpc * number_ctrl_pts_executed
actual_xamb[:,actual_t:actual_t+number_ctrl_pts_executed+1] = xamb[:,:number_ctrl_pts_executed+1]
print(" MPC Done: Rd %02d / %02d"%(i_mpc, n_rounds_mpc))
print(" Full MPC Solution", xamb[0:2,:])
print(" Executed MPC", xamb[0:2,:number_ctrl_pts_executed+1])
print(" Solution Costs...")
for cost in bri.car1_costs_list:
print("%.04f"%bri.solution.value(cost))
print(min_bri.solution.value(min_bri.k_CA * min_bri.collision_cost), min_bri.solution.value(min_bri.collision_cost))
print(min_bri.solution.value(min_bri.k_slack * min_bri.slack_cost), min_bri.solution.value(min_bri.slack_cost))
print(" Save to...", file_name)
actual_uamb[:,actual_t:actual_t+number_ctrl_pts_executed] = uamb[:,:number_ctrl_pts_executed]
plot_range = range(N+1)
for k in plot_range:
            cmplot.plot_multiple_cars( k, min_bri.responseMPC, all_other_x, xamb, True, None, None, None, min_bri.world, 0)
plt.show()
plt.plot(xamb[4,:],'--')
plt.plot(xamb[4,:] * np.cos(xamb[2,:]))
plt.ylabel("Velocity / Vx")
plt.hlines(35*0.447,0,xamb.shape[1])
plt.show()
plt.plot(uamb[1,:],'o')
plt.hlines(amb_MPC.max_v_u,0,xamb.shape[1])
plt.ylabel("delta_u_v")
plt.show()
        for i in range(len(all_other_x)):
            actual_xothers[i][:,actual_t:actual_t+number_ctrl_pts_executed+1] = all_other_x[i][:,:number_ctrl_pts_executed+1]
            actual_uothers[i][:,actual_t:actual_t+number_ctrl_pts_executed] = all_other_u[i][:,:number_ctrl_pts_executed]
# all_other_u[i] = np.concatenate((uothers[i][:, number_ctrl_pts_executed:],uothers[i][:,:number_ctrl_pts_executed]),axis=1)
else:
raise Exception("Xamb is None", i_mpc, i_rounds_ibr, "slack cost", bri.solution.value(bri.slack_cost))
print("Solver Done! Runtime: %.1d"%(time.time()-t_start_time))
| 47.863271
| 286
| 0.58752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,215
| 0.236095
|
4dbda883d570920c96f7e2803783e01d7ee5bd65
| 712
|
py
|
Python
|
RenameMaps.py
|
uesp/uesp-dbmapscripts
|
f2bb914661423d19a5bd4b7c090af2b2142654c2
|
[
"MIT"
] | null | null | null |
RenameMaps.py
|
uesp/uesp-dbmapscripts
|
f2bb914661423d19a5bd4b7c090af2b2142654c2
|
[
"MIT"
] | null | null | null |
RenameMaps.py
|
uesp/uesp-dbmapscripts
|
f2bb914661423d19a5bd4b7c090af2b2142654c2
|
[
"MIT"
] | null | null | null |
import os
import sys
import shutil
import re
INPUT_PATH = "d:\\dbmaps\\test\\final\\"
OUTPUT_PATH = "d:\\dbmaps\\test\\zoom17\\"
for filename in os.listdir(INPUT_PATH):
InputFile = INPUT_PATH + filename
matchResult = re.search('([a-zA-Z]+)-([0-9]+)-([0-9]+)-([0-9]+)\.', filename)
if (not matchResult): continue
cellX = int(matchResult.group(2)) - 90
cellY = int(matchResult.group(3)) - 40
zoom = matchResult.group(4)
newFilename = "db-" + str(cellX) + "-" + str(cellY) + "-" + str(zoom) + ".jpg"
OutputFile = OUTPUT_PATH + newFilename
print("Copying " + filename + " to " + newFilename + "...")
shutil.copyfile(InputFile, OutputFile)
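# A minimal sketch (hypothetical filename) of the rename rule above:
# "dbmaps-95-42-17.png" matches groups ('dbmaps', '95', '42', '17'),
# giving cellX = 95 - 90 = 5 and cellY = 42 - 40 = 2, i.e. "db-5-2-17.jpg".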
| 25.428571
| 83
| 0.594101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 135
| 0.189607
|
4dbe3d037f49de1a5bf9a99b444cecb6fba61822
| 2,984
|
py
|
Python
|
my/rtm.py
|
almereyda/HPI
|
c83bfbd21ce94a96f7af01ab0a82f20535f4aefb
|
[
"MIT"
] | null | null | null |
my/rtm.py
|
almereyda/HPI
|
c83bfbd21ce94a96f7af01ab0a82f20535f4aefb
|
[
"MIT"
] | null | null | null |
my/rtm.py
|
almereyda/HPI
|
c83bfbd21ce94a96f7af01ab0a82f20535f4aefb
|
[
"MIT"
] | null | null | null |
"""
[[https://rememberthemilk.com][Remember The Milk]] tasks and notes
"""
REQUIRES = [
'icalendar',
]
import re
from pathlib import Path
from typing import Dict, List, Optional, Iterator
from datetime import datetime
from .common import LazyLogger, get_files, group_by_key, cproperty, make_dict
from my.config import rtm as config
import icalendar # type: ignore
from icalendar.cal import Todo # type: ignore
logger = LazyLogger(__name__)
# TODO extract in a module to parse RTM's ical?
class MyTodo:
def __init__(self, todo: Todo, revision=None) -> None:
self.todo = todo
self.revision = revision
@cproperty
def notes(self) -> List[str]:
# TODO can there be multiple??
desc = self.todo['DESCRIPTION']
notes = re.findall(r'---\n\n(.*?)\n\nUpdated:', desc, flags=re.DOTALL)
return notes
@cproperty
def tags(self) -> List[str]:
desc = self.todo['DESCRIPTION']
[tags_str] = re.findall(r'\nTags: (.*?)\n', desc, flags=re.DOTALL)
if tags_str == 'none':
return []
tags = [t.strip() for t in tags_str.split(',')]
return tags
@cproperty
def uid(self) -> str:
return str(self.todo['UID'])
@cproperty
def title(self) -> str:
return str(self.todo['SUMMARY'])
    def get_status(self) -> Optional[str]:
        if 'STATUS' not in self.todo:
            return None
# TODO 'COMPLETED'?
return str(self.todo['STATUS'])
# TODO tz?
@cproperty
def time(self) -> datetime:
t1 = self.todo['DTSTAMP'].dt
t2 = self.todo['LAST-MODIFIED'].dt
assert t1 == t2 # TODO not sure which one is correct
return t1
def is_completed(self) -> bool:
return self.get_status() == 'COMPLETED'
def __repr__(self):
return repr(self.todo)
def __str__(self):
return str(self.todo)
@staticmethod
def alala_key(mtodo):
        return (mtodo.revision, mtodo.time)
class DAL:
def __init__(self, data: str, revision=None) -> None:
self.cal = icalendar.Calendar.from_ical(data)
self.revision = revision
def all_todos(self) -> Iterator[MyTodo]:
for t in self.cal.walk('VTODO'):
yield MyTodo(t, self.revision)
def get_todos_by_uid(self) -> Dict[str, MyTodo]:
todos = self.all_todos()
return make_dict(todos, key=lambda t: t.uid)
def get_todos_by_title(self) -> Dict[str, List[MyTodo]]:
todos = self.all_todos()
return group_by_key(todos, lambda todo: todo.title)
def dal():
last = get_files(config.export_path)[-1]
data = last.read_text()
return DAL(data=data, revision='TODO')
def all_tasks() -> Iterator[MyTodo]:
yield from dal().all_todos()
def active_tasks() -> Iterator[MyTodo]:
for t in all_tasks():
if not t.is_completed():
yield t
def print_all_todos():
for t in all_tasks():
print(t)
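# A minimal usage sketch (assuming my.config.rtm.export_path points at an
# ical export): list the titles and tags of still-open tasks.
#   for todo in active_tasks():
#       print(todo.title, todo.tags)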
| 24.661157
| 78
| 0.612936
| 2,088
| 0.699732
| 311
| 0.104223
| 952
| 0.319035
| 0
| 0
| 428
| 0.143432
|
4dc1180d1faca62d2a5375c9a1f39fa3d254832a
| 350
|
py
|
Python
|
learnpython/fib.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 2
|
2019-05-19T11:54:26.000Z
|
2019-05-19T12:03:49.000Z
|
learnpython/fib.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 1
|
2020-11-27T07:55:15.000Z
|
2020-11-27T07:55:15.000Z
|
learnpython/fib.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 2
|
2021-09-06T18:06:12.000Z
|
2021-12-31T07:44:43.000Z
|
# def fib2(n): # returns the Fibonacci series up to n
# result = []
# a, b = 0, 1
# while b < n:
# result.append(b)
# a, b = b, a+b
# return result
#
# a = fib2(500)
# print(a)
def recur_fibo(n):
"""递归函数
输出斐波那契数列"""
if n <= 1:
return n
else:
return(recur_fibo(n-1) + recur_fibo(n-2))
a = recur_fibo(10)
print(a)
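# A minimal sketch: the plain recursion above re-computes subproblems and is
# exponential in n; caching with functools.lru_cache makes it linear.
from functools import lru_cache

@lru_cache(maxsize=None)
def recur_fibo_cached(n):
    if n <= 1:
        return n
    return recur_fibo_cached(n - 1) + recur_fibo_cached(n - 2)

print(recur_fibo_cached(10))  # 55, same as recur_fibo(10)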
| 15.217391
| 48
| 0.485714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 242
| 0.614213
|
4dc36673ab2928033fca50873ad953a0612cf554
| 820
|
py
|
Python
|
taco/tests/aws_wrappers/dynamodb/integration/consts.py
|
Intsights/taco
|
f9a912d146d74a6539d31c33ec289eff3fbfca8f
|
[
"Apache-2.0"
] | 18
|
2019-09-05T07:53:26.000Z
|
2021-02-15T18:23:45.000Z
|
taco/tests/aws_wrappers/dynamodb/integration/consts.py
|
Intsights/taco
|
f9a912d146d74a6539d31c33ec289eff3fbfca8f
|
[
"Apache-2.0"
] | null | null | null |
taco/tests/aws_wrappers/dynamodb/integration/consts.py
|
Intsights/taco
|
f9a912d146d74a6539d31c33ec289eff3fbfca8f
|
[
"Apache-2.0"
] | null | null | null |
import taco.aws_wrappers.dynamodb_wrapper.consts as dynamodb_consts
from taco.boto3.boto_config import Regions
DEFAULT_REGION = Regions.n_virginia.value
RESPONSE_KEY_NAME = 'Responses'
PRIMARY_KEY_NAME = 'KEY1'
ATTRIBUTE_DEFINITIONS = [
dynamodb_consts.property_schema(PRIMARY_KEY_NAME, dynamodb_consts.AttributeTypes.string_type.value)
]
PRIMARY_KEYS = [dynamodb_consts.property_schema(PRIMARY_KEY_NAME, dynamodb_consts.PrimaryKeyTypes.hash_type.value)]
ITEMS_TO_PUT_WITHOUT_PRIMARY_KEY = [{'q': 'qqq'}]
ITEMS_TO_PUT_WITH_MISMATCH_PRIMARY_KEY_VALUE = [{PRIMARY_KEY_NAME: 12}]
DEFAULT_PRIMARY_KEY_VALUE = '123abc'
VALID_ITEMS_TO_PUT = [{PRIMARY_KEY_NAME: DEFAULT_PRIMARY_KEY_VALUE}]
TABLE_ALREADY_EXISTS_MESSAGE = 'Table already exists'
SKIP_TABLE_DELETION_ERROR_MESSAGE = 'Table does not exist, skipping deletion'
| 41
| 115
| 0.842683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 93
| 0.113415
|
4dc47970f015540fdb076bfa3a3c9a472b731090
| 1,790
|
py
|
Python
|
examples/HMF_oxidation_WO3/model.py
|
flboudoire/chemical-kinetics
|
70db1b3fc899f357d86834708950b9559b4d19fb
|
[
"MIT"
] | null | null | null |
examples/HMF_oxidation_WO3/model.py
|
flboudoire/chemical-kinetics
|
70db1b3fc899f357d86834708950b9559b4d19fb
|
[
"MIT"
] | null | null | null |
examples/HMF_oxidation_WO3/model.py
|
flboudoire/chemical-kinetics
|
70db1b3fc899f357d86834708950b9559b4d19fb
|
[
"MIT"
] | 2
|
2021-09-23T14:17:33.000Z
|
2022-03-26T01:06:34.000Z
|
import numpy as np
from scipy import constants
measured_species = ["HMF", "DFF", "HMFCA", "FFCA", "FDCA"]
all_species = measured_species.copy()
all_species.extend(["H_" + s for s in measured_species])
all_species.extend(["Hx_" + s for s in measured_species])
def c_to_q(c):
c_e = list()
for i, s in enumerate(all_species):
c_e.append(2*(i%5 + int(i/5))*c[:,i])
c_e = np.sum(c_e, axis = 0) # uM
c_e *= 1e-6 # M
V = 100e-3 # L
q = c_e*V*constants.N_A*constants.e # number of charge in coulombs
return q
def derivatives(y, t, p):
"""
Calculates the derivatives from local values, used by scipy.integrate.solve_ivp
"""
c = {s:y[i] for i, s in enumerate(all_species)}
dc = dict()
dc["HMF"] = - (p["k11"] + p["k12"] + p["kH1"])*c["HMF"]
dc["DFF"] = p["k11"]*c["HMF"] - (p["k21"] + p["kH21"])*c["DFF"]
dc["HMFCA"] = p["k12"]*c["HMF"] - (p["k22"] + p["kH22"])*c["HMFCA"]
dc["FFCA"] = p["k21"]*c["DFF"] + p["k22"]*c["HMFCA"] - (p["k3"] + p["kH3"])*c["FFCA"]
dc["FDCA"] = p["k3"]*c["FFCA"] - p["kH4"]*c["FDCA"]
dc["H_HMF"] = p["kH1"]*c["HMF"] - p["kHx"]*c["H_HMF"]
dc["H_DFF"] = p["kH21"]*c["DFF"] - p["kHx"]*c["H_DFF"]
dc["H_HMFCA"] = p["kH22"]*c["HMFCA"] - p["kHx"]*c["H_HMFCA"]
dc["H_FFCA"] = p["kH3"]*c["FFCA"] - p["kHx"]*c["H_FFCA"]
dc["H_FDCA"] = p["kH4"]*c["FDCA"] - p["kHx"]*c["H_FDCA"]
dc["Hx_HMF"] = p["kHx"]*c["H_HMF"]
dc["Hx_DFF"] = p["kHx"]*c["H_DFF"]
dc["Hx_HMFCA"] = p["kHx"]*c["H_HMFCA"]
dc["Hx_FFCA"] = p["kHx"]*c["H_FFCA"]
dc["Hx_FDCA"] = p["kHx"]*c["H_FDCA"]
dy = [dc[name] for name in all_species]
return dy
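# A minimal usage sketch (the rate constants below are placeholders, not
# fitted values): integrate the model with scipy.integrate.odeint, whose
# func(y, t, ...) argument order matches derivatives() above.
from scipy.integrate import odeint

p_example = {k: 0.01 for k in [
    "k11", "k12", "k21", "k22", "k3",
    "kH1", "kH21", "kH22", "kH3", "kH4", "kHx"]}
y0 = [100.0] + [0.0]*(len(all_species) - 1)  # uM, only HMF present at t = 0
t_grid = np.linspace(0, 300, 50)
c_t = odeint(derivatives, y0, t_grid, args=(p_example,))
q_t = c_to_q(c_t)  # charge passed at each time point, in coulombs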
| 31.403509
| 96
| 0.488268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 601
| 0.335754
|
4dc54e3f4ce59c3a9f8980ef33d1443e375f1870
| 905
|
py
|
Python
|
cayennelpp/tests/test_lpp_type_humidity.py
|
smlng/pycayennelpp
|
28f2ba4fba602527d3369c9cfbce16b783916933
|
[
"MIT"
] | 16
|
2019-02-18T10:57:51.000Z
|
2022-03-29T01:54:51.000Z
|
cayennelpp/tests/test_lpp_type_humidity.py
|
smlng/pycayennelpp
|
28f2ba4fba602527d3369c9cfbce16b783916933
|
[
"MIT"
] | 40
|
2018-11-04T17:28:49.000Z
|
2021-11-26T16:05:16.000Z
|
cayennelpp/tests/test_lpp_type_humidity.py
|
smlng/pycayennelpp
|
28f2ba4fba602527d3369c9cfbce16b783916933
|
[
"MIT"
] | 12
|
2018-11-09T19:06:36.000Z
|
2021-05-21T17:44:28.000Z
|
import pytest
from cayennelpp.lpp_type import LppType
@pytest.fixture
def hum():
return LppType.get_lpp_type(104)
def test_humidity(hum):
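    # Cayenne LPP humidity is one byte with 0.5 % resolution, so encoded
    # values are floored to the nearest 0.5 step (50.25 decodes as 50.00).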
val = (50.00,)
hum_buf = hum.encode(val)
assert hum.decode(hum_buf) == val
hum_buf = hum.encode(50.25)
assert hum.decode(hum_buf) == val
val = (50.50,)
hum_buf = hum.encode(val)
assert hum.decode(hum_buf) == val
hum_buf = hum.encode(50.75)
assert hum.decode(hum_buf) == val
def test_humidity_negative_val(hum):
with pytest.raises(Exception):
val = (-50.50,)
hum.encode(val)
def test_humidity_invalid_buf(hum):
with pytest.raises(Exception):
hum.decode(bytearray([0x00, 0x00]))
def test_humidity_invalid_val_type(hum):
with pytest.raises(Exception):
hum.encode([0x00])
def test_humidity_invalid_val(hum):
with pytest.raises(Exception):
hum.encode((0, 0))
| 21.046512
| 43
| 0.667403
| 0
| 0
| 0
| 0
| 63
| 0.069613
| 0
| 0
| 0
| 0
|
4dc60387a6447f2906c626319f97969c75a2af08
| 118
|
py
|
Python
|
src/frogtips/api/__init__.py
|
FROG-TIPS/frog.tips-python-client
|
16d1603151469522d90f352fe5bac828e4fb3e3d
|
[
"MIT"
] | 2
|
2019-11-04T04:00:56.000Z
|
2019-11-21T19:53:36.000Z
|
src/frogtips/api/__init__.py
|
FROG-TIPS/frog.tips-python-client
|
16d1603151469522d90f352fe5bac828e4fb3e3d
|
[
"MIT"
] | null | null | null |
src/frogtips/api/__init__.py
|
FROG-TIPS/frog.tips-python-client
|
16d1603151469522d90f352fe5bac828e4fb3e3d
|
[
"MIT"
] | 1
|
2019-11-21T19:53:40.000Z
|
2019-11-21T19:53:40.000Z
|
from frogtips.api.Credentials import Credentials
from frogtips.api.Tip import Tip
from frogtips.api.Tips import Tips
| 23.6
| 48
| 0.838983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4dc612995d0e9e6a026d052503209dc02bc22a03
| 5,218
|
py
|
Python
|
scripts/studs_dist.py
|
inesc-tec-robotics/carlos_controller
|
ffcc45f24dd534bb953d5bd4a47badd3d3d5223d
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/studs_dist.py
|
inesc-tec-robotics/carlos_controller
|
ffcc45f24dd534bb953d5bd4a47badd3d3d5223d
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/studs_dist.py
|
inesc-tec-robotics/carlos_controller
|
ffcc45f24dd534bb953d5bd4a47badd3d3d5223d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from mission_ctrl_msgs.srv import *
from studs_defines import *
import rospy
import time
import carlos_vision as crlv
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import PoseArray
from geometry_msgs.msg import Pose
from carlos_controller.msg import StudsPoses
#import geometry_msgs.msg
#import std_msgs.msg
# incoming handlers
def set_mode_hnd(req):
print 'Set mode requested: '+str(req.mode)
global working_mode
working_mode = req.mode
# if working_mode != WMODE_STOPPED:
# for i in range(12):
# puntos, stiff1, stiff2, tipo= crlv.calcula_dist(working_mode)
# time.sleep(0.3)
return SetModeResponse(req.mode)
def get_mode_hnd(req):
print 'Mode requested'
global working_mode
return GetModeResponse(working_mode)
# publisher
def publica(dato, stiff1, stiff2, tipo):
global working_mode
global studs_pub
global wall_pub
#pub = rospy.Publisher(CRL_STUDS_POS_MSG, StudsPoses, queue_size=1)
studs = PoseArray()
mes = StudsPoses()
studs.header.stamp = rospy.Time.now()
if tipo==WMODE_TRACK:
#studs.header.frame_id = "orientation"
#studi = Pose()
if dato[0][0][0]!=0 or dato[0][0][1]!=0 or dato[0][0][2]!=0:
wall_or = PointStamped()
wall_or.point.x = dato[0][0][0]+0.0
wall_or.point.y = dato[0][0][1]-0.035
wall_or.point.z = dato[0][0][2]-0.015
wall_pub.publish(wall_or)
elif tipo==WMODE_STOPPED:
studs.header.frame_id = "idle"
studi = Pose()
studi.position.x = 0
studi.position.y = 0
studi.position.z = 0
studi.orientation.x = 0
studi.orientation.y = 0
studi.orientation.z = 0
studi.orientation.w = 0
studs.poses.append(studi)
else:
studs.header.frame_id = "/base_link"
for i in range(len(dato)):
studi = Pose()
studi.position.y = (dato[i][0][0])/1000.0
studi.position.x = -(dato[i][0][1]-110)/1000.0
studi.position.z = (dato[i][0][2]-175)/1000.0
studi.orientation.x = dato[i][1][0]
studi.orientation.y = dato[i][1][1]
studi.orientation.z = dato[i][1][2]
studi.orientation.w = dato[i][1][3]
studs.poses.append(studi)
mes.pose_array=studs
mes.stiff1=stiff1/1000.0
mes.stiff2=stiff2/1000.0
if dato[0][0][0]!=0 or dato[0][0][1]!=0 or dato[0][0][2]!=0:
studs_pub.publish(mes)
# timer callback for state machine update
def timer_callback(event):
global working_mode
global pattern
global init_time
global laser_threshold
if working_mode==WMODE_DETECT:
pattern = rospy.get_param(STUDS_PATTERN)
laser_threshold=rospy.get_param(STUDS_PATTERN_LASER_THRESHOLD)
crlv.cambia_patron(pattern, stud_margin, stud_prox)
puntos, stiff1, stiff2, tipo= crlv.calcula_dist(working_mode, laser_threshold)
print 'Detecting...'
print 'Studs detected: '+ str(len(puntos))
for i in range(len(puntos)):
print 'Stud '+str(i)+': '+ str(puntos[i][0])
print 'Stiff. 1: ' +str(stiff1) + ' Stiff. 2: ' +str(stiff2) + ' SD:' + str(stiff2-stiff1)
if puntos[0][1][0]!=2 and tipo==working_mode:
publica(puntos, stiff1, stiff2, tipo)
else:
publica(puntos, stiff1, stiff2, WMODE_STOPPED)
elif working_mode==WMODE_TRACK:
pattern = rospy.get_param(STUDS_PATTERN)
laser_threshold=rospy.get_param(STUDS_PATTERN_LASER_THRESHOLD)
crlv.cambia_patron(pattern, stud_margin, stud_prox)
puntos, stiff1, stiff2, tipo= crlv.calcula_dist(working_mode, laser_threshold)
print 'Tracking...'
print 'Orientation = ' + str(puntos[0][0])
print 'Stiff. 1: ' +str(stiff1) + ' Stiff. 2: ' +str(stiff2) + ' SD:' + str(stiff2-stiff1)
if puntos[0][1][0]!=2 and tipo==working_mode:
publica(puntos, stiff1, stiff2, tipo)
else:
publica(puntos, stiff1, stiff2, WMODE_STOPPED)
else:
print 'Idle ' + str(rospy.Time.to_sec(rospy.Time.now())-rospy.Time.to_sec(init_time))
def install_params():
global pattern
pattern = rospy.get_param(STUDS_PATTERN)
global stud_margin
stud_margin = rospy.get_param(STUDS_PATTERN_DIST)
global stud_prox
stud_prox = rospy.get_param(STUDS_PATTERN_PROX)
global laser_threshold
laser_threshold = rospy.get_param(STUDS_PATTERN_LASER_THRESHOLD)
def init_server():
global init_time
global thres_laser
thres_laser=220
rospy.init_node(MODULE_NAME)
crlv.arranca()
install_params()
global working_mode
working_mode = WMODE_STOPPED
#working_mode = WMODE_DETECT
#working_mode = WMODE_TRACK
global studs_pub
global wall_pub
wall_pub = rospy.Publisher(CRL_WALL_ORIENT_MSG, PointStamped, queue_size=1)
studs_pub = rospy.Publisher(CRL_STUDS_POS_MSG, StudsPoses, queue_size=1)
s = rospy.Service(SET_MODE_SRV, SetMode, set_mode_hnd)
init_time=rospy.Time.now()
rospy.Timer(rospy.Duration(0.25), timer_callback)
rospy.spin()
if __name__ == "__main__":
init_server()
| 30.87574
| 99
| 0.652166
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 678
| 0.129935
|
4dc68b6c713419a1c2bd43c406530fcd60ac199b
| 9,204
|
py
|
Python
|
code/bodmas/utils.py
|
whyisyoung/BODMAS
|
91e63bbacaa53060488c94e54af3a2fb91cfa88a
|
[
"BSD-2-Clause"
] | 18
|
2021-07-20T13:50:06.000Z
|
2022-03-29T18:20:43.000Z
|
code/bodmas/utils.py
|
whyisyoung/BODMAS
|
91e63bbacaa53060488c94e54af3a2fb91cfa88a
|
[
"BSD-2-Clause"
] | 1
|
2022-01-19T23:52:14.000Z
|
2022-01-21T20:35:32.000Z
|
code/bodmas/utils.py
|
whyisyoung/BODMAS
|
91e63bbacaa53060488c94e54af3a2fb91cfa88a
|
[
"BSD-2-Clause"
] | 2
|
2021-11-20T10:44:10.000Z
|
2021-12-31T02:38:08.000Z
|
# -*- coding: utf-8 -*-
"""
utils.py
~~~~~~~~
Helper functions for setting up the environment and parsing args, etc.
"""
import os
os.environ['PYTHONHASHSEED'] = '0'
from numpy.random import seed
import random
random.seed(1)
seed(1)
import sys
import logging
import argparse
import pickle
import json
import numpy as np
import pandas as pd
import smtplib
import traceback
import lightgbm as lgb
from pprint import pformat
from collections import Counter
from email.mime.text import MIMEText
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
def create_folder(name):
if not os.path.exists(name):
os.makedirs(name)
def create_parent_folder(file_path):
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
def parse_multiple_dataset_args():
"""Parse the command line configuration for a particular run.
Raises:
ValueError: if the tree value for RandomForest is negative.
Returns:
argparse.Namespace -- a set of parsed arguments.
"""
p = argparse.ArgumentParser()
p.add_argument('--task', default='binary', choices=['binary', 'multiclass'],
help='Whether to perform binary classification or multi-class classification.')
p.add_argument('--training-set',
help='Which extra dataset to use as training. Blue Hexagon first 3 months is the default training set.')
p.add_argument('--diversity', choices=['no', 'size', 'family', 'timestamp', 'timestamp_part', 'legacy', 'packed', 'family_fixed_size'],
help='Which diversity metric to use in the training set: size, timestamp, family, packed. \
"no" means the original setting: use the 3 months of bluehex dataset as the training set.')
p.add_argument('--setting-name', help='name for this particular setting, for saving corresponding data, model, and results')
p.add_argument('-c', '--classifier', choices=['rf', 'gbdt', 'mlp'],
help='The classifier used for binary classification or multi-class classification')
p.add_argument('--testing-time',
help='The beginning time and ending time (separated by comma) for a particular testing set (bluehex data)')
p.add_argument('--quiet', default=1, type=int, choices=[0, 1], help='whether to print DEBUG logs or just INFO')
p.add_argument('--retrain', type=int, choices=[0, 1], default=0,
help='Whether to retrain the classifier, default NO.')
p.add_argument('--seed', type=int, default=42, help='random seed for training and validation split.')
# sub-arguments for the family (binary) and family_fixed_size (binary) diversity and multi-class classification
p.add_argument('--families', type=int, help='add top N families from the first three months of bluehex.')
# sub-arguments for the MLP classifier.
p.add_argument('--mlp-hidden',
help='The hidden layers of the MLP classifier, e.g.,: "2400-1200-1200", would make the architecture as 2381-2400-1200-1200-2')
p.add_argument('--mlp-batch-size', default=32, type=int,
help='MLP classifier batch_size.')
p.add_argument('--mlp-lr', default=0.001, type=float,
help='MLP classifier Adam learning rate.')
p.add_argument('--mlp-epochs', default=50, type=int,
help='MLP classifier epochs.')
p.add_argument('--mlp-dropout', default=0.2, type=float,
help='MLP classifier Droput rate.')
# sub-arguments for the RandomForest classifier.
p.add_argument('--tree',
type=int,
default=100,
help='The n_estimators of RandomForest classifier when --classifier = "rf"')
args = p.parse_args()
if args.tree < 0:
raise ValueError('invalid tree value')
return args
def get_model_dims(model_name, input_layer_num, hidden_layer_num, output_layer_num):
"""convert hidden layer arguments to the architecture of a model (list)
Arguments:
model_name {str} -- 'MLP' or 'Contrastive AE'.
input_layer_num {int} -- The number of the features.
hidden_layer_num {str} -- The '-' connected numbers indicating the number of neurons in hidden layers.
output_layer_num {int} -- The number of the classes.
Returns:
[list] -- List represented model architecture.
"""
try:
if '-' not in hidden_layer_num:
dims = [input_layer_num, int(hidden_layer_num), output_layer_num]
else:
hidden_layers = [int(dim) for dim in hidden_layer_num.split('-')]
dims = [input_layer_num]
for dim in hidden_layers:
dims.append(dim)
dims.append(output_layer_num)
logging.debug(f'{model_name} dims: {dims}')
    except Exception:
logging.error(f'get_model_dims {model_name}\n{traceback.format_exc()}')
sys.exit(-1)
return dims
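# A minimal worked example of get_model_dims, matching the --mlp-hidden help
# text above (the 2381 input features are assumed there):
#   get_model_dims('MLP', 2381, '2400-1200-1200', 2)
#   -> [2381, 2400, 1200, 1200, 2]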
def dump_json(data, output_dir, filename, overwrite=True):
dump_data('json', data, output_dir, filename, overwrite)
def dump_data(protocol, data, output_dir, filename, overwrite=True):
file_mode = 'w' if protocol == 'json' else 'wb'
fname = os.path.join(output_dir, filename)
logging.info(f'Dumping data to {fname}...')
if overwrite or not os.path.exists(fname):
with open(fname, file_mode) as f:
if protocol == 'json':
json.dump(data, f, indent=4)
else:
# pickle.dump(data, f)
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
def load_json(filename):
with open(filename, 'r') as f:
d = json.load(f) # dict
return d
def parse_drift_args():
"""Parse the command line configuration for a particular run.
Raises:
ValueError: if the tree value for RandomForest is negative.
Returns:
argparse.Namespace -- a set of parsed arguments.
"""
p = argparse.ArgumentParser()
p.add_argument('--task', default='binary', choices=['binary', 'multiclass'],
help='Whether to perform binary classification or multi-class classification.')
p.add_argument('--setting-name', help='name for this particular setting, for saving corresponding data, model, and results')
p.add_argument('-c', '--classifier', choices=['rf', 'gbdt', 'mlp'],
help='The classifier used for binary classification or multi-class classification')
p.add_argument('--testing-time',
help='The beginning time and ending time (separated by comma) for a particular testing set (bluehex data)')
p.add_argument('--month-interval', type=int, default=1, help='specify how many months for sampling.')
# sub-arguments for the family (binary) and family_fixed_size (binary) diversity and multi-class classification
p.add_argument('--families', type=int, help='add top N families from the first three months of bluehex.')
p.add_argument('--quiet', default=1, type=int, choices=[0, 1], help='whether to print DEBUG logs or just INFO')
p.add_argument('--retrain', type=int, choices=[0, 1], default=0,
help='Whether to retrain the classifier, default NO.')
p.add_argument('--sample-ratio', default=0.01, type=float, help='how many samples to add back to the training set for retraining to combat concept drift.')
p.add_argument('--ember-ratio', default=0.3, type=float, help='how many Ember samples to train Transcend / CADE.')
p.add_argument('--seed', default=1, type=int, help='random seed for the random experiment')
args = p.parse_args()
return args
def normalize_sample_month(X_train_origin, X_sample_full):
scaler = MinMaxScaler()
X_train_scale = scaler.fit_transform(X_train_origin)
X_sample_scale = scaler.transform(X_sample_full)
return X_sample_scale
def get_ember_sample_idx_by_pred_proba(X_sample_full, y_sample_full,
sample_month_str, args,
test_begin_time, REPORT_FOLDER,
baseline_model_path, fpr_threshold):
SUB_REPORT_FOLDER = os.path.join(REPORT_FOLDER, 'intermediate')
os.makedirs(SUB_REPORT_FOLDER, exist_ok=True)
report_path = os.path.join(SUB_REPORT_FOLDER, f'{args.classifier}_{test_begin_time}_ranked_proba_sample_{sample_month_str}.csv')
if os.path.exists(report_path):
df = pd.read_csv(report_path)
sorted_sample_idx = df['idx'].to_numpy()
else:
clf = lgb.Booster(model_file=baseline_model_path)
y_sample_pred = clf.predict(X_sample_full)
y_sample_prob = np.array([1 - t if t < 0.5 else t for t in y_sample_pred])
y_sample_pred_label = np.array(y_sample_pred > fpr_threshold, dtype=np.int)
sorted_sample_idx = np.argsort(y_sample_prob)
with open(report_path, 'w') as f:
f.write(f'idx,real,pred,proba\n')
for i in sorted_sample_idx:
f.write(f'{i},{y_sample_full[i]},{y_sample_pred_label[i]},{y_sample_prob[i]}\n')
return sorted_sample_idx
| 39.165957
| 159
| 0.661669
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,098
| 0.445241
|
4dc6f19452f1928a12cb21256eb1100495d990ef
| 2,539
|
py
|
Python
|
session11.py
|
sahanashetty31/session_11_epai3_assignment
|
4b2d7f299fea2d3cb6f8cb1c90804f3cc4976647
|
[
"MIT"
] | null | null | null |
session11.py
|
sahanashetty31/session_11_epai3_assignment
|
4b2d7f299fea2d3cb6f8cb1c90804f3cc4976647
|
[
"MIT"
] | null | null | null |
session11.py
|
sahanashetty31/session_11_epai3_assignment
|
4b2d7f299fea2d3cb6f8cb1c90804f3cc4976647
|
[
"MIT"
] | null | null | null |
import math
from functools import lru_cache
class Polygon:
def __init__(self, n, R):
if n < 3:
raise ValueError('Polygon must have at least 3 vertices.')
self._n = n
self._R = R
def __repr__(self):
return f'Polygon(n={self._n}, R={self._R})'
@property
def count_vertices(self):
return self._n
@property
def count_edges(self):
return self._n
@property
def circumradius(self):
return self._R
@property
def interior_angle(self):
return (self._n - 2) * 180 / self._n
@property
def side_length(self):
return 2 * self._R * math.sin(math.pi / self._n)
@property
def apothem(self):
return self._R * math.cos(math.pi / self._n)
@property
def area(self):
return self._n / 2 * self.side_length * self.apothem
@property
def perimeter(self):
return self._n * self.side_length
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.count_edges == other.count_edges and self.circumradius == other.circumradius)
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, self.__class__):
return self.count_vertices > other.count_vertices
else:
return NotImplemented
class Polygons:
def __init__(self, m, R):
if m < 3:
raise ValueError('m must be greater than 3')
self._m = m
self._R = R
self._polygons = [Polygon(i, R) for i in range(3, m+1)]
def __len__(self):
return self._m - 2
def __repr__(self):
return f'Polygons(m={self._m}, R={self._R})'
def __getitem__(self, s):
return self._polygons[s]
def __iter__(self):
return self.PolygonIterator(self)
@property
def max_efficiency_polygon(self):
sorted_polygons = sorted(self._polygons,
key=lambda p: p.area/p.perimeter,
reverse=True)
return sorted_polygons[0]
class PolyIterator:
def __init__(self, poly_obj):
self._poly_obj = poly_obj
self._index = 0
def __iter__(self):
return self
def __next__(self):
if self._index >= len(self._poly_obj):
raise StopIteration
else:
item = self._poly_obj._polygons[self._index]
self._index += 1
return item
| 25.39
| 102
| 0.568334
| 2,492
| 0.981489
| 0
| 0
| 852
| 0.335565
| 0
| 0
| 139
| 0.054746
|
4dc6fa3514e2ac738a922e6c666fe8ccb1623cf7
| 1,937
|
py
|
Python
|
Software for Other Building Blocks and Integration/PoW.py
|
fkerem/Cryptocurrency-Blockchain
|
965268a09a6f8b3e700e8bbc741e49a4d54805c6
|
[
"MIT"
] | null | null | null |
Software for Other Building Blocks and Integration/PoW.py
|
fkerem/Cryptocurrency-Blockchain
|
965268a09a6f8b3e700e8bbc741e49a4d54805c6
|
[
"MIT"
] | null | null | null |
Software for Other Building Blocks and Integration/PoW.py
|
fkerem/Cryptocurrency-Blockchain
|
965268a09a6f8b3e700e8bbc741e49a4d54805c6
|
[
"MIT"
] | null | null | null |
"""
PoW.py
"""
import DSA
import sys
import hashlib
if sys.version_info < (3, 6):
import sha3
def rootMerkle(TxBlockFile, TxLen): #To Get the Root Hash of the Merkle Tree
TxBlockFileBuffer = open(TxBlockFile, "r")
lines = TxBlockFileBuffer.readlines()
TxBlockFileBuffer.close()
    TxCount = len(lines)//TxLen #Num of Transactions in a Block
hashTree = []
for i in range(0,TxCount):
transaction = "".join(lines[i*TxLen:(i+1)*TxLen])
        hashTree.append(hashlib.sha3_256(transaction.encode()).hexdigest())
t = TxCount
j = 0
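    # NOTE: this pairwise reduction assumes TxCount is a power of two;
    # otherwise the indexing below reads past the end of hashTree.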
while(t>1):
for i in range(j,j+t,2):
            hashTree.append(hashlib.sha3_256((hashTree[i]+hashTree[i+1]).encode()).hexdigest())
j += t
t = t>>1
h = hashTree[2*TxCount-2]
return h
def PoW(TxBlockFile, ChainFile, PoWLen, TxLen): #Updates LongestChain File for Each Block
block = ""
if TxBlockFile[-5] != "0":
chainBuffer = open(ChainFile, "r")
chain = chainBuffer.readlines()
chainBuffer.close()
block = chain[-1] #PoW for the Previous Transaction Block
else: #for TransactionBlock0.txt
block = "Day Zero Link in the Chain" + "\n"
block += rootMerkle(TxBlockFile, TxLen) + "\n" #The Root Hash of the Merkle Tree
while True:
new_block = block + str(DSA.bitGenerator(2**128-1)) + "\n"
        new_PoW = hashlib.sha3_256(new_block.encode()).hexdigest()
if new_PoW[:PoWLen] == "0"*PoWLen:
new_PoW += "\n" #PoW
block = new_block
block += new_PoW
break
#Write/append to the ChainFile
if TxBlockFile[-5] != "0":
chainBuffer = open(ChainFile, "a")
chainBuffer.write(block)
chainBuffer.close()
else: #for TransactionBlock0.txt
chainBuffer = open(ChainFile, "w")
chainBuffer.write(block)
chainBuffer.close()
| 29.8
| 90
| 0.588539
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 366
| 0.188952
|
4dc7d26cc18475fbd2b690b1e9dc6d7f0d1003fa
| 637
|
py
|
Python
|
core/cdag/node/sub.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
core/cdag/node/sub.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
core/cdag/node/sub.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ----------------------------------------------------------------------
# SubNode
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from typing import Optional
# NOC modules
from .base import BaseCDAGNode, ValueType, Category
class SubNode(BaseCDAGNode):
"""
    Subtract `y` from `x`
"""
name = "sub"
categories = [Category.OPERATION]
def get_value(self, x: ValueType, y: ValueType) -> Optional[ValueType]:
return x - y
| 25.48
| 75
| 0.44113
| 224
| 0.351648
| 0
| 0
| 0
| 0
| 0
| 0
| 362
| 0.568289
|
4dc88ebb3a9af63d834dc6d3c95d28f963145c6a
| 287
|
py
|
Python
|
chapter03/3.5_simulate_output_layer.py
|
Myeonghan-Jeong/deep-learning-from-scratch
|
0df7f9f352920545f5309e8e11c7cf879ad477e5
|
[
"MIT"
] | null | null | null |
chapter03/3.5_simulate_output_layer.py
|
Myeonghan-Jeong/deep-learning-from-scratch
|
0df7f9f352920545f5309e8e11c7cf879ad477e5
|
[
"MIT"
] | 3
|
2021-06-08T21:22:11.000Z
|
2021-09-08T01:55:11.000Z
|
chapter03/3.5_simulate_output_layer.py
|
myeonghan-nim/deep-learning-from-scratch
|
fef3e327c49593b5df74728a1cba1144948a2999
|
[
"MIT"
] | null | null | null |
import numpy as np
# softmax function
def softmax(a):
exp_a = np.exp(a)
sum_a = np.sum(exp_a)
return exp_a / sum_a
# modified softmax function
def modified_softmax(a):
maxA = np.max(a)
exp_a = np.exp(a - maxA)
sum_a = np.sum(exp_a)
return exp_a / sum_a
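# A minimal sketch (assumed logits) of why the max-shift matters: the naive
# version overflows on large inputs, the modified one stays stable.
a = np.array([1010, 1000, 990])
print(softmax(a))           # overflow warnings, result is [nan nan nan]
print(modified_softmax(a))  # [9.99954600e-01 4.53978686e-05 2.06106005e-09]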
| 15.105263
| 28
| 0.634146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.156794
|
4dc8b44f56e787d0b3156d5c7fc12d0fb557c818
| 1,770
|
py
|
Python
|
example.py
|
luisfciencias/intro-cv
|
2908d21dd8058acf13b5479a2cb409a6e00859c1
|
[
"MIT"
] | null | null | null |
example.py
|
luisfciencias/intro-cv
|
2908d21dd8058acf13b5479a2cb409a6e00859c1
|
[
"MIT"
] | 5
|
2020-01-28T22:54:12.000Z
|
2022-02-10T00:26:51.000Z
|
example.py
|
luisfciencias/intro-cv
|
2908d21dd8058acf13b5479a2cb409a6e00859c1
|
[
"MIT"
] | null | null | null |
# example of mask inference with a pre-trained model (COCO)
import sys
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
from mrcnn.config import Config
from mrcnn.model import MaskRCNN
from mrcnn.visualize import display_instances
from tools import load_config
# load config params - labels
cfg_dict = load_config('config.yaml')
class_names = cfg_dict['class_names']
# config settings for model inference
class ConfigParams(Config):
NAME = "test"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 80
# replicate the model for pure inference
rcnn_model = MaskRCNN(mode='inference', model_dir='models/', config=ConfigParams())
# model weights input
path_weights_file = 'models/mask_rcnn_coco.h5'
rcnn_model.load_weights(path_weights_file, by_name=True)
# single image input
path_to_image = sys.argv[1]
img = load_img(path_to_image)
# transition to array
img = img_to_array(img)
print('Image shape:', img.shape)
# make inference
results = rcnn_model.detect([img], verbose=0)
# the output is a list of dictionaries, where each dict has a single object detection
# {'rois': array([[ 30, 54, 360, 586]], dtype=int32),
# 'class_ids': array([21], dtype=int32),
# 'scores': array([0.9999379], dtype=float32),
# 'masks': huge_boolean_array_here ...
result_params = results[0]
# show photo with bounding boxes, masks, class labels and scores
display_instances(img,
result_params['rois'],
result_params['masks'],
result_params['class_ids'],
class_names,
result_params['scores'])
| 32.181818
| 85
| 0.719774
| 111
| 0.062712
| 0
| 0
| 0
| 0
| 0
| 0
| 723
| 0.408475
|
4dc8ffc44718b6bc253375644e19671ce86d5269
| 8,260
|
py
|
Python
|
rubi/datasets/vqa2.py
|
abhipsabasu/rubi.bootstrap.pytorch
|
9fa9639c1ee4a040958d976eeb5dca2dd2203980
|
[
"BSD-3-Clause"
] | 83
|
2021-03-02T07:49:14.000Z
|
2022-03-30T03:07:26.000Z
|
rubi/datasets/vqa2.py
|
abhipsabasu/rubi.bootstrap.pytorch
|
9fa9639c1ee4a040958d976eeb5dca2dd2203980
|
[
"BSD-3-Clause"
] | 14
|
2019-07-14T14:10:28.000Z
|
2022-01-27T18:53:34.000Z
|
cfvqa/cfvqa/datasets/vqa2.py
|
yuleiniu/introd
|
a40407c7efee9c34e3d4270d7947f5be2f926413
|
[
"Apache-2.0"
] | 14
|
2019-09-20T01:49:13.000Z
|
2022-03-29T16:42:34.000Z
|
import os
import csv
import copy
import json
import torch
import numpy as np
from os import path as osp
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from block.datasets.vqa_utils import AbstractVQA
from copy import deepcopy
import random
import tqdm
import h5py
class VQA2(AbstractVQA):
def __init__(self,
dir_data='data/vqa2',
split='train',
batch_size=10,
nb_threads=4,
pin_memory=False,
shuffle=False,
nans=1000,
minwcount=10,
nlp='mcb',
proc_split='train',
samplingans=False,
dir_rcnn='data/coco/extract_rcnn',
adversarial=False,
dir_cnn=None
):
super(VQA2, self).__init__(
dir_data=dir_data,
split=split,
batch_size=batch_size,
nb_threads=nb_threads,
pin_memory=pin_memory,
shuffle=shuffle,
nans=nans,
minwcount=minwcount,
nlp=nlp,
proc_split=proc_split,
samplingans=samplingans,
has_valset=True,
has_testset=True,
has_answers_occurence=True,
do_tokenize_answers=False)
self.dir_rcnn = dir_rcnn
self.dir_cnn = dir_cnn
self.load_image_features()
        # to activate manually in visualization context (notebook)
self.load_original_annotation = False
def add_rcnn_to_item(self, item):
path_rcnn = os.path.join(self.dir_rcnn, '{}.pth'.format(item['image_name']))
item_rcnn = torch.load(path_rcnn)
item['visual'] = item_rcnn['pooled_feat']
item['coord'] = item_rcnn['rois']
item['norm_coord'] = item_rcnn.get('norm_rois', None)
item['nb_regions'] = item['visual'].size(0)
return item
def add_cnn_to_item(self, item):
image_name = item['image_name']
if image_name in self.image_names_to_index_train:
index = self.image_names_to_index_train[image_name]
image = torch.tensor(self.image_features_train['att'][index])
elif image_name in self.image_names_to_index_val:
index = self.image_names_to_index_val[image_name]
image = torch.tensor(self.image_features_val['att'][index])
image = image.permute(1, 2, 0).view(196, 2048)
item['visual'] = image
return item
def load_image_features(self):
if self.dir_cnn:
filename_train = os.path.join(self.dir_cnn, 'trainset.hdf5')
filename_val = os.path.join(self.dir_cnn, 'valset.hdf5')
Logger()(f"Opening file {filename_train}, {filename_val}")
self.image_features_train = h5py.File(filename_train, 'r', swmr=True)
self.image_features_val = h5py.File(filename_val, 'r', swmr=True)
# load txt
            with open(os.path.join(self.dir_cnn, 'trainset.txt'), 'r') as f:
self.image_names_to_index_train = {}
for i, line in enumerate(f):
self.image_names_to_index_train[line.strip()] = i
            with open(os.path.join(self.dir_cnn, 'valset.txt'), 'r') as f:
self.image_names_to_index_val = {}
for i, line in enumerate(f):
self.image_names_to_index_val[line.strip()] = i
def __getitem__(self, index):
item = {}
item['index'] = index
# Process Question (word token)
question = self.dataset['questions'][index]
if self.load_original_annotation:
item['original_question'] = question
item['question_id'] = question['question_id']
item['question'] = torch.tensor(question['question_wids'], dtype=torch.long)
item['lengths'] = torch.tensor([len(question['question_wids'])], dtype=torch.long)
item['image_name'] = question['image_name']
        # Process Object, Attribute and Relational features
if self.dir_rcnn:
item = self.add_rcnn_to_item(item)
elif self.dir_cnn:
item = self.add_cnn_to_item(item)
# Process Answer if exists
if 'annotations' in self.dataset:
annotation = self.dataset['annotations'][index]
if self.load_original_annotation:
item['original_annotation'] = annotation
if 'train' in self.split and self.samplingans:
proba = annotation['answers_count']
proba = proba / np.sum(proba)
item['answer_id'] = int(np.random.choice(annotation['answers_id'], p=proba))
else:
item['answer_id'] = annotation['answer_id']
item['class_id'] = torch.tensor([item['answer_id']], dtype=torch.long)
item['answer'] = annotation['answer']
item['question_type'] = annotation['question_type']
else:
if item['question_id'] in self.is_qid_testdev:
item['is_testdev'] = True
else:
item['is_testdev'] = False
# if Options()['model.network.name'] == 'xmn_net':
# num_feat = 36
# relation_mask = np.zeros((num_feat, num_feat))
# boxes = item['coord']
# for i in range(num_feat):
# for j in range(i+1, num_feat):
# # if there is no overlap between two bounding box
# if boxes[0,i]>boxes[2,j] or boxes[0,j]>boxes[2,i] or boxes[1,i]>boxes[3,j] or boxes[1,j]>boxes[3,i]:
# pass
# else:
# relation_mask[i,j] = relation_mask[j,i] = 1
# relation_mask = torch.from_numpy(relation_mask).byte()
# item['relation_mask'] = relation_mask
return item
def download(self):
dir_zip = osp.join(self.dir_raw, 'zip')
os.system('mkdir -p '+dir_zip)
dir_ann = osp.join(self.dir_raw, 'annotations')
os.system('mkdir -p '+dir_ann)
os.system('wget http://visualqa.org/data/mscoco/vqa/v2_Questions_Train_mscoco.zip -P '+dir_zip)
os.system('wget http://visualqa.org/data/mscoco/vqa/v2_Questions_Val_mscoco.zip -P '+dir_zip)
os.system('wget http://visualqa.org/data/mscoco/vqa/v2_Questions_Test_mscoco.zip -P '+dir_zip)
os.system('wget http://visualqa.org/data/mscoco/vqa/v2_Annotations_Train_mscoco.zip -P '+dir_zip)
os.system('wget http://visualqa.org/data/mscoco/vqa/v2_Annotations_Val_mscoco.zip -P '+dir_zip)
os.system('unzip '+osp.join(dir_zip, 'v2_Questions_Train_mscoco.zip')+' -d '+dir_ann)
os.system('unzip '+osp.join(dir_zip, 'v2_Questions_Val_mscoco.zip')+' -d '+dir_ann)
os.system('unzip '+osp.join(dir_zip, 'v2_Questions_Test_mscoco.zip')+' -d '+dir_ann)
os.system('unzip '+osp.join(dir_zip, 'v2_Annotations_Train_mscoco.zip')+' -d '+dir_ann)
os.system('unzip '+osp.join(dir_zip, 'v2_Annotations_Val_mscoco.zip')+' -d '+dir_ann)
os.system('mv '+osp.join(dir_ann, 'v2_mscoco_train2014_annotations.json')+' '
+osp.join(dir_ann, 'mscoco_train2014_annotations.json'))
os.system('mv '+osp.join(dir_ann, 'v2_mscoco_val2014_annotations.json')+' '
+osp.join(dir_ann, 'mscoco_val2014_annotations.json'))
os.system('mv '+osp.join(dir_ann, 'v2_OpenEnded_mscoco_train2014_questions.json')+' '
+osp.join(dir_ann, 'OpenEnded_mscoco_train2014_questions.json'))
os.system('mv '+osp.join(dir_ann, 'v2_OpenEnded_mscoco_val2014_questions.json')+' '
+osp.join(dir_ann, 'OpenEnded_mscoco_val2014_questions.json'))
os.system('mv '+osp.join(dir_ann, 'v2_OpenEnded_mscoco_test2015_questions.json')+' '
+osp.join(dir_ann, 'OpenEnded_mscoco_test2015_questions.json'))
os.system('mv '+osp.join(dir_ann, 'v2_OpenEnded_mscoco_test-dev2015_questions.json')+' '
+osp.join(dir_ann, 'OpenEnded_mscoco_test-dev2015_questions.json'))
| 45.635359
| 122
| 0.607385
| 7,959
| 0.963559
| 0
| 0
| 0
| 0
| 0
| 0
| 2,719
| 0.329177
|
4dcb123bec4d3c0380f0862774b7117039deb91f
| 281
|
py
|
Python
|
W3Schools/dates.py
|
FRX-DEV/Python-Practice-Challenges
|
8cddfb8f4181f987aa71cb75dee1f65d4d766954
|
[
"MIT"
] | null | null | null |
W3Schools/dates.py
|
FRX-DEV/Python-Practice-Challenges
|
8cddfb8f4181f987aa71cb75dee1f65d4d766954
|
[
"MIT"
] | null | null | null |
W3Schools/dates.py
|
FRX-DEV/Python-Practice-Challenges
|
8cddfb8f4181f987aa71cb75dee1f65d4d766954
|
[
"MIT"
] | null | null | null |
import datetime
x = datetime.datetime.now()
print(x)
# 2021-07-13 22:55:43.029046
print(x.year)
print(x.strftime("%A"))
"""
2021
Tuesday
"""
x = datetime.datetime(2020, 5, 17)
print(x)
# 2020-05-17 00:00:00
x = datetime.datetime(2018, 6, 1)
print(x.strftime("%B"))
# June
| 12.217391
| 34
| 0.647687
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 86
| 0.30605
|
4dcb5c28ab7f560dea6a9712a1a25dda90260ee7
| 2,564
|
py
|
Python
|
venv/lib/python2.7/site-packages/ebcli/objects/tier.py
|
zwachtel11/fruitful-backend
|
45b8994917182e7b684b9e25944cc79c9494c9f3
|
[
"MIT"
] | 4
|
2017-01-17T09:09:07.000Z
|
2018-12-19T14:06:22.000Z
|
venv/lib/python2.7/site-packages/ebcli/objects/tier.py
|
zwachtel11/fruitful-backend
|
45b8994917182e7b684b9e25944cc79c9494c9f3
|
[
"MIT"
] | 1
|
2020-06-03T13:57:07.000Z
|
2020-06-22T10:27:48.000Z
|
venv/lib/python2.7/site-packages/ebcli/objects/tier.py
|
zwachtel11/fruitful-backend
|
45b8994917182e7b684b9e25944cc79c9494c9f3
|
[
"MIT"
] | 4
|
2017-08-13T09:09:31.000Z
|
2020-11-04T04:58:58.000Z
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ..objects.exceptions import NotFoundError
import re
class Tier():
def __init__(self, name, typ, version):
self.name = name
self.type = typ
self.version = version.strip()
self.string = self.__str__()
def to_struct(self):
strct = {
'Name': self.name,
'Type': self.type,
}
if self.version:
strct['Version'] = self.version
return strct
def __str__(self):
s = self.name + '-' + self.type
if self.version:
s += '-' + self.version
return s
def __eq__(self, other):
if not isinstance(other, Tier):
return False
return self.string.lower() == other.string.lower()
@staticmethod
def get_all_tiers():
lst = [
Tier('WebServer', 'Standard', '1.0'),
Tier('Worker', 'SQS/HTTP', '1.0'),
Tier('Worker', 'SQS/HTTP', '1.1'),
Tier('Worker', 'SQS/HTTP', ''),
]
return lst
@staticmethod
def parse_tier(string):
if string.lower() == 'web' or string.lower() == 'webserver':
return Tier('WebServer', 'Standard', '1.0')
if string.lower() == 'worker':
return Tier('Worker', 'SQS/HTTP', '')
params = string.split('-')
if len(params) == 3:
name, typ, version = string.split('-')
elif len(params) == 2:
name, typ = string.split('-')
            if re.match(r'\d+[.]\d+', typ):
version = typ
else:
version = ''
else:
raise NotFoundError('Tier Not found')
# we want to return the Proper, uppercase version
if name.lower() == 'webserver' or name.lower() == 'web':
return Tier('WebServer', 'Standard', version)
elif name.lower() == 'worker':
return Tier('Worker', 'SQS/HTTP', version)
# tier not found
raise NotFoundError('Tier Not found')
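# Illustrative usage (a minimal sketch, not part of the original module):
#   Tier.parse_tier('web')        -> Tier('WebServer', 'Standard', '1.0')
#   Tier.parse_tier('worker-1.1') -> Tier('Worker', 'SQS/HTTP', '1.1')
#   Tier.parse_tier('unknown')    -> raises NotFoundError('Tier Not found')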
| 31.654321
| 73
| 0.559282
| 1,943
| 0.7578
| 0
| 0
| 1,247
| 0.486349
| 0
| 0
| 920
| 0.358814
|
4dcb9047f54eac204a9bac1c46c12bc3341a699a
| 11,237
|
py
|
Python
|
Leetcode.py
|
SakuraSa/Leetcode_CodeDownloader
|
cba23e3ec85b24e14fdf856e0e7eefb2c95644eb
|
[
"Apache-2.0"
] | 3
|
2015-10-20T13:05:18.000Z
|
2020-07-27T19:45:58.000Z
|
Leetcode.py
|
SakuraSa/Leetcode_CodeDownloader
|
cba23e3ec85b24e14fdf856e0e7eefb2c95644eb
|
[
"Apache-2.0"
] | null | null | null |
Leetcode.py
|
SakuraSa/Leetcode_CodeDownloader
|
cba23e3ec85b24e14fdf856e0e7eefb2c95644eb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#coding=utf-8
import os
import re
import requests
import datetime
import BeautifulSoup
# URL request settings
host_url = 'https://oj.leetcode.com'
login_url = 'https://oj.leetcode.com/accounts/login/'
question_list_url = 'https://oj.leetcode.com/problems/'
code_base_url = 'https://oj.leetcode.com/submissions/detail/%s/'
code_list_base_url = 'https://oj.leetcode.com/submissions/%d/'
github_login_url = 'https://oj.leetcode.com/accounts/github/login/'
code_regex = re.compile(r"storage\.put\('(python|cpp|java)', '([^']+)'\);")
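# Annotation (not in the original): this regex captures the language tag and
# the escaped source string from the "storage.put('<lang>', '<code>');" call
# embedded in a submission-detail page.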
leetcode_request_header = {
'Host': 'oj.leetcode.com',
'Origin': 'https://oj.leetcode.com',
'Referer': 'https://oj.leetcode.com/accounts/login/'
}
github_request_header = {
'Host': 'github.com',
'Origin': 'https://github.com',
'Referer': 'https://github.com/'
}
# code output settings
ext_dic = {'python': '.py', 'cpp': '.cpp', 'java': '.java'}
comment_char_dic = {'python': '#', 'cpp': '//', 'java': '//'}
class LeetcodeDownloader(object):
def __init__(self, proxies=None, code_path='codes/', output_encoding='utf-8', session=None):
self.proxies = proxies or {}
self.code_path = code_path
self.output_encoding = output_encoding
self.session = session or requests.Session()
self.session.proxies = self.proxies
self.username = self.password = ''
def login(self, username, password):
self.username = username
self.password = password
login_page = self.session.get(login_url)
soup = BeautifulSoup.BeautifulSoup(login_page.text)
secret_input = soup.find('form').find('input', type='hidden')
payload = dict(
login=self.username,
password=self.password,
)
payload[secret_input['name']] = secret_input['value']
self.session.post(login_url, data=payload, headers=leetcode_request_header)
return self.is_logged_in
@property
def is_logged_in(self):
return bool(self.session.cookies.get("PHPSESSID", None))
def login_from_github(self, username, password):
self.username = username
self.password = password
leetcode_github_login_page = self.session.get('https://github.com/login')
soup = BeautifulSoup.BeautifulSoup(leetcode_github_login_page.text)
post_div = soup.find('div', id='login')
github_post_url = 'https://github.com/session'
payload = dict()
for ip in post_div.findAll('input'):
value = ip.get('value', None)
if value:
payload[ip['name']] = value
payload['login'], payload['password'] = username, password
self.session.post(github_post_url, data=payload, headers=github_request_header)
if self.session.cookies['logged_in'] != 'yes':
return False
rsp = self.session.get(github_login_url)
return rsp.status_code == 200
def get_questions(self):
rsp = self.session.get(question_list_url)
soup = BeautifulSoup.BeautifulSoup(rsp.text)
question_table = soup.find('table', id='problemList')
question_table_body = question_table.find('tbody')
for table_row in question_table_body.findAll('tr'):
table_data = table_row.findAll('td')
status = table_data[0].find('span')['class']
name = table_data[1].find('a').text
url = table_data[1].find('a')['href']
date = datetime.datetime.strptime(table_data[2].text, '%Y-%m-%d')
per = float(table_data[3].text.strip('%'))
yield dict(
status=status,
name=name,
url=url,
date=date,
per=per
)
def get_question_description(self, url):
rsp = self.session.get(url)
soup = BeautifulSoup.BeautifulSoup(rsp.text)
name = soup.find("h3").text
accepted_count = int(soup.find("span", attrs={"class": "total-ac text-info"}).find("strong").text)
submission_count = int(soup.find("span", attrs={"class": "total-submit text-info"}).find("strong").text)
def transform(div):
lst = []
for item in div:
if isinstance(item, BeautifulSoup.NavigableString):
lst.append(item)
elif isinstance(item, BeautifulSoup.Tag):
if item.name == "p":
lst.append("%s\n" % transform(item))
elif item.name == "b":
lst.append("###%s###" % transform(item))
elif item.name == "a":
lst.append("[%s](%s)" % (transform(item), item["href"]))
elif item.name == "code":
lst.append("`%s`" % transform(item))
elif item.name == "pre":
lst.append("```%s```" % transform(item))
elif item.name == "ul":
lst.append(transform(item))
elif item.name == "div":
lst.append(transform(item))
elif item.name == "li":
lst.append("* %s" % transform(item))
elif item.name == "br":
lst.append("\n")
else:
lst.append(item.text)
return "".join(lst)
description = transform(soup.find("div", attrs={"class": "question-content"}))
return {
'name': name,
'accepted_count': accepted_count,
'submission_count': submission_count,
'description': description.replace("\r", "")
}
def code(self, code_id):
code_url = code_base_url % code_id
rsp = self.session.get(code_url)
match = code_regex.search(rsp.text)
return match.group(2).decode('raw_unicode_escape')
def page_code(self, page_index=0):
code_url = code_list_base_url % page_index
rsp = self.session.get(code_url)
soup = BeautifulSoup.BeautifulSoup(rsp.text)
table = soup.find('table', id='result_testcases')
if table is None:
return []
table_body = table.find('tbody')
number_reg = re.compile('\d+')
lst = list()
for table_row in table_body.findAll('tr'):
table_data = table_row.findAll('td')
name = table_data[1].find('a').text
questions_url = host_url + table_data[1].find('a')['href']
status = table_data[2].find('strong').text
code_id = int(number_reg.search(table_data[2].find('a')['href']).group(0))
runtime = table_data[3].text.strip()
lang = table_data[4].text
file_name = "%s-%s" % (status, code_id)
file_ext = ext_dic.get(lang, '.txt')
file_path = os.path.join(self.code_path, name)
file_full_name = os.path.join(file_path, file_name + file_ext)
exists = os.path.exists(file_full_name)
lst.append(dict(
name=name,
questions_url=questions_url,
status=status,
code_id=code_id,
runtime=runtime,
lang=lang,
exists=exists
))
return lst
def page_code_all(self):
page_index = 0
while 1:
lst = self.page_code(page_index)
if lst:
for data in lst:
yield data
else:
break
page_index += 1
def save_code(self, table_data_list):
file_path = os.path.join(self.code_path, table_data_list['name'])
if not os.path.exists(file_path):
os.makedirs(file_path)
file_name = "%s-%s" % (table_data_list['status'], table_data_list['code_id'])
file_ext = ext_dic.get(table_data_list['lang'], '.txt')
file_full_name = os.path.join(file_path, file_name + file_ext)
exists = os.path.exists(file_full_name)
if not exists:
comment_char = comment_char_dic.get(table_data_list['lang'], '//')
description = self.get_question_description(table_data_list['questions_url'])
with open(file_full_name, 'w') as file_handle:
file_handle.write(comment_char + 'Author : %s\n' % self.username)
file_handle.write(comment_char + 'Question : %s\n' % table_data_list['name'])
file_handle.write(comment_char + 'Link : %s\n' % table_data_list['questions_url'])
file_handle.write(comment_char + 'Language : %s\n' % table_data_list['lang'])
file_handle.write(comment_char + 'Status : %s\n' % table_data_list['status'])
file_handle.write(comment_char + 'Run Time : %s\n' % table_data_list['runtime'])
file_handle.write(comment_char + 'Description: \n')
for line in description["description"].split("\n"):
if line.strip():
file_handle.write(comment_char)
file_handle.write(line.encode(self.output_encoding))
file_handle.write("\n")
file_handle.write('\n')
file_handle.write(comment_char + 'Code : \n')
file_handle.write(self.code(table_data_list['code_id'])
.encode(self.output_encoding)
.replace('\r', ''))
return {
"file_full_name": file_full_name,
"exists": exists,
}
def get_and_save_all_codes(self):
for table_data_list in self.page_code_all():
result = dict(table_data_list)
code_result = self.save_code(table_data_list)
result['path'] = code_result["file_full_name"]
result['exists'] = code_result["exists"]
yield result
if __name__ == '__main__':
    # login from leetcode account
USERNAME = 'YOUR USERNAME'
PASSWORD = 'YOUR PASSWORD'
    # login from github account
#downloader.login_from_github(username='YOUR USERNAME', password='YOUR PASSWORD')
from taskbar import TaskBar
downloader = LeetcodeDownloader()
print "Logging..."
if downloader.login(username=USERNAME, password=PASSWORD):
print "ok, logged in."
else:
print "error, logging failed."
exit()
def func(row):
result = dict(row)
code_result = downloader.save_code(row)
result['path'] = code_result["file_full_name"]
result['exists'] = code_result["exists"]
return result
task_bar = TaskBar(40)
print "Loading submissions..."
task_param_list = task_bar.processing(
task=lambda: list((func, ([table_data_list], {})) for table_data_list in downloader.page_code_all()),
title=" Loading submissions...",
show_total=False
)
print "ok, %s submissions found in %.2fs." % (len(task_param_list), task_bar.time_cost)
print "Downloading submissions..."
task_bar.do_task(task_param_list)
| 41.464945
| 112
| 0.568924
| 9,016
| 0.802349
| 1,402
| 0.124766
| 102
| 0.009077
| 0
| 0
| 1,983
| 0.176471
|
4dccb31b43009dc8e9a6ff9aaa09678332eccb6f
| 1,028
|
py
|
Python
|
tokenizer.py
|
momennaas/kalam-lp
|
fdf032ca71a155169f507cba40275ca38f409c87
|
[
"MIT"
] | 6
|
2019-03-31T04:46:27.000Z
|
2020-02-27T16:39:31.000Z
|
tokenizer.py
|
momennaas/kalam-lp
|
fdf032ca71a155169f507cba40275ca38f409c87
|
[
"MIT"
] | null | null | null |
tokenizer.py
|
momennaas/kalam-lp
|
fdf032ca71a155169f507cba40275ca38f409c87
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
##############################################################
## Author: Abdulmumen Naas
## Description: Arabic Natural Language Processor (Kalam-lp)
## Version: 0.0.1
## Copyright (c) 2014 Abdulmumen Naas
##############################################################
import re
import string
from constants import *
class Tokenizer:
@classmethod
    def wordTokenize(cls, s, xPunct=False):
        """wordTokenize: return a list of word tokens from a raw text string."""
if not s or s == "":
return None
punct_regex = re.compile(r"[%s\s]+" % re.escape(string.punctuation))
w_regex = re.compile(r"[\w]+|[%s]" % re.escape(string.punctuation))
        # TODO: add Arabic punctuation marks
        if not xPunct:
            # keep punctuation marks as tokens
            tokens = re.findall(w_regex, s)
            return tokens
        elif xPunct:
            r = punct_regex.sub(" ", s)
tokens = r.split()
return tokens
else:
return None
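# Illustrative usage (a minimal sketch, not part of the original module):
#   Tokenizer.wordTokenize("Hello, world!")               -> ['Hello', ',', 'world', '!']
#   Tokenizer.wordTokenize("Hello, world!", xPunct=True)  -> ['Hello', 'world']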
def main():
print "It works"
if __name__ == "__main__":
main()
| 25.073171
| 72
| 0.559339
| 591
| 0.574903
| 0
| 0
| 572
| 0.55642
| 0
| 0
| 476
| 0.463035
|
4dcd01eb4188987a9436e56ef1dddd73f316c897
| 1,617
|
py
|
Python
|
Class4/shoppingcart_pom/features/lib/pages/summer_dresses_catalog_page.py
|
techsparksguru/python_ci_automation
|
65e66266fdf2c14f593c6f098a23770621faef41
|
[
"MIT"
] | null | null | null |
Class4/shoppingcart_pom/features/lib/pages/summer_dresses_catalog_page.py
|
techsparksguru/python_ci_automation
|
65e66266fdf2c14f593c6f098a23770621faef41
|
[
"MIT"
] | 9
|
2020-02-13T09:14:12.000Z
|
2022-01-13T03:17:03.000Z
|
Class4/shoppingcart_pom/features/lib/pages/summer_dresses_catalog_page.py
|
techsparksguru/python_ci_automation
|
65e66266fdf2c14f593c6f098a23770621faef41
|
[
"MIT"
] | 1
|
2021-03-10T03:27:37.000Z
|
2021-03-10T03:27:37.000Z
|
__author__ = 'techsparksguru'
from selenium.webdriver.common.by import By
from .base_page_object import BasePage
class SummerDressesCatalogPage(BasePage):
def __init__(self, context):
BasePage.__init__(
self,
context.browser,
base_url='http://www.automationpractice.com')
locator_dictionary = {
"category_name":(By.CLASS_NAME,"cat-name"),
"size_short":(By.ID,"layered_id_attribute_group_1"),
"size_medium":(By.ID,"layered_id_attribute_group_2"),
"color_white":(By.ID,"layered_id_attribute_group_8")
}
class PrintedSummerDress1(BasePage):
def __init__(self, context):
BasePage.__init__(
self,
context.browser,
base_url='http://www.automationpractice.com')
locator_dictionary = {
"img_dress1":(By.XPATH,"//*[@id='center_column']/ul/li[1]/div/div[1]/div/a[1]/img"),
"add_cart_dress1":(By.XPATH,"//*[@id='center_column']/ul/li[1]/div/div[2]/div[2]/a[1]"),
"product_price":(By.CLASS_NAME,"product-price")
}
class CartPopup(BasePage):
def __init__(self, context):
BasePage.__init__(
self,
context.browser,
base_url='http://www.automationpractice.com')
locator_dictionary = {
"continue_shopping":(By.XPATH,"//*[@title='Continue shopping']"),
"proceed_to_checkout":(By.XPATH,"//*[@title='Proceed to checkout']")
}
| 37.604651
| 100
| 0.564626
| 1,501
| 0.928262
| 0
| 0
| 0
| 0
| 0
| 0
| 558
| 0.345083
|
4dcdd9abff0ad027ebd337ca976c53333922e6fc
| 446
|
py
|
Python
|
ch3/collatz_test.py
|
jakdept/pythonbook
|
862e445ef1bcb36c890fe7e27e144354f6c855b5
|
[
"MIT"
] | null | null | null |
ch3/collatz_test.py
|
jakdept/pythonbook
|
862e445ef1bcb36c890fe7e27e144354f6c855b5
|
[
"MIT"
] | null | null | null |
ch3/collatz_test.py
|
jakdept/pythonbook
|
862e445ef1bcb36c890fe7e27e144354f6c855b5
|
[
"MIT"
] | null | null | null |
import unittest
import collatz
class TestCollatz(unittest.TestCase):
'''tests the collatz.py script'''
def test_collatz(self):
'''table driven test to verify collatz'''
tests = ((742, 371),
(418, 209),
(118, 59),
(1978, 989))
for test in tests:
self.assertEqual(collatz.collatz(test[0]), test[1])
if __name__ == "__main__":
unittest.main()
| 21.238095
| 63
| 0.544843
| 361
| 0.809417
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.188341
|
4dceebb4aaf3cbc5f66e75e0222673f73c95b189
| 4,046
|
py
|
Python
|
test/surrogate/test_sk_random_forest.py
|
Dee-Why/lite-bo
|
804e93b950148fb98b7e52bd56c713edacdb9b6c
|
[
"BSD-3-Clause"
] | 184
|
2021-06-02T06:35:25.000Z
|
2022-03-31T10:33:11.000Z
|
test/surrogate/test_sk_random_forest.py
|
ZongWei-HUST/open-box
|
011791aba4e44b20a6544020c73601638886d143
|
[
"MIT"
] | 16
|
2021-11-15T11:13:57.000Z
|
2022-03-24T12:51:17.000Z
|
test/surrogate/test_sk_random_forest.py
|
ZongWei-HUST/open-box
|
011791aba4e44b20a6544020c73601638886d143
|
[
"MIT"
] | 24
|
2021-06-18T04:52:57.000Z
|
2022-03-30T11:14:03.000Z
|
from sklearn.ensemble import RandomForestRegressor
from openbox.utils.config_space import ConfigurationSpace
from openbox.utils.config_space import UniformFloatHyperparameter, \
CategoricalHyperparameter, Constant, UniformIntegerHyperparameter
import numpy as np
from openbox.utils.config_space.util import convert_configurations_to_array
import threading
from joblib import Parallel, delayed
from sklearn.utils.fixes import _joblib_parallel_args
from sklearn.utils.validation import check_is_fitted
from sklearn.ensemble._base import _partition_estimators
def _accumulate_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, check_input=False)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
def _collect_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, check_input=False)
with lock:
out.append(prediction)
def predictmv(rf, X):
check_is_fitted(rf)
# Check data
X = rf._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(rf.n_estimators, rf.n_jobs)
print('n_jobs=', n_jobs)
# avoid storing the output of every estimator by summing them here
if rf.n_outputs_ > 1:
y_hat = np.zeros((X.shape[0], rf.n_outputs_), dtype=np.float64)
else:
print('here, rf.n_outputs_=1')
y_hat = np.zeros((X.shape[0]), dtype=np.float64)
# Parallel loop
lock = threading.Lock()
# Parallel(n_jobs=n_jobs, verbose=rf.verbose,
# **_joblib_parallel_args(require="sharedmem"))(
# delayed(_accumulate_prediction)(e.predict, X, [y_hat], lock)
# for e in rf.estimators_)
#
# y_hat /= len(rf.estimators_)
#
# return y_hat
all_y_preds = list()
Parallel(n_jobs=n_jobs, verbose=rf.verbose,
**_joblib_parallel_args(require="sharedmem"))(
delayed(_collect_prediction)(e.predict, X, all_y_preds, lock)
for e in rf.estimators_)
all_y_preds = np.asarray(all_y_preds, dtype=np.float64)
return all_y_preds
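# Annotation (not in the original): predictmv returns the stacked per-tree
# predictions, shape (n_estimators, n_points); the mean/variance over axis 0
# taken further below give the empirical predictive mean and variance of the
# forest, which is what surrogate models typically consume.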
def get_cs():
cs = ConfigurationSpace()
n_estimators = UniformIntegerHyperparameter("n_estimators", 100, 1000, default_value=500, q=50)
num_leaves = UniformIntegerHyperparameter("num_leaves", 31, 2047, default_value=128)
max_depth = Constant('max_depth', 15)
learning_rate = UniformFloatHyperparameter("learning_rate", 1e-3, 0.3, default_value=0.1, log=True)
min_child_samples = UniformIntegerHyperparameter("min_child_samples", 5, 30, default_value=20)
subsample = UniformFloatHyperparameter("subsample", 0.7, 1, default_value=1, q=0.1)
colsample_bytree = UniformFloatHyperparameter("colsample_bytree", 0.7, 1, default_value=1, q=0.1)
cs.add_hyperparameters([n_estimators, num_leaves, max_depth, learning_rate, min_child_samples, subsample,
colsample_bytree])
return cs
n_obs = 50
n_new = 5
cs = get_cs()
cs.seed(1)
configs = cs.sample_configuration(n_obs)
new_configs = cs.sample_configuration(n_new)
X = convert_configurations_to_array(configs)
Y = np.random.RandomState(47).random(size=(n_obs,))
pX = convert_configurations_to_array(new_configs)
print('shape of pX', pX.shape)
rf = RandomForestRegressor(random_state=np.random.RandomState(47), n_estimators=3)
rf.fit(X, Y)
preds = rf.predict(pX)
print(preds)
ppp = predictmv(rf, pX)
print('final predict', ppp)
m = np.mean(ppp, axis=0)
v = np.var(ppp, axis=0)
print(m, v)
print(type(m), type(v))
from joblib import effective_n_jobs
print(effective_n_jobs(None))
| 32.894309
| 109
| 0.712803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 945
| 0.233564
|
4dcf592e4a02e009b4cb4e7b4d57ff918fb14acc
| 3,258
|
py
|
Python
|
cli_wrapper.py
|
anirbandas18/report-engine
|
de7d3c0caab972243a61e681abbb9a06e9c54857
|
[
"MIT"
] | null | null | null |
cli_wrapper.py
|
anirbandas18/report-engine
|
de7d3c0caab972243a61e681abbb9a06e9c54857
|
[
"MIT"
] | null | null | null |
cli_wrapper.py
|
anirbandas18/report-engine
|
de7d3c0caab972243a61e681abbb9a06e9c54857
|
[
"MIT"
] | null | null | null |
import subprocess, os
# constants with global scope
INPUT = "--input"
OUTPUT = "--output"
FILTERS = "--filters"
SUPPLEMENTS = "--supplements"
JAR_DIRECTORY = "target"
JAR_NAME = "report-engine.jar"
def build_jar():
should_package = input("\nBuild " + JAR_NAME + " file from src (Y/N) ? ")
# check if jar is to be built
if len(should_package) != 0 and (should_package[0] == 'Y' or should_package[0] == 'y'):
# define build commands for maven
mvn_cmd = ['mvn', 'clean', 'package']
print("\nBuilding " + JAR_NAME + " from src using command:\n" + ' '.join(mvn_cmd) + '\n')
# build jar using maven through an external process spawned from python
mvn_process = subprocess.Popen(mvn_cmd, shell=True)
mvn_process.wait()
return mvn_process.returncode
else:
return None
def execute_jar(app_cmd_args):
should_run = input("\nRun " + JAR_NAME + " file from target (Y/N) ? ")
# check if jar is to be run
if len(should_run) != 0 and (should_run[0] == 'Y' or should_run[0] == 'y'):
# form jar file path based on underlying os
jar_location = os.path.join(JAR_DIRECTORY,JAR_NAME)
# define commands for executing .jar file using java
jar_cmd = ['java', '-jar', jar_location]
# parse arguments
for key,value in app_cmd_args.items():
jar_cmd.append(key + '=' + value)
print("\nExecuting " + JAR_NAME + " using command: \n" + ' '.join(jar_cmd) + '\n')
# execute jar using java through an external process spawned from python
jar_process = subprocess.Popen(jar_cmd, shell=True)
jar_process.wait()
        return jar_process.returncode
else:
return None
def main():
# input from user through stdin
input_path = input("Enter the directory path containing profiles to be parsed (--input): ")
output_path = input("Enter the directory path where reports will be dumped (--output): ")
filters = input("Profile properties for which filtered reports will be generated (--filters optional): ")
supplements = input("Profile properties for which supplementary reports will be generated (--supplements optional): ")
# format arguments
app_cmd_args = dict([(INPUT,input_path)])
# validate optional arguments
if len(filters) != 0:
app_cmd_args[FILTERS] = filters
if len(output_path) != 0:
app_cmd_args[OUTPUT] = output_path
if len(supplements) != 0:
        app_cmd_args[SUPPLEMENTS] = supplements
# validate arguments
if len(app_cmd_args.get(INPUT)) == 0:
print("\n" + INPUT + " option is mandatory! Please re-run the cli_wrapper.py script\n")
    else:
# argument validated successfully
mvn_exit_code = build_jar()
# execute .jar file only if maven build is successful
print("\nMaven exit code: " + str(mvn_exit_code))
if mvn_exit_code == 0 or mvn_exit_code == None:
jar_exit_code = execute_jar(app_cmd_args)
print("\nJava exit code: " + str(jar_exit_code))
print('\nReport engine terminated!')
if __name__ == '__main__':
main()
| 42.868421
| 123
| 0.624002
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,329
| 0.407919
|
4dcfc7f344c60db35f7d0923585dc078c2f43a3c
| 11,267
|
py
|
Python
|
19-05-150_protein_ridge/inference.py
|
danhtaihoang/sparse-network
|
763a19f5f333df5cfa9852d965a7110e813d52d5
|
[
"MIT"
] | null | null | null |
19-05-150_protein_ridge/inference.py
|
danhtaihoang/sparse-network
|
763a19f5f333df5cfa9852d965a7110e813d52d5
|
[
"MIT"
] | null | null | null |
19-05-150_protein_ridge/inference.py
|
danhtaihoang/sparse-network
|
763a19f5f333df5cfa9852d965a7110e813d52d5
|
[
"MIT"
] | null | null | null |
##========================================================================================
import numpy as np
from scipy import linalg
from sklearn.preprocessing import OneHotEncoder
from scipy.spatial import distance
#=========================================================================================
def itab(n,m):
i1 = np.zeros(n)
i2 = np.zeros(n)
for i in range(n):
i1[i] = i*m
i2[i] = (i+1)*m
return i1.astype(int),i2.astype(int)
#=========================================================================================
# generate coupling matrix w0: wji from j to i
def generate_interactions(n,m,g,sp):
nm = n*m
w = np.random.normal(0.0,g/np.sqrt(nm),size=(nm,nm))
i1tab,i2tab = itab(n,m)
# sparse
for i in range(n):
for j in range(n):
if (j != i) and (np.random.rand() < sp):
w[i1tab[i]:i2tab[i],i1tab[j]:i2tab[j]] = 0.
# sum_j wji to each position i = 0
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
w[:,i1:i2] -= w[:,i1:i2].mean(axis=1)[:,np.newaxis]
# no self-interactions
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
w[i1:i2,i1:i2] = 0. # no self-interactions
# symmetry
for i in range(nm):
for j in range(nm):
if j > i: w[i,j] = w[j,i]
return w
#=========================================================================================
def generate_external_local_field(n,m,g):
nm = n*m
h0 = np.random.normal(0.0,g/np.sqrt(nm),size=nm)
i1tab,i2tab = itab(n,m)
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
h0[i1:i2] -= h0[i1:i2].mean(axis=0)
return h0
#=========================================================================================
# 2018.10.27: generate time series by MCMC
def generate_sequences(w,h0,n,m,l):
i1tab,i2tab = itab(n,m)
# initial s (categorical variables)
s_ini = np.random.randint(0,m,size=(l,n)) # integer values
# onehot encoder
enc = OneHotEncoder(n_values=m)
s = enc.fit_transform(s_ini).toarray()
nrepeat = 5*n
for irepeat in range(nrepeat):
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
h = h0[np.newaxis,i1:i2] + s.dot(w[:,i1:i2]) # h[t,i1:i2]
k0 = np.argmax(s[:,i1:i2],axis=1)
for t in range(l):
k = np.random.randint(0,m)
while k == k0[t]:
k = np.random.randint(0,m)
if np.exp(h[t,k] - h[t,k0[t]]) > np.random.rand():
s[t,i1:i2],s[t,i1+k] = 0.,1.
if irepeat%n == 0: print('irepeat:',irepeat)
return s
#===================================================================================================
# 2018.12.22: inverse of covariance between values of x
def cov_inv(x,y):
l,mx = x.shape
my = y.shape[1]
cab_inv = np.empty((my,my,mx,mx))
for ia in range(my):
for ib in range(my):
if ib != ia:
eps = y[:,ia] - y[:,ib]
which_ab = eps !=0.
xab = x[which_ab]
xab_av = np.mean(xab,axis=0)
dxab = xab - xab_av
cab = np.cov(dxab,rowvar=False,bias=True)
cab_inv[ia,ib,:,:] = linalg.pinv(cab,rcond=1e-15)
return cab_inv
#=========================================================================================
# 2018.12.28: fit interaction to residues at position i
# additive update
def fit_additive(x,y,regu,nloop=10):
mx = x.shape[1]
my = y.shape[1]
# find elements having low probs, set w = 0
#iprobs = y.sum(axis=0)/float(y.shape[0])
#ilow = [i for i in range(my) if iprobs[i] < 0.02]
#print(ilow)
x_av = x.mean(axis=0)
dx = x - x_av
c = np.cov(dx,rowvar=False,bias=True)
c_inv = linalg.pinvh(c)
w = np.random.normal(0.0,1./np.sqrt(mx),size=(mx,my))
h0 = np.random.normal(0.0,1./np.sqrt(mx),size=my)
cost = np.full(nloop,100.)
for iloop in range(nloop):
h = h0[np.newaxis,:] + x.dot(w)
p = np.exp(h)
p_sum = p.sum(axis=1)
p /= p_sum[:,np.newaxis]
#cost[iloop] = ((y - p)**2).mean() + l1*np.sum(np.abs(w))
cost[iloop] = ((y - p)**2).mean() + regu*np.sum(w**2)
#print(iloop,cost[iloop])
if iloop > 1 and cost[iloop] >= cost[iloop-1]: break
h += y - p
h_av = h.mean(axis=0)
dh = h - h_av
dhdx = dh[:,np.newaxis,:]*dx[:,:,np.newaxis]
dhdx_av = dhdx.mean(axis=0)
w = c_inv.dot(dhdx_av)
h0 = h_av - x_av.dot(w)
#if len(ilow) > 0:
# w[:,ilow] = 0.
# h0[ilow] = 0.
w -= w.mean(axis=0)
h0 -= h0.mean()
return w,h0,cost,iloop
#=========================================================================================
# 2019.02.25: fit interaction to residues at position i
# multiplicative update (new version, NOT select each pair as the old version)
def fit_multiplicative(x,y,nloop=10):
mx = x.shape[1]
my = y.shape[1]
y2 = 2*y-1
x_av = x.mean(axis=0)
dx = x - x_av
c = np.cov(dx,rowvar=False,bias=True)
c_inv = linalg.pinvh(c)
w = np.random.normal(0.0,1./np.sqrt(mx),size=(mx,my))
h0 = np.random.normal(0.0,1./np.sqrt(mx),size=my)
cost = np.full(nloop,100.)
for iloop in range(nloop):
h = h0[np.newaxis,:] + x.dot(w)
p = np.exp(h)
# normalize
p_sum = p.sum(axis=1)
p /= p_sum[:,np.newaxis]
h = np.log(p)
#p2 = p_sum[:,np.newaxis] - p
p2 = 1. - p
h2 = np.log(p2)
hh2 = h-h2
model_ex = np.tanh(hh2/2)
cost[iloop] = ((y2 - model_ex)**2).mean()
if iloop > 0 and cost[iloop] >= cost[iloop-1]: break
#print(cost[iloop])
t = hh2 !=0
h[t] = h2[t] + y2[t]*hh2[t]/model_ex[t]
h[~t] = h2[~t] + y2[~t]*2
h_av = h.mean(axis=0)
dh = h - h_av
dhdx = dh[:,np.newaxis,:]*dx[:,:,np.newaxis]
dhdx_av = dhdx.mean(axis=0)
w = c_inv.dot(dhdx_av)
w -= w.mean(axis=0)
# 2019.03.29: ignore small w
#for i in range(my):
# j = np.abs(w[:,i]) < fraction*np.mean(np.abs(w[:,i]))
# w[j,i] = 0.
h0 = h_av - x_av.dot(w)
h0 -= h0.mean()
return w,h0,cost,iloop
#=========================================================================================
# 2019.05.15: add ridge regression term to coupling w
def fit_multiplicative_ridge(x,y,nloop=10,lamda=0.1):
mx = x.shape[1]
my = y.shape[1]
y2 = 2*y-1
x_av = x.mean(axis=0)
dx = x - x_av
c = np.cov(dx,rowvar=False,bias=True)
# 2019.05.15: ridge regression
c += lamda*np.identity(mx)
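    # Annotation (not in the original): adding lamda*I before inversion makes
    # the update below solve w = (C + lamda*I)^{-1} <dh dx>, i.e. an
    # L2-regularized least-squares fit of the local fields, damping couplings
    # along poorly sampled directions of the input covariance.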
c_inv = linalg.pinvh(c)
w = np.random.normal(0.0,1./np.sqrt(mx),size=(mx,my))
h0 = np.random.normal(0.0,1./np.sqrt(mx),size=my)
cost = np.full(nloop,100.)
for iloop in range(nloop):
h = h0[np.newaxis,:] + x.dot(w)
p = np.exp(h)
# normalize
p_sum = p.sum(axis=1)
p /= p_sum[:,np.newaxis]
h = np.log(p)
#p2 = p_sum[:,np.newaxis] - p
p2 = 1. - p
h2 = np.log(p2)
hh2 = h-h2
model_ex = np.tanh(hh2/2)
cost[iloop] = ((y2 - model_ex)**2).mean()
if iloop > 0 and cost[iloop] >= cost[iloop-1]: break
#print(cost[iloop])
t = hh2 !=0
h[t] = h2[t] + y2[t]*hh2[t]/model_ex[t]
h[~t] = h2[~t] + y2[~t]*2
h_av = h.mean(axis=0)
dh = h - h_av
dhdx = dh[:,np.newaxis,:]*dx[:,:,np.newaxis]
dhdx_av = dhdx.mean(axis=0)
w = c_inv.dot(dhdx_av)
w -= w.mean(axis=0)
h0 = h_av - x_av.dot(w)
h0 -= h0.mean()
return w,h0,cost,iloop
#===================================================================================================
def dca(s0,theta=0.2,pseudo_weight=0.5):
#input: s0[L,n] (integer values, not one-hot)
#theta: threshold for finding similarity of sequences
#pseudo_weight = lamda/(lamda + pseudo_weight)
#output: w[mx_cumsum,mx_cumsum] coupling matrix ; di[n,n]: direct information
n = s0.shape[1]
mx = np.array([len(np.unique(s0[:,i])) for i in range(n)])
mx_cumsum = np.insert(mx.cumsum(),0,0)
i1i2 = np.stack([mx_cumsum[:-1],mx_cumsum[1:]]).T
# hamming distance
dst = distance.squareform(distance.pdist(s0, 'hamming'))
ma = (dst <= theta).sum(axis=1).astype(float)
Meff = (1/ma).sum()
# convert to onehot
onehot_encoder = OneHotEncoder(sparse=False)
s = onehot_encoder.fit_transform(s0)
fi_true = (s/ma[:,np.newaxis]).sum(axis=0)
fi_true /= Meff
fij_true = (s[:,:,np.newaxis]*s[:,np.newaxis,:]/ma[:,np.newaxis,np.newaxis]).sum(axis=0)
fij_true /= Meff
# add pseudo_weight
fi = (1 - pseudo_weight)*fi_true + pseudo_weight/mx[0] ## q = mx[0]
fij = (1 - pseudo_weight)*fij_true + pseudo_weight/(mx[0]**2) ## q = mx[0]
cw = fij - fi[:,np.newaxis]*fi[np.newaxis,:]
cw_inv = -linalg.pinvh(cw)
# set self-interations to be zeros
for i0 in range(n):
i1,i2 = i1i2[i0,0],i1i2[i0,1]
cw_inv[i1:i2,i1:i2] = 0.
# normalize w
w = cw_inv.copy()
for i0 in range(n):
i1,i2 = i1i2[i0,0],i1i2[i0,1]
w[:,i1:i2] -= w[:,i1:i2].mean(axis=1)[:,np.newaxis]
w[i1:i2,:] -= w[i1:i2,:].mean(axis=0)[np.newaxis,:]
#-----------------------------------------------------------------------------------------------
# calculate direct information
ew_all = np.exp(w)
di = np.zeros((n,n))
tiny = 10**(-100.)
diff_thres = 10**(-4.)
for i0 in range(n-1):
i1,i2 = i1i2[i0,0],i1i2[i0,1]
fi0 = fi[i1:i2]
for j0 in range(i0+1,n):
j1,j2 = i1i2[j0,0],i1i2[j0,1]
fj0 = fi[j1:j2]
# fit h1[A] and h2[B] (eh = np.exp(h), ew = np.exp(w))
ew = ew_all[i1:i2,j1:j2]
diff = diff_thres + 1.
# initial value
eh1 = np.full(mx[i0],1./mx[i0])
            eh2 = np.full(mx[j0],1./mx[j0])
for iloop in range(100):
eh_ew1 = eh2.dot(ew.T)
eh_ew2 = eh1.dot(ew)
eh1_new = fi0/eh_ew1
eh1_new /= eh1_new.sum()
                eh2_new = fj0/eh_ew2
eh2_new /= eh2_new.sum()
diff = max(np.max(np.abs(eh1_new - eh1)),np.max(np.abs(eh2_new - eh2)))
eh1,eh2 = eh1_new,eh2_new
if diff < diff_thres: break
# direct information
            pdir = ew*np.outer(eh1, eh2)
pdir /= pdir.sum()
            di[i0,j0] = (pdir*np.log((pdir+tiny)/np.outer(fi0+tiny,fj0+tiny))).sum()
# fill the lower triangular part
di = di+di.T
return w,di
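# Illustrative usage (a minimal sketch, not part of the original script):
#   s0 = np.random.randint(0, 4, size=(200, 10))   # 200 sequences, 10 positions
#   w_dca, di = dca(s0, theta=0.2, pseudo_weight=0.5)
#   di[i, j] is the direct information between positions i and j.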
| 30.125668
| 101
| 0.454158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,690
| 0.23875
|
4dcff13d4501aa2f3c3df9d643bf2c4ada7cfd82
| 335
|
py
|
Python
|
src/test/resources/script/jython/testReturnString.py
|
adchilds/jythonutil
|
24e6b945cf7474358be1f43e0a72f37411289e39
|
[
"CNRI-Jython"
] | 5
|
2016-02-05T19:44:57.000Z
|
2017-05-26T10:26:29.000Z
|
src/test/resources/script/jython/testReturnString.py
|
adchilds/jythonutil
|
24e6b945cf7474358be1f43e0a72f37411289e39
|
[
"CNRI-Jython"
] | 1
|
2017-02-03T06:19:21.000Z
|
2017-02-11T03:55:55.000Z
|
src/test/resources/script/jython/testReturnString.py
|
adchilds/jythonutil
|
24e6b945cf7474358be1f43e0a72f37411289e39
|
[
"CNRI-Jython"
] | null | null | null |
import sys
if __name__ == '__main__':
# Set the defaults
a = ''
b = ''
# If arguments were passed to this script, use those
try:
a = sys.argv[1]
b = sys.argv[2]
except Exception:
pass
# Sets the result to the longer of the two Strings
result = a if len(a) > len(b) else b
| 19.705882
| 56
| 0.552239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 134
| 0.4
|
4dd0b98da97f43f66eaf8f6486394d5b6746b436
| 5,050
|
py
|
Python
|
scraper.py
|
quake0day/chessreview
|
1cb1aa6689f2db46546da9b1bf328da25b1b67ba
|
[
"Apache-2.0"
] | null | null | null |
scraper.py
|
quake0day/chessreview
|
1cb1aa6689f2db46546da9b1bf328da25b1b67ba
|
[
"Apache-2.0"
] | null | null | null |
scraper.py
|
quake0day/chessreview
|
1cb1aa6689f2db46546da9b1bf328da25b1b67ba
|
[
"Apache-2.0"
] | null | null | null |
"""
PGN Scraper is a small program which downloads each of a user's archived games from chess.com and stores them in a pgn file.
When run, the user is asked for the account name to scrape and for the game types to keep.
The scraper only downloads games of the correct type.
Supported types are: bullet, rapid, blitz
rated, unrated
                     standard chess, other rules (chess960, oddchess, etc.)
"""
from datetime import datetime
import json
import urllib.request
import os
def CheckFileName(file_name):
"""
    This function checks whether a file named file_name already exists. If it does, an error message is printed and the script aborts.
"""
if os.path.isfile(os.getcwd()+f"/{file_name}"):
print(f"Error: A file named '{file_name}' already exists.")
print("Exiting...")
quit()
def GameTypeTrue(game,game_type,rated,rules):
"""
    This function checks whether the game matches the selected game_type, rated and rules filters and returns True or False.
"""
# Check if game is of the correct type
for type in game_type:
for ra in rated:
for ru in rules:
if (game["time_class"] == type) and (game["rated"] == ra) and ( (game["rules"] == "chess") == ru):
return True
# If not correct type return False
return False
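# Illustrative call (a sketch; the dict fields are inferred from the usage
# above, not from the chess.com API documentation):
#   game = {"time_class": "blitz", "rated": True, "rules": "chess"}
#   GameTypeTrue(game, ["blitz"], [True], [True])   # -> True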
def initScrape():
"""
    This function sets up the scraping parameters, such as the account name and game type.
"""
# Input account name
acc_name = input("Enter account name: ").strip()
# Check if acc_name is empty
if bool(acc_name) == False:
print("Error: Empty account name!")
quit()
# Input game type
#game_type_code = input("Enter game type [1] All (default), [2] Rapid, [3] Blitz, [4] Bullet, [5] Rapid and Blitz: ").strip()
# If game_type_code is empty set to 1
#if bool(game_type_code) == False:
game_type_code = "1"
# Create dictionary for different game type options und apply input
game_type_dict = {
"1" : ["bullet", "blitz", "rapid"],
"2" : ["rapid"],
"3" : ["blitz"],
"4" : ["bullet"],
"5" : ["blitz", "rapid"]
}
    game_type = game_type_dict[game_type_code]
# Input rated/unrated
#rated_code = input("Consider [1] only rated games (default), [2] only unrated or [3] all games: ").strip()
# If rated_code is empty set to 1
#if bool(rated_code) == False:
rated_code = "1"
# Create dictionary for rated/unraked and apply input
rated_dict = {
"1" : [True],
"2" : [False],
"3" : [True, False]
}
# try:
rated = rated_dict["3"]
# except KeyError:
# print("Error: Invalid input!\nExiting...")
# quit()
# Input rules ("chess"/other)
# rules_code = input("Consider [1] only standard chess (default), [2] only other modes (oddchess, bughouse etc.) or [3] any type: ").strip()
# If rules_code is empty set to 1
# if bool(rules_code) == False:
rules_code = "1"
# Create dictionary for rules and apply input
rules_dict = {
"1" : [True],
"2" : [False],
"3" : [True, False]
}
#try:
rules = rules_dict[rules_code]
# except KeyError:
# print("Error: Invalid input!\nExiting...")
# quit()
# Print warning if only rated and only other rules are selected
if (rated_code == "1") and (rules_code == "2"):
print("Warning: You selected only rated AND only other chess modes!")
print(" Other chess modes are often unrated!")
return [acc_name, game_type, rated, rules]
def beginScrape(params):
"""
The downloading of the PGN archives happens here.
The file is saved as "username_YYYY-MM-dd.pgn"
"""
# Passing the predefined parameters
acc_name = params[0]
game_type = params[1]
rated = params[2]
rules = params[3]
# Create name of pgn file
now = datetime.now()
date = now.strftime("%Y-%m-%d")
game_type_string = "_".join(game_type)
file_name = f"{acc_name}_{date}_{game_type_string}.pgn"
# Check if file already exists
CheckFileName(file_name)
# Run the request, check games for type and write correct ones to file
with urllib.request.urlopen(f"https://api.chess.com/pub/player/{acc_name}/games/archives") as url:
archives = list(dict(json.loads(url.read().decode()))["archives"])
for archive in archives:
with urllib.request.urlopen(archive) as url:
games = list(dict(json.loads(url.read().decode()))["games"])
for game in games:
if GameTypeTrue(game,game_type,rated,rules):
with open(file_name, "a") as text_file:
print(game["pgn"], file=text_file)
print("\n", file=text_file)
def main():
"""
    Scrape PGN files from chess.com.
"""
params = initScrape()
beginScrape(params)
if __name__ == '__main__':
main()
| 31.36646
| 143
| 0.60396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,837
| 0.561782
|
4dd0f6aca6f1e8e85ab78942074e05e47cb24566
| 2,117
|
py
|
Python
|
testpro1/DB_handler_jjd.py
|
dongkakika/OXS
|
95166365fb5e35155af3b8de6859ec87f3d9ca78
|
[
"MIT"
] | 4
|
2020-04-22T08:42:01.000Z
|
2021-07-31T19:28:51.000Z
|
testpro1/DB_handler_jjd.py
|
dongkakika/OXS
|
95166365fb5e35155af3b8de6859ec87f3d9ca78
|
[
"MIT"
] | null | null | null |
testpro1/DB_handler_jjd.py
|
dongkakika/OXS
|
95166365fb5e35155af3b8de6859ec87f3d9ca78
|
[
"MIT"
] | null | null | null |
import sqlite3
import codecs  # for reading Hangul (Korean) text
import os
# Read the title info
f = codecs.open("jjd_info_title.txt", "r")
title_list = []
while True:
    line = f.readline()  # read one line at a time
if not line: break # break the loop when it's End Of File
title_list.append(line) # split the line and append it to list
f.close()
# Read the date info
f = codecs.open("jjd_info_date.txt", "r")
date_list = []
while True:
    line = f.readline()  # read one line at a time
if not line: break # break the loop when it's End Of File
date_list.append(line) # split the line and append it to list
f.close()
# Read the view-count info
f = codecs.open("jjd_info_view.txt", "r")
view_list = []
while True:
line = f.readline()
if not line: break
view_list.append(line)
f.close()
# Read the href (link) info
f = codecs.open("jjd_info_href.txt", "r")
href_list = []
while True:
line = f.readline()
if not line: break
href_list.append(line)
f.close()
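# The four read loops above are identical; a consolidated sketch (illustrative
# only -- 'read_lines' is not part of the original script):
# def read_lines(path):
#     with codecs.open(path, "r") as fh:
#         return [line for line in fh]
# title_list = read_lines("jjd_info_title.txt")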
################################################################################
###################################### DB ######################################
# the commented 'print' lines below were only for inspecting the data structure
#print("saved data(1) : ", list[0][0])
#print("saved data(2) : ", list[1])
# connect 'db.sqlite3' in the django folder and manipulate it
con = sqlite3.connect("db.sqlite3")
cur = con.cursor() # use 'cursor' to use DB
# the CREATE TABLE command below is not needed here and is left commented out
# cur.execute("CREATE TABLE if not exists website1_crawlingdata(Name text, Period text);")
total_list = []
for i in range(len(date_list)):
temp = [str(i+1), title_list[i], date_list[i], view_list[i], href_list[i]]
total_list.append(temp)
# print(total_list)
cur.execute("delete from website1_jjd_info;")
idx = 0  # index variable for walking the list
while idx < len(date_list):
cur.execute("INSERT INTO website1_jjd_info VALUES(?, ?, ?, ?, ?);", total_list[idx])
    # INSERT each row of total_list into the DB table
idx += 1
con.commit()  # the new rows are persisted to the DB on commit
idx = 0
con.close()
| 28.608108
| 90
| 0.600378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,233
| 0.552172
|
4dd104cc2e6c9e4bdd3ba911a3d5a31df0366e7f
| 429
|
py
|
Python
|
scripts/regression_tests.py
|
zhangxaochen/Opt
|
7f1af802bfc84cc9ef1adb9facbe4957078f529a
|
[
"MIT"
] | 260
|
2017-03-02T19:57:51.000Z
|
2022-01-21T03:52:03.000Z
|
scripts/regression_tests.py
|
zhangxaochen/Opt
|
7f1af802bfc84cc9ef1adb9facbe4957078f529a
|
[
"MIT"
] | 102
|
2017-03-03T00:42:56.000Z
|
2022-03-30T14:15:20.000Z
|
scripts/regression_tests.py
|
zhangxaochen/Opt
|
7f1af802bfc84cc9ef1adb9facbe4957078f529a
|
[
"MIT"
] | 71
|
2017-03-02T20:22:33.000Z
|
2022-01-02T03:49:04.000Z
|
from opt_utils import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--skip_compilation", action='store_true', help="skip compilation")
args = parser.parse_args()
if not args.skip_compilation:
compile_all_opt_examples()
for example in all_examples:
args = []
output = run_example(example, args, True).decode('ascii')
with open(example + ".log", "w") as text_file:
text_file.write(output)
| 28.6
| 93
| 0.748252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.16317
|
4dd688bf34007f2b88b7cc72d6792e3f5c02e4ad
| 801
|
py
|
Python
|
rec/migrations/0005_auto_20200922_1701.py
|
lpkyrius/rg1
|
6132ec5cd8db86088f8635f2e12ce6bf16aeff8e
|
[
"MIT"
] | null | null | null |
rec/migrations/0005_auto_20200922_1701.py
|
lpkyrius/rg1
|
6132ec5cd8db86088f8635f2e12ce6bf16aeff8e
|
[
"MIT"
] | 2
|
2020-09-16T14:06:34.000Z
|
2020-09-16T18:14:26.000Z
|
rec/migrations/0005_auto_20200922_1701.py
|
lpkyrius/rg1
|
6132ec5cd8db86088f8635f2e12ce6bf16aeff8e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-09-22 20:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rec', '0004_auto_20200922_1633'),
]
operations = [
migrations.AlterField(
model_name='convenio',
name='cnpj',
field=models.CharField(blank=True, default=' ', max_length=14, null=True),
),
migrations.AlterField(
model_name='convenio',
name='obs',
field=models.CharField(blank=True, default=' ', max_length=300, null=True),
),
migrations.AlterField(
model_name='convenio',
name='telefone',
field=models.CharField(blank=True, default=' ', max_length=100, null=True),
),
]
| 27.62069
| 87
| 0.574282
| 708
| 0.883895
| 0
| 0
| 0
| 0
| 0
| 0
| 137
| 0.171036
|
4dd73302eae1ae2e039d31c3cb2e7f24834961a5
| 6,452
|
py
|
Python
|
deeppavlov/deep.py
|
cclauss/DeepPavlov
|
8726173c92994b3f789790b5879052d2f7953f47
|
[
"Apache-2.0"
] | 3
|
2020-04-16T04:25:10.000Z
|
2021-05-07T23:04:43.000Z
|
deeppavlov/deep.py
|
sachinsingh3107/Deeppavlov_Chatbot
|
f10b9485c118cdec69e73c89833a1a5a164404de
|
[
"Apache-2.0"
] | 12
|
2020-01-28T22:14:04.000Z
|
2022-02-10T00:10:17.000Z
|
deeppavlov/deep.py
|
sachinsingh3107/Deeppavlov_Chatbot
|
f10b9485c118cdec69e73c89833a1a5a164404de
|
[
"Apache-2.0"
] | 1
|
2021-02-05T13:01:48.000Z
|
2021-02-05T13:01:48.000Z
|
"""
Copyright 2017 Neural Networks and Deep Learning lab, MIPT
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from logging import getLogger
from deeppavlov.core.commands.infer import interact_model, predict_on_stream
from deeppavlov.core.commands.train import train_evaluate_model_from_config
from deeppavlov.core.common.cross_validation import calc_cv_score
from deeppavlov.core.common.file import find_config
from deeppavlov.download import deep_download
from deeppavlov.utils.alexa.server import run_alexa_default_agent
from deeppavlov.utils.alice import start_alice_server
from deeppavlov.utils.ms_bot_framework.server import run_ms_bf_default_agent
from deeppavlov.utils.pip_wrapper import install_from_config
from deeppavlov.utils.server.server import start_model_server
from deeppavlov.utils.telegram.telegram_ui import interact_model_by_telegram
log = getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument("mode", help="select a mode, train or interact", type=str,
choices={'train', 'evaluate', 'interact', 'predict', 'interactbot', 'interactmsbot',
'alexa', 'riseapi', 'download', 'install', 'crossval'})
parser.add_argument("config_path", help="path to a pipeline json config", type=str)
parser.add_argument("-e", "--start-epoch-num", dest="start_epoch_num", default=None,
help="Start epoch number", type=int)
parser.add_argument("--recursive", action="store_true", help="Train nested configs")
parser.add_argument("-b", "--batch-size", dest="batch_size", default=1, help="inference batch size", type=int)
parser.add_argument("-f", "--input-file", dest="file_path", default=None, help="Path to the input file", type=str)
parser.add_argument("-d", "--download", action="store_true", help="download model components")
parser.add_argument("--folds", help="number of folds", type=int, default=5)
parser.add_argument("-t", "--token", default=None, help="telegram bot token", type=str)
parser.add_argument("-i", "--ms-id", default=None, help="microsoft bot framework app id", type=str)
parser.add_argument("-s", "--ms-secret", default=None, help="microsoft bot framework app secret", type=str)
parser.add_argument("--multi-instance", action="store_true", help="allow rising of several instances of the model")
parser.add_argument("--stateful", action="store_true", help="interact with a stateful model")
parser.add_argument("--no-default-skill", action="store_true", help="do not wrap with default skill")
parser.add_argument("--https", action="store_true", help="run model in https mode")
parser.add_argument("--key", default=None, help="ssl key", type=str)
parser.add_argument("--cert", default=None, help="ssl certificate", type=str)
parser.add_argument("-p", "--port", default=None, help="api port", type=str)
parser.add_argument("--api-mode", help="rest api mode: 'basic' with batches or 'alice' for Yandex.Dialogs format",
type=str, default='basic', choices={'basic', 'alice'})
def main():
args = parser.parse_args()
pipeline_config_path = find_config(args.config_path)
https = args.https
ssl_key = args.key
ssl_cert = args.cert
if args.download or args.mode == 'download':
deep_download(pipeline_config_path)
multi_instance = args.multi_instance
stateful = args.stateful
if args.mode == 'train':
train_evaluate_model_from_config(pipeline_config_path, recursive=args.recursive,
start_epoch_num=args.start_epoch_num)
elif args.mode == 'evaluate':
train_evaluate_model_from_config(pipeline_config_path, to_train=False, start_epoch_num=args.start_epoch_num)
elif args.mode == 'interact':
interact_model(pipeline_config_path)
elif args.mode == 'interactbot':
token = args.token
interact_model_by_telegram(model_config=pipeline_config_path,
token=token,
default_skill_wrap=not args.no_default_skill)
elif args.mode == 'interactmsbot':
ms_id = args.ms_id
ms_secret = args.ms_secret
run_ms_bf_default_agent(model_config=pipeline_config_path,
app_id=ms_id,
app_secret=ms_secret,
multi_instance=multi_instance,
stateful=stateful,
port=args.port,
https=https,
ssl_key=ssl_key,
ssl_cert=ssl_cert,
default_skill_wrap=not args.no_default_skill)
elif args.mode == 'alexa':
run_alexa_default_agent(model_config=pipeline_config_path,
multi_instance=multi_instance,
stateful=stateful,
port=args.port,
https=https,
ssl_key=ssl_key,
ssl_cert=ssl_cert,
default_skill_wrap=not args.no_default_skill)
elif args.mode == 'riseapi':
alice = args.api_mode == 'alice'
if alice:
start_alice_server(pipeline_config_path, https, ssl_key, ssl_cert, port=args.port)
else:
start_model_server(pipeline_config_path, https, ssl_key, ssl_cert, port=args.port)
elif args.mode == 'predict':
predict_on_stream(pipeline_config_path, args.batch_size, args.file_path)
elif args.mode == 'install':
install_from_config(pipeline_config_path)
elif args.mode == 'crossval':
if args.folds < 2:
log.error('Minimum number of Folds is 2')
else:
n_folds = args.folds
calc_cv_score(pipeline_config_path, n_folds=n_folds, is_loo=False)
if __name__ == "__main__":
main()
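# Illustrative invocations (a sketch based on the argument parser above; the
# exact entry point may differ in a real install):
#   python deep.py train my_config.json -d
#   python deep.py riseapi my_config.json -p 5000 --api-mode alice
#   python deep.py interactbot my_config.json -t <telegram_token>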
| 47.094891
| 116
| 0.66739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,780
| 0.275883
|
4dd83f2bdedcce578bc2f4f15b92a56d3b2455a9
| 3,345
|
py
|
Python
|
test/test_cfg/read_grammar.py
|
wannaphong/pycfg
|
ffa67958ed1c3deb73cadb3969ac086336fb1269
|
[
"MIT"
] | 8
|
2017-12-18T08:51:27.000Z
|
2020-11-26T02:21:06.000Z
|
test/test_cfg/read_grammar.py
|
wannaphong/pycfg
|
ffa67958ed1c3deb73cadb3969ac086336fb1269
|
[
"MIT"
] | 1
|
2020-01-09T15:41:09.000Z
|
2020-01-09T15:41:09.000Z
|
test/test_cfg/read_grammar.py
|
wannaphong/pycfg
|
ffa67958ed1c3deb73cadb3969ac086336fb1269
|
[
"MIT"
] | 6
|
2017-06-12T16:58:40.000Z
|
2019-11-27T06:55:07.000Z
|
'''Read grammar specifications for test cases.'''
import re
import sys
from pprint import pprint
from cfg.core import ContextFreeGrammar, Terminal, Nonterminal, Marker
from cfg.table import END_MARKER, ParseTableNormalForm
class GrammarTestCase(object):
'''Contains a CFG and optionally a parse table.'''
def __init__(self, sections, filename):
self._sections = sections
self.filename = filename
def __getattr__(self, name):
return self._sections.get(name)
def __str__(self):
return self.filename + '\n' + '\n'.join(self._section_strs())
def _section_strs(self):
for k, v in self._sections.iteritems():
yield '''\
==%s==
%s
''' % (k.upper(), v)
label_re = re.compile(r'^\s*==\s*(.*?)\s*==\s*$')
comment_re = re.compile(r'^([^#]*)')
shift_re = re.compile(r'^sh(\d+)$')
reduce_re = re.compile(r'^re(\d+)$')
def read_test_case(finname):
'''Read a grammar test case from a file.'''
label = 'grammar'
sections = {}
with open(finname, 'r') as fin:
for line in filter(None, map(lambda s: comment_re.match(s).group(1).strip(), fin)):
m = label_re.match(line)
if m:
label = m.group(1).lower()
else:
sections.setdefault(label, []).append(line)
def retype(s, t):
if s in sections:
sections[s] = t(sections[s])
retype('grammar', read_grammar)
def retype_table(lines):
return read_table(lines, sections['grammar'])
retype('table', retype_table)
retype('tablea', retype_table)
retype('tableb', retype_table)
retype('result', read_bool)
return GrammarTestCase(sections, finname)
def read_grammar(lines):
return ContextFreeGrammar('\n'.join(lines))
def read_table(lines, grammar):
terminals = grammar.terminals
nonterminals = grammar.nonterminals
T = ParseTableNormalForm()
for line in lines:
left, right = line.split('=')
q, X = left.split(',')
q = int(q)
is_terminal = False
if Terminal(X) in terminals:
is_terminal = True
X = Terminal(X)
elif Marker(X) == END_MARKER:
is_terminal = True
X = END_MARKER
if is_terminal:
actions = right.split(',')
for a in actions:
m = shift_re.match(a)
if m:
T.set_gotoshift(q, X, int(m.group(1)))
else:
m = reduce_re.match(a)
if m:
T.add_reduction(q, X, int(m.group(1)))
elif a == 'acc':
T.set_accept(q, X)
else:
raise ValueError('cell value %r not recognized' % a)
elif Nonterminal(X) in nonterminals:
T.set_gotoshift(q, Nonterminal(X), int(right))
else:
raise ValueError('a symbol in the table is not in the grammar at %s,%s' % (q, X))
return T
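# Illustrative table-section lines accepted by read_table (a sketch inferred
# from the regexes above; writing the end marker as '$' is an assumption):
#   0,a=sh3       shift to state 3 on terminal 'a'
#   3,$=re1,acc   reduce by rule 1, or accept, on the end marker
#   0,S=5         goto state 5 on nonterminal S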
def read_bool(lines):
s = '\n'.join(lines).strip().lower()
if s == 'true': return True
elif s == 'false': return False
else: return bool(int(s))
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.stderr.write('Usage: read_grammar.py <file>\n')
sys.exit(1)
print read_test_case(sys.argv[1])
| 31.261682
| 93
| 0.564425
| 497
| 0.14858
| 126
| 0.037668
| 0
| 0
| 0
| 0
| 448
| 0.133931
|
4dd85a981091c632d855dbb819f62a7e6d570ba9
| 59,286
|
py
|
Python
|
pype/plugins/global/publish/extract_review.py
|
barklaya/pype
|
db3f708b1918d4f81951b36e1575eb3ecf0551c5
|
[
"MIT"
] | null | null | null |
pype/plugins/global/publish/extract_review.py
|
barklaya/pype
|
db3f708b1918d4f81951b36e1575eb3ecf0551c5
|
[
"MIT"
] | null | null | null |
pype/plugins/global/publish/extract_review.py
|
barklaya/pype
|
db3f708b1918d4f81951b36e1575eb3ecf0551c5
|
[
"MIT"
] | null | null | null |
import os
import re
import copy
import json
import pyblish.api
import clique
import pype.api
import pype.lib
class ExtractReview(pyblish.api.InstancePlugin):
"""Extracting Review mov file for Ftrack
Compulsory attribute of representation is tags list with "review",
otherwise the representation is ignored.
    All new representations are created and encoded by ffmpeg following
presets found in `pype-config/presets/plugins/global/
publish.json:ExtractReview:outputs`.
"""
label = "Extract Review"
order = pyblish.api.ExtractorOrder + 0.02
families = ["review"]
hosts = ["nuke", "maya", "shell", "nukestudio", "premiere", "harmony"]
# Supported extensions
image_exts = ["exr", "jpg", "jpeg", "png", "dpx"]
video_exts = ["mov", "mp4"]
supported_exts = image_exts + video_exts
# FFmpeg tools paths
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# Preset attributes
profiles = None
# Legacy attributes
outputs = {}
ext_filter = []
to_width = 1920
to_height = 1080
def process(self, instance):
# ffmpeg doesn't support multipart exrs
if instance.data.get("multipartExr") is True:
instance_label = (
getattr(instance, "label", None)
or instance.data.get("label")
or instance.data.get("name")
)
self.log.info((
"Instance \"{}\" contain \"multipartExr\". Skipped."
).format(instance_label))
return
# Use legacy processing when `profiles` is not set.
if self.profiles is None:
return self.legacy_process(instance)
# Run processing
self.main_process(instance)
# Make sure cleanup happens and pop representations with "delete" tag.
for repre in tuple(instance.data["representations"]):
tags = repre.get("tags") or []
if "delete" in tags:
instance.data["representations"].remove(repre)
def main_process(self, instance):
host_name = os.environ["AVALON_APP"]
task_name = os.environ["AVALON_TASK"]
family = self.main_family_from_instance(instance)
self.log.info("Host: \"{}\"".format(host_name))
self.log.info("Task: \"{}\"".format(task_name))
self.log.info("Family: \"{}\"".format(family))
profile = self.find_matching_profile(
host_name, task_name, family
)
if not profile:
self.log.info((
"Skipped instance. None of profiles in presets are for"
" Host: \"{}\" | Family: \"{}\" | Task \"{}\""
).format(host_name, family, task_name))
return
self.log.debug("Matching profile: \"{}\"".format(json.dumps(profile)))
instance_families = self.families_from_instance(instance)
_profile_outputs = self.filter_outputs_by_families(
profile, instance_families
)
if not _profile_outputs:
self.log.info((
"Skipped instance. All output definitions from selected"
" profile does not match to instance families. \"{}\""
).format(str(instance_families)))
return
# Store `filename_suffix` to save arguments
profile_outputs = []
for filename_suffix, definition in _profile_outputs.items():
definition["filename_suffix"] = filename_suffix
profile_outputs.append(definition)
# Loop through representations
for repre in tuple(instance.data["representations"]):
tags = repre.get("tags") or []
if "review" not in tags or "thumbnail" in tags:
continue
input_ext = repre["ext"]
if input_ext.startswith("."):
input_ext = input_ext[1:]
if input_ext not in self.supported_exts:
self.log.info(
"Representation has unsupported extension \"{}\"".format(
input_ext
)
)
continue
# Filter output definition by representation tags (optional)
outputs = self.filter_outputs_by_tags(profile_outputs, tags)
if not outputs:
self.log.info((
"Skipped representation. All output definitions from"
" selected profile does not match to representation's"
" tags. \"{}\""
).format(str(tags)))
continue
for _output_def in outputs:
output_def = copy.deepcopy(_output_def)
# Make sure output definition has "tags" key
if "tags" not in output_def:
output_def["tags"] = []
# Create copy of representation
new_repre = copy.deepcopy(repre)
# Remove "delete" tag from new repre if there is
if "delete" in new_repre["tags"]:
new_repre["tags"].remove("delete")
# Add additional tags from output definition to representation
for tag in output_def["tags"]:
if tag not in new_repre["tags"]:
new_repre["tags"].append(tag)
self.log.debug(
"New representation tags: `{}`".format(new_repre["tags"])
)
temp_data = self.prepare_temp_data(instance, repre, output_def)
ffmpeg_args = self._ffmpeg_arguments(
output_def, instance, new_repre, temp_data
)
subprcs_cmd = " ".join(ffmpeg_args)
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
output = pype.api.subprocess(subprcs_cmd)
self.log.debug("Output: {}".format(output))
output_name = output_def["filename_suffix"]
if temp_data["without_handles"]:
output_name += "_noHandles"
new_repre.update({
"name": output_def["filename_suffix"],
"outputName": output_name,
"outputDef": output_def,
"frameStartFtrack": temp_data["output_frame_start"],
"frameEndFtrack": temp_data["output_frame_end"]
})
                # Force-pop these keys if they are in new repre
new_repre.pop("preview", None)
new_repre.pop("thumbnail", None)
# adding representation
self.log.debug(
"Adding new representation: {}".format(new_repre)
)
instance.data["representations"].append(new_repre)
def input_is_sequence(self, repre):
"""Deduce from representation data if input is sequence."""
        # TODO GLOBAL ISSUE - Find a better way to find out if the input
        # is a sequence. Issues (in theory):
        # - there may be multiple files that are not a sequence
# - remainders are not checked at all
# - there can be more than one collection
return isinstance(repre["files"], (list, tuple))
def prepare_temp_data(self, instance, repre, output_def):
"""Prepare dictionary with values used across extractor's process.
All data are collected from instance, context, origin representation
and output definition.
        There are a few required keys in Instance data: "frameStart", "frameEnd"
and "fps".
Args:
instance (Instance): Currently processed instance.
repre (dict): Representation from which new representation was
copied.
output_def (dict): Definition of output of this plugin.
Returns:
dict: All data which are used across methods during process.
Their values should not change during process but new keys
with values may be added.
"""
frame_start = instance.data["frameStart"]
handle_start = instance.data.get(
"handleStart",
instance.context.data["handleStart"]
)
frame_end = instance.data["frameEnd"]
handle_end = instance.data.get(
"handleEnd",
instance.context.data["handleEnd"]
)
frame_start_handle = frame_start - handle_start
frame_end_handle = frame_end + handle_end
# Change output frames when output should be without handles
without_handles = bool("no-handles" in output_def["tags"])
if without_handles:
output_frame_start = frame_start
output_frame_end = frame_end
else:
output_frame_start = frame_start_handle
output_frame_end = frame_end_handle
return {
"fps": float(instance.data["fps"]),
"frame_start": frame_start,
"frame_end": frame_end,
"handle_start": handle_start,
"handle_end": handle_end,
"frame_start_handle": frame_start_handle,
"frame_end_handle": frame_end_handle,
"output_frame_start": int(output_frame_start),
"output_frame_end": int(output_frame_end),
"pixel_aspect": instance.data.get("pixelAspect", 1),
"resolution_width": instance.data.get("resolutionWidth"),
"resolution_height": instance.data.get("resolutionHeight"),
"origin_repre": repre,
"input_is_sequence": self.input_is_sequence(repre),
"without_handles": without_handles
}
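    # Worked example (hypothetical values): with frameStart=1001, frameEnd=1100,
    # handleStart=10 and handleEnd=10, the range with handles becomes
    # frame_start_handle=991 .. frame_end_handle=1110; with a "no-handles" tag
    # on the output definition the output range collapses back to 1001 .. 1100.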
def _ffmpeg_arguments(self, output_def, instance, new_repre, temp_data):
"""Prepares ffmpeg arguments for expected extraction.
Prepares input and output arguments based on output definition and
input files.
Args:
output_def (dict): Currently processed output definition.
instance (Instance): Currently processed instance.
            new_repre (dict): Representation representing output of this
                process.
            temp_data (dict): Base data for a successful process.
"""
# Get FFmpeg arguments from profile presets
out_def_ffmpeg_args = output_def.get("ffmpeg_args") or {}
ffmpeg_input_args = out_def_ffmpeg_args.get("input") or []
ffmpeg_output_args = out_def_ffmpeg_args.get("output") or []
ffmpeg_video_filters = out_def_ffmpeg_args.get("video_filters") or []
ffmpeg_audio_filters = out_def_ffmpeg_args.get("audio_filters") or []
# Prepare input and output filepaths
self.input_output_paths(new_repre, output_def, temp_data)
if temp_data["input_is_sequence"]:
# Set start frame
ffmpeg_input_args.append(
"-start_number {}".format(temp_data["output_frame_start"])
)
# TODO add fps mapping `{fps: fraction}` ?
# - e.g.: {
# "25": "25/1",
# "24": "24/1",
# "23.976": "24000/1001"
# }
# Add framerate to input when input is sequence
ffmpeg_input_args.append(
"-framerate {}".format(temp_data["fps"])
)
elif temp_data["without_handles"]:
            # TODO use frames instead of `-ss`:
# `select="gte(n\,{handle_start}),setpts=PTS-STARTPTS`
# Pros:
# 1.) Python is not good at float operation
# 2.) FPS on instance may not be same as input's
start_sec = float(temp_data["handle_start"]) / temp_data["fps"]
ffmpeg_input_args.append("-ss {:0.2f}".format(start_sec))
        # Set output frames len to 1 when output is a single image
if (
temp_data["output_ext_is_image"]
and not temp_data["output_is_sequence"]
):
output_frames_len = 1
else:
output_frames_len = (
temp_data["output_frame_end"]
- temp_data["output_frame_start"]
+ 1
)
# NOTE used `-frames` instead of `-t` - should work the same way
# NOTE this also replaced `-shortest` argument
ffmpeg_output_args.append("-frames {}".format(output_frames_len))
# Add video/image input path
ffmpeg_input_args.append(
"-i \"{}\"".format(temp_data["full_input_path"])
)
        # Add audio arguments if there are any. Skipped when output is an image.
if not temp_data["output_ext_is_image"]:
audio_in_args, audio_filters, audio_out_args = self.audio_args(
instance, temp_data
)
ffmpeg_input_args.extend(audio_in_args)
ffmpeg_audio_filters.extend(audio_filters)
ffmpeg_output_args.extend(audio_out_args)
res_filters = self.rescaling_filters(temp_data, output_def, new_repre)
ffmpeg_video_filters.extend(res_filters)
ffmpeg_input_args = self.split_ffmpeg_args(ffmpeg_input_args)
lut_filters = self.lut_filters(new_repre, instance, ffmpeg_input_args)
ffmpeg_video_filters.extend(lut_filters)
# Add argument to override output file
ffmpeg_output_args.append("-y")
# NOTE This must be latest added item to output arguments.
ffmpeg_output_args.append(
"\"{}\"".format(temp_data["full_output_path"])
)
return self.ffmpeg_full_args(
ffmpeg_input_args,
ffmpeg_video_filters,
ffmpeg_audio_filters,
ffmpeg_output_args
)
def split_ffmpeg_args(self, in_args):
"""Makes sure all entered arguments are separated in individual items.
Split each argument string with " -" to identify if string contains
one or more arguments.
"""
splitted_args = []
for arg in in_args:
sub_args = arg.split(" -")
if len(sub_args) == 1:
if arg and arg not in splitted_args:
splitted_args.append(arg)
continue
for idx, arg in enumerate(sub_args):
if idx != 0:
arg = "-" + arg
if arg and arg not in splitted_args:
splitted_args.append(arg)
return splitted_args
def ffmpeg_full_args(
self, input_args, video_filters, audio_filters, output_args
):
"""Post processing of collected FFmpeg arguments.
        Just verify that output arguments do not contain video or audio
        filters, which may cause issues because of a duplicated argument entry.
        Filters found in output arguments are moved to the list they belong to.
Args:
input_args (list): All collected ffmpeg arguments with inputs.
video_filters (list): All collected video filters.
audio_filters (list): All collected audio filters.
output_args (list): All collected ffmpeg output arguments with
output filepath.
Returns:
list: Containing all arguments ready to run in subprocess.
"""
output_args = self.split_ffmpeg_args(output_args)
        video_args_identifiers = ["-vf", "-filter:v"]
        audio_args_identifiers = ["-af", "-filter:a"]
        for arg in tuple(output_args):
            for identifier in video_args_identifiers:
                if identifier in arg:
                    output_args.remove(arg)
                    arg = arg.replace(identifier, "").strip()
                    video_filters.append(arg)
            for identifier in audio_args_identifiers:
                if identifier in arg:
                    output_args.remove(arg)
                    arg = arg.replace(identifier, "").strip()
                    audio_filters.append(arg)
all_args = []
all_args.append(self.ffmpeg_path)
all_args.extend(input_args)
if video_filters:
all_args.append("-filter:v {}".format(",".join(video_filters)))
if audio_filters:
all_args.append("-filter:a {}".format(",".join(audio_filters)))
all_args.extend(output_args)
return all_args
def input_output_paths(self, new_repre, output_def, temp_data):
"""Deduce input nad output file paths based on entered data.
Input may be sequence of images, video file or single image file and
same can be said about output, this method helps to find out what
their paths are.
        It is validated that the output directory exists, and it is created
        if not.
        During the process the "files", "stagingDir", "ext" and
        "sequence_file" (if output is a sequence) keys are set on the new
        representation.
"""
staging_dir = new_repre["stagingDir"]
repre = temp_data["origin_repre"]
if temp_data["input_is_sequence"]:
collections = clique.assemble(repre["files"])[0]
full_input_path = os.path.join(
staging_dir,
collections[0].format("{head}{padding}{tail}")
)
filename = collections[0].format("{head}")
if filename.endswith("."):
filename = filename[:-1]
# Make sure to have full path to one input file
full_input_path_single_file = os.path.join(
staging_dir, repre["files"][0]
)
else:
full_input_path = os.path.join(
staging_dir, repre["files"]
)
filename = os.path.splitext(repre["files"])[0]
# Make sure to have full path to one input file
full_input_path_single_file = full_input_path
filename_suffix = output_def["filename_suffix"]
output_ext = output_def.get("ext")
        # Use input extension if output definition does not specify it
if output_ext is None:
output_ext = os.path.splitext(full_input_path)[1]
# TODO Define if extension should have dot or not
if output_ext.startswith("."):
output_ext = output_ext[1:]
# Store extension to representation
new_repre["ext"] = output_ext
self.log.debug("New representation ext: `{}`".format(output_ext))
        # Output is an image file sequence with frames
output_ext_is_image = bool(output_ext in self.image_exts)
output_is_sequence = bool(
output_ext_is_image
and "sequence" in output_def["tags"]
)
if output_is_sequence:
new_repre_files = []
frame_start = temp_data["output_frame_start"]
frame_end = temp_data["output_frame_end"]
filename_base = "{}_{}".format(filename, filename_suffix)
            # Temporary template for frame filling. Example output:
# "basename.%04d.exr" when `frame_end` == 1001
repr_file = "{}.%{:0>2}d.{}".format(
filename_base, len(str(frame_end)), output_ext
)
for frame in range(frame_start, frame_end + 1):
new_repre_files.append(repr_file % frame)
new_repre["sequence_file"] = repr_file
full_output_path = os.path.join(
staging_dir, filename_base, repr_file
)
else:
repr_file = "{}_{}.{}".format(
filename, filename_suffix, output_ext
)
full_output_path = os.path.join(staging_dir, repr_file)
new_repre_files = repr_file
# Store files to representation
new_repre["files"] = new_repre_files
        # Make sure stagingDir exists
staging_dir = os.path.normpath(os.path.dirname(full_output_path))
if not os.path.exists(staging_dir):
self.log.debug("Creating dir: {}".format(staging_dir))
os.makedirs(staging_dir)
        # Store stagingDir to representation
new_repre["stagingDir"] = staging_dir
# Store paths to temp data
temp_data["full_input_path"] = full_input_path
temp_data["full_input_path_single_file"] = full_input_path_single_file
temp_data["full_output_path"] = full_output_path
# Store information about output
temp_data["output_ext_is_image"] = output_ext_is_image
temp_data["output_is_sequence"] = output_is_sequence
self.log.debug("Input path {}".format(full_input_path))
self.log.debug("Output path {}".format(full_output_path))
def audio_args(self, instance, temp_data):
"""Prepares FFMpeg arguments for audio inputs."""
audio_in_args = []
audio_filters = []
audio_out_args = []
audio_inputs = instance.data.get("audio")
if not audio_inputs:
return audio_in_args, audio_filters, audio_out_args
for audio in audio_inputs:
# NOTE modified, always was expected "frameStartFtrack" which is
# STRANGE?!!! There should be different key, right?
# TODO use different frame start!
offset_seconds = 0
frame_start_ftrack = instance.data.get("frameStartFtrack")
if frame_start_ftrack is not None:
offset_frames = frame_start_ftrack - audio["offset"]
offset_seconds = offset_frames / temp_data["fps"]
if offset_seconds > 0:
audio_in_args.append(
"-ss {}".format(offset_seconds)
)
elif offset_seconds < 0:
audio_in_args.append(
"-itsoffset {}".format(abs(offset_seconds))
)
audio_in_args.append("-i \"{}\"".format(audio["filename"]))
# NOTE: These were changed from input to output arguments.
# NOTE: value in "-ac" was hardcoded to 2, changed to audio inputs len.
# Need to merge audio if there are more than 1 input.
if len(audio_inputs) > 1:
audio_out_args.append("-filter_complex amerge")
audio_out_args.append("-ac {}".format(len(audio_inputs)))
return audio_in_args, audio_filters, audio_out_args
def rescaling_filters(self, temp_data, output_def, new_repre):
"""Prepare vieo filters based on tags in new representation.
It is possible to add letterboxes to output video or rescale to
different resolution.
During this preparation "resolutionWidth" and "resolutionHeight" are
set to new representation.
"""
filters = []
letter_box = output_def.get("letter_box")
# Get instance data
pixel_aspect = temp_data["pixel_aspect"]
# NOTE Skipped using instance's resolution
full_input_path_single_file = temp_data["full_input_path_single_file"]
input_data = pype.lib.ffprobe_streams(full_input_path_single_file)[0]
input_width = input_data["width"]
input_height = input_data["height"]
self.log.debug("pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug("input_width: `{}`".format(input_width))
self.log.debug("input_height: `{}`".format(input_height))
        # NOTE Setting only one of `width` or `height` is not allowed
output_width = output_def.get("width")
output_height = output_def.get("height")
# Use instance resolution if output definition has not set it.
if output_width is None or output_height is None:
output_width = temp_data["resolution_width"]
output_height = temp_data["resolution_height"]
        # Use source's input resolution if the instance does not have it set.
if output_width is None or output_height is None:
self.log.debug("Using resolution from input.")
output_width = input_width
output_height = input_height
self.log.debug(
"Output resolution is {}x{}".format(output_width, output_height)
)
# Skip processing if resolution is same as input's and letterbox is
# not set
if (
output_width == input_width
and output_height == input_height
and not letter_box
and pixel_aspect == 1
):
self.log.debug(
"Output resolution is same as input's"
" and \"letter_box\" key is not set. Skipping reformat part."
)
new_repre["resolutionWidth"] = input_width
new_repre["resolutionHeight"] = input_height
return filters
# defining image ratios
input_res_ratio = (
(float(input_width) * pixel_aspect) / input_height
)
output_res_ratio = float(output_width) / float(output_height)
self.log.debug("input_res_ratio: `{}`".format(input_res_ratio))
self.log.debug("output_res_ratio: `{}`".format(output_res_ratio))
# Round ratios to 2 decimal places for comparing
input_res_ratio = round(input_res_ratio, 2)
output_res_ratio = round(output_res_ratio, 2)
# get scale factor
scale_factor_by_width = (
float(output_width) / (input_width * pixel_aspect)
)
scale_factor_by_height = (
float(output_height) / input_height
)
self.log.debug(
"scale_factor_by_with: `{}`".format(scale_factor_by_width)
)
self.log.debug(
"scale_factor_by_height: `{}`".format(scale_factor_by_height)
)
# letter_box
if letter_box:
if input_res_ratio == output_res_ratio:
letter_box /= pixel_aspect
elif input_res_ratio < output_res_ratio:
letter_box /= scale_factor_by_width
else:
letter_box /= scale_factor_by_height
scale_filter = "scale={}x{}:flags=lanczos".format(
output_width, output_height
)
top_box = (
"drawbox=0:0:iw:round((ih-(iw*(1/{})))/2):t=fill:c=black"
).format(letter_box)
bottom_box = (
"drawbox=0:ih-round((ih-(iw*(1/{0})))/2)"
":iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black"
).format(letter_box)
# Add letter box filters
filters.extend([scale_filter, "setsar=1", top_box, bottom_box])
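            # Worked letterbox example (hypothetical numbers, assuming
            # pixel_aspect == 1): with letter_box=2.35 and a 1920x1080 output,
            # each drawbox bar is round((1080 - 1920/2.35) / 2) ~= 131 px high,
            # so the visible image keeps the requested 2.35:1 aspect.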
        # scaling non-square pixels and 1920 width
if (
input_height != output_height
or input_width != output_width
or pixel_aspect != 1
):
if input_res_ratio < output_res_ratio:
self.log.debug(
"Input's resolution ratio is lower then output's"
)
width_scale = int(input_width * scale_factor_by_height)
width_half_pad = int((output_width - width_scale) / 2)
height_scale = output_height
height_half_pad = 0
else:
self.log.debug("Input is heigher then output")
width_scale = output_width
width_half_pad = 0
height_scale = int(input_height * scale_factor_by_width)
height_half_pad = int((output_height - height_scale) / 2)
self.log.debug("width_scale: `{}`".format(width_scale))
self.log.debug("width_half_pad: `{}`".format(width_half_pad))
self.log.debug("height_scale: `{}`".format(height_scale))
self.log.debug("height_half_pad: `{}`".format(height_half_pad))
filters.extend([
"scale={}x{}:flags=lanczos".format(
width_scale, height_scale
),
"pad={}:{}:{}:{}:black".format(
output_width, output_height,
width_half_pad, height_half_pad
),
"setsar=1"
])
new_repre["resolutionWidth"] = output_width
new_repre["resolutionHeight"] = output_height
return filters
def lut_filters(self, new_repre, instance, input_args):
"""Add lut file to output ffmpeg filters."""
filters = []
# baking lut file application
lut_path = instance.data.get("lutPath")
if not lut_path or "bake-lut" not in new_repre["tags"]:
return filters
# Prepare path for ffmpeg argument
lut_path = lut_path.replace("\\", "/").replace(":", "\\:")
# Remove gamma from input arguments
if "-gamma" in input_args:
input_args.remove("-gamme")
# Prepare filters
filters.append("lut3d=file='{}'".format(lut_path))
# QUESTION hardcoded colormatrix?
filters.append("colormatrix=bt601:bt709")
self.log.info("Added Lut to ffmpeg command.")
return filters
def main_family_from_instance(self, instance):
"""Returns main family of entered instance."""
family = instance.data.get("family")
if not family:
family = instance.data["families"][0]
return family
def families_from_instance(self, instance):
"""Returns all families of entered instance."""
families = []
family = instance.data.get("family")
if family:
families.append(family)
for family in (instance.data.get("families") or tuple()):
if family not in families:
families.append(family)
return families
def compile_list_of_regexes(self, in_list):
"""Convert strings in entered list to compiled regex objects."""
regexes = []
if not in_list:
return regexes
for item in in_list:
if not item:
continue
try:
regexes.append(re.compile(item))
except TypeError:
self.log.warning((
"Invalid type \"{}\" value \"{}\"."
" Expected string based object. Skipping."
).format(str(type(item)), str(item)))
return regexes
def validate_value_by_regexes(self, value, in_list):
"""Validates in any regexe from list match entered value.
Args:
in_list (list): List with regexes.
value (str): String where regexes is checked.
Returns:
            int: Returns `0` when the list is not set or is empty. Returns `1`
                when any regex matches the value and returns `-1` when none of
                the regexes match the entered value.
"""
if not in_list:
return 0
output = -1
regexes = self.compile_list_of_regexes(in_list)
for regex in regexes:
if re.match(regex, value):
output = 1
break
return output
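    # Example of the tri-state return (hypothetical inputs):
    #   validate_value_by_regexes("maya", None)             -> 0  (no filter)
    #   validate_value_by_regexes("maya", ["maya", "nuke"]) -> 1  (match)
    #   validate_value_by_regexes("maya", ["houdini"])      -> -1 (no match)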
def profile_exclusion(self, matching_profiles):
"""Find out most matching profile byt host, task and family match.
Profiles are selectivelly filtered. Each profile should have
"__value__" key with list of booleans. Each boolean represents
existence of filter for specific key (host, taks, family).
Profiles are looped in sequence. In each sequence are split into
true_list and false_list. For next sequence loop are used profiles in
true_list if there are any profiles else false_list is used.
Filtering ends when only one profile left in true_list. Or when all
existence booleans loops passed, in that case first profile from left
profiles is returned.
Args:
matching_profiles (list): Profiles with same values.
Returns:
dict: Most matching profile.
"""
self.log.info(
"Search for first most matching profile in match order:"
" Host name -> Task name -> Family."
)
# Filter all profiles with highest points value. First filter profiles
# with matching host if there are any then filter profiles by task
# name if there are any and lastly filter by family. Else use first in
# list.
idx = 0
final_profile = None
while True:
profiles_true = []
profiles_false = []
for profile in matching_profiles:
value = profile["__value__"]
# Just use first profile when idx is greater than values.
if not idx < len(value):
final_profile = profile
break
if value[idx]:
profiles_true.append(profile)
else:
profiles_false.append(profile)
if final_profile is not None:
break
if profiles_true:
matching_profiles = profiles_true
else:
matching_profiles = profiles_false
if len(matching_profiles) == 1:
final_profile = matching_profiles[0]
break
idx += 1
final_profile.pop("__value__")
return final_profile
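    # Sketch of one exclusion pass (hypothetical "__value__" lists, ordered as
    # [host match, task match, family match]):
    #   profile A: [True,  False, True ]
    #   profile B: [True,  True,  False]
    #   profile C: [False, True,  True ]
    # Index 0 keeps A and B (host matched); index 1 then keeps only B, so B
    # wins even though all three profiles scored the same number of points.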
def find_matching_profile(self, host_name, task_name, family):
""" Filter profiles by Host name, Task name and main Family.
Filtering keys are "hosts" (list), "tasks" (list), "families" (list).
        If a key is not found or is empty then it is expected to match.
Args:
profiles (list): Profiles definition from presets.
host_name (str): Current running host name.
task_name (str): Current context task name.
family (str): Main family of current Instance.
Returns:
dict/None: Return most matching profile or None if none of profiles
match at least one criteria.
"""
matching_profiles = None
if not self.profiles:
return matching_profiles
highest_profile_points = -1
        # Each profile gets 1 point for each matching filter. The profile with
        # most points is returned. For cases when more than one profile
        # matches, ordered lists of matching values are also stored.
for profile in self.profiles:
profile_points = 0
profile_value = []
# Host filtering
host_names = profile.get("hosts")
match = self.validate_value_by_regexes(host_name, host_names)
if match == -1:
self.log.debug(
"\"{}\" not found in {}".format(host_name, host_names)
)
continue
profile_points += match
profile_value.append(bool(match))
# Task filtering
task_names = profile.get("tasks")
match = self.validate_value_by_regexes(task_name, task_names)
if match == -1:
self.log.debug(
"\"{}\" not found in {}".format(task_name, task_names)
)
continue
profile_points += match
profile_value.append(bool(match))
# Family filtering
families = profile.get("families")
match = self.validate_value_by_regexes(family, families)
if match == -1:
self.log.debug(
"\"{}\" not found in {}".format(family, families)
)
continue
profile_points += match
profile_value.append(bool(match))
if profile_points < highest_profile_points:
continue
if profile_points > highest_profile_points:
matching_profiles = []
highest_profile_points = profile_points
if profile_points == highest_profile_points:
profile["__value__"] = profile_value
matching_profiles.append(profile)
if not matching_profiles:
self.log.warning((
"None of profiles match your setup."
" Host \"{}\" | Task: \"{}\" | Family: \"{}\""
).format(host_name, task_name, family))
return
if len(matching_profiles) == 1:
# Pop temporary key `__value__`
matching_profiles[0].pop("__value__")
return matching_profiles[0]
self.log.warning((
"More than one profile match your setup."
" Host \"{}\" | Task: \"{}\" | Family: \"{}\""
).format(host_name, task_name, family))
return self.profile_exclusion(matching_profiles)
def families_filter_validation(self, families, output_families_filter):
"""Determines if entered families intersect with families filters.
All family values are lowered to avoid unexpected results.
"""
if not output_families_filter:
return True
single_families = []
combination_families = []
for family_filter in output_families_filter:
if not family_filter:
continue
if isinstance(family_filter, (list, tuple)):
_family_filter = []
for family in family_filter:
if family:
_family_filter.append(family.lower())
combination_families.append(_family_filter)
else:
single_families.append(family_filter.lower())
for family in single_families:
if family in families:
return True
for family_combination in combination_families:
valid = True
for family in family_combination:
if family not in families:
valid = False
break
if valid:
return True
return False
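    # Example (hypothetical filters) with families=["render", "review"]:
    #   output_families_filter=["review"]             -> True  (single match)
    #   output_families_filter=[["render", "ftrack"]] -> False (a combination
    #                                                    needs every member)
    #   output_families_filter=None                   -> True  (no filter set)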
def filter_outputs_by_families(self, profile, families):
"""Return outputs matching input instance families.
Output definitions without families filter are marked as valid.
Args:
profile (dict): Profile from presets matching current context.
families (list): All families of current instance.
Returns:
            list: Containing all output definitions matching entered families.
"""
outputs = profile.get("outputs") or []
if not outputs:
return outputs
# lower values
# QUESTION is this valid operation?
families = [family.lower() for family in families]
filtered_outputs = {}
for filename_suffix, output_def in outputs.items():
output_filters = output_def.get("filter")
# If no filter on output preset, skip filtering and add output
            # profile for further processing
if not output_filters:
filtered_outputs[filename_suffix] = output_def
continue
families_filters = output_filters.get("families")
if not self.families_filter_validation(families, families_filters):
continue
filtered_outputs[filename_suffix] = output_def
return filtered_outputs
def filter_outputs_by_tags(self, outputs, tags):
"""Filter output definitions by entered representation tags.
Output definitions without tags filter are marked as valid.
Args:
outputs (list): Contain list of output definitions from presets.
tags (list): Tags of processed representation.
Returns:
            list: Containing all output definitions matching entered tags.
"""
filtered_outputs = []
repre_tags_low = [tag.lower() for tag in tags]
for output_def in outputs:
valid = True
output_filters = output_def.get("filter")
if output_filters:
# Check tag filters
tag_filters = output_filters.get("tags")
if tag_filters:
tag_filters_low = [tag.lower() for tag in tag_filters]
valid = False
for tag in repre_tags_low:
if tag in tag_filters_low:
valid = True
break
if not valid:
continue
if valid:
filtered_outputs.append(output_def)
return filtered_outputs
def legacy_process(self, instance):
self.log.warning("Legacy review presets are used.")
output_profiles = self.outputs or {}
inst_data = instance.data
context_data = instance.context.data
fps = float(inst_data.get("fps"))
frame_start = inst_data.get("frameStart")
frame_end = inst_data.get("frameEnd")
handle_start = inst_data.get("handleStart",
context_data.get("handleStart"))
handle_end = inst_data.get("handleEnd",
context_data.get("handleEnd"))
pixel_aspect = inst_data.get("pixelAspect", 1)
resolution_width = inst_data.get("resolutionWidth", self.to_width)
resolution_height = inst_data.get("resolutionHeight", self.to_height)
self.log.debug("Families In: `{}`".format(inst_data["families"]))
self.log.debug("__ frame_start: {}".format(frame_start))
self.log.debug("__ frame_end: {}".format(frame_end))
self.log.debug("__ handle_start: {}".format(handle_start))
self.log.debug("__ handle_end: {}".format(handle_end))
# get representation and loop them
representations = inst_data["representations"]
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# filter out mov and img sequences
representations_new = representations[:]
for repre in representations:
if repre['ext'] not in self.ext_filter:
continue
tags = repre.get("tags", [])
if instance.data.get("multipartExr") is True:
# ffmpeg doesn't support multipart exrs
continue
if "thumbnail" in tags:
continue
self.log.info("Try repre: {}".format(repre))
if "review" not in tags:
continue
staging_dir = repre["stagingDir"]
# iterating preset output profiles
for name, profile in output_profiles.items():
repre_new = repre.copy()
ext = profile.get("ext", None)
p_tags = profile.get('tags', [])
# append repre tags into profile tags
for t in tags:
if t not in p_tags:
p_tags.append(t)
self.log.info("p_tags: `{}`".format(p_tags))
# adding control for presets to be sequence
# or single file
is_sequence = ("sequence" in p_tags) and (ext in (
"png", "jpg", "jpeg"))
# no handles switch from profile tags
no_handles = "no-handles" in p_tags
self.log.debug("Profile name: {}".format(name))
if not ext:
ext = "mov"
self.log.warning(
str("`ext` attribute not in output "
"profile. Setting to default ext: `mov`"))
self.log.debug(
"instance.families: {}".format(
instance.data['families']))
self.log.debug(
"profile.families: {}".format(profile['families']))
profile_family_check = False
for _family in profile['families']:
if _family in instance.data['families']:
profile_family_check = True
break
if not profile_family_check:
continue
if isinstance(repre["files"], list):
collections, remainder = clique.assemble(
repre["files"])
full_input_path = os.path.join(
staging_dir, collections[0].format(
'{head}{padding}{tail}')
)
filename = collections[0].format('{head}')
if filename.endswith('.'):
filename = filename[:-1]
else:
full_input_path = os.path.join(
staging_dir, repre["files"])
filename = repre["files"].split(".")[0]
repr_file = filename + "_{0}.{1}".format(name, ext)
full_output_path = os.path.join(
staging_dir, repr_file)
if is_sequence:
filename_base = filename + "_{0}".format(name)
repr_file = filename_base + ".%08d.{0}".format(
ext)
repre_new["sequence_file"] = repr_file
full_output_path = os.path.join(
staging_dir, filename_base, repr_file)
self.log.info("input {}".format(full_input_path))
self.log.info("output {}".format(full_output_path))
new_tags = [x for x in tags if x != "delete"]
                # add families
                for t in p_tags:
                    if t not in instance.data["families"]:
                        instance.data["families"].append(t)
                # add to new tags
                for t in p_tags:
                    if t not in new_tags:
                        new_tags.append(t)
self.log.info("new_tags: `{}`".format(new_tags))
input_args = []
output_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(profile.get('input', []))
# necessary input data
# adds start arg only if image sequence
frame_start_handle = frame_start - handle_start
frame_end_handle = frame_end + handle_end
if isinstance(repre["files"], list):
if frame_start_handle != repre.get(
"detectedStart", frame_start_handle):
frame_start_handle = repre.get("detectedStart")
# exclude handle if no handles defined
if no_handles:
frame_start_handle = frame_start
frame_end_handle = frame_end
input_args.append(
"-start_number {0} -framerate {1}".format(
frame_start_handle, fps))
else:
if no_handles:
start_sec = float(handle_start) / fps
input_args.append("-ss {:0.2f}".format(start_sec))
frame_start_handle = frame_start
frame_end_handle = frame_end
input_args.append("-i {}".format(full_input_path))
for audio in instance.data.get("audio", []):
offset_frames = (
instance.data.get("frameStartFtrack") -
audio["offset"]
)
offset_seconds = offset_frames / fps
if offset_seconds > 0:
input_args.append("-ss")
else:
input_args.append("-itsoffset")
input_args.append(str(abs(offset_seconds)))
input_args.extend(
["-i", audio["filename"]]
)
# Need to merge audio if there are more
# than 1 input.
if len(instance.data["audio"]) > 1:
input_args.extend(
[
"-filter_complex",
"amerge",
"-ac",
"2"
]
)
codec_args = profile.get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(profile.get('output', []))
# defining image ratios
resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height
delivery_ratio = float(self.to_width) / float(self.to_height)
self.log.debug(
"__ resolution_ratio: `{}`".format(resolution_ratio))
self.log.debug(
"__ delivery_ratio: `{}`".format(delivery_ratio))
# get scale factor
scale_factor = float(self.to_height) / (
resolution_height * pixel_aspect)
# shorten two decimals long float number for testing conditions
resolution_ratio_test = float(
"{:0.2f}".format(resolution_ratio))
delivery_ratio_test = float(
"{:0.2f}".format(delivery_ratio))
if resolution_ratio_test != delivery_ratio_test:
scale_factor = float(self.to_width) / (
resolution_width * pixel_aspect)
if int(scale_factor * 100) == 100:
scale_factor = (
float(self.to_height) / resolution_height
)
self.log.debug("__ scale_factor: `{}`".format(scale_factor))
# letter_box
lb = profile.get('letter_box', 0)
if lb != 0:
ffmpeg_width = self.to_width
ffmpeg_height = self.to_height
if "reformat" not in p_tags:
lb /= pixel_aspect
if resolution_ratio_test != delivery_ratio_test:
ffmpeg_width = resolution_width
ffmpeg_height = int(
resolution_height * pixel_aspect)
else:
if resolution_ratio_test != delivery_ratio_test:
lb /= scale_factor
else:
lb /= pixel_aspect
output_args.append(str(
"-filter:v scale={0}x{1}:flags=lanczos,"
"setsar=1,drawbox=0:0:iw:"
"round((ih-(iw*(1/{2})))/2):t=fill:"
"c=black,drawbox=0:ih-round((ih-(iw*("
"1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
"/2):t=fill:c=black").format(
ffmpeg_width, ffmpeg_height, lb))
# In case audio is longer than video.
output_args.append("-shortest")
if no_handles:
duration_sec = float(frame_end_handle - frame_start_handle + 1) / fps
output_args.append("-t {:0.2f}".format(duration_sec))
# output filename
output_args.append(full_output_path)
self.log.debug(
"__ pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug(
"__ resolution_width: `{}`".format(
resolution_width))
self.log.debug(
"__ resolution_height: `{}`".format(
resolution_height))
                # scaling non-square pixels and 1920 width
if "reformat" in p_tags:
if resolution_ratio_test < delivery_ratio_test:
self.log.debug("lower then delivery")
width_scale = int(self.to_width * scale_factor)
width_half_pad = int((
self.to_width - width_scale)/2)
height_scale = self.to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = self.to_width
width_half_pad = 0
scale_factor = float(self.to_width) / (float(
resolution_width) * pixel_aspect)
self.log.debug(
"__ scale_factor: `{}`".format(
scale_factor))
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(self.to_height - height_scale)/2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(
width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(
height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(
height_half_pad))
scaling_arg = str(
"scale={0}x{1}:flags=lanczos,"
"pad={2}:{3}:{4}:{5}:black,setsar=1"
).format(width_scale, height_scale,
self.to_width, self.to_height,
width_half_pad,
height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
# baking lut file application
lut_path = instance.data.get("lutPath")
if lut_path and ("bake-lut" in p_tags):
                # removing Gamma info as it is all baked in lut
gamma = next((g for g in input_args
if "-gamma" in g), None)
if gamma:
input_args.remove(gamma)
# create lut argument
lut_arg = "lut3d=file='{}'".format(
lut_path.replace(
"\\", "/").replace(":/", "\\:/")
)
lut_arg += ",colormatrix=bt601:bt709"
vf_back = self.add_video_filter_args(
output_args, lut_arg)
# add it to output_args
output_args.insert(0, vf_back)
self.log.info("Added Lut to ffmpeg command")
self.log.debug(
"_ output_args: `{}`".format(output_args))
if is_sequence:
stg_dir = os.path.dirname(full_output_path)
if not os.path.exists(stg_dir):
self.log.debug(
"creating dir: {}".format(stg_dir))
os.mkdir(stg_dir)
mov_args = [
ffmpeg_path,
" ".join(input_args),
" ".join(output_args)
]
subprcs_cmd = " ".join(mov_args)
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
output = pype.api.subprocess(subprcs_cmd)
self.log.debug("Output: {}".format(output))
# create representation data
repre_new.update({
'name': name,
'ext': ext,
'files': repr_file,
"tags": new_tags,
"outputName": name,
"codec": codec_args,
"_profile": profile,
"resolutionHeight": resolution_height,
"resolutionWidth": resolution_width,
"frameStartFtrack": frame_start_handle,
"frameEndFtrack": frame_end_handle
})
if is_sequence:
repre_new.update({
"stagingDir": stg_dir,
"files": os.listdir(stg_dir)
})
if no_handles:
repre_new.update({
"outputName": name + "_noHandles",
"frameStartFtrack": frame_start,
"frameEndFtrack": frame_end
})
if repre_new.get('preview'):
repre_new.pop("preview")
if repre_new.get('thumbnail'):
repre_new.pop("thumbnail")
# adding representation
self.log.debug("Adding: {}".format(repre_new))
representations_new.append(repre_new)
        for repre in tuple(representations_new):
if "delete" in repre.get("tags", []):
representations_new.remove(repre)
instance.data.update({
"reviewToWidth": self.to_width,
"reviewToHeight": self.to_height
})
self.log.debug(
"new representations: {}".format(representations_new))
instance.data["representations"] = representations_new
self.log.debug("Families Out: `{}`".format(instance.data["families"]))
def add_video_filter_args(self, args, inserting_arg):
"""
        Fixing video filter arguments to be one long string
Args:
args (list): list of string arguments
inserting_arg (str): string argument we want to add
(without flag `-vf`)
Returns:
str: long joined argument to be added back to list of arguments
"""
# find all video format settings
vf_settings = [p for p in args
for v in ["-filter:v", "-vf"]
if v in p]
self.log.debug("_ vf_settings: `{}`".format(vf_settings))
# remove them from output args list
for p in vf_settings:
self.log.debug("_ remove p: `{}`".format(p))
args.remove(p)
self.log.debug("_ args: `{}`".format(args))
# strip them from all flags
vf_fixed = [p.replace("-vf ", "").replace("-filter:v ", "")
for p in vf_settings]
self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
vf_fixed.insert(0, inserting_arg)
self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
# create new video filter setting
vf_back = "-vf " + ",".join(vf_fixed)
return vf_back
| 38.003846
| 95
| 0.540347
| 59,174
| 0.998111
| 0
| 0
| 0
| 0
| 0
| 0
| 18,102
| 0.305333
|
4dd8bacf6b045e8713670a0e2435de01e5e09f0a
| 6,683
|
py
|
Python
|
tests/peerfinder_test.py
|
wusel42/PeerFinder
|
35f132b45f2947902adfb6327ebcdf60bce4bdc2
|
[
"MIT"
] | 49
|
2017-07-13T13:58:14.000Z
|
2022-03-04T12:23:35.000Z
|
tests/peerfinder_test.py
|
wusel42/PeerFinder
|
35f132b45f2947902adfb6327ebcdf60bce4bdc2
|
[
"MIT"
] | 9
|
2017-07-11T13:23:15.000Z
|
2021-02-06T22:25:15.000Z
|
tests/peerfinder_test.py
|
wusel42/PeerFinder
|
35f132b45f2947902adfb6327ebcdf60bce4bdc2
|
[
"MIT"
] | 17
|
2017-07-11T12:37:25.000Z
|
2022-01-29T14:19:35.000Z
|
import unittest
from unittest.mock import Mock
import mock
import peerfinder.peerfinder as peerfinder
import requests
from ipaddress import IPv6Address, IPv4Address
class testPeerFinder(unittest.TestCase):
def setUp(self):
self.netixlan_set = {
"id": 1,
"ix_id": 2,
"name": "Test IX",
"ixlan_id": 3,
"notes": "",
"speed": 1000,
"asn": 65536,
"ipaddr4": ["192.0.2.1"],
"ipaddr6": ["0100::"],
"is_rs_peer": True,
"operational": True,
"created": "2010-01-01T00:00:00Z",
"updated": "2010-01-01T00:00:00Z",
"status": "ok",
}
self.netfac_set = {
"id": 1,
"name": "Test Facility",
"city": "Dublin",
"country": "IE",
"fac_id": 1,
"local_asn": 65536,
"created": "2010-01-01T00:00:00Z",
"updated": "2010-01-01T00:00:00Z",
"status": "ok",
}
self.peer = {"name": "Test Peer", "asn": 65536}
def test_pdb_to_ixp(self):
expected = peerfinder.IXP(
name="Test IX",
subnet4=[IPv4Address("192.0.2.1")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
self.assertEqual(expected, peerfinder.pdb_to_ixp(self.netixlan_set))
def test_pdb_to_peer(self):
ixp = peerfinder.pdb_to_ixp(self.netixlan_set)
fac = peerfinder.pdb_to_fac(self.netfac_set)
expected = peerfinder.Peer(
name="Test Peer", ASN=65536, peering_on=ixp, present_in=fac,
)
self.assertEqual(expected, peerfinder.pdb_to_peer(self.peer, ixp, fac))
def test_pdb_to_fac(self):
expected = peerfinder.Facility(name="Test Facility", ASN=65536)
self.assertEqual(expected, peerfinder.pdb_to_fac(self.netfac_set))
def test__dedup_ixs(self):
expected = {
"Test IX": {
"ipaddr4": [["192.0.2.1"], ["192.0.2.1"]],
"ipaddr6": [["0100::"], ["0100::"]],
"name": "Test IX",
"speed": 2000,
}
}
self.assertEqual(
expected, peerfinder._dedup_ixs([self.netixlan_set, self.netixlan_set]),
)
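    # Note: as the expected value above shows, _dedup_ixs merges records by IX
    # name, collecting ipaddr4/ipaddr6 into lists of lists and summing the
    # port speeds (1000 + 1000 -> 2000).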
def test_fetch_ix_from_ixps(self):
expected = peerfinder.pdb_to_ixp(self.netixlan_set)
ixp = [peerfinder.pdb_to_ixp(self.netixlan_set)]
self.assertEqual(expected, peerfinder.fetch_ix_from_ixps("Test IX", ixp))
def test_fetch_common_ixps(self):
ixp = [peerfinder.pdb_to_ixp(self.netixlan_set)]
fac = [peerfinder.pdb_to_fac(self.netfac_set)]
peer = [peerfinder.pdb_to_peer(self.peer, ixp, fac)]
expected = {"Test IX"}
self.assertEqual(expected, peerfinder.fetch_common_ixps(peer))
def test_fetch_common_facilities(self):
ixp = [peerfinder.pdb_to_ixp(self.netixlan_set)]
fac = [peerfinder.pdb_to_fac(self.netfac_set)]
peer = [peerfinder.pdb_to_peer(self.peer, ixp, fac)]
expected = {"Test Facility"}
self.assertEqual(expected, peerfinder.fetch_common_facilities(peer))
@mock.patch.object(requests, "get", autospec=True)
def test_getPeeringDBSuccess(self, requests_mock):
r_mock = Mock()
r_mock.status_code = 200
r_mock.text = "some text"
r_mock.json.return_value = {"data": [0]}
requests_mock.return_value = r_mock
expected = {"data": [0]}
self.assertEqual(expected, peerfinder.getPeeringDB("23456"))
def test_fetch_fac_from_facilities(self):
fac = [peerfinder.pdb_to_fac(self.netfac_set)]
fac_name = "Test Facility"
expected = peerfinder.Facility(name="Test Facility", ASN=65536)
self.assertEqual(expected, peerfinder.fetch_fac_from_facilities(fac_name, fac))
def test_fetch_different_ixps(self):
ix1 = peerfinder.IXP(
name="Test IX1",
subnet4=[IPv4Address("192.0.2.1")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
ix2 = peerfinder.IXP(
name="Test IX2",
subnet4=[IPv4Address("192.0.2.2")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
expected = ["Test IX1", "Test IX2"]
peer1 = peerfinder.Peer(name="peer1", ASN=1, present_in=[], peering_on=[ix1])
peer2 = peerfinder.Peer(name="peer2", ASN=1, present_in=[], peering_on=[ix2])
self.assertEqual(expected, peerfinder.fetch_different_ixps([peer1, peer2]))
def test_print_ixp(self):
ix1 = peerfinder.IXP(
name="Test IX1",
subnet4=[IPv4Address("192.0.2.1")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
ix2 = peerfinder.IXP(
name="Test IX2",
subnet4=[IPv4Address("192.0.2.2")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
peer1 = peerfinder.Peer(name="peer1", ASN=1, present_in=[], peering_on=[ix1])
peer2 = peerfinder.Peer(
name="peer2", ASN=1, present_in=[], peering_on=[ix1, ix2]
)
self.assertIsNone(peerfinder.print_ixp([peer1, peer2]))
def test_print_fac(self):
fac1 = peerfinder.Facility(name="Test Facility 1", ASN=1,)
fac2 = peerfinder.Facility(name="Test Facility 2", ASN=1,)
peer1 = peerfinder.Peer(
name="peer1", ASN=1, present_in=[fac1, fac2], peering_on=[]
)
peer2 = peerfinder.Peer(name="peer2", ASN=1, present_in=[fac1], peering_on=[])
self.assertIsNone(peerfinder.print_fac([peer1, peer2]))
def test_print_uncommon(self):
ix1 = peerfinder.IXP(
name="Test IX1",
subnet4=[IPv4Address("192.0.2.1")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
ix2 = peerfinder.IXP(
name="Test IX2",
subnet4=[IPv4Address("192.0.2.2")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
peer1 = peerfinder.Peer(name="peer1", ASN=1, present_in=[], peering_on=[ix1])
peer2 = peerfinder.Peer(
name="peer2", ASN=1, present_in=[], peering_on=[ix1, ix2]
)
self.assertIsNone(peerfinder.print_uncommon([peer1, peer2]))
if __name__ == "__main__":
unittest.main()
| 36.519126
| 87
| 0.575939
| 6,466
| 0.96753
| 0
| 0
| 391
| 0.058507
| 0
| 0
| 908
| 0.135867
|
4dd917ca4b89b1723693aa78f18f3c1b80e9acd7
| 5,372
|
py
|
Python
|
ceilometer/network/notifications.py
|
rackerlabs/instrumented-ceilometer
|
6ac5215ac0476120d9c99adcabc9cad0d32963da
|
[
"Apache-2.0"
] | 3
|
2021-04-18T00:37:48.000Z
|
2021-07-21T10:20:11.000Z
|
ceilometer/network/notifications.py
|
lexxito/monitoring
|
bec8dfb8d3610331c7ae5ec543e0b8da0948c164
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/network/notifications.py
|
lexxito/monitoring
|
bec8dfb8d3610331c7ae5ec543e0b8da0948c164
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handler for producing network counter messages from Neutron notification
events.
"""
from oslo.config import cfg
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import log
from ceilometer import plugin
from ceilometer import sample
OPTS = [
cfg.StrOpt('neutron_control_exchange',
default='neutron',
help="Exchange name for Neutron notifications",
deprecated_name='quantum_control_exchange'),
]
cfg.CONF.register_opts(OPTS)
LOG = log.getLogger(__name__)
class NetworkNotificationBase(plugin.NotificationBase):
resource_name = None
@property
def event_types(self):
return [
            # NOTE(flwang): When the *.create.start notification is sent,
            # there is no resource id assigned by Neutron yet. So we ignore
            # the *.create.start notification for now and only listen to
            # *.create.end to make sure the resource id exists.
'%s.create.end' % (self.resource_name),
'%s.update.*' % (self.resource_name),
'%s.exists' % (self.resource_name),
# FIXME(dhellmann): Neutron delete notifications do
# not include the same metadata as the other messages,
# so we ignore them for now. This isn't ideal, since
# it may mean we miss charging for some amount of time,
# but it is better than throwing away the existing
# metadata for a resource when it is deleted.
##'%s.delete.start' % (self.resource_name),
]
@staticmethod
def get_exchange_topics(conf):
"""Return a sequence of ExchangeTopics defining the exchange and topics
to be connected for this plugin.
"""
return [
plugin.ExchangeTopics(
exchange=conf.neutron_control_exchange,
topics=set(topic + ".info"
for topic in conf.notification_topics)),
]
def process_notification(self, message):
LOG.info(_('network notification %r') % message)
message['payload'] = message['payload'][self.resource_name]
counter_name = getattr(self, 'counter_name', self.resource_name)
unit_value = getattr(self, 'unit', self.resource_name)
yield sample.Sample.from_notification(
name=counter_name,
type=sample.TYPE_GAUGE,
unit=unit_value,
volume=1,
user_id=message['_context_user_id'],
project_id=message['payload']['tenant_id'],
resource_id=message['payload']['id'],
message=message)
event_type_split = message['event_type'].split('.')
if len(event_type_split) > 2:
yield sample.Sample.from_notification(
name=counter_name
+ "." + event_type_split[1],
type=sample.TYPE_DELTA,
unit=unit_value,
volume=1,
user_id=message['_context_user_id'],
project_id=message['payload']['tenant_id'],
resource_id=message['payload']['id'],
message=message)
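    # Example flow (hypothetical message): for event_type 'network.update.end'
    # the split yields ['network', 'update', 'end'], so besides the gauge
    # sample named 'network' a delta sample named 'network.update' is emitted;
    # for 'network.exists' the split has only two parts and no delta sample is
    # added.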
class Network(NetworkNotificationBase):
"""Listen for Neutron network notifications in order to mediate with the
metering framework.
"""
resource_name = 'network'
class Subnet(NetworkNotificationBase):
"""Listen for Neutron notifications in order to mediate with the
metering framework.
"""
resource_name = 'subnet'
class Port(NetworkNotificationBase):
"""Listen for Neutron notifications in order to mediate with the
metering framework.
"""
resource_name = 'port'
class Router(NetworkNotificationBase):
"""Listen for Neutron notifications in order to mediate with the
metering framework.
"""
resource_name = 'router'
class FloatingIP(NetworkNotificationBase):
"""Listen for Neutron notifications in order to mediate with the
metering framework.
"""
resource_name = 'floatingip'
counter_name = 'ip.floating'
unit = 'ip'
class Bandwidth(NetworkNotificationBase):
"""Listen for Neutron notifications in order to mediate with the
metering framework.
"""
event_types = ['l3.meter']
def process_notification(self, message):
yield sample.Sample.from_notification(
name='bandwidth',
type=sample.TYPE_DELTA,
unit='B',
volume=message['payload']['bytes'],
user_id=None,
project_id=message['payload']['tenant_id'],
resource_id=message['payload']['label_id'],
message=message)
| 32.167665
| 79
| 0.642033
| 4,113
| 0.765494
| 1,569
| 0.292016
| 1,357
| 0.252559
| 0
| 0
| 2,532
| 0.471245
|
4dda7edb222a2d84997df6163df89166d292eb6b
| 2,407
|
py
|
Python
|
optax/_src/update_test.py
|
pierricklee/optax
|
a75dbf99ce7af05e18bb6a2c518531ddc7303d13
|
[
"Apache-2.0"
] | 2
|
2021-03-13T23:25:27.000Z
|
2022-03-09T09:38:27.000Z
|
optax/_src/update_test.py
|
rwightman/optax
|
ba0bc11d172054d65b4387ecae840c04e2bc7035
|
[
"Apache-2.0"
] | null | null | null |
optax/_src/update_test.py
|
rwightman/optax
|
ba0bc11d172054d65b4387ecae840c04e2bc7035
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `update.py`."""
from absl.testing import absltest
import chex
import jax
import jax.numpy as jnp
from optax._src import update
class UpdateTest(chex.TestCase):
@chex.all_variants()
def test_apply_updates(self):
params = ({'a': jnp.ones((3, 2))}, jnp.ones((1,)))
grads = jax.tree_map(lambda t: 2 * t, params)
exp_params = jax.tree_map(lambda t: 3 * t, params)
new_params = self.variant(update.apply_updates)(params, grads)
chex.assert_tree_all_close(
exp_params, new_params, atol=1e-10, rtol=1e-5)
@chex.all_variants()
def test_incremental_update(self):
params_1 = ({'a': jnp.ones((3, 2))}, jnp.ones((1,)))
params_2 = jax.tree_map(lambda t: 2 * t, params_1)
exp_params = jax.tree_map(lambda t: 1.5 * t, params_1)
new_params = self.variant(
update.incremental_update)(params_2, params_1, 0.5)
chex.assert_tree_all_close(
exp_params, new_params, atol=1e-10, rtol=1e-5)
@chex.all_variants()
def test_periodic_update(self):
params_1 = ({'a': jnp.ones((3, 2))}, jnp.ones((1,)))
params_2 = jax.tree_map(lambda t: 2 * t, params_1)
update_period = 5
update_fn = self.variant(update.periodic_update)
for j in range(3):
for i in range(1, update_period):
new_params = update_fn(
params_2, params_1, j*update_period+i, update_period)
chex.assert_tree_all_close(
params_1, new_params, atol=1e-10, rtol=1e-5)
new_params = update_fn(
params_2, params_1, (j+1)*update_period, update_period)
chex.assert_tree_all_close(
params_2, new_params, atol=1e-10, rtol=1e-5)
if __name__ == '__main__':
absltest.main()
| 33.901408
| 80
| 0.665974
| 1,501
| 0.623598
| 0
| 0
| 1,457
| 0.605318
| 0
| 0
| 747
| 0.310345
|
4ddab5e3d9aa744300fde8fef5e302f340725170
| 44,868
|
py
|
Python
|
scripts/venv/lib/python2.7/site-packages/cogent/core/entity.py
|
sauloal/cnidaria
|
fe6f8c8dfed86d39c80f2804a753c05bb2e485b4
|
[
"MIT"
] | 3
|
2015-11-20T08:44:42.000Z
|
2016-12-14T01:40:03.000Z
|
scripts/venv/lib/python2.7/site-packages/cogent/core/entity.py
|
sauloal/cnidaria
|
fe6f8c8dfed86d39c80f2804a753c05bb2e485b4
|
[
"MIT"
] | 1
|
2017-09-04T14:04:32.000Z
|
2020-05-26T19:04:00.000Z
|
scripts/venv/lib/python2.7/site-packages/cogent/core/entity.py
|
sauloal/cnidaria
|
fe6f8c8dfed86d39c80f2804a753c05bb2e485b4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Provides the entities, the building blocks of the SMRCA hierachy
representation of a macromolecular structure.
The MultiEntity class is a special Entity class to hold multiple instances of
other entities. All Entities apart from the Atom can hold others and inherit
from the MultiEntity. The Entity is the most basic class to deal with
structural and molecular data. Do not use it directly since some functions
depend on methods provided by sub-classes. Classes inheriting from MultiEntity
have to provide some attributes during init e.g: self.level = a valid string
inside the SMCRA hierarchy). Holders of entities are like normal MultiEntities,
but are temporary and are outside the parent-children axes.
"""
import cogent
from cogent.core.annotation import SimpleVariable
from numpy import (sqrt, arctan2, power, array, mean, sum)
from cogent.data.protein_properties import AA_NAMES, AA_ATOM_BACKBONE_ORDER, \
AA_ATOM_REMOTE_ORDER, AREAIMOL_VDW_RADII, \
DEFAULT_AREAIMOL_VDW_RADIUS, AA_NAMES_3to1
from cogent.data.ligand_properties import HOH_NAMES, LIGAND_AREAIMOL_VDW_RADII
from operator import itemgetter, gt, ge, lt, le, eq, ne, or_, and_, contains, \
is_, is_not
from collections import defaultdict
from itertools import izip
from copy import copy, deepcopy
__author__ = "Marcin Cieslik"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Marcin Cieslik"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Marcin Cieslik"
__email__ = "mpc4p@virginia.edu"
__status__ = "Development"
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_ '
HIERARCHY = ['H', 'S', 'M', 'C', 'R', 'A']
AREAIMOL_VDW_RADII.update(LIGAND_AREAIMOL_VDW_RADII)
# error while creating a structure (non-recoverable error)
class ConstructionError(Exception):
"""Cannot unambiguously create a structure."""
pass
# warning while creating a structure
# (something wrong with the input, but recoverable)
class ConstructionWarning(Exception):
"""Input violates some construction rules (contiguity)."""
pass
def sort_id_list(id_list, sort_tuple):
"""Sorts lists of id tuples. The order is defined by the PDB file
specification."""
(hol_loc, str_loc, mod_loc, chn_loc, res_loc, at_loc) = sort_tuple
# even a simple id is a tuple, this makes sorting general
def space_last(ch_id1, ch_id2): # this is for chain sorting
if ch_id1 == ' ' and ch_id2 != ' ':
return 1
if ch_id2 == ' ' and ch_id1 != ' ':
return - 1
if ch_id1 == ' ' and ch_id2 == ' ':
return 0
return cmp(ch_id1, ch_id2)
def atom(at_id1, at_id2):
# hydrogen atoms come last
is_hydrogen1 = (at_id1[0] == 'H')
is_hydrogen2 = (at_id2[0] == 'H')
diff = cmp(is_hydrogen1, is_hydrogen2)
# backbone atoms come first
if not diff:
order1 = AA_ATOM_BACKBONE_ORDER.get(at_id1)
order2 = AA_ATOM_BACKBONE_ORDER.get(at_id2)
diff = cmp(order2, order1)
# (B)eta, (D)elta, (G)amma, .... o(X)t
if not diff:
remote1 = AA_ATOM_REMOTE_ORDER.get(at_id1[1:2])
remote2 = AA_ATOM_REMOTE_ORDER.get(at_id2[1:2])
diff = cmp(remote1, remote2)
# branching comes last
if not diff:
    diff = cmp(at_id1[2:4], at_id2[2:4])
# SE vs CE - selenium first
if not diff:
    alpha1 = ALPHABET.index(at_id1[0:1])
    alpha2 = ALPHABET.index(at_id2[0:1])
    diff = cmp(alpha2, alpha1)
return diff
def residue(res_id1, res_id2):
r1, r2 = 1, 1
if res_id1 in AA_NAMES: r1 = 2
if res_id1 in HOH_NAMES: r1 = 0
if res_id2 in AA_NAMES: r2 = 2
if res_id2 in HOH_NAMES: r2 = 0
if r1 == r2:
return cmp(res_id1, res_id2)
else:
return cmp(r2, r1)
# this assumes that the implementation of sorting is stable.
# does it work for implementations other than CPython?
if res_loc is not None:
    id_list.sort(key=itemgetter(res_loc), cmp=lambda x, y: residue(x[0], y[0])) # by res_name
if at_loc is not None:
    id_list.sort(key=itemgetter(at_loc), cmp=lambda x, y: space_last(x[1], y[1])) # by alt_loc
if at_loc is not None:
    id_list.sort(key=itemgetter(at_loc), cmp=lambda x, y: atom(x[0], y[0])) # by at_id
if res_loc is not None:
    id_list.sort(key=itemgetter(res_loc), cmp=lambda x, y: cmp(x[2], y[2])) # by res_ic
if res_loc is not None:
    id_list.sort(key=itemgetter(res_loc), cmp=lambda x, y: cmp(x[1], y[1])) # by res_id
if chn_loc is not None:
    id_list.sort(key=itemgetter(chn_loc), cmp=space_last) # by chain
if mod_loc is not None:
    id_list.sort(key=itemgetter(mod_loc)) # by model
if str_loc is not None:
    id_list.sort(key=itemgetter(str_loc)) # by structure
return id_list
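# A hedged illustration (residue names are made up): with res_loc pointing
# at the residue-id column, amino-acid residues sort first, other hetero
# groups second and waters last; within those groups ids fall back to
# plain tuple comparison.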
def merge(dicts):
"""Merges multiple dictionaries into a new one."""
master_dict = {}
for dict_ in dicts:
master_dict.update(dict_)
return master_dict
def unique(lists):
"""Merges multiple iterables into a unique sorted tuple (sorted set)."""
master_set = set()
for set_ in lists:
master_set.update(set_)
return tuple(sorted(master_set))
class Entity(dict):
"""Container object all entities inherit from it. Inherits from dict."""
def __init__(self, id, name=None, *args):
# This class has to be sub-classed!
# the masked attribute has to be set before the __init__ of an Entity
# because during __setstate__, __getstate__ sub-entities are iterated
# by .values(), which relies on the attribute masked. to decide which
# children should be omitted.
self.masked = False
self.parent = None # mandatory parent attribute
self.modified = True # modified on creation
self.id = (id,) # ids are non-zero length tuples
self.name = (name or id) # prefer name over duplicate id
self.xtra = {} # mandatory xtra dict attribute
# Dictionary that keeps additional properties
dict.__init__(self, *args) # finish init as dictionary
def __copy__(self):
return deepcopy(self)
def __deepcopy__(self, memo):
new_state = self.__getstate__()
new_instance = self.__new__(type(self))
new_instance.__setstate__(new_state)
return new_instance
def __getstate__(self):
new_state = copy(self.__dict__) # shallow
new_state['parent'] = None
return new_state
def __setstate__(self, new_state):
self.__dict__.update(new_state)
def __repr__(self):
"""Default representation."""
# mandatory getLevel from sub-class
return "<Entity id=%s, level=%s>" % (self.getId(), self.getLevel())
def __sub__(self, entity):
"""Override "-" as Euclidean distance between coordinates."""
return sqrt(sum(pow(self.coords - entity.coords, 2)))
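# With this override the Euclidean distance between two entities whose
# coordinates are set is simply, e.g., distance = atom_a - atom_b
# (the names atom_a/atom_b are illustrative).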
def _setId(self, id):
self.name = id[0]
def _getId(self):
return (self.name,)
def getId(self):
"""Return the id."""
return self._getId()
def getFull_id(self):
"""Return the full id."""
parent = self.getParent()
if parent:
full_id = parent.getFull_id()
else:
full_id = () # we create a tuple on the top
full_id = full_id + self.getId() # merge tuples from the left
return full_id
def setId(self, id_=None):
"""Set the id. Calls the ``_setId`` method."""
if (id_ and id_ != self.id) or (not id_ and (self.getId() != self.id)):
self.id = (id_ or self.getId())
self.setModified(True, True)
self._setId(self.id)
if self.parent:
self.parent.updateIds()
def _setMasked(self, masked, force=False):
if masked != self.masked or force:
self.masked = masked # mask or unmask
self.setModified(True, False) # set parents as modified
def setMasked(self, *args, **kwargs):
"""Set masked flag (``masked``) ``True``."""
self._setMasked(True, *args, **kwargs)
def setUnmasked(self, *args, **kwargs):
"""Set masked flag (``masked``) ``False``."""
self._setMasked(False, *args, **kwargs)
def setModified(self, up=True, down=False):
"""Set modified flag (``modified``) ``True``."""
self.modified = True
if up and self.parent:
self.parent.setModified(True, False)
def setUnmodified(self, up=False, down=False):
"""Set modified flag (``modified``) ``False``."""
self.modified = False
if up and self.parent:
self.parent.setUnmodified(True, False)
def setParent(self, entity):
"""Set the parent ``Entity`` and adds oneself as the child."""
if self.parent != entity:
# delete old parent
self.delParent()
# add new parent
self.parent = entity
self.parent.addChild(self)
self.setModified(False, True)
def delParent(self):
"""Detach mutually from the parent. Sets both child and parent modified
flags (``modified``) as ``True``."""
if self.parent:
self.parent.pop(self.getId())
self.parent.setModified(True, False)
self.parent = None
self.setModified(False, True)
def getModified(self):
"""Return value of the modified flag (``modified``)."""
return self.modified
def getMasked(self):
"""Return value of the masked flag (``masked``)."""
return self.masked
def setLevel(self, level):
"""Set level (``level``)."""
self.level = level
def getLevel(self):
"""Return level (``level``)in the hierarchy."""
return self.level
def setName(self, name):
"""Set name."""
self.name = name
self.setId()
def getName(self):
"""Return name."""
return self.name
def getParent(self, level=None):
"""Return the parent ``Entity`` instance."""
if not level:
return self.parent
elif level == self.level:
return self
return self.parent.getParent(level)
def move(self, origin):
"""Subtract the origin coordinates from the coordintats (``coords``)."""
self.coords = self.coords - origin
def setCoords(self, coords):
"""Set the entity coordinates. Coordinates should be a
``numpy.array``."""
self.coords = coords
def getCoords(self):
"""Get the entity coordinates."""
return self.coords
def getScoords(self):
"""Return spherical (r, theta, phi) coordinates."""
x, y, z = self.coords
x2, y2, z2 = power(self.coords, 2)
scoords = array([sqrt(x2 + y2 + z2), \
arctan2(sqrt(x2 + y2), z), \
arctan2(y, x)])
return scoords
def getCcoords(self):
"""Return redundant, polar, clustering-coordinates on the unit-sphere.
This is only useful for clustering."""
x, y, z = self.coords
x2, y2, z2 = power(self.coords, 2)
ccoords = array([arctan2(sqrt(y2 + z2), x), \
arctan2(sqrt(x2 + z2), y), \
arctan2(sqrt(x2 + y2), z)
])
return ccoords
def setScoords(self):
"""Set ``entity.scoords``, see: getScoords."""
self.scoords = self.getScoords()
def setCcoords(self):
"""Set ``entity.ccoords``, see: getCcoords."""
self.ccoords = self.getCcoords()
class MultiEntity(Entity):
"""The ``MultiEntity`` contains other ``Entity`` or ``MultiEntity``
instances."""
def __init__(self, long_id, short_id=None, *args):
self.index = HIERARCHY.index(self.level) # index corresponding to the hierarchy level
self.table = dict([(level, {}) for level in HIERARCHY[self.index + 1:]]) # empty table
Entity.__init__(self, long_id, short_id, *args)
def __repr__(self):
id_ = self.getId()
return "<MultiEntity id=%s, holding=%s>" % (id_, len(self))
def _link(self):
"""Recursively adds a parent pointer to children."""
for child in self.itervalues(unmask=True):
child.parent = self
try:
child._link()
except AttributeError:
pass
def _unlink(self):
"""Recursively deletes the parent pointer from children."""
for child in self.itervalues(unmask=True):
child.parent = None
try:
child._unlink()
except AttributeError:
pass
def __getstate__(self):
new_dict = copy(self.__dict__) # shallow copy
new_dict['parent'] = None # remove recursion
new_children = []
for child in self.itervalues(unmask=True):
new_child_instance = deepcopy(child)
new_children.append(new_child_instance)
return (new_children, new_dict)
def __setstate__(self, new_state):
new_children, new_dict = new_state
self.__dict__.update(new_dict)
for child in new_children:
self.addChild(child)
def __copy__(self):
return deepcopy(self)
def __deepcopy__(self, memo):
new_state = self.__getstate__()
new_instance = self.__new__(type(self))
new_instance.__setstate__(new_state)
return new_instance
def __iter__(self):
return self.itervalues()
def setSort_tuple(self, sort_tuple=None):
"""Set the ``sort_tuple attribute``. The ``sort_tuple`` is a tuple
needed by the ``sort_id_list`` function to correctly sort items within
entities."""
if sort_tuple:
self.sort_tuple = sort_tuple
else: # build the sort tuple (admittedly ugly)
    sort_tuple = [None, None, None, None, None, None]
    key_length = len(self.keys()[0])
    stop_i = self.index + 2 # next level, open right [)
    start_i = stop_i - key_length # before all Nones
indexes = range(start_i, stop_i) # Nones to change
for value, index in enumerate(indexes):
sort_tuple[index] = value
self.sort_tuple = sort_tuple
def getSort_tuple(self):
"""Return the ``sort_tuple`` attribute. If not set calls the
``setSort_tuple`` method first. See: ``setSort_tuple``."""
if not hasattr(self, 'sort_tuple'):
self.setSort_tuple()
return self.sort_tuple
def itervalues(self, unmask=False):
return (v for v in super(MultiEntity, self).itervalues() if not v.masked or unmask)
def iteritems(self, unmask=False):
return ((k, v) for k, v in super(MultiEntity, self).iteritems() if not v.masked or unmask)
def iterkeys(self, unmask=False):
return (k for k, v in super(MultiEntity, self).iteritems() if not v.masked or unmask)
def values(self, *args, **kwargs):
return list(self.itervalues(*args, **kwargs))
def items(self, *args, **kwargs):
return list(self.iteritems(*args, **kwargs))
def keys(self, *args, **kwargs):
return list(self.iterkeys(*args, **kwargs))
def __contains__(self, key, *args, **kwargs):
return key in self.keys(*args, **kwargs)
def sortedkeys(self, *args, **kwargs):
list_ = sort_id_list(self.keys(*args, **kwargs), self.getSort_tuple())
return list_
def sortedvalues(self, *args, **kwargs):
values = [self[i] for i in self.sortedkeys(*args, **kwargs)]
return values
def sorteditems(self, *args, **kwargs):
items = [(i, self[i]) for i in self.sortedkeys()]
return items
def _setMasked(self, masked, force=False):
"""Set the masked flag (``masked``) recursively. If forced proceed even
if the flag is already set correctly."""
if masked != self.masked or force: # the second condition is when
if masked: # an entity has all children masked
# we have to mask children # but is not masked itself
for child in self.itervalues(): # only unmasked children
child.setMasked()
child.setModified(False, False)
else:
# we have to unmask children
for child in self.itervalues(unmask=True):
if child.masked or force: # only masked children
child.setUnmasked(force=force)
child.setModified(False, False)
self.masked = masked
self.setModified(True, False) # set parents as modified
def setModified(self, up=True, down=True):
"""Set the modified flag (``modified``) ``True``. If down proceeds
recursively for all children. If up proceeds recursively for all
parents."""
self.modified = True
if up and self.parent:
self.parent.setModified(True, False)
if down:
for child in self.itervalues(unmask=True):
child.setModified(False, True)
def setUnmodified(self, up=False, down=False):
"""Set the modified (``modified``) flag ``False``. If down proceeds
recursively for all children. If up proceeds recursively for all
parents."""
self.modified = False
if up and self.parent:
self.parent.setUnmodified(True, False)
if down:
for child in self.itervalues(unmask=True):
child.setUnmodified(False, True)
def _initChild(self, child):
"""Initialize a child (during construction)."""
child.parent = self
self[child.getId()] = child
def addChild(self, child):
"""Add a child."""
child.setParent(self)
child_id = child.getId()
self[child_id] = child
self.setModified(True, False)
def delChild(self, child_id):
"""Remove a child."""
child = self.get(child_id)
if child:
child.delParent()
self.setModified(True, False)
def getChildren(self, ids=None, **kwargs):
"""Return a copy of the list of children."""
if ids:
children = []
for (id_, child) in self.iteritems(**kwargs):
if id_ in ids:
children.append(child)
else:
children = self.values(**kwargs)
return children
def _setTable(self, entity):
"""Recursive helper method for ``entity.setTable``."""
for e in entity.itervalues():
self.table[e.getLevel()].update({e.getFull_id():e})
self._setTable(e)
def setTable(self, force=True, unmodify=True):
"""Populate the children table (``table``) recursively with all children
grouped into hierarchy levels. If ``force`` is ``True`` the table will be
updated even if the ``Entity`` instance is not modified. If unmodify is
``True`` the ``Entity`` modified flag (``modified``) will be set
``False`` afterwards."""
if self.modified or force:
# a table is accurate as long as the contents of a dictionary do not
# change.
self.delTable()
self._setTable(self)
if unmodify:
self.setUnmodified()
def delTable(self):
"""Delete all children from the children-table (``table``). This does
not modify the hierarchy."""
self.table = dict([(level, {}) for level in HIERARCHY[self.index + 1:]])
self.modified = True
def getTable(self, level):
"""Return children of given level from the children-table
(``table``)."""
return self.table[level]
def updateIds(self):
"""Update self with children ids."""
ids = []
for (id_, child) in self.iteritems():
new_id = child.getId()
if id_ != new_id:
ids.append((id_, new_id))
for (old_id, new_id) in ids:
child = self.pop(old_id)
self.update(((new_id, child),))
def getData(self, attr, xtra=False, method=False, forgiving=True, sorted=False):
"""Get data from children attributes, methods and xtra dicts as a list.
If is ``True`` forgiving remove ``None`` values from the output.
``Nones`` are place-holders if a child does not have the requested data.
If xtra is True the xtra dictionary (``xtra``) will be searched, if
method is ``True`` the child attribute will be called."""
values = self.sortedvalues() if sorted else self.values()
if xtra:
# looking inside the xtra of children
data = [child.xtra.get(attr) for child in values] # could get None
else:
# looking at attributes
data = []
for child in values:
try:
if not method:
data.append(getattr(child, attr))
else:
data.append(getattr(child, attr)())
except AttributeError: #
data.append(None)
if forgiving: # remove Nones
data = [point for point in data if point is not None]
return data
def propagateData(self, function, level, attr, **kwargs):
"""Propagate data from child level to this ``Entity`` instance. The
function defines how children data should be transformed to become
the parent's data, e.g. summed."""
if self.index <= HIERARCHY.index(level) - 2:
for child in self.itervalues():
child.propagateData(function, level, attr, **kwargs)
datas = self.getData(attr, **kwargs)
if isinstance(function, basestring):
function = eval(function)
transformed_datas = function(datas)
if kwargs.get('xtra'):
self.xtra[attr] = transformed_datas
else:
setattr(self, attr, transformed_datas)
return transformed_datas
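# A minimal sketch of propagation (the attribute choice is illustrative):
# average atom B-factors up to residues, then chains, then the model with
#   model.propagateData(mean, 'A', 'bfactor')
# where `mean` is the numpy function imported at the top of this module.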
def countChildren(self, *args, **kwargs):
"""Count children based on ``getData``. Additional arguments and
keyworded arguments are passed to the ``getData`` method."""
data = self.getData(*args, **kwargs)
children = defaultdict(int) # by default returns 0
for d in data:
children[d] += 1
return children
def freqChildren(self, *args, **kwargs):
"""Frequency of children based on ``countChildren``. Additional
arguments and keyworded arguments are passed to the ``countChildren``
method."""
children_count = self.countChildren(*args, **kwargs)
length = float(len(self)) # it could be len(children_count)?
for (key_, value_) in children_count.iteritems():
    children_count[key_] = value_ / length
return children_count
def splitChildren(self, *args, **kwargs):
"""Splits children into groups children based on ``getData``.
Additional arguments and keyworded arguments are passed to the
``getData`` method."""
kwargs['forgiving'] = False
data = self.getData(*args, **kwargs)
clusters = defaultdict(dict) # by default returns {}
for (key, (id_, child)) in izip(data, self.iteritems()):
clusters[key].update({id_:child})
return clusters
def selectChildren(self, value, operator, *args, **kwargs):
"""Generic method to select children, based on ``getData``.
Returns a dictionary of children indexed by ids. Compares the data item
for each child using the operator name e.g. "eq" and a value e.g.
"H_HOH". Additional arguments and keyworded arguments are passed to the
``getData`` method."""
kwargs['forgiving'] = False
data = self.getData(*args, **kwargs)
children = {}
for (got, (id_, child)) in izip(data, self.iteritems()):
if isinstance(operator, basestring):
operator = eval(operator)
if operator(value, got):
children.update({id_:child})
return children
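# For instance, mirroring Chain.removeWater below, the water residues of a
# chain can be fetched with:
#   waters = chain.selectChildren('H_HOH', 'eq', 'name')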
def ornamentChildren(self, *args, **kwargs):
"""Return a list of (ornament, (id, child)) tuples, based on
``getData``. Useful for sorting see: Schwartzian transform.
Forgiving is set False. Additional arguments and keyworded arguments are
passed to the ``getData`` method."""
kwargs['forgiving'] = False
data = self.getData(*args, **kwargs)
children = []
for (got, (id_, child)) in izip(data, self.iteritems()):
children.append((got, (id_, child)))
return children
def ornamentdictChildren(self, *args, **kwargs):
"""Return a dictionary of ornaments indexed by child ids, based on
``getData``. Forgiving is set False. Additional arguments and
keyworded arguments are passed to the ``getData`` method."""
kwargs['forgiving'] = False
data = self.getData(*args, **kwargs)
propertydict = {}
for (got, id_) in izip(data, self.iterkeys()):
propertydict.update(((id_, got),))
return propertydict
def stripChildren(self, *args, **kwargs):
"""Strips children based on selection criteria. See:
``selectChildren``. Additional arguments and keyworded arguments are
passed to the ``selectChildren`` method."""
children_ids = self.selectChildren(*args, **kwargs).keys()
for id_ in children_ids:
self.delChild(id_)
def maskChildren(self, *args, **kwargs):
"""Mask children based on selection criteria. See: ``selectChildren``.
Additional arguments and keyworded arguments are passed to the
``selectChildren`` method."""
children = self.selectChildren(*args, **kwargs).itervalues()
for child in children:
child.setMasked() # child.setModified child.parent.setModified
def unmaskChildren(self, *args, **kwargs):
"""Unmask children based on selection criteria. See:
``selectChildren``. Additional arguments and keyworded arguments are
passed to the ``selectChildren`` method."""
children = self.selectChildren(*args, **kwargs).itervalues()
for child in children:
child.setUnmasked() # child.setModified child.parent.setModified
def moveRecursively(self, origin):
"""Move ``Entity`` instance recursively to the origin."""
for child in self.itervalues():
try:
child.moveRecursively(origin)
except AttributeError:
    # Atoms do not have this
    child.move(origin)
self.setCoords()
def setCoordsRecursively(self):
"""Set coordinates (``coords``) recursively. Useful if any child had its
coordinates changed."""
for child in self.itervalues():
try:
child.setCoordsRecursively()
except AttributeError:
    # Atoms do not have this
pass
self.setCoords()
def setCoords(self, *args, **kwargs):
"""Set coordinates (``coords``) as a centroid of children coordinates.
A subset of children can be selected for the calculation. See:
``Entity.selectChildren``. Additional arguments and keyworded arguments
are passed to the ``getData`` method."""
# select only some children
if args or kwargs:
children = self.selectChildren(*args, **kwargs).values()
else:
children = self
coords = []
for child in children:
coords.append(child.getCoords())
self.coords = mean(coords, axis=0)
def getCoords(self):
"""Returns the current coordinates (``coords``). Raises an
``AttributeError`` if not set."""
try:
return self.coords
except AttributeError:
raise AttributeError, "Entity has coordinates not set."
def dispatch(self, method, *args, **kwargs):
"""Calls a method of all children with given arguments and keyworded
arguments."""
for child in self.itervalues():
getattr(child, method)(*args, **kwargs)
class Structure(MultiEntity):
"""The ``Structure`` instance contains ``Model`` instances."""
def __init__(self, id, *args, **kwargs):
self.level = 'S'
MultiEntity.__init__(self, id, *args, **kwargs)
def __repr__(self):
return '<Structure id=%s>' % self.getId()
def removeAltmodels(self):
"""Remove all models with an id != 0"""
self.stripChildren((0,), 'ne', 'id', forgiving=False)
def getDict(self):
"""See: ``Entity.getDict``."""
return {'structure':self.getId()[0]}
class Model(MultiEntity):
"""The ``Model`` instance contains ``Chain`` instances."""
def __init__(self, id, *args, **kwargs):
self.level = 'M'
MultiEntity.__init__(self, id, *args, **kwargs)
def __repr__(self):
return "<Model id=%s>" % self.getId()
def getDict(self):
"""See: ``Entity.getDict``."""
try:
from_parent = self.parent.getDict()
except AttributeError:
# we are allowed to silence this because a structure id is not
# required to write a proper pdb line.
from_parent = {}
from_parent.update({'model':self.getId()[0]})
return from_parent
class Chain(MultiEntity):
"""The ``Chain`` instance contains ``Residue`` instances."""
def __init__(self, id, *args, **kwargs):
self.level = 'C'
MultiEntity.__init__(self, id, *args, **kwargs)
def __repr__(self):
return "<Chain id=%s>" % self.getId()
def removeHetero(self):
"""Remove residues with the hetero flag."""
self.stripChildren('H', 'eq', 'h_flag', forgiving=False)
def removeWater(self):
"""Remove water residues."""
self.stripChildren('H_HOH', 'eq', 'name', forgiving=False)
def residueCount(self):
"""Count residues based on ``name``."""
return self.countChildren('name')
def residueFreq(self):
"""Calculate residue frequency (based on ``name``)."""
return self.freqChildren('name')
def getSeq(self, moltype='PROTEIN'):
    """Return a Sequence object from the ordered residues.
    The ``moltype`` determines the allowed residue names."""
if moltype == 'PROTEIN':
valid_names = AA_NAMES
moltype = cogent.PROTEIN
elif moltype == 'DNA':
raise NotImplementedError('The sequence type: %s is not implemented' % moltype)
elif moltype == 'RNA':
raise NotImplementedError('The sequence type: %s is not implemented' % moltype)
else:
raise ValueError("The 'moltype' %s is not supported." % moltype)
aa = ResidueHolder('aa', self.selectChildren(valid_names, contains, 'name'))
aa_noic = ResidueHolder('noic', aa.selectChildren(' ', eq, 'res_ic'))
raw_seq = []
full_ids = []
for res in aa_noic.sortedvalues():
raw_seq.append(AA_NAMES_3to1[res.name])
full_ids.append(res.getFull_id()[1:])
raw_seq = "".join(raw_seq)
seq = cogent.Sequence(moltype, raw_seq, self.getName())
seq.addAnnotation(SimpleVariable, 'entity_id', 'S_id', full_ids)
return seq
def getDict(self):
"""See: ``Entity.getDict``."""
from_parent = self.parent.getDict()
from_parent.update({'chain_id':self.getId()[0]})
return from_parent
class Residue(MultiEntity):
"""The ``Residue`` instance contains ``Atom`` instances."""
def __init__(self, res_long_id, h_flag, seg_id, *args, **kwargs):
self.level = 'R'
self.seg_id = seg_id
self.h_flag = h_flag
self.res_id = res_long_id[1] # residue id number
self.res_ic = res_long_id[2] # residue insertion code
MultiEntity.__init__(self, res_long_id, res_long_id[0], *args, **kwargs)
def __repr__(self):
res_name, res_id, res_ic = self.getId()[0]
full_name = (res_name, res_id, res_ic)
return "<Residue %s resseq=%s icode=%s>" % full_name
def _getId(self):
"""Return the residue full id. ``(name, res_id, res_ic)``."""
return ((self.name, self.res_id, self.res_ic),)
def _setId(self, id):
"""Set the residue id ``res_id``, name ``name`` and insertion code
``res_ic`` from a full id."""
(self.name, self.res_id, self.res_ic) = id[0]
def removeHydrogens(self):
"""Remove hydrogen atoms."""
self.stripChildren(' H', 'eq', 'element', forgiving=False)
def getSeg_id(self):
"""Return the segment id."""
return self.seg_id
def setSeg_id(self, seg_id):
"""Set the segment id. This does not change the id."""
self.seg_id = seg_id
def getIc(self):
"""Return the insertion code."""
return self.res_ic
def setIc(self, res_ic):
"""Set the insertion code."""
self.res_ic = res_ic
self.setId()
def getRes_id(self):
"""Get the id."""
return self.res_id
def setRes_id(self, res_id):
"""Set the id."""
self.res_id = res_id
self.setId()
def getH_flag(self):
"""Return the hetero flag."""
return self.h_flag
def setH_flag(self, h_flag):
"""Sets the hetero flag. A valid flag is ' ' or 'H'. If 'H' the flag
becomes part of the residue name i.e. H_XXX."""
if not h_flag in (' ', 'H'):
raise AttributeError, "Only ' ' and 'H' hetero flags allowed."
if len(self.name) == 3:
self.name = "%s_%s" % (h_flag, self.name)
elif len(self.name) == 5:
self.name = "%s_%s" % (h_flag, self.name[2:])
else:
raise ValueError, 'Non-standard residue name'
self.h_flag = h_flag
self.setId()
def getDict(self):
"""See: ``Entity.getDict``."""
from_parent = self.parent.getDict()
if self.h_flag != ' ':
at_type = 'HETATM'
else:
at_type = 'ATOM '
from_parent.update({'at_type': at_type,
'h_flag': self.h_flag,
'res_name': self.name,
'res_long_id': self.getId()[0],
'res_id': self.res_id,
'res_ic': self.res_ic,
'seg_id': self.seg_id, })
return from_parent
class Atom(Entity):
"""The ``Atom`` class contains no children."""
def __init__(self, at_long_id, at_name, ser_num, coords, occupancy, bfactor, element):
self.level = 'A'
self.index = HIERARCHY.index(self.level)
self.coords = coords
self.bfactor = bfactor
self.occupancy = occupancy
self.ser_num = ser_num
self.at_id = at_long_id[0]
self.alt_loc = at_long_id[1]
self.table = dict([(level, {}) for level in HIERARCHY[self.index + 1:]])
self.element = element
Entity.__init__(self, at_long_id, at_name)
def __nonzero__(self):
return bool(self.id)
def __repr__(self):
return "<Atom %s>" % self.getId()
def _getId(self):
"""Return the full id. The id of an atom is not its ' XX ' name
but that string with left/right spaces stripped. The full id is
``(at_id, alt_loc)``."""
return ((self.at_id, self.alt_loc),)
def _setId(self, id):
"""Set the atom id ``at_id`` and alternate location ``alt_loc`` from a
full id. See: ``_getId``."""
(self.at_id, self.alt_loc) = id[0]
def setElement(self, element):
"""Set the atom element ``element``."""
self.element = element
def setName(self, name):
"""Set name and update the id."""
self.name = name
self.setAt_id(name.strip())
def setAt_id(self, at_id):
"""Set id. An atom id should be derived from the atom name. See:
``_getId``."""
self.at_id = at_id
self.setId()
def setAlt_loc(self, alt_loc):
"""Set alternate location identifier."""
self.alt_loc = alt_loc
self.setId()
def setSer_num(self, n):
"""Set serial number."""
self.ser_num = n
def setBfactor(self, bfactor):
"""Set B-factor."""
self.bfactor = bfactor
def setOccupancy(self, occupancy):
"""Set occupancy."""
self.occupancy = occupancy
def setRadius(self, radius=None, radius_type=AREAIMOL_VDW_RADII, \
default_radius=DEFAULT_AREAIMOL_VDW_RADIUS):
"""Set radius, defaults to the AreaIMol VdW radius."""
if radius:
self.radius = radius
else:
try:
self.radius = radius_type[(self.parent.name, self.name)]
except KeyError:
self.radius = default_radius
def getSer_num(self):
"""Return the serial number."""
return self.ser_num
def getBfactor(self):
"""Return the B-factor."""
return self.bfactor
def getOccupancy(self):
"""Return the occupancy."""
return self.occupancy
def getRadius(self):
"""Return the radius."""
return self.radius
def getDict(self):
"""See: ``Entity.getDict``."""
from_parent = self.parent.getDict()
from_parent.update({'at_name': self.name,
'ser_num': self.ser_num,
'coords': self.coords,
'occupancy': self.occupancy,
'bfactor': self.bfactor,
'alt_loc': self.alt_loc,
'at_long_id': self.getId()[0],
'at_id': self.at_id,
'element': self.element})
return from_parent
class Holder(MultiEntity):
"""The ``Holder`` instance exists outside the SMCRA hierarchy. Elements in
a ``Holder`` instance are indexed by the full id."""
def __init__(self, name, *args):
if not hasattr(self, 'level'):
self.level = name
MultiEntity.__init__(self, name, name, *args)
def __repr__(self):
return '<Holder level=%s name=%s>' % (self.level, self.getName())
def addChild(self, child):
"""Add a child."""
child_id = child.getFull_id()
self[child_id] = child
def delChild(self, child_id):
"""Remove a child."""
self.pop(child_id)
def updateIds(self):
"""Update self with children long ids."""
ids = []
for (id_, child) in self.iteritems():
new_id = child.getFull_id()
if id_ != new_id:
ids.append((id_, new_id))
for (old_id, new_id) in ids:
child = self.pop(old_id)
self.update(((new_id, child),))
class StructureHolder(Holder):
"""The ``StructureHolder`` contains ``Structure`` instances. See:
``Holder``."""
def __init__(self, *args):
self.level = 'H'
Holder.__init__(self, *args)
def __repr__(self):
return "<StructureHolder name=%s>" % self.getName()
class ModelHolder(Holder):
"""The ``ModelHolder`` contains ``Model`` instances. See: ``Holder``."""
def __init__(self, *args):
self.level = 'S'
Holder.__init__(self, *args)
def __repr__(self):
return "<ModelHolder name=%s>" % self.getName()
class ChainHolder(Holder):
"""The ``ChainHolder`` contains ``Chain`` instances. See: ``Holder``."""
def __init__(self, *args):
self.level = 'M'
Holder.__init__(self, *args)
def __repr__(self):
return "<ChainHolder name=%s>" % self.getName()
class ResidueHolder(Holder):
"""The ``ResidueHolder`` contains ``Residue`` instances. See: ``Holder``."""
def __init__(self, *args):
self.level = 'C'
Holder.__init__(self, *args)
def __repr__(self):
return "<ResidueHolder name=%s>" % self.getName()
class AtomHolder(Holder):
"""The ``AtomHolder`` contains ``Atom`` instances. See: ``Holder``."""
def __init__(self, *args):
self.level = 'R'
Holder.__init__(self, *args)
def __repr__(self):
return "<AtomHolder name=%s>" % self.getName()
class StructureBuilder(object):
"""Constructs a ``Structure`` object. The ``StructureBuilder`` class is used
by a parser class to parse a file into a ``Structure`` object. An instance
of a ``StructureBuilder`` has methods to create ``Entity`` instances and add
them into the SMCRA hierarchy."""
def __init__(self):
self.structure = None
def initStructure(self, structure_id):
"""Initialize a ``Structure`` instance."""
self.structure = Structure(structure_id)
def initModel(self, model_id):
"""Initialize a ``Model`` instance and add it as a child to the
``Structure`` instance. If a model is defined twice a
``ConstructionError`` is raised."""
if not (model_id,) in self.structure:
self.model = Model(model_id)
self.model.junk = AtomHolder('junk')
self.structure._initChild(self.model)
else:
raise ConstructionError
def initChain(self, chain_id):
"""Initialize a ``Chain`` instance and add it as a child to the
``Model`` instance. If a chain is defined twice a
``ConstructionWarning`` is raised. This means that the model is not
continuous."""
if not (chain_id,) in self.model:
self.chain = Chain(chain_id)
self.model._initChild(self.chain)
else:
self.chain = self.model[(chain_id,)]
raise ConstructionWarning, "Chain %s is not continous" % chain_id
def initSeg(self, seg_id):
"""Does not create an ``Entity`` instance, but updates the segment id,
``seg_id`` which is used to initialize ``Residue`` instances."""
self.seg_id = seg_id
def initResidue(self, res_long_id, res_name):
"""Initialize a ``Residue`` instance and add it as a child to the
``Chain`` instance. If a residue is defined twice a
``ConstructionWarning`` is raised. This means that the chain is not
continuous."""
if not (res_long_id,) in self.chain:
self.residue = Residue(res_long_id, res_name, self.seg_id)
self.chain._initChild(self.residue)
else:
self.residue = self.chain[(res_long_id,)]
raise ConstructionWarning, "Residue %s%s%s is not continuous" % \
res_long_id
def initAtom(self, at_long_id, at_name, ser_num, coord, occupancy, \
bfactor, element):
"""Initialize an ``Atom`` instance and add is as child to the
``Residue`` instance. If an atom is defined twice a
``ConstructionError`` is raised and the ``Atom`` instance is added to
the ``structure.model.junk`` ``Holder`` instance."""
if not (at_long_id,) in self.residue:
self.atom = Atom(at_long_id, at_name, ser_num, coord, occupancy, \
bfactor, element)
self.residue._initChild(self.atom)
else:
full_id = (tuple(self.residue[(at_long_id,)].getFull_id()), \
ser_num)
self.model.junk._initChild(Atom(full_id, at_name, ser_num, coord, \
occupancy, bfactor, element))
raise ConstructionError, 'Atom %s%s is defined twice.' % at_long_id
def getStructure(self):
"""Update coordinates (``coords``), set the children-table (``table``)
and return the ``Structure`` instance."""
self.structure.setTable()
self.structure.setCoordsRecursively()
return self.structure
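# A minimal end-to-end sketch; the ids, names and coordinates below are
# assumed purely for illustration of the builder call order.
if __name__ == '__main__':
    builder = StructureBuilder()
    builder.initStructure('demo')
    builder.initModel(0)
    builder.initChain('A')
    builder.initSeg('    ')
    builder.initResidue(('ALA', 1, ' '), ' ')  # (name, res_id, res_ic), hetero flag
    builder.initAtom(('CA', ' '), ' CA ', 1, array([0.0, 0.0, 0.0]), 1.0, 0.0, ' C')
    structure = builder.getStructure()
    print structure.getTable('A')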
| 37.483709
| 99
| 0.584693
| 39,531
| 0.881051
| 0
| 0
| 0
| 0
| 0
| 0
| 14,554
| 0.324374
|
4ddb38d835903f3211b8436bd705a411ed81f133
| 3,381
|
py
|
Python
|
venv/lib/python3.9/site-packages/ajsonrpc/tests/test_dispatcher.py
|
janten/ESP32-Paxcounter
|
212317f3800ec87aef4847e7d60971d4bb9e7d70
|
[
"Apache-2.0"
] | 12
|
2019-03-06T03:44:42.000Z
|
2021-07-22T03:47:24.000Z
|
venv/lib/python3.9/site-packages/ajsonrpc/tests/test_dispatcher.py
|
janten/ESP32-Paxcounter
|
212317f3800ec87aef4847e7d60971d4bb9e7d70
|
[
"Apache-2.0"
] | 10
|
2020-10-28T10:04:58.000Z
|
2021-07-21T20:47:27.000Z
|
venv/lib/python3.9/site-packages/ajsonrpc/tests/test_dispatcher.py
|
janten/ESP32-Paxcounter
|
212317f3800ec87aef4847e7d60971d4bb9e7d70
|
[
"Apache-2.0"
] | 4
|
2021-07-21T20:00:14.000Z
|
2021-10-12T19:43:30.000Z
|
import unittest
from ..dispatcher import Dispatcher
class Math:
@staticmethod
def sum(a, b):
return a + b
@classmethod
def diff(cls, a, b):
return a - b
def mul(self, a, b):
return a * b
class TestDispatcher(unittest.TestCase):
def test_empty(self):
self.assertEqual(len(Dispatcher()), 0)
def test_add_function(self):
d = Dispatcher()
@d.add_function
def one():
return 1
def two():
return 2
d.add_function(two)
d.add_function(two, name="two_alias")
self.assertIn("one", d)
self.assertEqual(d["one"](), 1)
self.assertIsNotNone(one) # do not remove function from the scope
self.assertIn("two", d)
self.assertIn("two_alias", d)
def test_class(self):
d1 = Dispatcher()
d1.add_class(Math)
self.assertIn("math.sum", d1)
self.assertIn("math.diff", d1)
self.assertIn("math.mul", d1)
self.assertEqual(d1["math.sum"](3, 8), 11)
self.assertEqual(d1["math.diff"](6, 9), -3)
self.assertEqual(d1["math.mul"](2, 3), 6)
d2 = Dispatcher(Math)
self.assertNotIn("__class__", d2)
self.assertEqual(d1.keys(), d2.keys())
for method in ["math.sum", "math.diff"]:
self.assertEqual(d1[method], d2[method])
def test_class_prefix(self):
d = Dispatcher(Math, prefix="")
self.assertIn("sum", d)
self.assertNotIn("math.sum", d)
def test_object(self):
math = Math()
d1 = Dispatcher()
d1.add_object(math)
self.assertIn("math.sum", d1)
self.assertIn("math.diff", d1)
self.assertEqual(d1["math.sum"](3, 8), 11)
self.assertEqual(d1["math.diff"](6, 9), -3)
d2 = Dispatcher(math)
self.assertNotIn("__class__", d2)
self.assertEqual(d1, d2)
def test_object_prefix(self):
d = Dispatcher(Math(), prefix="")
self.assertIn("sum", d)
self.assertNotIn("math.sum", d)
def test_add_dict(self):
d = Dispatcher()
d.add_prototype({"sum": lambda *args: sum(args)}, "util.")
self.assertIn("util.sum", d)
self.assertEqual(d["util.sum"](13, -2), 11)
def test_init_from_dict(self):
d = Dispatcher({
"one": lambda: 1,
"two": lambda: 2,
})
self.assertIn("one", d)
self.assertIn("two", d)
def test_del_method(self):
d = Dispatcher()
d["method"] = lambda: ""
self.assertIn("method", d)
del d["method"]
self.assertNotIn("method", d)
def test_to_dict(self):
d = Dispatcher()
def func():
return ""
d["method"] = func
self.assertEqual(dict(d), {"method": func})
def test__getattr_function(self):
# class
self.assertEqual(Dispatcher._getattr_function(Math, "sum")(3, 2), 5)
self.assertEqual(Dispatcher._getattr_function(Math, "diff")(3, 2), 1)
self.assertEqual(Dispatcher._getattr_function(Math, "mul")(3, 2), 6)
# object
self.assertEqual(Dispatcher._getattr_function(Math(), "sum")(3, 2), 5)
self.assertEqual(Dispatcher._getattr_function(Math(), "diff")(3, 2), 1)
self.assertEqual(Dispatcher._getattr_function(Math(), "mul")(3, 2), 6)
| 27.487805
| 79
| 0.561964
| 3,323
| 0.982845
| 0
| 0
| 166
| 0.049098
| 0
| 0
| 408
| 0.120674
|
4ddd26506c5a2c32c298c1cac79c89b498178da9
| 7,206
|
py
|
Python
|
mesh.py
|
msellens/pms
|
d175fded80087a907e8fab6ae09f6d1be69b3353
|
[
"MIT"
] | null | null | null |
mesh.py
|
msellens/pms
|
d175fded80087a907e8fab6ae09f6d1be69b3353
|
[
"MIT"
] | null | null | null |
mesh.py
|
msellens/pms
|
d175fded80087a907e8fab6ae09f6d1be69b3353
|
[
"MIT"
] | null | null | null |
from itertools import product
import struct
import pickle
import numpy as np
from scipy import sparse
ASCII_FACET = """facet normal 0 0 0
outer loop
vertex {face[0][0]:.4f} {face[0][1]:.4f} {face[0][2]:.4f}
vertex {face[1][0]:.4f} {face[1][1]:.4f} {face[1][2]:.4f}
vertex {face[2][0]:.4f} {face[2][1]:.4f} {face[2][2]:.4f}
endloop
endfacet
"""
BINARY_HEADER ="80sI"
BINARY_FACET = "12fH"
class ASCII_STL_Writer(object):
""" Export 3D objects build of 3 or 4 vertices as ASCII STL file.
"""
def __init__(self, stream):
self.fp = stream
self._write_header()
def _write_header(self):
self.fp.write("solid python\n")
def close(self):
self.fp.write("endsolid python\n")
def _write(self, face):
self.fp.write(ASCII_FACET.format(face=face))
def _split(self, face):
p1, p2, p3, p4 = face
return (p1, p2, p3), (p3, p4, p1)
def add_face(self, face):
""" Add one face with 3 or 4 vertices. """
if len(face) == 4:
face1, face2 = self._split(face)
self._write(face1)
self._write(face2)
elif len(face) == 3:
self._write(face)
else:
raise ValueError('only 3 or 4 vertices for each face')
def add_faces(self, faces):
""" Add many faces. """
for face in faces:
self.add_face(face)
class Binary_STL_Writer(ASCII_STL_Writer):
""" Export 3D objects build of 3 or 4 vertices as binary STL file.
"""
def __init__(self, stream):
self.counter = 0
super(Binary_STL_Writer, self).__init__(stream)
def close(self):
self._write_header()
def _write_header(self):
self.fp.seek(0)
self.fp.write(struct.pack(BINARY_HEADER, b'Python Binary STL Writer', self.counter))
def _write(self, face):
self.counter += 1
data = [
0., 0., 0.,
face[0][0], face[0][1], face[0][2],
face[1][0], face[1][1], face[1][2],
face[2][0], face[2][1], face[2][2],
0
]
self.fp.write(struct.pack(BINARY_FACET, *data))
def get_quad(center, n, side=1.):
center = np.array(center).astype('float64')
n = np.array(n).astype('float64')
n1, n2, n3 = n
l = side / 2.
if any(np.isnan(v) for v in n):
return
if np.allclose(n, np.zeros(n.shape)):
return
# Build two vectors orthogonal between themselves and the normal
if (np.abs(n2) > 0.2 or np.abs(n3) > 0.2):
C = np.array([1, 0, 0])
else:
C = np.array([0, 1, 0])
ortho1 = np.cross(n, C)
ortho1 *= l / np.linalg.norm(ortho1)
ortho2 = np.cross(n, ortho1)
ortho2 *= l / np.linalg.norm(ortho2)
#ortho1[[2,1]] = ortho1[[1,2]]
#ortho2[[2,1]] = ortho2[[1,2]]
ortho1[1] = -ortho1[1]
ortho2[1] = -ortho2[1]
return [[
center + ortho1,
center + ortho2,
center - ortho1,
center - ortho2,
]]
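# A hedged example: a unit +Z normal at the origin yields one square quad
# of the requested side length lying in the XY plane, e.g.
#   quad = get_quad((0.0, 0.0, 0.0), (0.0, 0.0, 1.0), side=2.0)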
def surfaceFromNormals(normals):
valid_indices = ~np.isnan(normals)
w, h, d = normals.shape
nx = np.transpose(np.hstack((
normals[:,:,0].ravel(),
normals[:,:,0].ravel(),
)))
ny = np.transpose(np.hstack((
normals[:,:,1].ravel(),
normals[:,:,1].ravel(),
)))
nz = np.transpose(np.hstack((
normals[:,:,2].ravel(),
normals[:,:,2].ravel(),
)))
vectorsize = nz.shape
valid_idx = ~np.isnan(nz)
M = sparse.dia_matrix((2*w*h, w*h), dtype=np.float64)
# n_z z(x + 1, y) - n_z z(x,y) = n_x
M.setdiag(-nz, 0)
M.setdiag(nz, 1)
# n_z z(x, y + 1) - n_z z(x,y) = n_y
M.setdiag(-nz, -w*h)
M.setdiag(np.hstack(([0] * w, nz)), -w*h + w)
# Boundary values
# n_y ( z(x,y) - z(x + 1, y)) = n_x ( z(x,y) - z(x, y + 1))
# TODO: Redo for boundaries in Y-axis
M = M.tolil()
half_size = valid_idx.size // 2
bidxd = np.hstack((np.diff(valid_idx.astype('int8')[:half_size]), [0]))
inner_boundaries = np.roll(bidxd==1, 1) | (bidxd==-1)
outer_boundaries = (bidxd==1) | (np.roll(bidxd==-1, 1))
nz_t = np.transpose(valid_idx.reshape((w,h,d*2//3)), (1, 0, 2)).ravel()
valid_idx_t = ~np.isnan(nz_t)
bidxd = np.hstack((np.diff(valid_idx_t.astype('int8')[:half_size]), [0]))
inner_boundaries |= np.roll(bidxd==1, 1) | (bidxd==-1)
outer_boundaries |= (bidxd==1) | (np.roll(bidxd==-1, 1))
bidx = np.zeros((half_size,), dtype=bool)
bidx[inner_boundaries] = True
bidx = np.indices(bidx.shape)[0][bidx]
M[bidx, bidx] = nx[bidx]
M[bidx, bidx + w] = -nx[bidx]
M[bidx + half_size, bidx] = ny[bidx]
M[bidx + half_size, bidx + 1] = -ny[bidx]
M = M.tocsr()[valid_idx]
weight = 1
OB = np.zeros((outer_boundaries.sum(), w*h,))
OB[np.indices((outer_boundaries.sum(),))[0], np.where(outer_boundaries==True)] = weight
M = sparse.vstack((M,OB))
# Build [ n_x n_y ]'
m = np.hstack((
normals[:,:,0].ravel(),
normals[:,:,1].ravel(),
)).reshape(-1, 1)
print(inner_boundaries.shape, m.shape)
i_b = np.hstack((inner_boundaries, inner_boundaries)).reshape(-1,1)
print(i_b.shape, m.shape)
m[i_b] = 0
m = m[valid_idx]
m = np.vstack((
m,
np.zeros((outer_boundaries.sum(), 1)),
))
# Solve least squares
assert not np.isnan(m).any()
# x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var = sparse.linalg.lsqr(M, m)
x, istop, itn, normr, normar, norma, conda, normx = sparse.linalg.lsmr(M, m)
# Build the surface (x, y, z) with the computed values of z
surface = np.dstack((
np.indices((w, h))[0],
np.indices((w, h))[1],
x.reshape((w, h))
))
return surface
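# A minimal sketch of the solver's convention (values illustrative only):
# constant n_x = 0.1, n_y = 0, n_z = 1 makes the finite differences above
# enforce z(x+1, y) - z(x, y) ~ 0.1, i.e. a ramp along the first axis.
#   normals = np.zeros((32, 32, 3))
#   normals[:, :, 0] = 0.1
#   normals[:, :, 2] = 1.0
#   surface = surfaceFromNormals(normals)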
def writeMesh(surface, normals, filename):
s = surface
with open(filename, 'wb') as fp:
writer = Binary_STL_Writer(fp)
for x in range(0, s.shape[0], 5):
for y in range(0, s.shape[1], 5):
#for x, y in product(range(s.shape[0]), range(s.shape[1])):
quad = get_quad(
s[x,y,:],
normals[x,y,:],
4,
)
if quad:
writer.add_faces(quad)
writer.close()
def write3dNormals(normals, filename):
with open(filename, 'wb') as fp:
writer = Binary_STL_Writer(fp)
for x in range(0, normals.shape[0], 5):
for y in range(0, normals.shape[1], 5):
quad = get_quad(
(0, x, y),
normals[x,y,:],
4,
)
if quad:
writer.add_faces(quad)
writer.close()
def surfaceToHeight(surface):
minH = np.amin(surface[:,:,2])
maxH = np.amax(surface[:,:,2])
scale = maxH - minH
height = (surface[:,:,2] - minH) / scale
return height
def writeObj(surface, normals, filename):
print('obj here')
if __name__ == '__main__':
    with open('data.pkl', 'rb') as fhdl:
        normals = pickle.load(fhdl)
    # integrate the normals into a height surface before meshing;
    # 'mesh.stl' is an assumed output name
    surface = surfaceFromNormals(normals)
    writeMesh(surface, normals, 'mesh.stl')
| 28.709163
| 96
| 0.543991
| 1,745
| 0.242159
| 0
| 0
| 0
| 0
| 0
| 0
| 1,181
| 0.163891
|
4ddd878eccdd7091a7bbb342e9e801e07d0428f5
| 4,759
|
py
|
Python
|
vaccine.py
|
brannbrann/findavaccinesms
|
91e21a91a25d69efed3266c2ccbb5b0e76f5ca1b
|
[
"Apache-2.0"
] | null | null | null |
vaccine.py
|
brannbrann/findavaccinesms
|
91e21a91a25d69efed3266c2ccbb5b0e76f5ca1b
|
[
"Apache-2.0"
] | null | null | null |
vaccine.py
|
brannbrann/findavaccinesms
|
91e21a91a25d69efed3266c2ccbb5b0e76f5ca1b
|
[
"Apache-2.0"
] | null | null | null |
'''
This is a python script that requires you have python installed, or in a cloud environment.
This script scrapes the CVS website looking for vaccine appointments in the cities you list.
To update for your area, update the locations commented below.
If you receive an error that says something is not installed, type
pip install requests
etc.
Happy vaccination!
'''
import requests
import time
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from datetime import datetime, timedelta
def send(message, thetime, state):
carriers = {
'att': '@mms.att.net',
'tmobile': '@tmomail.net',
'verizon': '@vtext.com',
'sprint': '@page.nextel.com',
'gmail': '@gmail.com'
}
# Replace RECEIVERADDR, SENDERADDR, and SENDERPASS below with your own
# Consider using a list for multiple receivers.
# To use gmail, you need to allow less secure apps to connect
# Also, probably a good idea to set up a burner gmail for the sending
to_number = f"RECEIVERADDR{carriers['tmobile']}" # ", ".join() for multiple
sender = f"SENDERADDR{carriers['gmail']}"
password = 'SENDERPASS'
subject = f"CVS Availability in {state}"
# prepend thetime
message.insert(0, thetime.strftime("%m/%d/%Y, %H:%M %p"))
# append the link
if len(message) == 1:
message.append('No new appointments available.')
else:
message.append('https://www.cvs.com/vaccine/intake/store/covid-screener/covid-qns')
port = 587 # 587 for starttls, 465 for SSL and use ssl
smtp_server = "smtp.gmail.com"
msg_body = ", ".join(message)
msg = MIMEMultipart('alternative')
msg['From'] = sender
msg['To'] = to_number
msg['Subject'] = subject
part = MIMEText(msg_body, 'plain', 'UTF-8')
msg.attach(part)
# Establish a secure session with gmail's outgoing SMTP server using your gmail account
server = smtplib.SMTP( smtp_server, port )
server.starttls()
server.login(sender, password)
# Send text message through SMS gateway of destination number
server.sendmail( sender, to_number, msg.as_string())
server.quit()
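# Example call (values illustrative): a one-city availability update
#   send(['BERKELEY, CA -- Available'], datetime.now(), 'CA')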
def findAVaccine():
timer = 3600
init_time = datetime.now()
hours_to_run = 24 ###Update this to set the number of hours you want the script to run.
max_time = init_time + timedelta(hours=hours_to_run)
state = 'CA' ###Update with your state abbreviation. Be sure to use all CAPS, e.g. RI
cvs_url = f"https://www.cvs.com/immunizations/covid-19-vaccine.vaccine-status.{state.lower()}.json?vaccineinfo"
header = "https://www.cvs.com/immunizations/covid-19-vaccine"
###Update with your cities nearby
cities = ['ALAMEDA', 'ALAMO', 'ALBANY', 'ANTIOCH', 'BERKELEY', 'CHICO', 'COLMA', 'CUPERTINO', 'DALY CITY', 'DAVIS',
'EAST PALO ALTO', 'HAYWARD', 'LAFAYETTE', 'LATHROP', 'LIVERMORE', 'LOS GATOS', 'DANVILLE', 'DIXON', 'DUBLIN', 'EL CERRITO',
'ELK GROVE', 'EMERYVILLE', 'FAIRFIELD', 'FREMONT', 'MENLO PARK', 'SAN FRANCISCO', 'OAKLAND', 'WOODLAND', 'SACRAMENTO',
'STOCKTON', 'VACAVILLE', 'VALLEJO', 'WALNUT CREEK', 'MILL VALLEY', 'MORAGA', 'NEWARK', 'NOVATO', 'ORINDA', 'PITTSBURG',
'PINOLE', 'PLEASANT HILL', 'REDWOOD CITY', 'RICHMOND', 'SAN ANSELMO', 'SAN BRUNO', 'SAN CARLOS', 'SAN LEANDRO', 'SAN MATEO',
'SAN RAFAEL', 'SAN RAMON', 'SAUSALITO', 'SARATOGA'
]
previousmessage = []
while datetime.now() < max_time:
thetime = datetime.now()
message = []
response = requests.get(cvs_url, headers={"Referer":header})
payload = response.json()
print(thetime)
for item in payload["responsePayloadData"]["data"][state]:
city = item.get('city')
status = item.get('status')
if (city in cities) and (status == 'Available'):
message.append(f"{city}, {state} -- {status}")
print(f"{city}, {state} -- {status}")
print()
# Decouple the checking to sending alerts
# if no change for an hour, just send a message that there's no change
if (message != previousmessage) or ((thetime - init_time).total_seconds() > timer):
# set previous to this new one
previousmessage = message[:]
# reset the timer
init_time = datetime.now()
# send the email!
print('Sending status update...')
send(message, thetime, state)
# This runs every 300 seconds (5 minutes)
# Email will be sent every hour, or when a change is detected
time.sleep(300)
if __name__ == '__main__':
try:
findAVaccine()
except KeyboardInterrupt:
print('Exiting...')
| 37.769841
| 128
| 0.63921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,591
| 0.544442
|
4ddf8f7618bc1ce4a506f069f1a4aa3da6ef6a1b
| 22
|
py
|
Python
|
pefile/__init__.py
|
0x1F9F1/binja-msvc
|
be2577c22c8d37fd1e2e211f80b1c9a920705bd2
|
[
"MIT"
] | 9
|
2019-02-08T10:01:39.000Z
|
2021-04-29T12:27:34.000Z
|
pefile/__init__.py
|
DatBrick/binja-msvc
|
751ffc1450c569bad23ac67a761d0f1fbd4ca4c4
|
[
"MIT"
] | 1
|
2019-07-04T20:09:57.000Z
|
2019-07-12T11:10:15.000Z
|
pefile/__init__.py
|
DatBrick/binja-msvc
|
751ffc1450c569bad23ac67a761d0f1fbd4ca4c4
|
[
"MIT"
] | 2
|
2019-03-03T13:00:14.000Z
|
2020-05-01T05:35:04.000Z
|
from .pefile import *
| 11
| 21
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4de04f66464c9444c5a3decd7af60b9026030890
| 6,643
|
py
|
Python
|
examples/viewer3DVolume.py
|
vincefn/silx
|
4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
examples/viewer3DVolume.py
|
vincefn/silx
|
4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
examples/viewer3DVolume.py
|
vincefn/silx
|
4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444
|
[
"CC0-1.0",
"MIT"
] | 1
|
2017-04-02T18:00:14.000Z
|
2017-04-02T18:00:14.000Z
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""This script illustrates the use of :class:`silx.gui.plot3d.ScalarFieldView`.
It loads a 3D scalar data set from a file and displays iso-surfaces and
an interactive cutting plane.
It can also be started without providing a file.
"""
from __future__ import absolute_import, division, unicode_literals
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "05/01/2017"
import argparse
import logging
import os.path
import sys
import numpy
from silx.gui import qt
from silx.gui.plot3d.ScalarFieldView import ScalarFieldView
from silx.gui.plot3d import SFViewParamTree
logging.basicConfig()
_logger = logging.getLogger(__name__)
import h5py
def load(filename):
"""Load 3D scalar field from file.
It supports 3D stack HDF5 files and numpy files.
:param str filename: Name of the file to open
and path in file for hdf5 file
:return: numpy.ndarray with 3 dimensions.
"""
if not os.path.isfile(filename.split('::')[0]):
raise IOError('No input file: %s' % filename)
if h5py.is_hdf5(filename.split('::')[0]):
if '::' not in filename:
raise ValueError(
'HDF5 path not provided: Use <filename>::<path> format')
filename, path = filename.split('::')
path, indices = path.split('#')[0], path.split('#')[1:]
with h5py.File(filename, 'r') as f:
data = f[path]
# Loop through indices along first dimensions
for index in indices:
data = data[int(index)]
data = numpy.array(data, order='C', dtype='float32')
else: # Try with numpy
try:
data = numpy.load(filename)
except IOError:
raise IOError('Unsupported file format: %s' % filename)
if data.ndim != 3:
raise RuntimeError(
'Unsupported data set dimensions, only supports 3D datasets')
return data
def default_isolevel(data):
"""Compute a default isosurface level: mean + 1 std
:param numpy.ndarray data: The data to process
:rtype: float
"""
data = data[numpy.isfinite(data)]
if len(data) == 0:
return 0
else:
return numpy.mean(data) + numpy.std(data)
# Parse input arguments
parser = argparse.ArgumentParser(
description=__doc__)
parser.add_argument(
'-l', '--level', nargs='?', type=float, default=float('nan'),
help="The value at which to generate the iso-surface")
parser.add_argument(
'-sx', '--xscale', nargs='?', type=float, default=1.,
help="The scale of the data on the X axis")
parser.add_argument(
'-sy', '--yscale', nargs='?', type=float, default=1.,
help="The scale of the data on the Y axis")
parser.add_argument(
'-sz', '--zscale', nargs='?', type=float, default=1.,
help="The scale of the data on the Z axis")
parser.add_argument(
'-ox', '--xoffset', nargs='?', type=float, default=0.,
help="The offset of the data on the X axis")
parser.add_argument(
'-oy', '--yoffset', nargs='?', type=float, default=0.,
help="The offset of the data on the Y axis")
parser.add_argument(
'-oz', '--zoffset', nargs='?', type=float, default=0.,
help="The offset of the data on the Z axis")
parser.add_argument(
'filename',
nargs='?',
default=None,
help="""Filename to open.
It supports 3D volumes saved as .npy or in .h5 files.
It also supports nD data sets (n>=3) stored in a HDF5 file.
For HDF5, provide the filename and path as: <filename>::<path_in_file>.
If the data set has more than 3 dimensions, it is possible to choose a
3D data set as a subset by providing the indices along the first n-3
dimensions with '#':
<filename>::<path_in_file>#<1st_dim_index>...#<n-3th_dim_index>
E.g.: data.h5::/data_5D#1#1
""")
args = parser.parse_args(args=sys.argv[1:])
# Start GUI
app = qt.QApplication([])
# Create the viewer main window
window = ScalarFieldView()
# Create a parameter tree for the scalar field view
treeView = SFViewParamTree.TreeView(window)
treeView.setSfView(window) # Attach the parameter tree to the view
# Add the parameter tree to the main window in a dock widget
dock = qt.QDockWidget()
dock.setWindowTitle('Parameters')
dock.setWidget(treeView)
window.addDockWidget(qt.Qt.RightDockWidgetArea, dock)
# Load data from file
if args.filename is not None:
data = load(args.filename)
_logger.info('Data:\n\tShape: %s\n\tRange: [%f, %f]',
str(data.shape), data.min(), data.max())
else:
# Create dummy data
_logger.warning('No data file provided, creating dummy data')
coords = numpy.linspace(-10, 10, 64)
z = coords.reshape(-1, 1, 1)
y = coords.reshape(1, -1, 1)
x = coords.reshape(1, 1, -1)
data = numpy.sin(x * y * z) / (x * y * z)
# Set ScalarFieldView data
window.setData(data)
# Set scale of the data
window.setScale(args.xscale, args.yscale, args.zscale)
# Set offset of the data
window.setTranslation(args.xoffset, args.yoffset, args.zoffset)
# Set axes labels
window.setAxesLabels('X', 'Y', 'Z')
# Add an iso-surface
if not numpy.isnan(args.level):
# Add an iso-surface at the given iso-level
window.addIsosurface(args.level, '#FF0000FF')
else:
# Add an iso-surface from a function
window.addIsosurface(default_isolevel, '#FF0000FF')
window.show()
app.exec_()
| 32.091787
| 79
| 0.663104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,666
| 0.551859
|
4de27831141702d223c7260054a467c2f0b9791f
| 260
|
py
|
Python
|
solentware_misc/core/__init__.py
|
RogerMarsh/solentware-misc
|
3b031b26bc747193f25f7ffc9e6d24d7278ad30b
|
[
"BSD-3-Clause"
] | null | null | null |
solentware_misc/core/__init__.py
|
RogerMarsh/solentware-misc
|
3b031b26bc747193f25f7ffc9e6d24d7278ad30b
|
[
"BSD-3-Clause"
] | null | null | null |
solentware_misc/core/__init__.py
|
RogerMarsh/solentware-misc
|
3b031b26bc747193f25f7ffc9e6d24d7278ad30b
|
[
"BSD-3-Clause"
] | null | null | null |
# __init__.py
# Copyright 2017 Roger Marsh
# Licence: See LICENCE (BSD licence)
"""Miscellaneous modules for applications available at solentware.co.uk.
These do not belong in the solentware_base or solentware_grid packages,
siblings of solentware_misc.
"""
| 26
| 72
| 0.792308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 255
| 0.980769
|
4de2f8a837d616a9960e145e5c2a45f95ecf9856
| 127
|
py
|
Python
|
learn_tf/MNIST.py
|
pkumusic/AI
|
912f1b6f12177e301c4a7efccc305bcb52e4d823
|
[
"MIT"
] | 1
|
2017-05-26T15:23:03.000Z
|
2017-05-26T15:23:03.000Z
|
learn_tf/MNIST.py
|
pkumusic/AI
|
912f1b6f12177e301c4a7efccc305bcb52e4d823
|
[
"MIT"
] | null | null | null |
learn_tf/MNIST.py
|
pkumusic/AI
|
912f1b6f12177e301c4a7efccc305bcb52e4d823
|
[
"MIT"
] | null | null | null |
__author__ = "Music"
# MNIST For ML Beginners
# https://www.tensorflow.org/versions/r0.9/tutorials/mnist/beginners/index.html
| 25.4
| 79
| 0.771654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 110
| 0.866142
|
4de340ca20d63248997dbff4ccd4dfac76793fb6
| 294
|
py
|
Python
|
EXC/CW1/task7/mapper.py
|
easyCZ/UoE-Projects
|
7651c8caf329c4f7b4562eba441bfc24124cfcfd
|
[
"BSD-2-Clause"
] | null | null | null |
EXC/CW1/task7/mapper.py
|
easyCZ/UoE-Projects
|
7651c8caf329c4f7b4562eba441bfc24124cfcfd
|
[
"BSD-2-Clause"
] | 1
|
2022-02-23T07:34:53.000Z
|
2022-02-23T07:34:53.000Z
|
EXC/CW1/task7/mapper.py
|
easyCZ/UoE-Projects
|
7651c8caf329c4f7b4562eba441bfc24124cfcfd
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
# mapper.py
import sys
for line in sys.stdin:
row, values = line.strip().split('\t')
row_values = values.split(' ')
for (col, col_value) in enumerate(row_values):
# out: <col> <row> <value>
print("{0}\t{1}\t{2}".format(col, row, col_value))
| 26.727273
| 58
| 0.585034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 0.278912
|
4de518de130a1d423998bfe32aad3a8e89b7b784
| 171
|
py
|
Python
|
rllib/algorithms/maddpg/__init__.py
|
willfrey/ray
|
288a81b42ef0186ab4db33b30191614a7bdb69f6
|
[
"Apache-2.0"
] | null | null | null |
rllib/algorithms/maddpg/__init__.py
|
willfrey/ray
|
288a81b42ef0186ab4db33b30191614a7bdb69f6
|
[
"Apache-2.0"
] | null | null | null |
rllib/algorithms/maddpg/__init__.py
|
willfrey/ray
|
288a81b42ef0186ab4db33b30191614a7bdb69f6
|
[
"Apache-2.0"
] | 1
|
2019-09-24T16:24:49.000Z
|
2019-09-24T16:24:49.000Z
|
from ray.rllib.algorithms.maddpg.maddpg import (
MADDPGConfig,
MADDPGTrainer,
DEFAULT_CONFIG,
)
__all__ = ["MADDPGConfig", "MADDPGTrainer", "DEFAULT_CONFIG"]
| 21.375
| 61
| 0.730994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.263158
|
4de6e32302e33f5a63e0ba995f624e069fef3439
| 1,849
|
py
|
Python
|
Fig8_RTM/RTM.py
|
GeoCode-polymtl/Seis_float16
|
5f9660cbdc37e5ab7f6054f7547df2ffb661a81d
|
[
"MIT"
] | null | null | null |
Fig8_RTM/RTM.py
|
GeoCode-polymtl/Seis_float16
|
5f9660cbdc37e5ab7f6054f7547df2ffb661a81d
|
[
"MIT"
] | 5
|
2020-01-28T22:17:04.000Z
|
2022-02-09T23:33:07.000Z
|
Fig8_RTM/RTM.py
|
GeoCode-polymtl/Seis_float16
|
5f9660cbdc37e5ab7f6054f7547df2ffb661a81d
|
[
"MIT"
] | 3
|
2019-11-27T06:06:04.000Z
|
2020-06-05T17:18:15.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Perform RTM on marmousi
"""
import os
import numpy as np
import h5py as h5
from scipy.ndimage import gaussian_filter  # scipy.ndimage.filters is a deprecated alias
import sys
import shutil
from SeisCL import SeisCL
names = ['fp32', 'fp16io', 'fp16com']
filedata = os.getcwd() + '/marmfp32'
seis = SeisCL()
seis.file = os.getcwd() + '/marmfp32'
seis.read_csts(workdir="")
seis.file = 'SeisCL'
seis.file_datalist = filedata + '_din.mat'
seis.file_din = filedata + '_din.mat'
file = h5.File(filedata + '_model.mat', "r")
models = {'vp': gaussian_filter(np.transpose(file['vp']), sigma=3),
'vs': np.transpose(file['vs']),
'rho': np.transpose(file['rho'])}
file.close()
"""
_________________Set inversion parameters for SeisCL____________________
"""
seis.csts['gradout'] = 1 # SeisCl has to output the gradient
seis.csts['scalerms'] = 0  # We don't scale each trace by the rms of the data
seis.csts['scalermsnorm'] = 0  # We don't normalize each trace by its rms
seis.csts['scaleshot'] = 0  # We don't scale each shot
seis.csts['back_prop_type'] = 1
seis.csts['restype'] = 1 # Migration cost function
seis.csts['tmin'] = 0*(float(seis.csts['NT'])-2) * seis.csts['dt']  # float() instead of the removed np.float
for ii, FP16 in enumerate([1, 2, 3]):
"""
_______________________Constants for inversion__________________________
"""
filework = os.getcwd() + '/marmgrad_' + names[ii]
seis.csts['FP16'] = FP16
"""
_________________________Perform Migration______________________________
"""
if not os.path.isfile(filework + '_gout.mat'):
seis.set_forward(seis.src_pos_all[3, :], models, withgrad=True)
seis.execute()
shutil.copy2(seis.workdir + "/" + seis.file_gout, filework + '_gout.mat')
sys.stdout.write('Gradient calculation completed \n')
sys.stdout.flush()
| 31.87931
| 81
| 0.670092
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 804
| 0.43483
|
4de77886992362775de86d085f926f5ea3304df0
| 954
|
py
|
Python
|
doc/default_issue/fix.py
|
nadavweidman/pytconf
|
6203d3607c1cc383c60d1c138efc1109c7a6ab59
|
[
"MIT"
] | null | null | null |
doc/default_issue/fix.py
|
nadavweidman/pytconf
|
6203d3607c1cc383c60d1c138efc1109c7a6ab59
|
[
"MIT"
] | 1
|
2021-12-03T11:35:46.000Z
|
2021-12-03T11:52:52.000Z
|
doc/default_issue/fix.py
|
nadavweidman/pytconf
|
6203d3607c1cc383c60d1c138efc1109c7a6ab59
|
[
"MIT"
] | 8
|
2021-12-03T11:07:55.000Z
|
2022-03-23T13:35:05.000Z
|
#!/usr/bin/python3
from typing import List
from registry import the_registry
from param_collector import the_collector
class MetaConfig(type):
"""
Meta class for all configs
"""
def __new__(mcs, name, bases, namespace):
ret = super().__new__(mcs, name, bases, namespace)
i = 0
for k, v in namespace.items():
if not k.startswith("__") and not isinstance(v, classmethod):
the_registry.register(ret, k, the_collector.get_item(i))
i += 1
the_collector.clear()
return ret
class Config(metaclass=MetaConfig):
"""
Base class for all configs
"""
class Unique:
pass
NO_DEFAULT = Unique()
NO_DEFAULT_TYPE = type(NO_DEFAULT)
def create_list_int(
default=None,
) -> List[int]:
if default is None:
return []
return default
class Foobar(Config):
columns = create_list_int()
for x in Foobar.columns:
print(x)
| 18.346154
| 73
| 0.627883
| 605
| 0.634172
| 0
| 0
| 0
| 0
| 0
| 0
| 106
| 0.111111
|
4de7d409e55429843384ad1f22b9b00b0eb2103a
| 3,437
|
py
|
Python
|
argonneV14.py
|
floresab/Toy-Models
|
0b990563e1be903cbdcb56ead57d83bc3ca71198
|
[
"MIT"
] | null | null | null |
argonneV14.py
|
floresab/Toy-Models
|
0b990563e1be903cbdcb56ead57d83bc3ca71198
|
[
"MIT"
] | null | null | null |
argonneV14.py
|
floresab/Toy-Models
|
0b990563e1be903cbdcb56ead57d83bc3ca71198
|
[
"MIT"
] | null | null | null |
"""
File : argonneV14.py
Language : Python 3.6
Created : 7/13/2018
Edited : 7/13/2018
San Diego State University
Department of Physics and Astronomy
#https://journals.aps.org/prc/pdf/10.1103/PhysRevC.51.38 --argonneV18
This code implements the Argonne V14 potential outlined in ...
--CONSTANTS --
Hbar*c | 197.33 MeV fm
pion-Mass | 138.03 MeV
Wood-Saxon|
R | 0.5 fm
a | 0.2 fm
Operator | p | Ip | Sp | Index |
-----------------------------------------------------------
central | c | -4.801125 | 2061.5625 | 0 |
tao dot tao | tao | 0.798925 | -477.3125 | 1 |
sigma dot sigma| sigma | 1.189325 | -502.3125 | 2 |
(sigma)(tao) | sigma-tao | 0.182875 | 97.0625 | 3 |
Sij | t | -0.1575 | 108.75 | 4 |
Sij(tao) | t-tao | -0.7525 | 297.25 | 5 |
L dot S | b | 0.5625 | -719.75 | 6 |
L dot S (tao) | b-tao | 0.0475 | -159.25 | 7 |
L squared | q | 0.070625 | 8.625 | 8 |
L^2(tao) | q-tao | -0.148125 | 5.625 | 9 |
L^2(sigma | q-sigma | -0.040625 | 17.375 | 10 |
L^2(sigma)(tao)| q-sigma-tao | -0.001875 | -33.625 | 11 |
(L dot S)^2 | bb | -0.5425 | 391.0 | 12 |
(LS)^2(tao) | bb-tao | 0.0025 | 145.0 | 13 |
"""
import numpy as np
def Yukawa(r):
#mu = mc/hbar
c = 2 #1/fm^2
mu = 138.03/197.33
return np.exp(-r*mu)/(mu*r)*(1-np.exp(-c*r**2))
def Tensor(r):
c = 2 #1/fm^2
mu = 138.03/197.33
return (1+3/(mu*r) + 3/(mu*r)**2)*(np.exp(-mu*r)/(mu*r))*(1-np.exp(-c*r**2))**2
def V14(r):
Vst = np.array([0,0,0,1,0,0,0,0,0,0,0,0,0,0])
Vttao = np.array([0,0,0,0,0,1,0,0,0,0,0,0,0,0])
#Short Range Strengths
Sp = np.array([\
2061.5625,\
-477.3125,\
-502.3125,\
97.0625,\
108.75,\
297.25,\
-719.75,\
-159.25,\
8.625,\
5.625,\
17.375,\
-33.625,\
391.0,\
145.0])
# phenomenological Strengths
Ip = np.array([\
-4.801125,\
0.798925,\
1.189325,\
0.182875,\
-0.1575,\
-0.7525,\
0.5625,\
0.0475,\
0.070625,\
-0.148125,\
-0.040625,\
-0.001875,\
-0.5425,\
0.0025])
    # Woods-Saxon short-range part
    short_range = Sp*1.0/(1+np.exp((r-0.5)/.2))
    # phenomenological shape
    phenomenological = Ip*(Tensor(r)**2)
    # explicit pion exchange
    pion = 3.72681*(Vst*Yukawa(r) + Vttao*Tensor(r))
    # compute the V14 potential as the sum of the individual operator terms
    return short_range + phenomenological + pion
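# A minimal usage sketch (added for illustration, not part of the original
# file). The separation r = 1.0 fm below is an arbitrary sample value.
if __name__ == "__main__":
    strengths = V14(1.0)  # array of the 14 operator strengths at r = 1.0 fm
    for index, strength in enumerate(strengths):
        print(index, strength)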
| 33.696078
| 84
| 0.364562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,864
| 0.542333
|
4de80e2e1c94dbe6762d16201a946a481593a775
| 543
|
py
|
Python
|
solutions/python3/problem1556.py
|
tjyiiuan/LeetCode
|
abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e
|
[
"MIT"
] | null | null | null |
solutions/python3/problem1556.py
|
tjyiiuan/LeetCode
|
abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e
|
[
"MIT"
] | null | null | null |
solutions/python3/problem1556.py
|
tjyiiuan/LeetCode
|
abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
1556. Thousand Separator
Given an integer n, add a dot (".") as the thousands separator and return it in string format.
Constraints:
0 <= n < 2^31
"""
class Solution:
def thousandSeparator(self, n: int) -> str:
res = ""
str_n = str(n)
count = 0
ind = len(str_n) - 1
while ind >= 0:
count += 1
if count == 4:
res = "." + res
count = 1
res = str_n[ind] + res
ind -= 1
return res
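# Illustrative usage (added as a sketch, not part of the original solution):
#   Solution().thousandSeparator(1234567)  # -> "1.234.567"
#   Solution().thousandSeparator(987)      # -> "987"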
| 19.392857
| 94
| 0.46593
| 358
| 0.6593
| 0
| 0
| 0
| 0
| 0
| 0
| 185
| 0.3407
|
4de95b2ae160d83f0a0fab9908a283c692256619
| 6,483
|
py
|
Python
|
app/resources/base.py
|
smartlab-br/datahub-api
|
193e71172bb4891a5bbffc902da07ef57df9ab07
|
[
"MIT"
] | 1
|
2019-07-25T21:15:05.000Z
|
2019-07-25T21:15:05.000Z
|
app/resources/base.py
|
smartlab-br/datahub-api
|
193e71172bb4891a5bbffc902da07ef57df9ab07
|
[
"MIT"
] | 44
|
2019-08-05T15:24:00.000Z
|
2022-01-31T23:11:31.000Z
|
app/resources/base.py
|
smartlab-br/datahub-api
|
193e71172bb4891a5bbffc902da07ef57df9ab07
|
[
"MIT"
] | 1
|
2021-05-11T07:49:51.000Z
|
2021-05-11T07:49:51.000Z
|
''' Controller that provides CEE data '''
from flask_restful import Resource
from service.qry_options_builder import QueryOptionsBuilder
from model.thematic import Thematic
class BaseResource(Resource):
    ''' Base resource class '''
DEFAULT_SWAGGER_PARAMS = [
{"name": "valor", "required": False, "type": 'string', "in": "query",
"description": "Coluna com o valor agregado. Agrega o valor \
presente na coluna informada (vide opções nas categorias), de \
acordo com a função de agregação informada (vide parâmetro \
agregacao)."},
{"name": "agregacao", "required": False, "type": 'string', "in": "query",
"description": "Função de agregação a ser usada. As funções \
disponíveis são DISTINCT, COUNT, SUM, MAX, MIN, PCT_COUNT, \
PCT_SUM, RANK_COUNT, RANK_SUM, RANK_DENSE_COUNT e \
RANK_DESNE_SUM. \
Os atributos retornados terão nome formado pelo nome da \
função precedido de 'agr_' (ex. 'agr_sum')."},
{"name": "ordenacao", "required": False, "type": 'string', "in": "query",
"description": "Colunas de ordenação para o resultado, dentre \
as colunas presentes nas categorias. Adicionalmente, pode-se \
incluir a coluna de agregação (ex. 'sum'). Uma coluna com \
ordenação inversa deve ser precedida de '-' \
(ex. order=-sum)."},
{"name": "filtros", "required": False, "type": 'string', "in": "query",
"description": "Operações lógicas para a filtragem dos registros \
do resultado. Operadores disponíveis: eq, ne, in, gt, ge, lt, le, \
and e or. Como redigir: ',' para separar operações e '-' para \
separar parâmetros da operação. \
Exemplo: &filtros=ge-ano-2014,and,lt-ano-2018."},
{"name": "calcs", "required": False, "type": 'string', "in": "query",
"description": "Campo calculado sobre grupos padrões do resource. \
Havendo qualquer agregação, o agrupamento será feito pelas \
categorias fornecidas na query. \
Calcs disponiveis: min_part, max_part, avg_part, var_part, \
ln_var_part, norm_pos_part, ln_norm_pos_part, norm_part e \
ln_norm_part."}
]
CAT_DETAIL = "Para renomear campos do dataset de retorno, após o campo de \
consulta, adicionar o novo nome, separado por '-' (ex: campo-campo_novo)."
CAT_IND_BR = "Informações que devem ser trazidas no dataset. \
Campos disponíveis: cd_mun_ibge, nu_competencia, \
cd_indicador, ds_agreg_primaria, ds_agreg_secundaria, \
ds_indicador, ds_fonte, nu_competencia_min, nu_competencia_max, \
vl_indicador, vl_indicador_min, vl_indicador_max e media_br. \
" + CAT_DETAIL
CAT_IND_UF = "Informações que devem ser trazidas no dataset. \
Campos disponíveis: cd_mun_ibge, nu_competencia, \
nu_competencia_min, nu_competencia_max, nm_uf, sg_uf, \
cd_prt, nm_prt, cd_regiao, nm_regiao, cd_uf, cd_indicador, \
ds_agreg_primaria, ds_agreg_secundaria, ds_indicador, \
ds_fonte, vl_indicador, vl_indicador_br, vl_indicador_min_br, \
vl_indicador_max_br, media_br, pct_br, rank_br e \
rank_br_total. " + CAT_DETAIL
CAT_IND_MUN = "Informações que devem ser trazidas no dataset. \
Campos disponíveis: cd_mun_ibge, nu_competencia, \
nu_competencia_min, nu_competencia_max, nm_municipio_uf, \
latitude, longitude, nm_uf, sg_uf, cd_unidade, cd_prt, \
nm_prt, nm_unidade, tp_unidade, sg_unidade, cd_mesorregiao, \
nm_mesorregiao, cd_microrregiao, nm_microrregiao, \
cd_regiao, nm_regiao, cd_mun_ibge_dv, nm_municipio, cd_uf, \
cd_indicador, ds_agreg_primaria, ds_agreg_secundaria, \
ds_indicador, vl_indicador, vl_indicador_uf, \
vl_indicador_min_uf, vl_indicador_max_uf, media_uf, pct_uf, \
rank_uf, rank_uf_total, vl_indicador_br, vl_indicador_min_br \
vl_indicador_max_br, media_br, pct_br, rank_br e \
rank_br_total. " + CAT_DETAIL
EMPRESA_DEFAULT_SWAGGER_PARAMS = [
{
"name": "dados",
"description": "Fonte de dados para consulta (rais, caged, catweb etc)",
"required": False,
"type": 'string',
"in": "query"
},
{
"name": "competencia",
"description": "Competência a ser retornada. Depende da fonte de dados \
(ex. para uma fonte pode ser AAAA, enquanto para outras AAAAMM)",
"required": False,
"type": 'string',
"in": "query"
},
{
"name": "id_pf",
"description": "Identificador da Pessoa Física, dentro da empresa. \
Tem que informar o dataset (param 'dados')",
"required": False,
"type": 'string',
"in": "query"
},
{
"name": "perspectiva",
"description": "Valor que filtra uma perspectiva predefinida de um dataset \
(ex. No catweb, 'Empregador'). Nem todos os datasets tem essa opção.",
"required": False,
"type": 'string',
"in": "query"
},
{
"name": "reduzido",
"description": "Sinalizador que indica conjunto reduzido de colunas (S para sim)",
"required": False,
"type": 'string',
"in": "query"
},
{
"name": "cnpj_raiz", "required": True, "type": 'string', "in": "path",
"description": "CNPJ Raiz da empresa consultada"
}
]
@staticmethod
def build_options(r_args, rules='query'):
        ''' Builds the query options '''
return QueryOptionsBuilder.build_options(r_args, rules)
@staticmethod
def build_person_options(r_args, mod='empresa'):
        ''' Builds the query options '''
return QueryOptionsBuilder.build_person_options(r_args, mod)
def __init__(self):
        ''' Constructor '''
self.domain = None
self.set_domain()
def get_domain(self):
        ''' Loads the domain model if it is not set yet '''
if self.domain is None:
self.domain = Thematic()
return self.domain
def set_domain(self):
''' Setter invoked from constructor '''
self.domain = Thematic()
| 46.640288
| 94
| 0.608669
| 6,363
| 0.972638
| 0
| 0
| 358
| 0.054723
| 0
| 0
| 4,809
| 0.735096
|
4de9705438995df854b9ebaf6e2d9530e21d53a7
| 3,155
|
py
|
Python
|
tapioca_trello/resource_mapping/checklist.py
|
humrochagf/tapioca-trello
|
a7067a4c43b22e64cef67b68068580448a4cb420
|
[
"MIT"
] | null | null | null |
tapioca_trello/resource_mapping/checklist.py
|
humrochagf/tapioca-trello
|
a7067a4c43b22e64cef67b68068580448a4cb420
|
[
"MIT"
] | null | null | null |
tapioca_trello/resource_mapping/checklist.py
|
humrochagf/tapioca-trello
|
a7067a4c43b22e64cef67b68068580448a4cb420
|
[
"MIT"
] | 1
|
2018-07-31T23:04:34.000Z
|
2018-07-31T23:04:34.000Z
|
# -*- coding: utf-8 -*-
CHECKLIST_MAPPING = {
'checklist_retrieve': {
'resource': '/checklists/{id}',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklistsid'
),
'methods': ['GET'],
},
'checklist_field_retrieve': {
'resource': '/checklists/{id}/{field}',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklistsidfield'
),
'methods': ['GET'],
},
'checklist_board_retrieve': {
'resource': '/checklists/{id}/board',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklistsidboard'
),
'methods': ['GET'],
},
'checklist_card_retrieve': {
'resource': '/checklists/{id}/cards',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklistsidcards'
),
'methods': ['GET'],
},
'checklist_item_list': {
'resource': '/checklists/{id}/checkItems',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklistsidcardscheckitems'
),
'methods': ['GET'],
},
'checklist_item_retrieve': {
'resource': '/checklists/{id}/checkItems/{idCheckItem}',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklistsidcardscheckitemscheckitemid'
),
'methods': ['GET'],
},
'checklist_update': {
'resource': '/checklists/{id}',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklistsid-1'
),
'methods': ['PUT'],
},
'checklist_item_update': {
'resource': '/checklists/{id}/checkItems/{idCheckItem}',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklistsidcheckitemsidcheckitem'
),
'methods': ['PUT'],
},
'checklist_name_update': {
'resource': '/checklists/{id}/name',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklistsidname'
),
'methods': ['PUT'],
},
'checklist_create': {
'resource': '/checklists',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklists'
),
'methods': ['POST'],
},
'checklist_item_create': {
'resource': '/checklists/{id}/checkItems',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklistsidcheckitems'
),
'methods': ['POST'],
},
'checklist_delete': {
'resource': '/checklists/{id}',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklistsid-2'
),
'methods': ['DELETE'],
},
'checklist_item_delete': {
'resource': '/checklists/{id}/checkItems/{idCheckItem}',
'docs': (
'https://developers.trello.com/v1.0/reference'
'#checklistsidcheckitemsid'
),
'methods': ['DELETE'],
},
}
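# Illustrative note (added as a sketch, not part of tapioca-trello): each
# mapping entry is a URL template, so a concrete endpoint can be previewed
# with plain string formatting using hypothetical ids:
#   CHECKLIST_MAPPING['checklist_item_retrieve']['resource'].format(
#       id='abc123', idCheckItem='def456')
#   # -> '/checklists/abc123/checkItems/def456'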
| 28.944954
| 64
| 0.496672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,959
| 0.620919
|
4dea1d4995a7ebb956d68ed48040d475a502bb1f
| 2,962
|
py
|
Python
|
investimentos.py
|
isaiaspereira307/invest
|
ad0aa40dca4ece75fb7dad98415e73dc382f662a
|
[
"MIT"
] | null | null | null |
investimentos.py
|
isaiaspereira307/invest
|
ad0aa40dca4ece75fb7dad98415e73dc382f662a
|
[
"MIT"
] | null | null | null |
investimentos.py
|
isaiaspereira307/invest
|
ad0aa40dca4ece75fb7dad98415e73dc382f662a
|
[
"MIT"
] | null | null | null |
import json
import os
def calculo():  # stray "self" parameter dropped: this is a plain function, not a method
meta = float(input('valor da meta: ')) # 1000000
valorinicial = float(input('valor inicial: ')) # 5637.99
valormensal = float(input('investimento mensal: ')) # 150
dividendos = float(input('dividendos: ')) # 16.86
meta = meta - valorinicial - valormensal - dividendos
i = 0
while i < meta:
meta = meta - valormensal - dividendos
print(meta)
dividendos = dividendos + 1.37
i = i + 1
    print(i)
return i
def viver_de_renda_hglg():
preco = 194
div = 0.78
magic_number = int(preco/div)
valor = preco * magic_number
    rmd = 1000  # desired monthly income
valor_nescessario = magic_number*rmd
return valor_nescessario
def viver_de_renda_knri():
preco = 185
div = 0.74
magic_number = int(preco/div)
valor = preco * magic_number
    rmd = 10000  # desired monthly income
valor_nescessario = magic_number*rmd
return valor_nescessario
def viver_de_renda_bcff():
preco = 99
div = 0.53
magic_number = int(preco/div)
valor = preco * magic_number
    rmd = 10000  # desired monthly income
valor_nescessario = magic_number*rmd
return valor_nescessario
def vdrFII():
preco = 478
div = 2.05
rmd = 10000
magic_number = int(preco/div)
valor_nescessario = magic_number*rmd
print(valor_nescessario)
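# Worked example of the "magic number" idea above (illustrative numbers only):
# with a share price of 478 and a monthly dividend of 2.05 per share, one
# needs int(478/2.05) = 233 shares for the dividends to pay for one new
# share, and 233 * 10000 = 2330000 invested to reach a desired monthly
# income of 10000.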
def sair():
print("\nObrigado por utilizar a calculadora. Até logo!")
exit()
def chamarMenu():
#os.system('clear')
escolha = int(input("##### INVESTPY #####"
"\nDigite: "
"\n<1> adicionar ação"
"\n<2> exibir ações"
"\n<3> sair\n> "))
return escolha
def ler_arquivo(arquivo):
if os.path.exists(arquivo):
with open(arquivo, "r") as arq_json:
dicionario=json.load(arq_json)
else:
dicionario = {}
return dicionario
def gravar_arquivo(dicionario,arquivo):
with open(arquivo, "w") as arq_json:
json.dump(dicionario, arq_json)
def registrar(dicionario, arquivo):
resp = "S"
while resp == "S":
dicionario[input("Digite o nome da ação: ")] = [
input("Digite o valor da ação: "),
input("Digite o valor do dividendo: "),
input("Digite a quantidade de cotas:")]
resp = input("Digite <S> para continuar.").upper()
gravar_arquivo(dicionario,arquivo)
return "JSON gerado!!!!"
def exibir(arquivo):
dicionario = ler_arquivo(arquivo)
for chave, dado in dicionario.items():
print("Ação.........: ", chave)
print("Valor........: ", dado[0])
print("Dividendos...: ", dado[1])
print("Cotas........: ", dado[2])
acoes = ler_arquivo("acoes.json")
opcao=chamarMenu()
while opcao > 0 and opcao < 5:
if opcao == 1:
print(registrar(acoes, "acoes.json"))
elif opcao == 2:
exibir("acoes.json")
elif opcao == 3:
sair()
opcao = chamarMenu()
| 27.174312
| 61
| 0.609723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 613
| 0.206189
|
4dea6041225ae15383493ad1d5f6078ade49cd6b
| 10,718
|
py
|
Python
|
lib/ipython_view.py
|
drewp/light9
|
ab173a40d095051546e532962f7a33ac502943a6
|
[
"MIT"
] | 2
|
2018-10-05T13:32:46.000Z
|
2022-01-01T22:51:20.000Z
|
lib/ipython_view.py
|
drewp/light9
|
ab173a40d095051546e532962f7a33ac502943a6
|
[
"MIT"
] | 4
|
2021-06-08T19:33:40.000Z
|
2022-03-11T23:18:06.000Z
|
lib/ipython_view.py
|
drewp/light9
|
ab173a40d095051546e532962f7a33ac502943a6
|
[
"MIT"
] | null | null | null |
# this version is adapted from http://wiki.ipython.org/Old_Embedding/GTK
"""
Backend to the console plugin.
@author: Eitan Isaacson
@organization: IBM Corporation
@copyright: Copyright (c) 2007 IBM Corporation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
"""
# this file is a modified version of source code from the Accerciser project
# http://live.gnome.org/accerciser
from gi.repository import Gtk
from gi.repository import Gdk
import re
import sys
import os
from gi.repository import Pango
from io import StringIO
from functools import reduce
try:
import IPython
except Exception as e:
    raise ImportError("Error importing IPython (%s)" % str(e))  # raising a plain string is a TypeError in Python 3
ansi_colors = {'0;30': 'Black',
'0;31': 'Red',
'0;32': 'Green',
'0;33': 'Brown',
'0;34': 'Blue',
'0;35': 'Purple',
'0;36': 'Cyan',
'0;37': 'LightGray',
'1;30': 'DarkGray',
'1;31': 'DarkRed',
'1;32': 'SeaGreen',
'1;33': 'Yellow',
'1;34': 'LightBlue',
'1;35': 'MediumPurple',
'1;36': 'LightCyan',
'1;37': 'White'}
class IterableIPShell:
def __init__(self,argv=None,user_ns=None,user_global_ns=None,
cin=None, cout=None,cerr=None, input_func=None):
if input_func:
IPython.iplib.raw_input_original = input_func
if cin:
IPython.Shell.Term.cin = cin
if cout:
IPython.Shell.Term.cout = cout
if cerr:
IPython.Shell.Term.cerr = cerr
if argv is None:
argv=[]
# This is to get rid of the blockage that occurs during
# IPython.Shell.InteractiveShell.user_setup()
IPython.iplib.raw_input = lambda x: None
self.term = IPython.genutils.IOTerm(cin=cin, cout=cout, cerr=cerr)
os.environ['TERM'] = 'dumb'
excepthook = sys.excepthook
self.IP = IPython.Shell.make_IPython(argv,user_ns=user_ns,
user_global_ns=user_global_ns,
embedded=True,
shell_class=IPython.Shell.InteractiveShell)
self.IP.system = lambda cmd: self.shell(self.IP.var_expand(cmd),
header='IPython system call: ',
verbose=self.IP.rc.system_verbose)
sys.excepthook = excepthook
self.iter_more = 0
self.history_level = 0
        self.complete_sep = re.compile(r'[\s\{\}\[\]\(\)]')  # raw string avoids invalid-escape warnings
def execute(self):
self.history_level = 0
orig_stdout = sys.stdout
sys.stdout = IPython.Shell.Term.cout
try:
line = self.IP.raw_input(None, self.iter_more)
if self.IP.autoindent:
self.IP.readline_startup_hook(None)
except KeyboardInterrupt:
self.IP.write('\nKeyboardInterrupt\n')
self.IP.resetbuffer()
# keep cache in sync with the prompt counter:
self.IP.outputcache.prompt_count -= 1
if self.IP.autoindent:
self.IP.indent_current_nsp = 0
self.iter_more = 0
except:
self.IP.showtraceback()
else:
self.iter_more = self.IP.push(line)
if (self.IP.SyntaxTB.last_syntax_error and
self.IP.rc.autoedit_syntax):
self.IP.edit_syntax_error()
if self.iter_more:
self.prompt = str(self.IP.outputcache.prompt2).strip()
if self.IP.autoindent:
self.IP.readline_startup_hook(self.IP.pre_readline)
else:
self.prompt = str(self.IP.outputcache.prompt1).strip()
sys.stdout = orig_stdout
def historyBack(self):
self.history_level -= 1
return self._getHistory()
def historyForward(self):
self.history_level += 1
return self._getHistory()
def _getHistory(self):
try:
rv = self.IP.user_ns['In'][self.history_level].strip('\n')
except IndexError:
self.history_level = 0
rv = ''
return rv
def updateNamespace(self, ns_dict):
self.IP.user_ns.update(ns_dict)
def complete(self, line):
split_line = self.complete_sep.split(line)
possibilities = self.IP.complete(split_line[-1])
if possibilities:
common_prefix = reduce(self._commonPrefix, possibilities)
completed = line[:-len(split_line[-1])]+common_prefix
else:
completed = line
return completed, possibilities
def _commonPrefix(self, str1, str2):
for i in range(len(str1)):
if not str2.startswith(str1[:i+1]):
return str1[:i]
return str1
    def shell(self, cmd, verbose=0, debug=0, header=''):
        stat = 0
        if verbose or debug: print(header+cmd)
        # flush stdout so we don't mangle python's buffering
        if not debug:
            # os.popen4 was removed in Python 3; os.popen captures stdout,
            # which is all this method prints back
            output = os.popen(cmd)
            print(output.read())
            output.close()
class ConsoleView(Gtk.TextView):
def __init__(self):
Gtk.TextView.__init__(self)
self.modify_font(Pango.FontDescription('Mono'))
self.set_cursor_visible(True)
self.text_buffer = self.get_buffer()
self.mark = self.text_buffer.create_mark('scroll_mark',
self.text_buffer.get_end_iter(),
False)
for code in ansi_colors:
self.text_buffer.create_tag(code,
foreground=ansi_colors[code],
weight=700)
self.text_buffer.create_tag('0')
self.text_buffer.create_tag('notouch', editable=False)
        self.color_pat = re.compile('\x01?\x1b\\[(.*?)m\x02?')  # escape the bracket explicitly for the regex
self.line_start = \
self.text_buffer.create_mark('line_start',
self.text_buffer.get_end_iter(), True
)
self.connect('key-press-event', self._onKeypress)
self.last_cursor_pos = 0
def write(self, text, editable=False):
segments = self.color_pat.split(text)
segment = segments.pop(0)
start_mark = self.text_buffer.create_mark(None,
self.text_buffer.get_end_iter(),
True)
self.text_buffer.insert(self.text_buffer.get_end_iter(), segment)
if segments:
ansi_tags = self.color_pat.findall(text)
for tag in ansi_tags:
i = segments.index(tag)
self.text_buffer.insert_with_tags_by_name(self.text_buffer.get_end_iter(),
segments[i+1], tag)
segments.pop(i)
if not editable:
self.text_buffer.apply_tag_by_name('notouch',
self.text_buffer.get_iter_at_mark(start_mark),
self.text_buffer.get_end_iter())
self.text_buffer.delete_mark(start_mark)
self.scroll_mark_onscreen(self.mark)
def showPrompt(self, prompt):
self.write(prompt)
self.text_buffer.move_mark(self.line_start,self.text_buffer.get_end_iter())
def changeLine(self, text):
iter = self.text_buffer.get_iter_at_mark(self.line_start)
iter.forward_to_line_end()
self.text_buffer.delete(self.text_buffer.get_iter_at_mark(self.line_start), iter)
self.write(text, True)
def getCurrentLine(self):
rv = self.text_buffer.get_slice(self.text_buffer.get_iter_at_mark(self.line_start),
self.text_buffer.get_end_iter(), False)
return rv
def showReturned(self, text):
iter = self.text_buffer.get_iter_at_mark(self.line_start)
iter.forward_to_line_end()
self.text_buffer.apply_tag_by_name('notouch',
self.text_buffer.get_iter_at_mark(self.line_start),
iter)
self.write('\n'+text)
if text:
self.write('\n')
self.showPrompt(self.prompt)
self.text_buffer.move_mark(self.line_start,self.text_buffer.get_end_iter())
self.text_buffer.place_cursor(self.text_buffer.get_end_iter())
def _onKeypress(self, obj, event):
if not event.string:
return
insert_mark = self.text_buffer.get_insert()
insert_iter = self.text_buffer.get_iter_at_mark(insert_mark)
selection_mark = self.text_buffer.get_selection_bound()
selection_iter = self.text_buffer.get_iter_at_mark(selection_mark)
start_iter = self.text_buffer.get_iter_at_mark(self.line_start)
if start_iter.compare(insert_iter) <= 0 and \
start_iter.compare(selection_iter) <= 0:
return
elif start_iter.compare(insert_iter) > 0 and \
start_iter.compare(selection_iter) > 0:
self.text_buffer.place_cursor(start_iter)
elif insert_iter.compare(selection_iter) < 0:
self.text_buffer.move_mark(insert_mark, start_iter)
elif insert_iter.compare(selection_iter) > 0:
self.text_buffer.move_mark(selection_mark, start_iter)
class IPythonView(ConsoleView, IterableIPShell):
def __init__(self, **kw):
ConsoleView.__init__(self)
self.cout = StringIO()
IterableIPShell.__init__(self, cout=self.cout,cerr=self.cout,
input_func=self.raw_input, **kw)
self.connect('key_press_event', self.keyPress)
self.execute()
self.cout.truncate(0)
self.showPrompt(self.prompt)
self.interrupt = False
def raw_input(self, prompt=''):
if self.interrupt:
self.interrupt = False
raise KeyboardInterrupt
return self.getCurrentLine()
def keyPress(self, widget, event):
if event.state & Gdk.ModifierType.CONTROL_MASK and event.keyval == 99:
self.interrupt = True
self._processLine()
return True
        elif event.keyval == Gdk.KEY_Return:  # under gi, key constants live on Gdk, not Gtk.keysyms
self._processLine()
return True
        elif event.keyval == Gdk.KEY_Up:
self.changeLine(self.historyBack())
return True
        elif event.keyval == Gdk.KEY_Down:
self.changeLine(self.historyForward())
return True
# todo: Home needs to advance past the ipython prompt
        elif event.keyval == Gdk.KEY_Tab:
if not self.getCurrentLine().strip():
return False
completed, possibilities = self.complete(self.getCurrentLine())
if len(possibilities) > 1:
slice = self.getCurrentLine()
self.write('\n')
for symbol in possibilities:
self.write(symbol+'\n')
self.showPrompt(self.prompt)
self.changeLine(completed or slice)
return True
def _processLine(self):
self.history_pos = 0
self.execute()
rv = self.cout.getvalue()
if rv: rv = rv.strip('\n')
self.showReturned(rv)
self.cout.truncate(0)
| 35.026144
| 90
| 0.628755
| 9,292
| 0.866953
| 0
| 0
| 0
| 0
| 0
| 0
| 1,307
| 0.121944
|
4deba880f54b833c42a876a0e52201d76815fdfb
| 513
|
py
|
Python
|
todo/urls.py
|
incomparable/Django
|
ba2f38f694b1055215559c4ca4173c245918fabf
|
[
"Apache-2.0"
] | null | null | null |
todo/urls.py
|
incomparable/Django
|
ba2f38f694b1055215559c4ca4173c245918fabf
|
[
"Apache-2.0"
] | null | null | null |
todo/urls.py
|
incomparable/Django
|
ba2f38f694b1055215559c4ca4173c245918fabf
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^get', views.index, name='index'),
url(r'^details/(?P<id>\w)/$', views.details, name='details'),
url(r'^add', views.add, name='add'),
url(r'^delete', views.delete, name='delete'),
url(r'^update', views.update, name='update'),
# url(r'^signup', views.signup, name='signup'),
# url(r'^login', views.login, name='login'),
# url(r'^login/$', auth_views.login),
]
| 28.5
| 65
| 0.598441
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 235
| 0.45809
|
4debda04f4303a03d05f73d0f622731078a63cdf
| 336
|
py
|
Python
|
first_steps_in_coding_and_simple_operations_and_calculations/exercise/charity_campaign.py
|
PetkoAndreev/Python-basics
|
a376362548380ae50c7c707551cb821547f44402
|
[
"MIT"
] | null | null | null |
first_steps_in_coding_and_simple_operations_and_calculations/exercise/charity_campaign.py
|
PetkoAndreev/Python-basics
|
a376362548380ae50c7c707551cb821547f44402
|
[
"MIT"
] | null | null | null |
first_steps_in_coding_and_simple_operations_and_calculations/exercise/charity_campaign.py
|
PetkoAndreev/Python-basics
|
a376362548380ae50c7c707551cb821547f44402
|
[
"MIT"
] | null | null | null |
days = int(input())
sladkar = int(input())
cake = int(input())
gofreta = int(input())
pancake = int(input())
cake_price = cake*45
gofreta_price = gofreta*5.8
pancake_price = pancake*3.2
day_price = (cake_price + gofreta_price + pancake_price)*sladkar
total_price = days*day_price
campaign = total_price - (total_price/8)
print(campaign)
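# Worked example with illustrative inputs (not part of the original exercise):
# days=20, sladkar=3, cake=2, gofreta=3, pancake=5 gives a day price of
# (2*45 + 3*5.8 + 5*3.2) * 3 = 370.2, a total of 20 * 370.2 = 7404.0, and a
# campaign amount of 7404.0 - 7404.0/8 = 6478.5.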
| 28
| 64
| 0.741071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4dec80c0904955f695f9881970d5b2f7945e222c
| 9,234
|
py
|
Python
|
deepbiome/loss_and_metric.py
|
Young-won/deepbiome
|
644bc226f1149038d0af7203a03a77ca6e931835
|
[
"BSD-3-Clause"
] | 4
|
2019-10-20T15:56:19.000Z
|
2021-03-17T16:48:35.000Z
|
deepbiome/loss_and_metric.py
|
Young-won/deepbiome
|
644bc226f1149038d0af7203a03a77ca6e931835
|
[
"BSD-3-Clause"
] | 1
|
2019-11-11T22:47:57.000Z
|
2019-11-11T22:47:57.000Z
|
deepbiome/loss_and_metric.py
|
Young-won/deepbiome
|
644bc226f1149038d0af7203a03a77ca6e931835
|
[
"BSD-3-Clause"
] | 1
|
2019-11-11T18:17:58.000Z
|
2019-11-11T18:17:58.000Z
|
######################################################################
## DeepBiome
## - Loss and metrics (mse, cross-entropy)
##
## July 10. 2019
## Youngwon (youngwon08@gmail.com)
##
## Reference
## - Keras (https://github.com/keras-team/keras)
######################################################################
import numpy as np
import sklearn.metrics as skmetrics
from keras.callbacks import Callback
import tensorflow as tf
import keras.backend as K
from keras.losses import mean_squared_error, mean_absolute_error, binary_crossentropy, categorical_crossentropy, sparse_categorical_crossentropy
from keras.metrics import binary_accuracy, categorical_accuracy, sparse_categorical_accuracy
from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score
###############################################################################################################################
# tf loss functions
def recall(y_true, y_pred):
y_pred = K.round(y_pred)
score = tf.py_function(lambda y_true, y_pred : recall_score(y_true, y_pred, average='macro', sample_weight=None).astype('float32'),
[y_true, y_pred],
Tout=tf.float32,
name='sklearnRecall')
return score
# true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
# predicted_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
# recall = true_positives / (predicted_positives + K.epsilon())
# return recall
# return K.sum(y_true==y_pred)/(K.sum(y_true==y_pred) + K.sum(y_true==(1-y_pred)) + 1e-10)
def precision(y_true, y_pred):
y_pred = K.round(y_pred)
score = tf.py_function(lambda y_true, y_pred : precision_score(y_true, y_pred, average='macro', sample_weight=None).astype('float32'),
[y_true, y_pred],
Tout=tf.float32,
name='sklearnPrecision')
return score
# true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
# predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
# precision = true_positives / (predicted_positives + K.epsilon())
# return precision
# return K.sum(y_true==y_pred)/(K.sum(y_true==y_pred) + K.sum((1-y_true)==y_pred) + 1e-10)
def sensitivity(y_true, y_pred):
y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), 0.5), K.floatx())
neg_y_pred = 1 - y_pred
true_positive = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
false_negative = K.round(K.sum(K.clip(y_true * neg_y_pred, 0, 1)))
return (true_positive) / (true_positive + false_negative + K.epsilon())
def specificity(y_true, y_pred):
y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), 0.5), K.floatx())
neg_y_true = 1 - y_true
neg_y_pred = 1 - y_pred
false_positive = K.round(K.sum(K.clip(neg_y_true * y_pred, 0, 1)))
true_negative = K.round(K.sum(K.clip(neg_y_true * neg_y_pred, 0, 1)))
return (true_negative) / (false_positive + true_negative + K.epsilon())
def gmeasure(y_true, y_pred):
return (sensitivity(y_true, y_pred) * specificity(y_true, y_pred)) ** 0.5
def auc(y_true, y_pred):
# https://stackoverflow.com/questions/43263111/defining-an-auc-metric-for-keras-to-support-evaluation-of-validation-dataset
score = tf.py_function(lambda y_true, y_pred : roc_auc_score(y_true, y_pred, average='macro', sample_weight=None).astype('float32'),
[y_true, y_pred],
Tout=tf.float32,
name='sklearnAUC')
return score
def f1_score_with_nan(y_true, y_pred, average='macro', sample_weight=None):
try:
score = f1_score(y_true, y_pred, average=average, sample_weight=sample_weight)
    except Exception:  # f1_score can raise on degenerate inputs; fall back to NaN
score = np.nan
return score
def f1(y_true, y_pred):
# precision = precision(y_true, y_pred)
# recall = recall(y_true, y_pred)
# return 2 * (precision * recall) / (precision + recall + K.epsilon())
# https://stackoverflow.com/questions/43263111/defining-an-auc-metric-for-keras-to-support-evaluation-of-validation-dataset
y_pred = K.round(y_pred)
score = tf.py_function(lambda y_true, y_pred : f1_score_with_nan(y_true, y_pred, average='macro', sample_weight=None).astype('float32'),
[y_true, y_pred],
Tout=tf.float32,
name='sklearnF1')
return score
def ss(a, axis=0):
# a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
def pearsonr(x,y):
n = len(x)
mx = np.mean(x)
my = np.mean(y)
xm, ym = x-mx, y-my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(ss(xm) * ss(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
return r
def correlation_coefficient(y_true, y_pred):
score = tf.py_function(lambda y_true, y_pred : pearsonr(y_true, y_pred),
[y_true, y_pred],
Tout=tf.float32,
name='correlation_coefficient')
return score
# TODO
# https://stackoverflow.com/questions/41032551/how-to-compute-receiving-operating-characteristic-roc-and-auc-in-keras
# def auc(y_true, y_pred):
# return NotImplementedError()
###############################################################################################################################
# helper
def np_binary_accuracy(y_true, y_pred):
y_pred = (y_pred>=0.5).astype(np.int32)
return skmetrics.accuracy_score(y_true, y_pred, normalize=True, sample_weight=None)
def np_precision(y_true, y_pred):
return precision_score(y_true, y_pred, labels=None, pos_label=1, average='macro', sample_weight=None)
# return (np.sum(y_true*y_pred) + 1e-7)/(np.sum(y_true*y_pred) + np.sum((1-y_true)*y_pred) + 1e-7)
def np_recall(y_true, y_pred):
return recall_score(y_true, y_pred, labels=None, pos_label=1, average='macro', sample_weight=None)
# return (np.sum(y_true*y_pred) + 1e-7)/(np.sum(y_true*y_pred) + np.sum(y_true*(1-y_pred)) + 1e-7)
def np_f1_score(y_true, y_pred):
return skmetrics.f1_score(y_true, y_pred, labels=None, pos_label=1, average='macro', sample_weight=None)
def np_roc_auc(y_true, y_pred):
return skmetrics.roc_auc_score(y_true, y_pred, average='macro', sample_weight=None)
def np_confusion_matrix(y_true, y_pred):
return skmetrics.confusion_matrix(y_true, y_pred).ravel()
def np_sensitivity(y_true, y_pred):
y_true = y_true.astype(np.int32)
y_pred = (y_pred >= 0.5).astype(np.int32)
neg_y_pred = 1 - y_pred
tp = np.sum(y_true * y_pred)
fn = np.sum(y_true * neg_y_pred)
return tp / (tp+fn)
def np_specificity(y_true, y_pred):
y_true = y_true.astype(np.int32)
y_pred = (y_pred >= 0.5).astype(np.int32)
neg_y_true = 1 - y_true
neg_y_pred = 1 - y_pred
fp = np.sum(neg_y_true * y_pred)
tn = np.sum(neg_y_true * neg_y_pred)
return tn / (tn+fp)
def np_PPV(y_true, y_pred):
y_true = y_true.astype(np.int32)
y_pred = (y_pred >= 0.5).astype(np.int32)
neg_y_true = 1 - y_true
tp = np.sum(y_true * y_pred)
fp = np.sum(neg_y_true * y_pred)
return tp/(tp+fp)
def np_gmeasure(y_true, y_pred):
sensitivity = np_sensitivity(y_true, y_pred)
specificity = np_specificity(y_true, y_pred)
return (sensitivity*specificity)**0.5
def metric_test(y_true, y_pred):
return (np_sensitivity(y_true, y_pred), np_specificity(y_true, y_pred),
np_gmeasure(y_true, y_pred), np_binary_accuracy(y_true, y_pred),
np_roc_auc(y_true, y_pred))
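# A small illustrative check of the numpy helpers above (toy arrays, added
# for clarity, not part of the original file):
#   y_true = np.array([1, 0, 1, 1, 0])
#   y_pred = np.array([0.9, 0.2, 0.4, 0.8, 0.1])
#   metric_test(y_true, y_pred)
#   # -> (sensitivity 2/3, specificity 1.0, g-measure ~0.816,
#   #     accuracy 0.8, ROC AUC 1.0)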
###############################################################################################################################
# if __name__ == "__main__":
# test_metrics = {'Accuracy':binary_accuracy, 'Precision':precision, 'Recall':recall}
# print('Test loss functions %s' % test_metrics.keys())
# y_true_set = np.array([[[0,0,0,0,0],
# [0,0,0,0,0],
# [0,1,1,0,0],
# [1,1,1,0,0],
# [0,1,0,0,0]]])
# y_pred_set = np.array([[[0,0,0,0,1],
# [0,0,0,0,0],
# [0,1,0.6,0,0],
# [0,1,1,0,0],
# [0,0.3,0,0,0]]])
# def test(acc, y_true_set, y_pred_set):
# sess = tf.Session()
# K.set_session(sess)
# with sess.as_default():
# return acc.eval(feed_dict={y_true: y_true_set, y_pred: y_pred_set})
# # tf
# y_true = tf.placeholder("float32", shape=(None,y_true_set.shape[1],y_true_set.shape[2]))
# y_pred = tf.placeholder("float32", shape=(None,y_pred_set.shape[1],y_pred_set.shape[2]))
# metric_list = [binary_accuracy(y_true, y_pred),
# precision(y_true, y_pred),
# recall(y_true, y_pred)]
# # numpy
# print('%15s %15s %15s' % tuple(test_metrics.keys()))
# print('tf : {}'.format([test(acc, y_true_set, y_pred_set) for acc in metric_list]))
# print('np : {}'.format(np.round(metric_test(y_true_set[0],y_pred_set[0]),8)))
| 42.164384
| 144
| 0.594975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,788
| 0.410223
|
4ded2765ebba38c75e11130b9978c0647bfd5359
| 3,177
|
py
|
Python
|
Hough.py
|
andresgmz/Scripts-Python
|
1f56e5790dc9c38d9bbf5dc040ead45a8f3ca937
|
[
"MIT"
] | null | null | null |
Hough.py
|
andresgmz/Scripts-Python
|
1f56e5790dc9c38d9bbf5dc040ead45a8f3ca937
|
[
"MIT"
] | null | null | null |
Hough.py
|
andresgmz/Scripts-Python
|
1f56e5790dc9c38d9bbf5dc040ead45a8f3ca937
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import matplotlib.pyplot as plt
#from matplotlib import pyplot as plt
from tkinter import filedialog
from tkinter import *
root = Tk()
root.withdraw()
root.filename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("all files",".*"),("jpg files",".jpg")))
img = cv2.imread(root.filename)
root.destroy()
# Convert to gray-scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Blur the image to reduce noise
img_blur = cv2.medianBlur(gray, 5)
# Apply the Hough transform on the image. Earlier parameter sets, kept for reference:
#   img.shape[0]/16, param1=100, param2=11, minRadius=62, maxRadius=67
#   circles = cv2.HoughCircles(img_blur, cv2.HOUGH_GRADIENT, 1, img.shape[0]/16, param1=200, param2=25, minRadius=60, maxRadius=67)
face_cascade = cv2.CascadeClassifier('C:/Users/andre/Desktop/NovenoSemestre/VisionArtificial/Python/haarcascade_frontalface_alt.xml')
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
center = (x + w//2, y + h//2)
#circles = cv2.HoughCircles(img_blur, cv2.HOUGH_GRADIENT, 1, img.shape[0]/128, param1=100, param2=11, minRadius=50, maxRadius=100)
circles = cv2.HoughCircles(img_blur, cv2.HOUGH_GRADIENT, 1, img.shape[0]/128, param1=100, param2=11, minRadius=(w//2-10), maxRadius=(w//2+10))
(h, w) = img_blur.shape[:2]  # compute the image size
(pointRefX, pointRefY) = center
puntoMinimo = 100
if circles is not None:
    circles = np.uint16(np.around(circles))
    for i in circles[0, :]:
        # find the circle closest to the reference point
        xCercano = np.absolute(i[0]-pointRefX)
        yCercano = np.absolute(i[1]-pointRefY)
        puntoCercano = xCercano+yCercano
        if (puntoCercano < puntoMinimo):
            puntoMinimo = puntoCercano
            circuloCercano = i
# Draw outer circle
#frame = cv2.ellipse(img, center, (w//2, h//2), 0, 0, 360,(100, 7, 55), 2)
cv2.ellipse(img, (circuloCercano[0], circuloCercano[1]),(circuloCercano[2],circuloCercano[2]+15),0,0,360,(0, 255, 0), 2)
# Draw inner circle
cv2.circle(img, (circuloCercano[0], circuloCercano[1]), circuloCercano[2], (0, 255, 0), 2)
cv2.circle(img, (circuloCercano[0], circuloCercano[1]), 2, (0, 0, 255), 3)
""" cv2.circle(img, (circuloCercano[0], circuloCercano[1]), circuloCercano[2], (0, 255, 0), 2)
# Draw inner circle
cv2.circle(img, (circuloCercano[0], circuloCercano[1]), 2, (0, 0, 255), 3) """
""" if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
#Definir el circulo mas cercano de la
xCercano =np.absolute(i[0]-pointRefX)
yCercano =np.absolute(i[1]-pointRefY)
puntoCercano = xCercano+yCercano
if (puntoCercano < puntoMinimo):
puntoMinimo = puntoCercano
circuloCercano = i
# Draw outer circle
cv2.circle(img, (i[0], i[1]), i[2], (0, 255, 0), 2)
# Draw inner circle
cv2.circle(img, (i[0], i[1]), 2, (0, 0, 255), 3)
"""
cv2.imshow("Mascara",img)
cv2.waitKey(0)
| 38.743902
| 153
| 0.645892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,638
| 0.515419
|
4dee15ccda1b59264009aac028177487941365ec
| 3,927
|
py
|
Python
|
src/SentimentAnalyzer.py
|
IChowdhury01/Sentiment-Analyzer
|
0a566365eed00b0e76feb77c638579dd80f75068
|
[
"MIT"
] | null | null | null |
src/SentimentAnalyzer.py
|
IChowdhury01/Sentiment-Analyzer
|
0a566365eed00b0e76feb77c638579dd80f75068
|
[
"MIT"
] | null | null | null |
src/SentimentAnalyzer.py
|
IChowdhury01/Sentiment-Analyzer
|
0a566365eed00b0e76feb77c638579dd80f75068
|
[
"MIT"
] | null | null | null |
# Binary Sentiment Analysis using Recurrent Neural Networks
# Import libraries & dataset list
import tensorflow as tf
import tensorflow_datasets as dslist
# Load Dataset
print("\nLoading dataset...")
# Download dataset and dataset info
DATASET_CODE = 'imdb_reviews/subwords8k' # Using a TensorFlow binary sentiment classification dataset
dataset, dsinfo = dslist.load(DATASET_CODE,
with_info=True,
as_supervised=True)
# Separate into training and testing data.
training = dataset['train']
testing = dataset['test']
# Declare encoder (maps each word in a string to its index in the dataset's vocabulary)
encoder = dsinfo.features['text'].encoder
print("Dataset loaded.")
# Setup for training
# Prepare data. Create batches of encoded strings and zero-pad them.
BUFFER_SIZE = 10000
BATCH_SIZE = 64 # Max number of encoded strings in batch
padded_shapes = ([None], ())
training = (training
.shuffle(BUFFER_SIZE)
.padded_batch(BATCH_SIZE, padded_shapes=padded_shapes))
testing = (testing
.padded_batch(BATCH_SIZE, padded_shapes=padded_shapes))
# Setup Recurrent Neural Network (RNN)
# Create RNN model using Keras.
OUTPUT_SIZE = 64
rnn_model = tf.keras.Sequential([ # Keras Sequential model: processes sequence of encoded strings (indices), embeds each index into vector, then processes through embedding layer
tf.keras.layers.Embedding(encoder.vocab_size, OUTPUT_SIZE), # Add embedding layer: stores each word as trainable vector
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)), # Run the input sequence through the LSTM layer in both directions (helps learn long-range dependencies).
# Add layers
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(1)
])
# Compile RNN model
rnn_model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=['accuracy'])
# Train RNN
NUM_ITERATIONS = 1
print("\nTraining neural network...")
history = rnn_model.fit(training, epochs=NUM_ITERATIONS, validation_data=testing)
print("Training complete.")
# Test RNN.
print("\nTesting on dataset...")
loss, accuracy = rnn_model.evaluate(testing) # Return test loss and test accuracy
print("Testing complete.")
# Process and print results
loss = round(loss, 3)
accuracy = round(accuracy*100, 2)
print("Test Loss: {}".format(loss))
print("Test Accuracy: {}%".format(accuracy))
# Prediction
# Zero-pads a vector up to a target size.
def pad_vector(vec, target_size):
num_zeros = [0] * (target_size - len(vec))
vec.extend(num_zeros)
return vec
# Predicts sentiment. Output will be a decimal number.
def predict_sentiment(review):
encoded_review = encoder.encode(review) # Encode review. Map each word to an index.
encoded_review = pad_vector(encoded_review, BATCH_SIZE) # Zero-padding
encoded_review = tf.cast(encoded_review, tf.float32)
prediction = rnn_model.predict(tf.expand_dims(encoded_review, 0))
return prediction
# Predictions with value over 0.5 are positive sentiments.
def interpret_prediction(prediction):
if prediction >= 0.5:
return "Positive"
else:
return "Negative"
# Predict sentiment of user-inputted review
user_query = input("\nEnter a review to predict its sentiment, or enter nothing to exit the program:\n")
while(user_query != ""):
prediction = predict_sentiment(user_query)
sentiment = interpret_prediction(prediction)
print("\nSentiment: {} (Value: {})".format(sentiment, prediction))
user_query = input("\n\nEnter a review to predict its sentiment, or enter nothing to exit the program:\n")
| 30.679688
| 181
| 0.689585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,692
| 0.430863
|
4dee9911d375f6b557bb57e2701f998ccd07ef1c
| 5,146
|
py
|
Python
|
google_image_scraping_script_for_arg.py
|
KuoYuHong/Shihu-Cat-Image-Recognition-System
|
5f184e4902fa6edb4602f01369b56ef03ad4790d
|
[
"MIT"
] | 1
|
2021-11-24T14:46:06.000Z
|
2021-11-24T14:46:06.000Z
|
google_image_scraping_script_for_arg.py
|
KuoYuHong/Shihu-Cat-Image-Recognition-System
|
5f184e4902fa6edb4602f01369b56ef03ad4790d
|
[
"MIT"
] | null | null | null |
google_image_scraping_script_for_arg.py
|
KuoYuHong/Shihu-Cat-Image-Recognition-System
|
5f184e4902fa6edb4602f01369b56ef03ad4790d
|
[
"MIT"
] | null | null | null |
import selenium
from selenium import webdriver
import time
import requests
import os
from PIL import Image
import io
import hashlib
# All in same directory
DRIVER_PATH = 'chromedriver.exe'
def fetch_image_urls(query: str, max_links_to_fetch: int, wd: webdriver.Chrome, sleep_between_interactions: float = 0.5):
def scroll_to_end(wd):
wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(sleep_between_interactions)
# build the google query
search_url = "https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img"
# load the page
wd.get(search_url.format(q=query))
image_urls = set()
image_count = 0
results_start = 0
error_clicks = 0
while (image_count < max_links_to_fetch) & (error_clicks < 30): # error clicks to stop when there are no more results to show by Google Images. You can tune the number
scroll_to_end(wd)
print('Starting search for Images')
# get all image thumbnail results
thumbnail_results = wd.find_elements_by_css_selector("img.Q4LuWd")
number_results = len(thumbnail_results)
print(f"Found: {number_results} search results. Extracting links from {results_start}:{number_results}")
if(results_start == number_results):
print("results_start == number_results")
break
else:
pass
for img in thumbnail_results[results_start:max_links_to_fetch]:
# try to click every thumbnail such that we can get the real image behind it
print("Total Errors till now:", error_clicks)
try:
print('Trying to Click the Image')
img.click()
time.sleep(sleep_between_interactions)
print('Image Click Successful!')
except Exception:
error_clicks = error_clicks + 1
print('ERROR: Unable to Click the Image')
if(results_start < number_results):
continue
else:
break
results_start = results_start + 1
# extract image urls
print('Extracting of Image URLs')
actual_images = wd.find_elements_by_css_selector('img.n3VNCb')
for actual_image in actual_images:
if actual_image.get_attribute('src') and 'http' in actual_image.get_attribute('src'):
image_urls.add(actual_image.get_attribute('src'))
image_count = len(image_urls)
print('Current Total Image Count:', image_count)
if len(image_urls) >= max_links_to_fetch:
print(f"Found: {len(image_urls)} image links, done!")
break
else:
load_more_button = wd.find_element_by_css_selector(".mye4qd")
if load_more_button:
wd.execute_script("document.querySelector('.mye4qd').click();")
results_start = len(thumbnail_results)
return image_urls
def persist_image(folder_path: str, file_name: str, url: str):
    try:
        image_content = requests.get(url).content
    except Exception as e:
        print(f"ERROR - Could not download {url} - {e}")
        return  # image_content would be undefined below, so stop here
try:
image_file = io.BytesIO(image_content)
image = Image.open(image_file).convert('RGB')
folder_path = os.path.join(folder_path,file_name)
if os.path.exists(folder_path):
file_path = os.path.join(folder_path,hashlib.sha1(image_content).hexdigest()[:10] + '.jpg')
else:
os.mkdir(folder_path)
file_path = os.path.join(folder_path,hashlib.sha1(image_content).hexdigest()[:10] + '.jpg')
with open(file_path, 'wb') as f:
image.save(f, "JPEG", quality=85)
print(f"SUCCESS - saved {url} - as {file_path}")
except Exception as e:
print(f"ERROR - Could not save {url} - {e}")
if __name__ == '__main__':
    '''
    Usage:
    Put this script and chromedriver.exe in the same folder, then fetch images from the command line.
    Input format:
    python google_image_scraping_script_for_arg.py <search keyword> <number of images>
    Examples:
    python google_image_scraping_script_for_arg.py 虎斑貓 30
    python google_image_scraping_script_for_arg.py 石虎 50
    '''
import sys
import ast
if len(sys.argv) == 3:
query_name = sys.argv[1]
number_of_picture = sys.argv[2]
print("query_name:",query_name) #str
print("number_of_picture:",number_of_picture) #str
wd = webdriver.Chrome(executable_path=DRIVER_PATH)
queries = [query_name] #change your set of queries here
for query in queries:
wd.get('https://google.com')
search_box = wd.find_element_by_css_selector('input.gLFyf')
search_box.send_keys(query)
links = fetch_image_urls(query,int(number_of_picture),wd) # 200 denotes no. of images you want to download
images_path = './'
for i in links:
persist_image(images_path,query,i)
wd.quit()
else:
print("Error input format")
| 36.496454
| 171
| 0.621842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,645
| 0.3125
|
4df16cb84c883d268ef0671570a73d61fad65816
| 1,515
|
py
|
Python
|
pyslowloris/utils.py
|
goasdsdkai/daas
|
78ef23b254893efca22748fe619ef22648b8c1e8
|
[
"MIT"
] | 75
|
2017-06-15T05:58:02.000Z
|
2022-03-31T22:59:25.000Z
|
pyslowloris/utils.py
|
goasdsdkai/daas
|
78ef23b254893efca22748fe619ef22648b8c1e8
|
[
"MIT"
] | 8
|
2017-08-25T04:14:19.000Z
|
2021-09-10T06:21:33.000Z
|
pyslowloris/utils.py
|
goasdsdkai/daas
|
78ef23b254893efca22748fe619ef22648b8c1e8
|
[
"MIT"
] | 32
|
2017-03-22T22:52:26.000Z
|
2022-03-07T15:53:01.000Z
|
"""
MIT License
Copyright (c) 2020 Maxim Krivich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re
url_pattern = re.compile(
r"^(?:http)s?://" # http:// or https://
# domain...
r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)"
r"+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|"
r"localhost|" # localhost...
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # ...or ip
r"(?::\d+)?" # optional port
r"(?:/?|[/?]\S+)$", re.IGNORECASE
)
def validate_url(url: str) -> bool:
return bool(url_pattern.match(url))
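# Hedged self-check: a few illustrative URLs against the pattern above.
if __name__ == "__main__":
    assert validate_url("https://example.com/path?q=1")
    assert validate_url("http://127.0.0.1:8080")
    assert not validate_url("not a url")
    print("validate_url sanity checks passed")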
| 36.95122
| 78
| 0.69769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,333
| 0.879868
|
4df1faa8f49c3cdacafcecb2f8765081676e89ad
| 5,305
|
py
|
Python
|
brahe/data_models/geojson.py
|
duncaneddy/brahe
|
4a1746ef3c14211b0709de6e7e34b6f52fc0e686
|
[
"MIT"
] | 14
|
2019-05-29T13:36:55.000Z
|
2022-02-11T15:26:13.000Z
|
brahe/data_models/geojson.py
|
duncaneddy/brahe
|
4a1746ef3c14211b0709de6e7e34b6f52fc0e686
|
[
"MIT"
] | 1
|
2020-05-27T12:14:39.000Z
|
2020-05-27T15:51:21.000Z
|
brahe/data_models/geojson.py
|
duncaneddy/brahe
|
4a1746ef3c14211b0709de6e7e34b6f52fc0e686
|
[
"MIT"
] | 2
|
2019-10-24T05:20:54.000Z
|
2019-12-08T03:59:10.000Z
|
"""The geojson module provides data model classes for initialization and storing
of GeoJSON objects.
"""
import typing
import typing_extensions
import pydantic
import numpy as np
import brahe.astro as astro
import brahe.coordinates as coords
import brahe.frames as frames
geographic_point = pydantic.conlist(float, min_items=2, max_items=3)
class GeoJSONGeometry(pydantic.BaseModel):
type: typing_extensions.Literal['Point', 'LineString', 'Polygon', 'MultiPoint', 'MultiLineString', 'MultiPolygon']
coordinates: typing.Union[geographic_point,
typing.List[geographic_point],
typing.List[typing.List[geographic_point]],
                              typing.List[typing.List[typing.List[geographic_point]]]] = pydantic.Field(..., description='Geometry Coordinates')
class GeoJSONObject(pydantic.BaseModel):
type: typing_extensions.Literal['Feature'] = pydantic.Field('Feature', description='GeoJSON Object type')
    geometry: GeoJSONGeometry = pydantic.Field(..., description='GeoJSON geometry')  # a string default here would never validate
properties: typing.Optional[dict] = pydantic.Field({}, description='Additional properties')
@property
def geotype(self):
'''Return GeoJSON geometry type.
Returns:
str: GeoJSON geometry type
'''
return self.geometry.type
@property
def num_points(self):
'''Returns the number of unique points in the GeoJSON Object
Returns:
int: Number of points in object
'''
if self.geometry.type == 'Point':
return 1
elif self.geometry.type == 'LineString':
return len(self.geometry.coordinates)
elif self.geometry.type == 'Polygon':
return len(self.geometry.coordinates[0]) - 1
else:
raise NotImplementedError(f'Function not implemented for GeoJSON Geometry type: {self.geotype}')
@property
def center(self):
'''Return center point of object. Given at [lon, lat] in degrees.
Returns:
np.ndarray: Center point of Geometry object. Units: [deg]
'''
if self.geometry.type == 'Point':
return np.array(self.geometry.coordinates)
        elif self.geometry.type == 'LineString':
            # a LineString's coordinates are a flat list of points (no ring nesting)
            center = np.zeros(len(self.geometry.coordinates[0]))
            for idx in range(0, self.num_points):
                center += np.array(self.geometry.coordinates[idx])/self.num_points
            return center
elif self.geometry.type == 'Polygon':
center = np.zeros(len(self.geometry.coordinates[0][0]))
for idx in range(0, self.num_points):
center += np.array(self.geometry.coordinates[0][idx])/self.num_points
return center
else:
raise NotImplementedError(f'Function not implemented for GeoJSON Geometry type: {self.geotype}')
@property
def center_ecef(self):
'''Return center point of object. Given as ecef coordinates.
Returns:
np.ndarray: Center point of Geometry object in ECEF frame. Units: [m]
'''
center = self.center
# Ensure input has altitude
if len(center) == 2:
center = np.array([center[0], center[1], 0.0])
ecef = coords.sGEODtoECEF(center, use_degrees=True)
# Convert point to ECEF frame
return ecef
@property
def coordinates(self):
'''Returns the coordinates of the GeoJSON Object. If polygon type
the last point will be repeated.
'''
if self.geometry.type == 'Point':
pnt = self.geometry.coordinates
if len(pnt) == 2:
yield np.array([pnt[0], pnt[1], 0.0])
else:
yield np.array(pnt)
elif self.geometry.type == 'Polygon':
if len(self.geometry.coordinates) > 1:
raise RuntimeError('Polygon with multiple lines are not currently supported.')
for idx in range(0, self.num_points + 1):
pnt = self.geometry.coordinates[0][idx]
if len(pnt) == 2:
yield np.array([pnt[0], pnt[1], 0.0])
else:
yield np.array(pnt)
else:
raise NotImplementedError(f'Function not implemented for GeoJSON Geometry type: {self.geotype}')
@property
def vertices(self):
'''Returns the unique vertices of the GeoJSON Object. This ensures
for polygon types the last point won't be repeated.
'''
if self.geometry.type == 'Point':
pnt = self.geometry.coordinates
if len(pnt) == 2:
yield np.array([pnt[0], pnt[1], 0.0])
else:
yield np.array(pnt)
elif self.geometry.type == 'Polygon':
if len(self.geometry.coordinates) > 1:
raise RuntimeError('Polygon with multiple lines are not currently supported.')
for idx in range(0, self.num_points):
pnt = self.geometry.coordinates[0][idx]
if len(pnt) == 2:
yield np.array([pnt[0], pnt[1], 0.0])
else:
yield np.array(pnt)
else:
raise NotImplementedError(f'Function not implemented for GeoJSON Geometry type: {self.geotype}')
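# --- Hedged sketch: building a Point feature and reading the helpers above ---
# The lon/lat pair is invented for illustration.
if __name__ == "__main__":
    pt = GeoJSONObject(geometry={'type': 'Point', 'coordinates': [36.82, -1.29]})
    print(pt.geotype, pt.num_points, pt.center)  # Point 1 [36.82 -1.29]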
| 35.604027
| 121
| 0.604713
| 4,958
| 0.93459
| 1,922
| 0.3623
| 4,158
| 0.783789
| 0
| 0
| 1,653
| 0.311593
|
4df2f7977ee6df4348bd5f199099edb4427af89e
| 521
|
py
|
Python
|
lab7/7.7.py
|
rikudo765/algorithms
|
eb78852143662bc2e42df6271e9a015cfa8ffdd1
|
[
"MIT"
] | 1
|
2020-11-16T18:46:24.000Z
|
2020-11-16T18:46:24.000Z
|
lab7/7.7.py
|
rikudo765/algorithms
|
eb78852143662bc2e42df6271e9a015cfa8ffdd1
|
[
"MIT"
] | null | null | null |
lab7/7.7.py
|
rikudo765/algorithms
|
eb78852143662bc2e42df6271e9a015cfa8ffdd1
|
[
"MIT"
] | null | null | null |
n = int(input())
lst = list(map(int, input().split()))
def sort1(arr):
    for i in range(1, len(arr)):  # use the list's own length, not the global n
cur = arr[i]
pos = i
check = False
while pos > 0:
if arr[pos - 1] > cur:
check = True
arr[pos] = arr[pos - 1]
else:
break
pos -= 1
arr[pos] = cur
if check:
for j in range(len(arr)):
print(arr[j], end=' ')
print('')
sort1(lst)
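# Illustrative trace (input values invented). For the input
#   5
#   3 1 4 1 5
# sort1 prints the array after every pass that actually shifted elements:
#   1 3 4 1 5
#   1 1 3 4 5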
| 19.296296
| 39
| 0.380038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 0.009597
|
4df34ddd891c605f94b640242ef9b998d8ecdfb4
| 7,141
|
py
|
Python
|
CORE/engines/Gudmundsson_Constraint.py
|
geoffreynyaga/ostrich-project
|
157cd7a3c3d9014e31ef21ca21de43f04d039997
|
[
"MIT"
] | 15
|
2017-11-08T10:03:26.000Z
|
2021-12-21T07:02:44.000Z
|
CORE/engines/Gudmundsson_Constraint.py
|
geoffreynyaga/ostrich-project
|
157cd7a3c3d9014e31ef21ca21de43f04d039997
|
[
"MIT"
] | 9
|
2020-01-17T15:09:22.000Z
|
2022-03-25T19:02:05.000Z
|
CORE/engines/Gudmundsson_Constraint.py
|
geoffreynyaga/ostrich-project
|
157cd7a3c3d9014e31ef21ca21de43f04d039997
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
##################################################################################
# File: c:\Projects\KENYA ONE PROJECT\CORE\engines\Gudmundsson_Constraint.py #
# Project: c:\Projects\KENYA ONE PROJECT\CORE\engines #
# Created Date: Thursday, January 9th 2020, 8:56:55 pm #
# Author: Geoffrey Nyaga Kinyua ( <info@geoffreynyaga.com> ) #
# ----- #
# Last Modified: Thursday January 9th 2020 8:56:55 pm #
# Modified By: Geoffrey Nyaga Kinyua ( <info@geoffreynyaga.com> ) #
# ----- #
# MIT License #
# #
# Copyright (c) 2020 KENYA ONE PROJECT #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of#
# this software and associated documentation files (the "Software"), to deal in #
# the Software without restriction, including without limitation the rights to #
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies #
# of the Software, and to permit persons to whom the Software is furnished to do #
# so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# ----- #
# Copyright (c) 2020 KENYA ONE PROJECT #
##################################################################################
import sys
sys.path.append("../")
from CORE.API.db_API import write_to_db, read_from_db # type: ignore
from math import sqrt, pi
import numpy as np # type: ignore
import matplotlib.pyplot as plt # type: ignore
grossWeight = read_from_db("finalMTOW")
cruiseSpeed = read_from_db("cruiseSpeed")
ROC = read_from_db("rateOfClimb") * 3.28 * 60
vLof = read_from_db("stallSpeed") * 1.1
AR = read_from_db("AR")
cdMin = read_from_db("cdMin")
wsfromsizing = read_from_db("WS")
rhoSL = read_from_db("rhoSL")
propEff = read_from_db("propEff")
cruiseAltitude: int = 10000 # ft
gForce: float = 2.0
V_ROC: float = 80.0
groundRun: int = 900
serviceCeiling: int = 18000
wsInitial: float = 22.6 # lb/f**2
g: float = 32.174
CDto: float = 0.04
CLto: float = 0.5
groundFriction: float = 0.04
def oswaldEff(AR: float) -> float:
e = (1.78 * (1 - (0.045 * AR ** 0.68))) - 0.64
return e
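# Hedged check: a typical GA aspect ratio should give an Oswald factor near
# 0.8; the AR of 7.5 below is illustrative, not a value from the database.
if __name__ == "__main__":
    print("e(AR=7.5) =", round(oswaldEff(7.5), 3))  # ~0.825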
e = oswaldEff(AR)
k: float = 1 / (pi * AR * e)
write_to_db("k", k)
# air density at altitude (ISA troposphere approximation)
def rhoAlt(cruiseAltitude: int) -> float:
rhoalt = rhoSL * (1 - 0.0000068756 * cruiseAltitude) ** 4.2561
return rhoalt
rhoCruise = rhoAlt(cruiseAltitude)
# print ('air density at cruise altitude, rho = ' +str(rhoCruise))
qAltitude = 0.5 * rhoCruise * (1.688 * cruiseSpeed) ** 2
# print('dynamic pressure at altitude = ' +str(qAltitude))
# Gagg-Ferrar model: normalises rated BHP for density altitude
def gagFerrar(bhp):
"takes in bhp and returns normalised bhp"
normBhp = bhp / (1.132 * (rhoCruise / rhoSL) - 0.132)
return normBhp
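# Hedged sketch: with rhoCruise/rhoSL < 1 at 10,000 ft, the correction returns
# more than the sea-level rating; the 180 bhp figure is invented here.
if __name__ == "__main__":
    print("180 bhp normalised to altitude:", round(gagFerrar(180.0), 1))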
WS = np.arange(10, 30)
twTurn = qAltitude * ((cdMin / WS) + k * (gForce / qAltitude) ** 2 * (WS))
qROC = 0.5 * rhoSL * (V_ROC * 1.688) ** 2
Vv = ROC / 60
twROC = (Vv / (V_ROC * 1.688)) + (qROC * cdMin / WS) + (k * WS / qROC)
qVlof = 0.5 * rhoSL * (vLof * 1.688 / sqrt(2)) ** 2
twVlof = (
((vLof * 1.688) ** 2 / (2 * g * groundRun))
+ (qVlof * CDto / WS)
+ (groundFriction * (1 - (qVlof * CLto / WS)))
)
rhoCeiling = rhoAlt(serviceCeiling)
# print(rhoCeiling)
twCruise = qAltitude * cdMin * (1 / WS) + (k)
twCeiling = (1.667 / (np.sqrt((2 * WS / rhoCeiling) * sqrt(k / 3 * cdMin)))) + (
(k * cdMin / 3) * 4
)
plt.figure(1)
plt.subplot(121)
plt.plot(WS, twTurn, label="Rate of Turn")
plt.plot(WS, twROC, label="Rate of Climb")
plt.plot(WS, twVlof, label="Vlof")
plt.plot(WS, twCruise, label="Cruise")
plt.plot(WS, twCeiling, label="Ceiling")
plt.axvline(x=wsfromsizing)
plt.title(" Graph 1 \n HP/Weight ratio")
plt.legend()
# ax = plt.gca()
# ax.set_xticklabels([])
###NORMAlization
norm_twTurn = gagFerrar((grossWeight * twTurn * 1.688 * cruiseSpeed / (propEff * 550)))
test = grossWeight * twTurn * 1.688 * cruiseSpeed / (propEff * 550)
norm_twROC = gagFerrar((grossWeight * twROC * 1.688 * V_ROC / (propEff * 550)))
norm_twVlof = gagFerrar((grossWeight * twVlof * 1.688 * vLof / (propEff * 550)))
norm_twCruise = gagFerrar(
(grossWeight * twCruise * 1.688 * cruiseSpeed / (propEff * 550))
)
norm_twCeiling = gagFerrar(
(grossWeight * twCeiling * 1.688 * cruiseSpeed / (propEff * 550))
)
plt.subplot(122)
plt.plot(WS, norm_twTurn, label="Rate of Turn")
plt.plot(WS, norm_twROC, label="Rate of Climb")
plt.plot(WS, norm_twVlof, label="Vlof")
plt.plot(WS, norm_twCruise, label="Cruise")
plt.plot(WS, norm_twCeiling, label="Ceiling")
plt.title("Graph 2 \n Normalised BHP")
plt.legend()
plt.axvline(x=wsfromsizing)
plt.tight_layout()
if __name__ == "__main__":
plt.show()
def find_nearest(array, value: float) -> int:
idx = (np.abs(array - value)).argmin()
return idx
# print(find_nearest(ws, plotWS))
plotWS = read_from_db("WS")
myidx = find_nearest(WS, plotWS)
def point() -> float:
cruiseidx = norm_twCruise[myidx]
takeoffidx = norm_twVlof[myidx]
climbidx = norm_twROC[myidx]
turnidx = norm_twTurn[myidx]
ceilingidx = norm_twCeiling[myidx]
# print([cruiseidx,takeoffidx,climbidx,turnidx,ceilingidx])
# print (cruiseidx,"cruiseidx")
x = np.array([cruiseidx, takeoffidx, climbidx, turnidx, ceilingidx])
return x[np.argmax(x)]
finalBHP = point()
write_to_db("finalBHP", finalBHP)
print(finalBHP, "The Final normalised BHP")
# now switch back to figure 1 and make some changes
| 37
| 88
| 0.555805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,617
| 0.506512
|
4df5b2217528684af4f56e2341cb113e5407f9fe
| 3,988
|
py
|
Python
|
libs/blocks/tests/test_variable_filter.py
|
dendisuhubdy/attention-lvcsr
|
598d487c118e66875fdd625baa84ed29d283b800
|
[
"MIT"
] | 295
|
2015-09-25T21:15:04.000Z
|
2022-01-13T01:16:18.000Z
|
libs/blocks/tests/test_variable_filter.py
|
shenshenzhanzhan/attention-lvcsr
|
598d487c118e66875fdd625baa84ed29d283b800
|
[
"MIT"
] | 21
|
2015-10-28T19:06:32.000Z
|
2022-03-11T23:13:05.000Z
|
libs/blocks/tests/test_variable_filter.py
|
shenshenzhanzhan/attention-lvcsr
|
598d487c118e66875fdd625baa84ed29d283b800
|
[
"MIT"
] | 114
|
2015-09-26T21:23:02.000Z
|
2021-11-19T02:36:41.000Z
|
from nose.tools import raises
from blocks.bricks import Bias, Linear, Logistic
from blocks.bricks.parallel import Merge
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import BIAS, FILTER, PARAMETER, OUTPUT
from theano import tensor
def test_variable_filter():
# Creating computation graph
brick1 = Linear(input_dim=2, output_dim=2, name='linear1')
brick2 = Bias(2, name='bias1')
activation = Logistic(name='sigm')
x = tensor.vector()
h1 = brick1.apply(x)
h2 = activation.apply(h1)
h2.name = "h2act"
y = brick2.apply(h2)
cg = ComputationGraph(y)
parameters = [brick1.W, brick1.b, brick2.parameters[0]]
bias = [brick1.b, brick2.parameters[0]]
brick1_bias = [brick1.b]
# Testing filtering by role
role_filter = VariableFilter(roles=[PARAMETER])
assert parameters == role_filter(cg.variables)
role_filter = VariableFilter(roles=[FILTER])
assert [] == role_filter(cg.variables)
# Testing filtering by role using each_role flag
role_filter = VariableFilter(roles=[PARAMETER, BIAS])
assert parameters == role_filter(cg.variables)
role_filter = VariableFilter(roles=[PARAMETER, BIAS], each_role=True)
assert not parameters == role_filter(cg.variables)
assert bias == role_filter(cg.variables)
# Testing filtering by bricks classes
brick_filter = VariableFilter(roles=[BIAS], bricks=[Linear])
assert brick1_bias == brick_filter(cg.variables)
# Testing filtering by bricks instances
brick_filter = VariableFilter(roles=[BIAS], bricks=[brick1])
assert brick1_bias == brick_filter(cg.variables)
# Testing filtering by brick instance
brick_filter = VariableFilter(roles=[BIAS], bricks=[brick1])
assert brick1_bias == brick_filter(cg.variables)
# Testing filtering by name
name_filter = VariableFilter(name='W_norm')
assert [cg.variables[2]] == name_filter(cg.variables)
# Testing filtering by name regex
name_filter_regex = VariableFilter(name_regex='W_no.?m')
assert [cg.variables[2]] == name_filter_regex(cg.variables)
# Testing filtering by theano name
theano_name_filter = VariableFilter(theano_name='h2act')
assert [cg.variables[11]] == theano_name_filter(cg.variables)
# Testing filtering by theano name regex
theano_name_filter_regex = VariableFilter(theano_name_regex='h2a.?t')
assert [cg.variables[11]] == theano_name_filter_regex(cg.variables)
# Testing filtering by application
appli_filter = VariableFilter(applications=[brick1.apply])
variables = [cg.variables[1], cg.variables[8]]
assert variables == appli_filter(cg.variables)
# Testing filtering by application
appli_filter_list = VariableFilter(applications=[brick1.apply])
assert variables == appli_filter_list(cg.variables)
input1 = tensor.matrix('input1')
input2 = tensor.matrix('input2')
merge = Merge(['input1', 'input2'], [5, 6], 2)
merged = merge.apply(input1, input2)
merge_cg = ComputationGraph(merged)
outputs = VariableFilter(
roles=[OUTPUT], bricks=[merge])(merge_cg.variables)
assert merged in outputs
assert len(outputs) == 3
outputs_application = VariableFilter(
roles=[OUTPUT], applications=[merge.apply])(merge_cg.variables)
assert outputs_application == [merged]
@raises(TypeError)
def test_variable_filter_roles_error():
# Creating computation graph
brick1 = Linear(input_dim=2, output_dim=2, name='linear1')
x = tensor.vector()
h1 = brick1.apply(x)
cg = ComputationGraph(h1)
# testing role error
VariableFilter(roles=PARAMETER)(cg.variables)
@raises(TypeError)
def test_variable_filter_applications_error():
# Creating computation graph
brick1 = Linear(input_dim=2, output_dim=2, name='linear1')
x = tensor.vector()
h1 = brick1.apply(x)
cg = ComputationGraph(h1)
VariableFilter(applications=brick1.apply)(cg.variables)
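# These checks can be run standalone with nose, which the imports above
# already assume is installed:
#   nosetests libs/blocks/tests/test_variable_filter.py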
| 34.678261
| 73
| 0.719157
| 0
| 0
| 0
| 0
| 610
| 0.152959
| 0
| 0
| 605
| 0.151705
|
4df630aed0715b9f32b05663f7a43496c48ccb52
| 12,437
|
py
|
Python
|
techminer/gui/comparative_analysis.py
|
jdvelasq/techMiner
|
c611d96d2f812b0890513514d9d19787a1edfe2d
|
[
"MIT"
] | 2
|
2020-09-25T02:42:34.000Z
|
2021-08-22T11:27:58.000Z
|
techminer/gui/comparative_analysis.py
|
jdvelasq/techMiner
|
c611d96d2f812b0890513514d9d19787a1edfe2d
|
[
"MIT"
] | 1
|
2020-10-17T14:38:45.000Z
|
2020-10-17T14:50:19.000Z
|
techminer/gui/comparative_analysis.py
|
jdvelasq/techMiner
|
c611d96d2f812b0890513514d9d19787a1edfe2d
|
[
"MIT"
] | 2
|
2019-10-14T18:05:25.000Z
|
2021-07-17T19:28:04.000Z
|
from collections import Counter
import pandas as pd
import ipywidgets as widgets
import techminer.core.dashboard as dash
from techminer.core import (
CA,
Dashboard,
TF_matrix,
TFIDF_matrix,
add_counters_to_axis,
clustering,
corpus_filter,
exclude_terms,
)
# from techminer.core.params import EXCLUDE_COLS
from techminer.plots import counters_to_node_sizes, xy_clusters_plot
from techminer.core.filter_records import filter_records
###############################################################################
##
## MODEL
##
###############################################################################
class Model:
def __init__(
self,
data,
limit_to,
exclude,
years_range,
clusters=None,
cluster=None,
):
##
if years_range is not None:
initial_year, final_year = years_range
data = data[(data.Year >= initial_year) & (data.Year <= final_year)]
#
# Filter for cluster members
#
if clusters is not None and cluster is not None:
data = corpus_filter(data=data, clusters=clusters, cluster=cluster)
self.data = data
self.limit_to = limit_to
self.exclude = exclude
self.column = None
self.min_occ = None
self.max_items = None
self.clustering_method = None
self.n_clusters = None
self.affinity = None
self.linkage = None
self.random_state = None
self.top_n = None
self.color_scheme = None
self.x_axis = None
self.y_axis = None
self.width = None
self.height = None
def apply(self):
##
## Comparative analysis
## from https://tlab.it/en/allegati/help_en_online/mcluster.htm
##
##
        ## Compute the TF matrix for terms appearing at least min_occ times
##
TF_matrix_ = TF_matrix(
data=self.data,
column=self.column,
scheme="binary",
min_occurrence=self.min_occ,
)
##
## Exclude Terms
##
TF_matrix_ = exclude_terms(data=TF_matrix_, axis=1)
##
        ## Add occurrence counters to the axis labels
##
TF_matrix_ = add_counters_to_axis(
X=TF_matrix_, axis=1, data=self.data, column=self.column
)
##
        ## Compute the TF-IDF matrix and keep the top max_items terms
##
## tf-idf = tf * (log(N / df) + 1)
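        ## e.g. with N = 100 documents and a term present in df = 10 of them,
        ## tf-idf = tf * (ln(100 / 10) + 1) ≈ tf * 3.30 (assuming the natural log)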
##
TFIDF_matrix_ = TFIDF_matrix(
TF_matrix=TF_matrix_,
norm=None,
use_idf=True,
smooth_idf=False,
sublinear_tf=False,
max_items=self.max_items,
)
##
## Correspondence Analysis
        ## keep the first 10 factors for plotting
##
ca = CA()
ca.fit(TFIDF_matrix_)
self.eigenvalues_ = ca.eigenvalues_[0:10]
self.explained_variance_ = ca.explained_variance_[0:10]
z = ca.principal_coordinates_rows_
z = z[z.columns[:10]]
self.principal_coordinates_rows_ = z
z = ca.principal_coordinates_cols_
z = z[z.columns[:10]]
self.principal_coordinates_cols_ = z
self.TF_matrix_ = TF_matrix_
self.TFIDF_matrix_ = TFIDF_matrix_
def ca_plot_of_keywords(self):
self.apply()
##
## Selects the first n_factors to cluster
##
X = pd.DataFrame(
self.principal_coordinates_cols_,
columns=["Dim-{}".format(i) for i in range(10)],
index=self.TFIDF_matrix_.columns,
)
return xy_clusters_plot(
x=X["Dim-{}".format(self.x_axis)],
y=X["Dim-{}".format(self.y_axis)],
x_axis_at=0,
y_axis_at=0,
labels=self.TFIDF_matrix_.columns,
node_sizes=counters_to_node_sizes(self.TFIDF_matrix_.columns),
color_scheme=self.color_scheme,
xlabel="Dim-{}".format(self.x_axis),
ylabel="Dim-{}".format(self.y_axis),
figsize=(self.width, self.height),
)
def cluster_plot_of_keywords(self):
self.apply()
X = pd.DataFrame(
self.principal_coordinates_cols_,
columns=["Dim-{}".format(i) for i in range(10)],
index=self.TFIDF_matrix_.columns,
)
(
self.n_clusters,
self.labels_,
self.cluster_members_,
self.cluster_centers_,
self.cluster_names_,
) = clustering(
X=X,
method=self.clustering_method,
n_clusters=self.n_clusters,
affinity=self.affinity,
linkage=self.linkage,
random_state=self.random_state,
top_n=self.top_n,
name_prefix="Cluster {}",
)
##
## Cluster filters
##
self.generate_cluster_filters(
terms=X.index,
labels=self.labels_,
)
y = self.cluster_members_.copy()
y = y.applymap(lambda w: pd.NA if w == "" else w)
node_sizes = [500 + 2500 * len(y[m].dropna()) for m in y.columns]
return xy_clusters_plot(
x=self.cluster_centers_["Dim-{}".format(self.x_axis)],
y=self.cluster_centers_["Dim-{}".format(self.y_axis)],
x_axis_at=0,
y_axis_at=0,
labels=["CLUST_{} xxx".format(i) for i in range(self.n_clusters)],
node_sizes=node_sizes,
color_scheme=self.color_scheme,
xlabel="Dim-{}".format(self.x_axis),
ylabel="Dim-{}".format(self.y_axis),
figsize=(self.width, self.height),
)
def cluster_plot_of_documents(self):
self.apply()
X = pd.DataFrame(
self.principal_coordinates_rows_,
columns=["Dim-{}".format(i) for i in range(10)],
index=[
"{} {}".format(i, i)
for i in range(len(self.principal_coordinates_rows_))
],
)
(
self.n_clusters,
self.labels_,
self.cluster_members_,
self.cluster_centers_,
self.cluster_names_,
) = clustering(
X=X,
method=self.clustering_method,
n_clusters=self.n_clusters,
affinity=self.affinity,
linkage=self.linkage,
random_state=self.random_state,
top_n=self.top_n,
name_prefix="Cluster {}",
)
##
## Cluster filters
##
self.generate_cluster_filters(
terms=X.index,
labels=self.labels_,
)
y = self.cluster_members_.copy()
y = y.applymap(lambda w: pd.NA if w == "" else w)
node_sizes = [500 + 2500 * len(y[m].dropna()) for m in y.columns]
return xy_clusters_plot(
x=self.cluster_centers_["Dim-{}".format(self.x_axis)],
y=self.cluster_centers_["Dim-{}".format(self.y_axis)],
x_axis_at=0,
y_axis_at=0,
labels=["CLUST_{} xxx".format(i) for i in range(self.n_clusters)],
node_sizes=node_sizes,
color_scheme=self.color_scheme,
xlabel="Dim-{}".format(self.x_axis),
ylabel="Dim-{}".format(self.y_axis),
figsize=(self.width, self.height),
)
###############################################################################
##
## DASHBOARD
##
###############################################################################
COLUMNS = [
"Author_Keywords_CL",
"Author_Keywords",
"Index_Keywords_CL",
"Index_Keywords",
"Keywords_CL",
]
class App(Dashboard, Model):
def __init__(
self,
limit_to=None,
exclude=None,
years_range=None,
):
data = filter_records(pd.read_csv("corpus.csv"))
Model.__init__(
self,
data=data,
limit_to=limit_to,
exclude=exclude,
years_range=years_range,
)
# COLUMNS = sorted(
# [column for column in sorted(data.columns) if column not in EXCLUDE_COLS]
# )
self.command_panel = [
dash.HTML("Display:", hr=False, margin="0px, 0px, 0px, 5px"),
dash.Dropdown(
options=[
"CA plot of keywords",
"Cluster plot of keywords",
"Cluster plot of documents",
],
),
dash.HTML("Parameters:"),
dash.Dropdown(
description="Column:",
options=sorted(data.columns),
),
dash.min_occurrence(),
dash.max_items(),
dash.HTML("Clustering:"),
dash.clustering_method(),
dash.n_clusters(m=3, n=50, i=1),
dash.affinity(),
dash.linkage(),
dash.random_state(),
dash.HTML("Visualization:"),
dash.top_n(m=10, n=51, i=5),
dash.color_scheme(),
dash.x_axis(),
dash.y_axis(),
dash.fig_width(),
dash.fig_height(),
]
#
# interactive output function
#
widgets.interactive_output(
f=self.interactive_output,
controls={
# Display:
"menu": self.command_panel[1],
# Parameters:
"column": self.command_panel[3],
"min_occ": self.command_panel[4],
"max_items": self.command_panel[5],
# Clustering
"clustering_method": self.command_panel[7],
"n_clusters": self.command_panel[8],
"affinity": self.command_panel[9],
"linkage": self.command_panel[10],
"random_state": self.command_panel[11],
# Visualization
"top_n": self.command_panel[13],
"colors": self.command_panel[14],
"x_axis": self.command_panel[15],
"y_axis": self.command_panel[16],
"width": self.command_panel[17],
"height": self.command_panel[18],
},
)
Dashboard.__init__(self)
def interactive_output(self, **kwargs):
Dashboard.interactive_output(self, **kwargs)
def visualization_disabled():
self.set_disabled("Color Scheme:")
self.set_disabled("X-axis:")
self.set_disabled("Y-axis:")
self.set_disabled("Width:")
self.set_disabled("Height:")
def visualization_enabled():
self.set_enabled("Color Scheme:")
self.set_enabled("X-axis:")
self.set_enabled("Y-axis:")
self.set_enabled("Width:")
self.set_enabled("Height:")
def clustering_disabled():
self.set_disabled("N Factors:")
self.set_disabled("Clustering Method:")
self.set_disabled("N Clusters:")
self.set_disabled("Affinity:")
self.set_disabled("Linkage:")
self.set_disabled("Random State:")
def clustering_enabled():
self.set_enabled("N Factors:")
self.set_enabled("Clustering Method:")
self.set_enabled("N Clusters:")
self.set_enabled("Affinity:")
self.set_enabled("Linkage:")
self.set_enabled("Random State:")
self.enable_disable_clustering_options(include_random_state=True)
if self.menu == "Correspondence analysis plot":
clustering_disabled()
visualization_enabled()
if self.menu == "Cluster members":
clustering_enabled()
visualization_disabled()
if self.menu == "Cluster plot":
clustering_enabled()
visualization_enabled()
###############################################################################
##
## EXTERNAL INTERFACE
##
###############################################################################
def comparative_analysis(
limit_to=None,
exclude=None,
years_range=None,
):
return App(
limit_to=limit_to,
exclude=exclude,
years_range=years_range,
).run()
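# Hedged usage sketch: App reads "corpus.csv" from the working directory, so
# the call below assumes such a file already exists, and that an ipywidgets-
# capable front end (e.g. Jupyter) is rendering the dashboard:
#   comparative_analysis(years_range=(2015, 2021))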
| 28.265909
| 88
| 0.514674
| 11,083
| 0.890845
| 0
| 0
| 0
| 0
| 0
| 0
| 2,169
| 0.174343
|
4df876adfaa448099ddfc3311827d0272a1fac44
| 56,425
|
py
|
Python
|
WayOfTheTurtle1.0.py
|
BYHu-2/-
|
3243d3a0ccd9144573943b00ac4364dc5c320207
|
[
"MIT"
] | 2
|
2021-12-25T00:04:12.000Z
|
2021-12-25T00:14:35.000Z
|
WayOfTheTurtle1.0.py
|
BYHu-2/Turtle
|
3243d3a0ccd9144573943b00ac4364dc5c320207
|
[
"MIT"
] | null | null | null |
WayOfTheTurtle1.0.py
|
BYHu-2/Turtle
|
3243d3a0ccd9144573943b00ac4364dc5c320207
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import qtawesome
import matplotlib.pyplot as plt
import csv
import numpy as np
import datetime
import os
class Stack:
def __init__(self):
self.items=[]
def isEmpty(self):
return self.items==[]
def push(self,item):
self.items.append(item)
def peek(self):
return self.items[len(self.items)-1]
def pop(self):
return self.items.pop()
def size(self):
return len(self.items)
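# The stack holds the filenames of saved figure PNGs so that "Delete Current
# Result" can pop the two most recent ones; a toy view of the LIFO behaviour:
#   s = Stack(); s.push('a.png'); s.push('b.png'); s.pop()  # -> 'b.png'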
class MainUI(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
self.advice=[]
self.stack=Stack()
self.isLeftPressDown = False
self.dragPosition = 0
self.Numbers = self.enum(UP=0, DOWN=1, LEFT=2, RIGHT=3, LEFTTOP=4, LEFTBOTTOM=5, RIGHTBOTTOM=6, RIGHTTOP=7,NONE=8)
self.dir = self.Numbers.NONE
self.setMouseTracking(True)
def enum(self, **enums):
return type('Enum', (), enums)
def mouseReleaseEvent(self, event):
if (event.button() == Qt.LeftButton):
self.isLeftPressDown = False
if (self.dir != self.Numbers.NONE):
self.releaseMouse()
def mousePressEvent(self, event):
if (event.button() == Qt.LeftButton):
self.isLeftPressDown = True
if (self.dir != self.Numbers.NONE):
                self.grabMouse()  # mouseGrabber() only queries the grabber; grab explicitly
else:
self.dragPosition = event.globalPos() - self.frameGeometry().topLeft()
def mouseMoveEvent(self, event):
gloPoint = event.globalPos()
rect = self.rect()
tl = self.mapToGlobal(rect.topLeft())
rb = self.mapToGlobal(rect.bottomRight())
if (not self.isLeftPressDown):
self.region(gloPoint)
else:
if (self.dir != self.Numbers.NONE):
rmove = QRect(tl, rb)
if (self.dir == self.Numbers.LEFT):
if (rb.x() - gloPoint.x() <= self.minimumWidth()):
rmove.setX(tl.x())
else:
rmove.setX(gloPoint.x())
elif (self.dir == self.Numbers.RIGHT):
rmove.setWidth(gloPoint.x() - tl.x())
elif (self.dir == self.Numbers.UP):
if (rb.y() - gloPoint.y() <= self.minimumHeight()):
rmove.setY(tl.y())
else:
rmove.setY(gloPoint.y())
elif (self.dir == self.Numbers.DOWN):
rmove.setHeight(gloPoint.y() - tl.y())
elif (self.dir == self.Numbers.LEFTTOP):
if (rb.x() - gloPoint.x() <= self.minimumWidth()):
rmove.setX(tl.x())
else:
rmove.setX(gloPoint.x())
if (rb.y() - gloPoint.y() <= self.minimumHeight()):
rmove.setY(tl.y())
else:
rmove.setY(gloPoint.y())
elif (self.dir == self.Numbers.RIGHTTOP):
rmove.setWidth(gloPoint.x() - tl.x())
rmove.setY(gloPoint.y())
elif (self.dir == self.Numbers.LEFTBOTTOM):
rmove.setX(gloPoint.x())
rmove.setHeight(gloPoint.y() - tl.y())
elif (self.dir == self.Numbers.RIGHTBOTTOM):
rmove.setWidth(gloPoint.x() - tl.x())
rmove.setHeight(gloPoint.y() - tl.y())
else:
pass
self.setGeometry(rmove)
else:
self.move(event.globalPos() - self.dragPosition)
event.accept()
def initUI(self):
self.setFixedSize(1200,900)
self.main_widget = QWidget()
self.main_layout = QGridLayout()
self.main_widget.setLayout(self.main_layout)
self.left_widget = QWidget()
self.left_widget.setObjectName('left_widget')
self.left_layout = QGridLayout()
self.left_widget.setLayout(self.left_layout)
self.right_widget = QWidget()
self.right_widget.setObjectName('right_widget')
self.right_layout = QGridLayout()
self.right_widget.setLayout(self.right_layout)
self.main_layout.addWidget(self.left_widget,0,0,16,2)
self.main_layout.addWidget(self.right_widget,0,2,16,9)
self.setCentralWidget(self.main_widget)
        self.left_label_1 = QPushButton("Parameter Settings")
        self.left_label_1.setObjectName('left_label')
        self.left_label_1.setEnabled(False)
        self.left_label_2 = QPushButton("Chart Display")
        self.left_label_2.setObjectName('left_label')
        self.left_label_2.setEnabled(False)
        self.left_label_3 = QPushButton("Help")
        self.left_label_3.setObjectName('left_label')
        self.left_label_3.setEnabled(False)
self.left_button_1 = QPushButton(qtawesome.icon('fa.rmb', color='white'), "设置期初资金")
self.left_button_1.setObjectName('left_button')
self.left_button_1.clicked.connect(self.buttonDialog1)
        self.left_button_2 = QPushButton(qtawesome.icon('fa.hourglass-start', color='white'), "Set Trade Start Date")
self.left_button_2.setObjectName('left_button')
self.left_button_2.clicked.connect(self.buttonDialog2)
        self.left_button_3 = QPushButton(qtawesome.icon('fa.hourglass-end', color='white'), "Set Trade End Date")
self.left_button_3.setObjectName('left_button')
self.left_button_3.clicked.connect(self.buttonDialog3)
        self.left_button_4 = QPushButton(qtawesome.icon('fa.line-chart', color='white'), "Adjust Donchian Channel")
self.left_button_4.setObjectName('left_button')
self.left_button_4.clicked.connect(self.buttonDialog4)
        self.left_button_5 = QPushButton(qtawesome.icon('fa.check-circle-o', color='white'), "Adjust ATR")
self.left_button_5.setObjectName('left_button')
self.left_button_5.clicked.connect(self.buttonDialog5)
        self.left_button_6 = QPushButton(qtawesome.icon('fa.pie-chart', color='white'), "Adjust Commission")
self.left_button_6.setObjectName('left_button')
self.left_button_6.clicked.connect(self.buttonDialog6)
        self.left_button_7 = QPushButton(qtawesome.icon('fa.sort-amount-asc', color='white'), "Adjust Investment Coefficient")
self.left_button_7.setObjectName('left_button')
self.left_button_7.clicked.connect(self.buttonDialog7)
        self.left_checkbox_1 = QCheckBox('Strategy Return')
        self.left_checkbox_1.setChecked(True)
        self.left_checkbox_2 = QCheckBox('CSI 300')
        self.left_checkbox_2.setChecked(True)
        self.left_checkbox_3 = QCheckBox('Position Chart')
        self.left_checkbox_3.setChecked(True)
        self.left_button_8 = QPushButton(qtawesome.icon('fa.question', color='white'), "Glossary Lookup")
self.left_button_8.setObjectName('left_button')
self.left_button_8.clicked.connect(self.buttonDialog8)
        self.left_button_9 = QPushButton(qtawesome.icon('fa.comment', color='white'), "Feedback")
self.left_button_9.setObjectName('left_button')
self.left_button_9.clicked.connect(self.buttonDialog9)
        self.left_button_10 = QPushButton(qtawesome.icon('fa.envelope', color='white'), "Contact Us")
self.left_button_10.setObjectName('left_button')
self.left_button_10.clicked.connect(self.buttonDialog10)
self.left_layout.addWidget(self.left_label_1, 0, 0, 1, 3)
self.left_layout.addWidget(self.left_button_1, 1, 0, 1, 3)
self.left_layout.addWidget(self.left_button_2, 2, 0, 1, 3)
self.left_layout.addWidget(self.left_button_3, 3, 0, 1, 3)
self.left_layout.addWidget(self.left_button_4, 4, 0, 1, 3)
self.left_layout.addWidget(self.left_button_5, 5, 0, 1, 3)
self.left_layout.addWidget(self.left_button_6, 6, 0, 1, 3)
self.left_layout.addWidget(self.left_button_7, 7, 0, 1, 3)
self.left_layout.addWidget(self.left_label_2, 8, 0, 1, 3)
self.left_layout.addWidget(self.left_checkbox_1, 9, 0, 1, 3)
self.left_layout.addWidget(self.left_checkbox_2, 10, 0, 1, 3)
self.left_layout.addWidget(self.left_checkbox_3, 11, 0, 1, 3)
self.left_layout.addWidget(self.left_label_3, 12, 0, 1, 3)
self.left_layout.addWidget(self.left_button_8, 13, 0, 1, 3)
self.left_layout.addWidget(self.left_button_9, 14, 0, 1, 3)
self.left_layout.addWidget(self.left_button_10, 15, 0, 1, 3)
self.left_checkbox_1.setStyleSheet("QCheckBox{color:rgb(255,250,250)}")
self.left_checkbox_2.setStyleSheet("QCheckBox{color:rgb(255,250,250)}")
self.left_checkbox_3.setStyleSheet("QCheckBox{color:rgb(255,250,250)}")
self.left_widget.setStyleSheet('''
QCheckBox{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;
font-size:16px}
QPushButton{border:none;
color:white;
text-align: left;
font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;
font-size:16px}
QPushButton#left_label{
border:none;
border-bottom:1px solid white;
font-size:20px;
font-weight:700;
font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;
}
QPushButton#left_button:hover{border-left:4px solid blue;font-weight:700;}
QWidget#left_widget{
background:gray;
border-top:1px solid white;
border-bottom:1px solid white;
border-left:1px solid white;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
}
''')
self.right_label_0 =QLabel('')
        self.right_label_1 = QLabel('Initial Capital')
        self.right_label_1.setAlignment(Qt.AlignCenter)
        self.right_label_1.setFont(QFont('KaiTi',12))
        self.right_label_2 = QLabel('Total Assets')
        self.right_label_2.setAlignment(Qt.AlignCenter)
        self.right_label_2.setFont(QFont('KaiTi', 12))
        self.right_label_3 = QLabel('Cumulative P&L')
        self.right_label_3.setAlignment(Qt.AlignCenter)
        self.right_label_3.setFont(QFont('KaiTi', 12))
        self.right_label_4 = QLabel('Active Trading Days')
        self.right_label_4.setAlignment(Qt.AlignCenter)
        self.right_label_4.setFont(QFont('KaiTi', 12))
        self.right_label_5 = QLabel('Benchmark Return')
        self.right_label_5.setAlignment(Qt.AlignCenter)
        self.right_label_5.setFont(QFont('KaiTi', 12))
        self.right_label_6 = QLabel('Annualized Return')
        self.right_label_6.setAlignment(Qt.AlignCenter)
        self.right_label_6.setFont(QFont('KaiTi', 12))
        self.right_label_7 = QLabel('Start Date')
        self.right_label_7.setAlignment(Qt.AlignCenter)
        self.right_label_7.setFont(QFont('KaiTi', 12))
        self.right_label_8 = QLabel('End Date')
        self.right_label_8.setAlignment(Qt.AlignCenter)
        self.right_label_8.setFont(QFont('KaiTi', 12))
        self.right_label_9 = QLabel('Win Rate')
self.right_label_9.setAlignment(Qt.AlignCenter)
self.right_label_9.setFont(QFont('KaiTi', 12))
self.right_layout.addWidget(self.right_label_0, 0, 3, 1, 3)
self.right_layout.addWidget(self.right_label_1, 1, 3, 1, 1)
self.right_layout.addWidget(self.right_label_2, 1, 4, 1, 1)
self.right_layout.addWidget(self.right_label_3, 1, 5, 1, 1)
self.right_layout.addWidget(self.right_label_4, 1, 6, 1, 1)
self.right_layout.addWidget(self.right_label_5, 1, 7, 1, 1)
self.right_layout.addWidget(self.right_label_6, 1, 8, 1, 1)
self.right_layout.addWidget(self.right_label_7, 1, 9, 1, 1)
self.right_layout.addWidget(self.right_label_8, 1, 10, 1, 1)
self.right_layout.addWidget(self.right_label_9, 1, 11, 1, 1)
self.right_lineEdit_1 = QLineEdit()
self.right_lineEdit_1.setReadOnly(True)
self.right_lineEdit_1.setText('')
self.right_lineEdit_2 = QLineEdit()
self.right_lineEdit_2.setReadOnly(True)
self.right_lineEdit_2.setText('')
self.right_lineEdit_3 = QLineEdit()
self.right_lineEdit_3.setReadOnly(True)
self.right_lineEdit_3.setText('')
self.right_lineEdit_4 = QLineEdit()
self.right_lineEdit_4.setReadOnly(True)
self.right_lineEdit_4.setText('')
self.right_lineEdit_5 = QLineEdit()
self.right_lineEdit_5.setReadOnly(True)
self.right_lineEdit_5.setText('')
self.right_lineEdit_6 = QLineEdit()
self.right_lineEdit_6.setReadOnly(True)
self.right_lineEdit_6.setText('')
self.right_lineEdit_7 = QLineEdit()
self.right_lineEdit_7.setReadOnly(True)
self.right_lineEdit_7.setText('')
self.right_lineEdit_8 = QLineEdit()
self.right_lineEdit_8.setReadOnly(True)
self.right_lineEdit_8.setText('')
self.right_lineEdit_9 = QLineEdit()
self.right_lineEdit_9.setReadOnly(True)
self.right_lineEdit_9.setText('')
self.right_layout.addWidget(self.right_lineEdit_1, 2, 3, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_2, 2, 4, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_3, 2, 5, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_4, 2, 6, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_5, 2, 7, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_6, 2, 8, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_7, 2, 9, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_8, 2, 10, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_9, 2, 11, 1, 1)
self.right_figure_1 = QLabel()
self.figure_1 = QPixmap("猫咪老师4.png")
self.right_figure_1.setPixmap(self.figure_1)
self.right_figure_1.setScaledContents(True)
self.right_figure_2 = QLabel()
self.figure_2 = QPixmap("喵.png")
self.right_figure_2.setPixmap(self.figure_2)
self.right_figure_2.setScaledContents(True)
self.right_layout.addWidget(self.right_figure_1, 3, 3, 7, 9)
self.right_layout.addWidget(self.right_figure_2, 10, 3, 5, 9)
        self.right_button_1 = QPushButton(qtawesome.icon('fa.repeat', color='blue'), "Run / Re-run Backtest")
        self.right_button_1.clicked.connect(self.start)
        self.right_button_1.clicked.connect(self.tryOrRepeat1)
        self.right_button_1.clicked.connect(self.tryOrRepeat2)
        self.right_button_2 = QPushButton(qtawesome.icon('fa.floppy-o', color='gray'), "Delete Current Result")
        self.right_button_2.clicked.connect(self.figuredelete)
        self.right_button_3 = QPushButton(qtawesome.icon('fa.times', color='red'), "Quit")
self.right_button_3.clicked.connect(self.quitApplicaton)
self.right_layout.addWidget(self.right_button_1, 16, 3, 1, 3)
self.right_layout.addWidget(self.right_button_2, 16, 6, 1, 3)
self.right_layout.addWidget(self.right_button_3, 16, 9, 1, 3)
self.right_widget.setStyleSheet('''
QWidget#right_widget{
color:#232C51;
background:white;
border-top:1px solid darkGray;
border-bottom:1px solid darkGray;
border-right:1px solid darkGray;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
QLabel{
border:None;
font-weight:700;
font-size=25px;
font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;
}
QLineEdit{
font:bold;
border:1px solid gray;
width:300px;
padding:2px 4px;
background-color:rgb(255,250,250);
selection-color:white;
}
QPushButton{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;
font-size:16px}
''')
self.setAttribute(Qt.WA_TranslucentBackground)
self.setWindowFlag(Qt.FramelessWindowHint)
self.main_layout.setSpacing(0)
def buttonDialog1(self):
self.dialog1 = QDialog()
self.dialog1.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog1.resize(250,100)
        self.dialog1.setWindowTitle('Set Initial Capital')
        formLayout = QFormLayout()
        label = QLabel('Enter your initial capital (integer, in units of 10,000 CNY)')
self.edit1 = QLineEdit()
self.edit1.setValidator(QIntValidator())
self.edit1.setAlignment(Qt.AlignRight)
self.edit1.setFont(QFont('Arial', 10))
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk1)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel1)
formLayout.addRow(label)
formLayout.addRow(self.edit1)
formLayout.addRow(button_ok, button_cancel)
self.dialog1.setLayout(formLayout)
self.dialog1.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog1.setWindowModality(Qt.ApplicationModal)
self.dialog1.exec_()
def okk1(self):
if self.edit1.text() != '':
global initial_cash
global cash
            initial_cash = int(self.edit1.text()) * 10000  # QIntValidator guarantees an integer string; avoid eval
self.dialog1.close()
def cancel1(self):
self.edit1.setText('')
def buttonDialog2(self):
self.dialog2 = QDialog()
self.dialog2.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog2.resize(280,100)
        self.dialog2.setWindowTitle('Set Trade Start Date')
        formLayout = QFormLayout()
        label1 = QLabel('Enter the trade start date')
        label2 = QLabel('Date format example: 2011-03-01')
        label3 = QLabel('Valid range: 2011-03-01 to 2021-04-01')
self.edit2 = QLineEdit()
self.edit2.setAlignment(Qt.AlignRight)
self.edit2.setFont(QFont('Arial', 10))
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk2)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel2)
formLayout.addRow(label1)
formLayout.addRow(label2)
formLayout.addRow(label3)
formLayout.addRow(self.edit2)
formLayout.addRow(button_ok, button_cancel)
self.dialog2.setLayout(formLayout)
self.dialog2.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog2.setWindowModality(Qt.ApplicationModal)
self.dialog2.exec_()
def okk2(self):
if self.edit2.text()!='':
global start_time
start_time=self.edit2.text()
start_time = nearestdate(start_time, 1)
self.dialog2.close()
def cancel2(self):
self.edit2.setText('')
def buttonDialog3(self):
self.dialog3 = QDialog()
self.dialog3.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog3.resize(280,100)
        self.dialog3.setWindowTitle('Set Trade End Date')
        formLayout = QFormLayout()
        label1 = QLabel('Enter the trade end date')
        label2 = QLabel('Date format example: 2021-04-01')
        label3 = QLabel('Valid range: 2011-03-01 to 2021-04-01')
self.edit3 = QLineEdit()
self.edit3.setAlignment(Qt.AlignRight)
self.edit3.setFont(QFont('Arial', 10))
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk3)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel3)
formLayout.addRow(label1)
formLayout.addRow(label2)
formLayout.addRow(label3)
formLayout.addRow(self.edit3)
formLayout.addRow(button_ok, button_cancel)
self.dialog3.setLayout(formLayout)
self.dialog3.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog3.setWindowModality(Qt.ApplicationModal)
self.dialog3.exec_()
def okk3(self):
if self.edit3.text()!='':
global end_time
end_time=self.edit3.text()
end_time = nearestdate(end_time, -1)
self.dialog3.close()
def cancel3(self):
self.edit3.setText('')
def buttonDialog4(self):
self.dialog4 = QDialog()
self.dialog4.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog4.resize(280,100)
        self.dialog4.setWindowTitle('Adjust Donchian Channel')
        formLayout = QFormLayout()
        label = QLabel('Set the Donchian channel period (5-50):')
self.edit4 = QLineEdit('20')
self.edit4.setReadOnly(True)
self.edit4.setAlignment(Qt.AlignRight)
self.edit4.setFont(QFont('Arial', 10))
self.slider1 = QSlider(Qt.Horizontal)
self.slider1.setMinimum(5)
self.slider1.setMaximum(50)
self.slider1.setSingleStep(1)
self.slider1.setValue(20)
self.slider1.setTickPosition(QSlider.TicksBelow)
self.slider1.setTickInterval(1)
self.slider1.valueChanged.connect(self.valueChange1)
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk4)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel4)
formLayout.addRow(label)
formLayout.addRow(self.edit4)
formLayout.addRow(self.slider1)
formLayout.addRow(button_ok, button_cancel)
self.dialog4.setLayout(formLayout)
self.dialog4.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog4.setWindowModality(Qt.ApplicationModal)
self.dialog4.exec_()
def okk4(self):
global Dontime
Dontime=int(self.edit4.text())
self.dialog4.close()
def cancel4(self):
self.slider1.setValue(20)
def valueChange1(self):
self.edit4.setText('%d'%self.slider1.value())
def buttonDialog5(self):
self.dialog5 = QDialog()
self.dialog5.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog5.resize(250,100)
        self.dialog5.setWindowTitle('Adjust ATR')
        formLayout = QFormLayout()
        label = QLabel('Set the ATR period (5-50):')
self.edit5 = QLineEdit('20')
self.edit5.setReadOnly(True)
self.edit5.setAlignment(Qt.AlignRight)
self.edit5.setFont(QFont('Arial', 10))
self.slider2 = QSlider(Qt.Horizontal)
self.slider2.setMinimum(5)
self.slider2.setMaximum(50)
self.slider2.setSingleStep(1)
self.slider2.setValue(20)
self.slider2.setTickPosition(QSlider.TicksBelow)
self.slider2.setTickInterval(1)
self.slider2.valueChanged.connect(self.valueChange2)
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk5)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel5)
formLayout.addRow(label)
formLayout.addRow(self.edit5)
formLayout.addRow(self.slider2)
formLayout.addRow(button_ok, button_cancel)
self.dialog5.setLayout(formLayout)
self.dialog5.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog5.setWindowModality(Qt.ApplicationModal)
self.dialog5.exec_()
def okk5(self):
global atrtime
atrtime=int(self.edit5.text())
self.dialog5.close()
def cancel5(self):
self.slider2.setValue(20)
def valueChange2(self):
self.edit5.setText('%d'%self.slider2.value())
def buttonDialog6(self):
self.dialog6 = QDialog()
self.dialog6.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog6.resize(280,100)
        self.dialog6.setWindowTitle('Adjust Commission')
        formLayout = QFormLayout()
        label = QLabel('Set the commission rate (in units of 1/10,000):')
self.edit6 = QLineEdit('1')
self.edit6.setValidator(QIntValidator())
self.edit6.setAlignment(Qt.AlignRight)
self.edit6.setFont(QFont('Arial', 10))
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk6)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel6)
formLayout.addRow(label)
formLayout.addRow(self.edit6)
formLayout.addRow(button_ok, button_cancel)
self.dialog6.setLayout(formLayout)
self.dialog6.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog6.setWindowModality(Qt.ApplicationModal)
self.dialog6.exec_()
def okk6(self):
if self.edit6.text() != '':
global backtest_commission_ratio
            backtest_commission_ratio = int(self.edit6.text()) / 10000  # avoid eval on user input
self.dialog6.close()
def cancel6(self):
self.edit6.setText('1')
def buttonDialog7(self):
self.dialog7 = QDialog()
self.dialog7.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog7.resize(280,100)
        self.dialog7.setWindowTitle('Adjust Investment Coefficient')
        formLayout = QFormLayout()
        label = QLabel('Set the investment coefficient (in units of 1/100):')
self.edit7 = QLineEdit('1')
self.edit7.setAlignment(Qt.AlignRight)
self.edit7.setFont(QFont('Arial', 10))
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk7)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel7)
formLayout.addRow(label)
formLayout.addRow(self.edit7)
formLayout.addRow(button_ok, button_cancel)
self.dialog7.setLayout(formLayout)
self.dialog7.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog7.setWindowModality(Qt.ApplicationModal)
self.dialog7.exec_()
def okk7(self):
if self.edit7.text() != '':
global unit_rate
            unit_rate = float(self.edit7.text()) / 100  # avoid eval on raw user input
self.dialog7.close()
def cancel7(self):
self.edit7.setText('1')
def buttonDialog8(self):
self.dialog8 = QDialog()
self.dialog8.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog8.resize(280,100)
        self.dialog8.setWindowTitle('Glossary Lookup')
        layout=QVBoxLayout()
        self.label = QLabel('Select a term:')
        self.cb = QComboBox()
        self.cb.addItems(['Donchian Channel', 'ATR', 'Investment Coefficient', 'Benchmark Return', 'Annualized Return'])
self.cb.currentIndexChanged.connect(self.selectionChange)
layout.addWidget(self.label)
layout.addWidget(self.cb)
self.dialog8.setLayout(layout)
self.dialog8.setStyleSheet('''
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QComboBox{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog8.setWindowModality(Qt.ApplicationModal)
self.dialog8.exec_()
def selectionChange(self,i):
        dict0 = {
            'Donchian Channel': "The Donchian channel is a breakout-style trend-following indicator that can give two kinds of breakout signals",
            'ATR': "ATR is the average of the index's daily true range, computed from each day's high, low and the previous session's close",
            'Investment Coefficient': "The per-trade contract count (unit) is total assets times the investment coefficient divided by the value volatility",
            'Benchmark Return': "Defaults to the return of the CSI 300 index",
            'Annualized Return': "The annualized return rescales the realised return to a one-year horizon",
        }
self.label.setText('%s'%dict0[self.cb.currentText()])
def buttonDialog9(self):
self.dialog9 = QDialog()
self.dialog9.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog9.resize(250,100)
        self.dialog9.setWindowTitle('Feedback')
        formlayout=QFormLayout()
        label = QLabel('Your feedback and suggestions:')
self.edit9 = QTextEdit('')
self.edit9.setAlignment(Qt.AlignLeft)
self.edit9.setFont(QFont('KaiTi', 10))
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk9)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel9)
formlayout.addRow(label)
formlayout.addRow(self.edit9)
formlayout.addRow(button_ok,button_cancel)
self.dialog9.setLayout(formlayout)
self.dialog9.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog9.setWindowModality(Qt.ApplicationModal)
self.dialog9.exec_()
def okk9(self):
        QMessageBox.about(self, 'Thanks', 'Thank you for your feedback! We will keep improving based on it.')
self.dialog9.close()
def cancel9(self):
self.edit9.setText('')
def buttonDialog10(self):
self.dialog10 = QDialog()
self.dialog10.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog10.resize(250,150)
        self.dialog10.setWindowTitle('Contact Us')
        layout=QVBoxLayout()
        label1 = QLabel('We would love to hear from you!')
        label2 = QLabel('Our email addresses:')
        label5 = QLabel('hl1127591548@stu.pku.edu.cn')
        label6 = QLabel('byhu2018@pku.edu.cn')
        label7 = QLabel('stevenhu@stu.pku.edu.cn')
        label3 = QLabel('')
        label3.setOpenExternalLinks(True)
        label3.setText("<A href='https://mail.163.com/'>NetEase Mail</a>")
        label3.setAlignment(Qt.AlignCenter)
        label3.setToolTip('Open the NetEase Mail homepage')
        label4 = QLabel('')
        label4.setOpenExternalLinks(True)
        label4.setText("<A href='https://mail.qq.com/'>QQ Mail</a>")
        label4.setAlignment(Qt.AlignCenter)
        label4.setToolTip('Open the QQ Mail homepage')
layout.addWidget(label1)
layout.addWidget(label2)
layout.addWidget(label5)
layout.addWidget(label6)
layout.addWidget(label7)
layout.addWidget(label3)
layout.addWidget(label4)
self.dialog10.setLayout(layout)
self.dialog10.setStyleSheet('''
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog10.setWindowModality(Qt.ApplicationModal)
self.dialog10.exec_()
def tryOrRepeat1(self):
if self.left_checkbox_1.isChecked() or self.left_checkbox_2.isChecked():
plt.figure()
plt.title('Asset-Time')
if self.left_checkbox_1.isChecked():
plt.plot(xs, l_asset, linestyle='-', color='firebrick', linewidth=1.5, label='Asset')
if self.left_checkbox_2.isChecked():
plt.plot(xs, l_index, linestyle='-', color='royalblue', linewidth=1, label='Index')
plt.plot(xs, l_initial, linestyle='--', color='black', label='Initial')
plt.xlabel('Time')
plt.ylabel('Asset')
plt.gcf().autofmt_xdate()
plt.legend()
plt.rcParams['figure.figsize'] = (9.0, 4.0)
theTime1=datetime.datetime.now()
figure_1_name='figure_1'+str(theTime1)+'.png'
figure_1_name = ''.join(figure_1_name.split(':'))
self.stack.push(figure_1_name)
plt.savefig(figure_1_name,dpi=300,bbox_inches='tight')
plt.close()
self.figure_1=QPixmap(figure_1_name)
self.right_figure_1.setPixmap(self.figure_1)
else:
self.figure_1 = QPixmap("猫咪老师4.png")
self.right_figure_1.setPixmap(self.figure_1)
def tryOrRepeat2(self):
if self.left_checkbox_3.isChecked():
plt.figure()
plt.title('Long/Short-Time')
long_tem = []
short_tem = []
initial_bar = []
for i in range(len(position_long)-1):
long_tem.append(position_long[i][1])
short_tem.append(-position_short[i][1])
initial_bar.append(0)
plt.bar(xs, long_tem,linestyle='-', color='firebrick', linewidth=1, label='long')
plt.bar(xs, short_tem,linestyle='-', color='royalblue', linewidth=1, label='short')
plt.plot(xs, initial_bar, linestyle='--', color='black', label='Initial')
plt.xlabel('Time')
plt.ylabel('')
plt.gcf().autofmt_xdate()
plt.legend()
plt.rcParams['figure.figsize'] = (9.0, 4.0)
theTime2 = datetime.datetime.now()
figure_2_name = 'figure_2' + str(theTime2) + '.png'
figure_2_name = ''.join(figure_2_name.split(':'))
self.stack.push(figure_2_name)
plt.savefig(figure_2_name, dpi=300, bbox_inches='tight')
plt.close()
self.figure_2 = QPixmap(figure_2_name)
self.right_figure_2.setPixmap(self.figure_2)
else:
self.figure_2 = QPixmap("喵.png")
self.right_figure_2.setPixmap(self.figure_2)
    def quitApplicaton(self):
        # QMainWindow has no instance(); fetch the running QApplication instead
        QApplication.instance().quit()
def figuredelete(self):
figure_1_delete=self.stack.pop()
figure_2_delete = self.stack.pop()
os.remove(figure_1_delete)
os.remove(figure_2_delete)
self.right_button_2.setEnabled(False)
def start(self):
global time
global date
global winningRate
global baseline
global annualized_rate
global xs
global l_initial
global position_long
global position_short
global l_time
global l_asset
global l_index
self.right_button_2.setEnabled(True)
position_long = []
position_short = []
for n in range(finddatepos(start_time), finddatepos(end_time) + 1):
position_long.append([result[n][0], 0])
position_short.append([result[n][0], 0])
cash.append([result[n][0], 0])
cash[0][1] = initial_cash
start_date_position = finddatepos(start_time)
end_date_position = finddatepos(end_time)
for d in range(start_date_position + 1, end_date_position + 1):
on_bar(result[d][0], atrtime)
in_bar(result[d][0], atrtime)
l_time = []
l_asset = []
l_index = []
time = 0
for d in range(start_date_position + 1, end_date_position + 1):
time += 1
l_time.append(result[d][0])
l_asset.append(current_asset(result[d][0]))
l_index.append(result[d][4] * initial_cash / result[start_date_position + 1][4])
if position_short[time][1] != position_short[time - 1][1] or position_long[time][1] != \
position_long[time - 1][1]:
date += 1
if current_asset(result[d][0]) >= current_asset(result[d - 1][0]):
winningRate += 1
winningRate /= date
baseline = (l_index[-1] / l_index[0]) - 1
d1 = datetime.datetime(int(start_time.split('-')[0]), int(start_time.split('-')[1]),
int(start_time.split('-')[2]))
d2 = datetime.datetime(int(end_time.split('-')[0]), int(end_time.split('-')[1]), int(end_time.split('-')[2]))
interval = d2 - d1
annualized_rate = ((current_asset(end_time) / current_asset(start_time)) - 1) * 365 / interval.days
        xs = [datetime.datetime.strptime(d, '%Y-%m-%d').date() for d in l_time]
        l_initial = [initial_cash] * (end_date_position - start_date_position)
self.right_lineEdit_1.setText('%d' % int(initial_cash))
self.right_lineEdit_2.setText('%d' % int(current_asset(end_time)))
self.right_lineEdit_3.setText('%d' % int(current_asset(end_time)-initial_cash))
self.right_lineEdit_4.setText('%d' % date)
baseline0 = baseline * 100
self.right_lineEdit_5.setText('%.2f' % baseline0 + '%')
annualized_rate0 = annualized_rate * 100
self.right_lineEdit_6.setText('%.2f' % annualized_rate0 + '%')
self.right_lineEdit_7.setText('%s' % start_time)
self.right_lineEdit_8.setText('%s' % end_time)
winningRate0 = winningRate * 100
self.right_lineEdit_9.setText('%.2f' % winningRate0 + '%')
def main():
app = QApplication(sys.argv)
gui = MainUI()
gui.show()
sys.exit(app.exec_())
def finddatepos(date):
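    """Return the row index of `date` in the `result` price table."""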
i = 0
while result[i][0] != date:
i += 1
return i
def calAtr(result, start_time, end_time, tr_list): # Calculate atr
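    """Average True Range over [start_time, end_time]. A bar's true range is
    max(high - low, high - prev_close, prev_close - low); returns the floored
    mean of the true ranges and half of that value."""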
counter = 0
atr_list = []
for i in range(1, len(result)-1):
if result[i][0] == start_time:
counter = 1
if counter == 1:
tr = max(float(result[i][2])-float(result[i][3]), float(result[i][2])-float(result[i-1][4]), float(result[i-1][4])-float(result[i][3]))
tr_list.append([result[i][0], tr])
atr_list.append(tr)
if result[i][0] == end_time:
counter = 0
atr = int(np.floor(np.mean(atr_list)))
atr_half = int(np.floor(0.5 * atr))
return [atr, atr_half]
def calDon(result, time, atr_half, Dontime = 30): # Calculate Donchian tunnel
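    """Donchian channel at `time`: the highest high and lowest low of the
    preceding `Dontime` bars, with add and stop-loss levels offset from the
    channel by half an ATR."""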
for i in range(Dontime, len(result)-1):
high_list = []
low_list = []
if result[i][0] == time:
for j in range(i-Dontime, i):
high_list.append(result[j][2])
low_list.append(result[j][3])
don_open = np.max(high_list)
don_close = np.min(low_list)
short_add_point = don_close - atr_half
short_stop_loss = don_close + atr_half
long_add_point = don_open + atr_half
long_stop_loss = don_open - atr_half
return [long_add_point, long_stop_loss, short_add_point, short_stop_loss]
def on_bar(date, atrtime = 10):
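    """Apply the breakout rules at the bar's open: open, add to, or reduce
    long/short positions against the Donchian levels, sizing each trade with
    the ATR-based unit()."""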
i = 0
while result[i][0] != date:
i += 1
yesterday = result[i-1][0]
startatrday = result[i-atrtime][0]
open = result[i][1]
atr = calAtr(result, startatrday, yesterday, tr_list)[0]
atr_half = calAtr(result, startatrday, yesterday, tr_list)[1]
Donlst = calDon(result, date, atr_half)
long_add_point = Donlst[0]
long_stop_loss = Donlst[1]
short_add_point = Donlst[2]
short_stop_loss = Donlst[3]
date_pos = 0
while cash[date_pos][0] != date:
date_pos += 1
position_long[date_pos][1] = position_long[date_pos - 1][1]
position_short[date_pos][1] = position_short[date_pos - 1][1]
cash[date_pos][1] = cash[date_pos - 1][1]
    if position_long[date_pos][1] == 0 and position_short[date_pos][1] == 0:
        if open > long_add_point - atr_half:
            # Open a long position on an upward Donchian-channel breakout
            if cash[date_pos][1] >= (1 + backtest_commission_ratio) * open * unit(current_asset(yesterday),yesterday):
                position_long[date_pos][1] = unit(current_asset(yesterday),yesterday)
                print(date, 'open long %.1f'%(unit(current_asset(yesterday),yesterday)))
                cash[date_pos][1] -= (1 + backtest_commission_ratio) * open * unit(current_asset(yesterday),yesterday)
            else:
                position_long[date_pos][1] = cash[date_pos][1] / (1 + backtest_commission_ratio) / open
                print(date, 'open long %.1f'%(cash[date_pos][1] / (1 + backtest_commission_ratio) / open))
                cash[date_pos][1] = 0
        if open < short_add_point + atr_half:
            # Open a short position on a downward Donchian-channel breakout
            position_short[date_pos][1] = unit(current_asset(yesterday),yesterday)
            print(date, 'open short %.1f'%(unit(current_asset(yesterday),yesterday)))
            cash[date_pos][1] += (1 - backtest_commission_ratio) * open * unit(current_asset(yesterday),yesterday)
    if position_long[date_pos][1] != 0:
        if open > long_add_point:
            # Add to the position on a further half-ATR breakout
            if cash[date_pos][1] >= (1 + backtest_commission_ratio) * open * unit(current_asset(yesterday), yesterday):
                position_long[date_pos][1] += unit(current_asset(yesterday),yesterday)
                print(date, 'add to long %.1f'%(unit(current_asset(yesterday),yesterday)))
                cash[date_pos][1] -= (1 + backtest_commission_ratio) * open * unit(current_asset(yesterday), yesterday)
            else:
                position_long[date_pos][1] += cash[date_pos][1] / (1 + backtest_commission_ratio) / open
                print(date, 'add to long %.1f' % (cash[date_pos][1] / (1 + backtest_commission_ratio) / open))
                cash[date_pos][1] = 0
        if open < long_stop_loss:
            # Holding a long position: stop-loss, reduce by at most one unit
            if position_long[date_pos][1] - unit(current_asset(yesterday),yesterday) >= 0:
                print(date, 'close long %.1f'%(unit(current_asset(yesterday),yesterday)))
                cash[date_pos][1] += (1 - backtest_commission_ratio) * open * unit(current_asset(yesterday),
                                                                                   yesterday)
            else:
                print(date, 'close long %.1f' % (position_long[date_pos][1]))
                cash[date_pos][1] += (1 - backtest_commission_ratio) * position_long[date_pos][1] * open
            position_long[date_pos][1] = max(position_long[date_pos][1] - unit(current_asset(yesterday),yesterday), 0)
            # Alternative (disabled): close the entire long position at once
            # print(date, 'close long %.1f'%(position_long[date_pos][1]))
            # cash[date_pos][1] += (1 - backtest_commission_ratio) * open * position_long[date_pos][1]
            # position_long[date_pos][1] = 0
    if position_short[date_pos][1] != 0:
        if open < short_add_point:
            # Add to the position on a further half-ATR breakout
            position_short[date_pos][1] += unit(current_asset(yesterday),yesterday)
            print(date, 'add to short %.1f'%(unit(current_asset(yesterday),yesterday)))
            cash[date_pos][1] += (1 - backtest_commission_ratio) * open * unit(current_asset(yesterday), yesterday)
        if open > short_stop_loss:
            # Holding a short position: stop-loss, buy back at most one unit
            m = min(position_short[date_pos][1] * open, open * unit(current_asset(yesterday),yesterday), cash[date_pos][1] / (1 + backtest_commission_ratio))
            print(date, 'close short %.1f'%(m / open))
            cash[date_pos][1] -= (1 + backtest_commission_ratio) * m
            position_short[date_pos][1] = position_short[date_pos][1] - m / open
            # Alternative (disabled): close the entire short position at once
            # m = position_short[date_pos][1] * open
            # print(date, 'close short %.1f'%(m / open))
            # cash[date_pos][1] -= (1 + backtest_commission_ratio) * m
            # position_short[date_pos][1] = position_short[date_pos][1] - m / open
def in_bar(date, atrtime = 10):
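    """Same breakout rules as on_bar, but evaluated at the bar's close."""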
i = 0
while result[i][0] != date:
i += 1
yesterday = result[i-1][0]
startatrday = result[i-atrtime][0]
close = result[i][4]
atr = calAtr(result, startatrday, yesterday, tr_list)[0]
atr_half = calAtr(result, startatrday, yesterday, tr_list)[1]
Donlst = calDon(result, date, atr_half)
long_add_point = Donlst[0]
long_stop_loss = Donlst[1]
short_add_point = Donlst[2]
short_stop_loss = Donlst[3]
date_pos = 0
while cash[date_pos][0] != date:
date_pos += 1
    if position_long[date_pos][1] == 0 and position_short[date_pos][1] == 0:
        if close > long_add_point - atr_half:
            # Open a long position on an upward Donchian-channel breakout
            if cash[date_pos][1] >= (1 + backtest_commission_ratio) * close * unit(current_asset(yesterday),yesterday):
                position_long[date_pos][1] = unit(current_asset(yesterday),yesterday)
                print(date, 'open long %.1f'%(unit(current_asset(yesterday),yesterday)))
                cash[date_pos][1] -= (1 + backtest_commission_ratio) * close * unit(current_asset(yesterday),yesterday)
            else:
                position_long[date_pos][1] = cash[date_pos][1] / (1 + backtest_commission_ratio) / close
                print(date, 'open long %.1f'%(cash[date_pos][1] / (1 + backtest_commission_ratio) / close))
                cash[date_pos][1] = 0
        if close < short_add_point + atr_half:
            # Open a short position on a downward Donchian-channel breakout
            position_short[date_pos][1] = unit(current_asset(yesterday),yesterday)
            print(date, 'open short %.1f'%(unit(current_asset(yesterday),yesterday)))
            cash[date_pos][1] += (1 - backtest_commission_ratio) * close * unit(current_asset(yesterday),yesterday)
    if position_long[date_pos][1] != 0:
        if close > long_add_point:
            # Add to the position on a further half-ATR breakout
            if cash[date_pos][1] >= (1 + backtest_commission_ratio) * close * unit(current_asset(yesterday), yesterday):
                position_long[date_pos][1] += unit(current_asset(yesterday),yesterday)
                print(date, 'add to long %.1f'%(unit(current_asset(yesterday),yesterday)))
                cash[date_pos][1] -= (1 + backtest_commission_ratio) * close * unit(current_asset(yesterday), yesterday)
            else:
                position_long[date_pos][1] += cash[date_pos][1] / (1 + backtest_commission_ratio) / close
                print(date, 'add to long %.1f' % (cash[date_pos][1] / (1 + backtest_commission_ratio) / close))
                cash[date_pos][1] = 0
        if close < long_stop_loss:
            # Holding a long position: stop-loss, reduce by at most one unit
            if position_long[date_pos][1] - unit(current_asset(yesterday),yesterday) >= 0:
                print(date, 'close long %.1f'%(unit(current_asset(yesterday),yesterday)))
                cash[date_pos][1] += (1 - backtest_commission_ratio) * close * unit(current_asset(yesterday),
                                                                                    yesterday)
            else:
                print(date, 'close long %.1f' % (position_long[date_pos][1]))
                cash[date_pos][1] += (1 - backtest_commission_ratio) * position_long[date_pos][1] * close
            position_long[date_pos][1] = max(position_long[date_pos][1] - unit(current_asset(yesterday),yesterday), 0)
            # Alternative (disabled): close the entire long position at once
            # print(date, 'close long %.1f'%(position_long[date_pos][1]))
            # cash[date_pos][1] += (1 - backtest_commission_ratio) * close * position_long[date_pos][1]
            # position_long[date_pos][1] = 0
    if position_short[date_pos][1] != 0:
        if close < short_add_point:
            # Add to the position on a further half-ATR breakout
            position_short[date_pos][1] += unit(current_asset(yesterday),yesterday)
            print(date, 'add to short %.1f'%(unit(current_asset(yesterday),yesterday)))
            cash[date_pos][1] += (1 - backtest_commission_ratio) * close * unit(current_asset(yesterday), yesterday)
        if close > short_stop_loss:
            # Holding a short position: stop-loss, buy back at most one unit
            m = min(position_short[date_pos][1] * close, close * unit(current_asset(yesterday),yesterday), cash[date_pos][1] / (1 + backtest_commission_ratio))
            print(date, 'close short %.1f'%(m / close))
            cash[date_pos][1] -= (1 + backtest_commission_ratio) * m
            position_short[date_pos][1] = position_short[date_pos][1] - m / close
            # Alternative (disabled): close the entire short position at once
            # m = position_short[date_pos][1] * close
            # print(date, 'close short %.1f'%(m / close))
            # cash[date_pos][1] -= (1 + backtest_commission_ratio) * m
            # position_short[date_pos][1] = position_short[date_pos][1] - m / close
def unit(total_asset, date, atrtime = 10):
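    """Size of one trading unit: `unit_rate` of total assets divided by the
    ATR-based dollar volatility DV."""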
i = 0
while result[i][0] != date:
i += 1
end_time = result[i + atrtime - 1][0]
DV = calAtr(result, date, end_time, tr_list)[0]
return total_asset * unit_rate / DV
def current_asset(date):
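    """Mark-to-market value on `date`: cash plus the net long/short position
    valued at that day's close."""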
date_pos = 0
while cash[date_pos][0] != date:
date_pos += 1
return cash[date_pos][1] + (position_long[date_pos][1] - position_short[date_pos][1]) * result[finddatepos(date)][4]
def nearestdate(date, counter = 1):
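    """Walk forward (counter=1) or backward (counter=-1) from `date` to the
    nearest date present in `result`, stripping zero-padded month/day digits
    to match the CSV date format."""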
dateset = set()
for k in range(len(result)):
dateset.add(result[k][0])
while date not in dateset:
dt = datetime.datetime.strptime(date, '%Y-%m-%d')
if counter == 1:
date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
if date[8] == '0':
date = date[:8] + date[9:]
if date[5] == '0':
date = date[:5] + date[6:]
elif counter == -1:
date = (dt - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
if date[8] == '0':
date = date[:8] + date[9:]
if date[5] == '0':
date = date[:5] + date[6:]
return date
if __name__ == '__main__':
csvFile = open("data.csv", "r")
reader = csv.reader(csvFile)
result = []
for item in reader:
# Ignore first line
if reader.line_num == 1:
continue
result.append(
[item[0], float(item[1]), float(item[2]), float(item[3]), float(item[4])]) # date, open, high, low, close
csvFile.close()
initial_cash = 0
backtest_commission_ratio = 0.0001
start_time = '2021-03-01'
end_time = '2021-04-27'
tr_list = []
cash = []
position_short = []
position_long = []
atrtime = 20
Dontime = 30
unit_rate = 0.01
winningRate = 0
date = 0
time = 0
baseline = 0
annualized_rate = 0
l_time = []
l_asset = []
l_index = []
xs=[]
l_initial = []
main()
| 43.504241
| 208
| 0.572955
| 44,353
| 0.764694
| 0
| 0
| 0
| 0
| 0
| 0
| 15,048
| 0.259444
|
4dfab55975cccc588661b8464faec98ada96eafa
| 11,800
|
py
|
Python
|
posthog/test/test_update_person_props.py
|
csmatar/posthog
|
4587cfe18625f302726c531f06a32c18e9749e9d
|
[
"MIT"
] | 58
|
2020-08-26T16:26:18.000Z
|
2022-03-30T05:32:23.000Z
|
posthog/test/test_update_person_props.py
|
csmatar/posthog
|
4587cfe18625f302726c531f06a32c18e9749e9d
|
[
"MIT"
] | 15
|
2021-11-09T10:49:34.000Z
|
2021-11-09T16:11:01.000Z
|
posthog/test/test_update_person_props.py
|
csmatar/posthog
|
4587cfe18625f302726c531f06a32c18e9749e9d
|
[
"MIT"
] | 13
|
2020-09-08T13:27:07.000Z
|
2022-03-19T17:27:10.000Z
|
from datetime import datetime
from django.db import connection
from posthog.models import Person
from posthog.test.base import BaseTest
# How we expect this function to behave:
# | call | value exists | call TS is ___ existing TS | previous fn | write/override
# 1| set | no | N/A | N/A | yes
# 2| set_once | no | N/A | N/A | yes
# 3| set | yes | before | set | no
# 4| set | yes | before | set_once | yes
# 5| set | yes | after | set | yes
# 6| set | yes | after | set_once | yes
# 7| set_once | yes | before | set | no
# 8| set_once | yes | before | set_once | yes
# 9| set_once | yes | after | set | no
# 10| set_once | yes | after | set_once | no
# 11| set | yes | equal | set | no
# 12| set_once | yes | equal | set | no
# 13| set | yes | equal | set_once | yes
# 14| set_once | yes | equal | set_once | no
FUTURE_TIMESTAMP = datetime(2050, 1, 1, 1, 1, 1).isoformat()
PAST_TIMESTAMP = datetime(2000, 1, 1, 1, 1, 1).isoformat()
# Refers to migration 0176_update_person_props_function
# This is a Postgres function we use in the plugin server
class TestShouldUpdatePersonProp(BaseTest):
def test_update_without_properties_last_updated_at(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
        # don't apply the set_once update
self.assertEqual(updated_person.properties, {"a": 1, "b": 0})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set_once"})
self.assertIsNotNone(updated_person.properties_last_updated_at["a"])
def test_update_without_properties_last_operation(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": FUTURE_TIMESTAMP, "b": FUTURE_TIMESTAMP,},
properties_last_operation={},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
        # don't apply the set_once update
self.assertEqual(updated_person.properties, {"a": 1, "b": 0})
self.assertEqual(updated_person.properties_last_operation, {"a": "set"})
self.assertNotEqual(updated_person.properties_last_updated_at["a"], FUTURE_TIMESTAMP)
# tests cases 1 and 2 from the table
def test_update_non_existent_prop(self):
person = Person.objects.create(
team=self.team, properties={}, properties_last_updated_at={}, properties_last_operation={}
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# both updated
self.assertEqual(updated_person.properties, {"a": 1, "b": 1})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set_once"})
self.assertIsNotNone(updated_person.properties_last_updated_at["a"])
self.assertIsNotNone(updated_person.properties_last_updated_at["b"])
    # tests cases 3 and 4 from the table
def test_set_operation_with_earlier_timestamp(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": FUTURE_TIMESTAMP, "b": FUTURE_TIMESTAMP,},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# b updated
self.assertEqual(updated_person.properties, {"a": 0, "b": 1})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set"})
self.assertEqual(updated_person.properties_last_updated_at["a"], FUTURE_TIMESTAMP)
self.assertNotEqual(updated_person.properties_last_updated_at["b"], FUTURE_TIMESTAMP)
    # tests cases 5 and 6 from the table
def test_set_operation_with_older_timestamp(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": PAST_TIMESTAMP, "b": PAST_TIMESTAMP,},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# both updated
self.assertEqual(updated_person.properties, {"a": 1, "b": 1})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set"})
self.assertNotEqual(updated_person.properties_last_updated_at["a"], PAST_TIMESTAMP)
self.assertNotEqual(updated_person.properties_last_updated_at["b"], PAST_TIMESTAMP)
# tests cases 7 and 8 from the table
def test_set_once_operation_with_earlier_timestamp(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": FUTURE_TIMESTAMP, "b": FUTURE_TIMESTAMP,},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set_once', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# b updated
self.assertEqual(updated_person.properties, {"a": 0, "b": 1})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set_once"})
self.assertEqual(updated_person.properties_last_updated_at["a"], FUTURE_TIMESTAMP)
self.assertNotEqual(updated_person.properties_last_updated_at["b"], FUTURE_TIMESTAMP)
# tests cases 9 and 10 from the table
def test_set_once_operation_with_older_timestamp(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": PAST_TIMESTAMP, "b": PAST_TIMESTAMP,},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set_once', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# neither updated
self.assertEqual(updated_person.properties, {"a": 0, "b": 0})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set_once"})
self.assertEqual(updated_person.properties_last_updated_at["a"], PAST_TIMESTAMP)
self.assertEqual(updated_person.properties_last_updated_at["b"], PAST_TIMESTAMP)
    # tests cases 11-14 from the table
def test_equal_timestamps(self):
timestamp = PAST_TIMESTAMP
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0, "c": 0, "d": 0},
properties_last_updated_at={"a": timestamp, "b": timestamp, "c": timestamp, "d": timestamp},
properties_last_operation={"a": "set", "b": "set", "c": "set_once", "d": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
'{timestamp}',
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update,
row('set', 'c', '1'::jsonb)::person_property_update,
row('set_once', 'd', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# update if current op is set and last op is set_once i.e. "c"
self.assertEqual(updated_person.properties, {"a": 0, "b": 0, "c": 1, "d": 0})
self.assertEqual(
updated_person.properties_last_operation, {"a": "set", "b": "set", "c": "set", "d": "set_once"}
) # c changed
self.assertEqual(updated_person.properties_last_updated_at["a"], PAST_TIMESTAMP)
self.assertEqual(updated_person.properties_last_updated_at["b"], PAST_TIMESTAMP)
self.assertEqual(updated_person.properties_last_updated_at["c"], PAST_TIMESTAMP)
self.assertEqual(updated_person.properties_last_updated_at["c"], PAST_TIMESTAMP)
| 42.446043
| 107
| 0.527203
| 10,178
| 0.862542
| 0
| 0
| 0
| 0
| 0
| 0
| 5,221
| 0.442458
|
4dfb10a7a1f3430a5ca4e269077867482eeda87b
| 762
|
py
|
Python
|
setup.py
|
cclauss/AIF360
|
4fb4e0d3e4ed65c9b4d7a2d5238881a04cc334c1
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
cclauss/AIF360
|
4fb4e0d3e4ed65c9b4d7a2d5238881a04cc334c1
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
cclauss/AIF360
|
4fb4e0d3e4ed65c9b4d7a2d5238881a04cc334c1
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='aif360',
version='0.1.0',
description='IBM AI Fairness 360',
author='aif360 developers',
author_email='aif360@us.ibm.com',
url='https://github.com/IBM/AIF360',
long_description=long_description,
long_description_content_type='text/markdown',
license='Apache License 2.0',
packages=find_packages(),
# python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <3.7',
install_requires=[
'numpy',
'scipy',
'pandas==0.23.3',
'scikit-learn',
'numba',
],
include_package_data=True,
zip_safe=False)
| 29.307692
| 83
| 0.57874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 282
| 0.370079
|
4dfb5ac8775c4305591fb5eb4b61c6ac65e66c47
| 390
|
py
|
Python
|
src/examples/customstyle/wow_style/widgetstyle/radiobutton.py
|
robertkist/qtmodernredux
|
c7f791a1492ff855f3e4b963b8e9f20c46ba503f
|
[
"Apache-2.0"
] | 4
|
2021-04-12T19:30:47.000Z
|
2022-02-11T18:24:16.000Z
|
src/examples/customstyle/wow_style/widgetstyle/radiobutton.py
|
robertkist/qtmodernredux
|
c7f791a1492ff855f3e4b963b8e9f20c46ba503f
|
[
"Apache-2.0"
] | null | null | null |
src/examples/customstyle/wow_style/widgetstyle/radiobutton.py
|
robertkist/qtmodernredux
|
c7f791a1492ff855f3e4b963b8e9f20c46ba503f
|
[
"Apache-2.0"
] | null | null | null |
radiobutton_style = '''
QRadioButton:disabled {
background: transparent;
}
QRadioButton::indicator {
background: palette(dark);
width: 8px;
height: 8px;
border: 3px solid palette(dark);
border-radius: 7px;
}
QRadioButton::indicator:checked {
background: palette(highlight);
}
QRadioButton::indicator:checked:disabled {
background: palette(midlight);
}
'''
| 18.571429
| 42
| 0.697436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 370
| 0.948718
|
4dfbb4858f95304472fccbca8344763f96bb417e
| 1,788
|
py
|
Python
|
engine.py
|
kevioconnor/day0
|
6a72bf55dba1021850b810e647c87cb53ef86763
|
[
"MIT"
] | null | null | null |
engine.py
|
kevioconnor/day0
|
6a72bf55dba1021850b810e647c87cb53ef86763
|
[
"MIT"
] | null | null | null |
engine.py
|
kevioconnor/day0
|
6a72bf55dba1021850b810e647c87cb53ef86763
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import lzma, pickle
from typing import TYPE_CHECKING
from tcod.console import Console
from tcod.map import compute_fov
import exceptions, render_functions
from message_log import MessageLog
if TYPE_CHECKING:
from entity import Actor
from game_map import GameMap, GameWorld
class Engine:
game_map: GameMap
game_world: GameWorld
def __init__(self, player: Actor):
self.message_log = MessageLog()
self.mouse_location = (0, 0)
self.player = player
def handle_enemy_turns(self) -> None:
for entity in set(self.game_map.actors) - {self.player}:
if entity.ai:
try:
entity.ai.perform()
except exceptions.Impossible:
pass
def update_fov(self) -> None:
self.game_map.visible[:] = compute_fov(
self.game_map.tiles["transparent"],
(self.player.x, self.player.y),
radius=8
)
self.game_map.explored |= self.game_map.visible
def render(self, console: Console) -> None:
self.game_map.render(console)
self.message_log.render(console=console, x=21, y=45, width=40, height=5)
render_functions.render_bar(console=console, current_val=self.player.fighter.hp,
max_val=self.player.fighter.max_hp, total_width=20)
render_functions.render_level(console=console, dungeon_level=self.game_world.current_floor, location=(0, 47))
render_functions.render_name_at_location(console=console, x=21, y=44, engine=self)
def save_as(self, filename: str) -> None:
save_data = lzma.compress(pickle.dumps(self))
with open(filename, "wb") as f:
f.write(save_data)
| 33.111111
| 117
| 0.657718
| 1,445
| 0.808166
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.009508
|
4dfbb723c6f3d56895498fae876785ec1b7ea406
| 19,132
|
py
|
Python
|
pysnmp/ERI-DNX-STS1-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/ERI-DNX-STS1-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/ERI-DNX-STS1-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module ERI-DNX-STS1-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ERI-DNX-STS1-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:51:50 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection")
DecisionType, LinkCmdStatus, PortStatus, LinkPortAddress, FunctionSwitch, devices, trapSequence = mibBuilder.importSymbols("ERI-DNX-SMC-MIB", "DecisionType", "LinkCmdStatus", "PortStatus", "LinkPortAddress", "FunctionSwitch", "devices", "trapSequence")
eriMibs, = mibBuilder.importSymbols("ERI-ROOT-SMI", "eriMibs")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Integer32, Gauge32, IpAddress, Counter64, ObjectIdentity, iso, Unsigned32, MibIdentifier, Counter32, Bits, TimeTicks, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Integer32", "Gauge32", "IpAddress", "Counter64", "ObjectIdentity", "iso", "Unsigned32", "MibIdentifier", "Counter32", "Bits", "TimeTicks", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
eriDNXSts1MIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 644, 3, 4))
if mibBuilder.loadTexts: eriDNXSts1MIB.setLastUpdated('200204080000Z')
if mibBuilder.loadTexts: eriDNXSts1MIB.setOrganization('Eastern Research, Inc.')
dnxSTS1 = MibIdentifier((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3))
sts1Config = MibIdentifier((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1))
sts1Diag = MibIdentifier((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2))
class VtGroupType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("vt2-0-e1", 0), ("vt1-5-ds1", 1))
sts1MapperConfigTable = MibTable((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1), )
if mibBuilder.loadTexts: sts1MapperConfigTable.setStatus('current')
sts1MapperConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1), ).setIndexNames((0, "ERI-DNX-STS1-MIB", "sts1MapperAddr"))
if mibBuilder.loadTexts: sts1MapperConfigEntry.setStatus('current')
sts1MapperAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 1), LinkPortAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperAddr.setStatus('current')
sts1MapperResource = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperResource.setStatus('current')
sts1VtGroup1 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 3), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup1.setStatus('current')
sts1VtGroup2 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 4), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup2.setStatus('current')
sts1VtGroup3 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 5), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup3.setStatus('current')
sts1VtGroup4 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 6), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup4.setStatus('current')
sts1VtGroup5 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 7), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup5.setStatus('current')
sts1VtGroup6 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 8), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup6.setStatus('current')
sts1VtGroup7 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 9), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup7.setStatus('current')
sts1VtMapping = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("standardVT", 0), ("sequencialFrm", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtMapping.setStatus('current')
sts1Timing = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("internal", 0), ("ec1-Line", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1Timing.setStatus('current')
sts1ShortCable = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 12), FunctionSwitch()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1ShortCable.setStatus('current')
sts1MaprCmdStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 13), LinkCmdStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1MaprCmdStatus.setStatus('current')
sts1T1E1LinkConfigTable = MibTable((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2), )
if mibBuilder.loadTexts: sts1T1E1LinkConfigTable.setStatus('current')
sts1T1E1LinkConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1), ).setIndexNames((0, "ERI-DNX-STS1-MIB", "sts1T1E1CfgLinkAddr"))
if mibBuilder.loadTexts: sts1T1E1LinkConfigEntry.setStatus('current')
sts1T1E1CfgLinkAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 1), LinkPortAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1T1E1CfgLinkAddr.setStatus('current')
sts1T1E1CfgResource = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1T1E1CfgResource.setStatus('current')
sts1T1E1CfgLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1CfgLinkName.setStatus('current')
sts1T1E1Status = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 4), PortStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1Status.setStatus('current')
sts1T1E1Clear = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("disabled", 0), ("framed", 1), ("unframed", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1Clear.setStatus('current')
sts1T1E1Framing = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(5, 6, 7))).clone(namedValues=NamedValues(("t1Esf", 5), ("t1D4", 6), ("t1Unframed", 7)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1Framing.setStatus('current')
sts1T1E1NetLoop = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 7), FunctionSwitch()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1NetLoop.setStatus('current')
sts1T1E1YelAlrm = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 8), DecisionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1YelAlrm.setStatus('current')
sts1T1E1RecoverTime = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(3, 10, 15))).clone(namedValues=NamedValues(("timeout-3-secs", 3), ("timeout-10-secs", 10), ("timeout-15-secs", 15)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1RecoverTime.setStatus('current')
sts1T1E1EsfFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("att-54016", 0), ("ansi-t1-403", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1EsfFormat.setStatus('current')
sts1T1E1IdleCode = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("busy", 0), ("idle", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1IdleCode.setStatus('current')
sts1T1E1CfgCmdStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 12), LinkCmdStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1CfgCmdStatus.setStatus('current')
sts1T1E1Gr303Facility = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 13), DecisionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1T1E1Gr303Facility.setStatus('obsolete')
sts1MapperStatusTable = MibTable((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1), )
if mibBuilder.loadTexts: sts1MapperStatusTable.setStatus('current')
sts1MapperStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1), ).setIndexNames((0, "ERI-DNX-STS1-MIB", "sts1MapperStatusAddr"))
if mibBuilder.loadTexts: sts1MapperStatusEntry.setStatus('current')
sts1MapperStatusAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 1), LinkPortAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusAddr.setStatus('current')
sts1MapperStatusResource = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusResource.setStatus('current')
sts1MapperStatusState = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 32, 256, 512, 1024, 8192, 131072, 2147483647))).clone(namedValues=NamedValues(("ok", 0), ("lof", 32), ("lop", 256), ("oof", 512), ("ais", 1024), ("los", 8192), ("lomf", 131072), ("errors", 2147483647)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusState.setStatus('current')
sts1MapperStatusLOSErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusLOSErrs.setStatus('current')
sts1MapperStatusOOFErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusOOFErrs.setStatus('current')
sts1MapperStatusLOFErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusLOFErrs.setStatus('current')
sts1MapperStatusLOPtrErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusLOPtrErrs.setStatus('current')
sts1MapperStatusAISErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusAISErrs.setStatus('current')
sts1MapperStatusMultiFErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusMultiFErrs.setStatus('current')
sts1MapperStatusRxTraceErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusRxTraceErrs.setStatus('current')
sts1MapperStatusTotErrSecs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusTotErrSecs.setStatus('current')
sts1MapperStatusCmdStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 14, 101, 114, 200, 206, 500, 501, 502))).clone(namedValues=NamedValues(("ready-for-command", 0), ("update", 1), ("clearErrors", 14), ("update-successful", 101), ("clear-successful", 114), ("err-general-test-error", 200), ("err-field-cannot-be-set", 206), ("err-snmp-parse-failed", 500), ("err-invalid-snmp-type", 501), ("err-invalid-snmp-var-size", 502)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1MapperStatusCmdStatus.setStatus('current')
sts1LIUTable = MibTable((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2), )
if mibBuilder.loadTexts: sts1LIUTable.setStatus('current')
sts1LIUEntry = MibTableRow((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1), ).setIndexNames((0, "ERI-DNX-STS1-MIB", "sts1LIUAddr"))
if mibBuilder.loadTexts: sts1LIUEntry.setStatus('current')
sts1LIUAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 1), LinkPortAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUAddr.setStatus('current')
sts1LIUResource = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUResource.setStatus('current')
sts1LIUBertState = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(45, 44))).clone(namedValues=NamedValues(("off", 45), ("liu-bert", 44)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1LIUBertState.setStatus('current')
sts1LIUBertErrSecs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUBertErrSecs.setStatus('current')
sts1LIUBertDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUBertDuration.setStatus('current')
sts1LIULoopType = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 39))).clone(namedValues=NamedValues(("off", 0), ("mapper", 1), ("liu", 39)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1LIULoopType.setStatus('current')
sts1LIUDigitalErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUDigitalErrs.setStatus('current')
sts1LIUAnalogErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUAnalogErrs.setStatus('current')
sts1LIUExcessZeros = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUExcessZeros.setStatus('current')
sts1LIUCodingViolationErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUCodingViolationErrs.setStatus('current')
sts1LIUPRBSErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUPRBSErrs.setStatus('current')
sts1LIUCmdStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 14, 101, 114, 200, 202, 203, 205, 206, 500, 501, 502))).clone(namedValues=NamedValues(("ready-for-command", 0), ("update", 1), ("clearErrors", 14), ("update-successful", 101), ("clear-successful", 114), ("err-general-test-error", 200), ("err-invalid-loop-type", 202), ("err-invalid-bert-type", 203), ("err-test-in-progress", 205), ("err-field-cannot-be-set", 206), ("err-snmp-parse-failed", 500), ("err-invalid-snmp-type", 501), ("err-invalid-snmp-var-size", 502)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1LIUCmdStatus.setStatus('current')
dnxSTS1Enterprise = ObjectIdentity((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 0))
if mibBuilder.loadTexts: dnxSTS1Enterprise.setStatus('current')
sts1MapperConfigTrap = NotificationType((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 0, 1)).setObjects(("ERI-DNX-SMC-MIB", "trapSequence"), ("ERI-DNX-STS1-MIB", "sts1MapperAddr"), ("ERI-DNX-STS1-MIB", "sts1MaprCmdStatus"))
if mibBuilder.loadTexts: sts1MapperConfigTrap.setStatus('current')
sts1T1E1ConfigTrap = NotificationType((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 0, 2)).setObjects(("ERI-DNX-SMC-MIB", "trapSequence"), ("ERI-DNX-STS1-MIB", "sts1T1E1CfgLinkAddr"), ("ERI-DNX-STS1-MIB", "sts1T1E1CfgCmdStatus"))
if mibBuilder.loadTexts: sts1T1E1ConfigTrap.setStatus('current')
mibBuilder.exportSymbols("ERI-DNX-STS1-MIB", sts1MapperStatusCmdStatus=sts1MapperStatusCmdStatus, sts1MapperStatusTotErrSecs=sts1MapperStatusTotErrSecs, sts1MapperStatusEntry=sts1MapperStatusEntry, PYSNMP_MODULE_ID=eriDNXSts1MIB, sts1T1E1YelAlrm=sts1T1E1YelAlrm, sts1Config=sts1Config, sts1VtGroup5=sts1VtGroup5, sts1MapperStatusState=sts1MapperStatusState, sts1LIUDigitalErrs=sts1LIUDigitalErrs, sts1Diag=sts1Diag, sts1LIUBertDuration=sts1LIUBertDuration, sts1T1E1NetLoop=sts1T1E1NetLoop, sts1MapperResource=sts1MapperResource, sts1ShortCable=sts1ShortCable, sts1MapperStatusAISErrs=sts1MapperStatusAISErrs, sts1LIUCodingViolationErrs=sts1LIUCodingViolationErrs, sts1VtGroup1=sts1VtGroup1, sts1MapperAddr=sts1MapperAddr, sts1LIUResource=sts1LIUResource, sts1LIUBertState=sts1LIUBertState, dnxSTS1=dnxSTS1, sts1T1E1CfgLinkName=sts1T1E1CfgLinkName, sts1LIULoopType=sts1LIULoopType, sts1T1E1ConfigTrap=sts1T1E1ConfigTrap, sts1T1E1CfgResource=sts1T1E1CfgResource, sts1LIUAnalogErrs=sts1LIUAnalogErrs, sts1MapperStatusLOPtrErrs=sts1MapperStatusLOPtrErrs, sts1LIUAddr=sts1LIUAddr, sts1VtGroup6=sts1VtGroup6, sts1T1E1Status=sts1T1E1Status, sts1VtMapping=sts1VtMapping, VtGroupType=VtGroupType, sts1VtGroup3=sts1VtGroup3, sts1T1E1IdleCode=sts1T1E1IdleCode, sts1LIUBertErrSecs=sts1LIUBertErrSecs, sts1VtGroup4=sts1VtGroup4, sts1MapperConfigTable=sts1MapperConfigTable, sts1MapperStatusAddr=sts1MapperStatusAddr, sts1T1E1Gr303Facility=sts1T1E1Gr303Facility, sts1Timing=sts1Timing, sts1MapperStatusOOFErrs=sts1MapperStatusOOFErrs, sts1MapperStatusResource=sts1MapperStatusResource, sts1VtGroup2=sts1VtGroup2, eriDNXSts1MIB=eriDNXSts1MIB, sts1T1E1Framing=sts1T1E1Framing, sts1MapperStatusLOFErrs=sts1MapperStatusLOFErrs, sts1LIUTable=sts1LIUTable, sts1T1E1LinkConfigTable=sts1T1E1LinkConfigTable, sts1MapperStatusMultiFErrs=sts1MapperStatusMultiFErrs, sts1LIUExcessZeros=sts1LIUExcessZeros, sts1VtGroup7=sts1VtGroup7, sts1MapperStatusLOSErrs=sts1MapperStatusLOSErrs, sts1T1E1CfgLinkAddr=sts1T1E1CfgLinkAddr, sts1T1E1RecoverTime=sts1T1E1RecoverTime, dnxSTS1Enterprise=dnxSTS1Enterprise, sts1MaprCmdStatus=sts1MaprCmdStatus, sts1T1E1EsfFormat=sts1T1E1EsfFormat, sts1MapperStatusRxTraceErrs=sts1MapperStatusRxTraceErrs, sts1MapperConfigEntry=sts1MapperConfigEntry, sts1T1E1LinkConfigEntry=sts1T1E1LinkConfigEntry, sts1LIUCmdStatus=sts1LIUCmdStatus, sts1MapperConfigTrap=sts1MapperConfigTrap, sts1LIUEntry=sts1LIUEntry, sts1LIUPRBSErrs=sts1LIUPRBSErrs, sts1T1E1CfgCmdStatus=sts1T1E1CfgCmdStatus, sts1MapperStatusTable=sts1MapperStatusTable, sts1T1E1Clear=sts1T1E1Clear)
| 127.546667
| 2,554
| 0.744041
| 224
| 0.011708
| 0
| 0
| 0
| 0
| 0
| 0
| 3,222
| 0.168409
|
4dfc7fdfe3108af912d30eab1c90b722d5d0ec3d
| 357
|
py
|
Python
|
friday/models/__init__.py
|
alexa-infra/friday
|
297f9bfd94e88490d53e460c93727c399b2efcb2
|
[
"MIT"
] | 1
|
2019-03-17T08:11:18.000Z
|
2019-03-17T08:11:18.000Z
|
friday/models/__init__.py
|
alexa-infra/friday
|
297f9bfd94e88490d53e460c93727c399b2efcb2
|
[
"MIT"
] | null | null | null |
friday/models/__init__.py
|
alexa-infra/friday
|
297f9bfd94e88490d53e460c93727c399b2efcb2
|
[
"MIT"
] | null | null | null |
# flake8: noqa
# pylint: disable=cyclic-import
from .base import db, Model, metadata
from .link import Link
from .user import User
from .event import Event, Repeat
from .bookmark import Bookmark
from .tag import Tag
from .doc import Doc, DocTag
from .recipe import Recipe, RecipeImage
from .pagination import paginate, Pagination
from .todo import TodoItem
| 27.461538
| 44
| 0.792717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.12605
|
4dfd222e1995b07a6acae65ab8a9083933dc5471
| 632
|
py
|
Python
|
nqs_tf/models/ffnn.py
|
ameya1101/neural-quantum-states
|
2ab4f970e4cd7ed2a4ed3ebfdfe66bab396c11af
|
[
"MIT"
] | null | null | null |
nqs_tf/models/ffnn.py
|
ameya1101/neural-quantum-states
|
2ab4f970e4cd7ed2a4ed3ebfdfe66bab396c11af
|
[
"MIT"
] | null | null | null |
nqs_tf/models/ffnn.py
|
ameya1101/neural-quantum-states
|
2ab4f970e4cd7ed2a4ed3ebfdfe66bab396c11af
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from activations.activations import tan_sigmoid, exponential, ReLU
class FFNN(Model):
""" Creates a generic Feedforward neural network.
"""
def __init__(self):
super(FFNN, self).__init__()
def build(self, input_shape):
self.dense1 = Dense(units=input_shape[-1], activation=ReLU)
self.output_layer = Dense(units=1, activation=exponential)
def call(self, inputs):
x = self.dense1(inputs)
x = self.output_layer(x)
return tf.reduce_sum(x, axis=-1)
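# Minimal usage sketch (illustrative, not part of the original file): the
# layers are created lazily on the first call via build().
#   model = FFNN()
#   scores = model(tf.random.uniform((4, 16)))  # -> shape (4,)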
| 30.095238
| 67
| 0.683544
| 454
| 0.718354
| 0
| 0
| 0
| 0
| 0
| 0
| 57
| 0.09019
|
15005a003729bb6329d26f74028fc03fd8df4427
| 3,495
|
py
|
Python
|
examples/other/text_frontend/test_g2p.py
|
zh794390558/DeepSpeech
|
34178893327ad359cb816e55d7c66a10244fa08a
|
[
"Apache-2.0"
] | null | null | null |
examples/other/text_frontend/test_g2p.py
|
zh794390558/DeepSpeech
|
34178893327ad359cb816e55d7c66a10244fa08a
|
[
"Apache-2.0"
] | null | null | null |
examples/other/text_frontend/test_g2p.py
|
zh794390558/DeepSpeech
|
34178893327ad359cb816e55d7c66a10244fa08a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
from pathlib import Path
from parakeet.frontend.zh_frontend import Frontend as zhFrontend
from parakeet.utils.error_rate import word_errors
SILENCE_TOKENS = {"sp", "sil", "sp1", "spl"}
def text_cleaner(raw_text):
text = re.sub('#[1-4]|“|”|(|)', '', raw_text)
text = text.replace("…。", "。")
text = re.sub(':|;|——|……|、|…|—', ',', text)
return text
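# Illustrative example: text_cleaner("你好#3,世界……") drops the prosody
# marker "#3" and rewrites "……" as ",", returning "你好,世界,".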
def get_avg_wer(raw_dict, ref_dict, frontend, output_dir):
edit_distances = []
ref_lens = []
wf_g2p = open(output_dir / "text.g2p", "w")
wf_ref = open(output_dir / "text.ref.clean", "w")
for utt_id in raw_dict:
if utt_id not in ref_dict:
continue
raw_text = raw_dict[utt_id]
text = text_cleaner(raw_text)
g2p_phones = frontend.get_phonemes(text)
g2p_phones = sum(g2p_phones, [])
gt_phones = ref_dict[utt_id].split(" ")
# delete silence tokens in predicted phones and ground truth phones
g2p_phones = [phn for phn in g2p_phones if phn not in SILENCE_TOKENS]
gt_phones = [phn for phn in gt_phones if phn not in SILENCE_TOKENS]
gt_phones = " ".join(gt_phones)
g2p_phones = " ".join(g2p_phones)
wf_ref.write(gt_phones + "(baker_" + utt_id + ")" + "\n")
wf_g2p.write(g2p_phones + "(baker_" + utt_id + ")" + "\n")
edit_distance, ref_len = word_errors(gt_phones, g2p_phones)
edit_distances.append(edit_distance)
ref_lens.append(ref_len)
    wf_g2p.close()
    wf_ref.close()
    return sum(edit_distances) / sum(ref_lens)
def main():
parser = argparse.ArgumentParser(description="g2p example.")
parser.add_argument(
"--input-dir",
default="data/g2p",
type=str,
help="directory to preprocessed test data.")
parser.add_argument(
"--output-dir",
default="exp/g2p",
type=str,
help="directory to save g2p results.")
args = parser.parse_args()
input_dir = Path(args.input_dir).expanduser()
output_dir = Path(args.output_dir).expanduser()
output_dir.mkdir(parents=True, exist_ok=True)
assert input_dir.is_dir()
raw_dict, ref_dict = dict(), dict()
raw_path = input_dir / "text"
ref_path = input_dir / "text.ref"
with open(raw_path, "r") as rf:
for line in rf:
line = line.strip()
line_list = line.split(" ")
utt_id, raw_text = line_list[0], " ".join(line_list[1:])
raw_dict[utt_id] = raw_text
with open(ref_path, "r") as rf:
for line in rf:
line = line.strip()
line_list = line.split(" ")
utt_id, phones = line_list[0], " ".join(line_list[1:])
ref_dict[utt_id] = phones
frontend = zhFrontend()
avg_wer = get_avg_wer(raw_dict, ref_dict, frontend, output_dir)
print("The avg WER of g2p is:", avg_wer)
if __name__ == "__main__":
main()
| 35.30303
| 77
| 0.640343
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,033
| 0.292717
|
15011a09f8a6b93bb0cb155a2b3d2cf4e30e89b7
| 530
|
py
|
Python
|
data_split.py
|
DataXujing/ExtremeNet-Pytorch
|
fc8bf91cb748c144e85d2de271aea117ea54e808
|
[
"BSD-3-Clause"
] | 9
|
2020-01-15T05:54:54.000Z
|
2021-12-08T06:01:37.000Z
|
data_split.py
|
DataXujing/ExtremeNet-Pytorch
|
fc8bf91cb748c144e85d2de271aea117ea54e808
|
[
"BSD-3-Clause"
] | 3
|
2020-12-01T10:26:19.000Z
|
2021-01-20T07:51:47.000Z
|
data_split.py
|
DataXujing/ExtremeNet-Pytorch
|
fc8bf91cb748c144e85d2de271aea117ea54e808
|
[
"BSD-3-Clause"
] | 3
|
2020-03-31T14:40:08.000Z
|
2021-02-22T07:49:34.000Z
|
# Split the VOC dataset into training and test sets
import os
import random
import shutil
trainval_percent = 0.1
train_percent = 0.9
imgfilepath = '../myData/JPEGImages'  # location of the original data
total_img = os.listdir(imgfilepath)
sample_num = len(total_img)
trains = random.sample(total_img,int(sample_num*train_percent))
for file in total_img:
if file in trains:
shutil.copy(os.path.join(imgfilepath,file),"./myData/coco/images/train/"+file)
else:
shutil.copy(os.path.join(imgfilepath,file),"./myData/coco/images/val/"+file)
print(file)
| 17.096774
| 86
| 0.711321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 129
| 0.230357
|
15035bfbd1a02ccbee3c988cf9c68e7e783016d5
| 3,104
|
py
|
Python
|
sciunit/models/examples.py
|
russelljjarvis/sciun
|
f8f6ede84299dc700afe94b07ae4e98f87a19116
|
[
"MIT"
] | 1
|
2020-05-28T00:35:23.000Z
|
2020-05-28T00:35:23.000Z
|
sciunit/models/examples.py
|
ChihweiLHBird/sciunit
|
f5669d165fa505c3a17ac17af3d3c78aafd44ae2
|
[
"MIT"
] | 1
|
2020-12-29T04:28:57.000Z
|
2020-12-29T04:28:57.000Z
|
sciunit/models/examples.py
|
russelljjarvis/sciunit
|
f8f6ede84299dc700afe94b07ae4e98f87a19116
|
[
"MIT"
] | null | null | null |
"""Example SciUnit model classes."""
import random
from sciunit.models import Model
from sciunit.capabilities import ProducesNumber
from sciunit.utils import class_intern, method_cache
from sciunit.utils import method_memoize # Decorator for caching of capability method results.
from typing import Union
class ConstModel(Model, ProducesNumber):
"""A model that always produces a constant number as output."""
def __init__(self, constant: Union[int, float], name: str=None):
self.constant = constant
super(ConstModel, self).__init__(name=name, constant=constant)
def produce_number(self) -> Union[int, float]:
return self.constant
class UniformModel(Model, ProducesNumber):
"""A model that always produces a random uniformly distributed number.
in [a,b] as output."""
def __init__(self, a, b, name=None):
self.a, self.b = a, b
super(UniformModel, self).__init__(name=name, a=a, b=b)
def produce_number(self) -> float:
"""Produece a number between `a` and `b`.
Returns:
float: The number between a and b.
"""
return random.uniform(self.a, self.b)
################################################################
# Here are several examples of caching and sharing can be used
# to reduce the computational load of testing.
################################################################
class UniqueRandomNumberModel(Model, ProducesNumber):
"""An example model to ProducesNumber."""
def produce_number(self) -> float:
"""Each call to this method will produce a different random number.
Returns:
float: A random number produced.
"""
return random.random()
class RepeatedRandomNumberModel(Model, ProducesNumber):
"""An example model to demonstrate ProducesNumber with cypy.lazy."""
@method_memoize
def produce_number(self):
"""Each call to this method will produce the same random number as was returned in the first call, ensuring reproducibility and eliminating computational overhead.
Returns:
float: A random number produced.
"""
return random.random()
@class_intern
class SharedModel(Model):
"""A model that, each time it is instantiated with the same parameters,
    will return the same instance at the same location in memory.
Attributes should not be set post-instantiation
unless the goal is to set those attributes on all models of this class."""
pass
class PersistentUniformModel(UniformModel):
"""TODO"""
def run(self) -> None:
self._x = random.uniform(self.a, self.b)
def produce_number(self) -> float:
return self._x
class CacheByInstancePersistentUniformModel(PersistentUniformModel):
"""TODO"""
@method_cache(by='instance', method='run')
def produce_number(self) -> float:
return self._x
class CacheByValuePersistentUniformModel(PersistentUniformModel):
"""TODO"""
@method_cache(by='value', method='run')
def produce_number(self) -> float:
return self._x
| 30.732673
| 171
| 0.661727
| 2,519
| 0.811534
| 0
| 0
| 849
| 0.273518
| 0
| 0
| 1,417
| 0.456508
|
1504d1248cc2e761c3fb76bb1b97319d6ca7d7fb
| 140
|
py
|
Python
|
semantic/semantic/model/model.py
|
VladimirSiv/semantic-search-system
|
96b6581f191aacb1157b1408b2726e317ddc2c49
|
[
"MIT"
] | 1
|
2021-07-01T08:53:46.000Z
|
2021-07-01T08:53:46.000Z
|
semantic/semantic/model/model.py
|
VladimirSiv/semantic-search-system
|
96b6581f191aacb1157b1408b2726e317ddc2c49
|
[
"MIT"
] | null | null | null |
semantic/semantic/model/model.py
|
VladimirSiv/semantic-search-system
|
96b6581f191aacb1157b1408b2726e317ddc2c49
|
[
"MIT"
] | 1
|
2021-12-29T01:18:38.000Z
|
2021-12-29T01:18:38.000Z
|
from sentence_transformers import SentenceTransformer
from semantic.config import CONFIG
model = SentenceTransformer(CONFIG["model_name"])
| 28
| 53
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.085714
|
1504effc59c426c8cdd37004ed34fbfb801a2d4e
| 8,619
|
py
|
Python
|
utils/models.py
|
miladalipour99/time_series_augmentation
|
3c314468df689a70e84ae6b433f9cdf5bae63400
|
[
"Apache-2.0"
] | 140
|
2020-04-21T05:01:42.000Z
|
2022-03-30T20:03:21.000Z
|
utils/models.py
|
miladalipour99/time_series_augmentation
|
3c314468df689a70e84ae6b433f9cdf5bae63400
|
[
"Apache-2.0"
] | 5
|
2021-06-08T01:43:46.000Z
|
2021-12-22T11:37:28.000Z
|
utils/models.py
|
miladalipour99/time_series_augmentation
|
3c314468df689a70e84ae6b433f9cdf5bae63400
|
[
"Apache-2.0"
] | 32
|
2020-04-26T14:00:58.000Z
|
2022-03-09T01:25:32.000Z
|
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten, Dropout, Input
from tensorflow.keras.layers import MaxPooling1D, Conv1D
from tensorflow.keras.layers import LSTM, Bidirectional
from tensorflow.keras.layers import BatchNormalization, GlobalAveragePooling1D, Permute, concatenate, Activation, add
import numpy as np
import math
def get_model(model_name, input_shape, nb_class):
if model_name == "vgg":
model = cnn_vgg(input_shape, nb_class)
elif model_name == "lstm1":
model = lstm1(input_shape, nb_class)
elif model_name == "lstm":
model = lstm1v0(input_shape, nb_class)
elif model_name == "lstm2":
model = lstm2(input_shape, nb_class)
elif model_name == "blstm1":
model = blstm1(input_shape, nb_class)
elif model_name == "blstm2":
model = blstm2(input_shape, nb_class)
elif model_name == "lstmfcn":
model = lstm_fcn(input_shape, nb_class)
elif model_name == "resnet":
model = cnn_resnet(input_shape, nb_class)
elif model_name == "mlp":
model = mlp4(input_shape, nb_class)
elif model_name == "lenet":
model = cnn_lenet(input_shape, nb_class)
    else:
        # Fail loudly instead of falling through and returning an undefined name.
        raise ValueError("Unknown model name: %s" % model_name)
return model
def mlp4(input_shape, nb_class):
# Z. Wang, W. Yan, T. Oates, "Time Series Classification from Scratch with Deep Neural Networks: A Strong Baseline," Int. Joint Conf. Neural Networks, 2017, pp. 1578-1585
ip = Input(shape=input_shape)
fc = Flatten()(ip)
fc = Dropout(0.1)(fc)
fc = Dense(500, activation='relu')(fc)
fc = Dropout(0.2)(fc)
fc = Dense(500, activation='relu')(fc)
fc = Dropout(0.2)(fc)
fc = Dense(500, activation='relu')(fc)
fc = Dropout(0.3)(fc)
out = Dense(nb_class, activation='softmax')(fc)
model = Model([ip], [out])
model.summary()
return model
def cnn_lenet(input_shape, nb_class):
# Y. Lecun, L. Bottou, Y. Bengio, and P. Haffner, “Gradient-based learning applied to document recognition,” Proceedings of the IEEE, vol. 86, no. 11, pp. 2278–2324, 1998.
ip = Input(shape=input_shape)
conv = ip
nb_cnn = int(round(math.log(input_shape[0], 2))-3)
print("pooling layers: %d"%nb_cnn)
for i in range(nb_cnn):
conv = Conv1D(6+10*i, 3, padding='same', activation="relu", kernel_initializer='he_uniform')(conv)
conv = MaxPooling1D(pool_size=2)(conv)
flat = Flatten()(conv)
fc = Dense(120, activation='relu')(flat)
fc = Dropout(0.5)(fc)
fc = Dense(84, activation='relu')(fc)
fc = Dropout(0.5)(fc)
out = Dense(nb_class, activation='softmax')(fc)
model = Model([ip], [out])
model.summary()
return model
def cnn_vgg(input_shape, nb_class):
# K. Simonyan and A. Zisserman, "Very deep convolutional networks for large-scale image recognition," arXiv preprint arXiv:1409.1556, 2014.
ip = Input(shape=input_shape)
conv = ip
nb_cnn = int(round(math.log(input_shape[0], 2))-3)
print("pooling layers: %d"%nb_cnn)
for i in range(nb_cnn):
num_filters = min(64*2**i, 512)
conv = Conv1D(num_filters, 3, padding='same', activation="relu", kernel_initializer='he_uniform')(conv)
conv = Conv1D(num_filters, 3, padding='same', activation="relu", kernel_initializer='he_uniform')(conv)
if i > 1:
conv = Conv1D(num_filters, 3, padding='same', activation="relu", kernel_initializer='he_uniform')(conv)
conv = MaxPooling1D(pool_size=2)(conv)
flat = Flatten()(conv)
fc = Dense(4096, activation='relu')(flat)
fc = Dropout(0.5)(fc)
fc = Dense(4096, activation='relu')(fc)
fc = Dropout(0.5)(fc)
out = Dense(nb_class, activation='softmax')(fc)
model = Model([ip], [out])
model.summary()
return model
def lstm1v0(input_shape, nb_class):
# Original proposal:
# S. Hochreiter and J. Schmidhuber, “Long Short-Term Memory,” Neural Computation, vol. 9, no. 8, pp. 1735–1780, Nov. 1997.
ip = Input(shape=input_shape)
l2 = LSTM(512)(ip)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def lstm1(input_shape, nb_class):
# Original proposal:
# S. Hochreiter and J. Schmidhuber, “Long Short-Term Memory,” Neural Computation, vol. 9, no. 8, pp. 1735–1780, Nov. 1997.
# Hyperparameter choices:
# N. Reimers and I. Gurevych, "Optimal hyperparameters for deep lstm-networks for sequence labeling tasks," arXiv, preprint arXiv:1707.06799, 2017
ip = Input(shape=input_shape)
l2 = LSTM(100)(ip)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def lstm2(input_shape, nb_class):
ip = Input(shape=input_shape)
l1 = LSTM(100, return_sequences=True)(ip)
l2 = LSTM(100)(l1)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def blstm1(input_shape, nb_class):
# Original proposal:
# M. Schuster and K. K. Paliwal, “Bidirectional recurrent neural networks,” IEEE Transactions on Signal Processing, vol. 45, no. 11, pp. 2673–2681, 1997.
# Hyperparameter choices:
# N. Reimers and I. Gurevych, "Optimal hyperparameters for deep lstm-networks for sequence labeling tasks," arXiv, preprint arXiv:1707.06799, 2017
ip = Input(shape=input_shape)
l2 = Bidirectional(LSTM(100))(ip)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def blstm2(input_shape, nb_class):
ip = Input(shape=input_shape)
l1 = Bidirectional(LSTM(100, return_sequences=True))(ip)
l2 = Bidirectional(LSTM(100))(l1)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def lstm_fcn(input_shape, nb_class):
# F. Karim, S. Majumdar, H. Darabi, and S. Chen, “LSTM Fully Convolutional Networks for Time Series Classification,” IEEE Access, vol. 6, pp. 1662–1669, 2018.
ip = Input(shape=input_shape)
    # The LSTM branch runs on the dimension-shuffled input (a single time step
    # with many features), as described in Karim et al.; unusual, but it works in practice.
lstm = Permute((2, 1))(ip)
lstm = LSTM(128)(lstm)
lstm = Dropout(0.8)(lstm)
conv = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(ip)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
conv = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
conv = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
flat = GlobalAveragePooling1D()(conv)
flat = concatenate([lstm, flat])
out = Dense(nb_class, activation='softmax')(flat)
model = Model([ip], [out])
model.summary()
return model
def cnn_resnet(input_shape, nb_class):
# I. Fawaz, G. Forestier, J. Weber, L. Idoumghar, P-A Muller, "Data augmentation using synthetic data for time series classification with deep residual networks," International Workshop on Advanced Analytics and Learning on Temporal Data ECML/PKDD, 2018
ip = Input(shape=input_shape)
residual = ip
conv = ip
for i, nb_nodes in enumerate([64, 128, 128]):
conv = Conv1D(nb_nodes, 8, padding='same', kernel_initializer="glorot_uniform")(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
conv = Conv1D(nb_nodes, 5, padding='same', kernel_initializer="glorot_uniform")(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
conv = Conv1D(nb_nodes, 3, padding='same', kernel_initializer="glorot_uniform")(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
if i < 2:
# expands dimensions according to Fawaz et al.
residual = Conv1D(nb_nodes, 1, padding='same', kernel_initializer="glorot_uniform")(residual)
residual = BatchNormalization()(residual)
conv = add([residual, conv])
conv = Activation('relu')(conv)
residual = conv
flat = GlobalAveragePooling1D()(conv)
out = Dense(nb_class, activation='softmax')(flat)
model = Model([ip], [out])
model.summary()
return model
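
# Hedged usage sketch (hypothetical shapes and training setup; not part of the
# original module): build one of the classifiers above for univariate series
# of length 128 with 5 classes and compile it with a standard categorical setup.
if __name__ == "__main__":
    demo = get_model("lstm1", input_shape=(128, 1), nb_class=5)
    demo.compile(loss="categorical_crossentropy", optimizer="adam",
                 metrics=["accuracy"])
    # demo.fit(x_train, y_train, ...) would follow, with one-hot encoded labels.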
| 31.922222
| 257
| 0.647987
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,414
| 0.279107
|
1504fcdc48e346e97fc1b686d7489c610536fa41
| 2,468
|
py
|
Python
|
ai_flow/test/util/test_sqlalchemy_db.py
|
flink-extended/ai-flow
|
d1427a243097d94d77fedbe1966500ae26975a13
|
[
"Apache-2.0"
] | 79
|
2021-10-15T07:32:27.000Z
|
2022-03-28T04:10:19.000Z
|
ai_flow/test/util/test_sqlalchemy_db.py
|
flink-extended/ai-flow
|
d1427a243097d94d77fedbe1966500ae26975a13
|
[
"Apache-2.0"
] | 153
|
2021-10-15T05:23:46.000Z
|
2022-02-23T06:07:10.000Z
|
ai_flow/test/util/test_sqlalchemy_db.py
|
flink-extended/ai-flow
|
d1427a243097d94d77fedbe1966500ae26975a13
|
[
"Apache-2.0"
] | 23
|
2021-10-15T02:36:37.000Z
|
2022-03-17T02:59:27.000Z
|
# Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import unittest
import sqlalchemy
from ai_flow.store.db.base_model import base
from ai_flow.util import sqlalchemy_db
SQLITE_FILE = 'ai_flow.db'
TEST_URL = 'sqlite:///ai_flow.db'
def create_engine(url):
return sqlalchemy.create_engine(url)
def get_tables(url):
return sqlalchemy.inspect(create_engine(url)).get_table_names()
def all_ai_flow_tables_exist(url):
tables = set(get_tables(url))
for key in base.metadata.tables.keys():
if key not in tables:
return False
return True
def none_ai_flow_tables_exist(url):
tables = set(get_tables(url))
for key in base.metadata.tables.keys():
if key in tables:
return False
return True
class TestSqlalchemyDB(unittest.TestCase):
def setUp(self) -> None:
sqlalchemy_db.clear_db(TEST_URL, base.metadata)
def tearDown(self) -> None:
if os.path.exists(SQLITE_FILE):
os.remove(SQLITE_FILE)
def test_upgrade(self):
self.assertTrue(none_ai_flow_tables_exist(TEST_URL))
sqlalchemy_db.upgrade(TEST_URL)
self.assertTrue(all_ai_flow_tables_exist(TEST_URL))
def test_upgrade_with_version(self):
self.assertTrue(none_ai_flow_tables_exist(TEST_URL))
sqlalchemy_db.upgrade(TEST_URL, 'de1c96ef582a')
self.assertFalse(all_ai_flow_tables_exist(TEST_URL))
self.assertTrue(len(get_tables(TEST_URL)) > 0)
sqlalchemy_db.upgrade(TEST_URL)
self.assertTrue(all_ai_flow_tables_exist(TEST_URL))
def test_downgrade(self):
self.assertTrue(none_ai_flow_tables_exist(TEST_URL))
sqlalchemy_db.upgrade(TEST_URL)
self.assertTrue(all_ai_flow_tables_exist(TEST_URL))
sqlalchemy_db.downgrade(TEST_URL, 'de1c96ef582a')
self.assertFalse(all_ai_flow_tables_exist(TEST_URL))
if __name__ == '__main__':
unittest.main()
| 29.73494
| 67
| 0.724878
| 1,111
| 0.450162
| 0
| 0
| 0
| 0
| 0
| 0
| 643
| 0.260535
|
1506feffa85f0e03250b9a11fac052405432fbe0
| 628
|
py
|
Python
|
test.py
|
blodzbyte/isEven
|
18e42cfdad052d34318900fdd91167a533b52210
|
[
"MIT"
] | 44
|
2020-03-11T16:44:41.000Z
|
2022-03-16T07:55:24.000Z
|
test.py
|
blodzbyte/isEven
|
18e42cfdad052d34318900fdd91167a533b52210
|
[
"MIT"
] | 9
|
2020-03-11T21:07:01.000Z
|
2021-07-08T18:49:23.000Z
|
test.py
|
blodzbyte/isEven
|
18e42cfdad052d34318900fdd91167a533b52210
|
[
"MIT"
] | 18
|
2020-03-11T20:03:50.000Z
|
2021-07-22T21:40:00.000Z
|
#!/usr/bin/env python3
from isEven import isEven
def testRange(min, max, evens):
print('Testing [{},{}] {}...'.format(min, max, 'evens' if evens else 'odds'))
for i in range(min, max, 2):
i = i if evens else i - 1
result = isEven(i)
        if result != evens:
            raise Exception('Test failed. Got: isEven({}) = {}. Expected: '
                            'isEven({}) = {}.'.format(i, str(result), i,
                                                      str(evens)))
print('Test passed.')
def main():
testRange(0, 10000, True)
testRange(0, 10000, False)
if __name__ == '__main__':
main()
| 29.904762
| 80
| 0.517516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.234076
|
15073013e66266b93b368bf7d20e3350da16c0c6
| 1,139
|
py
|
Python
|
comm.py
|
thedognexttothetrashcan/spi_tmall
|
021dc9a6a23841373000a5f09ca300abd376ad15
|
[
"Apache-2.0"
] | null | null | null |
comm.py
|
thedognexttothetrashcan/spi_tmall
|
021dc9a6a23841373000a5f09ca300abd376ad15
|
[
"Apache-2.0"
] | null | null | null |
comm.py
|
thedognexttothetrashcan/spi_tmall
|
021dc9a6a23841373000a5f09ca300abd376ad15
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python
# encoding=utf-8
import os
import datetime,time
from selenium import webdriver
import config
import threading
import numpy as np
def writelog(msg,log):
nt=datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
text="[%s] %s " % (nt,msg)
os.system("echo %s >> %s" % (text.encode('utf8'),log))
def create_chrome():
ops=webdriver.ChromeOptions()
ops.add_experimental_option('mobileEmulation',config.mobileEmulation)
web=webdriver.Chrome(chrome_options=ops)
web.set_page_load_timeout(10)
web.set_script_timeout(10)
web.set_window_size(config.mWidth,config.mHeight)
return web
# Create a simple thread pool: start tnum daemon threads running funname, then wait for all to finish.
def threading_pool(tnum,funname):
threadlist=[]
for i in range(tnum):
t=threading.Thread(target=funname)
threadlist.append(t)
for t in threadlist:
t.setDaemon(True)
t.start()
for t in threadlist:
t.join()
return threadlist
# Sleep for a random interval drawn uniformly from [s, e] seconds (defaults: 3 to 6).
def set_interval(*args):
s = 3
e = 6
if len(args)>=1:
s = args[0]
if len(args)>=2:
e = args[1]
f = np.random.uniform(s,e)
time.sleep(f)
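
# Hedged usage sketch (hypothetical worker; not part of the original module):
# run three daemon threads that each sleep a short random interval.
def _demo_worker():
    set_interval(0.1, 0.3)

if __name__ == '__main__':
    threading_pool(3, _demo_worker)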
| 21.903846
| 73
| 0.637401
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.108867
|
15077392ea3f2519132c06a08d94b11524ea1c19
| 1,584
|
py
|
Python
|
sherlockpipe/objectinfo/preparer/LightcurveBuilder.py
|
LuisCerdenoMota/SHERLOCK
|
5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1
|
[
"MIT"
] | null | null | null |
sherlockpipe/objectinfo/preparer/LightcurveBuilder.py
|
LuisCerdenoMota/SHERLOCK
|
5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1
|
[
"MIT"
] | null | null | null |
sherlockpipe/objectinfo/preparer/LightcurveBuilder.py
|
LuisCerdenoMota/SHERLOCK
|
5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1
|
[
"MIT"
] | null | null | null |
import re
from abc import ABC, abstractmethod
from sherlockpipe.star.EpicStarCatalog import EpicStarCatalog
from sherlockpipe.star.KicStarCatalog import KicStarCatalog
from sherlockpipe.star.TicStarCatalog import TicStarCatalog
class LightcurveBuilder(ABC):
OBJECT_ID_REGEX = "^(KIC|TIC|EPIC)[-_ ]([0-9]+)$"
NUMBERS_REGEX = "[0-9]+$"
MISSION_ID_KEPLER = "KIC"
MISSION_ID_KEPLER_2 = "EPIC"
MISSION_ID_TESS = "TIC"
def __init__(self):
self.star_catalogs = {}
self.star_catalogs[self.MISSION_ID_KEPLER] = KicStarCatalog()
self.star_catalogs[self.MISSION_ID_KEPLER_2] = EpicStarCatalog()
self.star_catalogs[self.MISSION_ID_TESS] = TicStarCatalog()
self.authors = {}
self.authors["Kepler"] = "Kepler"
self.authors["K2"] = "K2"
self.authors["TESS"] = "SPOC"
@abstractmethod
def build(self, object_info, sherlock_dir):
pass
def parse_object_id(self, object_id):
object_id_parsed = re.search(self.OBJECT_ID_REGEX, object_id)
mission_prefix = object_id[object_id_parsed.regs[1][0]:object_id_parsed.regs[1][1]]
id = object_id[object_id_parsed.regs[2][0]:object_id_parsed.regs[2][1]]
if mission_prefix == self.MISSION_ID_KEPLER:
mission = "Kepler"
elif mission_prefix == self.MISSION_ID_KEPLER_2:
mission = "K2"
elif mission_prefix == self.MISSION_ID_TESS:
mission = "TESS"
else:
raise ValueError("Invalid object id " + object_id)
return mission, mission_prefix, int(id)
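
# Hedged usage sketch (hypothetical concrete subclass and object id; not part
# of the original module): parse_object_id splits a catalogue identifier into
# its mission, prefix, and numeric id.
class _DemoBuilder(LightcurveBuilder):
    def build(self, object_info, sherlock_dir):
        pass

if __name__ == '__main__':
    print(_DemoBuilder().parse_object_id("TIC 261136679"))  # ('TESS', 'TIC', 261136679)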
| 38.634146
| 91
| 0.674874
| 1,354
| 0.854798
| 0
| 0
| 76
| 0.04798
| 0
| 0
| 130
| 0.082071
|