hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k โ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 โ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 โ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k โ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 โ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 โ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k โ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 โ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 โ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bffadd5ef9ed55259f862e153a071bf79963137d | 5,742 | py | Python | lib/pyfrc/sim/robot_controller.py | prensing/pyfrc | 3b84a9a66ed97ee6531489baee26ce6c8478865b | [
"MIT"
] | null | null | null | lib/pyfrc/sim/robot_controller.py | prensing/pyfrc | 3b84a9a66ed97ee6531489baee26ce6c8478865b | [
"MIT"
] | null | null | null | lib/pyfrc/sim/robot_controller.py | prensing/pyfrc | 3b84a9a66ed97ee6531489baee26ce6c8478865b | [
"MIT"
] | null | null | null |
from hal_impl import mode_helpers
from hal_impl.data import hal_data
import threading
import time
import wpilib
from ..physics.core import PhysicsInterface
from .sim_manager import SimManager
class RobotController:
    '''
    Manages the active state of the simulated robot.

    Owns two daemon threads (the robot-code thread and a fake
    driver-station thread) plus the physics interface, and mediates mode
    changes (disabled / autonomous / teleop / test) requested by the UI.
    '''

    # Human-readable names for the SimManager mode constants.
    mode_map = {
        SimManager.MODE_AUTONOMOUS: "Autonomous",
        SimManager.MODE_DISABLED: "Disabled",
        SimManager.MODE_OPERATOR_CONTROL: "OperatorControl",
        SimManager.MODE_TEST: "Test"
    }

    def __init__(self, robot_class, robot_path, fake_time, config_obj):
        '''
        :param robot_class: user robot class to run via wpilib.RobotBase.main
        :param robot_path:  path to the robot code (forwarded to physics)
        :param fake_time:   simulated-time object shared with the robot code
        :param config_obj:  simulator configuration object
        '''
        self.mode = SimManager.MODE_DISABLED
        self.mode_callback = None
        self.robot_class = robot_class
        self.fake_time = fake_time
        self.physics_controller = PhysicsInterface(robot_path, fake_time, config_obj)
        # any data shared with the ui must be protected by
        # this since it's running in a different thread
        self._lock = threading.RLock()
        # daemon threads so they do not keep the process alive on exit
        self.thread = threading.Thread(target=self._robot_thread, name="Robot Thread")
        self.thread.daemon = True
        self.ds_thread = threading.Thread(target=self._ds_thread, name="Fake DS Thread")
        self.ds_thread.daemon = True

    def run(self):
        '''Start the robot-code thread and the fake driver-station thread.'''
        self._run_code = True
        self.thread.start()
        self.ds_thread.start()

    def wait_for_robotinit(self):
        '''Block until robotInit has finished (or the robot thread died).'''
        # Do this so that we don't initialize the UI until robotInit is done
        while hal_data['user_program_state'] is None and self.is_alive():
            time.sleep(0.025)

    def stop(self):
        '''Always reports success; see the comment below for why.'''
        # Since we're using OperatorControl, there isn't a way to kill
        # the robot. Just exit and hopefully everything is ok
        return True
        # Dead code below intentionally kept for reference:
        #with self._lock:
        #    self._run_code = False
        # if the robot code is spinning in any of the modes, then
        # we need to change the mode so it returns back to us
        #    if self.mode == SimManager.MODE_DISABLED:
        #        self.mode = SimManager.MODE_OPERATOR_CONTROL
        #    else:
        #        self.mode = SimManager.MODE_DISABLED
        # resume the robot just in case it's hung somewhere
        #self.fake_time.resume()
        #try:
        #    self.thread.join(timeout=5.0)
        #except RuntimeError:
        #    return False
        #return not self.thread.is_alive()

    #
    # API used by the ui
    #

    def has_physics(self):
        # True when a user physics engine is attached.
        return self.physics_controller._has_engine()

    def is_alive(self):
        # True while the robot-code thread is still running.
        return self.thread.is_alive()

    def on_mode_change(self, callable):
        '''When the robot mode changes, call the function with the mode'''
        with self._lock:
            self.mode_callback = callable

    def set_mode(self, mode, game_specific_message=None):
        '''
        Switch the robot into ``mode`` and notify HAL plus any registered
        callback.  Raises ValueError for an unknown mode; silently returns
        if the robot thread has already died.
        '''
        if mode not in [SimManager.MODE_DISABLED,
                        SimManager.MODE_AUTONOMOUS,
                        SimManager.MODE_OPERATOR_CONTROL,
                        SimManager.MODE_TEST]:
            raise ValueError("Invalid value for mode: %s" % mode)
        with self._lock:
            # TODO: need a way to notify the caller that the set failed. Perhaps an exception?
            if not self.is_alive():
                return
            old_mode = self.mode
            self.mode = mode
            callback = self.mode_callback
        # don't call from inside the lock
        if old_mode != mode:
            if mode == SimManager.MODE_DISABLED:
                mode_helpers.set_disabled()
            elif mode == SimManager.MODE_AUTONOMOUS:
                mode_helpers.set_autonomous(True, game_specific_message=game_specific_message)
            elif mode == SimManager.MODE_OPERATOR_CONTROL:
                mode_helpers.set_teleop_mode(True)
            elif mode == SimManager.MODE_TEST:
                mode_helpers.set_test_mode(True)
            # physics only runs while the robot is enabled
            self.physics_controller._set_robot_enabled(mode != SimManager.MODE_DISABLED)
            if callback is not None:
                callback(mode)

    def get_mode(self):
        '''Return the current mode (thread-safe).'''
        with self._lock:
            return self.mode

    def get_position(self):
        '''Returns x,y,angle'''
        return self.physics_controller.get_position()

    def _get_vector(self):
        # Raw position vector from the physics controller (internal use).
        return self.physics_controller._get_vector()

    #
    # Runs the code
    #

    def _check_sleep(self, idx):
        '''This ensures that the robot code called Wait() at some point'''
        # TODO: There are some cases where it would be ok to do this...
        if not self.fake_time.slept[idx]:
            errstr = '%s() function is not calling wpilib.Timer.delay() in its loop!' % self.mode_map[self.mode]
            raise RuntimeError(errstr)
        self.fake_time.slept[idx] = False

    def _ds_thread(self):
        # Pushes fresh driver-station packets into HAL every 20 ms.
        # TODO: This needs to be fixed, breaks things when paused in IterativeRobot
        while True:
            time.sleep(0.020)
            mode_helpers.notify_new_ds_data()

    def _robot_thread(self):
        # Initialize physics time hook -- must be done on
        # robot thread, since it uses a threadlocal variable to work
        self.physics_controller.setup_main_thread()
        # setup things for the robot
        self.driver_station = wpilib.DriverStation.getInstance()
        try:
            wpilib.RobotBase.main(self.robot_class)
        finally:
            # always leave the sim in a disabled state when the code exits
            self.set_mode(SimManager.MODE_DISABLED)
| 32.258427 | 112 | 0.590038 | 5,544 | 0.965517 | 0 | 0 | 0 | 0 | 0 | 0 | 1,607 | 0.279868 |
bffb4ca3bb22fbd938fe7554fd918c6bfaaf6614 | 1,053 | py | Python | tests/core/engine/test_hybrid.py | halk/recowise | 3fd7c33182a79b3f7bea2d7ff1f3fd734764f04d | [
"MIT-0"
] | 1 | 2015-08-24T11:25:01.000Z | 2015-08-24T11:25:01.000Z | tests/core/engine/test_hybrid.py | halk/recowise | 3fd7c33182a79b3f7bea2d7ff1f3fd734764f04d | [
"MIT-0"
] | null | null | null | tests/core/engine/test_hybrid.py | halk/recowise | 3fd7c33182a79b3f7bea2d7ff1f3fd734764f04d | [
"MIT-0"
] | null | null | null | import unittest
from core.engine.hybrid import HybridEngine
from core.engine.simple import Engine
from core.taxonomy import Taxonomy
class HybridTestCase(unittest.TestCase):
    """Unit tests for the HybridEngine composite recommender."""

    def setUp(self):
        """Build a hybrid engine wrapping two simple component engines."""
        taxonomy = Taxonomy('base', {'key': 'value', 'key2': 'value2'})
        specs = {
            'component1': ('recommender1', 'http://localhost'),
            'component2': ('recommender2', 'http://localhost2'),
        }
        components = {
            key: Engine(name, taxonomy, {'base_url': base_url})
            for key, (name, base_url) in specs.items()
        }
        self.engine = HybridEngine('hybrid', taxonomy, components, {'test': 'value'})

    def test_components(self):
        """Both components are exposed under their configured keys."""
        components = self.engine.get_components()
        self.assertEqual(len(components), 2)
        self.assertEqual(components['component1'].name, 'recommender1')
        self.assertEqual(components['component2'].name, 'recommender2')

    def test_recommend(self):
        """The base hybrid engine leaves recommend() unimplemented."""
        self.assertRaises(NotImplementedError, self.engine.recommend, {})
| 39 | 76 | 0.661918 | 918 | 0.871795 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.203229 |
bfff911df94e2189787b5ca569a10579a5dcb4c2 | 11,611 | py | Python | DeepLearning/DeepLearning/02_Deep_ChoTH/deep_learning_7.py | ghost9023/DeepLearningPythonStudy | 4d319c8729472cc5f490935854441a2d4b4e8818 | [
"MIT"
] | 1 | 2019-06-27T04:05:59.000Z | 2019-06-27T04:05:59.000Z | DeepLearning/DeepLearning/02_Deep_ChoTH/deep_learning_7.py | ghost9023/DeepLearningPythonStudy | 4d319c8729472cc5f490935854441a2d4b4e8818 | [
"MIT"
] | null | null | null | DeepLearning/DeepLearning/02_Deep_ChoTH/deep_learning_7.py | ghost9023/DeepLearningPythonStudy | 4d319c8729472cc5f490935854441a2d4b4e8818 | [
"MIT"
] | null | null | null | # CHAPTER 7 ํฉ์ฑ๊ณฑ ์ ๊ฒฝ๋ง(CNN)
# ์ ์ฒด๊ตฌ์กฐ
# ํฉ์ฑ๊ณฑ๊ณ์ธต๊ณผ ํ๋ง๊ณ์ธต์ด ์ถ๊ฐ๋๋ค.
# ์ง๊ธ๊น์ง ๋ณธ ์ ๊ฒฝ๋ง์ ์ธ์ ํ๋ ๊ณ์ธต์ ๋ชจ๋ ๋ด๋ฐ๊ณผ ๊ฒฐํฉ๋์ด ์๋ค. ์ด๋ฅผ ์์ ์ฐ๊ฒฐ์ด๋ผ๊ณ ํ๋ฉฐ, ์์ ํ ์ฐ๊ฒฐ๋ ๊ณ์ธต์ Affine๊ณ์ธต์ด๋ผ๋ ์ด๋ฆ์ผ๋ก ๊ตฌํํ๋ค.
##########################################
# 2์ฐจ์ ๋ฐฐ์ด ํฉ์ฑ๊ณฑ #@#!@#!$!$!@#@!$%!@#!@#
##########################################
import numpy as np
data = np.array(range(0,81)).reshape(9,9)
filter = np.array(range(0,16)).reshape(4,4)
def find_pad(data, filter, s, oh):
    """Return the (possibly fractional) per-side padding needed so a
    stride-``s`` convolution of 2-D ``data`` with ``filter`` produces an
    output of height ``oh``.

    Solves ``oh = (h + 2p - fh) / s + 1`` for ``p``.
    """
    input_height = len(data)
    filter_height = len(filter)
    return ((oh - 1) * s + filter_height - input_height) / 2
def padding(data, x):
    """Zero-pad a 2-D array by ``x`` on every side.

    A fractional ``x`` (e.g. 1.5, as returned by find_pad) pads
    asymmetrically: ceil(x) before and floor(x) after along each axis.
    """
    if x % 1:
        before, after = int(x + 0.5), int(x - 0.5)
        width = ((before, after), (before, after))
    else:
        width = int(x)
    return np.pad(data, pad_width=width, mode='constant', constant_values=0)
def output(data, filter):
    """Valid (no padding, stride 1) 2-D cross-correlation of ``data``
    with ``filter``; returns a square array of the window sums."""
    fsize = len(filter)
    steps = len(data) - fsize + 1
    sums = [
        np.sum(data[row:row + fsize, col:col + fsize] * filter)
        for row in range(steps)
        for col in range(steps)
    ]
    return np.array(sums).reshape(steps, steps)
# Demo: pad the 9x9 input so a stride-1 convolution with the 4x4 filter
# keeps the output at 9x9 ("same" padding), then convolve.
f_p = find_pad(data, filter, 1, 9)  # stride (s) / desired output size (oh)
data = padding(data, f_p)
print('q3\n', output(data, filter))
print('q4\n', output(data, filter) * 3)  # scaling the output == scaling the filter
############################################
# 3์ฐจ์ ๋ฐฐ์ด ํฉ์ฑ๊ณฑ!@#!@#!@#!@#!@#@!#!
##############################################
import numpy as np
def find_pad(data, filter, s, oh):
    """Padding width so a stride-``s`` convolution of the (C, H, W)
    volume ``data`` with ``filter`` yields spatial output size ``oh``.
    Height is read from the first channel of each volume.
    """
    height = len(data[0])
    filter_height = len(filter[0])
    return ((oh - 1) * s + filter_height - height) / 2
def padding(data, x):
    """Zero-pad each channel of a (C, H, W) volume by ``x`` on every side.

    A fractional ``x`` (as returned by find_pad) pads ceil(x) before and
    floor(x) after along both spatial axes; the channel axis is never
    padded.

    Parameters
    ----------
    data : np.ndarray of shape (C, H, W)
    x : int or half-integer padding width

    Returns
    -------
    np.ndarray -- the padded volume
    """
    if x % 1 == 0:
        before = after = int(x)
    else:
        before = int(x + 0.5)
        after = int(x - 0.5)
    # One vectorized np.pad over all channels replaces the original
    # per-channel Python loop (identical result, less duplication).
    pad_width = ((0, 0), (before, after), (before, after))
    return np.pad(data, pad_width=pad_width, mode='constant', constant_values=0)
def output(data, filter):
    """Per-channel valid cross-correlation of a (C, H, W) volume.

    Each channel of ``data`` is convolved (stride 1, no padding) with the
    matching channel of ``filter``; channels are NOT summed, so the
    result has shape (C, OH, OW).

    BUG FIX: in the original a comment had spilled onto its own
    uncommented line, which made the function a syntax error.
    """
    channels = len(data)
    fsize = len(filter[0])
    # Input and filter share the same square spatial layout; the number
    # of valid window positions per axis:
    steps = len(data[0]) - fsize + 1
    sums = [
        np.sum(data[ch, r:r + fsize, c:c + fsize] * filter[ch])
        for ch in range(channels)
        for r in range(steps)
        for c in range(steps)
    ]
    return np.array(sums).reshape(channels, steps, steps)
# Demo: two-channel 4x4 input and two-channel 3x3 filter.
data = np.array([[[1,2,0,0], [0,1,-2,0], [0,0,1,2], [2,0,0,1]], [[1,0,0,0], [0,0,-2,-1], [3,0,1,0], [2,0,0,1]]])
filter = np.array([[[-1,0,3], [2,0,-1], [0,2,1]], [[0,0,0], [2,0,-1], [0,-2,1]]])
f_p = find_pad(data, filter, 1, 3)  # stride (s) / desired output size (oh)
data = padding(data, f_p)
print('q\n', output(data, filter))
# ๋ธ๋ก์ผ๋ก ์๊ฐํ๊ธฐ
# 3์ฐจ์์ ํฉ์ฑ๊ณฑ ์ฐ์ฐ์ ๋ฐ์ดํฐ์ ํํฐ๋ฅผ ์ง์ก๋ฉด์ฒด ๋ธ๋ก์ด๋ผ๊ณ ์๊ฐํ๋ฉด ์ฝ๋ค.
# 3์ฐจ์ ๋ฐ์ดํฐ๋ฅผ ๋ค์ฐจ์ ๋ฐฐ์ด๋ก ๋ํ๋ผ ๋๋ (์ฑ๋, ๋์ด, ๋๋น) ์์๋ก ์ด๋ค.
# ์ฑ๋:C, ๋์ด:H, ๋๋น:W // ํํฐ์ฑ๋:C, ํํฐ๋์ด:FH, ํํฐ๋๋น:FW
# ํฉ์ฑ๊ณฑ์์ ์ถ๋ ฅ๋๋ ๋ฐ์ดํฐ๋ ํ์ฅ์ ํน์ง๋งต์ด๋ค.
# ๊ทธ๋ ๋ค๋ฉด ํฉ์ฑ๊ณฑ ์ฐ์ฐ์ ์ถ๋ ฅ์ผ๋ก ๋ค์์ ์ฑ๋์ ๋ด๋ณด๋ด๋ ค๋ฉด ์ด๋ป๊ฒ ํด์ผํ ๊น?
# ๊ทธ ๋ต์ ํํฐ(๊ฐ์ค์น)๋ฅผ ๋ค์ ์ฌ์ฉํ๋ ๊ฒ์ด๋ค.
# ํํฐ๋ฅผ FN๊ฐ ์ ์ฉํ๋ฉด ์ถ๋ ฅ๋งต๊ณ FN๊ฐ๊ฐ ๋๋ค. ๊ทธ๋ฆฌ๊ณ FN๊ฐ์ ๋งต์ ๋ชจ์ผ๋ฉด ํ์์ด (FN, OH, OW)์ธ ๋ธ๋ก์ด ์์ฑ๋๋ค.
# (์ด ์์ฑ๋ ๋ธ๋ก์ ๋ค์ ๊ณ์ธต์ผ๋ก ๋๊ธฐ๊ฒ ๋ค๋ ๊ฒ์ด CNN์ ์ฒ๋ฆฌํ๋ฆ์ด๋ค.)
# ์์ ์์ฒ๋ผ ํฉ์ฑ๊ณฑ ์ฐ์ฐ์์๋ ํํฐ์ ์๋ ๊ณ ๋ คํด์ผํ๋ค. ํํฐ์ ๊ฐ์ค์น ๋ฐ์ดํฐ๋ 4์ฐจ์๋ฐ์ดํฐ์ด๋ฉฐ
# (์ถ๋ ฅ์ฑ๋์, ์
๋ ฅ์ฑ๋์, ๋์ด, ๋๋น) ์์ผ๋ก ์ด๋ค. p.238 ์ฐธ์กฐ!
# ํธํฅ์ ์ฑ๋ ํ๋์ ๊ฐ ํ๋์ฉ์ผ๋ก ๊ตฌ์ฑ๋๋ค. ํ์์ด ๋ค๋ฅด์ง๋ง ๋๋ฌธ์ ๋ํ์ด์ ๋ธ๋ก๋์บ์คํธ ๊ธฐ๋ฅ์ ์ด์ฉํด ์ฝ๊ฒ ๊ตฌํํ ์ ์๋ค.
# ๋ฐฐ์น์ฒ๋ฆฌ
# ํฉ์ฑ๊ณฑ ์ฐ์ฐ๋ ๋ง์ฐฌ๊ฐ์ง๋ก ๋ฐฐ์น์ฒ๋ฆฌ๋ฅผ ํตํด ์ง์ํ๋ ค๊ณ ํ๋ค.
# ์
๋ ฅ๋ฐ์ดํฐ(N,C,H,W) --> ํํฐ(FN,C,FH,FW) --> (N,FN,OH,OW) + ํธํฅ(FN,1,1) --> ์ถ๋ ฅ๋ฐ์ดํฐ(N,FN,OH,OW)
# ์ด์ฒ๋ผ ๋ฐ์ดํฐ๋ 4์ฐจ์ ํ์์ ๊ฐ์ง ์ฑ ๊ฐ ๊ณ์ธต์ ํ๊ณ ํ๋ฅธ๋ค.
# ์ฌ๊ธฐ์ ์ฃผ์ํ ์ ์ ์ ๊ฒฝ๋ง์ 4์ฐจ์ ๋ฐ์ดํฐ๊ฐ ํ๋ ํ๋ฅผ ๋๋ง๋ค ๋ฐ์ดํฐ N๊ฐ์ ๋ํ ํฉ์ฑ๊ณฑ ์ฐ์ฐ์ด ์ด๋ค์ง๋ค๋ ๊ฒ์ด๋ค.
# ์ฆ Nํ๋ถ์ ์ฒ๋ฆฌ๋ฅผ ํ๋ฒ์ ์ํํ๋ค.
# ํ๋ง๊ณ์ธต
# ํ๋ง์ ๊ฐ๋ก, ์ธ๋ก ๋ฐฉํฅ์ ๊ณต๊ฐ์ ์ค์ด๋ ์ฐ์ฐ์ด๋ค. ์ฆ ๊ฐ๋ก์ธ๋ก์ ๊ฐ์ ๋ํํ๋ ๊ฐ์ ๋ฝ๋๋ค.
# p.240 ๊ทธ๋ฆผ์ 2x2 ์ต๋ํ๋ง์ ์คํธ๋ผ์ด๋ 2๋ก ์ฒ๋ฆฌํ๋ ์์์ด๋ค.
# ์ต๋ํ๋ง์ด๋ ์ต๋๊ฐ์ ๊ตฌํ๋ ์ฐ์ฐ์ผ๋ก ์ ํด์ง ์์ญ์์ ์ต๋๊ฐ์ ๊บผ๋ธ๋ค.
# ์ฐธ๊ณ ๋ก, ํ๋ง์ ์๋์ฐ ํฌ๊ธฐ์ ์คํธ๋ผ์ดํธ๋ ๊ฐ์ ๊ฐ์ผ๋ก ์ค์ ํ๋ ๊ฒ์ด ๋ณดํต์ด๋ค. ์ฆ mEcEํ๊ฒ ํ๊ธฐ ์ํด์
# ํ๊ท ํ๋ง๋ ์์ง๋ง ์ด๋ฏธ์ง ์ธ์ ๋ถ์ผ์์๋ ์ฃผ๋ก ์ต๋ํ๋ง์ ์ด์ฉํ๋ค.
# ํ๋ง๊ณ์ธต์ ํน์ง
# 1. ํ์ตํด์ผํ ๋งค๊ฐ๋ณ์๊ฐ ์๋ค.
# ํ๋ง๊ณ์ธต์ ํฉ์ฑ๊ณฑ๊ณ์ธต๊ณผ ๋ฌ๋ฆฌ ํ์ตํด์ผํ ๋งค๊ฐ๋ณ์๊ฐ ์๋ค.
# 2. ์ฑ๋ ์๊ฐ ๋ณํ์ง ์๋๋ค.
# ํ๋ง์ฐ์ฐ์ ์
๋ ฅ๋ฐ์ดํฐ์ ์ฑ๋ ์ ๊ทธ๋๋ก ์ถ๋ ฅ๋ฐ์ดํฐ๋ก ๋ด๋ณด๋ธ๋ค.
# 3. ์
๋ ฅ์ ๋ณํ๋ ์ํฅ์ ์ ๊ฒ ๋ฐ๋๋ค.
# ์
๋ ฅ๋ฐ์ดํฐ๊ฐ ์กฐ๊ธ ๋ณํด๋ ํ๋ง์ ๊ฒฐ๊ณผ๋ ์ ๋ณํ์ง ์๋๋ค.
# ํฉ์ฑ๊ณฑ ํ๋ง๊ณ์ธต ๊ตฌํํ๊ธฐ
# 4์ฐจ์ ๋ฐฐ์ด
# ๋ฐ์ดํฐ์ ํ์์ด (10,1,28,28)์ด๋ผ๋ฉด ์ด๋ ๋์ด 28, ๋๋น28, ์ฑ๋1์ธ ๋ฐ์ดํฐ10๊ฐ๋ผ๋ ๋ป์ด๋ค.
# ์ด๋ฅผ ํ์ด์ฌ์ผ๋ก ๊ตฌํํ๋ฉด ๋ค์๊ณผ ๊ฐ๋ค.
import numpy as np
# Demo: a batch of 10 single-channel 28x28 images as a 4-D array.
x = np.random.rand(10, 1, 28, 28)
x.shape        # (10, 1, 28, 28)
x[0].shape     # first image: (1, 28, 28)
x[1].shape     # second image: (1, 28, 28)
x[0,0]  # access the first channel of the first image like this,
x[0][0]  # ... or equivalently with chained indexing.
###########################
# im2col๋ก ๋ฐ์ดํฐ ์ ๊ฐํ๊ธฐ
###########################
import numpy as np
def im2col(input_data, filter_h, filter_w, stride=1, pad=0):
    """Flatten a batch of images into a 2-D array of filter windows.

    Each row of the result is one (C * filter_h * filter_w)-element
    receptive field, so a convolution becomes a single matrix product.

    Parameters
    ----------
    input_data : 4-D array of shape (N, C, H, W)
    filter_h : filter height
    filter_w : filter width
    stride : stride
    pad : zero-padding applied to both spatial axes

    Returns
    -------
    col : 2-D array of shape (N * out_h * out_w, C * filter_h * filter_w)
    """
    N, C, H, W = input_data.shape
    out_h = (H + 2 * pad - filter_h) // stride + 1
    out_w = (W + 2 * pad - filter_w) // stride + 1
    # Pad only the spatial axes.
    img = np.pad(input_data, [(0, 0), (0, 0), (pad, pad), (pad, pad)], 'constant')
    col = np.zeros((N, C, filter_h, filter_w, out_h, out_w))
    # For each filter offset (y, x), copy the strided view of the image
    # covering every output position at once.
    for y in range(filter_h):
        y_max = y + stride * out_h
        for x in range(filter_w):
            x_max = x + stride * out_w
            col[:, :, y, x, :, :] = img[:, :, y:y_max:stride, x:x_max:stride]
    # Reorder to (N, out_h, out_w, C, fh, fw) and flatten one window per row.
    col = col.transpose(0, 4, 5, 1, 2, 3).reshape(N * out_h * out_w, -1)
    return col
##################################
##################################
##################################
import sys, os
sys.path.append(os.pardir)
from common.util import im2col
# Demo: one 3x7x7 image with a 5x5 window -> 3*3=9 windows of 3*5*5=75 values.
x1 = np.random.rand(1,3,7,7)
col1 = im2col(x1, 5, 5, stride=1, pad=0)
print(col1.shape)  # (9, 75)
# Ten images -> 10 * 9 = 90 rows, same 75 columns.
x2 = np.random.rand(10, 3, 7, 7)
col2 = im2col(x2, 5, 5, stride=1, pad=0)
print(col2.shape)  # (90, 75)
####################################################
################## ํฉ์ฑ๊ณฑ๊ณ์ธต ๊ตฌํ ###################
####################################################
class Convolution:
    """Convolution layer whose forward pass is computed via im2col.

    Parameters
    ----------
    W : np.ndarray of shape (FN, C, FH, FW) -- filter weights
    b : np.ndarray of shape (FN,) -- per-filter bias
    stride, pad : int
    """

    def __init__(self, W, b, stride=1, pad=0):
        self.W = W
        self.b = b
        self.stride = stride
        self.pad = pad

    def forward(self, x):
        """Forward pass for a batch ``x`` of shape (N, C, H, W);
        returns the feature map of shape (N, FN, out_h, out_w)."""
        FN, C, FH, FW = self.W.shape
        N, C, H, W = x.shape
        out_h = int(1 + (H + 2 * self.pad - FH) / self.stride)
        # BUG FIX: the original used FH here, which is wrong for
        # non-square filters; the output width must use FW.
        out_w = int(1 + (W + 2 * self.pad - FW) / self.stride)

        # Unroll input windows into rows and filters into columns so a
        # single matrix product performs every window at once.
        col = im2col(x, FH, FW, self.stride, self.pad)
        col_W = self.W.reshape(FN, -1).T
        out = np.dot(col, col_W) + self.b
        # (N*out_h*out_w, FN) -> (N, FN, out_h, out_w)
        out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)
        # BUG FIX: the original ended with a bare `return`, discarding out.
        return out
##################################################
################## ํ๋ง๊ณ์ธต ๊ตฌํ ###################
##################################################
class Pooling:
    """Max-pooling layer implemented with im2col.

    pool_h, pool_w : pooling window size
    stride, pad : as in the Convolution layer
    """

    def __init__(self, pool_h, pool_w, stride=1, pad=0):
        self.pool_h = pool_h
        self.pool_w = pool_w
        self.stride = stride
        self.pad = pad

    def forward(self, x):
        """Max-pool a batch ``x`` of shape (N, C, H, W)."""
        N, C, H, W = x.shape
        window = self.pool_h * self.pool_w
        out_h = int(1 + (H - self.pool_h) / self.stride)
        out_w = int(1 + (W - self.pool_w) / self.stride)

        # (1) unroll: one pooling window per row
        rows = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
        rows = rows.reshape(-1, window)
        # (2) the max of each row is the pooled value
        pooled = rows.max(axis=1)
        # (3) shape back to (N, C, out_h, out_w)
        return pooled.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)
# ํ๋ง๊ณ์ธต ๊ตฌํ์ [๊ทธ๋ฆผ 7-22]์ ๊ฐ์ด ๋ค์์ ์ธ ๋จ๊ณ๋ก ์งํํฉ๋๋ค.
# 1. ์
๋ ฅ๋ฐ์ดํฐ๋ฅผ ์ ๊ฐํ๋ค.
# 2. ํ ๋ณ ์ต๋๊ฐ์ ๊ตฌํ๋ค.
# 3. ์ ์ ํ ๋ชจ์์ผ๋ก ์ฑํํ๋ค.
# ์์ ์ฝ๋์์์ ๊ฐ์ด ๊ฐ ๋จ๊ณ๋ ํ ๋์ค ์ ๋๋ก ๊ฐ๋จํ ๊ตฌํ๋ฉ๋๋ค.
# CNN ๊ตฌํํ๊ธฐ
# ํฉ์ฑ๊ณฑ ๊ณ์ธต๊ณผ ํ๋ง๊ณ์ธต์ ์กฐํฉํ์ฌ ์๊ธ์จ ์ซ์๋ฅผ ์ธ์ํ๋ CNN์ ์กฐ๋ฆฝํ ์ ์๋ค.
# ๋จ์ CNN์ ๋คํธ์ํฌ ๊ตฌ์ฑ
# conv -> relu -> pooling -> affine -> relu -> affine -> softmax ->
# ์์ ์์๋ก ํ๋ฅด๋ CNN ์ ๊ฒฝ๋ง ๊ตฌํ
# ์ด๊ธฐํ ๋ ๋ฐ๋ ์ธ์
# input_dim - ์
๋ ฅ๋ฐ์ดํฐ(์ฑ๋ ์, ๋์ด, ๋๋น)์ ์ฐจ์
# conv_param - ํฉ์ฑ๊ณฑ๊ณ์ธต์ ํ์ดํผํ๋ผ๋ฏธํฐ(๋์
๋๋ฆฌ). ๋์
๋๋ฆฌ์ ํค๋ ๋ค์๊ณผ ๊ฐ๋ค.
# filter_num - ํํฐ ์
# filter_size - ํํฐํฌ๊ธฐ
# stride - ์คํธ๋ผ์ด๋
# pad - ํจ๋ฉ
# hidden_size - ์๋์ธต(์์ ์ฐ๊ฒฐ)์ ๋ด๋ฐ์
# output_size - ์ถ๋ ฅ์ธต(์์ ์ฐ๊ฒฐ)์ ๋ด๋ฐ์
# weight_init_std - ์ด๊ธฐํ ๋์ ๊ฐ์ค์น ํ์คํธ์ฐจ
# ์ฌ๊ธฐ์ ํฉ์ฑ๊ณฑ ๊ณ์ธต์ ๋งค๊ฐ๋ณ์๋ ๋์
๋๋ฆฌ ํํ๋ก ์ฃผ์ด์ง๋ค.(conv_param)
# ์๋ฅผ ๋ค์ด {'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1}์ฒ๋ผ ์ ์ฅ๋๋ค.
class SimpleConvNet:
    """Simple CNN: conv - relu - pool - affine - relu - affine - softmax.

    Parameters
    ----------
    input_dim : tuple
        Input dimensions (channels, height, width).
    conv_param : dict
        Convolution-layer hyperparameters with keys
        'filter_num', 'filter_size', 'pad', 'stride'.
    hidden_size : int
        Number of neurons in the fully connected hidden layer.
    output_size : int
        Number of output neurons (classes).
    weight_init_std : float
        Standard deviation used to initialize the weights.
    """

    def __init__(self, input_dim=(1, 28, 28),
                 # BUG FIX: the original was missing the comma after
                 # 'filter_size': 5, which was a syntax error.
                 conv_param={'filter_num': 30, 'filter_size': 5,
                             'pad': 0, 'stride': 1},
                 hidden_size=100, output_size=10, weight_init_std=0.01):
        from collections import OrderedDict  # never imported at module level

        # Unpack the convolution hyperparameters from the dict.
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        # BUG FIX: the original divided by the builtin `filter` instead
        # of the stride when computing the conv output size.
        conv_output_size = (input_size - filter_size + 2 * filter_pad) / filter_stride + 1
        # Flattened size after the 2x2/stride-2 pooling layer below.
        pool_output_size = int(filter_num * (conv_output_size / 2) * (conv_output_size / 2))

        # Weight initialization: W1/b1 conv, W2/b2 hidden, W3/b3 output.
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(
            filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)
        self.params['W2'] = weight_init_std * np.random.randn(pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)
        self.params['W3'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        # Layers are registered in forward order.
        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],
                                           conv_param['stride'], conv_param['pad'])
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
        # BUG FIX: stored as `last_layers` but read as `last_layer` by
        # loss()/gradient(); keep the name they actually use.
        self.last_layer = SoftmaxWithLoss()

    def predict(self, x):
        """Forward pass through every layer except the loss layer."""
        # BUG FIX: the original iterated a nonexistent global `layers`.
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        """Softmax cross-entropy loss of the prediction for x vs labels t."""
        y = self.predict(x)
        return self.last_layer.forward(y, t)

    def gradient(self, x, t):
        """Gradients of all parameters via backpropagation
        (forward pass followed by a reversed backward pass)."""
        # forward
        self.loss(x, t)
        # backward
        dout = 1
        dout = self.last_layer.backward(dout)
        layers = list(self.layers.values())
        layers.reverse()
        for layer in layers:
            dout = layer.backward(dout)
        grads = {}
        grads['W1'] = self.layers['Conv1'].dW
        # BUG FIX: bias gradients were copied from .dW; they live in .db.
        grads['b1'] = self.layers['Conv1'].db
        grads['W2'] = self.layers['Affine1'].dW
        grads['b2'] = self.layers['Affine1'].db
        grads['W3'] = self.layers['Affine2'].dW
        grads['b3'] = self.layers['Affine2'].db
        return grads
# CNN ์๊ฐํํ๊ธฐ
# CNN์ ๊ตฌ์ฑํ๋ ํฉ์ฑ๊ณฑ๊ณ์ธต์ ์
๋ ฅ์ผ๋ก ๋ฐ์ ์ด๋ฏธ์ง์์ ๋ณด๊ณ ์๋ ๊ฒ์ด ๋ฌด์์ธ์ง ์์๋ณด๋๋ก ํ์!
# 1๋ฒ์งธ ์ธต์ ๊ฐ์ค์น ์๊ฐํํ๊ธฐ
# 1๋ฒ์งธ ์ธต์ ํฉ์ฑ๊ณฑ ๊ณ์ธต์ ๊ฐ์ค์น๋ (30, 1, 5, 5)์ด๋ค. - ํํฐ30๊ฐ, ์ฑ๋1๊ฐ, 5X5 ํฌ๊ธฐ - ํ์์กฐํํฐ!
# ํ์ต์ ๋ง์น ํํฐ๋ ๊ท์น์ฑ ์๋ ์ด๋ฏธ์ง๊ฐ ๋๋ค.
# ์ธต ๊น์ด์ ๋ฐ๋ฅธ ์ถ์ถ์ ๋ณด ๋ณํ
# ๊ณ์ธต์ด ๊น์ด์ง ์๋ก ์ถ์ถ๋๋ ์ ๋ณด(์ ํํ๋ ๊ฐํ๊ฒ ๋ฐ์ํ๋ ๋ด๋ฐ)๋ ๋ ์ถ์ํ ๋๋ค.
# ์ธต์ด ๊น์ด์ง๋ฉด์ ๋ ๋ณต์กํ๊ณ ์ถ์ํ๋ ์ ๋ณด๊ฐ ์ถ์ถ๋๋ค. ์ฒ์์ธต์ ๋จ์ํ ์์ง์ ๋ฐ์ํ๊ณ ์ด์ด์ ํ
์ค์ณ์ ๋ฐ์ํ๋ค.
# ์ธต์ด ๊น์ด์ง๋ฉด์ ๋ด๋ฐ์ด ๋ฐ์ํ๋ ๋์์ด ๋จ์ํ ๋ชจ์์์ '๊ณ ๊ธ'์ ๋ณด๋ก ๋ณํํด๊ฐ๋ค.
# ๋ํ์ ์ธ CNN
# LeNet@@@@์ ์๊ธ์จ ์ซ์๋ฅผ ์ธ์ํ๋ ๋คํธ์ํฌ๋ก 1998๋
์ ์ ์๋์๋ค.
# ํฉ์ฑ๊ณฑ๊ณ์ธต๊ณผ ํ๋ง ๊ณ์ธต(์ ํํ๋ ์์๋ฅผ ์ค์ด๊ธฐ๋ง ํ๋ ์๋ธ์ํ๋ง)์ ๋ฐ๋ณตํ๊ณ , ๋ง์ง๋ง์ผ๋ก ์์ ์ฐ๊ฒฐ ๊ณ์ธต์ ๊ฑฐ์น๋ฉด์ ๊ฒฐ๊ณผ๋ฅผ ์ถ๋ ฅํ๋ค.
# LeNet๊ณผ 'ํ์ฌ์ CNN'์ ๋น๊ตํ๋ฉด ๋ช๊ฐ์ง ์ฐจ์ด๊ฐ ์๋ค.
# 1. ํ์ฑํํจ์์ ์ฐจ์ด - ๋ฅด๋ท์ ์๊ทธ๋ชจ์ด๋, ํ์ฌ๋ ๋ ๋ฃจ
# 2. ๋ฅด๋ท์ ์๋ธ์ํ๋ง์ ํ์ฌ ์ค๊ฐ ๋ฐ์ดํฐ์ ํฌ๊ธฐ๊ฐ ๋ฌ๋ผ์ง์ง๋ง ํ์ฌ๋ ์ต๋ํ๋ง์ด ์ฃผ๋ฅ์ด๋ค.
# AlexNet์ ๋ฅ๋ฌ๋ ์ดํ์ ์ผ์ผํค๋ ๋ฐ ํฐ ์ญํ ์ ํ๋ค.
# AlexNet์ ํฉ์ฑ๊ณฑ๊ณ์ธต๊ณผ ํ๋ง๊ณ์ธต์ ๊ฑฐ๋ญํ๋ฉฐ ๋ง์ง๋ง์ผ๋ก ์์ ์ฐ๊ฒฐ ๊ฒ์ธต์ ๊ฑฐ์ณ ๊ฒฐ๊ณผ๋ฅผ ์ถ๋ ฅํ๋ค.
# AlexNet์ ํ์ฑํํจ์๋ก ๋ ๋ฃจ๋ฅผ ์ด์ฉํ๋ค.
# LRN์ด๋ผ๋ ๊ตญ์์ ์ ๊ทํ๋ฅผ ์ค์ํ๋ ๊ณ์ธต์ ์ด์ฉํ๋ค.
# ๋๋กญ์์์ ์ฌ์ฉํ๋ค.
# ์ ๋ฆฌ
# CNN์ ์ง๊ธ๊น์ง์ ์์ ์ฐ๊ฒฐ๊ณ์ธต ๋คํธ์ํฌ์ ํฉ์ฑ๊ณฑ ๊ณ์ธต๊ณผ ํ๋ง๊ณ์ธต์ ์๋ก ์ถ๊ฐํ๋ค.
# ํฉ์ฑ๊ณฑ๊ณ์ธต๊ณผ ํ๋ง๊ณ์ธต์ im2col์ ์ด์ฉํ๋ฉด ๊ฐ๋จํ๊ณ ํจ์จ์ ์ผ๋ก ๊ตฌํํ ์ ์๋ค.
# CNN์ ์๊ฐํํด๋ณด๋ฉด ๊ณ์ธต์ด ๊น์ด์ง ์๋ก ๊ณ ๊ธ์ ๋ณด๊ฐ ์ถ์ถ๋๋ ๋ชจ์ต์ ํ์ธํ ์ ์๋ค.
# ๋ํ์ ์ธ CNN ์๋ ๋ฅด๋ท๊ณผ ์๋ ์ค๋ท์ด ์๋ค.
# ๋ฅ๋ฌ๋์ ๋ฐ์ ์๋ ๋น
๋ฐ์ดํฐ์ GPU๊ฐ ๊ณตํํ๋ค.
| 35.39939 | 113 | 0.551977 | 1,439 | 0.09122 | 0 | 0 | 0 | 0 | 0 | 0 | 8,887 | 0.56336 |
8701baa38dbace3db22665ae3165e5ae00163f86 | 1,303 | py | Python | handlers/userProvider.py | saraheisa/RishaCar | 5e74b4996f956944fdf6733fc13efa0cc37cd7e1 | [
"MIT"
] | 1 | 2020-01-02T09:39:55.000Z | 2020-01-02T09:39:55.000Z | handlers/userProvider.py | prosarahgamal/RishaCar | 5e74b4996f956944fdf6733fc13efa0cc37cd7e1 | [
"MIT"
] | 3 | 2021-02-08T20:53:43.000Z | 2021-06-02T00:13:47.000Z | handlers/userProvider.py | prosarahgamal/RishaCar | 5e74b4996f956944fdf6733fc13efa0cc37cd7e1 | [
"MIT"
] | null | null | null | import json
import logging
from bson import json_util
from handlers.base import BaseHandler
from lib.DBConnection import DriveFunctions
from lib.DBConnection import UserFunctions
logger = logging.getLogger('rishacar.' + __name__)
class UserProviderHandler(BaseHandler):
    '''Looks up which auth provider a user registered with.

    POST body: JSON with an "email" key.
    Responses: 200 {"isExist": true, "provider": ...} when found,
               200 {"isExist": false} when the email is unknown,
               400 on missing body/email, 500 on a database error.
    '''

    async def post(self):
        # Reject empty request bodies outright.
        if self.request.body:
            data = json.loads(self.request.body)
            if 'email' in data:
                if await self.email_exists(data['email']):
                    # Email registered: fetch the full record to report
                    # which provider (e.g. OAuth source) it belongs to.
                    userFunc = UserFunctions()
                    user = await userFunc.getUser(email=data['email'])
                    if user:
                        res = {"isExist":True, "provider":user['provider']}
                        self.set_status(200)
                        self.write(res)
                        self.finish()
                    else:
                        # email_exists said yes but the record fetch
                        # failed -- treat as a database inconsistency.
                        self.set_status(500)
                        self.write({"message":"database error"})
                        self.finish()
                else:
                    # Unknown email is a valid answer, not an error.
                    res = {"isExist":False}
                    self.set_status(200)
                    self.write(res)
                    self.finish()
            else:
                self.set_status(400)
                self.write({"message":"missing email"})
                self.finish()
        else:
            self.set_status(400)
            self.write({"message":"missing data"})
            self.finish()

    async def email_exists(self, email):
        # Thin wrapper around the user store's email lookup.
        userFunc = UserFunctions()
        return await userFunc.getEmail(email)
| 28.326087 | 63 | 0.606293 | 1,069 | 0.820414 | 0 | 0 | 0 | 0 | 1,014 | 0.778204 | 142 | 0.108979 |
8701d9777abb9867105564e069f85e55ea2668a5 | 806 | py | Python | project/settings/general/databases.py | danielbraga/hcap | a3ca0d6963cff19ed6ec0436cce84e2b41615454 | [
"MIT"
] | null | null | null | project/settings/general/databases.py | danielbraga/hcap | a3ca0d6963cff19ed6ec0436cce84e2b41615454 | [
"MIT"
] | null | null | null | project/settings/general/databases.py | danielbraga/hcap | a3ca0d6963cff19ed6ec0436cce84e2b41615454 | [
"MIT"
] | null | null | null | """
django:
https://docs.djangoproject.com/en/3.0/ref/settings/#databases
"""
from ..env import env
from .paths import SQLITE_PATH
# Database backend is selected at deploy time via HCAP__DATABASE_TYPE.
DATABASE_TYPE = env("HCAP__DATABASE_TYPE", default="sqlite")

if DATABASE_TYPE == "sqlite":
    # Single-file SQLite database (default; suited for development).
    DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": str(SQLITE_PATH)}}
elif DATABASE_TYPE == "postgresql":
    # PostgreSQL configured entirely from HCAP__POSTGRES_* variables.
    DATABASES = {
        "default": {
            "ENGINE": "django.db.backends.postgresql",
            "NAME": env("HCAP__POSTGRES_DB", default="hcap"),
            "USER": env("HCAP__POSTGRES_USER", default="pydemic"),
            "PASSWORD": env("HCAP__POSTGRES_PASSWORD", default="pydemic"),
            "HOST": env("HCAP__POSTGRES_HOST", default="postgres"),
            "PORT": env("HCAP__POSTGRES_PORT", default=5432),
        }
    }
else:
    # BUG FIX: previously an unrecognized value silently left DATABASES
    # undefined, producing a confusing failure far from the cause.
    raise ValueError(
        "Unsupported HCAP__DATABASE_TYPE %r; expected 'sqlite' or 'postgresql'"
        % (DATABASE_TYPE,)
    )
| 32.24 | 95 | 0.626551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 404 | 0.501241 |
8703dc62fa5bebbb67aa2d61d8d7ec816c00964c | 16,217 | py | Python | wbia_cnn/netrun.py | WildMeOrg/wbia-plugin-cnn | c31ff09e77b731be4dffc348a1c40303e8c05994 | [
"Apache-2.0"
] | null | null | null | wbia_cnn/netrun.py | WildMeOrg/wbia-plugin-cnn | c31ff09e77b731be4dffc348a1c40303e8c05994 | [
"Apache-2.0"
] | null | null | null | wbia_cnn/netrun.py | WildMeOrg/wbia-plugin-cnn | c31ff09e77b731be4dffc348a1c40303e8c05994 | [
"Apache-2.0"
] | 1 | 2021-05-27T15:33:26.000Z | 2021-05-27T15:33:26.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
FIXME:
sometimes you have to chown -R user:user ~/.theano or run with sudo the
first time after roboot, otherwise you get errors
CommandLineHelp:
python -m wbia_cnn --tf netrun <networkmodel>
--dataset, --ds = <dstag>:<subtag>
dstag is the main dataset name (eg PZ_MTEST), subtag are parameters to
modify (max_examples=3)
--weights, -w = \|new\|<checkpoint_tag>\|<dstag>:<checkpoint_tag> (default: <checkpoint_tag>)
new will initialize clean weights.
a checkpoint tag will try to to match a saved model state in the history.
can load weights from an external dataset.
<checkpoint_tag> defaults to current
--arch, -a = <archtag>
model architecture tag (eg siaml2_128, siam2stream, viewpoint)
--device = <processor>
sets theano device flag to a processor like gpu0, gpu1, or cpu0
"""
import logging
from wbia_cnn import models
from wbia_cnn import ingest_data
from wbia_cnn import experiments
import utool as ut
import sys
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger()
# Aliases for checkpoint tags typed on the command line; both the empty
# string and 'current' resolve to "use the latest saved state".
# This is more of a history tag.
CHECKPOINT_TAG_ALIAS = {
    'current': None,
    '': None,
}

# second level of alias indirection
# This is more of a dataset tag: short names mapping to full
# "<db>;dict(...)" dataset specification strings.
DS_TAG_ALIAS2 = {
    'flankhack': "dict(acfg_name='ctrl:pername=None,excluderef=False,contributor_contains=FlankHack', colorspace='gray', db='PZ_Master1')",
    'pzmtest-bgr': "PZ_MTEST;dict(colorspace='bgr', controlled=True, max_examples=None, num_top=None)",  # NOQA
    'pzmtest': "PZ_MTEST;dict(colorspace='gray', controlled=True, max_examples=None, num_top=None)",  # NOQA
    'gz-gray': "GZ_ALL;dict(colorspace='gray', controlled=False, max_examples=None, num_top=None)",  # NOQA
    'liberty': "liberty;dict(detector='dog', pairs=250000)",
    'combo': 'combo_vdsujffw',
    'timectrl_pzmaster1': "PZ_Master1;dict(acfg_name='timectrl', colorspace='gray', min_featweight=0.8)",  # NOQA
    'pzm2': "PZ_Master1;dict(acfg_name='timectrl:pername=None', colorspace='gray', min_featweight=0.8)",  # NOQA
    'pzm3': "PZ_Master1;dict(acfg_name=None, colorspace='gray', controlled=True, min_featweight=0.8)",
    #'pzm3' : "PZ_Master1;dict(acfg_name='default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok', colorspace='gray', min_featweight=0.8)",  # NOQA
    'pzm4': "PZ_Master1;dict(acfg_name='default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok', colorspace='gray', min_featweight=0.8)",
}
def netrun():
    r"""
    Main CLI entry point: resolves dataset/architecture/weight tags from
    the command line, builds the model, then trains, tests, and/or
    publishes it depending on the requested actions.

    CommandLine:
        # --- UTILITY
        python -m wbia_cnn --tf get_juction_dpath --show

        # --- DATASET BUILDING ---
        # Build Dataset Aliases
        python -m wbia_cnn --tf netrun --db PZ_MTEST --acfg ctrl --ensuredata --show
        python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg timectrl --ensuredata
        python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg timectrl:pername=None --ensuredata
        python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg timectrl:pername=None --ensuredata
        python -m wbia_cnn --tf netrun --db mnist --ensuredata --show
        python -m wbia_cnn --tf netrun --db mnist --ensuredata --show --datatype=category
        python -m wbia_cnn --tf netrun --db mnist --ensuredata --show --datatype=siam-patch

        python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg ctrl:pername=None,excluderef=False,contributor_contains=FlankHack --ensuredata --show --datatype=siam-part

        # Parts based datasets
        python -m wbia_cnn --tf netrun --db PZ_MTEST --acfg ctrl --datatype=siam-part --ensuredata --show

        # Patch based dataset (big one)
        python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok --ensuredata --show --vtd
        python -m wbia_cnn --tf netrun --ds pzm4 --weights=new --arch=siaml2_128 --train --monitor
        python -m wbia_cnn --tf netrun --ds pzm4 --arch=siaml2_128 --test
        python -m wbia_cnn --tf netrun --ds pzm4 --arch=siaml2_128 --veryverbose --no-flask

        # --- TRAINING ---
        python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok --weights=new --arch=siaml2_128 --train --monitor

        python -m wbia_cnn --tf netrun --ds timectrl_pzmaster1 --acfg ctrl:pername=None,excluderef=False,contributor_contains=FlankHack --train --weights=new --arch=siaml2_128  --monitor  # NOQA
        python -m wbia_cnn --tf netrun --ds timectrl_pzmaster1 --acfg ctrl:pername=None,excluderef=False --train --weights=new --arch=siaml2_128  --monitor  # NOQA
        python -m wbia_cnn --tf netrun --ds pzmtest --weights=new --arch=siaml2_128 --train --monitor --DEBUG_AUGMENTATION
        python -m wbia_cnn --tf netrun --ds pzmtest --weights=new --arch=siaml2_128 --train --monitor

        python -m wbia_cnn --tf netrun --ds flankhack --weights=new --arch=siaml2_partmatch --train --monitor --learning_rate=.00001
        python -m wbia_cnn --tf netrun --ds flankhack --weights=new --arch=siam_deepfaceish --train --monitor --learning_rate=.00001

        # Different ways to train mnist
        python -m wbia_cnn --tf netrun --db mnist --weights=new --arch=mnist_siaml2 --train --monitor --datatype=siam-patch
        python -m wbia_cnn --tf netrun --db mnist --weights=new --arch=mnist-category --train --monitor --datatype=category

        # --- INITIALIZED-TRAINING ---
        python -m wbia_cnn --tf netrun --ds pzmtest --arch=siaml2_128 --weights=gz-gray:current --train --monitor

        # --- TESTING ---
        python -m wbia_cnn --tf netrun --db liberty --weights=liberty:current --arch=siaml2_128 --test
        python -m wbia_cnn --tf netrun --db PZ_Master0 --weights=combo:current --arch=siaml2_128 --testall

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia_cnn.netrun import * # NOQA
        >>> netrun()
        >>> ut.show_if_requested()
    """
    ut.colorprint('[netrun] NET RUN', 'red')

    # Resolve action flags, hyperparameters, and alias tags from argv.
    requests, hyperparams, tags = parse_args()
    ds_tag = tags['ds_tag']
    datatype = tags['datatype']
    extern_ds_tag = tags['extern_ds_tag']
    arch_tag = tags['arch_tag']
    checkpoint_tag = tags['checkpoint_tag']

    # ----------------------------
    # Choose the main dataset
    ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
    dataset = ingest_data.grab_dataset(ds_tag, datatype)
    # Weights may be loaded from a different (external) dataset's dpath.
    if extern_ds_tag is not None:
        extern_dpath = ingest_data.get_extern_training_dpath(extern_ds_tag)
    else:
        extern_dpath = None

    logger.info('dataset.training_dpath = %r' % (dataset.training_dpath,))

    logger.info('Dataset Alias Key: %r' % (dataset.alias_key,))
    logger.info(
        'Current Dataset Tag: %r'
        % (ut.invert_dict(DS_TAG_ALIAS2).get(dataset.alias_key, None),)
    )

    if requests['ensuredata']:
        # Print alias key that maps to this particular dataset
        if ut.show_was_requested():
            interact_ = dataset.interact()  # NOQA
            return
        logger.info('...exiting')
        sys.exit(1)

    # ----------------------------
    # Choose model architecture
    # TODO: data will need to return info about number of labels in viewpoint models
    # Specify model archichitecture
    ut.colorprint('[netrun] Architecture Specification', 'yellow')
    if arch_tag == 'siam2stream':
        model = models.SiameseCenterSurroundModel(
            data_shape=dataset.data_shape,
            training_dpath=dataset.training_dpath,
            **hyperparams
        )
    elif arch_tag.startswith('siam'):
        # covers siaml2_128, siaml2_partmatch, siam_deepfaceish, ...
        model = models.SiameseL2(
            data_shape=dataset.data_shape,
            arch_tag=arch_tag,
            training_dpath=dataset.training_dpath,
            **hyperparams
        )
    elif arch_tag == 'mnist-category':
        model = models.MNISTModel(
            data_shape=dataset.data_shape,
            output_dims=dataset.output_dims,
            arch_tag=arch_tag,
            training_dpath=dataset.training_dpath,
            **hyperparams
        )
        pass
    else:
        raise ValueError('Unknown arch_tag=%r' % (arch_tag,))

    ut.colorprint('[netrun] Initialize archchitecture', 'yellow')
    model.init_arch()

    # ----------------------------
    # Choose weight initialization
    ut.colorprint('[netrun] Setting weights', 'yellow')
    if checkpoint_tag == 'new':
        ut.colorprint('[netrun] * Initializing new weights', 'lightgray')
        model.reinit_weights()
    else:
        # Fuzzy-match the requested checkpoint against the saved history,
        # possibly inside the external dataset's directory.
        checkpoint_tag = model.resolve_fuzzy_checkpoint_pattern(
            checkpoint_tag, extern_dpath
        )
        ut.colorprint(
            '[netrun] * Resolving weights checkpoint_tag=%r' % (checkpoint_tag,),
            'lightgray',
        )
        if extern_dpath is not None:
            model.load_extern_weights(dpath=extern_dpath, checkpoint_tag=checkpoint_tag)
        elif model.has_saved_state(checkpoint_tag=checkpoint_tag):
            model.load_model_state(checkpoint_tag=checkpoint_tag)
        else:
            # No matching state: report what does exist before failing.
            model_state_fpath = model.get_model_state_fpath(checkpoint_tag=checkpoint_tag)
            logger.info('model_state_fpath = %r' % (model_state_fpath,))
            ut.checkpath(model_state_fpath, verbose=True)
            logger.info(
                'Known checkpoints are: ' + ut.repr3(model.list_saved_checkpoints())
            )
            raise ValueError(
                ('Unresolved weight init: ' 'checkpoint_tag=%r, extern_ds_tag=%r')
                % (
                    checkpoint_tag,
                    extern_ds_tag,
                )
            )

    # logger.info('Model State:')
    # logger.info(model.get_state_str())
    # ----------------------------
    if not model.is_train_state_initialized():
        ut.colorprint('[netrun] Need to initialize training state', 'yellow')
        X_train, y_train = dataset.subset('train')
        model.ensure_data_params(X_train, y_train)

    # Run Actions
    if requests['train']:
        ut.colorprint('[netrun] Training Requested', 'yellow')
        # parse training arguments
        config = ut.argparse_dict(
            dict(
                era_size=15,
                max_epochs=1200,
                rate_decay=0.8,
            )
        )
        model.monitor_config.update(**config)
        X_train, y_train = dataset.subset('train')
        X_valid, y_valid = dataset.subset('valid')
        model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)

    elif requests['test']:
        # assert model.best_results['epoch'] is not None
        ut.colorprint('[netrun] Test Requested', 'yellow')
        if requests['testall']:
            ut.colorprint('[netrun] * Testing on all data', 'lightgray')
            X_test, y_test = dataset.subset('all')
            flat_metadata = dataset.subset_metadata('all')
        else:
            ut.colorprint('[netrun] * Testing on test subset', 'lightgray')
            X_test, y_test = dataset.subset('test')
            flat_metadata = dataset.subset_metadata('test')
        data, labels = X_test, y_test
        dataname = dataset.alias_key
        experiments.test_siamese_performance(model, data, labels, flat_metadata, dataname)
    else:
        if not ut.get_argflag('--cmd'):
            raise ValueError('nothing here. need to train or test')

    if requests['publish']:
        # Copy the trained model state into the publishing directory.
        ut.colorprint('[netrun] Publish Requested', 'yellow')
        publish_dpath = ut.truepath('~/Dropbox/IBEIS')
        published_model_state = ut.unixjoin(
            publish_dpath, model.arch_tag + '_model_state.pkl'
        )
        ut.copy(model.get_model_state_fpath(), published_model_state)
        ut.view_directory(publish_dpath)
        logger.info(
            'You need to get the dropbox link and '
            'register it into the appropriate file'
        )
        # pip install dropbox
        # https://www.dropbox.com/developers/core/start/python
        # import dropbox  # need oauth
        # client.share('/myfile.txt', short_url=False)
        # https://wildbookiarepository.azureedge.net/models/siaml2_128_model_state.pkl

    if ut.get_argflag('--cmd'):
        # Drop into an interactive shell for manual inspection.
        ut.embed()
def parse_args():
    """Parse netrun command-line options.

    Returns:
        tuple: ``(requests, hyperparams, tags)`` where
            - ``requests`` maps action flags (train/test/testall/publish/
              ensuredata) to booleans taken from the command line,
            - ``hyperparams`` holds training hyperparameters,
            - ``tags`` holds resolved dataset/architecture/checkpoint tags.
    """
    ds_default = None
    arch_default = 'siaml2_128'
    weights_tag_default = None
    # Test values
    # NOTE(review): dead branch kept for interactive debugging; flip to True
    # only inside IPython (the assert enforces that).
    if False:
        ds_default = 'liberty'
        weights_tag_default = 'current'
        assert ut.inIPython()
    # Parse commandline args
    ds_tag = ut.get_argval(('--dataset', '--ds'), type_=str, default=ds_default)
    datatype = ut.get_argval(('--datatype', '--dt'), type_=str, default='siam-patch')
    arch_tag = ut.get_argval(('--arch', '-a'), default=arch_default)
    weights_tag = ut.get_argval(
        ('--weights', '+w'), type_=str, default=weights_tag_default
    )
    # Incorporate new config stuff?
    # NEW = False
    # if NEW:
    #     default_dstag_cfg = {
    #         'ds': 'PZ_MTEST',
    #         'mode': 'patches',
    #         'arch': arch_default
    #     }
    #     named_defaults_dict = {
    #         '': default_dstag_cfg
    #     }
    #     ut.parse_argv_cfg('dstag', named_defaults_dict=named_defaults_dict)
    # Training hyperparameters; the commented values are previous defaults.
    hyperparams = ut.argparse_dict(
        {
            #'batch_size': 128,
            'batch_size': 256,
            #'learning_rate': .0005,
            'learning_rate': 0.1,
            'momentum': 0.9,
            #'weight_decay': 0.0005,
            'weight_decay': 0.0001,
        },
        alias_dict={
            'weight_decay': ['decay'],
            'learning_rate': ['learn_rate'],
        },
    )
    # Which top-level actions were requested on the command line.
    requests = ut.argparse_dict(
        {
            'train': False,
            'test': False,
            'testall': False,
            'publish': False,
            'ensuredata': False,
        }
    )
    # --testall implies --test.
    requests['test'] = requests['test'] or requests['testall']
    # breakup weights tag into extern_ds and checkpoint
    if weights_tag is not None and ':' in weights_tag:
        extern_ds_tag, checkpoint_tag = weights_tag.split(':')
    else:
        extern_ds_tag = None
        checkpoint_tag = weights_tag
    # resolve aliases
    ds_tag = DS_TAG_ALIAS2.get(ds_tag, ds_tag)
    extern_ds_tag = DS_TAG_ALIAS2.get(extern_ds_tag, extern_ds_tag)
    checkpoint_tag = CHECKPOINT_TAG_ALIAS.get(checkpoint_tag, checkpoint_tag)
    tags = {
        'ds_tag': ds_tag,
        'extern_ds_tag': extern_ds_tag,
        'checkpoint_tag': checkpoint_tag,
        'arch_tag': arch_tag,
        'datatype': datatype,
    }
    ut.colorprint('[netrun] * ds_tag=%r' % (ds_tag,), 'lightgray')
    ut.colorprint('[netrun] * arch_tag=%r' % (arch_tag,), 'lightgray')
    ut.colorprint('[netrun] * extern_ds_tag=%r' % (extern_ds_tag,), 'lightgray')
    ut.colorprint('[netrun] * checkpoint_tag=%r' % (checkpoint_tag,), 'lightgray')
    return requests, hyperparams, tags
def merge_ds_tags(ds_alias_list):
    r"""
    Merge several datasets, given by alias, into one combined dataset.

    CommandLine:
        python -m wbia_cnn --tf merge_ds_tags --alias-list gz-gray girm pzmtest nnp
    TODO:
        http://stackoverflow.com/questions/18492273/combining-hdf5-files
    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia_cnn.netrun import *  # NOQA
        >>> ds_alias_list = ut.get_argval('--alias-list', type_=list, default=[])
        >>> result = merge_ds_tags(ds_alias_list)
        >>> print(result)
    """
    # Resolve each alias to its canonical tag (unknown aliases pass through).
    resolved = [DS_TAG_ALIAS2.get(alias, alias) for alias in ds_alias_list]
    datasets = [ingest_data.grab_siam_dataset(tag) for tag in resolved]
    merged = ingest_data.merge_datasets(datasets)
    logger.info(merged.alias_key)
    return merged
if __name__ == '__main__':
    """
    CommandLine:
        python -m wbia_cnn.netrun
        python -m wbia_cnn.netrun --allexamples
        python -m wbia_cnn.netrun --allexamples --noface --nosrc
    """
    # train_pz()
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    # import warnings
    # with warnings.catch_warnings():
    #     # Cause all warnings to always be triggered.
    #     warnings.filterwarnings("error", ".*get_all_non_bias_params.*")
    # Run this module's doctests (utool's standard test entry point).
    ut.doctest_funcs()
| 40.441397 | 194 | 0.635444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,249 | 0.570327 |
8703e2d3131fbbf507719f582a432f8703d7572d | 537 | py | Python | exerciciosPython/Mundo1GB/ex035.py | gutembergdomingos13/ExerciciosPhyton | 67e046d3ed91d1c10d8227bc5a89735ed6a0abff | [
"MIT"
] | null | null | null | exerciciosPython/Mundo1GB/ex035.py | gutembergdomingos13/ExerciciosPhyton | 67e046d3ed91d1c10d8227bc5a89735ed6a0abff | [
"MIT"
] | null | null | null | exerciciosPython/Mundo1GB/ex035.py | gutembergdomingos13/ExerciciosPhyton | 67e046d3ed91d1c10d8227bc5a89735ed6a0abff | [
"MIT"
] | null | null | null | # Desenvolva um programa que leia o comprimento de trรชs retas
# e diga ao usuรกrio se elas podem ou nรฃo formar um triรขngulo.
# Read three segment lengths from the user and report whether they can
# form a triangle (strict triangle inequality on every side).
print("-=-" * 15)
print("Vamos analisar um triângulo...")
print('-=-' * 15)
r1 = float(input('Informe o primeiro segmento: '))
r2 = float(input('Informe o segundo seguimento: '))
r3 = float(input('Informe o terceiro seguimento: '))
# Each side must be strictly smaller than the sum of the other two.
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print('Os seguimentos a cima podem formar um triangulo!')
else:
    # Fixed typo in the user-facing message: 'potem' -> 'podem'.
    print('Os seguimentos não podem formar um triangulo!')
| 33.5625 | 61 | 0.687151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 363 | 0.668508 |
870471c2d2cfa3694cd35571e655bc89ffdfcb61 | 1,032 | py | Python | python/test/test_super_multiple_order_api.py | ashwinkp/ksapi | c348765cefb4d51fd90febcbfa9ff890b67bdc7d | [
"Apache-2.0"
] | 7 | 2022-02-05T16:20:37.000Z | 2022-02-27T16:48:28.000Z | python/test/test_super_multiple_order_api.py | ashwinkp/ksapi | c348765cefb4d51fd90febcbfa9ff890b67bdc7d | [
"Apache-2.0"
] | 19 | 2022-02-03T12:40:08.000Z | 2022-03-30T09:12:46.000Z | python/test/test_super_multiple_order_api.py | ashwinkp/ksapi | c348765cefb4d51fd90febcbfa9ff890b67bdc7d | [
"Apache-2.0"
] | 12 | 2021-12-23T06:14:21.000Z | 2022-03-28T07:47:19.000Z | # coding: utf-8
from __future__ import absolute_import
import unittest
import ks_api_client
from ks_api_client.api.super_multiple_order_api import SuperMultipleOrderApi # noqa: E501
from ks_api_client.rest import ApiException
class TestSuperMultipleOrderApi(unittest.TestCase):
    """SuperMultipleOrderApi unit test stubs"""
    # NOTE: auto-generated skeleton; each test_* below is a placeholder to be
    # filled with real API calls and assertions.
    def setUp(self):
        # Fresh API client instance for every test.
        self.api = ks_api_client.api.super_multiple_order_api.SuperMultipleOrderApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_cancel_sm_order(self):
        """Test case for cancel_sm_order
        Cancel an Super Multiple order  # noqa: E501
        """
        pass
    def test_modify_sm_order(self):
        """Test case for modify_sm_order
        Modify an existing super multiple order  # noqa: E501
        """
        pass
    def test_place_new_sm_order(self):
        """Test case for place_new_sm_order
        Place a New Super Multiple order  # noqa: E501
        """
        pass
if __name__ == '__main__':
    unittest.main()
| 21.5 | 99 | 0.679264 | 747 | 0.723837 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.387597 |
8705be63c63c4c6cc5d6f7cc5c1159e4435dd4ec | 956 | py | Python | examples/img-classifier/main.py | robianmcd/keras-mri | dd8619ca848cb64555fbd7aca5b7aa1941cdc08b | [
"MIT"
] | 12 | 2019-04-18T13:32:48.000Z | 2020-06-19T13:45:34.000Z | examples/img-classifier/main.py | robianmcd/keras-mri | dd8619ca848cb64555fbd7aca5b7aa1941cdc08b | [
"MIT"
] | 1 | 2019-06-20T03:44:07.000Z | 2019-06-21T14:35:44.000Z | examples/img-classifier/main.py | robianmcd/keras-mri | dd8619ca848cb64555fbd7aca5b7aa1941cdc08b | [
"MIT"
] | 3 | 2019-04-18T19:36:52.000Z | 2020-01-30T22:51:02.000Z | import numpy as np
import os
import os.path as path
from keras.applications import vgg16, inception_v3, resnet50, mobilenet
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
import kmri
# Directory containing the input images, resolved relative to this file.
base_path = path.dirname(path.realpath(__file__))
img_path = path.join(base_path, 'img')
# Exactly one of the model sections below should be active;
# ``normalize_pixels`` must match the chosen model's expected input range.
## Load the VGG model
# model = vgg16.VGG16(weights='imagenet')
# normalize_pixels = True
## Load the MobileNet model
# model = mobilenet.MobileNet(weights='imagenet')
# normalize_pixels = True
## Load the ResNet50 model
model = resnet50.ResNet50(weights='imagenet')
normalize_pixels = False
def get_img(file_name):
    """Load ``file_name`` from ``img_path``, resized to 224x224, as an array.

    When the module flag ``normalize_pixels`` is set, pixel values are
    scaled by 1/256 into roughly [0, 1).
    """
    image = load_img(path.join(img_path, file_name), target_size=(224, 224))
    pixels = img_to_array(image)
    return pixels / 256 if normalize_pixels else pixels
# Stack every image in the folder into one batch and visualize the model.
img_input = np.array([get_img(file_name) for file_name in os.listdir(img_path)])
kmri.visualize_model(model, img_input)
| 26.555556 | 80 | 0.761506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.23954 |
8707f87d551c2ced2234e8a164a65f88799aa23a | 1,483 | py | Python | tests/test_voting_phase.py | SeanAmmirati/secrethitlergame | a7266593a3d0df36dbca4f4b476ab8604e780382 | [
"MIT"
] | null | null | null | tests/test_voting_phase.py | SeanAmmirati/secrethitlergame | a7266593a3d0df36dbca4f4b476ab8604e780382 | [
"MIT"
] | null | null | null | tests/test_voting_phase.py | SeanAmmirati/secrethitlergame | a7266593a3d0df36dbca4f4b476ab8604e780382 | [
"MIT"
] | null | null | null | import pytest
from secrethitlergame.phase import Phase
from unittest import mock
from secrethitlergame.voting_phase import VotingPhase
def test_initialization():
    """A fresh VotingPhase is a Phase with no government chosen yet."""
    phase = VotingPhase()
    assert isinstance(phase, Phase)
    assert phase.president is None
    assert phase.chancelor is None
def test_get_previous_government():
    """The previous government comes from the prior phase, if any."""
    member = mock.Mock()
    member.person.return_value = 'John Doe'
    first_phase = VotingPhase()
    first_phase.chancelor = member
    first_phase.president = member
    # Without a previous phase, both seats are reported as empty.
    government = first_phase.get_previous_government()
    assert len(government) == 2
    assert all(seat is None for seat in government)
    # With a previous phase, its government is returned.
    next_phase = VotingPhase(previous_phase=first_phase)
    government = next_phase.get_previous_government()
    assert len(government) == 2
    assert all(seat is not None for seat in government)
    assert government[0] == member
def test_add_chancelor():
    """A chancelor can be added once, but not re-added in the next phase."""
    first_candidate = mock.Mock()
    second_candidate = mock.Mock()
    earlier_phase = VotingPhase()
    earlier_phase.add_chancelor(first_candidate)
    assert earlier_phase.chancelor is not None
    assert earlier_phase.chancelor == first_candidate
    # Re-nominating the previous chancelor must be rejected.
    later_phase = VotingPhase(previous_phase=earlier_phase)
    with pytest.raises(ValueError):
        later_phase.add_chancelor(first_candidate)
    assert later_phase.chancelor is None
    # A different player is acceptable.
    later_phase.add_chancelor(second_candidate)
    assert later_phase.chancelor is not None
    assert later_phase.chancelor == second_candidate
def test_add_president():
    """Adding a president stores that player on the phase."""
    candidate = mock.Mock()
    phase = VotingPhase()
    phase.add_president(candidate)
    assert phase.president == candidate
def test_failed():
    """A failed vote yields the phase's successor, distinct from itself."""
    phase = VotingPhase()
    successor = phase.failed()
    assert phase.next_phase == successor
    assert phase != successor
| 23.919355 | 53 | 0.691841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.006743 |
870a18d23ecfa11d93e662286c89a4d7d8fe6f2e | 328 | py | Python | python_handler.py | toddcooke/aws-lambda-function-cold-start-time | a5abced3718aaa73c0cc42f827647549a72fda71 | [
"MIT"
] | null | null | null | python_handler.py | toddcooke/aws-lambda-function-cold-start-time | a5abced3718aaa73c0cc42f827647549a72fda71 | [
"MIT"
] | null | null | null | python_handler.py | toddcooke/aws-lambda-function-cold-start-time | a5abced3718aaa73c0cc42f827647549a72fda71 | [
"MIT"
] | null | null | null | # import datetime
# import logging
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.INFO)
def run(event, context):
    """AWS Lambda handler: ignore its arguments and return a greeting.

    ``event`` and ``context`` are required by the Lambda calling
    convention but are unused here.
    """
    greeting = "hello world!"
    return greeting
| 25.230769 | 80 | 0.685976 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 269 | 0.820122 |
870a1fd5ff0795c011afc5632b304b463b0623e3 | 131 | py | Python | testapp2/admin.py | gabrielbiasi/django-improved-permissions | 9cf6d0ddb8a4dcfa2e58d3adbf1357e56a64ce71 | [
"MIT"
] | 12 | 2018-03-22T00:30:32.000Z | 2021-04-24T16:26:08.000Z | testapp2/admin.py | s-sys/django-improved-permissions | 9cf6d0ddb8a4dcfa2e58d3adbf1357e56a64ce71 | [
"MIT"
] | 27 | 2018-03-18T00:43:37.000Z | 2020-06-05T18:09:18.000Z | testapp2/admin.py | gabrielbiasi/django-improved-permissions | 9cf6d0ddb8a4dcfa2e58d3adbf1357e56a64ce71 | [
"MIT"
] | 2 | 2018-03-28T17:54:43.000Z | 2021-01-11T21:17:08.000Z | """ testapp2 admin configs """
from django.contrib import admin
from testapp2.models import Library
# Expose the Library model in the Django admin with the default ModelAdmin.
admin.site.register(Library)
| 18.714286 | 35 | 0.78626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.229008 |
870a8d637ee605f55604fde4b7ffc2547f1122a2 | 958 | py | Python | tests/unitary/ERC20CRV/test_setters.py | AqualisDAO/curve-dao-contracts | beec73a068da8ed01c0f710939dc5adb776d565b | [
"MIT"
] | 217 | 2020-06-24T14:01:21.000Z | 2022-03-29T08:35:24.000Z | tests/unitary/ERC20CRV/test_setters.py | AqualisDAO/curve-dao-contracts | beec73a068da8ed01c0f710939dc5adb776d565b | [
"MIT"
] | 25 | 2020-06-24T09:39:02.000Z | 2022-03-22T17:03:00.000Z | tests/unitary/ERC20CRV/test_setters.py | AqualisDAO/curve-dao-contracts | beec73a068da8ed01c0f710939dc5adb776d565b | [
"MIT"
] | 110 | 2020-07-10T22:45:49.000Z | 2022-03-29T02:51:08.000Z | import brownie
def test_set_minter_admin_only(accounts, token):
    """A non-admin account must not be able to change the minter."""
    with brownie.reverts("dev: admin only"):
        token.set_minter(accounts[2], {"from": accounts[1]})
def test_set_admin_admin_only(accounts, token):
    """A non-admin account must not be able to change the admin."""
    with brownie.reverts("dev: admin only"):
        token.set_admin(accounts[2], {"from": accounts[1]})
def test_set_name_admin_only(accounts, token):
    """A non-admin account must not be able to rename the token."""
    with brownie.reverts("Only admin is allowed to change name"):
        token.set_name("Foo Token", "FOO", {"from": accounts[1]})
def test_set_minter(accounts, token):
    """The admin can set a new minter and it is persisted."""
    token.set_minter(accounts[1], {"from": accounts[0]})
    assert token.minter() == accounts[1]
def test_set_admin(accounts, token):
    """The admin can transfer the admin role and it is persisted."""
    token.set_admin(accounts[1], {"from": accounts[0]})
    assert token.admin() == accounts[1]
def test_set_name(accounts, token):
    """The admin can change both the token name and its symbol."""
    token.set_name("Foo Token", "FOO", {"from": accounts[0]})
    assert token.name() == "Foo Token"
    assert token.symbol() == "FOO"
| 26.611111 | 65 | 0.676409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.162839 |
870aa75776aa600e03d1f7176f47f56559bbbd5d | 1,235 | py | Python | data/data_analysis.py | poch4319/Method-Exploration-Relation-Extraction-Using-BERT | 81ffbae2508f641b9028baba4983f673edecb424 | [
"Apache-2.0"
] | null | null | null | data/data_analysis.py | poch4319/Method-Exploration-Relation-Extraction-Using-BERT | 81ffbae2508f641b9028baba4983f673edecb424 | [
"Apache-2.0"
] | null | null | null | data/data_analysis.py | poch4319/Method-Exploration-Relation-Extraction-Using-BERT | 81ffbae2508f641b9028baba4983f673edecb424 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import json
from collections import Counter
import matplotlib.pyplot as plt
DATASET_DIR = './dataset/tacred/train_mod.json'
# Load the (modified) TACRED training split as a list of example dicts.
with open(DATASET_DIR) as f:
    examples = json.load(f)
def plot_counts(data):
    """Plot a bar chart of relation-label frequencies, sorted descending.

    The dominant "no_relation" class is excluded so the remaining classes
    are readable.

    Args:
        data: iterable of relation-label strings.
    """
    counts = Counter(data)
    # Drop the majority class; do not crash if it is absent.
    counts.pop("no_relation", None)
    labels, values = zip(*counts.items())
    values = np.array(values)
    labels = np.array(labels)
    # Most common classes first.
    order = np.argsort(values)[::-1]
    values_sorted = values[order]
    labels_sorted = labels[order]
    print(values_sorted)
    # Bug fix: bars and tick labels must share the same x positions.
    # Previously bars were drawn at 0..n-1 while ticks were placed at the
    # *pre-sort* indexes (+0.5), misaligning every label with its bar.
    positions = np.arange(len(values_sorted))
    plt.bar(positions, values_sorted, width=1)
    plt.xticks(positions, labels_sorted, rotation='vertical')
    plt.ylabel("Number of examples")
    plt.tight_layout()
    plt.show()
# relation distribution: count examples per relation label and plot them.
print('NUM EXAMPLES', len(examples))
relations = [e['relation'] for e in examples]
print("NUM_UNIQUE_RELATIONS", len(Counter(relations)))
plot_counts(relations)
def plot_counts_sent(data):
    """Plot a histogram of sentence lengths, clipped to [0, 100] tokens.

    Args:
        data: iterable of sentence lengths (ints).
    """
    # Bug fix: plot the ``data`` parameter instead of silently using the
    # module-level ``sents`` global, so the function works for any input.
    plt.hist(data, range=(0, 100), bins=100)
    plt.ylabel("Number of examples")
    plt.xlabel("Sentence Length")
    plt.show()
# sentence length distribution: token counts per example.
sents = [len(e['token']) for e in examples]
plot_counts_sent(sents)
| 25.204082 | 80 | 0.706883 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.177328 |
870ba9ec72cbb4826cda872685bfc48f3a36eeaf | 1,776 | py | Python | Lab_4/4.3_rsa_cracking.py | Saif-M-Dhrubo/Crypto-Lab | 2807320d7ca389adee86f69f704b9f91f9cd8054 | [
"MIT"
] | 2 | 2019-04-28T16:34:15.000Z | 2019-04-28T17:54:06.000Z | Lab_4/4.3_rsa_cracking.py | saif-mahmud/Crypto-Lab | 2807320d7ca389adee86f69f704b9f91f9cd8054 | [
"MIT"
] | null | null | null | Lab_4/4.3_rsa_cracking.py | saif-mahmud/Crypto-Lab | 2807320d7ca389adee86f69f704b9f91f9cd8054 | [
"MIT"
] | 1 | 2020-01-08T06:48:08.000Z | 2020-01-08T06:48:08.000Z | import sys
from fractions import Fraction as frac
from math import gcd, floor
from isqrt import isqrt
# cont() recurses once per continued-fraction term; raise the limit.
sys.setrecursionlimit(10**4)
# text=int(open("4.3_ciphertext.hex").read())
# Public exponent e and modulus n, parsed with base auto-detection (base=0
# honors 0x/0o/0b prefixes in the hex files).
e=int(open("4.4_public_key.hex").read(),0)
n=int((open("4.5_modulo.hex").read()),0)
p = 0
q = 0
# print(text,"\n",e,"\n",n)
def validate(x):
    """Check whether convergent ``x`` (interpreted as k/d) yields a key.

    Given a candidate k/d from the continued-fraction expansion of e/n,
    reconstruct the implied totient phi = (e*d - 1)/k and solve the
    quadratic z^2 - (n - phi + 1)z + n = 0 whose roots would be the prime
    factors.  Returns ``(ok, p, q, d)``; ``ok`` is True only when the
    roots are whole numbers (p and q are exact Fractions, not ints).
    """
    k = x.numerator
    d = x.denominator
    # Totient implied by e*d ≡ 1 (mod phi).
    totient = frac(e * d - 1, k)
    # Negative discriminant: no real roots, candidate rejected.
    if (n - totient + 1) ** 2 - 4 * n < 0:
        return False, None, None,None
    # Discriminant is a Fraction; isqrt needs its integer numerator.
    D = isqrt(((n - totient + 1) ** 2 - 4 * n).numerator)
    # Reject unless the discriminant is a perfect square.
    if D * D != (n - totient + 1) ** 2 - 4 * n:
        return False, None, None,None
    x = ((n - totient + 1) + (D)) / (2)
    y = ((n - totient + 1) - (D)) / (2)
    v = False
    # Both roots are valid only when integral; checking one suffices since
    # their sum (n - phi + 1) is integral whenever phi is.
    if x == floor(x):
        v = True
    return v, x, y,d
def extendedEuclid(l, s):
    """Extended Euclidean algorithm.

    Returns ``(x, y, d)`` such that ``l*x + s*y == d`` with
    ``d == gcd(l, s)``.
    """
    if s == 0:
        return (1, 0, l)
    x, y, d = extendedEuclid(s, l % s)
    # Bug fix: use exact integer floor division.  The previous
    # ``floor(l / s)`` went through float division, which loses precision
    # (or raises OverflowError) for the multi-hundred-digit integers that
    # occur in RSA keys, silently corrupting the coefficients.
    return (y, x - (l // s) * y, d)
def value(x):
    """Evaluate the continued fraction [x0; x1, x2, ...] as a Fraction."""
    # Fold from the innermost term outwards: acc -> term + 1/acc.
    acc = x[-1]
    for term in reversed(x[:-1]):
        acc = term + frac(1, acc)
    return acc
def cont(r):
i = floor(r)
f = r - frac(i, 1)
if f == frac(0, 1):
return [i]
return ([i] + cont(frac(1, f)))
def bigmod(x, y, p):
    """Return ``(x ** y) % p`` using the built-in three-argument ``pow``.

    The previous hand-rolled square-and-multiply recursion recursed
    forever for ``y == 0`` (its base case was ``y == 1``); ``pow``
    handles ``y == 0`` correctly (returning ``1 % p``) and runs in C.
    """
    return pow(x, y, p)
# Search the convergents of e/n for a candidate k/d that factors n
# (this is the structure of a Wiener-style small-d attack).
x=cont(frac(e,n))
for i in range(len(x)):
    c = (value(x[:i + 1]))
    # Skip degenerate convergents; d (the denominator) must be odd.
    if c != 0 and c.denominator % 2 != 0:
        v, p, q, d = validate(c)
        if v:
            break
# Cross-check: recompute the inverse of e mod phi with extended Euclid.
# NOTE(review): d2 is the Bezout coefficient and may be negative / differ
# from d by a multiple of phi -- hence it is printed alongside d==d2.
totient = (p - 1) * (q - 1)
d2, y, z = extendedEuclid(e, totient)
# print(d==d2)
# m = bigmod(text, d, n)
print("Private Key:",d,d==d2,p*q==n)
# print("Message:",m)
| 24 | 58 | 0.472973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.10473 |
870c5accc8a181d69392e0b19859b15d5dbaf7c7 | 2,161 | py | Python | MachineLearning/Ensemble/GradientBoostingTree.py | percycat/Tutorial | d01fbb974c3a85436ea68ec277ca55cd553dd61f | [
"MIT"
] | null | null | null | MachineLearning/Ensemble/GradientBoostingTree.py | percycat/Tutorial | d01fbb974c3a85436ea68ec277ca55cd553dd61f | [
"MIT"
] | null | null | null | MachineLearning/Ensemble/GradientBoostingTree.py | percycat/Tutorial | d01fbb974c3a85436ea68ec277ca55cd553dd61f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 19 06:10:55 2018
@author: PC Lee
Demo of gradient boosting tree
A very nice reference for gradient boosting
http://homes.cs.washington.edu/~tqchen/pdf/BoostedTree.pdf
LightGBM
https://github.com/Microsoft/LightGBM/tree/master/examples/python-guide
Catboost
https://github.com/catboost/tutorials
Comparative study of different gradient boosting tree
https://towardsdatascience.com/catboost-vs-light-gbm-vs-xgboost-5f93620723db
"""
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import numpy as np
import lightgbm as lgb
import catboost as cb
# Load the red-wine quality dataset; all columns but the last are features.
df_wine = pd.read_csv('../Data/winequality-red.csv', sep=';')
df_shape = df_wine.shape
X, y = df_wine.iloc[:, 0:df_shape[1]-1], df_wine.iloc[:, df_shape[1]-1]
# Shift labels so the smallest class becomes 0.
y = y - np.min(y)
X = X.values # convert to numpy array
# Hold out 20% of the rows for evaluation.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=0)
# --- scikit-learn gradient boosting baseline ---
gbt = GradientBoostingClassifier( n_estimators=100, learning_rate=0.1, random_state=1)
gbt.fit(X_train, y_train)
print( "score: {}".format( gbt.score(X_test, y_test) ) )
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
    'num_leaves': 6,
    'metric': ('l1', 'l2'),
    'verbose': 0
}
print('Starting training...')
# train with both splits as validation sets; metrics land in evals_result
evals_result = {}
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=100,
                valid_sets=[lgb_train, lgb_test],
                feature_name=['f' + str(i + 1) for i in range(X_train.shape[-1])],
                categorical_feature=[11],
                evals_result=evals_result,
                verbose_eval=10)
print('Plotting feature importances...')
ax = lgb.plot_importance(gbm, max_num_features=5)
plt.show()
# predict with the best iteration found during training
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval: root-mean-squared error of the predictions
print('The rmse of prediction is:{}'.format( mean_squared_error(y_test, y_pred) ** 0.5) )
| 28.813333 | 90 | 0.710782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 761 | 0.352152 |
870cea4c86e4cc9024721aa610576f983d0f1449 | 215 | py | Python | rurouni/exceptions.py | PinkDiamond1/Kenshin | bb5dfa05f5d10b4bdd1e0403c9e7d3c7e4399fcb | [
"Apache-2.0"
] | 219 | 2016-04-24T10:09:25.000Z | 2022-02-02T22:12:17.000Z | rurouni/exceptions.py | PinkDiamond1/Kenshin | bb5dfa05f5d10b4bdd1e0403c9e7d3c7e4399fcb | [
"Apache-2.0"
] | 16 | 2016-04-22T07:45:26.000Z | 2018-01-11T03:16:30.000Z | rurouni/exceptions.py | PinkDiamond1/Kenshin | bb5dfa05f5d10b4bdd1e0403c9e7d3c7e4399fcb | [
"Apache-2.0"
] | 29 | 2016-05-12T05:47:34.000Z | 2021-11-05T19:19:12.000Z | # coding: utf-8
class RurouniException(Exception):
    """Base class for all rurouni-specific errors."""
    pass
class ConfigException(RurouniException):
    """Raised for invalid or missing configuration."""
    pass
class TokenBucketFull(RurouniException):
    """Raised when the rate-limiting token bucket cannot accept more."""
    pass
class UnexpectedMetric(RurouniException):
pass | 15.357143 | 41 | 0.767442 | 191 | 0.888372 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.069767 |
870cf724477b120338bce4e51453e770e70b63e5 | 1,756 | py | Python | aydin/it/transforms/test/test_fixed_pattern.py | royerloic/aydin | f9c61a24030891d008c318b250da5faec69fcd7d | [
"BSD-3-Clause"
] | 78 | 2021-11-08T16:11:23.000Z | 2022-03-27T17:51:04.000Z | aydin/it/transforms/test/test_fixed_pattern.py | royerloic/aydin | f9c61a24030891d008c318b250da5faec69fcd7d | [
"BSD-3-Clause"
] | 19 | 2021-11-08T17:15:40.000Z | 2022-03-30T17:46:55.000Z | aydin/it/transforms/test/test_fixed_pattern.py | royerloic/aydin | f9c61a24030891d008c318b250da5faec69fcd7d | [
"BSD-3-Clause"
] | 7 | 2021-11-09T17:42:32.000Z | 2022-03-09T00:37:57.000Z | import numpy
from scipy.ndimage import gaussian_filter
from skimage.data import binary_blobs
from skimage.util import random_noise
from aydin.it.transforms.fixedpattern import FixedPatternTransform
def add_patterned_noise(image, n):
    """Return a noisy copy of ``image``.

    Applies a multiplicative and an additive (n, n) random pattern, then
    Gaussian and salt-and-pepper noise (the latter two with fixed seeds).
    """
    noisy = image.copy()
    noisy *= 1 + 0.1 * (numpy.random.rand(n, n) - 0.5)
    noisy += 0.1 * numpy.random.rand(n, n)
    noisy = random_noise(noisy, mode="gaussian", var=0.00001, seed=0)
    noisy = random_noise(noisy, mode="s&p", amount=0.000001, seed=0)
    return noisy
def test_fixed_pattern_real():
    """FixedPatternTransform round-trips noisy data and removes the pattern."""
    n = 128
    # Smooth synthetic 3-D blobs as ground truth.
    image = binary_blobs(length=n, seed=1, n_dim=3, volume_fraction=0.01).astype(
        numpy.float32
    )
    image = gaussian_filter(image, sigma=4)
    noisy = add_patterned_noise(image, n).astype(numpy.float32)
    bs = FixedPatternTransform(sigma=0)
    preprocessed = bs.preprocess(noisy)
    postprocessed = bs.postprocess(preprocessed)
    # import napari
    # with napari.gui_qt():
    #     viewer = napari.Viewer()
    #     viewer.add_image(image, name='image')
    #     viewer.add_image(noisy, name='noisy')
    #     viewer.add_image(preprocessed, name='preprocessed')
    #     viewer.add_image(postprocessed, name='postprocessed')
    assert image.shape == postprocessed.shape
    assert image.dtype == postprocessed.dtype
    # Pattern removal should bring the data close to the clean image...
    assert numpy.abs(preprocessed - image).mean() < 0.007
    assert preprocessed.dtype == postprocessed.dtype
    # ...while postprocess must invert preprocess almost exactly.
    assert numpy.abs(postprocessed - noisy).mean() < 1e-8
    # import napari
    # with napari.gui_qt():
    #     viewer = napari.Viewer()
    #     viewer.add_image(image, name='image')
    #     viewer.add_image(noisy, name='noisy')
    #     viewer.add_image(corrected, name='corrected')
870dbc44ca3ddd2e95df266b346d82599c7bb3ea | 1,069 | py | Python | lib/geovista/__init__.py | trexfeathers/geovista | f11a7a54ef11d8542be632c29f9fe6653572879e | [
"BSD-3-Clause"
] | 11 | 2021-05-12T08:45:51.000Z | 2022-03-15T11:45:22.000Z | lib/geovista/__init__.py | trexfeathers/geovista | f11a7a54ef11d8542be632c29f9fe6653572879e | [
"BSD-3-Clause"
] | 13 | 2021-11-22T17:10:20.000Z | 2022-03-29T08:10:51.000Z | lib/geovista/__init__.py | trexfeathers/geovista | f11a7a54ef11d8542be632c29f9fe6653572879e | [
"BSD-3-Clause"
] | 5 | 2021-05-14T13:36:00.000Z | 2022-03-08T11:49:20.000Z | from os import environ
from pathlib import Path
from appdirs import user_cache_dir
from ._version import version as __version__ # noqa: F401
from .bridge import Transform # noqa: F401
from .core import combine # noqa: F401
from .geodesic import BBox, line, panel, wedge # noqa: F401
from .geometry import get_coastlines # noqa: F401
from .geoplotter import GeoBackgroundPlotter, GeoMultiPlotter, GeoPlotter # noqa: F401
from .log import get_logger
# Configure the top-level logger.
logger = get_logger(__name__)
# https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
# Honor XDG_CACHE_HOME when set, otherwise fall back to the platform default.
_cache_dir = Path(environ.get("XDG_CACHE_HOME", user_cache_dir())) / __package__
#: GeoVista configuration dictionary.
config = dict(cache_dir=_cache_dir)
# Optional override hooks: a bundled siteconfig module and a standalone
# geovista_config module may each mutate ``config``; both are optional.
try:
    from .siteconfig import update_config as _update_config
    _update_config(config)
    del _update_config
except ImportError:
    pass
try:
    from geovista_config import update_config as _update_config
    _update_config(config)
    del _update_config
except ImportError:
    pass
del _cache_dir
| 26.725 | 87 | 0.781104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 236 | 0.220767 |
870dfc6d56b7b14caa775a8da4e5a06fd9e0533c | 421 | py | Python | app.py | Nohossat/Smart-tic-tac-toe | 204b97bf4874e0ee604abdb9b215e5b8ddcef497 | [
"MIT"
] | null | null | null | app.py | Nohossat/Smart-tic-tac-toe | 204b97bf4874e0ee604abdb9b215e5b8ddcef497 | [
"MIT"
] | null | null | null | app.py | Nohossat/Smart-tic-tac-toe | 204b97bf4874e0ee604abdb9b215e5b8ddcef497 | [
"MIT"
] | null | null | null | from morpion import Morpion
import argparse
if __name__ == '__main__':
    # Command-line entry point: optionally play against a human opponent.
    arg_parser = argparse.ArgumentParser(description="Play Tic-Tac-Toe\n")
    arg_parser.add_argument('-mode', '--mode', help='play against Human', default=False)
    options = arg_parser.parse_args()
    # Any of these spellings selects human-vs-human mode.
    against_human = options.mode in ["h", "human", "humain", "manuel"]
    game = Morpion(human=against_human)
    game.start_game()
870f4a06886086706c0904777ac053129591b6cd | 715 | py | Python | src/dcos_e2e_cli/dcos_docker/commands/_cgroup_mount_option.py | jongiddy/dcos-e2e | b52ef9a1097a8fb328902064345cc6c8b0bf5779 | [
"Apache-2.0"
] | 63 | 2018-05-17T21:02:14.000Z | 2021-11-15T19:18:03.000Z | src/dcos_e2e_cli/dcos_docker/commands/_cgroup_mount_option.py | jongiddy/dcos-e2e | b52ef9a1097a8fb328902064345cc6c8b0bf5779 | [
"Apache-2.0"
] | 225 | 2017-09-08T02:24:58.000Z | 2018-05-16T12:18:58.000Z | src/dcos_e2e_cli/dcos_docker/commands/_cgroup_mount_option.py | jongiddy/dcos-e2e | b52ef9a1097a8fb328902064345cc6c8b0bf5779 | [
"Apache-2.0"
] | 21 | 2018-06-14T21:58:24.000Z | 2021-11-15T19:18:06.000Z | """
Mount /sys/fs/cgroup Option
"""
from typing import Callable
import click
def cgroup_mount_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    Decorate ``command`` with a boolean flag controlling whether
    ``/sys/fs/cgroup`` is mounted from the host into the container.
    """
    help_text = (
        'Mounting ``/sys/fs/cgroup`` from the host is required to run '
        'applications which require ``cgroup`` isolation. '
        'Choose to not mount ``/sys/fs/cgroup`` if it is not available on '
        'the host.'
    )
    decorator = click.option(
        '--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',
        default=True,
        show_default=True,
        help=help_text,
    )
    return decorator(command)  # type: Callable[..., None]
870fcfd9e03efc22e487caad092766b6eab4008d | 5,676 | py | Python | soapfish/soap.py | jocassid/soapfish | 7926335089ec286d3f4f491f72d84ec7096c79c9 | [
"BSD-3-Clause"
] | null | null | null | soapfish/soap.py | jocassid/soapfish | 7926335089ec286d3f4f491f72d84ec7096c79c9 | [
"BSD-3-Clause"
] | 1 | 2018-02-19T22:40:44.000Z | 2018-02-19T22:40:44.000Z | soapfish/soap.py | jocassid/soapfish | 7926335089ec286d3f4f491f72d84ec7096c79c9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
'''
SOAP protocol implementation, dispatchers and client stub.
'''
from __future__ import absolute_import
import logging
import string
import requests
import six
from . import core, namespaces as ns, soap11, soap12, wsa
from .utils import uncapitalize
SOAP_HTTP_Transport = ns.wsdl_soap_http
logger = logging.getLogger('soapfish')
class SOAPVersion(object):
    """Resolve a SOAP protocol module (soap11/soap12) from XML namespaces.

    Consistency fix: explicitly subclass ``object`` so this is a new-style
    class under Python 2 as well, matching the sibling ``Service`` and
    ``Stub`` classes in this six-compatible module.
    """
    SOAP12 = soap12
    SOAP11 = soap11

    @classmethod
    def get_version(cls, namespace):
        """Return the soap11/soap12 module matching ``namespace``.

        :raises ValueError: if the namespace belongs to neither version.
        """
        if namespace in (cls.SOAP11.ENVELOPE_NAMESPACE, cls.SOAP11.BINDING_NAMESPACE):
            return cls.SOAP11
        elif namespace in (cls.SOAP12.ENVELOPE_NAMESPACE, cls.SOAP12.BINDING_NAMESPACE):
            return cls.SOAP12
        else:
            raise ValueError("SOAP version with namespace '%s' is not supported." % namespace)

    @classmethod
    def get_version_name(cls, namespace):
        """Return the module name of the version matching ``namespace``."""
        version = cls.get_version(namespace)
        return version.__name__

    @classmethod
    def get_version_from_xml(cls, xml):
        """Detect the version from a parsed WSDL tree (defaults to SOAP 1.1)."""
        namespaces = {'wsdl': ns.wsdl, 'soap12': ns.wsdl_soap12}
        if xml.xpath('wsdl:binding/soap12:binding', namespaces=namespaces):
            return cls.SOAP12
        else:
            return cls.SOAP11
class Service(object):
    '''
    Describes service aggregating information required for dispatching and
    WSDL generation.
    '''
    def __init__(self, targetNamespace, location, schemas, methods,
                 version=SOAPVersion.SOAP11, name='Service',
                 input_header=None, output_header=None, use_wsa=False):
        '''
        :param targetNamespace: string
        :param location: string, endpoint url.
        :param schemas: xsd.Schema instances.
        :param methods: list of xsd.Methods
        :param version: SOAP protocol module (soap11 or soap12).
        :param name: service name used in the generated WSDL.
        :param input_header: type parsed from incoming SOAP headers.
        :param output_header: type used for outgoing SOAP headers.
        :param use_wsa: enable WS-Addressing; supplies default headers.
        '''
        self.name = name
        self.targetNamespace = targetNamespace
        self.location = location
        self.schemas = schemas
        self.methods = methods
        self.version = version
        self.use_wsa = use_wsa
        # WS-Addressing defaults: only fill headers the caller left unset.
        if use_wsa and input_header is None:
            input_header = wsa.WSAsaHeader
        if use_wsa and output_header is None:
            output_header = wsa.WSAHeader
        self.input_header = input_header
        self.output_header = output_header
    def get_method(self, operationName):
        # NOTE(review): raises StopIteration (not a friendlier error) when no
        # method matches ``operationName`` -- callers should be aware.
        return next(m for m in self.methods if m.operationName == operationName)
    def find_element_by_name(self, name):
        # Search every schema in order; the first match wins, None otherwise.
        element = None
        for schema in self.schemas:
            element = schema.get_element_by_name(name)
            if element is not None:
                break
        return element
    def route(self, operationName):
        """Decorator to bind a Python function to service method."""
        method = self.get_method(operationName)
        def wrapper(func):
            method.function = func
            return func
        return wrapper
class Stub(object):
    '''
    Client stub. Handles only document style calls.
    '''
    # Subclasses may pin a default Service and endpoint template parts.
    SERVICE = None
    SCHEME = 'http'
    HOST = 'www.example.net'
    def __init__(self, username=None, password=None, service=None, location=None):
        '''
        :param username: optional HTTP basic-auth user.
        :param password: optional HTTP basic-auth password.
        :param service: Service instance; falls back to class SERVICE.
        :param location: endpoint URL string, or a callable
            ``(template, context) -> url``; by default the service's
            location template is expanded with scheme/host placeholders.
        '''
        self.username = username
        self.password = password
        self.service = service if service else self.SERVICE
        context = {'scheme': self.SCHEME, 'host': self.HOST}
        if location is None:
            # Default: substitute ${scheme}/${host} into the WSDL location.
            location = lambda template, context: string.Template(template).safe_substitute(**context)
        if callable(location):
            self.location = location(self.service.location, context)
        elif isinstance(location, six.string_types):
            self.location = location
        else:
            raise TypeError('Expected string or callable for location.')
    def _handle_response(self, method, http_headers, content):
        """Parse a raw SOAP response body into a SOAPResponse (or raise)."""
        soap = self.service.version
        envelope = soap.Envelope.parsexml(content)
        # Optional typed header, only when the method declares one.
        if envelope.Header and method and method.output_header:
            response_header = envelope.Header.parse_as(method.output_header)
        else:
            response_header = None
        # SOAP faults become SOAPError exceptions.
        if envelope.Body.Fault:
            code, message, actor = soap.parse_fault_message(envelope.Body.Fault)
            error = core.SOAPError(code=code, message=message, actor=actor)
            raise error
        # A string output names a schema element; otherwise it is the type.
        if isinstance(method.output, six.string_types):
            _type = self.service.find_element_by_name(method.output)._type.__class__
        else:
            _type = method.output
        body = envelope.Body.parse_as(_type)
        return core.SOAPResponse(body, soap_header=response_header)
    def call(self, operationName, parameter, header=None):
        '''
        Invoke ``operationName`` with ``parameter`` over HTTP POST and
        return the parsed SOAPResponse.

        :raises: lxml.etree.XMLSyntaxError -- validation problems.
        '''
        soap = self.service.version
        method = self.service.get_method(operationName)
        # Root tag: explicit method input name, or derived from the
        # parameter's class name.
        if isinstance(method.input, six.string_types):
            tagname = method.input
        else:
            tagname = uncapitalize(parameter.__class__.__name__)
        auth = (self.username, self.password) if self.username else None
        data = soap.Envelope.response(tagname, parameter, header=header)
        headers = soap.build_http_request_headers(method.soapAction)
        logger.info("Call '%s' on '%s'", operationName, self.location)
        logger.debug('Request Headers: %s', headers)
        logger.debug('Request Envelope: %s', data)
        r = requests.post(self.location, auth=auth, headers=headers, data=data)
        logger.debug('Response Headers: %s', r.headers)
        logger.debug('Response Envelope: %s', r.content)
        return self._handle_response(method, r.headers, r.content)
| 33.585799 | 101 | 0.650106 | 5,305 | 0.934637 | 0 | 0 | 818 | 0.144116 | 0 | 0 | 893 | 0.157329 |
8710f101398ef0acb5a1b6361f52aad333d1ef26 | 2,505 | py | Python | performance/build/testsuite.py | marksantos/thrust | 67bbec34a7fdb9186aea6c269870b517e42e4388 | [
"Apache-2.0"
] | 28 | 2017-10-04T03:41:56.000Z | 2021-07-18T14:53:36.000Z | performance/build/testsuite.py | marksantos/thrust | 67bbec34a7fdb9186aea6c269870b517e42e4388 | [
"Apache-2.0"
] | 13 | 2017-09-06T10:49:44.000Z | 2019-09-07T13:53:24.000Z | performance/build/testsuite.py | marksantos/thrust | 67bbec34a7fdb9186aea6c269870b517e42e4388 | [
"Apache-2.0"
] | 15 | 2018-02-13T00:08:55.000Z | 2022-02-14T05:46:43.000Z | """functions that generate reports and figures using the .xml output from the performance tests"""
__all__ = ['TestSuite', 'parse_testsuite_xml']
class TestSuite:
    """A named collection of performance tests plus the platform they ran on."""

    def __init__(self, name, platform, tests):
        self.name = name
        self.platform = platform
        self.tests = tests

    def __repr__(self):
        import pprint
        formatted = pprint.pformat((self.name, self.platform, self.tests))
        return 'TestSuite' + formatted
class Test:
    """One benchmark run: its name, input variables, and measured results."""

    def __init__(self, name, variables, results):
        self.name = name
        self.variables = variables
        self.results = results

    def __repr__(self):
        payload = (self.name, self.variables, self.results)
        return 'Test' + repr(payload)
def scalar_element(element):
    """Return an XML element's 'value' attribute coerced to int or float.

    Tries int first, then float; falls back to the raw attribute value
    (which may be None if the attribute is absent) when it does not parse
    as a number.
    """
    value = element.get('value')
    # Narrowed from bare `except:` so unrelated errors (e.g.
    # KeyboardInterrupt) are not silently swallowed.  TypeError covers a
    # missing attribute (value is None), ValueError a non-numeric string.
    try:
        return int(value)
    except (TypeError, ValueError):
        pass
    try:
        return float(value)
    except (TypeError, ValueError):
        return value
def parse_testsuite_platform(et):
    """Extract the platform description (device name and properties) from
    the parsed XML tree."""
    device_element = et.find('platform').find('device')
    device = {'name': device_element.get('name')}
    for prop in device_element.findall('property'):
        device[prop.get('name')] = scalar_element(prop)
    return {'device': device}
def parse_testsuite_tests(et):
testsuite_tests = {}
for test_element in et.findall('test'):
# test name
test_name = test_element.get('name')
# test variables: name -> value
test_variables = {}
for variable_element in test_element.findall('variable'):
test_variables[variable_element.get('name')] = scalar_element(variable_element)
# test results: name -> (value, units)
test_results = {}
for result_element in test_element.findall('result'):
# TODO make this a thing that can be converted to its first element when treated like a number
test_results[result_element.get('name')] = scalar_element(result_element)
testsuite_tests[test_name] = Test(test_name, test_variables, test_results)
return testsuite_tests
def parse_testsuite_xml(filename):
    """Parse a performance-test XML report file into a TestSuite."""
    import xml.etree.ElementTree as ET
    tree = ET.parse(filename)
    return TestSuite(
        tree.getroot().get('name'),
        parse_testsuite_platform(tree),
        parse_testsuite_tests(tree),
    )
| 29.821429 | 106 | 0.668663 | 532 | 0.212375 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.171657 |
8711dd388b808ba1f43903d57986edc525c23668 | 2,538 | py | Python | snippets_dataset.py | AAlben/kaggle_SETI_search_ET | 9194db0372d0d49ab914f29f1bf221ea8a32474a | [
"MIT"
] | null | null | null | snippets_dataset.py | AAlben/kaggle_SETI_search_ET | 9194db0372d0d49ab914f29f1bf221ea8a32474a | [
"MIT"
] | null | null | null | snippets_dataset.py | AAlben/kaggle_SETI_search_ET | 9194db0372d0d49ab914f29f1bf221ea8a32474a | [
"MIT"
] | null | null | null | import os
import pdb
import torch
import random
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
class SnippetsDataset(Dataset):
    """Train/validation dataset of .npy snippets referenced by a labels CSV.

    The CSV must have columns ``id`` and ``target``; each id maps to the
    file ``<data_path>/<id[0]>/<id>.npy``.  The 'train' and 'valid' modes
    now yield complementary, seed-deterministic splits of the same CSV.
    """

    def __init__(self, data_path, labels_csv_file, mode, transform=None,
                 train_valid_rate=0.8, seed=123):
        """Build the file/label lists for the requested split.

        Args:
            data_path: root directory containing the .npy files.
            labels_csv_file: CSV with ``id`` and ``target`` columns.
            mode: 'train' or 'valid'.
            transform: optional callable applied to each sample array.
            train_valid_rate: fraction of rows assigned to the train split.
            seed: seed for the (global) RNG and the pandas sampling.
        """
        random.seed(seed)  # kept for backward compatibility: seeds global RNG
        self.seed = seed
        self.data_path = data_path
        self.mode = mode
        self.train_valid_rate = train_valid_rate
        self.transform = transform
        self.files, self.labels = self.load(labels_csv_file)

    def __getitem__(self, index):
        """Return the normalized sample array and its label."""
        file, label = self.files[index], self.labels[index]
        data = np.load(file).astype(float)  # promote stored float16 -> float64
        # Scale by the global max-abs value.  (The original built the same
        # global max 6 times and broadcast it, which is numerically
        # identical; per-channel normalization may have been intended --
        # TODO confirm with the author.)
        data = data / np.abs(data).max()
        if self.transform:
            data = self.transform(data)
        return data, label

    def __len__(self):
        return len(self.files)

    def load(self, labels_csv_file):
        """Return (files, targets) for this split.

        Bug fix: 'train' and 'valid' previously sampled the CSV
        independently with the same random_state, so validation rows could
        also appear in training (data leak).  The valid split is now the
        exact complement of the train sample.
        """
        df = pd.read_csv(labels_csv_file)
        df['file'] = df['id'].apply(
            lambda x: os.path.join(self.data_path, x[0], f'{x}.npy'))
        train_df = df.sample(frac=self.train_valid_rate, random_state=self.seed)
        if self.mode == 'train':
            df = train_df
        elif self.mode == 'valid':
            df = df.drop(train_df.index)
        else:
            raise ValueError(f"mode must be 'train' or 'valid', got {self.mode!r}")
        return df.file.tolist(), df.target.tolist()
class SnippetsDatasetTest(Dataset):
    """Test-time dataset built by picking files from each sub-folder of
    ``data_path`` (no labels)."""

    def __init__(self, data_path, transform=None, seed=123):
        random.seed(seed)
        self.data_path = data_path
        self.transform = transform
        self.files = self.load()

    def __getitem__(self, index):
        sample = np.load(self.files[index]).astype(float)
        # Divide every channel by the global max-abs value (the list below
        # repeats the same global maximum six times).
        scale = np.array([np.abs(sample).max() for _ in range(6)]).reshape(6, 1, 1)
        sample = sample / scale
        if self.transform:
            sample = self.transform(sample)
        return sample

    def __len__(self):
        return len(self.files)

    def load(self):
        """Collect two files per sub-folder of data_path."""
        collected = []
        for entry in os.listdir(self.data_path):
            folder_path = os.path.join(self.data_path, entry)
            if not os.path.isdir(folder_path):
                continue
            contents = os.listdir(folder_path)
            chosen = random.choice(contents)
            # NOTE(review): both the first listed file and a randomly chosen
            # file are kept per folder, which can yield duplicates --
            # presumably intentional sampling; confirm with the author.
            collected.append(os.path.join(folder_path, contents[0]))
            collected.append(os.path.join(folder_path, chosen))
        return collected
| 33.84 | 105 | 0.618597 | 2,408 | 0.948779 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.035855 |
87122655263199d9b374ca6048b68654898e3165 | 2,809 | py | Python | tf_agents/keras_layers/bias_layer.py | FlorisHoogenboom/agents | 2cd5a61e1838b52012271f1fb8617c29a55279a9 | [
"Apache-2.0"
] | 2 | 2021-10-30T16:57:37.000Z | 2021-11-17T10:21:17.000Z | tf_agents/keras_layers/bias_layer.py | FlorisHoogenboom/agents | 2cd5a61e1838b52012271f1fb8617c29a55279a9 | [
"Apache-2.0"
] | null | null | null | tf_agents/keras_layers/bias_layer.py | FlorisHoogenboom/agents | 2cd5a61e1838b52012271f1fb8617c29a55279a9 | [
"Apache-2.0"
] | 2 | 2020-06-05T18:38:16.000Z | 2020-07-08T14:41:42.000Z | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras layer mirroring tf.contrib.layers.bias_add."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
class BiasLayer(tf.keras.layers.Layer):
  """Keras layer that only adds a bias to the input.

  `BiasLayer` implements the operation: `output = input + bias`.

  Arguments:
    bias_initializer: Initializer for the bias vector.

  Input shape:
    nD tensor with shape: `(batch_size, ..., input_dim)`. The most common
    situation would be a 2D input with shape `(batch_size, input_dim)`.

  Output shape:
    Same shape as the input.
  """

  def __init__(self, bias_initializer='zeros', **kwargs):
    # Mirror the Keras convention of accepting `input_dim` as a shortcut
    # for a one-dimensional `input_shape`.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(BiasLayer, self).__init__(**kwargs)
    self.bias_initializer = tf.keras.initializers.get(bias_initializer)
    self.supports_masking = True

  def build(self, input_shape):
    input_shape = tf.TensorShape(input_shape)
    # Rank-1 inputs get a single scalar bias; otherwise one bias per
    # element of the last dimension.
    if input_shape.rank == 1:
      bias_shape = (1,)
    else:
      bias_shape = (tf.compat.dimension_value(input_shape[-1]),)
    self.bias = self.add_weight(
        'bias',
        shape=bias_shape,
        initializer=self.bias_initializer,
        dtype=self.dtype,
        trainable=True)
    self.built = True

  def call(self, inputs):
    if inputs.shape.rank != 1:
      return tf.nn.bias_add(inputs, self.bias)
    # Rank-1 input: bias_add needs rank >= 2, so temporarily append a
    # trailing axis and strip it afterwards.
    expanded = tf.expand_dims(inputs, -1)
    return tf.nn.bias_add(expanded, self.bias)[..., 0]

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = {
        'bias_initializer':
            tf.keras.initializers.serialize(self.bias_initializer),
    }
    base_config = super(BiasLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| 33.843373 | 80 | 0.698825 | 1,958 | 0.697045 | 0 | 0 | 0 | 0 | 0 | 0 | 1,397 | 0.49733 |
8712aff8958c54d885bd36104125e3e7046bb22c | 29,060 | py | Python | ScriptsForDiffPlatforms/SteamOrderPricesCSmoney.py | dr11m/steamSkins | 74202ec23c2b096ef18bad0a408ca7f6cccd187f | [
"MIT"
] | null | null | null | ScriptsForDiffPlatforms/SteamOrderPricesCSmoney.py | dr11m/steamSkins | 74202ec23c2b096ef18bad0a408ca7f6cccd187f | [
"MIT"
] | null | null | null | ScriptsForDiffPlatforms/SteamOrderPricesCSmoney.py | dr11m/steamSkins | 74202ec23c2b096ef18bad0a408ca7f6cccd187f | [
"MIT"
] | 1 | 2022-02-20T21:01:16.000Z | 2022-02-20T21:01:16.000Z | import os
from selenium import webdriver
import random
import time
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
import linecache
import sys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import requests
import MySQLdb
import datetime
import mysql.connector
import logging
import re
import urllib
from pyvirtualdisplay import Display
import subprocess
#ะฝะฐัััะพะนะบะฐ ะธ ะณะปะฐะฒะฝัะต ััะฝะบัะธะธ
try:
min_perccent = "39"
allowed_min_percent = 0.37
#loging errors
# Create a logging instance
logger = logging.getLogger('SteamOrderPrices')
logger.setLevel(logging.INFO) # you can set this to be DEBUG, INFO, ERROR
# Assign a file-handler to that instance
fh = logging.FileHandler("ErrorsSteamOrderPrices.txt")
fh.setLevel(logging.ERROR) # again, you can set this differently
# Format your logs (optional)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter) # This will set the format to the file handler
# Add the handler to your logging instance
logger.addHandler(fh)
def PrintException():
    """Print the current exception's file/line/source, then shut the
    whole script down via close_script()."""
    _, exc_value, trace = sys.exc_info()
    frame = trace.tb_frame
    line_no = trace.tb_lineno
    file_name = frame.f_code.co_filename
    linecache.checkcache(file_name)
    source_line = linecache.getline(file_name, line_no, frame.f_globals)
    try:
        print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(
            file_name, line_no, source_line.strip(), exc_value))
    except:
        pass
    close_script()
def PrintException_only_print():
    """Print details of the current exception without terminating the
    script (unlike PrintException, which also calls close_script)."""
    # Removed the dead local `succsess = 0` (assigned, never read).
    print("was an error")
    exc_type, exc_obj, tb = sys.exc_info()
    frame = tb.tb_frame
    lineno = tb.tb_lineno
    filename = frame.f_code.co_filename
    linecache.checkcache(filename)
    line = linecache.getline(filename, lineno, frame.f_globals)
    print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(
        filename, lineno, line.strip(), exc_obj))
def close_modals():
    """Best-effort click on the Steam modal dialog's close button;
    silently does nothing if the dialog (or the driver) is absent."""
    selector = "body > div.newmodal > div.newmodal_header_border > div > div.newmodal_close"
    try:
        close_button = driver.find_element_by_css_selector(selector)
        close_button.click()
    except:
        pass
def close_mysql_connection():
    """Close the global MySQL cursor and connection, ignoring failures.

    Bug fix: the original accessed ``mycursor.close`` / ``mydb.close``
    without calling them (missing parentheses), so the connection was
    never actually closed -- unlike the correct calls in close_script().
    """
    global mydb, mycursor
    try:
        mycursor.close()
    except Exception:
        pass
    try:
        mydb.close()
    except Exception:
        pass
def close_script():
    """Release DB and browser resources (best-effort), then terminate
    the process via sys.exit()."""
    for cleanup in (
        lambda: mycursor.close(),
        lambda: mydb.close(),
        lambda: driver.close(),
        lambda: driver.quit(),
    ):
        try:
            cleanup()
        except:
            pass
    sys.exit()
# rub_usd
rub_usd = requests.get("https://www.cbr-xml-daily.ru/daily_json.js")
rub_usd = rub_usd.json()
rub_usd = float(rub_usd["Valute"]["USD"]["Value"])
print("current exchange rate", rub_usd)
#start driver
display = Display(visible=0, size=(1600, 900), backend='xvfb')
display.start()
chrome_options = Options()
chrome_options.add_argument("user-data-dir=/home/work/profilesForAll/SteamOrders2") # linux
chrome_options.add_argument("window-size=1600,900")
driver = webdriver.Chrome(executable_path='/usr/bin/chromedriver', chrome_options=chrome_options)
driver.set_window_size(1600, 900)
print("-- check if need to loging into steam")
wait = WebDriverWait(driver, 15)
driver.get("https://steamcommunity.com/market/")
time.sleep(5)
try:
element = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#marketWalletBalanceAmount")))
except:
print("cant verify login into steam on the first try")
time.sleep(5)
driver.get("https://steamcommunity.com/market/")
try:
element = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#marketWalletBalanceAmount")))
except:
print("login into steam")
# log in to Steam
driver.find_element_by_css_selector("#global_action_menu > a").click()
time.sleep(1)
# login
input_field = driver.find_element_by_css_selector('#input_username')
input_field.clear()
time.sleep(2)
# pass
input_field = driver.find_element_by_css_selector('#input_password')
input_field.clear()
time.sleep(3)
input_field = driver.find_element_by_css_selector('#twofactorcode_entry')
# get 2fa code
os.chdir("/home/work/steamguard-cli")
guard = subprocess.check_output('build/steamguard 2fa', shell=True).decode("utf-8").strip()
print(guard)
input_field.clear()
input_field.send_keys(guard)
time.sleep(2)
driver.find_element_by_css_selector(
"#login_twofactorauth_buttonset_entercode > div.auth_button.leftbtn > div.auth_button_h5").click()
time.sleep(10)
# ัะฝะพะฒะฐ ะฟัะพะฒะตััะตะผ, ะฒะพัะปะธ ะปะธ ะผั ะฒ ัะธััะตะผั
try:
element = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#marketWalletBalanceAmount")))
except:
msg = "need to login into steam"
logger.exception(msg)
raise ValueError('need to login into steam')
#ะฟะพะปััะฐะตะผ ะฟัะตะดะผะตัั ะฝะฐ tryskins
print("-- get items from tryskins")
wait = WebDriverWait(driver, 60)
skins_from_parser = []
dates_diff = 777
if dates_diff > 60:
driver.get(
"https://table.altskins.com/site/items?ItemsFilter%5Bknife%5D=0&ItemsFilter%5Bknife%5D=1&ItemsFilter%5Bstattrak%5D=0&ItemsFilter%5Bstattrak%5D=1&ItemsFilter%5Bsouvenir%5D=0&ItemsFilter%5Bsouvenir%5D=1&ItemsFilter%5Bsticker%5D=0&ItemsFilter%5Bsticker%5D=1&ItemsFilter%5Btype%5D=1&ItemsFilter%5Bservice1%5D=showsteama&ItemsFilter%5Bservice2%5D=showcsmoneyw&ItemsFilter%5Bunstable1%5D=1&ItemsFilter%5Bunstable2%5D=0&ItemsFilter%5Bhours1%5D=192&ItemsFilter%5Bhours2%5D=192&ItemsFilter%5BpriceFrom1%5D=22&ItemsFilter%5BpriceTo1%5D=&ItemsFilter%5BpriceFrom2%5D=&ItemsFilter%5BpriceTo2%5D=&ItemsFilter%5BsalesBS%5D=&ItemsFilter%5BsalesTM%5D=&ItemsFilter%5BsalesST%5D=3&ItemsFilter%5Bname%5D=&ItemsFilter%5Bservice1Minutes%5D=301&ItemsFilter%5Bservice2Minutes%5D=301&ItemsFilter%5BpercentFrom1%5D="+ min_perccent +"&ItemsFilter%5BpercentFrom2%5D=&ItemsFilter%5Btimeout%5D=777&ItemsFilter%5Bservice1CountFrom%5D=&ItemsFilter%5Bservice1CountTo%5D=&ItemsFilter%5Bservice2CountFrom%5D=&ItemsFilter%5Bservice2CountTo%5D=&ItemsFilter%5BpercentTo1%5D=&ItemsFilter%5BpercentTo2%5D=&refreshonoff=1")
time.sleep(6)
element = 0
try:
element = driver.find_element_by_css_selector("#page-wrapper > div.row.border-bottom > nav > ul > li:nth-child(4) > a > img")
except:pass
if element != 0:
driver.find_element_by_css_selector("#page-wrapper > div.row.border-bottom > nav > ul > li:nth-child(4) > a > img").click()
time.sleep(5)
driver.find_element_by_css_selector("#imageLogin").click()
time.sleep(5)
driver.get("https://www.google.com/")
time.sleep(3)
driver.get(
"https://table.altskins.com/site/items?ItemsFilter%5Bknife%5D=0&ItemsFilter%5Bknife%5D=1&ItemsFilter%5Bstattrak%5D=0&ItemsFilter%5Bstattrak%5D=1&ItemsFilter%5Bsouvenir%5D=0&ItemsFilter%5Bsouvenir%5D=1&ItemsFilter%5Bsticker%5D=0&ItemsFilter%5Bsticker%5D=1&ItemsFilter%5Btype%5D=1&ItemsFilter%5Bservice1%5D=showsteama&ItemsFilter%5Bservice2%5D=showcsmoneyw&ItemsFilter%5Bunstable1%5D=1&ItemsFilter%5Bunstable2%5D=0&ItemsFilter%5Bhours1%5D=192&ItemsFilter%5Bhours2%5D=192&ItemsFilter%5BpriceFrom1%5D=22&ItemsFilter%5BpriceTo1%5D=&ItemsFilter%5BpriceFrom2%5D=&ItemsFilter%5BpriceTo2%5D=&ItemsFilter%5BsalesBS%5D=&ItemsFilter%5BsalesTM%5D=&ItemsFilter%5BsalesST%5D=3&ItemsFilter%5Bname%5D=&ItemsFilter%5Bservice1Minutes%5D=301&ItemsFilter%5Bservice2Minutes%5D=301&ItemsFilter%5BpercentFrom1%5D="+ min_perccent +"&ItemsFilter%5BpercentFrom2%5D=&ItemsFilter%5Btimeout%5D=777&ItemsFilter%5Bservice1CountFrom%5D=&ItemsFilter%5Bservice1CountTo%5D=&ItemsFilter%5Bservice2CountFrom%5D=&ItemsFilter%5Bservice2CountTo%5D=&ItemsFilter%5BpercentTo1%5D=&ItemsFilter%5BpercentTo2%5D=&refreshonoff=1")
time.sleep(8)
element = 0
try:
element = driver.find_element_by_css_selector("#page-wrapper > div.row.border-bottom > nav > ul > li:nth-child(4) > a > img")
except: pass
if element != 0:
raise ValueError('tryskins login error')
#ะฟะพะดะณััะถะฐะตะผ ะฒัะต ัะปะตะผะตะฝัั ะธะท parser'a
for x in range(2):
try:
mainBlocks = driver.find_elements_by_css_selector('table > tbody > tr:nth-child(n)')
len_start = len(mainBlocks)
element = driver.find_element_by_css_selector(
'table > tbody > tr:nth-child(' + str(len(mainBlocks)) + ')')
element.location_once_scrolled_into_view
time.sleep(0.2)
element = driver.find_element_by_css_selector(
'table > tbody > tr:nth-child(' + str(len(mainBlocks) - 29) + ')')
element.location_once_scrolled_into_view
time.sleep(0.2)
element = driver.find_element_by_css_selector(
'table > tbody > tr:nth-child(' + str(len(mainBlocks)) + ')')
element.location_once_scrolled_into_view
except: continue
for ind in range (10):
mainBlocks = driver.find_elements_by_css_selector('table > tbody > tr:nth-child(n)')
len_after_scroll = len(mainBlocks)
if len_start == len_after_scroll:
time.sleep(0.5)
if len_start != len_after_scroll:
break
mainBlocks = driver.find_elements_by_css_selector('table > tbody > tr:nth-child(n)')
len_after_scroll = len(mainBlocks)
if len_start == len_after_scroll:
break
XML = driver.find_element_by_css_selector('#w0 > table > tbody').get_attribute('innerHTML')
XML = XML.split('<tr class="tr"')
del XML[0]
for item_xml in XML:
try:
name = re.search('market_hash_name=([^<]*)&sort_by=price', item_xml)
name = name[1].strip()
price_csm = re.search('attribute="pricecsmoneyw">([^<]*)</span><span', item_xml).group(1).strip()
sales = re.search('class="sales">([^<]*)</div><img src="/images/steam', item_xml)
sales = sales[1].strip()
purchased_count = 0
#ะฟะพะปััะฐะตะผ, ัะบะพะปัะบะพ ะฟัะตะดะผะตัะพะฒ ะฑัะปะพ ะบัะฟะปะตะฝะพ ะทะฐ ะฟะพัะปะตะดะฝะธะต 7 ะดะฝะตะน
close_mysql_connection()
mycursor.execute("SELECT name FROM PurchasedItems WHERE DATE(date) > (NOW() - INTERVAL 7 DAY);")
items_mysql_last7days = mycursor.fetchall()
close_mysql_connection()
connect_to_mysql("SteamBuyOrders")
for item_mysql in items_mysql_last7days:
if item_mysql == name:
print("added count")
purchased_count += 1
skins_from_parser.append({"name":name, "price_csm": float(price_csm), "sales": sales, "overstock": -1, "db_id": -1, "purchased_count": purchased_count, "allowed_count": 0, "buy_order_price": 0})
except Exception as e:
PrintException_only_print()
continue
print("******************************")
print(len(skins_from_parser))
print("******************************")
#ะฟัะพะฒะตััะตะผ, ะตััั ะปะธ ัะผััะป ะฟะพะปััะฐัั ะพะฒะตัััะพะบ ะฝะฐ ัะฐะนัะต (ััะพ ะฝัะถะฝะพ ะดะตะปะฐัั ะบะฐะถะดัะต 24 ัะฐัะฐ)
need_to_check_on_cs_money = False
mycursor.execute("SELECT name,quanity,date,id FROM csMoneyLimits")
items_mysql = mycursor.fetchall()
now = datetime.datetime.now()
for item_db in items_mysql:
for item_parser in skins_from_parser:
if item_db[0] == item_parser["name"]:
item_parser["db_id"] = item_db[3] #add table's id
delta = now - item_db[2]
# ะบะพะป-ะฒะพ ัะฐัะพะฒ
time_diff_in_hours = int(delta.total_seconds()) / 60 / 60
print('time difference: ', time_diff_in_hours)
if time_diff_in_hours < 24:
#print("string before change -", item_parser) #debug
#print("change overstock to DB value (DB value -)", item_db[1]) #debug
item_parser["overstock"] = item_db[1]
#print("string after change -", item_parser) #debug
#input("test input")
break
print("******************************")
print(len(skins_from_parser))
print("******************************")
# check whether we need to visit cs.money to fetch overstock data (i.e. any item still has overstock == -1)
for item in skins_from_parser:
if item["overstock"] == -1:
need_to_check_on_cs_money = True
#ะฝัะถะฝะพ ะฟะพะปััะธัั ะพะฒะตัััะพะบะธ ั ัะฐะนัะฐ csmoney
if need_to_check_on_cs_money == True:
print("-- getting overstocks from csmoney")
#check if I'm logged into csMoney
driver.get("https://old.cs.money/")
try:
element = wait.until(EC.visibility_of_element_located(
(By.CSS_SELECTOR, "div.header_menu_mobile > div.balance_header.superclass_space.block_balance")))
time.sleep(1)
except:
# log in to csMoney
print("start entering into csMoney")
driver.find_element_by_css_selector("#authenticate_button > a").click()
time.sleep(1)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#imageLogin")))
driver.find_element_by_css_selector("#imageLogin").click()
time.sleep(5)
# ัะฝะพะฒะฐ ะฟัะพะฒะตััะตะผ, ะฒะพัะปะธ ะปะธ ะผั ะฒ ัะธััะตะผั
try:
element = wait.until(EC.visibility_of_element_located(
(By.CSS_SELECTOR, "div.header_menu_mobile > div.balance_header.superclass_space.block_balance")))
except:
raise ValueError('need to login into csMoney')
#ะฝะฐะดะพ ะฟะพะปะฝะพัััั ะฟัะพะณััะทะธัั ัััะฐะฝะธัั
try:
time.sleep(20)
element = wait.until(EC.visibility_of_element_located(
(By.CSS_SELECTOR, "#main_container_bot > div.items > div:nth-child(1)")))
except:
raise ValueError('cant properly load csmoney')
#ะฟัะพะฒะตััะตะผ ะพะฒะตัััะพะบะธ
driver.find_element_by_css_selector("#header_panel > ul.header_menu > li:nth-child(1) > a").click()
time.sleep(5)
for item in skins_from_parser:
if item["overstock"] == -1:
input_field = driver.find_element_by_css_selector('#universal_skin_input')
input_field.clear()
time.sleep(0.2)
input_field.send_keys(item["name"])
time.sleep(0.2)
driver.find_element_by_css_selector("#check_skin_status_btn > a").click()
time.sleep(2)
try:
overstock_csm = driver.find_element_by_css_selector("#overstock_difference").text
overstock_csm = int(overstock_csm)
print("overstock -", overstock_csm)
except:
print("was error getting overstock_difference")
continue
#ะปะธะฑะพ ะพะฑะฝะพะฒะปัะตะผ ะธะฝัะพัะผะฐัะธั ั ะฟัะตะดะผะตัะฐ ะฒ ะฑะด, ะปะธะฑะพ ะดะพะฑะฐะฒะปัะตะผ ะฝะพะฒัั ะทะฐะฟะธัั
#ะตัะปะธ ะฟัะตะดะผะตัะฐ ะฝะต ะฑัะปะพ ะฒ ะฑะด
if item["db_id"] == -1:
mycursor.execute(
"INSERT INTO csMoneyLimits (name, quanity) VALUES (%s, %s)",
(item["name"], str(overstock_csm),))
mydb.commit()
#ะตัะปะธ ะฟัะตะดะผะตั ะฑัะป ะฒ ะฑะด
if item["db_id"] != -1:
print("item exists in DB (id not -1)", item["db_id"]) #debug
#ัะฟะตัะฒะฐ ัะดะฐะปัะตะผ ัััะฐัะตะฒัะธะน ะฟัะตะดะผะตั ะธะท ะฑะด
mycursor.execute(
"DELETE FROM `csMoneyLimits` WHERE id = %s",
(item["db_id"],))
mydb.commit()
print("deleted from DB") #debug
#ัะตะฟะตัั ะดะพะฑะฐะฒะปัะตะผ ะฝะพะฒัะน
mycursor.execute(
"INSERT INTO csMoneyLimits (name, quanity) VALUES (%s, %s)",
(item["name"], str(overstock_csm),))
mydb.commit()
print("added to DB") #debug
#ะฟัะพััะฐะฒะปัะตะผ ะพะฒะตัััะพะบ
item["overstock"] = int(overstock_csm)
#ัะพััะธััะตะผ ะธ ะฒัััะฐะฒะปัะตะผ ัะฐะทัะตัะตะฝะฝะพะต ะบะพะป-ะฒะพ ะฟะพะบัะฟะพะบ
index = -1
for item in skins_from_parser:
index += 1
if item["overstock"] == -1:
del skins_from_parser[index]
continue
predicted_purchases = 0
if int(item["sales"]) > 0:
predicted_purchases = int(int(item["sales"]) / 3)
allowed_count = int(item["overstock"]) - (predicted_purchases + int(item["purchased_count"]))
item["allowed_count"] = allowed_count
if allowed_count < 1:
del skins_from_parser[index]
# ะพะฟัััะธะปะธ, ัะฐะบ ะบะฐะบ ะฒัะตัะฐะฒะฝะพ ะฝัะถะฝะพ ะพัะบััะฒะฐัั ัััะฐะฝะธัั ะบะฐะถะดะพะณะพ ัะบะธะฝะฐ ะดะปั ะฒัััะฐะฒะปะตะฝะธั ะฑะฐะนะพัะดะตัะฐ (ัะฐะผ ะธ ะฑัะดะตะผ ะฑัะฐัั ะผะธะฝ ัะตะฝั)
"""
#ะฟะพะปััะฐะตะผ usd-rub, ะดะปั ััะพะณะพ ะฒะพะทะผะพะถะฝะพ ะฝัะถะฝะพ ะฑัะดะตั ะทะฐะณััะทะธัั ะฝะตัะบะพะปัะบะพ ะฟัะตะดะผะตัะพะฒ
#ะฟัะพะฒะตััะตะผ, ะฝะตั ะปะธ ั ะฝะฐั ะณะพัะพะฒะพะณะพ ะบัััะฐ ะฒ ะฑะด (ะฝะต ััะฐััะต 3ั ัะฐัะพะฒ)
mycursor.execute("SELECT quanity, date FROM csMoneyLimits WHERE id = 38")
exchange_rate_and_date = mycursor.fetchone()
exchange_rate = 0
#ะฟะพะปััะฐะตะผ, ะตัะปะธ ะฝะต ััะฐััะต 4ั
ัะฐัะพะฒ
now = datetime.datetime.now()
delta = now - exchange_rate_and_date[1]
# ะบะพะป-ะฒะพ ัะฐัะพะฒ
time_diff_in_hours = int(delta.total_seconds()) / 60 / 60
print('time difference exchange rate: ', time_diff_in_hours)
if time_diff_in_hours < 4:
exchange_rate = exchange_rate_and_date[0]
#ะฟะพะปััะฐะตะผ ะบััั ะฒะฐะปัั (ััะฐะฒะฝะธะฒะฐั ัะตะฝั ะดะพะปะปะฐัะฐ ะฝะฐ ะฟะฐััะตัะต ะธ ัะตะฝั ะฒ ััะฑะปัั
ะฒ ััะธะผะต ั ะฝะตัะบะพะปัะบะธั
ะฟัะตะดะผะตัะพะฒ)
if exchange_rate == 0:
skins_for_exchange_rate = []
#ะฟะพะปััะฐะตะผ ะผะธะฝะธะผะฐะปัะฝัั ัะตะฝั ะพัะดะตัะฐ ะฒ ััะฑะปัั
for index in range(3):
print("index exchange rate -", index)
url_name = urllib.parse.quote(skins_from_parser[index]["name"])
driver.get("https://steamcommunity.com/market/listings/730/"+url_name)
try:
element = wait.until(EC.visibility_of_element_located(
(By.CSS_SELECTOR, "span.market_commodity_orders_header_promote:last-child")))
except:
raise ValueError('cant get exchange rate')
try:
driver.find_element_by_css_selector("#market_buyorder_info_show_details > span").click() #View more details
except: pass
lowest_prices = driver.find_elements_by_css_selector("span.market_commodity_orders_header_promote:last-child")
if len(lowest_prices) == 1:
print("lenth is 1")
lowest_price = lowest_prices[0].text
if len(lowest_prices) == 2:
print("lenth is 2")
lowest_price = lowest_prices[1].text
lowest_price = float(lowest_price.strip().replace(',', '.')[:-5])
print("name -", skins_from_parser[index]["name"], "lowest price -", lowest_price)
skins_for_exchange_rate.append({"name": skins_from_parser[index]["name"], "price_usd": skins_from_parser[index]["price"], "price_rub": lowest_price})
#ะฟะพะปััะฐะตะผ ะบััั ะฒะฐะปัั ะธ ะฟัะพะฒะตััะตะผ ะตะณะพ ะฝะฐ ะฐะดะตะบะฒะฐัะฝะพััั
print("skins_for_exchange_rate", skins_for_exchange_rate)
rates = []
#ะฟะพะปััะฐะตะผ ะบััั ะดะปั ััะตั
ะฟัะตะดะผะตัะพะฒ
index = 0
for item in skins_for_exchange_rate:
index += 1
compared_price = float(item["price_rub"]) / float(item["price_usd"])
rates.append({"name":index, "exchange_rate": compared_price})
print("rates", rates)
#ััะฐะฒะฝะธะฒะฐะตะผ ะบัััั ะฒะฐะปัั
for rate_1 in rates:
for rate_2 in rates:
if rate_1["name"] == rate_2["name"]:
continue
rates_difference = rate_1["exchange_rate"] - rate_2["exchange_rate"]
if rates_difference < 0:
rates_difference = rates_difference * -1
#ะตัะปะธ ะทะฝะฐัะตะฝะธะต ะฑะพะปััะต, ัะพ ะฑะตัะตะผ!
print("rates_difference -", rates_difference)
if rates_difference < 0.1:
print("gotcha!")
#ะฑะตัะตะผ ะฝะฐะธะฑะพะปััะธะน ะบััั ะฒะฐะปัั
if rate_1["exchange_rate"] > rate_2["exchange_rate"]:
exchange_rate = rate_1["exchange_rate"]
if rate_1["exchange_rate"] < rate_2["exchange_rate"]:
exchange_rate = rate_2["exchange_rate"]
break
#ะฟัะพััะฐะฒะปัะตะผ ะบััั ะฒะฐะปัั ะฒ ะฑะด
if exchange_rate != 0:
now = datetime.datetime.now()
mycursor.execute(
"UPDATE `csMoneyLimits` SET `quanity`=%s,`date`=%s WHERE id = 38",
(exchange_rate, now,))
mydb.commit()
print("exchange rate -", exchange_rate)
# ะฟัะพััะฐะฒะปัะตะผ ัะตะฝั ะดะปั ะฑะฐะนะพัะดะตัะฐ ะดะปั ะบะฐะถะดะพะณะพ ะฟัะตะดะผะตัะฐ
for item in skins_from_parser:
#usd min market price #exchange rate #plus 1 rubble
item["buy_order_price"] = item["price"] * float(exchange_rate) + 1
"""
# ัะดะฐะปัะตะผ ะฒัะต ะฒัััะฐะฒะปะตะฝะฝัะต ะฑะฐะนะพัะดะตัะฐ
print("-- start deleting buyorders")
driver.get("https://steamcommunity.com/market/")
time.sleep(3)
try:
element = wait.until(EC.visibility_of_element_located(
(By.CSS_SELECTOR, "#result_0")))
except:
raise ValueError('cant load steam market page')
# ะฟะพะปััะฐะตะผ ะฑะฐะปะฐะฝั, ััะพะฑั ะพััะตะธัั ะดะพัะพะณะธะต ะฟัะตะดะผะตัั
balance = driver.find_element_by_css_selector("#header_wallet_balance").text.strip()[:-5]
try:
balance = balance.replace(',', '.') # ะตัะปะธ ัะตะปะพะต ัะธัะปะพ (ะฑะตะท ะทะฐะฟััะพะน)
except:
print("integer price")
pass
balance = float(balance)
high_limit_of_orders = balance * 10
# ัะดะฐะปัะตะผ ะฑะฐะนะพัะดะตัะฐ
buy_orders_cancel_buttons = driver.find_elements_by_css_selector(
"#tabContentsMyListings > div:last-child > div.market_listing_row.market_recent_listing_row > div.market_listing_edit_buttons.actual_content > div > a")
print("length items to del", len(buy_orders_cancel_buttons))
for index in range(len(buy_orders_cancel_buttons)):
driver.find_element_by_css_selector(
"#tabContentsMyListings > div:last-child > div:nth-child(3) > div.market_listing_edit_buttons.actual_content > div > a").click() # ะบะปะธะบะฐะตะผ ะฒัะตะณะดะฐ ะฝะฐ ะฟะตัะฒัั ะบะฝะพะฟะบั cancel, ัะบ ะพะฝะธ ะฟัะพะฟะฐะดะฐัั ะฟะพัะปะต ะบะปะธะบะฐ
buy_orders_cancel_buttons = driver.find_elements_by_css_selector("#tabContentsMyListings > div:last-child > div.market_listing_row.market_recent_listing_row > div.market_listing_edit_buttons.actual_content > div > a")
print("length items to del", len(buy_orders_cancel_buttons))
time.sleep(6)
#ะฒัััะฐะฒะปัะตะผ ะฑะฐะนะพัะดะตัะฐ
errors = 0
spent_money = 0
for item in skins_from_parser:
# if we accumulate >= 3 errors during this pass, stop working and inspect the logs
if errors == 3:
raise ValueError('got 3 errors while placing buy orders')
#ะพัะบััะฒะฐะตะผ ะฟัะตะดะผะตั
url_name = urllib.parse.quote(item["name"])
driver.get("https://steamcommunity.com/market/listings/730/" + url_name)
time.sleep(7)
try:
element = wait.until(EC.visibility_of_element_located(
(By.CSS_SELECTOR, "span.market_commodity_orders_header_promote:last-child")))
except:
print("can load items page +1 error")
errors += 1
continue
try:
driver.find_element_by_css_selector("#market_buyorder_info_show_details > span").click() #View more details
except: pass
#ะฟะพะปััะฐะตะผ ะผะธะฝ ัะตะฝั
lowest_prices = driver.find_elements_by_css_selector("span.market_commodity_orders_header_promote:last-child")
if len(lowest_prices) == 1:
print("lenth is 1")
lowest_price = lowest_prices[0].text
if len(lowest_prices) == 2:
print("lenth is 2")
lowest_price = lowest_prices[1].text
lowest_price = lowest_price.strip()[:-5]
try:
lowest_price = lowest_price.replace(',', '.') #ะตัะปะธ ัะตะปะพะต ัะธัะปะพ (ะฑะตะท ะทะฐะฟััะพะน)
except:
print("integer price")
pass
# check whether the item's expected profit is acceptable
print("lowest_price", lowest_price)
lowest_price = float(lowest_price) + 3 # ะฟัะธะฑะฐะฒะปัะตะผ ััะฑะปั ะบ ัะตะฝะต
dep_price_rub = item["price_csm"] * rub_usd * 0.96
expected_profit = (lowest_price / dep_price_rub -1) * -1
print("expected profit -", expected_profit)
if expected_profit < allowed_min_percent:
print("expected profit is less than allowed min profit")
continue
# check whether the wallet balance is enough to buy the item
if lowest_price > balance:
print("items's price is more than balance")
continue
#ะฟะพะปััะฐะตะผ ะธะผั ะฟัะตะดะผะตัะฐ ัะพ ัััะฐะฝะธัั (ะดะพะฟ ะฟัะพะฒะตัะบะฐ)
name_of_the_item_on_page = driver.find_element_by_css_selector("#mainContents > div.market_listing_nav_container > div.market_listing_nav > a:nth-child(2)").text.strip()
if name_of_the_item_on_page != item["name"]:
print("name on the market page doesn't math with the actual item name")
continue
spent_money += lowest_price
# in Steam, the upper cap for open buy orders is x10 of the wallet balance
if spent_money > high_limit_of_orders:
print("limit exceeded", spent_money, high_limit_of_orders)
break
#ะฒัััะฐะฒะปัะตะผ ะพัะดะตั
if len(lowest_prices) == 1:
driver.find_element_by_css_selector("#market_buyorder_info > div:nth-child(1) > div:nth-child(1) > a > span").click() #place buy order
if len(lowest_prices) == 2:
driver.find_element_by_css_selector("#market_commodity_order_spread > div:nth-child(2) > div > div.market_commodity_orders_header > a > span").click()
time.sleep(1)
input_field = driver.find_element_by_css_selector('#market_buy_commodity_input_price')
input_field.clear()
time.sleep(0.2)
input_field.send_keys(str(lowest_price)) #ะฒะฒะพะดะธะผ ัะตะฝั
time.sleep(0.2)
driver.find_element_by_css_selector("#market_buyorder_dialog_purchase > span").click() #ะฒัััะฐะฒะปัะตะผ ะพัะดะตั
time.sleep(0.2)
#ะฟัะพะฒะตััะตะผ, ะฝะต ะฟะพัะฒะธะปะฐัั ะปะธ ะพัะธะฑะบะฐ ะฟัะธ ะฟะพะฟััะบะต ะฒัััะฐะฒะธัั ะพัะดะตั (ะฝัะถะฝะพ ะฟะพััะฐะฒะธัั ะณะฐะปะพัะบั)
try:
error_text = driver.find_element_by_css_selector("#market_buyorder_dialog_error_text").text.strip()
except:
pass
if error_text == "You must agree to the terms of the Steam Subscriber Agreement to complete this transaction.":
print("tick!")
driver.find_element_by_css_selector("#market_buyorder_dialog_accept_ssa").click() #ััะฐะฒะธะผ ั
ะฐะปะพัะบั
time.sleep(0.2)
driver.find_element_by_css_selector("#market_buyorder_dialog_purchase > span").click() #ัะฝะพะฒะฐ ะฒัััะฐะฒะปัะตะผ ะพัะดะตั
time.sleep(0.2)
print("add one more buy order with a price -", lowest_price)
time.sleep(4)
close_modals()
except Exception as e:
telegram_bot_sendtext("SteamOrderPrices: ะะพะทะฝะธะบะปะฐ ะพัะธะฑะบะฐ, ะฝัะถะฝะพ ะฒัััะฝััั")
logger.exception(e) # Will send the errors to the file
PrintException()
close_script()
#test print
#for item in skins_from_parser:
# print(item)
print("Successful")
close_script()
| 43.699248 | 1,098 | 0.618582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15,813 | 0.511946 |
871306cf01b343214f8b57e9282bd4078015ff09 | 308 | py | Python | Example/Algo/LinearRegressionTest.py | m-payal/AlgorithmsAndDataStructure | db53da00cefa3b681b4fcebfc0d22ee91cd489f9 | [
"MIT"
] | 4 | 2021-04-12T06:18:57.000Z | 2021-12-03T15:18:09.000Z | Example/Algo/LinearRegressionTest.py | m-payal/AlgorithmsAndDataStructure | db53da00cefa3b681b4fcebfc0d22ee91cd489f9 | [
"MIT"
] | 1 | 2021-07-14T16:40:21.000Z | 2021-07-14T16:40:21.000Z | Example/Algo/LinearRegressionTest.py | m-payal/AlgorithmsAndDataStructure | db53da00cefa3b681b4fcebfc0d22ee91cd489f9 | [
"MIT"
] | 2 | 2020-12-04T17:49:11.000Z | 2021-01-04T15:25:33.000Z | # __Date__ : 1/5/2020.
# __Author__ : CodePerfectPlus
# __Package__ : Python 3
# __GitHub__ : https://www.github.com/codeperfectplus
#
from Algorithms import LinearRegression
# The training points lie exactly on the line y = 2*x + 1,
# so a correct fit should predict 25 for x = 12.
X = [12, 24, 36]
y = [25, 49, 73]
lr = LinearRegression()
lr.fit(X, y)
y_predict = lr.predict(12)
print(y_predict)
| 19.25 | 56 | 0.675325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.470779 |
871332137eb3d17fcc5d980cab9a1e1de4de3d41 | 1,657 | py | Python | release/stubs.min/System/Windows/Forms/__init___parts/ControlStyles.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 182 | 2017-06-27T02:26:15.000Z | 2022-03-30T18:53:43.000Z | release/stubs.min/System/Windows/Forms/__init___parts/ControlStyles.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 28 | 2017-06-27T13:38:23.000Z | 2022-03-15T11:19:44.000Z | release/stubs.min/System/Windows/Forms/__init___parts/ControlStyles.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 67 | 2017-06-28T09:43:59.000Z | 2022-03-20T21:17:10.000Z | class ControlStyles(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the style and behavior of a control.
enum (flags) ControlStyles,values: AllPaintingInWmPaint (8192),CacheText (16384),ContainerControl (1),DoubleBuffer (65536),EnableNotifyMessage (32768),FixedHeight (64),FixedWidth (32),Opaque (4),OptimizedDoubleBuffer (131072),ResizeRedraw (16),Selectable (512),StandardClick (256),StandardDoubleClick (4096),SupportsTransparentBackColor (2048),UserMouse (1024),UserPaint (2),UseTextForAccessibility (262144)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
AllPaintingInWmPaint=None
CacheText=None
ContainerControl=None
DoubleBuffer=None
EnableNotifyMessage=None
FixedHeight=None
FixedWidth=None
Opaque=None
OptimizedDoubleBuffer=None
ResizeRedraw=None
Selectable=None
StandardClick=None
StandardDoubleClick=None
SupportsTransparentBackColor=None
UserMouse=None
UserPaint=None
UseTextForAccessibility=None
value__=None
| 32.490196 | 409 | 0.731442 | 1,653 | 0.997586 | 0 | 0 | 0 | 0 | 0 | 0 | 822 | 0.496077 |
871339da3c5ad19f363b1ca88a51d6c2fea28c59 | 3,858 | py | Python | scripts/venv/lib/python2.7/site-packages/cogent/align/progressive.py | sauloal/cnidaria | fe6f8c8dfed86d39c80f2804a753c05bb2e485b4 | [
"MIT"
] | 3 | 2015-11-20T08:44:42.000Z | 2016-12-14T01:40:03.000Z | scripts/venv/lib/python2.7/site-packages/cogent/align/progressive.py | sauloal/cnidaria | fe6f8c8dfed86d39c80f2804a753c05bb2e485b4 | [
"MIT"
] | 1 | 2017-09-04T14:04:32.000Z | 2020-05-26T19:04:00.000Z | scripts/venv/lib/python2.7/site-packages/cogent/align/progressive.py | sauloal/cnidaria | fe6f8c8dfed86d39c80f2804a753c05bb2e485b4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import with_statement
from cogent import LoadTree
from cogent.phylo import nj as NJ
from cogent.phylo.distance import EstimateDistances
from cogent.core.info import Info
from cogent.util import progress_display as UI
__author__ = "Peter Maxwell"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Peter Maxwell", "Gavin Huttley"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Peter Maxwell"
__email__ = "pm67nz@gmail.com"
__status__ = "Production"
# NOTE(review): this is legacy Python 2 code (print statements below);
# it will not run under Python 3 without conversion.
@UI.display_wrap
def TreeAlign(model, seqs, tree=None, indel_rate=0.01, indel_length=0.01,
        ui = None, ests_from_pairwise=True, param_vals=None):
    """Returns a multiple alignment and tree.
    Uses the provided substitution model and a tree for determining the
    progressive order. If a tree is not provided a Neighbour Joining tree is
    constructed from pairwise distances estimated from pairwise aligning the
    sequences. If running in parallel, only the distance estimation is
    parallelised and only the master CPU returns the alignment and tree, other
    CPU's return None, None.
    Arguments:
        - model: a substitution model
        - seqs: a sequence collection
        - indel_rate, indel_length: parameters for the progressive pair-HMM
        - ests_from_pairwise: if no tree provided and True, the median value
          of the substitution model parameters are used
        - param_vals: named key, value pairs for model parameters. These
          override ests_from_pairwise.
    """
    # Model parameters that are never taken from the pairwise estimates below.
    _exclude_params = ['mprobs', 'rate', 'bin_switch']
    # Work on a copy: param_vals is stored into the result Info at the end and
    # would otherwise alias (and mutate) the caller's mapping.
    if param_vals:
        param_vals = dict(param_vals)
    else:
        param_vals = {}
    # Accept either a plain {name: sequence} dict or a sequence collection.
    if isinstance(seqs, dict):
        seq_names = seqs.keys()
    else:
        seq_names = seqs.getSeqNames()
    two_seqs = len(seq_names) == 2
    if tree:
        # A user-supplied tree must carry exactly the same names as seqs.
        tip_names = tree.getTipNames()
        tip_names.sort()
        seq_names.sort()
        assert tip_names == seq_names, \
            "names don't match between seqs and tree: tree=%s; seqs=%s" % \
            (tip_names, seq_names)
        ests_from_pairwise = False
    elif two_seqs:
        # With only two sequences the guide tree is trivial and the
        # alignment is a single pairwise alignment.
        tree = LoadTree(tip_names=seqs.getSeqNames())
        ests_from_pairwise = False
    else:
        # No tree given: estimate pairwise distances (optionally estimating
        # model parameters per pair) and build a Neighbour Joining guide tree.
        if ests_from_pairwise:
            est_params = [param for param in model.getParamList() \
                    if param not in _exclude_params]
        else:
            est_params = None
        dcalc = EstimateDistances(seqs, model, do_pair_align=True,
                est_params=est_params)
        dcalc.run()
        dists = dcalc.getPairwiseDistances()
        tree = NJ.nj(dists)
    LF = model.makeLikelihoodFunction(tree.bifurcating(name_unnamed=True), aligned=False)
    if ests_from_pairwise and not param_vals:
        # we use the Median to avoid the influence of outlier pairs
        param_vals = {}
        for param in est_params:
            numbers = dcalc.getParamValues(param)
            print "Param Estimate Summary Stats: %s" % param
            print numbers.summarize()
            param_vals[param] = numbers.Median
    ui.display("Doing %s alignment" % ["progressive", "pairwise"][two_seqs])
    with LF.updatesPostponed():
        # Fix all model and indel parameters as constants, then extract the
        # Viterbi alignment from the likelihood function's pair-HMM.
        for param, val in param_vals.items():
            LF.setParamRule(param, value=val, is_constant=True)
        LF.setParamRule('indel_rate', value=indel_rate, is_constant=True)
        LF.setParamRule('indel_length', value=indel_length, is_constant=True)
        LF.setSequences(seqs)
        edge = LF.getLogLikelihood().edge
        (vtLnL, align) = edge.getViterbiScoreAndAlignment(0.5)
        # Record the parameters used on the alignment for provenance.
        info = Info()
        info["AlignParams"] = param_vals
        info["AlignParams"].update(dict(indel_length=indel_length, indel_rate=indel_rate))
        align.Info = info
    return align, tree
| 38.58 | 89 | 0.664593 | 0 | 0 | 0 | 0 | 3,331 | 0.863401 | 0 | 0 | 1,313 | 0.340332 |
8713e249df8ab7b42369aeb0097bb65f5da669b5 | 992 | py | Python | di_scoring/admin.py | tboz203/django-scoring | 9766ce861022e2f42d42ced156966099f18c2397 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | di_scoring/admin.py | tboz203/django-scoring | 9766ce861022e2f42d42ced156966099f18c2397 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | di_scoring/admin.py | tboz203/django-scoring | 9766ce861022e2f42d42ced156966099f18c2397 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from django.contrib import admin
from di_scoring import models
# Copy-pasted, with love, from:
# `http://stackoverflow.com/questions/10543032/how-to-show-all-fields-of-model-in-admin-page`
# Subclassed ModelAdmins' list_display will contain all model fields except
# for "id".
class CustomModelAdminMixin(object):
    """Mixin that fills ``list_display`` with every model field except ``id``.

    Mix this into a ``ModelAdmin`` subclass so the admin change list shows
    all fields of the model without listing them by hand.
    """

    def __init__(self, model, admin_site):
        # Collect the names of all model fields, leaving out the "id" key.
        shown = []
        for field in model._meta.fields:
            if field.name != "id":
                shown.append(field.name)
        self.list_display = shown
        super(CustomModelAdminMixin, self).__init__(model, admin_site)
# Register every scoring model with one shared admin class; the
# CustomModelAdminMixin defined above makes the change list show all
# fields of each model except "id".
@admin.register(
    models.Manager,
    models.School,
    models.TeamChallenge,
    models.Location,
    models.Team,
    models.TC_Field,
    models.TC_Appraiser,
    models.TC_Event,
    models.TC_Score,
    models.IC_Appraiser,
    models.IC_Event,
    models.IC_Score,
    models.TC_Appraiser_Permission,
)
class DefaultModelAdmin(CustomModelAdminMixin, admin.ModelAdmin):
    # No customisation beyond the mixin's list_display behaviour.
    pass
| 30.060606 | 93 | 0.681452 | 348 | 0.350806 | 0 | 0 | 439 | 0.44254 | 0 | 0 | 210 | 0.211694 |
8713f50880ce1a36e5b15f90eca7dd6b247d359c | 4,654 | py | Python | Day Pogress - 18~100/Day 17/constructor.py | Abbhiishek/Python | 3ad5310ca29469f353f9afa99531f01273eec6bd | [
"MIT"
] | 1 | 2022-02-04T07:04:34.000Z | 2022-02-04T07:04:34.000Z | Day Pogress - 18~100/Day 17/constructor.py | Abbhiishek/Python | 3ad5310ca29469f353f9afa99531f01273eec6bd | [
"MIT"
] | 12 | 2022-02-13T12:10:32.000Z | 2022-02-17T09:36:49.000Z | Day Pogress - 18~100/Day 17/constructor.py | Abbhiishek/Python | 3ad5310ca29469f353f9afa99531f01273eec6bd | [
"MIT"
] | null | null | null | """
how to create a class
a class is constructed through constructor
=> a class can be constructed through specific function called "__init__(self)"
"""
# let's create a Class
class Person:
    """A simple record of a person, used to demonstrate constructors.

    Stores six attributes -- age, marks, phone, user_id, profession and
    goal -- each with a matching get_*/set_* accessor pair, plus
    print_details() to dump everything to the console.
    """

    def __init__(self, age, marks, phone, user_id, profession, goal):
        """Initialise one person record.

        :param age: int
        :param marks: int
        :param phone: int
        :param user_id: int
        :param profession: string
        :param goal: string
        """
        self.age = age
        self.marks = marks
        self.phone = phone
        self.user_id = user_id
        self.profession = profession
        self.goal = goal

    # ---- read accessors --------------------------------------------------

    def get_age(self):
        """Return the stored age (int)."""
        return self.age

    def get_marks(self):
        """Return the stored marks (int)."""
        return self.marks

    def get_phone(self):
        """Return the stored phone number (int)."""
        return self.phone

    def get_user_id(self):
        """Return the stored user id (int)."""
        return self.user_id

    def get_profession(self):
        """Return the stored profession (string)."""
        return self.profession

    def get_goal(self):
        """Return the stored goal (string)."""
        return self.goal

    # ---- write accessors -------------------------------------------------

    def set_age(self, age):
        """Replace the stored age."""
        self.age = age

    def set_marks(self, marks):
        """Replace the stored marks."""
        self.marks = marks

    def set_phone(self, phone):
        """Replace the stored phone number."""
        self.phone = phone

    def set_user_id(self, user_id):
        """Replace the stored user id."""
        self.user_id = user_id

    def set_profession(self, profession):
        """Replace the stored profession."""
        self.profession = profession

    def set_goal(self, goal):
        """Replace the stored goal."""
        self.goal = goal

    def print_details(self):
        """Print every attribute of this person, one per line."""
        # print() with two arguments inserts a single separating space,
        # exactly as if each attribute were printed by hand.
        for label, value in (
            ("age: ", self.age),
            ("marks: ", self.marks),
            ("phone: ", self.phone),
            ("user_id: ", self.user_id),
            ("profession: ", self.profession),
            ("goal: ", self.goal),
        ):
            print(label, value)
# Demo: build a Person, overwrite every attribute through the setters,
# then inspect the object in several different ways.
abhishek = Person(21, 90, 31114111, 12345, "student", "to be a good programmer")
abhishek.set_age(20)
abhishek.set_marks(90)
abhishek.set_phone(9674144556) # overwrite the placeholder phone number
abhishek.set_user_id(1)
abhishek.set_profession("student")
abhishek.set_goal("to be a good programmer")
print("the address is ")
print(abhishek) # default repr: class name plus the object's memory address
print(abhishek.__dict__) # the instance attributes as a dict
print(abhishek.__doc__) # the class docstring
abhishek.print_details() # formatted dump of all attributes
profession = abhishek.get_profession() # read one attribute back via its getter
print("the profession is " + profession) # and print it
8714cfb5d5d59c4388d610853c7edfc7de4282a8 | 9,309 | py | Python | src/compas_rhino/artists/networkartist.py | duchaoyu/compas | d484500d68d44fd6e227c3bbee20a2edde6e6c96 | [
"MIT"
] | null | null | null | src/compas_rhino/artists/networkartist.py | duchaoyu/compas | d484500d68d44fd6e227c3bbee20a2edde6e6c96 | [
"MIT"
] | null | null | null | src/compas_rhino/artists/networkartist.py | duchaoyu/compas | d484500d68d44fd6e227c3bbee20a2edde6e6c96 | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from functools import partial
import compas_rhino
from compas.geometry import centroid_points
from compas.utilities import color_to_colordict
from compas.artists import NetworkArtist
from .artist import RhinoArtist
colordict = partial(color_to_colordict, colorformat='rgb', normalize=False)
class NetworkArtist(RhinoArtist, NetworkArtist):
    """Artist for drawing network data structures.

    Parameters
    ----------
    network : :class:`compas.datastructures.Network`
        A COMPAS network.
    layer : str, optional
        The parent layer of the network.
    nodes : list of int, optional
        A list of node identifiers.
        Default is ``None``, in which case all nodes are drawn.
    edges : list, optional
        A list of edge keys (as uv pairs) identifying which edges to draw.
        The default is ``None``, in which case all edges are drawn.
    nodecolor : rgb-tuple or dict of rgb-tuples, optional
        The color specification for the nodes.
    edgecolor : rgb-tuple or dict of rgb-tuples, optional
        The color specification for the edges.
    show_nodes : bool, optional
    show_edges : bool, optional
    """

    def __init__(self,
                 network,
                 layer=None,
                 nodes=None,
                 edges=None,
                 nodecolor=None,
                 edgecolor=None,
                 show_nodes=True,
                 show_edges=True,
                 **kwargs):
        super(NetworkArtist, self).__init__(network=network, layer=layer, **kwargs)
        self.nodes = nodes
        self.edges = edges
        self.node_color = nodecolor
        self.edge_color = edgecolor
        self.show_nodes = show_nodes
        self.show_edges = show_edges

    # ==========================================================================
    # clear
    # ==========================================================================
    # Drawn objects are named "<network name>.<part>.<key>", so each clear
    # method deletes by the matching wildcard pattern.

    def clear(self):
        """Delete all Rhino objects drawn by this artist."""
        guids = compas_rhino.get_objects(name="{}.*".format(self.network.name))
        compas_rhino.delete_objects(guids, purge=True)

    def clear_nodes(self):
        """Delete the Rhino objects created by :meth:`draw_nodes`.

        Note
        ----
        BUGFIX: the pattern used to be ``"{}.vertex.*"``, which never matches
        the ``"{}.node.{}"`` names assigned in :meth:`draw_nodes`, so drawn
        nodes were never deleted.
        """
        guids = compas_rhino.get_objects(name="{}.node.*".format(self.network.name))
        compas_rhino.delete_objects(guids, purge=True)

    def clear_edges(self):
        """Delete the Rhino objects created by :meth:`draw_edges`."""
        guids = compas_rhino.get_objects(name="{}.edge.*".format(self.network.name))
        compas_rhino.delete_objects(guids, purge=True)

    def clear_nodelabels(self):
        """Delete the Rhino objects created by :meth:`draw_nodelabels`.

        Note
        ----
        BUGFIX: the pattern used to be ``"{}.nodexlabel.*"`` (typo), which
        never matches the ``"{}.nodelabel.{}"`` names assigned in
        :meth:`draw_nodelabels`.
        """
        guids = compas_rhino.get_objects(name="{}.nodelabel.*".format(self.network.name))
        compas_rhino.delete_objects(guids, purge=True)

    def clear_edgelabels(self):
        """Delete the Rhino objects created by :meth:`draw_edgelabels`."""
        guids = compas_rhino.get_objects(name="{}.edgelabel.*".format(self.network.name))
        compas_rhino.delete_objects(guids, purge=True)

    # ==========================================================================
    # draw
    # ==========================================================================

    def draw(self, nodes=None, edges=None, nodecolor=None, edgecolor=None):
        """Draw the network using the chosen visualisation settings.

        Parameters
        ----------
        nodes : list, optional
            A list of nodes to draw.
            Default is ``None``, in which case all nodes are drawn.
        edges : list, optional
            A list of edges to draw.
            The default is ``None``, in which case all edges are drawn.
        nodecolor : tuple or dict of tuple, optional
            The color specification for the nodes.
            The default color is the value of ``~NetworkArtist.default_nodecolor``.
        edgecolor : tuple or dict of tuple, optional
            The color specification for the edges.
            The default color is the value of ``~NetworkArtist.default_edgecolor``.

        Returns
        -------
        list
            The GUIDs of the created Rhino objects.
        """
        self.clear()
        guids = self.draw_nodes(nodes=nodes, color=nodecolor)
        guids += self.draw_edges(edges=edges, color=edgecolor)
        return guids

    def draw_nodes(self, nodes=None, color=None):
        """Draw a selection of nodes.

        Parameters
        ----------
        nodes : list, optional
            A list of nodes to draw.
            Default is ``None``, in which case all nodes are drawn.
        color : tuple or dict of tuple, optional
            The color specification for the nodes.
            The default color is the value of ``~NetworkArtist.default_nodecolor``.

        Returns
        -------
        list
            The GUIDs of the created Rhino objects.
        """
        self.node_color = color
        node_xyz = self.node_xyz
        nodes = nodes or self.nodes
        points = []
        for node in nodes:
            points.append({
                'pos': node_xyz[node],
                'name': "{}.node.{}".format(self.network.name, node),
                'color': self.node_color.get(node, self.default_nodecolor)
            })
        return compas_rhino.draw_points(points, layer=self.layer, clear=False, redraw=False)

    def draw_edges(self, edges=None, color=None):
        """Draw a selection of edges.

        Parameters
        ----------
        edges : list, optional
            A list of edges to draw.
            The default is ``None``, in which case all edges are drawn.
        color : tuple or dict of tuple, optional
            The color specification for the edges.
            The default color is the value of ``~NetworkArtist.default_edgecolor``.

        Returns
        -------
        list
            The GUIDs of the created Rhino objects.
        """
        self.edge_color = color
        node_xyz = self.node_xyz
        edges = edges or self.edges
        lines = []
        for edge in edges:
            lines.append({
                'start': node_xyz[edge[0]],
                'end': node_xyz[edge[1]],
                'color': self.edge_color.get(edge, self.default_edgecolor),
                'name': "{}.edge.{}-{}".format(self.network.name, *edge)
            })
        return compas_rhino.draw_lines(lines, layer=self.layer, clear=False, redraw=False)

    # ==========================================================================
    # draw labels
    # ==========================================================================

    def draw_nodelabels(self, text=None, color=None):
        """Draw labels for a selection nodes.

        Parameters
        ----------
        text : dict, optional
            A dictionary of node labels as node-text pairs.
            The default value is ``None``, in which case every node will be labelled with its key.
        color : 3-tuple or dict of 3-tuple, optional
            The color specification of the labels.
            The default color is the same as the default color of the nodes.

        Returns
        -------
        list
            The GUIDs of the created Rhino objects.
        """
        # Accept 'key' (default), 'index', or an explicit node->text mapping.
        if not text or text == 'key':
            node_text = {node: str(node) for node in self.nodes}
        elif text == 'index':
            node_text = {node: str(index) for index, node in enumerate(self.nodes)}
        elif isinstance(text, dict):
            node_text = text
        else:
            raise NotImplementedError
        node_xyz = self.node_xyz
        node_color = colordict(color, node_text.keys(), default=self.default_nodecolor)
        labels = []
        for node in node_text:
            labels.append({
                'pos': node_xyz[node],
                'name': "{}.nodelabel.{}".format(self.network.name, node),
                'color': node_color[node],
                'text': node_text[node]
            })
        return compas_rhino.draw_labels(labels, layer=self.layer, clear=False, redraw=False)

    def draw_edgelabels(self, text=None, color=None):
        """Draw labels for a selection of edges.

        Parameters
        ----------
        text : dict, optional
            A dictionary of edgelabels as edge-text pairs.
            The default value is ``None``, in which case every edge will be labelled with its key.
        color : 3-tuple or dict of 3-tuple, optional
            The color specification of the labels.
            The default color is the same as the default color of the edges.

        Returns
        -------
        list
            The GUIDs of the created Rhino objects.
        """
        if text is None:
            edge_text = {edge: "{}-{}".format(*edge) for edge in self.edges}
        elif isinstance(text, dict):
            edge_text = text
        else:
            raise NotImplementedError
        node_xyz = self.node_xyz
        edge_color = colordict(color, edge_text.keys(), default=self.default_edgecolor)
        labels = []
        for edge in edge_text:
            labels.append({
                # Place each edge label at the midpoint of the edge.
                'pos': centroid_points([node_xyz[edge[0]], node_xyz[edge[1]]]),
                'name': "{}.edgelabel.{}-{}".format(self.network.name, *edge),
                'color': edge_color[edge],
                'text': edge_text[edge]
            })
        return compas_rhino.draw_labels(labels, layer=self.layer, clear=False, redraw=False)
8714de68982283bcaeff5e6a0b71251d452dbab8 | 1,873 | py | Python | tests/x7/lib/test_shell_tools.py | gribbg/x7-lib | 1ec5807d2c85d522a9f678f995d0f2fe42735d18 | [
"BSD-2-Clause"
] | null | null | null | tests/x7/lib/test_shell_tools.py | gribbg/x7-lib | 1ec5807d2c85d522a9f678f995d0f2fe42735d18 | [
"BSD-2-Clause"
] | null | null | null | tests/x7/lib/test_shell_tools.py | gribbg/x7-lib | 1ec5807d2c85d522a9f678f995d0f2fe42735d18 | [
"BSD-2-Clause"
] | null | null | null | # Originally auto-generated on 2021-02-15-12:14:36 -0500 EST
# By '--verbose --verbose x7.lib.shell_tools'
from unittest import TestCase
from x7.lib.annotations import tests
from x7.testing.support import Capture
from x7.lib import shell_tools
from x7.lib.shell_tools_load import ShellTool
# NOTE(review): @tests (from x7.lib.annotations) appears to associate each
# test case with the function(s) it covers -- confirm against x7 docs.
@tests(shell_tools)
class TestModShellTools(TestCase):
    """Tests for stand-alone functions in x7.lib.shell_tools module"""
    @tests(shell_tools.Dir)
    def test_dir(self):
        # Dir() behaves like built-in dir() with dunder names filtered out.
        self.assertIn('__init__', dir(self))
        self.assertNotIn('__init__', shell_tools.Dir(self))
        self.assertIn('test_dir', shell_tools.Dir(self))
    @tests(shell_tools.help)
    def test_help(self):
        # shell_tools.help must print exactly what built-in help prints
        # for an ordinary object...
        with Capture() as orig:
            help(shell_tools.Dir)
        with Capture() as modified:
            shell_tools.help(shell_tools.Dir)
        self.assertEqual(orig.stdout(), modified.stdout())
        self.assertIn('Like dir(v), but only non __ names', orig.stdout())
        # ...and for a ShellTool wrapper it must show the wrapped function's
        # help, while built-in help on the wrapper documents the ShellTool
        # class itself (hence '__init__' shows up there).
        st_dir = ShellTool('Dir', shell_tools.Dir)
        with Capture() as as_shell_tool:
            shell_tools.help(st_dir)
        self.assertEqual(orig.stdout(), as_shell_tool.stdout())
        self.assertNotIn('__init__', as_shell_tool.stdout())
        with Capture() as orig_as_shell_tool:
            help(st_dir)
        self.assertIn('__init__', orig_as_shell_tool.stdout())
    @tests(shell_tools.help)
    def test_help_on_help(self):
        # Wrapping help itself in a ShellTool must not change its help text.
        with Capture() as orig:
            help(help)
        with Capture() as modified:
            shell_tools.help(ShellTool('help', shell_tools.help))
        self.assertEqual(orig.stdout(), modified.stdout())
    @tests(shell_tools.tools)
    def test_tools(self):
        # tools() prints a multi-line help summary to stdout.
        with Capture() as out:
            shell_tools.tools()
        self.assertIn('Help for tools', out.stdout())
        self.assertGreaterEqual(out.stdout().count('\n'), 5)
| 36.019231 | 74 | 0.662573 | 1,559 | 0.832355 | 0 | 0 | 1,579 | 0.843033 | 0 | 0 | 288 | 0.153764 |
8716b23b59dc13aa5f3ac5d9e7d97c51ba82770e | 754 | py | Python | HmacCalculation/Hmac256Calculation.py | likloadm/cross_platform_crypto | e828c9c9504c022d3882d174f7a8fd424aa19991 | [
"Unlicense"
] | 15 | 2020-11-14T09:50:50.000Z | 2022-03-13T16:39:19.000Z | HmacCalculation/Hmac256Calculation.py | likloadm/cross_platform_crypto | e828c9c9504c022d3882d174f7a8fd424aa19991 | [
"Unlicense"
] | 1 | 2021-04-26T07:42:27.000Z | 2021-07-06T07:39:17.000Z | HmacCalculation/Hmac256Calculation.py | likloadm/cross_platform_crypto | e828c9c9504c022d3882d174f7a8fd424aa19991 | [
"Unlicense"
] | 6 | 2021-11-03T11:28:36.000Z | 2022-03-16T13:03:20.000Z | from Crypto.Hash import HMAC, SHA256
import base64
def hmac256Calculation(keyHmac, data):
    """Return the raw HMAC-SHA256 digest (bytes) of *data* keyed with *keyHmac*.

    Both arguments are str and are encoded as ASCII; a UnicodeEncodeError
    is raised if either contains non-ASCII characters.
    """
    # Use the standard library instead of the third-party Crypto package:
    # hmac/hashlib produce byte-identical output and drop the dependency.
    import hashlib
    import hmac
    return hmac.new(keyHmac.encode("ascii"), data.encode("ascii"), hashlib.sha256).digest()
def base64Encoding(input):
    """Encode *input* (bytes) with Base64 and return the result as a str."""
    return base64.b64encode(input).decode("UTF-8")
# Demo: compute an HMAC-SHA256 over a sample plaintext and print the result.
print("HMAC 256 calculation")
hmac256KeyString = "hmac256ForAesEncryption"
plaintext = "The quick brown fox jumps over the lazy dog"
print("hmac256Key: " + hmac256KeyString)
print("plaintext: " + plaintext)
hmac256 = hmac256Calculation(hmac256KeyString, plaintext)
hmacBase64 = base64Encoding(hmac256)
# Reuse hmacBase64 computed above instead of Base64-encoding the digest a
# second time (the original discarded the variable and re-encoded).
print("hmac256 length: " + str(len(hmac256)) + " (Base64) data: " + hmacBase64)
| 32.782609 | 94 | 0.733422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 176 | 0.233422 |
8716b6c023660b91d85d95f0d7e719aaefef6ec6 | 408 | py | Python | elvaapp/elvaapp/doctype/vache/vache_dashboard.py | ovresko/elvadesk | 10090244ce5bd379cf16763e216e6011870de937 | [
"MIT"
] | 1 | 2020-12-28T16:35:41.000Z | 2020-12-28T16:35:41.000Z | elvaapp/troupeau/doctype/vache/vache_dashboard.py | ovresko/elvadesk | 10090244ce5bd379cf16763e216e6011870de937 | [
"MIT"
] | null | null | null | elvaapp/troupeau/doctype/vache/vache_dashboard.py | ovresko/elvadesk | 10090244ce5bd379cf16763e216e6011870de937 | [
"MIT"
] | null | null | null | from frappe import _
def get_data():
    """Dashboard configuration for the Vache doctype.

    Returns the linked-document layout used to render the dashboard:
    the link fieldname, a non-standard fieldname override, and three
    transaction groups (reproduction, production, health).
    """
    reproduction = {
        'label': _('Reproduction'),
        'items': ['Insemination', 'Velage', 'Diagnostique'],
    }
    production = {
        'label': _('Production'),
        'items': ['Lactation item'],
    }
    sante = {
        'label': _('Sante'),
        'items': ['Dossier medical', 'Poids'],
    }
    return {
        'fieldname': 'vache',
        'non_standard_fieldnames': {
            'Insemination': 'vache',
        },
        'transactions': [reproduction, production, sante],
    }
| 17 | 53 | 0.544118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.561275 |
8716deba2bfaed14d7a9b60985573ae32efd9715 | 1,447 | py | Python | setup.py | patlegu/python-SNS-API | 5eafe0309bdceacc8a5b617067d01f5a21814b14 | [
"Apache-2.0"
] | 9 | 2019-02-22T10:18:12.000Z | 2022-03-25T11:23:32.000Z | setup.py | patlegu/python-SNS-API | 5eafe0309bdceacc8a5b617067d01f5a21814b14 | [
"Apache-2.0"
] | 4 | 2019-10-10T10:48:40.000Z | 2021-12-21T14:23:23.000Z | setup.py | patlegu/python-SNS-API | 5eafe0309bdceacc8a5b617067d01f5a21814b14 | [
"Apache-2.0"
] | 5 | 2020-01-23T19:23:04.000Z | 2022-03-22T16:44:38.000Z | #!/usr/bin/python
import setuptools
import os
# Read the package version by executing __version__.py into a dict
# (avoids importing the package itself at build time).
version = {}
with open(os.path.join('stormshield', 'sns', 'sslclient', '__version__.py'), 'r') as fh:
    exec(fh.read(), version)
# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="stormshield.sns.sslclient",
    version=version['__version__'],
    author="Remi Pauchet",
    author_email="remi.pauchet@stormshield.eu",
    description="SSL API client for Stormshield Network Security appliances",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/stormshield/python-SNS-API",
    license='Apache License 2.0',
    packages=setuptools.find_packages(),
    # Installs the "snscli" console script as the command-line entry point.
    entry_points={
        'console_scripts': ['snscli=stormshield.sns.cli:main'],
    },
    install_requires=[
        'pygments',
        'requests[socks]',
        'requests_toolbelt',
        'colorlog',
        'defusedxml',
        # Platform/interpreter-specific dependencies (PEP 508 markers):
        'pyreadline; platform_system == "Windows"',
        'py2-ipaddress; python_version < "3"'
    ],
    include_package_data=True,
    tests_require=["nose"],
    test_suite='nose.collector',
    classifiers=[
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Topic :: System :: Networking",
        "Environment :: Console"
    ],
)
| 30.145833 | 88 | 0.641327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 740 | 0.511403 |
8719077594c4ee561b05b1f90d93c6bca7fc1d70 | 23 | py | Python | vpc_hyp2/ansible/roles/vm_create/files/variables.py | dhanraj-vedanth/IaaS_VPC_CDN | 262dbc7db63d5e76398dadc8015256fb37986e36 | [
"MIT"
] | null | null | null | vpc_hyp2/ansible/roles/vm_create/files/variables.py | dhanraj-vedanth/IaaS_VPC_CDN | 262dbc7db63d5e76398dadc8015256fb37986e36 | [
"MIT"
] | null | null | null | vpc_hyp2/ansible/roles/vm_create/files/variables.py | dhanraj-vedanth/IaaS_VPC_CDN | 262dbc7db63d5e76398dadc8015256fb37986e36 | [
"MIT"
] | null | null | null |
# Name(s) of the domain(s) this role's files operate on.
dom_list = ["an_h1"]
| 5.75 | 20 | 0.565217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.304348 |
871a4c4bcd5ba8f6c98873acee5df7de49d7bdba | 50,713 | py | Python | Examples/ApiExamples/ex_structured_document_tag.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 02b257df8da9892fcce671c473c2ef27b68b5087 | [
"MIT"
] | 3 | 2021-12-04T22:17:28.000Z | 2022-02-22T03:30:01.000Z | Examples/ApiExamples/ex_structured_document_tag.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 02b257df8da9892fcce671c473c2ef27b68b5087 | [
"MIT"
] | 4 | 2021-11-26T10:01:06.000Z | 2021-12-14T15:01:11.000Z | Examples/ApiExamples/ex_structured_document_tag.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 02b257df8da9892fcce671c473c2ef27b68b5087 | [
"MIT"
] | 2 | 2021-10-20T18:06:22.000Z | 2021-10-29T20:59:18.000Z | # Copyright (c) 2001-2022 Aspose Pty Ltd. All Rights Reserved.
#
# This file is part of Aspose.Words. The source code in this file
# is only intended as a supplement to the documentation, and is provided
# "as is", without warranty of any kind, either expressed or implied.
import uuid
from datetime import datetime
import aspose.words as aw
import aspose.pydrawing as drawing
from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, GOLDS_DIR
from document_helper import DocumentHelper
class ExStructuredDocumentTag(ApiExampleBase):
    def test_repeating_section(self):
        # NOTE(review): the lines between #ExStart/#ExEnd are extracted and
        # published verbatim as an API example, so no extra commentary is
        # added inside the markers.
        #ExStart
        #ExFor:StructuredDocumentTag.sdt_type
        #ExSummary:Shows how to get the type of a structured document tag.
        doc = aw.Document(MY_DIR + "Structured document tags.docx")
        sd_tags = [node.as_structured_document_tag() for node in doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG, True)]
        self.assertEqual(aw.markup.SdtType.REPEATING_SECTION, sd_tags[0].sdt_type)
        self.assertEqual(aw.markup.SdtType.REPEATING_SECTION_ITEM, sd_tags[1].sdt_type)
        self.assertEqual(aw.markup.SdtType.RICH_TEXT, sd_tags[2].sdt_type)
        #ExEnd
    def test_apply_style(self):
        # NOTE(review): the lines between #ExStart/#ExEnd are extracted and
        # published verbatim as an API example.
        #ExStart
        #ExFor:StructuredDocumentTag
        #ExFor:StructuredDocumentTag.node_type
        #ExFor:StructuredDocumentTag.style
        #ExFor:StructuredDocumentTag.style_name
        #ExFor:MarkupLevel
        #ExFor:SdtType
        #ExSummary:Shows how to work with styles for content control elements.
        doc = aw.Document()
        builder = aw.DocumentBuilder(doc)
        # Below are two ways to apply a style from the document to a structured document tag.
        # 1 - Apply a style object from the document's style collection:
        quote_style = doc.styles.get_by_style_identifier(aw.StyleIdentifier.QUOTE)
        sdt_plain_text = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.INLINE)
        sdt_plain_text.style = quote_style
        # 2 - Reference a style in the document by name:
        sdt_rich_text = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.RICH_TEXT, aw.markup.MarkupLevel.INLINE)
        sdt_rich_text.style_name = "Quote"
        builder.insert_node(sdt_plain_text)
        builder.insert_node(sdt_rich_text)
        self.assertEqual(aw.NodeType.STRUCTURED_DOCUMENT_TAG, sdt_plain_text.node_type)
        tags = doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG, True)
        for node in tags:
            sdt = node.as_structured_document_tag()
            self.assertEqual(aw.StyleIdentifier.QUOTE, sdt.style.style_identifier)
            self.assertEqual("Quote", sdt.style_name)
        #ExEnd
def test_check_box(self):
#ExStart
#ExFor:StructuredDocumentTag.__init__(DocumentBase,SdtType,MarkupLevel)
#ExFor:StructuredDocumentTag.checked
#ExFor:StructuredDocumentTag.set_checked_symbol(int,str)
#ExFor:StructuredDocumentTag.set_unchecked_symbol(int,str)
#ExSummary:Show how to create a structured document tag in the form of a check box.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
sdt_check_box = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.CHECKBOX, aw.markup.MarkupLevel.INLINE)
sdt_check_box.checked = True
# We can set the symbols used to represent the checked/unchecked state of a checkbox content control.
sdt_check_box.set_checked_symbol(0x00A9, "Times New Roman")
sdt_check_box.set_unchecked_symbol(0x00AE, "Times New Roman")
builder.insert_node(sdt_check_box)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.check_box.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.check_box.docx")
tags = [node.as_structured_document_tag() for node in doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG, True)]
self.assertTrue(tags[0].checked)
self.assertEqual(tags[0].xml_mapping.store_item_id, "")
def test_date(self):
#ExStart
#ExFor:StructuredDocumentTag.calendar_type
#ExFor:StructuredDocumentTag.date_display_format
#ExFor:StructuredDocumentTag.date_display_locale
#ExFor:StructuredDocumentTag.date_storage_format
#ExFor:StructuredDocumentTag.full_date
#ExSummary:Shows how to prompt the user to enter a date with a structured document tag.
doc = aw.Document()
# Insert a structured document tag that prompts the user to enter a date.
# In Microsoft Word, this element is known as a "Date picker content control".
# When we click on the arrow on the right end of this tag in Microsoft Word,
# we will see a pop up in the form of a clickable calendar.
# We can use that popup to select a date that the tag will display.
sdt_date = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.DATE, aw.markup.MarkupLevel.INLINE)
# Display the date, according to the Saudi Arabian Arabic locale.
sdt_date.date_display_locale = 1025 #CultureInfo.get_culture_info("ar-SA").LCID
# Set the format with which to display the date.
sdt_date.date_display_format = "dd MMMM, yyyy"
sdt_date.date_storage_format = aw.markup.SdtDateStorageFormat.DATE_TIME
# Display the date according to the Hijri calendar.
sdt_date.calendar_type = aw.markup.SdtCalendarType.HIJRI
# Before the user chooses a date in Microsoft Word, the tag will display the text "Click here to enter a date.".
# According to the tag's calendar, set the "full_date" property to get the tag to display a default date.
sdt_date.full_date = datetime(1440, 10, 20)
builder = aw.DocumentBuilder(doc)
builder.insert_node(sdt_date)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.date.docx")
#ExEnd
def test_plain_text(self):
#ExStart
#ExFor:StructuredDocumentTag.color
#ExFor:StructuredDocumentTag.contents_font
#ExFor:StructuredDocumentTag.end_character_font
#ExFor:StructuredDocumentTag.id
#ExFor:StructuredDocumentTag.level
#ExFor:StructuredDocumentTag.multiline
#ExFor:StructuredDocumentTag.tag
#ExFor:StructuredDocumentTag.title
#ExFor:StructuredDocumentTag.remove_self_only
#ExFor:StructuredDocumentTag.appearance
#ExSummary:Shows how to create a structured document tag in a plain text box and modify its appearance.
doc = aw.Document()
# Create a structured document tag that will contain plain text.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.INLINE)
# Set the title and color of the frame that appears when you mouse over the structured document tag in Microsoft Word.
tag.title = "My plain text"
tag.color = drawing.Color.magenta
# Set a tag for this structured document tag, which is obtainable
# as an XML element named "tag", with the string below in its "@val" attribute.
tag.tag = "MyPlainTextSDT"
# Every structured document tag has a random unique ID.
self.assertGreater(tag.id, 0)
# Set the font for the text inside the structured document tag.
tag.contents_font.name = "Arial"
# Set the font for the text at the end of the structured document tag.
# Any text that we type in the document body after moving out of the tag with arrow keys will use this font.
tag.end_character_font.name = "Arial Black"
# By default, this is False and pressing enter while inside a structured document tag does nothing.
# When set to True, our structured document tag can have multiple lines.
# Set the "multiline" property to "False" to only allow the contents
# of this structured document tag to span a single line.
# Set the "multiline" property to "True" to allow the tag to contain multiple lines of content.
tag.multiline = True
# Set the "Appearance" property to "SdtAppearance.TAGS" to show tags around content.
# By default structured document tag shows as BoundingBox.
tag.appearance = aw.markup.SdtAppearance.TAGS
builder = aw.DocumentBuilder(doc)
builder.insert_node(tag)
# Insert a clone of our structured document tag in a new paragraph.
tag_clone = tag.clone(True).as_structured_document_tag()
builder.insert_paragraph()
builder.insert_node(tag_clone)
# Use the "remove_self_only" method to remove a structured document tag, while keeping its contents in the document.
tag_clone.remove_self_only()
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.plain_text.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.plain_text.docx")
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertEqual("My plain text", tag.title)
self.assertEqual(drawing.Color.magenta.to_argb(), tag.color.to_argb())
self.assertEqual("MyPlainTextSDT", tag.tag)
self.assertGreater(tag.id, 0)
self.assertEqual("Arial", tag.contents_font.name)
self.assertEqual("Arial Black", tag.end_character_font.name)
self.assertTrue(tag.multiline)
self.assertTrue(aw.markup.SdtAppearance.TAGS, tag.appearance)
def test_is_temporary(self):
for is_temporary in (False, True):
with self.subTest(is_temporary=is_temporary):
#ExStart
#ExFor:StructuredDocumentTag.is_temporary
#ExSummary:Shows how to make single-use controls.
doc = aw.Document()
# Insert a plain text structured document tag,
# which will act as a plain text form that the user may enter text into.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.INLINE)
# Set the "is_temporary" property to "True" to make the structured document tag disappear and
# assimilate its contents into the document after the user edits it once in Microsoft Word.
# Set the "is_temporary" property to "False" to allow the user to edit the contents
# of the structured document tag any number of times.
tag.is_temporary = is_temporary
builder = aw.DocumentBuilder(doc)
builder.write("Please enter text: ")
builder.insert_node(tag)
# Insert another structured document tag in the form of a check box and set its default state to "checked".
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.CHECKBOX, aw.markup.MarkupLevel.INLINE)
tag.checked = True
# Set the "is_temporary" property to "True" to make the check box become a symbol
# once the user clicks on it in Microsoft Word.
# Set the "is_temporary" property to "False" to allow the user to click on the check box any number of times.
tag.is_temporary = is_temporary
builder.write("\nPlease click the check box: ")
builder.insert_node(tag)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.is_temporary.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.is_temporary.docx")
self.assertEqual(2, len([sdt.as_structured_document_tag().is_temporary == is_temporary for sdt in doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG, True)]))
def test_placeholder_building_block(self):
for is_showing_placeholder_text in (False, True):
with self.subTest(is_showing_placeholder_text=is_showing_placeholder_text):
#ExStart
#ExFor:StructuredDocumentTag.is_showing_placeholder_text
#ExFor:StructuredDocumentTag.placeholder
#ExFor:StructuredDocumentTag.placeholder_name
#ExSummary:Shows how to use a building block's contents as a custom placeholder text for a structured document tag.
doc = aw.Document()
# Insert a plain text structured document tag of the "PLAIN_TEXT" type, which will function as a text box.
# The contents that it will display by default are a "Click here to enter text." prompt.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.INLINE)
# We can get the tag to display the contents of a building block instead of the default text.
# First, add a building block with contents to the glossary document.
glossary_doc = doc.glossary_document
substitute_block = aw.buildingblocks.BuildingBlock(glossary_doc)
substitute_block.name = "Custom Placeholder"
substitute_block.append_child(aw.Section(glossary_doc))
substitute_block.first_section.append_child(aw.Body(glossary_doc))
substitute_block.first_section.body.append_paragraph("Custom placeholder text.")
glossary_doc.append_child(substitute_block)
# Then, use the structured document tag's "placeholder_name" property to reference that building block by name.
tag.placeholder_name = "Custom Placeholder"
# If "placeholder_name" refers to an existing block in the parent document's glossary document,
# we will be able to verify the building block via the "placeholder" property.
self.assertEqual(substitute_block, tag.placeholder)
# Set the "is_showing_placeholder_text" property to "True" to treat the
# structured document tag's current contents as placeholder text.
# This means that clicking on the text box in Microsoft Word will immediately highlight all the tag's contents.
# Set the "is_showing_placeholder_text" property to "False" to get the
# structured document tag to treat its contents as text that a user has already entered.
# Clicking on this text in Microsoft Word will place the blinking cursor at the clicked location.
tag.is_showing_placeholder_text = is_showing_placeholder_text
builder = aw.DocumentBuilder(doc)
builder.insert_node(tag)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.placeholder_building_block.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.placeholder_building_block.docx")
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
substitute_block = doc.glossary_document.get_child(aw.NodeType.BUILDING_BLOCK, 0, True).as_building_block()
self.assertEqual("Custom Placeholder", substitute_block.name)
self.assertEqual(is_showing_placeholder_text, tag.is_showing_placeholder_text)
self.assertEqual(substitute_block, tag.placeholder)
self.assertEqual(substitute_block.name, tag.placeholder_name)
def test_lock(self):
#ExStart
#ExFor:StructuredDocumentTag.lock_content_control
#ExFor:StructuredDocumentTag.lock_contents
#ExSummary:Shows how to apply editing restrictions to structured document tags.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Insert a plain text structured document tag, which acts as a text box that prompts the user to fill it in.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.INLINE)
# Set the "lock_contents" property to "True" to prohibit the user from editing this text box's contents.
tag.lock_contents = True
builder.write("The contents of this structured document tag cannot be edited: ")
builder.insert_node(tag)
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.INLINE)
# Set the "lock_content_control" property to "True" to prohibit the user from
# deleting this structured document tag manually in Microsoft Word.
tag.lock_content_control = True
builder.insert_paragraph()
builder.write("This structured document tag cannot be deleted but its contents can be edited: ")
builder.insert_node(tag)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.lock.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.lock.docx")
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertTrue(tag.lock_contents)
self.assertFalse(tag.lock_content_control)
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 1, True).as_structured_document_tag()
self.assertFalse(tag.lock_contents)
self.assertTrue(tag.lock_content_control)
def test_list_item_collection(self):
#ExStart
#ExFor:SdtListItem
#ExFor:SdtListItem.__init__(str)
#ExFor:SdtListItem.__init__(str,str)
#ExFor:SdtListItem.display_text
#ExFor:SdtListItem.value
#ExFor:SdtListItemCollection
#ExFor:SdtListItemCollection.add(SdtListItem)
#ExFor:SdtListItemCollection.clear
#ExFor:SdtListItemCollection.count
#ExFor:SdtListItemCollection.__iter__
#ExFor:SdtListItemCollection.__getitem__(int)
#ExFor:SdtListItemCollection.remove_at(int)
#ExFor:SdtListItemCollection.selected_value
#ExFor:StructuredDocumentTag.list_items
#ExSummary:Shows how to work with drop down-list structured document tags.
doc = aw.Document()
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.DROP_DOWN_LIST, aw.markup.MarkupLevel.BLOCK)
doc.first_section.body.append_child(tag)
# A drop-down list structured document tag is a form that allows the user to
# select an option from a list by left-clicking and opening the form in Microsoft Word.
# The "list_items" property contains all list items, and each list item is an "SdtListItem".
list_items = tag.list_items
list_items.add(aw.markup.SdtListItem("Value 1"))
self.assertEqual(list_items[0].display_text, list_items[0].value)
# Add 3 more list items. Initialize these items using a different constructor to the first item
# to display strings that are different from their values.
list_items.add(aw.markup.SdtListItem("Item 2", "Value 2"))
list_items.add(aw.markup.SdtListItem("Item 3", "Value 3"))
list_items.add(aw.markup.SdtListItem("Item 4", "Value 4"))
self.assertEqual(4, list_items.count)
# The drop-down list is displaying the first item. Assign a different list item to the "selected_value" to display it.
list_items.selected_value = list_items[3]
self.assertEqual("Value 4", list_items.selected_value.value)
# Enumerate over the collection and print each element.
for item in list_items:
if item is not None:
print(f"List item: {item.display_text}, value: {item.value}")
# Remove the last list item.
list_items.remove_at(3)
self.assertEqual(3, list_items.count)
# Since our drop-down control is set to display the removed item by default, give it an item to display which exists.
list_items.selected_value = list_items[1]
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.list_item_collection.docx")
# Use the "clear" method to empty the entire drop-down item collection at once.
list_items.clear()
self.assertEqual(0, list_items.count)
#ExEnd
def test_creating_custom_xml(self):
#ExStart
#ExFor:CustomXmlPart
#ExFor:CustomXmlPart.clone
#ExFor:CustomXmlPart.data
#ExFor:CustomXmlPart.id
#ExFor:CustomXmlPart.schemas
#ExFor:CustomXmlPartCollection
#ExFor:CustomXmlPartCollection.add(CustomXmlPart)
#ExFor:CustomXmlPartCollection.add(str,str)
#ExFor:CustomXmlPartCollection.clear
#ExFor:CustomXmlPartCollection.clone
#ExFor:CustomXmlPartCollection.count
#ExFor:CustomXmlPartCollection.get_by_id(str)
#ExFor:CustomXmlPartCollection.__iter__
#ExFor:CustomXmlPartCollection.__getitem__(int)
#ExFor:CustomXmlPartCollection.remove_at(int)
#ExFor:Document.custom_xml_parts
#ExFor:StructuredDocumentTag.xml_mapping
#ExFor:XmlMapping.set_mapping(CustomXmlPart,str,str)
#ExSummary:Shows how to create a structured document tag with custom XML data.
doc = aw.Document()
# Construct an XML part that contains data and add it to the document's collection.
# If we enable the "Developer" tab in Microsoft Word,
# we can find elements from this collection in the "XML Mapping Pane", along with a few default elements.
xml_part_id = str(uuid.uuid4())
xml_part_content = "<root><text>Hello world!</text></root>"
xml_part = doc.custom_xml_parts.add(xml_part_id, xml_part_content)
self.assertEqual(xml_part_content.encode('ascii'), xml_part.data)
self.assertEqual(xml_part_id, xml_part.id)
# Below are two ways to refer to XML parts.
# 1 - By an index in the custom XML part collection:
self.assertEqual(xml_part, doc.custom_xml_parts[0])
# 2 - By GUID:
self.assertEqual(xml_part, doc.custom_xml_parts.get_by_id(xml_part_id))
# Add an XML schema association.
xml_part.schemas.add("http://www.w3.org/2001/XMLSchema")
# Clone a part, and then insert it into the collection.
xml_part_clone = xml_part.clone()
xml_part_clone.id = str(uuid.uuid4())
doc.custom_xml_parts.add(xml_part_clone)
self.assertEqual(2, doc.custom_xml_parts.count)
# Iterate through the collection and print the contents of each part.
for index, part in enumerate(doc.custom_xml_parts):
print(f"XML part index {index}, ID: {part.id}")
print(f"\tContent: {part.data.decode('utf-8')}")
# Use the "remove_at" method to remove the cloned part by index.
doc.custom_xml_parts.remove_at(1)
self.assertEqual(1, doc.custom_xml_parts.count)
# Clone the XML parts collection, and then use the "Clear" method to remove all its elements at once.
custom_xml_parts = doc.custom_xml_parts.clone()
custom_xml_parts.clear()
# Create a structured document tag that will display our part's contents and insert it into the document body.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.BLOCK)
tag.xml_mapping.set_mapping(xml_part, "/root[1]/text[1]", "")
doc.first_section.body.append_child(tag)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.creating_custom_xml.docx")
#ExEnd
self.assertTrue(DocumentHelper.compare_docs(ARTIFACTS_DIR + "StructuredDocumentTag.creating_custom_xml.docx", GOLDS_DIR + "StructuredDocumentTag.CustomXml Gold.docx"))
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.creating_custom_xml.docx")
xml_part = doc.custom_xml_parts[0]
xml_part_id = uuid.UUID(xml_part.id)
self.assertEqual("<root><text>Hello world!</text></root>", xml_part.data.decode('utf-8'))
self.assertEqual("http://www.w3.org/2001/XMLSchema", xml_part.schemas[0])
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertEqual("Hello world!", tag.get_text().strip())
self.assertEqual("/root[1]/text[1]", tag.xml_mapping.xpath)
self.assertEqual("", tag.xml_mapping.prefix_mappings)
self.assertEqual(xml_part.data_checksum, tag.xml_mapping.custom_xml_part.data_checksum)
def test_data_checksum(self):
#ExStart
#ExFor:CustomXmlPart.data_checksum
#ExSummary:Shows how the checksum is calculated in a runtime.
doc = aw.Document()
rich_text = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.RICH_TEXT, aw.markup.MarkupLevel.BLOCK)
doc.first_section.body.append_child(rich_text)
# The checksum is read-only and computed using the data of the corresponding custom XML data part.
rich_text.xml_mapping.set_mapping(doc.custom_xml_parts.add(str(uuid.uuid4()),
"<root><text>ContentControl</text></root>"), "/root/text", "")
checksum = rich_text.xml_mapping.custom_xml_part.data_checksum
print(checksum)
rich_text.xml_mapping.set_mapping(doc.custom_xml_parts.add(str(uuid.uuid4()),
"<root><text>Updated ContentControl</text></root>"), "/root/text", "")
updated_checksum = rich_text.xml_mapping.custom_xml_part.data_checksum
print(updated_checksum)
# We changed the XmlPart of the tag, and the checksum was updated at runtime.
self.assertNotEqual(checksum, updated_checksum)
#ExEnd
def test_xml_mapping(self):
#ExStart
#ExFor:XmlMapping
#ExFor:XmlMapping.custom_xml_part
#ExFor:XmlMapping.delete
#ExFor:XmlMapping.is_mapped
#ExFor:XmlMapping.prefix_mappings
#ExFor:XmlMapping.xpath
#ExSummary:Shows how to set XML mappings for custom XML parts.
doc = aw.Document()
# Construct an XML part that contains text and add it to the document's CustomXmlPart collection.
xml_part_id = str(uuid.uuid4())
xml_part_content = "<root><text>Text element #1</text><text>Text element #2</text></root>"
xml_part = doc.custom_xml_parts.add(xml_part_id, xml_part_content)
self.assertEqual("<root><text>Text element #1</text><text>Text element #2</text></root>", xml_part.data.decode('utf-8'))
# Create a structured document tag that will display the contents of our CustomXmlPart.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.BLOCK)
# Set a mapping for our structured document tag. This mapping will instruct
# our structured document tag to display a portion of the XML part's text contents that the XPath points to.
# In this case, it will be contents of the the second "<text>" element of the first "<root>" element: "Text element #2".
tag.xml_mapping.set_mapping(xml_part, "/root[1]/text[2]", "xmlns:ns='http://www.w3.org/2001/XMLSchema'")
self.assertTrue(tag.xml_mapping.is_mapped)
self.assertEqual(xml_part, tag.xml_mapping.custom_xml_part)
self.assertEqual("/root[1]/text[2]", tag.xml_mapping.xpath)
self.assertEqual("xmlns:ns='http://www.w3.org/2001/XMLSchema'", tag.xml_mapping.prefix_mappings)
# Add the structured document tag to the document to display the content from our custom part.
doc.first_section.body.append_child(tag)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.xml_mapping.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.xml_mapping.docx")
xml_part = doc.custom_xml_parts[0]
xml_part_id = uuid.UUID(xml_part.id)
self.assertEqual("<root><text>Text element #1</text><text>Text element #2</text></root>", xml_part.data.decode('utf-8'))
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertEqual("Text element #2", tag.get_text().strip())
self.assertEqual("/root[1]/text[2]", tag.xml_mapping.xpath)
self.assertEqual("xmlns:ns='http://www.w3.org/2001/XMLSchema'", tag.xml_mapping.prefix_mappings)
def test_structured_document_tag_range_start_xml_mapping(self):
#ExStart
#ExFor:StructuredDocumentTagRangeStart.xml_mapping
#ExSummary:Shows how to set XML mappings for the range start of a structured document tag.
doc = aw.Document(MY_DIR + "Multi-section structured document tags.docx")
# Construct an XML part that contains text and add it to the document's CustomXmlPart collection.
xml_part_id = str(uuid.uuid4())
xml_part_content = "<root><text>Text element #1</text><text>Text element #2</text></root>"
xml_part = doc.custom_xml_parts.add(xml_part_id, xml_part_content)
self.assertEqual("<root><text>Text element #1</text><text>Text element #2</text></root>", xml_part.data.decode('utf-8'))
# Create a structured document tag that will display the contents of our CustomXmlPart in the document.
sdt_range_start = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_START, 0, True).as_structured_document_tag_range_start()
# If we set a mapping for our structured document tag,
# it will only display a portion of the CustomXmlPart that the XPath points to.
# This XPath will point to the contents second "<text>" element of the first "<root>" element of our CustomXmlPart.
sdt_range_start.xml_mapping.set_mapping(xml_part, "/root[1]/text[2]", None)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.structured_document_tag_range_start_xml_mapping.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.structured_document_tag_range_start_xml_mapping.docx")
xml_part = doc.custom_xml_parts[0]
xml_part_id = uuid.UUID(xml_part.id)
self.assertEqual("<root><text>Text element #1</text><text>Text element #2</text></root>", xml_part.data.decode('utf-8'))
sdt_range_start = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_START, 0, True).as_structured_document_tag_range_start()
self.assertEqual("/root[1]/text[2]", sdt_range_start.xml_mapping.xpath)
def test_custom_xml_schema_collection(self):
#ExStart
#ExFor:CustomXmlSchemaCollection
#ExFor:CustomXmlSchemaCollection.add(str)
#ExFor:CustomXmlSchemaCollection.clear
#ExFor:CustomXmlSchemaCollection.clone
#ExFor:CustomXmlSchemaCollection.count
#ExFor:CustomXmlSchemaCollection.__iter__
#ExFor:CustomXmlSchemaCollection.index_of(str)
#ExFor:CustomXmlSchemaCollection.__getitem__(int)
#ExFor:CustomXmlSchemaCollection.remove(str)
#ExFor:CustomXmlSchemaCollection.remove_at(int)
#ExSummary:Shows how to work with an XML schema collection.
doc = aw.Document()
xml_part_id = str(uuid.uuid4())
xml_part_content = "<root><text>Hello, World!</text></root>"
xml_part = doc.custom_xml_parts.add(xml_part_id, xml_part_content)
# Add an XML schema association.
xml_part.schemas.add("http://www.w3.org/2001/XMLSchema")
# Clone the custom XML part's XML schema association collection,
# and then add a couple of new schemas to the clone.
schemas = xml_part.schemas.clone()
schemas.add("http://www.w3.org/2001/XMLSchema-instance")
schemas.add("http://schemas.microsoft.com/office/2006/metadata/contentType")
self.assertEqual(3, schemas.count)
self.assertEqual(2, schemas.index_of("http://schemas.microsoft.com/office/2006/metadata/contentType"))
# Enumerate the schemas and print each element.
for schema in schemas:
print(schema)
# Below are three ways of removing schemas from the collection.
# 1 - Remove a schema by index:
schemas.remove_at(2)
# 2 - Remove a schema by value:
schemas.remove("http://www.w3.org/2001/XMLSchema")
# 3 - Use the "clear" method to empty the collection at once.
schemas.clear()
self.assertEqual(0, schemas.count)
#ExEnd
def test_custom_xml_part_store_item_id_read_only(self):
#ExStart
#ExFor:XmlMapping.store_item_id
#ExSummary:Shows how to get the custom XML data identifier of an XML part.
doc = aw.Document(MY_DIR + "Custom XML part in structured document tag.docx")
# Structured document tags have IDs in the form of GUIDs.
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertEqual("{F3029283-4FF8-4DD2-9F31-395F19ACEE85}", tag.xml_mapping.store_item_id)
#ExEnd
def test_custom_xml_part_store_item_id_read_only_null(self):
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
sdt_check_box = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.CHECKBOX, aw.markup.MarkupLevel.INLINE)
sdt_check_box.checked = True
builder.insert_node(sdt_check_box)
doc = DocumentHelper.save_open(doc)
sdt = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
print("The Id of your custom xml part is:", sdt.xml_mapping.store_item_id)
def test_clear_text_from_structured_document_tags(self):
#ExStart
#ExFor:StructuredDocumentTag.clear
#ExSummary:Shows how to delete contents of structured document tag elements.
doc = aw.Document()
# Create a plain text structured document tag, and then append it to the document.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.BLOCK)
doc.first_section.body.append_child(tag)
# This structured document tag, which is in the form of a text box, already displays placeholder text.
self.assertEqual("Click here to enter text.", tag.get_text().strip())
self.assertTrue(tag.is_showing_placeholder_text)
# Create a building block with text contents.
glossary_doc = doc.glossary_document
substitute_block = aw.buildingblocks.BuildingBlock(glossary_doc)
substitute_block.name = "My placeholder"
substitute_block.append_child(aw.Section(glossary_doc))
substitute_block.first_section.ensure_minimum()
substitute_block.first_section.body.first_paragraph.append_child(aw.Run(glossary_doc, "Custom placeholder text."))
glossary_doc.append_child(substitute_block)
# Set the structured document tag's "placeholder_name" property to our building block's name to get
# the structured document tag to display the contents of the building block in place of the original default text.
tag.placeholder_name = "My placeholder"
self.assertEqual("Custom placeholder text.", tag.get_text().strip())
self.assertTrue(tag.is_showing_placeholder_text)
# Edit the text of the structured document tag and hide the placeholder text.
run = tag.get_child(aw.NodeType.RUN, 0, True).as_run()
run.text = "New text."
tag.is_showing_placeholder_text = False
self.assertEqual("New text.", tag.get_text().strip())
# Use the "clear" method to clear this structured document tag's contents and display the placeholder again.
tag.clear()
self.assertTrue(tag.is_showing_placeholder_text)
self.assertEqual("Custom placeholder text.", tag.get_text().strip())
#ExEnd
def test_access_to_building_block_properties_from_doc_part_obj_sdt(self):
doc = aw.Document(MY_DIR + "Structured document tags with building blocks.docx")
doc_part_obj_sdt = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertEqual(aw.markup.SdtType.DOC_PART_OBJ, doc_part_obj_sdt.sdt_type)
self.assertEqual("Table of Contents", doc_part_obj_sdt.building_block_gallery)
def test_access_to_building_block_properties_from_plain_text_sdt(self):
doc = aw.Document(MY_DIR + "Structured document tags with building blocks.docx")
plain_text_sdt = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 1, True).as_structured_document_tag()
self.assertEqual(aw.markup.SdtType.PLAIN_TEXT, plain_text_sdt.sdt_type)
with self.assertRaises(Exception, msg="BuildingBlockType is only accessible for BuildingBlockGallery SDT type."):
building_block_gallery = plain_text_sdt.building_block_gallery
def test_building_block_categories(self):
#ExStart
#ExFor:StructuredDocumentTag.building_block_category
#ExFor:StructuredDocumentTag.building_block_gallery
#ExSummary:Shows how to insert a structured document tag as a building block, and set its category and gallery.
doc = aw.Document()
building_block_sdt = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.BUILDING_BLOCK_GALLERY, aw.markup.MarkupLevel.BLOCK)
building_block_sdt.building_block_category = "Built-in"
building_block_sdt.building_block_gallery = "Table of Contents"
doc.first_section.body.append_child(building_block_sdt)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.BuildingBlockCategories.docx")
#ExEnd
building_block_sdt = doc.first_section.body.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertEqual(aw.markup.SdtType.BUILDING_BLOCK_GALLERY, building_block_sdt.sdt_type)
self.assertEqual("Table of Contents", building_block_sdt.building_block_gallery)
self.assertEqual("Built-in", building_block_sdt.building_block_category)
def test_update_sdt_content(self):
for update_sdt_content in (False, True):
with self.subTest(update_sdt_content=update_sdt_content):
#ExStart
#ExFor:SaveOptions.update_sdt_content
#ExSummary:Shows how to update structured document tags while saving a document to PDF.
doc = aw.Document()
# Insert a drop-down list structured document tag.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.DROP_DOWN_LIST, aw.markup.MarkupLevel.BLOCK)
tag.list_items.add(aw.markup.SdtListItem("Value 1"))
tag.list_items.add(aw.markup.SdtListItem("Value 2"))
tag.list_items.add(aw.markup.SdtListItem("Value 3"))
# The drop-down list currently displays "Choose an item" as the default text.
# Set the "selected_value" property to one of the list items to get the tag to
# display that list item's value instead of the default text.
tag.list_items.selected_value = tag.list_items[1]
doc.first_section.body.append_child(tag)
# Create a "PdfSaveOptions" object to pass to the document's "Save" method
# to modify how that method saves the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "update_sdt_content" property to "False" not to update the structured document tags
# while saving the document to PDF. They will display their default values as they were at the time of construction.
# Set the "update_sdt_content" property to "True" to make sure the tags display updated values in the PDF.
options.update_sdt_content = update_sdt_content
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.update_sdt_content.pdf", options)
#ExEnd
#pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + "StructuredDocumentTag.UpdateSdtContent.pdf")
#text_absorber = aspose.pdf.text.TextAbsorber()
#text_absorber.visit(pdf_doc)
#self.assertEqual(
# "Value 2" if update_sdt_content else "Choose an item.",
# text_absorber.text)
    def test_fill_table_using_repeating_section_item(self):
        """Generate table rows from a custom XML part via repeating-section SDTs.

        Builds a header row, maps a REPEATING_SECTION tag to '/books[1]/book',
        saves, reloads, and verifies both the XPath mappings and the text of
        the generated rows.
        """
        #ExStart
        #ExFor:SdtType
        #ExSummary:Shows how to fill a table with data from in an XML part.
        doc = aw.Document()
        builder = aw.DocumentBuilder(doc)
        xml_part = doc.custom_xml_parts.add("Books",
            "<books>" +
            "<book>" +
            "<title>Everyday Italian</title>" +
            "<author>Giada De Laurentiis</author>" +
            "</book>" +
            "<book>" +
            "<title>The C Programming Language</title>" +
            "<author>Brian W. Kernighan, Dennis M. Ritchie</author>" +
            "</book>" +
            "<book>" +
            "<title>Learning XML</title>" +
            "<author>Erik T. Ray</author>" +
            "</book>" +
            "</books>")
        # Create headers for data from the XML content.
        table = builder.start_table()
        builder.insert_cell()
        builder.write("Title")
        builder.insert_cell()
        builder.write("Author")
        builder.end_row()
        builder.end_table()
        # Create a table with a repeating section inside.
        repeating_section_sdt = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.REPEATING_SECTION, aw.markup.MarkupLevel.ROW)
        repeating_section_sdt.xml_mapping.set_mapping(xml_part, "/books[1]/book", "")
        table.append_child(repeating_section_sdt)
        # Add repeating section item inside the repeating section and mark it as a row.
        # This table will have a row for each element that we can find in the XML document
        # using the "/books[1]/book" XPath, of which there are three.
        repeating_section_item_sdt = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.REPEATING_SECTION_ITEM, aw.markup.MarkupLevel.ROW)
        repeating_section_sdt.append_child(repeating_section_item_sdt)
        row = aw.tables.Row(doc)
        repeating_section_item_sdt.append_child(row)
        # Map XML data with created table cells for the title and author of each book.
        title_sdt = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.CELL)
        title_sdt.xml_mapping.set_mapping(xml_part, "/books[1]/book[1]/title[1]", "")
        row.append_child(title_sdt)
        author_sdt = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.CELL)
        author_sdt.xml_mapping.set_mapping(xml_part, "/books[1]/book[1]/author[1]", "")
        row.append_child(author_sdt)
        doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.fill_table_using_repeating_section_item.docx")
        #ExEnd
        # Round-trip: reload the saved document and verify mappings and the
        # generated table text ("\u0007" is the cell/row separator emitted by
        # get_text() for tables).
        doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.fill_table_using_repeating_section_item.docx")
        tags = [node.as_structured_document_tag() for node in doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG, True)]
        self.assertEqual("/books[1]/book", tags[0].xml_mapping.xpath)
        self.assertEqual("", tags[0].xml_mapping.prefix_mappings)
        self.assertEqual("", tags[1].xml_mapping.xpath)
        self.assertEqual("", tags[1].xml_mapping.prefix_mappings)
        self.assertEqual("/books[1]/book[1]/title[1]", tags[2].xml_mapping.xpath)
        self.assertEqual("", tags[2].xml_mapping.prefix_mappings)
        self.assertEqual("/books[1]/book[1]/author[1]", tags[3].xml_mapping.xpath)
        self.assertEqual("", tags[3].xml_mapping.prefix_mappings)
        self.assertEqual("Title\u0007Author\u0007\u0007" +
            "Everyday Italian\u0007Giada De Laurentiis\u0007\u0007" +
            "The C Programming Language\u0007Brian W. Kernighan, Dennis M. Ritchie\u0007\u0007" +
            "Learning XML\u0007Erik T. Ray\u0007\u0007", doc.first_section.body.tables[0].get_text().strip())
    def test_custom_xml_part(self):
        """Map a plain-text SDT to a single element of a custom XML part via XPath."""
        xml_string = ("<?xml version=\"1.0\"?>" +
            "<Company>" +
            "<Employee id=\"1\">" +
            "<FirstName>John</FirstName>" +
            "<LastName>Doe</LastName>" +
            "</Employee>" +
            "<Employee id=\"2\">" +
            "<FirstName>Jane</FirstName>" +
            "<LastName>Doe</LastName>" +
            "</Employee>" +
            "</Company>")
        doc = aw.Document()
        # Insert the full XML document as a custom document part.
        # We can find the mapping for this part in Microsoft Word via "Developer" -> "XML Mapping Pane", if it is enabled.
        # A random UUID is used as the part name so repeated runs do not collide.
        xml_part = doc.custom_xml_parts.add(str(uuid.uuid4()), xml_string)
        # Create a structured document tag, which will use an XPath to refer to a single element from the XML.
        sdt = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.BLOCK)
        sdt.xml_mapping.set_mapping(xml_part, "Company//Employee[@id='2']/FirstName", "")
        # Add the StructuredDocumentTag to the document to display the element in the text.
        doc.first_section.body.append_child(sdt)
    def test_multi_section_tags(self):
        """Read back properties of multi-section (ranged) structured document tags.

        Loads a document containing a range-start/range-end pair, checks the
        two ends share an id, then prints every exposed property.
        """
        #ExStart
        #ExFor:StructuredDocumentTagRangeStart
        #ExFor:StructuredDocumentTagRangeStart.id
        #ExFor:StructuredDocumentTagRangeStart.title
        #ExFor:StructuredDocumentTagRangeStart.placeholder_name
        #ExFor:StructuredDocumentTagRangeStart.is_showing_placeholder_text
        #ExFor:StructuredDocumentTagRangeStart.lock_content_control
        #ExFor:StructuredDocumentTagRangeStart.lock_contents
        #ExFor:StructuredDocumentTagRangeStart.level
        #ExFor:StructuredDocumentTagRangeStart.range_end
        #ExFor:StructuredDocumentTagRangeStart.color
        #ExFor:StructuredDocumentTagRangeStart.sdt_type
        #ExFor:StructuredDocumentTagRangeStart.tag
        #ExFor:StructuredDocumentTagRangeEnd
        #ExFor:StructuredDocumentTagRangeEnd.id
        #ExSummary:Shows how to get the properties of multi-section structured document tags.
        doc = aw.Document(MY_DIR + "Multi-section structured document tags.docx")
        range_start_tag = doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_START, True)[0].as_structured_document_tag_range_start()
        range_end_tag = doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_END, True)[0].as_structured_document_tag_range_end()
        self.assertEqual(range_start_tag.id, range_end_tag.id) #ExSkip
        self.assertEqual(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_START, range_start_tag.node_type) #ExSkip
        self.assertEqual(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_END, range_end_tag.node_type) #ExSkip
        print("StructuredDocumentTagRangeStart values:")
        print(f"\t|id: {range_start_tag.id}")
        print(f"\t|title: {range_start_tag.title}")
        print(f"\t|placeholder_name: {range_start_tag.placeholder_name}")
        print(f"\t|is_showing_placeholder_text: {range_start_tag.is_showing_placeholder_text}")
        print(f"\t|lock_content_control: {range_start_tag.lock_content_control}")
        print(f"\t|lock_contents: {range_start_tag.lock_contents}")
        print(f"\t|level: {range_start_tag.level}")
        print(f"\t|node_type: {range_start_tag.node_type}")
        print(f"\t|range_end: {range_start_tag.range_end}")
        print(f"\t|color: {range_start_tag.color.to_argb()}")
        print(f"\t|sdt_type: {range_start_tag.sdt_type}")
        print(f"\t|tag: {range_start_tag.tag}\n")
        print("StructuredDocumentTagRangeEnd values:")
        print(f"\t|id: {range_end_tag.id}")
        print(f"\t|node_type: {range_end_tag.node_type}")
        #ExEnd
    def test_sdt_child_nodes(self):
        """Enumerate direct and recursive child nodes of a range-start tag."""
        #ExStart
        #ExFor:StructuredDocumentTagRangeStart.child_nodes
        #ExFor:StructuredDocumentTagRangeStart.get_child_nodes(NodeType,bool)
        #ExSummary:Shows how to get child nodes of StructuredDocumentTagRangeStart.
        doc = aw.Document(MY_DIR + "Multi-section structured document tags.docx")
        tag = doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_START, True)[0].as_structured_document_tag_range_start()
        print("StructuredDocumentTagRangeStart values:")
        print(f"\t|Child nodes count: {tag.child_nodes.count}\n")
        # Direct children of the range.
        for node in tag.child_nodes:
            print(f"\t|Child node type: {node.node_type}")
        # Recursive search for Run nodes within the range.
        for node in tag.get_child_nodes(aw.NodeType.RUN, True):
            print(f"\t|Child node text: {node.get_text()}")
        #ExEnd
#ExStart
#ExFor:StructuredDocumentTagRangeStart.__init__(DocumentBase,SdtType)
#ExFor:StructuredDocumentTagRangeEnd.__init__(DocumentBase,int)
#ExFor:StructuredDocumentTagRangeStart.remove_self_only
#ExFor:StructuredDocumentTagRangeStart.remove_all_children
#ExSummary:Shows how to create/remove structured document tag and its content.
def test_sdt_range_extended_methods(self):
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("StructuredDocumentTag element")
range_start = self.insert_structured_document_tag_ranges(doc)
# Removes ranged structured document tag, but keeps content inside.
range_start.remove_self_only()
range_start = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_START, 0, False)
self.assertIsNone(range_start)
range_end = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_END, 0, False)
self.assertIsNone(range_end)
self.assertEqual("StructuredDocumentTag element", doc.get_text().strip())
range_start = self.insert_structured_document_tag_ranges(doc)
paragraph_node = range_start.last_child
self.assertEqual("StructuredDocumentTag element", paragraph_node.get_text().strip())
# Removes ranged structured document tag and content inside.
range_start.remove_all_children()
paragraph_node = range_start.last_child
self.assertIsNone(None, paragraph_node.get_text())
def insert_structured_document_tag_ranges(self, doc: aw.Document) -> aw.markup.StructuredDocumentTagRangeStart:
range_start = aw.markup.StructuredDocumentTagRangeStart(doc, aw.markup.SdtType.PLAIN_TEXT)
range_end = aw.markup.StructuredDocumentTagRangeEnd(doc, range_start.id)
doc.first_section.body.insert_before(range_start, doc.first_section.body.first_paragraph)
doc.last_section.body.insert_after(range_end, doc.first_section.body.first_paragraph)
return range_start
#ExEnd
| 48.809432 | 179 | 0.692643 | 50,210 | 0.990081 | 0 | 0 | 0 | 0 | 0 | 0 | 22,862 | 0.450811 |
871afb26bdc862a0ddb469ca8e427353406c19fc | 690 | py | Python | tests/emulator/kernel/test_kernel.py | FKD13/RCPU | 1f27246494f60eaa2432470b2d218bb3f63578c7 | [
"MIT"
] | 17 | 2017-07-26T13:08:34.000Z | 2022-02-19T20:44:02.000Z | tests/emulator/kernel/test_kernel.py | FKD13/RCPU | 1f27246494f60eaa2432470b2d218bb3f63578c7 | [
"MIT"
] | 4 | 2017-10-12T20:56:39.000Z | 2020-05-04T09:19:44.000Z | tests/emulator/kernel/test_kernel.py | FKD13/RCPU | 1f27246494f60eaa2432470b2d218bb3f63578c7 | [
"MIT"
] | 4 | 2017-10-16T16:24:16.000Z | 2022-03-21T19:07:06.000Z | from .utils import init_kernel
def test_read_string():
    """A NUL-terminated sequence of character codes reads back as a string."""
    kernel = init_kernel()
    kernel.RAM.load(list(map(ord, "ABCDE")) + [0])
    assert kernel.read_string(0) == "ABCDE"
    kernel.RAM.load(list(map(ord, "PYTHON")) + [0], base_address=40)
    assert kernel.read_string(40) == "PYTHON"
def test_write_string():
    """write_string/read_string round-trip, including the empty string."""
    for address, text in ((0, "Hello World!"), (20, '')):
        kernel = init_kernel()
        kernel.write_string(address, text)
        assert kernel.read_string(address) == text
def test_read_string_empty():
    """A lone NUL terminator yields the empty string at any address."""
    kernel = init_kernel()
    for address in (0, 40):
        kernel.RAM.set(address, 0)
        assert kernel.read_string(address) == ""
871c1782c66580c2fb34c35862df10f3a69db67c | 10,183 | py | Python | tensorflow_graphics/rendering/barycentrics.py | Tensorflow-Devs/graphics | e03fb62b8ee15a026bde7e27e4f3aa5ebc9413a1 | [
"Apache-2.0"
] | null | null | null | tensorflow_graphics/rendering/barycentrics.py | Tensorflow-Devs/graphics | e03fb62b8ee15a026bde7e27e4f3aa5ebc9413a1 | [
"Apache-2.0"
] | null | null | null | tensorflow_graphics/rendering/barycentrics.py | Tensorflow-Devs/graphics | e03fb62b8ee15a026bde7e27e4f3aa5ebc9413a1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of functions to compute differentiable barycentric coordinates."""
from typing import Tuple
import tensorflow as tf
from tensorflow_graphics.rendering import framebuffer as fb
from tensorflow_graphics.util import shape
from tensorflow_graphics.util import type_alias
def differentiable_barycentrics(
    framebuffer: fb.Framebuffer, clip_space_vertices: type_alias.TensorLike,
    triangles: type_alias.TensorLike) -> fb.Framebuffer:
  """Computes differentiable barycentric coordinates from a Framebuffer.

  The barycentric coordinates will be differentiable w.r.t. the input vertices.
  Later, we may support derivatives w.r.t. pixel position for mip-mapping.

  Args:
    framebuffer: a multi-layer Framebuffer containing triangle ids and a
      foreground mask with shape [batch, num_layers, height, width, 1]
    clip_space_vertices: a 2-D float32 tensor with shape [vertex_count, 4] or a
      3-D tensor with shape [batch, vertex_count, 4] containing homogenous
      vertex positions (xyzw).
    triangles: a 2-D int32 tensor with shape [triangle_count, 3] or a 3-D tensor
      with shape [batch, triangle_count, 3] containing per-triangle vertex
      indices in counter-clockwise order.

  Returns:
    a copy of `framebuffer`, but the differentiable barycentric coordinates will
    replace any barycentric coordinates already in the `framebuffer`.
  """
  # Static tensor rank (number of dimensions), cheap shape-tuple lookup.
  rank = lambda t: len(t.shape)
  clip_space_vertices = tf.convert_to_tensor(clip_space_vertices)
  shape.check_static(
      tensor=clip_space_vertices,
      tensor_name="clip_space_vertices",
      has_rank_greater_than=1,
      has_rank_less_than=4)
  # Promote unbatched inputs to a batch of one.
  if rank(clip_space_vertices) == 2:
    clip_space_vertices = tf.expand_dims(clip_space_vertices, axis=0)
  triangles = tf.convert_to_tensor(triangles)
  shape.check_static(
      tensor=triangles,
      tensor_name="triangles",
      has_rank_greater_than=1,
      has_rank_less_than=4)
  if rank(triangles) == 2:
    triangles = tf.expand_dims(triangles, axis=0)
  else:
    # Already-batched triangles must agree with the vertex batch size.
    shape.compare_batch_dimensions(
        tensors=(clip_space_vertices, triangles),
        last_axes=(-3, -3),
        broadcast_compatible=False)
  shape.compare_batch_dimensions(
      tensors=(clip_space_vertices, framebuffer.triangle_id),
      last_axes=(-3, -4),
      broadcast_compatible=False)
  # Compute image pixel coordinates.
  px, py = normalized_pixel_coordinates(framebuffer.width, framebuffer.height)

  def compute_barycentrics_fn(
      slices: Tuple[type_alias.TensorLike, type_alias.TensorLike,
                    type_alias.TensorLike]
  ) -> tf.Tensor:
    """Computes barycentrics for one batch element (driven by vectorized_map)."""
    clip_vertices_slice, triangle_slice, triangle_id_slice = slices
    # Drop the trailing channel dimension of the triangle-id image.
    triangle_id_slice = triangle_id_slice[..., 0]
    if rank(triangle_id_slice) == 2:  # There is no layer dimension.
      triangle_id_slice = tf.expand_dims(triangle_id_slice, axis=0)
    # Compute per-triangle inverse matrices.
    triangle_matrices = compute_triangle_matrices(clip_vertices_slice,
                                                  triangle_slice)
    # Compute per-pixel barycentric coordinates.
    barycentric_coords = compute_barycentric_coordinates(
        triangle_id_slice, triangle_matrices, px, py)
    # Move the barycentric axis last (perm moves axis 0 to the end).
    barycentric_coords = tf.transpose(barycentric_coords, perm=[1, 2, 3, 0])
    return barycentric_coords

  per_image_barycentrics = tf.vectorized_map(
      compute_barycentrics_fn,
      (clip_space_vertices, triangles, framebuffer.triangle_id))
  barycentric_coords = tf.stack(per_image_barycentrics, axis=0)
  # After stacking barycentrics will have layers dimension no matter what.
  # In order to make sure we return differentiable barycentrics of the same
  # shape - reshape the tensor using original shape.
  barycentric_coords = tf.reshape(
      barycentric_coords, shape=tf.shape(framebuffer.barycentrics.value))
  # Mask out barycentrics for background pixels.
  barycentric_coords = barycentric_coords * framebuffer.foreground_mask
  return fb.Framebuffer(
      triangle_id=framebuffer.triangle_id,
      vertex_ids=framebuffer.vertex_ids,
      foreground_mask=framebuffer.foreground_mask,
      attributes=framebuffer.attributes,
      barycentrics=fb.RasterizedAttribute(barycentric_coords, None, None))
def normalized_pixel_coordinates(
    image_width: int, image_height: int) -> Tuple[tf.Tensor, tf.Tensor]:
  """Returns pixel-center coordinates normalized to the [-1, 1] range.

  X runs from -1 (left) to 1 (right) and Y from -1 (top) to 1 (bottom). The
  extrema fall on the exterior pixel boundaries while values are sampled at
  pixel centers: a width-4 image yields x-centers [-0.75 -0.25 0.25 0.75],
  a width-3 image yields [-0.667 0 0.667].

  Args:
    image_width: int specifying desired output image width in pixels.
    image_height: int specifying desired output image height in pixels.

  Returns:
    Two float32 tensors with shape [image_height, image_width] containing x-
    and y-coordinates, respectively, for each image pixel.
  """
  width_f = tf.cast(image_width, tf.float32)
  height_f = tf.cast(image_height, tf.float32)
  # Pixel center i maps to (2 * i + 1) / extent - 1.
  col_coords = (2 * tf.range(width_f) + 1) / width_f - 1
  row_coords = (2 * tf.range(height_f) + 1) / height_f - 1
  grid_x, grid_y = tf.meshgrid(col_coords, row_coords)
  return grid_x, grid_y
def compute_triangle_matrices(clip_space_vertices: type_alias.TensorLike,
                              triangles: type_alias.TensorLike) -> tf.Tensor:
  """Computes per-triangle matrices used in barycentric coordinate calculation.

  The result corresponds to the inverse matrix from equation (4) in the paper
  "Triangle Scan Conversion using 2D Homogeneous Coordinates". Our matrix
  inverses are not divided by the determinant, only multiplied by its sign. The
  division happens in compute_barycentric_coordinates.

  Args:
    clip_space_vertices: float32 tensor with shape [vertex_count, 4] containing
      vertex positions in clip space (x, y, z, w).
    triangles: 2-D int32 tensor with shape [triangle_count, 3]. Each triplet
      contains a triangle's vertex indices in counter-clockwise order.

  Returns:
    3-D float32 tensor with shape [3, 3, triangle_count] containing per-triangle
    matrices.
  """
  # First make a vertex tensor of size [triangle_count, 3, 3], where the last
  # dimension contains x, y, w coordinates of the corresponding vertex in each
  # triangle
  xyw = tf.stack([
      clip_space_vertices[:, 0], clip_space_vertices[:, 1],
      clip_space_vertices[:, 3]
  ],
                 axis=1)
  xyw = tf.gather(xyw, triangles)
  # Reorder to [triangle_count, coordinate (x/y/w), vertex].
  xyw = tf.transpose(xyw, perm=[0, 2, 1])

  # Compute the sub-determinants (cofactors of the per-triangle 3x3 matrix);
  # together they form the unnormalized adjugate, i.e. the inverse up to the
  # determinant factor.
  d11 = xyw[:, 1, 1] * xyw[:, 2, 2] - xyw[:, 1, 2] * xyw[:, 2, 1]
  d21 = xyw[:, 1, 2] * xyw[:, 2, 0] - xyw[:, 1, 0] * xyw[:, 2, 2]
  d31 = xyw[:, 1, 0] * xyw[:, 2, 1] - xyw[:, 1, 1] * xyw[:, 2, 0]
  d12 = xyw[:, 2, 1] * xyw[:, 0, 2] - xyw[:, 2, 2] * xyw[:, 0, 1]
  d22 = xyw[:, 2, 2] * xyw[:, 0, 0] - xyw[:, 2, 0] * xyw[:, 0, 2]
  d32 = xyw[:, 2, 0] * xyw[:, 0, 1] - xyw[:, 2, 1] * xyw[:, 0, 0]
  d13 = xyw[:, 0, 1] * xyw[:, 1, 2] - xyw[:, 0, 2] * xyw[:, 1, 1]
  d23 = xyw[:, 0, 2] * xyw[:, 1, 0] - xyw[:, 0, 0] * xyw[:, 1, 2]
  d33 = xyw[:, 0, 0] * xyw[:, 1, 1] - xyw[:, 0, 1] * xyw[:, 1, 0]
  matrices = tf.stack([[d11, d12, d13], [d21, d22, d23], [d31, d32, d33]])

  # Multiply by the sign of the determinant, avoiding divide by zero.
  determinant = xyw[:, 0, 0] * d11 + xyw[:, 1, 0] * d12 + xyw[:, 2, 0] * d13
  # Degenerate (zero-determinant) triangles get sign +1 so their matrices are
  # passed through unscaled rather than zeroed.
  sign = tf.sign(determinant) + tf.cast(determinant == 0, tf.float32)
  matrices = sign * matrices
  return matrices
def compute_barycentric_coordinates(triangle_ids: type_alias.TensorLike,
                                    triangle_matrices: type_alias.TensorLike,
                                    px: type_alias.TensorLike,
                                    py: type_alias.TensorLike) -> tf.Tensor:
  """Computes per-pixel barycentric coordinates.

  Args:
    triangle_ids: 2-D int tensor with shape [image_height, image_width]
      containing per-pixel triangle ids, as computed by rasterize_triangles.
    triangle_matrices: 3-D float32 tensor with shape [3, 3, triangle_count]
      containing per-triangle matrices computed by compute_triangle_matrices.
    px: 2-D float32 tensor with shape [image_height, image_width] containing
      per-pixel x-coordinates, as computed by normalized_pixel_coordinates.
    py: 2-D float32 tensor with shape [image_height, image_width] containing
      per-pixel y-coordinates, as computed by normalized_pixel_coordinates.

  Returns:
    3-D float32 tensor with shape [height, width, 3] containing the barycentric
    coordinates of the point at each pixel within the triangle specified by
    triangle_ids.
  """
  # Select the matrix of the triangle covering each pixel.
  per_pixel_matrices = tf.gather(triangle_matrices, triangle_ids, axis=-1)
  # Evaluate the three edge equations a*px + b*py + c at every pixel.
  unnormalized = (
      per_pixel_matrices[:, 0] * px + per_pixel_matrices[:, 1] * py +
      per_pixel_matrices[:, 2])
  # Normalize so the coordinates sum to 1. divide_no_nan guards the case where
  # the barycentrics sum to zero, which can happen for background pixels when
  # the 0th triangle in the list is degenerate, due to the way triangle id 0
  # serves for both background and the first triangle.
  return tf.math.divide_no_nan(unnormalized,
                               tf.reduce_sum(unnormalized, axis=0))
| 44.859031 | 80 | 0.7104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,063 | 0.497201 |
871cf554500d1f5fcf075f0dd9341f26889da829 | 1,780 | py | Python | to_remove/Crawler/measurement_loc_crawler.py | jsdelivrbot/SeoulWind | be4ee24981df651104c178f04fe59c5be9cd946c | [
"MIT"
] | null | null | null | to_remove/Crawler/measurement_loc_crawler.py | jsdelivrbot/SeoulWind | be4ee24981df651104c178f04fe59c5be9cd946c | [
"MIT"
] | null | null | null | to_remove/Crawler/measurement_loc_crawler.py | jsdelivrbot/SeoulWind | be4ee24981df651104c178f04fe59c5be9cd946c | [
"MIT"
] | 2 | 2018-06-22T01:16:55.000Z | 2018-12-09T03:37:55.000Z | from parameters import Parameters
from bs4 import BeautifulSoup
import requests
import json
def addr2latlng(addr):
    """Geocode a street address with the Naver Maps geocoding API.

    :param addr: address string to geocode.
    :return: an (x, y) tuple where x is the longitude and y is the latitude
        of the first match ("point" coordinates in the API response), or
        (0.0, 0.0) when no result could be extracted.
    """
    params = Parameters('parameters.txt').load()
    # NOTE(review): the client id is already sent via headers and the query
    # string; a JSON body on a GET request is unusual and is likely ignored
    # by the server -- confirm whether the payload is needed at all.
    payload = {'clientID': params['apikey_naver_id'], 'query': addr}
    headers = {
        'Host': 'openapi.naver.com',
        'User-Agent': 'curl/7.43.0',
        'Accept': '*/*',
        'Content-Type': 'application/json',
        'X-Naver-Client-Id': params['apikey_naver_id'],
        'X-Naver-Client-Secret': params['apikey_naver_secret'],
    }
    url = 'https://openapi.naver.com/v1/map/geocode?clientId={}&query={}'.format(params['apikey_naver_id'], addr)
    res = requests.get(url, data=json.dumps(payload), headers=headers)
    output = json.loads(res.text)
    try:
        # Take the first match only.
        x = output['result']['items'][0]['point']['x']
        y = output['result']['items'][0]['point']['y']
    except (KeyError, IndexError):
        # BUGFIX: an empty 'items' list previously raised an uncaught
        # IndexError; treat it like a missing key and fall back to (0, 0).
        x = 0.0
        y = 0.0
    return x, y
# Scrape the list of Seoul air-quality observatories and write one CSV row per
# station: name, latitude, longitude, address.
url = 'http://cleanair.seoul.go.kr/inform.htm?method=observatory'
res = requests.get(url)
soup = BeautifulSoup(res.text, "html.parser")
table_pages = soup.find('table', {'class': 'tbl4'})
loc_items = table_pages.find('tbody').findAll('tr')
# 'with' guarantees the CSV file is closed even if a lookup below raises.
with open('observatory.csv', 'w', encoding='utf8') as f:
    for loc in loc_items:
        td_item = loc.findAll('td')
        addr_str = td_item[1].text.strip().split(' ')
        # Drop a trailing parenthesised annotation from the 4th address token.
        # BUGFIX: when no '(' is present, find() returns -1 and the old slice
        # [0:-1] silently chopped the token's last character.
        paren_index = addr_str[3].find('(')
        if paren_index != -1:
            addr_str[3] = addr_str[3][0:paren_index]
        addr = ' '.join(addr_str[0:4])
        # addr2latlng returns (x, y) == (longitude, latitude).
        latlng = addr2latlng(addr)
        msg = '{},{},{},{},\n'.format(addr_str[1], str(latlng[1]), str(latlng[0]), addr)
        # Example row: 마포구,37.5734407,126.90072,서울시 마포구 성산길 82,
        f.write(msg)
871edef7298e1f6e8174cf24afd5721a5373f0fa | 3,297 | py | Python | google_analytics/predictor.py | hadisotudeh/jads_kaggle | 384928f8504053692ee4569f9692dd8ecebe1df3 | [
"MIT"
] | 11 | 2018-01-20T08:17:34.000Z | 2019-12-04T08:40:11.000Z | google_analytics/predictor.py | hadisotudeh/jads_kaggle | 384928f8504053692ee4569f9692dd8ecebe1df3 | [
"MIT"
] | 55 | 2018-01-19T15:23:45.000Z | 2019-11-01T09:51:21.000Z | google_analytics/predictor.py | hadisotudeh/jads_kaggle | 384928f8504053692ee4569f9692dd8ecebe1df3 | [
"MIT"
] | 28 | 2018-01-17T16:18:23.000Z | 2020-05-25T14:14:49.000Z | import numpy as np
from sklearn.metrics import mean_squared_error, make_scorer
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.base import BaseEstimator
from utils import timing # noqa
TUNING_OUTPUT_DEFAULT = 'tuning.txt'  # Default output file for hyper-parameter tuning logs
RANDOM_STATE = 42  # Used for reproducible results
class Predictor(BaseEstimator):
    """
    An abstract class modeling our notion of a predictor.

    Concrete implementations should follow the predictor interface and must set
    ``self.model`` to an object exposing the scikit-learn ``fit``/``predict``
    API before ``fit``/``predict``/``evaluate`` are called.
    """
    name = 'Abstract Predictor'

    def __init__(self, name=name):
        """
        Base constructor. The input training is expected to be preprocessed and contain
        features extracted for each sample along with the true values

        :param name: Optional model name, used for logging
        """
        self.name = name

    def __str__(self):
        return self.name

    def fit(self, train_x, train_y):
        """
        A function that fits the predictor to the provided dataset

        :param train_x: train data, a pd.DataFrame of features to fit on.
        :param train_y: The labels for the training data.
        """
        self.model.fit(train_x, train_y)

    def predict(self, test_x):
        """
        Predicts the target for the given input

        :param test_x: a pd.DataFrame of features to be used for predictions
        :return: The predicted labels
        """
        return self.model.predict(test_x)

    def predict_proba(self, test_x):
        """
        Predicts the probability of the label for the given input

        :param test_x: a pd.DataFrame of features to be used for predictions
        :return: The predicted probabilities
        """
        return self.model.predict_proba(test_x)

    @timing
    def evaluate(self, x, y, method="split", nfolds=3, val_size=0.3):
        """
        Evaluate performance of the predictor. The default method `CV` is a lot more robust, however it is also a lot slower
        since it goes through `nfolds` iterations. The `split` method is based on a train-test split which makes it a lot faster.

        :param x: Input features to be used for fitting
        :param y: Target values
        :param method: String denoting the evaluation method. Acceptable values are 'CV' for cross validation
            and 'split' for train-test split
        :param nfolds: Number of folds per tag in case CV is the evaluation method. Ignored otherwise
        :param val_size: Ratio of the training set to be used as validation in case split is the evaluation method. Ignored otherwise
        :return: The root mean squared error (averaged across folds when using 'CV')
        """
        if method == 'CV':
            scorer = make_scorer(mean_squared_error)
            scores = cross_val_score(self.model, x, y, cv=nfolds, scoring=scorer)
            # Take the square root per fold (MSE -> RMSE), then average.
            return np.mean(scores ** (1/2))
        if method == 'split':
            train_x, val_x, train_y, val_y = train_test_split(x, y, test_size=val_size, random_state=RANDOM_STATE)
            self.fit(train_x, train_y)
            predictions = self.predict(val_x)
            return mean_squared_error(val_y, predictions) ** (1/2)
        # BUGFIX: the old message also advertised 'stratified_CV', which is not
        # implemented by this method.
        raise ValueError("Method must be either 'CV' or 'split', not {}".format(method))
871f5dc52afe55d8105df47c84018b6b4a55294d | 3,544 | py | Python | py/fft.py | bradowen2011/lightshowpi | 15713bdd64dd4e7be52abee3b1aac9286eebaf9a | [
"BSD-2-Clause"
] | 3 | 2016-10-13T20:47:13.000Z | 2018-11-30T00:53:26.000Z | py/fft.py | bradowen2011/lightshowpi | 15713bdd64dd4e7be52abee3b1aac9286eebaf9a | [
"BSD-2-Clause"
] | null | null | null | py/fft.py | bradowen2011/lightshowpi | 15713bdd64dd4e7be52abee3b1aac9286eebaf9a | [
"BSD-2-Clause"
] | 8 | 2015-10-10T05:08:59.000Z | 2021-01-31T15:57:27.000Z | #
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.com/
#
# Author: Todd Giles (todd@lightshowpi.com)
"""FFT methods for computing / analyzing frequency response of audio.
This is simply a wrapper around FFT support in numpy.
Initial FFT code inspired from the code posted here:
http://www.raspberrypi.org/phpBB3/viewtopic.php?t=35838&p=454041
Optimizations from work by Scott Driscoll:
http://www.instructables.com/id/Raspberry-Pi-Spectrum-Analyzer-with-RGB-LED-Strip-/
Third party dependencies:
numpy: for FFT calculation - http://www.numpy.org/
"""
from numpy import sum as npsum
from numpy import abs as npabs
from numpy import log10, frombuffer, empty, hanning, fft, delete, int16, zeros
def calculate_levels(data, chunk_size, sample_rate, frequency_limits, num_bins, input_channels=2):
    """Calculate frequency response for each channel defined in frequency_limits

    :param data: decoder.frames(), audio data for fft calculations
    :type data: decoder.frames
    :param chunk_size: chunk size of audio data
    :type chunk_size: int
    :param sample_rate: audio file sample rate
    :type sample_rate: int
    :param frequency_limits: list of (low_hz, high_hz) pairs, one per bin
    :type frequency_limits: list
    :param num_bins: length of gpio to process
    :type num_bins: int
    :param input_channels: number of audio input channels to process for (default=2)
    :type input_channels: int
    :return: log10 of the summed spectral power within each frequency bin
    :rtype: numpy.array
    """
    # create a numpy array, taking just the left channel if stereo
    data_stereo = frombuffer(data, dtype=int16)
    if input_channels == 2:
        # data has 2 bytes per channel.
        # BUGFIX: use floor division so the element count passed to empty()
        # is an int -- true division yields a float, which raises TypeError
        # on Python 3.
        data = empty(len(data) // (2 * input_channels))
        # pull out the even values, just using left channel
        data[:] = data_stereo[::2]
    elif input_channels == 1:
        data = data_stereo

    # if you take an FFT of a chunk of audio, the edges will look like
    # super high frequency cutoffs. Applying a window tapers the edges
    # of each end of the chunk down to zero.
    data = data * hanning(len(data))

    # Apply FFT - real data
    fourier = fft.rfft(data)

    # Remove last element in array to make it the same size as chunk_size
    fourier = delete(fourier, len(fourier) - 1)

    # Calculate the power spectrum
    power = npabs(fourier) ** 2

    matrix = zeros(num_bins, dtype='float64')
    for pin in range(num_bins):
        # take the log10 of the resulting sum to approximate how human ears
        # perceive sound levels
        # Get the power array index corresponding to a particular frequency.
        idx1 = int(chunk_size * frequency_limits[pin][0] / sample_rate)
        idx2 = int(chunk_size * frequency_limits[pin][1] / sample_rate)
        # if index1 is the same as index2 the value is an invalid value
        # we can fix this by incrementing index2 by 1, This is a temporary fix
        # for RuntimeWarning: invalid value encountered in double_scalars
        # generated while calculating the standard deviation. This warning
        # results in some channels not lighting up during playback.
        if idx1 == idx2:
            idx2 += 1
        npsums = npsum(power[idx1:idx2:1])
        # if the sum is 0 lets not take log10, just use 0
        # eliminates RuntimeWarning: divide by zero encountered in log10, does not insert -inf
        if npsums == 0:
            matrix[pin] = 0
        else:
            matrix[pin] = log10(npsums)
    return matrix
| 34.407767 | 98 | 0.683126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,307 | 0.650959 |
871f9d647a8ce835e1b9294a55164bca63acffe5 | 2,625 | py | Python | benchmark/test.py | franklx/misaka | 616d360bee576e4b1dfb36c58bd05a5166f59a38 | [
"MIT"
] | 1 | 2017-09-04T05:32:10.000Z | 2017-09-04T05:32:10.000Z | benchmark/test.py | franklx/misaka | 616d360bee576e4b1dfb36c58bd05a5166f59a38 | [
"MIT"
] | null | null | null | benchmark/test.py | franklx/misaka | 616d360bee576e4b1dfb36c58bd05a5166f59a38 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from misaka import Markdown, BaseRenderer, HtmlRenderer, \
SmartyPants, \
EXT_FENCED_CODE, EXT_TABLES, EXT_AUTOLINK, EXT_STRIKETHROUGH, \
EXT_SUPERSCRIPT, HTML_USE_XHTML, \
TABLE_ALIGN_L, TABLE_ALIGN_R, TABLE_ALIGN_C, \
TABLE_ALIGNMASK, TABLE_HEADER
class BleepRenderer(HtmlRenderer, SmartyPants):
    """Custom Misaka renderer that emits hand-rolled HTML.

    Overrides the block-level callbacks so the benchmark exercises
    Python-side rendering code instead of the C renderer.
    """

    def block_code(self, text, lang):
        """Render a fenced/indented code block; `lang` becomes a CSS class."""
        if lang:
            lang = ' class="%s"' % lang
        else:
            lang = ''
        return '\n<pre%s><code>%s</code></pre>\n' % (lang, text)

    def block_quote(self, text):
        return '\n<blockquote>%s</blockquote>\n' % text

    def block_html(self, text):
        # Raw HTML passes through untouched.
        return '\n%s' % text

    def header(self, text, level):
        return '\n<h%d>%s</h%d>\n' % (level, text, level)

    def hrule(self):
        # Honor the renderer's XHTML flag for self-closing tags.
        if self.flags & HTML_USE_XHTML:
            return '\n<hr/>\n'
        else:
            return '\n<hr>\n'

    def list(self, text, is_ordered):
        if is_ordered:
            return '\n<ol>%s</ol>\n' % text
        else:
            return '\n<ul>%s</ul>\n' % text

    def list_item(self, text, is_ordered):
        # Item markup is identical for ordered and unordered lists.
        return '<li>%s</li>\n' % text

    def paragraph(self, text):
        # No hard wrapping yet. Maybe with:
        # http://docs.python.org/library/textwrap.html
        return '\n<p>%s</p>\n' % text

    def table(self, header, body):
        return '\n<table><thead>\n%s</thead><tbody>\n%s</tbody></table>\n' % \
            (header, body)

    def table_row(self, text):
        return '<tr>\n%s</tr>\n' % text

    def table_cell(self, text, flags):
        """Render a <td>/<th> with an optional alignment attribute."""
        alignment = flags & TABLE_ALIGNMASK
        # BUGFIX: the attribute needs a leading space; the original produced
        # invalid markup such as '<tdalign="center">'.
        if alignment == TABLE_ALIGN_C:
            align = ' align="center"'
        elif alignment == TABLE_ALIGN_L:
            align = ' align="left"'
        elif alignment == TABLE_ALIGN_R:
            align = ' align="right"'
        else:
            align = ''
        if flags & TABLE_HEADER:
            return '<th%s>%s</th>\n' % (align, text)
        else:
            return '<td%s>%s</td>\n' % (align, text)

    def autolink(self, link, is_email):
        if is_email:
            return '<a href="mailto:%(link)s">%(link)s</a>' % {'link': link}
        else:
            return '<a href="%(link)s">%(link)s</a>' % {'link': link}

    def preprocess(self, text):
        # NOTE(review): replaces every space with '_' before parsing, which
        # also mangles markdown syntax like '- item'; presumably a deliberate
        # demonstration of the preprocess hook -- confirm intent.
        return text.replace(' ', '_')
# Build a Markdown processor wired to the custom renderer, with several
# common extensions (fenced code, tables, autolinking, etc.) enabled.
md = Markdown(BleepRenderer(),
    EXT_FENCED_CODE | EXT_TABLES | EXT_AUTOLINK |
    EXT_STRIKETHROUGH | EXT_SUPERSCRIPT)
# Smoke test: render a small document covering lists, autolinked email/URL.
print(md.render('''
Unordered
- One
- Two
- Three
And now ordered:
1. Three
2. Two
3. One
An email: example@example.com
And an URL: http://example.com
'''))
| 25.240385 | 78 | 0.552762 | 2,039 | 0.776762 | 0 | 0 | 0 | 0 | 0 | 0 | 687 | 0.261714 |
87214198b671c3bd959654596c5df39383991bf6 | 3,663 | py | Python | sample_project/batchimport/batchimport_settings.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | null | null | null | sample_project/batchimport/batchimport_settings.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | null | null | null | sample_project/batchimport/batchimport_settings.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | null | null | null | """
The batchimport_settings.py module initializes itself with defaults but
allows for the values to be overridden via the django project's settings
file.
NOTE: These values should be considered CONSTANTS even though I'm kind
of cheating and using them as variables to initialize them here.
"""
import settings
def get_setting(setting_name, default):
    """
    Return the value of ``setting_name`` from the project's settings
    module, or ``default`` when the project does not define it.
    """
    # getattr's three-argument form covers the original try/except
    # (AttributeError) fallback; the leftover debug ``print`` statements
    # that ran on every single settings lookup were removed.
    return getattr(settings, setting_name, default)
# INITIALIZE BATCHIMPORT SETTINGS...
# Templates used for each step of the batch-import workflow.
BATCH_IMPORT_START_TEMPLATE = get_setting('BATCH_IMPORT_START_TEMPLATE', 'batchimport/start.html')
BATCH_IMPORT_OPTIONS_TEMPLATE = get_setting('BATCH_IMPORT_OPTIONS_TEMPLATE', 'batchimport/options.html')
BATCH_IMPORT_EXECUTE_TEMPLATE = get_setting('BATCH_IMPORT_EXECUTE_TEMPLATE', 'batchimport/processing.html')
BATCH_IMPORT_RESULTS_TEMPLATE = get_setting('BATCH_IMPORT_RESULTS_TEMPLATE', 'batchimport/results.html')
# Specify the list of models in your application which are importable
# in batch. If you do not provide a list, the system will use introspection
# to get a list of ALL models in your application (via INSTALLED_APPS).
BATCH_IMPORT_IMPORTABLE_MODELS = get_setting('BATCH_IMPORT_IMPORTABLE_MODELS', [])
# Specify where the uploaded Microsoft Excel file will be saved to the
# system.
# NOTE: This must be an absolute path.
# NOTE: Django must have read/write access to this location.
BATCH_IMPORT_TEMPFILE_LOCATION = get_setting('BATCH_IMPORT_TEMPFILE_LOCATION', '/tmp/')
# By default, the system does not allow you to import data for fields
# that are not EDITABLE (i.e. in their model field declarations, you've
# set editable=False). You can override this behavior here:
BATCH_IMPORT_UNEDITABLE_FIELDS = get_setting('BATCH_IMPORT_UNEDITABLE_FIELDS', False)
# Sometimes you will want to override the value coming in from the XLS
# file with a constant or a dynamically generated value.
# The following setting is a dictionary of values (or callables) per
# each fully specified model field.
# NOTE: You must import the item into your settings file if it is a
# callable.
BATCH_IMPORT_VALUE_OVERRIDES = get_setting('BATCH_IMPORT_VALUE_OVERRIDES', {})
# The system can show you individual imports, updates,
# or errors individually using the following boolean options.
# Note that True is assumed for all three if no setting is
# present.
BATCH_IMPORT_SHOW_SUCCESSFUL_IMPORTS = get_setting('BATCH_IMPORT_SHOW_SUCCESSFUL_IMPORTS', True)
BATCH_IMPORT_SHOW_SUCCESSFUL_UPDATES = get_setting('BATCH_IMPORT_SHOW_SUCCESSFUL_UPDATES', True)
BATCH_IMPORT_SHOW_ERRORS = get_setting('BATCH_IMPORT_SHOW_ERRORS', True)
# Whether the system should stop on the first error
# or process the entire uploaded spreadsheet and show
# errors afterwards.
BATCH_IMPORT_STOP_ON_FIRST_ERROR = get_setting('BATCH_IMPORT_STOP_ON_FIRST_ERROR', False)
# Whether or not to update duplicates or simply
# ignore them. Note that duplicates are determined
# based on the user's specification of model fields
# as identification fields. If these are not set, a duplicate
# must match at all column/fields.
BATCH_IMPORT_UPDATE_DUPS = get_setting('BATCH_IMPORT_UPDATE_DUPS', False)
# If no options are set for start/end row, defaults are used that
# assume (1) the spreadsheet has a header row (indicating that data
# starts on row #2 and (2) the entire spreadsheet is to be processed.
BATCH_IMPORT_START_ROW = get_setting('BATCH_IMPORT_START_ROW', 2)
BATCH_IMPORT_END_ROW = get_setting('BATCH_IMPORT_END_ROW', -1)
| 43.094118 | 107 | 0.803986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,651 | 0.723724 |
8722f6f5404b5e8a02c1795e4a9cd19c750cfc01 | 10,119 | py | Python | tests/test_xunit_plugin.py | omergertel/slash | 7dd5710a05822bbbaadc6c6517cefcbaa6397eab | [
"BSD-3-Clause"
] | null | null | null | tests/test_xunit_plugin.py | omergertel/slash | 7dd5710a05822bbbaadc6c6517cefcbaa6397eab | [
"BSD-3-Clause"
] | null | null | null | tests/test_xunit_plugin.py | omergertel/slash | 7dd5710a05822bbbaadc6c6517cefcbaa6397eab | [
"BSD-3-Clause"
] | null | null | null | import os
import pytest
import slash
from lxml import etree
def test_xunit_plugin(results, xunit_filename):
    """The xunit plugin must write a report that validates against the XSD."""
    assert os.path.exists(xunit_filename), 'xunit file not created'
    xsd_schema = etree.XMLSchema(etree.XML(_XUNIT_XSD))
    validating_parser = etree.XMLParser(schema=xsd_schema)
    with open(xunit_filename) as report_file:
        etree.parse(report_file, validating_parser)
@pytest.fixture
def results(populated_suite, xunit_filename):
    # Depends on xunit_filename so the xunit plugin is already active when
    # the suite runs and writes its report.
    populated_suite.run()
@pytest.fixture
def xunit_filename(tmpdir, request, config_override):
    # Point the xunit plugin at a per-test temp file and activate it; the
    # finalizer deactivates the plugin so state does not leak across tests.
    xunit_filename = str(tmpdir.join('xunit.xml'))
    slash.plugins.manager.activate('xunit')
    slash.config.root.plugins.xunit.filename = xunit_filename
    @request.addfinalizer
    def deactivate():
        slash.plugins.manager.deactivate('xunit')
        assert 'xunit' not in slash.config['plugins']
    return xunit_filename
# XSD schema used to validate the generated xUnit/JUnit report file.
# Taken from https://gist.github.com/jzelenkov/959290
_XUNIT_XSD = """<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
elementFormDefault="qualified"
attributeFormDefault="unqualified">
<xs:annotation>
<xs:documentation xml:lang="en">Jenkins xUnit test result schema.
</xs:documentation>
</xs:annotation>
<xs:element name="testsuite" type="testsuite"/>
<xs:simpleType name="ISO8601_DATETIME_PATTERN">
<xs:restriction base="xs:dateTime">
<xs:pattern value="[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}"/>
</xs:restriction>
</xs:simpleType>
<xs:element name="testsuites">
<xs:annotation>
<xs:documentation xml:lang="en">Contains an aggregation of testsuite results</xs:documentation>
</xs:annotation>
<xs:complexType>
<xs:sequence>
<xs:element name="testsuite" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
<xs:complexContent>
<xs:extension base="testsuite">
<xs:attribute name="package" type="xs:token" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">Derived from testsuite/@name in the non-aggregated documents</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="id" type="xs:int" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">Starts at '0' for the first testsuite and is incremented by 1 for each following testsuite</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:extension>
</xs:complexContent>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:complexType name="testsuite">
<xs:annotation>
<xs:documentation xml:lang="en">Contains the results of exexuting a testsuite</xs:documentation>
</xs:annotation>
<xs:sequence>
<xs:element name="testcase" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
<xs:choice minOccurs="0">
<xs:element name="error">
<xs:annotation>
<xs:documentation xml:lang="en">Indicates that the test errored. An errored test is one that had an unanticipated problem. e.g., an unchecked throwable; or a problem with the implementation of the test. Contains as a text node relevant data for the error, e.g., a stack trace</xs:documentation>
</xs:annotation>
<xs:complexType>
<xs:simpleContent>
<xs:extension base="pre-string">
<xs:attribute name="message" type="xs:string">
<xs:annotation>
<xs:documentation xml:lang="en">The error message. e.g., if a java exception is thrown, the return value of getMessage()</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="type" type="xs:string" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">The type of error that occured. e.g., if a java execption is thrown the full class name of the exception.</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="failure">
<xs:annotation>
<xs:documentation xml:lang="en">Indicates that the test failed. A failure is a test which the code has explicitly failed by using the mechanisms for that purpose. e.g., via an assertEquals. Contains as a text node relevant data for the failure, e.g., a stack trace</xs:documentation>
</xs:annotation>
<xs:complexType>
<xs:simpleContent>
<xs:extension base="pre-string">
<xs:attribute name="message" type="xs:string">
<xs:annotation>
<xs:documentation xml:lang="en">The message specified in the assert</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="type" type="xs:string" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">The type of the assert.</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="skipped">
<xs:annotation>
<xs:documentation xml:lang="en">Indicates that the test was skipped. A skipped test is a test which was ignored using framework mechanisms. e.g., @Ignore annotation.</xs:documentation>
</xs:annotation>
<xs:complexType>
<xs:simpleContent>
<xs:extension base="pre-string">
<xs:attribute name="type" type="xs:string" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">Skip type.</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
</xs:choice>
<xs:attribute name="name" type="xs:token" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">Name of the test method</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="classname" type="xs:token" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">Full class name for the class the test method is in.</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="time" type="xs:decimal" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">Time taken (in seconds) to execute the test</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:complexType>
</xs:element>
</xs:sequence>
<xs:attribute name="name" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">Full class name of the test for non-aggregated testsuite documents. Class name without the package for aggregated testsuites documents</xs:documentation>
</xs:annotation>
<xs:simpleType>
<xs:restriction base="xs:token">
<xs:minLength value="1"/>
</xs:restriction>
</xs:simpleType>
</xs:attribute>
<xs:attribute name="timestamp" type="ISO8601_DATETIME_PATTERN" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">when the test was executed. Timezone may not be specified.</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="hostname" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">Host on which the tests were executed. 'localhost' should be used if the hostname cannot be determined.</xs:documentation>
</xs:annotation>
<xs:simpleType>
<xs:restriction base="xs:token">
<xs:minLength value="1"/>
</xs:restriction>
</xs:simpleType>
</xs:attribute>
<xs:attribute name="tests" type="xs:int" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">The total number of tests in the suite</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="failures" type="xs:int" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">The total number of tests in the suite that failed. A failure is a test which the code has explicitly failed by using the mechanisms for that purpose. e.g., via an assertEquals</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="errors" type="xs:int" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">The total number of tests in the suite that errored. An errored test is one that had an unanticipated problem. e.g., an unchecked throwable; or a problem with the implementation of the test.</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="skipped" type="xs:int">
<xs:annotation>
<xs:documentation xml:lang="en">The total number of tests in the suite that skipped. A skipped test is a test which was ignored using framework mechanisms. e.g., @Ignore annotation.</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="time" type="xs:decimal" use="required">
<xs:annotation>
<xs:documentation xml:lang="en">Time taken (in seconds) to execute the tests in the suite</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:complexType>
<xs:simpleType name="pre-string">
<xs:restriction base="xs:string">
<xs:whiteSpace value="preserve"/>
</xs:restriction>
</xs:simpleType>
</xs:schema>
"""
| 45.376682 | 303 | 0.608657 | 0 | 0 | 0 | 0 | 494 | 0.048819 | 0 | 0 | 9,294 | 0.91847 |
87234ddbc75c00fe76141a6b66832d15ac92c6f3 | 2,495 | py | Python | slides/figs/draw.py | hiyouga/AMP-Poster-Slides-LaTeX | c1fd40aa5ef3216f17b4d27dc6e6092e3cc52e40 | [
"MIT"
] | 8 | 2021-05-25T11:56:48.000Z | 2021-12-20T07:12:01.000Z | slides/figs/draw.py | hiyouga/AMP-Poster-Slides-LaTeX | c1fd40aa5ef3216f17b4d27dc6e6092e3cc52e40 | [
"MIT"
] | 1 | 2021-05-28T15:25:37.000Z | 2021-05-30T05:01:24.000Z | slides/figs/draw.py | hiyouga/AMP-Poster-Slides-LaTeX | c1fd40aa5ef3216f17b4d27dc6e6092e3cc52e40 | [
"MIT"
] | 2 | 2021-05-26T01:39:53.000Z | 2021-12-20T06:36:04.000Z | import matplotlib
import numpy as np
import matplotlib.pyplot as plt
# Matplotlib rc overrides: render labels with Times New Roman, no TeX.
default_params = {
    'text.usetex': False,
    'font.family': 'Times New Roman',
    'font.serif': 'Times New Roman'
}
if __name__ == '__main__':
    plt.rcParams.update(default_params)
    # NOTE(review): font loaded from a hard-coded Windows path; this only
    # works on a machine where C:\times.ttf exists.
    myfont1 = matplotlib.font_manager.FontProperties(fname='C:\\times.ttf', size=14)
    myfont2 = matplotlib.font_manager.FontProperties(fname='C:\\times.ttf', size=12)
    # Figure 1 (gap1.pdf): capacity-vs-error curves with a visible
    # generalization gap annotation.
    plt.figure(figsize=(5, 3))
    x = np.linspace(0.001, 5, 1000)
    # Synthetic curves: quadratic term plus 1/x term gives the usual shapes.
    y1 = 0.001 * x ** 2 + 0.02 * 1 / x + 0.02
    y2 = 0.12 * x ** 2 + 0.04 * 1 / x + 0.06
    plt.plot(x, y1, color='b', linestyle='--', label='Training error')
    plt.plot(x, y2, color='g', linestyle='-', label='Generalization error')
    # Dotted guide lines marking the 'Optimal capacity' point on the
    # generalization-error curve.
    cx = 0.55
    cy = 0.12 * cx ** 2 + 0.04 * 1 / cx + 0.06
    plt.plot([cx, cx], [-0.01, cy], color='r', linestyle=':')
    plt.plot([-0.01, cx], [cy, cy], color='r', linestyle=':')
    plt.text(cx-0.3, -0.12, 'Optimal capacity', fontproperties=myfont2)
    # Two opposing arrows form a double-headed marker for the gap.
    plt.arrow(1.6, 0.21, 0.0, 0.12, head_width=0.03, head_length=0.03, shape='full', fc='black', ec='black', linewidth=1)
    plt.arrow(1.6, 0.21, 0.0, -0.12, head_width=0.03, head_length=0.03, shape='full', fc='black', ec='black', linewidth=1)
    plt.text(1.65, 0.18, 'Generalization gap', fontproperties=myfont2)
    plt.legend(loc='upper right', prop=myfont1)
    plt.xticks([0])
    plt.yticks([])
    plt.xlabel('Capacity', fontproperties=myfont1)
    plt.ylabel('Error', fontproperties=myfont1)
    plt.xlim((-0.01, 2.5))
    plt.ylim((-0.01, 1.2))
    plt.savefig('gap1.pdf', format='pdf', dpi=900, bbox_inches='tight')
    # Figure 2 (gap2.pdf): same construction with flatter curves and no
    # gap annotation.
    plt.figure(figsize=(5, 3))
    x = np.linspace(0.001, 5, 1000)
    y1 = 0.005 * x ** 2 + 0.03 * 1 / x + 0.03
    y2 = 0.04 * x ** 2 + 0.05 * 1 / x + 0.03
    plt.plot(x, y1, color='b', linestyle='--', label='Training error')
    plt.plot(x, y2, color='g', linestyle='-', label='Generalization error')
    cx = 0.855
    cy = 0.04 * cx ** 2 + 0.05 * 1 / cx + 0.03
    plt.plot([cx, cx], [-0.01, cy], color='r', linestyle=':')
    plt.plot([-0.01, cx], [cy, cy], color='r', linestyle=':')
    plt.text(cx-0.3, -0.12, 'Optimal capacity', fontproperties=myfont2)
    plt.legend(loc='upper right', prop=myfont1)
    plt.xticks([0])
    plt.yticks([])
    plt.xlabel('Capacity', fontproperties=myfont1)
    plt.ylabel('Error', fontproperties=myfont1)
    plt.xlim((-0.01, 2.5))
    plt.ylim((-0.01, 1.2))
    plt.savefig('gap2.pdf', format='pdf', dpi=900, bbox_inches='tight')
| 43.77193 | 122 | 0.600802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 438 | 0.175551 |
8723d146de1810223e1e03f81066a7c04d4f199d | 144 | py | Python | tests/unit/test_u_wsgi.py | p15r/HYOK-Wrapper | 257f29150f2f035e12aa2247d95aac4263e1694b | [
"MIT"
] | 2 | 2020-10-02T13:10:26.000Z | 2020-10-12T07:21:55.000Z | tests/unit/test_u_wsgi.py | p15r/HYOK-Wrapper | 257f29150f2f035e12aa2247d95aac4263e1694b | [
"MIT"
] | 12 | 2020-10-03T13:59:50.000Z | 2020-10-26T20:10:32.000Z | tests/unit/test_u_wsgi.py | p15r/HYOK-Wrapper | 257f29150f2f035e12aa2247d95aac4263e1694b | [
"MIT"
] | null | null | null | import wsgi # noqa: F401
def test_connect_to_app(http_client):
    """The root URL is expected to answer with HTTP 404."""
    root_response = http_client.get('/')
    assert root_response.status_code == 404
| 20.571429 | 38 | 0.701389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.104167 |
87244a8e220cfb272e1a810228a8534c5aa83e79 | 884 | py | Python | ceefax/pages/index.py | mscroggs/CEEFAX | 8e7a075de1809064b77360da24ebbbaa409c3bf2 | [
"MIT"
] | 1 | 2020-03-28T15:53:22.000Z | 2020-03-28T15:53:22.000Z | ceefax/pages/index.py | mscroggs/CEEFAX | 8e7a075de1809064b77360da24ebbbaa409c3bf2 | [
"MIT"
] | 1 | 2021-02-05T13:43:52.000Z | 2021-02-05T13:43:52.000Z | ceefax/pages/index.py | mscroggs/CEEFAX | 8e7a075de1809064b77360da24ebbbaa409c3bf2 | [
"MIT"
] | null | null | null | from ceefax.page import Page
from ceefax import config
from ceefax import Ceefax
class IndexPage(Page):
    """Two-column index page listing every indexable page."""

    def __init__(self, n):
        super(IndexPage, self).__init__(n)
        self.importance = 5
        self.title = "Index"

    def generate_content(self):
        self.print_image(config.title, 1)
        self.add_newline()
        self.add_text(" INDEX " * 10, fg="GREEN")
        self.add_newline()
        self.add_newline()
        # Entries alternate between a left and a right column.
        for i, page in enumerate(Ceefax().page_manager.index_pages()):
            left_column = (i % 2 == 0)
            self.add_text(page.index_num, fg="MAGENTA")
            self.move_cursor(x=9 if left_column else 45)
            self.add_text(page.title, fg="WHITE")
            if left_column:
                self.move_cursor(x=36)
            else:
                self.add_newline()
i_p = IndexPage("100")
| 26.787879 | 70 | 0.554299 | 775 | 0.876697 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.050905 |
872538f9e004a1966a45af99b31962b33d5a2ea0 | 372 | py | Python | bin/award_ebadge_declare.py | ervikey/SA-ctf_scoreboard | 00b631e9ed2c075f96f660583656ae68eb4b17e0 | [
"CC0-1.0"
] | 106 | 2018-03-09T13:03:05.000Z | 2022-03-10T11:01:48.000Z | bin/award_ebadge_declare.py | ervikey/SA-ctf_scoreboard | 00b631e9ed2c075f96f660583656ae68eb4b17e0 | [
"CC0-1.0"
] | 17 | 2018-05-11T00:53:47.000Z | 2020-05-07T10:14:40.000Z | bin/award_ebadge_declare.py | ervikey/SA-ctf_scoreboard | 00b631e9ed2c075f96f660583656ae68eb4b17e0 | [
"CC0-1.0"
] | 33 | 2018-04-23T20:18:11.000Z | 2022-03-27T16:41:03.000Z | # encode = utf-8
import os
import sys
import re
ta_name = 'SA-ctf_scoreboard'
ta_lib_name = 'sa_ctf_scoreboard'
# Matches any sys.path entry ending in .../etc/apps/<app>/bin (with either
# slash style), i.e. a Splunk app's bin directory.
pattern = re.compile(r"[\\/]etc[\\/]apps[\\/][^\\/]+[\\/]bin[\\/]?$")
# Keep this app's own paths but drop other apps' bin directories --
# presumably to stop their modules shadowing ours; verify on upgrade.
new_paths = [path for path in sys.path if not pattern.search(path) or ta_name in path]
# Put this app's bundled library directory first on the import path.
new_paths.insert(0, os.path.sep.join([os.path.dirname(__file__), ta_lib_name]))
sys.path = new_paths
| 28.615385 | 86 | 0.688172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.271505 |
87259cd2c6b021df4f9006bba7f609c83a89593f | 503 | py | Python | books/migrations/0009_library_waitlist_items.py | rodbv/kamu | f390d91f7d7755b49176cf5d504648e3fe572237 | [
"MIT"
] | 70 | 2018-05-23T16:44:44.000Z | 2021-12-05T21:48:10.000Z | books/migrations/0009_library_waitlist_items.py | rodbv/kamu | f390d91f7d7755b49176cf5d504648e3fe572237 | [
"MIT"
] | 122 | 2018-10-06T21:31:24.000Z | 2020-11-09T15:04:56.000Z | books/migrations/0009_library_waitlist_items.py | rodbv/kamu | f390d91f7d7755b49176cf5d504648e3fe572237 | [
"MIT"
] | 50 | 2018-05-23T05:49:10.000Z | 2021-11-22T07:53:42.000Z | # Generated by Django 2.0.1 on 2019-01-07 14:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('waitlist', '0001_initial'),
('books', '0008_bookcopy_borrow_date'),
]
operations = [
migrations.AddField(
model_name='library',
name='waitlist_items',
field=models.ManyToManyField(related_name='waitlist_items', through='waitlist.WaitlistItem', to='books.Book'),
),
]
| 25.15 | 122 | 0.628231 | 410 | 0.815109 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.359841 |
8725efc65cc7a34903579071ce0f80f5535bdbf6 | 3,671 | py | Python | main.py | drdoof2019/Twitter-Sentiment-Analysis | b0fd17b2384c6540a08b63e1660c1c241f7645da | [
"MIT"
] | null | null | null | main.py | drdoof2019/Twitter-Sentiment-Analysis | b0fd17b2384c6540a08b63e1660c1c241f7645da | [
"MIT"
] | null | null | null | main.py | drdoof2019/Twitter-Sentiment-Analysis | b0fd17b2384c6540a08b63e1660c1c241f7645da | [
"MIT"
] | null | null | null | import re
import tweepy
from tweepy import OAuthHandler
from textblob import TextBlob
# pip install tweepy
# pip install textblob
# developer.twitter.com/en/portal/dashboard
def istenmeyen_karakter_temizle(text):
    """Return *text* with unwanted punctuation characters removed.

    Note: callers pass TextBlob objects as well as plain strings, so this
    deliberately sticks to ``replace`` (available on both) rather than
    ``str.translate``.
    """
    for karakter in (':', ';', '!', '*', '$', '½', '&'):
        text = text.replace(karakter, '')
    return text
def duygu_analizi(tweet, counter):
    """Clean the tweet text, translate it to English when necessary and
    return ``(cleaned TextBlob, polarity of the English version)``.

    Note: detect_language()/translate() call an external service and can
    fail (e.g. HTTP 429); the caller falls back to
    duygu_analizi_cevirisiz() in that case.
    """
    cleaned = istenmeyen_karakter_temizle(TextBlob(tweet.full_text))
    if cleaned.detect_language() != 'en':
        english = cleaned.translate(to='en')
    else:
        english = cleaned
    print("Translate ile yapıldı.!")
    return cleaned, english.polarity
def duygu_analizi_cevirisiz(tweet, counter):
    """Translation-free fallback: clean the tweet text and return
    ``(cleaned TextBlob, its polarity)`` directly."""
    cleaned = istenmeyen_karakter_temizle(TextBlob(tweet.full_text))
    print("Translatesiz yapıldı.!", cleaned.polarity)
    return cleaned, cleaned.polarity
# Yetkilendirme iลlemleri
consumerKey = "qwe"
consumerSecret = "asd"
accessToken = "qweewq"
accessTokenSecret = "asddsa"
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth)
#Yetkilendirmeden sorna tweepy ile yazฤฑlarฤฑ alฤฑp textblob ile duygu analizi yapฤฑcaz.
tweet_list = []
neutral_list = []
negative_list = []
positive_list = []
counter = 1
keyword = str(input("Keyword giriniz..\n")) #BritishBasketball #ArmenianGenocide
noOfTweet = int(input("Kaรง adet twit kontrol edilsin\n"))
print(noOfTweet, "adet tweet api ile alฤฑnฤฑyor...")
# tweets = tweepy.Cursor(api.user_timeline, id = 'elonmusk',tweet_mode='extended').items(noOfTweet) # รถzel bir kullanฤฑcฤฑnฤฑn twitlerini alฤฑr
tweets = tweepy.Cursor(api.search, q=keyword,tweet_mode='extended').items(noOfTweet) # kelime รผzerinden twit arฤฑyorsun
print("Tweetlerde duygu analizi yapฤฑlฤฑyor... Tweet sayฤฑsฤฑ fazlaysa bu iลlem birkaรง dakika sรผrebilir")
for tweet in tweets:
try:
text, polarity = duygu_analizi(tweet,counter)
tweet_list.append(text)
except:
text, polarity = duygu_analizi_cevirisiz(tweet,counter)
tweet_list.append(text)
#print("Polarity Tipi:",type(polarity))
if polarity > 0:
positive_list.append(text)
elif polarity < 0:
negative_list.append(text)
else:
neutral_list.append(text)
counter += 1
new_counter = 1
print("<<<<>>>> Pozitif Twit Sayฤฑsฤฑ",len(positive_list))
if len(positive_list) != 0:
print("-----------------Pozitif Twitler-----------------")
for eleman in positive_list:
eleman = eleman.strip()
print(str(new_counter)+".)", eleman)
new_counter += 1
new_counter = 1
print("<<<<>>>> Negatif Twit Sayฤฑsฤฑ",len(negative_list))
if len(negative_list) != 0:
print("-----------------Negatif Twitler-----------------")
for eleman in negative_list:
eleman = eleman.strip()
print(str(new_counter)+".)", eleman)
new_counter += 1
new_counter = 1
print("<<<<>>>> Nรถtr Twit Sayฤฑsฤฑ",len(neutral_list))
if len(neutral_list) != 0:
print("-----------------Nรถtr Twitler-----------------")
for eleman in neutral_list:
eleman = eleman.strip()
print(str(new_counter)+".)", eleman)
new_counter += 1
| 36.346535 | 140 | 0.651321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,281 | 0.345656 |
8726ffb07f7ca1189ea73517ad7af10fd540bf6c | 58 | py | Python | todo/__init__.py | maxios/gtd.py | afe91405c8f5ba7fc6cdd46e1818559b2a6079d3 | [
"BSD-3-Clause"
] | null | null | null | todo/__init__.py | maxios/gtd.py | afe91405c8f5ba7fc6cdd46e1818559b2a6079d3 | [
"BSD-3-Clause"
] | null | null | null | todo/__init__.py | maxios/gtd.py | afe91405c8f5ba7fc6cdd46e1818559b2a6079d3 | [
"BSD-3-Clause"
] | null | null | null | '''gtd.py'''
__version__ = '0.7.0'
__author__ = 'delucks'
| 14.5 | 22 | 0.62069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.482759 |
872757654f8085ced49101ead759a3fbe2d7c7bc | 7,695 | py | Python | SubredditScraper/scrape.py | tomhennessey/Subreddit-Scrape | 224c76fb179b6171634e7dd7739f73015c206d62 | [
"MIT"
] | null | null | null | SubredditScraper/scrape.py | tomhennessey/Subreddit-Scrape | 224c76fb179b6171634e7dd7739f73015c206d62 | [
"MIT"
] | null | null | null | SubredditScraper/scrape.py | tomhennessey/Subreddit-Scrape | 224c76fb179b6171634e7dd7739f73015c206d62 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
A simple subreddit scraper
"""
import datetime as dt
import logging
import time
import os
import sys
import getopt
import praw
from psaw import PushshiftAPI
# Support both execution modes: plain import when run as a script,
# relative import when used as part of the package.
# (``is None`` replaces the non-idiomatic ``== None`` comparison.)
if __package__ is None:
    import db
else:
    from . import db
def init_log():
    """Attach a DEBUG-level stream handler to the 'psaw' logger.

    Used by the '-v' command-line option to surface psaw's logging.
    """
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    psaw_logger = logging.getLogger('psaw')
    psaw_logger.setLevel(logging.DEBUG)
    psaw_logger.addHandler(stream_handler)
def utc_to_local(utc_dt):
    """Format a unix timestamp as a human-readable local-time string,
    e.g. '2020-01-31 05:04:03PM'.
    """
    local_time = dt.datetime.fromtimestamp(utc_dt)
    return local_time.strftime('%Y-%m-%d %I:%M:%S%p')
def epoch_generate(month_num, year):
    """
    Generate the start and end epochs for one whole month, to be used in
    generate_submissions_psaw().

    Parameters
    ----------
    month_num : int
        Month number (1-12) being requested in the scrape.
    year : int
        Year the month belongs to.

    Returns
    -------
    tuple
        (start, end) unix timestamps: midnight on the 1st of the month
        and midnight on the 1st of the following month (local time).
    """
    start_time = int(dt.datetime(year, month_num, 1).timestamp())
    # BUG FIX: the original ``month_num + 1`` raised ValueError for
    # December (month 13); roll over to January of the next year instead.
    if month_num == 12:
        end_time = int(dt.datetime(year + 1, 1, 1).timestamp())
    else:
        end_time = int(dt.datetime(year, month_num + 1, 1).timestamp())
    return (start_time, end_time)
def generate_submissions_psaw(month_num, subreddit, year=2020):
    """
    Get submissions posted during one month for the requested subreddit.

    Parameters
    ----------
    month_num : int
        Month number (1-12) passed to epoch_generate().
    subreddit : str
        Name of the subreddit to be scraped.
    year : int, optional
        Year to scrape. Defaults to 2020, the previously hard-coded
        value, so existing callers are unaffected.

    Returns
    -------
    generator
        A psaw generator yielding submissions in the requested window
        (requested in pages of up to 1000).
    """
    # init api
    api = PushshiftAPI()
    start_epoch, end_epoch = epoch_generate(month_num, year)
    return api.search_submissions(after=start_epoch, before=end_epoch,
                                  subreddit=subreddit, size=1000)
def generate_comments(reddit, submission_id):
    """Return the CommentForest for the submission with the given id.

    Parameters
    ----------
    reddit : praw.Reddit
        Authenticated PRAW API instance.
    submission_id :
        Id of the submission whose comments are wanted.

    Returns
    -------
    praw.models.comment_forest.CommentForest
        Iterable comment tree of the submission.
    """
    return reddit.submission(id=submission_id).comments
def praw_timer(reddit):
    """Pause briefly when fewer than 10 PRAW API requests remain, giving
    the rate limit a chance to recover before retrying.

    Parameters
    ----------
    reddit : praw.Reddit
        A PRAW Reddit API instance.
    """
    remaining = reddit.auth.limits['remaining']
    if remaining < 10:
        print("Waiting for PRAW API limit to reset...", end="\r")
        time.sleep(4)
def init_db(db_name):
    """Open (or create) the SQLite corpus database and make sure the
    submissions and comments tables exist.

    Returns
    -------
    conn : SQLite DB connection
    """
    connection = db.create_connection(db_name)
    db.create_table_submissions(connection)
    db.create_table_comments(connection)
    print("DB Init Success")
    return connection
def clear_screen():
    """Clear the terminal with the platform-appropriate shell command."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def get_args():
    """Parse the command line with getopt (flags: -v, -c).

    Returns the (opts, args) pair exactly as getopt.getopt() produces it.
    """
    cli_args = sys.argv[1:]
    return getopt.getopt(cli_args, 'vc')
def iterate_comments(state, submission, conn):
    """
    Fetch every comment of ``submission`` via PRAW and insert each one
    into the comments table of ``conn``, updating the shared counters.

    Parameters
    ----------
    state : StateObj
        Shared scrape state; supplies the PRAW instance and counters.
    submission :
        Submission object exposing at least ``id``.
    conn :
        Open SQLite connection from init_db().
    """
    comments = generate_comments(state.reddit, submission.id)
    # Throttle before iterating when close to the PRAW rate limit.
    praw_timer(state.reddit)
    for j in list(comments):
        try:
            comment = (str(j.author), str(utc_to_local(j.created_utc)),
                    str(j.id), str(j.body), str(submission.id))
            db.insert_comment(conn, comment)
        # Items missing these attributes (presumably praw MoreComments
        # placeholders -- verify) are logged and skipped.
        except AttributeError as err:
            print(err)
            continue
        state.update_praw()
        state.inc_comment()
    #print("PRAW requests remaining: ", end="")
    #print(reddit.auth.limits['remaining'], end="\r")
def update_display(state_obj):
    """Print a one-line, carriage-return progress display showing the
    remaining PRAW quota, request counters and current DB file size (MB).
    """
    db_size_mb = 0
    if os.path.isfile(state_obj.db_name):
        db_size_mb = int(os.stat(state_obj.db_name).st_size) / 1048576
    progress_line = (' PRAW Requests Remaining: {} '
                     '|Submission Request #{} '
                     '|Comment Request #{} '
                     '|Filesize {} MB').format(state_obj.praw_requests,
                                               state_obj.submission_idx,
                                               state_obj.comment_idx,
                                               db_size_mb)
    print(progress_line, end=" \r", flush=True)
def usage():
    """
    Print command-line usage information and exit the script.
    """
    if os.name == 'nt':
        output = """Usage: python3 scrape.py [subreddit] [output file]
            Options: -v: verbose logging
                     -c: comments on"""
    else:
        output = """Usage: ./scrape.py [subreddit] [output file]
            Options: -v: verbose logging
                     -c: comments on"""
    print(output)
    exit()
class StateObj:
    """
    Mutable scrape-progress state shared between main() and its helpers:
    request counters, the PRAW instance and the output database path.

    All fields are now initialized in __init__ (the original mixed a
    mutable class-level ``reddit = []`` default with partially duplicated
    instance initialization).
    """

    def __init__(self):
        # Set by init_reddit() when comment scraping is enabled.
        self.reddit = None
        self.submission_idx = 0
        self.comment_idx = 0
        self.praw_requests = 0
        self.corpus_size = 0
        self.db_name = "./corpus.db"

    def init_reddit(self):
        """Create the authenticated PRAW instance (praw.ini site 'bot1')."""
        self.reddit = praw.Reddit("bot1")

    def inc_sub(self):
        """Count one more submission request."""
        self.submission_idx += 1

    def reset_comment(self):
        """Reset the per-submission comment counter."""
        self.comment_idx = 0

    def inc_comment(self):
        """Count one more comment processed."""
        self.comment_idx += 1

    def update_praw(self):
        """Refresh the cached number of remaining PRAW API requests."""
        self.praw_requests = self.reddit.auth.limits['remaining']
def main():
    """
    CLI entry point: parse arguments, open the corpus database and scrape
    one month of submissions (and optionally their comments) into it.
    """
    # BUG FIX: sys.argv always contains at least the script name, so the
    # original ``len(sys.argv) == 0`` guard never fired and a missing
    # subreddit argument crashed with IndexError below.
    if len(sys.argv) < 2:
        usage()
    opts, args = get_args()
    subreddit = sys.argv[1]
    comment_flag = False
    # getopt stops parsing at the first positional argument (the
    # subreddit), so the flags typically land in ``args``; they are
    # matched textually here.
    for arg in args:
        if arg == '-v':
            print("Verbose logging")
            init_log()
        if arg == '-c':
            comment_flag = True
            print("Comments on ")
        if arg in [('-h'), ('-u')]:
            usage()
            exit()
    state = StateObj()
    if comment_flag:
        state.init_reddit()
    # Any argument containing ".db" selects the output database file.
    for arg in args:
        print(arg)
        if ".db" in arg:
            state.db_name = arg
    conn = init_db(state.db_name)
    for month in range(1, 2):
        gen = generate_submissions_psaw(month, subreddit)
        for i in list(gen):
            state.inc_sub()
            update_display(state)
            # only get submissions that are self posts
            if hasattr(i, 'selftext'):
                if hasattr(i, 'author'):
                    submission = (i.author, utc_to_local(i.created_utc), i.title,
                        i.selftext, i.id, i.is_self, utc_to_local(i.retrieved_on),
                        i.num_comments, i.permalink)
                else:
                    # No author attribute -> stored as 'deleted'.
                    submission = ('deleted', utc_to_local(i.created_utc), i.title,
                        i.selftext, i.id, i.is_self, utc_to_local(i.retrieved_on),
                        i.num_comments, i.permalink)
                db.insert_submission(conn, submission)
                if comment_flag:
                    iterate_comments(state, i, conn)


if __name__ == "__main__":
    main()
| 24.046875 | 86 | 0.591813 | 660 | 0.08577 | 0 | 0 | 0 | 0 | 0 | 0 | 3,032 | 0.394022 |
872a7c2cdb92c261fe174b94da5759ed7dfbd97f | 40 | py | Python | expressive_regex/exceptions.py | fsadannn/expressive_regex | 3bf113e8288a0f7d756f24cf882be8709630d4d3 | [
"MIT"
] | 2 | 2020-07-31T13:49:17.000Z | 2020-09-16T14:47:23.000Z | expressive_regex/exceptions.py | fsadannn/expressive_regex | 3bf113e8288a0f7d756f24cf882be8709630d4d3 | [
"MIT"
] | null | null | null | expressive_regex/exceptions.py | fsadannn/expressive_regex | 3bf113e8288a0f7d756f24cf882be8709630d4d3 | [
"MIT"
] | null | null | null | class BadStatement(Exception):
pass
| 13.333333 | 30 | 0.75 | 39 | 0.975 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
872b15d4a269e14f5342a8109b8993012c978013 | 1,222 | py | Python | trieste/models/gpflow/__init__.py | henrymoss/trieste | 4c8d14ead793fb49cdbb883789b799310873db70 | [
"Apache-2.0"
] | 1 | 2021-10-02T19:53:48.000Z | 2021-10-02T19:53:48.000Z | trieste/models/gpflow/__init__.py | TsingQAQ/trieste | 6b2bb0e73649debaac81157f0f9fdb8d3fdfef5b | [
"Apache-2.0"
] | null | null | null | trieste/models/gpflow/__init__.py | TsingQAQ/trieste | 6b2bb0e73649debaac81157f0f9fdb8d3fdfef5b | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This package contains the primary interface for Gaussian process models. It also contains a
number of :class:`TrainableProbabilisticModel` wrappers for GPflow-based models.
"""
from . import config, optimizer
from .interface import GPflowPredictor
from .models import GaussianProcessRegression, SparseVariational, VariationalGaussianProcess
from .sampler import (
BatchReparametrizationSampler,
IndependentReparametrizationSampler,
RandomFourierFeatureTrajectorySampler,
)
from .utils import (
M,
assert_data_is_compatible,
check_optimizer,
randomize_hyperparameters,
squeeze_hyperparameters,
)
| 34.914286 | 92 | 0.787234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 756 | 0.618658 |
872daf06ad41c681b338cb82204799f97cdb8c02 | 829 | py | Python | expenses_tracker/expenses_tracker/web/vaidators.py | LBistrev/python-web-basic | e798d5eb22657a014d3dd0f98e4c3c585336c4fc | [
"MIT"
] | null | null | null | expenses_tracker/expenses_tracker/web/vaidators.py | LBistrev/python-web-basic | e798d5eb22657a014d3dd0f98e4c3c585336c4fc | [
"MIT"
] | null | null | null | expenses_tracker/expenses_tracker/web/vaidators.py | LBistrev/python-web-basic | e798d5eb22657a014d3dd0f98e4c3c585336c4fc | [
"MIT"
] | null | null | null | from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
VALIDATE_ONLY_LETTERS_EXCEPTION_MESSAGE = 'Ensure this value contains only letters.'
def validate_only_letters(value):
    """Field validator: reject any value containing non-letter characters."""
    if value.isalpha():
        return
    raise ValidationError(VALIDATE_ONLY_LETTERS_EXCEPTION_MESSAGE)
@deconstructible
class ImageMaxSizeInMbValidator:
    """Validator rejecting uploaded files larger than ``max_size`` megabytes."""

    _BYTES_PER_MB = 1024 * 1024

    def __init__(self, max_size):
        # Kept as a public attribute: @deconstructible serializes init args
        # for migrations, and the error message reads it back.
        self.max_size = max_size

    def __call__(self, value):
        limit_bytes = self.max_size * self._BYTES_PER_MB
        if value.file.size > limit_bytes:
            raise ValidationError(f'Max file size is {self.max_size:.2f}MB')
| 29.607143 | 84 | 0.74427 | 484 | 0.583836 | 0 | 0 | 501 | 0.604343 | 0 | 0 | 83 | 0.100121 |
872deb99006e5413d89c06dfa856df9a1719736e | 2,347 | py | Python | seleniumwire/thirdparty/mitmproxy/net/http/multipart.py | KozminMoci/selenium-wire | 063c44ab42ac5e53e28c8a8c49c9ae7036bd878b | [
"MIT"
] | 975 | 2018-06-23T10:50:42.000Z | 2022-03-31T00:56:03.000Z | seleniumwire/thirdparty/mitmproxy/net/http/multipart.py | KozminMoci/selenium-wire | 063c44ab42ac5e53e28c8a8c49c9ae7036bd878b | [
"MIT"
] | 492 | 2018-07-30T12:49:51.000Z | 2022-03-31T12:46:56.000Z | seleniumwire/thirdparty/mitmproxy/net/http/multipart.py | KozminMoci/selenium-wire | 063c44ab42ac5e53e28c8a8c49c9ae7036bd878b | [
"MIT"
] | 149 | 2018-08-29T06:53:12.000Z | 2022-03-31T09:23:56.000Z | import mimetypes
import re
from urllib.parse import quote
from seleniumwire.thirdparty.mitmproxy.net.http import headers
def encode(head, l):
    """Encode *l*, a list of ``(name, value)`` byte-string pairs, as a
    multipart body using the boundary taken from *head*'s content-type
    header.

    Returns ``b""`` when no usable boundary can be extracted from the
    headers.  Raises ValueError if a value embeds the boundary delimiter.
    """
    boundary = None
    k = head.get("content-type")
    if k:
        k = headers.parse_content_type(k)
        if k is not None:
            try:
                boundary = k[2]["boundary"].encode("ascii")
                boundary = quote(boundary)
            except (KeyError, UnicodeError):
                return b""
    if boundary is None:
        # BUG FIX: previously this fell through with `boundary` unbound and
        # crashed with a NameError below; without a boundary nothing can be
        # encoded, so behave like the unparsable-boundary path above.
        return b""
    boundary_bytes = boundary.encode('utf-8')
    hdrs = []
    for key, value in l:
        file_type = mimetypes.guess_type(str(key))[0] or "text/plain; charset=utf-8"
        if key:
            hdrs.append(b"--%b" % boundary_bytes)
            disposition = b'form-data; name="%b"' % key
            hdrs.append(b"Content-Disposition: %b" % disposition)
            hdrs.append(b"Content-Type: %b" % file_type.encode('utf-8'))
            hdrs.append(b'')
            hdrs.append(value)
            hdrs.append(b'')
        if value is not None:
            # Refuse to encode a value that contains the boundary delimiter
            # on a line of its own — the result would be ambiguous.
            if re.search(rb"^--%b$" % re.escape(boundary_bytes), value):
                raise ValueError(b"boundary found in encoded string")
    hdrs.append(b"--%b--\r\n" % boundary_bytes)
    return b"\r\n".join(hdrs)
def decode(hdrs, content):
    """
    Split a multipart-encoded *content* byte string into a list of
    (name, value) byte-string tuples, using the boundary taken from the
    content-type entry of *hdrs*.

    Returns [] when the content-type header is missing or unparsable,
    when it carries no usable boundary, or when *content* is None.
    """
    v = hdrs.get("content-type")
    if v:
        v = headers.parse_content_type(v)
        if not v:
            return []
        try:
            boundary = v[2]["boundary"].encode("ascii")
        except (KeyError, UnicodeError):
            return []
        # Extracts the field name from the part's Content-Disposition line.
        rx = re.compile(br'\bname="([^"]+)"')
        r = []
        if content is not None:
            for i in content.split(b"--" + boundary):
                parts = i.splitlines()
                # Skip the terminating "--" fragment and degenerate parts.
                if len(parts) > 1 and parts[0][0:2] != b"--":
                    match = rx.search(parts[1])
                    if match:
                        key = match.group(1)
                        # Body = lines after the first blank line that follows
                        # the part headers, concatenated WITHOUT separators
                        # (original newlines inside the value are not restored).
                        value = b"".join(parts[3 + parts[2:].index(b""):])
                        r.append((key, value))
        return r
    return []
| 34.014493 | 92 | 0.484448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 457 | 0.194717 |
872e1c57fa4968272b0db8d13d59197b314f92ad | 566 | py | Python | pretalx_mattermost/apps.py | toshywoshy/pretalx-mattermost | 69499093750f613f74ab47c9798f5f90431372ba | [
"Apache-2.0"
] | null | null | null | pretalx_mattermost/apps.py | toshywoshy/pretalx-mattermost | 69499093750f613f74ab47c9798f5f90431372ba | [
"Apache-2.0"
] | null | null | null | pretalx_mattermost/apps.py | toshywoshy/pretalx-mattermost | 69499093750f613f74ab47c9798f5f90431372ba | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
from django.utils.translation import gettext_lazy
class PluginApp(AppConfig):
    """Django app configuration for the pretalx MatterMost plugin."""
    name = 'pretalx_mattermost'
    verbose_name = 'MatterMost integration for pretalx'
    class PretalxPluginMeta:
        # Plugin metadata block; presumably consumed by pretalx's plugin
        # discovery — confirm against pretalx's plugin API docs.
        name = gettext_lazy('MatterMost integration for pretalx')
        author = 'Toshaan Bharvani'
        description = gettext_lazy(
            'Receive notifications whenever a submission changes its state.'
        )
        visible = True
        version = '0.0.0'
    def ready(self):
        # Importing the module registers its signal receivers as a side effect.
        from . import signals  # NOQA
| 28.3 | 76 | 0.674912 | 479 | 0.84629 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.330389 |
872e646505f83dcab385fa79942ad96da9c731d8 | 1,156 | py | Python | flexselect/views.py | twerp/django-admin-flexselect-py3 | 38563b98cb333e6edd5351f7e88d6156e7a71a34 | [
"CC0-1.0"
] | null | null | null | flexselect/views.py | twerp/django-admin-flexselect-py3 | 38563b98cb333e6edd5351f7e88d6156e7a71a34 | [
"CC0-1.0"
] | null | null | null | flexselect/views.py | twerp/django-admin-flexselect-py3 | 38563b98cb333e6edd5351f7e88d6156e7a71a34 | [
"CC0-1.0"
] | null | null | null | import json
from django.http import HttpResponse
from django.forms.widgets import Select
from django.contrib.auth.decorators import login_required
from flexselect import (FlexSelectWidget, choices_from_instance,
details_from_instance, instance_from_request)
@login_required
def field_changed(request):
    """
    Ajax callback fired when a trigger field or base field has changed.
    Responds with JSON carrying fresh option HTML (when requested) and the
    details for the dependent field.
    """
    widget = FlexSelectWidget.instances[request.POST['hashed_name']]
    instance = instance_from_request(request, widget)
    value_fk = getattr(instance, widget.base_field.name)
    include_options = bool(int(request.POST['include_options']))
    if include_options:
        select = Select(choices=choices_from_instance(instance, widget))
        selected = [value_fk.pk if value_fk else None]
        options = select.render_options([], selected)
    else:
        options = None
    payload = {
        'options': options,
        'details': details_from_instance(instance, widget),
    }
    return HttpResponse(json.dumps(payload), content_type='application/json')
| 36.125 | 77 | 0.719723 | 0 | 0 | 0 | 0 | 869 | 0.75173 | 0 | 0 | 228 | 0.197232 |
872f00ae4d30467f373664e873bfd51982763b06 | 20,371 | py | Python | mscreen/autodocktools_prepare_py3k/AutoDockTools/VisionInterface/Adt/Macro/AutodockVS.py | e-mayo/mscreen | a50f0b2f7104007c730baa51b4ec65c891008c47 | [
"MIT"
] | 9 | 2021-03-06T04:24:28.000Z | 2022-01-03T09:53:07.000Z | AutoDockTools/VisionInterface/Adt/Macro/AutodockVS.py | e-mayo/autodocktools-prepare-py3k | 2dd2316837bcb7c19384294443b2855e5ccd3e01 | [
"BSD-3-Clause"
] | 3 | 2021-03-07T05:37:16.000Z | 2021-09-19T15:06:54.000Z | AutoDockTools/VisionInterface/Adt/Macro/AutodockVS.py | e-mayo/autodocktools-prepare-py3k | 2dd2316837bcb7c19384294443b2855e5ccd3e01 | [
"BSD-3-Clause"
] | 4 | 2019-08-28T23:11:39.000Z | 2021-11-27T08:43:36.000Z | ########################################################################
#
# Vision Macro - Python source code - file generated by vision
# Thursday 01 July 2010 13:22:44
#
# The Scripps Research Institute (TSRI)
# Molecular Graphics Lab
# La Jolla, CA 92037, USA
#
# Copyright: Daniel Stoffler, Michel Sanner and TSRI
#
# revision: Guillaume Vareille
#
#########################################################################
#
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/VisionInterface/Adt/Macro/AutodockVS.py,v 1.9 2010/07/02 00:22:59 jren Exp $
#
# $Id: AutodockVS.py,v 1.9 2010/07/02 00:22:59 jren Exp $
#
from NetworkEditor.macros import MacroNode
class AutodockVS(MacroNode):
'''
Runs Autodock Virtual Screening on remote server in parallel
Inputs:
port 1: LigandDB object containing info about the ligand library
port 2: autogrid_result object containing info about autogrid results
port 3: DPF template object
Outputs:
port 1: string containing URL to autodock virtual screening results
'''
    def __init__(self, constrkw={}, name='AutodockVS', **kw):
        """Create the macro node, forwarding *name* to MacroNode via kw."""
        # NOTE(review): `constrkw` has a mutable default and is never read
        # here — left untouched because this file is Vision-generated code.
        kw['name'] = name
        MacroNode.__init__(*(self,), **kw)
def beforeAddingToNetwork(self, net):
MacroNode.beforeAddingToNetwork(self, net)
from WebServices.VisionInterface.WSNodes import wslib
from Vision.StandardNodes import stdlib
net.getEditor().addLibraryInstance(wslib,"WebServices.VisionInterface.WSNodes", "wslib")
from WebServices.VisionInterface.WSNodes import addOpalServerAsCategory
try:
addOpalServerAsCategory("http://kryptonite.nbcr.net/opal2", replace=False)
except:
pass
def afterAddingToNetwork(self):
masterNet = self.macroNetwork
from NetworkEditor.macros import MacroNode
MacroNode.afterAddingToNetwork(self)
from WebServices.VisionInterface.WSNodes import wslib
from Vision.StandardNodes import stdlib
## building macro network ##
AutodockVS_9 = self
from traceback import print_exc
from WebServices.VisionInterface.WSNodes import wslib
from Vision.StandardNodes import stdlib
masterNet.getEditor().addLibraryInstance(wslib,"WebServices.VisionInterface.WSNodes", "wslib")
from WebServices.VisionInterface.WSNodes import addOpalServerAsCategory
try:
addOpalServerAsCategory("http://kryptonite.nbcr.net/opal2", replace=False)
except:
pass
try:
## saving node input Ports ##
input_Ports_10 = self.macroNetwork.ipNode
input_Ports_10.configure(*(), **{'paramPanelImmediate': 1, 'expanded': False})
except:
print("WARNING: failed to restore MacroInputNode named input Ports in network self.macroNetwork")
print_exc()
input_Ports_10=None
try:
## saving node output Ports ##
output_Ports_11 = self.macroNetwork.opNode
output_Ports_11.configure(*(), **{'paramPanelImmediate': 1, 'expanded': False})
except:
print("WARNING: failed to restore MacroOutputNode named output Ports in network self.macroNetwork")
print_exc()
output_Ports_11=None
try:
## saving node PrepareADVSInputs ##
from Vision.StandardNodes import Generic
PrepareADVSInputs_12 = Generic(constrkw={}, name='PrepareADVSInputs', library=stdlib)
self.macroNetwork.addNode(PrepareADVSInputs_12,217,76)
PrepareADVSInputs_12.addInputPort(*(), **{'singleConnection': True, 'name': 'ligands', 'cast': True, 'datatype': 'LigandDB', 'defaultValue': None, 'required': True, 'height': 8, 'width': 12, 'shape': 'rect', 'color': '#FFCCFF', 'originalDatatype': 'None'})
PrepareADVSInputs_12.addInputPort(*(), **{'singleConnection': True, 'name': 'autogrid_results', 'cast': True, 'datatype': 'autogrid_results', 'defaultValue': None, 'required': True, 'height': 8, 'width': 12, 'shape': 'triangle', 'color': '#FF33CC', 'originalDatatype': 'None'})
PrepareADVSInputs_12.addInputPort(*(), **{'singleConnection': True, 'name': 'dpf_template_obj', 'cast': True, 'datatype': 'dpf_template', 'defaultValue': None, 'required': True, 'height': 8, 'width': 12, 'shape': 'triangle', 'color': '#9933FF', 'originalDatatype': 'None'})
PrepareADVSInputs_12.addOutputPort(*(), **{'name': 'filter_file', 'datatype': 'string', 'height': 8, 'width': 12, 'shape': 'oval', 'color': 'white'})
PrepareADVSInputs_12.addOutputPort(*(), **{'name': 'ligand_lib', 'datatype': 'string', 'height': 8, 'width': 12, 'shape': 'oval', 'color': 'white'})
PrepareADVSInputs_12.addOutputPort(*(), **{'name': 'dpf_template_file', 'datatype': 'string', 'height': 8, 'width': 12, 'shape': 'oval', 'color': 'white'})
PrepareADVSInputs_12.addOutputPort(*(), **{'name': 'autogrid_res_url', 'datatype': 'string', 'height': 8, 'width': 12, 'shape': 'oval', 'color': 'white'})
PrepareADVSInputs_12.addOutputPort(*(), **{'name': 'autogrid_res_local', 'datatype': 'string', 'height': 8, 'width': 12, 'shape': 'oval', 'color': 'white'})
code = """def doit(self, ligands, autogrid_results, dpf_template_obj):
dpf = dpf_template_obj.fullpath
if not(os.path.exists(dpf)):
print "ERROR: DPF template " + dpf + " does not exist!"
return '''stop'''
filter_file = ligands.filter_file
if autogrid_results.type == '''url''':
autogrid_result_url = autogrid_results.path
autogrid_result_local = ""
else:
autogrid_result_url = ""
autogrid_result_local = autogrid_results.path
ligand_lib = ligands.loc
pass
self.outputData(filter_file=filter_file, ligand_lib=ligand_lib, dpf_template_file=dpf, autogrid_res_url=autogrid_result_url, autogrid_res_local=autogrid_result_local)
## to ouput data on port filter_file use
## self.outputData(filter_file=data)
## to ouput data on port ligand_lib use
## self.outputData(ligand_lib=data)
## to ouput data on port dpf_template_file use
## self.outputData(dpf_template_file=data)
## to ouput data on port autogrid_res_url use
## self.outputData(autogrid_res_url=data)
## to ouput data on port autogrid_res_local use
## self.outputData(autogrid_res_local=data)
"""
PrepareADVSInputs_12.configure(function=code)
PrepareADVSInputs_12.configure(*(), **{'paramPanelImmediate': 1, 'expanded': False})
except:
print("WARNING: failed to restore Generic named PrepareADVSInputs in network self.macroNetwork")
print_exc()
PrepareADVSInputs_12=None
try:
## saving node autodock_kryptonite_nbcr_net ##
from NetworkEditor.items import FunctionNode
autodock_kryptonite_nbcr_net_13 = FunctionNode(functionOrString='autodock_kryptonite_nbcr_net', host="http://kryptonite.nbcr.net/opal2", namedArgs={'ga_run': '', 'lib': '', 'filter_file_url': '', 'ga_num_evals': '', 'filter_file': '', 'sched': 'SGE', 'urllib': '', 'ga_num_generations': '', 'dpf': '', 'u': '', 'utar': '', 'userlib': '', 'ga_pop_size': '', 'localRun': False, 'email': '', 'execPath': ''}, constrkw={'functionOrString': "'autodock_kryptonite_nbcr_net'", 'host': '"http://kryptonite.nbcr.net/opal2"', 'namedArgs': {'ga_run': '', 'lib': '', 'filter_file_url': '', 'ga_num_evals': '', 'filter_file': '', 'sched': 'SGE', 'urllib': '', 'ga_num_generations': '', 'dpf': '', 'u': '', 'utar': '', 'userlib': '', 'ga_pop_size': '', 'localRun': False, 'email': '', 'execPath': ''}}, name='autodock_kryptonite_nbcr_net', library=wslib)
self.macroNetwork.addNode(autodock_kryptonite_nbcr_net_13,217,132)
autodock_kryptonite_nbcr_net_13.inputPortByName['ga_run'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['lib'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['filter_file_url'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['ga_num_evals'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['filter_file'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['sched'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['urllib'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['ga_num_generations'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['dpf'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['u'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['utar'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['userlib'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['ga_pop_size'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['localRun'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['email'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['execPath'].configure(*(), **{'defaultValue': None})
autodock_kryptonite_nbcr_net_13.inputPortByName['ga_run'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['lib'].widget.configure(*(), **{'choices': ('sample', 'NCIDS_SC', 'NCI_DS1', 'NCI_DS2', 'human_metabolome', 'chembridge_building_blocks', 'drugbank_nutraceutics', 'drugbank_smallmol', 'fda_approved')})
autodock_kryptonite_nbcr_net_13.inputPortByName['lib'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['filter_file_url'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['ga_num_evals'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['filter_file'].rebindWidget()
autodock_kryptonite_nbcr_net_13.inputPortByName['filter_file'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['filter_file'].unbindWidget()
autodock_kryptonite_nbcr_net_13.inputPortByName['sched'].widget.configure(*(), **{'choices': ('SGE', 'CSF')})
autodock_kryptonite_nbcr_net_13.inputPortByName['sched'].widget.set(r"SGE", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['urllib'].rebindWidget()
autodock_kryptonite_nbcr_net_13.inputPortByName['urllib'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['urllib'].unbindWidget()
autodock_kryptonite_nbcr_net_13.inputPortByName['ga_num_generations'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['dpf'].rebindWidget()
autodock_kryptonite_nbcr_net_13.inputPortByName['dpf'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['dpf'].unbindWidget()
autodock_kryptonite_nbcr_net_13.inputPortByName['u'].rebindWidget()
autodock_kryptonite_nbcr_net_13.inputPortByName['u'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['u'].unbindWidget()
autodock_kryptonite_nbcr_net_13.inputPortByName['utar'].rebindWidget()
autodock_kryptonite_nbcr_net_13.inputPortByName['utar'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['utar'].unbindWidget()
autodock_kryptonite_nbcr_net_13.inputPortByName['userlib'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['ga_pop_size'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['localRun'].widget.set(0, run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['email'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.inputPortByName['execPath'].widget.set(r"", run=False)
autodock_kryptonite_nbcr_net_13.configure(*(), **{'paramPanelImmediate': 1, 'expanded': False})
except:
print("WARNING: failed to restore FunctionNode named autodock_kryptonite_nbcr_net in network self.macroNetwork")
print_exc()
autodock_kryptonite_nbcr_net_13=None
try:
## saving node GetMainURLFromList ##
from WebServices.VisionInterface.WSNodes import GetMainURLFromListNode
GetMainURLFromList_14 = GetMainURLFromListNode(constrkw={}, name='GetMainURLFromList', library=wslib)
self.macroNetwork.addNode(GetMainURLFromList_14,217,188)
GetMainURLFromList_14.inputPortByName['urls'].configure(*(), **{'defaultValue': None})
GetMainURLFromList_14.configure(*(), **{'paramPanelImmediate': 1, 'expanded': False})
except:
print("WARNING: failed to restore GetMainURLFromListNode named GetMainURLFromList in network self.macroNetwork")
print_exc()
GetMainURLFromList_14=None
#self.macroNetwork.run()
self.macroNetwork.freeze()
## saving connections for network AutodockVS ##
input_Ports_10 = self.macroNetwork.ipNode
if input_Ports_10 is not None and PrepareADVSInputs_12 is not None:
try:
self.macroNetwork.connectNodes(
input_Ports_10, PrepareADVSInputs_12, "new", "ligands", blocking=True
, splitratio=[0.60597534741634829, 0.41083180453223428])
except:
print("WARNING: failed to restore connection between input_Ports_10 and PrepareADVSInputs_12 in network self.macroNetwork")
if input_Ports_10 is not None and PrepareADVSInputs_12 is not None:
try:
self.macroNetwork.connectNodes(
input_Ports_10, PrepareADVSInputs_12, "new", "autogrid_results", blocking=True
, splitratio=[0.64561658610430228, 0.21974682015753622])
except:
print("WARNING: failed to restore connection between input_Ports_10 and PrepareADVSInputs_12 in network self.macroNetwork")
if input_Ports_10 is not None and PrepareADVSInputs_12 is not None:
try:
self.macroNetwork.connectNodes(
input_Ports_10, PrepareADVSInputs_12, "new", "dpf_template_obj", blocking=True
, splitratio=[0.52491295380143521, 0.32751034461281114])
except:
print("WARNING: failed to restore connection between input_Ports_10 and PrepareADVSInputs_12 in network self.macroNetwork")
if autodock_kryptonite_nbcr_net_13 is not None and GetMainURLFromList_14 is not None:
try:
self.macroNetwork.connectNodes(
autodock_kryptonite_nbcr_net_13, GetMainURLFromList_14, "result", "urls", blocking=True
, splitratio=[0.36974288957131424, 0.63465596053596318])
except:
print("WARNING: failed to restore connection between autodock_kryptonite_nbcr_net_13 and GetMainURLFromList_14 in network self.macroNetwork")
output_Ports_11 = self.macroNetwork.opNode
if GetMainURLFromList_14 is not None and output_Ports_11 is not None:
try:
self.macroNetwork.connectNodes(
GetMainURLFromList_14, output_Ports_11, "newurl", "new", blocking=True
, splitratio=[0.34850477186787743, 0.35637513198385085])
except:
print("WARNING: failed to restore connection between GetMainURLFromList_14 and output_Ports_11 in network self.macroNetwork")
if PrepareADVSInputs_12 is not None and autodock_kryptonite_nbcr_net_13 is not None:
try:
self.macroNetwork.connectNodes(
PrepareADVSInputs_12, autodock_kryptonite_nbcr_net_13, "filter_file", "filter_file", blocking=True
, splitratio=[0.33230642287344903, 0.65770700108889613])
except:
print("WARNING: failed to restore connection between PrepareADVSInputs_12 and autodock_kryptonite_nbcr_net_13 in network self.macroNetwork")
if PrepareADVSInputs_12 is not None and autodock_kryptonite_nbcr_net_13 is not None:
try:
self.macroNetwork.connectNodes(
PrepareADVSInputs_12, autodock_kryptonite_nbcr_net_13, "ligand_lib", "urllib", blocking=True
, splitratio=[0.50680104599665787, 0.51414170500293577])
except:
print("WARNING: failed to restore connection between PrepareADVSInputs_12 and autodock_kryptonite_nbcr_net_13 in network self.macroNetwork")
if PrepareADVSInputs_12 is not None and autodock_kryptonite_nbcr_net_13 is not None:
try:
self.macroNetwork.connectNodes(
PrepareADVSInputs_12, autodock_kryptonite_nbcr_net_13, "dpf_template_file", "dpf", blocking=True
, splitratio=[0.51615646597598808, 0.25661305528484007])
except:
print("WARNING: failed to restore connection between PrepareADVSInputs_12 and autodock_kryptonite_nbcr_net_13 in network self.macroNetwork")
if PrepareADVSInputs_12 is not None and autodock_kryptonite_nbcr_net_13 is not None:
try:
self.macroNetwork.connectNodes(
PrepareADVSInputs_12, autodock_kryptonite_nbcr_net_13, "autogrid_res_url", "u", blocking=True
, splitratio=[0.5760732944947704, 0.2032376887917188])
except:
print("WARNING: failed to restore connection between PrepareADVSInputs_12 and autodock_kryptonite_nbcr_net_13 in network self.macroNetwork")
if PrepareADVSInputs_12 is not None and autodock_kryptonite_nbcr_net_13 is not None:
try:
self.macroNetwork.connectNodes(
PrepareADVSInputs_12, autodock_kryptonite_nbcr_net_13, "autogrid_res_local", "utar", blocking=True
, splitratio=[0.52802808938949819, 0.66978534572736881])
except:
print("WARNING: failed to restore connection between PrepareADVSInputs_12 and autodock_kryptonite_nbcr_net_13 in network self.macroNetwork")
self.macroNetwork.runOnNewData.value = False
## modifying MacroInputNode dynamic ports
input_Ports_10 = self.macroNetwork.ipNode
input_Ports_10.outputPorts[1].configure(name='PrepareADVSInputs_ligands')
input_Ports_10.outputPorts[2].configure(name='PrepareADVSInputs_autogrid_results')
input_Ports_10.outputPorts[3].configure(name='PrepareADVSInputs_dpf_template_obj')
## modifying MacroOutputNode dynamic ports
output_Ports_11 = self.macroNetwork.opNode
output_Ports_11.inputPorts[1].configure(singleConnection='auto')
output_Ports_11.inputPorts[1].configure(name='GetMainURLFromList_newurl')
## configure MacroNode input ports
AutodockVS_9.inputPorts[0].configure(name='PrepareADVSInputs_ligands')
AutodockVS_9.inputPorts[0].configure(datatype='LigandDB')
AutodockVS_9.inputPorts[1].configure(name='PrepareADVSInputs_autogrid_results')
AutodockVS_9.inputPorts[1].configure(datatype='autogrid_results')
AutodockVS_9.inputPorts[2].configure(name='PrepareADVSInputs_dpf_template_obj')
AutodockVS_9.inputPorts[2].configure(datatype='dpf_template')
## configure MacroNode output ports
AutodockVS_9.outputPorts[0].configure(name='GetMainURLFromList_newurl')
AutodockVS_9.outputPorts[0].configure(datatype='string')
AutodockVS_9.shrink()
## reset modifications ##
AutodockVS_9.resetTags()
AutodockVS_9.buildOriginalList()
| 64.059748 | 852 | 0.676206 | 19,664 | 0.965294 | 0 | 0 | 0 | 0 | 0 | 0 | 7,617 | 0.373914 |
872f9d630a0a1249e1763f2b416aea3945ab0e26 | 4,821 | py | Python | taotao-cloud-python/taotao-cloud-oldboy/day70-EdmureBlog/EdmureBlog/backend/views/trouble.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
] | 47 | 2021-04-13T10:32:13.000Z | 2022-03-31T10:30:30.000Z | taotao-cloud-python/taotao-cloud-oldboy/day70-EdmureBlog/EdmureBlog/backend/views/trouble.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
] | 1 | 2021-11-01T07:41:04.000Z | 2021-11-01T07:41:10.000Z | taotao-cloud-python/taotao-cloud-oldboy/day70-EdmureBlog/EdmureBlog/backend/views/trouble.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
] | 21 | 2021-04-13T10:32:17.000Z | 2022-03-26T07:43:22.000Z | from django.shortcuts import render,redirect,HttpResponse
from repository import models
def trouble_list(request):
    """List the current user's trouble tickets, unprocessed first."""
    current_user_id = 1  # TODO: take from request.session (per the original note)
    troubles = (
        models.Trouble.objects
        .filter(user_id=current_user_id)
        .order_by('status')
        .only('title', 'status', 'ctime', 'processer')
    )
    return render(request, 'backend_trouble_list.html', {'result': troubles})
from django.forms import Form
from django.forms import fields
from django.forms import widgets
class TroubleMaker(Form):
    """Form for creating/editing a trouble ticket: short title + detail body."""
    # 'kind-content' presumably hooks a rich-text editor in the template —
    # confirm against backend_trouble_create.html.
    title = fields.CharField(
        max_length=32,
        widget=widgets.TextInput(attrs={'class': 'form-control'})
    )
    detail = fields.CharField(
        widget=widgets.Textarea(attrs={'id':'detail','class':'kind-content'})
    )
import datetime
def trouble_create(request):
    """Create a new trouble ticket.

    GET renders an empty form; POST validates it, persists the ticket and
    redirects back to the list view (re-rendering the form on errors).
    """
    if request.method == 'GET':
        form = TroubleMaker()
    else:
        form = TroubleMaker(request.POST)
        if form.is_valid():
            # Merge the validated title/detail with server-side fields.
            dic = {}
            dic['user_id'] = 1 # TODO: take from the session (original note: "session中获取")
            dic['ctime'] = datetime.datetime.now()
            dic['status'] = 1
            dic.update(form.cleaned_data)
            models.Trouble.objects.create(**dic)
            return redirect('/backend/trouble-list.html')
    return render(request, 'backend_trouble_create.html',{'form':form})
def trouble_edit(request,nid):
if request.method == "GET":
obj = models.Trouble.objects.filter(id=nid, status=1).only('id', 'title', 'detail').first()
if not obj:
return HttpResponse('ๅทฒๅค็ไธญ็ไฟๅ็ซ ๆ ๆณไฟฎๆน..')
# initial ไป
ๅๅงๅ
form = TroubleMaker(initial={'title': obj.title,'detail': obj.detail})
# ๆง่กerrorไผ่ฟ่ก้ช่ฏ
return render(request,'backend_trouble_edit.html',{'form':form,'nid':nid})
else:
form = TroubleMaker(data=request.POST)
if form.is_valid():
# ๅๅๅบ็่กๆฐ
v = models.Trouble.objects.filter(id=nid, status=1).update(**form.cleaned_data)
if not v:
return HttpResponse('ๅทฒ็ป่ขซๅค็')
else:
return redirect('/backend/trouble-list.html')
return render(request, 'backend_trouble_edit.html', {'form': form, 'nid': nid})
def trouble_kill_list(request):
    """List tickets the current user may process: their own plus all new ones."""
    from django.db.models import Q
    current_user_id = 1
    pending = models.Trouble.objects.filter(
        Q(processer_id=current_user_id) | Q(status=1)
    ).order_by('status')
    return render(request, 'backend_trouble_kill_list.html', {'result': pending})
class TroubleKill(Form):
    """Single-field form capturing the solution text when a ticket is processed."""
    # 'kind-content' presumably hooks a rich-text editor in the template —
    # confirm against backend_trouble_kill.html.
    solution = fields.CharField(
        widget=widgets.Textarea(attrs={'id':'solution','class':'kind-content'})
    )
def trouble_kill(request,nid):
current_user_id = 1
if request.method == 'GET':
ret = models.Trouble.objects.filter(id=nid, processer=current_user_id).count()
# ไปฅๅๆชๅผบ็
if not ret:
v = models.Trouble.objects.filter(id=nid,status=1).update(processer=current_user_id,status=2)
if not v:
return HttpResponse('ๆ้ๅคชๆ
ข...')
obj = models.Trouble.objects.filter(id=nid).first()
form = TroubleKill(initial={'title': obj.title,'solution': obj.solution})
return render(request,'backend_trouble_kill.html',{'obj':obj,'form': form,'nid':nid})
else:
ret = models.Trouble.objects.filter(id=nid, processer=current_user_id,status=2).count()
if not ret:
return HttpResponse('ๅปไฝ ๅฆ็')
form = TroubleKill(request.POST)
if form.is_valid():
dic = {}
dic['status'] = 3
dic['solution'] = form.cleaned_data['solution']
dic['ptime'] = datetime.datetime.now()
models.Trouble.objects.filter(id=nid, processer=current_user_id,status=2).update(**dic)
return redirect('/backend/trouble-kill-list.html')
obj = models.Trouble.objects.filter(id=nid).first()
return render(request, 'backend_trouble_kill.html', {'obj': obj, 'form': form, 'nid': nid})
def trouble_report(request):
    """Render the trouble statistics page; chart data is served separately
    by trouble_json_report."""
    return render(request,'backend_trouble_report.html')
def trouble_json_report(request):
    """Return per-processor monthly ticket counts as a JSON list of series.

    Each series is shaped for a charting library:
    ``{'name': username, 'data': [(month_epoch_ms, count), ...]}``.
    """
    # Hoisted out of the loop (were re-imported on every iteration);
    # unused `connections` import and a leftover debug print() removed.
    import json
    from django.db import connection
    user_list = models.UserInfo.objects.filter()
    response = []
    for user in user_list:
        cursor = connection.cursor()
        # strftime('%s', ...) * 1000 converts the first day of each month to a
        # JavaScript-style millisecond timestamp (SQLite-specific SQL).
        cursor.execute("""select strftime('%%s',strftime("%%Y-%%m-01",ctime)) * 1000,count(id) from repository_trouble where processer_id = %s group by strftime("%%Y-%%m",ctime)""", [user.nid,])
        result = cursor.fetchall()
        temp = {
            'name': user.username,
            'data': result
        }
        response.append(temp)
    return HttpResponse(json.dumps(response))
| 34.435714 | 194 | 0.630367 | 408 | 0.082641 | 0 | 0 | 0 | 0 | 0 | 0 | 1,132 | 0.229289 |
87304ebf26d53abe5381b3ecce55748ffeed5bfe | 442 | py | Python | examples/single_camera.py | Hikki12/camio | c183234083c0382b91ecda8952cda6640e78d974 | [
"MIT"
] | null | null | null | examples/single_camera.py | Hikki12/camio | c183234083c0382b91ecda8952cda6640e78d974 | [
"MIT"
] | null | null | null | examples/single_camera.py | Hikki12/camio | c183234083c0382b91ecda8952cda6640e78d974 | [
"MIT"
] | null | null | null | import cv2
from camio import Camera
# Open camera device 0 at 30 FPS; size=None keeps camio's default resolution.
camera = Camera(
    src=0,
    fps=30,
    size=None,
    emitterIsEnabled=False,
    queueModeEnabled=False,
    backgroundIsEnabled=True,
)
camera.start()
try:
    # Display frames until the user presses 'q'.
    while True:
        image = camera.read()
        if image is not None:
            cv2.imshow('image', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Release the camera and close the window even on Ctrl-C or an error;
    # previously cleanup was skipped whenever the loop raised.
    camera.stop()
    cv2.destroyAllWindows()
| 16.37037 | 41 | 0.565611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.022624 |
873062623f7ba6c782e2eb75ee0d4275cd9a9460 | 876 | py | Python | setup.py | Seojiyoon/pydata_jy | 3166b34ea8cf078bf934a8b3b12b395cf01e14fa | [
"MIT"
] | null | null | null | setup.py | Seojiyoon/pydata_jy | 3166b34ea8cf078bf934a8b3b12b395cf01e14fa | [
"MIT"
] | null | null | null | setup.py | Seojiyoon/pydata_jy | 3166b34ea8cf078bf934a8b3b12b395cf01e14fa | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pydata_jy',
version='1.0.0',
description='PyData Location List',
long_description=long_description,
url='https://github.com/takezyou/pydata',
author='jseo',
author_email='sjy950462@gmail.com',
license='MIT',
install_requires=['beautifulsoup4', 'lxml'],
keywords='pydata_jy',
packages=find_packages,
entry_points={
"console_scripts": [
"pydata_jy=pydata_jy.__init__:main",
],
},
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
) | 27.375 | 63 | 0.642694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.375571 |
87317b99687d8331d065138ebb8a07bd2daca0b3 | 4,759 | py | Python | src/modules/bot_info.py | 490720818/jx3_bot | e73822914c21d399cd5f51f103efb30fb7d3b534 | [
"MIT"
] | 22 | 2021-09-05T23:29:34.000Z | 2022-02-18T14:45:31.000Z | src/modules/bot_info.py | 490720818/jx3_bot | e73822914c21d399cd5f51f103efb30fb7d3b534 | [
"MIT"
] | 10 | 2021-09-23T07:43:47.000Z | 2021-12-20T04:26:47.000Z | src/modules/bot_info.py | 490720818/jx3_bot | e73822914c21d399cd5f51f103efb30fb7d3b534 | [
"MIT"
] | 8 | 2021-09-09T00:41:54.000Z | 2022-01-17T07:01:11.000Z | from datetime import datetime
from typing import Optional
from src.utils.config import config
from tortoise import fields
from tortoise.models import Model
defaule_nickname: str = config.get('default').get('nickname')
class BotInfo(Model):
'''QQๆบๅจไบบ่กจ'''
bot_id = fields.IntField(pk=True)
'''ๆบๅจไบบQQๅท'''
owner_id = fields.IntField(null=True)
'''็ฎก็ๅ่ดฆๅท'''
nickname = fields.CharField(max_length=255, default=defaule_nickname)
'''ๆบๅจไบบๆต็งฐ'''
last_sign = fields.DatetimeField(null=True)
'''ไธๆฌก็ปๅฝๆถ้ด'''
last_left = fields.DatetimeField(null=True)
'''ไธๆฌก็ฆป็บฟๆถ้ด'''
online = fields.BooleanField(default=True)
'''ๅฝๅๅจ็บฟๆ
ๅต'''
class Meta:
table = "bot_info"
table_description = "็ฎก็QQๆบๅจไบบ่ดฆๅทไฟกๆฏ"
@classmethod
async def bot_connect(cls, bot_id):
'''
:่ฏดๆ
ๆบๅจไบบ้พๆฅ
:ๅๆฐ
* bot_id๏ผๆบๅจไบบQQๅท
'''
record, _ = await cls.get_or_create(bot_id=bot_id)
now_time = datetime.now()
record.last_sign = now_time
record.online = True
await record.save(update_fields=["last_sign", "online"])
@classmethod
async def bot_disconnect(cls, bot_id):
'''
:่ฏดๆ
ๆบๅจไบบๆญๅผ้พๆฅ
:ๅๆฐ
* bot_id๏ผๆบๅจไบบQQๅท
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is not None:
now_time = datetime.now()
record.last_left = now_time
record.online = False
await record.save(update_fields=["last_left", "online"])
@classmethod
async def set_owner(cls, bot_id, owner_id) -> bool:
'''
:่ฏดๆ
่ฎพ็ฝฎๆบๅจไบบ็ฎก็ๅ
:ๅๆฐ
* bot_id๏ผๆบๅจไบบQQๅท
* owner_id๏ผ็ฎก็ๅQQๅท
:่ฟๅ
* bool๏ผๆฏๅฆๆๅ
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is None:
return False
record.owner_id = owner_id
await record.save(update_fields=["owner_id"])
return True
@classmethod
async def get_owner(cls, bot_id) -> Optional[int]:
'''
:่ฏดๆ
่ทๅๆบๅจไบบ็ฎก็ๅ
:ๅๆฐ
* bot_id๏ผๆบๅจไบบQQ
:่ฟๅ
* int๏ผ็ฎก็ๅQQ
* None
'''
record = await cls.get_or_none(bot_id=bot_id)
owner_id = None
if record is not None:
owner_id = record.owner_id
return owner_id
@classmethod
async def clean_owner(cls, bot_id) -> bool:
'''
:่ฏดๆ
ๆธ
้ค็ฎก็ๅ
:ๅๆฐ
* bot_id๏ผๆบๅจไบบQQ
:่ฟๅ
* bool๏ผๆฏๅฆๆธ
้คๆๅ
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is None:
return False
record.owner_id = None
await record.save(update_fields=["owner_id"])
return True
@classmethod
async def get_online(cls, bot_id) -> Optional[bool]:
'''
:่ฏดๆ
่ทๅๆบๅจไบบๅจ็บฟ็ถๆ
:ๅๆฐ
* bot_id๏ผๆบๅจไบบQQ
:่ฟๅ
* bool๏ผๆฏๅฆๅจ็บฟ
* None๏ผไธๅญๅจ
'''
record = await cls.get_or_none(bot_id=bot_id)
return None if record is None else record.online
@classmethod
async def set_nickname(cls, bot_id: int, nickname: str) -> bool:
'''
:่ฏดๆ
่ฎพ็ฝฎๆต็งฐ
:ๅๆฐ
* bot_id๏ผๆบๅจไบบQQ
* nickname๏ผๆต็งฐ
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is None:
return False
record.nickname = nickname
await record.save(update_fields=["nickname"])
return True
@classmethod
async def get_nickname(cls, bot_id: int) -> Optional[str]:
'''
:่ฏดๆ
่ทๅๆต็งฐ
:ๅๆฐ
* bot_id๏ผๆบๅจไบบQQ
:่ฟๅ
* str๏ผๆต็งฐ
'''
record = await cls.get_or_none(bot_id=bot_id)
return None if record is None else record.nickname
@classmethod
async def detele_bot(cls, bot_id) -> bool:
'''
:่ฏดๆ
ๅ ้คๆบๅจไบบ
:ๅๆฐ
* bot_id๏ผๆบๅจไบบQQ
:่ฟๅ
* bool๏ผๅ ้คๆฏๅฆๆๅ๏ผๅคฑ่ดฅๅๆฐๆฎไธๅญๅจ
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is not None:
await record.delete()
return True
return False
@classmethod
async def get_disconnect_bot(cls) -> list[dict]:
'''
่ทๅ็ฆป็บฟbotๅ่กจ,dict["bot_id", "last_left"]
'''
record_list = await cls.filter(online=False).values("bot_id", "last_left")
return record_list
@classmethod
async def get_all_bot(cls) -> list[dict]:
'''
่ทๅๆๆๆฐๆฎ
'''
record_list = await cls.all().values("bot_id", "owner_id", "nickname", "last_sign", "last_left", "online")
return record_list
| 23.795 | 114 | 0.536037 | 5,038 | 0.957613 | 0 | 0 | 4,361 | 0.82893 | 4,174 | 0.793385 | 1,970 | 0.374454 |
87340f549a9504189615f389775f3fa776205530 | 5,816 | py | Python | ckan/tests/legacy/functional/test_admin.py | ogdch/ckan | dd5d6a46971bcd167450d58eb2613b2fe0c40f38 | [
"Apache-2.0"
] | null | null | null | ckan/tests/legacy/functional/test_admin.py | ogdch/ckan | dd5d6a46971bcd167450d58eb2613b2fe0c40f38 | [
"Apache-2.0"
] | 1 | 2016-09-20T16:46:29.000Z | 2016-09-20T16:46:29.000Z | ckan/tests/legacy/functional/test_admin.py | ogdch/ckan | dd5d6a46971bcd167450d58eb2613b2fe0c40f38 | [
"Apache-2.0"
] | 3 | 2017-03-09T15:07:14.000Z | 2019-11-17T18:32:19.000Z | import ckan.model as model
from ckan.tests.legacy import url_for, CreateTestData, WsgiAppCase
class TestAdminController(WsgiAppCase):
@classmethod
def setup_class(cls):
# setup test data including testsysadmin user
CreateTestData.create()
@classmethod
def teardown_class(self):
model.repo.rebuild_db()
#test that only sysadmins can access the /ckan-admin page
def test_index(self):
url = url_for('ckanadmin', action='index')
# redirect as not authorized
response = self.app.get(url, status=[302])
# random username
response = self.app.get(url, status=[401],
extra_environ={'REMOTE_USER': 'my-random-user-name'})
# now test real access
username = u'testsysadmin'.encode('utf8')
response = self.app.get(url,
extra_environ={'REMOTE_USER': username})
assert 'Administration' in response, response
## This is no longer used
class _TestAdminAuthzController(WsgiAppCase):
@classmethod
def setup_class(cls):
# setup test data including testsysadmin user
CreateTestData.create()
model.Session.commit()
@classmethod
def teardown_class(self):
model.repo.rebuild_db()
def test_role_table(self):
#logged in as testsysadmin for all actions
as_testsysadmin = {'REMOTE_USER': 'testsysadmin'}
def get_system_user_roles():
sys_query=model.Session.query(model.SystemRole)
return sorted([(x.user.name,x.role) for x in sys_query.all() if x.user])
def get_response():
response = self.app.get(
url_for('ckanadmin', action='authz'),
extra_environ=as_testsysadmin)
assert 'Administration - Authorization' in response, response
return response
def get_user_form():
response = get_response()
return response.forms['theform']
def check_and_set_checkbox(theform, user, role, should_be, set_to):
user_role_string = '%s$%s' % (user, role)
checkboxes = [x for x in theform.fields[user_role_string] \
if x.__class__.__name__ == 'Checkbox']
assert(len(checkboxes)==1), \
"there should only be one checkbox for %s/%s" % (user, role)
checkbox = checkboxes[0]
#checkbox should be unticked
assert checkbox.checked==should_be, \
"%s/%s checkbox in unexpected state" % (user, role)
#tick or untick the box and submit the form
checkbox.checked=set_to
return theform
def submit(form):
return form.submit('save', extra_environ=as_testsysadmin)
def authz_submit(form):
return form.submit('authz_save', extra_environ=as_testsysadmin)
# get and store the starting state of the system roles
original_user_roles = get_system_user_roles()
# before we start changing things, check that the roles on the system are as expected
assert original_user_roles == \
[(u'logged_in', u'editor'), (u'testsysadmin', u'admin'), (u'visitor', u'reader')] , \
"original user roles not as expected " + str(original_user_roles)
# visitor is not an admin. check that his admin box is unticked, tick it, and submit
submit(check_and_set_checkbox(get_user_form(), u'visitor', u'admin', False, True))
# try again, this time we expect the box to be ticked already
submit(check_and_set_checkbox(get_user_form(), u'visitor', u'admin', True, True))
# put it back how it was
submit(check_and_set_checkbox(get_user_form(), u'visitor', u'admin', True, False))
# should be back to our starting state
assert original_user_roles == get_system_user_roles()
# change lots of things
form = get_user_form()
check_and_set_checkbox(form, u'visitor', u'editor', False, True)
check_and_set_checkbox(form, u'visitor', u'reader', True, False)
check_and_set_checkbox(form, u'logged_in', u'editor', True, False)
check_and_set_checkbox(form, u'logged_in', u'reader', False, True)
submit(form)
roles=get_system_user_roles()
# and assert that they've actually changed
assert (u'visitor', u'editor') in roles and \
(u'logged_in', u'editor') not in roles and \
(u'logged_in', u'reader') in roles and \
(u'visitor', u'reader') not in roles, \
"visitor and logged_in roles seem not to have reversed"
def get_roles_by_name(user=None, group=None):
if user:
return [y for (x,y) in get_system_user_roles() if x==user]
else:
assert False, 'miscalled'
# now we test the box for giving roles to an arbitrary user
# check that tester doesn't have a system role
assert len(get_roles_by_name(user=u'tester'))==0, \
"tester should not have roles"
# get the put tester in the username box
form = get_response().forms['addform']
form.fields['new_user_name'][0].value='tester'
# get the admin checkbox
checkbox = [x for x in form.fields['admin'] \
if x.__class__.__name__ == 'Checkbox'][0]
# check it's currently unticked
assert checkbox.checked == False
# tick it and submit
checkbox.checked=True
response = form.submit('add', extra_environ=as_testsysadmin)
assert "User Added" in response, "don't see flash message"
assert get_roles_by_name(user=u'tester') == ['admin'], \
"tester should be an admin now"
| 38.263158 | 98 | 0.617779 | 5,689 | 0.978164 | 0 | 0 | 427 | 0.073418 | 0 | 0 | 1,847 | 0.317572 |
87375bd58b68763ac46aa57f63b861f0eab7f1f4 | 943 | py | Python | main/PublicEmotionDatasets/Adobe/process/train_glove.py | cvlab-stonybrook/Emotion-Prediction | fb45f943208467ef91d8e43874599263f669166d | [
"MIT"
] | 10 | 2019-12-19T21:17:46.000Z | 2022-02-22T15:47:29.000Z | main/PublicEmotionDatasets/Adobe/process/train_glove.py | cvlab-stonybrook/Emotion-Prediction | fb45f943208467ef91d8e43874599263f669166d | [
"MIT"
] | 2 | 2020-06-05T03:14:15.000Z | 2020-06-14T09:14:54.000Z | main/PublicEmotionDatasets/Adobe/process/train_glove.py | cvlab-stonybrook/Emotion-Prediction | fb45f943208467ef91d8e43874599263f669166d | [
"MIT"
] | 2 | 2020-01-08T14:49:46.000Z | 2021-06-06T03:36:04.000Z | """
Copyright (c) 2019 Yevheniia Soroka
Licensed under the MIT License
Author: Yevheniia Soroka
Email: ysoroka@cs.stonybrook.edu
Last modified: 18/12/2019
Usage:
Run this script to train GloVe model on Adobe tags.
"""
import os
import gluonnlp as nlp
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
from glove import Corpus, Glove
import glob
import pickle
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
model_folder = "/nfs/bigfovea/add_disk0/eugenia/Emotion/wordembedding_models/"
#Creating a corpus object
corpus = Corpus()
#Training the corpus to generate the co occurence matrix which is used in GloVe
corpus.fit(lines, window=10)
# train the model
glove = Glove(no_components=5, learning_rate=0.05)
glove.fit(corpus.matrix, epochs=30, no_threads=4, verbose=True)
glove.add_dictionary(corpus.dictionary)
# save the model
glove.save(os.path.join(model_folder, 'glove_adobe.model')) | 25.486486 | 79 | 0.795334 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 437 | 0.463415 |
87384dc72f9e3199e13ece29c53d8947b043dfd8 | 3,592 | py | Python | src/py/fi3201/vis-anim/plot-sine-wave.py | butiran/butiran.github.io | bf99f55819a140190e5bda8f9675109ef607eb9d | [
"MIT"
] | null | null | null | src/py/fi3201/vis-anim/plot-sine-wave.py | butiran/butiran.github.io | bf99f55819a140190e5bda8f9675109ef607eb9d | [
"MIT"
] | 2 | 2020-08-08T13:57:20.000Z | 2020-08-08T14:18:05.000Z | src/py/fi3201/vis-anim/plot-sine-wave.py | butiran/butiran.github.io | bf99f55819a140190e5bda8f9675109ef607eb9d | [
"MIT"
] | 1 | 2020-08-08T13:54:23.000Z | 2020-08-08T13:54:23.000Z | #
# plot-sine-wave.py
# Produce a PNG file of a sine wave plot
#
# Sparisoma Viridi | https://butiran.github.io
#
# Execute: py plot-sine-wave.py
# Output: sine-t-<time>.png
#
# 20210212
# 1901 Create this by modifying moving-sine-wave.py from [1].
# 1902 Remove FuncAnimation from matplotlib.animation.
# 1904 Can save as PNG as in [2].
# 1949 Add comments and can show figure, learn Line2D [3].
# 1955 Can set axes label [4].
# 2002 Show grid [5].
# 2011 Use arange but modify [6] from xtics to set_xtics.
# 2021 Add text box [7].
# 2027 Set figure size [8], but in inch?
# 2038 Convert time with certain precision for output [9].
# 2024 Change size for Jekyll blog, hopefully better.
# 2120 Add _varphi to the function wave.
#
# References
# 1. Parul Pandey, "Animations with Mathplotlib", Towards Data Science, 14 Apr 2019, url https://towardsdatascience.com/animation-with-matplotlib-d96375c5442c [20210212].
# 2. Yann, Bhargav Rao, "Answer to 'matplotlib savefig in jpeg format'", StackOverflow, 01 Aug 2018 at 01:48, url https://stackoverflow.com/a/8827350 [20210212].
# 3. SHUBHAMSINGH10, "Matplotlib.axes.Axes.plot() in Python", GeeksforGeeks, 12 Apr 2020, url https://www.geeksforgeeks.org/matplotlib-axes-axes-plot-in-python/ [20210212].
# 4. Luv Dhamija, "Answer to 'How to set X and Y axis Title in matplotlib.pyplot'", StackOverflow, 08 Jun 2020 at 06:29, url https://stackoverflow.com/a/62256244 [20210212].
# 5. Andrey Sobolev, Peter Mortensen, "Answer to 'How do I draw a grid onto a plot in Python?'", StackOverflow, 20 Mar 2017 at 17:42, url https://stackoverflow.com/a/8210686 [20210212].
# 6. unutbu, "Answer to 'Changing the โtick frequencyโ on x or y axis in matplotlib?'", StackOverflow, 26 Sep 20212 at 19:24, url https://stackoverflow.com/a/12608937 [20210212].
# 7. Anake, "Answer to 'automatically position text box in matplotlib'", StackOverflow, 29 Oct 2015 at 14:59, url https://stackoverflow.com/a/33417697 [20210212].
# 8. iPas, cbare, "Answer to 'How do you change the size of figures drawn with matplotlib?'", StackOverflow, 01 Feb 2015 at 06:21, url https://stackoverflow.com/a/24073700 [20210212].
# 9. HAL 9001, "Answer to 'Convert floating point number to a certain precision, and then copy to string'", StackOverflow, 06 Mar 2019 at 19:57, url https://stackoverflow.com/a/15263885 [20210212].
#
# Import necessary packages
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.offsetbox import AnchoredText
# Define a function representing a sine wave
def swave(x, t):
A = 1.5
_lambda = 1
k = 2 * np.pi / _lambda
T = 1
_omega = 2 * np.pi / T
_varphi = 0
y = A * np.sin(k * x - _omega *t + _varphi)
return y
# Use style
plt.style.use("seaborn-pastel")
# Create figure with certain size in inch
fig = plt.figure(figsize=(2.5, 2.5))
# Set x range
xmin = 0
xmax = 2
xrange = (xmin, xmax)
# Set y range
ymin = -2
ymax = 2
yrange = (ymin, ymax)
# Set x and y axes
ax = plt.axes(xlim=xrange, ylim=yrange)
# Set axes label
ax.set_xlabel("x")
ax.set_ylabel("y")
# Set xtics
dx = 0.5
xtics = np.arange(xmin, xmax + dx, dx)
ax.set_xticks(xtics)
# Set ytics
dy = 1
ytics = np.arange(ymin, ymax + dy, dy)
ax.set_yticks(ytics)
# Get Line2D object representing plotted data
line, = ax.plot([], [], lw=3)
# Show grid or with True
plt.grid()
# Create data
t = 0
x = np.linspace(0, 4, 100)
y = swave(x, t)
line.set_data(x, y)
# Add time information
ts = "{:.2f}".format(t)
atext = AnchoredText("t = " + ts, loc=1)
ax.add_artist(atext)
# Save plot as PNG image
plt.savefig("sine-t-" + ts + ".png")
# Show plot
plt.show()
| 32.954128 | 197 | 0.707684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,706 | 0.752503 |
873cd9aa4ca69fb8db27282cbf6f46eaf38835b5 | 433 | py | Python | app/grandchallenge/emails/urls.py | comic/comic-django | 4f534fae2c7d2102e94991667398aef12394e32e | [
"Apache-2.0"
] | 7 | 2016-11-05T07:16:30.000Z | 2017-11-23T03:38:03.000Z | app/grandchallenge/emails/urls.py | comic/comic-django | 4f534fae2c7d2102e94991667398aef12394e32e | [
"Apache-2.0"
] | 113 | 2015-05-26T09:27:59.000Z | 2018-03-21T10:45:56.000Z | app/grandchallenge/emails/urls.py | comic/comic-django | 4f534fae2c7d2102e94991667398aef12394e32e | [
"Apache-2.0"
] | 7 | 2015-07-16T20:11:22.000Z | 2017-06-06T02:41:24.000Z | from django.urls import path
from grandchallenge.emails.views import (
EmailCreate,
EmailDetail,
EmailList,
EmailUpdate,
)
app_name = "emails"
urlpatterns = [
path("", EmailList.as_view(), name="list"),
path("create/", EmailCreate.as_view(), name="create"),
path("<int:pk>/", EmailDetail.as_view(), name="detail"),
path("<int:pk>/update/", EmailUpdate.as_view(), name="update"),
]
| 24.055556 | 68 | 0.628176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.180139 |
873d8b0d8fb5d84af7dbc0818d054644154458aa | 2,212 | py | Python | sdk/python/pulumi_azure_native/security/v20170801preview/_enums.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_native/security/v20170801preview/_enums.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_native/security/v20170801preview/_enums.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AlertNotifications',
'AlertsToAdmins',
'DataSource',
'ExportData',
'RecommendationConfigStatus',
'RecommendationType',
'SecuritySolutionStatus',
]
class AlertNotifications(str, Enum):
"""
Whether to send security alerts notifications to the security contact
"""
ON = "On"
OFF = "Off"
class AlertsToAdmins(str, Enum):
"""
Whether to send security alerts notifications to subscription admins
"""
ON = "On"
OFF = "Off"
class DataSource(str, Enum):
TWIN_DATA = "TwinData"
class ExportData(str, Enum):
RAW_EVENTS = "RawEvents"
class RecommendationConfigStatus(str, Enum):
"""
Recommendation status. The recommendation is not generated when the status is disabled
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class RecommendationType(str, Enum):
"""
The recommendation type.
"""
IO_T_ACR_AUTHENTICATION = "IoT_ACRAuthentication"
IO_T_AGENT_SENDS_UNUTILIZED_MESSAGES = "IoT_AgentSendsUnutilizedMessages"
IO_T_BASELINE = "IoT_Baseline"
IO_T_EDGE_HUB_MEM_OPTIMIZE = "IoT_EdgeHubMemOptimize"
IO_T_EDGE_LOGGING_OPTIONS = "IoT_EdgeLoggingOptions"
IO_T_INCONSISTENT_MODULE_SETTINGS = "IoT_InconsistentModuleSettings"
IO_T_INSTALL_AGENT = "IoT_InstallAgent"
IO_T_IP_FILTER_DENY_ALL = "IoT_IPFilter_DenyAll"
IO_T_IP_FILTER_PERMISSIVE_RULE = "IoT_IPFilter_PermissiveRule"
IO_T_OPEN_PORTS = "IoT_OpenPorts"
IO_T_PERMISSIVE_FIREWALL_POLICY = "IoT_PermissiveFirewallPolicy"
IO_T_PERMISSIVE_INPUT_FIREWALL_RULES = "IoT_PermissiveInputFirewallRules"
IO_T_PERMISSIVE_OUTPUT_FIREWALL_RULES = "IoT_PermissiveOutputFirewallRules"
IO_T_PRIVILEGED_DOCKER_OPTIONS = "IoT_PrivilegedDockerOptions"
IO_T_SHARED_CREDENTIALS = "IoT_SharedCredentials"
IO_T_VULNERABLE_TLS_CIPHER_SUITE = "IoT_VulnerableTLSCipherSuite"
class SecuritySolutionStatus(str, Enum):
"""
Security solution status
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
| 28.358974 | 90 | 0.735986 | 1,811 | 0.818716 | 0 | 0 | 0 | 0 | 0 | 0 | 1,141 | 0.515823 |
873dcab1696da898509ad657f23420a697da102c | 437 | py | Python | Python-For-Everyone-Horstmann/Chapter4-Loops/P4.12.py | islayy/Books-solutions | 5fe05deb4e9f65875284d8af43bd383bf9ae145b | [
"MIT"
] | null | null | null | Python-For-Everyone-Horstmann/Chapter4-Loops/P4.12.py | islayy/Books-solutions | 5fe05deb4e9f65875284d8af43bd383bf9ae145b | [
"MIT"
] | null | null | null | Python-For-Everyone-Horstmann/Chapter4-Loops/P4.12.py | islayy/Books-solutions | 5fe05deb4e9f65875284d8af43bd383bf9ae145b | [
"MIT"
] | 1 | 2021-01-30T22:19:07.000Z | 2021-01-30T22:19:07.000Z | # Write a program that reads a word and prints all substrings, sorted by length. For
# example, if the user provides the input "rum" , the program prints
# r
# u
# m
# ru
# um
# rum
word = str(input("Enter a word: "))
wordLen = len(word)
subLen = 1
start = 0
for i in range(wordLen):
start = 0
while start + subLen <= wordLen:
print(word[start:start+subLen])
start += 1
subLen += 1 | 20.809524 | 84 | 0.597254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.491991 |
873f6c355000a9b2cfae9ab9f448789e549f97f0 | 5,462 | py | Python | openprocurement/auction/esco/utils.py | ProzorroUKR/openprocurement.auction.esco | 16a127ac7fc47cacaaf5f2eb708ea8b273e57e56 | [
"Apache-2.0"
] | null | null | null | openprocurement/auction/esco/utils.py | ProzorroUKR/openprocurement.auction.esco | 16a127ac7fc47cacaaf5f2eb708ea8b273e57e56 | [
"Apache-2.0"
] | 3 | 2017-10-26T12:42:01.000Z | 2017-11-06T10:41:49.000Z | openprocurement/auction/esco/utils.py | ProzorroUKR/openprocurement.auction.esco | 16a127ac7fc47cacaaf5f2eb708ea8b273e57e56 | [
"Apache-2.0"
] | 4 | 2017-07-10T12:03:38.000Z | 2017-09-08T10:19:46.000Z | # -*- coding: utf-8 -*-
import json
from functools import partial
from fractions import Fraction
from barbecue import chef
from decimal import Decimal
def prepare_initial_bid_stage(bidder_name="",
bidder_id="",
time="",
amount_features="",
coeficient="",
amount="",
annualCostsReduction=None,
yearlyPaymentsPercentage="",
contractDurationDays="",
contractDurationYears=""):
if annualCostsReduction is None:
annualCostsReduction = []
stage = dict(bidder_id=bidder_id, time=str(time))
stage["label"] = dict(
en="Bidder #{}".format(bidder_name),
uk="ะฃัะฐัะฝะธะบ โ{}".format(bidder_name),
ru="ะฃัะฐััะฝะธะบ โ{}".format(bidder_name)
)
stage['amount'] = Fraction(amount )if amount else Fraction('0')
stage['yearlyPaymentsPercentage'] = yearlyPaymentsPercentage if yearlyPaymentsPercentage else 0
stage['contractDurationDays'] = contractDurationDays if contractDurationDays else 0
stage['contractDurationYears'] = contractDurationYears if contractDurationYears else 0
stage['annualCostsReduction'] = annualCostsReduction
if amount_features is not None and amount_features != "":
stage['amount_features'] = str(amount_features)
if coeficient:
stage['coeficient'] = str(coeficient)
return stage
def prepare_results_stage(bidder_name="",
bidder_id="",
time="",
amount_features="",
coeficient="",
amount="",
yearlyPaymentsPercentage="",
contractDurationDays="",
contractDurationYears=""):
stage = dict(bidder_id=bidder_id, time=str(time))
stage["label"] = dict(
en="Bidder #{}".format(bidder_name),
uk="ะฃัะฐัะฝะธะบ โ{}".format(bidder_name),
ru="ะฃัะฐััะฝะธะบ โ{}".format(bidder_name)
)
stage['amount'] = amount if amount else 0
stage['yearlyPaymentsPercentage'] = yearlyPaymentsPercentage if yearlyPaymentsPercentage else 0
stage['contractDurationDays'] = contractDurationDays if contractDurationDays else 0
stage['contractDurationYears'] = contractDurationYears if contractDurationYears else 0
if amount_features is not None and amount_features != "":
stage['amount_features'] = str(amount_features)
if coeficient:
stage['coeficient'] = str(coeficient)
return stage
def prepare_bids_stage(exist_stage_params, params={}):
exist_stage_params.update(params)
stage = dict(type="bids", bidder_id=exist_stage_params['bidder_id'],
start=str(exist_stage_params['start']), time=str(exist_stage_params['time']))
stage["amount"] = exist_stage_params['amount'] if exist_stage_params['amount'] else 0
stage["yearlyPaymentsPercentage"] = exist_stage_params['yearlyPaymentsPercentage'] if exist_stage_params['yearlyPaymentsPercentage'] else 0
stage["contractDurationDays"] = exist_stage_params['contractDurationDays'] if exist_stage_params['contractDurationDays'] else 0
stage["contractDurationYears"] = exist_stage_params['contractDurationYears'] if exist_stage_params['contractDurationYears'] else 0
if 'amount_features' in exist_stage_params:
stage["amount_features"] = exist_stage_params['amount_features']
if 'coeficient' in exist_stage_params:
stage["coeficient"] = exist_stage_params['coeficient']
if exist_stage_params['bidder_name']:
stage["label"] = {
"en": "Bidder #{}".format(exist_stage_params['bidder_name']),
"ru": "ะฃัะฐััะฝะธะบ โ{}".format(exist_stage_params['bidder_name']),
"uk": "ะฃัะฐัะฝะธะบ โ{}".format(exist_stage_params['bidder_name'])
}
else:
stage["label"] = {
"en": "",
"ru": "",
"uk": ""
}
return stage
def sorting_start_bids_by_amount(bids, features=None, reverse=True):
"""
>>> from json import load
>>> import os
>>> data = load(open(os.path.join(os.path.dirname(__file__),
... 'tests/functional/data/tender_simple.json')))
>>> sorted_data = sorting_start_bids_by_amount(data['data']['bids'])
"""
def get_amount(item):
return item['value']['amountPerformance']
# return sorted(bids, key=get_amount, reverse=reverse)
return chef(bids, features=features, awarding_criteria_key="amountPerformance", reverse=reverse)
def to_decimal(fraction):
return Decimal(fraction.numerator) / Decimal(fraction.denominator)
class FractionEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Fraction):
return str(obj)
return super(FractionEncoder, self).default(obj)
class FractionDecoder(json.JSONDecoder):
def default(self, obj):
data = super(FractionDecoder, self).decode(obj)
if isinstance(data, (str, unicode)) and ('/' in data):
try:
return Fraction(data)
except ValueError:
return data
return data
dumps = partial(json.dumps, cls=FractionEncoder)
loads = partial(json.loads, cls=FractionDecoder)
| 41.694656 | 143 | 0.623398 | 512 | 0.09277 | 0 | 0 | 0 | 0 | 0 | 0 | 1,326 | 0.240261 |
87401eff4431709b68990d11ae10ed30cfb15a48 | 5,446 | py | Python | src/octopus/dispatcher/model/pool.py | smaragden/OpenRenderManagement | cf3ab356f96969d7952b60417b48e941955e435c | [
"BSD-3-Clause"
] | 35 | 2015-02-23T23:13:13.000Z | 2021-01-03T05:56:39.000Z | src/octopus/dispatcher/model/pool.py | smaragden/OpenRenderManagement | cf3ab356f96969d7952b60417b48e941955e435c | [
"BSD-3-Clause"
] | 15 | 2015-01-12T12:58:29.000Z | 2016-03-30T13:10:19.000Z | src/octopus/dispatcher/model/pool.py | mikrosimage/OpenRenderManagement | 6f9237a86cb8e4b206313f9c22424c8002fd5e4d | [
"BSD-3-Clause"
] | 20 | 2015-03-18T06:57:13.000Z | 2020-07-01T15:09:36.000Z | ####################################################################################################
# @file pool.py
# @package
# @author
# @date 2008/10/29
# @version 0.1
#
# @mainpage
#
####################################################################################################
from weakref import WeakKeyDictionary
from . import models
class PoolShareCreationException(Exception):
'''Raised on a poolshare submission error.'''
## A portion of a pool bound to a dispatchTree node
#
class PoolShare(models.Model):
pool = models.ModelField(False, 'name')
node = models.ModelField()
allocatedRN = models.IntegerField()
maxRN = models.IntegerField()
userDefinedMaxRN = models.BooleanField()
# Use PoolShare.UNBOUND as maxRN value to allow full pool usage
UNBOUND = -1
## Constructs a new pool share.
#
# @param id the pool share unique identifier. Use None for auto-allocation by the DispatchTree.
# @param pool the pool from which to draw render nodes
# @param node the node where the poolshare is affected
# @param maxRN the max number of render nodes this share is allowed to draw from the pool. Use PoolShare.UNBOUND for unlimited access to the pool.
#
def __init__(self, id, pool, node, maxRN):
self.id = int(id) if id else None
self.pool = pool
self.node = node
self.allocatedRN = 0
self.maxRN = int(maxRN)
# Keep track of previous poolshares on the node's "additionnalPoolShares"
for ps in self.node.poolShares.values():
self.node.additionnalPoolShares[ps.pool] = ps
# check if we already have a poolShare with this pool and node
if node in pool.poolShares:
# reassign to the node if it already exists
self.node.poolShares = WeakKeyDictionary()
self.node.poolShares[self.pool] = self.pool.poolShares[self.node]
# Remove existing ref of the pool assigned
del(self.node.additionnalPoolShares[self.pool])
raise PoolShareCreationException("PoolShare on node %s already exists for pool %s", node.name, pool.name)
# registration
self.pool.poolShares[self.node] = self
# remove any previous poolshare on this node
self.node.poolShares = WeakKeyDictionary()
self.node.poolShares[self.pool] = self
# the default maxRN at the creation is -1, if it is a different value, it means it's user defined
if self.maxRN != -1:
self.userDefinedMaxRN = True
else:
self.userDefinedMaxRN = False
def hasRenderNodesAvailable(self):
# If job has some render nodes authorized
# and has not already used all of them.
if self.maxRN > 0 and self.allocatedRN >= self.maxRN:
return False
# PRA: is it possible to have no render node available?
# As we have computed the authorized RN regarding the available nodes...
#
# Is there some render nodes available in the pool?
return any((rn.isAvailable() for rn in self.pool.renderNodes))
def __repr__(self):
return "PoolShare(id=%r, pool.name=%r, node=%r, maxRN=%r, allocatedRN=%r)" % (self.id, self.pool.name if self.pool else None, self.node.name, self.maxRN, self.allocatedRN)
## This class represents a Pool.
#
class Pool(models.Model):
name = models.StringField()
renderNodes = models.ModelListField(indexField='name')
poolShares = models.ModelDictField()
## Constructs a new Pool.
# @param parent the pool's parent
# @param name the pool's name
#
def __init__(self, id, name):
self.id = int(id) if id else None
self.name = name if name else ""
self.renderNodes = []
self.poolShares = WeakKeyDictionary()
def archive(self):
self.fireDestructionEvent(self)
## Adds a render node to the pool.
# @param rendernode the rendernode to add
#
def addRenderNode(self, rendernode):
if self not in rendernode.pools:
rendernode.pools.append(self)
if rendernode not in self.renderNodes:
self.renderNodes.append(rendernode)
self.fireChangeEvent(self, "renderNodes", [], self.renderNodes)
## Removes a render node from the pool.
# @param rendernode the rendernode to remove
#
def removeRenderNode(self, rendernode):
if self in rendernode.pools:
rendernode.pools.remove(self)
if rendernode in self.renderNodes:
self.renderNodes.remove(rendernode)
self.fireChangeEvent(self, "renderNodes", [], self.renderNodes)
## Sets the rendernodes associated to this pool to the given list of rendernodes
# @param renderNodes the list of rendernodes to associate to the pool
def setRenderNodes(self, renderNodes):
for rendernode in self.renderNodes[:]:
self.removeRenderNode(rendernode)
for rendernode in renderNodes:
self.addRenderNode(rendernode)
## Returns a human readable representation of the pool.
#
def __str__(self):
return u"Pool(id=%s, name=%s)" % (repr(self.id), repr(self.name))
def __repr__(self):
return u"Pool(id=%s, name=%s)" % (repr(self.id), repr(self.name))
| 37.047619 | 180 | 0.615498 | 4,971 | 0.91278 | 0 | 0 | 0 | 0 | 0 | 0 | 2,193 | 0.402681 |
87410a5465e9de55d3305d33ab36376d56b46d98 | 13,287 | py | Python | optimiser.py | FJFranklin/DomeBuilder | b286486dd4ff99eacf5754653a0e318f57884c21 | [
"MIT"
] | null | null | null | optimiser.py | FJFranklin/DomeBuilder | b286486dd4ff99eacf5754653a0e318f57884c21 | [
"MIT"
] | null | null | null | optimiser.py | FJFranklin/DomeBuilder | b286486dd4ff99eacf5754653a0e318f57884c21 | [
"MIT"
] | null | null | null | import argparse
import csv
import sys
import numpy as np
from BeesEtAl.BA_Garden import BA_Garden
from BeesEtAl.F3_Garden import F3_Garden
from BeesEtAl.Base_Coster import Base_Coster
from DomeBuilder.MaterialLib import MaterialLib
from DomeBuilder.Frame import Frame
# Defaults (each may be overridden by the command-line options parsed below)
bF3 = False            # use the hybrid Firefly/Bees (F3) algorithm
bMESO = True           # enable MESO section-refinement suggestions
bShowStress = False    # plot each evaluated frame with stresses
bVerbose = False       # extra commentary from the cost function
priorities = [5,2,2,1]
prior_init = None      # optional per-patch bee counts for the first iteration
fixedRings = None      # when set, the ring count is masked to this value
bSelectedOnly = False  # evaluate a fixed list of designs instead of optimising
bLoadCheats = False    # preload scout locations from a CSV
bCostProduct = False   # single-objective (mass*deflection) instead of multiobjective
sourceFile = None      # CSV of designs for --selected-in / --sources-in
F3_flies_bees = [2,6,2] # two each of two genders * three orientations + six bees per gender = 24 evaluations per iteration
F3_bee_radius = 0.02

parser = argparse.ArgumentParser(description="DomeBuilder dome optimisation tool.")
parser.add_argument('-v', '--verbose', help='Print additional commentary.', action='store_true')
parser.add_argument('--show-stress', help='Plot each frame, showing stresses.', action='store_true')
parser.add_argument('--no-meso', help='Optimise without MESO.', action='store_true')
parser.add_argument('--f3', help='Use hybrid Firefly - Bees Algorithm.', action='store_true')
parser.add_argument('--refinement', help='Run a study with 72 elite patches, 5 bees each.', action='store_true')
parser.add_argument('--bias-hybrid', help='7-8 rings; reduced parameter range to seek hybrid designs.', action='store_true')
parser.add_argument('--cost-product', help='Make cost function the product of mass and displacement.', action='store_true')
parser.add_argument('--duration', help='Duration, i.e., how many evaluations to end at [1000].', type=int, default=1000)
parser.add_argument('--fix-rings', help='Fix the number of node-rings.', type=int, default=0, dest='rings')
parser.add_argument('--pareto-out', help='Specify output file name for Pareto-optimal set [pareto.csv].', type=str, default='pareto.csv')
parser.add_argument('--results-out', help='Specify output file name for results [results.csv].', type=str, default='results.csv')
parser.add_argument('--selected-in', help='Specify input file name: do not run the optimiser, just evaluate locations.', type=str)
parser.add_argument('--sources-in', help='Specify input file name: pre-load scout locations.', type=str)
args = parser.parse_args()

maxsol_runs = args.duration
resultsFile = args.results_out
paretoFile = args.pareto_out

# Parameter bounds: (min, max) for ring count Nr, nodes-per-ring Nn,
# ring-spacing coefficient Ns and lattice coefficient Nm.
if args.bias_hybrid:
    Nr_min_max = ( 7, 8)
    Nn_min_max = ( 5, 20)
    Ns_min_max = (20, 30)
    Nm_min_max = (30, 40)
else:
    Nr_min_max = ( 3, 8)
    Nn_min_max = ( 5, 20)
    Ns_min_max = (10, 30)
    Nm_min_max = (10, 40)

if args.cost_product:
    bCostProduct = True
if args.verbose:
    bVerbose = True
if args.show_stress:
    bShowStress = True
if args.f3:
    bF3 = True

if args.selected_in is not None:
    # Evaluation-only mode: score the designs in the given file, no search.
    bSelectedOnly = True
    sourceFile = args.selected_in
    print('Evaluating locations from: {f}'.format(f=sourceFile))
else:
    # NOTE(review): nesting restored from mangled indentation -- the
    # optimiser-setup options below are taken to apply only when not in
    # --selected-in mode; confirm against the upstream repository.
    if args.no_meso:
        bMESO = False
        print('MESO disabled.')
    else:
        print('MESO enabled.')
    if args.rings > 0:
        fixedRings = args.rings
    if args.sources_in is not None:
        bLoadCheats = True
        sourceFile = args.sources_in
    if args.refinement:
        # Refinement study: many elite patches, long fixed duration,
        # requires preloaded scout locations.
        if args.sources_in is None:
            print('A refinement study should be used with preloaded scout locations.')
            sys.exit()
        maxsol_runs = 3600
        priorities = [5]*72 + [1]
        prior_init = [1]*73
        bF3 = False
        print('Running a refinement study.')
class DomeCoster(Base_Coster):
    """Cost function for the dome optimisation (a BeesEtAl Base_Coster).

    Evaluates candidate dome designs with DomeBuilder's Frame analysis and
    turns mass, stress, buckling and deflection into the optimiser's cost.
    Reads the module-level configuration globals set by the command-line
    parsing above (bF3, bVerbose, bShowStress, bMESO, bCostProduct,
    resultsFile, F3_flies_bees).
    """

    @staticmethod
    def create_garden(priorities, Nr_min_max, Nn_min_max, Ns_min_max, Nm_min_max):
        """Build a BA/F3 garden over the dome parameter space and attach a DomeCoster.

        The parameter vector is [Nr, Nn, Ns, Nm] followed by one CHS
        section index (0..78) per section-group; the number of groups is
        3*(Nr_max - 1) + 1.
        """
        Nr_min, Nr_max = Nr_min_max
        Nn_min, Nn_max = Nn_min_max
        Ns_min, Ns_max = Ns_min_max
        Nm_min, Nm_max = Nm_min_max
        Nsec = (Nr_max - 1) * 3 + 1
        # Row 0 holds lower bounds, row 1 upper bounds.
        par = np.zeros((2, 4+Nsec))
        par[0,0:4] = [Nr_min, Nn_min, Ns_min, Nm_min]
        par[1,0:4] = [Nr_max, Nn_max, Ns_max, Nm_max]
        par[1,4:(4+Nsec)] = np.ones(Nsec) * 78
        if bF3:
            G = F3_Garden(par[0,:], par[1,:], F3_flies_bees)
        else:
            G = BA_Garden(par[0,:], par[1,:], priorities)
        G.costfn = DomeCoster(G, par)
        return G

    def __init__(self, garden, parameter_matrix):
        """Initialise physical constants, loads, and (re)create the results CSV."""
        Base_Coster.__init__(self, garden)
        self.par = parameter_matrix
        self.Ht = 8 # tank-wall height
        self.Rt = 21 # tank-wall radius
        self.Rd = 29.4 # dome radius
        self.wind_file_name = 'resources/Pressures_29_4.axdt' # or None to skip
        self.shell_material = MaterialLib.steel()
        self.shell_thickness = 0.007
        self.snow_load = 1000 # snow load [+ve] per square metre (projected)
        self.dome = None
        if bVerbose:
            self.verbose = True # print patch info
        # Write the parameter bounds as the first two rows of the results file
        # ('w' truncates any previous run's output).
        with open(resultsFile, 'w') as csvfile:
            writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            minima = self.par[0,:]
            maxima = self.par[1,:]
            writer.writerow(['','','','','',*minima])
            writer.writerow(['','','','','',*maxima])

    def map_to_solution_space(self, X):
        """Snap continuous optimiser coordinates to the discrete design space.

        Nr, Nn and all section indices are rounded to the nearest integer;
        the two coefficients Ns (index 2) and Nm (index 3) stay continuous.
        """
        # Let's restrict solution space to discrete values, and choose the nearest
        XA = np.copy(X)
        XA[0] = np.around(X[0], decimals=0)
        XA[1] = np.around(X[1], decimals=0)
        for s in range(4, len(X)):
            XA[s] = np.around(X[s], decimals=0)
        return XA

    def __determine_penalty(self, value, lower_limit, upper_limit, max_penalty=1000):
        """Add a piecewise-linear penalty to self.cost.

        Zero below lower_limit, ramping linearly to max_penalty at
        upper_limit and capped there.
        """
        penalty = 0
        if value > lower_limit:
            if value > upper_limit:
                penalty = 1
            else:
                penalty = (value - lower_limit) / (upper_limit - lower_limit)
        self.cost = self.cost + penalty * max_penalty

    def evaluate_cost(self):
        """Build and analyse the dome described by self.XA, setting self.cost.

        Cost is mass in tonnes plus stress/buckling/deflection penalties;
        depending on bCostProduct the final cost is either the product with
        deflection (mm) or a two-objective [cost, deflection] list. Every
        evaluation is also appended to the results CSV.
        """
        Nr = int(self.XA[0]) # no. rings
        Nn = int(self.XA[1]) # no. nodes per ring
        Ns = self.XA[2] # ring spacing coefficient
        Nm = self.XA[3] # lattice coefficient
        Nc = 0 # number of circumferential rings
        Ng = 3 * (Nr - 1) + 1 # no. of distinct section-groups, so Nr=20 => 58 section-groups
        pg = 10000 / Ng # adjusted maximum penalty
        if self.verbose:
            print('Creating dome')
        self.dome = Frame((self.Rt, self.Ht), self.Rd, Nr, Nn, (Ns, Nm), self.verbose)
        # Assign the chosen CHS section index to each section-group:
        # the central group, then (anticlockwise, clockwise, diagonal) per ring.
        s = 4
        group = self.dome.get_group('central')
        group['sec_index'] = int(self.XA[s])
        for r in range(1, Nr):
            s = s + 1
            group = self.dome.get_group('anticlockwise', r)
            group['sec_index'] = int(self.XA[s])
            s = s + 1
            group = self.dome.get_group('clockwise', r)
            group['sec_index'] = int(self.XA[s])
            s = s + 1
            group = self.dome.get_group('diagonal', r)
            group['sec_index'] = int(self.XA[s])
        self.dome.apply_loads(self.wind_file_name, self.shell_material, self.shell_thickness, self.snow_load)
        if self.verbose:
            print('Setting up frame analysis')
        mass, sigma, deflection = self.dome.analyse()
        if self.verbose:
            print("mass = {:.2f} tonnes; max stress = {:.2f} MPa; deflection = {:.2f} mm.".format(mass/1E3, sigma/1E6, deflection*1E3))
        # Base cost: mass in tonnes; then penalise deflection and each
        # group's stress/buckling utilisation (ramp starts at 75%).
        self.cost = mass / 1E3
        self.__determine_penalty(deflection, 0.0375, 0.05, pg)
        group = self.dome.get_group('central')
        self.__determine_penalty(group['max_stress'], 0.75, 1, pg)
        self.__determine_penalty(group['buckling'], 0.75, 1, pg)
        for r in range(1, Nr):
            group = self.dome.get_group('anticlockwise', r)
            self.__determine_penalty(group['max_stress'], 0.75, 1, pg)
            self.__determine_penalty(group['buckling'], 0.75, 1, pg)
            group = self.dome.get_group('clockwise', r)
            self.__determine_penalty(group['max_stress'], 0.75, 1, pg)
            self.__determine_penalty(group['buckling'], 0.75, 1, pg)
            group = self.dome.get_group('diagonal', r)
            self.__determine_penalty(group['max_stress'], 0.75, 1, pg)
            self.__determine_penalty(group['buckling'], 0.75, 1, pg)
            # Count rings whose diagonal group resolved to circumferential.
            if group['orientation'] == 'circumferential':
                Nc = Nc + 1
        if self.verbose:
            print("cost = {:.2f} <~~~~~~~~~~~~~~~~".format(self.cost))
        if bShowStress:
            self.dome.plot_frame(False, True, False, True)
        # Append this evaluation to the results CSV.
        with open(resultsFile, 'a') as csvfile:
            writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            writer.writerow([mass, sigma, deflection, self.cost, Nc, *self.XA])
        if bCostProduct:
            self.cost = self.cost * (deflection * 1E3)
        else:
            # let's make this multiobjective
            self.cost = [self.cost, (deflection * 1E3)]

    def __meso_check_group(self, s, group):
        """Record a MESO suggestion for one section-group.

        Over-utilised groups (stress or buckling > 75%) get a stronger
        section suggested immediately (lower index = larger section,
        presumably -- confirm against the section library); otherwise the
        least-stressed strengthenable group is tracked for weakening.
        """
        secindex = group['sec_index']
        if secindex > 0:
            if group['max_stress'] > 0.75 or group['buckling'] > 0.75:
                self.XM[s] = secindex - 1 # suggest an increase in the second moment of area of the section
                return
        if secindex < 78:
            if self.strongest_group is None:
                self.strongest_group = group
                self.strongest_index = s
            elif group['max_stress'] < self.strongest_group['max_stress']:
                self.strongest_group = group
                self.strongest_index = s

    def meso(self):
        """MESO step: suggest section changes for the just-evaluated design.

        Walks the groups in the same order as evaluate_cost; after
        strengthening any overstressed groups, the single least-stressed
        group is suggested a lighter section. No-op when bMESO is False.
        """
        if bMESO:
            Nr = int(self.XA[0]) # no. rings - no. of distinct section-groups = 3 * (Nr - 1) + 1, so Nr=20 => 58 section-groups
            self.strongest_group = None
            self.strongest_index = None
            s = 4
            self.__meso_check_group(s, self.dome.get_group('central'))
            for r in range(1, Nr):
                s = s + 1
                self.__meso_check_group(s, self.dome.get_group('anticlockwise', r))
                s = s + 1
                self.__meso_check_group(s, self.dome.get_group('clockwise', r))
                s = s + 1
                self.__meso_check_group(s, self.dome.get_group('diagonal', r))
            if self.strongest_group is not None:
                secindex = self.strongest_group['sec_index']
                self.XM[self.strongest_index] = secindex + 1 # suggest a decrease in the second moment of area of the section

    def load_and_run(self, file_name):
        """Evaluate every design listed in a CSV file (one row per design).

        Each row must contain a full parameter vector of floats; rows are
        scored via the Base_Coster calculate_cost machinery.
        """
        with open(file_name, newline='') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                values = list(map(float, row))
                X = np.asarray(values)
                cost = self.calculate_cost(X)
# Build the garden (optimiser population) over the configured bounds.
G = DomeCoster.create_garden(priorities, Nr_min_max, Nn_min_max, Ns_min_max, Nm_min_max)

if bSelectedOnly:
    # Evaluation-only mode: score the preselected designs, dump the
    # Pareto set, and keep the plot window alive for a minute.
    G.costfn.load_and_run(sourceFile)
    G.pareto(paretoFile)
    print('==== Finished - quitting in 60 seconds ====')
    G.costfn.dome.syncPlotting(60)
else:
    # Configure the search neighbourhood.
    method = 'gauss'
    if bF3:
        params = { 'bee-radius': F3_bee_radius, 'neighborhood': method }
    else:
        Nfail = 6 # i.e., stops at 6th failure
        rf = 1 / 78 # distance between CHS sections in unit space
        r0, sf = G.initial_radius_and_shrinking(Nfail, rf, method)
        params = { 'radius': r0, 'shrink': sf, 'fail_at': Nfail, 'neighborhood': method, 'dynamic': True }
    G.set_search_params(**params)
    if fixedRings is not None:
        # let's fix the number of rings: mask coordinate 0 and force its value.
        mask = np.ones(G.Ndim)
        defs = np.zeros(G.Ndim)
        mask[0] = 0
        defs[0] = fixedRings
        G.set_mask_and_defaults(mask, defs)
        print('Fixing number of rings = {n}.'.format(n=fixedRings))
    if sourceFile is not None:
        # Seed the search with known-good scout locations.
        Ncheats = G.preload(sourceFile)
        print('Preloaded {n} solutions from {f}.'.format(n=Ncheats, f=sourceFile))
    # Main optimisation loop: iterate until the evaluation budget is spent.
    solver_runs = 0
    it = 0
    while solver_runs < maxsol_runs:
        it = it + 1
        if it == 1 and prior_init is not None:
            # Refinement studies override the per-patch bee counts once.
            solver_runs = G.iterate(maxsol_runs, override=prior_init)
        else:
            solver_runs = G.iterate(maxsol_runs)
        best_cost, best_X = G.best()
        print('Iteration {:4d}: {s}/{t} solver runs'.format(it, s=solver_runs, t=maxsol_runs))
        print('Global best = ' + str(best_cost) + ' { ' + str(best_X) + ' }')
    G.pareto(paretoFile)
| 38.181034 | 158 | 0.567246 | 7,513 | 0.56544 | 0 | 0 | 701 | 0.052758 | 0 | 0 | 2,847 | 0.21427 |
87444113cdb71868cffb7577fdd08b827329e23a | 333 | py | Python | File_Counts.py | juliaviolet/Google_IT_Python_Crash_Course | e48f37f41000bb7fa6cfca197a964b792125067f | [
"MIT"
] | null | null | null | File_Counts.py | juliaviolet/Google_IT_Python_Crash_Course | e48f37f41000bb7fa6cfca197a964b792125067f | [
"MIT"
] | null | null | null | File_Counts.py | juliaviolet/Google_IT_Python_Crash_Course | e48f37f41000bb7fa6cfca197a964b792125067f | [
"MIT"
] | null | null | null | file_counts = {"jpg":10, "txt":14, "csv":2, "py":23}
# Demonstrates basic dict operations: lookup, membership tests, insertion
# and update. Rewritten as runnable Python 3 -- the original mixed Python 2
# `print` statements with pasted interpreter output as bare expressions,
# and contained an invalid literal (`"cfg" = 8` is not legal in a dict).
file_counts = {"jpg": 10, "txt": 14, "csv": 2, "py": 23}
print(file_counts)
print(file_counts["txt"])        # -> 14
print("jpg" in file_counts)      # -> True
print("html" in file_counts)     # -> False
file_counts["cfg"] = 8           # insert a new key
print(file_counts)
file_counts["csv"] = 17          # overwrite an existing key
print(file_counts)
| 22.2 | 52 | 0.633634 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.279279 |
8744e9e584ee597102545132a7add5ffd86f0a43 | 3,632 | py | Python | monolithic/order_transaction.py | liks79/sample-saga-with-step-functions | d515317f93cdb7b294de06de8f2b571c5bf9edb9 | [
"BSD-3-Clause"
] | 3 | 2020-03-17T08:08:15.000Z | 2020-04-17T07:16:05.000Z | monolithic/order_transaction.py | liks79/sample-saga-with-step-functions | d515317f93cdb7b294de06de8f2b571c5bf9edb9 | [
"BSD-3-Clause"
] | 3 | 2020-03-20T05:51:28.000Z | 2020-03-20T15:18:30.000Z | monolithic/order_transaction.py | liks79/sample-saga-with-aws-step-functions | d515317f93cdb7b294de06de8f2b571c5bf9edb9 | [
"BSD-3-Clause"
] | null | null | null | """
Database model for Monolithic application
~~~~~~~~~~~~~~~~~~~~~~~
:created date: Thursday, March 12, 2020
:description: Monolithic application implementation of simple order transaction scenario
:copyright: © 2020 written by sungshik (liks79@gmail.com)
:license: BSD 3-Clause License, see LICENSE for more details.
"""
import config
import traceback
from model import db
from model.user import User
from model.order import Order
from model.membership import Membership
from model.payment import Payment
from model.inventory import Inventory
from faker import Faker
from pprint import pformat
from sqlalchemy_utc import utcnow
from exception import OutOfStockError, PaymentFailureError
fake = Faker(config.FAKER_LOCALE)
logger = config.logger

# Demonstration of a monolithic order transaction: every step below runs in
# ONE database transaction, so a failure at any point rolls everything back.
# NOTE(review): payment.allowed is randomised via fake.boolean(), and the
# ordered quantity may exceed stock, so this script is EXPECTED to fail
# intermittently -- those errors are intentional (see the raise sites below).
try:
    """ Data Preparation """
    # Create tables, if it is not existed.
    db.Base.metadata.create_all(bind=db.engine)
    # T0.TRANSACTION BEGIN
    # Transaction is already begun by SQLalchemy
    logger.info('#### T0.TRANSACTION BEGIN ####')
    # T1.Initiate User model: the first user on record acts as the buyer.
    logger.info('#### T1.INITIATE USER MODEL ####')
    user = db.session.query(User).first()
    logger.info(pformat(user.to_json()))
    # T2.Initiate Inventory model: the first inventory row is the item sold.
    logger.info('#### T2.INITIATE INVENTORY MODEL ####')
    item = db.session.query(Inventory).first()
    logger.info(pformat(item.to_json()))
    # T3.Initiate Payment model ('allowed' is random -- may trigger the
    # PaymentFailureError branch further down).
    logger.info('#### T3.INITIATE PAYMENT MODEL ####')
    payment = Payment(pay_type='credit_card',
                      user_id=user.user_id,
                      allowed=fake.boolean(),
                      date=utcnow())
    logger.info(pformat(payment.to_json()))
    db.session.add(payment)
    # db.session.commit()
    # T4.Initiate Order model with randomised quantity and contact details.
    logger.info('#### T4.INITIATE ORDER MODEL ####')
    ordered_item_qty = fake.random_int(1, 100)
    order = Order(user_id=user.user_id,
                  item_id=item.item_id,
                  item_qty=ordered_item_qty,
                  date=utcnow(),
                  deliver_phone=fake.phone_number(),
                  deliver_address=fake.address(),
                  total_price=fake.random_int(100, 1000000))
    logger.info(pformat(order.to_json()))
    db.session.add(order)
    # db.session.commit()
    # T5.Update Inventory model: deduct the ordered quantity from stock.
    logger.info('#### T5.UPDATE INVENTORY MODEL ####')
    logger.info('UPDATE INVENTORY QTY: {} - {} = {}'.
                format(item.qty, ordered_item_qty, item.qty - ordered_item_qty))
    item.qty = item.qty - ordered_item_qty
    # T6.Update Membership model: award mileage points for the purchase.
    membership = Membership(user_id=user.user_id,
                            date=utcnow(),
                            mileage=fake.random_int(10, 400))
    db.session.add(membership)
    logger.info('#### T6.UPDATE MEMBERSHIP ####')
    logger.info(pformat(membership.to_json()))
    # OutOfStockError Exception handling: negative stock aborts the order.
    logger.info(pformat(item.to_json()))
    if item.qty < 0:
        logger.error('ORDERED ITEM IS OUT OF STOCK: %s' % item.qty)
        logger.error('THIS IS AN INTENDED ERROR.')
        raise OutOfStockError
    # PaymentFailureError Exception handling: a declined payment aborts too.
    logger.info(pformat(payment.to_json()))
    if payment.allowed is not True:
        logger.error('PAYMENT TRANSACTION IS FAILED: %s' % payment.allowed)
        logger.error('THIS IS AN INTENDED ERROR.')
        raise PaymentFailureError
    # T7.Commit: everything succeeded, make all changes permanent.
    db.session.commit()
    logger.info('#### T7.TRANSACTION COMPLETED! ####')
except Exception as e:
    # Any failure (including the intentional ones above) undoes T1-T6.
    logger.error(e)
    print(traceback.format_exc())
    db.session.rollback()
finally:
    db.session.close()
| 33.62963 | 92 | 0.647577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,222 | 0.336361 |
8745e6b7c33659f03c672584cd00f2afaf7bb97a | 662 | py | Python | build.py | FrederichRiver/neutrino3 | c16c6ea824999c012252d0e281473a6ab13fd38e | [
"BSD-3-Clause"
] | 1 | 2021-07-12T11:20:58.000Z | 2021-07-12T11:20:58.000Z | build.py | FrederichRiver/neutrino3 | c16c6ea824999c012252d0e281473a6ab13fd38e | [
"BSD-3-Clause"
] | null | null | null | build.py | FrederichRiver/neutrino3 | c16c6ea824999c012252d0e281473a6ab13fd38e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
"""Build an sdist package for every library in the project and collect the
resulting dist/ directories under the project root (NEU_PATH)."""
import os
import sys

PROJ_PATH = os.getenv('NEU_PATH')

# Libraries to build, in dependency order.
lib_list = [
    'dev_global', 'libutils', 'libmysql_utils',
    'libbasemodel', 'libspider', 'libnlp',
    'libcontext', 'service_api', 'libstrategy', 'libtask']
# lib_list = ['libstrategy',]


def build_library(root, lib):
    """Build one library: run setup.py sdist in its directory, copy the
    dist/ output to the project root, then remove the local dist/."""
    print(f"[Building {lib}]")
    # go into library directory
    os.chdir(f"{root}/{lib}")
    # run setup script
    os.system("python3 setup.py sdist")
    # remove egg-info file in package
    # os.system(f"rm -r {lib}.egg-info")
    # cp package in lib/dist into root path
    os.system(f"cp -r dist/ {root}/")
    # remove lib/dist
    os.system("rm -r dist/")


if __name__ == "__main__":
    # BUG FIX: os.getenv returns None when NEU_PATH is unset, which made
    # the old script os.chdir("None/<lib>") and crash with a confusing
    # FileNotFoundError. Fail fast with a clear message instead.
    if PROJ_PATH is None:
        sys.exit("NEU_PATH environment variable is not set")
    for lib in lib_list:
        build_library(PROJ_PATH, lib)
| 28.782609 | 58 | 0.637462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 454 | 0.685801 |
8747368931826c6a70cd078a736cf5cc4deabb4f | 175 | py | Python | problem0116.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | problem0116.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | problem0116.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | ###########################
#
# #116 Red, green or blue tiles - Project Euler
# https://projecteuler.net/problem=116
#
# Code by Kevin Marciniak
#
###########################
| 19.444444 | 47 | 0.48 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.954286 |
8749210b10b6c1b7dc00fe6d7d2356679b07cd0f | 7,569 | py | Python | src_adsabs/ads_dataviz.py | mlares/iate_audit | 3ae8aff2b3de853d43619c592ac8cedf0ff35591 | [
"MIT"
] | null | null | null | src_adsabs/ads_dataviz.py | mlares/iate_audit | 3ae8aff2b3de853d43619c592ac8cedf0ff35591 | [
"MIT"
] | null | null | null | src_adsabs/ads_dataviz.py | mlares/iate_audit | 3ae8aff2b3de853d43619c592ac8cedf0ff35591 | [
"MIT"
] | null | null | null | import numpy as np
import pickle
import pandas as pd
import random
from matplotlib import pyplot as plt
from plot_styles import *
# #################################################################
# Load data
# #################################################################
# Pickled pandas DataFrames produced upstream: per-author and
# per-institution publication lists, plus "top journal" subsets.
# NOTE(review): cycling_attrs()/aes_attrs() come from the star import of
# plot_styles above -- presumably they cycle/return matplotlib line
# aesthetics; confirm in plot_styles.py.

df_papers_auth = pickle.load(open('../dat/df_papers_auth.pk', 'rb'))
df_papers_auth_top = pickle.load(open('../dat/df_papers_auth_top.pk', 'rb'))
df_papers_inst = pickle.load(open('../dat/df_papers_inst.pk', 'rb'))
df_papers_inst_top = pickle.load(open('../dat/df_papers_inst_top.pk', 'rb'))

# #################################################################
# Visualization of metrics
# #################################################################

# Number of papers and proceedings per year ---------------------------------

y = df_papers_inst.year.values
y = [int(a) for a in y]
# One histogram bin per calendar year, centred on the year.
t = np.arange(int(min(y))-0.5, int(max(y))+0.5, 1)

fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot()
H = ax.hist(y, bins=t)
# Tallest bar; reused below so both yearly histograms share a y-scale.
ymax = max(H[0])

ax.set_xlabel('year')
ax.set_ylabel('published works')
ax.set_title('works published by IATE')
ax.set_ylim(0, ymax)
ax.grid()

fout = ("../plt/papers_per_year.png")
fig.savefig(fout)
plt.close()

# Number of papers in top journals per year ---------------------------------

y = df_papers_inst_top.year.values
y = [int(a) for a in y]
t = np.arange(int(min(y))-0.5, int(max(y))+0.5, 1)

fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot()
ax.hist(y, bins=t)

ax.set_xlabel('year')
ax.set_ylabel('published papers')
ax.set_title('Papers published by IATE in top journals')
ax.set_ylim(0, ymax)
ax.grid()

fout = ("../plt/papers_top_per_year.png")
fig.savefig(fout)
plt.close()

# cumulative number of papers per author ------------------------------------
# including proceedings, normalized to the first paper

tedges = np.arange(-0.5, 20.5, 1)
tmeans = np.arange(0, 20, 1)

fig = plt.figure(figsize=(14, 7))
ax = fig.add_subplot()
cycling_attrs()

y_max = 0
auth_names = list(df_papers_inst.author1.unique())
for a in auth_names:
    df = df_papers_inst[df_papers_inst['author1'].isin([a])]
    y = [int(i) for i in df.year.values]
    if len(y)==0:
        continue
    y = np.array(y)
    # Shift so each author's career starts at year 0.
    y = y - min(y)
    H = np.histogram(y, bins=tedges)
    ac = H[0].cumsum()
    y_max = max(y_max, max(ac))
    aesthetics = aes_attrs()
    ax.plot(tmeans, ac, label=a, **aesthetics)

ax.set_title('Cumulative works published by IATE researchers')
ax.set_xlabel('years since first publication')
ax.set_ylabel('cumulative number of papers')
ax.set_ylim(0, y_max)
ax.legend(loc=2, ncol=2, fontsize='small', frameon=False,
          handlelength=6)

fout = ("../plt/papers_by_author_zero.png")
fig.savefig(fout)
plt.close()

# cumulative number of papers per author ------------------------------------
# excluding proceedings, normalized to the first paper

tedges = np.arange(-0.5, 20.5, 1)
tmeans = np.arange(0, 20, 1)

fig = plt.figure(figsize=(14, 7))
ax = fig.add_subplot()
cycling_attrs()

auth_names = list(df_papers_inst.author1.unique())
for a in auth_names:
    df = df_papers_inst_top[df_papers_inst_top['author1'].isin([a])]
    y = [int(i) for i in df.year.values]
    if len(y)==0:
        continue
    y = np.array(y)
    y = y - min(y)
    H = np.histogram(y, bins=tedges)
    ac = H[0].cumsum()
    aesthetics = aes_attrs()
    ax.plot(tmeans, ac, label=a, **aesthetics)

ax.set_title('Cumulative papers published by IATE researchers')
ax.set_xlabel('years since first publication')
ax.set_ylabel('cumulative number of papers in journals')
ax.set_ylim(0, y_max)
ax.legend(loc=2, ncol=2, fontsize='small', frameon=False,
          handlelength=6)

fout = ("../plt/papers_by_author_top_zero.png")
fig.savefig(fout)
plt.close()

# cumulative number of papers per author ------------------------------------
# including proceedings (calendar years)

tedges = np.arange(1995, 2021, 1)
tmeans = np.arange(1995, 2020, 1)

fig = plt.figure(figsize=(14, 7))
ax = fig.add_subplot()
cycling_attrs()

y_max = 0
auth_names = list(df_papers_inst.author1.unique())
for a in auth_names:
    df = df_papers_inst[df_papers_inst['author1'].isin([a])]
    y = [int(i) for i in df.year.values]
    if len(y)==0:
        continue
    y = np.array(y)
    H = np.histogram(y, bins=tedges)
    ac = H[0].cumsum()
    y_max = max(y_max, max(ac))
    aesthetics = aes_attrs()
    ax.plot(tmeans, ac, label=a, **aesthetics)

ax.set_title('Cumulative works published by IATE researchers')
ax.set_xlabel('year')
ax.set_ylabel('cumulative number of papers')
ax.set_ylim(0, y_max)
ax.legend(loc=2, ncol=2, fontsize='small', frameon=False,
          handlelength=6)

fout = ("../plt/papers_by_author.png")
fig.savefig(fout)
plt.close()

# cumulative number of papers per author ------------------------------------
# excluding proceedings (calendar years)

tedges = np.arange(1995, 2021, 1)
tmeans = np.arange(1995, 2020, 1)

fig = plt.figure(figsize=(14, 7))
ax = fig.add_subplot()
cycling_attrs()

auth_names = list(df_papers_inst.author1.unique())
for a in auth_names:
    df = df_papers_inst_top[df_papers_inst_top['author1'].isin([a])]
    y = [int(i) for i in df.year.values]
    if len(y)==0:
        continue
    y = np.array(y)
    H = np.histogram(y, bins=tedges)
    ac = H[0].cumsum()
    aesthetics = aes_attrs()
    ax.plot(tmeans, ac, label=a, **aesthetics)

ax.set_title('Cumulative papers published by IATE researchers')
ax.set_xlabel('year')
ax.set_ylabel('cumulative number of papers in journals')
ax.set_ylim(0, y_max)
ax.legend(loc=2, ncol=2, fontsize='small', frameon=False,
          handlelength=6)

fout = ("../plt/papers_by_author_top.png")
fig.savefig(fout)
plt.close()

# number of authors vs citations vs years -----------------------------------
# Bubble scatter: small random jitter avoids overplotting of the integer
# author/citation counts; bubble size grows with publication year.

npapers = df_papers_inst_top.shape[0]
na = []
nc = []
ye = []
for i in range(npapers):
    df = df_papers_inst_top.iloc[i]
    nauths = len(df.authors)
    ncitas = df.citation_count
    year = df.year
    r = random.random()*0.6 - 0.3
    na.append(nauths+r)
    r = random.random()*0.6 - 0.3
    nc.append(ncitas+1+r)
    ye.append(int(year))
y = ((np.array(ye)-1980)*0.2)**2.6

fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot()
ax.scatter(na, nc, s=y, color=(0, 0, 1, 0.3))
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('Number of authors')
ax.set_ylabel('Number of citations + 1')
# NOTE(review): no artists carry labels here, so this legend call draws
# nothing (matplotlib warns); kept as in the original.
ax.legend(loc='center left', bbox_to_anchor=(1.1, 0.5), labelspacing=3)

fout = ("../plt/nauth_ncitas_year.png")
fig.savefig(fout)
plt.close()

# publications top and total ------------------------------------------------
# Per-author totals: all works vs. top-journal papers, with the y=x line.

tod = []
top = []
auth_names = list(df_papers_inst.author1.unique())
for a in auth_names:
    dfa = df_papers_inst[df_papers_inst['author1'].isin([a])]
    dft = df_papers_inst_top[df_papers_inst_top['author1'].isin([a])]
    tod.append(dfa.shape[0])
    top.append(dft.shape[0])

fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot()
ax.scatter(tod, top)
m = max(tod)
ax.plot([0,m],[0,m])
ax.set_title('all works vs. top papers')
ax.set_xlabel('all works')
ax.set_ylabel('papers top')

fout = ("../plt/top_vs_all.png")
fig.savefig(fout)
plt.close()

# Number of authors per year ------------------------------------------------
# (the original header said "violin plot", but a log-scale scatter is drawn)

nauth = []
for i, p in df_papers_inst.iterrows():
    nauth.append(len(p.authors))

fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot()
years = [int(y) for y in df_papers_inst.year.values]
ax.scatter(years, nauth)
ax.set_yscale('log')
ax.set_title('number of authors per year')
ax.set_xlabel('year')
ax.set_ylabel('N authors')

fout = ("../plt/year_nauth.png")
fig.savefig(fout)
plt.close()
| 24.105096 | 77 | 0.631788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,461 | 0.315432 |
8749319477407cbd8ae7a058e2b902a1251b535a | 58,443 | py | Python | p2p/__init__.py | jperezlatimes/p2p-python | 193050303c762b4e46bd1f7637795e19178fd025 | [
"MIT"
] | 2 | 2017-08-05T12:25:17.000Z | 2017-11-21T05:00:19.000Z | p2p/__init__.py | jperezlatimes/p2p-python | 193050303c762b4e46bd1f7637795e19178fd025 | [
"MIT"
] | 28 | 2015-10-16T19:09:58.000Z | 2019-02-28T21:09:54.000Z | p2p/__init__.py | jperezlatimes/p2p-python | 193050303c762b4e46bd1f7637795e19178fd025 | [
"MIT"
] | 5 | 2015-10-15T22:56:10.000Z | 2018-11-13T20:44:39.000Z | from builtins import str
from builtins import range
from builtins import object
import os
import re
import json
import math
import logging
import requests
import warnings
from time import mktime
from copy import deepcopy
from datetime import datetime
from datetime import date
from p2p import utils
from p2p.cache import NoCache
from p2p.decorators import retry
from .adapters import TribAdapter
from .filters import get_custom_param_value
from wsgiref.handlers import format_date_time
# Python 2/3 string-type compatibility shim: probe for the Python 2
# `unicode` builtin (EAFP) and define str/unicode/bytes/basestring
# module-level aliases so the rest of the module can use one set of
# names on either interpreter.
try:
    unicode = unicode
except NameError:
    # 'unicode' is undefined, must be Python 3
    str = str
    unicode = str
    bytes = bytes
    basestring = (str,bytes)
else:
    # 'unicode' exists, must be Python 2
    str = str
    unicode = unicode
    bytes = str
    basestring = basestring

# Exception types raised by this client (defined in p2p/errors.py).
from .errors import (
    P2PException,
    P2PFileError,
    P2PSlugTaken,
    P2PNotFound,
    P2PForbidden,
    P2PSearchError,
    P2PTimeoutError,
    P2PRetryableError,
    P2PFileURLNotFound,
    P2PInvalidFileType,
    P2PEncodingMismatch,
    P2PUnknownAttribute,
    P2PPhotoUploadError,
    P2PInvalidAccessDefinition,
    P2PUniqueConstraintViolated
)

# Module-level logger for the whole package.
log = logging.getLogger('p2p')
def get_connection():
    """
    Return a ready-to-use P2P connection, auto-discovering its settings.

    Django settings are tried first (P2P_API_URL, P2P_API_KEY, DEBUG,
    plus the optional P2P_PRESERVE_EMBEDDED_TAGS and
    P2P_IMAGE_SERVICES_URL). When Django is not installed, the same
    names are read from shell environment variables instead::

        export P2P_API_KEY=your_p2p_api_key
        export P2P_API_URL=url_of_p2p_endpoint
        # Optional
        export P2P_API_DEBUG=plz # display an http log
        export P2P_IMAGE_SERVICES_URL=url_of_image_services_endpoint

    Raises P2PException when neither source provides credentials.
    Construct a P2P object directly if you need explicit configuration.
    """
    try:
        from django.conf import settings
        return P2P(
            url=settings.P2P_API_URL,
            auth_token=settings.P2P_API_KEY,
            debug=settings.DEBUG,
            preserve_embedded_tags=getattr(
                settings, 'P2P_PRESERVE_EMBEDDED_TAGS', True),
            image_services_url=getattr(
                settings, 'P2P_IMAGE_SERVICES_URL', None),
        )
    except ImportError:
        # No Django available: fall back to the shell environment.
        env = os.environ
        if 'P2P_API_KEY' in env:
            options = {
                'auth_token': env['P2P_API_KEY'],
                'debug': env.get('P2P_API_DEBUG', False),
                'preserve_embedded_tags': env.get(
                    'P2P_PRESERVE_EMBEDDED_TAGS', True),
                'image_services_url': env.get(
                    'P2P_IMAGE_SERVICES_URL', None),
            }
            # The endpoint URL is optional; P2P has a default.
            if env.get('P2P_API_URL', None):
                options['url'] = env['P2P_API_URL']
            return P2P(**options)
    raise P2PException(
        "No connection settings available. Please put settings "
        "in your environment variables or your Django config"
    )
class P2P(object):
"""
Get a connection to the P2P Content Services API::
p2p = P2P(my_p2p_url, my_auth_token)
You can send debug messages to stderr by using the keyword::
p2p = P2P(my_p2p_url, my_auth_token, debug=True)
A P2P object can cache the API calls you make. Pass a new Cache_
object with the cache keyword::
p2p = P2P(my_p2p_url, my_auth_token, debug=True
cache=DictionaryCache())
A DictionaryCache just caches in a python variable. If you're using
Django caching::
p2p = P2P(my_p2p_url, my_auth_token, debug=True
cache=DjangoCache())
"""
def __init__(
self,
auth_token,
url="http://content-api.p2p.tribuneinteractive.com",
debug=False,
cache=NoCache(),
image_services_url=None,
product_affiliate_code='lanews',
source_code='latimes',
webapp_name='tRibbit',
state_filter='working,live,pending,copyready',
preserve_embedded_tags=True
):
self.config = {
'P2P_API_ROOT': url,
'P2P_API_KEY': auth_token,
'IMAGE_SERVICES_URL': image_services_url,
}
self.cache = cache
self.debug = debug
self.product_affiliate_code = product_affiliate_code
self.source_code = source_code
self.webapp_name = webapp_name
self.state_filter = state_filter
self.preserve_embedded_tags = preserve_embedded_tags
self.default_filter = {
'product_affiliate': self.product_affiliate_code,
'state': self.state_filter
}
self.default_content_item_query = {
'include': [
'web_url',
'section',
'related_items',
'content_topics',
'embedded_items'
],
'filter': self.default_filter
}
self.content_item_defaults = {
"content_item_type_code": "blurb",
"product_affiliate_code": self.product_affiliate_code,
"source_code": self.source_code,
"content_item_state_code": "live",
}
self.collection_defaults = {
"productaffiliate_code": self.product_affiliate_code,
}
self.s = requests.Session()
self.s.mount('https://', TribAdapter())
def get_content_item(self, slug, query=None, force_update=False):
"""
Get a single content item by slug.
Takes an optional `query` parameter which is dictionary containing
parameters to pass along in the API call. See the P2P API docs
for details on parameters.
Use the parameter `force_update=True` to update the cache for this
item and query.
"""
if not query:
query = self.default_content_item_query
ci = self.cache.get_content_item(slug=slug, query=query)
if ci is None:
j = self.get("/content_items/%s.json" % (slug), query)
ci = j['content_item']
self.cache.save_content_item(ci, query=query)
elif force_update:
j = self.get("/content_items/%s.json" % (slug),
query, if_modified_since=ci['last_modified_time'])
if j:
ci = j['content_item']
self.cache.save_content_item(ci, query=query)
return ci
    def get_multi_content_items(self, ids, query=None, force_update=False):
        """
        Get a bunch of content items at once. We need to use the content items
        ids to use this API call.

        The API only allows 25 items to be requested at once, so this function
        breaks the list of ids into groups of 25 and makes multiple API calls.

        Takes an optional `query` parameter which is dictionary containing
        parameters to pass along in the API call. See the P2P API docs
        for details on parameters.

        Returns a list the same length/order as `ids`; entries are None for
        items the API reported as 404.
        """
        ret = list()
        ids_query = list()
        # Epoch baseline (2000-01-01) used as If-Modified-Since for cache
        # misses, so the API always returns them.
        if_modified_since = format_date_time(
            mktime(datetime(2000, 1, 1).utctimetuple()))

        if not query:
            query = self.default_content_item_query

        # Pull as many items out of cache as possible
        ret = [
            self.cache.get_content_item(
                id=i, query=query) for i in ids
        ]

        assert len(ids) == len(ret)

        # Go through what we had in cache and see if we need to
        # retrieve anything
        for i in range(len(ret)):
            if ret[i] is None:
                ids_query.append({
                    "id": ids[i],
                    "if_modified_since": if_modified_since,
                })
            elif force_update:
                # Cached copy exists: ask only for changes since its
                # last_modified_time (expects a datetime -- see utctimetuple).
                ids_query.append({
                    "id": ids[i],
                    "if_modified_since": format_date_time(
                        mktime(ret[i]['last_modified_time'].utctimetuple())),
                })

        if len(ids_query) > 0:
            # We can only request 25 things at a time
            # so we're gonna break up the list into batches
            max_items = 25

            # we have to use <gasp>MATH</gasp>
            num_items = len(ids_query)

            # how many batches of max_items do we have?
            num_batches = int(
                math.ceil(float(num_items) / float(max_items)))

            # make a list of indices where we should break the item list
            index_breaks = [j * max_items for j in range(num_batches)]

            # break up the items into batches of 25
            batches = [ids_query[i:i + max_items] for i in index_breaks]

            resp = list()
            for items in batches:
                multi_query = query.copy()
                multi_query['content_items'] = items

                resp += self.post_json(
                    '/content_items/multi.json', multi_query)

            new_items = list()
            remove_ids = list()
            # Responses come back in request order; pop them off in step
            # with the ids that needed fetching.
            for i in range(len(ret)):
                if ret[i] is None or force_update:
                    new_item = resp.pop(0)
                    assert ids[i] == new_item['id']
                    if new_item['status'] == 200:
                        ret[i] = new_item['body']['content_item']
                        new_items.append(new_item['body']['content_item'])
                    elif new_item['status'] == 404:
                        # Item gone: report None and drop the cache entry.
                        ret[i] = None
                        remove_ids.append(ids[i])
                    elif new_item['status'] == 304:
                        # Not modified: keep the cached copy already in ret.
                        continue
                    else:
                        raise P2PException(
                            '%(status)s fetching %(id)s' % new_item)

            if len(new_items) > 0:
                for i in new_items:
                    self.cache.save_content_item(i, query=query)

            # Cache backends may not support removal; that's fine.
            try:
                if len(remove_ids) > 0:
                    for i in remove_ids:
                        self.cache.remove_content_item(id=i)
            except NotImplementedError:
                pass

        return ret
def update_content_item(self, payload, slug=None):
    """
    Update an existing content item.

    Accepts a dict of content item fields, either flat or nested under a
    'content_item' key. The slug used for the API call comes from the
    optional `slug` argument, falling back to the payload's 'slug' key
    (useful when the payload has no slug, or carries a changed one).
    Returns the parsed API response.
    """
    item = payload.copy()
    if 'content_item' in item:
        # Nested payload: unwrap the inner item, keep the outer keys
        item = item['content_item'].copy()
        body = payload.copy()
    else:
        body = {'content_item': item}
    # The slug identifies the resource, so it is stripped from the fields
    if slug is None:
        slug = item.pop('slug')
    # web_url is not writable via this endpoint; drop it if present
    item.pop('web_url', None)
    # Re-attach the cleaned item to the request body
    body['content_item'] = item
    endpoint = "/content_items/%s.json" % slug
    if not self.preserve_embedded_tags:
        endpoint += "?preserve_embedded_tags=false"
    response = self.put_json(endpoint, body)
    # Evict any cached copy so subsequent reads are fresh
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
    return response
def hide_right_rail(self, slug):
    """
    Disable the right-rail column ad on an HTML story identified by slug.
    """
    payload = {
        'custom_param_data': {'htmlstory-rhs-column-ad-enable': 'false'},
    }
    return self.update_content_item(payload, slug=slug)
def show_right_rail(self, slug):
    """
    Enable the right-rail column ad on an HTML story identified by slug.
    """
    payload = {
        'custom_param_data': {'htmlstory-rhs-column-ad-enable': 'true'},
    }
    return self.update_content_item(payload, slug=slug)
def show_to_robots(self, slug):
    """
    Clear the robots metadata on the item so crawlers can index it
    (removes any noindex/nofollow directive).
    """
    payload = {'custom_param_data': {'metadata-robots': ''}}
    return self.update_content_item(payload, slug=slug)
def hide_to_robots(self, slug):
    """
    Set the robots metadata on the item to noindex/nofollow so crawlers
    skip it.
    """
    payload = {'custom_param_data': {'metadata-robots': 'noindex, nofollow'}}
    return self.update_content_item(payload, slug=slug)
def search_topics(self, name):
    """
    Search P2P for topics whose name starts with `name`.
    """
    return self.get("/topics.json", {'name': name, 'name_contains': True})
def add_topic(self, topic_id, slug=None):
    """
    Attach topic id(s) to a content item.

    topic_id: the topic id value(s) sent as 'add_topic_ids' to the API.
        When `slug` is not given, this must be a dict containing a
        'slug' key, which is popped and used to address the item.
    slug: optional content item slug.

    (The previous docstring was copy-pasted from update_content_item
    and described the wrong operation.)
    """
    if slug is None:
        slug = topic_id.pop('slug')
    d = {'add_topic_ids': topic_id}
    self.put_json("/content_items/%s.json" % slug, d)
    # Evict the cached item so the topic change is visible on next read
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
def remove_topic(self, topic_id, slug=None):
    """
    Detach topic id(s) from a content item.

    topic_id: the topic id value(s) sent as 'remove_topic_ids' to the
        API. When `slug` is not given, this must be a dict containing a
        'slug' key, which is popped and used to address the item.
    slug: optional content item slug.

    (The previous docstring was copy-pasted from update_content_item
    and described the wrong operation.)
    """
    if slug is None:
        slug = topic_id.pop('slug')
    d = {'remove_topic_ids': topic_id}
    self.put_json("/content_items/%s.json" % slug, d)
    # Evict the cached item so the topic change is visible on next read
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
def create_content_item(self, payload):
    """
    Create a new content item.

    `payload` is a dict of content item fields, either flat or nested
    under a 'content_item' key. Instance-level defaults are applied
    first and overridden by the supplied fields. Returns the parsed
    API response.
    """
    merged = self.content_item_defaults.copy()
    body = payload.copy()
    if 'content_item' in body:
        # Nested structure: merge defaults under the inner item,
        # preserving any extra top-level keys
        merged.update(body['content_item'].copy())
        body['content_item'] = merged
    else:
        # Flat structure: wrap the merged fields ourselves
        merged.update(body)
        body = {'content_item': merged}
    endpoint = '/content_items.json'
    if not self.preserve_embedded_tags:
        endpoint += "?preserve_embedded_tags=false"
    return self.post_json(endpoint, body)
def clone_content_item(self, slug, clone_slug, keep_embeds=False, keep_relateds=False):
    """
    Clone a P2P content item into the current market.

    slug: slug of the source content item to clone.
    clone_slug: slug to assign to the new clone.
    keep_embeds: carry the source's embedded items over to the clone.
    keep_relateds: carry the source's related items over to the clone.

    Returns the id of the newly created (working-state) content item.
    Raises P2PNotFound if creation did not yield an id.
    """
    # Extra include vars so the fetched item carries everything the
    # clone payload needs (contributors, embeds, relateds, params, ...)
    query = {
        "include": [
            "contributors",
            "related_items",
            "embedded_items",
            "programmed_custom_params",
            "web_url",
            "geocodes"
        ],
    }
    # Get the full fancy content item
    content_item = self.get_content_item(slug, query)
    # Datetime string format expected by the create endpoint
    fmt = '%Y-%m-%d %I:%M %p %Z'
    # Format display and publish time
    display_time_string = ''
    # NOTE(review): publish_time_string is assigned but never used below
    publish_time_string = ''
    if content_item.get('display_time'):
        display_time_string = content_item.get('display_time').strftime(fmt)
    # Format the corrections timestamp (may arrive as a string or datetime)
    corrections_date = get_custom_param_value(content_item, 'corrections_date', default_value='')
    # NOTE(review): `basestring` is the Python 2 name — confirm this module
    # targets Python 2 or imports a compatibility alias
    if not isinstance(corrections_date, basestring):
        corrections_date = corrections_date.strftime(fmt)
    # The story payload: new slug, cloned fields, forced into 'working'
    # state with the current affiliate code
    payload = {
        'slug': clone_slug,
        'title': content_item.get('title'),
        'titleline': content_item.get('titleline'),
        'kicker_id': content_item.get('kicker_id'),
        'seotitle': content_item.get('seotitle'),
        'byline': '',
        'body': content_item.get('body'),
        'dateline': content_item.get('dateline'),
        'seodescription': content_item.get('seodescription'),
        'seo_keyphrase': content_item.get('seo_keyphrase'),
        'content_item_state_code': 'working',
        'content_item_type_code': content_item.get('content_item_type_code'),
        'display_time': display_time_string,
        'product_affiliate_code': self.product_affiliate_code,
        'source_code': content_item.get('source_code'),
        'canonical_url': content_item.get("web_url"),
    }
    # Update the custom param data carried over from the source item
    payload['custom_param_data'] = {
        'enable-content-commenting': get_custom_param_value(content_item, 'enable-content-commenting'),
        'leadart-size': get_custom_param_value(content_item, 'lead_image_size'),
        'story-summary': get_custom_param_value(content_item, 'seodescription', default_value=''),
        'article-correction-text': get_custom_param_value(content_item, 'corrections_text', default_value=''),
        'article-correction-timestamp': corrections_date,
        'snap-user-ids': get_custom_param_value(content_item, 'snap_user_ids', default_value='')
    }
    # HTML Story specific custom params
    if payload['content_item_type_code'] == 'htmlstory':
        html_params = {
            'htmlstory-rhs-column-ad-enable': get_custom_param_value(content_item, 'htmlstory-rhs-column-ad-enable'),
            'htmlstory-headline-enable': get_custom_param_value(content_item, 'htmlstory-headline-enable'),
            'htmlstory-byline-enable': get_custom_param_value(content_item, 'htmlstory-byline-enable'),
            'disable-publication-date': get_custom_param_value(content_item, 'disable-publication-date')
        }
        payload['custom_param_data'].update(html_params)
    # Get alt_thumbnail_url and old_slug for thumbnail logic below
    alt_thumbnail_url = content_item.get('alt_thumbnail_url')
    # Only try to update if alt_thumbnail_url is a thing
    if content_item.get('alt_thumbnail_url', None):
        # data must be nested in this odd photo_upload key
        # if source code is available then it will be placed on the payload, else it will
        # default to the current users product affiliate source code
        payload['photo_upload'] = {
            'alt_thumbnail': {
                'url': content_item.get('alt_thumbnail_url'),
                "source_code": content_item.get('alt_thumb_source_id', self.source_code)
            }
        }
    if keep_embeds:
        # Compile the embedded items (id plus display overrides only)
        payload['embedded_items'] = []
        for item in content_item.get('embedded_items'):
            embed_item = {
                'embeddedcontentitem_id': item['embeddedcontentitem_id'],
                'headline': item['headline'],
                'subheadline': item['subheadline'],
                'brief': item['brief'],
            }
            payload['embedded_items'].append(embed_item)
    if keep_relateds:
        # Compile the related items (id plus display overrides only)
        payload['related_items'] = []
        for item in content_item.get('related_items'):
            related_item = {
                'relatedcontentitem_id': item['relatedcontentitem_id'],
                'headline': item['headline'],
                'subheadline': item['subheadline'],
                'brief': item['brief'],
            }
            payload['related_items'].append(related_item)
    # Map the source byline onto staff/free-form contributors; if any
    # were found, send contributors instead of a byline string
    contributors = self._get_cloned_contributors(content_item)
    if contributors:
        del payload['byline']
        payload['contributors'] = contributors
    # Clone the thing; the response nests the item under 'story' or
    # 'html_story' depending on type
    clone = self.create_content_item(payload)
    clone = clone.get('story', clone.get('html_story'))
    # if we have successfully cloned the content item, continue on
    if not clone.get('id'):
        raise P2PNotFound
    return clone['id']
def _get_cloned_contributors(self, content_item):
    """
    Build a contributors list for a clone from a content item's byline.

    Each comma-separated byline entry becomes a free-form contributor,
    unless it case-insensitively matches the title of an entry in the
    item's contributors array, in which case the staff slug is used
    instead.
    """
    contributors = []
    byline = content_item.get('byline', None)
    if byline:
        for raw_name in byline.split(','):
            name = raw_name.strip()
            # Start with a free-form entry; a staff match replaces it
            entry = {"free_form_name": name}
            for wrapper in content_item.get('contributors'):
                # Each array element nests the data one level down
                staff = wrapper['contributor']
                # A later match overrides an earlier one (no break),
                # mirroring the original behavior
                if name.lower() in staff['title'].lower():
                    entry = {'slug': staff['slug']}
            contributors.append(entry)
    return contributors
def delete_content_item(self, slug):
    """
    Delete a content item from P2P; returns True when the API reports
    the item was destroyed successfully, False otherwise.
    """
    result = self.delete('/content_items/%s.json' % slug)
    # Drop any cached copy of the now-deleted item
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
    return "destroyed successfully" in result
def create_or_update_content_item(self, content_item):
    """
    Try to update a content item; if the update raises P2PException,
    create it instead.

    Returns a (created, response) tuple where `created` is True when a
    new item had to be created.

    TODO: swap the tuple that is returned.
    """
    try:
        return (False, self.update_content_item(content_item))
    except P2PException:
        return (True, self.create_content_item(content_item))
def junk_content_item(self, slug):
    """
    Flip a content item's state to 'junk'.
    """
    payload = {'slug': slug, 'content_item_state_code': 'junk'}
    return self.update_content_item(payload)
def content_item_exists(self, slug):
    """
    Return True if the slug exists in content services, False when the
    exists endpoint answers 404.
    """
    try:
        self.get("/content_items/%s/exists" % slug)
    except P2PNotFound:
        return False
    return True
def get_kickers(self, params):
    """
    Fetch the kickers for an affiliate using the given query params.
    """
    return self.get("/kickers.json", params)
def search(self, params):
    """
    Run a P2P content item search with the given query parameters dict.
    """
    return self.get("/content_items/search.json", params)
def search_collections(self, search_token, limit=20, product_affiliate_code=None):
    """
    Search P2P collections by term.

    search_token: the text to search for.
    limit: maximum number of results to return.
    product_affiliate_code: optional override of the default owner code.

    Returns the list of matching collections.
    """
    # Start from the instance's collection defaults (deep copy so the
    # shared defaults are never mutated)
    query = deepcopy(self.collection_defaults)
    query['search_token'] = search_token
    query['limit'] = limit
    if product_affiliate_code:
        query['productaffiliate_code'] = product_affiliate_code
    response = self.get('/collections/search.json', query)
    return response['search_results']['collections']
def get_collection(self, code, query=None, force_update=False):
    """
    Return the data for a collection (use get_collection_layout for the
    items it contains). Served from the cache unless the entry is
    missing or force_update is set.
    """
    if query is None:
        query = {'filter': self.default_filter}
    collection = None
    if not force_update:
        collection = self.cache.get_collection(code, query=query)
    if collection is None:
        # Cache miss or forced refresh: hit the API and re-cache
        data = self.get('/collections/%s.json' % code, query)
        collection = data['collection']
        self.cache.save_collection(collection, query=query)
    return collection
def create_collection(self, data):
    """
    Create a new collection. Takes a single argument which should be a
    dictionary of collection data.

    Required keys: 'code', 'name', 'section_path'.
    Optional keys:
        'collection_type_id': numeric type id     # default 1
        'last_modified_time': datetime            # defaults to utcnow
        'product_affiliate_code': 'chinews'       # default to instance setting

    Returns the created collection dict; raises P2PException when the
    response carries no 'collection' key.

    Example:

        p2p.create_collection({
            'code': 'my_new_collection',
            'name': 'My new collection',
            'section_path': '/news/local',
        })
    """
    ret = self.post_json(
        '/collections.json?id=%s' % data['code'],
        {
            'collection': {
                'code': data['code'],
                'name': data['name'],
                'collectiontype_id': data.get('collection_type_id', 1),
                'last_modified_time': data.get('last_modified_time',
                                               datetime.utcnow()),
                'sequence': 999
            },
            'product_affiliate_code': data.get(
                'product_affiliate_code', self.product_affiliate_code),
            'section_path': data['section_path']
        })
    if 'collection' in ret:
        return ret['collection']
    else:
        raise P2PException(ret)
def delete_collection(self, code):
    """
    Delete a collection and drop it (and its layout) from the cache.
    """
    result = self.delete('/collections/%s.json' % code)
    try:
        self.cache.remove_collection(code)
        self.cache.remove_collection_layout(code)
    except NotImplementedError:
        pass
    return result
def override_layout(self, code, content_item_slugs):
    """
    Replace a collection's layout with the given list of slugs.
    """
    body = {
        'items': content_item_slugs,
        'replace_layout': 'true'
    }
    result = self.put_json(
        '/collections/override_layout.json?id=%s' % code, body)
    # Invalidate cached collection data affected by the override
    try:
        self.cache.remove_collection(code)
        self.cache.remove_collection_layout(code)
    except NotImplementedError:
        pass
    return result
def push_into_collection(self, code, content_item_slugs):
    """
    Prepend a list of content item slugs to the top of a collection.
    """
    # Tolerate a bare slug, but warn: the API expects a list
    if not isinstance(content_item_slugs, list):
        log.warning("[P2P][push_into_collection] content_item_slugs is not a list: %s" % content_item_slugs)
        content_item_slugs = [content_item_slugs]
    result = self.put_json(
        '/collections/prepend.json?id=%s' % code,
        {'items': content_item_slugs})
    # Invalidate cached collection data affected by the push
    try:
        self.cache.remove_collection(code)
        self.cache.remove_collection_layout(code)
    except NotImplementedError:
        pass
    return result
def suppress_in_collection(
    self,
    code,
    content_item_slugs,
    affiliates=None
):
    """
    Suppress a list of slugs in the specified collection.

    affiliates: list of affiliate codes the suppression applies to;
        defaults to the instance's product affiliate code.

    Fix: the previous signature used a mutable default (`affiliates=[]`)
    that was mutated with .append, so the default list accumulated
    affiliate codes across calls.
    """
    if not affiliates:
        affiliates = [self.product_affiliate_code]
    ret = self.put_json(
        '/collections/suppress.json?id=%s' % code,
        {'items': [{
            'slug': slug, 'affiliates': affiliates
        } for slug in content_item_slugs]})
    # Invalidate cached collection data affected by the suppression
    try:
        self.cache.remove_collection(code)
        self.cache.remove_collection_layout(code)
    except NotImplementedError:
        pass
    return ret
def remove_from_collection(self, code, content_item_slugs):
    """
    Remove a list of content item slugs from a collection.

    (The previous docstring was copy-pasted from push_into_collection
    and described prepending instead of removal.)
    """
    # Enforce that a list of slugs is passed in (not a string)
    if not isinstance(content_item_slugs, list):
        log.warning("[P2P][remove_from_collection] content_item_slugs is not a list: %s" % content_item_slugs)
        content_item_slugs = [content_item_slugs]
    ret = self.put_json(
        '/collections/remove_items.json?id=%s' % code,
        {'items': content_item_slugs})
    # Invalidate cached collection data affected by the removal
    try:
        self.cache.remove_collection(code)
        self.cache.remove_collection_layout(code)
    except NotImplementedError:
        pass
    return ret
def insert_position_in_collection(
    self,
    code,
    slug,
    affiliates=None
):
    """
    Insert a single slug at position 1 of the specified collection.

    affiliates: optional list of affiliate codes; defaults to the
        instance's product affiliate code. NOTE(review): the current
        request payload does not include it — confirm whether the API
        expects it here.

    Fixes: the previous signature used a mutable default
    (`affiliates=[]`) that was mutated with .append across calls, and
    the docstring wrongly described suppression.
    """
    if not affiliates:
        affiliates = [self.product_affiliate_code]
    ret = self.put_json(
        '/collections/insert.json?id=%s' % code,
        {'items': [{
            'slug': slug, 'position': 1
        }]})
    # Invalidate cached collection data affected by the insert
    try:
        self.cache.remove_collection(code)
        self.cache.remove_collection_layout(code)
    except NotImplementedError:
        pass
    return ret
def append_contributors_to_content_item(self, slug, contributors):
    """
    Append editorial staff slugs to a content item's contributors array
    for advanced-byline display.

    `contributors` is a list of dicts of the form {"slug": "..."}, e.g.::

        [{"slug": "contributor_to_append_1"},
         {"slug": "contributor_to_append_2"}]

    Deprecated: scheduled for removal in version 2.1.
    """
    warnings.warn('append_contributors_to_content_item will be removed in version 2.1', DeprecationWarning)
    result = self.put_json(
        '/content_items/%s/append_contributors.json' % slug,
        {'items': contributors})
    # Evict the cached item so the contributor change is visible
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
    return result
def remove_contributors_from_content_item(self, slug, contributors):
    """
    Remove editorial staff slugs from a content item's contributors
    array. Takes the same {"slug": ...} dict list shape as
    append_contributors_to_content_item().
    """
    result = self.put_json(
        '/content_items/%s/remove_contributors.json' % slug,
        {'items': contributors})
    # Evict the cached item so the contributor change is visible
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
    return result
def get_content_item_revision_list(self, slug, page):
    """
    Return one page of the revision list for `slug`.

    page: integer page number (interpolated with %d into the URL).
    """
    result = self.get(
        '/content_items/%s/revisions.json?page=%d' % (slug, page))
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
    return result
def get_content_item_revision_number(self, slug, number, query=None, related_items_query=None):
    """
    Fetch a single revision of a content item, with its related items
    fully embedded.

    slug: content item slug.
    number: revision number to fetch (interpolated with %d).
    query / related_items_query: include/filter dicts; both default to
        self.default_content_item_query.

    Returns the revision's content item dict; each entry of its
    'related_items' list gains a 'content_item' key holding the full
    related item (or None when it could not be retrieved).
    """
    if query is None:
        query = self.default_content_item_query
    if related_items_query is None:
        related_items_query = self.default_content_item_query
    content_item = self.get(
        '/content_items/%s/revisions/%d.json'
        % (slug, number), query)
    # Drop unnecessary outer layer
    content_item = content_item['content_item']
    # We have our content item, now loop through the related
    # items, build a list of content item ids, and retrieve them all
    ids = [item_stub['relatedcontentitem_id']
           for item_stub in content_item['related_items']
           ]
    related_items = self.get_multi_content_items(
        ids, related_items_query, False)
    # now that we've retrieved all the related items, embed them into
    # the original content item dictionary to make it fancy
    for item_stub in content_item['related_items']:
        item_stub['content_item'] = None
        for item in related_items:
            # no break here: if an id repeats, the last match wins
            if (
                item is not None and
                item_stub['relatedcontentitem_id'] == item['id']
            ):
                item_stub['content_item'] = item
    # Evict the cached copy so subsequent reads aren't stale
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
    return content_item
def push_into_content_item(self, slug, content_item_slugs):
    """
    Prepend a list of content item slugs to a content item's related
    items list.
    """
    result = self.put_json(
        '/content_items/prepend_related_items.json?id=%s' % slug,
        {'items': content_item_slugs})
    # Evict the cached item so the related-items change is visible
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
    return result
def push_embed_into_content_item(self, slug, content_item_slugs, size="S"):
    """
    Append items to a content item's embedded items list.

    `content_item_slugs` may be a list of slug strings (every embed
    gets the default `size`)::

        client.push_embed_into_content_item(['slug-1', 'slug-2'], size='L')

    or a list of dicts carrying a 'slug' and an optional per-embed
    'size'::

        client.push_embed_into_content_item([
            dict(slug='slug-1', size='S'),
            dict(slug='slug-2', size='L'),
        ])
    """
    items = []
    for position, entry in enumerate(content_item_slugs):
        if isinstance(entry, str):
            items.append(dict(
                slug=entry,
                contentitem_size=size,
                position=position
            ))
        elif isinstance(entry, dict):
            items.append(dict(
                slug=entry['slug'],
                contentitem_size=entry.get('size', size),
                position=position
            ))
        else:
            raise ValueError("content_item_slugs are bad data")
    ret = self.put_json(
        '/content_items/append_embedded_items.json?id=%s' % slug,
        {'items': items}
    )
    # Evict the cached item so the embed change is visible
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
    return ret
def remove_from_content_item(self, slug, content_item_slugs):
    """
    Remove related items from a content item. Takes the content item's
    slug and a list of one or more related item slugs.
    """
    result = self.put_json(
        '/content_items/remove_related_items.json?id=%s' % slug,
        {'items': content_item_slugs})
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
    return result
def remove_embed_from_content_item(self, slug, content_item_slugs):
    """
    Remove embedded items from a content item. Takes the content item's
    slug and a list of one or more embedded item slugs.
    """
    result = self.put_json(
        '/content_items/remove_embedded_items.json?id=%s' % slug,
        {'items': content_item_slugs})
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
    return result
def insert_into_content_item(self, slug, content_item_slugs, position=1):
    """
    Insert content item slugs into a content item's related items list,
    starting at `position` (1-based).
    """
    items = [
        {'slug': ci_slug, 'position': position + offset}
        for offset, ci_slug in enumerate(content_item_slugs)
    ]
    ret = self.put_json(
        '/content_items/insert_related_items.json?id=%s' % slug,
        {'items': items})
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
    return ret
def append_into_content_item(self, slug, content_item_slugs):
    """
    Append a list of content item slugs after the current end of a
    content item's related items list.
    """
    current = self.get_content_item(slug)
    # Insert just past the last existing related item
    insert_at = len(current['related_items']) + 1
    ret = self.insert_into_content_item(
        slug, content_item_slugs, position=insert_at)
    try:
        self.cache.remove_content_item(slug)
    except NotImplementedError:
        pass
    return ret
def get_collection_layout(self, code, query=None, force_update=False):
    """
    Return the layout (item list) of a collection, using the cache
    unless the entry is missing or force_update is set.
    """
    if not query:
        query = {
            'include': 'items',
            'filter': self.default_filter
        }
    layout = None
    if not force_update:
        layout = self.cache.get_collection_layout(code, query=query)
    if layout is None:
        # Cache miss or forced refresh: hit the API and re-cache
        resp = self.get('/current_collections/%s.json' % code, query)
        layout = resp['collection_layout']
        layout['code'] = code  # response is missing this
        self.cache.save_collection_layout(layout, query=query)
    return layout
def get_fancy_collection(
    self,
    code,
    with_collection=False,
    limit_items=25,
    content_item_query=None,
    collection_query=None,
    include_suppressed=False,
    force_update=False
):
    """
    Make a few API calls to fetch all possible data for a collection
    and its content items. Returns a collection layout with
    extra 'collection' key on the layout, and a 'content_item' key
    on each layout item.

    code: collection code.
    with_collection: also fetch detailed collection data and attach it
        under the layout's 'collection' key.
    limit_items: truncate the layout to this many items (falsy = all).
    content_item_query / collection_query: include/filter dicts passed
        through to the underlying fetches.
    include_suppressed: keep layout items whose 'suppressed' value is
        greater than zero instead of dropping them.
    force_update: bypass the cache on every underlying fetch.
    """
    collection_layout = self.get_collection_layout(
        code, query=collection_query, force_update=force_update)
    if with_collection:
        # Do we want more detailed data about the collection?
        collection = self.get_collection(
            code, query=collection_query, force_update=force_update)
        collection_layout['collection'] = collection
    if limit_items:
        # We're only going to fetch limit_items number of things
        # so cut out the extra items in the content_layout
        collection_layout['items'] = \
            collection_layout['items'][:limit_items]
    # Process the list of collection layout items to gather ids to fetch,
    # and to remove suppressed items, if necessary.
    content_item_ids = list()
    remove_these = list()
    for ci in collection_layout['items']:
        if not include_suppressed and float(ci['suppressed']) > 0:
            remove_these.append(ci)
        else:
            content_item_ids.append(ci['contentitem_id'])
    # If we're not including suppressed items, remove them from the data
    if not include_suppressed:
        for ci in remove_these:
            collection_layout['items'].remove(ci)
    # Retrieve all the content_items, 25 at a time
    content_items = self.get_multi_content_items(
        content_item_ids, query=content_item_query,
        force_update=force_update)
    # Loop through the collection items and add the corresponding content
    # item data.
    for ci in collection_layout['items']:
        for ci2 in content_items:
            if ci['contentitem_id'] == ci2['id']:
                ci['content_item'] = ci2
                break
    return collection_layout
def get_fancy_content_item(
    self,
    slug,
    query=None,
    related_items_query=None,
    force_update=False
):
    """
    Fetch a content item and embed its full related items.

    Like get_content_item, but each entry of the item's 'related_items'
    list gains a 'content_item' key holding the complete related item
    (or None when it could not be retrieved).
    """
    if query is None:
        # deepcopy so appending 'related_items' below doesn't mutate
        # the shared default query
        query = deepcopy(self.default_content_item_query)
        query['include'].append('related_items')
    if related_items_query is None:
        related_items_query = self.default_content_item_query
    content_item = self.get_content_item(
        slug, query, force_update=force_update)
    # We have our content item, now loop through the related
    # items, build a list of content item ids, and retrieve them all
    ids = [item_stub['relatedcontentitem_id']
           for item_stub in content_item['related_items']]
    related_items = self.get_multi_content_items(
        ids, related_items_query, force_update=force_update)
    # now that we've retrieved all the related items, embed them into
    # the original content item dictionary to make it fancy
    for item_stub in content_item['related_items']:
        item_stub['content_item'] = None
        for item in related_items:
            # no break here: if an id repeats, the last match wins
            if (
                item is not None and
                item_stub['relatedcontentitem_id'] == item['id']
            ):
                item_stub['content_item'] = item
    return content_item
def get_section(self, path, query=None, force_update=False):
    """
    Return the collections configured for a section path, using the
    cache unless the entry is missing or force_update is set.
    """
    if query is None:
        query = {
            'section_path': path,
            'product_affiliate_code': self.product_affiliate_code,
            'include': 'default_section_path_collections'
        }
    section = None
    if not force_update:
        section = self.cache.get_section(path, query)
    if section is None:
        # Cache miss or forced refresh: hit the API and re-cache
        section = self.get('/sections/show_collections.json', query)
        self.cache.save_section(path, section, query)
    return section
def get_section_configs(self, path, query=None, force_update=False):
    """
    Return the webapp configs for a section path, using the cache
    unless the entry is missing or force_update is set.
    """
    if query is None:
        query = {
            'section_path': path,
            'product_affiliate_code': self.product_affiliate_code,
            'webapp_name': self.webapp_name
        }
    section = None
    if not force_update:
        section = self.cache.get_section_configs(path, query)
    if section is None:
        # Cache miss or forced refresh: hit the API and re-cache
        section = self.get('/sections/show_configs.json', query)
        self.cache.save_section_configs(path, section, query)
    return section
def get_fancy_section(self, path, force_update=False):
    """
    Return a section's config dict augmented with its fully fetched
    collections (under 'collections') and the section 'path'.

    Bug fix: force_update was previously passed positionally into
    get_section()/get_section_configs(), where it landed in the `query`
    parameter instead of `force_update` — so a forced refresh sent a
    boolean as the query and never actually bypassed the cache.
    """
    section = self.get_section(path, force_update=force_update)
    config = self.get_section_configs(path, force_update=force_update)
    collections = list()
    for c in section['results']['default_section_path_collections']:
        collections.append({
            'collection_type_code': c['collection_type_code'],
            'name': c['name'],
            'collection': self.get_fancy_collection(c['code'])
        })
    fancy_section = config['results']['section_config']
    fancy_section['collections'] = collections
    fancy_section['path'] = path
    return fancy_section
def get_nav(self, collection_code, domain=None):
    """
    Build a simple nav structure (text/url/slug/sub-nav/path dicts)
    from a navigation collection.

    domain: host used to absolutize relative URLs; scheme and slashes
        are stripped from it.

    Raises P2PException when a nav item has no usable URL.

    Bug fixes: the original used bare `raise` statements outside any
    except block (which produce "RuntimeError: No active exception to
    re-raise"), and crashed with AttributeError when called with the
    default domain=None.
    """
    nav = list()
    # Guard against domain=None before stripping scheme/slashes
    domain = (domain or '').replace(
        'http://', '').replace('https://', '').replace('/', '')
    top_level = self.get_collection_layout(collection_code)
    for item in top_level['items']:
        fancy_item = self.get_fancy_content_item(item['slug'])
        if 'url' not in fancy_item:
            raise P2PException(
                "Nav item '%s' has no url" % item['slug'])
        sub_nav = list()
        for sub_item in fancy_item['related_items']:
            if 'url' in sub_item['content_item']:
                url = sub_item['content_item']['url']
            elif 'web_url' in sub_item['content_item']:
                url = sub_item['content_item']['web_url']
            else:
                raise P2PException(
                    "Nav sub-item '%s' has no url or web_url"
                    % sub_item['slug'])
            if not url.startswith('http'):
                url = 'http://' + domain + url
            sub_nav.append({
                'text': sub_item['headline'] or
                sub_item['content_item']['title'],
                'url': url,
                'slug': sub_item['slug']
            })
        if fancy_item['url'].startswith('http'):
            url = fancy_item['url']
            path = url[url.find('/') + 1:url.rfind('/')]
        else:
            url = 'http://' + domain + fancy_item['url']
            # find('/', 7) skips past the 'http://' prefix
            path = url[url.find('/', 7) + 1:url.rfind('/')]
        nav.append({
            'text': fancy_item['title'],
            'url': url,
            'slug': fancy_item['slug'],
            'nav': sub_nav,
            'path': path
        })
    return nav
def get_source_product_affiliates(self, min_date='', max_date='', page=1):
    """
    Retrieve product affiliate sources modified within a date range.

    Dates must be of the format YYYY-MM-DDTHH:MM:SS<tz>. min_date
    defaults to the Unix epoch (1970), max_date defaults to today.

    Bug fix: the strftime pattern used %I (12-hour clock, no AM/PM
    marker) while the documented format is 24-hour HH; now uses %H.
    """
    # Default max_date to today if none given
    if not max_date:
        max_date = date.today().strftime("%Y-%m-%dT%H:%M:%S%Z")
    # Default min_date to the beginning of the epoch (1970)
    if not min_date:
        epoch = datetime.utcfromtimestamp(0)
        min_date = epoch.strftime("%Y-%m-%dT%H:%M:%S%Z")
    params = {
        'page': page,
        'minimum_date': min_date,
        'maximum_date': max_date
    }
    return self.get("/source_product_affiliates/multi.json", params)
def get_product_affiliates(self, name='', code=''):
    """
    Retrieve one or more affiliate sources by name or code.

    Precedence: a non-empty `name` wins (the special value 'all'
    requests everything); otherwise `code` is used; with neither given,
    the instance's default product affiliate code is queried. The
    endpoint accepts either 'code' or 'name' but not both.
    """
    if name == 'all':
        # Special case: empty name string returns everything
        params = {'name': ''}
    elif name:
        params = {'name': str(name)}
    elif code:
        params = {'code': str(code)}
    else:
        # Neither given: fall back to the default affiliate code
        params = {'code': self.product_affiliate_code}
    return self.get("/product_affiliates/multi.json", params)
# Utilities
def http_headers(self, content_type=None, if_modified_since=None):
    """
    Build the standard request headers for P2P API calls.

    content_type: optional value for the content-type header.
    if_modified_since: either a datetime (formatted into an HTTP date
        via mktime/format_date_time) or a preformatted string, used for
        the If-Modified-Since header.

    Fix: the datetime check used `type(x) == datetime`, which rejects
    datetime subclasses; isinstance is the idiomatic and safer test.
    """
    h = {'Authorization': 'Bearer %(P2P_API_KEY)s' % self.config}
    if content_type is not None:
        h['content-type'] = content_type
    if isinstance(if_modified_since, datetime):
        h['If-Modified-Since'] = format_date_time(
            mktime(if_modified_since.utctimetuple()))
    elif if_modified_since is not None:
        # Assume the caller already formatted the HTTP date string
        h['If-Modified-Since'] = if_modified_since
    return h
def _check_for_errors(self, resp, req_url):
    """
    Parses the P2P response, scanning and raising for exceptions. When an
    exception is raised, its message will contain the response url, a curl
    string of the request and a dictionary of response data.

    resp: the HTTP response object to inspect.
    req_url: the path originally requested (logged alongside resp.url).

    Returns the request_log dict for responses below status 400.
    Raises the most specific matching P2P* exception for 4xx/5xx
    responses, falling back to P2PException.
    """
    curl = ''
    request_log = {
        'REQ_URL': req_url,
        'REQ_HEADERS': self.http_headers(),
        'RESP_URL': resp.url,
        'STATUS': resp.status_code,
        'RESP_BODY': resp.content,
        'RESP_HEADERS': resp.headers,
        # The time taken between sending the first byte of
        # the request and finishing parsing the response headers
        'SECONDS_ELAPSED': resp.elapsed.total_seconds()
    }
    if self.debug:
        # Only build the (expensive) curl reproduction when debugging
        curl = utils.request_to_curl(resp.request)
        log.debug("[P2P][RESPONSE] %s" % request_log)
    resp_content = self.convert_response_bytes_to_string(resp)
    if resp.status_code >= 500:
        # Map known server-side failure messages to specific exceptions;
        # anything unrecognized falls through to the generic P2PException
        try:
            if u'ORA-00001: unique constraint' in resp_content:
                raise P2PUniqueConstraintViolated(resp.url, request_log, \
                    curl)
            elif u'incompatible encoding regexp match' in resp_content:
                raise P2PEncodingMismatch(resp.url, request_log, curl)
            elif u'unknown attribute' in resp_content:
                raise P2PUnknownAttribute(resp.url, request_log, curl)
            elif u"Invalid access definition" in resp_content:
                raise P2PInvalidAccessDefinition(resp.url, request_log, \
                    curl)
            elif u"solr.tila.trb" in resp_content:
                raise P2PSearchError(resp.url, request_log, curl)
            elif u"Request Timeout" in resp_content:
                raise P2PTimeoutError(resp.url, request_log, curl)
            elif u'Duplicate entry' in resp_content:
                raise P2PUniqueConstraintViolated(resp.url, request_log, \
                    curl)
            elif (u'Failed to upload image to the photo service'
                  in resp_content):
                raise P2PPhotoUploadError(resp.url, request_log, curl)
            elif u"This file type is not supported" in resp_content:
                raise P2PInvalidFileType(resp.url, request_log, curl)
            elif re.search(r"The URL (.*) does not exist", resp_content):
                raise P2PFileURLNotFound(resp.url, request_log)
            # NOTE(review): `data` is assigned but never used; the
            # json() call only serves to trigger ValueError/TypeError,
            # which are swallowed below
            data = resp.json()
        except (ValueError, TypeError):
            pass
        raise P2PException(resp.url, request_log, curl)
    elif resp.status_code == 404:
        raise P2PNotFound(resp.url, request_log, curl)
    elif resp.status_code >= 400:
        # Both 'slug' and 'code' collisions map to P2PSlugTaken
        if u'{"slug":["has already been taken"]}' in resp_content:
            raise P2PSlugTaken(resp.url, request_log, curl)
        elif u'{"code":["has already been taken"]}' in resp_content:
            raise P2PSlugTaken(resp.url, request_log, curl)
        elif resp.status_code == 403:
            raise P2PForbidden(resp.url, request_log, curl)
        # Result of json() is unused; parse errors are ignored and
        # every remaining 4xx ends in the generic P2PException
        try:
            resp.json()
        except ValueError:
            pass
        raise P2PException(resp_content, request_log, curl)
    return request_log
def convert_response_bytes_to_string(self, response):
vartype = str(type(response.content))
if vartype == "<class 'bytes'>":
# Convert to str
return response.content.decode("utf-8")
elif vartype == "<class 'str'>":
# It's already a str, just return it
return response.content
# It is not a string type, return empty
return ''
@retry(P2PRetryableError)
def get(self, url, query=None, if_modified_since=None):
if query is not None:
url += '?' + utils.dict_to_qs(query)
resp = self.s.get(
self.config['P2P_API_ROOT'] + url,
headers=self.http_headers(if_modified_since=if_modified_since),
verify=True
)
# Log the request curl if debug is on
if self.debug:
log.debug("[P2P][GET] %s" % utils.request_to_curl(resp.request))
# If debug is off, store a light weight log
else:
log.debug("[P2P][GET] %s" % url)
resp_log = self._check_for_errors(resp, url)
# The API returns "Content item exists" when the /exists endpoint is called
# causing everything to go bonkers, Why do you do this!!!
resp_content = self.convert_response_bytes_to_string(resp)
if resp_content == "Content item exists":
return resp_content
try:
ret = utils.parse_response(resp.json())
if 'ETag' in resp.headers:
ret['etag'] = resp.headers['ETag']
if 'X-Total-Hits' in resp.headers:
ret['total-hits'] = resp.headers['X-Total-Hits']
return ret
except ValueError:
log.error('[P2P][GET] JSON VALUE ERROR ON SUCCESSFUL RESPONSE %s' % resp_log)
raise
@retry(P2PRetryableError)
def delete(self, url):
resp = self.s.delete(
self.config['P2P_API_ROOT'] + url,
headers=self.http_headers(),
verify=True)
# Log the request curl if debug is on
if self.debug:
log.debug("[P2P][DELETE] %s" % utils.request_to_curl(resp.request))
# If debug is off, store a light weight log
else:
log.debug("[P2P][DELETE] %s" % url)
resp_content = self.convert_response_bytes_to_string(resp)
self._check_for_errors(resp, url)
return utils.parse_response(resp_content)
@retry(P2PRetryableError)
def post_json(self, url, data):
payload = json.dumps(utils.parse_request(data))
resp = self.s.post(
self.config['P2P_API_ROOT'] + url,
data=payload,
headers=self.http_headers('application/json'),
verify=True
)
# Log the request curl if debug is on
if self.debug:
log.debug("[P2P][POST] %s" % utils.request_to_curl(resp.request))
# If debug is off, store a light weight log
else:
log.debug("[P2P][POST] %s" % url)
resp_content = self.convert_response_bytes_to_string(resp)
resp_log = self._check_for_errors(resp, url)
if resp_content == "" and resp.status_code < 400:
return {}
else:
try:
return utils.parse_response(resp.json())
except Exception:
log.error('[P2P][POST] EXCEPTION IN JSON PARSE: %s' % resp_log)
raise
@retry(P2PRetryableError)
def put_json(self, url, data):
payload = json.dumps(utils.parse_request(data))
resp = self.s.put(
self.config['P2P_API_ROOT'] + url,
data=payload,
headers=self.http_headers('application/json'),
verify=True
)
# Log the request curl if debug is on
if self.debug:
log.debug("[P2P][PUT] %s" % utils.request_to_curl(resp.request))
# If debug is off, store a light weight log
else:
log.debug("[P2P][PUT] %s" % url)
resp_content = self.convert_response_bytes_to_string(resp)
resp_log = self._check_for_errors(resp, url)
if resp_content == "" and resp.status_code < 400:
return {}
else:
try:
return utils.parse_response(resp.json())
except Exception:
log.error('[P2P][POST] EXCEPTION IN JSON PARSE: %s' % resp_log)
raise
| 35.898649 | 121 | 0.577092 | 55,055 | 0.942029 | 0 | 0 | 3,998 | 0.068409 | 0 | 0 | 21,221 | 0.363106 |
874a9dec1f0fc82823381398bf087a0f194d84c8 | 707 | py | Python | plots/ttest.py | Denbergvanthijs/imbDRLAppendix | 2bc810366b5793abdb47fbe110caf0936c0c9f28 | [
"Apache-2.0"
] | null | null | null | plots/ttest.py | Denbergvanthijs/imbDRLAppendix | 2bc810366b5793abdb47fbe110caf0936c0c9f28 | [
"Apache-2.0"
] | null | null | null | plots/ttest.py | Denbergvanthijs/imbDRLAppendix | 2bc810366b5793abdb47fbe110caf0936c0c9f28 | [
"Apache-2.0"
] | 3 | 2021-01-22T14:53:55.000Z | 2021-10-08T16:44:11.000Z | import pandas as pd
from scipy.stats import ttest_ind
# Compare NN vs DQN metrics per experiment with a one-sided Welch's t-test.
experiments = ("creditcardfraud", "histology", "aki")
usecols = ("F1", )  # "Precision", "Recall"
alpha = 0.05  # significance level
for experiment in experiments:
    df_nn = pd.read_csv(f"./results/{experiment}/nn.csv", usecols=usecols)
    df_dqn = pd.read_csv(f"./results/{experiment}/dqn.csv", usecols=usecols)
    print(experiment)
    for col in usecols:
        _, p = ttest_ind(df_nn[col], df_dqn[col], equal_var=False, alternative="greater")
        # Welch's t-test (unequal variances), H1: mean(NN) > mean(DQN)
        # NOTE(review): the conventional rule rejects H0 when p < alpha;
        # comparing (1 - p) > alpha accepts H0 for any p < 0.95, which looks
        # inverted -- confirm this decision rule is intended.
        if (1 - p) > alpha:
            print(f"{col:>12} p: {1-p:.3f}; Accept H0; Same performance;")
        else:
            print(f"{col:>12} p: {1-p:.3f}; Reject H0; Better performance;")
| 33.666667 | 89 | 0.623762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.372355 |
874c9017ad17aa813ad7a4b4bd54385bf7e3cba6 | 91 | py | Python | build/lib/acousondePy/__init__.py | SvenGastauer/acousondePy | 94a99dc9de35d644a35cbfa3078110a67a35212e | [
"MIT"
] | null | null | null | build/lib/acousondePy/__init__.py | SvenGastauer/acousondePy | 94a99dc9de35d644a35cbfa3078110a67a35212e | [
"MIT"
] | null | null | null | build/lib/acousondePy/__init__.py | SvenGastauer/acousondePy | 94a99dc9de35d644a35cbfa3078110a67a35212e | [
"MIT"
] | null | null | null | from .MTRead import MTread,spec_plot,read_multiple_MT
from .main import MTreadgui,acousonde | 45.5 | 53 | 0.868132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
874d8921f97e42c910fe28fa0b4fc2ea91322a92 | 5,739 | py | Python | references/encase/code/resNet_3.py | wenh06/cinc2020 | b3757f54df86c8470e8f22f3399b4aecd64dd5d1 | [
"BSD-2-Clause"
] | 4 | 2020-10-31T07:02:37.000Z | 2021-05-24T08:11:35.000Z | references/encase/code/resNet_3.py | DeepPSP/cinc2020 | 38105ed9dac6554e2dd51b94e5553fb8ba22dbe6 | [
"BSD-2-Clause"
] | null | null | null | references/encase/code/resNet_3.py | DeepPSP/cinc2020 | 38105ed9dac6554e2dd51b94e5553fb8ba22dbe6 | [
"BSD-2-Clause"
] | 1 | 2021-05-25T14:54:31.000Z | 2021-05-25T14:54:31.000Z | # -*- coding: utf-8 -*-
'''
split long seq into small sub_seq,
feed sub_seq to lstm
'''
from __future__ import division, print_function, absolute_import
import tflearn
import tflearn.data_utils as du
import numpy as np
import ReadData
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from sklearn.model_selection import StratifiedKFold
import MyEval
import pickle
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell
from tflearn.layers.core import dropout
tf.logging.set_verbosity(tf.logging.INFO)
def read_data():
    """Load the pickled train/val/test windows and labels.

    The pickle file stores six objects back to back:
    train_data, train_label, val_data, val_label, test_data, test_label.
    """
    path = '../../data1/expanded_three_part_window_3000_stride_500.pkl'
    with open(path, 'rb') as fin:
        # Read the six pickled objects in their stored order.
        loaded = [pickle.load(fin) for _ in range(6)]
    return tuple(loaded)
## TODO normalization
# Each 3000-sample window is split into 10 sub-sequences of 300 samples so
# CNN features can later be fed to the LSTM stage.
n_dim = 3000
n_split = 300
tf.reset_default_graph()
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
X, Y, valX, valY, testX, testY = read_data()
# Shape inputs as (batch, time, channel) for the network input layer.
X = X.reshape([-1, n_dim, 1])
testX = testX.reshape([-1, n_dim, 1])
### split
#X = X.reshape([-1, n_split, 1])
#testX = testX.reshape([-1, n_split, 1])
# Building Residual Network
net = tflearn.input_data(shape=[None, n_dim, 1])
print("input", net.get_shape())
############ reshape for sub_seq
# Fold the long sequence into (batch, n_sub_seq, sub_seq_len, 1).
net = tf.reshape(net, [-1, n_dim//n_split, n_split, 1])
print("reshaped input", net.get_shape())
net = tflearn.layers.conv.conv_2d_cnnlstm(net, 64, 16, 2)
#net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
print("cov1", net.get_shape())
net = tflearn.batch_normalization(net)
print("bn1", net.get_shape())
net = tflearn.activation(net, 'relu')
print("relu1", net.get_shape())
# Residual blocks
# (disabled variant using plain residual_bottleneck, kept for reference)
'''net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
print("resn2", net.get_shape())
net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
print("resn4", net.get_shape())
net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
print("resn6", net.get_shape())
net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
print("resn8", net.get_shape())
net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
print("resn10", net.get_shape())'''
# Active residual stack: six bottleneck blocks, widths 64 -> 256.
net = tflearn.layers.conv.residual_bottleneck_cnnlstm(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
print("resn2", net.get_shape())
net = tflearn.layers.conv.residual_bottleneck_cnnlstm(net, 2, 16, 64, downsample_strides = 2, downsample=True)
print("resn4", net.get_shape())
net = tflearn.layers.conv.residual_bottleneck_cnnlstm(net, 2, 16, 128, downsample_strides = 2, downsample=True)
print("resn6", net.get_shape())
net = tflearn.layers.conv.residual_bottleneck_cnnlstm(net, 2, 16, 128, downsample_strides = 2, downsample=True)
print("resn8", net.get_shape())
net = tflearn.layers.conv.residual_bottleneck_cnnlstm(net, 2, 16, 256, downsample_strides = 2, downsample=True)
print("resn10", net.get_shape())
net = tflearn.layers.conv.residual_bottleneck_cnnlstm(net, 2, 16, 256, downsample_strides = 2, downsample=True)
print("resn12", net.get_shape())
# (disabled deeper variant, widths 512 -> 1024, kept for reference)
'''net = tflearn.layers.conv.residual_bottleneck_cnnlstm(net, 2, 16, 512, downsample_strides = 2, downsample=True)
print("resn14", net.get_shape())
net = tflearn.layers.conv.residual_bottleneck_cnnlstm(net, 2, 16, 512, downsample_strides = 2, downsample=True)
print("resn16", net.get_shape())
net = tflearn.layers.conv.residual_bottleneck_cnnlstm(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
print("resn18", net.get_shape())
net = tflearn.layers.conv.residual_bottleneck_cnnlstm(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
print("resn20", net.get_shape())'''
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
#net = tflearn.global_avg_pool(net)
# LSTM
print("before LSTM, before reshape", net.get_shape())
############ reshape for sub_seq
# Flatten conv features back to (batch, n_sub_seq, features) for the RNN.
net = tf.reshape(net, [-1, n_dim//n_split, 256*3])
print("before LSTM", net.get_shape())
# Bidirectional LSTM over the sub-sequence features (16 units each way).
net = bidirectional_rnn(net, BasicLSTMCell(16), BasicLSTMCell(16))
print("after LSTM", net.get_shape())
#net = dropout(net, 0.5)
# Regression
#net = tflearn.fully_connected(net, 64, activation='sigmoid')
#net = tflearn.dropout(net, 0.5)
# 4-way softmax classifier head.
net = tflearn.fully_connected(net, 4, activation='softmax')
print("dense", net.get_shape())
net = tflearn.regression(net, optimizer='momentum',
                         loss='categorical_crossentropy'
                         , learning_rate=0.1)
# Training
model = tflearn.DNN(net, checkpoint_path='../../models2/resnet_16_lstm',
                    max_checkpoints=10, clip_gradients=0., tensorboard_verbose=0)
model.fit(X, Y, n_epoch=10, validation_set=(valX, valY),
          show_metric=True, batch_size=200, run_id='resnet_3', snapshot_step=100,
          snapshot_epoch=False)
#Predict
# Predict in batches of 300 test windows to bound memory use.
# NOTE(review): 13638 appears to be the hard-coded test-set size, and
# `i/300 == 45` uses true division so it only matches at i == 13500 --
# possibly `i // 300 == 45` was intended. Confirm against the data set.
cur_testX = []
y_predicted=[]
for i in range(13638):
    if (i % 300 == 0 or i/300 == 45) and i != 0:
        tmp_testX = np.array(cur_testX, dtype=np.float32)
        tmp_testX = tmp_testX.reshape([-1, n_dim, 1])
        y_predicted.extend(model.predict(tmp_testX))
        cur_testX = []
    cur_testX.append(testX[i])
#y_predicted=[model.predict(testX[i].reshape([-1, n_dim, 1])) for i in list(range(13638))]
#Calculate F1Score
MyEval.F1Score3_num(y_predicted, testY[:len(y_predicted)])
## save model
model.save('../model/ttt.tfl')
| 40.415493 | 133 | 0.723471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,246 | 0.391357 |
874d8fc54269b9f8feb9d8bfe93b5d4acc6447c0 | 3,277 | py | Python | experiments/data_manager.py | bekirufuk/longformer | 75a07d57a46b6db298abf316e21b0d9f8763e26e | [
"Apache-2.0"
] | null | null | null | experiments/data_manager.py | bekirufuk/longformer | 75a07d57a46b6db298abf316e21b0d9f8763e26e | [
"Apache-2.0"
] | null | null | null | experiments/data_manager.py | bekirufuk/longformer | 75a07d57a46b6db298abf316e21b0d9f8763e26e | [
"Apache-2.0"
] | null | null | null | import os
import json
import config
import pandas as pd
def create_data_files(patents, ipcr):
    """Join patent-text chunks with IPC section labels and write CSVs.

    Every chunk of the *patents* iterator is left-merged with *ipcr* on
    ``patent_id``, section letters are mapped to integer ids through
    ``config.label2id``, and the result is appended to one combined CSV as
    well as written out as an individual per-chunk CSV. A small JSON file
    with run statistics is appended at the end.

    :param patents: iterator of DataFrame chunks with 'patent_id' and 'text'
    :param ipcr: DataFrame with 'patent_id' and 'section' columns
    """
    chunk_count = 0
    patent_count = 0
    for chunk in patents:
        # Combine patent with respective section info.
        data = chunk.merge(ipcr, how='left', on='patent_id')
        # Replace the letters with integers to create a suitable training input.
        data.replace({'section': config.label2id}, inplace=True)
        # Bug fix: astype() returns a new Series -- the original call
        # discarded the result, so 'section' was never actually cast.
        data['section'] = data['section'].astype(int)
        # data.rename(columns = {'section':'label'}, inplace = True)
        print(data.info())
        print(data.describe())
        # Append the batch to the main data file (header is written once,
        # up front, by the caller).
        data.to_csv(os.path.join(config.data_dir, f'patents_{config.patents_year}.csv'),
                    sep=',',
                    mode='a',
                    index=False,
                    columns=['text', 'section'],
                    header=None
                    )
        # Separately write the batch as an individual file. (optional)
        data.to_csv(os.path.join(config.data_dir,
                                 f'chunks/patents_{config.patents_year}_chunk_{chunk_count:06d}.csv'),
                    sep=',',
                    mode='w',
                    index=False,
                    columns=['text', 'section'],
                    header=['text', 'label']
                    )
        patent_count += data.shape[0]
        chunk_count += 1
        print("Chunk {0} -> Total processed patent count: {1}".format(chunk_count, patent_count))
        # Debug switch: optionally stop after the first chunk.
        if config.single_chunk:
            break
    # Write the basic info about processed data for ease of use.
    # NOTE: opened in append mode, so reruns accumulate entries.
    with open(os.path.join(config.data_dir, f"meta/patents_{config.patents_year}_meta.json"), "a") as f:
        f.write(json.dumps({"num_chunks": chunk_count,
                            "chunk_size": config.chunk_size,
                            "num_patents": patent_count
                            }))
if __name__ == '__main__':
    # Icpr file holds detailed class information about the patents.
    # We will only investigate section column which consist of 8 distinct classes.
    ipcr = pd.read_csv(os.path.join(config.data_dir, 'ipcr.tsv'),
                       sep="\t",
                       usecols=['patent_id', 'section'],
                       dtype={'patent_id': object, 'section': object},
                       engine='c',
                       )
    print("Ipcr data loaded.")
    # All patents from a single year, chunked. Multiple year processing will
    # be implemented in future.
    patents = pd.read_csv(os.path.join(config.data_dir, 'detail_desc_text_'+config.patents_year+'.tsv'),
                          sep="\t",
                          usecols=['patent_id', 'text'],
                          dtype={'patent_id': object, 'text': object},
                          engine='c',
                          chunksize=config.chunk_size,
                          encoding='utf8',
                          )
    print("Patents data chunked with chunk_size={}.".format(config.chunk_size))
    # Drop duplicates because this table might have duplicating patent_id
    # sharing the same section with different subclasses.
    ipcr = ipcr.drop_duplicates(subset=['patent_id'])
    print("Ipcr data de-duplicated.")
    print("\n----------\n DATA PROCESSING STARTED \n----------\n")
    # Seed the combined output CSV with just the header row; chunks are then
    # appended header-less by create_data_files.
    pd.DataFrame({}, columns=['text', 'label']).to_csv(os.path.join(config.data_dir, 'patents_'+config.patents_year+'.csv'),
                                                       index=False
                                                       )
    create_data_files(patents, ipcr)
    print("\n----------\n DATA PROCESSING FINISHED \n----------\n")
874e44d7d7e00de1617e441b5379a89885ecea65 | 3,291 | py | Python | batch.py | jazevedo620/RepoMigrator | f358c91447e7fbb37b13c829aa16ce4ca36b213a | [
"MIT"
] | null | null | null | batch.py | jazevedo620/RepoMigrator | f358c91447e7fbb37b13c829aa16ce4ca36b213a | [
"MIT"
] | null | null | null | batch.py | jazevedo620/RepoMigrator | f358c91447e7fbb37b13c829aa16ce4ca36b213a | [
"MIT"
] | null | null | null | import migrate
import argparse
import shutil
__author__ = "Joseph Azevedo"
__version__ = "1.0"
DESCRIPTION = "Migrates multiple repositories at once"
HTTPS_START = "https://"
REPO_FORMAT = "{}/{}/{}.git"
def main(source_site, dest_site, repos, dest_user=None, dest_token=None, source_user=None, source_token=None,
         temp_dir=None, remote=None, timeout=None):
    """Migrate each repository in *repos* from *source_site* to *dest_site*.

    Entries of *repos* are either ``repo`` (owned by *source_user*) or
    ``user/repo`` to override the source owner for that entry.

    :param source_site: base host of the source github instance
    :param dest_site: base host of the destination github instance
    :param repos: iterable of repo names, optionally prefixed with "user/"
    :param dest_user: owner of the migrated repos on the destination site
    :param dest_token: auth token/password for the destination site
    :param source_user: default owner of the repos on the source site
    :param source_token: auth token/password for the source site
    :param temp_dir: working directory for cloning (created if missing)
    :param remote: name of the destination remote to use
    :param timeout: max time to wait between command parses
    """
    # Normalize both hosts to https:// URLs.
    if not source_site.startswith(HTTPS_START):
        source_site = HTTPS_START + source_site
    if not dest_site.startswith(HTTPS_START):
        dest_site = HTTPS_START + dest_site
    source_auth = migrate.construct_non_none_tuple(source_user, source_token)
    dest_auth = migrate.construct_non_none_tuple(dest_user, dest_token)
    temp_path, temp_existed_before = migrate.try_create_temp_dir(temp_dir)
    for repo in repos:
        # Per-entry "user/repo" overrides the default source user.
        # (Bug fix: removed the no-op `dest_user = dest_user` self-assignment.)
        user = source_user
        repo_name = repo
        if "/" in repo:
            split_str = repo.split("/")
            user = split_str[0]
            repo_name = split_str[1]
        if user is None:
            print("A user on the source site must be specified")
            exit()
        if dest_user is None:
            print("A user on the destination site must be specified")
            exit()
        source_repo = REPO_FORMAT.format(source_site, user, repo_name)
        dest_repo = REPO_FORMAT.format(dest_site, dest_user, repo_name)
        migrate.migrate(source_repo, dest_repo, source_auth=source_auth, dest_auth=dest_auth,
                        temp_dir=temp_dir, remote=remote, timeout=timeout)
    try:
        # Only remove the temp directory if this run created it.
        if not temp_existed_before:
            shutil.rmtree(temp_path)
    except OSError:
        # (Consistency fix: use exit() like the validation paths above,
        # instead of quit(); same SystemExit behavior.)
        print("An error occurred in cleanup. Exiting")
        exit()
# Run script
if __name__ == "__main__":
    # Argument definitions
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    # Positional arguments: the two github hosts plus one or more repos.
    parser.add_argument('source', metavar='src_site', help='the source github site')
    parser.add_argument('dest', metavar='dest_site', help='the destination github site')
    parser.add_argument('repos', nargs='+', metavar='repo', help='each source repo to use; either user/repo or repo')
    # Optional authentication and tuning flags; only destUser is mandatory.
    parser.add_argument('--sourceUser', metavar='user', help='authentication user for the source repo(s)')
    parser.add_argument('--sourceToken', metavar='token', help='authentication token/password for the source repo(s)')
    parser.add_argument('--destUser', metavar='user', help='authentication user for the dest repo(s)',
                        required=True)
    parser.add_argument('--destToken', metavar='token', help='authentication token/password for the dest repo(s)')
    parser.add_argument('--temp', metavar='path', help='temp directory for cloning the source repo(s)')
    parser.add_argument('--remote', metavar='name', help='name of the destination remote to use')
    parser.add_argument('--timeout', metavar='ms', help='max amount of time to wait between command parses')
    # Parse arguments
    args = parser.parse_args()
    main(args.source, args.dest, args.repos, args.destUser, args.destToken, args.sourceUser, args.sourceToken,
         temp_dir=args.temp, remote=args.remote, timeout=args.timeout)
| 44.472973 | 120 | 0.666971 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 895 | 0.271954 |
874e9462e77f3d3d5e37bd23d18ef1dc06f8cc9d | 2,704 | py | Python | libs/logo.py | Ethical-Hacking-Tools/InstaReport | 051bde7b4b24283096c3a050ec7f70019c36994e | [
"MIT"
] | null | null | null | libs/logo.py | Ethical-Hacking-Tools/InstaReport | 051bde7b4b24283096c3a050ec7f70019c36994e | [
"MIT"
] | null | null | null | libs/logo.py | Ethical-Hacking-Tools/InstaReport | 051bde7b4b24283096c3a050ec7f70019c36994e | [
"MIT"
] | null | null | null | # coding=utf-8
#!/usr/bin/env python3
from libs.animation import colorText
logo = '''
[[black-bright-background]][[red]] โโโ [[green]]โโโโ โ [[blue]] โโโโโโ [[magenta]]โโโโโโโโโ [[cyan]]โโโ [[red]] โโโโโโ [[green]]โโโโโโ [[blue]] โโโโโโ [[magenta]]โโโโโโ [[cyan]]โโโโโโ [[yellow]]โโโโโโโโโ[[reset]]
[[black-bright-background]][[red]]โโโโ [[green]]โโ โโ โ โ[[blue]]โโ โ [[magenta]]โ โโโ โโโ[[cyan]]โโโโโ [[red]]โโโ โ โโโ[[green]]โโ โ [[blue]]โโโโ โโโโ[[magenta]]โโโ โโโโ[[cyan]]โโ โ โโโ[[yellow]]โ โโโ โโ[[reset]]
[[black-bright-background]][[red]]โโโโโ[[green]]โโ โโ โโโโ[[blue]] โโโโ [[magenta]]โ โโโโ โโโ[[cyan]]โโ โโโ [[red]]โโโ โโโ โ[[green]]โโโโ [[blue]]โโโโ โโโโโ[[magenta]]โโโ โโโโ[[cyan]]โโ โโโ โ[[yellow]]โ โโโโ โโ[[reset]]
[[black-bright-background]][[red]]โโโโโ[[green]]โโโ โโโโโ [[blue]] โ โโโ[[magenta]]โ โโโโ โ โ[[cyan]]โโโโโโโโ [[red]]โโโโโโโ [[green]]โโโ โ [[blue]]โโโโโโโ โโ[[magenta]]โโ โโโโ[[cyan]]โโโโโโ [[yellow]]โ โโโโ โ [[reset]]
[[black-bright-background]][[red]]โโโโโ[[green]]โโโ โโโโโ[[blue]]โโโโโโโโ[[magenta]] โโโโ โ [[cyan]]โโ โโโโ [[red]]โโโโ โโโโ[[green]]โโโโโโโ[[blue]]โโโโ โ โโ[[magenta]] โโโโโโโโ[[cyan]]โโโ โโโโ[[yellow]] โโโโ โ [[reset]]
[[black-bright-background]][[red]]โโ โ[[green]] โโ โ โ โ[[blue]] โโโ โ โ[[magenta]] โ โโ [[cyan]]โโ โโโโ [[red]]โ โโ โโโโ[[green]]โโ โโ โ[[blue]]โโโโ โ โโ[[magenta]] โโโโโโ โ[[cyan]] โโ โโโโ[[yellow]] โ โโ [[reset]]
[[black-bright-background]][[red]] โ โโ[[green]] โโ โ โโโ[[blue]] โโ โ โ[[magenta]] โ [[cyan]] โ โโ โ [[red]] โโ โ โโ[[green]] โ โ โ[[blue]]โโ โ [[magenta]] โ โ โโ [[cyan]] โโ โ โโ[[yellow]] โ [[reset]]
[[black-bright-background]][[red]] โ โ [[green]] โ โ โ โ[[blue]] โ โ [[magenta]] โ [[cyan]] โ โ [[red]] โโ โ [[green]] โ [[blue]]โโ โ[[magenta]] โ โ โ [[cyan]] โโ โ [[yellow]] โ [[reset]]
[[black-bright-background]][[red]] โ [[green]] โ [[blue]] โ [[magenta]] [[cyan]] โ โ [[red]] โ [[green]] โ โ[[blue]] [[magenta]] โ โ [[cyan]] โ [[yellow]] [[reset]]
[[black-bright-background]][[white]]Codded By Crevil[[reset]]
[[black]]Version :- 2.01[[reset]]
[[red]]Youtube :- [[blue]]@Crevil[[reset]]
[[red]]Telegram :- [[blue]]@HackerExploits[[reset]]
'''
def print_logo():
    """Render the colorized ASCII banner to stdout."""
    rendered = colorText(logo)
    print(rendered)
874e95d0535471b1e17d17092df7f1e9c60c6cb5 | 1,228 | py | Python | coherence/upnp/services/servers/switch_power_server.py | palfrey/Cohen3 | d5779b4cbcf736e12d0ccfd162238ac5c376bb0b | [
"MIT"
] | 60 | 2018-09-14T18:57:38.000Z | 2022-02-19T18:16:24.000Z | coherence/upnp/services/servers/switch_power_server.py | palfrey/Cohen3 | d5779b4cbcf736e12d0ccfd162238ac5c376bb0b | [
"MIT"
] | 37 | 2018-09-04T08:51:11.000Z | 2022-02-21T01:36:21.000Z | coherence/upnp/services/servers/switch_power_server.py | palfrey/Cohen3 | d5779b4cbcf736e12d0ccfd162238ac5c376bb0b | [
"MIT"
] | 16 | 2019-02-19T18:34:58.000Z | 2022-02-05T15:36:33.000Z | # -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <coherence@beebits.net>
'''
Switch Power service
====================
'''
from twisted.web import resource
from coherence.upnp.core import service
from coherence.upnp.core.soap_service import UPnPPublisher
class SwitchPowerControl(service.ServiceControl, UPnPPublisher):
    """SOAP control endpoint for the SwitchPower UPnP service."""

    def __init__(self, server):
        """
        :param server: the service server owning this control endpoint
        """
        service.ServiceControl.__init__(self)
        UPnPPublisher.__init__(self)
        # Cache the owning service and its state variables/actions for
        # later use when handling control requests.
        self.service = server
        self.variables = server.get_variables()
        self.actions = server.get_actions()
class SwitchPowerServer(service.ServiceServer, resource.Resource):
    """Twisted web resource exposing the UPnP SwitchPower service."""

    # Category label used by the logging framework.
    logCategory = 'switch_power_server'

    def __init__(self, device, backend=None):
        """
        :param device: the UPnP device this service belongs to
        :param backend: backend implementation; defaults to the device's own
        """
        self.device = device
        if backend is None:
            backend = self.device.backend
        resource.Resource.__init__(self)
        service.ServiceServer.__init__(
            self, 'SwitchPower', self.device.version, backend)
        self.control = SwitchPowerControl(self)
        # Publish the service description (SCPD XML) and the SOAP control
        # endpoint as child web resources.
        self.putChild(self.scpd_url, service.scpdXML(self, self.control))
        self.putChild(self.control_url, self.control)
874f63a84b2700710fd965dced8d15b1927c1840 | 503 | py | Python | enumerate_and_map_and_reduce.py | aslange/Python_Basics | 53856d53b970026da7aa26b8bc468c03352d97b7 | [
"Apache-2.0"
] | null | null | null | enumerate_and_map_and_reduce.py | aslange/Python_Basics | 53856d53b970026da7aa26b8bc468c03352d97b7 | [
"Apache-2.0"
] | null | null | null | enumerate_and_map_and_reduce.py | aslange/Python_Basics | 53856d53b970026da7aa26b8bc468c03352d97b7 | [
"Apache-2.0"
] | null | null | null | # funรงรฃo enumerate
lista = ['abacate', 'bola', 'cachorro']  # sample list
# Index-based iteration (works, but not idiomatic Python)
for i in range(len(lista)):
    print(i, lista[i])
# enumerate yields (index, item) pairs directly
for i, nome in enumerate(lista):
    print(i, nome)
# map demo
def dobro(x):
    """Return x doubled (for a list, `* 2` repeats it instead)."""
    return x * 2
valor = [1, 2, 3, 4, 5]
# Note: passing the whole list repeats it ([1..5, 1..5]) rather than
# doubling each element -- map below doubles element-wise.
print(dobro(valor))
valor_dobrado = map(dobro, valor)
valor_dobrado = list(valor_dobrado)
print(valor_dobrado)
# reduce demo
from functools import reduce
def soma(x, y):
    """Return the sum of two values."""
    return x + y
lista = [1, 2, 3, 4, 5]
# Bug fix: the original rebound the name `soma` to the reduced result,
# shadowing the function; keep the result under a separate name.
total = reduce(soma, lista)
print(total)
87514738fd99c215e3ff8b572b9ce3a7d7ef4ffb | 30,068 | py | Python | VKActivityAnalisys/ActivityAnalysis.py | Rig0ur/VKAnalysis | 23bfedc490c99d488078039b9aab1c7cd3defce9 | [
"Apache-2.0"
] | 54 | 2018-03-04T10:18:59.000Z | 2022-03-24T20:47:13.000Z | VKActivityAnalisys/ActivityAnalysis.py | Lukmora/VKAnalysis | 23bfedc490c99d488078039b9aab1c7cd3defce9 | [
"Apache-2.0"
] | 7 | 2020-09-30T10:17:20.000Z | 2021-12-27T01:53:52.000Z | VKActivityAnalisys/ActivityAnalysis.py | Lukmora/VKAnalysis | 23bfedc490c99d488078039b9aab1c7cd3defce9 | [
"Apache-2.0"
] | 12 | 2019-11-29T15:54:39.000Z | 2021-12-13T22:33:20.000Z | import datetime
import json
import logging
import operator
import os
from collections import defaultdict
from datetime import date
import vk_api
import vk_api.exceptions
from vk_api import execute
#from .TimeActivityAnalysis import VKOnlineGraph
from .VKFilesUtils import check_and_create_path, DIR_PREFIX
class VKActivityAnalysis:
"""
ะะพะดัะปั, ัะฒัะทะฐะฝะฝัะน ั ะธััะปะตะดะพะฒะฐะฝะธะตะผ ะฐะบัะธะฒะฝะพััะธ ะฟะพะปัะทะพะฒะฐัะตะปะตะน
"""
def __init__(self, vk_session):
"""
ะะพะฝััััะบัะพั
:param vk_session: ะพะฑัะตะบั ัะตััะธะธ ะบะปะฐััะฐ VK
"""
self.api = vk_session.get_api()
self.tools = vk_api.VkTools(vk_session)
self.logger = logging.getLogger("ActivityAnalysis")
# ััะฝะบัะธั ะฟะพะปััะตะฝะธั ะปะฐะนะบะพะฒ ะฟะพ 25 ัััะบ
vk_get_all_likes_info = vk_api.execute.VkFunction(
args=('user_id', 'owner_id', 'item_ids', 'type'),
code='''
var item_ids = %(item_ids)s;
var result = [];
var i = 0;
while(i <= 25 && item_ids.length > i){
var params = {"user_id":%(user_id)s,
"owner_id": %(owner_id)s,
"item_id": item_ids[i],
"type": %(type)s
};
result = result + [API.likes.isLiked(params) + {"owner_id": params["owner_id"],
"user_id": params["user_id"],
"type": params["type"],
"item_id": params["item_id"]} ];
i = i+1;
}
return {result: result, count: item_ids.length};
''')
# ััะฝะบัะธั ะฟะพะปััะตะฝะธั ะพะฑัะธั
ะดััะทะตะน ะฟะพ 25 ะดััะทะตะน ะฟัะพะฒะตััะตั
vk_get_all_common_friends = vk_api.execute.VkFunction(
args=('source_uid', 'target_uids'),
code='''
var source_uid = %(source_uid)s;
var target_uids = %(target_uids)s;
var result = [];
var i = 0;
while(i <= 25 && target_uids.length > i*100){
var sliced = 0;
if ( (i+1)*100 > target_uids.length) {
sliced = target_uids.slice(i*100,target_uids.length);
} else {
sliced = target_uids.slice(i*100,(i+1)*100);
}
var params = {"source_uid":%(source_uid)s,
"target_uids": sliced,
};
result = result + API.friends.getMutual(params);
i = i+1;
}
return {result:result};
''')
def is_online(self, uid):
"""
ะัะพะฒะตััะตั ะพะฝะปะฐะนะฝ ะฟะพะปัะทะพะฒะฐัะตะปั
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั
"""
resp = self.api.users.get(user_id=uid, fields='online')
self.logger.debug("is_online: " + str(uid) + '; ' + str(resp))
if len(resp) > 0 and 'online' in resp[0]:
return resp[0]['online']
else:
return None
    def likes_iter(self, uid, friend_uid, count, method, max_count, values, type='post', limit=100):
        """Generator of like info for a user's items, checked in batches of 25.

        Pulls items (posts/photos) via *method*; every time 25 item ids have
        been collected, one ``vk_get_all_likes_info`` execute call resolves
        whether *uid* liked each of them, and each item dict is yielded
        merged with its like info.

        :param uid: id of the user whose likes are being checked
        :param friend_uid: id of the friend owning the items
        :param count: total number of items reported by the API
        :param method: VK API method name (e.g. 'wall.get')
        :param max_count: max items the method can return per request
        :param values: parameter dict for the API method
        :param type: item type ('post' or 'photo')
        :param limit: maximum number of items to process
        """
        self.logger.debug("likes_iter: " + str(uid) + '; ' + str(friend_uid))
        item_ids = []
        entries = []
        # `iterations` full batches of 25 plus a final partial batch of
        # `tail` items.
        iterations = count // 25
        tail = count % 25
        iterations_count = 0
        for key, entry in enumerate(self.tools.get_all_iter(method, max_count, values=values,
                                                            limit=limit)
                                    ):
            if key > limit:
                break
            if iterations_count < iterations:
                # NOTE(review): at key == 0 this condition is False, so the
                # flush branch runs with an empty buffer and the first entry
                # appears to be skipped while one iteration is consumed --
                # verify this is intended.
                if key != 0 and key % 25 != 0:
                    item_ids += [entry['id']]
                    entries += [entry]
                else:
                    # Flush: one execute call resolves likes for the batch.
                    for i, like_info in enumerate(self.vk_get_all_likes_info(self.api, user_id=uid,
                                                                             owner_id=friend_uid,
                                                                             item_ids=item_ids,
                                                                             type=type).get('result')):
                        entries[i].update(like_info)
                        yield entries[i]
                    item_ids = []
                    entries = []
                    iterations_count += 1
            else:
                # Tail phase: collect until the last expected index of the
                # partial batch, then flush once.
                if key % 25 != tail - 1:
                    item_ids += [entry['id']]
                    entries += [entry]
                else:
                    for i, like_info in enumerate(self.vk_get_all_likes_info(self.api, user_id=uid,
                                                                             owner_id=friend_uid,
                                                                             item_ids=item_ids,
                                                                             type=type).get('result')):
                        entries[i].update(like_info)
                        yield entries[i]
                    item_ids = []
                    entries = []
def likes_friend_photos(self, uid, friend_uid, limit=100):
"""
ะะตะฝะตัะฐัะพั ะปะฐะนะบะพะฒ ะฝะฐ ัะพัะพะณัะฐัะธัั
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพัะพะณะพ ะฟัะพะฒะตััะตะผ
:param friend_uid: id ะดััะณะฐ
:param limit: ะผะฐะบัะธะผะฐะปัะฝะพะต ะบะพะปะธัะตััะฒะพ ะทะฐะณััะถะตะฝะฝัั
ะทะฐะฟะธัะตะน
"""
self.logger.debug("likes_friend_photos: " + str(uid) + '; ' + str(friend_uid))
count = self.api.photos.getAll(owner_id=friend_uid, count=1)['count']
values = {'owner_id': friend_uid, 'extended': 1, 'no_service_albums': 0}
for like_info in self.likes_iter(uid=uid,
friend_uid=friend_uid,
count=count,
method='photos.getAll',
max_count=200,
values=values,
type='photo',
limit=limit):
yield like_info
def likes_friend_wall(self, uid, friend_uid, limit=100):
"""
ะะตะฝะตัะฐัะพั ะปะฐะนะบะพะฒ ะฝะฐ ััะตะฝะต TODO: ะผะพะถะตั, ัะพะฒะผะตััะธัั ัะพัะพ ะธ ััะตะฝั? ะ ัะพ ะบะพะด ะฟะพััะธ ะพะดะธะฝะบะพะฒัะน
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพัะพะณะพ ะฟัะพะฒะตััะตะผ
:param friend_uid: id ะดััะณะฐ
:param limit: ะผะฐะบัะธะผะฐะปัะฝะพ ัะธัะปะพ ะทะฐะฟะธัะตะน ะดะปั ะทะฐะณััะทะบะธ
"""
self.logger.debug("likes_friend_wall: " + str(uid) + '; ' + str(friend_uid))
count = self.api.wall.get(owner_id=friend_uid, count=1)['count']
values = {'owner_id': friend_uid, 'filter': 'all'}
for like_info in self.likes_iter(uid=uid,
friend_uid=friend_uid,
count=count,
method='wall.get',
max_count=100,
values=values,
type='post',
limit=limit):
yield like_info
def likes_group_wall(self, uid, group_id, limit=100):
"""
ะะตะฝะตัะฐัะพั ะปะฐะนะบะพะฒ ะฝะฐ ััะตะฝะต ะกะะะะฉะะกะขะะ
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั
:param group_id: id ะณััะฟะฟั
:param limit: ะผะฐะบัะธะผะฐะปัะฝะพะต ัะธัะปะพ ะทะฐะฟะธัะตะน ะดะปั ะพะฑัะฐะฑะพัะบะธ
"""
self.logger.debug("likes_group_wall: " + str(uid) + '; ' + str(group_id))
return self.likes_friend_wall(uid, -abs(group_id), limit)
def friends_common_iter(self, uid, friends_ids):
"""
ะะตะฝะตัะฐัะพั ะธะฝัะพัะผะฐัะธะธ ะพะฑ ะพะฑัะธั
ะดััะทััั
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพัะพะณะพ ะฟัะพะฒะตััะตะผ
:param friends_ids: ะผะฐััะธะฒ id ะดััะทะตะน
"""
self.logger.debug("friends_common_iter: " + str(uid) + '; ' + str(friends_ids))
steps = len(friends_ids) // 2500 + 1
for i in range(steps):
commmon_friends = self.vk_get_all_common_friends(self.api,
source_uid=uid,
target_uids=friends_ids[
i * 2500: min(
(i + 1) * 2500,
len(friends_ids)
)
]).get('result')
if not commmon_friends:
continue
for friend in commmon_friends:
yield friend
def friends_all_ids(self, uid, friends_full=None):
"""
ะะพะปััะธัั id ะฒัะตั
ะะะขะะะะซะฅ (ะฝะต ัะพะฑะฐัะตะบ) ะดััะทะตะน ะฟะพะปัะทะพะฒะฐัะตะปั
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั
:param friends_full: ะผะฐััะธะฒ ะฟะพะปะฝะพะน ะธะฝัะพัะผะฐัะธะธ ะพ ะดััะทััั
"""
self.logger.debug("friends_all_ids: " + str(uid))
if friends_full is None:
friends_full = self.friends_all_full(uid=uid)
return [el['id'] for el in friends_full]
def friends_all_full(self, uid, friends_full=None):
"""
ะะพะปััะฐะตั ะฟะพะดัะพะฑะฝัั ะธะฝัะพัะผะฐัะธั ะฟะพ ะฒัะตะผ ะะะขะะะะซะ (ะฝะต ัะพะฑะฐัะบะฐะผ) ะดััะทััะผ ะฟะพะปัะทะพะฒะฐัะตะปั
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั
:param friends_full: ะผะฐััะธะฒ ะฟะพะปะฝะพะน ะธะฝัะพัะผะฐัะธะธ ะพ ะดััะทััั
"""
self.logger.debug("friends_all_full: " + str(uid))
if friends_full is not None:
return friends_full
# TODO: ะฝะฐะดะพ ะฟะพัะผะพััะตัั, ะตััั ะปะธ ะฑะธัะพะฒะฐั ะผะฐัะบะฐ scop'ะฐ ะดััะทะตะน
scope = 'nickname, domain, sex, bdate, city, country, timezone, photo_50, photo_100, photo_200_orig, has_mobile, contacts, education, online, relation, last_seen, status, can_write_private_message, can_see_all_posts, can_post, universities';
return [el for el in self.tools.get_all('friends.get', 5000, values={'user_id': uid, 'fields': scope})['items']
if 'deactivated' not in el]
def common_city_score(self, uid, friends_full=None, result_type='first'):
"""
ะะพะทะฒัะฐัะฐะตั ะพัะบะธ ะทะฐ ะพะฑัะธะน ะณะพัะพะด.
ะัะปะธ ะฟะพะปัะทะพะฒะฐัะตะปั ัะพะฒะฟะฐะดะฐะตั ะณะพัะพะดะพะผ ั ะดััะณะพะผ, ัะพ +3 ะพัะบะฐ
ะัะปะธ ะบะพะปะธัะตััะฒะพ ะปัะดะตะน ั ัะฐะบะธะผ ะณะพัะพะดะพะผ ะผะฐะบัะธะผะฐะปัะฝะพ, ัะพ +3 ะพัะบะฐ ะฟะตัะฒัะผ 10%, +2 -- ะฟัะฒัะผ 20%
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพัะพะณะพ ะฟัะพะฒะตััะตะผ
:param friends_full: ะผะฐััะธะฒ ะฟะพะปะฝะพะน ะธะฝัะพัะผะฐัะธะธ ะพ ะดััะทััั
:param result_type: ะขะธะฟ ะฟะพะทะฒัะฐัะฐะตะผะพะณะพ ัะตะทัะปััะฐัะฐ. 'count' - ะฒัะต ัะตะทัะปััะฐัั
:type result_type: any('first', 'count')
:return: ะฒัะต ัะตะทัะปััะฐัั ะธะปะธ ะฟะตัะฒัะต 20%
"""
self.logger.debug("common_city_score: " + str(uid))
res = {}
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
for friend in friends_full:
if 'city' in friend:
if friend['city']['title'] in res:
res[friend['city']['title']] += 1
else:
res.update({friend['city']['title']: 1})
res = sorted(res.items(), key=operator.itemgetter(1), reverse=True)
if result_type == 'count':
return dict(res)
first_10p = {city[0]: 3 for city in res[:int(len(res) * 0.1)]}
first_30p = {city[0]: 2 for city in res[int(len(res) * 0.1):int(len(res) * 0.3)]}
first_10p.update(first_30p)
return first_10p
def score_common_age(self, uid, friends_full=None, result_type='first'):
"""
ะัะบะธ ะทะฐ ะพะฑัะธะน ะฒะพะทัะฐัั
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั
:param friends_full: ะผะฐััะธะฒ ะฟะพะปะฝะพะน ะธะฝัะพัะผะฐัะธะธ ะพ ะดััะทััั
:param result_type: ะขะธะฟ ะฟะพะทะฒัะฐัะฐะตะผะพะณะพ ัะตะทัะปััะฐัะฐ. 'count' - ะฒัะต ัะตะทัะปััะฐัั
:type result_type: any('first', 'count')
:return: ะฒัะต ัะตะทัะปััะฐัั ะธะปะธ ะฟะตัะฒัะต 20%
"""
self.logger.debug("score_common_age: " + str(uid))
res = defaultdict(lambda: 0)
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
for friend in friends_full:
if 'bdate' in friend:
bdate = friend['bdate'].split('.')
if len(bdate) > 2:
res[int(bdate[2])] += 1
res = sorted(res.items(), key=operator.itemgetter(1), reverse=True)
if result_type == 'count':
return dict(res)
first_10p = {city[0]: 3 for city in res[:int(len(res) * 0.1)]}
first_30p = {city[0]: 2 for city in res[int(len(res) * 0.1):int(len(res) * 0.3)]}
first_10p.update(first_30p)
if len(first_10p) == 0:
first_10p = {res[0][0]: 1}
return first_10p
    def search_user_by_age(self, user_info, group_id, age=(1, 100)):
        """
        Determine the user's declared birth year via users.search in a group.

        Recursively narrows the age range: if the user appears in the search
        results for [age_from, age_to] the lower half is searched next,
        otherwise the range is moved/expanded upwards.

        :param user_info: info of the user being checked (id, first/last name)
        :param group_id: id of any group the user belongs to
        :param age: range of presumed ages, inclusive
        :return: the exact birth year the user declared on their profile
        """
        info = self.api.users.search(q=user_info['first_name'] + ' ' + user_info['last_name'],
                                     group_id=group_id,
                                     age_from=age[0],
                                     age_to=age[1],
                                     count=1000)['items']
        for user in info:
            if user['id'] == user_info['id']:
                # found inside the current range
                if age[0] == age[1]:
                    return date.today().year - age[0]
                # narrow the search to the lower half of the range
                return self.search_user_by_age(user_info=user_info,
                                               group_id=group_id,
                                               age=(age[0], (age[1] - age[0]) // 2 + age[0]))
        if age[0] == age[1]:
            return date.today().year - age[0] - 1
        # not found below: search the part of the range above age[1]
        return self.search_user_by_age(user_info=user_info,
                                       group_id=group_id,
                                       age=(age[1], (age[1] - age[0]) * 2 + age[0]))
def user_age(self, uid, friends_full=None):
"""
ะััะธัะปะธัั ะฟัะตะดะฟะพะปะฐะณะฐะตะผัะน ะฒะพะทัะฐัั ะฟะพะปัะทะพะฒะฐัะตะปั 2ะผั ัะฟะพัะพะฑะฐะผะธ:
-ะผะฐะบัะธะผะฐะปัะฝะพะต ะบะพะป-ะฒะพ ะฟะพ ะดััะทััะผ (ะดะปั <25 ะปะตั ะฒะฟะพะปะฝะต ัะพัะฝัะน ัะตะท-ั)
-ะฟะพ ะฟะพะธัะบั ะฒ ะณััะฟะฟะต (ัะพัะฝัะน ัะตะทัะปััะฐั ัะบะฐะทะฐะฝะฝะพะณะพ ะฟะพะปัะทะพะฒะฐัะตะปะตะผ)
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพัะพะณะพ ะฟัะพะฒะตััะตะผ
:param friends_full: ะผะฐััะธะฒ ะฟะพะปะฝะพะน ะธะฝัะพัะผะฐัะธะธ ะพ ะดััะทััั
:return: ัะปะพะฒะฐัั ั ัะตะทัะปััะฐัะฐะผะธ
"""
res = {'user_defined': -1, 'friends_predicted': -1}
user_info = self.api.users.get(user_ids=uid, fields='bdate')[0]
if 'bdate' in user_info:
bdate = user_info['bdate'].split('.')
if len(bdate) > 2:
res['user_defined'] = bdate[2]
else:
user_group = self.api.groups.get(user_id=uid, count=1)['items']
if 0 in user_group:
user_group = user_group[0]
res['user_defined'] = self.search_user_by_age(user_info=user_info,
group_id=user_group)
else:
user_group = self.api.groups.get(user_id=uid, count=1)['items']
if 0 in user_group:
user_group = user_group[0]
res['user_defined'] = self.search_user_by_age(user_info=user_info,
group_id=user_group)
common_age = int(list(self.score_common_age(uid=uid).items())[0][0])
res['friends_predicted'] = common_age
return res
    def check_friends_online(self, uid):
        """
        Check which of the user's friends are currently online.

        :param uid: id of the user being checked
        :return: result of the friends.getOnline API call
        """
        return self.api.friends.getOnline(user_id=uid)
def likes_friends(self, uid, limit_entries=100, friends_full=None):
"""
ะะตะฝะตัะฐัะพั ะธะฝัะพัะผะฐัะธะธ ะพ ะปะฐะนะบะฐั
ั ะดััะทะตะน ะฝะฐ ัะพัะพ ะธ ััะตะฝะต
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพัะพะณะพ ะฟัะพะฒะตััะตะผ
:param limit_entries: ะผะฐะบัะธะผะฐะปัะฝะพะต ะบะพะป-ะฒะพ ะทะฐะฟะธัะตะน ะฝะฐ ะบะฐะถะดะพะผ ะดััะณะต
:param friends_full: ะผะฐััะธะฒ ะฟะพะปะฝะพะน ะธะฝัะพัะผะฐัะธะธ ะพ ะดััะทััั
"""
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
friends = self.friends_all_ids(uid=uid, friends_full=friends_full)
count = len(friends)
for i, friend in enumerate(friends, 1):
for like in self.likes_friend_wall(uid=uid, friend_uid=friend, limit=limit_entries):
if like['liked'] or like['copied']:
r = like
r.update({"count": count,
"current": i,
"name": friends_full[i-1]['first_name'] + ' ' + friends_full[i-1]['last_name']})
yield r
for like in self.likes_friend_photos(uid=uid, friend_uid=friend, limit=limit_entries):
if like['liked'] or like['copied']:
r = like
r.update({"count": count,
"current": i,
"name": friends_full[i-1]['first_name'] + ' ' + friends_full[i-1]['last_name']})
yield r
yield {"count": len(friends), "current": i, "inf": 0}
    def likes_groups(self, uid, limit=100, groups=None):
        """
        Generator of info about the user's likes in communities.

        :param uid: id of the user being checked
        :param limit: maximum number of wall entries per group
        :param groups: optional pre-fetched array of subscriptions
        """
        # TODO: it would be good to remove the code duplicated from likes_friends
        if groups is None:
            groups = self.tools.get_all('users.getSubscriptions', 200, values={"extended": 1, "user_id": uid})
        for i, group in enumerate(groups['items'], 1):
            try:
                for like in self.likes_group_wall(uid=uid, group_id=group['id'], limit=limit):
                    if like['liked'] or like['copied']:
                        r = like
                        r.update({"count": groups['count'],
                                  "current": i,
                                  "name": groups['items'][i-1]['name']})
                        yield r
            except vk_api.exceptions.ApiError as error:
                # TODO: handle this properly instead of matching codes ad hoc
                if error.code == 13:
                    self.logger.error("Size is too big, skipping group_id=" + str(group['id']))
                elif error.code == 15:
                    self.logger.warning("Wall is disabled, skipping group_id=" + str(group['id']))
                else:
                    raise error
            except vk_api.exceptions.ApiHttpError as error:
                # TODO: unclear failure mode (HTTP 500 from the server) — investigate
                self.logger.error("Server 500 error, skipping group_id=" + str(group['id']))
            # progress marker: group i fully processed
            yield {"count": groups['count'], "current": i, "inf": 0}
def likes_friends_and_groups(self, uid, limit=100, friends_need=False, groups_need=False, friends_full=None, groups=None):
"""
ะะตะฝะตัะฐัะพั ะธะฝัะพัะผะฐัะธะธ ะพ ะปะฐะนะบะฐั
ะฒ ะณััะฟะฟะฐั
ะธ ัะพะพะฑัะตััะฒะฐั
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพัะพะณะพ ะฟัะพะฒะตััะตะผ
:param limit: ะบะพะปะธัะตััะฒะพ ะทะฐะฟะธัะตะน, ะบะพัะพััะต ะฝัะถะฝะพ ะทะฐะณััะถะฐัั ะฝะฐ ะบะฐะถะดะพะผ ัะปะตะผะตะฝัะต
:param friends_need: ะฝะตะพะฑั
ะพะดะธะผะฐ ะฟัะพะฒะตัะบะฐ ั ะดััะทะน
:param groups_need: ะฝะตะพะฑั
ะพะดะธะผะฐ ะฟัะพะฒะตัะบะฐ ั ะณััะฟะฟ
:param friends_full: ะผะฐััะธะฒ ะฟะพะปะฝะพะน ะธะฝัะพัะผะฐัะธะธ ะพ ะดััะทััั
:param groups: ะผะฐััะธะฒ ะฟะพะดะฟะธัะพะบ
:return:
"""
friends_full = self.friends_all_full(uid, friends_full)
if groups is None:
# TODO: subsriptions ะผะพะถะตั ัะพะดะตัะถะฐัั ะปัะดะตะน, ะฝะฐะดะพ ะดะพัะฐะฑะพัะฐัั, ะฒะพะทะผะพะถะฝั ะฑะฐะณะธ
groups = self.tools.get_all('users.getSubscriptions', 200, values={"extended": 1, "user_id": uid})
friends_count = friends_need*len(friends_full)
groups_count = groups_need*groups['count']
count = friends_count + groups_need*groups['count']
if friends_need:
for like in self.likes_friends(uid=uid, limit_entries=limit, friends_full=friends_full):
r = like
r.update({"count": count})
yield r
if groups_need:
for like in self.likes_groups(uid=uid, limit=limit, groups=groups):
r = like
r.update({"count": count, "current": like['current'] + friends_count})
yield r
def score_likes_friends(self, uid, limit=100, friends_full=None):
"""
ะะพะทะฒัะฐัะฐะตั ะฑะฐะปะปั ะทะฐ ะปะฐะนะบะธ ะดััะทััะผ
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพัะพะณะพ ะฟัะพะฒะตััะตะผ
:param limit: ะบะพะปะธัะตััะฒะพ ะทะฐะฟะธัะตะน ะทะฐะณััะถะฐะตะผัั
ะฝะฐ ะบะฐะถะดะพะน ัััะฐะฝะธัะต
:param friends_full: ะผะฐััะธะฒ ะฟะพะปะฝะพะน ะธะฝัะพัะผะฐัะธะธ ะพ ะดััะทััั
"""
score = 0
for post_info in self.likes_friends(uid=uid,
limit_entries=limit,
friends_full=friends_full):
if 'liked' in post_info:
if post_info['liked'] == 1:
score += 1
if 'copied' in post_info:
if post_info['copied'] == 1:
score += 10
if 'inf' in post_info:
temp = score
score = 0
yield 'likes_friends', post_info['current']-1, temp
def score_likes_self(self, uid, limit=100, friends_full=None):
"""
ะะพะทะฒัะฐัะฐะตั ะพัะบะธ ะทะฐ ะปะฐะนะบะธ ะดััะทะตะน ั ะฟะพะปัะทะพะฒะฐัะตะปั ะฝะฐ ัััะฐะฝะธัะต
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพัะพะณะพ ะฟัะพะฒะตััะตะผ
:param limit: ะผะฐะบัะธะผะฐะปัะฝะพะต ัะธัะปะพ ะทะฐะฟะธัะตะน
:param friends_full: ะผะฐััะธะฒ ะฟะพะปะฝะพะน ะธะฝัะพัะผะฐัะธะธ ะพ ะดััะทััั
"""
friends = self.friends_all_ids(uid=uid, friends_full=friends_full)
res = [0]*len(friends)
for key, post in enumerate(self.tools.get_all_iter(method='wall.get', max_count=100, values={'owner_id': uid},
limit=limit)):
if key > limit:
break
post_likes = self.tools.get_all(method='likes.getList', max_count=100, values={'type': 'post',
'skip_own':1,
'owner_id': uid,
'item_id': post['id']})['items']
post_reposts = self.tools.get_all(method='likes.getList', max_count=100, values={'type': 'post',
'skip_own': 1,
'owner_id': uid,
'filter': 'copies',
'item_id': post['id']})['items']
for user in post_likes:
if user in friends:
res[friends.index(user)] += 1
for user in post_reposts:
if user in friends:
if user in friends:
res[friends.index(user)] += 10
for key, photo in enumerate(self.tools.get_all_iter(method='photos.getAll',
max_count=200,
values={'owner_id': uid, 'extended': 1, 'no_service_albums': 0})):
if key>limit:
break
photo_likes = self.tools.get_all(method='likes.getList', max_count=100, values={'type': 'photo',
'skip_own':1,
'owner_id': uid,
'item_id': photo['id']})['items']
for user in photo_likes:
if user in friends:
if user in friends:
res[friends.index(user)] += 1
for i, friend in enumerate(res):
yield 'likes_self', i, friend
def score_mutual_friends(self, uid, friends_full=None):
"""
ะะพะทะฒัะฐัะฐะตั ะพัะบะธ ะทะฐ ะพะฑัะธั
ะดััะทะตะน
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพัะพะณะพ ะฟัะพะฒะตััะตะผ
:param friends_full: ะผะฐััะธะฒ ะฟะพะปะฝะพะน ะธะฝัะพัะผะฐัะธะธ ะพ ะดััะทััั
"""
res = []
friends = self.friends_all_ids(uid=uid, friends_full=friends_full)
for mutual in self.friends_common_iter(uid=uid, friends_ids=friends):
res.append(mutual['common_count'])
res_sorted = sorted(list(set(res)))
count = len(res_sorted)
for i, friend in enumerate(res):
yield 'friends', i, res_sorted.index(friend)*10//count
    def score_all_common_age(self, uid, friends_full=None):
        """
        Yield per-friend scores for sharing the user's (estimated) age.

        :param uid: id of the user being checked
        :param friends_full: optional pre-fetched full friend info
        """
        friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
        user_age = self.user_age(uid=uid, friends_full=friends_full)
        # Combine the two estimates into
        # (estimated_birth_year, tight_tolerance, loose_tolerance);
        # -1 marks a missing estimate on either side.
        def get_user_real_age(age):
            if age[0] == age[1]:
                return age[0],1,2
            elif age[0] == -1:
                return age[1],2,3
            elif age[1] == -1:
                return age[0],2,3
            else:
                # estimates disagree: use their midpoint, with the disagreement
                # itself as the loose tolerance (-1 disables the tight band)
                return (int(age[0])+int(age[1]))//2, -1, abs(int(age[0])-int(age[1]))
        user_real_age = get_user_real_age((user_age['user_defined'], user_age['friends_predicted']))
        for i, friend in enumerate(friends_full):
            score = 0
            if 'bdate' in friend:
                date = friend['bdate'].split('.')
                if len(date)>2:
                    # +3 within the tight tolerance of the estimate, +1 within
                    # the loose tolerance
                    if int(date[2]) - user_real_age[1] <= user_real_age[0] <= int(date[2]) + user_real_age[1]:
                        score = 3
                    elif int(date[2]) - user_real_age[2] <= user_real_age[0] <= int(date[2]) + user_real_age[2]:
                        score = 1
            yield 'age', i, score
def score_all_common_city(self, uid, friends_full=None):
"""
ะะพะทะฒัะฐัะฐะตั ะพัะบะธ ะทะฐ ะพะฑัะธะน ะณะพัะพะด
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพัะพะณะพ ะฟัะพะฒะตััะตะผ
:param friends_full: ะผะฐััะธะฒ ะฟะพะปะฝะพะน ะธะฝัะพัะผะฐัะธะธ ะพ ะดััะทััั
"""
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
common_city_score = self.common_city_score(uid=uid, friends_full=friends_full, result_type='first')
user = self.api.users.get(user_id=uid,fields='city')[0]
user_city = ''
if 'city' in user:
user_city = user['city']['title']
for i, friend in enumerate(friends_full):
score = 0
if 'city' in friend:
friend_city = friend['city']['title']
if friend_city in common_city_score:
score = common_city_score[friend_city]
score += (friend_city==user_city)*3
yield 'city', i, score
def score_all(self,
uid,
limit=100,
likes_friends_need=False,
likes_self_need=False,
common_friends_need=False,
common_age_need=False,
common_city_need=False,
friends_full=None):
"""
ะะตะฝะตัะฐัะพั ะธะฝัะพัะผะฐัะธะธ ะพ ะบััะณะต ะพะฑัะตะฝะธั
:param uid: id ะฟะพะปัะทะพะฒะฐัะตะปั, ะบะพัะพัะพะณะพ ะฟัะพะฒะตััะตะผ
:param limit: ะผะฐะบัะธะผะฐะปัะฝะพะต ะบะพะปะธัะตััะฒะพ ะทะฐะณััะถะฐะตะผัั
ะบะฐะถะดัะน ัะฐะท ะทะฐะฟะธัะตะน
:param likes_friends_need: ะฝะตะพะฑั
ะพะดะธะผะพ ะฟัะพะฒะตัััั ะปะฐะนะบะธ ะดััะทััะผ
:param likes_self_need: ะฝะตะพะฑั
ะพะดะธะผะพ ะฟัะพะฒะตัััั ะปะฐะนะบะธ ะดััะทะตะน
:param common_friends_need: ะฟัะพะฒะตัััั ะพะฑัะธั
ะดััะทะตะน
:param common_age_need: ะฟัะพะฒะตัััั ะพะฑัะธะน ะฒะพะทัะฐัั
:param common_city_need: ะฟัะพะฒะตัััั ะพะฑัะธะน ะณะพัะพะด
:param friends_full: ะผะฐััะธะฒ ะฟะพะปะฝะพะน ะธะฝัะพัะผะฐัะธะธ ะพ ะดััะทััั
"""
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
if common_age_need:
for element in self.score_all_common_age(uid=uid, friends_full=friends_full):
yield element
if common_city_need:
for element in self.score_all_common_city(uid=uid, friends_full=friends_full):
yield element
if common_friends_need:
for element in self.score_mutual_friends(uid=uid, friends_full=friends_full):
yield element
if likes_self_need:
for element in self.score_likes_self(uid=uid, limit=limit, friends_full=friends_full):
yield element
if likes_friends_need:
for element in self.score_likes_friends(uid=uid, limit=limit, friends_full=friends_full):
yield element
| 48.1088 | 249 | 0.511906 | 33,090 | 0.990659 | 20,922 | 0.62637 | 0 | 0 | 0 | 0 | 14,039 | 0.420304 |
87567263e4472013f3e9c6f40f7e91f2cff4f5d5 | 40 | py | Python | tipos de datos/integer1.py | gabys12/portafolio-fundamento-de-programacion | c9b47f32e885ed6ae80b14133a609798ea034e19 | [
"CNRI-Python"
] | null | null | null | tipos de datos/integer1.py | gabys12/portafolio-fundamento-de-programacion | c9b47f32e885ed6ae80b14133a609798ea034e19 | [
"CNRI-Python"
] | null | null | null | tipos de datos/integer1.py | gabys12/portafolio-fundamento-de-programacion | c9b47f32e885ed6ae80b14133a609798ea034e19 | [
"CNRI-Python"
] | null | null | null | x = 100
y = 50
print('x=', x, 'y=', y)
| 8 | 23 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.2 |
8757b04fd4b2fb6a8af17658469898c40069ccbc | 4,776 | py | Python | tests/package/aeronpy/archive_test.py | welly87/aeron-python | 300a4344dd7f1526aeafb3a23fe5c85fb3313ad1 | [
"Apache-2.0"
] | 9 | 2018-11-16T03:06:22.000Z | 2022-03-13T19:14:15.000Z | tests/package/aeronpy/archive_test.py | welly87/aeron-python | 300a4344dd7f1526aeafb3a23fe5c85fb3313ad1 | [
"Apache-2.0"
] | 1 | 2021-11-10T12:43:09.000Z | 2021-11-10T12:43:09.000Z | tests/package/aeronpy/archive_test.py | welly87/aeron-python | 300a4344dd7f1526aeafb3a23fe5c85fb3313ad1 | [
"Apache-2.0"
] | 7 | 2019-01-21T13:52:03.000Z | 2022-03-08T21:09:06.000Z | import os
from hamcrest import *
from pytest import fixture
from tempfile import _get_candidate_names as temp_dir_candidates, tempdir
from time import sleep
from aeronpy import Archive
from aeronpy.driver import archiving_media_driver
@fixture()
def aeron_directory():
    """Launch an archiving media driver in fresh temp directories and yield
    the aeron directory path while the driver is running."""
    temp_dirs = temp_dir_candidates()
    # Bug fix: the module-level `from tempfile import tempdir` binds the
    # tempfile.tempdir global, which is None until gettempdir() has been
    # called — os.path.join(None, ...) would raise TypeError.  Resolve the
    # real temp directory explicitly.
    from tempfile import gettempdir
    base = gettempdir()
    where = os.path.join(base, next(temp_dirs))
    where_archive = os.path.join(base, next(temp_dirs))
    with archiving_media_driver.launch(aeron_directory_name=where,
                                       archive_directory_name=where_archive):
        yield where
@fixture()
def config_file():
    """Path to the archive.properties file that sits next to this test module."""
    return os.path.join(os.path.dirname(__file__), 'archive.properties')
def test__archive_create(aeron_directory):
    """An Archive can be constructed against a running driver."""
    instance = Archive(aeron_dir=aeron_directory)
    assert_that(instance, is_not(None))
def test__archive_create__with_config(aeron_directory, config_file):
    """An Archive can be constructed from a properties config file."""
    instance = Archive(config_file=config_file, aeron_dir=aeron_directory)
    assert_that(instance, is_not(None))
def test__archive_add_recorded_publication(aeron_directory):
    """A recorded publication creates a recording whose position tracks offers."""
    archive = Archive(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)
    assert_that(archive.find_last('aeron:ipc', 5000), is_(None))
    publication = archive.add_recorded_publication('aeron:ipc', 5000)
    sleep(0.5)  # give the archive time to register the new recording
    recording = archive.find_last('aeron:ipc', 5000)
    assert_that(recording, is_not(None))
    assert_that(recording.id, is_(equal_to(0)))
    offered_position = publication.offer(b'abc')
    assert_that(offered_position, is_(greater_than(0)))
    sleep(0.5)  # allow the recording position to catch up
    assert_that(recording.position, is_(equal_to(offered_position)))
def test__archive_add_recorded_exclusive_publication(aeron_directory):
    """A recorded *exclusive* publication behaves like the shared variant."""
    archive = Archive(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)
    assert_that(archive.find_last('aeron:ipc', 5000), is_(None))
    publication = archive.add_recorded_exclusive_publication('aeron:ipc', 5000)
    sleep(0.5)  # give the archive time to register the new recording
    recording = archive.find_last('aeron:ipc', 5000)
    assert_that(recording, is_not(None))
    assert_that(recording.id, is_(equal_to(0)))
    offered_position = publication.offer(b'abc')
    assert_that(offered_position, is_(greater_than(0)))
    sleep(0.5)  # allow the recording position to catch up
    assert_that(recording.position, is_(equal_to(offered_position)))
def test__recording_find(aeron_directory):
    """find() by recording id returns the recording for a new publication."""
    archive = Archive(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)
    publication = archive.add_recorded_publication('aeron:ipc', 5000)  # noqa: F841 keeps the publication alive
    sleep(0.5)
    recording = archive.find(0)
    assert_that(recording, is_not(None))
    assert_that(recording.position, is_(equal_to(0)))
def test__recording_replay(aeron_directory):
    """Replaying a recording from the start delivers every recorded message."""
    archive = Archive(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)
    publication = archive.add_recorded_publication('aeron:ipc', 5000)
    for payload in (b'abc', b'def'):
        assert_that(publication.offer(payload), is_(greater_than(0)))
    sleep(0.5)
    recording = archive.find_last('aeron:ipc', 5000)
    subscription = recording.replay('aeron:ipc', 6000)
    # the replay destination stream must not itself be recorded
    assert_that(archive.find_last('aeron:ipc', 6000), is_(None))
    received = list()
    subscription.poll(lambda data: received.append(bytes(data)))
    assert_that(received, has_length(2))
    assert_that(received, has_items(equal_to(b'abc'), equal_to(b'def')))
def test__recording_replay__from_position(aeron_directory):
    """Replaying from a later position skips the earlier message."""
    archive = Archive(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)
    publication = archive.add_recorded_publication('aeron:ipc', 5000)
    for payload in (b'abc', b'def'):
        assert_that(publication.offer(payload), is_(greater_than(0)))
    sleep(0.5)
    recording = archive.find_last('aeron:ipc', 5000)
    subscription = recording.replay('aeron:ipc', 6000, 64)  # replay from position 64
    # the replay destination stream must not itself be recorded
    assert_that(archive.find_last('aeron:ipc', 6000), is_(None))
    received = list()
    subscription.poll(lambda data: received.append(bytes(data)))
    assert_that(received, has_length(1))
    assert_that(received, has_items(equal_to(b'def')))
def test__recording_replay__from_position__not_aligned(aeron_directory):
    """Replaying from a position that is not frame-aligned must be rejected
    (the final assertion expects replay(..., 50) to raise RuntimeError)."""
    archive = Archive(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)
    publication = archive.add_recorded_publication('aeron:ipc', 5000)
    offer_result = publication.offer(b'abc')
    assert_that(offer_result, is_(greater_than(0)))
    offer_result = publication.offer(b'def')
    assert_that(offer_result, is_(greater_than(0)))
    sleep(0.5)
    recording = archive.find_last('aeron:ipc', 5000)
| 33.398601 | 105 | 0.751884 | 0 | 0 | 297 | 0.062186 | 427 | 0.089405 | 0 | 0 | 284 | 0.059464 |
8757f1428a8a127f5ffc412dd30fac1f77be1d47 | 106 | py | Python | app/script.py | tallywiesenberg/spotter-blocker | 1fffd3c62a4b3d5c7964694455e4beb08e6bc8cc | [
"MIT"
] | null | null | null | app/script.py | tallywiesenberg/spotter-blocker | 1fffd3c62a4b3d5c7964694455e4beb08e6bc8cc | [
"MIT"
] | null | null | null | app/script.py | tallywiesenberg/spotter-blocker | 1fffd3c62a4b3d5c7964694455e4beb08e6bc8cc | [
"MIT"
] | null | null | null | import edits
from edits import PageEditor
pe = PageEditor(keyword='spider', orientation='block')
pe.edit() | 26.5 | 54 | 0.783019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.141509 |
87599bd37b5c1ce4791f85dbb86aca71c590ae91 | 6,095 | py | Python | src/cascade_at/settings/base_case.py | ihmeuw/cascade-at | a5b1b5da1698163fd3bbafc6288968dd9c398096 | [
"MIT"
] | 1 | 2019-10-14T23:18:04.000Z | 2019-10-14T23:18:04.000Z | src/cascade_at/settings/base_case.py | ihmeuw/cascade | a5b1b5da1698163fd3bbafc6288968dd9c398096 | [
"MIT"
] | 35 | 2018-07-17T18:37:33.000Z | 2020-03-06T13:31:35.000Z | src/cascade_at/settings/base_case.py | ihmeuw/cascade | a5b1b5da1698163fd3bbafc6288968dd9c398096 | [
"MIT"
] | 4 | 2018-07-13T00:01:35.000Z | 2019-09-02T23:56:11.000Z | BASE_CASE = {
"model": {
"random_seed": 495279142,
"default_age_grid": "0 0.01917808 0.07671233 1 5 10 20 30 40 50 60 70 80 90 100",
"default_time_grid": "1990 1995 2000 2005 2010 2015 2016",
"add_calc_emr": "from_both",
"birth_prev": 0,
"ode_step_size": 5,
"minimum_meas_cv": 0.2,
"rate_case": "iota_pos_rho_zero",
"data_density": "log_gaussian",
"constrain_omega": 1,
"modelable_entity_id": 2005,
"decomp_step_id": 3,
"research_area": 2,
"drill": "drill",
"drill_location_start": 70,
"bundle_id": 173,
"crosswalk_version_id": 5699,
"split_sex": "most_detailed",
"add_csmr_cause": 587,
"drill_sex": 2,
"model_version_id": 472515,
"title": "test diabetes australasia marlena -- 2",
"relabel_incidence": 2,
"description": "<p>diabetes<\/p>",
"addl_ode_stpes": "0.01917808 0.07671233 1.0",
"zero_sum_random": [
"iota"
],
"bound_frac_fixed": 1.0e-8,
"drill_location_end": [72],
"quasi_fixed": 0
},
"max_num_iter": {
"fixed": 200,
"random": 100
},
"print_level": {
"fixed": 5,
"random": 0
},
"accept_after_max_steps": {
"fixed": 5,
"random": 5
},
"students_dof": {
"priors": 5,
"data": 5
},
"log_students_dof": {
"priors": 5,
"data": 5
},
"eta": {
"priors": 1.0e-5,
"data": 1.0e-5
},
"config_version": "mnorwood",
"rate": [
{
"age_time_specific": 1,
"default": {
"value": {
"density": "gaussian",
"min": 1.0e-6,
"mean": 0.00015,
"max": 0.01,
"std": 1.5,
"eta": 1.0e-6
},
"dage": {
"density": "gaussian",
"min": -1,
"mean": 0,
"max": 1,
"std": 0.01
},
"dtime": {
"density": "gaussian",
"min": -1,
"mean": 0,
"max": 1,
"std": 0.01
}
},
"rate": "iota",
"age_grid": "0 5 10 50 100"
},
{
"age_time_specific": 1,
"default": {
"value": {
"density": "gaussian",
"min": 1.0e-6,
"mean": 0.0004,
"max": 0.01,
"std": 0.2
},
"dage": {
"density": "gaussian",
"min": -1,
"mean": 0,
"max": 1,
"std": 0.01
},
"dtime": {
"density": "gaussian",
"min": -1,
"mean": 0,
"max": 1,
"std": 0.01
}
},
"rate": "chi"
},
{
"age_time_specific": 0,
"default": {
"value": {
"density": "log_gaussian",
"min": 0,
"mean": 0.1,
"max": 0.2,
"std": 1,
"eta": 1.0e-6
},
"dage": {
"density": "uniform",
"min": -1,
"mean": 0,
"max": 1
},
"dtime": {
"density": "uniform",
"min": -1,
"mean": 0,
"max": 1
}
},
"rate": "pini"
}
],
"random_effect": [
{
"age_time_specific": 0,
"default": {
"value": {
"density": "gaussian",
"mean": 0,
"std": 1
},
"dage": {
"mean": 0,
"std": 1,
"density": "uniform"
},
"dtime": {
"mean": 0,
"std": 1,
"density": "uniform"
}
},
"rate": "iota"
}
],
"study_covariate": [
{
"age_time_specific": 0,
"mulcov_type": "rate_value",
"default": {
"value": {
"density": "uniform",
"min": -1,
"mean": 0,
"max": 1
},
"dage": {
"density": "uniform",
"min": -1,
"mean": 0,
"max": 1
},
"dtime": {
"density": "uniform",
"min": -1,
"mean": 0,
"max": 1
}
},
"study_covariate_id": 0,
"transformation": 0,
"measure_id": 41
}
],
"country_covariate": [
{
"age_time_specific": 0,
"mulcov_type": "rate_value",
"measure_id": 41,
"country_covariate_id": 28,
"transformation": 0,
"default": {
"value": {
"density": "uniform",
"min": -1,
"mean": 0,
"max": 1,
"eta": 1.0e-5
}
}
}
],
"gbd_round_id": 6,
"csmr_cod_output_version_id": 84,
"csmr_mortality_output_version_id": 8003,
"location_set_version_id": 684,
"tolerance": {
"fixed": 1.0e-6,
"random": 1.0e-6
}
}
| 27.331839 | 89 | 0.31534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,174 | 0.356686 |
875a2fb1710235abb404827fb32a14c9aebf6c27 | 14,778 | py | Python | navrep/scripts/train_rnn.py | ReykCS/navrep | 22ee4727268188414a8121f069e45c2ab798ca19 | [
"MIT"
] | 48 | 2020-11-26T10:16:08.000Z | 2022-03-24T15:22:08.000Z | navrep/scripts/train_rnn.py | ReykCS/navrep | 22ee4727268188414a8121f069e45c2ab798ca19 | [
"MIT"
] | 1 | 2021-12-14T02:08:18.000Z | 2022-03-14T09:17:25.000Z | navrep/scripts/train_rnn.py | ReykCS/navrep | 22ee4727268188414a8121f069e45c2ab798ca19 | [
"MIT"
] | 18 | 2020-12-09T08:37:43.000Z | 2022-03-30T06:56:38.000Z | from __future__ import print_function
import numpy as np
import os
from datetime import datetime
import random
import time
import pickle
from pyniel.python_tools.path_tools import make_dir_if_not_exists
import pandas as pd
from navrep.models.vae2d import ConvVAE
from navrep.models.vae1d import Conv1DVAE
from navrep.models.rnn import reset_graph, default_hps, MDNRNN, MAX_GOAL_DIST
from navrep.tools.test_worldmodel import rnn_worldmodel_error, vae1d_rnn_worldmodel_error
from navrep.tools.commonargs import parse_common_args
from navrep.scripts.train_vae import _Z
_H = 512
_G = 2 # goal states
_A = 3 # action dims
if __name__ == "__main__":
    # Train the MDN-RNN world model ("M" in world-model terminology) on
    # precomputed sequences of VAE latents + robot goal state + actions.
    common_args, _ = parse_common_args()
    VARIANT = common_args.environment
    START_TIME = datetime.now().strftime("%Y_%m_%d__%H_%M_%S")
    MAX_STEPS = common_args.n
    if MAX_STEPS is None:
        MAX_STEPS = 222222
    N_EPOCHS = MAX_STEPS # don't limit based on epoch
    # "1d" selects the 1D-lidar VAE backend; empty string selects the 2D rings VAE.
    VAE_TYPE = "1d" if common_args.backend == "VAE1D_LSTM" else ""
    hps = default_hps()
    # RNN inputs are VAE latents (_Z) concatenated with goal state (_G).
    hps = hps._replace(seq_width=_Z+_G, action_width=_A, rnn_size=_H)
    print(hps)
    # hps.batch_size = 100
    # hps.max_seq_len = 1000
    # hps = hps._replace(learning_rate=0.0001)
    # Per-variant dataset / log / model paths.
    # NOTE(review): model_hyperparams_path is assigned in every branch but never
    # used below (only log_hyperparams_path is written) — confirm intent.
    if VARIANT == "ian":
        dataset_folder = os.path.expanduser("~/navrep/datasets/M/ian")
        test_dataset_folder = os.path.expanduser("~/navrep/datasets/V/ian")
        log_path = os.path.expanduser("~/navrep/logs/M/rnn_train_log_{}.csv".format(START_TIME))
        log_hyperparams_path = os.path.expanduser(
            "~/navrep/logs/M/rnn_train_log_{}.hyperparams.pckl".format(START_TIME))
        model_hyperparams_path = os.path.expanduser("~/navrep/models/M/rnn.hyperparams.pckl")
        model_path = os.path.expanduser("~/navrep/models/M/rnn.json")
        vae_model_path = os.path.expanduser("~/navrep/models/V/vae.json")
    if VARIANT == "toy":
        dataset_folder = os.path.expanduser("~/navrep/datasets/M/toy")
        test_dataset_folder = os.path.expanduser("~/navrep/datasets/V/toy")
        log_path = os.path.expanduser("~/navrep/logs/M/toyrnn_train_log_{}.csv".format(START_TIME))
        log_hyperparams_path = os.path.expanduser(
            "~/navrep/logs/M/toyrnn_train_log_{}.hyperparams.pckl".format(START_TIME))
        model_hyperparams_path = os.path.expanduser("~/navrep/models/M/toyrnn.hyperparams.pckl")
        model_path = os.path.expanduser("~/navrep/models/M/toyrnn.json")
        vae_model_path = os.path.expanduser("~/navrep/models/V/toyvae.json")
    if VARIANT == "markone":
        dataset_folder = os.path.expanduser("~/navrep/datasets/M/markone")
        test_dataset_folder = os.path.expanduser("~/navrep/datasets/V/markone")
        log_path = os.path.expanduser("~/navrep/logs/M/markonernn_train_log_{}.csv".format(START_TIME))
        log_hyperparams_path = os.path.expanduser(
            "~/navrep/logs/M/markonernn_train_log_{}.hyperparams.pckl".format(START_TIME))
        model_hyperparams_path = os.path.expanduser("~/navrep/models/M/markonernn.hyperparams.pckl")
        model_path = os.path.expanduser("~/navrep/models/M/markonernn.json")
        vae_model_path = os.path.expanduser("~/navrep/models/V/markonevae.json")
    if VARIANT == "marktwo":
        dataset_folder = os.path.expanduser("~/navrep/datasets/M/marktwo")
        test_dataset_folder = os.path.expanduser("~/navrep/datasets/V/marktwo")
        log_path = os.path.expanduser("~/navrep/logs/M/marktwornn_train_log_{}.csv".format(START_TIME))
        log_hyperparams_path = os.path.expanduser(
            "~/navrep/logs/M/marktwornn_train_log_{}.hyperparams.pckl".format(START_TIME))
        model_hyperparams_path = os.path.expanduser("~/navrep/models/M/marktwornn.hyperparams.pckl")
        model_path = os.path.expanduser("~/navrep/models/M/marktwornn.json")
        vae_model_path = os.path.expanduser("~/navrep/models/V/marktwovae.json")
    if VARIANT == "navreptrain":
        dataset_folder = os.path.expanduser("~/navrep/datasets/M/navreptrain")
        test_dataset_folder = os.path.expanduser("~/navrep/datasets/V/navreptrain")
        log_path = os.path.expanduser(
            "~/navrep/logs/M/navreptrainrnn{}_train_log_{}.csv".format(VAE_TYPE, START_TIME))
        log_hyperparams_path = os.path.expanduser(
            "~/navrep/logs/M/navreptrainrnn{}_train_log_{}.hyperparams.pckl".format(VAE_TYPE, START_TIME))
        model_hyperparams_path = os.path.expanduser(
            "~/navrep/models/M/navreptrainrnn{}.hyperparams.pckl".format(VAE_TYPE))
        model_path = os.path.expanduser("~/navrep/models/M/navreptrainrnn{}.json".format(VAE_TYPE))
        vae_model_path = os.path.expanduser("~/navrep/models/V/navreptrainvae{}.json".format(VAE_TYPE))
    if common_args.dry_run:
        # Redirect every output path to /tmp so a dry run leaves ~/navrep untouched.
        log_path = log_path.replace(os.path.expanduser("~/navrep"), "/tmp/navrep")
        log_hyperparams_path = log_hyperparams_path.replace(os.path.expanduser("~/navrep"), "/tmp/navrep")
        model_path = model_path.replace(os.path.expanduser("~/navrep"), "/tmp/navrep")
        model_hyperparams_path = model_hyperparams_path.replace(os.path.expanduser("~/navrep"), "/tmp/navrep")
    make_dir_if_not_exists(os.path.dirname(model_path))
    make_dir_if_not_exists(os.path.dirname(log_path))
    # load preprocessed data
    files = []
    for dirpath, dirnames, filenames in os.walk(dataset_folder):
        for filename in [f for f in filenames if f.endswith(".npz")]:
            files.append(os.path.join(dirpath, filename))
    all_data = []
    for path in files:
        arrays = np.load(path)
        all_data.append(
            [
                arrays["mus"],
                arrays["logvars"],
                arrays["robotstates"],
                arrays["actions"],
                arrays["dones"],
                arrays["rewards"],
            ]
        )
    n_total_frames = np.sum([mu.shape[0] for mu, _, _, _, _, _ in all_data])
    chunksize = hps.batch_size * hps.max_seq_len # frames per batch (100'000)
    print("total frames: ", n_total_frames)
    if n_total_frames < chunksize:
        raise ValueError()
    reset_graph()
    model = MDNRNN(hps)
    model.print_trainable_params()
    vae = None
    viewer = None
    values_logs = None
    start = time.time()
    for epoch in range(1, N_EPOCHS + 1):
        # print('preparing data for epoch', epoch)
        batches_start = time.time()
        # flatten all sequences into one
        mu_sequence = np.zeros((n_total_frames, _Z), dtype=np.float32)
        logvar_sequence = np.zeros((n_total_frames, _Z), dtype=np.float32)
        robotstate_sequence = np.zeros((n_total_frames, 5), dtype=np.float32)
        action_sequence = np.zeros((n_total_frames, 3), dtype=np.float32)
        done_sequence = np.zeros((n_total_frames, 1), dtype=np.float32)
        reward_sequence = np.zeros((n_total_frames, 1), dtype=np.float32)
        i = 0
        random.shuffle(all_data)
        for mu, logvar, robotstate, action, done, reward in all_data:
            L = len(mu)
            mu_sequence[i : i + L, :] = mu.reshape(L, _Z)
            logvar_sequence[i : i + L, :] = logvar.reshape(L, _Z)
            robotstate_sequence[i : i + L, :] = robotstate.reshape(L, 5)
            action_sequence[i : i + L, :] = action.reshape(L, 3)
            done_sequence[i : i + L, :] = done.reshape(L, 1)
            reward_sequence[i : i + L, :] = reward.reshape(L, 1)
            i += L
        # sample z from mu and logvar
        z_sequence = mu_sequence + np.exp(logvar_sequence / 2.0) * np.random.randn(
            *(mu_sequence.shape)
        )
        # add goalstate (robotstate[:2]) to z
        robotstate_sequence[:, :_G] = robotstate_sequence[:, :_G] / MAX_GOAL_DIST # normalize goal dist
        z_rs_sequence = np.concatenate([z_sequence, robotstate_sequence[:, :_G]], axis=-1)
        # resize array to be reshapable into sequences and batches
        n_chunks = n_total_frames // chunksize
        # reshape into sequences
        z_rs_sequences = np.reshape(
            z_rs_sequence[: n_chunks * chunksize, :], (-1, hps.max_seq_len, _Z+_G)
        )
        action_sequences = np.reshape(
            action_sequence[: n_chunks * chunksize], (-1, hps.max_seq_len, 3)
        )
        done_sequences = np.reshape(
            done_sequence[: n_chunks * chunksize], (-1, hps.max_seq_len)
        )
        reward_sequences = np.reshape(
            reward_sequence[: n_chunks * chunksize], (-1, hps.max_seq_len)
        )
        num_sequences = len(z_rs_sequences)
        if num_sequences == 0:
            raise ValueError("Not enough data for a single batch")
        # shuffle
        random_idxs = list(range(num_sequences))
        random.shuffle(random_idxs)
        random_idxs = np.reshape(random_idxs, (-1, hps.batch_size))
        # reshape into batches
        z_rs_batches = z_rs_sequences[random_idxs]
        action_batches = action_sequences[random_idxs]
        done_batches = done_sequences[random_idxs]
        reward_batches = reward_sequences[random_idxs]
        num_batches = len(z_rs_batches)
        # result is of size (n_batches, batch_size, seq_len, ...)
        # print('number of batches', num_batches)
        batches_end = time.time()
        batch_time_taken = batches_end - batches_start
        # print('time taken to create batches', batch_time_taken)
        batch_state = model.sess.run(model.initial_state)
        # One optimization step per batch; learning rate decays with global step.
        for batch_z_rs, batch_action, batch_done, batch_reward in zip(
            z_rs_batches, action_batches, done_batches, reward_batches
        ):
            step = model.sess.run(model.global_step)
            curr_learning_rate = (hps.learning_rate - hps.min_learning_rate) * (
                hps.decay_rate
            ) ** step + hps.min_learning_rate
            feed = {
                model.batch_z_rs: batch_z_rs,
                model.batch_action: batch_action,
                model.batch_restart: batch_done,
                model.initial_state: batch_state,
                model.lr: curr_learning_rate,
            }
            (train_cost, z_cost, r_cost, batch_state, train_step, _) = model.sess.run(
                [
                    model.cost,
                    model.z_cost,
                    model.r_cost,
                    model.final_state,
                    model.global_step,
                    model.train_op,
                ],
                feed,
            )
            lidar_e = None
            state_e = None
            # Every 200 steps: evaluate world-model error on the test set and checkpoint.
            if step % 200 == 0:
                # load VAE
                if VAE_TYPE == "1d":
                    if vae is None:
                        vae = Conv1DVAE(z_size=_Z, batch_size=model.hps.max_seq_len-1, is_training=False)
                        vae.load_json(vae_model_path)
                    lidar_e, state_e = vae1d_rnn_worldmodel_error(model, test_dataset_folder, vae)
                else:
                    if vae is None:
                        vae = ConvVAE(z_size=_Z, batch_size=model.hps.max_seq_len-1, is_training=False)
                        vae.load_json(vae_model_path)
                    lidar_e, state_e = rnn_worldmodel_error(model, test_dataset_folder, vae)
                print("Test: lidar error {}, state error {}".format(lidar_e, state_e))
                model.save_json(model_path)
            # Every 20 steps: print progress and append a row to the CSV log.
            if step % 20 == 0 and step > 0:
                end = time.time()
                time_taken = end - start
                start = time.time()
                output_log = (
                    "step: %d, lr: %.6f, cost: %.4f, z_cost: %.4f, r_cost: %.4f, train_time_taken: %.4f"
                    % (step, curr_learning_rate, train_cost, z_cost, r_cost, time_taken)
                )
                print(output_log)
                # log
                values_log = pd.DataFrame(
                    [[step, curr_learning_rate, train_cost, z_cost, r_cost, time_taken, lidar_e, state_e]],
                    columns=["step", "lr", "cost", "z_cost", "r_cost", "train_time_taken",
                             "lidar_test_error", "state_test_error"],
                )
                if values_logs is None:
                    values_logs = values_log.copy()
                else:
                    values_logs = values_logs.append(values_log, ignore_index=True)
                values_logs.to_csv(log_path)
                with open(log_hyperparams_path, "wb") as f:
                    pickle.dump(hps, f)
            # Debug-only visualization path; calls exit() after showing one batch.
            if common_args.render: # Visually check that the batch is sound
                import matplotlib.pyplot as plt
                from navrep.tools.rings import generate_rings
                reset_graph()
                vae = ConvVAE(z_size=_Z, batch_size=1, is_training=False)
                vae.load_json(vae_model_path)
                rings_def = generate_rings(64, 64)
                rings_pred = vae.decode(batch_z_rs[0, :, :_Z]) * rings_def["rings_to_bool"]
                plt.ion()
                for i, ring in enumerate(rings_pred):
                    rings_def["visualize_rings"](ring, scan=None)
                    plt.scatter(batch_z_rs[0, i, _Z], batch_z_rs[0, i, 33], color='red')
                    plt.ylim([0, 10])
                    plt.title("{:.1f} {:.1f} {:.1f}".format(*batch_action[0, i]))
                    plt.pause(0.5)
                exit()
            # Dead code kept for reference (disabled with `if False`).
            if False: # render all sequences in batch at once
                from navrep.tools.render import render_lidar_batch
                from navrep.tools.rings import generate_rings
                reset_graph()
                vae = ConvVAE(z_size=_Z, batch_size=100, is_training=False)
                vae.load_json(vae_model_path)
                rings_def = generate_rings(64, 64)
                batch_decodings = []
                for i in range(batch_z_rs.shape[1]): # for each sequence step
                    rings_pred = vae.decode(batch_z_rs[:, i, :_Z]) * rings_def["rings_to_bool"]
                    predicted_ranges = rings_def["rings_to_lidar"](rings_pred, 1080)
                    batch_decodings.append(predicted_ranges)
                for i, predicted_ranges in enumerate(batch_decodings):
                    viewer = render_lidar_batch(
                        predicted_ranges, 0, 2 * np.pi, viewer=viewer
                    )
                    import pyglet
                    filename = "/tmp/frame{:03}.png".format(i)
                    pyglet.image.get_buffer_manager().get_color_buffer().save(filename)
                    print("image file writen : ", filename)
            # Stop on step budget: break out of the batch loop...
            if MAX_STEPS is not None:
                if train_step > MAX_STEPS:
                    break
        # ...and out of the epoch loop.
        if MAX_STEPS is not None:
            if train_step > MAX_STEPS:
                break
    # Final checkpoint after training finishes.
    model.save_json(model_path)
| 47.517685 | 110 | 0.60563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,751 | 0.186155 |
875bf153c38837563607848c2a4373f619fc49b1 | 893 | py | Python | Programming I Python/Chapter 5/b project 5a/Eric Blanco Problem 13.py | eebr99/Python-Projects | 016f72b3f13793cb73c333c6eaab313eddfae9e7 | [
"MIT"
] | null | null | null | Programming I Python/Chapter 5/b project 5a/Eric Blanco Problem 13.py | eebr99/Python-Projects | 016f72b3f13793cb73c333c6eaab313eddfae9e7 | [
"MIT"
] | null | null | null | Programming I Python/Chapter 5/b project 5a/Eric Blanco Problem 13.py | eebr99/Python-Projects | 016f72b3f13793cb73c333c6eaab313eddfae9e7 | [
"MIT"
] | null | null | null | #Write a function named "falling_distance" that accepts an object's falling time
#(in seconds) as an argument. The function should return the distance, in meters
#, that the object has fallen during the time interval.Write a program that
#calls the function in a loop that passes the values 1 through 10 as arguments
#and displays the return value.
def main():
    """Print a table of free-fall distances for times 1 through 10 seconds.

    Calls falling_distance() once per whole second and displays the distance
    in meters, matching the range announced in the introductory text.
    """
    print("This program shows an object's fall distance (in meters) with each")
    print('passing second from 1 to 10.')
    print()
    print('Time(s)\tDistance(m)')
    print('--------------------')
    # Bug fix: range(10 + 1) started at 0 and printed an extra t=0 row,
    # contradicting the "from 1 to 10" promise printed above.
    for time in range(1, 10 + 1):
        distance = falling_distance(time)
        print(time, '\t', format(distance, ',.1f'))
def falling_distance(time):
    """Return the distance, in meters, an object falls in `time` seconds.

    Uses the free-fall formula d = (1/2) * g * t**2 with g = 9.8 m/s^2.
    """
    g = 9.8  # gravitational acceleration, m/s^2
    return 0.5 * g * time ** 2
main()
| 35.72 | 81 | 0.646137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 567 | 0.634938 |
875d850a61502a05ae1607c589db3488582a51b9 | 3,140 | py | Python | prepare_data.py | aascode/OMGEmotionChallenge | 0f29ba2d4e9472c889f3aa72545fb835c80dcf0f | [
"Apache-2.0"
] | null | null | null | prepare_data.py | aascode/OMGEmotionChallenge | 0f29ba2d4e9472c889f3aa72545fb835c80dcf0f | [
"Apache-2.0"
] | null | null | null | prepare_data.py | aascode/OMGEmotionChallenge | 0f29ba2d4e9472c889f3aa72545fb835c80dcf0f | [
"Apache-2.0"
] | 1 | 2021-01-07T05:42:54.000Z | 2021-01-07T05:42:54.000Z | from __future__ import print_function
import argparse
import os
import sys
import subprocess
import datetime
def get_formatted_time(seconds):
    """Format a duration given in seconds as H:MM:SS[.ffffff].

    Delegates to datetime.timedelta's string form (e.g. 3661 -> "1:01:01"),
    which also handles fractional seconds and day overflow.
    """
    delta = datetime.timedelta(seconds=seconds)
    return str(delta)
def dl_youtube(link, target_file):
    """Download `link` with youtube-dl into `target_file` (mp4).

    Runs youtube-dl as a subprocess and blocks until it finishes; download
    errors are ignored (-i) and partially downloaded files are resumed (-c).
    """
    command = [
        "youtube-dl",
        "-f", "best",
        "--merge-output-format", "mp4",
        "--restrict-filenames",
        "--socket-timeout", "20",
        "-iwc",
        "--write-info-json",
        "--write-annotations",
        "--prefer-ffmpeg",
        link,
        "-o", target_file,
    ]
    process = subprocess.Popen(command)
    process.communicate()
def prepare_data(file, target_dir):
    # Download each annotated YouTube video listed in the split CSV and cut out
    # the utterance segments with ffmpeg into <target_dir>/<video>/<utterance>.
    # NOTE(review): `file` is the CSV path (one "link,start,end,video,utterance"
    # row per line, with a header); the name shadows the Python 2 builtin.
    temp_directory = os.path.abspath(os.path.join(target_dir, "youtube_videos_temp"))
    if not os.path.exists(temp_directory):
        os.makedirs(temp_directory)
    print ("here")  # NOTE(review): leftover debug print
    with open(file) as f:
        next(f)  # skip the CSV header row
        for l in f:
            l = l.strip()
            if len(l) > 0:
                # Only the first five columns are used.
                link, start, end, video, utterance = l.split(',')[:5]
                #print "Link:", link
                result_dir = os.path.join(os.path.join(target_dir, video))
                if not os.path.exists(result_dir):
                    os.makedirs(result_dir)
                result_filename = os.path.abspath(os.path.join(result_dir, utterance))
                #dl video with youtube-dl
                target_file = os.path.abspath(os.path.join(temp_directory, video + ".mp4"))
                if not os.path.exists(target_file):
                    # Download the full video only once; subsequent utterances
                    # from the same video reuse the cached file.
                    dl_youtube(link, target_file)
                # Cut [start, end] out of the downloaded video, re-encoding to
                # h264/aac mp4. Return code of ffmpeg is stored but unused.
                p = subprocess.call(["ffmpeg",
                                     "-y",
                                     "-i", target_file,
                                     "-ss", get_formatted_time(float(start)),
                                     "-c:v", "libx264", "-preset", "superfast",
                                     "-f", "mp4",
                                     "-c:a", "aac",
                                     "-to", get_formatted_time(float(end)),
                                     '-strict', '-2',
                                     result_filename],
                                    )
if __name__ == "__main__":
    # Command-line entry point: parse arguments, validate paths, run the pipeline.
    parser = argparse.ArgumentParser()
    parser.add_argument("--split-file", help = "Metadata file")
    parser.add_argument("--target-dir")
    opt = parser.parse_args()
    if not os.path.exists(opt.split_file):
        print("Cannot find split file")
        sys.exit(-1)
    if not os.path.exists(opt.target_dir):
        os.makedirs(opt.target_dir)
    else:
        # The existing directory is reused; processing continues anyway.
        print("Target dir already exists.")
    prepare_data(opt.split_file, opt.target_dir)
| 35.681818 | 91 | 0.486306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 744 | 0.236943 |
875e13669b90d566535e678aeb9626f67f4c6d30 | 3,101 | py | Python | PyBlokusTools/pyblokustools/testserver/Message.py | HenrikThoroe/SWC-2021 | 8e7eee25e3a6fda7e863591b05fa161d8a2ebc78 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | PyBlokusTools/pyblokustools/testserver/Message.py | HenrikThoroe/SWC-2021 | 8e7eee25e3a6fda7e863591b05fa161d8a2ebc78 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | PyBlokusTools/pyblokustools/testserver/Message.py | HenrikThoroe/SWC-2021 | 8e7eee25e3a6fda7e863591b05fa161d8a2ebc78 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | from typing import Any, Final, Tuple, List
from enum import IntEnum
from .MsgType import MsgType
class Message():
    """Generic message envelope used for later handling.

    Arguments:
        type {MsgType} -- Type of message
        payload {Any} -- Payload used for later handling
    """

    def __init__(self, type: MsgType, payload: Any) -> None:
        self.type: Final[MsgType] = type
        self.payload: Final[Any] = payload

    def __repr__(self) -> str:
        """Return a printable representation of this message."""
        return "Message({}, {})".format(self.type, self.payload)
class MementoMsg():
    """Message carrying a snapshot of the game state.

    Arguments:
        currentTurn {int} -- Current turn
    """

    def __init__(self, currentTurn: int) -> None:
        self.currentTurn: Final[int] = currentTurn

    def __repr__(self) -> str:
        """Return a printable representation of this snapshot."""
        return "MementoMsg({})".format(self.currentTurn)
class ResultCause(IntEnum):
    """An enum of different game end causes.
    """
    REGULAR = 0         # game finished through normal play
    LEFT = 1            # a player left the game
    RULE_VIOLATION = 2  # a player violated the rules
    SOFT_TIMEOUT = 3    # soft time limit exceeded
    HARD_TIMEOUT = 4    # hard time limit exceeded
class ResultEnd(IntEnum):
    """An enum of different game endings.
    """
    LOSE = 0  # player lost
    DRAW = 1  # game ended in a draw
    WIN = 2   # player won
class ResultMsg():
    """Message carrying the final result of a game.

    Arguments:
        score {List[int, int]} -- GameScore both players reached
        end {List[ResultEnd, ResultEnd]} -- TournamentPoints both players earned
        cause {List[ResultCause, ResultCause]} -- Game-ending causes for both players
    """

    def __init__(self, score: List[int], end: List[ResultEnd], cause: List[ResultCause]) -> None:
        self.score: Final[List[int]] = score
        self.end: Final[List[ResultEnd]] = end
        self.cause: Final[List[ResultCause]] = cause

    def swap(self) -> None:
        """Swap Player1 & Player2 in place."""
        for per_player in (self.score, self.end, self.cause):
            per_player.reverse()

    def __repr__(self) -> str:
        """Return a printable representation of this result."""
        return "ResultMsg(({}, {}), ({}, {}), ({}, {}))".format(
            self.score[0], self.score[1],
            self.end[0], self.end[1],
            self.cause[0], self.cause[1])
class PreparedMsg():
    """Message describing a freshly prepared game.

    Arguments:
        roomId {str} -- RoomId of newly prepared game
        reservations {Tuple[str]} -- Reservation codes for clients associated with the game
    """

    def __init__(self, roomId: str, reservations: Tuple[str, str]) -> None:
        self.roomId: Final[str] = roomId
        self.reservations: Final[Tuple[str, str]] = reservations

    def __repr__(self) -> str:
        """Return a printable representation of this message."""
        return "PreparedMsg({}, ({}, {}))".format(
            self.roomId, self.reservations[0], self.reservations[1])
| 29.817308 | 131 | 0.58207 | 2,991 | 0.964528 | 0 | 0 | 0 | 0 | 0 | 0 | 1,608 | 0.518542 |
875f3dc2268c5b69947a406b55babc4f4af29f54 | 2,984 | py | Python | celo_sdk/tests/stable_token_tests.py | rcroessmann/celo-sdk-py | 8826adaa6bbcb53374e7c26f0638a7fc973a9dd9 | [
"Apache-2.0"
] | 7 | 2021-02-09T20:44:41.000Z | 2022-03-30T10:56:06.000Z | celo_sdk/tests/stable_token_tests.py | rcroessmann/celo-sdk-py | 8826adaa6bbcb53374e7c26f0638a7fc973a9dd9 | [
"Apache-2.0"
] | 4 | 2020-11-04T07:39:10.000Z | 2022-02-19T00:06:46.000Z | celo_sdk/tests/stable_token_tests.py | rcroessmann/celo-sdk-py | 8826adaa6bbcb53374e7c26f0638a7fc973a9dd9 | [
"Apache-2.0"
] | 8 | 2020-11-03T14:45:26.000Z | 2022-02-23T12:41:05.000Z | import time
import unittest
from web3 import Web3
from celo_sdk.kit import Kit
from celo_sdk.tests import test_data
class TestStableTokenWrapper(unittest.TestCase):
    # Integration tests for the cUSD ("StableToken") contract wrapper.
    # NOTE(review): these require a live node at http://localhost:8544 with
    # funded accounts; they are not isolated unit tests.

    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named `cls`; `self` here is actually the class object, so all
        # attributes below are shared class attributes.
        self.kit = Kit('http://localhost:8544')
        self.stable_token_wrapper = self.kit.base_wrapper.create_and_get_contract_by_name(
            'StableToken')
        self.kit.wallet.sign_with_provider = True
        self.accounts = self.kit.w3.eth.accounts
        # Register every derived private key from the test fixture.
        for _, v in test_data.deriv_pks.items():
            self.kit.wallet_add_new_key = v
        # Default sender is the first node account.
        self.kit.w3.eth.defaultAccount = self.accounts[0]
        self.kit.wallet_change_account = self.accounts[0]

    def test_name(self):
        # Token name reported by the contract.
        name = self.stable_token_wrapper.name()
        self.assertEqual(name, 'Celo Dollar')

    def test_symbol(self):
        # Token ticker symbol.
        symbol = self.stable_token_wrapper.symbol()
        self.assertEqual(symbol, 'cUSD')

    def test_decimals(self):
        # cUSD uses 18 decimals (wei-style units).
        decimals = self.stable_token_wrapper.decimals()
        self.assertEqual(decimals, 18)

    def test_total_supply(self):
        # Supply varies per chain state, so only the type is checked.
        total_supply = self.stable_token_wrapper.total_supply()
        self.assertEqual(type(total_supply), int)

    def test_balance_of(self):
        # Balance varies per chain state, so only the type is checked.
        balance = self.stable_token_wrapper.balance_of(self.accounts[0])
        self.assertEqual(type(balance), int)

    def test_owner(self):
        # Owner must at least be a well-formed address.
        owner = self.stable_token_wrapper.owner()
        self.assertEqual(self.kit.w3.isAddress(owner), True)

    def test_get_inflation_parameters(self):
        # Inflation parameters come back as a dict; contents are chain-dependent.
        infl_params = self.stable_token_wrapper.get_inflation_parameters()
        self.assertEqual(type(infl_params), dict)

    def test_transfer(self):
        # Transfer 1 cUSD from account 0 to account 1 and verify the balance delta.
        initial_balance_2 = self.stable_token_wrapper.balance_of(
            self.accounts[1])
        tx_hash = self.stable_token_wrapper.transfer(
            self.accounts[1], self.kit.w3.toWei(1, 'ether'))
        self.assertEqual(type(tx_hash), str)
        time.sleep(5)  # wait until transaction finalized
        final_balance_2 = self.stable_token_wrapper.balance_of(
            self.accounts[1])
        self.assertEqual(final_balance_2, initial_balance_2 +
                         self.kit.w3.toWei(1, 'ether'))

    def test_transfer_from(self):
        # Approve account 1 to spend 1 cUSD of account 0, then spend it as
        # account 1 into account 2.
        tx_hash = self.stable_token_wrapper.increase_allowance(self.accounts[1], self.kit.w3.toWei(1, 'ether'))
        self.assertEqual(type(tx_hash), str)
        self.kit.w3.eth.defaultAccount = self.accounts[1]
        self.kit.wallet_change_account = self.accounts[1]
        # NOTE(review): the initial balance is read for test_data.address3 but
        # the final balance for self.accounts[2] — these must be the same
        # address for the assertion to be meaningful; confirm the fixture.
        initial_balance_3 = self.stable_token_wrapper.balance_of(
            test_data.address3)
        tx_hash = self.stable_token_wrapper.transfer_from(self.accounts[0], self.accounts[2], self.kit.w3.toWei(1, 'ether'))
        time.sleep(5)  # wait until transaction finalized
        final_balance_3 = self.stable_token_wrapper.balance_of(
            self.accounts[2])
        self.assertEqual(final_balance_3, initial_balance_3 + self.kit.w3.toWei(1, 'ether'))
| 33.909091 | 124 | 0.678284 | 2,863 | 0.95945 | 0 | 0 | 522 | 0.174933 | 0 | 0 | 124 | 0.041555 |
876278e2ecaa859862d1b4df367b7a0946313f4c | 3,450 | py | Python | compipe/utils/parameters.py | ImagineersHub/pyspinner | 272c000af3bfaef866bf4ae6a4694e5449ceca68 | [
"MIT"
] | null | null | null | compipe/utils/parameters.py | ImagineersHub/pyspinner | 272c000af3bfaef866bf4ae6a4694e5449ceca68 | [
"MIT"
] | null | null | null | compipe/utils/parameters.py | ImagineersHub/pyspinner | 272c000af3bfaef866bf4ae6a4694e5449ceca68 | [
"MIT"
] | null | null | null | """Represent the command parameter strings."""
ARG_COMMAND = 'command'
ARG_ARGUMENT = 'argument'
ARG_COUNT = 'count'
ARG_CHANNEL = 'channel'
ARG_CLIENT = 'client'
ARG_GUID = 'guid'
ARG_CHANGELIST = 'cl'
ARG_DEV_CHANNEL = 'dev_channel'
ARG_QUEUE_WORKER_NUMBER = 'worker_number'
ARG_CALLBACK = 'callback'
ARG_PAYLOAD = 'payload'
ARG_MESSAGE = 'message'
ARG_USER = 'user'
ARG_ERROR = 'error'
ARG_PATH = 'path'
ARG_ISO = 'iso'
ARG_DESCRIPTION = 'description'
ARG_SOURCE = 'source'
ARG_TARGET = 'target'
ARG_RESPONSE = 'response'
ARG_COMMAND_ID = 'command_id'
ARG_MESSAGE_TYPE = 'msg_status'
ARG_MODE = 'mode'
ARG_FOLDER = 'folder'
ARG_VERBOSE = 'verbose'
ARG_FORCE = 'force'
ARG_REBUILD = 'rebuild'
ARG_SHELVE = 'shelve'
ARG_CHECKOUT = 'co'
ARG_LOOP = 'loop'
ARG_SAVE = 'save'
ARG_SIMILARITY = 'sim'
ARG_UPLOAD = 'upload'
ARG_METADATA = 'metadata'
ARG_CONFIG = 'config'
ARG_RESET = 'reset'
ARG_NAME = 'name'
ARG_DOC = 'doc'
ARG_INDEX = 'index'
ARG_CQL = 'cql'
ARG_HEADERS = 'headers'
ARG_FILES = 'files'
ARG_FILE = 'file'
ARG_TXT = 'txt'
ARG_OUTPUT = 'output'
ARG_CDP = 'cdp' # create document placeholder : doc_app
ARG_CTP = 'ctp'
ARG_VERSION = 'version'
ARG_SPACE = 'space'
ARG_PREBUILD = 'prebuild'
ARG_OUT_OF_SERVICE = 'oos' # out of service
ARG_CID = 'cid' # represent the flag checking CID build
ARG_DB_ALIAS = 'db_alias'
ARG_COMMIT = 'commit'
ARG_DUPLICATE = 'dup' # duplicate (flag)
ARG_RENAME = 'rename'
ARG_COPY = 'copy'
ARG_ACTION = 'action'
ARG_COLOR = 'color'
ARG_OPTIMIZE = 'opt'
ARG_SYNC = 'sync'
ARG_DATA = 'data'
ARG_DIR = 'dir'
ARG_PARENT = 'parent'
ARG_POPULATE = 'populate'
ARG_PROJECT = 'project'
ARG_CLASS = 'cls'
ARG_OBJ = 'object'
ARG_CACHE = 'cache'
ARG_RESOURCE = 'resource'
ARG_DEBUG = 'debug'
ARG_QUEUE_WORKER_NUM = 'queue_worker_num'
ARG_SUBPROCESS_NUM = 'subprocess_num'
ARG_CLASSIFICATION = 'classification'
ARG_TEAM = 'team'
ARG_CONSOLE = 'console'
ARG_FILTER = 'filter'
ARG_PUSH = 'push'
ARG_ORIGINAL = 'original'
ARG_ORIGIN = 'origin'
ARG_DIRECTION = 'direction'
ARG_MAIN = 'main'
ARG_TRANSFORM = 'transform'
ARG_AXES = 'axes'
ARG_PATTERN = 'pattern'
ARG_REVERSE = 'reverse'
ARG_MESH = 'mesh'
ARG_OFFSET = 'offset'
ARG_FACTOR = 'factor'
ARG_SCALE = 'scale'
ARG_DENOISING = 'denoising'
ARG_MODEL = 'model'
ARG_DNN_MODELS = 'dnn_models'
ARG_REFORMAT = 'reformat'
ARG_SCENE = 'scene'
ARG_MATERIAL = 'material'
ARG_SIZE = 'size'
ARG_TEXTURE = 'texture'
ARG_TYPE = 'type'
ARG_FACES = 'faces'
ARG_SMOOTH_LEVEL = 'smooth_level'
ARG_MAX_FACES = 'max_faces'
ARG_DOMINANT = 'dominant'
ARG_SHOW = "show"
ARG_PREVIEW = "preview"
ARG_OPACITY = "opacity"
ARG_PADDING = 'padding'
ARG_CROP = 'crop'
ARG_DECIMATE = 'decimate'
ARG_RESAMPLE = 'resample'
ARG_LIMITS = 'limits'
# space names
ARG_SPACE_SAVOIA = 'savoia'
ARG_SPACE_HYPER = 'hyper'
ARG_SPACE_MARS = 'mars'
# values
ARG_FRAMES = 'frames'
ARG_FRAME = 'frame'
# Mars Project
ARG_LOCAL_DRIVE = 'local_drive'
ARG_SEGMENT = 'segment'
ARG_THRESHOLD = 'threshold'
ARG_TAGS = 'tags'
ARG_SPACING = 'spacing'
ARG_MODALITY = 'modality'
ARG_WORLDSPACE_OFFSET = 'worldspace_offset'
ARG_WINDOW_WIDTH = 'ww'
ARG_WINDOW_LEVEL = 'wl'
# server config
ARG_EXECUTABLE_TOOLS = 'executable_tools'
ARG_PYTHON_MODULES = 'python_modules'
# Channel Texture Maps
ARG_AMBIENTOCCLUSION_MAP = 'aomap'
ARG_EMISSION_MAP = 'emmap'
ARG_CHANNEL_MAP = 'chmap'
ARG_CHANNEL_TRANS_MAP = 'tchmap'
ARG_DIFFUSE_MAP = 'dcmap'
def parse_bool(value):
    """Interpret *value* as a boolean flag.

    Any value whose lowercase string form is "yes", "true", "t" or "1"
    counts as True; everything else (including None) is False.
    """
    truthy_tokens = {"yes", "true", "t", "1"}
    return str(value).lower() in truthy_tokens
| 23 | 58 | 0.73971 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,342 | 0.388986 |
87639751a19ec144d88f1bcfcf5fc14664d50b35 | 3,412 | py | Python | python/avi/migrationtools/f5_converter/test/excel_reader.py | thisisshi/sdk | 99c52caffeebbfd41f43931fea2b5b1323841892 | [
"Apache-2.0"
] | 37 | 2016-03-14T22:27:17.000Z | 2022-03-03T05:18:39.000Z | python/avi/migrationtools/f5_converter/test/excel_reader.py | thisisshi/sdk | 99c52caffeebbfd41f43931fea2b5b1323841892 | [
"Apache-2.0"
] | 195 | 2016-03-14T23:47:55.000Z | 2021-05-12T11:28:56.000Z | python/avi/migrationtools/f5_converter/test/excel_reader.py | thisisshi/sdk | 99c52caffeebbfd41f43931fea2b5b1323841892 | [
"Apache-2.0"
] | 50 | 2016-03-14T05:52:14.000Z | 2022-01-06T06:12:00.000Z | import json
import pandas
def output_sanitization(path_to_excel, path_to_out_json=None):
    '''Cross-check the migration Excel report against the output JSON:
    collect the F5 IDs marked SUCCESSFUL/PARTIAL in the spreadsheet and
    print those that do not appear in the converted output objects.

    NOTE(review): pandas.read_excel returns a DataFrame, which has no
    .sheets()/.ncols/.cell API (that is the xlrd workbook API) — this
    function appears broken by an xlrd->pandas conversion; confirm.
    NOTE(review): path_to_out_json defaults to None but is opened
    unconditionally below, so the default would crash; confirm callers
    always pass it.
    '''
    path = path_to_excel
    out_obj = []
    excel_obj = []
    # Output Sanitization
    wb = pandas.read_excel(path, engine='openpyxl')
    cols = 0  # NOTE(review): unused variable
    for s in wb.sheets():
        # Locate the "F5 ID" and "Status" columns from the header row.
        # NOTE(review): if either header sits in column 0, the truthiness
        # check below treats it as "not found".
        for col in range(s.ncols):
            if s.cell(0, col).value == "F5 ID":
                cols_id = col
            if s.cell(0, col).value == "Status":
                cols_status = col
        if cols_id and cols_status:
            for row in range(s.nrows):
                if row == 0:
                    continue
                if s.cell(row, cols_status).value == 'SUCCESSFUL' or\
                   s.cell(row, cols_status).value == 'PARTIAL':
                    # NOTE(review): these two comparisons are against the Cell
                    # object, not .value like the line below — likely a bug.
                    if s.cell(row, cols_id ) == 'hash' or\
                       s.cell(row, cols_id) == 'oneconnect':
                        value = None
                    else:
                        value = s.cell(row, cols_id).value
                    if value:
                        excel_obj.append(value)
        break  # only the first sheet is processed
    # Collect the names of all converted objects (except META and VsVip).
    with open(path_to_out_json, 'r') as file_strem:
        file_strem = json.load(file_strem)
        for entity in file_strem:
            if entity != 'META' and entity != 'VsVip':
                # print file_strem
                for obj in file_strem[entity]:
                    out_obj.append(obj.get('name'))
    print(len(out_obj))
    print(len(excel_obj))
    excel_obj.sort()
    out_obj.sort()
    # NOTE(review): despite the message, this prints Excel objects that are
    # MISSING from the output, not the common ones.
    print("Object Common in Both Excel and Output ")
    for obj in excel_obj:
        if obj not in out_obj:
            print(obj)
def percentage_success(path_to_excel):
    # Print the percentage of SUCCESSFUL/PARTIAL rows per object type found
    # in the migration Excel report.
    # NOTE(review): pandas.read_excel returns a DataFrame, which has no
    # .sheets()/.ncols/.cell API (that is the xlrd workbook API) — this
    # function appears broken by an xlrd->pandas conversion; confirm.
    # Find the "Status" and type columns in the header row.
    path = path_to_excel
    wb = pandas.read_excel(path, engine='openpyxl')
    for s in wb.sheets():
        for col in range(s.ncols):
            if s.cell(0, col).value == "Status":
                col_status_val = col
            if s.cell(0, col).value == "F5 type" or\
               s.cell(0, col).value == "Netscaler Command":
                col_type_val = col
        break  # only the first sheet is scanned
    # Tally success/fail counts per object type.
    report_dict = dict()
    for s in wb.sheets():
        for row in range(s.nrows):
            if row == 0:
                continue  # skip the header row
            val, state = s.cell(row, col_type_val), s.cell(row, col_status_val)
            state = state.value
            val = val.value
            fail = 1
            suc = 0
            if state == "PARTIAL" or state == "SUCCESSFUL":
                fail = 0
                suc = 1
            if val not in report_dict:
                report_dict.update({val: {'success': suc, 'fail': fail}})
            else:
                report_dict[val]['success'] += suc
                report_dict[val]['fail'] += fail
        break  # only the first sheet is tallied
    # Convert counts to percentages (100% when a type has no rows counted).
    for key in report_dict.keys():
        if report_dict[key]['success'] + report_dict[key]['fail'] != 0:
            percent = float(report_dict[key]['success'] * 100 /
                            (report_dict[key]['success'] + report_dict[key]['fail']))
            report_dict[key].update({'percent': percent})
        else:
            report_dict[key].update({'percent': 100.0})
    for key in report_dict.keys():
        print(key, " -> ", report_dict[key]['percent'], "%")
| 35.541667 | 85 | 0.517878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.154455 |