| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3 to 288) | content_id (string, len 40) | detected_licenses (list, 0 to 112 items) | license_type (string, 2 classes) | repo_name (string, len 5 to 115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 to 12.7k) | extension (string, 142 classes) | content (string, len 128 to 8.19k) | authors (list, 1 item) | author_id (string, len 1 to 132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a430b405c518f5492c4bfcf40ae484ae3432d216
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02618/s417415114.py
|
ddebb487f588173570c9610c70cadb46a063199e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,303
|
py
|
from sys import exit
import copy
#import numpy as np
#from collections import deque
d, = map(int, input().split())
c= list(map(int, input().split()))
s=[list(map(int, input().split())) for _ in range(d)]
# t=[int(input()) for _ in range(d)]
sche=[0 for _ in range(d)]
s_tmp=float("inf")*(-1)
for off in range(0,13):
last=[0 for _ in range(26)]
sche=[0 for _ in range(d)]
for day in range(1,d+1):
idx=day-1
d_tmp=float("inf")*(-1)
i_tmp=0
for t in range(26):
delta=0
l_tmp=copy.copy(last)
delta+=s[idx][t]
l_tmp[t]=day
for l in range(26):
delta-=0.5*(off+1)*c[l]*((day-l_tmp[l])+(day+off-l_tmp[l]))
if delta>=d_tmp:
d_tmp=delta
i_tmp=t
sche[idx]=i_tmp+1
# score+=d_tmp
last[i_tmp]=day
# print(score)
# print(i_tmp+1)
score=0
last=[0 for _ in range(26)]
for i in range(1,d+1):
idx=i-1
score+=s[idx][sche[idx]-1]
for l in range(26):
score-=c[l]*(i-last[l])
last[sche[idx]-1]=i
# print(score)
if score>=s_tmp:
s_tmp=score
sche_tmp=copy.copy(sche)
for i in sche_tmp:
print(i)
# print(s_tmp)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f1a84740d0a5c3bf1ba1441ba380dc64176cbe97
|
d7ad696cd1b550bb41d20f87b83c984ec7f19aa7
|
/practice/design_pattern/03_abstract_factory/abstract_factory.py
|
5fa712b16a1b0fb0cd9de79237fa18d370861894
|
[] |
no_license
|
mida-hub/hobby
|
2947d10da7964d945e63d57b549c1dcb90ef7305
|
6e6f381e59fc2b0429fab36474d867aa3855af77
|
refs/heads/master
| 2022-12-21T23:33:14.857931
| 2022-12-19T16:30:34
| 2022-12-19T16:30:34
| 147,890,434
| 0
| 0
| null | 2021-03-20T04:31:58
| 2018-09-08T01:31:59
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
# Abstract Factory
# abstract_factory.py
from abc import ABC, abstractmethod
class AbcItem(ABC):
def __init__(self, caption):
self.caption = caption
@abstractmethod
def make_html(self):
pass
class PageItem(AbcItem):
def __init__(self, title, author):
self.title = title
self.author = author
self.content = []
def add(self, item):
self.content.append(item)
def write_html(self, file_name):
with open(file_name, 'w', encoding='utf-8') as fh:
fh.write(self.make_html())
class LinkItem(AbcItem):
def __init__(self, caption, url):
super().__init__(caption)
self.url = url
class ListItem(AbcItem):
def __init__(self, caption):
super().__init__(caption)
self.items = []
def add(self, item):
self.items.append(item)
class Factory(ABC):
@abstractmethod
def create_page_item(self, title, author):
pass
@abstractmethod
def create_link_item(self, caption ,url):
pass
@abstractmethod
def create_list_item(self, caption):
pass
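# The file above stops at the abstract Factory. A minimal concrete factory is
# sketched below for illustration only; the Html* class names are hypothetical
# and are not part of the original code.
class HtmlLinkItem(LinkItem):
    def make_html(self):
        return f'<a href="{self.url}">{self.caption}</a>'
class HtmlListItem(ListItem):
    def make_html(self):
        inner = "".join(f"<li>{item.make_html()}</li>" for item in self.items)
        return f"<ul>{self.caption}{inner}</ul>"
class HtmlPageItem(PageItem):
    def make_html(self):
        body = "".join(item.make_html() for item in self.content)
        return f"<html><head><title>{self.title}</title></head><body>{body}</body></html>"
class HtmlFactory(Factory):
    def create_page_item(self, title, author):
        return HtmlPageItem(title, author)
    def create_link_item(self, caption, url):
        return HtmlLinkItem(caption, url)
    def create_list_item(self, caption):
        return HtmlListItem(caption)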
|
[
"rusuden0106@gmail.com"
] |
rusuden0106@gmail.com
|
bb48285834ee29beb7a898493b7d407dafdf7dd6
|
8c7a187ebfe858ff3f840602585d166b29fce576
|
/appstore/regulate_underscores.py
|
db0232fa39df3b96f78c3dc29fa2e15e90914bc1
|
[] |
no_license
|
ohannes/pythonScripts
|
b756faa2e6d5314cb04c7afc0ca07f69027f59b2
|
5249b2735d8b2a9a2c6ad8a1ae625cb47f50d0b5
|
refs/heads/master
| 2020-04-06T04:20:29.565042
| 2015-07-19T17:40:39
| 2015-07-19T17:40:39
| 34,119,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
import os
import sys
sys.path.append(os.environ["ohannes"])
from ohannes import *
input_file = getStrArg(1, 1)
output_file = input_file + ".regulated"
lines = getFileLines(input_file)
ftw = open(output_file, write_mode)
for line in lines:
sharp_found = False
equal_found = False
line_regulated = False
if not "=>" in line or not "#" in line or not "_" in line:
ftw.write(line)
continue
index = 0
while True:
if index == len(line) - 1:
ftw.write(line[index])
break
if line[index] == "#":
sharp_found = True
if line[index] == "=" and line[index+1] == ">":
equal_found = True
if line[index] == "_" and (not sharp_found) and equal_found and (not line_regulated):
ftw.write(line[index+1].upper())
index += 1
line_regulated = True
else:
ftw.write(line[index])
index += 1
ftw.close()
|
[
"yasinyildiza@gmail.com"
] |
yasinyildiza@gmail.com
|
2e77842e863422f2ffdaefdc8d6d8126892ba1d3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03347/s144374882.py
|
8ce3352dfe431d952e676130950485ebdc55dc2e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
import sys,queue,math,copy,itertools,bisect,collections,heapq
def main():
sys.setrecursionlimit(10**7)
INF = 10**18
MOD = 10**9 + 7
LI = lambda : [int(x) for x in sys.stdin.readline().split()]
NI = lambda : int(sys.stdin.readline())
SI = lambda : sys.stdin.readline().rstrip()
N = NI()
A = [NI() for _ in range(N)]
ans = 0
cnt = 0
for i in range(N-1,-1,-1):
if cnt == 0:
ans += A[i]
cnt = A[i]
elif A[i] < cnt -1:
print(-1)
return
elif A[i] >= cnt:
ans += A[i]
cnt = A[i]
else:
cnt -= 1
if cnt > 0:
print(-1)
else:
print(ans)
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
7098f4dd04eee66744539b132a94f353fab0fbdd
|
3373b2bbe6303dcee3ae7f7f3e715ce674878a7b
|
/packages/hyperk/wcsim_dev.py
|
7187f72a10ce70577639daba6aa414e563e94670
|
[
"MIT"
] |
permissive
|
pgjones/nusoft
|
f3515a6e2fc90622638cde0b8712ba6fcea2aa8e
|
442c7bca2f921892ecf9eb3ff6821e2a9da7b156
|
refs/heads/master
| 2020-09-12T21:44:54.453633
| 2014-10-03T20:22:09
| 2014-10-03T20:22:09
| 17,223,474
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,401
|
py
|
#!/usr/bin/env python
#
# WCSimDev
#
# The HyperK WCSim development version
#
# Author P G Jones - 2014-06-20 <p.g.jones@qmul.ac.uk> : New file.
####################################################################################################
import nusoft.package.local as local_package
import os
import nusoft.envfile
class WCSimDev(local_package.LocalPackage):
""" The WCSimDev installation package.
:param _root: version of ROOT this is dependent on
:param _geant4: version of Geant4 this is dependent on
"""
def __init__(self, system, repository):
""" Initialise this wcsim installation package.
:param system: class that manages system commands
:type system: :class:`nusoft.system.System` instance
:param repository: local name of the repository the package is from
"""
super(WCSimDev, self).__init__("wcsim-dev", system, repository)
self._root = "root_v5.34.10"
self._geant4 = "geant4.9.4.p04"
self._clhep = "clhep-2.1.0.1"
def get_dependencies(self):
""" Return a list of dependency names
:returns: list of dependency package names
:rtype: list
"""
return ["make", "g++", "gcc", "ld", "python", "python-dev", self._root, self._geant4,
self._clhep]
def _download(self):
""" Git clone the wcsim repository file."""
self._system.git_clone("ssh://git@poset.ph.qmul.ac.uk/hk-WCSim", self.get_install_path())
def _install(self):
""" Write an environment file and install wcsim."""
# Now write the environment file
self.write_env_file()
commands = ["source " + os.path.join(self._system.get_install_path(), "env_wcsim-dev.sh"),
"cd " + self.get_install_path(),
"make rootcint",
"make "]
self._system.execute_commands(commands)
def write_env_file(self):
""" Write an environment file for this package."""
env_file = nusoft.envfile.EnvFile("#wcsim environment\n")
env_file.add_source(os.path.join(self._dependencies[self._root].get_install_path(), "bin"), "thisroot")
env_file.add_source(os.path.join(self._dependencies[self._geant4].get_install_path(),
"share/geant4-9.4.4/config"),
"geant4-9.4.4")
env_file.add_environment("CLHEP_BASE_DIR", self._dependencies[self._clhep].get_install_path())
env_file.add_environment("G4WORKDIR", os.path.join(self.get_install_path(), "exe"))
env_file.write(self._system.get_install_path(), "env_wcsim-dev")
def _update(self):
""" Update the git repository."""
if not self._system.git_update(self.get_install_path()):
raise Exception("Cannot update, repository has changes")
self._install() # Now reinstall (compile)
def _remove(self):
""" Remove the install directory."""
self._system.remove(self.get_install_path())
def _is_installed(self):
""" Check if root is installed by looking for the root executable in the bin directory.
:return: True if installed
"""
sys = os.uname()[0]
return False
# The versions of WCSimDev that can be installed (only one, WCSimDev)
# [Although potentially more if the user wants].
versions = [WCSimDev]
|
[
"p.g.jones@qmul.ac.uk"
] |
p.g.jones@qmul.ac.uk
|
3ae2079875387f561dad5fbc4ea251ed85ed9d12
|
fcef3602a044a82b75eb1bdee87a5eb347a56769
|
/recolo/tests/test_coordinate_solver.py
|
d18af8c84528da0a59395aaf2880b71ea511ddb3
|
[
"MIT"
] |
permissive
|
PolymerGuy/recolo
|
5cb9c6b01d7eeb4108710606341518aa13efc1d1
|
05b14f0834fa675579eabdf43fac046259df19bb
|
refs/heads/master
| 2023-04-12T00:17:50.150126
| 2022-03-11T12:42:44
| 2022-03-11T12:42:44
| 343,329,602
| 4
| 1
|
MIT
| 2022-03-05T08:04:49
| 2021-03-01T07:39:40
|
Python
|
UTF-8
|
Python
| false
| false
| 3,877
|
py
|
from unittest import TestCase
from recolo.artificial_grid_deformation import find_coords_in_undef_conf, interpolated_disp_field
import numpy as np
def rms_diff(array1, array2):
return np.sqrt(np.nanmean((array1 - array2) ** 2.))
def biharmonic_disp_field(x, y, amp_scale=0.5):
return (amp_scale * 0.4 * np.cos(np.pi * x / 30) + amp_scale * 0.5 * np.sin(np.pi * y / 40)), (
amp_scale * 0.6 * np.cos(np.pi * x / 50) + amp_scale * 0.7 * np.sin(np.pi * y / 60))
class TestFindCoordinatesInUndefConf(TestCase):
# As X is needed for other calculations, check that we can determine X from x = X + u(X)
def test_analytical_disp_field(self):
tol = 1e-5
dx = 3.5
dy = 2.7
xs, ys = np.meshgrid(np.arange(0, 80, dx), np.arange(0, 100, dy))
Xs, Ys = find_coords_in_undef_conf(xs, ys, biharmonic_disp_field, tol=1e-9)
u_X, u_Y = biharmonic_disp_field(Xs, Ys)
errors_x = xs - Xs - u_X
errors_y = ys - Ys - u_Y
peak_error_x = np.max(np.abs(errors_x))
peak_error_y = np.max(np.abs(errors_y))
if peak_error_x > tol or peak_error_y > tol:
self.fail("Maximum error is %f and %f" % (peak_error_x, peak_error_y))
def test_interpolated_disp_field(self):
tol = 1e-5
dx = 3.5
dy = 2.7
xs, ys = np.meshgrid(np.arange(0, 80, dx), np.arange(0, 100, dy))
# Make an approximated displacement field
u_x, u_y = biharmonic_disp_field(xs, ys)
disp_func_interp = interpolated_disp_field(u_x, u_y, dx=2, dy=4, order=3)
X, Y = find_coords_in_undef_conf(xs, ys, disp_func_interp, tol=1e-9)
u_X, u_Y = disp_func_interp(X, Y)
errors_x = xs - X - u_X
errors_y = ys - Y - u_Y
peak_error_x = np.max(np.abs(errors_x))
peak_error_y = np.max(np.abs(errors_y))
if peak_error_x > tol or peak_error_y > tol:
self.fail("Maximum error is %f and %f" % (peak_error_x, peak_error_y))
def test_compare_interpolated_and_analytical(self):
# As there will always be minor error at the edges, we look at the mean error for the whole field
tol = 1.e-3
dx = 3.5
dy = 2.7
xs, ys = np.meshgrid(np.arange(0, 80, dx), np.arange(0, 100, dy))
# Make an approximated displacement field0
u_x, u_y = biharmonic_disp_field(xs, ys)
disp_func_interp = interpolated_disp_field(u_x, u_y, dx=dx, dy=dy, order=3, mode="nearest")
X_interp, Y_interp = find_coords_in_undef_conf(xs, ys, disp_func_interp, tol=1e-9)
X, Y = find_coords_in_undef_conf(xs, ys, biharmonic_disp_field, tol=1e-9)
rms_diff_X = rms_diff(X_interp, X)
rms_diff_Y = rms_diff(Y_interp, Y)
if rms_diff_X > tol or rms_diff_Y > tol:
self.fail("RMS error is %f and %f" % (rms_diff_X, rms_diff_Y))
def test_check_grid_sampling_independency(self):
# Ensure that the sampling of u_x and u_y does not have a large impact on the final results
tol = 1.e-3
dxs = [0.1,0.5,1.0,3.2]
for i,dx in enumerate(dxs):
dy = dx + 0.12
xs, ys = np.meshgrid(np.arange(0, 80, dx), np.arange(0, 100, dy))
# Make an approximated displacement field0
u_x, u_y = biharmonic_disp_field(xs, ys)
disp_func_interp = interpolated_disp_field(u_x, u_y, dx=dx, dy=dy, order=3, mode="nearest")
X_interp, Y_interp = find_coords_in_undef_conf(xs, ys, disp_func_interp, tol=1e-9)
X, Y = find_coords_in_undef_conf(xs, ys, biharmonic_disp_field, tol=1e-9)
rms_diff_X = rms_diff(X_interp, X)
rms_diff_Y = rms_diff(Y_interp, Y)
if rms_diff_X > tol or rms_diff_Y > tol:
self.fail("RMS error is %f and %f for dx=%f and dy=%f" % (rms_diff_X, rms_diff_Y,dx,dy))
|
[
"sindre.n.olufsen@ntnu.no"
] |
sindre.n.olufsen@ntnu.no
|
138d7251e99fd5b8de87425401cfefea55cd6357
|
84065ee4fb4ebeb8cb2cf1d3f6f385d2c56d787e
|
/page/__init__.py
|
359e38e1661042b3715145fd8b364217bb2881c4
|
[] |
no_license
|
bian-py/app_kefu_code
|
59ed0bcf247e5dd7b06e0f91cdd9563faa49ce60
|
2f84a152bdc2c226f2bcb6aabc34f0a5313c094e
|
refs/heads/master
| 2023-01-28T11:17:40.984458
| 2020-12-08T11:07:50
| 2020-12-08T11:07:50
| 319,289,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,863
|
py
|
from selenium.webdriver.common.by import By
# Server page locator configuration below
fwq_new = By.XPATH, '//*[contains(@content-desc,"添加新的服务器")]'
fwq_hand_input = By.XPATH, '//*[contains(@content-desc,"手工输入")]'
fwq_scan_code = By.XPATH, '//*[contains(@content-desc,"扫码二维码")]'
fwq_input_name = By.XPATH, """//android.view.View[@content-desc="{{ 'server.name' | trans }}"]/../../android.widget.EditText"""
fwq_input_URL = By.XPATH, """//android.view.View[@content-desc="{{ 'm.api.url' | trans }}"]/../../android.widget.EditText"""
fwq_save_btn = By.XPATH, '//*[contains(@content-desc,"保存")]'
fwq_confirm = By.XPATH, '//*[contains(@content-desc,"{}")]'
fwq_url_error = By.XPATH, "//*[@content-desc = '无法连接到API']"
fwq_swipe_area = By.XPATH, "//android.view.View[@scrollable = 'true']"
fwq_back_btn = By.XPATH, "//*[@content-desc = '编辑服务器']/../android.widget.Button"
fwq_modify_btn = By.XPATH, '//*[contains(@content-desc,"我的服务器 http://192.168.1.10/kefu/php/app.php?mobile-api")]' \
'/../android.view.View[2]/android.view.View[1]/android.widget.Button'
fwq_delete_btn = By.XPATH, '//*[contains(@content-desc,"我的服务器 http://192.168.1.10/kefu/php/app.php?mobile-api")]' \
'/../android.view.View[2]/android.view.View[2]/android.widget.Button'
fwq_delete_confirm_btn = By.XPATH, '//*[@content-desc="删除 "]'
# Login page locator configuration below
login_username = By.XPATH, '//android.view.View[@content-desc="登陆"]/../../android.widget.EditText'
login_password = By.XPATH, '//android.view.View[@content-desc="密码"]/../../android.widget.EditText'
login_confirm_btn = By.XPATH, '//android.widget.Button[@content-desc="登陆 "]'
login_cancel_btn = By.XPATH, '//android.widget.Button[@content-desc="取消 "]'
login_if_success = By.XPATH, '//android.view.View[@content-desc="我的服务器"]/../android.widget.Button'
login_logout = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]'
login_error_confirm = By.XPATH, '//android.widget.Button[@content-desc="OK "]'
login_error_info = By.XPATH, '//android.widget.Button[@content-desc="OK "]/../android.view.View[2]'
# User list page locator configuration below
def get_user_self_element(username):
loc = By.XPATH, '//android.view.View[@content-desc="{}"]'.format(username)
return loc
user_details_page = By.XPATH, '//android.view.View[@content-desc="用户详细信息"]'
user_details_page_back_btn = By.XPATH, '//android.view.View[@content-desc="用户详细信息"]/../android.widget.Button'
user_details_send_btn = By.XPATH, '//android.widget.Button[contains(@content-desc,"发送消息 ")]'
user_conversation_page = By.XPATH, '//android.view.View[@content-desc="会话"]'
user_conversation_page_back_btn = By.XPATH, '//android.view.View[@content-desc="会话"]/../android.widget.Button'
user_bottom_btn_talk_list = By.XPATH, '//android.view.View[contains(@content-desc,"会话 会话")]/android.view.View/android.view.View'
user_bottom_btn_user_list = By.XPATH, '//android.view.View[contains(@content-desc,"在线用户 在线用户")]/android.view.View/android.view.View'
user_talk_input = By.CLASS_NAME, 'android.widget.EditText'
user_talk_input_btn = By.XPATH, '//android.widget.EditText/../../android.widget.Button[3]'
# Navigation bar locator configuration below
dhl_menu = By.XPATH, '//android.view.View[@content-desc="我的服务器"]/../android.widget.Button'
dhl_logout = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]'
dhl_user = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[1]'
dhl_talk = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[2]'
dhl_history = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[3]'
dhl_view = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[4]'
dhl_if_user = By.XPATH, '//android.view.View[@content-desc=" 匿名用户"]'
dhl_if_history = By.XPATH, '//android.widget.Button[contains(@content-desc,"搜索 ")]'
dhl_if_view = 'org.chromium.webview_shell'
dhl_if_view_for_android_6 = 'com.android.browser'
dhl_if_logout = By.XPATH, '//*[contains(@content-desc,"添加新的服务器")]'
dhl_back_from_talk = By.XPATH, '//android.view.View[contains(@content-desc,"在线用户 在线用户")]/android.view.View/android.view.View'
# Conversation page locator configuration below
def get_talk_list_element(username):
loc = By.XPATH, '//android.view.View[@content-desc="{}"]'.format(username)
return loc
def search_history_msg(msg):
loc = By.XPATH, '//android.view.View[@content-desc="{}"]'.format(msg)
return loc
talk_bottom_btn = By.XPATH, '//android.view.View[contains(@content-desc,"会话 会话")]/android.view.View/android.view.View'
talk_back_to_list = By.XPATH, '//android.view.View[@content-desc="会话"]/../android.widget.Button'
talk_input = By.CLASS_NAME, 'android.widget.EditText'
talk_input_btn = By.XPATH, '//android.widget.EditText/../../android.widget.Button[3]'
talk_emoji_btn = By.XPATH, '//android.widget.EditText/../../android.widget.Button[2]'
talk_menu_btn = By.XPATH, '//android.widget.EditText/../../android.widget.Button[1]'
talk_attachment_btn = By.XPATH, '//android.widget.EditText/../../android.view.View[2]/android.view.View[1]'
talk_attachment_for_6_arth = By.ID,'com.android.packageinstaller:id/permission_allow_button'
talk_attachment_enter = By.XPATH, '//android.widget.TextView[contains(@text,"文")]'
talk_attachment_file_menu = By.XPATH, '//android.widget.ImageButton[@content-desc="显示根目录"]'
talk_attachment_download = By.XPATH, "//android.widget.TextView[@text = '下载']"
talk_attachment = By.XPATH, "//android.widget.TextView[@text = 'timg.png']"
talk_attachment_if = By.XPATH, '//android.view.View[@content-desc="timg.png"]'
talk_emoji_select = By.XPATH, '//android.view.View[@content-desc="emot-3"]'
talk_emoji_if = By.XPATH, '//android.widget.Image[@content-desc="emot-3"]'
talk_menu_invite_user = By.XPATH, '//android.view.View[contains(@content-desc,"邀请会话")]'
talk_invite_user = By.XPATH, '//android.view.View[@content-desc="test05"]'
talk_invite_user2 = By.XPATH, '//android.view.View[@content-desc="test04"]'
talk_invite_if = By.XPATH, '//android.view.View[@content-desc=") 已被邀请参加会谈"]'
talk_menu_exit = By.XPATH, '//android.view.View[contains(@content-desc,"离开会话")]'
talk_menu_cancel = By.XPATH, '//android.widget.Button[@content-desc="取消 "]'
# History page locator configuration below
history_enter = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[3]'
history_username_input = By.XPATH, '''//android.view.View[@content-desc="{{ 'user.name' | trans }}"]/../../android.widget.EditText'''
history_email_input = By.XPATH, '''//android.view.View[@content-desc="{{ 'user.email' | trans }}"]/../../android.widget.EditText'''
history_search_btn = By.XPATH, '//android.widget.Button[contains(@content-desc,"搜索 ")]'
history_username_if_success = By.XPATH, '//android.view.View[@content-desc="test04, test05"]'
history_email_if_success = By.XPATH, '//android.view.View[@content-desc="test04, test03"]'
history_date_start_btn = By.XPATH, '''//android.widget.Spinner[@content-desc="{{ 'from.date' | trans }} "]'''
history_date_end_btn = By.XPATH, '''//android.widget.Spinner[@content-desc="{{ 'to.date' | trans }} "]'''
history_data_start = By.XPATH, '//android.view.View[@content-desc="06 十二月 2020"]'
history_data_end = By.XPATH, '//android.view.View[@content-desc="07 十二月 2020"]'
history_date_set_btn = By.ID, 'android:id/button1'
history_check_if1 = By.XPATH, '//android.view.View[@content-desc="历史会话"]'
history_check_if2 = By.XPATH, '//android.view.View[@content-desc="这是test03发给test04的历史信息"]'
|
[
"334783747@qq.com"
] |
334783747@qq.com
|
e3ba9166ff9f094c8ede9e3c3756bb8117241c50
|
3cae667175b2d6aac6d7f3d8189e9a02c38ea1cf
|
/AOJ/ITP1/python/ITP1_1_D_Watch.py
|
c0a07556a3ffec6f81a94127a026e1a802c5a520
|
[] |
no_license
|
kokorinosoba/contests
|
3ee14acf729eda872ebec9ec7fe3431f50ae23c2
|
6e0dcd7c8ee086650d89fc65616981361b9b20b9
|
refs/heads/master
| 2022-08-04T13:45:29.722075
| 2022-07-24T08:50:11
| 2022-07-24T08:50:11
| 149,092,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
s=int(input())
print(s//3600,s//60%60,s%60,sep=':')
"""
S=int(input())
m,s=divmod(S,60)
h,m=divmod(m,60)
print(h,m,s,sep=":")
"""
|
[
"34607448+kokorinosoba@users.noreply.github.com"
] |
34607448+kokorinosoba@users.noreply.github.com
|
7765cc67a607b9556d7c75470b892c02b3fe5707
|
f208676788a901f4b66fa0a5809ef5563c1d5471
|
/classy_vision/hooks/classy_hook.py
|
ad5c0a900f8643ca8ed1f247fd4a4e113ac37853
|
[
"MIT"
] |
permissive
|
cwb96/ClassyVision
|
10e47703ec3989260840efe22db94720122f9e66
|
597a929b820efdd914cd21672d3947fa9c26d55e
|
refs/heads/master
| 2021-02-18T03:35:51.520837
| 2020-03-05T05:41:24
| 2020-03-05T05:43:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,321
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Any, Dict
from classy_vision import tasks
class ClassyHookState:
"""Class to store state within instances of ClassyHook.
Any serializable data can be stored in the instance's attributes.
"""
def get_classy_state(self) -> Dict[str, Any]:
return self.__dict__
def set_classy_state(self, state_dict: Dict[str, Any]):
self.__dict__ = state_dict
class ClassyHook(ABC):
"""Base class for hooks.
Hooks allow to inject behavior at different places of the training loop, which
are listed below in the chronological order.
on_start -> on_phase_start ->
on_step -> on_phase_end -> on_end
Deriving classes should call ``super().__init__()`` and store any state in
``self.state``. Any state added to this property should be serializable.
E.g. -
.. code-block:: python
class MyHook(ClassyHook):
def __init__(self, a, b):
super().__init__()
self.state.a = [1,2,3]
self.state.b = "my_hook"
# the following line is not allowed
# self.state.my_lambda = lambda x: x^2
"""
def __init__(self):
self.state = ClassyHookState()
def _noop(self, *args, **kwargs) -> None:
"""Derived classes can set their hook functions to this.
This is useful if they want those hook functions to not do anything.
"""
pass
@classmethod
def name(cls) -> str:
"""Returns the name of the class."""
return cls.__name__
@abstractmethod
def on_start(self, task: "tasks.ClassyTask") -> None:
"""Called at the start of training."""
pass
@abstractmethod
def on_phase_start(
self, task: "tasks.ClassyTask", local_variables: Dict[str, Any]
) -> None:
"""Called at the start of each phase."""
pass
@abstractmethod
def on_step(self, task: "tasks.ClassyTask") -> None:
"""Called each time after parameters have been updated by the optimizer."""
pass
@abstractmethod
def on_phase_end(
self, task: "tasks.ClassyTask", local_variables: Dict[str, Any]
) -> None:
"""Called at the end of each phase (epoch)."""
pass
@abstractmethod
def on_end(self, task: "tasks.ClassyTask") -> None:
"""Called at the end of training."""
pass
def get_classy_state(self) -> Dict[str, Any]:
"""Get the state of the ClassyHook.
The returned state is used for checkpointing.
Returns:
A state dictionary containing the state of the hook.\
"""
return self.state.get_classy_state()
def set_classy_state(self, state_dict: Dict[str, Any]) -> None:
"""Set the state of the ClassyHook.
Args:
state_dict: The state dictionary. Must be the output of a call to
:func:`get_classy_state`.
This is used to load the state of the hook from a checkpoint.
"""
self.state.set_classy_state(state_dict)
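# Hypothetical minimal hook, shown here only to illustrate the _noop pattern the
# docstring mentions; the LoggingHook name and print statements are not part of
# Classy Vision itself.
class LoggingHook(ClassyHook):
    # Hooks we do not care about are pointed at the shared no-op.
    on_phase_start = ClassyHook._noop
    on_step = ClassyHook._noop
    on_phase_end = ClassyHook._noop
    def on_start(self, task) -> None:
        print("training started")
    def on_end(self, task) -> None:
        print("training finished")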
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
942d5f383fb074463bde66060a1faedb97568626
|
1033c93917117f462771571c29dd046954582bd8
|
/revscores/features/proportion_of_symbolic_added.py
|
2eeae56295eca238e2c206c786853e46201b8d7b
|
[
"MIT"
] |
permissive
|
jonasagx/Revision-Scoring
|
d4e3e892ac5de3a7f3032ef2b4fcc7b6efb20330
|
dfacba014e30d49577aa1a56aab13393ecede9d5
|
refs/heads/master
| 2021-01-17T11:57:39.393734
| 2015-01-10T19:13:02
| 2015-01-10T19:13:02
| 29,064,762
| 0
| 1
| null | 2015-01-10T19:13:03
| 2015-01-10T17:25:22
|
Python
|
UTF-8
|
Python
| false
| false
| 501
|
py
|
from .chars_added import chars_added
from .feature import Feature
from .symbolic_chars_added import symbolic_chars_added
def process(chars_added, symbolic_chars_added):
return symbolic_chars_added/(chars_added or 1)
proportion_of_symbolic_added = Feature("proportion_of_symbolic_added", process,
returns=float,
depends_on=[chars_added,
symbolic_chars_added])
|
[
"aaron.halfaker@gmail.com"
] |
aaron.halfaker@gmail.com
|
55095ee0ea77fe40bd4ed68f53cd486d3d782b2d
|
fb235cccecab5368074bc43ed8677025f925dceb
|
/notebooks/westgrid/cffi_practice/__init__.py
|
6a5ba61abdb1177997fc7a77bffbd803fbab65cb
|
[] |
no_license
|
sbowman-mitre/parallel_python_course
|
88a5f767de2f0f630d48faf94983fad51ecbe50f
|
85b03809c9725c38df85b0ac1e9b34cc50c0dc54
|
refs/heads/master
| 2022-01-04T18:29:12.443568
| 2019-11-29T16:08:06
| 2019-11-29T16:08:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,313
|
py
|
# import version for use by setup.py
from ._version import version_info, __version__ # noqa: F401 imported but unused
from pathlib import Path
import os
def get_paths(*args, **kwargs):
binpath=Path(os.environ['CONDA_PREFIX'])
    libfile = binpath / Path('lib/libcffi_funs.so')
    libdir = binpath / Path('lib')
    #
    # find either libcffi_funs.so or libcffi_funs.dll
    #
    library = list(libdir.glob('libcffi_funs.*'))
    if len(library) > 1:
        raise ImportError('found more than one libcffi_funs library')
    try:
        libfile = library[0]
    except IndexError:
        libfile = Path('libcffi_funs')
    includedir = Path.joinpath(binpath.parent, Path('include'))
    for the_path in [libfile, libdir, includedir]:
        if not the_path.exists():
            print(f"couldn't find {str(the_path)}. Did you install cffi_funs?")
            out_dict = None
            break
    else:
        out_dict = dict(libfile=str(libfile), libdir=str(libdir), includedir=str(includedir))
return out_dict
|
[
"paustin@eos.ubc.ca"
] |
paustin@eos.ubc.ca
|
4668b524700dbf55e3711938e6cfd959affaa864
|
57ddfddd1e11db649536a8ed6e19bf5312d82d71
|
/AtCoder/ABC1/ABC123/ABC123-A.py
|
04402036b76e6ab088ca47d8dcc146c57c639e4d
|
[] |
no_license
|
pgDora56/ProgrammingContest
|
f9e7f4bb77714dc5088c2287e641c0aa760d0f04
|
fdf1ac5d1ad655c73208d98712110a3896b1683d
|
refs/heads/master
| 2023-08-11T12:10:40.750151
| 2021-09-23T11:13:27
| 2021-09-23T11:13:27
| 139,927,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
sm = float('inf')
bi = - float('inf')
for _ in range(5):
v = int(input())
if v < sm: sm = v
if v > bi: bi = v
if bi - sm > int(input()): print(':(')
else: print('Yay!')
|
[
"doradora.prog@gmail.com"
] |
doradora.prog@gmail.com
|
8c36fc26a272f071d2585e8f26ae41f860d794bf
|
85381529f7a09d11b2e2491671c2d5e965467ac6
|
/OJ/Leetcode/Algorithm/54. Spiral Matrix.py
|
877d512e72cd9a17631f7f49ff7225fae0269c52
|
[] |
no_license
|
Mr-Phoebe/ACM-ICPC
|
862a06666d9db622a8eded7607be5eec1b1a4055
|
baf6b1b7ce3ad1592208377a13f8153a8b942e91
|
refs/heads/master
| 2023-04-07T03:46:03.631407
| 2023-03-19T03:41:05
| 2023-03-19T03:41:05
| 46,262,661
| 19
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
# -*- coding: utf-8 -*-
# @Author: HaonanWu
# @Date: 2017-03-03 10:57:26
# @Last Modified by: HaonanWu
# @Last Modified time: 2017-03-03 11:01:34
class Solution(object):
def spiralOrder(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
ret = []
while matrix:
ret += matrix.pop(0)
if matrix and matrix[0]:
for row in matrix:
ret.append(row.pop())
if matrix:
ret += matrix.pop()[::-1]
if matrix and matrix[0]:
for row in matrix[::-1]:
ret.append(row.pop(0))
return ret
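# Illustrative usage (not part of the original solution), showing the
# layer-by-layer traversal on a small sample matrix.
if __name__ == "__main__":
    matrix = [[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9]]
    print(Solution().spiralOrder(matrix))  # [1, 2, 3, 6, 9, 8, 7, 4, 5]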
|
[
"whn289467822@outlook.com"
] |
whn289467822@outlook.com
|
143a773bbbec049d6b12a6406b50a9fce3cdd585
|
26dec2f8f87a187119336b09d90182d532e9add8
|
/mcod/resources/documents.py
|
da3e92fb9c8f6d9a843336fb6541b7e1b3f9d460
|
[] |
no_license
|
olekstomek/mcod-backend-dane.gov.pl
|
7008bcd2dbd0dbada7fe535536b02cf27f3fe4fd
|
090dbf82c57633de9d53530f0c93dddf6b43a23b
|
refs/heads/source-with-hitory-from-gitlab
| 2022-09-14T08:09:45.213971
| 2019-05-31T06:22:11
| 2019-05-31T06:22:11
| 242,246,709
| 0
| 1
| null | 2020-02-24T22:39:26
| 2020-02-21T23:11:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,197
|
py
|
from django.apps import apps
from django_elasticsearch_dsl import DocType, Index, fields
from mcod import settings
from mcod.lib.search.fields import TranslatedTextField
Resource = apps.get_model('resources', 'Resource')
Dataset = apps.get_model('datasets', 'Dataset')
TaskResult = apps.get_model('django_celery_results', "TaskResult")
INDEX = Index(settings.ELASTICSEARCH_INDEX_NAMES['resources'])
INDEX.settings(**settings.ELASTICSEARCH_DSL_INDEX_SETTINGS)
data_schema = fields.NestedField(attr='schema', properties={
'fields': fields.NestedField(properties={
'name': fields.KeywordField(attr='name'),
'type': fields.KeywordField(attr='type'),
'format': fields.KeywordField(attr='format')
}),
'missingValue': fields.KeywordField(attr='missingValue')
})
@INDEX.doc_type
class ResourceDoc(DocType):
id = fields.IntegerField()
slug = fields.TextField()
uuid = fields.TextField()
title = TranslatedTextField('title', common_params={'suggest': fields.CompletionField()})
description = TranslatedTextField('description')
file_url = fields.TextField(
attr='file_url'
)
download_url = fields.TextField(
attr='download_url'
)
link = fields.TextField()
format = fields.KeywordField()
file_size = fields.LongField()
type = fields.KeywordField()
openness_score = fields.IntegerField()
dataset = fields.NestedField(properties={
'id': fields.IntegerField(),
'title': TranslatedTextField('title'),
'slug': TranslatedTextField('slug')
})
views_count = fields.IntegerField()
downloads_count = fields.IntegerField()
status = fields.TextField()
modified = fields.DateField()
created = fields.DateField()
verified = fields.DateField()
data_date = fields.DateField()
class Meta:
doc_type = 'resource'
model = Resource
related_models = [Dataset, ]
def get_instances_from_related(self, related_instance):
if isinstance(related_instance, Dataset):
return related_instance.resources.all()
def get_queryset(self):
return self._doc_type.model.objects.filter(status='published')
|
[
"piotr.zientarski@britenet.com.pl"
] |
piotr.zientarski@britenet.com.pl
|
220274ef4a9b4c4918eadc9760519ac1b39963d8
|
3cd18a3e789d3a0739768f1ae848d9f74b9dbbe7
|
/mounth001/day21/exercise03.py
|
fe9a7a38bb1bfcf3fe7454d21909dc564595ee5d
|
[] |
no_license
|
Molly-l/66
|
4bfe2f93e726d3cc059222c93a2bb3460b21ad78
|
fae24a968f590060522d30f1b278fcfcdab8b36f
|
refs/heads/master
| 2020-09-28T12:50:18.590794
| 2019-11-27T04:42:28
| 2019-11-27T04:42:28
| 226,782,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
"""
lstack.py -- a stack implemented as a linked list
Key code
Approach:
1. Nodes store the payload data and hold the links between nodes
2. Wrap the operations: push, pop, is-empty, peek at the top
3. The head of the linked list acts as the stack top (no traversal needed)
"""
# Custom exception
class StackError(Exception):
    pass
# Node class
class Node:
    def __init__(self,val,next=None):
        self.val = val # payload data
        self.next = next # link to the next node
# Linked stack
class LStack:
    def __init__(self):
        # marks the top position
        self._top = None
    def is_empty(self):
        return self._top is None
    def push(self,val):
        node=Node(val)
        node.next=self._top
        self._top=node
    def pop(self):
        if self._top is None:
            raise StackError("pop from an empty stack")
        temp=self._top.val
        self._top=self._top.next
        return temp
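# Minimal usage sketch for LStack (illustrative, not part of the original file).
if __name__ == '__main__':
    st = LStack()
    for v in (1, 2, 3):
        st.push(v)
    print(st.pop())       # 3 -- last in, first out
    print(st.is_empty())  # False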
|
[
"769358744@qq.com"
] |
769358744@qq.com
|
7046f96277b3a24fa4c120d9e42ebb229ccaad4a
|
fe7763e194be94c402482619c0111fcaca1ef7f6
|
/tutorial/snippets/permissions.py
|
a42b29204436ae53823a6a8aff8bf895527515ec
|
[
"MIT"
] |
permissive
|
antoniocarlosortiz/django-rest-framework-sample
|
1fc8b11af2aa1cacfbbc2c3363e097262eec7aee
|
45ff0213b4a74566c8571c498c67adf66b420d3e
|
refs/heads/master
| 2021-01-01T05:18:51.457373
| 2016-04-23T18:28:12
| 2016-04-23T18:28:12
| 56,934,397
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request.
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
return obj.owner == request.user
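# For context, a permission class like this is normally wired into a DRF view
# through permission_classes. The Snippet model/serializer referenced below are
# assumptions based on the tutorial path and are not defined in this file.
from rest_framework import generics, permissions
# from snippets.models import Snippet
# from snippets.serializers import SnippetSerializer
class SnippetDetail(generics.RetrieveUpdateDestroyAPIView):
    # queryset = Snippet.objects.all()            # assumed model
    # serializer_class = SnippetSerializer        # assumed serializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]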
|
[
"ortizantoniocarlos@gmail.com"
] |
ortizantoniocarlos@gmail.com
|
c412835e863548366c31fa22434e45e614059113
|
56278a6e508ce1a282270f90f1cd9984edd14965
|
/tests/test_validation/_test_utils.py
|
ae430d81167f643c218fc773e99d0fc4cf3c2974
|
[
"MIT"
] |
permissive
|
gc-ss/py-gql
|
3d5707938e503dc26addc6340be330c1aeb2aa76
|
5a2d180537218e1c30c65b2a933fb4fe197785ae
|
refs/heads/master
| 2023-04-10T05:21:24.086980
| 2020-04-01T14:18:20
| 2020-04-01T14:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,463
|
py
|
# -*- coding: utf-8 -*-
from py_gql._string_utils import dedent
from py_gql.lang import parse
from py_gql.validation import validate_ast
from py_gql.validation.validate import SPECIFIED_RULES, default_validator
def _ensure_list(value):
if isinstance(value, list):
return value
else:
return [value]
def assert_validation_result(
schema, source, expected_msgs=None, expected_locs=None, checkers=None
):
# Prints are here so we can more easily debug when running pytest with -v
expected_msgs = expected_msgs or []
expected_locs = expected_locs or []
print(source)
result = validate_ast(
schema,
parse(dedent(source), allow_type_system=True),
validators=[
lambda s, d, v: default_validator(
s, d, v, validators=(checkers or SPECIFIED_RULES)
)
],
)
errors = result.errors
msgs = [str(err) for err in errors]
locs = [[node.loc for node in err.nodes] for err in errors]
print(" [msgs] ", msgs)
print(" [locs] ", locs)
assert msgs == expected_msgs
if expected_locs:
assert locs == [_ensure_list(l) for l in expected_locs]
def assert_checker_validation_result(
checker, schema, source, expected_msgs=None, expected_locs=None
):
assert_validation_result(
schema,
source,
expected_msgs=expected_msgs,
expected_locs=expected_locs,
checkers=[checker],
)
|
[
"c.lirsac@gmail.com"
] |
c.lirsac@gmail.com
|
c2191030e2543c62287b31ad7e253f8767252f1c
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v9/enums/types/feed_item_quality_approval_status.py
|
2b7fc3c81f16e8f0168b1a99e3484c10977c937b
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"FeedItemQualityApprovalStatusEnum",},
)
class FeedItemQualityApprovalStatusEnum(proto.Message):
r"""Container for enum describing possible quality evaluation
approval statuses of a feed item.
"""
class FeedItemQualityApprovalStatus(proto.Enum):
r"""The possible quality evaluation approval statuses of a feed
item.
"""
UNSPECIFIED = 0
UNKNOWN = 1
APPROVED = 2
DISAPPROVED = 3
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"noreply@github.com"
] |
GerhardusM.noreply@github.com
|
a30f1f5184e240fdb168d288874791f7260c7029
|
cdbb11473dc8d34767a5916f9f85cb68eb2ca3f2
|
/core/helpers.py
|
a9cf1b2ad8c669f8aac1b940187d7a46adde3660
|
[] |
no_license
|
skyride/evestats
|
fb2a1a248952771731dcfecadab7d02b1f08cd4b
|
4bd2153f65c084b478272513733dcc78f9a0ef98
|
refs/heads/master
| 2020-03-23T13:50:19.216870
| 2018-08-05T19:19:47
| 2018-08-05T19:19:47
| 141,640,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
from sde.models import Type
def generate_breadcrumb_trail(marketgroup):
def recurse(node):
"""Return an list containing the path to this trail"""
if isinstance(node, dict):
return []
elif isinstance(node, Type):
return [*recurse(node.market_group), node]
elif node.parent is None:
return [node]
else:
return [*recurse(node.parent), node]
return [
{
"name": "Market",
"root": True
},
*recurse(marketgroup)
]
|
[
"adam.findlay@mercurytide.co.uk"
] |
adam.findlay@mercurytide.co.uk
|
fa3e65432481dc50669a709c3740fc9753628e14
|
8f0524fc0171e27a15f4cf5fb3fe48ef2053b40e
|
/leetcode/DP/edit_distance_formula.py
|
e9141de529dbc4bde7fdefe5cc4713fae1837147
|
[] |
no_license
|
MohammedAlewi/competitive-programming
|
51514fa04ba03d14f8e00031ee413d6d74df971f
|
960da78bfa956cb1cf79a0cd19553af97a2aa0f3
|
refs/heads/master
| 2023-02-08T20:25:58.279241
| 2023-02-02T00:11:23
| 2023-02-02T00:11:23
| 222,710,225
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
def edit_str(s1,s2,n,m):
if n<0 or m<0:
return max(m,n)+1
elif s1[n]==s2[m]:
return edit_str(s1,s2,n-1,m-1)
else:
return min(edit_str(s1,s2,n-1,m-1),edit_str(s1,s2,n,m-1),edit_str(s1,s2,n-1,m)) +1
print(edit_str("kitten","sitting",5,6))
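# The plain recursion above re-solves overlapping subproblems, so it is
# exponential. A memoized variant of the same recurrence (a sketch, not part of
# the original file) runs in O(n*m).
from functools import lru_cache
def edit_distance(s1, s2):
    @lru_cache(maxsize=None)
    def go(n, m):
        if n < 0 or m < 0:
            return max(m, n) + 1
        if s1[n] == s2[m]:
            return go(n - 1, m - 1)
        return min(go(n - 1, m - 1), go(n, m - 1), go(n - 1, m)) + 1
    return go(len(s1) - 1, len(s2) - 1)
print(edit_distance("kitten", "sitting"))  # 3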
|
[
"rofyalewi@gmail.com"
] |
rofyalewi@gmail.com
|
aae84273d14923a5fb83bf35b9b0e6a31ea3d1af
|
a6270537b5c6d924fa6353a8f0328e07c71a0366
|
/numbasltiprovider/urls.py
|
c12994c32a9c81f0df352e00b8c9d1aa5310f5c7
|
[
"Apache-2.0"
] |
permissive
|
oscarsiles/numbas-lti-provider
|
9b993175a6b6463a974373c7bdb2c9f38b057b89
|
ef7080a2593a800a1b9630c746e4f8667e2ec42d
|
refs/heads/master
| 2020-08-20T03:47:54.399198
| 2020-08-05T13:44:16
| 2020-08-05T13:44:16
| 215,979,486
| 0
| 0
|
NOASSERTION
| 2019-10-18T08:39:09
| 2019-10-18T08:39:09
| null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('numbas_lti.urls')),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
try:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
except ImportError:
pass
|
[
"christianperfect@gmail.com"
] |
christianperfect@gmail.com
|
2f85952fcbe3b65f4c744f4e3bb7f9549a012652
|
cb4cfcece4bc14f591b038adbc7fadccaf447a1d
|
/ELEVSTRS.py
|
d84b11ce6e30ca754fe1115b5248d18d884db818
|
[] |
no_license
|
psycho-pomp/CodeChef
|
ba88cc8e15b3e87d39ad0c4665c6892620c09d22
|
881edddded0bc8820d22f42b94b9959fd6912c88
|
refs/heads/master
| 2023-03-21T06:46:14.455055
| 2021-03-11T12:07:48
| 2021-03-11T12:07:48
| 275,214,989
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
# cook your dish here
from math import sqrt
t=int(input())
for _ in range(t):
n,v1,v2=map(int,input().split())
t1=n/v1
t2=(sqrt(2)*n)/v2
if t2>=t1:
print("Stairs")
else:
print("Elevator")
|
[
"noreply@github.com"
] |
psycho-pomp.noreply@github.com
|
8c7ec1217dd7bc22b88439c1f406972e4f2a9006
|
3bae1ed6460064f997264091aca0f37ac31c1a77
|
/apps/cloud_api_generator/generatedServer/tasklets/rack/create/rack_create.py
|
3e407f24ace515e0974c5621850b08fc380425ff
|
[] |
no_license
|
racktivity/ext-pylabs-core
|
04d96b80ac1942754257d59e91460c3a141f0a32
|
53d349fa6bee0ccead29afd6676979b44c109a61
|
refs/heads/master
| 2021-01-22T10:33:18.523799
| 2017-06-08T09:09:28
| 2017-06-08T09:09:28
| 54,314,984
| 0
| 0
| null | 2017-06-08T09:09:29
| 2016-03-20T11:55:01
|
Python
|
UTF-8
|
Python
| false
| false
| 174
|
py
|
__author__ = 'aserver'
__tags__ = 'rack', 'create'
__priority__= 3
def main(q, i, params, tags):
params['result'] = ''
def match(q, i, params, tags):
return True
|
[
"devnull@localhost"
] |
devnull@localhost
|
7be5aa773f2e343fd4b8b491a4269fdf9fff5719
|
ca609a94fd8ab33cc6606b7b93f3b3ef201813fb
|
/2017-feb/1.python/5.data-frames.py
|
959658216b9ad8cb6baf46f1063d69277bcff50f
|
[] |
no_license
|
rajesh2win/datascience
|
fbc87def2a031f83ffceb4b8d7bbc31e8b2397b2
|
27aca9a6c6dcae3800fabdca4e3d76bd47d933e6
|
refs/heads/master
| 2021-01-20T21:06:12.488996
| 2017-08-01T04:39:07
| 2017-08-01T04:39:07
| 101,746,310
| 1
| 0
| null | 2017-08-29T09:53:49
| 2017-08-29T09:53:49
| null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
import pandas as pd
col1 = [10,20,30,40]
col2 = ['abc','def','xyz','pqr']
col3 = [0,0,0,0]
#creating data frame
df1 = pd.DataFrame({'pid':col1,
'pname':col2,'survived':col3})
df1.shape
df1.info()
df1.describe()
df1.head(2)
df1.tail()
df1['col4'] = 0
#access frame content by column/columns
df1.pid
df1['pid']
df1[['pid','pname']]
df1[[0,1]]
#dropping a column
df2 = df1.drop('survived',1)
#slicing rows of frame
df1[0:2]
df1[0:4]
df1[0:]
df1[:2]
df1[-2:]
#filtering rows of dataframe by condition
type(df1.pid > 20)
df1[df1.pid>20]
#selecting subsets of rows and columns
df1.iloc[0:2,]
df1.iloc[[0,2],]
df1.iloc[0:2,0]
df1.iloc[0:2,[0,2]]
df1.loc[0:2,['pname']]
#grouping data in data frames
df1.groupby('pid').size()
|
[
"info@algorithmica.co.in"
] |
info@algorithmica.co.in
|
0a53f26329b7e8f590b399d677a12e83e6704b2e
|
28a124b6a2f22a53af3b6bb754e77af88b4138e1
|
/DJANGO/companytodo/reports/migrations/0006_auto_20191209_0121.py
|
a29feb60b3e3cadd0f868274c2f14a8a99ef6f0e
|
[] |
no_license
|
mebaysan/LearningKitforBeginners-Python
|
f7c6668a9978b52cad6cc2b969990d7bbfedc376
|
9e1a47fb14b3d81c5b009b74432902090e213085
|
refs/heads/master
| 2022-12-21T03:12:19.892857
| 2021-06-22T11:58:27
| 2021-06-22T11:58:27
| 173,840,726
| 18
| 4
| null | 2022-12-10T03:00:22
| 2019-03-04T23:56:27
|
Python
|
UTF-8
|
Python
| false
| false
| 350
|
py
|
# Generated by Django 2.2.7 on 2019-12-08 22:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reports', '0005_auto_20191209_0120'),
]
operations = [
migrations.AlterModelOptions(
name='report',
options={'ordering': ('-created',)},
),
]
|
[
"menesbaysan@gmail.com"
] |
menesbaysan@gmail.com
|
a81f1658dd871e8e403dcf6b4e512ae458767a2f
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/HBKAGJZ62JkCTgYX3_15.py
|
5c5665b4393c00c704f2eb04cb3ee08dfe0d3464
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
def last(l,n):
if n>len(l):
return 'invalid'
elif n==0:
return []
else:
return l[len(l)-n:]
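# Illustrative calls covering each branch; not part of the original file.
print(last([1, 2, 3, 4, 5], 2))  # [4, 5]
print(last([1, 2, 3], 0))        # []
print(last([1, 2], 5))           # 'invalid'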
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
c0300915f88b4cbb234193be8a08ceb789f7fd55
|
c24b28c0dc4ad8f83845f4c61882f1e04d49b5cd
|
/Plotly_Graphs/Plotly_Introduction/plotly_charts.py
|
d17cd6a9de3a549f8ebb82ff2712db48bbb76398
|
[] |
no_license
|
Coding-with-Adam/Dash-by-Plotly
|
759e927759513d96060a770b1e0b0a66db13f54f
|
9f178f1d52536efd33827758b741acc4039d8d9b
|
refs/heads/master
| 2023-08-31T17:23:02.029281
| 2023-08-08T05:12:50
| 2023-08-08T05:12:50
| 234,687,337
| 1,293
| 1,822
| null | 2023-07-31T15:47:07
| 2020-01-18T05:36:28
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 370
|
py
|
import pandas as pd
import plotly.express as px
dfb = pd.read_csv("bird-window-collision-death.csv")
df = px.data.tips()
fig = px.pie(dfb, values='Deaths', names='Bldg #', color="Side", hole=0.3)
fig.update_traces(textinfo="label+percent", insidetextfont=dict(color="white"))
fig.update_layout(legend={"itemclick":False})
fig.show()
fig.write_image("images/fig1.png")
|
[
"noreply@github.com"
] |
Coding-with-Adam.noreply@github.com
|
9ff2f22cb931ef1b4b6f3de6cb5ba468dace744c
|
ae613a880eecf783ba23e7ca871f9e165ec2ce6e
|
/calculate_root.py
|
f6e918aef989a07665376a59101b386c993edc8e
|
[
"MIT"
] |
permissive
|
h-mayorquin/capacity_code
|
52d7e81026cd804677d5a5a6312b434bdff6ed32
|
f885f0e409d3f9c54b8e20c902f7ef28584ca8a2
|
refs/heads/master
| 2020-08-28T00:30:14.760936
| 2020-01-31T17:26:29
| 2020-01-31T17:26:29
| 217,534,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,388
|
py
|
import warnings
import pickle
import pandas as pd
import numpy as np
import random
from math import ceil, floor
from copy import deepcopy
from functions import *
warnings.filterwarnings('ignore')
minicolumns = 10
hypercolumns = 5
sequence_length = 2
number_of_sequences = 20
pattern_seed = np.random.randint(0, 20)
desired_root = 0.9
verbose = True
n_patterns = 100
pairs = produce_pairs_with_constant_number_of_patterns(n_patterns)[3:-3]
# Format is hypercolumns, minicolumns, extra
pairs = [(3, 66, 0)]
# Do the calculations
for pair in pairs:
hypercolumns, minicolumns, extra = pair
print('hypercolumns', hypercolumns)
print('minicolumns', minicolumns)
print('extra', extra)
pattern_seed = np.random.randint(0, 20)
aux = find_root_empirical(desired_root, hypercolumns, minicolumns, sequence_length, pattern_seed, tolerance=0.01, verbose=verbose)
capacity, p_root, trials = aux
# Read
data_frame = pd.read_csv('../storage_capacity_data.csv', index_col=0)
# Write
data_frame = data_frame.append({'hypercolumns':hypercolumns, 'minicolumns':minicolumns, 'sequence_length':sequence_length,
'capacity':capacity, 'p_critical':p_root, 'trials':trials }, ignore_index=True)
# Store the data base
data_frame.to_csv('../storage_capacity_data.csv')
print('Stored')
print('================')
|
[
"h.mayorquin@gmail.com"
] |
h.mayorquin@gmail.com
|
c1337933143e4be73f495569475dbf98d651bfac
|
f0b52a3ae5115b9a839d6bd3e765de83ecb21a28
|
/Payload_Type/Apollo/mythic/agent_functions/net_localgroup_member.py
|
6b2fad53fcf068ef12c142ebdcfed4c9d96d878c
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
bopin2020/Apollo
|
ad98f1cb872bd2134509df55ee67a79c51e6d316
|
7660439cbc8d4f18af2b564a5b7a0ac4f8f3765a
|
refs/heads/master
| 2023-01-12T23:50:01.266984
| 2020-11-12T07:03:13
| 2020-11-12T07:03:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
from CommandBase import *
import json
class NetLocalgroupMemberArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {
"computer": CommandParameter(name="computer", required=False, type=ParameterType.String, description="Computer to enumerate."),
"group": CommandParameter(name="group", type=ParameterType.String, description="Group to enumerate.")
}
def split_commandline(self):
if self.command_line[0] == "{":
raise Exception("split_commandline expected string, but got JSON object: " + self.command_line)
inQuotes = False
curCommand = ""
cmds = []
for x in range(len(self.command_line)):
c = self.command_line[x]
if c == '"' or c == "'":
inQuotes = not inQuotes
if (not inQuotes and c == ' '):
cmds.append(curCommand)
curCommand = ""
else:
curCommand += c
if curCommand != "":
cmds.append(curCommand)
for x in range(len(cmds)):
if cmds[x][0] == '"' and cmds[x][-1] == '"':
cmds[x] = cmds[x][1:-1]
elif cmds[x][0] == "'" and cmds[x][-1] == "'":
cmds[x] = cmds[x][1:-1]
return cmds
async def parse_arguments(self):
if self.command_line[0] == "{":
self.load_args_from_json_string(self.command_line)
else:
cmds = self.split_commandline()
if len(cmds) == 1:
self.add_arg("group", cmds[0])
elif len(cmds) == 2:
self.add_arg("computer", cmds[0])
self.add_arg("group", cmds[1])
else:
raise Exception("Expected one or two arguments, but got: {}".format(cmds))
class NetLocalgroupMemberCommand(CommandBase):
cmd = "net_localgroup_member"
needs_admin = False
help_cmd = "net_localgroup_member [computer] [group]"
description = "Retrieve local group membership of the group specified by [group]. If [computer] is omitted, defaults to localhost."
version = 1
is_exit = False
is_file_browse = False
is_process_list = False
is_download_file = False
is_upload_file = False
is_remove_file = False
author = "@djhohnstein"
argument_class = NetLocalgroupMemberArguments
attackmapping = []
browser_script = BrowserScript(script_name="net_localgroup_member", author="@djhohnstein")
async def create_tasking(self, task: MythicTask) -> MythicTask:
return task
async def process_response(self, response: AgentResponse):
pass
|
[
"djhohnstein@gmail.com"
] |
djhohnstein@gmail.com
|
109e032b250691b3bf5f5ea34a9982e509cbd868
|
3d9825900eb1546de8ad5d13cae893eb0d6a9b14
|
/AutoWorkup/SEMTools/setup.py
|
94c4428d8e6622b3e676c76f782d87775210107d
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
rtkarcher/BRAINSTools
|
20d69f96e6d5ca92adaeb06aa4fe6556b5e7b268
|
961135366450400409cece431423ed480855d34c
|
refs/heads/master
| 2021-01-15T08:53:48.961607
| 2013-06-26T19:09:34
| 2013-06-26T19:09:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('SEMTools', parent_package, top_path)
config.add_data_dir('diffusion')
config.add_data_dir('segmentation')
config.add_data_dir('filtering')
config.add_data_dir('brains')
config.add_data_dir('utilities')
config.add_data_dir('registration')
config.add_data_dir('utility')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
[
"hans-johnson@uiowa.edu"
] |
hans-johnson@uiowa.edu
|
4cecb1fdea73ca9f39f2bdf440f6840a5f57c2f2
|
7aebfaec6957ad67523f1d8851856af88fb997a6
|
/catkin_ws/build/robotiq/robotiq_3f_gripper_control/catkin_generated/pkg.develspace.context.pc.py
|
472d6f1b90073d4764f3841493bb27694a2f8bfa
|
[] |
no_license
|
k-makihara/ROS
|
918e79e521999085ab628b6bf27ec28a51a8ab87
|
45b60e0488a5ff1e3d8f1ca09bfd191dbf8c0508
|
refs/heads/master
| 2023-01-28T06:00:55.943392
| 2020-11-26T05:27:16
| 2020-11-26T05:27:16
| 316,127,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/mslab/catkin_ws/devel/include".split(';') if "/home/mslab/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "controller_manager;diagnostic_updater;dynamic_reconfigure;hardware_interface;robotiq_ethercat;roscpp;rospy;socketcan_interface;std_srvs;robotiq_3f_gripper_articulated_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lrobotiq_3f_gripper_control".split(';') if "-lrobotiq_3f_gripper_control" != "" else []
PROJECT_NAME = "robotiq_3f_gripper_control"
PROJECT_SPACE_DIR = "/home/mslab/catkin_ws/devel"
PROJECT_VERSION = "1.0.0"
|
[
"makihara@ms.esys.tsukuba.ac.jp"
] |
makihara@ms.esys.tsukuba.ac.jp
|
b836ab3184c86a5580190a268a1f5b5241677048
|
ef54d37f8a3303013ca7469871a320d303957ed7
|
/robo4.2/fusion/tests/wpst_crm/feature_tests/TBIRD/F110_Port_Monitoring/port_monitor_support_module.py
|
a6deffdae867986fcbebc06b5c2da63aca16ddb9
|
[] |
no_license
|
richa92/Jenkin_Regression_Testing
|
d18badfcf16bda682dfe7bcbbd66f54a9a27a58d
|
24a74926170cbdfafa47e972644e2fe5b627d8ff
|
refs/heads/master
| 2020-07-12T10:01:59.099137
| 2019-08-27T12:14:53
| 2019-08-27T12:14:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
'''
This module is used to include some helper function for Port Monitoring
'''
def set_networkuri_lig(data_variable, get_output):
'''
Build the network URI's from the network Name and form the
LIG body
'''
temp = data_variable
for i in range(len(temp['uplinkSets'])):
for j in range(len(temp['uplinkSets'][i]['networkUris'])):
for x in get_output['members']:
if temp['uplinkSets'][i]['networkUris'][j] == x['name']:
temp['uplinkSets'][i]['networkUris'][j] = x['uri']
return temp
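# Illustrative call with made-up data (not from the original test suite),
# showing how network names in the LIG body are swapped for their URIs.
if __name__ == '__main__':
    lig_body = {'uplinkSets': [{'networkUris': ['net_A', 'net_B']}]}
    networks = {'members': [{'name': 'net_A', 'uri': '/rest/ethernet-networks/1'},
                            {'name': 'net_B', 'uri': '/rest/ethernet-networks/2'}]}
    print(set_networkuri_lig(lig_body, networks))
    # {'uplinkSets': [{'networkUris': ['/rest/ethernet-networks/1', '/rest/ethernet-networks/2']}]}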
|
[
"akul@SAC0MKUVCQ.asiapacific.hpqcorp.net"
] |
akul@SAC0MKUVCQ.asiapacific.hpqcorp.net
|
ebe07b6c084e6824573cbad59b09aeeccd77287e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03130/s063741533.py
|
8e8ac571e89e7cbb1f076333c2fcb83f461a3bff
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
def examA():
N, K = LI()
if (N+1)//2>=K:
print("YES")
else:
print("NO")
return
def examB():
d = [0]*4
for _ in range(3):
a, b = LI()
a -=1; b -=1
d[a] +=1
d[b] +=1
for i in d:
if i>=3 or i==0:
print("NO")
return
print("YES")
return
def examC():
ans = 0
print(ans)
return
def examD():
ans = 0
print(ans)
return
def examE():
ans = 0
print(ans)
return
def examF():
ans = 0
print(ans)
return
import sys,copy,bisect,itertools,heapq,math
from heapq import heappop,heappush,heapify
from collections import Counter,defaultdict,deque
def I(): return int(sys.stdin.readline())
def LI(): return list(map(int,sys.stdin.readline().split()))
def LFI(): return list(map(float,sys.stdin.readline().split()))
def LSI(): return list(map(str,sys.stdin.readline().split()))
def LS(): return sys.stdin.readline().split()
def SI(): return sys.stdin.readline().strip()
global mod,mod2,inf,alphabet
mod = 10**9 + 7
mod2 = 998244353
inf = 10**18
alphabet = [chr(ord('a') + i) for i in range(26)]
if __name__ == '__main__':
examB()
"""
"""
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
2b2602042f1ed0d95c722a129a06ec21856cab22
|
cc90d98a64693ca4542c999b5d2241b60eb33aac
|
/Problem62-3.py
|
e12955f5179fe407cd38b8141c64e187aee5cac8
|
[] |
no_license
|
Nan-Do/eulerproject
|
1f63b23a4d4e344c8525238b2333920e733b03c9
|
d33033d6af10d1aca8f7db9bcf187ef8f6005040
|
refs/heads/master
| 2021-01-10T15:51:39.594159
| 2016-04-14T05:41:16
| 2016-04-14T05:41:16
| 48,170,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
from itertools import count, permutations
from math import ceil
def gen_cubes():
for n in count(start=1):
yield n ** 3
def is_cube(n):
v = ceil(pow(n, (1/3.)))
return (v ** 3) == n
def check_cube(number, limit):
n_str = str(number)
count = 0
repeated = set()
for n in set(permutations(n_str)):
        if n[0] == '0' or n in repeated:
continue
repeated.add(n)
        if is_cube(int(''.join(n))):
count += 1
if count == limit:
return True
return False
for n in gen_cubes():
if check_cube(n, 4):
        print(n)
break
|
[
"icemanf@gmail.com"
] |
icemanf@gmail.com
|
175cd537ba734aea16b54646d227d7f043eae53f
|
3027a838581e2b0778bd6ae40f9a6c72017b3b0d
|
/loss.py
|
84c014beac4352f30db99c0f34f0a9b4f0f3262b
|
[] |
no_license
|
arthur-qiu/robust
|
2617adf3be8ea24592990e66b35123d02b0db045
|
3f40b45a740a1d3f2ba81a18e2cb510fe613d616
|
refs/heads/master
| 2020-12-04T12:08:52.665675
| 2020-02-26T10:37:34
| 2020-02-26T10:37:34
| 231,758,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,164
|
py
|
import numpy as np
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
def Entropy(input_):
bs = input_.size(0)
epsilon = 1e-5
entropy = -input_ * torch.log(input_ + epsilon)
entropy = torch.sum(entropy, dim=1)
return entropy
def grl_hook(coeff):
def fun1(grad):
return -coeff*grad.clone()
return fun1
def CDAN(input_list, ad_net, entropy=None, coeff=None, random_layer=None):
softmax_output = input_list[1].detach()
feature = input_list[0]
if random_layer is None:
op_out = torch.bmm(softmax_output.unsqueeze(2), feature.unsqueeze(1))
ad_out = ad_net(op_out.view(-1, softmax_output.size(1) * feature.size(1)))
else:
random_out = random_layer.forward([feature, softmax_output])
ad_out = ad_net(random_out.view(-1, random_out.size(1)))
batch_size = softmax_output.size(0) // 2
dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
# if ad_out.shape[0] != 128 or dc_target.shape[0] != 128:
# print(ad_out.shape)
# print(dc_target.shape)
# print(softmax_output.shape)
# print(feature.shape)
if entropy is not None:
entropy.register_hook(grl_hook(coeff))
entropy = 1.0+torch.exp(-entropy)
source_mask = torch.ones_like(entropy)
source_mask[feature.size(0)//2:] = 0
source_weight = entropy*source_mask
target_mask = torch.ones_like(entropy)
target_mask[0:feature.size(0)//2] = 0
target_weight = entropy*target_mask
weight = source_weight / torch.sum(source_weight).detach().item() + \
target_weight / torch.sum(target_weight).detach().item()
return torch.sum(weight.view(-1, 1) * nn.BCELoss(reduction='none')(ad_out, dc_target)) / torch.sum(weight).detach().item()
else:
return nn.BCELoss()(ad_out, dc_target)
def DANN(features, ad_net):
ad_out = ad_net(features)
batch_size = ad_out.size(0) // 2
dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
return nn.BCELoss()(ad_out, dc_target)
|
[
"Arthur"
] |
Arthur
|
d7feedf3e0f9eec8c5f371d5bd23732533460493
|
5babecf71b6b3c3295219b59bd96e348e1cfaf80
|
/singleylinkedlist.py
|
c5bb94f61e1e34fc34faa6ab4bc6d013e5858183
|
[] |
no_license
|
jefinagilbert/dataStructures
|
46697a8c1ec5cdb943a1e95e887f6343a85f648b
|
04773fc0dff7d18078f3960b0993ce8ab7918a19
|
refs/heads/main
| 2023-06-12T02:06:24.345124
| 2021-07-10T18:05:44
| 2021-07-10T18:05:44
| 384,760,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,771
|
py
|
class node:
def __init__(self,data):
self.data = data
self.next = None
class linkedlist:
def __init__(self):
self.head = None
def printlinkedlist(self):
temp = self.head
print (temp)
while (temp):
print (temp.data,end=" -> ")
temp = temp.next
def append(self, new_data):
new_node = node(new_data)
if self.head is None:
self.head = new_node
return
last = self.head
while (last.next):
last = last.next
last.next = new_node
def push(self, new_data):
new_node = node(new_data)
new_node.next = self.head
self.head = new_node
def insertAfter(self, prev_node, new_data):
if prev_node is None:
print("The given previous node not inLinkedList.")
return
new_node = node(new_data)
new_node.next = prev_node.next
prev_node.next = new_node
def deletenode(self,key):
temp = self.head
if (temp is not None):
if key == temp.data:
self.head = temp.next
temp = None
while (temp is not None):
if temp.data == key:
break
prev = temp
temp = temp.next
if (temp is None):
return
prev.next = temp.next
temp = None
def deletelist(self):
        temp = self.head # alternatively, we could simply set self.head = None
while (temp):
next = temp.next
del temp.data
temp = next
self.head = None
def deletenodeposition(self,position):
temp = self.head
if (self.head is None):
return
if position == 0:
self.head = temp.next
temp = None
return
for i in range(position - 1):
temp = temp.next
if (temp is None):
break
if (temp is None):
return
if (temp.next is None):
return
next = temp.next.next
temp.next = None
temp.next = next
if __name__ == "__main__":
llist = linkedlist()
while True:
print()
print("------ NOTES ------")
print()
print("1. Append Value")
print()
print("2. Push Value")
print()
print("3. Insert After")
print()
print("4. Display Node")
print()
print("5. Delete Node by data")
print()
print("6. Delete Node by Position")
print()
print("7. Delete Linked list")
print()
print("8. Exit")
i = int(input("Enter the Number: "))
if i == 1:
k = int(input("enter value to append : "))
llist.append(k)
print()
print(k," Appended Successfully")
elif i == 2:
k = int(input("enter value to push : "))
llist.push(k)
elif i == 3:
k = int(input("enter value to add after : "))
llist.insertAfter(llist.head.next,k)
elif i == 4:
llist.printlinkedlist()
elif i == 5:
k = int(input("enter value to deletenode : "))
llist.deletenode(k)
elif i == 6:
k = int(input("enter position to Delete : "))
llist.deletenodeposition(k)
elif i == 7:
llist.deletelist()
elif i == 8:
break
else:
print("Enter Valid Number")
|
[
"noreply@github.com"
] |
jefinagilbert.noreply@github.com
|
271813ce9df854023fe3b6d50c40601bd44a2d32
|
b80059648afab4474e567ec1035d63d060d9b3a6
|
/src/analyze.py
|
e18911d2f1160107000f7ce93c5532bf18c7c900
|
[
"MIT"
] |
permissive
|
SteemData/classify.steemdata.com
|
8b34d7ae9e666b9dfe9930c82dc347650356fb94
|
507d2d537a502701dd6e28c9581c132942084b7a
|
refs/heads/master
| 2021-03-19T05:57:34.360839
| 2017-11-09T22:30:59
| 2017-11-09T22:30:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
import boto3
from easydict import EasyDict as ed
config = ed(
region_name = 'us-west-2',
s3_bucket_name = 'steem-hackaton-input'
)
rkg = boto3.client('rekognition', region_name=config.region_name)
def nsfw(img: bytes):
response = rkg.detect_moderation_labels(
Image={'Bytes': img},
)
return response['ModerationLabels']
def labels(img: bytes):
response = rkg.detect_labels(
Image={'Bytes': img},
MaxLabels=100,
MinConfidence=80,
)
return response['Labels']
|
[
"_@furion.me"
] |
_@furion.me
|
bfa64414e10648e405e89258f858138cfe2bcc91
|
f4e21b9a042577400689e83a7ae11c0eee13cecf
|
/gneiss/regression/tests/test_transformer.py
|
3f7aa1cd6eebc62d528cecdf3407afee1faff1f6
|
[] |
no_license
|
ebolyen/gneiss
|
8facaaffe9904c8641f418fdd1461c1ae447e593
|
bb47be8805bf887afcc40b72365b062aa74ff823
|
refs/heads/master
| 2022-12-21T21:08:09.162341
| 2017-04-21T01:30:10
| 2017-04-21T01:30:10
| 88,930,099
| 0
| 0
| null | 2017-04-21T02:20:16
| 2017-04-21T02:20:16
| null |
UTF-8
|
Python
| false
| false
| 2,346
|
py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, gneiss development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
from gneiss.regression._format import (LinearRegressionFormat_g,
LinearMixedEffectsFormat_g)
from qiime2.plugin.testing import TestPluginBase
from gneiss.regression._ols import OLSModel
from gneiss.regression._mixedlm import LMEModel
import pandas.util.testing as pdt
class TestLinearRegressionTransformers(TestPluginBase):
package = "gneiss.regression.tests"
def test_ols_model_to_regression_format(self):
filepath = self.get_data_path('ols.pickle')
transformer = self.get_transformer(OLSModel, LinearRegressionFormat_g)
input = OLSModel.read_pickle(filepath)
obs = transformer(input)
obs = OLSModel.read_pickle(str(obs))
pdt.assert_frame_equal(input.pvalues, obs.pvalues)
def test_regression_format_to_ols_model(self):
filename = 'ols.pickle'
input, obs = self.transform_format(LinearRegressionFormat_g, OLSModel,
filename)
exp = OLSModel.read_pickle(str(input))
pdt.assert_frame_equal(exp.pvalues, obs.pvalues)
class TestLinearMixedEffectsTransformers(TestPluginBase):
package = "gneiss.regression.tests"
def test_lme_model_to_regression_format(self):
filepath = self.get_data_path('lme.pickle')
transformer = self.get_transformer(LMEModel,
LinearMixedEffectsFormat_g)
input = LMEModel.read_pickle(filepath)
obs = transformer(input)
obs = LMEModel.read_pickle(str(obs))
pdt.assert_frame_equal(input.pvalues, obs.pvalues)
def test_regression_format_to_lme_model(self):
filename = 'lme.pickle'
input, obs = self.transform_format(LinearMixedEffectsFormat_g,
LMEModel, filename)
exp = LMEModel.read_pickle(str(input))
pdt.assert_frame_equal(exp.pvalues, obs.pvalues)
if __name__ == '__main__':
unittest.main()
|
[
"jamietmorton@gmail.com"
] |
jamietmorton@gmail.com
|
0d37df26911f7aa45fd992907792f711b760b1d3
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/cv/classification/Gluon_ResNet50_v1d_for_PyTorch/timm/models/layers/involution.py
|
97e83500b1f997b67fbd369776d069d277ac3bdb
|
[
"Apache-2.0",
"MIT",
"CC-BY-NC-4.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,501
|
py
|
# Copyright [yyyy] [name of copyright owner]
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" PyTorch Involution Layer
Official impl: https://github.com/d-li14/involution/blob/main/cls/mmcls/models/utils/involution_naive.py
Paper: `Involution: Inverting the Inherence of Convolution for Visual Recognition` - https://arxiv.org/abs/2103.06255
"""
import torch.nn as nn
from .conv_bn_act import ConvBnAct
from .create_conv2d import create_conv2d
class Involution(nn.Module):
def __init__(
self,
channels,
kernel_size=3,
stride=1,
group_size=16,
rd_ratio=4,
norm_layer=nn.BatchNorm2d,
act_layer=nn.ReLU,
):
super(Involution, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.channels = channels
self.group_size = group_size
self.groups = self.channels // self.group_size
self.conv1 = ConvBnAct(
in_channels=channels,
out_channels=channels // rd_ratio,
kernel_size=1,
norm_layer=norm_layer,
act_layer=act_layer)
self.conv2 = self.conv = create_conv2d(
in_channels=channels // rd_ratio,
out_channels=kernel_size**2 * self.groups,
kernel_size=1,
stride=1)
self.avgpool = nn.AvgPool2d(stride, stride) if stride == 2 else nn.Identity()
self.unfold = nn.Unfold(kernel_size, 1, (kernel_size-1)//2, stride)
def forward(self, x):
weight = self.conv2(self.conv1(self.avgpool(x)))
B, C, H, W = weight.shape
KK = int(self.kernel_size ** 2)
weight = weight.view(B, self.groups, KK, H, W).unsqueeze(2)
out = self.unfold(x).view(B, self.groups, self.group_size, KK, H, W)
out = (weight * out).sum(dim=3).view(B, self.channels, H, W)
return out
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
a5ae575a5d08b866c988d7daff8b8357e695454b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03148/s977826358.py
|
4dcf39217ab9acca14bc5415bf0a46880be55e2c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,165
|
py
|
import sys
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)
import heapq
class PriorityQueue:
class Reverse:
def __init__(self, val):
self.val = val
def __lt__(self, other):
return self.val > other.val
def __repr__(self):
return repr(self.val)
def __init__(self, x=None, desc=False):
if not x:
x = []
if desc:
for i in range(len(x)):
x[i] = self.Reverse(x[i])
self._desc = desc
self._container = x
heapq.heapify(self._container)
@property
def is_empty(self):
return not self._container
def pop(self):
if self._desc:
return heapq.heappop(self._container).val
else:
return heapq.heappop(self._container)
def push(self, item):
if self._desc:
heapq.heappush(self._container, self.Reverse(item))
else:
heapq.heappush(self._container, item)
def top(self):
if self._desc:
return self._container[0].val
else:
return self._container[0]
def sum(self):
return sum(self._container)
def __len__(self):
return len(self._container)
def main():
from operator import itemgetter
n, k = list(map(int, readline().split()))
sushis_original = [list(map(int, readline().split())) for _ in range(n)]
sushis_original.sort(key=itemgetter(1))
sushis_original.sort(key=itemgetter(0))
new_type = 0
prev = -1
for i in range(n):
cur = sushis_original[i][0]
if prev != cur:
new_type += 1
if cur > new_type:
sushis_original[i][0] = new_type
prev = cur
type_num = sushis_original[-1][0]
sushis = {i: [] for i in range(1, type_num + 1)}
for sushi_type, val in sushis_original:
sushis[sushi_type].append(val)
eat_sushis = PriorityQueue()
rem_sushis = PriorityQueue(desc=True)
rem = k
if rem >= type_num:
for i in range(1, type_num + 1):
eat_sushis.push(sushis[i].pop())
rem -= type_num
for vals in sushis.values():
for val in vals:
rem_sushis.push(val)
for _ in range(rem):
eat_sushis.push(rem_sushis.pop())
else:
for i in range(1, type_num + 1):
eat_sushis.push(sushis[i].pop())
discard_num = type_num - k
for _ in range(discard_num):
eat_sushis.pop()
for vals in sushis.values():
for val in vals:
rem_sushis.push(val)
cur_type = min(k, type_num)
sub_next = 2 * cur_type - 1
while rem_sushis:
cur_val = eat_sushis.top()
new_val = rem_sushis.top()
diff = new_val - cur_val
if diff >= sub_next:
eat_sushis.pop()
eat_sushis.push(rem_sushis.pop())
cur_type -= 1
sub_next = 2 * cur_type - 1
else:
break
ans = cur_type ** 2 + eat_sushis.sum()
print(ans)
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6849d2ec9790e047a0e8c225fd2ba62a5fdcdd56
|
3dcfa2980db0770af9b4355b0d5a5e5ef2313c50
|
/corpus/exceptions.py
|
173d06acf3cf5bd7493cf25b0c6f41cbc47cf052
|
[
"CC-BY-NC-2.0",
"CC-BY-NC-4.0",
"Apache-2.0"
] |
permissive
|
looselycoupled/partisan-discourse
|
5f4638d984fb54a5add870d4cb59445811c412a1
|
8579924094c92e25e21ce59a26232269cf6b34bc
|
refs/heads/master
| 2020-03-27T06:35:49.627350
| 2018-08-25T18:05:44
| 2018-08-25T18:05:44
| 146,118,079
| 0
| 0
|
Apache-2.0
| 2018-08-25T18:02:38
| 2018-08-25T18:02:38
| null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
# corpus.exceptions
# Custom exceptions for corpus handling.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Mon Jul 18 09:57:26 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: exceptions.py [63935bc] benjamin@bengfort.com $
"""
Custom exceptions for corpus handling.
"""
##########################################################################
## Corpus Exceptions
##########################################################################
class CorpusException(Exception):
"""
Something went wrong in the corpus app.
"""
pass
class BitlyAPIError(CorpusException):
"""
Something went wrong trying to shorten a url.
"""
pass
class FetchError(CorpusException):
"""
Something went wrong trying to fetch a url using requests.
"""
pass
class NLTKError(CorpusException):
"""
Something went wrong when using NLTK.
"""
pass
|
[
"benjamin@bengfort.com"
] |
benjamin@bengfort.com
|
e6abd68ee8891fd33558e2c79ba7b61eeb2bd4b5
|
b35aea9f4411f5dc7942392d78dc31bb76c7ec73
|
/djangoProject/services/migrations/0004_alter_sub_head.py
|
142021ffe6400739d7b16a3cef8ad1787f49ebfd
|
[] |
no_license
|
ashkanusefi/rondshow
|
1079b81704fff55a1d54fa8dee2712ab61e92f4a
|
7e5a80fcc6e326b8b1737a54fb53becc4195e475
|
refs/heads/master
| 2023-09-01T18:45:33.170465
| 2021-09-18T11:24:52
| 2021-09-18T11:24:52
| 407,820,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
# Generated by Django 3.2.5 on 2021-07-13 10:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('services', '0003_alter_sub_head'),
]
operations = [
migrations.AlterField(
model_name='sub',
name='head',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='services.service', verbose_name='نام سردسته'),
),
]
|
[
"yousefi.ashkan96@gmail.com"
] |
yousefi.ashkan96@gmail.com
|
ad2cf41e2835382146d98e659562119b73b04000
|
10425fd2f058afb9dd823929314bfede0a4eb513
|
/flaskaiohttp_websocket/app.py
|
67a5c4b45adbbf95106696a4225673ec018d7153
|
[] |
no_license
|
gaozhidf/flask_websocket
|
60883571a469a7c283e3da9a8fbf81d752f82f71
|
41653f71b7fd6d07d3592a22a11f29e795ba45d8
|
refs/heads/master
| 2022-11-29T04:31:08.953294
| 2017-08-12T08:53:24
| 2017-08-12T08:53:24
| 49,828,952
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,060
|
py
|
import json
import asyncio
import aiohttp
from flask import Flask, current_app
from flask_aiohttp import AioHTTP
from flask_aiohttp.helper import async, websocket
app = Flask(__name__)
aio = AioHTTP(app)
@app.route('/echo')
@websocket
def echo():
while True:
msg = yield from aio.ws.receive_msg()
if msg.tp == aiohttp.MsgType.text:
aio.ws.send_str(msg.data)
elif msg.tp == aiohttp.MsgType.close:
print('websocket connection closed')
break
elif msg.tp == aiohttp.MsgType.error:
print('ws connection closed with exception %s',
aio.ws.exception())
break
@app.route('/api')
@async
def api():
response = yield from aiohttp.request(
'GET', 'https://graph.facebook.com/zuck')
data = yield from response.read()
return data
@app.route('/param/<arg>')
@websocket
def param(arg):
while True:
msg = yield from aio.ws.receive_msg()
if msg.tp == aiohttp.MsgType.text:
aio.ws.send_str(arg)
elif msg.tp == aiohttp.MsgType.close:
print('websocket connection closed')
break
elif msg.tp == aiohttp.MsgType.error:
print('ws connection closed with exception %s',
aio.ws.exception())
break
@app.route('/late')
@async
def late():
yield from asyncio.sleep(3)
data = {
'data': 'done'
}
data = json.dumps(data)
current_app.response_class(data, headers={
'Content-Type': 'application/json',
}, status=201)
return 'done'
@app.route('/plain')
def plain():
return 'Hello, World!'
@app.route('/stream')
def stream():
def f():
yield 'Hello, '
yield 'World!'
return app.response_class(f())
@app.route('/async-stream')
@async
def async_stream():
def f():
yield 'I\'m '
yield 'sorry!'
yield from asyncio.sleep(1)
return app.response_class(f())
def main():
aio.run(app, debug=True)
if __name__ == '__main__':
main()
|
[
"gaozhidf@gmail.com"
] |
gaozhidf@gmail.com
|
5e56cc78a121e1d1b486e6bc4a3fc7a7cd46762b
|
2b54b1fb1540ab73d6c83cae3acd5fdd58bdead5
|
/Platinum_clusters_Project/final_images/Pt13_O2_DFTsorted/Pt7_3O2_TiO2_101surface_zorderimageplotbasedondepth1.py
|
4f1b59995eb2ca8b9bc48aa8fecadced15bc2251
|
[] |
no_license
|
sivachiriki/GOFEE_Pt_V_supported
|
5787d44294262870075f35f2d31c096021b7ce20
|
6bd700dac1f3e7c58394b758d75246ac6e07eade
|
refs/heads/master
| 2022-04-08T11:38:13.038455
| 2020-03-09T10:48:31
| 2020-03-09T10:48:31
| 226,359,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,837
|
py
|
from __future__ import division
import matplotlib
#matplotlib.use('Agg') # Can also use 'tkagg' or 'webagg'
#from plot_neb_tio2 import *
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from ase.io import read, write
from ase.visualize import view
import matplotlib.patches as mpatches
from ase.data.colors import jmol_colors
from pylab import *
from ase.data import covalent_radii as aradii
from matplotlib.patches import Circle
from math import atan2,pi
import matplotlib.gridspec as gridspec
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
matplotlib.rc('font',**{'family':'sans-serif',
'sans-serif':['Helvetica'],
'size':14})
matplotlib.rc('text',usetex=True)
matplotlib.rcParams['text.latex.unicode']=True
#matplotlib.rcParams['text.latex.preamble']=['\usepackage{bm}']
#matplotlib.rcParams['text.latex.preamble']=['\usepackage{xfrac}']
matplotlib.rcParams['mathtext.default'] = 'regular'
matplotlib.rcParams['ps.usedistiller'] = 'xpdf'
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
def plot_atoms(ax, atoms, xyz, acols, alp, z):
ecols = [[0, 0, 0] for col in atoms]
indices = range(len(atoms))
for ia in indices:
acol = acols[ia]
ecol = ecols[ia]
# if atoms[ia].symbol == 'Ti':
# arad = aradii[atoms[ia].number] #* 0.9 * 0.5
# else:
arad = aradii[atoms[ia].number] #* 0.9
apos = atoms[ia].position
eps = arad
circ = Circle([apos[xyz[0]], apos[xyz[1]]],
fc = acol,
ec = ecol,
radius = arad,
lw = 0.5,
alpha = alp[ia],
zorder = 1 - apos[1]/1000
)
ax.add_patch(circ)
def plot_conf(ax, atoms, rot=False):
colors = np.array([jmol_colors[atom.number] for atom in atoms])
for i, atom in enumerate(atoms):
if (atom.number ==78):
colors[i] =[0.1, 0.6, 0.6]
if (atom.number ==8 and i >= 135 and i <=149 ):
colors[i] =[0.1, 0.2, 0.5]
alp = [None] * colors.shape[0]
for i,a in enumerate(atoms):
if a.symbol == 'Al' or a.symbol == 'O':
if a.position[2] < 9.7:
alp[i] = 0.3
if rot:
atoms.rotate('x',pi/2)
plot_atoms(ax, atoms, [0,2,1], colors, alp, z=-1)
data=read('Pt13_O2_Al2O3_KRRfund9l_DFTrelaxedsorted.traj@:')
#for j in range(len(data)):
image = data[8] #* (2,2,1)
for i,a in enumerate(image):
# if a.position[1] >11.100:
# image.positions[i,1] =0.000
if i ==48 or i==3 :
image.positions[i,1] =image.positions[i,1]-12.429
image.positions[i,0] =image.positions[i,0]+7.176
# if i==148:
# image.positions[i,0] =image.positions[i,0]-14.352
#write('newimage.traj',image)
plt.figure(figsize=(6.0,7.0))
gs = gridspec.GridSpec(2, 1,
height_ratios=[7.77,9.090])
cell = image.get_cell()
# 0 0
ax = plt.subplot(gs[0, 0])
img = image.copy()
plot_conf(ax, img)
ax.set_xlim([-2.8, 11.85])
ax.set_ylim([10.0, 20.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
# 0 1
ax = plt.subplot(gs[1, 0])
image = image * (2,2,1)
write('newimage.traj',image)
cell = image.get_cell()
img = image.copy()
plot_conf(ax, img, rot=True)
ax.set_xlim([-2.8, 11.85])
ax.set_ylim([0.80, 12.50])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
gs.update(wspace=0.00,hspace=0.00)
plt.tight_layout()
name ='Pt13_O2_Al2O3_0001_DFTopt_g{}'.format(8)
savefig(name,bbox_inches='tight')
show()
|
[
"sivachiriki@phys.au.dk"
] |
sivachiriki@phys.au.dk
|
3dc1ada00afa0327b9f7befb7328a8b103da9b07
|
d7719b7b537a1484228d377d265ebeea8f76748a
|
/Robot Operating System (ROS)/ROS/Starter Code and Resources/ROS_Minimal_Projects/rospy_minimal_packages/modular_pub_sub/setup.py
|
f8270eaaf22d1aa84841ec98b72a858c7271b601
|
[
"BSD-2-Clause"
] |
permissive
|
OpenSUTD/coding-notes
|
9724ac9d35f585ff3140a43c8a10fcdcbaedfc79
|
f9b8c778f8494d0bf47bd816cfd77b88e78a5a1f
|
refs/heads/master
| 2022-07-16T22:17:21.930385
| 2019-07-03T10:11:30
| 2019-07-03T10:11:30
| 166,292,417
| 7
| 5
|
BSD-2-Clause
| 2019-07-03T11:20:16
| 2019-01-17T20:30:47
|
C++
|
UTF-8
|
Python
| false
| false
| 275
|
py
|
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
setup_args = generate_distutils_setup(
packages = ['modular_pub_sub'],
package_dir = {'': 'src'},
install_requires = ['']
)
setup(**setup_args)
|
[
"methylDragon@gmail.com"
] |
methylDragon@gmail.com
|
4f86503e9967ceaa9cb417c55dc2f4ceb6706b4e
|
a8595670862f9475050abf73399afe34faaa083b
|
/wb_api/urls.py
|
f2d8bb64d1f13e51f8b884542b8f6d173580934d
|
[] |
no_license
|
skiboorg/wb_api
|
14392df2da8569212c0ba05e527b46fcd9c30338
|
c45d8c340a45958bc6d380c2a431d13d0f1ebf37
|
refs/heads/master
| 2022-12-06T03:52:58.621255
| 2020-08-26T19:25:24
| 2020-08-26T19:25:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls')),
path('api/v1/', include('api.urls')),
path('ckeditor/', include('ckeditor_uploader.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"11@11.11"
] |
11@11.11
|
48217e37537211a9bfd2b671886a356efa6a7a8d
|
b047a32da65cc0fafe249160f57765ddbe80176e
|
/apps/support/templatetags/forum.py
|
20f193d4a3ba8af04bb65ae72f774e72f6431c3b
|
[
"MIT"
] |
permissive
|
fengjinqi/website
|
0568c679e7964bdbb637831a4f1dec7c5e8d767c
|
340eecec49ce0d66cd6a491d0ae9ad23ec9f841b
|
refs/heads/master
| 2023-02-18T10:12:52.158471
| 2023-02-16T08:40:13
| 2023-02-16T08:40:13
| 160,755,540
| 367
| 114
|
MIT
| 2022-12-08T01:42:40
| 2018-12-07T01:44:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,226
|
py
|
from datetime import datetime
from django.contrib.sessions.models import Session
from django.core.cache import cache
#from apps.forum.views import get_online_count
from apps.support.models import QQ
from apps.forum.models import Forum
from django import template
from django.utils.timezone import now, timedelta
from apps.user.models import User
register = template.Library()
@register.inclusion_tag('pc/aside/forum_side.html')
def get_fourm():
qq = QQ.objects.all()
fourm = Forum.objects.filter(hidden=False,category__name='求职招聘')[:10]
sessions = Session.objects.filter(expire_date__gte=datetime.now()).count()
#print(get_online_count())
user = User.objects.count()
cur_date = now().date() + timedelta(days=0)
days = Forum.objects.filter(hidden=False,add_time__gte=cur_date).count()
count = Forum.objects.filter(hidden=False).count()
Hottest = Forum.objects.filter(hidden=False).order_by('-click_nums')[:10]
return {'fourm':fourm,'qq':qq,'user':user,'sessions':sessions,'days':days,'count':count,'Hottest':Hottest}
@register.filter
def get_count(x):
return x.filter(hidden=False).count()
@register.filter
def get_counts(x):
return x.filter(is_show=True).count()
|
[
"tarena_feng@126.com"
] |
tarena_feng@126.com
|
4a98ed5c35bc602fa3cf5522d5f85ab078bbcb92
|
009f9761767f93a2986f8b5a2ba61bac6f33dc59
|
/examples/intro/8/example.py
|
0fc03bff85a41b5054ceffe6fd6a14aa7ee9e136
|
[
"MIT"
] |
permissive
|
crasiak/ginkgo
|
8798d28d16732cc1c5b18f8e5df0d17f8866f999
|
2592de2c8acfe6e62f33e7ac1f79cc5613567908
|
refs/heads/master
| 2021-01-16T20:07:45.269511
| 2012-04-06T07:26:46
| 2012-04-06T07:26:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
import gevent
from gevent.pywsgi import WSGIServer
from gevent.server import StreamServer
from gevent.socket import create_connection
from ginkgo.core import Service
class TcpClient(Service):
def __init__(self, address, handler):
self.address = address
self.handler = handler
def do_start(self):
self.spawn(self.handler, self.address)
class MyApplication(Service):
def __init__(self, config):
self.add_service(WSGIServer(('127.0.0.1', config['http_port']), self.handle_http))
self.add_service(StreamServer(('127.0.0.1', config['tcp_port']), self.handle_tcp))
self.add_service(TcpClient(config['connect_address'], self.client_connect))
def client_connect(self, address):
sockfile = create_connection(address).makefile()
while True:
            line = sockfile.readline() # returns '' on EOF
            if line:
print "<<<", line,
else:
break
def handle_tcp(self, socket, address):
print 'new tcp connection!'
while True:
socket.send('hello\n')
gevent.sleep(1)
def handle_http(self, env, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
print 'new http request!'
return ["hello world"]
|
[
"progrium@gmail.com"
] |
progrium@gmail.com
|
df8f34cabb73d62adf171eeae4b7788cbdfdf467
|
6db8aba817161dc573f16cde185f4a1c02c753e0
|
/XOR.py
|
0c07e2db55b43284b1baa73054a707acc0f131b8
|
[] |
no_license
|
Prakashchater/Leetcode-array-easy-questions
|
456153a13397c895acae6550dad8f1b1851ff854
|
7c5d40f9d68dbf61f4a61a33d9b54f769473b057
|
refs/heads/main
| 2023-06-19T14:01:52.483440
| 2021-07-22T19:44:40
| 2021-07-22T19:44:40
| 354,926,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
xor = 0
out = []
arr = [10,11,1,2,3]
n = arr[len(arr)-1]
for i in range(len(arr)-1):
out.append(arr[i]^arr[i+1])
out.append(arr[len(arr)-1])
print(out)
|
[
"prakashchater@gmail.com"
] |
prakashchater@gmail.com
|
ac7f9a29b1083ff198275d312b01fecad5ed4fc3
|
039446516b188899e2fd21a41087ad20f06d666b
|
/src/server_code/game_logic.py
|
9934157d88fc8ff515d2b4ff8f39a8e5a2c028ab
|
[] |
no_license
|
Tyorat/TicTacToe
|
62ebbeee168568a0c590a5923127a3ac529ba134
|
0281f3c7b293256e2c73ac1530786308cea385af
|
refs/heads/main
| 2023-06-24T22:37:18.744669
| 2021-07-12T23:11:30
| 2021-07-12T23:11:30
| 377,521,858
| 0
| 0
| null | 2021-07-12T23:11:31
| 2021-06-16T14:20:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,001
|
py
|
import secrets
WIN_COMBO = ((1, 2, 3),
(4, 5, 6),
(7, 8, 9),
(1, 4, 7),
(2, 5, 8),
(3, 6, 9),
(1, 5, 9),
(7, 5, 3)
)
SWITCH_TURN = {"x": "o", "o": "x"}
class WrongMove(Exception): pass
class Game:
def __init__(self, player_one, player_two):
self.__field = list(range(1, 10))
self.switch_turn = {player_one: player_two, player_two: player_one}
self.__turn = None
self.choose_random_player()
turn = property()
@turn.getter
def turn(self):
return self.__turn
def check_end_game(self):
self.show_field()
for combo in WIN_COMBO:
if self.__field[combo[0] - 1] == self.__field[combo[1] - 1] == self.__field[combo[2] - 1]:
return{"endgame": True, "message": f"win {self.__field[combo[0] - 1]}"}
if not any(list(map(lambda x: str(x).isdigit(), self.__field))):
return {"endgame": True, "message": "draw"}
else:
return {"endgame": False, "message": "wait for opponent"}
def check_turn(self, index, who):
if self.__field[index - 1] != index:
raise WrongMove("The cell is already occupied")
elif who not in self.switch_turn.keys():
raise WrongMove("Wrong player")
elif who != self.__turn:
raise WrongMove("Not your turn")
self.__field[index - 1] = who
res = self.check_end_game()
self.__turn = self.switch_turn[self.__turn]
return res
def choose_random_player(self):
print(self.switch_turn.keys())
self.__turn = secrets.choice(list(self.switch_turn.keys()))
def show_field(self):
print("************")
print("|" + "|".join(map(str, self.__field[:3])) + "|")
print("|" + "|".join(map(str, self.__field[3:6])) + "|")
print("|" + "|".join(map(str, self.__field[6:])) + "|")
print("************")
|
[
"you@example.com"
] |
you@example.com
|
c8df91551a44a334be5a4cd94d26220e4cc54a07
|
84e661d5d293ec0c544fedab7727767f01e7ddcf
|
/target/migrations/0011_auto_20201101_1147.py
|
9ca50acaa24915c420cbf92d31354e33dd6cdc7f
|
[
"BSD-3-Clause"
] |
permissive
|
groundupnews/gu
|
ea6734fcb9509efc407061e35724dfe8ba056044
|
4c036e79fd735dcb1e5a4f15322cdf87dc015a42
|
refs/heads/master
| 2023-08-31T13:13:47.178119
| 2023-08-18T11:42:58
| 2023-08-18T11:42:58
| 48,944,009
| 21
| 23
|
BSD-3-Clause
| 2023-09-14T13:06:42
| 2016-01-03T11:56:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 882
|
py
|
# Generated by Django 3.0.10 on 2020-11-01 09:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('target', '0010_auto_20201101_1127'),
]
operations = [
migrations.AddField(
model_name='target',
name='tweet_notified_published',
field=models.BooleanField(default=False, editable=False),
),
migrations.AddField(
model_name='target',
name='tweet_notified_solution',
field=models.BooleanField(default=False, editable=False),
),
migrations.AlterField(
model_name='target',
name='publish_solution_after',
field=models.SmallIntegerField(default=24, help_text='Make solution available after this many hours', null=True, verbose_name='solution time'),
),
]
|
[
"nathangeffen@gmail.com"
] |
nathangeffen@gmail.com
|
4f91db0c179ce5761c01cb598130a254ba26e16f
|
1e8d9a251b36f2e80a851d541321522ce4e812fa
|
/igmspec/scripts/plot_igmspec.py
|
c3e68e4c64d8f0b820e6ba8daf9c1882d20f2c30
|
[] |
no_license
|
Waelthus/igmspec
|
c81f31360e9528bd150a991ad96b8b4ca94962d0
|
8fdbb622360ca5263711f75d0f7571ed955f6e28
|
refs/heads/master
| 2020-12-25T21:55:50.001007
| 2016-07-17T21:17:08
| 2016-07-17T21:17:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,154
|
py
|
#!/usr/bin/env python
""" Loads and plots a requested spectrum
"""
import pdb
def parser(options=None):
import argparse
parser = argparse.ArgumentParser(description='plot_igmspec script v0.2')
parser.add_argument("coord", type=str, help="Coordinates, e.g. J081240+320808")
parser.add_argument("--tol", default=5., type=float, help="Maximum offset in arcsec [default=5.]")
parser.add_argument("--meta", default=True, help="Show meta data? [default: True]", action="store_true")
parser.add_argument("-s", "--survey", help="Name of Survey to use")
parser.add_argument("--select", default=0, type=int, help="Index of spectrum to plot (when multiple exist)")
parser.add_argument("--mplot", default=False, help="Use simple matplotlib plot [default: False]")
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(args, unit_test=False, **kwargs):
""" Run
"""
from astropy import units as u
from igmspec.igmspec import IgmSpec
from igmspec import cat_utils as icu
# init
igmsp = IgmSpec(**kwargs)
# Grab
all_spec, all_meta = igmsp.spec_from_coord(args.coord, tol=args.tol*u.arcsec, isurvey=args.survey)
# Outcome
if len(all_meta) == 0:
print("No source found, try another location or a larger tolerance.")
return
elif len(all_meta) == 1: # One survey hit
spec = all_spec[0]
        meta = all_meta[0]
else: # More than 1 survey
idx = 0
spec = all_spec[idx]
meta = all_meta[idx]
surveys = [meta.meta['survey'] for meta in all_meta]
print("Source located in more than one survey")
print("Using survey {:s}. You can choose from this list {}".format(surveys[idx], surveys))
#print("Choose another survey from this list (as you wish): {}".format(surveys))
if args.meta:
igmsp.idb.show_meta()
# Load spectra
spec.select = args.select
if unit_test:
return
# Show [may transition to xspec]
if args.mplot:
spec.plot()
else:
spec.plot(xspec=True)
|
[
"xavier@ucolick.org"
] |
xavier@ucolick.org
|
c37ee59446bb0ce436a571312628fce8121b88a8
|
1905e5cece92e6cdc68dac3ebb0ee1d05bef35c8
|
/fuzzinator/tracker/base.py
|
84de8815bb65c1977f3e473d326d6eed14335b6c
|
[
"BSD-3-Clause"
] |
permissive
|
darrynza/fuzzinator
|
e876131d18c5f0a17ae8bdc2fb10f18d8b0084fb
|
e1642f75ba8c1b555f7e2557b52f43df4d17b89f
|
refs/heads/master
| 2020-04-29T04:39:36.453300
| 2018-12-06T17:15:35
| 2019-01-08T23:36:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
# Copyright (c) 2016-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
class BaseTracker(object):
@property
def logged_in(self):
return True
def find_issue(self, issue):
pass
def report_issue(self, **kwargs):
pass
def issue_url(self, issue):
return ''
|
[
"reni@inf.u-szeged.hu"
] |
reni@inf.u-szeged.hu
|
007ee5b8228f8de322122564f9f44722684aa6cf
|
87a9706379670da62739b3c1fbbdd75edb5107b8
|
/Django/django_celery_test/django_celery_test/celeryconfig.py
|
cda60c880a754a04fa66f089f2be5f3d6b7e1eed
|
[] |
no_license
|
zxbzxb180/python_work
|
ba21ab74f842e0d560a8bb192bb8a874d356b9e1
|
6406024e011aa06d1bda78d97cfecc47f7f2058c
|
refs/heads/master
| 2022-12-12T23:53:36.887963
| 2020-03-04T07:20:29
| 2020-03-04T07:20:29
| 194,494,744
| 0
| 0
| null | 2022-11-22T03:54:47
| 2019-06-30T08:48:44
|
Python
|
UTF-8
|
Python
| false
| false
| 854
|
py
|
import djcelery
djcelery.setup_loader()
BROKER_BACKEND = 'redis'
BROKER_URL = 'redis://:6222580@localhost:6379/1'
CELERY_RESULT_BACKEND = 'redis://:6222580@localhost:6379/2'
CELERY_QUEUES = {
'beat_tasks': {
'exchange': 'beat_tasks',
'exchange_type': 'direct',
'binding_key': 'beat_tasks'
},
'work_queue': {
'exchange': 'work_queue',
'exchange_type': 'direct',
'binding_key': 'work_queue'
}
}
CELERY_DEFAULT_QUEUE = 'work_queue'
CELERY_IMPORTS = (
'course.tasks',
)
# Prevents deadlocks in some situations
CELERYD_FORCE_EXECV = True
# Set the number of concurrent workers
CELERYD_CONCURRENCY = 4
# Allow retries
CELERY_ACKS_LATE = True
# Each worker executes at most 100 tasks, which prevents memory leaks
CELERYD_MAX_TASKS_PER_CHILD = 100
# Maximum execution time for a single task
CELERYD_TASK_TIME_LIMIT = 12 * 30
|
[
"616529325@qq.com"
] |
616529325@qq.com
|
f5baeac0738dfa8508464ce5bcfa5f41ca97435b
|
4d343b7e92a44b8c8eb3e3afeeeb0e1a7a699869
|
/ch4-practice/books/models.py
|
feed0423f49f8baba32c13a55d88e9fa25a7ef57
|
[] |
no_license
|
dev-iwin/book4-Django-redbook
|
dfca5395ae68df7536a9e7b64b73b582608b6eaa
|
64e0b260b50bae8bd35b918eb341725c3c3373b4
|
refs/heads/master
| 2023-03-07T05:12:26.990807
| 2021-02-20T21:15:12
| 2021-02-20T21:15:12
| 335,654,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
from django.db import models
# Model for example 4-26 (built on my own) ==========================
class Book(models.Model):
book_name = models.CharField(max_length=300)
pub_date = models.DateTimeField('publication_date')
def __str__(self):
return self.book_name
# =============================================================
|
[
"iwinoriwin@gmail.com"
] |
iwinoriwin@gmail.com
|
be1ff21d8d3789702fd02aa6333e49ef6cfe5047
|
cddfa750235344aa5e04244ce5c36871d3c6465b
|
/mayan/apps/document_states/links.py
|
f2d41f7e5898b86968e418d4195fb20c1560a36b
|
[
"Apache-2.0"
] |
permissive
|
Lomascolo/mayan-edms
|
76e0fdcad98605838df6737d109c95d67d9ebba5
|
f7f0d27a059b1e010b9bbcdf371b9867f6fcfa45
|
refs/heads/master
| 2021-01-24T08:30:07.480929
| 2017-05-30T06:01:31
| 2017-05-30T06:02:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,176
|
py
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from navigation import Link
from .permissions import (
permission_workflow_create, permission_workflow_delete,
permission_workflow_edit, permission_workflow_tools,
permission_workflow_view,
)
link_document_workflow_instance_list = Link(
icon='fa fa-sitemap', permissions=(permission_workflow_view,),
text=_('Workflows'),
view='document_states:document_workflow_instance_list',
args='resolved_object.pk'
)
link_setup_workflow_create = Link(
permissions=(permission_workflow_create,), text=_('Create workflow'),
view='document_states:setup_workflow_create'
)
link_setup_workflow_delete = Link(
permissions=(permission_workflow_delete,), tags='dangerous',
text=_('Delete'), view='document_states:setup_workflow_delete',
args='object.pk'
)
link_setup_workflow_document_types = Link(
permissions=(permission_workflow_edit,), text=_('Document types'),
view='document_states:setup_workflow_document_types', args='object.pk'
)
link_setup_workflow_edit = Link(
permissions=(permission_workflow_edit,), text=_('Edit'),
view='document_states:setup_workflow_edit', args='object.pk'
)
link_setup_workflow_list = Link(
permissions=(permission_workflow_view,), icon='fa fa-sitemap',
text=_('Workflows'), view='document_states:setup_workflow_list'
)
link_setup_workflow_state_create = Link(
permissions=(permission_workflow_edit,), text=_('Create state'),
view='document_states:setup_workflow_state_create', args='object.pk'
)
link_setup_workflow_state_delete = Link(
permissions=(permission_workflow_edit,), tags='dangerous',
text=_('Delete'), view='document_states:setup_workflow_state_delete',
args='object.pk'
)
link_setup_workflow_state_edit = Link(
permissions=(permission_workflow_edit,), text=_('Edit'),
view='document_states:setup_workflow_state_edit', args='object.pk'
)
link_setup_workflow_states = Link(
permissions=(permission_workflow_view,), text=_('States'),
view='document_states:setup_workflow_states', args='object.pk'
)
link_setup_workflow_transition_create = Link(
permissions=(permission_workflow_edit,), text=_('Create transition'),
view='document_states:setup_workflow_transition_create', args='object.pk'
)
link_setup_workflow_transition_delete = Link(
permissions=(permission_workflow_edit,), tags='dangerous',
text=_('Delete'), view='document_states:setup_workflow_transition_delete',
args='object.pk'
)
link_setup_workflow_transition_edit = Link(
permissions=(permission_workflow_edit,), text=_('Edit'),
view='document_states:setup_workflow_transition_edit', args='object.pk'
)
link_setup_workflow_transitions = Link(
permissions=(permission_workflow_view,), text=_('Transitions'),
view='document_states:setup_workflow_transitions', args='object.pk'
)
link_tool_launch_all_workflows = Link(
icon='fa fa-sitemap',
permissions=(permission_workflow_tools,),
text=_('Launch all workflows'),
view='document_states:tool_launch_all_workflows'
)
link_workflow_instance_detail = Link(
permissions=(permission_workflow_view,), text=_('Detail'),
view='document_states:workflow_instance_detail', args='resolved_object.pk'
)
link_workflow_instance_transition = Link(
text=_('Transition'),
view='document_states:workflow_instance_transition',
args='resolved_object.pk'
)
link_workflow_document_list = Link(
permissions=(permission_workflow_view,), text=_('Workflow documents'),
view='document_states:workflow_document_list', args='resolved_object.pk'
)
link_workflow_list = Link(
permissions=(permission_workflow_view,), icon='fa fa-sitemap',
text=_('Workflows'), view='document_states:workflow_list'
)
link_workflow_state_document_list = Link(
permissions=(permission_workflow_view,),
text=_('State documents'), view='document_states:workflow_state_document_list',
args='resolved_object.pk'
)
link_workflow_state_list = Link(
permissions=(permission_workflow_view,),
text=_('States'), view='document_states:workflow_state_list',
args='resolved_object.pk'
)
|
[
"roberto.rosario.gonzalez@gmail.com"
] |
roberto.rosario.gonzalez@gmail.com
|
a6066f6913650d57e3ddb4301debeef629944d3d
|
e97fb7903336837edb6bb3db16ea48512128076e
|
/22. Database/2. Parameterized/24. RetrieveMultipleRowWhereClauseUserInputDict.py
|
c2ab4af145f726d2a9c6710e5f457dcc12b68740
|
[] |
no_license
|
satyambhatt5/Advance_Python_code
|
4081bdb1e80f05161c07f416ebade3907e83f0fd
|
58746540285f253699b73aeebd3975911a310deb
|
refs/heads/master
| 2023-05-22T20:10:54.271672
| 2021-06-16T08:55:16
| 2021-06-16T08:55:16
| 377,386,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
# Retrieve Multiple Rows WHERE clause - User Input - Dict
import mysql.connector
try:
conn= mysql.connector.connect(
user='root',
password='geek',
host='localhost',
database='pdb',
port=3306
)
if (conn.is_connected()):
print('Connected')
except:
print('Unable to Connect')
sql = 'SELECT * FROM student WHERE roll=%(roll)s'
myc = conn.cursor()
n = int(input('Enter Roll to Display: '))
disp_value = {'roll':n}
try:
myc.execute(sql, disp_value)
row = myc.fetchone()
while row is not None:
print(row)
row = myc.fetchone()
print('Total Rows:',myc.rowcount)
except:
print('Unable to Retrieve Data')
myc.close() # Close Cursor
conn.close() # Close Connection
|
[
"bhattsatyam793@gmail.com"
] |
bhattsatyam793@gmail.com
|
e98c9e6e4e8e98f0eb86148a6604600fbb0f969e
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part002645.py
|
26c00d84fbe342c060edabef02fe3c69582a4427
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,821
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher122210(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.2.1.0_1', 1, 1, S(1)), Mul)
]),
1: (1, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.2.1.0', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher122210._instance is None:
CommutativeMatcher122210._instance = CommutativeMatcher122210()
return CommutativeMatcher122210._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 122209
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp1 = subjects.popleft()
subjects2 = deque(tmp1._args)
# State 123779
if len(subjects2) >= 1:
tmp3 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.1.1', tmp3)
except ValueError:
pass
else:
pass
# State 123780
if len(subjects2) >= 1:
tmp5 = subjects2.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.2', tmp5)
except ValueError:
pass
else:
pass
# State 123781
if len(subjects2) == 0:
pass
# State 123782
if len(subjects) == 0:
pass
# 0: x**n
yield 0, subst2
subjects2.appendleft(tmp5)
subjects2.appendleft(tmp3)
subjects.appendleft(tmp1)
return
yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
4db5502b3cb8b1723df8a7ac89467e02e213fda7
|
d83f50302702d6bf46c266b8117514c6d2e5d863
|
/counting-bits.py
|
f875bfed4d8a2d111f435b9c52cfced316a0c179
|
[] |
no_license
|
sfdye/leetcode
|
19764a6bdb82de114a2c82986864b1b2210c6d90
|
afc686acdda4168f4384e13fb730e17f4bdcd553
|
refs/heads/master
| 2020-03-20T07:58:52.128062
| 2019-05-05T08:10:41
| 2019-05-05T08:10:41
| 137,295,892
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
class Solution:
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
ones = [0] * (num + 1)
for i in range(1, num + 1):
ones[i] = ones[i & (i - 1)] + 1
return ones
|
[
"tsfdye@gmail.com"
] |
tsfdye@gmail.com
|
4f0d5c22413bdaacf869bf9cbd12d47bcc73f375
|
1dc753d68b234b10193962f58d306bd91957eb6d
|
/college/college/doctype/student_achievements/student_achievements.py
|
66884338ed30206d53469c0ed0ba413e759ab9c7
|
[
"MIT"
] |
permissive
|
harshith187/college
|
e8612134e47c48ad721840f684362f7348e9bad4
|
d9ae21734dcde70397aead827e57fbbdcdeb98c9
|
refs/heads/master
| 2020-07-20T12:36:27.601134
| 2020-05-25T13:53:57
| 2020-05-25T13:53:57
| 206,641,495
| 0
| 4
|
NOASSERTION
| 2020-05-25T15:05:16
| 2019-09-05T19:27:37
|
Python
|
UTF-8
|
Python
| false
| false
| 266
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, mvit ise and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class StudentAchievements(Document):
pass
|
[
"frappe@ubuntu.vm"
] |
frappe@ubuntu.vm
|
7948a9e20dfc18adb728f35ea7d8d4a1387faf1a
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2408/60670/279400.py
|
462432fde739ac9f0e437d3408deb95a44e663a4
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
def isPrime(x):
    if x<2:
        return False
    for i in range(2,int(x**0.5)+1):
        if x%i==0:
            return False
    return True
def factorial(n):
    t=1
    for i in range(1,n+1):
        t=t*i%1000000007
    return t
n=int(input())
numOfPrime=0
for i in range(1,n+1):
if isPrime(i):
numOfPrime+=1
print((factorial(numOfPrime)*factorial(n-numOfPrime))%1000000007)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
955a3394f44e953f1a4c30c5c454af78e16f84da
|
a2477654a0fb85f9507389ff7a4b4a8bcc1641fa
|
/trydjango1-11/src/restaurants/migrations/0003_auto_20170926_1624.py
|
5708b2f804f86a92b2d7213e1dbc4f79de3a24b5
|
[] |
no_license
|
ervinpepic/Django-11-Restaurant-app
|
6ae1e2dec7571b0180ea991ca80b9b83d00cdb1b
|
a6bd976130c70621e6149ee64c61e1cdcec2acba
|
refs/heads/master
| 2022-10-18T08:34:11.496044
| 2017-11-25T19:57:36
| 2017-11-25T19:57:36
| 111,400,182
| 0
| 1
| null | 2022-10-10T08:12:45
| 2017-11-20T11:13:00
|
Python
|
UTF-8
|
Python
| false
| false
| 416
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-09-26 16:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('restaurants', '0002_restaurant_location'),
]
operations = [
migrations.RenameModel(
old_name='Restaurant',
new_name='RestaurantLocation',
),
]
|
[
"ervin.hack@gmail.com"
] |
ervin.hack@gmail.com
|
85dedc26a7d0b18671e3606cefba8011ec6f33a6
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_156/521.py
|
ca8aafaec283d6e9fa857be6020a6168166a825e
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,635
|
py
|
#!/usr/bin/python3
import sys
import math
ncases = int(sys.stdin.readline().strip())
for t in range(1, ncases+1):
d = int(sys.stdin.readline().strip())
values = sys.stdin.readline().strip().split()
pancakes = [int(x) for x in values]
pancakes.sort(reverse=True)
best = pancakes[0]
# Node format: List of diners with pancakes, number of special minutes
initial_node = [pancakes, 0]
queue = [initial_node]
while queue:
node = queue.pop(0)
diners = node[0]
special = node[1]
top = diners[0]
#if (top + special) >= best:
# continue
if (top + special) < best:
best = top + special
if top < 4:
continue
# Let's introduce new special minutes. Note _all_ diners with
# the max number of pancakes should be split (adding more special
# minuts), as splitting just one of them is stupid
for n in [2, 3, 4]:
splits = []
remainder = top
for i in range(0, n):
split = math.floor(remainder/(n-i))
remainder -= split
splits.append(split)
diners_after_special = list(diners)
new_special = special
while diners_after_special[0] == top:
diners_after_special.pop(0)
diners_after_special += splits
new_special += (n-1)
diners_after_special.sort(reverse=True)
new_node = [diners_after_special, new_special]
queue.append(new_node)
print("Case #{0}: {1}".format(t, best))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
36815ed5dbc21619f0e347fd9614d4889ea71b0d
|
bfb882c400956861fccd40bf1fb53cd6ddcba41e
|
/hagelslag/processing/__init__.py
|
947f56449e95c6deffd11da0f81a50f94c71a716
|
[
"MIT"
] |
permissive
|
stsaten6/hagelslag
|
3b1b07cf424997686b3320c538a188c790232bd7
|
6b7d0779a0b0ac4bd26fbe4931b406fad1ef9f9e
|
refs/heads/master
| 2020-03-10T17:38:44.528943
| 2018-04-12T20:50:38
| 2018-04-12T20:50:38
| 129,504,847
| 2
| 0
|
MIT
| 2018-04-14T09:58:37
| 2018-04-14T09:58:37
| null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
from .EnhancedWatershedSegmenter import EnhancedWatershed
from .EnsembleProducts import MachineLearningEnsembleProducts, EnsembleProducts, EnsembleConsensus
from .Hysteresis import Hysteresis
from .ObjectMatcher import ObjectMatcher, TrackMatcher
from .ObjectMatcher import mean_minimum_centroid_distance, centroid_distance, shifted_centroid_distance, nonoverlap, \
mean_min_time_distance, start_centroid_distance, start_time_distance, closest_distance
from .STObject import STObject, read_geojson
from .tracker import *
|
[
"djgagne@ou.edu"
] |
djgagne@ou.edu
|
feb3861b0c0a06a508fdf4a0748c05fe0b8f72be
|
0f00c8a02e8dc1d8136b2afc92338108f92cc6ae
|
/recipes/mrbayes/run_test.py
|
40033ea2ed9721ad50dfc69b067eccb43cef93ff
|
[] |
no_license
|
faircloth-lab/conda-recipes
|
3714f5be83753261bf3abc70454bdf6b7028c8d6
|
75a520a75a357ea47ee80262f3c3a6dfe1b0715f
|
refs/heads/master
| 2021-01-20T07:07:05.705307
| 2015-06-16T13:50:18
| 2015-06-16T13:50:18
| 12,671,015
| 2
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2013 Brant Faircloth || http://faircloth-lab.org/
All rights reserved.
This code is distributed under a 3-clause BSD license. Please see
LICENSE.txt for more information.
Created on 30 December 2013 16:33 PST (-0800)
"""
import unittest
import subprocess
class TestMb(unittest.TestCase):
def test_mb(self):
cmd = ["mb", "-h"]
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
self.stdout, self.stderr = proc.communicate()
text = [v.strip() for k, v in enumerate(self.stdout.split("\n"))
if k in range(0, 6, 2)]
assert text == [
'',
'MrBayes v3.2.2 x64',
'(Bayesian Analysis of Phylogeny)'
]
class TestMbMpi(unittest.TestCase):
def test_mb(self):
cmd = ["mb-mpi", "-h"]
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
self.stdout, self.stderr = proc.communicate()
text = [v.strip() for k, v in enumerate(self.stdout.split("\n"))
if k in range(0, 6, 2)]
assert text == [
'MrBayes v3.2.2 x64',
'(Bayesian Analysis of Phylogeny)',
'(Parallel version)'
]
if __name__ == '__main__':
unittest.main()
|
[
"brant@faircloth-lab.org"
] |
brant@faircloth-lab.org
|
8c49afcd2557458371bc37031be00356b871799d
|
092e00ae8389811929a381637b73dcb2303fefeb
|
/blog/domain/user.py
|
338592ec2da4b0e0020f532f84602d13ba2ace07
|
[] |
no_license
|
uiandwe/rest_framework_ex
|
33cfb73e386785009b1d012a3dfa6909bdc74ab3
|
8130bcf9a6ffd67b91906c85d66ed9d8d453bab8
|
refs/heads/master
| 2022-11-27T20:56:26.911462
| 2021-10-12T07:46:17
| 2021-10-12T07:46:17
| 234,095,110
| 0
| 0
| null | 2022-11-22T05:17:55
| 2020-01-15T14:12:34
|
Python
|
UTF-8
|
Python
| false
| false
| 220
|
py
|
# -*- coding: utf-8 -*-
class User:
def __init__(self, email, username):
self.email = email
self.username = username
def __repr__(self):
return "{}, {}".format(self.email, self.username)
|
[
"uiandwe@gmail.com"
] |
uiandwe@gmail.com
|
291145b4c5ed899fc48d811be2dd62caa2b32b4a
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/4010/819004010.py
|
23f27f88966ad294e1ec85c55e27af7395e422d6
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
from bots.botsconfig import *
from records004010 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'JB',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BOS', MIN: 1, MAX: 1},
{ID: 'CUR', MIN: 0, MAX: 1},
{ID: 'ITD', MIN: 0, MAX: 5},
{ID: 'N1', MIN: 0, MAX: 10, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'MSG', MIN: 0, MAX: 12},
{ID: 'PER', MIN: 0, MAX: 3},
]},
{ID: 'JIL', MIN: 1, MAX: 10000, LEVEL: [
{ID: 'PID', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'MSG', MIN: 0, MAX: 12},
{ID: 'MEA', MIN: 0, MAX: 10},
{ID: 'ITA', MIN: 0, MAX: 10},
{ID: 'PSA', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'JID', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'PID', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'MSG', MIN: 0, MAX: 12},
{ID: 'MEA', MIN: 0, MAX: 5},
]},
]},
{ID: 'AMT', MIN: 1, MAX: 1},
{ID: 'QTY', MIN: 0, MAX: 5},
{ID: 'TDS', MIN: 0, MAX: 1},
{ID: 'PSA', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'N1', MIN: 0, MAX: 1},
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'PER', MIN: 0, MAX: 3},
]},
{ID: 'CTT', MIN: 1, MAX: 1},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
51086a37acacb82ec4da2e56fe316b05793a58d1
|
2335e7d1c10d800abb10b4432465f29a4456548d
|
/setup.py
|
721f1b8d75682c30d9183bd741ff5d826e50db7d
|
[
"LicenseRef-scancode-warranty-disclaimer",
"EFL-2.0"
] |
permissive
|
deathbybandaid/Sopel-StartupMonologue
|
48a7e85ca117c630cf8039af76a0bbaea91ff5a1
|
f495344cee379e66ec5022e1e7edf15f075c758c
|
refs/heads/master
| 2020-05-09T11:18:01.564022
| 2019-04-27T14:12:38
| 2019-04-27T14:12:38
| 181,074,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,434
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
from setuptools import setup, find_packages
if __name__ == '__main__':
print('Sopel does not correctly load modules installed with setup.py '
'directly. Please use "pip install .", or add {}/sopel_modules to '
'core.extra in your config.'.format(
os.path.dirname(os.path.abspath(__file__))),
file=sys.stderr)
with open('README.md') as readme_file:
readme = readme_file.read()
with open('NEWS') as history_file:
history = history_file.read()
with open('requirements.txt') as requirements_file:
requirements = [req for req in requirements_file.readlines()]
with open('dev-requirements.txt') as dev_requirements_file:
dev_requirements = [req for req in dev_requirements_file.readlines()]
setup(
name='sopel_modules.startupmonologue',
version='0.1.0',
description='Sopel Startup Monologue displays to all channels that the bot is online',
long_description=readme + '\n\n' + history,
author='Sam Zick',
author_email='sam@deathbybandaid.net',
url='https://github.com/deathbybandaid/Sopel-StartupMonologue',
packages=find_packages('.'),
namespace_packages=['sopel_modules'],
include_package_data=True,
install_requires=requirements,
tests_require=dev_requirements,
test_suite='tests',
license='Eiffel Forum License, version 2',
)
|
[
"sam@deathbybandaid.net"
] |
sam@deathbybandaid.net
|
55c13d8cf177119f3b0b4ac0b18bc121cc4f8d62
|
f64e31cb76909a6f7fb592ad623e0a94deec25ae
|
/tests/test_p1494_parallel_courses_ii.py
|
dbf8cbae087e98cebaed176c651d916aaa595833
|
[] |
no_license
|
weak-head/leetcode
|
365d635cb985e1d154985188f6728c18cab1f877
|
9a20e1835652f5e6c33ef5c238f622e81f84ca26
|
refs/heads/main
| 2023-05-11T14:19:58.205709
| 2023-05-05T20:57:13
| 2023-05-05T20:57:13
| 172,853,059
| 0
| 1
| null | 2022-12-09T05:22:32
| 2019-02-27T05:58:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
# flake8: noqa: F403, F405
import pytest
from leetcode.p1494_parallel_courses_ii import *
solutions = [
minNumberOfSemesters,
]
test_cases = [
(
[
13,
[
[12, 8],
[2, 4],
[3, 7],
[6, 8],
[11, 8],
[9, 4],
[9, 7],
[12, 4],
[11, 4],
[6, 4],
[1, 4],
[10, 7],
[10, 4],
[1, 7],
[1, 8],
[2, 7],
[8, 4],
[10, 8],
[12, 7],
[5, 4],
[3, 4],
[11, 7],
[7, 4],
[13, 4],
[9, 8],
[13, 8],
],
9,
],
3,
),
([4, [[2, 1], [3, 1], [1, 4]], 2], 3),
([5, [[2, 1], [3, 1], [4, 1], [1, 5]], 2], 4),
([11, [], 2], 6),
([11, [], 1], 11),
([11, [], 3], 4),
([11, [], 6], 2),
([11, [], 8], 2),
([11, [], 10], 2),
([11, [], 11], 1),
([11, [], 12], 1),
]
@pytest.mark.timeout(2)
@pytest.mark.parametrize(("args", "expectation"), test_cases)
@pytest.mark.parametrize("solution", solutions)
def test_solution(args, expectation, solution):
assert solution(*args) == expectation
|
[
"zinchenko@live.com"
] |
zinchenko@live.com
|
78e368fb716111fadb4e8ba88e1ddd8e34f363a5
|
98b0d740346ad9aecd228b9a8ebb8e818908ce03
|
/hr-1.py
|
0d51517045973153f9d6f31c16975b8fb25a1e6b
|
[] |
no_license
|
alexisbellido/python-examples
|
8c63156a2800a584a8aff0909325e38acbe49163
|
e6a4f61d9cd18588987430007e28ef036971764b
|
refs/heads/master
| 2022-10-16T08:28:15.312916
| 2022-09-30T15:55:31
| 2022-09-30T15:55:31
| 240,379,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
def hi(name):
return f'Hi, {name}'
if __name__ == '__main__':
# people = [input().split() for i in range(int(input()))]
# print(*name_format(people), sep='\n')
####################
people = [
'John',
'Mike',
]
# print(hi(people[0]))
# print(hi(people[1]))
# print(*hi(people), sep='\n')
|
[
"alexis@ventanazul.com"
] |
alexis@ventanazul.com
|
9e783b4e701f26b5c214da0138af22e4c3c66562
|
f2ac9260dfa7483cd54a30700bb952e10acbc1bb
|
/fit_lr.py
|
27c2ea1089ad19bf4212c6e4d9de0bab81cb012f
|
[] |
no_license
|
kudkudak/compound-activity-prediction
|
94dd9efd2ff7ba5c95ebb71ce1766eb6b8882aac
|
d55e6ecb4e3de74d40b1a37950449f60df1a2ca4
|
refs/heads/master
| 2016-09-15T21:35:54.930142
| 2015-01-14T13:09:19
| 2015-01-14T13:09:19
| 27,130,096
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,056
|
py
|
from misc.utils import *
from misc.experiment_utils import get_exp_options, print_exp_header, \
save_exp, get_exp_logger, generate_configs, print_exp_name
from data_api import prepare_experiment_data, prepare_experiment_data_embedded, get_raw_training_data
from sklearn.metrics import matthews_corrcoef, accuracy_score, confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
import sklearn.linear_model
def fit_lrs(config_in = None):
#### Load config and data ####
config = {"protein":0, "fingerprint":4,"n_folds":10,
"use_embedding": 1, "K":20, "max_hashes":1000, "seed":0, "C_min":-3, "C_max":7}
if config_in is None:
config.update(get_exp_options(config))
else:
config.update(config_in)
D, config_from_data = prepare_experiment_data_embedded(n_folds=10, seed=config["seed"], K=config["K"], \
max_hashes=config["max_hashes"],
protein=config["protein"], fingerprint=config["fingerprint"])
config.update(config_from_data)
config["C"] = [10.0**(i/float(2)) for i in range(2*config["C_min"],2*(1+config["C_max"]))]
print config["C"]
logger = get_exp_logger(config)
### Prepare experiment ###
E = {"config": config, "experiments":[]}
def fit_lr(config):
        ### Prepare result holders ###
values = {}
results = {}
monitors = {}
E = {"config": config, "results": results, "monitors":monitors, "values":values}
### Print experiment header ###
print_exp_name(config)
### Train ###
monitors["acc_fold"] = []
monitors["mcc_fold"] = []
monitors["wac_fold"] = []
monitors["cm"] = [] # confusion matrix
monitors["clf"] = []
monitors["train_time"] = []
monitors["test_time"] = []
results["mean_acc"] = 0
results["mean_mcc"] = 0
values["transformers"] = []
for fold in D["folds"]:
X_train, Y_train, X_test, Y_test = fold["X_train"], fold["Y_train"], fold["X_test"], fold["Y_test"]
min_max_scaler = MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)
X_test = min_max_scaler.transform(X_test)
            clf = sklearn.linear_model.LogisticRegression(C=config["C"], class_weight="auto")
            tstart = time.time()
            clf.fit(X_train.astype(float), Y_train.astype(float).reshape(-1))
            monitors["train_time"].append(time.time() - tstart)  # time spent fitting
tstart = time.time()
Y_pred = clf.predict(X_test.astype(float))
monitors["test_time"].append(time.time() - tstart)
acc_fold, mcc_fold = accuracy_score(Y_test, Y_pred), matthews_corrcoef(Y_test, Y_pred)
cm = confusion_matrix(Y_test, Y_pred)
tp, fn, fp, tn = cm[1,1], cm[1,0], cm[0,1], cm[0,0]
monitors["clf"].append(clf)
monitors["cm"].append(cm)
monitors["wac_fold"].append(0.5*tp/float(tp+fn) + 0.5*tn/float(tn+fp))
monitors["acc_fold"].append(acc_fold)
monitors["mcc_fold"].append(mcc_fold)
monitors["acc_fold"] = np.array(monitors["acc_fold"])
monitors["mcc_fold"] = np.array(monitors["mcc_fold"])
monitors["wac_fold"] = np.array(monitors["wac_fold"])
results["mean_acc"] = monitors["acc_fold"].mean()
results["mean_mcc"] = monitors["mcc_fold"].mean()
results["mean_wac"] = monitors["wac_fold"].mean()
logger.info(results)
return E
cv_configs = generate_configs(config, ["C"])
for c in cv_configs:
print c
E["experiments"].append(fit_lr(c))
save_exp(E)
best_e = E["experiments"][0]
for e in E["experiments"]:
if e["results"]["mean_wac"] > best_e["results"]["mean_wac"]:
best_e = e
logger.info(best_e)
logger.info("Done")
if __name__ == "__main__":
fit_lrs()
|
[
"staszek.jastrzebski@gmail.com"
] |
staszek.jastrzebski@gmail.com
|
4fafdb60d2714fc699c55d2ce9bc473bfcffb686
|
b3b68efa404a7034f0d5a1c10b281ef721f8321a
|
/Scripts/simulation/situations/complex/university_mixer_situation.py
|
bdd94a7c82a8c319385d8ae99bf8517a96e6a57b
|
[
"Apache-2.0"
] |
permissive
|
velocist/TS4CheatsInfo
|
62195f3333076c148b2a59f926c9fb5202f1c6fb
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
refs/heads/main
| 2023-03-08T01:57:39.879485
| 2021-02-13T21:27:38
| 2021-02-13T21:27:38
| 337,543,310
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,087
|
py
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\situations\complex\university_mixer_situation.py
# Compiled at: 2019-10-19 01:32:53
# Size of source mod 2**32: 5699 bytes
from situations.situation_complex import SituationComplex, CommonInteractionCompletedSituationState, CommonSituationState, SituationComplexCommon, TunableSituationJobAndRoleState, SituationStateData
from sims4.tuning.tunable import TunableReference, TunableEnumWithFilter
from tag import Tag
import services
from objects.object_manager import ObjectManager
from sims4.tuning.instances import lock_instance_tunables
from situations.bouncer.bouncer_request import exclusivity_compare
from situations.bouncer.bouncer_types import BouncerExclusivityCategory
from situations.situation_types import SituationCreationUIOption
from situations.situation import Situation
class _MixerParty(CommonSituationState):
def timer_expired(self):
self._change_state(self.owner.cleanup_party_state())
def on_activate(self, reader=None):
super().on_activate(reader)
if self.owner.juice_keg is not None:
self.owner._claim_object(self.owner.juice_keg.id)
class _CleanupJuiceKeg(CommonInteractionCompletedSituationState):
def on_activate(self, reader=None):
super().on_activate(reader)
if self.owner.juice_keg is None:
self.owner._self_destruct()
def _on_interaction_of_interest_complete(self, **kwargs):
self.owner._self_destruct()
class _SetupJuiceKeg(CommonInteractionCompletedSituationState):
def _on_interaction_of_interest_complete(self, **kwargs):
self._change_state(self.owner.mixer_party_state())
class UniversityMixerPartySituation(SituationComplexCommon):
INSTANCE_TUNABLES = {'juice_keg_bearer_job_and_role':TunableSituationJobAndRoleState(description='\n The job and role state for the bearer of the juice keg.\n '),
'setup_juice_keg_state':_SetupJuiceKeg.TunableFactory(description='\n The state to bring in the keg bearer and have the juice keg set up on the lot.\n ',
display_name='1. Setup Juice Keg State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP),
'mixer_party_state':_MixerParty.TunableFactory(description='\n The state to represent the party itself.\n ',
display_name='2. Mixer Party State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP),
'cleanup_party_state':_CleanupJuiceKeg.TunableFactory(description='\n The state to cleanup the juice keg and end the party\n ',
display_name='3. Party Cleanup State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP),
'juice_keg_tag':TunableEnumWithFilter(description='\n Tag used to find the juice keg supplied by the situation.\n ',
tunable_type=Tag,
default=Tag.INVALID,
invalid_enums=Tag.INVALID,
filter_prefixes=('func', ))}
REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES
def __init__(self, *args, **kwargs):
(super().__init__)(*args, **kwargs)
self._juice_keg_object_id = None
def start_situation(self):
super().start_situation()
if self.juice_keg is not None:
self._claim_object(self.juice_keg.id)
self._change_state(self.setup_juice_keg_state())
@classmethod
def _states(cls):
return (SituationStateData(1, _SetupJuiceKeg, factory=(cls.setup_juice_keg_state)),
SituationStateData(2, _MixerParty, factory=(cls.mixer_party_state)),
SituationStateData(3, _CleanupJuiceKeg, factory=(cls.cleanup_party_state)))
@classmethod
def _get_tuned_job_and_default_role_state_tuples(cls):
return [(cls.juice_keg_bearer_job_and_role.job, cls.juice_keg_bearer_job_and_role.role_state)]
@classmethod
def default_job(cls):
pass
@property
def juice_keg(self):
object_manager = services.object_manager()
juice_keg = None
if self._juice_keg_object_id is not None:
juice_keg = object_manager.get(self._juice_keg_object_id)
if juice_keg is None:
if self.juice_keg_bearer is not None:
for obj in object_manager.get_objects_with_tag_gen(self.juice_keg_tag):
if obj.get_sim_owner_id() is self.juice_keg_bearer.id:
juice_keg = obj
self._juice_keg_object_id = juice_keg.id
break
return juice_keg
@property
def juice_keg_bearer(self):
sim = next(self.all_sims_in_job_gen(self.juice_keg_bearer_job_and_role.job), None)
return sim
lock_instance_tunables(UniversityMixerPartySituation, exclusivity=(BouncerExclusivityCategory.NORMAL),
creation_ui_option=(SituationCreationUIOption.NOT_AVAILABLE))
|
[
"cristina.caballero2406@gmail.com"
] |
cristina.caballero2406@gmail.com
|
2a6ed3ab36186dc4b2907c6eccfff147841622dd
|
bc28f8fe941caf281261afa1641868e743ecb5ab
|
/Google_APAC_Round_E/Beautiful_Numbers/Beautiful_Numbers.py
|
07ce6d570af05b0e1e80e6cd90d4524fcd142a89
|
[] |
no_license
|
anubhavshrimal/CompetitiveProgrammingInPython
|
9fc6949fb3cd715cfa8544c17a63ffbe52677b37
|
2692c446d49ec62d4967ed78a7973400db7ce981
|
refs/heads/master
| 2021-07-05T08:17:15.182154
| 2018-05-29T02:26:25
| 2018-05-29T02:26:25
| 60,554,340
| 7
| 6
| null | 2021-05-24T17:46:16
| 2016-06-06T19:18:27
|
Python
|
UTF-8
|
Python
| false
| false
| 465
|
py
|
import numpy as np
test = int(input())
for t in range(1, test+1):
num = int(input())
    # absolute roots of b^2 + b - (num - 1) = 0, i.e. bases b in which num reads '111'
    n1, n2 = abs(np.roots([1, 1, -(num-1)]))
    if int(n1) != n1 or int(n2) != n2:
        # no integral base gives a three-digit repunit, so fall back to base num-1 ('11')
        ans = num-1
else:
if n1 == 1 or n1 == -1:
ans = n2
elif n2 == 1 or n2 == -1:
ans = n1
else:
if n2 > n1:
ans = n1
else:
ans = n2
print('Case #'+str(t)+':',str(int(ans)))
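# Worked example (added for illustration): num = 13 gives the roots of b^2 + b - 12 = 0,
# i.e. 3 and -4; both pass the integrality check and neither is 1, so the smaller value 3
# is printed (13 reads '111' in base 3).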
|
[
"anubhavshrimal@gmail.com"
] |
anubhavshrimal@gmail.com
|
7054d92c14a1e6c568fc15281f3341cce89ae817
|
4c2136ab05913beba890b4127c2f608be4798ed2
|
/(0, '')/py/fc_session.py
|
751c6d3892c8e00fd0baf22a85673c65224e1427
|
[] |
no_license
|
Dyutee/test
|
345adcd1769cba0f468090bcc311f4d379ea5f1e
|
b8b3718922bafbac1bad3802f6c885d777e1bb08
|
refs/heads/master
| 2021-01-12T04:19:45.511927
| 2016-12-29T07:25:29
| 2016-12-29T07:25:29
| 77,588,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,517
|
py
|
#!/usr/bin/python
import cgitb, sys, header, common_methods
cgitb.enable()
sys.path.append('/var/nasexe/storage')
import storage_op
import sys,os
from lvm_infos import *
from functions import *
import san_disk_funs
check_fc = san_disk_funs.fc_target_status();
fc_target=san_disk_funs.fc_list_targets()
fc_ip = ''
ses = ''
########### FC Session ##########################
for session_tar in fc_target:
#print 'Session Target:'+str(session_tar)
#print '<br/>'
#print 'Sess Tar:'+str(session_tar)
#print '<br/>'
ses=san_disk_funs.fc_session(session_tar)
#print 'FC SESSION Info:'+str(sess)
import left_nav
#if (str(check_fc).find("'1'") > 0):
if (check_fc !=[]):
print
print """
<!--Right side body content starts from here-->
<div class="rightsidecontainer">
<div class="insidepage-heading">Fc >> <span class="content">Fc Session Information</span></div>
<!--tab srt-->
<div class="searchresult-container">
<div class="infoheader">
<div id="tabs">
<ul>
<li><a href="#tabs-1">Fc Session</a></li>
</ul>
<div id="tabs-1">
<!--form container starts here-->
<div class="form-container">
<div class="topinputwrap-heading">Fc Session Information </div>
<div class="inputwrap">
<div class="formrightside-content">
<form name = 'add_info' method = 'POST'>
<table width = "680" border = "1" cellspacing = "0" cellpadding = "0" name = 'disp_tables' id = 'id_target_info' style ="border-style:ridge;">"""
print"""<tr style = 'background-color:#999999; font-weight: bold;'>
<td height = "35px" valign = "middle" style = 'color: #FFF;'>Fc Target</td>
<td height = "35px" valign = "middle" style = 'color: #FFF;'>Connected Client</td>
</tr>"""
#print fc_target
if(ses !=''):
for tar_info in fc_target:
print"""<tr>
<!--<td class = "table_content" height = "35px" valign = "middle">
<a href = 'main.php?page=iscsi&act=add_disk_tgt_done&target=<?= $show_targets ?>'><img border = '0' style = 'margin-top: 2px;' src = '../images/add.png' title = 'Add disk to target' /></a> <a href = 'main.php?page=iscsi&act=del_disk_tgt_done&t=<?= $show_targets ?>'><img border = '0' src = '../images/fileclose.png' title = 'Remove disk from target' /></a> <a href = 'get_properties.php?target=<?= $show_targets ?>'><img border = '0' src = '../images/properties.png' title = 'Target properties' /></a> </td>-->
<td class = "table_content" height = "35px" valign = "middle">"""
print""" <font color ="darkred"><b>"""+tar_info+"""</b></font>"""
print """</td>"""
print"""<td class = "table_content" height = "35px" valign = "middle" style="font-family: Tahoma;text-decoration:blink;">"""
sesion_tar =sess=san_disk_funs.fc_session(tar_info)
replace_sess_nm = str(sesion_tar).replace('[]', '')
replace_sess_nm1 = str(replace_sess_nm).replace('[', '')
replace_sess_nm2 = str(replace_sess_nm1).replace(']', '')
replace_session_name = str(replace_sess_nm2).replace("'", '')
#print replace_session_name
if(replace_session_name!=''):
print"""<font color = 'darkgreen'><b>"""+replace_session_name+"""</b></font></td>"""
else:
print """
<marquee behavior="alternate" direction ="right"><b><font size="3">There is no Session for this client</font></b></marquee>
</td>
"""
else:
print"""<tr>
<td colspan = '3' align = 'center' height="50px;">
<marquee behavior="alternate" direction= "right"><b><font size="5">No Information is available</font></b></marquee>
</td>
</tr>"""
print"""
</table>
</form>
</div>"""
print"""
</div>
</div>
<!--form container ends here-->
</div>
</div>
</div>
</div>
<!--form container ends here-->
<!--form container starts here-->
<!--form container ends here-->
</div>
<!--Right side body content ends here-->
</div>
<!--Footer starts from here-->
<div class="insidefooter footer_content">© 2013 Opslag FS2</div>
<!-- Footer ends here-->
</div>
<!--inside body wrapper end-->
</div>"""
else:
print "<div style = 'margin-left: auto; margin-right: auto; text-align: center; vertical-align: center; color: darkred; width: 65%; font: 16px Arial;'><br/><br/><br/><b>Check the 'Enable/Disable FC' option in Maintenance -></b><a href= 'main.py?page=sr'><span style='text-decoration:underline;'>Services</span></a>.</div>"
print"""
<!--body wrapper end-->
</body>
</html>
"""
|
[
"dyuteemoy46@gmail.com"
] |
dyuteemoy46@gmail.com
|
d3c0c2a4b226f7e7de023845098715c9f079029c
|
6484cdf98189f5f5736950c81a9d8d30e0f0c0db
|
/notifications/serializers.py
|
488db18520ad943f4fc0b50ec121588e37fe25bd
|
[] |
no_license
|
AlexFrundin/great_app_example
|
e0e9c91f06bfba76058f3af5b113a9399945bf6c
|
23225e7e88f2ee51359d23cac2200b32b8bd6e20
|
refs/heads/main
| 2023-05-30T15:02:22.035811
| 2021-06-17T06:40:06
| 2021-06-17T06:40:06
| 339,434,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
from rest_framework import serializers
from .models import Notification
# This class is used to serialize notification data for list responses
class NoitifcationListSerializer(serializers.ModelSerializer):
created_on = serializers.DateTimeField(format="%d %b %Y")
class Meta:
model = Notification
fields = (
'id',
'refrence_id',
'event_id',
'title',
'message',
'is_read',
'is_deleted',
'created_on')
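# Illustrative usage only (the queryset below is an assumption, not part of this module):
#     unread = Notification.objects.filter(is_read=False)
#     payload = NoitifcationListSerializer(unread, many=True).data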
|
[
"aleksey.frundin@gmail.com"
] |
aleksey.frundin@gmail.com
|
0a6a1c337560a7be7affe868a65af85fb574f072
|
15581a76b36eab6062e71d4e5641cdfaf768b697
|
/LeetCode_30days_challenge/2021/February/Peeking Iterator.py
|
1c47322e8ae397e80fa7c43ca73eea44f3a2c292
|
[] |
no_license
|
MarianDanaila/Competitive-Programming
|
dd61298cc02ca3556ebc3394e8d635b57f58b4d2
|
3c5a662e931a5aa1934fba74b249bce65a5d75e2
|
refs/heads/master
| 2023-05-25T20:03:18.468713
| 2023-05-16T21:45:08
| 2023-05-16T21:45:08
| 254,296,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,642
|
py
|
# Below is the interface for Iterator, which is already defined for you.
#
# class Iterator:
# def __init__(self, nums):
# """
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
#
# def hasNext(self):
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
#
# def next(self):
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator:
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
self.iterator = iterator
if self.iterator.hasNext():
self.buffer = self.iterator.next()
else:
self.buffer = None
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
return self.buffer
def next(self):
"""
:rtype: int
"""
tmp = self.buffer
if self.iterator.hasNext():
self.buffer = self.iterator.next()
else:
self.buffer = None
return tmp
def hasNext(self):
"""
:rtype: bool
"""
return self.buffer is not None
# Your PeekingIterator object will be instantiated and called as such:
# iter = PeekingIterator(Iterator(nums))
# while iter.hasNext():
# val = iter.peek() # Get the next element but not advance the iterator.
# iter.next() # Should return the same value as [val].
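# Added illustration (assumes the judge-provided Iterator simply wraps a Python list):
#   it = PeekingIterator(Iterator([1, 2, 3]))
#   it.peek()    # -> 1, the buffered value is returned, iterator not advanced
#   it.next()    # -> 1
#   it.next()    # -> 2
#   it.hasNext() # -> True
#   it.next()    # -> 3
#   it.hasNext() # -> False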
|
[
"mariandanaila01@gmail.com"
] |
mariandanaila01@gmail.com
|
050fbf37649611034d2d17fa1d8f6eaaec527045
|
99b784550a6d306147c022c8d829800b0fbb8c68
|
/Part_1_Basics/Chapter_9_Classes/number_served.py
|
c4bf3cff3db3a73bcf0555f68427754403f58a40
|
[] |
no_license
|
apuya/python_crash_course
|
116d6598f656d8fed0b4184edbce8e996cd0f564
|
0b2e8a6e9849a198cfb251706500a919d6f51fe7
|
refs/heads/main
| 2023-06-03T22:41:03.203889
| 2021-06-16T04:07:28
| 2021-06-16T04:07:28
| 367,812,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,282
|
py
|
# Python Crash Course: A Hands-On, Project-Based Introduction To Programming
#
# Name: Mark Lester Apuya
# Date: 06/12/2021
#
# Chapter 9: Classes
#
# Exercise 9.4 Number Served:
# Start with your program from Exercise 9-1 (page 162). Add an attribute
# called number_served with a default value of 0. Create an instance called
# restaurant from this class. Print the number of customers the restaurant has
# served, and then change this value and print it again.
# Add a method called set_number_served() that lets you set the number of
# customers that have been served. Call this method with a new number and print
# the value again.
# Add a method called increment_number_served() that lets you increment the
# number of customers who’ve been served. Call this method with any number you
# like that could represent how many customers were served in, say, a day of
# business.
class Restaurant:
"""
Restaurant information.
"""
def __init__(self, restaurant_name, cuisine_type):
"""
        Initialize restaurant name and cuisine type.
"""
self.restaurant_name = restaurant_name
self.cuisine_type = cuisine_type
self.number_served = 0
def discribe_restaurant(self):
"""
Prints restaurant information.
"""
print(f"\n{self.restaurant_name} serves {self.cuisine_type}")
def open_restaurant(self):
"""
Prints that the restaurant is open.
"""
print(f"\n{self.restaurant_name} is open.")
def set_number_served(self, number_served):
"""
Set the number of customers served.
"""
self.number_served = number_served
def increment_number_served(self, number_served):
"""
Increment the number of customers who have been served.
"""
self.number_served += number_served
restaurant = Restaurant('Olive Garden', 'Italian')
restaurant.discribe_restaurant()
print(f"\nNumber served: {restaurant.number_served}")
restaurant.number_served = 22
print(f"\nNumber served: {restaurant.number_served}")
restaurant.set_number_served(20)
print(f"\nNumber served: {restaurant.number_served}")
restaurant.increment_number_served(2)
print(f"\nNumber served: {restaurant.number_served}")
|
[
"contact@mapuya.com"
] |
contact@mapuya.com
|
b91cb3c12a2949a4360518e9abecbc11298c03dd
|
230b7714d61bbbc9a75dd9adc487706dffbf301e
|
/third_party/blink/web_tests/external/wpt/tools/wptrunner/wptrunner/environment.py
|
2493f1fa4407a39aad3ac3c2a724322b75b0944a
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-w3c-03-bsd-license"
] |
permissive
|
byte4byte/cloudretro
|
efe4f8275f267e553ba82068c91ed801d02637a7
|
4d6e047d4726c1d3d1d119dfb55c8b0f29f6b39a
|
refs/heads/master
| 2023-02-22T02:59:29.357795
| 2021-01-25T02:32:24
| 2021-01-25T02:32:24
| 197,294,750
| 1
| 2
|
BSD-3-Clause
| 2019-09-11T19:35:45
| 2019-07-17T01:48:48
| null |
UTF-8
|
Python
| false
| false
| 8,027
|
py
|
import json
import os
import multiprocessing
import signal
import socket
import sys
import time
from mozlog import get_default_logger, handlers, proxy
from .wptlogging import LogLevelRewriter
here = os.path.split(__file__)[0]
repo_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir, os.pardir))
sys.path.insert(0, repo_root)
from tools import localpaths # noqa: flake8
from wptserve.handlers import StringHandler
serve = None
def do_delayed_imports(logger, test_paths):
global serve
serve_root = serve_path(test_paths)
sys.path.insert(0, serve_root)
failed = []
try:
from tools.serve import serve
except ImportError:
failed.append("serve")
if failed:
logger.critical(
"Failed to import %s. Ensure that tests path %s contains web-platform-tests" %
(", ".join(failed), serve_root))
sys.exit(1)
def serve_path(test_paths):
return test_paths["/"]["tests_path"]
class TestEnvironmentError(Exception):
pass
class TestEnvironment(object):
def __init__(self, test_paths, testharness_timeout_multipler, pause_after_test, debug_info, options, ssl_config, env_extras):
"""Context manager that owns the test environment i.e. the http and
websockets servers"""
self.test_paths = test_paths
self.server = None
self.config_ctx = None
self.config = None
self.testharness_timeout_multipler = testharness_timeout_multipler
self.pause_after_test = pause_after_test
self.test_server_port = options.pop("test_server_port", True)
self.debug_info = debug_info
self.options = options if options is not None else {}
self.cache_manager = multiprocessing.Manager()
self.stash = serve.stash.StashServer()
self.env_extras = env_extras
self.env_extras_cms = None
self.ssl_config = ssl_config
def __enter__(self):
self.config_ctx = self.build_config()
self.config = self.config_ctx.__enter__()
self.stash.__enter__()
self.cache_manager.__enter__()
self.setup_server_logging()
assert self.env_extras_cms is None, (
"A TestEnvironment object cannot be nested")
self.env_extras_cms = []
for env in self.env_extras:
cm = env(self.options, self.config)
cm.__enter__()
self.env_extras_cms.append(cm)
self.servers = serve.start(self.config,
self.get_routes())
if self.options.get("supports_debugger") and self.debug_info and self.debug_info.interactive:
self.ignore_interrupts()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.process_interrupts()
for scheme, servers in self.servers.iteritems():
for port, server in servers:
server.kill()
for cm in self.env_extras_cms:
cm.__exit__(exc_type, exc_val, exc_tb)
self.env_extras_cms = None
self.cache_manager.__exit__(exc_type, exc_val, exc_tb)
self.stash.__exit__()
self.config_ctx.__exit__(exc_type, exc_val, exc_tb)
def ignore_interrupts(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def process_interrupts(self):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def build_config(self):
override_path = os.path.join(serve_path(self.test_paths), "config.json")
config = serve.ConfigBuilder()
config.ports = {
"http": [8000, 8001],
"https": [8443],
"ws": [8888],
"wss": [8889],
}
if os.path.exists(override_path):
with open(override_path) as f:
override_obj = json.load(f)
config.update(override_obj)
config.check_subdomains = False
ssl_config = self.ssl_config.copy()
ssl_config["encrypt_after_connect"] = self.options.get("encrypt_after_connect", False)
config.ssl = ssl_config
if "browser_host" in self.options:
config.browser_host = self.options["browser_host"]
if "bind_address" in self.options:
config.bind_address = self.options["bind_address"]
config.server_host = self.options.get("server_host", None)
config.doc_root = serve_path(self.test_paths)
return config
def setup_server_logging(self):
server_logger = get_default_logger(component="wptserve")
assert server_logger is not None
log_filter = handlers.LogLevelFilter(lambda x:x, "info")
# Downgrade errors to warnings for the server
log_filter = LogLevelRewriter(log_filter, ["error"], "warning")
server_logger.component_filter = log_filter
server_logger = proxy.QueuedProxyLogger(server_logger)
try:
#Set as the default logger for wptserve
serve.set_logger(server_logger)
serve.logger = server_logger
except Exception:
# This happens if logging has already been set up for wptserve
pass
def get_routes(self):
route_builder = serve.RoutesBuilder()
for path, format_args, content_type, route in [
("testharness_runner.html", {}, "text/html", "/testharness_runner.html"),
(self.options.get("testharnessreport", "testharnessreport.js"),
{"output": self.pause_after_test,
"timeout_multiplier": self.testharness_timeout_multipler,
"explicit_timeout": "true" if self.debug_info is not None else "false"},
"text/javascript;charset=utf8",
"/resources/testharnessreport.js")]:
path = os.path.normpath(os.path.join(here, path))
# Note that .headers. files don't apply to static routes, so we need to
# readd any static headers here.
headers = {"Cache-Control": "max-age=3600"}
route_builder.add_static(path, format_args, content_type, route,
headers=headers)
data = b""
with open(os.path.join(repo_root, "resources", "testdriver.js"), "rb") as fp:
data += fp.read()
with open(os.path.join(here, "testdriver-extra.js"), "rb") as fp:
data += fp.read()
route_builder.add_handler(b"GET", b"/resources/testdriver.js",
StringHandler(data, "text/javascript"))
for url_base, paths in self.test_paths.iteritems():
if url_base == "/":
continue
route_builder.add_mount_point(url_base, paths["tests_path"])
if "/" not in self.test_paths:
del route_builder.mountpoint_routes["/"]
return route_builder.get_routes()
def ensure_started(self):
# Pause for a while to ensure that the server has a chance to start
total_sleep_secs = 30
each_sleep_secs = 0.5
end_time = time.time() + total_sleep_secs
while time.time() < end_time:
failed = self.test_servers()
if not failed:
return
time.sleep(each_sleep_secs)
raise EnvironmentError("Servers failed to start: %s" %
", ".join("%s:%s" % item for item in failed))
def test_servers(self):
failed = []
host = self.config["server_host"]
for scheme, servers in self.servers.iteritems():
for port, server in servers:
if self.test_server_port:
s = socket.socket()
s.settimeout(0.1)
try:
s.connect((host, port))
except socket.error:
failed.append((host, port))
finally:
s.close()
if not server.is_alive():
failed.append((scheme, port))
return failed
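# Rough usage sketch (added for illustration; every argument value below is an assumption --
# in practice the runner builds these objects from its command-line options):
#
#   env = TestEnvironment(test_paths, 1, False, None,
#                         {"supports_debugger": False}, {"type": "none"}, [])
#   with env:
#       env.ensure_started()
#       # ... run tests against env.config ...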
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
242f80c5d1c207d66d4fd11b8d495d63cf4a6543
|
4b2c5fe21ffcc35837bba06d2c3b43c5116f74bd
|
/Bit++.py
|
b021896ca96ab26196e29a12c95ef313ebda47fc
|
[] |
no_license
|
joydas65/Codeforces-Problems
|
8870cbbf1db9fa12b961cee7aaef60960af714ae
|
eb0f5877d0fede95af18694278029add7385973d
|
refs/heads/master
| 2023-06-23T07:16:49.151676
| 2023-06-17T07:28:24
| 2023-06-17T07:28:24
| 184,123,514
| 5
| 1
| null | 2020-11-28T07:28:03
| 2019-04-29T18:33:23
|
Python
|
UTF-8
|
Python
| false
| false
| 212
|
py
|
ans = 0
for _ in range(int(input())):
s = input()
    if '+' in s:    # the statement is either "++X" or "X++"
        ans += 1
    elif '-' in s:  # the statement is either "--X" or "X--"
        ans -= 1
print(ans)
|
[
"noreply@github.com"
] |
joydas65.noreply@github.com
|
20eb7196fe3b002591b7b276815778936aebeb54
|
4eb76ddbe2bf6d7fb8ee791dcaa1dfaccd4a09b0
|
/jitai/events/EventTemplate.py
|
e85c491ebb1b21082dabbe5b4fef53d7216dc3b1
|
[] |
no_license
|
koike-ya/research
|
3cae0be17a8871d5782842510676c05a75627c49
|
3ff99c56c8e5d6c57ee65f1bca2431f3dc6f6593
|
refs/heads/master
| 2021-10-12T03:13:20.645738
| 2019-01-26T07:12:58
| 2019-01-26T07:12:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,138
|
py
|
from abc import ABC
from datetime import datetime, timedelta
import pandas as pd
from jitai.src.utils import set_hour_minute
class EventTemplate(ABC):
def __init__(self, param, user_info, ema, logger):
self.param = param
self.ema = ema
self.name = param["condition_name"]
self.ema_content = param["ema_content"]
self.user_info = user_info
self.logger = logger
self.exists = param["exists"]
        self.ema_time = self.param["ema_time"]  # the value of param["ema_time"] is a dict
def _init_ema_content(self):
if not self.ema_content == "none":
self.threshold = self.param["threshold"]
self.more_or_less = self.param["more_or_less"]
def _init_ema_time(self):
        # Time-related settings
if list(self.ema_time.keys())[0] == "set_time":
from_ = datetime.strptime(self.ema_time["set_time"]["from"], "%H:%M")
self.ema_from_ = set_hour_minute(datetime.today(), from_)
to = datetime.strptime(self.ema_time["set_time"]["to"], "%H:%M")
self.ema_to = set_hour_minute(datetime.today(), to)
if list(self.ema_time.keys())[0] == "interval":
t = datetime.strptime(self.ema_time["interval"]["value"], "%H:%M")
self.ema_from_ = datetime.today() - timedelta(hours=t.hour, minutes=t.minute)
self.ema_to = datetime.today()
def _validate_params(self):
        # TODO: raise an error when the given parameters are not appropriate,
        # e.g. when the element named by self.ema_content is not among the questions we expect
pass
def _extract_about_time(self):
self.ema = self.ema[(self.ema["end"] >= self.ema_from_) & (self.ema["end"] <= self.ema_to)]
def _ema_content_not_none(self):
        # This method only works for DAMS items; override it in other cases
content_df = self.ema[self.ema["question"] == self.ema_content]
content_df = content_df.astype({"answer": int})
if not content_df.empty:
if self.more_or_less == "more":
self.ema = content_df[content_df["answer"] >= self.threshold]
elif self.more_or_less == "less":
self.ema = content_df[content_df["answer"] < self.threshold]
else:
self.ema = pd.DataFrame(columns=self.ema)
def get_depend_class_last_ema_time(self):
        # TODO: needs testing. Does this logic really hold for use=False?
if hasattr(self.depend_class, "use"):
res = self.depend_class.ema.run()
depend_ema = self.depend_class.ema
if depend_ema.empty:
self.ema = pd.DataFrame()
return 0
depend_ema.reset_index(drop=True, inplace=True)
return depend_ema.loc[depend_ema.shape[0] - 1, "end"]
def _depend_condition(self):
        # The dependency-relationship condition is described here.
self.ema_from_ = self.get_depend_class_last_ema_time()
t = datetime.strptime(self.param["ema_time"]["interval"]["value"], "%H:%M")
if self.ema_from_ != 0 and datetime.today() >= self.ema_from_ + timedelta(hours=t.hour, minutes=t.minute):
return True
else:
return False
def _run(self):
if not self.ema.empty:
self._extract_about_time()
if not self.ema.empty and not self.ema_content == "none":
self._ema_content_not_none()
def run(self):
if hasattr(self, "depend_class"):
fill_cond_flag = self._depend_condition()
            # False is returned if the required number of hours has not yet elapsed
if not fill_cond_flag:
return False
self._run()
if self.exists:
return True if not self.ema.empty else False
else:
return True if self.ema.empty else False
def add_depend_class(self, depend_class):
self.depend_class = depend_class
def copy(self):
return EventTemplate(self.param, self.user_info, self.ema, self.logger)
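# Illustrative only (not part of the original module): a minimal concrete event that
# overrides _ema_content_not_none for a non-DAMS item. The numeric-answer handling is an
# assumption made for this sketch.
#
# class CustomEvent(EventTemplate):
#     def _ema_content_not_none(self):
#         content_df = self.ema[self.ema["question"] == self.ema_content]
#         if content_df.empty:
#             self.ema = pd.DataFrame(columns=self.ema.columns)
#         elif self.more_or_less == "more":
#             self.ema = content_df[content_df["answer"].astype(float) >= self.threshold]
#         else:
#             self.ema = content_df[content_df["answer"].astype(float) < self.threshold]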
|
[
"makeffort134@gmail.com"
] |
makeffort134@gmail.com
|
2527f4d9fd54b3e27de63af10a0a6823676bffc5
|
8f63cf27e69bc44dcd11e63a0c396b398443009b
|
/tests/unit/util/iterables.py
|
454eaf3914e1ade640b62d055b97606ada1ab216
|
[
"MIT"
] |
permissive
|
ethanjli/phylline
|
fae756dbbead0351dd11c770158a1aa08fa363d2
|
f11307d0f37ca835996250e1e835c44abd282769
|
refs/heads/master
| 2021-01-01T23:56:41.018911
| 2020-02-25T05:07:34
| 2020-02-25T05:07:34
| 239,400,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,382
|
py
|
"""Test the util.iterables module."""
# Builtins
# Packages
from phylline.util.iterables import make_collection, remove_none
def test_make_collection_singleton():
"""Test whether the make_collection function makes collections from singletons."""
assert make_collection(42) != 42
assert make_collection(42) == [42]
assert make_collection(42) != (42,)
assert make_collection(42, type=tuple) != 42
assert make_collection(42, type=tuple) != [42]
assert make_collection(42, type=tuple) == (42,)
def test_make_collection_iterable():
"""Test whether the make_collection function makes collections from iterables."""
assert make_collection(range(5)) != range(5)
assert make_collection(range(5)) == list(range(5))
assert make_collection(range(5)) != tuple(range(5))
assert make_collection(range(5), type=tuple) != range(5)
assert make_collection(range(5), type=tuple) != list(range(5))
assert make_collection(range(5), type=tuple) == tuple(range(5))
def test_remove_none():
"""Test whether remove_none removes Nones correctly."""
assert len(tuple(range(5))) == len(tuple(remove_none(range(5))))
for (initial, filtered) in zip(range(5), remove_none(range(5))):
assert initial == filtered
assert len(tuple(remove_none([1, 2, None, 3]))) == 3
assert tuple(remove_none([1, 2, None, 3])) == (1, 2, 3)
|
[
"lietk12@gmail.com"
] |
lietk12@gmail.com
|
b86f37f64be3a4a6a783e0cc8de77ab087a399bf
|
4b360696d512a35b2114c482c658d10e3ff91a2c
|
/project-addons/mail_ph/models/__init__.py
|
94a375f2116535169a7287ca79e29be1a3feb530
|
[] |
no_license
|
Comunitea/CMNT_010_15_PHA
|
24ecf3be6a50441dfa3dd8deca4ee96ac5e61970
|
d4a24aafba48fc7dda7ee662e0c7e1112c220162
|
refs/heads/master
| 2022-08-12T00:39:24.464028
| 2022-07-11T10:30:30
| 2022-07-11T10:31:31
| 37,918,119
| 0
| 1
| null | 2015-12-02T12:39:43
| 2015-06-23T12:37:45
|
Python
|
UTF-8
|
Python
| false
| false
| 256
|
py
|
# -*- coding: utf-8 -*-
# © 2020 Pharmadus I.T.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import res_partner, sale_order, purchase_order, hr_holidays, \
account_invoice, res_company, stock, mail_compose_message, mail_mail
|
[
"oscar.salvador@pharmadus.com"
] |
oscar.salvador@pharmadus.com
|
68e9badb63dfa7f93aed88ca630799e3a43f8ee8
|
bb24d8a7f71206fac23ebef0d53f94918d7aa32d
|
/mymusic/migrations/0005_album_image_url.py
|
2818a2cbf76e1a6e207e5a6e7dae1d783a693bd1
|
[] |
no_license
|
momentum-morehouse/django-music-bettinacjohnson
|
ec3311b41df1c3c09a3993fb476c06d715a87405
|
c52f24d2f9faec73b0cad4139ebfe002bd819766
|
refs/heads/master
| 2022-11-27T02:04:49.847168
| 2020-07-16T23:46:13
| 2020-07-16T23:46:13
| 279,333,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Generated by Django 3.0.8 on 2020-07-15 20:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mymusic', '0004_auto_20200714_1510'),
]
operations = [
migrations.AddField(
model_name='album',
name='image_url',
field=models.TextField(blank=True, null=True),
),
]
|
[
"replituser@example.com"
] |
replituser@example.com
|
88e16d0fac13e4e9eee8c7bea8b9fa71c55ddd68
|
9c2edc273db48dcb6d31a937510476b7c0b0cc61
|
/cython_sample/setup.py
|
aee60680780e7c7437d6abd35f1504bd902ef425
|
[] |
no_license
|
miyamotok0105/python_sample
|
4d397ac8a3a723c0789c4c3e568f3319dd754501
|
77101c981bf4f725acd20c9f4c4891b29fbaea61
|
refs/heads/master
| 2022-12-19T22:53:44.949782
| 2020-05-05T05:09:22
| 2020-05-05T05:09:22
| 81,720,469
| 1
| 0
| null | 2022-11-22T02:22:55
| 2017-02-12T11:15:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 731
|
py
|
#! -*- coding: utf-8 -*-
#python setup.py build_ext --inplace
from Cython.Build import cythonize
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
ext_modules = [
Extension(
"sample1",
["sample1.pyx"],
include_dirs=[numpy_include]
)
]
setup(
name='sample1',
ext_modules=cythonize(ext_modules),
)
# ext_modules = [
# Extension( "sample1", ["sample1.pyx"] ),
# ]
# setup(
# name = "Sample sample1 app",
# cmdclass = { "build_ext" : build_ext },
# ext_modules = ext_modules,
# )
|
[
"miyamotok0105@gmail.com"
] |
miyamotok0105@gmail.com
|
60d34638bc1a71aec3b30bdb71943672f3a6594b
|
88ed6ed99589f7fb8e49aeb6c15bf0d51fe14a01
|
/026_removeDuplicates.py
|
5e8dbfc5edb96029cb37d413ce206813159f712a
|
[] |
no_license
|
ryeLearnMore/LeetCode
|
3e97becb06ca2cf4ec15c43f77447b6ac2a061c6
|
04ec1eb720474a87a2995938743f05e7ad5e66e3
|
refs/heads/master
| 2020-04-07T19:02:43.171691
| 2019-06-23T15:09:19
| 2019-06-23T15:09:19
| 158,634,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
#!/usr/bin/env python
#coding:utf-8
#@author: rye
#@time: 2019/2/18 17:15
'''
Finished this one very quickly... probably the fastest problem I've solved lately.
'''
class Solution:
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
i = 0
j = 0
while j < len(nums):
if nums[j] == nums[i]:
j += 1
else:
nums[i + 1] = nums[j]
i += 1
j += 1
return len(nums[:i + 1])
if __name__ == '__main__':
nums1 = [0,0,0,1,1,1,2,2,3,3,4]
print(Solution().removeDuplicates(nums1))
|
[
"noreply@github.com"
] |
ryeLearnMore.noreply@github.com
|
7ac450e80d74815ef7401aa056f3feb1952628a3
|
853d4cec42071b76a80be38c58ffe0fbf9b9dc34
|
/venv/Lib/site-packages/pandas/tests/series/test_duplicates.py
|
6577b3e54b7b981a4d18a17b1a5eb28849a224fe
|
[] |
no_license
|
msainTesting/TwitterAnalysis
|
5e1646dbf40badf887a86e125ef30a9edaa622a4
|
b1204346508ba3e3922a52380ead5a8f7079726b
|
refs/heads/main
| 2023-08-28T08:29:28.924620
| 2021-11-04T12:36:30
| 2021-11-04T12:36:30
| 424,242,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,717
|
py
|
import numpy as np
import pytest
from pandas import Categorical, Series
import pandas.util.testing as tm
def test_value_counts_nunique():
# basics.rst doc example
series = Series(np.random.randn(500))
series[20:500] = np.nan
series[10:20] = 5000
result = series.nunique()
assert result == 11
# GH 18051
s = Series(Categorical([]))
assert s.nunique() == 0
s = Series(Categorical([np.nan]))
assert s.nunique() == 0
def test_unique():
# GH714 also, dtype=float
s = Series([1.2345] * 100)
s[::2] = np.nan
result = s.unique()
assert len(result) == 2
s = Series([1.2345] * 100, dtype="f4")
s[::2] = np.nan
result = s.unique()
assert len(result) == 2
# NAs in object arrays #714
s = Series(["foo"] * 100, dtype="O")
s[::2] = np.nan
result = s.unique()
assert len(result) == 2
# decision about None
s = Series([1, 2, 3, None, None, None], dtype=object)
result = s.unique()
expected = np.array([1, 2, 3, None], dtype=object)
tm.assert_numpy_array_equal(result, expected)
# GH 18051
s = Series(Categorical([]))
tm.assert_categorical_equal(s.unique(), Categorical([]), check_dtype=False)
s = Series(Categorical([np.nan]))
tm.assert_categorical_equal(s.unique(), Categorical([np.nan]), check_dtype=False)
def test_unique_data_ownership():
# it works! #1807
Series(Series(["a", "c", "b"]).unique()).sort_values()
@pytest.mark.parametrize(
"data, expected",
[
(np.random.randint(0, 10, size=1000), False),
(np.arange(1000), True),
([], True),
([np.nan], True),
(["foo", "bar", np.nan], True),
(["foo", "foo", np.nan], False),
(["foo", "bar", np.nan, np.nan], False),
],
)
def test_is_unique(data, expected):
# GH11946 / GH25180
s = Series(data)
assert s.is_unique is expected
def test_is_unique_class_ne(capsys):
# GH 20661
class Foo:
def __init__(self, val):
self._value = val
def __ne__(self, other):
raise Exception("NEQ not supported")
with capsys.disabled():
li = [Foo(i) for i in range(5)]
s = Series(li, index=[i for i in range(5)])
s.is_unique
captured = capsys.readouterr()
assert len(captured.err) == 0
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, False, False, True, True, False])),
("last", Series([False, True, True, False, False, False, False])),
(False, Series([False, True, True, False, True, True, False])),
],
)
def test_drop_duplicates(any_numpy_dtype, keep, expected):
tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))
if tc.dtype == "bool":
pytest.skip("tested separately in test_drop_duplicates_bool")
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=keep, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, True])),
("last", Series([True, True, False, False])),
(False, Series([True, True, True, True])),
],
)
def test_drop_duplicates_bool(keep, expected):
tc = Series([True, False, True, False])
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=keep, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, False, True], name="name")),
("last", Series([True, True, False, False, False], name="name")),
(False, Series([True, True, True, False, True], name="name")),
],
)
def test_duplicated_keep(keep, expected):
s = Series(["a", "b", "b", "c", "a"], name="name")
result = s.duplicated(keep=keep)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, False, True])),
("last", Series([True, True, False, False, False])),
(False, Series([True, True, True, False, True])),
],
)
def test_duplicated_nan_none(keep, expected):
s = Series([np.nan, 3, 3, None, np.nan], dtype=object)
result = s.duplicated(keep=keep)
tm.assert_series_equal(result, expected)
|
[
"msaineti@icloud.com"
] |
msaineti@icloud.com
|
c019e47f0ff83cf6dcdb0d544128652acf3ae52c
|
0cf6728548830b42c60e37ea1c38b54d0e019ddd
|
/Learning_MachineLearning/DeepLearningWithPython/5.3.py
|
0f1e218f44d0b1287be5fb399e830a0c97bf75a1
|
[] |
no_license
|
MuSaCN/PythonLearning
|
8efe166f66f2bd020d00b479421878d91f580298
|
507f1d82a9228d0209c416626566cf390e1cf758
|
refs/heads/master
| 2022-11-11T09:13:08.863712
| 2022-11-08T04:20:09
| 2022-11-08T04:20:09
| 299,617,217
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,734
|
py
|
# Author:Zhang Yuan
from MyPackage import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import seaborn as sns
import statsmodels.api as sm
from scipy import stats
#------------------------------------------------------------
__mypath__ = MyPath.MyClass_Path("\\DeepLearningWithPython") # path helper class
myfile = MyFile.MyClass_File() # file operations class
myword = MyFile.MyClass_Word() # Word document generation class
myexcel = MyFile.MyClass_Excel() # Excel generation class
mytime = MyTime.MyClass_Time() # time utilities class
myplt = MyPlot.MyClass_Plot() # direct plotting class (single figure window)
mypltpro = MyPlot.MyClass_PlotPro() # advanced Plot series
myfig = MyPlot.MyClass_Figure(AddFigure=False) # object-style plotting class (multiple figure windows)
myfigpro = MyPlot.MyClass_FigurePro(AddFigure=False) # advanced Figure series
mynp = MyArray.MyClass_NumPy() # multi-dimensional array class (wraps NumPy)
mypd = MyArray.MyClass_Pandas() # matrix/array class (wraps Pandas)
mypdpro = MyArray.MyClass_PandasPro() # advanced matrix/array class
myDA = MyDataAnalysis.MyClass_DataAnalysis() # data analysis class
# myMql = MyMql.MyClass_MqlBackups() # MQL backup class
# myMT5 = MyMql.MyClass_ConnectMT5(connect=False) # Python connection to the MetaTrader 5 client
# myDefault = MyDefault.MyClass_Default_Matplotlib() # matplotlib default settings
# myBaidu = MyWebCrawler.MyClass_BaiduPan() # Baidu Pan interaction class
# myImage = MyImage.MyClass_ImageProcess() # image processing class
myBT = MyBackTest.MyClass_BackTestEvent() # event-driven backtesting class
myBTV = MyBackTest.MyClass_BackTestVector() # vectorized backtesting class
myML = MyMachineLearning.MyClass_MachineLearning() # general machine learning class
mySQL = MyDataBase.MyClass_MySQL(connect=False) # MySQL class
mySQLAPP = MyDataBase.MyClass_SQL_APPIntegration() # database application integration
myWebQD = MyWebCrawler.MyClass_QuotesDownload(tushare=False) # financial quotes download class
myWebR = MyWebCrawler.MyClass_Requests() # Requests-based crawler class
myWebS = MyWebCrawler.MyClass_Selenium(openChrome=False) # Selenium browser automation class
myWebAPP = MyWebCrawler.MyClass_Web_APPIntegration() # crawler application integration class
myEmail = MyWebCrawler.MyClass_Email() # email interaction class
myReportA = MyQuant.MyClass_ReportAnalysis() # research report analysis class
myFactorD = MyQuant.MyClass_Factor_Detection() # factor detection class
myKeras = MyDeepLearning.MyClass_Keras() # general Keras class
#------------------------------------------------------------
#%%
from tensorflow.keras.applications import VGG16
conv_base = VGG16(weights='imagenet',
include_top=False,
input_shape=(150, 150, 3))
#%%
conv_base.summary()
#%%
import os
import numpy as np
original_dataset_dir = os.path.expandvars('%USERPROFILE%')+'\\.kaggle\\dogs-vs-cats'
base_dir = original_dataset_dir+'\\cats_and_dogs_small'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
# Fast feature extraction using the pre-trained model
train_features, train_labels = myKeras.extract_features_from_directory(conv_base,train_dir,2000,batch_size=20)
validation_features, validation_labels = myKeras.extract_features_from_directory(conv_base,validation_dir,1000,batch_size=20)
test_features, test_labels = myKeras.extract_features_from_directory(conv_base,test_dir,1000,batch_size=20)
#%%
reshapecount = np.array(train_features.shape[1:]).cumprod()[-1]
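# Added note: with include_top=False and 150x150 inputs, VGG16's conv base outputs feature
# maps of shape (4, 4, 512), so reshapecount is 4 * 4 * 512 = 8192, matching the input_dim
# of the Dense classifier defined below.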
train_features = np.reshape(train_features, (2000, reshapecount))
validation_features = np.reshape(validation_features, (1000, reshapecount))
test_features = np.reshape(test_features, (1000, reshapecount))
#%%
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers
model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(layers.Dropout(0.5)) # (note: dropout regularization is used here)
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(train_features, train_labels,
epochs=30,
batch_size=20,
validation_data=(validation_features, validation_labels))
myKeras.plot_acc_loss(history)
#%%
myKeras.clear_session()
from tensorflow.keras import models
from tensorflow.keras import layers
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
#%%
model.summary()
#%%
# Freeze the conv_base network
print('This is the number of trainable weights '
'before freezing the conv base:', len(model.trainable_weights))
conv_base.trainable = False
print('There are 4 trainable weight tensors in total, 2 per layer (the main weight matrix and the bias vector).', len(model.trainable_weights))
#%%
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=2e-5),
metrics=['acc'])
model,history = myKeras.cnn2D_fit_from_directory(model,train_dir,validation_dir,augmentation=True,flow_batch_size=20,epochs=30,plot=True)
#%%
myKeras.plot_acc_loss(history)
model.save(base_dir+'\\cats_and_dogs_small_3.h5')
#%%
conv_base.summary()
#%%
conv_base = myKeras.fine_tune_model(conv_base,'block5_conv1')
#%%
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-5),
metrics=['acc'])
model,history = myKeras.cnn2D_fit_from_directory(model,train_dir,validation_dir,augmentation=True,flow_batch_size=20,epochs=30,plot=True)
#%%
model.save(base_dir+'\\cats_and_dogs_small_4.h5')
#%%
myKeras.cnn2D_evaluate_from_directory(model,test_dir,flow_batch_size=20,steps=50)
|
[
"39754824+MuSaCN@users.noreply.github.com"
] |
39754824+MuSaCN@users.noreply.github.com
|
fd7663c74ab7441e0d5e4e98c3e5a02023c432b6
|
48983b88ebd7a81bfeba7abd6f45d6462adc0385
|
/HakerRank/data_structures/trees/tree_top_view.py
|
54610fe4a1f57e64ca716708d368bed09f4c0f84
|
[] |
no_license
|
lozdan/oj
|
c6366f450bb6fed5afbaa5573c7091adffb4fa4f
|
79007879c5a3976da1e4713947312508adef2e89
|
refs/heads/master
| 2018-09-24T01:29:49.447076
| 2018-06-19T14:33:37
| 2018-06-19T14:33:37
| 109,335,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
# author: Daniel Lozano
# source: HackerRank ( https://www.hackerrank.com )
# problem name: Data Structures: Trees: Top View
# problem url: https://www.hackerrank.com/challenges/tree-top-view/problem
def topView(root):
instance = root
if not root:
return
    # top view: nodes along the leftmost path (outermost first), the root, then the rightmost path
    answer = [instance.data]
while instance.left:
answer.append(instance.left.data)
instance = instance.left
answer.reverse()
while root.right:
answer.append(root.right.data)
root = root.right
print " ".join(map(str, answer))
|
[
"lozanodaniel02@gmail.com"
] |
lozanodaniel02@gmail.com
|
314ea5491f976610601bc93def87970f19fa13e6
|
33e006f5ae711d44d796a0e3ca384caefe1ec299
|
/Wprowadzenie do algorytmow - ksiazka/rozdzial 2/2.1-2.py
|
1919575e88d14a8d51ece544f7292e484a60b267
|
[] |
no_license
|
Cozoob/Algorithms_and_data_structures
|
959b188f8cef3e6b7b1fd2a6c45a5e169d8f41fe
|
f786a397964f71e2938d9fd6268d3428e3ed7992
|
refs/heads/main
| 2023-08-05T02:23:43.565651
| 2021-09-17T10:52:14
| 2021-09-17T10:52:14
| 407,532,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
# Modify INSERTION_SORT so that it sorts in non-increasing order
def insertion_sort(A):
for j in range(1, len(A)):
key = A[j]
        # Insert A[j] into the sorted sequence A[1,...,j-1]
i = j - 1
while i >= 0 and A[i] < key:
A[i + 1] = A[i]
i -= 1
A[i + 1] = key
return A
if __name__ == '__main__':
A = [5,2,4,6,1,3]
B = [31,41,59,26,41,58]
print(A)
insertion_sort(A)
insertion_sort(B)
print(A)
print(B)
|
[
"kozubmarcin10@gmail.com"
] |
kozubmarcin10@gmail.com
|
c62c4a9af1d76050479aa8b61113b12aa938d298
|
9187131d6a06e4a2cd56a0eb6d20604b38ea2359
|
/apitest/tp/mail/test_case/page_object/mail_page.py
|
fd5073f7bbd54dfe0c0487251a04d2b334badf62
|
[] |
no_license
|
hikaruwin/hikaru
|
0dc75843047c01023327854798fbf4999e710f57
|
1675192d4584609bb1f678c2e5a82c06915ab25e
|
refs/heads/master
| 2020-03-27T23:33:14.958007
| 2018-09-04T10:29:40
| 2018-09-04T10:29:40
| 147,327,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
# coding: utf-8
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from time import sleep
from .base import Base
class MailPage(Base):
url = '/'
login_success_user_loc = (By.ID, 'spnUid')
def login_success_user(self):
return self.find_element(*self.login_success_user_loc).text
|
[
"your email"
] |
your email
|
1c8bcdf2d99bd5630809fedcd85b30f4ca5af1d3
|
b61b0a5333814779669320532233ee75327f039f
|
/xls_proc.py
|
2b62ee064f9f7d001f18b164b612cead6498106d
|
[] |
no_license
|
marine0131/attendance_calc
|
75f6d387e336dfd7ff22fcde5bcb13c96a87c810
|
e991f30ba7ff88474b2135315b12f360d52ee726
|
refs/heads/master
| 2020-03-26T07:52:31.226713
| 2018-08-14T08:37:25
| 2018-08-14T08:37:25
| 144,675,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,994
|
py
|
#! /usr/bin/env python
import xlrd
import xlwt
import re
import datetime
import json
with open("config.txt", 'r') as f:
params = json.load(f)
FILE = params["FILE"]
MONTH = params['MONTH']
ON_WORK_TIME = params['ON_WORK_TIME']
LUNCH_TIME = params['LUNCH_TIME']
REST_TIME = params['REST_TIME']
AFTERNOON_WORK_TIME = params['AFTERNOON_WORK_TIME']
OFF_WORK_TIME = params['OFF_WORK_TIME']
OVER_WORK_TIME = params['OVER_WORK_TIME']
OVER_TIME = params['OVER_TIME']
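# For reference, an assumed example of config.txt (the real file ships with the script):
# {
#     "FILE": "attendance.xls",
#     "MONTH": "2018/07",
#     "ON_WORK_TIME": "09:00",
#     "LUNCH_TIME": "12:00",
#     "REST_TIME": "13:00",
#     "AFTERNOON_WORK_TIME": "13:00",
#     "OFF_WORK_TIME": "18:00",
#     "OVER_WORK_TIME": "18:30",
#     "OVER_TIME": "19:00"
# }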
def str_to_absmin(t_str):
a = list(map(int, t_str.split(':'))) # list() for python3 compatible
return a[0]*60 + a[1]
def duration(start, end):
return str_to_absmin(end) - str_to_absmin(start)
def proc_time(time_list, is_weekend=False):
if len(time_list) == 0:
return "", "~", 0, 0
if len(time_list) == 1:
return "", time_list[0]+"~", 0, 0
start = time_list[0]
end = time_list[-1]
start_min = str_to_absmin(start)
end_min = str_to_absmin(end)
tag = ""
start_end = start + "~" + end
work_duration = 0
over_duration = 0
if is_weekend:
over_duration = duration(start, end)
over_duration = round(over_duration/60.0, 1) # * 2)/2.0
return tag, start_end, work_duration, over_duration
else:
morning_work_min = duration(ON_WORK_TIME, LUNCH_TIME)
afternoon_work_min = duration(AFTERNOON_WORK_TIME, OFF_WORK_TIME)
regular_work_min = morning_work_min + afternoon_work_min
if start_min <= str_to_absmin(ON_WORK_TIME): # check in regular
if end_min > str_to_absmin(OVER_TIME): # work over time
work_duration = regular_work_min
over_duration = duration(OVER_WORK_TIME, end)
elif end_min >= str_to_absmin(OFF_WORK_TIME): # regular work
work_duration = regular_work_min
elif end_min >= str_to_absmin(AFTERNOON_WORK_TIME): # work over lunch
work_duration = morning_work_min + duration(AFTERNOON_WORK_TIME, end)
elif end_min >= str_to_absmin(LUNCH_TIME): # work whole morning
work_duration = morning_work_min
else: # work only morning
work_duration = duration(ON_WORK_TIME, end)
elif start_min > str_to_absmin(ON_WORK_TIME) and start_min <= str_to_absmin(LUNCH_TIME): # late check in morning
late = start_min - str_to_absmin(ON_WORK_TIME)
tag = "late: " + str(late) + "min"
if late < 30: # late but worktime is full
late = 0
start = ON_WORK_TIME
if late > 60:
tag = "absence: " + str(late) + "min"
if end_min > str_to_absmin(OVER_TIME): # work over time
work_duration = regular_work_min - late
over_duration = duration(OVER_WORK_TIME, end)
elif end_min > str_to_absmin(OFF_WORK_TIME): # regular work
work_duration = regular_work_min - late
elif end_min > str_to_absmin(AFTERNOON_WORK_TIME): # work over lunch
work_duration = duration(start, LUNCH_TIME) + duration(AFTERNOON_WORK_TIME, end)
elif end_min >= str_to_absmin(LUNCH_TIME): # work whole morning
work_duration = duration(start, LUNCH_TIME)
else: # work only morning
work_duration = duration(start, end)
# check in lunchtime
elif start_min > str_to_absmin(LUNCH_TIME) and start_min < str_to_absmin(AFTERNOON_WORK_TIME):
tag = "absence: " + str(morning_work_min) + "min"
if end_min > str_to_absmin(OVER_TIME): # work over time
work_duration = afternoon_work_min
over_duration = duration(OVER_WORK_TIME, end)
elif end_min > str_to_absmin(OFF_WORK_TIME): # regular work
work_duration = afternoon_work_min
elif end_min > str_to_absmin(AFTERNOON_WORK_TIME): # work over lunch
work_duration = duration(start, end)
else:
pass
# check in afternoon
elif start_min > str_to_absmin(AFTERNOON_WORK_TIME) and start_min <= str_to_absmin(OFF_WORK_TIME): # check in afternoon
tag = "absence: morning"
if end_min > str_to_absmin(OVER_TIME): # work over time
work_duration = duration(start, OFF_WORK_TIME)
over_duration = duration(OVER_WORK_TIME, end)
elif end_min > str_to_absmin(OFF_WORK_TIME): # regular work
work_duration = duration(start, OFF_WORK_TIME)
else:
work_duration = duration(start, end)
else: # check in evening
if end_min > str_to_absmin(OVER_TIME): # work over time
over_duration = duration(OVER_WORK_TIME, end)
else:
pass
work_duration = round(work_duration/60.0, 1) # * 2)/2.0
over_duration = round(over_duration/60.0, 1) # * 2)/2.0
return tag, start_end, work_duration, over_duration
def check_weekend(day):
weekenum = ["Mon", "Tus", "Wed", "Thu", "Fri", "Sat", "Sun"]
year_month = MONTH.split('/')
d = datetime.date(int(year_month[0]), int(year_month[1]), int(day))
if d.weekday() == 5 or d.weekday() == 6:
return True, weekenum[d.weekday()]
else:
return False, weekenum[d.weekday()]
if __name__ == "__main__":
src_book = xlrd.open_workbook(FILE)
src_sheet = src_book.sheets()[2]
n_rows = src_sheet.nrows
print("sheet rows:{}".format(n_rows))
dst_book = xlwt.Workbook()
dst_sheet = dst_book.add_sheet('Sheet1')
# copy the head
row = src_sheet.row_values(2)
dst_sheet.write(0, 0, row[0])
dst_sheet.write(0, 1, row[2])
dst_sheet.write(0, 20, "generated by whj")
row = src_sheet.row_values(3)
for i, r in enumerate(row):
dst_sheet.write(1, i+1, r)
# copy and calc work time
ind = 2
for i in range(4, n_rows):
row = src_sheet.row_values(i)
if i%2 == 0:
dst_sheet.write(ind, 0, row[2] + ":" + row[10])
ind += 1
else:
# write title
dst_sheet.write(ind, 0, "start~end")
dst_sheet.write(ind+1, 0, "worktime")
dst_sheet.write(ind+2, 0, "overtime")
dst_sheet.write(ind+3, 0, "comment")
for j, r in enumerate(row):
time_list = re.findall(r'.{5}', r)
is_weekend, day_tag = check_weekend(src_sheet.cell_value(3, j))
tag, start_end, work_duration, over_duration = proc_time(time_list, is_weekend)
dst_sheet.write(ind, j+1, start_end)
dst_sheet.write(ind+1, j+1, work_duration)
dst_sheet.write(ind+2, j+1, over_duration)
dst_sheet.write(ind+3, j+1, tag)
if is_weekend:
dst_sheet.write(ind-1, j+1, day_tag)
ind += 4
dst_book.save("new.xls")
|
[
"wanghj@woosiyuan.com"
] |
wanghj@woosiyuan.com
|
580d3bab5161c2089c9b1c92b66b2465fd94ddb9
|
3e24611b7315b5ad588b2128570f1341b9c968e8
|
/pacbiolib/thirdparty/pythonpkgs/scipy/scipy_0.9.0+pbi86/lib/python2.7/site-packages/scipy/linalg/interface_gen.py
|
aed22b2164e1399c612a6bd8fd85ad35866e808f
|
[
"BSD-2-Clause"
] |
permissive
|
bioCKO/lpp_Script
|
dc327be88c7d12243e25557f7da68d963917aa90
|
0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2
|
refs/heads/master
| 2022-02-27T12:35:05.979231
| 2019-08-27T05:56:33
| 2019-08-27T05:56:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,791
|
py
|
#! python
import os
import re
from distutils.dir_util import mkpath
def all_subroutines(interface_in):
# remove comments
comment_block_exp = re.compile(r'/\*(?:\s|.)*?\*/')
subroutine_exp = re.compile(r'subroutine (?:\s|.)*?end subroutine.*')
function_exp = re.compile(r'function (?:\s|.)*?end function.*')
interface = comment_block_exp.sub('',interface_in)
subroutine_list = subroutine_exp.findall(interface)
function_list = function_exp.findall(interface)
subroutine_list = subroutine_list + function_list
subroutine_list = map(lambda x: x.strip(),subroutine_list)
return subroutine_list
def real_convert(val_string):
return val_string
def complex_convert(val_string):
return '(' + val_string + ',0.)'
def convert_types(interface_in,converter):
regexp = re.compile(r'<type_convert=(.*?)>')
interface = interface_in[:]
while 1:
sub = regexp.search(interface)
if sub is None: break
converted = converter(sub.group(1))
interface = interface.replace(sub.group(),converted)
return interface
def generic_expand(generic_interface,skip_names=[]):
generic_types ={'s' :('real', 'real', real_convert,
'real'),
'd' :('double precision','double precision',real_convert,
'double precision'),
'c' :('complex', 'complex',complex_convert,
'real'),
'z' :('double complex', 'double complex',complex_convert,
'double precision'),
'cs':('complex', 'real',complex_convert,
'real'),
'zd':('double complex', 'double precision',complex_convert,
'double precision'),
'sc':('real', 'complex',real_convert,
'real'),
'dz':('double precision','double complex', real_convert,
'double precision')}
generic_c_types = {'real':'float',
'double precision':'double',
'complex':'complex_float',
'double complex':'complex_double'}
# cc_types is specific to the ATLAS C BLAS, in particular for complex arguments
generic_cc_types = {'real':'float',
'double precision':'double',
'complex':'void',
'double complex':'void'}
#2. get all subroutines
subs = all_subroutines(generic_interface)
print len(subs)
#loop through the subs
type_exp = re.compile(r'<tchar=(.*?)>')
TYPE_EXP = re.compile(r'<TCHAR=(.*?)>')
routine_name = re.compile(r'(subroutine|function)\s*(?P<name>\w+)\s*\(')
interface = ''
for sub in subs:
#3. Find the typecodes to use:
m = type_exp.search(sub)
if m is None:
interface = interface + '\n\n' + sub
continue
type_chars = m.group(1)
# get rid of spaces
type_chars = type_chars.replace(' ','')
# get a list of the characters (or character pairs)
type_chars = type_chars.split(',')
# Now get rid of the special tag that contained the types
sub = re.sub(type_exp,'<tchar>',sub)
m = TYPE_EXP.search(sub)
if m is not None:
sub = re.sub(TYPE_EXP,'<TCHAR>',sub)
sub_generic = sub.strip()
for char in type_chars:
type_in,type_out,converter, rtype_in = generic_types[char]
sub = convert_types(sub_generic,converter)
function_def = sub.replace('<tchar>',char)
function_def = function_def.replace('<TCHAR>',char.upper())
function_def = function_def.replace('<type_in>',type_in)
function_def = function_def.replace('<type_in_c>',
generic_c_types[type_in])
function_def = function_def.replace('<type_in_cc>',
generic_cc_types[type_in])
function_def = function_def.replace('<rtype_in>',rtype_in)
function_def = function_def.replace('<rtype_in_c>',
generic_c_types[rtype_in])
function_def = function_def.replace('<type_out>',type_out)
function_def = function_def.replace('<type_out_c>',
generic_c_types[type_out])
m = routine_name.match(function_def)
if m:
if m.group('name') in skip_names:
print 'Skipping',m.group('name')
continue
else:
print 'Possible bug: Failed to determine routine name'
interface = interface + '\n\n' + function_def
return interface
#def interface_to_module(interface_in,module_name,include_list,sdir='.'):
def interface_to_module(interface_in,module_name):
pre_prefix = "!%f90 -*- f90 -*-\n"
# heading and tail of the module definition.
file_prefix = "\npython module " + module_name +" ! in\n" \
"!usercode '''#include \"cblas.h\"\n"\
"!'''\n"\
" interface \n"
file_suffix = "\n end interface\n" \
"end module %s" % module_name
return pre_prefix + file_prefix + interface_in + file_suffix
def process_includes(interface_in,sdir='.'):
include_exp = re.compile(r'\n\s*[^!]\s*<include_file=(.*?)>')
include_files = include_exp.findall(interface_in)
for filename in include_files:
f = open(os.path.join(sdir,filename))
interface_in = interface_in.replace('<include_file=%s>'%filename,
f.read())
f.close()
return interface_in
def generate_interface(module_name,src_file,target_file,skip_names=[]):
print "generating",module_name,"interface"
f = open(src_file)
generic_interface = f.read()
f.close()
sdir = os.path.dirname(src_file)
generic_interface = process_includes(generic_interface,sdir)
generic_interface = generic_expand(generic_interface,skip_names)
module_def = interface_to_module(generic_interface,module_name)
mkpath(os.path.dirname(target_file))
f = open(target_file,'w')
user_routines = os.path.join(sdir,module_name+"_user_routines.pyf")
if os.path.exists(user_routines):
f2 = open(user_routines)
f.write(f2.read())
f2.close()
f.write(module_def)
f.close()
def process_all():
# process the standard files.
for name in ['fblas','cblas','clapack','flapack']:
generate_interface(name,'generic_%s.pyf'%(name),name+'.pyf')
if __name__ == "__main__":
process_all()
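# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Generating a single interface file outside of process_all(); the file names mirror
# the ones hard-coded above and are otherwise assumptions about the working directory.
# generate_interface('fblas', 'generic_fblas.pyf', 'fblas.pyf')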
|
[
"409511038@qq.com"
] |
409511038@qq.com
|
013c1d369981d94c454a38a281f78ed4f54d4b91
|
5f86944bdf1b810a84c63adc6ed01bbb48d2c59a
|
/kubernetes/test/test_settings_api.py
|
e266034720dee9676cdc5fb197e1b837aaa3f470
|
[
"Apache-2.0"
] |
permissive
|
m4ttshaw/client-python
|
384c721ba57b7ccc824d5eca25834d0288b211e2
|
4eac56a8b65d56eb23d738ceb90d3afb6dbd96c1
|
refs/heads/master
| 2021-01-13T06:05:51.564765
| 2017-06-21T08:31:03
| 2017-06-21T08:31:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 848
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.settings_api import SettingsApi
class TestSettingsApi(unittest.TestCase):
""" SettingsApi unit test stubs """
def setUp(self):
self.api = kubernetes.client.apis.settings_api.SettingsApi()
def tearDown(self):
pass
def test_get_api_group(self):
"""
Test case for get_api_group
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
6a5f99fc2d8fd1c5ad7da2f097eecb0cf51bf7cf
|
0ba2c3776618b5b8b76f4a23f21e9c6ad3f6e2e1
|
/afterclass/homework1/007_1.py
|
98e2ac33076dbf3ab283e7a973e4e7a0a135d6f8
|
[] |
no_license
|
WangDongDong1234/python_code
|
6dc5ce8210b1dcad7d57320c9e1946fd4b3fe302
|
6a785306a92d328a0d1427446ca773a9803d4cc0
|
refs/heads/master
| 2020-04-15T12:35:03.427589
| 2019-09-16T15:38:25
| 2019-09-16T15:38:25
| 164,681,323
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,826
|
py
|
# list records, for each index i, the length of the longest increasing subsequence ending at i
# Max returns the index holding the largest value (the best split point)
def Max(list,n):
max=0
index=0;
for i in range(0,n):
if list[i]>max:
max=list[i]
index=i
return index;
def LIS(array,len,list,list_increase):
# list records, for each index i, the length of the longest increasing subsequence ending at i
for i in range(0,len):
list.append(1)
list_increase[i].append(array[i])
for j in range(0,i):
if (array[i]>array[j])and(list[j]+1>list[i]):
list[i]=list[j]+1
for item in list_increase[j]:
if item not in list_increase[i]:
list_increase[i].append(item)
location=Max(list,len)
return location
arr=input()
arr_tmp=arr.strip(" ").split(" ")
# the array as originally entered
array_0=[]
array=[]
for item in arr_tmp:
array.append(int(item))
array_0.append(int(item))
list1=[]
list_increase=[]
for i in range(0,len(array_0)):
tmp_list=[]
list_increase.append(tmp_list)
index=LIS(array,len(array),list1,list_increase)
#print(list1)
#print(list_increase)
array.reverse()
list_reduce=[]
list2=[]
for i in range(0,len(array_0)):
tmp_list = []
list_reduce.append(tmp_list)
index2=LIS(array,len(array),list2,list_reduce)
list2.reverse()
list_reduce.reverse()
#print(list2)
#print(list_reduce)
sum=0
index=0
for i in range(0, len(list1)):
if sum<(list1[i]+list2[i]):
sum=list1[i]+list2[i]
index=i
list_increase[index].sort()
list_reduce[index].sort(reverse=True)
#print(list_increase[index])
#print(list_reduce[index])
print_list=[]
for item in list_increase[index]:
print_list.append(item)
for i in range(1,len(list_reduce[index])):
print_list.append(list_reduce[index][i])
for item in print_list:
print(item,end=" ")
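# --- Illustrative run (added for clarity; not part of the original file) ---
# The script reads one line of space-separated integers and prints the longest
# "bitonic" subsequence (strictly increasing, then decreasing), built by combining
# an LIS from the left with an LIS of the reversed array. A small hand-worked
# example, offered as a sketch rather than a guaranteed output:
#   input : 2 1 4 3
#   output: 2 4 3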
|
[
"827495316@qq.com"
] |
827495316@qq.com
|
35b823ee571526aabe931d1cf528fedc446c7de5
|
55e9f3b00fc2e488597bab5225ed321c86efbd4b
|
/sdk/test/test_frequency_response.py
|
8e97d49e14aa132ed3efe4ee80569106b6d29d8d
|
[
"MIT"
] |
permissive
|
bs-yapily/yapily-sdk-python
|
ad9d04c28f3d744830734c3444c1cef8215206fd
|
0bba45e351b674eb655425a51190f539c4e9896f
|
refs/heads/master
| 2020-08-26T17:18:53.156429
| 2019-10-22T11:01:16
| 2019-10-22T11:01:16
| 217,085,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 965
|
py
|
# coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
OpenAPI spec version: 0.0.155
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import yapily
from yapily.models.frequency_response import FrequencyResponse # noqa: E501
from yapily.rest import ApiException
class TestFrequencyResponse(unittest.TestCase):
"""FrequencyResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFrequencyResponse(self):
"""Test FrequencyResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = yapily.models.frequency_response.FrequencyResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"systems@yapily.com"
] |
systems@yapily.com
|
2eb15e7a7809dccc58b91240a1a0bdbde8f2ea7a
|
162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d
|
/examples/linear_model/plot_logistic_l1_l2_sparsity.py
|
afccba025af1f2bb50d6e3b57e30535232120bfa
|
[] |
no_license
|
testsleeekGithub/trex
|
2af21fa95f9372f153dbe91941a93937480f4e2f
|
9d27a9b44d814ede3996a37365d63814214260ae
|
refs/heads/master
| 2020-08-01T11:47:43.926750
| 2019-11-06T06:47:19
| 2019-11-06T06:47:19
| 210,987,245
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,328
|
py
|
"""
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1, L2 and Elastic-Net penalty are used for different values of C. We can see
that large values of C give more freedom to the model. Conversely, smaller
values of C constrain the model more. In the L1 penalty case, this leads to
sparser solutions. As expected, the Elastic-Net penalty sparsity is between
that of L1 and L2.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mrex.linear_model import LogisticRegression
from mrex import datasets
from mrex.preprocessing import StandardScaler
X, y = datasets.load_digits(return_X_y=True)
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(int)
l1_ratio = 0.5 # L1 weight in the Elastic-Net regularization
fig, axes = plt.subplots(3, 3)
# Set regularization parameter
for i, (C, axes_row) in enumerate(zip((1, 0.1, 0.01), axes)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01, solver='saga')
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01, solver='saga')
clf_en_LR = LogisticRegression(C=C, penalty='elasticnet', solver='saga',
l1_ratio=l1_ratio, tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
clf_en_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
coef_en_LR = clf_en_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
sparsity_en_LR = np.mean(coef_en_LR == 0) * 100
print("C=%.2f" % C)
print("{:<40} {:.2f}%".format("Sparsity with L1 penalty:", sparsity_l1_LR))
print("{:<40} {:.2f}%".format("Sparsity with Elastic-Net penalty:",
sparsity_en_LR))
print("{:<40} {:.2f}%".format("Sparsity with L2 penalty:", sparsity_l2_LR))
print("{:<40} {:.2f}".format("Score with L1 penalty:",
clf_l1_LR.score(X, y)))
print("{:<40} {:.2f}".format("Score with Elastic-Net penalty:",
clf_en_LR.score(X, y)))
print("{:<40} {:.2f}".format("Score with L2 penalty:",
clf_l2_LR.score(X, y)))
if i == 0:
axes_row[0].set_title("L1 penalty")
axes_row[1].set_title("Elastic-Net\nl1_ratio = %s" % l1_ratio)
axes_row[2].set_title("L2 penalty")
for ax, coefs in zip(axes_row, [coef_l1_LR, coef_en_LR, coef_l2_LR]):
ax.imshow(np.abs(coefs.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
ax.set_xticks(())
ax.set_yticks(())
axes_row[0].set_ylabel('C = %s' % C)
plt.show()
|
[
"shkolanovaya@gmail.com"
] |
shkolanovaya@gmail.com
|
71e8829afac3e0a0c65027c407736ec43eeb6262
|
0cba5529e387ba0f077b4e8ddeb96f914004f5df
|
/malaya/emotion.py
|
dcd419468d7b3fce6dc88b499f1cc790ea1925c7
|
[
"MIT"
] |
permissive
|
AsyrafAzlan/Malaya
|
dc78398ee6880578f40c5646a48882a5913217ae
|
3d5166173cf74881f7a56fffaaf391813c55d4f1
|
refs/heads/master
| 2021-05-21T22:47:41.863857
| 2020-04-03T15:00:21
| 2020-04-03T15:00:21
| 252,841,526
| 1
| 0
|
MIT
| 2020-04-03T21:04:44
| 2020-04-03T21:04:44
| null |
UTF-8
|
Python
| false
| false
| 1,861
|
py
|
from malaya.supervised import softmax
from malaya.path import PATH_EMOTION, S3_PATH_EMOTION
from herpetologist import check_type
_emotion_label = ['anger', 'fear', 'joy', 'love', 'sadness', 'surprise']
_availability = [
'bert',
'tiny-bert',
'albert',
'tiny-albert',
'xlnet',
'alxlnet',
]
def available_transformer_model():
"""
List available transformer emotion analysis models.
"""
return _availability
def multinomial(**kwargs):
"""
Load multinomial emotion model.
Returns
-------
BAYES : malaya._models._sklearn_model.BAYES class
"""
return softmax.multinomial(
PATH_EMOTION, S3_PATH_EMOTION, 'emotion', _emotion_label, **kwargs
)
@check_type
def transformer(model: str = 'xlnet', **kwargs):
"""
Load Transformer emotion model.
Parameters
----------
model : str, optional (default='xlnet')
Model architecture supported. Allowed values:
* ``'bert'`` - BERT architecture from google.
* ``'tiny-bert'`` - BERT architecture from google with smaller parameters.
* ``'albert'`` - ALBERT architecture from google.
* ``'tiny-albert'`` - ALBERT architecture from google with smaller parameters.
* ``'xlnet'`` - XLNET architecture from google.
* ``'alxlnet'`` - XLNET architecture from google + Malaya.
Returns
-------
MODEL : Transformer class
"""
model = model.lower()
if model not in _availability:
raise Exception(
'model not supported, please check supported models from malaya.emotion.available_transformer_model()'
)
return softmax.transformer(
PATH_EMOTION,
S3_PATH_EMOTION,
'emotion',
_emotion_label,
model = model,
**kwargs
)
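# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal sketch of loading a model from this module. The sample sentence and the
# predict() call are assumptions about the returned object's interface; consult the
# Malaya documentation for the real prediction API.
if __name__ == '__main__':
    emotion_model = transformer(model='tiny-bert')
    print(emotion_model.predict(['saya sangat gembira hari ini']))  # assumed method name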
|
[
"husein.zol05@gmail.com"
] |
husein.zol05@gmail.com
|
f68c22a3ebcff8045d9ad3131f9b30a050725a36
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_filthiness.py
|
d1e4e34d83d291300555681e0bf38feb72c2e796
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
# class header
class _FILTHINESS():
def __init__(self,):
self.name = "FILTHINESS"
self.definitions = [u'the quality of being very dirty']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
b033e8f0b13e41e324b11e403739c993c52bbe7e
|
a4a01e251b194f6d3c6654a2947a33fec2c03e80
|
/PythonWeb/Ajax/1809/Day02/1808/AjaxDemo02/run01.py
|
35ac2bfbdbdab18d5da55f05332beae995cd1c85
|
[] |
no_license
|
demo112/1809
|
033019043e2e95ebc637b40eaf11c76bfd089626
|
e22972229e5e7831dce2aae0b53ce19a6e3bb106
|
refs/heads/master
| 2020-04-09T07:10:49.906231
| 2019-02-27T13:08:45
| 2019-02-27T13:08:45
| 160,143,869
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,861
|
py
|
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
import json
import pymysql
pymysql.install_as_MySQLdb()
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI']="mysql://root:123456@localhost:3306/flask"
db = SQLAlchemy(app)
class Login(db.Model):
__tablename__ = "login"
id = db.Column(db.Integer,primary_key=True)
lname = db.Column(db.String(30))
lpwd = db.Column(db.String(30))
uname = db.Column(db.String(30))
def to_dict(self):
dic = {
'id':self.id,
'lname' : self.lname,
'lpwd' : self.lpwd,
'uname' : self.uname,
}
return dic
@app.route('/00-homework')
def homework():
return render_template('00-homework.html')
@app.route('/00-server')
def server00():
lname = request.args.get('lname')
login=Login.query.filter_by(lname=lname).first()
if login:
return "用户名称已经存在"
else:
return "通过"
@app.route('/01-post')
def post():
return render_template("01-post.html")
@app.route('/01-server',methods=['POST'])
def server01():
uname = request.form['uname']
uage = request.form['uage']
return "传递过来的uname的值为:%s,传递过来的uage的值为:%s" % (uname,uage)
@app.route('/02-form',methods=['GET','POST'])
def form():
if request.method == 'GET':
return render_template('02-form.html')
else:
uname = request.form['uname']
uage = request.form['uage']
return "传递过来的uname的值为:%s,传递过来的uage的值为:%s" % (uname,uage)
@app.route('/03-getlogin')
def getlogin():
return render_template('03-getlogin.html')
@app.route('/03-server')
def server03():
logins = Login.query.all()
str1 = ""
for login in logins:
str1 += str(login.id)
str1 += login.lname
str1 += login.lpwd
str1 += login.uname
return str1
@app.route('/04-json')
def json_views():
return render_template("04-json.html")
@app.route('/04-server')
def server04():
# list = ["王老六","RapWang","隔壁老顽固"]
# dic = {
# 'name':'TeacherWang',
# 'age' : 35,
# 'gender' : 'Male',
# }
# jsonStr=json.dumps(dic)
list = [
{
"name":"wangwc",
"age":35,
"gender":"Male",
},
{
'name':'RapWang',
'age':40,
'gender':'Female',
}
]
jsonStr=json.dumps(list)
return jsonStr
@app.route('/05-json-login')
def json_login():
return render_template('05-json-login.html')
@app.route('/05-server')
def server05():
# Get the Login record with id 1
login=Login.query.filter_by(id=1).first()
jsonStr=json.dumps(login.to_dict())
return jsonStr
if __name__ == "__main__":
app.run(debug=True)
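# --- Illustrative client sketch (added for clarity; not part of the original file) ---
# With the app running locally, the Ajax-style check at /00-server can be simulated
# from another process using the requests library; the lname value is an assumption.
#
# import requests
# resp = requests.get('http://127.0.0.1:5000/00-server', params={'lname': 'demo'})
# print(resp.text)  # "Username already exists" or "OK"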
|
[
"huafengdongji@hotmail.com"
] |
huafengdongji@hotmail.com
|
35e5326d1aad1c103b3e76b9efefdd92864a2926
|
45edff14271724c5bf27e62e96eeb635840eae22
|
/ML/ensemble_learning/util.py
|
d998161fe6c0a48ae7207841cc63d1e0147b0db8
|
[] |
no_license
|
DaiJitao/machine_learning
|
1e41208dc94836a97e57a4b0f5778f8da2bb81d4
|
49e1db9ecbfbf886a11ce416eea402d214cf2049
|
refs/heads/master
| 2021-06-25T23:52:06.066315
| 2021-02-07T16:17:50
| 2021-02-07T16:17:50
| 209,712,507
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
"""
Utility functions commonly used for decision trees: metric computation and data loading.
"""
import numpy as np
def load_data():
'''
Generate the dataset from Section 8.1.3, Chapter 8 of "Statistical Learning Methods" (统计学习方法).
:return:
'''
dataset_label = np.array([[0, 1], [1, 1], [2, 1], [3, -1], [4, -1], [5, -1], [6, 1], [7, 1], [8, 1], [9, -1]])
return dataset_label
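# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Splitting the returned array into features and labels, following the
# (value, label) column layout produced by load_data().
if __name__ == '__main__':
    data = load_data()
    X, y = data[:, 0], data[:, 1]
    print(X)  # [0 1 2 3 4 5 6 7 8 9]
    print(y)  # [ 1  1  1 -1 -1 -1  1  1  1 -1]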
|
[
"hejinrong@news.cn"
] |
hejinrong@news.cn
|