blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d4160e38a5fe5b321cdff170039b55d5691b1787
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2692487_0/Python/Sibi/osmos.py
|
358f3037cb7d86476c1f1042cd07a622eda406f5
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
from math import log
def solve(mySize,sizes):
    """Return the minimum number of operations (insertions/removals) so the
    mote of size ``mySize`` can absorb every mote in ``sizes``.

    ``sizes`` must be sorted ascending and is consumed destructively:
    absorbed motes are popped from the front.
    """
    # Nothing left to absorb.
    if not sizes:
        return 0
    # Greedily absorb every mote strictly smaller than ours.
    while sizes[0] < mySize:
        mySize += sizes.pop(0)
        if not sizes:
            return 0
    # Inserting one mote of size mySize-1 grows us to 2*mySize-1; if that is
    # already enough to absorb the next mote, recurse after one insertion.
    if sizes[0] < 2*mySize-1:
        return 1 + solve(2*mySize-1, sizes)
    # Otherwise count how many such insertions (each nearly doubling our
    # size) are needed before the next mote becomes absorbable.
    for insertions in range(1,100):
        if mySize*(2**insertions) - (2**insertions) + 1 > sizes[0]:
            break
    # Deleting every remaining mote always works; take the cheaper option.
    if insertions >= len(sizes):
        return len(sizes)
    grown = mySize*2**insertions - 2**insertions + 1
    return min(len(sizes), insertions + solve(grown, sizes))
# Driver: Google Code Jam "Osmos" — read each case from the input file and
# write "Case #i: answer" lines to output.txt.
iFile = open("A-small-attempt2.in","r")
oFile = open("output.txt","w")
cases = int(iFile.readline().strip())
for i in range(cases):
    # First input line: our mote's size, then the count of other motes.
    line1 = [int(a) for a in iFile.readline().strip().split()]
    mySize = line1[0]
    sizes = [int(a) for a in iFile.readline().strip().split()]
    sizes.sort()
    # A size-1 mote can never absorb anything, so all others must be removed.
    if mySize == 1:
        minSolution = len(sizes)
    else:
        minSolution = solve(mySize,sizes)
    output = str(minSolution)
    oFile.write("Case #"+str(i+1)+": "+output+"\n")
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
4711c9358a365a673a560438a3d01eaa7bc1cdcd
|
abd9537f8b90a990e195ded5f9fafdcc108d2a48
|
/swea/d4/1861/1861_june.py
|
8790efff9e44731cc0c47a060c8eb6dc902bcd0a
|
[] |
no_license
|
ohdnf/algorithms
|
127171744631406c1d08cc2583aa569a094fa2cd
|
6f286753dab827facc436af4f2130f11dad2d44f
|
refs/heads/master
| 2023-08-09T11:19:56.445351
| 2021-08-31T13:11:46
| 2021-08-31T13:11:46
| 236,180,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
# SWEA 1861-style grid walk: from each room you may step to a 4-neighbour
# whose number is exactly one greater.  Report, per test case, the smallest
# starting room of the longest such chain, plus the chain length.
import sys
sys.stdin = open('input.txt')
# NOTE(review): this second assignment replaces the line above, so only
# 'n1000.txt' is ever read — looks like a leftover local-test switch; confirm.
sys.stdin = open('n1000.txt')
# 4-neighbour offsets (paired dx/dy).
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
t = int(input())
for test_case in range(1, t+1):
    n = int(input())
    room = [list(map(int, input().split())) for _ in range(n)]
    # v[r] is non-zero iff the room numbered r has a neighbour numbered r+1,
    # i.e. a chain reaching r can be extended by one more step.
    v = [0] * (n**2 + 1)
    for i in range(n):
        for j in range(n):
            for k in range(4):
                nx = i + dx[k]
                ny = j + dy[k]
                if 0 <= nx < n and 0 <= ny < n and room[i][j] + 1 == room[nx][ny]:
                    v[room[i][j]] += 1
                    break
    # Scan room numbers from high to low, accumulating runs of consecutive
    # extendable rooms; '>=' makes ties prefer the smaller starting room.
    start = 0
    move = max_move = 1
    for i in range(n*n, -1, -1):
        if v[i]:
            move += 1
        else:
            if move >= max_move:
                max_move = move
                start = i+1
            move = 1
    print('#{} {} {}'.format(test_case, start, max_move))
|
[
"jupyohong7@gmail.com"
] |
jupyohong7@gmail.com
|
e410945fdc90dfec260540d9e96b5aa39d3d487e
|
20f951bd927e4e5cde8ef7781813fcf0d51cc3ea
|
/fossir/modules/bootstrap/blueprint.py
|
cce0f666235dc8c6007c3badae3b258b1efdbb60
|
[] |
no_license
|
HodardCodeclub/SoftwareDevelopment
|
60a0fbab045cb1802925d4dd5012d5b030c272e0
|
6300f2fae830c0c2c73fe0afd9c684383bce63e5
|
refs/heads/master
| 2021-01-20T00:30:02.800383
| 2018-04-27T09:28:25
| 2018-04-27T09:28:25
| 101,277,325
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
# Flask blueprint wiring for the bootstrap (initial setup) module.
from __future__ import unicode_literals
from fossir.modules.bootstrap.controllers import RHBootstrap
from fossir.web.flask.wrappers import fossirBlueprint
# Templates resolve under the virtual 'bootstrap' folder.
_bp = fossirBlueprint('bootstrap', __name__, template_folder='templates', virtual_template_folder='bootstrap')
# Single endpoint: GET renders the setup page, POST submits it.
_bp.add_url_rule('/bootstrap', 'index', RHBootstrap, methods=('GET', 'POST'))
|
[
"hodardhazwinayo@gmail.com"
] |
hodardhazwinayo@gmail.com
|
856bafa4536d68bf54de8ad934805089bf2d0897
|
0bce7412d58675d6cc410fa7a81c294ede72154e
|
/Python3/0303. Range Sum Query - Immutable.py
|
4d1cb2c5dbaf4b75edcda2762e7d6aa9aa227e01
|
[] |
no_license
|
yang4978/LeetCode
|
9ddf010b0f1dda32cddc7e94c3f987509dea3214
|
6387d05b619d403414bad273fc3a7a2c58668db7
|
refs/heads/master
| 2022-01-15T04:21:54.739812
| 2021-12-28T12:28:28
| 2021-12-28T12:28:28
| 182,653,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
class NumArray:
    """Immutable range-sum query backed by a prefix-sum table (LeetCode 303)."""

    def __init__(self, nums: List[int]):
        # temp[k] holds sum(nums[:k]); the leading 0 makes queries uniform.
        running = 0
        prefix = [0]
        for value in nums:
            running += value
            prefix.append(running)
        self.temp = prefix

    def sumRange(self, i: int, j: int) -> int:
        """Return sum(nums[i..j]) inclusive, in O(1)."""
        return self.temp[j + 1] - self.temp[i]
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j)
|
[
"noreply@github.com"
] |
yang4978.noreply@github.com
|
b93689c0be7a720edd4a7d4908073df64f921dc6
|
29a4c1e436bc90deaaf7711e468154597fc379b7
|
/modules/ieee/doc/ulpdist.py
|
ac366398b04284aadd5b1cb7da038ae261d73daf
|
[
"BSL-1.0"
] |
permissive
|
brycelelbach/nt2
|
31bdde2338ebcaa24bb76f542bd0778a620f8e7c
|
73d7e8dd390fa4c8d251c6451acdae65def70e0b
|
refs/heads/master
| 2021-01-17T12:41:35.021457
| 2011-04-03T17:37:15
| 2011-04-03T17:37:15
| 1,263,345
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,870
|
py
|
# Code-generator specification for the nt2 'ulpdist' functor: arity, accepted
# type classes, return-type expression, and per-type unit-test ranges and
# special values.  Pure data consumed by the codegen templates; hand-edited
# per the inline note below.
[ ## this file was manually modified by jt
  {
    'functor' : {
      'arity' : '2',
      'call_types' : [],
      'ret_arity' : '0',
      'rturn' : {
        'default' : 'typename boost::result_of<nt2::meta::arithmetic(T)>::type',
      },
      'simd_types' : ['real_'],
      'type_defs' : [],
      'types' : ['real_', 'unsigned_int_', 'signed_int_'],
    },
    'info' : 'manually modified',
    'unit' : {
      'global_header' : {
        'first_stamp' : 'modified by jt the 04/12/2010',
        'included' : [],
        'no_ulp' : 'True',
        'notes' : [],
        'stamp' : 'modified by jt the 12/12/2010',
      },
      'ranges' : {
        'real_' : [['T(-10)', 'T(10)'], ['T(-10)', 'T(10)']],
        'signed_int_' : [['-100', '100'], ['-100', '100']],
        'unsigned_int_' : [['0', '100'], ['0', '100']],
      },
      'specific_values' : {
        'default' : {
        },
        'real_' : {
          'nt2::Inf<T>()' : 'nt2::Zero<r_t>()',
          'nt2::Minf<T>()' : 'nt2::Zero<r_t>()',
          'nt2::Mone<T>()' : 'nt2::Zero<r_t>()',
          'nt2::Nan<T>()' : 'nt2::Zero<r_t>()',
          'nt2::One<T>()' : 'nt2::Zero<r_t>()',
          'nt2::Zero<T>()' : 'nt2::Zero<r_t>()',
        },
        'signed_int_' : {
          'nt2::Mone<T>()' : 'nt2::Zero<r_t>()',
          'nt2::One<T>()' : 'nt2::Zero<r_t>()',
          'nt2::Zero<T>()' : 'nt2::Zero<r_t>()',
        },
        'unsigned_int_' : {
          'nt2::One<T>()' : 'nt2::Zero<r_t>()',
          'nt2::Zero<T>()' : 'nt2::Zero<r_t>()',
        },
      },
      'verif_test' : {
      },
    },
    'version' : '0.1',
  },
]
|
[
"jtlapreste@gmail.com"
] |
jtlapreste@gmail.com
|
91e45e105497e90a01f63258bc61dd9638245813
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc121/B/4971858.py
|
85a9a317171327639376a274c3b93a09195fc306
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
# AtCoder ABC121 B: count the testers whose weighted feature sum plus the
# bias C is strictly positive.
N,M,C=list(map(int,input().split()))
B=list(map(int,input().split()))
a=[]
for i in range(N):
    a.append(list(map(int,input().split())))
cnt=0
for k in range(N):
    # Accumulator renamed from `sum`, which shadowed the builtin.
    total=0
    for j in range(M):
        total+=a[k][j]*B[j]
    if total+C > 0:
        cnt+=1
print(cnt)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
7871e98f8480ddaec7ab0d3d64ff3ecdf9d6e751
|
4ca44b7bdb470fcbbd60c2868706dbd42b1984c9
|
/20.11.23/백준_1018.py
|
125a30305d44671fe2572d9e6f4d2a4da5de720e
|
[] |
no_license
|
titiman1013/Algorithm
|
3b3d14b3e2f0cbc4859029eb73ad959ec8778629
|
8a67e36931c42422779a4c90859b665ee468255b
|
refs/heads/master
| 2023-06-29T17:04:40.015311
| 2021-07-06T01:37:29
| 2021-07-06T01:37:29
| 242,510,483
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
# Baekjoon 1018: repaint squares of an N x M board so that some 8x8 sub-board
# becomes a valid chessboard, minimising the number of repainted squares.
import sys; sys.stdin = open('text3.txt', 'r')
# (Earlier draft of check() kept for reference; superseded by the version below.)
# def check(x, y):
#     temp = 0
#     color = ''
#     for p in range(8):
#         for q in range(8):
#             if p == 0 and q == 0:
#                 color = arr[x + p][y + q]
#                 continue
#             if arr[x + p][y + q] == color:
#                 temp += 1
#                 if color == 'W':
#                     color = 'B'
#                 else:
#                     color = 'W'
#                 if temp > res:
#                     return False
#             else:
#                 color = arr[x + p][y + q]
#                 continue
#         if color == 'W':
#             color = 'B'
#         else:
#             color = 'W'
#     return temp
def check(x, y, color):
    """Scan the 8x8 board anchored at (x, y) against an alternating pattern
    whose expected first colour is `color` ('W' or 'B').

    Reads the globals `arr` (board rows) and `res` (best count so far) and
    returns the accumulated count, or False once the count exceeds `res`.
    NOTE(review): the anchor cell (p == q == 0) is skipped here; the caller
    compensates with a +1 adjustment based on arr[x][y].  The counting
    convention (incrementing when the cell EQUALS the expected colour, and
    re-anchoring `color` on a miss) is unusual — presumably it counts the
    repaints for the opposite-anchored board; confirm against the caller.
    """
    temp = 0
    for p in range(8):
        for q in range(8):
            if p == 0 and q == 0:
                continue
            if arr[x + p][y + q] == color:
                # Cell matches the currently expected colour: count it and
                # flip the expectation for the next cell.
                temp += 1
                if color == 'W':
                    color = 'B'
                else:
                    color = 'W'
                # Prune: already worse than the best known answer.
                if temp > res:
                    return False
            else:
                # Miss: re-anchor the expected colour to this cell.
                color = arr[x + p][y + q]
                continue
        # Flip the expectation when wrapping to the next row.
        if color == 'W':
            color = 'B'
        else:
            color = 'W'
    return temp
# NOTE(review): the test-case count is hard-coded to 6 — presumably matched
# to the local 'text3.txt' fixture; confirm before reuse.
for tc in range(6):
    N, M = map(int, input().split())
    arr = [list(input()) for _ in range(N)]
    # Best (minimum) repaint count found so far; sentinel is larger than any
    # possible 8x8 answer.
    res = 10000000000
    for i in range(N - 8 + 1):
        for j in range(M - 8 + 1):
            # Try both chessboard phases for the window at (i, j); check()
            # skips the anchor cell, so +1 compensates when the anchor itself
            # disagrees with the tried phase.
            ischeck = check(i, j, 'W')
            if ischeck == False:
                pass
            else:
                if ischeck < res:
                    if arr[i][j] == 'W':
                        res = ischeck
                    else:
                        res = ischeck + 1
            ischeck2 = check(i, j, 'B')
            if ischeck2 == False:
                continue
            else:
                if ischeck2 < res:
                    if arr[i][j] == 'B':
                        res = ischeck2
                    else:
                        res = ischeck2 + 1
    print(tc, res)
|
[
"hyunsukr1013@gmail.com"
] |
hyunsukr1013@gmail.com
|
ed3f0747a03be29e372e99f9cf90afa6a0bcb387
|
19f698ab74cba74ae52c780f5986d273fb319308
|
/SWExpertAcademy/D5/1242.py
|
e82ef68270ba4d0834822e4878c2c2d888764f6f
|
[] |
no_license
|
naye0ng/Algorithm
|
15023f1070eb7cc5faca9cf7154af2ecffab92c2
|
1e8848e3e2574b01dc239212ea084b0a4837bc03
|
refs/heads/master
| 2021-06-25T14:18:46.117411
| 2020-10-16T10:47:37
| 2020-10-16T10:47:37
| 149,326,399
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,505
|
py
|
"""
1242.암호코드 스캔
"""
import sys
sys.stdin = open('input.txt','r')
match = [[3,2,1,1],[2,2,2,1],[2,1,2,2],[1,4,1,1],[1,1,3,2],[1,2,3,1],[1,1,1,4],[1,3,1,2],[1,2,1,3],[3,1,1,2]]
T= int(input())
for test_case in range(1, 1+T):
N, M = map(int, input().split())
# 중복되는 암호문 입력 안받음
empty = str(0)*M
arr = [0]*N
n = -1
for _ in range(N) :
local = input()
if local != empty :
if n== -1 or arr[n] != local :
n+=1
arr[n] = local
n +=1
arr = arr[:n]
# 이진수 변환
for x in range(n) :
arr[x] = arr[x].replace('0', '0000')
arr[x] = arr[x].replace('1', '0001')
arr[x] = arr[x].replace('2', '0010')
arr[x] = arr[x].replace('3', '0011')
arr[x] = arr[x].replace('4', '0100')
arr[x] = arr[x].replace('5', '0101')
arr[x] = arr[x].replace('6', '0110')
arr[x] = arr[x].replace('7', '0111')
arr[x] = arr[x].replace('8', '1000')
arr[x] = arr[x].replace('9', '1001')
arr[x] = arr[x].replace('A', '1010')
arr[x] = arr[x].replace('B', '1011')
arr[x] = arr[x].replace('C', '1100')
arr[x] = arr[x].replace('D', '1101')
arr[x] = arr[x].replace('E', '1110')
arr[x] = arr[x].replace('F', '1111')
patt = []
maxPattern = 0
#암호문 찾기
for x in range(n) :
end, start = 0, 0
for y in range(len(arr[x])-1,-1,-1) :
if end == 0 and arr[x][y] == '1':
end = y+1
elif start == 0 and end != 0 and arr[x][y] == '0' :
start = y+1
# 0이 나오더라도 길이가 부족하면 앞쪽 다시 탐색
if (end - start)%56 :
start = 0
else :
lengthP = (end - start)//56
an = arr[x][start:end]
# 패턴의 유효성 검사, 마지막 글자는 항상 1
# 패턴 유효성 검사, 맨 앞의 '0'은 최대 lengthP만큼
is_pattern = True
for i in range(0,len(an),7*lengthP) :
if '1' in an[i :i+lengthP] or an[i+lengthP*7-1] !='1' :
is_pattern = False
break
if is_pattern :
if maxPattern < lengthP :
maxPattern = lengthP
patt.append([lengthP, an])
end = 0
start = 0
# 계속 앞으로 전진!
else :
start = 0
# maxPattern만큼 패턴 딕셔너리 생성
dictmatch = {}
for i in range(1,maxPattern+1) :
for j in range(10) :
dictmatch[str(0)*match[j][0]*i+str(1)*match[j][1]*i+str(0)*match[j][2]*i+str(1)*match[j][3]*i] = str(j)
# 중복제거한 패턴 리스트
Pattern = []
for p in patt :
pn = ''
for k in range(0,p[0]*56-1,7*p[0]) :
pn += dictmatch[p[1][k:k+7*p[0]]]
if pn not in Pattern :
Pattern.append(pn)
# 올바른 패턴인지 검사
result = 0
for i in range(len(Pattern)) :
pn = list(map(int,Pattern[i].replace('', ' ').split()))
if ((pn[0]+pn[2]+pn[4]+pn[6])*3+(pn[1]+pn[3]+pn[5])+pn[7])%10 == 0:
result += sum(pn)
print('#{} {}'.format(test_case, result))
|
[
"nayeong_e@naver.com"
] |
nayeong_e@naver.com
|
3bfc14d1d230a18045d9e8d9fb084c3c5c9a87a0
|
3fe1b6f36bfd02156f606cf90797d69b18dd19d2
|
/creme/utils/inspect.py
|
bf12fcf5404537a35e4d08489d98d2d98889f465
|
[
"BSD-3-Clause"
] |
permissive
|
mihir-thakkar-ai/creme
|
a19a1975bb462a1a93046b6ea55830e88846cb88
|
008b0c1beb26b36b448fc3d04537e02e66d402b3
|
refs/heads/master
| 2022-12-18T01:15:18.132117
| 2020-09-15T20:17:16
| 2020-09-15T20:17:16
| 296,288,773
| 0
| 0
|
BSD-3-Clause
| 2020-09-17T10:04:27
| 2020-09-17T10:04:26
| null |
UTF-8
|
Python
| false
| false
| 1,404
|
py
|
"""Utilities for inspecting a model's type.
Sometimes we need to check if a model can perform regression, classification, etc. However, for
some models the model's type is only known at runtime. For instance, we can't do
`isinstance(pipeline, base.Regressor)` or `isinstance(wrapper, base.Regressor)`. This submodule
thus provides utilities for determining an arbitrary model's type.
"""
from creme import base
from creme import compose
# TODO: maybe all of this could be done by monkeypatching isintance for pipelines?
# Public API of this module; lists every helper defined below.
__all__ = [
    'extract_relevant',
    'isclassifier',
    'isregressor',
    'ismoclassifier',
    'ismoregressor',
    'istransformer'  # was defined in this module but missing from __all__
]
def extract_relevant(model: base.Estimator):
    """Extracts the relevant part of a model.

    Pipelines are unwrapped to their final step (repeatedly, for nested
    pipelines) so that type checks see the actual estimator.

    Parameters:
        model

    """
    # Iterative unwrapping instead of recursion: peel off pipelines until a
    # plain estimator remains.
    while isinstance(model, compose.Pipeline):
        model = list(model.steps.values())[-1]  # look at last step
    return model
def isclassifier(model):
    """Whether the unwrapped model is a `base.Classifier`."""
    relevant = extract_relevant(model)
    return isinstance(relevant, base.Classifier)


def ismoclassifier(model):
    """Whether the unwrapped model is a `base.MultiOutputClassifier`."""
    relevant = extract_relevant(model)
    return isinstance(relevant, base.MultiOutputClassifier)


def isregressor(model):
    """Whether the unwrapped model is a `base.Regressor`."""
    relevant = extract_relevant(model)
    return isinstance(relevant, base.Regressor)


def istransformer(model):
    """Whether the unwrapped model is a `base.Transformer`."""
    relevant = extract_relevant(model)
    return isinstance(relevant, base.Transformer)


def ismoregressor(model):
    """Whether the unwrapped model is a `base.MultiOutputRegressor`."""
    relevant = extract_relevant(model)
    return isinstance(relevant, base.MultiOutputRegressor)
|
[
"maxhalford25@gmail.com"
] |
maxhalford25@gmail.com
|
58794e3389ada30651487ebcafdf441f1dd0d6f3
|
297497957c531d81ba286bc91253fbbb78b4d8be
|
/testing/web-platform/tests/tools/lint/tests/base.py
|
f624276e3be4c16d8acd1226e8f4e128812cbd2e
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
marco-c/gecko-dev-comments-removed
|
7a9dd34045b07e6b22f0c636c0a836b9e639f9d3
|
61942784fb157763e65608e5a29b3729b0aa66fa
|
refs/heads/master
| 2023-08-09T18:55:25.895853
| 2023-08-01T00:40:39
| 2023-08-01T00:40:39
| 211,297,481
| 0
| 0
|
NOASSERTION
| 2019-09-29T01:27:49
| 2019-09-27T10:44:24
|
C++
|
UTF-8
|
Python
| false
| false
| 295
|
py
|
def check_errors(errors):
    """Assert that every item of `errors` is a well-formed lint error tuple:
    (error_type: str, description: str, path: str, line_number: int | None).
    """
    for error_type, description, path, line_number in errors:
        assert isinstance(error_type, str)
        assert isinstance(description, str)
        assert isinstance(path, str)
        assert line_number is None or isinstance(line_number, int)
|
[
"mcastelluccio@mozilla.com"
] |
mcastelluccio@mozilla.com
|
5547d118a16dcd0b4cebc4a30404d27ad74d3fe2
|
fca6a986e735843b667e3714b11cafaed0f390e8
|
/fastai2/text/models/core.py
|
235083d04a808132f5d60a270d60480a07014007
|
[
"Apache-2.0"
] |
permissive
|
mbrukman/fastai2
|
2c631b515a13738800b5bcce781be6dac807368a
|
404383912503b69b244e175f3b26a06b532ee4bd
|
refs/heads/master
| 2020-11-27T17:59:33.125318
| 2019-12-21T08:22:48
| 2019-12-21T08:22:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,013
|
py
|
#AUTOGENERATED! DO NOT EDIT! File to edit: dev/33_text.models.core.ipynb (unless otherwise specified).
__all__ = ['LinearDecoder', 'SequentialRNN', 'get_language_model', 'SentenceEncoder', 'masked_concat_pool',
'PoolingLinearClassifier', 'get_text_classifier']
#Cell
from ...data.all import *
from ..core import *
from .awdlstm import *
#Cell
# Per-architecture metadata: the config key naming the hidden size, the
# pretrained-weight URLs (forward/backward) where available, and the
# config/split functions for the LM and classifier variants.
_model_meta = {AWD_LSTM: {'hid_name':'emb_sz', 'url':URLs.WT103_FWD, 'url_bwd':URLs.WT103_BWD,
                   'config_lm':awd_lstm_lm_config, 'split_lm': awd_lstm_lm_split,
                   'config_clas':awd_lstm_clas_config, 'split_clas': awd_lstm_clas_split},
               AWD_QRNN: {'hid_name':'emb_sz',
                   'config_lm':awd_qrnn_lm_config, 'split_lm': awd_lstm_lm_split,
                   'config_clas':awd_qrnn_clas_config, 'split_clas': awd_lstm_clas_split},}
# Transformer: {'hid_name':'d_model', 'url':URLs.OPENAI_TRANSFORMER,
#               'config_lm':tfmer_lm_config, 'split_lm': tfmer_lm_split,
#               'config_clas':tfmer_clas_config, 'split_clas': tfmer_clas_split},
# TransformerXL: {'hid_name':'d_model',
#                'config_lm':tfmerXL_lm_config, 'split_lm': tfmerXL_lm_split,
#                'config_clas':tfmerXL_clas_config, 'split_clas': tfmerXL_clas_split}}
#Cell
class LinearDecoder(Module):
    "To go on top of a RNNCore module and create a Language Model."
    initrange=0.1  # half-width of the uniform init for the decoder weights
    def __init__(self, n_out, n_hid, output_p=0.1, tie_encoder=None, bias=True):
        self.decoder = nn.Linear(n_hid, n_out, bias=bias)
        self.decoder.weight.data.uniform_(-self.initrange, self.initrange)
        self.output_dp = RNNDropout(output_p)
        if bias: self.decoder.bias.data.zero_()
        # Weight tying: share the encoder's embedding matrix with the decoder.
        if tie_encoder: self.decoder.weight = tie_encoder.weight
    def forward(self, input):
        # `input` is the (raw_outputs, outputs) pair produced by the encoder;
        # only the last layer's (dropped-out) output is decoded.
        raw_outputs, outputs = input
        decoded = self.decoder(self.output_dp(outputs[-1]))
        return decoded, raw_outputs, outputs
#Cell
class SequentialRNN(nn.Sequential):
    "A sequential module that passes the reset call to its children."
    def reset(self):
        # Invoke `reset` on every child that defines one; others get `noop`.
        for child in self.children():
            reset_fn = getattr(child, 'reset', noop)
            reset_fn()
#Cell
def get_language_model(arch, vocab_sz, config=None, drop_mult=1.):
    "Create a language model from `arch` and its `config`."
    meta = _model_meta[arch]
    # Copy so the module-level default config is never mutated.
    config = ifnone(config, meta['config_lm']).copy()
    # Scale every dropout probability (keys ending in '_p') by drop_mult.
    for k in config.keys():
        if k.endswith('_p'): config[k] *= drop_mult
    tie_weights,output_p,out_bias = map(config.pop, ['tie_weights', 'output_p', 'out_bias'])
    init = config.pop('init') if 'init' in config else None
    encoder = arch(vocab_sz, **config)
    # Optionally tie the decoder weights to the encoder's embedding matrix.
    enc = encoder.encoder if tie_weights else None
    decoder = LinearDecoder(vocab_sz, config[meta['hid_name']], output_p, tie_encoder=enc, bias=out_bias)
    model = SequentialRNN(encoder, decoder)
    return model if init is None else model.apply(init)
#Cell
def _pad_tensor(t, bs, val=0.):
if t.size(0) < bs: return torch.cat([t, val + t.new_zeros(bs-t.size(0), *t.shape[1:])])
return t
#Cell
class SentenceEncoder(Module):
    "Create an encoder over `module` that can process a full sentence."
    def __init__(self, bptt, module, pad_idx=1): store_attr(self, 'bptt,module,pad_idx')
    def _concat(self, arrs, bs):
        # Pad each chunk's batch dim to `bs`, then join the chunks along the
        # sequence dimension, layer by layer.
        return [torch.cat([_pad_tensor(l[si],bs) for l in arrs], dim=1) for si in range(len(arrs[0]))]
    def reset(self): getattr(self.module, 'reset', noop)()
    def forward(self, input):
        # Process the (bs, sl) token batch in windows of `bptt` tokens,
        # resetting the wrapped module's state once per sentence.
        bs,sl = input.size()
        self.reset()
        raw_outputs,outputs,masks = [],[],[]
        for i in range(0, sl, self.bptt):
            r,o = self.module(input[:,i: min(i+self.bptt, sl)])
            # Remember which positions are padding so pooling can skip them.
            masks.append(input[:,i: min(i+self.bptt, sl)] == self.pad_idx)
            raw_outputs.append(r)
            outputs.append(o)
        return self._concat(raw_outputs, bs),self._concat(outputs, bs),torch.cat(masks,dim=1)
#Cell
def masked_concat_pool(outputs, mask):
    "Pool `MultiBatchEncoder` outputs into one vector [last_hidden, max_pool, avg_pool]"
    last_layer = outputs[-1]
    # Count of non-padding positions per sequence.
    lens = last_layer.size(1) - mask.long().sum(dim=1)
    mask3 = mask[:, :, None]
    # Mean over real tokens only: zero out padding before summing.
    avg_pool = last_layer.masked_fill(mask3, 0).sum(dim=1)
    avg_pool = avg_pool.div_(lens.type(avg_pool.dtype)[:, None])
    # Max over real tokens: padding forced to -inf so it never wins.
    max_pool = last_layer.masked_fill(mask3, -float('inf')).max(dim=1)[0]
    batch_idx = torch.arange(0, last_layer.size(0))
    last_hidden = last_layer[batch_idx, lens - 1]
    return torch.cat([last_hidden, max_pool, avg_pool], 1)  # Concat pooling.
#Cell
class PoolingLinearClassifier(Module):
    "Create a linear classifier with pooling"
    def __init__(self, dims, ps):
        # dims: layer widths (input .. output); ps: one dropout prob per layer.
        mod_layers = []  # NOTE(review): unused leftover variable
        if len(ps) != len(dims)-1: raise ValueError("Number of layers and dropout values do not match.")
        # ReLU between hidden layers; no activation after the final layer.
        acts = [nn.ReLU(inplace=True)] * (len(dims) - 2) + [None]
        layers = [LinBnDrop(i, o, p=p, act=a) for i,o,p,a in zip(dims[:-1], dims[1:], ps, acts)]
        self.layers = nn.Sequential(*layers)
    def forward(self, input):
        raw,out,mask = input
        x = masked_concat_pool(out, mask)
        x = self.layers(x)
        # Encoder activations are passed through for regularization callbacks.
        return x, raw, out
#Cell
def get_text_classifier(arch, vocab_sz, n_class, seq_len=72, config=None, drop_mult=1., lin_ftrs=None,
                        ps=None, pad_idx=1):
    "Create a text classifier from `arch` and its `config`, maybe `pretrained`"
    meta = _model_meta[arch]
    # Copy so the module-level default config is never mutated.
    config = ifnone(config, meta['config_clas']).copy()
    # Scale every dropout probability (keys ending in '_p') by drop_mult.
    for k in config.keys():
        if k.endswith('_p'): config[k] *= drop_mult
    if lin_ftrs is None: lin_ftrs = [50]
    if ps is None: ps = [0.1]*len(lin_ftrs)
    # Head input is 3x the hidden size: [last_hidden, max_pool, avg_pool].
    layers = [config[meta['hid_name']] * 3] + lin_ftrs + [n_class]
    ps = [config.pop('output_p')] + ps
    init = config.pop('init') if 'init' in config else None
    encoder = SentenceEncoder(seq_len, arch(vocab_sz, **config), pad_idx=pad_idx)
    model = SequentialRNN(encoder, PoolingLinearClassifier(layers, ps))
    return model if init is None else model.apply(init)
|
[
"sylvain.gugger@gmail.com"
] |
sylvain.gugger@gmail.com
|
c98395864af6a107b993684c44803e2fb2b6fca7
|
2181d99f84f4f7556efb13ac203a533fc87f9acd
|
/tools/CodeGenerators/codegen/app/src/generated/Gui/ComboBoxes/LithologicUnitInBedComboBox.py
|
9ed3aae608989a0c741cc39df512c59f76b25e5c
|
[] |
no_license
|
BackupTheBerlios/profilelogger-svn
|
0f80fd8f63c3b413dc06ecc6d2be623f8ae2cc8c
|
5ba067205316b0955f0c8876dd8b0f10672abc0a
|
refs/heads/master
| 2020-05-18T16:33:31.154612
| 2010-04-24T16:51:28
| 2010-04-24T16:51:28
| 40,822,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
from InBedComboBox import *
class LithologicUnitInBedComboBox(InBedComboBox):
    # NOTE(review): generated code — the managementDialogClass/finderClass
    # parameters are accepted but ignored in favour of the hard-coded classes,
    # and the call targets DataSelectionComboBox.__init__ (presumably an
    # ancestor re-exported by the star import) rather than the direct base
    # InBedComboBox.  Confirm against the code-generator templates.
    def __init__(self, parent, managementDialogClass, finderClass):
        DataSelectionComboBox.__init__(self,
                                       parent,
                                       LithologicUnitInBedManagementDialog,
                                       LithologicUnitInBedFinder)
|
[
"jolo@28dda339-3e7f-0410-9691-cab309f6cb01"
] |
jolo@28dda339-3e7f-0410-9691-cab309f6cb01
|
98e8617fddd53b570991cb56f984fbf05598530a
|
34a2046714261a5e42692ab7a656eec708395243
|
/appcode/mri/results/create_nifti_from_raw_data.py
|
893307757154dbb38935566262c95113f314919d
|
[] |
no_license
|
royshau/thesis
|
a64893ca25981bd8fff394161829d0147934a324
|
a02a8cfea9e00bd98289419eb9f7fb78c36c028e
|
refs/heads/master
| 2021-06-03T19:24:21.110455
| 2020-07-22T13:30:59
| 2020-07-22T13:30:59
| 115,911,615
| 0
| 0
| null | 2018-01-01T10:16:46
| 2018-01-01T10:03:25
|
Python
|
UTF-8
|
Python
| false
| false
| 6,904
|
py
|
# !/home/ohadsh/Tools/anaconda/bin/python
# Build NIfTI volumes from raw MRI k-space data: ground truth, network
# predictions, and optional zero-padded / compressed-sensing variants.
# NOTE(review): this is Python 2 source (see the `print` statement in the
# exception handler below).
import numpy as np
import os
from appcode.mri.k_space.k_space_data_set import KspaceDataSet
from appcode.mri.data.write_nifti_data import write_nifti_data
from appcode.mri.data.mri_data_base import MriDataBase
from common.files_IO.file_handler import FileHandler
from appcode.mri.k_space.utils import get_image_from_kspace
from appcode.mri.k_space.data_creator import get_rv_mask
file_names = ['k_space_real_gt', 'k_space_imag_gt', 'meta_data']
import argparse
# Fixed geometry/dtype of one predicted slice in the .bin prediction files.
predict_info = {'width': 256, 'height': 256, 'channels': 1, 'dtype': 'float32'}
predict_names = {'real': '000000.predict_real.bin', 'imag': '000000.predict_imag.bin'}
import matplotlib.pyplot as plt
# Column layout of the meta_data memmap.
META_KEYS = {'hash':0, 'slice': 1, 'bit_pix':2, 'aug':3, 'norm_factor':4}
MASKS_DIR = '/media/ohadsh/Data/ohadsh/work/matlab/thesis/'
def create_nifti_from_raw_data(data_dir, predict_path, output_path, data_base, batch_size, num_of_cases=-1,
                               tt='train', source='k_space', random_sampling_factor=None, cs_path=None):
    """
    Assumption - predict on all examples exists
    This script create nifti files from k-space raw data, original and predictions.
    :param data_dir: directory holding the k-space data set files
    :param predict_path: directory holding the prediction .bin files
    :param output_path: root directory for the generated NIfTI files
    :param data_base: data-base name used to resolve case names/paths
    :param batch_size: stack size for the KspaceDataSet reader
    :param num_of_cases: number of cases to export (-1 = all)
    :param tt: split to read ('train' / 'test')
    :param random_sampling_factor: if given, also write a zero-padded volume
        using the matching sampling mask
    :param cs_path: compressed sensing predicted path
    :return:
    """
    db = MriDataBase(data_base)
    f_predict = {}
    cs_pred = None
    # Memory-mapped readers over the network's real/imag prediction files.
    for name_pred in ['real', 'imag']:
        f_predict[name_pred] = FileHandler(path=os.path.join(predict_path, predict_names[name_pred]),
                                           info=predict_info, read_or_write='read', name=name_pred, memmap=True)
    if cs_path is not None:
        cs_pred = FileHandler(path=cs_path, info=predict_info, read_or_write='read', name='CS', memmap=True)
        # write_nifti_data(cs_pred.memmap.transpose(2, 1, 0), output_path='/tmp/', name='CS')
    data_set = KspaceDataSet(data_dir, file_names, stack_size=batch_size, shuffle=False, data_base=data_base, memmap=True)
    data_set_tt = getattr(data_set, tt)
    meta_data = data_set_tt.files_obj['meta_data'].memmap
    # Get all unique case hash
    all_cases = np.unique(meta_data[:, META_KEYS['hash']])
    all_cases = all_cases if num_of_cases == -1 else all_cases[:num_of_cases]
    # For each case, create indices, build a nifty from real image and predict
    done = 1
    for case in all_cases:
        try:
            # Slice indices for this case, ordered by slice number.
            idx = get_case_idx(case, meta_data)
            name = db.info['hash_to_case'][case]
            print("Working on case : %s, number= (%d / %d)" % (name, done, num_of_cases))
            # Use the original NIfTI as a header/affine reference if present.
            ref = os.path.join(db.data_path, name, "IXI"+name+".nii.gz")
            if not os.path.exists(ref):
                ref = None
            res_out_path = os.path.join(output_path, name)
            if not os.path.exists(res_out_path):
                os.makedirs(res_out_path)
            # Data creation
            org_real = data_set_tt.files_obj['k_space_real_gt'].memmap[idx]
            org_imag = data_set_tt.files_obj['k_space_imag_gt'].memmap[idx]
            data = get_image_from_kspace(org_real, org_imag).transpose(1, 2, 0)
            # data = norm_data(data)
            write_nifti_data(data, output_path=res_out_path, reference=ref, name=name)
            # Predict from network
            # NOTE(review): ground truth uses transpose(1, 2, 0) but the
            # prediction uses (2, 1, 0) — confirm the orientation is intended.
            pred_real = f_predict['real'].memmap[idx]
            pred_imag = f_predict['imag'].memmap[idx]
            if source == 'k_space':
                data = get_image_from_kspace(pred_real, pred_imag).transpose(2, 1, 0)
            else:
                data = 256*np.abs(pred_real+ 1j * pred_imag).transpose(2, 1, 0)
            # data = norm_data(data)
            write_nifti_data(data, output_path=res_out_path, reference=ref, name=name+"_predict")
            # Zero Padding
            if random_sampling_factor is not None:
                mask = get_rv_mask(mask_main_dir=MASKS_DIR, factor=random_sampling_factor)
                org_real_zero_padded = mask * org_real
                org_imag_zero_padded = mask * org_imag
                data = get_image_from_kspace(org_real_zero_padded, org_imag_zero_padded).transpose(1, 2, 0)
                # data = norm_data(data)
                write_nifti_data(data, output_path=res_out_path, reference=ref, name=name+"_zeroPadding")
            # CS
            if cs_pred is not None:
                data = cs_pred.memmap[idx].transpose(2, 1, 0)
                # data = norm_data(data)
                write_nifti_data(data, output_path=res_out_path, reference=ref, name=name + "_CS")
            done += 1
        # NOTE(review): bare except silently skips any failing case; the
        # Python 2 print statement below pins this file to Python 2.
        except:
            print "BAD: (min, max) = (%d, %d)" % (idx.min(), idx.max())
            continue
def get_case_idx(case_hash, meta_data):
    """ Get case indices given cash hash and meta data memmap
    :param case_hash:
    :param meta_data:
    :return: indices of the case's rows, ordered by slice number
    """
    # Rows belonging to this case, then reordered so slices are ascending.
    matching = np.flatnonzero(meta_data[:, META_KEYS['hash']] == case_hash)
    slice_order = np.argsort(meta_data[matching, META_KEYS['slice']])
    return matching[slice_order]
def norm_data(data):
    """
    Normalize data
    Scales `data` so its maximum becomes 1.0 and casts to float32.
    :param data:
    :return:
    """
    # Multiply by the reciprocal (matches the original arithmetic exactly).
    scale = 1.0 / data.max()
    return (data * scale).astype('float32')
if __name__ == '__main__':
    # Command-line front end for create_nifti_from_raw_data.
    parser = argparse.ArgumentParser(description='TBD.')
    parser.add_argument('--tt', dest='tt', choices=['train', 'test'], default='train', type=str, help='train / test')
    parser.add_argument('--data_dir', dest='data_dir', default='/media/ohadsh/Data/ohadsh/work/data/T1/sagittal/', type=str, help='data directory')
    parser.add_argument('--num_of_cases', dest='num_of_cases', type=int, default=-1, help='number of cases')
    parser.add_argument('--batch_size', dest='batch_size', type=int, default=50, help='mini batch size')
    parser.add_argument('--data_base', dest='data_base', type=str, default='IXI_T1', help='data base name - for file info')
    parser.add_argument('--predict_path', dest='predict_path', type=str, help='run path')
    parser.add_argument('--output_path', dest='output_path', default='./', type=str, help='out path')
    parser.add_argument('--source', dest='source', default='k_space', type=str, help='source')
    parser.add_argument('--random_sampling_factor', dest='random_sampling_factor', type=int, default=None,
                        help='Random sampling factor for zero padding')
    parser.add_argument('--cs_path', dest='cs_path', default=None, type=str, help='CS path')
    args = parser.parse_args()
    create_nifti_from_raw_data(args.data_dir, args.predict_path, args.output_path,
                               args.data_base, args.batch_size, args.num_of_cases, args.tt, args.source,
                               args.random_sampling_factor, args.cs_path)
|
[
"shohad25@gmail.com"
] |
shohad25@gmail.com
|
b65efc5b76e81a98e3d8dbd1d9eeb2f0c675189f
|
3940b4a507789e1fbbaffeb200149aee215f655a
|
/lc/399.EvaluateDivision.py
|
c17e9249c1acf2d9f8603f76a20ff582cbf7453c
|
[] |
no_license
|
akimi-yano/algorithm-practice
|
15f52022ec79542d218c6f901a54396a62080445
|
1abc28919abb55b93d3879860ac9c1297d493d09
|
refs/heads/master
| 2023-06-11T13:17:56.971791
| 2023-06-10T05:17:56
| 2023-06-10T05:17:56
| 239,395,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,013
|
py
|
# 399. Evaluate Division
# Medium
# 2633
# 209
# Add to List
# Share
# You are given equations in the format A / B = k, where A and B are variables represented as strings, and k is a real number (floating-point number). Given some queries, return the answers. If the answer does not exist, return -1.0.
# The input is always valid. You may assume that evaluating the queries will result in no division by zero and there is no contradiction.
# Example 1:
# Input: equations = [["a","b"],["b","c"]], values = [2.0,3.0], queries = [["a","c"],["b","a"],["a","e"],["a","a"],["x","x"]]
# Output: [6.00000,0.50000,-1.00000,1.00000,-1.00000]
# Explanation:
# Given: a / b = 2.0, b / c = 3.0
# queries are: a / c = ?, b / a = ?, a / e = ?, a / a = ?, x / x = ?
# return: [6.0, 0.5, -1.0, 1.0, -1.0 ]
# Example 2:
# Input: equations = [["a","b"],["b","c"],["bc","cd"]], values = [1.5,2.5,5.0], queries = [["a","c"],["c","b"],["bc","cd"],["cd","bc"]]
# Output: [3.75000,0.40000,5.00000,0.20000]
# Example 3:
# Input: equations = [["a","b"]], values = [0.5], queries = [["a","b"],["b","a"],["a","c"],["x","y"]]
# Output: [0.50000,2.00000,-1.00000,-1.00000]
# Constraints:
# 1 <= equations.length <= 20
# equations[i].length == 2
# 1 <= equations[i][0], equations[i][1] <= 5
# values.length == equations.length
# 0.0 < values[i] <= 20.0
# 1 <= queries.length <= 20
# queries[i].length == 2
# 1 <= queries[i][0], queries[i][1] <= 5
# equations[i][0], equations[i][1], queries[i][0], queries[i][1] consist of lower case English letters and digits.
# THIS SOLUTION WORKS !!!
'''
solved it as a graph problem
made an adj_list and process both e1->e2 and e2->e1 with flipped values
traverse the adj_list with seen set, if you find the val, return 1 ; if its not in adj_list, return -1,
keep multiplying the weights and return it if its positive value
'''
class Solution:
    """LeetCode 399 — Evaluate Division, solved as a weighted-graph DFS.

    Each variable is a node; an equation a / b = k contributes the edge
    a -> b with weight k and the reverse edge b -> a with weight 1/k.
    """

    def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
        self.adj_list = {}
        for (num, den), ratio in zip(equations, values):
            # Record both directions so den/num is reachable as 1/ratio.
            self.adj_list.setdefault(num, []).append((den, ratio))
            self.adj_list.setdefault(den, []).append((num, 1/ratio))
        # Fresh `seen` set per query; -1 marks unreachable targets.
        return [self.helper(src, dst, set()) for src, dst in queries]

    def helper(self, cur, target, seen):
        """DFS from `cur`, multiplying edge weights; -1 if `target` unreachable."""
        if cur in seen:
            return -1
        seen.add(cur)
        if cur not in self.adj_list:
            return -1
        if cur == target:
            return 1
        for neighbour, weight in self.adj_list[cur]:
            product = weight * self.helper(neighbour, target, seen)
            # Weights are positive, so any positive product is a real path.
            if product > 0:
                return product
        return -1
|
[
"akimi.mimi.yano@gmail.com"
] |
akimi.mimi.yano@gmail.com
|
c2356c672e81c8d0028769668339da65ff1697aa
|
f47863b3a595cbe7ec1c02040e7214481e4f078a
|
/plugins/waf/webknight.py
|
d46383a64c36e0f5877762772612a77f656d3ac9
|
[] |
no_license
|
gobiggo/0bscan
|
fe020b8f6f325292bda2b1fec25e3c49a431f373
|
281cf7c5c2181907e6863adde27bd3977b4a3474
|
refs/heads/master
| 2020-04-10T20:33:55.008835
| 2018-11-17T10:05:41
| 2018-11-17T10:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
import re
from config import HTTP_HEADER
__product__ = "WebKnight Application Firewall (AQTRONIX)"
def detect(content, **kwargs):
    """Return True if the HTTP response indicates the WebKnight WAF.

    :param content: response body (unused; kept for the plugin interface)
    :param kwargs: expects `headers` (mapping) and `status` (int or None)
    :returns: True when WebKnight is detected, False otherwise
    """
    headers = kwargs.get("headers", None)
    status = kwargs.get("status", None)
    detection_schema = (
        re.compile(r"\bwebknight", re.I),
        re.compile(r"webknight", re.I)
    )
    if status is not None:
        # Blocked requests answer with the non-standard 999 status code
        # and a literal "WebKnight" Server header.
        if status == 999 and headers.get(HTTP_HEADER.SERVER, "") == "WebKnight":
            return True
    for detection in detection_schema:
        if detection.search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
            return True
    # Fix: previously the function fell off the end and returned None on a
    # miss; make the negative result an explicit (still falsy) False.
    return False
|
[
"zer0i3@aliyun.com"
] |
zer0i3@aliyun.com
|
4c18b17007a61eeb0415eb384a1b4980e476f0ba
|
0d01d65ed67faf09b31b6333013393194b4a25d0
|
/twitter.py
|
1bfef6c0550af908369c3cbe09c1674970e8f41f
|
[] |
no_license
|
AshithaL/twitter-streaming
|
0d5b16c56c92810496f6b635b03024679cc2c10b
|
993b6e87fd1d546dcdde5c12db7e49791a5f5890
|
refs/heads/master
| 2022-11-09T02:52:17.843457
| 2020-06-15T04:43:41
| 2020-06-15T04:43:41
| 270,915,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,221
|
py
|
import socket
import sys
import requests
import requests_oauthlib
import json
from sql_connection import conn
# Replace the values below with yours
# OAuth1 credentials for the Twitter streaming API.
# NOTE(review): live secrets are hardcoded and committed — they should be
# rotated and loaded from environment variables or a config file instead.
ACCESS_TOKEN = '1252513694992330753-YpQY1SlyBWIN66ngHXeM8hcZWvvTeZ'
ACCESS_SECRET = 'reoC4xZdgp3bqRPjTC2ptxn00vUPrftWlhprHOBIp29jA'
CONSUMER_KEY = 'eLsiPuE8adtsJUt8hr0iMku3b'
CONSUMER_SECRET = 'p03sqgt8V8TYZbueGzA3SQPZXI5xuhpU5DkPj4fOGyra8YTiXn'
# Signs outgoing requests with the credentials above.
auth_handler = requests_oauthlib.OAuth1(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_SECRET)
def get_tweets():
    """Open a streaming connection to Twitter's statuses/filter endpoint.

    Filters on two geographic bounding boxes and the '#' track term, and
    returns the (streaming) `requests` response object.
    """
    base_url = 'https://stream.twitter.com/1.1/statuses/filter.json'
    query_data = [('locations', '-122.75,36.8,-121.75,37.8,-74,40,-73,41'),
                  ('track', '#')]
    encoded = '&'.join(str(key) + '=' + str(value) for key, value in query_data)
    query_url = base_url + '?' + encoded
    response = requests.get(query_url, auth=auth_handler, stream=True)
    print(query_url, response)
    return response
def send_tweets_to_spark(http_resp, tcp_connection):
    """Forward hashtag tweets from a streaming response to a TCP client.

    For each streamed JSON tweet, extracts the first '#' word, the
    creation time and the user location, sends them '&%'-joined over
    `tcp_connection`, and records the row in the database.

    :param http_resp: streaming `requests` response from `get_tweets`
    :param tcp_connection: connected socket to the Spark consumer
    """
    for lines in http_resp.iter_lines():
        try:
            full_tweet = json.loads(lines)
            words = full_tweet['text'].split(' ')
            tweet = ''
            for w in words:
                if '#' in w:
                    i = "".join(w.split(' '))
                    tweet += i
                    break
            time = full_tweet['created_at']
            location = "".join(full_tweet["user"]["location"].encode("utf-8"))
            # Fix: was `tweet is not ''` — identity comparison against a
            # literal is unreliable; compare by value instead.
            if tweet != '':
                # NOTE(review): this concatenation mixes encoded bytes and
                # str and only works on Python 2 — confirm target runtime.
                tweet_text = tweet.encode('utf-8') + '&%' + location + '&%' + time
                print("Tweet Text: " + tweet_text)
                tcp_connection.send(tweet_text + '\n')
                # Fix: the INSERT listed 3 columns but 5 placeholders,
                # which raises on every execute; use matching placeholders.
                # NOTE(review): module-level `conn` is rebound to a socket
                # in the startup code below — verify which `conn` this
                # was meant to use.
                conn.execute(
                    'INSERT INTO tweet (time, tweet, location) VALUES (%s,%s,%s)',
                    (str(time), tweet, str(location)))
                conn.commit()
        # Narrowed from a bare `except:` so Ctrl-C still interrupts;
        # malformed stream lines are still logged and skipped.
        except Exception:
            e = sys.exc_info()[0]
            print("Error: %s" % e)
# Listen for a single Spark consumer, then stream filtered tweets to it.
TCP_IP = "localhost"
TCP_PORT = 9009
# NOTE(review): this rebinding shadows `conn` imported from sql_connection;
# after accept() below, `conn` is a socket, so the conn.execute/commit calls
# in send_tweets_to_spark would hit the socket — confirm which was intended.
conn = None
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)  # accept exactly one consumer connection
print("Waiting for TCP connection...")
conn, addr = s.accept()  # blocks until the Spark job connects
print("Connected... Starting getting tweets.")
resp = get_tweets()
send_tweets_to_spark(resp, conn)
|
[
"you@example.com"
] |
you@example.com
|
66c5b2d003be821beb2127b0ffef1023020ef83a
|
4b44a299bafbd4ca408ce1c89c9fe4a449632783
|
/python3/14_Code_Quality/04_mocking/example_5/test_mymodule2.py
|
fee202b03daf17899388a56cf126c60e665c2088
|
[] |
no_license
|
umunusb1/PythonMaterial
|
ecd33d32b2de664eaaae5192be7c3f6d6bef1d67
|
1e0785c55ccb8f5b9df1978e1773365a29479ce0
|
refs/heads/master
| 2023-01-23T23:39:35.797800
| 2020-12-02T19:29:00
| 2020-12-02T19:29:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from mymodule import rm
from unittest import TestCase, mock
class RmTestCase(TestCase):
    """Unit tests for mymodule.rm."""

    def test_rm(self):
        # Patch the os module as seen by mymodule so no real file is
        # removed; context-manager form instead of the decorator form.
        with mock.patch('mymodule.os') as mock_os:
            rm("any path")
            # rm must delegate to os.remove with the exact path given.
            mock_os.remove.assert_called_with("any path")
|
[
"uday3prakash@gmail.com"
] |
uday3prakash@gmail.com
|
31f19af81a8c9456a85f2bb8d9ab67906b28f744
|
a034d4ba39789e4a351112c46dd04a38180cd06c
|
/appengine/findit/findit_v2/model/atomic_failure.py
|
8c09489dd8dc8824de2e446a083c0238f3c8698b
|
[
"BSD-3-Clause"
] |
permissive
|
asdfghjjklllllaaa/infra
|
050ad249ab44f264b4e2080aa9537ce74aafb022
|
8f63af54e46194cd29291813f2790ff6e986804d
|
refs/heads/master
| 2023-01-10T21:55:44.811835
| 2019-07-01T14:03:32
| 2019-07-01T14:03:32
| 194,691,941
| 1
| 0
|
BSD-3-Clause
| 2023-01-07T07:12:37
| 2019-07-01T14:45:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,725
|
py
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import ndb
from findit_v2.model.gitiles_commit import Culprit
class FileInFailureLog(ndb.Model):
  """Class for a file mentioned in failure log.

  Embedded (LocalStructuredProperty) inside AtomicFailure.files.
  """
  # normalized file path.
  path = ndb.StringProperty(indexed=False)
  # Mentioned line numbers of the file in failure log.
  line_numbers = ndb.IntegerProperty(repeated=True, indexed=False)
class AtomicFailure(ndb.Model):
  """Base Class for an atom failure.

  Atom failure means failures that cannot be further divided.
  - In compile failure atom failure is a failed compile target.
  - In test failure atom failure is a failed test.

  Atom failures in the same build have the same parent (the build entity).
  """
  # Full step name.
  step_ui_name = ndb.StringProperty()

  # Id of the build in which this atom failure occurred the first time in
  # a sequence of consecutive failed builds.
  # For example, if a test passed in build 100, and failed in builds 101 - 105,
  # then for atom failures of builds 101 - 105, their first_failed_build_id
  # will all be id of build 101.
  # First_failed_build_id can also be used to find the analysis on the
  # failure: analysis only runs for the first time failures, so using the
  # first_failed_build_id can get to the analysis.
  first_failed_build_id = ndb.IntegerProperty()

  # Id of the build in which this atom run (targets or test) was a pass and
  # since the next build, it kept not passing (can failed, not run, or end
  # with other status).
  last_passed_build_id = ndb.IntegerProperty()

  # Id of the first build forming the group.
  # Whether or how to group failures differs from project to project.
  # So this value could be empty.
  failure_group_build_id = ndb.IntegerProperty()

  # Key to the culprit commit found by rerun based analysis.
  # There should be only one culprit for each failure.
  culprit_commit_key = ndb.KeyProperty(Culprit)

  # Key to the suspected commit found by heuristic analysis.
  # There could be multiple suspects found for each failure.
  suspect_commit_key = ndb.KeyProperty(Culprit, repeated=True)

  # Optional information for heuristic analysis.
  # Mentioned files in failure log for the failure.
  files = ndb.LocalStructuredProperty(FileInFailureLog, repeated=True)

  @property
  def build_id(self):
    """Gets the id of the build that this failure belongs to."""
    return self.key.parent().id()

  @classmethod
  def Create(cls,
             failed_build_key,
             step_ui_name,
             first_failed_build_id=None,
             last_passed_build_id=None,
             failure_group_build_id=None,
             files=None):  # pragma: no cover
    """Creates (without persisting) an AtomicFailure entity.

    :param failed_build_key: ndb key of the build this failure belongs to.
    :param step_ui_name: full step name.
    :param files: optional dict mapping file path -> list of line numbers.
    """
    instance = cls(step_ui_name=step_ui_name, parent=failed_build_key)
    files_objs = []
    if files:
      # Fix: was the Python-2-only dict.iteritems(); items() behaves the
      # same here and also works under Python 3.
      for path, line_numbers in files.items():
        files_objs.append(
            FileInFailureLog(path=path, line_numbers=line_numbers))
      instance.files = files_objs

    instance.first_failed_build_id = first_failed_build_id
    instance.last_passed_build_id = last_passed_build_id
    instance.failure_group_build_id = failure_group_build_id
    return instance

  def GetFailureIdentifier(self):
    """Returns the identifier for the failure within its step.

    Returns:
      (list): information to identify a failure.
      - For compile failures, it'll be the output_targets.
      - For test failures, it'll be the [test_name].
    """
    raise NotImplementedError

  def GetMergedFailure(self):
    """Gets the most up-to-date merged_failure for the current failure."""
    raise NotImplementedError
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
176f55c02c04c05f69692175fb1e2ad43a67c3e1
|
ac39baffc572b72ddd4d25617014a51522ee30a8
|
/challenge29-34/simpledu/forms.py
|
dcdb80fff200e1000ecdcd852c6560cf019f5e08
|
[] |
no_license
|
Yao-Phoenix/challenge
|
01d72a63eb6c144bb59cd4d5f658e170c8ad0092
|
d5ce1659f47cbe5295f65b7ac05ca25c79955f00
|
refs/heads/master
| 2020-09-24T17:42:02.380190
| 2020-02-17T03:15:25
| 2020-02-17T03:15:25
| 225,810,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,259
|
py
|
#!/usr/bin/env python3
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import Length, Email, EqualTo, DataRequired, URL, NumberRange,Regexp
from simpledu.models import db, User, Course, Live
from wtforms import ValidationError, TextAreaField, IntegerField
class RegisterForm(FlaskForm):
    """Sign-up form: username, email, password with confirmation."""
    #username = StringField('用户名', validators=[DataRequired(), Regexp(r'^[0_9a_zA_Z]{3,24}$', message='用户名只能包含数字和字母, 长度在3到24之间')])
    username = StringField('用户名', validators=[DataRequired(), Length(3, 24)])
    # Length(3, 24)])
    email = StringField('邮箱', validators=[DataRequired(), Email()])
    password = PasswordField('密码', validators=[DataRequired(), Length(6, 24)])
    repeat_password = PasswordField(
        '重复密码', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('提交')
    def create_user(self):
        """Create a User from the form data, persist it, and return it."""
        user = User()
        self.populate_obj(user)
        # NOTE(review): the three assignments below repeat what
        # populate_obj just did for the same fields — likely redundant;
        # confirm whether populate_obj's extra fields (repeat_password,
        # submit) on the model are intended.
        user.username = self.username.data
        user.email = self.email.data
        user.password = self.password.data
        db.session.add(user)
        db.session.commit()
        return user
    def validate_username(self, field):
        # WTForms inline validator: runs automatically for `username`.
        if not field.data.isalnum():
            raise ValidationError('用户名只能包含数字和字母')
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('用户名已经存在')
    def validate_email(self, field):
        # Reject addresses already registered.
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('邮箱已经存在')
class LoginForm(FlaskForm):
    """Login form: username + password with a remember-me flag."""
    username = StringField('用户名', validators=[DataRequired(), Length(3, 24)])
    password = PasswordField('密码', validators=[DataRequired(), Length(6, 24)])
    remember_me = BooleanField('记住我')
    submit = SubmitField('提交')
    # NOTE(review): the name is misspelled ("eamil") and this form has no
    # `email` field, so WTForms never invokes this validator — dead code;
    # confirm whether a username-exists check was intended instead.
    def validate_eamil(self, field):
        if not User.query.filter_by(email=field.data).first():
            raise ValidationError('邮箱未注册')
    def validate_password(self, field):
        # Only flag a wrong password for an existing user; an unknown
        # username silently passes this particular check.
        user = User.query.filter_by(username=self.username.data).first()
        if user and not user.check_password(field.data):
            raise ValidationError('密码错误')
class CourseForm(FlaskForm):
    """Admin form for creating and updating courses."""
    name = StringField('课程名称', validators=[DataRequired(), Length(5, 32)])
    description = TextAreaField(
        '课程简介', validators=[DataRequired(), Length(20, 256)])
    image_url = StringField('封面图片', validators=[DataRequired(), URL()])
    author_id = IntegerField('作者ID', validators=[DataRequired(), NumberRange(
        min=1, message='无效的用户ID')])
    submit = SubmitField('提交')
    def validate_author_id(self, field):
        # The referenced author must exist.
        if not User.query.get(field.data):
            raise ValidationError('用户不存在')
    def create_course(self):
        """Create a Course filled from the form data, persist and return it."""
        course = Course()
        # Fill the course object from the form's field data.
        # Fix: removed leftover debug prints (a separator line and
        # populate_obj.__doc__) that polluted stdout on every creation.
        self.populate_obj(course)
        db.session.add(course)
        db.session.commit()
        return course
    def update_course(self, course):
        """Apply the form data to an existing course, persist and return it."""
        self.populate_obj(course)
        db.session.add(course)
        db.session.commit()
        return course
class LiveForm(FlaskForm):
    """Admin form for creating and updating live broadcasts."""
    name = StringField('直播名称', validators=[DataRequired(), Length(1, 256)])
    user_id = IntegerField('用户ID', validators=[DataRequired(), NumberRange(min=1, message=('无效的用户ID'))])
    submit = SubmitField('提交')
    def validate_user_id(self, field):
        # Fix: validate the value WTForms hands in (`field.data`) instead
        # of re-reading self.user_id.data — same value today, but this is
        # consistent with CourseForm.validate_author_id and stays correct
        # if the hook is ever reused.
        if not User.query.get(field.data):
            raise ValidationError('用户不存在')
    def create_live(self):
        """Create a Live filled from the form data, persist and return it."""
        live = Live()
        self.populate_obj(live)
        db.session.add(live)
        db.session.commit()
        return live
    def update_live(self, live):
        """Apply the form data to an existing live entry, persist and return it."""
        self.populate_obj(live)
        db.session.add(live)
        db.session.commit()
        return live
class MessageForm(FlaskForm):
    """Admin form for broadcasting a backend message."""
    text = StringField('发送后台消息', validators=[DataRequired(), Length(1, 256)])
    submit = SubmitField('提交')
|
[
"493867456@qq.com"
] |
493867456@qq.com
|
ae0708cc0342891c5fb01dace708ffbc954432d3
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/021_module_collection/namedtuple/_exercises/namedtuple_002_Other Ways to Specify Field Names_template.py
|
49045eb80045bf9453e1f2c3957cb3e8c0bb1e29
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,796
|
py
|
# from collections ____ n_t_
#
# # Other Ways to Specify Field Names
# # There are a number of ways we can specify the field names for the named tuple:
# # we can provide a sequence of strings containing each property name
# # we can provide a single string with property names separated by whitespace or a comma
#
# Circle _ n_t_('Circle' 'center_x' 'center_y' 'radius'
# circle_1 _ C_ 0 0 10
# circle_2 _ C_ c._x_10 c._y_20 r.._100
# print c_1
# # Circle(center_x=0, center_y=0, radius=10)
#
# print c.._2
# # Circle(center_x=10, center_y=20, radius=100)
#
# # Or we can do it this way:
#
# City _ n_t_ 'City' 'name country population'
# new_york _ C__ 'New York' 'USA' 8_500_000
# print(n._y.
# # City(name='New York', country='USA', population=8500000)
#
# # This would work equally well:
#
# Stock _ n_t_ 'Stock' 'symbol, year, month, day, open, high, low, close'
# djia _ S.. 'DJIA', 2018, 1, 25, 26_313, 26_458, 26_260, 26_393
# print d...
# # Stock(symbol='DJIA', year=2018, month=1, day=25, open=26313, high=26458, low=26260, close=26393)
#
# # In fact, since whitespace can be used we can even use a multi-line string!
#
# Stock _ n_t_ 'Stock', '''symbol
# year month day
# open high low close'''
# djia _ S__ 'DJIA', 2018, 1, 25, 26_313, 26_458, 26_260, 26_393
# print d..
# # Stock(symbol='DJIA', year=2018, month=1, day=25, open=26313, high=26458, low=26260, close=26393)
#
# # Accessing Items in a Named Tuple
# # The major advantage of named tuples are that, as the name suggests, we can access the properties (fields)
# # of the tuple by name:
#
# # pt1 # ERROR NameError: name 'pt1' is not defined
# # pt1.x
# # 10
#
# print c.._1
# # Circle(center_x=0, center_y=0, radius=10)
#
# print c.._1.r..
# # 10
#
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
40ad9596e5995a4fe0ac18d2fdc15a8855e1aa5a
|
bd62843278ffc297ef8f6d75a931f1f4ca4caaa7
|
/exercises/friends_family/ff_dictionary/friends_dk_mod.py
|
e7db5bf1184a32c5d7919883f7ca191f03d5850c
|
[] |
no_license
|
raysmith619/Introduction-To-Programming
|
d3bae042b4fc17bd56e8631a4d660233d8cd165b
|
bedc16eb5f6db0ad3b313355df6d51b5161c3835
|
refs/heads/master
| 2023-07-19T08:43:41.229893
| 2023-07-15T19:22:28
| 2023-07-15T19:22:28
| 132,622,195
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,228
|
py
|
#friends_dk_mod.py 13Oct2020 crs
# Adapted from friends_mod.py
"""
A friends "module" which can be used by other programs
via from friends_mod import *
"""
my_friends = {} # Initialize dictionary of friends(names) as an empty list
def list_friends():
    """ list friends
    """
    # Build the comma-separated line first, then print it in one call
    # (same output as the original piecewise printing).
    line = ", ".join(name for name in my_friends)
    print("friends: " + line)
def test_list_friends():
    """ Testing list_friends
    """
    global my_friends  # rebinding the module-level dict requires global
    print("\n=============\ntest_list_friends")
    my_friends = {"fa":1, "fb":1, "fc": 1}
    list_friends()
def add_one_friend(friend):
    """ Adds one friend to our list
    :friend: friend's name
    """
    # Fix: dropped the `global my_friends` declaration — it is only
    # needed when REBINDING the name; mutating the existing dict does
    # not require it (the old comment claiming it was REQUIRED was wrong).
    print("add_one_friend(",
        friend, ")", sep="")
    my_friends[friend] = friend # Add to list (replaces)
    list_friends()
def add_friends(*friends):
    """ Add zero or more friends
    :*friends: zero or more friend names
    """
    print("\nadd_friends(", *friends, ")") # forward the name list to print
    for name in friends: # each positional argument is one friend
        add_one_friend(name)
def is_friend(possible):
    """ Check if possible is a friend, that is in my_friends
    :possible: name of possible friend
    :returns: True if possible is a friend
    """
    # Idiom: the membership test already yields the boolean the original
    # if/return-True/return-False pattern computed.
    return possible in my_friends
"""
Do testing
"""
def test_add_one_friend():
    """ Test, or at least exercise, add_one_friend function
    """
    global my_friends  # rebinding the module-level dict requires global
    print("\n=============\ntest_add_one_friend")
    my_friends = {} # Start test with empty
    add_one_friend("tom")
    add_one_friend("joe")
def test_add_friends():
    """ Test, or at least exercise, add_friends function
    """
    global my_friends  # rebinding the module-level dict requires global
    print("\n=============\ntest_add_friends()")
    my_friends = {} # Start test with empty
    add_friends("tom")                 # single name
    add_friends("joe", "mary", "ray")  # several names at once
def test_is_friend_ck(possible, expect=True):
    """ Helper function check if test passes
    :possible: possible friend
    :expect: expected value (True,False)
            default: True if not present
    """
    print("test_is_friend_ck:", possible, "expect=", expect, end="")
    outcome = is_friend(possible)
    # Same messages as before, with the failure branch checked first.
    if outcome != expect:
        print(" FAILED Test result=", outcome, "expected=", expect)
    else:
        print(" Passed Test")
def test_is_friend():
    """ Test is_friend function
    """
    global my_friends  # rebinding the module-level dict requires global
    print("\n=============\ntest_is_friend()")
    print("Set up friends list")
    my_friends = {} # Start test with empty
    add_friends("joe", "mary", "ray")
    print("Check function")
    test_is_friend_ck("joe") # Check if True as expected
    test_is_friend_ck("marty", expect=False) # Check if False
    test_is_friend_ck("mary", expect=True) # Ck if True explicit
    print("Test the testing - this should fail the test.")
    test_is_friend_ck("alex") # Should fail this!
"""
This type of test can be placed
in a module to facilitate "self-testing"
because it gets executed if/when the file gets
run by itself
"""
# Run the module's self-tests when executed directly (not on import).
if __name__ == "__main__":
    print("Self test", __file__)
    test_list_friends()
    test_add_one_friend()
    test_add_friends()
    test_is_friend()
|
[
"noreply@github.com"
] |
raysmith619.noreply@github.com
|
de46d2d1fb94ab7a5c96224c56459fe16cb981cf
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02716/s082816838.py
|
81eab4bb1d4a40d68a77f483f2899018f648da49
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
# Competitive-programming solution: given N integers, maximize a sum under
# a non-adjacency/parity constraint tracked by two DP state vectors.
# NOTE(review): appears to solve "pick about N/2 non-adjacent elements for
# maximum sum"; the exact slack encoded in each DP slot is inferred —
# confirm against the original problem statement before modifying.
N = int(input())
A = list(map(int, input().split()))
# DP state after an odd/even number of processed positions; each slot
# tracks a different amount of remaining "skip" slack.
DP_odd = [0, 0, A[0]]
DP_even = [0, max(A[0], A[1])]
if N >= 3:
    # Extend the odd-length state with the third element.
    DP_odd = [DP_even[0], max(DP_odd[1] + A[2], DP_even[1]), DP_odd[2] + A[2]]
for i in range(3, N):
    if (i + 1) % 2 == 1:
        # Odd prefix length: either take A[i] on an odd state or carry
        # over the best even state.
        DP_odd = [max(DP_odd[0] + A[i], DP_even[0]), max(DP_odd[1] + A[i], DP_even[1]), DP_odd[2] + A[i]]
    else:
        # Even prefix length: symmetric transition from the odd states.
        DP_even = [max(DP_even[0] + A[i], DP_odd[1]), max(DP_even[1] + A[i], DP_odd[2])]
# Answer comes from the middle slot matching N's parity.
if N % 2 == 1:
    ans = DP_odd[1]
else:
    ans = DP_even[1]
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
111534e2e8de66694688967bcfb3a213ec10094b
|
12346be5075d772878a6015053d6eeb4e7227acc
|
/21. Design Patterns/behavioral/template.py
|
b5f260abd7cbf6bd3ca2bef6a2a0ad0bdfa3fa25
|
[
"MIT"
] |
permissive
|
elenaborisova/Python-OOP
|
2a46bfafce868f03481fb699580fb3e60ca4e3bd
|
584882c08f84045b12322917f0716c7c7bd9befc
|
refs/heads/main
| 2023-04-02T17:41:23.440617
| 2021-04-10T13:56:38
| 2021-04-10T13:56:38
| 321,376,083
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
from abc import ABC, abstractmethod
class Storage(ABC):
    """Template method: `save` appends to whatever list a subclass supplies."""

    @abstractmethod
    def get_storage_list(self):
        """Return the backing list that `save` should append to."""
        pass

    def save(self, data):
        # Template step: fetch the subclass-provided list, then append.
        backing = self.get_storage_list()
        backing.append(data)


class SelfListStorage(Storage):
    """Stores items in a list owned by the instance itself."""

    def __init__(self):
        self.list = []

    def get_storage_list(self):
        return self.list


class ProviderListStorage(Storage):
    """Delegates storage to an external provider's list."""

    def __init__(self, list_provider):
        self.list_provider = list_provider

    def get_storage_list(self):
        # The provider decides which list receives the data.
        return self.list_provider.provide_list()
|
[
"elenaborrisova@gmail.com"
] |
elenaborrisova@gmail.com
|
10878b61c5e0ebf2a18f06f4fa888b9efee34475
|
ee904d3335b8fdc5dbb6c260f87dd0e01b7bb605
|
/personal/models.py
|
2a3381d9a1a6aa4c51c481316fcf4cac75a423cd
|
[] |
no_license
|
sudhanshu8917/Techy-Blogger
|
32930136b479635ec5616e44cc48b7d02bce2795
|
f7fd26cb223276bd9c35023c8166243ab430b6b4
|
refs/heads/master
| 2022-04-25T01:03:57.281784
| 2020-04-23T19:46:48
| 2020-04-23T19:46:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
from django.db import models
# Create your models here.
# PRIORITY = [
# ("H","Low"),
# ("L","Medium"),
# ("H","High"),
# ]
# class Question(models.Model):
# tilte =models.CharField(max_length=60)
# question =models.TextField(max_length=400)
# priority =models.CharField(max_length=1,choices=PRIORITY)
# def __str__(self):
# return self.tilte
# class Meta:
# verbose_name = "The Question"
# verbose_name_plural = "Peoples Question"
|
[
"sudhanshuraj8917@gmail.com"
] |
sudhanshuraj8917@gmail.com
|
3e216e3ee3078736267939ddfdd51b2ed51045cd
|
09e5cfe06e437989a2ccf2aeecb9c73eb998a36c
|
/modules/cctbx_project/wxtbx/phil_controls/boolctrl.py
|
eb5df59dfbe39578d721ac70b72c30df2989b55a
|
[
"BSD-3-Clause",
"BSD-3-Clause-LBNL"
] |
permissive
|
jorgediazjr/dials-dev20191018
|
b81b19653624cee39207b7cefb8dfcb2e99b79eb
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
refs/heads/master
| 2020-08-21T02:48:54.719532
| 2020-01-25T01:41:37
| 2020-01-25T01:41:37
| 216,089,955
| 0
| 1
|
BSD-3-Clause
| 2020-01-25T01:41:39
| 2019-10-18T19:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,534
|
py
|
from __future__ import absolute_import, division, print_function
from wxtbx import phil_controls
import wx
from libtbx import Auto
WXTBX_PHIL_BOOL_TRIBOOL = 1
WXTBX_PHIL_BOOL_AUTO = 2
class BoolCtrl(wx.CheckBox, phil_controls.PhilCtrl):
  """Checkbox for a PHIL bool that can also represent None/Auto.

  With WXTBX_PHIL_BOOL_TRIBOOL or WXTBX_PHIL_BOOL_AUTO in the style, the
  checkbox is three-state and the undetermined state maps to None (tribool)
  or Auto (auto style).
  """
  def __init__(self, *args, **kwds):
    kwds = dict(kwds)
    # Remember the wxtbx-specific flags, then strip them out so only real
    # wx styles are passed to the wx.CheckBox constructor.
    self._bool_style = kwds.get("style", 0)
    kwds['style'] = 0
    if ((self._bool_style & WXTBX_PHIL_BOOL_TRIBOOL) or
        (self._bool_style & WXTBX_PHIL_BOOL_AUTO)):
      # User may cycle through the third (undetermined) state.
      kwds['style'] |= wx.CHK_ALLOW_3RD_STATE_FOR_USER|wx.CHK_3STATE
    else :
      kwds['style'] |= wx.CHK_3STATE # wx.CHK_ALLOW_3RD_STATE_FOR_USER?
    wx.CheckBox.__init__(self, *args, **kwds)
    # Forward checkbox toggles as PHIL control events.
    self.Bind(wx.EVT_CHECKBOX, lambda evt: self.DoSendEvent())
  def SetValue(self, value):
    """Set the control from a Python value (True/False/None/Auto)."""
    if (value is None) or (value is Auto):
      # None/Auto require the three-state configuration.
      assert (self.Is3State())
      self.Set3StateValue(wx.CHK_UNDETERMINED)
    else :
      if (self.Is3State()):
        if (value == True):
          self.Set3StateValue(wx.CHK_CHECKED)
        else :
          self.Set3StateValue(wx.CHK_UNCHECKED)
      else :
        wx.CheckBox.SetValue(self, value)
  def GetValue(self):
    """Return True/False, or None/Auto for the undetermined state."""
    if (self.Is3State()):
      value = self.Get3StateValue()
      if (value == wx.CHK_UNDETERMINED):
        # Undetermined maps to Auto when the auto style was requested,
        # otherwise to None.
        if (self._bool_style & WXTBX_PHIL_BOOL_AUTO):
          return Auto
        else :
          return None
      else :
        return (value == wx.CHK_CHECKED)
    else :
      return wx.CheckBox.GetValue(self)
  def GetPhilValue(self):
    """PHIL value is identical to the widget value for bools."""
    return self.GetValue()
  def GetStringValue(self):
    """String form for PHIL output ("True"/"False"/"None"/"Auto")."""
    return str(self.GetValue())
# Manual/interactive smoke test: builds three checkboxes covering the
# two-state, tribool and Auto configurations and asserts the value mapping.
if (__name__ == "__main__"):
  app = wx.App(0)
  frame = wx.Frame(None, -1, "PHIL bool test")
  panel = wx.Panel(frame, -1, size=(600,400))
  box1 = BoolCtrl(panel, label="Use NCS restraints", pos=(100,100))
  box2 = BoolCtrl(panel, label="Find NCS groups automatically", pos=(100,150))
  box3 = BoolCtrl(panel, label="Fast search mode", pos=(100,200),
    style=WXTBX_PHIL_BOOL_AUTO)
  box1.SetValue(False)
  box2.SetValue(None)
  box3.SetValue(Auto)
  # Round-trip checks: widget state must map back to the Python values.
  assert (box1.GetValue() == box1.GetPhilValue() == False)
  assert (box2.GetValue() is None)
  assert (box3.GetValue() is Auto)
  assert (box2.GetStringValue() == "None")
  assert (box3.GetStringValue() == "Auto")
  box3.SetValue(False)
  assert (box3.GetStringValue() == "False")
  box1.SetValue(True)
  assert (box1.GetStringValue() == "True")
  def OnChange(event):
    # Echo every user toggle to stdout.
    print(event.GetEventObject().GetPhilValue())
  frame.Bind(phil_controls.EVT_PHIL_CONTROL, OnChange)
  frame.Show()
  app.MainLoop()
|
[
"jorge7soccer@gmail.com"
] |
jorge7soccer@gmail.com
|
a50506c6f1e9b437891467aeec49f7ce0e5d0e3c
|
5e9b2d0d2a4399fd028c738a082921a1df1f8321
|
/hacker rank/30 Days Of Code/Day 04 - Class vs. Instance.py
|
2f6aaced92bd0e97a00c76adb993e0adff08a560
|
[] |
no_license
|
sunilsm7/python_exercises
|
42e5a1aee0a0d5402b585e1b1631517145aa1e00
|
b2754c51464dcd81319c8514c24249a13e18d825
|
refs/heads/master
| 2020-12-02T16:19:20.266436
| 2017-10-27T10:22:37
| 2017-10-27T10:22:37
| 96,534,650
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
"""
In this challenge, we're going to learn about the difference between a class and an instance;
because this is an Object Oriented concept, it's only enabled in certain languages.
Task
Write a Person class with an instance variable, age, and a constructor that takes an integer, initial_age, as a parameter.
The constructor must assign initial_age to age after confirming that the argument passed as initial_age is not negative.
If a negative argument is passed as initial_age, the constructor should set age to 0 and print "Age is not valid, setting age to 0."
In addition, you must write the following instance methods:
age_1_year() should increase the instance variable _age by 1.
is_old() should perform the following conditional actions:
If age < 13, print "You are young.".
If age >= 13 and age < 18, print "You are a teenager.".
Otherwise, print "You are old.".
"""
|
[
"sunil9766@gmail.com"
] |
sunil9766@gmail.com
|
a92d6441a0fd2f223bc21e0d866ebddf7a054b36
|
17821ba5f1345bcb5181092cec7808e08355abd0
|
/Django_projects/P2-video fail/my_proj/src/my_proj/migrations/0005_video_name.py
|
e9e93aa7fe462157356740f1f0b77bf76382fe25
|
[
"MIT"
] |
permissive
|
Coni63/scripts_Python
|
be1a416dc702c919120645f2946596c68a6a3fbb
|
b1ac0bee706504abcc86fd7a72b8ec625ffa12b3
|
refs/heads/master
| 2021-07-11T16:50:56.719758
| 2018-02-25T12:19:29
| 2018-02-25T12:19:29
| 95,472,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-17 09:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-adds Video.name (removed in 0004)."""
    dependencies = [
        ('my_proj', '0004_remove_video_name'),
    ]
    operations = [
        migrations.AddField(
            model_name='video',
            name='name',
            # Existing rows receive the placeholder value 'test'.
            field=models.CharField(default='test', max_length=100),
        ),
    ]
|
[
"mine.nicolas@gmail.com"
] |
mine.nicolas@gmail.com
|
1596db543519340af331ebc5b52159918fd4ee73
|
8848bd7a4ca88e0061ce1c7dfbf45c488968ea52
|
/ravens/tasks/insertion_goal.py
|
fc0615b9afcb813910b6af74eada86ff53f36564
|
[
"Apache-2.0"
] |
permissive
|
gautams3/deformable-ravens
|
5f390d6bf5af26fa9c746232a8d90403a89fd7ce
|
1324243b804532d229d91f2af13ee84c6fd4771c
|
refs/heads/master
| 2023-08-15T01:46:47.025808
| 2021-10-13T16:10:32
| 2021-10-13T16:10:32
| 416,812,524
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,501
|
py
|
#!/usr/bin/env python
import numpy as np
import pybullet as p
from ravens.tasks import Task
from ravens import utils as U
class InsertionGoal(Task):
    """Using insertion, but in a goal-based Transporters context."""
    def __init__(self):
        super().__init__()
        # Task configuration: suction end effector, pose-based success
        # metric, and a single pick-place motion primitive.
        self.ee = 'suction'
        self.max_steps = 3
        self.metric = 'pose'
        self.primitive = 'pick_place'
    def reset(self, env, last_info=None):
        """Spawn the L-block and set its target placement.

        :param env: simulation environment to add objects into
        :param last_info: prior episode `info` dict; required when
            goal-conditioned testing is on, as the target pose is read
            from it instead of being sampled.
        """
        self.num_steps = 1
        self.goal = {'places': {}, 'steps': []}
        # Add L-shaped block.
        block_size = (0.1, 0.1, 0.04)
        block_urdf = 'assets/insertion/ell.urdf'
        block_pose = self.random_pose(env, block_size)
        block_id = env.add_object(block_urdf, block_pose)
        # Any rotation (2*pi symmetry bucket) toward place index 0.
        self.goal['steps'].append({block_id: (2 * np.pi, [0])})
        # Add L-shaped target pose, but without actually adding it.
        if self.goal_cond_testing:
            assert last_info is not None
            self.goal['places'][0] = self._get_goal_info(last_info)
            #print('\nin insertion reset, goal: {}'.format(self.goal['places'][0]))
        else:
            hole_pose = self.random_pose(env, block_size)
            self.goal['places'][0] = hole_pose
            #print('\nin insertion reset, goal: {}'.format(hole_pose))
    def _get_goal_info(self, last_info):
        """Used to determine the goal given the last `info` dict."""
        # NOTE(review): relies on the block's entry living at key 4
        # (ID assigned by the env) — confirm this stays stable.
        position, rotation, _ = last_info[4] # block ID=4
        return (position, rotation)
|
[
"takeshidanny@gmail.com"
] |
takeshidanny@gmail.com
|
26ff43671dca13288c13c63813a52087fc0064b9
|
136a174f8de72746004aaf28a7ec959fddbd689b
|
/test_scripts/xx4.py
|
aca9862f32eab929bf99b84ef4e4d77742fecf20
|
[] |
no_license
|
xod442/imcServiceNow2
|
ff3c74ffd633f67ef984c5ab9a65da0010e3bc9d
|
0dd86659816bae19e5d43bcb8c894005564597cb
|
refs/heads/master
| 2021-01-18T18:33:50.203685
| 2018-01-09T21:25:22
| 2018-01-09T21:25:22
| 86,862,461
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,927
|
py
|
import time
from flask import Flask, request, render_template, redirect, url_for, flash, session, send_file
from flask.ext.bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from models import db, Imc_alarm_ids
from settings import APP_STATIC
import os
from flask import Flask, request, redirect, url_for
from werkzeug.utils import secure_filename
from snow_py import *
import requests
from pyhpeimc.auth import *
from pyhpeimc.plat.alarms import *
from snowbridge import *
# Create DB tables, then configure the Flask app's upload handling.
db.create_all()
# Locked down upload folder never hurts...
UPLOAD_FOLDER = APP_STATIC
ALLOWED_EXTENSIONS = set(['csv'])  # only CSV uploads are accepted
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
bootstrap = Bootstrap(app)
imc_user = "admin"
imc_passwd = "admin"
imc_host = "10.132.0.15"
snow_user = "admin"
snow_passwd = "Grape123!"
instance = "dev30543"
snow_url = 'https://dev30543.service-now.com/api/now/table/incident'
varz = []
data = {}
dump = []
alarm = {}
imc_test_url = 'http://'+imc_host+':8080'
# Configuring a connection to the VSD API
#
# Write logfile to local database
#
# Routes
alarm['severity'] = "1"
alarm['userAckUserName'] ='admin'
alarm['deviceDisplay'] = '10.10.10.10'
alarm['faultDesc'] = "Its down"
alarm['userAckType'] = "0"
alarm['id'] = "210"
alarm['faultTime'] = "1490648244"
snow_return = "401"
print alarm['id']
print snow_return
print alarm['faultDesc']
print alarm['deviceDisplay']
print alarm['severity']
print alarm['faultTime']
print alarm['userAckUserName']
print alarm['userAckType']
write_local_db(alarm, snow_return)
'''
logfile = Imc_alarm_ids(alarm['id'],snow_return,alarm['faultDesc'],alarm['deviceDisplay'],
alarm['severity'],alarm['faultTime'],alarm['userAckUserName'], alarm['userAckType'])
print logfile
db.session.add(logfile)
db.session.commit()
'''
print "Peace!"
|
[
"rick@rickkauffman.com"
] |
rick@rickkauffman.com
|
8af17fb2e3ab102cd0d02489f823a5800a3dac93
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Lazymux/websploit/core/help.py
|
8b341ed650071223766e292675f6d56d447f26f8
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:ad1e9de5e3288e6454391464b0dbe8b0b42084b82cfbc0f4789743568bbccdf1
size 1001
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
b4f7cdeec17ecd205cbc93e2f5b6bc0444aacb08
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2864/60591/310678.py
|
b3ddf43088c2b9c36f5b42543811c99ee8acaf76
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
n = eval(input())
nums = list(map(int, input().split(" ")))
if (len(nums) == 1):
print(nums[0])
elif(len(nums) == 2):
if(abs(nums[1] - nums[0]) == 1):
print(max(nums[1],nums[0]))
else:
print(nums[1] + nums[0])
else:
result = 0
temp = []
for x in range(max(nums) + 1):
temp.append(0)
for num in nums:
temp[num] += num
cost = [temp[0], max(temp[0], temp[1])]
for x in range(2,max(nums) + 1):
cost.append(max(cost[x - 1],cost[x - 2] + temp[x]))
print(cost[-1])
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
d8a5c1939b6c95386264908ad58cff196f78ef17
|
5963c12367490ffc01c9905c028d1d5480078dec
|
/homeassistant/components/numato/switch.py
|
505d28d0c4036acb28906ca213dd68232a6cd195
|
[
"Apache-2.0"
] |
permissive
|
BenWoodford/home-assistant
|
eb03f73165d11935e8d6a9756272014267d7d66a
|
2fee32fce03bc49e86cf2e7b741a15621a97cce5
|
refs/heads/dev
| 2023-03-05T06:13:30.354545
| 2021-07-18T09:51:53
| 2021-07-18T09:51:53
| 117,122,037
| 11
| 6
|
Apache-2.0
| 2023-02-22T06:16:51
| 2018-01-11T16:10:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,419
|
py
|
"""Switch platform integration for Numato USB GPIO expanders."""
import logging
from numato_gpio import NumatoGpioError
from homeassistant.const import (
CONF_DEVICES,
CONF_ID,
CONF_SWITCHES,
DEVICE_DEFAULT_NAME,
)
from homeassistant.helpers.entity import ToggleEntity
from . import CONF_INVERT_LOGIC, CONF_PORTS, DATA_API, DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the configured Numato USB GPIO switch ports."""
if discovery_info is None:
return
api = hass.data[DOMAIN][DATA_API]
switches = []
devices = hass.data[DOMAIN][CONF_DEVICES]
for device in [d for d in devices if CONF_SWITCHES in d]:
device_id = device[CONF_ID]
platform = device[CONF_SWITCHES]
invert_logic = platform[CONF_INVERT_LOGIC]
ports = platform[CONF_PORTS]
for port, port_name in ports.items():
try:
api.setup_output(device_id, port)
api.write_output(device_id, port, 1 if invert_logic else 0)
except NumatoGpioError as err:
_LOGGER.error(
"Failed to initialize switch '%s' on Numato device %s port %s: %s",
port_name,
device_id,
port,
err,
)
continue
switches.append(
NumatoGpioSwitch(
port_name,
device_id,
port,
invert_logic,
api,
)
)
add_entities(switches, True)
class NumatoGpioSwitch(ToggleEntity):
"""Representation of a Numato USB GPIO switch port."""
def __init__(self, name, device_id, port, invert_logic, api):
"""Initialize the port."""
self._name = name or DEVICE_DEFAULT_NAME
self._device_id = device_id
self._port = port
self._invert_logic = invert_logic
self._state = False
self._api = api
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def is_on(self):
"""Return true if port is turned on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the port on."""
try:
self._api.write_output(
self._device_id, self._port, 0 if self._invert_logic else 1
)
self._state = True
self.schedule_update_ha_state()
except NumatoGpioError as err:
_LOGGER.error(
"Failed to turn on Numato device %s port %s: %s",
self._device_id,
self._port,
err,
)
def turn_off(self, **kwargs):
"""Turn the port off."""
try:
self._api.write_output(
self._device_id, self._port, 1 if self._invert_logic else 0
)
self._state = False
self.schedule_update_ha_state()
except NumatoGpioError as err:
_LOGGER.error(
"Failed to turn off Numato device %s port %s: %s",
self._device_id,
self._port,
err,
)
|
[
"noreply@github.com"
] |
BenWoodford.noreply@github.com
|
380d34e9731daa55c6f70d3e860fe21844cf1912
|
32c56293475f49c6dd1b0f1334756b5ad8763da9
|
/google-cloud-sdk/lib/googlecloudsdk/core/util/semver.py
|
8fa1edb39fbcb2ec9cf241dd02e65fa0e28e8c38
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
bopopescu/socialliteapp
|
b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494
|
85bb264e273568b5a0408f733b403c56373e2508
|
refs/heads/master
| 2022-11-20T03:01:47.654498
| 2020-02-01T20:29:43
| 2020-02-01T20:29:43
| 282,403,750
| 0
| 0
|
MIT
| 2020-07-25T08:31:59
| 2020-07-25T08:31:59
| null |
UTF-8
|
Python
| false
| false
| 6,611
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for comparing semantic versions.
Basic rules of semver:
Format: major.minor.patch-prerelease+build
major, minor, patch, must all be present and integers with no leading zeros.
They are compared numerically by segment.
prerelease is an optional '.' separated series of identifiers where each is
either an integer with no leading zeros, or an alphanumeric string
(including '-'). Prereleases are compared by comparing each identifier in
order. Integers are compared numerically, alphanumeric strings are compared
lexigraphically. A prerelease version is lower precedence than it's associated
normal version.
The build number is optional and not included in the comparison. It is '.'
separated series of alphanumeric identifiers.
Two SemVer objects are considered equal if they represent the exact same string
(including the build number and including case differences). For comparison
operators, we follow the SemVer spec of precedence and ignore the build number
and case of alphanumeric strings.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from six.moves import zip_longest
# Only digits, with no leading zeros.
_DIGITS = r'(?:0|[1-9][0-9]*)'
# Digits, letters and dashes
_ALPHA_NUM = r'[-0-9A-Za-z]+'
# This is an alphanumeric string that must have at least once letter (or else it
# would be considered digits).
_STRICT_ALPHA_NUM = r'[-0-9A-Za-z]*[-A-Za-z]+[-0-9A-Za-z]*'
_PRE_RELEASE_IDENTIFIER = r'(?:{0}|{1})'.format(_DIGITS, _STRICT_ALPHA_NUM)
_PRE_RELEASE = r'(?:{0}(?:\.{0})*)'.format(_PRE_RELEASE_IDENTIFIER)
_BUILD = r'(?:{0}(?:\.{0})*)'.format(_ALPHA_NUM)
_SEMVER = (
r'^(?P<major>{digits})\.(?P<minor>{digits})\.(?P<patch>{digits})'
r'(?:\-(?P<prerelease>{release}))?(?:\+(?P<build>{build}))?$'
).format(digits=_DIGITS, release=_PRE_RELEASE, build=_BUILD)
class ParseError(Exception):
"""An exception for when a string failed to parse as a valid semver."""
pass
class SemVer(object):
"""Object to hold a parsed semantic version string."""
def __init__(self, version):
"""Creates a SemVer object from the given version string.
Args:
version: str, The version string to parse.
Raises:
ParseError: If the version could not be correctly parsed.
Returns:
SemVer, The parsed version.
"""
(self.major, self.minor, self.patch, self.prerelease, self.build) = (
SemVer._FromString(version))
@classmethod
def _FromString(cls, version):
"""Parse the given version string into its parts."""
if version is None:
raise ParseError('The value is not a valid SemVer string: [None]')
try:
match = re.match(_SEMVER, version)
except (TypeError, re.error) as e:
raise ParseError('Error parsing version string: [{0}]. {1}'
.format(version, e))
if not match:
raise ParseError(
'The value is not a valid SemVer string: [{0}]'.format(version))
parts = match.groupdict()
return (
int(parts['major']), int(parts['minor']), int(parts['patch']),
parts['prerelease'], parts['build'])
@classmethod
def _CmpHelper(cls, x, y):
"""Just a helper equivalent to the cmp() function in Python 2."""
return (x > y) - (x < y)
@classmethod
def _ComparePrereleaseStrings(cls, s1, s2):
"""Compares the two given prerelease strings.
Args:
s1: str, The first prerelease string.
s2: str, The second prerelease string.
Returns:
1 if s1 is greater than s2, -1 if s2 is greater than s1, and 0 if equal.
"""
s1 = s1.split('.') if s1 else []
s2 = s2.split('.') if s2 else []
for (this, other) in zip_longest(s1, s2):
# They can't both be None because empty parts of the string split will
# come through as the empty string. None indicates it ran out of parts.
if this is None:
return 1
elif other is None:
return -1
# Both parts have a value
if this == other:
# This part is the same, move on to the next.
continue
if this.isdigit() and other.isdigit():
# Numerical comparison if they are both numbers.
return SemVer._CmpHelper(int(this), int(other))
# Lexical comparison if either is a string. Numbers will always sort
# before strings.
return SemVer._CmpHelper(this.lower(), other.lower())
return 0
def _Compare(self, other):
"""Compare this SemVer to other.
Args:
other: SemVer, the other version to compare this one to.
Returns:
1 if self > other, -1 if other > self, 0 if equal.
"""
# Compare the required parts.
result = SemVer._CmpHelper(
(self.major, self.minor, self.patch),
(other.major, other.minor, other.patch))
# Only if required parts are equal, compare the prerelease strings.
# Never include build number in comparison.
result = result or SemVer._ComparePrereleaseStrings(
self.prerelease, other.prerelease)
return result
def Distance(self, other):
"""Compare this SemVer to other and returns the distances.
Args:
other: SemVer, the other version to compare this one to.
Returns:
Distances between the major, minor and patch versions.
"""
major_diff = self.major - other.major
minor_diff = self.minor - other.minor
patch_diff = self.patch - other.patch
return major_diff, minor_diff, patch_diff
def __eq__(self, other):
return other and (
(self.major, self.minor, self.patch, self.prerelease, self.build) ==
(other.major, other.minor, other.patch, other.prerelease, other.build))
def __ne__(self, other):
return not self == other
def __gt__(self, other):
return self._Compare(other) > 0
def __lt__(self, other):
return self._Compare(other) < 0
def __ge__(self, other):
return not self < other
def __le__(self, other):
return not self > other
|
[
"jonathang132298@gmail.com"
] |
jonathang132298@gmail.com
|
0f03a2ac1686b80d47dda000ad3fd21ef99f7f7a
|
1ef68ba8f4754bf4d4d86d945bb1392be3ff5beb
|
/mlagents/envs/communicator_objects/custom_action_pb2.py
|
1c16809b0b0d05a34b2fe0fb5193e54c3337f10e
|
[
"MIT"
] |
permissive
|
Abluceli/HRG-SAC
|
fc1b5fb720f391390b0ac86c23c46187178a3691
|
334df1e8afbfff3544413ade46fb12f03556014b
|
refs/heads/master
| 2022-12-29T22:51:35.584254
| 2020-02-19T13:39:23
| 2020-02-19T13:39:23
| 241,630,517
| 7
| 1
|
MIT
| 2022-12-08T06:18:57
| 2020-02-19T13:36:58
|
Python
|
UTF-8
|
Python
| false
| true
| 1,939
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mlagents/envs/communicator_objects/custom_action.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="mlagents/envs/communicator_objects/custom_action.proto",
package="communicator_objects",
syntax="proto3",
serialized_options=_b("\252\002\034MLAgents.CommunicatorObjects"),
serialized_pb=_b(
'\n6mlagents/envs/communicator_objects/custom_action.proto\x12\x14\x63ommunicator_objects"\x0e\n\x0c\x43ustomActionB\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3'
),
)
_CUSTOMACTION = _descriptor.Descriptor(
name="CustomAction",
full_name="communicator_objects.CustomAction",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=80,
serialized_end=94,
)
DESCRIPTOR.message_types_by_name["CustomAction"] = _CUSTOMACTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CustomAction = _reflection.GeneratedProtocolMessageType(
"CustomAction",
(_message.Message,),
dict(
DESCRIPTOR=_CUSTOMACTION,
__module__="mlagents.envs.communicator_objects.custom_action_pb2"
# @@protoc_insertion_point(class_scope:communicator_objects.CustomAction)
),
)
_sym_db.RegisterMessage(CustomAction)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"787873309@qq.com"
] |
787873309@qq.com
|
8de8ea6b1c58ca9b9e86f7823a753e50cc5c3b33
|
553e6acd1019bb2c7d6a1b08009ca50ef2fa0ad1
|
/mammoth/optim.py
|
b7f17e1cecf96cd1842270f07004067b512eda4a
|
[] |
no_license
|
bkj/mammoth
|
ac0cfd6f8c5165ce72a5a7e591a938cf823270d3
|
0bd0122b5bac5ce897436a2318cb47b2fbc84164
|
refs/heads/master
| 2021-05-15T00:23:48.290164
| 2018-07-26T16:15:23
| 2018-07-26T16:15:23
| 103,467,821
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,805
|
py
|
#!/usr/bin/env python
"""
optim.py
"""
import math
import torch
import numpy as np
class LambdaAdam(torch.optim.Optimizer):
"""
ADAM optimizer that mimics hypergrads
- Difference is addition of `lam` parameter. I noticed that my hypergrad test was converging
to eps < 1e-10. Setting lam to some small number (1e-1, 1e-2, etc) lets the torch version
convert to eps < 1e-8.
!! This is not efficient, due to cloning, etc. Will need to reimplement more efficiently
for larger models. Then again, for larger models, this may not matter.
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=10**-4, lam=1):
defaults = dict(lr=lr, betas=betas, eps=eps, lam=lam)
super().__init__(params, defaults)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
m, v = state['exp_avg'].clone(), state['exp_avg_sq'].clone()
beta1, beta2 = group['betas']
state['step'] += 1
# --
b1t = beta1 * (group['lam'] ** (state['step'] - 1))
m = (m * b1t) + ((1 - b1t) * grad)
v = (1 - beta2) * (grad ** 2) + beta2 * v
mhat = m / (1 - beta1 ** state['step'])
vhat = v / (1 - beta2 ** state['step'])
p.data -= group['lr'] * mhat / (torch.sqrt(vhat) + group['eps'])
# --
# default torch implementation
# m = (m * beta1) + ((1 - beta1) * grad)
# v = (1 - beta2) * (grad ** 2) + beta2 * v
# denom = torch.sqrt(v) + group['eps']
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
# step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
# p.data.addcdiv_(-step_size, m, denom)
# --
state['exp_avg'] = m.clone()
state['exp_avg_sq'] = v.clone()
return loss
|
[
"bkj.322@gmail.com"
] |
bkj.322@gmail.com
|
8cf1a9534a126b14369a0c65201592f19a07b52f
|
7a1a65b0cda41ea204fad4848934db143ebf199a
|
/automatedprocesses_firststage/adsym_InventorySources_v2_DD_testapi.py
|
3f6dcf9326e7f74dfb04f362aaeebd1489663c43
|
[] |
no_license
|
bpopovich44/ReaperSec
|
4b015e448ed5ce23316bd9b9e33966373daea9c0
|
22acba4d84313e62dbbf95cf2a5465283a6491b0
|
refs/heads/master
| 2021-05-02T18:26:11.875122
| 2019-06-22T15:02:09
| 2019-06-22T15:02:09
| 120,664,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,719
|
py
|
#!/usr/bin/python2.7
import json
from mysql.connector import MySQLConnection, Error
from python_dbconfig import read_db_config
import aol_api
def connect():
# """Gets AOL Data and writes them to a MySQL table"""
db = "mysql_dl"
# Connect To DB:
db_config = read_db_config(db)
try:
print('Connecting to database...')
conn = MySQLConnection(**db_config)
if conn.is_connected():
print('Connection established.')
cursor = conn.cursor()
# calls get_access_token function and starts script
logintoken = aol_api.get_access_token("25e5de37-aa8d-4d93-b407-29bc42b86044", "stEVHyPObmxCTeI6mTMKuA")
print(logintoken)
result = aol_api.run_existing_report(logintoken, "190595")
#print(result)
info = json.loads(result)
#print(info)
for x in json.loads(result)['data']:
rownum = ''
date = x['row'][0]
inventory_source = x['row'][1].replace("'", " -").replace('"', "")
geo_country = x['row'][2].replace("'", "")
media = x['row'][3].replace('"', "").replace("'", "")
ad_opportunities = x['row'][4]
ad_attempts = x['row'][5]
ad_impressions = x['row'][6]
ad_revenue = x['row'][7]
ecpm = x['row'][8]
ad_errors = x['row'][9]
iab_viewability_measurable_ad_impressions = x['row'][10]
iab_viewable_ad_impressions = x['row'][11]
market_ops = x['row'][12]
clicks = x['row'][13].replace(" ", "0")
list = (rownum, date, inventory_source, geo_country, media, ad_opportunities, ad_attempts, ad_impressions, \
ad_revenue, ecpm, ad_errors, iab_viewability_measurable_ad_impressions, iab_viewable_ad_impressions, market_ops, clicks)
#print(list)
sql = """INSERT INTO adsym_InventorySources_v2 VALUES ("%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", \
"%s", "%s", "%s", "%s")""" % (rownum, date, inventory_source, geo_country, media, ad_opportunities, ad_attempts, ad_impressions, \
ad_revenue, ecpm, ad_errors, iab_viewability_measurable_ad_impressions, iab_viewable_ad_impressions, market_ops, clicks)
cursor.execute(sql)
cursor.execute('commit')
else:
print('Connection failed.')
except Error as error:
print(error)
finally:
conn.close()
print('Connection closed.')
if __name__ == '__main__':
connect()
|
[
"bpopovich4@gmail.com"
] |
bpopovich4@gmail.com
|
97e4669eaaef04e481d3c1a28889378009c43f5e
|
c97ae1cc922a037484c5d4794d0a657561cf47f3
|
/config.py
|
53c8a1a210fb59cef99d47b41017842907143b96
|
[] |
no_license
|
AlenAlic/clubpromoters
|
3059078b02b77745e7a1e49d998f9d24554082e8
|
f44b3b20c20d5669c1658036cea35fb9a4f223fc
|
refs/heads/master
| 2022-12-11T14:38:37.824769
| 2019-09-08T19:02:49
| 2019-09-08T19:02:49
| 190,430,315
| 0
| 0
| null | 2022-12-09T22:02:49
| 2019-06-05T16:29:25
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 664
|
py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
ENV = 'development'
SECRET_KEY = 'test-key'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db?check_same_thread=False')
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = False
# MAIL SERVERS
# python -m smtpd -n -c DebuggingServer localhost:8025
# python -u -m smtpd -n -c DebuggingServer localhost:8025 > mail.log
MAIL_SERVER = 'localhost'
MAIL_PORT = 8025
MAIL_USE_TLS = ''
MAIL_USERNAME = ''
MAIL_PASSWORD = ''
ADMINS = ['your-email@example.com']
DEBUG_TB_INTERCEPT_REDIRECTS = False
# requirements
# pip freeze > requirements.txt
|
[
"aalic89@gmail.com"
] |
aalic89@gmail.com
|
2cbf983911e50399c3a76fb804444089fce74a61
|
c071eb46184635818e8349ce9c2a78d6c6e460fc
|
/system/python_stubs/-745935208/_ast/__init__/Global.py
|
e9f88c95a0171f4bb4c222f70243e1403b25fc9c
|
[] |
no_license
|
sidbmw/PyCharm-Settings
|
a71bc594c83829a1522e215155686381b8ac5c6e
|
083f9fe945ee5358346e5d86b17130d521d1b954
|
refs/heads/master
| 2020-04-05T14:24:03.216082
| 2018-12-28T02:29:29
| 2018-12-28T02:29:29
| 156,927,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# encoding: utf-8
# module _ast
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37\lib\site-packages\pandas\util\_move.cp37-win_amd64.pyd
# by generator 1.146
# no doc
# no imports
from .stmt import stmt
class Global(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'names',
)
|
[
"siddharthnatamai@gmail.com"
] |
siddharthnatamai@gmail.com
|
af7582913055c33dfb0d2fb42261bb2d00085cbd
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/55/usersdata/120/22966/submittedfiles/av2_p3_civil.py
|
ec623fcaf0928a3823b671b83b40cb785119d3ff
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
#definir somalinha
def somalinha(a,linha):
soma=0
for j in range(0,a.shape[1],1):
soma=soma+a[linha,j]
return somalinha
#definir somacoluna
def somacoluna(a,coluna):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,coluna]
return soma
# definir peso
def peso(a,linha,coluna):
peso=somalinha(a,linha)+somacoluna(a,coluna)-(2*a[linha,coluna])
return peso
n=input('digite n:')
x=input('digite x:')
y=input('digite y:')
a=np.zeros((n,n))
for i in range(0,a.shape[0],1):
for j in range(0,a.shape[1],1):
print ('%d'%peso(a,x,y))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1f8b9d0f62221cd8a4ea43b57dfb8951433fe248
|
5c81a33883e052070c557c76b5968aa501d5526e
|
/products/migrations/0005_attribute_attributeitem.py
|
24c44104e06e383020c74f454c2c93a8a182519b
|
[] |
no_license
|
worlddeleteRin/rabbit_vkusno
|
2ebacdf72d87700d191965481c56e78bfec33e9b
|
017cdff4b40fa7e9a0f7729e4f7b754f48e93c3a
|
refs/heads/master
| 2023-04-03T23:32:42.770973
| 2021-04-08T06:43:04
| 2021-04-08T06:43:04
| 355,661,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
# Generated by Django 3.0.8 on 2020-10-11 12:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('products', '0004_auto_20201010_1810'),
]
operations = [
migrations.CreateModel(
name='Attribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=300)),
('category', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='products.Category')),
],
),
migrations.CreateModel(
name='Attributeitem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=300)),
('attr', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Attribute')),
],
),
]
|
[
"noname@MacBook-Pro-Rin.local"
] |
noname@MacBook-Pro-Rin.local
|
0f2011cb5b2aadf3215ef9f7b51b9c97d83a2488
|
43ab33b2f50e47f5dbe322daa03c86a99e5ee77c
|
/rcc/models/study_site_view_rpc.py
|
a9556784eb2812cdd56b265c2e06679096439101
|
[] |
no_license
|
Sage-Bionetworks/rcc-client
|
c770432de2d2950e00f7c7bd2bac22f3a81c2061
|
57c4a621aecd3a2f3f9faaa94f53b2727992a01a
|
refs/heads/main
| 2023-02-23T05:55:39.279352
| 2021-01-21T02:06:08
| 2021-01-21T02:06:08
| 331,486,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,149
|
py
|
# coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from rcc.configuration import Configuration
class StudySiteViewRpc(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'int',
'study_id': 'int',
'name': 'str',
'site_id': 'int',
'site_type': 'str',
'principal_investigator': 'str',
'facility_name': 'str',
'enabled': 'bool'
}
attribute_map = {
'id': 'id',
'study_id': 'studyId',
'name': 'name',
'site_id': 'siteId',
'site_type': 'siteType',
'principal_investigator': 'principalInvestigator',
'facility_name': 'facilityName',
'enabled': 'enabled'
}
def __init__(self, id=None, study_id=None, name=None, site_id=None, site_type=None, principal_investigator=None, facility_name=None, enabled=None, local_vars_configuration=None): # noqa: E501
"""StudySiteViewRpc - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._study_id = None
self._name = None
self._site_id = None
self._site_type = None
self._principal_investigator = None
self._facility_name = None
self._enabled = None
self.discriminator = None
if id is not None:
self.id = id
if study_id is not None:
self.study_id = study_id
if name is not None:
self.name = name
if site_id is not None:
self.site_id = site_id
if site_type is not None:
self.site_type = site_type
if principal_investigator is not None:
self.principal_investigator = principal_investigator
if facility_name is not None:
self.facility_name = facility_name
if enabled is not None:
self.enabled = enabled
@property
def id(self):
"""Gets the id of this StudySiteViewRpc. # noqa: E501
:return: The id of this StudySiteViewRpc. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this StudySiteViewRpc.
:param id: The id of this StudySiteViewRpc. # noqa: E501
:type: int
"""
self._id = id
@property
def study_id(self):
"""Gets the study_id of this StudySiteViewRpc. # noqa: E501
:return: The study_id of this StudySiteViewRpc. # noqa: E501
:rtype: int
"""
return self._study_id
@study_id.setter
def study_id(self, study_id):
"""Sets the study_id of this StudySiteViewRpc.
:param study_id: The study_id of this StudySiteViewRpc. # noqa: E501
:type: int
"""
self._study_id = study_id
@property
def name(self):
"""Gets the name of this StudySiteViewRpc. # noqa: E501
:return: The name of this StudySiteViewRpc. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this StudySiteViewRpc.
:param name: The name of this StudySiteViewRpc. # noqa: E501
:type: str
"""
self._name = name
@property
def site_id(self):
"""Gets the site_id of this StudySiteViewRpc. # noqa: E501
:return: The site_id of this StudySiteViewRpc. # noqa: E501
:rtype: int
"""
return self._site_id
@site_id.setter
def site_id(self, site_id):
"""Sets the site_id of this StudySiteViewRpc.
:param site_id: The site_id of this StudySiteViewRpc. # noqa: E501
:type: int
"""
self._site_id = site_id
@property
def site_type(self):
"""Gets the site_type of this StudySiteViewRpc. # noqa: E501
:return: The site_type of this StudySiteViewRpc. # noqa: E501
:rtype: str
"""
return self._site_type
@site_type.setter
def site_type(self, site_type):
"""Sets the site_type of this StudySiteViewRpc.
:param site_type: The site_type of this StudySiteViewRpc. # noqa: E501
:type: str
"""
self._site_type = site_type
@property
def principal_investigator(self):
"""Gets the principal_investigator of this StudySiteViewRpc. # noqa: E501
:return: The principal_investigator of this StudySiteViewRpc. # noqa: E501
:rtype: str
"""
return self._principal_investigator
@principal_investigator.setter
def principal_investigator(self, principal_investigator):
"""Sets the principal_investigator of this StudySiteViewRpc.
:param principal_investigator: The principal_investigator of this StudySiteViewRpc. # noqa: E501
:type: str
"""
self._principal_investigator = principal_investigator
@property
def facility_name(self):
"""Gets the facility_name of this StudySiteViewRpc. # noqa: E501
:return: The facility_name of this StudySiteViewRpc. # noqa: E501
:rtype: str
"""
return self._facility_name
@facility_name.setter
def facility_name(self, facility_name):
"""Sets the facility_name of this StudySiteViewRpc.
:param facility_name: The facility_name of this StudySiteViewRpc. # noqa: E501
:type: str
"""
self._facility_name = facility_name
@property
def enabled(self):
"""Gets the enabled of this StudySiteViewRpc. # noqa: E501
:return: The enabled of this StudySiteViewRpc. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this StudySiteViewRpc.
:param enabled: The enabled of this StudySiteViewRpc. # noqa: E501
:type: bool
"""
self._enabled = enabled
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StudySiteViewRpc):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, StudySiteViewRpc):
return True
return self.to_dict() != other.to_dict()
|
[
"thomas.yu@sagebase.org"
] |
thomas.yu@sagebase.org
|
55d11a1f12e6188a1ad698f562dad455168768b3
|
4129ae27e90b3aa76187203e42aa0ecbae69216f
|
/img/test.py
|
b8e635ba0fa6d08d2030bb90c4ad0473dfa34e66
|
[] |
no_license
|
PinoJoe/WebCrawler
|
d1a6b84629832222cbebb1037f1cbc0771deadcf
|
94929bc73bde98569b2992f8bc648c2f39afcccc
|
refs/heads/master
| 2022-01-23T13:09:35.853177
| 2019-08-05T13:40:44
| 2019-08-05T13:40:44
| 122,821,572
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
#-*-coding:utf-8 -*-
import time
import urllib
# BUG FIX: urlretrieve lives in the urllib.request submodule; a bare
# 'import urllib' does not make urllib.request available in Python 3.
import urllib.request
from contextlib import closing

import requests
from lxml import etree
def ProcessBar(blocknum, blocksize, totalsize):
    """urlretrieve reporthook: print in-place download progress and speed.

    :param blocknum: number of blocks transferred so far
    :param blocksize: size of one block in bytes
    :param totalsize: total file size in bytes

    Relies on the module-level ``start_time`` being set just before the
    download starts.
    """
    # Guard against a zero elapsed time on the very first callback, which
    # would otherwise raise ZeroDivisionError.
    elapsed = time.time() - start_time
    if elapsed <= 0:
        elapsed = 1e-6
    speed = (blocknum * blocksize) / elapsed
    speed_str = '下载速度: %s' % format_size(speed)
    recv_size = blocknum * blocksize
    pervent = recv_size / totalsize
    percent_str = '%.2f%%' % (pervent * 100)
    # 5-slot ASCII progress bar, e.g. '==---'.
    n = round(pervent * 5)
    s = ('=' * n).ljust(5, '-')
    print(percent_str.ljust(8, ' ') + '[' + s + ']' + speed_str, end='\r')
def format_size(bytes):
    """Render a byte count as a human-readable K/M/G string.

    Non-numeric input prints a diagnostic and yields the literal 'Error'.
    """
    try:
        kb = float(bytes) / 1024
    except:
        print('传入的字节格式错误')
        return 'Error'
    # Pick the largest unit that keeps the value under 1024.
    if kb < 1024:
        return '%.3fK' % kb
    mb = kb / 1024
    if mb < 1024:
        return '%.3fM' % mb
    return '%.3fG' % (mb / 1024)
# Scrape the landscape gallery page and download every <img> it references.
user_agent = 'Mozilla/5.0 (Windows NT 6.1; rv:58.0) Gecko/20100101 Firefox/58.0'
headers = {'User-Agent':user_agent}
r = requests.get('http://www.ivsky.com/tupian/ziranfengguang/',headers=headers)
h = etree.HTML(r.text)
img_urls = h.xpath('.//img/@src')
i = 0
for img_url in img_urls:
    # BUG FIX: the extension was misspelled '.jgp'; save files as '.jpg'.
    filename = 'img' + str(i) + '.jpg'
    # start_time is read by the ProcessBar reporthook to compute speed.
    start_time = time.time()
    urllib.request.urlretrieve(img_url, filename, ProcessBar)
    i += 1
    print('\n')
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
b376047b8fffc918dea88a06d8f95217ed1a01eb
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_coolants.py
|
52648d1c9cbfeb1d8e6f52d2d454be9cc0ff0119
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
#calss header
class _COOLANTS():
def __init__(self,):
self.name = "COOLANTS"
self.definitions = coolant
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['coolant']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
ae8384d7325ab05aae05cc3dff842e3ae00aef65
|
632b9b323dc29c67fd6b8cdbec6ec80161ad484a
|
/extractInstitution.py
|
126544f887ba375a7c57a7e8e67987ecdd57ee55
|
[] |
no_license
|
SixingYan/Academic-Relationship-Network
|
3a08f7cf5d9d1a73f8639c883257fc76dbe86376
|
94dbfcc76a734005ffceb08e31763112b0d4462b
|
refs/heads/master
| 2021-01-21T17:57:14.388478
| 2018-02-23T09:48:31
| 2018-02-23T09:48:31
| 92,003,208
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,655
|
py
|
# -*- coding: utf-8 -*-
import re
from tool import getResult,getCursor,readTXT
from bs4 import BeautifulSoup
import os
files_path = 'E:/Code/Data/dlibrary'
conn,cur = getCursor()
#import os;os.chdir('e:/Code/Python');import extractInstitution;extractInstitution.mainFunction()
def cleanInstit(instit):
    """Strip every non-letter character from each space-separated word.

    The cleaned words are re-joined with single spaces and the result is
    stripped of leading/trailing whitespace. Words that become empty still
    contribute their separating space, as in the original implementation.
    """
    cleaned = [re.sub('[^a-zA-Z]', '', token) for token in instit.split(' ')]
    return ' '.join(cleaned).strip()
def readFiles(files_path):
    """Return '<files_path>/<name>' for each entry of the directory.

    Entries whose name is a single character or empty are skipped,
    mirroring the original ``len(fileName) > 1`` filter. Order follows
    ``os.listdir``.
    """
    return [files_path + '/' + name
            for name in os.listdir(files_path)
            if len(name) > 1]
def insertInstitution(eid,institution,fileP):
    """Insert one experience1 row per institution name for researcher eid.

    :param eid: integer id of the researcher row in dlurl1
    :param institution: flat list of institution-name strings
    :param fileP: source file path, only used for the progress message

    Uses the module-level ``cur``/``conn`` MySQL handles; commits after each
    row and logs (but swallows) per-row failures.
    NOTE(review): the SQL is built by string concatenation — vulnerable to
    injection if institution names ever contain quotes; parameterize.
    """
    insertSQL = ''
    for inst in institution:
        try:
            insertSQL = 'insert into experience1 (eid,institution) values('+str(eid)+', "'+inst+'")'
            cur.execute(insertSQL)
            conn.commit()
        except Exception:
            print('error:'+insertSQL)
    # Disabled: mark the dlurl1 row as extracted.
    #cur.execute('update dlurl1 set status= where id='+str(eid))
    #conn.commit()
    print('Competed '+fileP)
def extractInstitut(html):
    """Extract the 'Affiliation history' institution names from a saved page.

    Walks siblings after the 'Affiliation history' label until the first
    <div>, then collects the text of every <a> inside it (cleaned via
    cleanInstit). Returns a list of names; empty list when the label is
    absent. Falls through without a return on parse errors (implicit None).
    """
    institution = []
    # Locate the <strong> 'Affiliation history' label, then the following
    # <div>, and read every <a> inside it.
    soup = BeautifulSoup(''.join(html),"lxml")
    history = soup.find('history')
    strongTag = history.find(text='Affiliation history')
    if strongTag != None:
        strongTag = strongTag.parent
    else:
        return institution
    # NOTE(review): ``type(x) != 'NoneType'`` compares a type object to a
    # string and is therefore always True; the loop only exits through the
    # break statements below. Presumably ``x is not None`` was intended —
    # confirm before changing, since the breaks currently carry the logic.
    while (type(strongTag.nextSibling) != 'NoneType') or (strongTag.nextSibling.name != 'div'):
        #print('  ---loop--- ')
        strongTag = strongTag.nextSibling
        #print(str(strongTag))
        if strongTag.name == 'div':
            break
        if strongTag == None:
            print('no find?')
            break
    try:
        if strongTag.findAll('a') != None:
            for a in strongTag.findAll('a'):
                instName = cleanInstit(a.string)
                institution.append(instName)
        return institution
    except Exception:
        print('error:'+str(strongTag))
def extractUserID(url):
    """Split a DL author-page URL into (base_url, user_id).

    Everything after the first '&' is discarded; the id is whatever follows
    the fixed author_page prefix, with its first four characters dropped
    (only the digits from position 4 onward are considered significant).
    """
    base_url = url.partition('&')[0]
    raw_id = base_url.replace('http://dl.acm.org/author_page.cfm?id=', '')
    return base_url, raw_id[4:]
def getID(html):
    """Look up the dlurl1 row id for the page embedded in ``html``.

    Finds the CDATA-wrapped <fullpath> URL, splits it into url/userid via
    extractUserID, and queries dlurl1 (module-level ``cur``) for a unique
    matching row. Returns the row id, or -1 when not found / ambiguous.
    NOTE(review): SQL built by concatenation — parameterize if inputs can
    ever contain quotes.
    """
    eid = -1  # sentinel: not found
    indx = '<![CDATA['
    start = html.find(indx)
    end = html.find(']]></fullpath>')
    if start>0:
        subjectURL = html[(start+len(indx)):end]
        url,userid = extractUserID(subjectURL)  # split the URL into base url + user id
        # Cross-check against the database.
        selectSQL = 'select t.id from (select id,url from dlurl1 where userid='+str(userid)+') t where t.url="'+url+'"'
        result = getResult(selectSQL,cur)
        if len(result)==1:
            eid = int(result[0]['id'])
        else:
            print('error or exist')
    return eid
def mainFunction():
    """Drive the extraction: read every saved page, resolve its dlurl1 id,
    extract affiliation institutions and insert them into the database.

    Closes the module-level cursor/connection when done.
    """
    # Read the list of saved HTML files.
    filePathList = readFiles(files_path)
    print('read is ready')
    for fileP in filePathList:
        html = readTXT(fileP)
        #print('do here')
        eid = getID(html)
        #print('do here0')
        if eid >0:
            instit = extractInstitut(html)
            if len(instit)>0:
                #print('do here1')
                insertInstitution(eid,instit,fileP)
                #print(instit)
        #break  # debug: process only one file
    cur.close();conn.close();
if __name__ == '__main__':
mainFunction()
|
[
"plutoyem@outlook.com"
] |
plutoyem@outlook.com
|
fa3e1e5a03b34cf9667f4072e97ba84be7134e14
|
f8ef8828377131f38a75e25f1571d3e0ea7b4837
|
/api/migrations/0006_match_expansion.py
|
6d76c5f3773954536724f113207af5736a647ccb
|
[] |
no_license
|
szpone/bg-journal
|
8b46742e2b71db820e5fb1e5f690c0362586661e
|
80a3570414773daf34458ca068d051cbfe29a8b7
|
refs/heads/master
| 2022-05-05T06:10:33.957525
| 2019-05-02T17:11:48
| 2019-05-02T17:11:48
| 165,717,971
| 0
| 1
| null | 2022-04-22T21:07:23
| 2019-01-14T19:04:02
|
Python
|
UTF-8
|
Python
| false
| false
| 499
|
py
|
# Generated by Django 2.1.7 on 2019-03-18 20:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional ``expansion`` foreign key to the Match model."""

    dependencies = [
        ('api', '0005_remove_user_confirm_password'),
    ]

    operations = [
        migrations.AddField(
            model_name='match',
            name='expansion',
            # blank/null so existing Match rows stay valid after migrating.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.Expansion'),
        ),
    ]
|
[
"nikola.adamus@gmail.com"
] |
nikola.adamus@gmail.com
|
cb562f1501ebd70b4953051bffe97d1b3be9ab1f
|
147d0863f4590649a90ea5f78c66974723a87247
|
/api/api_request.py
|
7e3ae65e10c255203d5ebc25a3c87d4874377cbe
|
[] |
no_license
|
jinchuika/ligabot
|
af5bd5443dc0df7d929e7b866869ba075c91db55
|
69544912e1ac46f281ba2fc78ff913d60d9a2a38
|
refs/heads/master
| 2021-01-20T12:50:32.894359
| 2017-05-08T14:07:47
| 2017-05-08T14:07:47
| 90,419,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,788
|
py
|
import requests
import click
from django.conf import settings
from datetime import datetime
class RequestHandler(object):
    """Thin client around the api.football-data.org REST API.

    NOTE(review): ``get_live_scores`` and ``get_team_players`` reference
    ``self.writer`` and ``self.team_names``, which are never assigned in
    this class — presumably set by a subclass or externally; confirm before
    calling those methods.
    """
    BASE_URL = settings.BASE_URL
    API_TOKEN = settings.API_TOKEN
    LIVE_URL = 'http://soccer-cli.appspot.com/'
    verbose = False  # class-level default; instance value set in __init__

    def __init__(self, verbose=False):
        # When True, _get logs the URL and the raw response body.
        self.verbose = verbose

    def _get(self, url):
        """Handles api.football-data.org requests.

        Returns the Response on HTTP 200, otherwise implicitly None.
        """
        if self.verbose:
            print('calling: ' + url)
        req = requests.get(RequestHandler.BASE_URL + url, headers={'X-Auth-Token': RequestHandler.API_TOKEN, 'X-Response-Control': 'minified'})
        if req.status_code == requests.codes.ok:
            if self.verbose:
                print(req.text)
            return req

    def get_live_scores(self, use_12_hour_format):
        """Gets the live scores from the companion live-score service."""
        req = requests.get(RequestHandler.LIVE_URL)
        if req.status_code == requests.codes.ok:
            scores = req.json()
            if len(scores["games"]) == 0:
                click.secho("No live action currently", fg="red", bold=True)
                return
            # NOTE(review): self.writer is not defined in this class.
            self.writer.live_scores(scores, use_12_hour_format)
        else:
            click.secho("There was problem getting live scores", fg="red", bold=True)

    def get_team_scores(self, team_id, time=7, show_upcoming=False, use_12_hour_format=False):
        """Queries the API and gets the particular team scores.

        :param time: number of days to look back (or ahead with show_upcoming)
        :return: list of fixture dicts, or [] when none / no team_id.
        """
        # 'n' = next (upcoming) fixtures, 'p' = past fixtures.
        time_frame = 'n' if show_upcoming else 'p'
        if team_id:
            req = self._get('teams/{team_id}/fixtures?timeFrame={time_frame}{time}'.format(
                team_id=team_id,
                time_frame=time_frame,
                time=time))
            team_scores = req.json()
            if len(team_scores["fixtures"]) != 0:
                return [{
                    'id': fixture['id'],
                    'fecha': fixture['date'],
                    'jornada': fixture['matchday'],
                    'local': fixture['homeTeamName'],
                    'visitante': fixture['awayTeamName'],
                    'gol_local': fixture['result']['goalsHomeTeam'],
                    'gol_visitante': fixture['result']['goalsAwayTeam'],
                    'estado': fixture["status"]
                } for fixture in team_scores['fixtures']]
            else:
                return []

    def get_standings(self, league_id):
        """Queries the API and gets the standings for a particular league."""
        req = self._get('competitions/{id}/leagueTable'.format(id=league_id))
        return [{
            'rank': team["rank"],
            'teamId': team["teamId"],
            'teamName': team["team"],
            'playedGames': team["playedGames"],
            'goals': team["goals"],
            'goalsAgainst': team["goalsAgainst"],
            'goalDifference': team["goalDifference"],
            'points': team["points"]
        } for team in req.json()['standing']]

    def get_league_scores(self, league_id, time=7, show_upcoming=False, use_12_hour_format=False):
        """
        Queries the API and fetches the scores for fixtures
        based upon the league and time parameter.

        :return: list of fixture dicts; [] when none or no league_id.
        """
        time_frame = 'n' if show_upcoming else 'p'
        if league_id:
            req = self._get('competitions/{league_id}/fixtures?timeFrame={time_frame}{time}'.format(
                league_id=league_id,
                time_frame=time_frame,
                time=time))
            fixtures_results = req.json()
            # no fixtures in the past week. display a help message and return
            if len(fixtures_results["fixtures"]) != 0:
                return [{
                    'id': fixture['id'],
                    'fecha': fixture['date'],
                    'jornada': fixture['matchday'],
                    'local': fixture['homeTeamName'],
                    'local_id': fixture['homeTeamId'],
                    'visitante_id': fixture['awayTeamId'],
                    'visitante': fixture['awayTeamName'],
                    'gol_local': fixture['result']['goalsHomeTeam'],
                    'gol_visitante': fixture['result']['goalsAwayTeam'],
                    'estado': fixture["status"]
                } for fixture in fixtures_results['fixtures']]
            else:
                return []
        else:
            # When no league specified. Print all available in time frame.
            return []

    def get_team_players(self, team):
        """
        Queries the API and fetches the players
        for a particular team.

        NOTE(review): relies on self.team_names and self.writer, neither of
        which is assigned anywhere in this class.
        """
        team_id = self.team_names.get(team, None)
        req = self._get('teams/{team_id}/players'.format(team_id=team_id))
        team_players = req.json()
        if int(team_players["count"]) == 0:
            click.secho("No players found for this team", fg="red", bold=True)
        else:
            self.writer.team_players(team_players)

    def get_leagues(self, season=None):
        """List competitions for a season (defaults to the current year)."""
        if not season:
            season = datetime.now().year
        req = self._get('competitions/?season={season}'.format(season=season))
        competition_list = req.json()
        return [{
            'id': competition['id'],
            'caption': competition['caption'],
            'league': competition['league'],
            'year': competition['year'],
            'numberOfTeams': competition['numberOfTeams'],
            'numberOfGames': competition['numberOfGames'],
            'numberOfMatchdays': competition['numberOfMatchdays'],
            'currentMatchday': competition['currentMatchday'],
            'lastUpdated': competition['lastUpdated'],
        } for competition in competition_list]

    def get_league_info(self, league_id):
        """Fetch a single competition's metadata as a dict."""
        req = self._get('competitions/{league_id}/'.format(league_id=league_id))
        competition = req.json()
        return {
            'id': competition['id'],
            'caption': competition['caption'],
            'league': competition['league'],
            'year': competition['year'],
            'numberOfTeams': competition['numberOfTeams'],
            'numberOfGames': competition['numberOfGames'],
            'numberOfMatchdays': competition['numberOfMatchdays'],
            'currentMatchday': competition['currentMatchday'],
            'lastUpdated': competition['lastUpdated'],
        }

    def get_league_teams(self, league_id):
        """List the teams of a competition (entries without 'id' are skipped)."""
        req = self._get('competitions/{league_id}/teams'.format(league_id=league_id))
        team_list = req.json()
        return [{
            'id': team['id'],
            'name': team['name'],
            'short_name': team['shortName'],
            'squad_market_value': team['squadMarketValue'],
            'crest_url': team['crestUrl'],
        } for team in team_list['teams'] if 'id' in team]
|
[
"jinchuika@gmail.com"
] |
jinchuika@gmail.com
|
f75f33d950309ba2333f6b2ace38e72f6bf95b7c
|
65675a487fee2ff9651675ae6a09c8d62682c2a4
|
/23b.py
|
e44397b4dee9ab0ef0746d70edcb656548770bd5
|
[] |
no_license
|
Abarn279/advent-of-code-2020
|
e132f4b04ee1b777ddc00bb97322f707a72c86e0
|
ea840ee1e7a8cafedfe6f0b9f3e64a2e8b6f0c80
|
refs/heads/master
| 2023-02-04T21:11:06.771592
| 2020-12-26T02:37:25
| 2020-12-26T02:37:25
| 317,412,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
node_dict = {}
class SLLNode:
    ''' Singly linked list node for a circular "cups" ring.

    Every node registers itself in the module-level ``node_dict`` keyed by
    its value, giving O(1) value-to-node lookup. The modulo-1,000,000
    arithmetic in find_destination assumes labels are 1..1,000,000
    (the puzzle's cup count — see the driver script below).
    '''
    def __init__(self, nxt=None, data=None):
        # With no successor given, the node points at itself (1-element ring).
        if nxt is None:
            self.next = self
        else:
            self.next = nxt
        self.data = data
        node_dict[self.data] = self

    def insert_after(self, other_val):
        # Create a new node holding other_val directly after this one.
        current_next = self.next
        self.next = SLLNode(current_next, other_val)
        return self.next

    def insert_bulk_after(self, ary):
        # Splice an already-linked chain of nodes in directly after this one.
        curr_next = self.next
        self.next = ary[0]
        ary[-1].next = curr_next

    def remove_range_after(self, amt):
        ''' Remove amt of nodes, after this one, returning the resulting array.

        The removed chain stays internally linked; its tail's next is cut.
        '''
        nodes = []
        to_remove = self.next
        for n in range(amt):
            nodes.append(to_remove)
            to_remove = to_remove.next
        self.next = nodes[-1].next
        nodes[-1].next = None
        return nodes

    def find_destination(self, t = None):
        # Default: the node labelled one less than this one, wrapping
        # from 1 back to 1,000,000.
        if t is None:
            t = ((self.data - 2) % 1000000) + 1
        else:
            t = ((t - 1) % 1000000) + 1
        return node_dict[t]

    def get_order(self):
        # Concatenate every label after node 1, going around the ring.
        o = self.next
        while o.data != 1:
            o = o.next
        c = o.next
        s = ""
        while c is not o:
            s += str(c.data)
            c = c.next
        return s

    def __eq__(self, other):
        # Nodes compare by stored value only (used by `destination in removed`).
        return self.data == other.data

    def __repr__(self):
        return str(self.data)
# My input
inp = '318946572'

# Build the circular list: the 9 puzzle digits followed by 10..1,000,000.
current = SLLNode(None, int(inp[0]))
nxt = current
for n in inp[1:]:
    nxt = nxt.insert_after(int(n))
for n in range(10, 1000001):
    nxt = nxt.insert_after(n)

# Play 10,000,000 moves: pick up the 3 cups after the current one, find the
# destination cup (current label - 1, skipping picked-up cups, wrapping),
# splice the 3 back in after it, then advance.
for move in range(10000000):
    removed = current.remove_range_after(3)
    destination = current.find_destination()
    while destination in removed:
        destination = current.find_destination(destination.data - 1)
    destination.insert_bulk_after(removed)
    current = current.next

# Answer: product of the two cup labels immediately after cup 1.
print(node_dict[1].next.data * node_dict[1].next.next.data)
|
[
"Abarn279@gmail.com"
] |
Abarn279@gmail.com
|
dba760a081168b07da49364f0d7449d2b7849238
|
b6b380e6f5353dba2256211033cebec638dffe4a
|
/packages/fuego/fuego/serialization/chemkin/unpickle/parsers/Species.py
|
cb4d4a0b1bd9d21462be0519db57925920608495
|
[] |
no_license
|
danse-inelastic/pyre-all
|
0ddf640b68f6089e40345e9a8e20562a8b035b3c
|
59cc235b6481586c58415535bbec660470218e31
|
refs/heads/master
| 2021-01-18T12:31:27.905459
| 2015-04-26T04:57:46
| 2015-04-26T04:57:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2007 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from BaseParser import BaseParser
class Species(BaseParser):
    """Parser for the SPECIES section of a CHEMKIN mechanism file.

    Python 2 code (uses ``except Exc, var`` syntax) — keep as-is.
    """

    # the interesting tokens

    def aSpeciesName(self, token):
        # Register the species with the mechanism; duplicates are demoted
        # to warnings rather than aborting the parse.
        try:
            species = self._mechanism.newSpecies(token.name, self.locator())
        except self._mechanism.DuplicateSpecies, msg:
            self.onWarning(str(msg), self.locator())
        return 0

    # transitions

    def aSpeciesSection(self, token):
        # Entry point: scan the section body with the species scanner.
        self._info.log("species parser: section start")
        self._parse(self._scanner, self._tokenizer)
        return 0

    # other methods

    def __init__(self, mechanism, tokenizer):
        import pyre  # NOTE(review): imported but unused here
        BaseParser.__init__(self, mechanism)
        self._tokenizer = tokenizer
        import fuego
        self._scanner = fuego.serialization.chemkin.unpickle.scanners.species()
        return
# version
__id__ = "$Id: Species.py,v 1.1.1.1 2007-09-13 18:17:31 aivazis Exp $"
#
# End of file
|
[
"michael.aivazis@gmail.com"
] |
michael.aivazis@gmail.com
|
f7debe817af0afd601474afec973beb67886808b
|
f9183ce2308090dbb6a8c2f5d96c17c56a8ca768
|
/main/forms.py
|
c9618dd619531607d80ecd51fc59f05971ac3664
|
[] |
no_license
|
asad2200/UrlShortener
|
a9b0e0f5cce203dd2bcc1244de7feb99588c6c71
|
055d83d5a1bbf9628a8f045d152dc85c58e9460f
|
refs/heads/master
| 2023-05-13T23:44:09.748618
| 2021-05-29T10:56:17
| 2021-05-29T10:56:17
| 371,616,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
from django import forms
from .models import URL
class URLForm(forms.ModelForm):
    """ModelForm for creating a URL entry (name + original url)."""

    class Meta:
        model = URL
        fields = ["name", "url"]
        widgets = {
            # Render the url field as a small textarea instead of a
            # single-line input.
            "url": forms.Textarea(attrs={"rows": 2, "cols": 5}),
        }
|
[
"asadjakhavala92@gmail.com"
] |
asadjakhavala92@gmail.com
|
d7a25d94dee5bb5c016aa6033dc187cfe73cf882
|
40f4908483b98fc4f370ff4f2d520e1284d045b3
|
/phase02/immortals_repo/harness/pymmortals/generated/com/securboration/immortals/ontology/analysis/profiling/simpleresourcedependencyassertion.py
|
fd72bc793c259cc64cb2bc4289b0667e7140091c
|
[] |
no_license
|
TF-185/bbn-immortals
|
7f70610bdbbcbf649f3d9021f087baaa76f0d8ca
|
e298540f7b5f201779213850291337a8bded66c7
|
refs/heads/master
| 2023-05-31T00:16:42.522840
| 2019-10-24T21:45:07
| 2019-10-24T21:45:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
from pymmortals.datatypes.serializable import Serializable
from pymmortals.generated.com.securboration.immortals.ontology.core.resource import Resource
from pymmortals.generated.com.securboration.immortals.ontology.measurement.codeunitpointer import CodeUnitPointer
from typing import Type
# noinspection PyPep8Naming
class SimpleResourceDependencyAssertion(Serializable):
    """Generated DTO asserting that a code unit depends on a resource type."""
    _validator_values = dict()
    _types = dict()

    def __init__(self,
                 codeUnit: CodeUnitPointer = None,
                 dependency: Type[Resource] = None):
        super().__init__()
        # Pointer to the code unit making the dependency.
        self.codeUnit = codeUnit
        # The Resource subclass being depended upon (a class, not an instance).
        self.dependency = dependency
|
[
"awellman@bbn.com"
] |
awellman@bbn.com
|
f57d10cb23fa6300616fe2080588f7d3c6404adb
|
190072bc404751d83e5aceb99a34ccba1067caae
|
/photobot/examples/Layer_function_select.py
|
6437d18ae66b047371f687e0ca0497d59b8a25ed
|
[
"MIT"
] |
permissive
|
karstenw/Library
|
ab751bde79bb0bd2bd7f705901dab415ba154476
|
9c3f665be4988c14d939d28e7729c72819bba446
|
refs/heads/master
| 2023-08-14T04:53:15.559747
| 2023-07-16T12:27:19
| 2023-07-16T12:27:19
| 46,520,062
| 0
| 0
| null | 2023-05-18T14:06:29
| 2015-11-19T21:00:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,223
|
py
|
import sys, os

# need a different name
import random as rnd

import pprint
pp = pprint.pprint
import pdb

# Debug switch: when non-zero, random choices are seeded (repeatable runs).
kwdbg = 0

# Canvas dimensions and derived tile widths.
W, H = 542, 1050
fullwidth = int(W-20)
tilewidth = int((fullwidth-10) / 2.0)

# check for Nodebox: inside Nodebox the global _ctx exists.
NB = True
try:
    _ctx
except(NameError):
    NB = False

if NB:
    size(W, H)
    pb = ximport("photobot")
else:
    # Standalone run: import photobot as a normal module.
    WIDTH, HEIGHT = W, H
    import photobot as pb

import imagewells

if kwdbg:
    # make random choices repeatable for debugging
    rnd.seed(8)

# Load the image well and pick two random landscape images.
imagewell = imagewells.loadImageWell(resultfile="imagewell-files")
tiles = imagewell['landscape']
rnd.shuffle(tiles)

# pick 2 images
img1path = tiles.pop()
img2path = tiles.pop()

# create a gray canvas
c = pb.canvas( WIDTH, HEIGHT)
c.fill( (192, 192, 192) )

#
# Image 1
#
_, filename = os.path.split( img1path )

# create, scale and place the image
x, y = 10, 10
img1, w1, h1 = pb.placeImage(c, img1path, x, y, WIDTH-20, "Image 1 Base")
c.top.autocontrast(cutoff=0)
pb.label(c, filename, x, y)

#
# Image 2: duplicate layer 1, select a polygonal region and move it below.
#
c.layers[img1].duplicate()
path=( (w1/2,0), (w1,int(h1*0.667)), (w1/2.0, h1), (0,h1*0.75),(0,h1/2) )
c.top.select( path )
x, y = 10, h1+20+10
c.top.translate( x, y)

# draw the result
c.draw(name="Layer_function_select")
|
[
"karstenwo@web.de"
] |
karstenwo@web.de
|
19a267b88eeda5563af6a304dcbd755284124dfc
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_84/177.py
|
db6951d5ed962140a11a025f300265217eb10a9c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
# coding: shift-jis
# Python 2 solution: tile every '#' region with 2x2 '/\' diagonal squares.
# For each unprocessed '#', its 2x2 neighbourhood must be all '#';
# otherwise the case is impossible.
import sys
f = file(sys.argv[1])
test_cnt = int(f.readline())
for case in range(1, test_cnt+1):
    V, H = map(int, f.readline().split())
    # Grid rows as character lists (trailing newline stripped).
    row = [list(f.readline()[:-1]) for _ in range(V) ]
    ret = True
    for v in range(V):
        for h in range(H):
            if row[v][h] == '#':
                # A '#' on the last row/column cannot start a 2x2 square.
                if v == V-1 or h == H-1:
                    ret = False
                    break
                if row[v][h+1] != '#' or row[v+1][h] != '#' or row[v+1][h+1]!='#':
                    ret = False
                    break
                # Replace the 2x2 block with the diagonal-square pattern.
                row[v][h] = '/'
                row[v][h+1] = '\\'
                row[v+1][h] = '\\'
                row[v+1][h+1] = '/'
    print 'Case #%d:'%case
    if ret:
        for r in row:
            print reduce(lambda a,b:a+b, r)
    else:
        print 'Impossible'
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
ed9a9a16fa606dc3a0b92e7d93dbca7f3237abe1
|
7101871e7a82d202483ada3053fec155ce7824a6
|
/test/functional/sapling_wallet_send.py
|
39c6d0a107af083c9d58aa278ac3ef4d18f9ad78
|
[
"MIT"
] |
permissive
|
trumpcoinsupport/TrumpCoin
|
633a9992e46cab00774d01e569f4611b7f6b4b54
|
098c62ea249a63ca1cc31d5f37c6209ccdf50e2a
|
refs/heads/master
| 2023-01-11T20:22:03.469608
| 2021-12-31T10:04:39
| 2021-12-31T10:04:39
| 194,952,065
| 15
| 14
|
MIT
| 2023-01-08T02:49:09
| 2019-07-03T00:24:45
|
C++
|
UTF-8
|
Python
| false
| false
| 3,831
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Zcash developers
# Copyright (c) 2020 The TrumpCoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from decimal import Decimal
from test_framework.test_framework import TrumpCoinTestFramework
from test_framework.util import (
assert_equal,
)
class SaplingWalletSend(TrumpCoinTestFramework):
    """Functional test: subtract-fee-from-recipient semantics for shield
    (Sapling) sends across sendtoaddress / shieldsendmany / sendmany."""

    def set_test_params(self):
        # 3 nodes on a fresh chain, all with the v5 shield upgrade at block 201.
        self.num_nodes = 3
        self.setup_clean_chain = True
        saplingUpgrade = ['-nuparams=v5_shield:201']
        self.extra_args = [saplingUpgrade, saplingUpgrade, saplingUpgrade]

    def run_test(self):
        self.log.info("Mining...")
        # Mine past the shield activation height (block 201).
        self.nodes[0].generate(2)
        self.sync_all()
        self.nodes[2].generate(200)
        self.sync_all()
        assert_equal(self.nodes[1].getblockcount(), 202)
        taddr1 = self.nodes[1].getnewaddress()
        saplingAddr1 = self.nodes[1].getnewshieldaddress()

        # Verify addresses
        assert(saplingAddr1 in self.nodes[1].listshieldaddresses())
        assert_equal(self.nodes[1].getshieldbalance(saplingAddr1), Decimal('0'))
        assert_equal(self.nodes[1].getreceivedbyaddress(taddr1), Decimal('0'))

        # Test subtract fee from recipient
        self.log.info("Checking sendto[shield]address with subtract-fee-from-amt")
        node_0_bal = self.nodes[0].getbalance()
        node_1_bal = self.nodes[1].getbalance()
        txid = self.nodes[0].sendtoaddress(saplingAddr1, 10, "", "", True)
        node_0_bal -= Decimal('10')
        assert_equal(self.nodes[0].getbalance(), node_0_bal)
        self.sync_mempools()
        self.nodes[2].generate(1)
        self.sync_all()
        # Recipient receives 10 minus the fee (fee is negative in gettransaction).
        feeTx = self.nodes[0].gettransaction(txid)["fee"] # fee < 0
        saplingAddr1_bal = (Decimal('10') + feeTx)
        node_1_bal += saplingAddr1_bal
        assert_equal(self.nodes[1].getbalance(), node_1_bal)

        self.log.info("Checking shieldsendmany with subtract-fee-from-amt")
        node_2_bal = self.nodes[2].getbalance()
        recipients1 = [{"address": saplingAddr1, "amount": Decimal('10')},
                       {"address": self.nodes[0].getnewshieldaddress(), "amount": Decimal('5')}]
        # Only saplingAddr1's output pays the fee here.
        subtractfeefrom = [saplingAddr1]
        txid = self.nodes[2].shieldsendmany("from_transparent", recipients1, 1, 0, subtractfeefrom)
        node_2_bal -= Decimal('15')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        self.nodes[2].generate(1)
        self.sync_all()
        feeTx = self.nodes[2].gettransaction(txid)["fee"] # fee < 0
        node_1_bal += (Decimal('10') + feeTx)
        saplingAddr1_bal += (Decimal('10') + feeTx)
        assert_equal(self.nodes[1].getbalance(), node_1_bal)
        node_0_bal += Decimal('5')
        assert_equal(self.nodes[0].getbalance(), node_0_bal)

        self.log.info("Checking sendmany to shield with subtract-fee-from-amt")
        node_2_bal = self.nodes[2].getbalance()
        txid = self.nodes[2].sendmany('', {saplingAddr1: 10, taddr1: 10},
                                      1, "", False, [saplingAddr1, taddr1])
        node_2_bal -= Decimal('20')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        self.nodes[2].generate(1)
        self.sync_all()
        feeTx = self.nodes[2].gettransaction(txid)["fee"] # fee < 0
        node_1_bal += (Decimal('20') + feeTx)
        assert_equal(self.nodes[1].getbalance(), node_1_bal)
        # Fee is split evenly between the two subtract-from recipients.
        taddr1_bal = Decimal('10') + feeTx/2
        saplingAddr1_bal += Decimal('10') + feeTx / 2
        assert_equal(self.nodes[1].getreceivedbyaddress(taddr1), taddr1_bal)
        assert_equal(self.nodes[1].getshieldbalance(saplingAddr1), saplingAddr1_bal)
|
[
"sebgruby@gmail.com"
] |
sebgruby@gmail.com
|
d96f1e56f4b8aa0ac94be4330d3f7524cc14c3a7
|
ffd19240effa4f50b8469432d6ad2078e6b0db7d
|
/app/models.py
|
2e9fa0ba6b2c20f730a447db3c7f950342cbe5c6
|
[] |
no_license
|
Jackson-coder-arch/Studio-session-booker
|
98e26ca1ef7953b81562884b4306becde097a47c
|
de20432fa3bb0660c7499efd5dd0917f0218670b
|
refs/heads/features
| 2023-03-21T11:42:48.043138
| 2021-03-12T08:49:34
| 2021-03-12T08:49:34
| 345,683,743
| 0
| 0
| null | 2021-03-12T08:49:35
| 2021-03-08T14:27:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,749
|
py
|
from app import db
from flask_login import UserMixin
from datetime import datetime
from werkzeug.security import generate_password_hash,check_password_hash
from . import login_manager
class User(UserMixin,db.Model):
    """Application user with hashed password and related bookings."""
    __tablename__ = 'users'
    id = db.Column(db.Integer,primary_key = True)
    username = db.Column(db.String(255),index =True)
    email = db.Column(db.String(255),unique = True,index =True)
    # Werkzeug password hash; never stores the plaintext password.
    pass_secure = db.Column(db.String(255))
    bio = db.Column(db.String(255))
    profile_pic_path = db.Column(db.String())
    bookings = db.relationship('Booking',backref = 'user', lazy = 'dynamic')

    @property
    def password(self):
        # Write-only attribute: reading it is always an error.
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        # Store only the salted hash of the given plaintext.
        self.pass_secure = generate_password_hash(password)

    # Registered with Flask-Login to load a user from the session id.
    # NOTE(review): conventionally defined at module level, but works here
    # since the decorator runs at class-body execution time.
    @login_manager.user_loader
    def load_user(user_id):
        return User.query.get(int(user_id))

    def verify_password(self, password):
        """Check a plaintext password against the stored hash."""
        return check_password_hash(self.pass_secure, password)

    def save_user(self):
        """Persist this user in the current database session."""
        db.session.add(self)
        db.session.commit()

    def __repr__(self):
        return f'User {self.username} '
class Booking(db.Model):
    """A studio-session booking made by a user (day + session + category)."""
    __tablename__ = 'bookings'
    id = db.Column(db.Integer,primary_key = True)
    # email = db.Column(db.String(255),unique = True,index =True)
    title = db.Column(db.String(255))
    day = db.Column(db.String(255))
    session = db.Column(db.String(255))
    category = db.Column(db.String(255))
    # Owning user; back-populated via User.bookings.
    user_id = db.Column(db.Integer,db.ForeignKey('users.id'))

    def save_booking(self):
        """Persist this booking in the current database session."""
        db.session.add(self)
        db.session.commit()

    def __repr__(self):
        return f'Booking {self.day} '
|
[
"jacksonikonya@gmail.com"
] |
jacksonikonya@gmail.com
|
6ca7eee9d0ea8b564e470641f42e79dd6d3c8de4
|
35d3bd909cc232b51496b8b07971386305bbc769
|
/sitemessage/settings.py
|
649d4678202125f4fcafd413a1db551e4a45344d
|
[] |
no_license
|
shtalinberg/django-sitemessage
|
772810beae344529867df2b58e873a04dc6b5c93
|
dc1a5312316c5d0269380c1f80752437c7a1d6eb
|
refs/heads/master
| 2021-01-22T05:57:43.991672
| 2017-04-15T04:33:30
| 2017-04-15T04:33:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
"""Default settings for sitemessage.

Every value below can be overridden in the project's Django settings module
via the ``SITEMESSAGE_``-prefixed name passed to ``getattr``.
"""
from django.conf import settings

# Module name to search sitemessage preferences in.
APP_MODULE_NAME = getattr(settings, 'SITEMESSAGE_APP_MODULE_NAME', 'sitemessages')

# Whether to register builtin message types.
INIT_BUILTIN_MESSAGE_TYPES = getattr(settings, 'SITEMESSAGE_INIT_BUILTIN_MESSAGE_TYPES', True)

# Priority for messages sent by Django Email backend (sitemessage.backends.EmailBackend).
EMAIL_BACKEND_MESSAGES_PRIORITY = getattr(settings, 'SITEMESSAGE_EMAIL_BACKEND_MESSAGES_PRIORITY', None)

# Message type alias for messages sent `schedule_email` shortcut.
DEFAULT_SHORTCUT_EMAIL_MESSAGES_TYPE = getattr(settings, 'SITEMESSAGE_DEFAULT_SHORTCUT_EMAIL_MESSAGES_TYPE', 'smtp')

# Site URL to use in messages.
SITE_URL = getattr(settings, 'SITEMESSAGE_SITE_URL', None)
|
[
"idlesign@yandex.ru"
] |
idlesign@yandex.ru
|
6a9727a84a58a3c17a257bafd64c3423e263ac0a
|
9a93a4d9e8d7424ccc3947ed8486083b815c5276
|
/websockets/exceptions.py
|
1b758c648ad74be77879f58c0bf6c315f1664f94
|
[
"BSD-3-Clause"
] |
permissive
|
MariaElysse/websockets
|
de40f7dea8fa26c5f29a0cc2bf41d78c1acd2ac8
|
4216b35384c177981c4d18d763248c712b8e21d4
|
refs/heads/master
| 2020-03-26T19:26:29.171235
| 2018-08-11T10:16:12
| 2018-08-11T10:16:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,462
|
py
|
__all__ = [
'AbortHandshake', 'ConnectionClosed', 'DuplicateParameter',
'InvalidHandshake', 'InvalidHeader', 'InvalidHeaderFormat',
'InvalidHeaderValue', 'InvalidMessage', 'InvalidOrigin',
'InvalidParameterName', 'InvalidParameterValue', 'InvalidState',
'InvalidStatusCode', 'InvalidUpgrade', 'InvalidURI', 'NegotiationError',
'PayloadTooBig', 'WebSocketProtocolError',
]
class InvalidHandshake(Exception):
"""
Exception raised when a handshake request or response is invalid.
"""
class AbortHandshake(InvalidHandshake):
    """
    Exception raised to abort a handshake and return a HTTP response.
    """
    def __init__(self, status, headers, body=b''):
        self.status = status
        self.headers = headers
        self.body = body
        # Summarize the response in the exception message.
        super().__init__("HTTP {}, {} headers, {} bytes".format(
            status, len(headers), len(body)))
class InvalidMessage(InvalidHandshake):
"""
Exception raised when the HTTP message in a handshake request is malformed.
"""
class InvalidHeader(InvalidHandshake):
    """
    Exception raised when a HTTP header doesn't have a valid format or value.
    """
    def __init__(self, name, value):
        # A falsy value means the header was absent or empty.
        message = (
            "Invalid {} header: {}".format(name, value)
            if value
            else "Missing or empty {} header".format(name)
        )
        super().__init__(message)
class InvalidHeaderFormat(InvalidHeader):
    """
    Exception raised when a Sec-WebSocket-* HTTP header cannot be parsed.
    """
    def __init__(self, name, error, string, pos):
        # Embed the parse position and offending string in the value part.
        super().__init__(name, "{} at {} in {}".format(error, pos, string))
class InvalidHeaderValue(InvalidHeader):
"""
Exception raised when a Sec-WebSocket-* HTTP header has a wrong value.
"""
class InvalidUpgrade(InvalidHeader):
"""
Exception raised when a Upgrade or Connection header isn't correct.
"""
class InvalidOrigin(InvalidHeader):
"""
Exception raised when the Origin header in a request isn't allowed.
"""
def __init__(self, origin):
super().__init__('Origin', origin)
class InvalidStatusCode(InvalidHandshake):
    """
    Exception raised when a handshake response status code is invalid.

    Provides the integer status code in its ``status_code`` attribute.
    """
    def __init__(self, status_code):
        self.status_code = status_code
        super().__init__("Status code not 101: {}".format(status_code))
class NegotiationError(InvalidHandshake):
"""
Exception raised when negotiating an extension fails.
"""
class InvalidParameterName(NegotiationError):
    """
    Exception raised when a parameter name in an extension header is invalid.
    """
    def __init__(self, name):
        self.name = name
        super().__init__("Invalid parameter name: {}".format(name))
class InvalidParameterValue(NegotiationError):
    """
    Exception raised when a parameter value in an extension header is invalid.
    """
    def __init__(self, name, value):
        self.name = name
        self.value = value
        super().__init__(
            "Invalid value for parameter {}: {}".format(name, value))
class DuplicateParameter(NegotiationError):
    """
    Exception raised when a parameter name is repeated in an extension header.
    """
    def __init__(self, name):
        self.name = name
        super().__init__("Duplicate parameter: {}".format(name))
class InvalidState(Exception):
"""
Exception raised when an operation is forbidden in the current state.
"""
# Human-readable labels for the pre-defined close codes (RFC 6455, 7.4.1).
CLOSE_CODES = {
    1000: "OK",
    1001: "going away",
    1002: "protocol error",
    1003: "unsupported type",
    # 1004 is reserved
    1005: "no status code [internal]",
    1006: "connection closed abnormally [internal]",
    1007: "invalid data",
    1008: "policy violation",
    1009: "message too big",
    1010: "extension required",
    1011: "unexpected error",
    1015: "TLS failure [internal]",
}


def format_close(code, reason):
    """
    Return a human-readable rendering of a close code and reason.
    """
    # Ranges per RFC 6455: 3000-3999 are registered, 4000-4999 private use.
    if 3000 <= code < 4000:
        label = "registered"
    elif 4000 <= code < 5000:
        label = "private use"
    else:
        label = CLOSE_CODES.get(code, "unknown")
    prefix = "code = {} ({}), ".format(code, label)
    if reason:
        return prefix + "reason = {}".format(reason)
    return prefix + "no reason"
class ConnectionClosed(InvalidState):
    """
    Exception raised when trying to read or write on a closed connection.

    The close code and reason are exposed via the ``code`` and ``reason``
    attributes respectively.
    """

    def __init__(self, code, reason):
        self.code = code
        self.reason = reason
        super().__init__(
            "WebSocket connection is closed: " + format_close(code, reason))
class InvalidURI(Exception):
    """
    Exception raised when a URI is not a valid WebSocket URI.
    """
class PayloadTooBig(Exception):
    """
    Exception raised when a frame's payload exceeds the maximum allowed size.
    """
class WebSocketProtocolError(Exception):
    """
    Internal exception raised when the remote endpoint violates the protocol.
    """
|
[
"aymeric.augustin@m4x.org"
] |
aymeric.augustin@m4x.org
|
bdf28fd0c02c0410299165e2313553ae48a9f9ea
|
b92417413ec5b05ca25695de55934ce7072a0f0a
|
/test/test_v1_role.py
|
8276850e9eb6f7e6038ba217a2d6777c01ea27fd
|
[
"Apache-2.0"
] |
permissive
|
detiber/lib_openshift
|
be1f0f1b3eec62c9bbf50a3fcea61303a870c112
|
efea21ce6f67e3d48885c03ae22978c576c0b87d
|
refs/heads/master
| 2021-01-18T04:12:00.820052
| 2016-10-04T03:20:43
| 2016-10-04T03:20:43
| 63,102,761
| 0
| 0
| null | 2016-07-11T21:15:36
| 2016-07-11T21:15:36
| null |
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1_role import V1Role
class TestV1Role(unittest.TestCase):
""" V1Role unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1Role(self):
"""
Test V1Role
"""
model = lib_openshift.models.v1_role.V1Role()
if __name__ == '__main__':
unittest.main()
|
[
"jdetiber@redhat.com"
] |
jdetiber@redhat.com
|
d9b8eb05b29632fdf14022991b75fcc4898142aa
|
76c8a2593316a74078e5ebe3c280d393b058ff67
|
/vai/commands/JoinWithNextLineCommand.py
|
c009849009712410c2f67e842d8f74c84019c1ef
|
[] |
no_license
|
gavd89/vai
|
b7f746c3ba31397e8d85f477af9b9b71d01795fb
|
afa3a31b74ee81f9be8ab2c06cd8bdaebae1baad
|
refs/heads/master
| 2021-01-16T22:04:05.131998
| 2014-10-31T22:35:37
| 2014-10-31T22:35:37
| 26,130,434
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
from .BufferCommand import BufferCommand
from .CommandResult import CommandResult
from ..models.TextDocument import LineMeta
class JoinWithNextLineCommand(BufferCommand):
    """Join the line at the cursor with the following line.

    Returns an unsuccessful CommandResult when the cursor is on the last
    line, since there is no next line to join with.
    """
    def execute(self):
        cursor = self._cursor
        document = self._document
        pos = cursor.pos
        # pos[0] is the line number.  NOTE(review): this comparison assumes
        # pos[0] and numLines() use the same (1-based) numbering -- confirm
        # against TextDocument.
        if pos[0] == document.numLines():
            return CommandResult(success=False, info=None)
        self.saveCursorPos()
        # Snapshot the line's metadata before the join mutates the document.
        line_meta = document.lineMeta(pos[0])
        # Record undo mementos before mutating: the current line is replaced
        # by the joined text, and the following line must be re-inserted on
        # undo (hence MEMENTO_INSERT for pos[0]+1).
        self.saveLineMemento(pos[0], BufferCommand.MEMENTO_REPLACE)
        self.saveLineMemento(pos[0]+1, BufferCommand.MEMENTO_INSERT)
        document.joinWithNextLine(pos[0])
        # Flag the line as modified unless it already carries a change marker.
        if line_meta.get(LineMeta.Change) == None:
            document.updateLineMeta(pos[0], {LineMeta.Change: "modified"})
        return CommandResult(success=True, info=None)
|
[
"stefano.borini@gmail.com"
] |
stefano.borini@gmail.com
|
b78e36b1360b1dd9d552187653a755c3bb26c881
|
35ff4e124ea73cd2630ddf25dfe019b4b4e3f5d6
|
/55_JumpGame/55_JumpGame_3.py
|
01b5f0afae09de958c5980a9bd943ed3ceab4200
|
[] |
no_license
|
H-Cong/LeetCode
|
0a2084a4845b5d7fac67c89bd72a2adf49f90c3d
|
d00993a88c6b34fcd79d0a6580fde5c523a2741d
|
refs/heads/master
| 2023-03-19T15:22:00.971461
| 2021-03-11T00:33:00
| 2021-03-11T00:33:00
| 303,265,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
class Solution:
    def canJump(self, nums: List[int]) -> bool:
        """Return True if the last index of *nums* is reachable.

        Backtracking: from each position, recursively try every jump the
        current offset allows.
        """
        return self.canJumpFromPosition(0, nums)

    def canJumpFromPosition(self, position, nums):
        last = len(nums) - 1
        # Base case: standing on the final index means success.
        if position == last:
            return True
        furthest = min(position + nums[position], last)
        # Short-circuits on the first reachable path, nearest position first
        # (same exploration order as an explicit loop with early return).
        return any(
            self.canJumpFromPosition(nxt, nums)
            for nxt in range(position + 1, furthest + 1)
        )
# TC: O(2^n)
# There are 2^n (upper bound) ways of jumping from the first position to the last,
# where nn is the length of array nums.
# SC: O(n)
# Recursion requires additional memory for the stack frames.
|
[
"nych1989@gmail.com"
] |
nych1989@gmail.com
|
ef301480c09e8bf0f702faabb05d320f96d1726c
|
8b3ca44ee3d990233e74655b7131d616094f70c2
|
/experiments/cross_validation/movielens_1M/gaussian_gaussian_wishart.py
|
77b129a3b7aae4cbfca58736af054f4dfb1902a5
|
[] |
no_license
|
zshwuhan/BMF_Priors
|
8b8c54271285a72d2085a56a9475c0756f375e67
|
6a600da1c41f1ccde2f2ba99298b40e68fb9910a
|
refs/heads/master
| 2021-05-13T19:10:07.203215
| 2017-12-01T13:30:21
| 2017-12-01T13:30:21
| 116,883,181
| 1
| 0
| null | 2018-01-09T23:36:13
| 2018-01-09T23:36:13
| null |
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
'''
Run nested cross-validation experiment on the MovieLens 1M dataset, with
the All Gaussian model (multivariate posterior) and Wishart prior.
'''
# Machine-specific path to the libraries checkout; the commented alternative
# is the other machine this experiment ran on.
project_location = "/Users/thomasbrouwer/Documents/Projects/libraries/" # "/home/tab43/Documents/Projects/libraries/" #
import sys
sys.path.append(project_location)
from BMF_Priors.code.models.bmf_gaussian_gaussian_wishart import BMF_Gaussian_Gaussian_Wishart
from BMF_Priors.code.cross_validation.nested_matrix_cross_validation import MatrixNestedCrossValidation
from BMF_Priors.data.movielens.load_data import load_processed_movielens_1M
''' Settings BMF model. '''
method = BMF_Gaussian_Gaussian_Wishart
# R is the ratings matrix, M its observation mask -- per load_data's return.
R, M = load_processed_movielens_1M()
hyperparameters = { 'alpha':1., 'beta':1., 'mu0':0., 'beta0':1., 'W0':1. }
train_config = {
    'iterations' : 120,
    'init' : 'random',
}
# burn_in/thinning select which training iterations contribute to prediction
# (presumably posterior samples -- see the model class for exact semantics).
predict_config = {
    'burn_in' : 100,
    'thinning' : 1,
}
''' Settings nested cross-validation. '''
# Inner loop searches over these latent dimensionalities K.
K_range = [12,13,14]
no_folds = 5
no_threads = 5
parallel = False
folder_results = './results/gaussian_gaussian_wishart/'
output_file = folder_results+'results.txt'
# One per-fold output file for the nested (inner) performances.
files_nested_performances = [folder_results+'fold_%s.txt'%(fold+1) for fold in range(no_folds)]
''' Construct the parameter search. '''
parameter_search = [{'K':K, 'hyperparameters':hyperparameters} for K in K_range]
''' Run the cross-validation framework. '''
nested_crossval = MatrixNestedCrossValidation(
    method=method,
    R=R,
    M=M,
    K=no_folds,
    P=no_threads,
    parameter_search=parameter_search,
    train_config=train_config,
    predict_config=predict_config,
    file_performance=output_file,
    files_nested_performances=files_nested_performances,
)
nested_crossval.run(parallel=parallel)
|
[
"tab43@cam.ac.uk"
] |
tab43@cam.ac.uk
|
d0a7aeff905f45c9098ea9c161be390f6f6400d6
|
00b24ff5ec169210b1b7cce53b621cbc0ee0fe40
|
/migrations/versions/e812a221262e_initialized_database.py
|
5944c31f6d72464d38c440607572aeca5fe9a83d
|
[] |
no_license
|
carter3689/fakebook-march
|
1242c052fa51826f56aeb187cfdf41e0464ca4f8
|
41c2c388e0f19d849eef4572a13fcdffb41d3de4
|
refs/heads/main
| 2023-05-04T02:59:00.245789
| 2021-05-18T16:28:38
| 2021-05-18T16:28:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
"""initialized database
Revision ID: e812a221262e
Revises:
Create Date: 2021-04-26 11:24:10.910838
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e812a221262e'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the ``post`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('post',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('email', sa.String(length=50), nullable=True),
    sa.Column('image', sa.String(), nullable=True),
    sa.Column('title', sa.String(), nullable=True),
    sa.Column('body', sa.Text(), nullable=True),
    sa.Column('created_on', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``post`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('post')
    # ### end Alembic commands ###
|
[
"derekh@codingtemple.com"
] |
derekh@codingtemple.com
|
d2bda2941a3c280e45de65afc578c06a0a1341f7
|
95f9c734c4bf5de8e5d0adff9ac2cf0228df75ac
|
/django-pro/opweb/opweb/wsgi.py
|
afbe2f475b239ff7eb311f1c9c1e5d1dd89b1289
|
[] |
no_license
|
holen/Python
|
7a996b13ff2224084397223879c380169d47ff8c
|
506fff291d6e9c6f80c30a51cc3b77e9dd048468
|
refs/heads/master
| 2022-12-12T22:12:51.561716
| 2019-10-16T03:08:00
| 2019-10-16T03:08:00
| 14,278,665
| 1
| 0
| null | 2022-12-08T00:51:26
| 2013-11-10T15:29:59
|
Python
|
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
WSGI config for opweb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opweb.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[
"yh_zhl@sina.com"
] |
yh_zhl@sina.com
|
15b35b4d0ed83d0b99c75f0e25604fb40d79f538
|
ea6b3b74c8f1ff9333c5d4b06a0e4dd9bbdb3bba
|
/tests/rpc/test_reflect_service.py
|
b7a206e547357b2ef8b7398477b04497c4742ae9
|
[
"MIT"
] |
permissive
|
sgalkina/venom
|
d495d296a388afcb25525491bbbe590bfd258a05
|
e372ab9002e71ba4e2422aabd02143e4f1247dba
|
refs/heads/master
| 2021-01-23T03:27:17.239289
| 2017-03-24T15:05:56
| 2017-03-24T15:05:56
| 86,077,951
| 0
| 0
| null | 2017-03-24T14:40:46
| 2017-03-24T14:40:46
| null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
from unittest import TestCase
from venom.rpc import Service, Venom
from venom.rpc.reflect.service import ReflectService
class ReflectServiceTestCase(TestCase):
def test_service_registration(self):
class BeforeService(Service):
pass
class AfterService(Service):
pass
venom = Venom()
venom.add(BeforeService)
venom.add(ReflectService)
venom.add(AfterService)
self.assertEqual(ReflectService.__manager__.reflect.services, {BeforeService, AfterService, ReflectService})
|
[
"lars@lyschoening.de"
] |
lars@lyschoening.de
|
a19b592b9058d5945dee87f774bc4ee913bbecf1
|
0b5f2442b222da2895cdad06913c3687162f06bb
|
/pyclustering/container/__init__.py
|
27e87524cc9610c785f192f53863d1b4e7d5a005
|
[] |
no_license
|
Kinddle-tick/ML_clustering
|
a765fadde581392de098227b0ee4a9b3572ef24f
|
27f9887cb383d0d1ea0a4a42788eddc2f4c85c67
|
refs/heads/master
| 2023-03-14T18:15:08.350604
| 2021-03-23T07:16:51
| 2021-03-23T07:16:51
| 350,600,754
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
"""!
@brief pyclustering module of data structures (containers).
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
|
[
"h88888888123@163.com"
] |
h88888888123@163.com
|
05932b5eb4efff7df2c4efaadaa8037452a9e61d
|
cd90bbc775cbce9a7e0bc46cbb9437e3961e587f
|
/misc/advent/2017/5/e.py
|
4d998c22b9ab5cb25fc7a904a5b71e05e17ea209
|
[] |
no_license
|
llimllib/personal_code
|
7b3f0483589e2928bf994184e3413f4b887e1f0c
|
4d4662d53e0ac293dea8a4208ccca4a1f272e64a
|
refs/heads/master
| 2023-09-05T04:02:05.075388
| 2023-09-01T12:34:09
| 2023-09-01T12:34:09
| 77,958
| 9
| 16
| null | 2023-08-16T13:54:39
| 2008-11-19T02:04:46
|
HTML
|
UTF-8
|
Python
| false
| false
| 571
|
py
|
def run(cmds):
    """Run the jump-offset interpreter (Advent of Code 2017, day 5 part 2).

    Starting at index 0: read the offset at the current position, adjust it
    in place (offsets >= 3 decrease by one, all others increase by one), then
    jump by the offset that was read.  Prints the number of jumps executed
    before the pointer leaves the list.  *cmds* is mutated in place.
    """
    location = 0
    counter = 0
    size = len(cmds)
    # Explicit bounds check replaces the original bare ``except:`` (which
    # silently swallowed *every* exception, not just the IndexError it
    # relied on) and also counts a jump off the *left* end, which the
    # original missed -- the puzzle counts every executed jump.
    while 0 <= location < size:
        cmd = cmds[location]
        if cmd >= 3:
            cmds[location] -= 1
        else:
            cmds[location] += 1
        location += cmd
        counter += 1
    print(counter)
if __name__=="__main__":
text = open("input.txt").read().strip().split("\n")
cmds = [int(cmd) for cmd in text]
run(cmds)
|
[
"bill@billmill.org"
] |
bill@billmill.org
|
b314afaaabc9bbf3ea4b69fe5f6f89638900efc2
|
04d55063219d484f29bf1a351b87e972b374e9a6
|
/inversetoon/core/light_estimation/light_estimation.py
|
f6a43a8535f4fe512287218f1850a235825d8872
|
[
"MIT"
] |
permissive
|
tody411/InverseToon
|
5530f63d225f91d1c497f3f80f24c4ccf086aa8f
|
bc5b922cae9bbf99ed1f020c93b1577c4747ff92
|
refs/heads/master
| 2020-05-18T13:32:25.285723
| 2015-10-06T02:35:15
| 2015-10-06T02:35:15
| 39,255,745
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
# -*- coding: utf-8 -*-
## @package inversetoon.core.light_estimation
#
# inversetoon.core.light_estimation utility package.
# @author tody
# @date 2015/09/07
import numpy as np
from inversetoon.np.norm import normalizeVector
from inversetoon.core.light_estimation.light_estimation_common import testToon
def estimateLightDir(input_data):
    """Estimate the light direction L from silhouette normals/intensities.

    *input_data* provides ``N_sil`` (normals) and ``I_sil`` (intensities);
    the result is returned as ``{"L": L}``.
    """
    normals = input_data["N_sil"]
    intensities = input_data["I_sil"]

    # Keep only samples with non-negligible intensity.
    mask = intensities > 0.001
    normals = normals[mask]
    intensities = intensities[mask]

    # Treat each intensity as the target dot product N . L.
    L = estimateLightDirProjection(normals, intensities)
    return {"L": L}
def estimateLightDirLstSq(Ns, NdLs):
    """Solve Ns @ L = NdLs for L in the least-squares sense."""
    solution = np.linalg.lstsq(Ns, NdLs)
    return solution[0]
def estimateLightDirProjection(Ns, NdLs):
    """Estimate L by iterated projection onto the constraints N . L = NdL.

    Starts from the normal with the largest target value, then repeatedly
    adjusts L so each sample's dot product matches its target and
    renormalizes.  The 100 sweeps are a fixed iteration budget, not a
    convergence test.
    """
    # Initial guess: the normal whose target dot product is largest.
    I_maxID = np.argmax(NdLs)
    L = Ns[I_maxID]

    # ``range`` instead of the Python-2-only ``xrange`` (NameError on
    # Python 3); behavior is identical on both interpreters.
    for _sweep in range(100):
        for N, NdL in zip(Ns, NdLs):
            NdL_c = np.dot(L, N)
            # Replace L's component along N so that N . L becomes NdL.
            L = L - NdL_c * N + NdL * N
        L = normalizeVector(L)
    return L
if __name__ == '__main__':
testToon("LeastSquare", estimateLightDir)
|
[
"tody411@gmail.com"
] |
tody411@gmail.com
|
050fe8dd60fa24022d363e59407aef735c810440
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/P/protanto/vultus_christi_archive.py
|
a3494ec9528161e0c530bce04e116f028619d6fe
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,560
|
py
|
from datetime import datetime, timedelta
import scraperwiki
import requests
import lxml.html
from lxml.cssselect import CSSSelector as CSS
import dateutil.parser
import dateutil.tz
TARGET = "http://vultus.stblogs.org/archives.html"
HEADERS = {
'User-agent': 'Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11',
}
ROW_KEY = ['year', 'month', 'slug']
ROW_SCHEMA = ROW_KEY + ['title', 'text', 'author', 'date', 'tags']
EDIT_WINDOW = timedelta(days=10)
sel_item = CSS("div.archive-individual li")
sel_anchor = CSS("a")
sel_asset = CSS("#alpha-inner")
sel_author = CSS("div.asset-header span.byline address.author a")
sel_date = CSS("div.asset-header span.byline abbr.published")
sel_text = CSS("div.asset-content")
sel_tags = CSS("div.asset-footer li.entry-category a")
def scrape(url):
    """Fetch *url* (with a browser User-Agent) and return the parsed lxml tree."""
    return lxml.html.fromstring(requests.get(url, headers=HEADERS).text)
def Row(**kwargs):
    """Dict with every ROW_SCHEMA field defaulting to None, overridden by kwargs."""
    row = dict.fromkeys(ROW_SCHEMA)
    row.update(kwargs)
    return row
store = scraperwiki.sqlite.save
parsedate = dateutil.parser.parse
tzlocal = dateutil.tz.tzlocal
# If the scraper has run once successfully, subsequent runs should
# only scrape new pages and pages that are less than ten days old (to
# allow for edits by the author)
historic_latest = scraperwiki.sqlite.get_var('latest')
if historic_latest:
historic_latest = parsedate(historic_latest)
print("Begin scraping archive ten days prior to: %s" % historic_latest.strftime("%Y.%m.%d"))
latest = datetime(year=2000, month=12, day=31, tzinfo=tzlocal())
latest_timestamp = None
# the scraping loop below swallows errors, but the error may have been
# due to a request timeout or similar, so we want to retry those pages
# that don't exist in the database
try:
archive = set(
(d['year'], d['month'], d['slug']) for d in scraperwiki.sqlite.select("year, month, slug FROM pages")
)
except:
archive = set([])
print "PAGE COUNT: %s" % len(archive)
# begin scrape - first the archive index page to get all individual page urls
index = scrape(TARGET)
# go through the list of page links and scrape each one
for li in sel_item(index):
date = li.text.rstrip().rstrip(':').strip()
a = sel_anchor(li)[0]
href = a.get('href')
if href:
year, month, day = map(int, date.split('.'))
slug = href.split('/')[5].partition('.')[0]
if (year, month, slug) in archive and historic_latest:
# don't re-scrape anything outside the ten day edit window
if datetime(year=year, month=month, day=day, tzinfo=tzlocal()) < historic_latest-EDIT_WINDOW:
# you could break here because the list is date-ordered
continue
print("%s - %s - %s" % (date, slug, href))
page = scrape(href)
try:
content = sel_asset(page)[0]
timestamp = sel_date(content)[0].get('title')
date = parsedate(timestamp)
if date > latest:
# there's a new 'latest' timestamp - saved as a variable below
latest = date
latest_timestamp = timestamp
row = Row(year=year, month=month, title=a.text_content(), slug=slug)
row['date'] = date
row['author'] = sel_author(content)[0].text_content()
row['tags'] = ','.join(a.text_content() for a in sel_tags(content))
row['text'] = lxml.html.tostring(sel_text(content)[0])
except Exception, e:
print("Skipping " + href)
print(" ERROR: %s" % e)
continue
#print row
store(unique_keys=ROW_KEY, data=row, table_name="pages")
if latest_timestamp:
scraperwiki.sqlite.save_var('latest', latest_timestamp)
from datetime import datetime, timedelta
import scraperwiki
import requests
import lxml.html
from lxml.cssselect import CSSSelector as CSS
import dateutil.parser
import dateutil.tz
TARGET = "http://vultus.stblogs.org/archives.html"
HEADERS = {
'User-agent': 'Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11',
}
ROW_KEY = ['year', 'month', 'slug']
ROW_SCHEMA = ROW_KEY + ['title', 'text', 'author', 'date', 'tags']
EDIT_WINDOW = timedelta(days=10)
sel_item = CSS("div.archive-individual li")
sel_anchor = CSS("a")
sel_asset = CSS("#alpha-inner")
sel_author = CSS("div.asset-header span.byline address.author a")
sel_date = CSS("div.asset-header span.byline abbr.published")
sel_text = CSS("div.asset-content")
sel_tags = CSS("div.asset-footer li.entry-category a")
def scrape(url):
    """Fetch *url* (with a browser User-Agent) and return the parsed lxml tree."""
    return lxml.html.fromstring(requests.get(url, headers=HEADERS).text)
def Row(**kwargs):
    """Dict with every ROW_SCHEMA field defaulting to None, overridden by kwargs."""
    row = dict.fromkeys(ROW_SCHEMA)
    row.update(kwargs)
    return row
store = scraperwiki.sqlite.save
parsedate = dateutil.parser.parse
tzlocal = dateutil.tz.tzlocal
# If the scraper has run once successfully, subsequent runs should
# only scrape new pages and pages that are less than ten days old (to
# allow for edits by the author)
historic_latest = scraperwiki.sqlite.get_var('latest')
if historic_latest:
historic_latest = parsedate(historic_latest)
print("Begin scraping archive ten days prior to: %s" % historic_latest.strftime("%Y.%m.%d"))
latest = datetime(year=2000, month=12, day=31, tzinfo=tzlocal())
latest_timestamp = None
# the scraping loop below swallows errors, but the error may have been
# due to a request timeout or similar, so we want to retry those pages
# that don't exist in the database
try:
archive = set(
(d['year'], d['month'], d['slug']) for d in scraperwiki.sqlite.select("year, month, slug FROM pages")
)
except:
archive = set([])
print "PAGE COUNT: %s" % len(archive)
# begin scrape - first the archive index page to get all individual page urls
index = scrape(TARGET)
# go through the list of page links and scrape each one
for li in sel_item(index):
date = li.text.rstrip().rstrip(':').strip()
a = sel_anchor(li)[0]
href = a.get('href')
if href:
year, month, day = map(int, date.split('.'))
slug = href.split('/')[5].partition('.')[0]
if (year, month, slug) in archive and historic_latest:
# don't re-scrape anything outside the ten day edit window
if datetime(year=year, month=month, day=day, tzinfo=tzlocal()) < historic_latest-EDIT_WINDOW:
# you could break here because the list is date-ordered
continue
print("%s - %s - %s" % (date, slug, href))
page = scrape(href)
try:
content = sel_asset(page)[0]
timestamp = sel_date(content)[0].get('title')
date = parsedate(timestamp)
if date > latest:
# there's a new 'latest' timestamp - saved as a variable below
latest = date
latest_timestamp = timestamp
row = Row(year=year, month=month, title=a.text_content(), slug=slug)
row['date'] = date
row['author'] = sel_author(content)[0].text_content()
row['tags'] = ','.join(a.text_content() for a in sel_tags(content))
row['text'] = lxml.html.tostring(sel_text(content)[0])
except Exception, e:
print("Skipping " + href)
print(" ERROR: %s" % e)
continue
#print row
store(unique_keys=ROW_KEY, data=row, table_name="pages")
if latest_timestamp:
scraperwiki.sqlite.save_var('latest', latest_timestamp)
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
266635f8db60fe89592c32c152f3b53a7832a8f6
|
2b3f859e7bde80f19e0f823b5e6e73ddb44cb3fe
|
/tests/migrators/test_group.py
|
a08a14e4fd2b0715b54d3f317b4ef5b0ed0de7b1
|
[] |
no_license
|
stormpath/stormpath-migrate
|
4a0d4bc949da7df416529820bdcd76f590a8fe89
|
ee43dbddda29a0b85c9901ea1e678660ef3bce36
|
refs/heads/master
| 2021-01-22T10:01:37.030105
| 2017-03-06T19:17:13
| 2017-03-06T19:17:13
| 43,518,049
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,665
|
py
|
"""Tests for our GroupMigrator class."""
from os import environ
from unittest import TestCase
from uuid import uuid4
from stormpath.client import Client
from migrate.migrators import DirectoryMigrator, GroupMigrator
from migrate.utils import sanitize
# Necessary environment variables.
SRC_CLIENT_ID = environ['SRC_CLIENT_ID']
SRC_CLIENT_SECRET = environ['SRC_CLIENT_SECRET']
DST_CLIENT_ID = environ['DST_CLIENT_ID']
DST_CLIENT_SECRET = environ['DST_CLIENT_SECRET']
class GroupMigratorTest(TestCase):
    """Integration tests for GroupMigrator against two live Stormpath tenants.

    NOTE(review): these tests create and delete real API resources; they
    require the SRC_*/DST_* credentials from the environment.
    """
    def setUp(self):
        # Live API clients for the source and destination tenants.
        self.src = Client(id=SRC_CLIENT_ID, secret=SRC_CLIENT_SECRET)
        self.dst = Client(id=DST_CLIENT_ID, secret=DST_CLIENT_SECRET)
        # Source fixtures: a directory holding one disabled group with custom
        # data.  Random hex names avoid collisions between test runs.
        self.dir = self.src.directories.create({
            'description': uuid4().hex,
            'name': uuid4().hex,
        })
        self.group = self.dir.groups.create({
            'description': uuid4().hex,
            'name': uuid4().hex,
            'status': 'DISABLED',
            'custom_data': {'hi': 'there'},
        })
        # Migrate the directory first so the group has somewhere to land.
        migrator = DirectoryMigrator(destination_client=self.dst, source_directory=self.dir)
        self.dst_dir = migrator.migrate()
    def tearDown(self):
        # Remove fixtures from both tenants.
        self.dir.delete()
        self.dst_dir.delete()
    def test_copy_group(self):
        # copy_group should reproduce name/description/status verbatim.
        migrator = GroupMigrator(destination_directory=self.dst_dir, source_group=self.group)
        migrator.destination_group = migrator.get_destination_group()
        migrator.get_destination_group()
        copied_group = migrator.copy_group()
        self.assertTrue(copied_group)
        self.assertEqual(copied_group.description, self.group.description)
        self.assertEqual(copied_group.name, self.group.name)
        self.assertEqual(copied_group.status, self.group.status)
    def test_copy_custom_data(self):
        # Custom data must be copied after the group itself exists.
        migrator = GroupMigrator(destination_directory=self.dst_dir, source_group=self.group)
        migrator.destination_group = migrator.get_destination_group()
        copied_group = migrator.copy_group()
        copied_custom_data = migrator.copy_custom_data()
        self.assertEqual(copied_custom_data['hi'], 'there')
    def test_migrate(self):
        # migrate() is the end-to-end path: group plus custom data.
        custom_data = self.group.custom_data
        migrator = GroupMigrator(destination_directory=self.dst_dir, source_group=self.group)
        copied_group = migrator.migrate()
        copied_custom_data = copied_group.custom_data
        self.assertEqual(copied_group.description, self.group.description)
        self.assertEqual(copied_group.name, self.group.name)
        self.assertEqual(copied_group.status, self.group.status)
        self.assertEqual(copied_custom_data['hi'], self.group.custom_data['hi'])
|
[
"r@rdegges.com"
] |
r@rdegges.com
|
86ac7f9bcafb82d17cf1e1940c1920b2fc108579
|
0b414a080c9853997bfba016c7f66e5f11d80a14
|
/cj_env/lib/python3.6/site-packages/pysnmp/proto/rfc1901.py
|
8b7dec076254fc3b3a6d3e95de8296083ff5f1db
|
[] |
no_license
|
alkhor/Cable_Journal
|
2bd4bf00210f78c08fcc5508c13833b5e8aa3c46
|
e64fb1bfcc4d1b7844b2e0a10653264d58039259
|
refs/heads/master
| 2021-01-22T19:09:33.562313
| 2018-04-15T19:42:16
| 2018-04-15T19:42:16
| 100,772,711
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pysnmp.sf.net/license.html
#
from pyasn1.type import univ, namedtype, namedval
from pysnmp.proto import rfc1905
# SNMPv2c protocol version field: a single named value, 'version-2c' == 1.
version = univ.Integer(namedValues=namedval.NamedValues(('version-2c', 1)))
class Message(univ.Sequence):
    """SNMPv2c message: version, community string, then the PDU (RFC 1901)."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('version', version),
        namedtype.NamedType('community', univ.OctetString()),
        namedtype.NamedType('data', rfc1905.PDUs())
    )
|
[
"khomenkoalexandr@gmail.com"
] |
khomenkoalexandr@gmail.com
|
32a95aa4742fabf685bb335ad6a630b7ee37a801
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/models/data_subject_type.py
|
8af8f9227c69c2dc7db37be9eb47d8afd69c4c11
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 357
|
py
|
from enum import Enum
class DataSubjectType(str, Enum):
    """Kinds of data subjects recognized by subject-rights requests."""

    # Plain string values: the generated version's trailing commas made each
    # value a 1-tuple, which the str mixin collapses to the same string.
    Customer = "customer"
    CurrentEmployee = "currentEmployee"
    FormerEmployee = "formerEmployee"
    ProspectiveEmployee = "prospectiveEmployee"
    Student = "student"
    Teacher = "teacher"
    Faculty = "faculty"
    Other = "other"
    UnknownFutureValue = "unknownFutureValue"
|
[
"GraphTooling@service.microsoft.com"
] |
GraphTooling@service.microsoft.com
|
34405ad77c78331d0d5982215aff3c9b75139970
|
54ddb3f38cd09ac25213a7eb8743376fe778fee8
|
/topic_08_functions/examples/6_visibility_global.py
|
ec56199933ecb0c82714c47ca80123e9fa749b43
|
[] |
no_license
|
ryndovaira/leveluppythonlevel1_300321
|
dbfd4ee41485870097ee490f652751776ccbd7ab
|
0877226e6fdb8945531775c42193a90ddb9c8a8b
|
refs/heads/master
| 2023-06-06T07:44:15.157913
| 2021-06-18T11:53:35
| 2021-06-18T11:53:35
| 376,595,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
x = 0
def outer():
    x = 1
    def inner():
        # ``global`` binds x to the *module-level* x (the one set to 0
        # above), bypassing outer()'s local x entirely.
        global x
        x = 2
        print("inner:", x)
    inner()
    # outer()'s local x is untouched by inner(): still 1.
    print("outer:", x)
outer()
# The module-level x was rebound by inner() via ``global``.
print("global:", x)
# inner: 2
# outer: 1
# global: 2
|
[
"ryndovaira@gmail.com"
] |
ryndovaira@gmail.com
|
66925b569b205b36e465e85da39a6c1ca0b998ab
|
cccf8da8d41ae2c14f5f4313c1edcf03a27956bb
|
/python/python2latex/writeLTXtextrm.py
|
25de7160851c9ab2eed55ced5886e34a78ecaea7
|
[] |
no_license
|
LucaDiStasio/transpilers
|
e8f8ac4d99be3b42a050148ca8fbc5d025b83290
|
c55d4f5240083ffd512f76cd1d39cff1016909b8
|
refs/heads/master
| 2021-01-12T01:57:00.540331
| 2017-11-01T13:59:55
| 2017-11-01T13:59:55
| 78,448,378
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,283
|
py
|
# Autogenerated with SMOP
from smop.core import *
#
@function
def writeLTXtextrm(filepath=None,args=None,options=None,*args,**kwargs):
    """Append a LaTeX \\textrm command (with options/arguments) to *filepath*.

    NOTE(review): this is SMOP-autogenerated MATLAB-to-Python code.  The
    signature declares ``args`` twice (keyword and ``*args``), which is a
    SyntaxError on CPython -- the module cannot import as written.  Left
    untouched pending a decision on smop.core's calling convention.
    """
    # SMOP runtime bookkeeping: MATLAB-style varargin/nargin snapshots.
    varargin = writeLTXtextrm.varargin
    nargin = writeLTXtextrm.nargin
    ##
    #==============================================================================
    # Copyright (c) 2016-2017 Universite de Lorraine & Lulea tekniska universitet
    # Author: Luca Di Stasio <luca.distasio@gmail.com>
    #         <luca.distasio@ingpec.eu>
    # Redistribution and use in source and binary forms, with or without
    # modification, are permitted provided that the following conditions are met:
    #
    # Redistributions of source code must retain the above copyright
    # notice, this list of conditions and the following disclaimer.
    # Redistributions in binary form must reproduce the above copyright
    # notice, this list of conditions and the following disclaimer in
    # the documentation and/or other materials provided with the distribution
    # Neither the name of the Universite de Lorraine or Lulea tekniska universitet
    # nor the names of its contributors may be used to endorse or promote products
    # derived from this software without specific prior written permission.
    #
    # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
    # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
    # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    # POSSIBILITY OF SUCH DAMAGE.
    #==============================================================================
    # DESCRIPTION
    #
    # A function to create a Latex file.
    # Sets roman font family. SeeText Formatting.#
    ##
    # Open for append and start the \textrm command on a fresh line.
    fileId=fopen(filepath,'a')
    fprintf(fileId,'\\n')
    line='\\textrm'
    # Emit [options] unless the option string is a case-variant of 'none'.
    if logical_not(strcmp(options,'none')) and logical_not(strcmp(options,'NONE')) and logical_not(strcmp(options,'None')):
        line=strcat(line,'[',options,']')
    if logical_not(isempty(args)):
        line=strcat(line,'{')
        # MATLAB-style 1-based iteration over args; dims distinguishes
        # scalar, column and row cell arrays.
        for i in arange(1,length(args)).reshape(-1):
            dims=size(args)
            if dims[1] == 1 and dims[2] == 1:
                line=strcat(line,args[i])
            else:
                if dims[1] > 1 and dims[2] == 1:
                    try:
                        line=strcat(line,args[i][1])
                    finally:
                        pass
                else:
                    if dims[1] == 1 and dims[2] > 1:
                        try:
                            line=strcat(line,args[1][i])
                        finally:
                            pass
                    else:
                        line=strcat(line,args[i])
        line=strcat(line,'}')
    fprintf(fileId,strcat(line,'\\n'))
    fclose(fileId)
    return
|
[
"luca.distasio@gmail.com"
] |
luca.distasio@gmail.com
|
e3d03d32e51e516989a28022f99a1ecc931a3bb1
|
cb0e7d6493b23e870aa625eb362384a10f5ee657
|
/solutions/python3/0199.py
|
55bf7f22754e31867cd312d03f303f6cb6b10e0b
|
[] |
no_license
|
sweetpand/LeetCode-1
|
0acfa603af254a3350d457803449a91322f2d1a7
|
65f4ef26cb8b2db0b4bf8c42bfdc76421b479f94
|
refs/heads/master
| 2022-11-14T07:01:42.502172
| 2020-07-12T12:25:56
| 2020-07-12T12:25:56
| 279,088,171
| 1
| 0
| null | 2020-07-12T15:03:20
| 2020-07-12T15:03:19
| null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
class Solution:
    def rightSideView(self, root: TreeNode) -> List[int]:
        """Return the node values visible from the right side, top to bottom."""
        view = []

        def visit(node: TreeNode, depth: int) -> None:
            if not node:
                return
            # Going right-first, the first node reached at each depth is the
            # rightmost node of that level.
            if depth == len(view):
                view.append(node.val)
            visit(node.right, depth + 1)
            visit(node.left, depth + 1)

        visit(root, 0)
        return view
|
[
"walkccray@gmail.com"
] |
walkccray@gmail.com
|
36aa53c07596f0697adcc2a9facc301ec460cbac
|
c9a6b59b7164b6e402105c802b91d6c2695cec21
|
/blog/templatetags/isliked.py
|
36f43c2eee37d031b1f40c6b9824525f9ca61c65
|
[] |
no_license
|
harunurkst/amar-campus
|
9b9d10e216c9e85b2c78e0c6720310084d389187
|
fe0474274fb7419ef70f9463842260af7d6dea2f
|
refs/heads/master
| 2021-01-18T20:24:40.343852
| 2017-04-25T06:07:26
| 2017-04-25T06:07:26
| 86,963,328
| 1
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
from django import template
register = template.Library()
# Template filter intended to toggle a "like" on a Post and report the new state.
@register.filter(name="is_liked")
def is_liked(user):
    # NOTE(review): `request` is not defined anywhere in this module, so this
    # filter raises NameError when rendered. A template filter only receives
    # its arguments; the request would have to be passed in explicitly.
    post_id = request.POST.get('id', None)
    # NOTE(review): `get_object_or_404` and `Post` are also not imported here.
    post = get_object_or_404(Post, pk=post_id)
    if post.likes.filter(id=user.id).exists():
        post.likes.remove(user)
        is_liked = False
    else:
        post.likes.add(user)
        is_liked = True
    # NOTE(review): mutating likes while rendering a template is a side effect
    # during what may be a GET request; this logic normally belongs in a view.
    return is_liked
|
[
"harun1393@gmail.com"
] |
harun1393@gmail.com
|
1fa19d44a1c11b59e3a25c948ed5ac15c23cdb30
|
8c917dc4810e2dddf7d3902146280a67412c65ea
|
/v_11/EBS-SVN/trunk/purchase_custom/__manifest__.py
|
90748a083184db83669186389d9fa5f1e7757874
|
[] |
no_license
|
musabahmed/baba
|
d0906e03c1bbd222d3950f521533f3874434b993
|
0b997095c260d58b026440967fea3a202bef7efb
|
refs/heads/master
| 2021-10-09T02:37:32.458269
| 2018-12-20T06:00:00
| 2018-12-20T06:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
# -*- coding: utf-8 -*-
{
'name': "purchase_custom",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "My Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/odoo/addons/base/module/module_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base','base_custom','purchase','purchase_requisition','hr','account'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/views.xml',
'views/templates.xml',
'security/purchase_security.xml',
'wizard/purchase_order_wizard_view.xml',
'report/action_purchase_order_report.xml',
'report/template_purchase_order_report.xml',
'report/purchase_quotation_templates_inherit.xml',
],
# only loaded in demonstration mode
'demo': [
'demo/demo.xml',
],
}
|
[
"bakry@exp-sa.com"
] |
bakry@exp-sa.com
|
b7ac2271f415f595aa5380f77be150c49345beab
|
f0e25779a563c2d570cbc22687c614565501130a
|
/Think_Python/ackermann.py
|
58574bbcd3d3e680558b07a0a04c15c6a2349f44
|
[] |
no_license
|
XyK0907/for_work
|
8dcae9026f6f25708c14531a83a6593c77b38296
|
85f71621c54f6b0029f3a2746f022f89dd7419d9
|
refs/heads/master
| 2023-04-25T04:18:44.615982
| 2021-05-15T12:10:26
| 2021-05-15T12:10:26
| 293,845,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
"""This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from functools import lru_cache


@lru_cache(maxsize=None)
def ackermann(m, n):
    """Compute the Ackermann function A(m, n).

    See http://en.wikipedia.org/wiki/Ackermann_function

    n, m: non-negative integers

    Memoized with lru_cache because the naive recursion recomputes the same
    (m, n) pairs exponentially often.
    """
    if m == 0:
        return n + 1
    if n == 0:
        return ackermann(m - 1, 1)
    return ackermann(m - 1, ackermann(m, n - 1))


if __name__ == '__main__':
    # The original module called ackermann(100, 101) at import time, which
    # always crashed with RecursionError (the value is astronomically large
    # and can never be computed). Demo a value that terminates: A(3, 4) == 125.
    print(ackermann(3, 4))
|
[
"cherry.kong0907@gmail.com"
] |
cherry.kong0907@gmail.com
|
bdb9812cf2874f454a6ae0a548efa9589981824c
|
3528abad46b15133b2108c237f926a1ab252cbd5
|
/Core/_Axiom/Transport.py
|
bc2b7d2b4e8787c324fb18ec12fce7581ef3879f
|
[] |
no_license
|
scottmudge/MPK261_Ableton
|
20f08234f4eab5ba44fde6e5e745752deb968df2
|
c2e316b8347367bd157276f143b9f1a9bc2fe92c
|
refs/heads/master
| 2020-03-20T10:56:32.421561
| 2018-06-14T19:12:47
| 2018-06-14T19:12:47
| 137,389,086
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,693
|
py
|
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/_Axiom/Transport.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
import Live
from .consts import *
class Transport:
    """Transport section (stop/play/record/loop/seek) of the Axiom surface."""

    def __init__(self, parent):
        self.__parent = parent
        self.__ffwd_held = False
        self.__rwd_held = False
        self.__delay_counter = 0

    def build_midi_map(self, script_handle, midi_map_handle):
        # Forward every transport CC on channel 15 back to this script.
        for transport_cc in AXIOM_TRANSPORT:
            Live.MidiMap.forward_midi_cc(script_handle, midi_map_handle, 15, transport_cc)

    def receive_midi_cc(self, cc_no, cc_value):
        # Stop/play/record act on key-down only; everything else is routed to
        # the session or arranger handler depending on the visible view.
        if cc_no == AXIOM_STOP:
            if cc_value > 0:
                self.__parent.song().is_playing = False
        elif cc_no == AXIOM_PLAY:
            if cc_value > 0:
                self.__parent.song().is_playing = True
        elif cc_no == AXIOM_REC:
            if cc_value > 0:
                self.__parent.song().record_mode = not self.__parent.song().record_mode
        elif self.__parent.application().view.is_view_visible('Session'):
            if cc_value > 0:
                self.__cc_in_session(cc_no)
        else:
            self.__cc_in_arranger(cc_no, cc_value)

    def __cc_in_session(self, cc_no):
        # Session view: the transport keys navigate and launch scenes.
        scene_index = list(self.__parent.song().scenes).index(self.__parent.song().view.selected_scene)
        if cc_no == AXIOM_LOOP:
            self.__parent.song().view.selected_scene.fire_as_selected()
        elif cc_no == AXIOM_RWD and scene_index > 0:
            self.__parent.song().view.selected_scene = self.__parent.song().scenes[scene_index - 1]
        elif cc_no == AXIOM_FFWD and scene_index < len(self.__parent.song().scenes) - 1:
            self.__parent.song().view.selected_scene = self.__parent.song().scenes[scene_index + 1]

    def __cc_in_arranger(self, cc_no, cc_value):
        # Arranger view: loop toggle plus held-button rewind/fast-forward.
        # Each seek direction is ignored while the opposite button is held.
        if cc_no == AXIOM_LOOP:
            if cc_value > 0:
                self.__parent.song().loop = not self.__parent.song().loop
        elif cc_no == AXIOM_RWD and not self.__ffwd_held:
            if cc_value > 0:
                self.__rwd_held = True
                self.__delay_counter = 0
                self.__parent.song().jump_by(-1 * self.__parent.song().signature_denominator)
            else:
                self.__rwd_held = False
        elif cc_no == AXIOM_FFWD and not self.__rwd_held:
            if cc_value > 0:
                self.__ffwd_held = True
                self.__delay_counter = 0
                self.__parent.song().jump_by(self.__parent.song().signature_denominator)
            else:
                self.__ffwd_held = False

    def refresh_state(self):
        # Called periodically: while a seek button is held, jump again by the
        # signature denominator on every fifth refresh tick.
        for held, direction in ((self.__ffwd_held, 1), (self.__rwd_held, -1)):
            if held:
                self.__delay_counter += 1
                if self.__delay_counter % 5 == 0:
                    self.__parent.song().jump_by(direction * self.__parent.song().signature_denominator)
|
[
"mail@scottmudge.com"
] |
mail@scottmudge.com
|
d9e0bc511e4e2824de47b2ed8a38c4a528b2ad2b
|
1333a965058e926649652ea55154bd73b6f05edd
|
/4_advanced/ai-py-param-validation/src/paramvalidator/exceptions/validation_exception.py
|
594afb62d07c15d96b2052c9d63a8accbf4eb5fb
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
grecoe/teals
|
42ebf114388b9f3f1580a41d5d03da39eb083082
|
ea00bab4e90d3f71e3ec2d202ce596abcf006f37
|
refs/heads/main
| 2021-06-21T20:12:03.108427
| 2021-05-10T19:34:40
| 2021-05-10T19:34:40
| 223,172,099
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
"""
(c) Microsoft. All rights reserved.
"""
class ParameterValidationException(Exception):
    """Base exception for parameter validation failures.

    Carries a human-readable ``message`` describing which parameter check
    failed; ``str(exc)`` returns that message unchanged.
    """

    def __init__(self, message):
        # Forward to Exception.__init__ so ``args``, ``repr()`` and pickling
        # carry the message too (the original left ``args`` empty).
        super().__init__(message)
        self.message = message

    def __str__(self):
        return self.message
|
[
"grecoe@microsoft.com"
] |
grecoe@microsoft.com
|
a7be7d0c99595985e6a9bcda3ec4af33a03ae376
|
18057e01c81dc792a73a2e0bd1a4e037de8fefcb
|
/kaohantei/ninsiki.py
|
606ec732585a18eb525347fe8015002c5d6950de
|
[] |
no_license
|
kentahoriuchi/Kenta
|
15e80018f5c14e1409ac13a7a52c4f64acdce938
|
97bb657a37f0d89525b04f9157a223b47664793e
|
refs/heads/master
| 2020-03-22T14:21:30.221093
| 2018-07-08T14:22:15
| 2018-07-08T14:22:15
| 140,173,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
# Face-recognition script: detect a face in the input image with an OpenCV
# Haar cascade, then classify the crop with a pre-trained Keras model (gazou.h5).
from keras.models import load_model
from keras.utils.np_utils import to_categorical   # NOTE(review): unused import
from keras.optimizers import Adagrad              # NOTE(review): unused import
from keras.optimizers import Adam
import numpy as np
import cv2
import sys
dir = "face.jpeg"          # NOTE(review): unused, and shadows the dir() builtin
img = cv2.imread(sys.argv[1])
filepath = "sys.argv[1]"   # NOTE(review): literal string, presumably sys.argv[1] was intended (unused either way)
cascade_path = "haarcascade_frontalface_default.xml"
cascade = cv2.CascadeClassifier(cascade_path)
# Detect faces on the grayscale version of the image.
image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
facerect = cascade.detectMultiScale(image_gray, scaleFactor=1.1, minNeighbors=10, minSize=(30, 30))
for rect in facerect:
    # rect is (x, y, width, height): crop the face, resize to 100x100 and
    # reorder to channels-first. Only the LAST detected face survives the
    # loop, because `image` is rebound on every iteration.
    x = rect[0]
    y = rect[1]
    width = rect[2]
    height = rect[3]
    dst = img[y:y+height, x:x+width]
    image = cv2.resize(dst, (100, 100))
    image = image.transpose(2, 0, 1)
model = load_model('gazou.h5')
opt = Adam(0.0001)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# NOTE(review): raises NameError when no face was detected (`image` is then
# never assigned). Pixel values are scaled to [0, 1] before prediction.
result = model.predict_classes(np.array([image/255.]))
if result[0] == 0:
    print("he may be man1")
elif result[0] == 1:
    print("he may be man2")
|
[
"dorahori_108@yahoo.co.jp"
] |
dorahori_108@yahoo.co.jp
|
b7721870d8d7c53ef25f4eb6c25ca932b7aa76e7
|
10d8fab4b21d55cfef0139c04a7f70881f5196f4
|
/Stack/simplify-directory-path-unix-like.py
|
db6af1a41488710662509a134bcd9f11f7e8172a
|
[] |
no_license
|
wilfredarin/geeksforgeeks
|
a2afcfd2c64be682b836019407e557332d629ab8
|
5e27cb6706e0ae507694c2170fa00370f219c3e6
|
refs/heads/master
| 2021-08-07T05:48:39.426686
| 2020-08-19T07:25:19
| 2020-08-19T07:25:19
| 212,023,179
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,284
|
py
|
"""Simplify the directory path (Unix like)
Given an absolute path for a file (Unix-style), simplify it. Note that absolute path always begin with ‘/’ ( root directory ), a dot in path represent current directory and double dot represents parent directory.
Examples:
"/a/./" --> means stay at the current directory 'a'
"/a/b/.." --> means jump to the parent directory
from 'b' to 'a'
"////" --> consecutive multiple '/' are a valid
path, they are equivalent to single "/".
Input : /home/
Output : /home
Input : /a/./b/../../c/
Output : /c
Input : /a/..
Output:/
Input : /a/../
Output : /
Input : /../../../../../a
Output : /a
Input : /a/./b/./c/./d/
Output : /a/b/c/d
Input : /a/../.././../../.
Output:/
Input : /a//b//c//////d
Output : /a/b/c/d
By looking at examples we can see that the above simplification process just behaves like a stack.
Whenever we encounter any file’s name, we simply push it into the stack. when we come across ” . ” we do nothing.
When we find “..” in our path, we simply pop the topmost element as we have to jump back to parent’s directory.
When we see multiple “////” we just ignore them as they are equivalent to one single “/”.
After iterating through the whole string the elements remaining in the stack is our simplified absolute path.
We have to create another stack to reverse the elements stored inside the original stack and then store the result inside a string.
"""
def simplifyPath(self, A):
    """Collapse a Unix-style absolute path to its canonical form.

    Empty segments (from repeated '/') and '.' segments are dropped, '..'
    removes the most recent directory (and is ignored at the root), and the
    result always starts with a single '/' and has no trailing '/'.
    """
    resolved = []
    for segment in A.split('/'):
        if segment == '..':
            # Step up to the parent; at the root there is nothing to pop.
            if resolved:
                resolved.pop()
        elif segment and segment != '.':
            resolved.append(segment)
    return '/' + '/'.join(resolved)
|
[
"noreply@github.com"
] |
wilfredarin.noreply@github.com
|
5dbea0ee8b5ef1ca38d84fa1aaf715f0c794feb4
|
d89581e043a154a56de69f419f9e7c2f67cf4ff2
|
/Apps/Engines/Nuke/NukeTools_1.01/Python/minorVersionUp.py
|
214d1a6323b578537a59a33a18fc91be3ffd8409
|
[
"MIT"
] |
permissive
|
yazici/CyclopsVFX-Unity
|
38b121333d5a5a610cf58489822c6f20f825be11
|
6ab9ab122b6c3e6200e90d49a0c2bf774e53d985
|
refs/heads/master
| 2020-04-29T15:05:04.942646
| 2017-11-21T17:16:45
| 2017-11-21T17:16:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,921
|
py
|
#The MIT License (MIT)
#
#Copyright (c) 2015 Geoffroy Givry
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import nuke
import os
# Pieces of the show's directory layout, taken from the environment.
geoffPath = os.getenv("SHOW_PATH")
task = os.getenv('TASK')
def minorVersionUp():
    """Save the current Nuke script as the next take (minor version).

    Expects the script filename to look like ``<base>v<version>_<take>.nk``;
    keeps the version, increments the take, and saves under
    ``<SHOW_PATH>/<JOB>/<SHOT>/TASKS/<TASK>/Work/Nuke/``.

    NOTE(review): ``split('v')[1]`` takes the text after the FIRST 'v', so a
    base name containing a 'v' breaks the parse -- confirm the naming
    convention rules that out.
    """
    if not nuke.root()['name'].value() == "":
        # Parse version and take numbers out of the current script filename.
        version = int(nuke.root()['name'].value().split('/')[-1].split('v')[1].split('_')[0])
        take = int(nuke.root()['name'].value().split('/')[-1].split('v')[1].split('_')[1].split('.')[0])
        shot = os.environ['SHOT']
        job = os.environ['JOB']
        baseFile = nuke.root()['name'].value().split('/')[-1].split('v')[0]
        baseFile0 = '%s/%s/%s/TASKS/%s/Work/Nuke/' % (geoffPath, job, shot, task)
        takeUp = take + 1
        newFile = baseFile0 + '%sv%03d_%02d.nk' % (baseFile, version, takeUp)
        nuke.scriptSaveAs(newFile,0)
    else:
        # Unsaved script: there is no filename to version up.
        nuke.message('please choose a script to save first')
    pass
|
[
"geoff.givry@gmail.com"
] |
geoff.givry@gmail.com
|
975050352947450358340060f69ced694a7463e3
|
143eb3ced0ff1f9cad745c620fcb572f72d66048
|
/Assignment4/atom3/Kernel/GenericGraph/models/test_GenericGraph_mdl.py
|
fd96c32e3307d5950cd3b3ee0c3afc1ddfd9c6c7
|
[] |
no_license
|
pombreda/comp304
|
2c283c60ffd7810a1d50b69cab1d5c338563376d
|
d900f58f0ddc1891831b298d9b37fbe98193719d
|
refs/heads/master
| 2020-12-11T07:26:19.594752
| 2014-11-07T12:29:28
| 2014-11-07T12:29:28
| 35,264,549
| 1
| 1
| null | 2015-05-08T07:18:18
| 2015-05-08T07:18:18
| null |
UTF-8
|
Python
| false
| false
| 1,825
|
py
|
from graph_ASG_ERmetaMetaModel import *
from stickylink import *
from widthXfillXdecoration import *
from ASG_GenericGraph import *
from ASG_GenericGraph import *
from GenericGraphNode import *
from GenericGraphEdge import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
# NOTE(review): this function appears to be auto-generated by the AToM3
# meta-modelling tool (machine-produced object names obj52/obj56); prefer
# regenerating from the model over hand-editing.
def test_GenericGraph_mdl(self, rootNode):
    """Rebuild the saved GenericGraph model (one node, one edge) under rootNode."""
    self.globalPrecondition( rootNode )
    # --- GenericGraphNode instance ---
    self.obj52=GenericGraphNode(self)
    self.obj52.graphClass_= graph_GenericGraphNode
    if self.genGraphics:
        # Canvas graphics are only created when building a GUI model.
        from graph_GenericGraphNode import *
        new_obj = graph_GenericGraphNode(182.0,111.0,self.obj52)
        new_obj.DrawObject(self.UMLmodel)
        self.UMLmodel.addtag_withtag("GenericGraphNode", new_obj.tag)
    else: new_obj = None
    self.obj52.graphObject_ = new_obj
    rootNode.addNode(self.obj52)
    self.globalAndLocalPostcondition(self.obj52, rootNode)
    self.globalPrecondition( rootNode )
    # --- GenericGraphEdge instance ---
    self.obj56=GenericGraphEdge(self)
    self.obj56.graphClass_= graph_GenericGraphEdge
    if self.genGraphics:
        from graph_GenericGraphEdge import *
        new_obj = graph_GenericGraphEdge(249.0,218.0,self.obj56)
        new_obj.DrawObject(self.UMLmodel)
        self.UMLmodel.addtag_withtag("GenericGraphEdge", new_obj.tag)
    else: new_obj = None
    self.obj56.graphObject_ = new_obj
    rootNode.addNode(self.obj56)
    self.globalAndLocalPostcondition(self.obj56, rootNode)
    self.drawConnections( )
# Hooks read by the AToM3 model loader.
newfunction = test_GenericGraph_mdl
loadedMMName = 'GenericGraph'
|
[
"shankland@bigvikinggames.com"
] |
shankland@bigvikinggames.com
|
65b3e12a7a4232da82a51c7d4fddf642b3b3700e
|
2df82b931c89ac70d49b0716d642d8e355926d50
|
/product/urls.py
|
87647f6494c6266c870dd2feb79bc260185026f8
|
[] |
no_license
|
khanansha/producthunt
|
1a638104e83803b9afc4a51ff3ead438ae47cab6
|
03b8d45091c88a2ff142f0a3082910ac1fa0ba41
|
refs/heads/master
| 2021-05-26T03:21:35.246011
| 2020-04-08T08:41:17
| 2020-04-08T08:41:17
| 254,031,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
# URL routes for the product app (create, detail, upvote).
from django.urls import path, include   # NOTE(review): `include` is unused here
from . import views
urlpatterns = [
    # Route to views.create for adding a new product.
    path('create', views.create, name='create'),
    # Product detail page, keyed by primary key.
    path('<int:product_id>', views.detail, name='detail'),
    # Upvote action for a single product.
    path('<int:product_id>/upvote', views.upvote, name='upvote'),
]
|
[
"anjumkhan88987@gmail.com"
] |
anjumkhan88987@gmail.com
|
7f33ebf6e4c0e218d49274dab77575fbad6f4e72
|
b3638a57ff986c9af7281f057bd4cb5641c11589
|
/백준/210803_최단경로/11404플로이드.py
|
e3904cdf037eb2b00770b01adef797ef64f2da63
|
[] |
no_license
|
commGom/pythonStudy
|
6adc01faddbe3ef88e0cbab9da174caa77857ff7
|
a5d52e66dfd0b3b7538454ca2b6fcd9665f83e6c
|
refs/heads/main
| 2023-08-25T12:21:27.670495
| 2021-10-04T08:19:42
| 2021-10-04T08:19:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
# All-pairs minimum bus fare via the Floyd-Warshall algorithm (the file name
# suggests Baekjoon problem 11404 "Floyd").
# Sample input:
# 5
# 14
# 1 2 2
# 1 3 3
# 1 4 1
# 1 5 10
# 2 4 2
# 3 4 1
# 3 5 1
# 4 5 3
# 3 5 10
# 3 1 8
# 1 4 2
# 5 1 7
# 3 4 2
# 5 2 4
import sys
input=sys.stdin.readline   # NOTE(review): deliberately shadows the input() builtin for faster reads
# N = number of cities, M = number of bus routes
N=int(input())
M=int(input())
# graph[u][v] holds the cheapest known fare from u to v (1-based indices);
# 0 doubles as the "no route" sentinel, which works because fares are positive.
# Read M lines of: start city, destination city, cost -- keeping only the
# cheapest fare for duplicate (start, destination) pairs.
graph=[[0 for col in range(N+1)] for row in range(N+1)]
for _ in range(M):
    start,arrive,cost=map(int,input().split())
    if graph[start][arrive]==0 or graph[start][arrive]>cost:
        graph[start][arrive]=cost
# print(graph)
# Floyd-Warshall relaxation over intermediate city k; the i==j/i==k/j==k and
# >0 guards skip self-distances and the "no route" sentinel.
for k in range(1,N+1):
    for i in range(1,N+1):
        for j in range(1,N+1):
            if i==j or i==k or j==k:continue
            if graph[i][k]>0 and graph[k][j]>0:
                if graph[i][j]==0:
                    graph[i][j]=graph[i][k]+graph[k][j]
                else:
                    graph[i][j]=min(graph[i][j],graph[i][k]+graph[k][j])
# print(graph)
# Print the N x N cost matrix; unreachable pairs remain 0.
for i in range(1,len(graph)):
    for j in range(1,len(graph[i])):
        print(graph[i][j],end=" ")
    print()
|
[
"babywhitebear2021@gmail.com"
] |
babywhitebear2021@gmail.com
|
aeeee04864a4e9f90e0b78751f06a0c1734023fe
|
ffca2ab12cb1dad9e3ddd6bf0f615cef91db62e5
|
/test.py
|
cf87ab1380fb1dd2c650be87a67e7c934d453c5d
|
[
"MIT"
] |
permissive
|
CyborgVillager/Block-Tower-Defense
|
0ee26678bb00951b1168f5bc20c762c04cf8a648
|
287da85c852e8596de9e57827845c6d7db286ec9
|
refs/heads/master
| 2020-12-02T02:22:29.347524
| 2019-12-30T10:54:27
| 2019-12-30T10:54:27
| 230,857,127
| 0
| 0
|
MIT
| 2019-12-30T06:00:15
| 2019-12-30T06:00:14
| null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
# Minimal pygame demo: a 640x480 window with a "button" bar that darkens and
# grows slightly while a mouse button is pressed inside its rectangle.
import pygame
pygame.init()
screen = pygame.display.set_mode((640, 480))
pygame.display.set_caption("Hello, world!")
# Solid brown background surface, converted to the display pixel format.
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((100, 60, 25))
clock = pygame.time.Clock()
keepGoing = True
# Current bar appearance; mutated by the mouse handlers below.
color = (100, 100, 100)
size = (150, 50)
pos = (50, 50)
# Set up main loop
while keepGoing:
    # Timer to set frame rate
    clock.tick(30)
    touch = pygame.mouse.get_pos()
    # The bar surface is rebuilt every frame from the current color/size.
    bar = pygame.Surface(size)
    bar = bar.convert()
    bar.fill(color)
    bar.fill(color)   # NOTE(review): duplicated fill; the second call is redundant
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            keepGoing = False
        # NOTE(review): the hit test always uses the bar's resting 150x50
        # rectangle at (50, 50), not the enlarged pressed-state geometry.
        if touch[0] >= 50 and touch[0] <= 200 and touch[1] >= 50 and touch[1] <= 100:
            if event.type == pygame.MOUSEBUTTONDOWN:
                color = (50, 50, 50)
                size = (160, 60)
                pos = (45, 45)
            if event.type == pygame.MOUSEBUTTONUP:
                color = (100, 100, 100)
                size = (150, 50)
                pos = (50, 50)
    # Draw background, then the bar, then flip the double buffer.
    screen.blit(background, (0, 0))
    screen.blit(bar, pos)
    pygame.display.flip()
|
[
"almawijonathan@gmail.com"
] |
almawijonathan@gmail.com
|
d5a5c939def085847ffa6a958f51d3a0dee2867d
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/Sourcem8/pirates/world/ZoneLOD.py
|
484017d0780e356d8af203a398a3626258d15e5c
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453
| 2018-10-24T15:33:17
| 2018-10-24T15:33:17
| 154,521,816
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,823
|
py
|
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from direct.showbase.PythonUtil import *
from otp.otpbase import OTPGlobals
from pirates.piratesbase import PiratesGlobals
class ZoneLOD(DirectObject):
    """Level-of-detail driver based on concentric collision spheres.

    One collision sphere is created per radius in ``zoneRadii``. As an avatar
    crosses sphere boundaries, enter/exit events raise or lower the current
    "zone level", which calls loadZoneLevel/unloadZoneLevel (no-ops here;
    subclasses override them to stream content in and out). ``innerSphere``
    is level 0 and ``outerSphere + 1`` means "fully outside".

    NOTE(review): Python 2 code (``xrange``). The mixin also calls
    ``self.find``/``self.attachNewNode``, so the subclass is presumably a
    NodePath as well -- confirm before reuse.
    """
    notify = directNotify.newCategory('ZoneLOD')
    def __init__(self, uniqueNameFunc, zoneRadii = []):
        # uniqueNameFunc maps a base string to a per-instance unique name,
        # used for both collision node names and event names.
        # NOTE(review): mutable default argument for zoneRadii; harmless only
        # because it is never mutated in place here.
        self.uniqueNameFunc = uniqueNameFunc
        self.zoneRadii = zoneRadii
        self.zoneSphere = []       # one collision-sphere NodePath per radius
        self.lastZoneLevel = None  # current level; None until first set
        self.numSpheres = 0
        self.levelForced = False   # True while forceZoneLevel is in effect
        self.lodCollideMask = PiratesGlobals.ZoneLODBitmask
        self.allEnabled = False    # True while every sphere is armed at once
    def delete(self):
        # Full teardown: collision nodes, all event hooks, the name factory.
        self.deleteZoneCollisions()
        if self.zoneSphere:
            del self.zoneSphere
        self.ignoreAll()
        del self.uniqueNameFunc
    def cleanup(self):
        # Drop to the "fully outside" level so every loaded level unloads.
        # hasattr guard: outerSphere exists only after setZoneRadii ran.
        if hasattr(self, 'outerSphere') and self.numSpheres:
            self.setZoneLevel(self.outerSphere + 1)
    def setZoneRadii(self, zoneRadii, zoneCenter=[0, 0]):
        # (Re)define the sphere set and rebuild all collision geometry.
        # NOTE(review): mutable default for zoneCenter (never mutated here).
        self.numSpheres = len(zoneRadii)
        self.zoneRadii = zoneRadii
        self.zoneCenter = zoneCenter
        self.innerSphere = 0
        self.outerSphere = self.numSpheres - 1
        self.deleteZoneCollisions()
        self.initZoneCollisions()
    def setLodCollideMask(self, mask):
        # Apply a new into-collide mask to every existing sphere.
        self.lodCollideMask = mask
        for currSphere in self.zoneSphere:
            currSphere.node().setIntoCollideMask(self.lodCollideMask)
    def getLodCollideMask(self):
        return self.lodCollideMask
    def initZoneCollisions(self):
        # Build one intangible collision sphere per radius, parented under a
        # 'collisions' child when one exists, else directly under self.
        for i in xrange(len(self.zoneRadii)):
            cSphere = CollisionSphere(0.0, 0.0, 0.0, self.zoneRadii[i])
            cSphere.setTangible(0)
            cName = self.uniqueNameFunc('zoneLevel' + str(i))
            cSphereNode = CollisionNode(cName)
            cSphereNode.setIntoCollideMask(self.lodCollideMask)
            cSphereNode.addSolid(cSphere)
            cRoot = self.find('collisions')
            if not cRoot.isEmpty():
                cSphereNodePath = cRoot.attachNewNode(cSphereNode)
            else:
                cSphereNodePath = self.attachNewNode(cSphereNode)
            cSphereNodePath.setPos(self.zoneCenter[0], self.zoneCenter[1], 0)
            self.zoneSphere.append(cSphereNodePath)
        # Start fully outside every sphere.
        self.setZoneLevel(self.outerSphere + 1)
    def deleteZoneCollisions(self):
        # Remove every sphere NodePath and unhook its enter/exit events.
        for c in self.zoneSphere:
            c.remove_node()
        self.zoneSphere = []
        for i in xrange(self.numSpheres):
            self.ignore(self.uniqueNameFunc('enterzoneLevel' + str(i)))
            self.ignore(self.uniqueNameFunc('exitzoneLevel' + str(i)))
    def showZoneCollisions(self):
        # Debug helper: make the collision spheres visible.
        for c in self.zoneSphere:
            c.show()
    def hideZoneCollisions(self):
        for c in self.zoneSphere:
            c.hide()
    def enableAllLODSpheres(self):
        # Arm every sphere and listen to every enter/exit event at once
        # (used while this object is not the avatar's current parent).
        for i in xrange(self.numSpheres):
            self.accept(self.uniqueNameFunc('exitzoneLevel' + str(i)), Functor(self.handleExitZoneLevel, i + 1))
            self.accept(self.uniqueNameFunc('enterzoneLevel' + str(i)), Functor(self.handleEnterZoneLevel, i))
        for sphere in self.zoneSphere:
            sphere.unstash()
        self.allEnabled = True
    def disableAllLODSpheres(self):
        # Inverse of enableAllLODSpheres: mute events and stash every sphere.
        for i in xrange(self.numSpheres):
            self.ignore(self.uniqueNameFunc('exitzoneLevel' + str(i)))
            self.ignore(self.uniqueNameFunc('enterzoneLevel' + str(i)))
        for sphere in self.zoneSphere:
            sphere.stash()
        self.allEnabled = False
    def clearAllEnabled(self, resetLastZoneLevel = False):
        # Leave all-enabled mode, optionally re-arming the two spheres that
        # bound the remembered level.
        self.allEnabled = False
        if resetLastZoneLevel:
            self.setCollLevel(self.lastZoneLevel)
    def setCollLevel(self, level):
        # Arm only the two spheres bounding `level` (its outer boundary and
        # the next inner one), so exactly one enter and one exit can fire.
        if self.allEnabled:
            return None
        for i in xrange(self.numSpheres):
            self.ignore(self.uniqueNameFunc('enterzoneLevel' + str(i)))
            self.ignore(self.uniqueNameFunc('exitzoneLevel' + str(i)))
        for sphere in self.zoneSphere:
            sphere.stash()
        if level <= self.outerSphere:
            self.zoneSphere[level].unstash()
        if level > self.innerSphere:
            self.zoneSphere[level - 1].unstash()
        if level <= self.outerSphere:
            self.accept(self.uniqueNameFunc('exitzoneLevel' + str(level)), Functor(self.handleExitZoneLevel, level + 1))
        if level > self.innerSphere:
            self.accept(self.uniqueNameFunc('enterzoneLevel' + str(level - 1)), Functor(self.handleEnterZoneLevel, level - 1))
    def handleEnterZoneLevel(self, level, entry = None):
        # Moving inward only ever lowers the level.
        if level >= self.lastZoneLevel:
            return None
        self.setZoneLevel(level, entry)
    def handleExitZoneLevel(self, level, entry = None):
        # Moving outward only ever raises the level.
        if level < self.lastZoneLevel:
            return None
        self.setZoneLevel(level, entry)
    def setZoneLevel(self, level, entry = None):
        """Step the current level to `level`, loading each intermediate level
        while moving inward and unloading each one while moving outward."""
        self.notify.debug('Changing Zone %s:%s' % (self.name, level))
        if self.levelForced:
            return None
        if self.lastZoneLevel == None:
            self.loadZoneLevel(level)
        elif self.lastZoneLevel > level:
            # Moving inward: load levels one at a time, innermost last.
            for i in xrange(self.lastZoneLevel - 1, level - 1, -1):
                self.loadZoneLevel(i)
                self.lastZoneLevel = i
        elif self.lastZoneLevel < level:
            # Moving outward: unload each level we pass.
            for i in xrange(self.lastZoneLevel, level):
                self.unloadZoneLevel(i)
                if i == self.numSpheres:
                    self.allEnabled = False
                self.lastZoneLevel = i
        self.setCollLevel(level)
        self.lastZoneLevel = level
    def setInitialZone(self, pos):
        # Pick the starting level from the avatar's distance to our origin.
        avDist = pos.length()
        curLevel = self.outerSphere + 1
        for i in xrange(self.numSpheres):
            dist = self.zoneRadii[i]
            if avDist < dist:
                curLevel = i
                break
            continue
        self.setZoneLevel(curLevel)
    def setZoneLevelOuter(self):
        # Raise to the outermost sphere's level if we are currently inside it.
        if self.outerSphere > self.lastZoneLevel:
            self.setZoneLevel(self.outerSphere)
    def turnOff(self):
        # Disable all events and spheres without touching lastZoneLevel.
        for i in xrange(self.numSpheres):
            self.ignore(self.uniqueNameFunc('enterzoneLevel' + str(i)))
            self.ignore(self.uniqueNameFunc('exitzoneLevel' + str(i)))
        for sphere in self.zoneSphere:
            sphere.stash()
    def turnOn(self):
        # Re-arm collision handling at the remembered (or outermost) level.
        self.allEnabled = False
        if self.lastZoneLevel is not None:
            self.setCollLevel(self.lastZoneLevel)
        else:
            self.setCollLevel(self.outerSphere)
    def loadZoneLevel(self, level):
        # Subclass hook: bring in content for `level`.
        pass
    def unloadZoneLevel(self, level):
        # Subclass hook: release content for `level`.
        pass
    def forceZoneLevel(self, level):
        # Pin the level; setZoneLevel becomes a no-op until cleared.
        self.setZoneLevel(level)
        self.levelForced = True
    def clearForceZoneLevel(self):
        self.levelForced = False
        self.setZoneLevel(self.outerSphere)
    def childLeft(self, myDoId, parentObj, isIsland = True):
        # Our child avatar left: re-enable the other islands' geometry/LOD.
        if isIsland:
            self.builder.left()
        for island in parentObj.islands.values():
            if island.doId != myDoId:
                if isIsland:
                    island.builder.areaGeometry.unstash()
                island.enableAllLODSpheres()
                if isIsland:
                    island.builder.collisions.unstash()
    def childArrived(self, myDoId, parentObj, isIsland = True):
        # Our child avatar arrived here: suppress the other islands.
        if isIsland:
            self.builder.arrived()
        for island in parentObj.islands.values():
            if island.doId != myDoId:
                if isIsland:
                    island.builder.areaGeometry.stash()
                island.disableAllLODSpheres()
                island.builder.collisions.stash()
            else:
                island.clearAllEnabled(True)
|
[
"brandoncarden12345@gmail.com"
] |
brandoncarden12345@gmail.com
|
b6f879be814c5cb7ae7e78b1b76cb8c2557580c5
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_4_neat/16_0_4_kylewilson_d.py
|
f0a169990ea6d378bafc747d1a45704aacf52a63
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 355
|
py
|
# Google Code Jam solution script (Python 2: `print case`, `xrange`).
# Reads the small input file and writes "Case #i: ..." lines to out.txt.
f = open("D-small-attempt0.in", "r")
fo = open("out.txt","w")
count = int(f.readline())
for case in xrange(0,count):
    print case
    line = f.readline().split()
    k = line[0]
    c = line[1]   # NOTE(review): read but never used
    s = line[2]
    fo.write("Case #" + str(case + 1) + ":")
    # NOTE(review): k and s are still strings here, so `s < k` is a
    # lexicographic comparison, not numeric; int(s) < int(k) was likely meant.
    if s < k:
        fo.write(" IMPOSSIBLE")
    else:
        for x in range(int(k)):
            fo.write(" " + str(x+1))
    fo.write("\n")
# NOTE(review): neither file handle is closed or flushed explicitly.
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
0468f8f8d3852fec3500d09cba904bdbd0e2e2c9
|
bc9abf538f5d4093324b2f055f0b090fe4b03247
|
/python/etc/primer3d.py
|
e4712fca4b2656dfe05717a7fff112e19d3cfd1f
|
[] |
no_license
|
galaxysd/GalaxyCodeBases
|
3c8900d0c2ca0ed73e9cf4c30630aca4da6cc971
|
58946261bf72afd6a7287e781a2176cdfaddf50e
|
refs/heads/master
| 2023-09-04T04:59:35.348199
| 2023-08-25T01:04:02
| 2023-08-25T01:04:02
| 33,782,566
| 7
| 6
| null | 2020-09-04T06:06:49
| 2015-04-11T16:12:14
|
C
|
UTF-8
|
Python
| false
| false
| 4,773
|
py
|
#!/usr/bin/env python3
# Primer-design helper: reads SNV records exported from BRCA Exchange,
# pulls the flanking reference sequence from GRCh38, and asks primer3 to
# design PCR primer pairs around each variant position.
import argparse   # NOTE(review): imported but unused
import logging    # NOTE(review): imported but unused
import primer3 # https://github.com/libnano/primer3-py
# https://brcaexchange.org/variants and click "Show All Public Data", then click "Download" to get `variants.tsv`.
# gzcat ~/Downloads/variants.tsv.gz|head -n30|awk -F'\t' '{if (length($124)+length($125)==2 || NR==1) print $43,$108,$122,$123,$124,$125}'|column -t
# DBID_LOVD Genomic_Coordinate_hg38 Chr Pos Ref Alt
# BRCA1_001574 chr17:g.43093299:A>G 17 43093299 A G
# BRCA1_003516 chr17:g.43078507:T>G 17 43078507 T G
# BRCA1_004379 chr17:g.43103085:A>G 17 43103085 A G
from os.path import expanduser
InFile: str = r'~/tmp/variants.tsv.0.gz'
InFile = expanduser(InFile)
InRef: str = expanduser(r'~/tmp/GRCh38.fa')
from pyfaidx import Fasta
RefSeqs = Fasta(InRef)
#print(RefSeqs['chr1'])
# Random access of BGZip is not supported now, see https://github.com/mdshw5/pyfaidx/issues/126
#InColNames = ['DBID_LOVD','Chr','Pos','Ref','Alt','Genomic_Coordinate_hg38']
InColNames = ['Chr','Pos','Ref','Alt']
#import numpy
#import pandas as pd
#pd.read_table(InFile,compression='gzip',sep='\t')
import gzip
import csv
Total: int = 0     # rows seen in the TSV
Skipped: int = 0   # rows skipped (indels: Ref or Alt longer than one base)
from typing import Dict, List, Tuple
# InData[chrom][pos] = (ref_base, [alt_base, ...])
InData: Dict[str,Dict[int,Tuple[str,List[str]]]] = {}
# Design notes below, in Chinese. Translation: mode 1 puts the forward primer
# with its 3' end right at the target and the reverse primer 300-400 bp
# downstream; mode 2 puts the forward primer within 100 bp upstream of the
# target (not covering it) and the reverse primer 200-300 bp downstream.
# Only the region between the primers counts, not the primers themselves.
# Tm reference range 55-62.
'''
第一种引物,上游引物3‘端设一个,下游距离300bp-400bp设置
第二种,目标点上游100bp设置上游引物,不要覆盖目标点,下游,200-300,
只考虑一对引物中间的部分,引物本身不考虑。
Tm 参考范围55-62
'''
thePara: Dict[str,int] = dict(MaxAmpLen=400, MinAmpLen=300, P5Up1=0, P5Up2=100,
    TmMax=63, TmMin=55, TmDeltra=5,
    PrimerLenMin=25, PrimerLenMax=36, Mode2LeftMax=100
)
with gzip.open(InFile, 'rt') as tsvin:
    # NOTE(review): tsvin is rebound from file handle to DictReader here.
    tsvin = csv.DictReader(tsvin, delimiter='\t')
    #headers = tsvin.fieldnames
    #print(headers)
    for row in tsvin:
        #print(', '.join(row[col] for col in InColNames))
        Total += 1
        if len(row['Ref']) > 1 or len(row['Alt']) > 1 :
            #print(', '.join(row[col] for col in ['Chr','Pos','Ref','Alt']))
            Skipped += 1
        else :
            print(', '.join(row[col] for col in InColNames))
            row['Pos'] = int(row['Pos'])
            # Group alternate alleles by chromosome and position.
            if row['Chr'] in InData :
                if row['Pos'] in InData[row['Chr']] :
                    InData[row['Chr']][row['Pos']][1].append(row['Alt'])
                    #print(InData[row['Chr']][row['Pos']])
                else :
                    InData[row['Chr']][row['Pos']] = (row['Ref'],[row['Alt']])
            else :
                InData[row['Chr']] = { row['Pos'] : (row['Ref'],[row['Alt']]) }
Primer3GlobalArgs: Dict = {
    'PRIMER_OPT_SIZE': 2+thePara['PrimerLenMin'],
    'PRIMER_PICK_INTERNAL_OLIGO': 1,
    'PRIMER_INTERNAL_MAX_SELF_END': 8,
    'PRIMER_MIN_SIZE': thePara['PrimerLenMin'],
    'PRIMER_MAX_SIZE': thePara['PrimerLenMax'],
    'PRIMER_OPT_TM': 60.0,
    'PRIMER_MIN_TM': thePara['TmMin'],
    'PRIMER_MAX_TM': thePara['TmMax'],
    'PRIMER_MIN_GC': 20.0,
    'PRIMER_MAX_GC': 80.0,
    'PRIMER_MAX_POLY_X': 10,
    'PRIMER_INTERNAL_MAX_POLY_X': 10,
    'PRIMER_SALT_MONOVALENT': 50.0,
    'PRIMER_DNA_CONC': 50.0,
    'PRIMER_MAX_NS_ACCEPTED': 0,
    'PRIMER_MAX_SELF_ANY': 12,
    'PRIMER_MAX_SELF_END': 8,
    'PRIMER_PAIR_MAX_COMPL_ANY': 12,
    'PRIMER_PAIR_MAX_COMPL_END': 8,
    'PRIMER_PRODUCT_SIZE_RANGE': [[thePara['MinAmpLen']-thePara['PrimerLenMax'],thePara['MaxAmpLen']+thePara['PrimerLenMax']]],
    'PRIMER_TASK': 'generic',
    'PRIMER_PICK_LEFT_PRIMER': 1,
    # NOTE(review): duplicate key -- PRIMER_PICK_INTERNAL_OLIGO is set to 1
    # earlier in this literal; this later entry wins, so internal oligos are
    # NOT picked.
    'PRIMER_PICK_INTERNAL_OLIGO': 0,
    'PRIMER_PICK_RIGHT_PRIMER': 1,
    'PRIMER_PAIR_MAX_DIFF_TM': thePara['TmDeltra'],
}
primer3.bindings.setP3Globals(Primer3GlobalArgs)
for ChrID in InData.keys() :
    for thePos in InData[ChrID].keys() :
        FulChrID: str = ''.join(['chr',ChrID])
        # Start attributes are 1-based
        Left: int = thePos - thePara['Mode2LeftMax'] - thePara['PrimerLenMax'] -1
        if Left < 0 : Left = 0
        #Left = thePos-1
        # End attributes are 0-based
        Right: int = thePos + thePara['MaxAmpLen'] + thePara['PrimerLenMax']
        if Right > len(RefSeqs[FulChrID]) : Right = len(RefSeqs[FulChrID])
        theSeq: str = RefSeqs[FulChrID][Left:Right]
        print(':'.join([ChrID,str(thePos),FulChrID,str(theSeq),str(InData[ChrID][thePos]) ]))
        Primer3Ret: Dict = primer3.bindings.designPrimers({
            'SEQUENCE_ID': theSeq.fancy_name,
            'SEQUENCE_TEMPLATE': str(theSeq),
            'SEQUENCE_INCLUDED_REGION': [ thePara['PrimerLenMax'],thePara['MaxAmpLen'] ],
        })
        print(Primer3Ret)
print(b'[!] %(skipped)d InDels skipped in %(Total)d items.' % {b'skipped': Skipped, b'Total': Total})
|
[
"galaxy001@gmail.com"
] |
galaxy001@gmail.com
|
d2fe44622bb24756b61b213bf3e55799154afa69
|
ca752ad55da471392e8690437d9a672c9a52bf2a
|
/manage.py
|
30ad0b0215c06bac6732623c2d633d4d66ec30aa
|
[] |
no_license
|
fortable1999/zhaomengblog
|
9280f8bbb1b8f8bbb8e56e26b0b7fb074e07685b
|
f9ee379affee99ebf8a4a6da2b322fb469451fe9
|
refs/heads/master
| 2021-01-10T21:30:37.297876
| 2013-07-12T13:05:47
| 2013-07-12T13:05:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
#!/usr/bin/env python
# Django management entry point: ``python manage.py <command>``.
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zmblog.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
[
"fortable1999@gmail.com"
] |
fortable1999@gmail.com
|
e7b36c21f59ad06459d4b86ada0988020ae3ef90
|
463c8ba5baad086d37819804af4ee10f43ab6dd5
|
/Algorithm/190911/실수_연습문제3.py
|
82ce02e942a893598cacf8b5c5949694673348f5
|
[] |
no_license
|
sooya14/TIL
|
dbbb0608d45ce273ddef6f7cea1b1195285f269d
|
232b0d38d8f6ee2e6e5517bfd6a2a15cf1000dad
|
refs/heads/master
| 2023-01-11T17:12:39.370178
| 2020-05-11T12:06:41
| 2020-05-11T12:06:41
| 195,916,241
| 0
| 0
| null | 2023-01-05T18:22:56
| 2019-07-09T02:17:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 169
|
py
|
def Bbit_print(i):
    """Return the low 16 bits of *i* as a binary string, MSB first."""
    bits = []
    for shift in range(15, -1, -1):
        # Extract bit `shift` and render it as '1' or '0'.
        bits.append('1' if (i >> shift) & 1 else '0')
    return ''.join(bits)


a = 0xDEC
print(Bbit_print(a))
|
[
"soosmile9653@gmail.com"
] |
soosmile9653@gmail.com
|
ba50d67df374167270831ee86f66ac7d0f40ba3f
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/F/frabcus/p-francis.py
|
16380646750da2ccabd56a5aaf6b52e69bb3b125
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# ScraperWiki "hello world" demo (Python 2: note the print statements).
# NOTE(review): the whole snippet is duplicated verbatim below, so every
# step — including the network fetch — runs twice.
import scraperwiki
print "hello, world"
# Fetch the UN homepage over HTTP and dump the raw HTML to stdout.
download = scraperwiki.scrape("http://un.org/")
print download
# Persist a sample record; 'foo' is presumably the unique-key column
# (first argument of sqlite.save per the ScraperWiki API) — verify.
data = { 'foo': 10, 'bar': 'hello' }
scraperwiki.sqlite.save( ['foo'], data )
import scraperwiki
print "hello, world"
download = scraperwiki.scrape("http://un.org/")
print download
data = { 'foo': 10, 'bar': 'hello' }
scraperwiki.sqlite.save( ['foo'], data )
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
511a12dcf6ad86e4c5a9d820d091e7c541027811
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03239/s021170085.py
|
463b5e90f404a028b4f64d43dc4fefef0042eb47
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
def resolve():
    """Print the minimum cost of a route whose time fits the limit.

    Input (stdin): first line "n T", then n lines "c t" (cost, time).
    Output: the smallest c with t <= T, or 'TLE' if no route qualifies.

    Fix: the original used 10000 as a "not found" sentinel, which would
    wrongly print 'TLE' when the cheapest qualifying cost is exactly
    10000.  A None sentinel cannot collide with any real cost.
    """
    n, T = map(int, input().split())
    best = None  # None = no qualifying route seen yet
    for _ in range(n):
        c, t = map(int, input().split())
        if t <= T and (best is None or c < best):
            best = c
    print('TLE' if best is None else best)


if __name__ == '__main__':
    resolve()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
db0075a312c5191fbe8ab1c749b93da07077d880
|
2eeda6bfea74cf746f8223274ee9ec25b9387526
|
/dgCubeDemo/testString.py
|
8641d86826c9af2d47311dc85fb3b364d42349e4
|
[] |
no_license
|
PowerDG/PycharmProjects
|
74f6468964d64846d8c979260a51f375e5d0476d
|
74a7f18be4a7337eef546e4bf3cc6320b9f5b39d
|
refs/heads/master
| 2022-12-09T09:25:11.993089
| 2020-09-18T13:28:00
| 2020-09-18T13:28:00
| 287,977,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,391
|
py
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
# author: xinlan  time:
# Practice script: Python string formatting, lists, tuples, sets and dicts.
"""
Worked examples following https://zhuanlan.zhihu.com/p/52770875
covering %-formatting, lists, tuples, sets and dictionaries.
"""
str1 = '我叫 %s,俺爹是 %s' % ('小王', '老王')
print(str1)
nameStr = '马爸爸'
moneyStr = '有钱'
print('用+将字符串合并,', nameStr + moneyStr)
# === Lists ---- [ ]
nameList = ['猴子', '马云', '王健林', '马化腾']
nameLen = len(nameList)
print(nameLen)
nameList.append('刘强东')
print('增加1个元素的列表是', nameList)
del nameList[1]
print('删除第2个元素后的列表是', nameList)
name1 = nameList[0]
print('查询列表的第一个元素是:', name1)
print('修改之前的列表是', nameList)
nameList[0] = '孙悟空'
print('修改之后的列表是', nameList)
# === Defining a tuple
gafataTuple = ('腾讯', '阿里巴巴', '苹果', '谷歌', 'FB', '亚马逊')
# Length of the tuple
gafataLen = len(gafataTuple)
print('元组的长度是:', gafataLen)
# Indexing a tuple element
print('第1个元素的值', gafataTuple[0])
# === 7. Sets ---- curly braces { }.  A set is a container without duplicates.
#
# Lists are ordered and allow duplicates; sets are unordered and deduplicated.
gafataSets = {'腾讯', '阿里巴巴', '苹果', '谷歌', 'FB', '亚马 逊', '亚马逊'}
print(gafataSets)
stockSets = set()
# (1) Add elements with update()
stockSets.update(['腾讯', '阿里巴巴', '京东'])
print(stockSets)
# (2) Delete
stockSets.discard('京东')
print(stockSets)
# (3) Lookup (membership test)
txBool = '京东' in stockSets
print(txBool)
# (4) "Modify": discard the old value, then add the new one
stockSets.discard('京东')
stockSets.update(['京东'])
print(stockSets)
# 8. Dictionary (mapping of key-value pairs) ---- { }
# Define a dictionary
patientDic = {'001': ['猴子', 29, '1型糖尿病', '较差'], '002': ['马云', 34, '2型糖尿病', '好转'], '003': ['王健林', 28, '1型糖尿病', '显著好转'],
              '004': ['马化腾', 52, '型糖尿病', '好转'], '005': ['王思聪', 30, '1型糖尿病', '好转']}
# === Dictionary operations:
# (1) Create / add (the literal above)
print(patientDic)
# (2) Delete
del patientDic['005']
print(patientDic)
# (3) Query
valueList = patientDic['001']
print(valueList)
# (4) Modify
print('修改之前的,病人信息:', patientDic)
patientDic['001'] = ['猴子', 29, '1型糖尿病', '好转']
print('修改之后的,病人信息:', patientDic)
|
[
"1049365046@qq.com"
] |
1049365046@qq.com
|
480312fb4dd33c1c96bed2d89c5eda9c402cec34
|
b7f3edb5b7c62174bed808079c3b21fb9ea51d52
|
/third_party/blink/renderer/build/scripts/make_origin_trials.py
|
db5b93a998e28fcaa8ec63a52fc4e2229de671db
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
otcshare/chromium-src
|
26a7372773b53b236784c51677c566dc0ad839e4
|
64bee65c921db7e78e25d08f1e98da2668b57be5
|
refs/heads/webml
| 2023-03-21T03:20:15.377034
| 2020-11-16T01:40:14
| 2020-11-16T01:40:14
| 209,262,645
| 18
| 21
|
BSD-3-Clause
| 2023-03-23T06:20:07
| 2019-09-18T08:52:07
| null |
UTF-8
|
Python
| false
| false
| 5,714
|
py
|
#!/usr/bin/env python
# Copyright (C) 2015 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import json5_generator
import make_runtime_features
import name_utilities
import template_expander
# We want exactly the same parsing as RuntimeFeatureWriter
# but generate different files.
class OriginTrialsWriter(make_runtime_features.BaseRuntimeFeatureWriter):
    """Generates origin_trials.cc from the runtime features JSON5 file.

    Reuses BaseRuntimeFeatureWriter's parsing and only changes what is
    emitted: the implied-by mappings, the trial-name -> features map, and
    per-trial type validation/normalization.
    """

    file_basename = 'origin_trials'

    def __init__(self, json5_file_path, output_dir):
        super(OriginTrialsWriter, self).__init__(json5_file_path, output_dir)
        # Map of output filename -> generator method, consumed by the base
        # class when it writes files.
        self._outputs = {
            (self.file_basename + '.cc'): self.generate_implementation,
        }
        self._implied_mappings = self._make_implied_mappings()
        self._trial_to_features_map = self._make_trial_to_features_map()
        # Largest per-trial feature count; the template uses it to size arrays.
        self._max_features_per_trial = max(
            len(features) for features in self._trial_to_features_map.values())
        self._set_trial_types()

    @property
    def origin_trial_features(self):
        """Parsed features that participate in origin trials."""
        return self._origin_trial_features

    def _make_implied_mappings(self):
        """Invert the per-feature implied_by lists.

        Returns a dict mapping a trial feature name to the set of origin
        trial feature names it implies.  Also records, on each implied
        feature, which of its implied_by entries are themselves trials
        (as 'implied_by_origin_trials').
        """
        # Set up the implied_by relationships between trials.
        implied_mappings = dict()
        for implied_feature in (feature
                                for feature in self._origin_trial_features
                                if feature['origin_trial_feature_name']
                                and feature['implied_by']):
            # An origin trial can only be implied by other features that also
            # have a trial defined.
            implied_by_trials = []
            for implied_by_name in implied_feature['implied_by']:
                if any(implied_by_name == feature['name'].original
                       and feature['origin_trial_feature_name']
                       for feature in self._origin_trial_features):
                    implied_by_trials.append(implied_by_name)
                    # Keep a list of origin trial features implied for each
                    # trial. This is essentially an inverse of the implied_by
                    # list attached to each feature.
                    implied_list = implied_mappings.get(implied_by_name)
                    if implied_list is None:
                        implied_list = set()
                        implied_mappings[implied_by_name] = implied_list
                    implied_list.add(implied_feature['name'].original)
            implied_feature['implied_by_origin_trials'] = implied_by_trials
        return implied_mappings

    def _make_trial_to_features_map(self):
        """Group origin-trial features by their trial name."""
        trial_feature_mappings = {}
        for feature in [
                feature for feature in self._origin_trial_features
                if feature['origin_trial_feature_name']
        ]:
            trial_name = feature['origin_trial_feature_name']
            if trial_name in trial_feature_mappings:
                trial_feature_mappings[trial_name].append(feature)
            else:
                trial_feature_mappings[trial_name] = [feature]
        return trial_feature_mappings

    def _set_trial_types(self):
        """Validate and CamelCase each feature's origin_trial_type in place.

        Raises if origin_trial_allows_insecure is set on a non-deprecation
        trial, which is not an allowed combination.
        """
        for feature in self._origin_trial_features:
            trial_type = feature['origin_trial_type']
            if feature[
                    'origin_trial_allows_insecure'] and trial_type != 'deprecation':
                raise Exception('Origin trial must have type deprecation to '
                                'specify origin_trial_allows_insecure: %s' %
                                feature['name'])
            if trial_type:
                feature[
                    'origin_trial_type'] = name_utilities._upper_camel_case(
                        trial_type)

    @template_expander.use_jinja('templates/' + file_basename + '.cc.tmpl')
    def generate_implementation(self):
        """Return the Jinja template context for origin_trials.cc."""
        return {
            'features': self._features,
            'origin_trial_features': self._origin_trial_features,
            'implied_origin_trial_features': self._implied_mappings,
            'trial_to_features_map': self._trial_to_features_map,
            'max_features_per_trial': self._max_features_per_trial,
            'input_files': self._input_files,
        }
# Script entry point: drive the writer through the shared json5_generator
# harness (argument parsing, file IO).
if __name__ == '__main__':
    json5_generator.Maker(OriginTrialsWriter).main()
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
7c4beb4c63680a1ee45c5c97a528c02e1ef70d08
|
ae7ba9c83692cfcb39e95483d84610715930fe9e
|
/yubinbai/pcuva-problems/UVa 12493 stars/main.py
|
779906f17e251db0fb19cfabdf8ad7b649c8263a
|
[] |
no_license
|
xenron/sandbox-github-clone
|
364721769ea0784fb82827b07196eaa32190126b
|
5eccdd8631f8bad78eb88bb89144972dbabc109c
|
refs/heads/master
| 2022-05-01T21:18:43.101664
| 2016-09-12T12:38:32
| 2016-09-12T12:38:32
| 65,951,766
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
import sys
import math
def primeFactors(n):
    """Return the prime factorization of n as an ascending list of ints.

    Trial division by increasing i while i*i <= n; any leftover cofactor
    greater than 1 is itself prime.

    Fixes: uses floor division (//=) so n stays an int on Python 3 as well
    (the original `n /= i` degraded it to float there), and compares
    i*i <= n instead of i <= math.sqrt(n), avoiding float rounding.
    """
    i = 2
    result = []
    while i * i <= n:
        while n % i == 0:
            result.append(i)
            n //= i
        i += 1
    if n != 1:
        # Remaining cofactor has no divisor <= sqrt(n), hence it is prime.
        result.append(n)
    # Factors are produced in ascending order by construction.
    return result
# Read integers from input.txt until EOF/parse failure; for each n print
# phi(n) / 2, where phi(n) = n * prod(1 - 1/p) over n's distinct primes
# (Euler's totient formula).  NOTE: Python 2 (print statement below).
sys.stdin = open('input.txt')
while True:
    try:
        n = int(input())
    except:
        # Any read/parse failure ends the input loop.
        break
    res = n
    # Multiply in the totient factor for each distinct prime of n.
    for f in set(primeFactors(n)):
        res *= (1 - 1.0 / f)
    print int(res / 2)
|
[
"xenron@outlook.com"
] |
xenron@outlook.com
|
c41013fcbae7df51d04a6e80c96ad4de24752f18
|
d0aade2edd6ba5750d70c70198a4bfe16356355e
|
/maskrcnn_benchmark/modeling/poolers.py
|
754b5147485769c9de1ff6429e4a1300a0e27231
|
[
"MIT"
] |
permissive
|
mjq11302010044/RRPN_pytorch
|
ca3a6b781d49b80323671581ea0a5c13ca500a7a
|
a966f6f238c03498514742cde5cd98e51efb440c
|
refs/heads/master
| 2022-08-29T07:29:20.311262
| 2020-10-16T02:29:19
| 2020-10-16T02:29:19
| 184,703,273
| 305
| 68
|
MIT
| 2020-04-30T06:37:19
| 2019-05-03T05:28:03
|
Python
|
UTF-8
|
Python
| false
| false
| 7,295
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.layers import ROIAlign
from maskrcnn_benchmark.layers import RROIAlign
from .utils import cat
class LevelMapper(object):
    """Map each RoI in a set of RoIs to an FPN level.

    Implements the heuristic from the FPN paper (Eqn. 1):
    level = floor(lvl0 + log2(sqrt(area) / s0 + eps)), clamped to
    [k_min, k_max] and shifted so the returned index is 0-based.
    """

    def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6):
        """
        Arguments:
            k_min (int): coarsest pyramid level available
            k_max (int): finest pyramid level available
            canonical_scale (int): reference box scale (s0 in the paper)
            canonical_level (int): level assigned to a canonical-scale box
            eps (float): numerical guard inside the log
        """
        self.k_min = k_min
        self.k_max = k_max
        self.s0 = canonical_scale
        self.lvl0 = canonical_level
        self.eps = eps

    def __call__(self, boxlists):
        """
        Arguments:
            boxlists (list[BoxList])
        """
        areas = cat([boxlist.area() for boxlist in boxlists])
        scale = torch.sqrt(areas)
        # Eqn.(1) in FPN paper, clamped to the available pyramid range.
        raw_lvls = torch.floor(self.lvl0 + torch.log2(scale / self.s0 + self.eps))
        clamped = torch.clamp(raw_lvls, min=self.k_min, max=self.k_max)
        return clamped.to(torch.int64) - self.k_min
class PyramidRROIAlign(nn.Module):
    """Rotated-RoI pooler over an FPN feature pyramid.

    Mirrors Pooler below but uses RROIAlign (rotated boxes) and takes no
    sampling_ratio.  Each RoI is pooled from the single pyramid level
    chosen by LevelMapper, and the pooled rows are scattered back into a
    single batch tensor in the original RoI order.
    Also, the requirement of passing the scales is not strictly necessary, as they
    can be inferred from the size of the feature map / size of original image,
    which is available thanks to the BoxList.
    """

    def __init__(self, output_size, scales):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): spatial scale of each pyramid level, one RROIAlign per level
        """
        super(PyramidRROIAlign, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                RROIAlign(
                    output_size, spatial_scale=scale
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max)

    def convert_to_roi_format(self, boxes):
        """Concatenate per-image boxes into rows of (batch_index, box coords)."""
        concat_boxes = cat([b.bbox for b in boxes], dim=0)
        device, dtype = concat_boxes.device, concat_boxes.dtype
        # Prefix each box row with the index of the image it came from.
        ids = cat(
            [
                torch.full((len(b), 1), i, dtype=dtype, device=device)
                for i, b in enumerate(boxes)
            ],
            dim=0,
        )
        rois = torch.cat([ids, concat_boxes], dim=1)
        return rois

    def forward(self, x, boxes):
        """
        Arguments:
            x (list[Tensor]): feature maps for each level
            boxes (list[BoxList]): boxes to be used to perform the pooling operation.
        Returns:
            result (Tensor): pooled features, one row per RoI
        """
        num_levels = len(self.poolers)
        rois = self.convert_to_roi_format(boxes)
        if num_levels == 1:
            # Single feature map: no level assignment needed.
            return self.poolers[0](x[0], rois)
        levels = self.map_levels(boxes)
        num_rois = len(rois)
        num_channels = x[0].shape[1]
        output_size = self.output_size[0]
        dtype, device = x[0].dtype, x[0].device
        # Pre-allocated output buffer; rows are filled level by level below.
        result = torch.zeros(
            (num_rois, num_channels, output_size, output_size),
            dtype=dtype,
            device=device,
        )
        for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):
            # Pool only the RoIs assigned to this pyramid level, then scatter
            # them back into their original row positions.
            idx_in_level = torch.nonzero(levels == level).squeeze(1)
            rois_per_level = rois[idx_in_level]
            result[idx_in_level] = pooler(per_level_feature, rois_per_level)
        return result
class Pooler(nn.Module):
    """
    Pooler for Detection with or without FPN.
    It currently hard-code ROIAlign in the implementation,
    but that can be made more generic later on.
    Also, the requirement of passing the scales is not strictly necessary, as they
    can be inferred from the size of the feature map / size of original image,
    which is available thanks to the BoxList.
    """

    def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        # One ROIAlign module per pyramid level / spatial scale.
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max)

    def convert_to_roi_format(self, boxes):
        """Concatenate per-image boxes into rows of (batch_index, box coords)."""
        concat_boxes = cat([b.bbox for b in boxes], dim=0)
        device, dtype = concat_boxes.device, concat_boxes.dtype
        # Prefix each box row with the index of the image it belongs to.
        ids = cat(
            [
                torch.full((len(b), 1), i, dtype=dtype, device=device)
                for i, b in enumerate(boxes)
            ],
            dim=0,
        )
        rois = torch.cat([ids, concat_boxes], dim=1)
        return rois

    def forward(self, x, boxes):
        """
        Arguments:
            x (list[Tensor]): feature maps for each level
            boxes (list[BoxList]): boxes to be used to perform the pooling operation.
        Returns:
            result (Tensor): pooled features, one row per RoI
        """
        num_levels = len(self.poolers)
        rois = self.convert_to_roi_format(boxes)
        if num_levels == 1:
            # Single feature map: pool everything from it directly.
            return self.poolers[0](x[0], rois)
        levels = self.map_levels(boxes)
        num_rois = len(rois)
        num_channels = x[0].shape[1]
        output_size = self.output_size[0]
        dtype, device = x[0].dtype, x[0].device
        # Pre-allocated output buffer; rows are filled level by level below.
        result = torch.zeros(
            (num_rois, num_channels, output_size, output_size),
            dtype=dtype,
            device=device,
        )
        for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):
            # Pool only the RoIs assigned to this pyramid level, then scatter
            # them back into their original row positions so the output order
            # matches the input RoI order.
            idx_in_level = torch.nonzero(levels == level).squeeze(1)
            rois_per_level = rois[idx_in_level]
            result[idx_in_level] = pooler(per_level_feature, rois_per_level)
        return result
|
[
"mjq11302010044@gmail.com"
] |
mjq11302010044@gmail.com
|
ded84433bff0e82fd58f4dc304b9a645a18403dd
|
dfad28a2e1a0199c0117e551fd1e31804804d5b9
|
/app/__init__.py
|
6efcbe27d1084742b368ce958eb28dc8ecdb33de
|
[
"MIT"
] |
permissive
|
wilbrone/Pitches
|
c33d60b142b43de9ccf60a86cf59acbc262c6711
|
b20d234fd930a6551f26d9cf863c6d1631b62bc2
|
refs/heads/master
| 2022-12-09T08:02:08.631177
| 2019-11-25T23:47:13
| 2019-11-25T23:47:13
| 223,405,696
| 0
| 0
|
MIT
| 2022-12-08T06:55:48
| 2019-11-22T13:09:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail
from flask_login import LoginManager
from flask_simplemde import SimpleMDE
# from flask_uploads import UploadSet,configure_uploads,IMAGES
from config import config_options
# Extension instances are created unbound at module scope and attached to a
# concrete application later via init_app() inside create_app() (the Flask
# application-factory pattern).
bootstrap = Bootstrap()
db = SQLAlchemy()
mail = Mail()
# photos = UploadSet('photos',IMAGES)
login_manager = LoginManager()
# Flask-Login session protection level.
login_manager.session_protection = 'strong'
# Endpoint users are redirected to when a login is required.
login_manager.login_view = 'auth.login'
simple = SimpleMDE()
def create_app(config_name):
    """Application factory: build and wire up a Flask app.

    *config_name* selects a settings object from ``config_options``.
    """
    flask_app = Flask(__name__)

    # Load the configuration selected by config_name.
    flask_app.config.from_object(config_options[config_name])

    # Bind each module-level extension to this application instance.
    for extension in (bootstrap, db, login_manager, mail):
        extension.init_app(flask_app)

    # Register the blueprints (imported at call time, matching the
    # original module layout).
    from .main import main as main_blueprint
    flask_app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    flask_app.register_blueprint(auth_blueprint, url_prefix='/authenticate')

    # configure_uploads(app,photos)  # upload handling currently disabled
    return flask_app
|
[
"wilbroneokoth@gmail.com"
] |
wilbroneokoth@gmail.com
|
fa772d6c102d931e816220e31d045e9b09bf18ab
|
3fd9c7ee49a32eae3013191b63154a9a5d6dafe6
|
/12.6驾驶飞船/12.6.4调整飞船的速度/alien_invasion_0.10.py
|
07be0e8c4159b335189b2368a83e687b00039bf0
|
[] |
no_license
|
taozhenting/alien_invasion
|
e0c03cd9797cb33e40ca47a13eadeda8b1c4cf85
|
fd9bd97d6238da702fbb1eb6fcb78e8352875fe2
|
refs/heads/master
| 2020-04-27T05:31:48.862784
| 2019-01-30T09:43:49
| 2019-01-30T09:43:50
| 174,083,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
#Modify the while loop: call the ship's update() method on each iteration.
import pygame
from settings_2 import Settings
from ship_4 import Ship
import game_functions_5 as gf
def run_game():
    """Initialize pygame, build the game objects, and run the main loop."""
    #Initialize the game and create a screen (display surface) object.
    pygame.init()
    #Create a Settings instance and store it in the ai_settings variable.
    ai_settings = Settings()
    #Use ai_settings' screen_width and screen_height attributes.
    screen = pygame.display.set_mode(
        (ai_settings.screen_width,ai_settings.screen_height)
    )
    pygame.display.set_caption("Alien Invasion")
    #Create a ship.
    #The ai_settings instance must be passed in as an argument.
    ship = Ship(ai_settings,screen)
    #Start the main loop of the game.
    while True:
        gf.check_events(ship)
        #The ship's position is updated after keyboard events are detected
        #(but before the screen is redrawn).
        ship.update()
        gf.update_screen(ai_settings,screen,ship)
run_game()
|
[
"taozt@ichile.com.cn"
] |
taozt@ichile.com.cn
|
584201b3a981910411696aaa3cbbeb9fa1d2944e
|
8f8f40280afdd0c47fd39664b43c8fb45d86a285
|
/code_sd_ssc/plot_utils.py
|
d11d53930b5434a904ba8695ee4e51e7e4f4b739
|
[] |
no_license
|
shibaji7/IP_Shock_SSC_SuperDARN
|
d463341c432e14c3007a0540ad96b4325289d6c0
|
32ea9d72d2ab68a7b80ab12f41228783370b6d4f
|
refs/heads/main
| 2023-02-19T14:32:43.806681
| 2021-01-23T02:41:00
| 2021-01-23T02:41:00
| 303,022,533
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,803
|
py
|
#!/usr/bin/env python
"""plot_utils.py: module is dedicated to plot different types of parameters"""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "shibaji7@vt.edu"
__status__ = "Research"
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter, num2date
from matplotlib import patches
import matplotlib.patches as mpatches
import random
import utils
class RangeTimePlot(object):
    """
    Create range-time plots for velocity or power.

    Builds a matplotlib figure with `num_subplots` stacked panels; call
    addPlot() once per panel, then show()/save()/close().
    """
    def __init__(self, nrang, fig_title, num_subplots=1):
        # nrang: number of range gates (extent of the y-axis).
        self.nrang = nrang
        self.unique_gates = np.linspace(1, nrang, nrang)
        self.num_subplots = num_subplots
        self._num_subplots_created = 0
        self.fig = plt.figure(figsize=(8, 3*num_subplots), dpi=100) # Size for website
        plt.suptitle(fig_title, x=0.075, y=0.99, ha="left", fontweight="bold", fontsize=15)
        mpl.rcParams.update({"font.size": 10})
        return
    def _tight_layout(self):
        # Intentionally a no-op; kept as a layout hook (see the note in addPlot).
        #self.fig.tight_layout(rect=[0, 0, 0.9, 0.97])
        return
    def show(self):
        """Display the figure interactively."""
        plt.show()
        return
    def save(self, filepath):
        """Write the figure to *filepath*."""
        plt.savefig(filepath, bbox_inches="tight")
        return
    def close(self):
        """Clear and close the figure, releasing its memory."""
        self.fig.clf()
        plt.close()
        return
    def _add_axis(self):
        # Panels are stacked vertically in creation order.
        self._num_subplots_created += 1
        ax = self.fig.add_subplot(self.num_subplots, 1, self._num_subplots_created)
        return ax
    def _add_colorbar(self, fig, ax, bounds, colormap, label=""):
        """
        Add a colorbar to the right of an axis.
        :param fig: figure that owns *ax*
        :param ax: axis the colorbar is attached to
        :param bounds: colour-level boundaries (one colour per interval)
        :param colormap: matplotlib colormap instance
        :param label: text drawn along the colorbar
        :return:
        """
        import matplotlib as mpl
        pos = ax.get_position()
        cpos = [pos.x1 + 0.025, pos.y0 + 0.0125,
                0.015, pos.height * 0.9] # this list defines (left, bottom, width, height)
        cax = fig.add_axes(cpos)
        norm = mpl.colors.BoundaryNorm(bounds, colormap.N)
        cb2 = mpl.colorbar.ColorbarBase(cax, cmap=colormap,
                                        norm=norm,
                                        ticks=bounds,
                                        spacing="uniform",
                                        orientation="vertical")
        cb2.set_label(label)
        return
    def addPlot(self, df, beam, param="v", title="", pmax=200, step=25, xlabel="Time UT"):
        """Add one velocity range-time panel for a single radar beam.

        :param df: DataFrame with (at least) columns time, slist, v, bmnum
        :param beam: beam number used to filter *df* on bmnum
        :param param: currently unused; the panel always plots "v"
        :param pmax: colour scale limit [m/s]; bounds run from -pmax to pmax
        :param step: colour step size [m/s]
        :param xlabel: label for the time axis
        """
        # add new axis
        df = df[df.bmnum==beam]
        self.ax = self._add_axis()
        # set up variables for plotter
        # NOTE(review): time/gate/flags are computed but never used below —
        # the gridded X/Y/Z from utils are what is actually plotted.
        time = np.hstack(df["time"])
        gate = np.hstack(df["slist"])
        flags = np.hstack(df["v"])
        bounds = list(range(-pmax, pmax+1, step))
        cmap = plt.cm.jet
        X, Y, Z = utils.get_gridded_parameters(df, xparam="time", yparam="slist", zparam="v")
        norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
        # cmap.set_bad("w", alpha=0.0)
        # Configure axes: HH:MM ticks every 4 hours, x-range clipped to data.
        self.ax.xaxis.set_major_formatter(DateFormatter("%H:%M"))
        hours = mdates.HourLocator(byhour=range(0, 24, 4))
        self.ax.xaxis.set_major_locator(hours)
        self.ax.set_xlabel(xlabel)
        self.ax.set_xlim([df.time.tolist()[0], df.time.tolist()[-1]])
        self.ax.set_ylabel("Range gate")
        self.ax.pcolormesh(X, Y, Z.T, lw=0.01, edgecolors="None", cmap=cmap, norm=norm)
        self._tight_layout() # need to do this before adding the colorbar, because it depends on the axis position
        self._add_colorbar(self.fig, self.ax, bounds, cmap, label="Velocity [m/s]")
        self.ax.set_title(title, loc="left", fontdict={"fontweight": "bold"})
        return
|
[
"shibaji7@vt.edu"
] |
shibaji7@vt.edu
|
f0b36e876db026e32aeb47a005d47490db58f2bd
|
ff23e5c890216a1a63278ecb40cd7ac79ab7a4cd
|
/clients/client/python/test/test_update_recovery_flow_body.py
|
91fe274624feee373ef8d631fc0e1e6dd405c9ab
|
[
"Apache-2.0"
] |
permissive
|
ory/sdk
|
fcc212166a92de9d27b2dc8ff587dcd6919e53a0
|
7184e13464948d68964f9b605834e56e402ec78a
|
refs/heads/master
| 2023-09-01T10:04:39.547228
| 2023-08-31T08:46:23
| 2023-08-31T08:46:23
| 230,928,630
| 130
| 85
|
Apache-2.0
| 2023-08-14T11:09:31
| 2019-12-30T14:21:17
|
C#
|
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
"""
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v1.1.51
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_client
from ory_client.model.update_recovery_flow_with_code_method import UpdateRecoveryFlowWithCodeMethod
from ory_client.model.update_recovery_flow_with_link_method import UpdateRecoveryFlowWithLinkMethod
globals()['UpdateRecoveryFlowWithCodeMethod'] = UpdateRecoveryFlowWithCodeMethod
globals()['UpdateRecoveryFlowWithLinkMethod'] = UpdateRecoveryFlowWithLinkMethod
from ory_client.model.update_recovery_flow_body import UpdateRecoveryFlowBody
class TestUpdateRecoveryFlowBody(unittest.TestCase):
    """UpdateRecoveryFlowBody unit test stubs (OpenAPI-generator output)."""

    def setUp(self):
        # No fixtures required for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testUpdateRecoveryFlowBody(self):
        """Test UpdateRecoveryFlowBody"""
        # FIXME: construct object with mandatory attributes with example values
        # model = UpdateRecoveryFlowBody()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"3372410+aeneasr@users.noreply.github.com"
] |
3372410+aeneasr@users.noreply.github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.