blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cfec3e3f9e181f7560fa29eacbdd7626ae414e3b
|
16ac02b8f427bd622af1564f1236e4913ed63521
|
/Codes/Version 1.9.1/force_sorter.py
|
c3a4f9e16eb12403d6897fbe06a5af0f0c70f7b5
|
[
"MIT"
] |
permissive
|
gharib85/Brownian-dynamics-in-a-time-varying-force-field
|
20660665747310e1201e8ca7d404acc15ec7a3bd
|
1dce268fcc4f27e066be0ec0b511178cbc1437c5
|
refs/heads/main
| 2023-08-16T03:47:51.957137
| 2021-10-23T19:09:50
| 2021-10-23T19:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,583
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on March 6, 2021
@author: Mohammad Asif Zaman
- May 28, 2021
- Functionalized fluid velocity
"""
import numpy as np
import pylab as py
import matplotlib as plt
import time
from scipy import interpolate
from parameters import *
from geometry_def_sorter import *
# Module global Parameters:
# =============================================================================
# Setting fluid flow velocity
# vx_flow = 120e-6
# vy_flow = 0e-6
# vz_flow = 0e-6
def vel_lm(xin, yin, zin):
    """Piecewise-linear model of the fluid velocity field.

    Takes particle coordinate arrays (x, y, z) and returns a (3, Np)
    array of velocity components for each particle. zin is accepted for
    interface symmetry but does not affect the result.
    """
    n_particles = xin.size
    vel = np.zeros((3, n_particles))
    # Uniform streamwise flow everywhere.
    vel[0] = 150e-6
    # Past the junction (x > 170e-6) the flow branches: particles already in
    # the upper/lower arms (|y| >= 30e-6) pick up a transverse component.
    past_junction = xin > 170e-6
    vel[1, past_junction & (yin >= 30e-6)] = 30e-6
    vel[1, past_junction & (yin <= -30e-6)] = -30e-6
    return vel
# def fluid_vel(r_in, t):
# Np = r_in[0,:].size
# xin = r_in[0,:]
# yin = r_in[1,:]
# zin = r_in[2,:]
# v_fluid = np.zeros((3,Np))
# v_fluid[0,:] = 120e-6
# v_fluid[1,:] = 0
# v_fluid[2,:] = 0
# v_fluid[1,np.where( (xin > 170e-6) & (yin >= 30e-6 ) )] = 30e-6
# v_fluid[1,np.where( (xin > 170e-6) & (yin <= -30e-6 ) )] = -30e-6
# return v_fluid
def fluid_vel(r_in, t):
    """Smoothed fluid velocity at particle positions r_in (3 x Np) at time t.

    Applies a symmetric moving average of the linear model vel_lm() along
    the x and y axes to soften its sharp transitions. t is accepted for
    interface consistency but unused.
    """
    xs = r_in[0, :]
    ys = r_in[1, :]
    zs = r_in[2, :]
    n_particles = xs.size

    spacing = 10e-6   # sample spacing for the moving average
    n_avg = 15        # samples per axis; must be an odd number

    sum_x = np.zeros((3, n_particles))
    sum_y = np.zeros((3, n_particles))
    half = (n_avg - 1) // 2
    for k in range(half + 1):
        if k == 0:
            # Center sample, counted once on each axis.
            sum_x = sum_x + vel_lm(xs, ys, zs)
            sum_y = sum_y + vel_lm(xs, ys, zs)
        else:
            off = spacing * k
            sum_x = sum_x + vel_lm(xs + off, ys, zs) + vel_lm(xs - off, ys, zs)
            sum_y = sum_y + vel_lm(xs, ys + off, zs) + vel_lm(xs, ys - off, zs)
    # Each axis contributes n_avg samples; return the mean of both averages.
    return (sum_x + sum_y) / (2 * n_avg)
# Read force data from data file
# Fy samples on an (x, y) grid; the first 9 lines are header metadata.
Mdata = np.genfromtxt('Fy_XY_grid2.csv',delimiter=',',skip_header=9)
# Scale factors suggest coordinates are stored in um and forces in pN,
# converted here to SI (m, N) — TODO confirm against the CSV header.
xdata = Mdata[:,0]*1e-6
ydata = Mdata[:,1]*1e-6
points = np.array( (xdata, ydata)).T
Fydata = Mdata[:,2]*1e-12
# This is the function that is called from the main program.
# Simplified spring force model based on interpolated Fy data.
def force_profile(r_in, t):
    """Return the (3, Np) trap force on each particle at positions r_in.

    Fy is interpolated from the data grid loaded at module level; t is
    accepted for interface consistency but unused here.
    """
    # Np = r_in[0,:].size
    Np, ro, tfinal, x_lim, y_lim, z_lim, xi_lim, yi_lim, zi_lim = time_pos_ax_limits()
    temp = [.7,-0.25]
    # Alternating per-particle scale factors (odd/even particles get
    # different magnitudes/signs). NOTE(review): assumes Np is even — confirm.
    od_ev = np.array(int(Np/2)*[temp]).flatten()
    xin = r_in[0,:]
    yin = r_in[1,:]
    # Interpolate using |y| (data covers one half-plane) and restore the
    # sign afterwards; points outside the grid get zero force.
    fy = interpolate.griddata(points,Fydata,(xin,abs(yin)),method='linear',fill_value = 0)*np.sign(yin)
    fz = -od_ev*.3e-12
    fm = np.zeros((3,Np))
    fm[1,:] = fy*od_ev
    fm[2,:] = fz
    return fm
# force_plot()
# Np = 1
# # # xin = [1, 4, 2, 3]
# # # xin = np.array(xin)
# # # v_temp = np.zeros(Np)
# # # v_temp[np.where(xin > 2)] = 7
# r = np.random.rand(3,Np,2)
# rin = r[:,:,0]
# t = 0
# rin[0] = 176e-6
# rin[1] = 50e-6
# vf = fluid_vel(rin,t)
# # print(rin)
# print(vf)
# # vf = fluid_vel([[170e-6,40e-6,2e-6]],t)
# # print(vf)
# # interpolation speed test
# start_time = time.time()
# print('\n\n===========================================\n')
# Np = 24
# r = np.random.rand(3,Np,2)
# rin = r[:,:,0]
# t = 0
# tt= force_profile(rin,t)
# print("Execution time = %1.2f seconds \n" % (time.time() - start_time))
# print('\n===========================================\n')
|
[
"39745895+zaman13@users.noreply.github.com"
] |
39745895+zaman13@users.noreply.github.com
|
ae355ce430c65d2fdcab3685713278f7881f618e
|
32dda10669e459cf37c31f426fa709001d2c75b0
|
/leetcode_cn/solved/pg_926.py
|
f858309b00cdcbf0e1b7e35377b4344e80aca5d1
|
[] |
no_license
|
fastso/learning-python
|
3300f50d06871245d0bfcbe9d201224580f70852
|
d21dbd1b9f31017cdb1ed9b9ffd1e53ffe326572
|
refs/heads/master
| 2023-02-10T14:43:53.726247
| 2023-01-26T10:14:59
| 2023-01-26T10:14:59
| 193,454,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
class Solution:
    def minFlipsMonoIncr(self, s: str) -> int:
        """Minimum bit flips to make the binary string s monotone increasing.

        For every split point i, the cost is (ones before i, flipped to 0)
        plus (zeros from i onward, flipped to 1); the answer is the minimum
        over all splits, computed from a running prefix count of ones.
        """
        ones_prefix = [0]
        for ch in s:
            ones_prefix.append(ones_prefix[-1] + int(ch))
        total_ones = ones_prefix[-1]
        n = len(s)
        best = None
        for i, ones_left in enumerate(ones_prefix):
            zeros_right = (n - i) - (total_ones - ones_left)
            cost = ones_left + zeros_right
            if best is None or cost < best:
                best = cost
        return best
|
[
"fastso.biko@gmail.com"
] |
fastso.biko@gmail.com
|
01ad1462bd75d8419ef238415f086483904f0de2
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/110_concurrency_parallelism/_examples/Learn Parallel Computing in Python/004_condition_variables/stingy_spendy_cond_variable.py
|
790010e66a70ddcb69293dc906896900f0e63fe9
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 791
|
py
|
import time
from threading import Thread, Condition
class StingySpendy:
    """Producer/consumer demo: 'stingy' deposits money, 'spendy' withdraws it.

    A single Condition guards the shared balance.
    """
    # Class attributes; note the += / -= in the methods create an instance
    # attribute on first write, so each instance gets its own balance.
    money = 100
    cv = Condition()
    def stingy(self):
        """Deposit 10, a million times, notifying the waiting spender each time."""
        for i in range(1000000):
            self.cv.acquire()
            self.money += 10
            # Wake spendy in case it is blocked waiting for funds.
            self.cv.notify()
            self.cv.release()
        print("Stingy Done")
    def spendy(self):
        """Withdraw 20, half a million times, waiting until funds suffice."""
        for i in range(500000):
            self.cv.acquire()
            # wait() releases the lock until stingy notifies; recheck the
            # predicate in a loop to guard against spurious wakeups.
            while self.money < 20:
                self.cv.wait()
            self.money -= 20
            if self.money < 0:
                # Should be unreachable: the wait above guarantees money >= 20.
                print("Money in bank", self.money)
            self.cv.release()
        print("Spendy Done")
# Run depositor and withdrawer concurrently on one shared instance.
ss = StingySpendy()
Thread(target=ss.stingy, args=()).start()
Thread(target=ss.spendy, args=()).start()
# 1,000,000 deposits of 10 equal 500,000 withdrawals of 20, so the balance
# should end where it started — assuming both threads finish within 5 s.
time.sleep(5)
print("Money in the end", ss.money)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
0b4478a24e878754d3ce2b47225c1fcdeda4363b
|
53edf6b0f4262ee76bb4e3b943394cfeafe54865
|
/linear_theory/Non_linear_stuff/geopack_test.py
|
8885c66e0b3c6e38a6be562edce45df4d3d234f6
|
[] |
no_license
|
Yoshi2112/hybrid
|
f86265a2d35cb0a402ba6ab5f718717d8eeb740c
|
85f3051be9368bced41af7d73b4ede9c3e15ff16
|
refs/heads/master
| 2023-07-07T21:47:59.791167
| 2023-06-27T23:09:23
| 2023-06-27T23:09:23
| 82,878,960
| 0
| 1
| null | 2020-04-16T18:03:59
| 2017-02-23T03:14:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,948
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 31 11:17:17 2021
@author: Yoshi
"""
import pdb
import matplotlib.pyplot as plt
import numpy as np
import geopack as gp
# Want to use geopack to give me a trace of the magnetic field alone a field
# line, and somehow convert (x, y, z) in GSM (?) to an s-value
# Would it be easier to trace out the field line first and output (x,yz) at
# standard intervals along the line, then retrieve the fields at those points
# using whatever? That seems like the easy part (if each point has MLAT, r)
def read_QIN_file(filepath):
    '''
    Parse a QIN-Denton solar-wind parameter file (hard-coded column layout;
    lines starting with '#' are comments).

    Returns a dict mapping field names to numpy arrays, one entry per data
    line. Fixes two defects in the original: the parsed arrays were local
    and the function returned None (all work discarded), and the epoch
    array used dtype=str (i.e. '<U1'), silently truncating every timestamp
    to its first character. Callers that ignored the old None return are
    unaffected.
    '''
    print('Reading', filepath)

    # First pass: count data lines so arrays can be pre-sized.
    # (Could be hard-coded as 1440 for 1-minute daily files, but this is safer.)
    n_lines = 0
    with open(filepath) as f:
        for line in f:
            if line[0] != '#':
                n_lines += 1

    # Initialize arrays
    year, month, day, hour, minute, second, ByIMF, BzIMF, Vsw, den_P, Pdyn, \
    ByIMF_status, BzIMF_status, Vsw_status, den_P_status, Pdyn_status, \
    Kp, akp3, Dst = [np.zeros(n_lines) for _ in range(19)]

    # dtype=object keeps the full timestamp string; dtype=str would be '<U1'.
    epoch = np.zeros(n_lines, dtype=object)
    G = np.zeros((n_lines, 3))
    G_status = np.zeros((n_lines, 3))
    Bz = np.zeros((n_lines, 6))
    W = np.zeros((n_lines, 6))
    W_status = np.zeros((n_lines, 6))

    # Second pass: parse each data line by fixed column position.
    with open(filepath) as f:
        ii = 0
        for line in f:
            if line[0] == '#':
                continue
            A = line.split()
            epoch[ii] = A[0]
            year[ii] = int(A[1])
            month[ii] = int(A[2])
            day[ii] = int(A[3])
            hour[ii] = int(A[4])
            minute[ii] = int(A[5])
            second[ii] = int(A[6])
            ByIMF[ii] = float(A[7])
            BzIMF[ii] = float(A[8])
            Vsw[ii] = float(A[9])
            den_P[ii] = float(A[10])
            Pdyn[ii] = float(A[11])
            G[ii] = [float(v) for v in A[12:15]]
            ByIMF_status[ii] = float(A[15])
            BzIMF_status[ii] = float(A[16])
            Vsw_status[ii] = float(A[17])
            den_P_status[ii] = float(A[18])
            Pdyn_status[ii] = float(A[19])
            G_status[ii] = [float(v) for v in A[20:23]]
            Kp[ii] = float(A[23])
            akp3[ii] = float(A[24])
            Dst[ii] = float(A[25])
            Bz[ii] = [float(v) for v in A[26:32]]
            W[ii] = [float(v) for v in A[32:38]]
            W_status[ii] = [float(v) for v in A[38:44]]
            ii += 1

    # Pack everything so callers can actually use the parsed data.
    return {'epoch': epoch, 'year': year, 'month': month, 'day': day,
            'hour': hour, 'minute': minute, 'second': second,
            'ByIMF': ByIMF, 'BzIMF': BzIMF, 'Vsw': Vsw, 'den_P': den_P,
            'Pdyn': Pdyn, 'G': G,
            'ByIMF_status': ByIMF_status, 'BzIMF_status': BzIMF_status,
            'Vsw_status': Vsw_status, 'den_P_status': den_P_status,
            'Pdyn_status': Pdyn_status, 'G_status': G_status,
            'Kp': Kp, 'akp3': akp3, 'Dst': Dst,
            'Bz': Bz, 'W': W, 'W_status': W_status}
if __name__ == '__main__':
    FPATH = 'G://DATA//QIN_DENTON//2020//QinDenton_20200101_1min.txt'
    read_QIN_file(FPATH)

    # Trace the field line through (L, 0, 0) in both directions.
    # NOTE(review): the final argument is the trace direction — confirm the
    # sign convention against the geopack documentation.
    L_value = 6 # GSM: (L, 0, 0) would be the equatorial point
    xf, yf, zf, xn, yn, zn=gp.geopack.trace(L_value, 0.0, 0.0, -1)
    xf, yf, zf, xs, ys, zs=gp.geopack.trace(L_value, 0.0, 0.0, 1)
    # Check radius:
    r = np.sqrt(xf ** 2 + yf ** 2 + zf ** 2)
    # Earth outline (unit radius) for the field-line plot.
    earth = plt.Circle((0, 0), 1.0, color='k', fill=False)
    # Plot both trace branches in the x-z plane.
    fig, ax = plt.subplots()
    ax.scatter(xn, zn)
    ax.scatter(xs, zs)
    ax.add_patch(earth)
    ax.axis('equal')
|
[
"joshua.s.williams@uon.edu.au"
] |
joshua.s.williams@uon.edu.au
|
4c9df45b6b637fcd01d083c271d5cbf9cfbc3413
|
c7e028d71b5dd72eb18b72c6733e7e98a969ade6
|
/src/demos/datastructures/expressioneval.py
|
30185336f42de290841e528b0e3f0eb704a41207
|
[
"MIT"
] |
permissive
|
antoniosarosi/algoritmia
|
da075a7ac29cc09cbb31e46b82ae0b0ea8ee992f
|
22b7d61e34f54a3dee03bf9e3de7bb4dd7daa31b
|
refs/heads/master
| 2023-01-24T06:09:37.616107
| 2020-11-19T16:34:09
| 2020-11-19T16:34:09
| 314,302,653
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
#coding: latin1
from algoritmia.problems.traversals.treetraversals import PostorderTreeTraverser #[]dos
from algoritmia.datastructures.queues import Lifo #[pre
from algoritmia.datastructures.trees import ListOfListsTree
class ExpressionEvaluator:
    """Evaluate infix integer arithmetic (+, -, *, / with parentheses).

    parse() builds an expression tree with an operator stack
    (shunting-yard style); evaluate() folds the tree with a postorder
    traversal and an operand stack.
    """
    def __init__(self, createLifo=lambda: Lifo()):
        # Factory so callers can supply an alternative stack implementation.
        self.createLifo = createLifo
    def tokenize(self, expression: "str") -> "Iterable<str>":
        """Yield tokens: ints for number literals, 1-char strings for '+*-/()'."""
        i = 0
        while i < len(expression):
            lexeme = []
            if '0' <= expression[i] <= '9':
                # Accumulate a multi-digit number literal.
                while i < len(expression) and '0' <= expression[i] <= '9':
                    lexeme.append(expression[i])
                    i += 1
                yield int(''.join(lexeme))
            elif expression[i] in '+*-/()':
                yield expression[i]
                i += 1
            else:
                # Any other character (e.g. whitespace) is skipped.
                i += 1
    def parse(self, expression: "str") -> "ITree<str>":
        """Build an expression tree using an operator-precedence stack."""
        S = self.createLifo()
        tree = []
        # Precedence map: +,- bind looser than *,/.
        op = {'+': 0, '-': 0, '*': 1, '/': 1}
        for token in self.tokenize(expression):
            if type(token) == int:
                tree.append([token])
            elif token in op:
                # Reduce while the stack top has equal or higher precedence;
                # '<=' keeps the operators left-associative.
                while len(S) > 0 and S.top() in op and op[token] <= op[S.top()]:
                    tree[-2:] = [[S.pop(), tree[-2], tree[-1]]]
                S.push(token)
            elif token == '(':
                S.push('(')
            elif token == ')':
                # Reduce back to the matching '(' and discard it.
                while S.top() != '(':
                    tree[-2:] = [[S.pop(), tree[-2], tree[-1]]]
                S.pop()
        # Reduce any operators left on the stack.
        while len(S) > 0:
            tree[-2:] = [[S.pop(), tree[-2], tree[-1]]]
        return ListOfListsTree(tree[0]) #]pre
    def evaluate(self, exp: "str") -> "int": #[dos
        """Parse *exp* and return its integer value (/ is floor division)."""
        tree = self.parse(exp)
        stack = self.createLifo()
        visitor = lambda t: self.process_root(t, stack=stack)
        for dummy in PostorderTreeTraverser().traverse(tree, visitor): pass
        return stack.pop()
    def process_root(self, tree: "ITree<str>", stack: "Lifo"):
        """Postorder visitor: push operands; apply operators to the top two."""
        if isinstance(tree.root, str) and tree.root in "+-*/":
            # b was pushed first, so b is the left operand.
            a, b = stack.pop(), stack.pop()
            if tree.root == '+': stack.push(b + a)
            elif tree.root == '-': stack.push(b - a)
            elif tree.root == '*': stack.push(b * a)
            else: stack.push(b // a)
        else:
            stack.push(tree.root) #]dos
if __name__ == "__main__": #[tres
    # Demo: show the parsed tree and the evaluated result.
    ee = ExpressionEvaluator()
    exp = "2 - 5 + 3 * 6"
    print('{} -> {}'.format(ee.parse(exp), ee.evaluate(exp))) #]tres
|
[
"amarzal@localhost"
] |
amarzal@localhost
|
529e7a1ad3229cb026b3acc0d8c25e1c1d835045
|
2125593138c50b1fba5e46cd4d88d6c04d0b417a
|
/알고리즘-파이참/백준/code_Test/LZW 압축.py
|
19315db3a2344d4ff8407c1596119911f4df610c
|
[] |
no_license
|
minkishome/TIL-master
|
5f0e6ef61b34a2983961ccf44f7523603ccb5907
|
d8edc0ff8abff3b2239a2d751eee263b722013a6
|
refs/heads/master
| 2023-01-21T00:43:30.165535
| 2020-08-25T14:56:18
| 2020-08-25T14:56:18
| 203,070,283
| 0
| 1
| null | 2023-01-05T01:08:10
| 2019-08-19T00:18:31
|
Python
|
UTF-8
|
Python
| false
| false
| 895
|
py
|
from string import ascii_uppercase
def solution(msg):
    """LZW-compress *msg* (uppercase letters) into a list of dictionary indices.

    The dictionary starts as A..Z at indices 1..26 (slot 0 is a dummy so
    indices are 1-based) and grows with each previously unseen substring.
    """
    codes = []
    dictionary = [0] + list(ascii_uppercase)
    pos = 0
    end = len(msg)
    while pos < end:
        current = ''
        for j in range(pos, end):
            current += msg[j]
            pos += 1
            if current in dictionary:
                continue
            # Longest known prefix exceeded: register the new string and
            # step back one char so it starts the next match.
            dictionary.append(current)
            pos -= 1
            break
        if pos == end:
            # Consumed the whole message with a known string.
            codes.append(dictionary.index(current))
        else:
            # Emit the code of the longest known prefix (without last char).
            codes.append(dictionary.index(current[:-1]))
    return codes
solution('KAKAO')
solution('TOBEORNOTTOBEORTOBEORNOT')
|
[
"minkishome@gmail.com"
] |
minkishome@gmail.com
|
41028d5c4ae422f93009422780b1acaf80ec0334
|
96d77068d6762bad6847a2c74dfd6c220a3fdd75
|
/features/generate_wordvector_distances.py
|
d3514741c4fc634bd2fd297014510fcb474f002a
|
[
"MIT"
] |
permissive
|
SunnyMarkLiu/Kaggle_Quora_Question_Pairs_Intent
|
402cc2c4eaf40d25794dd6ff0b583411865f884b
|
1b6914cd08d1bb3e815aacdf0f220458f5d75f7c
|
refs/heads/master
| 2021-05-05T12:08:51.823673
| 2017-10-05T10:42:52
| 2017-10-05T10:42:52
| 104,720,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,489
|
py
|
#!/usr/local/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
@author: MarkLiu
@time : 17-10-5 上午11:28
"""
from __future__ import absolute_import, division, print_function
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)
import cPickle
import numpy as np
from utils import data_utils, jobs
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
from optparse import OptionParser
from conf.configure import Configure
def generate_word_vector_map():
    """
    build index mapping words in the embeddings set
    to their embedding vector

    Returns a dict {word: float32 numpy array}. The parsed index is pickled
    to disk so the slow text parse only happens once.
    """
    embeddings_index = {}
    # NOTE(review): machine-specific hard-coded cache path — consider moving
    # it into Configure alongside pretrained_wordvectors.
    embeddings_index_path = '/d_2t/lq/kaggle/Kaggle_Quora_Question_Pairs_Intent/embeddings_index.pkl'
    if os.path.exists(embeddings_index_path):
        # Fast path: load the cached pickle.
        with open(embeddings_index_path, "rb") as f:
            embeddings_index = cPickle.load(f)
        return embeddings_index

    # Slow path: parse the pretrained vectors file, one "word c1 c2 ..." per line.
    f = open(Configure.pretrained_wordvectors)
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
    f.close()
    # Cache with the highest pickle protocol (-1) for next time.
    with open(embeddings_index_path, "wb") as f:
        cPickle.dump(embeddings_index, f, -1)
    return embeddings_index
def get_wordvector(word):
    """Look up *word* in the module-level embeddings_index.

    Returns the stored vector, or a 300-dim zero list for unknown words.
    """
    vector = embeddings_index.get(word)
    if vector is None:
        vector = [0] * 300
    return vector
def generate_wordvectors_features(df):
    """Add mean word-vector ('sentence vector') columns to *df*.

    Expects 'cleaned_question1'/'cleaned_question2' text columns; adds
    'cq1_sentence_vector'/'cq2_sentence_vector' (mean of per-word vectors,
    300-dim zero vector for empty questions). The intermediate per-word
    list columns are deleted immediately to limit memory use.
    """
    df['cleaned_question1_vcs'] = df['cleaned_question1'].map(lambda x: [get_wordvector(word) for word in str(x).split()])
    df['cq1_sentence_vector'] = df['cleaned_question1_vcs'].map(lambda x: np.mean(x, axis=0) if len(x) > 0 else [0] * 300)
    del df['cleaned_question1_vcs']
    df['cleaned_question2_vcs'] = df['cleaned_question2'].map(lambda x: [get_wordvector(word) for word in str(x).split()])
    df['cq2_sentence_vector'] = df['cleaned_question2_vcs'].map(lambda x: np.mean(x, axis=0) if len(x) > 0 else [0] * 300)
    del df['cleaned_question2_vcs']
    return df
def generate_wordvector_distance(df):
    """Add cosine/euclidean distance features between the two sentence vectors.

    Consumes (and then deletes) the 'cq*_sentence_vector' columns produced
    by generate_wordvectors_features().
    """
    # NOTE(review): these collapse each sentence vector to a scalar mean; the
    # resulting 'cp*_mean_vector' columns are neither used nor deleted here —
    # confirm they are consumed downstream.
    df['cp1_mean_vector'] = df['cq1_sentence_vector'].map(lambda x: np.mean(x, axis=0))
    df['cp2_mean_vector'] = df['cq2_sentence_vector'].map(lambda x: np.mean(x, axis=0))
    # Row-wise pairwise distances; reshape(1, -1) because sklearn's distance
    # functions expect 2-D inputs.
    df['cp_wordvector_cosine_distances'] = df.apply(lambda row: cosine_distances(np.array(row['cq1_sentence_vector']).reshape(1, -1),
                                                    np.array(row['cq2_sentence_vector']).reshape(1, -1))[0][0],
                                                    axis=1)
    df['cp_wordvector_euclidean_distances'] = df.apply(lambda row: euclidean_distances(np.array(row['cq1_sentence_vector']).reshape(1, -1),
                                                       np.array(row['cq2_sentence_vector']).reshape(1, -1))[0][0],
                                                       axis=1)
    del df['cq1_sentence_vector']
    del df['cq2_sentence_vector']
    return df
# CLI option: which preprocessed dataset variant to operate on.
parser = OptionParser()
parser.add_option(
    "-d", "--base_data_dir",
    dest="base_data_dir",
    default="perform_stem_words",
    help="""base dataset dir:
perform_stem_words,
perform_no_stem_words,
full_data_perform_stem_words,
full_data_perform_no_stem_words"""
)
options, _ = parser.parse_args()

print("========== generate word vector features ==========")
base_data_dir = options.base_data_dir
# Pipeline stage number: if the next stage's output already exists, this
# stage has been run before, so bail out early.
op_scope = 5
if os.path.exists(Configure.processed_train_path.format(base_data_dir, op_scope + 1)):
    exit()

print("---> load datasets from scope {}".format(op_scope))
train, test = data_utils.load_dataset(base_data_dir, op_scope)
print("train: {}, test: {}".format(train.shape, test.shape))
print('---> generate word vector mapping')
embeddings_index = generate_word_vector_map()
print('---> generate wordvectors features')
# parallelize_dataframe maps the function over dataframe partitions in parallel.
train = jobs.parallelize_dataframe(train, generate_wordvectors_features)
test = jobs.parallelize_dataframe(test, generate_wordvectors_features)
print('---> generate wordvector distance features')
train = jobs.parallelize_dataframe(train, generate_wordvector_distance)
test = jobs.parallelize_dataframe(test, generate_wordvector_distance)
print("train: {}, test: {}".format(train.shape, test.shape))
print("---> save datasets")
# Persist results under the next stage number.
data_utils.save_dataset(base_data_dir, train, test, op_scope + 1)
|
[
"SunnyMarkLiu101@gmail.com"
] |
SunnyMarkLiu101@gmail.com
|
0a0065801350f7189f26c180481a975d51f3d661
|
abef98cfa3fb2c4626eb8c0a77c1080992d9b11b
|
/c/python-interface/spam/test-spam.py
|
c714b78481ecb371933cc40cde3dd794cddbcf59
|
[] |
no_license
|
mikebentley15/sandbox
|
ff88ed9dc4b9ac37668142a319d0a8162e88e9e3
|
4f5869544de18be21f415a9d6f9b71c362307f27
|
refs/heads/main
| 2023-04-14T00:22:34.623441
| 2023-03-24T21:43:56
| 2023-03-24T21:43:56
| 116,987,549
| 6
| 3
| null | 2022-10-26T03:02:06
| 2018-01-10T17:14:54
|
C++
|
UTF-8
|
Python
| false
| false
| 947
|
py
|
#!/usr/bin/env python3
"""Smoke-test the 'spam' C-extension demo module: system calls, the custom
SpamError exception, the Spam type, and the Noddy1-4 example types."""

import spam

print()
status = spam.system("ls -l | wc")
print("status: ", status)
print()
# check_system raises spam.SpamError on failure instead of returning a status.
print('Expect spam.SpamError')
try:
    status = spam.check_system("false")
    print("status: ", status)
except spam.SpamError as ex:
    print(' ', ex)
    print(' ignored')
print()
s = spam.Spam("Real brand of SPAM")
s.print()
print(s)
print()
# Noddy1: minimal extension type.
n1 = spam.Noddy1()
print(n1)
print()
# Noddy2: accepts first/last keyword arguments of any type.
n2 = spam.Noddy2(first="Mike", last="Bentley")
print(n2)
print("Name: ", n2.name())
print()
n2 = spam.Noddy2(first=2)
print(n2)
print("Name: ", n2.name())
print()
n3 = spam.Noddy3(first="Mike", last="Bentley")
print(n3)
print("Name: ", n3.name())
print()
# Noddy3 type-checks its arguments, unlike Noddy2 above.
print('Expect TypeError')
try:
    spam.Noddy3(first=3)
except TypeError as ex:
    print(' ', ex)
    print(' ignored')
print()
n4 = spam.Noddy4(first="Mike", last="Bentley")
print(n4)
print("Name: ", n4.name())
print()
n4 = spam.Noddy4(first=2)
print(n4)
print("Name: ", n4.name())
|
[
"mikebentley15@gmail.com"
] |
mikebentley15@gmail.com
|
f2795230818272b8a213a4a39e164aa72cffeb1a
|
0629ada6a00ceff0dc567b965ec021278ca27a7f
|
/pyhon_zh_web/pageobject/Home_page.py
|
b0428c5fb1c75ac098c54857b75b2147cf16d303
|
[] |
no_license
|
zhaozongzhao/sqbj_web
|
48310e41dc914ae82bc3fa919b63a9ffea04f22e
|
7d33acbaca18b6764f80e2247ff631e46d5b0b89
|
refs/heads/master
| 2020-04-09T18:22:49.280922
| 2019-09-26T10:01:53
| 2019-09-26T10:01:53
| 160,510,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,277
|
py
|
from selenium import webdriver
import time
from selenium.webdriver.support.ui import WebDriverWait
import traceback
from Util.ParsePageObjectRepository import *
from Util.var import *
from Util.ObjectMap import *
from selenium.webdriver.support.ui import Select
class HomePage(object):
    """Page object for the portal home page.

    Element locators are read from the 'zh_page_home' section of the
    PageObjectRepository file, each stored as "locateType>expression".
    """
    def __init__(self,driver):
        self.driver = driver
        self.paser_page_object = ParsePageObjectRepository('PageObjectRepository')
        self.login_iteim = self.paser_page_object.getItemSection('zh_page_home')
        self.wait = WebDriverWait(self.driver,10,0.2)
    # Locate the "property charges" menu entry.
    def open_charge(self):
        locateType, locateExpression = self.login_iteim['page.charge'].split('>')
        charge = getElement(self.driver,locateType,locateExpression)
        return charge
    # Locate the "basic services" menu entry.
    def open_basic(self):
        locateType, locateExpression = self.login_iteim['page.basic_services'].split('>')
        charge = getElement(self.driver,locateType,locateExpression)
        return charge
    def get_login_name(self):
        """Return the displayed login (personal) name text."""
        locateType, locateExpression = self.login_iteim['page.personal'].split('>')
        # Fixed pause for the element text to render — TODO: use self.wait instead.
        time.sleep(1)
        name = getElement(self.driver,locateType,locateExpression).text
        print(name)
        return name
|
[
"2206321864@qq.com"
] |
2206321864@qq.com
|
1851b1294e96a418b521d90051a6aa7ba5f128fc
|
efd8628adc042ae2d58fa89cc31a5c1c80aa94f6
|
/data/stochastic_dataset_script/load_save_data.py
|
faf224b7b20e02dfae644277e27d62b61bfaff04
|
[] |
no_license
|
Xharlie/stochast_dynamic_for_video_infilling
|
d7e0bfaf8b71cf3f39170793e5a1a50b289aee40
|
a825de4c5178f7084925817f0631ac331073866f
|
refs/heads/master
| 2021-03-27T09:11:15.478067
| 2019-10-23T17:59:25
| 2019-10-23T17:59:25
| 110,137,739
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,896
|
py
|
import tensorflow as tf
import numpy as np
import imageio
import cv2
def vids_2_frames(tfiles, data_path, image_size_h, image_size_w, channel):
    """Decode the listed videos into fixed-size frame arrays.

    Each tfiles entry is "relpath [low high]". Returns (vids, actions):
    vids is a list of uint8 arrays of shape (H, W, n_frames, channel);
    actions are labels taken from the second "_"-separated token of the
    file name. NOTE: Python 2 code (print statements, xrange).
    """
    vids=[]
    actions=[]
    for i in xrange(len(tfiles)):
        f_name = tfiles[i]
        tokens = f_name.split()
        # Grayscale datasets (channel == 1) use the "_uncomp.avi" suffix.
        vid_path = data_path + tokens[0] + ("_uncomp.avi" if channel ==1 else "")
        print "vid_path:", vid_path
        try:
            vid = imageio.get_reader(vid_path, "ffmpeg")
            # Optional frame range "low high"; default is the whole video.
            if len(tokens) < 2:
                low = 1
                high = vid.get_length()
            else:
                low = int(tokens[1])
                high = np.min([int(tokens[2]), vid.get_length()])
            seq = np.zeros((image_size_h, image_size_w, high - low + 1, channel), dtype=np.uint8)
            for t in xrange(high - low + 1):
                if channel == 1:
                    # w,h not h,w here!  (cv2.resize expects (width, height))
                    img = cv2.cvtColor(cv2.resize(vid.get_data(t),
                                                  (image_size_w, image_size_h)), cv2.COLOR_RGB2GRAY)
                else:
                    img = cv2.resize(vid.get_data(t),(image_size_w, image_size_h))
                # print img.shape, seq.shape
                # Grayscale frames come back 2-D; add a channel axis.
                if len(img.shape) == 2: seq[:, :, t, :] = img[:, :, None]
                else: seq[:, :, t, :] = img[:, :, :]
            print tokens[0]
            if (len(tokens[0].split("_"))) > 0:
                actions.append(tokens[0].split("_")[1])
            vids.append(seq)
        except KeyError:
            # Skip unreadable videos rather than aborting the whole batch.
            print KeyError.message
            continue
    return vids, actions
def save_data2record(tfiles, data_path, image_size_h, image_size_w, tf_record_dir, channel):
    """Write all listed videos into a single TFRecord file named 'tfrecords'.

    Returns the decoded video arrays. NOTE: Python 2 / TF1 code.
    """
    vids, actions = vids_2_frames(tfiles, data_path, image_size_h, image_size_w, channel)
    print actions
    tfrecords_filename = 'tfrecords'
    writer = tf.python_io.TFRecordWriter(tf_record_dir + tfrecords_filename)
    for i in xrange(len(vids)):
        # One Example per video: shape metadata plus raw uint8 frame bytes.
        vids_record = tf.train.Example(features=tf.train.Features(
            feature={
                'height': _int64_feature(image_size_h),
                'width': _int64_feature(image_size_w),
                'depth': _int64_feature(vids[i].shape[2]),
                'channels': _int64_feature(channel),
                'action': _bytes_feature(actions[i]),
                'vid': _bytes_feature(vids[i].tostring())
            }
        ))
        writer.write(vids_record.SerializeToString())
        print "finish writing video{} to {}".format(i, tf_record_dir + tfrecords_filename)
    writer.close()
    return vids
def save_data2records(tfiles, data_path, image_size_h, image_size_w, tf_record_dir, channel):
    """Shard the listed videos into multiple TFRecord files of ~800 entries.

    Returns the list of written file paths. A final short shard (under a
    quarter of tf_size) is merged into the previous one.
    NOTE: Python 2 / TF1 code.
    """
    tf_size = 800
    start = 0
    end = 0
    files=[]
    while start <= len(tfiles):
        end = min(start + tf_size, len(tfiles) + 1)
        # Avoid a tiny trailing shard: extend this shard to the end instead.
        if end + tf_size / 4 > len(tfiles): end = len(tfiles) + 1
        print "file start and end:",start,end
        vids, actions = vids_2_frames(tfiles[start:end], data_path, image_size_h, image_size_w, channel)
        # Shard files are named tfrecords0, tfrecords1, ...
        tfrecords_filename = 'tfrecords' + str(start / tf_size)
        writer = tf.python_io.TFRecordWriter(tf_record_dir + tfrecords_filename)
        for i in xrange(len(vids)):
            vids_record = tf.train.Example(features=tf.train.Features(
                feature={
                    'height': _int64_feature(image_size_h),
                    'width': _int64_feature(image_size_w),
                    'depth': _int64_feature(vids[i].shape[2]),
                    'channels': _int64_feature(channel),
                    'action': _bytes_feature(actions[i]),
                    'vid': _bytes_feature(vids[i].tostring())
                }
            ))
            writer.write(vids_record.SerializeToString())
            print "finish writing video{} to {}".format(i, tf_record_dir + tfrecords_filename)
        files.append(tf_record_dir + tfrecords_filename)
        writer.close()
        start = end
    return files
def load_records(tf_record_files, length=None):
    """Read videos back from TFRecord files written by save_data2record(s).

    Returns a list of uint8 arrays reshaped to (height, width, depth, -1);
    when *length* is given, videos with fewer than *length* frames (the
    'depth' axis) are skipped. NOTE: Python 2 / TF1 code.
    """
    vids = []
    for i in xrange(len(tf_record_files)):
        print "loading {}".format(tf_record_files[i])
        record_iterator = tf.python_io.tf_record_iterator(path=tf_record_files[i])
        for string_record in record_iterator:
            example = tf.train.Example()
            example.ParseFromString(string_record)
            height = int(example.features.feature['height']
                         .int64_list
                         .value[0])
            width = int(example.features.feature['width']
                        .int64_list
                        .value[0])
            depth = int(example.features.feature['depth']
                        .int64_list
                        .value[0])
            vid_string = (example.features.feature['vid']
                          .bytes_list
                          .value[0])
            # Raw bytes back to uint8 pixels; trailing -1 recovers channels.
            vid_raw = np.fromstring(vid_string, dtype=np.uint8)
            vid = vid_raw.reshape((height, width, depth, -1))
            if length is not None and vid.shape[-2] < length:
                print length, vid.shape[-2]
                continue
            vids.append(vid)
        print "finish {}".format(tf_record_files[i])
    print len(vids), " videos in total"
    return vids
def load_record_inbatch(file_queue):
    """Graph-mode reader: parse one serialized Example from a TF file queue.

    Returns (raw video bytes tensor, depth tensor cast to int32).
    NOTE: Python 2 / TF1 queue-based input pipeline.
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(file_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'depth': tf.FixedLenFeature([1], tf.int64),
                                           'vid': tf.FixedLenFeature([],tf.string),
                                       }
                                       )
    depth = tf.cast(features['depth'], tf.int32)
    return features["vid"], depth
def _int64_feature(value):
    # Wrap a scalar int as a TF Feature for Example serialization.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    # Wrap a byte string as a TF Feature for Example serialization.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
|
[
"charlie.xu007@yahoo.com"
] |
charlie.xu007@yahoo.com
|
4d33d5ef634b5714a020bc5477a6c0a495e1ad47
|
fa3d19403750d300a4228fa0fc414c88c49a6d35
|
/bin/pfurl
|
37f78b8d6bb0f06d9b056bd75cde10fb8222b4ab
|
[
"MIT"
] |
permissive
|
priyakapadia/pfurl
|
2c700217d0f2c623e5fddde62c96f10502eebb49
|
3392347b5874fc4af7f1f3798e692bd0e3733351
|
refs/heads/master
| 2021-01-20T02:24:47.745713
| 2017-04-21T19:36:07
| 2017-04-21T19:36:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,075
|
#!/usr/bin/env python3.5
#
# (c) 2017 Fetal-Neonatal Neuroimaging & Developmental Science Center
# Boston Children's Hospital
#
# http://childrenshospital.org/FNNDSC/
# dev@babyMRI.org
#
import sys, os
sys.path.insert(1, os.path.join(os.path.dirname(__file__), '..'))
import socket
import json
import sys
import pfurl
from argparse import RawTextHelpFormatter
from argparse import ArgumentParser
from pfurl._colors import Colors
# Best-effort detection of this host's primary non-loopback IP: first try
# resolving the hostname and keeping a non-127.* address, else fall back to
# the source address of a UDP socket "connected" to 8.8.8.8 (no packets are
# actually sent for a datagram connect).
str_defIP = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
# Default service port and tool version.
str_defPort = '5055'
str_version = "1.1.1"
str_desc = Colors.CYAN + """
__ _
/ _| | |
_ __ | |_ _ _ _ __| |
| '_ \| _| | | | '__| |
| |_) | | | |_| | | | |
| .__/|_| \__,_|_| |_|
| |
|_|
Process-File-over-URL
A simple URL-based communication and control script.
-- version """ + \
Colors.YELLOW + str_version + Colors.CYAN + """ --
'pfurl' sends REST conforming commands and data to remote services, similar
in some ways to the well-known CLI tool, 'curl' or the Python tool, 'httpie'
'pfurl' not only sends curl type payloads, but can also zip and unzip entire
directories of files for transmission and reception.
'pfurl' is designed to be part of the ChRIS/CHIPS framework.
""" + \
Colors.BLINK_RED + """
+---------------------------------------------------------+
| NOTE THAT 'pfurl' COMMS ARE NOT NATIVELY ENCRYPTED! |
| USE AN SSH TUNNEL IF YOU NEED SECURE DATA TRANSMISSION. |
+---------------------------------------------------------+
""" + Colors.NO_COLOUR
parser = ArgumentParser(description = str_desc, formatter_class = RawTextHelpFormatter)
parser.add_argument(
'--msg',
action = 'store',
dest = 'msg',
default = '',
help = 'Message to send to pman or similar listener.'
)
parser.add_argument(
'--verb',
action = 'store',
dest = 'verb',
default = 'POST',
help = 'REST verb.'
)
parser.add_argument(
'--http',
action = 'store',
dest = 'http',
default = '%s:%s' % (str_defIP, str_defPort),
help = 'HTTP string: <IP>[:<port>]</some/path/>'
)
parser.add_argument(
'--auth',
action = 'store',
dest = 'auth',
default = '',
help = 'user:passwd authorization'
)
parser.add_argument(
'--jsonwrapper',
action = 'store',
dest = 'jsonwrapper',
default = '',
help = 'wrap msg in optional field'
)
parser.add_argument(
'--quiet',
help = 'if specified, only echo final JSON output returned from server',
dest = 'b_quiet',
action = 'store_true',
default = False
)
parser.add_argument(
'--raw',
help = 'if specified, do not wrap return data from remote call in json field',
dest = 'b_raw',
action = 'store_true',
default = False
)
parser.add_argument(
'--oneShot',
help = 'if specified, transmit a shutdown ctl to the remote service after event',
dest = 'b_oneShot',
action = 'store_true',
default = False
)
parser.add_argument(
'--man',
help = 'request help: --man commands',
dest = 'man',
action = 'store',
default = ''
)
parser.add_argument(
'--content-type',
help = 'content type',
dest = 'contentType',
action = 'store',
default = ''
)
parser.add_argument(
'--jsonpprintindent',
help = 'pretty print json-formatted payloads',
dest = 'jsonpprintindent',
action = 'store',
default = 0
)
parser.add_argument(
'--httpResponse',
help = 'if specified, return HTTP responses',
dest = 'b_httpResponse',
action = 'store_true',
default = False
)
parser.add_argument(
'--version',
help = 'if specified, print version number',
dest = 'b_version',
action = 'store_true',
default = False
)
args = parser.parse_args()

# --version prints and exits immediately (exit code 1, as shipped).
if args.b_version:
    print("Version: %s" % str_version)
    sys.exit(1)

# Build the client from the CLI options. NOTE(review): this rebinds the
# module name 'pfurl' to the instance, shadowing the imported module.
pfurl = pfurl.Pfurl(
    msg = args.msg,
    http = args.http,
    verb = args.verb,
    contentType = args.contentType,
    auth = args.auth,
    b_raw = args.b_raw,
    b_quiet = args.b_quiet,
    b_oneShot = args.b_oneShot,
    b_httpResponse = args.b_httpResponse,
    jsonwrapper = args.jsonwrapper,
    man = args.man,
    startFromCLI = True,
    desc = str_desc
)

# Calling the instance performs the request; optionally re-serialize the
# returned JSON with the requested pretty-print indent.
if not args.jsonpprintindent:
    print(pfurl())
else:
    print(json.dumps(json.loads(pfurl()), indent=int(args.jsonpprintindent)))
sys.exit(0)
|
[
"rudolph.pienaar@gmail.com"
] |
rudolph.pienaar@gmail.com
|
|
7638f5b06086fd4352f99f4e28e29719f59c1a59
|
3b786d3854e830a4b46ee55851ca186becbfa650
|
/SystemTesting/pylib/nsx/vsm/edge/edge_firewall_schema/edge_firewall_rule_application_schema.py
|
e6b475f52d5dcc3b1981baeeef177e822df242aa
|
[] |
no_license
|
Cloudxtreme/MyProject
|
d81f8d38684333c22084b88141b712c78b140777
|
5b55817c050b637e2747084290f6206d2e622938
|
refs/heads/master
| 2021-05-31T10:26:42.951835
| 2015-12-10T09:57:04
| 2015-12-10T09:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
import base_schema
from edge_firewall_rule_application_service_schema import FirewallRuleApplicationServiceSchema
class FirewallRuleApplicationSchema(base_schema.BaseSchema):
    # Schema for the <application> element of an edge firewall rule payload.
    _schema_name = "application"
    def __init__(self, py_dict=None):
        """ Constructor to create FirewallRuleApplicationSchema object
        @param py_dict : python dictionary to construct this object
        """
        super(FirewallRuleApplicationSchema, self).__init__()
        # This schema serializes to/from XML when talking to the API.
        self.set_data_type('xml')
        self.applicationId = None
        self.service = FirewallRuleApplicationServiceSchema()
        # Populate fields from the dict only when one was supplied; an empty
        # dict is still applied (the check is `is not None`, not truthiness).
        if py_dict is not None:
            self.get_object_from_py_dict(py_dict)
|
[
"bpei@vmware.com"
] |
bpei@vmware.com
|
58668ff0ee439f72a7abb030a5a1180a8f974d1e
|
7882860350c714e6c08368288dab721288b8d9db
|
/1일차/func(1번문제).py
|
30e5d8dec0d211d9db0a6a342ba840820f468502
|
[] |
no_license
|
park-seonju/Algorithm
|
682fca984813a54b92a3f2ab174e4f05a95921a8
|
30e5bcb756e9388693624e8880e57bc92bfda969
|
refs/heads/master
| 2023-08-11T18:23:49.644259
| 2021-09-27T10:07:49
| 2021-09-27T10:07:49
| 388,741,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
# Reference implementation kept from the exercise:
# def abc(t):
# for i in range(len(t)):
# if t[i] != t[len(t)-1-i]:
# return False
# return True
# word = input()
# print(word)
# if abc(word):
# print("입력하신 단어는 회문(Palindrome)입니다.")
# else :
# print("입력하신 단어는 회문(Palindrome)이 아닙니다.")
a = input()
# BUG FIX: the original `b = str(reversed(a))` stringified the iterator
# object itself ("<reversed object at 0x...>"), so `a == b` could never be
# true for real input. Join the reversed characters instead (a[::-1] would
# also work).
b = ''.join(reversed(a))
print(type(b))  # still <class 'str'>, unchanged output
if a == b:
    print('O')
|
[
"cucu9823@naver.com"
] |
cucu9823@naver.com
|
f8e1c79af7f8ff238e4aa312ef2ee68f1e70f4a8
|
03c00aa07607c1f206c0fb3cf00fc5c510d7a4bf
|
/Infoplus/apis/carrier_service_api.py
|
3666fa7df7e4744314451fd9fceb448261b997ef
|
[] |
no_license
|
infopluscommerce/infoplus-python-client
|
748cc9af739615036c52adce70aa7f4303601b97
|
bde657057fedb5396ecf6c42e8ba668456bd1c43
|
refs/heads/master
| 2023-08-23T11:32:01.160320
| 2017-03-17T14:43:15
| 2017-03-17T14:43:15
| 58,404,638
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,611
|
py
|
# coding: utf-8
"""
CarrierServiceApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class CarrierServiceApi(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
    def __init__(self, api_client=None):
        # Use the injected client when given; otherwise fall back to (and
        # lazily create) the shared client held by the global Configuration.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client
    def get_carrier_service_by_id(self, carrier_service_id, **kwargs):
        """
        Get a carrierService by id.

        Returns the carrierService identified by the specified id.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.

        >>> def callback_function(response):
        ...     pprint(response)
        >>> thread = api.get_carrier_service_by_id(carrier_service_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str carrier_service_id: Id of carrierService to be returned. (required)
        :return: CarrierService
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ['carrier_service_id']
        all_params.append('callback')
        params = locals()
        # Reject any keyword argument that is not a declared parameter.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_carrier_service_by_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'carrier_service_id' is set
        if ('carrier_service_id' not in params) or (params['carrier_service_id'] is None):
            raise ValueError("Missing the required parameter `carrier_service_id` when calling `get_carrier_service_by_id`")
        resource_path = '/beta/carrierService/{carrierServiceId}'.replace('{format}', 'json')
        path_params = {}
        if 'carrier_service_id' in params:
            path_params['carrierServiceId'] = params['carrier_service_id']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])
        # Authentication setting
        auth_settings = ['api_key']
        response = self.api_client.call_api(resource_path, 'GET',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='CarrierService',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
    def get_carrier_service_by_search_text(self, **kwargs):
        """
        Search carrierServices.

        Returns the list of carrierServices that match the given searchText.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.

        >>> def callback_function(response):
        ...     pprint(response)
        >>> thread = api.get_carrier_service_by_search_text(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str search_text: Search text, used to filter results.
        :param int page: Result page number. Defaults to 1.
        :param int limit: Maximum results per page. Defaults to 20. Max allowed value is 250.
        :return: list[CarrierService]
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ['search_text', 'page', 'limit']
        all_params.append('callback')
        params = locals()
        # Reject any keyword argument that is not a declared parameter.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_carrier_service_by_search_text" % key
                )
            params[key] = val
        del params['kwargs']
        resource_path = '/beta/carrierService/search'.replace('{format}', 'json')
        path_params = {}
        query_params = {}
        # All three filters are optional; only forward the ones supplied.
        if 'search_text' in params:
            query_params['searchText'] = params['search_text']
        if 'page' in params:
            query_params['page'] = params['page']
        if 'limit' in params:
            query_params['limit'] = params['limit']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])
        # Authentication setting
        auth_settings = ['api_key']
        response = self.api_client.call_api(resource_path, 'GET',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='list[CarrierService]',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
|
[
"kdonnelly@infopluscommerce.com"
] |
kdonnelly@infopluscommerce.com
|
5964837f4eff397b07abd2f9cf033fbca7a302c2
|
6540f5a1ba587fb06067a09e67d0603e22305631
|
/apps/users/admin.py
|
7a124deede616de4741134a5ccf003d45c473533
|
[] |
no_license
|
bluehawkarthur/ebil
|
3ff8171672baaebeacfd95f8fb68c9df3e0d54ad
|
9d3d5291f2aa3f0822047cd88d398add4b987c54
|
refs/heads/master
| 2021-01-17T11:18:24.173696
| 2016-10-06T22:20:53
| 2016-10-06T22:20:53
| 46,352,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
from django.contrib import admin
from .models import User, Personajuridica
# Expose both account models in the Django admin with the default ModelAdmin.
for _model in (User, Personajuridica):
    admin.site.register(_model)
|
[
"josedanielf9@gmail.com"
] |
josedanielf9@gmail.com
|
29818ba52762eb59c2db173b8688f4636cf37b75
|
c4591b23aebde4a1ec262a6b3f5cc124fea0d638
|
/ExceptionHandling/02-code.py
|
a072cdaae3db241510b0ae750e404b9e9a643154
|
[] |
no_license
|
ravi4all/PythonApril_21
|
b41f2c845c4003d4291d46d52294767741d4f0d8
|
52a1f538182a7ce78b2c90db3f745d37ea321897
|
refs/heads/main
| 2023-05-04T01:48:11.414424
| 2021-05-24T12:27:30
| 2021-05-24T12:27:30
| 356,850,524
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
try:
    # The file is opened write-only, so the read() below raises
    # io.UnsupportedOperation -- this block deliberately provokes the
    # exception as a teaching example.
    file = open('file_1.txt','w')
    file.write("Hello")
    data = file.read()
    print(data)
    # file.close()
# NOTE(review): BaseException is very broad (also catches KeyboardInterrupt
# and SystemExit); kept as-is since demonstrating handlers is the point here.
except BaseException as ex:
    print(ex)
finally:
    # Cleanup always runs, whether or not an exception fired above.
    print("Finally will always execute")
    file.close()
|
[
"noreply@github.com"
] |
ravi4all.noreply@github.com
|
547407b8be9ec042211d5cdf47758cab6df9f6e4
|
2a3743ced45bd79826dcdc55f304da049f627f1b
|
/venv/lib/python3.7/site-packages/notebook/log.py
|
3621a70caef590f7d25e7f206b7a6f9826090430
|
[
"MIT"
] |
permissive
|
Dimasik007/Deribit_funding_rate_indicator
|
12cc8cd7c0be564d6e34d9eae91940c62492ae2a
|
3251602ae5249069489834f9afb57b11ff37750e
|
refs/heads/master
| 2023-05-26T10:14:20.395939
| 2019-08-03T11:35:51
| 2019-08-03T11:35:51
| 198,705,946
| 5
| 3
|
MIT
| 2023-05-22T22:29:24
| 2019-07-24T20:32:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,788
|
py
|
#-----------------------------------------------------------------------------
# Copyright (c) Jupyter Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import json
from tornado.log import access_log
from .prometheus.log_functions import prometheus_log_method
def log_request(handler):
    """log a bit more information about each request than tornado's default

    - move static file get success to debug-level (reduces noise)
    - get proxied IP instead of proxy IP
    - log referer for redirect and failed requests
    - log user-agent for failed requests
    """
    status = handler.get_status()
    request = handler.request

    # Pick the log severity from the status class; 304 counts as a success.
    if status < 300 or status == 304:
        log_method = access_log.debug
    elif status < 400:
        log_method = access_log.info
    elif status < 500:
        log_method = access_log.warning
    else:
        log_method = access_log.error

    elapsed_ms = 1000.0 * handler.request.request_time()
    fields = {
        'status': status,
        'method': request.method,
        'ip': request.remote_ip,
        'uri': request.uri,
        'request_time': elapsed_ms,
    }
    template = "{status} {method} {uri} ({ip}) {request_time:.2f}ms"

    # Failed requests additionally record where the client came from.
    if status >= 400:
        fields['referer'] = request.headers.get('Referer', 'None')
        template = template + ' referer={referer}'
    # Server errors (except 502 from upstream) dump the full header set.
    if status >= 500 and status != 502:
        log_method(json.dumps(dict(request.headers), indent=2))

    log_method(template.format(**fields))
    prometheus_log_method(handler)
|
[
"dmitriy00vn@gmail.com"
] |
dmitriy00vn@gmail.com
|
323b337749b6214dcef22befb999fdc3ec1afa0c
|
14804b282e567bf45c974b9a55cbdfa1907c5958
|
/7_Modules/E_from_Import_module.py
|
f330334ed400c444afaf4beadb73d5e60f834495
|
[
"MIT"
] |
permissive
|
Oscar-Oliveira/Python-3
|
cfdcbcf4548144fb2488625f53f76b20e4d8c5b0
|
fa791225a6810b75890d24407b73c5e1b514acbe
|
refs/heads/master
| 2021-09-26T06:27:16.367956
| 2018-10-27T10:42:21
| 2018-10-27T10:42:21
| 101,991,657
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
"""
Import module example
"""
from C_my_module import my_sum, __version__, __sprint__, some_value
print(my_sum(1.25, 3.2))
print(__version__)
print(__sprint__)
print(some_value)
|
[
"oscar.m.oliveira@gmail.com"
] |
oscar.m.oliveira@gmail.com
|
a04ef2491409689efa7fb5b643c15eede0ce6500
|
2e60017779c5c286629ab5a3a7aeb27a6b19a60b
|
/python/problem_38.py
|
7a9e34b9da335053313aef540af6db768d6ad191
|
[] |
no_license
|
jamesjiang52/10000-Lines-of-Code
|
f8c7cb4b8d5e441693f3e0f6919731ce4680f60d
|
3b6c20b288bad1de5390ad672c73272d98e93ae0
|
refs/heads/master
| 2020-03-15T03:50:38.104917
| 2018-05-07T04:41:52
| 2018-05-07T04:41:52
| 131,952,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
# NOTE(review): only the sys.path setup and the final input() are guarded by
# __main__; the solve itself runs unguarded, so importing this module
# executes the whole search.
if __name__ == '__main__':
    import sys
    sys.path.insert(0, 'C:\\Users\\James Jiang\\Documents\\Project Euler')
from functions import *
from progress import Progress
# answers_list[n] holds the known answer to problem n ('dummy' pads index 0).
answers_list = ['dummy']
with open('C:\\Users\\James Jiang\\Documents\\Project Euler\\answers.txt') as answers:
    for line in answers:
        answers_list.append(int(line))
progress_ = Progress("Problem 038: Pandigital multiples", 0, 5000)
# Search downwards from 9999 and stop at the first i whose concatenation
# with 2*i is pandigital.
for i in range(10000, 5000, -1):
    progress_.count = 10000 - i
    progress_.progress()
    if is_pandigital(str(i) + str(2*i)):
        break
# Report the concatenated number against the known answer for problem 38.
progress_.count = int(str(i) + str(2*i))
progress_.total = answers_list[38]
progress_.progress()
if __name__ == '__main__':
    input()
|
[
"jamesjiang52@gmail.com"
] |
jamesjiang52@gmail.com
|
ef1e26ac2e2027166701c8f393f5f6cbba7dd26a
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/pybites/beginner/beginner-bite-32-dont-mutability-fool-you.py
|
bb552f90e582a2a3dc772ff09e46161df47d251e
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 606
|
py
|
'''
In this Bite you are presented with a function that copies the given items data structure.
There is a problem though, the tests fail. Can you fix it?
This can be done in a one liner. If you know which module to use it will be easy,
if not you will learn something new today.
Regardless we want you to think about Python's mutability. Have fun!
'''
items [{'id': 1, 'name': 'laptop', 'value': 1000},
{'id': 2, 'name': 'chair', 'value': 300},
{'id': 3, 'name': 'book', 'value': 20}]
___ duplicate_items(items
r.. items |
items_copy items
print(id(items
print(id(items_copy
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
dbe92292a15e5a2a15eb70407f835d192eb3601a
|
63d3a6255f2677f9d92205d62163b9d22a74c5c7
|
/modules/accounts/migrations/0024_alter_user_protec_sub_pass.py
|
dbe591a3aaa8fc2e6defef1a0cf0a10b14cdb8ac
|
[
"Apache-2.0"
] |
permissive
|
GPCRmd/GPCRmd
|
9204f39b1bfbc800b13512b316e05e54ddd8af23
|
47d7a4e71025b70e15a0f752760873249932c54e
|
refs/heads/main
| 2023-09-04T11:13:44.285629
| 2023-08-29T13:43:01
| 2023-08-29T13:43:01
| 260,036,875
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
# Generated by Django 4.1.5 on 2023-06-19 15:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: changes User.protec_sub_pass to a plain
    # BinaryField. Left untouched beyond comments, per migration convention.
    dependencies = [
        ('accounts', '0023_user_protec_sub_pass'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='protec_sub_pass',
            field=models.BinaryField(),
        ),
    ]
|
[
"adrian.garcia.recio@gmail.com"
] |
adrian.garcia.recio@gmail.com
|
8c9aa6151183c5a58c9b2f4629ee57dc75958092
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_204/193.py
|
e70d2b19de1b0addcc2ca7ee2d5b7e204e5ad372
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
import math
def make_kits(num_ingredients, num_packages, ingredients_proportion, packages):
    # Count how many complete kits can be assembled. packages[i] lists the
    # package weights for ingredient i; a kit needs one package per
    # ingredient, all compatible with a single serving count under the
    # +/-10% tolerance implemented by serving()/too_little()/too_much().
    # NOTE: Python 2 source (xrange); exact pointer-advance order matters.
    # Sort each ingredient's packages so one forward pointer per ingredient
    # suffices (greedy matching over sorted weights).
    for i in xrange(0, num_ingredients):
        packages[i].sort()
    counter = 0
    pointers = [0]*num_ingredients
    for i in xrange(0, num_packages):
        # Feasible [min, max] serving counts for ingredient 0's i-th package.
        num_servings = serving(packages[0][i], ingredients_proportion[0])
        # print "i: ", i, " num_servings: ", num_servings
        for num_serving in xrange(num_servings[0], num_servings[1]+1):
            flag = 0
            for j in xrange(1, num_ingredients):
                # Skip packages too small for this serving count.
                while pointers[j] < num_packages and too_little(packages[j][pointers[j]], ingredients_proportion[j], num_serving):
                    pointers[j] = pointers[j]+1
                # Exhausted, or smallest remaining package is too big: fail.
                if pointers[j] == num_packages or too_much(packages[j][pointers[j]], ingredients_proportion[j], num_serving):
                    flag = -1
                    break
            if flag == 0:
                # print "counter: ", counter
                # print i, " ", pointers[1]
                # All ingredients matched: consume one package of each.
                pointers = [x+1 for x in pointers]
                counter = counter+1
                break
    return counter
def serving(weight, unit):
    """Return [lo, hi]: the serving counts a package of `weight` can cover
    given `unit` grams per serving, within the +/-10% tolerance."""
    lo = int(math.ceil(weight / 1.1 / unit))   # smallest count at +10% slack
    hi = int(math.floor(weight / 0.9 / unit))  # largest count at -10% slack
    return [lo, hi]
def too_little(weight, unit, num_serving):
    """True when `weight` is below the -10% tolerance for `num_serving`
    servings of `unit` grams each."""
    return weight < unit * num_serving * 0.9
def too_much(weight, unit, num_serving):
    """True when `weight` exceeds the +10% tolerance for `num_serving`
    servings of `unit` grams each."""
    return weight > unit * num_serving * 1.1
# raw_input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
# NOTE(review): Python 2 only -- uses raw_input, xrange and the print
# statement; do not run under Python 3 without porting.
t = int(raw_input())  # read a line with a single integer
for i in xrange(1, t + 1):
    num_ingredients, num_packages = [int(s) for s in raw_input().split(" ")]  # read a list of integers, 2 in this case
    ingredients_proportion = [int(s) for s in raw_input().split(" ")]
    # One weight list per ingredient, read on the following lines.
    packages = [[] for k in xrange(1, num_ingredients+1)]
    for j in xrange(0, num_ingredients):
        packages[j] = [int(s) for s in raw_input().split(" ")]
    res = make_kits(num_ingredients, num_packages, ingredients_proportion, packages)
    print "Case #{}: {}".format(i, res)
    # check out .format's specification for more formatting options
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
b6edd7683db0062d6bd530a81b71c14729977618
|
f08336ac8b6f8040f6b2d85d0619d1a9923c9bdf
|
/3-lengthOfLongestSubstring.py
|
e11ad2357de806386f05b36afa99e4988224ba70
|
[] |
no_license
|
MarshalLeeeeee/myLeetCodes
|
fafadcc35eef44f431a008c1be42b1188e7dd852
|
80e78b153ad2bdfb52070ba75b166a4237847d75
|
refs/heads/master
| 2020-04-08T16:07:47.943755
| 2019-02-21T01:43:16
| 2019-02-21T01:43:16
| 159,505,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
'''
3. Longest Substring Without Repeating Characters
Given a string, find the length of the longest substring without repeating characters.
Example 1:
Input: "abcabcbb"
Output: 3
Explanation: The answer is "abc", with the length of 3.
Example 2:
Input: "bbbbb"
Output: 1
Explanation: The answer is "b", with the length of 1.
Example 3:
Input: "pwwkew"
Output: 3
Explanation: The answer is "wke", with the length of 3.
Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
'''
class Solution:
    # O(n) sliding window
    def lengthOfLongestSubstring(self, s):
        """
        :type s: str
        :rtype: int

        Track the last index of every character; when a repeat falls inside
        the current window, slide the window start just past it.
        """
        last_seen = {}
        best = 0
        window_start = 0
        for idx, ch in enumerate(s):
            prev = last_seen.get(ch, -1)
            if prev >= window_start:
                # ch repeats inside the window: shrink from the left.
                window_start = prev + 1
            last_seen[ch] = idx
            current = idx - window_start + 1
            if current > best:
                best = current
        return best
|
[
"marshallee413lmc@sina.com"
] |
marshallee413lmc@sina.com
|
1b02c0d09ab89fd002e5a0b4d19bc4797f962554
|
823105ac7d892cf214ed9dcd8eaba315b01c1ed7
|
/model/unet.py
|
7105c7f28fcfcc05af73a60055e12a885fd43f39
|
[] |
no_license
|
jiye-ML/lane_detection_baidu_2019
|
ccee82a1272bace80f9e128c24ae5ff64b827bd7
|
6ed35de00a34a8714a32c2a3ff649c4b0b1f1407
|
refs/heads/master
| 2022-12-10T20:46:27.983785
| 2020-08-29T03:26:25
| 2020-08-29T03:26:25
| 223,421,841
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,593
|
py
|
import torch
import torch.nn as nn
from model.network import ResNet101v2
from model.module import Block
class UNetConvBlock(nn.Module):
    """Two (conv3x3 -> ReLU -> BatchNorm) stages mapping in_chans -> out_chans.

    NOTE(review): ReLU precedes BatchNorm here (conv-relu-bn), mirroring the
    original ordering.
    """

    def __init__(self, in_chans, out_chans):
        super(UNetConvBlock, self).__init__()
        # Same layers, same construction order, same attribute name as the
        # original (keeps state-dict keys and parameter init identical).
        self.block = nn.Sequential(
            nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(out_chans),
            nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(out_chans),
        )

    def forward(self, x):
        return self.block(x)
class UNetUpBlock(nn.Module):
    """Decoder step: 2x upsample + 1x1 conv, concat with the cropped encoder
    skip, then a double-conv block."""

    def __init__(self, in_chans, out_chans):
        super(UNetUpBlock, self).__init__()
        # Bilinear 2x upsample followed by a 1x1 conv to reduce channels.
        # (attribute names `up` / `conv_block` kept for state-dict parity)
        self.up = nn.Sequential(
            nn.Upsample(mode='bilinear', scale_factor=2),
            nn.Conv2d(in_chans, out_chans, kernel_size=1),
        )
        self.conv_block = UNetConvBlock(in_chans, out_chans)

    @staticmethod
    def center_crop(layer, target_size):
        """Symmetrically crop an (N, C, H, W) tensor to target (H, W)."""
        _, _, height, width = layer.size()
        off_y = (height - target_size[0]) // 2
        off_x = (width - target_size[1]) // 2
        return layer[
            :, :, off_y: off_y + target_size[0], off_x: off_x + target_size[1]
        ]

    def forward(self, x, bridge):
        upsampled = self.up(x)
        skip = self.center_crop(bridge, upsampled.shape[2:])
        merged = torch.cat([upsampled, skip], 1)
        return self.conv_block(merged)
class ResNetUNet(nn.Module):
    """U-Net style decoder on top of a ResNet101v2 encoder for segmentation."""
    def __init__(self, config):
        super(ResNetUNet, self).__init__()
        # Number of output classes comes from the runtime config.
        self.n_classes = config.NUM_CLASSES
        self.encode = ResNet101v2()
        # Decoder starts from 2048 channels; each of the 3 up blocks halves it.
        prev_channels = 2048
        self.up_path = nn.ModuleList()
        for i in range(3):
            self.up_path.append(UNetUpBlock(prev_channels, prev_channels // 2))
            prev_channels //= 2
        # Two conv blocks squeeze to 16 channels before the 1x1 classifier.
        self.cls_conv_block1 = Block(prev_channels, 32)
        self.cls_conv_block2 = Block(32, 16)
        self.last = nn.Conv2d(16, self.n_classes, kernel_size=1)
        self.init_weight()
    def forward(self, x):
        input_size = x.size()[2:]
        # Encoder returns multi-scale features; assumes blocks[-1] is the
        # deepest map and earlier entries are the skip connections --
        # TODO(review): confirm against ResNet101v2's return contract.
        blocks = self.encode(x)
        x = blocks[-1]
        for i, up in enumerate(self.up_path):
            x = up(x, blocks[-i - 2])
        # Upsample back to the input resolution before classification.
        x = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)(x)
        x = self.cls_conv_block1(x)
        x = self.cls_conv_block2(x)
        x = self.last(x)
        return x
    def init_weight(self):
        # Kaiming init for conv weights; BatchNorm scale=1, shift=0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
|
[
"woxinxie1234@163.com"
] |
woxinxie1234@163.com
|
aceb1fcdf196d716cb53b0a7e02874bfd259fffa
|
e2426d7c01500ca4a2df4e4555f217f957baf957
|
/cows/service/imps/geoplot_wms_backend/slabs/slab_base.py
|
3375ea42382e0d9856eba9bfa7bb8f66d03e401f
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
cedadev/cows
|
959a5e1ad220cfe0cce48a2131d6971106c765aa
|
db9ed729c886b271ce85355b97e39243081e8246
|
refs/heads/master
| 2020-03-16T15:17:45.710584
| 2018-05-09T10:35:47
| 2018-05-09T10:36:37
| 132,736,968
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,755
|
py
|
# BSD Licence
# Copyright (c) 2010, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
import logging
import time
import numpy
from geoplot.utils import isRangeInLimits
import geoplot.colour_scheme as colour_scheme
from cows.service.wms_iface import IwmsLayerSlab
from cows.service.imps.image_import import Image
from cows.service.imps.geoplot_wms_backend.slab_options_parser import SlabOptionsParser
from cows.service.imps.geoplot_wms_backend.rendering_option import RenderingOption
log = logging.getLogger(__name__)
class SlabBase(IwmsLayerSlab):
    """
    A layer slab that implements the IwmsLayerSlab interface and uses geoplot
    to render the required images.
    This is an abstract base class and should not be used directly.
    """
    # Rendering options every slab understands; subclasses may extend this.
    renderingOptions = [
        RenderingOption('cmap', "Colour Scheme" ,str,'jet',["bone","jet", "copper", "gray", "winter"] ),
        RenderingOption('cmap_min', "Legend Min" ,float,None),
        RenderingOption('cmap_max', "Legend Max" ,float,None),
        RenderingOption('cmap_scale', "Colour Bar Scale" ,str ,'linear', ['linear','log']),
    ]
    def __init__(self, variable, title, crs, dimValues, transparent, bgcolor, bbox, renderOpts):
        """
        Constructor.

        (FIX: this text was previously a bare class-body string literal --
        a no-op statement invisible to help() -- and is now a real docstring.)

        @param variable: the netcdf variable that contains the data for this slab
        @param title: the title of the variable that is to be used
        @param crs: the coordinate reference system the data is stored in
        @param dimValues: the dimension values for this slab
        @param transparent: indicates if the produced image should be transparent
            or not.
        @param bbox: the bounds of the data in lat/lon
        @param renderOpts: the additional parameters received by the WMS, may
            include some custom rendering options.

        NOTE(review): crs, dimValues and bbox are accepted but not stored by
        this base class.
        """
        self.title = title
        self.renderOpts = renderOpts
        self.bgcolor = bgcolor
        self.transparent = transparent
        self.variable = variable
        #log.debug("renderOpts = %s" % (renderOpts,))
        # Check for non-default, but valid, colour map.
        cmapName = renderOpts.get('cmap', None)
        self._setUpColourMap(cmapName)
        self.parser = SlabOptionsParser(self.renderingOptions, renderOpts)
        self.ld = self._setupLayerDrawer()
    @classmethod
    def _setUpColourMap(cls, cmapName):
        """Adds a colour map to those defined in the rendering options if it is valid and not
        present already.
        @param cmapName: name of colour map
        """
        log.debug("Checking for cmap %s" % cmapName)
        # Mutates the shared class-level 'cmap' RenderingOption in place.
        cmapOptions = [r for r in cls.renderingOptions if r.name == 'cmap'][0]
        if cmapName not in cmapOptions.options:
            log.debug("Not found in renderingOptions %s" % cmapName)
            if colour_scheme.isValidCmapName(cmapName):
                log.debug("Valid cmap name %s" % cmapName)
                cmapOptions.options.append(cmapName)
        log.debug("All known cmaps %s" % cmapOptions)
    def _setupLayerDrawer(self):
        """
        Creates the layer drawer object so that it can be used in getImage.
        Must be implemented by subclasses.

        (FIX: this text was previously a bare class-body string literal and
        is now the method's real docstring.)
        """
        raise NotImplementedError()
    def getImage(self, bbox, width, height):
        """
        Create an image of a sub-bbox of a given size, constructed using the
        layer drawer.

        :ivar bbox: A bbox 4-tuple (the limits of the image requested).
        :ivar width: width in pixels.
        :ivar height: height in pixels.
        :return: A PIL Image object.
        """
        #log.debug("GetImage called with bbox=%s, width=%s, height = %s" % (bbox, width, height,))
        xLimits = (bbox[0], bbox[2])
        yLimits = (bbox[1], bbox[3])
        # For plain lat/lon grids, short-circuit with a fully transparent
        # image when the requested window falls outside the data bounds.
        if sorted(self.variable.getAxisIds()) == sorted(['latitude','longitude']):
            if not self._areBoundsInLimits(bbox, xLimits, yLimits):
                img = numpy.zeros((height,width,4), numpy.uint8)
                pilImage = Image.fromarray(img, 'RGBA')
                log.debug("empty image used as no data found for id=%s (%sx%s), lon=%s, lat=%s " % \
                          (self.variable.id, width, height, xLimits, yLimits))
                return pilImage
        st = time.time()
        im = self.ld.makeImage(xLimits, yLimits, width, height)
        log.debug("generated contour image id=%s (%sx%s, lon=%s, lat=%s in %.2fs" % \
                  (self.variable.id, width, height, xLimits, yLimits, time.time() - st,))
        return im
    def _areBoundsInLimits(self, bbox, xLimits, yLimits):
        """Return True when the requested x/y limits intersect the variable's
        axis bounds (a circular longitude axis always passes on x)."""
        if self.variable.getAxisIds()[0] == 'longitude':
            lonAx, latAx = self.variable.getAxisList()
        else:
            latAx, lonAx = self.variable.getAxisList()
        xRange = [ lonAx.getBounds().min(), lonAx.getBounds().max()]
        yRange = [ latAx.getBounds().min(), latAx.getBounds().max()]
        log.debug("xLimits = %s" % (xLimits,))
        log.debug("yLimits = %s" % (yLimits,))
        log.debug("xRange = %s" % (xRange,))
        log.debug("yRange = %s" % (yRange,))
        log.debug("x range is circular: %s" % ("True" if lonAx.isCircular() else "False",))
        isInLimits = ((lonAx.isCircular() or isRangeInLimits(xRange, xLimits)) and
                      isRangeInLimits(yRange, yLimits))
        log.debug("isInLimits = %s" % (isInLimits,))
        return isInLimits
|
[
"ag.stephens@stfc.ac.uk"
] |
ag.stephens@stfc.ac.uk
|
406894c61e6011d157c6b62a0eccbe5a91f21124
|
58fb8cebdb51a83c6afd29f6b0d745d07ccfb441
|
/Cmonitor/statTasks/tasks.py
|
be2edf49342afdb91b45469ba2b8bd66d0a2a4b2
|
[] |
no_license
|
amonlong/Cmonitor
|
7980baabc139a62f9870fe0110076b761b7890b6
|
6cf1ec84db69236c9ff79c7bc475a0fa26e40e12
|
refs/heads/master
| 2020-03-09T22:39:58.661338
| 2018-04-23T06:01:56
| 2018-04-23T06:01:56
| 129,039,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,420
|
py
|
#-*- coding: utf-8 -*-
from __future__ import absolute_import,unicode_literals
import uuid
import time
from celery import states
from apps.record.models import TaskState, TaskItem
from statTasks.celery import app
from statTasks.subtasks import index, userInfo, business, risk, uniId
def makeRecord(taskname, stime, state, memo):
    """Persist one task run as a TaskState row.

    Silently does nothing when no TaskItem matches `taskname` (same as the
    original behaviour). `stime` is the run's start timestamp; runtime is
    measured from it to now.
    """
    matches = TaskItem.objects.filter(taskname=taskname)
    if not matches:
        return
    # uuid3 over name+now gives a deterministic-format, per-run unique id.
    seed = taskname + str(time.time())
    TaskState(
        task_id=uuid.uuid3(uuid.NAMESPACE_DNS, seed),
        taskname=matches[0],
        state=state,
        memo=memo,
        runtime=time.time() - stime,
    ).save()
#index mession
# Every task below follows the same template: run one stat sub-job, time it,
# persist the outcome via makeRecord(), and return (state, memo).
@app.task
def indexHead(taskname):
    stime = time.time()
    state, memo = index.indexHead()
    makeRecord(taskname, stime, state, memo)
    return state, memo
@app.task
def indexHopper(taskname):
    stime = time.time()
    state, memo = index.indexHopper()
    makeRecord(taskname, stime, state, memo)
    return state, memo
@app.task
def indexPlace(taskname):
    stime = time.time()
    state, memo = index.indexPlace()
    makeRecord(taskname, stime, state, memo)
    return state, memo
#userInfo mession
@app.task
def userIncrease(taskname):
    stime = time.time()
    state, memo = userInfo.userIncrease()
    makeRecord(taskname, stime, state, memo)
    return state, memo
@app.task
def userAge(taskname):
    stime = time.time()
    state, memo = userInfo.userAge()
    makeRecord(taskname, stime, state, memo)
    return state, memo
#business
@app.task
def flowLoanMoneyNO(taskname):
    stime = time.time()
    state, memo = business.flowLoanMoneyNO()
    makeRecord(taskname, stime, state, memo)
    return state, memo
@app.task
def flowRepayMoney(taskname):
    stime = time.time()
    state, memo = business.flowRepayMoney()
    makeRecord(taskname, stime, state, memo)
    return state, memo
@app.task
def flowDelayRate(taskname):
    stime = time.time()
    state, memo = business.flowDelayRate()
    makeRecord(taskname, stime, state, memo)
    return state, memo
#risk mession
@app.task
def passRate(taskname):
    stime = time.time()
    state, memo = risk.passRate()
    makeRecord(taskname, stime, state, memo)
    return state, memo
@app.task
def overdueRate(taskname):
    stime = time.time()
    state, memo = risk.overdueRate()
    makeRecord(taskname, stime, state, memo)
    return state, memo
#uniId
# CONSISTENCY FIX: every sibling stat job in this module is registered with
# @app.task; its absence here looked like an omission, so it was added.
# The function remains directly callable either way (a celery Task's
# __call__ runs the body).
@app.task
def productFirm(taskname):
    """Run the uniId productFirm stat job, record its outcome, and return
    (state, memo)."""
    stime = time.time()
    state, memo = uniId.productFirm()
    makeRecord(taskname, stime, state, memo)
    return state, memo
|
[
"250986341@qq.com"
] |
250986341@qq.com
|
11061c8608613ee3897a45acfe68832ba4ec274e
|
77900cdd9a815caf1cd04705321ca93f5072179f
|
/Project2/Project2/.history/blog/admin_20211115153912.py
|
aabc2adbfaef442cdd6a022985cdf4696abffa70
|
[] |
no_license
|
Bom19990111/helloword_python
|
717799d994223d65de5adaeabecf396ff2bc1fb7
|
2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7
|
refs/heads/master
| 2023-09-06T04:17:02.057628
| 2021-11-21T20:00:46
| 2021-11-21T20:00:46
| 407,063,273
| 0
| 1
| null | 2021-11-21T20:00:47
| 2021-09-16T07:18:35
|
Python
|
UTF-8
|
Python
| false
| false
| 570
|
py
|
from django.contrib import admin
from .models import Blog
from django import forms
from ckeditor_uploader.widgets import CKEditorUploadingWidget
# Register your models here.
class LessonForm(forms.Mode):
content = forms.CharField(widget=CKEditorUploadingWidget)
class Meta:
model = Blog
fields = '__all__'
class BlogAdmin(admin.ModelAdmin):
forms = LessonForm
list_display = ('title', 'slug', 'status', 'created_on')
list_filter = ('status',)
search_field = ['title', 'content']
admin.site.register(Blog, BlogAdmin)
|
[
"phanthituyngoc1995@gmail.com"
] |
phanthituyngoc1995@gmail.com
|
2a428d4157e23eb314c57e4cb9f438c38a5c599d
|
3bae1ed6460064f997264091aca0f37ac31c1a77
|
/apps/cloud_api_generator/generatedServer/tasklets/sso/changeAgentPassword/sso_changeAgentPassword.py
|
6e7965e0ea2c880b5f9870c2b34e27563455236e
|
[] |
no_license
|
racktivity/ext-pylabs-core
|
04d96b80ac1942754257d59e91460c3a141f0a32
|
53d349fa6bee0ccead29afd6676979b44c109a61
|
refs/heads/master
| 2021-01-22T10:33:18.523799
| 2017-06-08T09:09:28
| 2017-06-08T09:09:28
| 54,314,984
| 0
| 0
| null | 2017-06-08T09:09:29
| 2016-03-20T11:55:01
|
Python
|
UTF-8
|
Python
| false
| false
| 186
|
py
|
__author__ = 'aserver'
__tags__ = 'sso', 'changeAgentPassword'
__priority__ = 3


def main(q, i, params, tags):
    """Tasklet entry point: reset the 'result' entry of *params* to ''."""
    params.update(result='')


def match(q, i, params, tags):
    """This tasklet applies to every request."""
    return True
|
[
"devnull@localhost"
] |
devnull@localhost
|
dc87b8ec1e10aade6ad80e91571d12ee9671758b
|
c5b062551f2131b4d9d68de44d0eceebb57403d9
|
/tests/refresh_token/test_shortcuts.py
|
1a69514235e1294ddcef5bd8aed414ebeb26f760
|
[
"MIT"
] |
permissive
|
PedroBern/django-graphql-jwt
|
e78437257e6d948ba48c32107596742c4e9753b9
|
6e816445b72e7582d0595fda9e7e5d0486026045
|
refs/heads/master
| 2020-12-05T10:12:20.893450
| 2019-12-05T15:28:42
| 2019-12-05T15:28:42
| 232,077,280
| 1
| 0
|
MIT
| 2020-01-06T10:28:28
| 2020-01-06T10:28:28
| null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
from graphql_jwt import shortcuts
from graphql_jwt.exceptions import JSONWebTokenError
from ..testcases import UserTestCase
class ShortcutsTests(UserTestCase):
    """Exercise the refresh-token shortcut helpers."""

    def test_get_refresh_token(self):
        """A freshly created refresh token can be looked up and maps back to its user."""
        token = shortcuts.create_refresh_token(self.user)
        owner = shortcuts.get_refresh_token(token).user
        self.assertEqual(owner, self.user)

    def test_get_refresh_token_error(self):
        """Looking up an unknown token raises JSONWebTokenError."""
        with self.assertRaises(JSONWebTokenError):
            shortcuts.get_refresh_token('invalid')
|
[
"domake.io@gmail.com"
] |
domake.io@gmail.com
|
e6293c0f6d02105de2139fc70ed4a725fedec707
|
7c19fbfe632d6fc32b1d2ba4f53aac17f9351483
|
/test.py
|
b312087306712d3d8e304f240172b11d2a06c079
|
[] |
no_license
|
ymsk-sky/capture_tube
|
3cdea1e0634d6252a8980aa685f963cc3de12518
|
bc6e9bb5c88e4b9212c02b249eef8b40f1e5aa24
|
refs/heads/master
| 2020-12-27T16:27:04.528906
| 2020-04-08T11:08:16
| 2020-04-08T11:08:16
| 237,970,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
# -*- coding: utf-8 -*-
import cv2
import pafy
import youtube_dl
def main():
    """Play test.mp4, drawing red boxes around detected anime faces.

    Press 'q' to quit early. Returns silently if the video cannot be
    opened.
    """
    src = 'test.mp4'
    video = cv2.VideoCapture(src)
    if not video.isOpened():
        return

    # Frame rate of the source video, used to pace playback.
    fps = int(video.get(cv2.CAP_PROP_FPS))
    # BUG FIX: cv2.waitKey expects a delay in *milliseconds*. The original
    # passed `fps` directly (e.g. 30 ms for a 30 fps clip is coincidence,
    # not correctness); derive the per-frame delay, guarding fps == 0.
    delay_ms = max(1, int(1000 / fps)) if fps > 0 else 1

    # LBP cascade trained for anime faces.
    cascade_file = 'lbpcascade_animeface.xml'
    clf = cv2.CascadeClassifier(cascade_file)

    # Process the stream one frame at a time.
    while video.isOpened():
        ret, frame = video.read()
        if not ret:
            break
        # The cascade classifier works on grayscale input.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect faces and draw a rectangle around each.
        faces = clf.detectMultiScale(gray)
        for x, y, w, h in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cv2.imshow('tube', frame)
        key = cv2.waitKey(delay_ms) & 0xFF
        if key == ord('q'):
            break

    video.release()
    cv2.destroyAllWindows()
def dl(url):
    """Download a YouTube video with youtube_dl and return its info dict.

    BUG FIX: the option key was misspelled 'outtmple'; YoutubeDL ignores
    unknown keys, so the output filename template never took effect. The
    correct key is 'outtmpl'.
    """
    ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s', 'format': '137'})
    with ydl:
        result = ydl.extract_info(url, download=True)
    return result
# Entry point: play the local test video with face-detection overlays.
if __name__ == '__main__':
    main()
|
[
"ymsk.sky.95@gmail.com"
] |
ymsk.sky.95@gmail.com
|
13b88e53752bbfb7de8405c6c5af6b3a53d11bd7
|
3d4a6bd2297ac04b112bc8d24fa1118f692a4e2b
|
/procon_python/src/atcoder/abc/past/B_044_BeautifulStrings.py
|
5bc6369e3357ba32fb294ba56744f3eb855c7957
|
[] |
no_license
|
WAT36/procon_work
|
beba626d9b9c1effded8c9b9f56fbc37abd13636
|
2e6bc42e6f25938afe740682ad7b6c21a0838d42
|
refs/heads/master
| 2021-08-16T11:56:25.048392
| 2021-06-17T14:13:10
| 2021-06-17T14:13:10
| 162,991,707
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
# AtCoder ABC-044 B "Beautiful Strings": a word is beautiful when every
# character occurs an even number of times. Read one word from stdin and
# print "Yes" or "No".
from collections import Counter

w = input()
# Counter replaces the hand-rolled dict.get(...)+1 frequency loop.
counts = Counter(w)

# Beautiful iff every character count is even.
if all(n % 2 == 0 for n in counts.values()):
    print("Yes")
else:
    print("No")
|
[
"motohari.xanadu@gmail.com"
] |
motohari.xanadu@gmail.com
|
3c2e5aaa02755fa348d33bdb44613ba9ceabf258
|
07c4c39a3f43aa41327702329cddf555ac489f0e
|
/.PyCharmCE2019.1/system/python_stubs/1499390420/typed_ast/_ast27/Import.py
|
bcab2971d027f20d058802751dc4c2363981afe7
|
[] |
no_license
|
shirotakoki/teran
|
a2ba42c2d2c605c7421b35dc1dfa5f51baec0fd7
|
13c5e8d7484d148c3490726aa860d5a10165381b
|
refs/heads/master
| 2023-02-04T21:18:51.829188
| 2020-09-27T08:26:54
| 2020-09-27T08:26:54
| 323,551,048
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# encoding: utf-8
# module typed_ast._ast27
# from C:\Users\teran\AppData\Roaming\Python\Python37\site-packages\typed_ast\_ast27.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
from .stmt import stmt
class Import(stmt):
    """AST node for an `import` statement (auto-generated typed_ast stub)."""
    # no doc
    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    # Child-node fields of this AST node: `names` holds the imported aliases.
    _fields = (
        'names',
    )
|
[
"teranosinn@gmail.com"
] |
teranosinn@gmail.com
|
bb4f37c64f211df4f9a73f5b49800a64ada951c9
|
f925499f896b012624118cfafd02fef76ff5075a
|
/src/testcase/GN_Y201J/input_case/GN_Y201J_Over_Day.py
|
227028df3fa47b789517261bde3cd7c1da6899a7
|
[
"Apache-2.0"
] |
permissive
|
maiyajj/AutoTest_script-Appium_Connect
|
f7c06db1d2f58682d1a9d6f534f7dd5fb65d766d
|
f9c2c42c281a9e2f984acb4a72dda0694b053f22
|
HEAD
| 2019-07-26T01:39:48.413753
| 2018-04-11T02:11:38
| 2018-04-11T02:11:38
| 112,449,369
| 30
| 22
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
# coding=utf-8
# Aggregator module: re-export every "over day" test case for device
# GN_Y201J so a runner can wildcard-import them from one place.
try:
    from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_001 import *
    from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_002 import *
    from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_003 import *
    from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_004 import *
    from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_005 import *
    from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_006 import *
    from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_007 import *
except ImportError as e:
    # A failed import is printed instead of raising, so importing this
    # aggregator never crashes. NOTE(review): once one import fails, the
    # remaining case modules in the try block are skipped entirely.
    print(e)
|
[
"1045373828@qq.com"
] |
1045373828@qq.com
|
fb01fad045d376f95d097d9da88e8099c7a6cfdd
|
36bdbbf1be53ba5f09b9a2b1dd15e91f8f6b0da1
|
/restaurants/migrations/0062_auto_20181226_0610.py
|
3d00e07850b262bdc620a65cc97e2a5a2c9e49af
|
[] |
no_license
|
phufoxy/fotourNew
|
801ab2518424118020dc6e5f31a7ba90a654e56a
|
6048c24f5256c8c5a0d18dc7b38c106a7c92a29c
|
refs/heads/master
| 2023-04-13T01:34:22.510717
| 2018-12-26T03:46:09
| 2018-12-26T03:46:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
# Generated by Django 2.1.4 on 2018-12-26 06:10
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters the default value of
    # comment_restaurant.date.

    dependencies = [
        ('restaurants', '0061_auto_20181226_0609'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment_restaurant',
            name='date',
            # NOTE(review): this default is a timestamp frozen at
            # makemigrations time, so every new row defaults to
            # 2018-12-26 06:09:58 — not "now". If "now" was intended, the
            # model field should use django.utils.timezone.now (confirm
            # against the model definition before changing the migration).
            field=models.DateTimeField(default=datetime.datetime(2018, 12, 26, 6, 9, 58, 878970)),
        ),
    ]
|
[
"vanphudhsp2015@gmail.com"
] |
vanphudhsp2015@gmail.com
|
1e1c0f99075f01def2a23ac3fa4b1465b418cc2a
|
e44c83395d2ddd1e1b7c1e521d360f2ef8d585d0
|
/gitlab-new/landchina/landchina.py
|
8b8fd2fccd158e28a5533e0137c4282c437ec99c
|
[] |
no_license
|
zhoujx4/Crawls
|
63ebcac5b4c0bbccdde56e6a2f5efbc4091d03e0
|
94b3ac88d7e49cb4a03e7b211a9437709d1c371c
|
refs/heads/master
| 2020-12-23T15:25:48.041965
| 2020-01-30T10:35:19
| 2020-01-30T10:35:19
| 237,189,197
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,121
|
py
|
import fitz
import datetime
import time
from time import sleep
import re
import os
import sys
import csv
import socket
import random
from urllib import parse
from collections.abc import Iterable
from collections.abc import Mapping
from PIL import Image
from landchina.settings import settings
sys.path.append("..")
from library.commonmethodclass import CommonMethodClass
class LandchinaSpider(object):
"""
爬取https://www.landchina.com/default.aspx?tabid=263页面
备注:
1、20190808采用chrome webdriver爬取失败(失败的现象是在webdriver驱动的
浏览器内可以输入“广东省”等关键词;但是点击“查询”以后加载1秒钟以后就终止
加载了。)
2、20190812采用scrapy爬取,没有写完代码就放弃了。决心使用下面第3种方法爬取
3、采用图像识别和抓包工具配合爬取;使用了C++, Python, 和JScript;基本实现
无人值守。
"""
name = "landchina"
now = None
today = None
settings = None
root_path = None
log_dir = None
main_log_file_path = None
debug = False
crawled_dir = None
html_dir = None
output_folder_name = None
input_folder_name = None
base_uri = None
browser = None
tabid_list = None
input_keyword_dict = None
list_csv_file_path = None
wait_time = None
missed_url_file_name = ""
input_box_dict = {
263: "TAB_QuerySubmitConditionData",
226: "TAB_queryTblEnumItem_75",
}
keyword_english = {}
replace_list = ["市本级", "市", "县", "区" ]
def __init__(self ):
self.init_self_attributes( )
def init_self_attributes(self):
self.now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
self.today = datetime.datetime.now().strftime("%Y%m%d")
self.settings = settings
self.root_path = self.settings.get( name="PROJECT_PATH", default="" )
self.log_dir = self.settings.get( name="LOG_DIR", default="" )
self.main_log_file_path = os.path.join( self.log_dir, self.settings.get( name="MAIN_LOG_FILE_NAME", default="" ) )
self.debug = self.settings.get( name = "PROJECT_DEBUG", default=False )
self.crawled_dir = self.settings.get( name="CRAWLED_DIR", default = "" )
self.html_dir = self.settings.get( name="HTML_DIR", default = "" )
self.output_folder_name = self.settings.get( name = "OUTPUT_FOLDER_NAME", default="" )
self.input_folder_name = self.settings.get( name = "INPUT_FOLDER_NAME", default="" )
self.base_uri = self.settings.get( name = "BASE_URI", default="" )
self.browser = self.settings.get( name = "BROWSER", default="" )
self.tabid_list = self.settings.get( name = "TABID_LIST", default="" )
self.input_keyword_dict = self.settings.get( name = "INPUT_KEYWORD_DICT", default="" )
self.list_csv_file_path = os.path.join( self.crawled_dir, f"landchina_list_{self.today}.csv" )
self.wait_time = 2 if self.debug else 3
self.maximal_requests = self.settings.get( name = "MAXIMAL_REQUESTS", default=50 )
self.missed_url_file_name = self.settings.get( name = "MISSED_URL_FILE_NAME", default="" )
self.keyword_english = self.settings.get( name = "KEYWORD_ENGLISH", default={} )
def make_uri_list(self):
url_list = []
for one_id in self.tabid_list:
url_list.append( f"{self.base_uri}?tabid={one_id}" )
return url_list
def send_keywords(self):
"""
revision: 20190813
"""
url_list = self.make_uri_list()
log_file_path = os.path.join( self.log_dir, self.missed_url_file_name )
for index, one_url in enumerate(url_list):
tabid = self.tabid_list[ index ]
keyword_list = self.input_keyword_dict[tabid]
input_box_xpath = self.input_box_dict[tabid]
for keyword in keyword_list:
keyword_en = self.keyword_english[keyword] if keyword in self.keyword_english.keys() else keyword
def parse_one_index_page_response_field(self, webdriver = None ):
info_list = []
if webdriver is None:
return info_list
tr_list = webdriver.find_elements_by_xpath( "//table[@id='TAB_contentTable']/tbody/tr[not(@class='gridHeader')]" )
for one_tr in tr_list:
td_list = one_tr.find_elements_by_xpath("./td")
value_list = []
this_row_dict = {}
link = ""
for one_td in td_list:
value_list.append( one_td.text )
link_a = self.get_element( webdriver = one_td, xpath = "./a", elements_bool = False, use_id = False )
if link_a is not None and 1 > len(link):
link = link_a.get_attribute("href")
if 4 == len( value_list ):
this_row_dict["序号"] = value_list[0].replace(".", "")
this_row_dict["行政区代码"] = value_list[1]
this_row_dict["标题"] = value_list[2]
this_row_dict["发布时间"] = value_list[3]
this_row_dict["detailed_url"] = link
info_list.append(this_row_dict)
return info_list
def execute(self):
if type(self.tabid_list) not in [list] or type(self.input_keyword_dict) not in [dict] or 1 > len( self.tabid_list ):
error_msg = f"self.tabid_list or self.input_keyword_dict is NOT correct: {self.tabid_list}, {self.input_keyword_dict}"
content = f"Inside Method {sys._getframe().f_code.co_name} of Class {self.__class__.__name__}, {error_msg}"
CommonMethodClass.write_log( content = content, log_file_path = self.main_log_file_path )
return False
for one_category in self.tabid_list:
if one_category not in self.input_keyword_dict.keys():
error_msg = f"{one_category} is NOT in {self.input_keyword_dict.keys()}"
content = f"Inside Method {sys._getframe().f_code.co_name} of Class {self.__class__.__name__}, {error_msg}"
CommonMethodClass.write_log( content = content, log_file_path = self.main_log_file_path )
return False
counter = self.do_requests( )
content = f"At {self.now}, {counter} requests have been sent"
CommonMethodClass.write_log( content = content, log_file_path = self.main_log_file_path )
def test(self):
path = self.whereis_chromedriver()
print( path )
# print( self.district_name_dict )
# district_list = ["南澳县", "佛山市本级", "连南瑶族自治县", "梅州市本级", "雷州市", ]
# self.check_district_names( district_list = district_list, keyword = "广东省" )
if __name__=='__main__':
app = LandchinaSpider( )
# app.test()
app.execute()
|
[
"673760239@qq.com"
] |
673760239@qq.com
|
00a0f660b96836d3c0823a3c10c5289c90c74ab4
|
82ef9a0dd1618a28770597227acfc0150b948af2
|
/wearnow/plugins/sidebar/categorysidebar.py
|
a878520c4e392dca2b4a5ca6025627ff95349c32
|
[] |
no_license
|
bmcage/wearnow
|
ef32a7848472e79e56763b38551835aa97864b21
|
c8dfa75e1ea32b0c021d71c4f366ab47104c207e
|
refs/heads/master
| 2021-01-16T00:27:59.597812
| 2016-01-19T11:55:03
| 2016-01-19T11:55:03
| 37,195,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,347
|
py
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2005-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from wearnow.tex.config import config
from wearnow.gui.basesidebar import BaseSidebar
from wearnow.gui.viewmanager import get_available_views, views_to_show
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
UICATEGORY = '''<ui>
<toolbar name="ToolBar">
<placeholder name="ViewsInCategory">%s
</placeholder>
</toolbar>
</ui>
'''
#-------------------------------------------------------------------------
#
# CategorySidebar class
#
#-------------------------------------------------------------------------
class CategorySidebar(BaseSidebar):
"""
A sidebar displaying a column of toggle buttons that allows the user to
change the current view.
"""
def __init__(self, dbstate, uistate, categories, views):
self.viewmanager = uistate.viewmanager
self.buttons = []
self.button_handlers = []
self.ui_category = {}
self.merge_ids = []
self.window = Gtk.ScrolledWindow()
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.window.add(vbox)
self.window.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
self.window.show()
use_text = config.get('interface.sidebar-text')
for cat_num, cat_name, cat_icon in categories:
# create the button and add it to the sidebar
button = self.__make_sidebar_button(use_text, cat_num,
cat_name, cat_icon)
vbox.pack_start(button, False, True, 0)
# Enable view switching during DnD
button.drag_dest_set(0, [], 0)
button.connect('drag_motion', self.cb_switch_page_on_dnd, cat_num)
# toollbar buttons for switching views in a category
uitoolitems = ''
for view_num, view_name, view_icon in views[cat_num]:
pageid = 'page_%i_%i' % (cat_num, view_num)
uitoolitems += '\n<toolitem action="%s"/>' % pageid
if len(views[cat_num]) > 1:
self.ui_category[cat_num] = UICATEGORY % uitoolitems
vbox.show_all()
def get_top(self):
"""
Return the top container widget for the GUI.
"""
return self.window
def view_changed(self, cat_num, view_num):
"""
Called when the active view is changed.
"""
# Add buttons to the toolbar for the different view in the category
uimanager = self.viewmanager.uimanager
list(map(uimanager.remove_ui, self.merge_ids))
if cat_num in self.ui_category:
mergeid = uimanager.add_ui_from_string(self.ui_category[cat_num])
self.merge_ids.append(mergeid)
# Set new button as selected
self.__handlers_block()
for index, button in enumerate(self.buttons):
if index == cat_num:
button.set_active(True)
else:
button.set_active(False)
self.__handlers_unblock()
def __handlers_block(self):
"""
Block signals to the buttons to prevent spurious events.
"""
for idx in range(len(self.buttons)):
self.buttons[idx].handler_block(self.button_handlers[idx])
def __handlers_unblock(self):
"""
Unblock signals to the buttons.
"""
for idx in range(len(self.buttons)):
self.buttons[idx].handler_unblock(self.button_handlers[idx])
def cb_view_clicked(self, radioaction, current, cat_num):
"""
Called when a button causes a view change.
"""
view_num = radioaction.get_current_value()
self.viewmanager.goto_page(cat_num, view_num)
def __category_clicked(self, button, cat_num):
"""
Called when a button causes a category change.
"""
# Make the button active. If it was already active the category will
# not change.
button.set_active(True)
self.viewmanager.goto_page(cat_num, None)
def __make_sidebar_button(self, use_text, index, page_title, page_stock):
"""
Create the sidebar button. The page_title is the text associated with
the button.
"""
# create the button
button = Gtk.ToggleButton()
button.set_relief(Gtk.ReliefStyle.NONE)
self.buttons.append(button)
# add the tooltip
button.set_tooltip_text(page_title)
# connect the signal, along with the index as user data
handler_id = button.connect('clicked', self.__category_clicked, index)
self.button_handlers.append(handler_id)
button.show()
# add the image. If we are using text, use the BUTTON (larger) size.
# otherwise, use the smaller size
hbox = Gtk.Box()
hbox.show()
image = Gtk.Image()
if use_text:
image.set_from_icon_name(page_stock, Gtk.IconSize.BUTTON)
else:
image.set_from_icon_name(page_stock, Gtk.IconSize.DND)
image.show()
hbox.pack_start(image, False, False, 0)
hbox.set_spacing(4)
# add text if requested
if use_text:
label = Gtk.Label(label=page_title)
label.show()
hbox.pack_start(label, False, True, 0)
button.add(hbox)
return button
def cb_switch_page_on_dnd(self, widget, context, xpos, ypos, time, page_no):
"""
Switches the page based on drag and drop.
"""
self.__handlers_block()
if self.viewmanager.notebook.get_current_page() != page_no:
self.viewmanager.notebook.set_current_page(page_no)
self.__handlers_unblock()
def inactive(self):
"""
Called when the sidebar is hidden.
"""
uimanager = self.viewmanager.uimanager
list(map(uimanager.remove_ui, self.merge_ids))
|
[
"benny.malengier@gmail.com"
] |
benny.malengier@gmail.com
|
989d249c6266a0283ffbc88c2776a2ecdb833eca
|
c25f64f43660d5a5065327f258f3e7348d93e438
|
/asyncio_redis/encoders.py
|
68a493e85b54bf349b6f1c61466fd292929b58c3
|
[
"BSD-2-Clause-Views"
] |
permissive
|
jonathanslenders/asyncio-redis
|
96735d4270453eaa8435e8e39b5c536abb1a7d86
|
50d71a53798967f7fdf1be36b8447e322dedc5ee
|
refs/heads/master
| 2022-04-06T16:06:50.671959
| 2020-06-12T21:18:37
| 2020-08-11T13:56:51
| 13,547,040
| 495
| 83
|
NOASSERTION
| 2021-01-11T13:44:37
| 2013-10-13T21:31:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,095
|
py
|
"""
The redis protocol only knows about bytes, but we like to have strings inside
Python. This file contains some helper classes for decoding the bytes to
strings and encoding the other way around. We also have a `BytesEncoder`, which
provides raw access to the redis server.
"""
__all__ = ("BaseEncoder", "BytesEncoder", "UTF8Encoder")
class BaseEncoder:
    """
    Abstract base class for all encoders.

    Subclasses define `native_type` and implement the two symmetric
    conversion methods below.
    """

    #: The native Python type from which we encode, or to which we decode.
    native_type = None

    def encode_from_native(self, data):
        """
        Turn the native Python value into the bytes sent over the wire.

        Typically this encodes a string using UTF-8; subclasses either
        override this method or set an `encoding` attribute.
        """
        raise NotImplementedError

    def decode_to_native(self, data):
        """
        Turn wire bytes back into the native Python value.

        Must be the exact inverse of `encode_from_native`.
        """
        raise NotImplementedError


class BytesEncoder(BaseEncoder):
    """
    Identity encoder: raw access to the Redis database.
    """

    #: Both directions operate on `bytes`.
    native_type = bytes

    def encode_from_native(self, data):
        """Bytes pass through unchanged."""
        return data

    def decode_to_native(self, data):
        """Bytes pass through unchanged."""
        return data


class StringEncoder(BaseEncoder):
    """
    Abstract base class for string-codec encoders.

    Redis stores everything as binary; `encoding` names the codec used to
    translate Python `str` values to and from those bytes.
    """

    #: Codec name used for both directions; set by concrete subclasses.
    encoding = None

    #: Both directions operate on `str`.
    native_type = str

    def encode_from_native(self, data):
        """str -> bytes via `self.encoding`."""
        return data.encode(self.encoding)

    def decode_to_native(self, data):
        """bytes -> str via `self.encoding`."""
        return data.decode(self.encoding)


class UTF8Encoder(StringEncoder):
    """
    Encode strings to and from utf-8 bytes.
    """
    encoding = "utf-8"
|
[
"jonathan@slenders.be"
] |
jonathan@slenders.be
|
a794664d21b2877b6ebe81762c14babbf6aa8140
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/cloud/shell/v1/shell-v1-py/scripts/fixup_shell_v1_keywords.py
|
c4b2aa4ae9e1c4e9409492197cc740fab3accbec
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185
| 2021-08-29T20:39:47
| 2021-08-29T20:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,211
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns (matching, non_matching): items for which *predicate* is
    truthy, then the rest, each preserving input order.
    """
    matching: List[Any] = []
    non_matching: List[Any] = []
    for item in iterator:
        bucket = matching if predicate(item) else non_matching
        bucket.append(item)
    # Returns trueList, falseList
    return matching, non_matching
class shellCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'add_public_key': ('environment', 'key', ),
'authorize_environment': ('name', 'access_token', 'id_token', 'expire_time', ),
'get_environment': ('name', ),
'remove_public_key': ('environment', 'key', ),
'start_environment': ('name', 'access_token', 'public_keys', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: not a.keyword.value in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=shellCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the shell client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
15855cb736dc27f1def8b56d28bc287b6cac6fdf
|
1207c58fa92dad30050b9f3bcc1173d7e7034c73
|
/train_mnist/train.py
|
3259594eb13f1d915ccdf55d49383f85fcb36d13
|
[] |
no_license
|
chagge/rethinking-generalization
|
b49cf59c8d4d2c3607fa2074a80f86d8e682150c
|
317c1ae29ae119d7399e8e04e95eb903f4d1c045
|
refs/heads/master
| 2021-01-22T09:04:55.449746
| 2017-02-12T16:41:09
| 2017-02-12T16:41:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,745
|
py
|
import numpy as np
import os, sys, time, math
from chainer import cuda
from chainer import functions as F
import pandas as pd
sys.path.append(os.path.split(os.getcwd())[0])
import dataset
from progress import Progress
from mnist_tools import load_train_images, load_test_images
from model import model
from args import args
def compute_accuracy(image_batch, label_batch):
    # Mean classification accuracy over the given data, evaluated in
    # chunks of 500 samples to bound memory use.
    # NOTE(review): np.split requires image_batch.shape[0] to be divisible
    # by 500 — confirm callers always pass such sizes.
    num_data = image_batch.shape[0]
    images_l_segments = np.split(image_batch, num_data // 500)
    label_ids_l_segments = np.split(label_batch, num_data // 500)
    sum_accuracy = 0
    for image_batch, label_batch in zip(images_l_segments, label_ids_l_segments):
        # `model` is the module-level network imported from model.py; its
        # softmax output is scored against the integer labels per chunk.
        distribution = model.discriminate(image_batch, apply_softmax=True, test=True)
        accuracy = F.accuracy(distribution, model.to_variable(label_batch))
        sum_accuracy += float(accuracy.data)
    # Average of the per-chunk accuracies (chunks are equal-sized).
    return sum_accuracy / len(images_l_segments)
def main():
# load MNIST images
images, labels = dataset.load_train_images()
# config
config = model.config
# settings
max_epoch = 1000
num_trains_per_epoch = 500
num_validation_data = 10000
batchsize = 128
# seed
np.random.seed(args.seed)
if args.gpu_device != -1:
cuda.cupy.random.seed(args.seed)
# save validation accuracy per epoch
csv_results = []
# create semi-supervised split
training_images, training_labels, validation_images, validation_labels = dataset.split_data(images, labels, num_validation_data, seed=args.seed)
# training
progress = Progress()
for epoch in xrange(1, max_epoch):
progress.start_epoch(epoch, max_epoch)
sum_loss = 0
for t in xrange(num_trains_per_epoch):
# sample from data distribution
image_batch, label_batch = dataset.sample_data(training_images, training_labels, batchsize, binarize=False)
distribution = model.discriminate(image_batch, apply_softmax=False)
loss = F.softmax_cross_entropy(distribution, model.to_variable(label_batch))
sum_loss += float(loss.data)
model.backprop(loss)
if t % 10 == 0:
progress.show(t, num_trains_per_epoch, {})
model.save(args.model_dir)
train_accuracy = compute_accuracy(training_images, training_labels)
validation_accuracy = compute_accuracy(validation_images, validation_labels)
progress.show(num_trains_per_epoch, num_trains_per_epoch, {
"loss": sum_loss / num_trains_per_epoch,
"accuracy (validation)": validation_accuracy,
"accuracy (train)": train_accuracy,
})
# write accuracy to csv
csv_results.append([epoch, validation_accuracy, progress.get_total_time()])
data = pd.DataFrame(csv_results)
data.columns = ["epoch", "accuracy", "min"]
data.to_csv("{}/result.csv".format(args.model_dir))
if __name__ == "__main__":
main()
|
[
"musyoku@users.noreply.github.com"
] |
musyoku@users.noreply.github.com
|
056345d4c6d7aad110c67c6acea795d071247950
|
7d7d37a66e970af3cc0beca3babba9ef18f8d7c1
|
/Searching Algorithms/LinearSearch.py
|
42f93963971baac2bab579ee83d77359c8443b40
|
[
"MIT"
] |
permissive
|
DrakeEntity/DataStructureAndAlgorithm
|
762e4d01024252754c1308e642803cccaa461fb0
|
9c942217e1a31f143e739682680c12f67d717ee3
|
refs/heads/master
| 2022-04-21T02:25:39.318888
| 2020-04-24T15:32:33
| 2020-04-24T15:32:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,092
|
py
|
class Search:
    """Sequential-search helper over a list.

    Attributes:
        list: the sequence to scan (name kept for interface compatibility,
            though it shadows the builtin).
        search_for: the value to look for.
    """

    def __init__(self, list, search_for):
        self.list = list
        self.search_for = search_for

    def __len__(self):
        return len(self.list)

    def linear_search(self):
        """
        In this type of search, a sequential search is made over all items
        one by one; the scan stops at the first match or at the end of the
        data structure.

        Prints the outcome (as before) and — improvement — also returns it,
        so callers can use the result programmatically instead of parsing
        stdout (the original always returned None).

        Returns:
            bool: True if `search_for` occurs in `list`, else False.
        """
        search_res = False
        # Idiomatic for-loop replaces the manual index/while bookkeeping.
        for item in self.list:
            if item == self.search_for:
                search_res = True
                break
        print(f'{search_res}')
        return search_res
# Demo run: 5 does not occur in the list, so this prints "False".
l = [4,534,646,3,6,6,33,6,34,643,32,4,43,6]
result = Search(l,5)
result.linear_search()
|
[
"54245038+perfect104@users.noreply.github.com"
] |
54245038+perfect104@users.noreply.github.com
|
764cdd64ee9f866e42d940df2f06f450d0e88fd7
|
f889bc01147869459c0a516382e7b95221295a7b
|
/test/test_catalog_data_custom_option_interface.py
|
ca5ca581745279b4da16c3436707fa9b39887277
|
[] |
no_license
|
wildatheart/magento2-api-client
|
249a86f5c0289743f8df5b0324ccabd76f326512
|
e6a707f85b37c6c3e4ef3ff78507a7deb8f71427
|
refs/heads/master
| 2021-07-14T16:01:17.644472
| 2017-10-18T13:33:08
| 2017-10-18T13:33:08
| 107,412,121
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
# coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.catalog_data_custom_option_interface import CatalogDataCustomOptionInterface
class TestCatalogDataCustomOptionInterface(unittest.TestCase):
    """Unit-test stub for the swagger-generated CatalogDataCustomOptionInterface model."""

    def setUp(self):
        """No fixtures are required for this stub."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testCatalogDataCustomOptionInterface(self):
        """Smoke-test construction of CatalogDataCustomOptionInterface.

        The constructor call stays commented out until example values for
        the mandatory attributes are filled in.
        """
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.catalog_data_custom_option_interface.CatalogDataCustomOptionInterface()
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"sander@wildatheart.eu"
] |
sander@wildatheart.eu
|
7a5bc213722fdefb013d9c11de37ab21381a8ff8
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/framework/e2e/jit/test_Softsign_base.py
|
206436de22ef7510496f8c5810e52b215c72787a
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903
| 2023-09-04T11:17:50
| 2023-09-04T11:17:50
| 383,138,186
| 42
| 312
| null | 2023-09-13T11:13:35
| 2021-07-05T12:44:59
|
Python
|
UTF-8
|
Python
| false
| false
| 623
|
py
|
#!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys

# Make the repo-level helpers importable when this test runs from its own
# directory: add the parent directory and its "utils" folder to sys.path
# before importing them below.
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))

from utils.yaml_loader import YamlLoader
from jittrans import JitTrans

# Shared loader over the operator-test configs in yaml/nn.yml; individual
# test functions look their case up by name from this object.
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_Softsign_base():
    """Run the jit-trace check for the Softsign_base operator case."""
    case_info = yml.get_case_info("Softsign_base")
    jit_case = JitTrans(case=case_info)
    jit_case.jit_run()
|
[
"825276847@qq.com"
] |
825276847@qq.com
|
7dba6a3289087fe713494551fb12554582fca39e
|
05824d96edf28918e25886716f0a5f904868a872
|
/diff_tool.py
|
d638d6c9b4a4e50b342d048a3f0bb7a0f2ba7f91
|
[
"MIT"
] |
permissive
|
DahlitzFlorian/diff-tool-video-snippets
|
d7c5fb8616fc06f71566d7c9eae9be9e5ec1bf7d
|
0cd457abe43e63732810dbfec2e90cfb17d3d0a8
|
refs/heads/master
| 2023-08-27T06:46:36.789501
| 2021-10-29T06:55:07
| 2021-10-29T06:55:07
| 420,377,905
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
# diff_tool.py
import argparse
import difflib
import sys
from pathlib import Path
def create_diff(old_file: Path, new_file: Path, output_file: Path = None):
    """Diff two text files.

    If *output_file* is given, write an HTML side-by-side diff there;
    otherwise stream a unified diff to stdout.

    :param old_file: path of the original file
    :param new_file: path of the changed file
    :param output_file: optional path for the HTML report
    """
    # Context managers close both handles deterministically — the original
    # leaked them via bare open() calls.
    with open(old_file) as f:
        file_1 = f.readlines()
    with open(new_file) as f:
        file_2 = f.readlines()

    if output_file:
        delta = difflib.HtmlDiff().make_file(
            file_1, file_2, old_file.name, new_file.name
        )
        with open(output_file, "w") as f:
            f.write(delta)
    else:
        delta = difflib.unified_diff(file_1, file_2, old_file.name, new_file.name)
        sys.stdout.writelines(delta)
def main():
    """CLI entry point: diff the two files named on the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument("old_file_version")
    parser.add_argument("new_file_version")
    parser.add_argument("--html", help="specify html to write to")
    args = parser.parse_args()

    old_file = Path(args.old_file_version)
    new_file = Path(args.new_file_version)
    # An HTML target is optional; without it the diff goes to stdout.
    output_file = Path(args.html) if args.html else None
    create_diff(old_file, new_file, output_file)


if __name__ == "__main__":
    main()
|
[
"f2dahlitz@freenet.de"
] |
f2dahlitz@freenet.de
|
ac0bdc20ef03692d0dfb49b0a9c2a5916f19c954
|
06685fa3aceb620ea13b593ddc52bba53300b93a
|
/ssh/__init__.py
|
2dd7cdbecddeb86fdb56377ffe072425eef11b83
|
[] |
no_license
|
66laps/kokki-cookbooks
|
a900f958d346e35a35a05ed6cbb12bbe2f5bf4a4
|
6c059f8cda577c765083dfe92688094bc38dfd4b
|
refs/heads/master
| 2021-01-01T17:00:28.952170
| 2010-11-20T13:39:52
| 2010-11-20T13:39:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
from kokki import *
from kokki.cookbooks.ssh.providers import SSHKnownHostProvider, SSHAuthorizedKeyProvider
from kokki.cookbooks.ssh.resources import SSHKnownHost, SSHAuthorizedKey
def SSHConfig(name, hosts, mode=0o600, **kwargs):
    """Declare an ssh config file resource rendered from *hosts*.

    :param name: path of the config file resource
    :param hosts: host entries passed to the ssh/config.j2 template
    :param mode: file permission bits; 0o600 is the modern octal literal
        for the historical ``0600`` default (same numeric value, and also
        valid on Python 3, where the old ``0600`` form is a syntax error)
    :param kwargs: forwarded to the kokki File resource
    """
    File(name,
        mode = mode,
        content = Template("ssh/config.j2", {'hosts': hosts}),
        **kwargs)
|
[
"samuel@descolada.com"
] |
samuel@descolada.com
|
9859b409527d7034606e1203ba46e1b8cf065b5a
|
f4fdb0c1213bbb403b87c2dbbde390918ac08861
|
/fix_cite_date.py
|
deff0147b7c52b04e42582406ca609f000af3565
|
[] |
no_license
|
benwing2/RuNounChanges
|
0d5076e576237f10b50049ed52b91f96c95cca95
|
048dfed5abe09b8d5629c5772292027ce0a170f2
|
refs/heads/master
| 2023-09-03T22:48:06.972127
| 2023-09-03T06:27:56
| 2023-09-03T06:27:56
| 41,480,942
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,627
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Replace dates of the form "1 January, 2012" with "1 January 2012"
# (remove the comma) in quotation/citation templates.
import pywikibot, re, sys, argparse
import mwparserfromhell as mw
import blib
from blib import getparam, rmparam, set_template_name, msg, errmsg, site
import rulib
# Citation/quotation templates whose |date= parameter should be normalized.
replace_templates = [
  "cite-book", "cite-journal", "cite-newsgroup", "cite-video game",
  "cite-web",
  "quote-book", "quote-hansard", "quote-journal", "quote-newsgroup",
  "quote-song", "quote-us-patent", "quote-video", "quote-web",
  "quote-wikipedia"
]

# Full English month names followed by their three-letter abbreviations
# ("May" is already covered by the full list, so it is not repeated).
months = ["January", "February", "March", "April", "May", "June", "July",
  "August", "September", "October", "November", "December",
  "Jan", "Feb", "Mar", "Apr", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
  "Dec"]
# Non-capturing alternation matching any single month name above.
month_re = "(?:%s)" % "|".join(months)
def process_page(page, index, parsed):
  """Normalize "D Month, YYYY" -> "D Month YYYY" in cite/quote templates.

  Scans every template on *page*; for templates in replace_templates,
  rewrites the |date= parameter to drop the comma between month and year.
  Returns (new_text, notes) on success, or None (implicitly) when the page
  is skipped; *notes* lists one entry per template changed.
  """
  pagetitle = str(page.title())
  def pagemsg(txt):
    # Prefix every log line with the page index and title.
    msg("Page %s %s: %s" % (index, pagetitle, txt))
  pagemsg("Processing")
  if not page.exists():
    pagemsg("WARNING: Page doesn't exist")
    return
  # Only process colon-containing titles when they belong to a recognized
  # namespace; anything else is probably an unexpected namespace.
  if ":" in pagetitle and not re.search(
    "^(Citations|Appendix|Reconstruction|Transwiki|Talk|Wiktionary|[A-Za-z]+ talk):", pagetitle):
    pagemsg("WARNING: Colon in page title and not a recognized namespace to include, skipping page")
    return
  text = str(page.text)
  notes = []
  parsed = blib.parse_text(text)
  for t in parsed.filter_templates():
    tname = str(t.name)
    origt = str(t)
    if tname.strip() in replace_templates:
      date = getparam(t, "date")
      if date.strip():
        # Drop the comma in "<day> <Month>, <year>" while preserving any
        # surrounding whitespace captured by the groups.
        newdate = re.sub(r"^(\s*[0-9]+\s+%s\s*),(\s*[0-9]+\s*)$" % month_re,
          r"\1\2", date)
        if date != newdate:
          # We do this instead of t.add() because if there's a final newline,
          # it will appear in the value but t.add() will try to preserve the
          # newline separately and you'll get two newlines.
          t.get("date").value = newdate
          pagemsg(("Replacing %s with %s" % (origt, str(t))).replace("\n", r"\n"))
          notes.append("fix date in %s" % tname.strip())
  return str(parsed), notes
if __name__ == "__main__":
  # Standard blib driver: iterate the pages referencing each cite/quote
  # template and apply process_page as an edit.
  parser = blib.create_argparser("Fix date in cite/quote templates",
    include_pagefile=True)
  args = parser.parse_args()
  start, end = blib.parse_start_end(args.start, args.end)
  blib.do_pagefile_cats_refs(args, start, end, process_page, edit=True,
    # FIXME, had includelinks= for references, which we don't have a flag for now
    default_refs=["Template:%s" % template for template in replace_templates])
|
[
"ben@benwing.com"
] |
ben@benwing.com
|
9d468379d6252e0193cf1aa21ee3dd194eb34613
|
55692ac1b8a1b00750c0b9caf7ebba53f1dde78b
|
/server/toolkits/migrations/tip.py
|
c3783643ffc06de90010c34ded47ec1e4a3f1e4f
|
[] |
no_license
|
Soopro/julolo
|
8d9dea62aa055318f891d200614314e402bda1eb
|
73cc67f378f45c0da40911bac5e5e038f63588ab
|
refs/heads/master
| 2021-10-26T08:50:58.940548
| 2019-04-11T15:41:12
| 2019-04-11T15:41:12
| 107,217,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 708
|
py
|
# coding=utf-8
from __future__ import absolute_import
from mongokit import DocumentMigration
class TipMigration(DocumentMigration):
    """Mongokit schema-migration hooks for the Tip collection.

    No migrations are currently defined; the commented-out method below is
    retained as a template for writing future ``allmigration``-style steps.
    """
    pass

    # def allmigration01_remove_complete(self):
    #     self.target = {'verification': {'$exists': True}}
    #     if not self.status:
    #         self.update = {
    #             '$unset': {
    #                 'verification': False
    #             },
    #             '$set': {
    #                 'verified': False
    #             }
    #         }
    #         self.collection.update(self.target,
    #                                self.update,
    #                                multi=True,
    #                                safe=True)
|
[
"redy.ru@gmail.com"
] |
redy.ru@gmail.com
|
81de7e98002c913886831f84a4f671f56499c8f8
|
7d5047dae4df06f10752b7a3ec6e663f296457d3
|
/Programmers/Level 2/영어 끝말잇기.py
|
fb829b110405b798b97e5b1c0d629d4794df69a9
|
[] |
no_license
|
soulgchoi/Algorithm
|
a73e3e8e3a256d4cf1c8b5fa3c7dc35a325a6e9a
|
a88b2c2a0f0d75ca59269dd815ee8d30dd270ce7
|
refs/heads/master
| 2022-02-05T18:08:09.271443
| 2022-01-20T02:36:37
| 2022-01-20T02:36:37
| 228,958,329
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
def solution(n, words):
    """English word-chain game with *n* players.

    Returns [player_number, round_number] of the first player to break the
    chain (wrong starting letter, or a word already used earlier), or
    [0, 0] if nobody loses.
    """
    for turn in range(1, len(words)):
        prev, cur = words[turn - 1], words[turn]
        # words.index returns the FIRST occurrence, so a value smaller than
        # the current position means the word was already used.
        if cur[0] != prev[-1] or words.index(cur) < turn:
            return [turn % n + 1, turn // n + 1]
    return [0, 0]
def solution(n, words):
    """English word-chain game with *n* players.

    Returns [player_number, round_number] of the first player to break the
    chain (wrong starting letter, or a word already used earlier), or
    [0, 0] if nobody loses.
    """
    seen = {words[0]}
    for i in range(1, len(words)):
        # A set gives an O(1) duplicate check instead of the O(i) slice
        # scan ``words[i] in words[:i]`` of the original; same result.
        if words[i][0] != words[i - 1][-1] or words[i] in seen:
            return [i % n + 1, i // n + 1]
        seen.add(words[i])
    return [0, 0]
|
[
"bssj9307@gmail.com"
] |
bssj9307@gmail.com
|
fb7213b98d2e792b11a08e891b2cde8ae6a46d14
|
bc6508a1dde1e61a8b2f61e70044c074aeeb4406
|
/whoiser/servers/XN__G6W251D.py
|
36b0bcbef1eb19e54daf5967aaf73f97ffed1542
|
[] |
no_license
|
krikulis/whoiser
|
7eca72260dc061a91c7630901557264b80c5263e
|
27af46d6ffcf2bacc5e5b837883ab5fab7ac9b40
|
refs/heads/master
| 2021-01-10T19:10:53.915622
| 2012-06-24T23:50:28
| 2012-06-24T23:50:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
from servers.generic import GenericWhoisQuery
class WhoisQuery(GenericWhoisQuery):
    # Placeholder resolver for the .xn--g6w251d TLD: this registry has no
    # public whois server, so any lookup is rejected explicitly rather
    # than timing out.
    def query(self, query):
        raise NotImplementedError(u"TLD XN--G6W251D has no Whois server available")
|
[
"kristaps.kulis@gmail.com"
] |
kristaps.kulis@gmail.com
|
8a312b438fc15bf78d0eae6a7849064a7eaaf7e8
|
f3997f566695a78d09fcab688db88499223dca17
|
/coil_phase/det_coil_phase.py
|
c1f4d76d095a147236a45d3f938cbc97af0f839b
|
[] |
no_license
|
melampyge/CollectiveFilament
|
600d7a426d88a7f8f31702edb2b1fea7691372d2
|
7d2659bee85c955c680eda019cbff6e2b93ecff2
|
refs/heads/master
| 2020-07-23T05:58:55.383746
| 2017-06-25T14:55:14
| 2017-06-25T14:55:14
| 94,351,294
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,003
|
py
|
#!/usr/local/bin/python2.7
import matplotlib as mpl
mpl.use('Agg')
import sys
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy import stats
#####################################################################
### define / read in some global variables
gamma = 2.0  # viscosity
kT = 1.0  # thermal energy

# Read the polymer length and the simulation timestep from the parameter
# file. A context manager replaces the bare open() so the handle is closed
# even if parsing raises (the original never closed it).
with open('scale_params.data') as ifile:
    line = ifile.readline()
    line = line.split()
    L = float(line[-1])  # polymer length
    line = ifile.readline()
    line = line.split()
    dt = float(line[-1])  # simulation timestep
#####################################################################
def read_coilicity():
    """Read (time, coilicity) columns from 'coilicity.data'.

    Skips the two header lines, then parses column 0 as time and column 2
    as coilicity; both are returned as numpy arrays in nondimensional
    units (time scaled by gamma*L**3/(6*kT), coilicity by L/(2*pi)).
    """
    t = []
    c = []
    ifile = open('coilicity.data')
    ifile.readline()
    ifile.readline()
    for line in ifile:
        line = line.split()
        try:
            t.append(float(line[0]))
            c.append(float(line[2]))
        except:
            # deliberately best-effort: silently skip lines that fail to parse
            pass
    ifile.close()
    t = np.array(t)
    c = np.array(c)
    # transform time and coility units (nondimensionalization; see docstring)
    ttrans = gamma*L**3/6./kT
    t *= dt/ttrans
    c *= L/2/np.pi
    return t,c
#####################################################################
def read_cacf():
    """Read the coilicity autocorrelation function from 'coil2_acf.data'.

    Returns (tacf, cacf) as numpy arrays, with the time axis
    nondimensionalized by gamma*L**3/(6*kT), matching read_coilicity.
    """
    times = []
    values = []
    with open('coil2_acf.data', 'r') as fh:
        fh.readline()  # skip the single header line
        for raw in fh:
            cols = raw.split()
            times.append(float(cols[0]))
            values.append(float(cols[1]))
    tacf = np.array(times)
    cacf = np.array(values)
    # transform time units
    tacf *= dt / (gamma * L**3 / 6. / kT)
    return tacf, cacf
#####################################################################
def compute_moments(c):
    """Return (mean, mean stderr, second moment, its stderr, kurtosis,
    mirrored kurtosis) of the coilicity sample *c*.

    The last value is the (Pearson) kurtosis of the sample concatenated
    with its negation, i.e. symmetrized about zero.
    """
    n = len(c)
    root_n = np.sqrt(n)
    cav = np.average(c)
    cav_std = np.std(c) / root_n
    csq = np.average(c ** 2)
    csq_std = np.std(c ** 2) / root_n
    curt = stats.kurtosis(c, fisher=False)
    # Mirror the sample about zero so the symmetrized kurtosis is
    # insensitive to the overall sign of the coilicity.
    mirrored = np.append(c, -np.copy(c))
    curt2 = stats.kurtosis(mirrored, fisher=False)
    return cav, cav_std, csq, csq_std, curt, curt2
#####################################################################
def compute_thalf(tacf,cacf):
    """Return the first time at which the ACF drops below 0.5, or -1.

    Side effect: saves a plot of the ACF to 'coilicity_acf.png'.
    """
    n = len(tacf)
    thalf = -1
    for i in range(n):
        if cacf[i] < 0.5:
            thalf = tacf[i]
            break
    plt.plot(tacf, cacf)
    plt.savefig('coilicity_acf.png')
    plt.close()
    return thalf
#####################################################################
def main():
    """Driver: read coilicity data, compute its moments and decay time,
    and write the phase-identification summaries.

    Writes 'coil_phase.data' (full time series statistics) and
    'coil_phaseh.data' (statistics over the second half only, i.e. after
    transients have presumably decayed — TODO confirm intent).
    """
    # read in the coilicity
    t,c = read_coilicity()
    # read in the time autocorrelation function
    tacf, cacf = read_cacf()
    # compute the moments and standard deviations
    cav, cav_std, csq, csq_std,curt,curt2 = compute_moments(c)
    # compute the moments for only the second part of the array
    n = len(c)
    cavh, cav_stdh, csqh, csq_stdh, curth, curt2h = compute_moments(c[n/2:])
    # compute the time where the acf drops below 0.5
    thalf = compute_thalf(tacf,cacf)
    # write results to file (full series)
    ofile = open('coil_phase.data', 'w')
    ofile.write('Information required to identify coil phase\n\n')
    ofile.write('cav\tcav_std\tcsq\tcsq_std\tthalf\tcurt\tcurt2\n')
    ofile.write(str(cav) + '\t' + str(cav_std) + '\t' + str(csq) + '\t' + str(csq_std) + '\t' + str(thalf) + '\t' + str(curt) + '\t' + str(curt2) + '\n')
    ofile.close()
    # write results to file (second half of the series)
    ofile = open('coil_phaseh.data', 'w')
    ofile.write('Information required to identify coil phase\n\n')
    ofile.write('cav\tcav_std\tcsq\tcsq_std\tthalf\tcurt\tcurt2\n')
    ofile.write(str(cavh) + '\t' + str(cav_stdh) + '\t' + str(csqh) + '\t' + str(csq_stdh) + '\t' + str(thalf) + '\t' + str(curth) + '\t' + str(curt2h) + '\n')
    ofile.close()
    return

#####################################################################

if __name__ == '__main__':
    main()
|
[
"ozer.duman@gmail.com"
] |
ozer.duman@gmail.com
|
d9a39023ff5913ca3b3d1a074f52ca0bb921f5aa
|
22bb398d1d9af678e25ccf39350f90f109c74256
|
/tests/test_utils/output/ifabsents.py
|
03eb882c4e349ece90efcb33534eaf4c040188a6
|
[
"CC0-1.0"
] |
permissive
|
rajshruti18/biolinkml
|
45a0848512e00d0ce66ad17684f26909a3ad3953
|
451e71c9d3fd11aa3b08c6a713d9ab8b127ece77
|
refs/heads/master
| 2023-03-14T05:57:22.399803
| 2020-08-11T15:42:49
| 2020-08-11T15:42:49
| 287,549,421
| 0
| 0
|
CC0-1.0
| 2020-08-14T14:16:53
| 2020-08-14T14:16:52
| null |
UTF-8
|
Python
| false
| false
| 7,050
|
py
|
# Auto generated from ifabsents.yaml by pythongen.py version: 0.4.0
# Generation date: 2020-08-04 09:40
# Schema: ifabsent
#
# id: http://example.org/tests/ifabsent
# description:
# license: https://creativecommons.org/publicdomain/zero/1.0/
import dataclasses
import sys
from typing import Optional, List, Union, Dict, ClassVar, Any
from dataclasses import dataclass
from biolinkml.utils.slot import Slot
from biolinkml.utils.metamodelcore import empty_list, empty_dict, bnode
from biolinkml.utils.yamlutils import YAMLRoot, extended_str, extended_float, extended_int
if sys.version_info < (3, 7, 6):
from biolinkml.utils.dataclass_extensions_375 import dataclasses_init_fn_with_kwargs
else:
from biolinkml.utils.dataclass_extensions_376 import dataclasses_init_fn_with_kwargs
from biolinkml.utils.formatutils import camelcase, underscore, sfx
from rdflib import Namespace, URIRef
from biolinkml.utils.curienamespace import CurieNamespace
from biolinkml.utils.metamodelcore import Bool, ElementIdentifier, NCName, NodeIdentifier, URI, URIorCURIE, XSDDate, XSDDateTime, XSDTime
# NOTE: this file is auto-generated by pythongen.py; edit the source YAML
# schema rather than this module.
metamodel_version = "1.5.3"

# Overwrite dataclasses _init_fn to add **kwargs in __init__
dataclasses._init_fn = dataclasses_init_fn_with_kwargs

# Namespaces used by the generated slots and classes below.
SHEX = CurieNamespace('shex', 'http://www.w3.org/ns/shex#')
SKOS = CurieNamespace('skos', 'http://www.w3.org/2004/02/skos/core#')
TEST = CurieNamespace('test', 'http://example.org/test/')
XSD = CurieNamespace('xsd', 'http://www.w3.org/2001/XMLSchema#')
DEFAULT_ = TEST
# Types
# Auto-generated wrappers pairing each schema type with its XSD/ShEx URI
# metadata; kept verbatim since this module is regenerated from YAML.
class String(str):
    """ A character string """
    type_class_uri = XSD.string
    type_class_curie = "xsd:string"
    type_name = "string"
    type_model_uri = TEST.String


class Integer(int):
    """ An integer """
    type_class_uri = XSD.integer
    type_class_curie = "xsd:integer"
    type_name = "integer"
    type_model_uri = TEST.Integer


class Boolean(Bool):
    """ A binary (true or false) value """
    type_class_uri = XSD.boolean
    type_class_curie = "xsd:boolean"
    type_name = "boolean"
    type_model_uri = TEST.Boolean


class Float(float):
    """ A real number that conforms to the xsd:float specification """
    type_class_uri = XSD.float
    type_class_curie = "xsd:float"
    type_name = "float"
    type_model_uri = TEST.Float


class Double(float):
    """ A real number that conforms to the xsd:double specification """
    type_class_uri = XSD.double
    type_class_curie = "xsd:double"
    type_name = "double"
    type_model_uri = TEST.Double


class Time(XSDTime):
    """ A time object represents a (local) time of day, independent of any particular day """
    type_class_uri = XSD.dateTime
    type_class_curie = "xsd:dateTime"
    type_name = "time"
    type_model_uri = TEST.Time


class Date(XSDDate):
    """ a date (year, month and day) in an idealized calendar """
    type_class_uri = XSD.date
    type_class_curie = "xsd:date"
    type_name = "date"
    type_model_uri = TEST.Date


class Datetime(XSDDateTime):
    """ The combination of a date and time """
    type_class_uri = XSD.dateTime
    type_class_curie = "xsd:dateTime"
    type_name = "datetime"
    type_model_uri = TEST.Datetime


class Uriorcurie(URIorCURIE):
    """ a URI or a CURIE """
    type_class_uri = XSD.anyURI
    type_class_curie = "xsd:anyURI"
    type_name = "uriorcurie"
    type_model_uri = TEST.Uriorcurie


class Uri(URI):
    """ a complete URI """
    type_class_uri = XSD.anyURI
    type_class_curie = "xsd:anyURI"
    type_name = "uri"
    type_model_uri = TEST.Uri


class Ncname(NCName):
    """ Prefix part of CURIE """
    type_class_uri = XSD.string
    type_class_curie = "xsd:string"
    type_name = "ncname"
    type_model_uri = TEST.Ncname


class Objectidentifier(ElementIdentifier):
    """ A URI or CURIE that represents an object in the model. """
    type_class_uri = SHEX.iri
    type_class_curie = "shex:iri"
    type_name = "objectidentifier"
    type_model_uri = TEST.Objectidentifier


class Nodeidentifier(NodeIdentifier):
    """ A URI, CURIE or BNODE that represents a node in a model. """
    type_class_uri = SHEX.nonLiteral
    type_class_curie = "shex:nonLiteral"
    type_name = "nodeidentifier"
    type_model_uri = TEST.Nodeidentifier
# Class references


@dataclass
class C1(YAMLRoot):
    """Generated test class exercising ifabsent default handling.

    NOTE(review): several defaults look type-inconsistent with their
    Optional[str] annotations (booleans, an int, a bnode) — presumably
    intentional fixtures for the ifabsent tests this file exists for;
    confirm against the source ifabsents.yaml before changing.
    """
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = TEST.C1
    class_class_curie: ClassVar[str] = "test:C1"
    class_name: ClassVar[str] = "c1"
    class_model_uri: ClassVar[URIRef] = TEST.C1

    s1: Optional[str] = True
    s1p: Optional[str] = True
    s2: Optional[str] = False
    s2p: Optional[str] = False
    slot_uri: Optional[str] = None
    slot_curie: Optional[str] = None
    class_uri: Optional[str] = None
    class_curie: Optional[str] = None
    bnode: Optional[str] = bnode()
    txt: Optional[str] = "penguins\"doves"
    int: Optional[str] = -1403
    dfltrange: Optional[str] = None
    dfltns: Optional[str] = None
# Slots
# Generated registry: one Slot descriptor per schema slot, attached as
# attributes of the (namespace-like) ``slots`` class.
class slots:
    pass

slots.s1 = Slot(uri=TEST.s1, name="s1", curie=TEST.curie('s1'),
                model_uri=TEST.s1, domain=None, range=Optional[str])

slots.s1p = Slot(uri=TEST.s1p, name="s1p", curie=TEST.curie('s1p'),
                 model_uri=TEST.s1p, domain=None, range=Optional[str])

slots.s2 = Slot(uri=TEST.s2, name="s2", curie=TEST.curie('s2'),
                model_uri=TEST.s2, domain=None, range=Optional[str])

slots.s2p = Slot(uri=TEST.s2p, name="s2p", curie=TEST.curie('s2p'),
                 model_uri=TEST.s2p, domain=None, range=Optional[str])

slots.slot_uri = Slot(uri=TEST.slot_uri, name="slot_uri", curie=TEST.curie('slot_uri'),
                      model_uri=TEST.slot_uri, domain=None, range=Optional[str])

slots.slot_curie = Slot(uri=TEST.slot_curie, name="slot_curie", curie=TEST.curie('slot_curie'),
                        model_uri=TEST.slot_curie, domain=None, range=Optional[str])

slots.class_uri = Slot(uri=TEST.class_uri, name="class_uri", curie=TEST.curie('class_uri'),
                       model_uri=TEST.class_uri, domain=None, range=Optional[str])

slots.class_curie = Slot(uri=TEST.class_curie, name="class_curie", curie=TEST.curie('class_curie'),
                         model_uri=TEST.class_curie, domain=None, range=Optional[str])

slots.bnode = Slot(uri=TEST.bnode, name="bnode", curie=TEST.curie('bnode'),
                   model_uri=TEST.bnode, domain=None, range=Optional[str])

slots.txt = Slot(uri=TEST.txt, name="txt", curie=TEST.curie('txt'),
                 model_uri=TEST.txt, domain=None, range=Optional[str])

slots.int = Slot(uri=TEST.int, name="int", curie=TEST.curie('int'),
                 model_uri=TEST.int, domain=None, range=Optional[str])

slots.dfltrange = Slot(uri=TEST.dfltrange, name="dfltrange", curie=TEST.curie('dfltrange'),
                       model_uri=TEST.dfltrange, domain=None, range=Optional[str])

slots.dfltns = Slot(uri=TEST.dfltns, name="dfltns", curie=TEST.curie('dfltns'),
                    model_uri=TEST.dfltns, domain=None, range=Optional[str])
|
[
"solbrig@jhu.edu"
] |
solbrig@jhu.edu
|
f4145e6b8b24944fa2ee4b82009ad6b9a3c1facb
|
ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1
|
/res/packages/scripts/scripts/client/account_helpers/settings_core/SettingsCache.py
|
c8feafae4411fe1391ea440ca86c25272b1f67dd
|
[] |
no_license
|
webiumsk/WOT-0.9.20.0
|
de3d7441c5d442f085c47a89fa58a83f1cd783f2
|
811cb4e1bca271372a1d837a268b6e0e915368bc
|
refs/heads/master
| 2021-01-20T22:11:45.505844
| 2017-08-29T20:11:38
| 2017-08-29T20:11:38
| 101,803,045
| 0
| 1
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 2,564
|
py
|
# 2017.08.29 21:43:24 Střední Evropa (letní čas)
# Embedded file name: scripts/client/account_helpers/settings_core/SettingsCache.py
from Event import Event
from adisp import async
from gui.ClientUpdateManager import g_clientUpdateManager
from gui.shared.utils.requesters.IntSettingsRequester import IntSettingsRequester
from account_helpers.settings_core.settings_constants import VERSION
from skeletons.account_helpers.settings_core import ISettingsCache
class SettingsCache(ISettingsCache):
    """Client-side cache of the account's integer settings.

    Wraps an IntSettingsRequester and re-syncs it whenever the server
    pushes an 'intUserSettings' client update; fires onSyncStarted /
    onSyncCompleted events around each sync.
    """

    def __init__(self):
        self.__intSettings = IntSettingsRequester()
        self.__waitForSync = False  # True while a resync is in flight
        self.onSyncStarted = Event()
        self.onSyncCompleted = Event()

    def init(self):
        # Re-sync whenever the server updates the int settings blob.
        g_clientUpdateManager.addCallbacks({'intUserSettings': self._onResync})

    def fini(self):
        self.onSyncStarted.clear()
        self.onSyncCompleted.clear()
        g_clientUpdateManager.removeObjectCallbacks(self)

    @property
    def waitForSync(self):
        # Whether a resync is currently in progress.
        return self.__waitForSync

    @property
    def settings(self):
        return self.__intSettings

    def _onResync(self, *args):
        self.__invalidateData()

    @async
    def update(self, callback = None):
        # Asynchronous (adisp) refresh of the cached settings.
        self.__invalidateData(callback)

    def getSectionSettings(self, section, defaultValue = 0):
        return self.__intSettings.getSetting(section, defaultValue)

    def setSectionSettings(self, section, value):
        self.__intSettings.setSetting(section, value)

    def setSettings(self, settings):
        self.__intSettings.setSettings(settings)

    def getSetting(self, key, defaultValue = 0):
        return self.__intSettings.getSetting(key, defaultValue)

    def getVersion(self, defaultValue = 0):
        return self.__intSettings.getSetting(VERSION, defaultValue)

    def setVersion(self, value):
        self.__intSettings.setSetting(VERSION, value)

    def __invalidateData(self, callback = lambda *args: None):
        # Request a fresh copy of the settings; cbWrapper restores the
        # sync flag and notifies listeners before invoking the caller's
        # callback.
        def cbWrapper(*args):
            self.__waitForSync = False
            self.onSyncCompleted()
            callback(*args)

        self.__waitForSync = True
        self.onSyncStarted()
        # Local import: replays short-circuit the server round trip with
        # an empty settings dict.
        import BattleReplay
        if BattleReplay.g_replayCtrl.isPlaying:
            cbWrapper(dict())
            return
        self.__intSettings.request()(cbWrapper)
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\account_helpers\settings_core\SettingsCache.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:43:24 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
a008c9830e1db80a44abf7e5e9088150db092ed9
|
24a9bbc1c8000f080958570c513d1d249c3514fd
|
/models/resnet_50.py
|
898b31c4c982d6933630de58afae99afeb9cad85
|
[] |
no_license
|
CoderHHX/DGRL_OPFE
|
e74640693152ce4256dc54d3c0e4703fd6fcef4d
|
40d2f1a87714d7924e0931f8f7da6487acd1634d
|
refs/heads/master
| 2020-09-17T09:22:34.589899
| 2018-09-29T14:54:53
| 2018-09-29T14:54:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,672
|
py
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import torch
import numpy as np
# Public API of this module. The original listed resnet18/34/50/101/152,
# none of which are defined here; with __all__ naming missing attributes,
# ``from models.resnet_50 import *`` raises AttributeError — so export only
# the names that actually exist in this file.
__all__ = ['ResNet', 'resnet_50']

# Local path to the pretrained ResNet-50 checkpoint (downloaded from the
# official torchvision URL; originals kept below for reference).
model_urls = {
    'resnet50': '/home/zhengxiawu/.torch/models/resnet50-19c8e357.pth',
}
# model_urls = {
#     'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
#     'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
#     'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
#     'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
#     'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
# }
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1 (spatial size preserved at stride 1)."""
    # bias is omitted because these convolutions are always followed by
    # BatchNorm in this module.
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4),
    with an identity (or downsampled) skip connection added before the
    final ReLU."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Attribute names match the torchvision layout so pretrained
        # state_dicts load unchanged.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path first: identity, or the 1x1 projection when the
        # shape changes.
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """ResNet backbone with optional saliency masking and max+avg pooling.

    Required kwargs: num_class, is_train, saliency ('scda' | 'oc_mask' |
    anything else for none), pool_type ('max_avg'), scale (int multiplier
    on the pooled embedding). Optional kwargs: threshold (for 'oc_mask'),
    phase ('extract_conv_feature' to return the raw feature map).

    Fix vs. original: ``kwargs.has_key(...)`` is Python-2-only and raises
    AttributeError on Python 3; replaced with the ``in`` operator.
    """

    def __init__(self, block, layers, **kwargs):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avg_pool = nn.AvgPool2d(7, stride=1)
        self.global_avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.global_max_pool = nn.AdaptiveMaxPool2d((1, 1))
        # *2 because forward concatenates avg- and max-pooled embeddings.
        self.class_fc = nn.Linear(512 * block.expansion * 2, kwargs['num_class'])
        self.is_train = bool(kwargs['is_train'])
        self.saliency = str(kwargs['saliency'])
        self.pool_type = str(kwargs['pool_type'])
        self.scale = int(kwargs['scale'])
        # BUGFIX: dict.has_key() was removed in Python 3; use ``in``.
        self.threshold = float(kwargs['threshold']) if 'threshold' in kwargs else 'none'
        self.phase = str(kwargs['phase']) if 'phase' in kwargs else 'none'
        # He initialization for convs, unit-gamma/zero-beta for BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def extract_conv_feature(self, x):
        """Run the convolutional trunk, returning the layer4 feature map."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def forward(self, x):
        x = self.extract_conv_feature(x)
        if self.saliency == 'scda':
            # SCDA: keep channels only where the channel-sum activation
            # exceeds its spatial mean (binary saliency mask).
            scda_x = torch.sum(x, 1, keepdim=True)
            mean_x = torch.mean(scda_x.view(scda_x.size(0), -1), 1, True)
            scda_x = scda_x - mean_x
            scda_x = scda_x > 0
            scda_x = scda_x.float()
            x = x * scda_x
        elif self.saliency == 'oc_mask':
            # Objectness mask: normalized channel-sum plus a centered
            # Gaussian spatial prior, thresholded to a binary mask.
            object_tive_ness = torch.sum(x, 1, keepdim=True)
            max_object_score = 2 * torch.mean(object_tive_ness.view(object_tive_ness.size(0), -1), 1, True)
            object_tive_ness = object_tive_ness / max_object_score
            _, _, size_w, size_h = object_tive_ness.shape
            prior_feature = np.indices((size_w, size_h))
            prior_feature = prior_feature + 1
            prior_feature = np.transpose(prior_feature, axes=(1, 2, 0))
            prior_feature = prior_feature - (np.array((size_w, size_h)) / 2.)
            sigma = size_h if size_h < size_w else size_w
            sigma = sigma / 3.
            prior_feature = np.exp(-1 * np.sum(prior_feature ** 2, axis=2) / (2 * (sigma ** 2)))
            prior_feature = np.reshape(prior_feature, (1, 1, size_w, size_h))
            prior_feature_tensor = torch.Tensor(prior_feature).cuda()
            indicate_mat = object_tive_ness + prior_feature_tensor > self.threshold
            indicate_mat = indicate_mat.float()
            x = x * indicate_mat
        if self.phase == 'extract_conv_feature':
            return x
        if self.pool_type == 'max_avg':
            # L2-normalize avg- and max-pooled embeddings separately, then
            # concatenate: feature dim = 512 * expansion * 2.
            avg_x = self.global_avg_pool(x)
            avg_x = avg_x.view(avg_x.size(0), -1)
            avg_x = F.normalize(avg_x, p=2, dim=1)
            max_x = self.global_max_pool(x)
            max_x = max_x.view(max_x.size(0), -1)
            max_x = F.normalize(max_x, p=2, dim=1)
            x = torch.cat((avg_x, max_x), dim=1)
        x = x * self.scale
        # The last fc layer can be treated as a distance computation; its
        # weights are clamped non-negative via ReLU during training.
        if self.is_train:
            fc_weight_relu = self.relu(self.class_fc.weight)
            self.class_fc.weight.data = fc_weight_relu
            x = self.class_fc(x)
            return x, fc_weight_relu
        return x
def resnet_50(pretrained=False,**kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
            The checkpoint is loaded from the local path in model_urls and
            filtered to the keys present in this model, so the custom head
            (class_fc) keeps its fresh initialization.
        **kwargs: forwarded to ResNet (num_class, is_train, saliency,
            pool_type, scale, and optional threshold/phase).
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        # Partial load: keep only pretrained tensors whose names exist in
        # this (modified) architecture.
        pretrained_dict = torch.load(model_urls['resnet50'])
        model_dict = model.state_dict()
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    # if pretrained:
    #     model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model
|
[
"zhengxiawu@126.com"
] |
zhengxiawu@126.com
|
647e11cca2a8aa827a45288dc54739ade950f14d
|
f889bc01147869459c0a516382e7b95221295a7b
|
/swagger_client/models/customer_data_group_extension_interface.py
|
3806a3f11578732561fab75c8a5c20d9992130f3
|
[] |
no_license
|
wildatheart/magento2-api-client
|
249a86f5c0289743f8df5b0324ccabd76f326512
|
e6a707f85b37c6c3e4ef3ff78507a7deb8f71427
|
refs/heads/master
| 2021-07-14T16:01:17.644472
| 2017-10-18T13:33:08
| 2017-10-18T13:33:08
| 107,412,121
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
# coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CustomerDataGroupExtensionInterface(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self):
"""
CustomerDataGroupExtensionInterface - a model defined in Swagger
"""
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CustomerDataGroupExtensionInterface):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"sander@wildatheart.eu"
] |
sander@wildatheart.eu
|
21daa653eb1721f4470a8e23b1c4e3f4ac9d37c7
|
1b86187256acfeca198c6683324a6ba37acc378c
|
/scripts/telocate/telocate_run.py
|
5e4b9df1e139b29933a09e26718f2cfeef85f5ff
|
[
"BSD-2-Clause"
] |
permissive
|
paa49/mcclintock
|
6359e5942913a98290dcfdd6e643f18de0eb0a61
|
10fc9c563911659f34656d06091e8b240c422490
|
refs/heads/master
| 2022-11-16T06:18:32.217877
| 2020-07-10T17:02:02
| 2020-07-10T17:02:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
import os
import sys
import subprocess
sys.path.append(snakemake.config['args']['mcc_path'])
import scripts.mccutils as mccutils
import config.telocate.telocate_run as config
def main():
te_gff = snakemake.input.te_gff
sam = snakemake.input.sam
ref_fasta = snakemake.input.ref
median_insert_size_file = snakemake.input.median_insert_size
log = snakemake.params.log
mccutils.log("te-locate","running TE-Locate", log=log)
with open(log,"a") as l:
l.write("TE GFF: "+te_gff+"\n")
l.write("SAM: "+sam+"\n")
l.write("reference fasta: "+ref_fasta+"\n")
telocate = snakemake.params.run_script
out_dir = snakemake.params.out_dir
sam_dir = out_dir+"/sam/"
mccutils.mkdir(sam_dir)
te_locate_sam = sam_dir+"te-locate.sam"
if os.path.exists(te_locate_sam):
os.remove(te_locate_sam)
os.symlink(sam, te_locate_sam)
os.chdir(os.path.dirname(telocate))
median_insert_size = mccutils.get_median_insert_size(median_insert_size_file)
distance = (median_insert_size * config.MIN_DISTANCE)
command = ["perl", telocate, str(config.MAX_MEM), sam_dir, te_gff, ref_fasta, out_dir, str(distance), str(config.MIN_SUPPORT_READS), str(config.MIN_SUPPORT_INDIVIDUALS)]
mccutils.run_command(command, log=log)
mccutils.run_command(["cp", out_dir+"_"+str(distance)+"_reads3_acc1.info", out_dir+"te-locate-raw.info"])
mccutils.log("te-locate", "TE-Locate complete")
if __name__ == "__main__":
main()
|
[
"pjb68507@uga.edu"
] |
pjb68507@uga.edu
|
97526d54617bd8d4a7ba932dd17601495af62fa6
|
b1b77bb1ed47586f96d8f2554a65bcbd0c7162cc
|
/SPOTIFY/crtauth/crtauth/msgpack_protocol.py
|
cc458fe3d3c4303eb316a3f064fa1e41a1a39e25
|
[
"Apache-2.0"
] |
permissive
|
DanHefrman/stuff
|
b3624d7089909972ee806211666374a261c02d08
|
b98a5c80cfe7041d8908dcfd4230cf065c17f3f6
|
refs/heads/master
| 2023-07-10T09:47:04.780112
| 2021-08-13T09:55:17
| 2021-08-13T09:55:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,988
|
py
|
# Copyright (c) 2014-2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import hmac
import io
import msgpack
from crtauth import exceptions
from crtauth.constant_time_compare import constant_time_compare
PROTOCOL_VERSION = 1
HMAC_HASH_ALGORITHM = hashlib.sha256
HMAC_SIZE = HMAC_HASH_ALGORITHM().digest_size
class TypeInfo(object):
"""
TypeInfo instances contains extra information about the type of a field
"""
def __init__(self, data_type, size=None, binary=False):
self._data_type = data_type
self._size = size
self._packer = msgpack.Packer(use_bin_type=binary)
def validate(self, data, name):
if not isinstance(data, self._data_type):
raise ValueError("Value for field %s should have been of %s"
% (name, self._data_type))
def pack(self, value, stream):
stream.write(self._packer.pack(value))
class MessageBase(object):
"""
Base class with common functionality for Message and AuthenticatedMessage
"""
__fields__ = None
__magic__ = None
def __init__(self, **kw):
if len(kw) != len(self.__fields__):
raise RuntimeError("Wrong number of constructor parameters, "
"expected %d got %d",
len(self.__fields__), len(kw))
for key, _ in self.__fields__:
val = kw.get(key, None)
if val is None:
raise RuntimeError(
"Missing required argument '%s'" % key)
setattr(self, key, val)
def _do_serialize(self):
if self.__magic__ is None or self.__fields__ is None:
raise RuntimeError(
"Serialization can only be performed on classes implementing "
"__fields__ and __magic__")
buf = io.BytesIO()
msgpack.pack(PROTOCOL_VERSION, buf)
msgpack.pack(self.__magic__, buf)
for name, type_info in self.__fields__:
value = getattr(self, name)
type_info.validate(value, name)
type_info.pack(value, buf)
return buf
@classmethod
def _do_deserialize(cls, serialized):
stream = io.BytesIO(serialized)
unpacker = msgpack.Unpacker(stream)
version = unpacker.unpack()
if version != PROTOCOL_VERSION:
raise exceptions.ProtocolError(
"Wrong version, expected %d got %d" % (PROTOCOL_VERSION,
version))
magic = unpacker.unpack()
if magic != cls.__magic__:
raise exceptions.ProtocolError(
"Wrong magic, expected %d got %d" % (cls.__magic__, magic))
kw = dict()
for name, type_info in cls.__fields__:
kw[name] = unpacker.unpack()
return cls(**kw), unpacker
@classmethod
def deserialize(cls, serialized):
return cls._do_deserialize(serialized)[0]
class Message(MessageBase):
"""
Base class for messages not authenticated with a HMAC code
"""
def serialize(self):
return self._do_serialize().getvalue()
class AuthenticatedMessage(MessageBase):
"""
Base class for messages authenticated with a HMAC code
"""
def serialize(self, hmac_secret):
"""
Serialises this instance into the serialization format and appends
a SHA256 HMAC at the end computed using the provided hmac_secret
"""
buf = self._do_serialize()
offset = buf.tell()
buf.seek(0)
mac = hmac.new(hmac_secret, buf.read(), HMAC_HASH_ALGORITHM)
buf.seek(offset)
buf.write(msgpack.Packer(use_bin_type=True).pack(mac.digest()))
return buf.getvalue()
@classmethod
def deserialize_authenticated(cls, serialized, hmac_secret):
"""
Deserialises instances of this class, validating the HMAC appended
at the end using the provided hmac_secret
"""
instance, unpacker = cls._do_deserialize(serialized)
# the extra 2 bytes taken off is the serialization overhead of byte
# strings shorter than 256 bytes.
calculated_mac = hmac.new(hmac_secret, serialized[:-HMAC_SIZE-2],
HMAC_HASH_ALGORITHM).digest()
stored_mac = unpacker.unpack()
if not constant_time_compare(calculated_mac, stored_mac):
# TODO better exception, perhaps?
raise exceptions.BadResponse("Invalid authentication code")
return instance
class Challenge(AuthenticatedMessage):
"""
A challenge.
"""
__magic__ = ord('c')
__fields__ = (
("unique_data", TypeInfo(str, 20, binary=True)),
("valid_from", TypeInfo(int)),
("valid_to", TypeInfo(int)),
("fingerprint", TypeInfo(str, 6, binary=True)),
("server_name", TypeInfo(str)),
("username", TypeInfo(str))
)
class Response(Message):
"""
A response (a copy of the challenge plus a signature)
"""
__magic__ = ord('r')
__fields__ = (
("challenge", TypeInfo(str, binary=True)),
("signature", TypeInfo(str, binary=True)),
)
class Token(AuthenticatedMessage):
"""
Represents a token used to authenticate the user
"""
__magic__ = ord("t")
__fields__ = (
("valid_from", TypeInfo(int)),
("valid_to", TypeInfo(int)),
("username", TypeInfo(str))
)
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
005031d4c1b14983cc629f1a19fc91dbd91a81a9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02725/s568490525.py
|
2affcb786486d1c8e87015b5691b632584056eb6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
K,N=map(int,input().split())
A=list(map(int,input().split()))
B=[0]*N
for i in range(N-1):
B[i]=A[i+1]-A[i]
B[N-1]=K-A[N-1]+A[0]
print(K-max(B))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1cbaa703c33b556e2bd7081dd2bb80906cb5e97f
|
55540f3e86f1d5d86ef6b5d295a63518e274efe3
|
/toolchain/riscv/MSYS/python/Tools/scripts/pyvenv.py
|
f84f4c78c804cdf82c673b71cf914dfb40eba39c
|
[
"Apache-2.0",
"bzip2-1.0.6",
"LicenseRef-scancode-proprietary-license",
"OpenSSL",
"Python-2.0",
"LicenseRef-scancode-newlib-historical",
"TCL",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
bouffalolab/bl_iot_sdk
|
bc5eaf036b70f8c65dd389439062b169f8d09daa
|
b90664de0bd4c1897a9f1f5d9e360a9631d38b34
|
refs/heads/master
| 2023-08-31T03:38:03.369853
| 2023-08-16T08:50:33
| 2023-08-18T09:13:27
| 307,347,250
| 244
| 101
|
Apache-2.0
| 2023-08-28T06:29:02
| 2020-10-26T11:16:30
|
C
|
UTF-8
|
Python
| false
| false
| 454
|
py
|
#!/usr/bin/env python3
if __name__ == '__main__':
import sys
import pathlib
executable = pathlib.Path(sys.executable or 'python3').name
print('WARNING: the pyenv script is deprecated in favour of '
f'`{executable} -m venv`', file=sys.stderr)
rc = 1
try:
import venv
venv.main()
rc = 0
except Exception as e:
print('Error: %s' % e, file=sys.stderr)
sys.exit(rc)
|
[
"jczhang@bouffalolab.com"
] |
jczhang@bouffalolab.com
|
165628f55fbde0ee362db96cde96f48396556eb5
|
fae2430e2e7717704f9c454f75ec1cd17e0831a9
|
/tf_quant_finance/math/integration/integration_test.py
|
803aa92c54cf378a36efe302e9b332d8ccac46f8
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
gsamarakoon/tf-quant-finance
|
ae00f12ab5f8bbf85c515a53379db234bd619802
|
7873ea202ec50059014836b950881239e7d154fa
|
refs/heads/master
| 2020-09-06T06:42:49.670227
| 2019-11-07T16:20:44
| 2019-11-07T16:21:10
| 220,354,269
| 1
| 0
|
Apache-2.0
| 2019-11-08T00:28:13
| 2019-11-08T00:28:12
| null |
UTF-8
|
Python
| false
| false
| 6,555
|
py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for numeric integration methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from scipy import special
import tensorflow as tf
import tf_quant_finance as tff
tff_int = tff.math.integration
IntegrationTestCase = collections.namedtuple('IntegrationTestCase', [
'func',
'lower',
'upper',
'antiderivative',
])
# pylint:disable=g-long-lambda
BASIC_TEST_CASES = [
IntegrationTestCase(
func=lambda x: tf.exp(2 * x + 1),
lower=1.0,
upper=3.0,
antiderivative=lambda x: np.exp(2 * x + 1) / 2,
),
IntegrationTestCase(
func=lambda x: x**5,
lower=-10.0,
upper=100.0,
antiderivative=lambda x: x**6 / 6,
),
IntegrationTestCase(
func=lambda x: (x**3 + x**2 - 4 * x + 1) / (x**2 + 1)**2,
lower=0.0,
upper=10.0,
antiderivative=lambda x: sum([
2.5 / (x**2 + 1),
0.5 * np.log(x**2 + 1),
np.arctan(x),
]),
),
IntegrationTestCase(
func=lambda x: (tf.sinh(2 * x) + 3 * tf.sinh(x)) /
(tf.cosh(x)**2 + 2 * tf.cosh(0.5 * x)**2),
lower=2.0,
upper=4.0,
antiderivative=lambda x: sum([
np.log(np.cosh(x)**2 + np.cosh(x) + 1),
(4 / np.sqrt(3)) * np.arctan((1 + 2 * np.cosh(x)) / np.sqrt(3.0)),
]),
),
IntegrationTestCase(
func=lambda x: tf.exp(2 * x) * tf.math.sqrt(tf.exp(x) + tf.exp(2 * x)),
lower=2.0,
upper=4.0,
antiderivative=lambda x: sum([
np.sqrt((np.exp(x) + np.exp(2 * x))**3) / 3,
-(1 + 2 * np.exp(x)) * np.sqrt(np.exp(x) + np.exp(2 * x)) / 8,
np.log(np.sqrt(1 + np.exp(x)) + np.exp(0.5 * x)) / 8,
]),
),
IntegrationTestCase(
func=lambda x: tf.exp(-x**2),
lower=0.0,
upper=1.0,
antiderivative=lambda x: 0.5 * np.sqrt(np.pi) * special.erf(x),
),
]
TEST_CASE_RAPID_CHANGE = IntegrationTestCase(
func=lambda x: 1.0 / tf.sqrt(x + 1e-6),
lower=0.0,
upper=1.0,
antiderivative=lambda x: 2.0 * np.sqrt(x + 1e-6),
)
class IntegrationTest(tf.test.TestCase):
def _test_batches_and_types(self, integrate_function, args):
"""Checks handling batches and dtypes."""
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
a = [[0.0, 0.0], [0.0, 0.0]]
b = [[np.pi / 2, np.pi], [1.5 * np.pi, 2 * np.pi]]
a = [a, a]
b = [b, b]
k = tf.constant([[[[1.0]]], [[[2.0]]]])
func = lambda x: tf.cast(k, dtype=x.dtype) * tf.sin(x)
ans = [[[1.0, 2.0], [1.0, 0.0]], [[2.0, 4.0], [2.0, 0.0]]]
results = []
for dtype in dtypes:
lower = tf.constant(a, dtype=dtype)
upper = tf.constant(b, dtype=dtype)
results.append(integrate_function(func, lower, upper, **args))
results = self.evaluate(results)
for i in range(len(results)):
assert results[i].dtype == dtypes[i]
assert np.allclose(results[i], ans, atol=1e-3)
def _test_accuracy(self, integrate_function, args, test_case, max_rel_error):
func = test_case.func
lower = tf.constant(test_case.lower, dtype=tf.float64)
upper = tf.constant(test_case.upper, dtype=tf.float64)
exact = test_case.antiderivative(
test_case.upper) - test_case.antiderivative(test_case.lower)
approx = integrate_function(func, lower, upper, **args)
approx = self.evaluate(approx)
assert np.abs(approx - exact) <= np.abs(exact) * max_rel_error
def _test_gradient(self, integrate_function, args):
"""Checks that integration result can be differentiated."""
# We consider I(a) = int_0^1 cos(ax) dx.
# Then dI/da = (a*cos(a) - sin(a))/a^2.
def integral(a):
return integrate_function(
lambda x: tf.cos(a * x), 0.0, 1.0, dtype=tf.float64, **args)
a = tf.constant(0.5, dtype=tf.float64)
di_da = tff.math.fwd_gradient(integral, a)
true_di_da = lambda a: (a * np.cos(a) - np.sin(a)) / (a**2)
self.assertAllClose(self.evaluate(di_da), true_di_da(0.5))
def test_integrate_batches_and_types(self):
self._test_batches_and_types(tff_int.integrate, {})
for method in tff_int.IntegrationMethod:
self._test_batches_and_types(tff_int.integrate, {'method': method})
def test_integrate_accuracy(self):
for test_case in BASIC_TEST_CASES:
self._test_accuracy(tff_int.integrate, {}, test_case, 1e-8)
for method in tff_int.IntegrationMethod:
self._test_accuracy(tff_int.integrate, {'method': method}, test_case,
1e-8)
def test_integrate_gradient(self):
for method in tff_int.IntegrationMethod:
self._test_gradient(tff_int.integrate, {'method': method})
def test_integrate_int_limits(self):
for method in tff_int.IntegrationMethod:
result = tff_int.integrate(tf.sin, 0, 1, method=method, dtype=tf.float64)
result = self.evaluate(result)
self.assertAllClose(0.459697694, result)
def test_simpson_batches_and_types(self):
self._test_batches_and_types(tff_int.simpson, {})
def test_simpson_accuracy(self):
for test_case in BASIC_TEST_CASES:
self._test_accuracy(tff_int.simpson, {}, test_case,
1e-8)
def test_simpson_rapid_change(self):
self._test_accuracy(tff_int.simpson,
{'num_points': 1001}, TEST_CASE_RAPID_CHANGE, 2e-1)
self._test_accuracy(tff_int.simpson,
{'num_points': 10001}, TEST_CASE_RAPID_CHANGE, 3e-2)
self._test_accuracy(tff_int.simpson,
{'num_points': 100001}, TEST_CASE_RAPID_CHANGE, 5e-4)
self._test_accuracy(tff_int.simpson,
{'num_points': 1000001}, TEST_CASE_RAPID_CHANGE, 3e-6)
def test_simpson_gradient(self):
self._test_gradient(tff_int.simpson, {})
if __name__ == '__main__':
tf.test.main()
|
[
"tf-quant-finance-robot@google.com"
] |
tf-quant-finance-robot@google.com
|
b8cd5d4e2ecee18362ea956d197bd10a0d9c3445
|
856a1d6c6737ee3d42888831d7da3142ec11d75a
|
/cpt2wgt.py
|
0a2649be8d41ac38c2172f78249af3222274b850
|
[
"MIT"
] |
permissive
|
kevincao91/Tools
|
81d499dcac04987724142d6c7e77fa2629707f3c
|
9de429d066b4c601afd32ba09360995297736d2f
|
refs/heads/master
| 2023-05-12T07:41:26.736274
| 2023-05-05T08:29:49
| 2023-05-05T08:29:49
| 225,775,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 808
|
py
|
import pickle
import os,sys
cptPath=sys.argv[1]
wgtPath=cptPath
with open(cptPath,'rb') as f:
data = pickle.load(f,encoding='latin1')
keys = data['blobs'].keys()
# needs = ['conv','res','fpn',]
not_needs = ['fc1000','momentum']
output_dic={'blobs':{}}
print('filtered out:')
for key in keys:
keep = True
# for need in needs:
# if key.startswith(need):
# keep=True
for not_need in not_needs:
if not_need in key:
keep=False
break
if keep:
# if 'score' in key:
# print(key)
output_dic['blobs'][key] = data['blobs'][key]
#print(key)
else:
print(' - '+key)
#print(output_dic['blobs'].keys())
with open(wgtPath,'wb') as f:
pickle.dump(output_dic,f,protocol=0)
|
[
"kevin_cao_91@163.com"
] |
kevin_cao_91@163.com
|
1891b8efa206d3ac3a27653452945bb1c3676750
|
84ebacfa7c91348f1275f3945f7ee3567b91d458
|
/MusicObjectDetector/image_color_inverter.py
|
31eb7267ffda5824a9d056a90cab11ed330bdde6
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
apacha/MusicObjectDetector-TF
|
4f40f639ac1240150dcbf5e489569f80878d2008
|
d32cf96575c995f4d5b634e4dbb876845e3bcd2a
|
refs/heads/master
| 2022-11-01T03:33:31.589657
| 2022-10-09T20:26:52
| 2022-10-09T20:26:52
| 112,597,906
| 83
| 32
|
Apache-2.0
| 2022-10-09T20:26:53
| 2017-11-30T10:25:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
import argparse
import os
from glob import glob
from PIL import Image, ImageOps
from tqdm import tqdm
class ImageColorInverter:
""" Class for inverting white-on-black images to black-on-white images """
def __init__(self) -> None:
super().__init__()
def invert_images(self, image_directory: str, image_file_ending: str):
"""
In-situ converts the white on black images of a directory to black on white images
:param image_directory: The directory, that contains the images
:param image_file_ending: The pattern for finding files in the image_directory
"""
image_paths = [y for x in os.walk(image_directory) for y in glob(os.path.join(x[0], image_file_ending))]
for image_path in tqdm(image_paths, desc="Inverting all images in directory {0}".format(image_directory)):
white_on_black_image = Image.open(image_path).convert("L")
black_on_white_image = ImageOps.invert(white_on_black_image)
black_on_white_image.save(os.path.splitext(image_path)[0] + ".png")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--image_directory",
type=str,
default="../data/fornes_raw",
help="The directory, where a dataset can be found, that needs to be inverted, e.g. the original Fornés dataset")
parser.add_argument("--image_file_ending", type=str, default="*.bmp", )
flags, unparsed = parser.parse_known_args()
image_inverter = ImageColorInverter()
image_inverter.invert_images(flags.image_directory, flags.image_file_ending)
|
[
"alexander.pacha@gmail.com"
] |
alexander.pacha@gmail.com
|
a6170160382a184522bd7e3775027053bd5cd7db
|
364ec3089ac2dcdab887518ac1b816f1c0d2858e
|
/Fase10/Desafios/Desafio_032.py
|
b67d2c478d6671e29f0745ef3f89062e5b767c47
|
[] |
no_license
|
loc-dev/CursoEmVideo-Python
|
20488925da782677b9849944f9bbfd0d862e2b8f
|
d1eafebbfa88ca70ea0681e45edce6924a9c26d5
|
refs/heads/master
| 2022-12-05T22:35:23.056806
| 2020-08-26T20:20:00
| 2020-08-26T20:20:00
| 255,226,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
# Fase 10 - Condições ( Parte 1 )
# Desafio 32
# Faça um programa que leia três números
# e mostre qual é maior e qual é o menor.
n1 = int(input('Digite o primeiro número: '))
n2 = int(input('Digite o segundo número: '))
n3 = int(input('Digite o terceiro número: '))
if n1 > n2:
if n1 > n3:
print('O maior número é {}'.format(n1))
print('O menor número é {}'.format(n3))
else:
print('O maior número é {}'.format(n3))
print('O menor número é {}'.format(n2))
else:
if n2 > n3:
print('O maior número é {}'.format(n2))
print('O menor número é {}'.format(n1))
else:
print('O maior número é {}'.format(n3))
print('O menor número é {}'.format(n1))
|
[
"leonardoc.developer@gmail.com"
] |
leonardoc.developer@gmail.com
|
ee2b759f34b601d9e0a5b19d7e40fdfbbb995092
|
4cc285b0c585241ff4404087e6fbb901195639be
|
/NeuralNetworkNumbers/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/autodiff/__init__.py
|
9aff8afae8ce5d4673c70fa11a102c5053d8bfc3
|
[] |
no_license
|
strazhg/NeuralNetworksPython
|
815542f4ddbb86e918e657f783158f8c078de514
|
15038e44a5a6c342336c119cdd2abdeffd84b5b1
|
refs/heads/main
| 2023-04-16T18:51:29.602644
| 2021-04-27T14:46:55
| 2021-04-27T14:46:55
| 361,944,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:460dbd09fb995b71b073b0f30f23ef0db5586ce34f01294ee54ca7d366f974a1
size 413
|
[
"golubstrazh@gmail.com"
] |
golubstrazh@gmail.com
|
820b9f97995a650d7f84aa1abfefe03c929c296e
|
4ff8676136167cdd81d7a983272102fff86360e8
|
/python/404. 左叶子之和.py
|
3f7495600a4a204120f687388c5274a274da8162
|
[] |
no_license
|
geniuscynic/leetcode
|
0ec256af2377d19fee22ce736462a7e95e3f4e67
|
379a8f27f8213951ee8be41bd56598036995d267
|
refs/heads/master
| 2023-07-19T07:22:20.001770
| 2021-09-07T14:50:40
| 2021-09-07T14:50:40
| 297,277,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,137
|
py
|
import sys
from collections import defaultdict
from collections import Counter
from collections import deque
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def helper(self, root: TreeNode, parentNode):
if not root:
return 0
if not root.left and not root.right and parentNode.left and parentNode.left == root:
return root.val
return self.helper(root.left, root) + self.helper(root.right, root)
def sumOfLeftLeaves(self, root: TreeNode) -> int:
return self.helper(root, root)
def isleaf(self, root: TreeNode):
if not root.left and not root.right:
return True
return False
def sumOfLeftLeaves_1(self, root: TreeNode) -> int:
if not root:
return 0
#if not root.left and not root.right:
#return root.val
res = 0
if root.left:
res += root.left.val if self.isleaf(root.left) else self.sumOfLeftLeaves_1(root.left)
if root.right and not self.isleaf(root.right):
res += self.sumOfLeftLeaves_1(root.right)
return res
def coverttoTree():
ls =deque([-6,8,-4,8,-5,-1,None,-9,9,8,8,None,None,-5,6,None,None,None,-4,None,4,None,None,8,8,None,None,None,5,None,None,None,None,None,-9])
temp = TreeNode(ls.popleft())
res = deque()
res.append(temp)
while ls:
left = ls.popleft()
right = ls.popleft()
node = res.popleft()
#print(node.val, left, right)
if left != None:
node.left = TreeNode(left)
res.append(node.left)
if right != None:
node.right = TreeNode(right)
res.append(node.right)
return temp
if __name__ == "__main__":
solution = Solution()
nums1 = coverttoTree()
m = TreeNode(2)
nums2 = TreeNode(4)
n = 3
result = solution.sumOfLeftLeaves_1(nums1)
#print(solution.ls)
print(result)
|
[
"350810375@qq.com"
] |
350810375@qq.com
|
b77a8520217da5787c934d1d79e6e0b831e46e6d
|
039c5b793ace774bb815f4061a273ff098efd475
|
/service/venv/bin/easy_install-3.5
|
b8d6d4e653a8969421dd255380f763334d7b2df5
|
[] |
no_license
|
zzyzx4/soft
|
b7872a1c1e2dc91912f22aaaf96f2cedaf1423c1
|
264c399ddef2b55efd8a1a8b796320f72c6dec7c
|
refs/heads/master
| 2022-12-16T20:50:45.512689
| 2019-07-01T11:38:12
| 2019-07-01T11:38:12
| 184,214,960
| 0
| 0
| null | 2022-12-08T05:07:18
| 2019-04-30T07:38:24
| null |
UTF-8
|
Python
| false
| false
| 446
|
5
|
#!/home/user/PycharmProjects/service/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.5'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.5')()
)
|
[
"dastik0101@gmail.com"
] |
dastik0101@gmail.com
|
a6fdcba7f9380ceaab8bb1fef39cbc7f2713c220
|
8ac36a81c150432a989ac20c622d41f3e0d88625
|
/recruitment/recruitment/doctype/type/type.py
|
12aca933d5ba6247e8a6ade2221095ece4b498af
|
[
"MIT"
] |
permissive
|
asoral/recruitment
|
ed85fd4ef2fa7f16ec0098cb80dd67e792fc3ead
|
bcfdfd9ffe6b493cc79565b0bc1055bee6299645
|
refs/heads/master
| 2021-01-04T10:33:08.573635
| 2020-01-09T13:15:12
| 2020-01-09T13:15:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Type(Document):
pass
|
[
"abdulla.pi@voltechgroup.com"
] |
abdulla.pi@voltechgroup.com
|
23f246e57df6bb3cbf79901d0d81e4121278878e
|
4ce0f35c6aa01f5041a11979a8b5662d8ad08962
|
/learning_machine/brain.py
|
27f04210e42fe2f21713eb36f846e0542c11fc30
|
[] |
no_license
|
lanlanzky/tt
|
f125785b00b51774c9033492117305dfba19fb8f
|
4666af6a250a48200f5af9ef9692da53bbfcd79d
|
refs/heads/master
| 2016-09-06T02:19:18.492453
| 2014-09-01T13:26:55
| 2014-09-01T13:26:55
| 23,542,631
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,895
|
py
|
#coding=utf8
from numpy import *
from django.http import HttpResponse
from django.shortcuts import render_to_response
from stock.models import New_stock
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
#归一化函数处理
def normal(record):
return [ "%.5f" % round(float((i-min(record)))/(max(record)-min(record)),4) for i in record]
#返归一化
def backnormal(backdata,outdata):
large=max(backdata)
small=min(backdata)
bizhi=large-small
for i in range(len(outdata)):
for j in range(len(outdata[1])):
outdata[i][j]=outdata[i][j]*bizhi+small
return outdata
#实验数据归一化处理
def newalldate(alldate,len):
newalldate=[]
allopen=[]
allhigh=[]
alllow=[]
allclose=[]
allvolumn=[]
alladjclose=[]
for date in alldate:
allopen.append(date.open)
allhigh.append(date.high)
alllow.append(date.low)
allclose.append(date.close)
allvolumn.append(date.volume)
alladjclose.append(date.adjclose)
newallopen=normal([ float(i) for i in allopen])
newallhigh=normal([ float(i) for i in allhigh])
newalllow=normal([ float(i) for i in alllow])
newallclose=normal([ float(i) for i in allclose])
newallvolume=normal([ float(i) for i in allvolumn])
newalladjclose=normal([ float(i) for i in alladjclose])
for i in range(len):
new=[]
new.append(newallopen[i])
new.append(newallhigh[i])
new.append(newalllow[i])
new.append(newallclose[i])
new.append(newallvolume[i])
new.append(newalladjclose[i])
newalldate.append(new)
return newalldate
# 用神经网络来预测最大值
# 用神经网络来预测最小值
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
15a49b8d9a456333def2e0f3c0135a0c1957b1bc
|
529833339de2d1f78ec79d4bbe7e6f174fd66779
|
/alignments/select.py
|
0387674d35b0808a4fa574902bfe1447bb5ef0f1
|
[] |
no_license
|
standage/EmexAssemblyMay2016
|
01f98c69b481e9c7670d35a82c62628b53747927
|
7a22a17fa0ff6b28262b5da5e906a9554862bcac
|
refs/heads/master
| 2021-01-18T10:44:31.587771
| 2016-05-20T16:20:44
| 2016-05-20T16:20:44
| 59,306,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
#!/usr/bin/env python
import sys
from __future__ import print_function
def parse(data):
name, seq = None, []
for line in data:
line = line.rstrip()
if line.startswith('>'):
if name:
yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name:
yield (name, ''.join(seq))
if __name__ == '__main__':
for defline, seq in parse(sys.stdin):
if len(seq) > 250000:
print(defline)
print(seq)
|
[
"daniel.standage@gmail.com"
] |
daniel.standage@gmail.com
|
c73e7e538899e34cc1dd877afb850c40d7e3a7f6
|
e8fa46e0e5318c229a49b2803910e12e4d29884e
|
/interviewbit/Hashing/2-SumBook.py
|
8bd2842ecf7da92e021b911c43628a7a180a5bea
|
[] |
no_license
|
igorbragaia/algorithms
|
e6bc71b0612a65b2650c259aa2cdec593b9f6c53
|
0b4204c5a11d736c7299bd8c485e325eed630a19
|
refs/heads/master
| 2021-12-12T13:49:13.226739
| 2021-11-27T02:51:23
| 2021-11-27T02:51:23
| 106,027,078
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 703
|
py
|
class Solution:
# @param A : tuple of integers
# @param B : integer
# @return a list of integers
def twoSum(self, A, B):
lista = []
new_hash = {}
for i in range(len(A)):
if A[i] not in new_hash:
new_hash[A[i]] = [i]
else:
new_hash[A[i]].append(i)
for i in range(len(A)):
if B - A[i] in new_hash:
temp = [x for x in new_hash[B - A[i]] if x > i]
if len(temp) > 0:
lista.append((min(temp) + 1, i +1))
lista = sorted(lista)
if lista != []:
return (lista[0][1], lista[0][0])
return lista
|
[
"igor.bragaia@gmail.com"
] |
igor.bragaia@gmail.com
|
6e84d40880efd0710dc18b037665f1bc62c15700
|
f523e7bdd7f616267b82a7f00f2b7cae132dc6b9
|
/dicodile/utils/plot_config.py
|
402d82f7377e42f69177aa8ef32b9abae42dcd3c
|
[
"BSD-3-Clause"
] |
permissive
|
tomMoral/dicodile
|
2d7da76be7d32fb05502cbb358fcda0018e5c00c
|
5a64fbe456f3a117275c45ee1f10c60d6e133915
|
refs/heads/main
| 2023-05-25T11:58:05.596455
| 2023-05-19T14:35:04
| 2023-05-19T14:35:04
| 167,703,861
| 17
| 8
|
BSD-3-Clause
| 2023-05-19T14:35:06
| 2019-01-26T15:26:24
|
Python
|
UTF-8
|
Python
| false
| false
| 742
|
py
|
STYLES = {
'lgcd': {
'color': 'C1',
'linestyle': 'o-',
'hatch': '//',
'label': 'LGCD',
'label_p': 'DiCoDiLe$_Z$'
},
'greedy': {
'color': 'C0',
'linestyle': 's-',
'hatch': None,
'label': 'Greedy',
'label_p': 'Dicod'
},
'cyclic': {
'color': 'C2',
'linestyle': '^-',
'hatch': None,
'label': 'Cyclic',
'label_p': 'Cyclic'
},
}
def get_style(name, *keys, parallel=False):
all_style = STYLES[name]
style = {
'label': all_style['label_p'] if parallel else all_style['label'],
'color': all_style['color']
}
for k in keys:
style[k] = all_style[k]
return style
|
[
"thomas.moreau.2010@gmail.com"
] |
thomas.moreau.2010@gmail.com
|
075948fe95a02e474ee41679d278ff9a4a2253ec
|
6ac0aeea8229c4e2c7a041e85c3afeeb106c6b01
|
/mark_big_words.py
|
acf70f3d66db359248c9b464095c9d13919c5604
|
[] |
no_license
|
waiteb15/py3intro
|
325dafaaa642052280d6c050eacf8b406b40e01d
|
68b30f147e7408220490a46d3e595acd60513e9e
|
refs/heads/master
| 2020-03-27T10:50:25.928836
| 2019-02-28T21:47:11
| 2019-02-28T21:47:11
| 146,448,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
#!/usr/bin/env python
import re
input_file_name = 'DATA/parrot.txt'
output_file_name = 'bigwords.txt'
pattern = r'\w{8,}'
def doit(m):
return f"**{m.group(0)}**"
with open(input_file_name) as parrot_in:
with open(output_file_name, 'w') as bigwords_out:
text = parrot_in.read()
new_text = re.sub(pattern, doit, text)
bigwords_out.write(new_text)
|
[
"waiteb15@gmail.com"
] |
waiteb15@gmail.com
|
93bff11c5085d5d9b492a51224c4a331395ffe4b
|
f6c6e0ebc18b7b1a28c23367f62c960e86194c88
|
/pythonmisc/qimage2ndarray/qt_driver.py
|
483cd287af1976d6c665860fa84d3304ece7273d
|
[] |
no_license
|
TheGrim1/python_work
|
9316d6fbb71a4be9bd901f104e939949dfd91174
|
5b34277aed4c06b62276644160e0aa97a4260233
|
refs/heads/master
| 2021-01-11T13:54:54.366575
| 2019-03-12T12:38:39
| 2019-03-12T12:38:39
| 94,876,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,663
|
py
|
# Copyright 2014-2014 Hans Meine <hans_meine@gmx.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains a wrapper around three different Qt python bindings.
It will dynamically decide which one to use:
* First, the environment variable QT_DRIVER is checked
(may be one of 'PyQt5', 'PyQt4', 'PySide', 'PythonQt').
* If unset, previously imported binding modules are detected (in sys.modules).
* If no bindings are loaded, the environment variable QT_API is checked
(used by ETS and ipython, may be 'pyside' or 'pyqt').
In order to have compatible behavior between the different bindings,
PyQt4 (if used) is configured as follows::
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
Furthermore, there is a 'getprop' function that solves the following
problem: PythonQt exports Qt properties as Python properties *and*
gives the precedence over getters with the same name. Instead of
calling getters with parentheses (which must not be used in PythonQt,
but are required in PyQt and PySide), one may e.g. write
`getprop(widget.width)`.
"""
import sys, os
def getprop_PythonQt(prop):
"""getprop(property_or_getter)
Used on getters that have the same name as a corresponding
property. For PythonQt, this version will just return the
argument, which is assumed to be (the value of) a python property
through which PythonQt exposes Qt properties."""
return prop
def getprop_other(getter):
"""getprop(property_or_getter)
Used on getters that have the same name as a corresponding
property. For Qt bindings other than PythonQt, this version will
return the result of calling the argument, which is assumed to be
a Qt getter function. (With PythonQt, properties override getters
and no calling must be done.)"""
return getter()
class QtDriver(object):
DRIVERS = ('PyQt5', 'PyQt4', 'PySide', 'PythonQt')
DEFAULT = 'PyQt4'
@classmethod
def detect_qt(cls):
for drv in cls.DRIVERS:
if drv in sys.modules:
return drv
if '_PythonQt' in sys.modules:
return 'PythonQt'
return None
def name(self):
return self._drv
def getprop(self):
return getprop_PythonQt if self._drv == 'PythonQt' else getprop_other
def __init__(self, drv = os.environ.get('QT_DRIVER')):
"""Supports QT_API (used by ETS and ipython)"""
if drv is None:
drv = self.detect_qt()
if drv is None:
drv = os.environ.get('QT_API')
if drv is None:
drv = self.DEFAULT
drv = {'pyside' : 'PySide', 'pyqt' : 'PyQt4', 'pyqt5' : 'PyQt5'}.get(drv, drv) # map ETS syntax
assert drv in self.DRIVERS
self._drv = drv
@staticmethod
def _initPyQt4():
"""initialize PyQt4 to be compatible with PySide"""
if 'PyQt4.QtCore' in sys.modules:
# too late to configure API
pass
else:
import sip
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
@staticmethod
def requireCompatibleAPI():
"""If PyQt4's API should be configured to be compatible with PySide's
(i.e. QString and QVariant should not be explicitly exported,
cf. documentation of sip.setapi()), call this function to check that
the PyQt4 was properly imported. (It will always be configured this
way by this module, but it could have been imported before we got a
hand on doing so.)
"""
if 'PyQt4.QtCore' in sys.modules:
import sip
for api in ('QVariant', 'QString'):
if sip.getapi(api) != 2:
raise RuntimeError('%s API already set to V%d, but should be 2' % (api, sip.getapi(api)))
def importMod(self, mod):
if self._drv == 'PyQt4':
self._initPyQt4()
qt = __import__('%s.%s' % (self._drv, mod))
return getattr(qt, mod)
def __getattr__(self, name):
if name.startswith('Qt'):
return self.importMod(name)
return super(QtDriver, self).__getattr__(name)
|
[
"opid13@nanofocus.esrf.fr"
] |
opid13@nanofocus.esrf.fr
|
4f7c48e8a99a701068ab27a5eeb99bafc6831225
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/312/usersdata/287/75300/submittedfiles/esferas.py
|
970420237374c04f7c3159b9f92c411be16ac468
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
# -*- coding: utf-8 -*-
A=int(input('Digite o peso da esfera A: '))
B=int(input('Digite o peso da esfera B: '))
C=int(input('Digite o peso da esfera C: '))
D=int(input('Digite o peso da esfera D: '))
if A==B+C+D and B+C==D and B==C:
print('S')
else:
print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
f79a69fdebbb591c2349e3a4bd097ab47249f04a
|
0f77deeffc1526d3befcb777ba4faebe2059e0bb
|
/lstm.py
|
27295908eec7e8550fc16f3602f31bf6a21b5d7c
|
[] |
no_license
|
akshay1997/TRUMP-TWITTER-BOT
|
6d082a45ca939ce2f41c9cba8cd6198dadb54428
|
01f781fe2f7eeb71f11d932906b39b26776eafec
|
refs/heads/master
| 2021-01-01T16:44:19.137215
| 2017-07-21T04:59:01
| 2017-07-21T04:59:01
| 97,906,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,735
|
py
|
import numpy
#3563 = 2850+713 1426 2139 2850
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
lines = []
with open ('realDonaldTrump.txt','r') as filename:
for line in filename:
lines.append(line)
lines1 = lines[0:713]
lines2 = lines[713:1426]
lines3 = lines[1426:2139]
lines4 = lines[2139:2850]
lines5 = lines[2850:]
raw_text = lines1
raw_text = raw_text.lower()
chars = sorted(list(set(raw_text)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
n_chars = len(raw_text)
n_vocab = len(chars)
print "Total Characters: ", n_chars
print "Total Vocab: ", n_vocab
seq_length = 20
dataX = []
dataY = []
for i in range(0, n_chars - seq_length, 1):
seq_in = raw_text[i:i + seq_length]
seq_out = raw_text[i + seq_length]
dataX.append([char_to_int[char] for char in seq_in])
dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
print "Total Patterns: ", n_patterns
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
X = X / float(n_vocab)
y = np_utils.to_categorical(dataY)
model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
filepath="weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
model.fit(X, y, nb_epoch=1, batch_size=64, callbacks=callbacks_list)
|
[
"you@example.com"
] |
you@example.com
|
1cbaac8e46e28a8e424d041eb4c906491546cbea
|
936dee544c471013bd1788b441042e22c3522633
|
/deploy_tools/fabfile.py
|
ba1a955ef64a555f54ad8ba7f9d51b656ec4899d
|
[] |
no_license
|
k5766273/test
|
bdaa808e1f10112a3f751a499e5890350d9ff733
|
b947d5d2b69b510bb17df1f66b9b03c821f141c9
|
refs/heads/master
| 2023-05-19T20:11:34.849263
| 2021-06-06T01:51:10
| 2021-06-06T01:51:10
| 356,159,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 2,325
|
py
|
from fabric.contrib.files import append, exists, sed
from fabric.api import env, local, run
import random
REPO_URL = 'https://github.com/k5766273/test'
#env.use_ssh_config = True
def deploy():
site_folder = f'/home/ubuntu/sites/{env.host}'
source_folder = site_folder + '/suplerlists'
_create_directory_structure_if_necessary(site_folder)
_get_latest_source(source_folder)
_update_settings(source_folder, env.host)
_update_virtualenv(source_folder)
_update_static_files(source_folder)
_update_database(source_folder)
def _create_directory_structure_if_necessary(site_folder):
for subfolder in ('database', 'static', 'virtualenv', 'suplerlists'):
run(f'mkdir -p {site_folder}/{subfolder}')
def _get_latest_source(source_folder):
if exists(source_folder + '/.git'):
run(f'cd {source_folder} && git fetch')
else:
run(f'git clone {REPO_URL} {source_folder}')
current_commit = local("git log -n 1 --format=%H", capture=True)
run(f'cd {source_folder} && git reset --hard {current_commit}')
def _update_settings(source_folder, site_name):
settings_path = source_folder + '/suplerlists/settings.py'
sed(settings_path, "DEBUG = True", "DEBUG = False")
sed(settings_path,'ALLOWED_HOSTS =.+$',f'ALLOWED_HOSTS = ["{site_name}"]' )
secret_key_file = source_folder + '/suplerlists/secret_key.py'
if not exists(secret_key_file):
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
key = ''.join(random.SystemRandom().choice(chars) for _ in range(50))
append(secret_key_file, f'SECRET_KEY = "{key}"')
append(settings_path, '\nfrom .secret_key import SECRET_KEY')
def _update_virtualenv(source_folder):
virtualenv_folder = source_folder + '/../virtualenv'
if not exists(virtualenv_folder + '/bin/pip'):
run(f'python3.6 -m venv {virtualenv_folder}')
run(f'{virtualenv_folder}/bin/pip install -r {source_folder}/requirements.txt')
def _update_static_files(source_folder):
run(
f'cd {source_folder}'
' && ../virtualenv/bin/python manage.py collectstatic --noinput'
)
def _update_database(source_folder):
run(
f'cd {source_folder}'
' && ../virtualenv/bin/python manage.py migrate --noinput'
)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
688f4172210d8b670a6bc922eef372027e2123bf
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/stp/rtstpifpol.py
|
c74118f5914bc4cd48e40179c05611a98540fed7
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381
| 2020-01-20T09:18:28
| 2020-01-20T09:18:28
| 235,065,676
| 0
| 0
| null | 2023-05-01T21:19:14
| 2020-01-20T09:36:37
|
Python
|
UTF-8
|
Python
| false
| false
| 5,225
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtStpIfPol(Mo):
"""
A target relation to the spanning-tree protocol interface policy.
"""
meta = TargetRelationMeta("cobra.model.stp.RtStpIfPol", "cobra.model.infra.AccGrp")
meta.moClassName = "stpRtStpIfPol"
meta.rnFormat = "rtinfraStpIfPol-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Abstraction of Leaf Access Policy Group"
meta.writeAccessMask = 0x4100000000001
meta.readAccessMask = 0x4100000000011
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.stp.IfPol")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.pol.NFromRef")
meta.rnPrefixes = [
('rtinfraStpIfPol-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 13244, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4387
prop.defaultValueStr = "infraAccGrp"
prop._addConstant("infraAccBndlGrp", None, 4406)
prop._addConstant("infraAccBndlPolGrp", None, 6102)
prop._addConstant("infraAccGrp", None, 4387)
prop._addConstant("infraAccPortGrp", None, 4409)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 13243, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("stpIfPolToPortGroups", "Portgroups", "cobra.model.vmm.EpPD"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("stpIfPolToVirtualMachines", "Virtual Machines", "cobra.model.comp.Vm"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("L2IfPolToEthIf", "Interface", "cobra.model.l1.EthIf"))
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
namingVals = [tDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"rrishike@cisco.com"
] |
rrishike@cisco.com
|
a22bbe274b90794f62ec8b4f2d459c7a5e30f250
|
8e7c006a81ebbbc60c6750dbb562ebb071a1d8aa
|
/base/05_basic_convnet.py
|
310ef0b149be15f8da82789d0b20368fdd76131c
|
[] |
no_license
|
xueyangfu/tensorflow-learning
|
8b65dbc0e3a437ed2a14b4987c8fe7848ed2a6c4
|
ec477ac02ae5c2506819a7f8c147e3774baa3a4a
|
refs/heads/master
| 2021-05-15T16:32:05.194502
| 2017-01-30T12:24:46
| 2017-01-30T12:24:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,864
|
py
|
# -*- coding: utf-8 -*-
"""Simple tutorial following the TensorFlow example of a Convolutional Network.
Parag K. Mital, Jan. 2016"""
# %% Imports
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
from libs.utils import *
import matplotlib.pyplot as plt
# %% Setup input to the network and true output label. These are
# simply placeholders which we'll fill in later.
mnist = input_data.read_data_sets('../datas/mnist/', one_hot=True)
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
# %% Since x is currently [batch, height*width], we need to reshape to a
# 4-D tensor to use it in a convolutional graph. If one component of
# `shape` is the special value -1, the size of that dimension is
# computed so that the total size remains constant. Since we haven't
# defined the batch dimension's shape yet, we use -1 to denote this
# dimension should not change size.
x_tensor = tf.reshape(x, [-1, 28, 28, 1])
# %% We'll setup the first convolutional layer
# Weight matrix is [height x width x input_channels x output_channels]
filter_size = 5
n_filters_1 = 16
W_conv1 = weight_variable([filter_size, filter_size, 1, n_filters_1])
# %% Bias is [output_channels]
b_conv1 = bias_variable([n_filters_1])
# %% Now we can build a graph which does the first layer of convolution:
# we define our stride as batch x height x width x channels
# instead of pooling, we use strides of 2 and more layers
# with smaller filters.
h_conv1 = tf.nn.relu(
tf.nn.conv2d(input=x_tensor,
filter=W_conv1,
strides=[1, 2, 2, 1],
padding='SAME') +
b_conv1)
# %% And just like the first layer, add additional layers to create
# a deep net
n_filters_2 = 16
W_conv2 = weight_variable([filter_size, filter_size, n_filters_1, n_filters_2])
b_conv2 = bias_variable([n_filters_2])
h_conv2 = tf.nn.relu(
tf.nn.conv2d(input=h_conv1,
filter=W_conv2,
strides=[1, 2, 2, 1],
padding='SAME') +
b_conv2)
# %% We'll now reshape so we can connect to a fully-connected layer:
h_conv2_flat = tf.reshape(h_conv2, [-1, 7 * 7 * n_filters_2])
# %% Create a fully-connected layer:
n_fc = 1024
W_fc1 = weight_variable([7 * 7 * n_filters_2, n_fc])
b_fc1 = bias_variable([n_fc])
h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)
# %% We can add dropout for regularizing and to reduce overfitting like so:
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# %% And finally our softmax layer:
W_fc2 = weight_variable([n_fc, 10])
b_fc2 = bias_variable([10])
y_pred = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# %% Define loss/eval/training functions
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
optimizer = tf.train.AdamOptimizer().minimize(cross_entropy)
# %% Monitor accuracy
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
# %% We now create a new session to actually perform the initialization the
# variables:
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# %% We'll train in minibatches and report accuracy:
batch_size = 100
n_epochs = 5
for epoch_i in range(n_epochs):
for batch_i in range(mnist.train.num_examples // batch_size):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={
x: batch_xs, y: batch_ys, keep_prob: 0.5})
print(sess.run(accuracy,
feed_dict={
x: mnist.validation.images,
y: mnist.validation.labels,
keep_prob: 1.0
}))
# %% Let's take a look at the kernels we've learned
W = sess.run(W_conv1)
plt.imshow(montage(W / np.max(W)), cmap='coolwarm')
|
[
"kkoolerter@gmail.com"
] |
kkoolerter@gmail.com
|
7582910eaa393cc51825e953f774977c6c676280
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/T/tmayor/test-3.py
|
df783a0fb6ae2cb6a043ea259a571331baf5e15d
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,422
|
py
|
# lxml is a complete library for parsing xml and html files. http://codespeak.net/lxml/
# The interface is not totally intuitive, but it is very effective to use,
# especially with cssselect.
import lxml.etree
import lxml.html
print help(lxml.html.parse)
# create an example case
samplehtml = """<html><body>
<h1>hi</h1>
<p class="cccc">something <strong>good</strong>
<p>Another paragraph</p>
<ul class="LLL">
<li class="1">first</li>
<li class="2">second</li>
<li class="1" id="nimble">third <b>jjj</b></li>junk
</ul>
</body></html>"""
#root = lxml.html.fromstring(samplehtml) # an lxml.etree.Element object
# To load directly from a url, use
root = lxml.html.parse('http://www.guardian.co.uk/news/gallery/2010/oct/12/1').getroot()
# Whenever you have an lxml element, you can convert it back to a string like so:
#print lxml.etree.tostring(root)
# Use cssselect to select elements by their css code
#print root.cssselect("li.initially-off") # returns 2 elements
#print root.cssselect("ul #nimble") # returns 1 element
#print root.cssselect(".LLL li") # returns 3 elements
# extracting text from a single element
linimble = root.cssselect("li.initially-off")[0]
#help(linimble) # prints the documentation for the object
print lxml.etree.tostring(linimble) # note how this includes trailing text 'junk'
#print linimble.text # just the text between the tag
#print linimble.tail # the trailing text
#print list(linimble) # prints the <b> object
# This recovers all the code inside the object, including any text markups like <b>
#print linimble.text + "".join(map(lxml.etree.tostring, list(linimble)))
# lxml is a complete library for parsing xml and html files. http://codespeak.net/lxml/
# The interface is not totally intuitive, but it is very effective to use,
# especially with cssselect.
import lxml.etree
import lxml.html
print help(lxml.html.parse)
# create an example case
samplehtml = """<html><body>
<h1>hi</h1>
<p class="cccc">something <strong>good</strong>
<p>Another paragraph</p>
<ul class="LLL">
<li class="1">first</li>
<li class="2">second</li>
<li class="1" id="nimble">third <b>jjj</b></li>junk
</ul>
</body></html>"""
#root = lxml.html.fromstring(samplehtml) # an lxml.etree.Element object
# To load directly from a url, use
root = lxml.html.parse('http://www.guardian.co.uk/news/gallery/2010/oct/12/1').getroot()
# Whenever you have an lxml element, you can convert it back to a string like so:
#print lxml.etree.tostring(root)
# Use cssselect to select elements by their css code
#print root.cssselect("li.initially-off") # returns 2 elements
#print root.cssselect("ul #nimble") # returns 1 element
#print root.cssselect(".LLL li") # returns 3 elements
# extracting text from a single element
linimble = root.cssselect("li.initially-off")[0]
#help(linimble) # prints the documentation for the object
print lxml.etree.tostring(linimble) # note how this includes trailing text 'junk'
#print linimble.text # just the text between the tag
#print linimble.tail # the trailing text
#print list(linimble) # prints the <b> object
# This recovers all the code inside the object, including any text markups like <b>
#print linimble.text + "".join(map(lxml.etree.tostring, list(linimble)))
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
7bf833a1e59a609e71791510c143929fed4b9090
|
de479d4a8af0e070b2bcae4186b15a8eb74971fb
|
/cn/iceknc/study/h_python_web_server/__init__.py
|
4c443bf5e4053a05fd09d03cb7e0b34b24ee6f4b
|
[] |
no_license
|
iceknc/python_study_note
|
1d8f6e38be57e4dc41a661c0a84d6ee223c5a878
|
730a35890b77ecca3d267fc875a68e96febdaa85
|
refs/heads/master
| 2020-05-19T18:44:55.957392
| 2019-09-27T01:15:54
| 2019-09-27T01:15:54
| 185,160,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
# -*- coding: utf-8 -*-
# @Author: 徐志鹏
# @Date : 2019/5/15
# @Desc :
def main():
pass
if __name__ == "__main__":
main()
|
[
"xzhipeng@lifecare.cn"
] |
xzhipeng@lifecare.cn
|
eee8bf0ebe7dad15987813d9178a9e6fc7e754d2
|
33602d2bf63bb038f29f22383c912a06045d7e00
|
/v15_pong_supervised/utils.py
|
d000d2b3eec684a2fc88e0a73247edc928d1c2f5
|
[] |
no_license
|
evanthebouncy/nnprog
|
26af89726a915d7d3f78131c4f8733cdceb6100e
|
576ea87469df2135bf133325d22c23ec4b196a92
|
refs/heads/master
| 2020-07-06T00:32:23.949875
| 2017-12-06T20:50:18
| 2017-12-06T20:50:18
| 66,980,038
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,127
|
py
|
import numpy as np
import gym
import random
import pickle
import gym.envs.atari
import draw
# Preprocesses the given image:
# (1) remove the scoreboard
# (2) make it monochromatic
# (3) make the background black
#
# obs: Image
# return: Image
# Image = np.array([n_rows, n_cols])
def preprocess(obs):
obs = obs[34:194]
obs = obs[::2,::2,0]
obs[obs == 144] = 0
return obs.astype(np.float)
def lose_color(proccessed_obs):
return np.clip(proccessed_obs, 0, 1)
# Assumes that the pixels of the given value in the given image
# exactly form a rectangle (or else there are no pixels of that color).
# Returns the rectangle if it exists, or else None.
#
# val: int
# obs: Image
# return: None | Rectangle
# Image = np.array([n_rows, n_cols])
def _get_rectangle(obs, val):
min_val = np.argmax(obs.ravel() == val)
max_val = len(obs.ravel()) - np.argmax(np.flip(obs.ravel(), 0) == val) - 1
x_pos = min_val % obs.shape[1]
y_pos = min_val / obs.shape[1]
x_len = (max_val % obs.shape[1]) - x_pos + 1
y_len = (max_val / obs.shape[1]) - y_pos + 1
return None if x_pos == 0 and y_pos == 0 and x_len == obs.shape[1] and y_len == obs.shape[0] else np.array([x_pos + x_len/2, y_pos + y_len/2])
# Retrieves the rectangle representing our paddle.
def get_our_paddle(obs):
obs = preprocess(obs)
return _get_rectangle(obs, 92)
# Retrieves the rectangle representing the ball.
def get_ball(obs):
obs = preprocess(obs)
return _get_rectangle(obs, 236)
def same_line_print(message):
sys.stdout.write("\r" + message)
sys.stdout.flush()
def render_state(env, state):
env.reset()
env.restore_full_state(state)
def get_up_down_signal(ob1, ob2):
default_val = np.array([1.0, 0.0, 0.0])
if ob2 is None:
return default_val
# obs = preprocess(obs)
paddle = get_our_paddle(ob2)
ball = get_ball(ob2)
if ball is None or paddle is None:
return default_val
return np.array([0.0, 1.0, 0.0]) if paddle[1] >= ball[1] else np.array([0.0, 0.0, 1.0])
def get_simple_signal(ob1, ob2):
def _state1(ob1, ob2):
default_val = np.array([1.0, 0.0, 0.0])
if ob1 is None or ob2 is None:
return default_val
# obs = preprocess(obs)
paddle = get_our_paddle(ob2)
ball = get_ball(ob2)
if ball is None or paddle is None or ball[0] < 50:
return default_val
return np.array([0.0, 1.0, 0.0]) if paddle[1] >= ball[1] else np.array([0.0, 0.0, 1.0])
def _state2(ob1, ob2):
default_val = np.array([1.0, 0.0])
if ob1 is None or ob2 is None:
return default_val
paddle = get_our_paddle(ob2)
if 38 <= paddle[1] <= 42: return np.array([0.0, 1.0])
return default_val
return np.concatenate([_state1(ob1,ob2), _state2(ob1,ob2)])
def get_signal(obs, prev_obs, prev_move):
default_val = np.array([0.0 for i in range(9)])
if obs is None or prev_obs is None:
return default_val
# obs = preprocess(obs)
paddle = get_our_paddle(obs)
ball = get_ball(obs)
prev_ball = get_ball(prev_obs)
prev_paddle = get_our_paddle(prev_obs)
if ball is None or paddle is None or prev_ball is None or prev_paddle is None:
return default_val
# print "some stuff "
# print "prev ball ", prev_ball
# print "ball ", ball
# print "paddle ", paddle
# older
paddle = paddle[1:] / 80.0
prev_paddle = prev_paddle[1:] / 80.0
diff = ball - prev_ball
# print "diff ", diff
diff = diff / float(np.max(abs(diff))) if np.max(abs(diff)) > 0 else np.array([0.0, 0.0])
ball = ball / 80.0
prev_move = np.array([1.0, 0.0] if prev_move == 2 else [0.0, 1.0])
care = 1.0 if ball[0] >= 60.0 / 80.0 and ball[0] <= 71.0 / 80.0 else 0.0
# print "ball ", ball
signal = np.concatenate([paddle, prev_paddle, ball, diff, prev_move, [care]])
signal = signal * care
# newer
# print ball, prev_ball
# diff = ball - prev_ball
# print "diff ", diff
# a = 1.0 if paddle[1] > ball[1] else -1.0
# b = diff[0]
# signal = np.array([a,b, 0.0, 0.0, 0.0, 0.0])
return signal
def get_signal_full_image(obs, prev_obs):
if obs is None or prev_obs is None:
return None
obs = lose_color(preprocess(obs))
prev_obs = lose_color(preprocess(prev_obs))
# obs_diff = obs - prev_obs
# draw.draw(obs_diff, "obs.png")
return obs, prev_obs
# generate a pong trace, the actor takes in the last 2 states as inputs
def generate_pong_trace(env, start_state, agent, n=200, do_render=True):
env.reset()
env.restore_full_state(start_state)
trace = []
all_obs = [None, None]
all_actions = [2]
for i in range(n):
action = agent.act((all_obs[-2], all_obs[-1],all_actions[-1]), show_prob = do_render)
obs, reward, done, comments = env.step(action)
if do_render:
env.render()
trace.append(((all_obs[-2], all_obs[-1],all_actions[-1]), action, reward))
all_obs.append(obs)
all_actions.append(action)
if done: break
return trace
def get_random_state(env, start_state):
env.reset()
env.restore_full_state(start_state)
for i in range(random.randint(100, 500)):
_, a, b, c = env.step(random.choice([2,3]))
state = env.clone_full_state()
return state
|
[
"evanthebouncy@gmail.com"
] |
evanthebouncy@gmail.com
|
b43a0ea6840af2ffc73bc3ec3411ff7e6682262b
|
5ba3115523fb052d32db827e09443248ec5f6629
|
/algorithm/PycharmProjects/week2/셀렉션 알고리즘.py
|
ea0f1342ff0818baef2e0b87e8900f027e4bd11d
|
[] |
no_license
|
oliviaspark0825/TIL
|
841095003ae794e14bd8c7e8c883826667c25f37
|
8bc66836f9a1eea5f42e9e1172f81f005abc042d
|
refs/heads/master
| 2023-01-10T22:14:15.341489
| 2019-08-22T09:09:52
| 2019-08-22T09:09:52
| 162,099,057
| 0
| 0
| null | 2023-01-04T07:52:28
| 2018-12-17T08:32:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 645
|
py
|
# k 번만큼 골라서 제일 작은 거를 찾아서 바꾼다, 교환 횟수가 작음
#
# def select(list, k):
# for i in range(0, k):
# minInex = for j in range ( i+1, len(list)):
# if list[minIndex] > list[j]:
# minIndex = j
# list[i], list[minIndex] = list[minIndex], list[i]
# return list[k -1]
def selectionSort(a):
for i in range(0, len(a) -1): # 0부터 n-1 까지
min = i
for j in range(i+1, len(a)):
if a[min] > a[j]:
min = j
a[i], a[min] = a[min], a[i]
data = [64, 25, 10, 22, 11]
selectionSort(data)
print(data)
|
[
"suhyunpark0825@gmail.com"
] |
suhyunpark0825@gmail.com
|
2dc4b913336525af52c6bd856739646b091e1ebd
|
af4b5830b2a23d1f3d126297c7eb057bb3f8e42f
|
/pymatflow/cp2k/base/pw_dft.py
|
fd0c25c249ac97dad547a452ce45111dd806c4d6
|
[
"MIT"
] |
permissive
|
mukhtarbayerouniversity/pymatflow
|
de2b2d573ceed68c1dd3c149c538588394029137
|
9ab61e56659519cd6c83d5bd32da1262f44da065
|
refs/heads/master
| 2023-02-13T01:50:32.993401
| 2021-01-13T15:19:36
| 2021-01-13T15:19:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,477
|
py
|
#!/usr/bin/evn python
# _*_ coding: utf-8 _*_
import numpy as np
import sys
import os
import shutil
from pymatflow.cp2k.base.pw_dft_control import cp2k_pw_dft_control
from pymatflow.cp2k.base.pw_dft_iterative_solver import cp2k_pw_dft_iterative_solver
from pymatflow.cp2k.base.pw_dft_mixer import cp2k_pw_dft_mixer
from pymatflow.cp2k.base.pw_dft_parameters import cp2k_pw_dft_parameters
"""
usage:
"""
# ============================================
# CP2K / PW_DFT
#=============================================
class cp2k_pw_dft:
"""
"""
def __init__(self):
"""
"""
self.params = {
}
self.status = False
self.control = cp2k_pw_dft_control()
self.iterative_solver = cp2k_pw_dft_iterative_solver()
self.mixer = cp2k_pw_dft_mixer()
self.parameters = cp2k_pw_dft_parameters()
# basic setting
self.control.status = True
self.iterative_solver.status = True
self.mixer.status = True
self.parameters.status = True
def to_input(self, fout):
"""
fout: a file stream for writing
"""
fout.write("\t&PW_DFT\n")
for item in self.params:
if self.params[item] is not None:
fout.write("\t\t%s %s\n" % (item, self.params[item]))
if self.control.status == True:
self.control.to_input(fout)
if self.iterative_solver.status == True:
self.iterative_solver.to_input(fout)
if self.mixer.status == True:
self.mixer.to_input(fout)
if self.parameters.status == True:
self.parameters.to_input(fout)
fout.write("\t&END PW_DFT\n")
def set_params(self, params):
for item in params:
if len(item.split("-")) == 2:
self.params[item.split("-")[-1]] = params[item]
elif item.split("-")[1] == "CONTROL":
self.control.set_params({item: params[item]})
elif item.split("-")[1] == "ITERATIVE_SOLVER":
self.iterative_solver.set_params({item: params[item]})
elif item.split("-")[1] == "MIXER":
self.mixer.set_params({item: params[item]})
elif item.split("-")[1] == "PARAMETERS":
self.parameters.set_params({item: params[item]})
else:
pass
|
[
"deqi_tang@163.com"
] |
deqi_tang@163.com
|
770a4f0fac37204c56e7b46b9d6c7531262723e9
|
dc99adb79f15b3889a7ef6139cfe5dfc614889b8
|
/Aplikace_1_0/Source/ewitis/gui/dfTableRaceInfo.py
|
43c2a4ae52ddd807c9956e0ae4200b57071eb4be
|
[] |
no_license
|
meloun/ew_aplikace
|
95d1e4063a149a10bb3a96f372691b5110c26b7b
|
f890c020ad8d3d224f796dab3f1f222c1f6ba0eb
|
refs/heads/master
| 2023-04-28T06:43:12.252105
| 2023-04-18T19:59:36
| 2023-04-18T19:59:36
| 2,674,595
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,188
|
py
|
# -*- coding: utf-8 -*-
import time
import pandas as pd
import pandas.io.sql as psql
from PyQt4 import QtCore, QtGui
from ewitis.gui.aTab import MyTab
from libs.myqt.DataframeTableModel import DataframeTableModel, ModelUtils
from ewitis.gui.dfTable import DfTable
from ewitis.gui.dfTableTimes import tableTimes
from ewitis.gui.dfTableUsers import tableUsers
from ewitis.gui.dfTableCategories import tableCategories
from ewitis.data.dstore import dstore
class DfModelRaceInfo(DataframeTableModel):
"""
RaceInfo table
states:
- race (default)
- dns (manually set)
- dq (manually set)
- dnf (manually set)
- finished (time received)
NOT finally results:
- only finished
Finally results:
- finished = with time
- race + dnf = DNF
- dns = DNS
- dq = DQ
"""
def __init__(self, table):
super(DfModelRaceInfo, self).__init__(table)
def getDefaultTableRow(self):
row = pd.Series()
row["id"] = 0
row["name"] = "NOTDEF"
row["cell#1"] = "-"
row["cell#2"] = "-"
row["cell#3"] = "-"
row["cell#4"] = "-"
row["cell#5"] = "-"
row["cell#250"] = "-"
return row
#virtual function to override
def GetDataframe(self):
row_id = 1
rows = pd.DataFrame()
#check if df is alread available
if tableTimes.model.df.empty:
return pd.DataFrame()
'''ADD TOTAL'''
#group by cell and get size
serTimesByCell_size = tableTimes.model.df.groupby("cell", as_index=False).size()
#create new row
row = self.getDefaultTableRow()
row["id"] = row_id
row["name"] = "Total"
for (k,v) in serTimesByCell_size.iteritems():
key = "cell#"+str(k)
row[key] = v
#append new row
rows = rows.append(row, ignore_index=True)
row_id = row_id + 1
'''ADD CATEGORIES'''
#group by category and get size
gbTimesByCategory = tableTimes.model.df.groupby("category")
for category, dfTimesInCategory in gbTimesByCategory:
serTimesForCategoryByCell_size = dfTimesInCategory.groupby("cell").size()
#create new row
row = self.getDefaultTableRow()
row["id"] = row_id
row["name"] = category
for (k,v) in serTimesForCategoryByCell_size.iteritems():
key = "cell#"+str(k)
row[key] = v
#add new row and increment id
rows = rows.append(row, ignore_index=True)
row_id = row_id + 1
df = pd.DataFrame(rows, columns=row.keys())
return df
'''
Proxy Model
'''
class DfProxymodelRaceInfo(QtGui.QSortFilterProxyModel, ModelUtils):
def __init__(self, parent = None):
QtGui.QSortFilterProxyModel.__init__(self, parent)
#This property holds whether the proxy model is dynamically sorted and filtered whenever the contents of the source model change.
self.setDynamicSortFilter(True)
#This property holds the column where the key used to filter the contents of the source model is read from.
#The default value is 0. If the value is -1, the keys will be read from all columns.
self.setFilterKeyColumn(-1)
# view <- proxymodel <- model
class DfTableRaceInfo(DfTable):
def __init__(self):
DfTable.__init__(self, "RaceInfo")
def Init(self):
DfTable.Init(self)
self.gui['view'].sortByColumn(0, QtCore.Qt.AscendingOrder)
#v modelu tahle funkce šahá do db, raceinfo nema tabulku v db
def updateDbCounter(self):
pass
tableRaceInfo = DfTableRaceInfo()
tabRaceInfo = MyTab(tables = [tableRaceInfo,])
|
[
"lubos.melichar@gmail.com"
] |
lubos.melichar@gmail.com
|
5b16b645adc70ad58e2d3385d3f9776e44594bf6
|
44a7330dfa4fe321eb432ee57a32328578dec109
|
/milk/supervised/gridsearch.py
|
265712ad1d69da0ae0eb8f9345e6471362c1c126
|
[
"MIT"
] |
permissive
|
tzuryby/milk
|
7cb6760fad600e9e0d0c9216dc749db289b596fb
|
a7159b748414d4d095741978fb994c4affcf6b9b
|
refs/heads/master
| 2020-12-29T02:45:33.044864
| 2011-03-15T20:23:29
| 2011-03-15T20:25:11
| 1,485,748
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,128
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2011, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT. See COPYING.MIT file in the milk distribution
from __future__ import division
import numpy as np
from .classifier import normaliselabels
__all__ = [
'gridminimise',
'gridsearch',
]
def _allassignments(options):
try:
from itertools import product
except ImportError:
def product(*args, **kwds):
# from http://docs.python.org/library/itertools.html#itertools.product
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
from itertools import repeat, izip
for ks,vs in izip(repeat(options.keys()), product(*options.values())):
yield zip(ks,vs)
def _set_assignment(obj,assignments):
for k,v in assignments:
obj.set_option(k,v)
def gridminimise(learner, features, labels, params, measure=None, nfolds=10):
'''
best = gridminimise(learner, features, labels, params, measure={0/1 loss})
Grid search for the settings of parameters that maximises a given measure
This function is equivalent to searching the grid, but does not actually
search the whole grid.
Parameters
----------
learner : a classifier object
features : sequence of features
labels : sequence of labels
params : dictionary of sequences
keys are the options to change,
values are sequences of corresponding elements to try
measure : function, optional
a function that takes labels and outputs and returns the loss.
Default: 0/1 loss. This must be an *additive* function.
nfolds : integer, optional
nr of folds to run, default: 10
Returns
-------
best : a sequence of assignments
'''
# The algorithm is as follows:
#
# for all assignments: error = 0, next_iteration = 0
#
# at each iteration:
# look for assignment with smallest error
# if that is done: return it
# else: perform one more iteration
#
# When the function returns, that assignment has the lowest error of all
# assignments and all the iterations are done. Therefore, other assignments
# could only be worse even if we never computed the whole error!
from ..measures.nfoldcrossvalidation import foldgenerator
if measure is None:
def measure(real, preds):
return np.sum(np.asarray(real) != np.asarray(preds))
labels,_ = normaliselabels(labels)
allassignments = list(_allassignments(params))
N = len(allassignments)
iteration = np.zeros(N, int)
error = np.zeros(N, float)
folds = [(Tr.copy(), Te.copy()) for Tr,Te in foldgenerator(labels, nfolds)]
# foldgenerator might actually decide on a smaller number of folds,
# depending on the distribution of class sizes:
nfolds = len(folds)
while True:
next_pos = (error == error.min())
iter = iteration[next_pos].max()
if iter == nfolds:
(besti,) = np.where(next_pos & (iteration == iter))
besti = besti[0]
return allassignments[besti]
(ps,) = np.where(next_pos & (iteration == iter))
p = ps[0]
_set_assignment(learner, allassignments[p])
train, test = folds[iter]
model = learner.train(features[train], labels[train], normalisedlabels=True)
preds = [model.apply(f) for f in features[test]]
error[p] += measure(labels[test], preds)
iteration[p] += 1
class gridsearch(object):
'''
G = gridsearch(base, measure=accuracy, nfolds=10, params={ 'param1 : [...], param2 : [...]})
Perform a grid search for the best parameter values.
When G.train() is called, then for each combination of p1 in param1, p2 in
param2, ... it performs::
base_classifier.param1 = p1
base_classifier.param2 = p2
...
value[p1, p2,...] = measure(crossvaliation(base_classifier)
it then picks the highest set of parameters and re-learns a model on the
whole data.
Parameters
-----------
base_classifier : classifier to use
measure : function, optional
a function that takes labels and outputs and returns the loss.
Default: 0/1 loss. This must be an *additive* function.
nfolds : integer, optional
Nr of folds
params : dictionary
'''
def __init__(self, base, measure=None, nfolds=10, params={}):
self.params = params
self.base = base
self.nfolds = 10
self.measure = measure
def is_multi_class(self):
return self.base.is_multi_class()
def train(self, features, labels, normalisedlabels=False):
self.best = gridminimise(self.base, features, labels, self.params, self.measure, self.nfolds)
_set_assignment(self.base, self.best)
return self.base.train(features, labels, normalisedlabels=normalisedlabels)
|
[
"lpc@cmu.edu"
] |
lpc@cmu.edu
|
deec8d033bdd63658b11e459446f53fd82d72ea6
|
fd4510e0bf959de7527bd0c62d3b4fb3f78cee5e
|
/drivers/hot.py
|
c4607b3bd6c73821e3566c20a1a2b3c2605203d1
|
[] |
no_license
|
RuoAndo/nii-cyber-security-admin
|
8dde8ab68b0f7fa882adbe8e828546aa1739e685
|
e77b9d581e124f9fd5f721e18cd77d3bccecad19
|
refs/heads/master
| 2022-12-13T21:40:46.330389
| 2022-12-07T14:01:00
| 2022-12-07T14:01:00
| 71,614,880
| 5
| 1
| null | 2020-10-13T08:40:46
| 2016-10-22T03:41:30
|
Python
|
UTF-8
|
Python
| false
| false
| 465
|
py
|
# -*- coding:utf-8 -*-
import psycopg2
conn = psycopg2.connect(
host = "192.168.1.1",
port = 5432,
database="xen460",
user="postgres",
password="")
cur = conn.cursor()
sql = "SELECT relname, n_tup_upd, n_tup_hot_upd, round(n_tup_hot_upd*100/n_tup_upd, 2) AS hot_upd_ratio FROM pg_stat_user_tables WHERE n_tup_upd > 0 ORDER BY hot_upd_ratio;"
cur.execute(sql)
ans =cur.fetchall()
print ans
#conn.commit()
cur.close()
conn.close()
|
[
"ando.ruo@gmail.com"
] |
ando.ruo@gmail.com
|
1a8a4b1ec31cbf8dbe7046d88ecb72043feede10
|
8096e140f0fd38b9492e0fcf307990b1a5bfc3dd
|
/Python/madlibs/version3.py
|
68bb0d07fff518a717900d9beeaf33fbb77b915a
|
[] |
no_license
|
perennialAutodidact/PDXCodeGuild_Projects
|
0cacd44499c0bdc0c157555fe5466df6d8eb09b6
|
28a8258eba41e1fe6c135f54b230436ea7d28678
|
refs/heads/master
| 2022-11-15T22:26:45.775550
| 2020-07-07T17:13:01
| 2020-07-07T17:13:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
game_over = False
while(game_over == False):
print("\nPlease enter:")
adjectives = input("Five adjectives, separated by commas, then press enter: ").replace(' ', '').split(",")
gerunds = input("Two \"-ing\" verbs, separated by commas: ").replace(' ', '').split(",")
place = input("A place: ")
plural_noun = input("A plural noun: ")
noun = input("A noun: ")
adjective_1 = adjectives[0]
adjective_2 = adjectives[1]
adjective_3 = adjectives[2]
adjective_4 = adjectives[3]
adjective_5 = adjectives[4]
verb_1 = gerunds[0]
verb_2 = gerunds[1]
f"\nResult: \n\nIf you go to some {adjective_1} place like {place} , you must know how to deal with wild animals such as bears, wolves and{plural_noun} . The most important of these is the bear. There are three kinds of bear, the grizzly bear, the {adjective_2} bear and the {adjective_3} bear. Bears spend most of their time {verb_1} or {verb_2} . They look very {adjective_4} , but if you make them {adjective_5} , they might bite your {noun} ."
play_again = input("\n\nWould you like to play again? Enter \"y\" for Yes and \"n\" for No: ")
if play_again == "y":
print("\nOkay, let's do it!")
game_over = False
elif play_again == "n":
print("Okay, goodbye!")
game_over = True
|
[
"keegood8@gmail.com"
] |
keegood8@gmail.com
|
43cff66ffbaeb8e9ebc37fe1c4ddde3bf3d93ec0
|
613152f5e19ab472974f0c8a87a38c1bb1c792fc
|
/users/migrations/0002_auto_20190529_1957.py
|
2650af02ec1518e6067eddb54c7ad539ae9ac4a7
|
[] |
no_license
|
KIM-JAEHYUNG/boro-wang
|
ed19181b2282f47a5ba1fe0f84f74f3a76b9902b
|
544bbbcc8b589ab0dfb936734d999c172a201864
|
refs/heads/master
| 2022-12-12T00:53:22.740279
| 2019-08-09T19:22:54
| 2019-08-09T19:22:54
| 201,409,510
| 0
| 1
| null | 2022-12-08T05:59:44
| 2019-08-09T06:54:13
|
HTML
|
UTF-8
|
Python
| false
| false
| 668
|
py
|
# Generated by Django 2.0.13 on 2019-05-29 10:57
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='followers',
field=models.ManyToManyField(related_name='_user_followers_+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='user',
name='followings',
field=models.ManyToManyField(related_name='_user_followings_+', to=settings.AUTH_USER_MODEL),
),
]
|
[
"jsw2095@naver.com"
] |
jsw2095@naver.com
|
35d64fcc70ce2f581774ee385e08a42d750180c6
|
19d43cac1c70ad7c1e202486bd6d0951d774c7ab
|
/a_social_project/settings/__init__.py
|
14b4ac3405b477a70dc3201e2ace18a69ab7c397
|
[] |
no_license
|
Raju-Pinnam/raju-social-app
|
e75d6f11964d08103ce2df85fc49ff5141ce346f
|
b809745df2e7a26a32b5ff151c414f68c83112ed
|
refs/heads/master
| 2023-02-20T23:45:42.649867
| 2021-01-24T18:19:37
| 2021-01-24T18:19:37
| 331,834,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
from .base import *
if config('ENV') == 'LOCAL':
from .local import *
elif config('ENV') == 'PROD':
from .prod import *
|
[
"pinnampadmaraju@gmail.com"
] |
pinnampadmaraju@gmail.com
|
f4bc23d4d623902bcf8c8e4cd2238b727839d0e9
|
3b786d3854e830a4b46ee55851ca186becbfa650
|
/SystemTesting/pylib/nsx/nvp/transport_node/schema/nvp_transport_node_schema.py
|
ad2bff5982153948e66bc22e6ea73f016294ab8c
|
[] |
no_license
|
Cloudxtreme/MyProject
|
d81f8d38684333c22084b88141b712c78b140777
|
5b55817c050b637e2747084290f6206d2e622938
|
refs/heads/master
| 2021-05-31T10:26:42.951835
| 2015-12-10T09:57:04
| 2015-12-10T09:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,113
|
py
|
import base_schema
import credential_schema
import transport_connector_schema
import nvp_transport_zone_binding_schema
import nvp_tag_schema
class TransportNode(base_schema.BaseSchema):
_schema_name = "transportNode"
def __init__(self, py_dict=None):
super(TransportNode, self).__init__()
self.display_name = None
self.transport_connectors = [transport_connector_schema.TransportConnector()]
self.uuid = None
self.tags = [nvp_tag_schema.Tag()]
self.integration_bridge_id = None
self.mgmt_rendezvous_client = None
self.mgmt_rendezvous_server = None
self.credential = credential_schema.Credential()
self.tunnel_probe_random_vlan = None
self.zone_forwarding = None
if py_dict is not None:
self.get_object_from_py_dict(py_dict)
self._uuid_meta = {'isReq':False,'type':'string'}
self._tags_meta = {'isReq':False,'type':'array','maxLen':5}
self._display_name_meta = {'isReq':False,'type':'string',
'default':'<uuid>','maxLen':40}
self._transport_connectors_meta = {'isReq':False,'type':'array'}
self._integration_bridge_id_meta = {'isReq':False,'type':'string'}
self._mgmt_rendezvous_client_meta = {'isReq':False,'type':'boolean',
'default':False}
self._mgmt_rendezvous_server_meta = {'isReq':False,'type':'boolean',
'default':False}
self._credential_meta = {'isReq':False,'type':'object'}
self._tunnel_probe_random_vlan_meta = {'isReq':False,'type':'boolean',
'default':False}
self._zone_forwarding_meta = {'isReq':False,'type':'boolean',
'default':False}
def add_transport_connector(self, transport_connetor):
self.transport_connectors.append(transport_connector)
def add_tag(self, tag):
self.tags.append(tag)
if __name__=='__main__':
pass
|
[
"bpei@vmware.com"
] |
bpei@vmware.com
|
6301dd36de7e57837bd0faca3facc53b2efcd28b
|
9360aeefb3605a3fe0c5e512e52ec3bc0942903f
|
/bin/jupyter-kernel
|
52f4f4b2c7281de7c9de88b24087c584be53a550
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
eliaswalyba/facebound
|
1ff7dc32cc4bf50d14f2e6434af2adfb14300245
|
92500e61b1bc50702ea339563ee8b38b55a31169
|
refs/heads/master
| 2022-07-01T17:42:02.360416
| 2020-05-08T15:23:03
| 2020-05-08T15:23:03
| 262,851,606
| 0
| 0
|
MIT
| 2020-05-10T18:37:03
| 2020-05-10T18:37:02
| null |
UTF-8
|
Python
| false
| false
| 264
|
#!/Users/fodediop/dev/deep-learning/facebound/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_client.kernelapp import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"fodesdiop@gmail.com"
] |
fodesdiop@gmail.com
|
|
b5d47fab88804b7fb68fcbecbdf9db94a8054a2a
|
b6475b69ae89f5a2ffb3c03c21d747bc6fddbdd2
|
/facility/urls.py
|
c7d25fdaceb3e173d5a1a1b442d033ca3d0ba1c2
|
[] |
no_license
|
LeeSuHa98/14-2nd-SHABANG-backend
|
3718516abc1a423da7e97d9363c61bfc7dd5ec4f
|
13cc50c80aca273277bae8d8b15a1623b860ce55
|
refs/heads/main
| 2023-02-18T05:57:27.863525
| 2021-01-19T04:47:20
| 2021-01-19T04:47:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
from django.urls import path
from facility.views import NearFacilityView
urlpatterns = [
path("/<int:id>", NearFacilityView.as_view())
]
|
[
"fergith@naver.com"
] |
fergith@naver.com
|
83a592cb4e501fdef642301cc6c4b81c6f8e086a
|
c2ee51902020596e08aacd4462ab44715432c8f8
|
/pyapprox/tests/test_mixture_model.py
|
817c0ce729c40dff37793eefee398f5e6f383587
|
[
"MIT"
] |
permissive
|
ConnectedSystems/pyapprox
|
bb1462aa8ee54258ee559d734f7bffb744e09c78
|
4f405654c707cba83d211f327c0f0fdbc95efa29
|
refs/heads/master
| 2021-09-13T09:49:59.048327
| 2021-08-29T03:38:43
| 2021-08-29T03:38:43
| 252,080,343
| 0
| 0
|
MIT
| 2020-04-01T05:26:29
| 2020-04-01T05:26:29
| null |
UTF-8
|
Python
| false
| false
| 4,778
|
py
|
import unittest
from functools import partial
from scipy import stats
import numpy as np
from pyapprox.mixture_model import \
get_leja_univariate_quadrature_rules_of_beta_mixture, sample_mixture, \
get_mixture_sparse_grid_quadrature_rule, \
get_mixture_tensor_product_gauss_quadrature, \
compute_grammian_of_mixture_models_using_sparse_grid_quadrature
from pyapprox.univariate_polynomials.quadrature import leja_growth_rule
from pyapprox.multivariate_polynomials import PolynomialChaosExpansion, \
define_poly_options_from_variable_transformation
from pyapprox.variable_transformations import \
define_iid_random_variable_transformation
from pyapprox.indexing import compute_hyperbolic_indices
class TestMixtureModel(unittest.TestCase):
def test_mixture_model_sparse_grid_quadrature(self):
num_vars = 2
level = 5
rv_params = [[2, 4], [4, 2]]
rv_params = [[2, 6], [6, 2]]
# rv_params = [[6,2]]
num_mixtures = len(rv_params)
def function(x): return np.array(
[np.sum(x**2, axis=0), np.sum(x**3, axis=0)+x[0, :]*x[1, :]]).T
mixture_samplers = []
for ii in range(num_mixtures):
def lambda_sampler(a, b, nn): return 2 * \
np.random.beta(a, b, (num_vars, nn))-1
# partial is needed to make sure correct alpha and beta parameters
# are used and not overwritten
sampler = partial(
lambda_sampler, rv_params[ii][0], rv_params[ii][1])
mixture_samplers.append(sampler)
mc_samples = sample_mixture(mixture_samplers, num_vars, int(1e6))
mc_integral = function(mc_samples).mean(axis=0)
# print ('mc',mc_integral)
leja_basename = None
mixtures, mixture_univariate_quadrature_rules = \
get_leja_univariate_quadrature_rules_of_beta_mixture(
rv_params, leja_growth_rule, leja_basename)
mixture_univariate_growth_rules = [leja_growth_rule]*num_mixtures
sg_samples, sg_weights = get_mixture_sparse_grid_quadrature_rule(
mixture_univariate_quadrature_rules,
mixture_univariate_growth_rules,
num_vars, level)
sg_integral = function(sg_samples).T.dot(sg_weights)
# print ('sg',sg_integral)
print('todo: replace with exact analytical integral')
assert np.allclose(sg_integral, mc_integral, atol=1e-2)
mixtures, mixture_univariate_quadrature_rules = \
get_leja_univariate_quadrature_rules_of_beta_mixture(
rv_params, leja_growth_rule, leja_basename,
return_weights_for_all_levels=False)
nquad_samples_1d = leja_growth_rule(level)
tp_samples, tp_weights = get_mixture_tensor_product_gauss_quadrature(
mixture_univariate_quadrature_rules, nquad_samples_1d, num_vars)
tp_integral = function(sg_samples).T.dot(sg_weights)
# print ('tp',tp_integral)
assert np.allclose(tp_integral, mc_integral, atol=1e-2)
def test_compute_grammian_of_mixture_models_using_sparse_grid_quadrature(
self):
num_vars = 2
degree = 3
# rv_params = [[6,2],[2,6]]
rv_params = [[1, 1]]
leja_basename = None
mixtures, mixture_univariate_quadrature_rules = \
get_leja_univariate_quadrature_rules_of_beta_mixture(
rv_params, leja_growth_rule, leja_basename)
poly = PolynomialChaosExpansion()
var_trans = define_iid_random_variable_transformation(
stats.uniform(-1, 2), num_vars)
poly_opts = define_poly_options_from_variable_transformation(var_trans)
indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
poly.configure(poly_opts)
poly.set_indices(indices)
num_mixtures = len(rv_params)
mixture_univariate_growth_rules = [leja_growth_rule]*num_mixtures
grammian_matrix = \
compute_grammian_of_mixture_models_using_sparse_grid_quadrature(
poly.basis_matrix, indices,
mixture_univariate_quadrature_rules,
mixture_univariate_growth_rules, num_vars)
assert (np.all(np.isfinite(grammian_matrix)))
if num_mixtures == 1:
II = np.where(abs(grammian_matrix) > 1e-8)
# check only non-zero inner-products are along diagonal, i.e.
# for integrals of indices multiplied by themselves
assert np.allclose(
II, np.tile(np.arange(indices.shape[1]), (2, 1)))
if __name__ == "__main__":
mixture_model_test_suite = unittest.TestLoader().loadTestsFromTestCase(
TestMixtureModel)
unittest.TextTestRunner(verbosity=2).run(mixture_model_test_suite)
|
[
"29109026+jdjakem@users.noreply.github.com"
] |
29109026+jdjakem@users.noreply.github.com
|
142e1232f03e6245fe3538cf1dbe1a8210792eef
|
3cc7def40ac121c25105ffac6b33e7f12d1c7f97
|
/muddery/typeclasses/locked_exit.py
|
3d5d615a7bdff60213ec1a33bb047d9f5236e029
|
[
"BSD-3-Clause"
] |
permissive
|
ming-inside/muddery
|
8a6d8c9f25fed6137616d109904788927a1059e1
|
8442d6339d4776b8fb81827bcfe0138cf0bc73b5
|
refs/heads/master
| 2020-03-31T11:13:30.792060
| 2018-10-08T16:49:58
| 2018-10-08T16:49:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,615
|
py
|
"""
Exits
Exits are connectors between Rooms. An exit always has a destination property
set and has a single command defined on itself with the same name as its key,
for allowing Characters to traverse the exit to its destination.
"""
from __future__ import print_function
import traceback
from muddery.utils import utils
from muddery.statements.statement_handler import STATEMENT_HANDLER
from muddery.utils.localized_strings_handler import _
from muddery.mappings.typeclass_set import TYPECLASS
from evennia.utils import logger
from django.conf import settings
class MudderyLockedExit(TYPECLASS("EXIT")):
"""
Characters must unlock these exits to pass it.
The view and commands of locked exits are different from unlocked exits.
"""
typeclass_key = "LOCKED_EXIT"
typeclass_name = _("Locked Exit", "typeclasses")
def after_data_loaded(self):
"""
Set data_info to the object."
"""
super(MudderyLockedExit, self).after_data_loaded()
self.unlock_condition = getattr(self.dfield, "unlock_condition", "")
self.unlock_verb = getattr(self.dfield, "unlock_verb", "")
self.locked_desc = getattr(self.dfield, "locked_desc", "")
self.auto_unlock = getattr(self.dfield, "auto_unlock", False)
def at_before_traverse(self, traversing_object):
"""
Called just before an object uses this object to traverse to
another object (i.e. this object is a type of Exit)
Args:
traversing_object (Object): The object traversing us.
Notes:
The target destination should normally be available as
`self.destination`.
If this method returns False/None, the traverse is cancelled
before it is even started.
"""
if not super(MudderyLockedExit, self).at_before_traverse(traversing_object):
return False
# Only can pass exits which have already unlockde.
if traversing_object.is_exit_unlocked(self.get_data_key()):
return True
if self.auto_unlock:
if self.can_unlock(traversing_object):
# Automatically unlock the exit when a character looking at it.
traversing_object.unlock_exit(self)
return True
# Show the object's appearance.
appearance = self.get_appearance(traversing_object)
traversing_object.msg({"look_obj": appearance})
return False
def can_unlock(self, caller):
"""
Unlock an exit.
"""
# Only can unlock exits which match there conditions.
return STATEMENT_HANDLER.match_condition(self.unlock_condition, caller, self)
def get_appearance(self, caller):
"""
This is a convenient hook for a 'look'
command to call.
"""
# Get name and description.
if caller.is_exit_unlocked(self.get_data_key()):
# If is unlocked, use common appearance.
return super(MudderyLockedExit, self).get_appearance(caller)
can_unlock = self.can_unlock(caller)
if self.auto_unlock and can_unlock:
# Automatically unlock the exit when a character looking at it.
caller.unlock_exit(self)
# If is unlocked, use common appearance.
return super(MudderyLockedExit, self).get_appearance(caller)
cmds = []
if can_unlock:
# show unlock command
verb = self.unlock_verb
if not verb:
verb = _("Unlock")
cmds = [{"name": verb, "cmd": "unlock_exit", "args": self.dbref}]
info = {"dbref": self.dbref,
"name": self.name,
"desc": self.locked_desc,
"cmds": cmds}
return info
def get_available_commands(self, caller):
"""
This returns a list of available commands.
"args" must be a string without ' and ", usually it is self.dbref.
"""
if caller.is_exit_unlocked(self.get_data_key()):
# If is unlocked, use common commands.
return super(MudderyLockedExit, self).get_available_commands(caller)
cmds = []
can_unlock = STATEMENT_HANDLER.match_condition(self.unlock_condition, caller, self)
if can_unlock:
# show unlock command
verb = self.unlock_verb
if not verb:
verb = _("Unlock")
cmds = [{"name": verb, "cmd": "unlock", "args": self.dbref}]
return cmds
|
[
"luyijun999@gmail.com"
] |
luyijun999@gmail.com
|
efd659f109141d794d98452979ad0f7016c59ad0
|
a54007706a09b387690f79fd7ffd889decad42f1
|
/day11/code/03_pygame框架使用.py
|
500591bf438640322dcfd73908d34c025d1fabd3
|
[] |
no_license
|
lvah/201903python
|
d425534544a1f91e5b80b5ff0de5ca34037fe6e9
|
1415fcb7697dfa2884d94dcd8963477e12fe0624
|
refs/heads/master
| 2020-07-06T16:45:37.882819
| 2019-09-08T10:13:07
| 2019-09-08T10:13:07
| 203,082,401
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
import pygame
import sys
pygame.init() # 初始化pygame
size = width, height = 320, 240 # 设置窗口大小
screen = pygame.display.set_mode(size) # 显示窗口
while True: # 死循环确保窗口一直显示
for event in pygame.event.get(): # 遍历所有事件
if event.type == pygame.QUIT: # 如果单击关闭窗口,则退出
# exit(0) --- 结束程序, 0代表正常退出,
sys.exit(0)
# pygame.KEYDOWN: 代表按下键盘
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
print("UP")
elif event.key == pygame.K_DOWN:
print('DOWN')
elif event.key == pygame.K_q: # Q
sys.exit(0)
pygame.quit() # 退出pygame
|
[
"root@foundation0.ilt.example.com"
] |
root@foundation0.ilt.example.com
|
90413f84cf6b0e827f63c0a6370c22e5db575ae4
|
a8062308fb3bf6c8952257504a50c3e97d801294
|
/test/test_524_longest_word_in_dictionary_through_deleting.py
|
dd8614be77d386c33da05a8c850208c59e040bcc
|
[] |
no_license
|
wan-catherine/Leetcode
|
650d697a873ad23c0b64d08ad525bf9fcdb62b1b
|
238995bd23c8a6c40c6035890e94baa2473d4bbc
|
refs/heads/master
| 2023-09-01T00:56:27.677230
| 2023-08-31T00:49:31
| 2023-08-31T00:49:31
| 143,770,000
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
from unittest import TestCase
from problems.N524_Longest_Word_In_Dictionary_Through_Deleting import Solution
class TestSolution(TestCase):
def test_findLongestWord(self):
self.assertEqual("apple", Solution().findLongestWord(s = "abpcplea", d = ["ale","apple","monkey","plea"]))
def test_findLongestWord_1(self):
self.assertEqual("a", Solution().findLongestWord(s = "abpcplea", d = ["a","b","c"]))
def test_findLongestWord_2(self):
s = "wordgoodgoodgoodbestword"
d = ["word", "good", "best", "good"]
self.assertEqual("best", Solution().findLongestWord(s, d))
|
[
"rarry2012@gmail.com"
] |
rarry2012@gmail.com
|
b3f79671754cfe80ab04743bc318dc84ee6f0b93
|
c3e47ce05f1d6a237a03742ce431d6958ca388b2
|
/crowd/plug-in/bkp/whatIsCmd.py
|
c65eacdc5846cdb8491ebdd3b6694ae6e3f60396
|
[] |
no_license
|
fsanges/subins_tutorials
|
27426ac71365124c28e924c502484c5bb172f715
|
9c50ec8e3200c29f1c7141ca013cbb0a5b4f8e4e
|
refs/heads/master
| 2020-09-16T04:38:43.696690
| 2019-11-23T13:06:51
| 2019-11-23T13:06:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,481
|
py
|
"""
To use, make sure that whatIsCmd.py is in your MAYA_PLUG_IN_PATH (and the C++
version is not) then do the following:
import maya.cmds
maya.cmds.loadPlugin("whatIsCmd.py")
maya.cmds.spWhatIs()
"""
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
# command
class WhatIsCmd(OpenMayaMPx.MPxCommand):
kPluginCmdName = "spWhatIs"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def cmdCreator():
return OpenMayaMPx.asMPxPtr( WhatIsCmd() )
def doIt(self, args):
selectList = OpenMaya.MSelectionList()
OpenMaya.MGlobal.getActiveSelectionList( selectList )
node = OpenMaya.MObject()
depFn = OpenMaya.MFnDependencyNode()
iter = OpenMaya.MItSelectionList(selectList)
while (iter.isDone() == 0):
iter.getDependNode( node )
depFn.setObject(node)
name = depFn.name()
types = []
OpenMaya.MGlobal.getFunctionSetList( node, types )
print "Name: %s" % name
print "Type: %s" % node.apiTypeStr()
sys.stdout.write( "Function Sets: " )
sys.stdout.write(", ".join(types) + '\n')
iter.next()
# Initialize the script plug-in
def initializePlugin(plugin):
pluginFn = OpenMayaMPx.MFnPlugin(plugin)
try:
pluginFn.registerCommand(
WhatIsCmd.kPluginCmdName, WhatIsCmd.cmdCreator
)
except:
sys.stderr.write(
"Failed to register command: %s\n" % WhatIsCmd.kPluginCmdName
)
raise
# Uninitialize the script plug-in
def uninitializePlugin(plugin):
pluginFn = OpenMayaMPx.MFnPlugin(plugin)
try:
pluginFn.deregisterCommand(WhatIsCmd.kPluginCmdName)
except:
sys.stderr.write(
"Failed to unregister command: %s\n" % WhatIsCmd.kPluginCmdName
)
raise
#-
# ==========================================================================
# Copyright (C) 2011 Autodesk, Inc. and/or its licensors. All
# rights reserved.
#
# The coded instructions, statements, computer programs, and/or related
# material (collectively the "Data") in these files contain unpublished
# information proprietary to Autodesk, Inc. ("Autodesk") and/or its
# licensors, which is protected by U.S. and Canadian federal copyright
# law and by international treaties.
#
# The Data is provided for use exclusively by You. You have the right
# to use, modify, and incorporate this Data into other products for
# purposes authorized by the Autodesk software license agreement,
# without fee.
#
# The copyright notices in the Software and this entire statement,
# including the above license grant, this restriction and the
# following disclaimer, must be included in all copies of the
# Software, in whole or in part, and all derivative works of
# the Software, unless such copies or derivative works are solely
# in the form of machine-executable object code generated by a
# source language processor.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
# AUTODESK DOES NOT MAKE AND HEREBY DISCLAIMS ANY EXPRESS OR IMPLIED
# WARRANTIES INCLUDING, BUT NOT LIMITED TO, THE WARRANTIES OF
# NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR
# PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE, OR
# TRADE PRACTICE. IN NO EVENT WILL AUTODESK AND/OR ITS LICENSORS
# BE LIABLE FOR ANY LOST REVENUES, DATA, OR PROFITS, OR SPECIAL,
# DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES, EVEN IF AUTODESK
# AND/OR ITS LICENSORS HAS BEEN ADVISED OF THE POSSIBILITY
# OR PROBABILITY OF SUCH DAMAGES.
#
# ==========================================================================
#+
|
[
"subing85@gmail.com"
] |
subing85@gmail.com
|
218811171578585d9acec414683ae88e25e5ede6
|
a66460a46611483dfbdc94c7996893f427e60d97
|
/ansible/my_env/lib/python2.7/site-packages/ansible/utils/module_docs_fragments/azure_tags.py
|
ff8579fde490d0ca15e0b0c7577ca688d53f5209
|
[
"MIT"
] |
permissive
|
otus-devops-2019-02/yyashkin_infra
|
06b57807dde26f94f501828c07503d6bf1d70816
|
0cd0c003884155ac922e3e301305ac202de7028c
|
refs/heads/master
| 2020-04-29T02:42:22.056724
| 2019-05-15T16:24:35
| 2019-05-15T16:24:35
| 175,780,718
| 0
| 0
|
MIT
| 2019-05-15T16:24:36
| 2019-03-15T08:37:35
|
HCL
|
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
# Azure doc fragment
DOCUMENTATION = '''
options:
tags:
description:
- >
Dictionary of string:string pairs to assign as metadata to the object.
Metadata tags on the object will be updated with any provided values. To remove tags set append_tags option to false.
append_tags:
description:
- Use to control if tags field is canonical or just appends to existing tags.
When canonical, any tags not found in the tags parameter will be removed from the object's metadata.
type: bool
default: 'yes'
'''
|
[
"theyashkins@gmail.com"
] |
theyashkins@gmail.com
|
e6bb8d0ea125800830cc4c6c06f82f5d3bfcf228
|
5a319a47587653dab9472eab4055144bcfd25967
|
/src/opendr/perception/face_recognition/algorithm/backbone/model_mobilenet.py
|
2b552ba355b2561135316393c4e933e363248854
|
[
"Apache-2.0"
] |
permissive
|
passalis/demos
|
612a7e07ba125d9815e110ff483132e162759dd7
|
d8aeb045ee1832418fa232bc1c73783d72d10cf7
|
refs/heads/main
| 2023-07-19T03:25:05.269333
| 2021-09-21T19:59:42
| 2021-09-21T19:59:42
| 397,585,032
| 1
| 0
|
Apache-2.0
| 2021-09-21T19:58:51
| 2021-08-18T12:02:00
|
Python
|
UTF-8
|
Python
| false
| false
| 6,419
|
py
|
from torch import nn
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class ConvBNReLU(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
padding = (kernel_size - 1) // 2
super(ConvBNReLU, self).__init__(
nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
nn.BatchNorm2d(out_planes),
nn.ReLU6(inplace=True)
)
class DepthwiseSeparableConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, padding, bias=False):
super(DepthwiseSeparableConv, self).__init__()
self.depthwise = nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size, padding=padding, groups=in_planes,
bias=bias)
self.pointwise = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=bias)
self.bn1 = nn.BatchNorm2d(in_planes)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu = nn.ReLU()
def forward(self, x):
x = self.depthwise(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pointwise(x)
x = self.bn2(x)
x = self.relu(x)
return x
class GDConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, padding, bias=False):
super(GDConv, self).__init__()
self.depthwise = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, padding=padding, groups=in_planes,
bias=bias)
self.bn = nn.BatchNorm2d(in_planes)
def forward(self, x):
x = self.depthwise(x)
x = self.bn(x)
return x
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
layers.extend([
# dw
ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileFaceNet(nn.Module):
def __init__(self, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileFaceNet, self).__init__()
block = InvertedResidual
input_channel = 64
last_channel = 512
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[2, 64, 5, 2],
[4, 128, 1, 2],
[2, 128, 6, 1],
[4, 128, 1, 2],
[2, 128, 2, 1],
]
# Only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# Building first layer
# input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
self.conv1 = ConvBNReLU(3, input_channel, stride=2)
self.dw_conv = DepthwiseSeparableConv(in_planes=64, out_planes=64, kernel_size=3, padding=1)
features = list()
# Building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# Building last several layers
self.conv2 = ConvBNReLU(input_channel, self.last_channel, kernel_size=1)
self.gdconv = GDConv(in_planes=512, out_planes=512, kernel_size=7, padding=0)
self.conv3 = nn.Conv2d(512, 128, kernel_size=1)
self.bn = nn.BatchNorm2d(128)
# Make it nn.Sequential
self.features = nn.Sequential(*features)
# Weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def forward(self, x):
x = self.conv1(x)
x = self.dw_conv(x)
x = self.features(x)
x = self.conv2(x)
x = self.gdconv(x)
x = self.conv3(x)
x = self.bn(x)
x = x.view(x.size(0), -1)
return x
|
[
"passalis@csd.auth.gr"
] |
passalis@csd.auth.gr
|
06c35de26a5846395b4534c99e2ccfe287ad949d
|
cec68acfc0187b7d92fb7d6e5107058e3f8269ea
|
/GUI/GUI.py
|
478f934901894f957075743f6d6cdfe1d7fa20ea
|
[] |
no_license
|
vektorelpython/Python8
|
441575224100a687467c4934f7c741aa0c4bd087
|
d135fbf1444d56a0da38c42fd2e8feda48646f49
|
refs/heads/master
| 2022-01-18T12:17:40.387422
| 2019-09-07T13:47:55
| 2019-09-07T13:47:55
| 205,534,765
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
import sys
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QIcon
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'PyQt5 örnek window'
self.left = 50
self.top = 50
self.width = 640
self.height = 480
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
|
[
"Kurs"
] |
Kurs
|
3bfa4d51d40d0d78e7436d184f839ab558a13f1b
|
4bb0faf7b0a05b3d487ff386783adf742f26df86
|
/run.py
|
aa3f35368664d6bc7bb5e3e1fc0855b016a2a113
|
[] |
no_license
|
AIXME/ins-scraping
|
f7ba1c5180489e4a497906d06b3bdcb57f05c14b
|
250761c436fc0fe033c7c1493f8e0aa2335c1409
|
refs/heads/master
| 2021-01-04T15:56:37.476985
| 2019-07-11T08:24:34
| 2019-07-11T08:24:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
from core.info import InsInfo
if __name__ == "__main__":
username = input('输入你的ins账号:')
password = input('输入你的password:')
target = input('输入需要爬取的用户:')
ins = InsInfo(username,password,nickname=target)
ins.login().followers().combo().following().export_all().combo().post_images().save()
input('采集已结束,输入任意键退出:')
ins.driver.close()
|
[
"bhg889@163.com"
] |
bhg889@163.com
|
aca277c2fb030993e12a39e51af2e0754de6ca1d
|
5ba34cad2a933adfed6b5df5b1229e48038596d4
|
/fabfile.py
|
719cea8e51a7f90de04799ef68dbea18f7f5b9aa
|
[
"MIT"
] |
permissive
|
Firmicety/fomalhaut-panel
|
bececa59cd42edd8793440a652d206b250591cb9
|
3e662db65a7ca654f75a19e38cb0931be21f92e9
|
refs/heads/master
| 2020-06-06T07:52:27.211654
| 2019-06-20T11:38:39
| 2019-06-20T11:38:39
| 192,683,216
| 0
| 0
|
MIT
| 2019-06-19T07:39:07
| 2019-06-19T07:39:07
| null |
UTF-8
|
Python
| true
| false
| 3,882
|
py
|
# -*- coding: utf-8 -*-
# Created by restran on 2016/7/27
from __future__ import unicode_literals, absolute_import
from fabric.api import *
from datetime import datetime
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# 登录用户和主机名:
env.user = 'root'
# env.password = 'password'
# 如果有多个主机,fabric会自动依次部署
env.hosts = ['192.168.14.101']
TAR_FILE_NAME = 'fomalhaut_dashboard_deploy.tar.gz'
def pack():
"""
定义一个pack任务, 打一个tar包
:return:
"""
local('rm -rf dist')
local('fis3 release -d dist')
tar_files = ['*.py', 'static/*', 'templates/*', 'common/*',
'fomalhaut/*', 'dashboard/*', 'accounts/*', 'requirements.txt']
exclude_files = ['fabfile.py', 'deploy/*', '*.tar.gz', '.DS_Store', '*/.DS_Store', '__pycache__/*', '*.log']
exclude_files = ['--exclude=\'%s\'' % t for t in exclude_files]
local('rm -f %s' % TAR_FILE_NAME)
with lcd('dist'):
local('tar -czvf %s %s %s' %
(TAR_FILE_NAME, ' '.join(exclude_files), ' '.join(tar_files)))
print('在当前目录创建一个打包文件: %s' % TAR_FILE_NAME)
def backup():
now = datetime.now().strftime('%Y%m%d_%H%M%S')
backup_file = '/home/backup/fomalhaut_dashboard_%s.tar.gz' % now
# 如果不存在, 则创建文件夹
run('mkdir -p /home/backup')
exclude_files = ['*.log', '*.pyc']
exclude_files = ['--exclude=\'%s\'' % t for t in exclude_files]
run('tar -czvf %s %s /home/python/fomalhaut_dashboard' % (backup_file, ' '.join(exclude_files)))
print('创建备份文件: %s' % backup_file)
def deploy():
"""
定义一个部署任务
:return:
"""
# 先进行打包
pack()
# 备份服务器上的版本
backup()
# 远程服务器的临时文件
remote_tmp_tar = '/tmp/%s' % TAR_FILE_NAME
run('rm -f %s' % remote_tmp_tar)
# 上传tar文件至远程服务器
put('dist/%s' % TAR_FILE_NAME, remote_tmp_tar)
# 解压
remote_dist_dir = '/home/python/fomalhaut_dashboard'
# 如果不存在, 则创建文件夹
run('mkdir -p %s' % remote_dist_dir)
name = 'fomalhaut_dashboard'
with cd(remote_dist_dir):
print('解压文件到到目录: %s' % remote_dist_dir)
run('tar -xzvf %s' % remote_tmp_tar)
print('安装 requirements.txt 中的依赖包')
run('pip install -r requirements.txt')
remote_settings_file = '%s/fomalhaut/settings.py' % remote_dist_dir
settings_file = 'deploy/settings.py'
print('上传 settings.py 文件 %s' % settings_file)
put(settings_file, remote_settings_file)
# 创建日志文件夹, 因为当前启动 django 进程用的是 nobody, 会没有权限
remote_logs_path = '%s/logs' % remote_dist_dir
# 如果不存在, 则创建文件夹
run('mkdir -p %s' % remote_logs_path)
nginx_file = 'deploy/%s.conf' % name
remote_nginx_file = '/etc/nginx/conf.d/%s.conf' % name
print('上传 nginx 配置文件 %s' % nginx_file)
put(nginx_file, remote_nginx_file)
print('设置文件夹权限')
run('chown -R oxygen /home/python/%s' % name)
supervisor_file = 'deploy/%s.ini' % name
remote_supervisor_file = '/etc/supervisord.d/%s.ini' % name
print('上传 supervisor 配置文件 %s' % supervisor_file)
put(supervisor_file, remote_supervisor_file)
run('supervisorctl reload')
run('nginx -s reload')
run('nginx -t')
run('supervisorctl restart fomalhaut_dashboard:')
run('supervisorctl restart fomalhaut_celery_beat:')
run('supervisorctl restart fomalhaut_celery_worker:')
# run('service nginx restart')
# 删除本地的打包文件
local('rm -f %s' % TAR_FILE_NAME)
local('rm -rf dist')
# run('supervisorctl restart ')
|
[
"grestran@gmail.com"
] |
grestran@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.