hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1aa5cb26673be28d20ce4e1d7ff0bb74d202e4d0 | 274 | py | Python | venv/Lib/site-packages/astroid/__pkginfo__.py | AnxhelaMehmetaj/is219_flask | 1e88579f14a96c9826e9452b3c7f8e6477577ef7 | [
"BSD-3-Clause"
] | null | null | null | venv/Lib/site-packages/astroid/__pkginfo__.py | AnxhelaMehmetaj/is219_flask | 1e88579f14a96c9826e9452b3c7f8e6477577ef7 | [
"BSD-3-Clause"
] | null | null | null | venv/Lib/site-packages/astroid/__pkginfo__.py | AnxhelaMehmetaj/is219_flask | 1e88579f14a96c9826e9452b3c7f8e6477577ef7 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt
# Package version string; ``version`` is kept as a backward-compatible
# alias of ``__version__`` for older consumers.
__version__ = "2.11.2"
version = __version__
| 39.142857 | 85 | 0.759124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.850365 |
1aa645d6214dc6ac17d39206b4dd6bba8e6281ea | 921 | py | Python | T_dynamic_programming/problems/A_longest_common_subsequence.py | Princeton21/DSA | 0f2321b284fc54f4ddf73733cc1a8d05e549aeea | [
"MIT"
] | 58 | 2021-01-06T10:05:51.000Z | 2022-02-10T05:15:19.000Z | T_dynamic_programming/problems/A_longest_common_subsequence.py | Princeton21/DSA | 0f2321b284fc54f4ddf73733cc1a8d05e549aeea | [
"MIT"
] | 5 | 2021-02-22T04:14:24.000Z | 2021-12-26T09:19:17.000Z | T_dynamic_programming/problems/A_longest_common_subsequence.py | Princeton21/DSA | 0f2321b284fc54f4ddf73733cc1a8d05e549aeea | [
"MIT"
] | 27 | 2021-02-09T13:58:33.000Z | 2022-03-06T03:48:08.000Z | def method1(X, Y):
m = len(X)
n = len(Y)
L = [[None] * (n + 1) for i in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
L[i][j] = 0
elif X[i - 1] == Y[j - 1]:
L[i][j] = L[i - 1][j - 1] + 1
else:
L[i][j] = max(L[i - 1][j], L[i][j - 1])
return L[m][n]
def method2(X, Y, m, n):
    """Length of the longest common subsequence of X[:m] and Y[:n].

    Plain (unmemoized) recursion -- exponential time; kept naive on purpose
    as a baseline against the DP version.
    """
    # An empty prefix shares no subsequence with anything.
    if m == 0 or n == 0:
        return 0
    # Matching tail characters extend the LCS by one.
    if X[m - 1] == Y[n - 1]:
        return method2(X, Y, m - 1, n - 1) + 1
    # Otherwise drop the tail of either string and keep the better result.
    drop_y = method2(X, Y, m, n - 1)
    drop_x = method2(X, Y, m - 1, n)
    return max(drop_x, drop_y)
if __name__ == "__main__":
"""
from timeit import timeit
X = "AGGTAB"
Y = "GXTXAYB"
print(timeit(lambda: method1(X, Y), number=10000)) # 0.14817858800233807
print(
timeit(lambda: method2(X, Y, len(X), len(Y)), number=10000)
) # 0.5299446069984697
""" | 23.025 | 77 | 0.427796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.295331 |
1aa6c9a331682a90ddc2469eb7ca101775ba532a | 661 | py | Python | AtCoder/ABC057/D.py | takaaki82/Java-Lessons | c4f11462bf84c091527dde5f25068498bfb2cc49 | [
"MIT"
] | 1 | 2018-11-25T04:15:45.000Z | 2018-11-25T04:15:45.000Z | AtCoder/ABC057/D.py | takaaki82/Java-Lessons | c4f11462bf84c091527dde5f25068498bfb2cc49 | [
"MIT"
] | null | null | null | AtCoder/ABC057/D.py | takaaki82/Java-Lessons | c4f11462bf84c091527dde5f25068498bfb2cc49 | [
"MIT"
] | 2 | 2018-08-08T13:01:14.000Z | 2018-11-25T12:38:36.000Z | def combination(n, r):
r = min(n - r, r)
result = 1
for i in range(n, n - r, -1):
result *= i
for i in range(1, r + 1):
result //= i
return result
# AtCoder ABC057 D: pick between A and B of the N values to maximise the
# mean; print the maximal mean and the number of selections achieving it.
N, A, B = map(int, input().split())
v_list = list(map(int, input().split()))
v_list.sort(reverse=True)
# The maximal mean always comes from the A largest values.
mean_max = sum(v_list[:A]) / A
comb = 0
if v_list[0] != v_list[A - 1]:
    # Exactly A items must be taken; the only freedom is which copies of the
    # boundary value v_list[A-1] are used: C(total copies, copies inside top A).
    x = v_list.count(v_list[A - 1])
    y = v_list[:A].count(v_list[A - 1])
    comb = combination(x, y)
else:
    # All of the top A values are equal, so any selection size i in [A, B]
    # made entirely of that value keeps the same mean.
    x = v_list.count(v_list[A - 1])
    for i in range(A, B + 1):
        if v_list[i - 1] == v_list[0]:
            comb += combination(x, i)
print("{:.10f}".format(mean_max))
print(comb)
| 22.793103 | 40 | 0.534039 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.013616 |
1aa76cb1ca43006b2cc5f674dcb72f031f583b9e | 545 | py | Python | tests/features/steps/triggers_repo/test_triggers_list.py | dataloop-ai/dtlpy | 2c73831da54686e047ab6aefd8f12a8e53ea97c2 | [
"Apache-2.0"
] | 10 | 2020-05-21T06:25:35.000Z | 2022-01-07T20:34:03.000Z | tests/features/steps/triggers_repo/test_triggers_list.py | dataloop-ai/dtlpy | 2c73831da54686e047ab6aefd8f12a8e53ea97c2 | [
"Apache-2.0"
] | 22 | 2019-11-17T17:25:16.000Z | 2022-03-10T15:14:28.000Z | tests/features/steps/triggers_repo/test_triggers_list.py | dataloop-ai/dtlpy | 2c73831da54686e047ab6aefd8f12a8e53ea97c2 | [
"Apache-2.0"
] | 8 | 2020-03-05T16:23:55.000Z | 2021-12-27T11:10:42.000Z | import behave
@behave.when(u"I list triggers")
def step_impl(context):
    """Fetch the service's trigger list and stash it on the behave context."""
    context.trigger_list = context.service.triggers.list()
@behave.then(u'I receive a Trigger list of "{count}" objects')
def step_impl(context, count):
    """Assert the stored list holds ``count`` items, each a Trigger/CronTrigger."""
    assert context.trigger_list.items_count == int(count)
    if int(count) > 0:
        # Paged result: iterate every page and type-check each entity.
        for page in context.trigger_list:
            for trigger in page:
                assert isinstance(trigger, context.dl.entities.Trigger) or \
                       isinstance(trigger, context.dl.entities.trigger.CronTrigger)
| 32.058824 | 83 | 0.677064 | 0 | 0 | 0 | 0 | 525 | 0.963303 | 0 | 0 | 66 | 0.121101 |
1aa7720202db2a1c258c5499dab4c82e6d875c22 | 437 | py | Python | P0053.py | sebastianaldi17/ProjectEuler | 19562fba3456ec904bcc264fb786a92610e42622 | [
"MIT"
] | null | null | null | P0053.py | sebastianaldi17/ProjectEuler | 19562fba3456ec904bcc264fb786a92610e42622 | [
"MIT"
] | null | null | null | P0053.py | sebastianaldi17/ProjectEuler | 19562fba3456ec904bcc264fb786a92610e42622 | [
"MIT"
] | null | null | null | # Combinatoric selections
# https://projecteuler.net/problem=53
from collections import defaultdict
from copy import deepcopy
from itertools import permutations
from math import fmod, sqrt, factorial
from time import time
# Count values of C(n, r), 1 <= n <= 100, that exceed one million.
start = time()
# Pre-compute factorials once so each C(n, r) costs O(1).
f = [factorial(i) for i in range(101)]
ans = 0
for n in range(1, 101):
    for r in range(1, n + 1):
        # Integer floor division is exact here (f[r] * f[n-r] divides f[n]);
        # float '/' on 100!-sized numbers can lose precision near the
        # 1,000,000 threshold.
        if f[n] // (f[r] * f[n - r]) >= 1000000:
            ans += 1
print(ans)
print(time() - start, "seconds") | 24.277778 | 54 | 0.688787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.162471 |
1aa865526377a0edc68f44ca28d256cfe6780eb3 | 99 | py | Python | prohmr/models/heads/__init__.py | akashsengupta1997/ProHMR | 7015a3d070c79b4571d43abdf5e522468091a94d | [
"BSD-3-Clause"
] | 120 | 2021-08-27T23:21:17.000Z | 2022-03-30T03:34:07.000Z | prohmr/models/heads/__init__.py | akashsengupta1997/ProHMR | 7015a3d070c79b4571d43abdf5e522468091a94d | [
"BSD-3-Clause"
] | 17 | 2021-09-08T10:10:37.000Z | 2022-03-17T02:40:21.000Z | prohmr/models/heads/__init__.py | akashsengupta1997/ProHMR | 7015a3d070c79b4571d43abdf5e522468091a94d | [
"BSD-3-Clause"
] | 10 | 2021-08-31T06:08:49.000Z | 2022-03-29T21:51:14.000Z | from .smpl_flow import SMPLFlow
from .skeleton_flow import SkeletonFlow
from .fc_head import FCHead | 33 | 39 | 0.858586 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1aaace265db2e8442d60e8792192c99cd4ab024b | 327 | py | Python | vibhaga/test/demo_abstract.py | keremkoseoglu/vibhaga | 6978ff55e5a6e945098e7127ce49cfef4b0747d0 | [
"MIT"
] | null | null | null | vibhaga/test/demo_abstract.py | keremkoseoglu/vibhaga | 6978ff55e5a6e945098e7127ce49cfef4b0747d0 | [
"MIT"
] | 3 | 2020-06-09T04:20:48.000Z | 2022-03-31T12:44:59.000Z | vibhaga/test/demo_abstract.py | keremkoseoglu/vibhaga | 6978ff55e5a6e945098e7127ce49cfef4b0747d0 | [
"MIT"
] | null | null | null | """ Module for demo abstract class """
from abc import ABC, abstractmethod
class DemoAbstract(ABC):
    """Abstract base class used by the test-suite demos.

    Concrete subclasses must implement :meth:`demo_abstract_method`; the
    inherited :meth:`demo_method` is a shared no-op.
    """
    @abstractmethod
    def demo_abstract_method(self):
        """Hook that every concrete subclass has to implement."""

    def demo_method(self):
        """Concrete no-op method available on all subclasses."""
| 21.8 | 52 | 0.642202 | 249 | 0.761468 | 0 | 0 | 88 | 0.269113 | 0 | 0 | 142 | 0.434251 |
1aab51dc0877d9fd63a1f310c0d32a392b291683 | 1,953 | py | Python | Client_side.py | SanRam/server-client-chat-python | 010a296db57c352a2ace7eac7206fa641981538b | [
"MIT"
] | null | null | null | Client_side.py | SanRam/server-client-chat-python | 010a296db57c352a2ace7eac7206fa641981538b | [
"MIT"
] | null | null | null | Client_side.py | SanRam/server-client-chat-python | 010a296db57c352a2ace7eac7206fa641981538b | [
"MIT"
] | null | null | null | # The client program connects to server and sends data to other connected
# clients through the server
import socket
import thread
import sys
def recv_data():
    "Receive data from other clients connected to server"
    # NOTE(review): Python 2 code (print statement, `thread` module).
    # The local variable below shadows this function's own name, so the
    # function cannot reference itself after the first assignment.
    while 1:
        try:
            recv_data = client_socket.recv(4096)
        except:
            #Handle the case when server process terminates
            print ("Server closed connection, thread exiting.")
            thread.interrupt_main()
            break
        if not recv_data:
            # Recv with no data, server closed connection
            print ("Server closed connection, thread exiting.")
            thread.interrupt_main()
            break
        else:
            print '{}'.format(recv_data)
def send_data():
    "Send data from other clients connected to server"
    # Reads console input, prefixes the user's name, and sends the result to
    # the server; a lone "q" also stops this sender thread.
    # NOTE(review): the second quit test compares the *prefixed* string
    # `send_data` to "Q", so an upper-case quit can never match -- presumably
    # `send_data_1 == "Q"` was intended; verify before relying on it.
    while 1:
        send_data_1 = str(raw_input(''))
        send_data=name_id+': '+send_data_1
        if send_data_1 == "q" or send_data == "Q":
            client_socket.send(send_data)
            thread.interrupt_main()
            break
        else:
            client_socket.send(send_data)
if __name__ == "__main__":
    # Interactive entry point: connect to the hard-coded chat server and run
    # one sender and one receiver thread until either side quits.
    print ('\t\t******* Socket Programming Using Python ********')
    print ('\t\t******* TCP/IP Chat Client ********')
    print ('\nConnecting to server at 173.253.224.102:5000')
    global name_id
    name_id= str(raw_input('Enter Username: '))
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect(('173.253.224.102', 5000))
    print ('Connected to server at 173.253.224.102:5000')
    thread.start_new_thread(send_data,())
    thread.start_new_thread(recv_data,())
    # Busy-wait keeps the main thread alive; thread.interrupt_main() from a
    # worker raises KeyboardInterrupt here, caught by the bare except below.
    try:
        while 1:
            continue
    except:
        print ("Client program quits....")
        client_socket.close()
| 30.046154 | 75 | 0.561188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 675 | 0.345622 |
1aae905a964704653256bef35e2f3ec3e8f5f5c0 | 4,487 | py | Python | 2D/__animacija2D.py | KSpenko/mafijaVikend_numDelav | 1f94c764a16fca04f0e9aa89190e8150bb9a5830 | [
"MIT"
] | 1 | 2022-03-17T20:35:00.000Z | 2022-03-17T20:35:00.000Z | 2D/__animacija2D.py | KSpenko/mafijaVikend_numDelav | 1f94c764a16fca04f0e9aa89190e8150bb9a5830 | [
"MIT"
] | null | null | null | 2D/__animacija2D.py | KSpenko/mafijaVikend_numDelav | 1f94c764a16fca04f0e9aa89190e8150bb9a5830 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.animation as animation
class animacija2D:
    """Animate optimisation runs over a 2-D objective function.

    The constructor draws ``f`` as a 3-D surface; ``racunaj`` records runs of
    a stepping method on top of it, and the remaining methods drive the
    matplotlib animation of those runs.
    """
    def __init__(self, f, xInterval, yInterval, fN=20):
        """ Set up the 3-D figure and sketch the objective function. """
        self.f = f
        self.xlim = xInterval
        self.ylim = yInterval
        self.fN = fN
        self.runs = []  # one list of [x, y, f(x, y)] points per recorded run
        # Sample f on a 30x30 grid to draw the background surface.
        x = np.linspace(self.xlim[0], self.xlim[1], 30)
        y = np.linspace(self.ylim[0], self.ylim[1], 30)
        X, Y = np.meshgrid(x, y)
        fxy = np.zeros(X.shape)
        for i in range(len(fxy)):
            for j in range(len(fxy[0])):
                fxy[i,j] = self.f([X[i,j], Y[i,j]])
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(projection='3d')
        self.ax.plot_surface(X, Y, fxy, cmap=cm.coolwarm, linewidth=0, antialiased=False, alpha=0.5)
        self.ax.set_xlabel('x')
        self.ax.set_ylabel('y')
        self.ax.set_zlabel('f(x,y)')
        self.ax.set_xlim(self.xlim)
        self.ax.set_ylim(self.ylim)
        # Pad the z-range by 10% so markers at the extremes stay visible.
        zlim = [np.amin(fxy), np.amax(fxy)]
        self.zlim = (zlim[0]-0.1*abs(zlim[1]-zlim[0]), zlim[1]+0.1*abs(zlim[1]-zlim[0]))
        self.ax.set_zlim(self.zlim)
    def racunaj(self, metoda, x0, y0, par, N=10, eps=1e-3, konv=False):
        """ Convenience wrapper for iterating/stepping with `metoda`.
        Runs a fixed number of steps when konv=False, or keeps stepping until
        the best function value changes by less than eps when konv=True. """
        tabPoints = []
        count = 0
        if konv:
            minimum = self.f([x0, y0])
            while True and count < 1000:  # hard cap of 1000 steps
                xN, yN, par = metoda(self.f, x0, y0, par)
                tabPoints.append( [x0, y0, self.f([x0, y0])] )
                x0 = xN
                y0 = yN
                fxyN = self.f([x0, y0])
                if abs(minimum-fxyN) < eps: break
                minimum = min(minimum, fxyN)
                count += 1
        else:
            for i in range(N+1):
                xN, yN, par = metoda(self.f, x0, y0, par)
                tabPoints.append( [x0, y0, self.f([x0, y0])] )
                x0 = xN
                y0 = yN
                count += 1
        self.runs.append( tabPoints )
        print((x0, y0), self.f([x0, y0]), count)
        return x0, y0
    def zacetekAnimacije(self):
        """ Animation init hook: plot each run's starting point. """
        self.fig.suptitle("0")
        self.artists = []
        artists = []
        for j in range(len(self.runs)):
            sc, = self.ax.plot( self.runs[j][0][0], self.runs[j][0][1], self.runs[j][0][2], linestyle="", marker="o" )
            self.artists.append( sc )
            artists.append(sc)
        return artists
    def animiraj(self, i):
        """ Per-frame update: advance every recorded run to step i. """
        self.fig.suptitle(str(i))
        artists = []
        for j in range(len(self.runs)):
            col = self.artists[j].get_color()
            if i == len(self.runs[j])-1:
                # Final frame of this run: drop a dashed vertical marker line.
                vline = self.ax.plot([self.runs[j][-1][0],self.runs[j][-1][0]], [self.runs[j][-1][1],self.runs[j][-1][1]], [self.zlim[0], self.zlim[1]], linestyle="--", color=col)
                artists.append(vline)
            elif i >= len(self.runs[j]): continue
            if self.verbose == 0:
                # verbose 0: just move this run's point marker.
                self.artists[j].set_data( self.runs[j][i][0], self.runs[j][i][1])
                self.artists[j].set_3d_properties( self.runs[j][i][2] )
                artists.append( self.artists[j] )
            elif self.verbose == 1:
                # verbose 1: draw an arrow from the previous step to this one.
                arw = self.ax.quiver( self.runs[j][i-1][0], self.runs[j][i-1][1], self.runs[j][i-1][2], self.runs[j][i][0]-self.runs[j][i-1][0], self.runs[j][i][1]-self.runs[j][i-1][1], self.runs[j][i][2]-self.runs[j][i-1][2], color=col)
                self.artists.append( arw )
                artists.append(arw)
        return artists
    def maxIteration(self):
        """ Number of animation frames = length of the longest recorded run. """
        maxN = 0
        for i in range(len(self.runs)):
            maxN = max(maxN, len(self.runs[i]))
        return maxN
def narisi(self, casAnimacije=500, verbose=0, save=False):
""" Funkcija za risanje animacij. """
self.verbose = verbose
ani = animation.FuncAnimation(self.fig, self.animiraj, np.arange(1, self.maxIteration()), interval=casAnimacije, init_func=self.zacetekAnimacije, repeat=False)
if save != False: ani.save(save+".gif", dpi=80, writer="imagemagick")
plt.show() | 41.546296 | 237 | 0.531536 | 4,373 | 0.973292 | 0 | 0 | 0 | 0 | 0 | 0 | 462 | 0.102827 |
1aaeaca4a1e2ac529fe75552949c04273d36808b | 2,452 | py | Python | src/Algorithms/VotingClassifier.py | hirohio/Hello-World-ML | 398b7b9f492d563226e9ba0374bb2844ad0dbf18 | [
"MIT"
] | null | null | null | src/Algorithms/VotingClassifier.py | hirohio/Hello-World-ML | 398b7b9f492d563226e9ba0374bb2844ad0dbf18 | [
"MIT"
] | null | null | null | src/Algorithms/VotingClassifier.py | hirohio/Hello-World-ML | 398b7b9f492d563226e9ba0374bb2844ad0dbf18 | [
"MIT"
] | null | null | null | # External Modules
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import accuracy_score
# Internal Modules
import Helpers.PrintHelpers.PrintHelper as phelper
import Helpers.DataFrameHelpers.DataframeConverter as dfc
class VotingClass:
    """Thin wrapper around sklearn's VotingClassifier.

    Holds a dataframe, fits an ensemble of the supplied algorithms (with an
    optional grid search) and exposes prediction on new data.
    """
    @property
    def df(self):
        """Dataframe: original dataframe used for learning."""
        return self._df
    def __init__(self,df,algorithms):
        """ Constructor
        Args:
            df (Dataframe): dataframe for learning.
            algorithms (list): list of (name, estimator) pairs handed to
                sklearn's VotingClassifier.
        """
        self._df = df
        self._algorithms = algorithms
        phelper.PrintHelper.print_title(self.__class__.__name__)
    def learn(self,column,params):
        """Fit a VotingClassifier predicting ``column`` from the other columns.

        Args:
            column: name of the target column in the stored dataframe.
            params: grid-search parameter dict, or None to fit directly.

        Returns:
            bool: True on completion (accuracy is printed, not returned).
        """
        #predict data
        y = self._df[column]
        self._y = y
        #learning data
        X = self._df.drop([column], axis=1)
        self._X = X
        # Fixed 70/30 split with a fixed seed for reproducibility.
        (X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.3, random_state=0)
        eclf = VotingClassifier(estimators=self._algorithms)
        if params == None:
            self._learned_model = eclf
            eclf.fit(X_train, y_train)
            print('...Predicting Test Data...')
            predicted_result = self._learned_model.predict(X_test).astype(int)
        else:
            phelper.PrintHelper.print_title('Params from a file')
            print(params)
            print('...Doing Grid Search...')
            cv = GridSearchCV(eclf, params, cv = 10, scoring = 'neg_mean_squared_error', n_jobs=1, refit = True)
            cv.fit(X_train, y_train)
            self._best_params = cv.best_params_
            self._learned_model = cv.best_estimator_
            phelper.PrintHelper.print_title('Best Params')
            print(cv.best_params_)
            # NOTE(review): this overwrites the best_estimator_ assigned just
            # above, so predictions go through the GridSearchCV wrapper.
            self._learned_model = cv
        # Print accuracy score
        print('...Predicting Test Data...')
        predicted_result = self._learned_model.predict(X_test).astype(int)
        phelper.PrintHelper.print_title('Accuracy Score')
        print(accuracy_score(y_test,predicted_result))
        return True
    def predict(self,test_df):
        """Predict targets for ``test_df`` with the fitted model, as int."""
        return self._learned_model.predict(test_df).astype(int)
| 29.190476 | 112 | 0.639478 | 2,036 | 0.830343 | 0 | 0 | 117 | 0.047716 | 0 | 0 | 481 | 0.196166 |
1aaff9e26cf79480dec5e7e7f9444230c9a834ca | 22,847 | py | Python | maysics/preprocess.py | HOKOTATE-pzw/maysics | 59c38089c51db74948e3e2133c0a860880dcf0eb | [
"MIT"
] | 4 | 2021-06-14T01:47:22.000Z | 2022-03-20T07:55:32.000Z | maysics/preprocess.py | HOKOTATE-pzw/maysics | 59c38089c51db74948e3e2133c0a860880dcf0eb | [
"MIT"
] | 1 | 2022-01-20T04:30:42.000Z | 2022-01-20T04:30:42.000Z | maysics/preprocess.py | HOKOTATE-pzw/maysics | 59c38089c51db74948e3e2133c0a860880dcf0eb | [
"MIT"
] | 1 | 2022-03-20T07:58:24.000Z | 2022-03-20T07:58:24.000Z | '''
本模块用于数据预处理
This module is used for data preproccessing
'''
import numpy as np
from maysics.utils import e_distances
from matplotlib import pyplot as plt
plt.rcParams['font.sans-serif'] = ['FangSong']
plt.rcParams['axes.unicode_minus'] = False
from io import BytesIO
from lxml import etree
import base64
import math
def _rc(arg):
cov_mat = np.cov(arg)
var_mat = np.diagonal(cov_mat)**0.5
var_mat[var_mat == 0] = 1
for i in range(cov_mat.shape[0]):
cov_mat[i] /= var_mat[i]
cov_mat[:, i] /= var_mat[i]
return cov_mat
def _preview_process(data, value_round):
    '''
    Build the HTML body of the data-preview report: a summary-statistics
    table plus three base64-embedded matplotlib figures (distributions,
    correlations, pairwise scatter plots).
    '''
    data = np.array(data, dtype=float)
    # Row labels (Chinese): mean, median, variance, std, max, min,
    # skewness, kurtosis.
    name_list = ['平均值', '中位数', '方差', '标准差', '最大值', '最小值', '偏度', '峰度']
    value_list = []
    mean_ = data.mean(axis=0)
    value_list.append(np.round(mean_, value_round))
    value_list.append(np.round(np.median(data, axis=0), value_round))
    value_list.append(np.round(data.var(axis=0), value_round))
    value_list.append(np.round(data.std(axis=0), value_round))
    value_list.append(np.round(data.max(axis=0), value_round))
    value_list.append(np.round(data.min(axis=0), value_round))
    # Third/fourth central moments (non-standardised skewness/kurtosis).
    value_list.append(np.round(((data - mean_)**3).mean(axis=0), value_round))
    value_list.append(np.round(((data - mean_)**4).mean(axis=0), value_round))
    value_list = np.array(value_list).flatten()
    style = '''
    <style>
    table{
        border-collapse: collapse;
    }
    table, table tr td {
        border:1px solid #ccc;
    }
    table tr td{
        padding: 5px 10px;
    }
    </style>
    '''
    # Summary table: one row per statistic, one cell per feature column.
    table = '<h2 style="padding-left:50px; border-top:1px solid #ccc">数值特征</h2>' + style + '<table align="center"><caption></caption>'
    for i in range(8):
        table += '<tr><td>' + name_list[i] + '</td>' + '<td>%s</td>' * data.shape[1] + '</tr>'
    table = '<h1 style="padding-left:50px;">数据信息</h1>' + table % tuple(value_list) + '</table>'
    # Work feature-major from here on (one row per feature).
    data = np.ascontiguousarray(data.T)
    num = data.shape[0]
    # Figure 1: horizontal violin plot per feature with mean marker and IQR bar.
    plt.figure(figsize=(9, 3 * num))
    for i in range(num):
        q1, q2, q3 = np.percentile(data[i], [25, 50, 75])
        plt.scatter(mean_[i], i+1, marker='o', color='white', s=30, zorder=3)
        plt.hlines(i+1, q1, q3, color='k', linestyle='-', lw=1)
    bx = plt.violinplot(data.tolist(), showextrema=False, vert=False)
    plt.title('分布图')
    buffer = BytesIO()
    plt.savefig(buffer)
    plt.close()
    plot_data = buffer.getvalue()
    # Inline the PNG as a base64 data URI so the report is self-contained.
    imb = base64.b64encode(plot_data)
    ims = imb.decode()
    imd = 'data:image/png;base64,' + ims
    im1 = '<div align="center"><img src="%s"></div>' % imd
    im1 = '<br></br><h2 style="padding-left:50px; border-top:1px solid #ccc">密度分布</h2>' + im1
    # Figure 2: correlation matrix (heat map + HTML table of coefficients).
    cov_mat = _rc(data)
    matrix = '<table border="0"><caption></caption>'
    for i in range(num):
        matrix += '<tr>' + '<td>%s</td>' * num + '</tr>'
    matrix = matrix % tuple(np.round(cov_mat.flatten(), value_round)) + '</table>'
    plt.figure(figsize=(8, 8))
    plt.matshow(cov_mat, fignum=0, cmap='Blues')
    plt.colorbar()
    plt.title('相关系数图')
    buffer = BytesIO()
    plt.savefig(buffer)
    plt.close()
    plot_data = buffer.getvalue()
    imb = base64.b64encode(plot_data)
    ims = imb.decode()
    imd = 'data:image/png;base64,' + ims
    im2 = '<div style="display:flex;flex-direction:row;vertical-align:middle;justify-content:center;width:100%;height:80vh"><div style="margin:auto 0;white-space:pre-wrap;max-width:50%">'
    im2 = im2 +'相关矩阵:'+ matrix + '</div><img style="object-fit:contain;max-width:45%;max-height:80vh" src="{}"/></div>'.format(imd)
    im2 = '<br></br><h2 style="padding-left:50px; border-top:1px solid #ccc">相关性</h2>' + im2
    # Figure 3: num x num grid of pairwise scatter plots.
    plt.figure(figsize=(2.5 * num, 2.5 * num))
    for i in range(num * num):
        ax = plt.subplot(num, num, i+1)
        ax.plot(data[i//num], data[i%num], 'o')
    buffer = BytesIO()
    plt.savefig(buffer)
    plt.close()
    plot_data = buffer.getvalue()
    imb = base64.b64encode(plot_data)
    ims = imb.decode()
    imd = "data:image/png;base64," + ims
    im3 = '<div align="center"><img src="%s"></div>' % imd
    im3 = '<br></br><h2 style="padding-left:50px; border-top:1px solid #ccc">散点关系</h2>' + im3
    return '<title>数据信息预览</title>' + table + im1 + im2 + im3
def preview_file(filename, data, value_round=3):
    '''
    Generate the data preview report as an html file

    Parameters
    ----------
    filename: str, file name
    data: 2-D array, data
    value_round: int, the number of digits after the decimal point retained by numeric features
    '''
    root = _preview_process(data=data, value_round=value_round)
    # Round-trip through lxml so the report is written out as a complete,
    # well-formed HTML document.
    html = etree.HTML(root)
    tree = etree.ElementTree(html)
    tree.write(filename)
def preview(data, value_round=3):
    '''
    Display the data preview report inline in jupyter

    Parameters
    ----------
    data: 2-D array, data
    value_round: int, the number of digits after the decimal point retained by numeric features
    '''
    root = _preview_process(data=data, value_round=value_round)
    # Imported lazily so the module does not require IPython outside notebooks.
    from IPython.core.display import display, HTML
    display(HTML(root))
def length_pad(seq, maxlen=None, value=0, padding='pre', dtype=float):
    """Pad or truncate every row of a 2-D list to a common length.

    Parameters
    ----------
    seq: 2-D list whose rows may differ in length
    maxlen: int, callable, target row length; defaults to the longest row
    value: num, callable, value used for padding, default=0
    padding: str, callable, 'pre' pads/truncates at the front, 'post' at
        the back, default='pre'
    dtype: callable, element type of the returned array, default=float

    Return
    ------
    2-D ndarray

    Note: with padding='post', short rows are extended in place (the
    caller's inner lists are mutated).
    """
    rows = list(seq)
    if not maxlen:
        maxlen = max(map(len, rows), default=0)
    if padding == 'pre':
        for k, row in enumerate(rows):
            gap = maxlen - len(row)
            if gap > 0:
                rows[k] = [value] * gap + row
            elif gap < 0:
                rows[k] = row[-maxlen:]
    elif padding == 'post':
        for k, row in enumerate(rows):
            gap = maxlen - len(row)
            if gap > 0:
                row += [value] * gap  # in-place extend, as before
            elif gap < 0:
                rows[k] = row[:maxlen]
    return np.array(rows, dtype=dtype)
def sample_pad(data, index=0, padding=None):
    """Pad several 2-D samples so they share one set of key values.

    The values found in column ``index`` across all samples form the
    complete key set; every sample is then extended (rows appended at the
    end) with the keys it is missing, other columns filled with ``padding``.

    e.g. data1 = [[0, 1], [1, 2], [2, 3]]
         data2 = [[2, 3], [3, 4], [4, 5]]
         sample_pad((data1, data2)) ->
         [array([[0, 1], [1, 2], [2, 3], [3, nan], [4, nan]], dtype=object),
          array([[2, 3], [3, 4], [4, 5], [0, nan], [1, nan]], dtype=object)]

    Parameters
    ----------
    data: tuple or list of 2-D arrays, the samples
    index: int, index of the key column used to build the complete set
    padding: scalar, callable, fill value for appended rows, default=nan

    Return
    ------
    list of 2-D object ndarrays
    '''"""
    n_other = len(data[0][0]) - 1
    # 'is None' (not truthiness) so 0 or '' are usable fill values; the fill
    # row covers every non-key column, whatever the sample width.
    if padding is None:
        pad_row = [np.nan] * n_other
    else:
        pad_row = [padding] * n_other
    key_set = set()
    result = []
    for part in data:
        # dtype=object keeps heterogeneous cells intact next to the fill
        # value (the old np.object alias was removed in NumPy 1.24).
        part = np.array(part, dtype=object)
        result.append(part)
        key_set |= set(part[:, index])
    for i in range(len(result)):
        missing = np.array([list(key_set - set(result[i][:, index]))], dtype=object).T
        num = len(missing)
        if num == 0:
            # Nothing to add; reshape(num, -1) below would raise for num == 0.
            continue
        fill = np.array(pad_row * num, dtype=object).reshape(num, -1)
        # Splice the missing keys back into the key column's position.
        rows = np.hstack((fill[:, :index], missing, fill[:, index:]))
        result[i] = np.vstack((result[i], rows))
    return result
def shuffle(*arg):
    """Shuffle one sequence, or several sequences in lockstep.

    Every argument is permuted in place using the same RNG state, so
    corresponding elements stay aligned across the sequences.

    Return
    ------
    ndarray stacking the shuffled sequences
    """
    snapshot = np.random.get_state()
    shuffled = []
    for seq in arg:
        # Restoring the same state before each call yields an identical
        # permutation for every sequence.
        np.random.set_state(snapshot)
        np.random.shuffle(seq)
        shuffled.append(seq)
    return np.array(shuffled)
def data_split(data, targets, train_size=None, test_size=None, shuffle=True, random_state=None):
    """Split data/targets into a training part and a validation part.

    Parameters
    ----------
    data: data
    targets: targets
    train_size: float, callable, fraction used for training, in (0, 1],
        default=0.75
    test_size: float, callable, fraction used for validation, in [0, 1);
        when given, it determines train_size as 1 - test_size
    shuffle: bool, callable, whether to shuffle data and targets (in
        lockstep) before splitting, default=True
    random_state: int, callable, random seed

    Return
    ------
    tuple, (train_data, train_target, validation_data, validation_target)
    """
    data = np.array(data)
    targets = np.array(targets)
    if not (train_size or test_size):
        train_size = 0.75
    elif test_size:
        train_size = 1 - test_size
    if train_size <= 0 or train_size > 1:
        raise Exception("'train_size' should be in (0, 1], 'test_size' should be in [0, 1)")
    if shuffle:
        # Replaying the same RNG state for both arrays keeps rows and
        # targets aligned after shuffling.
        np.random.seed(random_state)
        state = np.random.get_state()
        np.random.shuffle(data)
        np.random.set_state(state)
        np.random.shuffle(targets)
    cut = int(len(data) * train_size)
    return data[:cut], targets[:cut], data[cut:], targets[cut:]
def kfold(data, targets, n, k=5):
    """Split data/targets for one round of k-fold cross validation.

    Parameters
    ----------
    data: data
    targets: targets
    n: int, which fold (0-based) becomes the validation set
    k: int, callable, number of folds, default=5

    Return
    ------
    tuple, (train_data, train_target, validation_data, validation_target)
    """
    data = np.array(data)
    targets = np.array(targets)
    fold = len(data) // k
    # Fold n occupies the half-open slice [lo, hi).
    lo, hi = fold * n, fold * (n + 1)
    validation_data = data[lo: hi]
    validation_targets = targets[lo: hi]
    train_data = np.concatenate([data[:lo], data[hi:]])
    train_targets = np.concatenate([targets[:lo], targets[hi:]])
    return train_data, train_targets, validation_data, validation_targets
def dataloader(data, targets, choose_rate=0.3, shuffle=True, random_state=None):
    """Yield successive batches of (data, targets).

    Parameters
    ----------
    data: data
    targets: targets
    choose_rate: float, callable, fraction of the data yielded per batch,
        in [0, 1], default=0.3
    shuffle: bool, callable, whether to shuffle data and targets (in
        lockstep) first, default=True
    random_state: int, callable, random seed

    Return
    ------
    generator of (data_batch, target_batch) tuples
    """
    data = np.array(data)
    targets = np.array(targets)
    if shuffle:
        # Same RNG state for both shuffles keeps rows and targets aligned.
        np.random.seed(random_state)
        state = np.random.get_state()
        np.random.shuffle(data)
        np.random.set_state(state)
        np.random.shuffle(targets)
    total = len(data)
    step = int(total * choose_rate)
    # The last batch may be shorter than step.
    n_batches = int(math.ceil(total / step))
    for k in range(n_batches):
        lo = k * step
        hi = lo + step
        yield data[lo: hi], targets[lo: hi]
def standard(data, mean=True, var=True, index=None):
    '''
    Standardize data
    z = (x - u) / s
    z: new data; x: origin data; u: mean value; s: standard deviation
    if data in one column are all the same (s=0), that column is zeroed out

    Parameters
    ----------
    data: 2-D ndarray
    mean: bool or ndarray, callable, bool decides whether to shift the mean
        to 0, ndarray supplies explicit means
    var: bool or ndarray, callable, bool decides whether to scale the std
        to 1, ndarray supplies explicit stds
    index: list, callable, index of columns to standardize, default to all

    Return
    ------
    tuple, (standardized data, mean used, std used -- 0 when scaling is off)
    '''
    data=np.array(data, dtype=float)
    if index:
        if mean is True:
            mean = data[:, index].mean(axis=0)
        elif mean is None or mean is False:
            mean = np.zeros(len(index))
        # NOTE(review): unlike the no-index branch below, a user-supplied
        # `mean` is not wrapped in np.array here -- relies on broadcasting.
        data[:, index] -= mean
        if not var is None and not var is False:
            if var is True:
                var = data[:, index].std(axis=0)
            else:
                var = np.array(var)
            std_zero_indices = np.nonzero(var == 0)
            # Replace zero stds with 1 to avoid division by zero ...
            std = var.copy()
            std[std==0] = 1.0
            data[:, index] /= std
            if list(std_zero_indices[0]):
                # ... then zero out the constant columns entirely.
                for i in std_zero_indices[0]:
                    data[:, index][:, i] *= 0
        else:
            std = 0
    else:
        if mean is True:
            mean = data.mean(axis=0)
        elif mean is None or mean is False:
            mean = np.zeros(data.shape[1])
        else:
            mean = np.array(mean)
        data -= mean
        if not var is None and not var is False:
            if var is True:
                var = data.std(axis=0)
            else:
                var = np.array(var)
            std_zero_indices = np.nonzero(var == 0)
            # Same zero-variance handling as the index branch above.
            std = var.copy()
            std[std==0] = 1.0
            data /= std
            if list(std_zero_indices[0]):
                for i in std_zero_indices[0]:
                    data[:, i] *= 0
        else:
            std = 0
    return data, mean, std
def minmax(data, feature_range=(0, 1), min_max=None, index=None):
    """
    Rescale data column-wise into *feature_range* via min-max normalization:
    X_std = (X - min) / (max - min);  X_new = X_std * (hi - lo) + lo

    Parameters
    ----------
    data: 2-D ndarray (or nested list convertible to one)
    feature_range: tuple, optional, target range of the transformed data
    min_max: tuple, optional, externally supplied (minima, maxima)
    index: list, optional, indices of the columns to rescale (default: all)

    Return
    ------
    tuple, (rescaled data, (column minima, column maxima))
    """
    data = np.array(data, dtype=float)
    lo, hi = feature_range
    if index:
        if not min_max:
            min_max = (data[:, index].min(axis=0), data[:, index].max(axis=0))
        span = min_max[1] - min_max[0]
        scaled = (data[:, index] - min_max[0]) / span
        data[:, index] = scaled * (hi - lo) + lo
    else:
        if not min_max:
            min_max = (data.min(axis=0), data.max(axis=0))
        span = min_max[1] - min_max[0]
        data = (data - min_max[0]) / span * (hi - lo) + lo
    return data, min_max
def normalizer(data, index=None):
    """
    Scale every row so that its Euclidean norm equals 1.

    Parameters
    ----------
    data: 2-D ndarray (or nested list convertible to one)
    index: list, optional, indices of the columns used to compute the norms
           and to be scaled (default: all columns)

    Return
    ------
    2-D ndarray
    """
    data = np.array(data, dtype=float)
    selected = data[:, index] if index else data
    norms = e_distances(selected)
    # Rows with a zero norm are divided by 1, i.e. left untouched.
    norms[norms == 0] = 1
    divisor = np.array([norms]).T
    if index:
        data[:, index] /= divisor
    else:
        data /= divisor
    return data
def pca(data, n=None, eig_vector=None):
    '''
    Principal Component Analysis.
    Parameters
    ----------
    data: 2-D array
    n: int or float, optional; when n >= 1 reduce to n components, when
       0 < n < 1 reduce until the cumulative variance ratio >= n;
       by default no reduction is performed
    eig_vector: ndarray, optional; project onto these externally supplied
                eigenvectors instead of computing them (the returned
                variance statistics are then None)
    Return
    ------
    tuple, (transformed data, (cumulative variance ratio, variances,
    variance ratios), eigenvectors)
    '''
    data = np.array(data, dtype=float)
    if eig_vector is None or eig_vector is False:
        # NOTE(review): np.linalg.eig on a covariance matrix can return
        # complex values due to rounding; eigh would be the symmetric choice.
        cov_mat = np.cov(data.T)
        eig_value, eig_vector = np.linalg.eig(cov_mat)
        # Sort eigenpairs by decreasing eigenvalue.
        sort_index = np.flipud(np.argsort(eig_value))
        eig_value = eig_value[sort_index]
        eig_vector = eig_vector[:, sort_index]
        eig_ratio = eig_value / eig_value.sum()
        contri = 0
        if not n is None and not n is False:
            if n >= 1:
                n = int(n)
                contri = eig_ratio[:n].sum()
            elif n < 1:
                # Accumulate ratios until the requested coverage is reached.
                for i in range(eig_value.shape[0]):
                    contri += eig_ratio[i]
                    if contri >= n:
                        n = i + 1
                        break
            eig_value = eig_value[:n]
            eig_ratio = eig_ratio[:n]
            eig_vector = eig_vector[:, :n]
        else:
            contri = 1
    else:
        # Externally supplied eigenvectors: no statistics are available.
        contri = None
        eig_value = None
        eig_ratio = None
    data = np.dot(data, eig_vector)
    return data, (contri, eig_value, eig_ratio), eig_vector
class RC():
    '''
    Correlation coefficient matrix of several equally long 1-D arrays.
    Parameter
    ---------
    *arg: list; each positional argument is one 1-D data series
    Attribute
    ---------
    rc_mat: correlation coefficient matrix
    '''
    def __init__(self, *arg):
        arg = np.array(arg, dtype=float)
        # Stacking the 1-D inputs must yield a 2-D matrix (rows = series).
        if len(arg.shape) != 2:
            raise Exception("Input list should be 1-D.")
        else:
            # NOTE(review): _rc is defined elsewhere in this module and is
            # assumed to return the pairwise correlation matrix — confirm.
            self.rc_mat = _rc(arg)
    def __img_process(self, index, cmap):
        # Shared rendering: heat map of the matrix plus a colorbar; when
        # series names are given, use them as axis tick labels.
        plt.matshow(self.rc_mat, cmap=cmap)
        plt.colorbar()
        if index:
            n_list = range(len(index))
            plt.xticks(n_list, index)
            plt.yticks(n_list, index)
    def show(self, index=None, cmap='Blues'):
        '''
        Display the image.
        Parameters
        ----------
        index: list, optional, names of each array
        cmap: str, optional, color map name, default='Blues'
        '''
        self.__img_process(index=index, cmap=cmap)
        plt.show()
    def savefig(self, filename, index=None, cmap='Blues'):
        '''
        Save the image.
        Parameters
        ----------
        filename: str, file name
        index: list, optional, names of each array
        cmap: str, optional, color map name, default='Blues'
        '''
        self.__img_process(index=index, cmap=cmap)
        plt.savefig(filename)
1ab0013a4d518f8e51ed0c4c383fca433990afad | 756 | py | Python | Application/Model/GridArea.py | Thomas145/PythonWebSocketsGame | 6c527ab33c43e689e125211d5abc6d579ff060a9 | [
"MIT"
] | null | null | null | Application/Model/GridArea.py | Thomas145/PythonWebSocketsGame | 6c527ab33c43e689e125211d5abc6d579ff060a9 | [
"MIT"
] | null | null | null | Application/Model/GridArea.py | Thomas145/PythonWebSocketsGame | 6c527ab33c43e689e125211d5abc6d579ff060a9 | [
"MIT"
] | null | null | null | from .Styles import NoStyle
class GridArea:
def __init__(self, position_marker):
self.style = NoStyle()
self.position = position_marker
def reset(self):
self.style = NoStyle()
def current_state(self):
print(self.style.display(), end="", flush=True)
def grid_area_style(self):
return self.style
def grid_area_position(self):
return self.position
def open(self):
return not self.style.selected()
def selected(self, player):
if self.style.selected() is False:
if player is not None:
if player.chosen_style() is not None:
self.style = player.chosen_style()
return True
return False
| 22.909091 | 55 | 0.592593 | 725 | 0.958995 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0.002646 |
1ab0f01b6cfb191ba89088f444dfc7d72d15e946 | 2,140 | py | Python | happy/Utils.py | yunhanw-google/happy | d482f1eeb188a03e3bcd1aefe54424fb2589c9e9 | [
"Apache-2.0"
] | 42 | 2017-09-20T07:09:59.000Z | 2021-11-08T12:08:30.000Z | happy/Utils.py | yunhanw-google/happy | d482f1eeb188a03e3bcd1aefe54424fb2589c9e9 | [
"Apache-2.0"
] | 30 | 2018-06-16T14:48:14.000Z | 2020-10-13T04:02:35.000Z | happy/Utils.py | yunhanw-google/happy | d482f1eeb188a03e3bcd1aefe54424fb2589c9e9 | [
"Apache-2.0"
] | 17 | 2017-09-20T10:37:56.000Z | 2021-02-09T06:27:44.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import time
##
# @file
# Provides general, low-level utilities used in software implementing Happy.
#
##
# Wraps the given value in the ANSI escape sequences that render it red
# on a terminal.
#
# @param[in]  txt  A string, or any object convertible to string.
#
# @return string surrounded by the red color code and the reset code.
#
def hred(txt):
    return '\033[31m{0}\033[0m'.format(str(txt))
##
# Wraps the given value in the ANSI escape sequences that render it green
# on a terminal.
#
# @param[in]  txt  A string, or any object convertible to string.
#
# @return string surrounded by the green color code and the reset code.
#
def hgreen(txt):
    start, reset = '\033[32m', '\033[0m'
    return start + str(txt) + reset
##
# Wraps the given value in the ANSI escape sequences that render it yellow
# on a terminal.
#
# @param[in]  txt  A string, or any object convertible to string.
#
# @return string surrounded by the yellow color code and the reset code.
#
def hyellow(txt):
    return ''.join(('\033[33m', str(txt), '\033[0m'))
##
# Wraps the given value in the ANSI escape sequences that render it blue
# on a terminal.
#
# @param[in]  txt  A string, or any object convertible to string.
#
# @return string surrounded by the blue color code and the reset code.
#
def hblue(txt):
    return '\033[34m%s\033[0m' % (str(txt),)
##
# Delays execution of a program by sec seconds.
#
# @param[in] sec A number of seconds to delay execution by. Fractional
#                values are allowed (forwarded unchanged to time.sleep).
#
# @return none
#
def delayExecution(sec):
    time.sleep(sec)
| 26.097561 | 82 | 0.671495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,805 | 0.843458 |
1ab2047efaf257da85d65323e0025e66394bdeb4 | 2,383 | py | Python | Python Scraping Series/P2_Listing/pull_test.py | kadnan/vidtutorials | f2a924ee8cc9ea50d3cb65287b2f9fae834855b0 | [
"MIT"
] | null | null | null | Python Scraping Series/P2_Listing/pull_test.py | kadnan/vidtutorials | f2a924ee8cc9ea50d3cb65287b2f9fae834855b0 | [
"MIT"
] | 1 | 2018-03-23T10:36:56.000Z | 2018-03-23T10:54:48.000Z | Python Scraping Series/P2_Listing/pull_test.py | kadnan/vidtutorials | f2a924ee8cc9ea50d3cb65287b2f9fae834855b0 | [
"MIT"
] | 3 | 2018-03-23T10:58:51.000Z | 2021-01-06T22:03:30.000Z | import requests
from bs4 import BeautifulSoup
from time import sleep
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
def parse(url):
    """Announce which URL is being parsed, then return a completion tag for it."""
    message = 'Parsing..' + url
    print(message)
    return 'Parsed..' + url
def pull(category_url):
    """Collect every product link under *category_url* (all pages) into links.txt.

    The first category page is fetched to read the pagination, then each
    subsequent page is fetched and scraped.  Collected hrefs are appended
    to 'links.txt' even when an error interrupts the crawl.
    """
    total_pages = 0
    total_links = []
    try:
        print('Processing...' + category_url)
        r = requests.get(category_url, headers=headers, timeout=5)
        if r.status_code == 200:
            html = r.text.strip()
            soup = BeautifulSoup(html, 'lxml')
            # Find total pages
            pagination_section = soup.select('.pagination li > a')
            if pagination_section:
                # -2 because the last entry is the NEXT button
                total_pages = int(pagination_section[len(pagination_section) - 2].text)
            for link in soup.select('.products .link'):
                total_links.append(link['href'])
            for page in range(2, total_pages + 1):
                sleep(2)
                # BUG FIX: the original re-fetched `category_url` and reused
                # the page-1 soup here, so page-1 links were appended once per
                # page.  Building the URL from the parameter also generalizes
                # this function to any category.
                page_url = '{0}?page={1}'.format(category_url, page)
                print('Processing...' + page_url)
                r = requests.get(page_url, headers=headers, timeout=5)
                if r.status_code == 200:
                    soup = BeautifulSoup(r.text.strip(), 'lxml')
                    for link in soup.select('.products .link'):
                        total_links.append(link['href'])
    except requests.ConnectionError as e:
        print("OOPS!! Connection Error. Make sure you are connected to Internet. Technical Details given below.\n")
        print(str(e))
    except requests.Timeout as e:
        print("OOPS!! Timeout Error")
        print(str(e))
    except requests.RequestException as e:
        print("OOPS!! General Error")
        print(str(e))
    except KeyboardInterrupt:
        print("Someone closed the program")
    finally:
        # Save links into file
        if len(total_links) > 0:
            with open('links.txt', 'a+', encoding='utf-8') as f:
                f.write('\n'.join(total_links))
if __name__ == '__main__':
    # Crawl the category, then re-read the collected links and parse each one.
    cat_url = 'https://www.daraz.pk/mens-smart-watches/'
    pull(cat_url)
    with open('links.txt',encoding='utf-8') as f:
        lines = f.readlines()
    for l in lines:
        parse(l)
| 35.567164 | 140 | 0.576164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 697 | 0.292488 |
1ab21cc87223f786572255bf50a065de4031a07d | 2,700 | py | Python | examples/pseudo/stamps/convert.py | golly-splorts/gollyx-maps | ad57b6e0665a7f2a54f2cfa31717ce152ac3d046 | [
"MIT"
] | null | null | null | examples/pseudo/stamps/convert.py | golly-splorts/gollyx-maps | ad57b6e0665a7f2a54f2cfa31717ce152ac3d046 | [
"MIT"
] | null | null | null | examples/pseudo/stamps/convert.py | golly-splorts/gollyx-maps | ad57b6e0665a7f2a54f2cfa31717ce152ac3d046 | [
"MIT"
] | null | null | null | import os
from pprint import pprint
import json
def main():
    """Export each hard-coded Life pattern as an ASCII grid file under output/."""
    patterns = {
        "l_pentomino": '[{"30":[30,31],"31":[30],"32":[30],"33":[30]}]',
        "flower_pentomino": '[{"30":[31],"31":[30,31],"32":[32],"33":[31]}]',
        "kite_heptomino": '[{"30":[30,31],"31":[30],"32":[30,31],"33":[32]}]',
        "boomerang_heptomino": '[{"30":[30],"31":[30,31],"32":[30,31],"33":[33]}]',
        "t_heptomino": '[{"30":[30],"31":[30,31],"32":[30],"33":[31,32]}]',
        "lockpick_heptomino": '[{"30":[30],"31":[30],"32":[30,31,32],"33":[32]}]',
        "facade_heptomino": '[{"30":[30],"31":[30,32,33],"32":[30],"33":[33]}]',
        "raygun_heptomino": '[{"30":[30,32,33],"31":[30,31],"33":[30]}]',
        "broken_l_heptomino": '[{"30":[30],"31":[30],"32":[31,32,33],"33":[30]}]',
        "angel_heptomino": '[{"30":[30,33],"31":[30,31],"32":[32],"33":[31]}]',
        "sticky_heptomino": '[{"30":[30,33],"31":[30],"32":[31,32],"33":[31]}]',
        "reverse_f_heptomino": '[{"30":[30],"31":[31],"32":[30,31,33],"33":[31]}]',
        "swandive_octomino": '[{"30":[30,33],"31":[30],"32":[30,31,32],"33":[32,33]}]',
        "stretchydog_octomino": '[{"30":[30,32],"31":[30,31,33],"32":[30,32],"33":[33]}]',
        "capacitor_octomino": '[{"30":[30],"31":[30,33],"32":[30,31,33],"33":[31,33]}]',
        "brass_knuckles_nonomino": '[{"30":[31,32],"31":[30,31,33],"32":[30,32],"33":[31,32]}]',
        "mcnasty_nonomino": '[{"30":[30,32],"31":[31],"32":[30,31,32,33],"33":[31,32]}]',
        "octomino_oscillator": '[{"30":[30,32],"31":[32,33],"32":[30,31],"33":[31,33]}] ',
    }
    # Idiom fix: makedirs(exist_ok=True) replaces the try/mkdir/except dance.
    os.makedirs("output", exist_ok=True)
    for pattern_name, pattern in patterns.items():
        print(f"Exporting pattern {pattern_name}")
        s = convert(pattern)
        fname = pattern_name + ".txt"
        with open(os.path.join("output", fname), "w") as f:
            f.write(s)
def convert(s):
    """Convert a JSON cell map ('[{row: [cols...]}]') into an ASCII grid.

    Live cells become 'o', dead cells '.', one text line per row, each
    terminated by a newline.  Coordinates are shifted so the pattern's
    bounding box starts at the origin.
    """
    cells = json.loads(s)[0]
    coords = [(x, int(row)) for row in cells for x in cells[row]]
    x0 = min(x for x, _ in coords)
    y0 = min(y for _, y in coords)
    shifted = {(x - x0, y - y0) for x, y in coords}
    width = max(x for x, _ in shifted) + 1
    height = max(y for _, y in shifted) + 1
    lines = []
    for y in range(height):
        lines.append(''.join('o' if (x, y) in shifted else '.' for x in range(width)))
    return '\n'.join(lines) + '\n'
if __name__ == "__main__":
    # Run the export only when executed as a script, not when imported.
    main()
| 33.75 | 96 | 0.475556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,370 | 0.507407 |
1ab561ca4e660e97c6f68f02dd1daf19300748c5 | 935 | py | Python | examples/issues/issue345_docs2.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | [
"Apache-2.0"
] | 666 | 2016-11-14T18:17:40.000Z | 2022-03-29T03:53:22.000Z | examples/issues/issue345_docs2.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | [
"Apache-2.0"
] | 598 | 2016-10-20T21:04:09.000Z | 2022-03-15T22:44:49.000Z | examples/issues/issue345_docs2.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | [
"Apache-2.0"
] | 95 | 2017-01-19T12:23:58.000Z | 2022-03-06T18:16:21.000Z | import sys
sys.path.append("../../")
from appJar import gui
def press(btn):
    """Dispatch a navigation button press to the matching frame-stack action."""
    actions = {
        "FIRST": app.firstFrame,
        "NEXT": app.nextFrame,
        "PREV": app.prevFrame,
        "LAST": app.lastFrame,
    }
    action = actions.get(btn)
    if action is not None:
        action("Pages")
def changed():
    """Log each frame transition of the 'Pages' stack to stdout."""
    previous = app.getPreviousFrame("Pages")
    current = app.getCurrentFrame("Pages")
    print("Changed from: " + str(previous) + " to " + str(current))
    # return app.okBox("Sure?", msg)  -- confirmation dialog left disabled
# Demo: a stack of three colored frames with navigation buttons underneath.
with gui("FRAME STACK") as app:
    with app.frameStack("Pages", change=changed):#, start=1):
        with app.frame(bg='red'):
            for i in range(5):
                app.label("Text: " + str(i))
        with app.frame(bg='green'):
            for i in range(5):
                app.entry("e" + str(i))
        with app.frame(bg='pink'):
            for i in range(5):
                app.button(str(i), None)
    app.buttons(["FIRST", "PREV", "NEXT", "LAST"], press)
    # Log the initial frame state once at startup.
    changed()
| 30.16129 | 108 | 0.545455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.233155 |
1ab66b053488cd225ef0133491265daccc61ff6f | 5,557 | py | Python | backend/tournesol/views/polls.py | Vikka/tournesol | 161322d8ca2d6de885552ab8131fb55c68e88ae6 | [
"CC0-1.0"
] | null | null | null | backend/tournesol/views/polls.py | Vikka/tournesol | 161322d8ca2d6de885552ab8131fb55c68e88ae6 | [
"CC0-1.0"
] | null | null | null | backend/tournesol/views/polls.py | Vikka/tournesol | 161322d8ca2d6de885552ab8131fb55c68e88ae6 | [
"CC0-1.0"
] | null | null | null | import logging
from django.conf import settings
from django.db.models import Case, F, Prefetch, Q, Sum, When
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import (
OpenApiExample,
OpenApiParameter,
extend_schema,
extend_schema_view,
)
from rest_framework import serializers
from rest_framework.generics import ListAPIView, RetrieveAPIView
from tournesol.models import Entity, EntityCriteriaScore, Poll
from tournesol.serializers.poll import (
PollSerializer,
RecommendationSerializer,
RecommendationsFilterSerializer,
)
from tournesol.views import PollScopedViewMixin
logger = logging.getLogger(__name__)
@extend_schema_view(
    get=extend_schema(
        parameters=[
            RecommendationsFilterSerializer,
            OpenApiParameter(
                "weights",
                OpenApiTypes.OBJECT,
                style="deepObject",
                description="Weights for criteria in this poll."
                " The default weight is 10 for each criteria.",
                examples=[
                    OpenApiExample(
                        name="weights example",
                        value={
                            "reliability": 10,
                            "importance": 10,
                            "ignored_criteria": 0,
                        },
                    )
                ],
            ),
        ],
    )
)
class PollRecommendationsBaseAPIView(PollScopedViewMixin, ListAPIView):
    """
    A base view used to factorize behaviours common to all recommendation
    views.
    It doesn't define any serializer, queryset nor permission.
    """
    def filter_by_parameters(self, request, queryset, poll: Poll):
        """
        Filter the queryset according to the URL parameters.
        The `unsafe` parameter is not processed by this method.
        """
        filter_serializer = RecommendationsFilterSerializer(data=request.query_params)
        filter_serializer.is_valid(raise_exception=True)
        filters = filter_serializer.validated_data
        search = filters["search"]
        if search:
            queryset = poll.entity_cls.filter_search(queryset, search)
        date_lte = filters["date_lte"]
        if date_lte:
            queryset = poll.entity_cls.filter_date_lte(queryset, date_lte)
        date_gte = filters["date_gte"]
        if date_gte:
            queryset = poll.entity_cls.filter_date_gte(queryset, date_gte)
        return queryset, filters
    def filter_unsafe(self, queryset, filters):
        """Filter the queryset according to the `unsafe` URL parameters.
        This method requires a queryset annotated with the entities weighted
        total score.
        """
        show_unsafe = filters["unsafe"]
        if show_unsafe:
            # Unsafe mode: keep anything that has a computed score at all.
            queryset = queryset.filter(total_score__isnull=False)
        else:
            # Safe mode: require enough contributors and a positive score.
            queryset = queryset.filter(
                rating_n_contributors__gte=settings.RECOMMENDATIONS_MIN_CONTRIBUTORS
            ).filter(total_score__gt=0)
        return queryset
    def _build_criteria_weight_condition(
        self, request, poll: Poll, when="criteria_scores__criteria"
    ):
        """
        Return a `Case()` expression associating for each criterion the weight
        provided in the URL parameters.
        """
        criteria_cases = []
        for crit in poll.criterias_list:
            raw_weight = request.query_params.get(f"weights[{crit}]")
            if raw_weight is not None:
                try:
                    weight = int(raw_weight)
                except ValueError as value_error:
                    raise serializers.ValidationError(
                        f"Invalid weight value for criteria '{crit}'"
                    ) from value_error
            else:
                # Criteria without an explicit URL weight default to 10.
                weight = 10
            criteria_cases.append(When(**{when: crit}, then=weight))
        return Case(*criteria_cases, default=0)
    def annotate_with_total_score(self, queryset, request, poll: Poll):
        """
        Annotate the queryset with `total_score`: the sum of each entity's
        per-criterion scores in this poll, weighted as requested in the URL.
        """
        criteria_weight = self._build_criteria_weight_condition(request, poll)
        queryset = queryset.annotate(
            total_score=Sum(
                F("criteria_scores__score") * criteria_weight,
                filter=Q(criteria_scores__poll=poll),
            )
        )
        return queryset.prefetch_related(
            Prefetch(
                "criteria_scores",
                queryset=EntityCriteriaScore.objects.filter(poll=poll),
            )
        )
class PollsView(RetrieveAPIView):
    """
    Fetch a poll and its related criteria.
    """
    # Empty list: no additional permission checks are applied by this view.
    permission_classes = []
    queryset = Poll.objects.prefetch_related("criteriarank_set__criteria")
    # Polls are looked up by their name rather than their pk.
    lookup_field = "name"
    serializer_class = PollSerializer
class PollsRecommendationsView(PollRecommendationsBaseAPIView):
    """
    List the recommended entities of a given poll sorted by decreasing total
    score.
    """
    # overwrite the default value of `PollScopedViewMixin`
    poll_parameter = "name"
    permission_classes = []
    queryset = Entity.objects.none()
    serializer_class = RecommendationSerializer
    def get_queryset(self):
        """Build the filtered, score-annotated, safe-by-default ranking."""
        poll = self.poll_from_url
        queryset = Entity.objects.filter(criteria_scores__poll=poll)
        queryset, filters = self.filter_by_parameters(self.request, queryset, poll)
        queryset = self.annotate_with_total_score(queryset, self.request, poll)
        queryset = self.filter_unsafe(queryset, filters)
        # Deterministic ordering: score ties are broken by descending pk.
        return queryset.order_by("-total_score", "-pk")
| 32.688235 | 86 | 0.629116 | 4,126 | 0.742487 | 0 | 0 | 3,845 | 0.69192 | 0 | 0 | 1,206 | 0.217024 |
1ab6b7bbb61696547dfea1ed3c22423263dab38c | 2,704 | py | Python | cabotage/server/ext/config_writer.py | di/cabotage-app | a119800ce5a83547c0abf31540bd0bc90a0e6d10 | [
"MIT"
] | 15 | 2018-04-12T20:57:25.000Z | 2022-02-25T01:51:58.000Z | cabotage/server/ext/config_writer.py | di/cabotage-app | a119800ce5a83547c0abf31540bd0bc90a0e6d10 | [
"MIT"
] | 10 | 2018-01-30T19:55:06.000Z | 2021-06-15T21:54:17.000Z | cabotage/server/ext/config_writer.py | di/cabotage-app | a119800ce5a83547c0abf31540bd0bc90a0e6d10 | [
"MIT"
] | 6 | 2018-01-30T10:49:17.000Z | 2022-02-25T01:51:59.000Z | import os
from flask import current_app
from flask import _app_ctx_stack as stack
class ConfigWriter(object):
    """Flask extension that persists application configuration, using Vault
    for secrets and Consul for plain values."""
    def __init__(self, app=None, consul=None, vault=None):
        # Standard Flask extension pattern: allow deferred setup via init_app().
        self.app = app
        self.consul = consul
        self.vault = vault
        if app is not None:
            self.init_app(app, consul, vault)
    def init_app(self, app, consul, vault):
        self.consul = consul
        self.vault = vault
        # Key prefixes are configurable per app; defaults target cabotage.
        self.consul_prefix = app.config.get('CONSUL_PREFIX', 'cabotage')
        self.vault_prefix = app.config.get('VAULT_PREFIX', 'secret/cabotage')
        app.teardown_appcontext(self.teardown)
    def teardown(self, exception):
        # No per-request resources to release; registered for symmetry.
        pass
def write_configuration(self, org_slug, project_slug, app_slug, configuration):
version = configuration.version_id + 1 if configuration.version_id else 1
if configuration.secret:
if self.vault is None:
raise RuntimeError('No Vault extension configured!')
config_key_name = (f'{self.vault_prefix}/automation'
f'/{org_slug}/{project_slug}-{app_slug}/configuration/'
f'{configuration.name}/{version}')
build_key_name = (f'{self.vault_prefix}/buildtime'
f'/{org_slug}/{project_slug}-{app_slug}/configuration/'
f'{configuration.name}/{version}')
storage = 'vault'
self.vault.vault_connection.write(
config_key_name, **{configuration.name: configuration.value},
)
if configuration.buildtime:
self.vault.vault_connection.write(
build_key_name, **{configuration.name: configuration.value},
)
else:
if self.consul is None:
raise RuntimeError('No Consul extension configured!')
config_key_name = (f'{self.consul_prefix}'
f'/{org_slug}/{project_slug}-{app_slug}/configuration/'
f'{configuration.name}/{version}/{configuration.name}')
build_key_name = config_key_name
storage = 'consul'
self.consul.consul_connection.kv.put(config_key_name, configuration.value)
config_key_name = '/'.join(config_key_name.split('/')[:-1])
return {
'config_key_slug': f'{storage}:{config_key_name}',
'build_key_slug': f'{storage}:{build_key_name}',
}
def read(self, key_slug, build=False, secret=False):
if secret:
return self.vault.vault_connection.read(key_slug)
return self.consul.consul_connection.read(key_slug)
| 41.6 | 86 | 0.593565 | 2,618 | 0.968195 | 0 | 0 | 0 | 0 | 0 | 0 | 607 | 0.224482 |
1ab6d73debd5df0f3ab75a30e6c5f37cb702dd35 | 177 | py | Python | HW3/VeronyWise/task3,3.py | kolyasalubov/Lv-677.PythonCore | c9f9107c734a61e398154a90b8a3e249276c2704 | [
"MIT"
] | null | null | null | HW3/VeronyWise/task3,3.py | kolyasalubov/Lv-677.PythonCore | c9f9107c734a61e398154a90b8a3e249276c2704 | [
"MIT"
] | null | null | null | HW3/VeronyWise/task3,3.py | kolyasalubov/Lv-677.PythonCore | c9f9107c734a61e398154a90b8a3e249276c2704 | [
"MIT"
] | 6 | 2022-02-22T22:30:49.000Z | 2022-03-28T12:51:19.000Z | variable1 = input('Variable 1:')
variable2 = input('Variable 2:')
# Swap the two values using tuple unpacking (no temporary variable needed).
variable1, variable2 = variable2, variable1
print(f"Variable 1: {variable1}")
print(f"Variable 2: {variable2}") | 35.4 | 43 | 0.734463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.440678 |
1aba01da53c4ec829e4ab3ced3f3d42f6a25fa37 | 1,748 | py | Python | stats_extractor.py | diegojromerolopez/pystats-trello | 958e33a689573335d36c73dddd4198694feaf203 | [
"MIT"
] | 2 | 2020-12-27T15:33:19.000Z | 2021-03-06T06:45:40.000Z | stats_extractor.py | diegojromerolopez/pystats-trello | 958e33a689573335d36c73dddd4198694feaf203 | [
"MIT"
] | null | null | null | stats_extractor.py | diegojromerolopez/pystats-trello | 958e33a689573335d36c73dddd4198694feaf203 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import re
import settings
from auth.connector import TrelloConnector
from stats import summary
from stats.trelloboardconfiguration import TrelloBoardConfiguration
def extract_stats(configuration_file_path):
    """
    Build the stats summary for the Trello board described by a configuration file.
    :param configuration_file_path: path of the configuration file to load.
    """
    board_configuration = TrelloBoardConfiguration.load_from_file(configuration_file_path)
    summary.make(trello_connector, board_configuration)
def file_is_configuration_file(_file_name):
    # A configuration file is named '<something-without-dots>.conf.txt';
    # returns the match object (truthy) or None, like re.match itself.
    configuration_pattern = r"^[^\.]+\.conf\.txt"
    return re.match(configuration_pattern, _file_name)
if __name__ == "__main__":
api_key = settings.TRELLO_API_KEY
api_secret = settings.TRELLO_API_SECRET
token = settings.TRELLO_TOKEN
token_secret = settings.TRELLO_TOKEN_SECRET
trello_connector = TrelloConnector(api_key, api_secret, token, token_secret)
if len(sys.argv) < 2:
raise ValueError(u"Error. Use python stats_extractor.py <configuration_file_path>")
# Configuration file path
configuration_path = sys.argv[1]
# If configuration path is a file, extract stats of the board written in this file
if os.path.isfile(configuration_path):
extract_stats(configuration_path)
# Otherwise, if configuration path is a directory, loop through directory files and extract stats
# for each of these files
elif os.path.isdir(configuration_path):
for file_name in os.listdir(configuration_path):
if file_is_configuration_file(file_name):
print(u"Processing {0}".format(file_name))
extract_stats(u"{0}/{1}".format(configuration_path, file_name))
| 32.981132 | 101 | 0.740847 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 562 | 0.32151 |
1abb0d4acfc6ecf3ec22c58282eb86a6173b6334 | 310 | py | Python | src/housie_game.py | Eclair24/housie | 7892002914c2ced422f89cd8c050993c2f931deb | [
"Apache-2.0"
] | 1 | 2020-08-10T07:44:01.000Z | 2020-08-10T07:44:01.000Z | src/housie_game.py | Eclair24/housie | 7892002914c2ced422f89cd8c050993c2f931deb | [
"Apache-2.0"
] | 1 | 2020-12-07T16:31:05.000Z | 2020-12-09T09:04:58.000Z | src/housie_game.py | Eclair24/housie | 7892002914c2ced422f89cd8c050993c2f931deb | [
"Apache-2.0"
] | 1 | 2020-10-01T17:54:19.000Z | 2020-10-01T17:54:19.000Z | """Convenience file to help start the game when the repo is cloned from git rather than installed via pip
This was required as we needed to run the script from the same level as the housie/ package in order for the imports
to work correctly.
"""
from housie.game import display_main_menu
display_main_menu()
| 34.444444 | 116 | 0.793548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.793548 |
1abc873e62dabe851c37307f75c1f607203ad768 | 4,076 | py | Python | plyse/parser.py | arcodergh/plyse | bb44543f9c812401489ceba68b24b8618d263830 | [
"MIT"
] | 26 | 2016-05-31T14:45:24.000Z | 2021-04-27T01:54:52.000Z | plyse/parser.py | arcodergh/plyse | bb44543f9c812401489ceba68b24b8618d263830 | [
"MIT"
] | 11 | 2016-05-31T20:09:57.000Z | 2022-02-18T11:43:50.000Z | plyse/parser.py | arcodergh/plyse | bb44543f9c812401489ceba68b24b8618d263830 | [
"MIT"
] | 13 | 2016-05-31T19:41:36.000Z | 2021-03-01T15:22:38.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from copy import deepcopy
from .query_tree import Operator, OperatorFactory, Operand, And, Or, Not
from .query import Query
from .term_parser import Term
class QueryParserError(Exception):
    """Raised when a grammar parse result cannot be folded into a query tree."""
class QueryParser(object):
    """Turns grammar parse results into :class:`Query` trees, and back to strings."""
    def __init__(self, grammar):
        self._grammar = grammar
    def parse(self, query_string, fail_if_syntax_mismatch=False):
        """
        Runs the query through the grammar and transforms the results into a boolean
        tree of operators and operands. Will raise ParseException if fail_if_syntax_mismatch
        is set and the full input can't be parsed.
        Grammar parse result should be a concatenation of lists where the elements/leafs
        are :class:Term 's representing the properties of each defined grammar element
        matched in the query string
        """
        return Query(
            self.parse_elements(self._grammar.parse(query_string, fail_if_syntax_mismatch)),
            raw_query=query_string
        )
    def parse_elements(self, elements, stack=None):
        # Recursively fold the flat grammar output into an operator tree,
        # keeping at most one pending operator/operand on the stack.
        if not elements:
            return deepcopy(stack.pop()) if stack else None
        stack = [] if not stack else stack
        e = elements[0]
        if type(e) is str and e.lower() in [And.type, Or.type, Not.type]:
            op = OperatorFactory.create(e)
            if op.has_left_operand():
                op.add_input(stack.pop())
            stack.append(op)
        else:
            if isinstance(e, dict):
                operand = Operand(**e)
            else:
                # Nested list: parse it as a sub-expression first.
                operand = self.parse_elements(e)
            if len(stack) == 0:
                stack.append(operand)
            elif isinstance(stack[-1], Operator) or isinstance(stack[-1], Operand):
                current_elem = stack.pop().add_input(operand)
                # 'Not' operator only works on the right element, if there was a previous operator
                # the stack would have 2 elements, so the new operand is added to the current operator
                # (Not operator) and then the Not operator is added as an input to the previous operator
                # finishing the cycle and leaving the stack with only one element (an operator)
                if stack:
                    current_elem = stack.pop().add_input(current_elem)
                stack.append(current_elem)
            else:
                msg = """The previous element of an operand should be None or another Operand.
                The inputted parse result is invalid! Type '{type}', Stack: {stack}"""
                raise QueryParserError(msg.format(type=type(stack[-1]), stack=stack))
        return self.parse_elements(elements[1:], stack)
    def stringify(self, query):
        """
        Converts a query into its original string representation (like reversing the original parsing)
        :param query: :class:Query representing the original query string
        :return: string
        """
        s = self._do_stringify(query.query_as_tree)
        # Strip the outermost parentheses added by _do_stringify.
        return s[1:-1] if s.startswith('(') else s
    def _do_stringify(self, node):
        # Recursive walk: leaves become "field:value", operators wrap children.
        if node.is_leaf:
            s = self._leaf_to_string(Term(**node))
        elif node.type is Not.type:
            s = Not.type + " " + self._do_stringify(node.children[0])
        else:
            s = "(%s %s %s)" % (self._do_stringify(node.children[0]), node.type, self._do_stringify(node.children[1]))
        return s
    def _leaf_to_string(self, term):
        if type(term.field) is list:
            s = str(term.value)
        else:
            # We are reverting the query to string, we have the already aliased fields and we want the original ones
            aliases = {v: k for k, v in iter(self._grammar.term_parser.aliases.items())}
            field = aliases[term.field] if term.field in aliases else term.field
            # List values denote ranges and are rendered as "low..high".
            value = "%s..%s" % (term.value[0], term.value[1]) if type(term.value) is list else term.value
            s = "%s:%s" % (field, value)
        return s
| 37.394495 | 118 | 0.60476 | 3,874 | 0.950442 | 0 | 0 | 0 | 0 | 0 | 0 | 1,354 | 0.332188 |
1abffd1e7ddaed83c42f68175043724dc70e197b | 3,244 | py | Python | get_aozora.py | lithium0003/Image2UTF8-Transformer | 2620af2a8bdaf332e25b39ce05d610e21e6492fc | [
"MIT"
] | null | null | null | get_aozora.py | lithium0003/Image2UTF8-Transformer | 2620af2a8bdaf332e25b39ce05d610e21e6492fc | [
"MIT"
] | null | null | null | get_aozora.py | lithium0003/Image2UTF8-Transformer | 2620af2a8bdaf332e25b39ce05d610e21e6492fc | [
"MIT"
] | null | null | null | import json
import sys
import urllib.parse
import urllib.request
import os
import zipfile
import io
import csv
import re
from html.parser import HTMLParser
# Map Aozora Bunko gaiji codes (men-ku-ten, e.g. '1-02-22') to Unicode
# characters, loaded from data/codepoints.csv (rows: code, hex codepoint).
code_list = {}
with open('data/codepoints.csv') as f:
    reader = csv.reader(f)
    for row in reader:
        d1,d2,d3 = row[0].split('-')
        d1 = int(d1)
        d2 = int(d2)
        d3 = int(d3)
        # Normalize to 'plane-row-cell' with zero-padded row/cell fields.
        code_list['%d-%02d-%02d'%(d1,d2,d3)] = chr(int(row[1], 16))
def get_aozora_urls():
    """Return the Aozora Bunko card HTML URLs listed in the catalogue CSV zip."""
    aozora_csv_url = 'https://www.aozora.gr.jp/index_pages/list_person_all_extended_utf8.zip'
    xhtml_urls = []
    html = urllib.request.urlopen(aozora_csv_url)
    with zipfile.ZipFile(io.BytesIO(html.read())) as myzip:
        with myzip.open('list_person_all_extended_utf8.csv') as myfile:
            reader = csv.reader(io.TextIOWrapper(myfile))
            idx = -1
            for row in reader:
                if idx < 0:
                    # Header row: locate the column whose title mentions
                    # both 'URL' and 'HTML'.
                    idx = [i for i, x in enumerate(row) if 'URL' in x]
                    idx = [i for i in idx if 'HTML' in row[i]]
                    if len(idx) == 0:
                        # NOTE(review): exit() aborts the interpreter on a
                        # malformed header; raising would be friendlier.
                        exit()
                    idx = idx[0]
                    continue
                if row[idx].startswith('https://www.aozora.gr.jp/cards/'):
                    xhtml_urls.append(row[idx])
    return xhtml_urls
class MyHTMLParser(HTMLParser):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.main = False
self.count = 0
self.startpos = (-1,-1)
self.endpos = (-1,-1)
def handle_starttag(self, tag, attrs):
if tag == 'div':
if self.main:
self.count += 1
elif ('class', 'main_text') in attrs:
self.main = True
self.startpos = self.getpos()
def handle_endtag(self, tag):
if tag == 'div':
if self.main:
if self.count == 0:
self.endpos = self.getpos()
else:
self.count -= 1
def get_contents(url):
html = urllib.request.urlopen(url)
contents = html.read().decode('cp932')
parser = MyHTMLParser()
parser.feed(contents)
maintext = []
for lineno, line in enumerate(contents.splitlines()):
if parser.startpos[0] == lineno + 1:
maintext.append(line[parser.startpos[1]:])
elif parser.startpos[0] < lineno + 1 <= parser.endpos[0]:
if parser.endpos[0] == lineno + 1:
if parser.endpos[1] == 0:
pass
else:
maintext.append(line[:parser.endpos[1]])
else:
maintext.append(line)
maintext = '\n'.join(maintext)
maintext = re.sub(r'<ruby><rb>(.*?)</rb>.*?</ruby>', r'\1', maintext)
m = True
while m:
m = re.search(r'<img .*?/(\d-\d\d-\d\d)\.png.*?>', maintext)
if m:
maintext = maintext[:m.start()] + code_list[m.group(1)] + maintext[m.end():]
maintext = re.sub(r'<span class="notes">.*?</span>', r'', maintext)
maintext = re.sub(r'<[^>]*?>', r'', maintext)
return maintext
if __name__ == '__main__':
urls = get_aozora_urls()
for u in urls:
print(u)
print(get_contents(u)) | 32.767677 | 93 | 0.526202 | 717 | 0.221023 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.111282 |
1ac017762eaadd38399b55a8bb81b1edf9f81ac2 | 3,677 | py | Python | python/EggNetExtension/setup.py | marbleton/FPGA_MNIST | 4b4a30e0adca35de9adcad7b3fec08c516260790 | [
"MIT"
] | 7 | 2019-11-13T12:24:36.000Z | 2021-03-31T02:39:35.000Z | python/EggNetExtension/setup.py | marbleton/FPGA_MNIST | 4b4a30e0adca35de9adcad7b3fec08c516260790 | [
"MIT"
] | 29 | 2019-12-17T22:06:04.000Z | 2022-03-12T00:20:45.000Z | python/EggNetExtension/setup.py | marbleton/FPGA_MNIST | 4b4a30e0adca35de9adcad7b3fec08c516260790 | [
"MIT"
] | 4 | 2019-10-20T15:12:52.000Z | 2020-10-13T13:36:37.000Z | #!/usr/bin/env python
"""
setup.py file for SWIG Interface of Ext
"""
import os
import platform
import re
import subprocess
import sys
from distutils.version import LooseVersion
from os import walk
import numpy
import wget
from setuptools import Extension
from setuptools import setup, find_packages
from setuptools.command.build_ext import build_ext
try:
# Obtain the numpy include directory. This logic works across numpy versions.
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
def readme():
with open('./README.md') as f:
return f.read()
def download_numpy_interface(path):
"""
Downloads numpy.i
:return: None
"""
print("Download Numpy SWIG Interface")
np_version = re.compile(r'(?P<MAJOR>[0-9]+)\.'
'(?P<MINOR>[0-9]+)') \
.search(numpy.__version__)
np_version_string = np_version.group()
np_version_info = {key: int(value)
for key, value in np_version.groupdict().items()}
np_file_name = 'numpy.i'
np_file_url = 'https://raw.githubusercontent.com/numpy/numpy/maintenance/' + \
np_version_string + '.x/tools/swig/' + np_file_name
if np_version_info['MAJOR'] == 1 and np_version_info['MINOR'] < 9:
np_file_url = np_file_url.replace('tools', 'doc')
wget.download(np_file_url, path)
return
# Download numpy.i if needed
if not os.path.exists('./EggNetExtension/numpy.i'):
print('Downloading numpy.i')
project_dir = os.path.dirname(os.path.abspath(__file__))
download_numpy_interface(path='./EggNetExtension/')
source_files = ['./EggNetExtension/NNExtension.i', './EggNetExtension/cconv.c',
'./EggNetExtension/cpool.c', './EggNetExtension/crelu.c',
'./EggNetExtension/cmatmul.c', './EggNetExtension/chelper.c']
print("************************ SOURCE FILES *************************")
print(source_files)
print("************************ SOURCE FILES *************************")
include_dirs = ['./EggNetExtension/', numpy_include]
# Simple Platform Check (not entirely accurate because here should the compiler be checked)
# ToDo: Should be done better for example via CMake -> https://www.benjack.io/2017/06/12/python-cpp-tests.html
if platform.system() == 'Linux':
extra_args = ['-std=gnu99']
elif platform.system() == 'Darwin':
extra_args = ['--verbose', '-Rpass=loop-vectorize', '-Rpass-analysis=loop-vectorize', '-ffast-math']
elif platform.system() == 'Windows':
# extra_args = ['/Qrestrict', '/W3']
extra_args = []
else:
raise RuntimeError('Operating System not supported?')
extra_link_args = []
NN_ext_module = Extension('EggNetExtension._EggNetExtension',
sources=source_files,
include_dirs=include_dirs,
swig_opts=['-py3'],
extra_compile_args=extra_args,
extra_link_args=extra_link_args,
depends=['numpy'],
optional=False)
setup(name='EggNetExtension',
version='1.0',
author="Benjamin Kulnik",
author_email="benjamin.kulnik@student.tuwien.ac.com",
license="MIT",
description="""NN calculation library for python""",
url='https://github.com/marbleton/FPGA_MNIST',
packages=['EggNetExtension'],
package_data={
# If any package contains *.txt or *.rst files, include them:
'': ['*.txt', '*.rst', '*.i', '*.c', '*.h'],
},
ext_modules=[NN_ext_module],
install_requires=['numpy', 'wget', 'idx2numpy'],
)
| 34.046296 | 110 | 0.622518 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,540 | 0.41882 |
1ac02a13f60d57e364428009ff4c96c1e00ef39d | 2,442 | py | Python | GT-ECONOMY-BOT/economy/trashmoney.py | iFanID/e.Koenomi-DBot | a017acaae810d402a4d76668daf83a0dd69ad9d7 | [
"Info-ZIP"
] | 1 | 2021-11-13T06:19:26.000Z | 2021-11-13T06:19:26.000Z | GT-ECONOMY-BOT/economy/trashmoney.py | iFanID/e.Koenomi-DBot | a017acaae810d402a4d76668daf83a0dd69ad9d7 | [
"Info-ZIP"
] | null | null | null | GT-ECONOMY-BOT/economy/trashmoney.py | iFanID/e.Koenomi-DBot | a017acaae810d402a4d76668daf83a0dd69ad9d7 | [
"Info-ZIP"
] | null | null | null | import discord
import subprocess
import os, random, re, requests, json
import asyncio
from datetime import datetime
from discord.ext import commands
class Economy(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print('[+] Trashmoney Code ACTIVE!')
@commands.cooldown(1, 60, commands.BucketType.user)
@commands.command(aliases=['tm'])
async def trashmoney(self,ctx,amount:int):
await open_account(ctx.author)
user = ctx.author
users = await get_bank_data()
balancee = await update_bank(ctx.author)
time = 10
if amount > balancee[0]:
await ctx.send('You poor lmao! what money u want to TRASH! NOOB!!!')
return
if amount < 0:
await ctx.send('You are poor or wrong put amount?')
return
await update_bank(ctx.author,-1*amount, 'wallet')
await ctx.send(f"{user} Trash he money! type [!!claim] to get the money!")
msg = await ctx.send(f'Member had {time}s to claim!')
with open('trash_money.txt','w') as f:
f.write(str(amount))
f.close()
while True:
time -= 1
if time == 0:
f = open('trash_money.txt','r')
if f.read() == '0':
await ctx.send('Someone claimed the trash money!')
else:
await ctx.send('No one claimed the trash money!')
break
await msg.edit(content=f'Member had {time}s to claim!')
await asyncio.sleep(1)
async def open_account(user):
users = await get_bank_data()
with open('./bank.json','r') as f:
users = json.load(f)
if str(user.id) in users:
return False
else:
users[str(user.id)] = {}
users[str(user.id)]["wallet"] = 0
users[str(user.id)]["bank"] = 0
with open('./bank.json','w') as f:
json.dump(users,f)
return True
async def get_bank_data():
with open('./bank.json','r') as f:
users = json.load(f)
return users
async def update_bank(user,change = 0,mode = 'wallet'):
users = await get_bank_data()
users[str(user.id)][mode] += change
with open('./bank.json','w') as f:
json.dump(users,f)
balancee = [users[str(user.id)]['wallet'],users[str(user.id)]['bank']]
return balancee
def setup(bot):
bot.add_cog(Economy(bot)) | 30.525 | 80 | 0.581081 | 1,410 | 0.577396 | 0 | 0 | 1,327 | 0.543407 | 2,026 | 0.829648 | 458 | 0.187551 |
1ac0e70ee50f70a3cd951509022bba75c1104f45 | 1,738 | gyp | Python | third_party/ctmalloc/ctmalloc.gyp | dandv/syzygy | 2444520c8e6e0b45b2f45b680d878d60b9636f45 | [
"Apache-2.0"
] | 1 | 2019-04-03T13:56:37.000Z | 2019-04-03T13:56:37.000Z | third_party/ctmalloc/ctmalloc.gyp | pombreda/syzygy | 7bac6936c0c28872bfabc10a1108e0157ff65d4a | [
"Apache-2.0"
] | 1 | 2015-03-19T18:20:25.000Z | 2015-03-19T18:20:25.000Z | third_party/ctmalloc/ctmalloc.gyp | sebmarchand/syzygy | 6c6db0e70e8161f1fec171138a825f6412e7778a | [
"Apache-2.0"
] | 1 | 2020-10-10T16:09:45.000Z | 2020-10-10T16:09:45.000Z | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Build configuration for ctmalloc. This is not a part of the original
# library.
{
'targets': [
{
'target_name': 'ctmalloc_lib',
'type': 'static_library',
'sources': [
'wtf/AsanHooks.cpp',
'wtf/AsanHooks.h',
'wtf/Assertions.h',
'wtf/Atomics.h',
'wtf/BitwiseOperations.h',
'wtf/ByteSwap.h',
'wtf/Compiler.h',
'wtf/config.h',
'wtf/CPU.h',
'wtf/malloc.cpp',
'wtf/PageAllocator.cpp',
'wtf/PageAllocator.h',
'wtf/PartitionAlloc.cpp',
'wtf/PartitionAlloc.h',
'wtf/ProcessID.h',
'wtf/SpinLock.h',
'wtf/WTFExport.h',
],
'defines': [
'CTMALLOC_NDEBUG',
],
'include_dirs': [
'<(src)/third_party/ctmalloc',
],
'all_dependent_settings': {
'defines': [
# We disable debug features of the CtMalloc heap as they are redundant
# given SyzyASan's extensive debug features.
'CTMALLOC_NDEBUG',
],
'include_dirs': [
'<(src)/third_party/ctmalloc',
],
},
},
],
}
| 28.491803 | 80 | 0.597814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,315 | 0.756617 |
1ac1f4c5e45389491c264f6ff24bf7abfbd16a93 | 1,515 | py | Python | src/chat/tasks.py | klapen/chat | 84273ae3276da77bff6a2529bd59786f9dc3afcb | [
"MIT"
] | null | null | null | src/chat/tasks.py | klapen/chat | 84273ae3276da77bff6a2529bd59786f9dc3afcb | [
"MIT"
] | 10 | 2019-05-24T21:37:38.000Z | 2022-02-11T03:43:09.000Z | src/chat/tasks.py | klapen/simplechat | 84273ae3276da77bff6a2529bd59786f9dc3afcb | [
"MIT"
] | null | null | null | from __future__ import absolute_import, unicode_literals
from celery import shared_task
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
import requests
import csv
@shared_task
def getStockQuote(room_group_name, stock_code):
url = 'https://stooq.com/q/l/?s=%s&f=sd2t2ohlcv&h&e=csv'
with requests.Session() as s:
response = s.get(url % stock_code)
if response.status_code != 200:
print('getStockQuote - Failed to get data: %s', response.status_code)
async_to_sync(channel_layer.group_send)(
room_group_name,
{
'type': 'bot_message',
'data': {
'command': 'bot_message',
'from': 'Bot stock',
'message': 'Error getting %s information' % stock_code.upper()
}
}
)
else:
data = csv.DictReader(response.text.strip().split('\n'))
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)(
room_group_name,
{
'type': 'bot_message',
'data': {
'command': 'bot_message',
'from': 'Bot stock',
'message': '%s quote is $%s per share' % (stock_code.upper(), next(data)['Close'])
}
}
)
return
| 36.95122 | 106 | 0.505611 | 0 | 0 | 0 | 0 | 1,315 | 0.866842 | 0 | 0 | 307 | 0.202373 |
1ac28d2ea873a98c9e255a253bd3c457c121b939 | 1,397 | py | Python | scripts/tests/test_summarize.py | JoshSkrzypczak/people | 2183b0feab9442222a88e698b849ff5351f9aea8 | [
"CC0-1.0"
] | 1 | 2021-04-19T20:42:59.000Z | 2021-04-19T20:42:59.000Z | scripts/tests/test_summarize.py | JoshSkrzypczak/people | 2183b0feab9442222a88e698b849ff5351f9aea8 | [
"CC0-1.0"
] | null | null | null | scripts/tests/test_summarize.py | JoshSkrzypczak/people | 2183b0feab9442222a88e698b849ff5351f9aea8 | [
"CC0-1.0"
] | null | null | null | from summarize import Summarizer
def test_person_summary():
s = Summarizer()
people = [
{
"gender": "F",
"image": "https://example.com/image1",
"party": [{"name": "Democratic"}, {"name": "Democratic", "end_date": "1990"}],
},
{
"gender": "F",
"image": "https://example.com/image2",
"party": [{"name": "Democratic"}, {"name": "Working Families"}],
"extras": {"religion": "Zoroastrian"},
"contact_details": [{"fax": "123-456-7890", "note": "Capitol Office"}],
"other_identifiers": [{"scheme": "fake", "identifier": "abc"}],
"ids": {"twitter": "fake"},
},
{
"gender": "M",
"image": "https://example.com/image3",
"party": [{"name": "Republican"}],
"contact_details": [{"phone": "123-456-7890", "note": "Capitol Office"}],
"other_identifiers": [{"scheme": "fake", "identifier": "123"}],
},
]
for p in people:
s.summarize(p)
assert s.parties == {"Republican": 1, "Democratic": 2, "Working Families": 1}
assert s.contact_counts == {"Capitol Office phone": 1, "Capitol Office fax": 1}
assert s.id_counts == {"fake": 2, "twitter": 1}
assert s.optional_fields == {"gender": 3, "image": 3}
assert s.extra_counts == {"religion": 1}
| 35.820513 | 90 | 0.503937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 664 | 0.475304 |
1ac31d5a21b2b7fca6b188dc0ccbac2303f50fbe | 853 | py | Python | ex091.py | paulo-caixeta/Exercicios_Curso_Python | 3b77925499c174ea9ff81dec65d6319125219b9a | [
"MIT"
] | null | null | null | ex091.py | paulo-caixeta/Exercicios_Curso_Python | 3b77925499c174ea9ff81dec65d6319125219b9a | [
"MIT"
] | null | null | null | ex091.py | paulo-caixeta/Exercicios_Curso_Python | 3b77925499c174ea9ff81dec65d6319125219b9a | [
"MIT"
] | null | null | null | """Exercício Python 091: Crie um programa onde 4 jogadores joguem um dado e tenham resultados aleatórios.
Guarde esses resultados em um dicionário em Python. No final, coloque esse dicionário em ordem,
sabendo que o vencedor tirou o maior número no dado."""
from random import randint
from time import sleep
from operator import itemgetter
ranking = list()
resultados = {'Jogado 1': randint(1, 6),
'Jogador 2': randint(1, 6),
'Jogador 3': randint(1, 6),
'Jogador 4': randint(1,6)}
print(resultados)
print('Valores sorteados')
print('-='*25)
for k, v in resultados.items():
print(f'O {k} tirou {v} no dado.')
sleep(0.5)
ranking = sorted(resultados.items(), key=itemgetter(1), reverse=True)
print(' == RANKING DOS JOGADORES ==')
for i, v in enumerate(ranking):
print(f' {i+1}º lugar: {v[0]} com {v[1]}') | 40.619048 | 105 | 0.675264 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 425 | 0.494761 |
1ac4c3ae760d51232d569cf1431cc2c3ab3cdc2f | 1,499 | py | Python | _/Chapter 03/transfrauddetect.py | paullewallencom/hadoop-978-1-7839-8030-7 | 267f24e736dcee0910593d9ff76c10387e6406c3 | [
"Apache-2.0"
] | 2 | 2019-05-25T22:48:59.000Z | 2021-10-04T04:52:58.000Z | _/Chapter 03/transfrauddetect.py | paullewallencom/hadoop-978-1-7839-8030-7 | 267f24e736dcee0910593d9ff76c10387e6406c3 | [
"Apache-2.0"
] | null | null | null | _/Chapter 03/transfrauddetect.py | paullewallencom/hadoop-978-1-7839-8030-7 | 267f24e736dcee0910593d9ff76c10387e6406c3 | [
"Apache-2.0"
] | 6 | 2016-12-27T13:57:45.000Z | 2021-04-22T18:33:14.000Z | # Submit to spark using
# spark-submit /Users/anurag/hdproject/eclipse/chapt3/transfrauddetect.py
# You need the full path of the python script
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.mllib.clustering import KMeans, KMeansModel
from pyspark.streaming import StreamingContext
from pyspark.mllib.linalg import Vectors
def detect(rdd):
count = rdd.count()
print "RDD -> ", count
if count > 0:
arrays = rdd.map(lambda line: [float(x) for x in line.split(" ")])
print arrays.collect()
indx = 0
while indx < count:
vec = Vectors.dense(arrays.collect()[indx])
indx += 1
clusternum = model.predict(vec)
print "Cluster -> ", clusternum, vec
return
# Create a local StreamingContext with two working thread and batch interval of 1 second
conf = SparkConf().setAppName("Fraud Detector")
conf = conf.setMaster("local[2]")
sc = SparkContext(conf=conf)
ssc = StreamingContext(sc, 10)
# Create a DStream that will connect to hostname:port, like localhost:9999
lines = ssc.socketTextStream("localhost", 8999)
# Split each line into words
model = KMeansModel.load(sc, "kmeansmodel01")
print model.clusterCenters
print "************************** Loaded the model *********************"
words = lines.flatMap(lambda line: line.split(" "))
lines.foreachRDD(detect)
ssc.start() # Start the computation
ssc.awaitTermination() # Wait for the computation to terminate
| 33.311111 | 88 | 0.683122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 540 | 0.36024 |
1ac55722387ed906681600c081466660b9eb3f11 | 904 | py | Python | Bioinformatics Stronghold/(3) Complementing a Stand of DNA/Complementing a Stand of DNA/Complementing_a_Stand_of_DNA.py | LawTam/ROSALIND | 18c4eaa4ad094d4c5f77af7211c93b88e3e9eb4f | [
"MIT"
] | null | null | null | Bioinformatics Stronghold/(3) Complementing a Stand of DNA/Complementing a Stand of DNA/Complementing_a_Stand_of_DNA.py | LawTam/ROSALIND | 18c4eaa4ad094d4c5f77af7211c93b88e3e9eb4f | [
"MIT"
] | null | null | null | Bioinformatics Stronghold/(3) Complementing a Stand of DNA/Complementing a Stand of DNA/Complementing_a_Stand_of_DNA.py | LawTam/ROSALIND | 18c4eaa4ad094d4c5f77af7211c93b88e3e9eb4f | [
"MIT"
] | null | null | null | def main():
# Manage input file
input = open(r"C:\Users\lawht\Desktop\Github\ROSALIND\Bioinformatics Stronghold\Complementing a Stand of DNA\Complementing a Stand of DNA\rosalind_revc.txt","r")
DNA_string = input.readline(); # take first line of input file for counting
# Take in input file of DNA string and print out its reverse complement
print(reverse_complement(DNA_string))
input.close()
# Given: A DNA string s of length at most 1000 bp.
# Return: The reverse complement sc of s.
def reverse_complement(s):
sc = "";
for n in reversed(s):
if n == "A":
sc = sc + "T"
elif n == "T":
sc = sc + "A"
elif n == "C":
sc = sc + "G"
elif n == "G":
sc = sc + "C"
else:
continue
return sc
# Manually call main() on the file load
if __name__ == "__main__":
main() | 29.16129 | 165 | 0.586283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 446 | 0.493363 |
1ac5abcc5a3efb1d526c5d1acedc8e1845408bcd | 950 | py | Python | binary_tree_postorder_traversal/solution.py | mahimadubey/leetcode-python | 38acc65fa4315f86acb62874ca488620c5d77e17 | [
"BSD-2-Clause"
] | 528 | 2015-01-08T21:27:06.000Z | 2022-03-17T09:23:44.000Z | binary_tree_postorder_traversal/solution.py | durgaharish1993/leetcode-python | 6c523ef4759a57433e10271b584eece16f9f05f3 | [
"BSD-2-Clause"
] | null | null | null | binary_tree_postorder_traversal/solution.py | durgaharish1993/leetcode-python | 6c523ef4759a57433e10271b584eece16f9f05f3 | [
"BSD-2-Clause"
] | 278 | 2015-01-12T06:45:17.000Z | 2022-02-20T08:09:22.000Z | """
Given a binary tree, return the postorder traversal of its nodes' values.
For example:
Given binary tree {1,#,2,3},
1
\
2
/
3
return [3,2,1].
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def postorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
path = []
if root is None:
return path
stack1 = []
stack2 = []
stack1.append(root)
while stack1:
root = stack1.pop()
stack2.append(root.val)
if root.left is not None:
stack1.append(root.left)
if root.right is not None:
stack1.append(root.right)
while stack2:
path.append(stack2.pop())
return path
| 22.093023 | 73 | 0.513684 | 607 | 0.638947 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.424211 |
1ac5b8ba600d2983a2c4bfa715ce743d29c60c5b | 687 | py | Python | src/test/base.py | inova-tecnologias/jenova | c975f0894b8663c6a9c9fdc7fa33590a219a6ad3 | [
"Apache-2.0"
] | 2 | 2016-08-10T15:08:47.000Z | 2016-10-25T14:27:51.000Z | src/test/base.py | inova-tecnologias/jenova | c975f0894b8663c6a9c9fdc7fa33590a219a6ad3 | [
"Apache-2.0"
] | 41 | 2016-08-04T20:19:49.000Z | 2017-03-07T20:05:53.000Z | src/test/base.py | inova-tecnologias/jenova | c975f0894b8663c6a9c9fdc7fa33590a219a6ad3 | [
"Apache-2.0"
] | 3 | 2016-09-26T19:04:51.000Z | 2017-10-26T22:13:45.000Z | import yaml
class BaseTest(object):
def setUp(self):
with open("properties.yaml") as f:
self.cfg = yaml.safe_load(f)
self.general = self.cfg['general']
self.reseller = self.cfg['reseller']
self.client = self.cfg['client']
self.service_zimbra = self.cfg['service_zimbra']
self.service_mxhero = self.cfg['service_mxhero']
self.service_dns = self.cfg['service_dns']
self.user = self.cfg['user']
self.domain = self.cfg['domain']
self.dlists = self.cfg['dlists']
self.general['headers'] = {
'Content-Type' : 'application/json',
'Authorization' : 'Bearer %s' % self.general['token']
}
def tearDown(self):
pass | 26.423077 | 59 | 0.636099 | 672 | 0.978166 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.269287 |
1ac820cbcf7d0d98431fa2fed01dad0baafedf01 | 99 | py | Python | apps/test_find_application_with_mainflow/views.py | HeMan/jobbergate | 1381821aafe3d217ee22078be09104a566ec2420 | [
"MIT"
] | 4 | 2019-11-05T09:30:43.000Z | 2020-04-22T15:24:31.000Z | apps/test_find_application_with_mainflow/views.py | HeMan/jobbergate | 1381821aafe3d217ee22078be09104a566ec2420 | [
"MIT"
] | 52 | 2019-10-17T09:46:09.000Z | 2020-05-19T07:39:19.000Z | apps/test_find_application_with_mainflow/views.py | HeMan/jobbergate | 1381821aafe3d217ee22078be09104a566ec2420 | [
"MIT"
] | 1 | 2020-02-18T13:38:25.000Z | 2020-02-18T13:38:25.000Z | from jobbergate import appform
def mainflow(data):
return [appform.Const("val", default=10)]
| 16.5 | 45 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.050505 |
1acfdd5cb2dc4513136eed0af799bf76bf0968d8 | 105 | py | Python | crawlerAPI/Stay_Hungry_API/apps.py | epikjjh/Stay_Hungry_Server | 4d0ab3a2313c6a8f8a21053ced9834e7f2a13995 | [
"MIT"
] | null | null | null | crawlerAPI/Stay_Hungry_API/apps.py | epikjjh/Stay_Hungry_Server | 4d0ab3a2313c6a8f8a21053ced9834e7f2a13995 | [
"MIT"
] | null | null | null | crawlerAPI/Stay_Hungry_API/apps.py | epikjjh/Stay_Hungry_Server | 4d0ab3a2313c6a8f8a21053ced9834e7f2a13995 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class OsoricrawlerapiConfig(AppConfig):
name = 'osoriCrawlerAPI'
| 17.5 | 39 | 0.790476 | 68 | 0.647619 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.161905 |
1ad00f5bdba69c5627c62e40a06d13f11f8f971b | 2,186 | py | Python | quotes/tests/requests/test_home_page.py | daviferreira/defprogramming | a4ec20a6a9d116eb1f82fd146e4bb7a2fad5a516 | [
"MIT"
] | 6 | 2016-01-17T02:21:51.000Z | 2020-09-01T20:16:36.000Z | quotes/tests/requests/test_home_page.py | daviferreira/defprogramming | a4ec20a6a9d116eb1f82fd146e4bb7a2fad5a516 | [
"MIT"
] | 3 | 2017-11-27T17:02:50.000Z | 2021-01-21T14:22:36.000Z | quotes/tests/requests/test_home_page.py | daviferreira/defprogramming | a4ec20a6a9d116eb1f82fd146e4bb7a2fad5a516 | [
"MIT"
] | null | null | null | # coding: utf-8
from lxml import html
from django.test import TestCase
from django.test.client import Client
from quotes.tests.utils import create_test_quote
class HomePageTestCase(TestCase):
def setUp(self):
self.client = Client()
self.quote = create_test_quote()
self.dom = ''
self.quotes = []
def tearDown(self):
self.quote = ''
self.dom = ''
self.quotes = []
def __load_dom(self):
response = self.client.get('/')
self.dom = html.fromstring(response.content)
def testHomePageResponse(self):
response = self.client.get('/')
self.failUnlessEqual(response.status_code, 200)
def testHomePageShouldHaveTheRightTitle(self):
self.__load_dom()
assert self.dom.cssselect('h1 a')[0].text_content(), 'def programming'
def testHomePageShouldListQuotes(self):
self.__load_dom()
assert len(self.dom.cssselect('div.quote-card')), 1
assert self.dom.cssselect('div.quote-card q')[0].text_content(), self.quote.body
assert self.dom.cssselect('div.quote-card .quote-card-author')[0].text_content(), 'Author 1 & Author 2'
assert self.dom.cssselect('div.quote-card .quote-card-tags')[0].text_content(), 'tagged under Tag 1, Tag 2'
# assert self.dom.cssselect('div.quote-card q a')[0].attrib['href'], ("/q/%s/" % self.quote.uuid)
# TODO: not a home page test, more like a site test
# should also test for footer links
def testHomePageShouldShowMenu(self):
self.__load_dom()
menu_links = self.dom.cssselect('header nav a')
assert len(menu_links), 6
assert menu_links[0].text_content(), 'Home'
assert menu_links[0].attrib['href'], '/'
assert menu_links[1].text_content(), 'Authors'
assert menu_links[1].attrib['href'], '/authors'
assert menu_links[2].text_content(), 'Tags'
assert menu_links[2].attrib['href'], '/tags'
assert menu_links[3].text_content(), 'Random'
assert menu_links[3].attrib['href'], '/random'
assert menu_links[4].text_content(), 'Submit'
assert menu_links[4].attrib['href'], '/submit' | 37.689655 | 115 | 0.643184 | 2,024 | 0.925892 | 0 | 0 | 0 | 0 | 0 | 0 | 502 | 0.229643 |
1ad0dd75967c20852aadd774c8b8d093d2adc738 | 414 | py | Python | src/components/decode_encode/confirm.py | DuckyMomo20012/flask-server | 62a7d351b42f72dd03e886ebf7e393a62a7023b1 | [
"MIT"
] | 3 | 2022-01-12T15:16:44.000Z | 2022-03-06T12:51:06.000Z | src/components/decode_encode/confirm.py | DuckyMomo20012/flask-server | 62a7d351b42f72dd03e886ebf7e393a62a7023b1 | [
"MIT"
] | null | null | null | src/components/decode_encode/confirm.py | DuckyMomo20012/flask-server | 62a7d351b42f72dd03e886ebf7e393a62a7023b1 | [
"MIT"
] | 1 | 2022-03-06T12:48:40.000Z | 2022-03-06T12:48:40.000Z | from numpy import char
from function_support import *
def confirm(n,e,d):
s = 'i have publicKey'
temp = ""
encode = []
#encrypt
for i in s:
c = powermod(ord(i),e,n)
encode.append(c)
#decrypt
for i in encode:
m = powermod(i,d,n)
print(m)
temp = temp + chr(m)
print(temp)
if s == temp:
return True
else:
return False
| 18 | 32 | 0.519324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.086957 |
1ad19b8e5fbe29f4b8f2f258cc293e0fd9d3e22f | 678 | py | Python | luckydonaldUtils/regex/telegram.py | luckydonald/python-utils | 455f5174707804a39384776185b8bc307223e19f | [
"MIT"
] | 5 | 2016-12-06T00:49:21.000Z | 2019-10-03T04:18:13.000Z | luckydonaldUtils/regex/telegram.py | luckydonald/python-utils | 455f5174707804a39384776185b8bc307223e19f | [
"MIT"
] | 5 | 2016-03-19T02:08:14.000Z | 2018-12-01T02:30:19.000Z | luckydonaldUtils/regex/telegram.py | luckydonald/python-utils | 455f5174707804a39384776185b8bc307223e19f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
__author__ = 'luckydonald'
__all__ = [
'USERNAME_REGEX', '_USERNAME_REGEX', 'USER_AT_REGEX', '_USER_AT_REGEX',
'FULL_USERNAME_REGEX', '_FULL_USERNAME_REGEX'
]
_USERNAME_REGEX = '[a-zA-Z](?:[a-zA-Z0-9]|_(?!_)){3,30}[a-zA-Z0-9]' # https://regex101.com/r/nZdOHS/2
USERNAME_REGEX = re.compile(_USERNAME_REGEX)
_USER_AT_REGEX = '@(?P<username>' + _USERNAME_REGEX + ')'
USER_AT_REGEX = re.compile(_USER_AT_REGEX)
from .urls.telegram import _TELEGRAM_DOMAIN_REGEX
_FULL_USERNAME_REGEX = '(?P<prefix>(?P<domain>' + _TELEGRAM_DOMAIN_REGEX + ')|@)(?P<username>' + _USERNAME_REGEX + ')'
FULL_USERNAME_REGEX = re.compile(_FULL_USERNAME_REGEX)
| 35.684211 | 118 | 0.715339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.427729 |
46b8fb46b8c0cdd613c5bd9b26b79ccbc17c69b5 | 42,439 | py | Python | sdk/python/pulumi_azure_nextgen/compute/v20200930/outputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/compute/v20200930/outputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/compute/v20200930/outputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'DataDiskImageEncryptionResponse',
'DisallowedResponse',
'EncryptionImagesResponse',
'GalleryApplicationVersionPublishingProfileResponse',
'GalleryArtifactVersionSourceResponse',
'GalleryDataDiskImageResponse',
'GalleryIdentifierResponse',
'GalleryImageFeatureResponse',
'GalleryImageIdentifierResponse',
'GalleryImageVersionPublishingProfileResponse',
'GalleryImageVersionStorageProfileResponse',
'GalleryOSDiskImageResponse',
'ImagePurchasePlanResponse',
'OSDiskImageEncryptionResponse',
'RecommendedMachineConfigurationResponse',
'RegionalReplicationStatusResponse',
'ReplicationStatusResponse',
'ResourceRangeResponse',
'SharingProfileGroupResponse',
'SharingProfileResponse',
'TargetRegionResponse',
'UserArtifactManageResponse',
'UserArtifactSourceResponse',
]
@pulumi.output_type
class DataDiskImageEncryptionResponse(dict):
"""
Contains encryption settings for a data disk image.
"""
def __init__(__self__, *,
lun: int,
disk_encryption_set_id: Optional[str] = None):
"""
Contains encryption settings for a data disk image.
:param int lun: This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
:param str disk_encryption_set_id: A relative URI containing the resource ID of the disk encryption set.
"""
pulumi.set(__self__, "lun", lun)
if disk_encryption_set_id is not None:
pulumi.set(__self__, "disk_encryption_set_id", disk_encryption_set_id)
@property
@pulumi.getter
def lun(self) -> int:
"""
This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
"""
return pulumi.get(self, "lun")
@property
@pulumi.getter(name="diskEncryptionSetId")
def disk_encryption_set_id(self) -> Optional[str]:
"""
A relative URI containing the resource ID of the disk encryption set.
"""
return pulumi.get(self, "disk_encryption_set_id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DisallowedResponse(dict):
"""
Describes the disallowed disk types.
"""
def __init__(__self__, *,
disk_types: Optional[Sequence[str]] = None):
"""
Describes the disallowed disk types.
:param Sequence[str] disk_types: A list of disk types.
"""
if disk_types is not None:
pulumi.set(__self__, "disk_types", disk_types)
@property
@pulumi.getter(name="diskTypes")
def disk_types(self) -> Optional[Sequence[str]]:
"""
A list of disk types.
"""
return pulumi.get(self, "disk_types")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EncryptionImagesResponse(dict):
    """
    Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact.
    """

    def __init__(self, *,
                 data_disk_images: Optional[Sequence['outputs.DataDiskImageEncryptionResponse']] = None,
                 os_disk_image: Optional['outputs.OSDiskImageEncryptionResponse'] = None):
        """
        Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact.

        :param Sequence['DataDiskImageEncryptionResponseArgs'] data_disk_images: A list of encryption specifications for data disk images.
        :param 'OSDiskImageEncryptionResponseArgs' os_disk_image: Contains encryption settings for an OS disk image.
        """
        # Store only the values the caller actually provided.
        supplied = {
            "data_disk_images": data_disk_images,
            "os_disk_image": os_disk_image,
        }
        for key, value in supplied.items():
            if value is not None:
                pulumi.set(self, key, value)

    @property
    @pulumi.getter(name="dataDiskImages")
    def data_disk_images(self) -> Optional[Sequence['outputs.DataDiskImageEncryptionResponse']]:
        """
        A list of encryption specifications for data disk images.
        """
        return pulumi.get(self, "data_disk_images")

    @property
    @pulumi.getter(name="osDiskImage")
    def os_disk_image(self) -> Optional['outputs.OSDiskImageEncryptionResponse']:
        """
        Contains encryption settings for an OS disk image.
        """
        return pulumi.get(self, "os_disk_image")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class GalleryApplicationVersionPublishingProfileResponse(dict):
    """
    The publishing profile of a gallery image version.
    """
    def __init__(__self__, *,
                 published_date: str,
                 source: 'outputs.UserArtifactSourceResponse',
                 enable_health_check: Optional[bool] = None,
                 end_of_life_date: Optional[str] = None,
                 exclude_from_latest: Optional[bool] = None,
                 manage_actions: Optional['outputs.UserArtifactManageResponse'] = None,
                 replica_count: Optional[int] = None,
                 storage_account_type: Optional[str] = None,
                 target_regions: Optional[Sequence['outputs.TargetRegionResponse']] = None):
        """
        The publishing profile of a gallery image version.
        :param str published_date: The timestamp for when the gallery image version is published.
        :param 'UserArtifactSourceResponseArgs' source: The source image from which the Image Version is going to be created.
        :param bool enable_health_check: Optional. Whether or not this application reports health.
        :param str end_of_life_date: The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
        :param bool exclude_from_latest: If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
        :param 'UserArtifactManageResponseArgs' manage_actions: The install, remove, and optional update commands used to manage the gallery application.
        :param int replica_count: The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
        :param str storage_account_type: Specifies the storage account type to be used to store the image. This property is not updatable.
        :param Sequence['TargetRegionResponseArgs'] target_regions: The target regions where the Image Version is going to be replicated to. This property is updatable.
        """
        pulumi.set(__self__, "published_date", published_date)
        pulumi.set(__self__, "source", source)
        # Optional fields are stored only when the caller supplied them.
        if enable_health_check is not None:
            pulumi.set(__self__, "enable_health_check", enable_health_check)
        if end_of_life_date is not None:
            pulumi.set(__self__, "end_of_life_date", end_of_life_date)
        if exclude_from_latest is not None:
            pulumi.set(__self__, "exclude_from_latest", exclude_from_latest)
        if manage_actions is not None:
            pulumi.set(__self__, "manage_actions", manage_actions)
        if replica_count is not None:
            pulumi.set(__self__, "replica_count", replica_count)
        if storage_account_type is not None:
            pulumi.set(__self__, "storage_account_type", storage_account_type)
        if target_regions is not None:
            pulumi.set(__self__, "target_regions", target_regions)

    @property
    @pulumi.getter(name="publishedDate")
    def published_date(self) -> str:
        """
        The timestamp for when the gallery image version is published.
        """
        return pulumi.get(self, "published_date")

    @property
    @pulumi.getter
    def source(self) -> 'outputs.UserArtifactSourceResponse':
        """
        The source image from which the Image Version is going to be created.
        """
        return pulumi.get(self, "source")

    @property
    @pulumi.getter(name="enableHealthCheck")
    def enable_health_check(self) -> Optional[bool]:
        """
        Optional. Whether or not this application reports health.
        """
        return pulumi.get(self, "enable_health_check")

    @property
    @pulumi.getter(name="endOfLifeDate")
    def end_of_life_date(self) -> Optional[str]:
        """
        The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
        """
        return pulumi.get(self, "end_of_life_date")

    @property
    @pulumi.getter(name="excludeFromLatest")
    def exclude_from_latest(self) -> Optional[bool]:
        """
        If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
        """
        return pulumi.get(self, "exclude_from_latest")

    @property
    @pulumi.getter(name="manageActions")
    def manage_actions(self) -> Optional['outputs.UserArtifactManageResponse']:
        """
        The install, remove, and optional update commands used to manage the gallery application.
        """
        return pulumi.get(self, "manage_actions")

    @property
    @pulumi.getter(name="replicaCount")
    def replica_count(self) -> Optional[int]:
        """
        The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
        """
        return pulumi.get(self, "replica_count")

    @property
    @pulumi.getter(name="storageAccountType")
    def storage_account_type(self) -> Optional[str]:
        """
        Specifies the storage account type to be used to store the image. This property is not updatable.
        """
        return pulumi.get(self, "storage_account_type")

    @property
    @pulumi.getter(name="targetRegions")
    def target_regions(self) -> Optional[Sequence['outputs.TargetRegionResponse']]:
        """
        The target regions where the Image Version is going to be replicated to. This property is updatable.
        """
        return pulumi.get(self, "target_regions")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GalleryArtifactVersionSourceResponse(dict):
    """
    The gallery artifact version source.
    """

    def __init__(self, *,
                 id: Optional[str] = None,
                 uri: Optional[str] = None):
        """
        The gallery artifact version source.

        :param str id: The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource.
        :param str uri: The uri of the gallery artifact version source. Currently used to specify vhd/blob source.
        """
        # Store only the values the caller actually provided.
        supplied = {"id": id, "uri": uri}
        for key, value in supplied.items():
            if value is not None:
                pulumi.set(self, key, value)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def uri(self) -> Optional[str]:
        """
        The uri of the gallery artifact version source. Currently used to specify vhd/blob source.
        """
        return pulumi.get(self, "uri")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class GalleryDataDiskImageResponse(dict):
    """
    This is the data disk image.
    """

    def __init__(self, *,
                 lun: int,
                 size_in_gb: int,
                 host_caching: Optional[str] = None,
                 source: Optional['outputs.GalleryArtifactVersionSourceResponse'] = None):
        """
        This is the data disk image.

        :param int lun: This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
        :param int size_in_gb: This property indicates the size of the VHD to be created.
        :param str host_caching: The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
        :param 'GalleryArtifactVersionSourceResponseArgs' source: The gallery artifact version source.
        """
        # Required fields are always recorded.
        for key, value in (("lun", lun), ("size_in_gb", size_in_gb)):
            pulumi.set(self, key, value)
        # Optional fields are recorded only when supplied.
        for key, value in (("host_caching", host_caching), ("source", source)):
            if value is not None:
                pulumi.set(self, key, value)

    @property
    @pulumi.getter
    def lun(self) -> int:
        """
        This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
        """
        return pulumi.get(self, "lun")

    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> int:
        """
        This property indicates the size of the VHD to be created.
        """
        return pulumi.get(self, "size_in_gb")

    @property
    @pulumi.getter(name="hostCaching")
    def host_caching(self) -> Optional[str]:
        """
        The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
        """
        return pulumi.get(self, "host_caching")

    @property
    @pulumi.getter
    def source(self) -> Optional['outputs.GalleryArtifactVersionSourceResponse']:
        """
        The gallery artifact version source.
        """
        return pulumi.get(self, "source")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class GalleryIdentifierResponse(dict):
    """
    Describes the gallery unique name.
    """

    def __init__(self, *,
                 unique_name: str):
        """
        Describes the gallery unique name.

        :param str unique_name: The unique name of the Shared Image Gallery. This name is generated automatically by Azure.
        """
        pulumi.set(self, "unique_name", unique_name)

    @property
    @pulumi.getter(name="uniqueName")
    def unique_name(self) -> str:
        """
        The unique name of the Shared Image Gallery. This name is generated automatically by Azure.
        """
        return pulumi.get(self, "unique_name")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class GalleryImageFeatureResponse(dict):
    """
    A feature for gallery image.
    """

    def __init__(self, *,
                 name: Optional[str] = None,
                 value: Optional[str] = None):
        """
        A feature for gallery image.

        :param str name: The name of the gallery image feature.
        :param str value: The value of the gallery image feature.
        """
        # Store only the values the caller actually provided.
        supplied = {"name": name, "value": value}
        for key, val in supplied.items():
            if val is not None:
                pulumi.set(self, key, val)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the gallery image feature.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        The value of the gallery image feature.
        """
        return pulumi.get(self, "value")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class GalleryImageIdentifierResponse(dict):
    """
    This is the gallery image definition identifier.
    """

    def __init__(self, *,
                 offer: str,
                 publisher: str,
                 sku: str):
        """
        This is the gallery image definition identifier.

        :param str offer: The name of the gallery image definition offer.
        :param str publisher: The name of the gallery image definition publisher.
        :param str sku: The name of the gallery image definition SKU.
        """
        # All three identifier components are required.
        for key, value in (("offer", offer),
                           ("publisher", publisher),
                           ("sku", sku)):
            pulumi.set(self, key, value)

    @property
    @pulumi.getter
    def offer(self) -> str:
        """
        The name of the gallery image definition offer.
        """
        return pulumi.get(self, "offer")

    @property
    @pulumi.getter
    def publisher(self) -> str:
        """
        The name of the gallery image definition publisher.
        """
        return pulumi.get(self, "publisher")

    @property
    @pulumi.getter
    def sku(self) -> str:
        """
        The name of the gallery image definition SKU.
        """
        return pulumi.get(self, "sku")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class GalleryImageVersionPublishingProfileResponse(dict):
    """
    The publishing profile of a gallery image Version.
    """

    def __init__(self, *,
                 published_date: str,
                 end_of_life_date: Optional[str] = None,
                 exclude_from_latest: Optional[bool] = None,
                 replica_count: Optional[int] = None,
                 storage_account_type: Optional[str] = None,
                 target_regions: Optional[Sequence['outputs.TargetRegionResponse']] = None):
        """
        The publishing profile of a gallery image Version.

        :param str published_date: The timestamp for when the gallery image version is published.
        :param str end_of_life_date: The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
        :param bool exclude_from_latest: If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
        :param int replica_count: The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
        :param str storage_account_type: Specifies the storage account type to be used to store the image. This property is not updatable.
        :param Sequence['TargetRegionResponseArgs'] target_regions: The target regions where the Image Version is going to be replicated to. This property is updatable.
        """
        pulumi.set(self, "published_date", published_date)
        # Optional fields are stored only when the caller supplied them.
        optional = {
            "end_of_life_date": end_of_life_date,
            "exclude_from_latest": exclude_from_latest,
            "replica_count": replica_count,
            "storage_account_type": storage_account_type,
            "target_regions": target_regions,
        }
        for key, value in optional.items():
            if value is not None:
                pulumi.set(self, key, value)

    @property
    @pulumi.getter(name="publishedDate")
    def published_date(self) -> str:
        """
        The timestamp for when the gallery image version is published.
        """
        return pulumi.get(self, "published_date")

    @property
    @pulumi.getter(name="endOfLifeDate")
    def end_of_life_date(self) -> Optional[str]:
        """
        The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
        """
        return pulumi.get(self, "end_of_life_date")

    @property
    @pulumi.getter(name="excludeFromLatest")
    def exclude_from_latest(self) -> Optional[bool]:
        """
        If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
        """
        return pulumi.get(self, "exclude_from_latest")

    @property
    @pulumi.getter(name="replicaCount")
    def replica_count(self) -> Optional[int]:
        """
        The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
        """
        return pulumi.get(self, "replica_count")

    @property
    @pulumi.getter(name="storageAccountType")
    def storage_account_type(self) -> Optional[str]:
        """
        Specifies the storage account type to be used to store the image. This property is not updatable.
        """
        return pulumi.get(self, "storage_account_type")

    @property
    @pulumi.getter(name="targetRegions")
    def target_regions(self) -> Optional[Sequence['outputs.TargetRegionResponse']]:
        """
        The target regions where the Image Version is going to be replicated to. This property is updatable.
        """
        return pulumi.get(self, "target_regions")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class GalleryImageVersionStorageProfileResponse(dict):
    """
    This is the storage profile of a Gallery Image Version.
    """

    def __init__(self, *,
                 data_disk_images: Optional[Sequence['outputs.GalleryDataDiskImageResponse']] = None,
                 os_disk_image: Optional['outputs.GalleryOSDiskImageResponse'] = None,
                 source: Optional['outputs.GalleryArtifactVersionSourceResponse'] = None):
        """
        This is the storage profile of a Gallery Image Version.

        :param Sequence['GalleryDataDiskImageResponseArgs'] data_disk_images: A list of data disk images.
        :param 'GalleryOSDiskImageResponseArgs' os_disk_image: This is the OS disk image.
        :param 'GalleryArtifactVersionSourceResponseArgs' source: The gallery artifact version source.
        """
        # Store only the values the caller actually provided.
        supplied = {
            "data_disk_images": data_disk_images,
            "os_disk_image": os_disk_image,
            "source": source,
        }
        for key, value in supplied.items():
            if value is not None:
                pulumi.set(self, key, value)

    @property
    @pulumi.getter(name="dataDiskImages")
    def data_disk_images(self) -> Optional[Sequence['outputs.GalleryDataDiskImageResponse']]:
        """
        A list of data disk images.
        """
        return pulumi.get(self, "data_disk_images")

    @property
    @pulumi.getter(name="osDiskImage")
    def os_disk_image(self) -> Optional['outputs.GalleryOSDiskImageResponse']:
        """
        This is the OS disk image.
        """
        return pulumi.get(self, "os_disk_image")

    @property
    @pulumi.getter
    def source(self) -> Optional['outputs.GalleryArtifactVersionSourceResponse']:
        """
        The gallery artifact version source.
        """
        return pulumi.get(self, "source")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class GalleryOSDiskImageResponse(dict):
    """
    This is the OS disk image.
    """

    def __init__(self, *,
                 size_in_gb: int,
                 host_caching: Optional[str] = None,
                 source: Optional['outputs.GalleryArtifactVersionSourceResponse'] = None):
        """
        This is the OS disk image.

        :param int size_in_gb: This property indicates the size of the VHD to be created.
        :param str host_caching: The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
        :param 'GalleryArtifactVersionSourceResponseArgs' source: The gallery artifact version source.
        """
        pulumi.set(self, "size_in_gb", size_in_gb)
        # Optional fields are stored only when supplied.
        for key, value in (("host_caching", host_caching), ("source", source)):
            if value is not None:
                pulumi.set(self, key, value)

    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> int:
        """
        This property indicates the size of the VHD to be created.
        """
        return pulumi.get(self, "size_in_gb")

    @property
    @pulumi.getter(name="hostCaching")
    def host_caching(self) -> Optional[str]:
        """
        The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
        """
        return pulumi.get(self, "host_caching")

    @property
    @pulumi.getter
    def source(self) -> Optional['outputs.GalleryArtifactVersionSourceResponse']:
        """
        The gallery artifact version source.
        """
        return pulumi.get(self, "source")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class ImagePurchasePlanResponse(dict):
    """
    Describes the gallery image definition purchase plan. This is used by marketplace images.
    """

    def __init__(self, *,
                 name: Optional[str] = None,
                 product: Optional[str] = None,
                 publisher: Optional[str] = None):
        """
        Describes the gallery image definition purchase plan. This is used by marketplace images.

        :param str name: The plan ID.
        :param str product: The product ID.
        :param str publisher: The publisher ID.
        """
        # Store only the values the caller actually provided.
        supplied = {"name": name, "product": product, "publisher": publisher}
        for key, value in supplied.items():
            if value is not None:
                pulumi.set(self, key, value)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The plan ID.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def product(self) -> Optional[str]:
        """
        The product ID.
        """
        return pulumi.get(self, "product")

    @property
    @pulumi.getter
    def publisher(self) -> Optional[str]:
        """
        The publisher ID.
        """
        return pulumi.get(self, "publisher")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class OSDiskImageEncryptionResponse(dict):
    """
    Contains encryption settings for an OS disk image.
    """

    def __init__(self, *,
                 disk_encryption_set_id: Optional[str] = None):
        """
        Contains encryption settings for an OS disk image.

        :param str disk_encryption_set_id: A relative URI containing the resource ID of the disk encryption set.
        """
        # Persist the value only when the caller actually supplied one.
        if disk_encryption_set_id is not None:
            pulumi.set(self, "disk_encryption_set_id", disk_encryption_set_id)

    @property
    @pulumi.getter(name="diskEncryptionSetId")
    def disk_encryption_set_id(self) -> Optional[str]:
        """
        A relative URI containing the resource ID of the disk encryption set.
        """
        return pulumi.get(self, "disk_encryption_set_id")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class RecommendedMachineConfigurationResponse(dict):
    """
    The properties describe the recommended machine configuration for this Image Definition. These properties are updatable.
    """

    def __init__(self, *,
                 memory: Optional['outputs.ResourceRangeResponse'] = None,
                 v_cpus: Optional['outputs.ResourceRangeResponse'] = None):
        """
        The properties describe the recommended machine configuration for this Image Definition. These properties are updatable.

        :param 'ResourceRangeResponseArgs' memory: Describes the resource range.
        :param 'ResourceRangeResponseArgs' v_cpus: Describes the resource range.
        """
        # Store only the values the caller actually provided.
        supplied = {"memory": memory, "v_cpus": v_cpus}
        for key, value in supplied.items():
            if value is not None:
                pulumi.set(self, key, value)

    @property
    @pulumi.getter
    def memory(self) -> Optional['outputs.ResourceRangeResponse']:
        """
        Describes the resource range.
        """
        return pulumi.get(self, "memory")

    @property
    @pulumi.getter(name="vCPUs")
    def v_cpus(self) -> Optional['outputs.ResourceRangeResponse']:
        """
        Describes the resource range.
        """
        return pulumi.get(self, "v_cpus")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class RegionalReplicationStatusResponse(dict):
    """
    This is the regional replication status.
    """

    def __init__(self, *,
                 details: str,
                 progress: int,
                 region: str,
                 state: str):
        """
        This is the regional replication status.

        :param str details: The details of the replication status.
        :param int progress: It indicates progress of the replication job.
        :param str region: The region to which the gallery image version is being replicated to.
        :param str state: This is the regional replication state.
        """
        # All fields are required for a regional status entry.
        for key, value in (("details", details),
                           ("progress", progress),
                           ("region", region),
                           ("state", state)):
            pulumi.set(self, key, value)

    @property
    @pulumi.getter
    def details(self) -> str:
        """
        The details of the replication status.
        """
        return pulumi.get(self, "details")

    @property
    @pulumi.getter
    def progress(self) -> int:
        """
        It indicates progress of the replication job.
        """
        return pulumi.get(self, "progress")

    @property
    @pulumi.getter
    def region(self) -> str:
        """
        The region to which the gallery image version is being replicated to.
        """
        return pulumi.get(self, "region")

    @property
    @pulumi.getter
    def state(self) -> str:
        """
        This is the regional replication state.
        """
        return pulumi.get(self, "state")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class ReplicationStatusResponse(dict):
    """
    This is the replication status of the gallery image version.
    """

    def __init__(self, *,
                 aggregated_state: str,
                 summary: Sequence['outputs.RegionalReplicationStatusResponse']):
        """
        This is the replication status of the gallery image version.

        :param str aggregated_state: This is the aggregated replication status based on all the regional replication status flags.
        :param Sequence['RegionalReplicationStatusResponseArgs'] summary: This is a summary of replication status for each region.
        """
        # Both fields are required.
        for key, value in (("aggregated_state", aggregated_state),
                           ("summary", summary)):
            pulumi.set(self, key, value)

    @property
    @pulumi.getter(name="aggregatedState")
    def aggregated_state(self) -> str:
        """
        This is the aggregated replication status based on all the regional replication status flags.
        """
        return pulumi.get(self, "aggregated_state")

    @property
    @pulumi.getter
    def summary(self) -> Sequence['outputs.RegionalReplicationStatusResponse']:
        """
        This is a summary of replication status for each region.
        """
        return pulumi.get(self, "summary")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class ResourceRangeResponse(dict):
    """
    Describes the resource range.
    """

    def __init__(self, *,
                 max: Optional[int] = None,
                 min: Optional[int] = None):
        """
        Describes the resource range.

        :param int max: The maximum number of the resource.
        :param int min: The minimum number of the resource.
        """
        # Store only the bounds the caller actually provided.
        supplied = {"max": max, "min": min}
        for key, value in supplied.items():
            if value is not None:
                pulumi.set(self, key, value)

    @property
    @pulumi.getter
    def max(self) -> Optional[int]:
        """
        The maximum number of the resource.
        """
        return pulumi.get(self, "max")

    @property
    @pulumi.getter
    def min(self) -> Optional[int]:
        """
        The minimum number of the resource.
        """
        return pulumi.get(self, "min")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class SharingProfileGroupResponse(dict):
    """
    Group of the gallery sharing profile
    """

    def __init__(self, *,
                 ids: Optional[Sequence[str]] = None,
                 type: Optional[str] = None):
        """
        Group of the gallery sharing profile

        :param Sequence[str] ids: A list of subscription/tenant ids the gallery is aimed to be shared to.
        :param str type: This property allows you to specify the type of sharing group. <br><br> Possible values are: <br><br> **Subscriptions** <br><br> **AADTenants**
        """
        # Store only the values the caller actually provided.
        supplied = {"ids": ids, "type": type}
        for key, value in supplied.items():
            if value is not None:
                pulumi.set(self, key, value)

    @property
    @pulumi.getter
    def ids(self) -> Optional[Sequence[str]]:
        """
        A list of subscription/tenant ids the gallery is aimed to be shared to.
        """
        return pulumi.get(self, "ids")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        This property allows you to specify the type of sharing group. <br><br> Possible values are: <br><br> **Subscriptions** <br><br> **AADTenants**
        """
        return pulumi.get(self, "type")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class SharingProfileResponse(dict):
    """
    Profile for gallery sharing to subscription or tenant
    """

    def __init__(self, *,
                 groups: Sequence['outputs.SharingProfileGroupResponse'],
                 permissions: Optional[str] = None):
        """
        Profile for gallery sharing to subscription or tenant

        :param Sequence['SharingProfileGroupResponseArgs'] groups: A list of sharing profile groups.
        :param str permissions: This property allows you to specify the permission of sharing gallery. <br><br> Possible values are: <br><br> **Private** <br><br> **Groups**
        """
        pulumi.set(self, "groups", groups)
        # Optional permission level stored only when supplied.
        if permissions is not None:
            pulumi.set(self, "permissions", permissions)

    @property
    @pulumi.getter
    def groups(self) -> Sequence['outputs.SharingProfileGroupResponse']:
        """
        A list of sharing profile groups.
        """
        return pulumi.get(self, "groups")

    @property
    @pulumi.getter
    def permissions(self) -> Optional[str]:
        """
        This property allows you to specify the permission of sharing gallery. <br><br> Possible values are: <br><br> **Private** <br><br> **Groups**
        """
        return pulumi.get(self, "permissions")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class TargetRegionResponse(dict):
    """
    Describes the target region information.
    """

    def __init__(self, *,
                 name: str,
                 encryption: Optional['outputs.EncryptionImagesResponse'] = None,
                 regional_replica_count: Optional[int] = None,
                 storage_account_type: Optional[str] = None):
        """
        Describes the target region information.

        :param str name: The name of the region.
        :param 'EncryptionImagesResponseArgs' encryption: Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact.
        :param int regional_replica_count: The number of replicas of the Image Version to be created per region. This property is updatable.
        :param str storage_account_type: Specifies the storage account type to be used to store the image. This property is not updatable.
        """
        pulumi.set(self, "name", name)
        # Optional fields are stored only when the caller supplied them.
        optional = {
            "encryption": encryption,
            "regional_replica_count": regional_replica_count,
            "storage_account_type": storage_account_type,
        }
        for key, value in optional.items():
            if value is not None:
                pulumi.set(self, key, value)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the region.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def encryption(self) -> Optional['outputs.EncryptionImagesResponse']:
        """
        Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact.
        """
        return pulumi.get(self, "encryption")

    @property
    @pulumi.getter(name="regionalReplicaCount")
    def regional_replica_count(self) -> Optional[int]:
        """
        The number of replicas of the Image Version to be created per region. This property is updatable.
        """
        return pulumi.get(self, "regional_replica_count")

    @property
    @pulumi.getter(name="storageAccountType")
    def storage_account_type(self) -> Optional[str]:
        """
        Specifies the storage account type to be used to store the image. This property is not updatable.
        """
        return pulumi.get(self, "storage_account_type")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class UserArtifactManageResponse(dict):
    """
    The install, remove, and optional update commands used to manage a gallery
    application on a Virtual Machine.
    """
    def __init__(__self__, *,
                 install: str,
                 remove: str,
                 update: Optional[str] = None):
        """
        :param str install: Required. The path and arguments to install the gallery application. This is limited to 4096 characters.
        :param str remove: Required. The path and arguments to remove the gallery application. This is limited to 4096 characters.
        :param str update: Optional. The path and arguments to update the gallery application. If not present, then update operation will invoke remove command on the previous version and install command on the current version of the gallery application. This is limited to 4096 characters.
        """
        pulumi.set(__self__, "install", install)
        pulumi.set(__self__, "remove", remove)
        # The update command is optional; stored only when supplied.
        if update is not None:
            pulumi.set(__self__, "update", update)

    @property
    @pulumi.getter
    def install(self) -> str:
        """
        Required. The path and arguments to install the gallery application. This is limited to 4096 characters.
        """
        return pulumi.get(self, "install")

    @property
    @pulumi.getter
    def remove(self) -> str:
        """
        Required. The path and arguments to remove the gallery application. This is limited to 4096 characters.
        """
        return pulumi.get(self, "remove")

    @property
    @pulumi.getter
    def update(self) -> Optional[str]:
        """
        Optional. The path and arguments to update the gallery application. If not present, then update operation will invoke remove command on the previous version and install command on the current version of the gallery application. This is limited to 4096 characters.
        """
        return pulumi.get(self, "update")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class UserArtifactSourceResponse(dict):
    """
    The source image from which the Image Version is going to be created.
    """

    def __init__(self, *,
                 media_link: str,
                 default_configuration_link: Optional[str] = None):
        """
        The source image from which the Image Version is going to be created.

        :param str media_link: Required. The mediaLink of the artifact, must be a readable storage page blob.
        :param str default_configuration_link: Optional. The defaultConfigurationLink of the artifact, must be a readable storage page blob.
        """
        pulumi.set(self, "media_link", media_link)
        # Optional configuration link stored only when supplied.
        if default_configuration_link is not None:
            pulumi.set(self, "default_configuration_link",
                       default_configuration_link)

    @property
    @pulumi.getter(name="mediaLink")
    def media_link(self) -> str:
        """
        Required. The mediaLink of the artifact, must be a readable storage page blob.
        """
        return pulumi.get(self, "media_link")

    @property
    @pulumi.getter(name="defaultConfigurationLink")
    def default_configuration_link(self) -> Optional[str]:
        """
        Optional. The defaultConfigurationLink of the artifact, must be a readable storage page blob.
        """
        return pulumi.get(self, "default_configuration_link")

    def _translate_property(self, prop):
        # Translate camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
| 38.233333 | 290 | 0.654045 | 40,699 | 0.959 | 0 | 0 | 41,159 | 0.969839 | 0 | 0 | 21,646 | 0.51005 |
46ba83324e04ac36f037fcc3c6b035e1f76cfb13 | 809 | py | Python | tests/test_datetime_fields.py | 20c/django-syncref | af8678b550657d9f1082babe1139dead038c155a | [
"Apache-2.0"
] | null | null | null | tests/test_datetime_fields.py | 20c/django-syncref | af8678b550657d9f1082babe1139dead038c155a | [
"Apache-2.0"
] | null | null | null | tests/test_datetime_fields.py | 20c/django-syncref | af8678b550657d9f1082babe1139dead038c155a | [
"Apache-2.0"
] | null | null | null | from datetime import datetime, timedelta
import pytest
from django.test import TestCase
from tests.models import Org, Sub, Widget
# Minimal payload used to create the Org fixture in the tests below.
data_org = {"name": "Acme Widgets"}
class FieldTestCase(TestCase):
    """Verify Org's auto-managed `created`/`updated` datetime fields."""

    def setUp(self):
        # Create a fresh Org and remember the wall-clock time of creation so
        # the tests can assert the model timestamps are recent.
        # (Removed a stray `pass` and commented-out duplicate of this line.)
        self.org = Org.objects.create(**data_org)
        self.created = datetime.now()
        self.one_sec = timedelta(seconds=1)

    def test_obj_creation(self):
        # Both stamps must be set within one second of object creation.
        assert self.one_sec > self.created - self.org.created
        assert self.one_sec > self.created - self.org.updated

    def test_updated(self):
        # Saving again must refresh `updated` while `created` stays put.
        self.org.name = "Updated"
        self.org.save()
        now = datetime.now()
        assert self.one_sec > self.created - self.org.created
        assert self.one_sec > now - self.org.updated
| 26.096774 | 61 | 0.651422 | 637 | 0.787392 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.091471 |
46bb3deb8367127a5cfa628614d6868e96cd7fbc | 6,888 | py | Python | app.py | Antinator11/Creative-Space | 73bcd8eeed39c57e1d9098b3fe99e2c92a67e4e8 | [
"Apache-2.0"
] | null | null | null | app.py | Antinator11/Creative-Space | 73bcd8eeed39c57e1d9098b3fe99e2c92a67e4e8 | [
"Apache-2.0"
] | null | null | null | app.py | Antinator11/Creative-Space | 73bcd8eeed39c57e1d9098b3fe99e2c92a67e4e8 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, render_template, request, redirect, url_for, Markup, \
flash # Imports Flask and all required modules
import databasemanager # Provides the functionality to load stuff from the database
app = Flask(__name__)
import errormanager  # Enum for types of errors
# Alias so all database access reads as `datamanager.*`.
datamanager = databasemanager
# Alias for the error-type enum module used when rendering failure pages.
errorman = errormanager
# Username of the currently signed-in user.  Set by Login() and overwritten
# by Delete().  NOTE(review): module-level mutable state is shared across
# requests -- confirm this app is only ever run single-user.
CurrentUser: str
# Route function for the homepage.
# @return Returns render template of base.html
@app.route('/')
def Home():
    """Render the public landing page with the current gallery entries."""
    # Refresh the entry list from the database before rendering.
    datamanager.LoadContent()
    context = {
        "entries": datamanager.entries,
        "bFailure": False,
        "app": datamanager,
    }
    return render_template('base.html', **context)
# Checks the username and the password and handles any errors
# @route Homepage
# @method: POST
# @return redirect: Redirect to 'AdminHome' function after successful login
# @return render_template: base.html with failure condition
@app.route('/', methods=['POST'])
def Login():
    """Validate submitted credentials and open the admin session.

    On success, redirects to AdminHome with an encrypted auth token; on
    failure re-renders the public page with an error code for the template.
    """
    if request.method == "POST":
        try:
            password = request.form['Password']
            username = request.form['Username']
            if password != '' and username != '':
                if datamanager.CheckUser(username, password):
                    # Remember who is signed in for later admin actions.
                    # (Removed a no-op `globals()` call.)
                    global CurrentUser
                    CurrentUser = username
                    return redirect(url_for('AdminHome', auth=str(datamanager.Encrypt('True')), user=username))
                # Credentials present but wrong.
                return render_template('base.html', fail=errorman.EErrorType.FailedPassword,
                                       failenum=errorman.EErrorType,
                                       entries=datamanager.entries, bFailure=True, app=datamanager)
            # One or both fields left blank.
            # BUG FIX: the original passed the entries under the misspelled
            # keyword 'entires', so the template never received them.
            return render_template('base.html', fail=errorman.EErrorType.FailedNone,
                                   failenum=errorman.EErrorType, bFailure=True,
                                   entries=datamanager.entries, app=datamanager)
        except Exception:
            # Missing form fields (KeyError) or backend errors fall back to a
            # generic error banner.  (Was a bare `except:`.)
            return render_template('base.html', fail=errorman.EErrorType.FailedNone, failenum=errorman.EErrorType,
                                   bFailure=True, entries=datamanager.entries)
# Main route for admin homepage
# Checks for encrypted string to ensure access was granted
# @route: '/adminbase' <auth: encrypted string> <user: user's username>
# @param auth: Encrypted string used for security
# @param user: Username of user
# @return render_template: adminbase.html with entries, the username and the datamanager
# @return redirect: 'Home' will return the user to home if they don't have valid acsses
@app.route('/adminbase/<auth> <user>')
def AdminHome(auth, user):
    """Render the admin dashboard, or bounce unauthenticated visitors home."""
    expected_token = str(datamanager.Encrypt('True'))
    if auth != expected_token:
        # Token mismatch: the visitor did not come through Login().
        return redirect(url_for('Home'))
    datamanager.LoadContent()
    print(datamanager.entries)
    return render_template('adminbase.html', entries=datamanager.entries, user=user, app=datamanager)
# Gets the users inputted values for a new entry and adds them to the website
# @route: '/adminbase.html' <user: username of signed in user>
# @param user: username of the signed in user
# @return redirect: 'Admin Home' function with encryption string and username
@app.route('/adminbase.html/<user>', methods=["POST"])
def CreateNew(user: str):
    """Create a new gallery entry from the admin form, then return to the
    dashboard.

    @param user: username of the signed-in admin, echoed into the redirect.
    """
    # (Removed the commented-out try/except skeleton that surrounded this.)
    if request.method == "POST":
        title = request.form['Title']
        desc = request.form['Desc']
        image = request.form['Image']
        caption = request.form['Caption']
        # New entries are appended, so the next id equals the current count.
        # (Renamed from `id`, which shadowed the builtin.)
        entry_id = len(datamanager.entries)
        datamanager.AddNewItem(title, desc, caption, image, entry_id, str(entry_id), 0)
        return redirect(url_for('AdminHome', auth=str(datamanager.Encrypt('True')), user=user))
# Deprecated
#@app.route('/adminbase', methods=["POST"])
#def Delete():
#if request.method == "POST":
# delete = request.form['Del']
# if delete == True:
# datamanager.RemoveItem(0)
# return render_template(url_for('AdminHome', auth=str(datamanager.Encrypt('True'))))
#else:
# return render_template(url_for('AdminHome', auth=str(datamanager.Encrypt('True'))))
# Main route for signup page
# @route: '/signup'
# @return render_template: signup.html
@app.route('/signup')
def SignUp():
    # Static page; the form on signup.html posts to the '/sign' handler below.
    return render_template('signup.html')
# Gets the entry input values and adds to database also handles errors
# @route '/sign' methods: GET and POST
# @return redirect: 'Home'
# @return render_template: 'error.html' with error type
@app.route('/sign', methods=["POST", "GET"])
def AddNewUser():
    """Create a new admin account from the signup form.

    Requires a valid admin key; username/password must be non-empty and the
    password must match its confirmation.  Success redirects to the home
    page, every failure renders the error page with a failure code.
    """
    try:
        if request.method == "POST":
            AdminKey = request.form['Key']
            Password = request.form['Password']
            Username = request.form['Username']
            ConfirmPass = request.form['ConfirmPassword']
            if datamanager.CheckKey(AdminKey):
                if Password != '' and Username != '' and ConfirmPass != '':
                    if ConfirmPass == Password:
                        if datamanager.NewUser(Username, Password):
                            return redirect(url_for('Home'))
                        # Account could not be created (e.g. name taken).
                        return render_template('error.html', fail=errorman.EErrorType.FailedPassword,
                                               failenum=errorman.EErrorType)
                    # Password and confirmation differ.
                    return render_template('error.html', fail=errorman.EErrorType.FailedNone,
                                           failenum=errorman.EErrorType)
        # GET requests, a bad admin key, or blank fields all end up here.
        # FIX: include the fail/failenum context the template receives in
        # every other error render (the original rendered it context-free).
        return render_template('error.html', fail=errorman.EErrorType.FailedNone,
                               failenum=errorman.EErrorType)
    except Exception:
        # Missing form keys raise KeyError; show the generic failure page.
        # (Was a bare `except:`.)
        return render_template('error.html', fail=errorman.EErrorType.FailedNone, failenum=errorman.EErrorType)
# Deprecated
@app.route('/likes/<id>')
def Like(id: int):
    """(Deprecated) Record a like for entry `id`, then return home."""
    datamanager.AddLike(id)
    home_url = url_for('Home')
    return redirect(home_url)
# Deprecated
@app.route('/deleteconfirm', methods=['GET'])
def ChangeDeleteTarget():
    """(Deprecated) Remember which entry id the admin asked to delete.

    BUG FIX: this route only accepts GET, but the original read the id from
    `request.form`, which is empty on GET requests (yielding a 400); the id
    is now taken from the query string.  Also removed a no-op `globals()`.
    """
    target = request.args['Delete']
    global deletetarget
    deletetarget = target
    print(deletetarget)
    return 'hi'  # Flask requires some response body; callers ignore it.
# Deprecated
@app.route('/delete')
def Delete():
    # (Deprecated) Remove the entry previously chosen via /deleteconfirm.
    # NOTE(review): this reads `datamanager.deletetarget`, while
    # ChangeDeleteTarget() stores the id in this module's own global
    # `deletetarget` -- confirm the databasemanager module really mirrors it.
    datamanager.RemoveItem(datamanager.deletetarget)
    # NOTE(review): resets the signed-in user to the literal 'user' -- verify
    # this is intentional before relying on CurrentUser afterwards.
    global CurrentUser
    CurrentUser = 'user'
    # `app` is not a route parameter, so url_for emits it as a query-string
    # argument on the redirect URL.
    return redirect(url_for('AdminHome', auth=str(datamanager.Encrypt('True')), user=CurrentUser, app=datamanager))
# Main Flask Loop
if __name__ == '__main__':
    # Session-signing key derived from the project's own Encrypt helper.
    app.secret_key = datamanager.Encrypt('key')
    app.run()
| 41.745455 | 116 | 0.63313 | 0 | 0 | 0 | 0 | 4,395 | 0.638066 | 0 | 0 | 2,667 | 0.387195 |
46bdc3e0ab2a6edc65d4cb4238647e95d5203d67 | 8,108 | py | Python | electromorpho/structure/graphs.py | CIG-UPM/electro-morpho | 708b6f47a431050e840f1daab89724e248adc4c8 | [
"MIT"
] | 1 | 2020-03-03T04:50:02.000Z | 2020-03-03T04:50:02.000Z | pymoreg/structure/graphs.py | mllera14/multi-output-regression | 5932a5efc117f7dd8e61e71620e2f37b40c10219 | [
"MIT"
] | null | null | null | pymoreg/structure/graphs.py | mllera14/multi-output-regression | 5932a5efc117f7dd8e61e71620e2f37b40c10219 | [
"MIT"
] | null | null | null | import scipy.sparse as ssp
import scipy.sparse.csgraph as csgraph
import networkx as nx
import pylab as pl
import pygraphviz as pgv
from itertools import product, chain
class DiGraph(ssp.lil_matrix):
    """
    An implementation of a directed graph with a Sparse Matrix representation using Scipy's sparse module.
    Specifically the lil_matrix representation is used since it allows for efficient modification of the
    sparse structure, which is useful for sampling. Most methods are aliases for operations we can perform directly
    with the ones we inherit from the Scipy sparse matrix class.
    Parameters
    ----------
    arg1: object
        This can be instantiated in several ways:
            DiGraph(D)
                with a dense matrix or rank-2 ndarray D
            DiGraph(S)
                with another sparse matrix S (equivalent to S.tolil())
            DiGraph((M, N), [dtype])
                to construct an empty matrix with shape (M, N)
                dtype is optional, defaulting to dtype='d'.
    shape: 2-tuple
        The size of the underlying dimensions
    dtype: type
        The type of the data. Supported are bool for adjacency representations, and float for weighted edges
    copy: bool
        In case arg1 is a sparse matrix, whether to copy its contents when constructing a new instance
    names: list of strings
        A list of true names for the nodes of the graph
    Attributes
    ----------
    names: list
        The list of names of the nodes if any. Useful if using non numerical identifiers for the nodes
    """
    def __init__(self, arg1, shape=None, dtype=bool, copy=False, names=None):
        # Only bool (adjacency) and float (weighted) matrices are supported.
        if dtype is None:
            dtype = bool
        elif dtype not in [bool, float]:
            raise ValueError('Either adjacency or weighted graph')
        super().__init__(arg1, shape, dtype, copy)
        self._names = names
    @property
    def n_nodes(self):
        # The adjacency matrix is square; either dimension is the node count.
        return self.shape[0]
    @property
    def n_edges(self):
        # An edge is any explicitly stored non-zero entry.
        return self.count_nonzero()
    @property
    def names(self):
        # Fall back to integer labels when no explicit names were given.
        if self._names is None:
            return list(range(self.n_nodes))
        return self._names
    def nodes(self, as_names=False):
        if as_names and self._names is not None:
            return self._names
        return list(range(self.shape[0]))
    def nodes_iter(self, as_names=False):
        if as_names and self._names is not None:
            return iter(self._names)
        return range(self.shape[0])
    def edges(self):
        # (u, v) pairs for every non-zero entry of the matrix.
        return list(zip(*self.nonzero()))
    def edges_iter(self):
        return zip(*self.nonzero())
    def add_edge(self, u, v, value=None):
        # Subclasses override is_valid_edge() to restrict admissible edges.
        if not self.is_valid_edge(u, v):
            raise ValueError('Edge {0}-->{1} cannot be added'.format(u, v))
        if value is None:
            value = 1
        self[u, v] = value
    def add_edges(self, edges, value=None):
        # Validate all edges before writing anything.
        if any(map(lambda e: not self.is_valid_edge(*e), edges)):
            raise ValueError('At least one edge cannot be added')
        if value is None:
            value = 1
        us, vs = list(zip(*edges))
        self[us, vs] = value
    def remove_edge(self, u, v):
        self[u, v] = 0
    def remove_edges(self, edges):
        us, vs = zip(*edges)
        self[us, vs] = 0
    def parents(self, node):
        # Rows with a non-zero entry in `node`'s column (via the transpose).
        return self.T[node].nonzero()[1]
    def children(self, node):
        # Columns with a non-zero entry in `node`'s row.
        return self[node].nonzero()[1]
    def descendants(self, node, sort=False):
        # BFS from `node`; the [1:] slice drops `node` itself from the result.
        descendants = csgraph.breadth_first_order(self, i_start=node, directed=True, return_predecessors=False)[1:]
        if not sort:
            return descendants
        return sorted(descendants)
    def ancestors(self, node, sort=False):
        # Same BFS on the transposed matrix, i.e. edges reversed.
        ancestors = csgraph.breadth_first_order(self.T, i_start=node, directed=True, return_predecessors=False)[1:]
        if not sort:
            return ancestors
        return sorted(ancestors)
    def has_path(self, u, v):
        # u reaches v exactly when u is an ancestor of v.
        return u in self.ancestors(v)
    def is_valid_edge(self, u, v):
        # Base rule: no self-loops.  Subclasses add further constraints.
        return u != v
    def copy(self):
        # Copy the sparse structure, then rewrap it preserving node names.
        arg1 = ssp.lil_matrix.copy(self)
        a = DiGraph(arg1=arg1, names=self._names)
        return a
    def to_nx_digraph(self):
        return nx.from_scipy_sparse_matrix(self, create_using=nx.DiGraph())
class MBCGraph(DiGraph):
    """Directed graph whose first ``n_features`` nodes are features and the
    remaining nodes are targets; features may never point at targets."""
    def __init__(self, arg1, n_features, shape=None, dtype=None, copy=False, names=None):
        super().__init__(arg1, shape, dtype, copy, names)
        self.n_features = n_features
    @property
    def n_targets(self):
        """Number of target nodes (everything past the feature block)."""
        return self.shape[0] - self.n_features
    def is_valid_edge(self, u, v):
        """Targets (u >= n_features) may reach anything; features only
        other features (v < n_features).  Self-loops are never valid."""
        if u == v:
            return False
        source_is_target = u >= self.n_features
        sink_is_feature = v < self.n_features
        return source_is_target or sink_is_feature
    def copy(self):
        """Deep copy preserving the feature count and node names."""
        matrix = ssp.lil_matrix.copy(self)
        return MBCGraph(arg1=matrix, n_features=self.n_features, names=self._names)
# Helper functions
def possible_edges_iter(targets, feature):
    """Yield every admissible directed edge (u, v) with u != v.

    Candidates are target->target, target->feature and feature->feature
    pairs; feature->target edges are never generated.
    """
    candidate_pairs = chain(
        product(targets, targets),
        product(targets, feature),
        product(feature, feature),
    )
    return (pair for pair in candidate_pairs if pair[0] != pair[1])
def possible_edges(targets, features):
    """Return all admissible directed edges as a concrete list."""
    return [*possible_edges_iter(targets, features)]
def topsort(G: ssp.spmatrix, nodes=None, reverse=False):
    """Topologically sort the nodes of the DAG given as adjacency matrix G.

    Iterative depth-first search; a node is emitted once all of its
    successors are fully explored, so reversing the emission order yields a
    topological order.  With reverse=True the raw post-order is returned.

    Raises networkx.NetworkXUnfeasible when a cycle is detected.
    """
    order = []
    seen = set()      # on the current DFS path or already visited
    explored = set()  # fully processed nodes
    if nodes is None:
        nodes = range(G.shape[0])
    for v in nodes:  # process all vertices in G
        if v in explored:
            continue
        fringe = [v]  # nodes yet to look at
        while fringe:
            w = fringe[-1]  # depth first search
            if w in explored:  # already looked down this branch
                fringe.pop()
                continue
            seen.add(w)  # mark as seen
            # Check successors (non-zero columns of row w) for cycles and
            # for new nodes.
            new_nodes = []
            for n in G[w].nonzero()[1]:
                if n not in explored:
                    if n in seen:  # seen but not explored: back edge => CYCLE
                        raise nx.NetworkXUnfeasible("Graph contains a cycle.")
                    new_nodes.append(n)
            if new_nodes:  # Add new_nodes to fringe
                fringe.extend(new_nodes)
            else:  # No new nodes so w is fully explored
                explored.add(w)
                order.append(w)
                fringe.pop()  # done considering this node
    if reverse:
        return order
    else:
        return list(reversed(order))
def plot_digraph(graph: DiGraph):
    # Convert to a networkx DiGraph for layout/drawing, then show the figure.
    nx.draw_networkx(nx.from_scipy_sparse_matrix(graph, create_using=nx.DiGraph()))
    pl.show()
def load_graph(path):
    """Load a DiGraph from a Graphviz dot/gv file written by save_graph().

    The graph attribute 'data_type' selects bool (adjacency) or float64
    (weighted) edges; an optional 'names' attribute restores node names.

    Raises ValueError when 'data_type' is neither 'bool' nor 'float64'.
    """
    dot_graph = pgv.AGraph(filename=path)
    if 'names' in dot_graph.graph_attr:
        names = dot_graph.graph_attr['names']
    else:
        names = None
    dtype = dot_graph.graph_attr['data_type']
    if dtype == 'bool':
        dtype = bool
    elif dtype == 'float64':
        dtype = float
    else:
        raise ValueError('Unrecognized data type')
    n_nodes = dot_graph.number_of_nodes()
    graph = DiGraph((n_nodes, n_nodes), dtype=dtype, names=names)
    if dtype == bool:
        # BUG FIX: the original called zip(dot_graph.edges_iter()) without
        # the * unpacking, which does not split the edges into source/target
        # sequences (it only "worked" for a graph with exactly two edges).
        edges = dot_graph.edges()
        if edges:
            u, v = zip(*edges)
            graph[list(map(int, u)), list(map(int, v))] = True
    else:
        for u, v in dot_graph.edges():
            weight = dot_graph.get_edge(u, v).attr['weight']
            graph[int(u), int(v)] = weight
    return graph
def save_graph(graph: DiGraph, path):
    """Write `graph` to a Graphviz file readable by load_graph().

    A '.gv' suffix is appended unless the path already ends in '.gv'/'.dot'.
    """
    if not path.endswith(('.gv', '.dot')):
        path += '.gv'
    graph_attrs = {'data_type': str(graph.dtype)}
    if graph._names is not None:
        graph_attrs['names'] = graph._names
    dot_graph = pgv.AGraph(**graph_attrs)
    dot_graph.add_nodes_from(graph.nodes())
    if graph.dtype == bool:
        dot_graph.add_edges_from(graph.edges())
    else:
        # Weighted graph: record each edge weight explicitly.
        for src, dst in graph.edges_iter():
            dot_graph.add_edge(src, dst, weight=graph[src, dst])
    dot_graph.write(path)
| 29.376812 | 115 | 0.607548 | 4,871 | 0.600765 | 0 | 0 | 344 | 0.042427 | 0 | 0 | 2,031 | 0.250493 |
46bf9a97a24d5d137bbdd0a57fc1e121b4a7e9e2 | 9,487 | py | Python | MyWeather.py | luisegarduno/MyWeather | dba3881c5964384a50887563577395f1a5540a18 | [
"MIT"
] | null | null | null | MyWeather.py | luisegarduno/MyWeather | dba3881c5964384a50887563577395f1a5540a18 | [
"MIT"
] | null | null | null | MyWeather.py | luisegarduno/MyWeather | dba3881c5964384a50887563577395f1a5540a18 | [
"MIT"
] | null | null | null | import sys
import json
import time
import os.path
import subprocess
# Current working directory (the directory containing this script).
cwd = os.path.dirname(os.path.realpath(__file__))
# Boolean flag that tells the program whether the user enabled text notifications
txt_notifs = True
# Clear the terminal screen ('cls' on Windows, 'clear' elsewhere).
os.system('cls' if os.name == 'nt' else 'clear')
# First-run detection: when the credential and spreadsheet files exist,
# the saved settings are loaded; otherwise the user is walked through the
# one-time interactive setup.
if os.path.isfile('keys.oof') and os.path.isfile('weather-data.xlsx'):
    with open('keys.oof') as file:
        accuweather_api = file.readline()
        line2 = file.readline()
        # If 'line3' is empty the file holds only the API key and location
        # key, i.e. text notifications were not configured.
        line3 = file.readline()
        if line3 != '':
            account_sid = line2
            auth_token = line3
            my_number = file.readline()
            twilio_number = file.readline()
            location_key = file.readline()
        else:
            location_key = line2
            txt_notifs = False
    # (Removed a redundant file.close(); the `with` block already closed it.)
else:
    print("Please wait while I check if you have all the necessary python modules \n")
    # implement pip as a subprocess & install necessary packages:
    subprocess.check_call([sys.executable, '-m', 'pip', 'install','xlrd', '-q'])
    subprocess.check_call([sys.executable, '-m', 'pip', 'install','lxml', '-q'])
    subprocess.check_call([sys.executable, '-m', 'pip', 'install','pandas', '-q'])
    subprocess.check_call([sys.executable, '-m', 'pip', 'install','urllib3', '-q'])
    subprocess.check_call([sys.executable, '-m', 'pip', 'install','requests', '-q'])
    subprocess.check_call([sys.executable, '-m', 'pip', 'install','openpyxl', '-q'])
    subprocess.check_call([sys.executable, '-m', 'pip', 'install','xlsxwriter', '-q'])
    print("Okay, the necessary libraries have been downloaded")
    time.sleep(2)
    os.system('cls' if os.name == 'nt' else 'clear')
    # BUG FIX: the original only imported requests inside the 'n' branch, so
    # answering 'y' crashed with NameError at the first reqs.get() call.
    # Imported here, after pip has installed it, so both branches can use it.
    import requests as reqs
    flag = True
    while flag:
        # Ask user if they would like text message notifications.
        txts = str(input("Would you like to enable txt-notifications? (y/n) : "))
        os.system('cls' if os.name == 'nt' else 'clear')
        # [:1] instead of [0] so an empty answer falls through to the
        # "Invalid Option" branch instead of raising IndexError.
        choice = txts[:1].lower()
        if choice == 'y' or choice == 'n':
            creds = open("keys.oof", "w+")
            print("Let's get your credentials, here's some examples of the formatting ")
            print(" > [AccuWeather API] : AB1234a12a12a1234567a9a94573a1234a")
            print(" > [Postal/Zip Code] : 12345")
            # Enable Text-Messages: Using Twilio
            if choice == 'y':
                print(" > [Account SID] : AB1234a12a12a1234567a9a94573a1234a")
                print(" > [Authentication Token] : AB1234a12a12a1234567a9a94573a1234a")
                print(" > [Personal Phone Number] : 9721231234")
                print(" > [TWILIO Phone Number] : 9721231234\n")
                print("** Need help? Check out the README (https://github.com/luisegarduno/MyWeather/blob/main/README.md)\n")
                print("Please enter the following information ")
                accuweather_api = str(input(" > [AccuWeather API] : "))
                creds.write(accuweather_api + "\n")
                search_code = str(input(" > [Postal/Zip Code] : "))
                # Resolve the postal code to an AccuWeather location key.
                params = (('apikey', accuweather_api), ('q', search_code), ('language', 'en-us'), ('details', 'true'))
                z_res = reqs.get('http://dataservice.accuweather.com/locations/v1/postalcodes/search', params=params)
                z_dict = json.loads(z_res.text)
                location_key = z_dict[0]['ParentCity']['Key']
                account_sid = str(input(" > [Account SID] : "))
                creds.write(account_sid + "\n")
                # (Typo fix: prompt previously read "Authenticantion".)
                auth_token = str(input(" > [Authentication Token] : "))
                creds.write(auth_token + "\n")
                my_number = str(input(" > [Personal Phone Number] : "))
                creds.write(my_number + "\n")
                twilio_number = str(input(" > [TWILIO Phone Number] : "))
                creds.write(twilio_number + "\n")
                creds.write(location_key)
                print("Installing additional python modules...")
                subprocess.check_call([sys.executable, '-m', 'pip', 'install','twilio', '-q'])
                print("\n\nDone! Now if all the information is correct, program will start in a couple seconds :)")
                time.sleep(2)
                os.system('cls' if os.name == 'nt' else 'clear')
                txt_notifs = True
                flag = False
            # Disable text-message feature
            elif choice == 'n':
                print("** Need help? Check out the README (https://github.com/luisegarduno/MyWeather/blob/main/README.md)\n")
                print("Please enter the following information ")
                accuweather_api = str(input(" > [AccuWeather API] : "))
                creds.write(accuweather_api + "\n")
                search_code = str(input(" > [Postal/Zip Code] : "))
                params = (('apikey', accuweather_api), ('q', search_code), ('language', 'en-us'), ('details', 'true'))
                z_res = reqs.get('http://dataservice.accuweather.com/locations/v1/postalcodes/search', params=params)
                z_dict = json.loads(z_res.text)
                location_key = z_dict[0]['ParentCity']['Key']
                creds.write(location_key)
                txt_notifs = False
                flag = False
            creds.close()
        else:
            print("Invalid Option. Try Again (Options: y OR n)")
# Create Excel file if not already created
import xlsxwriter
if os.path.isfile('weather-data.xlsx'):
    print('Continuing...')
else:
    # First run: write an empty workbook the main script can append to.
    init_book = xlsxwriter.Workbook('weather-data.xlsx')
    init_book.close()
# -------------- Launch actual script now --------------- #
import openpyxl
import xlsxwriter
import pandas as pd
import requests as reqs
from urllib.request import urlopen
# Open Excel file
weather_data = pd.read_excel('weather-data.xlsx')
os.system('cls' if os.name == 'nt' else 'clear')
print("*Weather*\n")
params = (('apikey', accuweather_api), ('language', 'en-us'), ('details', 'true'), ('metric', 'false'))
response_1 = reqs.get('http://dataservice.accuweather.com/forecasts/v1/daily/1day/' + str(location_key), params=params)
resp_dict1 = json.loads(response_1.text)
# Low Temperature
lo_unit = resp_dict1['DailyForecasts'][0]['Temperature']['Minimum']['Unit']
lo_temp = resp_dict1['DailyForecasts'][0]['Temperature']['Minimum']['Value']
lo = str(lo_temp) + lo_unit
# High Temperature
hi_unit = resp_dict1['DailyForecasts'][0]['Temperature']['Maximum']['Unit']
hi_temp = resp_dict1['DailyForecasts'][0]['Temperature']['Maximum']['Value']
hi = str(hi_temp) + hi_unit
# Wind (Speed / Direction)
wind_value = resp_dict1['DailyForecasts'][0]['Day']['Wind']['Speed']['Value']
wind_degrees = resp_dict1['DailyForecasts'][0]['Day']['Wind']['Direction']['Degrees']
wind_localized = resp_dict1['DailyForecasts'][0]['Day']['Wind']['Direction']['Localized']
wind = str(wind_value) + 'mph / ' + str(wind_degrees) + wind_localized
response_2 = reqs.get('http://dataservice.accuweather.com/currentconditions/v1/' + str(location_key), params=params)
resp_dict2 = json.loads(response_2.text)
# Pressure (Imperial)
pressure_value = resp_dict2[0]['Pressure']['Imperial']['Value']
pressure_unit = resp_dict2[0]['Pressure']['Imperial']['Unit']
pressure = str(pressure_value) + pressure_unit
# Precipitation
precipitation_value = resp_dict2[0]['PrecipitationSummary']['Precipitation']['Imperial']['Value']
precipitation_unit = resp_dict2[0]['PrecipitationSummary']['Precipitation']['Imperial']['Unit']
precipitation = str(precipitation_value) + precipitation_unit
# Humidity
relative_humidity = str(resp_dict2[0]['RelativeHumidity']) + '%'
indoor_relative_humidity = resp_dict2[0]['IndoorRelativeHumidity']
# Information that will be printed to console (same to what user will receive via text-message)
date_rn = time.strftime("%m/%d/%Y")
txt_2_me = 'Date : ' + str(date_rn)
txt_2_me += '\nLow Temperature : ' + lo
txt_2_me += '\nHi Temperature : ' + hi
txt_2_me += '\nPrecipitation : ' + precipitation
txt_2_me += '\nHumidity (Relative) : ' + relative_humidity
txt_2_me += '\nPressure : ' + pressure
txt_2_me += '\nWind (Speed/Direction) : ' + wind
print(txt_2_me + '\n')
line = date_rn + ',' + lo + ',' + hi + ',' + precipitation + ',' + relative_humidity + ',' + pressure + ',' + wind
weather_data2 = pd.DataFrame({'Date': [date_rn], 'Low Temperature': [lo], 'High Temperature': [hi], 'Precipitation': [precipitation],
'Humidity': [relative_humidity], 'Pressure': [pressure], 'Wind (strength, direction)': [wind]})
weather_data = weather_data.append(weather_data2)
weather_data.to_excel('./weather-data.xlsx', index=False)
if txt_notifs:
from twilio.rest import Client
client = Client(account_sid, auth_token) # Twilio Client
client.messages.create(to=my_number,from_=twilio_number,body=txt_2_me)
| 44.75 | 134 | 0.604933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,949 | 0.416254 |
46bff1adfd5577acabc4e0dff9a754921e785d60 | 1,312 | py | Python | filter_plugins/containers2volumes.py | gabriel-duque/sadm | 00483d486b336c71066e244a61c29042a924e75c | [
"MIT"
] | 4 | 2020-07-28T00:22:43.000Z | 2020-12-01T16:03:01.000Z | filter_plugins/containers2volumes.py | gabriel-duque/sadm | 00483d486b336c71066e244a61c29042a924e75c | [
"MIT"
] | 8 | 2020-07-05T23:23:42.000Z | 2020-09-04T00:30:58.000Z | filter_plugins/containers2volumes.py | zuh0/sadm | 00483d486b336c71066e244a61c29042a924e75c | [
"MIT"
] | null | null | null | from ansible.errors import AnsibleFilterError
def container2volumes(container, vol_type="all"):
    """Return one container's volume definitions, optionally filtered.

    vol_type must be 'all' (default) or one of 'generated', 'persistent',
    'volatile'; anything else raises AnsibleFilterError.
    """
    allowed = ("generated", "persistent", "volatile")
    if vol_type != "all" and vol_type not in allowed:
        raise AnsibleFilterError(
            f"container2volumes: {vol_type} is not in allowed volume types ('all', 'generated', 'persistent', 'volatile')"
        )
    volumes = container.get("volumes", {}).values()
    return [vol for vol in volumes if vol_type == "all" or vol.get("type") == vol_type]
def containers2volumes(containers, vol_type="all"):
    """Flatten the (optionally filtered) volumes of every container."""
    allowed = ("generated", "persistent", "volatile")
    if vol_type != "all" and vol_type not in allowed:
        raise AnsibleFilterError(
            f"containers2volumes: {vol_type} is not in allowed volume types ('all', 'generated', 'persistent', 'volatile')"
        )
    collected = []
    for container in containers.values():
        collected.extend(container2volumes(container, vol_type))
    return collected
class FilterModule(object):
    """Get volume information from a container or a container list."""
    def filters(self):
        # Ansible filter-plugin entry point: exposes the functions above as
        # Jinja2 filters under these names.
        return {
            "containers2volumes": containers2volumes,
            "container2volumes": container2volumes,
        }
| 32.8 | 123 | 0.637957 | 255 | 0.19436 | 0 | 0 | 0 | 0 | 0 | 0 | 432 | 0.329268 |
46bff90e4aae9b5e239fcf2a9690645d59cd4e37 | 4,593 | py | Python | cryptofeed_werks/exchanges/bitmex/api.py | globophobe/crypto-tick-data | 7ec5d1e136b9bc27ae936f55cf6ab7fe5e37bda4 | [
"MIT"
] | null | null | null | cryptofeed_werks/exchanges/bitmex/api.py | globophobe/crypto-tick-data | 7ec5d1e136b9bc27ae936f55cf6ab7fe5e37bda4 | [
"MIT"
] | null | null | null | cryptofeed_werks/exchanges/bitmex/api.py | globophobe/crypto-tick-data | 7ec5d1e136b9bc27ae936f55cf6ab7fe5e37bda4 | [
"MIT"
] | null | null | null | import datetime
import json
import re
import time
from decimal import Decimal
import httpx
from cryptofeed_werks.controllers import HTTPX_ERRORS, iter_api
from cryptofeed_werks.lib import parse_datetime
from .constants import API_URL, MAX_RESULTS, MIN_ELAPSED_PER_REQUEST, MONTHS
def get_bitmex_api_url(url, pagination_id):
    """Append page size/order params, plus endTime when paginating back."""
    paged_url = f"{url}&count={MAX_RESULTS}&reverse=true"
    if not pagination_id:
        return paged_url
    return f"{paged_url}&endTime={pagination_id}"
def get_bitmex_api_pagination_id(timestamp, last_data=[], data=[]):
    # last_data/data are unused; the parameters exist so this callback fits
    # the shape iter_api passes.  NOTE(review): mutable default arguments --
    # harmless while unused, but confirm they are never mutated upstream.
    return format_bitmex_api_timestamp(timestamp)
def get_bitmex_api_timestamp(trade):
    # Parse a trade record's ISO-8601 "timestamp" field into a datetime.
    return parse_datetime(trade["timestamp"])
def format_bitmex_api_timestamp(timestamp):
    """Render a datetime as the naive ISO-8601 string the BitMEX API expects."""
    naive = timestamp.replace(tzinfo=None)
    return naive.isoformat()
def get_active_futures(root_symbol, timestamp_from, pagination_id, log_prefix=None):
    """List currently-listed futures for `root_symbol` (see get_futures)."""
    return get_futures(
        "instrument/active", root_symbol, timestamp_from, pagination_id, log_prefix=log_prefix
    )
def get_expired_futures(root_symbol, timestamp_from, pagination_id, log_prefix=None):
    """List expired futures for `root_symbol` (see get_futures)."""
    return get_futures(
        "instrument", root_symbol, timestamp_from, pagination_id, log_prefix=log_prefix
    )
def get_futures(endpoint, root_symbol, timestamp_from, pagination_id, log_prefix=None):
    """Collect futures contracts for `root_symbol` from the given endpoint.

    Pages through the instrument listing and keeps only symbols shaped
    <root><month-code><digits> whose month code is in MONTHS and whose
    expiry is not before `timestamp_from`.  Returns a list of dicts with
    'symbol', 'listing' and 'expiry' (listing/expiry parsed to datetimes).
    """
    filters = json.dumps({"rootSymbol": root_symbol})
    url = f"{API_URL}/{endpoint}?filter={filters}"
    timestamp_key = "timestamp"
    # NOTE(review): the iter_api call sites in this module disagree on the
    # positional arguments (get_trades omits timestamp_key, get_funding adds
    # parse_datetime) -- confirm which shape matches iter_api's signature.
    results = iter_api(
        url,
        timestamp_key,
        get_bitmex_api_pagination_id,
        get_bitmex_api_response,
        MAX_RESULTS,
        MIN_ELAPSED_PER_REQUEST,
        timestamp_from=timestamp_from,
        pagination_id=pagination_id,
        log_prefix=log_prefix,
    )
    instruments = []
    # Group 1 captures the single character following the root symbol, which
    # is checked against the known futures month codes below.
    regex = re.compile(f"^{root_symbol}" + r"(\w)\d+$")
    for instrument in results:
        symbol = instrument["symbol"]
        match = regex.match(symbol)
        if match:
            is_future = match.group(1) in MONTHS
            if is_future:
                listing = parse_datetime(instrument["listing"])
                expiry = parse_datetime(instrument["expiry"])
                # Skip contracts that expired before the window of interest.
                if expiry >= timestamp_from:
                    instruments.append(
                        {"symbol": symbol, "listing": listing, "expiry": expiry}
                    )
    return instruments
def get_funding(symbol, timestamp_from, pagination_id, log_prefix=None):
    """Page through historical funding records for `symbol`.

    Returns a list of {'timestamp': datetime, 'rate': fundingRate} dicts.
    """
    url = f"{API_URL}/funding?symbol={symbol}"
    timestamp_key = "timestamp"
    return [
        {
            "timestamp": parse_datetime(f["timestamp"]),
            "rate": f["fundingRate"],
        }
        for f in iter_api(
            url,
            timestamp_key,
            get_bitmex_api_pagination_id,
            get_bitmex_api_response,
            # NOTE(review): this extra positional argument is not passed by
            # get_futures or get_trades -- confirm iter_api really accepts a
            # timestamp-parsing callback in this position.
            parse_datetime,
            MAX_RESULTS,
            MIN_ELAPSED_PER_REQUEST,
            timestamp_from=timestamp_from,
            pagination_id=pagination_id,
            log_prefix=log_prefix,
        )
    ]
def get_trades(symbol, timestamp_from, pagination_id, log_prefix=None):
    """Page through raw trades for `symbol` via iter_api."""
    url = f"{API_URL}/trade?symbol={symbol}"
    # NOTE(review): unlike get_futures/get_funding, no timestamp_key string
    # is passed before the pagination callback -- one of these call shapes
    # must be inconsistent with iter_api's signature; verify.
    return iter_api(
        url,
        get_bitmex_api_pagination_id,
        get_bitmex_api_timestamp,
        get_bitmex_api_response,
        MAX_RESULTS,
        MIN_ELAPSED_PER_REQUEST,
        timestamp_from=timestamp_from,
        pagination_id=pagination_id,
        log_prefix=log_prefix,
    )
def get_bitmex_api_response(url, pagination_id=None, retry=30):
    """GET a BitMEX REST endpoint, honoring rate limits and retrying errors.

    :param url: endpoint URL without pagination/count parameters.
    :param pagination_id: ISO timestamp used as endTime for older pages.
    :param retry: remaining retry budget for transient failures.
    :return: decoded JSON body (floats parsed as Decimal).
    :raises Exception: on a non-200/429 HTTP status, or when retries run out.
    """
    try:
        response = httpx.get(get_bitmex_api_url(url, pagination_id))
        if response.status_code == 200:
            # BUG FIX: HTTP headers are strings; the original compared the
            # raw header to the int 0 (never true) and a str reset time to a
            # float timestamp (TypeError), so this pause never triggered.
            remaining = int(response.headers["x-ratelimit-remaining"])
            if remaining == 0:
                timestamp = datetime.datetime.utcnow().timestamp()
                reset = float(response.headers["x-ratelimit-reset"])
                if reset > timestamp:
                    sleep_duration = reset - timestamp
                    print(f"Max requests, sleeping {sleep_duration} seconds")
                    time.sleep(sleep_duration)
            result = response.read()
            return json.loads(result, parse_float=Decimal)
        elif response.status_code == 429:
            # Throttled: wait as instructed, then retry.  (The original fell
            # off the end here and implicitly returned None to the caller.)
            time.sleep(int(response.headers.get("Retry-After", 1)))
            if retry > 0:
                return get_bitmex_api_response(url, pagination_id, retry - 1)
            raise Exception(f"HTTP {response.status_code}: {response.reason_phrase}")
        else:
            raise Exception(f"HTTP {response.status_code}: {response.reason_phrase}")
    except HTTPX_ERRORS:
        # Transient transport error: back off briefly and retry.
        if retry > 0:
            time.sleep(1)
            retry -= 1
            return get_bitmex_api_response(url, pagination_id, retry)
        raise
| 32.118881 | 87 | 0.644677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 529 | 0.115175 |
46c085ac7c0934855c0208b4c1f43ee8a0d905c0 | 381 | py | Python | lowest-unique.py | leaen/Codeeval-solutions | fa83cb4fba3e56f79c0a6b00361c18cd3092c3f0 | [
"MIT"
] | null | null | null | lowest-unique.py | leaen/Codeeval-solutions | fa83cb4fba3e56f79c0a6b00361c18cd3092c3f0 | [
"MIT"
] | null | null | null | lowest-unique.py | leaen/Codeeval-solutions | fa83cb4fba3e56f79c0a6b00361c18cd3092c3f0 | [
"MIT"
] | null | null | null | import sys
def lowest_unique_number(line):
    """Return the 1-based position of the smallest number occurring exactly
    once on the whitespace-separated `line`, or 0 when no number is unique.
    """
    numbers = [int(token) for token in line.split()]
    counts = Counter(numbers)
    unique = [n for n in numbers if counts[n] == 1]
    if not unique:
        return 0
    # BUG FIX: the original located the answer with line.index(str(e))//2+1,
    # a character-level substring search that assumes single-digit,
    # single-space tokens (e.g. for '10 1 10' it matched the '1' inside '10'
    # and returned position 1 instead of 2).  Index the token list instead.
    return numbers.index(min(unique)) + 1
def main():
    """Print the answer for every line of the file named on argv[1]."""
    input_path = sys.argv[1]
    with open(input_path) as input_file:
        for raw_line in input_file:
            answer = lowest_unique_number(raw_line.strip())
            print(answer)
if __name__ == '__main__':
    main()
| 22.411765 | 53 | 0.606299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.026247 |
46c12f59ad64dcb22ae69898d7c1b35ed5998385 | 3,247 | py | Python | flask_server.py | MichalYoung/eld-sentence-scramble | 3af1573daf2803e9ab9dbe03c58d0f42115f3bc3 | [
"MIT"
] | null | null | null | flask_server.py | MichalYoung/eld-sentence-scramble | 3af1573daf2803e9ab9dbe03c58d0f42115f3bc3 | [
"MIT"
] | null | null | null | flask_server.py | MichalYoung/eld-sentence-scramble | 3af1573daf2803e9ab9dbe03c58d0f42115f3bc3 | [
"MIT"
] | null | null | null | """
A simple game for English language development students in
primary school. English sentences are presented with a
scrambled word order. Students click each word to put it in
correct English order (e.g., adjectives come before nouns).
"""
import config
import flask
from flask import request
from flask import session
from flask import jsonify
from flask import g
import random
import logging
from typing import List
###
# Globals
###
app = flask.Flask(__name__)
# Some resources are located relative to the
# script directory (i.e., to this file)
import os
scriptdir = os.path.dirname(__file__)
import uuid  # NOTE(review): unused at module level; the __main__ block re-imports it.
app.secret_key = config.get("app_key")
app.debug = config.get("debug")
# Verbose logging in debug deployments, info-level otherwise.
if app.debug:
    app.logger.setLevel(logging.DEBUG)
else:
    app.logger.setLevel(logging.INFO)
##############
# URL routing
###############
@app.route("/")
@app.route("/index")
def index():
    """Serve the scramble page for the level stored in the session."""
    app.logger.debug("Entering index")
    level = session.get("level")
    if not level:
        # No level chosen yet: send the student to the level chooser first.
        app.logger.debug(f"session.get('level') returned {level}")
        app.logger.debug("Level not set, redirecting to level chooser")
        return flask.redirect(flask.url_for("choose_level"))
    app.logger.debug(f"Level was set to '{level}'")
    sentence = load_sentence(level).split()
    app.logger.debug(f"Selected sentence: {sentence}")
    # NOTE(review): joins the words with NO separator (e.g. "thecatsat"),
    # presumably as a whitespace-free answer key for the template -- confirm
    # scramble.html expects this format rather than " ".join(sentence).
    g.sentence = "".join(sentence)
    g.scrambled = scramble(sentence)
    app.logger.debug(f"Rendering scramble as {g.scrambled}")
    return flask.render_template("scramble.html")
@app.route("/choose")
def choose_level():
    # Static page listing the available levels; links hit /_choose below.
    return flask.render_template("choose_level.html")
@app.route("/_choose")
def _choose():
    """Store the chosen level (list of sentences) in the session, then
    return to the game page."""
    app.logger.debug("Entering '_choose'")
    try:
        level = request.args.get("level")
        session["level"] = level
        app.logger.debug(f"Redirecting to index, level={level}")
        return flask.redirect(flask.url_for("index"))
    except Exception:
        # Was a bare `except:` (which also swallowed SystemExit/
        # KeyboardInterrupt); the fallback behavior is unchanged but the
        # failure is now logged instead of vanishing silently.
        app.logger.exception("Failed to store level; returning to chooser")
        return flask.redirect(flask.url_for("choose_level"))
#############
# Used by request handlers
#############
def scramble(sentence: List[str]) -> List[str]:
    """Return a copy of `sentence` with the word order shuffled.

    Guarantees the result differs from the input whenever a different
    ordering exists.  BUG FIX: when the sentence has fewer than two distinct
    words (e.g. ["a", "a"]) no reordering can differ, so the original's
    "shuffle until different" loop spun forever; such inputs are now
    returned unchanged.
    """
    scrambled = sentence.copy()
    if len(set(scrambled)) < 2:
        return scrambled
    while scrambled == sentence:
        random.shuffle(scrambled)
    return scrambled
def load_sentence(level: str) -> str:
"""Selects a random sentence from the levels
file.
Note we read the whole file on each
interaction, in preference to making the session
object large. If this becomes a performance issue,
we can interpose a database and read a single sentence
from it. It is unlikely to be a problem for level files
under several kilobytes.
"""
data = open(f"{scriptdir}/static/data/{level}.txt").readlines()
limit = 1000
attempts = 1
sentence = random.choice(data)
while sentence.startswith("#"):
attempts += 1
assert attempts < limit, "Did not find non-comment line in level file"
sentence = random.choice(data)
return sentence
#############
# Startup
#############
if __name__ == "__main__":
import uuid
app.secret_key = str(uuid.uuid4())
app.debug = config.get("debug")
app.logger.setLevel(logging.DEBUG)
app.run(port=config.PORT)
| 26.185484 | 78 | 0.669849 | 0 | 0 | 0 | 0 | 1,183 | 0.364336 | 0 | 0 | 1,389 | 0.427779 |
46c24f9e543823c6b0cda1b66bf196b243046c21 | 3,717 | py | Python | tests/test_model.py | RandalJBarnes/OnekaPy | ba082f76f3f7f4394a11864623981cc876e1b253 | [
"MIT"
] | null | null | null | tests/test_model.py | RandalJBarnes/OnekaPy | ba082f76f3f7f4394a11864623981cc876e1b253 | [
"MIT"
] | null | null | null | tests/test_model.py | RandalJBarnes/OnekaPy | ba082f76f3f7f4394a11864623981cc876e1b253 | [
"MIT"
] | null | null | null | """
Test the Model class.
Notes
-----
o The specific test values were computed using the MatLab code
from the "Object Based Analytic Elements" project.
Author
------
Dr. Randal J. Barnes
Department of Civil, Environmental, and Geo- Engineering
University of Minnesota
Version
-------
09 May 2020
"""
import numpy as np
import pytest
from oneka.model import Model
@pytest.fixture
def my_model():
base = 500.0
conductivity = 1.0
porosity = 0.25
thickness = 100.0
xo, yo = (0.0, 0.0)
coef = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 500.0])
wells = [
(100.0, 200.0, 1.0, 1000.0),
(200.0, 100.0, 1.0, 1000.0)
]
mo = Model(base, conductivity, porosity, thickness, wells, xo, yo, coef)
return mo
def test_model_compute_potential(my_model):
x, y = [100.0, 100.0]
Phi = my_model.compute_potential(x, y)
assert np.isclose(Phi, 32165.8711977589, rtol=1.0e-6)
def test_model_compute_head(my_model):
x, y = [100.0, 100.0]
head = my_model.compute_head(x, y)
assert np.isclose(head, 371.658711977589, rtol=1.0e-6)
def test_model_compute_discharge(my_model):
x, y = [120.0, 160.0]
Qx, Qy = my_model.compute_discharge(x, y)
Qx_true, Qy_true = [-401.318309886184, -438.771830796713]
assert np.isclose(Qx, Qx_true, rtol=1.0e-6)
assert np.isclose(Qy, Qy_true, rtol=1.0e-6)
def test_model_compute_velocity(my_model):
x, y = [100.0, 100.0]
Vx, Vy = my_model.compute_velocity(x, y)
Vx_true, Vy_true = [-11.976338022763, -11.976338022763]
assert np.isclose(Vx, Vx_true, rtol=1.0e-6)
assert np.isclose(Vy, Vy_true, rtol=1.0e-6)
x, y = [120.0, 160.0]
Vx, Vy = my_model.compute_velocity(x, y)
Vx_true, Vy_true = [-16.052732395447, -17.550873231869]
assert np.isclose(Vx, Vx_true, rtol=1.0e-6)
assert np.isclose(Vy, Vy_true, rtol=1.0e-6)
def test_model_compute_fit(my_model):
ev_true = np.array([[0.9916], [0.9956], [0.9422], [171.85], [165.8], [9667.8]])
x_true, y_true = [58.52, 52.76]
cov_true = np.array([
[3.195e-05, -2.031e-06, -2.437e-06, 2.176e-04, -3.117e-05, -1.340e-02],
[-2.031e-06, 1.917e-05, -3.666e-06, 1.282e-04, 1.297e-04, -1.121e-02],
[-2.437e-06, -3.666e-06, 1.294e-05, -2.745e-05, 5.914e-05, 3.349e-03],
[2.176e-04, 1.282e-04, -2.745e-05, 1.083e-02, 6.399e-04, -1.719e-01],
[-3.117e-05, 1.297e-04, 5.914e-05, 6.399e-04, 7.370e-03, -6.047e-02],
[-1.340e-02, -1.121e-02, 3.349e-03, -1.719e-01, -6.047e-02, 1.715e+01]])
obs = list([
[23.00, 11.00, 573.64, 0.10],
[24.00, 85.00, 668.55, 0.10],
[26.00, 80.00, 661.58, 0.10],
[28.00, 65.00, 637.97, 0.10],
[37.00, 50.00, 626.62, 0.10],
[41.00, 21.00, 598.85, 0.10],
[42.00, 53.00, 637.51, 0.10],
[42.00, 74.00, 673.32, 0.10],
[45.00, 70.00, 670.52, 0.10],
[46.00, 15.00, 599.43, 0.10],
[52.00, 76.00, 694.14, 0.10],
[58.00, 90.00, 736.75, 0.10],
[64.00, 22.00, 629.54, 0.10],
[71.00, 19.00, 637.34, 0.10],
[72.00, 36.00, 660.54, 0.10],
[72.00, 55.00, 691.45, 0.10],
[74.00, 50.00, 686.57, 0.10],
[75.00, 18.00, 642.92, 0.10],
[76.00, 43.00, 678.80, 0.10],
[77.00, 79.00, 752.05, 0.10],
[79.00, 66.00, 727.81, 0.10],
[81.00, 81.00, 766.23, 0.10],
[82.00, 77.00, 759.15, 0.10],
[86.00, 26.00, 673.24, 0.10],
[90.00, 57.00, 734.72, 0.10]])
coef_ev, coef_cov = my_model.fit_regional_flow(obs, x_true, y_true)
assert np.allclose(coef_ev, ev_true, rtol=0.001)
assert np.allclose(coef_cov, cov_true, rtol=0.001)
| 30.719008 | 83 | 0.571429 | 0 | 0 | 0 | 0 | 382 | 0.102771 | 0 | 0 | 324 | 0.087167 |
46c2b16869fd2a8293075005691ad0df8c253672 | 830 | py | Python | Company/thoughtworks/FizzBuzzWhizz/solution-python/FizzBuzzWhizz.py | OctopusLian/leetcode-solutions | 40920d11c584504e805d103cdc6ef3f3774172b3 | [
"MIT"
] | 1 | 2020-12-01T18:35:24.000Z | 2020-12-01T18:35:24.000Z | Company/thoughtworks/FizzBuzzWhizz/solution-python/FizzBuzzWhizz.py | OctopusLian/leetcode-solutions | 40920d11c584504e805d103cdc6ef3f3774172b3 | [
"MIT"
] | 18 | 2020-11-10T05:48:29.000Z | 2020-11-26T08:39:20.000Z | Company/thoughtworks/FizzBuzzWhizz/solution-python/FizzBuzzWhizz.py | OctopusLian/leetcode-solutions | 40920d11c584504e805d103cdc6ef3f3774172b3 | [
"MIT"
] | 5 | 2020-11-09T07:43:00.000Z | 2021-12-02T14:59:37.000Z | # This is python2 version.
def FizzBuzzWhizz(args):
"""args[0] = Fizz, Buzz, Whizz
args[1]= 3, 5, 7"""
def FBW(Number):
return Number%args[1] and Number or args[0]
return FBW
def sayWhat(l_sayWhat,Number):
return l_sayWhat.count(Number)<3 and "".join([s for s in l_sayWhat if type(s) is str]) or Number
def zmap(func,seq):
mapped_seq = []
for eachItem in func:
mapped_seq.append(eachItem(seq))
return mapped_seq
def even_filter(nums, rule):
for num in range(1,nums):
yield sayWhat(zmap(map(FizzBuzzWhizz, rule), num),num)
rule = [("Fizz",3),("Buzz", 5),("Whizz",7)]
count = 101
for even in even_filter(count,rule):
print even
fiz = lambda a,b,c,d:['Fizz'*(x%a==0)+'Buzz'*(x%b==0)+'Whizz'*(x%c==0) or x for x in range(1,d)]
print fiz(3,5,7,101) | 25.9375 | 100 | 0.616867 | 0 | 0 | 121 | 0.145783 | 0 | 0 | 0 | 0 | 124 | 0.149398 |
46c39db6dd0b69722eb312b2a6c9c225e95716f4 | 3,022 | py | Python | figures/perception/randomwalk.py | patricknaughton01/RoboticSystemsBook | 0fc67cbccee0832b5f9b00d848c55697fa69bedf | [
"Apache-2.0"
] | 116 | 2018-08-27T15:32:59.000Z | 2022-02-28T10:41:37.000Z | figures/perception/randomwalk.py | patricknaughton01/RoboticSystemsBook | 0fc67cbccee0832b5f9b00d848c55697fa69bedf | [
"Apache-2.0"
] | 2 | 2021-05-04T12:56:40.000Z | 2022-02-18T23:13:33.000Z | figures/perception/randomwalk.py | patricknaughton01/RoboticSystemsBook | 0fc67cbccee0832b5f9b00d848c55697fa69bedf | [
"Apache-2.0"
] | 29 | 2019-06-20T20:13:36.000Z | 2022-02-20T14:01:34.000Z | import matplotlib.pyplot as plt
import numpy as np
from kalman import *
def kf_trace(F,g,P,H,j,Q,Xmean,Xvar,Z):
if not isinstance(F,np.ndarray): F = np.array([[F]])
if not isinstance(g,np.ndarray): g = np.array([g])
if not isinstance(P,np.ndarray): P = np.array([[P]])
if H is not None:
if not isinstance(H,np.ndarray): H = np.array([[H]])
if not isinstance(j,np.ndarray): j = np.array([j])
if not isinstance(Q,np.ndarray): Q = np.array([[Q]])
if not isinstance(Xmean,np.ndarray): Xmean = np.array([Xmean])
if not isinstance(Xvar,np.ndarray): Xvar = np.array([[Xvar]])
cur_mean,cur_cov = Xmean,Xvar
res_mean = [cur_mean]
res_cov = [cur_cov]
for z in Z:
if not isinstance(z,np.ndarray): z = np.array([z])
cur_mean,cur_cov = kalman_filter_predict(cur_mean,cur_cov,F,g,P)
if H is not None:
cur_mean,cur_cov = kalman_filter_update(cur_mean,cur_cov,F,g,P,H,j,Q,z)
res_mean.append(cur_mean)
res_cov.append(cur_cov)
return res_mean,res_cov
T = 100
N = 20
dt = 0.1
motion_noise_magnitude = 1.0
noise_magnitude = 0.3
fig1 = plt.figure(figsize=(10,4))
ax1 = fig1.add_subplot(1, 2, 1)
ax1.set_xlabel("Time")
ax1.set_ylabel("State")
ax1.set_ylim(-3,3)
ax1.set_xlim(0,10)
x = np.array(range(T))*dt
for i in xrange(N):
eps = np.random.normal(size=T)*motion_noise_magnitude
y = np.cumsum(eps*dt)
ax1.plot(x,y)
y,yvar = kf_trace(F=1,g=0,P=motion_noise_magnitude*dt**2,H=None,j=None,Q=noise_magnitude**2,Xmean=0,Xvar=0,Z=eps)
y = np.array([yi[0] for yi in y])
yvar = np.array([yi[0,0] for yi in yvar])
kf_pred, = ax1.plot(x,y[:-1],label="KF prediction")
ax1.plot(x,y[:-1]+2.0*np.sqrt(yvar)[:-1],label="KF prediction + 2*std",lw=0.5,color='k',linestyle='--')
ax1.plot(x,y[:-1]-2.0*np.sqrt(yvar)[:-1],label="KF prediction + 2*std",lw=0.5,color='k',linestyle='--')
ax1.legend(handles=[kf_pred])
ax2 = fig1.add_subplot(1, 2, 2)
ax2.set_xlabel("Time")
ax2.set_ylabel("State")
ax2.set_ylim(-3,3)
ax2.set_xlim(0,10)
#eps_truth = np.random.normal(size=T)
#y_truth = np.cumsum(eps*dt)
y_truth = np.sin(np.array(range(T))*dt*0.5)*1.0
x = np.array(range(T))*dt
z = y_truth + np.random.normal(size=T)*noise_magnitude
y,yvar = kf_trace(F=1,g=0,P=motion_noise_magnitude*dt**2,H=1,j=0,Q=noise_magnitude**2,Xmean=0,Xvar=0,Z=z)
y = np.array([yi[0] for yi in y])
yvar = np.array([yi[0,0] for yi in yvar])
Zmse = np.sqrt(np.sum((z-y_truth)**2))
KFmse = np.sqrt(np.sum((y[:-1]-y_truth)**2))
print "Z MSE",Zmse
print "KF MSE",KFmse
print "Reduction (%)",(Zmse-KFmse)/Zmse*100
ground_truth, = ax2.plot(x,y_truth,label="Ground truth",color='k')
obs = ax2.scatter(x,z,label="Observations",color='gray',s=9)
kf_estimate, = ax2.plot(x,y[:-1],label="KF estimate")
ax2.plot(x,y[:-1]+2.0*np.sqrt(yvar)[:-1],label="KF estimate + 2*std",lw=0.5,color='k',linestyle='--')
ax2.plot(x,y[:-1]-2.0*np.sqrt(yvar)[:-1],label="KF estimate + 2*std",lw=0.5,color='k',linestyle='--')
ax2.legend(handles=[ground_truth,obs,kf_estimate])
plt.show()
| 39.246753 | 113 | 0.657512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.099934 |
46c3d72b7e02560f09287d4b7a34b0b06179e42e | 6,687 | py | Python | data/MuCo/MuCo.py | GUO-W/PI-Net | 0c93a05d3aa277a80101f69ad196e5d6c8edba76 | [
"MIT"
] | 2 | 2021-05-21T14:07:08.000Z | 2022-01-13T07:39:00.000Z | data/MuCo/MuCo.py | GUO-W/PI-Net | 0c93a05d3aa277a80101f69ad196e5d6c8edba76 | [
"MIT"
] | null | null | null | data/MuCo/MuCo.py | GUO-W/PI-Net | 0c93a05d3aa277a80101f69ad196e5d6c8edba76 | [
"MIT"
] | null | null | null | ##
## Software PI-Net: Pose Interacting Network for Multi-Person Monocular 3D Pose Estimation
## Copyright Inria and UPC
## Year 2021
## Contact : wen.guo@inria.fr
##
## The software PI-Net is provided under MIT License.
##
#used in train for skeleton input
import os
import os.path as osp
import numpy as np
import math
from utils.pose_utils import get_bbox
from pycocotools.coco import COCO
from config import cfg
import json
from utils.pose_utils import pixel2cam, get_bbox, warp_coord_to_original, rigid_align, cam2pixel
from utils.vis import vis_keypoints, vis_3d_skeleton
import cv2 as cv
def larger_bbox(bbox):
w = bbox[2]
h = bbox[3]
c_x = bbox[0] + w/2.
c_y = bbox[1] + h/2.
aspect_ratio = cfg.input_shape[1]/cfg.input_shape[0]
if w > aspect_ratio * h:
h = w / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
bbox[2] = w*1.25
bbox[3] = h*1.25
bbox[0] = c_x - bbox[2]/2.
bbox[1] = c_y - bbox[3]/2.
return bbox
class MuCo:
def __init__(self, data_split, is_val):
self.data_split = data_split
self.img_dir = osp.join(cfg.data_dir, 'MuCo', 'data')
self.train_annot_path = cfg.train_annot_path
self.val_annot_path = cfg.val_annot_path
self.joint_num = 21
self.joints_name = ('Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head', 'R_Hand', 'L_Hand', 'R_Toe', 'L_Toe')
self.flip_pairs = ( (2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13), (17, 18), (19, 20) )
self.skeleton = ( (0, 16), (16, 1), (1, 15), (15, 14), (14, 8), (14, 11), (8, 9), (9, 10), (10, 19), (11, 12), (12, 13), (13, 20), (1, 2), (2, 3), (3, 4), (4, 17), (1, 5), (5, 6), (6, 7), (7, 18) )
self.joints_have_depth = True
self.root_idx = self.joints_name.index('Pelvis')
self.is_val = is_val
self.pair_index_path = cfg.pair_index_path_muco
self.data = self.load_data()
def load_data(self):
if self.data_split == 'train':
db = COCO(self.train_annot_path)
data = []
id2pairId = json.load(open(self.pair_index_path,'r'))
n = 0
for aid in db.anns.keys():
ann = db.anns[aid]
image_id = ann['image_id']
img = db.loadImgs(image_id)[0]
img_path = osp.join(self.img_dir, img['file_name'])
fx, fy = img['f']
cx, cy = img['c']
f = np.array([fx, fy]); c = np.array([cx, cy]);
joint_cam = np.array(ann['keypoints_cam'])
joint_cam_posenet = np.array(ann['keypoints_cam_posenet'])
root_cam = joint_cam[self.root_idx]
joint_img = np.array(ann['keypoints_img'])
joint_img = np.concatenate([joint_img, joint_cam[:,2:]],1)
joint_img[:,2] = joint_img[:,2] - root_cam[2]
joint_vis = np.ones((self.joint_num,1))
bbox_id = ann['id']
orig_bbox = ann['bbox']
bbox = np.array(ann['bbox'])
img_width, img_height = img['width'], img['height']
x, y, w, h = bbox
center = [x+w/2, y+h/2]
x1 = np.max((0, x))
y1 = np.max((0, y))
x2 = np.min((img_width - 1, x1 + np.max((0, w - 1))))
y2 = np.min((img_height - 1, y1 + np.max((0, h - 1))))
if w*h > 0 and x2 >= x1 and y2 >= y1:
bbox = np.array([x1, y1, x2-x1, y2-y1])
else:
print("sanitize bboxes:",image_id)
continue
bbox = larger_bbox(bbox)
n_copain = id2pairId[str(bbox_id)] - bbox_id + n # n_copain - n = id_copain - id
id_list = db.getAnnIds(image_id) # ids of instances in same img
dis2id = {}
n_list = []
for cand_id in id_list:
bbox_cand = db.loadAnns(cand_id)[0]['bbox']
center_cand = [bbox_cand[0] + bbox_cand[2]/2, bbox_cand[1] + bbox_cand[3]/2]
dis = math.sqrt((center[0] - center_cand[0])**2 + (center[1] - center_cand[1])**2)
dis2id[dis] = cand_id
id_list_sorted = [dis2id[k] for k in sorted(dis2id.keys())]
for cand_id in id_list_sorted:
n_list.append(cand_id - bbox_id + n)
data.append({
'img_id': image_id,
'img_path': img_path,
'id': bbox_id,
'n_copain': n_copain,
'n_list': n_list,
'orig_bbox': orig_bbox,
'bbox': bbox,
'joint_img': joint_img, # [org_img_x, org_img_y, depth - root_depth]
'joint_cam': joint_cam, # [X, Y, Z] in camera coordinate
'joint_vis': joint_vis,
'root_cam': root_cam, # [X, Y, Z] in camera coordinate
'f': f,
'c': c,
'joint_cam_posenet': joint_cam_posenet, # result from posenet_nonefine
#'noise': noise,
})
n = n + 1
return data
def evaluate(self, preds, result_dir):
# test for img output, use in test.py
# add posenet 3d cam result to gt file as 'MuPoTS-3D_with_posenet_result.json', add key 'keypoints_cam_posenet'
gts = self.load_data()#self.data
sample_num = len(preds)
joint_num = self.joint_num
pred_2d_per_bbox = {}
pred_2d_save = {}
pred_3d_save = {}
gt_dict_orig = json.load(open('data/MuCo/data/annotations/MuCo-3DHP.json','r'))
gt_dict = gt_dict_orig
for n in range(sample_num):
gt = gts[n]
bbox = gt['bbox']
gt_3d_root = gt['root_cam']
bbox_id = gt['id']
f = gt['f']
c = gt['c']
pred_2d_kpt = preds[n].copy()
pred_2d_kpt = warp_coord_to_original(pred_2d_kpt, bbox, gt_3d_root)
if str(n) in pred_2d_per_bbox:
pred_2d_per_bbox[str(n)].append(pred_2d_kpt)
else:
pred_2d_per_bbox[str(n)] = [pred_2d_kpt]
pred_2d_kpt = pred_2d_per_bbox[str(n)].copy()
pred_2d_kpt = np.mean(np.array(pred_2d_kpt), axis=0)
pred_3d_kpt = pixel2cam(pred_2d_kpt, f, c)
### add posenet 3d cam result to gt file as 'MuCo_with_posenet_result.json', add key 'keypoints_cam_posenet'
gt_dict['annotations'][int(bbox_id)]['keypoints_cam_posenet'] = pred_3d_kpt.tolist()
with open('data/MuCo/MuCo_with_posenet_result.json','w') as w:
json.dump(gt_dict, w)
| 36.741758 | 246 | 0.549125 | 5,685 | 0.850157 | 0 | 0 | 0 | 0 | 0 | 0 | 1,349 | 0.201735 |
46c4c877c664ccafd3ca1715fdec77ace89a64db | 174 | py | Python | search/urls.py | Tariqalrehily/iShop | 0454106c1ff5b508b734b95f3f6cfe1008d630b5 | [
"PostgreSQL"
] | null | null | null | search/urls.py | Tariqalrehily/iShop | 0454106c1ff5b508b734b95f3f6cfe1008d630b5 | [
"PostgreSQL"
] | 5 | 2020-06-06T00:28:48.000Z | 2022-01-13T02:04:54.000Z | search/urls.py | Tariqalrehily/iShop | 0454106c1ff5b508b734b95f3f6cfe1008d630b5 | [
"PostgreSQL"
] | 1 | 2020-02-18T19:33:21.000Z | 2020-02-18T19:33:21.000Z | from django.conf.urls import url
from .views import do_search, filter
urlpatterns = [
url(r'^$', do_search, name='search'),
url(r'^filter/', filter, name='filter')
] | 24.857143 | 43 | 0.672414 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.183908 |
46c4d484fef50c8a875c458a4b1e3f90c5962e12 | 693 | py | Python | examples/main.py | qenu/slash_util | a251e594917a8b1a40f28506fb812f44824aeeb9 | [
"MIT"
] | 30 | 2022-01-27T01:43:00.000Z | 2022-03-18T21:41:10.000Z | examples/main.py | qenu/slash_util | a251e594917a8b1a40f28506fb812f44824aeeb9 | [
"MIT"
] | 16 | 2022-01-27T03:17:20.000Z | 2022-02-21T22:14:23.000Z | examples/main.py | qenu/slash_util | a251e594917a8b1a40f28506fb812f44824aeeb9 | [
"MIT"
] | 18 | 2022-01-27T01:42:49.000Z | 2022-03-06T21:55:29.000Z | import discord
import slash_util
class SampleCog(slash_util.Cog):
@slash_util.slash_command(guild_id=123)
async def pog(self, ctx: slash_util.Context):
await ctx.send("pog", ephemeral=True)
@slash_util.message_command(guild_id=123)
async def quote(self, ctx: slash_util.Context, message: discord.Message): # the `message` parameter is REQURIED for message commands
await ctx.send(f"> {message.clean_content}\n- {message.author}")
@slash_util.user_command(guild_id=123)
async def bonk(self, ctx: slash_util.Context, user: discord.Member):
await ctx.send(f"{ctx.author} bonks {user} :hammer:")
def setup(bot):
bot.add_cog(SampleCog(bot))
| 36.473684 | 137 | 0.717172 | 609 | 0.878788 | 0 | 0 | 560 | 0.808081 | 427 | 0.616162 | 148 | 0.213564 |
46c64df06c02cc7d7adc3ca436f95b853a81c0c7 | 1,047 | py | Python | elkatip/elkatip.py | kompasim/elkatip | d20d557518dcf3ba5d41f799986301c4054fa658 | [
"MIT"
] | 8 | 2020-06-06T11:05:30.000Z | 2022-03-25T21:14:14.000Z | elkatip/elkatip.py | Dev-arxidin/elkatip | d20d557518dcf3ba5d41f799986301c4054fa658 | [
"MIT"
] | null | null | null | elkatip/elkatip.py | Dev-arxidin/elkatip | d20d557518dcf3ba5d41f799986301c4054fa658 | [
"MIT"
] | 3 | 2020-07-28T18:32:51.000Z | 2021-04-06T06:09:22.000Z | # encoding=utf-8
# Elkatip
import os
import imp
# main class
class Elkatip():
api = None
gui = None
def __init__(self):
self.modulePath = os.path.dirname(__file__)
pass
def toExt(self, text):
if not self.api:
api = imp.load_source("api", self.modulePath + "/api.py")
self.api = api.Api()
return self.api.toExt(text)
def toBase(self, text):
if not self.api:
api = imp.load_source("api", self.modulePath + "/api.py")
self.api = api.Api()
return self.api.toBase(text)
def showGui(self):
if not self.gui:
gui = imp.load_source("gui", self.modulePath + "/gui.py")
self.gui = gui.Gui()
self.gui.showGui()
if __name__ == "__main__":
ktp = Elkatip()
ktp.showGui()
# uighurche = "ئالىمجان" # base
# print(uighurche)
# uyghurqa = ktp.toExt(uighurche) # ext
# uighurche = ktp.toBase(uyghurqa) # base
# print(uyghurqa)
# print(uighurche)
| 23.266667 | 69 | 0.554919 | 722 | 0.68436 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.247393 |
46c69834d12a671ab5422edcba87960ceaab8bc1 | 7,704 | py | Python | src/model/hurdle_regression.py | SensorDX/rainqc | d957705e0f1e2e05b3bf23c5b6fd77a135ac69cd | [
"Apache-2.0"
] | 1 | 2022-02-16T01:24:17.000Z | 2022-02-16T01:24:17.000Z | src/model/hurdle_regression.py | SensorDX/rainqc | d957705e0f1e2e05b3bf23c5b6fd77a135ac69cd | [
"Apache-2.0"
] | null | null | null | src/model/hurdle_regression.py | SensorDX/rainqc | d957705e0f1e2e05b3bf23c5b6fd77a135ac69cd | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from sklearn.exceptions import NotFittedError
from sklearn.neighbors import KernelDensity
from sklearn.linear_model import LinearRegression, LogisticRegression
import pickle
import os
import matplotlib.pylab as plt
from sklearn.externals import joblib
import numpy as np
from sklearn.model_selection import GridSearchCV
import seaborn as sbn
import logging
from .absmodel import Module
logger_format = "%(levelname)s [%(asctime)s]: %(message)s"
logging.basicConfig(filename="logfile.log",
level=logging.DEBUG, format=logger_format,
filemode='w') # use filemode='a' for APPEND
logger = logging.getLogger(__name__)
def grid_fit_kde(residual):
"""
Grid search for best bandwidth of KDE
Args:
residual: residual value.
Returns:
"""
grid = GridSearchCV(KernelDensity(), {'bandwidth':np.linspace(0.1,1.0,20)}, cv=20)
grid.fit(residual)
return grid.best_params_
class MixLinearModel(Module):
"""
Mixture of linear src.
Train logistic regression for 0/1 prediction. And fit weighted linear regression,
with weight from output of the logistic regression.
Fit mixture of linear-src for rainy and non-rainy events.
"""
def __init__(self, linear_reg=LinearRegression(), log_reg=LogisticRegression(),
kde=KernelDensity(kernel="gaussian"), eps=0.0001, offset = -.05):
super(MixLinearModel, self).__init__()
self.linear_reg = linear_reg
self.eps = eps
self.log_reg = log_reg
self.kde = kde
self.fitted = False
self.residual = False
self.offset= offset
@staticmethod
def residual_plot(observed, true_value, fitted):
plt.scatter(true_value, np.log(observed))
plt.plot(true_value, fitted, '-r')
plt.xlabel('Log (predictor + eps)')
plt.ylabel('Log (response + eps)')
plt.show()
@staticmethod
def residual_density_plot(residual):
plt.subplot(211)
sbn.distplot(residual,hist=True )
plt.subplot(212)
sbn.kdeplot(residual)
@staticmethod
def grid_fit_kde(residual):
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(KernelDensity(), {'bandwidth':np.linspace(0.1,1.0,20)}, cv=20)
grid.fit(residual)
return grid.best_params_
def _fit(self, x, y, verbose=False, load=False):
"""
Args:
y: Nx1 ndarray observed value.
x: NxD ndarry features.
Returns:
"""
x, y = Module.validate(x, y)
l_x, l_y = np.log(x + self.eps), np.log(y + self.eps)
y_zero_one = (y > 0.0).astype(int)
if y_zero_one.max() == y_zero_one.min():
raise NotFittedError("Logistic model couldn't fit, because the number of classes is <2")
self.log_reg.fit(x, y_zero_one)
sample_weight = self.log_reg.predict_proba(x)[:, 1]
# Linear regression under log mode.
self.linear_reg.fit(X=l_x, y=l_y, sample_weight=sample_weight)
self.fitted = self.linear_reg.predict(l_x)
self.residual = (self.fitted - l_y)
# Grid fit for bandwidth.
if load is False:
param = grid_fit_kde(self.residual)
self.kde = KernelDensity(bandwidth=param["bandwidth"])
self.kde.fit(self.residual)
else:
self.kde = pickle.load(open("all_kde.kd","rb"))
self.fitted = True
#logger.debug("KDE bandwidth %s"%self.kde.bandwidth)
return self
def predict(self, x, y, label=None):
"""
Predict log-likelihood of given observation under the trained src.
Args:
y: ndarray Ground truth observation.
x: ndarray matrix Features.
label: None,
Returns:
"""
x , y = Module.validate(x, y)
if self.fitted is False:
raise NotFittedError("Call fit before prediction")
log_pred = self.log_reg.predict_proba(x)[:, 1]
linear_pred = self.linear_reg.predict(np.log(x + self.eps))
return self.mixl(y, log_pred, linear_pred)
def decision_function(self, score):
"""
Return decision based on the anomaly score.
Args:
x:
y:
label:
Returns:
"""
return score - self.offset
def mixl(self, y, logreg_prediction, linear_predictions):
"""
- if RAIN = 0, $ -log (1-p_1)$
- if RAIN > 0, $ -log [p_1 \frac{P(log(RAIN + \epsilon)}{(RAIN + \epsilon)}]$
Args:
y: (np.array) observations.
logreg_prediction:(np.array) fitted values from logistic regression (0/1 src).
linear_predictions:(np.array) fitted values from linear regression on log scale.
"""
# Reshape the data
p = logreg_prediction.reshape([-1, 1])
observations = y.reshape([-1, 1])
predictions = linear_predictions.reshape([-1, 1])
zero_rain = np.multiply((1 - p), (observations == 0))
# density of residual and convert to non-log value.
residual = predictions - np.log(observations + self.eps)
residual_density = np.exp(self.kde.score_samples(residual)).reshape(-1,1)
non_zero_rain = np.divide(np.multiply(p, residual_density),
(observations + self.eps))
result = zero_rain + non_zero_rain
return -np.log(result + np.max(result))
def to_json(self):
if not self.fitted:
raise NotFittedError("Fit method should be called before save operation.")
model_config = {
"kde_model": self.kde,
"logistic_model": self.log_reg,
"linear_model": self.linear_reg
}
return model_config
@classmethod
def from_json(cls, model_config):
mlm = MixLinearModel(linear_reg=model_config['linear_model'], log_reg=model_config['logistic_model'],
kde=model_config['kde_model'])
mlm.fitted = True
return mlm
def save(self, model_id="001", model_path="rainqc_model"):
"""
save the reg src.
Returns:
"""
# model_config = {"model_id":model_id,
# "kde":self.kde,
# "logistic_reg":self.log_reg,
# "linear_regression":self.linear_reg}
# localdatasource.dump(model_config,open(model_id+".localdatasource","wb"))
current_model = os.path.join(model_path, model_id)
if not os.path.exists(current_model):
os.makedirs(current_model)
joblib.dump(self.kde, os.path.join(current_model, "kde_model.pk"))
joblib.dump(self.linear_reg, os.path.join(current_model, "linear_model.pk"))
joblib.dump(self.log_reg, os.path.join(current_model, "logistic_model.pk"))
@classmethod
def load(cls, model_id="001", model_path="rainqc_model"):
loaded_model = os.path.join(model_path, model_id)
# model_config = localdatasource.load(open(model_id+".localdatasource","rb"))
if not os.path.exists(loaded_model):
return ValueError("Directory for saved models don't exist")
reg_model = joblib.load(os.path.join(loaded_model, "linear_model.pk"))
kde = joblib.load(os.path.join(loaded_model, "kde_model.pk"))
log_reg = joblib.load(os.path.join(loaded_model, "logistic_model.pk")) # pickle.load(model_config['zerone'])
mxll = MixLinearModel(linear_reg=reg_model, log_reg=log_reg, kde=kde)
mxll.fitted = True
return mxll
| 33.938326 | 117 | 0.616693 | 6,718 | 0.872015 | 0 | 0 | 1,689 | 0.219237 | 0 | 0 | 2,459 | 0.319185 |
46c6c52458aed5ead8d1c69894d74a5069c08e0c | 998 | py | Python | mundo2/exercicio065.py | beatriznaimaite/Exercicios-Python-Curso-Em-Video | e4213c2054a67d7948aa9023f2f0f33ab7e8eb96 | [
"MIT"
] | null | null | null | mundo2/exercicio065.py | beatriznaimaite/Exercicios-Python-Curso-Em-Video | e4213c2054a67d7948aa9023f2f0f33ab7e8eb96 | [
"MIT"
] | null | null | null | mundo2/exercicio065.py | beatriznaimaite/Exercicios-Python-Curso-Em-Video | e4213c2054a67d7948aa9023f2f0f33ab7e8eb96 | [
"MIT"
] | null | null | null | """
Crie um programa que leia vários números inteiros pelo teclado. No final da execução, mostre a média entre
todos os valores e qual foi o maior e o menor valores lidos. O programa deve perguntar ao usuário se ele
quer ou não continuar a digitar valores.
"""
resposta = 'S'
cont = soma = maior = menor = media = 0
while resposta == 'S':
numero = int(input('Digite um número: '))
cont += 1
soma += numero
if cont == 1:
maior = menor = numero
else:
if numero > maior:
maior = numero
if numero < menor:
menor = numero
resposta = str(input('Quer continuar? [S/N]: ')).strip().upper()[0]
while resposta not in 'SN':
resposta = str(input('Quer continuar? [S/N]: ')).strip().upper()[0]
if resposta == 'N':
resposta = False
print('Finalizando...')
media = soma/cont
print(f'A média entre os valores lidos foi de {media:.2f}.')
print(f'O maior valor digitado foi {maior} e o menor {menor}.')
| 30.242424 | 106 | 0.614228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 477 | 0.473684 |
46c7b45fdc6d0c9df561c0e5dd726412c25bc59b | 1,329 | py | Python | tpdata/templeplus/lib/templeplus/pymod.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | null | null | null | tpdata/templeplus/lib/templeplus/pymod.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | null | null | null | tpdata/templeplus/lib/templeplus/pymod.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | null | null | null | import tpdp
class PythonModifier(tpdp.ModifierSpec):
def AddHook(self, eventType, eventKey, callbackFcn, argsTuple ):
self.add_hook(eventType, eventKey, callbackFcn, argsTuple)
def ExtendExisting(self, condName):
self.extend_existing(condName)
def AddItemForceRemoveHandler(self): # in charge of backing up condition args
self.add_item_force_remove_callback()
def MapToFeat(self, feat_enum, feat_list_max = -1, feat_cond_arg2 = 0):
self.add_to_feat_dict(feat_enum, feat_list_max, feat_cond_arg2)
# Spell related standard hooks
def AddSpellCountdownStandardHook(self):
# adds an ET_OnBeginRound handler that (normally) does:
# If countdown expired: (<0)
# 1. Float text "Spell Expired"
# 2. RemoveSpell() (has case-by-case handlers for Spell_End; Temple+ adds generic handling for wall spells here)
# 3. RemoveSpellMod()
# Else:
# Decrement count, update spell packet duration
self.add_spell_countdown_standard()
def AddAoESpellEndStandardHook(self):
# adds a EK_S_Spell_End handler that:
# 1. Ends particles for all spell objects
# 2. RemoveSpellMod()
self.add_aoe_spell_ender()
def AddSpellDismissStandardHook(self):
self.add_spell_dismiss_hook() | 47.464286 | 122 | 0.696012 | 1,316 | 0.990218 | 0 | 0 | 0 | 0 | 0 | 0 | 478 | 0.359669 |
46c8699b2b2f0da56da0744dcc8bdfa6005669d4 | 7,279 | py | Python | utils/Network/base_Network_module.py | mohammedayub44/ObjectDetection | 6d151e417ff9322b6be5722b40bc4a209282d13d | [
"BSD-3-Clause"
] | null | null | null | utils/Network/base_Network_module.py | mohammedayub44/ObjectDetection | 6d151e417ff9322b6be5722b40bc4a209282d13d | [
"BSD-3-Clause"
] | null | null | null | utils/Network/base_Network_module.py | mohammedayub44/ObjectDetection | 6d151e417ff9322b6be5722b40bc4a209282d13d | [
"BSD-3-Clause"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
# --1.2.1
class one_conv(nn.Module):
    """A single 3x3 convolution (padding 1) -> optional BatchNorm -> ReLU.

    Args:
        in_ch: number of input channels.
        out_ch: number of output channels.
        normaliz: when True, insert a BatchNorm2d between conv and ReLU.
    """

    def __init__(self, in_ch, out_ch, normaliz=False):
        super(one_conv, self).__init__()
        layers = [nn.Conv2d(in_ch, out_ch, 3, padding=1)]
        if normaliz:
            layers.append(nn.BatchNorm2d(out_ch))
        layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        # Spatial size is preserved (kernel 3, padding 1); channels become out_ch.
        return self.conv(x)
# --1.2.2
class double_conv(nn.Module):
    """Two consecutive (3x3 conv -> optional BatchNorm -> ReLU) stages.

    The first conv maps in_ch -> out_ch, the second keeps out_ch.  Spatial
    size is preserved throughout (kernel 3, padding 1).

    Args:
        in_ch: number of input channels.
        out_ch: number of output channels.
        normaliz: when True, insert BatchNorm2d after each convolution.
    """

    def __init__(self, in_ch, out_ch, normaliz=False):
        super(double_conv, self).__init__()
        layers = [nn.Conv2d(in_ch, out_ch, 3, padding=1)]
        if normaliz:
            layers.append(nn.BatchNorm2d(out_ch))
        layers.append(nn.ReLU(inplace=True))
        layers.append(nn.Conv2d(out_ch, out_ch, 3, padding=1))
        if normaliz:
            layers.append(nn.BatchNorm2d(out_ch))
        layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
# --1.2.3
class three_conv(nn.Module):
    """Three consecutive (3x3 conv -> optional BatchNorm -> ReLU) stages.

    The first conv maps in_ch -> out_ch; the remaining two keep out_ch.
    Spatial size is preserved (kernel 3, padding 1).  The three stages were
    previously three copy-pasted stanzas; they are built in a loop here,
    producing the exact same module sequence.

    Args:
        in_ch: number of input channels.
        out_ch: number of output channels.
        normaliz: when True, insert BatchNorm2d after each convolution.
    """

    def __init__(self, in_ch, out_ch, normaliz=False):
        super(three_conv, self).__init__()
        ops = []
        ch = in_ch
        for _ in range(3):
            ops.append(nn.Conv2d(ch, out_ch, 3, padding=1))
            if normaliz:
                ops.append(nn.BatchNorm2d(out_ch))
            ops.append(nn.ReLU(inplace=True))
            ch = out_ch  # after the first stage, channels stay at out_ch
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        x = self.conv(x)
        return x
class resconv2(nn.Module):
    """Two-conv residual block: relu(conv2(relu(conv1(x)))) + shortcut(x).

    When in_ch != out_ch, the shortcut is a 1x1 convolution (``self.red``)
    that matches the channel count; otherwise the input is used directly.

    NOTE(bugfix): the original ``forward`` computed the residual sum into a
    local ``x`` but then returned ``rx`` (the conv branch only), discarding
    the skip connection and wasting the 1x1 reduction conv.  This version
    returns the residual sum that was clearly intended.

    Args:
        in_ch: number of input channels.
        out_ch: number of output channels.
        ksize / kstride / kpad: kernel size, stride and padding of the two
            main convolutions (defaults keep spatial size unchanged).
    """

    def __init__(self, in_ch, out_ch, ksize=3, kstride=1, kpad=1):
        super(resconv2, self).__init__()
        self.conv1 = nn.Conv2d(in_ch, out_ch, ksize, stride=kstride, padding=kpad)
        self.conv2 = nn.Conv2d(out_ch, out_ch, ksize, stride=kstride, padding=kpad)
        if in_ch != out_ch:
            # 1x1 conv to align the shortcut's channel count with out_ch
            self.red = nn.Conv2d(in_ch, out_ch, (1, 1), stride=1, padding=0)
        else:
            self.red = None

    def forward(self, x):
        rx = F.relu(self.conv1(x))
        rx = F.relu(self.conv2(rx))
        shortcut = self.red(x) if self.red is not None else x
        return shortcut + rx
class up_res(nn.Module):
    """Decoder stage: upsample ``x1``, merge with the skip tensor ``x2``,
    then refine with a residual block.

    With ``if_convt=True`` the upsampling is a learned ConvTranspose2d;
    otherwise it is bilinear interpolation followed by ``conv1`` (a 3x3 conv
    with no padding, which also shrinks each spatial dim by 2).  The
    upsampled map is zero-padded to match ``x2``, concatenated channel-wise,
    and passed through a ``resconv2`` block.

    Args:
        up_in_ch / up_out_ch: channels into / out of the upsampling path.
        cat_in_ch / cat_out_ch: channels into / out of the post-concat block.
        if_convt: choose transposed conv (True) or bilinear upsampling (False).
    """

    def __init__(self, up_in_ch, up_out_ch, cat_in_ch, cat_out_ch, if_convt=False):
        super(up_res, self).__init__()
        self.if_convt = if_convt
        if if_convt:
            self.up = nn.ConvTranspose2d(up_in_ch, up_out_ch, 2, stride=2)
        else:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear',
                                  align_corners=False)
        # only used on the bilinear path (see forward); unused when if_convt
        self.conv1 = nn.Conv2d(up_in_ch, up_out_ch, (3, 3))
        self.conv2 = resconv2(cat_in_ch, cat_out_ch)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        if not self.if_convt:
            # bilinear upsampling keeps the channel count, so reduce it here
            x1 = self.conv1(x1)
        # zero-pad x1 so its spatial size matches the skip connection x2
        gap_h = x2.size()[2] - x1.size()[2]
        gap_w = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, (gap_w // 2, int(math.ceil(gap_w / 2.0)),
                        gap_h // 2, int(math.ceil(gap_h / 2.0))))
        merged = torch.cat([x2, x1], dim=1)
        return self.conv2(merged)
# --1.3.1
class up(nn.Module):
    """Decoder stage: upsample x1, pad to x2's size, concatenate, then a double conv."""
    def __init__(self, up_in_ch, up_out_ch, cat_in_ch, cat_out_ch, if_convt=False):
        super(up, self).__init__()
        self.if_convt = if_convt
        if self.if_convt:
            # Learned 2x upsampling (changes channel count itself).
            self.up = nn.ConvTranspose2d(up_in_ch, up_out_ch, 2, stride=2)
        else:
            self.up = nn.Upsample(scale_factor=2,
                                  mode='bilinear',
                                  align_corners=False)
        self.conv1 = one_conv(up_in_ch, up_out_ch)
        self.conv2 = double_conv(cat_in_ch, cat_out_ch)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        if not self.if_convt:
            # Bilinear upsampling keeps the channel count; reduce it here.
            x1 = self.conv1(x1)
        # Pad x1 so both tensors share spatial dimensions (3//2=1, ceil(3/2)=2).
        dy = x2.size()[2] - x1.size()[2]
        dx = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, (dx // 2, int(math.ceil(dx / 2.0)),
                        dy // 2, int(math.ceil(dy / 2.0))))
        merged = torch.cat([x2, x1], dim=1)
        del x2, x1
        return self.conv2(merged)
# --1.3.2
class upcat(nn.Module):
    """Decoder stage that upsamples x1, pads it to x2's size and returns the concatenation."""
    def __init__(self, up_in_ch, up_out_ch, if_convt=False):
        super(upcat, self).__init__()
        self.if_convt = if_convt
        if self.if_convt:
            # Learned 2x upsampling (changes channel count itself).
            self.up = nn.ConvTranspose2d(up_in_ch, up_out_ch, 2, stride=2)
        else:
            self.up = nn.Upsample(scale_factor=2,
                                  mode='bilinear',
                                  align_corners=False)
        self.conv1 = one_conv(up_in_ch, up_out_ch)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        if not self.if_convt:
            # Bilinear upsampling keeps the channel count; reduce it here.
            x1 = self.conv1(x1)
        # Pad x1 so both tensors share spatial dimensions (3//2=1, ceil(3/2)=2).
        dy = x2.size()[2] - x1.size()[2]
        dx = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, (dx // 2, int(math.ceil(dx / 2.0)),
                        dy // 2, int(math.ceil(dy / 2.0))))
        merged = torch.cat([x2, x1], dim=1)
        del x2, x1
        return merged
# --1.4
def change_padding(net, del_or_add='del', pad_size=(1, 1)):
    """Overwrite the padding of every Conv2d in *net* in place.

    ``del_or_add == 'del'`` sets padding to ``(0, 0)``; any other value sets
    it to *pad_size*. Returns the (mutated) *net* for convenience.
    """
    new_pad = (0, 0) if del_or_add == 'del' else pad_size
    for module in net.modules():
        if isinstance(module, nn.Conv2d):
            module.padding = new_pad
    return net
# --1.5 can only compute linear
def compute_rf(net):
    """Compute the receptive field of a linear chain of Conv2d/MaxPool2d layers.

    Returns a dict with 'rf_size', 'rf_pad' and 'rf_stride'. Only valid for
    purely sequential ("linear") architectures, as noted at the call site.
    """
    size, pad, stride = 1, 0, 1

    def _first(value):
        # kernel_size / padding / stride may be an int or an (h, w) pair.
        return value[0] if isinstance(value, (tuple, list)) else value

    for layer in net.modules():
        if isinstance(layer, (nn.Conv2d, nn.MaxPool2d)):
            k = _first(layer.kernel_size)
            p = _first(layer.padding)
            s = _first(layer.stride)
            # Both padding and growth scale with the accumulated stride.
            pad += p * stride
            size += (k - 1) * stride
            stride *= s
    return {'rf_size': size, 'rf_pad': pad, 'rf_stride': stride}
46c928585677c4ea370d9cde42b1e5b9a1eac936 | 45 | py | Python | CTFd/constants/themes.py | nox237/CTFd | ff6e093fa6bf23b526ecddf9271195b429240ff4 | [
"Apache-2.0"
] | 3,592 | 2017-03-12T19:44:07.000Z | 2022-03-30T16:03:33.000Z | CTFd/constants/themes.py | nox237/CTFd | ff6e093fa6bf23b526ecddf9271195b429240ff4 | [
"Apache-2.0"
] | 1,648 | 2017-03-12T23:44:34.000Z | 2022-03-31T15:28:38.000Z | CTFd/constants/themes.py | nox237/CTFd | ff6e093fa6bf23b526ecddf9271195b429240ff4 | [
"Apache-2.0"
] | 1,736 | 2017-03-13T14:01:28.000Z | 2022-03-31T08:14:24.000Z | ADMIN_THEME = "admin"
DEFAULT_THEME = "core"
| 15 | 22 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.288889 |
46c97fefdfff899068770ce996e87c74a6a8d373 | 6,865 | py | Python | mongodbhttpinterface/__main__.py | anrylu/mongodbhttpinterface | 23bf6eda558a92380e378450da18f3d6a233c266 | [
"MIT"
] | null | null | null | mongodbhttpinterface/__main__.py | anrylu/mongodbhttpinterface | 23bf6eda558a92380e378450da18f3d6a233c266 | [
"MIT"
] | null | null | null | mongodbhttpinterface/__main__.py | anrylu/mongodbhttpinterface | 23bf6eda558a92380e378450da18f3d6a233c266 | [
"MIT"
] | null | null | null | import json
from flask import Flask, request
from pymongo import MongoClient, ASCENDING, DESCENDING
from pymongo.errors import ConnectionFailure, ConfigurationError, OperationFailure, AutoReconnect
app = Flask(__name__)
# Registry of named MongoClient instances, populated by the /_connect route.
mongo_connections = {}
@app.route('/_connect', methods=["POST"])
def connect():
    """Open a MongoClient to POST form field 'server' and register it under 'name'."""
    name = request.form.get('name', 'default')
    server = request.form.get('server', 'localhost:27017')
    app.logger.info('connect to %s (%s)', server, name)
    try:
        mongo_connections[name] = MongoClient('mongodb://' + server)
    except Exception:
        return {
            "ok": 0,
            "errmsg": "could not connect",
            "server": server,
            "name": name}
    return {
        "ok": 1,
        "server": server,
        "name": name
    }
@app.route('/<dbname>/_authenticate', methods=['POST'])
def authenticate(dbname):
    """Authenticate the named connection against *dbname* using SCRAM-SHA-1.

    Form fields: ``name`` (connection registered via /_connect),
    ``username`` and ``password``. Returns a JSON status document.
    """
    name = request.form.get('name', 'default')
    username = request.form.get('username')
    password = request.form.get('password')
    # get connection
    client = mongo_connections.get(name)
    if client is None:
        return {
            "ok": 0,
            "errmsg": "couldn\'t get connection to mongo"
        }
    try:
        # NOTE(review): this logs the password in plain text — consider
        # redacting before production use.
        app.logger.info(
            'authenticate %s with %s (%s)',
            dbname, username, password)
        # Credentials are checked against the 'admin' database.
        client[dbname].authenticate(
            username,
            password,
            source='admin',
            mechanism='SCRAM-SHA-1'
        )
        return {
            "ok": 1
        }
    except Exception:
        app.logger.exception("authenticate failed")
        return {
            "ok": 0,
            "errmsg": "authentication failed"
        }
@app.route('/<dbname>/_cmd', methods=['POST'])
def cmd(dbname):
    """Placeholder for running raw database commands.

    BUG FIX: the stub used to return None, which makes Flask raise and
    respond with an opaque 500. Return an explicit JSON error instead
    until the endpoint is implemented.
    """
    return {"ok": 0, "errmsg": "not implemented"}
@app.route('/<dbname>/<collname>/_insert', methods=['POST'])
def insert(dbname, collname):
    """Insert JSON documents (form field 'docs') into dbname.collname."""
    name = request.form.get('name', 'default')
    # get connection
    client = mongo_connections.get(name)
    if client is None:
        return {
            "ok": 0,
            "errmsg": "couldn't get connection to mongo"
        }
    # BUG FIX: guard before json.loads — a missing 'docs' field returned
    # None and json.loads(None) raised TypeError (an opaque 500).
    raw_docs = request.form.get('docs')
    if not raw_docs:
        return {
            "ok": 0,
            "errmsg": "missing docs"
        }
    docs = json.loads(raw_docs)
    if not docs:
        return {
            "ok": 0,
            "errmsg": "missing docs"
        }
    try:
        # BUG FIX: was app.logger.exception(), which logs a bogus traceback
        # outside an exception handler; this is an informational message.
        app.logger.info("insert %s", docs)
        client[dbname][collname].insert(docs)
        return {
            "ok": 1
        }
    except Exception:
        app.logger.exception("insert failed")
        return {
            "ok": 0,
            "errmsg": "insert failed"
        }
def __output_results(cursor):
    """Drain *cursor* and wrap its documents in a JSON-able envelope.

    Each document's '_id' is stringified so the result can be serialized.
    Mongo transport errors are translated into {"ok": 0, ...} responses.
    """
    docs = []
    while True:
        try:
            doc = cursor.next()
        except StopIteration:
            break
        except AutoReconnect:
            return {"ok": 0, "errmsg": "auto reconnecting, please try again"}
        except OperationFailure as of:
            return {"ok": 0, "errmsg": "%s" % of}
        doc['_id'] = str(doc['_id'])
        docs.append(doc)
    return {
        "results": docs,
        "ok": 1
    }
@app.route('/<dbname>/<collname>/_find', methods=['GET'])
def find(dbname, collname):
    """Run a find query. Query-string params: criteria, fields, limit, skip, sort."""
    # NOTE(review): 'name' is read from form data on a GET request while the
    # other parameters use query args — confirm this asymmetry is intended.
    name = request.form.get('name', 'default')
    # get connection
    client = mongo_connections.get(name)
    if client is None:
        return {
            "ok": 0,
            "errmsg": "couldn't get connection to mongo"
        }
    # get criteria
    criteria = request.args.get('criteria')
    app.logger.info("find by criteria %s", criteria)
    if criteria:
        criteria = json.loads(criteria)
    # get fields
    fields = request.args.get('fields')
    if fields:
        fields = json.loads(fields)
    # get limit
    limit = int(request.args.get('limit', 0))
    # get skip
    skip = int(request.args.get('skip', 0))
    # Translate {"field": 1/-1} into pymongo's [(field, direction), ...] form.
    sort_to_use = []
    sort = request.args.get('sort')
    if sort:
        sort = json.loads(sort)
        for sort_key in sort:
            if sort[sort_key] == -1:
                sort_to_use.append([sort_key, DESCENDING])
            else:
                sort_to_use.append([sort_key, ASCENDING])
    try:
        app.logger.info("find by criteria %s", criteria)
        cursor = client[dbname][collname].find(
            criteria, fields, skip, limit, sort=sort_to_use)
        # BUG FIX: removed `setattr(cursor, "id", id)` — it bound the
        # *builtin* id function to the cursor and was never read anywhere.
        return __output_results(cursor)
    except Exception:
        app.logger.exception("find failed")
        return {
            "ok": 0,
            "errmsg": "find failed"
        }
@app.route('/<dbname>/<collname>/_update', methods=['POST'])
def update(dbname, collname):
    """Update documents matching 'criteria' with 'newobj'. Flags: upsert, multi."""
    name = request.form.get('name', 'default')
    # get connection
    client = mongo_connections.get(name)
    if client is None:
        return {
            "ok": 0,
            "errmsg": "couldn't get connection to mongo"
        }
    # get criteria
    criteria = request.form.get('criteria')
    if criteria:
        criteria = json.loads(criteria)
    # get newobj
    newobj = request.form.get('newobj')
    if newobj:
        newobj = json.loads(newobj)
    if not newobj:
        return {"ok": 0, "errmsg": "missing newobj"}

    # BUG FIX: form values are strings, and bool('false') is True — any
    # non-empty value used to enable the flag. Parse truthy spellings.
    def _flag(field):
        value = request.form.get(field, '')
        return str(value).lower() in ('1', 'true', 'yes', 'on')

    upsert = _flag('upsert')
    multi = _flag('multi')
    try:
        app.logger.info("update by criteria %s", criteria)
        if multi:
            client[dbname][collname].update_many(
                criteria,
                newobj,
                upsert)
        else:
            client[dbname][collname].update_one(
                criteria,
                newobj,
                upsert)
        return {
            "ok": 1
        }
    except Exception:
        app.logger.exception("update failed")
        return {
            "ok": 0,
            "errmsg": "update failed"
        }
@app.route('/<dbname>/<collname>/_remove', methods=['POST'])
def remove(dbname, collname):
    """Delete every document matching the JSON 'criteria' form field."""
    conn_name = request.form.get('name', 'default')
    client = mongo_connections.get(conn_name)
    if client is None:
        return {
            "ok": 0,
            "errmsg": "couldn't get connection to mongo"
        }
    criteria = request.form.get('criteria')
    if criteria:
        criteria = json.loads(criteria)
    try:
        app.logger.info("remove by criteria %s", criteria)
        client[dbname][collname].delete_many(criteria)
    except Exception:
        app.logger.exception("remove failed")
        return {
            "ok": 0,
            "errmsg": "remove failed"
        }
    return {
        "ok": 1
    }
if __name__ == '__main__':
    # Development entry point: Flask debug server, reachable on all
    # interfaces at port 27080.
    app.debug = True
    app.run(host='0.0.0.0', port=27080)
| 25.238971 | 97 | 0.543627 | 0 | 0 | 0 | 0 | 5,980 | 0.871085 | 0 | 0 | 1,578 | 0.229862 |
46c98777ff9d65877370b3771c5ded1b1ada78db | 4,147 | py | Python | doc/examples/transform/plot_ransac.py | smheidrich/scikit-image | e9cf8b850c4c2800cc221be6f1dfff6a2a32a4eb | [
"BSD-3-Clause"
] | 1 | 2019-02-17T23:16:44.000Z | 2019-02-17T23:16:44.000Z | doc/examples/transform/plot_ransac.py | smheidrich/scikit-image | e9cf8b850c4c2800cc221be6f1dfff6a2a32a4eb | [
"BSD-3-Clause"
] | null | null | null | doc/examples/transform/plot_ransac.py | smheidrich/scikit-image | e9cf8b850c4c2800cc221be6f1dfff6a2a32a4eb | [
"BSD-3-Clause"
] | 1 | 2021-12-27T15:17:32.000Z | 2021-12-27T15:17:32.000Z | """
=========================================
Robust line model estimation using RANSAC
=========================================
In this example we see how to robustly fit a line model to faulty data using
the RANSAC (random sample consensus) algorithm.
First, the data are generated by adding Gaussian noise to a linear function.
Then, outlier points are added to the data set.
RANSAC iteratively estimates the parameters from the data set.
At each iteration the following steps are performed:
1. Select ``min_samples`` random samples from the original data and check
whether the set of data is valid (see ``is_data_valid`` option).
2. Estimate a model on the random subset
(``model_cls.estimate(*data[random_subset]``) and check whether the
estimated model is valid (see ``is_model_valid`` option).
3. Classify all the data points as either inliers or outliers by calculating
the residuals using the estimated model (``model_cls.residuals(*data)``) -
all data samples with residuals smaller than the ``residual_threshold``
are considered as inliers.
4. If the number of the inlier samples is greater than ever before,
save the estimated model as the best model. In case the current estimated
model has the same number of inliers, it is considered as the best model
only if the sum of residuals is lower.
These steps are performed either a maximum number of times or until one of
the special stop criteria are met. The final model is estimated using all the
inlier samples of the previously determined best model.
"""
import numpy as np
from matplotlib import pyplot as plt
from skimage.measure import LineModelND, ransac
# Fixed seed so the example is reproducible.
np.random.seed(seed=1)
# generate coordinates of line
x = np.arange(-200, 200)
y = 0.2 * x + 20
data = np.column_stack([x, y])
# add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
# add faulty data
faulty = np.array(30 * [(180., -100)])
faulty += 10 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# fit line using all data
model = LineModelND()
model.estimate(data)
# robustly fit line only using inlier data with RANSAC algorithm
model_robust, inliers = ransac(data, LineModelND, min_samples=2,
                               residual_threshold=1, max_trials=1000)
# Boolean mask of samples RANSAC rejected.
outliers = inliers == False
# generate coordinates of estimated models
line_x = np.arange(-250, 250)
line_y = model.predict_y(line_x)
line_y_robust = model_robust.predict_y(line_x)
fig, ax = plt.subplots()
ax.plot(data[inliers, 0], data[inliers, 1], '.b', alpha=0.6,
        label='Inlier data')
ax.plot(data[outliers, 0], data[outliers, 1], '.r', alpha=0.6,
        label='Outlier data')
ax.plot(line_x, line_y, '-k', label='Line model from all data')
ax.plot(line_x, line_y_robust, '-b', label='Robust line model')
ax.legend(loc='lower left')
plt.show()
######################################################################
# Now, we generalize this example to 3D points.
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from skimage.measure import LineModelND, ransac
np.random.seed(seed=1)
# generate coordinates of line
point = np.array([0, 0, 0], dtype='float')
direction = np.array([1, 1, 1], dtype='float') / np.sqrt(3)
xyz = point + 10 * np.arange(-100, 100)[..., np.newaxis] * direction
# add gaussian noise to coordinates
noise = np.random.normal(size=xyz.shape)
xyz += 0.5 * noise
xyz[::2] += 20 * noise[::2]
xyz[::4] += 100 * noise[::4]
# robustly fit line only using inlier data with RANSAC algorithm
model_robust, inliers = ransac(xyz, LineModelND, min_samples=2,
                               residual_threshold=1, max_trials=1000)
outliers = inliers == False
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xyz[inliers][:, 0], xyz[inliers][:, 1], xyz[inliers][:, 2], c='b',
           marker='o', label='Inlier data')
ax.scatter(xyz[outliers][:, 0], xyz[outliers][:, 1], xyz[outliers][:, 2], c='r',
           marker='o', label='Outlier data')
ax.legend(loc='lower left')
plt.show()
| 35.444444 | 80 | 0.684591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,187 | 0.527369 |
46ca3bad392fc2171355c3c74fef7e69fc9669b7 | 116 | py | Python | 5 kyu/Memoized Fibonacci.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 6 | 2020-09-03T09:32:25.000Z | 2020-12-07T04:10:01.000Z | 5 kyu/Memoized Fibonacci.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 1 | 2021-12-13T15:30:21.000Z | 2021-12-13T15:30:21.000Z | 5 kyu/Memoized Fibonacci.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | null | null | null | def fibonacci(n, res=[0,1]):
if len(res)<=n:
res.append(fibonacci(n-1)+fibonacci(n-2))
return res[n] | 29 | 49 | 0.586207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
46cb93aa92bbe683bb38be532385096924e02464 | 915 | py | Python | crawler/crawler/spiders/all_591_cities.py | eala/tw-rental-house-data | 5f595e6bfac8cc85ddff0746b3ee6806e83dec3a | [
"MIT"
] | null | null | null | crawler/crawler/spiders/all_591_cities.py | eala/tw-rental-house-data | 5f595e6bfac8cc85ddff0746b3ee6806e83dec3a | [
"MIT"
] | null | null | null | crawler/crawler/spiders/all_591_cities.py | eala/tw-rental-house-data | 5f595e6bfac8cc85ddff0746b3ee6806e83dec3a | [
"MIT"
] | null | null | null | all_591_cities = [
{
"city": "台北市",
"id": "1"
},
{
"city": "新北市",
"id": "3"
},
{
"city": "桃園市",
"id": "6"
},
{
"city": "新竹市",
"id": "4"
},
{
"city": "新竹縣",
"id": "5"
},
{
"city": "基隆市",
"id": "2"
},
{
"city": "宜蘭縣",
"id": "21"
},
{
"city": "台中市",
"id": "8"
},
{
"city": "彰化縣",
"id": "10"
},
{
"city": "苗栗縣",
"id": "7"
},
{
"city": "雲林縣",
"id": "14"
},
{
"city": "南投縣",
"id": "11"
},
{
"city": "高雄市",
"id": "17"
},
{
"city": "台南市",
"id": "15"
},
{
"city": "嘉義市",
"id": "12"
},
{
"city": "屏東縣",
"id": "19"
},
{
"city": "嘉義縣",
"id": "13"
},
{
"city": "花蓮縣",
"id": "23"
},
{
"city": "台東縣",
"id": "22"
},
{
"city": "金門縣",
"id": "25"
},
{
"city": "澎湖縣",
"id": "24"
}
]
| 10.517241 | 18 | 0.259016 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 517 | 0.496638 |
46cd5ffb65eea5ca8f885ef00d2ff3cf731b7ffc | 7,178 | py | Python | create_google_prior.py | AdrianNunez/zeroshot-action-recognition-action-priors | d336f2d7df990777aad9a95ff983cb23a2fafb48 | [
"MIT"
] | 3 | 2020-01-31T23:42:17.000Z | 2020-02-05T12:47:40.000Z | create_google_prior.py | AdrianNunez/zeroshot-action-recognition-action-priors | d336f2d7df990777aad9a95ff983cb23a2fafb48 | [
"MIT"
] | 18 | 2020-03-24T18:15:57.000Z | 2022-03-12T00:16:40.000Z | create_google_prior.py | AdrianNunez/zeroshot-action-recognition-action-priors | d336f2d7df990777aad9a95ff983cb23a2fafb48 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import os
import json
import logging
from googleapiclient.discovery import build
from tqdm import tqdm
from data import get_classes_ordered
logging.getLogger('googleapicliet.discovery_cache').setLevel(logging.ERROR)
# Project-wide settings are loaded from a JSON file at import time.
variables_file = 'variables.json'
with open(variables_file) as f:
    config = json.load(f)
# ============================================================
# VARIABLES TO MODIFY
# ============================================================
output_path = config['project_folder'] + 'google_prior/'
# ============================================================
# API keys --------------------------------------------
# One Google API key per Custom Search engine below (lists are zipped).
api_keys = [
    # add API keys
]
# Google Custom Search --------------------------------
cse_ids = [
    # add Google Custom Search IDs
]
# Function to perform a google search
def google_search(search_term, api_key, cse_id, **kwargs):
    """Run one Google Custom Search query and return the raw result dict.

    Extra keyword arguments are forwarded to the CSE ``list`` call.
    """
    service = build("customsearch", "v1", developerKey=api_key)
    res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
    return res
def transform_obj(obj):
    """Expand a canonical object label into its list of query synonyms.

    Unknown labels map to a one-element list containing the label itself.
    A fresh list is returned on every call so callers may mutate it safely.
    """
    synonyms = {
        'bell_pepper': ['bell pepper', 'green pepper', 'red pepper'],
        'cup': ['cup', 'mug'],
        'pot': ['pot', 'saucepan', 'pan'],
        'pan': ['pan', 'frying pan'],
        'eating_utensil': ['eating utensil', 'knife', 'spoon', 'fork'],
        'cooking_utensil': ['cooking utensil', 'knife', 'scissors', 'peeler',
                            'scale', 'jug', 'colander', 'strainer', 'blender'],
        'fridge_drawer': ['fridge drawer', 'refrigerator drawer'],
        'cutting_board': ['cutting board', 'cut board', 'chopping board',
                          'chop board'],
        'cheese_container': ['cheese container', 'cheese recipient',
                             'cheese package'],
        'oil_container': ['oil container', 'oil recipient', 'oil bottle'],
        'bread_container': ['bread container', 'bread recipient',
                            'bread package', 'bread bag'],
        'grocery_bag': ['grocery bag', 'groceries'],
        'seasoning_container': ['seasoning container', 'seasoning recipient',
                                'seasoning bottle', 'seasoning package'],
        'condiment_container': ['condiment container', 'condiment recipient',
                                'condiment bottle'],
        'tomato_container': ['tomato container', 'tomato recipient',
                             'tomato bottle'],
        'fridge': ['fridge', 'refrigerator'],
        'paper_towel': ['paper towel', 'tissue', 'kitchen paper',
                        'kitchen towel'],
        'cabinet': ['cabinet', 'locker', 'cupboard'],
    }
    return list(synonyms.get(obj, [obj]))
def transform_verb(verb):
    """Expand a canonical verb label into its list of query synonyms.

    Unknown labels map to a one-element list containing the label itself.
    A fresh list is returned on every call so callers may mutate it safely.
    """
    synonyms = {
        'divide/pull apart': ['divide', 'pull apart', 'separate', 'split',
                              'shred'],
        'move_around': ['move around', 'move', 'transfer'],
        'take': ['take', 'pick', 'pick up', 'grab'],
        'put': ['put', 'leave', 'place'],
        'cut': ['cut', 'slice', 'mince'],
        'wash': ['wash', 'clean'],
        'mix': ['mix', 'mingle', 'blend'],
    }
    return list(synonyms.get(verb, [verb]))
if __name__ == '__main__':
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # Vocabulary of object/verb class labels, in canonical order.
    objects,_ = get_classes_ordered(config['objects_file'])
    verbs,_ = get_classes_ordered(config['verbs_file'])
    total = 0
    results_dict, action_priors = dict(), dict()
    # Resume from a previous run if partial search results were saved.
    if os.path.exists(output_path + 'google_search.json'):
        with open(output_path + 'google_search.json', 'r') as json_file:
            results_dict = json.load(json_file)
    def check_queries_left(results_dict):
        # Report how many (verb synonym, object synonym) queries are still
        # missing from results_dict, keyed by the canonical "verb obj" pair.
        queries_done, queries_left = 0, 0
        # Check how many queries are left
        for verb in verbs:
            v = transform_verb(verb)
            for v_option in v:
                for obj in objects:
                    o = transform_obj(obj)
                    for o_option in o:
                        if not verb + ' ' + obj in results_dict:
                            queries_left += 1
                        elif not v_option + ' ' + o_option in results_dict[verb + ' ' + obj]:
                            queries_left += 1
                        else:
                            queries_done += 1
        print('Queries done: {}, queries left: {}, total queries: {}'.format(
            queries_done, queries_left, queries_done + queries_left
        ))
    # It should print the total queries that must be done
    check_queries_left(results_dict)
    # Each (API key, CSE id) pair is consumed in turn; Google quotas limit
    # the queries per key, so several keys cover the full query set.
    for my_api_key, my_cse_id in tqdm(zip(api_keys, cse_ids)):
        # For each verb and object (and their synonyms)
        for verb in verbs:
            v = transform_verb(verb)
            for v_option in v:
                for obj in objects:
                    o = transform_obj(obj)
                    for o_option in o:
                        try:
                            if not verb + ' ' + obj in results_dict:
                                results_dict[verb + ' ' + obj] = dict()
                            # Exact-phrase query with a wildcard between verb
                            # and object, e.g. '"cut * tomato"'.
                            action = v_option + ' * ' + o_option
                            if not v_option + ' ' + o_option in results_dict[verb + ' ' + obj]:
                                #print(action)
                                result = google_search('"' + action + '"', my_api_key, my_cse_id)
                                results_dict[verb + ' ' + obj][v_option + ' ' + o_option] = result
                                # Persist after every query so progress
                                # survives quota errors / interruptions.
                                with open(output_path + 'google_search.json', 'w') as f:
                                    json.dump(results_dict, f, indent=4)
                        # NOTE(review): bare except silently swallows quota and
                        # network errors; failed queries are retried next run.
                        except:
                            pass
    # It should print 0, otherwise it must be repeated
    check_queries_left(results_dict)
    # Create the prior using the computed results
    accum_total = 0.
    for verb in verbs:
        for obj in objects:
            action = verb + ' ' + obj
            info = []
            # Total result counts reported by Google for each synonym query.
            for key in results_dict[action].keys():
                num = int(
                    results_dict[action][key]['searchInformation']['totalResults']
                )
                info.append(num)
            # Average over synonyms that returned at least one hit.
            accum, nb_elems = 0., 0
            for i in range(len(info)):
                if info[i] > 0:
                    accum += float(info[i])
                    nb_elems += 1
            total = float(accum) / max(1,float(nb_elems))
            accum_total += total
            action_priors[action] = total
    with open(output_path + 'unnormalised_action_priors.json', 'w') as f:
        json.dump(action_priors, f, indent=4)
    # Normalize so the priors over all actions sum to 1.
    for key in action_priors.keys():
        action_priors[key] /= float(accum_total)
    with open(output_path + 'action_priors.json', 'w') as f:
        json.dump(action_priors, f, indent=4)
46ce5b3676d526f750eecbff8d36672c35b7b6b1 | 1,041 | py | Python | AoC2019/Day_2/Day_2.py | byarmis/AdventOfCode | 9c91808c2ea06d49f7e726779ac44918a99136f0 | [
"Unlicense"
] | null | null | null | AoC2019/Day_2/Day_2.py | byarmis/AdventOfCode | 9c91808c2ea06d49f7e726779ac44918a99136f0 | [
"Unlicense"
] | null | null | null | AoC2019/Day_2/Day_2.py | byarmis/AdventOfCode | 9c91808c2ea06d49f7e726779ac44918a99136f0 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def part_1(program):
    """Run an Intcode program in place and return it.

    Opcodes: 1 = add, 2 = multiply (operands and target are positions),
    99 = halt. Raises ValueError on an unknown opcode.
    """
    i = 0
    while i < len(program):
        opcode = program[i]
        # BUG FIX: halt before unpacking — a trailing 99 may not be followed
        # by three more values, which made the 4-way slice unpack raise.
        if opcode == 99:
            return program
        a, b, dest = program[i + 1:i + 4]
        i += 4
        if opcode == 1:
            program[dest] = program[a] + program[b]
        elif opcode == 2:
            program[dest] = program[a] * program[b]
        else:
            # Input validation must not be an assert (stripped under -O).
            raise ValueError(f'Unexpected opcode: {opcode}')
    return program
def part_2(program, goal):
    """Search for the noun/verb pair whose program output matches *goal*.

    Returns (noun, verb), or None when no pair produces *goal*. The input
    *program* is never mutated; each attempt runs on a copy.
    """
    # BUG FIX: range(99) stopped at 98, silently skipping noun/verb == 99
    # even though the puzzle allows values 0-99 inclusive.
    for noun in range(100):
        for verb in range(100):
            candidate = program[:]
            candidate[1] = noun
            candidate[2] = verb
            if part_1(candidate)[0] == goal:
                return noun, verb
    return None
if __name__ == '__main__':
    # Program is a single comma-separated line of integers.
    with open('input.txt') as f:
        program = f.readline()
        program = [int(i) for i in program.split(',')]
    # Part 1 fixes noun=12, verb=2 per the puzzle statement; run on a copy
    # so part 2 starts from the unmodified program.
    p_1 = program[:]
    p_1[1] = 12
    p_1[2] = 2
    p = part_1(p_1)
    print(f'part 1: {p[0]}')
    noun, verb = part_2(program, 19690720)
    print(f'part 2: noun={noun}\n\tverb={verb}')
| 23.659091 | 67 | 0.487992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.146974 |
46d074f1f3a34238441330f57b34adc307f55fe6 | 1,493 | bzl | Python | third_party/opencv_configs.bzl | zycv/edge-brain | 6c4ba7dd06f2c24dad93d617a21c42b5b7122d57 | [
"Apache-2.0"
] | null | null | null | third_party/opencv_configs.bzl | zycv/edge-brain | 6c4ba7dd06f2c24dad93d617a21c42b5b7122d57 | [
"Apache-2.0"
] | null | null | null | third_party/opencv_configs.bzl | zycv/edge-brain | 6c4ba7dd06f2c24dad93d617a21c42b5b7122d57 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Duan-JM, Sun Aries
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: passing both BUILD_SHARED_LIBS=ON and BUILD_STATIC_LIBS=ON to cmake
# still only builds the shared libraries, so we have to choose one or the
# other. We build shared libraries by default, but this variable can be used
# to switch to static libraries.
# False builds static OpenCV libraries (see the note above about cmake's
# BUILD_SHARED_LIBS / BUILD_STATIC_LIBS being mutually exclusive).
OPENCV_SHARED_LIBS = False
OPENCV_TAG = "4.5.0"
OPENCV_SO_VERSION = "4.5" # must match the major.minor of OPENCV_TAG
# Note: this determines the order in which the libraries are passed to the
# linker, so if library A depends on library B, library B must come _after_.
# Hence core is at the bottom.
OPENCV_MODULES = [
    "calib3d",
    "features2d",
    "flann",
    "highgui",
    "video",
    "videoio",
    "imgcodecs",
    "imgproc",
    "core",
]
# Static archives of OpenCV's bundled third-party dependencies.
OPENCV_THIRD_PARTY_DEPS = [
    "liblibjpeg-turbo.a",
    "liblibpng.a",
    "liblibprotobuf.a",
    "libquirc.a",
    "libtegra_hal.a",
    "libzlib.a",
    "libade.a",
    "liblibopenjp2.a"
]
| 30.469388 | 76 | 0.716678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,254 | 0.83992 |
46d0bc415f0da8ec3ab5ab337e4a454b61b7cfdc | 1,712 | py | Python | treestructure.py | Mouedrhiri/Website-tree-structure | e93b686e4e1760a4f189225265854adef65ce7c5 | [
"MIT"
] | 1 | 2021-05-10T22:56:50.000Z | 2021-05-10T22:56:50.000Z | treestructure.py | Mouedrhiri/Website-tree-structure | e93b686e4e1760a4f189225265854adef65ce7c5 | [
"MIT"
] | null | null | null | treestructure.py | Mouedrhiri/Website-tree-structure | e93b686e4e1760a4f189225265854adef65ce7c5 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import urllib.request
import xml.etree.ElementTree as ET
from tqdm import tqdm
from time import sleep
#Try With This Website http://igm.univ-mlv.fr/
LinksList = []
def progress(rang):
    """Display a tqdm progress bar over *rang*, sleeping 0.1s per item."""
    for i in tqdm(rang, desc ="Progress : "):
        sleep(.1)
# Ask the user for a URL, fetch the page and collect all anchor hrefs,
# skipping empty/self links ('#', '/', missing href).
var=input("Enter a Website : ")
var=str(var)
html_page = urllib.request.urlopen(var)
soup = BeautifulSoup(html_page)
for link in soup.findAll('a'):
    linkfound = link.get('href')
    progress(link)
    if linkfound == '#' or linkfound == 'None' or linkfound == '/' or linkfound == None :
        pass
    else:
        LinksList.append(linkfound)
print(f"We've Found {len(LinksList)} Arborescence")
def indent(elem, level=0):
    """Recursively set text/tail whitespace so the XML tree pretty-prints.

    Standard ElementTree in-place indentation recipe: *elem* is mutated and
    also returned for convenience.
    """
    i = "\n" + level*" "
    j = "\n" + (level-1)*" "
    if len(elem):
        # Element has children: indent its text and each child's tail.
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for subelem in elem:
            indent(subelem, level+1)
        # Last child's tail closes back one level.
        if not elem.tail or not elem.tail.strip():
            elem.tail = j
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = j
    return elem
# Build the XML tree: each collected link becomes an element that also
# contains itself and the next link as children.
# NOTE(review): raw URLs are used as tag names, which may produce invalid
# XML for hrefs containing characters like ':' or '/' — confirm inputs.
xml_dox = ET.Element('root')
for i in range(0,len(LinksList)-1):
    LinksList[i] = ET.SubElement(xml_dox,f'{LinksList[i]}')
    ET.SubElement(LinksList[i], f'{LinksList[i]}')
    ET.SubElement(LinksList[i], f'{LinksList[i+1]}')
indent(xml_dox)
tree = ET.ElementTree(xml_dox)
# The output filename is arbitrary; rename as desired.
tree.write('pypy.xml',encoding='utf-8',xml_declaration=True)
input()
46d23d217543d8a9b9de55f8928d348dc9fb5455 | 3,237 | py | Python | main.py | cecemel/fabbrikka-cart-service | b18ac45c0effb72ab8ef112c468e5ede0b053016 | [
"MIT"
] | null | null | null | main.py | cecemel/fabbrikka-cart-service | b18ac45c0effb72ab8ef112c468e5ede0b053016 | [
"MIT"
] | null | null | null | main.py | cecemel/fabbrikka-cart-service | b18ac45c0effb72ab8ef112c468e5ede0b053016 | [
"MIT"
] | null | null | null | from flask import Flask, request, jsonify
import os, sys
import helpers, escape_helpers
import logging
import config
from rdflib.namespace import Namespace
##############
# INIT CONFIG
##############
# Load environment-specific settings (defaults to DEBUG) and wire logging
# to stderr at the configured level.
CONFIG = config.load_config(os.environ.get('ENVIRONMENT', "DEBUG"))
app = Flask(__name__)
handler = logging.StreamHandler(stream=sys.stderr)
handler.setLevel(CONFIG["LOG_LEVEL"])
app.logger.addHandler(handler)
# vocabularies
mu = Namespace('http://mu.semte.ch/vocabularies/')
mu_core = Namespace('http://mu.semte.ch/vocabularies/core/')
mu_ext = Namespace('http://mu.semte.ch/vocabularies/ext/')
graph = CONFIG['MU_APPLICATION_GRAPH']
# sparql wrapper
sparql_wrapper = helpers.init_sparql_wrapper(CONFIG)
#################
# API
#################
@app.route('/shopping-carts', methods=["PATCH"])
def associate_cart():
    """Associate the shopping cart in the JSON-API payload with the caller's mu-session.

    Returns 204 on success, or a helpers.error response when validation
    fails, the session header is missing, or the cart cannot be resolved.
    """
    # validates
    data = request.get_json(force=True)['data']
    errors = [helpers.validate_resource_type("shopping-carts", data), helpers.validate_json_api_content_type(request)]
    if any(errors):
        return next(e for e in errors if e)
    cart_id = data.get("id", None)
    if not cart_id:
        return helpers.error("CART ID missing")
    # get session id in the request
    session_id = helpers.session_id_header(request)
    if not session_id:
        return helpers.error("MU_SESSION_ID missing")
    # now fetch the cart
    query = """SELECT ?cart
    WHERE{
    GRAPH <http://mu.semte.ch/application> {
    ?cart <http://mu.semte.ch/vocabularies/core/uuid> %s
    }
    }
    """ % escape_helpers.sparql_escape(cart_id)
    carts = helpers.query(app.logger, sparql_wrapper["sparql_query"], query).get('results', []).get('bindings')
    if not len(carts) == 1:
        return helpers.error("no/too many cart(s) found for {}".format(cart_id))
    cart_uri = carts[0]['cart']['value']
    # update the cart with session
    # NOTE(review): cart_uri and session_id are interpolated into the SPARQL
    # update without escaping (unlike cart_id above) — confirm both values
    # are trusted, otherwise this is injectable.
    query = """
    PREFIX ext: <http://mu.semte.ch/vocabularies/ext/>
    INSERT DATA
    {
    GRAPH <http://mu.semte.ch/application> {
    <%s> ext:ownerSession <%s>.
    }
    }
    """ % (cart_uri, session_id)
    helpers.query(app.logger, sparql_wrapper["sparql_update"], query)
    return "", 204
@app.route('/shopping-carts')
def return_associate_cart():
    """Return the uuids of all shopping carts owned by the caller's mu-session."""
    # get session id in the request
    session_id = helpers.session_id_header(request)
    if not session_id:
        return helpers.error("MU_SESSION_ID missing")
    # now fetch the carts uid
    # NOTE(review): session_id is interpolated into the SPARQL query without
    # escaping — confirm the header value is trusted upstream.
    query = """PREFIX mu: <http://mu.semte.ch/vocabularies/core/>
    SELECT ?uid
    WHERE {
    GRAPH <http://mu.semte.ch/application> {
    ?uri <http://mu.semte.ch/vocabularies/ext/ownerSession> <%s>.
    ?uri mu:uuid ?uid
    }
    }
    """ % session_id
    uids = helpers.query(app.logger, sparql_wrapper["sparql_query"], query).get('results', []).get('bindings')
    return jsonify([e["uid"]["value"] for e in uids]), 200
#######################
## Start Application ##
#######################
if __name__ == '__main__':
    # Development entry point: Flask debug server on port 80.
    app.logger.info("---cart-service is starting")
    app.run(host='0.0.0.0', port=80, debug=True)
46d523c499eb9dbe0d15573f941e28e4f1830269 | 2,406 | py | Python | pgmock/tests/test_examples.py | CloverHealth/pgmock | bfebf405696f3613b52958dc640a58e634815ee8 | [
"BSD-3-Clause"
] | 54 | 2018-03-29T21:41:41.000Z | 2022-01-26T11:38:22.000Z | pgmock/tests/test_examples.py | CloverHealth/pgmock | bfebf405696f3613b52958dc640a58e634815ee8 | [
"BSD-3-Clause"
] | 2 | 2018-07-31T19:39:24.000Z | 2020-04-16T06:28:12.000Z | pgmock/tests/test_examples.py | CloverHealth/pgmock | bfebf405696f3613b52958dc640a58e634815ee8 | [
"BSD-3-Clause"
] | 1 | 2018-08-22T16:15:08.000Z | 2018-08-22T16:15:08.000Z | """
This file illustrates a few examples of using pgmock with pytest.
A postgres testing database from pytest-pgsql (https://github.com/CloverHealth/pytest-pgsql)
is used and a fixture is created for using the mock context manager. This is the
preferred way of using pgmock, but it's also possible to render SQL yourself and execute
patched versions of it. Examples of this are also included here
"""
import pytest
import pgmock
@pytest.fixture
def pgmocker(transacted_postgresql_db):
    # Yield a pgmock mocker bound to the test database connection; the
    # context manager un-patches the connection once the test finishes.
    with pgmock.mock(transacted_postgresql_db.connection) as mocker:
        yield mocker
def test_table_patching_w_mocker(transacted_postgresql_db, pgmocker):
    """Tests patching a table while using the mocker returned by ``pgmock.mock``"""
    # Replace 'test_table' with two in-memory rows, then verify a plain
    # SELECT through the patched connection returns exactly those rows.
    pgmocker.patch(pgmock.table('test_table'), [('val1', 'val2'), ('val3', 'val4')], ['c1', 'c2'])
    results = list(transacted_postgresql_db.connection.execute('SELECT * from test_table'))
    assert results == [('val1', 'val2'), ('val3', 'val4')]
def test_patch_subquery_from_file(transacted_postgresql_db, tmpdir):
    """Tests reading a subquery from a file and testing a patched version of it"""
    # Create the example file
    file_name = tmpdir.join('file.sql')
    file_name.write('SELECT sub.c1, sub.c2 FROM (SELECT * FROM test_table) sub;')

    # Read the subquery 'sub' from the file
    subquery = pgmock.sql_file(str(file_name), pgmock.subquery('sub'))
    assert subquery == 'SELECT * FROM test_table'

    # Patch the table of the subquery and verify it returns the proper results
    patched = pgmock.sql(subquery, pgmock.patch(
        pgmock.table('test_table'),
        rows=[('v1', 'v2'), ('v3', 'v4')],
        cols=['c1', 'c2']
    ))
    assert (
        patched == "SELECT * FROM (VALUES ('v1','v2'),('v3','v4')) AS test_table(\"c1\",\"c2\")"
    )

    # Patches can also be applied with list of dictionaries, filling in only what's needed.
    # Column names must still be provided. null values will be filled for all missing columns
    patched = pgmock.sql(subquery, pgmock.patch(
        pgmock.table('test_table'),
        rows=[{'c1': 'v1'}, {'c2': 'v4'}],
        cols=['c1', 'c2']
    ))
    assert (
        patched == "SELECT * FROM (VALUES ('v1',null),(null,'v4')) AS test_table(\"c1\",\"c2\")"
    )
    # Execute the patched SQL to confirm the null-filling behaviour end-to-end.
    results = list(transacted_postgresql_db.connection.execute(patched))
    assert results == [('v1', None), (None, 'v4')]
| 39.442623 | 98 | 0.672485 | 0 | 0 | 129 | 0.053616 | 145 | 0.060266 | 0 | 0 | 1,303 | 0.541563 |
46d5533b2c8563437c9f6b3a8da67452f5e8e47b | 3,019 | py | Python | jsonserver.py | hellfyre/StratumsphereStatusBot | 478dce7007a60c3a33c41c789893242c074e4e8d | [
"Apache-2.0"
] | null | null | null | jsonserver.py | hellfyre/StratumsphereStatusBot | 478dce7007a60c3a33c41c789893242c074e4e8d | [
"Apache-2.0"
] | null | null | null | jsonserver.py | hellfyre/StratumsphereStatusBot | 478dce7007a60c3a33c41c789893242c074e4e8d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'Matthias Uschok <dev@uschok.de>'
import json
import BaseHTTPServer
import threading
from urlparse import parse_qs, urlparse
import status
callbacks = dict()
class JsonHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP handler serving the hackerspace status document and an update
    endpoint.

    Routes:
      GET /status.json        -- Space API document built from ``status.space``
      GET /update?open=&by=   -- flip the space state and fire the
                                 'send_status' callback
      anything else           -- 404
    """

    def do_GET(self):
        print("path:", self.path)
        if self.path == '/status.json':
            # Space API document (declares api version '0.13'); the dynamic
            # fields come from the shared ``status.space`` dict.
            data = {
                'api' : '0.13',
                'space' : 'Stratum 0',
                'logo' : 'https:\/\/stratum0.org\/mediawiki\/images\/thumb\/c\/c6\/Sanduhr-twitter-avatar-black.svg\/240px-Sanduhr-twitter-avatar-black.svg.png',
                'url': 'https:\/\/stratum0.org',
                'location' : {
                    'address': 'Hamburger Strasse 273a, 38114 Braunschweig, Germany',
                    'lon' : 10.5211247,
                    'lat' : 52.2785658
                },
                'state' : {
                    'open' : status.space['open'],
                    'lastchange' : status.space['last_change'],
                    'trigger_person' : status.space['by'],
                    'icon' : {
                        'open' : 'http:\/\/status.stratum0.org\/open_square.png',
                        'closed' : 'http:\/\/status.stratum0.org\/closed_square.png'
                    },
                    'ext_since' : status.space['since']
                },
                'contact' : {
                    'phone' : '+4953128769245',
                    'twitter' : '@stratum0',
                    'ml' : 'normalverteiler@stratum0.org',
                    'issue-mail' : 'cm9oaWViK3NwYWNlYXBpLWlzc3Vlc0Byb2hpZWIubmFtZQ==',
                    'irc' : 'irc:\/\/irc.freenode.net\/#stratum0'
                },
                'issue_report_channels' : [
                    'issue-mail'
                ]
            }
            data_string = json.dumps(data)
            self.send_response(200)
            self.send_header("Content-type", "application/json")
            self.end_headers()
            self.wfile.write(data_string)
            self.wfile.write('\n')
        elif self.path.startswith('/update?'):
            # Parse the query string; 'open' is required, 'by' is optional.
            queryurl = urlparse(self.path)
            params = parse_qs(queryurl.query)
            if len(params) > 0:
                by = ''
                if 'by' in params:
                    by = params['by'][0]
                # Only the literal string 'true' opens the space.
                status.update(params['open'][0]=='true', by)
                # Notify registered listeners (e.g. the chat bot) of the change.
                callbacks['send_status']()
                self.send_response(200)
            else:
                self.send_response(400)
        else:
            self.send_response(404)
class JsonServer(threading.Thread):
    """Runs a blocking HTTP server for JsonHandler on a background thread
    until stop() is requested."""

    def __init__(self, address):
        super(JsonServer, self).__init__()
        self.address = address
        self.stop_requested = False

    def run(self):
        # Handle one request at a time so the stop flag is re-checked
        # between requests.
        self.httpd = BaseHTTPServer.HTTPServer(self.address, JsonHandler)
        while True:
            if self.stop_requested:
                break
            self.httpd.handle_request()

    def stop(self):
        # handle_request() blocks, so shutdown completes only after the
        # next incoming request.
        self.stop_requested = True
46d59eb1b0b3c39e785f15faba7490ca7d08e591 | 4,456 | py | Python | apps/permissions/views.py | yhkl-dev/JAutoOps | e42342fc6d814813dcac2e0154cd5dfdc1adf4c1 | [
"MIT"
] | null | null | null | apps/permissions/views.py | yhkl-dev/JAutoOps | e42342fc6d814813dcac2e0154cd5dfdc1adf4c1 | [
"MIT"
] | null | null | null | apps/permissions/views.py | yhkl-dev/JAutoOps | e42342fc6d814813dcac2e0154cd5dfdc1adf4c1 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import Permission, Group
from rest_framework import viewsets, mixins, response, status
from rest_framework.generics import get_object_or_404
from .serializer import PermissionSerializer
from .common import get_permission_obj
from .filter import PermissionFilter
class PermissionsViewset(viewsets.ReadOnlyModelViewSet):
    """
    Read-only viewset over all permissions.
    list:
        Return the list of permissions.
    """
    queryset = Permission.objects.all()
    serializer_class = PermissionSerializer
    filter_class = PermissionFilter
    filter_fields = ("name",)

    def get_queryset(self):
        # Order by content type id so permissions of the same model are
        # grouped together in the listing.
        base_queryset = super(PermissionsViewset, self).get_queryset()
        return base_queryset.order_by("content_type__id")
class GroupPermissionsViewset(viewsets.ReadOnlyModelViewSet,
                              mixins.UpdateModelMixin,
                              mixins.DestroyModelMixin):
    """
    User-group permissions.
    retrieve:
        Return the permission list for a user group.
    update:
        Add a permission to the given user group; parameter pid: permission id.
    destroy:
        Remove a permission from the given group; parameter pid: permission id.
    """
    queryset = Permission.objects.all()
    serializer_class = PermissionSerializer
    filter_class = PermissionFilter
    filter_fields = ("name",)

    def process_permission(self, group_permission_queryset, data):
        # Mark each serialized permission with status=True when the group
        # already holds it; missing lookups are silently ignored.
        for record in data:
            try:
                group_permission_queryset.get(pk=record.get("id", None))
                record["status"] = True
            except Exception:
                pass
        return data

    def get_group_permissions(self):
        # Paginated list of only the permissions the group currently has.
        groupobj = self.get_object()
        queryset = groupobj.permissions.all()
        queryset = self.filter_queryset(queryset)
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(queryset, many=True)
        return response.Response(serializer.data)

    def get_modify_permissions(self):
        # Paginated list of ALL permissions, each flagged with whether the
        # group holds it — used by the edit UI.
        groupobj = self.get_object()
        group_permission_queryset = groupobj.permissions.all()
        queryset = Permission.objects.all()
        queryset = self.filter_queryset(queryset)
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(self.process_permission(group_permission_queryset, serializer.data))
        serializer = self.get_serializer(queryset, many=True)
        return response.Response(self.process_permission(group_permission_queryset, serializer.data))

    def retrieve(self, request, *args, **kwargs):
        # ?modify=<anything> switches to the all-permissions view.
        modify = request.GET.get("modify", None)
        if modify is not None:
            return self.get_modify_permissions()
        else:
            return self.get_group_permissions()

    def get_object(self):
        # Look up a Group (not a Permission) by the URL kwarg; mirrors
        # DRF's generic get_object but against the Group queryset.
        queryset = Group.objects.all()
        lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
        assert lookup_url_kwarg in self.kwargs, (
            'Expected view %s to be called with a URL keyword argument '
            'named "%s". Fix your URL conf, or set the `.lookup_field` '
            'attribute on the view correctly.' %
            (self.__class__.__name__, lookup_url_kwarg)
        )
        filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
        obj = get_object_or_404(queryset, **filter_kwargs)
        # May raise a permission denied
        self.check_object_permissions(self.request, obj)
        return obj

    def update(self, request, *args, **kwargs):
        # Add permission 'pid' to the group; responds 200 with a status
        # payload either way (status=1 means the permission was not found).
        ret = {"status": 0}
        groupobj = self.get_object()
        permission_obj = get_permission_obj(request.data.get("pid", ""))
        if permission_obj is None:
            ret["status"] = 1
            ret["errmsg"] = "permission 不存在"
        else:
            groupobj.permissions.add(permission_obj)
        return response.Response(ret, status=status.HTTP_200_OK)

    def destroy(self, request, *args, **kwargs):
        # Remove permission 'pid' from the group; same response contract
        # as update().
        ret = {"status": 0}
        groupobj = self.get_object()
        permission_obj = get_permission_obj(request.data.get("pid", ""))
        if permission_obj is None:
            ret["status"] = 1
            ret["errmsg"] = "permission 不存在"
        else:
            groupobj.permissions.remove(permission_obj)
        return response.Response(ret, status=status.HTTP_200_OK)
46d61300693e53921017fdd21edfe3a6e707f091 | 4,267 | py | Python | mrtarget/common/Redis.py | pieterlukasse/data_pipeline-1 | 823645a36a999e76dc51584aa784f5f9e3f245e7 | [
"Apache-2.0"
] | null | null | null | mrtarget/common/Redis.py | pieterlukasse/data_pipeline-1 | 823645a36a999e76dc51584aa784f5f9e3f245e7 | [
"Apache-2.0"
] | null | null | null | mrtarget/common/Redis.py | pieterlukasse/data_pipeline-1 | 823645a36a999e76dc51584aa784f5f9e3f245e7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import simplejson as json
from collections import Counter
import jsonpickle
from mrtarget.common import require_all
from mrtarget.common.connection import new_redis_client
jsonpickle.set_preferred_backend('simplejson')
import logging
import uuid
import datetime
import numpy as np
import cProfile
np.seterr(divide='warn', invalid='warn')
from mrtarget.Settings import Config
try:
import cPickle as pickle
except ImportError:
import pickle
import time
from multiprocessing import Process, current_process
logger = logging.getLogger(__name__)
import signal
class TimeoutException(Exception):
    # Raised by timeout_handler when a SIGALRM fires.
    pass

def timeout_handler(signum, frame):
    # Signal handler: convert SIGALRM into a catchable exception.
    raise TimeoutException

# Install the handler module-wide so callers can bound long-running work
# with signal.alarm().
signal.signal(signal.SIGALRM, timeout_handler)
def millify(n):
    """Format a number with a metric-style suffix ('', K, M, G, P).

    Non-numeric input (and anything else that fails, e.g. log10(0))
    falls back to the plain string form of the input.
    """
    try:
        n = float(n)
        millnames = ['', 'K', 'M', 'G', 'P']
        # Bug fix: the np.math alias was removed in NumPy 1.25, which made
        # every call fall into the except branch and return str(n). Use
        # numpy's own ufuncs instead.
        millidx = max(0, min(len(millnames) - 1,
                             int(np.floor(np.log10(abs(n)) / 3))))
        return '%.1f%s' % (n / 10 ** (3 * millidx), millnames[millidx])
    except Exception:
        return str(n)
class RedisLookupTable(object):
    '''
    Simple Redis-based key value store for string-based objects. Faster than
    its subclasses since it does not serialise and unserialise strings. By
    default keys will expire in 2 days.

    Allows to store a lookup table (key/value store) in memory/redis so that it
    can be accessed quickly from multiple processes, reducing memory usage by
    sharing.
    '''

    LOOK_UPTABLE_NAMESPACE = 'lookuptable:%(namespace)s'
    KEY_NAMESPACE = '%(namespace)s:%(key)s'

    def __init__(self,
                 namespace=None,
                 r_server=None,
                 ttl=60 * 60 * 24 * 2):
        '''
        :param namespace: logical name of this table; a random UUID when omitted
        :param r_server: redis client; a new client is created when omitted
        :param ttl: default key expiration in seconds.
            Bug fix: was ``60*60*24+2`` (one day plus two seconds), which
            contradicted the documented two-day expiry.
        '''
        if namespace is None:
            namespace = uuid.uuid4()
        self.namespace = self.LOOK_UPTABLE_NAMESPACE % {'namespace': namespace}
        self.r_server = new_redis_client() if not r_server else r_server
        self.default_ttl = ttl
        require_all(self.r_server is not None)

    def set(self, key, obj, r_server=None, ttl=None):
        '''Store ``obj`` under the namespaced ``key`` with an expiration.'''
        # NOTE(review): redis-py >= 3.0 expects setex(name, time, value);
        # this (name, value, time) order matches the pre-3.0 client —
        # confirm against the pinned redis dependency.
        self._get_r_server(r_server).setex(self._get_key_namespace(key),
                                           self._encode(obj),
                                           ttl or self.default_ttl)

    def get(self, key, r_server=None):
        '''Return the decoded value for ``key``; raise KeyError when absent.'''
        server = self._get_r_server(r_server)
        value = server.get(self._get_key_namespace(key))
        if value is not None:
            return self._decode(value)
        raise KeyError(key)

    def keys(self, r_server=None):
        '''Return all keys in this namespace with the namespace prefix stripped.'''
        return [key.replace(self.namespace + ':', '')
                for key in self._get_r_server(r_server).keys(self.namespace + '*')]

    def set_r_server(self, r_server):
        self.r_server = r_server

    def _get_r_server(self, r_server=None):
        # A per-call client overrides the instance-level one.
        return r_server if r_server else self.r_server

    def _get_key_namespace(self, key, r_server=None):
        return self.KEY_NAMESPACE % {'namespace': self.namespace, 'key': key}

    def _encode(self, obj):
        # Strings are stored as-is; subclasses add serialisation here.
        return obj

    def _decode(self, obj):
        return obj

    def __contains__(self, key, r_server=None):
        server = self._get_r_server(r_server)
        return server.exists(self._get_key_namespace(key))

    def __getitem__(self, key, r_server=None):
        # Bug fix: get() namespaces the key itself, so namespacing it here
        # as well looked up "ns:ns:key" (always missing); the old code also
        # never returned the fetched value.
        return self.get(key, r_server=self._get_r_server(r_server))

    def __setitem__(self, key, value, r_server=None):
        # Bug fix: the old guard referenced an undefined attribute
        # (self.lt_reuse) and double-namespaced the key via set().
        self.set(key, value, r_server=self._get_r_server(r_server))
class RedisLookupTableJson(RedisLookupTable):
    '''
    Simple Redis-based key value store for Json serialised objects
    By default keys will expire in 2 days
    '''

    def _encode(self, obj):
        # Serialise values to a JSON string before storing in redis.
        return json.dumps(obj)

    def _decode(self, obj):
        # Parse the JSON string fetched from redis back into an object.
        return json.loads(obj)
class RedisLookupTablePickle(RedisLookupTable):
    '''
    Simple Redis-based key value store for pickled objects
    By default keys will expire in 2 days
    '''

    def _encode(self, obj):
        # Bug fix: base64.encodestring was deprecated since Python 3.1 and
        # removed in 3.9. Prefer encodebytes; fall back to encodestring only
        # where encodebytes is absent (Python 2), so the fallback reference
        # is never evaluated on modern interpreters.
        encode = getattr(base64, 'encodebytes', None) or base64.encodestring
        return encode(pickle.dumps(obj, pickle.HIGHEST_PROTOCOL))

    def _decode(self, obj):
        # Mirror of _encode: decodebytes with a Python 2 decodestring fallback.
        decode = getattr(base64, 'decodebytes', None) or base64.decodestring
        return pickle.loads(decode(obj))
| 28.446667 | 81 | 0.65948 | 3,198 | 0.749473 | 0 | 0 | 0 | 0 | 0 | 0 | 782 | 0.183267 |
46d6aee61459f1fde35e599535d517f88ab7a4d8 | 2,133 | py | Python | helpers/module_api_pages_manager.py | Maveo/Spark | 93fd7a43ac830350991ef1b722205a94a2623409 | [
"MIT"
] | 2 | 2022-01-23T16:24:27.000Z | 2022-03-19T23:23:30.000Z | helpers/module_api_pages_manager.py | Maveo/Spark | 93fd7a43ac830350991ef1b722205a94a2623409 | [
"MIT"
] | 10 | 2022-02-01T20:02:45.000Z | 2022-03-30T10:47:52.000Z | helpers/module_api_pages_manager.py | Maveo/Spark | 93fd7a43ac830350991ef1b722205a94a2623409 | [
"MIT"
] | 1 | 2022-03-20T23:13:11.000Z | 2022-03-20T23:13:11.000Z | from typing import *
import discord
from helpers.exceptions import ModuleNotActivatedException
from webserver import Page
if TYPE_CHECKING:
from helpers.module_manager import ModuleManager
from helpers.spark_module import SparkModule
class ModuleApiPagesManager:
    """Collects the API pages declared by all modules and wraps each page's
    view so it only runs when its module is activated for the guild."""

    def __init__(self, module_manager: 'ModuleManager'):
        self.module_manager = module_manager
        # path -> Page, populated by initialize()
        self.default_api_pages: Dict[str, Page] = {}

    def initialize(self, modules: List['SparkModule']):
        """Validate and register every module's API pages.

        Raises RuntimeError when a module's pages are not a list, when an
        entry is not a Page, or when two modules declare the same path.
        """
        # path -> owning module name, used to report duplicate paths
        pages_keys: Dict[str, str] = {}
        for module in modules:
            if not isinstance(module.get_api_pages(), list):
                raise RuntimeError('api pages of module {} not list'.format(module.get_name()))
            for api_page in module.get_api_pages():
                if not isinstance(api_page, Page):
                    raise RuntimeError('api page {} of module {} is not of type Page'.format(api_page.path,
                                                                                            module.get_name()))
                if api_page.path in pages_keys:
                    raise RuntimeError('duplicate pages path ({}) in modules {} and {}'.format(
                        api_page.path, module.get_name(), pages_keys[api_page.path]
                    ))
                else:
                    pages_keys[api_page.path] = module.get_name()
                    # Store a copy of the page whose view is wrapped with the
                    # activation check.
                    self.default_api_pages[api_page.path] = api_page.new(
                        view_func=self.module_wrapper(api_page.view_func, module)
                    )

    def module_wrapper(self, func, module):
        """Return an async view that rejects calls for guilds where
        ``module`` is not activated, otherwise delegates to ``func``."""
        async def _call(guild: discord.Guild, member: discord.Member, *args, **kwargs):
            activated_modules = self.module_manager.get_activated_modules(guild.id)
            if module.get_name() not in activated_modules:
                raise ModuleNotActivatedException('module "{}" not activated'.format(module.get_name()))
            # Pass the live module instance as the first argument.
            return await func(self.module_manager.get(module.get_name()), guild, member, *args, **kwargs)
        return _call

    def all(self):
        """Return all registered (wrapped) API pages."""
        return self.default_api_pages.values()
| 44.4375 | 112 | 0.608064 | 1,885 | 0.883732 | 0 | 0 | 0 | 0 | 433 | 0.203 | 182 | 0.085326 |
46d79143d42745acfc58bf24b940e5ad645fcc18 | 401 | py | Python | project/app/migrations/0006_housing_description.py | ryan-lam/hackupc2021 | 2f63b47f831f3d6d01077a9bf2f94d9babe6bfce | [
"MIT"
] | null | null | null | project/app/migrations/0006_housing_description.py | ryan-lam/hackupc2021 | 2f63b47f831f3d6d01077a9bf2f94d9babe6bfce | [
"MIT"
] | null | null | null | project/app/migrations/0006_housing_description.py | ryan-lam/hackupc2021 | 2f63b47f831f3d6d01077a9bf2f94d9babe6bfce | [
"MIT"
] | 2 | 2021-05-23T04:36:35.000Z | 2021-05-27T04:27:04.000Z | # Generated by Django 3.2 on 2021-05-16 05:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the free-text 'description' column to the Housing model.

    dependencies = [
        ('app', '0005_auto_20210515_1932'),
    ]

    operations = [
        migrations.AddField(
            model_name='housing',
            name='description',
            # Existing rows receive the literal string 'null' as their value.
            field=models.CharField(default='null', max_length=500),
        ),
    ]
| 21.105263 | 67 | 0.603491 | 310 | 0.773067 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.256858 |
46d7c8e865c33bd48710e0960deb0a7786e9aa63 | 14,313 | py | Python | simpleml/utils/training/create_persistable.py | ptoman/SimpleML | a829ee05da01a75b64982d91a012e9274b6f7c6e | [
"BSD-3-Clause"
] | null | null | null | simpleml/utils/training/create_persistable.py | ptoman/SimpleML | a829ee05da01a75b64982d91a012e9274b6f7c6e | [
"BSD-3-Clause"
] | null | null | null | simpleml/utils/training/create_persistable.py | ptoman/SimpleML | a829ee05da01a75b64982d91a012e9274b6f7c6e | [
"BSD-3-Clause"
] | null | null | null | '''
Module with helper classes to create new persistables
'''
from abc import ABCMeta, abstractmethod
from simpleml.persistables.meta_registry import SIMPLEML_REGISTRY
from simpleml.datasets.base_dataset import Dataset
from simpleml.pipelines.base_pipeline import Pipeline
from simpleml.models.base_model import Model
from simpleml.metrics.base_metric import Metric
from simpleml.utils.errors import TrainingError
import logging
from future.utils import with_metaclass
LOGGER = logging.getLogger(__name__)
__author__ = 'Elisha Yadgaran'
class PersistableCreator(with_metaclass(ABCMeta, object)):
    """Abstract retrieve-or-create workflow shared by all persistable
    creators (datasets, pipelines, models, metrics)."""

    @classmethod
    def retrieve_or_create(self, **kwargs):
        '''
        Wrapper method to first attempt to retrieve a matching persistable and
        then create a new one if it isn't found
        '''
        # NOTE: despite the name, 'self' here is the creator class
        # (classmethod), per the original signature.
        cls, filters = self.determine_filters(**kwargs)
        persistable = self.retrieve(cls, filters)

        if persistable is not None:
            LOGGER.info('Using existing persistable: {}, {}, {}'.format(cls.__tablename__, persistable.name, persistable.version))
            persistable.load()
            return persistable

        else:
            LOGGER.info('Existing {} not found. Creating new one now'.format(cls.__tablename__))
            persistable = self.create(**kwargs)
            LOGGER.info('Using new persistable: {}, {}, {}'.format(cls.__tablename__, persistable.name, persistable.version))
            return persistable

    @staticmethod
    def retrieve(cls, filters):
        '''
        Query database using the table model (cls) and filters for a matching
        persistable. Returns the newest matching version, or None.
        '''
        return cls.where(**filters).order_by(cls.version.desc()).first()

    @staticmethod
    def retrieve_dependency(dependency_cls, **dependency_kwargs):
        '''
        Base method to query for dependency
        Raises TrainingError if dependency does not exist
        '''
        dependency = dependency_cls.retrieve(
            *dependency_cls.determine_filters(**dependency_kwargs))
        if dependency is None:
            raise TrainingError('Expected dependency is missing')
        dependency.load()
        return dependency

    @abstractmethod
    def determine_filters(cls, strict=False, **kwargs):
        '''
        method to determine which filters to apply when looking for
        existing persistable

        :param strict: whether to fit objects first before assuming they are identical
            In theory if all inputs and classes are the same, the outputs should deterministically
            be the same as well (up to random iter). So, you dont need to fit objects
            to be sure they are the same

        Default design iterates through 2 (or 3) options when retrieving persistables:
            1) By name and version (unique properties that define persistables)
            2) By name, registered_name, and computed hash
            2.5) Optionally, just use name and registered_name (assumes class
                definition is the same and would result in an identical persistable)

        Returns: database class, filter dictionary
        '''

    @abstractmethod
    def create(cls, **kwargs):
        '''
        method to create a new persistable with the desired parameters
        kwargs are passed directly to persistable
        '''

    @staticmethod
    def retrieve_from_registry(registered_name):
        '''
        stateless method to query registry for class definitions. handles errors
        '''
        cls = SIMPLEML_REGISTRY.get(registered_name)
        if cls is None:
            raise TrainingError('Referenced class unregistered: {}'.format(registered_name))
        return cls
class DatasetCreator(PersistableCreator):
    """Retrieve-or-create logic for Dataset persistables."""

    @classmethod
    def determine_filters(cls, name='', version=None, strict=True, **kwargs):
        '''
        stateless method to determine which filters to apply when looking for
        existing persistable

        Returns: database class, filter dictionary

        :param registered_name: Class name registered in SimpleML
        :param strict: whether to assume same class and name = same persistable,
            or, load the data and compare the hash
        '''
        if version is not None:
            # Name + version uniquely identifies a persistable.
            filters = {
                'name': name,
                'version': version
            }

        else:
            registered_name = kwargs.pop('registered_name')
            # Check if dependency object was passed
            pipeline = kwargs.pop('pipeline', None)
            if pipeline is None:
                # Use dependency reference to retrieve object
                pipeline = cls.retrieve_pipeline(**kwargs.pop('pipeline_kwargs', {}))

            if strict:
                # Build dummy object to retrieve hash to look for
                new_dataset = cls.retrieve_from_registry(registered_name)(name=name, **kwargs)
                new_dataset.add_pipeline(pipeline)
                new_dataset.build_dataframe()

                filters = {
                    'name': name,
                    'registered_name': registered_name,
                    'hash_': new_dataset._hash()
                }

            else:
                # Assume combo of name, class, and pipeline will be unique
                filters = {
                    'name': name,
                    'registered_name': registered_name,
                    'pipeline_id': pipeline.id if pipeline is not None else None
                }

        return Dataset, filters

    @classmethod
    def create(cls, registered_name, pipeline=None, **kwargs):
        '''
        Stateless method to create a new persistable with the desired parameters
        kwargs are passed directly to persistable

        :param registered_name: Class name registered in SimpleML
        :param pipeline: dataset pipeline object
        '''
        if pipeline is None:
            # Use dependency reference to retrieve object
            pipeline = cls.retrieve_pipeline(**kwargs.pop('pipeline_kwargs', {}))

        new_dataset = cls.retrieve_from_registry(registered_name)(**kwargs)
        new_dataset.add_pipeline(pipeline)
        new_dataset.build_dataframe()
        new_dataset.save()

        return new_dataset

    @classmethod
    def retrieve_pipeline(cls, **pipeline_kwargs):
        # Datasets do not require dataset pipelines so return None if it isn't passed
        if not pipeline_kwargs:
            LOGGER.warning('Dataset Pipeline parameters not passed, skipping dependencies. \
                Only use this if dataset is already in the right format!')
            return None
        return cls.retrieve_dependency(PipelineCreator, **pipeline_kwargs)
class PipelineCreator(PersistableCreator):
    """Retrieve-or-create logic for Pipeline persistables."""

    @classmethod
    def determine_filters(cls, name='', version=None, strict=False, **kwargs):
        '''
        stateless method to determine which filters to apply when looking for
        existing persistable

        Returns: database class, filter dictionary

        :param registered_name: Class name registered in SimpleML
        :param strict: whether to fit objects first before assuming they are identical
            In theory if all inputs and classes are the same, the outputs should deterministically
            be the same as well (up to random iter). So, you dont need to fit objects
            to be sure they are the same
        '''
        if version is not None:
            # Name + version uniquely identifies a persistable.
            filters = {
                'name': name,
                'version': version
            }

        else:
            # Check if dependency object was passed
            dataset = kwargs.pop('dataset', None)
            if dataset is None:
                # Use dependency reference to retrieve object
                dataset = cls.retrieve_dataset(**kwargs.pop('dataset_kwargs', {}))

            # Build dummy object to retrieve hash to look for
            registered_name = kwargs.pop('registered_name')
            new_pipeline = cls.retrieve_from_registry(registered_name)(name=name, **kwargs)
            new_pipeline.add_dataset(dataset)
            if strict:
                new_pipeline.fit()

            filters = {
                'name': name,
                'registered_name': registered_name,
                'hash_': new_pipeline._hash()
            }

        return Pipeline, filters

    @classmethod
    def create(cls, registered_name, dataset=None, **kwargs):
        '''
        Stateless method to create a new persistable with the desired parameters
        kwargs are passed directly to persistable

        :param registered_name: Class name registered in SimpleML
        :param dataset: dataset object
        '''
        if dataset is None:
            # Use dependency reference to retrieve object
            dataset = cls.retrieve_dataset(**kwargs.pop('dataset_kwargs', {}))

        new_pipeline = cls.retrieve_from_registry(registered_name)(**kwargs)
        new_pipeline.add_dataset(dataset)
        new_pipeline.fit()
        new_pipeline.save()

        return new_pipeline

    @classmethod
    def retrieve_dataset(cls, **dataset_kwargs):
        # Delegate to the shared dependency lookup; raises TrainingError when missing.
        return cls.retrieve_dependency(DatasetCreator, **dataset_kwargs)
class ModelCreator(PersistableCreator):
    """Retrieve-or-create logic for Model persistables."""

    @classmethod
    def determine_filters(cls, name='', version=None, strict=False, **kwargs):
        '''
        Determine the filters used to look up an existing model.

        Returns: database class, filter dictionary

        :param registered_name: Class name registered in SimpleML
        :param strict: when True, fit the candidate model before hashing;
            otherwise assume identical inputs and class yield an identical
            persistable (up to random iteration), so fitting is unnecessary
        '''
        # Name + version is already a unique identifier.
        if version is not None:
            return Model, {'name': name, 'version': version}

        # Resolve the pipeline dependency: either an object was handed in,
        # or we look it up from the supplied reference kwargs.
        pipeline = kwargs.pop('pipeline', None)
        if pipeline is None:
            pipeline = cls.retrieve_pipeline(**kwargs.pop('pipeline_kwargs', {}))

        # Instantiate a throwaway model just to compute the hash to search for.
        registered_name = kwargs.pop('registered_name')
        candidate = cls.retrieve_from_registry(registered_name)(name=name, **kwargs)
        candidate.add_pipeline(pipeline)
        if strict:
            candidate.fit()

        return Model, {
            'name': name,
            'registered_name': registered_name,
            'hash_': candidate._hash()
        }

    @classmethod
    def create(cls, registered_name, pipeline=None, **kwargs):
        '''
        Create, fit, and save a new model persistable.
        kwargs are passed directly to the persistable.

        :param registered_name: Class name registered in SimpleML
        :param pipeline: pipeline object
        '''
        if pipeline is None:
            # Only a reference was supplied; fetch the actual pipeline.
            pipeline = cls.retrieve_pipeline(**kwargs.pop('pipeline_kwargs', {}))

        model = cls.retrieve_from_registry(registered_name)(**kwargs)
        model.add_pipeline(pipeline)
        model.fit()
        model.save()
        return model

    @classmethod
    def retrieve_pipeline(cls, **pipeline_kwargs):
        # Delegate to the shared dependency lookup; raises TrainingError when missing.
        return cls.retrieve_dependency(PipelineCreator, **pipeline_kwargs)
class MetricCreator(PersistableCreator):
    """Retrieve-or-create logic for Metric persistables."""

    @classmethod
    def determine_filters(cls, name=None, model_id=None, strict=False, **kwargs):
        '''
        stateless method to determine which filters to apply when looking for
        existing persistable

        Returns: database class, filter dictionary

        :param registered_name: Class name registered in SimpleML
        :param strict: whether to fit objects first before assuming they are identical
            In theory if all inputs and classes are the same, the outputs should deterministically
            be the same as well (up to random iter). So, you dont need to fit objects
            to be sure they are the same
        '''
        # Check if dependency object was passed
        model = kwargs.pop('model', None)

        if name is not None and (model_id is not None or model is not None):
            # Can't use default name because metrics are hard coded to reflect dataset split + class
            filters = {
                'name': name,
                'model_id': model_id if model_id is not None else model.id,
            }

        else:
            if model is None:
                # Use dependency reference to retrieve object
                model = cls.retrieve_model(**kwargs.pop('model_kwargs', {}))

            # Build dummy object to retrieve hash to look for
            registered_name = kwargs.pop('registered_name')
            new_metric = cls.retrieve_from_registry(registered_name)(name=name, **kwargs)
            new_metric.add_model(model)
            if strict:
                new_metric.score()

            filters = {
                'name': new_metric.name,
                'registered_name': registered_name,
                'hash_': new_metric._hash()
            }

        return Metric, filters

    @classmethod
    def create(cls, registered_name, model=None, **kwargs):
        '''
        Stateless method to create a new persistable with the desired parameters
        kwargs are passed directly to persistable

        :param registered_name: Class name registered in SimpleML
        :param model: model class
        '''
        if model is None:
            # Use dependency reference to retrieve object
            model = cls.retrieve_model(**kwargs.pop('model_kwargs', {}))

        new_metric = cls.retrieve_from_registry(registered_name)(**kwargs)
        new_metric.add_model(model)
        new_metric.score()
        new_metric.save()

        return new_metric

    @classmethod
    def retrieve_model(cls, **model_kwargs):
        # Delegate to the shared dependency lookup; raises TrainingError when missing.
        return cls.retrieve_dependency(ModelCreator, **model_kwargs)
| 37.273438 | 130 | 0.631314 | 13,759 | 0.961294 | 0 | 0 | 13,435 | 0.938657 | 0 | 0 | 6,283 | 0.438972 |
46d8731739d55091c02beebba8b44113b38fd70d | 13,068 | py | Python | drizzlepac/haputils/make_poller_files.py | check-spelling/drizzlepac | 19baaf5a416c72f272889800b13d251f33f76d2c | [
"BSD-3-Clause"
] | 28 | 2016-08-16T04:16:32.000Z | 2022-03-27T15:39:29.000Z | drizzlepac/haputils/make_poller_files.py | check-spelling/drizzlepac | 19baaf5a416c72f272889800b13d251f33f76d2c | [
"BSD-3-Clause"
] | 822 | 2016-03-10T01:19:28.000Z | 2022-03-30T20:25:34.000Z | drizzlepac/haputils/make_poller_files.py | check-spelling/drizzlepac | 19baaf5a416c72f272889800b13d251f33f76d2c | [
"BSD-3-Clause"
] | 33 | 2016-03-16T19:18:03.000Z | 2021-12-27T04:20:44.000Z | #!/usr/bin/env python
"""Generates a poller file that will be used as input to runsinglehap.py, hapsequencer.py, runmultihap.py or
hapmultisequencer.py based on the files or rootnames listed user-specified list file.
USAGE
>>> python drizzlepac/haputils/make_poller_files.py <input filename> -[ost]
- input filename: Name of a file containing a list of calibrated fits files (ending with "_flt.fits" or
"_flc.fits") or rootnames (9 characters, usually ending with a "q" to process. The corresponding
flc.fits or flt.fits files must exist in the user-specified path, the current working directory or the
online cache
- The '-o' optional input allows users to input the name of an output poller file that will be created.
If not explicitly specified, the poller file will be named "poller_file.out".
- The '-s' optional input allows users to input the Name of the skycell. The correct syntax for skycell
names is "skycell-pNNNNxXXyXX", where NNNN is the 4-digit projection cell number, and XX and YY are the
two-digit X and Y skycell indices, respectively. NOTE: this input argument is not needed for SVM poller
file creation, but *REQUIRED* for MVM poller file creation. Users can determine the skycell(s) that
their observations occupy using the ``haputils.which_skycell`` script.
- The '-t' optional input allows users to specify the type of poller file that will be created. The
valid input options are "svm" to create a poller file for use with the single-visit mosaics pipeline
or "mvm" to create a poller file for use with the multiple-visit mosaics pipeline. If not explicitly
specified, the default value is "svm". NOTE: if creating a MVM poller file, one must specify the
skycell name using the "-s" input argument.
Python USAGE:
>>> python
>>> from drizzlepac.haputils import make_poller_files
>>> make_poller_files.generate_poller_file(input_list, poller_file_type='svm', output_poller_filename="poller_file.out", skycell_name=None):
"""
import argparse
import os
import re
import sys
from astropy.io import fits
from drizzlepac.haputils import poller_utils
__taskname__ = 'make_poller_files'
def generate_poller_file(input_list, poller_file_type='svm', output_poller_filename="poller_file.out",
                         skycell_name=None):
    """Creates a properly formatted SVM or MVM poller file.

    Parameters
    ----------
    input_list : str
        Name of the text file containing the list of filenames or rootnames to process

    poller_file_type : str, optional
        Type of poller file to create. 'svm' for single visit mosaic, 'mvm' for multi-visit mosaic. Default
        value is 'svm'.

    output_poller_filename : str, optional
        Name of the output poller file that will be created. Default value is 'poller_file.out'.

    skycell_name : str, optional
        Name of the skycell to use when creating a MVM poller file. skycell_name is REQUIRED for the creation
        of a MVM poller file, but completely unnecessary for the creation of a SVM poller file. The correct
        syntax for skycell names is 'skycell-pNNNNxXXyXX', where NNNN is the 4-digit projection cell number,
        and XX and YY are the two-digit X and Y skycell indices, respectively. Default value is logical
        'None'. Users can determine the skycell(s) that their observations occupy using the
        ``haputils.which_skycell`` script.

    Returns
    -------
    Nothing.

    Raises
    ------
    ValueError
        If a fits file comes from an unsupported instrument, or if ``skycell_name`` is malformed.
    Exception
        If ``poller_file_type`` is 'mvm' and no ``skycell_name`` was provided.
    """
    if poller_file_type == 'svm' and skycell_name:
        print("PROTIP: Users only need to provide a skycell name for the creation of MVM poller files, not SVM poller files.")

    # Read the list of rootnames/filenames; the context manager guarantees closure.
    with open(input_list, 'r') as f:
        rootname_list = f.readlines()

    output_list = []
    for rootname in rootname_list:
        rootname = rootname.strip()
        if not rootname:
            # Tolerate blank lines in the input list; previously a blank line was
            # passed to locate_fitsfile() and could trigger a spurious sys.exit.
            continue
        fullfilepath = locate_fitsfile(rootname)
        if len(fullfilepath) > 0:
            if rootname.endswith(".fits"):
                print("Found fits file {}".format(fullfilepath))
            else:
                print("Rootname {}: Found fits file {}".format(rootname, fullfilepath))
            imgname = fullfilepath.split(os.sep)[-1]
        else:
            # Warn user if no fits file can be located for a given rootname, and skip processing of the file.
            if rootname.endswith(".fits"):
                item_type = "filename"
            else:
                item_type = "rootname"
            print("WARNING: No fits file found for {} '{}'. This {} will be omitted from the poller file.".format(item_type, rootname, item_type))
            continue

        # Build each individual poller file line from the primary FITS header.
        linelist = [imgname]
        imghdu = fits.open(fullfilepath)
        try:
            imghdr = imghdu[0].header
            linelist.append("{}".format(imghdr['proposid']))
            linelist.append(imgname.split("_")[-2][1:4].upper())
            linelist.append(imghdr['linenum'].split(".")[0])
            linelist.append("{}".format(imghdr['exptime']))
            instrument = imghdr['INSTRUME'].lower()
            if instrument == "acs":
                # ACS carries two filter wheels; both keywords are combined.
                filter_name = poller_utils.determine_filter_name("{};{}".format(imghdr['FILTER1'], imghdr['FILTER2']))
            elif instrument == "wfc3":
                filter_name = poller_utils.determine_filter_name(imghdr['FILTER'])
            else:
                # Previously an unsupported instrument caused a NameError on the
                # (unset) filter variable; fail loudly and explicitly instead.
                raise ValueError("Unsupported instrument '{}' in file {}".format(imghdr['INSTRUME'], fullfilepath))
            linelist.append(filter_name.upper())
            linelist.append(imghdr['detector'].upper())
        finally:
            # Always release the FITS file handle, even when header access raises.
            imghdu.close()

        if poller_file_type == 'mvm':  # Additional stuff to add to MVM poller files
            if not skycell_name:
                raise Exception("No skycell name was provided. The name of the skycell that the observations occupy is required for MVM poller file creation.")
            pattern = re.compile(r"(skycell-p\d{4}x\d{2}y\d{2})")
            if not pattern.match(skycell_name):
                raise ValueError("'{}' is an improperly formatted skycell name. Please refer to documentation for information regarding correct skycell name syntax.".format(skycell_name))
            linelist.append("{}".format(skycell_name))
            linelist.append("NEW")
            linelist.append(fullfilepath)

        # Append newly created poller file line to the list of lines to be written to the output file.
        output_list.append(",".join(linelist))

    # Write the output poller file: newline-separated, no trailing newline
    # (equivalent to the previous per-line trailing_char bookkeeping).
    with open(output_poller_filename, 'w') as f:
        f.write("\n".join(output_list))
    print("wrote {} poller file '{}'.".format(poller_file_type.upper(), output_poller_filename))
# ============================================================================================================
def locate_fitsfile(search_string):
    """Return the full path + filename for a specified rootname or filename.

    The search algorithm looks for the file in the following order:

    - Search for a _flc.fits file in the current working directory
    - Search for a _flt.fits file in the current working directory
    - Search for a _flc.fits file in subdirectory in the path specified in $DATA_PATH
    - Search for a _flt.fits file in subdirectory in the path specified in $DATA_PATH

    Parameters
    ----------
    search_string : str
        rootname or filename to locate

    Returns
    -------
    fullfilepath : str
        full file path + image name of specified search_string, or an empty
        string if no file could be found.
    """
    if search_string.endswith(("_flt.fits", "_flc.fits")):  # Process search_string as a full filename
        # Look in user-provided path (assuming they provided one)
        if os.path.exists(search_string) and os.sep in search_string:
            return search_string
        # Look for files in CWD
        if os.path.exists(search_string) and os.sep not in search_string:
            return os.getcwd() + os.sep + search_string
        # If not found in CWD, look elsewhere...
        if not os.getenv("DATA_PATH"):
            sys.exit("ERROR: Undefined online cache data root path. Please set environment variable 'DATA_PATH'")
        fullfilepath = "{}{}{}{}{}{}{}".format(os.getenv("DATA_PATH"), os.sep, search_string[:4],
                                               os.sep, search_string[:-9], os.sep, search_string)
        # BUG FIX: test the constructed cache path, not the bare filename. The
        # original tested os.path.exists(search_string), which was already known
        # to be False at this point, so the $DATA_PATH branch never succeeded.
        if os.path.exists(fullfilepath):
            return fullfilepath
        return ""  # Return a null string if no file is found
    else:  # Process search_string as a rootname
        # Look for files in CWD first; prefer _flc over _flt.
        for fits_ext in ["flc", "flt"]:
            if os.path.exists("{}_{}.fits".format(search_string, fits_ext)):
                return "{}{}{}_{}.fits".format(os.getcwd(), os.sep, search_string, fits_ext)
        # If not found in CWD, look elsewhere...
        if not os.getenv("DATA_PATH"):
            sys.exit("ERROR: Undefined online cache data root path. Please set environment variable 'DATA_PATH'")
        filenamestub = "{}{}{}{}{}{}{}".format(os.getenv("DATA_PATH"), os.sep, search_string[:4],
                                               os.sep, search_string, os.sep, search_string)
        for fits_ext in ["flc", "flt"]:
            if os.path.exists("{}_{}.fits".format(filenamestub, fits_ext)):
                return "{}_{}.fits".format(filenamestub, fits_ext)
        # No file was found either locally or elsewhere in $DATA_PATH.
        return ""  # Return a null string if no file is found
# ============================================================================================================
if __name__ == '__main__':
    # Parse input arguments
    parser = argparse.ArgumentParser(description='Create a HAP SVM or MVM poller file')
    parser.add_argument('input_list',
                        help='Name of a file containing a list of calibrated fits files (ending with '
                             '"_flt.fits" or "_flc.fits") or rootnames (9 characters, usually ending '
                             'with a "q" to process. The corresponding flc.fits or flt.fits files must '
                             'exist in the user-specified path, the current working directory or the online '
                             'cache')
    parser.add_argument('-o', '--output_poller_filename', required=False, default="poller_file.out",
                        help='Name of an output poller file that will be created. If not explicitly '
                             'specified, the poller file will be named "poller_file.out".')
    parser.add_argument('-s', '--skycell_name', required=False, default="None",
                        help='Name of the skycell. The correct syntax for skycell names is '
                             '"skycell-pNNNNxXXyXX", where NNNN is the 4-digit projection cell number, and '
                             'XX and YY are the two-digit X and Y skycell indices, respectively. NOTE: this '
                             'input argument is not needed for SVM poller file creation, but *REQUIRED* for '
                             'MVM poller file creation. Users can determine the skycell(s) that their '
                             'observations occupy using the haputils.which_skycell.py script.')
    parser.add_argument('-t', '--poller_file_type', required=False, choices=['svm', 'mvm'], default='svm',
                        help='Type of poller file to be created. "svm" to create a poller file for use with '
                             'the single-visit mosaics pipeline and "mvm" to create a poller file for use '
                             'with the multiple-visit mosaics pipeline. If not explicitly '
                             'specified, the default value is "svm". NOTE: if creating a MVM poller file, '
                             'one must specify the skycell name using the "-s" input argument.')
    in_args = parser.parse_args()
    # reformat input args
    # argparse delivers the default as the literal string "None"; convert it to a
    # real None so downstream code can use simple truthiness / identity checks.
    if in_args.skycell_name == 'None':
        in_args.skycell_name = None
    # logic to make sure user has specified the skycell name if a MVM poller file is to be created.
    if in_args.poller_file_type == "mvm" and in_args.skycell_name is None:
        parser.error("ERROR: To create a MVM poller file, a skycell name must be specified with the '-s' argument.")
    generate_poller_file(in_args.input_list,
                         poller_file_type=in_args.poller_file_type,
                         output_poller_filename=in_args.output_poller_filename,
                         skycell_name=in_args.skycell_name)
| 54.224066 | 191 | 0.633762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,882 | 0.603153 |
46dd137785094becd75941a367274b47778b4b02 | 2,118 | py | Python | radloggerpy/tests/config/test_conf_fixture.py | Dantali0n/RadLoggerPy | c630ce730519001ee39fb3a02dd3652943a23067 | [
"Apache-2.0"
] | null | null | null | radloggerpy/tests/config/test_conf_fixture.py | Dantali0n/RadLoggerPy | c630ce730519001ee39fb3a02dd3652943a23067 | [
"Apache-2.0"
] | null | null | null | radloggerpy/tests/config/test_conf_fixture.py | Dantali0n/RadLoggerPy | c630ce730519001ee39fb3a02dd3652943a23067 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
# Copyright (c) 2019 Dantali0n
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from radloggerpy import config
from radloggerpy.tests import base
from radloggerpy import version
LOG = log.getLogger(__name__)
CONF = config.CONF
class TestConfFixture(base.TestCase):
    """Verify that the conf fixture restores configuration state between tests."""

    def setUp(self):
        super(TestConfFixture, self).setUp()
        # Keep a handle on the database 'filename' option definition so each
        # test below can compare the live config against its declared default.
        matches = [opt for opt in config.database.DATABASE_OPTS
                   if opt.name == 'filename']
        self.filename_opts = matches[0]

    def test_cfg_reset_part_one(self):
        # Must start at the default, then dirty it on purpose for the sibling test.
        self.assertEqual(self.filename_opts.default, CONF.database.filename)
        CONF.database.filename = 'part_one'
        self.assertEqual('part_one', CONF.database.filename)

    def test_cfg_reset_part_two(self):
        # If the fixture failed to reset, the value set by part_one would leak here.
        self.assertEqual(self.filename_opts.default, CONF.database.filename)
        CONF.database.filename = 'part_two'
        self.assertEqual('part_two', CONF.database.filename)

    def test_cfg_parse_args_one(self):
        expected = version.version_info.release_string()
        self.assertEqual(expected, CONF.version)
        CONF.version = 'args_one'
        self.assertEqual('args_one', CONF.version)

    def test_cfg_parse_args_two(self):
        expected = version.version_info.release_string()
        self.assertEqual(expected, CONF.version)
        CONF.version = 'args_two'
        self.assertEqual('args_two', CONF.version)
| 35.898305 | 75 | 0.699245 | 1,336 | 0.630784 | 0 | 0 | 0 | 0 | 0 | 0 | 784 | 0.370161 |
46dd367bc104a82f56c26623af9a311c62796d6c | 3,932 | py | Python | native/jni/external/selinux/python/sepolicy/sepolicy/templates/rw.py | Joyoe/Magisk-nosbin_magisk-nohide | 449441921740bf85926c14f41b3532822ca0eb65 | [
"MIT"
] | 2 | 2022-01-16T00:59:54.000Z | 2022-02-09T12:00:48.000Z | native/jni/external/selinux/python/sepolicy/sepolicy/templates/rw.py | Joyoe/Magisk-nosbin_magisk-nohide | 449441921740bf85926c14f41b3532822ca0eb65 | [
"MIT"
] | null | null | null | native/jni/external/selinux/python/sepolicy/sepolicy/templates/rw.py | Joyoe/Magisk-nosbin_magisk-nohide | 449441921740bf85926c14f41b3532822ca0eb65 | [
"MIT"
] | 2 | 2022-02-09T12:00:39.000Z | 2022-02-21T18:34:46.000Z | # Copyright (C) 2007-2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# policygentool is a tool for the initial generation of SELinux policy
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA
#
#
########################### tmp Template File #############################
# Type-enforcement (.te) declarations: declare the read/write content type and
# mark it as a file type so generic file-type rules apply to it.
te_types="""
type TEMPLATETYPE_rw_t;
files_type(TEMPLATETYPE_rw_t)
"""
# Type-enforcement rules granting the confined domain full manage access
# (create/read/write/delete) over its rw directories, files and symlinks.
te_rules="""
manage_dirs_pattern(TEMPLATETYPE_t, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
manage_files_pattern(TEMPLATETYPE_t, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
manage_lnk_files_pattern(TEMPLATETYPE_t, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
"""
########################### Interface File #############################
if_rules="""
########################################
## <summary>
## Search TEMPLATETYPE rw directories.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_search_rw_dir',`
gen_require(`
type TEMPLATETYPE_rw_t;
')
allow $1 TEMPLATETYPE_rw_t:dir search_dir_perms;
files_search_rw($1)
')
########################################
## <summary>
## Read TEMPLATETYPE rw files.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_read_rw_files',`
gen_require(`
type TEMPLATETYPE_rw_t;
')
read_files_pattern($1, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
allow $1 TEMPLATETYPE_rw_t:dir list_dir_perms;
files_search_rw($1)
')
########################################
## <summary>
## Manage TEMPLATETYPE rw files.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_manage_rw_files',`
gen_require(`
type TEMPLATETYPE_rw_t;
')
manage_files_pattern($1, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
')
########################################
## <summary>
## Create, read, write, and delete
## TEMPLATETYPE rw dirs.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_manage_rw_dirs',`
gen_require(`
type TEMPLATETYPE_rw_t;
')
manage_dirs_pattern($1, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
')
"""
te_stream_rules="""
manage_sock_files_pattern(TEMPLATETYPE_t, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
"""
if_stream_rules="""\
########################################
## <summary>
## Connect to TEMPLATETYPE over a unix stream socket.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_stream_connect',`
gen_require(`
type TEMPLATETYPE_t, TEMPLATETYPE_rw_t;
')
stream_connect_pattern($1, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t, TEMPLATETYPE_t)
')
"""
if_admin_types="""
type TEMPLATETYPE_rw_t;"""
if_admin_rules="""
files_search_etc($1)
admin_pattern($1, TEMPLATETYPE_rw_t)
"""
########################### File Context ##################################
# File-context (.fc) entry labeling a regular file with the rw type.
fc_file="""
FILENAME -- gen_context(system_u:object_r:TEMPLATETYPE_rw_t,s0)
"""
# File-context entry for a socket file.
# NOTE(review): this labels the socket TEMPLATETYPE_etc_rw_t rather than
# TEMPLATETYPE_rw_t like the rest of this template -- looks like a
# copy/paste from the etc_rw template; confirm against upstream before use.
fc_sock_file="""\
FILENAME -s gen_context(system_u:object_r:TEMPLATETYPE_etc_rw_t,s0)
"""
# File-context entry labeling a directory tree with the rw type.
fc_dir="""
FILENAME(/.*)? gen_context(system_u:object_r:TEMPLATETYPE_rw_t,s0)
"""
| 24.72956 | 81 | 0.653611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,771 | 0.959054 |
46ddb3dd7ba68b21876cf1b57fdf4ef088c06100 | 5,909 | py | Python | src/htmxl/compose/write/writer.py | schireson/htmxl | d4adef9fe0630f39c3664d3913bbbbe3db3ec069 | [
"MIT"
] | 2 | 2021-08-11T15:15:29.000Z | 2022-03-20T04:04:54.000Z | src/htmxl/compose/write/writer.py | schireson/htmxl | d4adef9fe0630f39c3664d3913bbbbe3db3ec069 | [
"MIT"
] | 5 | 2021-08-18T20:54:32.000Z | 2022-01-24T20:06:46.000Z | src/htmxl/compose/write/writer.py | schireson/htmxl | d4adef9fe0630f39c3664d3913bbbbe3db3ec069 | [
"MIT"
] | null | null | null | """A module dedicated to writing data to a workbook."""
import logging
from contextlib import contextmanager
import openpyxl.styles
from htmxl.compose.cell import Cell
from htmxl.compose.recording import Recording
from htmxl.compose.style import style_range
from htmxl.compose.write import elements
logger = logging.getLogger(__name__)
class Writer:
    """Tracks a cursor position on an openpyxl worksheet and writes/styles cells."""

    def __init__(self, sheet, ref="A1"):
        self.current_cell = Cell(ref)
        self.sheet = sheet
        self._recordings = {}
        self._validations = {}
        self._auto_filter_set = False
        # Dispatch table mapping HTML tag names to their element writers.
        self._element_handlers = {
            "root": elements.write_body,
            "html": elements.write_body,
            "body": elements.write_body,
            "head": elements.write_head,
            "div": elements.write_div,
            "span": elements.write_span,
            "br": elements.write_br,
            "table": elements.write_table,
            "tr": elements.write_tr,
            "th": elements.write_th,
            "td": elements.write_td,
            "thead": elements.write_thead,
            "tbody": elements.write_tbody,
            "string": elements.write_string,
            "datalist": elements.create_datavalidation,
            "input": elements.write_input,
        }

    @property
    def row(self):
        """Current 1-based row index of the cursor."""
        return self.current_cell.row

    @property
    def col(self):
        """Current 1-based column index of the cursor."""
        return self.current_cell.col

    @property
    def ref(self):
        """Current cell reference string (e.g. 'A1')."""
        return self.current_cell.ref

    def write(self, element, styler):
        """Write a parsed HTML element (and its children) at the current position."""
        elements.write(writer=self, element=element, styler=styler)

    def get_cell(self, *, ref=None):
        """Return the openpyxl cell at ``ref`` (defaults to the current cell)."""
        if ref is None:
            ref = self.current_cell
        return self.sheet.cell(column=ref.col, row=ref.row)

    def write_cell(self, value, styler, style=None):
        """Write ``value`` into the current cell, applying ``style`` if given."""
        cell = self.sheet.cell(column=self.col, row=self.row, value=value)
        if style:
            cell.style = styler.calculate_style(style)
        # Record the visited cell for any active recordings.
        if self._recordings:
            for recording in self._recordings.values():
                recording.append(self.current_cell)

    def move_down(self, num=1):
        """Move the cursor ``num`` rows down."""
        self.current_cell = Cell.from_location(col=self.col, row=self.row + num)

    def move_up(self, num=1):
        """Move the cursor ``num`` rows up, never above row 1."""
        # BUG FIX: the original only guarded row == 1, so e.g. row 2 with
        # num=2 produced an invalid row 0 reference. Clamp to row 1 instead.
        self.current_cell = Cell.from_location(col=self.col, row=max(1, self.row - num))

    def move_left(self, num=1):
        """Move the cursor ``num`` columns left, never before column 1."""
        # Same clamping fix as move_up, applied to the column axis.
        self.current_cell = Cell.from_location(col=max(1, self.col - num), row=self.row)

    def move_right(self, num=1):
        """Move the cursor ``num`` columns right."""
        self.current_cell = Cell.from_location(col=self.col + num, row=self.row)

    def move(self, movement):
        """Move the cursor by direction name: 'down', 'up', 'left' or 'right'."""
        movement_function = getattr(self, "move_{}".format(movement))
        movement_function()

    def move_to(self, col, row):
        """Jump the cursor to an absolute (col, row) location."""
        self.current_cell = Cell.from_location(col=col, row=row)

    @contextmanager
    def record(self):
        """Context manager that records every cell written while it is active."""
        recording = Recording()
        recording_id = recording.id()
        self._recordings[recording_id] = recording
        yield recording
        self.stop_recording(recording_id)

    def stop_recording(self, recording_id):
        """Stop (and discard) the recording with the given id."""
        del self._recordings[recording_id]

    def auto_filter(self, ref):
        """Set the sheet's auto-filter range; may only be done once per sheet."""
        if self._auto_filter_set:
            raise RuntimeError("You may only set autofilter once per sheet.")
        self.sheet.auto_filter.ref = ref
        self._auto_filter_set = True

    def style_inline(self, element, included_cells, inline_style):
        """Apply supported inline style properties to ``included_cells``."""
        if inline_style.get("width"):
            # Split the requested total width evenly across the affected columns.
            column_refs = {cell.col_ref for cell in included_cells}
            column_width = round(inline_style.get("width") / len(column_refs))
            for col_ref in column_refs:
                self.sheet.column_dimensions[col_ref].width = column_width
        if inline_style.get("height"):
            # Split the requested total height evenly across the affected rows.
            row_refs = {cell.row for cell in included_cells}
            column_height = round(inline_style.get("height") / len(row_refs))
            for row_ref in row_refs:
                self.sheet.row_dimensions[row_ref].height = column_height
        if inline_style.get("text-align"):
            horizontal_alignment = inline_style.get("text-align")
            for cell in included_cells:
                self.sheet[cell.ref].alignment = self.sheet[
                    cell.ref
                ].alignment + openpyxl.styles.Alignment(horizontal=horizontal_alignment)
        if inline_style.get("vertical-align"):
            vertical_alignment = inline_style.get("vertical-align")
            for cell in included_cells:
                self.sheet[cell.ref].alignment = self.sheet[
                    cell.ref
                ].alignment + openpyxl.styles.Alignment(vertical=vertical_alignment)
        if inline_style.get("data-wrap-text"):
            if inline_style.get("data-wrap-text").lower() == "true":
                for cell in included_cells:
                    logger.debug("Setting wrapText=True for cell {}".format(cell))
                    self.sheet[cell.ref].alignment = self.sheet[
                        cell.ref
                    ].alignment + openpyxl.styles.Alignment(wrapText=True)

    def style_range(self, reference_style, cell_range):
        """Apply ``reference_style`` to every cell in ``cell_range``."""
        style_range(self.sheet, reference_style, cell_range)

    def add_validation(self, id, validation):
        """Register a named data validation and attach it to the sheet."""
        self._validations[id] = validation
        self.sheet.add_data_validation(validation)

    def add_validation_to_cell(self, validation_name):
        """Attach a previously registered data validation to the current cell."""
        validation = self._validations.get(validation_name)
        if not validation:
            raise ValueError(f"<datalist> validation with name '{validation_name}' does not exist")
        current_cell = self.get_cell()
        validation.add(current_cell)
| 33.573864 | 99 | 0.617871 | 5,567 | 0.942122 | 205 | 0.034693 | 420 | 0.071078 | 0 | 0 | 438 | 0.074124 |
46dea0e929c6d4425f82b7f896f31290465dc137 | 820 | py | Python | punto/__init__.py | xu-hong/Punto | 906d2725d8e4c72b5c094a5f1f738f5ce2a7532f | [
"MIT"
] | null | null | null | punto/__init__.py | xu-hong/Punto | 906d2725d8e4c72b5c094a5f1f738f5ce2a7532f | [
"MIT"
] | null | null | null | punto/__init__.py | xu-hong/Punto | 906d2725d8e4c72b5c094a5f1f738f5ce2a7532f | [
"MIT"
] | null | null | null | """
Compartmentalize:
[ ascii art input ]
---------------
| maybe some |
| sort of auxillary |
| drawing program |
|
|
\ /
v
[ lex/parser ] --> [ translater ]
--------- ----------
| grammar | | notes literals|
| to numerical |
| patterns with |
| timestamps |
| and midi meta |
| data |
|
,------------------------'
|
`--> [ sequencer ]
-----------
| do the |
| sequencing |
| duh |
|
|
\ /
v
[ midi sender ]
----------
| communicate to |
| midi external |
"""
| 20.5 | 46 | 0.265854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 819 | 0.99878 |
46df810892583016f332b3bb6b66329f389547e7 | 189 | py | Python | engine/tests/scenes/test_scenes.py | LloydTao/ecm3423-fur-effect | fefa73665b459dfd1648dca97a95e8313cf53dd5 | [
"MIT"
] | null | null | null | engine/tests/scenes/test_scenes.py | LloydTao/ecm3423-fur-effect | fefa73665b459dfd1648dca97a95e8313cf53dd5 | [
"MIT"
] | null | null | null | engine/tests/scenes/test_scenes.py | LloydTao/ecm3423-fur-effect | fefa73665b459dfd1648dca97a95e8313cf53dd5 | [
"MIT"
] | null | null | null | import unittest
from ...scenes import Scene
class TestSceneMethods(unittest.TestCase):
def test_scene(self):
scene = Scene()
if __name__ == "__main__":
unittest.main()
| 14.538462 | 42 | 0.68254 | 92 | 0.486772 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.05291 |
46e269d01f45d8d68174ca63ae8d4c89e50af8a2 | 1,545 | py | Python | paper/F3/D_fixed_points.py | FedeClaudi/manyfolds | 68f7abba9059e3063eae46285f1c95157ea454cf | [
"MIT"
] | 3 | 2021-11-04T10:29:40.000Z | 2021-11-04T12:28:57.000Z | paper/F3/D_fixed_points.py | FedeClaudi/manyfolds | 68f7abba9059e3063eae46285f1c95157ea454cf | [
"MIT"
] | null | null | null | paper/F3/D_fixed_points.py | FedeClaudi/manyfolds | 68f7abba9059e3063eae46285f1c95157ea454cf | [
"MIT"
] | null | null | null | import sys
import numpy as np
sys.path.append("./")
from vedo import screenshot
from vedo.shapes import Tube
from myterial import salmon
from manifold import embeddings, Plane
from manifold.visualize import Visualizer
from manifold import visualize
from manifold.rnn import RNN
"""
3D viisualization of an RNN's dynamics over time fitted
to the plane with a single fixed point attractor
at the center.
"""
visualize.reco_surface_radius = 0.5
visualize.point_size = 0.03
visualize.tangent_vector_radius = 0.015
visualize.rnn_trace_alpha = 0.62
N = 64
K = 12
def vfield(point):
    """Vector field with a single attracting fixed point at (0.5, 0.5)."""
    u, v = point.coordinates
    return (3 * (0.5 - u), 3 * (0.5 - v))
M = Plane(embeddings.plane_to_rn_flat, n_sample_points=12)
M.vectors_field = vfield
# fit and run RNN
rnn = RNN(M, n_units=N)
rnn.build_W(k=K, scale=1)
rnn.run_points(n_seconds=60, cut=False)
# visualize vector field
viz = Visualizer(M, rnn=None, axes=0, manifold_alpha=1, pca_sample_points=100)
cam = dict(
pos=(-7.84, -8.65, 3.76),
focalPoint=(2.38e-7, 0, 1.49),
viewup=(0.0954, 0.171, 0.981),
distance=11.9,
clippingRange=(6.02, 19.3),
)
for trace in rnn.traces:
trace_pc = viz.pca.transform(trace.trace)
coords = trace_pc.copy()
coords[:, 2] = np.linspace(0, 3, len(coords))
tube = Tube(coords, c=salmon, r=0.01, alpha=1)
viz.actors.append(tube)
viz._add_silhouette(tube)
# show vector field
viz.show(scale=0.15, show_points=True, camera=cam)
screenshot(f"./paper/images/3D_vfield_.png")
| 22.391304 | 78 | 0.70356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.16699 |
46e4328cfd69808c97427162d164c51749e7b2f4 | 2,567 | py | Python | everest/window/data/pile.py | rsbyrne/everest | 1ec06301cdeb7c2b7d85daf6075d996c5529247e | [
"MIT"
] | 2 | 2020-12-17T02:27:28.000Z | 2020-12-17T23:50:13.000Z | everest/window/data/pile.py | rsbyrne/everest | 1ec06301cdeb7c2b7d85daf6075d996c5529247e | [
"MIT"
] | 1 | 2020-12-07T10:14:45.000Z | 2020-12-07T10:14:45.000Z | everest/window/data/pile.py | rsbyrne/everest | 1ec06301cdeb7c2b7d85daf6075d996c5529247e | [
"MIT"
] | 1 | 2020-10-22T11:16:50.000Z | 2020-10-22T11:16:50.000Z | ###############################################################################
''''''
###############################################################################
from functools import cached_property
from collections import OrderedDict
from collections.abc import Sequence
from .channel import DataChannel
from .spread import DataSpread
# def merge_dicts(d1, d2):
# for key, val in d2.items():
# if key not in ('lims', 'capped', 'label', 'i'):
# if key in d1:
# if not d1[key] == val:
# raise ValueError("Key clash.")
# continue
# d1[key] = val
class DataPile(Sequence):
    """A mutable sequence of DataSpread objects with a cached concatenation.

    The ``concatenated`` property lazily merges all spreads dimension-by-dimension
    and is invalidated (via ``_delself``) whenever the pile is mutated.
    """

    def __init__(self,
            *datas
            ):
        self.datas = list(DataSpread.convert(d) for d in datas)

    @cached_property
    def concatenated(self):
        """Merge all spreads into a single DataSpread, one dimension at a time."""
        if len(self.datas) == 1:
            return self.datas[0]
        outs = []
        for dim in ('x', 'y', 'z', 'c', 's', 'l'):
            datas = [d[dim] for d in self.datas]
            datas = [d for d in datas if not d is None]
            if datas:
                allD = datas[0]
                for data in datas[1:]:
                    allD = allD.merge(data)
            else:
                allD = None
            outs.append(allD)
        return DataSpread(*outs) # pylint: disable=E1120

    def _delself(self):
        """Invalidate the cached ``concatenated`` property (no-op if not computed)."""
        try:
            del self.concatenated
        except AttributeError:
            pass

    def __getitem__(self, key):
        return self.datas.__getitem__(key)

    def __setitem__(self, key, val):
        self._delself()
        self.datas.__setitem__(key, val)

    def __delitem__(self, key):
        self._delself()
        self.datas.__delitem__(key)

    def __len__(self):
        return len(self.datas)

    def append(self, arg):
        """Append a spread, invalidating the cached concatenation.

        BUG FIX: previously ``append`` did not call ``_delself``, so appending
        directly (rather than via ``add``) left a stale cached ``concatenated``.
        """
        self._delself()
        self.datas.append(arg)

    def align_channel(self, arg, ind):
        """Convert ``arg`` to a DataChannel and align it with existing data at ``ind``."""
        if arg is None:
            return arg
        arg = DataChannel.convert(arg)
        conc = self.concatenated[ind]
        if conc is None:
            return arg
        return conc.align(arg)

    def add(self, *args):
        """Align each channel against the pile, append the resulting spread, return it."""
        self._delself()
        args = (self.align_channel(arg, ind) for ind, arg in enumerate(args))
        spread = DataSpread(*args)
        self.append(spread)
        self._delself()
        return spread

    def clear(self):
        """Remove all spreads and invalidate the cache."""
        self.datas.clear()
        self._delself()
###############################################################################
''''''
###############################################################################
| 32.0875 | 79 | 0.470978 | 1,757 | 0.684457 | 0 | 0 | 555 | 0.216206 | 0 | 0 | 657 | 0.255941 |
46e449ebb96ea8dbe3851d2cb33720b71feeea7a | 3,591 | py | Python | app/modules/entity/endpoint_entity.py | Clivern/Kevin | dfa6fe99d2599a3f1a9da7c9690e2fba6a825f1d | [
"Apache-2.0"
] | 2 | 2018-06-18T09:37:36.000Z | 2021-06-23T02:09:41.000Z | app/modules/entity/endpoint_entity.py | Clivern/Kevin | dfa6fe99d2599a3f1a9da7c9690e2fba6a825f1d | [
"Apache-2.0"
] | 45 | 2018-04-08T11:53:05.000Z | 2018-06-12T20:45:38.000Z | app/modules/entity/endpoint_entity.py | Clivern/Kevin | dfa6fe99d2599a3f1a9da7c9690e2fba6a825f1d | [
"Apache-2.0"
] | null | null | null | """
Endpoint Entity Module
"""
# Django
from django.contrib.auth.models import User
# local Django
from app.models import Endpoint
from app.models import Namespace
from app.models import Endpoint_Meta
from app.modules.util.helpers import Helpers
class Endpoint_Entity():
    """Data-access layer for Endpoint model objects."""

    # Supported HTTP methods / special endpoint modes.
    GET = "get"
    POST = "post"
    # NOTE(review): constant name looks like a typo for HEAD; value is correct.
    # Renaming would break existing callers, so it is kept as-is.
    HEADE = "head"
    PUT = "put"
    DELETE = "delete"
    PATCH = "patch"
    TRACE = "trace"
    OPTIONS = "options"
    CONNECT = "connect"
    ANY = "any"
    VALIDATE = "validate"
    DEBUG = "debug"
    DYNAMIC = "dynamic"

    def insert_one(self, endpoint):
        """Insert a New Endpoint; returns the instance, or False on failure."""
        endpoint = Endpoint(
            route=endpoint["route"],
            method=endpoint["method"],
            target=endpoint["target"],
            route_rules=endpoint["route_rules"],
            headers_rules=endpoint["headers_rules"],
            body_rules=endpoint["body_rules"],
            namespace=Namespace.objects.get(pk=endpoint["namespace_id"])
        )
        endpoint.save()
        return False if endpoint.pk is None else endpoint

    def insert_many(self, endpoints):
        """Insert Many Endpoints; True only if every insert succeeded."""
        status = True
        for endpoint in endpoints:
            status &= self.insert_one(endpoint) is not False
        return status

    def get_one_by_id(self, id):
        """Get Endpoint By ID; returns False when not found."""
        try:
            endpoint = Endpoint.objects.get(pk=id)
            return False if endpoint.pk is None else endpoint
        except Exception:
            # BUG FIX: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt. Exception still covers the
            # expected Endpoint.DoesNotExist lookup failure.
            return False

    def get_many_by_namespace(self, namespace_id, order_by, asc):
        """Get Many Endpoints By Namespace ID, ordered by ``order_by``."""
        endpoints = Endpoint.objects.filter(namespace=namespace_id).order_by(order_by if asc else "-%s" % order_by)
        return endpoints

    def get_many_ids_by_namespace(self, namespace_id):
        """Get the IDs of all Endpoints in a Namespace."""
        endpoints = Endpoint.objects.filter(namespace=namespace_id)
        return [endpoint.id for endpoint in endpoints]

    def count_by_namespace(self, namespace_id):
        """Count Endpoints By Namespace ID"""
        count = Endpoint.objects.filter(namespace=namespace_id).count()
        return count

    def count_by_target(self, target, namespace_id):
        """Count Endpoints in a Namespace that share the given target."""
        count = Endpoint.objects.filter(namespace=namespace_id, target=target).count()
        return count

    def update_one_by_id(self, id, new_data):
        """Update Endpoint By ID; returns True on success, False when not found."""
        endpoint = self.get_one_by_id(id)
        if endpoint is not False:
            # Only overwrite fields that are present in new_data.
            if "route" in new_data:
                endpoint.route = new_data["route"]
            if "method" in new_data:
                endpoint.method = new_data["method"]
            if "target" in new_data:
                endpoint.target = new_data["target"]
            if "route_rules" in new_data:
                endpoint.route_rules = new_data["route_rules"]
            if "headers_rules" in new_data:
                endpoint.headers_rules = new_data["headers_rules"]
            if "body_rules" in new_data:
                endpoint.body_rules = new_data["body_rules"]
            if "namespace_id" in new_data:
                endpoint.namespace = Namespace.objects.get(pk=new_data["namespace_id"])
            endpoint.save()
            return True
        return False

    def delete_one_by_id(self, id):
        """Delete Endpoint By ID; returns True if a row was removed."""
        endpoint = self.get_one_by_id(id)
        if endpoint is not False:
            count, deleted = endpoint.delete()
            return True if count > 0 else False
        return False
46e63dd31aa69a8cfc27255c2ad18d4d445a70e1 | 12,005 | py | Python | src/krux/pages/home.py | odudex/krux | db421a3f107c0263221e5f1e877e9c38925bb17c | [
"MIT"
] | null | null | null | src/krux/pages/home.py | odudex/krux | db421a3f107c0263221e5f1e877e9c38925bb17c | [
"MIT"
] | null | null | null | src/krux/pages/home.py | odudex/krux | db421a3f107c0263221e5f1e877e9c38925bb17c | [
"MIT"
] | null | null | null | # The MIT License (MIT)
# Copyright (c) 2021 Tom J. Sun
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import binascii
import gc
import hashlib
import lcd
from ..baseconv import base_encode
from ..display import DEFAULT_PADDING
from ..psbt import PSBTSigner
from ..qr import FORMAT_NONE, FORMAT_PMOFN
from ..input import BUTTON_ENTER
from ..wallet import Wallet, parse_address
from ..i18n import t
from . import Page, Menu, MENU_CONTINUE, MENU_EXIT
class Home(Page):
    """Home is the main menu page of the app.

    Each menu handler returns MENU_CONTINUE so the caller's menu loop
    keeps running after the action completes.
    """

    def __init__(self, ctx):
        # Build the top-level menu; each entry pairs a translated label
        # with its handler method.
        Page.__init__(
            self,
            ctx,
            Menu(
                ctx,
                [
                    (t("Mnemonic"), self.mnemonic),
                    (t("Extended Public Key"), self.public_key),
                    (t("Wallet"), self.wallet),
                    (t("Scan Address"), self.scan_address),
                    (t("Sign"), self.sign),
                    (t("Shutdown"), self.shutdown),
                ],
            ),
        )

    def mnemonic(self):
        """Handler for the 'mnemonic' menu item"""
        self.display_mnemonic(self.ctx.wallet.key.mnemonic)
        self.ctx.input.wait_for_button()
        self.print_qr_prompt(self.ctx.wallet.key.mnemonic, FORMAT_NONE)
        return MENU_CONTINUE

    def public_key(self):
        """Handler for the 'xpub' menu item"""
        # Show both the plain xpub (version=None) and the zpub/Zpub
        # (SLIP-132) form; Zpub is the multisig variant.
        zpub = "Zpub" if self.ctx.wallet.key.multisig else "zpub"
        for version in [None, self.ctx.wallet.key.network[zpub]]:
            self.ctx.display.clear()
            self.ctx.display.draw_centered_text(
                self.ctx.wallet.key.key_expression(version, pretty=True)
            )
            self.ctx.input.wait_for_button()
            xpub = self.ctx.wallet.key.key_expression(version)
            self.display_qr_codes(xpub, FORMAT_NONE, None)
            self.print_qr_prompt(xpub, FORMAT_NONE)
        return MENU_CONTINUE

    def wallet(self):
        """Handler for the 'wallet' menu item"""
        if not self.ctx.wallet.is_loaded():
            self.ctx.display.draw_centered_text(t("Wallet not found."))
            btn = self.prompt(t("Load one?"), self.ctx.display.bottom_prompt_line)
            if btn == BUTTON_ENTER:
                return self._load_wallet()
        else:
            self.display_wallet(self.ctx.wallet)
            wallet_data, qr_format = self.ctx.wallet.wallet_qr()
            self.print_qr_prompt(wallet_data, qr_format)
        return MENU_CONTINUE

    def _load_wallet(self):
        # Scan a wallet descriptor QR, preview it, and only replace
        # ctx.wallet after the user confirms with ENTER.
        wallet_data, qr_format = self.capture_qr_code()
        if wallet_data is None:
            self.ctx.display.flash_text(t("Failed to load wallet"), lcd.RED)
            return MENU_CONTINUE
        try:
            wallet = Wallet(self.ctx.wallet.key)
            wallet.load(wallet_data, qr_format)
            self.display_wallet(wallet, include_qr=False)
            btn = self.prompt(t("Load?"), self.ctx.display.bottom_prompt_line)
            if btn == BUTTON_ENTER:
                self.ctx.wallet = wallet
                self.ctx.log.debug(
                    "Wallet descriptor: %s" % self.ctx.wallet.descriptor.to_string()
                )
                self.ctx.display.flash_text(t("Loaded wallet"))
        except Exception as e:
            self.ctx.log.exception("Exception occurred loading wallet")
            self.ctx.display.clear()
            self.ctx.display.draw_centered_text(
                t("Invalid wallet:\n%s") % repr(e), lcd.RED
            )
            self.ctx.input.wait_for_button()
        return MENU_CONTINUE

    def scan_address(self):
        """Handler for the 'scan address' menu item"""
        data, qr_format = self.capture_qr_code()
        if data is None or qr_format != FORMAT_NONE:
            self.ctx.display.flash_text(t("Failed to load address"), lcd.RED)
            return MENU_CONTINUE
        addr = None
        # NOTE(review): bare except also swallows KeyboardInterrupt;
        # presumably intentional on embedded firmware — confirm.
        try:
            addr = parse_address(data)
        except:
            self.ctx.display.flash_text(t("Invalid address"), lcd.RED)
            return MENU_CONTINUE
        self.display_qr_codes(data, qr_format, title=addr)
        self.print_qr_prompt(data, qr_format)
        if self.ctx.wallet.is_loaded() or not self.ctx.wallet.is_multisig():
            self.ctx.display.clear()
            self.ctx.display.draw_centered_text(
                t("Check that address belongs to this wallet?")
            )
            btn = self.prompt(" ", self.ctx.display.bottom_prompt_line)
            if btn != BUTTON_ENTER:
                return MENU_CONTINUE
            found = False
            num_checked = 0
            # Derive receive addresses in batches of 20 and compare each
            # against the scanned address, asking the user before
            # continuing past each batch.
            while not found:
                for recv_addr in self.ctx.wallet.receive_addresses(
                    num_checked, limit=20
                ):
                    self.ctx.display.clear()
                    self.ctx.display.draw_centered_text(
                        t("Checking receive address %d for match..") % num_checked
                    )
                    num_checked += 1
                    found = addr == recv_addr
                    if found:
                        break
                gc.collect()
                if not found:
                    self.ctx.display.clear()
                    self.ctx.display.draw_centered_text(
                        t("Checked %d receive addresses with no matches.") % num_checked
                    )
                    btn = self.prompt(
                        t("Try more?"), self.ctx.display.bottom_prompt_line
                    )
                    if btn != BUTTON_ENTER:
                        break
            self.ctx.display.clear()
            result_message = (
                t("%s\n\nis a valid receive address") % addr
                if found
                else t("%s\n\nwas NOT FOUND in the first %d receive addresses")
                % (addr, num_checked)
            )
            self.ctx.display.draw_centered_text(result_message)
            self.ctx.input.wait_for_button()
        return MENU_CONTINUE

    def sign(self):
        """Handler for the 'sign' menu item"""
        submenu = Menu(
            self.ctx,
            [
                (t("PSBT"), self.sign_psbt),
                (t("Message"), self.sign_message),
                (t("Back"), lambda: MENU_EXIT),
            ],
        )
        index, status = submenu.run_loop()
        # Last entry is "Back": swallow MENU_EXIT so only the submenu closes.
        if index == len(submenu.menu) - 1:
            return MENU_CONTINUE
        return status

    def sign_psbt(self):
        """Handler for the 'sign psbt' menu item"""
        if not self.ctx.wallet.is_loaded():
            # Without a loaded descriptor, output/change verification is
            # limited — warn and let the user decide.
            self.ctx.display.draw_centered_text(
                t("WARNING:\nWallet not loaded.\n\nSome checks cannot be performed."),
                lcd.WHITE,
            )
            btn = self.prompt(t("Proceed?"), self.ctx.display.bottom_prompt_line)
            if btn != BUTTON_ENTER:
                return MENU_CONTINUE
        data, qr_format = self.capture_qr_code()
        qr_format = FORMAT_PMOFN if qr_format == FORMAT_NONE else qr_format
        if data is None:
            self.ctx.display.flash_text(t("Failed to load PSBT"), lcd.RED)
            return MENU_CONTINUE
        self.ctx.display.clear()
        self.ctx.display.draw_centered_text(t("Loading.."))
        signer = PSBTSigner(self.ctx.wallet, data)
        self.ctx.log.debug("Received PSBT: %s" % signer.psbt)
        outputs = signer.outputs()
        self.ctx.display.clear()
        self.ctx.display.draw_hcentered_text("\n \n".join(outputs))
        btn = self.prompt(t("Sign?"), self.ctx.display.bottom_prompt_line)
        if btn == BUTTON_ENTER:
            signed_psbt = signer.sign()
            self.ctx.log.debug("Signed PSBT: %s" % signer.psbt)
            # Drop the signer before rendering QR codes to free memory on
            # the constrained device.
            signer = None
            gc.collect()
            self.display_qr_codes(signed_psbt, qr_format)
            self.print_qr_prompt(signed_psbt, qr_format)
        return MENU_CONTINUE

    def sign_message(self):
        """Handler for the 'sign message' menu item"""
        data, qr_format = self.capture_qr_code()
        if data is None or qr_format != FORMAT_NONE:
            self.ctx.display.flash_text(t("Failed to load message"), lcd.RED)
            return MENU_CONTINUE
        data = data.encode() if isinstance(data, str) else data
        message_hash = None
        if len(data) == 32:
            # It's a sha256 hash already
            message_hash = data
        else:
            if len(data) == 64:
                # It may be a hex-encoded sha256 hash
                try:
                    message_hash = binascii.unhexlify(data)
                except:
                    pass
            if message_hash is None:
                # It's a message, so compute its sha256 hash
                message_hash = hashlib.sha256(data).digest()
        self.ctx.display.clear()
        self.ctx.display.draw_centered_text(
            t("SHA256:\n%s") % binascii.hexlify(message_hash).decode()
        )
        btn = self.prompt(t("Sign?"), self.ctx.display.bottom_prompt_line)
        if btn != BUTTON_ENTER:
            return MENU_CONTINUE
        sig = self.ctx.wallet.key.sign(message_hash)
        # Encode sig as base64 string
        encoded_sig = base_encode(sig.serialize(), 64).decode()
        self.ctx.display.clear()
        self.ctx.display.draw_centered_text(t("Signature:\n\n%s") % encoded_sig)
        self.ctx.input.wait_for_button()
        self.display_qr_codes(encoded_sig, qr_format)
        self.print_qr_prompt(encoded_sig, qr_format)
        # Also show the public key so the signature can be verified.
        pubkey = binascii.hexlify(self.ctx.wallet.key.account.sec()).decode()
        self.ctx.display.clear()
        self.ctx.display.draw_centered_text(t("Public Key:\n\n%s") % pubkey)
        self.ctx.input.wait_for_button()
        self.display_qr_codes(pubkey, qr_format)
        self.print_qr_prompt(pubkey, qr_format)
        return MENU_CONTINUE

    def display_wallet(self, wallet, include_qr=True):
        """Displays a wallet, including its label and abbreviated xpubs.
        If include_qr is True, a QR code of the wallet will be shown
        which will contain the same data as was originally loaded, in
        the same QR format
        """
        about = wallet.label + "\n"
        if wallet.is_multisig():
            xpubs = []
            for i, xpub in enumerate(wallet.policy["cosigners"]):
                # Abbreviate each cosigner xpub: chars 4-6 + last 3.
                xpubs.append(
                    str(i + 1)
                    + ". "
                    + xpub[4:7]
                    + ".."
                    + xpub[len(xpub) - 3 : len(xpub)]
                )
            about += "\n".join(xpubs)
        else:
            xpub = wallet.key.xpub()
            about += xpub[4:7] + ".." + xpub[len(xpub) - 3 : len(xpub)]
        if include_qr:
            wallet_data, qr_format = wallet.wallet_qr()
            self.display_qr_codes(wallet_data, qr_format, title=about)
        else:
            self.ctx.display.draw_hcentered_text(about, offset_y=DEFAULT_PADDING)
| 38.477564 | 88 | 0.576676 | 10,545 | 0.878384 | 0 | 0 | 0 | 0 | 0 | 0 | 2,608 | 0.217243 |
46e64d04cb168b2efed6cd2ba94658c95071fa2b | 21,705 | py | Python | src/main/python/main.py | ctmrbio/list_scanner | 895a49d4d86a1e794592f4f9b9e711109015e475 | [
"MIT"
] | null | null | null | src/main/python/main.py | ctmrbio/list_scanner | 895a49d4d86a1e794592f4f9b9e711109015e475 | [
"MIT"
] | 6 | 2018-09-23T17:41:21.000Z | 2018-10-24T16:32:46.000Z | src/main/python/main.py | ctmrbio/list_scanner | 895a49d4d86a1e794592f4f9b9e711109015e475 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.5
"""CTMR list scanner"""
__author__ = "Fredrik Boulund"
__date__ = "2018"
__version__ = "0.4.0b"
from datetime import datetime
from pathlib import Path
import sys
from fbs_runtime.application_context import ApplicationContext, cached_property
from PyQt5 import QtCore
from PyQt5.QtWidgets import (
QWidget, QGridLayout, QGroupBox, QFormLayout, QVBoxLayout, QHBoxLayout,
QPushButton, QFileDialog, QLineEdit, QProgressBar, QLabel, QCheckBox,
QTextEdit, QRadioButton, QComboBox, QMenuBar, QListView, QTableView
)
from PyQt5.QtGui import QPixmap
# Handle sneaky hidden pandas imports for PyInstaller
import pandas._libs.tslibs.np_datetime
import pandas._libs.tslibs.nattype
import pandas._libs.skiplist
from sample_list import SampleList, ScannedSampleDB, __version__ as sample_list_version
class AppContext(ApplicationContext):  # 1. Subclass ApplicationContext
    """fbs application context: owns the Qt app and the main window."""

    def run(self):  # 2. Implement run()
        """Configure and show the main window, then enter the Qt event loop."""
        title = "CTMR List Scanner version {} (SampleList: version {})".format(
            __version__, sample_list_version
        )
        main_window = self.window
        main_window.setWindowTitle(title)
        main_window.resize(1000, 700)
        main_window.show()
        return self.app.exec_()  # 3. End run() with this line

    @cached_property
    def window(self):
        """Lazily construct the single MainWindow instance."""
        return MainWindow()
class MainWindow(QWidget):
    """Main window of the CTMR list scanner.

    Offers two modes via a combo box: searching scanned items against a
    loaded sample list, and registering scanned items into a new sample
    registration list. Every action is appended to a session log backed
    by a local SQLite database.
    """

    def __init__(self):
        super().__init__()
        self.keyPressEvent = self._keypress_event_action  # Define custom handling of keypress events
        self.focusNextPrevChild = lambda x: False  # Disable Qt intercepting TAB keypress event
        self.fluidx = ""
        self.search_list = ""
        self.sample_list = None
        self.dbfile = "CTMR_scanned_items.sqlite3"
        self.db = ScannedSampleDB(dbfile=self.dbfile)
        self._session_saved = False

        # Header artwork; `appctxt` is the module-level fbs context
        # created in the __main__ guard before this widget exists.
        pixmap_art = QPixmap(appctxt.get_resource("bacteria.png")).scaledToHeight(50)
        art = QLabel()
        art.setPixmap(pixmap_art)
        art.setAlignment(QtCore.Qt.AlignRight)
        pixmap_logo = QPixmap(appctxt.get_resource("CTMR_logo_white_background.jpg")).scaledToHeight(50)
        logo = QLabel()
        logo.setPixmap(pixmap_logo)
        logo.setAlignment(QtCore.Qt.AlignLeft)

        self.scantype_combo = QComboBox()
        self.scantype_combo.addItems([
            "Search: Search for samples in list(s)",
            "Register: Create sample registration list(s)",
        ])
        self.scantype_combo.currentTextChanged.connect(self.select_scantype)

        # Search: select and load lists
        self._input_search_list_button = QPushButton("Select search list")
        self._input_search_list_button.clicked.connect(self.select_search_list)
        self._headers_checkbox = QCheckBox("Headers")
        load_search_list_button = QPushButton("Load search list")
        load_search_list_button.clicked.connect(self.load_search_list)
        search_layout = QFormLayout()
        list_headers_hbox = QHBoxLayout()
        list_headers_hbox.addWidget(self._input_search_list_button)
        list_headers_hbox.addWidget(self._headers_checkbox)
        search_layout.addRow("1. Select list:", list_headers_hbox)
        search_layout.addRow("2. Load list:", load_search_list_button)
        self._search_list_group = QGroupBox("Select list of samples to search for")
        self._search_list_group.setLayout(search_layout)

        # Search: Manual scan
        self._scanfield = QLineEdit(placeholderText="Scan/type item ID")
        search_scan_button = QPushButton("Search for item")
        search_scan_button.clicked.connect(self.scan_button_action)
        manual_scan_layout = QGridLayout()
        manual_scan_layout.addWidget(self._scanfield, 0, 0)
        manual_scan_layout.addWidget(search_scan_button, 0, 1)
        self._manual_scan_group = QGroupBox("Search: Manual scan")
        self._manual_scan_group.setLayout(manual_scan_layout)

        # Search: FluidX CSV
        self._search_fluidx_csv_button = QPushButton("Select FluidX CSV")
        self._search_fluidx_csv_button.clicked.connect(self.select_search_fluidx)
        load_search_fluidx_button = QPushButton("Load FluidX CSV")
        load_search_fluidx_button.clicked.connect(self.load_search_fluidx)
        search_fluidx_layout = QGridLayout()
        search_fluidx_layout.addWidget(self._search_fluidx_csv_button)
        search_fluidx_layout.addWidget(load_search_fluidx_button)
        self._search_fluidx_group = QGroupBox("Search: FluidX CSV")
        self._search_fluidx_group.setLayout(search_fluidx_layout)

        # Register: Manual or FluidX CSV
        self._register_fluidx_csv_button = QPushButton("Select FluidX CSV")
        self._register_fluidx_csv_button.clicked.connect(self.select_register_fluidx)
        self._sample_type = QComboBox(editable=True)
        self._sample_type.addItems([
            "Fecal",
            "Vaginal swab",
            "Rectal swab",
            "Saliva",
            "Saliva swab",
            "Biopsy",
        ])
        self._load_register_fluidx_csv_button = QPushButton("Load FluidX CSV")
        self._load_register_fluidx_csv_button.clicked.connect(self.load_register_fluidx)
        self._select_sample_type_label = QLabel("Select sample type:")
        self._select_sample_box_label = QLabel("Select box name:")
        self._register_box = QLineEdit(placeholderText="Box name")
        self._register_scanfield = QLineEdit(placeholderText="Scan/type item ID")
        register_scan_button = QPushButton("Register item")
        register_scan_button.clicked.connect(self.register_scanned_item)
        register_fluidx_layout = QFormLayout()
        register_fluidx_layout.addRow("Select FluidX CSV:", self._register_fluidx_csv_button)
        register_fluidx_layout.addRow("Load FluidX CSV:", self._load_register_fluidx_csv_button)
        register_sample_type_hbox = QHBoxLayout()
        register_sample_type_hbox.addWidget(self._select_sample_type_label)
        register_sample_type_hbox.addWidget(self._sample_type)
        register_sample_type_hbox.addWidget(self._select_sample_box_label)
        register_sample_type_hbox.addWidget(self._register_box)
        register_scan_hbox = QHBoxLayout()
        register_scan_hbox.addWidget(self._register_scanfield)
        register_scan_hbox.addWidget(register_scan_button)
        register_fluidx_layout.addRow(register_sample_type_hbox)
        register_fluidx_layout.addRow(register_scan_hbox)
        self._register_fluidx_group = QGroupBox("Register: Create sample registration lists")
        self._register_fluidx_group.setLayout(register_fluidx_layout)

        # Session log
        self._session_log = QTextEdit()
        self._search_progress = QProgressBar()
        self._search_progress.setMinimum(0)
        self._search_progress.setMaximum(0)
        self.save_button = QPushButton("Save current session log")
        self.save_button.clicked.connect(self.save_report)
        self.export_button = QPushButton("Export log from old session")
        self.export_button.clicked.connect(self.export_sample_list)
        self.exit_button = QPushButton("Exit")
        self.exit_button.clicked.connect(self.exit)
        session_log_layout = QVBoxLayout()
        session_log_layout.addWidget(self._search_progress)
        session_log_layout.addWidget(self._session_log)
        button_row = QHBoxLayout()
        button_row.addWidget(self.save_button)
        button_row.addWidget(self.export_button)
        button_row.addWidget(self.exit_button)
        session_log_layout.addLayout(button_row)
        self._session_log_group = QGroupBox("Session log")
        self._session_log_group.setLayout(session_log_layout)

        # Overall layout
        layout = QGridLayout()
        layout.addWidget(art, 0, 1)
        layout.addWidget(logo, 0, 2)
        layout.addWidget(self.scantype_combo, 0, 0)
        layout.addWidget(self._search_list_group, 2, 0, 1, 3)
        layout.addWidget(self._search_fluidx_group, 3, 0, 1, 3)
        layout.addWidget(self._manual_scan_group, 4, 0, 1, 3)
        layout.addWidget(self._register_fluidx_group, 2, 0, 1, 3)
        layout.addWidget(self._session_log_group, 5, 0, 1, 3)
        self.setLayout(layout)

        self.session_log("Started CTMR List Scanner version {} (SampleList: version {})".format(
            __version__, sample_list_version,
        ))
        self.select_scantype()  # Set up the default chosen scantype layout

    def select_scantype(self):
        """Show/hide widget groups to match the selected scan mode."""
        selected_scantype = self.scantype_combo.currentText()
        self.session_log("Selected {}".format(selected_scantype))
        if selected_scantype == "Search: Search for samples in list(s)":
            self.scantype_combo.show()
            self._search_list_group.show()
            self._manual_scan_group.show()
            self._search_fluidx_group.show()
            self._register_fluidx_group.hide()
            self._search_progress.show()
            self._session_log_group.show()
        elif selected_scantype == "Register: Create sample registration list(s)":
            self.scantype_combo.show()
            self._search_list_group.hide()
            self._manual_scan_group.hide()
            self._search_fluidx_group.hide()
            self._register_fluidx_group.show()
            self._search_progress.hide()
            self._session_log_group.show()
            # Registration mode gets its own DB session immediately.
            self.db.create_session("REGISTRATION")

    def select_search_list(self):
        """Ask the user for a search-list file and remember its path."""
        self.search_list, _ = QFileDialog.getOpenFileName(self, "Select search list")
        self._input_search_list_button.setText(self.search_list)
        self.session_log("Selected search list '{}'".format(self.search_list))

    def load_search_list(self):
        """Load the previously selected search list and start a DB session."""
        if Path(self.search_list).is_file():
            self.db.create_session(self.search_list)
            self.sample_list = SampleList(
                self.search_list,
                self.db,
                self._headers_checkbox.isChecked()
            )
            self.session_log("Started new session: {}".format(
                self.db.session_id
            ))
            self.session_log("Loaded {} containing {} items.".format(
                self.search_list,
                self.sample_list.total_items,
            ))
            self._search_progress.setMaximum(self.sample_list.total_items)
        else:
            self.session_log("Cannot load file '{}'.".format(
                self.search_list
            ))

    def scan_button_action(self):
        """Search for the manually scanned/typed item and log the result."""
        scanned_item = self._scanfield.text()
        if not scanned_item:
            return False
        item = self.search_scanned_item(scanned_item)
        if item.id:
            self.session_log("Found item {} in column {}".format(
                item.item, item.column,
            ))
        else:
            self.session_log("Could not find item {} in lists.".format(
                item.item
            ))
        self._scanfield.setText("")  # Clear the field for the next scan

    def search_scanned_item(self, scanned_item):
        """Look up one item in the DB, record it, and update progress."""
        item = self.db.find_item(scanned_item)
        self.db.store_scanned_item(item)
        # Update progressbar with the number of distinct items scanned
        # in this session (repeated scans of the same item don't count).
        scanned_items = self.db.get_items_scanned_in_session(self.db.session_id)
        distinct_scanned_items = set((item[1:] for item in scanned_items))
        self._search_progress.setValue(len(distinct_scanned_items))
        # BUGFIX: QProgressBar.value/maximum are methods; the original
        # compared the bound methods themselves (always unequal), so the
        # COMPLETED message could never be shown. Call them instead.
        if self._search_progress.value() == self._search_progress.maximum():
            self.session_log(
                "COMPLETED: All {} items ".format(self.sample_list.total_items)
                + "in file {} have been scanned.".format(self.sample_list.filename)
            )
        return item

    def register_scanned_item(self):
        """Register the manually scanned item with its type and box name."""
        item = self._register_scanfield.text()
        sample_type = self._sample_type.currentText()
        box = self._register_box.text()
        self.session_log("Registering item '{}' of type '{}' into box '{}'".format(
            item, sample_type, box
        ))
        self.db.register_scanned_item(item, sample_type, box)
        self._register_scanfield.setText("")

    def select_search_fluidx(self):
        """Ask for a FluidX CSV to search and remember its path."""
        self.fluidx, _ = QFileDialog.getOpenFileName(self, "Select FluidX CSV")
        self._search_fluidx_csv_button.setText(self.fluidx)
        self.session_log("Selected FluidX CSV '{}'".format(self.fluidx))

    def select_register_fluidx(self):
        """Ask for a FluidX CSV to register and remember its path."""
        self.fluidx, _ = QFileDialog.getOpenFileName(self, "Select FluidX CSV")
        self._register_fluidx_csv_button.setText(self.fluidx)
        self.session_log("Selected FluidX CSV '{}'".format(self.fluidx))

    def load_search_fluidx(self):
        """Search every item from the selected FluidX CSV in the lists."""
        if not Path(self.fluidx).is_file():
            self.session_log("ERROR: Cannot load FluidX file")
            return
        if not self.sample_list:
            self.session_log("ERROR: Load search list before loading FluidX file.")
            return
        self.session_log("Loading items from FluidX CSV: '{}'".format(self.fluidx))
        scanned_items = self.sample_list.scan_fluidx_list(self.fluidx)
        for position, barcode, _, rack_id in scanned_items:
            item = self.search_scanned_item(barcode)
            if item.id:
                self.session_log("Found item {} from pos {} in rack {} of type {}.".format(
                    item.item, position, rack_id, item.column,
                ))
            else:
                self.session_log("Could not find item {} in lists!".format(
                    item.item
                ))

    def load_register_fluidx(self):
        """Register every item from the selected FluidX CSV."""
        if not Path(self.fluidx).is_file():
            self.session_log("ERROR: Cannot load FluidX file")
            return
        sample_type = self._sample_type.currentText()
        self.session_log("Registering items from FluidX CSV: '{}' as sample type '{}'".format(
            self.fluidx, sample_type
        ))

        def scan_fluidx_list(fluidx_file):
            # Local import keeps pandas out of startup time for users
            # who never load a FluidX CSV.
            import pandas as pd
            items = pd.read_csv(fluidx_file, header=None)
            self.session_log("FluidX shape is (rows, columns): {}".format(items.shape))
            return items.values.tolist()

        fluidx_items = scan_fluidx_list(self.fluidx)
        for position, barcode, _, rack_id in fluidx_items:
            self.db.register_scanned_item(barcode, sample_type, rack_id, position)
            self.session_log("Registered item '{}' of type '{}' in box '{}' at position '{}'".format(
                barcode, sample_type, rack_id, position
            ))

    def session_log(self, message):
        """Append a timestamped message to the on-screen session log."""
        self._session_log.append("{datetime}: {message}".format(
            datetime=datetime.now(),
            message=message,
        ))

    def save_report(self):
        """Write the session report (CSV) and the session log (text) to disk."""
        selected_scantype = self.scantype_combo.currentText()
        if selected_scantype == "Search: Search for samples in list(s)" and not self.search_list:
            self.session_log("ERROR: Cannot save report without first loading search list.")
            return
        outfolder = QFileDialog.getExistingDirectory(self, "Select directory to save report to")
        if Path(outfolder).is_dir():
            if selected_scantype == "Register: Create sample registration list(s)":
                input_stem = "Registered_samples"
            else:
                input_stem = Path(self.search_list).stem
            # Colons/spaces are not filesystem-safe on all platforms.
            fn_datetime = self.db.session_datetime.replace(":", "-").replace(" ", "_")
            session_basename = Path("{}_{}_{}".format(
                fn_datetime, self.db.session_id, input_stem,
            ))
            # str / Path works via Path.__rtruediv__ and yields a Path.
            session_report = outfolder / session_basename.with_suffix(".csv")
            if selected_scantype == "Register: Create sample registration list(s)":
                self.db.export_register_report(str(session_report))
            else:
                self.db.export_session_report(str(session_report))
            self.session_log("Saved scanning session report to: {}".format(session_report))
            session_log = outfolder / session_basename.with_suffix(".log")
            with open(str(session_log), 'w') as outf:
                outf.write(self._session_log.toPlainText())
            self.session_log("Wrote session log to {}".format(session_log))
            self._session_saved = True
        else:
            self.session_log("ERROR: Could not save report to {}".format(outfolder))

    def export_sample_list(self):
        """Open the window for exporting reports from old sessions."""
        self.export_old_session_window = ExportOldSessionWindow(self, dbfile=self.dbfile)
        self.export_old_session_window.show()

    def exit(self):
        """Quit, requiring a second press if the session log is unsaved."""
        if self._session_saved:
            exit()
        else:
            self.session_log("Exit button pressed,"
                             " but session log hasn't been saved."
                             " Press again to confirm exit!"
                             )
            self._session_saved = True

    def _keypress_event_action(self, key):
        """Treat TAB (sent by barcode scanners) as the scan/register trigger."""
        if key.key() == QtCore.Qt.Key_Tab:
            selected_scantype = self.scantype_combo.currentText()
            if selected_scantype == "Search: Search for samples in list(s)":
                self.scan_button_action()
            elif selected_scantype == "Register: Create sample registration list(s)":
                self.register_scanned_item()
class ExportOldSessionWindow(QWidget):
    """Window listing old scanning sessions and exporting their reports."""

    def __init__(self, parent, dbfile):
        super(ExportOldSessionWindow, self).__init__()
        self.setWindowTitle("Export old scanning session")
        self.resize(700, 400)
        self.db = ScannedSampleDB(dbfile=dbfile)
        self._parent = parent  # MainWindow, used for session_log messages

        self.session_list = QTableView()
        self.session_list.setShowGrid(False)
        self.session_list.setSelectionBehavior(1)  # Select only rows
        self.session_list.setSortingEnabled(True)
        header = ["Datetime", "List filename", "Session ID"]
        self.session_list.setModel(SessionTableModel(header=header, table_data=self.db.get_sessions_list()))
        self.session_list.resizeColumnsToContents()
        self.session_list.resizeRowsToContents()

        self.export_button = QPushButton("Export log from selected session")
        self.export_button.clicked.connect(self.export_session)
        self.close_button = QPushButton("Close")
        self.close_button.clicked.connect(self.close_window)

        layout = QGridLayout()
        layout.addWidget(self.session_list, 0, 0, 1, 2)
        layout.addWidget(self.export_button, 1, 0, 1, 1)
        layout.addWidget(self.close_button, 1, 1, 1, 1)
        self.setLayout(layout)

    @staticmethod
    def grouper(iterable, n):
        """ Group iterable into n-size groups. `iterable` must have a multiple of `n` values."""
        grouped = []
        current_group = []
        for idx, thing in enumerate(iterable, start=1):
            current_group.append(thing)
            # BUGFIX: the original tested `idx % 3`, ignoring the `n`
            # parameter; it only worked because all callers passed n=3.
            if idx % n == 0:
                grouped.append(current_group)
                current_group = []
        return grouped

    def export_session(self):
        """Export a report for each selected row of the session table."""
        outfolder = QFileDialog.getExistingDirectory(self, "Select directory to export session report to")
        if Path(outfolder).is_dir():
            # selectedIndexes() returns cell indexes in row-major order;
            # each session row spans exactly 3 columns.
            for selected_row in self.grouper(self.session_list.selectedIndexes(), 3):
                selected_data = [
                    self.session_list.model().data(selected_row[0], QtCore.Qt.DisplayRole),
                    self.session_list.model().data(selected_row[1], QtCore.Qt.DisplayRole),
                    self.session_list.model().data(selected_row[2], QtCore.Qt.DisplayRole),
                ]
                self._export_session_to_folder(
                    outfolder=outfolder,
                    datetime_str=selected_data[0],
                    filename_stem=Path(selected_data[1]).stem,
                    session_id=selected_data[2],
                )
        else:
            self._parent.session_log("ERROR: No valid output folder selected")

    def _export_session_to_folder(self, outfolder, datetime_str, filename_stem, session_id):
        """Write one session's CSV report into `outfolder`.

        `datetime_str` is the session timestamp as displayed in the table
        (renamed from `datetime`, which shadowed the module import).
        """
        fn_datetime = datetime_str.replace(":", "-").replace(" ", "_")
        session_basename = Path("{}_{}_{}".format(
            fn_datetime, session_id, filename_stem,
        ))
        session_report = outfolder / session_basename.with_suffix(".csv")
        self.db.export_session_report(str(session_report), session_id=session_id)
        self._parent.session_log("Saved scanning session report to: {}".format(session_report))

    def close_window(self):
        """Hide (not destroy) this window."""
        self.hide()
class SessionTableModel(QtCore.QAbstractTableModel):
    """Read-only table model over a list of session rows.

    `header` holds the column titles; `table_data` is a list of rows,
    each indexable by column number.
    """

    def __init__(self, header, table_data):
        super(SessionTableModel, self).__init__()
        self.header = header
        self.table_data = table_data

    def rowCount(self, parent):
        """Number of session rows."""
        return len(self.table_data)

    def columnCount(self, parent):
        """Number of columns, taken from the header."""
        return len(self.header)

    def data(self, index, role):
        """Return the cell value for DisplayRole, None otherwise."""
        if not index.isValid():
            return None
        if role != QtCore.Qt.DisplayRole:
            return None
        row, col = index.row(), index.column()
        return self.table_data[row][col]

    def headerData(self, col, orientation, role):
        """Return the column title for the horizontal header."""
        is_title = (orientation == QtCore.Qt.Horizontal
                    and role == QtCore.Qt.DisplayRole)
        return self.header[col] if is_title else None

    def sort(self, ncol, order):
        """Sort rows by column `ncol`, then notify attached views."""
        # Sort ascending first and reverse afterwards (kept as two steps
        # to preserve the original ordering of equal keys exactly).
        ordered = sorted(self.table_data, key=lambda row: row[ncol])
        if order == QtCore.Qt.DescendingOrder:
            ordered.reverse()
        self.table_data = ordered
        self.layoutChanged.emit()
# Entry point: create the fbs context first — MainWindow reads the
# module-level `appctxt` global for bundled resources.
if __name__ == '__main__':
    appctxt = AppContext()  # 4. Instantiate the subclass
    exit_code = appctxt.run()  # 5. Invoke run()
    sys.exit(exit_code)
46e72776df2b4121bbdb1420024b89de418060c1 | 4,411 | py | Python | bento/loaders/noop.py | ARM-software/bento-linker | b6cbdb6b7b35ebd96fcac070e5045919acdb65bf | [
"BSD-3-Clause"
] | 20 | 2021-01-21T08:23:34.000Z | 2021-12-10T07:45:58.000Z | bento/loaders/noop.py | QPC-database/bento-linker | b6cbdb6b7b35ebd96fcac070e5045919acdb65bf | [
"BSD-3-Clause"
] | 1 | 2021-04-26T14:44:46.000Z | 2021-09-04T22:13:26.000Z | bento/loaders/noop.py | QPC-database/bento-linker | b6cbdb6b7b35ebd96fcac070e5045919acdb65bf | [
"BSD-3-Clause"
] | 6 | 2021-01-24T09:29:10.000Z | 2021-11-22T16:43:35.000Z | #
# Default loader that doesn't do anything special
#
# Copyright (c) 2020, Arm Limited. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
from .. import loaders
import os
@loaders.loader
class NoOpLoader(loaders.Loader):
    """
    A loader that does nothing, allowing execution in
    the specified flash and RAM.
    """
    __argname__ = "noop"
    __arghelp__ = __doc__

    def __init__(self):
        super().__init__()

    def box_parent(self, parent, box):
        # Export a weak __box_<name>_load hook on the parent; another
        # loader may override it, otherwise our no-op stub is emitted.
        super().box_parent(parent, box)
        self._load_plug = parent.addexport(
            '__box_%s_load' % box.name, 'fn() -> err',
            scope=parent.name, source=self.__argname__, weak=True)

    def build_mk(self, output, box):
        """Emit Makefile rules that pack loadable memories into a .box elf."""
        # create boxing rule, to be invoked if embedding an elf is needed
        data_init = None
        if any(section.name == 'data'
                for memory in box.memoryslices
                for section in memory.sections):
            data_init = box.consume('rp', 0)

        # Collect every persistent ('p' mode) memory region, both from
        # this box's slices and from its child boxes.
        loadmemories = []
        for memory in box.memoryslices:
            if 'p' in memory.mode:
                loadmemories.append((memory.name, memory,
                    [section.name for section in memory.sections]))
        for child in box.boxes:
            for memory in child.memories:
                if 'p' in memory.mode:
                    name = 'box.%s.%s' % (child.name, memory.name)
                    loadmemories.append((name, memory, [name]))

        out = output.rules.append(
            doc="a .box is a .elf containing a single section for "
                "each loadable memory region")
        out.printf('%%.box: %%.elf %(memory_boxes)s',
            memory_boxes=' '.join(
                '%.box.'+name for name, _, _ in loadmemories))
        with out.indent():
            out.writef('$(strip $(OBJCOPY) $< $@')
            with out.indent():
                # objcopy won't let us create an empty elf, but we can
                # fake it by treating the input as binary and striping
                # all implicit sections. Needed to get rid of program
                # segments which create warnings later.
                out.writef(' \\\n-I binary')
                out.writef(' \\\n-O elf32-littlearm')
                out.writef(' \\\n-B arm')
                out.writef(' \\\n--strip-all')
                out.writef(' \\\n--remove-section=*')
                # $(word 2,$^), $(word 3,$^)... are the per-memory .box
                # prerequisites, hence n starts at 2.
                for i, (name, memory, _) in enumerate(loadmemories):
                    with out.pushattrs(
                            memory=name,
                            addr=memory.addr,
                            n=2+i):
                        out.writef(' \\\n--add-section '
                            '.box.%(box)s.%(memory)s=$(word %(n)d,$^)')
                        out.writef(' \\\n--change-section-address '
                            '.box.%(box)s.%(memory)s=%(addr)#.8x')
                        out.writef(' \\\n--set-section-flags '
                            '.box.%(box)s.%(memory)s='
                            'contents,alloc,load,readonly,data')
            out.printf(')')

        # One rule per loadable memory: extract just its sections from
        # the elf into a raw binary blob.
        for name, _, sections in loadmemories:
            out = output.rules.append()
            out.printf('%%.box.%(memory)s: %%.elf', memory=name)
            with out.indent():
                out.writef('$(strip $(OBJCOPY) $< $@')
                with out.indent():
                    for section in sections:
                        out.writef(' \\\n--only-section .%(section)s',
                            section=section)
                        # workaround to get the data_init section in the
                        # right place
                        if section == 'text' and data_init is not None:
                            out.writef(' \\\n--only-section .data')
                    out.printf(' \\\n-O binary)\n')

        super().build_mk(output, box)

    def build_parent_c(self, output, parent, box):
        """Emit the C stub for __box_<name>_load in the parent, if needed."""
        super().build_parent_c(output, parent, box)

        if not self._load_plug.links:
            # if someone else provides load we can just skip this
            return

        output.decls.append('//// %(box)s loading ////')
        out = output.decls.append()
        out.printf("static int __box_%(box)s_load(void) {")
        with out.indent():
            out.printf("// default loader does nothing")
            out.printf("return 0;")
        out.printf("}")
| 40.842593 | 73 | 0.502834 | 4,207 | 0.953752 | 0 | 0 | 4,223 | 0.957379 | 0 | 0 | 1,406 | 0.318749 |
46e9e9cdbe1c2421007218ef09b2430850f50384 | 8,007 | py | Python | tests/cli/test_output_option.py | mohammad-sdsc/renku-python | 3a7bf2339ab56a3bc00a689bb27a864bb5bf55da | [
"Apache-2.0"
] | null | null | null | tests/cli/test_output_option.py | mohammad-sdsc/renku-python | 3a7bf2339ab56a3bc00a689bb27a864bb5bf55da | [
"Apache-2.0"
] | null | null | null | tests/cli/test_output_option.py | mohammad-sdsc/renku-python | 3a7bf2339ab56a3bc00a689bb27a864bb5bf55da | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2019-2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test behavior of ``--output`` option."""
import os
from pathlib import Path
from renku.core.models.entities import Collection
from renku.core.models.provenance.activities import Activity
def read_all_workflow_files(client, glob='*.yaml'):
    """
    Return an array where its elements are content of CWL file
    found in the Renku project.
    """
    activities = []
    for workflow_file in client.workflow_path.glob(glob):
        activities.append(Activity.from_yaml(workflow_file, client=client))
    return activities
def test_run_succeeds_normally(cli, client, subdirectory):
    """Test when an output is detected"""
    foo = os.path.relpath(client.path / 'foo', os.getcwd())
    exit_code, activity = cli('run', 'touch', foo)
    plan = activity.association.plan

    assert exit_code == 0
    assert len(plan.inputs) == 0
    assert len(plan.outputs) == 1
    assert plan.outputs[0].produces.path == 'foo'
def test_when_no_change_in_outputs_is_detected(cli, subdirectory):
    """With no modified output, a second ``renku run`` must fail."""
    cli('run', 'touch', 'foo')
    exit_code, _ = cli('run', 'ls', 'foo')
    assert exit_code == 1
def test_with_no_output_option(cli, client, subdirectory):
    """With ``--no-output`` the touched file is recorded as an input."""
    target = os.path.relpath(client.path / 'foo', os.getcwd())
    cli('run', 'touch', target)
    exit_code, activity = cli('run', '--no-output', 'touch', target)
    assert exit_code == 0
    plan = activity.association.plan
    assert len(plan.inputs) == 1
    assert str(plan.inputs[0].consumes.path) == 'foo'
    assert len(plan.outputs) == 0
def test_explicit_outputs_directory(cli, client, subdirectory):
    """A directory passed via ``--output`` is recorded as a Collection."""
    out_dir = Path(os.path.relpath(client.path / 'foo', os.getcwd()))
    out_dir.mkdir()
    inner_file = out_dir / 'file'
    exit_code, activity = cli(
        'run', '--output', str(out_dir), 'touch', str(inner_file))
    assert exit_code == 0
    plan = activity.association.plan
    assert len(plan.inputs) == 0
    assert len(plan.outputs) == 1
    produced = plan.outputs[0].produces
    assert isinstance(produced, Collection)
    assert produced.path == 'foo'
def test_explicit_output_results(cli, client, subdirectory):
    """An explicit ``--output`` yields the same results as a normal run."""
    target = os.path.relpath(client.path / 'foo', os.getcwd())
    cli('run', 'touch', target)
    cli('run', '--output', target, 'touch', target)
    workflows = read_all_workflow_files(client)
    # One workflow file per executed run is expected.
    assert len(workflows) == 2
def test_explicit_outputs_and_normal_outputs(cli, client, subdirectory):
    """Explicit and auto-detected outputs may coexist in one run."""
    foo_path = os.path.relpath(client.path / 'foo', os.getcwd())
    bar_path = os.path.relpath(client.path / 'bar', os.getcwd())
    cli('run', 'touch', foo_path)
    exit_code, activity = cli(
        'run', '--output', foo_path, 'touch', foo_path, bar_path)
    assert exit_code == 0
    plan = activity.association.plan
    plan.inputs.sort(key=lambda e: e.position)
    assert len(plan.outputs) == 2
    # 'bar' is auto-detected, 'foo' was declared explicitly.
    assert str(plan.outputs[0].produces.path) == 'bar'
    assert str(plan.outputs[1].produces.path) == 'foo'
def test_explicit_outputs_and_std_output_streams(cli, client, subdirectory):
    """Unchanged stdout-redirect targets can be marked as explicit outputs."""
    exit_code, _ = cli('run', 'sh', '-c', 'echo foo > bar')
    assert exit_code == 0
    # Re-running with an unchanged result must fail ...
    exit_code, _ = cli('run', 'sh', '-c', 'echo foo > bar')
    assert exit_code == 1
    # ... unless the redirect target is declared as an explicit output.
    exit_code, _ = cli('run', '--output', 'bar', 'sh', '-c', 'echo foo > bar')
    assert exit_code == 0
def test_output_directory_with_output_option(cli, client, subdirectory):
    """Existing files in an ``--output`` directory must survive a run."""
    out_dir = os.path.relpath(client.path / 'outdir', os.getcwd())
    script = ('sh', '-c', 'mkdir -p "$0"; touch "$0/$1"')
    cli('run', *script, out_dir, 'foo')
    exit_code, _ = cli('run', '--output', out_dir, *script, out_dir, 'bar')
    assert exit_code == 0
    # Both the old and the new file must be present afterwards.
    for name in ('foo', 'bar'):
        assert (client.path / 'outdir' / name).exists()
def test_output_directory_without_separate_outputs(cli, client):
    """Files inside a created directory are not listed as separate outputs.

    See https://github.com/SwissDataScienceCenter/renku-python/issues/387
    """
    script = ('sh', '-c', 'mkdir -p "$0"; touch "$0/$1"')
    exit_code, activity = cli('run', *script, 'outdir', 'foo')
    assert exit_code == 0
    plan = activity.association.plan
    assert len(plan.outputs) == 1
    assert isinstance(plan.outputs[0].produces, Collection)
def test_explicit_inputs_must_exist(cli):
    """A run declaring a non-existent explicit input must fail."""
    exit_code, _ = cli('run', '--input', 'foo', 'touch', 'bar')
    assert exit_code == 1
def test_explicit_inputs_are_inside_repo(cli):
    """Explicit inputs outside the Renku repository are rejected."""
    exit_code, _ = cli('run', '--input', '/tmp', 'touch', 'foo')
    assert exit_code == 1
def test_explicit_inputs_and_outputs_are_listed(cli, client):
    """Test explicit inputs and outputs will be in generated CWL file"""
    foo = Path(os.path.relpath(client.path / 'foo', os.getcwd()))
    foo.mkdir()
    # Create the files that will later be declared as inputs/outputs.
    cli('run', 'touch', 'foo/file')
    cli('run', 'touch', 'bar', 'baz')
    exit_code, cwl = cli(
        'run', '--input', 'foo', '--input', 'bar', '--output', 'baz', 'echo'
    )
    assert 0 == exit_code
    cwl = cwl.association.plan
    assert 2 == len(cwl.inputs)
    # Sort by path so the assertions below are deterministic.
    cwl.inputs.sort(key=lambda e: e.consumes.path)
    # Explicit inputs/outputs have no command-line position.
    assert cwl.inputs[0].position is None
    assert 'bar' == str(cwl.inputs[0].consumes.path)
    assert cwl.inputs[1].position is None
    assert 'foo' == str(cwl.inputs[1].consumes.path)
    # 'foo' is a directory, so it is consumed as a Collection.
    assert isinstance(cwl.inputs[1].consumes, Collection)
    assert cwl.outputs[0].position is None
    assert not isinstance(cwl.outputs[0].produces, Collection)
    assert 'baz' == cwl.outputs[0].produces.path
def test_explicit_inputs_can_be_in_inputs(cli, client, subdirectory):
    """An explicit input that is also auto-detected stays a normal input."""
    target = os.path.relpath(client.path / 'foo', os.getcwd())
    cli('run', 'touch', target)
    exit_code, activity = cli(
        'run', '--input', target, '--no-output', 'ls', target)
    assert exit_code == 0
    plan = activity.association.plan
    assert len(plan.inputs) == 1
    only_input = plan.inputs[0]
    assert str(only_input.consumes.path) == 'foo'
    assert not isinstance(only_input.consumes, Collection)
    # A normal (auto-detected) input keeps its command-line position.
    assert only_input.position is not None
def test_explicit_inputs_in_subdirectories(cli, client):
    """Test explicit inputs that are in sub-dirs are made accessible"""
    # Set up a script with hard dependency
    cli('run', '--no-output', 'mkdir', 'foo')
    cli('run', 'sh', '-c', 'echo "some changes" > foo/bar')
    cli('run', 'sh', '-c', 'echo "cat foo/bar" > script.sh')
    # Run the script, declaring both its data and itself as explicit inputs.
    exit_code, _ = cli(
        'run', '--input', 'foo/bar', '--input', 'script.sh', 'sh', '-c',
        'sh script.sh > output'
    )
    assert 0 == exit_code
    # Status must be dirty if foo/bar changes
    cli('run', 'sh', '-c', 'echo "new changes" > foo/bar')
    exit_code, _ = cli('status')
    assert 1 == exit_code
    # Updating must succeed and regenerate all dependent files.
    exit_code, _ = cli('update')
    assert 0 == exit_code
    assert (client.path / 'foo' / 'bar').exists()
    assert (client.path / 'script.sh').exists()
    assert (client.path / 'output').exists()
| 33.78481 | 78 | 0.653678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,919 | 0.364329 |
46ea0e15fcb12765259fb6e7e21ff4cff83a6d51 | 2,458 | py | Python | backend/blog/util/config.py | o8oo8o/blog | 2a6f44f86469bfbb472dfd1bec4238587d8402bf | [
"MIT"
] | null | null | null | backend/blog/util/config.py | o8oo8o/blog | 2a6f44f86469bfbb472dfd1bec4238587d8402bf | [
"MIT"
] | null | null | null | backend/blog/util/config.py | o8oo8o/blog | 2a6f44f86469bfbb472dfd1bec4238587d8402bf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding=utf-8
import logging
import redis
from typing import Any
from conf import dev_conf as conf
from util import singleton
@singleton
class Config:
    """Expose the project configuration module (``conf``) as a dict-style
    accessor; by default the settings from ``conf`` are used.

    Decorated with ``@singleton`` so every instantiation returns the same
    object and the Redis client is created at most once.
    """
    def __init__(self):
        # The configuration module whose attributes are the settings.
        self.config = conf
        # Lazily created Redis client, cached by get_redis().
        self.redis_db = None
    def get_dict(self, exclude: str = "__") -> dict:
        """Convert the configuration module into a dict.

        Attribute names starting with *exclude* (the dunder prefix by
        default) are skipped.

        :param exclude: name prefix to ignore.
        :return: dict mapping configuration names to values.
        """
        config_data = {}
        for key in dir(self.config):
            if not str(key).startswith(exclude):
                config_data[key] = getattr(self.config, key)
        return config_data
    def get_conf(self, key_name: str) -> Any:
        """Return a single configuration value.

        :param key_name: attribute name in the configuration module.
        :return: the configured value (KeyError if the name is absent).
        """
        all_config = self.get_dict()
        return all_config[key_name]
    def get_db_url(self, db_role="master", db_name="default") -> str:
        """Build a SQLAlchemy-style MySQL connection URL.

        :param db_role: replication role, e.g. ``master``.
        :param db_name: logical database entry under that role.
        :return: a ``mysql+pymysql://`` URL; on a bad role/name the error
            is logged and the function implicitly returns None.
        """
        try:
            db_config = self.get_conf("mysql").get(db_role).get(db_name)
        except AttributeError as error_info:
            logging.error(f"Config: {error_info} Key!")
        else:
            tmp = "mysql+pymysql://{username}:{password}@{hostname}:" + \
                  "{port}/{database}?charset={charset}"
            db_url = tmp.format(**db_config)
            return db_url
    def get_redis(self, db_role="master", db_name="default") -> redis.Redis:
        """Return a Redis client built from the configuration.

        The client is cached on first success and reused afterwards.

        :param db_role: replication role, e.g. ``master``.
        :param db_name: logical database entry under that role.
        :return: a ``redis.Redis`` backed by a connection pool, or None
            implicitly when the configuration lookup fails.
        """
        if self.redis_db:
            return self.redis_db
        try:
            db_conf = self.get_conf("redis").get(db_role).get(db_name)
            pool = redis.ConnectionPool(
                host=db_conf["host"],
                port=db_conf["port"],
                db=db_conf["db"]
            )
            self.redis_db = redis.Redis(connection_pool=pool)
        except AttributeError as db_exp:
            logging.error(f"get_redis_error:{db_exp}")
        else:
            return self.redis_db
class ConfigMixIn:
    """Mix-in that gives other classes a shared ``conf`` Config instance."""
    conf = Config()
if __name__ == '__main__':
    # Smoke test: the singleton decorator must hand back identical objects,
    # and the cached Redis client must likewise be shared between them.
    first = Config()
    second = Config()
    print(id(first))
    print(id(second))
    redis_one = first.get_redis()
    redis_two = second.get_redis()
    print(id(redis_one))
    print(id(redis_two))
    redis_one.set("ka", "va")
| 23.864078 | 76 | 0.537022 | 2,270 | 0.8605 | 0 | 0 | 2,195 | 0.83207 | 0 | 0 | 921 | 0.349128 |
46ebb2375f6354f283088526e6acc20b627eadfb | 1,340 | py | Python | rss/resources.py | victorchen796/reddit-submission-scraper | 01401c6b35af8547eb9640e441a28633c38408bd | [
"MIT"
] | null | null | null | rss/resources.py | victorchen796/reddit-submission-scraper | 01401c6b35af8547eb9640e441a28633c38408bd | [
"MIT"
] | null | null | null | rss/resources.py | victorchen796/reddit-submission-scraper | 01401c6b35af8547eb9640e441a28633c38408bd | [
"MIT"
] | null | null | null | import json
import os
script_path = os.path.abspath(__file__)
script_dir = os.path.split(script_path)[0]
def get_config():
    """Read and return the scraper configuration from resources/config.json.

    :return: the parsed configuration (typically a dict).
    """
    rel_path = 'resources/config.json'
    path = os.path.join(script_dir, rel_path)
    with open(path, 'r') as f:
        # json.load parses directly from the file object
        # (idiomatic replacement for json.loads(f.read())).
        return json.load(f)
def get_submissions():
    """Read and return stored submissions from resources/submissions.json.

    :return: the parsed submissions data.
    """
    rel_path = 'resources/submissions.json'
    path = os.path.join(script_dir, rel_path)
    with open(path, 'r') as f:
        # json.load parses directly from the file object.
        return json.load(f)
def get_subreddits():
    """Read and return the subreddit list from resources/subreddits.json.

    :return: the parsed subreddits data.
    """
    rel_path = 'resources/subreddits.json'
    path = os.path.join(script_dir, rel_path)
    with open(path, 'r') as f:
        # json.load parses directly from the file object.
        return json.load(f)
def update_config(config):
    """Persist *config* to resources/config.json, pretty-printed.

    :param config: JSON-serializable configuration object.
    """
    rel_path = 'resources/config.json'
    path = os.path.join(script_dir, rel_path)
    with open(path, 'w') as f:
        # json.dump streams to the file (idiomatic replacement for
        # f.write(json.dumps(...))).
        json.dump(config, f, indent=2)
def update_submissions(submissions):
    """Persist *submissions* to resources/submissions.json, pretty-printed.

    ``default=str`` stringifies non-JSON values (e.g. datetimes), matching
    the original serialization behavior.

    :param submissions: submissions data to store.
    """
    rel_path = 'resources/submissions.json'
    path = os.path.join(script_dir, rel_path)
    with open(path, 'w') as f:
        json.dump(submissions, f, indent=2, default=str)
def update_subreddits(subreddits):
    """Persist *subreddits* to resources/subreddits.json, pretty-printed.

    :param subreddits: subreddit data to store.
    """
    rel_path = 'resources/subreddits.json'
    path = os.path.join(script_dir, rel_path)
    with open(path, 'w') as f:
        # json.dump streams to the file object.
        json.dump(subreddits, f, indent=2)
46eca7e793a2053c4124a22d938ceda349298ccd | 907 | py | Python | tests/test/algorithms/convert/OsuToQua/test.py | Bestfast/reamberPy | 91b76ca6adf11fbe8b7cee7c186481776a4d7aaa | [
"MIT"
] | null | null | null | tests/test/algorithms/convert/OsuToQua/test.py | Bestfast/reamberPy | 91b76ca6adf11fbe8b7cee7c186481776a4d7aaa | [
"MIT"
] | null | null | null | tests/test/algorithms/convert/OsuToQua/test.py | Bestfast/reamberPy | 91b76ca6adf11fbe8b7cee7c186481776a4d7aaa | [
"MIT"
] | null | null | null | import unittest
from reamber.algorithms.convert.OsuToQua import OsuToQua
from reamber.osu.OsuMap import OsuMap
from tests.test.RSC_PATHS import *
# import logging
#
# logging.basicConfig(filename="event.log", filemode="w+", level=logging.DEBUG)
class TestOsuToQua(unittest.TestCase):
    """Smoke tests for converting osu! maps to Quaver maps.

    Each test only checks that conversion completes without raising; the
    writeFile calls are left commented out for manual inspection.
    """
    # @profile
    def test_osu1(self):
        # Complex BPM Points
        osu = OsuMap.readFile(OSU_CARAVAN)
        qua = OsuToQua.convert(osu)
        # qua.writeFile("out.qua")
    # @profile
    def test_osu2(self):
        # Stops
        osu = OsuMap.readFile(OSU_ESCAPES)
        qua = OsuToQua.convert(osu)
        # qua.writeFile("out.qua")
    # @profile
    def test_osu3(self):
        # Complex BPM
        osu = OsuMap.readFile(OSU_GRAVITY)
        qua = OsuToQua.convert(osu)
        # Also exercise setting metadata on the converted map.
        qua.music = "Gravity.mp3"
        # qua.writeFile("out.qua")
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
46ed48e43be2fce5de7ba35ab5fb8953a3a9a2ad | 1,644 | py | Python | connection_events.py | Teplitsa/false-security-1 | 9e5cc23c8bf324d923965bb2624cac4994891154 | [
"MIT"
] | 1 | 2020-10-01T17:44:26.000Z | 2020-10-01T17:44:26.000Z | connection_events.py | Teplitsa/false-security-1 | 9e5cc23c8bf324d923965bb2624cac4994891154 | [
"MIT"
] | null | null | null | connection_events.py | Teplitsa/false-security-1 | 9e5cc23c8bf324d923965bb2624cac4994891154 | [
"MIT"
] | 1 | 2021-10-05T12:09:07.000Z | 2021-10-05T12:09:07.000Z | from flask import g
from flask_socketio import SocketIO, emit
from logic.game_manager import GameManager
from logic.player_manager import PlayerManager
from logic.player_logic import PlayerLogic
from globals import socketio, db
from session import SessionHelper, SessionKeys
from utils.response import Response
from utils.socketio_helper import commit_and_notify_if_dirty
@socketio.on('disconnect')
def on_disconnect():
    """Socket.IO handler: mark the session's player as offline."""
    change_state(False)
@socketio.on('connect')
def on_connect():
    """Socket.IO handler: mark the session's player as online."""
    change_state(True)
def change_state(is_online: bool):
    """Update the current player's online flag and notify the game.

    Looks the player up via the session; silently returns when the session
    has no player. On any failure the DB transaction is rolled back, the
    session is removed and the exception re-raised.

    :param is_online: new online status for the session's player.
    """
    if SessionHelper.has(SessionKeys.PLAYER_ID):
        pm = PlayerManager(db)
        player = pm.get_my_player()
        if player is None:
            return
        try:
            gm = GameManager(db)
            # NOTE(review): get_my_game(optional=True) suggests it may
            # return None, in which case set_dirty() would raise — confirm.
            g.game = gm.get_my_game(optional=True)
            g.game.set_dirty()
            player.set_online(is_online)
            # Possibly hand over the admin role before committing.
            change_admin(is_online, player)
            commit_and_notify_if_dirty()
            db.session.remove()
        except Exception as e:
            # emit('waitroom', Response.Error("Не удалось сменить статус").as_dicts())
            db.session.rollback()
            db.session.remove()
            raise
def change_admin(is_online: bool, player: PlayerLogic):
    """Hand the admin role to another online player when the admin leaves.

    Only acts when *player* is the admin and just went offline. The first
    other online non-admin player (if any) becomes the new admin, the
    change is committed and the game is notified.

    :param is_online: the player's new online status.
    :param player: the player whose state just changed.
    """
    if not is_online and player.model.isAdmin:
        gm = GameManager(db)
        # Pick the first online, non-admin player as the successor.
        new_adm = next((p.model for p in gm.get_my_game().get_players(True) if (not p.model.isAdmin and p.model.isOnline)), None)
        if new_adm is not None:
            new_adm.isAdmin = True
            player.model.isAdmin = False
            db.session.commit()
            gm.get_my_game().notify()
46ef6c38effd587fa6ee1174353229a1f1d10d8f | 1,943 | py | Python | AgentsVisualization/Server/server.py | mateoglzc/TC2008B | e4e217d9edbb0d6c73df28324686ec069b361f0d | [
"MIT"
] | 7 | 2021-11-01T15:36:16.000Z | 2021-11-11T03:37:43.000Z | AgentsVisualization/Server/server.py | mateoglzc/TC2008B | e4e217d9edbb0d6c73df28324686ec069b361f0d | [
"MIT"
] | null | null | null | AgentsVisualization/Server/server.py | mateoglzc/TC2008B | e4e217d9edbb0d6c73df28324686ec069b361f0d | [
"MIT"
] | 4 | 2021-11-01T23:35:02.000Z | 2021-11-30T02:01:49.000Z | # TC2008B. Sistemas Multiagentes y Gráficas Computacionales
# Python flask server to interact with Unity. Based on the code provided by Sergio Ruiz.
# Octavio Navarro. October 2021
from flask import Flask, request, jsonify
from RandomAgents import *
# Size of the board:
number_agents = 10
width = 28
height = 28
trafficModel = None
currentStep = 0
app = Flask("Traffic example")
# @app.route('/', methods=['POST', 'GET'])
@app.route('/init', methods=['POST', 'GET'])
def initModel():
    """Initialize the traffic model from POSTed form parameters.

    Reads NAgents/width/height from the form, resets the step counter and
    creates a fresh RandomModel stored in module-level globals. Only the
    POST branch returns a response.
    """
    global currentStep, trafficModel, number_agents, width, height
    if request.method == 'POST':
        number_agents = int(request.form.get('NAgents'))
        width = int(request.form.get('width'))
        height = int(request.form.get('height'))
        currentStep = 0
        # Debug output of the received parameters.
        print(request.form)
        print(number_agents, width, height)
        trafficModel = RandomModel(number_agents, width, height)
        return jsonify({"message":"Parameters recieved, model initiated."})
@app.route('/getAgents', methods=['GET'])
def getAgents():
    """Return grid positions of all RandomAgent instances as JSON.

    Response shape: ``{"positions": [{"x": ..., "y": 1, "z": ...}, ...]}``;
    y is hard-coded to 1 for the 3D visualization.
    """
    global trafficModel
    if request.method == 'GET':
        carPositions = [{"x": x, "y":1, "z":z} for (a, x, z) in trafficModel.grid.coord_iter() if isinstance(a, RandomAgent)]
        return jsonify({'positions':carPositions})
@app.route('/getObstacles', methods=['GET'])
def getObstacles():
    """Return grid positions of all ObstacleAgent instances as JSON.

    Same response shape as /getAgents; the local name ``carPositions`` is
    a copy-paste leftover — it actually holds obstacle positions.
    """
    global trafficModel
    if request.method == 'GET':
        carPositions = [{"x": x, "y":1, "z":z} for (a, x, z) in trafficModel.grid.coord_iter() if isinstance(a, ObstacleAgent)]
        return jsonify({'positions':carPositions})
@app.route('/update', methods=['GET'])
def updateModel():
    """Advance the model by one step and report the new step number."""
    global currentStep, trafficModel
    if request.method == 'GET':
        trafficModel.step()
        currentStep += 1
        return jsonify({'message':f'Model updated to step {currentStep}.', 'currentStep':currentStep})
if __name__=='__main__':
    # Serve the API for the Unity visualization on localhost:8585.
    app.run(host="localhost", port=8585, debug=True)
46f05cf6545f1bd019299906868ea89580724e08 | 331 | py | Python | lrthubcore/ratings/admin.py | xrojan/lrthub-core | 757189942c87f7136fd1f1fee536375d248d8233 | [
"BSD-3-Clause"
] | null | null | null | lrthubcore/ratings/admin.py | xrojan/lrthub-core | 757189942c87f7136fd1f1fee536375d248d8233 | [
"BSD-3-Clause"
] | null | null | null | lrthubcore/ratings/admin.py | xrojan/lrthub-core | 757189942c87f7136fd1f1fee536375d248d8233 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from .models import Rating
# Register your models here.
@admin.register(Rating)
class RatingAdmin(admin.ModelAdmin):
date_hierarchy = 'created_on'
search_fields = ['user_id__username', 'value']
list_display = ('user_id', 'value',)
list_filter = ('user_id', 'value', 'is_deleted')
| 27.583333 | 52 | 0.719033 | 215 | 0.649547 | 0 | 0 | 239 | 0.722054 | 0 | 0 | 110 | 0.332326 |
46f2fb3f4a355efbd5abadbe36f9f51a55519a5b | 20,406 | py | Python | scitbx/math/tests/tst_gaussian.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | scitbx/math/tests/tst_gaussian.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | scitbx/math/tests/tst_gaussian.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import division
from scitbx.examples import immoptibox_ports
from scitbx.math import gaussian
from scitbx.array_family import flex
from libtbx.test_utils import approx_equal, eps_eq
from libtbx.utils import format_cpu_times
try:
import cPickle as pickle
except ImportError:
import pickle
from cStringIO import StringIO
import math
import sys
def finite_gradient_dx_at_x(gaussian, x, eps=1.e-5):
  """Central finite-difference estimate of d(gaussian)/dx at x.

  Returns 0 exactly at x == 0; otherwise x must be >= eps so that
  both sample points stay non-negative.
  """
  if (x == 0):
    return 0
  assert x >= eps
  upper = gaussian.at_x(x+eps)
  lower = gaussian.at_x(x-eps)
  return (upper-lower)/(2*eps)
def exercise_gradient_dx(gaussian, x_max=1., n_points=50):
  """Compare analytical gradients against finite differences on a grid."""
  for point in xrange(n_points+1):
    x = x_max * point / n_points
    numerical = finite_gradient_dx_at_x(gaussian, x)
    analytical = gaussian.gradient_dx_at_x(x)
    assert eps_eq(numerical, analytical)
def exercise_integral_dx(gaussian, x_max=1., n_points=1000):
  """Check the analytical integral against trapezoidal numerical integration
  at every grid point in [0, x_max]."""
  numerical_integral = 0
  x_step = x_max / n_points
  for i in xrange(n_points+1):
    x = x_max * i / n_points
    new_value = gaussian.at_x(x)
    if (i):
      # Trapezoid rule: accumulate the mean of consecutive samples.
      numerical_integral += (prev_value + new_value) * .5
    prev_value = new_value
    analytical_integral = gaussian.integral_dx_at_x(x, 1.e-3)
    # The one-argument overload must agree with the explicit-parameter call.
    assert eps_eq(analytical_integral, gaussian.integral_dx_at_x(x))
    assert eps_eq(numerical_integral*x_step, analytical_integral, eps=1.e-5)
def term_finite_gradient_d_ab_at_x(term, x, eps=1.e-5):
  """Finite-difference gradients of a Gaussian term w.r.t. its a and b
  coefficients, packaged as gaussian.term(d/da, d/db)."""
  a, b = term.a, term.b
  d_a = (gaussian.term(a+eps, b).at_x(x)
         - gaussian.term(a-eps, b).at_x(x)) / (2*eps)
  d_b = (gaussian.term(a, b+eps).at_x(x)
         - gaussian.term(a, b-eps).at_x(x)) / (2*eps)
  return gaussian.term(d_a, d_b)
def exercise_term_gradients_d_ab(term, x_max=1., n_points=50):
  """Compare analytical d/da and d/db of a term with finite differences."""
  for point in xrange(n_points+1):
    x = x_max * point / n_points
    numerical = term_finite_gradient_d_ab_at_x(term, x)
    analytical = term.gradients_d_ab_at_x_sq(x*x)
    assert eps_eq(numerical.a, analytical.a)
    assert eps_eq(numerical.b, analytical.b)
def exercise_term():
  """Exercise gaussian.term: evaluation, gradients and integrals."""
  t = gaussian.term(2,3)
  assert approx_equal(t.a, 2)
  assert approx_equal(t.b, 3)
  # A term evaluates to a * exp(-b * x^2).
  assert approx_equal(t.at_x_sq(4), 2*math.exp(-3*4))
  assert approx_equal(t.at_x(2), 2*math.exp(-3*4))
  eps = 1.e-5
  for ix in (xrange(10)):
    x = ix/10.
    # Central finite difference must match the analytical gradient.
    assert eps_eq((t.at_x(x+eps)-t.at_x(x-eps))/(2*eps), t.gradient_dx_at_x(x))
  # Cover positive/negative a and a range of b values (incl. 0 and < 0).
  for f in [1,-1]:
    for t in [gaussian.term(f*2,3),
              gaussian.term(f*3,0),
              gaussian.term(f*4,1.e-4),
              gaussian.term(f*5,-1)]:
      exercise_gradient_dx(t)
      exercise_integral_dx(t)
      exercise_term_gradients_d_ab(t)
def exercise_sum():
  """Exercise gaussian.sum: constructors, evaluation, pickling, gradients,
  integrals, show() output and sorting."""
  # Constructor with a single c value: zero terms, c is used.
  g = gaussian.sum(0)
  assert g.n_terms() == 0
  assert g.array_of_a() == ()
  assert g.array_of_b() == ()
  assert approx_equal(g.c(), 0)
  assert g.use_c()
  assert g.n_parameters() == 1
  assert approx_equal(g.parameters(), [0])
  g = gaussian.sum(0, True)
  assert g.use_c()
  g = gaussian.sum(0, False)
  assert not g.use_c()
  g = gaussian.sum(1)
  assert g.n_terms() == 0
  assert g.array_of_a() == ()
  assert g.array_of_b() == ()
  assert approx_equal(g.c(), 1)
  assert g.use_c()
  assert g.n_parameters() == 1
  assert approx_equal(g.parameters(), [1])
  # Empty (a, b) sequences: no terms, c not used by default.
  g = gaussian.sum((), ())
  assert g.n_terms() == 0
  assert g.array_of_a() == ()
  assert g.array_of_b() == ()
  assert g.c() == 0
  assert not g.use_c()
  assert g.n_parameters() == 0
  assert g.parameters().size() == 0
  g = gaussian.sum((), (), -2)
  assert g.n_terms() == 0
  assert g.array_of_a() == ()
  assert g.array_of_b() == ()
  assert approx_equal(g.c(), -2)
  # A flat parameter array of even length is interpreted as (a, b) pairs.
  g = gaussian.sum(flex.double((1,2,3,4)))
  assert approx_equal(g.array_of_a(), (1,3))
  assert approx_equal(g.array_of_b(), (2,4))
  assert approx_equal(g.c(), 0)
  assert not g.use_c()
  assert approx_equal(g.parameters(), [1,2,3,4])
  g = gaussian.sum(flex.double((1,2,3,4)), 0, True)
  assert approx_equal(g.c(), 0)
  assert g.use_c()
  g = gaussian.sum(flex.double((1,2,3,4)), 5)
  assert approx_equal(g.c(), 5)
  assert g.use_c()
  assert approx_equal(g.parameters(), [1,2,3,4,5])
  # An odd-length parameter array: the trailing value becomes c.
  g = gaussian.sum(flex.double((1,2,3,4,5)))
  assert approx_equal(g.c(), 5)
  assert g.use_c()
  assert approx_equal(g.parameters(), [1,2,3,4,5])
  # Evaluation at x and x^2, scalar and array overloads.
  g = gaussian.sum((1,-2,3,-4,5), (-.1,.2,-.3,.4,-.5), 6)
  assert g.n_terms() == 5
  assert approx_equal(g.array_of_a(),(1,-2,3,-4,5))
  assert approx_equal(g.array_of_b(),(-.1,.2,-.3,.4,-.5))
  assert approx_equal(g.c(), 6)
  assert approx_equal(g.at_x_sq(3/4.), 13.4251206)
  assert approx_equal(g.at_x_sq(flex.double([2/4.,3/4.])),
                      [11.8723031, 13.4251206])
  assert approx_equal(g.at_x(math.sqrt(3/4.)), 13.4251206)
  assert approx_equal(g.at_x(flex.sqrt(flex.double([2/4.,3/4.]))),
                      [11.8723031, 13.4251206])
  # Pickling must round-trip all coefficients and the use_c flag.
  s = pickle.dumps(g)
  l = pickle.loads(s)
  assert l.n_terms() == g.n_terms()
  assert approx_equal(l.array_of_a(), g.array_of_a())
  assert approx_equal(l.array_of_b(), g.array_of_b())
  assert approx_equal(l.c(), g.c())
  assert l.use_c()
  s = pickle.dumps(gaussian.sum((),()))
  l = pickle.loads(s)
  assert not l.use_c()
  # Gradients and integrals for simple and realistic coefficient sets.
  exercise_gradient_dx(gaussian.sum(
    [5.5480], [10.4241], 0))
  exercise_gradient_dx(gaussian.sum(
    [2.657506,1.078079,1.490909,-4.241070,0.713791],
    [14.780758,0.776775,42.086842,-0.000294,0.239535],
    4.297983))
  exercise_integral_dx(gaussian.sum([5.5480], [10.4241]))
  exercise_integral_dx(gaussian.sum([5.5480], [10.4241], 3))
  exercise_integral_dx(gaussian.sum([5.5480], [0], 0))
  exercise_integral_dx(gaussian.sum([5.5480], [-0.01]))
  exercise_integral_dx(gaussian.sum(
    [2.657506,1.078079,1.490909,-4.241070,0.713791],
    [14.780758,0.776775,42.086842,-0.000294,0.239535],
    4.297983))
  # show() word count: 5 (a,b) pairs + labels, with and without c.
  g = gaussian.sum((1,-2,3,-4,5), (-.1,.2,-.3,.4,-.5), 6)
  s = StringIO()
  g.show(s)
  assert len(s.getvalue().split()) == 14
  g = gaussian.sum((3,-2,1,-4,5), (-.3,.2,-.1,.4,-.5))
  s = StringIO()
  g.show(s)
  assert len(s.getvalue().split()) == 12
  # sort() orders terms by decreasing |a| and preserves c/use_c.
  assert isinstance(g.sort(), gaussian.sum)
  assert approx_equal(g.sort().array_of_a(), (5,-4,3,-2,1))
  assert approx_equal(g.sort().array_of_b(), (-.5,.4,-.3,.2,-.1))
  assert not g.sort().use_c()
  g = gaussian.sum((1,2),(3,4),5)
  assert approx_equal(g.sort().array_of_a(), (2,1))
  assert approx_equal(g.sort().array_of_b(), (4,3))
  assert approx_equal(g.sort().c(), 5)
  assert g.sort().use_c()
def fit_finite_diff_gradients(gfit, x, eps=1.e-2):
  """Finite-difference gradients of a fitted sum at x w.r.t. all a, b
  (and c, if used) parameters, interleaved as [da0, db0, da1, db1, ..., dc]."""
  gr = flex.double()
  c = gfit.c()
  use_c = gfit.use_c()
  for i in xrange(gfit.n_terms()):
    # Central difference in a[i].
    t = []
    for seps in (eps, -eps):
      a = list(gfit.array_of_a())
      a[i] += seps
      t.append(
        gaussian.sum(a, gfit.array_of_b(), c, use_c).at_x(x))
    gr.append((t[0]-t[1])/(2*eps))
    # Central difference in b[i].
    t = []
    for seps in (eps, -eps):
      b = list(gfit.array_of_b())
      b[i] += seps
      t.append(
        gaussian.sum(gfit.array_of_a(), b, c, use_c).at_x(x))
    gr.append((t[0]-t[1])/(2*eps))
  if (use_c):
    # Central difference in the constant term c.
    t = []
    for seps in (eps, -eps):
      t.append(
        gaussian.sum(
          gfit.array_of_a(), gfit.array_of_b(), c+seps, use_c).at_x(x))
    gr.append((t[0]-t[1])/(2*eps))
  return gr
def fit_finite_diff_target_gradients(gfit, power, use_sigmas, eps=1.e-2):
  """Finite-difference gradients of the fit target function w.r.t. all
  a, b (and c) parameters, for a single-point fit table."""
  assert gfit.table_x().size() == 1
  # NOTE(review): `weight` is computed but never used below.
  weight = 1/gfit.table_sigmas()[0]**2
  gr = flex.double()
  c = gfit.c()
  use_c = gfit.use_c()
  for i in xrange(gfit.n_terms()):
    # Central difference of the target in a[i].
    t = []
    for seps in (eps, -eps):
      a = list(gfit.array_of_a())
      a[i] += seps
      gf = gaussian.fit(
        gfit.table_x(),
        gfit.table_y(),
        gfit.table_sigmas(),
        gaussian.sum(a, gfit.array_of_b(), c, use_c))
      t.append(gf.target_function(power, use_sigmas, gf.differences()))
    gr.append((t[0]-t[1])/(2*eps))
    # Central difference of the target in b[i].
    t = []
    for seps in (eps, -eps):
      b = list(gfit.array_of_b())
      b[i] += seps
      gf = gaussian.fit(
        gfit.table_x(),
        gfit.table_y(),
        gfit.table_sigmas(),
        gaussian.sum(gfit.array_of_a(), b, c, use_c))
      t.append(gf.target_function(power, use_sigmas, gf.differences()))
    gr.append((t[0]-t[1])/(2*eps))
  if (use_c):
    # Central difference of the target in the constant term c.
    t = []
    for seps in (eps, -eps):
      gf = gaussian.fit(
        gfit.table_x(),
        gfit.table_y(),
        gfit.table_sigmas(),
        gaussian.sum(gfit.array_of_a(), gfit.array_of_b(), c+seps, use_c))
      t.append(gf.target_function(power, use_sigmas, gf.differences()))
    gr.append((t[0]-t[1])/(2*eps))
  return gr
def exercise_fit():
  """Exercise gaussian.fit: construction from tables and from reference
  gaussians, differences, shifts, target functions and gradients."""
  x = flex.double((0.1, 0.2, 0.5))
  y = flex.double((3,2,1))
  sigmas = flex.double((0.04,0.02,0.01))
  # Fit constructed from explicit (x, y, sigma) tables.
  gf = gaussian.fit(
    x, y, sigmas,
    gaussian.sum((1,2), (4,5)))
  assert approx_equal(gf.array_of_a(), (1,2))
  assert approx_equal(gf.array_of_b(), (4,5))
  assert approx_equal(gf.c(), 0)
  assert not gf.use_c()
  assert approx_equal(gf.table_x(), x)
  assert approx_equal(gf.table_y(), y)
  assert approx_equal(gf.table_sigmas(), sigmas)
  assert approx_equal(gf.fitted_values(),
    [2.8632482881537511, 2.4896052951221748, 0.94088903489182252])
  # Fit constructed from a reference gaussian: y is sampled from it.
  reference_gaussian = gaussian.sum((1,2,3), (4,5,6))
  gf = gaussian.fit(
    x, reference_gaussian, sigmas,
    gaussian.sum((1,2), (4,5)))
  assert approx_equal(gf.array_of_a(), (1,2))
  assert approx_equal(gf.array_of_b(), (4,5))
  assert approx_equal(gf.c(), 0)
  assert approx_equal(gf.table_x(), x)
  assert approx_equal(gf.table_y(), reference_gaussian.at_x(x))
  assert approx_equal(gf.table_sigmas(), sigmas)
  # sort() must keep the fit tables intact.
  assert isinstance(gf.sort(), gaussian.fit)
  assert gf.sort().table_x() == gf.table_x()
  assert gf.sort().table_y() == gf.table_y()
  assert gf.sort().table_sigmas() == gf.table_sigmas()
  assert approx_equal(gf.differences(), gf.at_x(x)-reference_gaussian.at_x(x))
  # Regression values for differences and significant relative errors.
  c_fit = gaussian.fit(
    flex.double([0.0, 0.066666666666666666, 0.13333333333333333,
                 0.2, 0.26666666666666666]),
    gaussian.sum(
      (2.657506, 1.078079, 1.490909, -4.2410698, 0.71379101),
      (14.780758, 0.776775, 42.086842, -0.000294, 0.239535),
      4.2979832),
    flex.double(5, 0.0005),
    gaussian.sum(
      (1.1423916, 4.1728425, 0.61716694),
      (0.50733125, 14.002512, 41.978928)))
  differences = flex.double([-0.064797341823577881, 0.003608505180995536,
    0.098159179757290715, 0.060724224581695019, -0.10766283796372011])
  assert approx_equal(c_fit.differences(), differences)
  assert approx_equal(c_fit.significant_relative_errors(),
    [0.0107212, 0.0005581, 0.0213236, 0.0169304, 0.0385142])
  # bound_flags / apply_shifts without a constant term.
  gf = gaussian.fit(
    x, reference_gaussian, flex.double(x.size(), 1),
    gaussian.sum((1,2), (4,5)))
  assert list(gf.bound_flags(False, False)) == [False,False,False,False]
  assert list(gf.bound_flags(True, False)) == [True,False,True,False]
  assert list(gf.bound_flags(False, True)) == [False,True,False,True]
  # enforce_positive_b=True shifts sqrt(b), then squares.
  sgf = gf.apply_shifts(flex.double((3,-3,4,6)), True)
  assert approx_equal(sgf.array_of_a(), (1+3,2+4))
  assert approx_equal(sgf.array_of_b(),
    ((math.sqrt(4)-3)**2,(math.sqrt(5)+6)**2))
  assert approx_equal(sgf.c(), 0)
  assert not sgf.use_c()
  sgf = gf.apply_shifts(flex.double((3,-3,4,6)), False)
  assert approx_equal(sgf.array_of_a(), (1+3,2+4))
  assert approx_equal(sgf.array_of_b(), (4-3,5+6))
  assert approx_equal(sgf.c(), 0)
  assert not sgf.use_c()
  differences = sgf.differences()
  # Without sigmas = all sigmas 1, so both settings must agree here.
  for use_sigmas in [False, True]:
    assert approx_equal(sgf.target_function(2, use_sigmas, differences),
      25.0320634)
    assert approx_equal(sgf.target_function(4, use_sigmas, differences),
      256.2682575)
    assert approx_equal(
      sgf.gradients_d_abc(2, use_sigmas, differences),
      [15.6539271, -4.1090114, 10.4562306, -1.6376781])
  # Same exercise with a constant term (5 parameters).
  gfc = gaussian.fit(
    x, reference_gaussian, flex.double(x.size(), 1),
    gaussian.sum((1,2), (4,5), 6))
  assert list(gfc.bound_flags(False, False)) == [False,False,False,False,False]
  assert list(gfc.bound_flags(True, False)) == [True,False,True,False,True]
  assert list(gfc.bound_flags(False, True)) == [False,True,False,True,False]
  sgfc = gfc.apply_shifts(flex.double((3,-3,4,6,-5)), True)
  assert approx_equal(sgfc.array_of_a(), (1+3,2+4))
  assert approx_equal(sgfc.array_of_b(),
    ((math.sqrt(4)-3)**2,(math.sqrt(5)+6)**2))
  assert approx_equal(sgfc.c(), 6-5)
  assert sgfc.use_c()
  sgfc = gfc.apply_shifts(flex.double((3,-3,4,6,-5)), False)
  assert approx_equal(sgfc.array_of_a(), (1+3,2+4))
  assert approx_equal(sgfc.array_of_b(), (4-3,5+6))
  assert approx_equal(sgfc.c(), 6-5)
  assert sgfc.use_c()
  differences = sgfc.differences()
  for use_sigmas in [False, True]:
    assert approx_equal(sgfc.target_function(2, use_sigmas, differences),
      44.8181444)
    assert approx_equal(sgfc.target_function(4, use_sigmas, differences),
      757.3160329)
    assert approx_equal(
      sgfc.gradients_d_abc(2, use_sigmas, differences),
      [21.1132071, -6.0532695, 13.6638274, -2.2460994, 22.7860809])
  # Regression values for gradients w.r.t. parameters and shifts.
  differences = c_fit.differences()
  gabc = c_fit.gradients_d_abc(2, False, differences)
  assert approx_equal(
    gabc,
    [-0.016525391425206391, 0.0074465239375589107, 0.020055876723667564,
     0.00054794635257838251, -0.018754011379726425, -0.0011194004809549143])
  assert approx_equal(
    c_fit.gradients_d_shifts(flex.double((0.1,0.4,0.2,0.5,0.3,0.6)), gabc),
    [-0.0165254, 0.01656512, 0.0200559, 0.0046488, -0.0187540, -0.0158487])
  # Five-term reference gaussian with constant term.
  g5c = gaussian.sum(
    (2.657505989074707, 1.0780789852142334, 1.4909089803695679,
     -4.2410697937011719, 0.71379101276397705),
    (14.780757904052734, 0.77677500247955322, 42.086841583251953,
     -0.00029399999766610563, 0.23953500390052795),
    4.2979831695556641)
  for include_constant_term in (False, True):
    a = flex.double(g5c.array_of_a())
    b = flex.double(g5c.array_of_b())
    # Keep the four terms with the largest |a|.
    permutation = flex.sort_permutation(data=flex.abs(a), reverse=True)[:4]
    gf = gaussian.fit(
      flex.double([0]),
      g5c,
      flex.double(1, 1),
      gaussian.sum(
        iter(a.select(permutation)),
        iter(b.select(permutation)), 0, include_constant_term))
    assert approx_equal(gf.differences(), [-5.01177418232])
    shifts = flex.double(8,-1)
    if (include_constant_term): shifts.append(-.2)
    sgf = gf.apply_shifts(shifts, False)
    assert approx_equal(sgf.array_of_a(),
      [-5.2410698, 1.657506, 0.49090898, 0.078078985])
    assert approx_equal(sgf.array_of_b(),
      [-1.0002940, 13.780758, 41.086842, -0.223225])
    if (include_constant_term):
      assert approx_equal(sgf.c(), -.2)
    # At x = 0 the value gradient is 1 for each a (and c), 0 for each b.
    expected_gradients = [1,0,1,0,1,0,1,0]
    if (include_constant_term): expected_gradients.append(1)
    assert approx_equal(
      fit_finite_diff_gradients(sgf, 0),
      expected_gradients,
      eps=1.e-4)
    # Analytical vs finite-difference gradients over a grid of x values.
    for i in xrange(10):
      gf = gaussian.fit(
        flex.double([i / 10.]),
        g5c,
        flex.double(1, 1),
        sgf)
      differences = flex.double([0.5])
      assert approx_equal(
        gf.gradients_d_abc(2, False, differences),
        fit_finite_diff_gradients(gf, gf.table_x()[0]),
        eps=1.e-3)
      # Target-function gradients for several sigmas, powers and settings.
      for sigma in [0.04,0.02,0.01]:
        gf = gaussian.fit(
          flex.double([i / 20.]),
          g5c,
          flex.double([sigma]),
          sgf)
        for power in [2,4]:
          for use_sigmas in [False, True]:
            differences = gf.differences()
            an=gf.gradients_d_abc(power, use_sigmas, differences)
            fi=fit_finite_diff_target_gradients(gf, power, use_sigmas)
            assert eps_eq(an, fi, eps=1.e-3)
# Tabulated carbon scattering-factor curve, flattened as alternating
# (s, y) pairs: s = sin(theta)/lambda, y = scattering factor.
carbon_s_y_table = [
  0.00, 6.000, 0.01, 5.990, 0.02, 5.958, 0.03, 5.907, 0.04, 5.837, 0.05, 5.749,
  0.06, 5.645, 0.07, 5.526, 0.08, 5.396, 0.09, 5.255, 0.10, 5.107, 0.11, 4.952,
  0.12, 4.794, 0.13, 4.633, 0.14, 4.472, 0.15, 4.311, 0.16, 4.153, 0.17, 3.998,
  0.18, 3.847, 0.19, 3.701, 0.20, 3.560, 0.22, 3.297, 0.24, 3.058, 0.25, 2.949,
  0.26, 2.846, 0.28, 2.658, 0.30, 2.494, 0.32, 2.351, 0.34, 2.227, 0.35, 2.171,
  0.36, 2.120, 0.38, 2.028, 0.40, 1.948, 0.42, 1.880, 0.44, 1.821, 0.45, 1.794,
  0.46, 1.770, 0.48, 1.725, 0.50, 1.685, 0.55, 1.603, 0.60, 1.537, 0.65, 1.479,
  0.70, 1.426, 0.80, 1.322, 0.90, 1.219, 1.00, 1.114, 1.10, 1.012, 1.20, 0.914,
  1.30, 0.822, 1.40, 0.736, 1.50, 0.659, 1.60, 0.588, 1.70, 0.525, 1.80, 0.468,
  1.90, 0.418, 2.00, 0.373, 2.50, 0.216, 3.00, 0.130, 3.50, 0.081, 4.00, 0.053,
  5.00, 0.025, 6.00, 0.013]
class tabulated_fit:
  """Container for a tabulated Gaussian fit: a resolution limit plus a
  flat (a, b) coefficient list (an odd-length list ends with the
  constant term c, matching the gaussian.sum convention)."""
  def __init__(self, limit, coefficients):
    self.limit = limit
    self.coefficients = coefficients
# Gaussian-sum fits to carbon_s_y_table.  Each coefficient list interleaves
# (a, b) pairs; a trailing unpaired value (it1992, wk1995) is the constant
# term c.  The first argument is the s-range limit used when selecting
# table points for the fit (see carbon_fit.__init__ below).
carbon_fit_6 = tabulated_fit(6.0, [
2.18188567686, 13.4533708328,
1.77612377639, 32.5790123523,
1.08772011297, 0.747293264573,
0.641460989931, 0.251251498175,
0.207885994451, 80.9799313275,
0.105219184507, 0.0587297979816])
carbon_fit_5 = tabulated_fit(6.0, [
2.65463431663, 14.7665037505,
1.49420264709, 42.0409767208,
1.05563210943, 0.780856499884,
0.688021531597, 0.258963998784,
0.104681246572, 0.0579465611728])
carbon_fit_4 = tabulated_fit(3.0, [
2.21557580709, 12.7523000206,
1.98306066831, 36.4905110196,
1.31636728472, 0.632825354093,
0.480812064621, 0.148079120135])
carbon_fit_3 = tabulated_fit(1.4, [
2.51340127252, 31.8053433708,
1.74867019409, 0.445605499982,
1.72398202356, 10.5831679451])
carbon_fit_2 = tabulated_fit(0.5, [
3.54355550695, 25.6239838191,
2.42579673735, 1.50364460774])
carbon_fit_1 = tabulated_fit(0.15, [
5.96792806111, 14.8957682987])
# Presumably the 4-Gaussian + constant parameterization from the
# International Tables (1992) -- TODO confirm the citation.
carbon_it1992 = tabulated_fit(2.0, [
2.31000, 20.8439,
1.02000, 10.2075,
1.58860, 0.568700,
0.865000, 51.6512,
0.215600])
# Presumably the Waasmaier & Kirfel (1995) 5-Gaussian + constant
# parameterization -- TODO confirm the citation.
carbon_wk1995 = tabulated_fit(6.0, [
2.657506, 14.780758,
1.078079, 0.776775,
1.490909, 42.086842,
-4.241070, -0.000294,
0.713791, 0.239535,
4.297983])
class carbon_fit(immoptibox_ports.test_function):
def __init__(self, tab_fit, perturb, verbose):
self.tab_fit = tab_fit
self.perturb = perturb
self.verbose = verbose
carbon_ss = flex.double(carbon_s_y_table)[0::2]
carbon_ys = flex.double(carbon_s_y_table)[1::2]
selection = carbon_ss <= tab_fit.limit + 1.e-3
self.fit = gaussian.fit(
carbon_ss.select(selection),
carbon_ys.select(selection),
flex.double(selection.count(True), 1),
gaussian.sum(flex.double(tab_fit.coefficients)))
n = self.fit.n_parameters()
immoptibox_ports.test_function.__init__(self,
m=self.fit.table_x().size(),
n=n,
check_with_finite_differences=(n <= 6 or n == 9),
verbose=verbose)
def initialization(self):
self.x0 = self.fit.parameters()
self.capital_f_x_star = 0.5*self.f(x=self.x0).norm()**2
if (self.perturb):
mersenne_twister = flex.mersenne_twister(seed=0)
self.x0 *= 1 + mersenne_twister.random_double(
size=self.x0.size(), factor=0.01)
self.tau0 = 1e-8
self.delta0 = 10
self.x_star = None
def label(self):
return "carbon_fit(n=%d, perturb=%s)" % (
self.fit.n_parameters(), str(self.perturb))
def check_minimized_capital_f_x_star(self, f_x_star, tolerance=1.e-3):
capital_f_x_star = 0.5*f_x_star.norm()**2
if (capital_f_x_star > self.capital_f_x_star):
assert capital_f_x_star < tolerance, (
capital_f_x_star, self.capital_f_x_star)
if (self.verbose):
print " WARNING: minimization converged to larger residual", \
"than original solution:"
print " original:", self.capital_f_x_star
assert self.perturb
def f(self, x):
fit = gaussian.fit(
self.fit.table_x(), self.fit.table_y(), self.fit.table_sigmas(),
gaussian.sum(x))
return fit.differences()
def jacobian_analytical(self, x):
fit = gaussian.fit(
self.fit.table_x(), self.fit.table_y(), self.fit.table_sigmas(),
gaussian.sum(x))
return fit.least_squares_jacobian_abc()
def hessian_analytical(self, x):
j = self.jacobian_analytical(x=x)
fit = gaussian.fit(
self.fit.table_x(), self.fit.table_y(), self.fit.table_sigmas(),
gaussian.sum(x))
return fit.least_squares_hessian_abc_as_packed_u() \
.matrix_packed_u_as_symmetric()
def exercise_fit_jacobian_and_hessian(verbose):
    """Run the carbon_fit test problem for every tabulated fit, both from
    the tabulated solution and from a perturbed start point."""
    all_tab_fits = (
        carbon_fit_1, carbon_fit_2, carbon_fit_3,
        carbon_fit_4, carbon_fit_5, carbon_fit_6,
        carbon_it1992, carbon_wk1995)
    for tab_fit in all_tab_fits:
        for perturb in (False, True):
            carbon_fit(tab_fit=tab_fit, perturb=perturb, verbose=verbose)
def run():
exercise_term()
exercise_sum()
exercise_fit()
exercise_fit_jacobian_and_hessian(verbose="--verbose" in sys.argv[1:])
print format_cpu_times()
# Script entry point: only run the test suite when executed directly.
if __name__ == "__main__":
    run()
| 36.053004 | 79 | 0.653925 | 2,473 | 0.12119 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.007106 |
46f372711799b0f95a2ba0f4f20bd63cd2ae95f4 | 575 | py | Python | examples/application_commands/option_names.py | DiscPy/DiscPy | 883e1690ade23b0cc56c671285fcd214d287350a | [
"MIT"
] | 2 | 2021-09-09T10:55:24.000Z | 2021-09-09T10:56:54.000Z | examples/application_commands/option_names.py | DiscPy/DiscPy | 883e1690ade23b0cc56c671285fcd214d287350a | [
"MIT"
] | null | null | null | examples/application_commands/option_names.py | DiscPy/DiscPy | 883e1690ade23b0cc56c671285fcd214d287350a | [
"MIT"
] | 2 | 2021-10-01T15:03:57.000Z | 2021-10-08T07:24:33.000Z | import discpy
from discpy import commands
bot = commands.Bot(command_prefix='!')
# you can set "arg" keyword argument to the name of argument that represents the option in the command function
# and then change the option name as desired.
@bot.slash_command()
@discpy.application.option('sentence', arg='text', description='The text to say!')
async def say(ctx, text):
await ctx.respond(f'{ctx.author.name} said: {text}')
# in above command, the option name in discord will appear "sentence" but in this function, it will
# be passed to text argument
bot.run('token')
| 31.944444 | 111 | 0.747826 | 0 | 0 | 0 | 0 | 184 | 0.32 | 80 | 0.13913 | 360 | 0.626087 |
46f4304f0194b1310f816ea7b6c9eb36a154d157 | 192 | py | Python | huaweisms/api/monitoring.py | mcsarge/huawei-modem-python-api-client | 1bac12d9c44b3c0ee85a469004e9d37e4f7bbd37 | [
"MIT"
] | 2 | 2018-03-15T18:15:35.000Z | 2018-03-16T20:23:42.000Z | huaweisms/api/monitoring.py | mcsarge/huawei-modem-python-api-client | 1bac12d9c44b3c0ee85a469004e9d37e4f7bbd37 | [
"MIT"
] | null | null | null | huaweisms/api/monitoring.py | mcsarge/huawei-modem-python-api-client | 1bac12d9c44b3c0ee85a469004e9d37e4f7bbd37 | [
"MIT"
] | null | null | null | from huaweisms.api.common import get_from_url, ApiCtx
from .config import API_URL
def status(ctx: ApiCtx):
url = "{}/monitoring/status".format(API_URL)
return get_from_url(url, ctx)
| 24 | 53 | 0.744792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.114583 |
46f4ca2022403d4568c5fcb36b8d0df73945b02b | 366 | py | Python | accounts/migrations/0005_auto_20210104_0129.py | julesc00/CRM1 | ec5955b2cb84e2bb7631bea7201bf6de1f8d8d4b | [
"MIT"
] | null | null | null | accounts/migrations/0005_auto_20210104_0129.py | julesc00/CRM1 | ec5955b2cb84e2bb7631bea7201bf6de1f8d8d4b | [
"MIT"
] | null | null | null | accounts/migrations/0005_auto_20210104_0129.py | julesc00/CRM1 | ec5955b2cb84e2bb7631bea7201bf6de1f8d8d4b | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2021-01-04 01:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_auto_20210103_1820'),
]
operations = [
migrations.RenameField(
model_name='order',
old_name='products',
new_name='product',
),
]
| 19.263158 | 48 | 0.587432 | 281 | 0.76776 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.295082 |
46f6b99cf764b66602044cdd17f1e9d9f4b6fced | 1,708 | py | Python | migrations/versions/221ccee39de7_add_role.py | MashSoftware/flux-api | 8763f752d44868b53a351f44f243ae760b28be31 | [
"MIT"
] | 1 | 2021-06-19T18:06:34.000Z | 2021-06-19T18:06:34.000Z | migrations/versions/221ccee39de7_add_role.py | MashSoftware/flux-api | 8763f752d44868b53a351f44f243ae760b28be31 | [
"MIT"
] | null | null | null | migrations/versions/221ccee39de7_add_role.py | MashSoftware/flux-api | 8763f752d44868b53a351f44f243ae760b28be31 | [
"MIT"
] | null | null | null | """add role
Revision ID: 221ccee39de7
Revises: ba9704e35cb2
Create Date: 2021-05-13 23:51:53.241485
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "221ccee39de7"       # this migration's identifier
down_revision = "ba9704e35cb2"  # parent revision this one builds on
branch_labels = None            # no named branch
depends_on = None               # no cross-branch dependency
def upgrade():
    """Create the ``role`` table plus its ``created_at``/``title`` indexes."""
    role_columns = [
        sa.Column("id", postgresql.UUID(), nullable=False),
        sa.Column("title", sa.String(), nullable=False),
        sa.Column("grade_id", postgresql.UUID(), nullable=False),
        sa.Column("practice_id", postgresql.UUID(), nullable=False),
        sa.Column("organisation_id", postgresql.UUID(), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
    ]
    role_constraints = [
        sa.ForeignKeyConstraint(["grade_id"], ["grade.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["organisation_id"], ["organisation.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["practice_id"], ["practice.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    ]
    op.create_table("role", *(role_columns + role_constraints))
    # Non-unique lookup indexes (same order as the autogenerated version).
    for column_name in ("created_at", "title"):
        op.create_index(
            op.f("ix_role_" + column_name), "role", [column_name], unique=False)
def downgrade():
    """Drop the ``role`` table and its indexes (reverse of upgrade)."""
    # Same drop order as the autogenerated version: indexes first.
    for index_name in ("ix_role_title", "ix_role_created_at"):
        op.drop_index(op.f(index_name), table_name="role")
    op.drop_table("role")
| 37.130435 | 94 | 0.675059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 666 | 0.38993 |
46f80c5cfc8c3b9af38f4c0eab931d67dbfef11c | 5,101 | py | Python | httpclient.py | NErgezinger/CMPUT404-assignment-web-client | 02d887b500879c28fd81ebb29f6b2eea300273c8 | [
"Apache-2.0"
] | null | null | null | httpclient.py | NErgezinger/CMPUT404-assignment-web-client | 02d887b500879c28fd81ebb29f6b2eea300273c8 | [
"Apache-2.0"
] | null | null | null | httpclient.py | NErgezinger/CMPUT404-assignment-web-client | 02d887b500879c28fd81ebb29f6b2eea300273c8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# Copyright 2016 Abram Hindle, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import time
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib.parse
from urllib.parse import quote
def help():
    """Print command-line usage for this HTTP client script."""
    usage = "httpclient.py [GET/POST] [URL]\n"
    print(usage)
class HTTPResponse(object):
    """Minimal response container: numeric status code plus message body."""

    def __init__(self, code=200, body=""):
        self.code, self.body = code, body
class HTTPClient(object):
    """Minimal HTTP/1.1 client speaking GET/POST over a raw TCP socket.

    Fixes over the previous version:
      * get_headers/get_body split on the first blank line only
        (maxsplit=1), so a body that itself contains "\r\n\r\n" is no
        longer truncated.
      * POST no longer sends a stray trailing "\r\n\r\n" after the
        urlencoded body (bytes beyond Content-Length).
      * Removed a leftover debug print of the raw request in POST.
    """

    def connect(self, host, port):
        """Open a TCP connection to (host, port); stores it on self.socket."""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((host, port))
        return None

    def get_host_port_path(self, url):
        """Split a URL into (host, port, path).

        Strips an http:// or https:// scheme prefix; port defaults to 80
        when none is given (note: also for https URLs, as before); path
        defaults to "/".
        """
        if url[:7] == 'http://':
            url = url[7:]
        elif url[:8] == 'https://':
            url = url[8:]
        host_port = url.split('/')[0].split(':')
        host = host_port[0]
        if len(host_port) > 1:
            port = int(host_port[1])
        else:
            port = 80
        path = '/' + '/'.join(url.split('/')[1:])
        return host, port, path

    def get_code(self, data):
        """Return the integer status code from the response status line."""
        return int(data.split()[1])

    def get_headers(self, data):
        """Return the header section (status line included, body excluded)."""
        return data.split("\r\n\r\n", 1)[0]

    def get_body(self, data):
        """Return the message body (everything after the first blank line).

        maxsplit=1 keeps bodies that contain "\r\n\r\n" intact (bug fix).
        """
        return data.split("\r\n\r\n", 1)[1]

    def sendall(self, data):
        """Send the whole request string, UTF-8 encoded."""
        self.socket.sendall(data.encode('utf-8'))

    def close(self):
        """Shut down the write side and close the socket.

        NOTE(review): shutdown() can raise OSError if the peer already
        fully closed the connection -- behavior kept as before.
        """
        self.socket.shutdown(socket.SHUT_WR)
        self.socket.close()

    def recvall(self, sock):
        """Read from sock until EOF; decode as ISO-8859-1 (byte-safe)."""
        buffer = bytearray()
        done = False
        while not done:
            part = sock.recv(1024)
            if (part):
                buffer.extend(part)
            else:
                done = not part
        return buffer.decode('ISO-8859-1')

    def GET(self, url, args=None):
        """Perform a GET request; returns an HTTPResponse.

        args is accepted for interface symmetry with POST but unused.
        """
        host, port, path = self.get_host_port_path(url)
        data_send = ("GET " + path + " HTTP/1.1\r\n"
                     + "Host: " + host
                     + "\r\nAccept: */*\r\nConnection: close\r\n\r\n")
        self.connect(host, port)
        self.sendall(data_send)
        data_recv = self.recvall(self.socket)
        code = self.get_code(data_recv)
        body = self.get_body(data_recv)
        self.close()
        return HTTPResponse(code, body)

    def POST(self, url, args=None):
        """Perform a POST request; args (dict) is sent urlencoded.

        Returns an HTTPResponse. Content-Length always matches the bytes
        actually sent as the body (bug fix: no trailing CRLF CRLF).
        """
        host, port, path = self.get_host_port_path(url)
        data_send = ("POST " + path + " HTTP/1.1\r\n"
                     + "Host: " + host
                     + "\r\nAccept: */*\r\nConnection: close\r\n"
                     + "User-Agent: Assignment/2\r\n")
        if args is not None:
            # Percent-encode each key/value pair and join with '&'.
            arg_string = '&'.join(
                quote(key) + '=' + quote(value)
                for key, value in args.items())
            data_send += "Content-Length: " + str(len(arg_string)) + "\r\n"
            data_send += "Content-Type: application/x-www-form-urlencoded\r\n\r\n"
            data_send += arg_string
        else:
            data_send += "Content-Length: 0\r\n\r\n"
        self.connect(host, port)
        self.sendall(data_send)
        data_recv = self.recvall(self.socket)
        code = self.get_code(data_recv)
        body = self.get_body(data_recv)
        self.close()
        return HTTPResponse(code, body)

    def command(self, url, command="GET", args=None):
        """Dispatch to POST or GET (default) based on the command string."""
        if (command == "POST"):
            return self.POST(url, args)
        else:
            return self.GET(url, args)
# Command-line entry point:
#   httpclient.py [GET/POST] URL [key=value&key=value...]
if __name__ == "__main__":
    client = HTTPClient()
    argv = sys.argv
    if (len(argv) <= 1):
        help()
        sys.exit(1)
    if (len(argv) == 3):
        response = client.command(argv[2], argv[1])
    elif (len(argv) == 4):
        # Parse the "k=v&k=v" data string into a dict.
        post_args = {}
        for pair in argv[3].split('&'):
            key, value = pair.split('=')
            post_args[key] = value
        response = client.command(argv[2], argv[1], post_args)
    else:
        response = client.command(argv[1])
    # Identical result report for every branch that reaches here.
    print("Code:", response.code)
    print("Body:")
    print(response.body)
| 28.819209 | 143 | 0.571849 | 3,268 | 0.640659 | 0 | 0 | 0 | 0 | 0 | 0 | 1,397 | 0.273868 |