#!/usr/bin/env python3
import json
from pathlib import Path
from natsort import natsorted
# load_test_cases {{{1
def load_test_cases(families=None):
root_dir = Path(__file__).parent.parent
test_dir = root_dir / 'test_cases'
test_cases = [
TestCase(d)
for d in natsorted(test_dir.iterdir())
if not families or any(d.name.startswith(f) for f in families)
]
return test_cases
# load_json {{{1
def load_json(path):
try:
return json.loads(path.read_text())
except json.JSONDecodeError as err:
# If there's a problem, include the path in the error message.
raise json.JSONDecodeError(
f"{path}: {err.msg}",
err.doc,
err.pos,
) from None
# load_py {{{1
def load_py(path):
with open(path, 'rb') as f:
code = compile(f.read(), str(path), 'exec')
globals = {}; locals = {}
exec(code, globals, locals)
try:
return locals['data']
except KeyError:
raise AssertionError(f"{path}: 'data' not defined")
# iter_load_success_cases {{{1
def iter_load_success_cases(cases):
yield from (x for x in cases if x.is_load_success())
# iter_load_error_cases {{{1
def iter_load_error_cases(cases):
yield from (x for x in cases if x.is_load_error())
# iter_dump_success_cases {{{1
def iter_dump_success_cases(cases):
yield from (x for x in cases if x.is_dump_success())
# iter_dump_error_cases {{{1
def iter_dump_error_cases(cases):
yield from (x for x in cases if x.is_dump_error())
# TestCase {{{1
class TestCase:
def __init__(self, dir):
self.case = case = {}
self.case['path'] = {}
self.dir = Path(dir)
self.id = self.dir.name
if '_' in self.id:
self.family, self.num = self.id.rsplit('_', 1)
else:
self.family = self.id
self.num = None
load_in = dir / 'load_in.nt'
load_out = dir / 'load_out.json'
load_err = dir / 'load_err.json'
dump_in_json = dir / 'dump_in.json'
dump_in_py = dir / 'dump_in.py'
dump_out = dir / 'dump_out.nt'
dump_err = dir / 'dump_err.json'
if load_in.exists():
case['load'] = {}
case['load']['in'] = {
'path': load_in,
}
if load_out.exists() and load_err.exists():
raise AssertionError(f"{dir}: ambiguous expected result: both '{load_out.name}' and '{load_err.name}' are present")
elif load_out.exists():
case['load']['out'] = {
'path': load_out,
'data': load_json(load_out),
}
elif load_err.exists():
case['load']['err'] = {
'path': load_err,
'data': load_json(load_err),
}
else:
raise AssertionError(f"{dir}: no expected result: neither '{load_out.name}' nor '{load_err.name}' are present")
if dump_in_json.exists() or dump_in_py.exists():
case['dump'] = {}
if dump_in_json.exists() and dump_in_py.exists():
raise AssertionError(f"{dir}: ambiguous input: both '{dump_in_json.name}' and '{dump_in_py.name}' are present")
elif dump_in_json.exists():
case['dump']['in'] = {
'path': dump_in_json,
'data': load_json(dump_in_json),
}
elif dump_in_py.exists():
case['dump']['in'] = {
'path': dump_in_py,
'data': load_py(dump_in_py),
}
if dump_out.exists() and dump_err.exists():
raise AssertionError(f"{dir}: ambiguous expected result: both '{dump_out.name}' and '{dump_err.name}' are present")
elif dump_out.exists():
case['dump']['out'] = {
'path': dump_out,
}
elif dump_err.exists():
case['dump']['err'] = {
'path': dump_err,
'data': load_json(dump_err),
}
else:
raise AssertionError(f"{dir}: no expected result: neither '{dump_out.name}' nor '{dump_err.name}' are present")
expected_files = {
load_in,
load_out,
load_err,
dump_in_json,
dump_in_py,
dump_out,
dump_err,
dir / 'README',
dir / '__pycache__',
}
actual_files = set(dir.glob('*'))
unexpected_files = actual_files - expected_files
if unexpected_files:
raise AssertionError(f"{dir}: found unexpected files: {quoted_join(unexpected_files)}")
def __getitem__(self, key):
return self.case[key]
def __contains__(self, key):
return key in self.case
def is_load_case(self):
return 'load' in self
def is_dump_case(self):
return 'dump' in self
def is_success_case(self):
return self.is_load_success() or self.is_dump_success()
def is_error_case(self):
return self.is_load_error() or self.is_dump_error()
def is_load_success(self):
return ('load' in self) and ('out' in self['load'])
def is_load_error(self):
return ('load' in self) and ('err' in self['load'])
def is_dump_success(self):
return ('dump' in self) and ('out' in self['dump'])
def is_dump_error(self):
return ('dump' in self) and ('err' in self['dump'])
def quoted_join(paths):
return ', '.join(f"'{x.name}'" for x in sorted(paths))
# vim: fdm=marker
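# Example (not part of the original module): a minimal, hypothetical driver showing
# how the helpers above fit together. It assumes a sibling 'test_cases/' directory
# laid out the way TestCase expects; the loader under test is left out.
if __name__ == '__main__':
    cases = load_test_cases()
    for case in iter_load_success_cases(cases):
        nt_text = case['load']['in']['path'].read_text()
        expected = case['load']['out']['data']
        print(case.id, '->', type(expected).__name__)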
|
from django.db import models
from safedelete.config import SOFT_DELETE
from safedelete.models import SafeDeleteModel, SOFT_DELETE_CASCADE
import datetime
from clients.models import Client
from users.models import User
from .managers import ImportManager
class Import(SafeDeleteModel):
_safedelete_policy = SOFT_DELETE_CASCADE
YEAR_CHOICES = []
for r in range(1980, (datetime.datetime.now().year+1)):
YEAR_CHOICES.append((r,r))
MONTH_CHOICES = []
for i in range(1, 13):
MONTH_CHOICES.append((i, i))
year = models.IntegerField(choices=YEAR_CHOICES,
default=datetime.datetime.now().year)
month = models.SmallIntegerField(choices=MONTH_CHOICES, default=datetime.datetime.now().month)
_import = models.FloatField(null=True)
domestic_economy = models.FloatField(null=True)
foreign_invested_economy = models.FloatField(null=True)
machinery_equipment = models.FloatField(null=True)
plastic_material = models.FloatField(null=True)
cashew = models.FloatField(null=True)
rubber = models.FloatField(null=True)
cloth = models.FloatField(null=True)
iron_steel = models.FloatField(null=True)
animal_feed = models.FloatField(null=True)
chemical = models.FloatField(null=True)
textile_material = models.FloatField(null=True)
metal = models.FloatField(null=True)
corn = models.FloatField(null=True)
chemical_product = models.FloatField(null=True)
textile_yarn = models.FloatField(null=True)
electronic_product = models.FloatField(null=True)
cotton = models.FloatField(null=True)
pesticide = models.FloatField(null=True)
wood_product = models.FloatField(null=True)
medicine = models.FloatField(null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
creator = models.ForeignKey(User, related_name='import_creators', on_delete=models.SET_NULL, unique=False, null=True)
organization = models.ForeignKey(Client, related_name='import_organizations', on_delete=models.SET_NULL, unique=False, null=True)
objects = ImportManager()
class Meta:
app_label = 'imports'
db_table = 'import'
|
import networkx as nx
import matplotlib.pyplot as plt
G = nx.complete_graph(3)  # create a complete graph on 3 nodes
print(G.nodes)  # print the nodes of the graph
nx.draw(G)
plt.show()
|
import numpy as np
from scipy.optimize import minimize_scalar
from scipy.integrate import odeint
from scipy import interpolate
from scipy.interpolate import interp1d
#Definition of the potential.
def pot(phi, params):
model, M, gamma, alpha, Omega_M, Omega_R = params
term_1 = M**2*np.exp(-gamma)
exponent_tmp = gamma*np.tanh(phi/np.sqrt(6.*alpha))
term_2 = np.exp(exponent_tmp)
if model == "EXP_model_1":
V_0 = 0.
prefac = 1.
elif model == "EXP_model_2":
V_0 = -M**2*np.exp(-2.*gamma)
prefac = 1.
elif model == "LCDM":
V_0 = M**2*np.exp(-2.*gamma)
prefac = 0.
return prefac*term_1*term_2 + V_0
#The derivative of the potential
def pot_der(phi, params):
model, M, gamma, alpha, Omega_M, Omega_R = params
term_1 = M**2*gamma*np.exp(-gamma)
exponent_tmp = gamma*np.tanh(phi/np.sqrt(6.*alpha))
term_2 = np.exp(exponent_tmp)
term_3 = 1./np.cosh(phi/np.sqrt(6.*alpha))**2/np.sqrt(6.*alpha)
return term_1*term_2*term_3
#Friedmann equation
def Hubble_sqr_E(phi, phi_prime, params, N):
model, M, gamma, alpha, Omega_M, Omega_R = params
tmp_num = pot(phi, params)/3. + Omega_M*np.exp(-3.*N) + Omega_R*np.exp(-4.*N)
tmp_denom = 1. - phi_prime**2/6.
return tmp_num/tmp_denom
#Expression for eps
def eps(phi, phi_prime, params, N):
model, M, gamma, alpha, Omega_M, Omega_R = params
tmp_hbl_E_sqr = Hubble_sqr_E(phi, phi_prime, params, N)
tmp_1 = phi_prime**2/2.
tmp_2 = (3.*Omega_M*np.exp(-3.*N) + 4.*Omega_R*np.exp(-4.*N))/tmp_hbl_E_sqr/2.
return tmp_1 + tmp_2
#The r.h.s. of the e.o.m's.
def diff_evolve(y, N, params):
phi, phi_prime = y
tmp_eps = eps(phi, phi_prime, params, N)
tmp_hbl_E_sqr = Hubble_sqr_E(phi, phi_prime, params, N)
dydt = [phi_prime, - phi_prime*(3. - tmp_eps) - pot_der(phi, params)/tmp_hbl_E_sqr]
return dydt
def solve(N_lst, initial_condition, params_in):
model, gamma_scan, h_scan, M, gamma, alpha, Omega_M, Omega_R = params_in
def E_today_gamma_scan(gamma):
params = [model, M, gamma, alpha, Omega_M, Omega_R]
sol_phi = odeint(diff_evolve, initial_condition, N_lst, args=(params,))
sol_Hubble_E_sqr = Hubble_sqr_E(sol_phi[:, 0], sol_phi[:, 1], params, N_lst)
E_interp = interpolate.interp1d(N_lst, np.sqrt(sol_Hubble_E_sqr))
return np.abs(E_interp(0.) - 1.)
def E_today_h_scan(hfid_over_h):
params = [model, M*hfid_over_h, gamma, alpha, Omega_M, Omega_R]
sol_phi = odeint(diff_evolve, initial_condition, N_lst, args=(params,))
sol_Hubble_E_sqr = Hubble_sqr_E(sol_phi[:, 0], sol_phi[:, 1], params, N_lst)
E_interp = interpolate.interp1d(N_lst, np.sqrt(sol_Hubble_E_sqr))
return np.abs(E_interp(0.) - 1.)
if gamma_scan and not h_scan:
gamma_finding = minimize_scalar(E_today_gamma_scan, bounds=(100., 150.), method='bounded')
gamma = gamma_finding.x
print("gamma = ", gamma, E_today_gamma_scan(gamma) + 1., "model = ", model)
params = [model, M, gamma, alpha, Omega_M, Omega_R]
if not gamma_scan and h_scan:
h_finding = minimize_scalar(E_today_h_scan, bounds=(0.1, 10.), method='bounded')
hfid_over_h = h_finding.x
print("h_fid/h = ", hfid_over_h, E_today_h_scan(hfid_over_h) + 1., "model = ", model)
params = [model, M*hfid_over_h, gamma, alpha, Omega_M, Omega_R]
#We solve
sol_phi = odeint(diff_evolve, initial_condition, N_lst, args=(params,))
sol_eps = eps(sol_phi[:, 0], sol_phi[:, 1], params, N_lst)
sol_Hubble_E_sqr = Hubble_sqr_E(sol_phi[:, 0], sol_phi[:, 1], params, N_lst)
return [params, sol_phi, sol_eps, sol_Hubble_E_sqr]
def f_solve(N_lst_f_evolve, N_lst, sol_Hubble_E_sqr, Omega_M):
def f_evolve(y, N):
f = y[0]
E = E_interp(N)
E_der = E_der_interp(N)
a = np.exp(N)
OM = Omega_M/a**3/E**2
dfdN = [-f**2 - (2. + E_der/E)*f + 1.5*OM]
return dfdN
E_interp = interpolate.interp1d(N_lst, np.sqrt(sol_Hubble_E_sqr))
E_der = np.gradient(np.sqrt(sol_Hubble_E_sqr), N_lst[1] - N_lst[0])
E_der_interp = interpolate.interp1d(N_lst, E_der)
a_ini = np.exp(N_lst_f_evolve[0])
OM = Omega_M/a_ini**3/E_interp(N_lst_f_evolve[0])**2
power = 5./9.
initial_condition = [OM**power]
sol_f = odeint(f_evolve, initial_condition, N_lst_f_evolve)
return sol_f
|
import sys
import scanf
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
import seaborn as sns
import bow_projection as bp
import standing_wave
import ancantoid_shape
import dragoid_shape
try:
BASE_SHAPE_ID = sys.argv[1]
except IndexError:
sys.exit(f"Usage: {sys.argv[0]} BASE_SHAPE_ID")
fileroot = sys.argv[0].replace('.py', f'-{BASE_SHAPE_ID}')
# Choose which base shape according to command-line argument and parse
# out the shape parameters if any
if BASE_SHAPE_ID == "paraboloid":
base_shape = bp.paraboloid_R_theta
shape_label = "Paraboloid"
elif BASE_SHAPE_ID == "wilkinoid":
base_shape = bp.wilkinoid_R_theta
shape_label = "Wilkinoid"
elif BASE_SHAPE_ID.startswith("cantoid"):
ibeta, = scanf.scanf("cantoid-beta%d", BASE_SHAPE_ID)
beta = ibeta / 100000
base_shape = bp.Spline_R_theta_from_function(
ngrid=1000, shape_func=bp.cantoid_R_theta, shape_func_pars=(beta,))
shape_label = rf"Cantoid $\beta = {beta}$"
elif BASE_SHAPE_ID.startswith("ancantoid"):
ixi, ibeta = scanf.scanf("ancantoid-xi%d-beta%d", BASE_SHAPE_ID)
xi, beta = ixi / 100, ibeta / 100000
base_shape = ancantoid_shape.Ancantoid(xi=xi, beta=beta, n=301)
shape_label = rf"Ancantoid $\xi = {xi:.1f}$, $\beta = {beta}$"
elif BASE_SHAPE_ID.startswith("dragoid"):
ialpha, = scanf.scanf("dragoid-alpha%d", BASE_SHAPE_ID)
alpha = ialpha / 100
base_shape = dragoid_shape.Dragoid(alpha=alpha)
shape_label = rf"Dragoid $\alpha_\mathrm{{drag}} = {alpha:.2f}$"
sns.set_style('ticks')
fig, axes = plt.subplots(1, 3, figsize=(8, 4))
# Different (amplitude, wavenumber) are different panels
amplitudes = [0.1, 0.05, 0.02]
wavenumbers = [1.0, 2.0, 5.0]
# All are for zero inclination
inclination = 0.0
th_inf = bp.theta_infinity(base_shape)
line_artists = {}
for amplitude, wavenumber, ax in zip(amplitudes, wavenumbers, axes.flat):
wave_label = fr"$A = {amplitude}$, $N = {wavenumber}$"
# artist that will get animated for the different phases
line_artists[(amplitude, wavenumber)], = ax.plot([], [],
lw=2, alpha=0.7,
color='r', label='perturbed')
# plot the base shape
th = np.linspace(0.0, th_inf, 301)
xp, yp = bp.xyprime_t(th, inclination, base_shape)
m = np.isfinite(xp) & np.isfinite(yp)
xxp = np.concatenate((xp[m][::-1], xp[m]))
yyp = np.concatenate((-yp[m][::-1], yp[m]))
ax.plot(xxp, yyp, 'k', lw=1, label='base')
#
ax.plot([0], [0], 'o', color='k')
ax.axhline(0, color='k', lw=0.8)
ax.legend(title=wave_label, ncol=1, loc="upper right")
ax.set(
xlabel=r"$x / R_0$",
ylabel=r"$y / R_0$",
xlim=[-2.5, 1.5],
ylim=[-0.5, 4.8],
xticks=[-2, -1, 0, 1],
yticks=[0, 1, 2, 3],
)
ax.set_aspect('equal', adjustable='box')
def animate_bows(phase):
for amplitude, wavenumber, ax in zip(amplitudes, wavenumbers, axes.flat):
shape = standing_wave.StandingWave(base_shape,
amplitude=amplitude,
wavenumber=wavenumber)
th_inf = bp.theta_infinity(shape)
shape.phase = phase
th0, th90 = bp.theta_0_90(inclination, shape)
th = np.linspace(th0, th_inf, 301)
xp, yp = bp.xyprime_t(th, inclination, shape)
m = np.isfinite(xp) & np.isfinite(yp)
if m.sum() == 0:
# Case of no tangent line at all at this inclination
continue
xxp = np.concatenate((xp[m][::-1], xp[m]))
yyp = np.concatenate((-yp[m][::-1], yp[m]))
radii = bp.characteristic_radii_projected(inclination, shape)
R0p = radii['R_0 prime']
R0p = 1.0
x = xxp/R0p
y = yyp/R0p
line_artists[(amplitude, wavenumber)].set_data(x, y)
return line_artists.values()
sns.despine(bottom=True)
fig.tight_layout()
anim = animation.FuncAnimation(fig, animate_bows,
frames=np.linspace(0, 1.0, 50),
blit=True)
moviefile = fileroot + '.mp4'
anim.save(moviefile, writer='ffmpeg', fps=60, dpi=200)
print(moviefile, end='')
|
%load_ext autoreload
%autoreload 2
import sys
sys.path.append(r'C:\Users\Luke\Documents\qn\py')
import os
os.chdir(r'D:\Qiaonan Working\projects\milestones\chart\organizeData')
import qn
coll = qn.getcoll('milestones',db="LINCS",inst="loretta",u="readWriteUser",
p="askQiaonan")[0]
categoryMap = {}
deferredDocs = []
for doc in coll.find():
print(str(doc["_id"]))
print(doc["cell-lines"][0])
if "perturbagens" in doc:
print(doc["perturbagens"][0])
else:
print("no information about perturbagens")
pertCategoryHint = input('Enter the category for perturbagens: ')
if pertCategoryHint == "c":
pertCategory = "external"
elif pertCategoryHint == "g":
pertCategory = "genetic"
elif pertCategoryHint == "d":
pertCategory = "disease"
elif pertCategoryHint == "n":
pertCategory = "none"
else:
pertCategory = "defer"
print(doc["assay"])
assayOutcomeHint = input('Enter the category for assay: ')
if assayOutcomeHint == "t":
assayOutcome = "Transcriptomic"
elif assayOutcomeHint == "p":
assayOutcome = "Proteomic"
elif assayOutcomeHint == "h":
assayOutcome = "phenotipic"
elif assayOutcomeHint == "i":
assayOutcome = "image"
elif assayOutcomeHint == "e"
assayOutcome = "phenotipic"
else:
assayOutcome = "defer"
if pertCategory == "defer" or assayOutcome == "defer":
deferredDocs.append(doc)
else:
categoryMap[str(doc["_id"])] = {}
categoryMap[str(doc["_id"])]["pertClass"] = pertCategory
categoryMap[str(doc["_id"])]["assayClass"] = assayOutcome
print('\n\n')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mysql.connector
from sgmllib import SGMLParser
from HTMLParser import HTMLParser
import urllib2
import os
import re
from Site import *
from Card import *
from Edition import *
from Price import *
url="http://www.magiccorporation.com/gathering-cartes-edition-170-tenebres-sur-innistrad.html"
def findColor(url):
if url=='/images/magic/couleurs/micro/incolor.gif':
return 'incolor'
elif url=='/images/magic/couleurs/micro/white.gif':
return 'white'
elif url=='/images/magic/couleurs/micro/blue.gif':
return 'blue'
elif url=='/images/magic/couleurs/micro/red.gif':
return 'red'
elif url=='/images/magic/couleurs/micro/green.gif':
return 'green'
elif url=='/images/magic/couleurs/micro/black.gif':
return 'black'
elif url=='/images/magic/couleurs/micro/multicolor.gif':
return 'multicolor'
return ''
def findLevel(url):
if url=='/images/magic/rarete/icon/rare.gif':
return 2
elif url=='/images/magic/rarete/icon/common.gif':
return 0
elif url=='/images/magic/rarete/icon/uncommon.gif':
return 1
elif url=='/images/magic/rarete/icon/mystic_rare.gif':
return 3
def findCost(value):
cost=""
for i in value:
cost=cost+i[len(i)-5]
return cost
class CardParser(HTMLParser):
def __init__(self,idCard):
HTMLParser.__init__(self)
self.html_table_editions=False
self.image=False
self.caracteristiqueVO=False
self.caracteristiqueVF=False
self.readPrice=False
self.price=0.0
self.card=Card()
self.card.load(idCard)
self.card.capaVO=""
self.card.capaVF=""
self.text=False
self.readNameCost=False
self.readCost=False
self.readCapa=False
self.endCost=False
self.imgCost=[]
self.brCounter=0
self.tdCounter=0
self.listPrice=[]
self.nameCost=""
self.read=False
self.ligneStartAnglais=None
self.ligneStartFrancais=None
def handle_starttag(self, tag, attrs):
#print "Encountered an start tag :", tag
if tag=='div':
if len(attrs)>0:
if self.caracteristiqueVO==True and attrs[0][0]=='class' and attrs[0][1]=='block_content':
self.readCost=True
for i in attrs:
if i[0]=='style':
if i[1]=='width: 225px':
self.image=True
self.text=False
self.price=False
elif i[1]=='width: 30%':
self.image=False
self.text=True
self.price=False
elif i[1]=='width: 27%':
self.image=False
self.text=False
self.price=True
if tag=='img' and self.readCost==True and self.endCost==False:
self.imgCost.append(attrs[0][1]);
# Save the image
if tag=='img' and self.image==True:
self.image=False
for i in attrs:
if i[0]=='src':
self.card.image=i[1]
# Retrieve the price information
if self.price:
if tag=='tr':
self.tdCounter=0
self.nameCost=""
if tag=='td' and self.price==True:
self.tdCounter+=1
if self.tdCounter==1 or self.tdCounter==2 or self.tdCounter==3:
self.readNameCost=True
else:
self.readNameCost=False
if self.tdCounter==4:
self.readPrice=True
else:
self.readPrice=False
if tag=='br' or tag=='b' and self.text:
self.brCounter+=1
def handle_endtag(self, tag):
#print "Encountered an end tag :", tag
if tag=='br' and self.readCost==True:
self.readCost=False
self.endCost=True
self.readCapa=True
if tag=='div' and self.readCapa==True:
self.caracteristiqueVO=False
self.card.cost=findCost(self.imgCost)
if tag=='html':
self.card.display()
def handle_data(self, data):
for i in data :
if ord(i)==9 or ord(i)==10:
data = data.replace(i,"")
if len(data)==1 and ord(data[0])==0x20:
data = data.replace(data[0],"")
if len(data)>0:
if self.text:
if self.ligneStartAnglais==None and data=="Texte Anglais":
self.ligneStartAnglais=self.brCounter
if self.ligneStartFrancais==None and data[0:10]=="Texte Fran":
self.ligneStartFrancais=self.brCounter
if self.ligneStartAnglais != None:
if self.text and self.brCounter==(self.ligneStartAnglais+1):
self.card.nameVO=data
if self.text and self.brCounter==(self.ligneStartAnglais+2):
self.card.typeVO=data
if self.text and self.brCounter>=(self.ligneStartAnglais+3) and self.ligneStartFrancais==None:
print "capa "+self.card.capaVO
self.card.capaVO=self.card.capaVO+data
print data
if self.ligneStartFrancais != None:
if self.text and self.brCounter==(self.ligneStartFrancais+1):
self.card.nameVF=data
if self.text and self.brCounter==(self.ligneStartFrancais+2):
self.card.typeVF=data
if self.text and self.brCounter>=(self.ligneStartFrancais+3):
print "capa "+self.card.capaVF
self.card.capaVF=" "+self.card.capaVF+" "+data+" "
if self.readNameCost:
self.nameCost=self.nameCost+" "+data
self.readNameCost=False
if self.readPrice:
self.listPrice.append((self.nameCost,self.extract_price(data)))
self.readPrice=False
if self.text:
#print str(self.brCounter)+" "+(data)
"""for i in data :
print ord(i),
print " "
"""
def extract_price(self, value):
price=0.0
price=value[0:len(value)-2]
return float(price)
def extract_name(self, value):
print value
return None
def price_min(self,price):
minPrice=price[0]
for i in price:
if i<minPrice:
minPrice=i
return minPrice
if __name__ == "__main__":
print "////////////////////////////////////////////////////////////"
print "/// Lit les cartes et les enregistres dans la BDD ///"
print "////////////////////////////////////////////////////////////"
"""
edition=Edition()
allEdition=edition.all()
for temp in allEdition:
parser = EditionParser(temp[0])
urlSite='http://www.magiccorporation.com/'+temp[1]
print urlSite
site=Site(urlSite)
parser.feed(site.html)
"""
parser=CardParser(6)
site=Site("http://www.magiccorporation.com/gathering-cartes-view-29824-calciderm.html")
parser.feed(site.html)
"""
try:
conn = mysql.connector.connect(host="localhost",user="root",password="magicpswd", database="magic")
cursor = conn.cursor()
cursor.execute("SELECT id, url FROM card")
rows = cursor.fetchall()
for i in rows:
print i[0],
print i[1]
try:
parser=CardParser(i[0])
site=Site(i[1])
parser.feed(site.html)
except Exception as e:
print("Erreur")
print e
except Exception as e:
print("Erreur")
print e
"""
|
import re
import subprocess
def run_the_function(repeat, width, print_result):
#subprocess.call('LD_LIBRARY_PATH="/dls_sw/prod/tools/RHEL6-x86_64/hdf5/1-8-15/prefix/lib"', shell=True)
#subprocess.call('HDF5_DISABLE_VERSION_CHECK="1"', shell=True)
# repeat = 5;
# width = 1024 * 55
program_to_execute = './Profiling/cppProcessing2.0 ' + str(width) + ' ' + str(repeat)
subprocess.call('(/usr/bin/time -v ' + program_to_execute + ') &> profile_report.txt', shell=True)
subprocess.call("echo $'\n' >> profile_report.txt", shell=True)
subprocess.call('operf '+program_to_execute, shell=True)
subprocess.call('opreport --symbols >> profile_report.txt', shell=True)
f = open('./profile_report.txt', 'r')
s = f.readline()
total_time = 0
percentage_time = 0
while s != '':
if 'Elapsed (wall clock) time ' in s:
parsed = s.split(' ')
time = parsed[len(parsed)-1]
time = re.split('\:|\.',time)
length = len(time)
msc = 0
sec = 0
min = 0
hrs = 0
msc = int(time[length-1]) * 10
if length > 1:
sec = int(time[length-2]) * 1000
if length > 2:
min = int(time[length-3]) * 60 * 1000
if length > 3:
hrs = int(time[length-4]) * 60 * 60 * 1000
total_time = hrs + msc + sec + min
elif 'percival_ADC_decode(percival_frame<short> const&, percival_frame<float>&)' in s:
delimited = s.split(' ')
parsed = [item for item in delimited if item != '']
percentage_time = float(parsed[1])
s = f.readline()
function_time = total_time * percentage_time /100 /repeat
if print_result == True:
print '=' * 100
print 'The function took ' + str(function_time) + 'ms to run.'
print 'Image size ' + str(width) +' * ' + '160' + ' pixels.'
print str(width * 160 * 2) + ' Bytes.'
print 'Statistics collected for '+str(repeat)+' iterations'
print '=' * 100
return function_time
time_array = []
for number in range(0,50,4):
x = run_the_function(5, 1024 * number, False)
time_array.append(x)
for number in range(0,50,4):
print number * 1024 * 160 * 2, time_array[number/4]
# print time_array
|
import unittest
from katas.kyu_8.opposites_attract import lovefunc
class LoveTestCase(unittest.TestCase):
def test_true(self):
self.assertTrue(lovefunc(1, 4))
def test_true_2(self):
self.assertTrue(lovefunc(0, 1))
def test_false(self):
self.assertFalse(lovefunc(2, 2))
def test_false_2(self):
self.assertFalse(lovefunc(0, 0))
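# Note: the implementation under test (katas/kyu_8/opposites_attract.py) is not
# included in this dump. A minimal sketch consistent with the four tests above
# (love iff the two flower counts have opposite parity) would be:
#
#     def lovefunc(flower1, flower2):
#         return (flower1 + flower2) % 2 == 1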
|
import typing
from direct.actor.Actor import Actor
from panda3d.core import CompassEffect, ClockObject, Point3
from panda3d.core import CollisionBox, CollisionSegment, CollisionNode, CollisionRay
from panda3d.core import CollisionHandlerPusher, CollisionHandlerQueue, CollisionTraverser
cam_pivot_z_value: float = 3.0
cam_movement: float = 5.0
cam_max_distance: float = 70.0
cam_min_distance: float = 10.0
mouse_tolerance: float = .05
ralph: str = "./models/ralph.egg"
ralph_animations: typing.Dict[str, str] = {"idle": "./models/ralph-idle",
"walk": "./models/ralph-walk",
"run": "./models/ralph-run",
"fall": "./models/ralph-fall"}
angles_map = {(True, False, False, False): 180, # gets the angle of the avatar based on pressed keys
(False, True, False, False): 90, # invalid combinations of keys are ignored
(False, False, True, False): 0,
(False, False, False, True): -90,
(True, True, False, False): 135,
(True, False, False, True): -135,
(False, True, True, False): 45,
(False, False, True, True): -45}
fixed_update_delta: float = .01
walk_speed: float = -20
run_speed: float = -30
collider_x = 1
collider_y = 1
collider_bottom = 1
collider_top = 5
climb_threshold = 1
fall_speed = 15
jump_time = 1
jump_speed = 10
class Avatar(Actor):
@staticmethod
def __get_closest_entry(entries, node, axis):
"""Return the largest surface-point coordinate along `axis` among `entries`, or None if `entries` is empty."""
res = None
try:
res = entries[0].getSurfacePoint(node)[axis]
pass
except IndexError:
return res
for n3 in entries:
if n3.getSurfacePoint(node)[axis] > res:
res = n3.getSurfacePoint(node)[axis]
pass
pass
return res
def __init__(self, game_base, model=ralph, animation_dict=None):
"""
:param game_base: direct.showbase.ShowBase.ShowBase
:param model: str
:param animation_dict: typing.Dict[str: str]
"""
if animation_dict is None:
animation_dict = ralph_animations
pass
super().__init__(model, animation_dict)
self.__cam = game_base.cam
self.__cam_distance: float = 20.0
self.__task_manager = game_base.taskMgr # gets the task manager
"""camera controls section"""
self.__mouse_watcher_node = game_base.mouseWatcherNode # gets the mouse watcher from game_base
self.__win = game_base.win # gets the window from the game_base
self.__skip_frame = False # a bool to skip a frame when returning from a pause.
# this variable is needed since the cursor is moved to the given position in the next frame.
self.__cam_pivot = self.attachNewNode("camera-pivot-point") # adds a point for the camera to rotate around
self.__cam_pivot.setZ(cam_pivot_z_value) # sets the height of the point
self.__cam_pivot.setEffect(CompassEffect.make(game_base.render)) # makes the pivot ignore the avatar rotations
game_base.cam.reparentTo(self.__cam_pivot) # attach the camera to the node
game_base.cam.setY(-self.__cam_distance) # moves the camera back so avatar is visible
"""avatar movement section"""
self.__global_clock = ClockObject.getGlobalClock()
self.__key_map = {"w": False, # a dictionary to keep track of the pressed movement buttons
"a": False,
"s": False,
"d": False,
"shift": False}
# when a movement button is pressed, changes its value to True in the keymap
self.accept("w", self.__set_key, ["w", True])
self.accept("a", self.__set_key, ["a", True])
self.accept("s", self.__set_key, ["s", True])
self.accept("d", self.__set_key, ["d", True])
self.accept("shift", self.__set_key, ["shift", True])
self.accept("shift-w", self.__set_key, ["w", True])
self.accept("shift-a", self.__set_key, ["a", True])
self.accept("shift-s", self.__set_key, ["s", True])
self.accept("shift-d", self.__set_key, ["d", True])
self.accept("shift", self.__set_key, ["shift", True])
self.accept("w-up", self.__set_key, ["w", False])
self.accept("a-up", self.__set_key, ["a", False])
self.accept("s-up", self.__set_key, ["s", False])
self.accept("d-up", self.__set_key, ["d", False])
self.accept("shift-up", self.__set_key, ["shift", False])
"""animation section"""
self.__blend_map = {animation_name: .0 for animation_name in animation_dict}
self.enableBlend()
self.loop("idle")
self.__current_animation = "idle"
self.__prev_animation = None
self.play_char()
"""collisions section"""
self.__player_collider = self.attachNewNode(CollisionNode("player-collider"))
self.__player_collider.node().addSolid(CollisionBox(Point3(collider_x, collider_y, collider_bottom),
Point3(-collider_x, -collider_y, collider_top)))
self.__player_collider.node().setIntoCollideMask(0b0)
pusher = CollisionHandlerPusher()
pusher.addCollider(self.__player_collider, self)
# base cTrav may not be instantiated, may need to do it here
try:
base.cTrav.addCollider(self.__player_collider, pusher)
pass
except AttributeError:
print("warning (Avatar): base.cTrav is not instantiated yet. Consider doing it before creating a Avatar")
base.cTrav = CollisionTraverser()
base.cTrav.addCollider(self.__player_collider, pusher)
pass
pass
"""the character needs something that pushes him to the ground.
this won't be simulated gravity force since speed is gonna be constant"""
self.__ground_pusher = self.attachNewNode(CollisionNode("ground-pusher"))
self.__ground_pusher.node().addSolid(CollisionRay(0, 0, collider_bottom, 0, 0, -1))
self.__ground_handler = CollisionHandlerQueue()
self.__ground_pusher.show()
self.__ground_pusher.node().setIntoCollideMask(0b0)
game_base.cTrav.addCollider(self.__ground_pusher, self.__ground_handler)
self.__task_manager.add(self.__ground_task, "ground-task")
"""fall animation section, also used for jumping"""
self.__is_grounded = True
"""jumping"""
self.accept("space", self.__jump)
self.accept("shift-space", self.__jump)
"""the camera gets into meshes, it's really a bother,
this will make the camera stick to the closest surface in range"""
self.__camera_collider = self.__cam_pivot.attachNewNode(CollisionNode("camera-collider"))
self.__camera_segment = CollisionSegment(0, 0, 0, 0, -self.__cam_distance, 0)
self.__camera_collider.node().addSolid(self.__camera_segment)
self.__camera_collider.node().setIntoCollideMask(0b0)
self.__camera_handler = CollisionHandlerQueue()
game_base.cTrav.addCollider(self.__camera_collider, self.__camera_handler)
self.__task_manager.add(self.__camera_collide, "cam-collider")
"""scrolling the mouse wheel makes the camera get closer/further from the avatar"""
self.accept("wheel_up", self.__move_camera, [True])
self.accept("wheel_down", self.__move_camera, [False])
def __set_key(self, key, value):
"""
Updates the element in the key map for the given key to the given value
:param key: str the key which value is getting updated
:param value: bool the value assigned to the key
:return: None
"""
self.__key_map[key] = value
return
def __cam_rotation_task(self, task):
"""
the task in charge of rotating the camera relatively to the mouse movement
:param task: direct.task.Task panda assigned Task obj
:return: Task.cont
"""
if self.__mouse_watcher_node.hasMouse():
props = self.__win.getProperties()
x = self.__mouse_watcher_node.getMouseX() * 20 # gets x and y coordinates of cursor
y = self.__mouse_watcher_node.getMouseY() * 20
self.__win.movePointer(0, # moves cursor to the center
int(props.getXSize() / 2),
int(props.getYSize() / 2))
"""when returning from a pause, the cursor may not be in the center of the window.
to avoid moving the camera when returning to play, it is necessary to skip a frame.
"""
if self.__skip_frame:
self.__skip_frame = False
return task.cont
"""
checks if the cursor is far enough from the center, or in some cases (usually when the window gets resized)
the camera may move even if the cursor doesn't. Then rotates the camera pivot node based on the coordinates
"""
if abs(x) > mouse_tolerance:
self.__cam_pivot.setH(self.__cam_pivot.getH() - x * 10) # Z axis rotation (Heading)
pass
"""also checks if the angle doesn't put the camera upside down"""
if (y > mouse_tolerance and self.__cam_pivot.getP() < 70) or (
y < -mouse_tolerance and self.__cam_pivot.getP() > -70):
self.__cam_pivot.setP(self.__cam_pivot.getP() + y * 10) # X axis rotation (Pitch)
pass
pass
return task.cont
def __movement_task(self, task):
"""
:param task: direct.task.Task panda assigned Task obj
:return: Task.cont
"""
# gets the angle for the pressed keys combination
angle = angles_map.get((self.__key_map["w"], self.__key_map["a"], self.__key_map["s"], self.__key_map["d"]))
if angle is not None: # if the combination of keys is valid
self.setH(self.__cam_pivot.getH() - angle) # rotates the avatar the given value relatively to the camera
if self.__key_map["shift"]: # if shift is pressed (run)
self.setY(self, run_speed * self.__global_clock.getDt())
if self.__is_grounded:
self.set_animation("run")
pass
else:
self.set_animation("fall")
pass
pass
else:
self.setY(self, walk_speed * self.__global_clock.getDt())
if self.__is_grounded:
self.set_animation("walk")
pass
else:
self.set_animation("fall")
pass
pass
pass
else:
if self.__is_grounded:
self.set_animation("idle")
pass
else:
self.set_animation("fall")
pass
pass
return task.cont
def __blend_task(self, task):
"""
:param task: direct.task.Task panda assigned Task obj
:return: Task.cont
"""
for animation in self.__blend_map:
if animation == self.__current_animation and self.__blend_map[animation] < 1.0:
self.__blend_map[animation] += .1
pass
elif animation != self.__current_animation and self.__blend_map[animation] > 0.1:
self.__blend_map[animation] -= .1
pass
self.setControlEffect(animation, self.__blend_map[animation])
if self.__blend_map[animation] < .0:
self.getAnimControl(animation).stop()
pass
pass
return task.cont
def set_animation(self, animation):
"""
:param animation: str the animation name
:return:
"""
if self.__prev_animation != animation:
self.loop(animation)
self.__prev_animation = self.__current_animation
self.__current_animation = animation
pass
return
def __ground_task(self, task):
entries = list(self.__ground_handler.getEntries())
z = Avatar.__get_closest_entry(self.__ground_handler.getEntries(), self, 2)
if z is None:
return task.cont
if z < -climb_threshold:
self.setZ(self, -fall_speed * self.__global_clock.getDt())
self.__is_grounded = False
pass
elif z < climb_threshold * -.08:
self.setZ(self, -fall_speed * self.__global_clock.getDt())
self.__is_grounded = True
pass
return task.cont
def play_char(self):
self.__skip_frame = True
self.__task_manager.add(self.__cam_rotation_task, "camera_rotation_task")
self.__task_manager.add(self.__movement_task, "movement_task")
self.__task_manager.add(self.__blend_task, "animation_blend")
self.acceptOnce("escape", self.stop_char)
return
def stop_char(self):
self.__task_manager.remove("camera_rotation_task")
self.__task_manager.remove("movement_task")
self.set_animation("idle")
self.acceptOnce("escape", self.play_char)
return
def __jump_task(self, task):
self.setZ(self, jump_speed * self.__global_clock.getDt())
return task.cont
def __end_jump(self):
self.__task_manager.remove("jumping")
self.__task_manager.add(self.__ground_task, "ground-task")
return
def __jump(self):
if self.__is_grounded:
self.__is_grounded = False
self.__task_manager.remove("ground-task") # remove the ground task, or the avatar gets pushed down
self.__task_manager.add(self.__jump_task, "jumping")
self.__task_manager.doMethodLater(jump_time, self.__end_jump, "end-jump", extraArgs=[])
pass
return
def __move_camera(self, direction):
if direction and self.__cam_distance < cam_max_distance:
self.__cam_distance += cam_movement
pass
elif not direction and self.__cam_distance > cam_min_distance:
self.__cam_distance -= cam_movement
pass
else:
return
self.__camera_segment.setPointB((0, -self.__cam_distance, 0))
return
def __camera_collide(self, task):
distance = Avatar.__get_closest_entry(self.__camera_handler.getEntries(), self.__cam_pivot, 1)
if distance is not None:
self.__cam.setY(self.__cam_pivot, distance)
pass
else:
self.__cam.setY(self.__cam_pivot, -self.__cam_distance)
pass
return task.cont
pass
if __name__ == "__main__": # for testing and example
from direct.showbase.ShowBase import ShowBase
base = ShowBase()
base.cTrav = CollisionTraverser()
avatar = Avatar(base)
avatar.reparentTo(base.render)
ground = base.loader.loadModel("./models/world.egg")
ground.reparentTo(base.render)
avatar.setZ(5)
base.run()
pass
|
# how to find help documentation
* help(func)
* numpy.lookfor('function', module='module_name')
numpy.lookfor('remove', module='os')
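For example, a quick interactive sketch of the first approach (os.getcwd is just an illustrative target):
import os
help(os.getcwd)   # prints the signature and docstring of any object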
|
class Solution:
def kill_switch(self, input):
def split_lists(left,right=[],difference=0):
left_sum = sum(left)
right_sum = sum(right)
if left_sum < right_sum or len(left) < len(right):
return
if left_sum - right_sum == difference:
return left, right, difference
for i,num in enumerate(left):
result = split_lists(left[:i]+left[i+1:],right+[num], difference)
if result:
return result
if right or difference > 0:
return
for target in range(1,left_sum - min(left)+1):
result = split_lists(left,right,target)
if result:
return result
best_split = split_lists(input)
return best_split is not None and best_split[2] == 0
|
import sc2
from sc2 import Race, Difficulty
from sc2.constants import *
from sc2.ids.unit_typeid import *
from sc2.ids.ability_id import *
from sc2.player import Bot, Computer
from sc2.unit import Unit
from sc2.units import Units
from sc2.position import Point2, Point3
# i just copied over my import statements because you'll probably end up using all this stuff
def main():
"""
Make sure whatever map you use is present in your "Username/Documents/Starcraft II/Maps" folder!
replace Terran and CompetitiveBot() with whatever race and name your bot has
replace Zerg and Medium with whatever race and difficulty you want to play against
Replays are saved in: C:\\Users\\<Username>\\Documents\\StarCraft II\\Replays\\Multiplayer
"""
sc2.run_game(
sc2.maps.get("CatalystLE"),
[Bot(Race.Terran, CompetitiveBot()), Computer(Race.Zerg, Difficulty.Medium)],
realtime=True, # Set to True to watch in real time, False to play through as fast as possible
)
####################################
### Bot Template by Erik Nielsen ###
### A python-sc2 AI Template ###
####################################
class CompetitiveBot(sc2.BotAI): # give it a cool name tho
async def on_start(self):
print("Game started")
# Do things here JUST BEFORE the game starts
async def on_step(self, iteration):
"""
Populate this function with whatever your bot should do!
This function will run every iteration; possibly multiple times per second
note that the rate of iterations is not constant when playing non-realtime games
use self.time to get a FLOAT of elapsed seconds in-game instead
it's a float, so don't use `if self.time == 30`, instead do `if self.time > 30`
"""
if iteration == 0: # runs immediately after the game begins
await self.split_workers()
if iteration == 10: # runs exactly once
await self.chat_send("(glhf)")
if iteration % 10 == 0: # run less frequently for performance reasons
await self.distribute_workers() # automagically distribute workers between bases and send idle workers to mine
async def split_workers(self):
# order every worker to gather from the mineral field closest to them
for w in self.workers:
w.gather(self.mineral_field.closest_to(w))
def on_end(self, result):
print("Game ended.")
# Do things here after the game ends
if __name__ == "__main__":
main()
|
# Day 18: Operation Order
# <ryc> 2021
def inputdata():
with open('day_18_2020.input') as stream:
data = [ line for line in stream ]
return data
def calculate(stream, functions):
stream = list(stream)
stack = list()
operators = ['(']
while len(stream) != 0:
token = stream.pop(0)
if '0' <= token <= '9':
while len(stream) != 0 and '0' <= stream[0] <= '9':
token += stream.pop(0)
stack.append(int(token))
elif token == '(':
operators.append(token)
elif token in functions:
while len(operators) != 0 and functions[operators[len(operators)-1]][0] >= functions[token][0]:
stack.append(functions[operators.pop()][1](stack.pop(),stack.pop()))
operators.append(token)
elif token == ')':
token = operators.pop()
while token != '(':
stack.append(functions[token][1](stack.pop(),stack.pop()))
token = operators.pop()
token = operators.pop()
while token != '(':
stack.append(functions[token][1](stack.pop(),stack.pop()))
token = operators.pop()
return stack.pop()
def sumatory(data):
basic = 0
advance = 0
functions1 = { '(': [0], '+':[1, lambda x, y: x + y], '*':[1, lambda x, y: x * y] }
functions2 = { '(': [0], '+':[2, lambda x, y: x + y], '*':[1, lambda x, y: x * y] }
for row in data:
basic += calculate(row, functions1)
advance += calculate(row, functions2)
return basic, advance
if __name__ == '__main__':
print('\n18: Operation Order')
data = inputdata()
basic, advance = sumatory(data)
print('\n basic sumatory =', basic)
print('\n advance sumatory =', advance)
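# Illustrative sanity check (not part of the original script), using the worked
# example from the Advent of Code 2020 Day 18 puzzle statement: left-to-right
# evaluation gives 71, while "addition before multiplication" gives 231.
#
#     functions1 = {'(': [0], '+': [1, lambda x, y: x + y], '*': [1, lambda x, y: x * y]}
#     functions2 = {'(': [0], '+': [2, lambda x, y: x + y], '*': [1, lambda x, y: x * y]}
#     assert calculate('1 + 2 * 3 + 4 * 5 + 6', functions1) == 71
#     assert calculate('1 + 2 * 3 + 4 * 5 + 6', functions2) == 231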
|
# Conditional Statements
mivalor = 7
if mivalor == 2:
mivalor = 5
if mivalor == 5:
mivalor = 4
print(mivalor)
else:
mivalor = 2
else:
mivalor = 8
print(mivalor)
def multiplo_raro(a):
if a == 2:
res = a*a+1
elif a == 3:
res = a+a-1
elif a == 7:
res = a*a+5
elif a == 11:
res = a*(a-2)+9
else:
res = a*a-2
return res
prueba = multiplo_raro(3)
print(prueba)
|
import pandas as pd
path = 'C:/Users/13781/Desktop/学生信息表.xlsx'
c=pd.read_excel(path,sheet_name=1)
print(c)
print(pd.show_versions())
|
UNKNOWN = '░'
MINE = '©'
CURSOR = '█'
|
import wx
import cv2
import numpy
"""
Next steps:
- Show histogram for each image.
- Create histogram widget.
- Ability to load multiple images.
Goal:
------------
| Load Images
------------
| Image 1 | Image 2 | Image 3 | Operation | Run | Show Result |
| Result 1 | Result 2 | Result 3 | Next Operation | Run |
------------
Operations:
- Threshold, Blur, Expand, Collapse, Change color space, subtract, add
- Compute variable, Histogram
"""
app = None
def setApp(thisapp):
global app
app=thisapp
def getHistogram(img):
histSize = [256]
hranges = [0.0, 255.0]
ranges = hranges
channels = [0]
mask = numpy.ones(1).astype('uint8')
return cv2.calcHist([img.astype('uint8')], channels,
None, histSize, ranges)
def getHistogramImage(img):
hist = getHistogram(img)
(minVal, maxVal, _, _) = cv2.minMaxLoc(hist)
histImg = numpy.zeros((256, 256, 1), numpy.uint8)
hpt = 0.9 * 256
for h in range(0,256):
binVal = hist[h]
intensity = binVal * hpt / maxVal
cv2.line(histImg, (h, 256), (h, 256 - int(intensity)), 255)
return histImg
def convertImage(img):
print img.shape
if len(img.shape) == 2 or img.shape[2] == 1:
img = cv2.cvtColor(img, cv2.cv.CV_GRAY2RGB)
else:
img = cv2.cvtColor(img, cv2.cv.CV_BGR2RGB)
scale = 1
h, w = img.shape[:2]
if w > 1000:
scale = 7
wxbmp = wx.BitmapFromBuffer(w, h, img)
return wx.BitmapFromImage(wx.ImageFromBitmap(wxbmp).Scale(w/scale, h/scale))
images = [
# Image ImagePanel ControlPanel
]
def loadImage(path):
img = cv2.imread(path)
return convertImage(img)
def performOperation(img, name):
print "Performing operation: %s"%(name)
if name == "ToGray":
result = cv2.cvtColor(img, cv2.cv.CV_BGR2GRAY)
app.GetFrame().AddImagePanel(result)
return
if name == "Histogram":
app.GetFrame().AddImagePanel(getHistogramImage(img))
return
class HistogramParameters(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent, -1, wx.DefaultPosition, wx.DefaultSize)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(wx.Button(self, label="BlackAndWhite"))
self.SetSizer(sizer)
self.Fit()
class ControlPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent, -1, wx.DefaultPosition, wx.DefaultSize)
self.ImagePanel = parent
sizer = wx.GridBagSizer(wx.VERTICAL)
self.sizer = sizer
items = [
"None", "Histogram", "ToGray", "ToBlackAndWhite", "Threshold"]
#sizer.AddSpacer(5)
sizer.Add(wx.StaticText(self, -1, "Operation:"), (1, 1))
self.ops = wx.Choice(self, choices=items)
sizer.Add(self.ops, (2, 1))
#sizer.AddSpacer(5)
self.run = wx.Button(self, label="+")
self.paramPanel = None
sizer.Add(self.run, (4, 1))
self.Bind(wx.EVT_CHOICE, self.OnChoice, self.ops)
self.Bind(wx.EVT_BUTTON,
lambda event: performOperation(self.ImagePanel.GetImage(),
self.ops.GetStringSelection()), self.run)
self.ops.SetSelection(0)
self.SetSizer(sizer)
def OnChoice(self, event):
selection = self.ops.GetStringSelection()
index = self.ops.GetSelection()
if selection == "Histogram":
self.paramPanel = HistogramParameters(self)
self.sizer.Add(self.paramPanel, (3, 1))
self.Layout()
else:
if self.paramPanel is not None:
    self.paramPanel.Destroy()
    self.paramPanel = None
self.Layout()
print "Selected Item %d %s"%(index, selection)
class ImagePanel(wx.Panel):
"""
This class displays an image along with all of the stats about the image
such as resolution, color format, name, etc.
"""
def __init__(self, parent, img, index):
wx.Panel.__init__(self, parent, -1, wx.DefaultPosition, wx.DefaultSize)
self.parent = parent
self.index = index
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer = sizer
self.img = img
self.addImage(self.img)
#self.addImage(getHistogramImage(self.img))
sizer.Add(ControlPanel(self))
self.SetBackgroundColour((255, 255, 255))
self.SetSizer(sizer)
def addImage(self, img):
bmp = wx.StaticBitmap(self, bitmap=convertImage(img))
self.sizer.Add(bmp)
def GetImage(self):
return self.img
class MyFrame(wx.Frame):
def __init__(self, parent, id=wx.ID_ANY, title="",
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE,
name="MyFrame"):
super(MyFrame, self).__init__(parent, id, title,
pos, size, style, name)
# Attribute
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.AddImagePanel(cv2.imread("LightBall.jpg"))
#sizer.AddSpacer(5);
#sizer.Add(ImagePanel(self, "DarkBall.jpg"), 1, wx.EXPAND)
self.SetBackgroundColour((0, 0, 0))
self.SetSizerAndFit(self.sizer)
def AddImagePanel(self, img):
global images
imagePanel = ImagePanel(self, img, len(images) - 1)
images.append(imagePanel)
self.sizer.Add(imagePanel, 1, wx.EXPAND)
self.Fit()
class MyApp(wx.App):
def OnInit(self):
self.frame = MyFrame(None, title="OpenCV Image Pipeline Editor")
self.SetTopWindow(self.frame)
self.frame.Show()
setApp(self)
return True
def GetFrame(self):
return self.frame
if __name__ == "__main__":
app = MyApp(False)
app.MainLoop()
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from mpl_toolkits.mplot3d import Axes3D
USE = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\US equities.csv")
DMX = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\DM ex US.csv")
AZE = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\Asian equities.csv")
CNE = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\China equities.csv")
UST = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\US treasuries.csv")
USHY = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\US high Yield.csv")
AZC = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\Asian Credit.csv")
oil = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\oil.csv")
gold = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\gold.csv")
cop = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\copper.csv")
#data = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\cleanAssets.csv")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#ax.scatter(USE['growth'], USE['inflation'], USE['return'], c='paleturquoise')
#ax.scatter(DMX['growth'], DMX['inflation'], DMX['return'],c='moccasin')
#ax.scatter(AZE['growth'], AZE['inflation'], AZE['return'],c='lightcoral')
#ax.scatter(CNE['growth'], CNE['inflation'], CNE['return'],c='seagreen')
#ax.scatter(UST['growth'], UST['inflation'], UST['return'],c='mediumpurple')
#ax.scatter(USHY['growth'], USHY['inflation'], USHY['return'],c='dimgray')
#ax.scatter(AZC['growth'], AZC['inflation'], AZC['return'],c='honeydew')
#ax.scatter(oil['growth'], oil['inflation'], oil['return'],c='darkblue')
#ax.scatter(gold['growth'], gold['inflation'], gold['return'],c='red')
#ax.scatter(cop['growth'], cop['inflation'], cop['return'],c='hotpink')
#ax.set_xlabel('growth')
#ax.set_ylabel('inflation')
#ax.set_zlabel('returns')
ax.scatter(USE['growth'], USE['risk app'], USE['return'], c='paleturquoise')
ax.scatter(DMX['growth'], DMX['risk app'], DMX['return'],c='moccasin')
ax.scatter(AZE['growth'], AZE['risk app'], AZE['return'],c='lightcoral')
ax.scatter(CNE['growth'], CNE['risk app'], CNE['return'],c='seagreen')
ax.scatter(UST['growth'], UST['risk app'], UST['return'],c='mediumpurple')
ax.scatter(USHY['growth'], USHY['risk app'], USHY['return'],c='dimgray')
ax.scatter(AZC['growth'], AZC['risk app'], AZC['return'],c='honeydew')
ax.scatter(oil['growth'], oil['risk app'], oil['return'],c='darkblue')
ax.scatter(gold['growth'], gold['risk app'], gold['return'],c='red')
ax.scatter(cop['growth'], cop['risk app'], cop['return'],c='hotpink')
ax.set_xlabel('growth')
ax.set_ylabel('risk app')
ax.set_zlabel('returns')
plt.show()
|
import pytest
from pyasn1.type.namedval import NamedValues
from asn1PERser.codec.per.encoder import encode as per_encoder
from asn1PERser.classes.data.builtin.EnumeratedType import EnumeratedType
from asn1PERser.classes.types.constraint import ExtensionMarker
def SCHEMA_my_enum(enumerationRoot_list, extensionMarker_value=False):
class MyEnum(EnumeratedType):
'''
MyEnum ::= ENUMERATED {
e0,
e1,
.
.
.
eN-1
eN
}
'''
subtypeSpec = ExtensionMarker(extensionMarker_value)
enumerationRoot = NamedValues(
*[(item, index) for index, item in enumerate(enumerationRoot_list)]
)
extensionAddition = NamedValues(
)
namedValues = enumerationRoot + extensionAddition
return MyEnum
def SCHEMA_my_ext_enum(enumerationRoot_list, extensionAddition_list, extensionMarker_value=False):
class MyEnum(EnumeratedType):
'''
MyEnum::= ENUMERATED
{
e0,
e1,
.
.
.
eN - 1
eN,
...,
eN+1
.
.
.
eM-1,
eM
}
'''
subtypeSpec = ExtensionMarker(extensionMarker_value)
enumerationRoot = NamedValues(
*[(item, index) for index, item in enumerate(enumerationRoot_list)]
)
extensionAddition = NamedValues(
*[(item, index) for index, item in enumerate(extensionAddition_list, start=len(enumerationRoot_list))]
)
namedValues = enumerationRoot + extensionAddition
return MyEnum
def DATA_my_enum(enum, value):
return enum(value)
short_enum = ['a0', 'a1']
enumeration_list = ['e0', 'e1', 'e2', 'e3', 'e4', 'e5', 'e6', 'e7', 'e8', 'e9',
'e10', 'e11', 'e12', 'e13', 'e14', 'e15', 'e16', 'e17', 'e18', 'e19',
'e20', 'e21', 'e22', 'e23', 'e24', 'e25', 'e26', 'e27', 'e28', 'e29',
'e30', 'e31', 'e32', 'e33', 'e34', 'e35', 'e36', 'e37', 'e38', 'e39',
'e40', 'e41', 'e42', 'e43', 'e44', 'e45', 'e46', 'e47', 'e48', 'e49',
'e50', 'e51', 'e52', 'e53', 'e54', 'e55', 'e56', 'e57', 'e58', 'e59',
'e60', 'e61', 'e62', 'e63', 'e64', 'e65', 'e66', 'e67', 'e68', 'e69',
'e70', 'e71', 'e72', 'e73', 'e74', 'e75', 'e76', 'e77', 'e78', 'e79',
'e80', 'e81', 'e82', 'e83', 'e84', 'e85', 'e86', 'e87', 'e88', 'e89',
'e90', 'e91', 'e92', 'e93', 'e94', 'e95', 'e96', 'e97', 'e98', 'e99',
'e100', 'e101', 'e102', 'e103', 'e104', 'e105', 'e106', 'e107', 'e108', 'e109',
'e110', 'e111', 'e112', 'e113', 'e114', 'e115', 'e116', 'e117', 'e118', 'e119',
'e120', 'e121', 'e122', 'e123', 'e124', 'e125', 'e126', 'e127', 'e128', 'e129',
'e130', 'e131', 'e132', 'e133', 'e134', 'e135', 'e136', 'e137', 'e138', 'e139',
'e140', 'e141', 'e142', 'e143', 'e144', 'e145', 'e146', 'e147', 'e148', 'e149',
'e150', 'e151', 'e152', 'e153', 'e154', 'e155', 'e156', 'e157', 'e158', 'e159',
'e160', 'e161', 'e162', 'e163', 'e164', 'e165', 'e166', 'e167', 'e168', 'e169',
'e170', 'e171', 'e172', 'e173', 'e174', 'e175', 'e176', 'e177', 'e178', 'e179',
'e180', 'e181', 'e182', 'e183', 'e184', 'e185', 'e186', 'e187', 'e188', 'e189',
'e190', 'e191', 'e192', 'e193', 'e194', 'e195', 'e196', 'e197', 'e198', 'e199',
'e200', 'e201', 'e202', 'e203', 'e204', 'e205', 'e206', 'e207', 'e208', 'e209',
'e210', 'e211', 'e212', 'e213', 'e214', 'e215', 'e216', 'e217', 'e218', 'e219',
'e220', 'e221', 'e222', 'e223', 'e224', 'e225', 'e226', 'e227', 'e228', 'e229',
'e230', 'e231', 'e232', 'e233', 'e234', 'e235', 'e236', 'e237', 'e238', 'e239',
'e240', 'e241', 'e242', 'e243', 'e244', 'e245', 'e246', 'e247', 'e248', 'e249',
'e250', 'e251', 'e252', 'e253', 'e254', 'e255', 'e256', 'e257', 'e258', 'e259']
@pytest.mark.parametrize("enumerated, encoded", [
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:2]), 'e0'), '00'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:2]), 'e1'), '80'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:10]), 'e9'), '90'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:17]), 'e9'), '48'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:33]), 'e9'), '24'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:33]), 'e32'), '80'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:100]), 'e98'), 'C4'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130]), 'e126'), '7E'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130]), 'e127'), '7F'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130]), 'e128'), '80'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260]), 'e128'), '0080'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260]), 'e254'), '00FE'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260]), 'e255'), '00FF'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260]), 'e256'), '0100'),
])
def test_no_extension_marker_enumerated_can_be_encoded(enumerated, encoded):
assert per_encoder(enumerated) == bytearray.fromhex(encoded)
@pytest.mark.parametrize("enumerated, encoded", [
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:2], extensionMarker_value=True), 'e0'), '00'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:2], extensionMarker_value=True), 'e1'), '40'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:10], extensionMarker_value=True), 'e9'), '48'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:17], extensionMarker_value=True), 'e9'), '24'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:33], extensionMarker_value=True), 'e9'), '12'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:33], extensionMarker_value=True), 'e32'), '40'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:100], extensionMarker_value=True), 'e98'), '62'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130], extensionMarker_value=True), 'e126'), '3F00'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130], extensionMarker_value=True), 'e127'), '3F80'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130], extensionMarker_value=True), 'e128'), '4000'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260], extensionMarker_value=True), 'e128'), '000080'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260], extensionMarker_value=True), 'e254'), '0000FE'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260], extensionMarker_value=True), 'e255'), '0000FF'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260], extensionMarker_value=True), 'e256'), '000100'),
])
def test_extension_marker_is_present_and_extension_addition_is_empty_but_value_is_from_root_can_be_encoded(enumerated, encoded):
assert per_encoder(enumerated) == bytearray.fromhex(encoded)
@pytest.mark.parametrize("enumerated, encoded", [
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:2], extensionAddition_list=short_enum, extensionMarker_value=True), 'e0'), '00'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:2], extensionAddition_list=short_enum, extensionMarker_value=True), 'e1'), '40'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:10], extensionAddition_list=short_enum, extensionMarker_value=True), 'e9'), '48'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:17], extensionAddition_list=short_enum, extensionMarker_value=True), 'e9'), '24'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:33], extensionAddition_list=short_enum, extensionMarker_value=True), 'e9'), '12'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:33], extensionAddition_list=short_enum, extensionMarker_value=True), 'e32'), '40'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:100], extensionAddition_list=short_enum, extensionMarker_value=True), 'e98'), '62'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:130], extensionAddition_list=short_enum, extensionMarker_value=True), 'e126'), '3F00'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:130], extensionAddition_list=short_enum, extensionMarker_value=True), 'e127'), '3F80'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:130], extensionAddition_list=short_enum, extensionMarker_value=True), 'e128'), '4000'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:260], extensionAddition_list=short_enum, extensionMarker_value=True), 'e128'), '000080'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:260], extensionAddition_list=short_enum, extensionMarker_value=True), 'e254'), '0000FE'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:260], extensionAddition_list=short_enum, extensionMarker_value=True), 'e255'), '0000FF'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:260], extensionAddition_list=short_enum, extensionMarker_value=True), 'e256'), '000100'),
])
def test_extension_marker_is_present_and_extension_addition_is_not_empty_but_value_is_from_root_can_be_encoded(enumerated, encoded):
assert per_encoder(enumerated) == bytearray.fromhex(encoded)
@pytest.mark.parametrize("enumerated, encoded", [
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:2], extensionMarker_value=True), 'e0'), '80'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:2], extensionMarker_value=True), 'e1'), '81'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:10], extensionMarker_value=True), 'e9'), '89'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:17], extensionMarker_value=True), 'e9'), '89'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:33], extensionMarker_value=True), 'e9'), '89'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:33], extensionMarker_value=True), 'e32'), 'A0'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:100], extensionMarker_value=True), 'e98'), 'C00162'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:130], extensionMarker_value=True), 'e126'), 'C0017E'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:130], extensionMarker_value=True), 'e127'), 'C0017F'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:130], extensionMarker_value=True), 'e128'), 'C00180'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:260], extensionMarker_value=True), 'e128'), 'C00180'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:260], extensionMarker_value=True), 'e254'), 'C001FE'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:260], extensionMarker_value=True), 'e255'), 'C001FF'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:260], extensionMarker_value=True), 'e256'), 'C0020100'),
])
def test_extension_marker_is_present_and_value_is_from_extension_can_be_encoded(enumerated, encoded):
assert per_encoder(enumerated) == bytearray.fromhex(encoded)
|
#!/usr/bin/env python
import pika
import sys
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
#channel.queue_purge(queue='task_queue')
channel.queue_purge(queue='lightvm_one_files')
connection.close()
|
N = input()
set1 = set(N)  # drop duplicate characters from the string (or any other sequence)
sum1 = 0
for i in set1:
    sum1 += int(i)  # int() is safer than eval() for converting digit characters
print(sum1)
|
from analyze_design_decoys import fix_file
import argparse
import design_protease as dp
from os import makedirs, remove
from os.path import basename, isdir, isfile, join
from pyrosetta import *
from pyrosetta.rosetta.core.select.residue_selector import \
ChainSelector, OrResidueSelector, ResidueIndexSelector
from pyrosetta.rosetta.protocols.constraint_generator import \
AddConstraints, CoordinateConstraintGenerator
from pyrosetta.rosetta.protocols.enzdes import ADD_NEW, AddOrRemoveMatchCsts
from pyrosetta.rosetta.protocols.flexpep_docking import FlexPepDockingProtocol
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--start_struct", required=True,
default='fibrils_collaboration/other_models/crude_ext_cat.pdb',
help="Pick starting PDB")
parser.add_argument("-od", "--out_dir", required=True,
help="Name an output directory for decoys")
parser.add_argument("-site", "--site", required=True, type=int,
help="What is the asyn cleavage site in this frame?")
parser.add_argument("-seq", "--sequence", required=True, type=str,
help="What substrate sequence do you want to thread?")
parser.add_argument("-n", "--number_decoys", type=int, default=10,
help="How many decoys should be made? (Default is 10.)")
parser.add_argument('-x', '--extend',
help='Extend output name, ex: job from SLURM')
args = parser.parse_args()
return args
def apply_constraints(pose):
"""
    Applies enzdes constraints from the input CST file to a pose
Also applies coordinate constraints to the substrate peptide, assumed to
be chain B
"""
# Enzdes constraints
cstm = AddOrRemoveMatchCsts()
cstm.set_cst_action(ADD_NEW)
cstm.apply(pose)
# Determine peptide residues to constrain, preserving
# H-bonding residues of original peptide, P3-P1
first_pep_cst_res = pose.pdb_info().pdb2pose('B', 4)
last_pep_cst_res = pose.pdb_info().pdb2pose('B', 6)
cst_range = '{}-{}'.format(first_pep_cst_res, last_pep_cst_res)
# Coordinate constraints
cg = CoordinateConstraintGenerator()
ors = OrResidueSelector()
ors.add_residue_selector(ChainSelector('A')) # Constrain main backbone
ors.add_residue_selector(ResidueIndexSelector(cst_range))
cg.set_residue_selector(ors)
ac = AddConstraints()
ac.add_generator(cg)
ac.apply(pose)
return
args = parse_args()
if not isdir(args.out_dir):
makedirs(args.out_dir)
opts = '-enzdes::cstfile fibrils_collaboration/htra1_protease.cst -run:preserve_header -mute core'
opts += ' -pep_refine -ex1 -ex2 -use_input_sc -flip_HNQ -no_optH false -score:weights ref2015_cst'
init(opts)
# Score function and starting PDB
sf = create_score_function('ref2015_cst')
pose = pose_from_pdb(args.start_struct)
# Changing peptide sequence
asyn_seq = args.sequence.upper()
pose = dp.make_residue_changes(pose, sf, asyn_seq, 212, [61, 91, 169], None)
# Creating FlexPepDock protocol using init options
fpdock = FlexPepDockingProtocol()
# Making name
decoy_name = join(args.out_dir, 'htra1_prot_asyn_ext')
decoy_name += '_' + str(args.site)
decoy_name += '_' + asyn_seq
if args.extend:
decoy_name += '_' + args.extend
# Fixing constraints text block, since enzdes constraints are not dynamic
fix_pdb = decoy_name + '_tmp.pdb'
pose.dump_pdb(fix_pdb)
fix_file(fix_pdb)
pose = pose_from_pdb(fix_pdb)
remove(fix_pdb)
# Applying constraints to the pose
apply_constraints(pose)
jd = PyJobDistributor(decoy_name, args.number_decoys, sf)
while not jd.job_complete:
pp = Pose(pose)
fpdock.apply(pp)
jd.output_decoy(pp)
|
from pyspark import SparkConf, SparkContext
conf = SparkConf().setMaster("local").setAppName("Temperature")
sc = SparkContext(conf = conf)
def fun(line):
fields = line.split(',')
station = fields[0]
e_type = fields[2]
temp = float(fields[3])
return (station,e_type, temp)
lines = sc.textFile("file:///SparkCourse//1800.csv")
rdd = lines.map(fun)
weather=rdd.filter(lambda x : "TMIN" in x[1])
x = weather.map(lambda x : (x[0],x[2]))
y = x.reduceByKey(lambda x,y : min(x,y))
#rdd = lines.map(fun)
results = y.collect()
for result in results:
print(result)
|
from Crypto.Util import number
import gmpy
def nth_root(x, n):
gs = gmpy.mpz(x)
g3 = gmpy.mpz(n)
mask = gmpy.mpz(0x8080808080808080808080808080808080808080808080808080808080808080808080808080808080808080808080808080808080808080808080808000)
    # mpz.root() returns (root, exact_flag); accept the root once no bits remain
    # under the mask.
    while True:
        root, exact = gs.root(g3)
        if (root & mask).bit_length() < 8:
            break
return int(root)
ciphertext = int(open('ciphertext', 'r').read())
public_key = open('public_key', 'r').read().split(":")
n = int(public_key[0])
e = int(public_key[1])
plaintext = nth_root(ciphertext, e)
plaintext_string = number.long_to_bytes(plaintext)[0:-1]
print(plaintext_string)
|
from raptor import RaptorQ
class Decoder(RaptorQ):
def __init__(self, symbols):
#Figure out sub-blocks. Basically split up "chunk" so it can fit in memory. Then split that into K pieces.
#We'll just use one block for now
super(Decoder, self).__init__(symbols)
def append(self, symbol):
self.symbols.append(symbol)
def decode(self):
self.calculate_i_symbols()
|
#!/usr/bin/env python
# This collects all other helper functions
"""
Copyright 2016 ARC Centre of Excellence for Climate Systems Science
author: Paola Petrelli <paola.petrelli@utas.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import itertools
from datetime import date
import glob
import subprocess
import re
from collections import defaultdict
from ARCCSSive.data import *
from ARCCSSive.CMIP5.update_db_functions import add_bulk_items, update_item
from ARCCSSive.CMIP5.Model import Instance, VersionFile
def combine_constraints(**kwargs):
    ''' Works out every possible combination given lists of constraints
    :argument kwargs: dictionary whose keys are fields and whose values are lists of values for that field
    :return: a list of dictionaries, one for each combination of constraints
    '''
try:
return [dict(itertools.izip(kwargs, x)) for x in itertools.product(*kwargs.itervalues())]
    except AttributeError:
        # Python 3: itertools.izip and dict.itervalues no longer exist
return [dict(zip(kwargs, x)) for x in itertools.product(*kwargs.values())]
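# Example (a sketch): combine_constraints(model=['MIROC5', 'ACCESS1-0'], experiment=['rcp45'])
# returns [{'model': 'MIROC5', 'experiment': 'rcp45'}, {'model': 'ACCESS1-0', 'experiment': 'rcp45'}]
# (the key order inside each dictionary may differ on older Python versions).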
def join_varmip(var0,mip0):
''' List all combinations of selected variables and mips '''
comb = ["_".join(x) for x in itertools.product(*[var0,mip0])]
print(comb)
return comb
def get_instance(dataset_id):
''' Break dataset_id from ESGF search in dictionary of instance attributes '''
bits=dataset_id.split(".")
return {'model': bits[3],'experiment': bits[4],'mip':bits[7],'ensemble':bits[8],'version':bits[9].split("|")[0]}
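# Example (a sketch with a made-up ESGF dataset_id):
# get_instance("cmip5.output1.CSIRO-BOM.ACCESS1-0.historical.mon.atmos.Amon.r1i1p1.v20120101|esgf.nci.org.au")
# -> {'model': 'ACCESS1-0', 'experiment': 'historical', 'mip': 'Amon',
#     'ensemble': 'r1i1p1', 'version': 'v20120101'}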
def compare_instances(db,remote,local,const_keys,admin):
''' Compare remote and local search results they're both a list of dictionaries
:argument db: sqlalchemy local db session
:argument remote: each dict has keys version, files (objs), filenames, tracking_ids, dataset_id
:argument local: list of version objects
:argument const_keys: list of instance attributes defined by user constraints
    :argument admin: boolean, if True the user is an admin and can update the db directly, otherwise new info is saved in a log file
:return: remote, local with updated attributes
'''
global logfile
logdir="/g/data1/ua6/unofficial-ESG-replica/tmp/pxp581/requests/"
if not admin:
logfile=logdir+"log_" + os.environ['USER'] + "_" + today.replace("-","") + ".txt"
print(logfile)
# a list of the unique constraints defining one instance in the database which are not in user constraints
undefined=[x for x in Instance.__table_args__[0].columns.keys() if x not in const_keys]
# loop through all returned remote datasets
for ind,ds in enumerate(remote):
# loop through all local versions
ds_instance=get_instance(ds['dataset_id'])
for v in local:
dummy=[False for key in undefined if ds_instance[key] != v.variable.__dict__[key]]
if False in dummy:
continue
v.checked_on = today
# compare files for all cases except if version regular but different from remote
if v.version in [ds['version'],'NA',r've\d*']:
extra = compare_files(db,ds,v,admin)
# if tracking_ids or checksums are same
if extra==set([]):
v.to_update = False
else:
v.to_update = True
if not admin:
ds_info=[str(x) for x in ds_instance.items()]
write_log(" ".join(["update"]+ds_info+[v.version,v.path,"\n"]))
# if local dataset_id is the same as remote skip all other checks
if v.dataset_id==ds['dataset_id']:
v.is_latest = True
# if version same as latest on esgf
elif v.version == ds['version']:
v.dataset_id = ds['dataset_id']
v.is_latest = True
# if version undefined
elif v.version in ['NA',r've\d*']:
if extra==set([]):
v.version = ds['version']
v.dataset_id = ds['dataset_id']
v.is_latest = True
# if version different or undefined but one or more tracking_ids are different
# assume different version from latest
# NB what would happen if we fix faulty files? tracking_ids will be same but md5 different,
# need to set a standard warning for them
else:
v.is_latest = False
if v.version > ds['version']:
print("Warning!!!")
print(" ".join(["Local version",v.version,"is more recent than the latest version",ds['version'], "found on ESGF"]))
if v.dataset_id is None: v.dataset_id = "NA"
# update local version on database
if admin:
db.commit()
else:
if db.dirty:
line=["version"]+ds_instance.values()[:-1]+[v.version,str(v.id),str(v.dataset_id),
str(v.is_latest),str(v.checked_on),"\n"]
write_log(" ".join(line))
# add to remote dictionary list of local identical versions
remote[ind]['same_as']=[v.id for v in local if v.dataset_id == ds['dataset_id']]
try:
flog.close()
except:
pass
return remote, local
def compare_files(db,rds,v,admin):
''' Compare files of remote and local version of a dataset
:argument rds: dictionary of remote dataset object selected attributes
:argument v: local version object
    :return: result set, NB updates VersionFile objects in the database when calculating checksums
'''
extra=set([])
# if there are no files on db for local version add them
if v.filenames()==[]:
rows=[]
for f in v.build_filepaths():
checksum=check_hash(f,'sha256')
rows.append(dict(filename=f.split("/")[-1], sha256=checksum, version_id=v.id))
if admin:
add_bulk_items(db, VersionFile, rows)
else:
for r in rows:
write_log("new file "+ str(r) + "\n")
# first compare tracking_ids if all are present in local version
local_ids=v.tracking_ids()
if (local_ids and "" not in local_ids):
extra = compare_tracking_ids(rds['tracking_ids'],local_ids)
# if tracking_ids are the same or if they are not present compare checksums
# calculate checksums and update local db if necessary
if extra==set([]):
local_sums=[]
if rds['checksum_type'] in ['md5','MD5']:
for f in v.files:
if f.md5 in ["", None]:
f.md5 = check_hash(v.path+"/"+f.filename,'md5')
if admin:
update_item(db,VersionFile,f.id,{'md5':f.md5})
else:
write_log(" ".join(['md5',str(f.id),f.md5,"\n"]))
local_sums.append(f.md5)
else:
for f in v.files:
if f.sha256 in ["",None]:
f.sha256=check_hash(v.path+"/"+f.filename,'sha256')
if admin:
update_item(db,VersionFile,f.id,{'sha256':f.sha256})
else:
write_log(" ".join(['sha256',str(f.id),f.sha256,"\n"]))
local_sums.append(f.sha256)
extra = compare_checksums(rds['checksums'],local_sums)
return extra
def compare_tracking_ids(remote_ids,local_ids):
''' Compare the lists of the tracking_ids from a remote and a local version of a dataset
:argument remote_ids: list of remote tracking_ids
:argument local_ids: list of local tracking_ids
:return: result set
'''
return set(remote_ids).symmetric_difference(local_ids)
def compare_checksums(remote_sums,local_sums):
''' Compare the lists of the checksums from a remote and a local version of a dataset
:argument remote_sums: list of remote checksums
:argument local_sums: list of local checksums
:return: result set
'''
return set(remote_sums).symmetric_difference(local_sums)
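# Example: compare_checksums(['aaa', 'bbb'], ['bbb', 'ccc']) -> {'aaa', 'ccc'};
# an empty set means the remote and local versions hold identical files.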
# these functions are to manage drstree and tmp/tree directories
def list_drstree(**kwargs):
''' find directories matching kwargs constraints in drstree
check if they are in database,
if not add them to db
return: list of matches '''
if 'mip' in kwargs.keys(): kwargs['frequency']=frequency(kwargs['mip'])
indir=drstree + drs_glob(**kwargs)
return glob.glob(indir)
def list_tmpdir(flist):
''' this read from file list of instances on tmp/tree and return the ones matching constraints '''
# skip first line
# variable,mip_table,model,experiment,ensemble,realm,version,path
keys=['variable','mip','model','experiment',
'ensemble', 'realm', 'version', 'path']
f=open(flist,'r')
inst_list=[]
lines=f.readlines()
for line in lines[1:]:
values=line[:-1].split(',')
inst_list.append(dict(zip(keys, values)))
return inst_list
def file_glob(**kwargs):
""" Get the glob string matching the CMIP5 filename
"""
value=defaultdict(lambda: "*")
value.update(kwargs)
return '%s_%s_%s_%s_%s*.nc'%(
value['variable'],
value[ 'mip'],
value['model'],
value['experiment'],
value['ensemble'])
def drs_glob(**kwargs):
""" Get the glob string matching the directory structure under drstree
"""
value=defaultdict(lambda: "*")
value.update(kwargs)
return '%s/%s/%s/%s/%s/%s/%s'%(
value['model'],
value['experiment'],
value['frequency'],
value['realm'],
value['variable'],
value['ensemble'],
value['version'])
def tree_glob(**kwargs):
""" Get the glob string matching the directory structure under tmp/tree
"""
value=defaultdict(lambda: "*")
value.update(kwargs)
return '%s/%s/%s/%s/%s/%s'%(
value['model'],
value['experiment'],
value['frequency'],
value['realm'],
value['variable'],
value['ensemble'])
def drs_details(path):
''' Split the drstree path in model, experiment, frequency, realm, variable, ensemble '''
keys=['model','experiment', 'frequency', 'realm', 'variable','ensemble']
values=path.replace(drstree,"").split('/')
dummy=dict((keys[i],values[i]) for i in range(len(values)))
return dummy.pop('frequency'), dummy
def file_details(fname):
''' Split the filename in variable, MIP code, model, experiment, ensemble (period is excluded) '''
keys=['variable','mip','model','experiment','ensemble']
values = fname.split('_')
if len(values) >= 5:
return dict((keys[i],values[i]) for i in range(len(values[:-1])))
else:
return
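# Example (a sketch with a made-up CMIP5 filename):
# file_details("tas_Amon_ACCESS1-0_historical_r1i1p1_185001-200512.nc")
# -> {'variable': 'tas', 'mip': 'Amon', 'model': 'ACCESS1-0',
#     'experiment': 'historical', 'ensemble': 'r1i1p1'}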
def find_version(bits,string):
''' Returns matching string if found in directory structure '''
    dummy = list(filter(lambda el: re.findall(string, el), bits))
if len(dummy) == 0:
return 'not_specified'
else:
return dummy[0]
def list_drs_versions(path):
''' Returns matching string if found in directory structure '''
return [x.split("/")[-1] for x in glob.glob(path+"/v*")]
def list_drs_files(path):
''' Returns matching string if found in directory structure '''
return [x.split("/")[-1] for x in glob.glob(path+"/*.nc")]
def get_mip(path):
''' Returns mip for instance
input: instance path
'''
onefile = os.path.basename(glob.glob(path + "/latest/*.nc")[0])
dummy = file_details(onefile)
return dummy['mip']
def tree_path(drspath):
''' Returns the tmp/tree path for a particular instance & version from
input: drstree path for one the version files
'''
path=os.path.realpath(drspath)
return "/".join(path.split("/")[:-1])
def check_hash(path,hash_type):
''' Execute md5sum/sha256sum on file on tree and return checksum value '''
hash_cmd="md5sum"
if hash_type in ["SHA256","sha256"]: hash_cmd="sha256sum"
try:
return subprocess.check_output([hash_cmd, path]).split()[0]
except:
print("Warning cannot calculate ",hash_type," for file ",path)
return ""
# functions to manage dictionaries
def assign_mips(**kwargs):
''' Append the cmip5 mip tables corresponding to the input frequency
return updates list of mips '''
if kwargs['mip'] is None: kwargs['mip']=[]
if kwargs['frq']:
kwargs['mip'].extend([y for x in kwargs['frq'] for y in frq_dict[x]])
return list(set([x for x in kwargs['mip']]))
def frequency(mip):
''' returns frequency for input mip '''
return mip_dict[mip]
# functions to write logs
def write_log(line):
''' add str to log file, open new file if does not exist yet '''
global flog
try:
flog.write(line)
except:
flog=open(logfile,"a")
flog.write(line)
return
# this should be taken by setting environment variable DRSTREE
# define root directory for drstree and /tmp/tree
try:
drstree = os.environ['DRSTREE']
except KeyError:
drstree = "/g/data1/ua6/drstree/CMIP5/GCM/"
drstree="/g/data1/ua8/cmip-download/drstree/CMIP5/GCM/"
tmptree="/g/data1/ua6/unofficial-ESG-replica/tmp/tree/"
# define date string for current date
today = date.today().strftime('%Y-%m-%d')
|
#!/usr/bin/env python
# coding: utf-8
# # Taking two numbers as input and print the result of addition,subtraction,multiplication and division
# In[7]:
x=int(input())
y=int(input())
print ( "sum=", x+y)
print ("subtraction=",x-y)
print ("multiplication=",x*y)
print ("division=",x/y )
# # Taking two numbers X and Y as input and printing result of X to the power Y (X^Y)
#
# In[8]:
x=int(input())
y=int(input())
print(x ** y)
# In[ ]:
|
from __future__ import unicode_literals
from django.apps import AppConfig
class DjangoAndAjaxConfig(AppConfig):
name = 'django_and_ajax'
|
# models.py
import os
from flask import Flask
from flask_marshmallow import Marshmallow
config_name = os.getenv('FLASK_CONFIG')
if config_name == 'development':
from .. app import db
elif config_name == 'testing':
from app import db
app = Flask(__name__)
ma = Marshmallow(app)
class User(db.Model):
"""
Create user table
"""
# Ensures table will be named in plural and not in singular
# as is the name of the model
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(60), index=True, unique=True)
first_name = db.Column(db.String(60), index=True)
last_name = db.Column(db.String(60), index=True)
age = db.Column(db.Integer, index=True)
phone_number = db.Column(db.Integer, index=True)
class UserSchema(ma.ModelSchema):
class Meta:
"""
Fields to expose
"""
fields = ('id', 'email', 'first_name', 'last_name', 'age', 'phone_number')
|
# Test TCP with Json format
# Server
from datetime import datetime
import socket
import struct
import json
server_address = ('localhost', 6788)
max_size = 4096
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(server_address)
server.listen(10)
sendData = [{
"Veh_id": 0,
"result": 0
}, {
"Veh_id": 1,
"result": 0
}, {
"Veh_id": 2,
"result": 0
}, {
"Veh_id": 3,
"result": 0
}, {
"Veh_id": 4,
"result": 0
}, {
"Veh_id": 5,
"result": 0
}, {
"Veh_id": 6,
"result": 0
}, {
"Veh_id": 7,
"result": 0
}, {
"Veh_id": 8,
"result": 0
}, {
"Veh_id": 9,
"result": 0
}]
STOP_CHAT = True
while STOP_CHAT:
check = 0
print('starting the server at', datetime.now())
print('waiting for a client to call.')
client, addr = server.accept()
data = client.recv(max_size)
data = data.decode('utf-8')
recData = json.loads(data)
print(recData["arrival_time"])
if recData["arrival_time"] < 5:
sendData[recData["Veh_id"]]["result"] = 1
print(sendData)
#Send Json
mes = bytes(json.dumps(sendData[recData["Veh_id"]]), encoding='utf-8')
client.send(mes)
# Send Str
# str = struct.pack("i", 66666)
# server.sendto(str, client)
# if data[0] > 5:
# str = struct.pack("i", 66666)
# server.sendto(str, client)
# else:
# str = struct.pack("i", 23333)
# server.sendto(str, client)
client.close()
server.close()
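# A minimal client sketch for exercising this server (assumes the same host/port
# and the field names read above: "Veh_id" and "arrival_time"):
# import json, socket
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect(('localhost', 6788))
# client.send(bytes(json.dumps({"Veh_id": 3, "arrival_time": 4}), encoding='utf-8'))
# print(json.loads(client.recv(4096).decode('utf-8')))  # e.g. {'Veh_id': 3, 'result': 1}
# client.close()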
|
# Mathematical derivation: split n into as many 3s as possible
import math

class Solution:
def integerBreak(self, n: int) -> int:
if n <= 3: return n-1
a, b = n // 3, n % 3
if b == 0:
return int(math.pow(3, a))
if b == 1:
return int(math.pow(3, a-1)*4)
return int(math.pow(3, a)*2)
# Dynamic programming
# dp[i] is the maximum product obtainable for i; dp[i] can be reached from the maximum of dp[j] * (i-j),
# but since the problem requires at least one split, also compare against j * (i-j)
class Solution:
def integerBreak(self, n: int) -> int:
dp = [0] * (n+1)
for i in range(2, n+1):
for j in range(i):
dp[i] = max(dp[i], dp[j]*(i-j), j*(i-j))
return dp[n]
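# Quick sanity check (a sketch): both approaches agree, e.g. integerBreak(10) == 36 (3*3*4).
# assert Solution().integerBreak(10) == 36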
|
NSIDE = 1024
PATH = '/users/dlenz/projects/global_emissivity/'
|
from tornado.iostream import IOStream
from sidl.message import Message
from log import logger
from connection import Connection
import socket
class Client(object):
def __init__(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.stream = IOStream(self.sock)
self._connected = False
self.message_callbacks = {}
def connect(self, host, port, connect_callback=None):
self.connect_callback = connect_callback
self.stream.connect((host, port), self._connect_callback)
def _connect_callback(self):
self._connected = True
self.connection = Connection(self.stream,
self.message_callbacks,
self.handle_close)
if self.connect_callback:
self.connect_callback(self)
def handle_close(self, connection):
logger.info('connection close %s', connection)
def register(self, message_type, callback):
if not issubclass(message_type, Message):
raise TypeError('message_type must be subclass of Message')
self.message_callbacks[message_type.id] = (message_type, callback)
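# Example usage (a sketch; `Ping` stands for a hypothetical sidl Message subclass and
# `handle_ping` for a callback, with the exact call signature defined by Connection):
# client = Client()
# client.register(Ping, handle_ping)
# client.connect('127.0.0.1', 9000)
# tornado.ioloop.IOLoop.current().start()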
|
import os
import luigi
import pandas as pd
from luigi.contrib.spark import PySparkTask
from pyspark import Row
from pyspark.sql import SparkSession
from bicis.etl.raw_data.split import UnifyRawData
from bicis.lib.data_paths import data_dir
class NextWindowTarget(PySparkTask):
"""
Builds a series for each station.
    :param mode: whether to count bike rentals ('rent') or returns ('return').
    :param window: size of the rolling window used to build the target (e.g. '1h').
    Outputs a csv file with these columns: [id, n_<mode>s]
"""
mode = luigi.ChoiceParameter(choices=['rent', 'return'])
window = luigi.Parameter(default='1h')
def output(self):
return luigi.LocalTarget(os.path.join(data_dir, 'next_{}_{}s_by_station.csv'.format(self.window, self.mode)))
def requires(self):
return UnifyRawData()
@property
def station_field(self):
return '{}_station'.format(self.mode)
@property
def timestamp_field(self):
return '{}_date'.format(self.mode)
@property
def output_field(self):
return 'n_{}s'.format(self.mode)
def main(self, sc, *args):
(
SparkSession.builder.getOrCreate()
.read.load(
self.input().path,
format="csv",
sep=",",
inferSchema="true",
header="true")
.rdd # TODO: check how to load and save RDDs directly
# Group data by station
.map(lambda x: (x[self.station_field], x))
.groupByKey()
# Compute target variable for each station
.flatMap(lambda x: self._compute_targets(x[1]))
# Dump to csv
.toDF()
.write.csv(self.output().path, header='true')
)
def _compute_targets(self, station_data):
if not station_data: return []
index = [x[self.timestamp_field] for x in station_data]
df = pd.DataFrame(
{
'series': [1] * len(station_data),
'id': [e['id'] for e in station_data]
}
,index=index
).sort_index(ascending=False)
        # pandas does not support rolling operations over non-monotonic indices
# so here we construct a monotonic index that respects the diffs between events
# and allows us to go back and forth
monotonic_index = [df.index[0]]
for i, prev in enumerate(df.index[1:], 1):
# The series is sorted in descending order
next = df.index[i-1]
monotonic_index.append(
monotonic_index[-1] + (next - prev)
)
# consider only full windows
max_date = df.series.index[0] - pd.Timedelta(self.window)
mask = [timestamp <= max_date for timestamp in df.index]
df['res'] = (
pd.Series(df.series.values, index=monotonic_index)
.rolling(self.window)
.sum()
.values
)
df = df[mask].set_index('id')
res_data = df.res.to_dict()
res = []
for doc in station_data:
if doc['id'] not in res_data: continue
res.append(
Row(**{
self.output_field: int(res_data[doc['id']]),
'id': doc['id']
})
)
return res
if __name__ == '__main__':
luigi.run(main_task_cls=NextWindowTarget)
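# Example invocation (a sketch; the script name is a placeholder and Spark must be configured
# for PySparkTask to run):
#   python next_window_target.py --mode rent --window 1h --local-scheduler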
|
# A number N is abundant if the sum of all its proper divisors exceeds N
# By mathematical analysis, it can be shown that all integers greater than
# 28123 can be written as the sum of two abundant numbers.
# Find the sum of all positive integers which cannot be written as the sum
# of two abundant numbers.
from math import floor, sqrt
LIMIT = 28123
# Find the sum of all positive integers which CANNOT be written as the sum
# of two abundant numbers.
def findSpecialSum():
listAbundant = generateAbundantNumber()
mySum = 0
for x in range(1, LIMIT + 1):
if not isSumOfAbundantNum(x, listAbundant):
mySum += x
return mySum
# Return true if N can be expressed of the sum of 2 abundant numbers
# in listAbundant
# Note that listAbundant contains abundant numbers in ascending order
def isSumOfAbundantNum(N, listAbundant):
if N > LIMIT:
return True
# Case 1: N = 2 * X where X is an abundant number
for x in listAbundant:
if x + x == N:
return True
# Case 2: N = X + Y where X, Y are 2 distinct abundant numbers
head = 0
tail = len(listAbundant) - 1
while head <= tail:
if listAbundant[head] + listAbundant[tail] == N:
return True
elif listAbundant[head] + listAbundant[tail] < N:
head += 1
else:
tail -= 1
return False
# Generate a list of abundant numbers whose value is at most LIMIT
def generateAbundantNumber():
listAbundant = []
for x in range(2, LIMIT + 1):
if findSumProperDivisor(x) > x:
listAbundant.append(x)
return listAbundant
# Find the sum of proper divisors of an input number N
def findSumProperDivisor(N):
if N <= 1:
return 0
mySum = 1
x = 2
upperBound = floor(sqrt(N))
while x <= upperBound:
if N % x == 0:
mySum += x
otherDivisor = N // x
if otherDivisor != x:
mySum += otherDivisor
x += 1
return mySum
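# Worked example: the proper divisors of 12 are 1, 2, 3, 4 and 6, which sum to 16 > 12,
# so 12 is the smallest abundant number and 24 = 12 + 12 is expressible as such a sum:
# findSumProperDivisor(12)                          # -> 16
# isSumOfAbundantNum(24, generateAbundantNumber())  # -> True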
|
from sklearn.model_selection import train_test_split
import pandas as pd
data = pd.read_csv("diabetes.csv", names=[1,2,3,4,5,6,7,8,9], header=None)
X = data[[1,2,3,4,5,6,7,8]]
y = data[9]
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=8)
""" Logistic Regression """
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, y_train)
print("logistic: {}".format(lr.score(X_test, y_test)))
""" KNN """
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
print("KNN: {}".format(clf.score(X_test, y_test)))
""" DecisionTree """
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier()
tree.fit(X_train, y_train)
print("Tree: {}".format(tree.score(X_test, y_test)))
""" RandomForest """
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
print("Forest: {}".format(rf.score(X_test, y_test)))
""" SupportVectorMachine """
from sklearn.svm import SVC
svc = SVC()
svc.fit(X_train, y_train)
print("SVM: {}".format(svc.score(X_test, y_test)))
""" NeuralNetwork """
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(max_iter=10000)
mlp.fit(X_train, y_train)
print("MLP: {}".format(mlp.score(X_test, y_test)))
|
from FK import *
import serial
import math
def usage():
print "Usage : input 3 pose parameters."
ser = serial.Serial(
port='/dev/cu.usbmodem1421',
baudrate=9600,
parity=serial.PARITY_ODD,
stopbits=serial.STOPBITS_TWO,
bytesize=serial.SEVENBITS
)
ser.isOpen()
print 'Enter your commands below.\r\nInsert "exit" to leave the application.'
cur_th = [0,-50,-50,0,-50,0]
key_loca = ["",""]
key_hole = ["",""]
tmp_cmd = []
target_cmd = []
while 1 :
# get keyboard input
cmd = raw_input(">> ")
if cmd == 'exit':
ser.close()
exit()
else:
#print "Wrote : %s" % (cmd)
pose = map(str, cmd.split())
if len(pose) != 3:
usage()
continue
cmd_input = "1"
cmd_input += " ".join(pose)
cmd_input += " "
print cmd_input
pose = []
ser.write(cmd_input)
#ik_result = IK(cur_th, pose, 1)
#cur_th = list(ik_result)
#cmd_2 = ["","","","","",""]
#for i in range(len(cmd_2)):
# cmd_2[i] = str(int(cur_th[i]))
#
#print "cmd input_1", " ".join(cmd_2)
#print "cmd input_2", " ".join(cmd_1)
#ser.write(" ".join(cmd_2)+" "+" ".join(cmd_1)+" ")
#print cur_th
#ser.write(' '.join(cur_th) + '\n')
#print ' '.join(cur_th) + '\n'
out = 'result'
#while ser.inWaiting() > 0:
# out += ser.read(1)
if out != '':
print "<< " + out
|
#!/usr/bin/env python3
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Incomplete example for working the SerDes-based a PIPE PHY. """
from amaranth import *
from luna import top_level_cli
from luna.gateware.platform import NullPin
from luna.gateware.usb.devices.ila import USBIntegratedLogicAnalyzer, USBIntegratedLogicAnalyzerFrontend
from luna.gateware.interface.serdes_phy.backends.ecp5 import LunaECP5SerDes
from luna.gateware.interface.serdes_phy.phy import SerDesPHY
WITH_ILA = False
class PIPEPhyExample(Elaboratable):
""" Hardware module that demonstrates grabbing a PHY resource with gearing. """
def __init__(self):
if WITH_ILA:
self.serdes_rx = Signal(32)
self.ctrl = Signal(4)
self.valid = Signal()
self.rx_gpio = Signal()
self.ila = USBIntegratedLogicAnalyzer(
bus="usb",
domain="ss",
signals=[
self.serdes_rx,
self.ctrl,
self.valid,
self.rx_gpio,
],
sample_depth=128,
max_packet_size=64
)
def emit(self):
frontend = USBIntegratedLogicAnalyzerFrontend(ila=self.ila)
frontend.emit_vcd("/tmp/output.vcd")
def elaborate(self, platform):
m = Module()
if WITH_ILA:
m.submodules.ila = self.ila
# Generate our domain clocks/resets.
m.submodules.car = platform.clock_domain_generator()
# Create our core PIPE PHY. Since PHY configuration is per-board, we'll just ask
# our platform for a pre-configured USB3 PHY.
m.submodules.phy = phy = platform.create_usb3_phy()
if WITH_ILA:
# Grab the SerDes from our PHY, for debugging.
serdes = phy.serdes
m.d.comb += [
# ILA
self.serdes_rx .eq(serdes.source.data),
self.ctrl .eq(serdes.source.ctrl),
self.valid .eq(serdes.source.valid),
self.rx_gpio .eq(serdes.rx_gpio),
self.ila.trigger .eq(~serdes.rx_gpio)
]
# Return our elaborated module.
return m
if __name__ == "__main__":
ex = top_level_cli(PIPEPhyExample)
if WITH_ILA:
ex.emit()
|
#!/usr/bin/env python
# coding: utf-8
"""
Utilities for manipulating and correcting OWL files in RDF.
"""
__all__ = [
"all_restrictions",
"is_bad_restr",
"describe_bad_restr",
"translate_bad_restr",
"all_bad_restrictions",
"repair_graph",
"repair_all_bad_restrictions",
"RELATIONS",
"IGNORE_PROPERTIES",
]
import argparse
import logging
import os
import sys
from typing import Any, List, Optional, Set, Tuple
import rdflib as rdf
from rdflib import OWL, RDF, RDFS, Graph
from rdflib.term import Node
Triple = Tuple[Any, Any, Any]
# Properties of a restriction that indicate the actual constraint
RELATIONS = [
OWL.allValuesFrom,
OWL.someValuesFrom,
OWL.minCardinality,
OWL.maxCardinality,
OWL.cardinality,
OWL.hasValue,
OWL.qualifiedCardinality,
]
# Properties of a restriction that we ignore when rewriting (because they are
# constraints ON the restrictions rather than constraints on the restrictED)
IGNORE_PROPERTIES = [OWL.onProperty, RDFS.comment, RDF.type]
CARDINALITY_RELS = [
OWL.minCardinality,
OWL.maxCardinality,
OWL.cardinality,
OWL.qualifiedCardinality,
]
RESTRICTIONS_QUERY = (
"""
PREFIX owl: <%s>
SELECT ?r
{
?r a owl:Restriction .
}
"""
% rdf.OWL
)
LOGGER = logging.getLogger(__name__)
def all_restrictions(graph: Graph) -> List[Node]:
"""Return a list of the nodes naming owl:Restrictions."""
return [r["r"] for r in graph.query(RESTRICTIONS_QUERY)]
PRINT_RELATIONS = [
x.replace("http://www.w3.org/2002/07/owl#", "owl:") for x in RELATIONS
]
rc_explanation: str = (
f": All restrictions must have one property from: {', '.join(PRINT_RELATIONS)}"
)
def is_bad_restr(restr: Node, graph: Graph) -> bool:
"""
Is this an ill-formed restriction?
"""
rrs: Set[Node] = set()
rel: Node
has_restricted: bool = False
has_onClass: bool = False
is_cardinality_rel: bool = False
global rc_explanation # pylint: disable=global-statement
for _r, rel, _x in graph.triples((restr, None, None)):
if rel == OWL.onProperty:
has_restricted = True
if rel in CARDINALITY_RELS:
is_cardinality_rel = True
if rel in RELATIONS:
rrs.add(rel)
if rel == OWL.onClass:
has_onClass = True
if not has_restricted:
print(f"Need owl:onProperty in {restr}")
return True
if len(rrs) == 0:
print(f"No components to restriction {restr}{rc_explanation}")
rc_explanation = ""
return True
if len(rrs) > 1:
restrs: str = ", ".join(
[x.replace("http://www.w3.org/2002/07/owl#", "owl:") for x in rrs]
)
print(f"Multiple components to restriction {restr}: {restrs}")
return True
    if has_onClass and not is_cardinality_rel:
        # Warn only; such restrictions are not flagged for repair here.
        print("owl:onClass is only permissible in cardinality restrictions.")
    return False
def describe_bad_restr(b: Node, g: Graph) -> None:
"""Print description of a bad restriction to sys.stdout"""
nsm = rdf.namespace.NamespaceManager(g)
nsm.bind("owl", OWL)
triples = g.triples((b, None, None))
for x, _y, z in g.triples((b, RDF.type, None)):
print("%s a %s" % (x, z))
for _, y, z in triples:
if y not in {RDFS.comment, RDF.type}:
print("\t%s %s" % (nsm.normalizeUri(y), nsm.normalizeUri(z)))
for x, _y, z in g.triples((b, RDFS.comment, None)):
print("\t%s rdfs:comment %s" % (x, z))
print()
def translate_bad_restr(b: Node, g: Graph) -> Tuple[List[Triple], List[Triple]]:
"""
Return a list of RDF triples to be added and removed to repair a bad restriction.
The triples to be removed are the triples describing the bad restriction, and triples
indicating that some OWL class inherits from the bad restriction.
The triples to be added are triples that create a set of well-formed restrictions,
one per constraint, and triples that indicate subclasses of those restrictions.
Parameters
----------
b : rdflib.term.Node
Node designating a restriction that is ill-formed and must be repaired.
g : rdflib.Graph
Graph containing the restriction to be repaired.
Returns
-------
to_add, to_delete : Tuple whose first element is a list of triples to be added
to `g`, and whose second is a list of triples to be removed.
"""
comment: Optional[Any] = None
new_bnodes: List[rdf.BNode] = []
to_add: List[Triple] = []
to_delete: List[Triple] = list(g.triples((b, None, None)))
nsm = rdf.namespace.NamespaceManager(g)
nsm.bind("owl", OWL)
def normalize(x):
return nsm.normalizeUri(x)
def find_children() -> List[Node]:
child_triples = g.triples((None, RDFS.subClassOf, b))
children = [x for x, _, _ in child_triples]
assert len(children) >= 1
return children
triples: List[Triple] = g.triples((b, None, None))
types: List[Node] = [z for _x, _y, z in g.triples((b, RDF.type, None))]
props: List[Node] = [z for _x, _y, z in g.triples((b, OWL.onProperty, None))]
comments: List[Node] = [z for _x, _y, z in g.triples((b, RDFS.comment, None))]
assert len(props) == 1
assert len(comments) <= 1
assert len(types) == 1
prop = props[0]
if comments:
comment = comments[0]
for _, y, z in triples:
if y not in set(IGNORE_PROPERTIES) | {RDF.type}:
bnode: rdf.BNode = rdf.BNode()
new_bnodes.append(bnode)
LOGGER.info(f"{nsm.normalizeUri(bnode)} a {nsm.normalizeUri(types[0])} ;")
to_add.append((bnode, RDF.type, types[0]))
LOGGER.info(f"\towl:onProperty {nsm.normalizeUri(prop)} ;")
to_add.append((bnode, OWL.onProperty, prop))
msg = f"\t{nsm.normalizeUri(y)} {nsm.normalizeUri(z)}"
to_add.append((bnode, y, z))
if comment:
msg += f"\n\trdfs:comment {comment} ."
to_add.append((bnode, RDFS.comment, comment))
else:
msg += " ."
LOGGER.info(msg)
LOGGER.info("Children of this restriction are:")
for x in find_children():
LOGGER.info(f"\t{x}")
LOGGER.info(f"\tRemove {normalize(x)} rdfs:subClassOf {normalize(b)}")
to_delete.append((x, RDFS.subClassOf, b))
for nb in new_bnodes:
LOGGER.info(
f"\t{x} {nsm.normalizeUri(RDFS.subClassOf)} {nsm.normalizeUri(nb)}"
)
to_add.append((x, RDFS.subClassOf, nb))
return to_add, to_delete
def all_bad_restrictions(g: Graph) -> List[Node]:
"""List of all bad restrictions in graph."""
restrs = all_restrictions(g)
return [r for r in restrs if is_bad_restr(r, g)]
def repair_all_bad_restrictions(
g: rdf.Graph, bad: Optional[List[rdf.BNode]] = None
) -> Graph:
if bad is None:
bad = all_bad_restrictions(g)
all_adds: List[Triple] = []
all_deletes: List[Triple] = []
for x in bad:
to_add, to_delete = translate_bad_restr(x, g)
all_adds += to_add
all_deletes += to_delete
for x in all_adds:
g.add(x)
for x in all_deletes:
g.remove(x)
return g
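# Example usage (a minimal sketch mirroring main() below; "ontology.ttl" is a placeholder path):
# g = rdf.Graph()
# g.parse("ontology.ttl", format="turtle")
# bad = all_bad_restrictions(g)
# if bad:
#     repair_all_bad_restrictions(g, bad)
#     g.serialize(destination="ontology.repaired.ttl", format="turtle")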
def repair_graph(
bad: List[Node], graph: Graph, dry_run: bool, file=sys.stdout, format_name="turtle"
) -> None:
if dry_run:
if file != sys.stdout:
LOGGER.addHandler(logging.StreamHandler(file))
for x in bad:
translate_bad_restr(x, graph)
else:
new_graph = repair_all_bad_restrictions(graph, bad)
print(new_graph.serialize(format=format_name).decode(), file=file)
def main(
*,
infile: str,
action: str,
verbose: int = 0,
quiet: bool = False,
dry_run: bool = False,
outfile: Optional[str] = None,
):
if verbose == 1:
LOGGER.setLevel(logging.INFO)
elif verbose >= 2:
LOGGER.setLevel(logging.DEBUG)
else:
LOGGER.setLevel(logging.WARNING)
# log to standard error
logging.basicConfig()
assert os.path.exists(infile), f"No such file: {infile}"
format_name = (
rdf.util.guess_format(outfile) if outfile else rdf.util.guess_format(infile)
)
LOGGER.debug("Guessed format is %s", format_name)
graph = rdf.Graph()
graph.parse(infile, format=format_name)
bad = all_bad_restrictions(graph)
if action == "check":
if bad:
print("Found bad restrictions in graph")
if not quiet:
to_file: bool = False
if outfile:
sys.stdout = open(outfile, "w")
to_file = True
for b in bad:
describe_bad_restr(b, graph)
if to_file:
sys.stdout.close()
sys.exit(1)
sys.exit(0)
elif action == "repair":
if not bad:
print("No repairs needed", file=sys.stderr)
sys.exit(1)
if outfile:
with open(outfile, "w") as file:
repair_graph(bad, graph, dry_run, file)
else:
repair_graph(bad, graph, dry_run)
def process_args():
ap = argparse.ArgumentParser()
ap.add_argument(
"action",
help="Action to perform.",
choices=["check", "repair"],
default="check",
)
ap.add_argument("input", help="File containing RDF graph to check")
ap.add_argument(
"--output", "-o", help="Write repaired RDF graph or check results here."
)
ap.add_argument("--verbose", "-v", dest="verbose", action="count")
ap.add_argument(
"--dry-run",
help="If repairing, just print the set of changes to be made, don't write output.",
action="store_true",
)
ap.add_argument(
"--quiet",
help="Don't print descriptions of bad restrictions: just set exit flag.",
action="store_true",
)
values = ap.parse_args()
verbose: int = getattr(values, "verbose", 0) or 0
outfile = getattr(values, "output", None)
main(
action=values.action,
infile=values.input,
outfile=outfile,
verbose=verbose,
quiet=values.quiet,
dry_run=values.dry_run,
)
if __name__ == "__main__":
process_args()
|
from Contacts import Contact
import operator
class AddressBookConsoleService:
address_books = {}
def create_contact(self):
"""
Method to create contact object
"""
contact_dict = {
"first_name" : "",
"last_name" : "",
"address" : "",
"city" : "",
"state" : "",
"zip" : "",
"phone_number" : "",
"email" : ""
}
contact = Contact(contact_dict)
contact = self.get_Details(contact)
return contact
def get_Details(self,contact):
"""
Method to fetch contact details from user
"""
contact.first_name = input("Enter first name \n").capitalize()
contact.last_name = input("Enter last name \n").capitalize()
contact.address = input("Enter address \n").capitalize()
contact.city = input("Enter city \n").capitalize()
contact.state = input("Enter state \n").capitalize()
contact.zip = input("Enter zip code \n").capitalize()
contact.phone_number = input("Enter phone number \n").capitalize()
contact.email = input("Enter email address \n")
return contact
def add_contact(self):
"""
Method to add contact to local storage
"""
new_contact = self.create_contact()
print("contact created")
address_book_name = input("Enter the address book name \n").capitalize()
address_book = self.address_books.get(address_book_name)
        # if the book does not already exist, create a new one
if address_book == None:
contact_list = [new_contact]
self.address_books[address_book_name] = contact_list
print("New address book created and contact added to it")
        # if the book already exists, add the contact to the existing book
else:
contact = AddressBookConsoleService.search_by_first_name(address_book,new_contact.first_name)
if len(contact) == 0:
address_book.append(new_contact)
print("Contact added sucessfully")
else:
print("Contact alread exsist")
def display_contact(self):
"""
Method to display all the contact that are present in the local storage
"""
for address_book in self.address_books:
contacts = "\n".join(str(contact) for contact in self.address_books.get(address_book))
print(f"Contacts In {address_book} are \n{contacts}")
def edit_contact(self):
"""
Method to edit existing contact
"""
book_name = input("Enter the address book name ").capitalize()
address_book = self.address_books.get(book_name)
if address_book != None:
first_name = input("Enter the person name \n").capitalize()
contact_to_edit = AddressBookConsoleService.search_by_first_name(address_book,first_name)
if len(contact_to_edit) == 0:
print("Contact not found")
else:
self.get_Details(contact_to_edit[0])
print("Contact Edited Sucessfully")
else:
print("No such address book")
def delete_contact(self):
"""
Method to delete contact from address book
"""
book_name = input("Enter the address book name ").capitalize()
address_book = self.address_books.get(book_name)
if address_book != None:
first_name = input("Enter the person name \n").capitalize()
contact_to_delete = AddressBookConsoleService.search_by_first_name(address_book,first_name)
if len(contact_to_delete) == 0:
print("Contact not found")
else:
address_book.remove(contact_to_delete[0])
print("Contact removed sucessfully")
else:
print("No such address book")
@staticmethod
def search_by_first_name(address_book,first_name):
"""
        This method searches the given address book for contacts whose first name matches the
        function parameter and returns the list of matching contacts
"""
return [contact for contact in address_book if contact.first_name == first_name]
def search_person_by_location(self):
"""
Method to search person details by their location across the multiple address book
"""
contacts = self.contact_founder()
if len(contacts) == 0:
print("No such contact found")
else:
search_contacts = "\n".join(contact.first_name +" "+ contact.last_name for contact in contacts)
print(search_contacts)
def view_person_by_location(self):
"""
Method to search person details by their location across the multiple address book
"""
contacts = self.contact_founder()
if len(contacts) == 0:
print("No such contact found")
else:
view_contacts = "\n".join(str(contact) for contact in contacts)
print(view_contacts)
def count_number_of_contact_by_location(self):
"""
Method to search person details by their location across the multiple address book and return the count
"""
contacts = self.contact_founder()
print(f"The contact having same city or state are : {len(contacts)}")
def contact_founder(self):
"""
Method to search contact by location
"""
location = input("Enter the city or state of which contacts name you have to find \n").capitalize()
matched_contacts_with_location = []
for address_book in self.address_books:
matched_contacts_with_location.extend([contact for contact in self.address_books.get(address_book) if contact.city == location or contact.state == location])
return matched_contacts_with_location
def sort_by_person_name(self):
"""
Method to sort contacts in address book by person name
"""
self.sort_by("first_name")
def sort_by(self,value):
"""
Method to sort contacts
"""
for address_book in self.address_books:
contacts = self.address_books.get(address_book)
contacts.sort(key= operator.attrgetter(value))
print("Sorting Successful")
def sort_by_location(self):
"""
Method to sort contacts by location
"""
print("Enter the your choice"
"\n 1 zip"
"\n 2 state"
"\n 3 city")
        user_choice = int(input())
        if user_choice == 1:
            self.sort_by("zip")
        elif user_choice == 2:
            self.sort_by("state")
        elif user_choice == 3:
            self.sort_by("city")
        else:
            print("Invalid option")
|
def tutte_polynomial(G): ...
def chromatic_polynomial(G): ...
|
import json
import networkx as nx
from networkx.readwrite import json_graph
def export(G, D3, Sigma, GEXF, name='Graph'):
if D3 == True:
print("Starting D3 Export for", name)
# D3
# Print JSON
        f = open(name + 'D3.json', 'w')
        f.write(json_graph.dumps(G))
        f.close()
print("D3 Exported")
if Sigma == True:
print("Starting Sigma Export for", name)
# Sigma
graphThing = json.loads(json_graph.dumps(G))
for link in graphThing["links"]:
link["source"] = link["sources"]
del link["sources"]
link["target"] = link["targets"]
del link["targets"]
graphThing["edges"] = graphThing["links"]
del graphThing["links"]
# Print JSON
        f = open(name + 'Sigma.json', 'w')
        f.write(json.dumps(graphThing, indent=2))
        f.close()
print("Exporting for Sigma")
if GEXF == True:
print("Starting GEXF export for", name)
# Print GEXF
nx.write_gexf(G, name + ".gexf", prettyprint=True)
print("Exporting GEXF")
if not D3 and not Sigma and not GEXF:
print("Not doin' nuthin'")
|
# encoding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import numpy as np
import sklearn as sk
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture as GMM
from sklearn.metrics import silhouette_score as SC
from hparams import hparams
def kmeans(X, hparams):
km = KMeans(hparams.n_clusters)
km.fit(X)
return km.labels_, km.cluster_centers_
def gmm(X, hparams):
gmm = GMM(hparams.n_clusters)
gmm.fit(X)
return gmm.predict(X), gmm.means_
def measure_silhouette(labels):
raise NotImplementedError
def get_char(labels):
return [chr(97 + l) for l in labels]
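# Example usage (a minimal sketch; assumes hparams.n_clusters has been configured):
# X = np.random.rand(200, 13)
# labels, centers = kmeans(X, hparams)
# print(''.join(get_char(labels)))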
|
from commonFunctions import takeHttpMethod
from commonFunctions import takeJsonFromFile
from commonFunctions import takeUrlPath
def takeRMQInput():
indirectInput = {
'name': 'rmqIndirectInput',
'queue': raw_input('Enter queue name to load the messages into : ')
}
messages = []
messageCount = int(raw_input('Enter the number of messages to be added :'))
print 'Enter message in each line'
for i in range(messageCount):
messages.append(raw_input())
indirectInput['messages'] = messages
return indirectInput
def takeKafkaInput():
indirectInput = {
'name': 'kafkaIndirectInput',
'topic': raw_input('Enter topic name : ')
}
messages = []
messageCount = int(raw_input('Enter the number of messages to be added :'))
print 'Enter message in each line'
for i in range(messageCount):
messages.append(raw_input())
indirectInput['messages'] = messages
return indirectInput
def takeAerospikeInput():
indirectInput = {
'name': 'aerospikeIndirectInput',
'connectionInfo': {
'host': raw_input("Enter aerospike host IP : "),
'port': int(raw_input('Enter port number : ')),
'user': raw_input("Enter aerospike user : "),
'password': raw_input("Enter aerospike password : ")
}
}
aerospikeData = []
for i in range(0, int(raw_input('Enter the number of namespaces required : '))):
namespace = {
'namespace': raw_input('Enter the namespace name with prefix regression_ : ')
}
if 'regression_' not in namespace['namespace']:
namespace['namespace'] = raw_input('The name entered did not have prefix regression_ . Try again : ')
assert 'regression_' in namespace['namespace']
namespace['set'] = raw_input('Enter set name : ')
records = []
while True:
if raw_input('Do you want to add records (Y/N) : ').upper() == 'Y':
record = {'PK': raw_input('Enter primary key : ')}
binData = {}
for j in range(0, int(raw_input("Enter the number of bins to be added : "))):
binKey = raw_input('Enter bin key : ')
binValue = raw_input('Enter bin value : ')
binData[binKey] = binValue
record['binData'] = binData
records.append(record)
else:
break
namespace['records'] = records
aerospikeData.append(namespace)
indirectInput['aerospikeData'] = aerospikeData
return indirectInput
def takeHbaseInput():
connectionTypeMap = {
1: 'REMOTE',
2: 'IN_MEMORY'
}
indirectInput = {'name': 'hbaseIndirectInput'}
tableName = raw_input('Enter table name with prefix regression_ :')
if 'regression_' not in tableName:
tableName = raw_input('Table name entered did not have prefix regression_ . Try again')
    assert 'regression_' in tableName
indirectInput['tableName'] = tableName
indirectInput['connectionType'] = connectionTypeMap[
int(raw_input('Enter connectionType : \n1.REMOTE \n2.IN_MEMORY : '))]
rows = []
while True:
if raw_input('Do you want to add row (Y/N) : ').upper() == 'Y':
row = {'rowKey': raw_input("Enter row key : ")}
data = {}
while True:
if raw_input('Do you want to add column family in the table ' + tableName + ' (Y/N) : ').upper() == 'Y':
colFamName = raw_input('Enter column family name : ')
columns = {}
for i in range(0, int(
raw_input("Enter the number of columns to want to add under column family " + colFamName))):
columnName = raw_input("Column name : ")
columnValue = raw_input("Column value : ")
columns[columnName] = columnValue
data[colFamName] = columns
else:
break
row['data'] = data
rows.append(row)
else:
break
indirectInput['rows'] = rows
return indirectInput
def takeElasticSearchInput():
connectionTypeMap = {
1: 'REMOTE',
2: 'IN_MEMORY'
}
indirectInput = {'name': 'elasticSearchIndirectInput'}
if connectionTypeMap[int(raw_input('Enter connectionType 1.REMOTE 2.IN_MEMORY: '))] =='REMOTE':
indirectInput['connectionInfo'] = {
'clusterName': raw_input('Enter cluster name '),
'host':raw_input('Enter the host IP '),
'connectionType':'REMOTE'
}
documentsOfIndexAndType = []
while True:
if raw_input('Do you want to add an index (Y/N) : ').upper() == 'Y':
index = {
'index': raw_input(
'Enter the index name (add prefix \'regression_\' if connection type is REMOTE) : '),
'type': raw_input('Enter the typename : '),
'mappingFile': raw_input('Enter the path to mapping file : ')
}
if raw_input(" Do you want to use routing key for the data insertion in ES (Y/N) : ").upper() == 'Y':
index['routingKey'] = raw_input('Enter the routing key value : ')
documents = []
while True:
if raw_input('Do you want to add a document under the index regression_' + index['index'] + ' (Y/N) : ').upper() == 'Y':
document = {'_id': raw_input('Enter the document id : ')}
document.update(takeJsonFromFile())
documents.append(document)
else:
break
index['documents'] = documents
else:
break
documentsOfIndexAndType.append(index)
indirectInput['documentsOfIndexAndType'] = documentsOfIndexAndType
return indirectInput
def takeHazelCastInput():
def getMaps():
maps = {}
while True:
if raw_input('Do you want to add map (Y/N) : ').upper() == 'Y':
mapName = raw_input('Enter map name : ')
map = maps[mapName] = {}
mapData = {}
for i in range(0, int(raw_input("Enter the number of K-V pair to be added : "))):
testKey = raw_input('Enter key : ')
value = raw_input('Enter value : ')
mapData[testKey] = value
map['mapData'] = mapData
map['keyClass'] = raw_input('Enter complete canonical class name of the key ')
map['valueClass'] = raw_input('Enter complete canonical class name of the value ')
else:
break
return maps
hazelCastType = {
1: 'Embedded',
2: 'Remote'
}
selectDS = {
1: 'maps',
2: 'none'
}
indirectInput = {}
hazelCastCluster = hazelCastType[
int(raw_input('Enter the type of Hazelcast cluster you need to run/connect to : \n1.Embedded\n2.Remote\n'))]
if hazelCastCluster == 'Embedded':
indirectInput = {
'name': 'embeddedhzIndirectInput'
}
hazelcastDS = {}
while True:
ds = selectDS[int(raw_input('Enter the type of data structure you want to load : \n1.maps\n2.none '))]
if ds == 'none':
break
elif ds == 'maps':
hazelcastDS[ds] = getMaps()
indirectInput['hazelcastDS'] = hazelcastDS
elif hazelCastCluster == 'Remote':
indirectInput = {
'name': 'serverhzIndirectInput',
'group': raw_input('Enter group name : '),
'password': raw_input('Enter password : '),
'user': raw_input('Enter user name : '),
}
hazelcastDS = {}
while True:
ds = selectDS[int(raw_input('Enter the type of data structure you want to load : \n1.maps\n2.none '))]
if ds == 'none':
break
elif ds == 'maps':
hazelcastDS[ds] = getMaps()
indirectInput['hazelcastDS'] = hazelcastDS
serializerConfigMap = {}
while True:
choice = raw_input('Do you want to add serializer class in serializer config map (Y/N) : ')
if choice == 'Y':
className = raw_input('Enter class name : ')
serializerClass = raw_input('Enter serializer class name : ')
serializerConfigMap[className] = serializerClass
else:
break
indirectInput['serializerConfigMap'] = serializerConfigMap
return indirectInput
def takeRedisInput():
def getHashMap():
hashMap = {}
while True:
if raw_input('Do you want to add an outer key (Y/N) : ').upper() == 'Y':
outerKey = raw_input('Enter outer key : ')
innerMap = {}
for j in range(0, int(raw_input("Enter the number of K-V pair to be added under " + outerKey))):
innerKey = raw_input('Enter the inner key : ')
innerValue = raw_input('Enter the inner value : ')
innerMap[innerKey] = innerValue
hashMap[outerKey] = innerMap
else:
break
return hashMap
def getKeyValues():
keyValues = {}
for j in range(0, int(raw_input('Enter the number of key:value pair to be added : '))):
key = raw_input('Enter key : ')
val = int(raw_input('Enter value : '))
keyValues[key] = val
return keyValues
def getSortedSets():
sortedSets = {}
for j in range(0, int(raw_input('Enter the number of sorted set to be added : '))):
            setName = raw_input('Enter name of set ' + str(j + 1) + ' :')
set = sortedSets[setName] = {}
for j in range(0, int(raw_input("Enter the number of elements to be added in set " + setName + ' :'))):
key = raw_input('Enter mem key :')
value = raw_input('Enter value :')
set[key] = value
return sortedSets
def getSet():
sets = {}
for j in range(0, int(raw_input('Enter the number of sets to be added : '))):
setKey = raw_input('Enter set key : ')
set = sets[setKey] = []
values = int(raw_input('Enter the number of elements to be added under set ' + setKey + ' : '))
print 'Enter value in each line'
for j in range(values):
set.append(raw_input())
return sets
def getLists():
lists = {}
for j in range(0, int(raw_input('Enter the number of list you want to use : '))):
listKey = raw_input('Enter list key : ')
list = lists[listKey] = []
values = int(raw_input('Enter the number of elements to be added under : ' + listKey))
print 'Enter value in each line'
for j in range(values):
list.append(raw_input())
return lists
clusterType = {
1: 'SENTINEL',
2: 'SINGLE_HOST'
}
dataStructure = {
1: 'hashMap',
2: 'keyValues',
3: 'sortedSets',
4: 'sets',
5: 'lists',
6: 'none'
}
indirectInput = {
'name': 'redisIndirectInput',
"clusterType": clusterType[int(raw_input('Enter the type of cluster 1.SENTINEL 2.SINGLE_HOST: '))],
"masterName": "master"
}
dbToDSMap = {}
for i in range(0, int(raw_input('How many databases do you need to load : '))):
dbNumber = raw_input('Enter the db number : ')
dbMap = {}
while True:
ds = dataStructure[int(raw_input('Enter the data structure that you want to add : '
'1.hashMap 2.keyValues 3.sortedSets 4.sets 5.lists 6.none: '))]
if ds == 'none':
break
elif ds == 'hashMap':
dbMap[ds] = getHashMap()
elif ds == 'keyValues':
dbMap[ds] = getKeyValues()
elif ds == 'sortedSets':
dbMap[ds] = getSortedSets()
elif ds == 'sets':
dbMap[ds] = getSet()
elif ds == 'lists':
dbMap[ds] = getLists()
dbToDSMap[dbNumber] = dbMap
indirectInput['dbToDSMap'] = dbToDSMap
return indirectInput
def takeSolrInput():
connectionTypeMap = {
1: 'LOCALHOST',
2: 'IN_MEMORY'
}
indirectInput = {
"name": "solrIndirectInput",
"connectionType": connectionTypeMap[
int(raw_input('Enter connectionType 1.LOCALHOST 2.IN_MEMORY: '))],
"solrData":
{
"coreName": raw_input('Enter core name : '),
"solrConfigFiles": raw_input(
'Enter the path of folder containing solr.xml,solrconfig.xml and managed-schema : '),
"uniqueKey": raw_input('Enter unique key of the document : ')
}
}
documents = []
while True:
if raw_input('Do you want to add document (Y/N) : ').upper() == 'Y':
document = takeJsonFromFile()
documents.append(document)
else:
break
solrData = {'documents': documents}
indirectInput['solrData'].update(solrData)
return indirectInput
def takeHttpInput():
indirectInput = {
'name': 'httpIndirectInput',
'specification': {
'request': {
'method': takeHttpMethod(),
'url': takeUrlPath()
},
'response': {
'body': takeJsonFromFile("Enter the file containing response expected:")
}
}
}
return indirectInput
def takeMysqlInput():
connectionTypeMap = {
1: 'LOCALHOST',
2: 'IN_MEMORY'
}
indirectInput = {
'name': 'mysqlIndirectInput',
'databaseName': raw_input('Enter DB name : '),
'connectionType': connectionTypeMap[int(raw_input('Enter connectionType 1.LOCALHOST 2.IN_MEMORY: '))],
'ddlStatements': []
}
statementsFile = open(raw_input(
'Enter the path to .sql file that contains SQL commands to run one command each line: create table, insert data: '),
'r')
sqlFile = statementsFile.read()
statementsFile.close()
sqlCommands = sqlFile.split(";")
for command in sqlCommands:
indirectInput['ddlStatements'].append(command)
return indirectInput
def takeIndirectInputs():
funcMapper = {
1: takeHttpInput,
2: takeMysqlInput,
3: takeRedisInput,
4: takeSolrInput,
5: takeHbaseInput,
6: takeAerospikeInput,
7: takeElasticSearchInput,
8: takeHazelCastInput,
9: takeKafkaInput,
10: takeRMQInput
}
indirectInputs = []
message = 'Add dependencies(Y/N)?'
while True:
print message,
message = 'Add more dependencies(Y/N)?'
choice = raw_input()
assert choice.upper() == 'Y' or choice.upper() == 'N'
if choice.upper() == 'N':
break
print "Enter dependency number 1.Http 2.Mysql 3.Redis 4.Solr 5.Hbase 6.Aerospike 7.elasticsearch 8.hazelCast 9.Kafka 10.RMQ: ",
depType = int(raw_input())
assert 1 <= depType <= 10
prepareFunc = funcMapper[depType]
indirectInputs.append(prepareFunc())
return indirectInputs
# print json.dumps(indirectInputs, indent=4)
# takeIndirectInputs()
|
from data_process.parse_vol_data import parse_vol_data
from vol_models.black_cubic import BlackCubic
from vol_models.dupire_local import DupireLocal
from vol_models.heston_slv import HestonSLV
from visualization.visualize_vol_surface import visualize_vol_surface
from opts.volmodel_opts import VolModelOpts
def black_cubic_test(opt):
data = opt.data
data_parsed = parse_vol_data(data)
vol_model = BlackCubic(data_parsed[0])
visualize_vol_surface(vol_model, 535.0, 730.0, 0.0, 2.0, name = 'black_cubic', saveDir = opt.saveDir)
def dupire_local_test(opt):
data = opt.data
data_parsed = parse_vol_data(data)
vol_model = DupireLocal(data_parsed[0])
visualize_vol_surface(vol_model, 535.0, 730.0, 0.0, 2.0, name = 'dupire_local', saveDir = opt.saveDir)
def heston_slv_test(opt):
data = opt.data
data_parsed = parse_vol_data(data)
vol_model = HestonSLV(data_parsed[0])
visualize_vol_surface(vol_model, 535.0, 730.0, 0.0, 2.0, name = 'heston_slv', saveDir = opt.saveDir)
def main():
opt = VolModelOpts().parse()
print('Black cubic vol surface:')
black_cubic_test(opt)
print('Dupire local vol surface:')
dupire_local_test(opt)
print('Heston stochastic local vol surface:')
heston_slv_test(opt)
if __name__ == '__main__':
main()
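# Example invocation (flag names are assumptions mirroring the opt.data / opt.saveDir
# attributes read above; adjust to whatever VolModelOpts actually defines):
#   python run_vol_models.py --data vol_quotes.csv --saveDir ./vol_plots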
|
from Tkinter import *
from tkFileDialog import *
from tkFont import *
import os.path
import sys
import re
from assemblerlib import *
basefilename = "Untitled"
filename = ""
fileexists = False
saved = True
canvaswidthdefault = 20
canvaswidth = canvaswidthdefault
currentlength = 1
mnemonicstring = "sll|srl|add|sub|nand|nor|and|or|bez|bnez|bgez|blez|bgz|blz|li|lb|sb"
registerstring = "r0|r1|r2|r3|r4|r5|r6|r7"
def openFile():
global filename
global basefilename
global saved
openfilename = askopenfilename()
    if openfilename:
filename = openfilename
basefilename = os.path.basename(filename)
asmfile = open(filename, "r")
asmfile.seek(0)
asmdata = asmfile.read()
textArea.delete("1.0", "end - 1c")
textArea.insert("1.0", asmdata)
asmfile.close()
filemenu.entryconfig(filemenu.index("Save"), state = NORMAL)
frame.title("muCPU Assembler [" + basefilename + "]")
frame.focus()
initonOpen()
print "File Opened"
saved = True
def saveFile():
global filename
global saved
asmdata = textArea.get("1.0", "end - 1c")
asmfile = open(filename, "w")
asmfile.seek(0)
asmfile.truncate()
asmfile.write(asmdata)
asmfile.close()
print "Save Complete"
saved = True
frame.title("muCPU Assembler [" + basefilename + "]")
def saveFileAs():
global filename
global fileexists
global basefilename
global saved
saveasfilename = asksaveasfilename()
    if saveasfilename:
filename = saveasfilename
basefilename = os.path.basename(filename)
fileexists = True
asmdata = textArea.get("1.0", "end - 1c")
asmfile = open(filename, "w")
asmfile.seek(0)
asmfile.truncate()
asmfile.write(asmdata)
asmfile.close()
filemenu.entryconfig(filemenu.index("Save"), state = NORMAL)
frame.title("muCPU Assembler [" + basefilename + "]")
frame.focus()
print "Save Complete"
saved = True
def exitApp():
frame.destroy()
sys.exit()
def compileASM():
global filename
cpu_out = ""
asm_in = textArea.get("1.0", END)
asmlines = re.split("\n", asm_in)
for i in range (len(asmlines)):
if (asmlines[i] != ""):
cpu_out += str(i) + " => x\"" + decode(asmlines[i]) + "\",\n"
name, ext = os.path.splitext(filename)
hexfilename = name + ".hex"
hexfile = open(hexfilename, "w")
hexfile.seek(0)
hexfile.truncate()
hexfile.write(cpu_out)
hexfile.close()
print ("Compiled hex code to " + hexfilename)
def updateLinesEvent(event):
drawLinenums()
def updateHighlightEvent(event):
highlightSyntax((re.split("\.", textArea.index(INSERT))[0] + ".0"), INSERT)
def initonOpen():
highlightSyntax("1.0", END)
drawLinenums()
def drawLinenums():
global canvaswidth
global canvaswidthdefault
linenumbers.delete("all")
i = textArea.index("@0,0")
while True:
dline = textArea.dlineinfo(i)
if dline is None: break
y = dline[1]
linenum = str(i).split(".")[0]
linenumbers.create_text(canvaswidth, y, anchor=NE,text=linenum)
i = textArea.index("%s+1line" % i)
linenumbers.config(width = canvaswidth)
def highlightSyntax(start, end):
global mnemonicstring
global registerstring
mnemoniclen = StringVar()
registerlen = StringVar()
numberlen = StringVar()
commentlen = StringVar()
pos = start
while True:
pos = textArea.search(mnemonicstring, pos, end, regexp = True, count = mnemoniclen)
#print pos
if not pos: break
textArea.tag_add("mnemonic", pos, pos + " + " + str(mnemoniclen.get()) + "c")
posarry = re.split("\.", pos)
posarry[1] = str(int(posarry[1]) + 1)
pos = posarry[0] + "." + posarry[1] + ("0" * (len(posarry[1]) - 2))
pos = start
while True:
pos = textArea.search("-?\\d", pos, end, regexp = True, count = numberlen)
#print pos
if not pos: break
textArea.tag_add("number", pos, pos + " + " + str(numberlen.get()) + "c")
posarry = re.split("\.", pos)
posarry[1] = str(int(posarry[1]) + 1)
pos = posarry[0] + "." + posarry[1] + ("0" * (len(posarry[1]) - 2))
pos = start
while True:
pos = textArea.search(registerstring, pos, end, regexp = True, count = registerlen)
#print pos
if not pos: break
textArea.tag_add("register", pos, pos + " + " + str(registerlen.get()) + "c")
posarry = re.split("\.", pos)
posarry[1] = str(int(posarry[1]) + 1)
pos = posarry[0] + "." + posarry[1] + ("0" * (len(posarry[1]) - 2))
pos = start
while True:
pos = textArea.search("//", pos, end, regexp = False, count = commentlen)
#print pos
if not pos: break
textArea.tag_add("comment", pos, re.split("\.", pos)[0] + ".end")
posarry = re.split("\.", pos)
posarry[1] = str(int(posarry[1]) + 1)
pos = posarry[0] + "." + posarry[1] + ("0" * (len(posarry[1]) - 2))
def keypressed(event):
global saved
saved = False
frame.title("muCPU Assembler *[" + basefilename + "]*")
updateHighlightEvent(event)
updateLinesEvent(event)
scrollbar.set
def updateall(event):
highlightSyntax("1.0", END)
updateLinesEvent(event)
scrollbar.set
def saveevent(event):
saveFile()
Tk().withdraw()
frame = Toplevel(bg="#D8D8D8")
frame.bind("<Button-1>", updateall)
frame.bind("<MouseWheel>", updateall)
frame.bind("<B1-Motion>", updateall)
frame.bind("<ButtonRelease-1>", updateall)
frame.bind("<Key>", keypressed)
frame.bind("<Control-s>", saveevent)
scrollbar = Scrollbar(frame)
scrollbar.pack(side = RIGHT, fill = Y)
frame.title("muCPU Assembler [" + basefilename + "]")
textArea = Text(frame, height = 30, width = 100, padx = 3, pady = 3, yscrollcommand = scrollbar.set, selectbackground="#C5C5C5")
textArea.pack(side=RIGHT)
scrollbar.config(command=textArea.yview)
mnemonicfont = Font(frame, family = "Courier", size = 10, weight = "bold")
textArea.tag_config("mnemonic", foreground = "blue", font = mnemonicfont)
numberfont = Font(frame, family = "Courier", size = 10)
textArea.tag_config("number", foreground = "#df9200", font = numberfont)
registerfont = Font(frame, family = "Courier", size = 10)#, slant = "italic")
textArea.tag_config("register", foreground = "red", font = registerfont)
commentfont = Font(frame, family = "Courier", size = 10)#, slant = "italic")
textArea.tag_config("comment", foreground = "#3FA023", font = commentfont)
linenumbers = Canvas(frame, width = canvaswidthdefault, height = 487, bg = "#D8D8D8", highlightbackground = "#D8D8D8")
linenumbers.pack()
menubar = Menu(frame)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Open", command=openFile)
filemenu.add_command(label="Save", command=saveFile, state = DISABLED)
filemenu.add_command(label="Save as...", command=saveFileAs)
filemenu.add_command(label="Exit", command=exitApp)
menubar.add_cascade(label="File", menu=filemenu)
runmenu = Menu(menubar, tearoff=0)
runmenu.add_command(label="Compile", command=compileASM)
menubar.add_cascade(label="Run", menu=runmenu)
frame.config(menu=menubar)
initonOpen()
frame.resizable(0,0)
frame.mainloop()
#Current code
"""
lw r4, 176(r0)
lw r3, 177(r0)
sub r2, r4, r1
bez r2, 8
sw r1, 252(r0)
bez r0, -8
add r1, r1, r3
sll r0, r0, r0
bez r0, -2
"""
|
from fares_db import *
from user_db import *
from userFlights_db import *
from helpers import *
from privateConstants import *
from datetime import datetime, timedelta
# TODO: grep out logs for scraper, runUserFares and Email (once running)
usernames = getAllUsernames()
usersNoFlights = 0
usernameRank = []
for username in usernames:
flights = getUserFlights(username)
usernameRank.append((username, len(flights)))
if len(flights) == 0:
usersNoFlights = usersNoFlights + 1
usernameRank = sorted(usernameRank, key=lambda tup: tup[1], reverse = True)
flights = getAllFlights()
flightsNoFares = 0
for flight in flights:
fares = getFaresForFlight(flight)
if (len(fares) == 0):
flightsNoFares = flightsNoFares + 1
flightCount = countUserFlights()
users = countUsers()
fares = countFares()
flightsNoFaresPerc = 100.0 * float(flightsNoFares) / float(flightCount)
usersNoFlightsPerc = 100.0 * float(usersNoFlights) / float(users)
email_str = "Fares: %s\nUsers: %s\nUserFlights: %s \n\nPercent users with no flight: %s%%\nPercent flights with no fares: %s%%" % ("{:,}".format(fares), "{:,}".format(users), "{:,}".format(flightCount), "{:10.1f}".format(usersNoFlightsPerc), "{:10.1f}".format(flightsNoFaresPerc)) + "\n"
############### Display top 5 users ##################
i = 0
for item in usernameRank:
i = i + 1
email_str = email_str + "\n" + item[0] + ": " + str(item[1])
    if i >= 5:
break
############# Display top 5 flights with stalest fares ###############
email_str = email_str + "\n"
times = []
nowtime = datetime.now()
for flight in flights:
if flight.date < (datetime.now() + timedelta(days=3)): #close flight
continue
fares = getFaresForFlight(flight)
if len(fares) > 0:
diff = nowtime - fares[-1].fare_validity_date
times.append((flight,diff))
times = sorted(times, key=lambda a: a[1], reverse = True)
a = 1
for t in times:
email_str = email_str + "\n" + str(t[0]) + ": " + str(t[1])
a = a + 1
if a > 5:
break
subject = "Daily Report: %s" % (datetime.now().strftime("%m/%d/%Y"))
print(email_str)
sendEmail(reportEmail, subject, email_str)
|
import argparse
import asyncore
import logging
from Component import Component
parser = argparse.ArgumentParser(description="Ircd-component for xmpp servers")
parser.add_argument( "--xmpp-host", default="localhost"
, dest="xmpp_host"
, help="The xmpp-server to connect to" )
parser.add_argument( "--xmpp-port", default=5347, type=int
, dest="xmpp_port"
, help="The port of the xmpp-server" )
parser.add_argument( "--transport-domain", required=True
, dest="transport_domain"
, help="The domain this component will serve under" )
parser.add_argument( "--transport-password", required=True
, dest="transport_password"
, help="Password used to auth against the xmpp-server" )
parser.add_argument( "--ircd-room", required=True
, dest="ircd_room"
, help="The room to relay to the muc" )
parser.add_argument( "--muc-room", required=True
, dest="muc_room"
, help="The muc-room" )
parser.add_argument( "--debug", default=False, action='store_true'
, help="Print a lot of debug-messages" )
config = parser.parse_args()
logger = logging.getLogger()
fmt = logging.StreamHandler()
fmt.setFormatter(logging.Formatter(fmt='%(name)s: %(message)s'))
logger.addHandler(fmt)
if config.debug:
logger.setLevel(logging.DEBUG)
Component(vars(config))
asyncore.loop()
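# Example invocation (script name and values are placeholders):
#   python component_main.py --xmpp-host xmpp.example.org --transport-domain irc.example.org \
#       --transport-password secret --ircd-room '#ops' --muc-room ops@conference.example.org --debug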
|
from log import log
class Region:
    def __init__(self, rectangle, frame):
        self.rectangle = rectangle
        self.frame = frame
def is_car(self):
# TODO: return False if primary color indicates region is just headlights
return True
def to_dict(self):
        (x, y, height, width) = self.rectangle
return {
'x': x, 'y': y,
'height': height, 'width': width,
'image_s3_key': self.frame.s3_key,
}
@staticmethod
def merge_regions(regions):
regions = list(regions)
already_merged = set()
for i in range(len(regions)):
if i + 1 > len(regions):
break
for j in range(i + 1, len(regions)):
if i in already_merged or j in already_merged:
continue
r1 = regions[i]
r2 = regions[j]
if can_merge(r1, r2):
already_merged.add(i)
already_merged.add(j)
yield merge(r1, r2)
# also return anything that wasn't merged
for index, region in enumerate(regions):
if index in already_merged:
continue
yield region
def can_merge(r1, r2):
# TODO: compare regions
return False
def merge(r1, r2):
# TODO: merge regions
return r1
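if __name__ == '__main__':
    # Minimal usage sketch.  The namedtuple Frame below is a hypothetical stand-in: the
    # real frame object only needs an `s3_key` attribute for Region.to_dict() to work.
    from collections import namedtuple
    Frame = namedtuple('Frame', ['s3_key'])
    frame = Frame(s3_key='frames/0001.jpg')
    regions = [Region((10, 20, 64, 48), frame), Region((200, 40, 64, 48), frame)]
    # can_merge() is still a stub returning False, so both regions pass through unchanged.
    for region in Region.merge_regions(regions):
        print(region.to_dict())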
|
from SignalGenerationPackage.SignalController import SignalController
from SignalGenerationPackage.UserSignal.UserSignal import UserSignal
from SignalGenerationPackage.UserSignal.UserSignalObserver import UserSignalObserver
from SignalGenerationPackage.UserSignal.AccelerationTimeCallBackOperator import AccelerationTimeCallBackOperator
from SignalGenerationPackage.UserSignal.DecelerationTimeCallBackOperator import DecelerationTimeCallBackOperator
from SignalGenerationPackage.UserSignal.HighLevelFrequencyCallBackOperator import HighLevelFrequencyCallBackOperator
from SignalGenerationPackage.UserSignal.LowLevelFrequencyCallBackOperator import LowLevelFrequencyCallBackOperator
from SignalGenerationPackage.UserSignal.PlateauTimeCallBackOperator import PlateauTimeCallBackOperator
from SignalGenerationPackage.UserSignal.PointsNumberCallBackOperator import PointsNumberCallBackOperator
from SignalGenerationPackage.UserSignal.EndTimeCallBackOperator import EndTimeCallBackOperator
from SignalGenerationPackage.UserSignal.StartTimeCallBackOperator import StartTimeCallBackOperator
from SignalGenerationPackage.UserSignal.VerticalOffsetCallBackOperator import VerticalOffsetCallBackOperator
from SignalGenerationPackage.UserSignal.AutoFillCallBackOperator import AutoFillCallBackOperator
from SignalGenerationPackage.UserSignal.UserSignalMainWindow import UserSignalMainWindow
from SignalGenerationPackage.UserSignal.UserSignalUIParameters import UserSignalUIParameters
from SignalGenerationPackage.UserSignal.RequestFrequencyCallBackOperator import RequestFrequencyCallBackOperator
from CallBackOperators.ForwardSendingOperator import ForwardSendingOperator
class UserSignalController(SignalController):
def __init__(self):
super().__init__()
# overridden
def init_model(self):
self.model = UserSignal()
# overridden
def init_observer(self):
self.observer = UserSignalObserver(self.model, self.main_window.plot)
# overridden
def init_main_window(self):
self.main_window = UserSignalMainWindow()
# overridden
def init_param_names(self):
self.param_names = [
'Start Time', 'Acceleration Time', 'Plateau Time',
'Deceleration Time', 'Low Level Frequency', 'High Level Frequency',
'Vertical Offset', 'Points Number', 'End Time', 'Request Frequency'
]
# overridden
def init_slider_constants(self):
self.slider_constants = [
UserSignalUIParameters.StartTimeCalcConstant,
UserSignalUIParameters.AccelerationTimeCalcConstant,
UserSignalUIParameters.PlateauTimeCalcConstant,
UserSignalUIParameters.DecelerationTimeCalcConstant,
UserSignalUIParameters.LowLevelFrequencyCalcConstant,
UserSignalUIParameters.HighLevelFrequencyCalcConstant,
UserSignalUIParameters.VerticalOffsetCalcConstant,
UserSignalUIParameters.PointsNumberCalcConstant,
UserSignalUIParameters.EndTimeCalcConstant,
UserSignalUIParameters.RequestFreqCalcConstant
]
# overridden
def init_sliders(self):
ui = self.main_window.user_interface
self.sliders = [
ui.StartTimehorizontalSlider,
ui.AccelerationTimehorizontalSlider,
ui.PlateauTimehorizontalSlider,
ui.DecelerationTimehorizontalSlider,
ui.LowLevelFrequencyhorizontalSlider,
ui.HighLevelFrequencyhorizontalSlider,
ui.VerticalOffsethorizontalSlider,
ui.PointsNumberhorizontalSlider,
ui.EndTimehorizontalSlider,
ui.RequestFrequencyhorizontalSlider
]
def init_plot_widget(self):
self.plot_widget = self.main_window.user_interface.plot_widget
def init_callback_operators(self):
self.callback_operators = \
[
StartTimeCallBackOperator(self.model),
AccelerationTimeCallBackOperator(self.model),
PlateauTimeCallBackOperator(self.model),
DecelerationTimeCallBackOperator(self.model),
EndTimeCallBackOperator(self.model),
VerticalOffsetCallBackOperator(self.model),
HighLevelFrequencyCallBackOperator(self.model),
LowLevelFrequencyCallBackOperator(self.model),
PointsNumberCallBackOperator(self.model),
RequestFrequencyCallBackOperator(self.model),
AutoFillCallBackOperator(self.slider_constants, self.param_names, self.sliders, model=None)
]
# overridden
def append_sending_operator(self):
self.callback_operators.append(ForwardSendingOperator(self.main_window, self.plot_widget, DebugMode=True))
        # Connects to the widgets of the signal-generation window,
        # so the signal can be sent straight from that window (user convenience).
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
def minSubsequence(self, nums: List[int]) -> List[int]:
total_sum = sum(nums)
nums.sort(reverse=True)
current_sum = 0
for i, num in enumerate(nums):
current_sum += num
if 2 * current_sum > total_sum:
return nums[: i + 1]
if __name__ == "__main__":
solution = Solution()
assert [10, 9] == solution.minSubsequence([4, 3, 10, 9, 8])
assert [7, 7, 6] == solution.minSubsequence([4, 4, 7, 6, 7])
assert [6] == solution.minSubsequence([6])
|
import os, sys
# http://effbot.org/zone/import-string.htm
def my_import(name):
m = __import__(name)
for n in name.split(".")[1:]:
m = getattr(m, n)
return m
def run(D):
print 'in run_script.run: ', os.path.basename(__file__)
prog = D['prog']
module = my_import("scripts." + prog)
print module.__file__
return module.run(D)
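if __name__ == '__main__':
    # Quick standalone sanity check for my_import (does not need the "scripts" package
    # that run() expects).  __import__("os.path") returns the top-level os module and
    # my_import then walks the attribute chain down to the submodule.
    print my_import("os.path").__name__   # e.g. 'posixpath' on Linux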
|
"""
Deep Lab Cut
============
This module provides an interface between data collected during the reaching task and
forepaw coordinates extracted from videos of the task using Deep Lab Cut.
This depends on pandas, pytables and h5py.
"""
import glob
import json
import os
import pickle
import pandas as pd
import reach
from reach.utilities import cache
class Session(reach.Session):
"""
This class adds methods to reach.Session that facilitate loading of Deep Lab Cut
data and aligning this data with data collected during the reach task. This data can
then be used for analysis or plotting.
It is assumed that the JSON file storing data for a mouse contains a "video" field
for each session to be analysed that provides the file name of the video from which
Deep Lab Cut coordinates were extracted. This is used to identify the DLC data
files, as the video's file name prefixes DLC's outputs.
"""
def __init__(self, data, dlc_dir):
self.data = data
self.dlc_dir = dlc_dir
self.coordinates = self.load_coordinates()
@classmethod
def init_all_from_file(cls, data_file, dlc_dir):
"""
Generate a :class:`list` of :class:`Session` objects from data stored in a
Training JSON.
Parameters
----------
data_file : :class:`str`
Full path to file containing existing training data.
dlc_dir : :class:`str`
Directory containing data output by Deep Lab Cut for this mouse.
"""
with open(data_file, "r") as fd:
previous_data = json.load(fd)
training_data = [cls(data=data, dlc_dir=dlc_dir) for data in previous_data]
return training_data
    def load_coordinates(self):
        """
        Load coordinate data generated by DLC as a pandas DataFrame. This can be in H5
        or CSV format.
        """
        basename = os.path.splitext(self.data["video"])[0]
        h5_paths = glob.glob(os.path.join(self.dlc_dir, basename + "*.h5"))
        if h5_paths and os.path.isfile(h5_paths[0]):
            df = pd.read_hdf(h5_paths[0], "df_with_missing")
            return df[df.keys()[0][0]]
        csv_paths = glob.glob(os.path.join(self.dlc_dir, basename + "*.csv"))
        if csv_paths and os.path.isfile(csv_paths[0]):
            df = pd.read_csv(csv_paths[0])
            return df[df.keys()[0][0]]
        return None
@cache
def metadata(self):
"""
Load DLC metadata from pickle file.
"""
        basename = os.path.splitext(self.data["video"])[0]
        pickle_paths = glob.glob(os.path.join(self.dlc_dir, basename + "*.pickle"))
        if pickle_paths and os.path.isfile(pickle_paths[0]):
            with open(pickle_paths[0], "br") as fd:
                return pickle.load(fd)
        return None
class Mouse(reach.Mouse):
"""
This class is an extension of reach.Mouse that loads and manages reach.dlc.Session
objects instead of reach.Session objects.
"""
@classmethod
def init_from_file(cls, data_dir, mouse_id, dlc_dir):
"""
Initialise Mouse object using pre-existing training data stored within
a training JSON.
Parameters
----------
data_dir : :class:`str`
Directory containing training data.
mouse_id : :class:`str`
Mouse ID to pass to :class:`Mouse` instance. Will be used to find
JSON if json_path is a folder.
dlc_dir : :class:`str`
Directory containing data output by Deep Lab Cut for this mouse.
"""
data_file = os.path.join(data_dir, f"{mouse_id}.json")
if not os.path.isfile(data_file):
raise SystemError(f"Could not find data file {data_file}.")
training_data = Session.init_all_from_file(data_file, dlc_dir)
return cls(mouse_id=mouse_id, training_data=training_data)
class Cohort(reach.Cohort): # pylint: disable=R0901
"""
This class is an extension of reach.Cohort that manages reach.dlc.Mouse objects
instead of reach.Mouse objects.
"""
@classmethod
def init_from_files(cls, data_dir, mouse_ids, dlc_dir):
"""
Initialise the cohort of mice using training files stored within the same
folder. This class extends reach.Cohort to include Deep Lab Cut data, and so
needs to know the folder containing this data.
Parameters
----------
data_dir : :class:`str`
Directory containing the training data files.
mouse_ids : :class:`list` of :class:`str`\s
IDs for the mice to be handled within the cohort.
dlc_dir : :class:`str`
Directory containing Deep Lab Cut data for all mice. This folder should
contain a folder for each mouse containing its mouse ID, and these folders
should each contain the data output by Deep Lab Cut in files that are
referenced in the mouse's training data.
"""
mice = []
for mouse_id in mouse_ids:
mouse_dlc_dirs = glob.glob(os.path.join(dlc_dir, f"*{mouse_id}*"))
if len(mouse_dlc_dirs) > 1:
print(f"Found more than one folder containing '{mouse_id}'")
raise Exception("Unsure where to get deeplabcut data. Please merge.")
mouse = Mouse.init_from_file(
data_dir=data_dir, mouse_id=mouse_id, dlc_dir=mouse_dlc_dirs[0]
)
mice.append(mouse)
return cls(mice, mouse_ids)
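# Hedged usage sketch (paths and IDs are placeholders; the JSON / Deep Lab Cut folder
# layout must match the description in the docstrings above):
#
#   mouse = Mouse.init_from_file(data_dir="/data/reach_json",
#                                mouse_id="mouse_01",
#                                dlc_dir="/data/dlc/mouse_01")
#   session = mouse.training_data[0]   # assumes reach.Mouse exposes the sessions as .training_data
#   print(session.coordinates.head())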
|
# Generated by Django 3.1.7 on 2021-02-27 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0006_auto_20210226_1819'),
]
operations = [
migrations.AlterField(
model_name='cart',
name='items',
field=models.ManyToManyField(blank=True, to='shop.CartProduct'),
),
]
|
items = input().split('|')
budget = int(input())
old_price_of_all_items = 0
new_price_of_all_items = 0
CLOTHES_MAX_PRICE = 50
SHOES_MAX_PRICE = 35
ACCESSORIES_MAX_PRICE = 20.50
NEEDED_MONEY_FOR_TICKETS = 150
for i in items:
tokens = i.split('->')
item_type = tokens[0]
price = float(tokens[1])
if budget < price:
continue
if item_type == 'Clothes' and price <= CLOTHES_MAX_PRICE:
budget -= price
old_price_of_all_items += price
new_price_of_all_items += price + price * 0.4
print(f'{price + price * 0.4:.2f}', end=' ')
elif item_type == 'Shoes' and price <= SHOES_MAX_PRICE:
budget -= price
old_price_of_all_items += price
new_price_of_all_items += price + price * 0.4
print(f'{price + price * 0.4:.2f}', end=' ')
elif item_type == 'Accessories' and price <= ACCESSORIES_MAX_PRICE:
budget -= price
old_price_of_all_items += price
new_price_of_all_items += price + price * 0.4
print(f'{price + price * 0.4:.2f}', end=' ')
profit = new_price_of_all_items - old_price_of_all_items
print()
print(f'Profit: {profit:.2f}')
budget += new_price_of_all_items
if budget >= NEEDED_MONEY_FOR_TICKETS:
print('Hello, France!')
else:
print('Time to go.')
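# Worked example for the solution above (hypothetical input):
#   items  = "Clothes->30|Shoes->40|Accessories->20",  budget = 60
#   Clothes 30     -> affordable and under the 50 cap, resale price 42.00
#   Shoes 40       -> skipped (budget is only 30 at that point, and 40 also exceeds the 35 cap)
#   Accessories 20 -> bought, resale price 28.00
#   Output: "42.00 28.00", "Profit: 20.00", final budget 10 + 70 = 80 < 150 -> "Time to go."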
# MAX_PRICE_CLOTHES = 50.00
# MAX_PRICE_SHOES = 35.00
# MAX_PRICE_ACCESSORIES = 20.50
# PRICE_FLIGHT = 150
#
# collection_of_items = input().split("|")
# budget = float(input())
#
# new_prices = []
# bought_products_prices = []
# profit = 0
#
# # buying products
#
# while len(collection_of_items) > 0:
# current_product_and_price = collection_of_items.pop(0)
# current_product_and_price = current_product_and_price.split("->")
#
# type_product = current_product_and_price[0]
# price_product = float(current_product_and_price[1])
#
# if type_product == "Clothes":
# if price_product > MAX_PRICE_CLOTHES or budget < price_product:
# continue
# else:
# budget -= price_product
# bought_products_prices.append(price_product)
#
# elif type_product == "Shoes":
# if price_product > MAX_PRICE_SHOES or budget < price_product:
# continue
# else:
# budget -= price_product
# bought_products_prices.append(price_product)
#
# elif type_product == "Accessories":
# if price_product > MAX_PRICE_ACCESSORIES or budget < price_product:
# continue
# else:
# budget -= price_product
# bought_products_prices.append(price_product)
#
# for index in range(len(bought_products_prices)):
# old_price = bought_products_prices[index]
# new_price = float(f"{(old_price * 140/100):.2f}")
# profit += new_price - old_price
# new_price.append(new_prices)
#
# total_budget = budget + sum(new_prices)
#
# for new_price_2 in new_prices:
# print(new_price_2, end=" ")
#
# print()
#
# print(f"Profit: {profit:.2f}")
#
# if total_budget >= PRICE_FLIGHT:
# print("Hello, France!")
# else:
# print("Time to go.")
|
# coding:utf-8
import matplotlib.pyplot as plt
import pandas as pd
from pyramid.arima import auto_arima
import numpy as np
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.arima_model import ARIMA
from pandas.tools.plotting import autocorrelation_plot
from statsmodels.tsa.seasonal import seasonal_decompose
from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error
def arimaMainProcess(train_dt, predict_len, test_str):
train_df = pd.DataFrame(columns=['check_seconds', 'flow'])
print('Combining data ...')
train_dt = ['20141201', '20141202', '20141203', '20141204'
, '20141208', '20141209', '20141210', '20141211'
, '20141215', '20141216', '20141217', '20141218'
, '20141223', '20141225', '20141226']
for date_str in train_dt:
temp_df = pd.read_csv(
'E:\Pycharm\PythonProjects\Subway\data\InFlow\InFlow_for14_line1_' + date_str + '.csv')
train_df = train_df.append(temp_df, ignore_index=True)
    n_rows = train_df.shape[0]
    train_df = train_df.loc[:, ['flow']]
    train_df = pd.DataFrame(train_df.flow.values, index=pd.date_range('2008-01-01', periods=n_rows), columns=['flow'])
train_df.index = pd.to_datetime(train_df.index)
train_df['flow'] = train_df['flow'].astype(float)
ts = train_df['flow']
decomposition = seasonal_decompose(ts, freq=predict_len, two_sided=False, model='additive')
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
# residual.dropna(inplace=True)
# decomposition.plot()
# plt.show()
trend.dropna(inplace=True)
# print testStationarity(trend)
trend_anl = trend.diff(periods=1).dropna()
# trend_anl.plot()
# plt.show()
# print testStationarity(trend_anl)
# trend_anl = trend.diff().dropna().diff().dropna()
# draw_acf_pacf(trend_anl)
# draw_acf_pacf(trend_anl)
# p_and_q(trend)
# trend_model = ARIMA(trend, order=(4, 0, 0))
trend_model = ARIMA(trend, order=(2, 1, 2))
trend_arma = trend_model.fit(disp=0)
trend_rs = trend_arma.forecast(predict_len)[0]
pre_rs = []
train_terms = ts.shape[0]/predict_len
for i in range(predict_len):
temp = []
for j in range(train_terms):
temp.append(seasonal[j*predict_len+i])
seasonal_part = np.mean(temp)
pre_rs.append(trend_rs[i]+seasonal_part)
pre_rs = [round(i) for i in pre_rs]
test_df = pd.read_csv('E:\Pycharm\PythonProjects\Subway\data\InFlow\InFlow_for14_line1_' + test_str + '.csv')
plt.plot(test_df.check_seconds, pre_rs, c='r', label='arima')
plt.plot(test_df.check_seconds, test_df.flow, c='b', label='real data')
plt.legend()
plt.show()
print r2_score(test_df.flow, pre_rs)
print mean_absolute_error(pre_rs, test_df.flow)
print mean_squared_error(pre_rs, test_df.flow)
return pre_rs
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return diff
# invert differenced value
def inverse_difference(history, yhat, interval=1):
return yhat + history[-interval]
def draw_ts(timeseries):
timeseries.plot()
plt.show()
def draw_acf_pacf(ts, lags=31):
f = plt.figure(facecolor='white')
ax1 = f.add_subplot(211)
plot_acf(ts, lags=40, ax=ax1)
ax2 = f.add_subplot(212)
plot_pacf(ts, lags=40, ax=ax2)
plt.show()
def testStationarity(ts):
dftest = adfuller(ts)
    # Attach descriptive labels to the statistics returned by adfuller above
dfoutput = pd.Series(dftest[0:3], index=['Test Statistic', 'p-value', 'Lags Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)' % key] = value
return dfoutput
def p_and_q(trend):
pmax = 6
qmax = 6
d=1
    aic_matrix = []  # AIC matrix
    bic_matrix = []  # BIC matrix
aic_lest = 0
bic_lest = 0
p_aic = 0
q_aic = 0
p_bic = 0
q_bic = 0
for p in range(pmax + 1):
aic_tmp = []
bic_tmp = []
for q in range(qmax + 1):
try:
a = ARIMA(trend, (p, d, q)).fit().aic
b = ARIMA(trend, (p, d, q)).fit().bic
if a<aic_lest:
aic_lest = a
p_aic = p
q_aic = q
if b<bic_lest:
bic_lest = b
p_bic = p
q_bic = q
aic_tmp.append(a)
bic_tmp.append(b)
except:
aic_tmp.append(0)
bic_tmp.append(0)
aic_matrix.append(aic_tmp)
bic_matrix.append(bic_tmp)
aic_df = pd.DataFrame(aic_matrix)
bic_df = pd.DataFrame(bic_matrix)
aic_df.to_csv('E:\Pycharm\PythonProjects\Subway\data\TrainData\Aic_matrix_26.csv')
bic_df.to_csv('E:\Pycharm\PythonProjects\Subway\data\TrainData\Bic_matrix_26.csv')
    print 'aic: p = %d, q = %d' % (p_aic, q_aic)
    print 'bic: p = %d, q = %d' % (p_bic, q_bic)
print 'finish'
# dts = ['20141201', '20141202', '20141203', '20141204', '20141205'
# , '20141208', '20141209', '20141210', '20141211', '20141212'
# , '20141215', '20141216', '20141217', '20141218', '20141219']
# # dts = ['20141201', '20141202', '20141203', '20141204', '20141205']
# arimaMainProcess(dts, 204, '20141229')
# df2 = pd.read_csv('E:\Pycharm\PythonProjects\Subway\data\WaitTime\waitTime_for14_line1_20141203.csv')
# x = []
# y = []
# for i in range(1, 205):
# check_seconds = i * 300
# x.append(check_seconds)
# count = df2[(df2.in_seconds > (check_seconds - 300)) & (df2.in_seconds <= check_seconds)].shape[0]
# y.append(count)
# plt.plot(x, y)
# plt.show()
if __name__ == '__main__':
dts = ['20141201', '20141202', '20141203', '20141204', '20141205'
, '20141208', '20141209', '20141210', '20141211', '20141212'
, '20141215', '20141216', '20141217', '20141218', '20141219']
arimaMainProcess(dts, 204, '20141229')
|
import argparse
import os
from time import time
from multiprocessing import Pool
import random
import pydub as pd
import numpy as np
import scipy.signal as ssi
import math
def convolve(irfile_path, speech_path, output_path, target_len=1):
IR = pd.AudioSegment.from_file(irfile_path)
speech = pd.AudioSegment.from_file(speech_path)
tracks = IR.split_to_mono()
speechsamples = np.array(speech.get_array_of_samples()) / speech.max_possible_amplitude
if len(speechsamples) > speech.frame_rate * target_len:
rand_start = random.randint(0, len(speechsamples) - speech.frame_rate * target_len - 1)
speechsamples = speechsamples[rand_start:(rand_start + speech.frame_rate * target_len)]
convolved = []
for i in range(len(tracks)):
IRsamples = np.array(tracks[i].get_array_of_samples()) / IR.max_possible_amplitude
if IR.frame_rate != speech.frame_rate:
newlen = int(math.ceil(len(IRsamples) * speech.frame_rate / IR.frame_rate))
IRsamples = ssi.resample(IRsamples, newlen)
temp = np.convolve(speechsamples, IRsamples)
convolved.append(temp)
convolved = np.array(convolved)
maxval = np.max(np.fabs(convolved))
if maxval == 0:
print("file {} not saved due to zero strength".format(output_path))
return -1
amp_ratio = 1.0 / maxval
convolved *= amp_ratio
convolved *= IR.max_possible_amplitude
rawdata = convolved.transpose().astype(np.int32).tobytes()
sound = pd.AudioSegment(data=rawdata, sample_width=IR.sample_width, frame_rate=speech.frame_rate, channels=IR.channels)
sound.export(output_path, format='wav')
def main():
    parser = argparse.ArgumentParser(prog='batch_convolver',
description="""Batch convolve IR folder with speech folder""")
parser.add_argument("--irfolder", "-i", help="Directory containing IR files", type=str, required=True)
parser.add_argument("--speechfolder", "-s", help="Directory containing speech clips", type=str, required=True)
parser.add_argument("--output", "-o", help="Output directory", type=str, required=True)
parser.add_argument("--nthreads", "-n", type=int, default=1, help="Number of threads to use")
args = parser.parse_args()
irpath = args.irfolder
speechpath = args.speechfolder
nthreads = args.nthreads
outpath = args.output
if not os.path.exists(irpath):
        print('IR folder {} does not exist, aborting!'.format(irpath))
return
if not os.path.exists(speechpath):
        print('Speech folder {} does not exist, aborting!'.format(speechpath))
return
if not os.path.exists(outpath):
os.makedirs(outpath)
irlist = [os.path.join(root, name) for root, dirs, files in os.walk(irpath)
for name in files if name.endswith(".wav")]
speechlist = [os.path.join(root, name) for root, dirs, files in os.walk(speechpath)
for name in files if name.endswith((".wav", ".flac"))]
ts = time()
pool = Pool(processes=nthreads)
res = []
try:
# Create a pool to communicate with the worker threads
for irfile_path in irlist:
output_path = irfile_path.replace(irpath, outpath)
new_dir = os.path.dirname(output_path)
speech_path = random.choice(speechlist)
if not os.path.exists(new_dir):
os.makedirs(new_dir)
pool.apply_async(convolve, args=(irfile_path, speech_path, output_path,))
except Exception as e:
print(e)
pool.close()
pool.close()
pool.join()
print('Took {}'.format(time() - ts))
if __name__ == '__main__':
main()
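# Example invocation (the script name and directories are placeholders; each IR under
# ./irs is convolved with a randomly chosen clip from ./speech and written under ./out):
#   python batch_convolve.py -i ./irs -s ./speech -o ./out -n 4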
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
# Read in the iris data
iris_data = pd.read_csv("./data/iris2.csv", encoding="utf-8")
# Split the iris data into labels and input features
y = iris_data.loc[:, "Name"]
x = iris_data.loc[:, ["SepalLength", "SepalWidth", "PetalLength", "PetalWidth"]]
# Split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, train_size = 0.8, shuffle=True)
# Parameters to use in the grid search
parameters = [
{"n_neighbors": [1, 10, 20], "algorithm": ["auto"]},
{"n_neighbors": [1, 10, 20], "algorithm": ["ball_tree"]},
{"n_neighbors": [1, 10, 20], "algorithm": ["kd_tree"]},
{"n_neighbors": [1, 10, 20], "algorithm": ["brute"]}
]
# Grid search
kfold_cv = KFold(n_splits=5, shuffle=True)
clf = GridSearchCV(KNeighborsClassifier(), parameters, cv = kfold_cv)
clf.fit(x_train, y_train)
print("최적의 매개변수 = ", clf.best_estimator_)
#최적의 매개변수로 평가하기
y_pred = clf.predict(x_test)
print("최종 정답률 = ", accuracy_score(y_test, y_pred))
last_score = clf.score(x_test, y_test)
print("최종 정답률 = ", last_score)
|
# Objective function(s) inspired by filtering problems
from microfilter.univariate.expnormdist import ExpNormDist
from microfilter.univariate.expnormdist import DEFAULT_EXPNORM_PARAMS, DEFAULT_EXPNORM_LOWER, DEFAULT_EXPNORM_UPPER
from microfilter.univariate.noisysim import sim_data
from copy import deepcopy
from functools import partial, update_wrapper
from itertools import permutations, combinations
NOISY_DATA = sim_data(chronological=True)
lagged_values = list(reversed(NOISY_DATA))
lagged_times = [1. for _ in NOISY_DATA]
def cube_to_params(xs,v1,v2,v3):
"""
:param xs: lie in [-1,1]^3
"""
variable_names = [v1,v2,v3]
params = deepcopy(DEFAULT_EXPNORM_PARAMS)
for x,var in zip(xs,variable_names):
params[var]=DEFAULT_EXPNORM_LOWER[var]+ 0.5*(x+1)*(DEFAULT_EXPNORM_UPPER[var] - DEFAULT_EXPNORM_LOWER[var])
return params
def expnorm_objective( xs:[float],v1,v2,v3)->float:
dist = ExpNormDist()
params = cube_to_params(xs,v1,v2,v3)
return dist.loss(lagged_values=lagged_values, lagged_times=lagged_times, params=params), # Tuple convention as per DEAP
def wrapped_partial(func, *args, **kwargs):
partial_func = partial(func, *args, **kwargs)
update_wrapper(partial_func, func)
return partial_func
def make_expnorm_objectives():
objectives = dict()
all_vars = ['g1','g2','logK','loc','logScale']
perms = combinations(all_vars,3)
for vars in perms:
objective1 = wrapped_partial(expnorm_objective,v1=vars[0],v2=vars[1],v3=vars[2])
objective1.__name__ = 'expnorm (varying '+','.join(vars)+')'
objectives.update({objective1:1})
return objectives
EXPNORM_OBJECTIVES = make_expnorm_objectives()
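if __name__ == '__main__':
    # Minimal sketch: evaluate each generated objective at the centre of the cube.
    # xs = [0, 0, 0] maps every varied parameter to the midpoint of its allowed range
    # (see cube_to_params); all other parameters stay at DEFAULT_EXPNORM_PARAMS.
    for objective in EXPNORM_OBJECTIVES:
        print(objective.__name__, objective([0.0, 0.0, 0.0]))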
|
import json
import pandas
import cv2
import os, random
'''
This script randomly picks an image from the crowdman dataset and displays it with its YOLO-style label.
'''
source_img = '/media/n0v0b/m1/dataset/crowdman-raw/val/'
source_label = '/media/n0v0b/m1/dataset/crowdman-labels/val/'
image_name = random.choice(os.listdir(source_img)) #change dir name to whatever
image_name_full = source_img + image_name
identifier = image_name[:-4]
print(image_name)
print(image_name_full)
print(identifier)
label_name = identifier + '.txt'
label_name_full = source_label + identifier + '.txt'
print(label_name_full)
file = open(label_name_full, "r")
img = cv2.imread(image_name_full)
# row(Y), col (X)
Y = img.shape[0]
X = img.shape[1]
print(Y,X)
for row in file:
item = row.split()
# <x> <y> <width> <height> - float values relative to width and height of image, it can be equal from (0.0 to 1.0]
# for example: <x> = <absolute_x> / <image_width> or <height> = <absolute_height> / <image_height>
print(float(item[2]))
x1 = int(float(item[1]) * X) - int(float(item[3]) * X/2)
    y1 = int(float(item[2]) * Y) - int(float(item[4]) * Y/2)
x2 = int(float(item[1]) * X) + int(float(item[3]) * X/2)
y2 = int(float(item[2]) * Y) + int(float(item[4]) * Y/2)
print(x1)
print(y1)
print(x2)
print(y2)
img[y1:y2, x1:x2] = 255
cv2.imshow('image',img)
cv2.waitKey(0)
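# Worked example of the YOLO -> pixel conversion above: for a 640x480 image (X=640, Y=480)
# and a label line "0 0.5 0.5 0.25 0.5" the drawn box is
#   x1 = 0.5*640 - 0.25*640/2 = 240,  y1 = 0.5*480 - 0.5*480/2 = 120
#   x2 = 0.5*640 + 0.25*640/2 = 400,  y2 = 0.5*480 + 0.5*480/2 = 360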
|
from language import Language
from pythonlanguage import PythonLanguage
class LanguageFactory:
def __init__(self):
self.idsToLangs = {1 : PythonLanguage()}
def create(self, langId):
if (langId in self.idsToLangs):
return self.idsToLangs[langId]
else:
raise Exception("No such language")
|
class Node:
def __init__(self, value, next, prev):
self.value = value
self.next = next
self.prev = prev
class DoublyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def is_empty(self):
return self.head == None
def prepend(self, value):
if self.head:
node = Node(value, self.head, None)
self.head.prev = node
self.head = node
else:
self.head = self.tail = Node(value, None, None)
def append(self, value):
if self.head:
node = Node(value, None, self.tail)
self.tail.next = node
self.tail = node
else:
self.head = self.tail = Node(value, None, None)
def set_head(self, index):
if self.head:
cur_node = self.head
for _ in range(index):
cur_node = cur_node.next
self.head = cur_node
self.head.prev = None
return True
else:
return False
def access(self, index):
if self.head:
cur_node = self.head
for _ in range(index):
cur_node = cur_node.next
return cur_node.value
else:
return False
def insert(self, index, value):
        if index == 0:
            self.prepend(value)
            return
cur_node = self.head
for _ in range(index - 1):
cur_node = cur_node.next
node = Node(value, cur_node.next, cur_node)
cur_node.next.prev = node
cur_node.next = node
    def remove(self, index):
        if index == 0:
            self.head = self.head.next
            if self.head:
                self.head.prev = None
            else:
                self.tail = None
            return
        cur_node = self.head
        for _ in range(index - 1):
            cur_node = cur_node.next
        next_node = cur_node.next.next
        cur_node.next = next_node
        if next_node:
            next_node.prev = cur_node
        else:
            self.tail = cur_node
def print(self):
cur_node = self.head
while cur_node != None:
print(cur_node.value, end=' ')
cur_node = cur_node.next
print()
def print_inverse(self):
cur_node = self.tail
while cur_node != None:
print(cur_node.value, end=' ')
cur_node = cur_node.prev
print()
if __name__=="__main__":
doublyLinkedList = DoublyLinkedList()
for i in range(7):
doublyLinkedList.prepend(i + 1)
doublyLinkedList.print()
doublyLinkedList.print_inverse()
print()
for i in range(7):
doublyLinkedList.append(i + 10)
doublyLinkedList.print()
doublyLinkedList.print_inverse()
print()
doublyLinkedList.set_head(5)
doublyLinkedList.print()
doublyLinkedList.print_inverse()
print()
doublyLinkedList.insert(5,1000)
doublyLinkedList.print()
doublyLinkedList.print_inverse()
print()
doublyLinkedList.remove(3)
doublyLinkedList.print()
doublyLinkedList.print_inverse()
print()
print(doublyLinkedList.access(4))
|
import unittest
import sbol3
import labop
import uml
class TestValidationErrorChecking(unittest.TestCase):
def test_activity_multiflow(self):
"""Test whether validator can detect nondeterminism due to activity multiple outflows"""
# set up the document
print("Setting up document")
doc = sbol3.Document()
sbol3.set_namespace("https://bbn.com/scratch/")
labop.import_library("sample_arrays")
# Create the protocol
print("Creating protocol")
protocol = labop.Protocol("broken")
doc.add(protocol)
# get a plate
plate = protocol.primitive_step("EmptyContainer", specification="placeholder")
# use it in three places
s1 = protocol.primitive_step("PlateCoordinates", coordinates="A1:D1")
protocol.edges.append(
uml.ObjectFlow(
source=plate.output_pin("samples"), target=s1.input_pin("source")
)
)
s2 = protocol.primitive_step("PlateCoordinates", coordinates="A2:D2")
protocol.edges.append(
uml.ObjectFlow(
source=plate.output_pin("samples"), target=s2.input_pin("source")
)
)
s3 = protocol.primitive_step("PlateCoordinates", coordinates="A3:D3")
protocol.edges.append(
uml.ObjectFlow(
source=plate.output_pin("samples"), target=s3.input_pin("source")
)
)
# Validate the document, which should produce one error
print("Validating and writing protocol")
v = doc.validate()
assert len(v.errors) == 0, f"Expected zero errors, but found {len(v)}"
assert (
len(v.warnings) == 1
), f"Expected precisely one warning, but found {len(v)}"
assert (
str(v.warnings[0])
== "https://bbn.com/scratch/broken/CallBehaviorAction1/OutputPin1: "
"ActivityNode has 3 outgoing edges: multi-edges can cause nondeterministic flow"
), f"Unexpected warning content: {str(v.warnings[0])}"
def test_activity_bad_inflows(self):
"""Test whether validator can detect error due to excess or missing inflows"""
# set up the document
print("Setting up document")
doc = sbol3.Document()
sbol3.set_namespace("https://bbn.com/scratch/")
# Create the protocol
print("Creating protocol")
protocol = labop.Protocol("broken")
doc.add(protocol)
# call order backwards, to make an edge from the final to the initial
protocol.order(protocol.final(), protocol.initial())
# access a parameter node and order it backwards too
p = uml.ActivityParameterNode()
protocol.nodes.append(p)
protocol.order(protocol.final(), p)
# Validate the document, which should produce two errors
print("Validating and writing protocol")
v = doc.validate()
assert len(v) == 3, f"Expected 3 validation issues, but found {len(v)}"
expected = [
"https://bbn.com/scratch/broken/ActivityParameterNode1: Too few values for property parameter. Expected 1, found 0",
"https://bbn.com/scratch/broken/InitialNode1: InitialNode must have no incoming edges, but has 1",
"https://bbn.com/scratch/broken/FinalNode1: Node has no incoming edges, so cannot be executed",
]
observed = [str(e) for e in v]
assert observed == expected, f"Unexpected error content: {observed}"
if __name__ == "__main__":
unittest.main()
|
from __future__ import annotations
import numpy as np
import neworder
import matplotlib.pyplot as plt # type: ignore
from matplotlib.image import AxesImage # type: ignore
from matplotlib import colors # type: ignore
class Schelling(neworder.Model):
def __init__(self,
timeline: neworder.Timeline,
gridsize: tuple[int, int],
categories: np.ndarray[np.float64, np.dtype[np.float64]],
similarity: float) -> None:
# NB missing this line can cause memory corruption
super().__init__(timeline, neworder.MonteCarlo.deterministic_identical_stream)
# category 0 is empty cell
self.ncategories = len(categories)
# randomly sample initial population according to category weights
init_pop = self.mc.sample(np.prod(gridsize), categories).reshape(gridsize)
self.sat = np.empty(gridsize, dtype=int)
self.similarity = similarity
self.domain = neworder.StateGrid(init_pop, neworder.Edge.CONSTRAIN)
self.fig, self.img = self.__init_visualisation()
def step(self) -> None:
# start with empty cells being satisfied
self.sat = (self.domain.state == 0)
# !count!
# count all neighbours, scaling by acceptable similarity ratio
n_any = self.domain.count_neighbours(lambda x: x>0) * self.similarity
for c in range(1,self.ncategories):
# count neighbour with a specific state
n_cat = self.domain.count_neighbours(lambda x: x==c)
self.sat = np.logical_or(self.sat, np.logical_and(n_cat > n_any, self.domain.state == c))
# !count!
n_unsat = np.sum(~self.sat)
pop = self.domain.state.copy()
free = list(zip(*np.where(pop == 0)))
for src in zip(*np.where(~self.sat)):
# pick a random destination
r = self.mc.raw() % len(free)
dest = free[r]
pop[dest] = pop[src]
pop[src] = 0
free[r] = src
self.domain.state = pop
neworder.log("step %d %.4f%% unsatisfied" % (self.timeline.index, 100.0 * n_unsat / pop.size))
self.__update_visualisation()
# !halt!
# finish early if everyone satisfied
if n_unsat == 0:
# set the halt flag in the runtime
self.halt()
# since the timeline is open-ended we need to explicitly call finalise
self.finalise()
# !halt!
def finalise(self) -> None:
plt.pause(5.0)
def __init_visualisation(self) -> tuple[plt.Figure, AxesImage]:
plt.ion()
cmap = colors.ListedColormap(['white', 'red', 'blue', 'green', 'yellow'][:self.ncategories])
fig = plt.figure(constrained_layout=True, figsize=(8,6))
img = plt.imshow(self.domain.state.T, cmap=cmap)
plt.axis('off')
fig.canvas.mpl_connect('key_press_event', lambda event: self.halt() if event.key == "q" else None)
fig.canvas.flush_events()
return fig, img
def __update_visualisation(self) -> None:
self.img.set_array(self.domain.state.T)
# plt.savefig("/tmp/schelling%04d.png" % self.timeline.index, dpi=80)
self.fig.canvas.flush_events()
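# Hedged driver sketch (neworder.run and neworder.LinearTimeline are assumed from the
# neworder API; the weights give 30% empty cells plus three roughly equal groups):
#   model = Schelling(neworder.LinearTimeline(0, 1.0), (80, 100),
#                     np.array([0.30, 0.23, 0.23, 0.24]), 0.5)
#   neworder.run(model)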
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Endpoint interfaces for providing status updates to the host.
These are mainly meant for use with interrupt endpoints; and allow a host to e.g.
repeatedly poll a device for status.
"""
from amaranth import Elaboratable, Module, Signal, Array
from ..endpoint import EndpointInterface
from ....utils.cdc import synchronize
class USBSignalInEndpoint(Elaboratable):
""" Endpoint that transmits the value of a signal to a host whenever polled.
This is intended to be usable to implement a simple interrupt endpoint that polls for a status signal.
Attributes
----------
signal: Signal(<variable width>), input
The signal to be relayed to the host. This signal's current value will be relayed each time the
host polls our endpoint.
interface: EndpointInterface
Communications link to our USB device.
status_read_complete: Signal(), output
Strobe that pulses high for a single `usb`-domain cycle each time a status read is complete.
Parameters
----------
width: int
The width of the signal we'll relay up to the host, in bits.
endpoint_number: int
The endpoint number (not address) this endpoint should respond to.
endianness: str, "big" or "little", optional
The endianness with which to send the data. Defaults to little endian.
signal_domain: str, optional
The name of the domain :attr:``signal`` is clocked from. If this value is anything other than
"usb", the signal will automatically be synchronized to the USB clock domain.
"""
def __init__(self, *, width, endpoint_number, endianness="little", signal_domain="usb"):
self._width = width
self._endpoint_number = endpoint_number
self._signal_domain = signal_domain
self._endianness = endianness
if self._endianness not in ("big", "little"):
raise ValueError(f"Endianness must be 'big' or 'little', not {endianness}.")
#
# I/O port
#
self.signal = Signal(self._width)
self.interface = EndpointInterface()
self.status_read_complete = Signal()
def elaborate(self, platform):
m = Module()
# Shortcuts.
tx = self.interface.tx
tokenizer = self.interface.tokenizer
# Grab a copy of the relevant signal that's in our USB domain; synchronizing if we need to.
if self._signal_domain == "usb":
target_signal = self.signal
else:
target_signal = synchronize(m, self.signal, o_domain="usb")
# Store a latched version of our signal, captured before we start a transmission.
latched_signal = Signal.like(self.signal)
# Grab an byte-indexable reference into our signal.
bytes_in_signal = (self._width + 7) // 8
signal_bytes = Array(latched_signal[n * 8 : n * 8 + 8] for n in range(bytes_in_signal))
# Store how many bytes we've transmitted.
bytes_transmitted = Signal(range(0, bytes_in_signal + 1))
#
# Data transmission logic.
#
# If this signal is big endian, send them in reading order; otherwise, index our multiplexer in reverse.
# Note that our signal is captured little endian by default, due the way we use Array() above. If we want
# big endian; then we'll flip it.
if self._endianness == "little":
index_to_transmit = bytes_transmitted
else:
index_to_transmit = bytes_in_signal - bytes_transmitted - 1
# Always transmit the part of the latched signal byte that corresponds to our
m.d.comb += tx.payload.eq(signal_bytes[index_to_transmit])
#
# Core control FSM.
#
endpoint_number_matches = (tokenizer.endpoint == self._endpoint_number)
targeting_endpoint = endpoint_number_matches & tokenizer.is_in
packet_requested = targeting_endpoint & tokenizer.ready_for_response
with m.FSM(domain="usb"):
# IDLE -- we've not yet gotten an token requesting data. Wait for one.
with m.State('IDLE'):
# Once we're ready to send a response...
with m.If(packet_requested):
m.d.usb += [
# ... clear our transmit counter ...
bytes_transmitted .eq(0),
# ... latch in our response...
latched_signal .eq(self.signal),
]
# ... and start transmitting it.
m.next = "TRANSMIT_RESPONSE"
# TRANSMIT_RESPONSE -- we're now ready to send our latched response to the host.
with m.State("TRANSMIT_RESPONSE"):
is_last_byte = bytes_transmitted + 1 == bytes_in_signal
# While we're transmitting, our Tx data is valid.
m.d.comb += [
tx.valid .eq(1),
tx.first .eq(bytes_transmitted == 0),
tx.last .eq(is_last_byte)
]
# Each time we receive a byte, move on to the next one.
with m.If(tx.ready):
m.d.usb += bytes_transmitted.eq(bytes_transmitted + 1)
# If this is the last byte to be transmitted, move to waiting for an ACK.
with m.If(is_last_byte):
m.next = "WAIT_FOR_ACK"
# WAIT_FOR_ACK -- we've now transmitted our full packet; we need to wait for the host to ACK it
with m.State("WAIT_FOR_ACK"):
# If the host does ACK, we're done! Move back to our idle state.
with m.If(self.interface.handshakes_in.ack):
m.d.comb += self.status_read_complete.eq(1)
m.d.usb += self.interface.tx_pid_toggle[0].eq(~self.interface.tx_pid_toggle[0])
m.next = "IDLE"
# If the host starts a new packet without ACK'ing, we'll need to retransmit.
# Wait for a new IN token.
with m.If(self.interface.tokenizer.new_token):
m.next = "RETRANSMIT"
# RETRANSMIT -- the host failed to ACK the data we've most recently sent.
# Wait here for the host to request the data again.
with m.State("RETRANSMIT"):
# Once the host does request the data again...
with m.If(packet_requested):
# ... retransmit it, starting from the beginning.
m.d.usb += bytes_transmitted.eq(0),
m.next = "TRANSMIT_RESPONSE"
return m
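# Hedged usage sketch (the surrounding device plumbing is assumed from typical LUNA
# gateware; `usb` is the USBDevice instance and `status` an existing 16-bit Signal):
#
#     status_ep = USBSignalInEndpoint(width=16, endpoint_number=3, endianness="big")
#     usb.add_endpoint(status_ep)
#     m.d.comb += status_ep.signal.eq(status)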
|
from django.contrib import admin
from .models import covid_db
# Register your models here.
admin.site.register(covid_db)
|
from aiogram import types
rz2 = types.InlineKeyboardMarkup(
inline_keyboard=[
[
types.InlineKeyboardButton(text="Ring of Aquila", callback_data="Ring of Aquila")],
[types.InlineKeyboardButton(text="Imp Claw", callback_data="Imp Claw")],
[types.InlineKeyboardButton(text="Nether Shawl", callback_data="Nether Shawl")],
[types.InlineKeyboardButton(text="<-- Назад", callback_data="Назад14")
]
])
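# Hedged usage sketch (aiogram v2-style handler; `message` comes from the handler argument):
#   await message.answer("Choose an item:", reply_markup=rz2)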
|
# Import Airflow libraries
from airflow import DAG
from airflow import models
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.email_operator import EmailOperator
from airflow.operators.bash_operator import BashOperator
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators import bigquery_operator
# [END composer_bigquery]
from airflow.contrib.operators import bigquery_to_gcs
from airflow.operators import bash_operator
from datetime import date, time, timedelta,datetime
import calendar
from airflow.contrib.hooks.ssh_hook import SSHHook
import pysftp
from airflow.models import Variable
# Apis GCP
from google.cloud import storage
# Utils
from datetime import datetime, timedelta
import pytz
import json
#############################################################################################
#############################################################################################
'''
def get_data_from_storage(bucket_name, file_name= None):
client_storage = storage.Client()
bucket = client_storage.get_bucket(bucket_name)
blobs = list(bucket.list_blobs(delimiter='/')) #prefix='/'
file_name_csv = ''
    # Find the file name
for obj in blobs:
if 'Rick' in blob.name:
file_name_csv = blob.name
break
blob = bucket.get_blob(file_name_csv)
data = blob.download_as_string()
#data = data.decode('iso-8859-1').encode('utf8')
data = data.decode("utf-8")
return data
def generate_df(**kwargs):
bucket_name = kwargs.get('templates_dict').get('bucket_name')
data = get_data_from_storage(bucket_name)
data_df = data
columns = ['index', 'season', 'episode', 'episode_name', 'actor_name','line']
data_df = [tuple(str(row).split(',')[:len(columns)]) for row in data.split('\n') ]
df = pd.DataFrame(data_df, columns = columns)
print('**************************************+')
print(df.loc[(df['season'] == 1) & (df['episode'] == 1)])
print('**************************************+')
tz = pytz.timezone('America/Santiago')
santiago_now = datetime.now(tz)
fecha_csv = str(santiago_now.strftime('%Y%m%d'))
    # Perform the XCom push
kwargs['ti'].xcom_push(key='dataset_episode_daily', value= 'valores ejemplos')
'''
#############################################################################################
#############################################################################################
def upload_file_sftp(**kwargs):
host = kwargs.get('templates_dict').get('hostname')
username = kwargs.get('templates_dict').get('username')
password = kwargs.get('templates_dict').get('password')
bucket_name = kwargs.get('templates_dict').get('bucket_name')
client_storage = storage.Client()
bucket = client_storage.get_bucket(bucket_name)
blobs = list(bucket.list_blobs(prefix='data')) #prefix='/'
cnopts = pysftp.CnOpts()
cnopts.hostkeys = None
for blob in blobs:
file_name = blob.name.split('/')[1]
if '.csv' in file_name:
with pysftp.Connection(host, username, password = password, cnopts=cnopts) as sftp:
blob.download_to_filename(file_name)
sftp.put(file_name,file_name) #upload file to nodejs/
return ''
params = json.loads(Variable.get("PARAMS_TEST_FALABELLA"))
params_bq_schema = json.loads(Variable.get("PARAMS_BQ_SCHEMA_FIELDS"))
params_sftp = json.loads(Variable.get("PARAMS_SFTP"))
query_sql = Variable.get("QUERY_SQL")
schedule = params['CRON']
bq_recent_questions_table_id = '{}:{}.TEMP_TABLE'.format(params['GCP_PROJECT_ID'], params['BQ_DATASET_ID'])
dag_nombre = 'test_sftp'
default_dag_args = {
'owner': 'airflow',
'depends_on_past': False,
'email_on_failure': False,
'email_on_retry': False,
'trigger_rule': 'all_done',
'catchup_by_default': False,
'retries': 0,
'retry_delay': timedelta(minutes=5),
'start_date':datetime(2020, 5, 7, 10, 0) #datetime.utcnow()
}
dag = DAG(dag_nombre,
default_args=default_dag_args,
schedule_interval=schedule
)
start = DummyOperator(task_id='start', dag=dag)
upload_files_to_sftp = PythonOperator(task_id="upload_file_to_sftp",
execution_timeout=timedelta(hours=1),
python_callable=upload_file_sftp,
provide_context=True,
trigger_rule='all_success',
templates_dict={
'hostname': params_sftp['HOST'],
'username': params_sftp['USERNAME'],
'password': params_sftp['PWD'],
'port': params_sftp['PORT'],
'bucket_name': params['BUCKET_ID']
},
dag = dag)
end = DummyOperator(task_id='end', dag=dag, trigger_rule='all_done')
start >> upload_files_to_sftp >> end
|
import warnings
from chainer.utils import conv
import numpy as np
from onnx_chainer.functions.opset_version import support
from onnx_chainer import onnx_helper
@support((1, 7))
def convert_AveragePooling2D(
func, opset_version, input_names, output_names, context):
pad = [func.ph, func.pw]
stride = [func.sy, func.sx]
ksize = [func.kh, func.kw]
if func.cover_all:
# Supports cover_all by setting extra padding
# NOTE: onnxruntime may not run when "k <= p + s - 1".
pad.extend([p + s - 1 for p, s in zip(pad, stride)])
else:
pad = pad * 2
if opset_version == 1:
raise ValueError(
'AveragePooling2D is not compatible with ONNX\'s AveragePool-1. '
'Use operation set version >= 7.')
elif opset_version == 7:
return onnx_helper.make_node(
'AveragePool', input_names, output_names,
kernel_shape=ksize,
pads=pad,
strides=stride,
count_include_pad=1,
),
@support((1, 7))
def convert_AveragePoolingND(
func, opset_version, input_names, output_names, context):
pad = list(func.pad[:])
if func.cover_all:
# Supports cover_all by setting extra padding
# NOTE: onnxruntime may not run when "k <= p + s - 1".
pad.extend([p + s - 1 for p, s in zip(pad, func.stride)])
else:
pad = pad * 2
if opset_version == 1:
raise ValueError(
'AveragePoolingND is not compatible with ONNX\'s AveragePool-1. '
'Use operation set version >= 7.')
elif opset_version == 7:
return onnx_helper.make_node(
'AveragePool', input_names, output_names,
kernel_shape=func.ksize,
pads=pad,
strides=func.stride,
count_include_pad=1,
),
@support((1, 8))
def convert_MaxPooling2D(
func, opset_version, input_names, output_names, context):
pad = [func.ph, func.pw]
stride = [func.sy, func.sx]
ksize = [func.kh, func.kw]
if func.cover_all:
# Supports cover_all by setting extra padding
# NOTE: onnxruntime may not run when "k <= p + s - 1".
pad.extend([p + s - 1 for p, s in zip(pad, stride)])
else:
pad = pad * 2
if opset_version == 1:
return onnx_helper.make_node(
'MaxPool', input_names, output_names,
kernel_shape=ksize,
pads=pad,
strides=stride
),
elif opset_version == 8:
return onnx_helper.make_node(
'MaxPool', input_names, output_names,
kernel_shape=ksize,
pads=pad,
strides=stride,
storage_order=0, # row major
),
@support((1, 8))
def convert_MaxPoolingND(
func, opset_version, input_names, output_names, context):
pad = list(func.pad[:])
if func.cover_all:
# Supports cover_all by setting extra padding
# NOTE: onnxruntime may not run when "k <= p + s - 1".
pad.extend([p + s - 1 for p, s in zip(pad, func.stride)])
else:
pad = pad * 2
if opset_version == 1:
return onnx_helper.make_node(
'MaxPool', input_names, output_names,
kernel_shape=func.ksize,
pads=pad,
strides=func.stride
),
elif opset_version == 8:
return onnx_helper.make_node(
'MaxPool', input_names, output_names,
kernel_shape=func.ksize,
pads=pad,
strides=func.stride,
storage_order=0, # row major
),
def convert_ROIPooling2D(
func, opset_version, input_names, output_names, context):
warnings.warn(
'It\'s possible that output does not match with Chainer, please check '
'each runtime\'s implementation. For example, when input x has '
'negative values, some runtimes set max(output, 0) unlike Chainer.',
UserWarning)
return onnx_helper.make_node(
'MaxRoiPool', input_names, output_names,
pooled_shape=[func.outh, func.outw],
spatial_scale=func.spatial_scale,
),
@support((7, 9, 10, 11))
def convert_Unpooling2D(
func, opset_version, input_names, output_names, context):
pad = [func.ph, func.pw]
stride = [func.sy, func.sx]
ksize = [func.kh, func.kw]
outsize = [func.outh, func.outw]
# TODO(hamaji): These could be implemented by `Slice` and `Pad`.
if func.cover_all:
raise RuntimeError('ONNX-chainer does not support `cover_all=True` '
'for Unpooling2D')
h, w = func.inputs[0].shape[2:]
expected_outsize = [
conv.get_deconv_outsize(
h, func.kh, func.sy, func.ph, cover_all=func.cover_all),
        conv.get_deconv_outsize(
            w, func.kw, func.sx, func.pw, cover_all=func.cover_all)
]
if outsize != expected_outsize:
raise RuntimeError('ONNX-chainer does not support `outsize!=None` '
'for Unpooling2D: expected={} actual={}'.format(
expected_outsize, outsize))
if pad != [0, 0]:
raise RuntimeError('ONNX-chainer does not support `pad!=0` '
'for Unpooling2D')
# This one would require an extra 1x1 MaxPool.
if stride != ksize:
raise RuntimeError('ONNX-chainer does not support `stride!=ksize` '
'for Unpooling2D: stride={} ksize={}'.format(
stride, ksize))
scales = [1.0, 1.0, float(func.kh), float(func.kw)]
if opset_version == 7:
return onnx_helper.make_node('Upsample', input_names, output_names,
scales=scales),
scales_name = context.add_const(
np.array(scales, dtype=np.float32), 'scales')
if opset_version in [9, 10]:
input_names.append(scales_name)
op = 'Upsample' if opset_version == 9 else 'Resize'
return onnx_helper.make_node(op, input_names, output_names),
if opset_version == 11:
roi_name = context.add_const(np.array([]), 'roi')
input_names.extend([roi_name, scales_name])
return onnx_helper.make_node('Resize', input_names, output_names),
|
'''A module that contains functions and classes for manipulating images.'''
from collections import namedtuple
from PIL import Image
Coordinate = namedtuple('Coordinate', ['x', 'y'])
BoundingBox = namedtuple('BoundingBox', ['left', 'top', 'right', 'bottom'])
class ImageZoomer:
'''A class for controlling the bounding box of an image.'''
def __init__(self, image, bounding_box=None):
self._image = image
self.history = [BoundingBox(0, 0, *image.size)]
if bounding_box:
self.history.append(bounding_box)
@classmethod
def from_array(cls, array):
'''
Create an image from an array.
:param array: (numpy.array) The array to convert into an image.
:return: (ImageZoomer)
'''
return cls(Image.fromarray(array))
@property
def size(self):
'''Return the size of the image.'''
return self._image.size
@property
def current_bounding_box(self):
'''Return the current bounding box.'''
return self.history[-1]
@property
def image(self):
        '''Return the image with the appropriate bounding box.'''
return self._image.resize(self.size, box=self.current_bounding_box)
def pop(self):
'''Pop off the last bounding box created.'''
if len(self.history) > 1:
self.history.pop()
return self
def adjust(self, width_pct, height_pct, translate_width, translate_height):
'''
Adjust the bounding box.
:param width_pct: (float) Controls if the image is either expanded or
shrunk width-wise.
:param height_pct: (float) Controls if the image is either expanded or
shrunk height-wise.
:param translate_width: (float) Moves the bounding box in the image
width-wise.
:param translate_height: (float) Moves the bounding box in the image
height-wise.
'''
left, top, right, bottom = self.current_bounding_box
width = right - left
height = bottom - top
left = left + int(translate_width * width)
top = top + int(translate_height * height)
right = int(width * width_pct) + left
bottom = int(height * height_pct) + top
self.history.append(BoundingBox(left, top, right, bottom))
return self.image
def translate(self, translate_width, translate_height):
'''
Translate the bounding box.
:param translate_width: (float) Moves the bounding box in the image
width-wise.
:param translate_height: (float) Moves the bounding box in the image
height-wise.
'''
assert 0 <= translate_width <= 1
assert 0 <= translate_height <= 1
width_pct = 1 - translate_width
height_pct = 1 - translate_height
self.adjust(width_pct, height_pct, translate_width, translate_height)
def shrink(self, width_pct, height_pct):
'''
Shrink the bounding box.
:param width_pct: (float) Shrinks the image's width by a percentage of
the current width.
:param height_pct: (float) Shrinks the image's height by a percentage
of the current height.
'''
assert 0 < width_pct <= 1
assert 0 < height_pct <= 1
self.adjust(width_pct, height_pct, 0, 0)
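# --- Usage sketch (added; not part of the original module) ---
# A hypothetical example of driving ImageZoomer; it assumes Pillow is installed
# and that a file named 'photo.jpg' exists next to this script.
if __name__ == '__main__':
    zoomer = ImageZoomer(Image.open('photo.jpg'))
    zoomer.shrink(0.5, 0.5)        # keep only the top-left quarter of the view
    zoomer.translate(0.25, 0.25)   # slide the view toward the centre
    zoomer.image.show()            # render using the current bounding box
    zoomer.pop()                   # undo the translation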
|
def test_nothing():
    return "gpflow doesn't even install"
|
"""
The Degree Of Vertex for Undirected Graphs
"""
def degree_of_vertex(edges):
    """
    Find the degree of every vertex across all of the edges.
    Time Complexity: O(n) --> Not a very fast implementation
    """
    freq = {}
    for i, j in edges:
        # In an undirected graph each edge contributes to the degree of both endpoints.
        freq[i] = freq.get(i, 0) + 1
        freq[j] = freq.get(j, 0) + 1
    return freq
def follower_counts(results):
follcounts = []
for i in results:
follcounts.append(results[i])
return follcounts
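# Quick illustration (added; not in the original file): degrees for a small
# undirected graph -- a triangle on vertices 1, 2, 3 plus a pendant vertex 4.
if __name__ == '__main__':
    sample_edges = [(1, 2), (2, 3), (1, 3), (3, 4)]
    print(degree_of_vertex(sample_edges))  # expected: {1: 2, 2: 2, 3: 3, 4: 1}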
|
# Bracket problems are solved with a stack structure
from collections import deque
def solution(s):
answer = 0
deq = deque(s)
if len(s) <= 1:
return 0
for _ in range(len(s)):
deq.rotate(1)
check = deque()
count = 0
for i in deq:
if i in '[{(':
check.appendleft(i)
else:
if i == ']' and len(check) > 0:
if '[' == check[0]:
check.popleft()
count += 1
elif i == '}' and len(check) > 0:
if '{' == check[0]:
check.popleft()
count += 1
elif i == ')' and len(check) > 0:
if '(' == check[0]:
check.popleft()
count += 1
else:
break
if len(check) == 0 and count == len(s) // 2:
answer += 1
return answer
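# Example (added for illustration, not part of the original submission):
if __name__ == '__main__':
    print(solution("()"))   # -> 1 ("()" is already balanced)
    print(solution(")("))   # -> 1 (one rotation turns it into "()")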
# Notes on the wrong attempt
# Checking membership with "if ... in check" caused errors; building check as a last-in-first-out stack resolved them
# def solution(s):
# answer = 0
# deq = deque(s)
#
# if len(s) <= 1:
# return 0
#
# for _ in range(len(s)):
# deq.rotate(1)
# check = deque()
# count = 0
#
# for i in deq:
# if i in '[{(':
# check.appendleft(i)
# else:
# if i == ']' and len(check) > 0:
# if '[' in check: #오류구간
# check.remove('[')
# count += 1
# elif i == '}' and len(check) > 0:
# if '{' in check:
# check.remove('{')
# count += 1
# elif i == ')' and len(check) > 0:
# if '(' in check:
# check.remove('(')
# count += 1
# else:
# count += 100000
# break
#
# if len(check) == 0 and count == len(s) // 2:
# answer += 1
#
# return answer
|
#!/usr/bin/env python3
import os
import urllib.request
import urllib.parse
import urllib.error
from slugify import slugify
from sys import platform
from threading import Thread
import subprocess
import time
class Speaker:
def __init__(self, sound_directory='sounds'):
self.sound_directory = sound_directory
self.mary_tts_server = MaryTtsServer()
while 5.2 != self.mary_tts_server.get_version():
pass
def say(self, message):
error_message = None
os.makedirs(self.sound_directory, exist_ok=True)
file_path = self.sound_directory + '/' + slugify(message) + '.wav'
if not (os.path.exists(file_path)):
params = (('INPUT_TEXT', message),
('INPUT_TYPE', 'TEXT'),
('OUTPUT_TYPE', 'AUDIO'),
('AUDIO', 'WAVE_FILE'),
('LOCALE', 'de'),
('effect_Chorus_selected', 'on'),
('effect_Chorus_parameters',
'delay1:466;amp1:0.54;delay2:600;amp2:-0.10;delay3:250;amp3:0.30'))
url = 'http://localhost:59125/process?' + urllib.parse.urlencode(params)
try:
urllib.request.urlretrieve(url, file_path)
except urllib.error.URLError:
error_message = 'Can not read: ' + url
if error_message is None:
player = 'mplayer'
if 'darwin' == platform:
player = 'afplay'
os.system(player + ' ' + file_path + ' > /dev/null 2>&1')
else:
return error_message
class MaryTtsServer (Thread):
def __init__(self):
Thread.__init__(self)
self.__version = None
self.__pid = None
self.start()
def run(self):
while True:
if self.get_version() != 5.2:
command = "./vendor/marytts-5.2/bin/marytts-server"
self.__pid = subprocess.Popen(command, preexec_fn=os.setpgrp)
time.sleep(10)
time.sleep(1)
def get_version(self):
try:
urllib.request.urlopen('http://localhost:59125/version').read().decode('utf-8')
self.__version = 5.2
except urllib.error.URLError:
self.__version = None
return self.__version
|
from evaluate import pivotal, pivotality, criticality, prob_pivotal, unpacked_pivotality
from itertools import product
from simulate import *
from draw import draw, highlight_cause_effect, draw_outcomes, show_predictions
from names import *
import numpy as np
import networkx as nx
COLOR = [(0, 100), (220, 100), (120, 60), (280, 100), (24, 100)]
h = {}
h['structure'] = [['0n', 'o'], ['1n', '0g'], ['2n', '0g'], ['0g', 'o']]
s = {}
s['thresholds'] = [['0g', 2], ['o', 1]]
s['values'] = [['0n', 0], ['1n', 0], ['2n', 0]]
hierarchy = Situation(hierarchy=h, situation=s, comprehension=True, names=['Claude', 'Barry', 'Jacob'])
# hierarchy.node['0g']['team'] = 'A'
# fig = draw(hierarchy, size=True, fig=(7.5, 5), arrow=['0g'], file='experiment/static/images/instructions/image4.png')
# situation = draw_outcomes(hierarchy, fig, arrow=['0g'], file='experiment/static/images/instructions/image4.png')
# hierarchy.evaluate('0g')
# situation = draw_outcomes(hierarchy, fig, arrow=['0g'], file='experiment/static/images/instructions/image5.png')
# fig = draw(hierarchy, size=True, fig=fig, file='experiment/static/images/instructions/image6.png')
# hierarchy.evaluate('o')
# situation = draw_outcomes(hierarchy, fig, file='experiment/static/images/instructions/image7.png')
hierarchy.remove_node('0g')
hierarchy.add_edges_from([('1n', 'o'), ('2n', 'o')])
fig = draw(hierarchy, fig=(7.5, 5), arrow=['0n', '1n', '2n'], file='experiment/static/images/instructions/imag.png')
fig = draw(hierarchy, fig=fig, arrow=['0n', '1n', '2n'], file='experiment/static/images/instructions/imag.png')
hierarchy.evaluate('o')
situation = draw_outcomes(hierarchy, fig, arrow=['0n', '1n', '2n'], file='experiment/static/images/instructions/imag.png')
# h = {}
# h['structure'] = [['0n', 'o'], ['1n', '0g'], ['2n', '0g'], ['0g', 'o']]
# s = {}
# s['thresholds'] = [['0g', 2], ['o', 1]]
# s['values'] = [['0n', 1], ['1n', 1], ['2n', 0]]
# hierarchy = Situation(hierarchy=h, situation=s, names=['Claude', 'Barry', 'Jacob'])
# fig = draw(hierarchy, size=True, fig=(7.5, 5), arrow=['0g', '0n'], file='experiment/static/images/instructions/image10.png')
# situation = draw_outcomes(hierarchy, fig, arrow=['0g', '0n'], file='experiment/static/images/instructions/image10.png')
plt.close()
|
# -*- coding=utf-8 -*-
# @Time: 2020/10/11 12:18 PM
# Author: 王文娜
# @File: 另一种文件名.py
# @Software:PyCharm
import csv
with open('test.csv','w',newline='') as f:
    # Initialize the CSV writer object
writer = csv.writer(f)
writer.writerow(['超哥哥',20])
writer.writerow(['步惊云', 22])
with open('test.csv','a',newline='') as f:
writer=csv.writer(f)
writer.writerows([('小',67),('大',90)])
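# Added check (not in the original script): read the file back to confirm that
# both the initial write and the append landed.
with open('test.csv', newline='') as f:
    for row in csv.reader(f):
        print(row)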
|
'''
50x50 board with 50,000 agents -- agents can only defect or cooperate, and must always play. In addition, these agents have extremely simple genes.
'''
N_COLS = 50
N_ROWS = 50
NUM_AGENTS = 50000
def onlyCD(agent, action):
if not action:
return agent.cooperate()
else:
return agent.defect()
defaultPerformAction = onlyCD
N_POS_ACTIONS = 2
GENE_LENGTH = 1
HISTORY_LENGTH = GENE_LENGTH
ALWAYS_PLAY = True
|
# -*- coding:utf-8 -*-
# Author: Jorden Hai
info = {
'stu1101':"TengLan Wu",
'stu1102':"LongZe Luola",
'stu1103':"XiaoZe Maliya",
}
info['stu1101'] = "武藤兰"
info['stu1102'] = "泷泽萝拉"
info['stu1103'] = "小泽玛利亚"
info['stu1104'] = "苍井空"
for key,value in info.items():
print(key,':',value)
print(info.get('stu1105'))
print('stu1103' in info)#info.has_key(1103) in py2.x
'''
del info['stu1101']
info.pop("stu1102")
'''
for v in info.values():
print(v)
|
from django.apps import AppConfig
class ObituariesConfig(AppConfig):
name = 'obituaries'
|
r = lambda a : a + 15
print(r(5))
r = lambda x, y : x * y
print(r(2, 5))
|
"""
Heber Cooke 10/29/2019
Chapter 6 Exercise 8
The function works as expected. It prints the sentence.
It works by printing the first letter of the sentence and then recursively calling the function
on the rest of the sequence until there is nothing left.
The hidden cost of running the function is that with large inputs it could take a very long time and use a lot of memory.
"""
count = 1
def printAll(seq, count):
if seq:
print("call #%2d" % (count), end=" ")
print(seq[0], end=" ")
print("<-->%-25s" % (seq))
count = count + 1
printAll(seq[1:], count)
s = "The most awesome sentence"
#s = ["The","most","awsome","sentence"]
printAll(s, count)
|
# viewsfile
from django.shortcuts import render
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
import pyrebase
config={
"apiKey": "AIzaSyA1_TbZc_DAJVAosBsBXHKVnANss0_220U",
"authDomain": "freshers-portal.firebaseapp.com",
"databaseURL": "https://freshers-portal.firebaseio.com",
"projectId": "freshers-portal",
"storageBucket": "freshers-portal.appspot.com",
"messagingSenderId": "620782197376",
"appId": "1:620782197376:web:f7835cc81df3aced7d2465",
"measurementId": "G-KH43ST917G",
}
firebase=pyrebase.initialize_app(config)
authe = firebase.auth()
database=firebase.database()
session1=False
def Blogs(request):
try:
idToken = request.session['uid']
session1=True
except:
session1=False
import datetime
timestamp=database.child('Blogs').shallow().get().val()
lis_time = []
for i in timestamp:
lis_time.append(i)
Descriptions = []
Titles = []
Types = []
Departments = []
images = []
Writtenbys = []
for i in lis_time:
Department = database.child('Blogs').child(i).child('Department').get().val()
Description =database.child('Blogs').child(i).child('Description').get().val()
Title =database.child('Blogs').child(i).child('Title').get().val()
Type =database.child('Blogs').child(i).child('Type').get().val()
Writtenby =database.child('Blogs').child(i).child('Writtenby').get().val()
Departments.append(Department)
Descriptions.append(Description)
Titles.append(Title)
Types.append(Type)
name = database.child('users').child(Writtenby).child('name').get().val()
image = database.child('users').child(Writtenby).child('imgUrl').get().val()
if image == "":
image = "https://firebasestorage.googleapis.com/v0/b/freshers-portal.appspot.com/o/profilepic.jpg?alt=media&token=864cf64c-a0ad-442b-8ca2-ae425baf43ad"
Writtenbys.append(name)
images.append(image)
date = []
for i in timestamp:
i = float(i)
dat = datetime.datetime.fromtimestamp(i).strftime('%H:%M %d-%m-%y')
date.append(dat)
comb_lis = zip(lis_time, date, Descriptions,Departments,Titles,Types,Writtenbys,images)
return render(request,"Blogs.html",{"comb_lis":comb_lis,"session1":session1})
def search(request):
return render(request, "search.html")
def searchusers(request):
value = request.POST.get('search')
if value =="":
return render(request, "search.html")
title = request.POST['category']
if title =="":
return render(request, "search.html")
if value is None or title is None:
print(value ,"Value",title)
return render(request, "search.html")
else:
print(value)
if title == "Notes":
data = database.child('Notes').shallow().get().val()
id = []
for i in data:
id.append(i)
            for i in id:
                val = database.child('Notes').child(i).child('filename').get().val()
                if (val == value):
                    requid = i
                    fileurl = database.child('Notes').child(requid).child('fileurl').get().val()
                    return render(request, "searchNotes.html", {"fileurl": fileurl})
            # Only fall back to the search page after every note has been checked.
            return render(request, "search.html")
if title == "Question-papers":
data = database.child('Question-papers').shallow().get().val()
id = []
for i in data:
id.append(i)
            for i in id:
                val = database.child('Question-papers').child(i).child('filename').get().val()
                if (val == value):
                    requid = i
                    fileurl = database.child('Question-papers').child(requid).child('fileurl').get().val()
                    return render(request, "searchNotes.html", {"fileurl": fileurl})
            # Only fall back to the search page after every paper has been checked.
            return render(request, "search.html")
if title == "Users":
data = database.child('users').shallow().get().val()
uidlist = []
requid = 'null'
for i in data:
uidlist.append(i)
for i in uidlist:
val = database.child('users').child(i).child('name').get().val()
val=val.lower()
value=value.lower()
print(val,value)
if (val == value):
requid = i
if requid=='null':
return render(request, "search.html")
print(requid)
name = database.child('users').child(requid).child('name').get().val()
course = database.child('users').child(requid).child('course').get().val()
branch = database.child('users').child(requid).child('branch').get().val()
img = database.child('users').child(requid).child('imgUrl').get().val()
Name = []
Name.append(name)
Course = []
Course.append(course)
Branch = []
Branch.append(branch)
Image = []
Image.append(img)
comb_lis = zip(Name, Course, Branch, Image)
return render(request, "SearchUsers.html", {"comb_lis": comb_lis})
def searchnotes(request):
return render(request,"searchNotes.html")
def signIn(request):
return render(request,"Login.html")
def postsignIn(request):
if request.method=='POST':
email = request.POST.get('email')
pasw = request.POST.get('pass')
try:
user = authe.sign_in_with_email_and_password(email, pasw)
except:
message = "Invalid Credentials!! Please Check your credentials and try sign-in again"
return render(request, "Login.html", {"message": message})
session_id = user['idToken']
request.session['uid'] = str(session_id)
idToken = request.session['uid']
if idToken:
a = authe.get_account_info(idToken)
a = a['users']
a = a[0]
uid = a['localId']
import datetime
timestamp = database.child('Blogs').shallow().get().val()
if timestamp:
lis_time = []
for i in timestamp:
lis_time.append(i)
Descriptions = []
Titles = []
Types = []
Departments = []
Writtenbys = []
for i in lis_time:
Department = database.child('Blogs').child(i).child('Department').get().val()
Description = database.child('Blogs').child(i).child('Description').get().val()
Title = database.child('Blogs').child(i).child('Title').get().val()
Type = database.child('Blogs').child(i).child('Type').get().val()
Writtenby = database.child('Blogs').child(i).child('Writtenby').get().val()
if uid == Writtenby:
Departments.append(Department)
Descriptions.append(Description)
Titles.append(Title)
Types.append(Type)
name = database.child('users').child(Writtenby).child('name').get().val()
Writtenbys.append(name)
date = []
for i in timestamp:
i = float(i)
dat = datetime.datetime.fromtimestamp(i).strftime('%H:%M %d-%m-%y')
date.append(dat)
name = database.child('users').child(uid).child('name').get().val()
branch = database.child('users').child(uid).child('branch').get().val()
image = database.child('users').child(uid).child('imgUrl').get().val()
if image == "":
image = "https://firebasestorage.googleapis.com/v0/b/freshers-portal.appspot.com/o/profilepic.jpg?alt=media&token=864cf64c-a0ad-442b-8ca2-ae425baf43ad"
comb_lis = zip(lis_time, date, Descriptions, Departments, Titles, Types, Writtenbys)
return render(request, "ProfilePage.html", {"comb_lis": comb_lis, "name": name, "branch": branch,"image":image})
else:
Descriptions = []
Titles = []
Types = []
Departments = []
date=[]
Writtenbys = []
lis_time=[]
name = database.child('users').child(uid).child('name').get().val()
branch = database.child('users').child(uid).child('branch').get().val()
image = database.child('users').child(uid).child('imgUrl').get().val()
if image == "":
image = "https://firebasestorage.googleapis.com/v0/b/freshers-portal.appspot.com/o/profilepic.jpg?alt=media&token=864cf64c-a0ad-442b-8ca2-ae425baf43ad"
print(image)
comb_lis = zip(lis_time, date, Descriptions, Departments, Titles, Types, Writtenbys)
return render(request, "ProfilePage.html",
{"comb_lis": comb_lis, "name": name, "branch": branch, "image": image})
message = "Please Login First"
return render(request, "Login.html", {"message": message})
def reset(request):
return render(request, "Reset.html")
def postReset(request):
email = request.POST.get('email')
try:
authe.send_password_reset_email(email)
message = "A link to reset your password is succesfully sent to your email"
return render(request, "Reset.html", {"msg":message})
except:
message = "Something Went Wrong, Please check the email you provided is already registered or not!!"
return render(request, "Reset.html", {"msg":message})
def logout(request):
try:
del request.session['uid']
except:
pass
return render(request,"Login.html")
def signUp(request):
return render(request,"Registration.html")
def postsignup(request):
if request.method == 'POST':
name=request.POST.get('name')
branch=request.POST.get('sel')
enroll=request.POST.get('enrolls')
roll=request.POST.get('roll')
email=request.POST.get('email')
passw=request.POST.get('pass')
try:
user=authe.create_user_with_email_and_password(email,passw)
except:
messg="Something Went Wrong, Unable to create your account. Try Again!"
return render(request,"Registration.html",{"messg":messg})
uid = user['localId']
data={"name":name,"USER_TYPE":"user","device_token":"","email":email,"id":roll,"imgUrl":"https://firebasestorage.googleapis.com/v0/b/freshers-portal.appspot.com/o/profilepic.jpg?alt=media&token=864cf64c-a0ad-442b-8ca2-ae425baf43ad","branch":branch,"uid":uid,"enrollment":enroll}
database.child("users").child(uid).set(data)
return render(request,"Login.html")
message = "Please Login Here First "
return render(request, "Login.html", {"message": message})
def profile(request):
try:
idToken = request.session['uid']
except:
message = "Please Login In Here First "
return render(request, "Login.html", {"message": message})
if idToken:
a = authe.get_account_info(idToken)
a = a['users']
a = a[0]
uid = a['localId']
import datetime
timestamp = database.child('Blogs').shallow().get().val()
lis_time = []
for i in timestamp:
lis_time.append(i)
Descriptions = []
Titles = []
Types = []
Departments = []
Writtenbys = []
for i in lis_time:
Department = database.child('Blogs').child(i).child('Department').get().val()
Description = database.child('Blogs').child(i).child('Description').get().val()
Title = database.child('Blogs').child(i).child('Title').get().val()
Type = database.child('Blogs').child(i).child('Type').get().val()
Writtenby = database.child('Blogs').child(i).child('Writtenby').get().val()
if uid == Writtenby:
Departments.append(Department)
Descriptions.append(Description)
Titles.append(Title)
Types.append(Type)
name = database.child('users').child(Writtenby).child('name').get().val()
branch = database.child('users').child(Writtenby).child('branch').get().val()
Writtenbys.append(name)
date = []
for i in timestamp:
i = float(i)
dat = datetime.datetime.fromtimestamp(i).strftime('%H:%M %d-%m-%y')
date.append(dat)
name = database.child('users').child(uid).child('name').get().val()
branch = database.child('users').child(uid).child('branch').get().val()
image = database.child('users').child(uid).child('imgUrl').get().val()
if image=="":
image="https://firebasestorage.googleapis.com/v0/b/freshers-portal.appspot.com/o/profilepic.jpg?alt=media&token=864cf64c-a0ad-442b-8ca2-ae425baf43ad"
comb_lis = zip(lis_time, date, Descriptions, Departments, Titles, Types, Writtenbys)
return render(request,"ProfilePage.html",{"comb_lis":comb_lis,"name":name,"branch":branch,"image":image})
def addPost(request):
return render(request,"AddPost.html")
def about(request):
try:
idToken = request.session['uid']
session1=True
except:
session1=False
return render(request, "aboutcollege.html",{"session1":session1})
def home(request):
try:
idToken = request.session['uid']
session1=True
except:
session1=False
return render(request, "home2.html",{"session1":session1})
def afteraAddPost(request):
if request.method=='POST':
from datetime import datetime, timezone
import time
import pytz
idToken = request.session['uid']
if idToken:
tz = pytz.timezone('Asia/Kolkata')
Currenttime = datetime.now(timezone.utc).astimezone(tz).strftime("%H%M%S")
millis = int(Currenttime)
tyype = request.POST.get('type')
title = request.POST.get('title')
description = request.POST.get('desc')
branch=request.POST.get('sel')
a = authe.get_account_info(idToken)
a = a['users']
a = a[0]
a = a['localId']
data ={
"Type":tyype,
"Title":title,
"Description":description,
"Writtenby":a,
"Time":Currenttime,
"Department":branch,
}
database.child('Blogs').child(millis).set(data)
import datetime
a = authe.get_account_info(idToken)
a = a['users']
a = a[0]
a = a['localId']
timestamp = database.child('Blogs').shallow().get().val()
lis_time = []
for i in timestamp:
lis_time.append(i)
Descriptions = []
Titles = []
Types = []
Departments = []
Writtenbys = []
for i in lis_time:
Department = database.child('Blogs').child(i).child('Department').get().val()
Description = database.child('Blogs').child(i).child('Description').get().val()
Title = database.child('Blogs').child(i).child('Title').get().val()
Type = database.child('Blogs').child(i).child('Type').get().val()
Writtenby = database.child('Blogs').child(i).child('Writtenby').get().val()
if a == Writtenby:
Departments.append(Department)
Descriptions.append(Description)
Titles.append(Title)
Types.append(Type)
name = database.child('users').child(Writtenby).child('name').get().val()
image = database.child('users').child(a).child('imgUrl').get().val()
if image == "":
image = "https://firebasestorage.googleapis.com/v0/b/freshers-portal.appspot.com/o/profilepic.jpg?alt=media&token=864cf64c-a0ad-442b-8ca2-ae425baf43ad"
branch = database.child('users').child(Writtenby).child('branch').get().val()
Writtenbys.append(name)
date = []
for i in timestamp:
i = float(i)
dat = datetime.datetime.fromtimestamp(i).strftime('%H:%M %d-%m-%y')
date.append(dat)
comb_lis = zip(lis_time, date, Descriptions, Departments, Titles, Types, Writtenbys)
return render(request, "ProfilePage.html", {"comb_lis": comb_lis,"name":name,"branch": branch,"image":image})
message = "Please Login First"
return render(request, "Login.html", {"message": message})
def gotoedit(request):
idToken = request.session['uid']
if idToken:
a = authe.get_account_info(idToken)
a = a['users']
a = a[0]
uid = a['localId']
image = database.child('users').child(uid).child('imgUrl').get().val()
name = database.child('users').child(uid).child('name').get().val()
branch = database.child('users').child(uid).child('branch').get().val()
email = database.child('users').child(uid).child('email').get().val()
enrollment = database.child('users').child(uid).child('enrollment').get().val()
if image == "":
image = "https://firebasestorage.googleapis.com/v0/b/freshers-portal.appspot.com/o/profilepic.jpg?alt=media&token=864cf64c-a0ad-442b-8ca2-ae425baf43ad"
return render(request,'editprofile.html',{"image":image,"name":name,"email":email,"branch":branch,"enrollment":enrollment})
def postedit(request):
if request.method=='POST':
import time
from datetime import datetime,timezone
import pytz
tz = pytz.timezone('Asia/Kolkata')
time_now = datetime.now(timezone.utc).astimezone(tz)
millis = int(time.mktime(time_now.timetuple()))
imgurl=request.POST.get('url') #for image update
print("IMAGEurl",imgurl)
idtoken=request.session['uid']
a = authe.get_account_info(idtoken)
a=a['users']
a=a[0]
a=a['localId']
data={ #image update remaining--sumit
"imgUrl":imgurl,
}
database.child('users').child(a).update(data)
return render(request,'home2.html')
message = "Please Login First"
return render(request, "Login.html", {"message": message})
|
import argparse
import json
import logging
import logging.config
import os
import pkg_resources
import sys
from poller import Poller
import settings
def setup_logging(cli_args):
""" Setup logging configuration
:param cli_args: Argparse object containing parameters from the command line
:return: Logger
"""
logconfig_path = cli_args.log_config
if not os.path.isabs(logconfig_path):
resource_package = __name__
logconfig_io = pkg_resources.resource_stream(resource_package, logconfig_path)
logconfig_string = logconfig_io.getvalue().decode(encoding="utf-8")
config = json.loads(logconfig_string)
else:
with open(logconfig_path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
class EnvDefault(argparse.Action):
"""
A custom argparse class to handle the consumption of environment variables in
addition to commandline parameters.
"""
def __init__(self, envvar, required=True, default=None, **kwargs):
if envvar:
if envvar in os.environ:
default = os.environ[envvar]
if required and default:
required = False
super(EnvDefault, self).__init__(default=default, required=required, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
def parse_cli_args():
"""
A method for organizing all commandline argument and environment variable parsing.
:return: An argparse object containing all CLI/ENV argument values.
"""
p = argparse.ArgumentParser(description="Marathon Autoscaler")
p.add_argument("-i", "--interval", dest="sleep_interval", action=EnvDefault, envvar="INTERVAL", type=int,
default=5, help="The time duration in seconds between polling events")
p.add_argument("--mesos-uri", dest="mesos_uri", action=EnvDefault, envvar="MESOS_URI", type=str, required=True,
help="The Mesos Endpoint")
p.add_argument("--agent-port", dest="agent_port", action=EnvDefault, envvar="AGENT_PORT", type=int,
required=True, default=5051, help="Mesos Agent Port")
p.add_argument("--marathon-uri", dest="marathon_uri", action=EnvDefault, envvar="MARATHON_URI", type=str,
required=True, help="The Marathon Endpoint")
p.add_argument("--marathon-user", dest="marathon_user", action=EnvDefault, envvar="MARATHON_USER", type=str,
required=False, help="The Marathon Username", default=None)
p.add_argument("--marathon-pass", dest="marathon_pass", action=EnvDefault, envvar="MARATHON_PASS", type=str,
required=False, help="The Marathon Password", default=None)
p.add_argument("--cpu-fan-out", dest="cpu_fan_out", action=EnvDefault, envvar="CPU_FAN_OUT", type=int,
default=None, required=False, help="Number of subprocesses to use for gathering and sending stats to Datadog")
p.add_argument("--dd-api-key", dest="datadog_api_key", action=EnvDefault, envvar="DATADOG_API_KEY", type=str,
required=False, help="Datadog API key")
p.add_argument("--dd-app-key", dest="datadog_app_key", action=EnvDefault, envvar="DATADOG_APP_KEY", type=str,
required=False, help="Datadog APP key")
p.add_argument("--dd-env", dest="datadog_env", action=EnvDefault, envvar="DATADOG_ENV", type=str,
required=False, help="Datadog ENV variable")
p.add_argument("--log-config", dest="log_config", action=EnvDefault, envvar="LOG_CONFIG", type=str,
default="/app/logging_config.json",
help="Path to logging configuration file")
p.add_argument("--enforce-version-match", dest="enforce_version_match", action=EnvDefault,
envvar="ENFORCE_VERSION_MATCH", type=bool, default=False,
required=False, help="If set, version matching will be required of applications to participate")
p.add_argument("--rules-prefix", dest="rules_prefix", action=EnvDefault,
envvar="RULES_PREFIX", type=str, default="mas_rule",
required=False, help="The prefix for rule names")
return p.parse_args()
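# Example (added for illustration; the script name below is hypothetical): thanks
# to EnvDefault, each flag may come from the command line or from its environment
# variable, so
#   MESOS_URI=http://mesos:5050 MARATHON_URI=http://marathon:8080 python poller_main.py --interval 10
# is equivalent to passing --mesos-uri and --marathon-uri explicitly.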
def add_args_to_settings(cli_args):
    for name, value in vars(cli_args).items():
setattr(settings, name, value)
if __name__ == "__main__":
args = parse_cli_args()
add_args_to_settings(args)
setup_logging(args)
logging.info(args)
poller = Poller(args)
poller.start()
sys.exit(0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def latinhypercube(n, samples, limits=None, spread=True, seed=None):
import numpy as np
np.random.seed(seed)
def _spread_lh(values, ndims, max_niter=10000):
nrejected = 0
total_dist = 0
for x in range(ndims):
for y in range(0, x):
dist, ind = nearest_neighbour(values[:, [x, y]])
total_dist += np.sum(dist)
for i in range(max_niter):
# We select two random points without replacement
i1, i2 = np.random.choice(len(ind), 2, replace=False)
# Let's swap values and accept it if it creates a more homogeneous distribution
for dim in range(ndims):
values[i1, dim], values[i2, dim] = values[i2, dim], values[i1, dim]
new_total_dist = 0
for x in range(ndims):
for y in range(0, x):
dist, ind = nearest_neighbour(values[:, [x, y]])
new_total_dist += np.sum(dist)
if (new_total_dist < total_dist):
values[i1, dim], values[i2, dim] = values[i2, dim], values[i1, dim]
nrejected += 1
if nrejected == 2000:
return values
else:
nrejected = 0
total_dist = new_total_dist
return values
# Generate the intervals
cut = np.linspace(0, 1, samples + 1)
# Fill points uniformly in each interval
u = np.random.rand(samples, n)
a = cut[:samples]
b = cut[1:samples + 1]
rdpoints = np.zeros_like(u)
for j in range(n):
rdpoints[:, j] = u[:, j]*(b-a) + a
# Make the random pairings
H = np.zeros_like(rdpoints)
for j in range(n):
order = np.random.permutation(range(samples))
H[:, j] = rdpoints[order, j]
if spread is True:
H = _spread_lh(H, n)
if limits is not None:
H = (H *(limits[:,1]-limits[:,0])+limits[:,0])
return H
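# Note (added): the original module calls a `nearest_neighbour` helper that is
# not defined here. Below is a minimal pure-numpy sketch of what it is assumed
# to do: return, for each point, the distance to and index of its nearest other
# point. Replace it with the project's own helper if one exists.
def nearest_neighbour(points):
    import numpy as np
    points = np.asarray(points, dtype=float)
    # Pairwise Euclidean distances, with self-distances masked out.
    diff = points[:, None, :] - points[None, :, :]
    dists = np.sqrt((diff ** 2).sum(axis=-1))
    np.fill_diagonal(dists, np.inf)
    ind = dists.argmin(axis=1)
    dist = dists[np.arange(len(points)), ind]
    return dist, ind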
def set_rcParam(useTex=True):
"""Alternative styles for the plots
:param useTex: Use Latex, defaults to True
:type useTex: bool, optional
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
params = {"text.usetex": useTex,
"axes.labelpad": 10,
"axes.labelsize": 7,
"axes.linewidth": 2,
"axes.labelpad": 10,
"xtick.labelsize": 33,
"xtick.bottom": True,
"xtick.top": True,
"xtick.direction": 'in',
"xtick.minor.visible": True,
"xtick.minor.size": 6,
"xtick.minor.width": 1,
"xtick.minor.pad": 4,
"xtick.major.size": 12,
"xtick.major.width": 2,
"xtick.major.pad": 3,
"ytick.labelsize": 33,
"ytick.left": True,
"ytick.right": True,
"ytick.direction": 'in',
"ytick.minor.visible": True,
"ytick.minor.size": 6,
"ytick.minor.width": 1,
"ytick.minor.pad": 4,
"ytick.major.size": 12,
"ytick.major.width": 2,
"ytick.major.pad": 3,
"figure.figsize": "10, 10",
"figure.dpi": 80,
"figure.subplot.left": 0.05,
"figure.subplot.bottom": 0.05,
"figure.subplot.right": 0.95,
"figure.subplot.top": 0.95,
"legend.numpoints": 1,
"legend.frameon": False,
"legend.handletextpad": 0.3,
"savefig.dpi": 80,
"font.family": 'serif',
"path.simplify": True
}
# plt.rc("font", family = "serif")
plt.rcParams.update(params)
|
# Read three integers and print the largest one.
x, y, s = map(int, input().split(" "))
if (x >= y and x >= s):
    print(x)
elif (y >= s and y >= x):
    print(y)
else:
    print(s)
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import Post, Teacher
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from .forms import PostForm
from django.contrib import messages
# def home(request):
# context = {
# 'posts': Post.objects.all()
# }
# return render(request, 'myapp/home.html', context)
def home(request):
context = {
'teachers': Teacher.objects.all()
}
return render(request, 'myapp/home.html', context)
class PostListView(ListView):
model = Post
template_name = 'myapp/home.html' # <app>/<model>_<viewtype>.html
context_object_name = 'posts'
ordering = ['-date_posted']
paginate_by = 9
class TeacherListView(ListView):
model = Teacher
template_name = 'myapp/home.html' # <app>/<model>_<viewtype>.html
context_object_name = 'teachers'
ordering = ['-date_posted']
paginate_by = 9
class UserPostListView(ListView):
model = Post
template_name = 'myapp/user_posts.html' # <app>/<model>_<viewtype>.html
context_object_name = 'posts'
paginate_by = 9
def get_queryset(self):
user = get_object_or_404(User, username=self.kwargs.get('username'))
return Post.objects.filter(author=user).order_by('-date_posted')
class UserTeacherListView(ListView):
model = Teacher
template_name = 'myapp/teacher_posts.html' # <app>/<model>_<viewtype>.html
context_object_name = 'teachers'
paginate_by = 9
def get_queryset(self):
user = get_object_or_404(User, username=self.kwargs.get('username'))
return Post.objects.filter(author=user).order_by('-date_posted')
class PostDetailView(DetailView):
model = Post
class TeacherDetailView(DetailView):
model = Teacher
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['title', 'content']
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Post
fields = ['title', 'content']
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
class TeacherUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Teacher
fields = ['teacher_name', 'course_content']
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
def test_func(self):
teacher = self.get_object()
if self.request.user == teacher.author:
return True
return False
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Post
success_url = '/'
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
class TeacherDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Teacher
success_url = '/'
def test_func(self):
teacher = self.get_object()
if self.request.user == teacher.author:
return True
return False
def about(request):
return render(request, 'myapp/about.html', {'title': 'about'})
def Post(request):
if request.method == 'POST':
post_form = PostForm(request.POST, request.FILES)
if post_form.is_valid():
post_form.save()
            messages.success(request, 'Image uploaded successfully!')
context = {
'post_form': post_form
}
return render(request, 'myapp/home.html', context)
|
from bs4 import BeautifulSoup
import requests
import vk_api
import time
import sys
f1 = open(r'D:\\TEST FILES\\THESIS\\Корпус до обработки в RStudio\\new_ids.txt', 'r', encoding='utf8')
ids=[]
for line in f1:
k = line.replace('\n','')
ids.append(k)
f1.close()
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
def captcha_handler(captcha):
""" При возникновении капчи вызывается эта функция и ей передается объект
капчи. Через метод get_url можно получить ссылку на изображение.
Через метод try_again можно попытаться отправить запрос с кодом капчи
"""
key = input("Enter captcha code {0}: ".format(captcha.get_url())).strip()
    # Try sending the request again with the captcha code
return captcha.try_again(key)
def auth_handler():
""" При двухфакторной аутентификации вызывается эта функция.
"""
    # Two-factor authentication code
key = input("Enter authentication code: ")
    # If True, remember this device; if False, do not.
remember_device = True
return key, remember_device
def main():
""" Пример обработки двухфакторной аутентификации """
login, password = LOGIN, PASSWORD
vk_session = vk_api.VkApi(
login, password,
        # handler function for two-factor authentication
auth_handler=auth_handler,
captcha_handler=captcha_handler
)
try:
vk_session.auth()
except vk_api.AuthError as error_msg:
print(error_msg)
return
vk = vk_session.get_api()
for user in ids:
f = open(r'D:\TEST FILES\THESIS\Корпус до обработки\new\VK_PARSING_'+str(user)+'.txt', 'w', encoding = 'utf-8')
        response = vk.wall.get(owner_id = str(user), count=200)  # use the wall.get method
print('Пользователь '+user+'\n\n')
print('Пользователь '+user+'\n\n', file=f)
        for k in response['items']:  # iterate over the fetched posts and their fields
if len(k['text'].translate(non_bmp_map)):
if 'copy_history' in k:
text = k['copy_history'][0]
                    check_len = len(text['text'].translate(non_bmp_map) + k['text'].translate(non_bmp_map))  # combined length of the post and the repost
if '2014' not in str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(k['date']))) and '2017' not in str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(k['date']))) and '2016' not in str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(k['date']))) and '2015' not in str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(k['date']))) and check_len > 200:
print(k['text'].translate(non_bmp_map))
                        print(text['text'].translate(non_bmp_map))  # repost text
                        print(k['text'].translate(non_bmp_map), file=f)
                        print(text['text'].translate(non_bmp_map), file=f)  # repost text
                        tme = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(k['date']))  # convert seconds since the epoch (e.g. 1999929929299292) into a readable time
print('Дата создания поста - ', tme)
print('----------------------------------------------------------\n')
print('Дата создания поста - ', tme, file=f)
print('----------------------------------------------------------\n', file=f)
else:
if len(k['text'].translate(non_bmp_map)) > 200 and '2014' not in str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(k['date']))) and '2017' not in str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(k['date']))) and '2016' not in str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(k['date']))) and '2015'not in str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(k['date']))):
print(k['text'].translate(non_bmp_map))
print(k['text'].translate(non_bmp_map), file=f)
                        tme = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(k['date']))  # convert seconds since the epoch (e.g. 1999929929299292) into a readable time
print('Дата создания поста - ', tme)
print('----------------------------------------------------------\n')
print('Дата создания поста - ', tme, file=f)
print('----------------------------------------------------------\n', file=f)
f.close()
if __name__ == '__main__':
main()
|
import numpy as np
from numpy import array
def foo():
a = array([[1,1,1,1,1],[0,1,1,1,1],[1,1,0,0,-1],[1,0,0,0,-1],[0,0,0,0,-1],[0,0,0,1,-1],[0,0,1,0,-1],[0,0,1,1,-1],[1,0,1,1,1],[1,1,0,1,1],[0,1,0,0,-1],[0,1,0,1,-1],[0,1,1,0,-1],[1,0,0,1,-1],[1,0,1,0,-1],[1,1,1,0,1]])
print(a.shape)
ground = a[:,4].astype(int)
print(ground)
# Adding 1 to the data
data = a[:,0:4]
bias_1 = np.ones((a.shape[0],1))
data = np.append(data,bias_1,axis=1)
print(data)
w_range = [0,0.5,1]
b_range = [-3.5,-3.25,-3,-2.75,-2.5,-2.25,-2,-1.75,-1.5,-1.25,-1,0.75,-0.5,0.25,0,0.5,1]
cnt = 0;
for i in range(len(w_range)):
for j in range(len(w_range)):
for k in range(len(w_range)):
for l in range(len(w_range)):
for m in range(len(b_range)):
w = np.transpose(array([[w_range[i],w_range[j],w_range[k],w_range[l],b_range[m]]]))
#print(np.transpose(w))
pred = (np.matmul(data,w))
pred[pred>=0] = 1
pred[pred<0] = -1
pred = np.transpose(pred)[0].astype(int);
cnt = cnt + 1
if(cnt % 100 == 0):
print(cnt)
#return
if ((pred.astype(int)==ground.astype(int)).all()):
print("Found a weight combination")
print(pred)
print(ground)
print(np.transpose(w))
return
foo()
|
from django.shortcuts import render
from .forms import PubCourseForm
from apps.course.models import Course,CourseCategory,Teacher
from django.views.generic import View
from utils import restful
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required
@method_decorator(permission_required(perm='course.change_course',login_url='/'),name='dispatch')
class PubCourse(View):
def get(self,request):
context={
'categories':CourseCategory.objects.all(),
'teachers':Teacher.objects.all()
}
return render(request,'cms/pub_course.html',context=context)
def post(self,request):
form=PubCourseForm(request.POST)
if form.is_valid():
title=form.cleaned_data.get('title')
category_id=form.cleaned_data.get('category_id')
video_url=form.cleaned_data.get('video_url')
cover_url=form.cleaned_data.get('cover_url')
price=form.cleaned_data.get('price')
duration=form.cleaned_data.get('duration')
profile=form.cleaned_data.get('profile')
teacher_id=form.cleaned_data.get('teacher_id')
category=CourseCategory.objects.get(pk=category_id)
teacher=Teacher.objects.get(pk=teacher_id)
Course.objects.create(title=title,video_url=video_url,cover_url=cover_url,price=price,duration=duration,profile=profile,category=category,teacher=teacher)
return restful.success()
else:
return restful.params_error(message=form.get_errors())
|
'''
worker class
'''
# inheritance class
class Worker(Person):
'''
creates child Worker class that inherits from parent Person class
assigns additional attributes characteristic of Worker class
income is an integer value
'''
# instantiate Worker class
def __init__(self, name='Doe', age=0, height=0, weight=0, company='xyz', job_title='xyz', personal_title='xyz', income=0, training=False):
super().__init__(name, age, height, weight)
self.company = company
self.job_title = job_title
self.personal_title = personal_title
self.income = int(income)
self.training = training
# Note: parent methods 1-3 are available to the child class and do NOT have
# to be re-created to be used as-is
# create method 4
# replaces greets method for Worker class only
    def greets(self):
        '''
        returns user greeting with user title and user name
        '''
        print('Hello! Please enter the following information: ')
n = input('What is your name? ')
pt = input('What is your personal title? Dr., Mr., Mrs., Ms., or Miss ')
user_name_2 = Worker( name=str(n), personal_title=str(pt))
return f'Hello, {user_name_2.personal_title} {user_name_2.name}! My name is {self.personal_title} {self.name}. I work for {self.company}. '
#import numpy as np
import random
# lists for function
name = ['Rebecca', 'Jeannine', 'Taylor']
company =['Lambda', 'Google', 'GitHub', 'Exxon' ]
personal_title = ['Dr.', 'Mr.', 'Mrs.', 'Ms.', 'Miss']
job_title = ['Engineer', 'Analyst', 'Consultant']
training = ['True', 'False']
# instantiate 10 Workers
def bunch_of_workers():
a_name = random.choice(name)
a_age = random.randint(16, 120)
a_height = random.randint(5, 501)
a_weight = random.randint(50, 500)
a_company = random.choice(company)
a_job_title = random.choice(job_title)
a_personal_title = random.choice(personal_title)
a_income = random.randint(0, 1000000000)
a_training = random.choice(training)
a_worker = Worker(a_name, a_age, a_height, a_weight, a_company, a_job_title, a_personal_title, a_income, a_training)
# worker.append(a_worker)
return a_worker
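# Illustration (added; assumes the parent Person class defined elsewhere in this
# project is importable so that Worker can be instantiated): build a small crew.
if __name__ == '__main__':
    crew = [bunch_of_workers() for _ in range(10)]
    for member in crew:
        print(member.personal_title, member.name, '-', member.job_title, 'at', member.company)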
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pants.backend.codegen.thrift.scrooge import additional_fields
from pants.backend.codegen.thrift.scrooge.additional_fields import ScroogeFinagleBoolField
from pants.backend.codegen.thrift.scrooge.subsystem import ScroogeSubsystem
from pants.backend.codegen.thrift.target_types import (
ThriftSourceField,
ThriftSourcesGeneratorTarget,
ThriftSourceTarget,
)
from pants.core.goals.generate_lockfiles import GenerateToolLockfileSentinel
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests, RemovePrefix, Snapshot
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.target import (
TransitiveTargets,
TransitiveTargetsRequest,
WrappedTarget,
WrappedTargetRequest,
)
from pants.engine.unions import UnionRule
from pants.jvm.goals import lockfile
from pants.jvm.jdk_rules import InternalJdk, JvmProcess
from pants.jvm.resolve.coursier_fetch import ToolClasspath, ToolClasspathRequest
from pants.jvm.resolve.jvm_tool import GenerateJvmLockfileFromTool, GenerateJvmToolLockfileSentinel
from pants.jvm.target_types import PrefixedJvmJdkField, PrefixedJvmResolveField
from pants.source.source_root import SourceRootsRequest, SourceRootsResult
from pants.util.logging import LogLevel
@dataclass(frozen=True)
class GenerateScroogeThriftSourcesRequest:
thrift_source_field: ThriftSourceField
lang_id: str
lang_name: str
@dataclass(frozen=True)
class GeneratedScroogeThriftSources:
snapshot: Snapshot
class ScroogeToolLockfileSentinel(GenerateJvmToolLockfileSentinel):
resolve_name = ScroogeSubsystem.options_scope
@rule
async def generate_scrooge_thrift_sources(
request: GenerateScroogeThriftSourcesRequest,
jdk: InternalJdk,
scrooge: ScroogeSubsystem,
) -> GeneratedScroogeThriftSources:
output_dir = "_generated_files"
toolcp_relpath = "__toolcp"
lockfile_request = await Get(GenerateJvmLockfileFromTool, ScroogeToolLockfileSentinel())
tool_classpath, transitive_targets, empty_output_dir_digest, wrapped_target = await MultiGet(
Get(ToolClasspath, ToolClasspathRequest(lockfile=lockfile_request)),
Get(TransitiveTargets, TransitiveTargetsRequest([request.thrift_source_field.address])),
Get(Digest, CreateDigest([Directory(output_dir)])),
Get(
WrappedTarget,
WrappedTargetRequest(
request.thrift_source_field.address, description_of_origin="<infallible>"
),
),
)
transitive_sources, target_sources = await MultiGet(
Get(
SourceFiles,
SourceFilesRequest(
tgt[ThriftSourceField]
for tgt in transitive_targets.closure
if tgt.has_field(ThriftSourceField)
),
),
Get(SourceFiles, SourceFilesRequest([request.thrift_source_field])),
)
sources_roots = await Get(
SourceRootsResult,
SourceRootsRequest,
SourceRootsRequest.for_files(transitive_sources.snapshot.files),
)
deduped_source_root_paths = sorted({sr.path for sr in sources_roots.path_to_root.values()})
input_digest = await Get(
Digest,
MergeDigests(
[
transitive_sources.snapshot.digest,
target_sources.snapshot.digest,
empty_output_dir_digest,
]
),
)
maybe_include_paths = []
for path in deduped_source_root_paths:
maybe_include_paths.extend(["-i", path])
maybe_finagle_option = []
if wrapped_target.target[ScroogeFinagleBoolField].value:
maybe_finagle_option = ["--finagle"]
extra_immutable_input_digests = {
toolcp_relpath: tool_classpath.digest,
}
result = await Get(
ProcessResult,
JvmProcess(
jdk=jdk,
classpath_entries=tool_classpath.classpath_entries(toolcp_relpath),
argv=[
"com.twitter.scrooge.Main",
*maybe_include_paths,
"--dest",
output_dir,
"--language",
request.lang_id,
*maybe_finagle_option,
*target_sources.snapshot.files,
],
input_digest=input_digest,
extra_jvm_options=scrooge.jvm_options,
extra_immutable_input_digests=extra_immutable_input_digests,
extra_nailgun_keys=extra_immutable_input_digests,
description=f"Generating {request.lang_name} sources from {request.thrift_source_field.address}.",
level=LogLevel.DEBUG,
output_directories=(output_dir,),
),
)
output_snapshot = await Get(Snapshot, RemovePrefix(result.output_digest, output_dir))
return GeneratedScroogeThriftSources(output_snapshot)
@rule
def generate_scrooge_lockfile_request(
_: ScroogeToolLockfileSentinel, scrooge: ScroogeSubsystem
) -> GenerateJvmLockfileFromTool:
return GenerateJvmLockfileFromTool.create(scrooge)
def rules():
return [
*collect_rules(),
*additional_fields.rules(),
*lockfile.rules(),
UnionRule(GenerateToolLockfileSentinel, ScroogeToolLockfileSentinel),
ThriftSourceTarget.register_plugin_field(PrefixedJvmJdkField),
ThriftSourcesGeneratorTarget.register_plugin_field(PrefixedJvmJdkField),
ThriftSourceTarget.register_plugin_field(PrefixedJvmResolveField),
ThriftSourcesGeneratorTarget.register_plugin_field(PrefixedJvmResolveField),
]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from urllib.parse import urlparse
url='http://wikipedia.org'
obj=urlparse(url)
print(obj.scheme)
print(obj.netloc)
print(obj.hostname)
print(obj.geturl())
# In[7]:
import whois
for x in whois.whois('google.com'):
print(x)
# In[ ]:
|
from random import randint
PROGRESSION_LENGTH = 10
TITLE = "Progression game"
RULES = "What number is missing in the progression?"
def game_round():
start_num = randint(1, 20)
progression_step = randint(1, 10)
missing_num_index = randint(1, PROGRESSION_LENGTH - 1)
numbers = list(
range(
start_num,
start_num + PROGRESSION_LENGTH * progression_step,
progression_step,
)
)
missing_number = numbers[missing_num_index]
numbers[missing_num_index] = ".."
numbers = map(str, numbers)
question = f"Question: {' '.join(numbers)}"
return (question, missing_number)
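
# Hypothetical usage sketch (not part of the original module): shows one way the
# (question, answer) pair produced by game_round() could be used on the console.
if __name__ == "__main__":
    print(TITLE)
    print(RULES)
    question, answer = game_round()
    print(question)
    if input("Your answer: ") == str(answer):
        print("Correct!")
    else:
        print(f"Wrong. The correct answer was {answer}.")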
|
# Test program for the Device class by Per
from jnpr.junos import Device
import base64      # to decode the stored password (base64 encodes, it does not encrypt)
import myPassword  # local helper module that returns the base64-encoded password

try:
    # Create device object
    mySRX_password = base64.b64decode(myPassword.getPassword()).decode()
    print('Testing if device can be created')
    myDev = Device(host='192.168.1.1', user='root', password=mySRX_password)
    print('Okay, device is created. Now testing if it is reachable...')
    myDev.auto_probe = 3  # Probe for up to 3 seconds when connecting, to check reachability
    print('Okay, now trying to connect...')
    myDev.open()  # Connect to device (the reachability probe happens here)
print('Okay, connected. Sending cli command.')
SRX_interface = myDev.cli("show interfaces terse", warning=False) # Disable warnings
print(SRX_interface)
myDev.close()
except Exception as somethingWentWrong:
print(somethingWentWrong)
|
from .index_view import IndexView
from .detail_view import DetailView
from .results_view import ResultsView
from .vote_view import VoteView
|
fname = input("Enter file name: ")
hours = dict()
with open(fname) as file:
    for line in file:
        line = line.strip()
        # In mbox files, lines starting with 'From ' carry the timestamp as the sixth field
        if line.startswith('From '):
            words = line.split()
            time = words[5]
            hour = time[:2]
            hours[hour] = hours.get(hour, 0) + 1
for key, value in sorted(hours.items()):
    print(key, value)
|
import csv
import re
import os
import sys
import string
import nltk
from nltk.tokenize import word_tokenize
from configparser import ConfigParser
# Some useful regexps
MENTIONS = re.compile(r'@[^\s]*')
URL = re.compile(r'htt[^\s]*')
SYMBOLS = re.compile(r'[^A-Za-z ]')
RT = re.compile(r'RT ')
SPACE = re.compile(r'\s+')
pathname = os.path.dirname(sys.argv[0])
config = ConfigParser()
config.read( pathname + '/../config.ini')
#result folder with the downloaded tweets
input_folder = config['twitter']['twitter_raw_tweets_folder']
output_folder = config['twitter']['twitter_cleaned_tweets_folder']
# Load the raw tweets file and return its contents
def loadFile(inputfile):
    text = ""
    try:
        with open(inputfile, 'rt', encoding='UTF-8') as file:
            text = file.read()
    except FileNotFoundError:
        print("File not found, please provide a valid path")
    return text
# TODO: need to implement a csv and a txt outfile
def writeFile(outfile, text, file_type):
    # Output the file to csv (expects `text` to be a mapping of tweet_id -> tweet text)
    if file_type == "csv":
        outfile = outfile + ".csv"
        with open(outfile, "wt", encoding="utf8", newline="") as out_file:
            writer = csv.writer(out_file, delimiter="\t")
            for tweet_id in text:
                writer.writerow([tweet_id, text[tweet_id]])
    # Output the file to txt (expects `text` to be a single string)
    elif file_type == "txt":
        outfile = outfile + ".txt"
        with open(outfile, 'a', encoding='utf8') as text_file:
            text_file.write(text + "\n")
    # Error if the extension is not valid
    else:
        print("No valid file extension")
        return
    print("File successfully written")
#Standard preprocessing with regexp
def cleanTweets(text):
#Text preprocessing using the REGEXP
text = MENTIONS.sub(' ', text) # Mentions
text = URL.sub(' ', text) # URLs
text = SYMBOLS.sub(' ', text) # Symbols
text = RT.sub(' ', text) # RT
text = SPACE.sub(' ', text)
final_text = text.strip() # spaces at head or tail
return(final_text)
# Another way to do the preprocessing, using nltk and some other libraries
def cleanTweetsNLTK(text):
#Tokenize the words
tokens = word_tokenize(text)
# convert to lower case
tokens = [w.lower() for w in tokens]
# remove punctuation from each word
table = str.maketrans('', '', string.punctuation)
stripped = [w.translate(table) for w in tokens]
# remove remaining tokens that are not alphabetic
words = [word for word in stripped if word.isalpha()]
return(words)
def preprocessing(profile):
print("Start preprocessing")
input_file = os.path.join(input_folder, "raw_%s.txt" % profile)
text = loadFile(input_file)
#call the text preprocessing
result_text = cleanTweets(text)
#write the outfile
outfile = os.path.join(output_folder, "clean_%s" % profile)
file_type = "txt"
writeFile(outfile,result_text,file_type)
print("Finish preprocessing tweets")
|
import urwid
import utils
import sniffer
import logging
import threading
class NetworkInterfaceSelector():
"""This class implements the behavior of the network interface selector.
It periodically scans the available network interfaces on the system, and
updates their list adding the corresponding actions to the GUI.
It also initializes the InterfaceSniffers, accordingly.
"""
def __init__(self, main_frame, memory_seconds, features_lag):
self._main_frame = main_frame
self._memory_seconds = memory_seconds
self._features_lag = features_lag
self._traffic_sniffers = {}
self._interface_sniffers = {}
self._interface_buttons = {}
self._list_walker = urwid.SimpleListWalker([])
self._loop = None
self._lock = threading.Lock()
self._logger = logging.getLogger(__name__)
def attach_to_loop(self, loop):
"""Makes sure that the network interface list is updated periodically.
It changes the interface sniffers according to the state
of the network interfaces."""
self._loop = loop
self._run_loop()
def _run_loop(self, loop=None, user_data=None):
net_interfaces = utils.list_interfaces()
interfaces_to_remove = []
# Mark interfaces to remove
for previous_net_interface in self._interface_buttons:
found = False
for new_net_interface in net_interfaces:
if (new_net_interface[0] == previous_net_interface):
found = True
break
if (not found):
interfaces_to_remove.append(previous_net_interface)
# Remove interfaces deactivating the sniffers associated
for net_interface in interfaces_to_remove:
self._logger.info("Network interface %s is not available anymore" % net_interface)
checkbox = self._interface_buttons[net_interface]
self._list_walker.remove(checkbox)
self._lock.acquire()
if (net_interface in self._traffic_sniffers):
self._traffic_sniffers[net_interface].terminate()
self._traffic_sniffers.pop(net_interface, None)
self._interface_buttons.pop(net_interface)
self._lock.release()
# Add new interfaces
for net_interface in net_interfaces:
if (net_interface[0] not in self._interface_buttons):
self._logger.info("Adding network interface %s - %s" % (net_interface[0], net_interface[1]))
checkbox = urwid.CheckBox("%s - %s" % (net_interface[0], net_interface[1]), False, False, self._action_interface_update, net_interface[0])
self._list_walker.append(checkbox)
self._interface_buttons[net_interface[0]] = checkbox
# Attach to loop
self._loop.set_alarm_in(1, self._run_loop)
def _action_interface_update(self, check_box, selected, interface):
self._lock.acquire()
if (selected):
if (interface not in self._traffic_sniffers):
# Create and start thread
self._traffic_sniffers[interface] = sniffer.InterfaceSniffer(interface, self._main_frame, self._memory_seconds, self._features_lag)
self._traffic_sniffers[interface].start()
self._traffic_sniffers[interface].do_start()
self._logger.debug("Selected interface %s" % interface)
else:
self._traffic_sniffers[interface].terminate()
self._traffic_sniffers.pop(interface, None)
self._logger.debug("Deselected interface %s" % interface)
self._lock.release()
def get_list_box(self):
"""Returns the list box. It makes it possible to update the
list of interfaces at regular intervals"""
return urwid.ListBox(self._list_walker)
    def terminate(self):
        """Gracefully stops the interface sniffers."""
        self._lock.acquire()
        for interface in self._traffic_sniffers:
            self._logger.info("Gracefully terminating sniffers on interface %s" % interface)
            # Renamed local variable to avoid shadowing the imported `sniffer` module
            traffic_sniffer = self._traffic_sniffers[interface]
            traffic_sniffer.terminate()
        self._lock.release()
def get_traffic_sniffers(self):
self._lock.acquire()
sniffers_copy = dict(self._traffic_sniffers)
self._lock.release()
return sniffers_copy
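
# Hypothetical wiring sketch (not part of the original module): how the selector is
# expected to be hooked into an urwid MainLoop, assuming a `main_frame` widget and the
# project's own `utils` and `sniffer` modules are available; the numeric arguments are
# example values only.
#
#     selector = NetworkInterfaceSelector(main_frame, memory_seconds=60, features_lag=5)
#     loop = urwid.MainLoop(urwid.Frame(selector.get_list_box()))
#     selector.attach_to_loop(loop)
#     loop.run()
#     selector.terminate()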
|
from _typeshed import Incomplete
def hopcroft_karp_matching(G, top_nodes: Incomplete | None = None): ...
def eppstein_matching(G, top_nodes: Incomplete | None = None): ...
def to_vertex_cover(G, matching, top_nodes: Incomplete | None = None): ...
maximum_matching = hopcroft_karp_matching
def minimum_weight_full_matching(
G, top_nodes: Incomplete | None = None, weight: str = "weight"
): ...
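
# Illustrative usage (not part of the stub): these functions operate on bipartite
# NetworkX graphs, e.g.
#
#     import networkx as nx
#     from networkx.algorithms import bipartite
#     G = nx.complete_bipartite_graph(2, 3)
#     matching = bipartite.hopcroft_karp_matching(G, top_nodes={0, 1})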
|