blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5d85ec597bf50561c780343e1b57a17fe70cdec9
|
581c2beee0cf5656971987a19064524e3de7bc68
|
/distributions/lomax.py
|
6afe6b8af24109d2284426642b481f36d5ef196f
|
[
"MIT"
] |
permissive
|
bballamudi/survival
|
ac6925b30ba3ca9ed574ba056d36830f9129114f
|
c86186b08e7896096b9d59a5013335f56283a7c4
|
refs/heads/master
| 2020-03-21T14:16:29.578172
| 2018-06-09T21:09:08
| 2018-06-09T21:09:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,123
|
py
|
import numpy as np
from scipy.stats import lomax
from distributions.basemodel import *
class Lomax(Base):
    '''
    A Lomax distribution
    (https://en.wikipedia.org/wiki/Lomax_distribution) with shape k and
    scale lmb; survival S(t) = (1+lmb*t)**-k. Supports fitting to censored
    and uncensored data and optimal waiting-threshold calculations.
    '''

    def __init__(self, k=None, lmb=None, ti=None, xi=None):
        '''
        Instantiate a Lomax distribution.
        args:
            k: The shape parameter of the Lomax distribution.
            lmb: The scale parameter of the Lomax distribution.
            ti: The uncensored samples for fitting the distribution.
            xi: The censored samples for fitting the distribution.
        '''
        if ti is not None:
            self.train_org = ti
            self.train_inorg = xi
            # Fit self.k, self.lmb and self.params to the data.
            # BUG FIX: newtonRh was commented out in the original source
            # while still being called here, which raised AttributeError.
            self.newtonRh()
        else:
            self.train = []
            self.test = []
            self.train_org = []
            self.train_inorg = []
            self.k = k
            self.lmb = lmb
            self.params = [self.k, self.lmb]

    def determine_params(self, k, lmb, params):
        '''
        Resolve the (k, lmb) pair to use, preferring explicit arguments,
        then params, then this instance's fitted values. Defined in basemodel.py.
        '''
        return super(Lomax, self).determine_params(k, lmb, params)

    def pdf(self, t, k=None, lmb=None, params=None):
        '''
        The probability density function (PDF) of the Lomax distribution.
        args:
            t: The value at which the PDF is to be calculated.
            k: The shape parameter of the Lomax distribution.
            lmb: The scale parameter of the Lomax distribution.
            params: A 2-element array [k, lmb], alternative to k and lmb.
        '''
        [k, lmb] = self.determine_params(k, lmb, params)
        return lmb * k / (1 + lmb * t) ** (k + 1)

    def cdf(self, t, k=None, lmb=None, params=None):
        '''
        The cumulative distribution function of the Lomax distribution:
        the probability that a sample is lower than t.
        args:
            t: The value at which the CDF is to be calculated.
            k: The shape parameter of the Lomax.
            lmb: The scale parameter of the Lomax.
            params: A 2-element array with the shape and scale parameters.
        '''
        [k, lmb] = self.determine_params(k, lmb, params)
        return 1 - (1 + lmb * t) ** -k

    def survival(self, t, k=None, lmb=None, params=None):
        '''
        The survival function (1 - CDF) of the Lomax distribution.
        '''
        [k, lmb] = self.determine_params(k, lmb, params)
        return (1 + lmb * t) ** -k

    def logpdf(self, t, k, lmb):
        '''
        The logarithm of the PDF. Handy for calculating log likelihood.
        args:
            t: The value at which the function is to be calculated.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        return np.log(k) + np.log(lmb) - (k + 1) * np.log(1 + lmb * t)

    def logsurvival(self, t, k, lmb):
        '''
        The logarithm of the survival function. Handy for log likelihood.
        args:
            t: The value at which the function is to be calculated.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        return -k * np.log(1 + lmb * t)

    def loglik(self, t, x, k=0.5, lmb=0.3):
        '''
        The log likelihood: uncensored samples contribute log-PDF terms,
        censored samples contribute log-survival terms.
        args:
            t: The un-censored samples.
            x: The censored samples.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        return sum(self.logpdf(t, k, lmb)) + sum(self.logsurvival(x, k, lmb))

    def grad(self, t, x, k=0.5, lmb=0.3):
        '''
        The analytic gradient of the log-likelihood w.r.t. (k, lmb).
        args:
            t: The un-censored samples.
            x: The censored samples.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        n = len(t)
        delk = n / k - sum(np.log(1 + lmb * t)) - sum(np.log(1 + lmb * x))
        dellmb = n / lmb - (k + 1) * sum(t / (1 + lmb * t)) - k * sum(x / (1 + lmb * x))
        return np.array([delk, dellmb])

    def numerical_grad(self, t, x, k=None, lmb=None):
        '''
        Central-difference approximation of the gradient; useful as a
        sanity check against grad().
        args:
            t: The survival data.
            x: The censored data.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        if k is None or lmb is None:
            k = self.k
            lmb = self.lmb
        eps = 1e-5
        delk = (self.loglik(t, x, k + eps, lmb) - self.loglik(t, x, k - eps, lmb)) / 2 / eps
        dellmb = (self.loglik(t, x, k, lmb + eps) - self.loglik(t, x, k, lmb - eps)) / 2 / eps
        return np.array([delk, dellmb])

    def hessian(self, t, x, k=0.5, lmb=0.3):
        '''
        The analytic Hessian of the log-likelihood w.r.t. (k, lmb).
        args:
            t: The un-censored samples.
            x: The censored samples.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        n = len(t)
        delksq = -n / k ** 2
        dellmbsq = -n / lmb ** 2 + (k + 1) * sum((t / (1 + lmb * t)) ** 2) + k * sum((x / (1 + lmb * x)) ** 2)
        delklmb = -sum(t / (1 + lmb * t)) - sum(x / (1 + lmb * x))
        hess = np.zeros([2, 2])
        hess[0, 0] = delksq
        hess[1, 1] = dellmbsq
        hess[0, 1] = hess[1, 0] = delklmb
        return hess

    def numerical_hessian(self, t, x, k=0.5, lmb=0.3):
        '''
        Central-difference approximation of the Hessian; sanity check
        against hessian().
        args:
            t: The survival data.
            x: The censored data.
            k: The shape parameter.
            lmb: The scale parameter.
        '''
        eps = 1e-4
        delksq = (self.loglik(t, x, k + 2 * eps, lmb) + self.loglik(t, x, k - 2 * eps, lmb) - 2 * self.loglik(t, x, k, lmb)) / 4 / eps / eps
        dellmbsq = (self.loglik(t, x, k, lmb + 2 * eps) + self.loglik(t, x, k, lmb - 2 * eps) - 2 * self.loglik(t, x, k, lmb)) / 4 / eps / eps
        dellmbk = (self.loglik(t, x, k + eps, lmb + eps) + self.loglik(t, x, k - eps, lmb - eps)
                   - self.loglik(t, x, k + eps, lmb - eps) - self.loglik(t, x, k - eps, lmb + eps)) / 4 / eps / eps
        hess = np.zeros([2, 2])
        hess[0, 0] = delksq
        hess[1, 1] = dellmbsq
        hess[0, 1] = hess[1, 0] = dellmbk
        return hess

    def gradient_descent(self, numIter=2001, params=np.array([.5, .3]), verbose=False):
        '''
        Performs gradient ascent (with a crude backtracking line search)
        to get the best fitting parameters for this Lomax given the
        censored and un-censored data.
        args:
            numIter: The maximum number of iterations for the iterative method.
            params: The initial guess for the shape and scale parameters respectively.
            verbose: Set to true for debugging. Shows progress as it fits data.
        '''
        for i in range(numIter):
            lik = self.loglik(self.train_org, self.train_inorg, params[0], params[1])
            directn = self.grad(self.train_org, self.train_inorg, params[0], params[1])
            params2 = params
            # Try a ladder of step sizes; keep the best feasible improvement.
            for alp1 in [1e-8, 1e-7, 1e-5, 1e-3, 1e-2, .1]:
                params1 = params + alp1 * directn
                # Both parameters must stay strictly positive.
                if min(params1) > 0:
                    lik1 = self.loglik(self.train_org, self.train_inorg, params1[0], params1[1])
                    if lik1 > lik and np.isfinite(lik1):
                        lik = lik1
                        params2 = params1
            params = params2
            if i % 100 == 0 and verbose:
                print("Iteration " + str(i) + " ,objective function: " + str(lik) + " \nparams = " + str(params) + " \nGradient = " + str(directn))
                print("\n########\n")
        return params

    def newtonRh(self, numIter=101, params=np.array([.1, .1]), verbose=False):
        '''
        Fits the parameters of a Lomax distribution to data (censored and
        uncensored) using the Newton Raphson method. For an explanation,
        see: https://www.youtube.com/watch?v=acsSIyDugP0
        NOTE: this method was accidentally wrapped in a string literal
        (dead code) in the original file while __init__ still called it;
        it has been restored.
        args:
            numIter: The maximum number of iterations for the iterative method.
            params: The initial guess for the shape and scale parameters respectively.
            verbose: Set to true for debugging. Shows progress as it fits data.
        '''
        for i in range(numIter):
            directn = self.grad(self.train_org, self.train_inorg, params[0], params[1])
            # Converged when the gradient is essentially zero.
            if sum(abs(directn)) < 1e-5:
                if verbose:
                    print("\nIt took: " + str(i) + " Iterations.\n Gradients - " + str(directn))
                self.params = params
                [self.k, self.lmb] = params
                return params
            lik = self.loglik(self.train_org, self.train_inorg, params[0], params[1])
            step = np.linalg.solve(self.hessian(self.train_org, self.train_inorg, params[0], params[1]), directn)
            params = params - step
            if min(params) < 0:
                # The full Newton step left the feasible region; undo it and
                # fall back to a damped line search along the same direction.
                print("Drastic measures")
                params = params + step  # undo the effect of taking the step.
                params2 = params
                for alp1 in [1e-8, 1e-7, 1e-5, 1e-3, 1e-2, .1, .5, 1.0]:
                    params1 = params - alp1 * step
                    # BUG FIX: was max(params1) > 0, which would accept a
                    # candidate with one negative parameter. Both must be > 0.
                    if min(params1) > 0:
                        lik1 = self.loglik(self.train_org, self.train_inorg, params1[0], params1[1])
                        if lik1 > lik and np.isfinite(lik1):
                            lik = lik1
                            params2 = params1
                params = params2
            if i % 10 == 0 and verbose:
                print("Iteration " + str(i) + " ,objective function: " + str(lik) + " \nparams = " + str(params) + " \nGradient = " + str(directn) + "\n##\n\n")
        [self.k, self.lmb] = params
        self.params = params
        return params

    def optimal_wait_threshold(self, intervention_cost, k=None, lmb=None):
        '''
        Gets the optimal time one should wait for a Lomax recovery before intervention.
        args:
            intervention_cost: The cost of intervening.
            k: The shape parameter of this Lomax distribution.
            lmb: The scale parameter of this Lomax distribution.
        '''
        if k is None or lmb is None:
            k = self.k
            lmb = self.lmb
        return (intervention_cost * k - 1 / lmb)

    def expectedDT(self, tau, k, lmb, intervention_cost):
        '''
        The expected downtime incurred when the waiting threshold is set
        to an arbitrary value tau.
        args:
            tau: The value we should set for the intervention threshold.
            k: The shape parameter of the current Lomax.
            lmb: The scale parameter of the current Lomax.
            intervention_cost: The cost of intervening.
        '''
        return 1 / lmb / (k - 1) - (1 / lmb / (k - 1) + tau * k / (k - 1)) * 1 / (1 + lmb * tau) ** k + (tau + intervention_cost) * 1 / (1 + lmb * tau) ** k

    @staticmethod
    def expectedDT_s(tau, k, lmb, intervention_cost):
        '''
        The expected downtime incurred when the waiting threshold is set
        to an arbitrary value (static version of expectedDT).
        args:
            tau: The value we should set for the intervention threshold.
            k: The shape parameter of the current Lomax.
            lmb: The scale parameter of the current Lomax.
            intervention_cost: The cost of intervening.
        '''
        return 1 / lmb / (k - 1) - (1 / lmb / (k - 1) + tau * k / (k - 1)) * 1 / (1 + lmb * tau) ** k + (tau + intervention_cost) * 1 / (1 + lmb * tau) ** k

    def expectedT(self, tau, k=None, lmb=None, params=None):
        '''
        The expected value of the Lomax conditional on it being less than tau.
        args:
            tau: Censor the Lomax here.
            k: The shape parameter of the current Lomax.
            lmb: The scale parameter of the current Lomax.
            params: A 2-d array with shape and scale parameters.
        '''
        [k, lmb] = self.determine_params(k, lmb, params)
        return (1 / lmb / (k - 1) - (1 / lmb / (k - 1) + tau * k / (k - 1)) * 1 / (1 + lmb * tau) ** k) / (1 - 1 / (1 + lmb * tau) ** k)

    def samples(self, k=None, lmb=None, size=1000, params=None):
        '''
        Generates samples from the Lomax distribution.
        args:
            k: Shape of Lomax.
            lmb: Scale of Lomax.
            size: The number of simulations to be generated.
            params: A 2-d array with shape and scale parameters.
        '''
        [k, lmb] = self.determine_params(k, lmb, params)
        return lomax.rvs(c=k, scale=(1 / lmb), size=size)

    @staticmethod
    def samples_s(k, lmb, size=1000):
        '''Static version of samples(); draws from scipy.stats.lomax.'''
        return lomax.rvs(c=k, scale=(1 / lmb), size=size)

    def kappafn_k(self, t, x, lmb=0.1):
        '''Closed-form k that zeroes the k-gradient for a given lmb.'''
        n = len(t)
        return n / (sum(np.log(1 + lmb * t)) + sum(np.log(1 + lmb * x)))

    def kappafn_lmb(self, t, x, lmb=0.1):
        '''Closed-form k that zeroes the lmb-gradient for a given lmb.'''
        n = len(t)
        return (n / lmb - sum(t / (1 + lmb * t))) / (sum(t / (1 + lmb * t)) + sum(x / (1 + lmb * x)))

    def bisection_fn(self, lmb=0.1):
        '''Difference of the two kappa estimates; its root is the MLE lmb.'''
        return self.kappafn_k(self.train_org, self.train_inorg, lmb) - self.kappafn_lmb(self.train_org, self.train_inorg, lmb)

    def bisection(self, a=1e-6, b=2000):
        '''
        Bisection root-finder for bisection_fn on [a, b].
        Returns the root, or None if not bracketed within 10000 iterations.
        '''
        n = 1
        while n < 10000:
            c = (a + b) / 2
            if self.bisection_fn(c) == 0 or (b - a) / 2 < 1e-6:
                return c
            n = n + 1
            if (self.bisection_fn(c) > 0) == (self.bisection_fn(a) > 0):
                a = c
            else:
                b = c
|
[
"rohitpandey576@gmail.com"
] |
rohitpandey576@gmail.com
|
cf44ce6aefffd95765ff8071f01abc34af978a80
|
41a0220bf117124bf281a50396582c0df1e0675f
|
/Pyrado/tests/environment_wrappers/test_action_delay.py
|
c04f64bbf23a9b2a2f2bfdc9db1d3b524b130d61
|
[
"BSD-3-Clause"
] |
permissive
|
jacarvalho/SimuRLacra
|
c071dfc22d4f2c54a198405e8974d03333c9961d
|
a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5
|
refs/heads/master
| 2022-11-24T20:08:52.376545
| 2020-08-03T09:01:35
| 2020-08-03T09:01:35
| 276,885,755
| 0
| 0
|
BSD-3-Clause
| 2020-07-03T11:39:21
| 2020-07-03T11:39:21
| null |
UTF-8
|
Python
| false
| false
| 2,392
|
py
|
import numpy as np
import pytest
from pyrado.spaces.box import BoxSpace
from pyrado.environment_wrappers.action_delay import ActDelayWrapper
from tests.environment_wrappers.mock_env import MockEnv
@pytest.mark.wrappers
def test_no_delay():
    """With delay=0 the wrapper must forward actions unchanged."""
    inner_env = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
    wrapped = ActDelayWrapper(inner_env, delay=0)
    # Reset to initialize the action buffer
    wrapped.reset()
    # Each action should reach the wrapped env immediately
    wrapped.step(np.array([4, 1]))
    assert inner_env.last_act == [4, 1]
    wrapped.step(np.array([7, 5]))
    assert inner_env.last_act == [7, 5]
@pytest.mark.wrappers
def test_act_delay():
    """With delay=2 actions arrive two steps late (zeros until then)."""
    inner_env = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
    wrapped = ActDelayWrapper(inner_env, delay=2)
    # Reset to fill the delay buffer
    wrapped.reset()
    # The first two steps flush the zero-initialized buffer
    wrapped.step(np.array([0, 1]))
    assert inner_env.last_act == [0, 0]
    wrapped.step(np.array([2, 4]))
    assert inner_env.last_act == [0, 0]
    # From here on, the action from two steps ago comes through
    wrapped.step(np.array([1, 2]))
    assert inner_env.last_act == [0, 1]
    wrapped.step(np.array([2, 3]))
    assert inner_env.last_act == [2, 4]
@pytest.mark.wrappers
def test_reset():
    """reset() must flush the delay buffer back to zeros."""
    inner_env = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
    wrapped = ActDelayWrapper(inner_env, delay=1)
    # Reset to initialize the buffer
    wrapped.reset()
    wrapped.step(np.array([0, 4]))
    assert inner_env.last_act == [0, 0]
    wrapped.step(np.array([4, 4]))
    assert inner_env.last_act == [0, 4]
    # The next action would be [4, 4], but resetting clears the buffer
    wrapped.reset()
    wrapped.step(np.array([1, 2]))
    assert inner_env.last_act == [0, 0]
    wrapped.step(np.array([2, 3]))
    assert inner_env.last_act == [1, 2]
@pytest.mark.wrappers
def test_domain_param():
    """Changing the act_delay domain parameter must take effect after reset."""
    inner_env = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
    wrapped = ActDelayWrapper(inner_env, delay=1)
    # Reset to initialize the buffer
    wrapped.reset()
    wrapped.step(np.array([0, 1]))
    assert inner_env.last_act == [0, 0]
    wrapped.step(np.array([2, 4]))
    assert inner_env.last_act == [0, 1]
    # Increase the delay to 2 and reset so it is applied
    wrapped.domain_param = {'act_delay': 2}
    wrapped.reset()
    wrapped.step(np.array([1, 2]))
    assert inner_env.last_act == [0, 0]
    wrapped.step(np.array([2, 3]))
    assert inner_env.last_act == [0, 0]
    wrapped.step(np.array([8, 9]))
    assert inner_env.last_act == [1, 2]
|
[
"fabio.muratore@famura.net"
] |
fabio.muratore@famura.net
|
81e69967751cb96ce288091f2fc4ea2ff7d9ae79
|
837d7fad3a4317fbc45345652cb0b0cee8aa310d
|
/Autonomous/pwmtester.py
|
f62fbebb490b9489e870a606e11f66962d5ae6ec
|
[] |
no_license
|
IliasChekkori/BAUROV-Autonomous
|
0f0e17ece2a22e4929334fc2d08c177476c004f9
|
6bfe1e41c7328aad6ab9dffadfdbe193829e8213
|
refs/heads/master
| 2022-12-09T00:57:01.610265
| 2020-08-31T21:55:23
| 2020-08-31T21:55:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
# Import mavutil
from pymavlink import mavutil

# Create the MAVLink connection so the Raspberry Pi can talk to the
# Pixhawk over its USB serial port.
master = mavutil.mavlink_connection(
    '/dev/ttyACM0',
    baud=115200)
# Build and send the RC pwm override values.
def set_rc_channel_pwm(id, pwm=1500):
    """Override one RC channel on the vehicle.

    args:
        id (int): RC channel id, 1-8.
        pwm (int, optional): Channel pwm value 1100-1900.

    NOTE: the original docstring was truncated to a stray fragment
    ('t, optional): ...'), which was a syntax error; it has been restored.
    """
    if id < 1:
        print("Channel does not exist.")
        return

    # http://mavlink.org/messages/common#RC_CHANNELS_OVERRIDE
    if id < 9:
        # 65535 means "ignore this channel" for RC_CHANNELS_OVERRIDE.
        rc_channel_values = [65535 for _ in range(8)]
        rc_channel_values[id - 1] = pwm
        master.mav.rc_channels_override_send(
            master.target_system,       # target_system (was missing: the
                                        # message takes system AND component)
            master.target_component,    # target_component
            *rc_channel_values)         # RC channel list (channels 1-8)
# PWM value to send: 1100 = full reverse, 1500 = neutral, 1900 = full forward.
deger = int(input("Deger Giriniz: "))
# Channel number (integer); see
# https://www.ardusub.com/operators-manual/rc-input-and-output.html
pin = int(input("Channel giriniz: "))

# Re-send the override repeatedly so the autopilot keeps applying it.
count = 0
while count < 10000:
    set_rc_channel_pwm(pin, deger)
    count += 1
|
[
"noreply@github.com"
] |
IliasChekkori.noreply@github.com
|
806045494341c3fa1fb96aa5bd0843029bd4e3fc
|
14dcb10e4b0e85b7c95c2f186fe2d4093c853ea0
|
/pygmt/tests/test_grd2cpt.py
|
a1afa97312a022480419107814f227db552fb897
|
[
"BSD-3-Clause"
] |
permissive
|
xumi1993/pygmt
|
1f3f9a065544693bfbbdb688019f2988d12cdce2
|
a555ba705857aeb5e09046308b25574721fccf5f
|
refs/heads/master
| 2023-03-04T01:56:20.742803
| 2022-09-09T14:40:37
| 2022-09-09T14:40:37
| 213,913,126
| 1
| 0
|
BSD-3-Clause
| 2023-02-21T15:57:33
| 2019-10-09T12:30:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,902
|
py
|
"""
Tests for grd2cpt.
"""
import os
import pytest
from pygmt import Figure, grd2cpt
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import GMTTempFile
from pygmt.helpers.testing import load_static_earth_relief
@pytest.fixture(scope="module", name="grid")
def fixture_grid():
    """
    Provide the static earth relief sample grid shared by these tests.
    """
    return load_static_earth_relief()
@pytest.mark.mpl_image_compare
def test_grd2cpt(grid):
    """
    Build a CPT from a grid via grd2cpt and display it with a color bar.
    """
    fig = Figure()
    fig.basemap(frame="a", projection="W0/15c", region="d")
    grd2cpt(grid=grid)
    fig.colorbar(frame="a")
    return fig
def test_grd2cpt_blank_output(grid):
    """
    grd2cpt must reject an empty string for the output parameter.
    """
    with pytest.raises(GMTInvalidInput):
        grd2cpt(grid=grid, output="")
def test_grd2cpt_invalid_output(grid):
    """
    grd2cpt must reject a non-string value for the output parameter.
    """
    with pytest.raises(GMTInvalidInput):
        grd2cpt(grid=grid, output=["some.cpt"])
def test_grd2cpt_output_to_cpt_file(grid):
    """
    Saving the generated color palette to a .cpt file must yield a
    non-empty file.
    """
    with GMTTempFile(suffix=".cpt") as cptfile:
        grd2cpt(grid=grid, output=cptfile.name)
        assert os.path.getsize(cptfile.name) > 0
def test_grd2cpt_unrecognized_data_type():
    """
    grd2cpt must raise when the grid argument is not a grid at all.
    """
    with pytest.raises(GMTInvalidInput):
        grd2cpt(grid=0)
def test_grd2cpt_categorical_and_cyclic(grid):
    """
    categorical=True and cyclic=True are mutually exclusive and must raise.
    """
    with pytest.raises(GMTInvalidInput):
        grd2cpt(grid=grid, cmap="batlow", categorical=True, cyclic=True)
|
[
"noreply@github.com"
] |
xumi1993.noreply@github.com
|
952fd72ad5a8100025aa2e461084375532616b8e
|
677562bf6835be104204f32a6c9998d9a901f9fc
|
/from_scratch/detect_metadata/times.py
|
fefebd85201b58cd0821fa91c8c528a5f775d688
|
[] |
no_license
|
santokalayil/neural_network
|
3cb2f843430e9f35e017edcde83ba13212d0f5cf
|
f453856214d027f55afc5c861784dc693a9bf2c6
|
refs/heads/main
| 2023-06-12T01:53:43.588403
| 2021-07-02T08:30:20
| 2021-07-02T08:30:20
| 382,281,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
# import platform # to detect the operating system
import os
import time
def get_last_modified_time(path_to_file):
    """Return the file's last-modification time as seconds since the epoch."""
    # For a human-readable string, wrap the result in time.ctime().
    modified_at = os.path.getmtime(path_to_file)
    return modified_at
def get_created_time(path_to_file):
    """Return the file's ctime (creation/metadata-change time) as a timestamp."""
    # For a human-readable string, wrap the result in time.ctime().
    created_at = os.path.getctime(path_to_file)
    return created_at
|
[
"49450970+santokalayil@users.noreply.github.com"
] |
49450970+santokalayil@users.noreply.github.com
|
3b937e27177d4b2213f47baa81c00973e7037be0
|
bc4910ecec94749697dbce5e7cf5093275411125
|
/src/generator/Cheetah/ErrorCatchers.py
|
500f2490d613628fe69f683fafa883f5d586e55d
|
[
"MIT"
] |
permissive
|
se210/tracy
|
7e73a6f0d64f355842b9a11035c3720b4d40fde5
|
232a42ce1aefcffa1f8544c89d60a16ebd897033
|
refs/heads/master
| 2021-01-09T20:55:03.241111
| 2013-12-15T23:34:36
| 2013-12-15T23:34:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,838
|
py
|
#!/usr/bin/env python
# $Id: ErrorCatchers.py,v 1.1 2006-09-06 09:50:08 skyostil Exp $
"""ErrorCatcher class for Cheetah Templates
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
Version: $Revision: 1.1 $
Start Date: 2001/08/01
Last Revision Date: $Date: 2006-09-06 09:50:08 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.1 $"[11:-2]
import time
from Cheetah.NameMapper import NotFound
class Error(Exception):
    """Base exception type for this module."""
    pass
class ErrorCatcher:
    """Default catcher: handle NotFound by echoing the raw placeholder code."""

    _exceptionsToCatch = (NotFound,)

    def __init__(self, templateObj):
        # Stateless; the template object is not needed here.
        pass

    def exceptions(self):
        """Return the tuple of exception types this catcher handles."""
        return self._exceptionsToCatch

    def warn(self, exc_val, code, rawCode, lineCol):
        """Emit the raw source code unchanged."""
        return rawCode


## make an alias
Echo = ErrorCatcher
class BigEcho(ErrorCatcher):
    """Echo the failing placeholder wrapped in a loud '=' banner."""

    def warn(self, exc_val, code, rawCode, lineCol):
        banner = "=" * 15
        return banner + "<" + rawCode + " could not be found>" + banner
# Capture the builtin exception before the class below shadows the name.
_builtin_KeyError = KeyError


class KeyError(ErrorCatcher):
    """Raise a (builtin) KeyError when a placeholder cannot be found."""

    def warn(self, exc_val, code, rawCode, lineCol):
        # BUG FIX: inside this method the bare name ``KeyError`` resolves to
        # THIS class, which is an ErrorCatcher (not an Exception), so the
        # original ``raise KeyError(...)`` itself failed with a TypeError.
        # Raise the captured builtin instead; the message is unchanged.
        raise _builtin_KeyError("no '%s' in this Template Object's Search List" % rawCode)
class ListErrors(ErrorCatcher):
    """Accumulate a list of errors."""

    _timeFormat = "%c"

    def __init__(self, templateObj):
        ErrorCatcher.__init__(self, templateObj)
        self._errors = []

    def warn(self, exc_val, code, rawCode, lineCol):
        # Snapshot the call arguments, add a timestamp, and record them.
        # (Renamed the local from ``dict``, which shadowed the builtin.)
        record = locals().copy()
        del record['self']
        record['time'] = time.strftime(self._timeFormat,
                                       time.localtime(time.time()))
        self._errors.append(record)
        return rawCode

    def listErrors(self):
        """Return the list of errors."""
        return self._errors
|
[
"sami.kyostila@unrealvoodoo.org"
] |
sami.kyostila@unrealvoodoo.org
|
60ef6466e701d4230442b132a6b9adf2bb35bdfe
|
5247bbde8c929821480a8a2ceebc5039b3098ae0
|
/jobs/views.py
|
6f6ffe95680745faf8f06a3d359ad99ccae16ca4
|
[] |
no_license
|
prinzana/Potfolios-django-project
|
dbb067b8208ba08603836c4d3420cdc1a0ed8548
|
5e532d1efbab5a92391537ce4f74adda022b6be2
|
refs/heads/master
| 2023-06-08T04:13:28.467640
| 2021-07-03T13:18:39
| 2021-07-03T13:18:39
| 382,620,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
from django.shortcuts import render
from .models import Job
def home(request):
    """Render the jobs home page with the Job records in context."""
    # NOTE(review): this passes the bare manager, so the template must use
    # ``jobs.all`` to iterate — confirm, or pass Job.objects.all() instead.
    job_listing = Job.objects
    return render(request, 'jobs/home.html', {'jobs': job_listing})

# Create your views here.
|
[
"prinzana@gmail.com"
] |
prinzana@gmail.com
|
6673858896690ec1a546222c0f8b383b73cf8ac8
|
847273de4b1d814fab8b19dc651c651c2d342ede
|
/.history/Sudoku_II_007_20180621235112.py
|
41f8ce78220a122f130868148e83683e6dcb7b73
|
[] |
no_license
|
Los4U/sudoku_in_python
|
0ba55850afcffeac4170321651620f3c89448b45
|
7d470604962a43da3fc3e5edce6f718076197d32
|
refs/heads/master
| 2020-03-22T08:10:13.939424
| 2018-07-04T17:21:13
| 2018-07-04T17:21:13
| 139,749,483
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,084
|
py
|
from random import randint
import copy
# Three near-complete puzzle boards. Blank cells hold a space string
# (" " / ' ') instead of an int, so summing a row/column that still has a
# blank raises TypeError — the solver loop below relies on catching it.
sudoku1 = [
    [5, 9, 8, 6, 1, 2, 3, 4, 7],
    [2, 1, 7, 9, 3, 4, 8, 6, 5],
    [6, 4, 3, 5, 8, 7, 1, 2, 9],
    [1, 6, 5, 4, 9, 8, 2, 7, 3],
    [3, 2, 9, 7, 6, 5, 4, 1, 8],
    [7, 8, 4, 3, 2, 1, 5, 9, 6],
    [8, 3, 1, 2, 7, 6, 9, 5, 4],
    [4, 7, 2, 8, 5, 9, 6, 3, 1],
    [9, 5, 6, 1, 4, 3, 7, " ", " "]
]
sudoku2 = [
    [9, 8, 7, 4, 3, 2, 5, 6, 1],
    [2, 4, 3, 5, 1, 6, 8, 7, 9],
    [5, 6, 1, 7, 9, 8, 4, 3, 2],
    [3, 9, 5, 6, 4, 7, 2, 1, 8],
    [8, 2, 4, 3, 5, 1, 6, 9, 7],
    [1, 7, 6, 2, 8, 9, 3, 4, 5],
    [7, 1, 2, 8, 6, 3, 9, 5, 4],
    [4, 3, 8, 9, 7, 5, 1, 2, 6],
    [' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
# NOTE(review): sudoku3 is identical to sudoku2 — presumably it was meant
# to be a distinct board; confirm before enabling menu option 3.
sudoku3 = [
    [9, 8, 7, 4, 3, 2, 5, 6, 1],
    [2, 4, 3, 5, 1, 6, 8, 7, 9],
    [5, 6, 1, 7, 9, 8, 4, 3, 2],
    [3, 9, 5, 6, 4, 7, 2, 1, 8],
    [8, 2, 4, 3, 5, 1, 6, 9, 7],
    [1, 7, 6, 2, 8, 9, 3, 4, 5],
    [7, 1, 2, 8, 6, 3, 9, 5, 4],
    [4, 3, 8, 9, 7, 5, 1, 2, 6],
    [' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
def printSudoku():
    # Renders the global ``sudoku`` board as a 9x9 grid with 1-based
    # row/column labels and separators after every 3-row band.
    i = 0
    while i < 10:
        if i == 0:
            # Column header and top border, printed once.
            print(" 1 2 3 4 5 6 7 8 9")
            print(" -------------------------")
        elif i == 3 or i == 6 or i == 9:
            # Horizontal separator after each 3x3 band (and bottom border).
            print(" -------------------------")
        line = "|"
        if i < 9:
            # {2}: 1-based row label, {1}: vertical bars, {0[..]}: cells.
            print(' {2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku[i], line, i+1))
        i = i + 1
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
# print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
print("\n\n\n\n")
s = 0
if choice == "R" or choice == "r":
listaSudoku = [sudoku1, sudoku2, sudoku3]
sudoku_number = randint(0, 2)
print("Plansza nr:", sudoku_number)
s = sudoku_number
sudoku = copy.deepcopy(listaSudoku[sudoku_number])
elif int(choice) == 1:
s = 1
sudoku = copy.deepcopy(sudoku1)
elif int(choice) == 2:
s = 2
sudoku = copy.deepcopy(sudoku2)
elif int(choice) == 3:
s = 3
sudoku = copy.deepcopy(sudoku3)
while True:  # prints Sudoku until is solved
    # print("Your sudoku to solve:")
    printSudoku()
    print("\nInput 3 numbers in format a b c, np. 4 5 8")
    print(" a - row number")
    print(" b - column number ")
    print(" c - value")
    # vprint(" r - reset chart to start\n ")
    x = input("Input a b c: ")
    print("")
    numbers = " 0123456789"  # conditions of entering the numbers !
    # A valid move is exactly "a b c": digits at positions 0/2/4 and
    # single spaces at positions 1/3. Anything else is a command or error.
    if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
            str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
        if x == "r":  # reset
            # Reload the originally chosen board (see NOTE about ``s``
            # for the random path).
            if s == 1:
                sudoku = copy.deepcopy(sudoku1)
            elif s == 2:
                sudoku = copy.deepcopy(sudoku2)
            elif s == 3:
                sudoku = copy.deepcopy(sudoku3)
        elif x == "h":  # show:
            # Debug helper: dump the working board and board 1.
            print(sudoku)
            print(sudoku1)
        else:
            print("Error - wrong number format \n ")
        continue
    else:
        # Apply the move: row a, column b get value c (1-based input).
        sudoku[int(x[0])-1][int(x[2])-1] = int(x[4])
    column1 = 0
    column2 = 0
    try:  # check if sudoku is solved
        i = 0
        list = []
        while i < 9:  # check are all column == 45
            column = 0
            for item in sudoku:
                column = column + item[i]
            list.append(column)
            i += 1
        is45 = 0  # check if sudoku is solved
        for listElement in list:
            if listElement == 45:
                is45 = is45 + 1
        #
        # Count rows summing to 45, but only once all 9 columns do too.
        i = 0
        for item in sudoku:
            if sum(item) == 45 and is45 == 9:
                i = i + 1
        if i == 9:
            printSudoku()
            print(" @@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
            print(" @@@@@@@@@@ YOU WIN @@@@@@@@@@")
            print(" @@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
            break
    except TypeError:
        # A blank cell (space string) was summed — board not complete yet.
        print()
|
[
"inz.kamil.wos@gmail.com"
] |
inz.kamil.wos@gmail.com
|
66ec71bb988eb0d3f33c75d5c26df53404e2780b
|
6d77cf9932bf600ab89deae26b849221fdf88704
|
/Safe Marks/Interface/TeacherCommandLineInterface.py
|
0060d6cad337413098e7b4a29ad70bae1d24b7e9
|
[
"MIT"
] |
permissive
|
mriduldhall/Safe-Marks
|
4cecc89bcc16c83b64602cc686ad3371e0f14580
|
3ea657c842fe30f084e0d70633d4319073bc82f0
|
refs/heads/main
| 2023-07-05T05:06:38.270423
| 2021-08-16T22:53:43
| 2021-08-16T22:53:43
| 307,712,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,720
|
py
|
from HelperLibrary.Validator import Validator
from HelperLibrary.Student import Student
from HelperLibrary.StorageFunctions import StorageFunctions
from Interface.SettingsCommandLineInterface import CLI as SettingsCLI
from Interface.AccountCommandLineInterface import CLI as AccountCLI
from datetime import datetime
class LogoutMenuItem:
    """Menu entry that logs the user out and ends the menu loop."""

    def __init__(self):
        # Becomes True once the user chooses to log out.
        self.is_exit_initiated = False

    def execute(self):
        print("Logging out...")
        self.is_exit_initiated = True

    def exit_initiated(self):
        return self.is_exit_initiated
class SettingsMenuItem:
    """Menu entry that opens the settings CLI; exits if the user was deleted."""

    def __init__(self, singleton):
        self.singleton = singleton
        self.is_exit_initiated = False

    def execute(self):
        # The settings CLI reports whether the current user deleted
        # their own account; treat that like a logout.
        if SettingsCLI(self.singleton).initiate():
            self.is_exit_initiated = True

    def exit_initiated(self):
        return self.is_exit_initiated
class YearEndMenuItem:
    """Menu entry that rolls every current student forward one academic year."""

    def __init__(self):
        pass

    def execute(self):
        # Confirm the destructive operation before touching any records.
        if Validator("year end").should_continue():
            self.increase_year(self.get_student_list())

    @staticmethod
    def get_student_list():
        """Return the names of every stored student."""
        return StorageFunctions("students").list("name")

    @staticmethod
    def increase_year(student_list):
        """Advance each current student by one year; year-13 students leave."""
        for student_name in student_list:
            student = Student(student_name, None, None, None, None)
            student.recreate_student()
            if student.leave_date:
                # Student has already left — nothing to roll forward.
                continue
            if student.year_group != 13:
                student.year_group += 1
                student.student_controller.save_student_data(save_mark_sheet_data=False)
                student.student_controller.create_mark_sheets()
            else:
                # Final year completed: archive the student.
                student.year_group = None
                student.leave_date = datetime.now()
                student.student_controller.save_student_data(save_mark_sheet_data=False)

    @staticmethod
    def exit_initiated():
        return False
class ManageMenuItem:
    """Menu entry for managing students and their mark sheets."""

    def __init__(self, admin):
        # Whether the current user has admin privileges.
        self.admin = admin

    def execute(self):
        if not Validator("manage").should_continue():
            return
        keep_going = True
        while keep_going:
            print(Student(None, None, None, None, None).manage(self.admin))
            keep_going = bool(int(input("Enter 1 to enter another name and work on another student or 0 to leave.")))

    @staticmethod
    def exit_initiated():
        return False
class ManageAccountsMenuItem:
    """Menu entry that opens the account-management CLI."""

    def __init__(self, singleton):
        self.singleton = singleton

    def execute(self):
        AccountCLI(self.singleton).initiate()

    @staticmethod
    def exit_initiated():
        return False
class CreateMenuItem:
    """Menu entry that creates new students or restores archived ones."""

    def __init__(self, singleton):
        self.singleton = singleton

    def execute(self):
        if not Validator("create").should_continue():
            return
        continuation = True
        while continuation is True:
            handlers = {
                "1": self.new_student,
                "2": self.old_student,
            }
            selection = input("Enter 1 to create a new student or 2 to add an old student back(unarchive):")
            handler = handlers.get(selection)
            message = handler() if handler is not None else "Invalid choice"
            print(message)
            continuation = bool(int(input("Enter 1 to create another student and 0 to head back to main menu.")))

    def new_student(self):
        """Collect validated details and create a brand-new student."""
        return self.getstudentdetails().create_new_student()

    @staticmethod
    def old_student():
        """Restore (unarchive) a previously removed student."""
        return Student(None, None, None, None, None).create_old_student()

    @staticmethod
    def getstudentdetails():
        """Prompt for student details until they validate; return the Student."""
        valid = False
        while not valid:
            name = input("Enter student's name:").capitalize()
            birth_year = int(input("Enter student's year of birth:"))
            birth_month = int(input("Enter student's month of birth:"))
            birth_date = int(input("Enter student's date of birth:"))
            date_of_birth = datetime(birth_year, birth_month, birth_date)
            address = input("Enter student's address:")
            father_name = input("Enter student's father's name:")
            mother_name = input("Enter student's mother's name:")
            student = Student(name, date_of_birth, address, father_name, mother_name)
            valid, message = student.student_controller.validate_student_details()
            if message:
                print(message)
        return student

    @staticmethod
    def exit_initiated():
        return False
class CLI:
    """Top-level command-line menu for teachers.

    Chooses between the normal, admin and disabled menus based on the
    logged-in user's flags, then loops until a menu item initiates exit.
    """

    def __init__(self, singleton):
        self.main_menu_dictionary = {
            "m": ManageMenuItem(singleton.admin),
            "s": SettingsMenuItem(singleton),
            "l": LogoutMenuItem()
        }
        self.admin_main_menu_dictionary = {
            "c": CreateMenuItem(singleton),
            "m": ManageMenuItem(singleton.admin),
            "a": ManageAccountsMenuItem(singleton),
            "y": YearEndMenuItem(),
            "s": SettingsMenuItem(singleton),
            "l": LogoutMenuItem()
        }
        self.disabled_main_menu_dictionary = {
            "s": SettingsMenuItem(singleton),
            "l": LogoutMenuItem(),
        }
        self.admin = singleton.admin
        self.enabled = singleton.enabled

    def initiate(self):
        """Run the menu loop until the selected item initiates an exit."""
        exit_initiated = False
        while not exit_initiated:
            # Pick the menu matching the user's status, then prompt.
            if not self.enabled:
                print("Your account has been marked disabled. Please contact an administrator to get this changed.")
                choice = input("Enter s for settings and l to logout:").lower()
                active_menu = self.disabled_main_menu_dictionary
            elif not self.admin:
                choice = input("Enter m to manage students and their mark sheets, s for settings and l to logout:").lower()
                active_menu = self.main_menu_dictionary
            else:
                choice = input("Enter c to create new students, m to manage students and their mark sheets, a to manage accounts, y to change academic year, s for settings and l to logout:").lower()
                active_menu = self.admin_main_menu_dictionary
            menu_item = active_menu.get(choice)
            if menu_item is None:
                print("Please enter valid choice")
                continue
            menu_item.execute()
            exit_initiated = menu_item.exit_initiated()
|
[
"mriduldhall1@gmail.com"
] |
mriduldhall1@gmail.com
|
5207bdfd9ec7ed6f7459b591d7345960cb085457
|
6a5ce7d885db1baa5a9d43b26f0ae623a5ef0f01
|
/azure-mgmt-web/azure/mgmt/web/models/domain_registration_input.py
|
864529f0239c7032c4baa763d7558207f03f1109
|
[
"Apache-2.0"
] |
permissive
|
JammyBrand82/azure-sdk-for-python
|
333af194ff9143ec77f49203a5a71f15c399f278
|
c65e189cd41bd3464556b17bfcdee1303867996c
|
refs/heads/master
| 2021-01-17T18:31:10.661151
| 2016-03-17T21:03:08
| 2016-03-17T21:03:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,993
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class DomainRegistrationInput(Resource):
    """
    Domain registration input for validation Api

    :param str id: Resource Id
    :param str name: Resource Name
    :param str location: Resource Location
    :param str type: Resource type
    :param dict tags: Resource tags
    :param str domain_registration_input_name: Name of the domain
    :param Contact contact_admin: Admin contact information
    :param Contact contact_billing: Billing contact information
    :param Contact contact_registrant: Registrant contact information
    :param Contact contact_tech: Technical contact information
    :param str registration_status: Domain registration status. Possible
     values include: 'Active', 'Awaiting', 'Cancelled', 'Confiscated',
     'Disabled', 'Excluded', 'Expired', 'Failed', 'Held', 'Locked', 'Parked',
     'Pending', 'Reserved', 'Reverted', 'Suspended', 'Transferred',
     'Unknown', 'Unlocked', 'Unparked', 'Updated', 'JsonConverterFailed'
    :param str provisioning_state: Domain provisioning state. Possible values
     include: 'Succeeded', 'Failed', 'Canceled', 'InProgress', 'Deleting'
    :param list name_servers: Name servers
    :param bool privacy: If true then domain privacy is enabled for this
     domain
    :param datetime created_time: Domain creation timestamp
    :param datetime expiration_time: Domain expiration timestamp
    :param datetime last_renewed_time: Timestamp when the domain was renewed
     last time
    :param bool auto_renew: If true then domain will renewed automatically
    :param bool ready_for_dns_record_management: If true then Azure can
     assign this domain to Web Apps. This value will be true if domain
     registration status is active and it is hosted on name servers Azure has
     programmatic access to
    :param list managed_host_names: All hostnames derived from the domain and
     assigned to Azure resources
    :param DomainPurchaseConsent consent: Legal agreement consent
    """

    # Only 'location' is mandatory; all other fields are optional.
    _validation = {
        'location': {'required': True},
    }

    # Maps Python attribute names to REST payload keys for msrest
    # (de)serialization; 'properties.*' keys nest under the ARM envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'domain_registration_input_name': {'key': 'properties.name', 'type': 'str'},
        'contact_admin': {'key': 'properties.contactAdmin', 'type': 'Contact'},
        'contact_billing': {'key': 'properties.contactBilling', 'type': 'Contact'},
        'contact_registrant': {'key': 'properties.contactRegistrant', 'type': 'Contact'},
        'contact_tech': {'key': 'properties.contactTech', 'type': 'Contact'},
        'registration_status': {'key': 'properties.registrationStatus', 'type': 'DomainStatus'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'},
        'name_servers': {'key': 'properties.nameServers', 'type': '[str]'},
        'privacy': {'key': 'properties.privacy', 'type': 'bool'},
        'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
        'expiration_time': {'key': 'properties.expirationTime', 'type': 'iso-8601'},
        'last_renewed_time': {'key': 'properties.lastRenewedTime', 'type': 'iso-8601'},
        'auto_renew': {'key': 'properties.autoRenew', 'type': 'bool'},
        'ready_for_dns_record_management': {'key': 'properties.readyForDnsRecordManagement', 'type': 'bool'},
        'managed_host_names': {'key': 'properties.managedHostNames', 'type': '[HostName]'},
        'consent': {'key': 'properties.consent', 'type': 'DomainPurchaseConsent'},
    }

    def __init__(self, location, id=None, name=None, type=None, tags=None, domain_registration_input_name=None, contact_admin=None, contact_billing=None, contact_registrant=None, contact_tech=None, registration_status=None, provisioning_state=None, name_servers=None, privacy=None, created_time=None, expiration_time=None, last_renewed_time=None, auto_renew=None, ready_for_dns_record_management=None, managed_host_names=None, consent=None, **kwargs):
        # Common ARM resource fields are handled by the Resource base class;
        # everything below is domain-registration specific.
        super(DomainRegistrationInput, self).__init__(id=id, name=name, location=location, type=type, tags=tags, **kwargs)
        self.domain_registration_input_name = domain_registration_input_name
        self.contact_admin = contact_admin
        self.contact_billing = contact_billing
        self.contact_registrant = contact_registrant
        self.contact_tech = contact_tech
        self.registration_status = registration_status
        self.provisioning_state = provisioning_state
        self.name_servers = name_servers
        self.privacy = privacy
        self.created_time = created_time
        self.expiration_time = expiration_time
        self.last_renewed_time = last_renewed_time
        self.auto_renew = auto_renew
        self.ready_for_dns_record_management = ready_for_dns_record_management
        self.managed_host_names = managed_host_names
        self.consent = consent
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
7b881b0726eda7408694fbcacc36d669cda76453
|
f80c3df0b651e8bf327aac2fd9c6188205e136f5
|
/20201111_home/shortest_word.py
|
b161be4ee1430dd8837e613b7a37af878bafb1e3
|
[] |
no_license
|
rudenko86/my_project
|
b57ec2e9a4987a1a4c66f911c6098247122ac189
|
96823443e34dcb12a11b02537cbb7e3c3ce24fc6
|
refs/heads/main
| 2023-01-21T04:16:34.697095
| 2020-11-21T19:33:57
| 2020-11-21T19:33:57
| 309,800,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,992
|
py
|
''' знайти найкоротше слово і порахувати скільки разів воно зустрічається та вивести його (count) '''
a = """Октябрь уж наступил — уж роща отряхает
Последние листы с нагих своих ветвей;
Дохнул осенний хлад — дорога промерзает.
Журча еще бежит за мельницу ручей,
Но пруд уже застыл; сосед мой поспешает
В отъезжие поля с охотою своей,
И страждут озими от бешеной забавы,
И будит лай собак уснувшие дубравы.
Теперь моя пора: я не люблю весны;
Скучна мне оттепель; вонь, грязь — весной я болен;
Кровь бродит; чувства, ум тоскою стеснены.
Суровою зимой я более доволен,
Люблю ее снега; в присутствии луны
Как легкий бег саней с подругой быстр и волен,
Когда под соболем, согрета и свежа,
Она вам руку жмет, пылая и дрожа!
Как весело, обув железом острым ноги,
Скользить по зеркалу стоячих, ровных рек!
А зимних праздников блестящие тревоги?..
Но надо знать и честь; полгода снег да снег,
Ведь это наконец и жителю берлоги,
Медведю, надоест. Нельзя же целый век
Кататься нам в санях с Армидами младыми
Иль киснуть у печей за стеклами двойными"""
b = a.split()
# min() with key=len returns the first word of minimal length, exactly
# matching the original manual scan that used a strict "<" comparison.
shot_ = min(b, key=len)
print(shot_)
# Count how many times that exact token appears in the word list.
c = b.count(shot_)
print(c)
|
[
"19rudenko86@gmail.com"
] |
19rudenko86@gmail.com
|
f2d1f00036574689cfdfe68843c9fadedb487026
|
68f0ce24fa285e6b3f6f931a5a5ff075fbd7f51e
|
/DeepLearning/TensorFlowMNISTLecture.py
|
51bbc94b37276e276e0b1f08d745499ee274cdfe
|
[] |
no_license
|
Resolt/ML_Bootcamp
|
d8c26b29a49d8c1fd9767e4974d90db9fa10670d
|
2838017eab10a382c8ce655c73907dd394b0ea27
|
refs/heads/master
| 2020-04-03T10:24:23.428282
| 2018-11-07T22:23:28
| 2018-11-07T22:23:28
| 155,192,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,719
|
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# PART 1 - READING AND EXPLORING MNIST DATA
# LINK MNIST
# Download on first run and load MNIST; labels come back one-hot encoded.
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
print(mnist.train.images.shape)
print(mnist.train.images[1].shape)
# plt.imshow(mnist.train.images[1].reshape(28,28),cmap='gist_gray')
# plt.show()
# Reshape one flattened 784-pixel image into a (784, 1) column vector.
ex = mnist.train.images[1].reshape(mnist.train.images[1].shape[0],1)
print(ex)
# sns.heatmap(ex)
# plt.show()
# PART 2 - SOFTMAX REGRESSION MODEL, TRAINING AND EVALUATION
x = tf.placeholder(dtype=tf.float32,shape=[None,784]) # PLACE HOLDER FOR THE INPUT - WE KNOW ITS 784, BUT WE HAVEN'T DECIDED ON BATCH SIZE
W = tf.Variable(tf.zeros([784,10])) # THE WEIGHTS - 784 FOR THE PIXELS - 10 FOR EACH POSSIBLE VALUE (WE ARE LOOKING TO DETERMINE A NUMBER BETWEEN 0 and 9)
b = tf.Variable(tf.zeros([10])) # BIASES
y = tf.matmul(x,W) + b # THE OUTPUT
y_true = tf.placeholder(tf.float32,shape=[None,10]) # THIS IS THE SAME AS y_train. WE DON'T KNOW THE BATCH SIZE JUST YET BUT WE DO KNOW THE POSSIBLE OUTPUTS
# Softmax cross-entropy averaged over the batch, minimised by plain SGD.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true,logits=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5)
train = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # 1000 SGD steps on mini-batches of 100 images.
    for step in range(1000):
        batch_x,batch_y = mnist.train.next_batch(100)
        sess.run(train,feed_dict={x:batch_x,y_true:batch_y})
    # Accuracy = fraction of test images whose argmax prediction matches the label.
    matches = tf.equal(tf.argmax(y,1),tf.argmax(y_true,1))
    acc = tf.reduce_mean(tf.cast(matches,tf.float32))
    print(sess.run(acc,feed_dict={x:mnist.test.images,y_true:mnist.test.labels}))
|
[
"pdmmichaelsen@gmail.com"
] |
pdmmichaelsen@gmail.com
|
15d01b7c211b279668202291cd0412a2003cd5eb
|
eb8433e762930fdf0bc6519f67e176a4d29bef31
|
/okta/models/idp_policy_rule_action.py
|
52343d9048858b7dcdd70199d33336521324f95f
|
[
"Apache-2.0"
] |
permissive
|
Mogost/okta-sdk-python
|
7cd7ba3afc3a851a91c764c61aa8cbddd8dee78d
|
0e71c4d6e78af872ee95ac2372e1d630858277b0
|
refs/heads/master
| 2023-03-17T06:27:41.901434
| 2023-03-15T14:31:56
| 2023-03-15T14:31:56
| 194,839,815
| 0
| 0
|
Apache-2.0
| 2023-03-16T23:27:18
| 2019-07-02T10:13:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
# flake8: noqa
"""
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.okta_object import OktaObject
from okta.okta_collection import OktaCollection
from okta.models import idp_policy_rule_action_provider\
as idp_policy_rule_action_provider
class IdpPolicyRuleAction(OktaObject):
    """
    A class for IdpPolicyRuleAction objects.
    """

    def __init__(self, config=None):
        super().__init__(config)
        if config:
            raw_providers = config.get("providers", [])
            self.providers = OktaCollection.form_list(
                raw_providers,
                idp_policy_rule_action_provider.IdpPolicyRuleActionProvider
            )
        else:
            self.providers = []

    def request_format(self):
        # Start from the parent's payload and layer this object's fields on top.
        payload = super().request_format()
        payload.update({"providers": self.providers})
        return payload
|
[
"bretterer@gmail.com"
] |
bretterer@gmail.com
|
2f745ad5a119f8562508a06dbe64d04423026c86
|
b553d0c3834e7246c9ff3c1fe9a206ce3ab88e90
|
/employee.py
|
d7b3d47a14e4dda6024e48a5f03f756d3b01fd86
|
[] |
no_license
|
PapaKofi13/vscodes
|
c1eab9968251efe1359729e60234df47dfe02d17
|
331944fd6aba4c67f3125e884b33fbe6de1da565
|
refs/heads/master
| 2021-04-22T16:35:50.466684
| 2020-03-25T02:00:09
| 2020-03-25T02:00:09
| 249,862,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
class Employee:
    """A simple employee record with derived email and full-name properties."""

    def __init__(self, first, last, pay):
        self.first = first
        self.last = last
        self.pay = pay

    @property
    def email(self):
        """Company e-mail address derived from first and last name."""
        return '{}.{}@email.com'.format(self.first, self.last)

    @property
    def fullname(self):
        """First and last name separated by a space."""
        return '{} {}'.format(self.first, self.last)

    def __repr__(self):
        # Bug fix: the original formatted `last` twice (dropping `first`) and
        # passed `pay` as an extra argument to a two-placeholder template.
        return "Employee('{}','{}',{})".format(self.first, self.last, self.pay)
|
[
"micjay113@gmail.com"
] |
micjay113@gmail.com
|
8de2961d32d5904a8cf7ca984a7b05b6c90d602e
|
ba1d95ed0bc04923b5f9816f2f75d45244941291
|
/task_2/task_2b/.ipynb_checkpoints/task2_v-checkpoint.py
|
340dfb3da50825e87bcb4e3d8c41b38a52ae8cbf
|
[] |
no_license
|
emmilner/swarm_sim
|
67e951d44df0bf73e22d4c4ab4453a81ac7cce3f
|
2ed721cda2af98df7dffbe8f2ae876acf7fbedb5
|
refs/heads/master
| 2023-03-26T13:46:00.097756
| 2021-03-19T18:50:23
| 2021-03-19T18:50:23
| 288,507,177
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,831
|
py
|
'''
Swarm Warehouse with Boxes Code:
Displays a bird's eye view of a warehouse with robots moving around, avoiding the walls and each other. Boxes are picked up and moved to exit zone by robots. The boxes are requested to be delivered in a given sequence and that sequence is broadcast to the swarm. The robots will only pick up a box if it is the correct on in the sequence. They will then only pick up a new box when the sequence has moved on and the previous box has been delivered to the exit zone.
** Requires the script warehouse.py to be in the same folder as this script as it is called in the code **
Code authored by Emma Milner and Elliot Hogg
The actual specification for the Toshiba robots is as follows:
agent speed = 2 m/s
agent acceleration 2 m/s/s
diameter of agent is 250 mm
width of warehouse is 5m
height (depth) of warehouse is 5m
'''
# Still to do
# Consider other exit zone options e.g. square in the centre (so that wall avoidance doesn't come in)
# if delivered change the number of boxes.num_boxes so don't have to keep plotting them?
# avoid boxes if you already have a box
import numpy as np
import math
import random
from matplotlib import pyplot as plt
from matplotlib import animation
import scipy
from scipy.spatial.distance import cdist, pdist, euclidean
import pickle
import warehouse
import sys
import os
### INPUTS ###
#num_agents = 20 # Number of agents in swarm (default 50)
radius = 12.5 # Radius of single agent (half of 25)
width = 500 # Width of warehouse (100)
height = 500 # Height (depth) of warehouse (100)
speed = 2 # Agent speed (0.5)
repulsion_distance = radius/2# Distance at which repulsion is first felt (3)
#marker_size = 14 # Diameter of circular marker on plot of warehouse (14)
#num_boxes = 3
box_radius = radius
box_range = 2*box_radius # range at which a box can be picked up
exit_width = int(0.2*width) # if it is too small then it will avoid the wall and be less likely to reach the exit zone
###
R_rob = 15
R_box = 15
R_wall = 25
pick_up_prob = 100 # prob is <= this
ani = True
if ani == True:
num_agents = 10
num_boxes = 50
p = 4
marker_size = width*0.5/20 #diameter
class swarm():
    """A swarm of warehouse robots: positions, headings and carry state.

    Per-robot state arrays:
        check_r   1 = free to pick up a box, 0 = currently carrying one
        heading   heading angle in radians
        rob_c     (N,2) positions; rob_d holds the (N,2) per-step displacement
        last_box  index of the box most recently carried (-1 = none yet)
    """
    def __init__(self,num_agents,p):
        self.speed = speed # Agent speed
        self.num_agents = num_agents
        self.check_r = np.ones(self.num_agents)
        # Random initial headings in roughly (-pi, pi).
        self.heading = 0.0314*np.random.randint(-100,100,self.num_agents)
        # Random start positions, kept clear of the walls and the exit zone.
        self.rob_c = np.random.randint(box_radius*2,width-box_radius-exit_width,(self.num_agents,2))
        self.counter = 0
        self.rob_d = np.zeros((self.num_agents,2))
        self.drop_off_prob = p
        self.beyond_r = np.zeros(self.num_agents)
        self.last_box = np.full(self.num_agents,-1)
    def iterate(self,boxes): # moves the positions forward in time
        """Advance one time step: box pick-ups, robot motion, deliveries."""
        # Pairwise box-robot distances; a pick-up needs a free box and a free
        # robot within box_range, excluding the robot's previous box.
        dist = cdist(boxes.box_c, self.rob_c)
        qu_close_box = np.min(dist,1) < box_range
        qu_close_rob = np.min(dist,0) < box_range
        mins = np.argmin(dist,1)
        cf_box = qu_close_box*boxes.check_b
        cf_rob = qu_close_rob*self.check_r
        for b in range(boxes.num_boxes):
            if cf_box[b] == True and cf_rob[mins[b]] == True and self.last_box[mins[b]] != b :
                self.check_r[mins[b]] = 0
                boxes.check_b[b] = 0
                boxes.box_c[b] = self.rob_c[mins[b]]
                boxes.robot_carrier[b] = mins[b]
        random_walk(self,boxes) # the robots move using the random walk function
        self.rob_c = self.rob_c + self.rob_d
        # Carried boxes inherit their carrier's displacement.  Row index
        # num_agents (inserted as [0, 0]) is the "no carrier" sentinel used
        # to initialise robot_carrier.
        boxes.box_d = np.zeros((boxes.num_boxes,2))
        positions = np.insert(self.rob_d,self.num_agents,[0, 0],axis = 0 )
        boxes.box_d = positions[boxes.robot_carrier]
        boxes.box_d = (boxes.box_d.T*boxes.gone).T
        boxes.box_c = boxes.box_c + boxes.box_d
        # The current sequence box counts as delivered once its x coordinate
        # crosses into the exit zone.
        boxes.beyond_b[boxes.seq] = boxes.box_c.T[0,boxes.seq] > width - exit_width - radius
        sum_beyond = np.sum(boxes.beyond_b)
        if boxes.robot_carrier[boxes.seq] > self.num_agents:
            self.beyond_r[boxes.robot_carrier[boxes.seq]] = self.rob_c.T[0,boxes.robot_carrier[boxes.seq]] > width - exit_width - radius
        anti_check_b = boxes.check_b == 0
        # Push still-carried boxes that crossed the exit a further 200 in x,
        # moving them off the active floor.
        boxes.box_c.T[0] = boxes.box_c.T[0] + (boxes.gone*boxes.beyond_b*anti_check_b*200)
        boxes.gone = boxes.beyond_b == 0
        anti_check_r = self.check_r == 0
        self.check_r = self.check_r + self.beyond_r*anti_check_r
        boxes.delivered = sum_beyond
        # box_drop = np.random.randint(0,100,boxes.num_boxes)
        # prob = box_drop < self.drop_off_prob # don't drop if prob below 50
        # prob[boxes.seq] = 0 # don't drop sequence box
        # prob_check_b = boxes.check_b == 0
        # for b in range(boxes.num_boxes):
        #     if prob_check_b[b]*prob[b] == 1:
        #         self.last_box[boxes.robot_carrier[b]] = b
        #         self.check_r[boxes.robot_carrier[b]] = 1
        # boxes.check_b = boxes.check_b + (prob*prob_check_b)
        # Advance the requested-delivery sequence once the current box has
        # left the map entirely.
        if boxes.box_c.T[0,boxes.seq] > width:
            boxes.seq += 1
class boxes():
    """Box positions plus pick-up/delivery bookkeeping for the swarm."""
    def __init__(self,number_of_boxes,robots):
        self.num_boxes = number_of_boxes
        self.radius = box_radius
        # 1 = free to be picked up, 0 = currently being carried.
        self.check_b = np.ones(self.num_boxes)
        self.delivered = 0
        # Random positions clear of the walls and the exit zone.
        self.box_c = np.random.randint(box_radius*2,width-box_radius-exit_width,(self.num_boxes,2))
        self.box_d = np.zeros((self.num_boxes,2))
        # 1 while the box is still on the floor, 0 once it has left.
        self.gone = np.ones(self.num_boxes)
        # Index of the next box requested for delivery.
        self.seq = 0
        # Carrier robot index per box; robots.num_agents is the "no carrier" sentinel.
        self.robot_carrier = np.full(self.num_boxes,robots.num_agents)
        self.beyond_b = np.zeros(self.num_boxes)
## Avoidance behaviour for avoiding the warehouse walls ##
def avoidance(rob_c,map): # input the agent positions array and the warehouse map
    """Repulsive force pushing each agent away from nearby warehouse walls.

    Returns an (N,2) array of [Fx, Fy] force vectors, one per agent.
    """
    num_agents = len(rob_c) # num_agents is number of agents according to position array
    ## distance from agents to walls ##
    # NOTE(review): despite its name, difference_in_x holds the *y* distances
    # from each agent to the horizontal wall planes (map.planeh minus the
    # agent's y coordinate); difference_in_y symmetrically holds x distances
    # to the vertical planes (map.planev).
    difference_in_x = np.array([map.planeh-rob_c[n][1] for n in range(num_agents)])
    difference_in_y = np.array([map.planev-rob_c[n][0] for n in range(num_agents)])
    # x coordinates are the first row (or column) of the agent positions transposed
    agentsx = rob_c.T[0]
    # y coordinates are the second row (or column) of the agent positions transposed
    agentsy = rob_c.T[1]
    ## Are the agents within the limits of the warehouse?
    # Check x coordinates are within the x boundaries
    # x_lower and x_upper give a bool value of:
    # TRUE if within the warehouse limits
    # FALSE if outside the warehouse limits
    x_lower_wall_limit = agentsx[:, np.newaxis] >= map.limh.T[0] # limh is for horizontal walls
    x_upper_wall_limit = agentsx[:, np.newaxis] <= map.limh.T[1]
    # Interaction combines the lower and upper limit information to give a TRUE or FALSE value to the agents depending on if it is IN/OUT the warehouse boundaries
    interaction = x_upper_wall_limit*x_lower_wall_limit
    # Fy is Force on the agent in y direction due to proximity to the horziontal walls
    # This equation was designed to be very high when the agent is close to the wall and close to 0 otherwise
    Fy = np.exp(-2*abs(difference_in_x) + R_wall)
    # The Force is zero if the interaction is FALSE meaning that the agent is safely within the warehouse boundary (so that is does not keep going forever if there is a mistake)
    Fy = Fy*difference_in_x*interaction
    # Same as x boundaries but now in y
    y_lower_wall_limit = agentsy[:, np.newaxis] >= map.limv.T[0] # limv is vertical walls
    y_upper_wall_limit = agentsy[:, np.newaxis] <= map.limv.T[1]
    interaction = y_lower_wall_limit*y_upper_wall_limit
    Fx = np.exp(-2*abs(difference_in_y) + R_wall)
    Fx = Fx*difference_in_y*interaction
    # For each agent the force in x and y is the sum of the forces from each wall
    Fx = np.sum(Fx, axis=1)
    Fy = np.sum(Fy, axis=1)
    # Combine x and y force vectors
    F = np.array([[Fx[n], Fy[n]] for n in range(num_agents)])
    return F
## Movement function with agent-agent avoidance behaviours ##
def random_walk(swarm,boxes):
    """Compute one step of noisy, force-driven motion for every robot.

    Combines a randomly-perturbed heading drive with repulsion from other
    robots, from free boxes (applied only to robots already carrying one)
    and from the walls.  Writes the (N,2) displacement into swarm.rob_d
    and returns it.
    """
    swarm.counter += 1
    # Add noise to the heading function
    noise = 0.01*np.random.randint(-50,50,(swarm.num_agents))
    swarm.heading += noise
    # Force for movement according to new chosen heading
    heading_x = 1*np.cos(swarm.heading) # move in x
    heading_y = 1*np.sin(swarm.heading) # move in y
    F_heading = -np.array([[heading_x[n], heading_y[n]] for n in range(0, swarm.num_agents)])
    # Agent-agent avoidance
    r = repulsion_distance # distance at which repulsion is felt (set at start of code)
    # Compute (euclidean == cdist) distance between agents
    agent_distance = cdist(swarm.rob_c, swarm.rob_c)
    box_dist = cdist(boxes.box_c,swarm.rob_c)
    # Compute vectors between agents
    proximity_vectors = swarm.rob_c[:,:,np.newaxis]-swarm.rob_c.T[np.newaxis,:,:]
    proximity_to_boxes = boxes.box_c[:,:,np.newaxis] - swarm.rob_c.T[np.newaxis,:,:]
    F_box = R_box*r*np.exp(-box_dist/r)[:,np.newaxis,:]*proximity_to_boxes/(swarm.num_agents-1)
    F_box = np.sum(F_box,axis=0)
    # Only robots that are already carrying a box (check_r == 0) feel
    # repulsion from boxes, so free robots can still approach and pick up.
    not_free = swarm.check_r == 0
    F_box[0] = not_free*F_box[0].T
    F_box[1] = not_free*F_box[1].T
    # Force on agent due to proximity to other agents
    F_agent = R_rob*r*np.exp(-agent_distance/r)[:,np.newaxis,:]*proximity_vectors/(swarm.num_agents-1)
    F_agent = np.sum(F_agent, axis =0).T # Sum of proximity forces
    # Force on agent due to proximity to walls
    F_wall_avoidance = avoidance(swarm.rob_c, swarm.map)
    # Forces added together
    F_agent += F_wall_avoidance + F_heading + F_box.T
    F_x = F_agent.T[0] # Force in x
    F_y = F_agent.T[1] # Force in y
    # New movement due to forces
    new_heading = np.arctan2(F_y, F_x) # new heading due to forces
    move_x = swarm.speed*np.cos(new_heading) # Movement in x due to forces
    move_y = swarm.speed*np.sin(new_heading) # Movement in y due to forces
    # Total change in movement of agent
    swarm.rob_d = -np.array([[move_x[n], move_y[n]] for n in range(0, swarm.num_agents)])
    return swarm.rob_d
    # New agent positions
    #swarm.rob_c += M
##########################################################
def set_up(time,r,b,p):
    """Run a head-less simulation for at most `time` steps.

    Args:
        time: maximum number of iterations to run.
        r: number of robots in the swarm.
        b: number of boxes to deliver.
        p: drop-off probability parameter forwarded to the swarm.

    Returns:
        (success_rate, steps): success_rate is 1 when every box was delivered
        (early exit), otherwise the delivered fraction (or 0) at timeout.
    """
    swarm_group = swarm(r,p)
    box_group = boxes(b,swarm_group)
    warehouse_map = warehouse.map()
    warehouse_map.warehouse_map(width,height)
    warehouse_map.gen()
    swarm_group.map = warehouse_map
    swarm_group.iterate(box_group)
    while swarm_group.counter <= time:
        swarm_group.iterate(box_group)
        if box_group.delivered == box_group.num_boxes:
            # Bug fix: the original had an unreachable exit() after this return.
            return (1,swarm_group.counter)
    sr = box_group.delivered
    #print(box_group.box_times)
    if sr > 0:
        sr = float(sr/box_group.num_boxes)
    return (sr,swarm_group.counter)
if ani == True:
    # NOTE(review): these two assignments shadow the swarm/boxes *classes*
    # with instances, so no further instances can be constructed afterwards.
    swarm = swarm(num_agents,p)
    boxes = boxes(num_boxes,swarm)
    warehouse_map = warehouse.map()
    warehouse_map.warehouse_map(width,height)
    warehouse_map.gen()
    swarm.map = warehouse_map
    swarm.iterate(boxes)
    # Bird's-eye plot: robots as hollow circles, boxes as red squares and the
    # current sequence box as a black square.
    fig = plt.figure()
    ax = plt.axes(xlim=(0, width), ylim=(0, height))
    dot, = ax.plot([swarm.rob_c[i,0] for i in range(swarm.num_agents)],[swarm.rob_c[i,1] for i in range(num_agents)],
        'ko',
        markersize = marker_size, fillstyle = 'none')
    box, = ax.plot([boxes.box_c[i,0] for i in range(boxes.num_boxes)],[boxes.box_c[i,1] for i in range(boxes.num_boxes)], 'rs', markersize = marker_size-5)
    seq, = ax.plot([boxes.box_c[0,0]],[boxes.box_c[0,1]],'ks',markersize = marker_size-5)
    plt.axis('square')
    plt.axis([0,width,0,height])
    def animate(i):
        # One simulation step per animation frame, then refresh the artists.
        swarm.iterate(boxes)
        dot.set_data([swarm.rob_c[n,0] for n in range(num_agents)],[swarm.rob_c[n,1] for n in range(num_agents)])
        # for b in range(num_boxes):
        #     plt.annotate(str(b), (boxes.box_c[b,0], boxes.box_c[b,1]))
        box.set_data([boxes.box_c[n,0] for n in range(boxes.num_boxes)],[boxes.box_c[n,1] for n in range(boxes.num_boxes)])
        seq.set_data([boxes.box_c[boxes.seq,0],[boxes.box_c[boxes.seq,1]]])
        plt.title("Time is "+str(swarm.counter)+"s")
        if boxes.delivered == boxes.num_boxes:
            exit()
    anim = animation.FuncAnimation(fig, animate, frames=500, interval=0.1)
    plt.xlabel("Warehouse width (cm)")
    plt.ylabel("Warehouse height (cm)")
    # Dotted vertical line marking the left edge of the exit zone.
    ex = [width-exit_width, width-exit_width]
    ey = [0, height]
    plt.plot(ex,ey,':')
    plt.show()
|
[
"emma.milner@bristol.ac.uk"
] |
emma.milner@bristol.ac.uk
|
7626d2627355af9ccb92ffd060e7918314e2cfc9
|
0eeb5a7e502606fe505efcda7a9b6aaeab76252d
|
/Versione-3.0.1.0/MainInterface.spec
|
72bdf1ff74fa05115d2ead5ca81cf463869bb346
|
[] |
no_license
|
Beezusburger/JaSONx
|
5b5fe917c6f3aa49140b8d27580ecbf10532b9af
|
adceb6c3fc62924f4a51886d305e66326a548baa
|
refs/heads/master
| 2020-04-02T21:53:59.720688
| 2018-11-12T10:52:45
| 2018-11-12T10:52:45
| 154,814,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,032
|
spec
|
# -*- mode: python -*-
# PyInstaller build specification for the JaSONx main interface.
# Build with:  pyinstaller MainInterface.spec

block_cipher = None  # supply a PyInstaller cipher object here to encrypt the bytecode archive

# Analyse the entry script and collect all of its dependencies.
a = Analysis(['MainInterface.py'],
             pathex=['C:\\Users\\Juri Francia\\Dropbox\\Progetto JaSONx\\Progetto JaSONx\\Sorgenti JaSONx\\Versione 3.0.1.0'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Bundle the pure-Python modules into a compressed archive.
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)
# Emit a single windowed (console=False) executable with a custom icon.
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='MainInterface',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          runtime_tmpdir=None,
          console=False , icon='C:\\Users\\Juri Francia\\Dropbox\\Progetto JaSONx\\Progetto JaSONx\\Sorgenti JaSONx\\Versione 3.0.1.0\\image\\logo_ico.ico')
|
[
"v.zagranovskyy@reply.it"
] |
v.zagranovskyy@reply.it
|
d316fb99654c7e1da09a7124137e1d0eaca98eb7
|
877f3a0fac213a0f994203365884eaac8fba14f0
|
/planning/models.py
|
4303678fb918d798a5544afc7c1a559e1be234da
|
[] |
no_license
|
hlorofilka/Mystash
|
ff609400994a15667be69caf51d8fc40907238b6
|
810e9a9f6726b80e3611ded23fa99338dd2a5012
|
refs/heads/master
| 2020-05-19T01:00:31.779798
| 2019-07-08T19:45:12
| 2019-07-08T19:45:12
| 184,747,392
| 0
| 0
| null | 2019-07-08T19:45:13
| 2019-05-03T11:55:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,513
|
py
|
import datetime
from django.conf import settings
from django.db import models
from transactions.models import Transaction, Account
from django.utils import timezone
from django.core.validators import MaxValueValidator, MinValueValidator
# Create your models here.
class Period(models.Model):
    """A planning period: a date range, a savings goal and derived limits."""
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True, blank=True, default = None)
    starts_at = models.DateField(default=datetime.date.today)
    # NOTE(review): this default is evaluated once at import time, so every
    # Period created while the process runs gets the same (stale) end date.
    # The proper fix is a module-level callable; left unchanged here because
    # that change is migration-visible and outside this block.
    ends_at = models.DateField(default= datetime.date.today() + datetime.timedelta(days=30))
    goal = models.DecimalField(max_digits=20, decimal_places=2, default=0)
    def active_money(self):
        """Net worth (active minus passive account balances) at the period start."""
        money = 0
        act_accounts = Account.objects.filter(holder=self.owner, account_type ='active')
        pas_accounts = Account.objects.filter(holder=self.owner, account_type ='passive')
        for act_account in act_accounts:
            money += act_account.date_balance(self.starts_at)
        for pas_account in pas_accounts:
            money -= pas_account.date_balance(self.starts_at)
        return money
    def at_the_end(self):
        """Net worth on the day after the period ends."""
        next_day = self.ends_at + datetime.timedelta(days=1)
        money = 0
        act_accounts = Account.objects.filter(holder=self.owner, account_type ='active')
        pas_accounts = Account.objects.filter(holder=self.owner, account_type ='passive')
        for act_account in act_accounts:
            money += act_account.date_balance(next_day)
        for pas_account in pas_accounts:
            money -= pas_account.date_balance(next_day)
        return money
    def completion_rate(self):
        """Percentage of the goal reached; raises ZeroDivisionError if goal is 0."""
        return float(self.at_the_end())/float(self.goal)*100
    def duration(self):
        """Length of the period in days, inclusive of both endpoints."""
        return (self.ends_at-self.starts_at).days+1
    def free_money(self):
        """Start-of-period money adjusted by every mandatory transaction."""
        mandatories = self.mandatorytransaction_set.all()
        free_sum = self.active_money()
        for mandatory in mandatories:
            # transaction_type is '+' or '-', so "+amount"/"-amount" parses
            # directly to a signed float.
            free_sum += float(mandatory.transaction_type+ str(mandatory.amount))
        return free_sum
    def max_daylimit(self):
        """Daily allowance if the entire goal were abandoned."""
        return self.free_money()/self.duration()
    def max_goal(self):
        """Largest achievable goal: all free money saved."""
        return self.free_money()
    def daylimit(self):
        """Daily spending allowance that still meets the goal."""
        return round((self.free_money()-float(self.goal))/self.duration(), 2)
    def is_actual(self):
        """True when today falls inside [starts_at, ends_at].

        Bug fix: the original compared the *function* datetime.date.today
        (never called) against dates, raising TypeError at runtime.
        """
        today = datetime.date.today()
        return self.starts_at <= today <= self.ends_at
    def __str__(self):
        return self.starts_at.strftime("%d.%m.%Y")+"-"+ self.ends_at.strftime("%d.%m.%Y")+": the goal is "+ str(self.goal)+ " day limit is "+ str(self.daylimit())
class MandatoryTransaction(models.Model):
    """A planned income or expense attached to a Period."""
    period = models.ForeignKey(Period, on_delete=models.CASCADE, related_name="mandatorytransaction_set")
    title = models.CharField(max_length=200)
    transaction_type_choice = (('-', 'expense'), ('+', 'income'))
    transaction_type = models.CharField(max_length=1, choices=transaction_type_choice)
    amount = models.FloatField(validators=[MinValueValidator(0.0), MaxValueValidator(999999999999.99)], null=True, blank=True, default = 0)

    def money_left(self):
        """Planned amount minus everything already transacted against it."""
        remaining = self.amount
        for t in self.transaction_set.all():
            remaining -= t.amount
        return remaining

    def is_completed(self):
        """True once the planned amount has been fully used up."""
        return self.money_left() <= 0

    def __str__(self):
        return self.title + ": " + self.transaction_type + str(self.amount)
|
[
"hlorofilk@mail.ru"
] |
hlorofilk@mail.ru
|
45cfba7e9e4c299d0619bddc9ddc217fa2dc3b11
|
e7fc26a47929b983562dec490db403664d4389c9
|
/fun/times_3.py
|
53554ef469e8b470777793b8a5113b4a5761ffb8
|
[] |
no_license
|
n-trance/practice
|
0ca4427c018cc9f4a440e98df425c3d5c63e5eb1
|
9674d385d9e561268fa404c7ee99e00b962c2d41
|
refs/heads/master
| 2021-01-17T08:25:53.041272
| 2017-11-06T11:16:24
| 2017-11-06T11:16:24
| 63,580,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 989
|
py
|
'''
given a list, get the largest number by multiplying all numbers.
len(list) >= 3, integers
'''
'''
proposed solution:
1. sort list (nlogn)
2. case: all values >= 0:
- we pick last 3
case: all values <= 0:
- we pick last 3
case: mix of numbers:
- we pick last number
- check if first list[0]*list[1] > list[n-2]*list[n-3]
we pick the largest of the two
***
solution is either:
0,1,n-1 or n-1,n-2,n-3
'''
l1 = [1,2,3] #6
l2 = [0,1,2,3,4] #24
l3 = [-4,-5,-3,0,-1] #0
l4 = [-4,-5,-3, -1] #-12
l5 = [-10, 7, 5, 10, 0] #350
l6 = [-10,0,-10,1,1,1] #100
def times_3(list):
    """Return the largest product of any three numbers in the sequence.

    After sorting, the answer is always either (two smallest * largest) —
    which covers a pair of large negatives — or the product of the three
    largest values.  Requires len(list) >= 3.

    NOTE(review): the parameter name shadows the builtin `list`; it is kept
    for call compatibility, but the shadowed name is no longer rebound.
    """
    nums = sorted(list)  # O(n log n); only the extremes can matter
    low_pair = nums[0] * nums[1] * nums[-1]
    high_triple = nums[-1] * nums[-2] * nums[-3]
    return max(low_pair, high_triple)
print(times_3(l1))
print(times_3(l2))
print(times_3(l3))
print(times_3(l4))
print(times_3(l5))
print(times_3(l6))
|
[
"nethan.tran@gmail.com"
] |
nethan.tran@gmail.com
|
53e7285ae5fb7e5cd1f11a10d46bf67cd9910935
|
7aa20b65a28a348273fa37e6347539cf2b28097b
|
/test/unit/test_configHelpers.py
|
b04693608209749de7eca428a4d06ce11ae9b1ae
|
[] |
no_license
|
aburzinski/MSiA-423-Project
|
b3d3d4c5a75b57e28da809dfe5cef7e897f48bfc
|
76f126152407a48b73092e3914fa3a37e3581e36
|
refs/heads/master
| 2022-07-07T00:36:56.227554
| 2019-06-12T00:02:16
| 2019-06-12T00:02:16
| 180,415,904
| 0
| 2
| null | 2022-06-21T21:58:37
| 2019-04-09T17:19:02
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,126
|
py
|
import sys
import os
sys.path.append(os.environ.get('PYTHONPATH'))
import src.helpers.configHelpers as base
def test_createDatabaseURI():
    """Unit tests for configHelpers.createDatabaseURI."""
    # An unsupported dbtype must raise ValueError.
    raised = False
    try:
        base.createDatabaseURI('postgresql', '127.0.0.1', 'test')
    except ValueError:
        raised = True
    assert raised

    # sqlite URIs embed only the host and database name.
    assert base.createDatabaseURI('sqlite', '127.0.0.1', 'test') == 'sqlite:///127.0.0.1/test.db'

    # mysql URIs include credentials and port.
    assert base.createDatabaseURI('mysql', '127.0.0.1', 'test', port='3306',
        username='user', password='pw') == 'mysql+pymysql://user:pw@127.0.0.1:3306/test'
|
[
"aburzinski2@gmail.com"
] |
aburzinski2@gmail.com
|
504475257dbc623201bf094a2bc7a6bfbb28627f
|
9fd3d9f37c49c0080cf8d22486b2211f708830d8
|
/grupo6.py
|
ee24b9db09e011b1448de1fa799dd1847792fe4f
|
[] |
no_license
|
tosh2/ia-mundial
|
7762288cc638a541ff7c9651fcb072c05adb63af
|
57fe100609c6f4836bf7b839e8e10c0e3b9d1e3b
|
refs/heads/master
| 2020-03-22T00:15:22.667885
| 2018-06-30T08:45:04
| 2018-06-30T08:45:04
| 139,233,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
#!/usr/bin/env python3
# Grupo 8 - IA Junio 2018
# ./grupo6.py
import math
def fase_grupos(name):
    """Group-stage results for team *name*.

    Returns a fresh list of [opponent, goals_for, goals_against] rows
    (one per group game), or None for an unknown team code.  A new list
    is built on every call so callers may mutate the result safely.
    """
    results = {
        "FRA": [["AUS", 2, 1], ["PER", 1, 0], ["DIN", 0, 0]],
        "ARG": [["ISL", 1, 1], ["CRO", 0, 3], ["NIG", 2, 1]],
        "URU": [["EGY", 1, 0], ["ARA", 1, 0], ["RUS", 3, 0]],
        "POR": [["ESP", 3, 3], ["MAR", 1, 0], ["IRA", 1, 1]],
        "ESP": [["POR", 3, 3], ["IRA", 1, 0], ["MAR", 2, 2]],
        "RUS": [["ARA", 5, 0], ["EGY", 3, 1], ["URU", 0, 3]],
        "CRO": [["NIG", 2, 0], ["ARG", 3, 0], ["ISL", 2, 1]],
        "DIN": [["PER", 1, 0], ["AUS", 1, 1], ["FRA", 0, 0]],
        "BRA": [["SUI", 1, 1], ["COS", 2, 0], ["SER", 2, 0]],
        "MEX": [["ALE", 1, 0], ["KOR", 2, 1], ["SUE", 0, 3]],
        "BEL": [["PAN", 3, 0], ["TUN", 5, 2], ["ING", 1, 0]],
        "JAP": [["COL", 2, 1], ["SEN", 2, 2], ["POL", 0, 1]],
        "SUE": [["KOR", 1, 0], ["ALE", 1, 2], ["MEX", 3, 0]],
        "SUI": [["BRA", 1, 1], ["SER", 2, 1], ["COS", 2, 2]],
        "COL": [["JAP", 1, 2], ["POL", 3, 0], ["SEN", 1, 0]],
        "ING": [["TUN", 2, 0], ["PAN", 6, 1], ["BEL", 0, 1]],
    }
    return results.get(name)
def quiniela(lista_octavos):
    """Print a score prediction for each [home, visitor] fixture.

    Each team's predicted goals are its average goals-for over the three
    group-stage games, rounded to the nearest integer.  The input list is
    consumed (emptied) while iterating, matching the original behaviour.
    """
    lista = lista_octavos
    while lista:
        home, visitor = lista.pop(0)
        goles_home = float(sum(game[1] for game in fase_grupos(home)))
        goles_visitor = float(sum(game[1] for game in fase_grupos(visitor)))
        print(home + " " + str(int(round(goles_home/3))) +"-"+ str(int(round(goles_visitor/3))) + " " + visitor)
quiniela([['FRA','ARG'],['URU','POR'],['ESP','RUS'],['CRO','DIN'],['BRA','MEX'],['BEL','JAP'],['SUE','SUI'],['COL','ING']])
|
[
"josuetz21@gmail.com"
] |
josuetz21@gmail.com
|
2af9f47d4deb43cc4c69c7bc021bb310907e4bcd
|
5ce657823418ff9d9c46598154fde03ab2f23d72
|
/data_preprocessing_and_visualization/Data_Preprocessing.py
|
4851f3e892159d651d742e98afafbd56c704ad17
|
[
"MIT"
] |
permissive
|
ZTong1201/Ship-Detection-Project
|
feb6a5e57c893479eca4ea7b328f695f7b709f4f
|
abd49c99294f62d4530807f8ca48c0e3ae79dbb4
|
refs/heads/master
| 2020-05-16T00:12:55.638525
| 2019-05-27T23:59:15
| 2019-05-27T23:59:15
| 182,574,151
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,757
|
py
|
# coding: utf-8
# # Data Preprocessing
# I refer to U-net model with submission on the website: https://www.kaggle.com/hmendonca/u-net-model-with-submission. On Augment Data part, we can tweak the parameters to process images. For some detail, I am still trying to understand them.
# In[6]:
# Lets import some useful libraires
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from skimage.io import imread
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from skimage.segmentation import mark_boundaries
#from skimage.util import montage2d as montage
from skimage.morphology import binary_opening, disk, label
import gc; gc.enable() # memory is tight
montage_rgb = lambda x: np.stack([montage(x[:, :, :, i]) for i in range(x.shape[3])], -1)
ship_dir = '../input'
train_image_dir = os.path.join(ship_dir, 'train_v2')
test_image_dir = os.path.join(ship_dir, 'test_v2')
def multi_rle_encode(img, **kwargs):
    '''Split a mask into connected components and RLE-encode each one.

    Returns a list with one RLE string per labelled region (background,
    label 0, is skipped).  Extra kwargs are forwarded to rle_encode.
    '''
    # skimage label() assigns a distinct positive integer to each
    # connected region; 0 stays background.
    labels = label(img)
    if img.ndim > 2:
        # Multi-channel mask: collapse the channel axis before encoding.
        return [rle_encode(np.sum(labels==k, axis=2), **kwargs) for k in np.unique(labels[labels>0])]
    else:
        return [rle_encode(labels==k, **kwargs) for k in np.unique(labels[labels>0])]
# ref: https://www.kaggle.com/paulorzp/run-length-encode-and-decode
def rle_encode(img, min_max_threshold=1e-3, max_mean_threshold=None):
    '''Run-length encode a binary mask (1 = object, 0 = background).

    Pixels are scanned in column-major (Fortran) order, as required by the
    competition format.  Returns "start length start length ..." with
    1-based starts, or '' when the mask is (near-)empty or — when
    max_mean_threshold is given — overfilled.
    '''
    if np.max(img) < min_max_threshold:
        return ''  # effectively empty mask: nothing to encode
    if max_mean_threshold and np.mean(img) > max_mean_threshold:
        return ''  # overfilled mask is treated as noise
    # Pad with sentinel zeros so every run has both a rising and a
    # falling edge.
    flat = np.concatenate([[0], img.T.flatten(), [0]])
    edges = np.where(flat[1:] != flat[:-1])[0] + 1
    edges[1::2] -= edges[::2]  # turn (start, end) pairs into (start, length)
    return ' '.join(map(str, edges))
def rle_decode(mask_rle, shape=(768, 768)):
    '''Decode an RLE string ("start length ...", 1-based, column-major)
    into a uint8 mask of the given (height, width).

    An empty string decodes to an all-zero mask.
    '''
    tokens = [int(t) for t in mask_rle.split()]
    flat = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for start, run in zip(tokens[0::2], tokens[1::2]):
        flat[start - 1:start - 1 + run] = 1
    # RLE order is column-major but numpy reshapes row-major, so transpose.
    return flat.reshape(shape).T
def masks_as_image(in_mask_list):
    """OR the per-ship RLE masks together into one 768x768 binary mask.

    Non-string entries (NaN rows for ship-free images) are skipped.
    """
    combined = np.zeros((768, 768), dtype = np.uint8)
    for rle in in_mask_list:
        if isinstance(rle, str):
            combined |= rle_decode(rle)
    return combined
def masks_as_color(in_mask_list):
    """Render each ship mask with a distinct intensity in one float image.

    Non-string entries (NaN) are skipped.  Intensities are spread over
    (0.5, 1.0] so a heatmap colormap visually separates individual ships.
    """
    # np.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement (float64 dtype).
    all_masks = np.zeros((768, 768), dtype = float)
    scale = lambda x: (len(in_mask_list)+x+1) / (len(in_mask_list)*2) ## scale the heatmap image to shift
    for i,mask in enumerate(in_mask_list):
        if isinstance(mask, str):
            all_masks[:,:] += scale(i) * rle_decode(mask)
    return all_masks
# In[54]:
test_image_dir
# In[7]:
masks = pd.read_csv(os.path.join('../input/', 'train_ship_segmentations_v2.csv'))
not_empty = pd.notna(masks.EncodedPixels)
print(not_empty.sum(), 'masks in', masks[not_empty].ImageId.nunique(), 'images')
print((~not_empty).sum(), 'empty images in', masks.ImageId.nunique(), 'total images')
masks.head()
# # Split into training and validation groups
# We stratify by the number of boats appearing so we have nice balances in each set
# In[8]:
masks['ships'] = masks['EncodedPixels'].map(lambda c_row: 1 if isinstance(c_row, str) else 0)
unique_img_ids = masks.groupby('ImageId').agg({'ships': 'sum'}).reset_index()
unique_img_ids['has_ship'] = unique_img_ids['ships'].map(lambda x: 1.0 if x>0 else 0.0)
unique_img_ids['has_ship_vec'] = unique_img_ids['has_ship'].map(lambda x: [x])
# some files are too small/corrupt
unique_img_ids['file_size_kb'] = unique_img_ids['ImageId'].map(lambda c_img_id:
os.stat(os.path.join(train_image_dir,
c_img_id)).st_size/1024)
unique_img_ids = unique_img_ids[unique_img_ids['file_size_kb'] > 50] # keep only +50kb files
unique_img_ids['file_size_kb'].hist()
masks.drop(['ships'], axis=1, inplace=True)
unique_img_ids.sample(7)
# ### Examine Number of Ship Images
# Here we examine how often ships appear and replace the ones without any ships with 0
# In[9]:
unique_img_ids['ships'].hist(bins=unique_img_ids['ships'].max())
# # Undersample Empty Images
# Here we undersample the empty images to get a better balanced group with more ships to try and segment
# In[34]:
SAMPLES_PER_GROUP = 4000
balanced_train_df = unique_img_ids.groupby('ships').apply(lambda x: x.sample(SAMPLES_PER_GROUP) if len(x) > SAMPLES_PER_GROUP else x)
balanced_train_df['ships'].hist(bins=balanced_train_df['ships'].max()+1)
print(balanced_train_df.shape[0], 'masks')
# In[35]:
from sklearn.model_selection import train_test_split
train_ids, valid_ids = train_test_split(balanced_train_df,
test_size = 0.2,
stratify = balanced_train_df['ships'])
train_df = pd.merge(masks, train_ids)
valid_df = pd.merge(masks, valid_ids)
print(train_df.shape[0], 'training masks')
print(valid_df.shape[0], 'validation masks')
# # Decode all the RLEs into Images
# We make a generator to produce batches of images
# In[36]:
# Model parameters
BATCH_SIZE = 48
EDGE_CROP = 16
GAUSSIAN_NOISE = 0.1
UPSAMPLE_MODE = 'SIMPLE'
# downsampling inside the network
NET_SCALING = (1, 1)
# downsampling in preprocessing
IMG_SCALING = (3, 3)
# number of validation images to use
VALID_IMG_COUNT = 900
# maximum number of steps_per_epoch in training
MAX_TRAIN_STEPS = 9
MAX_TRAIN_EPOCHS = 99
AUGMENT_BRIGHTNESS = False
# In[37]:
def make_image_gen(in_df, batch_size = BATCH_SIZE):
    """Infinite generator of (images, masks) training batches.

    Groups the mask dataframe by ImageId, reads each image from
    train_image_dir, decodes its RLE masks, optionally downsamples both
    by striding with IMG_SCALING, and yields batches of
    (float images in [0,1], uint8 masks).  Shuffles image order each
    epoch; loops forever, as Keras fit_generator expects.
    """
    all_batches = list(in_df.groupby('ImageId'))
    out_rgb = []
    out_mask = []
    while True:
        np.random.shuffle(all_batches)
        for c_img_id, c_masks in all_batches:
            rgb_path = os.path.join(train_image_dir, c_img_id)
            c_img = imread(rgb_path)
            c_mask = np.expand_dims(masks_as_image(c_masks['EncodedPixels'].values), -1)
            if IMG_SCALING is not None:
                # Downsample by simple striding (no interpolation).
                c_img = c_img[::IMG_SCALING[0], ::IMG_SCALING[1]]
                c_mask = c_mask[::IMG_SCALING[0], ::IMG_SCALING[1]]
            out_rgb += [c_img]
            out_mask += [c_mask]
            if len(out_rgb)>=batch_size:
                # Normalize images to [0,1]; masks stay 0/1.
                yield np.stack(out_rgb, 0)/255.0, np.stack(out_mask, 0)
                out_rgb, out_mask=[], []
# In[38]:
train_gen = make_image_gen(train_df)
train_x, train_y = next(train_gen)
print('x', train_x.shape, train_x.min(), train_x.max())
print('y', train_y.shape, train_y.min(), train_y.max())
# In[39]:
from skimage.util.montage import montage2d as montage
#from skimage.util import montage2d as montage
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize = (30, 10))
batch_rgb = montage_rgb(train_x)
batch_seg = montage(train_y[:, :, :, 0])
ax1.imshow(batch_rgb)
ax1.set_title('Images')
ax2.imshow(batch_seg)
ax2.set_title('Segmentations')
ax3.imshow(mark_boundaries(batch_rgb,
batch_seg.astype(int)))
ax3.set_title('Outlined Ships')
fig.savefig('overview.png')
# # Make the Validation Set
# In[40]:
get_ipython().run_cell_magic('time', '', 'valid_x, valid_y = next(make_image_gen(valid_df, VALID_IMG_COUNT))\nprint(valid_x.shape, valid_y.shape)')
# # Augment Data
# In[41]:
from keras.preprocessing.image import ImageDataGenerator
dg_args = dict(featurewise_center = False,
samplewise_center = False,
rotation_range = 45, # a value in degrees (0-180), a range within which to randomly rotate pictures
width_shift_range = 0.1, # randomly translate pictures vertically or horizontally
height_shift_range = 0.1, # randomly translate pictures vertically or horizontally
shear_range = 0.01, # randomly applying shearing transformations
zoom_range = [0.9, 1.25],
horizontal_flip = True, #randomly flipping half of the images horizontally
vertical_flip = True,
fill_mode = 'reflect', # strategy used for filling in newly created pixels, which can appear after a rotation or a width/height shift.
data_format = 'channels_last')
# brightness can be problematic since it seems to change the labels differently from the images
if AUGMENT_BRIGHTNESS:
dg_args[' brightness_range'] = [0.5, 1.5]
image_gen = ImageDataGenerator(**dg_args)
if AUGMENT_BRIGHTNESS:
dg_args.pop('brightness_range')
label_gen = ImageDataGenerator(**dg_args)
def create_aug_gen(in_gen, seed = None):
    """Wrap a batch generator with identical random augmentation on
    images and masks.

    Both ImageDataGenerators are driven with the same per-batch seed so
    the geometric transforms applied to an image and to its mask match
    exactly.  Images are temporarily rescaled to [0,255] because
    ImageDataGenerator.flow expects that range, then back to [0,1].
    """
    np.random.seed(seed if seed is not None else np.random.choice(range(9999)))
    for in_x, in_y in in_gen:
        seed = np.random.choice(range(9999))
        # keep the seeds synchronized, otherwise the augmentation applied
        # to the images differs from the one applied to the masks
        g_x = image_gen.flow(255*in_x, 
                             batch_size = in_x.shape[0], 
                             seed = seed, 
                             shuffle=True)
        g_y = label_gen.flow(in_y, 
                             batch_size = in_x.shape[0], 
                             seed = seed, 
                             shuffle=True)
        yield next(g_x)/255.0, next(g_y)
# In[42]:
cur_gen = create_aug_gen(train_gen)
t_x, t_y = next(cur_gen)
print('x', t_x.shape, t_x.dtype, t_x.min(), t_x.max())
print('y', t_y.shape, t_y.dtype, t_y.min(), t_y.max())
# only keep first 9 samples to examine in detail
t_x = t_x[:9]
t_y = t_y[:9]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (20, 10))
ax1.imshow(montage_rgb(t_x), cmap='gray')
ax1.set_title('images')
ax2.imshow(montage(t_y[:, :, :, 0]), cmap='gray_r')
ax2.set_title('ships')
# In[26]:
gc.collect()
# # Build a Model
# Here we use a slight deviation on the U-Net standard
# In[43]:
from keras import models, layers
# Build U-Net model
def upsample_conv(filters, kernel_size, strides, padding):
    # Learnable upsampling via transposed convolution (used when
    # UPSAMPLE_MODE == 'DECONV').
    return layers.Conv2DTranspose(filters, kernel_size, strides=strides, padding=padding)
def upsample_simple(filters, kernel_size, strides, padding):
    # Parameter-free nearest-neighbour upsampling; filters, kernel_size
    # and padding are accepted only to mirror upsample_conv's signature.
    return layers.UpSampling2D(strides)
if UPSAMPLE_MODE=='DECONV':
upsample=upsample_conv
else:
upsample=upsample_simple
input_img = layers.Input(t_x.shape[1:], name = 'RGB_Input')
pp_in_layer = input_img
if NET_SCALING is not None:
pp_in_layer = layers.AvgPool2D(NET_SCALING)(pp_in_layer)
pp_in_layer = layers.GaussianNoise(GAUSSIAN_NOISE)(pp_in_layer)
pp_in_layer = layers.BatchNormalization()(pp_in_layer)
c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same') (pp_in_layer)
c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same') (c1)
p1 = layers.MaxPooling2D((2, 2)) (c1)
c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same') (p1)
c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same') (c2)
p2 = layers.MaxPooling2D((2, 2)) (c2)
c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same') (p2)
c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same') (c3)
p3 = layers.MaxPooling2D((2, 2)) (c3)
c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same') (p3)
c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same') (c4)
p4 = layers.MaxPooling2D(pool_size=(2, 2)) (c4)
c5 = layers.Conv2D(128, (3, 3), activation='relu', padding='same') (p4)
c5 = layers.Conv2D(128, (3, 3), activation='relu', padding='same') (c5)
u6 = upsample(64, (2, 2), strides=(2, 2), padding='same') (c5)
u6 = layers.concatenate([u6, c4])
c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same') (u6)
c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same') (c6)
u7 = upsample(32, (2, 2), strides=(2, 2), padding='same') (c6)
u7 = layers.concatenate([u7, c3])
c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same') (u7)
c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same') (c7)
u8 = upsample(16, (2, 2), strides=(2, 2), padding='same') (c7)
u8 = layers.concatenate([u8, c2])
c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same') (u8)
c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same') (c8)
u9 = upsample(8, (2, 2), strides=(2, 2), padding='same') (c8)
u9 = layers.concatenate([u9, c1], axis=3)
c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same') (u9)
c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same') (c9)
d = layers.Conv2D(1, (1, 1), activation='sigmoid') (c9)
# d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d)
# d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d)
if NET_SCALING is not None:
d = layers.UpSampling2D(NET_SCALING)(d)
seg_model = models.Model(inputs=[input_img], outputs=[d])
seg_model.summary()
# In[44]:
import keras.backend as K
from keras.optimizers import Adam
from keras.losses import binary_crossentropy
## intersection over union
def IoU(y_true, y_pred, eps=1e-6):
    """Negative soft intersection-over-union over the batch, used as a
    Keras loss (more negative = better overlap).

    NOTE(review): np.max on a symbolic Keras tensor does not see runtime
    batch values, so the empty-image branch presumably only fires when
    this is called eagerly on arrays — confirm before relying on it.
    """
    if np.max(y_true) == 0.0:
        return IoU(1-y_true, 1-y_pred) ## empty image; calc IoU of zeros
    # Soft IoU: sums over the H, W, C axes, mean over the batch axis.
    intersection = K.sum(y_true * y_pred, axis=[1,2,3])
    union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3]) - intersection
    return -K.mean( (intersection + eps) / (union + eps), axis=0)
# In[45]:
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
weight_path="{}_weights.best.hdf5".format('seg_model')
checkpoint = ModelCheckpoint(weight_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_weights_only=True)
reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.33,
patience=1, verbose=1, mode='min',
min_delta=0.0001, cooldown=0, min_lr=1e-8)
early = EarlyStopping(monitor="val_loss", mode="min", verbose=2,
patience=20) # probably needs to be more patient, but kaggle time is limited
callbacks_list = [checkpoint, early, reduceLROnPlat]
# In[46]:
def fit():
    """Compile seg_model with the IoU loss and train it on augmented
    batches of train_df, validating on (valid_x, valid_y).

    Returns a one-element list containing the Keras History object, so
    callers can concatenate histories across repeated fit() calls.
    Relies on module-level globals: seg_model, train_df, valid_x/valid_y,
    callbacks_list and the training constants.
    """
    seg_model.compile(optimizer=Adam(1e-3, decay=1e-6), loss=IoU, metrics=['binary_accuracy'])
    # Cap steps so one epoch never exceeds the available data.
    step_count = min(MAX_TRAIN_STEPS, train_df.shape[0]//BATCH_SIZE)
    aug_gen = create_aug_gen(make_image_gen(train_df))
    loss_history = [seg_model.fit_generator(aug_gen, 
                                 steps_per_epoch=step_count, 
                                 epochs=MAX_TRAIN_EPOCHS, 
                                 validation_data=(valid_x, valid_y),
                                 callbacks=callbacks_list,
                                workers=1 # the generator is not very thread safe
                                           )]
    return loss_history
while True:
loss_history = fit()
if np.min([mh.history['val_loss'] for mh in loss_history]) < -0.2:
break
# In[47]:
def show_loss(loss_history):
    """Plot loss and binary accuracy curves from a list of Keras History
    objects (as returned by fit()), concatenated along the epoch axis.
    """
    epochs = np.concatenate([mh.epoch for mh in loss_history])
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(22, 10))
    # Training in blue, validation in red, on both panels.
    _ = ax1.plot(epochs, np.concatenate([mh.history['loss'] for mh in loss_history]), 'b-',
                 epochs, np.concatenate([mh.history['val_loss'] for mh in loss_history]), 'r-')
    ax1.legend(['Training', 'Validation'])
    ax1.set_title('Loss')
    _ = ax2.plot(epochs, np.concatenate([mh.history['binary_accuracy'] for mh in loss_history]), 'b-',
                 epochs, np.concatenate([mh.history['val_binary_accuracy'] for mh in loss_history]), 'r-')
    ax2.legend(['Training', 'Validation'])
    ax2.set_title('Binary Accuracy (%)')
show_loss(loss_history)
# In[48]:
seg_model.load_weights(weight_path)
seg_model.save('seg_model.h5')
# In[49]:
pred_y = seg_model.predict(valid_x)
print(pred_y.shape, pred_y.min(axis=0).max(), pred_y.max(axis=0).min(), pred_y.mean())
# In[50]:
fig, ax = plt.subplots(1, 1, figsize = (6, 6))
ax.hist(pred_y.ravel(), np.linspace(0, 1, 20))
ax.set_xlim(0, 1)
ax.set_yscale('log', nonposy='clip')
# # Prepare Full Resolution Model
# Here we account for the scaling so everything can happen in the model itself
# In[51]:
if IMG_SCALING is not None:
fullres_model = models.Sequential()
fullres_model.add(layers.AvgPool2D(IMG_SCALING, input_shape = (None, None, 3)))
fullres_model.add(seg_model)
fullres_model.add(layers.UpSampling2D(IMG_SCALING))
else:
fullres_model = seg_model
fullres_model.save('fullres_model.h5')
# # Visualize predictions
# In[52]:
def raw_prediction(img, path=test_image_dir):
    """Run the full-resolution model on one image file.

    Args:
        img: image file name within *path*.
        path: directory containing the image.
    Returns:
        (segmentation, image) with the image scaled to [0, 1].
    """
    # Bug fix: the original read the notebook-global loop variable
    # `c_img_name` instead of the `img` argument, silently ignoring the
    # parameter.  Existing call sites pass the same value, so behaviour
    # at those sites is unchanged.
    c_img = imread(os.path.join(path, img))
    c_img = np.expand_dims(c_img, 0)/255.0
    cur_seg = fullres_model.predict(c_img)[0]
    return cur_seg, c_img[0]
def smooth(cur_seg):
    # Threshold the soft prediction at 0.99, then morphologically open
    # with a radius-2 disk (expanded with a channel axis to match the
    # (H, W, 1) mask) to remove speckle detections.
    return binary_opening(cur_seg>0.99, np.expand_dims(disk(2), -1))
def predict(img, path=test_image_dir):
    """Predict and morphologically clean a binary ship mask for one
    image file; returns (binary mask, normalized image)."""
    cur_seg, c_img = raw_prediction(img, path=path)
    return smooth(cur_seg), c_img
## Get a sample of each group of ship count
samples = valid_df.groupby('ships').apply(lambda x: x.sample(1))
fig, m_axs = plt.subplots(samples.shape[0], 4, figsize = (15, samples.shape[0]*4))
[c_ax.axis('off') for c_ax in m_axs.flatten()]
for (ax1, ax2, ax3, ax4), c_img_name in zip(m_axs, samples.ImageId.values):
first_seg, first_img = raw_prediction(c_img_name, train_image_dir)
ax1.imshow(first_img)
ax1.set_title('Image: ' + c_img_name)
ax2.imshow(first_seg[:, :, 0], cmap=get_cmap('jet'))
ax2.set_title('Model Prediction')
reencoded = masks_as_color(multi_rle_encode(smooth(first_seg)[:, :, 0]))
ax3.imshow(reencoded)
ax3.set_title('Prediction Masks')
ground_truth = masks_as_color(masks.query('ImageId=="{}"'.format(c_img_name))['EncodedPixels'])
ax4.imshow(ground_truth)
ax4.set_title('Ground Truth')
fig.savefig('validation.png')
# # Submission
# In[57]:
test_paths = np.array(os.listdir(test_image_dir))
print(len(test_paths), 'test images found')
# In[58]:
from tqdm import tqdm_notebook
def pred_encode(img, **kwargs):
    """Predict the masks for one test image and RLE-encode each ship.

    Returns [[image_name, rle], ...] rows ready for the submission
    dataframe; kwargs are forwarded to the RLE encoder thresholds.
    """
    cur_seg, _ = predict(img)
    cur_rles = multi_rle_encode(cur_seg, **kwargs)
    return [[img, rle] for rle in cur_rles if rle is not None]
out_pred_rows = []
for c_img_name in tqdm_notebook(test_paths):
out_pred_rows += pred_encode(c_img_name, min_max_threshold=1.0)
# In[59]:
sub = pd.DataFrame(out_pred_rows)
sub.columns = ['ImageId', 'EncodedPixels']
sub = sub[sub.EncodedPixels.notnull()]
sub.head()
# In[60]:
## let's see what we got
TOP_PREDICTIONS=5
fig, m_axs = plt.subplots(TOP_PREDICTIONS, 2, figsize = (9, TOP_PREDICTIONS*5))
[c_ax.axis('off') for c_ax in m_axs.flatten()]
for (ax1, ax2), c_img_name in zip(m_axs, sub.ImageId.unique()[:TOP_PREDICTIONS]):
c_img = imread(os.path.join(test_image_dir, c_img_name))
c_img = np.expand_dims(c_img, 0)/255.0
ax1.imshow(c_img[0])
ax1.set_title('Image: ' + c_img_name)
ax2.imshow(masks_as_color(sub.query('ImageId=="{}"'.format(c_img_name))['EncodedPixels']))
ax2.set_title('Prediction')
# In[ ]:
sub1 = pd.read_csv('../input/sample_submission_v2.csv')
sub1 = pd.DataFrame(np.setdiff1d(sub1['ImageId'].unique(), sub['ImageId'].unique(), assume_unique=True), columns=['ImageId'])
sub1['EncodedPixels'] = None
print(len(sub1), len(sub))
sub = pd.concat([sub, sub1])
print(len(sub))
sub.to_csv('submission.csv', index=False)
sub.head()
|
[
"buaatzy@163.com"
] |
buaatzy@163.com
|
f879c67c6bdc8c15835aa868bd0ccc952ad1aae4
|
5194f6fc9f0081464c4ed813aeba842324cf79a2
|
/services/experiments.py
|
6ebded81a4f7851d2d4cd67f4434be2ef2a36ec9
|
[] |
no_license
|
ervindobri/MyoExercises
|
157d84932493b8d2c8c84e57294dd66d98f80274
|
f791cc919f88e7d4a974fb4d768d3cb3862cae60
|
refs/heads/master
| 2023-06-09T09:11:48.355232
| 2021-07-01T09:07:17
| 2021-07-01T09:07:17
| 333,781,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
def accuracy():
    """Display a confusion-matrix heatmap for the four foot-exercise
    classes using a hard-coded 20-samples-per-class result matrix.

    The commented-out arrays are alternative experiment results kept for
    reference.
    """
    labels = ["Tip Toe", "Toe Clenches", "Toe lift", "Rest"]
    # array = [[49,1,0,0],
    #          [0, 45, 2, 3],
    #          [0, 0, 49, 1],
    #          [0, 0, 0, 50]]
    array = [[19, 1, 0, 0],
             [0, 19, 1, 0],
             [0, 0, 20, 0],
             [0, 0, 0, 20]]
    # array = [[20,0,0,0],
    #          [2, 15, 3, 0],
    #          [0, 2, 17, 1],
    #          [0, 0, 0, 20]]
    df_cm = pd.DataFrame(array, range(4), range(4))
    # plt.figure(figsize=(10,7))
    sn.set(font_scale=1.4)  # for label size
    sn.heatmap(df_cm, annot=True, annot_kws={"size": 16}, xticklabels=labels, yticklabels=labels,
               cmap='Blues')  # font size
    plt.yticks(rotation=0)
    plt.xticks(rotation=0)
    plt.show()
def main():
    """Entry point: render the confusion-matrix heatmap."""
    # Removed the redundant `pass` that followed the call.
    accuracy()
if __name__ == '__main__':
main()
|
[
"dobriervin@yahoo.com"
] |
dobriervin@yahoo.com
|
d3618680d0af7959816004d091cc9d1af9580b7a
|
e204ac41a9f8d91902f4392850b97e322d4b67c4
|
/assignment1/aipython/pythonDemo.py
|
eeb346ccdd81ebf98b17accd5d52c9f7b7566195
|
[] |
no_license
|
smartworlding/COMP9414
|
39446a6035323d0215059247b70014ac5c8a024e
|
5de75804404a6fafdb280305b62ac729e278a60f
|
refs/heads/master
| 2023-03-15T19:46:50.550730
| 2021-01-05T05:32:20
| 2021-01-05T05:32:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,555
|
py
|
# pythonDemo.py - Some tricky examples
# AIFCA Python3 code Version 0.8.1 Documentation at http://aipython.org
# Artificial Intelligence: Foundations of Computational Agents
# http://artint.info
# Copyright David L Poole and Alan K Mackworth 2017.
# This work is licensed under a Creative Commons
# Attribution-NonCommercial-ShareAlike 4.0 International License.
# See: http://creativecommons.org/licenses/by-nc-sa/4.0/deed.en
fun_list1 = []
for i in range(5):
def fun1(e):
return e+i
fun_list1.append(fun1)
fun_list2 = []
for i in range(5):
def fun2(e,iv=i):
return e+iv
fun_list2.append(fun2)
fun_list3 = [lambda e: e+i for i in range(5)]
fun_list4 = [lambda e,iv=i: e+iv for i in range(5)]
i=56
# in Shell do
## ipython -i pythonDemo.py
# Try these (copy text after the comment symbol and paste in the Python prompt):
# print([f(10) for f in fun_list1])
# print([f(10) for f in fun_list2])
# print([f(10) for f in fun_list3])
# print([f(10) for f in fun_list4])
def myrange(start, stop, step=1):
    """Yield start, start+step, ... for every value strictly below stop.

    Works with non-integer steps, unlike the builtin range.
    """
    assert step > 0, "only positive steps implemented in myrange"
    current = start
    while current < stop:
        yield current
        current = current + step
print("myrange(2,30,3):",list(myrange(2,30,3)))
def ga(n):
    """Yield e*e for every even nonnegative integer e below n."""
    yield from (even * even for even in range(0, n, 2))
a = ga(20)
def myenumerate(enum):
    """Re-implementation of the builtin enumerate for sequences:
    yields (index, element) pairs."""
    yield from zip(range(len(enum)), enum)
import matplotlib.pyplot as plt
def myplot(min,max,step,fun1,fun2):
    """Plot fun1 and fun2 over range(min, max, step) on shared axes.

    fun2's docstring is used as its legend label, so the callables passed
    here should carry a meaningful one-line docstring.
    Note: the parameters min/max shadow the builtins inside this function.
    """
    plt.ion() # make it interactive
    plt.xlabel("The x axis")
    plt.ylabel("The y axis")
    plt.xscale('linear') # Makes a 'log' or 'linear' scale
    xvalues = range(min,max,step)
    plt.plot(xvalues,[fun1(x) for x in xvalues],
             label="The first fun")
    plt.plot(xvalues,[fun2(x) for x in xvalues], linestyle='--',color='k',
             label=fun2.__doc__) # use the doc string of the function
    plt.legend(loc="upper right") # display the legend
def slin(x):
    """y=2x+7"""
    # Docstring kept verbatim: myplot() displays it as the legend label.
    return 7 + 2 * x
def sqfun(x):
    """y=(x-40)^2/10-20"""
    # Docstring kept verbatim: myplot() displays it as the legend label.
    shifted = x - 40
    return shifted * shifted / 10 - 20
# Try the following:
# from pythonDemo import myplot, slin, sqfun
# import matplotlib.pyplot as plt
# myplot(0,100,1,slin,sqfun)
# plt.legend(loc="best")
# import math
# plt.plot([41+40*math.cos(th/10) for th in range(50)],
# [100+100*math.sin(th/10) for th in range(50)])
# plt.text(40,100,"ellipse?")
# plt.xscale('log')
|
[
"yuki@yukideMacBook-Pro.local"
] |
yuki@yukideMacBook-Pro.local
|
8f4065d632706b252e9aaa5aef0f380f65fed859
|
57c38487a6a689318c960fa7d6b0185f372241bc
|
/presalytics_ooxml_automation/models/theme_effect_map.py
|
d5e94240d7206cc938862efbf1be434f6ab396ab
|
[
"MIT"
] |
permissive
|
presalytics/ooxml-automation-python-client
|
2c88bae455b7e567ebdb6a4ea106bbdcd192ac47
|
fa6100eef1743e43b4d25b3faac79d39fe32c9d7
|
refs/heads/master
| 2020-06-05T23:42:32.964361
| 2019-12-27T22:51:40
| 2019-12-27T22:51:40
| 192,575,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,626
|
py
|
# coding: utf-8
"""
OOXML Automation
This API helps users convert Excel and Powerpoint documents into rich, live dashboards and stories. # noqa: E501
The version of the OpenAPI document: 0.1.0-no-tags
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from presalytics_ooxml_automation.configuration import Configuration
class ThemeEffectMap(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'theme_id': 'str',
        'intensity_id': 'int',
        'id': 'str'
    }

    attribute_map = {
        'theme_id': 'themeId',
        'intensity_id': 'intensityId',
        'id': 'id'
    }

    def __init__(self, theme_id=None, intensity_id=None, id=None, local_vars_configuration=None):  # noqa: E501
        """ThemeEffectMap - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._theme_id = None
        self._intensity_id = None
        self._id = None
        self.discriminator = None

        # theme_id is always assigned (may be None); the optional fields
        # are only set when explicitly provided.
        self.theme_id = theme_id
        if intensity_id is not None:
            self.intensity_id = intensity_id
        if id is not None:
            self.id = id

    @property
    def theme_id(self):
        """Gets the theme_id of this ThemeEffectMap.  # noqa: E501


        :return: The theme_id of this ThemeEffectMap.  # noqa: E501
        :rtype: str
        """
        return self._theme_id

    @theme_id.setter
    def theme_id(self, theme_id):
        """Sets the theme_id of this ThemeEffectMap.


        :param theme_id: The theme_id of this ThemeEffectMap.  # noqa: E501
        :type: str
        """

        self._theme_id = theme_id

    @property
    def intensity_id(self):
        """Gets the intensity_id of this ThemeEffectMap.  # noqa: E501


        :return: The intensity_id of this ThemeEffectMap.  # noqa: E501
        :rtype: int
        """
        return self._intensity_id

    @intensity_id.setter
    def intensity_id(self, intensity_id):
        """Sets the intensity_id of this ThemeEffectMap.


        :param intensity_id: The intensity_id of this ThemeEffectMap.  # noqa: E501
        :type: int
        """

        self._intensity_id = intensity_id

    @property
    def id(self):
        """Gets the id of this ThemeEffectMap.  # noqa: E501


        :return: The id of this ThemeEffectMap.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this ThemeEffectMap.


        :param id: The id of this ThemeEffectMap.  # noqa: E501
        :type: str
        """

        self._id = id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ThemeEffectMap):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ThemeEffectMap):
            return True

        return self.to_dict() != other.to_dict()
|
[
"kevin@chart-a-lot.com"
] |
kevin@chart-a-lot.com
|
8e36a3c996cee7f73681b2a989b6f9f54858868b
|
d233c1e5852158c21ee98477616f097d4d64b527
|
/apg4b/1_22.py
|
2f8f53d803fe493c3fd4b4bac6e3be96a68121fd
|
[] |
no_license
|
SILKYMAJOR/atcoder_repo
|
606ab1bfb04de8e750997da907b4a90e1be626fb
|
fa83edefce1ba9300932b13f99c69c0fee3a4db7
|
refs/heads/master
| 2020-07-23T11:06:56.737234
| 2020-01-01T14:57:30
| 2020-01-01T14:57:30
| 207,538,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
from operator import itemgetter

# Read N pairs of integers and print them sorted by the second element.
count = int(input())
rows = [[int(token) for token in input().split()] for _ in range(count)]
for first, second in sorted(rows, key=itemgetter(1)):
    print(first, second)
|
[
"silky.major@gmail.com"
] |
silky.major@gmail.com
|
a331b31ff9a44abb3e4d221a798044ccedbaaab3
|
976aef4484b41665f5e463da48c2bc83a4d192f6
|
/demo.py
|
c7494b0bca42d48a9c277b0416777d5dc309e86e
|
[] |
no_license
|
selysse/MTS_Teta_hackathon
|
0b51da510db012e4353609a444691a4e4693c908
|
b5bcd2b735e6de66eb9a2e5482c22fe7adcdb8dd
|
refs/heads/master
| 2023-08-13T19:54:25.166246
| 2021-09-18T10:22:03
| 2021-09-18T10:22:03
| 407,819,824
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,631
|
py
|
import streamlit as st
import pandas as pd
from random import randint
import base64
import joblib
def get_table_download_link(df):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe
out: href string
"""
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here
return f'<a href="data:file/csv;base64,{b64}" download="answers.csv">Download answers.csv</a>'
model = joblib.load('models/main_model.pickle')
target_name = ['personal','technical']
def data_prep(t):
df = t.copy()
df["start_with_api"] = df["url"].str.contains("^api", regex=True).astype(int)
df["has_userapi"] = df["url"].str.contains("userapi").astype(int)
df["has_googleapis"] = df["url"].str.contains("googleapis").astype(int)
df["size_of_url"] = df["url"].apply(lambda x: len(x))
df["size_of_url_split"] = df["url"].apply(lambda x: len(x.split(".")))
df["clear_url"] = df["url"].apply(lambda x: x.replace(".", " "))
df["minus_count"] = df["url"].str.count("-")
return df
st.title('Demo of host classifier by _V3.0_')
with st.form('text'):
text_input = st.text_area('Enter host name here: ', 'yourHost.com')
submit_button = st.form_submit_button('Predict label')
if submit_button:
if ' ' not in text_input and '.' in text_input and text_input[0]!='.':
raw_data = pd.DataFrame({'url':[text_input]})
test_data = data_prep(raw_data)
predict = model.predict(test_data.drop(columns=["url"]))
st.markdown(f'Predicted label: **{target_name[predict[0]]}**'
' (probability: {:.3f} )'.format(model.predict_proba(test_data.drop(columns=["url"]))[0][predict][0]))
else:
st.write('Enter **correct** host')
st.write('OR')
with st.form('file'):
uploaded_file = st.file_uploader("Upload a csv file", ["csv"])
file_button = st.form_submit_button('Predict labels')
if uploaded_file:
file = pd.read_csv(uploaded_file)
if file_button:
clmn = file.columns
if len(clmn)!=1 and 'url' not in clmn:
st.write('Wrong format of data, please upload data with column named "url" or with only one text column')
else:
if len(clmn)==1:
file.columns=['url']
try:
prep_file = pd.DataFrame({'url':file['url'].values})
test_data = data_prep(prep_file)
predict = model.predict(test_data.drop(columns=["url"]))
prep_file['Prediction']=predict
st.write(prep_file)
st.markdown(get_table_download_link(prep_file), unsafe_allow_html=True)
except:
st.write('Oops! You data format is wrong!\nPlease make sure your data constis of strings!')
|
[
"xtrox@rambler.ru"
] |
xtrox@rambler.ru
|
48b8766fe33d5f9dc24df0f1e39a1bbc72a63a1e
|
5c5770eef858cb2f13d1f128620db860fd590663
|
/main.py
|
1159929222c34fe736af6b7c6ffd266cb87b383a
|
[] |
no_license
|
changjiang1017/1a
|
702b4513d70db8d86db2c20d1f5f56c54db4bb7b
|
bd7b413b8904973562845fa86f43284a52020c98
|
refs/heads/master
| 2023-09-03T09:41:41.008791
| 2021-10-17T09:15:09
| 2021-10-17T09:15:09
| 418,075,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
def human_detect():
global hdc, hmd
huskylens.init_mode(protocolAlgorithm.ALGORITHM_FACE_RECOGNITION)
hdc = 0
hmd = False
while True:
if huskylens.is_appear(1, HUSKYLENSResultType_t.HUSKYLENS_RESULT_BLOCK):
pass
else:
while hmd == False and hdc < 200:
if pins.analog_read_pin(AnalogPin.P0) > 200:
hmd = True
hdc += 1
basic.pause(20)
if hmd == True:
basic.show_icon(IconNames.STICK_FIGURE)
else:
basic.show_icon(IconNames.HOUSE)
hmd = False
hdc = 0
ds = DS1302.create(DigitalPin.P13, DigitalPin.P14, DigitalPin.P15)
huskylens.init_i2c()
OLED.init(128, 64)
Speech.Wait_XFS_Status(Speech.ChipStatus_Type.CHIPSTATUS_INITSUCCESSFUL)
basic.show_icon(IconNames.SMALL_DIAMOND)
human_detect()
def on_forever():
pass
basic.forever(on_forever)
|
[
"92668724+changjiang1017@users.noreply.github.com"
] |
92668724+changjiang1017@users.noreply.github.com
|
8508d3ef621fd000d8fada20c67cbf511edfbb89
|
c62da4d4764de080a45decb14dde339075b61b8a
|
/python-features/statistics/normal-distribution-II.py
|
0b105a729e48b2a8ca833b229be08be5783bd192
|
[] |
no_license
|
Aragor70/Algorithm-Examples
|
402faf92dcce6158743aec15641de8dfa2de2cfd
|
ec20d6b09e9ad328137a8c8921464a9945c7f386
|
refs/heads/master
| 2023-03-28T13:55:37.739302
| 2021-03-07T18:29:51
| 2021-03-07T18:29:51
| 321,797,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
# Part of 10 Days of Statistics at https://www.hackerrank.com/
# Objective
# In this challenge, we practice calculating the normal distributions of Cumulative Probability.
# The final grades for a Physics exam taken by a large group of students have a mean of 70
# and a standard deviation of 10.
# If we can approximate the distribution of these grades by a normal distribution, what percentage of the students:
# Scored higher than 80 (i.e., have a grade > 80)?
# Passed the test (i.e., have a grade >= 60)?
# Failed the test (i.e., have a grade < 60)?
# Find and print the answer to each question on a new line, rounded to a scale of 2 decimal places.
# Gauss error function => math.erf(x)
# https://docs.python.org/3/library/math.html#math.erf
import math
def distribution(x, std, mean):
return ( 0.5 * ( 1 + math.erf( ( x - mean) / ( 10 * (2 ** 0.5) ) )) )
value_A = distribution(80, 10, 70)
value_B = distribution(60, 10, 70)
print(round( (1 - value_A) * 100 , 2))
print(round( (1 - value_B) * 100 , 2))
print(round(value_B * 100, 2))
# python [filename].py
|
[
"mikis3@o2.pl"
] |
mikis3@o2.pl
|
3faaee0a13a0f4cb3921d81806d5c867e0523a9f
|
caba36cdcd46d8d268f58d32ee6ebaecdd3ca66e
|
/Ferme/app/forms.py
|
b5e0848bebfeb867ae23dbb9a210a1f75401ca89
|
[] |
no_license
|
kvzon1984/FermeV3.0_Portafolio
|
794d536fa3e0f10082d8a3790dda1f10476d5a35
|
3df5d75c95ebd1a3aa96bf18013ce27052fbe14c
|
refs/heads/master
| 2023-07-11T03:40:06.534668
| 2021-08-16T02:52:38
| 2021-08-16T02:52:38
| 364,424,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,576
|
py
|
from django import forms
from django.db.models import fields
from django.forms import widgets
from .models import Cliente, Empleado, FamiliaProducto, Producto,OrdenesCompra,Proveedor, TipoCliente, TipoProducto,Comuna
from django.contrib.auth.forms import UserCreationForm
class DateInput(forms.DateInput):
input_type = 'date'
class RegistroForm(forms.ModelForm):
class Meta:
model = Cliente
fields = [
"pnombre",
"snombre" ,
"appaterno",
"apmaterno",
"run_cliente",
"dvrun",
"fecha_nacimiento",
"celular",
"correo",
"direccion",
"id_comuna",
"cod_tipo_cliente",
"razon_social"
]
labels = {
"pnombre" : 'Primer nombre',
"snombre" : '<br>Segundo nombre' ,
"appaterno" : '<br>Apellido paterno',
"apmaterno" : '<br>Apellido materno',
"run_cliente" : '<br>Rut Cliente',
"dvrun" : '<br>dv' ,
"fecha_nacimiento": '<br>Fecha de nacimiento',
"celular": '<br>Telefono',
"correo": '<br>Email',
"direccion" : '<br>Direccion',
"id_comuna" : '<br>Comuna',
"cod_tipo_cliente" : '<br>Tipo de Cliente',
"razon_social" : ''
}
widgets = {
'run_cliente' : forms.TextInput(attrs={'class':'input','placeholder':'12345678'}),
'fecha_nacimiento': DateInput(),
'razon_social':forms.TextInput(attrs={'hidden':'hidden'})
}
id_comuna = forms.ModelChoiceField(
queryset= Comuna.objects.all(),
label='<br>Comuna'
)
cod_tipo_cliente = forms.ModelChoiceField(
queryset= TipoCliente.objects.all(),
label='<br>Tipo de Cliente'
)
class UserPass(UserCreationForm):
pass
class AgregarProductoForm(forms.ModelForm):
class Meta:
model = Producto
fields = [
'descripcion',
'fecha_vencimiento',
'stock',
'stock_critico',
'precio',
'foto',
'cod_proveedor',
'cod_familia',
'cod_tipo_producto'
]
labels = {
"descripcion":'Nombre del producto',
"fecha_vencimiento":'<br>Fecha de vencimiento',
"stock":'<br>Stock',
"stock_critico":' <br>Stock critico',
"precio":' <br>Precio',
"foto":' <br>Seleccione la imagen del producto'
}
widgets = {
"descripcion": forms.TextInput(attrs={'class':'input','placeholder':'Ingrese el nombre del producto'}),
"fecha_vencimiento":DateInput(attrs={'type':'date'})
}
cod_tipo_producto = forms.ModelChoiceField(
queryset = TipoProducto.objects.all(),
label='<br>Tipo Productos'
)
cod_proveedor = forms.ModelChoiceField(
queryset = Proveedor.objects.all(),
label='<br>Proveedor'
)
cod_familia = forms.ModelChoiceField(
queryset = FamiliaProducto.objects.all(),
label='<br>Familia de producto'
)
#agregar empleado
#class EmpleadoAgregarOrdenCompra(forms.ModelForm):
# class Meta:
# model = Empleado
# fields = {
# 'id_cargo'
#}
class AgregarOrdenCompra(forms.ModelForm):
class Meta:
model = OrdenesCompra
fields = [
'cod_proveedor',
'run_empleado',
'estado',
'fecha_emision',
'fecha_recepcion'
]
labels={
"cod_proveedor":'Codigo Proveedor ',
"run_empleado":'<br>Rut Empleado',
"estado":'<br>Estado Orden Compra',
"fecha_emision":'<br>Fecha Emision',
"fecha_recepcion":'<br>Fecha Recepcion',
}
widgets = {
'fecha_emision': DateInput(),
'fecha_recepcion': DateInput()
}
class ProveedorForm(forms.ModelForm):
class Meta:
model= Proveedor
fields = [
'run_proveedor','nom_proveedor','celular_proveedor'
]
labels={
"run_proveedor":'Rut Proveedor ',
"nom_proveedor":'<br>Nombre',
"celular_proveedor":'<br>Celular ',
}
widgets = {
"run_proveedor": forms.TextInput(attrs={'class':'input','placeholder':'12345678-1'}),
"nom_proveedor": forms.TextInput(attrs={'class':'input','placeholder':'Ingrese Datos'}),
"celular_proveedor": forms.TextInput(attrs={'class':'input','placeholder':'91234567'}),
}
class TipoProductoForm(forms.ModelForm):
class Meta:
model = TipoProducto
fields = [
'descripcion'
]
labels={
'descripcion': 'Nombre de Tipo Producto'
}
widgets = {
"descripcion": forms.TextInput(attrs={'class':'input','placeholder':'Ingrese un tipo de producto','title':'Tipo de producto' }),
}
class FamiliaProductoForm(forms.ModelForm):
class Meta:
model = FamiliaProducto
fields = [
'descripcion'
]
labels = {
'descripcion': 'Familia del producto'
}
widgets = {'descripcion': forms.TextInput(attrs={'class' : 'input'})}
|
[
"ivega.josue@gmail.com"
] |
ivega.josue@gmail.com
|
b3a9c46f690b95732f118b30580ab5f528859ea7
|
fcca9d5118d87d815208dd881c7d87943ee4a932
|
/rclpy/test/test_callback_group.py
|
8538407f6c6ac63e6100fed7cf9a1c86c86f5dc5
|
[
"Apache-2.0"
] |
permissive
|
Parallel-Hao/rclpy
|
fe1b4913f804ff4e5c3d5bb629cadf3bfb75352c
|
20dd7abd5bba11641d39056cc481bc89f9f961f7
|
refs/heads/Brand
| 2020-05-03T01:18:01.326845
| 2019-04-02T09:15:55
| 2019-04-02T09:15:55
| 178,333,300
| 0
| 1
|
Apache-2.0
| 2020-02-26T07:17:34
| 2019-03-29T04:39:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,938
|
py
|
# Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from rcl_interfaces.srv import GetParameters
import rclpy
from rclpy.callback_groups import MutuallyExclusiveCallbackGroup
from rclpy.callback_groups import ReentrantCallbackGroup
from test_msgs.msg import Primitives
class TestCallbackGroup(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.context = rclpy.context.Context()
rclpy.init(context=cls.context)
cls.node = rclpy.create_node('TestCallbackGroup', namespace='/rclpy', context=cls.context)
@classmethod
def tearDownClass(cls):
cls.node.destroy_node()
rclpy.shutdown(context=cls.context)
def test_reentrant_group(self):
self.assertIsNotNone(self.node.handle)
group = ReentrantCallbackGroup()
t1 = self.node.create_timer(1.0, lambda: None, callback_group=group)
t2 = self.node.create_timer(1.0, lambda: None, callback_group=group)
self.assertTrue(group.can_execute(t1))
self.assertTrue(group.can_execute(t2))
self.assertTrue(group.beginning_execution(t1))
self.assertTrue(group.beginning_execution(t2))
def test_mutually_exclusive_group(self):
self.assertIsNotNone(self.node.handle)
group = MutuallyExclusiveCallbackGroup()
t1 = self.node.create_timer(1.0, lambda: None, callback_group=group)
t2 = self.node.create_timer(1.0, lambda: None, callback_group=group)
self.assertTrue(group.can_execute(t1))
self.assertTrue(group.can_execute(t2))
self.assertTrue(group.beginning_execution(t1))
self.assertFalse(group.can_execute(t2))
self.assertFalse(group.beginning_execution(t2))
group.ending_execution(t1)
self.assertTrue(group.can_execute(t2))
self.assertTrue(group.beginning_execution(t2))
def test_create_timer_with_group(self):
tmr1 = self.node.create_timer(1.0, lambda: None)
group = ReentrantCallbackGroup()
tmr2 = self.node.create_timer(1.0, lambda: None, callback_group=group)
self.assertFalse(group.has_entity(tmr1))
self.assertTrue(group.has_entity(tmr2))
def test_create_subscription_with_group(self):
sub1 = self.node.create_subscription(Primitives, 'chatter', lambda msg: print(msg))
group = ReentrantCallbackGroup()
sub2 = self.node.create_subscription(
Primitives, 'chatter', lambda msg: print(msg), callback_group=group)
self.assertFalse(group.has_entity(sub1))
self.assertTrue(group.has_entity(sub2))
def test_create_client_with_group(self):
cli1 = self.node.create_client(GetParameters, 'get/parameters')
group = ReentrantCallbackGroup()
cli2 = self.node.create_client(GetParameters, 'get/parameters', callback_group=group)
self.assertFalse(group.has_entity(cli1))
self.assertTrue(group.has_entity(cli2))
def test_create_service_with_group(self):
srv1 = self.node.create_service(GetParameters, 'get/parameters', lambda req: None)
group = ReentrantCallbackGroup()
srv2 = self.node.create_service(
GetParameters, 'get/parameters', lambda req: None, callback_group=group)
self.assertFalse(group.has_entity(srv1))
self.assertTrue(group.has_entity(srv2))
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
Parallel-Hao.noreply@github.com
|
f7de7c2ef755e5893d687912c0b74ed7148a8d02
|
6b453d913b1ae6697da738ddae9df013e8128d91
|
/app/members/urls.py
|
1a117fa9f25eacf78656a78f5d5d3ef9928e11ec
|
[] |
no_license
|
moorekwon/instagram
|
9703ecb1aed460ddec685c0bd06fe0fac3807548
|
aaeca79f0d2765a24dd780bb12848c2c7b76a009
|
refs/heads/master
| 2022-12-12T18:13:37.536048
| 2020-02-03T08:33:16
| 2020-02-03T08:33:16
| 229,711,643
| 0
| 0
| null | 2022-12-08T03:32:50
| 2019-12-23T08:39:38
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 267
|
py
|
from django.urls import path
from . import views
app_name = 'members'
urlpatterns = [
path('login/', views.login_view, name='login'),
path('logout/', views.logout_view, name='logout-view'),
path('naver-login/', views.naver_login, name='naver-login')
]
|
[
"raccoonhj33@gmail.com"
] |
raccoonhj33@gmail.com
|
2dd0198f6e8c5fa6b71b472694204fc3ec02e9dd
|
acee409d0a45506c49d78d5e26f3e992b6eea856
|
/python/layer.py
|
5ec5c28d8f936826733430c6fce92ebe779d2d80
|
[
"Apache-2.0"
] |
permissive
|
m-thirumal/aws-lambda-layer
|
d74a3cf8edf527d00da48c328dbf1a9ba5283bbe
|
032d875219c729f94c8a9b32f0240cdd18c36bf2
|
refs/heads/main
| 2023-06-18T00:53:44.245617
| 2021-07-18T07:04:05
| 2021-07-18T07:04:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
def layer_fun():
return "Hi, I am from layer_fun(), if you see me then your code is working!!!"
|
[
"m.thirumal@hotmail.com"
] |
m.thirumal@hotmail.com
|
3ad8858aa59890f2add9d983a44c9c960eddbfa3
|
278b98798ab151565e728d34f598ff376fb0a332
|
/Amazon/code/BackEnd/delete_database.py
|
c0ff3986b0992c4299422c79a9fa04551e27523a
|
[] |
no_license
|
xwgnick/MiniAmzon
|
aa453e6d134f5f8acfdca712da82d1fb9388879b
|
31139a47b6b96e9f949bc8e64c474df126d1c618
|
refs/heads/master
| 2022-07-24T23:32:44.264585
| 2020-05-22T20:56:29
| 2020-05-22T20:56:29
| 260,796,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
import psycopg2
conn = psycopg2.connect(database = 'wx50db', user = 'wx50', password = 'wx50', host = 'localhost', port = '5432')
cur = conn.cursor()
cur.execute("DELETE FROM amazonapp_warehouse")
cur.execute("DELETE FROM amazonapp_cart")
conn.commit()
|
[
"492251476xwg@gmail.com"
] |
492251476xwg@gmail.com
|
977fd006132a5a85bddc68ea4d248132df0082ce
|
39827f381f095704902080c4291871020d7dfb24
|
/python-impl/bls_sig_g1.py
|
19bad1b29d98ce274a55faf1d96849e50270aab0
|
[
"Apache-2.0"
] |
permissive
|
valerini/bls_sigs_ref
|
eee0ff6469e93adac36d084feffb83b011f3d78a
|
14faaa54a79583396f35475b7b1a27ec6d92c4cf
|
refs/heads/master
| 2020-07-04T22:18:59.124024
| 2019-08-04T21:58:22
| 2019-08-04T22:29:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
#!/usr/bin/python
#
# (C) Riad S. Wahby <rsw@cs.stanford.edu>
from consts import g1suite
from curve_ops import g2gen, point_mul, point_neg
from hash_to_field import Hr
from opt_swu_g1 import map2curve_osswu
from pairing import multi_pairing
from util import get_cmdline_options, print_g1_hex, print_g2_hex, print_tv_sig
# sk must be bytes()
def keygen(sk):
x_prime = Hr(sk)
return (x_prime, point_mul(x_prime, g2gen))
# signing as in
# https://github.com/pairingwg/bls_standard/blob/master/minutes/spec-v1.md#basic-signature-in-g1
# sign takes in x_prime (the output of keygen), a message, and a ciphersuite id
# returns a signature in G1
def sign(x_prime, msg, ciphersuite):
P = map2curve_osswu(msg, ciphersuite)
return point_mul(x_prime, P)
# verification corresponding to sign()
# returns True if the signature is correct, False otherwise
def verify(pk, sig, msg, ciphersuite):
P = map2curve_osswu(msg, ciphersuite)
return multi_pairing((P, sig), (pk, point_neg(g2gen))) == 1
if __name__ == "__main__":
def main():
opts = get_cmdline_options()
ver_fn = verify if opts.verify else None
for sig_in in opts.test_inputs:
print_tv_sig(sig_in, g1suite, sign, keygen, print_g2_hex, print_g1_hex, ver_fn, opts.quiet)
main()
|
[
"kwantam@gmail.com"
] |
kwantam@gmail.com
|
53b6ae3c7727f0d5ac9dd683355813314ab047fb
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/FWCore/Integration/test/testConcurrentIOVsESConcurrentSource_cfg.py
|
461e011539e736d1c861cfd0d22a1e3cbb9ecf66
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.source = cms.Source("EmptySource",
firstRun = cms.untracked.uint32(1),
firstLuminosityBlock = cms.untracked.uint32(1),
firstEvent = cms.untracked.uint32(1),
numberEventsInLuminosityBlock = cms.untracked.uint32(1),
numberEventsInRun = cms.untracked.uint32(100)
)
process.maxEvents.input = 8
process.options = dict(
numberOfThreads = 4,
numberOfStreams = 4,
numberOfConcurrentRuns = 1,
numberOfConcurrentLuminosityBlocks = 4,
eventSetup = dict(
numberOfConcurrentIOVs = 2
)
)
process.testESSource = cms.ESSource("TestESConcurrentSource",
firstValidLumis = cms.vuint32(1, 4, 6, 7, 8, 9),
iterations = cms.uint32(10*1000*1000),
checkIOVInitialization = cms.bool(True),
expectedNumberOfConcurrentIOVs = cms.uint32(2)
)
process.concurrentIOVESProducer = cms.ESProducer("ConcurrentIOVESProducer")
process.test = cms.EDAnalyzer("ConcurrentIOVAnalyzer",
checkExpectedValues = cms.untracked.bool(False)
)
process.testOther = cms.EDAnalyzer("ConcurrentIOVAnalyzer",
checkExpectedValues = cms.untracked.bool(False),
fromSource = cms.untracked.ESInputTag(":other")
)
process.busy1 = cms.EDProducer("BusyWaitIntProducer",ivalue = cms.int32(1), iterations = cms.uint32(10*1000*1000))
process.p1 = cms.Path(process.busy1 * process.test * process.testOther)
#process.add_(cms.Service("Tracer"))
|
[
"chrisdjones15@gmail.com"
] |
chrisdjones15@gmail.com
|
1b0d06848054f92113d19ae82b5dd5c65a1d6a0f
|
1b026d5a0450ccee784e8fb0fc1216354716135c
|
/tarea_3/generar_respuestas.py
|
548084bbbaee3b47dd1507d108921834f4219436
|
[] |
no_license
|
lross2k/tareas-electriciad-1
|
1752560b5a40b4ea5bf0ee70666b830841ca2d38
|
7914f7a2db9281d6f343fec7af193982b06c8561
|
refs/heads/master
| 2023-06-22T14:15:21.286328
| 2021-07-20T14:40:46
| 2021-07-20T14:40:46
| 294,838,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
import csv
import matplotlib.pyplot as plt
import numpy as np
# función usada para redondear valores reales a 3 decimales
def redondear(func):
func = "%.3f" % func
return func
# función encargada de calcular A, B, ..., H, I y retornar un array para el CSV
def valores(t_x):
# constantes
A = -20
B = -2
C = -30
E = -5
G = 4
# dependientes de t_x
D = (-20 * np.exp( -2 * (t_x + 0.5) ) + 30 ) * np.exp( 2 * (t_x + 0.5 ) )
F = (-30 + (-20 * np.exp( -2 * (t_x + 0.5) ) + 30 ) * np.exp( -2 ) + 5) * np.exp( 2 * (t_x + 1.5) )
H = H = -2 * 0.1 * D
I = I = -2 * 0.1 * F
t_x = "%0.1f" % t_x
return [t_x, A, B, C, redondear(D), E, redondear(F), G, redondear(H), redondear(I)]
# generar archivo CSV e introducir los datos para cada t_x
with open("valores.csv", 'w', newline = '') as archivo:
escribir = csv.writer(archivo)
escribir.writerow(['tx','A','B','C','D','E','F','G','H','I'])
for t_x in np.arange(0.1,1.1,0.1):
escribir.writerow( valores(t_x) )
# gráficas
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True)
t = np.arange(0, 4.5, 1e-1)
# nombres para leyenda
def nombres(t_x):
nombre = "$t_x$ = %0.1fs" % t_x
return nombre
# gráfica de voltaje
for t_x in np.arange(0.1,1.1,0.1):
v_t = (t < (t_x + 0.5)) * -20 * np.exp( -2 * t ) + (t >= (t_x + 0.5)) * (t < (t_x + 1.5)) * (-30 + (-20 * np.exp( -2 *(t_x + 0.5) ) + 30 ) * np.exp( -2 * ( t - t_x - 0.5 ) ) ) + (t >= (t_x + 1.5)) * ( -5 + ( -30 + (-20 * np.exp( -2 * (t_x + 0.5) -2 ) +30 * np.exp(-2) +5 )) * np.exp( 2* (t_x + 1.5)) * np.exp( - 2 * t) )
ax1.plot(t,v_t,label = nombres(t_x))
ax1.set_ylabel('$V_C$ (V)')
# gráfica de corriente
for t_x in np.arange(0.1,1.1,0.1):
i_t = (t < (t_x + 0.5)) * 4 * np.exp(-2 * t) + (t >= (t_x + 0.5)) * (t < (t_x + 1.5)) * -2 * 0.1 * (-20 * np.exp( -2 *(t_x + 0.5) ) + 30 ) * np.exp( -2 * t ) * np.exp( -2 * (- t_x - 0.5) ) + (t >= (t_x + 1.5)) * -2 * 0.1 * ( -5 + ( -30 + (-20 * np.exp( -2 * (t_x + 0.5) -2 ) +30 * np.exp(-2) +5 )) * np.exp( 2* (t_x + 1.5)) * np.exp( - 2 * t) )
ax2.plot(t,i_t)
ax2.set_ylabel('$i_C$ (mA)')
# dar formato a los subplots
fig.legend(bbox_to_anchor=(0.17, 0.002, 0.7, 0.98),loc='upper left', fontsize = 'xx-small', ncol=5, mode="expand", borderaxespad=0.)
plt.xlabel('Tiempo (s)')
plt.savefig('graf.png')
plt.show()
|
[
"luisross2000@outlook.com"
] |
luisross2000@outlook.com
|
4b26d2288fe6cceaed839816ed06bdce9f6e52d8
|
f0ae65bddedea44b1e66f3d235a901e62afae3f2
|
/macgyyver_stuff/parse_input.py
|
9de1a35ebd5d51857f29e74f682675a2370a7704
|
[] |
no_license
|
Brandon-Valley/my_movie_tools
|
d0a2ba91cda054c1b68709f1a9082028842e83a1
|
371742bfeaa0cfa2985ce06a6865f6ae09445029
|
refs/heads/master
| 2023-01-10T00:06:48.696103
| 2022-12-28T14:09:20
| 2022-12-28T14:09:20
| 221,057,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
import pyperclip
INPUT_PATH = 'input.txt'
def read_text_file(file_path):
with open(file_path, 'r', encoding='utf-8') as text_file: # can throw FileNotFoundError
result = tuple(l.rstrip() for l in text_file.readlines())
return result
raw_in = read_text_file(INPUT_PATH)
print(raw_in)
in_str = ''
for line in raw_in:
in_str += line
print(in_str)
s_raw_in = in_str.split('"')
print(s_raw_in)
e_l = []
for elm_num, elm in enumerate(s_raw_in):
if elm_num % 2 != 0:
e_l.append(elm)
print(e_l)
pyperclip.copy(str(e_l))
spam = pyperclip.paste()
|
[
"bavvh8@mst.edu"
] |
bavvh8@mst.edu
|
cd397df91774060d5ea6fd8a7135eecaae70687f
|
b785c5d007367903fda90a708264be5ac8f65d02
|
/task.py
|
b520806f366eb4d3440afd500cbd10be748e27e4
|
[] |
no_license
|
Sakshichouhan2/Dice-roll
|
c23747fa34d3d4b256c65f14d7c1a5957b3debe2
|
0cf9b9662bd7951288d16dfa7c1011488c73c2bf
|
refs/heads/main
| 2023-03-03T11:20:58.676586
| 2021-02-16T07:26:48
| 2021-02-16T07:26:48
| 339,145,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 829
|
py
|
'''“Dice-roll" that generates the random value between 1 and 6 every minute . and sends
to Redis“Sender" that reads the value from Redis and pushes to SQS“Reader" that reads
the value from SQS and prints it to the stdout. Ideally,A reader could be deployed in
AWS.'''
import random
try:
min_dice = 1
max_dice = 6
except:
print('Input invalid program will revert to defaults.')
again = True
while again:
print(random.randint(min_dice, max_dice))
dice_again = input('Want to roll the dice again? ')
if dice_again.lower() == 'yes' or dice_again.lower() == 'y' or dice_again.upper() == "Yes" or dice_again.upper() == "Y":
continue
elif dice_again.lower() == 'no' or dice_again.lower() == 'n' or dice_again.upper() == "No" or dice_again.upper() == "N":
exit()
|
[
"chouhansakshi88@gmail.com"
] |
chouhansakshi88@gmail.com
|
e8c9a9436af9cb169a4756a1d72c4bf0258943d8
|
dc6c429e3fb5c85f5742d50c8d7e07ab24f138df
|
/supportgroup.py
|
a479f9d61a27eeae6b847dd3807a5b10920514ce
|
[] |
no_license
|
sasqwatch/utils
|
47a57cc70f3819ef5525b1c3e687c8d16f6edb5b
|
c708440efb85ab720877d1fbb82447df9e55e8c6
|
refs/heads/master
| 2020-04-17T19:35:26.916041
| 2018-08-14T20:37:39
| 2018-08-14T20:37:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,964
|
py
|
#!/usr/bin/env python3
"""
Updated this to allow changing processes other than self:
https://gist.github.com/epinna/8ce25ac36a7710cdd1806764c647cf99
"""
import argparse
import os
import re
def update_argv(pid, newargs=None):
with open('/proc/{}/cmdline'.format(pid), 'rb') as f:
cmdline = f.read()
cmdline_len = len(cmdline) - 1
if newargs is None:
return cmdline_len
with open('/proc/{}/maps'.format(pid)) as f:
maps = f.read()
stack_start, stack_end = [int(x, 16) for x in re.search('([0-9a-f]+)-([0-9a-f]+).*\[stack\]', maps).groups()]
stack_size = stack_end - stack_start
with open('/proc/{}/mem'.format(pid), 'rb+') as mem:
mem.seek(stack_start)
data = mem.read(stack_size)
argv_addr = stack_start + data.find(cmdline)
mem.seek(argv_addr)
newargs = b'\x00'.join(newargs.strip(b'\x00').split(b' '))
if len(newargs) > cmdline_len:
newargs = newargs[:cmdline_len]
print('WARNING: You gave too many characters. Truncating to "{}"...'.format(newargs.decode().replace('\x00',' ')))
newargs += b'\x00'*(cmdline_len - len(newargs) + 1)
mem.write(newargs)
return len(newargs)
def main():
    """Parse CLI options and either report argv capacity or rename the process."""
    arg_parser = argparse.ArgumentParser(description='Renames a process in process list. Must be root!')
    arg_parser.add_argument('-p', '--pid', required=True, type=int, help='PID of process to rename')
    arg_parser.add_argument('--rename', default=None, type=str, help='The name/args to rename the process')
    opts = arg_parser.parse_args()
    if opts.rename is not None:
        # Rewrite mode: push the new (space-separated) argv into the process.
        update_argv(opts.pid, newargs=opts.rename.encode())
        print('Process name updated!')
    else:
        # Query mode: report how many characters the existing argv can hold.
        print('You can rename the process name for pid={} with {} characters'.format(opts.pid, update_argv(opts.pid)))
    return
if __name__ == '__main__':
    # Writing /proc/<pid>/mem requires root; bail out early with a clear
    # message instead of failing later with a PermissionError.
    if os.geteuid() != 0:
        print('You must run this as root!')
    else:
        main()
|
[
"noreply@github.com"
] |
sasqwatch.noreply@github.com
|
43d3b7fcd74fe33c44b8d93e53d04867f5334898
|
04803c70bb97012b7d500a177ac0240fb2ddbe38
|
/1heptane/pdep/network4244_1.py
|
5145107f893228a8e18edf49ec3b8ccbfc48d9cc
|
[] |
no_license
|
shenghuiqin/chpd
|
735e0415f6688d88579fc935459c1b0f53596d1d
|
396ba54629036e3f2be0b3fabe09b78c90d56939
|
refs/heads/master
| 2023-03-01T23:29:02.118150
| 2019-10-05T04:02:23
| 2019-10-05T04:02:23
| 192,084,217
| 0
| 0
| null | 2019-06-18T18:33:13
| 2019-06-15T13:52:28
|
HTML
|
UTF-8
|
Python
| false
| false
| 84,283
|
py
|
species(
label = 'C=C([CH]C)C[CH]C(24171)',
structure = SMILES('[CH2]C(=CC)C[CH]C'),
E0 = (230.563,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,357.285,2038.33],'cm^-1')),
HinderedRotor(inertia=(0.0814701,'amu*angstrom^2'), symmetry=1, barrier=(7.37999,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0814702,'amu*angstrom^2'), symmetry=1, barrier=(7.37999,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0013206,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0814701,'amu*angstrom^2'), symmetry=1, barrier=(7.37998,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.526491,'amu*angstrom^2'), symmetry=1, barrier=(47.6916,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.840817,0.0668233,-3.93337e-05,1.17465e-08,-1.46239e-12,27845.5,29.2386], Tmin=(100,'K'), Tmax=(1751.26,'K')), NASAPolynomial(coeffs=[12.8576,0.0393763,-1.58248e-05,2.79727e-09,-1.84852e-13,23636.5,-35.4691], Tmin=(1751.26,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(230.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(RCCJC) + radical(Allyl_P)"""),
)
species(
label = 'C3H6(72)',
structure = SMILES('C=CC'),
E0 = (5.9763,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.497558,'amu*angstrom^2'), symmetry=1, barrier=(11.4398,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (42.0797,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2218.31,'J/mol'), sigma=(4.982,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.31912,0.00817959,3.34736e-05,-4.36194e-08,1.58213e-11,749.325,9.54025], Tmin=(100,'K'), Tmax=(983.754,'K')), NASAPolynomial(coeffs=[5.36755,0.0170743,-6.35108e-06,1.1662e-09,-8.2762e-14,-487.138,-4.54468], Tmin=(983.754,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(5.9763,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(203.705,'J/(mol*K)'), label="""C3H6""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = 'CH3CHCCH2(18175)',
structure = SMILES('C=C=CC'),
E0 = (145.615,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,540,610,2055,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.759584,'amu*angstrom^2'), symmetry=1, barrier=(17.4643,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2996.71,'J/mol'), sigma=(5.18551,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.74635,0.0218189,8.22353e-06,-2.14768e-08,8.55624e-12,17563.6,12.7381], Tmin=(100,'K'), Tmax=(1025.6,'K')), NASAPolynomial(coeffs=[6.82078,0.0192338,-7.45622e-06,1.36536e-09,-9.53195e-14,16028,-10.4333], Tmin=(1025.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(145.615,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), label="""CH3CHCCH2""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2]C1([CH]C)CC1C(24224)',
structure = SMILES('[CH2]C1([CH]C)CC1C'),
E0 = (316.349,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.968205,0.0489647,2.86241e-05,-6.7546e-08,2.77792e-11,38172.7,27.7912], Tmin=(100,'K'), Tmax=(1002.45,'K')), NASAPolynomial(coeffs=[15.0332,0.0350469,-1.37018e-05,2.60034e-09,-1.88281e-13,33232.3,-50.6754], Tmin=(1002.45,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(316.349,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsCs) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Cyclopropane) + radical(Cs_S) + radical(Neopentyl)"""),
)
species(
label = 'H(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = '[CH2]C(C=CC)=CC(24268)',
structure = SMILES('[CH2]C(C=CC)=CC'),
E0 = (135.779,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3100,440,815,1455,1000,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.729417,'amu*angstrom^2'), symmetry=1, barrier=(16.7707,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.729494,'amu*angstrom^2'), symmetry=1, barrier=(16.7725,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.729468,'amu*angstrom^2'), symmetry=1, barrier=(16.7719,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.729518,'amu*angstrom^2'), symmetry=1, barrier=(16.7731,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (95.1622,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.465196,0.0665121,-2.88623e-05,-8.26848e-09,7.50332e-12,16467.4,25.1769], Tmin=(100,'K'), Tmax=(1031.76,'K')), NASAPolynomial(coeffs=[14.9174,0.0329066,-1.26059e-05,2.29195e-09,-1.59489e-13,12291.6,-50.7825], Tmin=(1031.76,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(135.779,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(415.724,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + radical(Allyl_P)"""),
)
species(
label = 'C=CCC(=C)[CH]C(24175)',
structure = SMILES('[CH2]C(=CC)CC=C'),
E0 = (165.168,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,978.543],'cm^-1')),
HinderedRotor(inertia=(0.075462,'amu*angstrom^2'), symmetry=1, barrier=(1.73502,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.754747,'amu*angstrom^2'), symmetry=1, barrier=(17.3531,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.754909,'amu*angstrom^2'), symmetry=1, barrier=(17.3568,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.75496,'amu*angstrom^2'), symmetry=1, barrier=(17.358,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (95.1622,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.681576,0.0618448,-2.07951e-05,-1.24754e-08,7.90273e-12,19994.1,27.5811], Tmin=(100,'K'), Tmax=(1063.47,'K')), NASAPolynomial(coeffs=[13.5977,0.0351299,-1.39558e-05,2.57106e-09,-1.79378e-13,16010.5,-41.3398], Tmin=(1063.47,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(165.168,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(415.724,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P)"""),
)
species(
label = 'C=[C][CH]C(18176)',
structure = SMILES('[CH2][C]=CC'),
E0 = (361.056,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.352622,'amu*angstrom^2'), symmetry=1, barrier=(8.10748,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.828631,'amu*angstrom^2'), symmetry=1, barrier=(19.0519,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.42015,0.030446,-1.69076e-05,4.64684e-09,-5.12013e-13,43485.7,14.8304], Tmin=(100,'K'), Tmax=(2065.83,'K')), NASAPolynomial(coeffs=[10.7464,0.014324,-5.20136e-06,8.69079e-10,-5.48385e-14,40045.6,-31.3799], Tmin=(2065.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(361.056,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = 'C3H6(T)(143)',
structure = SMILES('[CH2][CH]C'),
E0 = (284.865,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000],'cm^-1')),
HinderedRotor(inertia=(0.238389,'amu*angstrom^2'), symmetry=1, barrier=(5.48103,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00909639,'amu*angstrom^2'), symmetry=1, barrier=(22.1005,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (42.0797,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.93778,0.0190991,4.26842e-06,-1.44873e-08,5.74941e-12,34303.2,12.9695], Tmin=(100,'K'), Tmax=(1046.81,'K')), NASAPolynomial(coeffs=[5.93909,0.0171892,-6.69152e-06,1.21546e-09,-8.39795e-14,33151.2,-4.14888], Tmin=(1046.81,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(284.865,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(199.547,'J/(mol*K)'), label="""C3H6(T)""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2]C([CH]CC)=CC(24235)',
structure = SMILES('[CH2]C([CH]CC)=CC'),
E0 = (177.229,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.550561,0.0647337,-2.2494e-05,-1.02113e-08,6.70686e-12,21449.3,26.446], Tmin=(100,'K'), Tmax=(1097.32,'K')), NASAPolynomial(coeffs=[13.2783,0.0390642,-1.57369e-05,2.89635e-09,-2.00968e-13,17408.2,-41.8261], Tmin=(1097.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(177.229,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(Allyl_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2]CCC([CH2])=CC(24269)',
structure = SMILES('[CH2]CCC([CH2])=CC'),
E0 = (241.363,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,350,440,435,1725,1972.66,4000],'cm^-1')),
HinderedRotor(inertia=(0.159683,'amu*angstrom^2'), symmetry=1, barrier=(9.37025,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.49658,'amu*angstrom^2'), symmetry=1, barrier=(29.1363,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.159684,'amu*angstrom^2'), symmetry=1, barrier=(9.37009,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.496523,'amu*angstrom^2'), symmetry=1, barrier=(29.1365,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.496531,'amu*angstrom^2'), symmetry=1, barrier=(29.1364,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.187715,0.0727305,-4.74989e-05,1.57475e-08,-2.11977e-12,29175.8,30.6649], Tmin=(100,'K'), Tmax=(1716.14,'K')), NASAPolynomial(coeffs=[17.3493,0.03273,-1.25364e-05,2.16563e-09,-1.41227e-13,23285.4,-61.3985], Tmin=(1716.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(241.363,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(RCCJ)"""),
)
species(
label = 'C[CH][CH]C(C)=CC(24270)',
structure = SMILES('C[CH]C=C(C)[CH]C'),
E0 = (167.977,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.82433,0.0581713,-7.43779e-06,-2.31978e-08,1.06492e-11,20327.2,24.7437], Tmin=(100,'K'), Tmax=(1080.15,'K')), NASAPolynomial(coeffs=[12.103,0.0405352,-1.6457e-05,3.05113e-09,-2.1296e-13,16483,-37.0552], Tmin=(1080.15,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(167.977,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(Allyl_S) + radical(Allyl_S)"""),
)
species(
label = 'C[C]=C(C)C[CH]C(24271)',
structure = SMILES('C[C]=C(C)C[CH]C'),
E0 = (316.905,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3025,407.5,1350,352.5,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,1685,370,350,440,435,1725,226.947,2510.41],'cm^-1')),
HinderedRotor(inertia=(0.00327337,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.202056,'amu*angstrom^2'), symmetry=1, barrier=(7.38475,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.202063,'amu*angstrom^2'), symmetry=1, barrier=(7.38462,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.202085,'amu*angstrom^2'), symmetry=1, barrier=(7.38462,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.202084,'amu*angstrom^2'), symmetry=1, barrier=(7.38457,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.02572,0.0541081,3.51348e-05,-1.85085e-07,1.69623e-10,38174.7,25.4701], Tmin=(100,'K'), Tmax=(419.643,'K')), NASAPolynomial(coeffs=[3.82194,0.0538254,-2.40443e-05,4.55016e-09,-3.16869e-13,37875.6,16.5975], Tmin=(419.643,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(316.905,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(RCCJC) + radical(Cds_S)"""),
)
species(
label = '[CH2]C(=[C]C)CCC(24272)',
structure = SMILES('[CH2]C(=[C]C)CCC'),
E0 = (273.958,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3100,440,815,1455,1000,350,440,435,1725,248.252,248.351],'cm^-1')),
HinderedRotor(inertia=(0.00273459,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00273508,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.132651,'amu*angstrom^2'), symmetry=1, barrier=(5.80247,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.295917,'amu*angstrom^2'), symmetry=1, barrier=(12.9452,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.483631,'amu*angstrom^2'), symmetry=1, barrier=(21.1565,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.524652,0.0721738,-4.79101e-05,1.65335e-08,-2.37732e-12,33077.7,28.115], Tmin=(100,'K'), Tmax=(1567.7,'K')), NASAPolynomial(coeffs=[13.7461,0.0384395,-1.56328e-05,2.80758e-09,-1.88477e-13,28932.3,-41.6152], Tmin=(1567.7,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(273.958,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Cds_S)"""),
)
species(
label = 'C=C[C](C)C[CH]C(19167)',
structure = SMILES('C=C[C](C)C[CH]C'),
E0 = (230.593,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,360,370,350,2750,2850,1437.5,1250,1305,750,350,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.892072,0.0600801,-2.42405e-05,-2.45853e-10,1.86411e-12,27852.5,28.7245], Tmin=(100,'K'), Tmax=(1266.31,'K')), NASAPolynomial(coeffs=[10.3481,0.0418611,-1.64597e-05,2.92328e-09,-1.95921e-13,24523.5,-22.8164], Tmin=(1266.31,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(230.593,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_T) + radical(RCCJC)"""),
)
species(
label = '[CH2][CH]CC(C)=CC(24273)',
structure = SMILES('[CH2][CH]CC(C)=CC'),
E0 = (284.31,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,237.749,2078.8],'cm^-1')),
HinderedRotor(inertia=(0.00298216,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156111,'amu*angstrom^2'), symmetry=1, barrier=(6.26303,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00298224,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156122,'amu*angstrom^2'), symmetry=1, barrier=(6.26291,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.15611,'amu*angstrom^2'), symmetry=1, barrier=(6.26313,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.30307,0.064992,-4.06959e-05,1.49569e-08,-2.66661e-12,34286.8,29.0938], Tmin=(100,'K'), Tmax=(1129.12,'K')), NASAPolynomial(coeffs=[5.15778,0.0513364,-2.25549e-05,4.246e-09,-2.95091e-13,33416.3,10.029], Tmin=(1129.12,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(284.31,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(RCCJ) + radical(RCCJC)"""),
)
species(
label = '[CH2][C](C=C)CCC(3296)',
structure = SMILES('[CH2]C=C([CH2])CCC'),
E0 = (187.616,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.381084,0.0671752,-2.21408e-05,-1.56763e-08,9.82109e-12,22706,28.4379], Tmin=(100,'K'), Tmax=(1040.54,'K')), NASAPolynomial(coeffs=[14.9511,0.0364389,-1.42653e-05,2.62039e-09,-1.83185e-13,18305.7,-49.0072], Tmin=(1040.54,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(187.616,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_P)"""),
)
species(
label = 'C[CH]C[C]1CC1C(24274)',
structure = SMILES('C[CH]C[C]1CC1C'),
E0 = (308.738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.11982,0.0560976,-1.86944e-05,-2.77089e-09,2.09126e-12,37242,29.5612], Tmin=(100,'K'), Tmax=(1354.62,'K')), NASAPolynomial(coeffs=[9.83964,0.0424915,-1.70734e-05,3.04844e-09,-2.03921e-13,33765.6,-19.2655], Tmin=(1354.62,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(308.738,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Cyclopropane) + radical(RCCJC) + radical(Tertalkyl)"""),
)
species(
label = '[CH2][C]1CC(C)C1C(24275)',
structure = SMILES('[CH2][C]1CC(C)C1C'),
E0 = (305.913,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.33424,0.0452312,2.59334e-05,-5.66379e-08,2.26382e-11,36900.5,26.8575], Tmin=(100,'K'), Tmax=(999.267,'K')), NASAPolynomial(coeffs=[10.3263,0.0410187,-1.54512e-05,2.8007e-09,-1.95369e-13,33516.7,-24.4571], Tmin=(999.267,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(305.913,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(444.824,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Cyclobutane) + radical(Tertalkyl) + radical(Isobutyl)"""),
)
species(
label = 'CC=CC(C)=CC(24276)',
structure = SMILES('CC=CC(C)=CC'),
E0 = (-15.7206,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.366519,0.0700565,-4.05138e-05,8.23894e-09,5.44902e-13,-1751.78,25.2579], Tmin=(100,'K'), Tmax=(1181.85,'K')), NASAPolynomial(coeffs=[13.897,0.0369024,-1.44776e-05,2.6018e-09,-1.76919e-13,-5832.77,-46.0144], Tmin=(1181.85,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-15.7206,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H)"""),
)
species(
label = 'C=CCC(C)=CC(24277)',
structure = SMILES('C=CCC(C)=CC'),
E0 = (13.6692,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.601563,0.0651294,-3.13621e-05,2.39412e-09,1.73181e-12,1774.23,27.598], Tmin=(100,'K'), Tmax=(1226.09,'K')), NASAPolynomial(coeffs=[12.8372,0.0387397,-1.56269e-05,2.8372e-09,-1.93405e-13,-2242.99,-38.0726], Tmin=(1226.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(13.6692,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH)"""),
)
species(
label = 'CH2(S)(23)',
structure = SMILES('[CH2]'),
E0 = (419.862,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]C(=C)C[CH]C(24278)',
structure = SMILES('[CH2]C(=C)C[CH]C'),
E0 = (266.588,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,1948.01],'cm^-1')),
HinderedRotor(inertia=(0.0680026,'amu*angstrom^2'), symmetry=1, barrier=(27.1181,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(5.20297,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.278491,'amu*angstrom^2'), symmetry=1, barrier=(6.40305,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.010536,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (82.1436,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.07974,0.0556446,-3.26018e-05,9.62163e-09,-1.15636e-12,32175.6,25.9137], Tmin=(100,'K'), Tmax=(1886.95,'K')), NASAPolynomial(coeffs=[14.3542,0.0275054,-1.02333e-05,1.71884e-09,-1.09339e-13,27165.9,-46.5569], Tmin=(1886.95,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(266.588,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(365.837,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + radical(RCCJC) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C(C)C(=C)[CH]C(24172)',
structure = SMILES('[CH2]C(=CC)C([CH2])C'),
E0 = (237.411,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,3010,987.5,1337.5,450,1655,1380,1390,370,380,2900,435,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,4000],'cm^-1')),
HinderedRotor(inertia=(0.0358237,'amu*angstrom^2'), symmetry=1, barrier=(17.0825,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(3.92092,'amu*angstrom^2'), symmetry=1, barrier=(90.1497,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.742218,'amu*angstrom^2'), symmetry=1, barrier=(17.065,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.172372,'amu*angstrom^2'), symmetry=1, barrier=(3.96316,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(3.93149,'amu*angstrom^2'), symmetry=1, barrier=(90.3926,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3479.64,'J/mol'), sigma=(6.29859,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=543.51 K, Pc=31.6 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.441935,0.0682285,-3.06307e-05,-5.44961e-09,6.38237e-12,28690.7,29.4733], Tmin=(100,'K'), Tmax=(1022.38,'K')), NASAPolynomial(coeffs=[13.4828,0.0369601,-1.37359e-05,2.43177e-09,-1.6594e-13,24991.8,-38.7786], Tmin=(1022.38,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(237.411,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + longDistanceInteraction_noncyclic(CdCs-ST) + group(Cds-CdsCsH) + radical(Isobutyl) + radical(Allyl_P)"""),
)
species(
label = 'C[CH]CC[C]=CC(19228)',
structure = SMILES('C[CH]CC[C]=CC'),
E0 = (332.18,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3010,987.5,1337.5,450,1655,1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3025,407.5,1350,352.5,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.26714,0.0653856,-4.37282e-05,1.92202e-08,-4.37429e-12,40046,29.4112], Tmin=(100,'K'), Tmax=(906.265,'K')), NASAPolynomial(coeffs=[4.00506,0.0533011,-2.37267e-05,4.50669e-09,-3.1546e-13,39549.7,16.4718], Tmin=(906.265,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(332.18,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(RCCJC)"""),
)
species(
label = 'CC=C1CC(C)C1(24256)',
structure = SMILES('CC=C1CC(C)C1'),
E0 = (31.5064,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[10.4891,-0.0122365,0.000145495,-1.38082e-07,3.18834e-11,3441.47,-17.0613], Tmin=(100,'K'), Tmax=(1694.81,'K')), NASAPolynomial(coeffs=[70.7806,0.0429634,-7.81564e-05,1.86507e-08,-1.3786e-12,-45359.2,-423.421], Tmin=(1694.81,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(31.5064,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(methylenecyclobutane)"""),
)
species(
label = 'CHCH3(T)(95)',
structure = SMILES('[CH]C'),
E0 = (343.893,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,592.414,4000],'cm^-1')),
HinderedRotor(inertia=(0.00438699,'amu*angstrom^2'), symmetry=1, barrier=(26.7685,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (28.0532,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82363,-0.000909515,3.2138e-05,-3.7348e-08,1.3309e-11,41371.4,7.10948], Tmin=(100,'K'), Tmax=(960.812,'K')), NASAPolynomial(coeffs=[4.30487,0.00943069,-3.27559e-06,5.95121e-10,-4.27307e-14,40709.1,1.84202], Tmin=(960.812,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(343.893,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label="""CHCH3(T)""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2]C([CH2])=CC(24219)',
structure = SMILES('[CH2]C([CH2])=CC'),
E0 = (234.041,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.0177712,'amu*angstrom^2'), symmetry=1, barrier=(20.2255,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.87837,'amu*angstrom^2'), symmetry=1, barrier=(20.1954,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(4.61389,'amu*angstrom^2'), symmetry=1, barrier=(106.082,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (68.117,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.80244,0.0390483,-7.97741e-07,-2.45997e-08,1.16044e-11,28236,18.1778], Tmin=(100,'K'), Tmax=(1004.18,'K')), NASAPolynomial(coeffs=[10.9852,0.023482,-8.93213e-06,1.6381e-09,-1.15442e-13,25332.4,-31.4366], Tmin=(1004.18,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(234.041,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_P)"""),
)
species(
label = 'CH2(19)',
structure = SMILES('[CH2]'),
E0 = (381.563,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'C[CH]C[C]=CC(24192)',
structure = SMILES('C[CH]C[C]=CC'),
E0 = (355.96,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3010,987.5,1337.5,450,1655,1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3025,407.5,1350,352.5,272.37,2221.18],'cm^-1')),
HinderedRotor(inertia=(0.00227236,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.148422,'amu*angstrom^2'), symmetry=1, barrier=(7.81357,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.148424,'amu*angstrom^2'), symmetry=1, barrier=(7.81357,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.148422,'amu*angstrom^2'), symmetry=1, barrier=(7.81357,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (82.1436,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.25446,0.047533,-2.24803e-05,4.72442e-09,-3.81653e-13,42864.6,23.5426], Tmin=(100,'K'), Tmax=(2781.8,'K')), NASAPolynomial(coeffs=[19.681,0.022476,-8.96952e-06,1.48664e-09,-9.0685e-14,33168.8,-78.3599], Tmin=(2781.8,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(355.96,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(365.837,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(RCCJC)"""),
)
species(
label = 'C=CC(=C)C[CH]C(19164)',
structure = SMILES('C=CC(=C)C[CH]C'),
E0 = (204.351,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,325.967,325.97,325.971],'cm^-1')),
HinderedRotor(inertia=(0.0234883,'amu*angstrom^2'), symmetry=1, barrier=(1.77106,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0234892,'amu*angstrom^2'), symmetry=1, barrier=(1.77113,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.194384,'amu*angstrom^2'), symmetry=1, barrier=(14.6567,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.19437,'amu*angstrom^2'), symmetry=1, barrier=(14.6566,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (95.1622,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.607111,0.0653555,-3.42158e-05,2.15645e-09,2.88851e-12,24707.8,28.6688], Tmin=(100,'K'), Tmax=(1085.78,'K')), NASAPolynomial(coeffs=[13.0148,0.0349811,-1.34394e-05,2.40786e-09,-1.64502e-13,21109.4,-36.3749], Tmin=(1085.78,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(204.351,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(415.724,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(RCCJC)"""),
)
species(
label = '[CH2]CC(=C)C[CH]C(24279)',
structure = SMILES('[CH2]CC(=C)C[CH]C'),
E0 = (297.69,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,350,440,435,1725,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3000,3100,440,815,1455,1000,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.924397,0.0658228,-3.85648e-05,1.14822e-08,-1.42991e-12,35915.3,31.5477], Tmin=(100,'K'), Tmax=(1740.2,'K')), NASAPolynomial(coeffs=[12.3144,0.039642,-1.59979e-05,2.8369e-09,-1.87928e-13,31951,-29.7127], Tmin=(1740.2,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(297.69,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + radical(RCCJ) + radical(RCCJC)"""),
)
species(
label = 'C=C([CH][CH]C)CC(24280)',
structure = SMILES('[CH2]C(=C[CH]C)CC'),
E0 = (178.364,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.6455,0.0607146,-7.39766e-06,-2.83229e-08,1.36484e-11,21584.3,26.7698], Tmin=(100,'K'), Tmax=(1035.37,'K')), NASAPolynomial(coeffs=[13.8717,0.037755,-1.48994e-05,2.75541e-09,-1.93573e-13,17337.3,-44.7818], Tmin=(1035.37,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(178.364,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_S)"""),
)
species(
label = '[CH]=C(CC)C[CH]C(24281)',
structure = SMILES('[CH]=C(CC)C[CH]C'),
E0 = (339.54,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3025,407.5,1350,352.5,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3120,650,792.5,1650,350,440,435,1725,287.341,1535.51],'cm^-1')),
HinderedRotor(inertia=(0.0020416,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00203636,'amu*angstrom^2'), symmetry=1, barrier=(0.119657,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.153541,'amu*angstrom^2'), symmetry=1, barrier=(9.05865,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.154017,'amu*angstrom^2'), symmetry=1, barrier=(9.06246,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.155556,'amu*angstrom^2'), symmetry=1, barrier=(9.07373,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.883161,0.0678438,-4.23764e-05,1.39494e-08,-1.95975e-12,40949.3,30.3095], Tmin=(100,'K'), Tmax=(1550.14,'K')), NASAPolynomial(coeffs=[11.0007,0.0417364,-1.71136e-05,3.08472e-09,-2.07548e-13,37812.6,-22.9371], Tmin=(1550.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(339.54,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + radical(Cds_P) + radical(RCCJC)"""),
)
species(
label = '[CH]=C([CH]C)CCC(24282)',
structure = SMILES('[CH]C(=CC)CCC'),
E0 = (255.302,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,350,440,435,1725,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.197486,0.0737448,-4.23702e-05,1.19747e-08,-1.37166e-12,30850.9,29.6804], Tmin=(100,'K'), Tmax=(1964.11,'K')), NASAPolynomial(coeffs=[18.3382,0.0368005,-1.41557e-05,2.39799e-09,-1.52706e-13,23724.9,-70.0837], Tmin=(1964.11,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(255.302,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH2][CH]CC(=C)CC(24283)',
structure = SMILES('[CH2][CH]CC(=C)CC'),
E0 = (297.69,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,350,440,435,1725,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3000,3100,440,815,1455,1000,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.924397,0.0658228,-3.85648e-05,1.14822e-08,-1.42991e-12,35915.3,31.5477], Tmin=(100,'K'), Tmax=(1740.2,'K')), NASAPolynomial(coeffs=[12.3144,0.039642,-1.59979e-05,2.8369e-09,-1.87928e-13,31951,-29.7127], Tmin=(1740.2,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(297.69,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + radical(RCCJ) + radical(RCCJC)"""),
)
species(
label = 'C[CH][C]1CC(C)C1(24284)',
structure = SMILES('C[CH][C]1CC(C)C1'),
E0 = (304.414,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.43715,0.0453823,1.58051e-05,-3.8158e-08,1.38124e-11,36713.9,28.245], Tmin=(100,'K'), Tmax=(1112.49,'K')), NASAPolynomial(coeffs=[8.60969,0.045401,-1.90173e-05,3.56204e-09,-2.4894e-13,33521,-14.3007], Tmin=(1112.49,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(304.414,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(444.824,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Cyclobutane) + radical(Cs_S) + radical(Tertalkyl)"""),
)
species(
label = 'C=C(C=CC)CC(24285)',
structure = SMILES('C=C(C=CC)CC'),
E0 = (-2.34046,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.471798,0.065208,-1.89932e-05,-1.92017e-08,1.1352e-11,-143.675,25.9787], Tmin=(100,'K'), Tmax=(1018.92,'K')), NASAPolynomial(coeffs=[14.8445,0.0350786,-1.33469e-05,2.43062e-09,-1.69787e-13,-4437.52,-50.3286], Tmin=(1018.92,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-2.34046,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH)"""),
)
species(
label = 'C=CCC(=C)CC(24286)',
structure = SMILES('C=CCC(=C)CC'),
E0 = (27.0493,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.690396,0.0605209,-1.08892e-05,-2.34066e-08,1.17276e-11,3382.97,28.3746], Tmin=(100,'K'), Tmax=(1044.52,'K')), NASAPolynomial(coeffs=[13.4765,0.0373792,-1.47395e-05,2.7195e-09,-1.90466e-13,-696.74,-40.6109], Tmin=(1044.52,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(27.0493,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cds-CdsHH)"""),
)
species(
label = 'C=CC(=C)CCC(3302)',
structure = SMILES('C=CC(=C)CCC'),
E0 = (9.90489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.429571,0.064938,-1.42316e-05,-2.68111e-08,1.46756e-11,1331.8,26.5681], Tmin=(100,'K'), Tmax=(999.682,'K')), NASAPolynomial(coeffs=[15.8208,0.033571,-1.2507e-05,2.27564e-09,-1.59971e-13,-3255.39,-55.2328], Tmin=(999.682,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(9.90489,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH)"""),
)
species(
label = 'C=[C]C(C)C[CH]C(19169)',
structure = SMILES('C=[C]C(C)C[CH]C'),
E0 = (336.454,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,3025,407.5,1350,352.5,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.0597,0.0647849,-3.73125e-05,1.09539e-08,-1.35798e-12,40570.8,30.3163], Tmin=(100,'K'), Tmax=(1715.74,'K')), NASAPolynomial(coeffs=[11.094,0.0413914,-1.68606e-05,3.00709e-09,-2.00057e-13,37127.5,-23.5102], Tmin=(1715.74,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.454,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJC) + radical(Cds_S)"""),
)
species(
label = 'C=C1CC(C)C1C(24267)',
structure = SMILES('C=C1CC(C)C1C'),
E0 = (35.7798,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (96.1702,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[9.99122,-0.0100132,0.000144281,-1.38728e-07,3.23306e-11,3980.95,-16.4526], Tmin=(100,'K'), Tmax=(1679.8,'K')), NASAPolynomial(coeffs=[69.3651,0.0437352,-7.79586e-05,1.86251e-08,-1.37959e-12,-43496.9,-415.639], Tmin=(1679.8,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(35.7798,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + ring(methylenecyclobutane)"""),
)
species(
label = 'C=[C]C[CH]C(2608)',
structure = SMILES('C=[C]C[CH]C'),
E0 = (391.986,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3025,407.5,1350,352.5,328.839,1764.65],'cm^-1')),
HinderedRotor(inertia=(0.070318,'amu*angstrom^2'), symmetry=1, barrier=(5.38273,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0701107,'amu*angstrom^2'), symmetry=1, barrier=(5.38037,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0702146,'amu*angstrom^2'), symmetry=1, barrier=(5.38499,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (68.117,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.41566,0.0369675,-1.69643e-05,3.43217e-09,-2.57685e-13,47199.8,21.2179], Tmin=(100,'K'), Tmax=(2427.98,'K')), NASAPolynomial(coeffs=[16.5625,0.0166401,-6.24654e-06,9.9465e-10,-5.87348e-14,39452.1,-61.39], Tmin=(2427.98,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(391.986,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJC) + radical(Cds_S)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
transitionState(
label = 'TS1',
E0 = (230.563,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (316.349,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (368.728,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (379.471,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (379.499,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (462.507,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (355.665,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (393.242,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (433.138,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (478.826,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (318.267,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (380.38,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (338.493,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (308.594,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (645.922,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (461.779,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (355.664,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (293.963,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (255.536,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (686.45,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (397.346,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (502.218,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (238.847,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (577.934,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (737.523,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS26',
E0 = (416.143,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS27',
E0 = (411.495,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS28',
E0 = (439.763,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS29',
E0 = (484.725,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS30',
E0 = (299.61,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS31',
E0 = (344.133,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS32',
E0 = (355.664,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS33',
E0 = (293.963,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS34',
E0 = (238.931,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS35',
E0 = (238.931,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS36',
E0 = (430.928,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS37',
E0 = (238.847,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS38',
E0 = (735.879,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['C3H6(72)', 'CH3CHCCH2(18175)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
label = 'reaction2',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['[CH2]C1([CH]C)CC1C(24224)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(3.36329e+10,'s^-1'), n=0.535608, Ea=(85.7861,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_D;doublebond_intra_HNd;radadd_intra_csHNd]
Euclidian distance = 1.0
family: Intra_R_Add_Exocyclic
Ea raised from 83.4 to 85.8 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction3',
reactants = ['H(3)', '[CH2]C(C=CC)=CC(24268)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(0.0272924,'m^3/(mol*s)'), n=2.81111, Ea=(21.1569,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""From training reaction 26 used for Cds-CdH_Cds-CsH;HJ
Exact match found for rate rule [Cds-CdH_Cds-CsH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction4',
reactants = ['H(3)', 'C=CCC(=C)[CH]C(24175)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(3.36e+08,'cm^3/(mol*s)'), n=1.56, Ea=(2.5104,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 9 used for Cds-HH_Cds-CsH;HJ
Exact match found for rate rule [Cds-HH_Cds-CsH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction5',
reactants = ['C3H6(72)', 'C=[C][CH]C(18176)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(0.00620445,'m^3/(mol*s)'), n=2.46568, Ea=(12.4666,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cds-HH_Cds-Cs\H3/H;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction6',
reactants = ['C3H6(T)(143)', 'CH3CHCCH2(18175)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(0.00086947,'m^3/(mol*s)'), n=2.67356, Ea=(32.0272,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Ca_Cds-HH;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction7',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['[CH2]C([CH]CC)=CC(24235)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(1.682e+10,'s^-1'), n=0.35, Ea=(125.102,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 160 used for R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction8',
reactants = ['[CH2]CCC([CH2])=CC(24269)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(718000,'s^-1'), n=2.05, Ea=(151.879,'kJ/mol'), T0=(1,'K'), Tmin=(500,'K'), Tmax=(2000,'K'), comment="""From training reaction 147 used for R2H_S;C_rad_out_2H;Cs_H_out_H/NonDeC
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction9',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['C[CH][CH]C(C)=CC(24270)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(1.09894e+08,'s^-1'), n=1.58167, Ea=(202.575,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_2Cd;C_rad_out_2H;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction10',
reactants = ['C[C]=C(C)C[CH]C(24271)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(7.74e+09,'s^-1'), n=1.08, Ea=(161.921,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H
Exact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction11',
reactants = ['[CH2]C(=[C]C)CCC(24272)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction12',
reactants = ['C=C[C](C)C[CH]C(19167)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(800000,'s^-1'), n=1.81, Ea=(149.787,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction13',
reactants = ['[CH2][CH]CC(C)=CC(24273)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(91273.5,'s^-1'), n=1.79, Ea=(54.1828,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5Hall;C_rad_out_2H;Cs_H_out_2H] for rate rule [R5HJ_1;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction14',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['[CH2][C](C=C)CCC(3296)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(634768,'s^-1'), n=1.77, Ea=(78.0316,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_SSMS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SSMS;C_rad_out_H/NonDeC;Cs_H_out_2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction15',
reactants = ['C3H6(T)(143)', 'C=[C][CH]C(18176)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(7.46075e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction16',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['C[CH]C[C]1CC1C(24274)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(3.473e+12,'s^-1'), n=0.247, Ea=(231.216,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3_D;doublebond_intra_secNd;radadd_intra_cs] for rate rule [R3_D;doublebond_intra_secNd_HNd;radadd_intra_cs2H]
Euclidian distance = 1.41421356237
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction17',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['[CH2][C]1CC(C)C1C(24275)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(5.25757e+07,'s^-1'), n=1.165, Ea=(125.102,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_Cs_HH_D;doublebond_intra;radadd_intra_csHCs]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction18',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['CC=CC(C)=CC(24276)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad;XH_Rrad_NDe]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction19',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['C=CCC(C)=CC(24277)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(6.37831e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction20',
reactants = ['CH2(S)(23)', '[CH2]C(=C)C[CH]C(24278)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(7.94e+13,'cm^3/(mol*s)','*|/',0.25), n=-0.324, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 4 used for carbene;Cd_pri
Exact match found for rate rule [carbene;Cd_pri]
Euclidian distance = 0
Multiplied by reaction path degeneracy 4.0
family: 1,2_Insertion_carbene
Ea raised from -3.9 to 0 kJ/mol."""),
)
reaction(
label = 'reaction21',
reactants = ['[CH2]C(C)C(=C)[CH]C(24172)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction22',
reactants = ['C[CH]CC[C]=CC(19228)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(1.74842e+09,'s^-1'), n=1.084, Ea=(170.038,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction23',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['CC=C1CC(C)C1(24256)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_2H] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_2H]
Euclidian distance = 2.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction24',
reactants = ['CHCH3(T)(95)', '[CH2]C([CH2])=CC(24219)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(2.13464e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H2/Cd;Birad]
Euclidian distance = 3.0
Multiplied by reaction path degeneracy 2.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction25',
reactants = ['CH2(19)', 'C[CH]C[C]=CC(24192)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/NonDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction26',
reactants = ['H(3)', 'C=CC(=C)C[CH]C(19164)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(2.31e+08,'cm^3/(mol*s)'), n=1.64, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2544 used for Cds-HH_Cds-CdH;HJ
Exact match found for rate rule [Cds-HH_Cds-CdH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond
Ea raised from -2.0 to 0 kJ/mol."""),
)
reaction(
label = 'reaction27',
reactants = ['[CH2]CC(=C)C[CH]C(24279)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS27',
kinetics = Arrhenius(A=(1.72e+06,'s^-1'), n=1.99, Ea=(113.805,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction28',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['C=C([CH][CH]C)CC(24280)'],
transitionState = 'TS28',
kinetics = Arrhenius(A=(1.23617e+10,'s^-1'), n=1.04667, Ea=(209.2,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_2Cd;C_rad_out_H/NonDeC;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction29',
reactants = ['[CH]=C(CC)C[CH]C(24281)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS29',
kinetics = Arrhenius(A=(1.846e+10,'s^-1'), n=0.74, Ea=(145.185,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC
Exact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction30',
reactants = ['[CH]=C([CH]C)CCC(24282)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS30',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction31',
reactants = ['[CH2][CH]CC(=C)CC(24283)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS31',
kinetics = Arrhenius(A=(262000,'s^-1'), n=1.62, Ea=(46.4424,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5Hall;C_rad_out_2H;Cs_H_out_H/NonDeC] for rate rule [R5HJ_1;C_rad_out_2H;Cs_H_out_H/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction32',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['C[CH][C]1CC(C)C1(24284)'],
transitionState = 'TS32',
kinetics = Arrhenius(A=(5.25757e+07,'s^-1'), n=1.165, Ea=(125.102,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_Cs_HH_D;doublebond_intra;radadd_intra_csHCs]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction33',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['C=C(C=CC)CC(24285)'],
transitionState = 'TS33',
kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 1 used for R3radExo;Y_rad_NDe;XH_Rrad_NDe
Exact match found for rate rule [R3radExo;Y_rad_NDe;XH_Rrad_NDe]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction34',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['C=CCC(=C)CC(24286)'],
transitionState = 'TS34',
kinetics = Arrhenius(A=(9.63e+09,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction35',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['C=CC(=C)CCC(3302)'],
transitionState = 'TS35',
kinetics = Arrhenius(A=(9.63e+09,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction36',
reactants = ['C=[C]C(C)C[CH]C(19169)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS36',
kinetics = Arrhenius(A=(8.66e+11,'s^-1'), n=0.438, Ea=(94.4747,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 5 used for cCs(-HC)CJ;CdsJ;C
Exact match found for rate rule [cCs(-HC)CJ;CdsJ;C]
Euclidian distance = 0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction37',
reactants = ['C=C([CH]C)C[CH]C(24171)'],
products = ['C=C1CC(C)C1C(24267)'],
transitionState = 'TS37',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.82842712475
family: Birad_recombination"""),
)
reaction(
label = 'reaction38',
reactants = ['CHCH3(T)(95)', 'C=[C]C[CH]C(2608)'],
products = ['C=C([CH]C)C[CH]C(24171)'],
transitionState = 'TS38',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/NonDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
network(
label = '4244',
isomers = [
'C=C([CH]C)C[CH]C(24171)',
],
reactants = [
('C3H6(72)', 'CH3CHCCH2(18175)'),
],
bathGas = {
'N2': 0.5,
'Ne': 0.5,
},
)
pressureDependence(
label = '4244',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
|
[
"qin.she@husky.neu.edu"
] |
qin.she@husky.neu.edu
|
f1853f12e58fde7c1eea2bc8c29c8d03481fe955
|
4e50d2345a2cfeb3c9ecb02187f88e753d1ed83c
|
/bin/00.raw_reads/test_rmAdaptor.py
|
a089a074dd4a77d7a66772f59516a1df8ad64515
|
[] |
no_license
|
ms201420201029/real_metagenome_pipeline
|
7c7b54e5e8a798933387f960256ebb849e9c2200
|
e8f0b188f21975305565d06e4fefe4f4c9adc1f7
|
refs/heads/master
| 2020-04-05T06:41:34.349662
| 2018-12-04T05:48:52
| 2018-12-04T05:48:52
| 156,646,750
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,642
|
py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# Description: Remove adapter
# Copyright (C) 20170721 Ruiyi Corporation
# Email: lixr@realbio.cn
import os, sys, argparse, gzip, math
from Bio import SeqIO
from multiprocessing import Pool
from multiprocessing import cpu_count
from time import time
def read_params(args):
    """Parse command-line options for the adapter-removal tool.

    Note: the ``args`` parameter is accepted but never used —
    ``parser.parse_args()`` reads ``sys.argv`` directly.

    Returns a plain dict of option name -> value (via ``vars``).
    """
    parser = argparse.ArgumentParser(description="2017/07/21 by lixr")
    # Prefix for every output file this run produces (.1.fq, .2.fq, ...).
    parser.add_argument('--out_prefix',dest='out_prefix',metavar="DIR",type=str,required=True,
                        help="out file prefix")
    # Gzipped FASTQ inputs for the two mates of a paired-end run.
    parser.add_argument('-r1', '--read1', dest='read1', metavar='DIR', type=str, required=True,
                        help="read1.fastq")
    parser.add_argument('-r2', '--read2', dest='read2', metavar='DIR', type=str, required=True,
                        help="read2.fastq")
    # Adapter sequences; only the first 30 bases are used downstream (see __main__).
    parser.add_argument('-a1', '--read1Adaptor', dest='read1Adaptor', metavar='string', type=str, required=True,
                        help="AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC")
    parser.add_argument('-a2', '--read2Adaptor', dest='read2Adaptor', metavar='string', type=str, required=True,
                        help="AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGTAGATCTCGGTGGTCGCCGTATCATT")
    parser.add_argument('--type',dest="type",metavar='string',type=str,required=True,
                        help="PE or SE")
    # Maximum fraction of mismatches tolerated when aligning the adapter.
    parser.add_argument('--mis_ratio',dest="mis_ratio",metavar="float",type=float,default=0.2,
                        help="mis_ratio defult[0.2]")
    # Absolute mismatch floor (whichever budget is larger wins).
    parser.add_argument('--mis_num', dest="mis_num", metavar="int", type=int, default=3,
                        help="mis_ratio defult[3]")
    # 2: trimmed reads go into the clean files; 4: separate *_rm.fq files.
    parser.add_argument('--out_type',dest="out_type",metavar="STRING",type=int,default=4,
                        help="2 : two file out ; 4 : four file out.")
    # NOTE(review): help text says [100] but the actual default is 99 — confirm intent.
    parser.add_argument('--min_len',dest="min_len",metavar="min_len",type=int,default=99,
                        help="min_len[100]")
    return vars(parser.parse_args())
def get_mistaken_count_max(seq_len, find_pos, adaptor_len, mis_ratio, mis_num):
    """Return the rounded-up mismatch budget for an adapter alignment.

    The budget is ``mis_ratio`` times the number of bases the adapter can
    overlap the read from ``find_pos`` onward (capped at the adapter length),
    but never less than the absolute floor ``mis_num``.
    """
    overlap = seq_len - find_pos
    if overlap > adaptor_len:
        budget = adaptor_len * mis_ratio
    else:
        budget = overlap * mis_ratio
    # Enforce the absolute lower bound before rounding up.
    budget = max(budget, float(mis_num))
    return math.ceil(budget)
def match_adaptor(seq, seed):
    """Return the start index of every (possibly overlapping) occurrence
    of ``seed`` in ``seq``, in ascending order."""
    hits = []
    start = 0
    while True:
        hit = seq.find(seed, start)
        if hit < 0:
            # No further occurrence — done.
            return hits
        hits.append(hit)
        # Advance by one so overlapping matches are also reported.
        start = hit + 1
def situation_1(read1,read2,adaptor1,adaptor2,min_len):
    """Exact-match pass: look for the first 30 bases of each adapter in its read.

    ``read1``/``read2`` are records exposing ``.seq`` and slicing (Bio
    SeqRecord in practice — TODO confirm).  Returns a 3-tuple:
    (True, trimmed1, trimmed2)  — adapter found far enough in; both mates are
                                  cut at the SAME position (paired inserts
                                  share one length);
    (True, None, None)          — adapter found too close to the start, pair
                                  is dropped;
    (False, read1, read2)       — no exact adapter hit in either mate.
    """
    seq1 =read1.seq
    seq2 =read2.seq
    # Search only the first 30 bases of adaptor1 (full adapter if shorter).
    pos = seq1.find(adaptor1[0:30]) if len(adaptor1) > 30 else seq1.find(adaptor1)
    if pos > min_len: # keep the pair only if the retained prefix is long enough
        return True, read1[:pos], read2[:pos] # del seq1\seq2
    pos2 = seq2.find(adaptor2[0:30]) if len(adaptor2) > 30 else seq2.find(adaptor2)
    if pos2 > min_len: # same length check on the mate's hit
        return True,read1[:pos2],read2[:pos2]
    if pos == -1 and pos2 == -1:
        return False, read1, read2
    # An adapter was found but the remaining insert would be too short.
    return True, None, None
def situation_2(read1,read2,adaptor1,adaptor2): # a read pair passes only when NONE of the four 7-mer seeds match
    """Seed screen: probe each read with two 7-mers from its adapter.

    If none of the four seed lookups hits, the pair is declared clean:
    (True, read1, read2).  Otherwise the caller must run the finer
    alignment, signalled by (False, None, None).
    """
    seq1 = read1.seq
    seq2 = read2.seq
    true_num = 0
    # Two adjacent 7-mers per adapter; [0:7]/[7:14] also cover the
    # adapter self-ligation case (original note translated).
    adaptor_read1_pos_1 = match_adaptor(seq1,adaptor1[0:7])
    adaptor_read1_pos_2 = match_adaptor(seq1,adaptor1[7:14])
    adaptor_read2_pos_1 = match_adaptor(seq2,adaptor2[0:7])
    adaptor_read2_pos_2 = match_adaptor(seq2,adaptor2[7:14])
    if adaptor_read1_pos_1:
        true_num += 1
    if adaptor_read1_pos_2:
        true_num += 1
    if adaptor_read2_pos_1:
        true_num += 1
    if adaptor_read2_pos_2:
        true_num += 1
    if true_num == 0: # a stricter rule (true_num == 1) was tried and removed (original note)
        return True,read1,read2 #clean reads
    else:
        return False,None,None
def rmPE(read1,read2,adaptor1,adaptor2,mis_ratio,min_len,mis_num):
    """Paired-end adapter removal for one read pair.

    Returns (is_clean, out_read1, out_read2):
      (True,  r1, r2)   — pair kept untouched (clean);
      (False, r1, r2)   — adapter found, reads trimmed (both mates cut to the
                          shorter kept length so they stay in sync);
      (False, None, None) — pair dropped (kept part too short).
    """
    # Pass 1: exact 30-base adapter match.
    result = situation_1(read1,read2,adaptor1,adaptor2,min_len)
    if result[0]:
        return False,result[1],result[2] #del seq1 and seq2
    # Pass 2: cheap 7-mer seed screen; zero hits means clean.
    result = situation_2(read1,read2,adaptor1,adaptor2)
    if result[0]:
        return True,result[1],result[2] #clean seq1 and seq2
    # Pass 3: mismatch-tolerant alignment on each mate independently.
    res_1 = rmSE(read1,adaptor1,mis_ratio,min_len,mis_num)
    if res_1[1] is None:
        return False,None,None
    res_2 = rmSE(read2,adaptor2,mis_ratio,min_len,mis_num)
    if res_1[0] and res_2[0]:
        return True,res_1[1],res_2[1]
    else:
        if res_2[1] is None:
            return False, None, None
        # res_*[2] is the kept length; truncate the longer mate to match.
        if res_1[2]>res_2[2]:
            return False,res_1[1][:res_2[2]],res_2[1]
        elif res_1[2]==res_2[2]:
            return False,res_1[1],res_2[1]
        else:
            return False,res_1[1],res_2[1][:res_1[2]]
def rmSE(read,adaptor,mis_ratio,min_len,mis_num):
    """Single-read adapter search with a mismatch budget.

    Seeds the alignment with the 6-mers at adapter offsets 0, 6 and 12;
    for every seed hit in the read it extends the match leftwards then
    rightwards, counting mismatches against get_mistaken_count_max().

    Returns (is_clean, read_or_None, kept_length):
      (True,  read, len)     — no acceptable adapter alignment; read is clean;
      (False, read[:k], k)   — adapter found, read trimmed to k bases;
      (False, None, 0)       — adapter found too early, read dropped.
    """
    seq = read.seq
    seed_len = 6
    adaptor_len = len(adaptor)
    seq_len = len(seq)
    for i in [0,6,12]: # adapter offsets to seed from (original note: was [0,6,12] before too)
        seed = adaptor[i:i+seed_len]
        seed_count = seq.count(seed)
        if seed_count==0:
            continue
        pos = 0
        for j in range(seed_count):
            find_pos = seq.find(seed,pos)
            # Budget scales with how much adapter can overlap from this hit.
            mistaken_count_max =get_mistaken_count_max(seq_len, (find_pos-i), adaptor_len, mis_ratio, mis_num)
            mistaken_count = 0
            _b = find_pos            # walks left from the seed hit
            _e = find_pos + seed_len # walks right from the seed hit
            # Extend left while the adapter still covers position _b.
            while(_b >= 0 and i >= find_pos - _b):
                if adaptor[i - find_pos + _b] != seq[_b]:
                    mistaken_count += 1
                    if mistaken_count > mistaken_count_max:
                        break
                _b -= 1
            else : # left extension survived the budget — extend right
                while(_e < seq_len and i - find_pos + _e < adaptor_len):
                    if adaptor[ i - find_pos + _e ] != seq[_e]:
                        mistaken_count += 1
                        if mistaken_count > mistaken_count_max:
                            break
                    _e += 1
                else: # both extensions fit: adapter starts at _b+1
                    if _b+1 > min_len:
                        return False,read[:_b+1],_b+1
                    if (_b+1 >= 0) and (_b+1 <= min_len):
                        return False,None,0
            pos = find_pos + 1 # try the next occurrence of this seed
    return True,read,seq_len
def rmAdaptor(type,read1_file,read2_file,adaptor1,adaptor2,out_prefix,out_type,mis_ratio,min_len,mis_num):
    """Strip adapters from a gzipped FASTQ pair and write the results.

    Outputs ``<out_prefix>.1.fq`` / ``.2.fq`` with clean pairs; when
    ``out_type == 4`` trimmed (adapter-containing) pairs go to separate
    ``.1_rm.fq`` / ``.2_rm.fq`` files, otherwise they share the clean files.

    Returns (total_read_num, clean_read_num, adaptor_read_num); counters
    advance by 2 per pair.

    NOTE(review): despite the CLI advertising SE mode, only 'PE' was ever
    implemented — any other ``type`` returns (0, 0, 0), exactly as before.
    """
    total_read_num = 0
    clean_read_num = 0
    adaptor_read_num = 0
    if type != 'PE':
        # SE path was never written upstream; keep the historical no-op result.
        return total_read_num, clean_read_num, adaptor_read_num
    # Context managers close the inputs/outputs even on error (the original
    # leaked the gzip handles and, on exception, the output files too).
    with gzip.open(read1_file, 'rt') as fq1, \
            gzip.open(read2_file, 'rt') as fq2, \
            open('%s.1.fq' % out_prefix, 'w') as read1_out, \
            open('%s.2.fq' % out_prefix, 'w') as read2_out:
        read2_records = SeqIO.parse(fq2, 'fastq')
        if out_type == 4:
            read1_rm_out = open('%s.1_rm.fq' % out_prefix, 'w')
            read2_rm_out = open('%s.2_rm.fq' % out_prefix, 'w')
        else:
            # out_type == 2: trimmed pairs are appended to the clean files.
            read1_rm_out = read1_out
            read2_rm_out = read2_out
        try:
            for read1 in SeqIO.parse(fq1, 'fastq'):
                total_read_num += 2
                read2 = next(read2_records)  # mate of read1 (files must be in sync)
                is_clean, rec1, rec2 = rmPE(read1, read2, adaptor1, adaptor2,
                                            mis_ratio, min_len, mis_num)
                if is_clean:
                    clean_read_num += 2
                    read1_out.write(rec1.format('fastq'))  # clean read
                    read2_out.write(rec2.format('fastq'))  # clean read
                else:
                    adaptor_read_num += 2
                    if rec1 is None or rec2 is None:
                        continue  # pair dropped entirely (too short after trim)
                    read1_rm_out.write(rec1.format('fastq'))  # adapter read
                    read2_rm_out.write(rec2.format('fastq'))  # adapter read
        finally:
            if out_type == 4:
                read1_rm_out.close()
                read2_rm_out.close()
    return total_read_num, clean_read_num, adaptor_read_num
if __name__ == '__main__':
    params = read_params(sys.argv)
    read1_file = params["read1"]
    read2_file = params["read2"]
    # Only the first 30 bases of each adapter are ever used.
    adaptor1 = params["read1Adaptor"][:30]
    adaptor2 = params["read2Adaptor"][:30]
    type = params["type"]  # NOTE(review): shadows the builtin `type`
    out_prefix = params["out_prefix"]
    mis_ratio = params["mis_ratio"]
    out_type = params["out_type"]
    min_len = params["min_len"]
    mis_num = params["mis_num"]
    starttime = time()
    total_read_num,clean_read_num,adaptor_read_num = rmAdaptor(type,read1_file,read2_file,adaptor1,adaptor2,out_prefix,out_type,mis_ratio,min_len,mis_num)
    # Write a one-row TSV summary next to the other outputs.
    with open("%s_adaptor_statistical.tsv" % out_prefix,'w') as fqout:
        fqout.write("sampleName\ttotal_reads\tremain_reads\tadaptor_reads\n")
        fqout.write("%s\t%s\t%s\t%s\n" % (os.path.basename(out_prefix),total_read_num,clean_read_num,adaptor_read_num))
    endtime = time()
    sys.stdout.write("use time %s second"%(endtime-starttime))
|
[
"1037080472@qq.com"
] |
1037080472@qq.com
|
a1bbbf01277d38b11003413cc62221047eceb338
|
032edbd5eccee1896a207f6e0b0ba1d026d4e984
|
/codility/missing_integer.py
|
2096104854d56029219e6bedcc29d3932c6934e6
|
[
"MIT"
] |
permissive
|
grzesk075/PythonSandbox
|
95812a69e1b53e1faea574c10ec8db6fc79a58d2
|
9fa879b4a15a971258c458bbfe55c7a899c95ce5
|
refs/heads/master
| 2021-06-10T11:35:06.781026
| 2019-11-20T14:10:18
| 2019-11-20T14:10:18
| 156,559,088
| 0
| 0
|
MIT
| 2021-04-20T17:46:55
| 2018-11-07T14:31:11
|
Python
|
UTF-8
|
Python
| false
| false
| 364
|
py
|
def solution(A):
    """Return the smallest positive integer (>= 1) absent from A.

    The answer always lies in [1, N + 1] for a list of length N, so values
    outside that range can be ignored.  O(N) time, O(N) extra space.

    The original carried a redundant ``anyExistent`` early-return (the scan
    below already returns 1 when nothing in range is present) and used the
    non-idiomatic ``existent[i] == False``; both are cleaned up here with
    identical behaviour.
    """
    n = len(A)
    seen = [False] * (n + 1)  # seen[v] == True iff value v occurs in A
    for value in A:
        if 1 <= value <= n:
            seen[value] = True
    for candidate in range(1, n + 1):
        if not seen[candidate]:
            return candidate
    # A is a permutation of 1..n, so the first gap is n + 1.
    return n + 1
|
[
"grzegorz.kuprianowicz@tomtom.com"
] |
grzegorz.kuprianowicz@tomtom.com
|
61468b496b4ca341fc7f4f03f63f4d96fa02445f
|
de7071a20fccd71617ddb94edb979869d23d3c51
|
/hour.py
|
af88b1432b01a270eb30bb385700f61f99f3db6c
|
[] |
no_license
|
nancygaooo/weChatWeDone
|
825628d734ec7b1050727c39e9ef712ff237bbc6
|
5322c9a35c4e1f7a1c62cff03420a1e80275e834
|
refs/heads/master
| 2023-05-25T17:58:09.309748
| 2019-11-14T03:09:25
| 2019-11-14T03:09:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,440
|
py
|
#!/usr/bin/python3
"""Plot per-hour message counts for both directions of a chat log.

Reads data/a-hou.json (a JSON array of message records with at least
``msgCreateTime`` — a unix timestamp — and ``mesDes``), buckets messages
by local hour of day, and draws one line per direction.
"""
import json
import matplotlib.pyplot as plt
import time
import collections

# Load the exported chat log.
hi = 'data/a-hou.json'
f = open(hi, encoding='utf-8')
data = f.read()
lists = json.loads(data)

# Hour labels "00".."23".  The original hand-typed list contained a
# duplicated "01" entry (a typo that silently collapsed in the dict);
# generating the labels removes the typo without changing the keys.
hoursDict = ['%02d' % h for h in range(24)]

# One zero-initialised counter per hour, in chronological order.
aDays = collections.OrderedDict((h, 0) for h in hoursDict)
bDays = collections.OrderedDict((h, 0) for h in hoursDict)

for item in lists:
    timeObj = time.localtime(item['msgCreateTime'])
    hour = time.strftime("%H", timeObj)
    # mesDes == 1 marks one direction of the conversation ("a"), anything
    # else the other ("b") — presumably sent vs received; TODO confirm
    # against the exporter's schema.
    if item['mesDes'] == 1:
        aDays[hour] = aDays[hour] + 1
    else:
        bDays[hour] = bDays[hour] + 1

# Split each ordered mapping into parallel x (hour label) / y (count) lists.
xA = list(aDays.keys())
yA = list(aDays.values())
xB = list(bDays.keys())
yB = list(bDays.values())
print(xA, yA)
print(xB, yB)

plt.plot(xA, yA, label="a")
plt.plot(xB, yB, label="b")
plt.xlabel('hour')
plt.ylabel('records')
plt.legend()
plt.show()
# plt.savefig('day.jpg')
|
[
"zqbinary@foxmail.com"
] |
zqbinary@foxmail.com
|
b3ca053e73670d9c89ef872531a9f1acbf7de96e
|
fbb1550dc5437d672ed0137bd7711eba3290dee3
|
/students/ethan_nguyen/Lesson09/charges_calc.py
|
4a7033c84e96e8d9333119ece0d76514c91d20b4
|
[] |
no_license
|
JavaRod/SP_Python220B_2019
|
2cc379daf5290f366cf92dc317b9cf68e450c1b3
|
5dac60f39e3909ff05b26721d602ed20f14d6be3
|
refs/heads/master
| 2022-12-27T00:14:03.097659
| 2020-09-27T19:31:12
| 2020-09-27T19:31:12
| 272,602,608
| 1
| 0
| null | 2020-06-16T03:41:14
| 2020-06-16T03:41:13
| null |
UTF-8
|
Python
| false
| false
| 4,893
|
py
|
'''
Returns total price paid for individual rentals
'''
# pylint: disable=line-too-long, c0301
import argparse
import json
import datetime
import math
import logging
def parse_cmd_arguments():
    """Parse command-line options for the rental-charge calculator.

    Returns the argparse namespace with ``input``, ``output`` and ``debug``
    attributes; argparse exits with an error if a required flag is missing.

    Fixes: the description was the leftover "Process some integers." example
    from the argparse docs, and the --output help text misspelled "output".
    """
    parser = argparse.ArgumentParser(
        description='Calculate rental charges from a JSON source file.')
    parser.add_argument('-i', '--input', help='input JSON file',
                        required=True)
    parser.add_argument('-o', '--output', help='output JSON file',
                        required=True)
    parser.add_argument('-d', '--debug', help='logging level for debug',
                        required=True)
    return parser.parse_args()
# Parse the CLI exactly once at import time; LOGLEV feeds logger_decorator.
ARGS = parse_cmd_arguments()
LOGLEV = int(ARGS.debug)
def logger_decorator(level):
    """Decorator factory: wraps a function so each call (re)configures the
    root logger at the given verbosity before running it.

    level 0 disables logging; 1 -> ERROR, 2 -> WARNING, anything else -> DEBUG.

    NOTE(review): handlers are added on EVERY call of the wrapped function,
    so repeated calls accumulate duplicate file/console handlers and emit
    duplicate log lines.  The file handler is also pinned to WARNING
    regardless of ``level``.  Both look unintentional — confirm before use.
    """
    def logged_func(func):
        """Actual decorator: receives the function to wrap."""
        def wrapper(*args):
            """Configure logging, announce the call, then invoke ``func``."""
            logger = logging.getLogger()
            # create a logger object
            if level > 0:
                if level == 1:
                    logger.setLevel(logging.ERROR)
                elif level == 2:
                    logger.setLevel(logging.WARNING)
                else:
                    logger.setLevel(logging.DEBUG)
            else:
                logging.disable(logging.ERROR)  # disable all logging
            log_format = "%(asctime)s %(filename)s:%(lineno)-3d %(levelname)s \
%(message)s"
            formatter = logging.Formatter(log_format)
            # One log file per day, named YYYY-MM-DD.log.
            log_file = datetime.datetime.now().strftime("%Y-%m-%d")+".log"
            file_handler = logging.FileHandler(log_file)
            file_handler.setFormatter(formatter)
            file_handler.setLevel(logging.WARNING)
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.DEBUG)
            console_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
            logger.addHandler(console_handler)
            if args:
                print("\twith args: {}".format(args))
            print("Function {} called".format(func.__name__))
            return func(*args)
        return wrapper
    return logged_func
@logger_decorator(LOGLEV)
def load_rentals_file(filename):
    """Load and return the JSON rental data from *filename*.

    Exits the program on a missing file or malformed JSON rather than
    propagating the exception (command-line-tool behaviour).

    Fixes: the log messages contained the placeholder "(unknown)" instead of
    the filename, and the error paths called ``exit(0)`` — a success status —
    on failure; they now exit with status 1.
    """
    logging.debug("Load input json file")
    try:
        with open(filename) as file:
            # json.JSONDecodeError is a subclass of ValueError.
            data = json.load(file)
    except ValueError:
        logging.error("Decoding JSON has failed: %s", filename)
        exit(1)
    except FileNotFoundError:
        logging.error("File %s not found", filename)
        exit(1)
    return data
@logger_decorator(LOGLEV)
def calculate_additional_fields(data):
    """Enrich each rental record in *data* with derived fields.

    For every record adds total_days, total_price, sqrt_total_price and
    unit_cost; records with unparsable dates or zero units are skipped
    with a warning rather than aborting the run.
    """
    logging.debug("Start calculating additional fields")
    for value in data.values():
        logging.debug(f"Proccessing record {value}")
        try:
            rental_start = datetime.datetime.strptime(
                value['rental_start'], '%m/%d/%y')
            rental_end = datetime.datetime.strptime(
                value['rental_end'], '%m/%d/%y')
            total_day = (rental_end - rental_start).days
            # End-before-start is treated as swapped dates, not an error.
            if total_day < 0:
                logging.warning("Negative total day. Let take absolute of it")
                total_day = abs(total_day)
            # Same-day rentals are billed as one day.
            if total_day == 0:
                logging.warning(f"Rental start and end date are the same. Let set \
total rental days to 1 {value}")
                total_day = 1
            value['total_days'] = total_day
            value['total_price'] = value['total_days'] * value['price_per_day']
            value['sqrt_total_price'] = math.sqrt(value['total_price'])
            value['unit_cost'] = value['total_price'] / value['units_rented']
        except ValueError:
            # Raised by strptime on missing/garbled dates; skip the record.
            logging.warning(f"Missing rental start or end date. Value was {value}. \
Skipped this record gracefully.")
            continue
        except ZeroDivisionError:
            # units_rented == 0; skip rather than crash.
            logging.warning(f"Tried to divide by zero. Value was {value}. Recovered \
gracefully.")
            continue
    return data
@logger_decorator(LOGLEV)
def save_to_json(filename, data):
    """Serialise *data* to *filename* as JSON.

    Exits the program if the file cannot be written.

    Fixes: the error message contained the placeholder "(unknown)" instead
    of the filename, and the failure path called ``exit(0)`` — a success
    status; it now exits with status 1.
    """
    logging.debug("Save output to json")
    try:
        with open(filename, 'w') as file:
            json.dump(data, file)
    except IOError:
        logging.error("Problem dumping %s file", filename)
        exit(1)
if __name__ == "__main__":
DATA = load_rentals_file(ARGS.input)
DATA = calculate_additional_fields(DATA)
save_to_json(ARGS.output, DATA)
|
[
"ethanenguyen@hotmail.com"
] |
ethanenguyen@hotmail.com
|
d7ff36848628a39077cf531d9bb5f4e27680db7f
|
e7508722c01cd5a3a88dcec1efae18d6c501c72f
|
/TestCase/test_practice_fixture.py
|
4affb7adab4669f439fb2d2f8f0efa9210921454
|
[] |
no_license
|
heqiang1992/DT
|
e8bd2bace9458a0f13888e27c823befc19c9a4c7
|
00ad9369c02d21821c4d079babdbc16766c9bbfe
|
refs/heads/master
| 2021-10-29T14:52:21.540535
| 2021-10-23T09:24:24
| 2021-10-23T09:24:24
| 188,933,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
@pytest.fixture(scope="function", params=None, autouse=False, ids=None, name=None)
def beforetest():
print("fixture: beforetest setup#$%^&*()")
@pytest.mark.usefixtures("beforetest")
def test_hello():
print("hello")
|
[
"wenjing@edmodo.com"
] |
wenjing@edmodo.com
|
5840a5608c466d0d4c2e178f14721ec7b50f2df9
|
f0e0bf9fe818cde5c76b5428723a6bd05679cd3c
|
/cronDeliveryAPI/orders/migrations/0013_auto_20200810_1042.py
|
4acb25f73c814b6790f0f25fa9a72a600427fd70
|
[] |
no_license
|
murkhan13/DeliveryAPP
|
e4339d32b29f8f617ad7bf520cb28d34eaaaa58c
|
44e72be1fe32543acaef24de616ec34f69f04878
|
refs/heads/master
| 2023-02-20T04:54:15.968788
| 2021-01-18T09:28:33
| 2021-01-18T09:28:33
| 292,862,348
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
# Generated by Django 3.0.8 on 2020-08-10 07:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0012_order_restaurant'),
]
operations = [
migrations.AlterField(
model_name='order',
name='restaurant',
field=models.CharField(blank=True, default=None, max_length=200, null=True, verbose_name='Ресторан'),
),
]
|
[
"merch.fade13@gmail.com"
] |
merch.fade13@gmail.com
|
4524f3c6f3dc84c87fc21c014c6ff6b23468839b
|
552c0a423264ca47c48f24737d62c93b6cc5dd11
|
/cars/migrations/0016_auto_20160915_1632.py
|
eff29b8135828c0b5b4bfcf628f34bc5f6b05832
|
[] |
no_license
|
YELLOWINC/car-maze
|
6dfa72e45dd493731547e964636818effccab14a
|
31b82ed01bcb2004fbe1d850c33f2f636b305457
|
refs/heads/master
| 2021-01-15T12:41:49.984750
| 2016-09-16T16:00:15
| 2016-09-16T16:00:15
| 68,349,329
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,023
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-09-15 11:02
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cars', '0015_auto_20160915_0240'),
]
operations = [
migrations.CreateModel(
name='TestDrive',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(default=datetime.datetime(2016, 9, 15, 16, 32, 57, 251958))),
('flag', models.BooleanField(default=False)),
('scheduled', models.DateTimeField(blank=True, null=True)),
('confirmed', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cars.Car')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Wishlist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(default=datetime.datetime(2016, 9, 15, 16, 32, 57, 251011))),
('flag', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cars.Car')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.RemoveField(
model_name='userprofile',
name='city',
),
migrations.RemoveField(
model_name='userprofile',
name='state',
),
migrations.RemoveField(
model_name='userprofile',
name='wishlist',
),
migrations.AlterField(
model_name='userprofile',
name='DOB',
field=models.CharField(max_length=11),
),
migrations.AlterField(
model_name='userprofile',
name='district',
field=models.CharField(choices=[('Ernakulam', 'Ernakulam'), ('Thriruvananthapuram', 'Thriruvananthapuram'), ('Kollam', 'Kollam'), ('Pathanamthitta', 'Pathanamthitta'), ('Alappuzha', 'Alappuzha'), ('Kottayam', 'Kottayam'), ('Idukki', 'Idukki'), ('Thissur', 'Thissur'), ('Palakkad', 'Palakkad'), ('Malappuram', 'Malappuram'), ('Kozhikode', 'Kozhikode'), ('Wayanad', 'Wayanad'), ('Kannur', 'Kannur'), ('Kasargod', 'Kasargod')], max_length=25),
),
]
|
[
"sreeshsmallya@gmail.com"
] |
sreeshsmallya@gmail.com
|
575e43af3805628268f12c1376a7f55e5d8b3a55
|
1f3b9e7f0009460d86d6cd2e6664d927cb271fa8
|
/RRT_holonomic.py
|
f233feba5fe58265834cf4c65a52ec02d1327ab2
|
[] |
no_license
|
gowrijsuria/RRT-PathPlanning
|
5cd8b9ebdaa8945db54c33553ad966704019c676
|
797ef75ebfc5badcb620fed9db3ee51c5b5825e7
|
refs/heads/main
| 2023-04-20T08:16:57.224236
| 2021-05-09T19:44:34
| 2021-05-09T19:44:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,683
|
py
|
import pygame
from random import randint as ri
pygame.init()
import time
import numpy as np
screen = pygame.display.set_mode([500, 550])
SIDE_x = 20
SIDE_y = 40
WINDOW_width = 440
WINDOW_height = 400
GAME_border = 3
WHITE=(255,255,255)
BLUE=(0,0,255)
BLACK=(0,0,0)
RED=(255,0,0)
GREEN=(0,255,0)
RAND=(120,120,120)
YELLOW = (0,0,102)
C1=(39,38,53)
C2=(19,51,62)
C3=(31,3,24)
screen.fill(WHITE)
INT_MAX = 100000000000000
robot_radius = 8
class Environment:
def __init__ (self, colour, x, y, width, height):
self.colour = colour
self.x = x
self.y = y
self.width = width
self.height = height
def create(self,screen):
pygame.draw.rect(screen, self.colour, [self.x, self.y,self.width ,self.height])
def point_inside_game(self,x,y):
if x>SIDE_x+GAME_border and x<SIDE_x + WINDOW_width - GAME_border:
if y>SIDE_y+GAME_border and y < SIDE_y + WINDOW_height - GAME_border:
return(True)
return(False)
def random_point(self): #Random Point Generator
x_random = ri(SIDE_x+GAME_border , SIDE_x + WINDOW_width - GAME_border - 1)
y_random = ri(SIDE_y+GAME_border , SIDE_y + WINDOW_height - GAME_border - 1 )
return((x_random, y_random))
def point_inside_rec(self,xr,yr,wr,hr,x,y): # Point inside given Rectangle ?
if x> xr and x < xr + wr:
if y > yr and y < yr + hr:
return(True)
return(False)
def p2p_dist(self,p1,p2): # Point to Point Distance
x1,y1=p1
x2,y2=p2
return ( ( (x1-x2)**2 + (y1-y2)**2 )**0.5 )
def ClickText(self): # Text on Environment
font = pygame.font.Font('freesansbold.ttf', 12)
text = font.render('CLICK HERE', True, WHITE)
textRect = text.get_rect()
textRect.center = (75, 495)
screen.blit(text, textRect)
def DesText(self,s,x=315,y=485): # Description Text
pygame.draw.rect(screen,WHITE,(125,470,500,30))
font = pygame.font.SysFont('segoeuisemilight', 15)
text = font.render('%s'%(s), True, BLACK)
textRect = text.get_rect()
#textRect.center = (255, 460)
textRect.center = (x, y)
screen.blit(text, textRect)
def ConfSpace():
#CS for circle
pygame.draw.circle(screen,BLACK,(100,150),20+robot_radius)
pygame.draw.circle(screen,BLACK,(360,300),30+robot_radius)
#CS for rectangle
pygame.draw.circle(screen,BLACK,(200,150),robot_radius)
pygame.draw.circle(screen,BLACK,(300,200),robot_radius)
pygame.draw.circle(screen,BLACK,(200,200),robot_radius)
pygame.draw.circle(screen,BLACK,(300,150),robot_radius)
# left side of rectangle
pygame.draw.rect(screen,BLACK,(200-robot_radius,150,100,50))
# right side of rectangle
pygame.draw.rect(screen,BLACK,(300,150,robot_radius,50))
# bottom side of rectangle
pygame.draw.rect(screen,BLACK,(200,150-robot_radius,100,robot_radius))
# top side of rectangle
pygame.draw.rect(screen,BLACK,(200,200,100,robot_radius))
def WheelTraj_holonomic(r, points):
r=10
theta_list=[]
print(len(points))
for i in range(len(points)-1):
px,py=points[i]
cx,cy=points[i+1]
theta=np.arctan2((py-cy),(px-cx))
theta_list.append(float(theta))
cx1=cx+r*np.cos(theta)
cy1=cy+r*np.sin(theta)
cx2=cx+r*np.cos(theta-2*(np.pi/3))
cy2=cy+r*np.sin(theta-2*(np.pi/3))
cx3=cx+r*np.cos(theta+2*(np.pi/3))
cy3=cy+r*np.sin(theta+2*(np.pi/3))
px1=px+r*np.cos(theta)
py1=py+r*np.sin(theta)
px2=px+r*np.cos(theta-2*(np.pi/3))
py2=py+r*np.sin(theta-2*(np.pi/3))
px3=px+r*np.cos(theta+2*(np.pi/3))
py3=py+r*np.sin(theta+2*(np.pi/3))
pygame.draw.line(screen, C1, (cx1,cy1), (px1,py1), 3)
pygame.draw.line(screen, C2, (cx2,cy2), (px2,py2), 3)
pygame.draw.line(screen, C3, (cx3,cy3), (px3,py3), 3)
if(len(theta_list)>1):
theta_old=theta_list[-2]
theta_new=theta_list[-1]
step=(theta_new-theta_old)/10
for j in range(10):
temp=theta_old+step*j
pygame.draw.circle(screen, C1, (int(cx+r*np.cos(temp)), int(cy+r*np.sin(temp))), 1)
pygame.draw.circle(screen, C2, (int(cx+r*np.cos(temp-2*(np.pi/3))), int(cy+r*np.sin(temp-2*(np.pi/3)))), 1)
pygame.draw.circle(screen, C3, (int(cx+r*np.cos(temp+2*(np.pi/3))), int(cy+r*np.sin(temp+2*(np.pi/3)))), 1)
def RRT(x,y,parent):
if (x,y) not in parent and screen.get_at((x,y)) != (0,0,0,255):
x_m,y_m=-1,-1
cur_min=INT_MAX
for v in parent:
if B1.p2p_dist(v,(x,y))<cur_min:
x_m,y_m=v
cur_min = B1.p2p_dist(v,(x,y))
good = True
ans=[]
theta=np.arctan2((y-y_m),(x-x_m));
for i in range(Step):
x_mid=x_m+i*np.cos(theta)
y_mid=y_m+i*np.sin(theta)
if screen.get_at((int(x_mid),int(y_mid))) == (0,0,0,255):
good=False
break
if(good):
ans=[int(x_m+(Step)*np.cos(theta)),int(y_m+Step*np.sin(theta))]
return(good,x_m,y_m,ans)
return(False,-1,-1,[])
running = True
#Environment for Game
# Grid with random Obstacles
pygame.draw.rect(screen,BLACK,(SIDE_x,SIDE_y,WINDOW_width,WINDOW_height),GAME_border)
pygame.draw.rect(screen,BLACK,(200,150,100,50))
pygame.draw.circle(screen,BLACK,(100,150),20)
pygame.draw.circle(screen,BLACK,(360,300),30)
B1 = Environment(BLACK, 25, 470, 100, 50)
B1.create(screen)
OBS=dict()
points = []
#Number of forward Steps towards random sampled point
# Step = 10
Step = 30
#Start stores a single point [Starting point- RED Point]
Start=[]
#End stores a set of destination point [Destination point- Green Point]
#Multiple points allowed to make the point appear bigger, and fast discovery,
#due to huge number of pixels in this game
End=set()
#parent stores the graph
parent=dict()
level=0
B1.ClickText()
B1.DesText("Instruction :",y=460)
B1.DesText("Click the BLACK button below to view Configuration Space for Obstacles")
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
break
if running==False:
break
m = pygame.mouse.get_pressed()
x,y = pygame.mouse.get_pos()
if m[0]==1:
if B1.point_inside_rec(B1.x,B1.y, B1.width, B1.height,x,y):
if level==0:
level+=1
B1.colour=RED
ConfSpace()
B1.DesText("Click the RED button and select the STARTING POINT")
elif level==1 and Start==[]:
level+=1
B1.colour=GREEN
B1.DesText("Click the GREEN button and select the DESTINATION POINT")
elif level==2 and Start:
level+=1
B1.colour=BLUE
B1.DesText("Click the BLUE button to view the RRT path and wheel trajectories")
elif level==3 and End!=set():
level+=1
B1.colour=BLUE
B1.DesText("Path is being explored using RRT Algorithm with wheel trajectories")
B1.create(screen)
B1.ClickText()
continue
elif level==1:
OBS[(x,y)]=1
elif level == 2 and Start==[]:
if B1.point_inside_game(x,y):
Start=(x,y)
pygame.draw.circle(screen, RED, (x, y), 10)
elif level == 3 :
if B1.point_inside_game(x,y):
End.add((x,y))
pygame.draw.circle(screen, GREEN, (x, y), 10)
if level>=4:
running = False
break
pygame.display.update()
running = True
parent[Start]=(-1,-1)
Trace=[]
Timer = time.time()
while(running):
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
break
x,y =B1.random_point()
if (time.time() - Timer) > 5:
Step=5
good,x_m,y_m,ans=RRT(x,y,parent)
if good and ans:
x_cur = ans[0]
y_cur = ans[1]
if screen.get_at((x_cur,y_cur)) != (0,0,0,255) and (x_cur,y_cur) not in parent:
parent[(x_cur,y_cur)]=(x_m,y_m)
if screen.get_at((x_cur,y_cur)) == (0, 255, 0, 255):
Trace=(x_cur,y_cur)
running = False
pygame.draw.line(screen, BLUE, (x_cur,y_cur), (x_m,y_m), 2)
pygame.display.update()
running = True
#This loop gets the route back to Start point
while(Trace and running):
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
break
while(Trace!=Start):
points.append(Trace)
x,y = parent[Trace]
pygame.draw.line(screen, GREEN, (x,y), Trace, 2)
Trace=(x,y)
pygame.display.update()
points.append(Start)
WheelTraj_holonomic(robot_radius, points)
#Quit
pygame.quit()
|
[
"gowri.jsuria@gmail.com"
] |
gowri.jsuria@gmail.com
|
7f4bc71f56886683d4b882f80e796b530ca7e98f
|
07561714394c112c846f8544e507db1e5c202344
|
/src/utils/utils.py
|
10099722e23b640e3be0ce7f2f74d878a041cb3d
|
[] |
no_license
|
manojsukhavasi/kaggle-rsna-pneumonia-detection-challenge
|
ebd1438693f5c8beaa152b2928f94db5940445fd
|
42ee044d1b0132fc35d44751c00e9edf7cd04436
|
refs/heads/master
| 2020-03-28T22:21:06.353598
| 2018-09-19T17:57:39
| 2018-09-19T17:57:39
| 149,226,025
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
from src.imports import *
def read_dicom(path):
"""
Reads dicom and gives out RGB PIL Image
"""
pd = pydicom.read_file(path)
img_arr = pd.pixel_array
img_arr = img_arr/img_arr.max()
img_arr = (255*img_arr).clip(0,255).astype(np.uint8)
img = Image.fromarray(img_arr).convert('RGB')
return img
def clean_bb_boxes(preds):
mask = preds[:,0]>0.5
preds[~mask] = torch.Tensor([])
return preds
|
[
"manoj.sukhavasi1@gmail.com"
] |
manoj.sukhavasi1@gmail.com
|
77eee7cb521aa55ec25cc3b3b823f11e5014a79e
|
f404a58e558e813d7afaec9282e65127fa223d42
|
/tweet_updater/tweety.py
|
2532bd39cb88b4d76297f9a40a44288f1fc76eca
|
[] |
no_license
|
zInnovationLab/sentiment-anaylsis-bluemix
|
153a33e72f8fe4d0179194c7236d9a9790e8fd91
|
252fc164bf66ac25d0354aa39c351086eb0c0456
|
refs/heads/master
| 2021-01-17T12:59:00.248428
| 2016-06-20T19:01:50
| 2016-06-20T19:01:50
| 58,948,854
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,669
|
py
|
from datetime import date, timedelta
import os
import json
import sys
import tweepy
import pymongo
COLLECTION_NAME = 'tweets'
# Gets the Mongo DB URL and DB from VCAP_SERVICES if present, else
# assumes the Mongo instance is running locally
url = 'mongodb://mongo_server:27017/tweets'
if os.environ.has_key('VCAP_SERVICES'):
vcapJson = json.loads(os.environ['VCAP_SERVICES'])
for key, value in vcapJson.iteritems():
#Only find the services with the name todo-mongo-db, there should only be one
mongoServices = filter(lambda s: s['name'].find('mongo') != -1, value)
if len(mongoServices) != 0:
mongoService = mongoServices[0]
if "uri" in mongoService['credentials']:
url = mongoService['credentials']['uri']
else:
url = mongoService['credentials']['url']
client = pymongo.MongoClient(url)
db = client.get_default_database()
tweet_col = db[COLLECTION_NAME]
twitter_key = os.environ.get('TWITTER_APIKEY', '''{
"consumer_key": "",
"consumer_secret": "",
"access_token": "",
"access_token_secret": ""
}''')
twitter_key = json.loads(twitter_key)
CONSUMER_KEY = twitter_key['consumer_key']
CONSUMER_SECRET = twitter_key['consumer_secret']
ACCESS_TOKEN = twitter_key['access_token']
ACCESS_TOKEN_SECRET = twitter_key['access_token_secret']
# https://dev.twitter.com/rest/reference/get/search/tweets
TWEETS_PER_PAGE=100 # max
until = date.today() - timedelta(days=7) # not used. also, has a 7 day limit
result_type = 'mixed' # popular, mixed, recent. popular doesn't return many results
# twitter auth
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
# call twitter api and return
def get_tweets(query, max_id=None):
if max_id:
res_tweets = api.search(query, max_id=max_id, count=TWEETS_PER_PAGE, show_user=True, result_type=result_type)
else:
res_tweets = api.search(query, count=TWEETS_PER_PAGE, show_user=True, result_type=result_type)
tweets = []
for tweet in res_tweets:
#tweet = tweet._json
t = {}
try:
t['id'] = str(tweet.id)
t['favCount'] = tweet.favorite_count
t['place'] = '' #tweet.place.coordinates if tweet.place else ''
t['time'] = tweet.created_at.strftime('%c')
t['text'] = tweet.text
t['coord'] = tweet.coordinates if tweet.coordinates else ''
except Exception, ex:
print repr(ex)
tweets.append(t)
print t
print '========================================================'
return tweets
def update_all(count):
# collect tweets until we reach count
#last_count = 0
with open("searches.txt", "rb") as searches:
allsearches = searches.readlines()
for query in allsearches:
tweets = []
last_count = 0
print query
while len(tweets) < count:
if len(tweets) == 0:
tweets = get_tweets(query)
else:
tweets += get_tweets(query, tweets[-1]['id'])
print "found %d tweets" % len(tweets)
if last_count == len(tweets):
break # no progress
last_count = len(tweets)
# insert all results into mongo
tweet_col.insert_many(tweets)
if __name__ == "__main__":
# get parameters
if len(sys.argv) > 1:
count = int(sys.argv[1])
else:
print "USAGE: %s count" % sys.argv[0]
sys.exit(8)
update_all(count)
|
[
"ivandov@us.ibm.com"
] |
ivandov@us.ibm.com
|
cfcfe9e9a35e6480dc98c66e8627b6ea00972a94
|
cab67e7629c8193b80f525245371065c8183d4d1
|
/venv/lib/python3.8/site-packages/arcade/examples/perf_test/stress_test_draw_moving_arcade.py
|
9552cc446755e9bac11c04e7fbd0aaf393f58d2a
|
[] |
no_license
|
pablo2811/CVD-simulator
|
31bd9fbc9d4795d1332712fe0f8da6729d8918e7
|
17c6125d1efdca5abcb9b5957c435f55e28b0ae3
|
refs/heads/master
| 2022-12-28T16:58:16.531530
| 2020-10-12T18:25:54
| 2020-10-12T18:25:54
| 303,478,485
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,019
|
py
|
"""
Moving Sprite Stress Test
Simple program to test how fast we can draw sprites that are moving
Artwork from http://kenney.nl
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.stress_test_draw_moving
"""
import random
import arcade
import os
import timeit
import time
import collections
import pyglet
# --- Constants ---
SPRITE_SCALING_COIN = 0.25
SPRITE_NATIVE_SIZE = 128
SPRITE_SIZE = int(SPRITE_NATIVE_SIZE * SPRITE_SCALING_COIN)
COIN_COUNT_INCREMENT = 500
STOP_COUNT = 10000
RESULTS_FILE = "stress_test_draw_moving_arcade.csv"
SCREEN_WIDTH = 1800
SCREEN_HEIGHT = 1000
SCREEN_TITLE = "Moving Sprite Stress Test"
class FPSCounter:
def __init__(self):
self.time = time.perf_counter()
self.frame_times = collections.deque(maxlen=60)
def tick(self):
t1 = time.perf_counter()
dt = t1 - self.time
self.time = t1
self.frame_times.append(dt)
def get_fps(self):
total_time = sum(self.frame_times)
if total_time == 0:
return 0
else:
return len(self.frame_times) / sum(self.frame_times)
class Coin(arcade.Sprite):
def update(self):
"""
Update the sprite.
"""
self.position = (self.position[0] + self.change_x, self.position[1] + self.change_y)
class MyGame(arcade.Window):
""" Our custom Window Class"""
def __init__(self):
""" Initializer """
# Call the parent class initializer
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Variables that will hold sprite lists
self.coin_list = None
self.processing_time = 0
self.draw_time = 0
self.program_start_time = timeit.default_timer()
self.sprite_count_list = []
self.fps_list = []
self.processing_time_list = []
self.drawing_time_list = []
self.last_fps_reading = 0
self.fps = FPSCounter()
arcade.set_background_color(arcade.color.AMAZON)
# Open file to save timings
self.results_file = open(RESULTS_FILE, "w")
def add_coins(self):
# Create the coins
for i in range(COIN_COUNT_INCREMENT):
# Create the coin instance
# Coin image from kenney.nl
coin = Coin(":resources:images/items/coinGold.png", SPRITE_SCALING_COIN)
# Position the coin
coin.center_x = random.randrange(SPRITE_SIZE, SCREEN_WIDTH - SPRITE_SIZE)
coin.center_y = random.randrange(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE)
coin.change_x = random.randrange(-3, 4)
coin.change_y = random.randrange(-3, 4)
# Add the coin to the lists
self.coin_list.append(coin)
def setup(self):
""" Set up the game and initialize the variables. """
# Sprite lists
self.coin_list = arcade.SpriteList(use_spatial_hash=False)
def on_draw(self):
""" Draw everything """
# Start timing how long this takes
draw_start_time = timeit.default_timer()
arcade.start_render()
self.coin_list.draw()
# Display info on sprites
# output = f"Sprite count: {len(self.coin_list):,}"
# arcade.draw_text(output, 20, SCREEN_HEIGHT - 20, arcade.color.BLACK, 16)
#
# # Display timings
# output = f"Processing time: {self.processing_time:.3f}"
# arcade.draw_text(output, 20, SCREEN_HEIGHT - 40, arcade.color.BLACK, 16)
#
# output = f"Drawing time: {self.draw_time:.3f}"
# arcade.draw_text(output, 20, SCREEN_HEIGHT - 60, arcade.color.BLACK, 16)
#
# fps = self.fps.get_fps()
# output = f"FPS: {fps:3.0f}"
# arcade.draw_text(output, 20, SCREEN_HEIGHT - 80, arcade.color.BLACK, 16)
self.draw_time = timeit.default_timer() - draw_start_time
self.fps.tick()
def update(self, delta_time):
# Start update timer
start_time = timeit.default_timer()
self.coin_list.update()
for sprite in self.coin_list:
if sprite.position[0] < 0:
sprite.change_x *= -1
elif sprite.position[0] > SCREEN_WIDTH:
sprite.change_x *= -1
if sprite.position[1] < 0:
sprite.change_y *= -1
elif sprite.position[1] > SCREEN_HEIGHT:
sprite.change_y *= -1
# Save the time it took to do this.
self.processing_time = timeit.default_timer() - start_time
# Total time program has been running
total_program_time = int(timeit.default_timer() - self.program_start_time)
# Print out stats, or add more sprites
if total_program_time > self.last_fps_reading:
self.last_fps_reading = total_program_time
# It takes the program a while to "warm up", so the first
# few seconds our readings will be off. So wait some time
# before taking readings
if total_program_time > 5:
# We want the program to run for a while before taking
# timing measurements. We don't want the time it takes
# to add new sprites to be part of that measurement. So
# make sure we have a clear second of nothing but
# running the sprites, and not adding the sprites.
if total_program_time % 2 == 1:
# Take timings
output = f"{total_program_time}, {len(self.coin_list)}, {self.fps.get_fps():.1f}, " \
f"{self.processing_time:.4f}, {self.draw_time:.4f}\n"
self.results_file.write(output)
print(output, end="")
if len(self.coin_list) >= STOP_COUNT:
pyglet.app.exit()
return
self.sprite_count_list.append(len(self.coin_list))
self.fps_list.append(round(self.fps.get_fps(), 1))
self.processing_time_list.append(self.processing_time)
self.drawing_time_list.append(self.draw_time)
# Now add the coins
self.add_coins()
def main():
""" Main method """
window = MyGame()
window.setup()
arcade.run()
if __name__ == "__main__":
main()
|
[
"fijalkowskipablo@gmail.com"
] |
fijalkowskipablo@gmail.com
|
51b4c0c2bea9491a67c860e630caba768c5da03f
|
f87587a22c7cf4714f3130636a85fe81f8a0d448
|
/Python/Exercicios Coursera/Semana 3/Exercicio2_1.py
|
7209e23499b0bef93e5b00edc191af2a23021e1a
|
[] |
no_license
|
fernandosergio/Documentacoes
|
6065dec7d9fbd67dc86280322247e53e8c95f29e
|
13d605cae58317bb1666294c1e8f3927755f4ce8
|
refs/heads/master
| 2023-04-01T10:47:13.076027
| 2021-04-05T19:43:54
| 2021-04-05T19:43:54
| 290,798,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
x1 = int(input("Digite o valor do x1 no plano cartesiano: "))
y1 = int(input("Digite o valor do y1 no plano cartesiano: "))
x2 = int(input("Digite o valor do x2 no plano cartesiano: "))
y2 = int(input("Digite o valor do y2 no plano cartesiano: "))
import math
valorx = (x1 - x2) ** 2
valory = (y1 - y2) ** 2
distancia = math.sqrt(valorx + valory)
if distancia < 10 :
print("perto")
else:
print("longe")
|
[
"46656725+fyzn@users.noreply.github.com"
] |
46656725+fyzn@users.noreply.github.com
|
04a136eac2da6f980a9fdf007fe19353aec45133
|
b55b9b54c66522b214c9e2016c60dfb9dbcc0015
|
/tests/python/gaiatest/tests/test_browser_cell_data.py
|
0ea2e24dbd90fe830d3b35e72fed768b6823d239
|
[
"Apache-2.0"
] |
permissive
|
enterstudio/gaia
|
92f5b6fb597eef52bcbd3db9e78066befa24bd1a
|
36bcd46a56e2d066d629e5e82681b1af063db0f7
|
refs/heads/master
| 2022-10-21T10:04:13.419441
| 2013-01-02T22:57:12
| 2013-01-02T22:58:44
| 109,246,945
| 0
| 0
|
NOASSERTION
| 2022-10-17T02:19:24
| 2017-11-02T09:56:57
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
class TestBrowserCellData(GaiaTestCase):
# Firefox/chrome locators
_awesome_bar_locator = ("id", "url-input")
_url_button_locator = ("id", "url-button")
_throbber_locator = ("id", "throbber")
_browser_frame_locator = ('css selector', 'iframe[mozbrowser]')
def setUp(self):
GaiaTestCase.setUp(self)
self.data_layer.disable_wifi()
self.data_layer.enable_cell_data()
# launch the app
self.app = self.apps.launch('Browser')
def test_browser_cell_data(self):
# https://moztrap.mozilla.org/manage/case/1328/
awesome_bar = self.marionette.find_element(*self._awesome_bar_locator)
awesome_bar.click()
awesome_bar.send_keys('http://mozqa.com/data/firefox/layout/mozilla.html')
self.marionette.find_element(*self._url_button_locator).click()
# Bump up the timeout due to slower cell data speeds
self.wait_for_condition(lambda m: not self.is_throbber_visible(), timeout=40)
browser_frame = self.marionette.find_element(
*self._browser_frame_locator)
self.marionette.switch_to_frame(browser_frame)
heading = self.marionette.find_element('id', 'page-title')
self.assertEqual(heading.text, 'We believe that the internet should be public, open and accessible.')
def tearDown(self):
# close the app
if hasattr(self, 'app'):
self.apps.kill(self.app)
self.data_layer.disable_cell_data()
GaiaTestCase.tearDown(self)
def is_throbber_visible(self):
return self.marionette.find_element(*self._throbber_locator).get_attribute('class') == 'loading'
|
[
"jgriffin@mozilla.com"
] |
jgriffin@mozilla.com
|
45887ae16823fc001b1f21088f29542dcaf7bd15
|
7a9900b0591d8e87b58c4d5104d37f61898af681
|
/curses.py
|
fda8868696dbf4652c0bc2a1f51c209027ead248
|
[
"MIT"
] |
permissive
|
greylurk/mr-v1
|
5ef9b70e59bf27f760a97074286a5b2d36966984
|
baad86be342e3d460ed746de99c421e3f0245f71
|
refs/heads/master
| 2021-01-20T12:09:58.266385
| 2014-03-19T14:06:13
| 2014-03-19T14:06:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
import curses
import locale
class ControlPanel:
def __init__(self):
self.sensors = []
self.actuators = []
self.screen = curses.initscr()
curses.noecho()
curses.cbreak()
self.screen.keypad(1)
locale.setLocale(locale.LC_ALL,'')
def close(self):
curses.nocbreak()
self.screen.keypad(0)
curses.echo()
curses.endwin()
|
[
"adamn@sparkpi.(none)"
] |
adamn@sparkpi.(none)
|
2d38cbbe7e6389f185c8e82dd8a4f4b9c64aeadd
|
12abdc9a83b03902b75429e73d17bbbb8a431110
|
/images/openstack/fs/openstack/environments/image_importer/bigip_image_import.py
|
5fec512f6b16cfbcef636369e58b9b70d7e8abee
|
[
"MIT"
] |
permissive
|
jgruber/f5-super-netops-container
|
e52ff7a1c841d94498e944f35d91613d861d9e71
|
500b3f81c609d24b0901e557e8a9c1317df9526f
|
refs/heads/master
| 2021-01-21T20:59:33.645912
| 2017-11-21T22:50:36
| 2017-11-21T22:50:36
| 92,293,549
| 0
| 0
| null | 2017-05-24T13:10:13
| 2017-05-24T13:10:12
| null |
UTF-8
|
Python
| false
| false
| 26,712
|
py
|
# coding=utf-8
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import glob
import zipfile
import tarfile
import time
import re
import requests
import paramiko
import socket
import uuid
import keystoneclient.v3.client as ksclient
import keystoneauth1
import neutronclient.v2_0.client as netclient
import novaclient.client as compclient
import glanceclient.v2.client as gclient
import heatclient.client as heatclient
UBUNTU_IMAGE = 'http://cloud-images.ubuntu.com/trusty/current/' + \
'trusty-server-cloudimg-amd64-disk1.img'
BIGIP_TAR_IMAGE = 'bigipzips.tar'
WEB_SERVER_TEMPLATE = './bigip_image_importer_webserver.yaml'
# GIT copy is out of date with Ubuntu repo
# F5_IMAGE_TEMPLATE = 'https://raw.githubusercontent.com/F5Networks/' + \
# 'f5-openstack-heat/master/f5_supported/ve/images/' + \
# 'patch_upload_ve_image.yaml'
F5_IMAGE_TEMPLATE = './patch_upload_ve_image.yaml'
CONTAINERFORMAT = 'bare'
DISKFORMAT = 'qcow2'
VE_PROPS = {
'.*ALL.qcow2.zip$': {
'metadata': {
'os_product':
'F5 TMOS Virtual Edition for All Modules. ' +
'160G disk, 8 or 16G RAM, 4 or 8 vCPUs.'
},
'min_disk': 160,
'min_ram': 8192
},
'.*LTM.qcow2.zip$': {
'metadata': {
'os_product':
'F5 TMOS Virtual Edition for Local Traffic Manager. ' +
'40G disk, 4 or 8G RAM, 2 or 4 vCPUs.'
},
'min_disk': 40,
'min_ram': 4096
},
'.*LTM_1SLOT.qcow2.zip$': {
'metadata': {
'os_product':
'F5 TMOS Virtual Edition for Local Traffic Manager. ' +
' - Small Footprint Single Version. 8G disk, 2G RAM, 1 vCPUs.'
},
'min_disk': 8,
'min_ram': 2048
},
'^iWorkflow': {
'metadata': {
'os_product':
'F5 TMOS Virtual Edition for iWorkflow ' +
'Orchestration Services. 160G disk, 4G RAM, 2 vCPUs.'
},
'min_disk': 160,
'min_ram': 4096
},
'^BIG-IQ.*qcow2.zip': {
'metadata': {
'os_product':
'F5 TMOS Virtual Edition for BIG-IQ ' +
'Configuration Management Server. 160G disk, 4G RAM, 2 vCPUs.'
},
'min_disk': 160,
'min_ram': 4096
},
'^BIG-IQ.*LARGE.qcow2.zip': {
'metadata': {
'os_product':
'F5 TMOS Virtual Edition for BIG-IQ ' +
'Configuration Management Server. 500G disk, 4G RAM, 2 vCPUs.'
},
'min_disk': 500,
'min_ram': 4096
}
}
def _make_bigip_inventory():
if 'IMAGE_DIR' not in os.environ:
return None
bigip_images = {}
# BIGIP and BIG-IQ Image Packages
for f5file in glob.glob("%s/BIG*.zip" % os.environ['IMAGE_DIR']):
vepackage = zipfile.ZipFile(f5file)
filename = os.path.basename(f5file)
for packed in vepackage.filelist:
if packed.filename.startswith(filename[:8]) and \
packed.filename.endswith('qcow2'):
f5_version = 13
if filename not in bigip_images:
bigip_images[filename] = {'image': None,
'datastor': None,
'readyimage': None,
'file': f5file,
'archname': filename}
if packed.filename.find('DATASTOR') > 0:
bigip_images[filename]['datastor'] = packed.filename
elif packed.filename.find('BIG-IQ') > 0:
bigip_images[filename]['image'] = packed.filename
else:
last_dash = filename.rfind('-')
first_dot = filename.find('.')
f5_version = int(filename[last_dash+1:first_dot])
if f5_version < 13:
bigip_images[filename]['image'] = packed.filename
else:
bigip_images[filename]['readyimage'] = packed.filename
# iWorkflow Image Packages
for f5file in glob.glob("%s/iWorkflow*.zip" % os.environ['IMAGE_DIR']):
vepackage = zipfile.ZipFile(f5file)
filename = os.path.basename(f5file)
for packed in vepackage.filelist:
if packed.filename.startswith(filename[:8]) and \
packed.filename.endswith('qcow2'):
f5_version = 13
if filename not in bigip_images:
bigip_images[filename] = {'image': None,
'datastor': None,
'readyimage': None,
'file': f5file,
'archname': filename}
if packed.filename.find('DATASTOR') > 0:
bigip_images[filename]['datastor'] = packed.filename
elif packed.filename.find('Workflow') > 0:
bigip_images[filename]['image'] = packed.filename
else:
last_dash = filename.rfind('-')
first_dot = filename.find('.')
f5_version = int(filename[last_dash+1:first_dot])
if f5_version < 13:
bigip_images[filename]['image'] = packed.filename
else:
bigip_images[filename]['readyimage'] = packed.filename
return bigip_images
def _images_needing_import(bigip_images):
    """Drop inventory entries whose final image already exists in Glance.

    Mutates and returns *bigip_images*.  The Glance client and the list of
    existing image names are fetched once (the original recreated the client
    and re-listed every image on each loop iteration).
    """
    gc = _get_glance_client()
    existing_names = set(img.name for img in gc.images.list())
    # Iterate over a snapshot of the keys so deletion during the loop is safe
    # on both Python 2 and Python 3.
    for image in list(bigip_images.keys()):
        final_image_name = image.replace('.qcow2.zip', '')
        if final_image_name in existing_names:
            del bigip_images[image]
    return bigip_images
def _get_keystone_session(project_name=None):
    """Build a Keystone v3 session from the OS_* environment variables.

    Falls back to OS_TENANT_NAME when no project name is supplied.
    """
    from keystoneauth1.identity import v3

    # Force the v3 identity endpoint even when OS_AUTH_URL names v2.0.
    auth_url = str(os.environ['OS_AUTH_URL']).replace('2.0', '3')
    project_domain_id = os.environ.get('OS_DOMAIN_ID', 'default')
    user_domain_id = os.environ.get('OS_USER_DOMAIN_ID', 'default')
    if not project_name:
        project_name = os.environ['OS_TENANT_NAME']
    auth = v3.Password(username=os.environ['OS_USERNAME'],
                       password=os.environ['OS_PASSWORD'],
                       project_name=project_name,
                       user_domain_id=user_domain_id,
                       project_domain_id=project_domain_id,
                       auth_url=auth_url)
    # verify=False: lab endpoints commonly use self-signed certificates.
    return keystoneauth1.session.Session(auth=auth, verify=False)
def _get_keystone_client():
    """Keystone identity client bound to the default environment session."""
    session = _get_keystone_session()
    return ksclient.Client(session=session)
def _get_neutron_client():
    """Neutron networking client bound to the default environment session."""
    session = _get_keystone_session()
    return netclient.Client(session=session)
def _get_nova_client():
    """Nova compute client (API 2.1) bound to the default session."""
    session = _get_keystone_session()
    return compclient.Client('2.1', session=session)
def _get_glance_client():
    """Glance image client bound to the default environment session."""
    session = _get_keystone_session()
    return gclient.Client(session=session)
def _get_heat_client(tenant_name=None, tenant_id=None):
    """Build a Heat orchestration client scoped to the given tenant.

    Either argument may be supplied; the missing one is resolved via
    Keystone, and both default to OS_TENANT_NAME when omitted.
    """
    kc = _get_keystone_client()
    # An explicit tenant_id wins: resolve its name for the session below.
    if tenant_id:
        tenant_name = kc.projects.get(tenant_id).name
    if not tenant_name:
        tenant_name = os.environ['OS_TENANT_NAME']
    if not tenant_id:
        tenant_id = kc.projects.find(name=tenant_name).id
    ks = _get_keystone_session(project_name=tenant_name)
    # Look up the public orchestration endpoint and fill in the tenant id
    # placeholder the catalog uses.
    heat_sid = kc.services.find(type='orchestration').id
    heat_url = kc.endpoints.find(service_id=heat_sid, interface='public').url
    heat_url = heat_url.replace('%(tenant_id)s', tenant_id)
    return heatclient.Client('1', endpoint=heat_url, token=ks.get_token())
def _download_file(url):
    """Download *url* into IMAGE_DIR, reusing a cached copy when present.

    Returns the local file path.  The response is streamed to disk in
    512 KiB chunks so multi-gigabyte images are never buffered fully in
    memory, and the output file is closed even if the download fails.
    """
    local_filename = url.split('/')[-1]
    cached_file = "%s/%s" % (os.environ['IMAGE_DIR'], local_filename)
    if os.path.isfile(cached_file):
        return cached_file
    # stream=True defers body download to iter_content below.
    r = requests.get(url, stream=True)
    with open(cached_file, 'wb') as f:
        for chunk in r.iter_content(chunk_size=512 * 1024):
            if chunk:  # skip keep-alive chunks
                f.write(chunk)
    return cached_file
def _upload_image_to_glance(local_file_name, image_name, is_public):
    """Create a Glance image record and upload the qcow2 payload into it.

    Returns the new image's id.  Visibility is 'public' when *is_public*
    is truthy, otherwise 'private'.
    """
    gc = _get_glance_client()
    visibility = 'public' if is_public else 'private'
    img_model = gc.images.create(
        name=image_name,
        disk_format=DISKFORMAT,
        container_format=CONTAINERFORMAT,
        visibility=visibility
    )
    # Close the source file deterministically instead of leaking the handle
    # until garbage collection.
    with open(local_file_name, 'rb') as source:
        gc.images.upload(img_model.id, source)
    return img_model.id
def _get_import_image_id():
    """Return the Glance id of the importer guest image.

    Downloads and uploads the Ubuntu cloud image as a private
    'f5-Image-Importer' image when it does not exist yet.
    """
    image_name = 'f5-Image-Importer'
    gc = _get_glance_client()
    importer_id = None
    # Keep the id of the last matching image, as before.
    for candidate in gc.images.list():
        if candidate.name == image_name:
            importer_id = candidate.id
    if importer_id:
        return importer_id
    local_filename = _download_file(UBUNTU_IMAGE)
    return _upload_image_to_glance(local_filename, image_name, False)
def _get_external_net_id():
    """Return the id of a router:external network, or None when absent.

    When several external networks exist, the last one listed wins
    (matching the original scan-and-overwrite behavior).
    """
    nc = _get_neutron_client()
    external_ids = [net['id'] for net in nc.list_networks()['networks']
                    if net['router:external']]
    return external_ids[-1] if external_ids else None
def _allocate_floating_ip(port_id):
    """Allocate a floating IP on the external network and bind it to *port_id*.

    Returns the floating IP address string.  Implicitly returns None when
    no external network exists or *port_id* is falsy.
    """
    ext_id = _get_external_net_id()
    if ext_id and port_id:
        floating_obj = {'floatingip': {'floating_network_id': ext_id,
                                       'port_id': port_id}}
        nc = _get_neutron_client()
        floating_ip = nc.create_floatingip(floating_obj)
        return floating_ip['floatingip']['floating_ip_address']
def _create_web_server(download_server_image, ext_net):
    """Launch the image-repo web server Heat stack and wait for it.

    Polls the stack every 5 seconds until it reaches a terminal state;
    exits the process on CREATE_FAILED or DELETE_COMPLETE.  Returns the
    new stack's id.
    """
    image_importer_web_server_template = open(WEB_SERVER_TEMPLATE, 'r').read()
    hc = _get_heat_client()
    web_server_stack_id = hc.stacks.create(
        disable_rollback=True,
        parameters={'external_network': ext_net,
                    'web_app_image': download_server_image},
        stack_name='image_importer_web_server',
        environment={},
        template=image_importer_web_server_template
    )['stack']['id']
    stack_completed = ['CREATE_COMPLETE',
                       'CREATE_FAILED',
                       'DELETE_COMPLETE']
    print " "
    while True:
        s = hc.stacks.get(web_server_stack_id)
        # Trailing comma (Python 2 print) keeps the status on one line;
        # '\r' rewinds the cursor so each poll overwrites the last.
        print '\tImage importer status: %s \r' % s.stack_status,
        if s.stack_status in stack_completed:
            if s.stack_status == 'CREATE_FAILED':
                print "Image importer web server create failed"
                sys.exit(1)
            if s.stack_status == 'DELETE_COMPLETE':
                print "Image importer web server was deleted"
                sys.exit(1)
            break
        else:
            sys.stdout.flush()
            time.sleep(5)
    print " "
    return web_server_stack_id
def _is_port_open(ip, port):
    """Return True when a TCP connection to (ip, port) succeeds.

    Any connection failure yields False.  The socket is always closed
    (the original leaked it), and only Exception is caught so that
    KeyboardInterrupt/SystemExit still propagate (the original used a
    bare ``except:``).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((ip, int(port)))
        s.shutdown(2)
        return True
    except Exception:
        return False
    finally:
        s.close()
def _make_bigip_zip_tar_file(bigip_images):
    """Bundle every inventoried zip archive into one tar file in IMAGE_DIR.

    The archive is named BIGIP_TAR_IMAGE and each member is stored under
    its original archive basename.
    """
    tar_file_name = "%s/%s" % (os.environ['IMAGE_DIR'], BIGIP_TAR_IMAGE)
    tar = tarfile.open(tar_file_name, 'w')
    # Guarantee the tar is finalized even when adding a member fails
    # (the original leaked the handle on exception).
    try:
        for image in bigip_images:
            tar.add(bigip_images[image]['file'],
                    arcname=bigip_images[image]['archname'])
    finally:
        tar.close()
def sftp_print_totals(transferred, toBeTransferred):
    """Paramiko SFTP progress callback: print percent uploaded in place.

    The trailing comma (Python 2 print) plus '\r' keeps updating a single
    console line.
    """
    percent_uploaded = 100 * float(transferred)/float(toBeTransferred)
    print '\tTransferred: %d of %d bytes [%d%%]\r' % (
        transferred, toBeTransferred, int(percent_uploaded)),
def _upload_bigip_zips_to_web_server(web_server_floating_ip, bigip_images):
    """SCP every inventoried zip to the repo web server's document root.

    Waits for SSH to come up, removes the default index.html, then for
    each archive uploads to /tmp and sudo-moves it into /var/www/html.
    Credentials are the ubuntu/openstack pair baked into the guest image.
    """
    print " "
    # wait for web server to answer SSH
    while True:
        if _is_port_open(web_server_floating_ip, 22):
            print "\tSSH is reachable on web server\n"
            # Extra settle time: sshd may accept TCP before auth is ready.
            time.sleep(10)
            break
        time.sleep(5)
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(web_server_floating_ip,
                username='ubuntu', password='openstack')
    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
        'sudo rm /var/www/html/index.html'
    )
    for image in bigip_images:
        zip_file = bigip_images[image]['file']
        # Fresh transport per file; progress is reported by sftp_print_totals.
        transport = paramiko.Transport((web_server_floating_ip, 22))
        transport.connect(username='ubuntu', password='openstack')
        scp = paramiko.SFTPClient.from_transport(transport)
        print "\tscp %s to server %s" % (zip_file, web_server_floating_ip)
        print " "
        scp.put(zip_file, '/tmp/%s' % image, callback=sftp_print_totals)
        print "\n"
        # deploy the image to the web servers
        ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
            'sudo mv /tmp/%s /var/www/html/' % image
        )
        print "\tAvailable http://%s/%s" % (web_server_floating_ip, image)
    print "\n"
def _get_heat_output_value(stack_id, output_name):
    """Return the value of the named Heat stack output, or None if missing."""
    stack = _get_heat_client().stacks.get(stack_id)
    matches = (item['output_value'] for item in stack.outputs
               if item['output_key'] == output_name)
    return next(matches, None)
def _create_glance_images(f5_heat_template_file, download_server_image,
                          web_server_stack_id, bigip_images):
    """Patch and upload every inventoried F5 image into Glance.

    Three image kinds per archive:
      * 'image'      - classic qcow2: a Heat stack boots an importer guest
                       that downloads the zip from the repo web server,
                       patches it and uploads it to Glance;
      * 'readyimage' - v13+ qcow2: extracted locally and uploaded as-is;
      * 'datastor'   - optional volume image: extracted and uploaded.
    A throwaway nova keypair is created for the importer guests and
    deleted on exit (including the failure paths).
    """
    f5_image_template = open(f5_heat_template_file, 'r').read()
    image_prep_key = "importer_%s" % uuid.uuid4()
    cc = _get_nova_client()
    cc.keypairs.create(image_prep_key)
    print " "
    for image in bigip_images:
        if bigip_images[image]['image']:
            image_name = bigip_images[image]['image']
            glance_image_name = image_name.replace('.qcow2', '')
            final_image_name = image.replace('.qcow2.zip', '')
            gc = _get_glance_client()
            create_image = True
            for uploaded_image in gc.images.list():
                if uploaded_image.name == final_image_name:
                    create_image = False
            if not create_image:
                print "\tImage with name %s exists. Skipping." % \
                    final_image_name
            else:
                print "\tCreating image for %s" % image
                hc = _get_heat_client()
                private_network = _get_heat_output_value(web_server_stack_id,
                                                         'import_network_id')
                web_server_floating_ip = _get_heat_output_value(
                    web_server_stack_id, 'web_server_public_ip')
                f5_ve_image_url = "http://%s/%s" % (web_server_floating_ip,
                                                    image)
                ipu = "https://github.com/F5Networks/" + \
                    "f5-openstack-image-prep.git"
                # Launch the importer guest stack; it pulls the zip over
                # HTTP, patches the image and uploads it using the OS_*
                # credentials passed through as parameters.
                image_stack_id = hc.stacks.create(
                    disable_rollback=True,
                    parameters={"onboard_image": download_server_image,
                                "flavor": "m1.medium",
                                "use_config_drive": True,
                                "private_network": private_network,
                                "f5_image_import_auth_url": os.environ[
                                    'OS_AUTH_URL'],
                                "f5_image_import_tenant": os.environ[
                                    'OS_TENANT_NAME'],
                                "f5_image_import_user": os.environ[
                                    'OS_USERNAME'],
                                "f5_image_import_password": os.environ[
                                    'OS_PASSWORD'],
                                "image_prep_url": ipu,
                                "f5_ve_image_name": image_name,
                                "f5_ve_image_url": f5_ve_image_url,
                                "image_prep_key": image_prep_key,
                                "apt_cache_proxy_url": None,
                                "os_distro": "mitaka"
                                },
                    stack_name="image_importer",
                    environment={},
                    template=f5_image_template
                )['stack']['id']
                stack_completed = ['CREATE_COMPLETE',
                                   'CREATE_FAILED',
                                   'DELETE_COMPLETE']
                # Poll until the importer stack terminates; clean up the
                # keypair before bailing out on failure.
                while True:
                    s = hc.stacks.get(image_stack_id)
                    print '\tImage importer status: %s \r' % s.stack_status,
                    sys.stdout.flush()
                    if s.stack_status in stack_completed:
                        if s.stack_status == 'CREATE_FAILED':
                            print "\tImage importer web server create failed"
                            print " "
                            cc = _get_nova_client()
                            cc.keypairs.delete(image_prep_key)
                            sys.exit(1)
                        if s.stack_status == 'DELETE_COMPLETE':
                            print "\tImage importer web server was deleted"
                            print " "
                            cc = _get_nova_client()
                            cc.keypairs.delete(image_prep_key)
                            sys.exit(1)
                        break
                    else:
                        time.sleep(5)
                print " "
                print "\tSUCCESS - Image patched and uploaded."
                hc = _get_heat_client()
                hc.stacks.delete(image_stack_id)
                # Fix the name to reflect the actual BIG-IP release name
                gc = _get_glance_client()
                for uploaded_image in gc.images.list():
                    if uploaded_image.name == glance_image_name:
                        image_properties = {
                            'os_vendor': 'F5 Networks',
                            'os_name': 'F5 Traffic Management Operating System'
                        }
                        # Merge per-VE-type metadata/min sizes from VE_PROPS
                        # whose regex key matches the archive name.
                        for ve_type in VE_PROPS:
                            p = re.compile(ve_type)
                            match = p.match(image)
                            if match:
                                image_properties.update(
                                    VE_PROPS[ve_type]['metadata'])
                                min_disk = 0
                                min_ram = 0
                                if 'min_disk' in VE_PROPS[ve_type]:
                                    min_disk = VE_PROPS[ve_type]['min_disk']
                                if 'min_ram' in VE_PROPS[ve_type]:
                                    min_ram = VE_PROPS[ve_type]['min_ram']
                                gc.images.update(uploaded_image.id,
                                                 name=final_image_name,
                                                 visibility='public',
                                                 min_disk=min_disk,
                                                 min_ram=min_ram,
                                                 **image_properties)
                # Let last image stack delete
                stack_completed = ['DELETE_COMPLETE']
                hc = _get_heat_client()
                while True:
                    s = hc.stacks.get(image_stack_id)
                    print '\tImage importer status: %s \r' % s.stack_status,
                    sys.stdout.flush()
                    if s.stack_status in stack_completed:
                        break
                    else:
                        time.sleep(5)
        # Add readyimage if defined
        if bigip_images[image]['readyimage']:
            gc = _get_glance_client()
            image_name = bigip_images[image]['readyimage']
            glance_image_name = image_name.replace('.qcow2', '')
            final_image_name = image.replace('.qcow2.zip', '')
            create_image = True
            for uploaded_image in gc.images.list():
                if uploaded_image.name == final_image_name:
                    create_image = False
            if not create_image:
                print "\tImage with name %s exists. Skipping." % \
                    final_image_name
            else:
                print "\tCreating Ready image %s" % glance_image_name
                # Ready images need no patching: extract and upload directly.
                vepackage = zipfile.ZipFile(bigip_images[image]['file'])
                vepackage.extract(bigip_images[image]['readyimage'])
                image_id = _upload_image_to_glance(
                    bigip_images[image]['readyimage'],
                    glance_image_name, True
                )
                # Fix the name to reflect the actual BIG-IP release name
                gc = _get_glance_client()
                for uploaded_image in gc.images.list():
                    if uploaded_image.name == glance_image_name:
                        image_properties = {
                            'os_vendor': 'F5 Networks',
                            'os_name': 'F5 Traffic Management Operating System'
                        }
                        for ve_type in VE_PROPS:
                            p = re.compile(ve_type)
                            match = p.match(image)
                            if match:
                                image_properties.update(
                                    VE_PROPS[ve_type]['metadata'])
                                min_disk = 0
                                min_ram = 0
                                if 'min_disk' in VE_PROPS[ve_type]:
                                    min_disk = VE_PROPS[ve_type]['min_disk']
                                if 'min_ram' in VE_PROPS[ve_type]:
                                    min_ram = VE_PROPS[ve_type]['min_ram']
                                gc.images.update(uploaded_image.id,
                                                 name=final_image_name,
                                                 visibility='public',
                                                 min_disk=min_disk,
                                                 min_ram=min_ram,
                                                 **image_properties)
                # Remove the locally extracted qcow2.
                os.unlink(bigip_images[image]['readyimage'])
        # Add datastor if defined
        if bigip_images[image]['datastor']:
            gc = _get_glance_client()
            datastor_name = bigip_images[image]['datastor'].replace(
                '.qcow2', '')
            create_datastor_image = True
            for uploaded_image in gc.images.list():
                if uploaded_image.name == datastor_name:
                    create_datastor_image = False
                    break
            if create_datastor_image:
                print "\tCreating Datastor image %s" % datastor_name
                vepackage = zipfile.ZipFile(bigip_images[image]['file'])
                vepackage.extract(bigip_images[image]['datastor'])
                properties = {'os_vendor': 'F5 Networks',
                              'os_name': 'F5 TMOS Datastor Volume'}
                image_id = _upload_image_to_glance(
                    bigip_images[image]['datastor'],
                    datastor_name, True
                )
                gc.images.update(
                    image_id,
                    name=datastor_name,
                    disk_format=DISKFORMAT,
                    container_format=CONTAINERFORMAT,
                    visibility='public',
                    **properties
                )
                os.unlink(bigip_images[image]['datastor'])
    print "\n"
    # Remove the throwaway keypair created for the importer guests.
    cc = _get_nova_client()
    cc.keypairs.delete(image_prep_key)
def main():
print "Finding F5 image zip archives"
bigip_images = _make_bigip_inventory()
if not bigip_images:
print "No TMOS zip archives. Please place F5 zip files " + \
" in the directory associaed wtih ENV variable IMAGE_DIR"
sys.exit(1)
bigip_images = _images_needing_import(bigip_images)
if not bigip_images:
print "All images already imported"
sys.exit(1)
# external network
print "Finding external networking"
ext_net = _get_external_net_id()
if not ext_net:
print "No external network found. You need an network " \
"with router:external attribute set to True"
sys.exit(1)
# get supported Image template
print "Downloading F5 image patch Heat template"
# GIT copy is out of date with Ubuntu repo
# f5_heat_template_file = _download_file(F5_IMAGE_TEMPLATE)
f5_heat_template_file = F5_IMAGE_TEMPLATE
# create the download glance image
print "Getting image to build importer guest instance"
download_server_image = _get_import_image_id()
# create web server as an image repo
print "Creating web server for F5 image repo"
web_server_stack_id = _create_web_server(download_server_image, ext_net)
web_server_floating_ip = _get_heat_output_value(web_server_stack_id,
'web_server_public_ip')
print "\tweb server available at: %s \n" % web_server_floating_ip
# upload F5 images to the repo
# print "Creating upload F5 image package for web server"
# _make_bigip_zip_tar_file(bigip_images)
print "Uploading F5 zip files to web server"
_upload_bigip_zips_to_web_server(web_server_floating_ip, bigip_images)
# use the F5 supported Heat template to patch images
print "Creating F5 images"
_create_glance_images(f5_heat_template_file,
download_server_image,
web_server_stack_id,
bigip_images)
hc = _get_heat_client()
hc.stacks.delete(web_server_stack_id)
gc = _get_glance_client()
gc.images.delete(download_server_image)
print "\nImages Imported Successfully\n"
# Script entry point.
if __name__ == "__main__":
    main()
|
[
"john.t.gruber@gmail.com"
] |
john.t.gruber@gmail.com
|
d50964d905b178c47097b874498131eb270eef94
|
83513a7452a401e83cfcf77af6996317ad88396b
|
/Assignments/A7/Q1 & Q2 code & pictures/test_opera.py
|
01b8ae5f5132bdd3dcdd6165ebb9b7a4a8928038
|
[] |
no_license
|
sunying2018/persp-analysis_A18
|
3d332b9baf067474fd0f5c080e1cf0a3ead4aada
|
92fd765f3e2a27f82375e5ae3e73983e674d1fc7
|
refs/heads/master
| 2020-03-30T17:46:07.394879
| 2018-12-14T21:33:10
| 2018-12-14T21:33:10
| 151,469,049
| 0
| 0
| null | 2018-10-03T19:32:23
| 2018-10-03T19:32:23
| null |
UTF-8
|
Python
| false
| false
| 722
|
py
|
import opera
import pytest
def test_operate():
    """Exercise opera.operate over the four operators and its error paths."""
    cases = [
        ('+', 14, "failed on '+'"),
        ('-', -2, "failed on'-'"),
        ('*', 48, "failed on '*'"),
        ('/', 3 / 4, "failed on '/'"),
    ]
    for oper, expected, message in cases:
        assert opera.operate(6, 8, oper) == expected, message

    with pytest.raises(ZeroDivisionError) as zero_err:
        opera.operate(6, 0, '/')
    assert zero_err.value.args[0] == "division by zero is undefined"

    with pytest.raises(TypeError) as type_err:
        opera.operate(6, 0, 0)
    assert type_err.value.args[0] == "oper must be a string"

    with pytest.raises(ValueError) as value_err:
        opera.operate(6, 0, '!=')
    assert value_err.value.args[0] == "oper must be one of '+', '/', '-', or '*'"
|
[
"sunying2018@uchicago.edu"
] |
sunying2018@uchicago.edu
|
0e9d3792679e9cf7dee7540715ef45bb4f3fb891
|
8173234f279a012d0cb813473f2e7ce15573ab5e
|
/code/python/rummySkeleton.py
|
dff16cf7459555ab543c9959be0badf194baa23f
|
[] |
no_license
|
Rvansolkem/ESU-SoftwareEngineering
|
31bb3a4950082d62462927f43c485cc698141540
|
cbb2b913fc73914c0989b093e210c01be2a277bc
|
refs/heads/master
| 2023-01-19T23:34:04.010267
| 2020-11-24T00:48:35
| 2020-11-24T00:48:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,142
|
py
|
from Card import Card
from Deck import Deck
from tkinter import *
from tkinter.ttk import *
from PIL import ImageTk,Image
def makeCanvas(root, w=0, h=0, x=0, y=0, place=False, grid=False):
    """Create a Canvas of size w x h inside *root*.

    When *grid* is True the canvas is positioned with the grid geometry
    manager at column *x*, row *y*.  *place* is reserved for future
    .place()-based positioning (using x/y as coordinates) and is
    currently unused.
    """
    c = Canvas(root, width=w, height=h)
    if grid:
        # grid() takes keyword options; the original positional call
        # c.grid(y, x) passed y as the cnf dict and raised a TypeError.
        c.grid(row=y, column=x)
    return c
def makeFrame(root, w=0, h=0, x=0, y=0, place=False, grid=False):
    """Create a Frame of size w x h inside *root*.

    When *grid* is True the frame is positioned with the grid geometry
    manager at column *x*, row *y*.  *place* is currently unused.
    """
    f = Frame(root, width=w, height=h)
    if grid:
        # grid() takes keyword options; the original positional call
        # f.grid(y, x) passed y as the cnf dict and raised a TypeError.
        f.grid(row=y, column=x)
    return f
def checkMove(move):
    """Validate *move* against the current game state (not yet implemented)."""
    #check game state in order to make sure all preconditions are met
    #for some event to occur
    pass
root=Tk()
# Screen-derived sizing for the main window regions.
windowHeight = root.winfo_screenheight()
windowWidth = root.winfo_screenwidth()
handHeight=int(windowHeight/6)
deckHeight = int(windowHeight / 3)
deckWidth=int(windowWidth / 5)
# Nominal card footprint inside a hand canvas.
card_x=int(windowWidth/8)
card_y=int(handHeight*4/5)
################# main game components ####################
# Row 0: opponent hand; row 1: deck/discard/melds; row 2: player hand;
# row 4: control buttons.
oppponentHandCanvas=makeCanvas(root, grid=True, w=windowWidth, h=handHeight, x=0,y=0)#need x and y as well?
middleFrame=makeFrame(root, grid=True, w=windowWidth, h=int(windowHeight/2), x=0,y=1)
playerHand=makeCanvas(root, grid=True, w=windowWidth, h=handHeight, x=0, y=2)
bottomFrame=makeFrame(root, grid=True, w=windowWidth, h=handHeight, x=0, y=4)
############################## opponentHand Components #######################
############################## middleFrame Components #######################
deckCanvas=makeCanvas(middleFrame, w=deckWidth, h=deckHeight, grid=True, x=0, y=0)
discardCanvas=makeCanvas(middleFrame, w=deckWidth, h=deckHeight, grid=True, x=1,y=0)
meldFrame=makeFrame(middleFrame, h=int(windowHeight/2),w=int(windowWidth/2), grid=True, x=2, y=0)
########## deck canvas components ##########
############################## playerHand components ######################
############################## bottomFrame components #######################
#if gin include knock and dont include meld
#if not gin includ emeld dont include knock
#need saveBtn, meldBtn, quitBtn, discardBtn, ??
|
[
"smcrowley8@gmail.com"
] |
smcrowley8@gmail.com
|
d1f476ff0ad10c262739f805be8f65639d917508
|
d1991c1b97cf0913ffab05d9821e3778f2616b36
|
/models/modelws.py
|
153c9068dd79ec4d195fc51bf0ba4c7b13c03b7b
|
[
"MIT"
] |
permissive
|
zhaozunjin/DMPHN-v2-Deblur
|
d70fd443ef2b1bc5787e1d9fe09807a759365c0b
|
0e84aab9c07a880b16dec8ee182868db93cd1d12
|
refs/heads/master
| 2020-11-25T08:34:27.026364
| 2020-09-17T09:09:05
| 2020-09-17T09:09:05
| 228,576,078
| 16
| 1
|
MIT
| 2020-06-21T05:59:54
| 2019-12-17T09:04:57
|
Python
|
UTF-8
|
Python
| false
| false
| 10,143
|
py
|
from __future__ import print_function
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
import os
class ConvBlock(nn.Sequential):
    """Conv2d -> BatchNorm2d -> LeakyReLU(0.2) block, built as a Sequential."""

    def __init__(self, in_channel, out_channel, ker_size, padd, stride):
        super(ConvBlock, self).__init__()
        self.add_module('conv', nn.Conv2d(in_channel, out_channel,
                                          kernel_size=ker_size,
                                          stride=stride, padding=padd))
        self.add_module('norm', nn.BatchNorm2d(out_channel))
        self.add_module('LeakyRelu', nn.LeakyReLU(0.2, inplace=True))
class ResBlock(nn.Sequential):
    """Conv2d -> ReLU -> Conv2d stack.

    NOTE: despite the name there is no skip connection here (nn.Sequential
    cannot express one); callers must add any residual themselves.  Both
    convolutions keep the caller-supplied in/out channel pairing of the
    original code.
    """

    def __init__(self, in_channel, out_channel, kernel_size, padding, stride):
        super(ResBlock, self).__init__()
        # Pass stride/padding by keyword: nn.Conv2d's positional order is
        # (in, out, kernel_size, stride, padding), so the original
        # positional call silently swapped padding and stride.
        self.add_module('Conv1', nn.Conv2d(in_channel, out_channel,
                                           kernel_size, stride=stride,
                                           padding=padding))
        self.add_module('Relu', nn.ReLU(inplace=True))
        self.add_module('Conv2', nn.Conv2d(in_channel, out_channel,
                                           kernel_size, stride=stride,
                                           padding=padding))
class Encoder(nn.Module):
    """Three-stage convolutional encoder with dense intra-stage skips.

    Stages: 32 channels at full resolution, 64 at 1/2 (stride-2 conv),
    128 at 1/4.  Within each stage, every conv pair consumes and re-adds
    the running sum of all earlier outputs of that stage.
    """
    def __init__(self):
        super(Encoder, self).__init__()
        # Conv1: full-resolution stage, 3 -> 32 channels.
        self.layer1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, padding=1)
            )
        self.layer3 = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, padding=1)
            )
        self.layer4 = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, padding=1)
            )
        # Conv2: 1/2-resolution stage (stride-2 downsample), 32 -> 64.
        self.layer5 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1)
        self.layer6 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1)
            )
        self.layer7 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1)
            )
        self.layer8 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1)
            )
        # Conv3: 1/4-resolution stage (stride-2 downsample), 64 -> 128.
        self.layer9 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
        self.layer10 = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1)
            )
        self.layer11 = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1)
            )
        self.layer12 = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1)
            )

    def forward(self, x):
        """Encode an image tensor into 128-channel 1/4-resolution features."""
        # Dense-connection wiring for the Conv1 stage.
        output_layer1 = self.layer1(x)
        output_layer2 = self.layer2(output_layer1)
        output_layer3 = self.layer3(output_layer2 + output_layer1) + output_layer2 + output_layer1
        output_layer4 = self.layer4(
            output_layer3 + output_layer2 + output_layer1) + output_layer3 + output_layer2 + output_layer1
        # Dense-connection wiring for the Conv2 stage.
        output_layer5 = self.layer5(output_layer4)
        output_layer6 = self.layer6(output_layer5)
        output_layer7 = self.layer7(output_layer6 + output_layer5) + output_layer6 + output_layer5
        output_layer8 = self.layer8(
            output_layer7 + output_layer6 + output_layer5) + output_layer7 + output_layer6 + output_layer5
        # Dense-connection wiring for the Conv3 stage.
        output_layer9 = self.layer9(output_layer8)
        output_layer10 = self.layer10(output_layer9)
        output_layer11 = self.layer11(output_layer10 + output_layer9) + output_layer10 + output_layer9
        output_layer12 = self.layer12(
            output_layer11 + output_layer10 + output_layer9) + output_layer11 + output_layer10 + output_layer9
        return output_layer12
class Decoder(nn.Module):
    """Mirror of Encoder: upsamples 128-ch 1/4-res features back to a
    3-channel image, with dense skips inside each stage.

    Layer numbering continues the encoder's (13-24); the gaps (15/19/23)
    correspond to conv pairs that were removed from an earlier revision
    (their calls survive as commented-out lines in forward).
    """
    def __init__(self):
        super(Decoder, self).__init__()
        # Deconv3: 1/4-resolution refinement then 2x upsample, 128 -> 64.
        self.layer13 = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1)
            )
        self.layer14 = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1)
            )
        self.layer16 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
        # Deconv2: 1/2-resolution refinement then 2x upsample, 64 -> 32.
        self.layer17 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1)
            )
        self.layer18 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1)
            )
        self.layer20 = nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1)
        # Deconv1: full-resolution refinement then projection to 3 channels.
        self.layer21 = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, padding=1)
            )
        self.layer22 = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, padding=1)
            )
        self.layer24 = nn.Conv2d(32, 3, kernel_size=3, padding=1)

    def forward(self, x):
        """Decode 128-channel features into a 3-channel residual image."""
        # Dense-connection wiring for the Deconv3 stage.
        output_layer13 = self.layer13(x)
        output_layer14 = self.layer14(output_layer13 + x) + output_layer13 + x
        # output_layer15 = self.layer15(output_layer14+output_layer13 + x) + output_layer14+output_layer13 + x
        output_layer16 = self.layer16(output_layer14)
        # Dense-connection wiring for the Deconv2 stage.
        output_layer17 = self.layer17(output_layer16)
        output_layer18 = self.layer18(output_layer17 + output_layer16) + output_layer17 + output_layer16
        # output_layer19 = self.layer19(output_layer18+output_layer17 + output_layer16) + output_layer18+output_layer17 + output_layer16
        output_layer20 = self.layer20(output_layer18)
        # Dense-connection wiring for the Deconv1 stage.
        output_layer21 = self.layer21(output_layer20)
        output_layer22 = self.layer22(output_layer21 + output_layer20) + output_layer21 + output_layer20
        # output_layer23 = self.layer22(output_layer22+output_layer21 + output_layer20) + output_layer22+output_layer21 + output_layer20
        output_layer24 = self.layer24(output_layer22)
        return output_layer24
class WSDMPHN(nn.Module):
    """Weight-sharing-style DMPHN deblurring network with three patch levels.

    Level 4 splits the image into four quadrant patches (separate
    encoder/decoder per quadrant), level 2 into top/bottom halves, and
    level 1 processes the full image; each finer level's residual and
    features feed the coarser level above it.
    """
    def __init__(self):
        super(WSDMPHN, self).__init__()
        # Working dicts populated per forward pass.
        self.images = {}
        self.feature = {}
        self.residual = {}
        # Level 4: one encoder per quadrant, one decoder per half.
        self.encoder_lv4_1 = Encoder()
        self.encoder_lv4_2 = Encoder()
        self.encoder_lv4_3 = Encoder()
        self.encoder_lv4_4 = Encoder()
        self.decoder_lv4_1 = Decoder()
        self.decoder_lv4_2 = Decoder()
        # self.encoder_lv4 = Encoder()
        # self.decoder_lv4 = Decoder()
        # Level 2: one encoder per half, one decoder for the full frame.
        self.encoder_lv2_1 = Encoder()
        self.encoder_lv2_2 = Encoder()
        self.decoder_lv2_1 = Decoder()
        # self.encoder_lv2 = Encoder()
        # self.decoder_lv2 = Decoder()
        # Level 1: full-image encoder/decoder.
        self.encoder_lv1_1 = Encoder()
        self.decoder_lv1_1 = Decoder()

    def divide_patchs(self, images):
        """Slice the batch into half (lv2) and quadrant (lv4) patches.

        Expects an NCHW tensor; height is split first, then width.
        """
        H = images.size(2)
        W = images.size(3)
        self.images['lv1_1'] = images
        self.images['lv2_1'] = self.images['lv1_1'][:, :, 0:int(H / 2), :]
        self.images['lv2_2'] = self.images['lv1_1'][:, :, int(H / 2):H, :]
        self.images['lv4_1'] = self.images['lv2_1'][:, :, :, 0:int(W / 2)]
        self.images['lv4_2'] = self.images['lv2_1'][:, :, :, int(W / 2):W]
        self.images['lv4_3'] = self.images['lv2_2'][:, :, :, 0:int(W / 2)]
        self.images['lv4_4'] = self.images['lv2_2'][:, :, :, int(W / 2):W]

    def forward(self, input_generator):
        """Return the level-1 restored image for the blurry input batch."""
        self.divide_patchs(input_generator)
        # level3: encode each quadrant, concatenate along width into halves,
        # and decode per-half residuals.
        self.feature['lv4_1'] = self.encoder_lv4_1(self.images['lv4_1'])
        self.feature['lv4_2'] = self.encoder_lv4_2(self.images['lv4_2'])
        self.feature['lv4_3'] = self.encoder_lv4_3(self.images['lv4_3'])
        self.feature['lv4_4'] = self.encoder_lv4_4(self.images['lv4_4'])
        self.feature['lv4_top'] = torch.cat((self.feature['lv4_1'], self.feature['lv4_2']), 3)
        self.feature['lv4_bottom'] = torch.cat((self.feature['lv4_3'], self.feature['lv4_4']), 3)
        # self.feature['lv4'] = torch.cat((self.feature['lv4_top'], self.feature['lv4_bottom']), 2)
        self.residual['lv4_top'] = self.decoder_lv4_1(self.feature['lv4_top'])
        self.residual['lv4_bottom'] = self.decoder_lv4_2(self.feature['lv4_bottom'])
        # level2: residual-corrected halves re-encoded, skip-added with lv4
        # features, concatenated along height, decoded to a full residual.
        self.feature['lv2_1'] = self.encoder_lv2_1(self.images['lv2_1']+self.residual['lv4_top'])+self.feature['lv4_top']
        self.feature['lv2_2'] = self.encoder_lv2_2(self.images['lv2_2']+self.residual['lv4_bottom'])+self.feature['lv4_bottom']
        self.feature['lv2'] = torch.cat((self.feature['lv2_1'], self.feature['lv2_2']), 2)
        self.residual['lv2'] = self.decoder_lv2_1(self.feature['lv2'])
        # level1: full image corrected by the lv2 residual, then decoded.
        self.feature['lv1'] = self.encoder_lv1_1(self.images['lv1_1'] + self.residual['lv2'])+self.feature['lv2']
        self.residual['lv1'] = self.decoder_lv1_1(self.feature['lv1'])
        return self.residual['lv1']
class StackShareNet(nn.Module):
    """Run one shared WSDMPHN backbone three times, each pass refining the
    previous output; returns all three intermediate restorations."""

    def __init__(self):
        super(StackShareNet, self).__init__()
        self.basicnet = WSDMPHN()

    def forward(self, x):
        stages = []
        current = x
        for _ in range(3):
            current = self.basicnet(current)
            stages.append(current)
        return stages[0], stages[1], stages[2]
|
[
"1763020129@qq.com"
] |
1763020129@qq.com
|
dfd28d04bd20cdbae0bd324f5eaf1c036da10434
|
480e33f95eec2e471c563d4c0661784c92396368
|
/RecoMuon/MuonIdentification/python/me0MuonConverter_cfi.py
|
83b6632c7cca09219c2dcc76760b07daa2dbaeab
|
[
"Apache-2.0"
] |
permissive
|
cms-nanoAOD/cmssw
|
4d836e5b76ae5075c232de5e062d286e2026e8bd
|
4eccb8a758b605875003124dd55ea58552b86af1
|
refs/heads/master-cmsswmaster
| 2021-01-23T21:19:52.295420
| 2020-08-27T08:01:20
| 2020-08-27T08:01:20
| 102,867,729
| 7
| 14
|
Apache-2.0
| 2022-05-23T07:58:09
| 2017-09-08T14:03:57
|
C++
|
UTF-8
|
Python
| false
| false
| 98
|
py
|
import FWCore.ParameterSet.Config as cms
# Module instance 'me0MuonConverting' running the ME0MuonConverter EDProducer
# plugin (no configurable parameters).
me0MuonConverting = cms.EDProducer("ME0MuonConverter")
|
[
"dnash@cern.ch"
] |
dnash@cern.ch
|
eef2fd51f950be206cbc573c1a40bacd5f6323e1
|
b1088a69760332dff5c27479c7c46f0dfda11e2c
|
/venv/Scripts/pip-script.py
|
45cb9845d8d236ebbbfa6754518f669d650412db
|
[] |
no_license
|
JUSADAM1/FightingGame
|
66e5117b5ff84f71c1db4983bc9a7754207f4261
|
0e8691d72eec95433994b9e7a92b2b9dae7dd6fd
|
refs/heads/master
| 2020-05-03T20:28:45.001845
| 2019-04-08T20:43:19
| 2019-04-08T20:43:19
| 178,803,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
#!C:\Users\justy\PycharmProjects\FightingGame\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
# Invoke pip's console-script entry point.  argv[0] is rewritten to strip the
# '-script.py'/'.exe' wrapper suffix so pip reports its name as plain 'pip'.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
|
[
"justy@DESKTOP-NAT2PHA"
] |
justy@DESKTOP-NAT2PHA
|
20aa6256635712125cec7cde82d9edd48783816b
|
58a686c0a752db0f9084659a7f93d5b4f35a065c
|
/web_idea/apps.py
|
3ad06d0a73b08e922fed17edcaef47f32b83f3ed
|
[
"MIT"
] |
permissive
|
Dimas4/Web-IDEA-Python-Django
|
5322ae013eb9b6803c175886f152654ed9469402
|
9cf17150d422979470e3d865a381309a83d8e875
|
refs/heads/master
| 2020-04-10T04:45:33.843338
| 2018-12-07T11:58:34
| 2018-12-07T11:58:34
| 160,808,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
from django.apps import AppConfig
class WebIdeaConfig(AppConfig):
    """Django application configuration for the web_idea app."""
    name = 'web_idea'
|
[
"ivanshatukho@yandex.ru"
] |
ivanshatukho@yandex.ru
|
724778ba9809a4764fe8fb9db59911050b386395
|
8329282a8fda056d705c1af6dbcd0de1ed7ca25e
|
/.history/textutiles/textutiles/views_20210522223732.py
|
629f8ff15d006894a027b5cc2f0bacbc0ca08e6f
|
[] |
no_license
|
ritikalohia/Django-beginners-
|
c069b16867407ef883bb00c6faf4f601921c118a
|
829e28ab25201853de5c71a10ceff30496afea52
|
refs/heads/main
| 2023-05-04T03:34:29.082656
| 2021-05-22T17:38:21
| 2021-05-22T17:38:21
| 369,869,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,627
|
py
|
#created
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Render the home page."""
    template = 'index.html'
    return render(request, template)
def contact(request):
    """Render the contact page."""
    template = 'contact.html'
    return render(request, template)
def about(request):
    """Render the about-us page."""
    template = 'about_us.html'
    return render(request, template)
def analyze(request):
    """Apply the transformations selected by the form checkboxes to the
    submitted text and render the result.

    Fixes over the original draft:
      * request.GET.get — QueryDict has no .post method (AttributeError)
      * spaceremover was a 1-tuple (stray trailing comma) so it never
        compared equal to "on"
      * the space remover indexed one past the end of the string
      * every branch except removepunc could fall off the function
        without returning an HttpResponse
    """
    # get the text and option flags from the query string
    djtext = request.GET.get('text', 'default')
    removepunc = request.GET.get('removepunc', 'off')
    fullcaps = request.GET.get('fullcaps', 'off')
    newlineremover = request.GET.get('newlineremover', 'off')
    spaceremover = request.GET.get('spaceremover', 'off')
    charcount = request.GET.get('charcount', 'off')

    if removepunc == "on":
        punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_'''
        analyzed = ''.join(ch for ch in djtext if ch not in punctuations)
        params = {'purpose': 'removed punctuations', 'analyzed_text': analyzed}
        return render(request, 'analyze.html', params)

    params = None
    if fullcaps == "on":
        djtext = djtext.upper()
        params = {'purpose': 'changed to UPPERCASE', 'analyzed_text': djtext}
    if newlineremover == "on":
        djtext = djtext.replace('\r', '').replace('\n', '')
        params = {'purpose': 'Removed new lines', 'analyzed_text': djtext}
    if spaceremover == "on":
        # Drop a space whenever the following character is also a space,
        # collapsing runs of spaces to a single one.
        chars = []
        for index, char in enumerate(djtext):
            if char == " " and index + 1 < len(djtext) \
                    and djtext[index + 1] == " ":
                continue
            chars.append(char)
        djtext = ''.join(chars)
        params = {'purpose': 'extra space removed', 'analyzed_text': djtext}
    if charcount == "on":
        # Original said 'extra space removed' here (copy/paste slip).
        params = {'purpose': 'character count', 'analyzed_text': len(djtext)}
    if params is not None:
        return render(request, 'analyze.html', params)
    return HttpResponse("Error")
# def capfirst(request):
# return HttpResponse("capitalize first")
|
[
"rtklohia@gmail.com"
] |
rtklohia@gmail.com
|
c6a0f515e4061baa17af3a79b41c463f25758ff0
|
92429015d9a1f1cea9b9bf2c9f1a8a7a07586af5
|
/option.py
|
6e7b7892e37aa810a008bdf082451b08034a0125
|
[] |
no_license
|
arthur-qiu/adv_vis
|
46a953ce6c3d562137c8e566bc9b523e25bc5bbd
|
ba46c00cf38ca5186d7db84844892036ed714eaf
|
refs/heads/master
| 2021-01-03T23:00:45.065108
| 2020-04-05T03:47:01
| 2020-04-05T03:47:01
| 240,272,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,099
|
py
|
import argparse
import os
class BaseOptions():
    """Command-line options for training a CIFAR classifier.

    Typical usage: ``opt = BaseOptions().parse()``. ``parse()`` also echoes
    the options, creates the checkpoint directory and writes ``opt.txt``.
    """
    def __init__(self):
        self.parser = argparse.ArgumentParser(description='Trains a CIFAR Classifier',
                                              formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        # Set to True once initialize() has registered the arguments.
        self.initialized = False
    def initialize(self):
        """Register every command-line argument on the parser (idempotent via parse())."""
        self.parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100'],
                                 help='Choose between CIFAR-10, CIFAR-100.')
        self.parser.add_argument('--model', '-m', type=str, default='wrn',
                                 choices=['wrn'], help='Choose architecture.')
        # Optimization options
        self.parser.add_argument('--epochs', '-e', type=int, default=50, help='Number of epochs to train.')
        self.parser.add_argument('--start_epoch', type=int, default=1, help='The start epoch to train. Design for restart.')
        self.parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
        self.parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
        self.parser.add_argument('--test_bs', type=int, default=128)
        self.parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
        self.parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
        self.parser.add_argument('--epoch_step', default='[40,42,44,46,48]', type=str,
                                 help='json list with epochs to drop lr on')
        self.parser.add_argument('--lr_decay_ratio', default=0.2, type=float)
        # Checkpoints
        self.parser.add_argument('--save', '-s', type=str, default='./logs/cifar10_adv', help='Folder to save checkpoints.')
        self.parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
        self.parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
        self.parser.add_argument('--dataroot', default='.', type=str)
        # Acceleration
        self.parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
        self.parser.add_argument('--prefetch', type=int, default=1, help='Pre-fetching threads.')
        # Adversarial setting
        self.parser.add_argument('--epsilon', type=float, default=8 / 255,
                                 help='perturbation')
        self.parser.add_argument('--num_steps', type=int, default=7,
                                 help='perturb number of steps')
        self.parser.add_argument('--step_size', type=float, default=2 / 255,
                                 help='perturb step size')
        self.parser.add_argument('--test_num_steps', type=int, default=20,
                                 help='test perturb number of steps')
        self.parser.add_argument('--test_step_size', type=float, default=2 / 255,
                                 help='test perturb step size')
        # Others
        self.parser.add_argument('--random_seed', type=int, default=1)
        # BUG FIX: mark initialization done so parse() never registers the
        # arguments twice (argparse raises on duplicate option strings).
        self.initialized = True
    def parse(self, save=True):
        """Parse argv, print the options, ensure the save dir exists and optionally persist opt.txt.

        :param save: when True (and not --test), write the options to <save>/opt.txt
        :return: the parsed argparse.Namespace (also stored on self.opt)
        """
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        args = vars(self.opt)
        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')
        # save to the disk
        # Make save directory
        if not os.path.exists(self.opt.save):
            os.makedirs(self.opt.save)
        if not os.path.isdir(self.opt.save):
            raise Exception('%s is not a dir' % self.opt.save)
        if save and not self.opt.test:
            file_name = os.path.join(self.opt.save, 'opt.txt')
            with open(file_name, 'wt') as opt_file:
                opt_file.write('------------ Options -------------\n')
                for k, v in sorted(args.items()):
                    opt_file.write('%s: %s\n' % (str(k), str(v)))
                opt_file.write('-------------- End ----------------\n')
        return self.opt
|
[
"Arthur"
] |
Arthur
|
955308df4fc4b863683301eafeeab3ece4016421
|
c25dad937d53dba30119b3c841ba27ab7542d90b
|
/adjust_data/adjust_data_functions.py
|
1b0c3a6347738c8fcfdf0b51a67ff6365148af29
|
[] |
no_license
|
webclinic017/Financial_Machine_Learning
|
546ad883cefc0f9e2400ddda121db3147b7274d9
|
33489128315c87e7378ff0da6ab52712177afcd9
|
refs/heads/master
| 2023-07-31T17:24:11.433057
| 2021-09-14T13:28:30
| 2021-09-14T13:28:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,127
|
py
|
"""
Name : adjust_data_functions.py in Project: Financial_ML
Author : Simon Leiner
Date : 18.05.2021
Description: Pca transformation of the given variables
"""
from sklearn.preprocessing import RobustScaler
from sklearn.decomposition import PCA
import pandas as pd
import seaborn as sns
from scipy.stats import norm
import matplotlib.pyplot as plt
import warnings
# disable some warnings
warnings.filterwarnings(category=FutureWarning,action="ignore")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Checked: Function works
def pca_analysis(data_cleaner):
    """
    This function computes a principal component analysis for dimensionality reduction.
    :param data_cleaner: list with 2 pd.DataFrame: data_cleaner
    :return: list with 2 pd.DataFrame: data_cleaner
    Linear dimensionality reduction using Singular Value Decomposition of
    the data to project it to a lower dimensional space. The input data is centered but not
    scaled for each feature before applying the SVD.
    For further Inforamtion See:
    # https: // stats.stackexchange.com / questions / 55718 / pca - and -the - train - test - split
    # https://stats.stackexchange.com/questions/2691/making-sense-of-principal-component-analysis-eigenvectors-eigenvalues
    # https://stackoverflow.com/questions/55441022/how-to-aply-the-same-pca-to-train-and-test-set
    # https: // towardsdatascience.com / pca - using - python - scikit - learn - e653f8989e60
    """
    # create scaler build_model
    # RobustScaler is resilient to outliers (scales by IQR, centers by median)
    scaler_model = RobustScaler()
    # create pca build_model
    # choose the minimum number of principal components such that 95% of the variance is retained.
    pca_model = PCA(.95)
    # for each dataframe do:
    # convention: data_cleaner[0] is the training set, data_cleaner[1] the test set
    for i in range(len(data_cleaner)):
        # get the df
        df = data_cleaner[i]
        # get the X features
        # 't' is the label column and must not enter the PCA
        X = df.drop(["t"], axis=1)
        # for the training data do
        if i == 0:
            # fit the scaler build_model
            scaler_model.fit(X)
            # fit the pca build_model
            # fitting happens ONLY on the training split to avoid test-set leakage
            pca_model.fit(X)
        # for the testing data do:
        else:
            pass
        # only transform the data with the already fitted scler build_model
        X = scaler_model.transform(X)
        # only transform the data with the already fitted pca build_model
        principal_components = pca_model.transform(X)
        # save them in a dataframe
        # keep the original index so the label column can be re-attached by index
        principal_df = pd.DataFrame(data=principal_components, index=df.index)
        # only print for the training data
        if i == 0:
            print(f"{principal_components.shape[1]} Principal components explain 95 % of the training datasets variance.")
            print("-" * 10)
        # add the y column
        finalDf = pd.concat([principal_df, df[['t']]], axis=1)
        # only plot for the training data
        if i == 0:
            # plotting
            plt.subplot(2, 1, 1)
            plt.title(f"First 2 Principal Components that explain the most variance:")
            sns.scatterplot(data=finalDf, x=finalDf.iloc[:,0], y=finalDf.iloc[:,1], hue=finalDf['t'],palette=["red","green"])
            plt.xlabel("PC1")
            plt.ylabel("PC2")
            # plotting
            plt.subplot(2, 1, 2)
            plt.title(f"Distribution of the PCA transformed returns:")
            sns.distplot(principal_df, fit=norm)
            plt.show()
        # set the data
        # replace the raw frame with its PCA-transformed version in place
        data_cleaner[i] = finalDf
    # return the data
    return data_cleaner
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Checked: Function works
def transform_supervies_learning(df,days_predict_into_future):
    """
    Turn a time series into a supervised-learning table.

    The 'Label' column is renamed to 't' and lagged copies 't-i' are added
    for i from the prediction horizon up to one third of the series length.
    Rows left incomplete by the shifting are dropped. The frame is modified
    in place and returned.

    :param df: pd.DataFrame with a 'Label' column holding the series
    :param days_predict_into_future: integer, number of days to predict in the future
    :return: pd.DataFrame with columns ['t', 't-1', ...]
    """
    print(f"The time series containes {len(df.Label)} datapoints.")
    print("-" * 10)
    # 'Label' becomes 't' (the value at the current time step)
    df.rename({"Label": "t"}, inplace=True, axis=1)
    # look back over one third of the series: a 2 : 3 split
    lookback = int(len(df["t"]) / 3)
    # shift the series into one lag column per step
    for lag in range(days_predict_into_future, lookback):
        df[f"t-{lag}"] = df["t"].shift(lag)
    # shifting produced NaNs at the head of the frame; drop those rows
    df.dropna(inplace=True)
    print(f"Transforming the data into {lookback + 2} columns.")
    print("-" * 10)
    print(f"The dataframe contains {df.shape[0]} rows and {df.shape[1]} columns. A total of {df.shape[0] * df.shape[1]} datapoints.")
    print("-" * 10)
    return df
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
|
[
"simon.leiner1@gmail.com"
] |
simon.leiner1@gmail.com
|
313ca3e1c98c38562546e6407aa1547744c5cc05
|
d83b3d9898973ec8f9dbcc5a2299eede9b6c435f
|
/x10project/exchange/exchangebl/bittrexbl.py
|
45b1f12f4f348d3e946902ea7fc48be9d4b1a1ae
|
[] |
no_license
|
fintechclub/x10python
|
333e914c440cc8f995a2b97619a62735298d9fc8
|
dc3a52a7531e5e8114a9c708accbdc2e7bff2d60
|
refs/heads/master
| 2022-12-08T06:09:17.924359
| 2018-09-06T13:00:28
| 2018-09-06T13:00:28
| 138,585,164
| 0
| 4
| null | 2022-12-08T02:18:55
| 2018-06-25T11:22:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,614
|
py
|
from x10project import BaseExchangeBL
from bittrex.bittrex import *
import pprint
#https://github.com/ericsomdahl/python-bittrex
class BittrexLogic(BaseExchangeBL):
    """Bittrex exchange adapter: balances, open orders and a formatted account summary.

    Wraps the python-bittrex client (API v1.1); user-facing summary strings
    are in Russian and are produced by the _*ToString helpers.
    """
    def __init__(self, account_name=None, api_key=None, api_secret=''):
        super().__init__("bittrex", account_name, api_key, api_secret)
        self.bittrexClient = Bittrex(self.api_key, self.api_secret, api_version=API_V1_1)
    def getTicker(self, symbol):
        # NOTE(review): result is discarded and `symbol` is unused - looks unfinished.
        self.bittrexClient.get_ticker()
    def getMarketSummaries(self):
        """Return the raw 'result' list of all market summaries."""
        result = self.bittrexClient.get_market_summaries()
        return result['result']
    def getTickers(self, symbols):
        """Map each requested BTC-quoted symbol to its last trade price."""
        market_symbols = self.getMarketSummaries()
        result = dict()
        for item in market_symbols:
            # market names look like 'BTC-ETH': base currency, then rated currency
            base_c, rated_c = item['MarketName'].split('-')
            if base_c == 'BTC' and self._findInList(symbols, rated_c) == True:
                result[rated_c] = item['Last']
        return result
    def _findInList(self, arr, elem):
        # linear membership test (equivalent to `elem in arr`)
        for item in arr:
            if item == elem:
                return True
        return False
    def getOrders(self):
        """Return open orders as (market, order type, limit price, quantity) tuples, or None on API failure."""
        orders = self.bittrexClient.get_open_orders()
        if orders['success'] == False:
            return None
        return [(item['Exchange'], item['OrderType'], item['Limit'], item['Quantity']) for item in orders['result']]
    def getBalances(self):
        """Return non-zero balances as (currency, balance, available) tuples, or None on API failure."""
        full_balances = self.bittrexClient.get_balances()
        if full_balances['success'] == False:
            return None
        return [(item['Currency'], item['Balance'], item['Available']) for item in full_balances['result'] if item['Balance'] > 0]
    def _balancesToString(self, balances):
        # format each (currency, balance, ...) tuple as a Russian bullet line
        result=''
        for item in balances:
            result += '🔹 Инструмент: {:s},\n Количество: {:.2f}\n'.format(item[0],
                                                                item[1])
        return result
    def _ordersToString(self, orders):
        # red dot for sells, blue for buys; 'Отсутствуют' means "none"
        result=''
        for item in orders:
            result += '{:s} Инструмент: {:s},\n Тип ордера: {:s}\n Количество: {:.2f}\n Цена: {:.7f}\n'.format( '🔴' if item[1]=='LIMIT_SELL' else '🔵', item[0],
                                                                item[1], item[3], item[2])
        return result if result != '' else 'Отсутствуют'
    def getCommonAccountInfo(self):
        """Build the full account report: estimated BTC balance, balances and open orders.

        The estimate sums each altcoin balance times its BTC ticker plus the
        raw BTC balance; USDT is excluded (no BTC ticker is fetched for it).
        """
        balances = self.getBalances()
        orders = self.getOrders()
        tickers = self.getTickers( [item[0] for item in balances] )
        est_balance = sum(item[1] * tickers[item[0]] for item in balances if item[0] != 'BTC' and item[0] != 'USDT')
        est_balance += sum(item[1] for item in balances if item[0] == 'BTC')
        '''
        print(colored("\n---Balance---", "green"))
        pprint.pprint(balance)
        print(colored("\n---My Orders---", "green"))
        pprint.pprint(orders)
        '''
        return "Рассчетный баланс: {:.4f}, \nДанные по балансу: \n{:s} \nОткрытые ордера:\n{:s}".format(est_balance,
                                                                        self._balancesToString(balances),
                                                                        self._ordersToString(orders))
|
[
"kda.biz83@gmail.com"
] |
kda.biz83@gmail.com
|
8f6d2d076198fade4cd55609075354f9fec895db
|
a633fe67ddf4fc2097fbf5cdbc91d1ec73ac02b3
|
/volume-sizer.py
|
54998da4a36756471e6fe65a6cff6ba614b78f3b
|
[] |
no_license
|
mchad1/azure-volume-sizing
|
07b4b85794f6d28ef06beeb8e439701845f4bbba
|
f17909b506cda40ed8be5faec36b09cea1fb036a
|
refs/heads/master
| 2022-11-25T05:57:25.853339
| 2020-08-03T17:19:17
| 2020-08-03T17:19:17
| 284,761,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,845
|
py
|
import sys
sys.path.append(sys.path[0] + "/lib")
import argparse
import json
import math
import os
import requests
from datetime import datetime
def quota_and_servicelevel_parser():
    """Load service-level pricing/bandwidth config from servicelevel_and_quotas.json.

    :return: the parsed config dict
    Exits the process with an error message when the file is missing.
    """
    if os.path.exists('servicelevel_and_quotas.json'):
        with open('servicelevel_and_quotas.json', 'r') as config_file:
            return json.load(config_file)
    # BUG FIX: the message started with '\E' (an invalid escape sequence,
    # printed as a literal backslash); use a plain leading newline instead.
    print('\nError, the servicelevel_and_quotas.json file could not be found\n')
    # sys.exit over the interactive-only exit() builtin; non-zero = failure
    sys.exit(1)
#Return epoch
def date_to_epoch(created = None, now = None):
    """Convert a timestamp to epoch seconds.

    :param created: ISO-ish string 'YYYY-MM-DDTHH:MM:SS[.frac]' (fractional part ignored)
    :param now: when truthy, ignore *created* and use the current local time
    :return: epoch seconds as a float (local-time interpretation)
    """
    if now:
        # current time, with the fractional seconds stripped
        stamp = str(datetime.now()).split('.')[0]
        parsed = datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S')
    else:
        # drop any fractional-second suffix before parsing
        stamp = created.split('.')[0]
        parsed = datetime.strptime(stamp, '%Y-%m-%dT%H:%M:%S')
    return parsed.timestamp()
def command_line():
    """Parse the CLI arguments and dispatch to the requested command.

    --volSize is currently the only command (the mutually exclusive group
    exists for future commands); --gigabytes/-g and --bandwidth/-b supply
    its inputs and default to None when omitted.
    """
    parser = argparse.ArgumentParser(prog='cvs-aws.py',description='%(prog)s is used to issue commands to your NetApp Cloud Volumes Service on your behalf.')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--volSize',action='store_const',const=True,)
    parser.add_argument('--gigabytes','-g',type=str,help='Volume gigabytes in Gigabytes, value accepted between 100 and 102,400. Supports volSize')
    parser.add_argument('--bandwidth','-b',type=str,help='Volume bandwidth requirements in Megabytes per second. If unknown enter 0 and maximum\
    bandwidth is assigned. Supports volSize')
    arg = vars(parser.parse_args())
    #Preview sets of the automation to simulate the command and return simulated results, if not entered assume False
    if arg['bandwidth']:
        bandwidth = arg['bandwidth']
    else:
        bandwidth = None
    if arg['gigabytes']:
        gigabytes = arg['gigabytes']
    else:
        gigabytes = None
    if arg['volSize']:
        volSize( bandwidth = bandwidth, gigabytes = gigabytes)
##########################################################
# Volume Commands
##########################################################
def volSize(
            bandwidth = None,
            gigabytes = None,
            ):
    """Validate the sizing inputs and print the recommended volume configuration.

    :param bandwidth: requested bandwidth in MB/s as a string; 0 means "unknown, use max"
    :param gigabytes: requested capacity in GiB as a string, 100..102400
    Prints an error report (and the usage message) on invalid input.
    """
    if gigabytes and bandwidth:
        error = False
        error_value = {}
        if is_number(gigabytes):
            error = True
            error_value['gigabytes_integer'] = 'Capacity was not a numeric value'
        elif int(gigabytes) < 100 or int(gigabytes) > 102400:
            error = True
            error_value['size'] = 'Capacity was either smaller than 100GiB or greater than 102,400GiB'
        if is_number(bandwidth):
            error = True
            error_value['bw_integer'] = 'Bandwidth was not a numeric value'
        elif int(bandwidth) < 0:
            error = True
            error_value['bw'] = ('Negative value entered: %s, requested values must be => 0. If value == 0 or value > 4500 then maximum bandwidth will be assigned' % (bandwidth))
        if error == False:
            # BUG FIX: the lookup used to run BEFORE validation, so non-numeric
            # input crashed in servicelevel_and_quota_lookup instead of being
            # reported; only perform the lookup once the inputs are valid.
            servicelevel, quotainbytes, bandwidthMiB, cost = servicelevel_and_quota_lookup(bwmb = bandwidth, gigabytes = gigabytes)
            volume_sizing(bandwidth = bandwidthMiB,
                          cost = cost,
                          quota_in_bytes = quotainbytes,
                          servicelevel = servicelevel
                          )
        else:
            print('The volSize command failed, see the following json output for the cause:\n')
            pretty_hash(error_value)
            volSize_error_message()
    else:
        print('Error Bandwidth: %s, GiB: %s'%(bandwidth,gigabytes))
##########################################################
# Primary Functions
##########################################################
def is_number(number = None):
    """Return True when *number* can NOT be converted to an int (i.e. it is invalid)."""
    try:
        int(number)
    except Exception:
        return True
    return False
'''
verify characters are allowable, only letters, numbers, and - are allowed
'''
def is_ord(my_string = None, position = None):
    """Return True when the character is NOT allowed at the given position.

    Position 0 permits ASCII letters only; later positions also permit
    digits and '-'. (ord() intentionally raises on multi-char input.)
    """
    code = ord(my_string)
    is_letter = 65 <= code <= 90 or 97 <= code <= 122
    if position == 0:
        return not is_letter
    return not (is_letter or 48 <= code <= 57 or code == 45)
##########################################################
# Volume Functions
##########################################################
'''
Issue call to create volume
'''
def volume_sizing(bandwidth = None,
                  cost = None,
                  quota_in_bytes = None,
                  servicelevel = None):
    """Print the recommended sizing: service level, monthly cost, capacity (GiB) and bandwidth (MiB/s).

    Despite the surrounding comment, no volume is created - this only reports.
    quota_in_bytes is converted to GiB for display.
    """
    print('\n\tserviceLevel:%s ($%s)\
    \n\tallocatedCapacityGiB:%s\
    \n\tavailableBandwidthMiB:%s'
    % (servicelevel,cost,int(quota_in_bytes) / 2**30,bandwidth))
'''
Determine the best gigabytes and service level based upon input
input == bandwidth in MiB, gigabytes in GiB
output == service level and gigabytes in GiB
'''
def servicelevel_and_quota_lookup(bwmb = None, gigabytes = None):
    """Pick the cheapest service level that satisfies both capacity and bandwidth needs.

    :param bwmb: requested bandwidth in MiB/s; 0 (or above the global max) means "use the maximum"
    :param gigabytes: requested capacity in GiB
    :return: (servicelevel, quota in bytes, bandwidth in MiB/s, monthly cost)
    Pricing and per-GiB bandwidth come from servicelevel_and_quotas.json.
    """
    servicelevel_and_quota_hash = quota_and_servicelevel_parser()
    bwmb = float(bwmb)
    gigabytes = float(gigabytes)
    standard_cost_per_gb = float(servicelevel_and_quota_hash['prices']['standard'])
    premium_cost_per_gb = float(servicelevel_and_quota_hash['prices']['premium'])
    ultra_cost_per_gb = float(servicelevel_and_quota_hash['prices']['ultra'])
    standard_bw_per_gb = float(servicelevel_and_quota_hash['bandwidth']['standard'])
    premium_bw_per_gb = float(servicelevel_and_quota_hash['bandwidth']['premium'])
    ultra_bw_per_gb = float(servicelevel_and_quota_hash['bandwidth']['ultra'])
    '''
    if bwmb == 0, then the user didn't know the bandwidth, so set to maximum which we've seen is 3800MiB/s.
    '''
    if bwmb == 0 or bwmb > int(servicelevel_and_quota_hash['max_bandwidth']['max']):
        bwmb = int(servicelevel_and_quota_hash['max_bandwidth']['max'])
    '''
    convert mb to kb
    '''
    bwkb = bwmb * 1024.0
    '''
    gigabytes needed based upon bandwidth needs
    '''
    # for each tier: cost is driven by whichever is larger, the requested
    # capacity or the capacity implied by the bandwidth requirement
    standard_gigabytes_by_bw = bwkb / standard_bw_per_gb
    if standard_gigabytes_by_bw < gigabytes:
        standard_cost = gigabytes * standard_cost_per_gb
    else:
        standard_cost = standard_gigabytes_by_bw * standard_cost_per_gb
    premium_gigabytes_by_bw = bwkb / premium_bw_per_gb
    if premium_gigabytes_by_bw < gigabytes:
        premium_cost = gigabytes * premium_cost_per_gb
    else:
        premium_cost = premium_gigabytes_by_bw * premium_cost_per_gb
    ultra_gigabytes_by_bw = bwkb / ultra_bw_per_gb
    if ultra_gigabytes_by_bw < gigabytes:
        ultra_cost = gigabytes * ultra_cost_per_gb
    else:
        ultra_cost = ultra_gigabytes_by_bw * ultra_cost_per_gb
    '''
    calculate right service level and gigabytes based upon cost
    '''
    cost_hash = {'standard':standard_cost,'premium':premium_cost,'ultra':ultra_cost}
    capacity_hash = {'standard':standard_gigabytes_by_bw,'premium':premium_gigabytes_by_bw,'ultra':ultra_gigabytes_by_bw}
    bw_hash = {'standard':standard_bw_per_gb,'premium':premium_bw_per_gb,'ultra':ultra_bw_per_gb}
    lowest_price = min(cost_hash.values())
    # NOTE(review): the two %s substitutions look swapped - this prints the
    # whole cost_hash under 'lowest_price' and the price under the level label.
    print('lowest_price:%s,Cheapest_Service_level:%s'%(cost_hash,lowest_price))
    for key in cost_hash.keys():
        if cost_hash[key] == lowest_price:
            servicelevel = key
            # choose the larger of the requested capacity and the
            # bandwidth-implied capacity, rounded up to a whole GiB
            if capacity_hash[key] < gigabytes:
                gigabytes = int(math.ceil(gigabytes))
                bandwidthKiB = int(math.ceil(gigabytes)) * bw_hash[servicelevel]
            else:
                gigabytes = int(math.ceil(capacity_hash[key]))
                bandwidthKiB = int(math.ceil(capacity_hash[key])) * bw_hash[servicelevel]
            '''
            convert from Bytes to GiB
            '''
            gigabytes *= 2**30
            bandwidthMiB = int(bandwidthKiB / 1024)
            # clamp to the tier's published maximum bandwidth
            if bandwidthMiB > int(servicelevel_and_quota_hash['max_bandwidth'][servicelevel]):
                bandwidthMiB = int(servicelevel_and_quota_hash['max_bandwidth'][servicelevel])
            break
    return servicelevel, gigabytes, bandwidthMiB, lowest_price
'''
Calculate the bandwidth based upon passed in service level and quota
'''
def bandwidth_calculator(servicelevel = None, quotaInBytes = None):
    """Derive available bandwidth and capacity from a service level and quota.

    :param servicelevel: tier name, must be a key in the config's 'bandwidth' table
    :param quotaInBytes: allocated capacity in bytes
    :return: (bandwidth in MiB/s, capacity in GiB), or (None, None) for an unknown tier
    """
    config = quota_and_servicelevel_parser()
    bw_table = config['bandwidth']
    if servicelevel not in bw_table:
        return None, None
    capacityGiB = quotaInBytes / 2**30
    # per-GiB bandwidth is stored in KiB/s; divide by 1024 for MiB/s
    bandwidthMiB = (capacityGiB * bw_table[servicelevel]) / 1024
    return bandwidthMiB, capacityGiB
def volSize_error_message():
    """Print the volSize usage/flags message and terminate the process."""
    print('\nThe following volSize flags are required:\
    \n\t--gigabytes | -g [0 < X <= 102,400]\t#Allocated volume capacity in Gigabyte\
    \n\t--bandwidth | -b [0 <= X <= 4500]\t#Requested maximum volume bandwidth in Megabytes')
    exit()
'''MAIN'''
# Guard the entry point so importing this module doesn't parse argv.
if __name__ == '__main__':
    command_line()
|
[
"mchad@netapp.com"
] |
mchad@netapp.com
|
e0ada9a3ea0f7e8137969f157413b9f0dad2b729
|
7dc45e620fda52abb073eb3499ebf772293c993a
|
/RunInkaRun_Ejecutable/exe.win-amd64-3.6/Entities/Entities.py
|
27a126bdd077ce2bbda4ccf0d6718887a31e9f28
|
[] |
no_license
|
EduLara97/Proyecto_Juego_SW2
|
3e0f82a07e47acc1fab8be179f766d8b6045ba14
|
f2f0e37cedffb4e2de428ba29393d57899d3b619
|
refs/heads/master
| 2021-08-22T07:24:41.951831
| 2017-11-29T16:07:09
| 2017-11-29T16:07:09
| 105,581,768
| 2
| 3
| null | 2017-11-27T14:16:40
| 2017-10-02T20:30:21
|
Python
|
UTF-8
|
Python
| false
| false
| 16,534
|
py
|
from random import randrange
import pygame as pg
from pygame import *
from GeneralInformation import *
vec = pg.math.Vector2
class Spritesheet:
    """Loads a spritesheet image and cuts individual frames out of it."""
    def __init__(self, filename):
        # attribute name kept as-is ('sspritesheet') for compatibility with callers
        self.sspritesheet = pg.image.load(filename).convert()

    def get_image(self, x, y, width, height):
        """Return the (x, y, width, height) region of the sheet, scaled to 60x80."""
        frame = pg.Surface((width, height))
        frame.blit(self.sspritesheet, (0, 0), (x, y, width, height))
        return pg.transform.scale(frame, (60, 80))
class Player(pg.sprite.Sprite):
    """Controllable hero sprite: handles keyboard input, simple physics and walk animation.

    Relies on module-level constants (WIDTH, HEIGHT, PLAYER_GRAV, PLAYER_FRICTION,
    PLAYER_JUMP, BLACK), on game.sprites being a Spritesheet, and on the
    Propiedades singleton for health/acceleration settings.
    """
    def __init__(self, game, x, y):
        pg.sprite.Sprite.__init__(self)
        self.game = game
        self.walking = False
        self.current_frame = 0
        self.last_update = 0      # tick of the last animation frame change
        self.load_images()
        self.image = self.standing_frames[0]
        self.rect = self.image.get_rect()
        self.rect.center = (WIDTH / 2, HEIGHT / 2)
        self.propiedades = Propiedades.get_instance()
        self.vida = self.propiedades.vida_personaje
        # physics state: position, velocity, acceleration
        self.pos = vec(x, y)
        self.vel = vec(0, 0)
        self.acc = vec(0, 0)
    def load_images(self):
        """Cut the standing and walking frames from the spritesheet (right frames are flipped for left)."""
        self.standing_frames = [self.game.sprites.get_image(0, 0, 200, 200)]
        for frame in self.standing_frames:
            frame.set_colorkey(BLACK)
        self.walk_frames_r = [self.game.sprites.get_image(200, 0, 200, 200),
                              self.game.sprites.get_image(400, 0, 200, 200),
                              self.game.sprites.get_image(600, 0, 200, 200)]
        # NOTE(review): dead frame list kept as a bare string literal - no runtime effect
        """self.game.sprites.get_image(200, 0, 200, 200),
                              self.game.sprites.get_image(200, 200, 200, 200),
                              self.game.sprites.get_image(200, 400, 200, 200),
                              self.game.sprites.get_image(200, 600, 200, 200)"""
        self.walk_frames_l = []
        for frame in self.walk_frames_r:
            frame.set_colorkey(BLACK)
            self.walk_frames_l.append(pg.transform.flip(frame, True, False))
    def jump(self):
        # jump only if standing on a platform
        pg.mixer.Sound.play(pg.mixer.Sound("assets/audio/jump.wav"))
        # nudge one pixel down-range to detect the platform underneath, then undo
        self.rect.x += 1
        hits = pg.sprite.spritecollide(self, self.game.platforms, False)
        self.rect.x -= 1
        if hits:
            self.vel.y = -PLAYER_JUMP
    def update(self):
        """Per-frame physics step: read input, integrate acceleration/velocity, move the rect."""
        self.animate()
        self.acc = vec(0, PLAYER_GRAV)
        keys = pg.key.get_pressed()
        if keys[pg.K_LEFT]:
            self.acc.x = - self.propiedades.player_acc
        if keys[pg.K_RIGHT]:
            self.acc.x = self.propiedades.player_acc
        # APPLY FRICTION
        self.acc.x += self.vel.x * PLAYER_FRICTION
        # equations of motion
        self.vel += self.acc
        if abs(self.vel.x) < 0.1:
            self.vel.x = 0
        self.pos += self.vel + 0.5 * self.acc
        self.rect.midbottom = self.pos
    def animate(self):
        """Advance the walk/idle animation (frame changes every 200 ms)."""
        now = pg.time.get_ticks()
        if self.vel.x != 0:
            self.walking = True
        else:
            self.walking = False
        # show walk animation
        if self.walking and now - self.last_update > 200:
            self.last_update = now
            self.current_frame = (self.current_frame + 1) % \
                                 len(self.walk_frames_l)
            bottom = self.rect.bottom
            if self.vel.x > 0:
                self.image = self.walk_frames_r[self.current_frame]
            else:
                self.image = self.walk_frames_l[self.current_frame]
            self.rect = self.image.get_rect()
            self.rect.bottom = bottom
        # show idle animation
        if not self.walking and now - self.last_update > 200:
            self.last_update = now
            self.current_frame = (self.current_frame + 1) % len(self.standing_frames)
            bottom = self.rect.bottom
            self.image = self.standing_frames[self.current_frame]
            self.rect = self.image.get_rect()
            self.rect.bottom = bottom
    def disminuirVida(self, disminucion):
        """Subtract *disminucion* from the player's health and return the new value."""
        self.vida -= disminucion
        return self.vida
class Serpiente(pg.sprite.Sprite):
    """Snake enemy sprite: patrols horizontally, flipping direction via cambiarMovimiento().

    Frames come from game.sprites_serpientes, scaled to SERPIENTE_PROP with a
    WHITE colorkey; physics constants (PLAYER_GRAV, PLAYER_FRICTION, SERP_ACC)
    are module-level.
    """
    def __init__(self, game, x, y):
        pg.sprite.Sprite.__init__(self)
        self.game = game
        self.walking = False
        self.current_frame = 0
        self.last_update = 0      # tick of the last animation frame change
        self.load_images()
        self.image = self.standing_frames[0]
        self.rect = self.image.get_rect()
        self.rect.center = (WIDTH / 2, HEIGHT / 2)
        self.pos = vec(x, y)
        self.vel = vec(0, 0)
        self.acc = vec(0, 0)
        # True = moving left, False = moving right
        self.movimiento = True
    def load_images(self):
        """Cut the snake frames from the spritesheet (right frames are flipped for left)."""
        self.standing_frames = [pg.transform.scale(self.game.sprites_serpientes.get_image(0, 0, 165, 90.5), SERPIENTE_PROP)]
        for frame in self.standing_frames:
            frame.set_colorkey(WHITE)
        self.walk_frames_r = [pg.transform.scale(self.game.sprites_serpientes.get_image(165, 0, 165, 90.5), SERPIENTE_PROP),
                              pg.transform.scale(self.game.sprites_serpientes.get_image(330, 0, 165, 90.5), SERPIENTE_PROP),
                              pg.transform.scale(self.game.sprites_serpientes.get_image(495, 0, 165, 90.5), SERPIENTE_PROP)]
        self.walk_frames_l = []
        for frame in self.walk_frames_r:
            frame.set_colorkey(WHITE)
            self.walk_frames_l.append(pg.transform.flip(frame, True, False))
    def cambiarMovimiento(self):
        """Reverse the patrol direction."""
        self.movimiento = not self.movimiento
    def update(self):
        """Per-frame physics step: accelerate in the patrol direction, integrate, move the rect."""
        self.animate()
        self.acc = vec(0, PLAYER_GRAV)
        if self.movimiento:
            self.acc.x = -SERP_ACC
        else:
            self.acc.x = SERP_ACC
        # APPLY FRICTION
        self.acc.x += self.vel.x * PLAYER_FRICTION
        # equations of motion
        self.vel += self.acc
        if abs(self.vel.x) < 0.1:
            self.vel.x = 0
        self.pos += self.vel + 0.5 * self.acc
        self.rect.midbottom = self.pos
    def animate(self):
        """Advance the walk/idle animation (frame changes every 200 ms)."""
        now = pg.time.get_ticks()
        if self.vel.x != 0:
            self.walking = True
        else:
            self.walking = False
        # show walk animation
        if self.walking:
            if now - self.last_update > 200:
                self.last_update = now
                self.current_frame = (self.current_frame + 1) % \
                                     len(self.walk_frames_l)
                bottom = self.rect.bottom
                if self.vel.x > 0:
                    self.image = self.walk_frames_r[self.current_frame]
                else:
                    self.image = self.walk_frames_l[self.current_frame]
                self.rect = self.image.get_rect()
                self.rect.bottom = bottom
        # show idle animation
        if not self.walking:
            if now - self.last_update > 200:
                self.last_update = now
                self.current_frame = (self.current_frame + 1) % len(self.standing_frames)
                bottom = self.rect.bottom
                self.image = self.standing_frames[self.current_frame]
                self.rect = self.image.get_rect()
                self.rect.bottom = bottom
class Soldado(pg.sprite.Sprite):
    """Soldier enemy sprite: patrols horizontally, flipping direction via cambiarMovimiento().

    Frames come from game.sprites_soldado, scaled to SOLDADO_PROP with a BLACK
    colorkey. Note: self.walking is first assigned in animate(), which update()
    always calls before reading it.
    """
    def __init__(self, game, x, y):
        pg.sprite.Sprite.__init__(self)
        self.game = game
        self.current_frame = 0
        self.last_update = 0      # tick of the last animation frame change
        self.load_images()
        self.image = self.standing_frames[0]
        self.rect = self.image.get_rect()
        self.rect.center = (WIDTH / 2, HEIGHT / 2)
        self.pos = vec(x, y)
        self.vel = vec(0, 0)
        self.acc = vec(0, 0)
        # True = moving left, False = moving right
        self.movimiento = True
    def load_images(self):
        """Cut the soldier frames from the spritesheet (right frames are flipped for left)."""
        self.standing_frames = [pg.transform.scale(self.game.sprites_soldado.get_image(0, 0, 200, 200), SOLDADO_PROP)]
        for frame in self.standing_frames:
            frame.set_colorkey(BLACK)
        self.walk_frames_r = [pg.transform.scale(self.game.sprites_soldado.get_image(200, 0, 200, 200), SOLDADO_PROP),
                              pg.transform.scale(self.game.sprites_soldado.get_image(400, 0, 200, 200), SOLDADO_PROP),
                              pg.transform.scale(self.game.sprites_soldado.get_image(600, 0, 200, 200), SOLDADO_PROP)]
        self.walk_frames_l = []
        for frame in self.walk_frames_r:
            frame.set_colorkey(BLACK)
            self.walk_frames_l.append(pg.transform.flip(frame, True, False))
    def update(self):
        """Per-frame physics step: accelerate in the patrol direction, integrate, move the rect."""
        self.animate()
        self.acc = vec(0, PLAYER_GRAV)
        if self.movimiento:
            self.acc.x = -SERP_ACC
        else:
            self.acc.x = SERP_ACC
        # APPLY FRICTION
        self.acc.x += self.vel.x * PLAYER_FRICTION
        # equations of motion
        self.vel += self.acc
        if abs(self.vel.x) < 0.1:
            self.vel.x = 0
        self.pos += self.vel + 0.5 * self.acc
        self.rect.midbottom = self.pos
    def cambiarMovimiento(self):
        """Reverse the patrol direction."""
        self.movimiento = not self.movimiento
    def animate(self):
        """Advance the walk/idle animation (frame changes every 200 ms)."""
        now = pg.time.get_ticks()
        if self.vel.x != 0:
            self.walking = True
        else:
            self.walking = False
        # show walk animation
        if self.walking and now - self.last_update > 200:
            self.last_update = now
            self.current_frame = (self.current_frame + 1) % \
                                 len(self.walk_frames_l)
            bottom = self.rect.bottom
            if self.vel.x > 0:
                self.image = self.walk_frames_r[self.current_frame]
            else:
                self.image = self.walk_frames_l[self.current_frame]
            self.rect = self.image.get_rect()
            self.rect.bottom = bottom
        # show idle animation
        if not self.walking and now - self.last_update > 200:
            self.last_update = now
            self.current_frame = (self.current_frame + 1) % len(self.standing_frames)
            bottom = self.rect.bottom
            self.image = self.standing_frames[self.current_frame]
            self.rect = self.image.get_rect()
            self.rect.bottom = bottom
class Boss(pg.sprite.Sprite):
    """Boss enemy sprite: patrols like Soldado but carries BOOS_VIDA hit points.

    Frames come from game.sprites_boss, scaled to SOLDADO_PROP with a BLACK
    colorkey. disminuirVida() removes one hit point per call.
    """
    def __init__(self, game, x, y):
        pg.sprite.Sprite.__init__(self)
        self.game = game
        self.current_frame = 0
        self.last_update = 0      # tick of the last animation frame change
        self.load_images()
        self.image = self.standing_frames[0]
        self.rect = self.image.get_rect()
        self.rect.center = (WIDTH / 2, HEIGHT / 2)
        self.pos = vec(x, y)
        self.vel = vec(0, 0)
        self.acc = vec(0, 0)
        self.vida = BOOS_VIDA
        # True = moving left, False = moving right
        self.movimiento = True
    def load_images(self):
        """Cut the boss frames from the spritesheet (right frames are flipped for left)."""
        self.standing_frames = [pg.transform.scale(self.game.sprites_boss.get_image(0, 0, 200, 200), SOLDADO_PROP)]
        for frame in self.standing_frames:
            frame.set_colorkey(BLACK)
        self.walk_frames_r = [pg.transform.scale(self.game.sprites_boss.get_image(200, 0, 200, 200), SOLDADO_PROP),
                              pg.transform.scale(self.game.sprites_boss.get_image(400, 0, 200, 200), SOLDADO_PROP),
                              pg.transform.scale(self.game.sprites_boss.get_image(600, 0, 200, 200), SOLDADO_PROP)]
        self.walk_frames_l = []
        for frame in self.walk_frames_r:
            frame.set_colorkey(BLACK)
            self.walk_frames_l.append(pg.transform.flip(frame, True, False))
    def update(self):
        """Per-frame physics step: accelerate in the patrol direction, integrate, move the rect."""
        self.animate()
        self.acc = vec(0, PLAYER_GRAV)
        if self.movimiento:
            self.acc.x = -SERP_ACC
        else:
            self.acc.x = SERP_ACC
        # APPLY FRICTION
        self.acc.x += self.vel.x * PLAYER_FRICTION
        # equations of motion
        self.vel += self.acc
        if abs(self.vel.x) < 0.1:
            self.vel.x = 0
        self.pos += self.vel + 0.5 * self.acc
        self.rect.midbottom = self.pos
    def cambiarMovimiento(self):
        """Reverse the patrol direction."""
        self.movimiento = not self.movimiento
    def animate(self):
        """Advance the walk/idle animation (frame changes every 200 ms)."""
        now = pg.time.get_ticks()
        if self.vel.x != 0:
            self.walking = True
        else:
            self.walking = False
        # show walk animation
        if self.walking and now - self.last_update > 200:
            self.last_update = now
            self.current_frame = (self.current_frame + 1) % \
                                 len(self.walk_frames_l)
            bottom = self.rect.bottom
            if self.vel.x > 0:
                self.image = self.walk_frames_r[self.current_frame]
            else:
                self.image = self.walk_frames_l[self.current_frame]
            self.rect = self.image.get_rect()
            self.rect.bottom = bottom
        # show idle animation
        if not self.walking and now - self.last_update > 200:
            self.last_update = now
            self.current_frame = (self.current_frame + 1) % len(self.standing_frames)
            bottom = self.rect.bottom
            self.image = self.standing_frames[self.current_frame]
            self.rect = self.image.get_rect()
            self.rect.bottom = bottom
    def disminuirVida(self):
        """Remove one hit point and return the remaining health."""
        self.vida -= 1
        return self.vida
class Platform(pg.sprite.Sprite):
    """Static platform sprite loaded from the terrain folder and scaled to (w, h)."""
    def __init__(self, esce, x, y, w, h):
        pg.sprite.Sprite.__init__(self)
        surface = pg.image.load("assets/images/terrenos/" + esce).convert()
        self.image = pg.transform.scale(surface, (w, h))
        self.rect = self.image.get_rect()
        self.rect.x, self.rect.y = x, y
class Moneda(pg.sprite.Sprite):
    """Collectible coin sprite (white pixels are transparent)."""
    def __init__(self, x, y):
        pg.sprite.Sprite.__init__(self)
        raw = pg.image.load("assets/images/objetos/moneda.gif").convert()
        self.image = pg.transform.scale(raw, COINS_PROP)
        self.image.set_colorkey(WHITE)
        self.rect = self.image.get_rect()
        self.rect.x, self.rect.y = x, y
class Terreno(pg.sprite.Sprite):
    """Ground strip: caller fixes the width, and `h` extends the texture's
    native height."""
    def __init__(self, x, y, w, h):
        pg.sprite.Sprite.__init__(self)
        surface = pg.image.load("assets/images/terrenos/terreno.png").convert()
        self.image = pg.transform.scale(surface, (w, surface.get_height() + h))
        self.rect = self.image.get_rect()
        self.rect.topleft = (x, y)
class Cartel(pg.sprite.Sprite):
    """Signpost sprite with black treated as transparent."""
    def __init__(self, x, y):
        pg.sprite.Sprite.__init__(self)
        surface = pg.image.load("assets/images/objetos/cartel1.png").convert()
        self.image = pg.transform.scale(surface, CARTEL_PROP)
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.topleft = (x, y)
class Checkpoint(pg.sprite.Sprite):
    """Checkpoint flame sprite (fixed 100x80 size, white transparent)."""
    def __init__(self, x, y):
        pg.sprite.Sprite.__init__(self)
        surface = pg.image.load("assets/images/objetos/llama_1.gif").convert()
        self.image = pg.transform.scale(surface, (100, 80))
        self.image.set_colorkey(WHITE)
        self.rect = self.image.get_rect()
        self.rect.topleft = (x, y)
class Rocon(pg.sprite.Sprite):
    """Tall rock obstacle sprite (fixed 50x200 size)."""
    def __init__(self, x, y):
        pg.sprite.Sprite.__init__(self)
        surface = pg.image.load("assets/images/obstaculos/rocon.png").convert()
        self.image = pg.transform.scale(surface, (50, 200))
        self.rect = self.image.get_rect()
        self.rect.topleft = (x, y)
class Camera:
    """Scrolling viewport that keeps a target sprite centered on screen,
    clamped so the view never scrolls past the map origin."""
    def __init__(self, width, height):
        self.camera = pg.Rect(0, 0, width, height)
        self.width = width
        self.height = height
    def apply(self, entity):
        """Translate an entity's rect into screen coordinates."""
        return entity.rect.move(self.camera.topleft)
    def update(self, target):
        """Re-center the camera on `target`."""
        offset_x = int(WIDTH / 2) - target.rect.x
        offset_y = 490 - target.rect.y
        # limit scrolling to map size: never show space left of / above origin
        offset_x = min(0, offset_x)
        offset_y = min(0, offset_y)
        self.camera = pg.Rect(offset_x, offset_y, self.width, self.height)
|
[
"edu.lara.lev@gmail.com"
] |
edu.lara.lev@gmail.com
|
e254aa45d97a2f3ff329c8b06be41ad5a4e0aec5
|
3acb90a1e97a0e851c6e4b1b57dda78ec5e3e3b4
|
/problems/deep_copy_graph.py
|
0b0caab3f09a04fb8519c76e677dd80b5c5b183b
|
[] |
no_license
|
jhyang12345/algorithm-problems
|
fea3c6498cff790fc4932404b5bbab08a6d4a627
|
704355013de9965ec596d2e0115fd2ca9828d0cb
|
refs/heads/master
| 2023-05-15T10:26:52.685471
| 2021-06-01T14:57:38
| 2021-06-01T14:57:38
| 269,333,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,641
|
py
|
# Given a node in a connected directional graph, create a copy of it.
#
# Here's an example and some starter code.
class Node:
    """Directed-graph node: a value plus an adjacency list of Nodes."""
    def __init__(self, value, adj=None):
        self.value = value
        self.adj = [] if adj is None else adj
        # per-instance guard set so __repr__ terminates on cyclic graphs
        self._print_visited = set()
    def __repr__(self):
        # a node already on the current print stack renders as nothing
        if self in self._print_visited:
            return ''
        self._print_visited.add(self)
        neighbor_text = [f'{n}\n' for n in self.adj]
        self._print_visited.remove(self)
        return ''.join(neighbor_text) + f'({self.value}, ({[n.value for n in self.adj]}))'
def deep_copy_graph(graph_node, visited=None):
    """Return a deep copy of the graph reachable from `graph_node`.

    BFS over the original graph. `copies` maps each original node to its
    clone and doubles as the visited set, so every node is cloned exactly
    once.

    BUG FIX: the original marked nodes visited only when they were
    *dequeued*, so a node referenced by two different predecessors before
    being processed was enqueued and cloned twice, producing an incorrect
    copy for such graphs. Tracking discovery (not processing) fixes that.

    The `visited` parameter was ignored by the original as well; it is kept
    only for backward compatibility.
    """
    copies = {graph_node: Node(graph_node.value)}
    queue = [graph_node]
    while queue:
        cur = queue.pop(0)
        clone = copies[cur]
        for neighbor in cur.adj:
            if neighbor not in copies:
                # first time we see this node: clone it and schedule it
                copies[neighbor] = Node(neighbor.value)
                queue.append(neighbor)
            clone.adj.append(copies[neighbor])
    return copies[graph_node]
# Demo: build a small cyclic graph (1 -> 5 -> 3 -> 4 -> {3, 2}, 2 -> 4),
# copy it, and print the copy.
n5 = Node(5)
n4 = Node(4)
n3 = Node(3, [n4])
n2 = Node(2)
n1 = Node(1, [n5])
n5.adj = [n3]
n4.adj = [n3, n2]
n2.adj = [n4]
graph_copy = deep_copy_graph(n1)
print(graph_copy)
# Expected output:
# (2, ([4]))
# (4, ([3, 2]))
# (3, ([4]))
# (5, ([3]))
# (1, ([5]))
|
[
"jhyang12345@naver.com"
] |
jhyang12345@naver.com
|
efa07d57ac7d1b5f748e8b5e108f68d9ecc11029
|
444c42503bf34df6dbd1e74d8c66e78c9d6b2564
|
/ansible/models.py
|
d129b1053aedc7d86022363058e6f25489ed9b37
|
[] |
no_license
|
liyanwei4408866/DjangoDemo
|
0ee8b189093c5ba351cb22c8cfbe7ec118d0a5c6
|
5e3ed793191a88e1c2a83b045e6d6eadc5cf66c3
|
refs/heads/master
| 2020-05-15T11:12:59.494731
| 2019-04-19T07:00:10
| 2019-04-19T07:00:10
| 182,217,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,887
|
py
|
from django.db import models
# Create your models here.
class Grade(models.Model):
    """A school class/grade with per-gender student counts and a
    soft-delete flag."""
    gname = models.CharField(max_length=20)
    gdate = models.DateTimeField()
    ggirlnum = models.IntegerField()
    gboynum = models.IntegerField()
    isDelete = models.BooleanField(default=False)

    def __str__(self):
        return self.gname

    class Meta:
        # BUG FIX: the original wrote `db_table:"grade"` -- a bare
        # annotation that Django ignores, so the default "<app>_grade"
        # table name was silently used. An assignment actually sets it.
        db_table = "grade"
class StudentManager(models.Manager):
    """Manager that hides soft-deleted students and offers a factory
    helper for building (unsaved) Student instances."""

    def get_queryset(self):
        # BUG FIX: the original also called models.Manager.get_queryset(self)
        # and discarded the result -- a pointless extra queryset build.
        return super(StudentManager, self).get_queryset().filter(isDelete=False)

    def createStudent(self, name, age, gender, contend):
        """Build (without saving) a Student with the given field values."""
        stu = self.model()
        stu.sname = name
        stu.sage = age
        stu.sgender = gender
        stu.scontend = contend
        return stu
class Student(models.Model):
    """Student record; `stuObj2` filters out soft-deleted rows."""
    # custom model manager (default Manager kept commented out by the author)
    # stuObj = models.Manager()
    stuObj2 = StudentManager()
    sname = models.CharField(max_length=20)
    sgender = models.BooleanField(default=True)
    sage = models.IntegerField(db_column="sage")
    scontend = models.CharField(max_length=20)
    isDelete = models.BooleanField(default=False)
    sgrade = models.ForeignKey("Grade", on_delete=models.CASCADE)
    createTime = models.DateTimeField(auto_now_add=True)  # set once on insert
    updateTime = models.DateTimeField(auto_now=True)  # refreshed on every save

    def __str__(self):
        return self.sname

    class Meta:
        # BUG FIX: the original used annotation syntax (`db_table:"student"`,
        # `ordering:[...]`), which declares nothing; assignments are required
        # for Django to pick these options up.
        db_table = "student"  # table name; default would be "<app>_student"
        ordering = ['-createTime']  # default ordering: newest first

    # alternate constructor; cls == Student
    @classmethod
    def createStudent(cls, name, age, gender, contend):
        stu = cls(sname=name, sage=age, sgender=gender, scontend=contend)
        return stu
|
[
"Administrator@XL-20170505IAOA"
] |
Administrator@XL-20170505IAOA
|
53e8ea169d0cfd5c2042f9ade08153f4669354fc
|
65b4522c04c2be071c2d42095956fe950fe1cebe
|
/inversions/inversion10/iter2/run5/analysis/pred_disp/create_predicted_disp_database.py
|
608cb3ba2bafea964917232a2b235b12007f7f0a
|
[] |
no_license
|
geodesy/viscojapan
|
ac0cd93f7a2134cd2651623b94879dcc21c0c46a
|
03e70265b56eb5994e73bcb6066f0be338e42f27
|
refs/heads/master
| 2021-03-03T18:19:07.779601
| 2015-07-16T03:50:49
| 2015-07-16T03:50:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
import sqlite3
import numpy as np
import viscojapan as vj
# Build the predicted-displacement object from the inversion outputs.
# Paths below are relative to this analysis directory; the G0/G1/G2/G3 files
# are precomputed Green's functions (presumably varying viscosity, elastic
# thickness and rake -- TODO confirm against the green_function build scripts).
pred = vj.inv.DispPred(
    file_G0 = '../../../green_function/G0_He50km_VisM6.3E18_Rake83.h5',
    result_file = '../../outs/nrough_05_naslip_11.h5',
    fault_file = '../../../fault_model/fault_bott80km.h5',
    files_Gs = ['../../../green_function/G1_He50km_VisM1.0E19_Rake83.h5',
                '../../../green_function/G2_He60km_VisM6.3E18_Rake83.h5',
                '../../../green_function/G3_He50km_VisM6.3E18_Rake90.h5'
                ],
    nlin_par_names = ['log10(visM)','log10(He)','rake'],
    file_incr_slip0 = '../../slip0/v1/slip0.h5',
    )
# Persist the predicted displacements: create the database then insert all rows.
writer = vj.inv.PredDispToDatabaseWriter(
    pred_disp = pred
    )
writer.create_database()
writer.insert_all()
|
[
"zy31415@gmail.com"
] |
zy31415@gmail.com
|
5e6eab96a36af8362b1089b13514cebebf213f95
|
11812a0cc7b818292e601ecdd4aa4c4e03d131c5
|
/100days_of_python/day32/main.py
|
2d1a1c5e6332bb4dae8a588642e9e2d964c7be13
|
[] |
no_license
|
SunshineFaxixi/Python_Learning
|
f1e55adcfa898489cc9146ccfb220f0b48a31a22
|
ab3ca44d013311b6de02124091acc4c36a83c4d9
|
refs/heads/master
| 2021-08-16T05:47:29.963118
| 2021-01-04T13:48:30
| 2021-01-04T13:48:30
| 238,857,341
| 1
| 0
| null | 2020-03-03T13:53:08
| 2020-02-07T06:21:46
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
##################### Extra Hard Starting Project ######################
# Birthday mailer: check birthdays.csv for anyone whose birthday is today
# and e-mail them a randomly chosen letter template with [NAME] filled in.
import pandas
from datetime import datetime
import os
import random
import smtplib
# NOTE(review): credentials are hard-coded in source -- move them to
# environment variables or a secrets store before sharing this file.
MY_EMAIL = "xxhan2018@163.com"
MY_PASSWORD = "TXHTVGKIOLEHXVCI"
today = datetime.now()
today_tuple = (today.month, today.day)
# Index every CSV row by its (month, day) pair for O(1) birthday lookup.
all_birth_info = pandas.read_csv("birthdays.csv")
birthday_dict = {(data_row["month"], data_row["day"]): data_row for (index, data_row) in all_birth_info.iterrows()}
# 2. Check if today matches a birthday in the birthdays.csv
if today_tuple in birthday_dict:
    # 3. If step 2 is true, pick a random letter from letter templates and replace the [NAME] with the person's actual name from birthdays.csv
    birthday_person = birthday_dict[today_tuple]
    file_path = f"letter_templates/letter_{random.randint(1, 3)}.txt"
    with open(file_path) as data:
        content = data.read()
        content = content.replace("[NAME]", birthday_person["name"])
    # print(content)
    # 4. Send the letter generated in step 3 to that person's email address.
    with smtplib.SMTP("smtp.163.com") as connection:
        connection.starttls()
        connection.login(user=MY_EMAIL, password=MY_PASSWORD)
        connection.sendmail(
            from_addr=MY_EMAIL,
            to_addrs=birthday_person["email"],
            msg=f"Subject: Happy Birthday!\n\n{content}"
        )
|
[
"xxhan2018@163.com"
] |
xxhan2018@163.com
|
c16b7296ae527faae7b03ce58aac6dd19ea48438
|
45ea73115f79affcd5d3770fc49b8f94dee7e903
|
/python/windchill.py
|
91bb82ca9982816d268e03589451e4fbfc787ede
|
[] |
no_license
|
Gamertoc/University-precourse
|
85033037a4536c7ee9974db2d26c78cd13f08d5e
|
feeaec10c45f2bfb4c72d8c5f3f1a77887f37641
|
refs/heads/master
| 2020-08-15T08:30:42.124899
| 2019-10-15T14:11:03
| 2019-10-15T14:11:03
| 215,309,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
import math

# Wind-chill calculator: read temperature and wind speed, insist that both
# are whole numbers, then print the wind chill by two formulas.
T = float(input('Temperature in °C (integer): '))
v = float(input('Speed in km/h (integer): '))
# BUG FIX: the original re-read input as a *string* here (T = input(...)),
# so `T != int(T)` compared str with int -- always True -- and the loop
# never terminated, even for valid input. Converting to float restores the
# intended "whole number" validation.
while T != int(T):
    T = float(input('Try again! Temperature in °C: '))
while v != int(v):
    v = float(input('Try again! Speed in km/h: '))
# Older and newer (2001-era) wind-chill formulas -- coefficients as given
# by the original author; verify against the course material if reused.
Wold = 33 + (.478 + .237*math.sqrt(v) - .0124*v) * (T-33)
Wnew = 13.12 + .6215*T -11.37*(v**0.16) + .3965*T*(v**0.16)
# truncate (toward zero) to one decimal place, as the original did
Wold = (int(Wold*10))/10
Wnew = (int(Wnew*10))/10
print('Windchill temperature in °C (old method): ', Wold)
print('Windchill temperature in °C (new method): ', Wnew)
|
[
"noreply@github.com"
] |
Gamertoc.noreply@github.com
|
0487d2a65c915d43cdd636edbad2f5aaa02af497
|
e556419a56549f8d42e60bda98df519335e70246
|
/main_affectation.py
|
30b3f69360e9d324d4ede62c5913773774a4ae4b
|
[] |
no_license
|
Brandone123/Projet_RO
|
cf7f1964de7c9fe41d2bee0f92ae4a35ab30353f
|
3523b3e067631132128971cb9bf4276965edd4fd
|
refs/heads/master
| 2022-11-06T13:13:50.499059
| 2020-06-22T10:27:53
| 2020-06-22T10:27:53
| 272,175,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,364
|
py
|
from PyQt5.QtWidgets import QMainWindow, QMessageBox, QSpacerItem, QPushButton, QSpinBox, QHBoxLayout, QApplication
# from PyQt5.QtGui import Q
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSlot
from munkres import Munkres, print_matrix
from AffectionWindow import Ui_MainWindow
import sys
class MainAffectation(QMainWindow, Ui_MainWindow):
    """Window that builds an editable cost matrix of spin boxes and solves
    the assignment problem with the Hungarian algorithm (munkres)."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setupUi(self)
        # cell_list[i][j] is the QSpinBox holding the cost at row i, col j
        self.cell_list = []
    @pyqtSlot()
    def on_pushButtonGenMatrix_clicked(self):
        """Slot: read the requested size and build the spin-box grid."""
        col = self.spinBoxCol.value()
        row = self.spinBoxRow.value()
        # NOTE(review): (col, row) is passed into a parameter list declared
        # (row, col) -- orientation is swapped for non-square matrices;
        # confirm which is intended.
        self.create_matrix_row(col, row)
    def create_matrix_row(self, row, col):
        """Build a row x col grid of spin boxes plus a Compute button."""
        for i in range(1, row+1):
            hLayout = QHBoxLayout()
            hSpacer = QSpacerItem(40, 20)
            hLayout.addItem(hSpacer)
            row_list = []
            for j in range(1, col+1):
                spinBox = QSpinBox()
                object_name = 'spinBox_{}_{}'.format(i, j)
                row_list.append(spinBox)
                spinBox.setObjectName(object_name)
                # allow negative costs
                spinBox.setMinimum(-999)
                spinBox.setMaximum(999)
                hLayout.addWidget(spinBox)
            self.cell_list.append(row_list)
            hLayout.addItem(hSpacer)
            self.verticalLayoutMatrix.addLayout(hLayout)
        verticalSpacer = QSpacerItem(20, 40)
        self.verticalLayoutMatrix.addItem(verticalSpacer)
        pushButtonCompute = QPushButton('Compute')
        self.verticalLayoutMatrix.addWidget(pushButtonCompute)
        pushButtonCompute.clicked.connect(self.compute_affectation)
    def compute_affectation(self):
        """Read the current matrix, solve the assignment, display result."""
        matrix = self.get_matrix(self.cell_list)
        munkres = Munkres()
        # indexes is a list of (row, col) pairs of the optimal assignment
        indexes = munkres.compute(matrix)
        display_result = 'Resultat : {} \n Cout : {}'.format(
            str(indexes), sum([matrix[i[0]][i[1]] for i in indexes]))
        self.labelResult.setText(str(display_result))
    def get_matrix(self, cell_list):
        """Extract the integer values from the spin-box grid as a 2-D list."""
        matrix = []
        for row in cell_list:
            m_row = []
            for col in row:
                m_row.append(col.value())
            matrix.append(m_row)
        return matrix
if __name__ == '__main__':
    # standard Qt bootstrap: create the app, show the window, run the loop
    app = QApplication(sys.argv)
    win = MainAffectation()
    win.show()
    sys.exit(app.exec_())
|
[
"noreply@github.com"
] |
Brandone123.noreply@github.com
|
a1cd97ecced854a2187a5ae65068a685c323438c
|
c7e09ffb1bc9b95e266ef89984b39abfbd6976cd
|
/mcts/mcts_model.py
|
ed593186bcc4b1dd7e81c5a1a5b73991289776ed
|
[] |
no_license
|
liyunlon008/RuleBasedModelV2
|
c5bac60e10c6ab80b1f738c05850401fc5f72073
|
bf5c7e042d15090053086523c4e2f112bc799b06
|
refs/heads/master
| 2022-05-09T03:01:26.756576
| 2019-08-14T03:25:20
| 2019-08-14T03:25:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,787
|
py
|
from __future__ import absolute_import
import sys
import os
sys.path.insert(0, os.path.join('..'))
from game.engine import Agent, Game, Card
from mcts.tree_policy import tree_policy
from mcts.default_policy import default_policy
from mcts.backup import backup
from mcts.tree import Node, State
from mcts.get_moves_prune import get_moves
from mcts.get_bestchild import get_bestchild
import numpy as np
from collections import Counter
import time
class MctsModel(Agent):
    """Doudizhu agent that picks moves with Monte-Carlo tree search.

    The search tree is kept between turns: after the two opponents play,
    choose() tries to descend to the matching grandchild node so earlier
    simulation statistics are reused; otherwise the tree is rebuilt.
    """
    def __init__(self, player_id):
        super(MctsModel, self).__init__(player_id)
        root = Node(None, None)
        # node of the search tree corresponding to the current game state
        self.current_node = root
    def choose(self, state):
        """Run MCTS from current_node and return (move, None)."""
        # start = time.time()
        # Try to re-locate current_node by replaying the opponents' moves.
        cards_out = self.game.cards_out
        length = len(cards_out)
        # flag records how far we managed to descend (2 == both moves matched)
        flag = 0
        if length > 2:
            # the two moves the opponents chose since our last turn
            out1 = self.list_to_card(cards_out[length-2][1])
            out2 = self.list_to_card(cards_out[length-1][1])
            for child in self.current_node.get_children():
                if self.compare(child.state.action, out1):
                    self.current_node = child
                    flag = 1
                    break
            if flag == 1:
                for child in self.current_node.get_children():
                    if self.compare(child.state.action, out2):
                        self.current_node = child
                        flag = 2
                        break
        my_id = self.player_id
        if flag != 2:
            # could not follow the actual play line -- rebuild the tree
            root = Node(None, None)
            self.current_node = root
            # id of the next player
            next_id = (my_id + 1) % 3
            # id of the player after next
            next_next_id = (my_id + 2) % 3
            my_card = self.card_list_to_dict(self.get_hand_card())
            # next player's hand
            next_card = self.card_list_to_dict(self.game.players[next_id].get_hand_card())
            # hand of the player after next
            next_next_card = self.card_list_to_dict(self.game.players[next_next_id].get_hand_card())
            last_move = self.trans_card(Card.visual_card(self.game.last_move))
            last_p = self.game.last_pid
            moves_num = len(get_moves(my_card, last_move))
            state = State(my_id, my_card, next_card, next_next_card, last_move, -1, moves_num, None, last_p)
            self.current_node.set_state(state)
        # MCTS main loop: select/expand, simulate, then backpropagate reward
        computation_budget = 2000
        for i in range(computation_budget):
            expand_node = tree_policy(self.current_node, my_id)
            reward = default_policy(expand_node, my_id)
            backup(expand_node, reward)
        best_next_node = get_bestchild(self.current_node, my_id)
        move = best_next_node.get_state().action
        self.current_node = best_next_node
        new_move = self.card_to_list(move)
        # print hand and chosen move for debugging/inspection
        hand_card = []
        for i, n in enumerate(Card.all_card_name):
            hand_card.extend([n] * self.get_hand_card()[i])
        print("Player {}".format(self.player_id), ' ', hand_card, end=' // ')
        print(Card.visual_card(new_move))
        # end = time.time()
        # dur = end - start
        # print('cost: {}'.format(dur))
        return new_move, None
    @staticmethod
    # compare two lists ignoring element order
    def compare(s, t):
        return Counter(s) == Counter(t)
    @staticmethod
    def trans_card(before):
        # cast every card name to int
        after = []
        for card in before:
            after.append(int(card))
        return after
    @staticmethod
    def card_list_to_dict(card_list):
        # e.g. [3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0] -> ['3':3, '4':3, '5':0, '6':0, '7':0, '8':0, '9':0, '10':0, '11':0, '12':0, '13':0, '1':1, '2':1, '14':0, '15':0]
        card_name = Card.all_card_name
        card_dict = dict(zip(card_name, card_list))
        return card_dict
    @staticmethod
    def card_to_list(before):
        # e.g. [3, 3, 3, 4, 4, 4, 1, 2] -> [3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0]
        # index = [str(i) for i in range(3, 14)] + ['1', '2', '14', '15']
        tem = [0] * 15
        for card in before:
            tem[card - 1] += 1
        # reorder so counts follow Card.all_card_name order (3..13, 1, 2, 14, 15)
        tem = tem[2:-2] + tem[:2] + tem[-2:]
        return tem
    @staticmethod
    def list_to_card(before):
        # e.g. [3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0] -> [3, 3, 3, 4, 4, 4, 1, 2]
        cards = [i for i in range(3, 14)] + [1, 2, 14, 15]
        card = []
        for i, j in enumerate(before):
            card += ([cards[i]] * j)
        return card
class RandomModel(Agent):
    """Baseline agent that plays a uniformly random legal move."""
    def choose(self, state):
        """Return (move, None) with `move` drawn uniformly from the legal
        moves for the current hand."""
        valid_moves = self.get_moves()
        # NOTE: the original also rebuilt a human-readable hand list here,
        # but it was consumed only by commented-out debug prints -- dead
        # work removed. Restore it if the debug prints come back:
        #   hand_card = []
        #   for i, n in enumerate(Card.all_card_name):
        #       hand_card.extend([n]*self.get_hand_card()[i])
        i = np.random.choice(len(valid_moves))
        move = valid_moves[i]
        return move, None
if __name__=="__main__":
# game = Game([RandomModel(i) for i in range(3)])
game = Game([RandomModel(0), MctsModel(1), RandomModel(2)])
# win_count = [0, 0, 0]
for i_episode in range(1):
game.game_reset()
# game.show()
for i in range(100):
pid, state, cur_moves, cur_move, winner, info = game.step()
#game.show()
if winner != -1:
print(str(i_episode) + ': ' + 'Winner:{}'.format(winner))
# win_count[winner] += 1
break
# print(win_count)
|
[
"625283021@qq.com"
] |
625283021@qq.com
|
9c12a9dc49718c2d3211144a328a690531b52177
|
122c0ce4b8709872f8ffa962708b26806a0b41ee
|
/PackEXEC/Eten.py
|
684f34b545e3d9687c3fe22ee5534ef9cffaa50c
|
[] |
no_license
|
Sniper099/WorkTest
|
de1d6729a01890706053e7c8434aac34421c59c9
|
5377f5cc7d3ed89f597be9ea0e39b8a148656737
|
refs/heads/master
| 2022-05-30T12:04:15.027020
| 2020-04-22T23:15:50
| 2020-04-22T23:15:50
| 256,840,217
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,348
|
py
|
import numpy as np
import sys
def Itener(zon):
    """Interactively build an itinerary for zone `zon` ('Rabat-Sale' or
    'Tiznit') and print the total distance between the chosen sites.

    Distances come from Matrix1.txt / Matrix2.txt, adjacency matrices whose
    row/column order matches the site-index dictionaries below. Any other
    value of `zon` silently does nothing.
    """
    Nu=int(input('Veuillez entrez le nombre de sites visités ou que vous voulez visiter :\t' )) # number of sites to visit
    SI=['Tour Hassan','Oudaya','Chellah','Le Musee Mohammed VI','Musee Belghazi','Musee Maroc Telecom','Village de poterie Oulja','Ancienne Medina Rabat','Oued Bouregreg','Jardin exotique']
    dictioI={'Tour Hassan': 0, 'Oudaya': 1, 'Chellah': 2, 'Le Musee Mohammed VI': 3, 'Musee Belghazi': 4, 'Musee Maroc Telecom': 5, 'Village de poterie Oulja': 6, 'Ancienne Medina Rabat': 7, 'Oued Bouregreg': 8, 'Jardin exotique': 9}
    SII=['Ein Zarqa','Mirleft','Quissariat Neqra','Ouad Assaka','Targa','Sidi Boulfdayl','Al Aqwass','Qasr Khalifi']
    dictioII={'Ein Zarqa': 0, 'Mirleft': 1, 'Quissariat Neqra': 2,'Ouad Assaka': 3,'Targa': 4,'Sidi Boulfdayl': 5,'Al Aqwass': 6,'Qasr Khalifi': 7}
    S=[]
    D=0 # accumulated distance (km)
    if zon=='Rabat-Sale':
        print('Veuillez choisir parmis les sites disponibles :')
        for i in SI:
            print('>>>Site Touristique :' + i)
        for i in range(Nu):
            site=str(input('Veuillez preciser le site touristique visité/à visiter : \n'))
            while (site not in dictioI):
                site=str(input('Vous avez mal ecrit le site! Essayer une autre fois SVP! \n'))
            S.append(site)
        matrix= np.loadtxt('Matrix1.txt', usecols=range(10)) # load the distance matrix with numpy for processing
        # sum the leg distances between consecutive chosen sites
        for i in range(len(S)-1):
            D+=matrix[dictioI[S[i]]][dictioI[S[i+1]]]
        print("la distance de cet intineraire est: " + str(D) +"km \n")
    if zon=='Tiznit':
        print('Veuillez choisir parmis les sites disponibles :')
        for i in SII:
            print('>>>Site Touristique :' + i)
        for i in range(Nu):
            site = str(input('Veuillez preciser le site touristique visité/à visiter : \n'))
            while (site not in dictioII):
                site = str(input('Vous avez mal ecrit le site! Essayer une autre fois SVP! \n'))
            S.append(site)
        matrix= np.loadtxt('Matrix2.txt', usecols=range(8))
        for i in range(len(S)-1):
            D+=matrix[dictioII[S[i]]][dictioII[S[i+1]]]
        print('Vous avez parcorru ou vous allez parcourir est de Distance de : ' + str(D) + 'km \n')
|
[
"nj.nava.99@gmail.com"
] |
nj.nava.99@gmail.com
|
6037784afde9a814b4a03625736d447476252e24
|
1e164c6d208e7c82870f37d1f2cc2206373e3ce1
|
/venv/Scripts/django-admin.py
|
107d5b5a4b6df634137dc021f7bb8fdb85634e7c
|
[] |
no_license
|
SAGAR0071/Realestate
|
d152a35452b438fbb801458189a07133bec50611
|
32030d933d114ecd6580d039c901866ea3fbd08e
|
refs/heads/master
| 2021-04-04T06:47:23.864348
| 2020-03-19T07:28:45
| 2020-03-19T07:28:45
| 248,433,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
#!C:\Users\You know that\PycharmProjects\Django\venv\Scripts\python.exe
# Entry point for Django's command-line administrative utility
# (generated by the virtualenv installer).
from django.core import management
if __name__ == "__main__":
    # dispatch to the management command named on the command line
    management.execute_from_command_line()
|
[
"sagarscimox@gmail.com"
] |
sagarscimox@gmail.com
|
23c0b017aab6fb2fba920cde4cd27a7b1878240f
|
a8b71966826f9119b264bdd32b60b218c795bd92
|
/test_AirportWeatherAPI.py
|
548f21fa4114ce7436a9a3114961749a31c1cc8c
|
[] |
no_license
|
BUEC500C1/api-design-lijunwei19
|
f19d929fb9fb8c1598929303a24ca41b478f9fa7
|
209f8a2be61ded47844f65d1f2b4ce7373ed691d
|
refs/heads/master
| 2020-12-27T22:42:52.976680
| 2020-02-05T19:21:33
| 2020-02-05T19:21:33
| 238,090,706
| 0
| 2
| null | 2020-02-05T18:58:38
| 2020-02-04T00:28:47
|
Python
|
UTF-8
|
Python
| false
| false
| 301
|
py
|
from AirportWeatherAPI import airport_Weather
import pytest
def test_airport_weather():
    """Each lookup should succeed: the API includes a "message" key only
    in error responses."""
    airports = (
        "Total Rf Heliport",
        "Aero B Ranch Airport",
        "Newport Hospital & Clinic Heliport",
    )
    for airport_name in airports:
        assert "message" not in airport_Weather(airport_name)
|
[
"jlyc8@bu.edu"
] |
jlyc8@bu.edu
|
02e52b31e058e832bbd4fe48a9863e3d6f212388
|
275b36012933d9471db4abcfa4631d1da3e69361
|
/tests/test_dice_interface/test_dice_tensorflow.py
|
cd8e36609a2c35052944a818c342bcba0d518de4
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
gaugup/DiCE
|
bad277c17ba62daf2ba41e6c2fc26844c986f33e
|
41dfde376ec3e5471d8e04899e639d2621b987f3
|
refs/heads/master
| 2023-03-02T11:05:00.561852
| 2021-02-11T23:45:24
| 2021-02-11T23:45:24
| 337,927,184
| 0
| 0
|
MIT
| 2021-02-11T23:45:25
| 2021-02-11T04:17:20
| null |
UTF-8
|
Python
| false
| false
| 4,858
|
py
|
import numpy as np
import pytest
import dice_ml
from dice_ml.utils import helpers
tf = pytest.importorskip("tensorflow")
@pytest.fixture
def tf_exp_object():
    """Build a DiCE explainer backed by the pretrained adult-income model
    for the installed TensorFlow major version."""
    # backend string encodes the TF major version: 'TF1' or 'TF2'
    backend = 'TF'+tf.__version__[0]
    dataset = helpers.load_adult_income_dataset()
    d = dice_ml.Data(dataframe=dataset, continuous_features=['age', 'hours_per_week'], outcome_name='income')
    ML_modelpath = helpers.get_adult_income_modelpath(backend=backend)
    m = dice_ml.Model(model_path= ML_modelpath, backend=backend)
    exp = dice_ml.Dice(d, m)
    return exp
class TestDiceTensorFlowMethods:
    """Unit tests for DiCE's loss terms and end-to-end CF generation,
    with separate code paths for TF1 (sessions) and TF2 (eager)."""
    @pytest.fixture(autouse=True)
    def _initiate_exp_object(self, tf_exp_object, sample_adultincome_query):
        self.exp = tf_exp_object # explainer object
        self.exp.do_cf_initializations(total_CFs=4, algorithm="DiverseCF", features_to_vary="all") # initialize required params for CF computations
        # prepare query instance for CF optimization
        query_instance = self.exp.data_interface.prepare_query_instance(query_instance=sample_adultincome_query, encoding='one-hot')
        self.query_instance = np.array([query_instance.iloc[0].values], dtype=np.float32)
        init_arrs = self.exp.initialize_CFs(self.query_instance, init_near_query_instance=True) # initialize CFs
        self.desired_class = 1 # desired class is 1
        # setting random feature weights (seeded so expected losses are stable)
        np.random.seed(42)
        weights = np.random.rand(len(self.exp.data_interface.encoded_feature_names))
        weights = np.array([weights], dtype=np.float32)
        if tf.__version__[0] == '1':
            # TF1: push the initial CFs and weights into session variables
            for i in range(4):
                self.exp.dice_sess.run(self.exp.cf_assign[i], feed_dict={self.exp.cf_init: init_arrs[i]})
            self.exp.feature_weights = tf.Variable(self.exp.minx, dtype=tf.float32)
            self.exp.dice_sess.run(tf.assign(self.exp.feature_weights, weights))
        else:
            # TF2: weights are just a constant tensor
            self.exp.feature_weights_list = tf.constant([weights], dtype=tf.float32)
    @pytest.mark.parametrize("yloss, output",[("hinge_loss", 4.6711), ("l2_loss", 0.9501), ("log_loss", 3.6968)])
    def test_yloss(self, yloss, output):
        """Each supported y-loss yields its known value for the fixture setup."""
        if tf.__version__[0] == '1':
            loss1 = self.exp.compute_yloss(method=yloss)
            loss1 = self.exp.dice_sess.run(loss1, feed_dict={self.exp.target_cf: np.array([[1]])})
        else:
            self.exp.target_cf_class = np.array([[self.desired_class]], dtype=np.float32)
            self.exp.yloss_type = yloss
            loss1 = self.exp.compute_yloss().numpy()
        assert pytest.approx(loss1, abs=1e-4) == output
    def test_proximity_loss(self):
        if tf.__version__[0] == '1':
            loss2 = self.exp.compute_proximity_loss()
            loss2 = self.exp.dice_sess.run(loss2, feed_dict={self.exp.x1: self.query_instance})
        else:
            self.exp.x1 = tf.constant(self.query_instance, dtype=tf.float32)
            loss2 = self.exp.compute_proximity_loss().numpy()
        assert pytest.approx(loss2, abs=1e-4) == 0.0068 # proximity loss computed for given query instance and feature weights.
    @pytest.mark.parametrize("diversity_loss, output",[("dpp_style:inverse_dist", 0.0104), ("avg_dist", 0.1743)])
    def test_diversity_loss(self, diversity_loss, output):
        """Both diversity formulations give their known values."""
        if tf.__version__[0] == '1':
            loss3 = self.exp.compute_diversity_loss(diversity_loss)
            loss3 = self.exp.dice_sess.run(loss3)
        else:
            self.exp.diversity_loss_type = diversity_loss
            loss3 = self.exp.compute_diversity_loss().numpy()
        assert pytest.approx(loss3, abs=1e-4) == output
    def test_regularization_loss(self):
        loss4 = self.exp.compute_regularization_loss()
        if tf.__version__[0] == '1':
            loss4 = self.exp.dice_sess.run(loss4)
        else:
            loss4 = loss4.numpy()
        assert pytest.approx(loss4, abs=1e-4) == 0.2086 # regularization loss computed for given query instance and feature weights.
    def test_final_cfs_and_preds(self, sample_adultincome_query):
        """
        Test correctness of final CFs and their predictions for sample query instance.
        """
        dice_exp = self.exp.generate_counterfactuals(sample_adultincome_query, total_CFs=4, desired_class="opposite")
        test_cfs = [[70.0, 'Private', 'Masters', 'Single', 'White-Collar', 'White', 'Female', 51.0, 0.534], [19.0, 'Self-Employed', 'Doctorate', 'Married', 'Service', 'White', 'Female', 44.0, 0.815], [47.0, 'Private', 'HS-grad', 'Married', 'Service', 'White', 'Female', 45.0, 0.589], [36.0, 'Private', 'Prof-school', 'Married', 'Service', 'White', 'Female', 62.0, 0.937]]
        assert dice_exp.final_cfs_list == test_cfs
        preds = [np.round(preds.flatten().tolist(), 3)[0] for preds in dice_exp.final_cfs_preds]
        assert pytest.approx(preds, abs=1e-3) == [0.534, 0.815, 0.589, 0.937]
|
[
"noreply@github.com"
] |
gaugup.noreply@github.com
|
6000dedcf91921ea9a5a6cba05ff8fe17f2ae918
|
221d1ad342677d2fac8aa3f8d5c60e059a6316c9
|
/pm4py/objects/log/util/dataframe_utils.py
|
e8318a1daaeaa367f7ae496fe27ab3a705aca2da
|
[] |
no_license
|
niklasadams/explainable_concept_drift_pm
|
06ff651fbdebece4adf96f94bfb4d1026da14c48
|
6bf84d727ab0bae76716a04ad28c7de73250c89d
|
refs/heads/main
| 2023-08-26T18:21:49.955080
| 2021-10-29T18:53:48
| 2021-10-29T18:53:48
| 314,514,571
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,643
|
py
|
from pm4py.util import constants
from pm4py.objects.log.log import EventStream
from pm4py.objects.conversion.log import converter as log_converter
import pandas as pd
from pm4py.util.vers_checker import check_pandas_ge_024
from enum import Enum
from pm4py.util import exec_utils
from pm4py.util import points_subset
from pm4py.util import xes_constants
LEGACY_PARQUET_TP_REPLACER = "AAA"
LEGACY_PARQUET_CASECONCEPTNAME = "caseAAAconceptAAAname"
class Parameters(Enum):
    """Keys accepted in the `parameters` dicts of this module's functions."""
    PARTITION_COLUMN = "partition_column"
    CASE_ID_KEY = constants.PARAMETER_CONSTANT_CASEID_KEY
    MANDATORY_ATTRIBUTES = "mandatory_attributes"
    MAX_NO_CASES = "max_no_cases"
MIN_DIFFERENT_OCC_STR_ATTR = 5
MAX_DIFFERENT_OCC_STR_ATTR = 50
def insert_partitioning(df, num_partitions, parameters=None):
    """
    Add a partition column to the dataframe, assigning each case to one of
    `num_partitions` buckets.

    Parameters
    -------------
    df
        Dataframe
    num_partitions
        Number of partitions
    parameters
        Parameters of the algorithm (Parameters.CASE_ID_KEY,
        Parameters.PARTITION_COLUMN)

    Returns
    -------------
    df
        Dataframe with the partition column inserted
    """
    parameters = parameters if parameters is not None else {}
    case_id_key = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, constants.CASE_CONCEPT_NAME)
    partition_column = exec_utils.get_param_value(Parameters.PARTITION_COLUMN, parameters, "@@partitioning")
    # dense-rank the case ids so equal ids share a bucket, then fold into
    # num_partitions buckets round-robin
    dense_ranks = df[case_id_key].rank(method='dense', ascending=False).astype(int)
    df[partition_column] = dense_ranks % num_partitions
    return df
def legacy_parquet_support(df, parameters=None):
    """
    For legacy support: Parquet column names could not contain ':' and the
    character was replaced by a placeholder string. Substitute the
    placeholder back to ':' in every column name.

    Parameters
    ---------------
    df
        Dataframe
    parameters
        Parameters of the algorithm (unused)

    Returns
    ---------------
    df
        Dataframe with restored column names
    """
    if parameters is None:
        parameters = {}
    restored = []
    for column_name in df.columns:
        restored.append(column_name.replace(LEGACY_PARQUET_TP_REPLACER, ":"))
    df.columns = restored
    return df
def table_to_stream(table, parameters=None):
    """
    Convert a Pyarrow table to an event stream.

    Parameters
    ------------
    table
        Pyarrow table
    parameters
        Possible parameters of the algorithm (unused)

    Returns
    ------------
    stream
        EventStream with one event per table row
    """
    if parameters is None:
        parameters = {}
    columns = table.to_pydict()
    # legacy parquet support: undo the ':' mangling in column names
    original_keys = list(columns.keys())
    if LEGACY_PARQUET_CASECONCEPTNAME in original_keys:
        for key in original_keys:
            columns[key.replace(LEGACY_PARQUET_TP_REPLACER, ":")] = columns.pop(key)
    # transpose column dict into a list of per-row event dicts
    events = [dict(zip(columns, row)) for row in zip(*columns.values())]
    return EventStream(events)
def table_to_log(table, parameters=None):
    """
    Convert a Pyarrow table to an event log.

    Parameters
    ------------
    table
        Pyarrow table
    parameters
        Possible parameters of the algorithm (forwarded to the converters)

    Returns
    ------------
    log
        Event log
    """
    parameters = {} if parameters is None else parameters
    # go via an event stream, then let the generic converter build the log
    event_stream = table_to_stream(table, parameters=parameters)
    return log_converter.apply(event_stream, parameters=parameters)
def convert_timestamp_columns_in_df(df, timest_format=None, timest_columns=None):
    """
    Convert string (object) columns of a dataframe to timestamps.

    Parameters
    -----------
    df
        Dataframe
    timest_format
        (If provided) Format of the timestamp columns in the CSV file
    timest_columns
        Columns of the CSV that shall be converted into timestamp
        (None means: try every column)

    Returns
    ------------
    df
        Dataframe with timestamp columns converted (in place)
    """
    # presumably True when pandas >= 0.24, which wants utc=True for
    # consistent tz handling -- see pm4py.util.vers_checker
    needs_conversion = check_pandas_ge_024()
    for col in df.columns:
        if timest_columns is None or col in timest_columns:
            # only string-typed (object) columns are parsing candidates
            if df[col].dtype == 'object':
                try:
                    # BUG FIX: the original dropped timest_format on the
                    # non-utc path and parsed with the default format.
                    # format=None is equivalent to omitting the argument,
                    # so one call per branch covers both cases.
                    if needs_conversion:
                        df[col] = pd.to_datetime(df[col], utc=True, format=timest_format)
                    else:
                        df[col] = pd.to_datetime(df[col], format=timest_format)
                except ValueError:
                    # column is not a parsable timestamp -- leave unchanged
                    pass
    return df
def sample_dataframe(df, parameters=None):
    """
    Sample a dataframe, keeping at most a given number of cases.

    Parameters
    --------------
    df
        Dataframe
    parameters
        Parameters of the algorithm, including:
        - Parameters.CASE_ID_KEY
        - Parameters.MAX_NO_CASES

    Returns
    -------------
    sampled_df
        Dataframe restricted to the chosen cases
    """
    if parameters is None:
        parameters = {}
    case_id_key = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, constants.CASE_CONCEPT_NAME)
    max_no_cases = exec_utils.get_param_value(Parameters.MAX_NO_CASES, parameters, 100)
    unique_cases = list(df[case_id_key].unique())
    # pick an evenly-spread subset of at most max_no_cases case ids
    keep_count = min(max_no_cases, len(unique_cases))
    chosen_cases = points_subset.pick_chosen_points_list(keep_count, unique_cases)
    return df[df[case_id_key].isin(chosen_cases)]
def automatic_feature_selection_df(df, parameters=None):
    """
    Performs an automatic feature selection on dataframes,
    keeping the features useful for ML purposes

    Parameters
    ---------------
    df
        Dataframe
    parameters
        Parameters of the algorithm, including:
        - Parameters.CASE_ID_KEY => column holding the case identifier
        - Parameters.MANDATORY_ATTRIBUTES => columns that are always retained
        - Parameters.MIN_DIFFERENT_OCC_STR_ATTR / Parameters.MAX_DIFFERENT_OCC_STR_ATTR =>
          variability bounds for retaining string attributes

    Returns
    ---------------
    featured_df
        Dataframe with only the features that have been selected
    """
    if parameters is None:
        parameters = {}
    case_id_key = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, constants.CASE_CONCEPT_NAME)
    # by default, always keep the case id / activity / timestamp columns (when present)
    mandatory_attributes = exec_utils.get_param_value(Parameters.MANDATORY_ATTRIBUTES, parameters,
                                                      set(df.columns).intersection(
                                                          {constants.CASE_CONCEPT_NAME, xes_constants.DEFAULT_NAME_KEY,
                                                           xes_constants.DEFAULT_TIMESTAMP_KEY}))
    min_different_occ_str_attr = exec_utils.get_param_value(Parameters.MIN_DIFFERENT_OCC_STR_ATTR, parameters, 5)
    max_different_occ_str_attr = exec_utils.get_param_value(Parameters.MAX_DIFFERENT_OCC_STR_ATTR, parameters, 50)
    cols_dtypes = {x: str(df[x].dtype) for x in df.columns}
    other_attributes_to_retain = set()
    no_all_cases = df[case_id_key].nunique()
    for x, y in cols_dtypes.items():
        attr_df = df.dropna(subset=[x])
        this_cases = attr_df[case_id_key].nunique()
        # in any case, keep attributes that appears at least once per case
        if this_cases == no_all_cases:
            if "float" in y or "int" in y:
                # (as in the classic log version) retain always float/int attributes
                other_attributes_to_retain.add(x)
            elif "object" in y:
                # (as in the classic log version) keep string attributes if they have enough variability, but not too much
                # (that would be hard to explain)
                unique_val_count = df[x].nunique()
                if min_different_occ_str_attr <= unique_val_count <= max_different_occ_str_attr:
                    other_attributes_to_retain.add(x)
            else:
                # not consider the attribute after this feature selection if it has other types (for example, date)
                pass
    attributes_to_retain = mandatory_attributes.union(other_attributes_to_retain)
    # BUGFIX: modern pandas rejects a `set` as a column indexer; pass a list instead.
    return df[list(attributes_to_retain)]
|
[
"niklas.adams@pads.rwth-aachen.de"
] |
niklas.adams@pads.rwth-aachen.de
|
7ca5e6cecede89720beb07c78961edf828316a33
|
2f59f4b22c2012ad6965b1dc694d48dc056362b4
|
/prepare_data.py
|
e0bedff263a0aa6fb9ddd0cd93a71f857ecb9bf3
|
[] |
no_license
|
vudaoanhtuan/neural-machine-translation
|
8538a7ff733d1bad90045dced9945ec286230fe6
|
1cea5e1a4b03017c3a1f3f0fffb750991e61fd2e
|
refs/heads/master
| 2020-05-18T19:24:13.444063
| 2019-05-02T15:48:24
| 2019-05-02T15:48:24
| 184,608,068
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
import torch
from torchtext.data import Field, Dataset, Iterator, BucketIterator, ReversibleField
from torchtext.datasets import TranslationDataset
from preprocess import preprocess, tokenize
def tokenize_word(text):
    """Normalize the raw sentence with preprocess() and split it into word tokens."""
    text = preprocess(text)
    return tokenize(text)
# Source-language field: plain word tokens, lowercased, batch dimension first.
SRC = Field(
    tokenize=tokenize_word,
    lower=True,
    batch_first=True
)
# Target-language field: adds <sos>/<eos> markers needed for decoder training.
TRG = Field(
    tokenize=tokenize_word,
    init_token='<sos>',
    eos_token='<eos>',
    lower=True,
    batch_first=True
)
fields = [('src', SRC), ('trg', TRG)]
# Expects the parallel corpus files 'lang.en' / 'lang.de' in the working directory.
ds = TranslationDataset('lang.', ('en', 'de'), fields)
# 90/10 train/test split.
train_ds, test_ds = ds.split(0.9)
# NOTE(review): vocabularies are built on the *whole* dataset (train + test) --
# confirm this leakage into the test vocabulary is intended.
SRC.build_vocab(ds)
TRG.build_vocab(ds)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size = 32
# BucketIterator batches examples of similar length to minimize padding.
train_iter = BucketIterator(
    train_ds,
    batch_size=batch_size,
    device=device
)
test_iter = BucketIterator(
    test_ds,
    batch_size=batch_size,
    device=device
)
|
[
"vudaoanhtuan@gmail.com"
] |
vudaoanhtuan@gmail.com
|
517b7bcb9f4f6b5045ecd44c559702ab5df05680
|
9a82885b4617b666e1ac1c976377790b759cd64e
|
/myfit/migrations/0010_device_owner_log_files_location.py
|
582fb68a7b1b841c1b99bd71de6e70de24e1400e
|
[] |
no_license
|
lukbor2/myfitapp
|
94ba4b7b0acc796a15fab11471e120b21218d9d5
|
0020bfd3d37de637121773d21464ac92f77da9e8
|
refs/heads/master
| 2021-04-26T21:54:23.585351
| 2018-05-15T03:43:23
| 2018-05-15T03:43:23
| 122,386,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
# Generated by Django 2.0.2 on 2018-04-14 21:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``log_files_location`` text column to ``device_owner``."""
    dependencies = [
        ('myfit', '0009_auto_20180313_0233'),
    ]
    operations = [
        migrations.AddField(
            model_name='device_owner',
            name='log_files_location',
            # blank=True/null=True: the path is optional both in forms and in the database
            field=models.CharField(blank=True, max_length=500, null=True),
        ),
    ]
|
[
"lukbor2@gmail.com"
] |
lukbor2@gmail.com
|
7b9023674047f9c5e60aa18006c5f6c23a0bf7f2
|
15d4c666e1fa5133cd9fdf0e373410fcc0f14d1c
|
/Mini_Projects/hw1/testing/matrix.py
|
eb4d1dc6971f9e64e7a1342e51f00cfd786689c3
|
[
"MIT"
] |
permissive
|
twyunting/CSC-676_Computer-Vision
|
86df53cfb63b7acb213cec9e831d1d5e345a443c
|
7786477f805c37d38afaef1f7438ce3c6e2c1f3c
|
refs/heads/master
| 2023-08-12T07:31:46.138909
| 2021-10-10T20:23:12
| 2021-10-10T20:23:12
| 332,000,716
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,700
|
py
|
# adding the packages
"""
import cv2
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import convolve2d as conv2d
import scipy.sparse as sps
from PIL import Image
# Package for fast equation solving
from sys import platform
import sparseqr
"""
# World parameters
alpha = 35*math.pi/180;
img = cv2.imread('img2.png')
print(type(img))
img = img[:, :, ::-1].astype(np.float32)
nrows, ncols, colors = img.shape
ground = (np.min(img, axis=2) > 110).astype(np.float32)
foreground = (ground == 0).astype(np.float32)
m = np.mean(img, 2)
kern = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float32)
dmdx = conv2d(m, kern, 'same')
dmdy = conv2d(m, kern.transpose(), 'same')
mag = np.sqrt(dmdx**2 + dmdy**2)
mag[0, :] = 0
mag[-1, :] = 0
mag[:, 0] = 0
mag[:, -1] = 0
theta = np.arctan2(dmdx, dmdy)
edges = mag >= 30
edges = edges * foreground
## Occlusion and contact edges
pi = math.pi
vertical_edges = edges*((theta<115*pi/180)*(theta>65*pi/180)+(theta<-65*pi/180)*(theta>-115*pi/180));
horizontal_edges = edges * (1-vertical_edges)
kern = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=np.float32)
horizontal_ground_to_foreground_edges = (conv2d(ground, kern, 'same'))>0;
horizontal_foreground_to_ground_edges = (conv2d(foreground, kern, 'same'))>0;
vertical_ground_to_foreground_edges = vertical_edges*np.abs(conv2d(ground, kern.transpose(), 'same'))>0
occlusion_edges = edges*(vertical_ground_to_foreground_edges + horizontal_ground_to_foreground_edges)
contact_edges = horizontal_edges*(horizontal_foreground_to_ground_edges);
E = np.concatenate([vertical_edges[:,:,None],
horizontal_edges[:,:,None],
np.zeros(occlusion_edges.shape)[:,:,None]], 2)
# Plot
plt.figure()
plt.subplot(2,2,1)
plt.imshow(img.astype(np.uint8))
plt.axis('off')
plt.title('Input image')
plt.subplot(2,2,2)
plt.imshow(edges == 0, cmap='gray')
plt.axis('off')
plt.title('Edges')
# Normals
K = 3
ey, ex = np.where(edges[::K, ::K])
ex *= K
ey *= K
plt.figure()
plt.subplot(2,2,3)
plt.imshow(np.max(mag)-mag, cmap='gray')
dxe = dmdx[::K, ::K][edges[::K, ::K] > 0]
dye = dmdy[::K, ::K][edges[::K, ::K] > 0]
n = np.sqrt(dxe**2 + dye**2)
dxe = dxe/n
dye = dye/n
plt.quiver(ex, ey, dxe, -dye, color='r')
plt.axis('off')
plt.title('Normals')
plt.show()
# Edges and boundaries
plt.figure()
plt.subplot(2,2,1)
plt.imshow(img.astype(np.uint8))
plt.axis('off')
plt.title('Input image')
plt.subplot(2,2,2)
plt.imshow(E+(edges == 0)[:, :, None])
plt.axis('off')
plt.title('Edges')
plt.subplot(2,2,3)
plt.imshow(1-(occlusion_edges>0), cmap='gray')
plt.axis('off')
plt.title('Occlusion boundaries')
plt.subplot(2,2,4)
plt.imshow(1-contact_edges, cmap='gray')
plt.axis('off')
plt.title('Contact boundaries');
# testing the correct matrix
total = []
for i in range(-2, 2):
for j in range(-2, 2):
for k in range(-2, 2):
a = np.array([i, j, k])
total.append(a)
res = []
for i in total:
for j in total:
for k in total:
res.append(np.array([i, j, k]))
# print(np.shape(res[0]))
Nconstraints = nrows*ncols*20
Aij = np.zeros((3, 3, Nconstraints))
ii = np.zeros((Nconstraints, 1));
jj = np.zeros((Nconstraints, 1));
b = np.zeros((Nconstraints, 1));
V = np.zeros((nrows, ncols))
# Create linear contraints
c = 0
for i in range(1, nrows-1):
for j in range(1, ncols-1):
if ground[i,j]:
# Y = 0
Aij[:,:,c] = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
ii[c] = i
jj[c] = j
b[c] = 0
V[i,j] = 0
c += 1 # increment constraint counter
else:
# Check if current neirborhood touches an edge
edgesum = np.sum(edges[i-1:i+2,j-1:j+2])
# Check if current neirborhood touches ground pixels
groundsum = np.sum(ground[i-1:i+2,j-1:j+2])
# Check if current neirborhood touches vertical pixels
verticalsum = np.sum(vertical_edges[i-1:i+2,j-1:j+2])
# Check if current neirborhood touches horizontal pixels
horizontalsum = np.sum(horizontal_edges[i-1:i+2,j-1:j+2])
# Orientation of edge (average over edge pixels in current
# neirborhood)
nx = np.sum(dmdx[i-1:i+2,j-1:j+2]*edges[i-1:i+2,j-1:j+2])/edgesum
ny = np.sum(dmdy[i-1:i+2,j-1:j+2]*edges[i-1:i+2,j-1:j+2])/edgesum
if contact_edges[i, j]:
# dY/dy = 0
Aij[:,:,c] = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
ii[c] = i
jj[c] = j
b[c] = 0
c += 1 # increment constraint counter
if verticalsum > 0 and groundsum == 0:
# dY/Dy = 1/cos a
Aij[:,:,c] = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])/8;
ii[c] = i
jj[c] = j
b[c] = 1/np.cos(alpha)
c += 1 # increment constraint counter
if horizontalsum > 0 and groundsum == 0 and verticalsum == 0: #(x,y belongs to horizontal edge)
# dY/dt = 0
Aij[:,:,c] = a
# Fill out the kernel (need to revise it! 3 by 3 matrix)
ii[c] = i
jj[c] = j
b[c] = 0
c += 1 # increment constraint counter
if groundsum == 0:
# laplacian = 0
# 0.1 is a weight to reduce the strength of this constraint
Aij[:,:,c] = 0.1*np.array([[0, 0, 0], [-1, 2, -1], [0, 0, 0]]);
ii[c] = i
jj[c] = j
b[c] = 0
c += 1 # increment constraint counter
Aij[:,:,c] = 0.1*np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]); # question 4
ii[c] = i;
jj[c] = j;
b[c] = 0;
c = c+1; # increment constraint counter
Aij[:,:,c] = 0.1*np.array([[0, -1, 1], [0, 1, -1], [0, 0, 0]]);
ii[c] = i;
jj[c] = j;
b[c] = 0;
c = c+1; # increment constraint counter
def sparseMatrix(i, j, Aij, imsize):
    """ Build a sparse matrix containing 2D linear neighborhood operators

    Each constraint c contributes one row of the output: the ni x nj kernel
    Aij[:, :, c] is scattered into that row at the column-major pixel indices
    of the neighborhood centered on (i[c], j[c]).

    Input:
        Aij = [ni, nj, nc] nc: number of neighborhoods with contraints
        i: row index per constraint
        j: column index per constraint
        imsize: number of image rows, used for column-major pixel indexing
                NOTE(review): the original docstring said [nrows ncols], but the
                caller passes the scalar nrows -- documented as scalar here.
    Returns:
        A: a sparse matrix. Each row contains one 2D linear operator
    """
    ni, nj, nc = Aij.shape
    nij = ni*nj
    # flat (value, row, col) triplets for the sparse constructor
    a = np.zeros((nc*nij))
    m = np.zeros((nc*nij))
    n = np.zeros((nc*nij))
    # relative offsets of the kernel taps around the center pixel
    grid_range = np.arange(-(ni-1)/2, 1+(ni-1)/2)
    jj, ii = np.meshgrid(grid_range, grid_range)
    ii = ii.reshape(-1,order='F')
    jj = jj.reshape(-1,order='F')
    k = 0
    for c in range(nc):
        # Column-major (Fortran-order) linear pixel index.
        # BUGFIX: use the imsize parameter instead of the module-level global
        # `nrows`, which made the function silently depend on outside state.
        x = (i[c]+ii) + (j[c]+jj)*imsize
        a[k:k+nij] = Aij[:,:,c].reshape(-1,order='F')
        m[k:k+nij] = c
        n[k:k+nij] = x
        k += nij
    m = m.astype(np.int32)
    n = n.astype(np.int32)
    # duplicate (m, n) pairs are summed by the COO-style constructor
    A = sps.csr_matrix((a, (m, n)))
    return A
ii = ii[:c]
jj = jj[:c]
Aij = Aij[:,:,:c]
b = b[:c]
A = sparseMatrix(ii, jj, Aij, nrows)
Y = sparseqr.solve( A, b , tolerance=0)
Y = np.reshape(Y, [nrows, ncols], order='F') # Transfrom vector into image
# Recover 3D world coordinates
x, y = np.meshgrid(np.arange(ncols), np.arange(nrows))
x = x.astype(np.float32)
y = y.astype(np.float32)
x -= nrows/2
y -= ncols/2
# Final coordinates
X = x
Z = Y*np.cos(alpha)/np.sin(alpha) - y/np.sin(alpha)
Y = -Y
Y = np.maximum(Y, 0);
E = occlusion_edges.astype(np.float32);
E[E > 0] = np.nan;
Z = Z+E; # remove occluded edges
plt.figure()
plt.subplot(2,2,1)
plt.imshow(img[1:-1, 1:-1].astype(np.uint8))
plt.axis('off')
plt.title('Edges')
plt.subplot(2,2,2)
plt.imshow(Z[1:-1, 1:-1], cmap='gray')
plt.axis('off')
plt.title('Z')
plt.subplot(2,2,3)
plt.imshow(Y[1:-1, 1:-1], cmap='gray')
plt.axis('off')
plt.title('Y')
plt.subplot(2,2,4)
plt.imshow(X[1:-1, 1:-1], cmap='gray')
plt.axis('off')
plt.title('X')
|
[
"yc6705a@american.edu"
] |
yc6705a@american.edu
|
1c1cf34d45653ee87041cd214a18294d6564ba82
|
786de007822aca1e092b81928543b3dae230d99e
|
/problem_011.py
|
0659fedbfb47b0c53d26a286236c6ec83309ea3d
|
[] |
no_license
|
ycao-cit/leetcode
|
4109bcd2c50403f1000136ef10dee4e32b90b0a2
|
a1e5dc64979fa601662c1057b396e72f38cfd70e
|
refs/heads/master
| 2021-01-11T04:24:18.765305
| 2017-01-30T01:15:23
| 2017-01-30T01:15:23
| 71,197,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Yi Cao <ycao16@uw.edu>
#
# Distributed under terms of the GNU General Public License 3.0 license.
"""
Container With Most Water
URL: https://leetcode.com/problems/container-with-most-water/
"""
class Solution(object):
    def maxArea(self, height):
        """
        Two-pointer scan: start at both ends and repeatedly move the
        shorter side inward, tracking the best container area seen.

        :type height: List[int]
        :rtype: int
        """
        left, right = 0, len(height) - 1
        best = 0
        while left < right:
            width = right - left
            # the shorter bar bounds the water level; advance that side
            if height[left] < height[right]:
                best = max(best, height[left] * width)
                left += 1
            else:
                best = max(best, height[right] * width)
                right -= 1
        return best
|
[
"ycao16@uw.edu"
] |
ycao16@uw.edu
|
53d165f828a7c30f169e4ad05cb402581160f7ca
|
7079e1759dd5213fe072017a0fd8bca8620badff
|
/common/com.py
|
0fda1b30951b94027eb7b80958ea441e4a88d5d6
|
[] |
no_license
|
wanhui1994/skx
|
82e23c310f18d2a4d704d8dba81068135da6286e
|
a7cb0772af8bf0bc20b3cd7c7729908533481683
|
refs/heads/master
| 2020-06-07T14:23:35.377435
| 2019-06-21T06:14:16
| 2019-06-21T06:14:16
| 193,040,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,742
|
py
|
#coding=utf-8
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from pyvirtualdisplay import Display
from selenium.webdriver.support.select import Select
import os
class Comm():
    """Thin wrapper around a Selenium WebDriver with common page-interaction helpers.

    (Comments below translated to English from the original Chinese.)
    """
    def __init__(self,browser='ff'):
        # Pick the browser driver by short or full name.
        try:
            if browser == 'ff' or browser == 'Firefox':
                self.driver = webdriver.Firefox()
            elif browser == 'ch' or browser == 'Chrome':
                self.driver = webdriver.Chrome()
            elif browser == 'ie' or browser == 'IE':
                self.driver = webdriver.Ie()
        # NOTE(review): an unknown browser name does not raise ValueError here, so this
        # handler likely never fires and self.driver would simply stay unset -- verify.
        except ValueError:
            print('Please enter the correct browser name!(Supported browsers are: Firefox、Chrome and Ie)')
    # Open a URL
    def openurl(self,url):
        self.driver.get(url)
        self.driver.maximize_window() # maximize the browser window
    # Element lookup helper
    def element(self,locator):
        # element = WebDriverWait(self.driver, 30, 1).until(lambda x: x.find_element(*locator)) # option 1
        try:
            element=self.driver.find_element(*locator) # the * unpacks the (by, value) locator pair -- option 2
            return element
        except NoSuchElementException as msg:
            # NOTE(review): returns None on lookup failure; callers do not check for that.
            print('元素查找异常:%s'%msg)
    # Click action
    def click(self,locator):
        element = self.element(locator)
        element.click()
    # Clear an input field
    def clear(self,locator):
        element = self.element(locator)
        element.clear()
    # Type text into an element
    def send(self,locator,txt):
        element = self.element(locator)
        #element.clear()
        element.send_keys(txt)
    # Drop-down menu selection - by option value
    def downmenuvalue (self,locator,num):
        Select(self.element(locator)).select_by_value(num)
    # Drop-down menu selection - by visible option text
    def downmenutext (self,locator,text):
        Select(self.element(locator)).select_by_visible_text(text)
    # Date-picker widget
    def datetime(self,date,locator,text):
        # Remove the element's readonly attribute; `date` is the date picker's element id
        js='document.getElementById("'+date+'").removeAttribute("readonly");'
        self.driver.execute_script(js)
        self.clear(locator)
        self.send(locator,text) # type in the date
    # Upload a file (through a native file-selection control)
    def uploadfile(self,loc,path):
        self.click(loc)
        os.system(path) # run the helper that selects the file to import
    # Get the text displayed on an element
    def text(self,locator):
        element = self.element(locator).text
        return element
    # Explicit wait for an element to appear
    def WebDriver(self,timeout,method):
        try:
            WebDriverWait(self.driver,timeout,5).until(method)
            print('查找到元素了')
        except Exception as msg:
            print("元素未找到")
    # Implicit wait
    def implicitly(self,value):
        self.driver.implicitly_wait(value)
    # Take a screenshot to the given path
    def screenshot1(self,path):
        self.driver.get_screenshot_as_file(path)
    # Set the page-load timeout (stop loading after `num` seconds)
    def page_timeout(self,num):
        self.driver.set_page_load_timeout(num)
    # Close the current page
    def clos(self):
        self.driver.close()
    # Quit the browser entirely
    def quit(self):
        self.driver.quit()
    # Run the browser in the background -- headless via a virtual display
    def silent (self):
        display = Display(visible=0, size=(800, 600))
        display.start()
    #----------------- assertions -------------------
    # Title checks
    def title(self,name):
        # NOTE(review): the result is assigned to a local and discarded -- nothing is returned.
        title = EC.title_is(name) # title equals `name` exactly
    def title_contains(self,name):
        # NOTE(review): the result is assigned to a local and discarded -- nothing is returned.
        title_contains = EC.title_contains(name) # title contains `name`
|
[
"2353231116@qq.com"
] |
2353231116@qq.com
|
bd4e433309484011267564b7b1b2a7e81669bcd0
|
734ba54960c6150164bfccc91063512066cc2fbf
|
/CodeSamples-Python/LPTHW-PythonCourse-ex18-NamesVariablesCodeFunctions-Functions.py
|
85606394d2aac8ecf110289480b8dfcb43fd0283
|
[] |
no_license
|
cikent/Portfolio
|
8cff76e7249ac83bcbe11034357b6b07e3007c9d
|
e5c4261c2c452f01656c7af612498fd1fe099289
|
refs/heads/master
| 2021-06-22T13:18:21.320010
| 2020-03-29T23:54:21
| 2020-03-29T23:54:21
| 224,490,142
| 1
| 0
| null | 2021-06-02T01:25:44
| 2019-11-27T18:09:41
|
Python
|
UTF-8
|
Python
| false
| false
| 740
|
py
|
# Demonstrates the four basic argument styles: *args, two named, one named, none.

def print_two(*args):
    """Unpack exactly two positional arguments and print them."""
    first, second = args
    print(f"arg1: {first}, arg2: {second}")

def print_two_again(arg1, arg2):
    """Same output as print_two, but with explicitly named parameters."""
    print(f"arg1: {arg1}, arg2: {arg2}")

def print_one(arg1):
    """Print a single argument."""
    print(f"arg1: {arg1}")

def print_none():
    """Print a fixed message; takes no arguments."""
    print("I got nothin'.")

# Exercise each function with the matching number of arguments.
print_two("Zed", "Shaw")
print_two_again("Zed", "Shaw")
print_one("First!")
print_none()
|
[
"noreply@github.com"
] |
cikent.noreply@github.com
|
55bb3d82b80185533da7d4c85f2c8e6589933ab4
|
cd4eb25911d3e3b092aa97aaa7b8fbba6c3a0704
|
/lang/python/asyncio/chain.py
|
aa8d41fb55cb2b5319e1e90b9a6e8a96e55ad630
|
[
"MIT"
] |
permissive
|
liuyang1/test
|
29bb142982d2ef0d79b71e8fe5f5e0d51ec5258e
|
9a154e0161a1a33baad53f7223ee72e702532001
|
refs/heads/master
| 2023-08-05T08:56:50.526414
| 2023-07-21T05:49:53
| 2023-07-21T11:16:09
| 26,949,326
| 9
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
import asyncio

async def compute(x, y):
    """Print the operands, then return their sum."""
    print("Compute %s + %s ..." % (x, y))
    # await asyncio.sleep(1.0)
    return x + y

async def print_sum(x, y):
    """Await compute() and print the full equation."""
    result = await compute(x, y)
    print("%s + %s = %s" % (x, y, result))

# MODERNIZATION: @asyncio.coroutine / yield-from coroutines were removed in
# Python 3.11, and manual get_event_loop()/run_until_complete()/close() is
# deprecated. asyncio.run() creates and closes a fresh event loop per call.
asyncio.run(print_sum(1, 2))
asyncio.run(print_sum(3, 2))
|
[
"liuyang1@mail.ustc.edu.cn"
] |
liuyang1@mail.ustc.edu.cn
|
8ea5c0269ada1927409543ec2f0976260b434299
|
d11843388d4002e246c2281956570ce1acfe2774
|
/alembic/versions/a283be5badc2_revision_of_nullable_fields_spotify_id_.py
|
1707de564c1a0f3ec47ebb3d7e2a33b9d76ee704
|
[] |
no_license
|
Gorillaz322/SpotifyHistory
|
a067128a0f843773b02605cd1e43b664e79853a0
|
933ce1bd9ec4117fff7b1d8b5ee60fe26fc6e482
|
refs/heads/master
| 2021-05-13T17:53:40.654581
| 2018-02-21T18:04:23
| 2018-02-21T18:04:23
| 116,839,474
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,343
|
py
|
"""Revision of nullable fields/Spotify_id for song
Revision ID: a283be5badc2
Revises: e9f3b4345181
Create Date: 2018-02-21 19:46:35.323702
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a283be5badc2'
down_revision = 'e9f3b4345181'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('albums', 'artist_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('albums', 'name',
existing_type=sa.VARCHAR(length=64),
nullable=False)
op.alter_column('artists', 'name',
existing_type=sa.VARCHAR(length=64),
nullable=False)
op.alter_column('plays', 'duration',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('plays', 'song_id',
existing_type=sa.INTEGER(),
nullable=False)
op.add_column('songs', sa.Column('spotify_id', sa.String(length=128), nullable=False))
op.alter_column('songs', 'album_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('songs', 'name',
existing_type=sa.VARCHAR(length=64),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('songs', 'name',
existing_type=sa.VARCHAR(length=64),
nullable=True)
op.alter_column('songs', 'album_id',
existing_type=sa.INTEGER(),
nullable=True)
op.drop_column('songs', 'spotify_id')
op.alter_column('plays', 'song_id',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('plays', 'duration',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('artists', 'name',
existing_type=sa.VARCHAR(length=64),
nullable=True)
op.alter_column('albums', 'name',
existing_type=sa.VARCHAR(length=64),
nullable=True)
op.alter_column('albums', 'artist_id',
existing_type=sa.INTEGER(),
nullable=True)
# ### end Alembic commands ###
|
[
"jyvylo5@gmail.com"
] |
jyvylo5@gmail.com
|
8dc513728068e9e929518340bb44a7718efc33eb
|
3122ac39f1ce0a882b48293a77195476299c2a3b
|
/clients/python/generated/swaggyjenkins/models/extension_class_container_impl1links.py
|
71b82d350f0fa80dde4f8cb80c78f8b7b2e3c3c7
|
[
"MIT"
] |
permissive
|
miao1007/swaggy-jenkins
|
4e6fe28470eda2428cbc584dcd365a21caa606ef
|
af79438c120dd47702b50d51c42548b4db7fd109
|
refs/heads/master
| 2020-08-30T16:50:27.474383
| 2019-04-10T13:47:17
| 2019-04-10T13:47:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,766
|
py
|
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
OpenAPI spec version: 1.1.1
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ExtensionClassContainerImpl1links(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'_self': 'Link',
'_class': 'str'
}
attribute_map = {
'_self': 'self',
'_class': '_class'
}
def __init__(self, _self=None, _class=None): # noqa: E501
"""ExtensionClassContainerImpl1links - a model defined in OpenAPI""" # noqa: E501
self.__self = None
self.__class = None
self.discriminator = None
if _self is not None:
self._self = _self
if _class is not None:
self._class = _class
@property
def _self(self):
"""Gets the _self of this ExtensionClassContainerImpl1links. # noqa: E501
:return: The _self of this ExtensionClassContainerImpl1links. # noqa: E501
:rtype: Link
"""
return self.__self
@_self.setter
def _self(self, _self):
"""Sets the _self of this ExtensionClassContainerImpl1links.
:param _self: The _self of this ExtensionClassContainerImpl1links. # noqa: E501
:type: Link
"""
self.__self = _self
@property
def _class(self):
"""Gets the _class of this ExtensionClassContainerImpl1links. # noqa: E501
:return: The _class of this ExtensionClassContainerImpl1links. # noqa: E501
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this ExtensionClassContainerImpl1links.
:param _class: The _class of this ExtensionClassContainerImpl1links. # noqa: E501
:type: str
"""
self.__class = _class
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExtensionClassContainerImpl1links):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"cliffano@gmail.com"
] |
cliffano@gmail.com
|
dca160553e252568b51df4a76fc3e773fdcfa0e5
|
4caf210180227f535f476ae372afd1d3a2f01313
|
/artsycharts/urls.py
|
823089ca34f6c3100540c43a6335ffc744fd3bd4
|
[] |
no_license
|
BWStearns/ArtsyCharts
|
d7afddd9dfaaa0cecac69893d2ca8176387f653d
|
c32efae6ca22f0478d747b47a9c04d5b8e105769
|
refs/heads/master
| 2021-01-21T00:17:52.706917
| 2014-12-22T21:13:18
| 2014-12-22T21:13:18
| 28,325,785
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
# Django
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.auth.models import User
# External
# Internal
from charts.api import router
from charts import views
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'artsycharts.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # Actual html views
    # Detail page for a single collection; the captured digits are passed to the view.
    url(r'^collections/(\d+)/$', views.collection_view, ),
    # API and Admin stuff
    # DRF router endpoints are mounted at the site root.
    url(r'^', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^admin/', include(admin.site.urls)),
)
|
[
"brianw.stearns@gmail.com"
] |
brianw.stearns@gmail.com
|
9c1157855677e70e2dbb83ed160ef13773bf4c30
|
f917c2a60653e01e330dcad6d98d23eb38fc79d2
|
/Lab6/Q1&2/Lab06_Q1functions.py
|
593c8645f8f061a57b99cd6819e074796061870e
|
[] |
no_license
|
rundong-zhou/PHY407-Projects
|
652f60907c631935775b0708bfa44a852d63f91d
|
9509c74cd2cad732888fb4424765766ab60aeb5f
|
refs/heads/master
| 2023-02-16T15:14:44.578399
| 2021-01-13T23:27:16
| 2021-01-13T23:27:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 05:51:45 2020
@author: Zirui Wan
"""
import numpy as np
# Physical constants of the softened central potential.
G = 1    # gravitational constant
M = 10   # central mass
L = 2    # rod length used to soften the potential

def f(r, t):
    """Right-hand side of the ODE system for the RK4 integrator.

    INPUT:
        r[floats]: state array of 4 variables (x, y, vx, vy)
        t[float]: time (unused here; kept for the integrator's signature)
    OUTPUT:
        array of first-order derivatives (dx/dt, dy/dt, dvx/dt, dvy/dt)
    """
    x, y, vx, vy = r
    # squared orbital radius
    r2 = x**2 + y**2
    # common acceleration factor of the rod-softened potential
    factor = -G*M/r2/np.sqrt(r2 + L**2/4)
    return np.array([vx, vy, factor*x, factor*y])
|
[
"71141331+nazeda-cc@users.noreply.github.com"
] |
71141331+nazeda-cc@users.noreply.github.com
|
40c34bd9c99fb2039166995b23dec6a86c82f436
|
b02c88bcad352811d22cadacd2aa573c426a2ca3
|
/scrapers/settings.py
|
06a6ae3c2620e357e3388b52d97f309471c0bf4b
|
[
"Apache-2.0"
] |
permissive
|
frankier/ties445
|
9e24c3e415b4a07584c41e0e4a3f241b133463b8
|
4ceee5390d81aedc0fb3904803797584dd5084b8
|
refs/heads/master
| 2020-12-31T07:18:42.938988
| 2016-05-17T18:45:14
| 2016-05-17T18:45:14
| 56,767,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,085
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for scrapers project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scrapers'
SPIDER_MODULES = ['scrapers.spiders']
NEWSPIDER_MODULE = 'scrapers.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrapers (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scrapers.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy.downloadermiddlewares.decompression.DecompressionMiddleware': 1,
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 1,
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'scrapers.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"frankie@robertson.name"
] |
frankie@robertson.name
|
d42c2fb57540b52754abd4678d2b2e19c8cd9368
|
4e5c57cd961ccfd0e5ea4a488fe5ba21525f34c1
|
/manage.py
|
3ece816a1d26f68f9c14f1a4ed52da8fc25b2158
|
[] |
no_license
|
kk5678/NewsInterface
|
56cb2c91d898725841ad226f71d27cebcead91c6
|
d3abce01c148b98a5ecd00e3ba958253fe5be7eb
|
refs/heads/master
| 2023-07-06T20:57:06.674636
| 2021-08-18T15:40:02
| 2021-08-18T15:40:02
| 397,651,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Fall back to the project settings module unless the caller already set one.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'NewsInterface.settings')
    try:
        # Imported lazily so a missing Django produces the helpful message below.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
|
[
"827799307@qq.com"
] |
827799307@qq.com
|
5e6bc2d33224e5db9d7abdcf3df63cae6323d06d
|
71d3f67eabe240d9f1f5010e8cd8cd0e693eef76
|
/cfg/pose_prediction.cfg
|
0ef450999df50fab4254c5d434c9234d15e81cc7
|
[
"BSD-3-Clause"
] |
permissive
|
asr-ros/asr_recognizer_prediction_ism
|
0eb656969519b6edd626c31c95b1c5b643d02e4b
|
bebed25c3b28d52b8360137cab2b52905018cda2
|
refs/heads/master
| 2021-03-16T10:10:26.277799
| 2020-01-06T11:18:30
| 2020-01-06T11:18:30
| 73,502,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
cfg
|
#!/usr/bin/env python2
from dynamic_reconfigure.parameter_generator_catkin import *
PACKAGE = "asr_recognizer_prediction_ism"
gen = ParameterGenerator()
# Enum of the available pose predictors; the int is the value exposed over dynamic_reconfigure.
size_enum = gen.enum([ gen.const("best_path", int_t, 0, "Best Path"),
                       gen.const("old_prediction_non_normalized", int_t, 1, "old_prediction_non_normalized"),
                       gen.const("old_prediction_normalized", int_t, 2, "old_prediction_normalized"),
                       gen.const("random_path", int_t, 3, "random_path"),
                       gen.const("shortest_path", int_t, 4, "shortest_path")],
                     "An enum to set size")
# Selected predictor: default 4 (shortest_path), valid range 0..4.
gen.add("posePredictor", int_t, 0, "Choose Posepredictor.", 4, 0, 4, edit_method=size_enum)
gen.add("enableVisualization", bool_t, 0, "toggle Visualization.", True)
exit(gen.generate(PACKAGE, "asr_recognizer_prediction_ism", "pose_prediction"))
|
[
"ujdhi@student.kit.edu"
] |
ujdhi@student.kit.edu
|
f3c255111173fd934609d2f484fe11d84a6389c5
|
9ea9eafd07b918e893de6362517b8043806b1e18
|
/arktours/wsgi.py
|
8b514d7f20025d309a523537a0ae390a8c2fbb33
|
[] |
no_license
|
Muhanguzi/ArkTours
|
b508a19fa2148695d17f9b1728356a3e0d642b9b
|
f8ced1e89d757c26ab09cda2d1346d703ed77984
|
refs/heads/master
| 2020-12-24T15:22:56.563425
| 2014-08-10T05:22:14
| 2014-08-10T05:22:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
"""
WSGI config for arktours project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "arktours.settings")
from whitenoise.django import DjangoWhiteNoise
application = DjangoWhiteNoise(get_wsgi_application())
|
[
"daniel@daniel-M.(none)"
] |
daniel@daniel-M.(none)
|
592216a6120e78bdd900efa5d1643a5167d856e3
|
f87ba5b342e3ec212a9dd5d661acefb3aa7cff6c
|
/ComPDF.py
|
ea7038fcf6c8b9c2bd13287a68098c71defe884d
|
[] |
no_license
|
JackLuguibin/CompoundPdf
|
e7d496abf234709b0ed6b4398e9ceeb44051c2a0
|
f1e57313ec0a5efaf5572826ff920b4254e1e86d
|
refs/heads/master
| 2020-11-28T14:49:09.943578
| 2019-12-24T03:03:04
| 2019-12-24T03:03:04
| 229,850,240
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,052
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 18 17:20:44 2019
@author: Administrator
"""
import os, sys, codecs
from argparse import ArgumentParser, RawTextHelpFormatter
from PyPDF2 import PdfFileReader, PdfFileWriter, PdfFileMerger
def getfilenames(filepath='',filelist_out=[],file_ext='all'):
# 遍历filepath下的所有文件,包括子目录下的文件
for fpath, dirs, fs in os.walk(filepath):
for f in fs:
fi_d = os.path.join(fpath, f)
if file_ext == 'all':
filelist_out.append(fi_d)
elif os.path.splitext(fi_d)[1] == file_ext:
filelist_out.append(fi_d)
else:
pass
return filelist_out
def mergefiles(path, output_filename, import_bookmarks=False):
# 遍历目录下的所有pdf将其合并输出到一个pdf文件中,输出的pdf文件默认带书签,书签名为之前的文件名
# 默认情况下原始文件的书签不会导入,使用import_bookmarks=True可以将原文件所带的书签也导入到输出的pdf文件中
merger = PdfFileMerger()
filelist = getfilenames(filepath=path, file_ext='.pdf')
if len(filelist) == 0:
print("当前目录及子目录下不存在pdf文件")
sys.exit()
for filename in filelist:
f = codecs.open(filename, 'rb')
file_rd = PdfFileReader(f)
short_filename = os.path.basename(os.path.splitext(filename)[0])
if file_rd.isEncrypted == True:
print('不支持的加密文件:%s'%(filename))
continue
merger.append(file_rd, bookmark=short_filename, import_bookmarks=import_bookmarks)
print('合并文件:%s'%(filename))
f.close()
out_filename=os.path.join(os.path.abspath(path), output_filename)
merger.write(out_filename)
print('合并后的输出文件:%s'%(out_filename))
merger.close()
path = "在这里填写pdf所在的地址"
output_filename = "填写输出文件名.pdf"
mergefiles(path, output_filename, import_bookmarks=False)
|
[
"782056183@qq.com"
] |
782056183@qq.com
|
7d460e74d46fd859249367c10120406c2cf0c1df
|
de33f9dfbfd365b821a4e9a2abc4f74360ec9458
|
/pythonModuleException/exception_divide2.py
|
c5d5e6e65e5f5a68e070a8de4d0ce92ab1f92a3d
|
[] |
no_license
|
youjin9209/2016_embeded_raspberry-pi
|
8dea655915ed2bf2fcbe570b9c23f2c18803c10f
|
20de2d6e88a9cd8e2eece1c092cb6c633b0334d5
|
refs/heads/master
| 2021-01-12T12:09:49.963918
| 2016-12-06T05:14:01
| 2016-12-06T05:14:01
| 72,323,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
def devide(m, n):
try:
result = m/n
except ZeroDivisionError:
print("not devided by zero")
except:
print("raise error not by zerodivision")
else:
return result
finally:
print("division")
if __name__ == "__main__":
res = devide(3,2)
print(res)
print ()
res = devide(3,0)
print(res)
print()
res = devide(None,2)
print(res)
|
[
"youjin9200@naver.com"
] |
youjin9200@naver.com
|
8502202133de46f0b14432650b3f149cdc966efe
|
687efc817c868a20109c634a9556640fca4deda6
|
/challenges/strings/string_formatting.py
|
729a62ca3311baad5db74f534b123a76de5fc0e1
|
[] |
no_license
|
Sai-Ram-Adidela/hackerrank
|
56625bf813e6d7b4b863b76979ac5c2048360c80
|
2dff2dbb1e02b0c3e182556e79d2140ff960b232
|
refs/heads/master
| 2020-03-11T12:56:24.961576
| 2019-09-30T07:19:07
| 2019-09-30T07:19:07
| 130,011,087
| 1
| 0
| null | 2018-05-01T17:06:40
| 2018-04-18T05:56:37
|
Python
|
UTF-8
|
Python
| false
| false
| 127
|
py
|
def dec_to_oct(n):
n = int(input())
for i in range(1, n):
print(i+' '+dec_to_oct(i)+' '+dec_to_hex(i)+' '+dec_to_bin(i))
|
[
"noreply@github.com"
] |
Sai-Ram-Adidela.noreply@github.com
|
b91e68161a2024087f0f3b6f4fad6853f99e03bf
|
ad1bf558a6337fa51a745c15e9810bc1c2aa5aa2
|
/Source/manage.py
|
993e54bd76ae2f07d265dae8f01060e8ae31f126
|
[] |
no_license
|
Slemaire-PNI/taketwo_logparser
|
1c8f9e343b5c91f6487b298d644401b90b554217
|
b4e78d021a78458bfaf12a1c532b3aba48d7420b
|
refs/heads/main
| 2023-06-17T17:14:09.796794
| 2021-07-09T06:45:33
| 2021-07-09T06:45:33
| 384,341,567
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'logparser.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"simon.lemaire@visiercorp.com"
] |
simon.lemaire@visiercorp.com
|
747f6e3bbe97d7d158aac09fcaafe66708f7f141
|
ba1df9c61dc827dc865f8a8bb06653e85d80d1e7
|
/venv/Scripts/futurize-script.py
|
124e769fbb87ef3f752618b93d9da78a6b79f3d5
|
[] |
no_license
|
Netdea/Tool-D
|
fb55a952d88c75e0e8ea8cf3a1eb2dd781b9119f
|
8af0b8af0e864df907901ef345a52fd2ec9aa556
|
refs/heads/master
| 2023-06-11T12:03:55.881628
| 2021-07-04T11:42:07
| 2021-07-04T11:42:07
| 382,797,508
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
#!"c:\users\khant thu\pycharmprojects\pythonproject5\venv\scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','futurize'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'future==0.18.2'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('future==0.18.2', 'console_scripts', 'futurize')())
|
[
"deadnet8@gmail.com"
] |
deadnet8@gmail.com
|
38782df3260494c0dda9dbe844b891579898536b
|
3a63f1dc94044df0d5e3f8f2e89445e14af34691
|
/lapidary/checkpoint/CheckpointConvert.py
|
794f1be2463ac77c0098026c549ef75e125925ea
|
[
"MIT"
] |
permissive
|
eltsai/lapidary
|
aeb191612dbce087a0adadad52e80dfe2048a58a
|
05c8c1aaa0e865bb6406cc44e0b72c59444fafba
|
refs/heads/master
| 2022-02-16T18:36:46.428166
| 2019-08-28T15:53:56
| 2019-08-28T15:53:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,159
|
py
|
#! /usr/bin/env python3
import gzip, json, mimetypes, os, progressbar, resource, shutil
import subprocess
from argparse import ArgumentParser
from elftools.elf.elffile import ELFFile
from multiprocessing import cpu_count, Pool, Lock, Process
from pathlib import Path
from pprint import pprint
from progressbar import ProgressBar
from time import sleep
from lapidary.utils import *
from lapidary.checkpoint.Checkpoints import GDBCheckpoint
class GDBCheckpointConverter:
def __init__(self, gdb_checkpoint):
assert isinstance(gdb_checkpoint, GDBCheckpoint)
assert gdb_checkpoint.is_valid_checkpoint()
self.gdb_checkpoint = gdb_checkpoint
self.mappings = self.gdb_checkpoint.get_mappings()
@staticmethod
def compress_memory_image(file_path):
subprocess.call(['gzip', '-f', str(file_path)])
gzip_path = Path(str(file_path) + '.gz')
gzip_path.rename(file_path)
def create_pmem_file(self):
with self.gdb_checkpoint.get_pmem_file_handle() as pmem_raw,\
self.gdb_checkpoint.get_core_file_handle() as core:
core_elf = ELFFile(core)
pgsize = resource.getpagesize()
idx = 0
# Write out whole file as zeros first
pmem_raw.truncate(self.mappings['mem_size'])
# Check for shared object files
for vaddr, mapping_dict in self.mappings.items():
if vaddr == 0 or vaddr == 'mem_size':
continue
maybe_file = Path(mapping_dict['name'])
if maybe_file.exists() and maybe_file.is_file():
for s in core_elf.iter_segments():
if s['p_type'] != 'PT_LOAD':
continue
elf_start_vaddr = int(s['p_vaddr'])
elf_max_vaddr = elf_start_vaddr + int(s['p_memsz'])
if elf_start_vaddr <= vaddr and vaddr < elf_max_vaddr:
continue
else:
with maybe_file.open('rb') as shared_object:
offset = int(mapping_dict['offset'])
size = int(mapping_dict['size'])
paddr = int(mapping_dict['paddr'])
shared_object.seek(offset, 0)
pmem_raw.seek(paddr, 0)
buf = shared_object.read(size)
pmem_raw.write(buf)
# Load everything else
for s in core_elf.iter_segments():
if s['p_type'] != 'PT_LOAD':
continue
assert s['p_filesz'] == s['p_memsz']
assert s['p_memsz'] % pgsize == 0
if s['p_vaddr'] in self.mappings:
mapping = self.mappings[s['p_vaddr']]
paddr = int(mapping['paddr'])
pmem_raw.seek(paddr, 0)
mem = s.data()
assert len(mem) == s['p_memsz']
#print('{}: {} -> {}, size {}'.format(os.getpid(), s['p_vaddr'], paddr, len(mem)))
pmem_raw.write(mem)
return self.gdb_checkpoint.pmem_file
################################################################################
def convert_checkpoint(gdb_checkpoint, force_recreate):
assert isinstance(gdb_checkpoint, GDBCheckpoint)
if gdb_checkpoint.pmem_file_exists() and not force_recreate:
return None
converter = GDBCheckpointConverter(gdb_checkpoint)
pmem_out_file = converter.create_pmem_file()
assert pmem_out_file.exists()
return pmem_out_file
def add_arguments(parser):
parser.add_argument('--pool-size', '-p', default=cpu_count(),
help='Number of threads to run at a time.')
parser.add_argument('--checkpoint-dir', '-d',
help='Directory that contains all checkpoints.')
parser.add_argument('--num-checkpoints', '-n', default=None, type=int,
help='Number of checkpoints to simulate. If None, then all.')
parser.add_argument('--force', '-f', default=False, action='store_true',
help='Override existing checkpoints. Disabled by default')
parser.add_argument('--no-compression', '-x', default=False,
action='store_true', help='Do not compress pmem file. Faster, but space intensive')
def main():
parser = ArgumentParser(description='Convert gdb core dumps into gem5 pmem files.')
add_arguments(parser)
args = parser.parse_args()
checkpoint_dir = Path(args.checkpoint_dir)
assert checkpoint_dir.exists()
pool_args = []
for checkpoint_subdir in utils.get_directory_entries_by_time(checkpoint_dir):
if checkpoint_subdir.is_dir():
checkpoint = GDBCheckpoint(checkpoint_subdir)
if checkpoint.is_valid_checkpoint():
pool_args += [ (checkpoint, args.force) ]
else:
print('{} is not a valid checkpoint, skipping.'.format(checkpoint))
if args.num_checkpoints is not None:
pool_args = utils.select_evenly_spaced(pool_args, args.num_checkpoints)
with Pool(int(args.pool_size)) as pool:
bar = ProgressBar(max_value=len(pool_args))
lock = Lock()
def update_bar(pmem_file_dest):
try:
lock.acquire()
bar.update(update_bar.num_complete)
update_bar.num_complete += 1
if pmem_file_dest is not None:
update_bar.newly_created += 1
if update_bar.compress:
gzip_proc = Process(target=GDBCheckpointConverter.compress_memory_image,
args=(pmem_file_dest,))
update_bar.gzip_procs += [gzip_proc]
gzip_proc.start()
finally:
lock.release()
update_bar.num_complete = 0
update_bar.newly_created = 0
update_bar.gzip_procs = []
update_bar.compress = not args.no_compression
bar.start()
def fail(e):
raise e
results = []
for args in pool_args:
result = pool.apply_async(convert_checkpoint, args, callback=update_bar,
error_callback=fail)
results += [result]
all_ready = False
while not all_ready:
all_ready = True
for result in [r for r in results if not r.ready()]:
result.wait(0.1)
if not result.ready():
all_ready = False
sleep(1)
bar.finish()
progressbar.streams.flush()
for gzip_proc in update_bar.gzip_procs:
if gzip_proc is not None:
gzip_proc.join()
print('\n{}/{} newly created, {}/{} already existed.'.format(
update_bar.newly_created, len(pool_args),
len(pool_args) - update_bar.newly_created, len(pool_args)))
return 0
if __name__ == '__main__':
exit(main())
|
[
"ian.gl.neal@gmail.com"
] |
ian.gl.neal@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.