hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f4b256384aba49c788282f89f33675aa04a6159a | 460 | py | Python | setup.py | ovod88/studentsdb | ade2cff0eea7a13644a0f708133901457352ff5c | [
"MIT"
] | null | null | null | setup.py | ovod88/studentsdb | ade2cff0eea7a13644a0f708133901457352ff5c | [
"MIT"
] | null | null | null | setup.py | ovod88/studentsdb | ade2cff0eea7a13644a0f708133901457352ff5c | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
# Packaging metadata for the students DB Django application.
setup(
    name='django-studentsdb-app',
    version='1.0',
    author=u'Vova Khrystenko',
    author_email='ovod88@bigmir.net',
    # Automatically discover all packages in the source tree.
    packages=find_packages(),
    license='BSD licence, see LICENCE.txt',
    description='Students DB application',
    # NOTE(review): the file handle is never closed; acceptable for a
    # one-shot setup.py run, but a context manager would be cleaner.
    long_description=open('README.txt').read(),
    zip_safe=False,
    include_package_data=True,
    package_data = {
        'students': ['requirements.txt']
    },
)
f4b2b65f20de9c3b980e6d6e57bd05701477f757 | 1,142 | py | Python | nlpproject/main/Node.py | Hrishi2312/IR-reimagined | 2bcaf207a402bdae9fc39be516ccb607ce78d174 | [
"MIT"
] | null | null | null | nlpproject/main/Node.py | Hrishi2312/IR-reimagined | 2bcaf207a402bdae9fc39be516ccb607ce78d174 | [
"MIT"
] | null | null | null | nlpproject/main/Node.py | Hrishi2312/IR-reimagined | 2bcaf207a402bdae9fc39be516ccb607ce78d174 | [
"MIT"
] | null | null | null | from .words import *
class Node:
    """One posting in a per-word linked list: a document id plus the
    term frequency of the word in that document."""

    def __init__(self, docId, freq=None):
        # Document identifier this posting refers to.
        self.doc = docId
        # Frequency of the word in the document; None for sentinel heads.
        self.freq = freq
        # Link to the next posting (filled in as documents are indexed).
        self.nextval = None
class SlinkedList:
    """Minimal singly linked list: holds only a reference to its head node."""

    def __init__(self, head=None):
        self.head = head
# Build an inverted index: for every unique word, a linked list of
# (document id, term frequency) postings, one node per document.
linked_list_data = {}
for word in unique_words_all:
    linked_list_data[word] = SlinkedList()
    # Sentinel head node; its freq slot holds the Node class itself in the
    # original code -- kept for compatibility, the value is never read.
    linked_list_data[word].head = Node(1, Node)
word_freq_in_doc = {}
idx = 1
for file_path in glob.glob(file_folder):
    # Use a context manager so the file handle is closed (the original
    # left every handle open), and avoid rebinding the loop variable.
    with open(file_path, "r") as doc_file:
        text = doc_file.read()
    text = remove_special_characters(text)
    # Strip digits; raw string avoids the invalid '\d' escape warning.
    text = re.sub(re.compile(r'\d'), '', text)
    sentences = sent_tokenize(text)  # sentence split result currently unused
    words = word_tokenize(text)
    # Bug fix: filter on each word's length, not the length of the list
    # (the original `len(words) > 1` kept every token unchanged).
    words = [word for word in words if len(word) > 1]
    words = [word.lower() for word in words]
    words = [word for word in words if word not in Stopwords]
    word_freq_in_doc = finding_all_unique_words_and_freq(words)
    for word in word_freq_in_doc.keys():
        # Walk to the tail of this word's posting list and append.
        linked_list = linked_list_data[word].head
        while linked_list.nextval is not None:
            linked_list = linked_list.nextval
        linked_list.nextval = Node(idx, word_freq_in_doc[word])
    idx = idx + 1
| 31.722222 | 63 | 0.665499 | 213 | 0.186515 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.007881 |
f4b2fdd280ccde354efc42b0cbc21ae404482ade | 2,201 | py | Python | _open_source/examples/state_space_explain.py | daviddewhurst/daviddewhurst.github.io | d782bc7e8f9c821705fc83c7705006f40e29f360 | [
"CC0-1.0"
] | null | null | null | _open_source/examples/state_space_explain.py | daviddewhurst/daviddewhurst.github.io | d782bc7e8f9c821705fc83c7705006f40e29f360 | [
"CC0-1.0"
] | null | null | null | _open_source/examples/state_space_explain.py | daviddewhurst/daviddewhurst.github.io | d782bc7e8f9c821705fc83c7705006f40e29f360 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
import pathlib
import matplotlib.pyplot as plt
import torch
import pyro
from state_space import state_space_model
SEED = 123
torch.manual_seed(SEED)
pyro.set_rng_seed(SEED)
def main():
    """Demonstrate the state-space model: prior-predictive draws, SVI fit,
    then posterior-predictive extraction of the latent series."""
    figdir = pathlib.Path('./figures')
    figdir.mkdir(exist_ok=True)
    # demo predictive capacity
    N = 3    # number of series sampled from the prior
    T = 101  # number of time steps per prior draw
    # draws from the prior predictive are shape (T, N)
    # each draw uses different draws from global drift and vol params
    n_prior_draws = 5
    prior_predictive = torch.stack(
        [state_space_model(None, N=N, T=T) for _ in range(n_prior_draws)]
    )
    colors = plt.get_cmap('cividis', n_prior_draws)
    fig, ax = plt.subplots()
    # list(...) forces the lazy map so every draw actually gets plotted.
    list(map(
        lambda i: ax.plot(prior_predictive[i], color=colors(i)),
        range(prior_predictive.shape[0])
    ))
    plt.savefig(figdir / 'state_space_prior_predictive.png', bbox_inches='tight')
    #######
    # as far as inference goes, actually just a diagonal normal should be ok..
    data_N = 100  # synthetic dataset: 100 series of 50 steps
    data_T = 50
    data = state_space_model(None, N=data_N, T=data_T, verbose=True)
    guide = pyro.infer.autoguide.AutoDiagonalNormal(state_space_model)
    optim = pyro.optim.Adam({'lr': 0.01})
    svi = pyro.infer.SVI(state_space_model, guide, optim, loss=pyro.infer.Trace_ELBO())
    niter = 2500 # or whatever, you'll have to play with this and other optim params
    pyro.clear_param_store()
    losses = torch.empty((niter,))
    for n in range(niter):
        loss = svi.step(data, N=data_N, T=data_T)
        losses[n] = loss
        if n % 50 == 0:
            print(f"On iteration {n}, loss = {loss}")
    # you can extract the latent time series in a variety of ways
    # one of these is the pyro.infer.Predictive class
    num_samples = 100
    posterior_predictive = pyro.infer.Predictive(
        state_space_model,
        guide=guide,
        num_samples=num_samples
    )
    posterior_draws = posterior_predictive(None, N=data_N, T=data_T)
    # since our model returns the latent, we should have this in the `latent` value
    print(
        posterior_draws['latent'].squeeze().shape == (num_samples, data_T, data_N)
    )
if __name__ == "__main__":
main()
| 27.5125 | 87 | 0.663789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 616 | 0.279873 |
f4b40d349ff11c549cfc166ba441afac71e1529b | 11,554 | py | Python | modred/tests/testera.py | shubhamKGIT/modred | 6c585084a76a975c47b0eaa3e408ccdb8abd76e3 | [
"BSD-2-Clause"
] | null | null | null | modred/tests/testera.py | shubhamKGIT/modred | 6c585084a76a975c47b0eaa3e408ccdb8abd76e3 | [
"BSD-2-Clause"
] | null | null | null | modred/tests/testera.py | shubhamKGIT/modred | 6c585084a76a975c47b0eaa3e408ccdb8abd76e3 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""Test era module"""
import unittest
import os
from os.path import join
from shutil import rmtree
import numpy as np
from modred import era, parallel, util
from modred.py2to3 import range
def make_time_steps(num_steps, interval):
    """Helper function to find array of integer time steps.

    Args:
        num_steps: integer number of time steps to create; must be even
            because steps are generated in adjacent pairs.
        interval: interval between pairs of time steps, as shown above.

    Returns:
        time_steps: array of integers, time steps [0 1 interval interval+1 ...]

    Raises:
        ValueError: if ``num_steps`` is odd.
    """
    if num_steps % 2 != 0:
        raise ValueError('num_steps, %d, must be even' % num_steps)
    interval = int(interval)
    # Integer (floor) division: under Python 3, ``num_steps / 2`` is a float,
    # which made the original pass a float to np.arange.
    num_pairs = num_steps // 2
    time_steps = np.zeros(num_steps, dtype=int)
    time_steps[::2] = interval * np.arange(num_pairs)
    time_steps[1::2] = 1 + interval * np.arange(num_pairs)
    return time_steps
@unittest.skipIf(parallel.is_distributed(), 'Only test ERA in serial')
class testERA(unittest.TestCase):
    """Unit tests for the Eigensystem Realization Algorithm (ERA) module."""
    def setUp(self):
        # Each test gets a throwaway directory for files written by ERA.
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_ERA_DELETE_ME'
        if not os.path.exists(self.test_dir):
            os.mkdir(self.test_dir)
        self.impulse_file_path = join(self.test_dir, 'impulse_input%03d.txt')
    def tearDown(self):
        """Deletes all of the arrays created by the tests"""
        rmtree(self.test_dir, ignore_errors=True)
    #@unittest.skip('Testing others')
    def test_make_sampled_format(self):
        """
        Test that can give time_values and outputs in either format.

        First tests format [0, 1, P, P+1, ...] and if there is a wrong time
        value.

        Then tests [0, 1, 2, 3, ...] format.
        """
        for num_inputs in [1, 3]:
            for num_outputs in [1, 2, 4]:
                for num_time_steps in [4, 10, 12]:
                    # Generate data
                    # P=2 format [0, 1, 2, 3, ...]
                    sample_interval = 2
                    dt_system = np.random.random()
                    dt_sample = sample_interval * dt_system
                    outputs = np.random.random(
                        (num_time_steps, num_outputs, num_inputs))
                    time_steps = make_time_steps(
                        num_time_steps, sample_interval)
                    time_values = time_steps * dt_system
                    # Compute using modred
                    my_ERA = era.ERA()
                    time_steps_computed, outputs_computed =\
                        era.make_sampled_format(time_values, outputs)
                    #self.assertEqual(dt_system_computed, dt_system)
                    # Reference values: interleave consecutive samples so the
                    # result is in the dense [0, 1, 2, 3, ...] step format.
                    num_time_steps_true = (num_time_steps - 1) * 2
                    time_steps_true = make_time_steps(num_time_steps_true, 1)
                    outputs_true = np.zeros(
                        (num_time_steps_true, num_outputs, num_inputs))
                    outputs_true[::2] = outputs[:-1]
                    outputs_true[1::2] = outputs[1:]
                    # Compare values
                    np.testing.assert_equal(
                        time_steps_computed, time_steps_true)
                    np.testing.assert_equal(outputs_computed, outputs_true)
                    # Test that if there is a wrong time value, get an error
                    time_values[num_time_steps // 2] = -1
                    self.assertRaises(
                        ValueError, era.make_sampled_format, time_values,
                        outputs)
    #@unittest.skip("testing others")
    def test_assemble_Hankel(self):
        """ Tests Hankel arrays are symmetric given
        ``[CB CAB CA**P CA**(P+1)B ...]``."""
        rtol = 1e-10
        atol = 1e-12
        for num_inputs in [1,3]:
            for num_outputs in [1, 2, 4]:
                for sample_interval in [1]:
                    num_time_steps = 50
                    num_states = 5
                    # Random discrete-time stable state-space system.
                    A, B, C = util.drss(num_states, num_inputs, num_outputs)
                    time_steps = make_time_steps(
                        num_time_steps, sample_interval)
                    Markovs = util.impulse(A, B, C, time_steps[-1] + 1)
                    Markovs = Markovs[time_steps]
                    if sample_interval == 2:
                        time_steps, Markovs = era.make_sampled_format(
                            time_steps, Markovs)
                    my_ERA = era.ERA(verbosity=0)
                    my_ERA._set_Markovs(Markovs)
                    my_ERA._assemble_Hankel()
                    H = my_ERA.Hankel_array
                    Hp = my_ERA.Hankel_array2
                    # Check block-symmetry and that each block equals the
                    # analytical Markov parameter C A^k B.
                    for row in range(my_ERA.mc):
                        for col in range(my_ERA.mo):
                            np.testing.assert_equal(
                                H[row * num_outputs:(row + 1) * num_outputs,
                                  col * num_inputs:(col + 1) * num_inputs],
                                H[col * num_outputs:(col + 1) * num_outputs,
                                  row * num_inputs:(row + 1) * num_inputs])
                            np.testing.assert_equal(
                                Hp[row * num_outputs:(row + 1) * num_outputs,
                                   col * num_inputs:(col + 1) * num_inputs],
                                Hp[col * num_outputs:(col + 1) * num_outputs,
                                   row * num_inputs:(row + 1) * num_inputs])
                            np.testing.assert_allclose(
                                H[row * num_outputs:(row + 1) * num_outputs,
                                  col * num_inputs:(col + 1) * num_inputs],
                                C.dot(
                                    np.linalg.matrix_power(
                                        A, time_steps[(row + col) * 2]).dot(
                                            B)),
                                rtol=rtol, atol=atol)
                            np.testing.assert_allclose(
                                Hp[row * num_outputs:(row + 1) * num_outputs,
                                   col * num_inputs:(col + 1) * num_inputs],
                                C.dot(
                                    np.linalg.matrix_power(
                                        A, time_steps[(row + col) * 2 + 1]).dot(
                                            B)),
                                rtol=rtol, atol=atol)
    #@unittest.skip('testing others')
    def test_compute_model(self):
        """
        Test ROM Markov params similar to those given

        - generates data
        - assembles Hankel array
        - computes SVD
        - forms the ROM discrete arrays A, B, and C (D = 0)
        - Tests Markov parameters from ROM are approx. equal to full plant's
        """
        num_time_steps = 40
        num_states_plant = 12
        num_states_model = num_states_plant // 3
        for num_inputs in [1, 3]:
            for num_outputs in [1, 2]:
                for sample_interval in [1, 2, 4]:
                    time_steps = make_time_steps(
                        num_time_steps, sample_interval)
                    A, B, C = util.drss(
                        num_states_plant, num_inputs, num_outputs)
                    my_ERA = era.ERA(verbosity=0)
                    Markovs = util.impulse(A, B, C, time_steps[-1] + 1)
                    Markovs = Markovs[time_steps]
                    if sample_interval == 2:
                        time_steps, Markovs =\
                            era.make_sampled_format(time_steps, Markovs)
                        num_time_steps = time_steps.shape[0]
                    A_path_computed = join(self.test_dir, 'A_computed.txt')
                    B_path_computed = join(self.test_dir, 'B_computed.txt')
                    C_path_computed = join(self.test_dir, 'C_computed.txt')
                    A, B, C = my_ERA.compute_model(Markovs, num_states_model)
                    my_ERA.put_model(
                        A_path_computed, B_path_computed, C_path_computed)
                    #sing_vals = my_ERA.sing_vals[:num_states_model]
                    # Flatten vecs into 2D X and Y arrays:
                    # [B AB A**PB A**(P+1)B ...]
                    #direct_vecs_flat = direct_vecs.swapaxes(0,1).reshape(
                    #    (num_states_model,-1)))
                    # Exact grammians from Lyapunov eqn solve
                    #gram_cont = util.solve_Lyapunov(A, B*B.H)
                    #gram_obs = util.solve_Lyapunov(A.H, C.H*C)
                    #print(np.sort(np.linalg.eig(gram_cont)[0])[::-1])
                    #print(sing_vals)
                    #np.testing.assert_allclose(gram_cont.diagonal(),
                    #    sing_vals, atol=.1, rtol=.1)
                    #np.testing.assert_allclose(gram_obs.diagonal(),
                    #    sing_vals, atol=.1, rtol=.1)
                    #np.testing.assert_allclose(np.sort(np.linalg.eig(
                    #    gram_cont)[0])[::-1], sing_vals,
                    #    atol=.1, rtol=.1)
                    #np.testing.assert_allclose(np.sort(np.linalg.eig(
                    #    gram_obs)[0])[::-1], sing_vals,
                    #    atol=.1, rtol=.1)
                    # Check that the diagonals are largest entry on each row
                    #self.assertTrue((np.max(np.abs(gram_cont),axis=1) ==
                    #    np.abs(gram_cont.diagonal())).all())
                    #self.assertTrue((np.max(np.abs(gram_obs),axis=1) ==
                    #    np.abs(gram_obs.diagonal())).all())
                    # Check the ROM Markov params match the full plant's
                    Markovs_model = np.zeros(Markovs.shape)
                    for ti, tv in enumerate(time_steps):
                        Markovs_model[ti] = C.dot(
                            np.linalg.matrix_power(A, tv).dot(
                                B))
                        #print(
                        #    'Computing ROM Markov param at time step %d' % tv)
                    """
                    import matplotlib.pyplot as PLT
                    for input_num in range(num_inputs):
                        PLT.figure()
                        PLT.hold(True)
                        for output_num in range(num_outputs):
                            PLT.plot(time_steps[:50],
                            #    Markovs_model[:50, output_num,input_num], 'ko')
                            PLT.plot(time_steps[:50],Markovs[:50,
                            #    output_num, input_num],'rx')
                            PLT.plot(time_steps_dense[:50],
                            #    Markovs_dense[:50, output_num, input_num],'b--')
                            PLT.title('input %d to outputs'%input_num)
                            PLT.legend(['ROM','Plant','Dense plant'])
                            PLT.show()
                    """
                    # Loose tolerances: the ROM only approximates the plant.
                    np.testing.assert_allclose(
                        Markovs_model.squeeze(), Markovs.squeeze(),
                        rtol=0.5, atol=0.5)
                    np.testing.assert_equal(
                        util.load_array_text(A_path_computed), A)
                    np.testing.assert_equal(
                        util.load_array_text(B_path_computed), B)
                    np.testing.assert_equal(
                        util.load_array_text(C_path_computed), C)
if __name__ == '__main__':
unittest.main()
| 44.610039 | 80 | 0.481305 | 10,564 | 0.914315 | 0 | 0 | 10,635 | 0.92046 | 0 | 0 | 3,463 | 0.299723 |
f4b44b36e0c483c1cc19960204e9e48a4d8c6436 | 49 | py | Python | tests/resources/ok/ok.py | lleites/topyn | 69e2bd100e71bb0323adadb857aea724647f456e | [
"MIT"
] | 10 | 2019-11-21T22:25:34.000Z | 2022-01-13T13:44:54.000Z | tests/resources/ok/ok.py | lleites/topyn | 69e2bd100e71bb0323adadb857aea724647f456e | [
"MIT"
] | null | null | null | tests/resources/ok/ok.py | lleites/topyn | 69e2bd100e71bb0323adadb857aea724647f456e | [
"MIT"
] | null | null | null | def my_function() -> str:
return "todo bien"
| 16.333333 | 25 | 0.632653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.22449 |
f4b531bd4f8e27016a07006efb457088789ed103 | 368 | py | Python | copycat01.py | OneOfaKindGeek/mycode | bbb4391b333aaa1667314b76393f2102c05a2571 | [
"Apache-2.0"
] | null | null | null | copycat01.py | OneOfaKindGeek/mycode | bbb4391b333aaa1667314b76393f2102c05a2571 | [
"Apache-2.0"
] | null | null | null | copycat01.py | OneOfaKindGeek/mycode | bbb4391b333aaa1667314b76393f2102c05a2571 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# import additional code to complete our task
import shutil
import os
# move into the working directory
os.chdir("/home/student/mycode/")
# copy the fileA to fileB
shutil.copy("5g_research/sdn_network.txt", "5g_research/sdn_network.txt.copy")
# copy the entire directoryA to directoryB
shutil.copytree("5g_research/", "5g_research_backup/")
| 24.533333 | 78 | 0.774457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.782609 |
f4b74cdb7ebe0043071f600b703680593e743888 | 3,020 | py | Python | Scripts-python/addRulesIptables.py | Brotic66/Script-Python | fd9094efabacc38d4aec46d43cae20f83791d83b | [
"CNRI-Python"
] | null | null | null | Scripts-python/addRulesIptables.py | Brotic66/Script-Python | fd9094efabacc38d4aec46d43cae20f83791d83b | [
"CNRI-Python"
] | null | null | null | Scripts-python/addRulesIptables.py | Brotic66/Script-Python | fd9094efabacc38d4aec46d43cae20f83791d83b | [
"CNRI-Python"
] | null | null | null | # coding=utf-8
'''
This file contains a script that links a PHP 5.6 web application (Symfony with Doctrine)
to the administration of a server, in particular its firewall (iptables).
It opens firewall ports for IP addresses fetched from the database, which were added through the web application.
'''
__author__ = 'brice VICO'
import os
import MySQLdb
import re
PORTNUMBER = {"Silvaco Enseignement": 1000, "Cadence Enseignement": 1001, "Synopsys Enseignement": 1002,
"Memscap Enseignement": 1003, "Coventor Enseignement": 1004}
'''
This function extracts a list of IP addresses, as a plain list, from a Doctrine
ArrayCollection blob stored in the database (a rather odd PHP-serialized SQL array...).
'''
def getIps(data):
    """Extract the IP address strings from a PHP/Doctrine-serialized blob.

    The payload looks like ``...a:N:{i:0;s:11:"192.168.0.1";...}}``; the
    result is the list of quoted, semicolon-terminated strings in order.
    """
    # Grab everything between the last 'a:N:{' marker and the closing '}}'.
    payload = re.search(r'.*a:\d+:{(.*)}}', data).group(1)
    # Drop the serialized index/length markers ('i:<idx>;s:<len>:').
    payload = re.sub(r'i:\d+;s:\d+:', '', payload)
    # What remains is a run of '"<ip>";' segments; collect them in order.
    return re.findall(r'"(.*?)";', payload)
'''
This function fetches the list of installations that were validated by a manager
through the web application.
'''
def recupererInstallationsValide():
    """Return the validated installations as (Installation.ips, Logiciel.nom) rows.

    Connects to the local 'Crcc' MySQL database and returns the raw
    cursor.fetchall() tuple of rows; empty tuple when nothing is validated.
    """
    db = MySQLdb.connect("localhost", "xxxxxx", "xxxxxx", "Crcc")
    cursor = db.cursor()
    query = "SELECT i.ips, l.nom FROM Installation i, Logiciel l WHERE valide = 1 AND l.id = i.logiciel_id"
    lines = cursor.execute(query)  # executed for its side effect; return value unused
    data = cursor.fetchall()
    db.close()
    return data
'''
This function groups the lists of IP addresses by software name into a dictionary.
'''
def classerParLogiciel(data):
    """Group IP payloads by software name.

    ``data`` is an iterable of ``(ips, software_name)`` pairs; the result maps
    each software name to the list of its ``ips`` payloads, preserving order.
    """
    grouped = {}
    for ips, logiciel in data:
        grouped.setdefault(logiciel, []).append(ips)
    return grouped
'''
########## Main ##########
'''
# Main flow: fetch validated installations, rebuild the ACCEPT rules per
# software/IP, then DROP the managed ports for everyone else.
data = recupererInstallationsValide()
listeFinale = {}
if data:
    listeFinale = classerParLogiciel(data)
else:
    print "Pas de données à traiter"
# Flush every existing rule before re-adding the current set.
print os.popen("iptables -F").read()
for logiciel in listeFinale:
    print logiciel + ' : '
    for listeIp in listeFinale[logiciel]:
        ips = getIps(listeIp)
        for ip in ips:
            # Open the software's dedicated port (TCP and UDP) for this IP.
            print os.popen("iptables -A INPUT -s " + ip + " -p tcp --dport " + str(
                PORTNUMBER[logiciel]) + " -j ACCEPT").read()
            print os.popen("iptables -A INPUT -s " + ip + " -p udp --dport " + str(
                PORTNUMBER[logiciel]) + " -j ACCEPT").read()
# Default-deny: drop the managed ports for all other sources.
for logiciel in PORTNUMBER:
    print os.popen('iptables -A INPUT -p tcp --dport ' + str(PORTNUMBER[logiciel]) + " -j DROP").read()
    print os.popen('iptables -A INPUT -p udp --dport ' + str(PORTNUMBER[logiciel]) + " -j DROP").read()
# Keeps the iptables update across a reboot (power outage or otherwise...)
print os.popen('service iptables-persistent save').read()
| 32.826087 | 181 | 0.648344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,425 | 0.469213 |
f4b8a8b015a6aaabe3f827c4ba3d679bcb91d751 | 2,277 | py | Python | src/vardb/deposit/annotation_config.py | Dabble-of-DevOps-Bio/ella | e38631d302611a143c9baaa684bcbd014d9734e4 | [
"MIT"
] | null | null | null | src/vardb/deposit/annotation_config.py | Dabble-of-DevOps-Bio/ella | e38631d302611a143c9baaa684bcbd014d9734e4 | [
"MIT"
] | null | null | null | src/vardb/deposit/annotation_config.py | Dabble-of-DevOps-Bio/ella | e38631d302611a143c9baaa684bcbd014d9734e4 | [
"MIT"
] | null | null | null | from typing import Any, List, Mapping, Sequence
import jsonschema
from dataclasses import dataclass, field
from sqlalchemy.orm import scoped_session
from vardb.datamodel.jsonschemas.load_schema import load_schema
from vardb.datamodel import annotation
@dataclass
class ConverterConfig:
    """Configuration for one annotation converter: a sequence of element configs."""
    elements: Sequence[Mapping[str, Any]]
@dataclass(init=False)
class AnnotationImportConfig:
    """A named annotation import step; init=False so the custom __init__
    can promote the raw mapping into a typed ConverterConfig."""
    name: str
    converter_config: ConverterConfig
    def __init__(self, name: str, converter_config: Mapping[str, Any]) -> None:
        self.name = name
        self.converter_config = ConverterConfig(**converter_config)
@dataclass(init=False)
class AnnotationConfig:
    """Top-level annotation configuration: deposit (import) steps plus a view spec.

    init=False: the custom __init__ promotes each raw deposit mapping into a
    typed AnnotationImportConfig.
    """
    deposit: Sequence[AnnotationImportConfig]
    view: List = field(default_factory=list)
    def __init__(self, deposit: Sequence[Mapping[str, Any]], view: List) -> None:
        self.view = view
        self.deposit = [AnnotationImportConfig(**sub_conf) for sub_conf in deposit]
def deposit_annotationconfig(
    session: scoped_session, annotationconfig: Mapping[str, Any]
) -> annotation.AnnotationConfig:
    """Validate and store a new annotation config row; return the new object.

    Raises RuntimeError when the incoming config is equivalent to the most
    recently stored (active) one, so no duplicate row is created.
    """
    # Validate the incoming mapping against the JSON schema first.
    schema = load_schema("annotationconfig.json")
    jsonschema.validate(annotationconfig, schema)
    # The active config is the one with the highest id.
    active_annotationconfig = (
        session.query(annotation.AnnotationConfig)
        .order_by(annotation.AnnotationConfig.id.desc())
        .limit(1)
        .one_or_none()
    )
    # Check if annotation config is equal. Note that for deposit, we do not care about order or duplicity
    # Since the deposit is a list of dicts, we can not check set-equality (dicts are not hashable),
    # so we check that all items in incoming are in active, and vice versa.
    if (
        active_annotationconfig
        and all(x in active_annotationconfig.deposit for x in annotationconfig["deposit"])
        and all(x in annotationconfig["deposit"] for x in active_annotationconfig.deposit)
        and active_annotationconfig.view == annotationconfig["view"]
    ):
        raise RuntimeError("The annotation config matches the current active annotation config.")
    ac_obj = annotation.AnnotationConfig(
        deposit=annotationconfig["deposit"], view=annotationconfig["view"]
    )
    session.add(ac_obj)
    # flush (not commit) so ac_obj gets its id; the caller owns the transaction.
    session.flush()
    return ac_obj
| 33.485294 | 105 | 0.724199 | 672 | 0.295125 | 0 | 0 | 729 | 0.320158 | 0 | 0 | 398 | 0.174791 |
f4b8b68f4b6ed74ffd5cdd6c3f4a46584a38442e | 1,266 | py | Python | yolov5/temp.py | shuyansy/A-detection-and-recognition-pipeline-of-complex-meters-in-wild | 15bc2b97078d3216cfd075ccba1cf2d2e42af54f | [
"MIT"
] | 17 | 2022-03-20T05:41:51.000Z | 2022-03-25T04:53:17.000Z | yolov5/temp.py | shuyansy/A-detection-and-recognition-pipeline-of-complex-meters-in-wild | 15bc2b97078d3216cfd075ccba1cf2d2e42af54f | [
"MIT"
] | null | null | null | yolov5/temp.py | shuyansy/A-detection-and-recognition-pipeline-of-complex-meters-in-wild | 15bc2b97078d3216cfd075ccba1cf2d2e42af54f | [
"MIT"
] | 1 | 2022-03-23T03:06:51.000Z | 2022-03-23T03:06:51.000Z | import os
import cv2
import numpy as np
from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox
from models.common import DetectMultiBackend
from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, time_sync
# Load model
device='0'
weights='runs/train/exp6/weights/best.pt'
data='data/mydata.yaml'
device = select_device(device)
model = DetectMultiBackend(weights, device=device, dnn=False, data=data)
stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine
imgsz = check_img_size([640], s=stride)  # check image size
# Iterate the meter image directory and run YOLOv5-style preprocessing on each.
path='/home/sy/ocr/datasets/all_meter_image/'
img_dir=os.listdir(path)
for i in img_dir:
    img=cv2.imread(path+i)
    # Padded resize
    img = letterbox(img, [640], stride=stride, auto=pt)[0]
    # Convert
    img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
    # NOTE(review): the preprocessed array is not used after this point in the
    # visible snippet -- presumably fed to the model elsewhere; confirm.
    img = np.ascontiguousarray(img)
f4babdf08efb27d0c5a43cf914340fd4c9566f63 | 884 | py | Python | Devashish/PythonScriptExecution/PythonScriptExecution/PyScript.py | ishmeet1995/AutomationHour | a7aeb343974d080e98a7eca09d547cda59a4abd5 | [
"MIT"
] | null | null | null | Devashish/PythonScriptExecution/PythonScriptExecution/PyScript.py | ishmeet1995/AutomationHour | a7aeb343974d080e98a7eca09d547cda59a4abd5 | [
"MIT"
] | null | null | null | Devashish/PythonScriptExecution/PythonScriptExecution/PyScript.py | ishmeet1995/AutomationHour | a7aeb343974d080e98a7eca09d547cda59a4abd5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import pandas as pd
def get_age(name):
    """Look up ``name`` in test.xlsx and return its Age; -1 unless exactly one row matches."""
    # NOTE(review): pandas' read_excel keyword is ``header`` (a row index),
    # not ``headers=True`` -- confirm this call works on the pinned version.
    df = pd.read_excel("test.xlsx", sheet_name="Sheet1", headers=True)
    print("*"*20)
    print(df)
    print("*"*20)
    rows, cols = df[df['Name']==name].shape
    print(rows, cols, "^^^")
    if rows==1:
        # NOTE(review): ``['Age'][0]`` selects by *label* 0, not position --
        # this only works when the matching row's index label happens to be 0.
        age = df[df['Name']==name]['Age'][0]
        return age
    else:
        return -1
def get_all_above_age(age):
    """Return a JSON 'records' string of Developer rows whose Age is strictly above ``age``."""
    df = pd.read_excel("test.xlsx", sheet_name="Sheet1", headers=True)
    # Keep rows whose Profession contains "Developer" AND whose Age > age.
    df_filter = df[(df['Profession'].str.contains("Developer")) & (df['Age']>age)]
    print("&&&&&&&&&&&&&&&&")
    print(df_filter)
    print("&&&&&&&&&&&&&&&&")
    return df_filter.to_json(orient='records')
def get_2_values(name):
    """Return a ``(name, age)`` pair with the looked-up age rendered as a string."""
    age_text = str(get_age(name))
    return (name, age_text)
if __name__=='__main__':
    # Ad-hoc smoke test of the spreadsheet lookup helpers.
    print(get_age("Ram"))
    print(get_all_above_age(25))
    print(get_2_values("Ram"))
| 22.1 | 82 | 0.578054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.220588 |
f4bd8e380a3348632ea15635269d008f2995dbbf | 8,638 | py | Python | ros/src/tl_detector/light_classification/tl_classifier.py | AaronLPS/CarND-Capstone | 5d5cc347b5d875c71b8f034eb5718ecdab7d46ee | [
"MIT"
] | null | null | null | ros/src/tl_detector/light_classification/tl_classifier.py | AaronLPS/CarND-Capstone | 5d5cc347b5d875c71b8f034eb5718ecdab7d46ee | [
"MIT"
] | null | null | null | ros/src/tl_detector/light_classification/tl_classifier.py | AaronLPS/CarND-Capstone | 5d5cc347b5d875c71b8f034eb5718ecdab7d46ee | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import cv2
import os
import rospy
from timeit import default_timer as timer
from styx_msgs.msg import TrafficLight
CLASS_TRAFFIC_LIGHT = 10
MODEL_DIR = 'light_classification/models/'
IMG_DIR = 'light_classification/img/'
DEBUG_DIR = 'light_classification/result/'
class TLClassifier(object):
def __init__(self):
#TODO load classifier
# object detection: faster_rcnn_inception_v2
# from Tensorflow detection model zoo:
# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
self.detector = MODEL_DIR + 'faster_rcnn_inception_v2.pb'
self.sess= self.load_graph(self.detector)
detection_graph = self.sess.graph
if not os.path.exists(DEBUG_DIR): #check the result of light detection
os.makedirs(DEBUG_DIR)
# The input placeholder for the image.
# 'get_tensor_by_name' returns the Tensor with the associated name in the Graph.
self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# the first decoding
test_image = cv2.imread(IMG_DIR + 'image_test.jpg')
image_np, box_coords, classes, scores = self.detect_tl(test_image)
# Traditional traffic light classifier
pred_image, is_red = self.classify_red_tl(image_np, box_coords, classes, scores)
# rospy.loginfo("DEBUG: stage 4")
if is_red:
rospy.loginfo("Classifier: RED")
else:
rospy.loginfo("Classifier: NOT RED")
cv2.imwrite(IMG_DIR + 'pred_image.png', pred_image)
rospy.loginfo("TensorFlow Initiation: Done")
self.num_image = 1
def load_graph(self, graph_file, use_xla=False):
config = tf.ConfigProto(log_device_placement=False)
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
# if use_xla:
# jit_level = tf.OptimizerOptions.ON_1
# config.graph_options.optimizer_options.global_jit_level = jit_level
with tf.Session(graph=tf.Graph(), config=config) as sess:
gd = tf.GraphDef()
with tf.gfile.Open(graph_file, 'rb') as f:
data = f.read()
gd.ParseFromString(data)
tf.import_graph_def(gd, name='')
ops = sess.graph.get_operations()
n_ops = len(ops)
print("number of operations = %d" % n_ops)
return sess
# return sess, ops
def detect_tl(self, image):
trt_image = np.copy(image)
image_np = np.expand_dims(np.asarray(trt_image, dtype=np.uint8), 0)
# Actual detection.
(boxes, scores, classes) = self.sess.run([self.detection_boxes, self.detection_scores, self.detection_classes],
feed_dict={self.image_tensor: image_np})
# Remove unnecessary dimensions
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes)
confidence_cutoff = 0.8
# Filter traffic light boxes with a confidence score less than `confidence_cutoff`
boxes, scores, classes = self.filter_boxes(confidence_cutoff, boxes, scores, classes, keep_classes=[CLASS_TRAFFIC_LIGHT])
# The current box coordinates are normalized to a range between 0 and 1.
# This converts the coordinates actual location on the image.
image_np = np.squeeze(image_np)
width = image_np.shape[1]
height = image_np.shape[0]
box_coords = self.to_image_coords(boxes, height, width)
return image_np, box_coords, classes, scores
# Filter the boxes which detection confidence lower than the threshold
def filter_boxes(self, min_score, boxes, scores, classes, keep_classes):
n = len(classes)
idxs = []
for i in range(n):
if scores[i] >= min_score:
if ((keep_classes is None) or (int(classes[i]) in keep_classes)):
idxs.append(i)
filtered_boxes = boxes[idxs, ...]
filtered_scores = scores[idxs, ...]
filtered_classes = classes[idxs, ...]
return filtered_boxes, filtered_scores, filtered_classes
# Convert the normalized box coordinates (0~1) to image coordinates
def to_image_coords(self, boxes, height, width):
box_coords = np.zeros_like(boxes)
box_coords[:, 0] = boxes[:, 0] * height
box_coords[:, 1] = boxes[:, 1] * width
box_coords[:, 2] = boxes[:, 2] * height
box_coords[:, 3] = boxes[:, 3] * width
return box_coords
#Draw bounding box on traffic light, and detect if it is RED
def classify_red_tl(self, image_np, boxes, classes, scores, thickness=5):
for i in range(len(boxes)):
# rospy.loginfo("DEBUG: stage 3.1")
bot, left, top, right = boxes[i, ...]
class_id = int(classes[i])
score = scores[i]
h = top - bot
w = right - left
if h <= 1.5 * w:
continue # Truncated Traffic Ligth box
cv2.rectangle(image_np,(left, top), (right, bot), (255, 43, 255), thickness) # BGR format for color
tl_img = image_np[int(bot):int(top), int(left):int(right)]
tl_img_simu = self.select_red_simu(tl_img) # SELECT RED
tl_img_real = self.select_lighton_real(tl_img) # SELECT TL
tl_img = (tl_img_simu + tl_img_real) / 2
gray_tl_img = cv2.cvtColor(tl_img, cv2.COLOR_RGB2GRAY)
nrows, ncols = gray_tl_img.shape[0], gray_tl_img.shape[1]
# compute center of mass of RED points
mean_row = 0
mean_col = 0
npoints = 0
for row in range(nrows):
for col in range(ncols):
if (gray_tl_img[row, col] > 0):
mean_row += row
mean_col += col
npoints += 1
if npoints > 0:
mean_row = float(mean_row / npoints) / nrows
mean_col = float(mean_col / npoints) / ncols
# Get the normalized center of mass of RED points
# Use the location of light to detect the color, RED is in the upper part of the box
if npoints > 10 and mean_row < 0.33:
rospy.loginfo("RED Light Detection Confidance: %.2f", score)
return image_np, True
return image_np, False
# select RED mask in simulation situation
def select_red_simu(self, img): # BGR
lower = np.array([ 0, 0, 200], dtype="uint8")
upper = np.array([ 55, 55, 255], dtype="uint8")
red_mask = cv2.inRange(img, lower, upper)
return cv2.bitwise_and(img, img, mask = red_mask)
# select Traffic Lighton area(HLS: high L and high S) in real situation
# for camera without polarization filter
def select_lighton_real(self, img): # HLS for real
    """Keep only bright, saturated pixels (a lit lamp) in HLS space."""
    as_hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    band_low = np.array([50, 150, 150], dtype="uint8")
    band_high = np.array([100, 255, 255], dtype="uint8")
    lit_mask = cv2.inRange(as_hls, band_low, band_high)
    return cv2.bitwise_and(img, img, mask=lit_mask)
def get_classification(self, image):
    """Determines the color of the traffic light in the image
    Args:
        image (cv::Mat): image containing the traffic light
    Returns:
        int: ID of traffic light color (specified in styx_msgs/TrafficLight)
    """
    #implement light color prediction
    # detect_tl runs the detector and returns the image plus box candidates
    image_np, box_coords, classes, scores = self.detect_tl(image)
    # light color detection
    detected_image, is_red = self.classify_red_tl(image_np, box_coords, classes, scores)
    # fimage = DEBUG_DIR + 'detected_img_' + str(self.num_image) + '.png'
    # #output the predicted image
    # cv2.imwrite(fimage, detected_image)
    # frame counter kept for the (disabled) debug image dump above
    self.num_image += 1
    #return 'if it is a RED'
    # only RED vs UNKNOWN is reported; yellow/green are never classified here
    if is_red:
        return TrafficLight.RED
    else:
        return TrafficLight.UNKNOWN
| 40.938389 | 129 | 0.606275 | 8,325 | 0.963765 | 0 | 0 | 0 | 0 | 0 | 0 | 2,481 | 0.287219 |
f4bdc5828e958cecb388cd84228d0eef206f8bdc | 3,540 | py | Python | src/lib/training/scene_sampler.py | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | [
"MIT"
] | 44 | 2020-12-09T06:15:15.000Z | 2022-03-31T02:37:47.000Z | src/lib/training/scene_sampler.py | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | [
"MIT"
] | null | null | null | src/lib/training/scene_sampler.py | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | [
"MIT"
] | 7 | 2020-12-09T10:08:32.000Z | 2021-08-17T01:53:51.000Z | import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
import numpy as np
def get_valid_starts_and_ends(get_frame_arguments: np.ndarray, min_state_index: int = 0):
    """Compute per-scene [start, end) frame-index ranges restricted to frames
    whose state index is at least *min_state_index*.

    Column 1 of *get_frame_arguments* is the (sorted) scene id and column 2
    the frame's state index.  A scene's start is advanced past leading
    frames that fail the threshold; scenes that empty out are dropped.
    """
    frames = get_frame_arguments[:]  # materialize (e.g. when backed by zarr)
    boundaries = np.flatnonzero(np.diff(frames[:, 1]) > 0) + 1
    starts = np.concatenate(([0], boundaries))
    ends = np.concatenate((boundaries, [len(frames)]))
    kept_starts, kept_ends = [], []
    while starts.size:
        satisfied = frames[starts, 2] >= min_state_index
        kept_starts.append(starts[satisfied])
        kept_ends.append(ends[satisfied])
        # advance the still-failing scenes one frame, dropping exhausted ones
        starts = starts[~satisfied] + 1
        ends = ends[~satisfied]
        alive = starts < ends
        starts, ends = starts[alive], ends[alive]
    return np.concatenate(kept_starts), np.concatenate(kept_ends)
class SceneSampler(Sampler):
    """Yields one uniformly chosen frame index per valid scene, reshuffling
    the scene order on every pass."""

    def __init__(self, get_frame_arguments: np.ndarray, min_state_index: int = 0) -> None:
        self.starts, self.ends = get_valid_starts_and_ends(get_frame_arguments, min_state_index)

    def __len__(self) -> int:
        return len(self.starts)

    def __iter__(self):
        scene_order = np.random.permutation(len(self.starts))
        frame_ids = np.random.randint(self.starts[scene_order], self.ends[scene_order])
        return iter(frame_ids)
class DistributedSceneSampler(Sampler):
    """Distributed variant of SceneSampler: each replica draws one random
    frame per scene from its own disjoint slice of the scene list.

    Epoch/seed handling mirrors torch.utils.data.DistributedSampler: the
    scene permutation is deterministic given (seed, epoch), but the frame
    picked inside each scene uses NumPy's global RNG and is therefore not
    controlled by seed/epoch.
    """
    def __init__(
        self,
        get_frame_arguments: np.ndarray,
        min_state_index: int = 0,
        num_replicas=None,
        rank=None,
        shuffle=True,
        seed=0
    ) -> None:
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.starts, self.ends = get_valid_starts_and_ends(get_frame_arguments, min_state_index)
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Pad so every replica gets the same number of samples per epoch.
        self.num_samples = int(math.ceil(len(self.starts) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle
        self.seed = seed

    def __iter__(self):
        if self.shuffle:
            # deterministically shuffle based on epoch and seed
            g = torch.Generator()
            g.manual_seed(self.seed + self.epoch)
            indices = torch.randperm(len(self.starts), generator=g).tolist()
        else:
            indices = list(range(len(self.starts)))
        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size
        # subsample
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples
        # indices is a plain list, so starts[indices] is NumPy fancy indexing;
        # randint draws one frame uniformly inside each selected scene range.
        return iter(np.random.randint(self.starts[indices], self.ends[indices]))

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        r"""
        Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
        use a different random ordering for each epoch. Otherwise, the next iteration of this
        sampler will yield the same ordering.
        Arguments:
            epoch (int): Epoch number.
        """
        self.epoch = epoch
| 34.368932 | 96 | 0.646045 | 2,631 | 0.74322 | 0 | 0 | 0 | 0 | 0 | 0 | 550 | 0.155367 |
f4be1ae12ce6a24db3379525a913d9920ff8ef2d | 1,680 | py | Python | aiovault/client.py | johnnoone/aiovault | 03e1bfb6f0404dcf97ce87a98c539027c4e78a37 | [
"BSD-3-Clause"
] | 1 | 2022-01-31T22:37:57.000Z | 2022-01-31T22:37:57.000Z | aiovault/client.py | johnnoone/aiovault | 03e1bfb6f0404dcf97ce87a98c539027c4e78a37 | [
"BSD-3-Clause"
] | null | null | null | aiovault/client.py | johnnoone/aiovault | 03e1bfb6f0404dcf97ce87a98c539027c4e78a37 | [
"BSD-3-Clause"
] | null | null | null | from . import v1
from .request import Request
from .util import task, extract_id
class Vault(v1.SysEndpoint):
    """High-level client for the Vault HTTP API (v1).

    Sub-APIs (audit, auth, lease, ...) are exposed as properties that all
    share one Request handler, so address/token/TLS settings are
    configured once in __init__.
    """

    def __init__(self, addr, token=None, cert=None, verify=True):
        # Token may be a plain string or an object carrying an id;
        # extract_id normalizes it.
        token = extract_id(token)
        self.req_handler = Request(addr, 'v1', token=token,
                                   cert=cert, verify=verify)

    @property
    def audit(self):
        """Audit backend endpoints."""
        return v1.AuditEndpoint(self.req_handler)

    @property
    def auth(self):
        """Authentication backend endpoints (login lives here)."""
        return v1.AuthEndpoint(self.req_handler)

    @property
    def lease(self):
        """Lease renewal/revocation endpoints."""
        return v1.LeaseEndpoint(self.req_handler)

    @property
    def policy(self):
        """ACL policy endpoints."""
        return v1.PolicyEndpoint(self.req_handler)

    @property
    def raw(self):
        """Raw storage access endpoints."""
        return v1.RawEndpoint(self.req_handler)

    @property
    def seal(self):
        """Seal/unseal endpoints."""
        return v1.SealEndpoint(self.req_handler)

    @property
    def secret(self):
        """Secret backend endpoints."""
        return v1.SecretEndpoint(self.req_handler)

    @task
    def login(self, *args, **kwargs):
        """Shortcut for self.auth.login (wrapped by @task)."""
        return self.auth.login(*args, **kwargs)

    @task
    def read(self, path, **kwargs):
        """Raw request against *path*; HTTP method defaults to GET."""
        method = kwargs.pop('method', 'GET')
        response = yield from self.req_handler(method, path, **kwargs)
        return response

    @task
    def write(self, path, **kwargs):
        """Raw request against *path*; HTTP method defaults to POST."""
        method = kwargs.pop('method', 'POST')
        response = yield from self.req_handler(method, path, **kwargs)
        return response

    @task
    def delete(self, path, **kwargs):
        """Raw request against *path*; HTTP method defaults to DELETE."""
        method = kwargs.pop('method', 'DELETE')
        response = yield from self.req_handler(method, path, **kwargs)
        return response

    def __repr__(self):
        return '<Vault(addr=%r)>' % self.req_handler.addr
| 25.846154 | 70 | 0.621429 | 1,596 | 0.95 | 520 | 0.309524 | 1,197 | 0.7125 | 0 | 0 | 65 | 0.03869 |
f4bec0b744cca8c20ff1afc50477b19675f4e48d | 2,559 | py | Python | prepper.py | shawntan/quora-codesprint-2013 | 50a119ccb22cdb8bc081cc27f3d68442c0885b82 | [
"Unlicense"
] | 3 | 2016-01-24T06:22:10.000Z | 2016-06-15T00:16:56.000Z | prepper.py | shawntan/quora-codesprint-2013 | 50a119ccb22cdb8bc081cc27f3d68442c0885b82 | [
"Unlicense"
] | null | null | null | prepper.py | shawntan/quora-codesprint-2013 | 50a119ccb22cdb8bc081cc27f3d68442c0885b82 | [
"Unlicense"
] | null | null | null | import json,sys,re,math
from random import random
import numpy as np
from pprint import pprint
from sklearn import svm
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer
from sklearn.feature_extraction import DictVectorizer
from sklearn.naive_bayes import MultinomialNB,GaussianNB
from sklearn.linear_model import SGDClassifier,LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC,LinearSVC,SVR
from sklearn.linear_model import *
from nltk.tokenize import wordpunct_tokenize
from nltk.corpus import stopwords
from nltk.corpus import words
stopwords = [
w for w in stopwords.words('english')
if w not in ['who','what','when','where','how','is']
]
def word_tokenize_filter(string):
    """Tokenize *string* with wordpunct_tokenize, dropping tokens that are a
    single punctuation character (quote, colon, semicolon, period, comma, bang)."""
    punct = re.compile(r'^[\'\"\:\;\.\,\!]$')
    return [tok for tok in wordpunct_tokenize(string) if punct.match(tok) is None]
#eng_words = set([ w.lower() for w in words.words('en') ])
def prep_words(training_data,target,clsf,n):
    """Fit a bag-of-words regression pipeline on question texts and print the
    n tokens with the largest absolute coefficients.

    training_data -- parsed question dicts (each with 'question_text')
    target        -- regression target aligned with training_data
    clsf          -- linear estimator (fitted in place) exposing .coef_
    n             -- number of top-weighted tokens to print

    NOTE(review): Python 2 code (print statement, list-returning zip) with
    an old scikit-learn API (get_feature_names) -- keep the environment pinned.
    """
    counter = CountVectorizer(
        tokenizer=word_tokenize_filter,
        # stop_words=stopwords,
        binary=True,
        dtype=np.byte,
        ngram_range = (1,1),
        min_df = 1
    )
    model = Pipeline([
        ('vect',counter),
        ('clsf',clsf)
    ])
    # keep only the raw question text for vectorization
    training_data = [ d['question_text'] for d in training_data ]
    #training_data = input_data[1:5000]
    model.fit(training_data,target)
    words = counter.get_feature_names()
    weights = np.abs(clsf.coef_)
    # sort (|weight|, word) pairs ascending and print the heaviest n
    important = zip(weights,words)
    important.sort()
    print [ w for _,w in important[-n:] ]
def prep_topics(training_data,target,clsf,n):
counter = DictVectorizer()
model = Pipeline([
('vect',counter),
('clsf',clsf)
])
training_count = int(sys.stdin.next())
training_data = [ { t['name']:1 for t in d['topics']} for d in training_data ]
#training_data = input_data[1:5000]
model.fit(training_data,target)
words = counter.get_feature_names()
weights = clsf.coef_.toarray()[0]
important = zip(abs(weights),words)
important.sort()
print [ w for _,w in important[-n:] ]
if __name__=="__main__":
    # First stdin line gives the number of JSON records that follow.
    training_count = int(sys.stdin.next())
    training_data = [ json.loads(sys.stdin.next()) for _ in xrange(training_count) ]
    # Double log transform squashes the heavy-tailed answer counts.
    target = [ math.log(math.log(obj['__ans__']+1)+1) for obj in training_data ]
    #prep_topics(training_data,target,SVR(kernel='linear'),50)
    #prep_words(training_data,target,Ridge(),200)
    # Print questions ordered by answer count, with the transformed score.
    training_data.sort(key=lambda x:x['__ans__'])
    for i in training_data:
        print "%0.3f %s"%(math.log(math.log(i['__ans__']+0.9)+0.9),i['question_text'].encode('utf-8'))
f4bf1220f5c2ae8aefe49cfbdd4d189a426ccba9 | 121 | py | Python | wsgi.py | pointerish/Brevis | 0cf32b964ad151456bd73ed510ebe41cbbbfbdb7 | [
"MIT"
] | 3 | 2020-10-28T23:16:33.000Z | 2021-10-10T03:53:57.000Z | wsgi.py | pointerish/Brevis | 0cf32b964ad151456bd73ed510ebe41cbbbfbdb7 | [
"MIT"
] | null | null | null | wsgi.py | pointerish/Brevis | 0cf32b964ad151456bd73ed510ebe41cbbbfbdb7 | [
"MIT"
] | null | null | null | from api import app
if __name__ == '__main__':
    # Start with an empty urls.json so stale shortlinks from a previous
    # run are discarded before the app begins serving.
    with open('urls.json', 'w') as fj:
        fj.write('')
    app.run()
f4c0503cca138822ab25ce843524463359b2e111 | 932 | py | Python | CustomerReviewSummarizer/main.py | shashwatsagar/ReviewSummarizer | 6ed36140f30c555794a3f41e70c1c1946beae762 | [
"Apache-2.0"
] | null | null | null | CustomerReviewSummarizer/main.py | shashwatsagar/ReviewSummarizer | 6ed36140f30c555794a3f41e70c1c1946beae762 | [
"Apache-2.0"
] | null | null | null | CustomerReviewSummarizer/main.py | shashwatsagar/ReviewSummarizer | 6ed36140f30c555794a3f41e70c1c1946beae762 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: Shashank Sapaliga, Shashwat Sagar, Ishpreet Kaur, Dhwani Shah
"""
from Processor.Processor import Processor
from WebScrapper.Scrapper import Scrapper
import json
import os
print("The Data is being scrapped please wait!!!!!!!!!!")
start=0
flag = 1
scrap = Scrapper()
p = Processor()
print("Creating your Visualization Please Wait.........")
p.createOrientedReviewsMap()
p.summarize()
p.removeFeaturesWithNoReview()
p.separatePositiveAndNegative()
if os.path.exists("finalOrientation.txt"):
os.remove("finalOrientation.txt")
f = open("finalOrientation.txt", "a")
f.write(str(p.finalOrientation))
f.close()
if os.path.exists("OrientedReviews.txt"):
os.remove("OrientedReviews.txt")
f = open("OrientedReviews.txt", "a")
f.write(str(p.orientedReviews))
f.close()
from Visualization.Featuresandvisual import Visualization
vis = Visualization()
| 23.3 | 71 | 0.707082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.36588 |
f4c05a42ec953c452d55a44c9ab74a2e629db782 | 259 | py | Python | Python 2/code/json1.py | gabrielwolf/5-python-kurse | cabc2b6706f6f384963c026749fd39eb042736c6 | [
"MIT"
] | null | null | null | Python 2/code/json1.py | gabrielwolf/5-python-kurse | cabc2b6706f6f384963c026749fd39eb042736c6 | [
"MIT"
] | null | null | null | Python 2/code/json1.py | gabrielwolf/5-python-kurse | cabc2b6706f6f384963c026749fd39eb042736c6 | [
"MIT"
] | null | null | null | import json
# Sample JSON document: a contact card with nested "phone" and "email"
# objects. (Python 2 script -- note the print statements below.)
data = '''
{
  "name" : "Chuck",
  "phone" : {
    "type" : "intl",
    "number" : "+1 734 303 4456"
  },
  "email" : {
    "hide" : "yes"
  }
}'''
# Parse the JSON text into nested dicts; values are reached by chained indexing.
info = json.loads(data)
print 'Phone:',info["phone"]
print 'Number:',info["phone"]["number"]
| 14.388889 | 39 | 0.490347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.706564 |
f4c06ea3329c774be01f9918ed296584dc1358f6 | 2,384 | py | Python | samples/python/minimal_service/minimal_service_client.py | OrhanKupusoglu/ecal | 3f8d24929bfae18b0399497858c30199be96ec57 | [
"Apache-2.0"
] | null | null | null | samples/python/minimal_service/minimal_service_client.py | OrhanKupusoglu/ecal | 3f8d24929bfae18b0399497858c30199be96ec57 | [
"Apache-2.0"
] | null | null | null | samples/python/minimal_service/minimal_service_client.py | OrhanKupusoglu/ecal | 3f8d24929bfae18b0399497858c30199be96ec57 | [
"Apache-2.0"
] | null | null | null | # ========================= eCAL LICENSE =================================
#
# Copyright (C) 2016 - 2019 Continental Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ========================= eCAL LICENSE =================================
import sys
import time
import ecal.core.core as ecal_core
import ecal.core.service as ecal_service
def main():
  """Run the eCAL demo client: repeatedly call the 'foo' and 'ping' methods
  of the "DemoService" service (every ~0.5 s each) until eCAL shuts down,
  logging each response via a client callback."""
  # print eCAL version and date
  print("eCAL {} ({})\n".format(ecal_core.getversion(), ecal_core.getdate()))

  # initialize eCAL API
  ecal_core.initialize(sys.argv, "py_minimal_service_client")

  # set process state
  ecal_core.set_process_state(1, 1, "I feel good")

  # create a client for the "DemoService" service
  client = ecal_service.Client("DemoService")

  # define the client response callback to catch server responses
  def client_resp_callback(service_info, response):
    # call_state reports whether the server actually executed the method
    if (service_info["call_state"] == "call_state_executed"):
      print("'DemoService' method '{}' responded : '{}'".format(service_info["method_name"], response))
      print()
    else:
      print("server {} response failed, error : '{}'".format(service_info["host_name"], service_info["error_msg"]))
      print()

  # and add it to the client
  client.add_response_callback(client_resp_callback)

  # idle and call service methods
  i = 0
  while(ecal_core.ok()):
    i = i + 1

    # call foo
    request = bytes("hello foo {}".format(i), "ascii")
    print("'DemoService' method 'foo' requested with : {}".format(request))
    client.call_method("foo", request)
    time.sleep(0.5)

    # call ping
    request = bytes("ping number {}".format(i), "ascii")
    print("'DemoService' method 'ping' requested with : {}".format(request))
    client.call_method("ping", request)
    time.sleep(0.5)

  # destroy client
  client.destroy()

  # finalize eCAL API
  ecal_core.finalize()
if __name__ == "__main__":
main()
| 32.657534 | 115 | 0.665268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,414 | 0.593121 |
f4c080828534b9de9bdd23a9d40729bc926da372 | 5,022 | py | Python | model.py | yz540/Behavioral_Cloning | ef3c60766f6235256af99f455cd4502d21c47c28 | [
"MIT"
] | null | null | null | model.py | yz540/Behavioral_Cloning | ef3c60766f6235256af99f455cd4502d21c47c28 | [
"MIT"
] | null | null | null | model.py | yz540/Behavioral_Cloning | ef3c60766f6235256af99f455cd4502d21c47c28 | [
"MIT"
] | null | null | null | import csv
import cv2
import numpy as np
from scipy import ndimage
# Setup Keras
from keras.models import Sequential, Model
from keras.layers import Flatten, Dense, Lambda, Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers import Cropping2D
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import load_model
from keras import optimizers
def load_data(path):
    """Read driving_log.csv found under *path* (path must end with a
    separator) and return its rows as lists of strings, header dropped."""
    with open(path + 'driving_log.csv') as log:
        rows = csv.reader(log)
        next(rows, None)  # skip header of sample data, comment this line if not sample data
        return [row for row in rows]
def processImg(img_path):
    """Load the image at *img_path* and crop it to the road region.

    NOTE(review): scipy.ndimage.imread was deprecated and removed in
    SciPy >= 1.2 -- confirm the pinned SciPy version, or switch to
    imageio/matplotlib when upgrading.
    """
    image = ndimage.imread(img_path)
    # crop the image to keep only the road part
    # (drops the top 65 rows of sky/scenery and the bottom 25 rows of hood)
    cropped_img = image[65:image.shape[0]-25, :, :]
    return cropped_img
def generator(path, samples, batch_size=32):
    """Endless batch generator for Keras fit_generator.

    Each driving-log row contributes 6 training samples: the center, left
    and right camera images plus a horizontally flipped copy of each, so
    every yield holds up to 6 * batch_size images with matching steering
    angles.
    """
    num_samples = len(samples)
    while 1: # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                # read in images from center, left and right cameras
                # and crop them to keep only the road part
                image_center = processImg(path + './IMG/'+ batch_sample[0].split('/')[-1])
                image_left = processImg(path + './IMG/' + batch_sample[1].split('/')[-1])
                image_right = processImg(path + './IMG/' + batch_sample[2].split('/')[-1])
                images.extend([image_center, image_left, image_right])
                # steering angle for the center image_center
                steering_angle_center = float(batch_sample[3])
                # correct the steering angle for left and right camera images as if they were from the center camera
                correction = 0.2
                steering_left = steering_angle_center + correction
                steering_right = steering_angle_center - correction
                angles.extend([steering_angle_center, steering_left, steering_right])
            # data augmentation by flipping images and reverse steering angles
            images_flipped = [np.fliplr(image) for image in images]
            angles_flipped = [-angle for angle in angles]
            # add flipped images and steering angles
            images.extend(images_flipped)
            angles.extend(angles_flipped)
            X_train = np.array(images)
            y_train = np.array(angles)
            # produce 6*batch_size images to train each time
            # (sklearn.utils.shuffle returns the two arrays shuffled in unison)
            yield shuffle(X_train, y_train)
def nn_model(train_generator, validation_generator):
    """Build, compile and fit a LeNet-style steering-angle regressor.

    NOTE(review): Conv2D(6, 5, 5, ...) is the Keras 1 positional API
    (newer Keras expects Conv2D(6, (5, 5), ...)); steps_per_epoch /
    validation_steps are set to the number of samples rather than batches;
    and train_samples / validation_samples are module-level globals --
    confirm all three are intended before reusing this function.
    """
    # add normalization layer and dropout layers to the LeNet model
    model = Sequential()
    # normalize pixels to [-0.5, 0.5]; input is the 70-row cropped frame
    model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(70,320,3)))
    model.add(Conv2D(6, 5, 5, activation='relu'))
    model.add(MaxPooling2D())
    model.add(Conv2D(6, 5, 5, activation='relu'))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(Dropout(.5))
    model.add(Dense(84))
    model.add(Dropout(.5))
    # single output: the predicted steering angle (regression)
    model.add(Dense(1))
    print(model.summary())
    # compile and train the model using the generator function
    model.compile(loss='mse', optimizer='adam')
    model.fit_generator(train_generator, steps_per_epoch= len(train_samples), validation_data=validation_generator, validation_steps=len(validation_samples), epochs=3, verbose = 1)
    return model
def transfer_learning(model_path, train_generator, validation_generator):
    """Reload a saved model and continue training it on new data.

    NOTE(review): samples_per_epoch / nb_val_samples / nb_epoch are the
    Keras 1 argument names for fit_generator -- confirm the installed
    Keras version still accepts them.
    """
    # Load the previously trained model and initialize the weights from the pretrained model
    ft_model = load_model(model_path)
    ft_model.load_weights(model_path)
    # compile and train the model on the new data set
    ft_model.compile(loss='mse', optimizer='adam')
    ft_model.fit_generator(train_generator, samples_per_epoch = len(train_samples), validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=1)
    return ft_model
# first round trian
sample_data_path = "../../../opt/carnd_p3/data/"
# # fine tune data
# sample_data_path = "../../../opt/carnd_p3/train_data/"
samples = load_data(sample_data_path)
# 80/20 train/validation split of the driving-log rows
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
# each generator step yields up to 6 * batch_size samples (3 cameras, flipped)
train_generator = generator(sample_data_path, train_samples, batch_size=32)
validation_generator = generator(sample_data_path, validation_samples, batch_size=32)
# Train the model from scratch
model = nn_model(train_generator, validation_generator)
model.save('model.h5') # creates a HDF5 file 'my_model.h5'
# # Use transfer learning with a pretrained model
# model_path = 'model.h5'
# ft_model = transfer_learning(model_path, train_generator, validation_generator)
# ft_model.save('ft_model.h5')
f4c2cb83bfca6f4e55c555206abdc246480c784b | 3,414 | py | Python | vsphere_network_plugin/tests/ippool_test.py | Vladimir-Antonovich/cloudify-vsphere-plugin | 25299fd2908f8f7411fb66a1115244ceaeae2e87 | [
"Apache-2.0"
] | 10 | 2016-10-26T19:48:17.000Z | 2022-03-23T12:13:30.000Z | vsphere_network_plugin/tests/ippool_test.py | Vladimir-Antonovich/cloudify-vsphere-plugin | 25299fd2908f8f7411fb66a1115244ceaeae2e87 | [
"Apache-2.0"
] | 26 | 2016-08-29T16:01:43.000Z | 2021-04-30T07:31:52.000Z | vsphere_network_plugin/tests/ippool_test.py | Vladimir-Antonovich/cloudify-vsphere-plugin | 25299fd2908f8f7411fb66a1115244ceaeae2e87 | [
"Apache-2.0"
] | 16 | 2016-08-11T14:19:43.000Z | 2021-06-15T12:23:27.000Z | # Copyright (c) 2014-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import Mock, patch
from cloudify.state import current_ctx
from cloudify.mocks import MockCloudifyContext
from vsphere_plugin_common.constants import DELETE_NODE_ACTION
from vsphere_network_plugin import ippool
class IPPoolTest(unittest.TestCase):
    """Tests for the vsphere ippool create/delete operations.

    VsphereClient.get is patched so no real vCenter is contacted; a
    MockCloudifyContext stands in for the Cloudify node context.
    """

    def setUp(self):
        # Fresh mock context (empty properties) for every test case.
        super(IPPoolTest, self).setUp()
        self.mock_ctx = MockCloudifyContext(
            'node_name',
            properties={},
            runtime_properties={}
        )
        self.mock_ctx._operation = Mock()
        current_ctx.set(self.mock_ctx)

    @patch('vsphere_plugin_common.VsphereClient.get')
    def test_create(self, mock_client_get):
        """create() stores the new pool id in runtime properties and passes
        the pool spec plus connected network instances to create_ippool."""
        mock_client_get().create_ippool.side_effect = [12345]
        rel = Mock()
        rel.type_hierarchy = [
            "cloudify.relationships.vsphere.ippool_connected_to_network"]
        rel.target.node.type_hierarchy = ["cloudify.vsphere.nodes.Network"]
        self.mock_ctx.instance._relationships = [rel]
        self.mock_ctx.node._properties = {
            'connection_config': {
                'host': 'host',
                'port': '80'
            },
            "datacenter_name": "datacenter",
            "ippool": {
                "name": "ippool-check",
                "subnet": "192.0.2.0",
                "netmask": "255.255.255.0",
                "gateway": "192.0.2.254",
                "range": "192.0.2.1#12"
            }
        }
        ippool.create()
        self.assertEqual(
            self.mock_ctx.instance.runtime_properties,
            {'ippool': 12345}
        )
        mock_client_get().create_ippool.assert_called_once_with(
            'datacenter', {
                'subnet': '192.0.2.0',
                'netmask': '255.255.255.0',
                'range': '192.0.2.1#12',
                'name': 'ippool-check',
                'gateway': '192.0.2.254'
            },
            [rel.target.instance])

    @patch('vsphere_plugin_common.VsphereClient.get')
    def test_delete(self, mock_client_get):
        """delete() is a no-op when no pool id is stored; otherwise it
        removes the pool and clears the runtime properties."""
        self.mock_ctx._operation.name = DELETE_NODE_ACTION
        mock_client_get().delete_ippool.side_effect = [None]
        self.mock_ctx.node.properties['connection_config'] = {
            'host': 'host',
            'port': '80'
        }
        self.mock_ctx.node.properties["datacenter_name"] = "datacenter"
        # nothing to remove
        ippool.delete()
        self.assertFalse(self.mock_ctx.instance.runtime_properties)
        # something exists
        self.mock_ctx.instance.runtime_properties['ippool'] = 12345
        ippool.delete()
        mock_client_get().delete_ippool.assert_called_once_with(
            'datacenter', 12345)
        self.assertFalse(self.mock_ctx.instance.runtime_properties)
if __name__ == '__main__':
unittest.main()
| 34.836735 | 75 | 0.620094 | 2,505 | 0.733743 | 0 | 0 | 2,173 | 0.636497 | 0 | 0 | 1,236 | 0.362039 |
f4c3eeb99cd13bf33d9ab5ff221738ac9379ca5d | 370 | py | Python | common/migrations/0002_auto_20180722_1316.py | Red-Teapot/bbyaworld.com-django | 6eb8febd2cfa304a062ac924240cbdf060499cfc | [
"MIT"
] | 1 | 2020-01-11T18:04:15.000Z | 2020-01-11T18:04:15.000Z | common/migrations/0002_auto_20180722_1316.py | Red-Teapot/bbyaworld.com-django | 6eb8febd2cfa304a062ac924240cbdf060499cfc | [
"MIT"
] | 2 | 2018-08-24T08:53:27.000Z | 2019-07-05T16:08:28.000Z | common/migrations/0002_auto_20180722_1316.py | Red-Teapot/bbyaworld.com-django | 6eb8febd2cfa304a062ac924240cbdf060499cfc | [
"MIT"
] | 1 | 2018-11-22T16:19:52.000Z | 2018-11-22T16:19:52.000Z | # Generated by Django 2.0.7 on 2018-07-22 10:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: switches MiscStorageEntry.value to an
    # unbounded TextField.

    dependencies = [
        ('common', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='miscstorageentry',
            name='value',
            field=models.TextField(),
        ),
    ]
| 19.473684 | 47 | 0.583784 | 277 | 0.748649 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.254054 |
f4c5360b157971c47ab67890d1de372c50e60d6a | 282 | py | Python | MTS/__init__.py | ohhorob/pyMTS | e7553b96e72ac6d4f91657bdb7c632aeeaba3c9b | [
"Apache-2.0"
] | 1 | 2021-04-28T12:23:42.000Z | 2021-04-28T12:23:42.000Z | MTS/__init__.py | ohhorob/pyMTS | e7553b96e72ac6d4f91657bdb7c632aeeaba3c9b | [
"Apache-2.0"
] | null | null | null | MTS/__init__.py | ohhorob/pyMTS | e7553b96e72ac6d4f91657bdb7c632aeeaba3c9b | [
"Apache-2.0"
] | null | null | null | # MTS Log protocol -- http://www.innovatemotorsports.com/support/downloads/Seriallog-2.pdf
# Serial: 8-N-1-19.2 kbit/sec
# Packet periodicity: 81.92 milliseconds (12.2 hertz) (8 MHz / 655360)
# Sample resolution: 10 bits (0..5V at 0.1% resolution)
import Header
from word import *
| 35.25 | 90 | 0.734043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 244 | 0.865248 |
f4c6a6f5ec87a73f6a0b0260e13c8a0bf8dbe17d | 6,688 | py | Python | savecode/threeyears/idownclient/clientdbmanager/dbsqlite/tbdata.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | [
"Apache-2.0"
] | 2 | 2019-05-19T11:54:26.000Z | 2019-05-19T12:03:49.000Z | savecode/threeyears/idownclient/clientdbmanager/dbsqlite/tbdata.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | [
"Apache-2.0"
] | 1 | 2020-11-27T07:55:15.000Z | 2020-11-27T07:55:15.000Z | savecode/threeyears/idownclient/clientdbmanager/dbsqlite/tbdata.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | [
"Apache-2.0"
] | 2 | 2019-01-17T15:01:28.000Z | 2019-09-20T09:32:17.000Z | """
data表
create by judy
2019/02/20
插入data表前判断数据是否存在于数据库中
update by judy 2019/03/22
作为当前数据的唯一标识,保留当前数据的uniqueid
不再需要account来分辨不同的账号,因为uniqueid确实回事独一无二的
update by judy 2019/03/27
"""
from datacontract.outputdata import EStandardDataType
import traceback
from commonbaby.helpers import helper_time
from commonbaby.sql import (SqliteColumn, SqliteConn, SqliteTable,
table_locker)
from idownclient.clientdatafeedback import FeedDataBase, UniqueData
from .sqliteconfig import SqliteConfig
from .tbsqlitebase import TbSqliteBase
class TbData(TbSqliteBase):
    """Wrapper for the 'data' table, which stores one row per downloaded
    datum (unique id, data type, app type, download time) and is used to
    deduplicate downloads."""

    # Schema of the 'data' table; UniqueId is the deduplication key.
    __tb_data: SqliteTable = SqliteTable(
        'data',
        True,
        SqliteColumn(
            colname='Id',
            coltype='INTEGER',
            nullable=False,
            is_primary_key=True,
            is_auto_increament=True,
            is_unique=True).set_index_new(),
        SqliteColumn(colname='UniqueId', nullable=False).set_index_new(),
        SqliteColumn(colname='DataType', nullable=False),
        SqliteColumn(colname='AppType', coltype='INTEGER', nullable=False),
        # SqliteColumn(colname='Account', nullable=False),
        SqliteColumn(colname='DownloadTime', coltype='DATETIME', nullable=False),
    )
    databasename = 'idowndata'

    def __init__(self, dbcfg: SqliteConfig):
        TbSqliteBase.__init__(self, TbData.__tb_data._tbname, dbcfg, TbData.databasename)

    def _append_tables(self):
        # Register this table's schema with the connection manager.
        self._conn_mngr.append_table(TbData.__tb_data)

    def _get_execute_sql_pars(self, data: FeedDataBase):
        """Build the insert-parameter tuples for *data*; multi-segment data
        yields one tuple per inner segment."""
        if not isinstance(data, UniqueData):
            raise Exception("Param data is invalid.")
        sqls = []
        if not data._is_muti_seg:
            sql = self.__get_sql_pars(data, data._datatype.value)
            sqls.append(sql)
        else:
            for inner in data:
                sql = self.__get_sql_pars(inner, data._datatype.value)
                sqls.append(sql)
        return sqls

    def __get_sql_pars(self, data: UniqueData, datatype):
        # One row: (UniqueId, DataType, AppType, DownloadTime).
        pars = (
            data.get_uniqueid(),
            datatype,
            data._task.apptype,
            # data._task.account,
            helper_time.get_time_sec_tz(),
        )
        return pars

    def _dump_pars(self, pars):
        """Filter out tuples whose UniqueId is already stored in any
        database; returns only the new tuples ('dump' = dedup here)."""
        conn = None
        sql = '''
        SELECT count(1) FROM data
        WHERE UniqueId=?
        '''
        newdata = []
        for par in pars:
            try:
                # assume the record is not a duplicate by default
                dump_res = False
                for conn in self.connect_all(5):
                    try:
                        conn: SqliteConn = conn
                        c = conn.cursor
                        c.execute(sql, (par[0], ))
                        result = c.fetchall()
                        # guard against an empty result set
                        if len(result) > 0 and result[0][0] > 0:
                            # the record already exists
                            dump_res = True
                            break
                    except Exception as ex:
                        conn._conn.rollback()
                        raise ex
                    finally:
                        if conn is not None:
                            conn.close()
                if not dump_res:
                    newdata.append(par)
            except:
                self._logger.error(
                    f'Dump data error, err:{traceback.format_exc()}')
            finally:
                if conn is not None:
                    conn.commit()
                    conn.close()
        return newdata

    @table_locker(__tb_data._tbname)
    def insert_uniquely_identifies(self, data: FeedDataBase) -> bool:
        """
        Store the unique identifiers of *data* for deduplication; returns
        True/False for insert success.
        Forced re-downloads exist, so already-stored resource data may need
        to be updated.
        """
        conn: SqliteConn = None
        res: bool = False
        sql = """insert into data(
        UniqueId,
        DataType,
        AppType,
        DownloadTime
        ) values(?,?,?,?)"""
        try:
            pars = self._get_execute_sql_pars(data)
            if len(pars) == 0:
                return res
            # filter out rows already present before inserting
            new_pars = self._dump_pars(pars)
            if len(new_pars) == 0:
                return res
            pars = new_pars
            conn = self.connect_write()
            c = conn._conn.cursor()
            if len(pars) == 1:
                result = c.execute(sql, pars[0])
            else:
                result = c.executemany(sql, pars)
            if result is None or result.rowcount < 1:  # or len(result) < 1:
                res = False
            else:
                res = True
        except Exception:
            self._logger.error(
                f"Insert data to db error:\ndatatype:{data._datatype}\n"
                f"datauniqueid:{data.get_uniqueid()}\nerror:{traceback.format_exc()}"
            )
        finally:
            if conn is not None:
                conn.commit()
                conn.close()
        return res

    @table_locker(__tb_data._tbname)
    def is_data_exists(self, data: UniqueData,
                       datatype: EStandardDataType) -> bool:
        """Check whether the data already exists. Returns True/False."""
        # NOTE(review): conn starts as False (not None), so the finally
        # branch's "is not None" check passes even if the loop never ran --
        # confirm connect_all always yields at least one connection.
        conn: SqliteConn = False
        res: bool = False
        try:
            if not isinstance(data, UniqueData):
                raise Exception("Param data is invalid.")
            sql = """select count(1) from data where
            UniqueId=? and
            DataType=? and
            AppType=?"""
            for conn in self.connect_all(5):
                try:
                    conn: SqliteConn = conn
                    c = conn.cursor
                    result = c.execute(sql, (
                        data.get_uniqueid(),
                        datatype.value,
                        data._task.apptype,
                    ))
                    for c in result:
                        # print(c)
                        if len(c) > 0 and c[0] > 0:
                            res = True
                            break
                except Exception as ex:
                    conn._conn.rollback()
                    raise ex
                finally:
                    if conn is not None:
                        conn.close()
                if res:
                    break
        except Exception:
            self._logger.error(
                "Check data duplication error:\ndatatype:{}\ndataid:{}\nerror:{}"
                .format(data._datatype.name, data.get_uniqueid(),
                        traceback.format_exc()))
        finally:
            if conn is not None:
                conn.commit()
                conn.close()
        return res
| 33.108911 | 89 | 0.495963 | 6,326 | 0.90294 | 0 | 0 | 3,273 | 0.467171 | 0 | 0 | 1,472 | 0.210106 |
f4c6b6cc0643ccdb4de80f086a1cffd76a93cea5 | 4,461 | py | Python | resorganizer/task.py | anton-pershin/research-organizer | ce92ab65a28b54d78d966291cafd42efb2e62090 | [
"MIT"
] | null | null | null | resorganizer/task.py | anton-pershin/research-organizer | ce92ab65a28b54d78d966291cafd42efb2e62090 | [
"MIT"
] | null | null | null | resorganizer/task.py | anton-pershin/research-organizer | ce92ab65a28b54d78d966291cafd42efb2e62090 | [
"MIT"
] | null | null | null | from resorganizer.aux import *
class Command(object):
    """Abstraction over an executable command line.

    A Command is created with a program name plus the eligible names of its
    valued parameters (flags followed by a value, e.g. ``-f filename``) and
    bare flags (e.g. ``-v``).  Calling :meth:`substitute` with concrete
    values produces a complete command-line string.  Names are prefixed
    with a single minus automatically, so pass only the bare names.
    """
    def __init__(self, program_name, params=(), flags=()):
        self._program_name = program_name
        self._params = params
        self._flags = flags

    def substitute(self, param_values={}, flags=(), trailing_args=''):
        """Build a command line from *param_values* (name -> value dict),
        *flags* (bare flag names) and *trailing_args* (string or sequence
        of strings).  Raises if a name was not declared eligible at
        construction time.
        """
        pieces = [self._program_name + ' ']
        for name, value in param_values.items():
            if not name in self._params:
                raise Exception('Command line parameter {} is not allowed'.format(name))
            pieces.append('-{} {} '.format(name, value))
        for flag_name in flags:
            if not flag_name in self._flags:
                raise Exception('Command line flag {} is not allowed'.format(flag_name))
            pieces.append('-{} '.format(flag_name))
        tail = ' '.join(trailing_args) if is_sequence(trailing_args) else trailing_args
        return ''.join(pieces) + tail
class CommandTask(object):
    """A Command plus registered substitutions and input files.

    Each substitution (params, flags, trailing arguments) is registered
    under a substitution identifier (sid). The task then acts as a
    generator: :meth:`command_gen` yields one ``(sid, command string)``
    pair per registered substitution.

    This covers command lines of four kinds:
    (1) single program, single command params/flags, single data
    (2) single program, multiple command params/flags, single data
    (3) single program, single command params/flags, multiple data
    (4) single program, multiple command params/flags, multiple data
    (1) is an "alone" task; (2)-(4) are "multiple" tasks whose execution
    strategy is a concern of TaskExecution. Multiple-program tasks are
    built by combining several CommandTask instances.
    """
    def __init__(self, cmd, prog=''):
        self.program = prog
        self.command = cmd
        self.inputs = []
        self.params_subst = []
        self.trailing_args_subst = []
        self.flags_subst = []
        self.sids = []

    def set_input(self, input_):
        """Register one input (a file intended to be moved)."""
        self.inputs.append(input_)

    def set_substitution(self, sid, params={}, flags=(), trailing_args=''):
        """Register a substitution under the identifier ``sid``.

        ``trailing_args`` may be a string or a sequence.
        """
        self.sids.append(sid)
        self.params_subst.append(params)
        self.flags_subst.append(flags)
        self.trailing_args_subst.append(trailing_args)

    def command_gen(self):
        """Yield ``(sid, command string)`` for each registered substitution."""
        substitutions = zip(self.sids, self.params_subst, self.flags_subst,
                            self.trailing_args_subst)
        for sid, params, flag_set, trailing in substitutions:
            yield (sid, self.command.substitute(params, flag_set, trailing))
class PythonTask(object):
    """Wraps a python function to be executed together with the data files
    that should be copied for it. It automates some routines arising while
    working with other, heavier tasks. A "single function - multiple data"
    extension would be possible but is not implemented for lack of demand.
    """
    def __init__(self, func):
        self.func = func
        self.inputs = []

    def set_input(self, input_):
        """Register one input for the task."""
        self.inputs.append(input_)
f4c827b88627c2e4404e7cb9829cbdc85d36c383 | 931 | py | Python | orbit/coe2mee.py | lasr/orbit | 19c1a4d0554c7c14784a2f0dfe2ab38520e7e26a | [
"MIT"
] | null | null | null | orbit/coe2mee.py | lasr/orbit | 19c1a4d0554c7c14784a2f0dfe2ab38520e7e26a | [
"MIT"
] | null | null | null | orbit/coe2mee.py | lasr/orbit | 19c1a4d0554c7c14784a2f0dfe2ab38520e7e26a | [
"MIT"
] | 1 | 2019-03-12T04:04:53.000Z | 2019-03-12T04:04:53.000Z | """Created on Sat Oct 01 2015 16:24.
@author: Nathan Budd
"""
import numpy as np
def coe2mee(COE, mu=1.):
    """Convert classical orbital elements to modified equinoctial elements.

    Parameters
    ----------
    COE : ndarray
        mx6 array of elements ordered as [p e i W w nu].
    mu : float
        Standard gravitational parameter. Defaults to canonical units
        (not used by the conversion itself).

    Returns
    -------
    MEE : ndarray
        mx6 array of elements ordered as [p f g h k L].
    """
    p, e, i = COE[0:, 0:1], COE[0:, 1:2], COE[0:, 2:3]
    W, w, nu = COE[0:, 3:4], COE[0:, 4:5], COE[0:, 5:6]

    lon_peri = w + W                     # longitude of periapsis
    f = e * np.cos(lon_peri)             # eccentricity vector, x component
    g = e * np.sin(lon_peri)             # eccentricity vector, y component

    half_incl_tan = np.tan(i / 2.)
    h = half_incl_tan * np.cos(W)        # ascending node vector, x component
    k = half_incl_tan * np.sin(W)        # ascending node vector, y component

    L = np.mod(W + w + nu, 2 * np.pi)    # true longitude, wrapped to [0, 2pi)
    return np.concatenate((p, f, g, h, k, L), 1)
| 21.159091 | 72 | 0.542427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 533 | 0.572503 |
f4c845969dcb245e178d58873d955a0721ef9c69 | 2,602 | py | Python | contrib/bulk_operations/metadata.py | tzhaoredhat/automation | a1867dc2d3591fdae1fa7f80d457c25f9705070e | [
"MIT"
] | 18 | 2015-12-15T17:56:18.000Z | 2021-04-10T13:49:48.000Z | contrib/bulk_operations/metadata.py | tzhaoredhat/automation | a1867dc2d3591fdae1fa7f80d457c25f9705070e | [
"MIT"
] | 303 | 2015-11-18T07:37:06.000Z | 2021-05-26T12:34:01.000Z | contrib/bulk_operations/metadata.py | tzhaoredhat/automation | a1867dc2d3591fdae1fa7f80d457c25f9705070e | [
"MIT"
] | 27 | 2015-11-19T20:33:54.000Z | 2021-03-25T08:15:28.000Z | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
"""
Use the provided metadata generator if you wish to support OPTIONS requests on
list url of resources that support bulk operations. The only difference from
the generator provided by REST Framework is that it does not try to check
object permissions when the request would be bulk update.
To use the class, add this to your settings:
REST_FRAMEWORK = {
'DEFAULT_METADATA_CLASS': 'contrib.bulk_operations.metadata.BulkMetadata'
}
"""
from django.core.exceptions import PermissionDenied
from django.http import Http404
from rest_framework import exceptions
from rest_framework import metadata
from rest_framework.request import clone_request
class BulkMetadata(metadata.SimpleMetadata):
    """
    Simple wrapper around `SimpleMetadata` provided by REST Framework. This
    class can handle views supporting bulk operations by not checking object
    permissions on list URL.
    """
    def determine_actions(self, request, view):
        """
        For generic class based views we return information about the fields
        that are accepted for 'PUT' and 'POST' methods.
        This method expects that `get_object` may actually fail and gracefully
        handles it.
        Most of the code in this method is copied from the parent class.
        Returns a dict mapping HTTP method name ('PUT'/'POST') to serializer
        field metadata; methods the user may not perform are omitted.
        """
        actions = {}
        for method in set(['PUT', 'POST']) & set(view.allowed_methods):
            # Probe permissions as if the request used `method`.
            view.request = clone_request(request, method)
            try:
                # Test global permissions
                if hasattr(view, 'check_permissions'):
                    view.check_permissions(view.request)
                # Test object permissions. This will fail on list url for
                # resources supporting bulk operations. In such case
                # permissions are not checked.
                if method == 'PUT' and hasattr(view, 'get_object'):
                    try:
                        # NOTE(review): AssertionError/KeyError are swallowed
                        # here so that get_object failing on a list URL (no
                        # lookup kwarg) does not hide PUT from the metadata --
                        # confirm these are the errors the views raise.
                        view.get_object()
                    except (AssertionError, KeyError):
                        pass
            except (exceptions.APIException, PermissionDenied, Http404):
                # Permission denied for this method: omit it from actions.
                pass
            else:
                # If user has appropriate permissions for the view, include
                # appropriate metadata about the fields that should be supplied.
                serializer = view.get_serializer()
                actions[method] = self.get_serializer_info(serializer)
            finally:
                # Always restore the original request on the view.
                view.request = request
        return actions
| 37.710145 | 81 | 0.645273 | 1,828 | 0.702537 | 0 | 0 | 0 | 0 | 0 | 0 | 1,406 | 0.540354 |
f4c98af364b66028736488dbda6874b1d12ba9a4 | 83 | py | Python | str/regex_example.py | gitdummyac/Python | 5bd4acaaa707877822bef5d2b2c0a6f62b19ad8a | [
"MIT"
] | null | null | null | str/regex_example.py | gitdummyac/Python | 5bd4acaaa707877822bef5d2b2c0a6f62b19ad8a | [
"MIT"
] | null | null | null | str/regex_example.py | gitdummyac/Python | 5bd4acaaa707877822bef5d2b2c0a6f62b19ad8a | [
"MIT"
] | null | null | null | import re
txt = "Hello welcome to hacktober fest"
x = re.search("hacktober", txt)
| 16.6 | 39 | 0.710843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.53012 |
f4c9927b909dfb773cd0b0bd92024436cca40bcc | 324 | py | Python | web/templatetags/tags.py | codeschule/koodikoulu-site | dde9932564f36dce6f4dbfd31e7923f1bae83293 | [
"MIT"
] | 5 | 2015-09-16T10:50:53.000Z | 2016-01-16T09:10:37.000Z | web/templatetags/tags.py | codeschule/koodikoulu-site | dde9932564f36dce6f4dbfd31e7923f1bae83293 | [
"MIT"
] | 10 | 2015-09-07T05:58:03.000Z | 2019-02-15T10:36:48.000Z | web/templatetags/tags.py | codeschule/koodikoulu-site | dde9932564f36dce6f4dbfd31e7923f1bae83293 | [
"MIT"
] | 6 | 2015-09-06T19:42:46.000Z | 2019-12-29T21:31:07.000Z | from django import template
register = template.Library()


@register.inclusion_tag('templatetags/form_field.html')
def show_form_field(field, icon=False):
    """Render a single form field through the form_field.html fragment."""
    return dict(field=field, icon=icon)


@register.inclusion_tag('templatetags/learning_resource.html')
def show_resource(resource):
    """Render a single learning resource through its template fragment."""
    return dict(resource=resource)
f4cbf5dfb144c1dccb1554376a7cd38916728ee3 | 2,040 | py | Python | sandbox/simstream/simstream/eventmonitor.py | docquantum/airavata | 4ec5fa0aab1b75ca1e98a16648c57cd8abdb4b9c | [
"ECL-2.0",
"Apache-2.0"
] | 74 | 2015-04-10T02:57:26.000Z | 2022-02-28T16:10:03.000Z | sandbox/simstream/simstream/eventmonitor.py | docquantum/airavata | 4ec5fa0aab1b75ca1e98a16648c57cd8abdb4b9c | [
"ECL-2.0",
"Apache-2.0"
] | 126 | 2015-04-26T02:55:26.000Z | 2022-02-16T22:43:28.000Z | sandbox/simstream/simstream/eventmonitor.py | docquantum/airavata | 4ec5fa0aab1b75ca1e98a16648c57cd8abdb4b9c | [
"ECL-2.0",
"Apache-2.0"
] | 163 | 2015-01-22T14:05:24.000Z | 2022-03-17T12:24:34.000Z | #
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Utility for monitoring collected data.
Author: Jeff Kinnison (jkinniso@nd.edu)
"""
# TODO: Add method to add handlers
# TODO: Add method to create PikaProducer
# TODO: Add method to use PikaProducer to respond to events
# TODO: Add method to deactivate monitor
class EventCheckerNotCallableException(Exception):
    """Raised when the event-check object given to EventMonitor is not callable."""
    pass
class EventHandlerNotCallableException(Exception):
    """Raised when a registered event handler is not callable."""
    pass
class EventHandlerDoesNotExistException(Exception):
    """Raised when an event names a handler that was never registered."""
    pass
class EventMonitor(object):
    """Checks data for user-defined bounds violations.

    Calling an instance with a value runs the event-check function on it
    and invokes every handler named in the check's return value.

    Instance variables:
    handlers -- a dict of EventHandler objects (callables) indexed by name
    """

    def __init__(self, event_check, handlers=None):
        # `handlers` defaults to a fresh dict per instance; the previous
        # mutable `{}` default was shared by every EventMonitor instance.
        self._event_check = event_check
        self.handlers = {} if handlers is None else handlers

    def __call__(self, val):
        """Run the event check on `val` and fire the handlers it names.

        Raises EventCheckerNotCallableException if the check is not callable.
        """
        if not callable(self._event_check):
            raise EventCheckerNotCallableException
        # Bug fix: the check is stored as `_event_check`; the previous
        # `self.event_check` attribute never existed, so every call raised
        # AttributeError.
        self._run_handler(self._event_check(val))

    def _run_handler(self, handler_names):
        """Invoke each named handler, validating existence and callability."""
        for name in handler_names:
            if name not in self.handlers:
                raise EventHandlerDoesNotExistException
            if not callable(self.handlers[name]):
                raise EventHandlerNotCallableException
            self.handlers[name]()
f4cc2588e5b4784c45799a725e16c31ecaab3c64 | 1,347 | py | Python | Guitar Training Remote/music_theory/mode.py | keremkoseoglu/Python-Library | f66ab246da4eabea94596494cf2bc9b416b65b1d | [
"MIT"
] | 3 | 2018-06-28T07:09:04.000Z | 2019-03-04T14:43:52.000Z | Guitar Training Remote/music_theory/mode.py | keremkoseoglu/Python-Library | f66ab246da4eabea94596494cf2bc9b416b65b1d | [
"MIT"
] | null | null | null | Guitar Training Remote/music_theory/mode.py | keremkoseoglu/Python-Library | f66ab246da4eabea94596494cf2bc9b416b65b1d | [
"MIT"
] | 5 | 2018-06-28T07:12:28.000Z | 2021-06-03T18:20:21.000Z | import random
from music_theory.note import Note
class Mode:
    """Catalogue of the modes of the major and melodic-minor scales, with
    helpers for drawing random practice prompts."""

    def __init__(self):
        self._modes = [
            # Major oriented
            "Ionian",
            "Dorian",
            "Phrygian",
            "Lydian",
            "Mixo",
            "Aeolian",
            "Locrian",
            # Melodic minor oriented
            "Jazz minor",
            "Dorian b2",
            "Lydian aug",
            "Lydian dom",
            "Mixo b6",
            "Half dim",
            "Altered"
        ]

    def get_mode_list(self) -> []:
        """Return the internal list of mode names."""
        return self._modes

    def get_random_mode(self) -> str:
        """Return a random mode prefixed with a random root note."""
        return Note().get_random_note() + " " + self.get_random_mode_type()

    def get_random_mode_type(self) -> str:
        """Return one mode name drawn uniformly at random."""
        idx = random.randint(0, len(self._modes) - 1)
        return self._modes[idx]

    def get_random_modes(self, count: int, with_note=True) -> []:
        """Return `count` random modes, each optionally prefixed with a note."""
        results = []
        note_source = Note()
        for _ in range(count):
            prefix = note_source.get_random_note() + " " if with_note else ""
            results.append(prefix + self.get_random_mode_type())
        return results
f4cc9923dfd51c7eac4621172705c16b6f05d784 | 878 | py | Python | test/py/t-searchmem.py | alexbudmsft/dbgscript | 76dc77109bbeb8f09a893e9dd56012ff8a4b601f | [
"PSF-2.0"
] | 27 | 2015-11-05T22:19:34.000Z | 2021-08-21T02:03:52.000Z | test/py/t-searchmem.py | alexbudmsft/dbgscript | 76dc77109bbeb8f09a893e9dd56012ff8a4b601f | [
"PSF-2.0"
] | null | null | null | test/py/t-searchmem.py | alexbudmsft/dbgscript | 76dc77109bbeb8f09a893e9dd56012ff8a4b601f | [
"PSF-2.0"
] | 2 | 2015-11-06T04:32:31.000Z | 2016-08-22T18:24:20.000Z | from utils import *
car = get_car()
# Positive cases. Can't print the result because the address may change
# from run to run.
#
dbgscript.search_memory(car['name'].address-16, 100, b'FooCar', 1)
dbgscript.search_memory(car['name'].address-16, 100, b'FooCar', 2)
# Negative cases.
#
# 4 is not a multiple of the pattern length.
#
try:
dbgscript.search_memory(car['name'].address-16, 100, b'FooCar', 4)
except ValueError:
print('Swallowed ValueError')
# Try a non-existent pattern.
#
try:
dbgscript.search_memory(car['name'].address-16, 100, b'AbcDefAb', 4)
except LookupError:
print('Swallowed LookupError')
# 3 is a multiple of the pat. len, but the pattern won't be found on a
# 3 byte granularity.
#
try:
dbgscript.search_memory(car['name'].address-16, 100, b'FooCar', 3)
except LookupError:
print('Swallowed LookupError')
| 25.085714 | 72 | 0.692483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 432 | 0.492027 |
f4cd42c245f80917627216ad30dc10c24df5fd00 | 234 | py | Python | make_valid.py | shawnlewis/wordle | c7c19f0d1571d07aa4d2bd0e73ee5bf4d0175ccf | [
"MIT"
] | null | null | null | make_valid.py | shawnlewis/wordle | c7c19f0d1571d07aa4d2bd0e73ee5bf4d0175ccf | [
"MIT"
] | null | null | null | make_valid.py | shawnlewis/wordle | c7c19f0d1571d07aa4d2bd0e73ee5bf4d0175ccf | [
"MIT"
] | null | null | null | words_file = open('wordlist.txt')
print("export const VALIDGUESSES6 = [")
for word in words_file.read().split('\n'):
word = word.strip()
if word.isalpha() and len(word) == 6:
print(' "%s",' % word.lower())
print("];") | 33.428571 | 42 | 0.598291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.269231 |
f4cd5226f40c7efc92c4acddffbdcf13a2052324 | 1,132 | py | Python | 03_BFS_DFS/01_DFS_BFS/01_DFS_BFS.py | ki-yungkim/Algorithm_Step_1_Stack_Queue | 14798d37166c8803d3bf3021e79df920a15eae10 | [
"MIT"
] | null | null | null | 03_BFS_DFS/01_DFS_BFS/01_DFS_BFS.py | ki-yungkim/Algorithm_Step_1_Stack_Queue | 14798d37166c8803d3bf3021e79df920a15eae10 | [
"MIT"
] | null | null | null | 03_BFS_DFS/01_DFS_BFS/01_DFS_BFS.py | ki-yungkim/Algorithm_Step_1_Stack_Queue | 14798d37166c8803d3bf3021e79df920a15eae10 | [
"MIT"
] | null | null | null |
# Read an undirected graph from stdin: the first line is
# "node_count edge_count start_node", followed by `edge` lines of
# "n1 n2" pairs. Each edge is stored in both directions in an adjacency
# list, skipping duplicate entries.
graph = {}
n = input().split(' ')
node, edge, start = map(int, n)
for i in range(edge):
    edge_number = input().split(' ')
    n1, n2 = map(int, edge_number)
    if n1 not in graph:
        graph[n1] = [n2]
    elif n2 not in graph[n1]:
        graph[n1].append(n2)
    if n2 not in graph:
        graph[n2] = [n1]
    elif n1 not in graph[n2]:
        graph[n2].append(n1)
def DFS(graph, start):
    """Iterative depth-first search from `start`.

    Neighbours are pushed in descending order so the smallest-numbered
    unvisited node is explored first. Returns the visit order as a
    space-separated string.
    """
    order = []
    pending = [start]
    while pending:
        node = pending.pop()
        if node in order:
            continue
        order.append(node)
        if node in graph:
            unseen = sorted(set(graph[node]) - set(order), reverse=True)
            pending.extend(unseen)
    return " ".join(str(v) for v in order)
def BFS(graph, start):
    """Iterative breadth-first search from `start`.

    Neighbours are enqueued in ascending order so smaller-numbered nodes
    are visited first at each depth. Returns the visit order as a
    space-separated string.
    """
    order = []
    frontier = [start]
    while frontier:
        node = frontier.pop(0)
        if node not in order:
            order.append(node)
            if node in graph:
                frontier += sorted(set(graph[node]) - set(order))
    return " ".join(str(v) for v in order)
# Print the depth-first order first, then the breadth-first order,
# both starting from the node read from stdin.
print(DFS(graph, start))
print(BFS(graph, start))
| 22.64 | 56 | 0.49735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.010601 |
f4cd5e973a9c8cb0c4e46de8ba320355f64f5848 | 1,470 | py | Python | server/rtracker/mainapp/tests.py | Aqudei/RTrackerv3 | 17d769d33a64b56680c046359489f16304293d0c | [
"Apache-2.0"
] | null | null | null | server/rtracker/mainapp/tests.py | Aqudei/RTrackerv3 | 17d769d33a64b56680c046359489f16304293d0c | [
"Apache-2.0"
] | null | null | null | server/rtracker/mainapp/tests.py | Aqudei/RTrackerv3 | 17d769d33a64b56680c046359489f16304293d0c | [
"Apache-2.0"
] | null | null | null | from django.test import TestCase
from . import models
# Create your tests here.
class FlowTest(TestCase):
def setUp(self):
flow = models.ReportFlow.objects.create(
specification='od-base.pd.ard.rd')
self.fielduser = models.ROUser(Office='pso-borong')
self.fielduser.Flow = flow
self.fielduser.save()
self.pduser = models.ROUser.objects.create(Office='pd')
self.od_baseuser = models.ROUser.objects.create(Office='od-base')
def blank_flow_is_false(self):
self.assertTrue(not self.pduser.Flow)
def correct_flow_behaviour(self):
flow = models.ReportFlow(specification='od-base.pd.ard.rd')
self.assertEquals(flow.next_of('od-base', 'pd'))
self.assertEquals(flow.next_of('rd', 'rd'))
with self.assertRaises(models.ReportEndOfFlowException):
flow.previous_of('od-base')
def test_flow(self):
report = models.Report(Owner=self.fielduser)
report.save()
report.start_tracking(self.fielduser)
self.assertEqual(report.CurrentOffice, 'od-base')
with self.assertRaises(models.UserNoPermissionToChangeReportState):
report.approve(self.fielduser)
self.assertEqual(report.CurrentOffice, 'od-base')
report.approve(self.od_baseuser)
self.assertEqual(report.CurrentOffice, 'pd')
report.reject(self.pduser)
self.assertEqual(report.CurrentOffice, 'pso-borong')
| 32.666667 | 75 | 0.672109 | 1,387 | 0.943537 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.103401 |
f4ce0a1fac889f4443a1f231c3c08f75f3120a52 | 6,094 | py | Python | topfarm/tests/test_topfarm_utils/test_utils.py | DTUWindEnergy/TopFarm2 | cba70b20431f7a828370447117fe2e7533edf7c2 | [
"MIT"
] | 4 | 2019-02-18T08:46:00.000Z | 2021-01-28T06:35:52.000Z | topfarm/tests/test_topfarm_utils/test_utils.py | DTUWindEnergy/TopFarm2 | cba70b20431f7a828370447117fe2e7533edf7c2 | [
"MIT"
] | 1 | 2019-11-26T12:12:12.000Z | 2019-11-26T12:12:12.000Z | topfarm/tests/test_topfarm_utils/test_utils.py | DTUWindEnergy/TopFarm2 | cba70b20431f7a828370447117fe2e7533edf7c2 | [
"MIT"
] | 8 | 2019-01-14T09:33:26.000Z | 2021-06-30T11:56:03.000Z | from topfarm.utils import smart_start
import numpy as np
from topfarm.tests import npt
from topfarm import TopFarmProblem
from py_wake.examples.data import hornsrev1
from py_wake.deficit_models.noj import NOJ
from topfarm.easy_drivers import EasyScipyOptimizeDriver
from topfarm.constraint_components.boundary import CircleBoundaryConstraint
from topfarm.constraint_components.spacing import SpacingConstraint
from topfarm.cost_models.py_wake_wrapper import PyWakeAEPCostModelComponent, PyWakeAEP
from py_wake.examples.data.iea37._iea37 import IEA37Site
from py_wake.site._site import UniformSite
import pytest
def egg_tray_map():
    """Synthetic "egg tray" fitness surface sin(x) + sin(y) on a 20 x 10
    domain with 0.1 spacing. Returns (XX, YY, val) with shape (200, 100).
    """
    xs = np.arange(0, 20, 0.1)
    ys = np.arange(0, 10, 0.1)
    YY, XX = np.meshgrid(ys, xs)
    return XX, YY, np.sin(XX) + np.sin(YY)
def tests_smart_start():
    """smart_start with seed=0 must reproduce the reference layout on the
    egg-tray test surface while honouring the 2.1 minimum spacing."""
    # Reference positions produced by a previous (trusted) run with seed=0.
    xs_ref = [1.6, 14.1, 1.6, 7.9, 14.1, 7.9, 19.9, 19.9, 7.8, 5.8, 14.2,
              5.8, 1.5, 16.2, 16.2, 1.6, 3.7, 14.1, 7.9, 3.7]
    ys_ref = [1.6, 1.6, 7.9, 1.6, 7.9, 7.9, 1.6, 7.9, 5.8, 7.8, 5.8, 1.5, 5.8, 7.8, 1.5, 3.7, 1.6, 3.7, 3.7, 7.9]
    N_WT = 20
    min_space = 2.1
    XX, YY, val = egg_tray_map()
    xs, ys = smart_start(XX, YY, val, N_WT, min_space, seed=0)
    # Debug plotting, normally disabled; flip to `if 1:` to inspect the
    # layout and print updated reference arrays.
    if 0:
        import matplotlib.pyplot as plt
        plt.contourf(XX, YY, val, 100)
        for i in range(N_WT):
            circle = plt.Circle((xs[i], ys[i]), min_space / 2, color='b', fill=False)
            plt.gcf().gca().add_artist(circle)
            plt.plot(xs[i], ys[i], 'rx')
        print(np.round(xs, 1).tolist())
        print(np.round(ys, 1).tolist())
        plt.axis('equal')
        plt.show()
    npt.assert_array_almost_equal([xs, ys], [xs_ref, ys_ref])
def tests_smart_start_random():
    """smart_start with random_pct: a value above 100 must be rejected,
    and random_pct=1 with seed=0 must reproduce the reference layout."""
    # Reference positions produced by a previous (trusted) run with seed=0.
    xs_ref = [1.7, 13.9, 1.4, 7.7, 14.4, 7.6, 19.7, 19.7, 8.7, 19.4, 15.8,
              12.4, 7.7, 9.8, 14.1, 1.8, 9.7, 6.6, 13.6, 3.5]
    ys_ref = [7.9, 1.4, 1.7, 1.3, 7.9, 8.4, 1.7, 8.7, 6.4, 6.6, 2.3, 7.1, 3.5, 1.6, 5.8, 5.8, 8.3, 6.5, 3.5, 1.4]
    N_WT = 20
    min_space = 2.1
    XX, YY, val = egg_tray_map()
    np.random.seed(0)
    # random_pct above 100 is invalid and must raise.
    with pytest.raises(expected_exception=AssertionError):
        xs, ys = smart_start(XX, YY, val, N_WT, min_space, random_pct=101, seed=0)
    xs, ys = smart_start(XX, YY, val, N_WT, min_space, random_pct=1, seed=0)
    # Debug plotting, normally disabled; flip to `if 1:` to inspect.
    if 0:
        import matplotlib.pyplot as plt
        plt.contourf(XX, YY, val, 100)
        plt.plot(XX, YY, ',k')
        for i in range(N_WT):
            circle = plt.Circle((xs[i], ys[i]), min_space / 2, color='b', fill=False)
            plt.gcf().gca().add_artist(circle)
            plt.plot(xs[i], ys[i], 'rx')
        print(np.round(xs, 1).tolist())
        print(np.round(ys, 1).tolist())
        plt.axis('equal')
        plt.show()
    npt.assert_array_almost_equal([xs, ys], [xs_ref, ys_ref])
def tests_smart_start_no_feasible():
    """With min_space 5.1 the egg-tray grid cannot host 20 turbines, so
    smart_start must fail when no feasible position remains."""
    grid_x, grid_y, aep_vals = egg_tray_map()
    n_turbines = 20
    spacing = 5.1
    with pytest.raises(Exception, match="No feasible positions for wt 8"):
        xs, ys = smart_start(grid_x, grid_y, aep_vals, n_turbines, spacing)
@pytest.mark.parametrize('seed,radius,resolution,tol', [(1, 500, 10, 5),
                                                        (0, 2000, 100, 3)])
def test_smart_start_aep_map(seed, radius, resolution, tol):
    """smart_start on an AEP map must spread 4 turbines far enough apart
    that total AEP matches 4 isolated turbines to `tol` decimals."""
    site = IEA37Site(16)
    n_wt = 4
    x, y = site.initial_position[:n_wt].T
    wd_lst = np.arange(0, 360, 45)
    ws_lst = [10]
    turbines = hornsrev1.HornsrevV80()
    site = UniformSite([1], .75)
    site.default_ws = ws_lst
    site.default_wd = wd_lst
    wfm = NOJ(site, turbines)
    aep_comp = PyWakeAEPCostModelComponent(wfm, n_wt=n_wt)
    # AEP of one turbine in isolation; the wake-free optimum reference.
    aep_1wt = wfm([0], [0]).aep().sum()
    tf = TopFarmProblem(
        design_vars={'x': x, 'y': y},
        cost_comp=aep_comp,
        driver=EasyScipyOptimizeDriver(),
        constraints=[SpacingConstraint(160), CircleBoundaryConstraint((0, 0), radius)]
    )
    # Candidate-position grid covering the circular boundary.
    x = np.arange(-radius, radius, resolution)
    y = np.arange(-radius, radius, resolution)
    XX, YY = np.meshgrid(x, y)
    tf.smart_start(XX, YY, aep_comp.get_aep4smart_start(wd=wd_lst, ws=ws_lst), radius=40, plot=0, seed=seed)
    tf.evaluate()
    # Debug plotting/printing, normally disabled.
    if 0:
        wt_x, wt_y = tf['x'], tf['y']
        for i, _ in enumerate(wt_x, 1):
            print(wfm(wt_x[:i], wt_y[:i]).aep().sum(['wd', 'ws']))
        aep_comp.windFarmModel(wt_x, wt_y, ws=ws_lst, wd=wd_lst).flow_map().aep_xy().plot()
        print(tf.evaluate())
        import matplotlib.pyplot as plt
        plt.plot(wt_x, wt_y, '2r')
        for c in tf.model.constraint_components:
            c.plot()
        plt.axis('equal')
        plt.show()
    npt.assert_almost_equal(aep_1wt * n_wt, tf['AEP'], tol)
def test_smart_start_aep_map_PyWakeAEP():
    """Same scenario as test_smart_start_aep_map but driven through the
    PyWakeAEP wrapper: total AEP must equal 4 isolated turbines."""
    site = IEA37Site(16)
    n_wt = 4
    x, y = site.initial_position[:n_wt].T
    wd_lst = np.arange(0, 360, 45)
    ws_lst = [10]
    turbines = hornsrev1.HornsrevV80()
    site = UniformSite([1], .75)
    site.default_ws = ws_lst
    site.default_wd = wd_lst
    aep = PyWakeAEP(wake_model=NOJ(site, turbines))
    # AEP of one turbine in isolation; the wake-free optimum reference.
    aep_1wt = aep.calculate_AEP([0], [0]).sum()
    tf = TopFarmProblem(
        design_vars={'x': x, 'y': y},
        cost_comp=aep.get_TopFarm_cost_component(n_wt),
        driver=EasyScipyOptimizeDriver(),
        constraints=[SpacingConstraint(160), CircleBoundaryConstraint((0, 0), 500)]
    )
    # Candidate-position grid covering the 500 m circular boundary.
    x = np.arange(-500, 500, 10)
    y = np.arange(-500, 500, 10)
    XX, YY = np.meshgrid(x, y)
    tf.smart_start(XX, YY, aep.get_aep4smart_start(wd=wd_lst, ws=ws_lst), radius=40, seed=1)
    tf.evaluate()
    # Debug plotting/printing, normally disabled.
    if 0:
        wt_x, wt_y = tf['x'], tf['y']
        for i, _ in enumerate(wt_x, 1):
            print(aep.calculate_AEP(wt_x[:i], wt_y[:i]).sum((1, 2)))
        X_j, Y_j, aep_map = aep.aep_map(x, y, 0, wt_x, wt_y, ws=ws_lst, wd=wd_lst)
        print(tf.evaluate())
        import matplotlib.pyplot as plt
        c = plt.contourf(X_j, Y_j, aep_map, 100)
        plt.colorbar(c)
        plt.plot(wt_x, wt_y, '2r')
        for c in tf.model.constraint_components:
            c.plot()
        plt.axis('equal')
        plt.show()
    npt.assert_almost_equal(aep_1wt * n_wt, tf['AEP'], 5)
| 34.429379 | 113 | 0.600591 | 0 | 0 | 0 | 0 | 1,608 | 0.263866 | 0 | 0 | 156 | 0.025599 |
f4ce1feb2ee3a9c2af47ae2d5bda19d6ec7488f1 | 1,459 | py | Python | Jinja/002_BGPtemplate/config_render.py | CalvinDawgz/Python-Projects | cc5d1e93bcbe124c1864a49cc3dfbca7db520928 | [
"MIT"
] | 1 | 2021-07-10T09:22:55.000Z | 2021-07-10T09:22:55.000Z | Jinja/002_BGPtemplate/config_render.py | CalvinDawgz/Python-Projects | cc5d1e93bcbe124c1864a49cc3dfbca7db520928 | [
"MIT"
] | null | null | null | Jinja/002_BGPtemplate/config_render.py | CalvinDawgz/Python-Projects | cc5d1e93bcbe124c1864a49cc3dfbca7db520928 | [
"MIT"
] | null | null | null | import yaml
from jinja2 import Template
from argparse import ArgumentParser
parser = ArgumentParser("Specifying the YAML File")
parser.add_argument("-d", "--data-file",
help="Please specify the YAMl file.",
required=True
)
parser.add_argument("-t", "--template-file",
help="Please specify the Jinja template file.",
required=True
)
args = parser.parse_args()
# data_file_name = "routerBGP-Data.yaml"
# template_file_name = "bgp_Template.j2"
data_file_name = args.data_file
template_file_name = args.template_file
# Verifies whether the provided data and template file exist before continuing
try:
with open(data_file_name) as f:
yaml_data = yaml.safe_load(f)
with open(template_file_name) as f:
template_file = Template(f.read(), keep_trailing_newline=True)
for node in yaml_data:
# Stores full configuration that will be exported to a file
generated_configs = template_file.render(rid=node['rid'],
local_asn=node['local_asn'],
neighbors=node['neighbors'],
networks=node['networks']
)
with open("{}_BGP-config.txt".format(node['hostname']), "w") as f:
f.write(generated_configs)
except FileNotFoundError as e:
print(e)
| 33.159091 | 78 | 0.596299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 422 | 0.289239 |
f4cec63eb86b3e248b58b034bd83c2efbe098bbe | 2,415 | py | Python | src/gans/wgangp.py | universuen/RVGAN-TL | d370673063a3dfd9cc4a20bfd1c18bc95aadabca | [
"MIT"
] | null | null | null | src/gans/wgangp.py | universuen/RVGAN-TL | d370673063a3dfd9cc4a20bfd1c18bc95aadabca | [
"MIT"
] | null | null | null | src/gans/wgangp.py | universuen/RVGAN-TL | d370673063a3dfd9cc4a20bfd1c18bc95aadabca | [
"MIT"
] | null | null | null | import torch
from src import config, models
from src.models import WGANGPGModel, WGANGPDModel
from src.datasets import PositiveDataset
from ._base import Base
class WGANGP(Base):
def __init__(self):
super().__init__(WGANGPGModel(), WGANGPDModel())
def _fit(self):
d_optimizer = torch.optim.Adam(
params=self.d.parameters(),
lr=config.gan.d_lr,
betas=(0.5, 0.999),
)
g_optimizer = torch.optim.Adam(
params=self.g.parameters(),
lr=config.gan.g_lr,
betas=(0.5, 0.999),
)
x = PositiveDataset()[:][0].to(config.device)
for _ in range(config.gan.epochs):
for __ in range(config.gan.d_loops):
self.d.zero_grad()
prediction_real = self.d(x)
loss_real = - prediction_real.mean()
z = torch.randn(len(x), models.z_size, device=config.device)
fake_x = self.g(z).detach()
prediction_fake = self.d(fake_x)
loss_fake = prediction_fake.mean()
gradient_penalty = self._cal_gradient_penalty(x, fake_x)
loss = loss_real + loss_fake + gradient_penalty
loss.backward()
d_optimizer.step()
for __ in range(config.gan.g_loops):
self.g.zero_grad()
z = torch.randn(len(x), models.z_size, device=config.device)
fake_x = self.g(z)
prediction = self.d(fake_x)
loss = - prediction.mean()
loss.backward()
g_optimizer.step()
def _cal_gradient_penalty(
self,
x: torch.Tensor,
fake_x: torch.Tensor,
) -> torch.Tensor:
alpha = torch.rand(len(x), 1).to(config.device)
interpolates = alpha * x + (1 - alpha) * fake_x
interpolates.requires_grad = True
disc_interpolates = self.d(interpolates)
gradients = torch.autograd.grad(
outputs=disc_interpolates,
inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(config.device),
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * config.gan.wgangp_lambda
return gradient_penalty
| 36.044776 | 98 | 0.558592 | 2,252 | 0.932505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f4cf23c9e4011531fcde6ef317ab2fd1e345946a | 2,050 | py | Python | brainlit/napari_viterbrain/viterbrain_plugin.py | neurodata/mouselight | 462112e62d14c97dbf73970995bf3a178623e02c | [
"Apache-2.0"
] | null | null | null | brainlit/napari_viterbrain/viterbrain_plugin.py | neurodata/mouselight | 462112e62d14c97dbf73970995bf3a178623e02c | [
"Apache-2.0"
] | 6 | 2020-01-31T22:21:10.000Z | 2020-01-31T22:24:59.000Z | brainlit/napari_viterbrain/viterbrain_plugin.py | neurodata/mouselight | 462112e62d14c97dbf73970995bf3a178623e02c | [
"Apache-2.0"
] | null | null | null | import pickle
from sys import intern
from numpy import uint32
import numpy as np
import zarr
from napari_plugin_engine import napari_hook_implementation
from qtpy.QtWidgets import QWidget, QHBoxLayout, QPushButton
from magicgui import magic_factory
import pathlib
import napari
def viterbrain_reader(path: str) -> list:
with open(path, "rb") as handle:
viterbi = pickle.load(handle)
layer_labels = zarr.open(viterbi.fragment_path)
image_path = viterbi.fragment_path[:-12] + ".zarr"
layer_image = zarr.open(image_path)
scale = viterbi.resolution
meta_labels = {"name": "fragments", "scale": scale}
meta_image = {"name": "image", "scale": scale}
return [(layer_image, meta_image, "image"), (layer_labels, meta_labels, "labels")]
def napari_get_reader(path: str) -> list:
parts = path.split(".")
if parts[-1] == "pickle" or parts[-1] == "pkl":
return viterbrain_reader
else:
return None
@magic_factory(
call_button="Trace", start_comp={"max": 2**20}, end_comp={"max": 2**20}
)
def comp_trace(
v: napari.Viewer,
start_comp: int,
end_comp: int,
filename=pathlib.Path("/some/path.pickle"),
) -> None:
with open(filename, "rb") as handle:
viterbi = pickle.load(handle)
def comp2point(comp: int) -> list:
state = viterbi.comp_to_states[comp][0]
if viterbi.nxGraph.nodes[state]["type"] == "fragment":
return viterbi.nxGraph.nodes[state]["point1"]
else:
coords = viterbi.soma_fragment2coords[comp]
centroid = np.mean(coords, axis=0)
centroid = [int(c) for c in centroid]
return centroid
start_pt = comp2point(start_comp)
end_pt = comp2point(end_comp)
print(f"tracing from {start_pt} to {end_pt}")
path = viterbi.shortest_path(start_pt, end_pt)
v.add_shapes(
path,
shape_type="path",
edge_color="r",
edge_width=1,
name=f"trace {start_comp} to {end_comp}",
scale=viterbi.resolution,
)
| 26.973684 | 86 | 0.649268 | 0 | 0 | 0 | 0 | 1,085 | 0.529268 | 0 | 0 | 232 | 0.113171 |
f4d1b1ae7affabc3c3876c429d445f6eb5d699b7 | 5,122 | py | Python | examples/twisted/resource.py | florath/spyne-1 | 88d7b9b9020d5067b80927a30e229c3c5d64af85 | [
"BSD-3-Clause"
] | null | null | null | examples/twisted/resource.py | florath/spyne-1 | 88d7b9b9020d5067b80927a30e229c3c5d64af85 | [
"BSD-3-Clause"
] | null | null | null | examples/twisted/resource.py | florath/spyne-1 | 88d7b9b9020d5067b80927a30e229c3c5d64af85 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf8
#
# Copyright © Burak Arslan <burak at arskom dot com dot tr>,
# Arskom Ltd. http://www.arskom.com.tr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the owner nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""This is a blocking example running in a single-process twisted setup.
In this example, user code runs directly in the reactor loop. So unless your
code fully adheres to the asynchronous programming principles, you can block
the reactor loop. ::
$ time curl -s "http://localhost:9757/block?seconds=10" > /dev/null & \
time curl -s "http://localhost:9757/block?seconds=10" > /dev/null &
[1] 27559
[2] 27560
real 0m10.026s
user 0m0.005s
sys 0m0.008s
real 0m20.045s
user 0m0.009s
sys 0m0.005s
If you call sleep, it sleeps by returning a deferred: ::
$ time curl -s "http://localhost:9757/sleep?seconds=10" > /dev/null & \
time curl -s "http://localhost:9757/sleep?seconds=10" > /dev/null &
[1] 27778
[2] 27779
real 0m10.012s
user 0m0.000s
sys 0m0.000s
real 0m10.013s
user 0m0.000s
sys 0m0.000s
"""
import sys
import time
import logging
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.internet.task import deferLater
from twisted.python import log
from spyne import Unicode, Integer, Double, ByteArray, Iterable, rpc, \
ServiceBase, Application
from spyne.server.twisted import TwistedWebResource
from spyne.protocol.http import HttpRpc
HOST = '0.0.0.0'
PORT = 9758
class SomeService(ServiceBase):
    """Demonstrates a *blocking* RPC: time.sleep runs inside the reactor
    thread, so concurrent requests are serialized (see the module docstring
    timings for /block)."""
    @rpc(Integer, _returns=Integer)
    def block(ctx, seconds):
        """Blocks the current thread for given number of seconds."""
        # Deliberately blocks the whole reactor loop for the demo.
        time.sleep(seconds)
        return seconds
class SomeNonBlockingService(ServiceBase):
    """Demonstrates non-blocking RPCs built on twisted Deferreds: the reactor
    stays free to serve other requests while a call is waiting."""
    @rpc(Integer, _returns=Unicode)
    def sleep(ctx, seconds):
        """Waits without blocking reactor for given number of seconds by
        returning a deferred."""
        def _cb():
            return "slept for %r seconds" % seconds
        # deferLater schedules _cb after `seconds` and immediately returns a
        # Deferred, so no thread is blocked while waiting.
        return deferLater(reactor, seconds, _cb)
    @rpc(Unicode, Double, Double, _returns=ByteArray)
    def say_hello_with_sleep(ctx, name, times, seconds):
        """Sends multiple hello messages by waiting given number of seconds
        inbetween."""
        times = [times] # Workaround for Python 2's lacking of nonlocal
        def _cb(response):
            # Emit one message then re-schedule ourselves until the
            # countdown in times[0] reaches zero (returning None ends it).
            if times[0] > 0:
                msg = u"Hello %s, sleeping for %f seconds for " \
                      u"%d more time(s)." % (name, seconds, times[0])
                response.append(msg.encode('utf8'))
                response.append(b'\n')
                logging.debug(msg)
                times[0] -= 1
                return deferLater(reactor, seconds, _cb, response)
        # Iterable.Push streams each appended chunk to the client as produced.
        return Iterable.Push(_cb)
def initialize(services, tns='spyne.examples.twisted.resource'):
    """Configure logging and build the spyne Application for *services*.

    Note: the ``tns`` parameter is accepted but the Application below uses a
    hard-coded namespace, matching the original example.
    """
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('spyne.protocol.xml').setLevel(logging.DEBUG)

    # Route twisted's own log messages through the stdlib logging module.
    twisted_observer = log.PythonLoggingObserver('twisted')
    log.startLoggingWithObserver(twisted_observer.emit, setStdout=False)

    return Application(services, 'spyne.examples.twisted.hello',
                       in_protocol=HttpRpc(), out_protocol=HttpRpc())
if __name__=='__main__':
    # Build the spyne app, wrap it as a twisted web resource and serve it.
    application = initialize([SomeService, SomeNonBlockingService])
    resource = TwistedWebResource(application)
    site = Site(resource)
    reactor.listenTCP(PORT, site, interface=HOST)
    logging.info("listening on: %s:%d" % (HOST,PORT))
    logging.info('wsdl is at: http://%s:%d/?wsdl' % (HOST, PORT))
    # reactor.run() blocks until shutdown; its return value becomes exit code.
    sys.exit(reactor.run())
| 34.146667 | 80 | 0.684889 | 1,300 | 0.253758 | 0 | 0 | 1,211 | 0.236385 | 0 | 0 | 3,109 | 0.606871 |
f4d2efff6733a7fa83b900da723a343483f0adb4 | 488 | py | Python | code/part_I_D/arima.py | Spacebody/MCM-ICM-2018-Problem-C | 89acbec8b7b08733002e570ff67637e7ba100190 | [
"MIT"
] | 1 | 2021-09-18T08:01:19.000Z | 2021-09-18T08:01:19.000Z | code/part_I_D/arima.py | Spacebody/MCM-ICM-2018-Problem-C | 89acbec8b7b08733002e570ff67637e7ba100190 | [
"MIT"
] | null | null | null | code/part_I_D/arima.py | Spacebody/MCM-ICM-2018-Problem-C | 89acbec8b7b08733002e570ff67637e7ba100190 | [
"MIT"
] | 1 | 2018-05-13T08:39:46.000Z | 2018-05-13T08:39:46.000Z | import pandas as pd
import re
import numpy as np
import os
import sys
from collections import OrderedDict, defaultdict
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# from theano import *
# load state data
# Read the "Year" column from the AZ sector price CSV.
az_year = pd.read_csv("data/csv/price_expenditures/sector/az/price/teacd.csv", engine='c', low_memory=True)["Year"]
# NOTE(review): pandas' date_parser expects a callable, not a Series --
# passing az_year here likely has no effect or is a bug; confirm intent.
az_price = pd.read_csv("data/csv/price_expenditures/sector/az/price/teacd.csv", engine='c', low_memory=True, date_parser=az_year)
| 28.705882 | 129 | 0.788934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.329918 |
f4d365edd139d60dd0919dc1d55ad6225c11b8e7 | 3,657 | py | Python | pysnmp/TPLINK-COMMANDER-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/TPLINK-COMMANDER-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/TPLINK-COMMANDER-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module TPLINK-COMMANDER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TPLINK-COMMANDER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:16:55 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, MibIdentifier, iso, IpAddress, ModuleIdentity, ObjectIdentity, Counter64, Bits, Integer32, Counter32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "MibIdentifier", "iso", "IpAddress", "ModuleIdentity", "ObjectIdentity", "Counter64", "Bits", "Integer32", "Counter32", "Gauge32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
clusterManage, = mibBuilder.importSymbols("TPLINK-CLUSTER-MIB", "clusterManage")
clusterConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11863, 6, 33, 1, 1, 3, 2))
commanderConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11863, 6, 33, 1, 1, 3, 2, 4))
clusterName = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 33, 1, 1, 3, 2, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: clusterName.setStatus('current')
clusterHoldTime = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 33, 1, 1, 3, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clusterHoldTime.setStatus('current')
clusterIntervalTime = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 33, 1, 1, 3, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clusterIntervalTime.setStatus('current')
commanderClusterName = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 33, 1, 1, 3, 2, 4, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: commanderClusterName.setStatus('current')
clusterIp = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 33, 1, 1, 3, 2, 4, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clusterIp.setStatus('current')
clusterIpMask = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 33, 1, 1, 3, 2, 4, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clusterIpMask.setStatus('current')
clusterCommit = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 33, 1, 1, 3, 2, 4, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("commit", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clusterCommit.setStatus('current')
mibBuilder.exportSymbols("TPLINK-COMMANDER-MIB", commanderConfig=commanderConfig, clusterIntervalTime=clusterIntervalTime, clusterConfig=clusterConfig, clusterIp=clusterIp, clusterHoldTime=clusterHoldTime, clusterIpMask=clusterIpMask, clusterName=clusterName, clusterCommit=clusterCommit, commanderClusterName=commanderClusterName)
| 114.28125 | 477 | 0.76292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,059 | 0.289582 |
f4d5f261aed4ef24ae543278e27504f15ab846bc | 2,410 | py | Python | autoPyTorch/pipeline/components/setup/augmentation/image/Resize.py | ravinkohli/Auto-PyTorch | a1512d56d4db89133e895e85765e3b72afbfe157 | [
"Apache-2.0"
] | 1 | 2021-05-12T10:11:58.000Z | 2021-05-12T10:11:58.000Z | autoPyTorch/pipeline/components/setup/augmentation/image/Resize.py | maxpark/Auto-PyTorch | 06e67de5017b4cccad9398e24a3d9f0bd8176da3 | [
"Apache-2.0"
] | 1 | 2021-06-23T21:48:03.000Z | 2021-06-23T21:48:03.000Z | autoPyTorch/pipeline/components/setup/augmentation/image/Resize.py | maxpark/Auto-PyTorch | 06e67de5017b4cccad9398e24a3d9f0bd8176da3 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, Optional, Union
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import (
CategoricalHyperparameter,
)
import imgaug.augmenters as iaa
from imgaug.augmenters.meta import Augmenter
import numpy as np
from autoPyTorch.datasets.base_dataset import BaseDatasetPropertiesType
from autoPyTorch.pipeline.components.setup.augmentation.image.base_image_augmenter import BaseImageAugmenter
from autoPyTorch.utils.common import FitRequirement, HyperparameterSearchSpace, add_hyperparameter
class Resize(BaseImageAugmenter):
    """Image augmentation component that resizes inputs to the dataset's
    declared image_height x image_width using linear interpolation."""
    def __init__(self, use_augmenter: bool = True,
                 random_state: Optional[Union[int, np.random.RandomState]] = None):
        # use_augmenter: whether the resize augmenter is active at all.
        super().__init__(use_augmenter=use_augmenter)
        self.random_state = random_state
        # Declare that fit() needs image_height/image_width from the
        # dataset properties supplied by the pipeline.
        self.add_fit_requirements([
            FitRequirement('image_height', (int,), user_defined=True, dataset_property=True),
            FitRequirement('image_width', (int,), user_defined=True, dataset_property=True)])
    def fit(self, X: Dict[str, Any], y: Any = None) -> BaseImageAugmenter:
        """Build the imgaug Resize augmenter from dataset properties in X."""
        self.check_requirements(X, y)
        if self.use_augmenter:
            self.augmenter: Augmenter = iaa.Resize(size=(X['dataset_properties']['image_height'],
                                                         X['dataset_properties']['image_width']),
                                                   interpolation='linear', name=self.get_properties()['name'])
        return self
    @staticmethod
    def get_hyperparameter_search_space(
        dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]] = None,
        use_augmenter: HyperparameterSearchSpace = HyperparameterSearchSpace(hyperparameter="use_augmenter",
                                                                             value_range=(True, False),
                                                                             default_value=True,
                                                                             ),
    ) -> ConfigurationSpace:
        """Return the config space: a single boolean 'use_augmenter' choice."""
        cs = ConfigurationSpace()
        add_hyperparameter(cs, use_augmenter, CategoricalHyperparameter)
        return cs
    @staticmethod
    def get_properties(dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]] = None
                       ) -> Dict[str, Any]:
        """Static metadata used to identify this component in the pipeline."""
        return {'name': 'Resize'}
f4d627ca669c90b09e3db2646cd7e31702dfedac | 974 | py | Python | test/unittests/chemical/building_blocks/aminoacids.py | Alexhuszagh/XLDiscoverer | 60937b1f7f2e23af4219eb26519d6b83fb4232d6 | [
"Apache-2.0",
"MIT"
] | null | null | null | test/unittests/chemical/building_blocks/aminoacids.py | Alexhuszagh/XLDiscoverer | 60937b1f7f2e23af4219eb26519d6b83fb4232d6 | [
"Apache-2.0",
"MIT"
] | null | null | null | test/unittests/chemical/building_blocks/aminoacids.py | Alexhuszagh/XLDiscoverer | 60937b1f7f2e23af4219eb26519d6b83fb4232d6 | [
"Apache-2.0",
"MIT"
] | null | null | null | '''
Unittests/Chemical/Building_Blocks/aminoacids
_____________________________________________
Unit tests for aminoacid lookups.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules/submodules
import unittest
from xldlib.chemical.building_blocks import aminoacids
# CASES
# -----
class AminoAcidTest(unittest.TestCase):
    '''Test aminoacid lookups'''
    def test_mixed_case(self):
        '''Test mixed-case lookups produce the same object'''
        lower = aminoacids.AMINOACIDS['a']
        upper = aminoacids.AMINOACIDS['A']
        # assertIs checks object identity directly (clearer than comparing
        # id()s) and assertEqual replaces the deprecated assertEquals alias.
        self.assertIs(lower, upper)
        self.assertEqual(lower, upper)
        # unittest assertions instead of bare asserts: they report properly
        # and are not stripped under `python -O`.
        self.assertIn('a', aminoacids.AMINOACIDS)
        self.assertIn('A', aminoacids.AMINOACIDS)
# TESTS
# -----
def add_tests(suite):
    """Register this module's test cases on *suite*."""
    mixed_case = AminoAcidTest('test_mixed_case')
    suite.addTest(mixed_case)
| 21.644444 | 69 | 0.691992 | 431 | 0.442505 | 0 | 0 | 0 | 0 | 0 | 0 | 485 | 0.497947 |
f4d7dad198309996969b7b1a6b4a53071fcc5d86 | 61,304 | py | Python | firewall/firewallManager.py | uzairAK/serverom-panel | 3dcde05ad618e6bef280db7d3180f926fe2ab1db | [
"MIT"
] | null | null | null | firewall/firewallManager.py | uzairAK/serverom-panel | 3dcde05ad618e6bef280db7d3180f926fe2ab1db | [
"MIT"
] | null | null | null | firewall/firewallManager.py | uzairAK/serverom-panel | 3dcde05ad618e6bef280db7d3180f926fe2ab1db | [
"MIT"
] | null | null | null | #!/usr/local/CyberCP/bin/python
import os
import os.path
import sys
import django
sys.path.append('/usr/local/CyberCP')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CyberCP.settings")
django.setup()
import json
from plogical.acl import ACLManager
import plogical.CyberCPLogFileWriter as logging
from plogical.virtualHostUtilities import virtualHostUtilities
import subprocess
from django.shortcuts import HttpResponse, render
from random import randint
import time
from plogical.firewallUtilities import FirewallUtilities
from firewall.models import FirewallRules
from plogical.modSec import modSec
from plogical.csf import CSF
from plogical.processUtilities import ProcessUtilities
from serverStatus.serverStatusUtil import ServerStatusUtil
class FirewallManager:
    # Path of the Imunify360 agent binary (not referenced in the visible
    # portion of this class -- presumably used by methods further down).
    imunifyPath = '/usr/bin/imunify360-agent'
    # Marker file whose presence indicates a CloudLinux system.
    CLPath = '/etc/sysconfig/cloudlinux'
    def __init__(self, request = None):
        # Keep the Django request around for handlers that render templates.
        self.request = request
def securityHome(self, request = None, userID = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadError()
return render(request, 'firewall/index.html')
except BaseException as msg:
return HttpResponse(str(msg))
def firewallHome(self, request = None, userID = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadError()
return render(request, 'firewall/firewall.html')
except BaseException as msg:
return HttpResponse(str(msg))
def getCurrentRules(self, userID = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadErrorJson('fetchStatus', 0)
rules = FirewallRules.objects.all()
json_data = "["
checker = 0
for items in rules:
dic = {
'id': items.id,
'name': items.name,
'proto': items.proto,
'port': items.port,
'ipAddress': items.ipAddress,
}
if checker == 0:
json_data = json_data + json.dumps(dic)
checker = 1
else:
json_data = json_data + ',' + json.dumps(dic)
json_data = json_data + ']'
final_json = json.dumps({'status': 1, 'fetchStatus': 1, 'error_message': "None", "data": json_data})
return HttpResponse(final_json)
except BaseException as msg:
final_dic = {'status': 0, 'fetchStatus': 0, 'error_message': str(msg)}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
def addRule(self, userID = None, data = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadErrorJson('add_status', 0)
ruleName = data['ruleName']
ruleProtocol = data['ruleProtocol']
rulePort = data['rulePort']
ruleIP = data['ruleIP']
FirewallUtilities.addRule(ruleProtocol, rulePort, ruleIP)
newFWRule = FirewallRules(name=ruleName, proto=ruleProtocol, port=rulePort, ipAddress=ruleIP)
newFWRule.save()
final_dic = {'status': 1, 'add_status': 1, 'error_message': "None"}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
except BaseException as msg:
final_dic = {'status': 0, 'add_status': 0, 'error_message': str(msg)}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
def deleteRule(self, userID = None, data = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadErrorJson('delete_status', 0)
ruleID = data['id']
ruleProtocol = data['proto']
rulePort = data['port']
ruleIP = data['ruleIP']
FirewallUtilities.deleteRule(ruleProtocol, rulePort, ruleIP)
delRule = FirewallRules.objects.get(id=ruleID)
delRule.delete()
final_dic = {'status': 1, 'delete_status': 1, 'error_message': "None"}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
except BaseException as msg:
final_dic = {'status': 0, 'delete_status': 0, 'error_message': str(msg)}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
def reloadFirewall(self, userID = None, data = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadErrorJson('reload_status', 0)
command = 'sudo firewall-cmd --reload'
res = ProcessUtilities.executioner(command)
if res == 1:
final_dic = {'reload_status': 1, 'error_message': "None"}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
else:
final_dic = {'reload_status': 0,
'error_message': "Can not reload firewall, see CyberCP main log file."}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
except BaseException as msg:
final_dic = {'reload_status': 0, 'error_message': str(msg)}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
def startFirewall(self, userID = None, data = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadErrorJson('start_status', 0)
command = 'sudo systemctl start firewalld'
res = ProcessUtilities.executioner(command)
if res == 1:
final_dic = {'start_status': 1, 'error_message': "None"}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
else:
final_dic = {'start_status': 0,
'error_message': "Can not start firewall, see CyberCP main log file."}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
except BaseException as msg:
final_dic = {'start_status': 0, 'error_message': str(msg)}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
def stopFirewall(self, userID = None, data = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadErrorJson('stop_status', 0)
command = 'sudo systemctl stop firewalld'
res = ProcessUtilities.executioner(command)
if res == 1:
final_dic = {'stop_status': 1, 'error_message': "None"}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
else:
final_dic = {'stop_status': 0,
'error_message': "Can not stop firewall, see CyberCP main log file."}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
except BaseException as msg:
final_dic = {'stop_status': 0, 'error_message': str(msg)}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
def firewallStatus(self, userID = None, data = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadErrorJson()
command = 'systemctl status firewalld'
status = ProcessUtilities.outputExecutioner(command)
if status.find("dead") > -1:
final_dic = {'status': 1, 'error_message': "none", 'firewallStatus': 0}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
else:
final_dic = {'status': 1, 'error_message': "none", 'firewallStatus': 1}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
except BaseException as msg:
final_dic = {'status': 0, 'error_message': str(msg)}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
def secureSSH(self, request = None, userID = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadError()
return render(request, 'firewall/secureSSH.html')
except BaseException as msg:
return HttpResponse(str(msg))
    def getSSHConfigs(self, userID = None, data = None):
        """Return SSH configuration details as JSON (admin only).

        data['type'] == "1": parse /etc/ssh/sshd_config and report the
        PermitRootLogin flag and the SSH port.
        Otherwise: list the ssh-rsa keys from /root/.ssh/authorized_keys
        (truncated key preview + user name) as a JSON string in 'data'.
        """
        try:
            currentACL = ACLManager.loadedACL(userID)
            if currentACL['admin'] == 1:
                pass
            else:
                return ACLManager.loadErrorJson()
            # NOTE: `type` shadows the builtin; kept for compatibility.
            type = data['type']
            if type == "1":
                ## temporarily changing permission for sshd files
                pathToSSH = "/etc/ssh/sshd_config"
                cat = "sudo cat " + pathToSSH
                data = ProcessUtilities.outputExecutioner(cat).split('\n')
                # Defaults if the directives are absent from sshd_config.
                permitRootLogin = 0
                sshPort = "22"
                for items in data:
                    if items.find("PermitRootLogin") > -1:
                        if items.find("Yes") > -1 or items.find("yes") > -1:
                            permitRootLogin = 1
                        continue
                    # Match "Port NNN" but skip the unrelated GatewayPorts line.
                    if items.find("Port") > -1 and not items.find("GatewayPorts") > -1:
                        sshPort = items.split(" ")[1].strip("\n")
                final_dic = {'status': 1, 'permitRootLogin': permitRootLogin, 'sshPort': sshPort}
                final_json = json.dumps(final_dic)
                return HttpResponse(final_json)
            else:
                pathToKeyFile = "/root/.ssh/authorized_keys"
                cat = "sudo cat " + pathToKeyFile
                data = ProcessUtilities.outputExecutioner(cat).split('\n')
                # Build the JSON array of keys by string concatenation.
                json_data = "["
                checker = 0
                for items in data:
                    if items.find("ssh-rsa") > -1:
                        keydata = items.split(" ")
                        try:
                            # Preview: first 50 chars of the key + its comment.
                            key = "ssh-rsa " + keydata[1][:50] + " .. " + keydata[2]
                            try:
                                # Comment is usually user@host; take the user part.
                                userName = keydata[2][:keydata[2].index("@")]
                            except:
                                userName = keydata[2]
                        except:
                            # Key line without a comment field.
                            key = "ssh-rsa " + keydata[1][:50]
                            userName = ''
                        dic = {'userName': userName,
                               'key': key,
                               }
                        if checker == 0:
                            json_data = json_data + json.dumps(dic)
                            checker = 1
                        else:
                            json_data = json_data + ',' + json.dumps(dic)
                json_data = json_data + ']'
                final_json = json.dumps({'status': 1, 'error_message': "None", "data": json_data})
                return HttpResponse(final_json)
        except BaseException as msg:
            final_dic = {'status': 0, 'error_message': str(msg)}
            final_json = json.dumps(final_dic)
            return HttpResponse(final_json)
    def saveSSHConfigs(self, userID = None, data = None):
        """Apply SSH settings (port, root login) via the privileged helper
        script, then open the new port in whichever firewall is active
        (CSF if /etc/csf exists, otherwise firewalld + the FirewallRules DB).
        """
        try:
            currentACL = ACLManager.loadedACL(userID)
            if currentACL['admin'] == 1:
                pass
            else:
                return ACLManager.loadErrorJson('saveStatus', 0)
            type = data['type']
            sshPort = data['sshPort']
            rootLogin = data['rootLogin']
            # Helper script expects "1"/"0" strings rather than booleans.
            if rootLogin == True:
                rootLogin = "1"
            else:
                rootLogin = "0"
            execPath = "/usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/firewallUtilities.py"
            execPath = execPath + " saveSSHConfigs --type " + str(type) + " --sshPort " + sshPort + " --rootLogin " + rootLogin
            output = ProcessUtilities.outputExecutioner(execPath)
            # Helper prints "1,None" on success.
            if output.find("1,None") > -1:
                csfPath = '/etc/csf'
                if os.path.exists(csfPath):
                    # CSF installed: open the port in both directions.
                    dataIn = {'protocol': 'TCP_IN', 'ports': sshPort}
                    self.modifyPorts(dataIn)
                    dataIn = {'protocol': 'TCP_OUT', 'ports': sshPort}
                    self.modifyPorts(dataIn)
                else:
                    try:
                        # Update the existing custom SSH rule: remove the old
                        # port from firewalld, then add the new one.
                        updateFW = FirewallRules.objects.get(name="SSHCustom")
                        FirewallUtilities.deleteRule("tcp", updateFW.port, "0.0.0.0/0")
                        updateFW.port = sshPort
                        updateFW.save()
                        FirewallUtilities.addRule('tcp', sshPort, "0.0.0.0/0")
                    except:
                        try:
                            # No SSHCustom rule yet: create it and drop the
                            # default firewalld ssh service entry.
                            newFireWallRule = FirewallRules(name="SSHCustom", port=sshPort, proto="tcp")
                            newFireWallRule.save()
                            FirewallUtilities.addRule('tcp', sshPort, "0.0.0.0/0")
                            command = 'firewall-cmd --permanent --remove-service=ssh'
                            ProcessUtilities.executioner(command)
                        except BaseException as msg:
                            logging.CyberCPLogFileWriter.writeToFile(str(msg))
                final_dic = {'status': 1, 'saveStatus': 1}
                final_json = json.dumps(final_dic)
                return HttpResponse(final_json)
            else:
                final_dic = {'status': 0, 'saveStatus': 0, "error_message": output}
                final_json = json.dumps(final_dic)
                return HttpResponse(final_json)
        except BaseException as msg:
            final_dic = {'status': 0 ,'saveStatus': 0, 'error_message': str(msg)}
            final_json = json.dumps(final_dic)
            return HttpResponse(final_json)
def deleteSSHKey(self, userID = None, data = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadErrorJson('delete_status', 0)
key = data['key']
execPath = "/usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/firewallUtilities.py"
execPath = execPath + " deleteSSHKey --key '" + key + "'"
output = ProcessUtilities.outputExecutioner(execPath)
if output.find("1,None") > -1:
final_dic = {'status': 1, 'delete_status': 1}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
else:
final_dic = {'status': 1, 'delete_status': 1, "error_mssage": output}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
except BaseException as msg:
final_dic = {'status': 0, 'delete_status': 0, 'error_mssage': str(msg)}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
def addSSHKey(self, userID = None, data = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadErrorJson('add_status', 0)
key = data['key']
tempPath = "/home/cyberpanel/" + str(randint(1000, 9999))
writeToFile = open(tempPath, "w")
writeToFile.write(key)
writeToFile.close()
execPath = "sudo /usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/firewallUtilities.py"
execPath = execPath + " addSSHKey --tempPath " + tempPath
output = ProcessUtilities.outputExecutioner(execPath)
if output.find("1,None") > -1:
final_dic = {'status': 1, 'add_status': 1}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
else:
final_dic = {'status': 0, 'add_status': 0, "error_mssage": output}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
except BaseException as msg:
final_dic = {'status': 0, 'add_status': 0, 'error_mssage': str(msg)}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
def loadModSecurityHome(self, request = None, userID = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadError()
if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
OLS = 1
confPath = os.path.join(virtualHostUtilities.Server_root, "conf/httpd_config.conf")
command = "sudo cat " + confPath
httpdConfig = ProcessUtilities.outputExecutioner(command).splitlines()
modSecInstalled = 0
for items in httpdConfig:
if items.find('module mod_security') > -1:
modSecInstalled = 1
break
else:
OLS = 0
modSecInstalled = 1
return render(request, 'firewall/modSecurity.html', {'modSecInstalled': modSecInstalled, 'OLS': OLS})
except BaseException as msg:
return HttpResponse(str(msg))
def installModSec(self, userID = None, data = None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadErrorJson('installModSec', 0)
execPath = "/usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/modSec.py"
execPath = execPath + " installModSec"
ProcessUtilities.popenExecutioner(execPath)
time.sleep(3)
final_json = json.dumps({'installModSec': 1, 'error_message': "None"})
return HttpResponse(final_json)
except BaseException as msg:
final_dic = {'installModSec': 0, 'error_message': str(msg)}
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
    def installStatusModSec(self, userID = None, data = None):
        """Poll the ModSecurity install log and report progress as JSON.

        The background installer writes status markers into its log:
        "[200]" = finished successfully (then install the ModSec configs),
        "[404]" = failed. Anything else means still running (abort=0 tells
        the frontend to keep polling; requestStatus carries the raw log).
        """
        try:
            command = "sudo cat " + modSec.installLogPath
            installStatus = ProcessUtilities.outputExecutioner(command)
            if installStatus.find("[200]") > -1:
                # Install finished -- push the ModSecurity configuration files.
                execPath = "/usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/modSec.py"
                execPath = execPath + " installModSecConfigs"
                output = ProcessUtilities.outputExecutioner(execPath)
                if output.find("1,None") > -1:
                    pass
                else:
                    final_json = json.dumps({
                        'error_message': "Failed to install ModSecurity configurations.",
                        'requestStatus': installStatus,
                        'abort': 1,
                        'installed': 0,
                    })
                    return HttpResponse(final_json)
                final_json = json.dumps({
                    'error_message': "None",
                    'requestStatus': installStatus,
                    'abort': 1,
                    'installed': 1,
                })
                return HttpResponse(final_json)
            elif installStatus.find("[404]") > -1:
                # Installer reported failure.
                final_json = json.dumps({
                    'abort': 1,
                    'installed': 0,
                    'error_message': "None",
                    'requestStatus': installStatus,
                })
                return HttpResponse(final_json)
            else:
                # Still running -- frontend should poll again.
                final_json = json.dumps({
                    'abort': 0,
                    'error_message': "None",
                    'requestStatus': installStatus,
                })
                return HttpResponse(final_json)
        except BaseException as msg:
            final_dic = {'abort': 1, 'installed': 0, 'error_message': str(msg)}
            final_json = json.dumps(final_dic)
            return HttpResponse(final_json)
def fetchModSecSettings(self, userID = None, data = None):
    """Return the current ModSecurity engine directives as JSON. Admin-only.

    OpenLiteSpeed: directives are parsed from conf/httpd_config.conf and the
    presence of modules/mod_security.so decides the 'installed' flag.
    LiteSpeed Enterprise: directives are parsed from conf/modsec.conf and
    'installed' is always reported as 1.
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadErrorJson('fetchStatus', 0)
        if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
            # Defaults reported when a directive is absent from the config.
            modsecurity = 0
            SecAuditEngine = 0
            SecRuleEngine = 0
            SecDebugLogLevel = "9"
            SecAuditLogRelevantStatus = '^(?:5|4(?!04))'
            SecAuditLogParts = 'ABIJDEFHZ'
            SecAuditLogType = 'Serial'
            confPath = os.path.join(virtualHostUtilities.Server_root, 'conf/httpd_config.conf')
            modSecPath = os.path.join(virtualHostUtilities.Server_root, 'modules', 'mod_security.so')
            # The module file on disk is the installation marker for OLS.
            if os.path.exists(modSecPath):
                command = "sudo cat " + confPath
                data = ProcessUtilities.outputExecutioner(command).split('\n')
                # Scan the configuration line by line for each directive.
                for items in data:
                    if items.find('modsecurity ') > -1:
                        if items.find('on') > -1 or items.find('On') > -1:
                            modsecurity = 1
                        continue
                    if items.find('SecAuditEngine ') > -1:
                        if items.find('on') > -1 or items.find('On') > -1:
                            SecAuditEngine = 1
                        continue
                    if items.find('SecRuleEngine ') > -1:
                        if items.find('on') > -1 or items.find('On') > -1:
                            SecRuleEngine = 1
                        continue
                    # Value-carrying directives: take the token after the name,
                    # but only when the directive starts the line.
                    if items.find('SecDebugLogLevel') > -1:
                        result = items.split(' ')
                        if result[0] == 'SecDebugLogLevel':
                            SecDebugLogLevel = result[1]
                        continue
                    if items.find('SecAuditLogRelevantStatus') > -1:
                        result = items.split(' ')
                        if result[0] == 'SecAuditLogRelevantStatus':
                            SecAuditLogRelevantStatus = result[1]
                        continue
                    if items.find('SecAuditLogParts') > -1:
                        result = items.split(' ')
                        if result[0] == 'SecAuditLogParts':
                            SecAuditLogParts = result[1]
                        continue
                    if items.find('SecAuditLogType') > -1:
                        result = items.split(' ')
                        if result[0] == 'SecAuditLogType':
                            SecAuditLogType = result[1]
                        continue
                final_dic = {'fetchStatus': 1,
                             'installed': 1,
                             'SecRuleEngine': SecRuleEngine,
                             'modsecurity': modsecurity,
                             'SecAuditEngine': SecAuditEngine,
                             'SecDebugLogLevel': SecDebugLogLevel,
                             'SecAuditLogParts': SecAuditLogParts,
                             'SecAuditLogRelevantStatus': SecAuditLogRelevantStatus,
                             'SecAuditLogType': SecAuditLogType,
                             }
            else:
                # Module file missing: ModSecurity is not installed.
                final_dic = {'fetchStatus': 1,
                             'installed': 0}
        else:
            # LiteSpeed Enterprise: parse conf/modsec.conf (no separate
            # 'modsecurity' toggle here).
            SecAuditEngine = 0
            SecRuleEngine = 0
            SecDebugLogLevel = "9"
            SecAuditLogRelevantStatus = '^(?:5|4(?!04))'
            SecAuditLogParts = 'ABIJDEFHZ'
            SecAuditLogType = 'Serial'
            confPath = os.path.join(virtualHostUtilities.Server_root, 'conf/modsec.conf')
            command = "sudo cat " + confPath
            data = ProcessUtilities.outputExecutioner(command).split('\n')
            for items in data:
                if items.find('SecAuditEngine ') > -1:
                    if items.find('on') > -1 or items.find('On') > -1:
                        SecAuditEngine = 1
                    continue
                if items.find('SecRuleEngine ') > -1:
                    if items.find('on') > -1 or items.find('On') > -1:
                        SecRuleEngine = 1
                    continue
                if items.find('SecDebugLogLevel') > -1:
                    result = items.split(' ')
                    if result[0] == 'SecDebugLogLevel':
                        SecDebugLogLevel = result[1]
                    continue
                if items.find('SecAuditLogRelevantStatus') > -1:
                    result = items.split(' ')
                    if result[0] == 'SecAuditLogRelevantStatus':
                        SecAuditLogRelevantStatus = result[1]
                    continue
                if items.find('SecAuditLogParts') > -1:
                    result = items.split(' ')
                    if result[0] == 'SecAuditLogParts':
                        SecAuditLogParts = result[1]
                    continue
                if items.find('SecAuditLogType') > -1:
                    result = items.split(' ')
                    if result[0] == 'SecAuditLogType':
                        SecAuditLogType = result[1]
                    continue
            final_dic = {'fetchStatus': 1,
                         'installed': 1,
                         'SecRuleEngine': SecRuleEngine,
                         'SecAuditEngine': SecAuditEngine,
                         'SecDebugLogLevel': SecDebugLogLevel,
                         'SecAuditLogParts': SecAuditLogParts,
                         'SecAuditLogRelevantStatus': SecAuditLogRelevantStatus,
                         'SecAuditLogType': SecAuditLogType,
                         }
        final_json = json.dumps(final_dic)
        return HttpResponse(final_json)
    except BaseException as msg:
        final_dic = {'fetchStatus': 0, 'error_message': str(msg)}
        final_json = json.dumps(final_dic)
        return HttpResponse(final_json)
def saveModSecConfigurations(self, userID = None, data = None):
    """Persist ModSecurity engine settings via plogical/modSec.py. Admin-only.

    The requested directives are written to a temporary file which the
    privileged helper script then merges into the server configuration.
    OpenLiteSpeed additionally carries the 'modsecurity on/off' toggle.

    NOTE(review): the temp file name is a predictable 4-digit name under
    /home/cyberpanel — consider tempfile.mkstemp; confirm the helper's
    expectations before changing.
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadErrorJson('saveStatus', 0)
        if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
            modsecurity = data['modsecurity_status']
            SecAuditEngine = data['SecAuditEngine']
            SecRuleEngine = data['SecRuleEngine']
            SecDebugLogLevel = data['SecDebugLogLevel']
            SecAuditLogParts = data['SecAuditLogParts']
            SecAuditLogRelevantStatus = data['SecAuditLogRelevantStatus']
            SecAuditLogType = data['SecAuditLogType']
            # Translate the boolean toggles into directive lines.
            if modsecurity == True:
                modsecurity = "modsecurity on"
            else:
                modsecurity = "modsecurity off"
            if SecAuditEngine == True:
                SecAuditEngine = "SecAuditEngine on"
            else:
                SecAuditEngine = "SecAuditEngine off"
            if SecRuleEngine == True:
                SecRuleEngine = "SecRuleEngine On"
            else:
                SecRuleEngine = "SecRuleEngine off"
            SecDebugLogLevel = "SecDebugLogLevel " + str(SecDebugLogLevel)
            SecAuditLogParts = "SecAuditLogParts " + str(SecAuditLogParts)
            SecAuditLogRelevantStatus = "SecAuditLogRelevantStatus " + SecAuditLogRelevantStatus
            SecAuditLogType = "SecAuditLogType " + SecAuditLogType
            ## Write the directives to a temporary hand-off file.
            tempConfigPath = "/home/cyberpanel/" + str(randint(1000, 9999))
            confPath = open(tempConfigPath, "w")
            confPath.writelines(modsecurity + "\n")
            confPath.writelines(SecAuditEngine + "\n")
            confPath.writelines(SecRuleEngine + "\n")
            confPath.writelines(SecDebugLogLevel + "\n")
            confPath.writelines(SecAuditLogParts + "\n")
            confPath.writelines(SecAuditLogRelevantStatus + "\n")
            confPath.writelines(SecAuditLogType + "\n")
            confPath.close()
            ## Hand the file to the privileged helper to apply.
            execPath = "/usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/modSec.py"
            execPath = execPath + " saveModSecConfigs --tempConfigPath " + tempConfigPath
            output = ProcessUtilities.outputExecutioner(execPath)
            # Helper prints "1,None" on success.
            if output.find("1,None") > -1:
                data_ret = {'saveStatus': 1, 'error_message': "None"}
                json_data = json.dumps(data_ret)
                return HttpResponse(json_data)
            else:
                data_ret = {'saveStatus': 0, 'error_message': output}
                json_data = json.dumps(data_ret)
                return HttpResponse(json_data)
        else:
            # LiteSpeed Enterprise: same flow, minus the 'modsecurity' toggle.
            SecAuditEngine = data['SecAuditEngine']
            SecRuleEngine = data['SecRuleEngine']
            SecDebugLogLevel = data['SecDebugLogLevel']
            SecAuditLogParts = data['SecAuditLogParts']
            SecAuditLogRelevantStatus = data['SecAuditLogRelevantStatus']
            SecAuditLogType = data['SecAuditLogType']
            if SecAuditEngine == True:
                SecAuditEngine = "SecAuditEngine on"
            else:
                SecAuditEngine = "SecAuditEngine off"
            if SecRuleEngine == True:
                SecRuleEngine = "SecRuleEngine On"
            else:
                SecRuleEngine = "SecRuleEngine off"
            SecDebugLogLevel = "SecDebugLogLevel " + str(SecDebugLogLevel)
            SecAuditLogParts = "SecAuditLogParts " + str(SecAuditLogParts)
            SecAuditLogRelevantStatus = "SecAuditLogRelevantStatus " + SecAuditLogRelevantStatus
            SecAuditLogType = "SecAuditLogType " + SecAuditLogType
            ## Write the directives to a temporary hand-off file.
            tempConfigPath = "/home/cyberpanel/" + str(randint(1000, 9999))
            confPath = open(tempConfigPath, "w")
            confPath.writelines(SecAuditEngine + "\n")
            confPath.writelines(SecRuleEngine + "\n")
            confPath.writelines(SecDebugLogLevel + "\n")
            confPath.writelines(SecAuditLogParts + "\n")
            confPath.writelines(SecAuditLogRelevantStatus + "\n")
            confPath.writelines(SecAuditLogType + "\n")
            confPath.close()
            ## Hand the file to the privileged helper to apply.
            execPath = "/usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/modSec.py"
            execPath = execPath + " saveModSecConfigs --tempConfigPath " + tempConfigPath
            output = ProcessUtilities.outputExecutioner(execPath)
            if output.find("1,None") > -1:
                data_ret = {'saveStatus': 1, 'error_message': "None"}
                json_data = json.dumps(data_ret)
                return HttpResponse(json_data)
            else:
                data_ret = {'saveStatus': 0, 'error_message': output}
                json_data = json.dumps(data_ret)
                return HttpResponse(json_data)
    except BaseException as msg:
        data_ret = {'saveStatus': 0, 'error_message': str(msg)}
        json_data = json.dumps(data_ret)
        return HttpResponse(json_data)
def modSecRules(self, request = None, userID = None):
    """Render the ModSecurity rules page. Admin-only.

    For OpenLiteSpeed the 'module mod_security' line in httpd_config.conf
    marks installation; LiteSpeed Enterprise always counts as installed.
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadError()
        if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
            confPath = os.path.join(virtualHostUtilities.Server_root, "conf/httpd_config.conf")
            command = "sudo cat " + confPath
            httpdConfig = ProcessUtilities.outputExecutioner(command).split('\n')
            modSecInstalled = 0
            for items in httpdConfig:
                if items.find('module mod_security') > -1:
                    modSecInstalled = 1
                    break
        else:
            # LiteSpeed Enterprise ships with ModSecurity support built in.
            modSecInstalled = 1
        return render(request, 'firewall/modSecurityRules.html', {'modSecInstalled': modSecInstalled})
    except BaseException as msg:
        return HttpResponse(str(msg))
def fetchModSecRules(self, userID = None, data = None):
    """Return the current custom ModSecurity rules file as JSON. Admin-only.

    OpenLiteSpeed reads conf/modsec/rules.conf (only when the module is
    loaded); LiteSpeed Enterprise reads conf/rules.conf. The rules are
    returned as a list of lines in 'currentModSecRules'.
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadErrorJson('modSecInstalled', 0)
        if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
            confPath = os.path.join(virtualHostUtilities.Server_root, "conf/httpd_config.conf")
            command = "sudo cat " + confPath
            httpdConfig = ProcessUtilities.outputExecutioner(command).split('\n')
            modSecInstalled = 0
            for items in httpdConfig:
                if items.find('module mod_security') > -1:
                    modSecInstalled = 1
                    break
            rulesPath = os.path.join(virtualHostUtilities.Server_root + "/conf/modsec/rules.conf")
            if modSecInstalled:
                command = "sudo cat " + rulesPath
                currentModSecRules = ProcessUtilities.outputExecutioner(command).split('\n')
                final_dic = {'modSecInstalled': 1,
                             'currentModSecRules': currentModSecRules}
                final_json = json.dumps(final_dic)
                return HttpResponse(final_json)
            else:
                final_dic = {'modSecInstalled': 0}
                final_json = json.dumps(final_dic)
                return HttpResponse(final_json)
        else:
            # LiteSpeed Enterprise: ModSecurity is always available.
            rulesPath = os.path.join(virtualHostUtilities.Server_root + "/conf/rules.conf")
            command = "sudo cat " + rulesPath
            currentModSecRules = ProcessUtilities.outputExecutioner(command).split('\n')
            final_dic = {'modSecInstalled': 1,
                         'currentModSecRules': currentModSecRules}
            final_json = json.dumps(final_dic)
            return HttpResponse(final_json)
    except BaseException as msg:
        final_dic = {'modSecInstalled': 0,
                     'error_message': str(msg)}
        final_json = json.dumps(final_dic)
        return HttpResponse(final_json)
def saveModSecRules(self, userID = None, data = None):
    """Save user-supplied ModSecurity rules via plogical/modSec.py. Admin-only.

    The rules text is written to modSec.tempRulesFile, then the privileged
    helper script validates/installs it and prints "1,None" on success.
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadErrorJson('saveStatus', 0)
        newModSecRules = data['modSecRules']
        ## Write the rules to the hand-off file read by the helper.
        rulesPath = open(modSec.tempRulesFile, "w")
        rulesPath.write(newModSecRules)
        rulesPath.close()
        ## Apply via the privileged helper.
        execPath = "/usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/modSec.py"
        execPath = execPath + " saveModSecRules"
        output = ProcessUtilities.outputExecutioner(execPath)
        if output.find("1,None") > -1:
            data_ret = {'saveStatus': 1, 'error_message': "None"}
            json_data = json.dumps(data_ret)
            return HttpResponse(json_data)
        else:
            data_ret = {'saveStatus': 0, 'error_message': output}
            json_data = json.dumps(data_ret)
            return HttpResponse(json_data)
    except BaseException as msg:
        data_ret = {'saveStatus': 0, 'error_message': str(msg)}
        json_data = json.dumps(data_ret)
        return HttpResponse(json_data)
def modSecRulesPacks(self, request = None, userID = None):
    """Render the ModSecurity rules-packs page. Admin-only.

    Fix: the exception handler previously returned ``HttpResponse(msg)``
    with the raw exception object; it now returns ``HttpResponse(str(msg))``
    like every sibling handler in this class (e.g. modSecRules).
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadError()
        if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
            confPath = os.path.join(virtualHostUtilities.Server_root, "conf/httpd_config.conf")
            command = "sudo cat " + confPath
            httpdConfig = ProcessUtilities.outputExecutioner(command).split('\n')
            # OLS: ModSecurity counts as installed when its module is loaded.
            modSecInstalled = 0
            for items in httpdConfig:
                if items.find('module mod_security') > -1:
                    modSecInstalled = 1
                    break
        else:
            # LiteSpeed Enterprise ships with ModSecurity support built in.
            modSecInstalled = 1
        return render(request, 'firewall/modSecurityRulesPacks.html', {'modSecInstalled': modSecInstalled})
    except BaseException as msg:
        return HttpResponse(str(msg))
def getOWASPAndComodoStatus(self, userID = None, data = None):
    """Report whether the OWASP and Comodo rule packs are active. Admin-only.

    OpenLiteSpeed: scans httpd_config.conf for 'modsec/comodo' and
    'modsec/owasp' include lines. LiteSpeed Enterprise: checks for the
    comodo_litespeed directory (OWASP reported as not installed).
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadErrorJson('modSecInstalled', 0)
        if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
            confPath = os.path.join(virtualHostUtilities.Server_root, "conf/httpd_config.conf")
            command = "sudo cat " + confPath
            httpdConfig = ProcessUtilities.outputExecutioner(command).splitlines()
            modSecInstalled = 0
            for items in httpdConfig:
                if items.find('module mod_security') > -1:
                    modSecInstalled = 1
                    break
            comodoInstalled = 0
            owaspInstalled = 0
            if modSecInstalled:
                # Re-read the config and look for each pack's include path.
                command = "sudo cat " + confPath
                httpdConfig = ProcessUtilities.outputExecutioner(command).splitlines()
                for items in httpdConfig:
                    if items.find('modsec/comodo') > -1:
                        comodoInstalled = 1
                    elif items.find('modsec/owasp') > -1:
                        owaspInstalled = 1
                    # Stop early once both packs have been found.
                    if owaspInstalled == 1 and comodoInstalled == 1:
                        break
                final_dic = {
                    'modSecInstalled': 1,
                    'owaspInstalled': owaspInstalled,
                    'comodoInstalled': comodoInstalled
                }
                final_json = json.dumps(final_dic)
                return HttpResponse(final_json)
            else:
                final_dic = {'modSecInstalled': 0}
                final_json = json.dumps(final_dic)
                return HttpResponse(final_json)
        else:
            # LiteSpeed Enterprise: only Comodo is supported; presence of
            # its rules directory marks it installed.
            comodoInstalled = 0
            owaspInstalled = 0
            try:
                command = 'sudo ls /usr/local/lsws/conf/comodo_litespeed/'
                output = ProcessUtilities.outputExecutioner(command)
                if output.find('No such') > -1:
                    comodoInstalled = 0
                else:
                    comodoInstalled = 1
            except subprocess.CalledProcessError:
                pass
            final_dic = {
                'modSecInstalled': 1,
                'owaspInstalled': owaspInstalled,
                'comodoInstalled': comodoInstalled
            }
            final_json = json.dumps(final_dic)
            return HttpResponse(final_json)
    except BaseException as msg:
        final_dic = {'modSecInstalled': 0, 'error_message': str(msg)}
        final_json = json.dumps(final_dic)
        return HttpResponse(final_json)
def installModSecRulesPack(self, userID = None, data = None):
    """Install or remove a ModSecurity rules pack via plogical/modSec.py.

    Admin-only. 'packName' doubles as the helper-script sub-command
    (e.g. installOWASP / disableOWASP). On LiteSpeed Enterprise, OWASP
    actions are rejected as not yet available.

    NOTE(review): 'packName' comes from request data and is concatenated
    into the command line unvalidated — confirm ProcessUtilities does not
    pass it through a shell, or whitelist the accepted pack names.
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadErrorJson('installStatus', 0)
        packName = data['packName']
        if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
            execPath = "/usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/modSec.py"
            execPath = execPath + " " + packName
            output = ProcessUtilities.outputExecutioner(execPath)
            # Helper prints "1,None" on success.
            if output.find("1,None") > -1:
                data_ret = {'installStatus': 1, 'error_message': "None"}
                json_data = json.dumps(data_ret)
                return HttpResponse(json_data)
            else:
                data_ret = {'installStatus': 0, 'error_message': output}
                json_data = json.dumps(data_ret)
                return HttpResponse(json_data)
        else:
            # OWASP rules are not supported on LiteSpeed Enterprise yet.
            if packName == 'disableOWASP' or packName == 'installOWASP':
                final_json = json.dumps({'installStatus': 0, 'error_message': "OWASP will be available later.", })
                return HttpResponse(final_json)
            execPath = "/usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/modSec.py"
            execPath = execPath + " " + packName
            output = ProcessUtilities.outputExecutioner(execPath)
            if output.find("1,None") > -1:
                data_ret = {'installStatus': 1, 'error_message': "None"}
                json_data = json.dumps(data_ret)
                return HttpResponse(json_data)
            else:
                data_ret = {'installStatus': 0, 'error_message': output}
                json_data = json.dumps(data_ret)
                return HttpResponse(json_data)
    except BaseException as msg:
        data_ret = {'installStatus': 0, 'error_message': str(msg)}
        json_data = json.dumps(data_ret)
        return HttpResponse(json_data)
def getRulesFiles(self, userID = None, data = None):
    """List the rule files of a ModSecurity rules pack with enable status.

    Admin-only. OpenLiteSpeed: parses 'modsec/<packName>' include lines
    from httpd_config.conf (a leading '#' means disabled). LiteSpeed
    Enterprise: lists /usr/local/lsws/conf/comodo_litespeed, where a
    '.bak' suffix marks a disabled file. The per-file records are returned
    as a JSON-array *string* in the 'data' field.

    Fix: the previous code stripped the '.bak' suffix with
    ``fileName.rstrip('.bak')``, which removes any trailing '.', 'b', 'a',
    'k' characters (e.g. 'block.bak' -> 'bloc'); the suffix is now removed
    by length.
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadErrorJson('fetchStatus', 0)
        packName = data['packName']
        if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
            confPath = os.path.join(virtualHostUtilities.Server_root, 'conf/httpd_config.conf')
            command = "sudo cat " + confPath
            httpdConfig = ProcessUtilities.outputExecutioner(command).splitlines()
            # The JSON array is built manually so 'data' stays a string payload.
            json_data = "["
            checker = 0
            counter = 0
            for items in httpdConfig:
                if items.find('modsec/' + packName) > -1:
                    counter = counter + 1
                    # A leading '#' means the include line is commented out.
                    if items[0] == '#':
                        status = False
                    else:
                        status = True
                    fileName = items.lstrip('#')
                    fileName = fileName.split('/')[-1]
                    dic = {
                        'id': counter,
                        'fileName': fileName,
                        'packName': packName,
                        'status': status,
                    }
                    if checker == 0:
                        json_data = json_data + json.dumps(dic)
                        checker = 1
                    else:
                        json_data = json_data + ',' + json.dumps(dic)
            json_data = json_data + ']'
            final_json = json.dumps({'fetchStatus': 1, 'error_message': "None", "data": json_data})
            return HttpResponse(final_json)
        else:
            if packName == 'owasp':
                final_json = json.dumps({'fetchStatus': 0, 'error_message': "OWASP will be available later.", })
                return HttpResponse(final_json)
            comodoPath = '/usr/local/lsws/conf/comodo_litespeed'
            # Temporarily take ownership so the directory can be listed.
            command = 'sudo chown -R cyberpanel:cyberpanel /usr/local/lsws/conf'
            ProcessUtilities.executioner(command)
            json_data = "["
            counter = 0
            checker = 0
            for fileName in os.listdir(comodoPath):
                if fileName == 'categories.conf':
                    continue
                if fileName.endswith('bak'):
                    # Disabled rule files are kept as '<name>.conf.bak'.
                    status = 0
                    fileName = fileName[:-len('.bak')]
                elif fileName.endswith('conf'):
                    status = 1
                else:
                    continue
                dic = {
                    'id': counter,
                    'fileName': fileName,
                    'packName': packName,
                    'status': status,
                }
                counter = counter + 1
                if checker == 0:
                    json_data = json_data + json.dumps(dic)
                    checker = 1
                else:
                    json_data = json_data + ',' + json.dumps(dic)
            # Restore LiteSpeed's ownership of its configuration tree.
            command = 'sudo chown -R lsadm:lsadm /usr/local/lsws/conf'
            ProcessUtilities.executioner(command)
            json_data = json_data + ']'
            final_json = json.dumps({'fetchStatus': 1, 'error_message': "None", "data": json_data})
            return HttpResponse(final_json)
    except BaseException as msg:
        final_dic = {'fetchStatus': 0, 'error_message': str(msg)}
        final_json = json.dumps(final_dic)
        return HttpResponse(final_json)
def enableDisableRuleFile(self, userID = None, data = None):
    """Toggle a single rule file of a pack on/off via plogical/modSec.py.

    Admin-only. 'status' is the file's *current* state, so True means the
    client asks to disable it and False to enable it.

    NOTE(review): 'packName' and 'fileName' come from request data and are
    concatenated into the command line unvalidated — confirm how
    ProcessUtilities executes this before exposing it further.
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadErrorJson('saveStatus', 0)
        packName = data['packName']
        fileName = data['fileName']
        currentStatus = data['status']
        # Invert: currently-enabled files get disabled and vice versa.
        if currentStatus == True:
            functionName = 'disableRuleFile'
        else:
            functionName = 'enableRuleFile'
        execPath = "/usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/modSec.py"
        execPath = execPath + " " + functionName + ' --packName ' + packName + ' --fileName ' + fileName
        output = ProcessUtilities.outputExecutioner(execPath)
        if output.find("1,None") > -1:
            data_ret = {'saveStatus': 1, 'error_message': "None"}
            json_data = json.dumps(data_ret)
            return HttpResponse(json_data)
        else:
            data_ret = {'saveStatus': 0, 'error_message': output}
            json_data = json.dumps(data_ret)
            return HttpResponse(json_data)
    except BaseException as msg:
        data_ret = {'saveStatus': 0, 'error_message': str(msg)}
        json_data = json.dumps(data_ret)
        return HttpResponse(json_data)
def csf(self):
    """Render the CSF (ConfigServer Firewall) page. Admin-only.

    CSF is considered installed unless running 'csf -h' fails or reports
    "command not found".
    """
    try:
        userID = self.request.session['userID']
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadError()
        csfInstalled = 1
        try:
            command = 'csf -h'
            output = ProcessUtilities.outputExecutioner(command)
            if output.find("command not found") > -1:
                csfInstalled = 0
        except subprocess.CalledProcessError:
            csfInstalled = 0
        return render(self.request,'firewall/csf.html', {'csfInstalled' : csfInstalled})
    except BaseException as msg:
        return HttpResponse(str(msg))
def installCSF(self):
    """Launch the CSF installer in the background and report that it started."""
    try:
        userID = self.request.session['userID']
        currentACL = ACLManager.loadedACL(userID)
        # Only administrators may install the firewall.
        if currentACL['admin'] != 1:
            return ACLManager.loadErrorJson('installStatus', 0)

        command = "sudo /usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/csf.py" + " installCSF"
        ProcessUtilities.popenExecutioner(command)
        # Give the detached installer a moment to start before replying.
        time.sleep(2)
        return HttpResponse(json.dumps({"installStatus": 1}))
    except BaseException as msg:
        return HttpResponse(json.dumps({'installStatus': 0, 'error_message': str(msg)}))
def installStatusCSF(self):
    """Poll the CSF install log and report progress as JSON.

    The installer writes "[200]" on success and "[404]" on failure; in
    either terminal state the log file is deleted so a later poll starts
    clean. 'abort' tells the client whether to stop polling.
    """
    try:
        userID = self.request.session['userID']
        currentACL = ACLManager.loadedACL(userID)
        installStatus = ProcessUtilities.outputExecutioner("sudo cat " + CSF.installLogPath)
        if installStatus.find("[200]")>-1:
            # Finished successfully: clean up the log before answering.
            command = 'sudo rm -f ' + CSF.installLogPath
            ProcessUtilities.executioner(command)
            final_json = json.dumps({
                'error_message': "None",
                'requestStatus': installStatus,
                'abort':1,
                'installed': 1,
            })
            return HttpResponse(final_json)
        elif installStatus.find("[404]") > -1:
            # Failed: clean up the log and tell the client to stop polling.
            command = 'sudo rm -f ' + CSF.installLogPath
            ProcessUtilities.executioner(command)
            final_json = json.dumps({
                'abort':1,
                'installed':0,
                'error_message': "None",
                'requestStatus': installStatus,
            })
            return HttpResponse(final_json)
        else:
            # Still running; the client keeps polling.
            final_json = json.dumps({
                'abort':0,
                'error_message': "None",
                'requestStatus': installStatus,
            })
            return HttpResponse(final_json)
    except BaseException as msg:
        final_dic = {'abort':1, 'installed':0, 'error_message': str(msg)}
        final_json = json.dumps(final_dic)
        return HttpResponse(final_json)
def removeCSF(self):
    """Launch CSF removal in the background and report that it started."""
    try:
        userID = self.request.session['userID']
        currentACL = ACLManager.loadedACL(userID)
        # Only administrators may remove the firewall.
        if currentACL['admin'] != 1:
            return ACLManager.loadErrorJson('installStatus', 0)

        command = "sudo /usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/csf.py" + " removeCSF"
        ProcessUtilities.popenExecutioner(command)
        # Brief pause so the detached remover gets going before we answer.
        time.sleep(2)
        return HttpResponse(json.dumps({"installStatus": 1}))
    except BaseException as msg:
        return HttpResponse(json.dumps({'installStatus': 0, 'error_message': str(msg)}))
def fetchCSFSettings(self):
    """Return the current CSF configuration (testing mode, open ports,
    firewall status) as JSON. Admin-only.

    Any failure is reported as 'CSF is not installed.' — the original
    exception message is intentionally discarded here.
    """
    try:
        userID = self.request.session['userID']
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadErrorJson('fetchStatus', 0)
        currentSettings = CSF.fetchCSFSettings()
        data_ret = {"fetchStatus": 1, 'testingMode' : currentSettings['TESTING'],
                    'tcpIN' : currentSettings['tcpIN'],
                    'tcpOUT': currentSettings['tcpOUT'],
                    'udpIN': currentSettings['udpIN'],
                    'udpOUT': currentSettings['udpOUT'],
                    'firewallStatus': currentSettings['firewallStatus']
                    }
        json_data = json.dumps(data_ret)
        return HttpResponse(json_data)
    except BaseException as msg:
        final_dic = {'fetchStatus': 0, 'error_message': 'CSF is not installed.'}
        final_json = json.dumps(final_dic)
        return HttpResponse(final_json)
def changeStatus(self):
    """Toggle a CSF controller (e.g. enable/disable/testing) via csf.py.

    Admin-only; 'controller' and 'status' are read from the JSON request
    body and forwarded to the privileged helper.

    NOTE(review): both values are concatenated into the command line
    unvalidated — confirm how ProcessUtilities executes this, or
    whitelist the accepted controller/status values.
    """
    try:
        userID = self.request.session['userID']
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadErrorJson()
        data = json.loads(self.request.body)
        controller = data['controller']
        status = data['status']
        execPath = "sudo /usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/csf.py"
        execPath = execPath + " changeStatus --controller " + controller + " --status " + status
        output = ProcessUtilities.outputExecutioner(execPath)
        # Helper prints "1,None" on success.
        if output.find("1,None") > -1:
            data_ret = {"status": 1}
            json_data = json.dumps(data_ret)
            return HttpResponse(json_data)
        else:
            data_ret = {'status': 0, 'error_message': output}
            json_data = json.dumps(data_ret)
            return HttpResponse(json_data)
    except BaseException as msg:
        final_dic = {'status': 0, 'error_message': str(msg)}
        final_json = json.dumps(final_dic)
        return HttpResponse(final_json)
def modifyPorts(self, data = None):
    """Replace the CSF open-port list for one protocol via csf.py.

    Admin-only. The port list is written to /tmp/ports and the helper
    script is pointed at that file.

    NOTE(review): /tmp/ports is a fixed, world-writable-directory path
    shared by concurrent requests — consider a per-request temp file.
    """
    try:
        userID = self.request.session['userID']
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadErrorJson()
        protocol = data['protocol']
        ports = data['ports']
        portsPath = '/tmp/ports'
        # Start from a clean hand-off file.
        if os.path.exists(portsPath):
            os.remove(portsPath)
        writeToFile = open(portsPath, 'w')
        writeToFile.write(ports)
        writeToFile.close()
        execPath = "sudo /usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/csf.py"
        execPath = execPath + " modifyPorts --protocol " + protocol + " --ports " + portsPath
        output = ProcessUtilities.outputExecutioner(execPath)
        # Helper prints "1,None" on success.
        if output.find("1,None") > -1:
            data_ret = {"status": 1}
            json_data = json.dumps(data_ret)
            return HttpResponse(json_data)
        else:
            data_ret = {'status': 0, 'error_message': output}
            json_data = json.dumps(data_ret)
            return HttpResponse(json_data)
    except BaseException as msg:
        final_dic = {'status': 0, 'error_message': str(msg)}
        final_json = json.dumps(final_dic)
        return HttpResponse(final_json)
def modifyIPs(self):
    """Allow or block an IP address in CSF. Admin-only.

    'mode' ('allowIP' or 'blockIP') and 'ipAddress' come from the JSON
    request body; any other mode is silently ignored and still reported
    as success.
    """
    try:
        userID = self.request.session['userID']
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadErrorJson()
        data = json.loads(self.request.body)
        mode = data['mode']
        ipAddress = data['ipAddress']
        if mode == 'allowIP':
            CSF.allowIP(ipAddress)
        elif mode == 'blockIP':
            CSF.blockIP(ipAddress)
        data_ret = {"status": 1}
        json_data = json.dumps(data_ret)
        return HttpResponse(json_data)
    except BaseException as msg:
        final_dic = {'status': 0, 'error_message': str(msg)}
        final_json = json.dumps(final_dic)
        return HttpResponse(final_json)
def imunify(self):
    """Render the Imunify360 page, or a 'not available' page when either
    CloudLinux or the Imunify agent is missing. Admin-only.

    Fix: the machine-IP file was opened without ever being closed; the
    read now uses a ``with`` block so the handle is always released.
    """
    try:
        userID = self.request.session['userID']
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] == 1:
            pass
        else:
            return ACLManager.loadError()

        # Server IP recorded at install time; first line only.
        ipFile = "/etc/cyberpanel/machineIP"
        with open(ipFile) as f:
            ipData = f.read()
        ipAddress = ipData.split('\n', 1)[0]

        data = {}
        data['ipAddress'] = ipAddress
        # Presence of these paths marks CloudLinux / Imunify availability.
        if os.path.exists(FirewallManager.CLPath):
            data['CL'] = 1
        else:
            data['CL'] = 0
        if os.path.exists(FirewallManager.imunifyPath):
            data['imunify'] = 1
        else:
            data['imunify'] = 0

        # Imunify is only offered on CloudLinux with the agent installed.
        if data['CL'] == 0:
            return render(self.request, 'firewall/notAvailable.html', data)
        elif data['imunify'] == 0:
            return render(self.request, 'firewall/notAvailable.html', data)
        else:
            return render(self.request, 'firewall/imunify.html', data)
    except BaseException as msg:
        return HttpResponse(str(msg))
def submitinstallImunify(self):
try:
userID = self.request.session['userID']
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
logging.CyberCPLogFileWriter.statusWriter(ServerStatusUtil.lswsInstallStatusPath,
'Not authorized to install container packages. [404].',
1)
return 0
data = json.loads(self.request.body)
execPath = "/usr/local/CyberCP/bin/python /usr/local/CyberCP/CLManager/CageFS.py"
execPath = execPath + " --function submitinstallImunify --key %s" % (data['key'])
ProcessUtilities.popenExecutioner(execPath)
data_ret = {'status': 1, 'error_message': 'None'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except BaseException as msg:
logging.CyberCPLogFileWriter.statusWriter(ServerStatusUtil.lswsInstallStatusPath, str(msg) + ' [404].', 1)
| 38.0062 | 129 | 0.511745 | 60,556 | 0.987799 | 0 | 0 | 0 | 0 | 0 | 0 | 9,466 | 0.154411 |
f4d8b0ea61e089a5c1497a5c73511f8dba6105a9 | 8,970 | py | Python | operacoes.py | cleber-si/Calculadora-Clebsh-Gordan | e9645b0145d8a0148bf7c57e56ed9b5859335e91 | [
"MIT"
] | null | null | null | operacoes.py | cleber-si/Calculadora-Clebsh-Gordan | e9645b0145d8a0148bf7c57e56ed9b5859335e91 | [
"MIT"
] | null | null | null | operacoes.py | cleber-si/Calculadora-Clebsh-Gordan | e9645b0145d8a0148bf7c57e56ed9b5859335e91 | [
"MIT"
] | null | null | null | import sympy as sp
from sympy.solvers.solveset import linsolve
def printa_latex(eq):
    """Build the LaTeX code for a coupled-basis expansion.

    `eq` is [[coef, J, M], [[CG, j1, m1, j2, m2], ...]]: the left side is
    rendered as |J; M> and each right-hand term as CG|j1, m1; j2, m2>.
    Returns the full LaTeX string.
    """
    # Read J and M from the left-hand side of the equation
    J_lt = eq[0][1]
    M_lt = eq[0][2]
    # LaTeX for the left-hand side: |J; M> =
    lado_esquerdo = '|' + sp.latex(J_lt) + ';' + sp.latex(M_lt) + '\\rangle' + '='
    # Accumulator for the whole expression
    termos = lado_esquerdo
    # Number of terms on the right-hand side
    n_termos = len(eq[1])
    # Render each right-hand term and append it to the accumulator
    for i in range(n_termos):
        CS = eq[1][i][0]
        j1 = eq[1][i][1]
        m1 = eq[1][i][2]
        j2 = eq[1][i][3]
        m2 = eq[1][i][4]
        termo = sp.latex(CS) + '|' + sp.latex(j1) + ',' + sp.latex(m1) + ';' + sp.latex(j2) + ',' + sp.latex(m2) + '\\rangle'
        # Sign handling: only prepend '+' for a non-negative coefficient
        # that is not the first term (negative coefficients carry '-').
        if i == 0:
            termos += termo
        else:
            if CS < 0:
                termos += termo
            else:
                termos += '+' + termo
    return termos
def calcula_possiveis_m_iniciais(j):
    """Return every allowed magnetic quantum number m for a given j,
    from +j down to -j in unit steps (works for half-integer j too)."""
    valores = []
    atual = j
    while atual + j >= 0:
        valores.append(atual)
        atual -= 1
    return valores
def eleva(ket, ket_somado=False):
    """Apply the raising operator J(+) to a ket.

    With ket_somado=True, `ket` is [coef, J, M] (the total-J ket on the
    left of the equation) and a single raised ket [coef', J, M+1] is
    returned. Otherwise `ket` is [coef, j1, j2, m1, m2] and J(+) =
    J1(+) + J2(+) yields up to two product kets; terms that would raise
    an m past its j are replaced by the [0]*5 null placeholder.
    """
    # Raising the left-hand-side total-J ket: |J, M> -> sqrt(J(J+1)-M(M+1)) |J, M+1>
    if ket_somado:
        coef = ket[0]
        j = ket[1]
        m = ket[2]
        coef = coef * sp.sqrt(j*(j+1)-m*(m+1))
        ket_elev = [coef, j, m+1]
    else:
        coef = ket[0]
        # A null placeholder ket stays null.
        if coef == 0:
            return [0]*5
        j1 = ket[1]
        j2 = ket[2]
        m1 = ket[3]
        m2 = ket[4]
        # One term per sub-angular-momentum being raised.
        coef1 = coef * sp.sqrt(j1*(j1+1)-m1*(m1+1))
        coef2 = coef * sp.sqrt(j2*(j2+1)-m2*(m2+1))
        # Drop (annihilate) any term where m is already at its maximum j.
        if m1 >= j1 and m2 >= j2:
            ket_elev = []
        elif m1 >= j1:
            ket_elev = [[0]*5, [coef2, j1, j2, m1, m2+1]]
        elif m2 >= j2:
            ket_elev = [[coef1, j1, j2, m1+1, m2], [0]*5]
        else:
            ket_elev = [[coef1, j1, j2, m1+1, m2], [coef2, j1, j2, m1, m2+1]]
    return ket_elev
# Collect like terms produced by applying J(+) to every ket.
def soma_kets_iguais(kets):
    """Merge a list of ket lists into one, summing the coefficients of
    kets that share the same quantum numbers [j1, j2, m1, m2].

    `kets` is a list of sub-lists, each sub-list holding kets of the form
    [coef, j1, j2, m1, m2]. All sub-lists after the first are folded into
    the first one; zero-coefficient kets (including the [0]*5 null
    placeholders produced by `eleva`) are dropped from the result, which
    is returned as a single-element list [merged_kets].

    Fixes over the original implementation: the lookup list no longer
    goes out of sync after a pop (which could merge the wrong
    coefficients), kets are no longer removed from a list while iterating
    it (which skipped entries and could leave zero kets behind), and the
    dead `i += 1` / unreachable `len(kets) == tam` branch are gone.
    """
    while len(kets) > 1:
        extra = kets.pop(1)
        for novo in extra:
            # Try to fold this ket into an existing non-null ket with the
            # same quantum numbers; otherwise keep it as a new term.
            for existente in kets[0]:
                if existente[0] != 0 and existente[1:] == novo[1:]:
                    existente[0] += novo[0]
                    break
            else:
                kets[0].append(novo)
    # Drop null placeholders and terms that cancelled out.
    kets[0] = [ket for ket in kets[0] if ket[0] != 0]
    return kets
def j_mais(eq):
    """Apply J(+) to a whole state equation and normalize the result.

    `eq` is [[coef, J, M], [terms...]]: the first element is the total-J
    ket, the rest are the product-basis terms. Each side is raised with
    `eleva`, like terms on the right are collected, null kets removed,
    and the equation is divided by the left-hand coefficient so the
    |J, M+1> ket has coefficient 1.
    """
    kets = []
    cont = 1
    for ket in eq:
        if eq.index(ket) == 0:
            # Left-hand side: the summed total-J ket.
            kets.append(eleva(ket, ket_somado=True))
        else:
            # Right-hand side: raise every product ket.
            aux = []
            for kt in ket:
                cont += 1
                aux.append(eleva(kt))
            kets.append(aux)
    """
    If len(kets[1]) > 1, applying J(+) produced terms (kets) that can be
    collected together.
    """
    if len(kets[1]) > 1:
        kets[1] = soma_kets_iguais(kets[1])
    # Remove null kets
    kets[1] = list(filter(lambda a: a != [0]*5, kets[1][0]))
    # Divide the equation by the left-hand-side coefficient
    coef = kets[0][0]
    kets[0][0] = 1
    for ket in kets[1]:
        ket[0] /= coef
    return kets
# Find the m pairs a state can have when changing subspace.
def seleciona_ms(m1, m2, M):
    """Return every [m1, m2] pair whose sum equals the total projection M,
    preserving the m1-outer / m2-inner ordering."""
    return [[a, b] for a in m1 for b in m2 if a + b == M]
def muda_subespaco(conj, j1, j2, m1, m2):
    """Build the top state |J, M=-J> of the next-lower J subspace.

    `conj` holds the equations of the subspaces found so far; the new
    J is j1 + j2 - len(conj). The new state's coefficients are obtained
    from orthogonality with the reference equations plus normalization,
    solved with SymPy. Returns [[1, J, M], [[CG, j1, j2, m1, m2], ...]].
    """
    # Determine which subspace we are moving into
    J = j1 + j2 - len(conj)
    M = -J
    # Possible m pairs of the new subspace
    pms = seleciona_ms(m1, m2, M)
    # Compare each [m1, m2] pair against the pairs allowed in the new subspace
    comparacao = []
    for i in range(len(conj)):
        sub_comparacao = []
        conj_ref = conj[i]
        # Collect every [m1, m2] pair of each state equation of a subspace
        lista = []
        for i in range(len(conj_ref)):
            sub_lista = []
            for j in range(len(conj_ref[i][1])):
                sub_lista.append(conj_ref[i][1][j][3:])
            lista.append(sub_lista)
        '''
        Compare each [m1, m2] pair of this subspace's equations with the
        pairs stored in 'pms'. For each pair not present in 'pms' the
        equation scores one point. The equation chosen to represent the new
        subspace is the one scoring 0, i.e. the one whose pairs all match
        'pms'. Note that an equation may have fewer terms than 'pms' has
        pairs.
        '''
        for i in range(len(lista)):
            sub_comparacao.append(0)
            for j in range(len(lista[i])):
                if not(lista[i][j] in pms):
                    sub_comparacao[i] += 1
        comparacao.append(sub_comparacao)
    eqs_ref = []
    indices = []
    coefs = []
    for i in range(len(comparacao)):
        sub_coefs = []
        indices.append(comparacao[i].index(0))
        eqs_ref.append(conj[i][indices[i]])
        # Fill the states with any missing terms.
        # E.g. |1, 0> for j1 = j2 = 1 lacks the term 0 * |j1, 2, 0, 0> = 0.
        if len(eqs_ref[i][1]) < len(pms):
            for j in range(len(pms)):
                if not pms[j] == eqs_ref[i][1][j][3:]:
                    eqs_ref[i][1].insert(j, [0, j1, j2, pms[j][0], pms[j][1]])
                    break
        # eqs_ref[i-th eq][side of the equality]
        # eqs_ref[i-th eq][0][ket element]
        # eqs_ref[i-th eq][1][j-th term][ket element]
        # sub_coefs[coefficient][subspace number][matching m pair]
        for j in range(len(eqs_ref[i][1])):
            sub_coefs.append([eqs_ref[i][1][j][0], eqs_ref[i][0][1], eqs_ref[i][1][j][3:]])
        coefs.append(sub_coefs)
    # Make sure the elements of 'coefs' are size-consistent
    for i in range(len(coefs)):
        if len(coefs[i]) < len(pms):
            for j in range(len(pms)):
                if not pms[j] == coefs[i][j][2]:
                    coefs[i].insert(j, [0, j1, j2, pms[j][0], pms[j][1]])
                    break
    # Unknown coefficients of the change-of-basis system (like c1, ..., cn)
    a, b, c, d, e, f, g = sp.symbols('a, b, c, d, e, f, g')
    variaveis = [a, b, c, d, e, f, g]
    num_variaveis = len(variaveis)
    # Build the preliminary matrix representing the system
    linhas = []
    for i in range(num_variaveis-1):
        linhas.append([0]*(num_variaveis+1))
    # Fill the matrix with the coefficients
    for i in range(len(pms)-1):
        for j in range(len(pms)):
            linhas[i][j] = coefs[i][j][0]
        #pass
    # Build the SymPy matrix representing the system
    Matriz = sp.Matrix(linhas)
    # Build the linear system from 'Matriz'
    system = A, x = Matriz[:, :-1], Matriz[:, -1]
    # Solve the system
    r = linsolve(system, a, b, c, d, e, f, g)
    '''
    The normalization equation is |a1|^2 + |a2|^2 + ... + |an|^2 = 1.
    Subtracting 1 from both sides leaves a -1 term on the left. The SymPy
    solvers used here always treat the right-hand side as 0, hence this
    small manipulation and the reason 'eq_normalizacao' starts at -1.
    '''
    eq_normalizacao = -1
    for i in range(len(pms)):
        eq_normalizacao += r.args[0][i]**2
    # Inspect the sign of the first term of the solution
    sinal = str(r.args[0][0])
    correcao = -1 if sinal[0] == '-' else 1
    # Solve the normalization equation using the system's solution
    s = sp.solve(eq_normalizacao, variaveis[len(pms)-1])
    # Keep only the positive coefficient, applying the sign correction
    coef = max(s)*correcao
    # Compute the Clebsch-Gordan coefficients and assemble the new-basis ket
    CG = []
    novo_ket = [[1, J, M], []]
    for i in range(len(pms)):
        CG.append(r.args[0][i]/variaveis[len(pms)-1] * coef)
        novo_ket[1].append([CG[i], j1, j2, pms[i][0], pms[i][1]])
    return novo_ket
| 29.313725 | 125 | 0.535897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,173 | 0.350608 |
f4d92ae8855426c9f556531456dc67c3b915c5e8 | 1,653 | py | Python | src/routers/user.py | momonoki1990/fastapi-todo-list | fffcc072ab1181ce63f163c2bf0614551b3e40ed | [
"MIT"
] | 1 | 2022-02-17T07:35:43.000Z | 2022-02-17T07:35:43.000Z | src/routers/user.py | momonoki1990/fastapi-todo-list-api | fffcc072ab1181ce63f163c2bf0614551b3e40ed | [
"MIT"
] | 2 | 2021-12-05T06:37:35.000Z | 2022-01-04T11:08:10.000Z | src/routers/user.py | momonoki1990/fastapi-todo-list | fffcc072ab1181ce63f163c2bf0614551b3e40ed | [
"MIT"
] | 1 | 2022-01-11T02:02:31.000Z | 2022-01-11T02:02:31.000Z | from typing import List
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm
from sqlalchemy.ext.asyncio import AsyncSession
from src.schema import user as user_schema, task as task_schema
from src.cruds import user as user_crud
from src.libs import authenticate
from src.db import get_db
# Router for user/account endpoints; mounted at the application root (no prefix).
router = APIRouter(prefix="", tags=["user"])
@router.post("/user", response_model=user_schema.Token)
async def register_user(
    form_data: user_schema.UserCreate = Depends(),
    db: AsyncSession = Depends(get_db)
):
    """Create a new user account and immediately issue a bearer token for it."""
    # Never persist the plaintext password; replace it with its hash first.
    form_data.password = authenticate.get_hashed_password(form_data.password)
    created = await user_crud.create_user(db, form_data)
    token = authenticate.create_access_token(created.username)
    return {"access_token": token, "token_type": "bearer"}
@router.post("/token", response_model=user_schema.Token)
async def login_for_access_token(
    form_data: OAuth2PasswordRequestForm = Depends(),
    db: AsyncSession = Depends(get_db)
):
    """Exchange username/password credentials for a bearer access token."""
    account = await authenticate.authenticate_user(
        db, form_data.username, form_data.password)
    if account:
        return {
            "access_token": authenticate.create_access_token(account.username),
            "token_type": "bearer",
        }
    # Credentials did not match an account: reply 401 with the standard
    # WWW-Authenticate challenge header.
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Incorrect username or password",
        headers={"WWW-Authenticate": "Bearer"}
    )
@router.get("/users/me", response_model=user_schema.User)
async def read_users_me(current_user: user_schema.User = Depends(authenticate.get_current_active_user)):
    # Return the authenticated, active user resolved by the dependency.
    return current_user | 42.384615 | 104 | 0.762855 | 0 | 0 | 0 | 0 | 1,249 | 0.755596 | 1,078 | 0.652148 | 160 | 0.096794
f4da84214e93efb0d8a87ad63fc141e1f58417ac | 166 | py | Python | maguey/tests/test_systems_api.py | andrewmagill/maguey | 54efb60a5cab432cf5a3f1cbdaae0d1ffd1f3763 | [
"MIT"
] | null | null | null | maguey/tests/test_systems_api.py | andrewmagill/maguey | 54efb60a5cab432cf5a3f1cbdaae0d1ffd1f3763 | [
"MIT"
] | null | null | null | maguey/tests/test_systems_api.py | andrewmagill/maguey | 54efb60a5cab432cf5a3f1cbdaae0d1ffd1f3763 | [
"MIT"
] | null | null | null | from unittest import TestCase
import maguey
class TestSystems(TestCase):
    """Placeholder test suite for maguey systems; both tests are still stubs."""
    def test_add_system(self):
        # TODO: implement — currently a stub that always passes.
        pass
    def test_delete_system(self):
        # TODO: implement — currently a stub that always passes.
        pass
| 16.6 | 33 | 0.704819 | 120 | 0.722892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f4da92b5feb63df688048e6a9100c1156f6d1d4b | 1,321 | py | Python | ml/regressor_with_kerasModel.py | shilang1220/tfwrapper | 36b8097d63636d93e93a0a8023a535fded9f1455 | [
"MIT"
] | null | null | null | ml/regressor_with_kerasModel.py | shilang1220/tfwrapper | 36b8097d63636d93e93a0a8023a535fded9f1455 | [
"MIT"
] | null | null | null | ml/regressor_with_kerasModel.py | shilang1220/tfwrapper | 36b8097d63636d93e93a0a8023a535fded9f1455 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 18-5-15 下午4:54
# @Author : Guoliang PU
# @File : regressor_with_kerasSquential.py
# @Software: tfwrapper
import numpy as np
np.random.seed(1337)  # fixed seed so the generated data set is reproducible
# Bug fix: the file only imported `Model`, but `Sequential` is what is
# instantiated below — without this import the script dies with a NameError.
from keras.models import Model, Sequential
from keras.layers import Dense
from keras.layers import Activation
import matplotlib.pyplot as plt
# Build a noisy sine-wave data set: 200 shuffled points on [0, 2*pi].
x = np.linspace(0,2*np.pi,200)
np.random.shuffle(x)
y = np.sin(x) + np.random.normal(0,0.05,(200,))
plt.scatter(x,y)
plt.show()
# First 160 points train the model; the remaining 40 are held out for testing.
x_train,y_train = x[:160],y[:160]
x_test,y_test = x[160:],y[160:]
# Five stacked single-unit tanh Dense layers (Keras-1 style `output_dim` args).
model = Sequential()
model.add(Dense(output_dim = 1,input_dim = 1,activation='tanh'))
model.add(Dense(output_dim = 1,activation='tanh'))
model.add(Dense(output_dim = 1,activation='tanh'))
model.add(Dense(output_dim = 1,activation='tanh'))
model.add(Dense(output_dim = 1,activation='tanh'))
#model.add(Activation('sig'))
model.compile(loss='mse',optimizer='sgd')
print('Training....')
# Train on the full training batch 300 times, logging the cost every 100 steps.
for step in range (300):
    cost = model.train_on_batch(x_train,y_train)
    if step % 100 == 0:
        print('train cost:',cost)
print('Test....')
cost = model.evaluate(x_test,y_test,batch_size = 40)
print('Test cost:', cost)
# First layer's learned weights and bias, for inspection.
W,b= model.layers[0].get_weights()
print('Weight = ',W,'\nbias=',b)
y_pred = model.predict(x_test)
plt.scatter(x_test,y_test)
#plt.plot(x_test,y_pred)
plt.show() | 24.018182 | 64 | 0.694928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.248302 |
f4dc218ab525c8b2b47959492cb994fb65259ae5 | 13,238 | py | Python | isolates/download_accession_list.py | josl/ASM_challenge | f6bc31ab29d7589e259e1f3a2acbb613db6f03f3 | [
"Apache-2.0"
] | 2 | 2015-11-12T11:18:11.000Z | 2015-11-12T22:29:59.000Z | isolates/download_accession_list.py | josl/ASM_challenge | f6bc31ab29d7589e259e1f3a2acbb613db6f03f3 | [
"Apache-2.0"
] | null | null | null | isolates/download_accession_list.py | josl/ASM_challenge | f6bc31ab29d7589e259e1f3a2acbb613db6f03f3 | [
"Apache-2.0"
] | 1 | 2015-11-10T16:10:36.000Z | 2015-11-10T16:10:36.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following line in the
entry_points section in setup.cfg:
Then run `python setup.py install` which will install the command `download`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
from __future__ import division, print_function, absolute_import
import os
import sys
import json
import argparse
from shutil import move
from progressbar import Bar, Percentage, ProgressBar, ETA
from isolates import __version__, TemporaryDirectory
from isolates.log import _logger
from isolates.metadata import (ExtractExperimentMetadata,
ExtractExperimentIDs_acc)
from isolates.sequence import Sequence
from isolates.source import acctypes
__author__ = "Jose Luis Bellod Cisneros"
__coauthor__ = "Martin C F Thomsen"
__copyright__ = "Jose Luis Bellod Cisneros"
__license__ = "none"
def parse_args_accessions(args):
    """
    Parse command line parameters
    :param args: command line parameters as list of strings
    :return: command line parameters as :obj:`argparse.Namespace`
    """
    parser = argparse.ArgumentParser(
        description="Download script of isolates from" +
        "ENA taxonomy or Accession list")
    parser.add_argument(
        '--version',
        action='version',
        version='isolates {ver}'.format(ver=__version__))
    parser.add_argument(
        '-a',
        nargs=1,
        metavar=('PATH'),
        help='Format: [PATH]\n' +
             'to file containing list of ACCESSION IDs, 1 per line\n' +
             'Name of the file is used to identify the isolates downloaded.'
    )
    parser.add_argument(
        '-m',
        nargs=1,
        type=argparse.FileType('r'),
        metavar=('METADATA'),
        default=None,
        help='JSON file with seed attributes and mandatory fields\n'
    )
    parser.add_argument(
        '-out',
        nargs=1,
        metavar=('OUTPUT'),
        required=True,
        help='Path to save isolates'
    )
    parser.add_argument(
        '-p',
        '--preserve',
        action="store_true",
        dest="preserve",
        default=False,
        help='preserve any existing SRA and fastq files\n'
    )
    parser.add_argument(
        '--all_runs_as_samples',
        action="store_true",
        dest="all_runs_as_samples",
        default=False,
        help=('Treat all runs associated to a sample as separate samples. '
              'Default is to combine them into one run.\n')
    )
    # Bug fix: the original help text here was a copy-paste of
    # --all_runs_as_samples. The flag actually suppresses storage of the
    # sequence files (see CreateSampleDir / ProcessExperiment*), keeping
    # only the sample metadata.
    parser.add_argument(
        '--skip_files',
        action="store_true",
        dest="skip_files",
        default=False,
        help=('Skip retrieval/storage of the sequence files; only the '
              'sample metadata is written.\n')
    )
    return parser.parse_args(args)
def DownloadRunFiles(runid, tmpdir):
    """Download the fastq files for one run accession into tmpdir.

    :param runid: run accession ID
    :param tmpdir: directory the files are downloaded into
    :returns: list of downloaded file paths, or None on failure
    """
    # Download run files
    try:
        s = Sequence(runid, tmpdir)
        s.download_fastq()
        if not s.error:
            _logger.info("Downloaded files: %s", ','.join(s.files))
            return s.files
        else: return None
    except ValueError, e:
        _logger.error(e)
        return None
def CreateSampleDir(sfiles, m, sample_dir, preserve=False, skip_files=False):
    """Create/refresh a sample directory, move the files in, save metadata.

    :param sfiles: sequence file paths to move into the sample directory
    :param m: metadata object (provides .metadata dict and .save_metadata())
    :param sample_dir: target directory (created if missing)
    :param preserve: keep files already present in an existing sample dir
    :param skip_files: allow an empty sfiles list (metadata-only sample)
    :returns: True on success, False otherwise
    """
    sample_dir = str(sample_dir)
    if not skip_files and len(sfiles) == 0:
        _logger.error("Error: No files were found! (%s)", sample_dir)
        return False
    if not os.path.exists(sample_dir):
        _logger.info("Create sample dir: %s", sample_dir)
        # Create 'sample' dir
        os.mkdir(sample_dir)
        # Move files from tmpdir to sample dir
        for sf in sfiles: move(sf, sample_dir)
    elif not preserve and not skip_files:
        # Empty sample directory
        for fn in os.listdir(sample_dir):
            os.unlink("%s/%s"%(sample_dir, fn))
        # Move files from tmpdir to sample dir
        for sf in sfiles: move(sf, sample_dir)
    # Update and create metadata file
    try:
        # Record the stored file names (spaces sanitized, meta.json excluded).
        m.metadata["file_names"] = ' '.join(
            [os.path.basename(sf).replace(' ','_')
             for sf in sfiles
             if not os.path.basename(sf) == 'meta.json']
        )
        m.save_metadata(sample_dir)
    except ValueError, e:
        _logger.error(e)
        return False
    else:
        return True
def download_fastq_from_list(accession_list, output, json, preserve=False, all_runs_as_samples=False, skip_files=False):
    """
    Get Fastq from list of IDs
    :param accession_list: path to a file with one accession per line
    :param output: batch output folder, created under the current directory
    :param json: seed metadata (parsed JSON) or None
    :param preserve: keep any SRA/fastq files already present
    :param all_runs_as_samples: store each run of a sample as its own sample
    :param skip_files: store only metadata, not the sequence files
    """
    metadata = []  # NOTE(review): never used below — candidate for removal
    cwd = os.getcwd()
    with open(accession_list, 'r') as f:
        # Setup batch dir
        batch_dir = "%s/%s/"%(cwd, output)
        if not os.path.exists(batch_dir): os.mkdir(batch_dir)
        os.chdir(batch_dir)
        # Set logging
        _logger.Set(filename="%s/download-acceession-list.log"%batch_dir)
        # Count samples in accession_list
        n_samples = sum(1 for l in f)
        f.seek(0)
        _logger.info("Number of samples to download: %s", n_samples)
        # Start progress bar
        pbar = ProgressBar(
            widgets = [ETA(), ' - ', Percentage(), ' : ', Bar()],
            maxval = n_samples
        ).start()
        pbar.update(0)
        failed_accession = []
        sample_dir_id = 0
        for i, l in enumerate(f):
            accession = l.strip()
            if accession == '': continue
            # Determine accession type from the 3-letter prefix.
            if accession[:3] in acctypes:
                accession_type = acctypes[accession[:3]]
            else:
                _logger.error("unknown accession type for '%s'!", accession)
                failed_accession.append(accession)
                continue
            _logger.info("Acc Found: %s (%s)", accession, accession_type)
            # Studies/samples expand into their experiments; experiments and
            # runs are processed directly.
            if accession_type in ['study', 'sample']:
                for experiment_id in ExtractExperimentIDs_acc(accession):
                    sample_dir_id = ProcessExperiment(
                        experiment_id, json, batch_dir,sample_dir_id, preserve,
                        failed_accession, all_runs_as_samples, skip_files)
            elif accession_type == 'experiment':
                sample_dir_id = ProcessExperiment(
                    accession, json, batch_dir,sample_dir_id, preserve,
                    failed_accession, all_runs_as_samples, skip_files)
            elif accession_type == 'run':
                sample_dir_id = ProcessExperiment(
                    accession, json, batch_dir,sample_dir_id, preserve,
                    failed_accession, all_runs_as_samples, skip_files)
            pbar.update(i)
        pbar.finish()
    if failed_accession:
        _logger.info("The following accessions were not downloaded!")
        _logger.info('\n'.join(failed_accession))
    else:
        _logger.info("All accessions downloaded succesfully!")
def ProcessExperiment(experiment_id, json, batch_dir, sample_dir_id, preserve, failed_accession, all_runs_as_samples, skip_files=False):
    """Process one experiment accession and return the next sample dir id.

    Dispatches to ProcessExperimentSeparate (one sample per run) or
    ProcessExperimentCombined (all runs merged into one sample).
    """
    _logger.info("Processing %s...", experiment_id)
    handler = (ProcessExperimentSeparate if all_runs_as_samples
               else ProcessExperimentCombined)
    return handler(experiment_id, json, batch_dir, sample_dir_id,
                   preserve, failed_accession, skip_files)
def ProcessExperimentSeparate(experiment_id, json, batch_dir, sample_dir_id, preserve, failed_accession, skip_files=False):
    """Store every run of the experiment as its own numbered sample dir.

    :returns: the next free sample_dir_id (incremented once per stored run)
    """
    m = ExtractExperimentMetadata(experiment_id, json)
    if m.valid_metadata():
        # Check if a run ID was submitted, and if so only process that
        if experiment_id in m.runIDs: m.runIDs = [experiment_id]
        # Process the runIDs as samples
        _logger.info("Found Following Runs: %s", ', '.join(m.runIDs))
        for runid in m.runIDs:
            with TemporaryDirectory() as tmpdir:
                os.chdir(batch_dir)
                sample_dir = "%s/%s/"%(batch_dir, sample_dir_id)
                # Reuse fastq files already present in the sample dir, if any.
                if os.path.exists(sample_dir):
                    sfiles = [x for x in os.listdir(sample_dir) if any([y in x for y in ['fq','fastq']])]
                else:
                    sfiles = []
                # NOTE(review): this condition re-downloads unless BOTH
                # preserve and skip_files are set — confirm that is intended.
                if not preserve or not skip_files or len(sfiles) == 0:
                    sfiles = DownloadRunFiles(runid, tmpdir)
                if sfiles is not None:
                    success = CreateSampleDir(sfiles, m, sample_dir, preserve, skip_files)
                    if success:
                        sample_dir_id += 1
                    else:
                        failed_accession.append(runid)
                else:
                    _logger.error("Files could not be retrieved! (%s)", runid)
                    failed_accession.append(runid)
    else:
        _logger.error("Metadata Invalid! (%s) - %s", experiment_id, m.metadata.items())
        failed_accession.append(experiment_id)
    return sample_dir_id
def ProcessExperimentCombined(experiment_id, json, batch_dir, sample_dir_id, preserve, failed_accession, skip_files=False):
    """Store all runs of the experiment combined into one numbered sample dir.

    Run files are downloaded per run and concatenated pairwise (read 1 with
    read 1, read 2 with read 2) into ``<experiment_id>[_N].combined.<ext>``.

    :returns: the next free sample_dir_id (incremented once if stored)
    """
    m = ExtractExperimentMetadata(experiment_id, json)
    if m.valid_metadata():
        # Check if a run ID was submitted, and if so only process that
        if experiment_id in m.runIDs: m.runIDs = [experiment_id]
        # Process the runs as one sample
        _logger.info("Found Following Runs: %s", ', '.join(m.runIDs))
        with TemporaryDirectory() as tmpdir:
            os.chdir(batch_dir)
            sample_dir = "%s/%s/"%(batch_dir, sample_dir_id)
            csfiles = []
            # With --preserve, reuse fastq files already in the sample dir.
            if preserve and os.path.exists(sample_dir):
                csfiles = [x for x in os.listdir(sample_dir) if any([y in x for y in ['fq','fastq']])]
            if csfiles == [] and not skip_files:
                sfiles = []
                for runid in m.runIDs:
                    sf = DownloadRunFiles(runid, tmpdir)
                    if sf is not None:
                        sfiles.append(sf)
                    else:
                        _logger.error("Run files could not be retrieved! (%s)",
                                      runid)
                _logger.info("Found Following files sets:\n%s\n",
                             '\n'.join([', '.join(sf) for sf in sfiles]))
                # Combine sfiles into one entry
                if len(sfiles) > 1:
                    for file_no, file_set in enumerate(zip(*sfiles)):
                        ext = '.'.join(file_set[0].split('/')[-1].split('.')[1:])
                        if len(sfiles[0]) > 1:
                            new_file = "%s_%s.combined.%s"%(experiment_id,file_no+1, ext)
                        else:
                            new_file = "%s.combined.%s"%(experiment_id, ext)
                        # NOTE(review): Python-2-era code — the target is opened
                        # in text mode while the sources are read as bytes.
                        with open(new_file, 'w') as nf:
                            for fn in file_set:
                                with open(fn, 'rb') as f:
                                    nf.write(f.read())
                        if os.path.exists(new_file):
                            csfiles.append(new_file)
                        else:
                            _logger.error("Combined file creation failed! (%s: %s)",
                                          experiment_id, file_no)
                            break
                # Bug fix: guard against sfiles == [] (every download failed),
                # which previously raised IndexError on sfiles[0]; now it
                # falls through to the "could not be combined" error path.
                elif sfiles and isinstance(sfiles[0], list):
                    csfiles = sfiles[0]
                if csfiles == []:
                    _logger.error("Files could not be combined! (%s)",
                                  experiment_id)
                    failed_accession.append(experiment_id)
            if csfiles != [] or skip_files:
                success = CreateSampleDir(csfiles, m, sample_dir, preserve, skip_files)
                if success:
                    sample_dir_id += 1
                else:
                    failed_accession.append(experiment_id)
            else:
                _logger.error("Files could not be retrieved! (%s)",
                              experiment_id)
                failed_accession.append(experiment_id)
    else:
        _logger.error("Metadata Invalid! (%s) - %s", experiment_id, m.metadata.items())
        failed_accession.append(experiment_id)
    return sample_dir_id
def download_accession_list():
    """Command-line entry point: parse sys.argv and start the batch download.

    Requires -a (accession list) and -out; the optional -m JSON file seeds
    the metadata defaults passed to download_fastq_from_list.
    """
    args = parse_args_accessions(sys.argv[1:])
    if args.a is not None:
        if args.m is not None:
            try:
                default = json.load(args.m[0])
            except ValueError as e:
                print("ERROR: Json file has the wrong format!\n", e)
                exit()
        else:
            default = None
        download_fastq_from_list(args.a[0], args.out[0], default, args.preserve, args.all_runs_as_samples, args.skip_files)
    else:
        # Bug fix: the old usage hint advertised a non-existent '-o ORGANISM'
        # option; this now matches what parse_args_accessions defines.
        print('Usage: -a PATH -out PATH [-m JSON]')
if __name__ == "__main__":
    # Allow the module to be executed directly as a script.
    download_accession_list()
| 40.48318 | 136 | 0.5769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,088 | 0.233268 |
f4dde28343298262f6e5c42a2d685e3d288155a3 | 344 | py | Python | tkinker/Code/messageBox.py | siddhantjoshi/Thinker | d86bf064b5753b30d56d17b9e9b5e88c4478c1bf | [
"Apache-2.0"
] | null | null | null | tkinker/Code/messageBox.py | siddhantjoshi/Thinker | d86bf064b5753b30d56d17b9e9b5e88c4478c1bf | [
"Apache-2.0"
] | null | null | null | tkinker/Code/messageBox.py | siddhantjoshi/Thinker | d86bf064b5753b30d56d17b9e9b5e88c4478c1bf | [
"Apache-2.0"
] | null | null | null | from tkinter import *
from tkinter import messagebox
# Root window for the message-box demo.
rootWidget = Tk()
rootWidget.title("Message Box Example")
def messageBox():
    """Show an info dialog and append its return value as a label."""
    # showinfo blocks until the dialog is dismissed; its return value is
    # rendered in a new label under the button.
    response = messagebox.showinfo(title="Show Info", message="Message")
    Label(rootWidget, text = response).pack()
Button(rootWidget,text= "Click Me!!", command = messageBox).pack()
# Enter the Tk event loop (blocks until the window is closed).
rootWidget.mainloop()
f4de5cc58d12df4855fb7893f7de106c2f5a9481 | 5,409 | py | Python | octavia/controller/worker/v2/flows/flow_utils.py | zhangi/octavia | e68c851fecf55e1b5ffe7d5b849f729626af28a3 | [
"Apache-2.0"
] | 129 | 2015-06-23T08:06:23.000Z | 2022-03-31T12:38:20.000Z | octavia/controller/worker/v2/flows/flow_utils.py | zhangi/octavia | e68c851fecf55e1b5ffe7d5b849f729626af28a3 | [
"Apache-2.0"
] | 6 | 2016-05-20T11:05:27.000Z | 2021-03-23T06:05:52.000Z | octavia/controller/worker/v2/flows/flow_utils.py | zhangi/octavia | e68c851fecf55e1b5ffe7d5b849f729626af28a3 | [
"Apache-2.0"
] | 166 | 2015-07-15T16:24:05.000Z | 2022-03-02T20:54:36.000Z |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octavia.api.drivers import utils as provider_utils
from octavia.controller.worker.v2.flows import amphora_flows
from octavia.controller.worker.v2.flows import health_monitor_flows
from octavia.controller.worker.v2.flows import l7policy_flows
from octavia.controller.worker.v2.flows import l7rule_flows
from octavia.controller.worker.v2.flows import listener_flows
from octavia.controller.worker.v2.flows import load_balancer_flows
from octavia.controller.worker.v2.flows import member_flows
from octavia.controller.worker.v2.flows import pool_flows
# Shared flow-factory instances, constructed once at import time and reused
# by every wrapper function below.
LB_FLOWS = load_balancer_flows.LoadBalancerFlows()
AMP_FLOWS = amphora_flows.AmphoraFlows()
HM_FLOWS = health_monitor_flows.HealthMonitorFlows()
L7_POLICY_FLOWS = l7policy_flows.L7PolicyFlows()
L7_RULES_FLOWS = l7rule_flows.L7RuleFlows()
LISTENER_FLOWS = listener_flows.ListenerFlows()
M_FLOWS = member_flows.MemberFlows()
P_FLOWS = pool_flows.PoolFlows()
def get_create_load_balancer_flow(topology, listeners=None):
    """Build the create-load-balancer flow via the shared LoadBalancerFlows."""
    return LB_FLOWS.get_create_load_balancer_flow(topology,
                                                  listeners=listeners)
def get_delete_load_balancer_flow(lb):
    """Build the delete-load-balancer flow via the shared LoadBalancerFlows."""
    return LB_FLOWS.get_delete_load_balancer_flow(lb)
def get_listeners_on_lb(db_lb):
    """Get a list of the listeners on a load balancer.

    :param db_lb: A load balancer database model object.
    :returns: A list of provider dict format listeners.
    """
    return [
        provider_utils.db_listener_to_provider_listener(db_listener).to_dict()
        for db_listener in db_lb.listeners
    ]
def get_pools_on_lb(db_lb):
    """Get a list of the pools on a load balancer.

    :param db_lb: A load balancer database model object.
    :returns: A list of provider dict format pools.
    """
    return [
        provider_utils.db_pool_to_provider_pool(db_pool).to_dict()
        for db_pool in db_lb.pools
    ]
def get_cascade_delete_load_balancer_flow(lb, listeners=(), pools=()):
    """Build the cascade-delete flow via the shared LoadBalancerFlows."""
    return LB_FLOWS.get_cascade_delete_load_balancer_flow(lb, listeners,
                                                          pools)
def get_update_load_balancer_flow():
    """Build the update-load-balancer flow via the shared LoadBalancerFlows."""
    return LB_FLOWS.get_update_load_balancer_flow()
def get_create_amphora_flow():
    """Build the create-amphora flow via the shared AmphoraFlows."""
    return AMP_FLOWS.get_create_amphora_flow()
def get_delete_amphora_flow(amphora, retry_attempts=None, retry_interval=None):
    """Build the delete-amphora flow via the shared AmphoraFlows."""
    return AMP_FLOWS.get_delete_amphora_flow(amphora, retry_attempts,
                                             retry_interval)
def get_failover_LB_flow(amps, lb):
    """Build the load-balancer failover flow via the shared LoadBalancerFlows."""
    return LB_FLOWS.get_failover_LB_flow(amps, lb)
def get_failover_amphora_flow(amphora_dict, lb_amp_count):
    """Build the amphora failover flow via the shared AmphoraFlows."""
    return AMP_FLOWS.get_failover_amphora_flow(amphora_dict, lb_amp_count)
def cert_rotate_amphora_flow():
    """Build the certificate-rotation flow via the shared AmphoraFlows."""
    return AMP_FLOWS.cert_rotate_amphora_flow()
def update_amphora_config_flow():
    """Build the amphora config-update flow via the shared AmphoraFlows."""
    return AMP_FLOWS.update_amphora_config_flow()
def get_create_health_monitor_flow():
    """Build the create-health-monitor flow via the shared HealthMonitorFlows."""
    return HM_FLOWS.get_create_health_monitor_flow()
def get_delete_health_monitor_flow():
    """Build the delete-health-monitor flow via the shared HealthMonitorFlows."""
    return HM_FLOWS.get_delete_health_monitor_flow()
def get_update_health_monitor_flow():
    """Build the update-health-monitor flow via the shared HealthMonitorFlows."""
    return HM_FLOWS.get_update_health_monitor_flow()
def get_create_l7policy_flow():
    """Build the create-L7-policy flow via the shared L7PolicyFlows."""
    return L7_POLICY_FLOWS.get_create_l7policy_flow()
def get_delete_l7policy_flow():
    """Build the delete-L7-policy flow via the shared L7PolicyFlows."""
    return L7_POLICY_FLOWS.get_delete_l7policy_flow()
def get_update_l7policy_flow():
    """Build the update-L7-policy flow via the shared L7PolicyFlows."""
    return L7_POLICY_FLOWS.get_update_l7policy_flow()
def get_create_l7rule_flow():
    """Build the create-L7-rule flow via the shared L7RuleFlows."""
    return L7_RULES_FLOWS.get_create_l7rule_flow()
def get_delete_l7rule_flow():
    """Build the delete-L7-rule flow via the shared L7RuleFlows."""
    return L7_RULES_FLOWS.get_delete_l7rule_flow()
def get_update_l7rule_flow():
    """Build the update-L7-rule flow via the shared L7RuleFlows."""
    return L7_RULES_FLOWS.get_update_l7rule_flow()
def get_create_listener_flow():
    """Build the create-listener flow via the shared ListenerFlows."""
    return LISTENER_FLOWS.get_create_listener_flow()
def get_create_all_listeners_flow():
    """Build the create-all-listeners flow via the shared ListenerFlows."""
    return LISTENER_FLOWS.get_create_all_listeners_flow()
def get_delete_listener_flow():
    """Build the delete-listener flow via the shared ListenerFlows."""
    return LISTENER_FLOWS.get_delete_listener_flow()
def get_update_listener_flow():
    """Build the update-listener flow via the shared ListenerFlows."""
    return LISTENER_FLOWS.get_update_listener_flow()
def get_create_member_flow():
    """Build the create-member flow via the shared MemberFlows."""
    return M_FLOWS.get_create_member_flow()
def get_delete_member_flow():
    """Build the delete-member flow via the shared MemberFlows."""
    return M_FLOWS.get_delete_member_flow()
def get_update_member_flow():
    """Build the update-member flow via the shared MemberFlows."""
    return M_FLOWS.get_update_member_flow()
def get_batch_update_members_flow(old_members, new_members, updated_members):
    """Build the batch member-update flow via the shared MemberFlows."""
    return M_FLOWS.get_batch_update_members_flow(old_members, new_members,
                                                 updated_members)
def get_create_pool_flow():
    """Build the create-pool flow via the shared PoolFlows."""
    return P_FLOWS.get_create_pool_flow()
def get_delete_pool_flow():
    """Build the delete-pool flow via the shared PoolFlows."""
    return P_FLOWS.get_delete_pool_flow()
def get_update_pool_flow():
    """Build the update-pool flow via the shared PoolFlows."""
    return P_FLOWS.get_update_pool_flow()
| 29.396739 | 79 | 0.765391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 897 | 0.165835 |
f4dfb6b1e34d706321fd96235fbfa3fb0950437c | 2,977 | py | Python | blogproject/blog/views.py | MrWolffy/django-tutorial | 4b00e35092d47e9a04a7019c3f803c5b09630ec6 | [
"MIT"
] | null | null | null | blogproject/blog/views.py | MrWolffy/django-tutorial | 4b00e35092d47e9a04a7019c3f803c5b09630ec6 | [
"MIT"
] | null | null | null | blogproject/blog/views.py | MrWolffy/django-tutorial | 4b00e35092d47e9a04a7019c3f803c5b09630ec6 | [
"MIT"
] | null | null | null | import re
import markdown
from django.contrib import messages
from django.db.models import Q
from django.shortcuts import render, get_object_or_404, redirect
from django.utils.text import slugify
from django.views.generic import ListView, DetailView
from markdown.extensions.toc import TocExtension
from pure_pagination.mixins import PaginationMixin
from .models import Post, Category, Tag
class IndexView(PaginationMixin, ListView):
    """Paginated list of all posts for the blog front page."""
    model = Post
    template_name = 'blog/index.html'
    context_object_name = 'post_list'
    # Setting paginate_by turns pagination on; its value is the number of
    # posts shown per page.
    paginate_by = 10
class PostDetailView(DetailView):
    """Render a single post, bumping its view counter and converting its
    Markdown body to HTML (with an extracted table of contents)."""
    # These attributes mean the same as they do on ListView.
    model = Post
    template_name = 'blog/detail.html'
    context_object_name = 'post'
    def get(self, request, *args, **kwargs):
        # get() is overridden because each visit must increment the post's
        # read count. It returns an HttpResponse instance.
        # The parent get() has to run first: only after it is called does
        # self.object exist, holding the Post instance being visited.
        response = super(PostDetailView, self).get(request, *args, **kwargs)
        # Increase the post's read count by 1.
        # Note: self.object is the post being visited.
        self.object.increase_views()
        # The view must return an HttpResponse object.
        return response
    def get_object(self, queryset=None):
        # get_object() is overridden because the post body needs rendering.
        post = super().get_object(queryset=None)
        md = markdown.Markdown(extensions=[
            'markdown.extensions.extra',
            'markdown.extensions.codehilite',
            # Remember to import TocExtension and slugify at the top.
            TocExtension(slugify=slugify),
        ])
        post.body = md.convert(post.body)
        # Pull just the <ul> items out of the generated TOC wrapper; an empty
        # string means the post produced no headings.
        m = re.search(r'<div class="toc">\s*<ul>(.*)</ul>\s*</div>', md.toc, re.S)
        post.toc = m.group(1) if m is not None else ''
        return post
class ArchiveView(IndexView):
    """Index page restricted to posts created in the year/month URL kwargs."""
    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.filter(
            created_time__year=self.kwargs.get('year'),
            created_time__month=self.kwargs.get('month'),
        )
class CategoryView(IndexView):
    """Index page restricted to posts in one Category (pk from URL kwargs)."""
    def get_queryset(self):
        category = get_object_or_404(Category, pk=self.kwargs.get('pk'))
        return super().get_queryset().filter(category=category)
class TagView(ListView):
    """Index page restricted to posts carrying one Tag (pk from URL kwargs).

    Bug fix: the queryset must be Posts — the template and
    ``context_object_name`` are the index post list, and the sibling
    CategoryView filters Posts the same way. The original ``model = Tag``
    made ``get_queryset`` filter Tag objects by a ``tags`` relation
    (presumably Post's many-to-many to Tag — confirm against models.py).
    """
    model = Post
    template_name = 'blog/index.html'
    context_object_name = 'post_list'
    def get_queryset(self):
        tag = get_object_or_404(Tag, pk=self.kwargs.get('pk'))
        return super().get_queryset().filter(tags=tag)
def search(request):
    """Case-insensitive search over post titles and bodies (GET param 'q').

    With an empty query, flash an error message and bounce back to the index.
    """
    keyword = request.GET.get('q')
    if not keyword:
        messages.add_message(request, messages.ERROR, "请输入搜索关键词",
                             extra_tags='danger')
        return redirect('blog:index')
    matches = Post.objects.filter(
        Q(title__icontains=keyword) | Q(body__icontains=keyword))
    return render(request, 'blog/index.html', {'post_list': matches})
| 31.336842 | 88 | 0.664091 | 2,548 | 0.760824 | 0 | 0 | 0 | 0 | 0 | 0 | 1,004 | 0.299791 |
f4dfdc2828ad759886241cbe8ceb66c9cfe90377 | 87 | py | Python | apps/sentry/apps.py | ShAlireza/Sentry-Telegram | adf5384a4bb2be1fd91fa948f636d00622ea9c04 | [
"MIT"
] | null | null | null | apps/sentry/apps.py | ShAlireza/Sentry-Telegram | adf5384a4bb2be1fd91fa948f636d00622ea9c04 | [
"MIT"
] | null | null | null | apps/sentry/apps.py | ShAlireza/Sentry-Telegram | adf5384a4bb2be1fd91fa948f636d00622ea9c04 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class SentryConfig(AppConfig):
    """Django application configuration for the sentry app."""
    name = 'sentry'
| 14.5 | 33 | 0.747126 | 50 | 0.574713 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.091954 |
f4e13e171e02dd82b6bb13fe5bf5609262caa599 | 4,284 | py | Python | tests/test_compare.py | grst/jupytext | 0bbcd61297d8ee75b4f0329e2617acbbade3eb10 | [
"MIT"
] | null | null | null | tests/test_compare.py | grst/jupytext | 0bbcd61297d8ee75b4f0329e2617acbbade3eb10 | [
"MIT"
] | null | null | null | tests/test_compare.py | grst/jupytext | 0bbcd61297d8ee75b4f0329e2617acbbade3eb10 | [
"MIT"
] | null | null | null | import pytest
from nbformat.v4.nbbase import new_notebook, \
new_markdown_cell, new_code_cell, new_raw_cell
import jupytext
from jupytext.compare import compare_notebooks, \
test_round_trip_conversion as round_trip_conversion
# Empty the format-version table for the whole test module — presumably so
# version headers don't interfere with the comparisons below; confirm
# against jupytext.file_format_version.
jupytext.file_format_version.FILE_FORMAT_VERSION = {}
def test_raise_on_different_metadata():
    """compare_notebooks must raise when notebook-level metadata differs."""
    ref = new_notebook(metadata={'main_language': 'python'},
                       cells=[new_markdown_cell('Cell one')])
    test = new_notebook(metadata={'main_language': 'R'},
                        cells=[new_markdown_cell('Cell one')])
    with pytest.raises(AssertionError):
        compare_notebooks(ref, test)
def test_raise_on_different_cell_count():
    """compare_notebooks must raise when the cell counts differ."""
    ref = new_notebook(cells=[new_markdown_cell('Cell one'),
                              new_code_cell('Cell two')])
    test = new_notebook(cells=[new_markdown_cell('Cell one')])
    with pytest.raises(AssertionError):
        compare_notebooks(ref, test)
def test_raise_on_different_cell_type():
    """compare_notebooks must raise when a cell's type differs (code vs raw)."""
    ref = new_notebook(cells=[new_markdown_cell('Cell one'),
                              new_code_cell('Cell two')])
    test = new_notebook(cells=[new_markdown_cell('Cell one'),
                               new_raw_cell('Cell two')])
    with pytest.raises(AssertionError):
        compare_notebooks(ref, test)
def test_raise_on_different_cell_content():
    """compare_notebooks must raise when a cell's source text differs."""
    ref = new_notebook(cells=[new_markdown_cell('Cell one'),
                              new_code_cell('Cell two')])
    test = new_notebook(cells=[new_markdown_cell('Cell one'),
                               new_code_cell('Modified cell two')])
    with pytest.raises(AssertionError):
        compare_notebooks(ref, test)
def test_raise_on_split_markdown_cell():
    """By default a markdown cell split in two must not match the original."""
    ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
    test = new_notebook(cells=[new_markdown_cell('Cell one'),
                               new_markdown_cell('second line')])
    with pytest.raises(AssertionError):
        compare_notebooks(ref, test)
def test_raise_on_incomplete_markdown_cell():
    """Even with allow_split_markdown, a missing markdown fragment must raise."""
    ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
    test = new_notebook(cells=[new_markdown_cell('Cell one')])
    with pytest.raises(AssertionError):
        compare_notebooks(ref, test, allow_split_markdown=True)
def test_dont_raise_on_split_markdown_cell():
    """With allow_split_markdown=True, a split markdown cell is accepted."""
    ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
    test = new_notebook(cells=[new_markdown_cell('Cell one'),
                               new_markdown_cell('second line')])
    compare_notebooks(ref, test, allow_split_markdown=True)
def test_raise_on_different_cell_metadata():
    """compare_notebooks must raise when cell-level metadata differs."""
    ref = new_notebook(cells=[new_code_cell('1+1')])
    test = new_notebook(
        cells=[new_code_cell('1+1', metadata={'metakey': 'value'})])
    with pytest.raises(AssertionError):
        compare_notebooks(ref, test)
def test_dont_raise_on_different_outputs():
    """Outputs are ignored by default: differing outputs must not raise."""
    ref = new_notebook(cells=[new_code_cell('1+1')])
    test = new_notebook(cells=[new_code_cell('1+1', outputs=[
        {
            "data": {
                "text/plain": [
                    "2"
                ]
            },
            "execution_count": 1,
            "metadata": {},
            "output_type": "execute_result"
        }
    ])])
    compare_notebooks(ref, test)
def test_raise_on_different_outputs():
    """With test_outputs=True, differing outputs must raise."""
    ref = new_notebook(cells=[new_code_cell('1+1')])
    test = new_notebook(cells=[new_code_cell('1+1', outputs=[
        {
            "data": {
                "text/plain": [
                    "2"
                ]
            },
            "execution_count": 1,
            "metadata": {},
            "output_type": "execute_result"
        }
    ])])
    with pytest.raises(AssertionError):
        compare_notebooks(ref, test, test_outputs=True)
def test_test_round_trip_conversion():
    """A notebook with outputs must survive a round trip through '.py'."""
    notebook = new_notebook(cells=[new_code_cell('1+1', outputs=[
        {
            "data": {
                "text/plain": [
                    "2"
                ]
            },
            "execution_count": 1,
            "metadata": {},
            "output_type": "execute_result"
        }
    ])], metadata={'main_language': 'python'})
    round_trip_conversion(notebook, '.py', test_outputs=True)
| 34 | 78 | 0.613445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 627 | 0.146359 |
f4e2c1950f15699ea2256ebd32a508dcb6887549 | 105 | py | Python | test_initial.py | BickySamourai/djreact | cea500cb3dc841100cc058110d7e2c6d813ca8b8 | [
"MIT"
] | 1 | 2018-12-05T11:21:50.000Z | 2018-12-05T11:21:50.000Z | test_initial.py | floriansollami/djreact | cea500cb3dc841100cc058110d7e2c6d813ca8b8 | [
"MIT"
] | 2 | 2020-02-11T23:28:33.000Z | 2020-06-05T19:36:41.000Z | test_initial.py | BickySamourai/djreact | cea500cb3dc841100cc058110d7e2c6d813ca8b8 | [
"MIT"
] | 1 | 2018-12-10T10:32:23.000Z | 2018-12-10T10:32:23.000Z | def hello(name):
return 'Hello ' + 'name'
def test_hello():
    # Smoke test: with the argument 'name' the greeting is 'Hello name'.
    assert hello('name') == 'Hello name' | 21 | 40 | 0.609524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.304762
f4e45601dde17142d5458f76f7251eb01214f0cd | 1,921 | py | Python | hashing/pdq/python/pdqhashing/utils/matrix.py | larrycameron80/ThreatExchange | 00f9c140360fd6189e2be7de4ad680d474cbeebb | [
"BSD-3-Clause"
] | 1 | 2021-10-11T21:43:04.000Z | 2021-10-11T21:43:04.000Z | preprocess/third_party/pdqhashing/utils/matrix.py | vegetable68/sbb | 5949632fbd95a9dd6f40fca806b9a9d56b41652a | [
"CC0-1.0"
] | null | null | null | preprocess/third_party/pdqhashing/utils/matrix.py | vegetable68/sbb | 5949632fbd95a9dd6f40fca806b9a9d56b41652a | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
class MatrixUtil:
    """Small helpers for 2-D float matrices used by PDQ hashing."""

    @classmethod
    def allocateMatrix(cls, numRows, numCols):
        """Return a numRows x numCols matrix of 0.0 as a list of row lists."""
        # Comprehension replaces the original allocate-then-overwrite loop.
        return [[0.0] * numCols for _ in range(numRows)]

    @classmethod
    def allocateMatrixAsRowMajorArray(cls, numRows, numCols):
        """Return a flat, row-major array of numRows * numCols zeros."""
        return [0.0] * (numRows * numCols)

    @classmethod
    def torben(cls, m, numRows, numCols):
        """Return the median of matrix ``m`` using Torben's algorithm.

        Torben's method finds the median without sorting or copying: it
        repeatedly guesses the midpoint of the current value range and
        counts elements below/above the guess until the guess brackets
        the median.  For an even element count this yields the lower
        median.  Locals were renamed so the builtins min/max are no
        longer shadowed, and the pointless ``int()``/``float()``
        placeholder initialisations were removed.
        """
        n = numRows * numCols
        midn = (n + 1) // 2
        # Find the value range of the matrix.
        lo = hi = m[0][0]
        for i in range(numRows):
            for j in range(numCols):
                v = m[i][j]
                if v < lo:
                    lo = v
                if v > hi:
                    hi = v
        while True:
            guess = float((lo + hi) / 2)
            less = greater = equal = 0
            maxltguess = lo   # largest value strictly below the guess
            mingtguess = hi   # smallest value strictly above the guess
            for i in range(numRows):
                for j in range(numCols):
                    v = m[i][j]
                    if v < guess:
                        less += 1
                        if v > maxltguess:
                            maxltguess = v
                    elif v > guess:
                        greater += 1
                        if v < mingtguess:
                            mingtguess = v
                    else:
                        equal += 1
            if less <= midn and greater <= midn:
                break
            elif less > greater:
                hi = maxltguess
            else:
                lo = mingtguess
        # The loop has bracketed the median; pick the correct candidate.
        # (These returns belong after the loop — inside it the function
        # would either return early or fall through to None.)
        if less >= midn:
            return maxltguess
        elif less + equal >= midn:
            return guess
        else:
            return mingtguess
| 27.442857 | 61 | 0.401353 | 1,896 | 0.986986 | 0 | 0 | 1,862 | 0.969287 | 0 | 0 | 21 | 0.010932 |
f4e541181a3113f28e637d5fa10bb1de655a01f7 | 4,279 | py | Python | Scripts/simulation/plex/plex_service.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/plex/plex_service.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/plex/plex_service.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\plex\plex_service.py
# Compiled at: 2020-03-06 03:39:09
# Size of source mod 2**32: 8464 bytes
from plex.plex_enums import PlexBuildingType
from sims4.geometry import Polygon
from sims4.service_manager import Service
import build_buy, services, sims4.log
logger = sims4.log.Logger('PlexService', default_owner='tingyul')
class PlexService(Service):
    """Tracks which zones are plex units and maps them back to their master zone."""

    def __init__(self):
        # zone_id -> (master zone object data id, active plex id)
        self._zone_to_master_map = {}

    def setup(self, gameplay_zone_data=None, save_slot_data=None):
        """Populate the zone map from the persisted zone protocol buffers."""
        persistence_service = services.get_persistence_service()
        for zone_data in persistence_service.zone_proto_buffs_gen():
            master_id = zone_data.master_zone_object_data_id
            # master id 0 marks a non-plex zone.
            if master_id != 0:
                self._zone_to_master_map[zone_data.zone_id] = (
                    master_id, zone_data.active_plex)

    def is_active_zone_a_plex(self):
        return self.is_zone_a_plex(services.current_zone_id())

    def is_zone_a_plex(self, zone_id):
        return zone_id in self._zone_to_master_map

    def is_zone_an_apartment(self, zone_id, consider_penthouse_an_apartment=True):
        if not self.is_zone_a_plex(zone_id):
            return False
        if consider_penthouse_an_apartment:
            return True
        return self.get_plex_building_type(zone_id) != PlexBuildingType.PENTHOUSE_PLEX

    def get_active_zone_plex_id(self):
        """Return the active plex id of the current zone, or None if not a plex."""
        entry = self._zone_to_master_map.get(services.current_zone_id())
        if entry is not None:
            return entry[1]

    def get_master_zone_id(self, child_zone_id):
        """Return the master zone id for a plex zone; non-plex zones map to themselves."""
        entry = self._zone_to_master_map.get(child_zone_id)
        if entry is not None:
            return entry[0]
        return child_zone_id

    def get_plex_building_type(self, zone_id):
        persistence_service = services.get_persistence_service()
        house_description_id = persistence_service.get_house_description_id(zone_id)
        return PlexBuildingType(services.get_building_type(house_description_id))

    def get_plex_zones_in_group(self, zone_id):
        """Return all zones sharing this zone's master (frozenset), or set() if none."""
        entry = self._zone_to_master_map.get(zone_id)
        if entry is None:
            return set()
        master_id = entry[0]
        return frozenset(
            zone for zone, (master, _) in self._zone_to_master_map.items()
            if master == master_id)

    def zone_to_master_map_gen(self):
        # Dead 'if False: yield None' decompiler artifact removed.
        yield from self._zone_to_master_map.items()

    def get_plex_zone_at_position(self, world_position, level):
        """Return the zone id of the plex unit at a world position, if any."""
        entry = self._zone_to_master_map.get(services.current_zone_id())
        if entry is None:
            return None
        master_id = entry[0]
        plex_id = build_buy.get_location_plex_id(world_position, level)
        for zone_id, (other_master_id, other_plex_id) in self._zone_to_master_map.items():
            if master_id == other_master_id and plex_id == other_plex_id:
                return zone_id
        return None

    def is_position_in_common_area_or_active_plex(self, world_position, level):
        if not services.current_zone().lot.is_position_on_lot(world_position, level):
            return False
        plex_zone_id = self.get_plex_zone_at_position(world_position, level)
        return plex_zone_id is None or plex_zone_id == services.current_zone_id()

    def get_plex_polygons(self, level):
        """Return the outline polygons of the active plex on the given level."""
        zone_id = services.current_zone_id()
        entry = self._zone_to_master_map.get(zone_id)
        if entry is None:
            logger.error("Can't get polygons for a non-plex: {}", zone_id)
            return []
        plex_id = entry[1]
        polygons = []
        for block in build_buy.get_plex_outline(plex_id, level):
            logger.assert_log(len(block) == 1, 'Plex has cutouts. get_plex_polygons needs to be updated. Zone: {}, Level: {}', zone_id, level)
            # Vertices are reversed to flip the winding order.
            polygons.append(Polygon(list(reversed(block[0]))))
        return polygons
f4e75c309e61dbe358567d5b645b34ee9b7c97aa | 962 | py | Python | youtube.py | yappy2000d/MoodBot | b62fc96fb15aa6f7bc95696f7a1614f1f50614dd | [
"MIT"
] | null | null | null | youtube.py | yappy2000d/MoodBot | b62fc96fb15aa6f7bc95696f7a1614f1f50614dd | [
"MIT"
] | null | null | null | youtube.py | yappy2000d/MoodBot | b62fc96fb15aa6f7bc95696f7a1614f1f50614dd | [
"MIT"
] | null | null | null | import os
# API key is taken from the environment; raises KeyError at import if unset.
KEY = os.environ['KEY']
from urllib.parse import parse_qs, urlparse
import requests
def getid(url):
    """Extract the video id from a YouTube URL, or pass through a bare id."""
    if not url.startswith("http"):
        # Not a URL — assume it is already a video id.
        return url
    query = parse_qs(urlparse(url).query)
    try:
        return query["v"][0]
    except KeyError:
        # Short links such as https://youtu.be/<id> carry the id in the path.
        return url.split("/")[-1]
def video(id):
    """Fetch metadata for one video from the YouTube Data API v3."""
    params = {
        "part": "snippet,contentDetails,statistics",
        "id": id,
        "key": KEY
    }
    response = requests.get(
        "https://youtube.googleapis.com/youtube/v3/videos", params=params)
    return response.json()
def comment(id, page_token=''):
    """Fetch one page of comment threads for a video.

    Returns a ``(payload, next_page_token)`` tuple; ``next_page_token`` is
    '' on the last page, so callers can loop until it is falsy.
    """
    url = "https://youtube.googleapis.com/youtube/v3/commentThreads"
    params = {
        "part": "snippet,replies",
        "videoId": id,
        # Bug fix: the API expects this parameter as "pageToken".  The
        # original "next_page_token" key was silently ignored, so
        # pagination never advanced past the first page.
        "pageToken": page_token,
        "key": KEY
    }
    res = requests.get(url, params=params)
    payload = res.json()  # parse the body once instead of twice
    return payload, payload.get("nextPageToken", "")
| 22.372093 | 68 | 0.576923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.257796 |
f4e91ac9a8dab12776325390648132e24eca6eb5 | 6,109 | py | Python | Course_4_Week_4_Project_2.py | Vivek9Chavan/DeepLearning.AI-TensorFlow-Developer-Professional-Certificate | c48f2040631a87d973ea8cbe534af9cd8f715d4a | [
"Apache-2.0"
] | null | null | null | Course_4_Week_4_Project_2.py | Vivek9Chavan/DeepLearning.AI-TensorFlow-Developer-Professional-Certificate | c48f2040631a87d973ea8cbe534af9cd8f715d4a | [
"Apache-2.0"
] | null | null | null | Course_4_Week_4_Project_2.py | Vivek9Chavan/DeepLearning.AI-TensorFlow-Developer-Professional-Certificate | c48f2040631a87d973ea8cbe534af9cd8f715d4a | [
"Apache-2.0"
] | null | null | null |
"""
This is is a part of the DeepLearning.AI TensorFlow Developer Professional Certificate offered on Coursera.
All copyrights belong to them. I am sharing this work here to showcase the projects I have worked on
Course: Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning
Week 4: Real-world time series data
Aim: Sunspot Data: Different models
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
def plot_series(time, series, format="-", start=0, end=None):
    """Draw series[start:end] against time[start:end] on the current axes."""
    window = slice(start, end)
    plt.plot(time[window], series[window], format)
    plt.xlabel("Time")
    plt.ylabel("Value")
    plt.grid(True)
# Download the sunspot dataset (cached by Keras after the first run).
path_to_file = tf.keras.utils.get_file('sunspots.csv','https://storage.googleapis.com/laurencemoroney-blog.appspot.com/Sunspots.csv')
import csv
# Parse the CSV: column 0 is the time index, column 2 the sunspot count.
time_step = []
sunspots = []
with open(path_to_file) as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    next(reader)  # skip the header row
    for row in reader:
        sunspots.append(float(row[2]))
        time_step.append(int(row[0]))
series = np.array(sunspots)
time = np.array(time_step)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
# Split into training (first 3000 points) and validation sets.
split_time = 3000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
# Windowing hyperparameters (window_size/batch_size are overridden below).
window_size = 30
batch_size = 32
shuffle_buffer_size = 1000
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    """Turn a 1-D series into shuffled (input window, shifted-target window) batches."""
    expanded = tf.expand_dims(series, axis=-1)
    dataset = (
        tf.data.Dataset.from_tensor_slices(expanded)
        .window(window_size + 1, shift=1, drop_remainder=True)
        .flat_map(lambda window: window.batch(window_size + 1))
        .shuffle(shuffle_buffer)
        # Input is the window minus its last step; target is shifted by one.
        .map(lambda window: (window[:-1], window[1:]))
    )
    return dataset.batch(batch_size).prefetch(1)
def model_forecast(model, series, window_size):
    """Run *model* over every length-``window_size`` sliding window of *series*."""
    dataset = (
        tf.data.Dataset.from_tensor_slices(series)
        .window(window_size, shift=1, drop_remainder=True)
        .flat_map(lambda window: window.batch(window_size))
        .batch(32)
        .prefetch(1)
    )
    return model.predict(dataset)
# --- Learning-rate sweep: train briefly while exponentially increasing the
# --- LR, then plot loss vs LR to pick a good value for the real run.
tf.keras.backend.clear_session()
tf.random.set_seed(51)
np.random.seed(51)
window_size = 64
batch_size = 256
train_set = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
print(train_set)
print(x_train.shape)
model = tf.keras.models.Sequential([
  tf.keras.layers.Conv1D(filters=32, kernel_size=5,
                      strides=1, padding="causal",
                      activation="relu",
                      input_shape=[None, 1]),
  tf.keras.layers.LSTM(64, return_sequences=True),
  tf.keras.layers.LSTM(64, return_sequences=True),
  tf.keras.layers.Dense(30, activation="relu"),
  tf.keras.layers.Dense(10, activation="relu"),
  tf.keras.layers.Dense(1),
  # Scale outputs up to the data's magnitude (sunspot counts reach the hundreds).
  tf.keras.layers.Lambda(lambda x: x * 400)
])
# LR grows by 10x every 20 epochs, starting from 1e-8.
lr_schedule = tf.keras.callbacks.LearningRateScheduler(
    lambda epoch: 1e-8 * 10**(epoch / 20))
# NOTE(review): `lr=` is the legacy Keras argument name; newer versions use
# `learning_rate=` — confirm against the installed TF version.
optimizer = tf.keras.optimizers.SGD(lr=1e-8, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
# Loss vs learning rate on a log axis — the elbow suggests the LR to use next.
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-8, 1e-4, 0, 60])
# --- Final training run with the chosen hyperparameters, then forecast the
# --- validation period and plot it against the ground truth.
tf.keras.backend.clear_session()
tf.random.set_seed(51)
np.random.seed(51)
train_set = windowed_dataset(x_train, window_size=60, batch_size=100, shuffle_buffer=shuffle_buffer_size)
model = tf.keras.models.Sequential([
  tf.keras.layers.Conv1D(filters=60, kernel_size=5,
                      strides=1, padding="causal",
                      activation="relu",
                      input_shape=[None, 1]),
  tf.keras.layers.LSTM(60, return_sequences=True),
  tf.keras.layers.LSTM(60, return_sequences=True),
  tf.keras.layers.Dense(30, activation="relu"),
  tf.keras.layers.Dense(10, activation="relu"),
  tf.keras.layers.Dense(1),
  tf.keras.layers.Lambda(lambda x: x * 400)
])
# NOTE(review): model_expanded is built but never compiled, trained, or
# referenced again in this script — it appears to be leftover experimentation.
model_expanded = tf.keras.models.Sequential([
  tf.keras.layers.Conv1D(filters=32, kernel_size=5,
                      strides=1, padding="causal",
                      activation="relu",
                      input_shape=[None, 1]),
  tf.keras.layers.LSTM(64, return_sequences=True),
  tf.keras.layers.LSTM(64, return_sequences=True),
  tf.keras.layers.Dense(30, activation="relu"),
  tf.keras.layers.Dense(10, activation="relu"),
  tf.keras.layers.Dense(1),
  tf.keras.layers.Lambda(lambda x: x * 400)
])
optimizer = tf.keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
history = model.fit(train_set,epochs=500)
# Predict over all windows, then keep the step-ahead forecasts that line up
# with the validation period.
rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
# Validation mean absolute error (value not printed or stored here).
tf.keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
#-----------------------------------------------------------
# Retrieve a list of list results on training and test data
# sets for each training epoch
#-----------------------------------------------------------
loss=history.history['loss']
epochs=range(len(loss)) # Get number of epochs
#------------------------------------------------
# Plot training and validation loss per epoch
#------------------------------------------------
plt.plot(epochs, loss, 'r')
plt.title('Training loss')
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend(["Loss"])
plt.figure()
# Zoom in on epochs 200-500 where the curve has mostly flattened.
zoomed_loss = loss[200:]
zoomed_epochs = range(200,500)
#------------------------------------------------
# Plot training and validation loss per epoch
#------------------------------------------------
plt.plot(zoomed_epochs, zoomed_loss, 'r')
plt.title('Training loss')
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend(["Loss"])
plt.figure()
| 32.668449 | 134 | 0.647405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,221 | 0.199869 |
f4eed8815cb84898320683d05ba025d24a490e96 | 182 | py | Python | 2018/1/solution1.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | [
"WTFPL"
] | 2 | 2020-12-04T09:45:38.000Z | 2020-12-07T14:06:12.000Z | 2018/1/solution1.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | [
"WTFPL"
] | null | null | null | 2018/1/solution1.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | [
"WTFPL"
# Advent of Code 2018 day 1 part 1: sum the frequency changes in input.txt.
with open("input.txt") as input_file:
    total = 0
    for raw_line in input_file:
        change = raw_line.strip()
        # Show the change alongside the running total *before* applying it.
        print(change, total)
        total += int(change)
print(total)
| 20.222222 | 37 | 0.67033 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.06044 |
f4effd5fe4c5a3fbe7c1088d05cb3e53feff8a3e | 1,435 | py | Python | packages/models-library/tests/conftest.py | KZzizzle/osparc-simcore | 981bc8d193f3f5d507e3225f857e0308c339e163 | [
"MIT"
] | null | null | null | packages/models-library/tests/conftest.py | KZzizzle/osparc-simcore | 981bc8d193f3f5d507e3225f857e0308c339e163 | [
"MIT"
] | 17 | 2020-10-15T16:06:05.000Z | 2022-03-21T18:48:21.000Z | packages/models-library/tests/conftest.py | GitHK/osparc-simcore-forked | 5b01a28d1b8028afcf9a735e1d46a73daa13686e | [
"MIT"
] | null | null | null | # pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-import
import json
import subprocess
import sys
from pathlib import Path
from typing import Callable, Dict
import pytest
import models_library
# Shared fixtures (e.g. repository paths, schema helpers) pulled in from
# the pytest-simcore plugin package.
pytest_plugins = [
    "pytest_simcore.repository_paths",
    "pytest_simcore.schemas",
]
@pytest.fixture(scope="session")
def package_dir():
    """Path to the installed models_library package directory."""
    package_path = Path(models_library.__file__).resolve().parent
    assert package_path.exists()
    return package_path
@pytest.fixture(scope="session")
def json_diff_script(script_dir: Path) -> Path:
    """Path to the json-schema-diff helper script in the repo's scripts dir."""
    script = script_dir / "json-schema-diff.bash"
    assert script.exists()
    return script
@pytest.fixture(scope="session")
def diff_json_schemas(json_diff_script: Path, tmp_path_factory: Path) -> Callable:
    """Yield a callable that diffs two JSON schemas via the helper script."""

    def _run_diff(schema_a: Dict, schema_b: Dict) -> subprocess.CompletedProcess:
        # Serialize both schemas into a fresh temporary directory per call.
        workdir = tmp_path_factory.mktemp(__name__)
        path_a = workdir / "schema_a.json"
        path_a.write_text(json.dumps(schema_a))
        path_b = workdir / "schema_b.json"
        path_b.write_text(json.dumps(schema_b))
        return subprocess.run(
            [json_diff_script, path_a, path_b],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            check=False,
            cwd=workdir,
        )

    yield _run_diff
| 27.596154 | 82 | 0.710105 | 0 | 0 | 704 | 0.490592 | 1,092 | 0.760976 | 0 | 0 | 239 | 0.166551 |
f4f1160d71bb5c4176834eb20e1ed9451bb439a5 | 3,974 | py | Python | cellular-automata/examples/forest_fire_parameter_exploration.py | enthought/ets-examples | 3b2894bfe25c9f57f76996bd5818d1f0cdc77e8a | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2017-07-22T11:04:20.000Z | 2018-10-01T15:30:16.000Z | cellular-automata/examples/forest_fire_parameter_exploration.py | enthought/ets-examples | 3b2894bfe25c9f57f76996bd5818d1f0cdc77e8a | [
"Apache-2.0",
"BSD-3-Clause"
] | 11 | 2018-01-18T17:06:59.000Z | 2019-12-17T12:08:42.000Z | cellular-automata/examples/forest_fire_parameter_exploration.py | enthought/ets-examples | 3b2894bfe25c9f57f76996bd5818d1f0cdc77e8a | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2018-12-28T14:15:15.000Z | 2018-12-28T14:15:15.000Z | # Copyright (c) 2017, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
"""
Forest Fire Parameter Exploration
=================================
This example shows parallel execution of multiple forest fire
simulations with parameters swept over a range of values to
collect and display statistics about the model.
In this example, we use a modified version of a forest fire
simulation with the following states:
- Empty cell: 0
- Healthy tree: 1
- Burning tree: 2
- Moldy tree: 3
Every tick:
- an empty cell can grow a tree
- fires are randomly started and burn down all connected trees
- crowded trees have a chance of contracting and dying from mold
infection
The script runs the simulation with different values for the
likelihood of mold infection. As the probability grows, a qualitative
decrease can be seen in the size and effect of fires, as the deaths
due to mold have the effect of breaking up large groups of trees into
less-connected groves, making it harder for fire to spread.
"""
import numpy as np
from cellular_automata.automata_recorder import AutomataRecorder, count_states
from cellular_automata.cellular_automaton import CellularAutomaton
from cellular_automata.rules.change_state_rule import ChangeStateRule
from cellular_automata.rules.forest import BurnGrovesRule, MoldRule
# Cell state values used throughout the simulation (see module docstring).
EMPTY = 0
TREE = 1
FIRE = 2
MOLD = 3
def simulation(p_mold, size, steps):
    """ Perform a simulation of a moldy forest, returning statistics.

    Parameters
    ----------
    p_mold : probability
        The probability that a crowded tree dies of mold.
    size : size tuple
        The number of cells in each direction for the simulation.
    steps : int
        The number of ticks to run the simulation for.

    Returns
    -------
    counts : array
        Array with shape (4, steps) of counts of each state at
        each tick.
    """
    # Re-seed so parallel workers don't all share the parent's RNG state.
    np.random.seed(None)

    rules = [
        # trees infected with mold die
        ChangeStateRule(from_state=MOLD, to_state=EMPTY, p_change=1.0),
        # fires are extinguished
        ChangeStateRule(from_state=FIRE, to_state=EMPTY, p_change=1.0),
        # empty cells may grow a tree
        ChangeStateRule(from_state=EMPTY, to_state=TREE, p_change=0.0025),
        # fires are started and burn whole connected groves
        BurnGrovesRule(),
        # crowded trees have a chance to be infected with mold
        MoldRule(dead_state=MOLD, p_mold=p_mold),
    ]
    forest = CellularAutomaton(shape=size, rules=rules)

    # Record how many cells are in each state at every tick.
    recorder = AutomataRecorder(automaton=forest, transform=count_states)

    forest.start()
    for _ in range(steps):
        forest.step()

    return recorder.as_array()
if __name__ == '__main__':
    from joblib import Parallel, delayed
    import matplotlib.pyplot as plt

    SHAPE = (256, 256)
    N_STEPS = 4096
    N_SIMULATIONS = 16

    # Sweep the mold probability logarithmically and run simulations in
    # parallel.  Bug fix: simulation() takes (p_mold, size, steps); the
    # original call passed a stray 4th argument (count_states), which
    # raised TypeError in every worker.
    results = Parallel(n_jobs=4)(
        delayed(simulation)(p_mold, SHAPE, N_STEPS)
        for p_mold in np.logspace(-4, -1, N_SIMULATIONS)
    )

    for i, result in enumerate(results):
        # plot count of each non-empty state over time
        plt.subplot(N_SIMULATIONS, 2, 2*i+1)
        for state, color in [(TREE, 'g'), (FIRE, 'r'), (MOLD, 'c')]:
            plt.plot(result[state, :], c=color)

        # plot histogram of (log) tree counts, skipping tree-less ticks
        plt.subplot(N_SIMULATIONS, 2, 2*i+2)
        plt.hist(
            np.log(result[result[1] != 0, 1]),
            bins=np.linspace(0, 10, 21)
        )

    plt.show()
| 27.597222 | 78 | 0.681429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,093 | 0.526673 |
f4f2a5cbf30192318f4590d2be79888593d3e6c9 | 1,146 | py | Python | src/RobotFrameworkPercy/RobotFrameworkPercy.py | carlosnizolli/robot-framework-percy | dff3f4a8bcde1b33ff7b84ae35d0440137432803 | [
"MIT"
] | null | null | null | src/RobotFrameworkPercy/RobotFrameworkPercy.py | carlosnizolli/robot-framework-percy | dff3f4a8bcde1b33ff7b84ae35d0440137432803 | [
"MIT"
] | null | null | null | src/RobotFrameworkPercy/RobotFrameworkPercy.py | carlosnizolli/robot-framework-percy | dff3f4a8bcde1b33ff7b84ae35d0440137432803 | [
"MIT"
] | 2 | 2021-01-30T01:53:14.000Z | 2021-06-09T09:22:44.000Z | from robot.libraries.BuiltIn import BuiltIn
import os
import percy
class RobotFrameworkPercy():
    """Robot Framework keywords wrapping the Percy visual-testing client."""

    ROBOT_LIBRARY_SCOPE = 'TEST SUITE'

    def __init__(self):
        self.percy_runner = None

    def Percy_Initialize_Build(self, **kwargs):
        """Create a Percy runner bound to the active Selenium webdriver."""
        webdriver = BuiltIn().get_library_instance('SeleniumLibrary').driver
        loader = percy.ResourceLoader(
            root_dir=kwargs.get('root_dir'),
            base_url=kwargs.get('base_url'),
            webdriver=webdriver
        )
        # Config options include: api_url, access_token, default_widths.
        config = percy.Config(**kwargs)
        self.percy_runner = percy.Runner(loader=loader, config=config)
        # Build options include: branch, pull_request_number, parallel_nonce,
        # parallel_total_shards, commit_data.
        self.percy_runner.initialize_build(**kwargs)

    def Percy_Snapshot(self, name=None, **kwargs):
        """Take a named snapshot of the current page state."""
        self.percy_runner.snapshot(
            name=name,
            widths=kwargs.get('widths'),
            enable_javascript=kwargs.get('enable_javascript')
        )

    def Percy_Finalize_Build(self):
        """Mark the Percy build as finished."""
        self.percy_runner.finalize_build()
| 28.65 | 73 | 0.63438 | 1,077 | 0.939791 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.17103 |
f4f65f4899bacd28b8ccabd10ebc4817d077ad11 | 3,434 | py | Python | source/clusters.py | ehutt/text2topics | f79ff78b79c1ef2006350672ce9bfeb15de14b9c | [
"MIT"
] | 2 | 2019-08-14T03:07:59.000Z | 2020-09-17T21:55:38.000Z | source/clusters.py | ehutt/text2topics | f79ff78b79c1ef2006350672ce9bfeb15de14b9c | [
"MIT"
] | 2 | 2019-06-14T16:34:18.000Z | 2019-06-21T21:02:25.000Z | source/clusters.py | ehutt/text2topics | f79ff78b79c1ef2006350672ce9bfeb15de14b9c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 11:02:21 2019
@author: elizabethhutton
"""
import pandas as pd
import numpy as np
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from wordcloud import WordCloud
from yellowbrick.cluster import KElbowVisualizer
from sklearn.neighbors import NearestNeighbors
import spacy
import iterate
class Cluster():
    """K-means clustering of a document corpus in word-vector space."""

    def __init__(self, corpus, num_clusters):
        """Fit k-means on corpus.vectors and label each word with a cluster id.

        Keyword Arguments:
        corpus -- document corpus as Corpus object
        num_clusters -- k clusters to search for
        """
        self.k = num_clusters
        self.top_words = None
        vectors = corpus.vectors
        self.model = KMeans(n_clusters=num_clusters, init='k-means++')
        labels = self.model.fit_predict(vectors)
        self.centers = self.model.cluster_centers_
        # Attach each word's cluster assignment back onto the corpus.
        corpus.clusters = pd.DataFrame(
            labels, columns=['clusterid'], index=vectors.index)

    def get_top_words(self, corpus, knn):
        """Return a DataFrame of the knn words nearest each cluster center.

        Keyword Arguments:
        corpus -- pandas df of words and their vectors
        knn -- (int) num words to find per cluster
        """
        vectors = corpus.vectors
        neigh = NearestNeighbors(n_neighbors=knn, metric='cosine')
        neigh.fit(vectors)
        top_n_words = pd.DataFrame()
        for cluster_id, center in enumerate(self.centers):
            # Nearest words to this centroid, by cosine distance.
            neighbor_idxs = neigh.kneighbors(
                center.reshape(1, -1), n_neighbors=knn, return_distance=False)
            words = [vectors.iloc[idx].name for idx in neighbor_idxs[0]]
            top_n_words['Cluster ' + str(cluster_id)] = words
        self.top_words = top_n_words
        return top_n_words
def iterate_kmeans(clean_corpus, elbow):
    """Run each elbow heuristic over the corpus to help pick a cluster count."""
    # Prep word vectors for clustering.
    clean_corpus.vectorize()
    # Sweep the number of topics with every available scoring method.
    for method in (elbow.elbow_kmeans_variance,
                   elbow.elbow_kmeans_inertia,
                   elbow.elbow_kmeans_ch,
                   elbow.elbow_kmeans_dist):
        method(clean_corpus)
def plot_tsne(word_vectors):
    """Project word vectors to 2-D with t-SNE and scatter-plot them with labels.

    NOTE(review): the original carried a bare '#fix' marker above this
    function — verify the t-SNE settings/labels behave as intended.
    """
    tsne = TSNE(n_components=2, random_state=0, n_iter=5000, perplexity=3)
    np.set_printoptions(suppress=True)
    projected = tsne.fit_transform(word_vectors)
    plt.figure(figsize=(12, 6))
    plt.scatter(projected[:, 0], projected[:, 1], c='orange', edgecolors='r')
    for label, x, y in zip(word_vectors.index, projected[:, 0], projected[:, 1]):
        plt.annotate(label, xy=(x + 1, y + 1), xytext=(0, 0),
                     textcoords='offset points')
def get_kmeans(clean_corpus, num_topics):
    """Cluster the corpus into num_topics groups; return (model, top words)."""
    cluster_model = Cluster(clean_corpus, num_topics)
    return cluster_model, cluster_model.get_top_words(clean_corpus, knn=10)
| 32.093458 | 90 | 0.625801 | 2,762 | 0.80431 | 0 | 0 | 0 | 0 | 0 | 0 | 705 | 0.2053 |
f4f6dd9ec34601f8c73911a269e4b2611744f766 | 1,095 | py | Python | week_2/ex1a.py | scodes1/pyneta | 73012860b49488b882898ddf0060e3f662212ed7 | [
"Apache-2.0"
] | null | null | null | week_2/ex1a.py | scodes1/pyneta | 73012860b49488b882898ddf0060e3f662212ed7 | [
"Apache-2.0"
] | null | null | null | week_2/ex1a.py | scodes1/pyneta | 73012860b49488b882898ddf0060e3f662212ed7 | [
"Apache-2.0"
] | null | null | null | from netmiko import ConnectHandler
from getpass import getpass
# Connect to the device and drive an interactive extended "ping" dialog,
# supplying the target address and accepting defaults for all other prompts.
device1 = {
    'host' : 'cisco4.lasthop.io',
    'username' : 'pyclass',
    'password' : getpass(),
    'device_type' : 'cisco_ios',
    'global_delay_factor' : 2,
}

net_connect = ConnectHandler(**device1)
print(net_connect.find_prompt())

# Start the interactive ping, then answer its prompts in order: one blank
# line, the target address, and five more blank lines for the remaining
# defaults.  The five duplicated send lines were folded into a loop.
output = net_connect.send_command_timing(
    "ping", strip_prompt=False, strip_command=False
)
output += net_connect.send_command_timing("\n", strip_prompt=False, strip_command=False)
output += net_connect.send_command_timing(
    "8.8.8.8", strip_prompt=False, strip_command=False
)
for _ in range(5):
    output += net_connect.send_command_timing(
        "\n", strip_prompt=False, strip_command=False
    )

net_connect.disconnect()
print()
print(output)
print()
| 32.205882 | 88 | 0.761644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.126027 |
f4f6fa07769f11d38fee60f54fe27a3c1a45cc34 | 4,094 | py | Python | bot.py | egor5q/hawkeyegame | c951cce856736099603903bd2aa98d763a01c68b | [
"MIT"
] | null | null | null | bot.py | egor5q/hawkeyegame | c951cce856736099603903bd2aa98d763a01c68b | [
"MIT"
] | null | null | null | bot.py | egor5q/hawkeyegame | c951cce856736099603903bd2aa98d763a01c68b | [
"MIT"
] | 2 | 2019-05-14T17:23:44.000Z | 2019-05-14T17:31:14.000Z | # -*- coding: utf-8 -*-
import os
import telebot
import time
import random
import threading
from emoji import emojize
from telebot import types
from pymongo import MongoClient
import traceback
# Bot credentials come from the environment so the token never lives in code.
token = os.environ['TELEGRAM_TOKEN']
bot = telebot.TeleBot(token)

# Global game state shared by all handlers:
#   fighters -- list of player dicts (see createplayer)
#   btimer   -- seconds between automatic fight() rounds
#   names    -- pool of NPC names (Russian)
fighters=[]
btimer=10
names=['Волк', 'Осёл', 'Кроль', 'Пажилая чимчима', 'Сосааать']
@bot.message_handler(commands=['start'])
def start(m):
    """Register the sender as a new fighter unless they already joined."""
    already_joined = any(player['id'] == m.from_user.id for player in fighters)
    if not already_joined:
        fighters.append(createplayer(user=m.from_user))
        bot.send_message(m.chat.id, 'Вы успешно зашли в игру! Теперь ждите, пока ваш боец прострелит кому-нибудь яйцо.\nСоветую кинуть бота в мут!')
@bot.message_handler(commands=['add'])
def add(m):
    """Admin-only: add an NPC fighter with the name given after the command."""
    if m.from_user.id != 441399484:
        return
    npc_name = m.text.split(' ')[1]
    fighters.append(createplayer(name=npc_name))
    bot.send_message(m.chat.id, 'Добавлен игрок "'+npc_name+'"!')
@bot.message_handler(commands=['settimer'])
def settimer(m):
    """Admin-only: set the delay in seconds between automatic fight rounds."""
    if m.from_user.id == 441399484:
        global btimer
        try:
            btimer = int(m.text.split(' ')[1])
        except (IndexError, ValueError):
            # Ignore a missing or non-numeric argument.  The original bare
            # `except:` also swallowed SystemExit/KeyboardInterrupt.
            pass
@bot.message_handler(commands=['stats'])
def stats(m):
    """Send the requesting player their current battle statistics."""
    me = None
    # Keep the last matching entry, as the original loop did.
    for player in fighters:
        if player['id'] == m.from_user.id:
            me = player
    if me is not None:
        report = (
            'ХП: '+str(me['hp'])+'\n'
            + 'В вас попали: '+str(me['hitted'])+' раз(а)\n'
            + 'Вы убили: '+str(me['killed'])+' дурачков\n'
        )
        bot.send_message(m.chat.id, report)
def createplayer(user=None, name=None):
    """Build a fresh fighter dict for a Telegram user or a named NPC."""
    if user is not None:
        name = user.first_name
        player_id = user.id
    else:
        # NPCs keep the caller-supplied name and a placeholder id.
        player_id = 'npc'
    return {
        'hp': 1000,
        'damage': 10,
        'killchance': 5,   # percent chance of an instant kill per hit
        'name': name,
        'id': player_id,
        'hitted': 0,       # times this player was hit
        'killed': 0,       # kills scored by this player
        'killer': ''       # name of whoever killed this player
    }
def fight():
    """Run one combat round, remove the dead, respawn NPCs, and reschedule itself."""
    # Each fighter attacks one random living opponent.
    for ids in fighters:
        # Collect every other fighter that is still alive.
        alive=[]
        for idss in fighters:
            if idss['hp']>0 and idss['id']!=ids['id']:
                alive.append(idss)
        if len(alive)>0:
            text=''
            target=random.choice(alive)
            # Damage varies +/-20% around the attacker's base damage.
            dmg=ids['damage']+ids['damage']*(random.randint(-20, 20)/100)
            target['hp']-=dmg
            target['hitted']+=1
            text+='Вы попали в '+target['name']+'! Нанесено '+str(dmg)+' урона.\n'
            if target['hp']<=0:
                ids['killed']+=1
                target['killer']=ids['name']
                text+='Вы убили цель!\n'
            else:
                # killchance% chance of an instant kill even with HP remaining.
                if random.randint(1,100)<=ids['killchance']:
                    target['hp']=0
                    ids['killed']+=1
                    target['killer']=ids['name']
                    text+='Вы прострелили яйцо цели! Та погибает.\n'
            try:
                # Best-effort send; NPC entries use id 'npc', which presumably
                # fails as a chat id and is deliberately ignored here.
                bot.send_message(ids['id'], text)
            except:
                pass
    # Remove the dead only after the whole round, so list mutation does not
    # interfere with the attack loop above.
    dellist=[]
    for ids in fighters:
        if ids['hp']<=0:
            dellist.append(ids)
    for ids in dellist:
        try:
            bot.send_message(ids['id'], 'Вы сдохли. Вас убил '+ids['killer'])
        except:
            pass
        me=ids
        # Final stats message for the fallen player.
        text='Итоговые статы:\n\n'
        text+='ХП: '+str(me['hp'])+'\n'
        text+='В вас попали: '+str(me['hitted'])+' раз(а)\n'
        text+='Вы убили: '+str(me['killed'])+' дурачков\n'
        try:
            bot.send_message(ids['id'], text)
        except:
            pass
        fighters.remove(ids)
    # Keep the arena populated: respawn a random-named NPC when few remain.
    if len(fighters)<=2:
        name=random.choice(names)
        fighters.append(createplayer(name=name))
    # Reschedule this function so rounds repeat every `btimer` seconds.
    global btimer
    t=threading.Timer(btimer, fight)
    t.start()
# Kick off the first round; fight() reschedules itself via threading.Timer.
fight()
def medit(message_text, chat_id, message_id, reply_markup=None, parse_mode=None):
    """Shorthand for editing an existing bot message in place."""
    return bot.edit_message_text(
        chat_id=chat_id,
        message_id=message_id,
        text=message_text,
        reply_markup=reply_markup,
        parse_mode=parse_mode,
    )
# Startup marker in the logs, then block forever processing updates.
print('7777')
bot.polling(none_stop=True,timeout=600)
| 28.430556 | 148 | 0.533952 | 0 | 0 | 0 | 0 | 1,319 | 0.299093 | 0 | 0 | 1,126 | 0.255329 |
f4f8ac48bcc18b69231760309200fe77cd389fd6 | 2,404 | py | Python | napari_svg/_tests/test_get_writer.py | nclack/napari-svg | 0e222857c3a61befe1ea4b4e97fd48b285877d02 | [
"BSD-3-Clause"
] | 1 | 2020-04-13T12:20:00.000Z | 2020-04-13T12:20:00.000Z | napari_svg/_tests/test_get_writer.py | nclack/napari-svg | 0e222857c3a61befe1ea4b4e97fd48b285877d02 | [
"BSD-3-Clause"
] | 13 | 2020-04-26T04:27:12.000Z | 2021-12-17T16:56:46.000Z | napari_svg/_tests/test_get_writer.py | nclack/napari-svg | 0e222857c3a61befe1ea4b4e97fd48b285877d02 | [
"BSD-3-Clause"
] | 8 | 2020-04-19T21:47:37.000Z | 2022-01-25T16:39:01.000Z | import os
import numpy as np
import pytest
from napari_svg import napari_get_writer
from napari.layers import Image, Labels, Points, Shapes, Vectors
@pytest.fixture
def layer_data_and_types():
    """One layer of each napari type, as (layer_data tuples, layer type names)."""
    np.random.seed(0)
    layers = [
        Image(np.random.rand(20, 20)),
        Labels(np.random.randint(10, size=(20, 2))),
        Points(np.random.rand(20, 2)),
        Shapes(np.random.rand(10, 2, 2)),
        Vectors(np.random.rand(10, 2, 2)),
    ]
    layer_data = [layer.as_layer_data_tuple() for layer in layers]
    # The third element of each layer-data tuple is the layer type string.
    layer_types = [data[2] for data in layer_data]
    return layer_data, layer_types
def test_get_writer(tmpdir, layer_data_and_types):
    """Test writing layers data."""
    layer_data, layer_types = layer_data_and_types
    path = os.path.join(tmpdir, 'layers_file.svg')

    writer = napari_get_writer(path, layer_types)
    assert writer is not None

    # The writer callback should create the file at the requested path.
    assert not os.path.isfile(path)
    assert writer(path, layer_data) == path
    assert os.path.isfile(path)
def test_get_writer_no_extension(tmpdir, layer_data_and_types):
    """Test writing layers data with no extension."""
    layer_data, layer_types = layer_data_and_types
    path = os.path.join(tmpdir, 'layers_file')

    writer = napari_get_writer(path, layer_types)
    assert writer is not None

    # The plugin should append the .svg suffix itself.
    assert not os.path.isfile(path)
    assert writer(path, layer_data) == path + '.svg'
    assert os.path.isfile(path + '.svg')
def test_get_writer_bad_extension(tmpdir, layer_data_and_types):
    """No writer is offered for an unsupported (.csv) extension."""
    _, types = layer_data_and_types
    target = os.path.join(tmpdir, 'layers_file.csv')
    assert napari_get_writer(target, types) is None
    # Nothing may be written for an unsupported extension.
    assert not os.path.isfile(target)
def test_get_writer_bad_layer_types(tmpdir):
    """No writer is offered when an unknown layer type is requested."""
    requested = ['image', 'points', 'bad_type']
    target = os.path.join(tmpdir, 'layers_file.svg')
    assert napari_get_writer(target, requested) is None
    # Nothing may be written when a layer type is unsupported.
    assert not os.path.isfile(target)
| 26.417582 | 64 | 0.682196 | 0 | 0 | 0 | 0 | 482 | 0.200499 | 0 | 0 | 467 | 0.19426 |
f4fa097f769833133255ddd66d6f8877c26fd01c | 4,701 | py | Python | django_easy_report/migrations/0001_initial.py | ehooo/django_easy_report | b6e42946693cf5144ee8902ecc7ab7e8626d7439 | [
"MIT"
] | null | null | null | django_easy_report/migrations/0001_initial.py | ehooo/django_easy_report | b6e42946693cf5144ee8902ecc7ab7e8626d7439 | [
"MIT"
] | 6 | 2021-11-29T20:23:30.000Z | 2022-02-02T23:09:50.000Z | django_easy_report/migrations/0001_initial.py | ehooo/django_easy_report | b6e42946693cf5144ee8902ecc7ab7e8626d7439 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the django_easy_report app.

    Auto-generated by Django's makemigrations. Creates the four report
    models (ReportGenerator, ReportQuery, ReportSender, ReportRequester)
    and then wires the ReportGenerator -> ReportSender foreign key.
    Operation order matters: do not reorder by hand.
    """
    initial = True
    dependencies = [
        # ReportRequester.user points at the (swappable) project user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Configuration of a report type: which generator class to run
        # and how previously generated files are reused/preserved.
        migrations.CreateModel(
            name='ReportGenerator',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('name', models.SlugField(max_length=32, unique=True)),
                ('class_name', models.CharField(help_text='Class name for for generate the report. It must be subclass of django_easy_report.reports.ReportBaseGenerator', max_length=64)),
                ('init_params', models.TextField(blank=True, help_text='JSON with init parameters', null=True)),
                ('permissions', models.CharField(blank=True, help_text='Comma separated permission list. Permission formatted as: <content_type.app_label>.<permission.codename>', max_length=1024, null=True)),
                ('always_generate', models.BooleanField(default=False, help_text='Do not search for similar reports previously generated')),
                ('always_download', models.BooleanField(default=False, help_text='Never will redirect to storage URL')),
                ('preserve_report', models.BooleanField(default=False, help_text='If model is deleted, do not remove the file on storage')),
            ],
        ),
        # One concrete report run: status lifecycle, the parameters used
        # (plus their hash for deduplication) and where the file landed.
        migrations.CreateModel(
            name='ReportQuery',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('status', models.PositiveSmallIntegerField(choices=[(0, 'Created'), (10, 'Working'), (20, 'Done'), (30, 'Error')], default=0)),
                ('filename', models.CharField(max_length=32)),
                ('mimetype', models.CharField(default='application/octet-stream', max_length=32)),
                ('params', models.TextField(blank=True, null=True)),
                ('params_hash', models.CharField(max_length=128)),
                ('storage_path_location', models.CharField(blank=True, max_length=512, null=True)),
                # PROTECT: a generator with existing queries cannot be deleted.
                ('report', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='django_easy_report.reportgenerator')),
            ],
            options={
                'ordering': ('created_at',),
            },
        ),
        # Delivery configuration: optional notification email and the
        # storage backend used to persist generated files.
        migrations.CreateModel(
            name='ReportSender',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(db_index=True, max_length=32, unique=True)),
                ('email_from', models.EmailField(blank=True, help_text='If have content email must be send when report is completed.', max_length=254, null=True)),
                ('size_to_attach', models.PositiveIntegerField(default=0, help_text='If size is bigger, the file will be upload using storage system, otherwise the file will be send as attached on the email.')),
                ('storage_class_name', models.CharField(help_text='Class name for for save the report. It must be subclass of django.core.files.storage.Storage', max_length=64)),
                ('storage_init_params', models.TextField(blank=True, help_text='JSON with init parameters', null=True)),
            ],
        ),
        # Link between a user and a report run they asked for, plus
        # whether they have been notified of completion.
        migrations.CreateModel(
            name='ReportRequester',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('request_at', models.DateTimeField(auto_now_add=True)),
                ('user_params', models.TextField(blank=True, null=True)),
                ('notified', models.BooleanField(default=False)),
                ('query', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_easy_report.reportquery')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Added after both models exist to break the creation-order cycle.
        migrations.AddField(
            model_name='reportgenerator',
            name='sender',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='django_easy_report.reportsender'),
        ),
    ]
| 61.855263 | 220 | 0.635397 | 4,591 | 0.976601 | 0 | 0 | 0 | 0 | 0 | 0 | 1,355 | 0.288237 |
f4fadeeded9f37b94879f47ef805c3a9011fce5c | 29,988 | py | Python | post_optimization_studies/mad_analyses/new_pre_select/Output/Histos/MadAnalysis5job_0/selection_10.py | sheride/axion_pheno | 7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5 | [
"MIT"
] | null | null | null | post_optimization_studies/mad_analyses/new_pre_select/Output/Histos/MadAnalysis5job_0/selection_10.py | sheride/axion_pheno | 7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5 | [
"MIT"
] | null | null | null | post_optimization_studies/mad_analyses/new_pre_select/Output/Histos/MadAnalysis5job_0/selection_10.py | sheride/axion_pheno | 7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5 | [
"MIT"
] | null | null | null | def selection_10():
# Library import
import numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Library version
matplotlib_version = matplotlib.__version__
numpy_version = numpy.__version__
# Histo binning
xBinning = numpy.linspace(0.0,1000.0,81,endpoint=True)
# Creating data sequence: middle of each bin
xData = numpy.array([6.25,18.75,31.25,43.75,56.25,68.75,81.25,93.75,106.25,118.75,131.25,143.75,156.25,168.75,181.25,193.75,206.25,218.75,231.25,243.75,256.25,268.75,281.25,293.75,306.25,318.75,331.25,343.75,356.25,368.75,381.25,393.75,406.25,418.75,431.25,443.75,456.25,468.75,481.25,493.75,506.25,518.75,531.25,543.75,556.25,568.75,581.25,593.75,606.25,618.75,631.25,643.75,656.25,668.75,681.25,693.75,706.25,718.75,731.25,743.75,756.25,768.75,781.25,793.75,806.25,818.75,831.25,843.75,856.25,868.75,881.25,893.75,906.25,918.75,931.25,943.75,956.25,968.75,981.25,993.75])
# Creating weights for histo: y11_PT_0
y11_PT_0_weights = numpy.array([0.131010679613,1.11359057671,3.91803799968,7.74191288589,11.8933130711,16.4991568388,20.9412367569,25.7190323666,30.04648039,34.1774125941,38.9593002,41.9643614386,45.7022380038,48.3920755321,51.470832703,54.3939900169,57.2762273684,58.6191061344,61.1123838433,63.3027418306,63.4460216989,64.8257404311,65.2924600022,67.2166582341,68.7273768458,68.2934172446,68.2238173086,68.0313774854,68.4776570753,68.1664973612,67.8635376396,68.6987368722,66.7090187005,65.8574194831,65.4357398706,64.3753808449,63.2331418945,63.2781418532,61.5913834032,61.5013434859,61.0837438696,59.3765054384,59.5730252579,57.251667391,56.2609083014,54.860709588,53.7307506264,52.9733513223,51.3561928084,51.0655130755,49.1371948474,48.4985154343,46.7830770106,45.9601977668,44.3839592152,44.2488393394,41.7432816418,40.3062429623,40.0565231917,38.8241963241,38.2223648772,36.8140021713,35.4752354015,34.2306325452,33.6533690756,32.0321105654,31.6759268927,30.7260997655,30.2102442396,28.6135497068,27.4017028203,27.3771388429,25.7968202951,25.0885449459,24.4130215667,23.4959464094,23.2421146426,22.2513435531,21.3342683958,20.6546530203])
# Creating weights for histo: y11_PT_1
y11_PT_1_weights = numpy.array([702.900577971,5860.16056363,2832.29591012,1369.85243305,695.429636629,353.117841868,172.218757054,82.125877891,37.0944554986,17.1936398652,9.24720761597,5.07881009451,3.71786022279,2.35715669866,1.82252554531,1.17885974637,0.862978845048,0.789151075614,0.729034652602,0.522341813342,0.328118568417,0.255359026229,0.145930104316,0.133699727511,0.0851190211533,0.072804485602,0.12148725597,0.0971783966792,0.0364618669294,0.0243135298806,0.0727484865885,0.0121719383497,0.0121674920761,0.0121245635048,0.0,0.0364704910979,0.0243242289768,0.0,0.0242616647,0.0364505589741,0.0242483579244,0.0,0.0121684894834,0.0,0.024342799179,0.0121245635048,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121541812948,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y11_PT_2
y11_PT_2_weights = numpy.array([176.651716605,2317.75832416,2055.46502747,1507.23989373,1091.67396584,771.726216738,552.406825288,385.407584973,270.137935167,185.277736377,122.800991985,84.5348367418,58.3412497919,38.1936119851,26.0140675928,17.3893783594,10.0704099157,6.54671910232,4.34748404844,2.94145968466,2.21910346047,1.56600464309,1.15479106274,0.742998161084,0.723339252279,0.753005296057,0.451792930011,0.39150450778,0.341421933512,0.220899219642,0.21085130078,0.210929604194,0.230951642499,0.160710505135,0.180818656534,0.0601906152755,0.0602349114548,0.0903994112121,0.0501383577027,0.0702760949828,0.0401689199358,0.020103552365,0.0401888656128,0.0502107522046,0.0401370903212,0.0100586499413,0.0301139145123,0.0401535567647,0.0602671418837,0.0301342610036,0.0,0.0100371423281,0.0100534186947,0.0100326135396,0.0100698396851,0.0200459136146,0.010070459501,0.010070459501,0.0100185850388,0.0100611539977,0.0100548236109,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100273285757,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100570260235])
# Creating weights for histo: y11_PT_3
y11_PT_3_weights = numpy.array([67.0789130983,937.440838177,884.926097202,687.131609497,531.166505423,417.467104414,332.749145317,268.218028966,220.387587449,179.435108382,150.450091413,124.088191139,103.75302416,84.2606768284,70.7233585269,60.8782614135,52.1832763312,44.0369405948,35.8453032007,30.1910948892,24.7459745887,21.0608466384,16.9348979312,14.2734103142,11.1050487029,9.26815200532,7.37594349106,6.29228689127,4.81795376916,3.4931295235,2.71172691556,2.08488132681,1.57307456661,0.979173126514,0.830623421558,0.539045036903,0.495008055682,0.357604687273,0.302493398775,0.242102888865,0.192484797057,0.186860135778,0.165033682595,0.0660077366969,0.0825237679701,0.0605301808808,0.0880485307866,0.0549914428422,0.044025443537,0.0550033461564,0.0604766362797,0.0385444304785,0.0384558421954,0.0275054959515,0.0165119280831,0.0275088394421,0.01102512587,0.0165110343189,0.00550369029398,0.0109919225298,0.0109785566923,0.00549235573882,0.00549596329616,0.0109806854761,0.00549596329616,0.0,0.0,0.0109954772738,0.0110064258853,0.0,0.0,0.00549441139649,0.0,0.0,0.00548472624255,0.0,0.0109898709346,0.00550802098782,0.0,0.0054880453578])
# Creating weights for histo: y11_PT_4
y11_PT_4_weights = numpy.array([9.01867086008,130.802485415,128.827945372,104.20910547,83.8984112371,68.323670997,56.2718860133,47.355932441,39.9327826078,34.2165250589,29.4556032211,25.5245686064,22.2060017532,19.7805293701,17.6841443218,15.7203305518,14.1829542598,12.9070048088,11.608107708,10.5562756265,9.57832896469,8.84604040077,8.02409366574,7.29161270232,6.93059105728,6.31765432202,5.81832949671,5.27277267277,5.0150735723,4.60480166133,4.3677053416,4.04504735696,3.91982213247,3.50805110866,3.36122663431,2.75630414025,2.44650044697,2.06944591036,1.76752378279,1.55334304622,1.27987239685,1.1092597274,0.863573170433,0.814131707482,0.62766491767,0.476594426662,0.360206632699,0.286189499486,0.211168401587,0.16085448703,0.0976773425617,0.097694578351,0.076002936643,0.0621515348368,0.0365172664438,0.0493441408654,0.0305980234047,0.0305980474546,0.0207229422476,0.0226914818305,0.0118353796278,0.0138188542226,0.00986751745144,0.012837696894,0.00789972341657,0.00986073136043,0.00986906065583,0.01479984721,0.00691116271521,0.00592408910173,0.00197545955196,0.00295601523125,0.00789580327658,0.00295870842355,0.000988181113161,0.00197400212568,0.00295990731299,0.00395035232703,0.00197395683163,0.000984227704087])
# Creating weights for histo: y11_PT_5
y11_PT_5_weights = numpy.array([1.94653669359,29.3172758954,29.9865974582,24.6086391712,20.1481078181,16.5763963227,13.6958958047,11.6637331307,9.9399147285,8.64102143541,7.50614471331,6.65611044958,5.82840170828,5.12403147589,4.61277301145,4.14163647153,3.76331328984,3.38814968971,3.10284471692,2.86109091691,2.55283866791,2.3160492959,2.15849272285,2.02445676783,1.92436800997,1.72697398275,1.60973498262,1.49555633949,1.44286610608,1.35840081282,1.25409301134,1.17821384193,1.135575695,1.06303135049,1.00578158924,0.940716294749,0.88298921543,0.889077521428,0.820748219553,0.798594739687,0.779433199906,0.732785261113,0.69169389679,0.681112079281,0.670270197442,0.615329007238,0.609792837816,0.588330728968,0.573242196682,0.54700450655,0.473393098097,0.443676947537,0.393745276483,0.328440282697,0.29970389449,0.271996280759,0.226601051835,0.200913217716,0.160580280996,0.143695184314,0.118224283854,0.100339739949,0.0731105645645,0.0526879528453,0.0330209320285,0.0267205735774,0.0287378165608,0.0196620915976,0.0133581242538,0.0115995892641,0.0100841744124,0.00730924400812,0.00731128451286,0.00478896459404,0.00529466568381,0.00529438961553,0.00630140270939,0.00630146672523,0.00226657665839,0.00176346820993])
# Creating weights for histo: y11_PT_6
y11_PT_6_weights = numpy.array([0.783843801624,12.2265377037,12.70615678,10.6527257553,8.84084168579,7.33469903274,6.1550698266,5.28657322802,4.51555992145,3.97711558243,3.41184324484,3.06238349696,2.69640129276,2.39810137953,2.12063147733,1.90868459059,1.75161928976,1.57867631627,1.43405284091,1.30610776955,1.18092577111,1.09876233891,1.02063555232,0.929073273656,0.896196604566,0.838942814585,0.783898983109,0.720021815429,0.660485991125,0.604676516541,0.566937978724,0.527410841026,0.504761140543,0.482752125095,0.455246553874,0.416249938172,0.404309544461,0.382971603851,0.359283051928,0.364673143423,0.324377263656,0.320366309427,0.290784634788,0.289113195596,0.281432872528,0.261098595165,0.267142267369,0.24761751839,0.233924112855,0.247956604618,0.234208217531,0.236729571557,0.210126597492,0.19725321682,0.213824356805,0.201827482044,0.196696703543,0.173812781647,0.188931808851,0.185833648358,0.183779137696,0.167479406646,0.155770135387,0.171109088801,0.156896657412,0.14599571494,0.144870692412,0.138020790718,0.126828146118,0.12802674396,0.0996597817554,0.0913492701312,0.09304474126,0.0810026316763,0.0669754581291,0.0689846040125,0.0630043005476,0.061009189955,0.0526718673265,0.0509823242087])
# Creating weights for histo: y11_PT_7
y11_PT_7_weights = numpy.array([0.131106469137,2.02840287035,2.14430444527,1.82977235121,1.53237747294,1.28926588132,1.08813168834,0.943676748415,0.812419111692,0.718042677764,0.628317779822,0.559046396813,0.498364366902,0.442669149018,0.403495077192,0.364346989259,0.327665862443,0.300520321532,0.270951489211,0.246593894317,0.22600769389,0.208721992728,0.19612868932,0.175009232348,0.16602270287,0.154391851249,0.141732749918,0.136267708526,0.124123926471,0.115879446692,0.108379321635,0.0987197674123,0.0934810792583,0.089161885649,0.0862528211623,0.080816278234,0.0744337958632,0.0701650193887,0.0663402741222,0.0631273657226,0.0601609690327,0.0550243724334,0.05421996139,0.0505276920695,0.0507941946132,0.0459538563064,0.0443972953726,0.0417626040347,0.0410975085686,0.0384615431818,0.0350853972593,0.0340991618274,0.0339265910577,0.032158085374,0.0315663985972,0.0293238628817,0.0280074895133,0.0277702062766,0.0257991561451,0.0261952722157,0.025260346591,0.0230298179488,0.0223431893789,0.0233270653056,0.0220508999165,0.022391544566,0.0220865607145,0.0211793372729,0.018332797577,0.0193046203301,0.0190045440816,0.0168808134316,0.017103134975,0.0165434502964,0.015744202504,0.0170613092887,0.0154204348123,0.0152674944548,0.0144399996559,0.0148616721428])
# Creating weights for histo: y11_PT_8
y11_PT_8_weights = numpy.array([0.0441113966496,0.705669815692,0.759984812721,0.652807115134,0.55979480251,0.467904975472,0.401176407535,0.354328412116,0.305553525182,0.273454060763,0.241789691298,0.21129889024,0.190259450478,0.172316164235,0.159290193465,0.140948609718,0.12880923773,0.118281602981,0.10741668797,0.0994420508301,0.09233806394,0.0796856065974,0.0772208308017,0.0710600644354,0.0630548221525,0.0612650820723,0.0567743081508,0.0497549197575,0.0499770557791,0.0472726510657,0.0426836466621,0.0401975764581,0.0370621158965,0.0359883520365,0.0334319538561,0.0309622925232,0.0309856807332,0.0271370653862,0.0265649080956,0.0254006355752,0.0232553501534,0.0228892170064,0.021460635438,0.019793078337,0.0196682075698,0.0179457215869,0.0169371328722,0.0153650145177,0.0160077225295,0.0147292398109,0.0132205042481,0.0140345253297,0.0124162037519,0.0118240766424,0.0111195257484,0.0117657204597,0.0106363995773,0.0105674763785,0.00971614701819,0.00910512668983,0.00904074451622,0.00790836163456,0.0082218304725,0.00785733375764,0.00847773906933,0.00734330248008,0.0070599041979,0.00678066233476,0.00611385481436,0.00615048594865,0.00575405207613,0.00623907009351,0.00570296034568,0.00540627162034,0.00481021677647,0.00411056626931,0.00458846684601,0.00484840266987,0.00436772817721,0.0039899142898])
# Creating weights for histo: y11_PT_9
y11_PT_9_weights = numpy.array([164943.979527,1297154.22498,610808.252294,289781.784617,159205.687534,90024.6466906,49663.995891,26063.1398179,12180.6275642,5031.02228078,2374.55207706,1313.64596973,750.705267103,539.529108065,312.785094202,213.754299125,143.326831678,93.8484827915,52.1177048053,49.5685960945,28.6649779689,28.6753677412,23.4569065857,7.81647716224,20.8869260451,5.21076303407,0.0,7.83491497056,0.0,5.21416220457,2.59804330778,2.61034453668,2.61034453668,2.60657430288,0.0,2.60956741863,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.60341891904,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y11_PT_10
y11_PT_10_weights = numpy.array([31276.9848271,337422.439677,234875.085314,146304.939395,95307.5782391,65643.4379113,47550.2499671,36309.9418446,28621.4922129,22086.228649,15926.9957395,11383.4199313,7828.70651489,5525.22393342,3542.12634905,2248.63722677,1317.53629245,694.04744834,445.512312974,282.262042765,196.968866774,128.485547292,88.4738402711,74.7884344781,51.6143637014,49.4959087789,41.0804188382,32.6485265413,23.1701769967,14.7484193013,13.6953595569,8.425085877,7.36964831042,7.37645473036,6.32550732323,5.26437914326,2.10969507155,2.10701290346,4.21089616194,1.05572344391,1.05537331152,1.05386620315,2.10845152439,0.0,0.0,1.05572344391,0.0,1.05194970922,0.0,0.0,0.0,0.0,0.0,0.0,2.1080806149,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y11_PT_11
y11_PT_11_weights = numpy.array([4430.09849164,54188.814408,43410.6841204,29424.1665837,20621.8078373,14978.9045692,11275.4399938,8885.18734355,7135.53552886,5906.46626974,4993.43823187,4345.80597572,3823.65998572,3463.86142747,3206.76425893,2999.74002627,2774.84484271,2517.77918107,2122.7124774,1758.1867297,1404.36718431,1201.44576634,952.227246887,791.915853238,644.964158644,510.649809516,422.194154708,347.586556086,252.688852088,221.128186448,159.165195252,100.658473854,71.6396475328,49.7546392294,30.6312732199,24.8761131305,19.5792095459,16.121030121,10.3661735738,7.37256740537,8.52515939962,5.98639392569,5.06853610878,3.45207438439,3.6878129406,2.30236143027,2.30298734197,1.84242737448,0.920468664048,0.920996596935,1.84432854765,1.38350653487,0.921984453889,1.61214374055,0.230621641619,0.690682608783,0.46065487245,0.460370541783,0.691185566679,0.0,0.0,0.229908509568,0.0,0.230564160717,0.230649575186,0.0,0.230596090282,0.460950345806,0.0,0.0,0.0,0.0,0.0,0.230574688636,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y11_PT_12
y11_PT_12_weights = numpy.array([371.23446632,5072.22605975,4592.14431319,3392.38082995,2510.14208815,1916.52754291,1494.77663841,1189.94786994,961.679654128,782.413331289,653.294197371,552.626646178,472.555186293,401.961560119,344.395838848,306.567717819,271.877138307,240.805686294,217.323426115,198.158127603,180.994038516,169.663995421,156.811789766,146.540712618,139.813551227,138.375718149,130.754979702,126.95999907,127.903550578,131.06202058,130.7320507,135.495396944,138.846647531,133.414936271,126.739365134,112.394850786,99.4382335347,85.7866290049,74.904240047,65.0748924282,57.3793269529,49.5383008133,41.1193945789,34.7514452397,29.1318854399,21.3475277898,18.3053724589,13.0427433524,9.05445122993,5.84255588401,4.90034318327,2.93535003981,2.90677842607,2.3260599055,1.35700142159,1.71657472079,1.43969702109,0.609152406413,0.88605280428,0.609247046253,0.830907016017,0.553707310039,0.636769158031,0.249228939805,0.332385543051,0.249265910897,0.13840095544,0.193811696782,0.0553014823407,0.165998625204,0.0829989855944,0.0553496486331,0.2490200012,0.0276605244676,0.11077089269,0.0831365980769,0.0830534612095,0.0277272955675,0.110730882351,0.0831584498773])
# Creating weights for histo: y11_PT_13
y11_PT_13_weights = numpy.array([71.4031696407,1011.96801601,974.987747693,751.502890835,576.125637118,458.077326539,364.850640528,295.220630455,242.733590188,202.643374755,171.045270759,144.284457719,124.40407422,108.699354068,93.6626332497,80.8869032699,71.9458584159,65.1195581805,57.3674829608,51.851682362,46.241962562,40.1770529183,36.5967755329,32.4644156317,31.9002996613,28.0587330745,25.6185572399,23.5923613701,22.3112626852,20.3839074143,19.772628555,17.9744979008,18.4713527464,16.7663706184,15.7899252631,14.6705894619,15.3644640599,15.0216896201,14.0544923661,13.0964885985,12.9552608689,14.1339629263,14.0042831866,13.8928083705,15.3361978817,15.8184463102,16.263508148,17.0478308752,18.5809160455,17.4804162719,16.9072281813,15.2436440497,14.5482220331,11.8456707701,11.2918041718,9.85963496184,9.25531645171,8.2660669668,7.0770912641,5.89779026469,4.89987827026,3.71024840397,3.01479422536,2.13749470501,1.66378559938,1.05843001542,0.957837495917,0.584831170943,0.393138057786,0.393317436967,0.393148313226,0.352856933387,0.251991036615,0.262161884748,0.131039344123,0.171352933969,0.181480818482,0.141239987647,0.110896324259,0.0302782288241])
# Creating weights for histo: y11_PT_14
y11_PT_14_weights = numpy.array([27.527962396,403.455277127,395.668110121,319.591605207,251.49329143,201.872570669,164.236122948,134.271496746,112.664032012,96.0735573487,80.4995696042,69.6704443013,60.731207489,52.869054374,45.8802643569,40.8673525452,36.1426274713,31.9470527072,29.2934793646,25.0659095012,23.3126042264,20.5849219851,18.6690903784,17.0486940384,15.8089108756,14.2136573896,13.0451706653,11.8569428572,11.158356259,9.91893860047,9.19477823789,8.43627585324,8.24131966425,7.54799632912,6.98816288848,6.53553765368,6.02320824022,5.6754442884,5.39245540757,4.91132467853,4.84618410315,4.57156334892,4.20690924732,4.13051498444,3.98068512045,3.9692852312,3.55066536971,3.61549584348,3.22815392315,3.17156691974,3.13481218401,3.0439470256,2.93083496217,2.95087345126,2.96774590293,2.98484612155,2.94516042543,3.14592313243,3.03308269646,3.10366390073,3.23940222426,3.28476054836,3.46581718289,3.85894522994,3.83361000527,4.02599996877,3.75428978364,3.70623057454,3.36956679837,3.07833406244,2.94254379881,2.6426470701,2.55201352605,2.31711270993,2.18134899346,1.90696831791,1.76535576187,1.70300840944,1.65515503701,1.46566525458])
# Creating weights for histo: y11_PT_15
y11_PT_15_weights = numpy.array([4.17090010579,62.2847283399,63.2070244345,52.2423924163,42.9975474401,35.1019262206,29.2661453025,24.0898984966,20.6183137004,17.6163138835,15.320735778,13.3165745009,11.8088959455,10.2320499095,9.11045788493,8.10319692288,7.15900143622,6.22123999641,5.66816838524,5.1354872079,4.6791579013,4.12952723493,3.87734214012,3.47709007146,3.29788798283,2.92158663874,2.63808091117,2.48869640524,2.30945295909,2.14179788711,1.92243293019,1.87046308376,1.64066539966,1.56188879722,1.39078569059,1.30248268125,1.24934537141,1.19241380548,1.12405232374,0.971925696767,0.931972924625,0.865178538001,0.860794287502,0.804211188136,0.763474041285,0.699089379492,0.644429876975,0.640096909787,0.526862766575,0.554428728295,0.514932543058,0.513323026953,0.507248908616,0.455723241839,0.409983726969,0.392785383893,0.397674786829,0.345846501247,0.335214076378,0.333615313226,0.316950127529,0.312168963103,0.286336353688,0.294153395639,0.298520039656,0.246731457283,0.257441870598,0.245295288225,0.237643203654,0.254270104113,0.241985860315,0.242184612689,0.232992256301,0.211657453289,0.204103917756,0.176767312967,0.181323847034,0.18578608598,0.179865463085,0.170522565362])
# Creating weights for histo: y11_PT_16
y11_PT_16_weights = numpy.array([1.29230475575,20.252083342,21.4293549047,18.105728309,15.0427055881,12.6256987175,10.697437243,9.03993159047,7.75284106893,6.71745798881,5.88243722091,5.13826020546,4.56011528308,3.96418519669,3.59158639571,3.20313153177,2.91098306138,2.58422900605,2.32665877658,2.12372882277,1.95224102087,1.76844109449,1.59796309553,1.48184923619,1.33329420554,1.23630806287,1.12741456417,1.03891276891,0.988225436594,0.905680015227,0.850443563005,0.768840932489,0.737081435944,0.670250735938,0.625627998204,0.582096710336,0.536790757337,0.508830153339,0.49206249481,0.446349847718,0.423255323134,0.39866920175,0.366902156716,0.353735011826,0.334567588561,0.302073154197,0.291245039518,0.263781518915,0.252039613446,0.24069169288,0.224809017642,0.222643448624,0.202409608747,0.192486850577,0.194454926479,0.173521618978,0.172801393009,0.151850177104,0.16068922646,0.139379958,0.135608602032,0.130904815128,0.121332907638,0.11591108999,0.108161526351,0.113940741838,0.106351660613,0.0989532942697,0.0926139501003,0.0890252983722,0.0861287583429,0.0834123036902,0.0756534584916,0.0754788034054,0.0706038660711,0.076733007934,0.0622762268333,0.0639155198309,0.0622988337871,0.0604764129098])
# Creating a new Canvas
fig = plt.figure(figsize=(12,6),dpi=80)
frame = gridspec.GridSpec(1,1,right=0.7)
pad = fig.add_subplot(frame[0])
# Creating a new Stack
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights+y11_PT_7_weights+y11_PT_8_weights+y11_PT_9_weights+y11_PT_10_weights+y11_PT_11_weights+y11_PT_12_weights+y11_PT_13_weights+y11_PT_14_weights+y11_PT_15_weights+y11_PT_16_weights,\
label="$bg\_dip\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#e5e5e5", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights+y11_PT_7_weights+y11_PT_8_weights+y11_PT_9_weights+y11_PT_10_weights+y11_PT_11_weights+y11_PT_12_weights+y11_PT_13_weights+y11_PT_14_weights+y11_PT_15_weights,\
label="$bg\_dip\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#f2f2f2", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights+y11_PT_7_weights+y11_PT_8_weights+y11_PT_9_weights+y11_PT_10_weights+y11_PT_11_weights+y11_PT_12_weights+y11_PT_13_weights+y11_PT_14_weights,\
label="$bg\_dip\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights+y11_PT_7_weights+y11_PT_8_weights+y11_PT_9_weights+y11_PT_10_weights+y11_PT_11_weights+y11_PT_12_weights+y11_PT_13_weights,\
label="$bg\_dip\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights+y11_PT_7_weights+y11_PT_8_weights+y11_PT_9_weights+y11_PT_10_weights+y11_PT_11_weights+y11_PT_12_weights,\
label="$bg\_dip\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#c1bfa8", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights+y11_PT_7_weights+y11_PT_8_weights+y11_PT_9_weights+y11_PT_10_weights+y11_PT_11_weights,\
label="$bg\_dip\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#bab5a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights+y11_PT_7_weights+y11_PT_8_weights+y11_PT_9_weights+y11_PT_10_weights,\
label="$bg\_dip\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b2a596", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights+y11_PT_7_weights+y11_PT_8_weights+y11_PT_9_weights,\
label="$bg\_dip\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b7a39b", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights+y11_PT_7_weights+y11_PT_8_weights,\
label="$bg\_vbf\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ad998c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights+y11_PT_7_weights,\
label="$bg\_vbf\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#9b8e82", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights,\
label="$bg\_vbf\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#876656", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights,\
label="$bg\_vbf\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#afcec6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights,\
label="$bg\_vbf\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#84c1a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights,\
label="$bg\_vbf\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#89a8a0", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights,\
label="$bg\_vbf\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#829e8c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights+y11_PT_1_weights,\
label="$bg\_vbf\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#adbcc6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y11_PT_0_weights,\
label="$signal$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#7a8e99", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
# Axis
plt.rc('text',usetex=False)
plt.xlabel(r"p_{T} [ a_{1} ] ",\
fontsize=16,color="black")
plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\
fontsize=16,color="black")
# Boundary of y-axis
ymax=(y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights+y11_PT_7_weights+y11_PT_8_weights+y11_PT_9_weights+y11_PT_10_weights+y11_PT_11_weights+y11_PT_12_weights+y11_PT_13_weights+y11_PT_14_weights+y11_PT_15_weights+y11_PT_16_weights).max()*1.1
ymin=0 # linear scale
#ymin=min([x for x in (y11_PT_0_weights+y11_PT_1_weights+y11_PT_2_weights+y11_PT_3_weights+y11_PT_4_weights+y11_PT_5_weights+y11_PT_6_weights+y11_PT_7_weights+y11_PT_8_weights+y11_PT_9_weights+y11_PT_10_weights+y11_PT_11_weights+y11_PT_12_weights+y11_PT_13_weights+y11_PT_14_weights+y11_PT_15_weights+y11_PT_16_weights) if x])/100. # log scale
plt.gca().set_ylim(ymin,ymax)
# Log/Linear scale for X-axis
plt.gca().set_xscale("linear")
#plt.gca().set_xscale("log",nonposx="clip")
# Log/Linear scale for Y-axis
plt.gca().set_yscale("linear")
#plt.gca().set_yscale("log",nonposy="clip")
# Legend
plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)
# Saving the image
plt.savefig('../../HTML/MadAnalysis5job_0/selection_10.png')
plt.savefig('../../PDF/MadAnalysis5job_0/selection_10.png')
plt.savefig('../../DVI/MadAnalysis5job_0/selection_10.eps')
# Script entry point: regenerate and save the selection_10 histogram
# plots (PNG/EPS under ../../HTML, ../../PDF, ../../DVI) when this
# file is executed directly.
if __name__ == '__main__':
    selection_10()
| 154.57732 | 1,310 | 0.800053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,613 | 0.087135 |
f4fbc7791249dd3dc8759c139bed36e338524bfa | 4,198 | py | Python | games_logger/games/migrations/0001_initial.py | HaeckelK/games_logger_django | 0a8a51e73f56e68d2dea6252a263c408ca86071e | [
"MIT"
] | null | null | null | games_logger/games/migrations/0001_initial.py | HaeckelK/games_logger_django | 0a8a51e73f56e68d2dea6252a263c408ca86071e | [
"MIT"
] | 3 | 2021-01-10T10:45:32.000Z | 2021-01-10T13:31:05.000Z | games_logger/games/migrations/0001_initial.py | HaeckelK/games_logger_django | 0a8a51e73f56e68d2dea6252a263c408ca86071e | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-01-07 21:50
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the games app.

    Creates Category (with GenreCategory/PlatformCategory/TimeCategory
    multi-table subclasses), Game, Player and Match models, then wires
    Game's category foreign keys via AddField operations.
    Auto-generated by Django; do not edit by hand.
    """

    initial = True

    # First migration of the app: no dependencies.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
                ('description_short', models.CharField(max_length=50)),
                ('description_long', models.CharField(max_length=250)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('players_min', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1)])),
                ('players_max', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1)])),
                # Self-referential link: an expansion points at its base game.
                ('expands', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='games.game')),
            ],
        ),
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # The three Category subclasses use multi-table inheritance
        # (parent_link OneToOneField onto games.category).
        migrations.CreateModel(
            name='GenreCategory',
            fields=[
                ('category_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='games.category')),
            ],
            bases=('games.category',),
        ),
        migrations.CreateModel(
            name='PlatformCategory',
            fields=[
                ('category_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='games.category')),
            ],
            bases=('games.category',),
        ),
        migrations.CreateModel(
            name='TimeCategory',
            fields=[
                ('category_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='games.category')),
            ],
            bases=('games.category',),
        ),
        migrations.CreateModel(
            name='Match',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comments', models.CharField(max_length=250)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.game')),
                ('players', models.ManyToManyField(related_name='players', to='games.Player')),
                ('winners', models.ManyToManyField(related_name='winners', to='games.Player')),
            ],
        ),
        # Game's category links are added after the category models exist.
        migrations.AddField(
            model_name='game',
            name='genre',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.genrecategory'),
        ),
        migrations.AddField(
            model_name='game',
            name='platform',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.platformcategory'),
        ),
        migrations.AddField(
            model_name='game',
            name='time',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.timecategory'),
        ),
    ]
| 45.139785 | 193 | 0.595045 | 4,042 | 0.962839 | 0 | 0 | 0 | 0 | 0 | 0 | 643 | 0.153168 |
f4fd4b6da503b9271301852273d340f511178dff | 505 | py | Python | button.py | anas652/button | ce1436332f491815e1a0e760c725589372d3c0cb | [
"MIT"
] | null | null | null | button.py | anas652/button | ce1436332f491815e1a0e760c725589372d3c0cb | [
"MIT"
] | null | null | null | button.py | anas652/button | ce1436332f491815e1a0e760c725589372d3c0cb | [
"MIT"
] | null | null | null | n kjhimport Tkinter
"""Tiny Tkinter gag: a button that begs not to be pressed.

The label escalates on the first two clicks and the button removes
itself from the window on the third.  (Python 2 / Tkinter module.)
"""
# Fix: the file's original first line was the garbled "n kjhimport Tkinter",
# which is a SyntaxError; restore the valid import.
import Tkinter

window = Tkinter.Tk()
button = Tkinter.Button(window, text="do not press this because i will kill you.", width=40)
button.pack(padx=10, pady=10)

# Number of times the button has been released so far.
clickCount = 0


def onClick(event):
    """Advance the gag one step per button release."""
    global clickCount
    clickCount = clickCount + 1
    if clickCount == 1:
        button.configure(text="seriously? do. not. press. it.")
    elif clickCount == 2:
        button.configure(text="gah! Next time, no more button.")
    else:
        # Third click: hide the button for good.
        button.pack_forget()


button.bind("<ButtonRelease-1>", onClick)
window.mainloop()
7600037bb80f567062423e8740b15ff55e0fa33b | 5,381 | py | Python | corehq/apps/fixtures/interface.py | omari-funzone/commcare-hq | 5edb462c891fc08e51c4babd7acdf12c0006a602 | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/apps/fixtures/interface.py | omari-funzone/commcare-hq | 5edb462c891fc08e51c4babd7acdf12c0006a602 | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/apps/fixtures/interface.py | omari-funzone/commcare-hq | 5edb462c891fc08e51c4babd7acdf12c0006a602 | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | from django.contrib import messages
from django.http import HttpResponseRedirect
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop, ugettext_lazy
from couchdbkit import ResourceNotFound
from memoized import memoized
from corehq.apps.fixtures.dispatcher import FixtureInterfaceDispatcher
from corehq.apps.fixtures.models import FixtureDataType, _id_from_doc
from corehq.apps.fixtures.views import FixtureViewMixIn, fixtures_home
from corehq.apps.reports.filters.base import BaseSingleOptionFilter
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
class FixtureInterface(FixtureViewMixIn, GenericReportView):
    """Common base for the lookup-table (fixture) report views."""

    # Dispatch/rendering configuration shared by all fixture interfaces.
    dispatcher = FixtureInterfaceDispatcher
    base_template = 'fixtures/fixtures_base.html'

    # These reports render synchronously, are not exportable, and do not
    # require filters to be set before rendering.
    asynchronous = False
    exportable = False
    needs_filters = False
class FixtureSelectFilter(BaseSingleOptionFilter):
    """Single-select report filter listing the domain's lookup tables."""

    slug = "table_id"
    label = ""
    placeholder = "place"
    default_text = ugettext_lazy("Select a Table")

    @property
    def selected(self):
        # Report an empty selection so the UI falls back to default_text
        # (knockout won't display the default selected-value as it should).
        return ""

    @property
    @memoized
    def fixtures(self):
        data_types = FixtureDataType.by_domain(self.domain)
        return sorted(data_types, key=lambda data_type: data_type.tag.lower())

    @property
    @memoized
    def options(self):
        # (value, label) pairs expected by the single-option filter.
        return [(_id_from_doc(fixture), fixture.tag) for fixture in self.fixtures]
class FixtureViewInterface(GenericTabularReport, FixtureInterface):
    """Read-only tabular view of a single lookup table.

    The table to display is selected via the ``table_id`` GET parameter
    (populated by FixtureSelectFilter).
    """
    name = ugettext_noop("View Tables")
    slug = "view_lookup_tables"
    report_template_path = 'fixtures/view_table.html'
    fields = ['corehq.apps.fixtures.interface.FixtureSelectFilter']

    @property
    def view_response(self):
        # Redirect to the fixtures home page when the domain has no
        # tables yet; otherwise defer to the standard report rendering.
        if not self.has_tables():
            messages.info(self.request, _("You don't have any tables defined yet - create tables to view them."))
            return HttpResponseRedirect(fixtures_home(self.domain))
        else:
            return super(FixtureViewInterface, self).view_response

    @property
    def report_context(self):
        """Template context, including the DataTables JS configuration."""
        assert self.has_tables()
        # No table chosen (or the chosen doc no longer exists): tell the
        # template to render the "pick a table" state instead.
        if not self.request.GET.get("table_id", None):
            return {"table_not_selected": True}
        try:
            context = super(FixtureViewInterface, self).report_context
        except ResourceNotFound:
            return {"table_not_selected": True}

        # Build javascript options for DataTables
        report_table = context['report_table']
        headers = report_table.get('headers')
        data_tables_options = {
            'slug': self.context['report']['slug'],
            'defaultRows': report_table.get('default_rows', 10),
            'startAtRowNum': report_table.get('start_at_row', 0),
            'showAllRowsOption': report_table.get('show_all_rows'),
            'autoWidth': headers.auto_width,
        }
        if headers.render_aoColumns:
            data_tables_options.update({
                'aoColumns': headers.render_aoColumns,
            })
        if headers.custom_sort:
            data_tables_options.update({
                'customSort': headers.custom_sort,
            })

        # Server-side pagination settings, if enabled for this report.
        pagination = context['report_table'].get('pagination', {})
        if pagination.get('is_on'):
            data_tables_options.update({
                'ajaxSource': pagination.get('source'),
                'ajaxParams': pagination.get('params'),
            })

        # Optional fixed left column configuration.
        left_col = context['report_table'].get('left_col', {})
        if left_col.get('is_fixed'):
            data_tables_options.update({
                'fixColumns': True,
                'fixColsNumLeft': left_col['fixed'].get('num'),
                'fixColsWidth': left_col['fixed'].get('width'),
            })

        context.update({
            "selected_table": self.table.get("table_id", ""),
            'data_tables_options': data_tables_options,
        })
        if self.lookup_table:
            context.update({
                "table_description": self.lookup_table.description,
            })
        return context

    @memoized
    def has_tables(self):
        # True iff the domain has at least one lookup-table definition.
        return True if list(FixtureDataType.by_domain(self.domain)) else False

    @property
    @memoized
    def table(self):
        """Headers/rows payload for the selected table (or empty stub)."""
        # Local import, presumably to avoid a circular import with the
        # views module — TODO confirm.
        from corehq.apps.fixtures.views import data_table
        if self.has_tables() and self.request.GET.get("table_id", None):
            return data_table(self.request, self.domain)
        else:
            return {"headers": None, "rows": None}

    @cached_property
    def lookup_table(self):
        # The FixtureDataType document currently selected, or None.
        if self.has_tables() and self.request.GET.get("table_id", None):
            return FixtureDataType.get(self.request.GET['table_id'])
        return None

    @property
    def headers(self):
        return self.table["headers"]

    @property
    def rows(self):
        return self.table["rows"]
class FixtureEditInterface(FixtureInterface):
    """Management view for creating/editing the domain's lookup tables."""

    name = ugettext_noop("Manage Tables")
    slug = "edit_lookup_tables"
    report_template_path = 'fixtures/manage_tables.html'

    @property
    @memoized
    def data_types(self):
        # Materialize the domain's lookup-table definitions once.
        return list(FixtureDataType.by_domain(self.domain))

    @property
    def report_context(self):
        # Extend the inherited context with the editable table definitions.
        ctx = super(FixtureEditInterface, self).report_context
        ctx.update(types=self.data_types)
        return ctx
| 33.42236 | 113 | 0.660472 | 4,687 | 0.871028 | 0 | 0 | 3,783 | 0.703029 | 0 | 0 | 955 | 0.177476 |
7600e325b061a3eb01042d3e6b2023bf522e3722 | 53,884 | py | Python | pyDR/blopt.py | ClayCampaigne/pyDR | ef07d19db09b1bc57955e7ec99c9861c6d3c296f | [
"MIT"
] | null | null | null | pyDR/blopt.py | ClayCampaigne/pyDR | ef07d19db09b1bc57955e7ec99c9861c6d3c296f | [
"MIT"
] | null | null | null | pyDR/blopt.py | ClayCampaigne/pyDR | ef07d19db09b1bc57955e7ec99c9861c6d3c296f | [
"MIT"
] | null | null | null | """
Code for the optimization and gaming component of the Baselining work.
@author: Maximilian Balandat, Lillian Ratliff
@date Mar 2, 2016
"""
import numpy as np
import pandas as pd
import logging
from gurobipy import GRB, Model, quicksum, LinExpr
from pandas.tseries.holiday import USFederalHolidayCalendar
from datetime import datetime
from .utils import (get_energy_charges, get_demand_charge, dem_charges, dem_charges_yearly,
get_pdp_demand_credit, get_DR_rewards, powerset, E19,
carbon_costs)
# strftime formats used throughout the baselining models as dictionary
# keys: psform identifies a single (15-min/hourly) period, dsform a day.
psform = '%Y-%m-%d %H:%M'
dsform = '%Y-%m-%d'
class BLModel(object):
"""
Abstract base class for Baselining models.
"""
    def __init__(self, name):
        """
        Construct a baselining model called `name`, backed by a fresh
        gurobipy Model object.
        """
        self._name = name
        self._model = Model()
    def get_model(self):
        """
        Return the underlying gurobipy Model object.
        """
        return self._model
    def set_dynsys(self, dynsys):
        """
        Set the dynamical system object providing the underlying
        dynamics (consumption variables, control bounds, etc.).
        """
        self._dynsys = dynsys
    def set_window(self, index):
        """
        Set the optimization window. Here `index` is a pandas
        DatetimeIndex; it is also passed through to the dynamical
        system.
        """
        self._index = index
        self._dynsys.set_window(index)
    def energy_charges(self, tariff, isRT=False, LMP=None, isPDP=False,
                       twindow=None, carbon=False):
        """
        Return total energy consumption charges (as determined by the
        tariff's energy charge) as a pandas Series of gurobipy linear
        expressions, indexed by period (US/Pacific). If `twindow` is
        given as a (start, end) pair, only that sub-window is priced.
        RTP (`isRT`) and PDP (`isPDP`) pricing are mutually exclusive.
        """
        locidx = self._index.tz_convert('US/Pacific')
        year = locidx[0].year
        if isRT and isPDP:
            raise Exception('Cannot combine RTP and PDP.')
        nrg_charges = get_energy_charges(
            self._index, tariff, isRT=isRT, LMP=LMP,
            isPDP=isPDP, carbon=carbon, year=year)['EnergyCharge']
        cons = self._dynsys.get_consumption()['energy']
        if twindow is None:
            # charge each period's consumption at that period's rate
            # echrg_= quicksum([ec * con for ec, con in
            #                   zip(nrg_charges.values, cons.values)])
            echrg_ = [ec * con for ec, con in
                      zip(nrg_charges.values, cons.values)]
            echrg = pd.Series(echrg_, index=locidx)
        else:
            # restrict rates and consumption to the requested sub-window
            nrg_charges_ = nrg_charges.loc[twindow[0]:twindow[1]]
            cons_ = cons.loc[twindow[0]:twindow[1]]
            # echrg = quicksum([ec * con for ec, con in
            #                   zip(nrg_charges_.values, cons_.values)])
            echrg_ = [ec * con for ec, con in
                      zip(nrg_charges_.values, cons_.values)]
            indx = locidx[locidx.get_loc(twindow[0]):
                          locidx.get_loc(twindow[1])+1]
            echrg = pd.Series(echrg_, index=indx)
        return echrg
    def demand_charges(self, tariff, isPDP=False):
        """
        Return the total demand charges under the tariff as a pandas
        Series of gurobipy expressions, indexed by month-start
        Timestamps (US/Pacific). For E19 tariffs, separate peak /
        part-peak / maximum demand charges are modeled; for other
        tariffs with demand charges only the monthly maximum applies.
        Tariffs without demand charges yield zero expressions.
        """
        # determine which year/month combinations there is a demand charge,
        # and create a variable for each of them.
        # First remove any variables/constraints left over from a previous
        # call so the model can be rebuilt from scratch.
        # NOTE(review): self._maxconpkbnds is populated below but never
        # removed here — looks like an omission; confirm before reuse.
        if hasattr(self, '_maxcon'):
            for maxcon in self._maxcon.values():
                self._model.remove(maxcon)
            del self._maxcon
        if hasattr(self, '_maxconbnd'):
            for maxconbnd in self._maxconbnd.values():
                self._model.remove(maxconbnd)
            del self._maxconbnd
        if hasattr(self, '_maxconppk'):
            for maxconppk in self._maxconppk.values():
                self._model.remove(maxconppk)
            del self._maxconppk
        if hasattr(self, '_maxconppkbnd'):
            for maxconppkbnd in self._maxconppkbnd.values():
                self._model.remove(maxconppkbnd)
            del self._maxconppkbnd
        if hasattr(self, '_maxconpk'):
            for maxconpk in self._maxconpk.values():
                self._model.remove(maxconpk)
            del self._maxconpk
        if hasattr(self, '_maxconpkbnd'):
            for maxconpkbnd in self._maxconpkbnd.values():
                self._model.remove(maxconpkbnd)
            del self._maxconpkbnd
        if hasattr(self, '_maxconpks'):
            for maxconpks in self._maxconpks.values():
                self._model.remove(maxconpks)
            del self._maxconpks
        if hasattr(self, '_maxconppkw'):
            for maxconppkw in self._maxconppkw.values():
                self._model.remove(maxconppkw)
            del self._maxconppkw
        if hasattr(self, '_maxconppkbndw'):
            for maxconppkbndw in self._maxconppkbndw.values():
                self._model.remove(maxconppkbndw)
            del self._maxconppkbndw
        if hasattr(self, '_maxconppks'):
            for maxconppks in self._maxconppks.values():
                self._model.remove(maxconppks)
            del self._maxconppks
        if hasattr(self, '_maxconppkbnds'):
            for maxconppkbnds in self._maxconppkbnds.values():
                self._model.remove(maxconppkbnds)
            del self._maxconppkbnds
        self._model.update()
        locidx = self._index.tz_convert('US/Pacific')
        # dict mapping each year in the data to the (unique) months that
        # appear during that year; used to track monthly peak consumption
        ym_dict = {year: np.unique(locidx[locidx.year == year].month)
                   for year in np.unique(locidx.year)}
        indx = []
        for year, months in ym_dict.items():
            for month in months:
                indx.append(pd.Timestamp(datetime(year, month, 1),
                                         tz='US/Pacific'))
        if tariff in dem_charges:
            if not(tariff in E19):
                # non-E19 tariff: only a monthly maximum-demand charge
                self._maxcon, self._maxconbnd = {}, {}
                # locidx = self._index.tz_convert('US/Pacific')
                # the following creates a dictionary with all years in the data
                # as keys, and for each year the value is an array of (unique)
                # months that appear during that year. This is used for keeping
                # track of the peak consumpiton for the demand charge
                for year, months in ym_dict.items():
                    for month in months:
                        # declare variable for max consumption
                        self._maxcon[year, month] = self._model.addVar(
                            vtype=GRB.CONTINUOUS,
                            name='maxcon[{},{}]'.format(year, month))
                self._model.update()
                # now add in the necessary constraints and update objective
                dcharges = []
                cons = self._dynsys.get_consumption()['power']
                for year, months in ym_dict.items():
                    for month in months:
                        relcons = cons[(locidx.year == year) &
                                       (locidx.month == month)].values
                        # maxcon >= consumption in every period of the month
                        for i, con in enumerate(relcons):
                            self._maxconbnd[year, month, i] = self._model.addConstr(
                                lhs=self._maxcon[year, month],
                                sense=GRB.GREATER_EQUAL,
                                rhs=con, name='maxconbnd[{},{},{}]'.format(
                                    year, month, i))
                        dcharges.append(
                            (get_demand_charge(tariff, month, isPDP, year=year) *
                             self._maxcon[year, month]))
                dcharges = pd.Series(dcharges, index=indx)
                self._model.update()
                return dcharges
            else:
                # for E19 tarrifs
                idx_ = self._index.tz_convert('US/Pacific')
                iswknd = idx_.dayofweek > 5
                holidays = USFederalHolidayCalendar().holidays(
                    idx_.min(), idx_.max())
                iswknd = iswknd | pd.DatetimeIndex(idx_.date).isin(holidays)
                issummer = (idx_.month >= 5) & (idx_.month <= 10)
                ToD = idx_.hour + idx_.minute / 60
                # peak: summer business days, noon-6pm; partial peak:
                # shoulder hours around the peak (summer) resp. 8:30am-9:30pm
                # (winter business days)
                ispeak = ~iswknd & issummer & (ToD >= 12) & (ToD < 18)
                ispartial_summer = (~iswknd & issummer & (
                    ((ToD >= 8.5) & (ToD < 12)) |
                    ((ToD >= 18) & (ToD < 21.5))))
                ispartial_winter = ~iswknd & ~issummer & (
                    (ToD >= 8.5) & (ToD < 21.5))
                # create dictionaries for variables
                self._maxcon, self._maxconbnd = {}, {}
                self._maxconppks, self._maxconppkbnds = {}, {}
                self._maxconpks, self._maxconpkbnds = {}, {}
                self._maxconpk, self._maxconpkbnd = {}, {}
                self._maxconppk, self._maxconppkbnd = {}, {}
                self._maxconppkw, self._maxconppkbndw = {}, {}
                for year, months in ym_dict.items():
                    for month in months:
                        # declare variable for max consumption
                        self._maxcon[year, month] = self._model.addVar(
                            vtype=GRB.CONTINUOUS,
                            name='maxcon[{},{}]'.format(year, month))
                        # declare variable for part peak consumption
                        self._maxconppk[year, month] = self._model.addVar(
                            vtype=GRB.CONTINUOUS,
                            name='maxconppk[{},{}]'.format(year, month))
                        # declare variable for max peak only in summer
                        if (5 <= month) & (month <= 10):
                            # add variable for maximum peak usage in summer
                            self._maxconpk[year, month] = self._model.addVar(
                                vtype=GRB.CONTINUOUS,
                                name='maxconpk[{},{}]'.format(year, month))
                self._model.update()  # update model
                # now add in the necessary constraints and update objective
                dcharges = []
                cons = self._dynsys.get_consumption()['power']
                for year, months in ym_dict.items():
                    for month in months:
                        dchrg = 0.0
                        # for peak summer less than max demand
                        if (month >= 5) & (month <= 10):
                            self._maxconpkbnd[year, month] = self._model.addConstr(
                                lhs=self._maxcon[year, month],
                                sense=GRB.GREATER_EQUAL,
                                rhs=self._maxconpk[year, month],
                                name='maxconpkbnd[{},{}]'.format(year, month))
                            # max partial peak summer greater than consumption
                            ppconsum = cons[(ispartial_summer) &
                                            (locidx.year == year) &
                                            (locidx.month == month)].values
                            for i, con in enumerate(ppconsum):
                                self._maxconppkbnds[year, month, i] = self._model.addConstr(
                                    lhs=self._maxconppk[year, month],
                                    sense=GRB.GREATER_EQUAL,
                                    rhs=con,
                                    name='maxconppkbnds[{},{},{}]'.format(
                                        year, month, i))
                            # max peak consumption summer
                            pconsum = cons[(ispeak) & (locidx.year == year) &
                                           (locidx.month == month)].values
                            for i, con in enumerate(pconsum):
                                self._maxconpkbnds[year, month, i] = self._model.addConstr(
                                    lhs=self._maxconpk[year, month],
                                    sense=GRB.GREATER_EQUAL,
                                    rhs=con,
                                    name='maxconpkbnds[{},{},{}]'.format(
                                        year, month, i))
                        # max partial peak winter
                        ppkconwin = cons[(ispartial_winter) &
                                         (locidx.year == year) &
                                         (locidx.month == month)].values
                        for i, con in enumerate(ppkconwin):
                            self._maxconppkbndw[year, month, i] = self._model.addConstr(
                                lhs=self._maxconppk[year, month],
                                sense=GRB.GREATER_EQUAL,
                                rhs=con,
                                name='maxconppkbndw[{},{},{}]'.format(
                                    year, month, i))
                        # max demand each month
                        relcons = cons[(locidx.year == year) &
                                       (locidx.month == month)].values
                        for i, con in enumerate(relcons):
                            self._maxconbnd[year, month, i] = self._model.addConstr(
                                lhs=self._maxcon[year, month],
                                sense=GRB.GREATER_EQUAL,
                                rhs=con, name='maxconbnd[{},{},{}]'.format(
                                    year, month, i))
                            # max partial peaks (summer & winter) < than max demand
                            self._maxconppkbnd[year, month, i] = self._model.addConstr(
                                lhs=self._maxcon[year, month],
                                sense=GRB.GREATER_EQUAL,
                                rhs=self._maxconppk[year, month],
                                name='maxconppkbnd[{},{},{}]'.format(
                                    year, month, i))
                        demchrg = get_demand_charge(tariff, month, year=year)
                        if (month >= 5) & (month <= 10):
                            # summer months carry an additional peak charge;
                            # PDP participation earns a peak-demand credit
                            mpeakchg = demchrg['mpeak']
                            ppeakchg = demchrg['ppeak']
                            maxchg = demchrg['max']
                            if isPDP:
                                pdpcred = get_pdp_demand_credit(tariff, month, year=year)
                                mpeakchg = mpeakchg - pdpcred['peak']
                            dchrg += mpeakchg * self._maxconpk[year, month]
                        else:
                            ppeakchg = demchrg['ppeak']
                            maxchg = demchrg['max']
                        # add partpeak and maximum demand charge
                        dcharges.append(
                            (maxchg * self._maxcon[year, month] +
                             ppeakchg * self._maxconppk[year, month])+dchrg)
                self._model.update()
                dcharges = pd.Series(dcharges, index=indx)
                return dcharges
        else:
            # tariff has no demand charge: return zero expressions
            return pd.Series([LinExpr(0.0) for ij in
                              range(0, np.size(indx, 0))], index=indx)
def DR_compensation(self, LMP, dr_periods, BL='CAISO', **kwargs):
"""
Return compensation for DR, i.e. reductions w.r.t. baseline.
Here LMP is a pandas Series (indexed by a tz-aware pandas
Datetimeindex containing all of the object's indices) and
dr_periods is a pandas DatetimeIndex.
"""
# start by removing all variables (might be inefficient, but o/w it
# is a pain in the ass do deal with the multihour baselines etc.)
self._removeOld()
# no work if no DR events are specified
if (LMP is None) or (dr_periods is None):
return pd.Series([0.0], index=['None'])
# get DR rewards (in case we want LMP-G instead of LMP)
DR_rewards = get_DR_rewards(LMP, isLMPmG=kwargs.get('isLMPmG'),
tariff=kwargs.get('tariff'))
# populate optimization problem for proper BL choices
if BL == 'CAISO':
# print self._DR_comp_CAISO(DR_rewards, dr_periods)
return self._DR_comp_CAISO(DR_rewards, dr_periods)
elif BL == 'expMA':
return self._DR_comp_expMA(DR_rewards, dr_periods, **kwargs)
else:
raise NotImplementedError(
'Baseline type "{}" not known!'.format(BL))
    def _DR_comp_CAISO(self, LMP, dr_periods):
        """
        Return compensation for DR, i.e. reductions w.r.t. CAISO baseline.
        Here LMP is a pandas Series (indexed by a tz-aware pandas
        Datetimeindex containing all of the object's indices) and
        dr_periods is a pandas DatetimeIndex. Note that LMP may also be
        LMP-G, i.e. the LMP minus the generation component of the tariff.

        The CAISO baseline for a period is the average consumption over
        the 10 most recent similar business days (4 for non-business
        days) that were not DR events; event selection is modeled with
        binary variables and big-M constraints.
        """
        valid_periods = dr_periods[dr_periods.isin(self._index)].tz_convert(
            'US/Pacific')
        locidx = self._index.tz_convert('US/Pacific')
        grouped = valid_periods.groupby(valid_periods.date)
        # define auxiliary variables for each possible dr period if none exist
        self._red, self._z, self._bl = {}, {}, {}
        self._redpos, self._redBL, self._red0, self._blcon = {}, {}, {}, {}
        self._dr_periods = valid_periods
        # add variables if there are days w/ multiple possible DR events
        if np.max([len(grp) for grp in grouped.values()]) > 1:
            self._zday, self._zdaysum, self._zdaymax = {}, {}, {}
        # now create variables for different days and periods within each day
        for day, periods in grouped.items():
            daystr = day.strftime(dsform)
            perstrs = [per.strftime(psform) for per in periods]
            if len(periods) > 1:
                # zday = 1 iff any event is called on this day
                self._zday[daystr] = self._model.addVar(
                    vtype=GRB.BINARY, name='zday[{}]'.format(daystr))
            for period, perstr in zip(periods, perstrs):
                # red: claimed reduction, z: event indicator, bl: baseline
                self._red[perstr] = self._model.addVar(
                    vtype=GRB.CONTINUOUS, name='red[{}]'.format(perstr))
                self._z[perstr] = self._model.addVar(
                    vtype=GRB.BINARY, name='z[{}]'.format(perstr))
                self._bl[perstr] = self._model.addVar(
                    vtype=GRB.CONTINUOUS, name='bl[{}]'.format(perstr))
        self._model.update()  # this must be done before defining constaints
        # determine "bigM" value from the bounds on the control variables
        M = np.sum(np.asarray(self._dynsys._opts['nrg_coeffs']) *
                   (self._dynsys._opts['umax'] - self._dynsys._opts['umin']),
                   axis=1).max()
        # if u is not bounded the the above results in an NaN value. We need
        # to deal with this in a better way than the following:
        if np.isnan(M):
            M = 1e9
        # perform some preparations for the constraints
        nrgcons = self._dynsys.get_consumption()['energy']
        lmps = LMP.tz_convert('US/Pacific').loc[locidx] / 1000  # to $/kWh
        holidays = USFederalHolidayCalendar().holidays(
            start=locidx.min(), end=locidx.max())
        isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
        isBusiness = pd.Series(isBusiness, index=locidx)
        # add constraints on varible zday (if multiple periods per day)
        for day, periods in grouped.items():
            daystr = day.strftime(dsform)
            perstrs = [per.strftime(psform) for per in periods]
            if len(periods) > 1:
                # link zday to the individual period indicators:
                # zday <= sum(z) and zday >= z for each period
                self._zdaysum[daystr] = self._model.addConstr(
                    lhs=self._zday[daystr],
                    sense=GRB.LESS_EQUAL,
                    rhs=quicksum([self._z[ps] for ps in perstrs]),
                    name='zdaysum[{}]'.format(daystr))
                for period, perstr in zip(periods, perstrs):
                    self._zdaymax[perstr] = self._model.addConstr(
                        lhs=self._zday[daystr],
                        sense=GRB.GREATER_EQUAL,
                        rhs=self._z[perstr],
                        name='zdaymax[{}]'.format(perstr))
        self._model.update()
        # formulate constaints and add terms to objective
        drcomp_ = []
        for i, day in enumerate(grouped):
            periods = grouped[day]
            perstrs = [per.strftime(psform) for per in periods]
            for period, perstr in zip(periods, perstrs):
                # candidate "similar" periods: same time-of-day, earlier,
                # same business/non-business type
                per_select = ((locidx < period) &
                              (locidx.hour == period.hour) &
                              (locidx.minute == period.minute))
                if isBusiness.loc[period]:
                    nmax = 10
                    per_select = per_select & isBusiness.values
                else:
                    nmax = 4
                    per_select = per_select & (~isBusiness.values)
                similars = locidx[per_select].sort_values(ascending=False)
                # now go through similar days sucessively
                sim_nonDR, sim_DR, sim_DR_mult = [], [], []
                for sim in similars:
                    if len(sim_nonDR) == nmax:
                        continue
                    if sim in self._dr_periods:
                        sim_DR += [sim]
                        if len(grouped[pd.Timestamp(sim.date())]) > 1:
                            sim_DR_mult += [sim]
                    else:
                        sim_nonDR += [sim]
                sim_DR = pd.DatetimeIndex(
                    sim_DR).sort_values(ascending=False)
                sim_DR_mult = pd.DatetimeIndex(
                    sim_DR_mult).sort_values(ascending=False)
                sim_nonDR = pd.DatetimeIndex(
                    sim_nonDR).sort_values(ascending=False)
                # get consumption variables
                cons_nonDR = nrgcons.loc[sim_nonDR].values
                # Now add constraits on the baseline variables: one bound
                # per subset K of potential-event days assumed to be events
                # (big-M deactivates all but the realized subset)
                for idxset in powerset(range(len(sim_DR))):
                    K = [sim_DR[i] for i in idxset]
                    Kc = [sim_DR[i] for i in range(len(sim_DR))
                          if i not in idxset]
                    qK = nrgcons.loc[K].values.tolist()
                    # Need to make sure to use zday if there are multiple
                    # events possible that day!
                    zK, zKc = [], []
                    for k in K:
                        if k in sim_DR_mult:
                            zK.append(self._zday[k.strftime(dsform)])
                        else:
                            zK.append(self._z[k.strftime(psform)])
                    for kc in Kc:
                        if kc in sim_DR_mult:
                            zKc.append(self._zday[kc.strftime(dsform)])
                        else:
                            zKc.append(self._z[kc.strftime(psform)])
                    # the following uses that the "closest" days appear first
                    qD = cons_nonDR[:nmax-len(idxset)].tolist()
                    n = len(sim_nonDR)
                    if n == 0:
                        print('No non-DR day available for BL computation -' +
                              ' too many DR events!')
                    bnd = (quicksum(qD + qK) / float(n) +
                           M * quicksum(zK) +
                           M * quicksum([(1-z) for z in zKc]))
                    self._blcon[perstr, idxset] = self._model.addConstr(
                        lhs=self._bl[perstr], sense=GRB.LESS_EQUAL,
                        rhs=bnd, name="blcon[{},{}]".format(perstr, idxset))
                # add constraints on baseline reduction:
                # 0 <= red <= bl - consumption, and red = 0 unless z = 1
                self._redpos[perstr] = self._model.addConstr(
                    lhs=self._red[perstr], sense=GRB.GREATER_EQUAL,
                    rhs=0.0, name='redpos[{}]'.format(perstr))
                self._redBL[perstr] = self._model.addConstr(
                    lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
                    rhs=self._bl[perstr] - nrgcons.loc[period],
                    name='redBL[{}]'.format(perstr))
                self._red0[perstr] = self._model.addConstr(
                    lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
                    rhs=self._z[perstr] * M, name='red0[{}]'.format(perstr))
                # add DR compensation to objective
                drcomp_.append(lmps.loc[period] * self._red[perstr])
        drcomp = pd.Series(drcomp_, index=self._dr_periods)
        self._model.update()
        return drcomp
    def _DR_comp_expMA(self, LMP, dr_periods, **kwargs):
        """
        Return compensation for DR, i.e. reductions w.r.t. an
        exponential-moving-average baseline. Here LMP is a pandas Series
        (indexed by a tz-aware pandas Datetimeindex containing all of
        the object's indices) and dr_periods is a pandas DatetimeIndex.
        Note that LMP may also be LMP-G, i.e. the LMP minus the
        generation component of the tariff.

        The BL for each hour-of-day evolves as
            bl <- alpha * consumption + (1 - alpha) * bl
        on non-event days (separate smoothing constants `alpha_b` /
        `alpha_nb` for business / non-business days, overridable via
        kwargs) and is frozen on event days (big-M constraints).
        """
        # set default values for alphas if not passed as kwargs
        if 'alpha_b' in kwargs:
            alpha_b = kwargs['alpha_b']
        else:
            alpha_b = 0.175  # business day
        if 'alpha_nb' in kwargs:
            alpha_nb = kwargs['alpha_nb']
        else:
            alpha_nb = 0.25  # non-business day
        valid_periods = dr_periods[dr_periods.isin(self._index)]
        locidx = self._index.tz_convert('US/Pacific')
        grouped = valid_periods.groupby(
            valid_periods.tz_convert('US/Pacific').date)
        # define auxiliary variables for each possible dr period if none exist
        self._red, self._z, self._bl = {}, {}, {}
        self._redpos, self._redBL, self._red0, self._blcon = {}, {}, {}, {}
        self._dr_periods = valid_periods
        # add variables if there are days w/ multiple possible DR events
        if np.max([len(grp) for grp in grouped.values()]) > 1:
            self._zday, self._zdaysum, self._zdaymax = {}, {}, {}
        # now create variables for different days and periods within each day
        for day, periods in grouped.items():
            daystr = day.strftime(dsform)
            perstrs = [per.strftime(psform) for per in periods]
            if len(periods) > 1:
                self._zday[daystr] = self._model.addVar(
                    vtype=GRB.BINARY, name='zday[{}]'.format(daystr))
            for period, perstr in zip(periods, perstrs):
                self._red[perstr] = self._model.addVar(
                    vtype=GRB.CONTINUOUS, name='red[{}]'.format(perstr))
                self._z[perstr] = self._model.addVar(
                    vtype=GRB.BINARY, name='z[{}]'.format(perstr))
        # for the expMA we have to define a variable for the bl value
        # for every period of the simulation range
        for per in self._index:
            perstr = per.strftime(psform)
            self._bl[perstr] = self._model.addVar(
                vtype=GRB.CONTINUOUS, name='bl[{}]'.format(perstr))
        self._model.update()  # this must be done before defining constaints
        # determine "bigM" value from the bounds on the control variables
        M = np.sum(np.asarray(self._dynsys._opts['nrg_coeffs']) *
                   (self._dynsys._opts['umax'] - self._dynsys._opts['umin']),
                   axis=1).max()
        # if u is not bounded the the above results in an NaN value. We need
        # to deal with this in a better way than the following:
        if np.isnan(M):
            M = 1e9
        # perform some preparations for the constraints
        drcomp_ = []
        nrgcons = self._dynsys.get_consumption()['energy']
        lmps = LMP.tz_convert('US/Pacific').loc[locidx] / 1000  # to $/kWh
        holidays = USFederalHolidayCalendar().holidays(
            start=locidx.min(), end=locidx.max())
        isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
        isBusiness = pd.Series(isBusiness, index=locidx)
        # add constraints on varible zday (if multiple periods per day)
        for day, periods in grouped.items():
            daystr = day.strftime(dsform)
            perstrs = [per.strftime(psform) for per in periods]
            if len(periods) > 1:
                # link zday to the individual period indicators
                self._zdaysum[daystr] = self._model.addConstr(
                    lhs=self._zday[daystr],
                    sense=GRB.LESS_EQUAL,
                    rhs=quicksum([self._z[ps] for ps in perstrs]),
                    name='zdaysum[{}]'.format(daystr))
                for period, perstr in zip(periods, perstrs):
                    self._zdaymax[perstr] = self._model.addConstr(
                        lhs=self._zday[daystr],
                        sense=GRB.GREATER_EQUAL,
                        rhs=self._z[perstr],
                        name='zdaymax[{}]'.format(perstr))
        self._model.update()
        # now add the constraints that define the baseline as well as a
        # bunch of other stuff
        for cons, alpha in zip([nrgcons[isBusiness], nrgcons[~isBusiness]],
                               [alpha_b, alpha_nb]):
            # localize consumption index
            considxloc = cons.index.tz_convert('US/Pacific')
            # compute BLs for each hour separately
            con_hrly = {hour: cons[considxloc.hour == hour].sort_index()
                        for hour in range(24)}
            for hour, con in con_hrly.items():
                # set the initial value of the BL to zero (this should not have
                # an overly large effect of the course of a year or so...)
                # NOTE: This assumes that the first occurrence of an hour (for
                # both business and non-business days) is NOT a potential event
                perstr_pre = con.index[0].strftime(psform)
                self._blcon[perstr_pre, 'init'] = self._model.addConstr(
                    lhs=self._bl[perstr_pre], sense=GRB.EQUAL,
                    rhs=0.0, name='blcon[{}]'.format(perstr_pre))
                # now loop through the rest
                for period, q in con.iloc[1:].items():
                    perstr = period.strftime(psform)
                    # if the period under consideration is a DR period,
                    # we have to do some work ...
                    if period in valid_periods:
                        # need to use zday if this day has multiple DR events
                        dt = period.tz_convert('US/Pacific').date()
                        if len(grouped[dt]) > 1:
                            z = self._zday[dt.strftime(dsform)]
                        else:
                            z = self._z[perstr]
                        # add big M constraints on the bl: frozen if z = 1,
                        # standard expMA update if z = 0
                        self._blcon[perstr, 'static'] = self._model.addConstr(
                            lhs=self._bl[perstr], sense=GRB.LESS_EQUAL,
                            rhs=self._bl[perstr_pre] + M * (1 - z),
                            name='blcon[{},static]'.format(perstr))
                        self._blcon[perstr, 'change'] = self._model.addConstr(
                            lhs=self._bl[perstr], sense=GRB.LESS_EQUAL,
                            rhs=alpha*q + (1-alpha)*self._bl[perstr_pre] + M*z,
                            name='blcon[{},change]'.format(perstr))
                        # add constraints on baseline reduction:
                        # 0 <= red <= bl - q, and red = 0 unless z = 1
                        self._redpos[perstr] = self._model.addConstr(
                            lhs=self._red[perstr], sense=GRB.GREATER_EQUAL,
                            rhs=0.0, name='redpos[{}]'.format(perstr))
                        self._redBL[perstr] = self._model.addConstr(
                            lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
                            rhs=self._bl[perstr] - q,
                            name='redBL[{}]'.format(perstr))
                        self._red0[perstr] = self._model.addConstr(
                            lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
                            rhs=self._z[perstr] * M,
                            name='red0[{}]'.format(perstr))
                        # add DR compensation to objective
                        drcomp_.append(
                            (lmps.loc[period.tz_convert('US/Pacific')] *
                             self._red[perstr]))
                    # ... otherwise this is pretty straightforward
                    else:
                        self._blcon[perstr] = self._model.addConstr(
                            lhs=self._bl[perstr], sense=GRB.EQUAL,
                            rhs=alpha * q + (1 - alpha) * self._bl[perstr_pre],
                            name='blcon[{}]'.format(perstr))
                    # update and keep track of last bl variable
                    perstr_pre = perstr
        drcomp = pd.Series(drcomp_, index=self._dr_periods)
        self._model.update()
        return drcomp
    def DR_comp_blfix(self, LMP, bl_values, **kwargs):
        """
        Return compensation for DR, i.e. reductions w.r.t. baseline.

        Here LMP is a pandas Series (indexed by a tz-aware pandas
        Datetimeindex containing all of the object's indices) and
        bl_values is a pandas Series, whose index is a DatetimeIndex,
        each entry of which represents a possible DR period, and whose
        values are the baseline values for those periods (assumed fixed).
        This is used for solving the baseline-taking equilibrium problem.
        Note that LMP may also be LMP-G, i.e. the LMP minus the generation
        component of the tariff.

        Returns
        -------
        gurobipy LinExpr
            Sum over DR periods of (DR reward * reduction variable);
            also stored on ``self._drcomp``.
        """
        # remove all gurobipy variables/constraints left over from a
        # previous DR formulation before building a new one
        self._removeOld()
        # restrict the baseline values to the periods this model covers
        self._blvals = bl_values[
            bl_values.index.isin(self._index)].tz_convert('US/Pacific')
        locidx = self._index.tz_convert('US/Pacific')
        # group the possible DR periods by (local) calendar day
        self._grouped = self._blvals.index.groupby(self._blvals.index.date)
        # define dictionaries to store variables in
        self._red, self._z = {}, {}
        self._redpos, self._redBL, self._red0 = {}, {}, {}
        # create variables for different days and periods within each day:
        # red = claimed load reduction (continuous), z = participation (binary)
        for day, periods in self._grouped.items():
            perstrs = [per.strftime(psform) for per in periods]
            for period, perstr in zip(periods, perstrs):
                self._red[perstr] = self._model.addVar(
                    vtype=GRB.CONTINUOUS, name='red[{}]'.format(perstr))
                self._z[perstr] = self._model.addVar(
                    vtype=GRB.BINARY, name='z[{}]'.format(perstr))
        self._model.update()  # must be done before defining constraints
        # determine "bigM" value from the bounds on the control variables
        M = np.sum(np.asarray(self._dynsys._opts['nrg_coeffs']) *
                   (self._dynsys._opts['umax'] -
                    self._dynsys._opts['umin']), axis=1).max()
        # if u is not bounded then the above results in an NaN value. We
        # need to deal with this in a better way than the following:
        if np.isnan(M):
            M = 1e9
        # perform some preparations for the constraints
        self._drcomp = 0.0
        nrgcons = self._dynsys.get_consumption()['energy']
        DR_rewards = get_DR_rewards(LMP, isLMPmG=kwargs.get('isLMPmG'),
                                    tariff=kwargs.get('tariff'))
        # Pick out relevant dates and convert to $/kWh
        DR_rewards = DR_rewards.tz_convert('US/Pacific').loc[locidx] / 1000
        holidays = USFederalHolidayCalendar().holidays(
            start=locidx.min(), end=locidx.max())
        # business day == weekday that is not a US federal holiday
        isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
        # NOTE(review): isBusiness is computed but not used below — confirm
        # whether it was meant to enter the constraint formulation
        isBusiness = pd.Series(isBusiness, index=locidx)
        # formulate constraints and add terms to objective
        for i, day in enumerate(self._grouped):
            periods = self._grouped[day]
            perstrs = [per.strftime(psform) for per in periods]
            for period, perstr in zip(periods, perstrs):
                # reductions must be non-negative ...
                self._redpos[perstr] = self._model.addConstr(
                    lhs=self._red[perstr], sense=GRB.GREATER_EQUAL,
                    rhs=0.0, name='redpos[{}]'.format(perstr))
                # ... consumption + reduction may not exceed the fixed
                # baseline when participating (big-M disables this
                # constraint whenever z == 0) ...
                self._redBL[perstr] = self._model.addConstr(
                    lhs=(self._red[perstr] + nrgcons.loc[period] -
                         (1-self._z[perstr]) * M),
                    sense=GRB.LESS_EQUAL, rhs=self._blvals.loc[period],
                    name='redBL[{}]'.format(perstr))
                # ... and a non-zero reduction is only allowed when
                # participating (z == 1)
                self._red0[perstr] = self._model.addConstr(
                    lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
                    rhs=self._z[perstr] * M, name='red0[{}]'.format(
                        perstr))
                # add DR compensation to objective
                self._drcomp += DR_rewards.loc[period] * self._red[perstr]
        self._model.update()
        return self._drcomp
def compute_baseline(self, bl_periods, red_times=None, BL='CAISO',
**kwargs):
"""
Compute the CAISO baseline for all elements of the pandas
Datetimeindex bl_periods. If red_times is a Datetimeindex,
regard the associated days as "event days" (in addition to
weekend days and holidays).
"""
if BL == 'CAISO':
return self._BL_CAISO(bl_periods, red_times=red_times)
elif BL == 'expMA':
return self._BL_expMA(bl_periods, red_times=red_times,
**kwargs)
else:
raise NotImplementedError(
'Baseline type "{}" not known!'.format(BL))
    def _BL_CAISO(self, bl_periods, red_times=None):
        """
        Compute the CAISO baseline for all elements of the pandas
        Datetimeindex bl_periods. If red_times is a Datetimeindex,
        regard the associated days as "event days" (in addition to
        weekend days and holidays).

        The baseline for a period is the average optimized consumption
        over the most recent earlier periods with the same local time of
        day and the same day type (up to 10 prior business days, or up
        to 4 prior non-business days).

        Returns
        -------
        pandas.Series
            Baseline values indexed by bl_periods (converted to GMT).
        """
        locidx = self._index.tz_convert('US/Pacific')
        cons = self._dynsys.get_consumption()['energy'].tz_convert(
            'US/Pacific')
        holidays = USFederalHolidayCalendar().holidays(
            start=locidx.min(), end=locidx.max())
        # business day == weekday that is not a US federal holiday
        isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
        isBusiness = pd.Series(isBusiness, index=locidx)
        if red_times is not None:
            # flag every period falling on a DR event day
            isEventDay = locidx.normalize().isin(red_times.tz_convert(
                'US/Pacific').normalize())
        blidx, blvals = bl_periods.tz_convert('US/Pacific'), []
        for period in blidx:
            # candidate periods: strictly earlier, same local time of day
            per_select = ((locidx < period) &
                          (locidx.hour == period.hour) &
                          (locidx.minute == period.minute))
            if isBusiness.loc[period]:
                nmax = 10
                per_select = per_select & isBusiness.values
            else:
                nmax = 4
                per_select = per_select & (~isBusiness.values)
            if red_times is not None:
                # event days are excluded from the baseline
                per_select = per_select & (~isEventDay)
            similars = locidx[per_select].sort_values(ascending=False)[:nmax]
            # average the optimized consumption (gurobipy expressions)
            # NOTE(review): raises ZeroDivisionError if no similar prior
            # period exists — confirm callers guarantee enough history
            blvals.append(np.sum([c.getValue() for c in cons.loc[similars]]) /
                          float(len(similars)))
        return pd.Series(blvals, index=blidx.tz_convert('GMT'))
    def _BL_expMA(self, bl_periods, red_times=None, alpha_b=0.14,
                  alpha_nb=0.32):
        """
        Compute the expMA baseline for all elements of the pandas
        Datetimeindex bl_periods using the smoothing parameter alpha.
        If red_times is a Datetimeindex, regard the associated days as
        "event days" (in addition to weekend days and holidays).

        The baseline is an exponential moving average of consumption,
        computed separately per hour of day and per day type, with
        smoothing parameter alpha_b on business days and alpha_nb on
        non-business days. On event days the baseline is held constant.

        Returns
        -------
        pandas.Series
            Baseline values for bl_periods (GMT index).
        """
        locidx = self._index.tz_convert('US/Pacific')
        cons = self._dynsys.get_consumption()['energy'].tz_convert(
            'US/Pacific')
        # evaluate the gurobipy expressions to plain numbers
        cons = pd.Series([c.getValue() for c in cons],
                         index=cons.index)
        holidays = USFederalHolidayCalendar().holidays(
            start=locidx.min(), end=locidx.max())
        # business day == weekday that is not a US federal holiday
        isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
        isBusiness = pd.Series(isBusiness, index=locidx)
        bls = []
        # treat business and non-business days as separate streams, each
        # with its own smoothing parameter
        for con, alpha in zip([cons[isBusiness], cons[~isBusiness]],
                              [alpha_b, alpha_nb]):
            # determine initial values for the BL from non-DR data
            if red_times is not None:
                nDRc = con[~con.index.isin(red_times)]
            else:
                nDRc = con
            # seed each hour's EMA with the mean non-DR consumption
            cmeans = nDRc.groupby(nDRc.index.hour).mean()
            # compute BL for each hour separately
            con_hrly = {hour: con[con.index.hour == hour]
                        for hour in range(24)}
            bl_hrly = []
            for hour, conhr in con_hrly.items():
                blvals = [cmeans[hour]]
                if red_times is not None:
                    for period, c in conhr.items():
                        if period in red_times:
                            # event period: baseline is frozen
                            blvals.append(blvals[-1])
                        else:
                            blvals.append(alpha*c + (1-alpha)*blvals[-1])
                else:
                    for period, c in conhr.items():
                        blvals.append(alpha*c + (1-alpha)*blvals[-1])
                # drop the seed value; align with the period index
                bl_hrly.append(pd.Series(blvals[1:], index=conhr.index))
            bls.append(pd.concat(bl_hrly).tz_convert('GMT'))
        return pd.concat(bls).loc[bl_periods]
def optimize(self, tariff, LMP=None, dr_periods=None, BL='CAISO',
isRT=False, isPDP=False, carbon=False, **kwargs):
"""
Solve the participant's optimization problem. Pass in additional
Lin/Quad Expr of other objective terms with 'add_obj_term' kwarg
"""
if isRT and (dr_periods is not None):
raise Exception('Cannot combine DR with RTP.')
if isPDP and (dr_periods is not None):
raise Exception('Cannot combine DR with PDP.')
# extract additonal objective term if given
if 'add_obj_term' in kwargs:
add_obj_term = kwargs['add_obj_term']
else:
add_obj_term = 0
# energy charges are always included (demand charges
# are set to zero if tariff has none and DR_compensation is
# set to zero if there are no DR events ...)
# if (LMP is None) or (dr_periods is None):
# #print drc
# drc = 0.0
# else:
# #print self.DR_compensation(LMP, dr_periods, BL=BL,
# # tariff=tariff, **kwargs)
# drc=quicksum(self.DR_compensation(LMP, dr_periods, BL=BL,
# tariff=tariff, **kwargs).values.tolist())
self._model.setObjective(
self._dynsys.additional_cost_term(vals=False) +
quicksum(self.energy_charges(
tariff, isRT=isRT, LMP=LMP, isPDP=isPDP,
carbon=carbon).values) +
quicksum(self.demand_charges(tariff, isPDP=False).values) -
quicksum(self.DR_compensation(LMP, dr_periods, BL=BL,
tariff=tariff, **kwargs).values) +
add_obj_term)
self._model.optimize()
    def optimize_blfixed(self, tariff, LMP, bl_values, carbon=False, **kwargs):
        """
        Solve the participant's optimization problem in case the baseline
        values are fixed.

        Parameters
        ----------
        tariff : str
            Tariff name; tariffs contained in `dem_charges` also get
            demand charges added to the objective.
        LMP : pandas.Series
            Locational marginal prices (may be LMP-G, see DR_comp_blfix).
        bl_values : pandas.Series
            Fixed baseline values for the possible DR periods.
        carbon : bool, optional
            If True, include carbon costs in the energy charges.
        """
        # No option for RTPs. No biggie, since RTP and DR are alternatives.
        # extract additional objective term if given
        if 'add_obj_term' in kwargs:
            add_obj_term = kwargs['add_obj_term']
        else:
            add_obj_term = 0
        # base objective: energy charges + additional dynamic-system costs
        self._model.setObjective(
            quicksum(self.energy_charges(tariff, LMP=LMP,
                                         carbon=carbon).values) +
            self._dynsys.additional_cost_term(vals=False))
        self._model.update()
        # for some tariffs we also have demand charges
        if tariff in dem_charges:
            self._model.setObjective(
                self._model.getObjective() +
                quicksum(self.demand_charges(tariff).values))
        else:
            # no demand charges: drop any stale max-consumption
            # variables/constraints from an earlier formulation
            if hasattr(self, '_maxcon'):
                for maxcon in self._maxcon.values():
                    self._model.remove(maxcon)
                del self._maxcon
            if hasattr(self, '_maxconbnd'):
                for maxconbnd in self._maxconbnd.values():
                    self._model.remove(maxconbnd)
                del self._maxconbnd
            self._model.update()
        # keep the non-DR objective around, then subtract the DR
        # compensation (computed with fixed baselines) and solve
        self._nonDRobj = self._model.getObjective() + add_obj_term
        self._model.setObjective(
            self._nonDRobj - self.DR_comp_blfix(
                LMP, bl_values, tariff=tariff, **kwargs))
        self._model.optimize()
def generation_cost(self, LMP, carbon=False):
"""
Return the generation cost of the partipant's consumption (= price
of consuption according to the LMPs) as a gurobipy LinExpr.
"""
lmps = LMP.loc[self._index] / 1000 # select and convert price to $/kWh
if carbon:
lmps += pd.Series(carbon_costs).loc[self._index.tz_convert(
'US/Pacific').year].values / 1000.0
cons = self._dynsys.get_consumption()['energy']
return quicksum([lmp * con for lmp, con in
zip(lmps.values, cons.values)])
def get_results(self):
"""
Return results of optimziation problem.
"""
columns = {}
xopt, uopt = self._dynsys.get_optvals()
for i in range(xopt.shape[1]):
columns['x{}'.format(i+1)] = xopt[:-1, i]
for i in range(uopt.shape[1]):
columns['u{}'.format(i+1)] = uopt[:, i]
cons = self._dynsys.get_consumption()
columns['nrg_cons'] = np.array([e.getValue() for e in cons['energy']])
columns['pwr_cons'] = np.array([e.getValue() for e in cons['power']])
dfs = [pd.DataFrame(columns, index=self._index)]
if hasattr(self, '_z'):
perstrs, vals = [], []
for perstr, z in self._z.items():
perstrs.append(perstr)
vals.append(bool(z.X))
dtidx = pd.to_datetime(perstrs, format=psform).tz_localize(
'US/Pacific').tz_convert('GMT')
dfs.append(pd.DataFrame({'z': vals}, index=dtidx))
if hasattr(self, '_red'):
perstrs, vals = [], []
for perstr, red in self._red.items():
perstrs.append(perstr)
vals.append(red.X)
dtidx = pd.to_datetime(perstrs, format=psform).tz_localize(
'US/Pacific').tz_convert('GMT')
dfs.append(pd.DataFrame({'red': vals}, index=dtidx))
if hasattr(self, '_bl'):
perstrs, vals = [], []
for perstr, bl in self._bl.items():
perstrs.append(perstr)
vals.append(bl.X)
dtidx = pd.to_datetime(perstrs, format=psform).tz_localize(
'US/Pacific').tz_convert('GMT')
dfs.append(pd.DataFrame({'BL': vals}, index=dtidx))
return pd.concat(dfs, axis=1)
def _removeOld(self):
"""
Helper function removing all DR-related variables from the
underlying gurobipy optimization model.
"""
if hasattr(self, '_zday'):
for zday in self._zday.values():
self._model.remove(zday)
del self._zday
if hasattr(self, '_red'):
for red in self._red.values():
self._model.remove(red)
del self._red
if hasattr(self, '_z'):
for z in self._z.values():
self._model.remove(z)
del self._z
if hasattr(self, '_bl'):
for bl in self._bl.values():
self._model.remove(bl)
del self._bl
if hasattr(self, '_zdaysum'):
for zdaysum in self._zdaysum.values():
self._model.remove(zdaysum)
del self._zdaysum
if hasattr(self, '_zdaymax'):
for zdaymax in self._zdaymax.values():
self._model.remove(zdaymax)
del self._zdaymax
if hasattr(self, '_blcon'):
for blcon in self._blcon.values():
self._model.remove(blcon)
del self._blcon
if hasattr(self, '_redpos'):
for redpos in self._redpos.values():
self._model.remove(redpos)
del self._redpos
if hasattr(self, '_redBL'):
for redBL in self._redBL.values():
self._model.remove(redBL)
del self._redBL
if hasattr(self, '_red0'):
for red0 in self._red0.values():
self._model.remove(red0)
del self._red0
self._model.update()
def compute_BLtaking_eq(blmodel, tariff, LMP, dr_periods, BL='CAISO',
                        blinit='noDR', eps=1.0, maxiter=20, carbon=False,
                        **kwargs):
    """
    Function used to compute a Baseline-taking equilibrium via
    fixed-point iteration on the baseline values.

    Parameters
    ----------
    blmodel : object
        Model exposing optimize(), optimize_blfixed(), get_results(),
        compute_baseline(), generation_cost() and a gurobipy `_model`.
    tariff : str
        Tariff name, forwarded to the model's optimize methods.
    LMP : pandas.Series or None
        Locational marginal prices (may be LMP-G if 'isLMPmG' in kwargs).
    dr_periods : pandas.DatetimeIndex or None
        Candidate DR periods for the baseline computation.
    BL : str, optional
        Baseline type ('CAISO' or 'expMA').
    blinit : str, optional
        Baseline initialization: 'noDR' (optimize w/o DR) or 'gamed'
        (optimize with DR compensation). Anything else raises
        NotImplementedError.
    eps : float, optional
        Convergence tolerance on the baseline residual.
    maxiter : int, optional
        Maximum number of fixed-point iterations.
    carbon : bool, optional
        Forwarded to the model's optimize methods.

    Returns
    -------
    pandas.DataFrame
        blmodel.get_results() of the last iteration.
    """
    # A logger is optional; all logging below is skipped when absent.
    # (Previously `logger` was referenced unconditionally, raising
    # NameError whenever no 'logger' kwarg was supplied.)
    logger = kwargs.get('logger')
    if logger is not None:
        if 'isLMPmG' in kwargs:
            logstr = BL + ' (LMP-G)'
        else:
            logstr = BL
        logger.log(logging.INFO,
                   'Computing BL-taking eq. for ' '{} BL.'.format(logstr))
    dfs, blvals, objs, gencosts, residuals = [], [], [], [], []
    # initialize the baseline from an initial optimization
    if blinit == 'gamed':
        blmodel.optimize(tariff, LMP=LMP, dr_periods=dr_periods,
                         BL=BL, carbon=carbon, **kwargs)
    elif blinit == 'noDR':
        blmodel.optimize(tariff, LMP=LMP, carbon=carbon, **kwargs)
    else:
        errmsg = 'Unknown BL initialization parameter {}.'.format(blinit)
        if logger is not None:
            logger.log(logging.ERROR, errmsg)
        raise NotImplementedError(errmsg)
    # retrieve data from the solution for initialization
    dfs.append(blmodel.get_results())
    if 'red' in dfs[-1]:
        blvals.append(blmodel.compute_baseline(
            dr_periods, BL=BL, red_times=dfs[-1][dfs[-1]['red'] > 0].index))
    else:
        blvals.append(blmodel.compute_baseline(dr_periods, BL=BL))
    objs.append(blmodel._model.getObjective().getValue())
    gencosts.append(blmodel.generation_cost(LMP).getValue())
    residuals.append(np.nan)  # no residual defined for the init step
    # solve the bl-taking problem for the first time using the bl values
    # from the previous solution of the problem
    blmodel.optimize_blfixed(tariff, LMP=LMP, bl_values=blvals[-1],
                             carbon=carbon, **kwargs)
    dfs.append(blmodel.get_results())
    blvals.append(blmodel.compute_baseline(
        dr_periods, BL=BL, red_times=dfs[-1][dfs[-1]['red'] > 0].index))
    objs.append(blmodel._model.getObjective().getValue())
    gencosts.append(blmodel.generation_cost(LMP).getValue())
    # NOTE(review): the first residual uses max() while later iterations
    # use the 2-norm; a relative criterion was abandoned earlier because
    # of a division by zero. TODO: reconcile the convergence tolerance
    # with the solver's MIPGap (see history around 2020-02-05).
    residuals.append(np.max(blvals[1] - blvals[0]))
    n_iter = 0
    # fixed-point iteration: re-solve with the latest baselines until the
    # baseline residual is small enough (or maxiter is hit)
    while (residuals[-1] > eps) and (n_iter < maxiter):
        if logger is not None:
            logger.log(logging.INFO,
                       'Residual: {:.2f}, '.format(residuals[-1]) +
                       'Continuing fixed point iteration.')
        blmodel.optimize_blfixed(
            tariff, LMP=LMP, bl_values=blvals[-1], carbon=carbon, **kwargs)
        dfs.append(blmodel.get_results())
        blvals.append(blmodel.compute_baseline(
            dr_periods, BL=BL,
            red_times=dfs[-1][dfs[-1]['red'] > 0].index))
        objs.append(blmodel._model.getObjective().getValue())
        gencosts.append(blmodel.generation_cost(LMP).getValue())
        residuals.append(np.linalg.norm(blvals[-2] - blvals[-1]))
        n_iter += 1
    if logger is not None:
        if residuals[-1] <= eps:
            logger.log(logging.INFO,
                       'Fixed-point iteration successful. ' +
                       'BL-taking eq. found.')
        else:
            logger.log(logging.WARNING,
                       'Fixed-point iteration failed.' +
                       'No BL-taking eq. found. ')
    return dfs[-1]
| 50.311858 | 113 | 0.52806 | 49,252 | 0.914038 | 0 | 0 | 0 | 0 | 0 | 0 | 13,307 | 0.246956 |
760122620b3c96cc231d8b54ec99cb4d4690794d | 380 | py | Python | setup.py | edjdavid/aiml | 6035cf3575137a8022fd373b8be9cfe16ee4ec61 | [
"Apache-2.0"
] | null | null | null | setup.py | edjdavid/aiml | 6035cf3575137a8022fd373b8be9cfe16ee4ec61 | [
"Apache-2.0"
] | null | null | null | setup.py | edjdavid/aiml | 6035cf3575137a8022fd373b8be9cfe16ee4ec61 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
# Packaging metadata for the `aiml` ("ML Automation") distribution.
setup(name='aiml',
      version='1.0',
      description='ML Automation',
      author='MSDS ML',
      author_email='edjdavid@users.noreply.github.com',
      packages=find_packages(),
      # third-party runtime dependencies
      install_requires=[
          'numpy',
          'pandas',
          'matplotlib',
          'scikit-learn',
          'tqdm'
      ],
      )
| 22.352941 | 55 | 0.55 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.307895 |
760133bec2625c1f79501f59184ee12e8243bf4b | 315 | py | Python | python_scripts/wikiracer/__init__.py | gulyas/network_games_analysis | 2e6bdd2a2275702495af1c18043758193d94377b | [
"MIT"
] | null | null | null | python_scripts/wikiracer/__init__.py | gulyas/network_games_analysis | 2e6bdd2a2275702495af1c18043758193d94377b | [
"MIT"
] | null | null | null | python_scripts/wikiracer/__init__.py | gulyas/network_games_analysis | 2e6bdd2a2275702495af1c18043758193d94377b | [
"MIT"
] | 1 | 2021-03-30T18:39:08.000Z | 2021-03-30T18:39:08.000Z | """
WikiRacer is a project of stong1108.
https://github.com/stong1108/WikiRacer
Rewritten for Python 3.8 by Tamás Csertán (csertant)
"""
from .wikiracer import check_pages, find_shortest_path, get_result, redirected
from .wikiracer_threaded import check_page_threaded, find_shortest_path_threaded, result_threaded
| 35 | 97 | 0.828571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.438486 |
7601a6df6f49fcbee94c63c43de55ef018409e87 | 1,115 | py | Python | provisioner/python/prp_provisioner/provisioner.py | sfiligoi/prp-htcondor-portal | 8db9fb75d706111fa02f9f57c18fb7b3fb441206 | [
"BSD-2-Clause"
] | null | null | null | provisioner/python/prp_provisioner/provisioner.py | sfiligoi/prp-htcondor-portal | 8db9fb75d706111fa02f9f57c18fb7b3fb441206 | [
"BSD-2-Clause"
] | null | null | null | provisioner/python/prp_provisioner/provisioner.py | sfiligoi/prp-htcondor-portal | 8db9fb75d706111fa02f9f57c18fb7b3fb441206 | [
"BSD-2-Clause"
] | null | null | null | #
# prp-htcondor-portal/provisioner
#
# BSD license, copyright Igor Sfiligoi 2021
#
# Main entry point of the provisioner process
#
import sys
import time
from . import provisioner_logging
from . import provisioner_htcondor
from . import provisioner_k8s
from . import event_loop
def main(namespace, max_pods_per_cluster=10, sleep_time=10):
    """Run the provisioner's main event loop for a k8s namespace.

    Parameters
    ----------
    namespace : str
        Kubernetes namespace to provision in; also used to select the
        HTCondor schedds and collector.
    max_pods_per_cluster : int, optional
        Upper bound passed to the event loop.
    sleep_time : int, optional
        Seconds to sleep between iterations.
    """
    log_obj = provisioner_logging.ProvisionerStdoutLogging(want_log_debug=True)
    # TBD: Proper security
    schedd_obj = provisioner_htcondor.ProvisionerSchedd(namespace, {'.*': '.*'})
    collector_obj = provisioner_htcondor.ProvisionerCollector(namespace, '.*')
    k8s_obj = provisioner_k8s.ProvisionerK8S(namespace)
    k8s_obj.authenticate()
    el = event_loop.ProvisionerEventLoop(log_obj, schedd_obj, collector_obj,
                                         k8s_obj, max_pods_per_cluster)
    while True:
        log_obj.log_debug("[Main] Iteration started")
        try:
            el.one_iteration()
        except Exception:
            # catch only regular errors so KeyboardInterrupt/SystemExit can
            # still terminate the loop (a bare `except:` swallowed those too)
            log_obj.log_debug("[Main] Exception in one_iteration")
        log_obj.sync()
        time.sleep(sleep_time)
if __name__ == "__main__":
# execute only if run as a script
main(sys.argv[1])
| 28.589744 | 106 | 0.741704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.235874 |
76049c2dadfdef13db9c5eaee735b3b43dbdb154 | 33,142 | py | Python | linetools/isgm/abscomponent.py | marijana777/linetools | 73720a2f6df42b7dde1f35055cd40ad970200f7f | [
"BSD-3-Clause"
] | null | null | null | linetools/isgm/abscomponent.py | marijana777/linetools | 73720a2f6df42b7dde1f35055cd40ad970200f7f | [
"BSD-3-Clause"
] | null | null | null | linetools/isgm/abscomponent.py | marijana777/linetools | 73720a2f6df42b7dde1f35055cd40ad970200f7f | [
"BSD-3-Clause"
] | null | null | null | """ Class for absorption line component
"""
from __future__ import print_function, absolute_import, division, unicode_literals
# Python 2 & 3 compatibility
try:
basestring
except NameError:
basestring = str
import pdb
import numpy as np
import warnings
from astropy import constants as const
from astropy import units as u
from astropy.units import Quantity
from astropy.coordinates import SkyCoord
from astropy.table import QTable, Column
from linetools.spectra.xspectrum1d import XSpectrum1D
from linetools.analysis import absline as ltaa
from linetools.analysis import plots as ltap
from linetools.spectralline import AbsLine, SpectralLine
from linetools.abund import ions
from linetools import utils as ltu
from linetools.lists.linelist import LineList
# Global import for speed
c_kms = const.c.to('km/s').value
# Class for Components
class AbsComponent(object):
"""
Class for an absorption component
Attributes
----------
name : str
Name of the component, e.g. `Si II`
coord : SkyCoord
Sky coordinate
Zion : tuple
Atomic number, ion -- (int,int)
e.g. (8,1) for OI
Note: (-1, -1) is special and is meant for molecules (e.g. H2)
This notation will most likely be changed in the future.
zcomp : float
Component redshift
vlim : Quantity array
Velocity limits of the component
e.g. [-300,300]*u.km/u.s
A : int
Atomic mass -- used to distinguish isotopes
Ej : Quantity
Energy of lower level (1/cm)
reliability : str, optional
Reliability of AbsComponent
'a' - reliable
'b' - possible
'c' - uncertain
'none' - not defined (default)
comment : str, optional
A comment, default is ``
"""
@classmethod
def from_abslines(cls, abslines, stars=None, reliability='none', **kwargs):
"""Instantiate from a list of AbsLine objects
Parameters
----------
abslines : list
List of AbsLine objects
stars : str, optional
Asterisks to append to the ion name (e.g. fine-structure, CII*)
reliability : str, optional
Reliability of AbsComponent
'a' - reliable
'b' - possible
'c' - uncertain
'none' - not defined (default)
"""
# Check
if not isinstance(abslines, list):
raise IOError("Need a list of AbsLine objects")
if not all(isinstance(x, AbsLine) for x in abslines):
raise IOError("List needs to contain only AbsLine objects")
# Instantiate with the first line
init_line = abslines[0]
slf = cls( init_line.attrib['coord'], (init_line.data['Z'],init_line.data['ion']),
init_line.z, init_line.limits.vlim,
Ej=init_line.data['Ej'], stars=stars, reliability=reliability)
slf._abslines.append(init_line)
# Append with component checking
if len(abslines) > 1:
for absline in abslines[1:]:
slf.add_absline(absline, **kwargs)
# Return
return slf
@classmethod
def from_component(cls, component, **kwargs):
""" Instantiate from an AbsComponent object
Uses coord, Zion, Ej, A, zcomp, vlim, name, reliability, comment
Parameters
----------
component : AbsComponent
An AbsComponent object
Returns
-------
AbsComponent
"""
# Check
if not isinstance(component, AbsComponent):
raise IOError('Need an AbsComponent object')
# Return
return cls(component.coord, component.Zion, component.zcomp, component.vlim, Ej=component.Ej,
A=component.A, name=component.name, reliability=component.reliability, comment= component.comment,
**kwargs)
    @classmethod
    def from_dict(cls, idict, coord=None, **kwargs):
        """ Instantiate an AbsComponent from a dict

        Parameters
        ----------
        idict : dict
            Keys read here: 'Zion', 'zcomp', 'vlim' (km/s), 'Ej' (1/cm),
            'A', 'flag_N', 'logN', 'sig_logN', 'comment', 'Name', and
            'lines' (a dict of SpectralLine dicts). A 'reliability' or
            legacy 'Reliability' key is honored, defaulting to 'none'.
            'RA'/'DEC' (deg) are required when coord is not given.
        coord : SkyCoord, optional
            If given, overrides idict['RA']/idict['DEC'] and is passed on
            to the individual lines.

        Returns
        -------
        AbsComponent
        """
        if coord is not None:
            radec = coord
        else:
            radec = SkyCoord(ra=idict['RA']*u.deg, dec=idict['DEC']*u.deg)
        # backwards compatibility: older files used 'Reliability'
        for key in ['reliability', 'Reliability']:
            if key in idict.keys():
                reliability = idict[key]
                break
        else:
            reliability = 'none'
        # init
        slf = cls(radec, tuple(idict['Zion']), idict['zcomp'], idict['vlim']*u.km/u.s,
                  Ej=idict['Ej']/u.cm, A=idict['A'],
                  Ntup = tuple([idict[key] for key in ['flag_N', 'logN', 'sig_logN']]),
                  comment=idict['comment'], name=idict['Name'], reliability=reliability)
        # Add lines (they run through add_absline's consistency checks)
        for key in idict['lines'].keys():
            iline = SpectralLine.from_dict(idict['lines'][key], coord=coord, **kwargs)
            slf.add_absline(iline, **kwargs)
        # Return
        return slf
def __init__(self, radec, Zion, zcomp, vlim, Ej=0./u.cm, A=None,
Ntup=None, comment='', name=None, stars=None, reliability='none'):
""" Initiator
Parameters
----------
radec : tuple or SkyCoord
(RA,DEC) in deg or astropy.coordinate.SkyCoord
Zion : tuple
Atomic number, ion -- (int,int)
e.g. (8,1) for OI
Note: (-1, -1) is special and is meant for moleculer (e.g. H2)
This notation will most likely change in the future.
zcomp : float
Absorption component redshift
vlim : Quantity array
Velocity limits of the component w/r to `z`
e.g. [-300,300]*u.km/u.s
A : int, optional
Atomic mass -- used to distinguish isotopes
Ntup : tuple
(int,float,float)
(flag_N,logN,sig_logN)
flag_N : Flag describing N measurement (0: no info; 1: detection; 2: saturated; 3: non-detection)
logN : log10 N column density
sig_logN : Error in log10 N
# TODO FUTURE IMPLEMENTATION WILL ALLOW FOR 2-element ndarray for sig_logN
Ej : Quantity, optional
Energy of lower level (1/cm)
stars : str, optional
asterisks to add to name, e.g. '**' for CI**
Required if name=None and Ej>0.
reliability : str, optional
Reliability of AbsComponent
'a' - reliable
'b' - possible
'c' - uncertain
'none' - not defined (default)
comment : str, optional
A comment, default is ``
"""
# Required
self.coord = ltu.radec_to_coord(radec)
self.Zion = Zion
self.zcomp = zcomp
self.vlim = vlim
# Optional
self.A = A
self.Ej = Ej
self.comment = comment
if Ntup is not None:
self.flag_N = Ntup[0]
self.logN = Ntup[1]
self.sig_logN = Ntup[2]
_, _ = ltaa.linear_clm(self) # Set linear quantities
else:
self.flag_N = 0
self.logN = 0.
self.sig_logN = 0.
# Name
if (name is None) and (self.Zion != (-1, -1)):
iname = ions.ion_to_name(self.Zion, nspace=0)
if self.Ej.value > 0: # Need to put *'s in name
try:
iname += stars
except:
raise IOError("Need to provide 'stars' parameter.")
self.name = '{:s}_z{:0.5f}'.format(iname, self.zcomp)
elif (name is None) and (self.Zion == (-1, -1)):
self.name = 'mol_z{:0.5f}'.format(self.zcomp)
else:
self.name = name
# reliability
if reliability not in ['a', 'b', 'c', 'none']:
raise ValueError("Input reliability `{}` not valid.".format(reliability))
self.reliability = reliability
# Potential for attributes
self.attrib = dict()
# Other
self._abslines = []
def add_absline(self, absline, tol=0.1*u.arcsec, chk_vel=True,
chk_sep=True, vtoler=1., **kwargs):
"""Add an AbsLine object to the component if it satisfies
all of the rules.
For velocities, we demand that the new line has a velocity
range that is fully encompassed by the component.
Parameters
----------
absline : AbsLine
tol : Angle, optional
Tolerance on matching coordinates. Only used if chk_sep=True
chk_vel : bool, optional
Perform velocity test (can often be skipped)
Insist the bounds of the AbsLine are within 1km/s of the Component
(allows for round-off error)
chk_sep : bool, optional
Perform coordinate check (expensive)
vtoler : float
Tolerance for velocity in km/s (must be positive)
"""
if vtoler < 0:
raise ValueError('vtoler must be positive!')
# Perform easy checks
if chk_sep:
testc = bool(self.coord.separation(absline.attrib['coord']) < tol)
else:
testc = True
if self.Zion == (-1,-1): #(-1,-1) represents molecules
testZ = True
testi = True
testE = True
else: # atoms
testZ = self.Zion[0] == absline.data['Z']
testi = self.Zion[1] == absline.data['ion']
testE = bool(self.Ej == absline.data['Ej'])
# Now redshift/velocity
if chk_vel:
dz_toler = (1 + self.zcomp) * vtoler / c_kms # Avoid Quantity for speed
zlim_line = absline.limits.zlim # absline.z + (1 + absline.z) * absline.limits.vlim.to('km/s').value / c_kms
zlim_comp = self.zcomp + (1+self.zcomp) * self.vlim.to('km/s').value / c_kms
testv = (zlim_line[0] >= (zlim_comp[0] - dz_toler)) & (
zlim_line[1] <= (zlim_comp[1] + dz_toler))
else:
testv = True
# Combine
test = testc & testZ & testi & testE & testv
# Isotope
if self.A is not None:
raise ValueError('Not ready for this yet.')
# Append?
if test:
self._abslines.append(absline)
else:
warnings.warn("Failed add_absline test")
print('Input absline with wrest={:g} at z={:.3f} does not match component rules. Not appending'.format(absline.wrest,
absline.z))
if not testv:
print("Absline velocities lie beyond component\n Set chk_vel=False to skip this test.")
if not testc:
print("Absline coordinates do not match. Best to set them")
    def add_abslines_from_linelist(self, llist='ISM', init_name=None, wvlim=None, min_Wr=None, **kwargs):
        """
        It adds associated AbsLines satisfying some conditions (see parameters below).

        Parameters
        ----------
        llist : str, optional
            Name of the linetools.lists.linelist.LineList
            object where to look for the transition names.
            Default is 'ISM', which means the function looks
            within `list = LineList('ISM')`.
        init_name : str, optional
            Name of the initial transition used to define the AbsComponent
        wvlim : Quantity array, optional
            Observed wavelength limits for AbsLines to be added.
            e.g. [1200, 2000]*u.AA.
        min_Wr : Quantity, optional
            Minimum rest-frame equivalent with for AbsLines to be added.
            This is calculated in the very low optical depth regime tau0<<1,
            where Wr is independent of Doppler parameter or gamma (see eq. 9.15 of
            Draine 2011). Still, a column density attribute for the AbsComponent
            is needed.

        Returns
        -------
        Adds AbsLine objects to the AbsComponent._abslines list.

        Notes
        -----
        **kwargs are passed to AbsLine.add_absline() method.
        """
        # get the transitions from LineList
        llist = LineList(llist)
        if init_name is None:  # we have to guess it
            if (self.Zion) == (-1, -1):  # molecules
                # init_name must be in self.attrib (this is a patch)
                init_name = self.attrib['init_name']
            else:  # atoms
                init_name = ions.ion_to_name(self.Zion, nspace=0)
        transitions = llist.all_transitions(init_name)
        # unify output to be always QTable
        if isinstance(transitions, dict):
            transitions = llist.from_dict_to_qtable(transitions)
        # keep only transitions whose observed wavelength is inside wvlim
        if wvlim is not None:
            cond = (transitions['wrest']*(1+self.zcomp) >= wvlim[0]) & \
                   (transitions['wrest']*(1+self.zcomp) <= wvlim[1])
            transitions = transitions[cond]
        # check outputs
        if len(transitions) == 0:
            warnings.warn("No transitions satisfying the criteria found. Doing nothing.")
            return
        # loop over the transitions when more than one found
        for transition in transitions:
            iline = AbsLine(transition['name'], z=self.zcomp, linelist=llist)
            iline.limits.set(self.vlim)
            # propagate the component's coordinates, column density and
            # other attributes to each line
            iline.attrib['coord'] = self.coord
            iline.attrib['logN'] = self.logN
            iline.attrib['sig_logN'] = self.sig_logN
            iline.attrib['flag_N'] = self.flag_N
            iline.attrib['N'] = 10**iline.attrib['logN'] / (u.cm * u.cm)
            iline.attrib['sig_N'] = 10**iline.attrib['sig_logN'] / (u.cm * u.cm)
            for key in self.attrib.keys():
                iline.attrib[key] = self.attrib[key]
            if min_Wr is not None:
                # check logN is defined
                logN = self.logN
                if logN == 0:
                    warnings.warn("AbsComponent does not have logN defined. Appending AbsLines "
                                  "regardless of min_Wr.")
                else:
                    N = 10**logN / (u.cm*u.cm)
                    # Wr in the linear (tau0 << 1) regime
                    Wr_iline = iline.get_Wr_from_N(N=N)  # valid for the tau0<<1 regime.
                    if Wr_iline < min_Wr:  # do not append
                        continue
            # add the absline
            self.add_absline(iline)
def build_table(self):
"""Generate an astropy QTable out of the abs lines
Returns
-------
comp_tbl : QTable
"""
if len(self._abslines) == 0:
return
comp_tbl = QTable()
comp_tbl.add_column(Column([iline.wrest.to(u.AA).value for iline in self._abslines]*u.AA, name='wrest'))
comp_tbl.add_column(Column([iline.z for iline in self._abslines], name='z'))
for attrib in ['flag_N', 'logN', 'sig_logN']:
comp_tbl.add_column(Column([iline.attrib[attrib] for iline in self._abslines], name=attrib))
# Return
return comp_tbl
def cog(self, redo_EW=False, show_plot=False, **kwargs):
"""Perform a COG analysis on the component
Parameters
----------
redo_EW : bool, optional
Re-analyze each line for its EW
show_plot : bool, optional
Generate plot and show
Returns
-------
logN : float
COG column density
b : Quantity
COG Doppler parameter (km/s)
"""
from linetools.analysis import cog as ltcog
#reload(ltcog)
# Redo EWs?
if redo_EW:
for aline in self._abslines:
aline.measure_restew(**kwargs)
# COG setup
wrest = np.array([aline.wrest.to('AA').value for aline in self._abslines])*u.AA
f = np.array([aline.data['f'] for aline in self._abslines])
EW = np.array([aline.attrib['EW'].to('AA').value for aline in self._abslines])*u.AA
sig_EW = np.array([aline.attrib['sig_EW'].to('AA').value for aline in self._abslines])*u.AA
# COG analysis
COG_dict = ltcog.single_cog_analysis(wrest, f, EW, sig_EW=sig_EW)
# COG plot
if show_plot:
ltcog.cog_plot(COG_dict)
# Return
return COG_dict
    def plot_Na(self, show=True, **kwargs):
        """Plot apparent column density Na profiles of the component's lines.

        Only AbsLines whose analy['spec'] holds an XSpectrum1D are plotted;
        if none do, a message is printed and nothing is drawn.

        Parameters
        ----------
        show : bool, optional
            If True, display the figure (the figure is closed afterwards
            either way).
        """
        import matplotlib.pyplot as plt
        import matplotlib.gridspec as gridspec
        import matplotlib as mpl
        try: # Nicer view, especially in notebook
            import seaborn as sns
            sns.set(context="notebook", font_scale=2)
        except ImportError:
            pass
        mpl.rcParams['font.family'] = 'stixgeneral'
        mpl.rcParams['font.size'] = 15.
        # Check for spec
        gdiline = []
        for iline in self._abslines:
            if isinstance(iline.analy['spec'], XSpectrum1D):
                gdiline.append(iline)
        nplt = len(gdiline)
        if nplt == 0:
            print("Load spectra into the absline.analy['spec']")
            return
        # m_e c / (pi e^2): converts apparent optical depth to apparent
        # column density per unit velocity
        atom_cst = (const.m_e.cgs*const.c.cgs / (np.pi * (const.e.esu**2).cgs)).to(u.AA*u.s/(u.km*u.cm**2))
        # Setup plot
        plt.clf()
        ax = plt.gca()
        fw_sv = 0.*u.AA
        ymax = 0.
        for qq, iline in enumerate(gdiline):
            # velocity relative to the redshifted line center
            velo = iline.analy['spec'].relative_vel((1+iline.z)*iline.wrest)
            cst = atom_cst/(iline.data['f']*iline.wrest)  # / (u.km/u.s) / u.cm * (u.AA/u.cm)
            # apparent column; the flux is floored at the error array to
            # avoid taking the log of zero/negative values
            Na = np.log(1./np.maximum(iline.analy['spec'].flux, iline.analy['spec'].sig)) * cst
            # Scale the y-range using the strongest (largest f*wrest) line
            pixmnx = (velo > self.vlim[0]) & (velo < self.vlim[1])
            if iline.data['f']*iline.wrest > fw_sv:
                ymax = max(np.max(Na[pixmnx].value), ymax)
                fw_sv = iline.data['f']*iline.wrest
            # Plot
            ax.plot(velo, Na, '-', linestyle='steps-mid', label=iline.data['name'])
            # ax.plot(velo,  iline.analy['spec'].sig, 'r:')
        # Axes
        ax.set_xlim(self.vlim.value)
        ax.set_ylim(-0.2*ymax, 5*ymax)
        # ax.set_ylim(ymnx)
        ax.minorticks_on()
        ax.set_xlabel('Relative Velocity (km/s)')
        ax.set_ylabel(r'Apparent Column (cm$^{-2}$ per km/s)')
        # Legend
        legend = ax.legend(loc='upper left', scatterpoints=1, borderpad=0.3,
                           handletextpad=0.3, fontsize='large')
        plt.tight_layout(pad=0.2, h_pad=0., w_pad=0.1)
        if show:
            plt.show()
        plt.close()
def reset_vlim_from_abslines(self, verbose=False):
""" Resets the vlim value using the AbsLines
Parameters
----------
"""
for aline in self._abslines:
if aline.analy['vlim'][0] < self.vlim[0]:
if verbose:
print('Resetting vlim0 from {}'.format(aline))
self.vlim[0] = aline.analy['vlim'][0]
if aline.analy['vlim'][1] > self.vlim[1]:
if verbose:
print('Resetting vlim1 from {}'.format(aline))
self.vlim[1] = aline.analy['vlim'][1]
def synthesize_colm(self, overwrite=False, redo_aodm=False, **kwargs):
"""Synthesize column density measurements of the component.
Default is to use the current AbsLine values, but the user can
request that those be re-calculated with AODM.
Parameters
----------
overwrite : bool, optional
Clobber any previous measurement
redo_aodm : bool, optional
Redo the individual column density measurements (likely AODM)
Returns
-------
None
Fills the component attributes instead
"""
# Check
if (self.flag_N != 0) and (not overwrite):
raise IOError("Column densities already set. Use overwrite=True to redo.")
# Redo?
if redo_aodm:
for aline in self._abslines:
aline.measure_aodm(**kwargs)
# Collate
self.flag_N = 0
for aline in self._abslines:
if aline.attrib['flag_N'] == 0: # No value
warnings.warn("Absline {} has flag=0. Hopefully you expected that".format(str(aline)))
continue
# Check N is filled
if np.allclose(aline.attrib['N'].value, 0.):
raise ValueError("Need to set N in attrib. \n Consider linear_clm in linetools.analysis.absline")
if aline.attrib['flag_N'] == 1: # Good value?
if self.flag_N == 1: # Weighted mean
# Original
weight = 1. / self.sig_N**2
mu = self.N * weight
# Update
weight += 1./aline.attrib['sig_N']**2
self.N = (mu + aline.attrib['N']/aline.attrib['sig_N']**2) / weight
self.sig_N = np.sqrt(1./weight)
else: # Fill
self.N = aline.attrib['N']
self.sig_N = aline.attrib['sig_N']
self.flag_N = 1
elif aline.attrib['flag_N'] == 2: # Lower limit
if self.flag_N in [0, 3]:
self.N = aline.attrib['N']
self.sig_N = aline.attrib['sig_N']
self.flag_N = 2
elif self.flag_N == 2:
if aline.attrib['N'] > self.N:
self.N = aline.attrib['N']
self.sig_N = aline.attrib['sig_N']
elif self.flag_N == 1:
pass
elif aline.attrib['flag_N'] == 3: # Upper limit
if self.flag_N == 0:
self.N = aline.attrib['N']
self.sig_N = aline.attrib['sig_N']
self.flag_N = 3
elif self.flag_N in [1, 2]:
pass
elif self.flag_N == 3:
if aline.attrib['N'] < self.N:
self.N = aline.attrib['N']
self.sig_N = aline.attrib['sig_N']
elif aline.attrib['flag_N'] == 0: # No value
warnings.warn("Absline {} has flag=0. Hopefully you expected that")
else:
raise ValueError("Bad flag_N value")
# Log values
if self.flag_N > 0:
self.logN, self.sig_logN = ltaa.log_clm(self)
    def repr_vpfit(self, b=10.*u.km/u.s, tie_strs=('', '', ''), fix_strs=('', '', '')):
        """
        String representation for VPFIT (line fitting software) in its fort.26 format
        Parameters
        ----------
        b : Quantity, optional
            Doppler parameter of the component. Default is 10*u.km/u.s
        tie_strs : tuple of strings, optional
            Strings to be used for tying parameters (z,b,logN),
            respectively. These are all converted to lower case
            format, following VPFIT convention.
        fix_strs : tuple of strings, optional
            Strings to be used for fixing parameters (z,b,logN),
            respectively. These are all converted to upper case
            format, following VPFIT convention. These will take
            precedence over tie_strs if different than ''.
        Returns
        -------
        repr_vpfit : str
            A single fort.26 line, newline-terminated
        """
        # get Doppler parameter to km/s
        b = b.to('km/s').value
        # Ion name, with the internal space removed
        name = ions.ion_to_name(self.Zion, nspace=1)
        name = name.replace(' ', '')
        # Deal with fix and tie parameters
        # Check format first
        for i, x_strs in enumerate([tie_strs, fix_strs]):
            # NOTE(review): `basestring` is Python 2 only; presumably a
            # compatibility import elsewhere in this module provides it -- confirm.
            if (not isinstance(x_strs, tuple)) or (not all(isinstance(s, (str, basestring)) for s in x_strs)):
                if i == 0:
                    raise TypeError('`tie_strs` must be a tuple of strings.')
                elif i == 1:
                    raise TypeError('`fix_strs` must be a tuple of strings.')
            if len(x_strs) != 3:
                raise SyntaxError('`tie_strs` and `fix_strs` must have len() == 3')
        # reformat for VPFIT standard
        fix_strs = np.array([s.upper() for s in fix_strs])
        tie_strs = np.array([s.lower() for s in tie_strs])
        # preference to fix_strs over tie_strs
        strs = np.where(fix_strs != '', fix_strs, tie_strs)
        # create the line string; the literal 0s appear to be fixed
        # placeholder columns of the fort.26 format -- confirm against VPFIT docs
        s = '{:s} {:.5f}{:s} {:.5f} {:.2f}{:s} {:.2f} {:.2f}{:s} {:.2f}'.format(name, self.zcomp, strs[0], 0, b,
                                                                                strs[1], 0, self.logN, strs[2], 0)
        if len(self.comment) > 0:
            s += '! {:s}'.format(self.comment)
        s += '\n'
        return s
    def repr_alis(self, T_kin=1e4*u.K, bturb=0.*u.km/u.s,
                  tie_strs=('', '', '', ''), fix_strs=('', '', '', '')):
        """
        String representation for ALIS (line fitting software)
        Parameters
        ----------
        T_kin : Quantity, optional
            Kinetic temperature. Default 1e4*u.K
        bturb : Quantity, optional
            Turbulent Doppler parameter. Default 0.*u.km/u.s
        tie_strs : tuple of strings, optional
            Strings to be used for tying parameters
            (logN,z,bturb,T_kin), respectively. These are all
            converted to lower case format, following ALIS convention.
        fix_strs : tuple of strings, optional
            Strings to be used for fixing parameters
            (logN,z,bturb,T_kin), respectively. These are all
            converted to upper case format, following ALIS convention.
            These will take precedence over tie_strs if different from
            ''.
        Returns
        -------
        repr_alis : str
            A single ALIS `voigt` model line, newline-terminated
        """
        # Convert to the units ALIS wants
        T_kin = T_kin.to('K').value
        bturb = bturb.to('km/s').value
        # A patch for nucleons; todo: come up with a better way to do this using ELEMENTS?
        # NOTE(review): `nucleons` stays unbound if self.Zion[0] < 1, which
        # would raise NameError at the name-building line below -- confirm
        # Zion[0] is always >= 1.
        if self.Zion[0] == 1:
            nucleons = 1
        elif self.Zion[0] > 1:
            # crude A ~ 2Z approximation
            nucleons = 2 * self.Zion[0]
        # name, prefixed by the nucleon count (e.g. mass-number tag)
        name = ions.ion_to_name(self.Zion, nspace=1)
        name = '{}'.format(nucleons)+name.replace(' ', '_')
        # Deal with fix and tie parameters
        # Check format first
        for i, x_strs in enumerate([tie_strs, fix_strs]):
            # NOTE(review): `basestring` is Python 2 only; presumably a
            # compatibility import elsewhere in this module provides it -- confirm.
            if (not isinstance(x_strs, tuple)) or (not all(isinstance(s, (str, basestring)) for s in x_strs)):
                if i == 0:
                    raise TypeError('`tie_strs` must be a tuple of strings.')
                elif i == 1:
                    raise TypeError('`fix_strs` must be a tuple of strings.')
            if len(x_strs) != 4:
                raise SyntaxError('`tie_strs` and `fix_strs` must have len()== 4')
        # reformat for ALIS standard
        fix_strs = np.array([s.upper() for s in fix_strs])
        tie_strs = np.array([s.lower() for s in tie_strs])
        # preference to fix_strs over tie_strs
        strs = np.where(fix_strs != '', fix_strs, tie_strs)
        s = 'voigt ion={:s} {:.2f}{:s} redshift={:.5f}{:s} {:.1f}{:s} {:.1E}{:s}'.format(name, self.logN, strs[0],
                                                                                        self.zcomp, strs[1], bturb,
                                                                                        strs[2], T_kin, strs[3])
        if len(self.comment) > 0:
            s += '# {:s}'.format(self.comment)
        s += '\n'
        return s
    def repr_joebvp(self, specfile, flags=(2,2,2), b_default=10*u.km/u.s):
        """
        String representation for JOEBVP (line fitting software).
        Parameters
        ----------
        specfile : str
            Name of the spectrum file
        flags : tuple of ints, optional
            Flags (nflag, bflag, vflag). See JOEBVP input for details
            about these flags.
        b_default : Quantity, optional
            Doppler parameter value adopted in case an absorption
            line within the component has not set this attribute
            Default is 10 km/s.
        Returns
        -------
        repr_joebvp : str
            May contain multiple "\n" (1 per absline within component)
        """
        # Reference: (note that comment column must be the last one)
        # specfile|restwave|zsys|col|bval|vel|nflag|bflag|vflag|vlim1|vlim2|wobs1|wobs2|z_comp|trans|rely|comment
        s = ''
        for aline in self._abslines:
            s += '{:s}|{:.5f}|'.format(specfile, aline.wrest.to('AA').value)
            logN = aline.attrib['logN']
            b_val = aline.attrib['b'].to('km/s').value
            if b_val == 0:  # set the default
                b_val = b_default.to('km/s').value
            # write string
            s += '{:.8f}|{:.4f}|{:.4f}|0.|'.format(self.zcomp, logN, b_val)  # `vel` is set to 0. because zsys is zcomp
            s += '{}|{}|{}|'.format(int(flags[0]), int(flags[1]), int(flags[2]))
            # velocity and observed-wavelength windows of this line
            vlim = aline.limits.vlim.to('km/s').value
            wvlim = aline.limits.wvlim.to('AA').value
            s += '{:.4f}|{:.4f}|{:.5f}|{:.5f}|'.format(vlim[0], vlim[1], wvlim[0], wvlim[1])
            s += '{:.8f}|{:s}|{:s}|{:s}'.format(self.zcomp, aline.data['ion_name'], self.reliability, self.comment)  # zcomp again here
            # if len(self.comment) > 0:
            #     s += '# {:s}'.format(self.comment)
            s += '\n'
        return s
def stack_plot(self, return_fig=False, vlim=None, **kwargs):
"""Show a stack plot of the component, if spec are loaded
Assumes the data are normalized.
Parameters
----------
return_fig : bool, optional
If True, return stack plot as plt.Figure() instance for further manipulation
vlim : Quantity array, optional
Velocity limits of the plots
e.g. [-300,300]*u.km/u.s
Returns
-------
fig : matplotlib Figure, optional
Figure instance containing stack plot with subplots, axes, etc.
"""
if vlim:
plotvlim=vlim
else:
plotvlim=self.vlim
if return_fig:
fig = ltap.stack_plot(self._abslines, vlim=plotvlim, return_fig=True, **kwargs)
return fig
else:
ltap.stack_plot(self._abslines, vlim=plotvlim, **kwargs)
    def to_dict(self):
        """ Convert component data to a dict
        Returns
        -------
        cdict : dict
            JSON-friendly dict (run through ltu.jsonify) including a nested
            dict of the component's AbsLines keyed by rest wavelength
        """
        cdict = dict(Zion=self.Zion, zcomp=self.zcomp, vlim=self.vlim.to('km/s').value,
                     Name=self.name,
                     RA=self.coord.fk5.ra.value, DEC=self.coord.fk5.dec.value,
                     A=self.A, Ej=self.Ej.to('1/cm').value, comment=self.comment,
                     flag_N=self.flag_N, logN=self.logN, sig_logN=self.sig_logN)
        # Record the concrete class name so the dict identifies its origin
        cdict['class'] = self.__class__.__name__
        # AbsLines, keyed by rest-wavelength value
        cdict['lines'] = {}
        for iline in self._abslines:
            cdict['lines'][iline.wrest.value] = iline.to_dict()
        # Polish into JSON-serializable types
        cdict = ltu.jsonify(cdict)
        # Return
        return cdict
def copy(self):
""" Generate a copy of itself
Returns
-------
abscomp : AbsComponent
"""
# Instantiate with required attributes
abscomp = AbsComponent(self.coord, self.Zion, self.zcomp, self.vlim)
# Add in the rest
attrs = vars(self).keys()
for attr in attrs:
if attr == '_abslines':
for iline in self._abslines:
abscomp._abslines.append(iline.copy())
else:
setattr(abscomp, attr, getattr(self, attr))
# Return
return abscomp
    def __getitem__(self, attrib):
        """Passback attribute, if it exists
        Useful for columns
        Parameters
        ----------
        attrib : str
            Attribute name; an AttributeError propagates if it does not exist
        """
        return getattr(self, attrib)
    def __repr__(self):
        # Coordinates rendered as sexagesimal FK5 (HH:MM:SS, +DD:MM:SS)
        txt = '<{:s}: {:s} {:s}, Name={:s}, Zion=({:d},{:d}), Ej={:g}, z={:g}, vlim={:g},{:g}'.format(
            self.__class__.__name__, self.coord.fk5.ra.to_string(unit=u.hour,sep=':', pad=True),
            self.coord.fk5.dec.to_string(sep=':',pad=True,alwayssign=True), self.name, self.Zion[0], self.Zion[1], self.Ej, self.zcomp, self.vlim[0], self.vlim[1])
        # Column?  Only show column-density fields once one has been set
        if self.flag_N > 0:
            txt = txt + ', logN={:g}'.format(self.logN)
            txt = txt + ', sig_logN={}'.format(self.sig_logN)
            txt = txt + ', flag_N={:d}'.format(self.flag_N)
        # Finish
        txt = txt + '>'
        return (txt)
| 38.13809 | 167 | 0.535665 | 32,287 | 0.974202 | 0 | 0 | 3,400 | 0.102589 | 0 | 0 | 14,562 | 0.439382 |
7604f665e3aa02bc719696d186c91d7d6c300052 | 3,305 | py | Python | 120_GruneisenParam/FeNiSi/collectScalingParam.py | r-a-morrison/fe_alloy_sound_velocities | 8da1b0d073e93fb4b4be3d61b73e58b7a7a3097b | [
"MIT"
] | null | null | null | 120_GruneisenParam/FeNiSi/collectScalingParam.py | r-a-morrison/fe_alloy_sound_velocities | 8da1b0d073e93fb4b4be3d61b73e58b7a7a3097b | [
"MIT"
] | null | null | null | 120_GruneisenParam/FeNiSi/collectScalingParam.py | r-a-morrison/fe_alloy_sound_velocities | 8da1b0d073e93fb4b4be3d61b73e58b7a7a3097b | [
"MIT"
] | null | null | null | # Front matter
##############
import os
from os import fdopen, remove
from tempfile import mkstemp
from shutil import move
import glob
import re
import time
import pandas as pd
import numpy as np
from scipy import constants
from scipy.optimize import curve_fit, fsolve
from scipy.interpolate import interp1d
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
from scipy.interpolate import spline
import math
import seaborn as sns
# Plot styling
matplotlib.rc('xtick', labelsize=16)
matplotlib.rc('ytick', labelsize=16)
rc = {'lines.linewidth': 1,
      'axes.labelsize': 20,
      'axes.titlesize': 20,
      'legend.fontsize': 26,
      'xtick.direction': u'in',
      'ytick.direction': u'in'}
sns.set_style('ticks', rc=rc)
start_time = time.time()

# Input scaling parameter results
##########################################
xi_filename = 'Results/scalingparameters.csv'
xi_df = pd.read_csv(xi_filename)
# Rename columns so the per-measurement volumes (Vj, Vk) are not confused
# with the common reference volume (Vi) introduced below
xi_df = xi_df.rename(columns={'Vi': 'Vj', 'dVi': 'dVj', 'V': 'Vk', 'dV': 'dVk',
                              'V/Vi': 'Vk/Vj', 'xi': 'xi(Vk/Vj)', 'dxi': 'dxi(Vk/Vj)'})

# Transform scaling parameters to each reference volume
#######################################################
folder_list = xi_df.drop_duplicates(subset='Ref Folder')['Ref Folder'].values
for ref_folder in folder_list:
    print('Rescaling to '+ref_folder)

    # Reference volume to scale everything to
    Vi = xi_df[xi_df['Ref Folder'] == ref_folder].iloc[-1]['Vj']
    rescaled = xi_df[['Vj', 'Vk', 'xi(Vk/Vj)', 'dxi(Vk/Vj)']].copy()
    rescaled['Vi'] = Vi*np.ones(len(rescaled))

    # xi(Vk/Vi) = xi(Vk/Vj) * xi(Vj/Vi); the complementary factor xi(Vj/Vi)
    # is read from the row measured at reference Vi with target volume Vj
    def complementary(Vj, col):
        return rescaled[(rescaled['Vj'] == Vi) & (rescaled['Vk'] == Vj)].iloc[-1][col]
    rescaled['xi(Vj/Vi)'] = [complementary(Vj, 'xi(Vk/Vj)') for Vj in rescaled['Vj']]
    rescaled['dxi(Vj/Vi)'] = [complementary(Vj, 'dxi(Vk/Vj)') for Vj in rescaled['Vj']]
    rescaled['Vk/Vi'] = rescaled['Vk']/rescaled['Vi']

    # Rescaled xi and its uncertainty: for c = a*b,
    # dc = sqrt((b*da)^2 + (a*db)^2)
    rescaled['xi(Vk/Vi)'] = rescaled['xi(Vk/Vj)']*rescaled['xi(Vj/Vi)']
    rescaled['dxi(Vk/Vi)'] = np.sqrt(
        (rescaled['xi(Vj/Vi)']*rescaled['dxi(Vk/Vj)'])**2 +
        (rescaled['xi(Vk/Vj)']*rescaled['dxi(Vj/Vi)'])**2)

    # Drop the trivial data points where Vk equals the reference volume
    rescaled = rescaled[rescaled['Vk'] != Vi]
    rescaled = rescaled.round(decimals=4)
    rescaled.to_csv(ref_folder+'/rescaledparameters.csv', index=False)

    # Plot scaling parameters
    fig, (ax0) = plt.subplots(nrows=1, ncols=1, figsize=(6, 4.5))
    ax0.errorbar(rescaled['Vk/Vi'], rescaled['xi(Vk/Vi)'],
                 yerr=rescaled['dxi(Vk/Vi)'],
                 marker='o', color='gray', mfc='lightgray', ms=6, markeredgewidth=1,
                 ls='none', elinewidth=1)
    ax0.set_xlabel(r'$V/V_i$', fontsize=16)
    ax0.set_ylabel(r'$\xi$', fontsize=16)
    ax0.tick_params(direction='in', right='on', top='on')
    fig.savefig(ref_folder+'/scalingparam.pdf', format='pdf',
                bbox_inches='tight')
    plt.close()
76052e516679e30b2741000942a893a95667ab5b | 1,730 | py | Python | classes/strategies.py | noaillypau/Polyvalent_Backtester_3 | 19c11014300b5cf629c037cc5a6b18123237647f | [
"MIT"
] | 2 | 2021-04-30T21:36:29.000Z | 2021-06-10T23:34:38.000Z | classes/strategies.py | noaillypau/Polyvalent_Backtester_3 | 19c11014300b5cf629c037cc5a6b18123237647f | [
"MIT"
] | null | null | null | classes/strategies.py | noaillypau/Polyvalent_Backtester_3 | 19c11014300b5cf629c037cc5a6b18123237647f | [
"MIT"
] | null | null | null | import numpy as np, pandas as pd, json, os, datetime, time
from order import Order
class Strategies():
    """Registry of named Strategy objects.

    Strategies are stored under a user-chosen name; compute() fans a bar
    index out to every registered strategy and concatenates the orders
    they emit.
    """

    def __init__(self):
        # name -> Strategy
        self._dict = {}

    def add(self, name, strategy):
        """Register (or replace) `strategy` under `name`."""
        self._dict[name] = strategy

    def compute(self, arrs, index):
        """Run every registered strategy for the given bar.

        Parameters
        ----------
        arrs : data container handed to each strategy's trigger
            (presumably a dict of numpy arrays -- see module notes)
        index : int
            Current bar index.

        Returns
        -------
        list
            Orders emitted by all strategies, in registration order.
        """
        list_order = []
        for strategy in self._dict.values():
            # extend() avoids the O(n^2) repeated list concatenation of
            # the previous `list_order = list_order + ...` form
            list_order.extend(strategy.compute(arrs, index))
        return list_order

    def get_dic_symbol_strategy(self):
        """Group registered strategies by their trading symbol.

        Returns
        -------
        dict
            Mapping symbol -> list of Strategy objects.
        """
        dic_strategy = {}
        for strategy in self._dict.values():
            dic_strategy.setdefault(strategy.symbol, []).append(strategy)
        return dic_strategy

    def __repr__(self):
        txt = f'Strategies:\n'
        for key, item in self._dict.items():
            txt += f'\nName: {key}\n{item}\n'
        return txt
'''
Strategy object
Create a Strategy object with:
    trigger: function of (arrs, params, symbol, index) that decides whether to
        send orders for the given bar index and returns the list of orders to send
    symbol: the asset the orders will be placed on
    params: dict of parameters forwarded to the trigger
'''
class Strategy():
    """A single trading strategy bound to one symbol.

    Parameters
    ----------
    trigger : callable(arrs, params, symbol, index) -> list
        Decides, for a given bar index, which orders (if any) to send.
    symbol : str
        Asset the orders will be placed on.
    params : dict
        Free-form parameters forwarded to the trigger.
    """

    def __init__(self, trigger, symbol, params):
        self.trigger = trigger
        self.params = params
        self.symbol = symbol

    def compute(self, arrs, index):
        """Delegate to the trigger with this strategy's params and symbol."""
        return self.trigger(arrs, self.params, self.symbol, index)

    def __repr__(self):
        pieces = [f'Strategy on {self.symbol} with params:']
        pieces.extend(f'\n    {key}: {item}' for key, item in self.params.items())
        return ''.join(pieces)
| 28.833333 | 126 | 0.602312 | 1,297 | 0.749711 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.246821 |
7607e44458b7d06d849dd60ccf1c72d8f4d87ce2 | 9,129 | py | Python | orders/api.py | terryjbates/restbucks | 31ed117f67b6c205838f89b45162fb000d49578c | [
"MIT"
] | null | null | null | orders/api.py | terryjbates/restbucks | 31ed117f67b6c205838f89b45162fb000d49578c | [
"MIT"
] | null | null | null | orders/api.py | terryjbates/restbucks | 31ed117f67b6c205838f89b45162fb000d49578c | [
"MIT"
] | null | null | null | import os
from datetime import datetime
from dateutil import parser as datetime_parser
from dateutil.tz import tzutc
from flask import Flask, url_for, jsonify, request
from flask.ext.sqlalchemy import SQLAlchemy
from utils import split_url
basedir = os.path.abspath(os.path.dirname(__file__))
db_path = os.path.join(basedir, '../data.sqlite')
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + db_path
db = SQLAlchemy(app)
class ValidationError(ValueError):
    """Raised when client-supplied JSON is missing or has invalid fields."""
    pass
# JSON error handlers: every error leaves the API as a JSON body with
# 'status', 'error' and 'message' keys plus the matching HTTP status code.
@app.errorhandler(ValidationError)
def bad_request(e):
    # 400: client sent JSON that import_data() rejected
    response = jsonify({'status': 400, 'error': 'bad request',
                        'message': e.args[0]})
    response.status_code = 400
    return response
@app.errorhandler(404)
def not_found(e):
    # 404: no resource at the requested URI
    response = jsonify({'status': 404, 'error': 'not found',
                        'message': 'invalid resource URI'})
    response.status_code = 404
    return response
@app.errorhandler(405)
def method_not_supported(e):
    # 405: URI exists but does not accept this HTTP method
    response = jsonify({'status': 405, 'error': 'method not supported',
                        'message': 'the method is not supported'})
    response.status_code = 405
    return response
@app.errorhandler(500)
def internal_server_error(e):
    # 500: unexpected exception; expose its message in the JSON body
    response = jsonify({'status': 500, 'error': 'internal server error',
                        'message': e.args[0]})
    response.status_code = 500
    return response
class Customer(db.Model):
    """A customer who can place orders (one-to-many via Order.customer_id)."""
    __tablename__ = 'customers'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True)
    orders = db.relationship('Order', backref='customer', lazy='dynamic')
    def get_url(self):
        """Return the canonical (absolute) URL of this customer resource."""
        return url_for('get_customer', id=self.id, _external=True)
    def export_data(self):
        """Serialize this customer for a JSON response (URLs, not ids)."""
        return {
            'self_url': self.get_url(),
            'name': self.name,
            'orders_url': url_for('get_customer_orders', id=self.id,
                                  _external=True)
        }
    def import_data(self, data):
        """Populate from client JSON; raises ValidationError if 'name' is missing."""
        try:
            self.name = data['name']
        except KeyError as e:
            raise ValidationError('Invalid customer: missing ' + e.args[0])
        return self
class Product(db.Model):
    """A product that order items can reference."""
    __tablename__ = 'products'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True)
    items = db.relationship('Item', backref='product', lazy='dynamic')
    def get_url(self):
        """Return the canonical (absolute) URL of this product resource."""
        return url_for('get_product', id=self.id, _external=True)
    def export_data(self):
        """Serialize this product for a JSON response."""
        return {
            'self_url': self.get_url(),
            'name': self.name
        }
    def import_data(self, data):
        """Populate from client JSON; raises ValidationError if 'name' is missing."""
        try:
            self.name = data['name']
        except KeyError as e:
            raise ValidationError('Invalid product: missing ' + e.args[0])
        return self
class Order(db.Model):
    """An order placed by a customer; owns its items (delete-orphan cascade)."""
    __tablename__ = 'orders'
    id = db.Column(db.Integer, primary_key=True)
    customer_id = db.Column(db.Integer, db.ForeignKey('customers.id'),
                            index=True)
    # Creation time by default; import_data() may overwrite it
    date = db.Column(db.DateTime, default=datetime.now)
    items = db.relationship('Item', backref='order', lazy='dynamic',
                            cascade='all, delete-orphan')
    def get_url(self):
        """Return the canonical (absolute) URL of this order resource."""
        return url_for('get_order', id=self.id, _external=True)
    def export_data(self):
        """Serialize this order; the appended 'Z' marks the date as UTC."""
        return {
            'self_url': self.get_url(),
            'customer_url': self.customer.get_url(),
            'date': self.date.isoformat() + 'Z',
            'items_url': url_for('get_order_items', id=self.id,
                                 _external=True)
        }
    def import_data(self, data):
        """Parse the client's 'date' into a naive UTC datetime.
        Raises ValidationError when 'date' is missing.
        """
        try:
            self.date = datetime_parser.parse(data['date']).astimezone(
                tzutc()).replace(tzinfo=None)
        except KeyError as e:
            raise ValidationError('Invalid order: missing ' + e.args[0])
        return self
class Item(db.Model):
    """A line item of an order, referencing one product with a quantity."""
    __tablename__ = 'items'
    id = db.Column(db.Integer, primary_key=True)
    order_id = db.Column(db.Integer, db.ForeignKey('orders.id'), index=True)
    product_id = db.Column(db.Integer, db.ForeignKey('products.id'),
                           index=True)
    quantity = db.Column(db.Integer)
    def get_url(self):
        """Return the canonical (absolute) URL of this item resource."""
        return url_for('get_item', id=self.id, _external=True)
    def export_data(self):
        """Serialize this item for a JSON response."""
        return {
            'self_url': self.get_url(),
            'order_url': self.order.get_url(),
            'product_url': self.product.get_url(),
            'quantity': self.quantity
        }
    def import_data(self, data):
        """Populate from client JSON.
        The product is given by URL: it is resolved with split_url() and must
        point at an existing 'get_product' endpoint, else ValidationError.
        """
        try:
            endpoint, args = split_url(data['product_url'])
            self.quantity = int(data['quantity'])
        except KeyError as e:
            raise ValidationError('Invalid order: missing ' + e.args[0])
        if endpoint != 'get_product' or not 'id' in args:
            raise ValidationError('Invalid product URL: ' +
                                  data['product_url'])
        self.product = Product.query.get(args['id'])
        if self.product is None:
            raise ValidationError('Invalid product URL: ' +
                                  data['product_url'])
        return self
@app.route('/customers/', methods=['GET'])
def get_customers():
    # Collection view: URLs only; clients follow them for the details
    return jsonify({'customers': [customer.get_url() for customer in
                                  Customer.query.all()]})
@app.route('/customers/<int:id>', methods=['GET'])
def get_customer(id):
    # get_or_404 turns an unknown id into a 404 automatically
    return jsonify(Customer.query.get_or_404(id).export_data())
@app.route('/customers/', methods=['POST'])
def new_customer():
    # Create from request JSON; reply 201 with a Location header
    customer = Customer()
    customer.import_data(request.json)
    db.session.add(customer)
    db.session.commit()
    return jsonify({}), 201, {'Location': customer.get_url()}
@app.route('/customers/<int:id>', methods=['PUT'])
def edit_customer(id):
    # Update an existing customer; body of the reply is an empty JSON object
    customer = Customer.query.get_or_404(id)
    customer.import_data(request.json)
    db.session.add(customer)
    db.session.commit()
    return jsonify({})
@app.route('/products/', methods=['GET'])
def get_products():
    # Collection view: URLs only
    return jsonify({'products': [product.get_url() for product in
                                 Product.query.all()]})
@app.route('/products/<int:id>', methods=['GET'])
def get_product(id):
    return jsonify(Product.query.get_or_404(id).export_data())
@app.route('/products/', methods=['POST'])
def new_product():
    # Create from request JSON; reply 201 with a Location header
    product = Product()
    product.import_data(request.json)
    db.session.add(product)
    db.session.commit()
    return jsonify({}), 201, {'Location': product.get_url()}
@app.route('/products/<int:id>', methods=['PUT'])
def edit_product(id):
    product = Product.query.get_or_404(id)
    product.import_data(request.json)
    db.session.add(product)
    db.session.commit()
    return jsonify({})
@app.route('/orders/', methods=['GET'])
def get_orders():
    # All orders across all customers, as URLs
    return jsonify({'orders': [order.get_url() for order in Order.query.all()]})
@app.route('/customers/<int:id>/orders/', methods=['GET'])
def get_customer_orders(id):
    # Orders of a single customer (404 if the customer is unknown)
    customer = Customer.query.get_or_404(id)
    return jsonify({'orders': [order.get_url() for order in
                               customer.orders.all()]})
@app.route('/orders/<int:id>', methods=['GET'])
def get_order(id):
    return jsonify(Order.query.get_or_404(id).export_data())
@app.route('/customers/<int:id>/orders/', methods=['POST'])
def new_customer_order(id):
    # Orders are created nested under their customer
    customer = Customer.query.get_or_404(id)
    order = Order(customer=customer)
    order.import_data(request.json)
    db.session.add(order)
    db.session.commit()
    return jsonify({}), 201, {'Location': order.get_url()}
@app.route('/orders/<int:id>', methods=['PUT'])
def edit_order(id):
    order = Order.query.get_or_404(id)
    order.import_data(request.json)
    db.session.add(order)
    db.session.commit()
    return jsonify({})
@app.route('/orders/<int:id>', methods=['DELETE'])
def delete_order(id):
    # cascade='all, delete-orphan' on Order.items removes its items too
    order = Order.query.get_or_404(id)
    db.session.delete(order)
    db.session.commit()
    return jsonify({})
@app.route('/orders/<int:id>/items/', methods=['GET'])
def get_order_items(id):
    # Items of one order, as URLs (404 if the order is unknown)
    order = Order.query.get_or_404(id)
    return jsonify({'items': [item.get_url() for item in order.items.all()]})
@app.route('/items/<int:id>', methods=['GET'])
def get_item(id):
    return jsonify(Item.query.get_or_404(id).export_data())
@app.route('/orders/<int:id>/items/', methods=['POST'])
def new_order_item(id):
    # Items are created nested under their order
    order = Order.query.get_or_404(id)
    item = Item(order=order)
    item.import_data(request.json)
    db.session.add(item)
    db.session.commit()
    return jsonify({}), 201, {'Location': item.get_url()}
@app.route('/items/<int:id>', methods=['PUT'])
def edit_item(id):
    item = Item.query.get_or_404(id)
    item.import_data(request.json)
    db.session.add(item)
    db.session.commit()
    return jsonify({})
@app.route('/items/<int:id>', methods=['DELETE'])
def delete_item(id):
    item = Item.query.get_or_404(id)
    db.session.delete(item)
    db.session.commit()
    return jsonify({})
if __name__ == '__main__':
    # Development entry point: create tables if needed, run the debug server
    db.create_all()
    app.run(debug=True)
| 31.156997 | 80 | 0.621536 | 3,836 | 0.420199 | 0 | 0 | 4,701 | 0.514952 | 0 | 0 | 1,446 | 0.158396 |
760878fec74dfca0b18e914746679e0b6733291a | 411 | py | Python | __findLengthsNoEmptyStrings.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | __findLengthsNoEmptyStrings.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | __findLengthsNoEmptyStrings.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | #License: https://bit.ly/3oLErEI
def test(strs):
return [*map(len, strs)]
strs = ['cat', 'car', 'fear', 'center']
print("Original strings:")
print(strs)
print("Lengths of the said list of non-empty strings:")
print(test(strs))
strs = ['cat', 'dog', 'shatter', 'donut', 'at', 'todo', '']
print("\nOriginal strings:")
print(strs)
print("Lengths of the said list of non-empty strings:")
print(test(strs))
| 27.4 | 60 | 0.652068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.559611 |
760c099ccfce0265d0163471803afb16f5cff523 | 2,644 | py | Python | speedtest2dynamodb_test.py | ujuettner/speedtest2DynamoDB | 73229088d20556928ad6df2e263e5e6452be22fa | [
"Apache-2.0"
] | null | null | null | speedtest2dynamodb_test.py | ujuettner/speedtest2DynamoDB | 73229088d20556928ad6df2e263e5e6452be22fa | [
"Apache-2.0"
] | null | null | null | speedtest2dynamodb_test.py | ujuettner/speedtest2DynamoDB | 73229088d20556928ad6df2e263e5e6452be22fa | [
"Apache-2.0"
] | null | null | null | """Test cases."""
import unittest
import logging
from speedtest2dynamodb import parse_output
class SpeedTest2DynamoDBTestCase(unittest.TestCase):
    """Collection of tests.
    Each case feeds a canned speedtest output string to parse_output() and
    checks the returned tuple; the expected values show rates normalized
    with factors of 1024 (e.g. 40.53 Kbit/s -> 41502.72).
    """
    def setUp(self):
        # Root logger, so individual tests can silence expected error logs
        self.logger = logging.getLogger()
    def test_parse_output_bit(self):
        """Test output that contains only bit/s."""
        self.assertEqual(
            parse_output(
                'Ping: 10.331 ms\nDownload: 40.5 bit/s\nUpload: 5.88 Bit/s'
            ),
            (10.331, 40.5, 5.88)
        )
    def test_parse_output_kbit(self):
        """Test output that contains only Kbit/s."""
        self.assertEqual(
            parse_output(
                'Ping: 10.331 ms\nDownload: 40.53 Kbit/s\nUpload: 5.88 kbit/s'
            ),
            (10.331, 41502.72, 6021.12)
        )
    def test_parse_output_mbit(self):
        """Test output that contains only Mbit/s."""
        self.assertEqual(
            parse_output(
                'Ping: 10.331 ms\nDownload: 40.53 mbit/s\nUpload: 5.88 Mbit/s'
            ),
            (10.331, 42498785.28, 6165626.88)
        )
    def test_parse_output_gbit(self):
        """Test output that contains only Gbit/s."""
        self.assertEqual(
            parse_output(
                'Ping: 10.331 ms\nDownload: 40.53 Gbit/s\nUpload: 5.88 gbit/s'
            ),
            (10.331, 43518756126.72, 6313601925.12)
        )
    def test_parse_output_mixed_bit(self):
        """Test output that contains bit/s and Gbit/s."""
        self.assertEqual(
            parse_output(
                'Ping: 10.331 ms\nDownload: 40.53 Gbit/s\nUpload: 5.88 bit/s'
            ),
            (10.331, 43518756126.72, 5.88)
        )
    def test_parse_output_swapped_order(self):
        """Test output with changed order."""
        self.assertEqual(
            parse_output(
                'Upload: 5.88 bit/s\nPing: 10.331 ms\nDownload: 40.53 bit/s'
            ),
            (10.331, 40.53, 5.88)
        )
    def test_parse_output_not_matching(self):
        """Test whether default values are returned when unable to parse."""
        # Silence logging, as we expect to produce exceptions within tests and
        # do not want to clutter the output:
        old_log_level = self.logger.getEffectiveLevel()
        self.logger.setLevel(logging.CRITICAL)
        self.assertEqual(
            parse_output(
                'Ping: 10.331 s\nDownload: 40.xx bit/s\nUpload: 5.88 m/s'
            ),
            (-1, -1, -1)
        )
        # Restore to default log level:
        self.logger.setLevel(old_log_level)
if __name__ == '__main__':
    # Allow running this test module directly
    unittest.main()
| 30.744186 | 78 | 0.561271 | 2,500 | 0.945537 | 0 | 0 | 0 | 0 | 0 | 0 | 942 | 0.356278 |
760e0de8aa3b473af5ac0c6c5fd6434bc4cb2ea6 | 2,078 | py | Python | freenet/lib/file_parser.py | augustand/fdslight | f3d82465aaa27160438b22f9b474be8c5dc100cc | [
"BSD-2-Clause"
] | null | null | null | freenet/lib/file_parser.py | augustand/fdslight | f3d82465aaa27160438b22f9b474be8c5dc100cc | [
"BSD-2-Clause"
] | null | null | null | freenet/lib/file_parser.py | augustand/fdslight | f3d82465aaa27160438b22f9b474be8c5dc100cc | [
"BSD-2-Clause"
] | 1 | 2019-06-22T23:25:56.000Z | 2019-06-22T23:25:56.000Z | #!/usr/bin/env python3
"""文件解析器,对dns分发的rules和白名单ip列表进行解析
文件格式:一条规则就是一行,#开头的表示注解:
"""
class FilefmtErr(Exception): pass  # malformed rule file; NOTE(review): not raised in this module -- confirm external use
def __drop_comment(line):
    """Strip an inline comment: return the text before the first '#'
    (the whole line when there is no '#')."""
    pos = line.find("#")
    if pos < 0:
        return line
    return line[0:pos]
def __read_from_file(fpath):
    """Read a rules file and return its meaningful lines.

    Comments (anything after '#') are removed, CR/LF characters stripped,
    surrounding whitespace trimmed, and blank lines skipped.

    Parameters
    ----------
    fpath : str
        Path of the file to read.

    Returns
    -------
    list of str
    """
    result = []
    # `with` guarantees the handle is closed even if decoding fails
    # (the previous code could leak it on an exception).
    with open(fpath, "rb") as fdst:
        for raw in fdst:
            line = raw.decode("iso-8859-1")
            line = __drop_comment(line)
            line = line.replace("\r", "").replace("\n", "").strip()
            if not line:
                continue
            result.append(line)
    return result
def parse_host_file(fpath):
    """Parse a host (domain-rule) file.

    Each useful line has the form ``host:action`` where the action is an
    integer; lines with no colon, an empty host, or a non-numeric action
    are silently skipped.

    Returns
    -------
    list of (host, action) tuples
    """
    results = []
    for line in __read_from_file(fpath):
        host, sep, action = line.partition(":")
        # sep is "" when there is no colon; an empty host means the colon
        # sat at position 0 -- both cases are rejected
        if not sep or not host:
            continue
        try:
            results.append((host, int(action)))
        except ValueError:
            continue
    return results
def __get_ip_subnet(line):
    """Parse ``ip/prefix`` notation.

    Returns (ip, prefix) on success, or None when the format is wrong.
    """
    pos = line.find("/")
    # The shortest valid dotted quad ("0.0.0.0") is 7 characters, so a
    # slash earlier than index 7 cannot follow a complete IPv4 address.
    if pos < 7: return None
    ipaddr = line[0:pos]
    pos += 1
    try:
        mask = int(line[pos:])
    except ValueError:
        # The previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only a non-numeric prefix is expected here.
        return None
    return (ipaddr, mask,)
def parse_ip_subnet_file(fpath):
    """Parse an IP subnet list file.

    Returns
    -------
    list of (ip, prefix) tuples
        Malformed lines are reported and skipped.  (The previous code
        still appended ``None`` for bad lines, corrupting the result.)
    """
    results = []
    for line in __read_from_file(fpath):
        ret = __get_ip_subnet(line)
        if not ret:
            print("the wrong format on: %s" % line)
            continue
        results.append(ret)
    return results
def get_linux_host_nameservers(resolv_path="/etc/resolv.conf"):
    """Collect every nameserver configured on a Linux host.

    Parameters
    ----------
    resolv_path : str, optional
        Path of the resolver configuration file (default /etc/resolv.conf).

    Returns
    -------
    list of str
        Nameserver addresses, in file order.
    """
    nameservers = []
    # Fixes vs. previous version: the file handle is now closed (`with`),
    # and a blank line no longer raises IndexError on ts[0].
    with open(resolv_path, "r") as fdst:
        for line in fdst:
            ts = line.lstrip()
            if not ts or ts[0] == "#":
                continue
            if ts[0:10] != "nameserver":
                continue
            for s in ("\r", "\n", "nameserver"):
                ts = ts.replace(s, "")
            nameservers.append(ts.strip())
    return nameservers
761081e986ec7c695959355edb65bc9f26566336 | 2,918 | py | Python | src/duty/context.py | pawamoy/duty | dac90183543b73fdd6e5fab354a56c5484d5762a | [
"0BSD"
] | 25 | 2020-10-09T08:46:12.000Z | 2022-03-16T18:58:39.000Z | src/duty/context.py | pawamoy/duty | dac90183543b73fdd6e5fab354a56c5484d5762a | [
"0BSD"
] | 4 | 2020-11-12T19:27:41.000Z | 2021-08-01T13:24:31.000Z | src/duty/context.py | pawamoy/duty | dac90183543b73fdd6e5fab354a56c5484d5762a | [
"0BSD"
] | null | null | null | """Module containing the context definition."""
import os
from contextlib import contextmanager
from typing import Any, Callable, Dict, List, Union
from failprint.runners import run as failprint_run
from duty.exceptions import DutyFailure
CmdType = Union[str, List[str], Callable]


class Context:
    """
    A simple context class.

    Instances of this class are handed to the functions decorated with `duty`.
    """

    def __init__(self, options, options_override=None) -> None:
        """
        Initialize the context.

        Arguments:
            options: Base options specified in `@duty(**options)`.
            options_override: Options overriding both `run` and `@duty` options,
                allowing users to override options from the CLI or environment.
        """
        self._options = options
        self._options_override = options_override if options_override else {}
        self._option_stack: List[Dict[str, Any]] = []

    def run(self, cmd: CmdType, **options) -> str:
        """
        Run a command in a subprocess or a Python callable.

        Arguments:
            cmd: A command or a Python callable.
            options: Options passed to the `failprint` functions.

        Raises:
            DutyFailure: When the exit code / function result is greater than 0.

        Returns:
            The output of the command.
        """
        merged = dict(self._options, **options)
        allow_overrides = merged.pop("allow_overrides", True)
        workdir = merged.pop("workdir", None)
        if allow_overrides:
            merged.update(self._options_override)
        with self.cd(workdir):
            try:
                result = failprint_run(cmd, **merged)
            except KeyboardInterrupt:
                raise DutyFailure(130)  # noqa: WPS432 (ctrl-c)
            if result.code:
                raise DutyFailure(result.code)
            return result.output

    @contextmanager
    def options(self, **opts):
        """
        Temporarily change options, as a context manager.

        Nests freely: on leaving each `with` block the previous options
        are restored from the stack.

        Arguments:
            **opts: Options used in `run`.

        Yields:
            Nothing.
        """
        previous = self._options
        self._option_stack.append(previous)
        self._options = dict(previous, **opts)
        try:
            yield
        finally:
            self._options = self._option_stack.pop()

    @contextmanager
    def cd(self, directory: str):
        """
        Temporarily change the working directory, as a context manager.

        Arguments:
            directory: The directory to go into; falsy values mean "stay put".

        Yields:
            Nothing.
        """
        if not directory:
            yield
            return
        previous_dir = os.getcwd()
        os.chdir(directory)
        try:
            yield
        finally:
            os.chdir(previous_dir)
| 27.018519 | 101 | 0.589788 | 2,630 | 0.901302 | 895 | 0.306717 | 935 | 0.320425 | 0 | 0 | 1,307 | 0.44791 |
76108263a77b26ab3e9db3181d3179bc7218f2f9 | 2,802 | py | Python | Tests/TestImgRecognitionAndMotorControl/Test2048Detect4.py | robdobsn/RobotPlay2048 | 0715fd67313ccf6015871c2a73f38de3ca014f10 | [
"MIT"
] | null | null | null | Tests/TestImgRecognitionAndMotorControl/Test2048Detect4.py | robdobsn/RobotPlay2048 | 0715fd67313ccf6015871c2a73f38de3ca014f10 | [
"MIT"
] | null | null | null | Tests/TestImgRecognitionAndMotorControl/Test2048Detect4.py | robdobsn/RobotPlay2048 | 0715fd67313ccf6015871c2a73f38de3ca014f10 | [
"MIT"
] | null | null | null |
import cv2
import numpy as np
import picamera
import time
def identifySq(pt, w, h):
    """Map an image point onto a cell index of the 4x4 board.

    ``pt`` is an (x, y) pixel coordinate of a template match; ``w`` and
    ``h`` are accepted for call-site compatibility but not used.

    Returns ``(index, True)`` when the point falls inside the grid and
    ``(0, False)`` otherwise.
    """
    grid_left, grid_top = 80, 210   # pixel position of the first cell centre
    pitch_x, pitch_y = 94, 82       # cell-to-cell spacing in pixels
    col = (pt[0] - (grid_left - pitch_x / 2)) / pitch_x
    row = (pt[1] - (grid_top - pitch_y / 2)) / pitch_y
    # print ("ID", pt, w, h, col, row)
    if not (0 <= col < 4 and 0 <= row < 4):
        return 0, False
    return row * 4 + col, True
if __name__ == '__main__' :
    # NOTE(review): this script appears written for Python 2 — `identifySq`
    # relies on integer division and `boardCells[sq]` / `pt[1]+h/3` need
    # int results; under Python 3 these become floats.  Confirm target runtime.
    # Acquire source image.
    cam = picamera.PiCamera()
    cam.capture('newimg.jpg')
    # Read source image.
    im_src = cv2.imread('newimg.jpg')
    # Resize image
    newWidth = 640.0
    rat1 = newWidth / im_src.shape[1]
    dim1 = (int(newWidth), int(im_src.shape[0] * rat1))
    im_small = cv2.resize(im_src, dim1, interpolation = cv2.INTER_AREA)
    # Four corners of the book in source image
    pts_src = np.array([[57, 368], [98, 22], [585, 28], [626, 374]], dtype=float)
    # Read destination image.
    im_dst = cv2.imread('destimg2.jpg')
    # Four corners of the book in destination image.
    pts_dst = np.array([[0, 0], [511, 0], [511, 639], [0, 639]], dtype=float)
    # Calculate Homography
    h, status = cv2.findHomography(pts_src, pts_dst)
    # Warp source image to destination based on homography
    im_out = cv2.warpPerspective(im_small, h, (im_dst.shape[1], im_dst.shape[0]))
    im_grey = cv2.cvtColor(im_out, cv2.COLOR_BGR2GRAY)
    cv2.imwrite('img23.png', im_out)
    # Match to template tiles
    tileFiles = ['tile000002.png', 'tile000004.png', 'tile000008.png',
                 'tile000016.png', 'tile000032.png', 'tile000064.png',
                 'tile000128.png', 'tile000256.png', 'tile000512.png',
                 'tile001024.png']
    lineThicknessIdx = 1
    tileVal = 2
    boardCells = [0] * 16
    # Template-match each tile image against the rectified board; first hit
    # on an empty cell wins (template list is ordered 2, 4, 8, ... 1024).
    for tileFile in tileFiles:
        tile = cv2.imread(tileFile, 0)
        # NOTE(review): `h` here shadows the homography matrix computed above;
        # harmless since the homography is no longer needed, but confusing.
        w, h = tile.shape[::-1]
        # Apply template Matching
        method = cv2.TM_CCOEFF_NORMED
        res = cv2.matchTemplate(im_grey, tile, method)
        threshold = 0.8
        loc = np.where(res >= threshold)
        for pt in zip(*loc[::-1]):
            sq, sqValid = identifySq(pt, w, h)
            if sqValid:
                if boardCells[sq] == 0:
                    boardCells[sq] = tileVal
                    cv2.putText(im_out, str(tileVal), (pt[0],pt[1]+h/3),cv2.FONT_HERSHEY_SCRIPT_COMPLEX, 1, 0, 1)
                    #print(sq, tileVal)
            # print(pt, tileVal, w, h)
            #cv2.rectangle(im_out, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), lineThicknessIdx)
        lineThicknessIdx += 1
        # print("Found", len(zip(*loc[::-1])),"tiles of", tileVal)
        tileVal *= 2
    # Dump the recognised board state, one cell per line.
    for cellIdx in range(len(boardCells)):
        print(cellIdx, boardCells[cellIdx])
    cv2.imshow("Matched One", im_out)
    cv2.waitKey(1000)
    # time.sleep(5)
761129398336f1e31011977bfec9f9dd0ac8ed7c | 7,193 | py | Python | utils/views/modals/tag.py | DiscordGIR/Bloo2 | bb4d8b6ac54f2242ad0ce3663e4ae97f05064dff | [
"MIT"
] | null | null | null | utils/views/modals/tag.py | DiscordGIR/Bloo2 | bb4d8b6ac54f2242ad0ce3663e4ae97f05064dff | [
"MIT"
] | null | null | null | utils/views/modals/tag.py | DiscordGIR/Bloo2 | bb4d8b6ac54f2242ad0ce3663e4ae97f05064dff | [
"MIT"
] | 1 | 2022-03-31T07:32:30.000Z | 2022-03-31T07:32:30.000Z | import re
import discord
from data.model import Tag
class TagModal(discord.ui.Modal):
    """Discord modal that collects a new tag: a body plus up to two link buttons."""

    def __init__(self, bot, tag_name, author: discord.Member) -> None:
        self.bot = bot
        self.tag_name = tag_name
        self.author = author
        # Filled in by on_submit() once the input validates; None until then.
        self.tag = None
        super().__init__(title=f"Add tag {self.tag_name}")

        # children[0]: the tag body.
        self.add_item(
            discord.ui.TextInput(
                label="Body of the tag",
                placeholder="Enter the body of the tag",
                style=discord.TextStyle.long,
            )
        )

        # children[1..4]: alternating (name, link) inputs for two optional buttons.
        for i in range(2):
            self.add_item(
                discord.ui.TextInput(
                    label=f"Button {(i%2)+1} name",
                    placeholder="Enter a name for the button. You can also put an emoji at the start.",
                    style=discord.TextStyle.short,
                    required=False,
                    max_length=80
                )
            )
            self.add_item(
                discord.ui.TextInput(
                    label=f"Button {(i%2)+1} link",
                    placeholder="Enter a link for the button",
                    style=discord.TextStyle.short,
                    required=False
                )
            )

    async def on_submit(self, interaction: discord.Interaction):
        """Validate the modal fields and build the Tag object; report errors ephemerally."""
        # Only the member who opened the modal may submit it.
        if interaction.user != self.author:
            return

        # children[1::2] are the button names, children[2::2] the links (see __init__).
        button_names = [child.value.strip() for child in self.children[1::2] if child.value is not None and len(child.value.strip()) > 0]
        links = [child.value.strip() for child in self.children[2::2] if child.value is not None and len(child.value.strip()) > 0]

        # make sure all links are valid URLs with regex
        if not all(re.match(r'^(https|http)://.*', link) for link in links):
            await self.send_error(interaction, "The links must be valid URLs!")
            return

        if len(button_names) != len(links):
            await self.send_error(interaction, "All buttons must have labels and links!")
            return

        buttons = list(zip(button_names, links))
        description = self.children[0].value

        if not description:
            await self.send_error(interaction, "Description is missing!")
            return

        # An emoji (custom or unicode) is allowed only as the label's first
        # character, and must not be the whole label.
        for label in button_names:
            custom_emojis = re.search(r'<:\d+>|<:.+?:\d+>|<a:.+:\d+>|[\U00010000-\U0010ffff]', label)
            if custom_emojis is not None:
                emoji = custom_emojis.group(0).strip()
                if not label.startswith(emoji):
                    await self.send_error(interaction, "Emojis must be at the start of labels!")
                    return
                label = label.replace(emoji, '')
                label = label.strip()
                if not label:
                    await self.send_error(interaction, "A button cannot just be an emoji!")
                    return

        # prepare tag data for database
        tag = Tag()
        tag.name = self.tag_name.lower()
        tag.content = description
        tag.added_by_id = self.author.id
        tag.added_by_tag = str(self.author)
        tag.button_links = buttons
        self.tag = tag

        self.stop()
        # NOTE(review): bare `except` also hides CancelledError; presumably it
        # only means to ignore "interaction already acknowledged" — confirm.
        try:
            await interaction.response.send_message()
        except:
            pass

    async def send_error(self, interaction: discord.Interaction, error: str):
        """Send a red, ephemeral error embed in response to the interaction."""
        embed = discord.Embed(title=":(\nYour command ran into a problem", description=error, color=discord.Color.red())
        await interaction.response.send_message(embed=embed, ephemeral=True)
class EditTagModal(discord.ui.Modal):
    """Discord modal that edits an existing tag, pre-filling its body and buttons."""

    def __init__(self, tag: Tag, author: discord.Member) -> None:
        self.tag = tag
        self.author = author
        # Set to True by on_submit() once the edit has been applied to self.tag.
        self.edited = False
        super().__init__(title=f"Edit tag {self.tag.name}")

        # children[0]: the tag body, pre-filled with the current content.
        self.add_item(
            discord.ui.TextInput(
                label="Body of the tag",
                placeholder="Enter the body of the tag",
                style=discord.TextStyle.long,
                default=tag.content
            )
        )

        # children[1..4]: alternating (name, link) inputs for two optional
        # buttons, pre-filled from the tag's existing button_links if present.
        for i in range(2):
            self.add_item(
                discord.ui.TextInput(
                    label=f"Button {(i%2)+1} name",
                    placeholder="Enter a name for the button. You can also put an emoji at the start.",
                    style=discord.TextStyle.short,
                    required=False,
                    max_length=80,
                    default=self.tag.button_links[i][0] if len(self.tag.button_links) > i else None
                )
            )
            self.add_item(
                discord.ui.TextInput(
                    label=f"Button {(i%2)+1} link",
                    placeholder="Enter a link for the button",
                    style=discord.TextStyle.short,
                    required=False,
                    default=self.tag.button_links[i][1] if len(self.tag.button_links) > i else None
                )
            )

    async def on_submit(self, interaction: discord.Interaction):
        """Validate the modal fields and apply them to the tag; report errors ephemerally."""
        # Only the member who opened the modal may submit it.
        if interaction.user != self.author:
            return

        # children[1::2] are the button names, children[2::2] the links (see __init__).
        button_names = [child.value.strip() for child in self.children[1::2] if child.value is not None and len(child.value.strip()) > 0]
        links = [child.value.strip() for child in self.children[2::2] if child.value is not None and len(child.value.strip()) > 0]

        # make sure all links are valid URLs with regex
        if not all(re.match(r'^(https|http)://.*', link) for link in links):
            await self.send_error(interaction, "The links must be valid URLs!")
            return

        if len(button_names) != len(links):
            await self.send_error(interaction, "All buttons must have labels and links!")
            return

        buttons = list(zip(button_names, links))
        description = self.children[0].value

        if not description:
            await self.send_error(interaction, "Description is missing!")
            return

        # An emoji (custom or unicode) is allowed only as the label's first
        # character, and must not be the whole label.
        for label in button_names:
            custom_emojis = re.search(r'<:\d+>|<:.+?:\d+>|<a:.+:\d+>|[\U00010000-\U0010ffff]', label)
            if custom_emojis is not None:
                emoji = custom_emojis.group(0).strip()
                if not label.startswith(emoji):
                    await self.send_error(interaction, "Emojis must be at the start of labels!")
                    return
                label = label.replace(emoji, '')
                label = label.strip()
                if not label:
                    await self.send_error(interaction, "A button cannot just be an emoji!")
                    return

        # prepare tag data for database
        self.tag.content = description
        self.tag.button_links = buttons
        self.edited = True

        self.stop()
        # NOTE(review): bare `except` also hides CancelledError; presumably it
        # only means to ignore "interaction already acknowledged" — confirm.
        try:
            await interaction.response.send_message()
        except:
            pass

    async def send_error(self, interaction: discord.Interaction, error: str):
        """Send a red, ephemeral error embed in response to the interaction."""
        embed = discord.Embed(title=":(\nYour command ran into a problem", description=error, color=discord.Color.red())
        await interaction.response.send_message(embed=embed, ephemeral=True)
| 39.092391 | 137 | 0.553177 | 7,135 | 0.991937 | 0 | 0 | 0 | 0 | 4,488 | 0.62394 | 1,165 | 0.161963 |
7614576866838e113eac6ed1704be36e192c3e82 | 3,096 | py | Python | meterological_data.py | ask-santosh/Weather-Data-Analysis | c9b4dcfe0ce729554eb8fdff6cabc0e1e824ab8f | [
"Apache-2.0"
] | null | null | null | meterological_data.py | ask-santosh/Weather-Data-Analysis | c9b4dcfe0ce729554eb8fdff6cabc0e1e824ab8f | [
"Apache-2.0"
] | null | null | null | meterological_data.py | ask-santosh/Weather-Data-Analysis | c9b4dcfe0ce729554eb8fdff6cabc0e1e824ab8f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Weather-history exploration script exported from a Jupyter notebook
# (`# In[n]:` markers are the original cell boundaries).  Bare expression
# statements were cell outputs in the notebook and are no-ops here.

# In[3]:

import pandas as pd

# In[4]:

weather_data = pd.read_csv("weatherHistory.csv")
# weather_data.columns = weather_data.iloc[0]
# weather_data.columns
# weather_data
# weather_data.head()
weather_data.columns.values

# In[5]:

weather_data = weather_data.iloc[1:]

# In[6]:

weather_data.head()

# In[7]:

list(weather_data.columns.values)

# In[ ]:

# We first convert all the numeric data from object data type to float/int data type.

# In[8]:

weather_data['Temperature (C)'] = weather_data['Temperature (C)'].astype('float')
weather_data['Apparent Temperature (C)'] = weather_data['Apparent Temperature (C)'].astype('float')
weather_data['Humidity'] = weather_data['Humidity'].astype('float')
weather_data['Wind Speed (km/h)'] = weather_data['Wind Speed (km/h)'].astype('float')
weather_data['Wind Bearing (degrees)'] = weather_data['Wind Bearing (degrees)'].astype('int')
weather_data['Visibility (km)'] = weather_data['Visibility (km)'].astype('float')
weather_data['Loud Cover'] = weather_data['Loud Cover'].astype('int')
weather_data['Pressure (millibars)'] = weather_data['Pressure (millibars)'].astype('float')

# In[9]:

# Fill missing precipitation types with the most frequent value (the mode).
weather_data['Precip Type'].fillna(weather_data['Precip Type'].value_counts().index[0], inplace=True)

# In[ ]:

# After removing all the null values.

# In[10]:

weather_data.isnull().sum()

# In[ ]:

# Heat map representation of above data

# In[11]:

import seaborn as sns

# In[12]:

sns.heatmap(weather_data.isnull(), yticklabels=False, cbar=True)

# In[27]:

from datetime import timedelta
import datetime as dt
weather_data

# In[28]:

# Most frequent weather from the Summary column
# weather_data['Formatted Date'] = pd.to_datetime(weather_data['Formatted Date'])
weather = weather_data['Summary'].value_counts().reset_index()
weather.columns = ['Weather', 'Count']
print(weather)

# In[ ]:

# We can observe that the most common weather that prevails is Partly cloudy

# In[35]:

import matplotlib.pyplot as plt
sns.set(rc={'figure.figsize':(8,4)})
plt.xticks(rotation=90)
sns.lineplot(x=weather['Weather'], y=weather['Count'], data=weather)
plt.show()

# In[ ]:

# In the below graph we can observe that maximum temperature is when the weather is dry.

# In[36]:

plt.figure(figsize=(12,6))
plt.xticks(rotation=90)
plt.title('Weather')
sns.barplot(x=weather_data['Summary'], y=weather_data['Temperature (C)'])

# In[ ]:

# In[ ]:

# In[45]:

# The below graph remarks that maximum humidity is for the weather types: Foggy, Breezy and Foggy
# and Rainy weather

# In[44]:

plt.figure(figsize=(12,6))
plt.xticks(rotation=90)
plt.title('Weather')
sns.barplot(x=weather_data['Summary'], y=weather_data['Humidity'])

# In[40]:

plt.figure(figsize=(8,4))
plt.title("Weather vs Pressure")  # bug fix: title read "Pressue"
plt.xticks(rotation=90)
sns.lineplot(y=weather_data['Pressure (millibars)'],x=weather_data['Summary'])

# In[41]:

# Bug fix: the next line was the raw notebook shell command `pip install pywedge`,
# which is not Python and made this whole file a SyntaxError.  Install pywedge
# from the command line (or via `%pip` inside a notebook) instead.
# pip install pywedge

# In[43]:

import pywedge as pw
x = pw.Pywedge_Charts(weather_data, c=None, y='Humidity')
charts = x.make_charts()

# In[ ]:
| 15.557789 | 101 | 0.699935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,511 | 0.488049 |
7615add91618b64986aa97f7ee8e80579bbbb020 | 1,263 | py | Python | JPS_NLP/python/caresjpsnlq/NLQ_Chunker.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 21 | 2021-03-08T01:58:25.000Z | 2022-03-09T15:46:16.000Z | JPS_NLP/python/caresjpsnlq/NLQ_Chunker.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 63 | 2021-05-04T15:05:30.000Z | 2022-03-23T14:32:29.000Z | JPS_NLP/python/caresjpsnlq/NLQ_Chunker.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 15 | 2021-03-08T07:52:03.000Z | 2022-03-29T04:46:20.000Z | import NLQ_Preprocessor as preProcessor
import NLP_Engine as nlpEngine
import NLQ_Interpreter as interpreter
import nltk
import time
class NLQ_Chunker:
    """Pipeline facade: preprocesses a natural-language query, chunks it with
    the NLP engine, and hands the resulting tree to the interpreter."""

    def __init__(self):
        self.preprocessor = preProcessor.PreProcessor()
        self.nlp_engine = nlpEngine.NLP_Engine()
        self.interpreter = interpreter.Interpreter()

    def chunk_a_sentence(self, sentence):
        """Run the full preprocess -> tokenize/tag -> chunk -> interpret pipeline
        on one sentence and return the interpreter's result."""
        sentence = self.preprocessor.replace_special_words(sentence)['sentence']
        # this method returns an object {'sentence': xxxx, 'origional_sentence': xxxx}
        tokens = self.preprocessor.filter_tokens_result(nltk.word_tokenize(sentence))
        tags = self.preprocessor.recify_tagging_result(nltk.pos_tag(tokens))
        # get the bigram of the sentence, which tells subjects/objects from other elements
        bigram = self.nlp_engine.bigram_chunk_sentence(tags)
        final_gram = self.nlp_engine.top_pattern_recognizer(bigram) # the fully processed tree that contains all the info needed.
        # final_gram.draw()
        return self.interpreter.main_tree_navigator(final_gram)
#
#
#
#
#
# chunker = NLQ_Chunker()
# sentence = input('Ask: ')
# start = time.time()
# chunker.chunk_a_sentence(sentence)
# print('took ' , time.time() - start, 'seconds') | 31.575 | 129 | 0.726841 | 952 | 0.753761 | 0 | 0 | 0 | 0 | 0 | 0 | 413 | 0.326999 |
76165e8ec95b875db9cb06e3ee130e980b0505a2 | 2,722 | py | Python | build/lib.linux-armv7l-2.7/bibliopixel/drivers/LPD8806.py | sethshill/final | 0ab89f437076a81e5059aa0f6acd2d16c41fd766 | [
"MIT"
] | 6 | 2017-06-09T18:43:17.000Z | 2018-09-10T19:14:35.000Z | bibliopixel/drivers/LPD8806.py | ManiacalLabs/BiblioPixel2 | bc79ac786af9d98f7f562e6a5b712c4250832a38 | [
"MIT"
] | null | null | null | bibliopixel/drivers/LPD8806.py | ManiacalLabs/BiblioPixel2 | bc79ac786af9d98f7f562e6a5b712c4250832a38 | [
"MIT"
] | null | null | null | from spi_driver_base import DriverSPIBase, ChannelOrder
class DriverLPD8806(DriverSPIBase):
    """Main driver for LPD8806 based LED strips on devices like the Raspberry Pi and BeagleBone"""

    def __init__(self, num, c_order=ChannelOrder.RGB, use_py_spi=True, dev="/dev/spidev0.0", SPISpeed=2):
        """
        :param num: total number of LEDs on the strip
        :param c_order: channel ordering of the strip (see ChannelOrder)
        :param use_py_spi: use the py-spidev interface instead of the raw device file
        :param dev: SPI device path
        :param SPISpeed: SPI clock speed in MHz
        """
        super(DriverLPD8806, self).__init__(num, c_order=c_order,
                                            use_py_spi=use_py_spi, dev=dev, SPISpeed=SPISpeed)

        # Color calculations from
        # http://learn.adafruit.com/light-painting-with-raspberry-pi
        # Gamma table: maps 0-255 to the chip's 7-bit range with the
        # high (latch) bit 0x80 always set.
        self.gamma = [0x80 | int(
            pow(float(i) / 255.0, 2.5) * 127.0 + 0.5) for i in range(256)]

        # LPD8806 requires one latch byte per 32 LEDs at the end of the frame.
        # Bug fix: use floor division so the count stays an int under Python 3
        # (plain `/` yields a float, and `range(0, <float>)` raises TypeError);
        # behavior under Python 2 is unchanged.
        self._latchBytes = (self.numLEDs + 31) // 32
        for i in range(0, self._latchBytes):
            self._buf.append(0)

    # LPD8806 requires gamma correction and only supports 7-bits per channel
    # running each value through gamma will fix all of this.
    # def _fixData(self, data):
    #     for a, b in enumerate(self.c_order):
    #         self._buf[a:self.numLEDs*3:3] = [self.gamma[v] for v in data[b::3]]
# Driver registration manifest consumed by the BiblioPixel driver loader:
# one entry describing this driver's id, class, display name, and the
# UI-configurable parameters with their types/defaults.
MANIFEST = [
    {
        "id": "LPD8806",
        "class": DriverLPD8806,
        "type": "driver",
        "display": "LPD8806 (SPI Native)",
        "desc": "Interface with LPD8806 strips over a native SPI port (Pi, BeagleBone, etc.)",
        "params": [{
            "id": "num",
            "label": "# Pixels",
            "type": "int",
            "default": 1,
            "min": 1,
            "help": "Total pixels in display."
        }, {
            "id": "c_order",
            "label": "Channel Order",
            "type": "combo",
            # Display names for the combo box...
            "options": {
                0: "RGB",
                1: "RBG",
                2: "GRB",
                3: "GBR",
                4: "BRG",
                5: "BGR"
            },
            # ...and the matching channel-index permutations, same order.
            "options_map": [
                [0, 1, 2],
                [0, 2, 1],
                [1, 0, 2],
                [1, 2, 0],
                [2, 0, 1],
                [2, 1, 0]
            ],
            "default": 0
        }, {
            "id": "dev",
            "label": "SPI Device Path",
            "type": "str",
            "default": "/dev/spidev0.0",
        }, {
            "id": "SPISpeed",
            "label": "SPI Speed (MHz)",
            "type": "int",
            "default": 2,
            "min": 1,
            "max": 24,
            "group": "Advanced"
        }, {
            "id": "use_py_spi",
            "label": "Use PySPI",
            "type": "bool",
            "default": True,
            "group": "Advanced"
        }]
    }
]
| 32.404762 | 105 | 0.443424 | 1,093 | 0.401543 | 0 | 0 | 0 | 0 | 0 | 0 | 1,082 | 0.397502 |
76166cfac04a442fdc8322c4a6b91bc6654b2abd | 3,476 | py | Python | tests/test_tokenizers.py | GateNLP/python-gatenlp | a27f1152064e2e251dc8a477aa425ebe904542ad | [
"Apache-2.0"
] | 30 | 2020-04-18T12:28:15.000Z | 2022-02-18T21:31:18.000Z | tests/test_tokenizers.py | GateNLP/python-gatenlp | a27f1152064e2e251dc8a477aa425ebe904542ad | [
"Apache-2.0"
] | 133 | 2019-10-16T07:41:59.000Z | 2022-03-31T07:27:07.000Z | tests/test_tokenizers.py | GateNLP/python-gatenlp | a27f1152064e2e251dc8a477aa425ebe904542ad | [
"Apache-2.0"
] | 4 | 2021-01-20T08:12:19.000Z | 2021-10-21T13:29:44.000Z | import os
import time
from gatenlp import logger, Document
class TestTokenizers01:
    """Tests for the gatenlp tokenizer annotators (NLTK-backed and pattern-based)."""

    def makedoc(self):
        """Build the shared fixture document: 14 tokens, 13 inter-token gaps."""
        return Document(" This is a 💩 document. It has two sentences and 14 tokens. ")

    def test_nltk_tokenizers01(self):
        """
        Unit test method (make linter happy)
        """
        # Skip gracefully when nltk is not available in the environment.
        try:
            import nltk
        except ImportError:
            logger.warning("Module nltk not installed, skipping nltk tokenizer test")
            return
        from gatenlp.processing.tokenizer import NLTKTokenizer
        from nltk.tokenize.casual import TweetTokenizer, casual_tokenize
        # Use class
        ntok = NLTKTokenizer(nltk_tokenizer=TweetTokenizer)
        doc = ntok(self.makedoc())
        assert doc.annset().with_type("Token").size == 14
        # same, but also create SpaceToken annotations
        ntok = NLTKTokenizer(nltk_tokenizer=TweetTokenizer, space_token_type="SpaceToken")
        doc = ntok(self.makedoc())
        assert doc.annset().with_type("Token").size == 14
        assert doc.annset().with_type("SpaceToken").size == 13
        # same but specify outset name
        ntok = NLTKTokenizer(nltk_tokenizer=TweetTokenizer, space_token_type="SpaceToken", out_set="OUT")
        doc = ntok(self.makedoc())
        assert doc.annset("OUT").with_type("Token").size == 14
        assert doc.annset("OUT").with_type("SpaceToken").size == 13
        # same but use NLTK tokenizer instance
        ntok = NLTKTokenizer(nltk_tokenizer=TweetTokenizer(), space_token_type="SpaceToken", out_set="OUT")
        doc = ntok(self.makedoc())
        assert doc.annset("OUT").with_type("Token").size == 14
        assert doc.annset("OUT").with_type("SpaceToken").size == 13
        # same but specify convenience function
        ntok = NLTKTokenizer(nltk_tokenizer=casual_tokenize, space_token_type="SpaceToken", out_set="OUT")
        doc = ntok(self.makedoc())
        assert doc.annset("OUT").with_type("Token").size == 14
        assert doc.annset("OUT").with_type("SpaceToken").size == 13
        # regexp
        from nltk.tokenize import RegexpTokenizer
        ntok = NLTKTokenizer(nltk_tokenizer=RegexpTokenizer(r'\w+|\$[\d\.]+|\S+'), space_token_type="SpaceToken")
        doc = ntok(self.makedoc())
        assert doc.annset().with_type("Token").size == 14
        assert doc.annset().with_type("SpaceToken").size == 13

    def test_own_tokenizers(self):
        """Tests for gatenlp's own SplitPatternTokenizer with regex and string patterns."""
        from gatenlp.processing.tokenizer import SplitPatternTokenizer
        import re
        tok = SplitPatternTokenizer(split_pattern=re.compile(r'\s+'), space_token_type="SpaceToken")
        doc = tok(self.makedoc())
        assert doc.annset().with_type("Token").size == 12  # the dots are not separated from the words
        assert doc.annset().with_type("SpaceToken").size == 13
        # token_pattern filters which split results count as tokens.
        tok = SplitPatternTokenizer(
            split_pattern=re.compile(r'\s+'),
            token_pattern=re.compile(r'[a-zA-Z]'),
            space_token_type="SpaceToken")
        doc = tok(self.makedoc())
        assert doc.annset().with_type("Token").size == 10  # also drop the emoji and the number
        assert doc.annset().with_type("SpaceToken").size == 13
        # Plain-string patterns are accepted as well as compiled regexes.
        tok = SplitPatternTokenizer(
            split_pattern=" ",
            token_pattern="o",
            space_token_type="SpaceToken")
        doc = tok(self.makedoc())
        assert doc.annset().with_type("Token").size == 3
        assert doc.annset().with_type("SpaceToken").size == 16
| 44.564103 | 113 | 0.641542 | 3,417 | 0.982179 | 0 | 0 | 0 | 0 | 0 | 0 | 784 | 0.225352 |
76179c4c064e78d6f85438575931fbff3c4f84e8 | 282 | py | Python | tests/neural_networks/art_fuzzy/test_class.py | DavidVinicius/artmap-fuzzy-tcc | 1ff039ca2ade7a2a96137c75feaa9509e401e387 | [
"MIT"
] | null | null | null | tests/neural_networks/art_fuzzy/test_class.py | DavidVinicius/artmap-fuzzy-tcc | 1ff039ca2ade7a2a96137c75feaa9509e401e387 | [
"MIT"
] | null | null | null | tests/neural_networks/art_fuzzy/test_class.py | DavidVinicius/artmap-fuzzy-tcc | 1ff039ca2ade7a2a96137c75feaa9509e401e387 | [
"MIT"
] | null | null | null | from src.neural_networks.art_fuzzy import ARTFUZZY
import numpy as np
def test_If_I_isintance_numpy():
    """Check that the ARTFUZZY input vector I is stored as a numpy array."""
    A = ARTFUZZY([1.0, 2.0])
    assert isinstance(A.I, np.ndarray)
def test_If_W_isintance_numpy():
    """Check that the ARTFUZZY weight matrix W is stored as a numpy array."""
    A = ARTFUZZY([1.0, 2.0])
    # Bug fix: this test previously asserted on A.I again (copy-paste from
    # the test above), so W was never actually verified.  Assumes ARTFUZZY
    # exposes a `W` attribute, as the test name indicates.
    assert isinstance(A.W, np.ndarray)
7617fbe8c353222fa1397cf03f0913d339c82e46 | 2,801 | py | Python | tests/unit/test_para_helper_utilities.py | lizschley/number_six | a427202397822fca1f49d43d138c24fffdbe95da | [
"MIT"
] | 1 | 2020-07-14T20:13:05.000Z | 2020-07-14T20:13:05.000Z | tests/unit/test_para_helper_utilities.py | lizschley/number_six | a427202397822fca1f49d43d138c24fffdbe95da | [
"MIT"
] | 3 | 2021-04-06T20:40:08.000Z | 2021-06-03T21:54:21.000Z | tests/unit/test_para_helper_utilities.py | lizschley/number_six | a427202397822fca1f49d43d138c24fffdbe95da | [
"MIT"
] | null | null | null | '''Tests for methods in helpers/no_import_common_class/utilities.py'''
# pylint: disable=missing-function-docstring
# pylint: disable=redefined-outer-name
import pytest
import helpers.no_import_common_class.paragraph_helpers as helpers
import utilities.random_methods as utils
import testing.data.dict_constants as constants
def test_find_dictionary_from_list_by_key_and_value():
    """Filtering a list of dicts by key/value returns only the matching dicts."""
    cats = constants.LIST_OF_SIMILAR_DICTIONARIES
    black_cats = utils.find_dictionary_from_list_by_key_and_value(cats, 'color', 'black')
    assert len(black_cats) == 2
    assert black_cats[0]['color'] == 'black'
    assert black_cats[1]['color'] == 'black'
def test_find_value_from_dictionary_list():
    """Extracting a key's values from a dict list keeps the values' types (bools here)."""
    cats = constants.LIST_OF_DIFFERENT_DICTIONARIES
    current_cats = utils.find_value_from_dictionary_list(cats, 'alive')
    assert len(current_cats) == 2
    assert isinstance(current_cats[0], bool)
    assert current_cats[0]
    assert current_cats[1]
@pytest.mark.parametrize('key, expected', [('alive', True), ('name', False)])
def test_key_not_in_dictionary(key, expected):
    """key_not_in_dictionary is True for absent keys and False for present ones."""
    result = utils.key_not_in_dictionary({'name': 'Nemo'}, key)
    assert result == expected
@pytest.mark.parametrize('key, expected', [('alive', False), ('name', True)])
def test_key_in_dictionary(key, expected):
    """key_in_dictionary is the positive counterpart of key_not_in_dictionary."""
    result = utils.key_in_dictionary({'name': 'Nemo'}, key)
    assert result == expected
@pytest.mark.parametrize('key, expected', [('in_', True), ('in', True), ('file', False)])
def test_dictionary_key_begins_with_substring(key, expected):
    """Prefix matching on dict keys: 'in_file' starts with 'in'/'in_' but not 'file'."""
    result = utils.dictionary_key_begins_with_substring({'in_file': 'data/input_file.json'}, key)
    assert result == expected
@pytest.mark.parametrize('key, value', [('name', 'Nemo'), ('color', 'black'), ('year', '1994')])
def test_dict_from_split_string(key, value):
    """Splitting 'Nemo~black~1994' on '~' zips each piece with its given key."""
    result = utils.dict_from_split_string('Nemo~black~1994', '~', ('name', 'color', 'year'))
    assert result[key] == value
@pytest.mark.parametrize('key_1, val_1, key_2, val_2_list, association_list', [
    ('dog_id', 'Inky', 'cat_id', ['Nemo', 'Grayface', 'PD'], None),
    ('dog_id', 'Camden', 'cat_id', ['Sammy', 'Mac'], [{'dog_id': 'Wrigley', 'cat_id': 'Sammy'},
                                                      {'dog_id': 'Wrigley', 'cat_id': 'Mac'}]),
    ('dog_id', 'Pluto', 'cat_id', ['Ninja', 'Ronin'], None)
])
def test_add_to_associations(key_1, val_1, key_2, val_2_list, association_list):
    """add_to_associations appends one {key_1: val_1, key_2: v} pair per value in
    val_2_list, extending association_list when given (None means start fresh)."""
    size = 0 if association_list is None else len(association_list)
    resulting_list = helpers.add_to_associations(key_1, val_1, key_2, val_2_list, association_list)
    val_2 = val_2_list[-1]
    last_association = resulting_list[-1]
    assert len(resulting_list) == size + len(val_2_list)
    assert last_association[key_1] == val_1
    assert last_association[key_2] == val_2
| 42.439394 | 99 | 0.702963 | 0 | 0 | 0 | 0 | 1,838 | 0.656194 | 0 | 0 | 659 | 0.235273 |
76180c0856e5a2882537ef401da7a0993a51ee7b | 17,823 | py | Python | scripts/train.py | dowalder/gym-duckietown | 6a2ea8adfe0d25e2a88deb8cfb31c71fc332ce0e | [
"MIT"
] | null | null | null | scripts/train.py | dowalder/gym-duckietown | 6a2ea8adfe0d25e2a88deb8cfb31c71fc332ce0e | [
"MIT"
] | null | null | null | scripts/train.py | dowalder/gym-duckietown | 6a2ea8adfe0d25e2a88deb8cfb31c71fc332ce0e | [
"MIT"
] | 2 | 2018-08-28T07:40:14.000Z | 2018-08-28T07:49:10.000Z | #!/usr/bin/env python3
"""
DEPRECATED: better use trainv2.py together with conf files. This file persists as not everything in it has been moved to
trainv2.py, but in most cases the other one should be used.
"""
import argparse
import os
import pathlib
import yaml
from typing import Dict
import collections
import torch
import torch.nn
import torch.utils.data
import src.networks
import src.dataset
class Statistics:
    """Collects per-iteration train/test loss values and, when used as a
    context manager, dumps both curves to a YAML file on exit."""

    def __init__(self, path: pathlib.Path):
        self.path = path
        self.train = collections.OrderedDict()  # iteration -> training loss
        self.test = collections.OrderedDict()   # iteration -> test loss

    def add_train(self, value: float, iteration: int):
        """Record a training-loss sample for the given iteration."""
        self.train[iteration] = value

    def add_test(self, value: float, iteration: int):
        """Record a test-loss sample for the given iteration."""
        self.test[iteration] = value

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Persist both curves when the `with` block ends (also on error).
        self.path.write_text(yaml.dump({"test": self.test, "train": self.train}))
class Params:
    """Typed view of the YAML training configuration referenced by ``args.conf``."""

    def __init__(self, args):
        config = yaml.load(pathlib.Path(args.conf).read_text())
        check_conf(config)

        # Filesystem locations.
        self.train_path = pathlib.Path(config["train_path"])
        self.test_path = pathlib.Path(config["test_path"])
        self.model_path = pathlib.Path(config["model_path"])
        self.pretrained_path = pathlib.Path(config["pretrained_path"])

        # Training setup.
        self.device = torch.device(config["device"])
        self.network = config["network"]
        self.num_epochs = config["num_epochs"]
        self.pretrained = config["pretrained"]
        self.data_in_memory = config["data_in_memory"]

        # Iteration intervals for testing/logging/checkpointing.
        intervals = config["intervals"]
        self.test_interval = intervals["test"]
        self.display_interval = intervals["display"]
        self.save_interval = intervals["save"]
def check_conf(conf: Dict):
    """Validate that the configuration contains every required key with the right type.

    Raises AssertionError naming the first missing or mistyped entry.
    """
    expected_types = {
        "train_path": str,
        "test_path": str,
        "model_path": str,
        "device": str,
        "intervals": dict,
        "network": str,
        "num_epochs": int,
        "pretrained": bool,
        "pretrained_path": str,
        "data_in_memory": bool,
    }
    for name, expected in expected_types.items():
        assert name in conf, "Missing key: {}".format(name)
        assert isinstance(conf[name], expected), "Expected {} to be {}, but got {}".format(name, expected, conf[name].__class__)
def net_factory(net: str, params) -> torch.nn.Module:
    """Instantiate the network selected by name in the configuration.

    :param net: network identifier from the config file
    :param params: Params object providing device/pretrained settings
    :raises RuntimeError: for an unknown network name
    """
    if net == "conv_rnn":
        return src.networks.BasicConvRNN(device=params.device)
    if net == "resnet_rnn":
        return src.networks.ResnetRNN(pretrained=params.pretrained, device=params.device)
    if net == "resnet_rnn_small":
        return src.networks.ResnetRNNsmall()
    if net == "shared_weights":
        return src.networks.WeightsSharingRNN(
            cnn_weights_path=params.pretrained_path, cnn_no_grad=True, num_lstms=1)
    raise RuntimeError("Unkown network: {}".format(net))
def validation(net, test_loader, criterion, device="cpu"):
    """
    Perform a validation step and print the average loss.

    :param net: torch.nn.Module -> the neural network
    :param test_loader: torch.utils.data.DataLoader -> the validation data
    :param criterion: loss function comparing network output against labels
    :param device: device the batches are moved to before evaluation
    """
    total_loss = 0.0
    for labels, images in test_loader:
        labels = labels.to(device)
        outputs = net(images.to(device))
        total_loss += criterion(outputs, labels).item()
    print("\ttest loss: %f" % (total_loss / len(test_loader)))
def train_cnn(net,
              train_loader,
              test_loader,
              criterion,
              optimizer,
              save_dir,
              device="cpu",
              num_epoch=150,
              disp_interval=10,
              val_interval=50,
              save_interval=20):
    """
    Training a network.
    :param net: The pytorch network. It should be initialized (as no initialization is performed here).
    :param train_loader: torch.data.utils.DataLoader -> to train the classifier
    :param test_loader: torch.data.utils.DataLoader -> to test the classifier
    :param criterion: see pytorch tutorials for further information
    :param optimizer: see pytorch tutorials for further information
    :param save_dir: str -> where the snapshots should be stored
    :param device: str -> "cpu" for computation on CPU or "cuda:n",
        where n stands for the number of the graphics card that should be used.
    :param num_epoch: int -> number of epochs to train
    :param disp_interval: int -> interval (in steps) between displaying training loss
    :param val_interval: int -> interval (in steps) between performing validation
    :param save_interval: int -> interval (in steps) between saving snapshots
    """
    save_dir = os.path.expanduser(save_dir)
    if not os.path.isdir(save_dir):
        os.mkdir(save_dir)

    print("Moving the network to the device {}...".format(device))
    net.to(device)

    step = 0                  # global step counter across all epochs
    running_loss = 0          # accumulated loss since the last display
    print("Starting training")
    for epoch in range(num_epoch):
        for lbls, imgs in train_loader:
            optimizer.zero_grad()

            lbls = lbls.to(device)
            imgs = imgs.to(device)

            outputs = net(imgs)
            loss = criterion(outputs, lbls)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if step % disp_interval == 0 and step != 0:
                print("[%d][%d] training loss: %f" % (epoch, step, running_loss / disp_interval))
                running_loss = 0
            if step % val_interval == 0 and step != 0:
                print("[%d][%d] Performing validation..." % (epoch, step))
                validation(net, test_loader, criterion=criterion, device=device)
            # NOTE(review): this guard uses `epoch != 0` while the two above
            # use `step != 0` — possibly intentional (no checkpoints during
            # the first epoch), but confirm it is not a typo.
            if step % save_interval == 0 and epoch != 0:
                path = os.path.join(save_dir, "checkpoint_{}.pth".format(step))
                print("[%d][%d] Saving a snapshot to %s" % (epoch, step, path))
                torch.save(net.state_dict(), path)
            step += 1
def exact_caffe_copy_factory(train_path, test_path):
"""
Prepare the training in such a way that the caffe net proposed in
https://github.com/syangav/duckietown_imitation_learning is copied in pytorch.
:param train_path: str -> path to training data
:param test_path: str -> path to testing data
:return:
"""
train_set = src.dataset.DataSet(train_path)
test_set = src.dataset.DataSet(test_path)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=200, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=100, shuffle=False)
net = src.networks.InitialNet()
net.apply(src.networks.weights_init)
criterion = torch.nn.MSELoss()
# optimizer = torch.optim.SGD(net.parameters(), lr=0.00001, momentum=0.85, weight_decay=0.0005)
optimizer = torch.optim.Adam(net.parameters())
return net, train_loader, test_loader, criterion, optimizer
def train_rnn(params: Params):
if params.network == "resnet_rnn":
img_size = (224, 224)
grayscale = False
elif params.network == "shared_weights":
img_size = (80, 160)
grayscale = True
else:
img_size = (120, 160)
grayscale = False
train_set = src.dataset.RNNDataSet(params.train_path, 10, device=params.device, img_size=img_size,
grayscale=grayscale, in_memory=params.data_in_memory)
test_set = src.dataset.RNNDataSet(params.test_path, 10, device=params.device, img_size=img_size,
grayscale=grayscale, in_memory=params.data_in_memory)
net = net_factory(params.network, params)
net.to(params.device)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters())
step = 0
running_loss = []
with Statistics(params.model_path / "stat.yaml") as statistics:
for epoch in range(params.num_epochs):
for idx in range(len(train_set)):
optimizer.zero_grad()
net.zero_grad()
net.init_hidden()
imgs, actions, lbls = train_set[idx]
out = net(imgs, actions)
out = out.squeeze()
loss = criterion(out, lbls)
loss.backward()
optimizer.step()
running_loss.append(loss.item())
step += 1
if step % params.display_interval == 0:
err = sum(running_loss) / len(running_loss)
print("[{}][{}]: {}".format(epoch, step, err))
statistics.add_train(err, step)
running_loss = []
if step % params.test_interval == 0:
with torch.no_grad():
test_loss = []
for imgs, actions, lbls in test_set:
net.init_hidden()
out = net(imgs, actions)
out = out.squeeze()
loss = criterion(out, lbls)
test_loss.append(loss.item())
err = sum(test_loss) / len(test_loss)
print("test: {}".format(err))
statistics.add_test(err, step)
if step % params.save_interval == 0:
model_path = params.model_path / "step_{}.pth".format(step)
print("Saving model to {}".format(model_path))
torch.save(net.state_dict(), model_path.as_posix())
def train_seq_cnn(params: Params):
img_size = (120, 160)
num_imgs = 5
print("Loading datasets...")
print("\ttraining: {}".format(params.train_path))
train_set = src.dataset.RNNDataSet(params.train_path, num_imgs, device=params.device, img_size=img_size)
print("\ttesting: {}".format(params.test_path))
test_set = src.dataset.RNNDataSet(params.test_path, num_imgs, device=params.device, img_size=img_size)
print("Loading net and moving it to {}...".format(params.device))
net = src.networks.SequenceCnn(device=params.device, num_imgs=num_imgs)
net.apply(src.networks.weights_init)
net.to(params.device)
optimizer = torch.optim.Adam(net.parameters())
criterion = torch.nn.MSELoss()
print("Starting training")
running_loss = []
step = 0
with Statistics(params.model_path / "stat.yaml") as statistics:
for epoch in range(params.num_epochs):
for imgs, actions, lbls in train_set:
optimizer.zero_grad()
out = net(imgs, actions)
loss = criterion(out.squeeze(), lbls[0, :])
running_loss.append(loss.item())
loss.backward()
optimizer.step()
step += 1
if step % params.display_interval == 0:
err = sum(running_loss) / len(running_loss)
print("[{}][{}] train: {}".format(epoch, step, err))
statistics.add_train(err, step)
running_loss = []
if step % params.test_interval == 0:
with torch.no_grad():
test_err = []
for imgs_test, actions_test, lbls_test in test_set:
out = net(imgs_test, actions_test)
loss = criterion(out.squeeze(), lbls_test[0, :])
test_err.append(loss.item())
err = sum(test_err) / len(test_err)
print("[{}][{}] test: {}".format(epoch, step, err))
statistics.add_test(err, step)
if step % params.save_interval == 0:
model_path = params.model_path / "step_{}.pth".format(step)
print("Saving model to {}".format(model_path))
torch.save(net.state_dict(), model_path.as_posix())
print("Done.")
def train_action_estimator(params: Params):
img_size = (120, 160)
print("Loading datasets...")
print("\ttraining: {}".format(params.train_path))
train_set = src.dataset.RNNDataSet(params.train_path, 2, device=params.device, img_size=img_size)
print("\ttesting: {}".format(params.test_path))
test_set = src.dataset.RNNDataSet(params.test_path, 2, device=params.device, img_size=img_size)
print("Loading net and moving it to {}...".format(params.device))
net = src.networks.ActionEstimator()
net.apply(src.networks.weights_init)
net.to(params.device)
optimizer = torch.optim.Adam(net.parameters())
criterion = torch.nn.MSELoss()
print("Starting training")
running_loss = []
step = 0
with Statistics(params.model_path / "stat.yaml") as statistics:
for epoch in range(params.num_epochs):
for imgs, actions, lbls in train_set:
optimizer.zero_grad()
out = net(imgs)
loss = criterion(out.squeeze(), lbls[0, :] * actions[0, :])
running_loss.append(loss.item())
loss.backward()
optimizer.step()
step += 1
if step % params.display_interval == 0:
err = sum(running_loss) / len(running_loss)
print("[{}][{}] train: {}".format(epoch, step, err))
statistics.add_train(err, step)
running_loss = []
if step % params.test_interval == 0:
with torch.no_grad():
test_err = []
for imgs_test, actions_test, lbls_test in test_set:
out = net(imgs_test)
loss = criterion(out.squeeze(), lbls_test[0, :] * actions_test[0, :])
test_err.append(loss.item())
err = sum(test_err) / len(test_err)
print("[{}][{}] test: {}".format(epoch, step, err))
statistics.add_test(err, step)
if step % params.save_interval == 0:
model_path = params.model_path / "step_{}.pth".format(step)
print("Saving model to {}".format(model_path))
torch.save(net.state_dict(), model_path.as_posix())
print("Done.")
class LabelsToCrossEntropy:
def __init__(self):
self.loss = torch.nn.CrossEntropyLoss()
def __call__(self, result, label):
return self.loss(result, label.long().squeeze())
class MySpecialMSELoss:
def __init__(self):
self.loss = torch.nn.MSELoss()
self.softmax = torch.nn.Softmax()
def __call__(self, result, label):
out = self.softmax(result)
dist = torch.zeros(out.shape, device=out.device)
for idx, lbl in enumerate(label):
lbl = int(lbl.item())
if lbl == 0:
dist[idx, 0] = 0.6
dist[idx, 1] = 0.3
dist[idx, 2] = 0.1
elif lbl == out.shape[1] - 1:
dist[idx, lbl - 1] = 0.1
dist[idx, lbl - 1] = 0.3
dist[idx, lbl] = 0.6
else:
dist[idx, lbl - 1] = 0.2
dist[idx, lbl] = 0.6
dist[idx, lbl + 1] = 0.2
return self.loss(result, dist)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--conf", "-c", help="configuration file (.yaml)", required=True)
parser.add_argument("--net", default="rnn")
args = parser.parse_args()
params = Params(args)
if args.net == "rnn":
train_rnn(params)
elif args.net == "cnn":
net, train_loader, test_loader, criterion, optimizer = exact_caffe_copy_factory(params.train_path.as_posix(),
params.test_path.as_posix())
train_cnn(net, train_loader, test_loader, criterion, optimizer, params.model_path.as_posix(),
device=params.device, save_interval=1000)
elif args.net == "resnet":
train_set = src.dataset.ColorDataSet(params.train_path.as_posix())
test_set = src.dataset.ColorDataSet(params.test_path.as_posix())
train_loader = torch.utils.data.DataLoader(train_set, batch_size=200, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=100, shuffle=False)
net = src.networks.ResnetController()
src.networks.weights_init(net)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters())
train_cnn(net, train_loader, test_loader, criterion, optimizer, params.model_path.as_posix(),
device=params.device, save_interval=1000)
elif args.net == "seq_cnn":
train_seq_cnn(params)
elif args.net == "action_est":
train_action_estimator(params)
elif args.net == "discrete_action":
train_set = src.dataset.ColorDataSet(params.train_path.as_posix())
test_set = src.dataset.ColorDataSet(params.test_path.as_posix())
train_loader = torch.utils.data.DataLoader(train_set, batch_size=200, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=100, shuffle=True)
net = src.networks.DiscreteActionNet(11)
src.networks.weights_init(net)
criterion = MySpecialMSELoss()
optimizer = torch.optim.Adam(net.parameters())
train_cnn(net, train_loader, test_loader, criterion, optimizer, params.model_path.as_posix(),
device=params.device, save_interval=1000)
else:
raise RuntimeError("Unknown option for --net: {}".format(args.net))
if __name__ == "__main__":
main()
| 36.373469 | 120 | 0.586489 | 2,328 | 0.130618 | 0 | 0 | 0 | 0 | 0 | 0 | 2,997 | 0.168154 |
76189c1c0f0a1c092eecb8d9cdb55104a570a87d | 9,020 | py | Python | oskb/ui_editkey.py | rushic24/oskb | d453a707d2a1d78d859d5e1648fe3804e40b4148 | [
"MIT"
] | 6 | 2020-05-06T16:59:48.000Z | 2021-09-18T12:48:21.000Z | oskb/ui_editkey.py | rushic24/oskb | d453a707d2a1d78d859d5e1648fe3804e40b4148 | [
"MIT"
] | 1 | 2022-03-24T19:19:11.000Z | 2022-03-24T19:19:11.000Z | oskb/ui_editkey.py | rushic24/oskb | d453a707d2a1d78d859d5e1648fe3804e40b4148 | [
"MIT"
] | 3 | 2020-05-06T16:59:52.000Z | 2021-09-18T12:48:54.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'EditKey.ui'
#
# Created by: PyQt5 UI code generator 5.14.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_EditKey(object):
def setupUi(self, EditKey):
EditKey.setObjectName("EditKey")
EditKey.resize(632, 444)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(EditKey.sizePolicy().hasHeightForWidth())
EditKey.setSizePolicy(sizePolicy)
EditKey.setMinimumSize(QtCore.QSize(632, 444))
EditKey.setMaximumSize(QtCore.QSize(632, 444))
EditKey.setModal(True)
self.cancelsavebuttons = QtWidgets.QDialogButtonBox(EditKey)
self.cancelsavebuttons.setGeometry(QtCore.QRect(330, 400, 291, 41))
self.cancelsavebuttons.setOrientation(QtCore.Qt.Horizontal)
self.cancelsavebuttons.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Save)
self.cancelsavebuttons.setObjectName("cancelsavebuttons")
self.maintabs = QtWidgets.QTabWidget(EditKey)
self.maintabs.setGeometry(QtCore.QRect(10, 10, 611, 381))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.maintabs.sizePolicy().hasHeightForWidth())
self.maintabs.setSizePolicy(sizePolicy)
self.maintabs.setMinimumSize(QtCore.QSize(611, 381))
self.maintabs.setMaximumSize(QtCore.QSize(611, 381))
self.maintabs.setObjectName("maintabs")
self.appearance = QtWidgets.QWidget()
self.appearance.setObjectName("appearance")
self.lbl_4 = QtWidgets.QLabel(self.appearance)
self.lbl_4.setGeometry(QtCore.QRect(300, 10, 151, 16))
self.lbl_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.lbl_4.setObjectName("lbl_4")
self.caption = QtWidgets.QLineEdit(self.appearance)
self.caption.setGeometry(QtCore.QRect(110, 20, 101, 21))
self.caption.setObjectName("caption")
self.cssclass = QtWidgets.QLineEdit(self.appearance)
self.cssclass.setGeometry(QtCore.QRect(40, 130, 221, 19))
self.cssclass.setObjectName("cssclass")
self.extracaptions = QtWidgets.QTableWidget(self.appearance)
self.extracaptions.setGeometry(QtCore.QRect(300, 30, 261, 121))
font = QtGui.QFont()
font.setPointSize(11)
self.extracaptions.setFont(font)
self.extracaptions.setShowGrid(True)
self.extracaptions.setRowCount(0)
self.extracaptions.setColumnCount(2)
self.extracaptions.setObjectName("extracaptions")
item = QtWidgets.QTableWidgetItem()
self.extracaptions.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.extracaptions.setHorizontalHeaderItem(1, item)
self.extracaptions.horizontalHeader().setCascadingSectionResizes(False)
self.extracaptions.horizontalHeader().setStretchLastSection(True)
self.extracaptions.verticalHeader().setVisible(False)
self.extracaptions.verticalHeader().setStretchLastSection(False)
self.lbl_3 = QtWidgets.QLabel(self.appearance)
self.lbl_3.setGeometry(QtCore.QRect(40, 90, 191, 41))
self.lbl_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.lbl_3.setWordWrap(True)
self.lbl_3.setObjectName("lbl_3")
self.lbl_1 = QtWidgets.QLabel(self.appearance)
self.lbl_1.setGeometry(QtCore.QRect(20, 20, 81, 16))
self.lbl_1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lbl_1.setObjectName("lbl_1")
self.style = QtWidgets.QPlainTextEdit(self.appearance)
self.style.setGeometry(QtCore.QRect(20, 190, 541, 131))
self.style.setObjectName("style")
self.lbl_5 = QtWidgets.QLabel(self.appearance)
self.lbl_5.setGeometry(QtCore.QRect(40, 170, 291, 16))
self.lbl_5.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.lbl_5.setObjectName("lbl_5")
self.lbl_6 = QtWidgets.QLabel(self.appearance)
self.lbl_6.setGeometry(QtCore.QRect(20, 320, 541, 16))
self.lbl_6.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_6.setObjectName("lbl_6")
self.lbl_2 = QtWidgets.QLabel(self.appearance)
self.lbl_2.setGeometry(QtCore.QRect(20, 50, 81, 16))
self.lbl_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lbl_2.setObjectName("lbl_2")
self.width = QtWidgets.QDoubleSpinBox(self.appearance)
self.width.setGeometry(QtCore.QRect(110, 50, 81, 24))
self.width.setDecimals(1)
self.width.setMinimum(0.1)
self.width.setSingleStep(0.1)
self.width.setProperty("value", 1.0)
self.width.setObjectName("width")
self.deletecaption = QtWidgets.QPushButton(self.appearance)
self.deletecaption.setGeometry(QtCore.QRect(530, 150, 31, 21))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.deletecaption.setFont(font)
self.deletecaption.setDefault(False)
self.deletecaption.setFlat(False)
self.deletecaption.setObjectName("deletecaption")
self.addcaption = QtWidgets.QPushButton(self.appearance)
self.addcaption.setGeometry(QtCore.QRect(500, 150, 31, 21))
self.addcaption.setDefault(False)
self.addcaption.setFlat(False)
self.addcaption.setObjectName("addcaption")
self.maintabs.addTab(self.appearance, "")
self.action = QtWidgets.QWidget()
self.action.setObjectName("action")
self.actiontabs = QtWidgets.QTabWidget(self.action)
self.actiontabs.setGeometry(QtCore.QRect(20, 20, 571, 321))
self.actiontabs.setObjectName("actiontabs")
self.single = QtWidgets.QWidget()
self.single.setObjectName("single")
self.actiontabs.addTab(self.single, "")
self.double = QtWidgets.QWidget()
self.double.setObjectName("double")
self.actiontabs.addTab(self.double, "")
self.long = QtWidgets.QWidget()
self.long.setObjectName("long")
self.actiontabs.addTab(self.long, "")
self.maintabs.addTab(self.action, "")
self.retranslateUi(EditKey)
self.maintabs.setCurrentIndex(0)
self.actiontabs.setCurrentIndex(0)
self.cancelsavebuttons.accepted.connect(EditKey.accept)
self.cancelsavebuttons.rejected.connect(EditKey.reject)
QtCore.QMetaObject.connectSlotsByName(EditKey)
EditKey.setTabOrder(self.maintabs, self.caption)
EditKey.setTabOrder(self.caption, self.width)
EditKey.setTabOrder(self.width, self.cssclass)
EditKey.setTabOrder(self.cssclass, self.extracaptions)
EditKey.setTabOrder(self.extracaptions, self.addcaption)
EditKey.setTabOrder(self.addcaption, self.deletecaption)
EditKey.setTabOrder(self.deletecaption, self.style)
EditKey.setTabOrder(self.style, self.actiontabs)
def retranslateUi(self, EditKey):
_translate = QtCore.QCoreApplication.translate
EditKey.setWindowTitle(_translate("EditKey", "Edit key properties"))
self.lbl_4.setText(_translate("EditKey", "Additional captions:"))
item = self.extracaptions.horizontalHeaderItem(0)
item.setText(_translate("EditKey", "CSS class"))
item = self.extracaptions.horizontalHeaderItem(1)
item.setText(_translate("EditKey", "Caption"))
self.lbl_3.setText(_translate("EditKey", "Additional CSS classes, separated by spaces:"))
self.lbl_1.setText(_translate("EditKey", "Caption:"))
self.lbl_5.setText(_translate("EditKey", "CSS StyleSheet specific to this key:"))
self.lbl_6.setText(_translate("EditKey", "(Better to add CSS class and put style info in the keyboard stylesheet)"))
self.lbl_2.setText(_translate("EditKey", "Key width:"))
self.deletecaption.setText(_translate("EditKey", "-"))
self.addcaption.setText(_translate("EditKey", "+"))
self.maintabs.setTabText(self.maintabs.indexOf(self.appearance), _translate("EditKey", "Appearance"))
self.actiontabs.setTabText(self.actiontabs.indexOf(self.single), _translate("EditKey", "Single Tap"))
self.actiontabs.setTabText(self.actiontabs.indexOf(self.double), _translate("EditKey", "Double Tap"))
self.actiontabs.setTabText(self.actiontabs.indexOf(self.long), _translate("EditKey", "Press and hold"))
self.maintabs.setTabText(self.maintabs.indexOf(self.action), _translate("EditKey", "Action"))
| 54.337349 | 124 | 0.704656 | 8,777 | 0.97306 | 0 | 0 | 0 | 0 | 0 | 0 | 866 | 0.096009 |
761906b76105e1a9bca359fb807ff73effa0fbb1 | 5,584 | py | Python | variaveis.py | OHolandes/Athena-Public | ed697b012b0507d31e906026607be69dcb3460ce | [
"BSD-3-Clause"
] | null | null | null | variaveis.py | OHolandes/Athena-Public | ed697b012b0507d31e906026607be69dcb3460ce | [
"BSD-3-Clause"
] | null | null | null | variaveis.py | OHolandes/Athena-Public | ed697b012b0507d31e906026607be69dcb3460ce | [
"BSD-3-Clause"
] | null | null | null | CANAIS_ADM = {
"diretoria": 441263190832185350,
"secretaria": 731689039853518848
}
SAUDACOES = ["Olá!", "Oi!", "Iai!"]
GUIA_ANONIMA_ID = 956319073568976967
msg_ajuda = "**::ola** | **::oi** | **::iai** | **::athena**: Mande um ola caloroso para mim, e responderei!\n" \
"**::cool** `texto`: Você pode me perguntar se algo é COOl (provavelmente sou eu).\n" \
"**::pitagoras** `expressão...`: Resolvo uma expressão matemática no estilo Pitágoras.\n" \
'**::rola** | **::dado** `NdN`: Consigo rolar uns dados para você se for conveniente.\n' \
"**::escolha** | **::prefere** `opções...`: Vou escolher a melhor opção entre algumas opções.\n" \
"**::stalk**: Envio algumas informações suas... Anda stalkeando você mesmo(a)!?.\n" \
"**::privilegios** `membro...`: Mostro suas permissões nesse canal ou de outra pessoa.\n" \
"**::convite**: Mando o convite do servidor.\n" \
"**::chegamais** `menções...`: Separo um canal para você e mais pessoas ficarem a vontade.\n" \
"**::ajuda** | **::comandos**: Esse já é um pouco autoexplicativo não?" \
"\n\n" \
"**Administração**:\n\n" \
'**::teste** `N vezes` `palavra`: Repito uma mensagem para saber se estou "di Boa"\n' \
'**::prompt**: Abro meu console para você interagir com meu código ||pervertido(a)!||.\n' \
"**::ping**: Mando a minha latência (morar nos E.U.A é para poucos).\n" \
"**::cep**: Mando o ID do canal atual.\n" \
"**::cpf**: Envio o ID de alguém.\n" \
"**::relatorio**: Faço um relatório geral do servidor." \
"(n de membros, n de boosts, nivel, n de canais, n de categorias, n de cargos...).\n" \
"**::faxina** `limite`: Dou uma limpeza das últimas (100 por padrão) mensagens no canal atual.\n" \
"\n" \
"**::log** `membro`: Faço um pequeno histórico escolar de um membro especifico. " \
"Ou o seu, caso não for especificado. Por padrão o limite é 15.\n" \
"\n" \
"**::basta**: Mando todas as pessoas **comuns** calarem a boca.\n" \
"**::liberado**: Descalo a boca de todos (talvez não seja uma boa ideia).\n" \
"**::aviso**: Muto alguém pelos seus crimes contra a nação.\n" \
"\n" \
"**::kick** `membro` `motivo`: Dou uma voadora em algum membro...\n" \
"Você pode **kickar** sem um motivo especificado, porém isso seria abuso de autoridade...\n" \
"**::ban** `membro` `motivo`: Excluo um membro da sociedade.\n" \
"Você pode **banir** sem um motivo especificado, porém isso seria abuso de autoridade..." \
"\n\n\n" \
"Você ainda pode pedir uma explicação de alto calão de certos comandos usando **::ajuda** `comando`." \
" Os que tenho alto conhecimento:" \
"`cool`; `soma`; `rola`; `escolha`; `chegamais`; `basta`; `log`; `ban`/`kick`; `aviso`." \
"\n" \
"Também se quiser saber mais sobre as permissões de `Administração`, mande um `::ajuda adms`."
msg_adms = """
Vou dizer resumidamente quem pode oquê aqui e as permissões minimas do cargo mais alto seu.
**Comando** | **Permissão**
`::teste` | Gerenciar canais
`::prompt` | Administrador
`::ping` | Gerenciar canais
`::cep` | Gerenciar canais
`::cpf` | Gerenciar canais
`::relatorio`| Administrador
`::faxina` | Gerenciar mensagens
`::log` | Gerenciar mensagens
`::basta` | Gerenciar mensagens
`::liberado` | Gerenciar mensagens
`::aviso` | Gerenciar mensagens
`::kick` | Expulsar membros
`::ban` | Banir membros
"""
alta_ajuda = {
"adms": msg_adms,
"cool": "Digo se algo é _cool_, como por exemplo: ::cool athena",
"pitagoras": "Calculo uma expressão matemática, como: `(23 + 2) * 9 - 2**3`.\nAinda pode usar exponenciação = `**`, e resto de divisão = `%`",
"rola": "Rolo um dado descompromissadamente: ::rola 1d20 = 1 dado de 20",
"escolha": "Use para eu escolher coisas aleatórias, manda as opções em sequência: ::escolha loritta athena disboard",
"chegamais": """Tenho um sistema de mensagens anônimas.
Entre em um desses canais para usufruir:
<#956301680679473253>
<#957638065596272680>
<#957638119560192090>
Use `::chegamais` `menções` (onde "menções" são as menções dos membros que queira convidar), o canal será fechado para todos com o cargo **everyone** com exceção de vocês (logicamente os outros como administradores e moderadores poderão ver as mensagens) e será aberto depois de _10 minutos_ de inatividade (fique tranquilo, antes disso eu vou apagar tudo).
Obs: Sendo que os de patente alta podem ver as mensagens, não passem os limites, olhem <#441263333807751178> para terem certeza.
""",
"basta": "Todos com somente o cargo **everyone** serão impedidos de falar no canal com o comando invocado.",
"log": "Envio as últimas mensagens de alguém.",
"aviso": "Dou o cargo @Avisado para um membro e ele não poderá mandar mensagens em qualquer canal, para descastiga-lo use o comando novamente.",
"kick": "Use para por alguém nas rédias, use-o no canal em que o membro tenha acesso (para deixar as coisas um pouco mais democráticas).",
"ban": "Use para por alguém nas rédias, use-o no canal em que o membro tenha acesso (para deixar as coisas um pouco mais democráticas)."
} | 62.044444 | 377 | 0.604047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,859 | 0.855608 |
7619f9011026e05240115c2ac1df6ac827fae015 | 187 | py | Python | neodroidagent/utilities/signal/experimental/__init__.py | gitter-badger/agent | 3f53eaa7ebdee3ab423c7b58785d584fe1a6ae11 | [
"Apache-2.0"
] | 8 | 2017-09-13T08:28:44.000Z | 2022-01-21T15:59:19.000Z | neodroidagent/utilities/signal/experimental/__init__.py | gitter-badger/agent | 3f53eaa7ebdee3ab423c7b58785d584fe1a6ae11 | [
"Apache-2.0"
] | 4 | 2019-03-22T13:49:16.000Z | 2019-03-25T13:49:39.000Z | neodroidagent/utilities/signal/experimental/__init__.py | gitter-badger/agent | 3f53eaa7ebdee3ab423c7b58785d584fe1a6ae11 | [
"Apache-2.0"
] | 3 | 2017-09-13T08:31:38.000Z | 2021-11-09T11:22:27.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = ""
from .discounting import *
from .generalised_advantage import *
from .nstep import *
| 18.7 | 39 | 0.705882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.390374 |
761c0b280845fe16bd537d8ef77d6e2fcaf9c26d | 1,376 | py | Python | NSI/Chapitre 6/Mini projet 5 AB/morse.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | NSI/Chapitre 6/Mini projet 5 AB/morse.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | NSI/Chapitre 6/Mini projet 5 AB/morse.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | from arbre_binaire import AB
def parfait(ngd):
"""
Fonction qui renvoit un arbre binaire parfait à partir de
son parcourt en profondeur préfixe ngd de type str
"""
cle = ngd[0]
ab1 = AB(cle)
if len(ngd) > 1:
ngd_g = ngd[1 : len(ngd) // 2 + 1]
ngd_d = ngd[len(ngd) // 2 + 1 :]
ab1.set_ag(parfait(ngd_g))
ab1.set_ad(parfait(ngd_d))
return ab1
def dechiffrer(chaine_morse):
"""Fonction qui déchiffre une chaine de caractère morse à l'aide d'un arbre binaire"""
retour = ""
# arbre binaire pour les codes morse
ab = parfait(" EISHVUF ARL WPJTNDBXKCYMGZQO ")
# arbre binaire servant au parcours
ab1 = ab
for car in chaine_morse:
if car == ".": # .
ab1 = ab1.get_ag()
elif car == "-": # -
ab1 = ab1.get_ad()
elif car == " ": # espace entre les caractères
retour += ab1.get_val()
ab1 = ab # réinitialise le parcours
else: # espace entre les mots
retour += ab1.get_val()
retour += " "
ab1 = ab
retour += ab1.get_val()
return retour
assert dechiffrer("-... --- -. -. ./.--- --- ..- .-. -. . .") == "BONNE JOURNEE"
assert dechiffrer("-... --- -. .--- --- ..- .-./-- --- -. ... .. . ..- .-.") == "BONJOUR MONSIEUR"
print("Tous les tests sont satisfaits")
| 28.081633 | 98 | 0.52907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 587 | 0.42444 |
761c58ccf71d7166432217958e8a864a1a872340 | 12,091 | py | Python | spharpy/samplings/coordinates.py | mberz/spharpy | e74c30c297dd9ad887e7345c836a515daa6f21f4 | [
"MIT"
] | null | null | null | spharpy/samplings/coordinates.py | mberz/spharpy | e74c30c297dd9ad887e7345c836a515daa6f21f4 | [
"MIT"
] | null | null | null | spharpy/samplings/coordinates.py | mberz/spharpy | e74c30c297dd9ad887e7345c836a515daa6f21f4 | [
"MIT"
] | null | null | null | import numpy as np
from spharpy.samplings.helpers import sph2cart
from scipy.spatial import cKDTree
class Coordinates(object):
    """Container class for coordinates in a three-dimensional space, allowing
    for compact representation and convenient conversion into spherical as well
    as geospatial coordinate systems.

    The constructor as well as the internal representation are only
    available in Cartesian coordinates. To create a Coordinates object from
    a set of points in spherical coordinates, please use the
    Coordinates.from_spherical() method.

    Attributes
    ----------
    x : ndarray, double
        x-coordinate
    y : ndarray, double
        y-coordinate
    z : ndarray, double
        z-coordinate

    """

    def __init__(self, x=None, y=None, z=None):
        """Init coordinates container.

        Parameters
        ----------
        x : ndarray, double
            x-coordinate
        y : ndarray, double
            y-coordinate
        z : ndarray, double
            z-coordinate

        Raises
        ------
        ValueError
            If x, y, and z do not share the same shape.
        """
        super(Coordinates, self).__init__()
        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        z = np.asarray(z, dtype=np.float64)

        if not np.shape(x) == np.shape(y) == np.shape(z):
            raise ValueError("Input arrays need to have same dimensions.")

        self._x = x
        self._y = y
        self._z = z

    @property
    def x(self):
        """The x-axis coordinates for each point."""
        return self._x

    @x.setter
    def x(self, value):
        self._x = np.asarray(value, dtype=np.float64)

    @property
    def y(self):
        """The y-axis coordinate for each point."""
        return self._y

    @y.setter
    def y(self, value):
        self._y = np.asarray(value, dtype=np.float64)

    @property
    def z(self):
        """The z-axis coordinate for each point."""
        return self._z

    @z.setter
    def z(self, value):
        self._z = np.asarray(value, dtype=np.float64)

    @property
    def radius(self):
        """The radius (Euclidean distance from the origin) for each point."""
        return np.sqrt(self.x**2 + self.y**2 + self.z**2)

    @radius.setter
    def radius(self, radius):
        # Only scale the distance from the origin; elevation and azimuth
        # are preserved.
        x, y, z = sph2cart(np.asarray(radius, dtype=np.float64),
                           self.elevation,
                           self.azimuth)
        self._x = x
        self._y = y
        self._z = z

    @property
    def azimuth(self):
        """The azimuth angle in radians for each point, wrapped to
        [0, 2*pi)."""
        return np.mod(np.arctan2(self.y, self.x), 2*np.pi)

    @azimuth.setter
    def azimuth(self, azimuth):
        # Rotate around the z-axis only; radius and elevation are preserved.
        x, y, z = sph2cart(self.radius,
                           self.elevation,
                           np.asarray(azimuth, dtype=np.float64))
        self._x = x
        self._y = y
        self._z = z

    @property
    def elevation(self):
        """The elevation angle in radians for each point, measured from
        the positive z-axis (colatitude convention)."""
        rad = self.radius
        return np.arccos(self.z/rad)

    @elevation.setter
    def elevation(self, elevation):
        # Radius and azimuth are preserved.
        x, y, z = sph2cart(self.radius,
                           np.asarray(elevation, dtype=np.float64),
                           self.azimuth)
        self._x = x
        self._y = y
        self._z = z

    @classmethod
    def from_cartesian(cls, x, y, z):
        """Create a Coordinates class object from a set of points in the
        Cartesian coordinate system.

        Parameters
        ----------
        x : ndarray, double
            x-coordinate
        y : ndarray, double
            y-coordinate
        z : ndarray, double
            z-coordinate
        """
        return Coordinates(x, y, z)

    @classmethod
    def from_spherical(cls, radius, elevation, azimuth):
        """Create a Coordinates class object from a set of points in the
        spherical coordinate system.

        Parameters
        ----------
        radius : ndarray, double
            The radius for each point
        elevation : ndarray, double
            The elevation angle in radians
        azimuth : ndarray, double
            The azimuth angle in radians
        """
        radius = np.asarray(radius, dtype=np.double)
        elevation = np.asarray(elevation, dtype=np.double)
        azimuth = np.asarray(azimuth, dtype=np.double)
        x, y, z = sph2cart(radius, elevation, azimuth)
        return Coordinates(x, y, z)

    @classmethod
    def from_array(cls, values, coordinate_system='cartesian'):
        """Create a Coordinates class object from a set of points given as
        numpy array.

        Parameters
        ----------
        values : double, ndarray
            Array with shape (3, N) where N is the number of points.
        coordinate_system : string
            Coordinate convention of the given values.
            Can be 'cartesian' or 'spherical' coordinates.

        Raises
        ------
        ValueError
            If the coordinate system is neither 'cartesian' nor 'spherical'.
        """
        coords = Coordinates()
        if coordinate_system == 'cartesian':
            coords.cartesian = values
        elif coordinate_system == 'spherical':
            coords.spherical = values
        else:
            # Bug fix: the exception was previously *returned* instead of
            # raised, so invalid input silently produced a ValueError object.
            raise ValueError("This coordinate system is not supported.")

        return coords

    @property
    def latitude(self):
        """The latitude angle as used in geospatial coordinates."""
        return np.pi/2 - self.elevation

    @property
    def longitude(self):
        """The longitude angle as used in geospatial coordinates."""
        return np.arctan2(self.y, self.x)

    @property
    def cartesian(self):
        """Cartesian coordinates of all points as array with shape (3, N)."""
        return np.vstack((self.x, self.y, self.z))

    @cartesian.setter
    def cartesian(self, value):
        self.x = value[0, :]
        self.y = value[1, :]
        self.z = value[2, :]

    @property
    def spherical(self):
        """Spherical coordinates (radius, elevation, azimuth) of all points
        as array with shape (3, N)."""
        return np.vstack((self.radius, self.elevation, self.azimuth))

    @spherical.setter
    def spherical(self, value):
        """Spherical coordinates of all points."""
        # Convert once to Cartesian and store through the cartesian setter.
        x, y, z = sph2cart(value[0, :], value[1, :], value[2, :])
        self.cartesian = np.vstack((x, y, z))

    @property
    def n_points(self):
        """Return number of points stored in the object."""
        return self.x.size

    def merge(self, other):
        """Merge another coordinates object into this object.

        Parameters
        ----------
        other : Coordinates
            Coordinates object whose points are appended to this object.
        """
        data = np.concatenate(
            (self.cartesian, other.cartesian),
            axis=-1
        )
        self.cartesian = data

    def find_nearest_point(self, point):
        """Find the closest Coordinate point to a given Point.
        The search for the nearest point is performed using the scipy
        cKDTree implementation.

        Parameters
        ----------
        point : Coordinates
            Point to find nearest neighboring Coordinate

        Returns
        -------
        distance : ndarray, double
            Distance between the point and it's closest neighbor
        index : int
            Index of the closest point.

        """
        kdtree = cKDTree(self.cartesian.T)
        distance, index = kdtree.query(point.cartesian.T)

        return distance, index

    def __repr__(self):
        """repr for Coordinate class."""
        if self.n_points == 1:
            repr_string = "Coordinates of 1 point"
        else:
            repr_string = "Coordinates of {} points".format(self.n_points)
        return repr_string

    def __getitem__(self, index):
        """Return Coordinates at index."""
        return Coordinates(self._x[index], self._y[index], self._z[index])

    def __setitem__(self, index, item):
        """Set Coordinates at index."""
        self.x[index] = item.x
        self.y[index] = item.y
        self.z[index] = item.z

    def __len__(self):
        """Length of the object which is the number of points stored."""
        return self.n_points
class SamplingSphere(Coordinates):
    """Sampling points on a sphere.

    Extends ``Coordinates`` with the maximum spherical harmonic order
    ``n_max`` supported by the sampling and optional per-point integration
    ``weights``.
    """

    def __init__(self, x=None, y=None, z=None, n_max=None, weights=None):
        """Initialize the spherical sampling.

        Parameters
        ----------
        x, y, z : ndarray, double, optional
            Cartesian coordinates of the sampling points.
        n_max : int, optional
            Maximum spherical harmonic order of the sampling.
        weights : array-like, double, optional
            One numeric integration weight per sampling point.

        Raises
        ------
        ValueError
            If the number of weights differs from the number of points.
        """
        Coordinates.__init__(self, x, y, z)
        # np.int was a deprecated alias for the builtin int and was removed
        # in NumPy >= 1.24; use int() directly.
        self._n_max = int(n_max) if n_max is not None else None
        if weights is not None:
            if len(x) != len(weights):
                raise ValueError(
                    "The number of weights has to be equal to "
                    "the number of sampling points.")
            self._weights = np.asarray(weights, dtype=np.double)
        else:
            self._weights = None

    @property
    def n_max(self):
        """Spherical harmonic order."""
        return self._n_max

    @n_max.setter
    def n_max(self, value):
        self._n_max = int(value)

    @property
    def weights(self):
        """Sampling weights for numeric integration."""
        return self._weights

    @weights.setter
    def weights(self, weights):
        if len(weights) != self.n_points:
            raise ValueError(
                "The number of weights has to be equal to "
                "the number of sampling points.")
        self._weights = np.asarray(weights, dtype=np.double)

    @classmethod
    def from_coordinates(cls, coords, n_max=None, weights=None):
        """Generate a spherical sampling object from a coordinates object.

        Parameters
        ----------
        coords : Coordinates
            Coordinate object.
        n_max : int, optional
            Maximum spherical harmonic order of the sampling.
        weights : array-like, double, optional
            One numeric integration weight per sampling point.

        Returns
        -------
        sampling : SamplingSphere
            Sampling on a sphere.
        """
        # Use cls (not the class name) so subclasses construct instances of
        # themselves through this factory.
        return cls(coords.x, coords.y, coords.z, n_max=n_max, weights=weights)

    @classmethod
    def from_cartesian(cls, x, y, z, n_max=None, weights=None):
        """Create a sampling from points in the Cartesian coordinate system.

        Parameters
        ----------
        x : ndarray, double
            x-coordinate
        y : ndarray, double
            y-coordinate
        z : ndarray, double
            z-coordinate
        n_max : int, optional
            Maximum spherical harmonic order of the sampling.
        weights : array-like, double, optional
            One numeric integration weight per sampling point.
        """
        return cls(x, y, z, n_max, weights)

    @classmethod
    def from_spherical(cls, radius, elevation, azimuth,
                       n_max=None, weights=None):
        """Create a sampling from points in the spherical coordinate system.

        Parameters
        ----------
        radius : ndarray, double
            The radius for each point.
        elevation : ndarray, double
            The elevation angle in radians.
        azimuth : ndarray, double
            The azimuth angle in radians.
        n_max : int, optional
            Maximum spherical harmonic order of the sampling.
        weights : array-like, double, optional
            One numeric integration weight per sampling point.
        """
        radius = np.asarray(radius, dtype=np.double)
        elevation = np.asarray(elevation, dtype=np.double)
        azimuth = np.asarray(azimuth, dtype=np.double)
        x, y, z = sph2cart(radius, elevation, azimuth)
        return cls(x, y, z, n_max, weights)

    @classmethod
    def from_array(cls, values, n_max=None, weights=None,
                   coordinate_system='cartesian'):
        """Create a sampling from a set of points given as a numpy array.

        Parameters
        ----------
        values : double, ndarray
            Array with shape Nx3 where N is the number of points.
        n_max : int, optional
            Maximum spherical harmonic order of the sampling.
        weights : array-like, double, optional
            One numeric integration weight per sampling point.
        coordinate_system : string
            Coordinate convention of the given values; 'cartesian' or
            'spherical'.

        Raises
        ------
        ValueError
            If the coordinate system is not supported.
        """
        coords = cls(n_max=n_max, weights=weights)
        if coordinate_system == 'cartesian':
            coords.cartesian = values
        elif coordinate_system == 'spherical':
            coords.spherical = values
        else:
            # BUG FIX: the original *returned* the ValueError instance
            # instead of raising it, so unsupported systems silently yielded
            # an exception object rather than an error.
            raise ValueError("This coordinate system is not supported.")
        return coords

    def __repr__(self):
        """Readable representation stating the number of sampling points."""
        if self.n_points == 1:
            repr_string = "Sampling with {} point".format(self.n_points)
        else:
            repr_string = "Sampling with {} points".format(self.n_points)
        return repr_string
| 29.634804 | 79 | 0.574642 | 11,986 | 0.991316 | 0 | 0 | 7,865 | 0.650484 | 0 | 0 | 5,274 | 0.436192 |
761c83dc8dbc123b6c4c63657c6e0437d9f3bf9f | 11,572 | py | Python | 09_Recurrent_Neural_Networks/05_Creating_A_Sequence_To_Sequence_Model/05_seq2seq_translation.py | maxim5/tensorflow_cookbook | 0db6d340970a09fbbc4a7f698fe59b82126aa203 | [
"MIT"
] | 2 | 2017-12-21T05:44:07.000Z | 2017-12-24T07:58:33.000Z | 09_Recurrent_Neural_Networks/05_Creating_A_Sequence_To_Sequence_Model/05_seq2seq_translation.py | maxim5/tensorflow_cookbook | 0db6d340970a09fbbc4a7f698fe59b82126aa203 | [
"MIT"
] | null | null | null | 09_Recurrent_Neural_Networks/05_Creating_A_Sequence_To_Sequence_Model/05_seq2seq_translation.py | maxim5/tensorflow_cookbook | 0db6d340970a09fbbc4a7f698fe59b82126aa203 | [
"MIT"
] | 2 | 2019-01-28T09:53:03.000Z | 2021-03-16T22:46:29.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Creating Sequence to Sequence Models
# -------------------------------------
# Here we show how to implement sequence to sequence models.
# Specifically, we will build an English to German translation model.
#
import io
import os
import string
import sys
from collections import Counter
from zipfile import ZipFile
import matplotlib.pyplot as plt
import numpy as np
import requests
import tensorflow as tf
# Select the seq2seq model helper that matches the installed TF version.
# NOTE(review): this compares only the first character of tf.__version__;
# fine for 0.x vs 1.x+, verify if ever run against a two-digit major version.
if tf.__version__[0] < '1':
    from tensorflow.models.rnn.translate import seq2seq_model
else:
    # models can be retrieved from github: https://github.com/tensorflow/models.git
    # put the models dir under python search lib path.
    local_repository = 'temp'
    if not os.path.exists(local_repository):
        # pip install gitpython
        from git import Repo
        tf_model_repository = 'https://github.com/tensorflow/models'
        # Clones the full tensorflow/models repository on first run; needs
        # network access and the gitpython package installed.
        Repo.clone_from(tf_model_repository, local_repository)
    sys.path.insert(0, 'temp/tutorials/rnn/translate/')
    import seq2seq_model as seq2seq_model
# Model Parameters
learning_rate = 0.1          # initial learning rate
lr_decay_rate = 0.99         # multiplicative decay factor applied to the LR
lr_decay_every = 100         # decay the learning rate every N generations
max_gradient = 5.0           # gradient-norm clipping threshold
batch_size = 50              # training batch size
num_layers = 3               # number of stacked RNN layers
rnn_size = 500               # RNN hidden-state size
layer_size = 512             # NOTE(review): appears unused in this script
generations = 10000          # total training iterations
vocab_size = 10000           # vocabulary size (index 0 reserved for unknown)
save_every = 1000            # checkpoint interval (in generations)
eval_every = 500             # test-sentence evaluation interval
output_every = 50            # loss-reporting interval
punct = string.punctuation   # characters stripped from the corpus
# Data Parameters
data_dir = 'temp'
data_file = 'eng_ger.txt'
model_path = 'seq2seq_model'
full_model_dir = os.path.join(data_dir, model_path)
# Test Translation from English (lowercase, no punct)
test_english = ['hello where is my computer',
                'the quick brown fox jumped over the lazy dog',
                'is it going to rain tomorrow']
########################################################################################################################
# Data
########################################################################################################################
# Make Model Directory
if not os.path.exists(full_model_dir):
    os.makedirs(full_model_dir)
# Make data directory
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
print('Loading English-German Data')
# Check for data, if it doesn't exist, download it and save it
if not os.path.isfile(os.path.join(data_dir, data_file)):
    print('Data not found, downloading Eng-Ger sentences from www.manythings.org')
    sentence_url = 'http://www.manythings.org/anki/deu-eng.zip'
    r = requests.get(sentence_url)
    z = ZipFile(io.BytesIO(r.content))
    file = z.read('deu.txt')
    # Format Data
    # Round-tripping through ascii with errors='ignore' drops all non-ASCII
    # characters (e.g. German umlauts disappear from the words).
    eng_ger_data = file.decode(errors='ignore')
    eng_ger_data = eng_ger_data.encode('ascii', errors='ignore')
    eng_ger_data = eng_ger_data.decode().split('\n')
    # Write to file
    with open(os.path.join(data_dir, data_file), 'w') as out_conn:
        for sentence in eng_ger_data:
            out_conn.write(sentence + '\n')
else:
    # A cached copy exists: read it back, stripping the trailing newline.
    eng_ger_data = []
    with open(os.path.join(data_dir, data_file), 'r') as in_conn:
        for row in in_conn:
            eng_ger_data.append(row[:-1])
print('Done!')
# Remove punctuation
eng_ger_data = [''.join(char for char in sent if char not in punct) for sent in eng_ger_data]
# Split each sentence by tabs
eng_ger_data = [x.split('\t') for x in eng_ger_data if len(x) >= 1]
# Transpose [[eng, ger], ...] into two parallel lists.
# NOTE(review): assumes exactly two tab-separated fields per line; newer
# manythings.org dumps append an attribution column -- verify the file format.
[english_sentence, german_sentence] = [list(x) for x in zip(*eng_ger_data)]
english_sentence = [x.lower().split() for x in english_sentence]
german_sentence = [x.lower().split() for x in german_sentence]
print('Processing the vocabularies.')
# Process the English Vocabulary
# Keep only the (vocab_size - 1) most frequent words; real words get
# indices 1..vocab_size-1 and index 0 is reserved for unknown words.
all_english_words = [word for sentence in english_sentence for word in sentence]
all_english_counts = Counter(all_english_words)
eng_word_keys = [x[0] for x in all_english_counts.most_common(vocab_size - 1)] # -1 because 0=unknown is also in there
eng_vocab2ix = dict(zip(eng_word_keys, range(1, vocab_size)))
eng_ix2vocab = {val: key for key, val in eng_vocab2ix.items()}
# Map each English sentence to vocabulary indices.
# Out-of-vocabulary words map to index 0 (the "unknown" token); dict.get
# replaces the original try / bare-except loop, which silently swallowed
# *every* exception type instead of only the expected KeyError.
english_processed = [[eng_vocab2ix.get(word, 0) for word in sent]
                     for sent in english_sentence]
# Process the German Vocabulary
# Same scheme as the English vocabulary: the (vocab_size - 1) most common
# words get indices 1..vocab_size-1; index 0 is reserved for unknown words.
all_german_words = [word for sentence in german_sentence for word in sentence]
all_german_counts = Counter(all_german_words)
ger_word_keys = [x[0] for x in all_german_counts.most_common(vocab_size - 1)]
ger_vocab2ix = dict(zip(ger_word_keys, range(1, vocab_size)))
ger_ix2vocab = {val: key for key, val in ger_vocab2ix.items()}
# Map each German sentence to vocabulary indices; 0 marks out-of-vocabulary
# words.  dict.get replaces the original try / bare-except loop, which
# silently swallowed every exception type instead of only KeyError.
german_processed = [[ger_vocab2ix.get(word, 0) for word in sent]
                    for sent in german_sentence]
# Convert the English test sentences to vocabulary indices; words not in
# the vocabulary map to 0 (unknown).  dict.get replaces the original
# try / bare-except loop, which hid any unexpected error.
test_data = [[eng_vocab2ix.get(word, 0) for word in sentence.split(' ')]
             for sentence in test_english]
# Define Buckets for sequence lengths
# We will split data into the corresponding buckets:
# (x1, y1), (x2, y2), ...
# Where all entries in bucket 1: len(x)<x1 and len(y)<y1 and so on.
x_maxs = [5, 7, 11, 50]
y_maxs = [10, 12, 17, 60]
buckets = [x for x in zip(x_maxs, y_maxs)]
bucketed_data = [[] for _ in range(len(x_maxs))]
# Place each sentence pair into the first bucket that fits both lengths;
# pairs longer than the largest bucket are silently dropped.
for eng, ger in zip(english_processed, german_processed):
    for ix, (x_max, y_max) in enumerate(zip(x_maxs, y_maxs)):
        if (len(eng) <= x_max) and (len(ger) <= y_max):
            bucketed_data[ix].append([eng, ger])
            break
# Print summaries of buckets
train_bucket_sizes = [len(bucketed_data[b]) for b in range(len(buckets))]
train_total_size = float(sum(train_bucket_sizes))
for ix, bucket in enumerate(bucketed_data):
    print('Data pts in bucket {}: {}'.format(ix, len(bucket)))
########################################################################################################################
# Create sequence to sequence model
########################################################################################################################
print('Creating Translation Model')
# Workaround: make copy.deepcopy of the RNN cell classes return the cell
# itself.  Seq2SeqModel deep-copies cells internally, which otherwise fails
# with "can't pickle thread.lock objects"; see:
# https://stackoverflow.com/questions/44855603/typeerror-cant-pickle-thread-lock-objects-in-seq2seq/47952913#47952913
setattr(tf.contrib.rnn.GRUCell, '__deepcopy__', lambda self, _: self)
setattr(tf.contrib.rnn.BasicLSTMCell, '__deepcopy__', lambda self, _: self)
setattr(tf.contrib.rnn.MultiRNNCell, '__deepcopy__', lambda self, _: self)
def translation_model(input_vocab_size, output_vocab_size,
                      buckets, rnn_size, num_layers, max_gradient,
                      learning_rate, lr_decay_rate, forward_only):
    """Build and return a ``seq2seq_model.Seq2SeqModel`` for translation.

    Thin wrapper that wires in the script-level ``batch_size`` and a
    float32 dtype.  ``forward_only=True`` requests an inference-only graph
    (no backward pass), ``forward_only=False`` a trainable one.
    """
    seq2seq = seq2seq_model.Seq2SeqModel(
        source_vocab_size=input_vocab_size,
        target_vocab_size=output_vocab_size,
        buckets=buckets,
        size=rnn_size,
        num_layers=num_layers,
        max_gradient_norm=max_gradient,
        batch_size=batch_size,
        learning_rate=learning_rate,
        learning_rate_decay_factor=lr_decay_rate,
        forward_only=forward_only,
        dtype=tf.float32)
    return seq2seq
# Build the training model (forward_only=False: includes the backward pass
# and gradient-update ops).
translate_model = translation_model(input_vocab_size=vocab_size,
                                    output_vocab_size=vocab_size,
                                    buckets=buckets,
                                    rnn_size=rnn_size,
                                    num_layers=num_layers,
                                    max_gradient=max_gradient,
                                    learning_rate=learning_rate,
                                    lr_decay_rate=lr_decay_rate,
                                    forward_only=False)
# Tell TensorFlow to reuse the variables for the test model
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    # Reuse the variables for the test model
    # (forward_only=True builds an inference-only graph that shares the
    # same weights as translate_model).
    test_model = translation_model(input_vocab_size=vocab_size,
                                   output_vocab_size=vocab_size,
                                   buckets=buckets,
                                   rnn_size=rnn_size,
                                   num_layers=num_layers,
                                   max_gradient=max_gradient,
                                   learning_rate=learning_rate,
                                   lr_decay_rate=lr_decay_rate,
                                   forward_only=True)
# Decode one sentence at a time during evaluation.
test_model.batch_size = 1
########################################################################################################################
# Training session
########################################################################################################################
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_loss = []
    for i in range(generations):
        # Sample a random bucket and pull one padded batch from it.
        rand_bucket_ix = np.random.choice(len(bucketed_data))
        model_outputs = translate_model.get_batch(bucketed_data, rand_bucket_ix)
        encoder_inputs, decoder_inputs, target_weights = model_outputs
        # Get the (gradient norm, loss, and outputs)
        _, step_loss, _ = translate_model.step(sess, encoder_inputs, decoder_inputs,
                                               target_weights, rand_bucket_ix, False)
        # Output status
        if (i + 1) % output_every == 0:
            train_loss.append(step_loss)
            print('Gen #{} out of {}. Loss: {:.4}'.format(i + 1, generations, step_loss))
        # Check if we should decay the learning rate
        if (i + 1) % lr_decay_every == 0:
            sess.run(translate_model.learning_rate_decay_op)
        # Save model
        if (i + 1) % save_every == 0:
            print('Saving model to {}.'.format(full_model_dir))
            model_save_path = os.path.join(full_model_dir, "eng_ger_translation.ckpt")
            translate_model.saver.save(sess, model_save_path, global_step=i)
        # Eval on test set
        if (i + 1) % eval_every == 0:
            for ix, sentence in enumerate(test_data):
                # Find which bucket sentence goes in
                bucket_id = next(index for index, val in enumerate(x_maxs) if val >= len(sentence))
                # Get RNN model outputs
                encoder_inputs, decoder_inputs, target_weights = test_model.get_batch(
                    {bucket_id: [(sentence, [])]}, bucket_id)
                # Get logits
                _, test_loss, output_logits = test_model.step(sess, encoder_inputs, decoder_inputs,
                                                              target_weights, bucket_id, True)
                # Greedy decode: take the argmax token at every output step.
                ix_output = [int(np.argmax(logit, axis=1)) for logit in output_logits]
                # If there is a 0 symbol in outputs end the output there.
                # (Appending [0] guarantees a terminator is always found.)
                ix_output = ix_output[0:[i for i, x in enumerate(ix_output + [0]) if x == 0][0]]
                # Get german words from indices
                test_german = [ger_ix2vocab[x] for x in ix_output]
                print('English: {}'.format(test_english[ix]))
                print('German: {}'.format(test_german))
########################################################################################################################
# Visualization
########################################################################################################################
# Plot train loss.
# Losses are appended in the training loop whenever (i + 1) % output_every
# == 0, i.e. at generations output_every, 2*output_every, ..., generations.
# The x-axis must therefore start at output_every; the original comprehension
# ([i for i in range(generations) if i % output_every == 0]) started at 0,
# shifting every point left by output_every - 1 generations.
loss_generations = list(range(output_every, generations + 1, output_every))
plt.plot(loss_generations, train_loss, 'k-')
plt.title('Sequence to Sequence Loss')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
| 39.494881 | 120 | 0.606032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,367 | 0.290961 |
761ca9f6ac724fb2550f3f650971c0642f340690 | 6,077 | py | Python | hxmirador/settings/prod.py | nmaekawa/hxmirador | c5e364a92c3631126a7fd9335af506270f52fe68 | [
"BSD-3-Clause"
] | null | null | null | hxmirador/settings/prod.py | nmaekawa/hxmirador | c5e364a92c3631126a7fd9335af506270f52fe68 | [
"BSD-3-Clause"
] | null | null | null | hxmirador/settings/prod.py | nmaekawa/hxmirador | c5e364a92c3631126a7fd9335af506270f52fe68 | [
"BSD-3-Clause"
] | null | null | null | """
Django settings for hxmirador project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
SETTINGS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(SETTINGS_DIR)
PROJECT_NAME = "hxmirador"
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("HXMIRADOR_DJANGO_SECRET_KEY", "CHANGE_ME")
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ["localhost", "127.0.0.1"]
allowed_hosts_other = os.environ.get("HXMIRADOR_ALLOWED_HOSTS", "")
if allowed_hosts_other:
ALLOWED_HOSTS.extend(allowed_hosts_other.split())
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"hxlti",
"mirador",
"corsheaders",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
ROOT_URLCONF = PROJECT_NAME + ".urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = PROJECT_NAME + ".wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# SQLite file path is overridable via environment for deployments.
HXMIRADOR_DB_PATH = os.environ.get(
    "HXMIRADOR_DB_PATH", os.path.join(BASE_DIR, PROJECT_NAME + "_sqlite3.db")
)
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": HXMIRADOR_DB_PATH,
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
# NOTE(review): every validator is commented out, so any password is
# accepted by this production config -- confirm this is intentional.
AUTH_PASSWORD_VALIDATORS = [
    # {
    #     'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    # },
    # {
    #     'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    # },
    # {
    #     'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    # },
    # {
    #     'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    # },
]
# Logging config
#
# BUG FIX: in the dictConfig schema the root logger must be configured via
# the top-level "root" key.  An entry named "root" inside "loggers" only
# configures logging.getLogger("root") -- a *child* of the real root logger
# -- so the original config never attached a handler to the root logger.
# The entry has been moved to the top level.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "simple": {
            "format": (
                "%(asctime)s|%(levelname)s [%(filename)s:%(funcName)s]" " %(message)s"
            )
        },
    },
    "handlers": {
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "simple",
            "stream": "ext://sys.stdout",
        },
        # NOTE(review): this handler is defined but not referenced by any
        # logger below; attach it (e.g. to "root") or remove it.  Kept as-is
        # to preserve current behavior.
        "errorfile_handler": {
            "level": "DEBUG",
            "class": "logging.handlers.RotatingFileHandler",
            "formatter": "simple",
            "filename": os.path.join(BASE_DIR, PROJECT_NAME + "_errors.log"),
            "maxBytes": 10485760,  # 10MB
            "backupCount": 7,
            "encoding": "utf8",
        },
    },
    "loggers": {
        "mirador": {"level": "DEBUG", "handlers": ["console"], "propagate": True},
        "hxlti_dj": {"level": "DEBUG", "handlers": ["console"], "propagate": True},
        "oauthlib": {
            "level": "DEBUG",
            "handlers": ["console"],
            "propagate": True,
        },
    },
    "root": {
        "level": "DEBUG",
        "handlers": ["console"],
    },
}
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.environ.get("HXMIRADOR_STATIC_ROOT", os.path.join(BASE_DIR, "static/"))
# hxlti app settings
# assuming ssl terminator in front of django (nginx reverse proxy)
use_ssl = os.environ.get("HXLTI_ENFORCE_SSL", "false")
HXLTI_ENFORCE_SSL = use_ssl.lower() == "true"
HXLTI_DUMMY_CONSUMER_KEY = os.environ.get(
    "HXLTI_DUMMY_CONSUMER_KEY",
    "dummy_42237E2AB9614C4EAB0C089A96B40686B1C97DE114EC40659E64F1CE3C195AAC",
)
HXLTI_REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0")
#
# settings for django-cors-headers
#
# NOTE(review): allows cross-origin requests from ANY origin -- confirm this
# is actually required for the LTI/Mirador embedding use case.
CORS_ORIGIN_ALLOW_ALL = True  # accept requests from anyone
# hxmirador lti params mapping
# Maps LTI custom_* launch parameters onto mirador arguments: "ptype" is how
# the raw value is parsed, "mapto" is the target argument name.
HXMIRADOR_CUSTOM_PARAMETERS_MAP = {
    "custom_canvas_ids": {
        "ptype": "list",
        "mapto": "canvases",
    },
    "custom_object_ids": {
        "ptype": "list",
        "mapto": "manifests",
    },
    "custom_layout": {
        "ptype": "string",
        "mapto": "layout",
    },
    "custom_view_type": {
        "ptype": "string",
        "mapto": "view_type",
    },
    # if there's multiple params that map to the same var name
    # and the request sends these multiple params (say with different values)
    # the last one defined in this MAP takes precedence.
    "custom_manifests": {
        "ptype": "list",
        "mapto": "manifests",
    },
}
| 28.665094 | 96 | 0.632549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,825 | 0.629422 |