hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f366bab7949be48bac13c8ce7427325ee3cbe177 | 896 | py | Python | tests/clvm/benchmark_costs.py | Flax-Network/flax-light-wallet | 1745850a28a47bbbc4b5f3d460f35b34b4ed4f25 | [
"Apache-2.0"
] | 1 | 2021-12-02T14:38:11.000Z | 2021-12-02T14:38:11.000Z | tests/clvm/benchmark_costs.py | Flax-Network/flax-light-wallet | 1745850a28a47bbbc4b5f3d460f35b34b4ed4f25 | [
"Apache-2.0"
] | null | null | null | tests/clvm/benchmark_costs.py | Flax-Network/flax-light-wallet | 1745850a28a47bbbc4b5f3d460f35b34b4ed4f25 | [
"Apache-2.0"
] | 6 | 2021-11-21T00:38:27.000Z | 2021-12-03T01:25:19.000Z | from flaxlight.types.blockchain_format.program import INFINITE_COST
from flaxlight.types.spend_bundle import SpendBundle
from flaxlight.types.generator_types import BlockGenerator
from flaxlight.consensus.cost_calculator import calculate_cost_of_program, NPCResult
from flaxlight.consensus.default_constants import DEFAULT_CONSTANTS
from flaxlight.full_node.bundle_tools import simple_solution_generator
from flaxlight.full_node.mempool_check_conditions import get_name_puzzle_conditions
def cost_of_spend_bundle(spend_bundle: SpendBundle) -> int:
    """Compute the CLVM execution cost of a spend bundle.

    The bundle is wrapped in a block generator, its name/puzzle
    conditions are evaluated with no cost ceiling (INFINITE_COST) in
    safe mode, and the total program cost — including the per-byte
    cost from the default consensus constants — is returned.
    """
    generator: BlockGenerator = simple_solution_generator(spend_bundle)
    conditions: NPCResult = get_name_puzzle_conditions(
        generator, INFINITE_COST, cost_per_byte=DEFAULT_CONSTANTS.COST_PER_BYTE, safe_mode=True
    )
    total_cost: int = calculate_cost_of_program(generator.program, conditions, DEFAULT_CONSTANTS.COST_PER_BYTE)
    return total_cost
| 52.705882 | 103 | 0.856027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f36a93259aea79b2a63bd7b922441b69612ed4f5 | 1,634 | py | Python | Validation/Performance/python/SaveRandomSeedsDigi.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | Validation/Performance/python/SaveRandomSeedsDigi.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 7 | 2016-07-17T02:34:54.000Z | 2019-08-13T07:58:37.000Z | Validation/Performance/python/SaveRandomSeedsDigi.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | #G.Benelli Feb 7 2008
#This fragment is used to have the random generator seeds saved to test
#simulation reproducibility. Anothe fragment then allows to run on the
#root output of cmsDriver.py to test reproducibility.
import FWCore.ParameterSet.Config as cms
def customise(process):
    """Customise a cmsDriver.py DIGI configuration so that the random
    generator seeds are stored in the event output, allowing later
    reproducibility tests on the ROOT output.

    Returns the modified cms.Process object.
    """
    # Rename the process (the cms.Process name lives in a mangled attribute).
    process.__dict__['_Process__name']='DIGISavingSeeds'
    # Producer that snapshots the random-engine states each event.
    process.rndmStore=cms.EDProducer("RandomEngineStateProducer")
    # Keep the stored RandomEngineStates in the output file content.
    process.output.outputCommands.append("keep RandomEngineStates_*_*_*")
    process.rndmStore_step=cms.Path(process.rndmStore)
    # Rebuild the schedule:
    # first delete the current one (in place, keeping the same list object):
    del process.schedule[:]
    # then append only the wanted sequences, in execution order.
    process.schedule.append(process.digitisation_step)
    process.schedule.append(process.rndmStore_step)
    process.schedule.append(process.out_step)
    # Add the SimpleMemoryCheck service for per-event memory reporting.
    process.SimpleMemoryCheck=cms.Service("SimpleMemoryCheck",
                                          ignoreTotal=cms.untracked.int32(1),
                                          oncePerEventMode=cms.untracked.bool(True))
    # Add the Timing service for per-module timing.
    process.Timing=cms.Service("Timing")
    # Enable the summary so timing information (TimeReport) is printed at
    # the end of the logfile; create process.options if it does not exist yet.
    if hasattr(process,'options'):
        process.options.wantSummary = cms.untracked.bool(True)
    else:
        process.options = cms.untracked.PSet(
            wantSummary = cms.untracked.bool(True)
        )
    return(process)
| 41.897436 | 95 | 0.707466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 696 | 0.425949 |
f36cc628b45610f2a9c084e711e9332703ff2399 | 1,219 | py | Python | chapter_6/ex_6-4.py | akshaymoharir/PythonCrashCourse | 742b9841cff61d36567e8706efc69c5f5d5435ff | [
"MIT"
] | null | null | null | chapter_6/ex_6-4.py | akshaymoharir/PythonCrashCourse | 742b9841cff61d36567e8706efc69c5f5d5435ff | [
"MIT"
] | null | null | null | chapter_6/ex_6-4.py | akshaymoharir/PythonCrashCourse | 742b9841cff61d36567e8706efc69c5f5d5435ff | [
"MIT"
] | null | null | null |
## Python Crash Course
# Exercise 6.4: Glossary#2:
# Now that you know how to loop through a dictionary, clean up the code from Exercise 6-3 (page 102)
# by replacing your series of print statements with a loop that runs through the dictionary’s keys and values.
# When you’re sure that your loop works, add five more Python terms to your glossary.
# When you run your program again, these new words and meanings should automatically be included in the output.
#
def exercise_6_4():
    """Print every chapter/description pair stored in a glossary dict.

    Exercise 6-4: replace a series of print statements with a loop over
    the dictionary, so any newly added entries appear automatically.
    Returns None; output goes to stdout.
    """
    print("\n")
    print("Following are key-value pairs stored in a dictionary..\n")
    python_chapters = {
        'HelloWorld': "Introduction to Python. First program.",
        'Lists': "Collection of items in particular order.",
        'Variables': "Different kind of data we can work with.",
        'Dictionary': "Limitless type to store information.",
        'Games In Python': "I am excited to develop my own game using Python!"
    }
    # Iterate key and value together with .items() instead of looping over
    # .keys() and re-indexing the dictionary for every key.
    for chapter, description in python_chapters.items():
        print(chapter.title(), ":", description)
    print("\n")
if __name__ == '__main__':
exercise_6_4()
| 34.828571 | 129 | 0.624282 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 876 | 0.716271 |
f36e7b68c4de6666f2c352b4c512f437cc2a7906 | 3,592 | py | Python | test/ai/test_basic_ai.py | PMatthaei/multiagent-particle-envs | 1169b1f88fbcd0c5b43f1be9c63e0cb8916feb6d | [
"MIT"
] | null | null | null | test/ai/test_basic_ai.py | PMatthaei/multiagent-particle-envs | 1169b1f88fbcd0c5b43f1be9c63e0cb8916feb6d | [
"MIT"
] | 2 | 2021-08-29T19:04:50.000Z | 2021-08-29T19:21:32.000Z | test/ai/test_basic_ai.py | PMatthaei/ma-env | 1169b1f88fbcd0c5b43f1be9c63e0cb8916feb6d | [
"MIT"
] | null | null | null | import unittest
import numpy as np
from maenv.ai.basic_ai import BasicScriptedAI
from test.mock import mock_agent, mock_team, mock_world
AGENTS_N = 4
class BasicAgentActTestCases(unittest.TestCase):
    """Unit tests for BasicScriptedAI.act with two teams of two agents.

    Agents a(id 0) and b(id 1) are on team 0; c(id 2) and d(id 3) on
    team 1.  ``world.distances[i][j]`` is the distance from agent i to
    agent j and ``world.positions`` holds one (x, y) per agent.  The
    returned action ``u`` appears to be [dx, dy, target_id], where
    target_id -1 means "no attack target, move instead" — inferred from
    the expected values below; confirm against BasicScriptedAI.
    """
    def setUp(self):
        # Agent ids double as row/column indices into the distance matrix.
        self.a = mock_agent(id=0, tid=0)
        self.b = mock_agent(id=1, tid=0)
        self.c = mock_agent(id=2, tid=1)
        self.d = mock_agent(id=3, tid=1)
        self.at = mock_team(0, members=[self.a, self.b])
        self.bt = mock_team(1, members=[self.c, self.d])
        self.world = mock_world(AGENTS_N, teams=[self.at, self.bt])
        # Agents spaced 10 apart on the y-axis.
        self.world.positions = np.array([[0, 0], [0, 10], [0, 20], [0, 30]], dtype=float)
        self.world.distances = np.array([[0, 30, 20, 10], [30, 0, 20, 10], [30, 20, 0, 10], [10, 10, 10, 0]], dtype=float)
        self.ai = BasicScriptedAI()
    def test_a_should_attack_d(self):
        # d is a's closest in-range enemy (distance 10).
        result = self.ai.act(self.a, self.world)
        np.testing.assert_array_equal(result.u, [0, 0, self.d.id])
    def test_b_should_attack_d(self):
        result = self.ai.act(self.b, self.world)
        np.testing.assert_array_equal(result.u, [0, 0, self.d.id])
    def test_c_should_attack_b(self):
        result = self.ai.act(self.c, self.world)
        np.testing.assert_array_equal(result.u, [0, 0, self.b.id])
    def test_d_should_attack_a(self):
        result = self.ai.act(self.d, self.world)
        np.testing.assert_array_equal(result.u, [0, 0, self.a.id])
    def test_d_should_move_down_towards_b(self):
        # A and B are out of range -> no direct targeting -> -1
        # But B is closer (30 < 40) -> move to B
        self.world.distances = np.array([[0, 30, 20, 10], [30, 0, 20, 10], [20, 20, 0, 10], [40, 30, 10, 0]], dtype=float)
        result = self.ai.act(self.d, self.world)
        # Should move down to reach B -> B - D = (0,10) - (0,30) = (0,-20) -> move down by -10 (grid step)
        np.testing.assert_array_equal(result.u, [0, -10, -1])
    def test_a_should_move_up_towards_c(self):
        # C and D are out of range -> no direct targeting -> -1
        # But C is closer (30 < 40) -> move to C
        self.world.distances = np.array([[0, 10, 30, 40], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=float)
        result = self.ai.act(self.a, self.world)
        # Should move up to reach C -> C - A = (0, 20) - (0, 0) = (0,20) -> move up by 10 (grid step)
        np.testing.assert_array_equal(result.u, [0, 10, -1])
    def test_a_should_move_right_towards_c(self):
        # C and D are out of range -> no direct targeting -> -1
        # But B is closer (3 < 4) -> move to B
        self.world.distances = np.array([[0, 10, 30, 40], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=float)
        self.world.positions = np.array([[0, 0], [0, 0], [20, 0], [0, 0]], dtype=float)
        result = self.ai.act(self.a, self.world)
        # Should move up to reach C -> D - A = (20, 0) - (0, 0) = (20,0) -> move left by 10 (grid step)
        np.testing.assert_array_equal(result.u, [10, 0, -1])
def test_a_should_move_left_towards_d(self):
# C and D are out of range -> no direct targeting -> -1
# But B is closer (30 < 40) -> move to B
self.world.distances = np.array([[0, 10, 30, 40], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=float)
self.world.positions = np.array([[0, 0], [0, 00], [-20, 0], [0, 0]], dtype=float)
result = self.ai.act(self.a, self.world)
# Should move up to reach C -> D - A = (-20, 0) - (0, 0) = (-20,0) -> move left by 10 (grid step)
np.testing.assert_array_equal(result.u, [-10, 0, -1]) | 49.888889 | 122 | 0.581292 | 3,437 | 0.956849 | 0 | 0 | 0 | 0 | 0 | 0 | 761 | 0.21186 |
f36fbaa6774f5a01f0c03dbada430c9534cde159 | 914 | py | Python | src/m3_extra.py | wangj19/99-CapstoneProject-201920 | 30198ec8b8748abbcdc507d85ad38a34fd61de01 | [
"MIT"
] | null | null | null | src/m3_extra.py | wangj19/99-CapstoneProject-201920 | 30198ec8b8748abbcdc507d85ad38a34fd61de01 | [
"MIT"
] | null | null | null | src/m3_extra.py | wangj19/99-CapstoneProject-201920 | 30198ec8b8748abbcdc507d85ad38a34fd61de01 | [
"MIT"
] | null | null | null | import rosebot
import time
def led():
    """Drive forward while blinking the LEDs at a rate tied to the IR
    proximity reading, stopping when an object is closer than 20 inches.

    NOTE(review): the blink sequence is asymmetric — the left LED is
    turned on twice per cycle while the right LED is only toggled once;
    confirm this is the intended light pattern.
    """
    robot = rosebot.RoseBot()
    robot.drive_system.go(30, 30)
    while True:
        distance = robot.sensor_system.ir_proximity_sensor.get_distance_in_inches()
        # Closer objects -> smaller delay -> faster blinking.
        delay = distance/500
        robot.led_system.left_led.turn_on()
        time.sleep(delay)
        robot.led_system.left_led.turn_off()
        robot.led_system.right_led.turn_on()
        time.sleep(delay)
        robot.led_system.left_led.turn_on()
        time.sleep(delay)
        robot.led_system.left_led.turn_off()
        robot.led_system.right_led.turn_off()
        # Stop once an obstacle is within 20 inches.
        if distance < 20:
            robot.drive_system.stop()
            break
def camera():
    """Display camera data, then spin counterclockwise and clockwise
    until the camera sees an object of the given size at the given
    position (arguments 100, 500 — presumably area/position thresholds;
    confirm against the rosebot drive_system API).
    """
    robot = rosebot.RoseBot()
    robot.drive_system.display_camera_data()
    robot.drive_system.spin_counterclockwise_until_sees_object(100, 500)
    time.sleep(5)
    robot.drive_system.spin_clockwise_until_sees_object(100, 500)
f370a807612b9899c6544506def0d96f26936479 | 358 | py | Python | app/requests.py | Mash14/personal-blog | 4117097b98ffab7a0dfc43261162da88cde863c2 | [
"MIT"
] | null | null | null | app/requests.py | Mash14/personal-blog | 4117097b98ffab7a0dfc43261162da88cde863c2 | [
"MIT"
] | null | null | null | app/requests.py | Mash14/personal-blog | 4117097b98ffab7a0dfc43261162da88cde863c2 | [
"MIT"
] | null | null | null | import urllib.request,json
from .models import Quote
def get_quotes():
    """Fetch a random quote from the Storm Consultancy quotes API.

    Performs a blocking HTTP GET, prints the raw JSON payload for
    debugging, and returns the parsed response (a dict).
    """
    get_quotes_url = 'http://quotes.stormconsultancy.co.uk/random.json'
    with urllib.request.urlopen(get_quotes_url) as url:
        get_quotes_data = url.read()
        get_quotes_response = json.loads(get_quotes_data)
        print(get_quotes_data)  # debug: raw bytes of the API response
    return get_quotes_response
f373d9e39b7c68b7583c76a11336122d800a3114 | 2,142 | py | Python | misc/phyler_classify.py | hurwitzlab/LSA-pipeline | 0515fe325ccf49c7914ea8aa1404cdee6dfe2648 | [
"MIT"
] | 39 | 2015-09-20T06:33:54.000Z | 2021-12-21T06:59:53.000Z | misc/phyler_classify.py | hurwitzlab/LSA-pipeline | 0515fe325ccf49c7914ea8aa1404cdee6dfe2648 | [
"MIT"
] | 23 | 2015-09-23T12:00:43.000Z | 2020-05-27T15:42:11.000Z | misc/phyler_classify.py | hurwitzlab/LSA-pipeline | 0515fe325ccf49c7914ea8aa1404cdee6dfe2648 | [
"MIT"
] | 19 | 2015-10-16T21:40:12.000Z | 2020-11-10T07:58:00.000Z | #!/usr/bin/env python
import sys, getopt
import glob,os
# sample the first 10**7 reads
def get_fasta(fp,fo):
    """Convert the FASTQ file *fp* to FASTA in *fo*, keeping at most
    10**7 reads.

    A line beginning with '@' is treated as a read header unless the
    previous line began with '+' (in which case it is a quality line
    that merely starts with '@').  Returns the number of reads written.
    """
    src = open(fp)
    dst = open(fo,'w')
    prev_first_char = ''
    copy_sequence = False
    reads_written = 0
    for line in src:
        if (line[0] == '@') and (prev_first_char != '+'):
            # Header line: rewrite "@name" as ">name".
            dst.write('>'+line[1:])
            copy_sequence = True
            reads_written += 1
        elif copy_sequence:
            # The line right after a header is the sequence.
            dst.write(line)
            copy_sequence = False
        prev_first_char = line[0]
        if reads_written >= 10**7:
            break
    src.close()
    dst.close()
    return reads_written
help_message = 'usage example: python read_phyler.py -r 1 -i /project/home/original_reads/ -o /project/home/phyler/'
# NOTE: this script is Python 2 (print statements below).
# It converts one rank's FASTQ files to FASTA, concatenates them, runs a
# BLASTN search against the MetaPhyler marker database, and classifies
# the hits with metaphylerClassify.  Paths to the MetaPhyler install are
# hard-coded.
if __name__ == "__main__":
    try:
        opts, args = getopt.getopt(sys.argv[1:],'hr:i:o:',["inputdir="])
    except:
        print help_message
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h','--help'):
            print help_message
            sys.exit()
        elif opt in ('-r',"--filerank"):
            # -r is 1-based on the command line; fr is the 0-based subdir index.
            fr = int(arg)-1
        elif opt in ('-i','--inputdir'):
            inputdir = arg
            if inputdir[-1] != '/':
                inputdir += '/'
        elif opt in ('-o','--outputdir'):
            outputdir = arg
            if outputdir[-1] != '/':
                outputdir += '/'
    # From here on fr is used as a subdirectory name ("0/", "1/", ...).
    fr = str(fr) + '/'
    os.system('mkdir '+outputdir+fr)
    FP = glob.glob(os.path.join(inputdir+fr,'*.fastq'))
    read_count = 0
    # Convert every FASTQ file in the rank's input dir to FASTA.
    for fp in FP:
        fileprefix = fp[fp.rfind('/')+1:fp.index('.fastq')]
        fasta_file = outputdir + fr + fileprefix + '.fasta'
        read_count += get_fasta(fp,fasta_file)
    # Concatenate the per-file FASTAs, record the total read count in a
    # sentinel filename, then run BLASTN + MetaPhyler classification.
    os.system('cat %s*.fasta > %sall.fa' % (outputdir+fr,outputdir+fr))
    os.system('rm '+outputdir+fr+'*.fasta')
    os.system('touch '+outputdir + fr + 'all.count.' + str(read_count))
    os.system('blastall -p blastn -W15 -a1 -e0.01 -m8 -b1 -i %s -d /seq/msctmp/bcleary/src/MetaPhylerV1.25/markers/markers.dna > %s' % (outputdir+fr+'all.fa',outputdir+fr+'all.phyler.blastn'))
    os.system('rm '+outputdir+fr+'all.fa')
    os.system('/seq/msctmp/bcleary/src/MetaPhylerV1.25/metaphylerClassify /seq/msctmp/bcleary/src/MetaPhylerV1.25/markers/markers.blastn.classifier /seq/msctmp/bcleary/src/MetaPhylerV1.25/markers/markers.taxonomy %s > %s' % (outputdir+fr+'all.phyler.blastn',outputdir+fr+'all.phyler.blastn.classification'))
    os.system('rm '+outputdir+fr+'all.phyler.blastn')
f373f170f7b6ef88a0c7f2d402d77c96448b85ae | 2,733 | py | Python | Labs/PolicyFunctionIteration/policy_solutions.py | jessicaleete/numerical_computing | cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | [
"CC-BY-3.0"
] | 10 | 2016-10-18T19:54:25.000Z | 2021-10-09T20:12:38.000Z | Labs/PolicyFunctionIteration/policy_solutions.py | jessicaleete/numerical_computing | cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | [
"CC-BY-3.0"
] | null | null | null | Labs/PolicyFunctionIteration/policy_solutions.py | jessicaleete/numerical_computing | cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | [
"CC-BY-3.0"
] | 2 | 2017-05-14T16:07:59.000Z | 2020-06-20T09:05:06.000Z | #Solutions to Policy Function Iteration Lab
import numpy as np
import scipy as sp
from scipy import sparse
from scipy.sparse import linalg
import math
from matplotlib import pyplot as plt
from scipy import linalg as la
def u(x):
return np.sqrt(x).flatten()
def policyIter(beta, N, Wmax=1.):
"""
Solve the infinite horizon cake eating problem using policy function iteration.
Inputs:
beta -- float, the discount factor
N -- integer, size of discrete approximation of cake
Wmax -- total amount of cake available
Returns:
values -- converged value function (Numpy array of length N)
psi -- converged policy function (Numpy array of length N)
"""
W = np.linspace(0,Wmax,N) #state space vector
I = sparse.identity(N, format='csr')
#precompute u(W-W') for all possible inputs
actions = np.tile(W, N).reshape((N,N)).T
actions = actions - actions.T
actions[actions<0] = 0
rewards = np.sqrt(actions)
rewards[np.triu_indices(N, k=1)] = -1e10 #pre-computed reward function
psi_ind = np.arange(N)
rows = np.arange(0,N)
tol = 1.
while tol >= 1e-9:
columns = psi_ind
data = np.ones(N)
Q = sparse.coo_matrix((data,(rows,columns)),shape=(N,N))
Q = Q.tocsr()
values = linalg.spsolve(I-beta*Q, u(W-W[psi_ind])).reshape(1,N)
psi_ind1 = np.argmax(rewards + beta*values, axis=1)
tol = math.sqrt(((W[psi_ind] - W[psi_ind1])**2).sum())
psi_ind = psi_ind1
return values.flatten(), W[psi_ind]
def modPolicyIter(beta, N, Wmax=1., m=15):
"""
Solve the infinite horizon cake eating problem using modified policy function iteration.
Inputs:
beta -- float, the discount factor
N -- integer, size of discrete approximation of cake
Wmax -- total amount of cake available
Returns:
values -- converged value function (Numpy array of length N)
psi -- converged policy function (Numpy array of length N)
"""
W = np.linspace(0,Wmax,N) #state space vector
#precompute u(W-W') for all possible inputs
actions = np.tile(W, N).reshape((N,N)).T
actions = actions - actions.T
actions[actions<0] = 0
rewards = np.sqrt(actions)
rewards[np.triu_indices(N, k=1)] = -1e10 #pre-computed reward function
psi_ind = np.arange(N)
values = np.zeros(N)
tol = 1.
while tol >= 1e-9:
for i in xrange(m):
values = u(W - W[psi_ind]) + beta*values[psi_ind]
psi_ind1 = np.argmax(rewards + beta*values.reshape(1,N), axis=1)
tol = math.sqrt(((W[psi_ind] - W[psi_ind1])**2).sum())
psi_ind = psi_ind1
return values.flatten(), W[psi_ind]
| 33.740741 | 92 | 0.628979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,053 | 0.385291 |
f3747ad21d103d033483025376c35ddb48b4a17b | 8,228 | py | Python | official/nlp/gpt/src/gpt.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | official/nlp/gpt/src/gpt.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | official/nlp/gpt/src/gpt.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GPT model"""
import numpy as np
import mindspore.nn as nn
from mindspore.common.parameter import Parameter
import mindspore.common.dtype as mstype
from mindspore.common.initializer import TruncatedNormal, initializer
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.nn.transformer.layers import _LayerNorm
from mindspore.nn.transformer.transformer import AttentionMask, TransformerEncoder
class EmbeddingLookup(nn.Cell):
    """
    The embedding lookup table for the vocabulary.

    Args:
        config(GPTConfig): the config of network

    Inputs:
        input_ids: the tokenized inputs with datatype int32

    Returns:
        output: Tensor, the embedding vector for the input with shape (batch_size, seq_length, embedding_size)
        self.embedding_table: Tensor, the embedding table for the vocabulary
    """
    def __init__(self, config):
        super(EmbeddingLookup, self).__init__()
        self.vocab_size = config.vocab_size
        self.embedding_size = config.embedding_size
        # Truncated-normal initialized (vocab, hidden) weight matrix.
        table_init = initializer(TruncatedNormal(0.02), [self.vocab_size, self.embedding_size])
        self.embedding_table = Parameter(table_init)
        self.gather = P.Gather()
        self.shape = (-1, config.seq_length, config.embedding_size)

    def construct(self, input_ids):
        # Row-gather the table along axis 0 using the token ids.
        embeddings = self.gather(self.embedding_table, input_ids, 0)
        # The table itself is also returned so the head can tie weights.
        return embeddings, self.embedding_table
class GPT_Model(nn.Cell):
    """
    The backbone of GPT network

    Args:
        config(GPTConfig): the config of network

    Inputs:
        input_ids: the tokenized inputs with datatype int32
        input_mask: the mask indicating whether each position is a valid input
        layer_past: the previous feature map

    Returns:
        output_state: Tensor, the output logit of backbone
        present_layer: Tensor, the current feature map
        embedding_table: Tensor, the embedding table for the vocabulary
    """
    def __init__(self, config):
        super(GPT_Model, self).__init__()
        # Builds the causal attention mask from the validity mask.
        self.get_attention_mask = AttentionMask(seq_length=config.seq_length)
        self.word_embedding = EmbeddingLookup(config)
        # Learned absolute position embeddings (one per sequence position).
        self.position_embedding = nn.Embedding(config.seq_length, config.embedding_size,
                                               embedding_table=TruncatedNormal(0.02))
        self.blocks = nn.CellList()
        self.encoder = TransformerEncoder(batch_size=config.batch_size,
                                          num_layers=config.num_layers,
                                          hidden_size=config.embedding_size,
                                          ffn_hidden_size=config.embedding_size * 4,
                                          seq_length=config.seq_length,
                                          num_heads=config.num_heads,)
        # Final layer norm, computed in the configured compute dtype.
        self.layernorm = _LayerNorm((config.embedding_size,)).to_float(config.compute_dtype)
        self.use_past = config.use_past
        self.past = tuple([None]*config.num_layers)
        self.num_layers = config.num_layers

    def construct(self, input_ids, input_mask, layer_past=None):
        """Run the GPT backbone; see class docstring for inputs/outputs."""
        # NOTE(review): layer_past is resolved here but never passed to the
        # encoder below — incremental decoding appears unwired; confirm.
        if not self.use_past:
            layer_past = self.past
        input_embedding, embedding_table = self.word_embedding(input_ids)
        batch_size, seq_length = F.shape(input_ids)
        # Position ids [0..seq_length), tiled across the batch.
        input_position = F.tuple_to_array(F.make_range(seq_length))
        input_position = P.Tile()(input_position, (batch_size, 1))
        position_embedding = self.position_embedding(input_position)
        # Sum of token and position embeddings, cast to fp16 for the encoder.
        hidden_states = input_embedding + position_embedding
        hidden_states = P.Cast()(hidden_states, mstype.float16)
        attention_mask = self.get_attention_mask(input_mask)
        hidden_states, present_layer = self.encoder(hidden_states, attention_mask)
        output_state = self.layernorm(hidden_states)
        return output_state, present_layer, embedding_table
class GPT_Head(nn.Cell):
    """
    Head for GPT: projects backbone features to vocabulary logits.

    Args:
        config(GPTConfig): the config of network

    Inputs:
        state: the output of the backbone
        embedding_table: the embedding table of the vocabulary (tied weights)

    Returns:
        logits: Tensor, the logits of the corresponding inputs
    """
    def __init__(self, config):
        super(GPT_Head, self).__init__()
        self.matmul = P.MatMul(transpose_b=True)
        self.embedding_size = config.embedding_size
        self.log_softmax = P.LogSoftmax(axis=-1)
        self.dtype = config.compute_dtype
        self.cast = P.Cast()

    def construct(self, state, embedding_table):
        # Flatten (batch, seq, hidden) -> (batch*seq, hidden) before projecting.
        flat_state = P.Reshape()(state, (-1, self.embedding_size))
        # Weight tying: multiply by the (casted) embedding table, transposed.
        return self.matmul(flat_state, self.cast(embedding_table, self.dtype))
class GPT(nn.Cell):
    """
    The GPT network: a backbone followed by a vocabulary head.

    Args:
        config(GPTConfig): the config of network

    Inputs:
        input_ids: the tokenized inputs
        input_mask: the mask indicating whether each position is a valid input
        past: the previous feature map

    Returns:
        logits: Tensor: the logits of the corresponding inputs with shape (batch_size, seq_length, vocab_size)
    """
    def __init__(self, config):
        super(GPT, self).__init__()
        self.backbone = GPT_Model(config)
        self.head = GPT_Head(config)

    def construct(self, input_ids, input_mask, past=None):
        # The backbone also returns the embedding table for weight tying.
        hidden, _, table = self.backbone(input_ids, input_mask, past)
        return self.head(hidden, table)
class GPTWithLoss(nn.Cell):
    """
    GPT training loss wrapper.

    Args:
        network: backbone network of GPT2/3
        loss: loss function, e.g., crossentropy
        eos_token: the end_of_sentence token

    Inputs:
        input_ids: the tokenized inputs
        past: the previous feature map

    Returns:
        output: Tensor, the loss of the network
    """
    def __init__(self, network, loss, eos_token=50256):
        super(GPTWithLoss, self).__init__(auto_prefix=False)
        self.network = network
        self.loss = loss
        self.eos_token = eos_token

    def construct(self, input_ids, past=None):
        # Teacher forcing: inputs are all but the last token, labels all
        # but the first (next-token prediction).
        tokens = input_ids[:, :-1]
        # Positions holding the EOS token are masked out of the loss.
        valid_mask = F.cast(F.not_equal(tokens, self.eos_token), mstype.float32)
        logits = self.network(tokens, valid_mask, past)
        flat_labels = P.Reshape()(input_ids[:, 1:], (-1,))
        flat_mask = P.Reshape()(valid_mask, (-1,))
        return self.loss(logits, flat_labels, flat_mask)
class EvalNet(nn.Cell):
    """
    GPT evaluation net.

    Args:
        backbone: backbone network of GPT2/3
        generate: enable generate mode

    Inputs:
        input_ids: the tokenized inputs
        input_mask: validity mask for the inputs

    Returns:
        outputs: Tensor, probabilities in generate mode, argmax ids otherwise
    """
    def __init__(self, backbone, generate=False):
        super(EvalNet, self).__init__(auto_prefix=False)
        self.backbone = backbone
        self.argmax = P.Argmax()
        self.generate = generate
        self.cast = P.Cast()

    def construct(self, input_ids, input_mask):
        """Run evaluation: token probabilities or greedy argmax ids."""
        mask = self.cast(input_mask, mstype.float32)
        logits = self.backbone(input_ids, mask)
        if self.generate:
            # Recover probabilities from log-softmax via e ** log_p.
            log_probs = nn.LogSoftmax()(logits)
            return F.tensor_pow(np.e, log_probs)
        return self.argmax(logits)
| 36.087719 | 116 | 0.658605 | 7,103 | 0.863272 | 0 | 0 | 0 | 0 | 0 | 0 | 2,956 | 0.359261 |
f374cca632fe9ea398a4cffa7dc83253ebb4f185 | 9,481 | py | Python | aws/sagemaker/kmeans/daal_kmeans_docker/container/kmeans/Kmeans.py | ravi9/csp | 42cc55bf36841514341fc45f17363f287bc70114 | [
"MIT"
] | null | null | null | aws/sagemaker/kmeans/daal_kmeans_docker/container/kmeans/Kmeans.py | ravi9/csp | 42cc55bf36841514341fc45f17363f287bc70114 | [
"MIT"
] | null | null | null | aws/sagemaker/kmeans/daal_kmeans_docker/container/kmeans/Kmeans.py | ravi9/csp | 42cc55bf36841514341fc45f17363f287bc70114 | [
"MIT"
] | 1 | 2018-11-06T06:16:35.000Z | 2018-11-06T06:16:35.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import daal.algorithms.kmeans.init
from daal.algorithms import kmeans
from daal.data_management import InputDataArchive, OutputDataArchive
from daal.data_management import Compressor_Zlib, Decompressor_Zlib, \
level9, DecompressionStream, CompressionStream, HomogenNumericTable
from daal.data_management import BlockDescriptor, HomogenNumericTable, BlockDescriptor_Float32, readOnly, readWrite
#from utils import printNumericTable
import numpy as np
from numpy import float32, float64, int32
import warnings
class Kmeans:
'''
....Constructor to set Kmeans compute parameters
....'''
    def __init__(
        self,
        nClusters,
        maxIterations=300,
        initialCentroidMethod='defaultDense',
        method='defaultDense',
        oversamplingFactor=0.5,
        nRounds=5,
        accuracyThreshold=0.0001,
        gamma=1.0,
        distanceType='euclidean',
        assignFlag=True,
        dtype=float64,
    ):
        """Store the K-Means computation parameters.

        Parameters
        ----------
        nClusters : int
            Number of centroids to compute.
        maxIterations : int, default 300
            Maximum number of clustering iterations.
        initialCentroidMethod : str, default 'defaultDense'
            Initial centroid assignment method ('deterministicCSR',
            'randomDense', 'randomCSR', 'plusPlusDense', 'plusPlusCSR',
            'parallelPlusDense', 'parallelPlusCSR').
        method : str, default 'defaultDense'
            Final centroid computation mode ('defaultDense' or 'lloydCSR').
        oversamplingFactor : float, default 0.5
            Only for 'parallelPlusDense'/'parallelPlusCSR': fraction of
            nClusters sampled per round of parallel K-Means++
            (L = nClusters * oversamplingFactor points per round).
        nRounds : int, default 5
            Only for 'parallelPlusDense'/'parallelPlusCSR': number of
            rounds; L * nRounds must be greater than nClusters.
        accuracyThreshold : float, default 0.0001
            Threshold for termination of the algorithm.
        gamma : float, default 1.0
            Weight used in distance calculation for binary categorical
            features.
        distanceType : str, default 'euclidean'
            Measure of closeness between points being clustered.
        assignFlag : bool, default True
            Whether cluster assignments are computed for the data points.
        dtype : numpy dtype, default float64
            Floating-point type used by the DAAL algorithms.
        """
        self.nClusters = nClusters
        self.initialCentroidMethod = initialCentroidMethod
        self.oversamplingFactor = oversamplingFactor
        self.nRounds = nRounds
        self.method = method
        self.maxIterations = maxIterations
        self.accuracyThreshold = accuracyThreshold
        self.gamma = gamma
        self.distanceType = distanceType
        self.assignFlag = assignFlag
        self.dtype = dtype
def compute(self, data):
if self.method == 'lloydCSR':
self.method = kmeans.lloydCSR
elif self.method == 'defaultDense':
self.method = kmeans.lloydDense
if self.initialCentroidMethod == 'defaultDense':
initMethod = kmeans.init.deterministicDense
elif self.initialCentroidMethod == 'deterministicCSR':
initMethod = kmeans.init.deterministicCSR
elif self.initialCentroidMethod == 'randomDense':
initMethod = kmeans.init.randomDense
elif self.initialCentroidMethod == 'randomCSR':
initMethod = kmeans.init.randomCSR
elif self.initialCentroidMethod == 'plusPlusDense':
initMethod = kmeans.init.plusPlusDense
elif self.initialCentroidMethod == 'plusPlusCSR':
initMethod = kmeans.init.plusPlusCSR
elif self.initialCentroidMethod == 'parallelPlusDense':
initMethod = kmeans.init.parallelPlusDense
elif self.initialCentroidMethod == 'parallelPlusCSR ':
initMethod = kmeans.init.parallelPlusCSR
initAlg = kmeans.init.Batch(self.nClusters, method=initMethod,
oversamplingFactor=self.oversamplingFactor,
nRounds=self.nRounds,
dtype=self.dtype)
initAlg.input.set(kmeans.init.data, data)
res = initAlg.compute()
InitialCentroidsResult = res.get(kmeans.init.centroids)
algorithm = kmeans.Batch(
self.nClusters,
self.maxIterations,
method=self.method,
accuracyThreshold=self.accuracyThreshold,
gamma=self.gamma,
distanceType=self.distanceType,
assignFlag=self.assignFlag,
)
algorithm.input.set(kmeans.data, data)
algorithm.input.set(kmeans.inputCentroids,
InitialCentroidsResult)
res = algorithm.compute()
if self.assignFlag != False:
self.clusterAssignments = res.get(kmeans.assignments)
self.centroidResults = res.get(kmeans.centroids)
self.objectiveFunction = res.get(kmeans.objectiveFunction)
return self
def predict(self, centroidResults, data):
algorithm = kmeans.Batch(
self.nClusters,
0,
method=self.method,
accuracyThreshold=self.accuracyThreshold,
gamma=self.gamma,
distanceType=self.distanceType,
assignFlag=True,
)
algorithm.input.set(kmeans.data, data)
algorithm.input.set(kmeans.inputCentroids, centroidResults)
res = algorithm.compute()
return res.get(kmeans.assignments)
def compress(self, arrayData):
compressor = Compressor_Zlib()
compressor.parameter.gzHeader = True
compressor.parameter.level = level9
comprStream = CompressionStream(compressor)
comprStream.push_back(arrayData)
compressedData = np.empty(comprStream.getCompressedDataSize(),
dtype=np.uint8)
comprStream.copyCompressedArray(compressedData)
return compressedData
def decompress(self, arrayData):
    """Decompress a zlib-compressed byte array (gzip header expected).

    Args:
        arrayData: Compressed byte array, as produced by :meth:`compress`.

    Returns:
        ``np.uint8`` array holding the decompressed data.
    """
    zlibDecompressor = Decompressor_Zlib()
    zlibDecompressor.parameter.gzHeader = True
    # Feed the compressed bytes into a decompression stream ...
    stream = DecompressionStream(zlibDecompressor)
    stream.push_back(arrayData)
    # ... then copy the inflated result into a right-sized buffer.
    result = np.empty(stream.getDecompressedDataSize(), dtype=np.uint8)
    stream.copyDecompressedArray(result)
    return result
# -------------------
# ***Serialization***
# -------------------
def serialize(
    self,
    data,
    fileName=None,
    useCompression=False,
):
    """Serialize a DAAL object to a raw byte array.

    If ``fileName`` is given, the bytes are written to ``<fileName>.npy``
    and the object's constructor expression to ``<fileName>.txt`` (read
    back by :meth:`deserialize`); nothing is returned in that case.
    Otherwise a dict with keys ``'Array Object'`` (the bytes) and
    ``'Object Information'`` (the constructor expression) is returned.

    Args:
        data: DAAL object supporting ``serialize(InputDataArchive)``.
        fileName: Optional target file name; any extension is stripped.
        useCompression: If True, the archive bytes are zlib-compressed.

    Returns:
        dict or None: Serialized payload when ``fileName`` is None.
    """
    # Turn e.g. "<class 'daal.data_management.HomogenNumericTable'>" into
    # "daal.data_management.HomogenNumericTable()" for later eval().
    buffArrObjName = (str(type(data)).split()[1].split('>')[0]
                      + '()').replace("'", '')

    dataArch = InputDataArchive()
    data.serialize(dataArch)
    bufferArray = np.zeros(dataArch.getSizeOfArchive(), dtype=np.ubyte)
    dataArch.copyArchiveToArray(bufferArray)

    # Compression is orthogonal to the file/in-memory choice, so apply it
    # once up front instead of duplicating it in every branch.
    if useCompression:
        bufferArray = self.compress(bufferArray)

    if fileName is None:
        # In-memory round trip: hand back the bytes plus the type info.
        return {'Array Object': bufferArray,
                'Object Information': buffArrObjName}

    # Strip an extension if present; rsplit returns the whole name when
    # there is no dot, and np.save appends '.npy' itself.
    fileName = fileName.rsplit('.', 1)[0]
    np.save(fileName, bufferArray)
    with open(fileName + '.txt', 'w') as infoFile:
        infoFile.write(buffArrObjName)
# ---------------------
# ***Deserialization***
# ---------------------
def deserialize(
    self,
    serialObjectDict=None,
    fileName=None,
    useCompression=False,
):
    """Rebuild a DAAL object serialized by :meth:`serialize`.

    Exactly one of ``serialObjectDict`` or ``fileName`` must be provided.

    Args:
        serialObjectDict: Dict with 'Array Object' / 'Object Information'
            keys, as returned by :meth:`serialize`.
        fileName: ``.npy`` file written by :meth:`serialize`; the matching
            ``.txt`` file with the type information is read alongside it.
        useCompression: Must match the value used when serializing.

    Returns:
        The deserialized DAAL object.

    Raises:
        SystemExit: If neither or both input sources are given.
    """
    # Imported so eval() below can resolve names like
    # 'daal.data_management.HomogenNumericTable()'.
    import daal  # noqa: F401

    if fileName is not None and serialObjectDict is None:
        bufferArray = np.load(fileName)
        # Use a context manager so the .txt handle is always closed
        # (the original open(...).read() leaked it).
        with open(fileName.rsplit('.', 1)[0] + '.txt', 'r') as infoFile:
            buffArrObjName = infoFile.read()
    elif fileName is None and serialObjectDict:
        # Truthiness check also rejects an empty dict; previously
        # any(None) raised TypeError before the warning could fire.
        bufferArray = serialObjectDict['Array Object']
        buffArrObjName = serialObjectDict['Object Information']
    else:
        warnings.warn('Expecting "bufferArray" or "fileName" argument, NOT both'
                      )
        raise SystemExit

    if useCompression:
        bufferArray = self.decompress(bufferArray)

    dataArch = OutputDataArchive(bufferArray)
    try:
        # SECURITY: eval() of text read from disk — only deserialize files
        # produced by this class from trusted locations.
        deSerialObj = eval(buffArrObjName)
    except AttributeError:
        deSerialObj = HomogenNumericTable()
    deSerialObj.deserialize(dataArch)
    return deSerialObj
| 43.490826 | 1,404 | 0.629259 | 8,923 | 0.941145 | 0 | 0 | 0 | 0 | 0 | 0 | 2,238 | 0.236051 |
f37556e45733e4284a2c266789d0279aa779aaab | 1,037 | py | Python | packages/syft/src/syft/proto/lib/python/bytes_pb2.py | jackbandy/PySyft | 0e20e90abab6a7a7ca672d6eedfa1e7f83c4981b | [
"Apache-2.0"
] | null | null | null | packages/syft/src/syft/proto/lib/python/bytes_pb2.py | jackbandy/PySyft | 0e20e90abab6a7a7ca672d6eedfa1e7f83c4981b | [
"Apache-2.0"
] | null | null | null | packages/syft/src/syft/proto/lib/python/bytes_pb2.py | jackbandy/PySyft | 0e20e90abab6a7a7ca672d6eedfa1e7f83c4981b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/lib/python/bytes.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x1cproto/lib/python/bytes.proto\x12\x0fsyft.lib.python"\x15\n\x05\x42ytes\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x62\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(
DESCRIPTOR, "proto.lib.python.bytes_pb2", globals()
)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_BYTES._serialized_start = 49
_BYTES._serialized_end = 70
# @@protoc_insertion_point(module_scope)
| 34.566667 | 139 | 0.788814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 407 | 0.392478 |
f3759665189eabf01b8c5c36782598bf681a6b5f | 6,092 | py | Python | pyscripts/excel2md.py | heyrict/custom_modules | 37860a3f703b6cc731b719a5c9a1bee51184e12e | [
"Apache-2.0"
] | null | null | null | pyscripts/excel2md.py | heyrict/custom_modules | 37860a3f703b6cc731b719a5c9a1bee51184e12e | [
"Apache-2.0"
] | null | null | null | pyscripts/excel2md.py | heyrict/custom_modules | 37860a3f703b6cc731b719a5c9a1bee51184e12e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import optparse,sys,os
from txtform import df_format_print, df_format_read
import pandas as pd, numpy as np
import pyperclip
class string():
    """Mutable in-memory text buffer with a file-like read/write interface.

    Lets the same formatting code target either a real file object or a
    plain string (e.g. when redirecting output to the clipboard).
    """

    def __init__(self, *args, sep=' ', end='\n'):
        # Seed the buffer with the given pieces joined by `sep`, plus `end`.
        self.content = sep.join(str(piece) for piece in args) + end

    def read(self):
        """Return everything accumulated so far."""
        return self.content

    def write(self, *args, sep=' ', end=''):
        """Append the given pieces joined by `sep`, followed by `end`."""
        self.content += sep.join(str(piece) for piece in args) + end
def main():
    """Convert tabular data between excel/csv and markdown-style tables.

    Input is taken from a file argument, stdin, the clipboard (-C) or a
    vim editing session (-v); output goes to a file (-o), stdout or the
    clipboard (-c). Formats are inferred from file extensions unless
    overridden with -f/-t. Returns 0 on success, None on error paths.
    """
    opt = optparse.OptionParser()
    opt.add_option('-o','--output',dest='output',default=None,help='name of the outputfile.')
    opt.add_option('-c','--to-clipboard',dest='to_clipboard',action='store_true',default=False,help='redirect output to clipboard')
    opt.add_option('-C','--from-clipboard',dest='from_clipboard',action='store_true',default=False,help='redirect input from clipboard')
    opt.add_option('-v','--vim',dest='vim_edit',action='store_true',default=False,help='edit by vim')
    opt.add_option('-f','--from',dest='FROM',default=None)
    opt.add_option('-t','--to',dest='TO',default=None)
    opt.add_option('-r','--in-place',dest='in_place',default=False,action='store_true',help='replace the file')
    opt.add_option('-s','--squeeze',dest='squeeze',default=False,action='store_true',help='squeeze the form')
    opt.add_option('-n','--replace-na',dest='replace_na',default=False,action='store_true',help='replace na values by spaces (WARNING: THIS OPTION WILL MAKE OUTPUT UNCOMPATIBLE)')
    opt.add_option('-k','--kind',dest='kind',default='simple',help='select the output kind(`normal`/`simple`), default simple')
    opt.add_option('-a','--align',dest='align',default='c',help='align: [l,c,r]')
    opt.add_option('--uwidth',dest='uwidth',default=2,help='relative width of utf8 characters with latin characters')
    opt.add_option('--spcwidth',dest='spcwidth',default=1,help='relative width of space with latin characters')
    opt.add_option('--preset',dest='preset',default=None,help='presettings: [`xmind`]')
    opt.add_option('-O', '--order-by',dest='order_by',default=None)
    (options,args) = opt.parse_args()
    inp = args
    data = None
    mode = 'a'
    options.uwidth = float(options.uwidth)
    options.spcwidth = float(options.spcwidth)

    # Only the first character of --kind is significant ('s' or 'n').
    # NOTE(review): an empty --kind value would raise IndexError here.
    outputkind = options.kind[0]
    if outputkind not in 'sn': print('Error: outputkind %s not supported'%options.kind); return
    outputkind = 'simple' if outputkind == 's' else 'normal'

    # Presets override the width options parsed above.
    if options.preset:
        if options.preset == 'xmind':
            options.uwidth = 2.5
            options.spcwidth = 0.605

    # -r: overwrite the (single) input file instead of appending.
    if options.in_place:
        mode = 'w'
        if options.vim_edit: options.output = '/tmp/excel2md_edit.md'
        elif len(inp)==0:
            print('Error: no file found for replace');return
        elif len(inp)>1:
            print('Error: More than one arguments passed');return
        if not options.output:
            options.output = inp[0]

    if len(inp) > 1: print('Error: More than one arguments passed'); return
    elif len(inp) == 0:
        # No file argument: read from clipboard, vim session, or stdin.
        instr = string()
        # get input
        if options.from_clipboard:
            instr.write(pyperclip.paste())
        elif options.vim_edit:
            try:
                os.system('touch /tmp/excel2md_edit.md')
                os.system('vim /tmp/excel2md_edit.md')
            except: print('Error: No vim editor available'); return;
            with open('/tmp/excel2md_edit.md','r') as f:
                instr.write(f.read())
            #os.remove('/tmp/excel2md_edit.md')
        else:
            r = str(sys.stdin.readline())
            while r:
                instr.write(r)
                r = str(sys.stdin.readline())
        try: data = df_format_read(instr.read(),replace_na=options.replace_na)
        except Exception as e: print(e); return
    else:
        # Single file argument: infer the input format from -f or extension.
        inp = inp[0]
        FROM = options.FROM if options.FROM else (inp.split('.')[-1] if inp else None)
        # transform all data from excel file to DataFrame
        if FROM in ['xls','xlsx','excel']:
            data = pd.read_excel(inp)
        elif FROM in ['csv']:
            data = pd.read_csv(inp)
        else:
            with open(inp) as f:
                try: data = df_format_read(f.read(),replace_na=options.replace_na)
                except Exception as e: print(e);return

    # Everything downstream works on strings (column widths, alignment).
    data = data.applymap(str)

    # sort: a leading '-'/'+' selects descending/ascending; a numeric
    # value is treated as a column index rather than a column name.
    if options.order_by:
        asc = True
        if options.order_by[0] == '-':
            asc = False
            options.order_by = options.order_by[1:]
        elif options.order_by[0] == '+':
            options.order_by = options.order_by[1:]
        try: options.order_by = data.columns[int(options.order_by)]
        except: pass
        try:
            data = data.sort_values(options.order_by, ascending=asc)
        except Exception as e:
            print(e)
            exit(1)

    # output: infer the target format from -t or the output extension.
    TO = options.TO if options.TO else (options.output.split('.')[-1]\
            if options.output else None)
    if TO==None:
        # No recognizable target: print the markdown table to clipboard
        # or stdout.
        if options.to_clipboard == True:
            outstr = string()
            df_format_print(data,outstr,squeeze=options.squeeze,\
                    align=options.align,uwidth=options.uwidth,\
                    spcwidth=options.spcwidth,kind=outputkind)
            pyperclip.copy(outstr.read())
        else:
            df_format_print(data,squeeze=options.squeeze,\
                    align=options.align,uwidth=options.uwidth,\
                    spcwidth=options.spcwidth,kind=outputkind)
    else:
        if TO in ['xls','xlsx','excel']:
            data.to_excel(options.output,index=False)
        elif TO in ['csv']:
            if options.output: data.to_csv(options.output,index=False)
            else: data.to_csv(sys.stdout,index=False)
        else:
            # Markdown target: mode is 'w' for --in-place, 'a' otherwise.
            with open(options.output,mode) as fo:
                df_format_print(data,file=fo,squeeze=options.squeeze,\
                        align=options.align,uwidth=options.uwidth,\
                        spcwidth=options.spcwidth,kind=outputkind)
    return 0

if __name__=='__main__':
    main()
| 40.885906 | 179 | 0.603414 | 275 | 0.045141 | 0 | 0 | 0 | 0 | 0 | 0 | 1,384 | 0.227183 |
f377429eaab6daa3a95ec3785362c510027b3b86 | 12,482 | py | Python | spexxy/weight/fromgridnearest.py | thusser/spexxy | 14a8d121076b9e043bdf2e27222a65088f771ff9 | [
"MIT"
] | 4 | 2019-05-13T21:36:31.000Z | 2021-09-06T01:56:36.000Z | spexxy/weight/fromgridnearest.py | thusser/spexxy | 14a8d121076b9e043bdf2e27222a65088f771ff9 | [
"MIT"
] | 2 | 2020-02-12T14:36:39.000Z | 2020-07-14T11:43:10.000Z | spexxy/weight/fromgridnearest.py | thusser/spexxy | 14a8d121076b9e043bdf2e27222a65088f771ff9 | [
"MIT"
] | 1 | 2019-11-08T09:26:23.000Z | 2019-11-08T09:26:23.000Z | import os
import numpy as np
import pandas as pd
from typing import List, Union
from .weight import Weight
from ..data import Spectrum
class WeightFromGridNearest(Weight):
    """
    This class loads the weights from a grid depending on the initial values of the fit parameters by choosing the
    nearest neighbour in the grid. It returns an array containing the weights.
    """

    def __init__(self, filename, initial: float = 0., max_line_depth: float = 0.5, center_weight: float = 1.,
                 max_step: int = 1, mask_lines: Union[bool, str, List] = True, max_change=(300, 0.3), *args, **kwargs):
        """
        Initializes a new weight.

        Args:
            filename: Name of grid file.
            initial: Initial value for the whole weight array.
            max_line_depth: Central pixel for lines with larger line depth are masked out.
            center_weight: Factor that increases the weight of the central pixel of each line.
            max_step: In iteration steps <= max_step new weights are loaded from the grid.
            mask_lines: List of absorption lines that are always masked out in their centers.
            max_change: (Teff, dex) thresholds; a larger change between
                iterations triggers a reload of the weights.
        """
        Weight.__init__(self, *args, **kwargs)
        if 'objects' in kwargs:
            self.objects = kwargs['objects']

        self._initial = initial
        self._max_line_depth = max_line_depth
        self._center_weight = center_weight
        self._max_step = max_step
        # Sorted descending so [0] is the Teff threshold, [1] the dex one.
        self._max_change = sorted(max_change, reverse=True)

        # Normalize mask_lines: True -> built-in defaults, a list gets
        # missing logg bounds filled with (-0.5, 6.5), a string is read
        # as a CSV file with the same columns.
        if mask_lines:
            if isinstance(mask_lines, bool):
                self._mask_lines = 'default'
            elif isinstance(mask_lines, list):
                self._mask_lines = []
                for line in mask_lines:
                    if len(line) == 2:
                        self._mask_lines.append(line + [-0.5, 6.5])
                    else:
                        self._mask_lines.append(line)
            elif isinstance(mask_lines, str):
                df = pd.read_csv(os.path.expandvars(mask_lines))
                df.loc[df['logg_min'].isna(), 'logg_min'] = -0.5
                df.loc[df['logg_max'].isna(), 'logg_max'] = 6.5
                self._mask_lines = df.to_numpy()
        else:
            self._mask_lines = mask_lines

        # expand filename
        filename = os.path.expandvars(filename)

        # get grid's root path
        self._root = os.path.dirname(filename)

        # load CSV
        self._data = pd.read_csv(filename, index_col=False)

        # get all parameters, by removing 'Filename' from list of columns
        self._parameters = list(self._data.columns)
        self._parameters.remove('Filename')

        # we assume that all parameters are floats, so treat them as such
        for name in self._parameters:
            self._data[name] = self._data[name].apply(lambda x: float(x))

        # get grid axes
        self._axes = [np.array(sorted(self._data[p].unique())) for p in self._parameters]

        # remove axes that contain only a single value
        for i, p in enumerate(self._parameters):
            if len(self._axes[i]) <= 1:
                del self._axes[i]
                self._parameters.remove(p)

        self._data.set_index(keys=self._parameters, inplace=True)

        # initialize step counter
        self._step = 1

        # values of the fit parameter from previous iteration step
        self._previous_values = None
        self._filename = None

        # weights will be stored for next iterations
        self._weights = None
        self._neighbour = None

        # save initial parameters
        self._initial_values = None
        self._logg = None

    def __call__(self, spectrum: Spectrum, filename: str) -> np.ndarray:
        """
        Creates and returns weight array.

        Args:
            spectrum: Spectrum to create weight for.
            filename: Name of spectrum file.  (Unused here; part of the
                Weight call interface.)

        Returns:
            Array containing the weight for given spectrum.
        """
        # save initial values (only from the first component, hence break)
        if self._initial_values is None:
            self._initial_values = {}
            for cmp in self.objects['init_iter'].values():
                for param_name in cmp.param_names:
                    self._initial_values[param_name] = cmp[param_name]

                    if param_name == 'logg' and self._logg is None:
                        self._logg = cmp[param_name]
                break

        # load new weights if the fit parameters changed significantly
        new_weights = False
        if self._previous_values is not None:
            for param in self._parameters:
                if new_weights:
                    break
                for cmp in self.objects['init_iter'].values():
                    for param_name in cmp.param_names:
                        if param.lower() != param_name.lower():
                            continue
                        if param.lower() == 'teff':
                            # did Teff change by more than 300K?
                            new_weights = abs(
                                self._previous_values[self._parameters.index(param)] - cmp[param_name]) > self._max_change[0]
                        else:
                            # did FeH, Alpha or logg change by more than 0.3 dex?
                            new_weights = abs(
                                self._previous_values[self._parameters.index(param)] - cmp[param_name]) > self._max_change[1]

        # are current parameter values identical with initial values?
        if self._step > 1:
            tmp = []
            for cmp in self.objects['init_iter'].values():
                for param_name in cmp.param_names:
                    tmp.append(cmp[param_name] == self._initial_values[param_name])
                break

            # component is reset to initial values if the fit restarts with a damping factor, in that case the iteration
            # step needs to be reset as well
            if np.all(tmp):
                self._step = 1

        # load new weights if max_step has not been reached or fit parameters changed significantly
        if (self._step <= self._max_step) or new_weights:
            if new_weights:
                self._step = 1

            # get parameters from component (first component only)
            params = []
            for param in self._parameters:
                for cmp in self.objects['init_iter'].values():
                    for param_name in cmp.param_names:
                        if param.lower() != param_name.lower():
                            continue
                        params.append(cmp[param_name])
                    break

            # save current parameters for next step
            self._previous_values = params.copy()

            # find nearest neighbour
            self._neighbour = []
            for i, p in enumerate(params):
                self._neighbour.append(self._axes[i][np.argmin(np.abs(self._axes[i] - p))])

            # save filename of weight table
            self._filename = self._data.loc[tuple(self._neighbour)].Filename

        # load weights
        w = self._load_weights(spectrum)

        # increase step counter
        self._step += 1

        return w

    def _load_weights(self, spectrum: Spectrum):
        """
        Load CSV file from grid and create weight array.

        Args:
            spectrum: Spectrum to create the weight for.

        Returns:
            Weight array.
        """
        # load table containing the weights
        df = pd.read_csv(os.path.join(self._root, self._filename))

        # consider only weights for iteration steps lower/equal than the given one
        df = df[df['step'] <= self._step]

        # initialize weight array
        weights = np.zeros(spectrum.wave.shape) + self._initial

        # write weights to array
        for i, row in df.iterrows():
            # Masked lines: add the row's weight but zero the line center.
            if isinstance(self._mask_lines, list) or isinstance(self._mask_lines, np.ndarray):
                if self._mask_centers(row, self._mask_lines, weights, spectrum):
                    continue
            elif self._mask_lines == 'default':
                if self._mask_default_lines(row, weights, spectrum):
                    continue

            weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']

            # if line depth larger than given threshold mask out the central region otherwise increase weight of
            # central pixel by given factor
            if row['line_depth'] > self._max_line_depth:
                # if region spans more than 10 wavelength pixel mask out the 3 central pixel otherwise only the central
                # one
                if (row['wave_end'] - row['wave_start']) >= 12:
                    i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
                    weights[i-1:i+2] = 0
                else:
                    weights[np.argmin(np.abs(spectrum.wave - row['wave_center']))] = 0
            else:
                weights[np.argmin(np.abs(spectrum.wave - row['wave_center']))] *= self._center_weight

        return weights

    def _mask_default_lines(self, row: pd.Series, weights: np.ndarray, spectrum: Spectrum):
        """Apply the built-in masks (Balmer lines, FeI, Ca triplet).

        Adds the row's weight over its wavelength range but zeroes 1-5
        central pixels, depending on the line and the star's logg.
        Returns True if the row matched one of the default lines.
        """
        # Halpha: mask 3 central pixels for giants (logg <= 3.5), 1 otherwise
        if (row['wave_center'] < 6566.) & (row['wave_center'] > 6557.) & (self._logg <= 3.5):
            weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
            i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
            weights[i - 1:i + 2] = 0
            return True
        elif (row['wave_center'] < 6566.) & (row['wave_center'] > 6557.) & (self._logg > 3.5):
            weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
            i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
            weights[i] = 0
            return True

        # Hbeta
        if (row['wave_center'] < 4867.) & (row['wave_center'] > 4857.):
            weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
            i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
            weights[i] = 0
            return True

        # FeI line
        if (row['wave_center'] < 5272.) and (row['wave_center'] > 5267.):
            weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
            i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
            weights[i - 1:i + 2] = 0
            return True

        # Ca triplet: mask 5 central pixels of each component
        if (row['wave_center'] < 8508.) and (row['wave_center'] > 8490.):
            weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
            i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
            weights[i - 2:i + 3] = 0
            return True

        if (row['wave_center'] < 8553.) and (row['wave_center'] > 8530.):
            weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
            i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
            weights[i - 2:i + 3] = 0
            return True

        if (row['wave_center'] < 8672.) and (row['wave_center'] > 8651.):
            weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
            i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
            weights[i - 2:i + 3] = 0
            return True

        return False

    def _mask_centers(self, row: pd.Series, lines: Union[list, np.ndarray], weights: np.ndarray, spectrum: Spectrum):
        """Apply user-provided masks given as (center, npix, logg_min, logg_max).

        If the row's range contains a listed center and the star's logg is
        within the line's bounds, the row's weight is added and `npix`
        pixels around the center are zeroed. Returns True on a match.
        """
        for center, npix, logg_min, logg_max in lines:
            if (row['wave_start'] < center) and (row['wave_end'] > center) and (self._logg < logg_max) and (
                    self._logg >= logg_min):
                weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
                i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
                if npix % 2 == 0:
                    weights[int(i-npix//2):int(i+npix//2)] = 0
                else:
                    weights[int(i-npix//2):int(i+npix//2+1)] = 0
                return True

        return False
__all__ = ['WeightFromGridNearest']
| 38.884735 | 125 | 0.557763 | 12,304 | 0.985739 | 0 | 0 | 0 | 0 | 0 | 0 | 3,537 | 0.283368 |
f377fe060e7eef928a73ab32445d3e0a5afd5056 | 4,096 | py | Python | testproject/testapp/tests/test_password_reset.py | d1opensource/djoser | ebdd2f25f84df22891372afb53cc6b956917d1ba | [
"MIT"
] | null | null | null | testproject/testapp/tests/test_password_reset.py | d1opensource/djoser | ebdd2f25f84df22891372afb53cc6b956917d1ba | [
"MIT"
] | null | null | null | testproject/testapp/tests/test_password_reset.py | d1opensource/djoser | ebdd2f25f84df22891372afb53cc6b956917d1ba | [
"MIT"
] | null | null | null | from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core import mail
from django.test.utils import override_settings
from djet import assertions, restframework
from rest_framework import status
import djoser.views
from djoser.compat import get_user_email
from djoser.conf import settings as default_settings
from .common import create_user, mock
from testapp.models import CustomUser
class PasswordResetViewTest(
    restframework.APIViewTestCase,
    assertions.StatusCodeAssertionsMixin,
    assertions.EmailAssertionsMixin,
):
    """Tests for djoser's password reset endpoint."""

    view_class = djoser.views.PasswordResetView

    def test_post_should_send_email_to_user_with_password_reset_link(self):
        """A known address gets exactly one mail mentioning the site."""
        user = create_user()
        request = self.factory.post(data={"email": user.email})
        response = self.view(request)

        self.assert_status_equal(response, status.HTTP_204_NO_CONTENT)
        self.assert_emails_in_mailbox(1)
        self.assert_email_exists(to=[user.email])

        site = get_current_site(request)
        body = mail.outbox[0].body
        self.assertIn(site.domain, body)
        self.assertIn(site.name, body)

    def test_post_send_email_to_user_with_request_domain_and_site_name(self):
        """The reset mail references the host the request came in on."""
        user = create_user()
        request = self.factory.post(data={"email": user.email})
        self.view(request)

        self.assertIn(request.get_host(), mail.outbox[0].body)

    def test_post_should_not_send_email_to_user_if_user_does_not_exist(self):
        """An unknown address produces no outgoing mail."""
        request = self.factory.post(data={"email": "john@beatles.com"})
        response = self.view(request)

        self.assert_status_equal(response, status.HTTP_204_NO_CONTENT)
        self.assert_emails_in_mailbox(0)

    def test_post_should_return_no_content_if_user_does_not_exist(self):
        """Unknown addresses are not disclosed by default: 204 anyway."""
        request = self.factory.post(data={"email": "john@beatles.com"})
        response = self.view(request)

        self.assert_status_equal(response, status.HTTP_204_NO_CONTENT)

    @override_settings(
        DJOSER=dict(settings.DJOSER, **{"PASSWORD_RESET_SHOW_EMAIL_NOT_FOUND": True})
    )
    def test_post_should_return_bad_request_if_user_does_not_exist(self):
        """With the opt-in setting, an unknown address yields 400."""
        request = self.factory.post(data={"email": "john@beatles.com"})
        response = self.view(request)

        self.assert_status_equal(response, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            response.data["email"][0],
            default_settings.CONSTANTS.messages.EMAIL_NOT_FOUND,
        )

    @mock.patch("djoser.serializers.User", CustomUser)
    @mock.patch("djoser.views.User", CustomUser)
    @override_settings(AUTH_USER_MODEL="testapp.CustomUser")
    def test_post_should_send_email_to_custom_user_with_password_reset_link(self):
        """Custom user model with a non-standard email field also works."""
        user = create_user(use_custom_data=True)
        request = self.factory.post(data={"custom_email": get_user_email(user)})
        response = self.view(request)

        self.assert_status_equal(response, status.HTTP_204_NO_CONTENT)
        self.assert_emails_in_mailbox(1)
        self.assert_email_exists(to=[get_user_email(user)])

        site = get_current_site(request)
        body = mail.outbox[0].body
        self.assertIn(site.domain, body)
        self.assertIn(site.name, body)

    @mock.patch("djoser.serializers.User", CustomUser)
    @mock.patch("djoser.views.User", CustomUser)
    @override_settings(
        AUTH_USER_MODEL="testapp.CustomUser",
        DJOSER=dict(settings.DJOSER, **{"PASSWORD_RESET_SHOW_EMAIL_NOT_FOUND": True}),
    )
    def test_post_should_return_bad_request_with_custom_email_field_if_user_does_not_exist(
        self
    ):
        """Unknown custom email plus opt-in setting yields 400 on that field."""
        request = self.factory.post(data={"custom_email": "john@beatles.com"})
        response = self.view(request)

        self.assert_status_equal(response, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            response.data["custom_email"][0],
            default_settings.CONSTANTS.messages.EMAIL_NOT_FOUND,
        )
| 36.247788 | 91 | 0.7146 | 3,655 | 0.892334 | 0 | 0 | 2,017 | 0.492432 | 0 | 0 | 358 | 0.087402 |
f3792dc86555ef5c680134d2150cbf714d907165 | 27,319 | py | Python | sequenceur.py | lperezfr/turbot-toulouse-robot-race | 52043120db44cf3c901429d1990c4c96c9c2b5f6 | [
"Apache-2.0"
] | 1 | 2017-10-06T22:13:27.000Z | 2017-10-06T22:13:27.000Z | sequenceur.py | lperezfr/turbot-toulouse-robot-race | 52043120db44cf3c901429d1990c4c96c9c2b5f6 | [
"Apache-2.0"
] | null | null | null | sequenceur.py | lperezfr/turbot-toulouse-robot-race | 52043120db44cf3c901429d1990c4c96c9c2b5f6 | [
"Apache-2.0"
] | null | null | null | # encoding:utf-8
# Librairies tierces
import time
import os
# Mes classes
from voiture import Voiture
from asservissement import Asservissement
from arduino import Arduino
class Sequenceur:
# General
# --- Rangefinder overshoot detection --------------------------------------
# CONST_NOMBRE_MESURES_DEPASSEMENT_DISTANCE = 1000 # (disabled) number of consecutive rangefinder samples above threshold before an overshoot was considered real
DUREE_DEPASSEMENT_TELEMETRE = 0.1 # Seconds the rangefinder must keep reading an overshoot before it is considered real
DISTANCE_DEPASSEMENT_TELEMETRE_IR = 1 # TODO: re-enable? Minimum IR rangefinder distance required to confirm an overshoot
# --- First straight line ---------------------------------------------------
VITESSE_PREMIERE_LIGNE_DROITE = 50 # 45 for 4.8 s also worked
DUREE_PREMIERE_LIGNE_DROITE = 4.15 # Was 4.5 during the 33 s trial runs
DISTANCE_BORDURE_PREMIERE_LIGNE_DROITE = 30 # Target distance to the kerb (rangefinder units — presumably cm; TODO confirm)
# --- Straight line before the 180-degree turn ------------------------------
VITESSE_LIGNE_DROITE_AVANT_180 = 25
DISTANCE_DECLENCHEMENT_180 = 80 # Rangefinder distance above which the 180-degree turn is triggered
# --- 180-degree turn -------------------------------------------------------
POSITION_ROUES_180_DEBUT = 70
POSITION_ROUES_180_FIN = 25 # Initially 30 or 35, but that cut the corner too close
VITESSE_180_DEBUT = 30
VITESSE_180_FIN = 38
DUREE_LIGNE_DROITE_PENDANT_180 = 0.3
# --- Straight line after the first 180-degree turn -------------------------
# NOTE(review): "PREMiER" (lowercase i) is a typo, but the name is referenced
# by the programme below — if renaming, update both places in one change.
VITESSE_LIGNE_DROITE_APRES_PREMiER_VIRAGE = 45
DISTANCE_BORDURE_APRES_PREMIER_VIRAGE = 30
DUREE_LIGNE_DROITE_SANS_SUIVI_BORDURE_APRES_PREMIER_VIRAGE = 1
DUREE_LIGNE_DROITE_APRES_PREMIER_VIRAGE = 2.5 # Previously 2.5
# --- Chicane ---------------------------------------------------------------
VITESSE_ENTREE_CHICANE = 25
DISTANCE_DECLENCHEMENT_CHICANE = 60
VITESSE_PREMIER_VIRAGE = 42
VITESSE_CHICANE = 40
DUREE_LIGNE_DIAGONALE_CHICANE_1 = 0.7 # 0.9 during the evening trials
DUREE_LIGNE_DIAGONALE_CHICANE_2 = 0.7 # 0.6 during the evening trials
DUREE_LIGNE_DIAGONALE_CHICANE_3 = 0.85 # 0.8 during the evening trials
DUREE_LIGNE_DIAGONALE_CHICANE_4 = 0.75 # 0.6 during the evening trials
# (disabled) alternate chicane tuning kept from earlier trials
#VITESSE_ENTREE_CHICANE = 25
#DISTANCE_DECLENCHEMENT_CHICANE = 20
#VITESSE_PREMIER_VIRAGE = 25
#VITESSE_CHICANE = 25
#VITESSE_CHICANE = 46
#DUREE_LIGNE_DIAGONALE_CHICANE_1 = 1.1
#DUREE_LIGNE_DIAGONALE_CHICANE_2 = 0.7
#DUREE_LIGNE_DIAGONALE_CHICANE_3 = 0.7
#DUREE_LIGNE_DIAGONALE_CHICANE_4 = 0.6
#DELTA_CAP_LIGNE_DIAGONALE = 27
#DUREE_LIGNE_DROITE_CHICANE_1 = 0.35
#DUREE_LIGNE_DROITE_CHICANE_2 = DUREE_LIGNE_DROITE_CHICANE_1
#DUREE_LIGNE_DROITE_CHICANE_3 = DUREE_LIGNE_DROITE_CHICANE_1
#DUREE_LIGNE_DROITE_CHICANE_4 = DUREE_LIGNE_DROITE_CHICANE_1
DELTA_CAP_LIGNE_DIAGONALE = 27 # Heading change (degrees) for each chicane diagonal
DUREE_LIGNE_DROITE_CHICANE_1 = 0.40
DUREE_LIGNE_DROITE_CHICANE_2 = DUREE_LIGNE_DROITE_CHICANE_1 - 0.05
DUREE_LIGNE_DROITE_CHICANE_3 = DUREE_LIGNE_DROITE_CHICANE_1 + 0.25
DUREE_LIGNE_DROITE_CHICANE_4 = DUREE_LIGNE_DROITE_CHICANE_1 - 0.05
# --- Straight line after the chicane, without rangefinder, to stabilise ----
# NOTE(review): these two appear unused by the programme in this file;
# verify against the rest of the class before removing.
VITESSE_LIGNE_DROITE_SORTIE_CHICANE = 45
DUREE_LIGNE_DROITE_SORTIE_CHICANE = 1.0
# --- Kerb-following straight line after the chicane ------------------------
VITESSE_LIGNE_DROITE_APRES_CHICANE = 50
DISTANCE_BORDURE_LIGNE_DROITE_APRES_CHICANE = 30
DUREE_LIGNE_DROITE_APRES_CHICANE = 2.7
# --- Last kerb-following straight line -------------------------------------
VITESSE_DERNIERE_LIGNE_DROITE = 55
DISTANCE_BORDURE_DERNIERE_LIGNE_DROITE = 40
DUREE_LIGNE_DROITE_SANS_SUIVI_BORDURE_APRES_DERNIER_VIRAGE = 1 # Start with a heading-held straight line
DUREE_DERNIERE_LIGNE_DROITE = 4.7 # Then continue with kerb following
# --- Final acceleration ----------------------------------------------------
VITESSE_DERNIERE_LIGNE_DROITE_CAP = 60
DUREE_DERNIERE_LIGNE_DROITE_CAP = 1.7
# --- Slow-down on the final kerb-following straight ------------------------
VITESSE_RALENTISSEMENT_FINAL = 40
DISTANCE_BORDURE_RALENTISSEMENT_FINAL = 30
DUREE_RALENTISSEMENT_FINAL = 1.0
# --- Curve following with the IR rangefinder -------------------------------
VITESSE_SUIVI_COURBE_TELEMETRE_IR = 25
DISTANCE_SUIVI_COURBE_TELEMETRE_IR = 60
DUREE_SUIVI_COURBE_TELEMETRE_IR = 180
# --- Push-button press durations -------------------------------------------
DUREE_APPUI_COURT_REDEMARRAGE = 2 # Seconds the button must be held to reset the program
DUREE_APPUI_LONG_SHUTDOWN = 10 # Seconds the button must be held to shut down the Raspberry Pi
# Race programme: an ordered list of instruction dicts interpreted one at a
# time by execute().  Keys read by execute():
#   'instruction'  : 'tourne' | 'ligneDroite' | 'ligneDroiteTelemetre' |
#                    'suiviCourbeTelemetre' | 'setCap' | 'ajouteCap' |
#                    'attendreGyroStable'
#   'conditionFin' : end condition — 'duree' | 'cap' | 'telemetre' |
#                    'immediat' | 'attendreGyroStable' | 'attendBouton'
#   'label'        : jump target; must be unique across the whole list
#   'nextLabel'    : label of the step to jump to once the end condition is
#                    met (default: simply the next entry)
# FIX: the label 'debutLigneDroite' was used twice (first straight line and
# the straight line after the chicane).  Label lookups in execute() scan the
# whole list, so a duplicate makes the lookup result order-dependent; the
# second occurrence is renamed 'debutLigneDroiteApresChicane'.  No existing
# 'nextLabel' referenced it, so behaviour is unchanged.
programme = [
    ###########################################################
    # Wait for gyro stabilisation - STEP 0
    ###########################################################
    {
        'instruction' : 'attendreGyroStable',  # Wait until the gyro is stable
        'conditionFin' : 'attendreGyroStable'
    },
    {
        'label' : 'attendBouton',
        'instruction' : 'tourne',  # Wait for the button press
        'positionRoues' : 0,
        'vitesse' : 0,
        'conditionFin' : 'attendBouton'
    },
    {
        'instruction' : 'setCap',  # Heading to follow = current heading
        'conditionFin' : 'immediat'
    },
    ###########################################################
    # FIRST STRAIGHT LINE - STEP 1
    ###########################################################
    {
        'label' : 'debutLigneDroite',  # Straight line with kerb following
        'instruction' : 'ligneDroiteTelemetre',
        'vitesse' : VITESSE_PREMIERE_LIGNE_DROITE,
        'distance' : DISTANCE_BORDURE_PREMIERE_LIGNE_DROITE,
        'conditionFin' : 'duree',
        'duree' : DUREE_PREMIERE_LIGNE_DROITE
    },
    {
        'instruction' : 'tourne',  # Brake
        'positionRoues' : 0,
        'vitesse' : -30,
        'conditionFin' : 'duree',
        'duree' : 0.5
    },
    ############ TEST (disabled)
    #{
    #    'instruction' : 'ligneDroiteTelemetre',
    #    'vitesse' : 40, # Max 55 ? 45 plus raisonnable...
    #    'recalageCap' : False,
    #    'distance' : 40,
    #    'antiProche' : False,
    #    'conditionFin' : 'duree',
    #    'duree' : 4, # Fin quand distance telemetre s'envole
    #    'activationDistanceIntegrale' : False,
    #    'nextLabel' : 'arret'
    #},
    ###########################################################
    # 180-DEGREE TURN - STEP 2
    ###########################################################
    {
        'instruction' : 'ligneDroite',  # Heading-held straight line
        'vitesse' : VITESSE_LIGNE_DROITE_AVANT_180,
        'conditionFin' : 'telemetre',
        'distSupA' : DISTANCE_DECLENCHEMENT_180  # Done when the rangefinder distance jumps up
    },
    {
        'instruction' : 'tourne',  # Start the 180-degree turn
        'positionRoues' : POSITION_ROUES_180_DEBUT,
        'vitesse' : VITESSE_180_DEBUT,
        'conditionFin' : 'cap',
        'capFinalMini' : 60,  # Relative to the initial heading; left turn: 180..300, right turn: 60..180
        'capFinalMaxi' : 180,  # Relative to the initial heading
    },
    {
        'instruction' : 'ajouteCap',
        'cap' : 90,
        'conditionFin' : 'immediat',
    },
    {
        'instruction' : 'ligneDroite',  # Heading-held straight line
        'vitesse' : VITESSE_180_DEBUT,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DROITE_PENDANT_180
    },
    {
        'instruction' : 'tourne',  # Then finish the 180-degree turn
        'positionRoues' : POSITION_ROUES_180_FIN,
        'vitesse' : VITESSE_180_FIN,
        'conditionFin' : 'cap',
        'capFinalMini' : 60,  # Relative to the initial heading
        'capFinalMaxi' : 180  # Relative to the initial heading
    },
    {
        'instruction' : 'ajouteCap',
        'cap' : 90,
        'conditionFin' : 'immediat',
    },
    ###########################################################
    # KERB-FOLLOWING STRAIGHT LINE - STEP 3
    ###########################################################
    {
        'instruction' : 'ligneDroite',  # No kerb following yet, to exit the turn cleanly
        'vitesse' : VITESSE_LIGNE_DROITE_APRES_PREMiER_VIRAGE,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DROITE_SANS_SUIVI_BORDURE_APRES_PREMIER_VIRAGE
    },
    {
        'label' : 'debutLigneDroiteSortieVirage',  # Kerb following without heading re-alignment
        'instruction' : 'ligneDroiteTelemetre',
        'recalageCap' : False,
        'activationDistanceIntegrale' : True,
        'vitesse' : VITESSE_LIGNE_DROITE_APRES_PREMiER_VIRAGE,
        'distance' : DISTANCE_BORDURE_APRES_PREMIER_VIRAGE,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DROITE_APRES_PREMIER_VIRAGE
    },
    ###########################################################
    # CHICANES - STEP 4
    ###########################################################
    {
        'instruction' : 'tourne',  # Brake
        'positionRoues' : 0,
        'vitesse' : -5,
        'conditionFin' : 'duree',
        'duree' : 0.3
    },
    {
        'instruction' : 'ligneDroite',  # Straight line
        'vitesse' : VITESSE_ENTREE_CHICANE,
        'conditionFin' : 'telemetre',
        'distSupA' : DISTANCE_DECLENCHEMENT_CHICANE  # Done when the rangefinder distance jumps up
    },
    # FIRST CHICANE
    {
        'instruction' : 'ajouteCap',  # 1st diagonal, to the right
        'cap' : DELTA_CAP_LIGNE_DIAGONALE,
        'conditionFin' : 'immediat',
    },
    {
        'instruction' : 'ligneDroite',
        'vitesse' : VITESSE_PREMIER_VIRAGE,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DIAGONALE_CHICANE_1
    },
    {
        'instruction' : 'ajouteCap',
        'cap' : -DELTA_CAP_LIGNE_DIAGONALE,
        'conditionFin' : 'immediat',
    },
    {
        'instruction' : 'ligneDroite',  # Chicane straight segment
        'vitesse' : VITESSE_CHICANE,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DROITE_CHICANE_1
    },
    # SECOND CHICANE
    {
        'instruction' : 'ajouteCap',  # 2nd diagonal, to the left
        'cap' : -DELTA_CAP_LIGNE_DIAGONALE,
        'conditionFin' : 'immediat',
    },
    {
        'instruction' : 'ligneDroite',
        'vitesse' : VITESSE_CHICANE,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DIAGONALE_CHICANE_2
    },
    {
        'instruction' : 'ajouteCap',
        'cap' : +DELTA_CAP_LIGNE_DIAGONALE,
        'conditionFin' : 'immediat',
    },
    {
        'instruction' : 'ligneDroite',  # Chicane straight segment
        'vitesse' : VITESSE_CHICANE,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DROITE_CHICANE_2
    },
    # THIRD CHICANE
    {
        'instruction' : 'ajouteCap',  # 3rd diagonal, to the right
        'cap' : DELTA_CAP_LIGNE_DIAGONALE,
        'conditionFin' : 'immediat',
    },
    {
        'instruction' : 'ligneDroite',
        'vitesse' : VITESSE_CHICANE,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DIAGONALE_CHICANE_3
    },
    {
        'instruction' : 'ajouteCap',
        'cap' : -DELTA_CAP_LIGNE_DIAGONALE,
        'conditionFin' : 'immediat',
    },
    {
        'instruction' : 'ligneDroite',  # Chicane straight segment
        'vitesse' : VITESSE_CHICANE,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DROITE_CHICANE_3
    },
    # FOURTH CHICANE
    {
        'instruction' : 'ajouteCap',  # 4th diagonal, to the left
        'cap' : -DELTA_CAP_LIGNE_DIAGONALE,
        'conditionFin' : 'immediat',
    },
    {
        'instruction' : 'ligneDroite',
        'vitesse' : VITESSE_CHICANE,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DIAGONALE_CHICANE_4
    },
    {
        'instruction' : 'ajouteCap',
        'cap' : DELTA_CAP_LIGNE_DIAGONALE,
        'conditionFin' : 'immediat',
    },
    {
        'instruction' : 'ligneDroite',  # Chicane straight segment (TODO: check whether this is useful)
        'vitesse' : VITESSE_CHICANE,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DROITE_CHICANE_4
    },
    ###########################################################
    # STRAIGHT LINE AFTER THE CHICANE - STEP 5
    ###########################################################
    {
        'label' : 'debutLigneDroiteApresChicane',  # Kerb-following straight line (renamed from duplicate 'debutLigneDroite')
        'instruction' : 'ligneDroiteTelemetre',
        'vitesse' : VITESSE_LIGNE_DROITE_APRES_CHICANE,
        'distance' : DISTANCE_BORDURE_LIGNE_DROITE_APRES_CHICANE,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DROITE_APRES_CHICANE
    },
    {
        'instruction' : 'tourne',  # Brake
        'positionRoues' : 0,
        'vitesse' : -30,
        'distance' : DISTANCE_BORDURE_LIGNE_DROITE_APRES_CHICANE,  # NOTE(review): ignored by the 'tourne' instruction
        'conditionFin' : 'duree',
        'duree' : 0.5
    },
    ###########################################################
    # SECOND 180-DEGREE TURN - STEP 6
    ###########################################################
    {
        'instruction' : 'ligneDroite',  # Heading-held straight line
        'vitesse' : VITESSE_LIGNE_DROITE_AVANT_180,
        'conditionFin' : 'telemetre',
        'distSupA' : DISTANCE_DECLENCHEMENT_180  # Done when the rangefinder distance jumps up
    },
    {
        'instruction' : 'tourne',  # Start the 180-degree turn
        'positionRoues' : POSITION_ROUES_180_DEBUT,
        'vitesse' : VITESSE_180_DEBUT,
        'conditionFin' : 'cap',
        'capFinalMini' : 60,  # Relative to the initial heading; left turn: 180..300, right turn: 60..180
        'capFinalMaxi' : 180,  # Relative to the initial heading
    },
    {
        'instruction' : 'ajouteCap',
        'cap' : 90,
        'conditionFin' : 'immediat',
    },
    {
        'instruction' : 'ligneDroite',  # Heading-held straight line
        'vitesse' : VITESSE_180_DEBUT,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DROITE_PENDANT_180 + 0.1  # +0.1 because the track is banked here
    },
    {
        'instruction' : 'tourne',  # Then finish the 180-degree turn
        'positionRoues' : POSITION_ROUES_180_FIN,
        'vitesse' : VITESSE_180_FIN,
        'conditionFin' : 'cap',
        'capFinalMini' : 60,  # Relative to the initial heading
        'capFinalMaxi' : 180  # Relative to the initial heading
    },
    {
        'instruction' : 'ajouteCap',
        'cap' : 90,
        'conditionFin' : 'immediat',
    },
    ###########################################################
    # KERB-FOLLOWING STRAIGHT LINE - STEP 7
    ###########################################################
    {
        'instruction' : 'ligneDroite',  # Heading-held straight to exit the turn cleanly
        'vitesse' : VITESSE_DERNIERE_LIGNE_DROITE,
        'conditionFin' : 'duree',
        'duree' : DUREE_LIGNE_DROITE_SANS_SUIVI_BORDURE_APRES_DERNIER_VIRAGE
    },
    {
        'instruction' : 'ligneDroiteTelemetre',  # Kerb-following straight line
        'vitesse' : VITESSE_DERNIERE_LIGNE_DROITE,
        'distance' : DISTANCE_BORDURE_DERNIERE_LIGNE_DROITE,
        'conditionFin' : 'duree',
        'duree' : DUREE_DERNIERE_LIGNE_DROITE
    },
    {
        'instruction' : 'ajouteCap',  # Hack to correct a heading bias
        'cap' : -2,
        'conditionFin' : 'immediat',
    },
    {
        'instruction' : 'ligneDroite',  # Heading-held straight line
        'vitesse' : VITESSE_DERNIERE_LIGNE_DROITE_CAP,
        'conditionFin' : 'duree',
        'duree' : DUREE_DERNIERE_LIGNE_DROITE_CAP
    },
    {
        'instruction' : 'tourne',  # Brake
        'positionRoues' : 0,
        'vitesse' : -10,
        'conditionFin' : 'duree',
        'duree' : 0.3
    },
    {
        'instruction' : 'ligneDroiteTelemetre',  # Kerb-following straight line
        'vitesse' : VITESSE_RALENTISSEMENT_FINAL,
        'distance' : DISTANCE_BORDURE_RALENTISSEMENT_FINAL,
        'conditionFin' : 'duree',
        'duree' : DUREE_RALENTISSEMENT_FINAL
    },
    {
        'instruction' : 'tourne',  # Brake
        'positionRoues' : 0,
        'vitesse' : -30,
        'distance' : DISTANCE_BORDURE_PREMIERE_LIGNE_DROITE,  # NOTE(review): ignored by the 'tourne' instruction
        'conditionFin' : 'duree',
        'duree' : 0.5
    },
    {
        'instruction' : 'ligneDroite',  # Heading-held straight line
        'vitesse' : VITESSE_LIGNE_DROITE_AVANT_180,
        'conditionFin' : 'telemetre',
        'distSupA' : DISTANCE_DECLENCHEMENT_180  # Done when the rangefinder distance jumps up
    },
    ###########################################################
    # BRAKE THEN STOP
    ###########################################################
    {
        'label' : 'arret',
        'instruction' : 'tourne',  # Braking
        'vitesse' : -30,
        'positionRoues' : 0,
        'conditionFin' : 'duree',
        'duree' : 0.5
    },
    {
        'instruction' : 'tourne',  # Stop with the wheels at 0
        'vitesse' : 0,
        'positionRoues' : 0,
        'conditionFin' : 'duree',
        'duree' : 1.5,
        'nextLabel' : 'attendBouton'  # Loop back to the start
    }
]
# --- Sequencer state (defaults; instances rebind these via self.*) ---------
sequence = 0  # Index of the current instruction in ``programme``
debut = True  # True on the first execute() tick of a new instruction
timeDebut = 0  # time.time() at which the current instruction started
programmeCourant = {}  # Current instruction dict (programme[sequence])
voiture = None  # Car hardware interface; set in __init__
asservissement = None  # Control-loop object — not set in __init__; presumably injected elsewhere (TODO confirm)
last_mesure_depassement = False  # True if the previous rangefinder sample exceeded the threshold (used by the disabled debounce code)
time_debut_depassement = 0  # time.time() when the current overshoot streak began
last_mesure_telemetre1 = 0  # Previous telemetre1 reading (debug trace in the disabled code)
timer_led = 0  # time.time() of the last LED toggle
vitesse_clignote_led = 10  # LED blink half-period, in seconds
led_clignote = True  # True = blink the LED, False = steady on
last_led = 0  # Last LED state written (0 or 1)
timer_bouton = 0  # time.time() at which the current button press started
last_bouton = 1 # 1 = button released, 0 = button pressed
flag_appui_court = False # Set to True once a short press has been detected (comment originally said 3 s; DUREE_APPUI_COURT_REDEMARRAGE is actually 2 s)
def __init__(self, voiture):
    """Store the car hardware interface used by execute().

    :param voiture: object exposing at least setLed(), getBoutonPoussoir(),
        avance() and tourne() (see execute() for the calls made on it).
    """
    self.voiture = voiture
def execute(self):
    """Run one sequencer tick.

    Drives the status LED, handles the push button (short press restarts
    the programme, long press shuts the Raspberry Pi down), initialises the
    current programme instruction on its first tick, then checks the
    instruction's end condition and advances ``self.sequence`` when it is
    met.  Meant to be called repeatedly from the main control loop.

    Relies on ``self.voiture`` (car hardware), ``self.arduino`` and
    ``self.asservissement``; the last two are not assigned in ``__init__``
    and are presumably injected elsewhere — TODO confirm.

    Fixes vs the previous version: ``break`` added to both label-search
    loops (the full-list scan made duplicate labels resolve to the *last*
    match), Python-2-only ``print`` statements rewritten in a form that
    also compiles under Python 3, and dead ``pass`` statements removed.
    Behaviour is otherwise unchanged.
    """
    # --- Status LED: blink while waiting, steady on otherwise -------------
    if self.led_clignote:
        if time.time() > self.timer_led + self.vitesse_clignote_led:
            self.timer_led = time.time()
            self.last_led = 0 if self.last_led else 1
            self.voiture.setLed(self.last_led)
    else:
        self.voiture.setLed(1)
    # --- Push button: short press restarts, long press shuts down ---------
    if self.voiture.getBoutonPoussoir() == 0:
        if self.last_bouton == 1:
            # Falling edge: remember when the press started.
            self.timer_bouton = time.time()
        else:
            if time.time() > self.timer_bouton + self.DUREE_APPUI_COURT_REDEMARRAGE:
                # Short press reached: stop the car and flag the restart.
                self.voiture.avance(0)
                self.voiture.tourne(0)
                self.vitesse_clignote_led = 0.3
                self.led_clignote = True
                self.flag_appui_court = True
            if time.time() > self.timer_bouton + self.DUREE_APPUI_LONG_SHUTDOWN:
                # Long press: power the Raspberry Pi off.
                os.system('sudo shutdown -h now')
        self.last_bouton = 0
    else:
        self.last_bouton = 1
        if self.flag_appui_court:
            # A short press was detected before the button was released:
            # jump back to the 'attendBouton' step.
            self.flag_appui_court = False
            for i in range(len(self.programme)):
                if 'label' in self.programme[i]:
                    if self.programme[i]['label'] == 'attendBouton':
                        # Found the target step.
                        self.sequence = i
                        break  # first match wins; no need to scan further
            self.debut = True
    if self.debut:
        # First tick of the current instruction: set everything up.
        self.programmeCourant = self.programme[self.sequence]
        instruction = self.programmeCourant['instruction']
        print("********** Nouvelle instruction ***********  %s" % (instruction,))
        self.timeDebut = time.time()
        self.debut = False
        self.arduino.annuleRecalageCap()
        self.asservissement.cumulErreurCap = 0
        self.last_mesure_depassement = False
        # Make the current heading the heading to follow.
        if instruction == 'setCap':
            self.asservissement.setCapTarget()
        # Program the car speed for every moving instruction.
        if instruction in ('ligneDroite', 'ligneDroiteTelemetre', 'tourne', 'suiviCourbeTelemetre'):
            vitesse = self.programmeCourant['vitesse']
            print("Vitesse :  %s" % (vitesse,))
            self.voiture.avance(vitesse)
            self.asservissement.setVitesse(vitesse)
        # Position the wheels for the 'tourne' instruction.
        if instruction == 'tourne':
            positionRoues = self.programmeCourant['positionRoues']
            print("Position roues :  %s" % (positionRoues,))
            self.voiture.tourne(positionRoues)
        # Add an offset to the target heading for 'ajouteCap'.
        if instruction == 'ajouteCap':
            self.asservissement.ajouteCap(self.programmeCourant['cap'])
        # Tell the control loop whether it must steer, and with which algo.
        if instruction == 'ligneDroite':
            self.asservissement.initLigneDroite()
        elif instruction == 'ligneDroiteTelemetre':
            recalageCap = False
            if 'recalageCap' in self.programmeCourant:
                recalageCap = self.programmeCourant['recalageCap']
            activationDistanceIntegrale = False
            if 'activationDistanceIntegrale' in self.programmeCourant:
                activationDistanceIntegrale = self.programmeCourant['activationDistanceIntegrale']
            antiProche = False
            if 'antiProche' in self.programmeCourant:
                antiProche = self.programmeCourant['antiProche']
                # Never combine integral distance correction with the
                # antiProche protection.
                # NOTE(review): nesting reconstructed from mangled
                # indentation — the force-off is applied whenever the
                # 'antiProche' key is present; confirm against the original.
                activationDistanceIntegrale = False
            self.asservissement.initLigneDroiteTelemetre(self.programmeCourant['distance'], recalageCap, activationDistanceIntegrale, antiProche)
        elif instruction == 'suiviCourbeTelemetre':
            self.asservissement.initCourbeTelemetre(self.programmeCourant['distance'])
        else:
            self.asservissement.annuleLigneDroite()
    # (No per-tick work while an instruction is running; the control loop
    # object does the steering.)
    # --- End-of-instruction check ------------------------------------------
    finSequence = False
    conditionFin = self.programmeCourant['conditionFin']
    if conditionFin == 'attendreGyroStable':
        if self.arduino.gyroX != 0.0:
            # The Arduino managed to acquire the gyro: say so through the
            # LED blink rate.
            self.vitesse_clignote_led = 1.5
        finSequence = self.arduino.checkGyroStable()
    elif conditionFin == 'cap':
        capFinalMini = self.programmeCourant['capFinalMini']
        capFinalMaxi = self.programmeCourant['capFinalMaxi']
        if self.asservissement.checkDeltaCapAtteint(capFinalMini, capFinalMaxi):
            finSequence = True
    elif conditionFin == 'duree':
        if (time.time() - self.timeDebut) > self.programmeCourant['duree']:
            finSequence = True
    elif conditionFin == 'immediat':
        finSequence = True
    elif conditionFin == 'telemetre':
        # Done when the rangefinder distance jumps past 'distSupA'.
        if self.arduino.bestTelemetrePourDetectionVirage() > self.programmeCourant['distSupA']:
            finSequence = True
        # (disabled) debounced version: require the overshoot to last
        # DUREE_DEPASSEMENT_TELEMETRE seconds and be confirmed by the IR
        # rangefinder before ending the step.
        #if self.last_mesure_depassement:
        #    if self.last_mesure_telemetre1 != self.arduino.telemetre1:
        #        print "Telemetre1 : ", self.arduino.telemetre1, " Distance a depasser : ", self.programmeCourant['distSupA']
        #        self.last_mesure_telemetre1 = self.arduino.telemetre1
        #    if (time.time() > self.time_debut_depassement + self.DUREE_DEPASSEMENT_TELEMETRE) and (self.arduino.telemetreIR > self.DISTANCE_DEPASSEMENT_TELEMETRE_IR):
        #        finSequence = True
        #else:
        #    self.time_debut_depassement = time.time()
        #self.last_mesure_depassement = True
        #else:
        #    self.last_mesure_depassement = False
    elif conditionFin == 'attendBouton':
        # Blink fast while waiting; a button press (0) starts the run.
        self.vitesse_clignote_led = 0.3
        self.led_clignote = True
        if self.voiture.getBoutonPoussoir() == 0:
            self.led_clignote = False
            finSequence = True
    if finSequence:
        if 'nextLabel' in self.programmeCourant:
            # Explicit jump: look up the step carrying that label.
            nextLabel = self.programmeCourant['nextLabel']
            for i in range(len(self.programme)):
                if 'label' in self.programme[i]:
                    if self.programme[i]['label'] == nextLabel:
                        # Found the target step.
                        self.sequence = i
                        break  # first match wins; no need to scan further
        else:
            # No nextLabel: simply move on to the following entry.
            self.sequence += 1
        self.debut = True
| 39.535456 | 169 | 0.538856 | 27,138 | 0.992902 | 0 | 0 | 0 | 0 | 0 | 0 | 11,418 | 0.417752 |
f37a1381f4d4b5acbdd70be0b55f313823fcb65a | 15,325 | py | Python | ddtrace/internal/wrapping.py | ysk24ok/dd-trace-py | 9d76e3b27c0e90a45721988f2008362683da8bb0 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ddtrace/internal/wrapping.py | ysk24ok/dd-trace-py | 9d76e3b27c0e90a45721988f2008362683da8bb0 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2022-02-16T09:35:37.000Z | 2022-03-04T16:48:45.000Z | ddtrace/internal/wrapping.py | goodspark/dd-trace-py | e2089c7b348e9d1a70e01f96927d85a643d6ae56 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2022-02-11T16:34:22.000Z | 2022-02-11T16:34:22.000Z | import sys
from types import FunctionType
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import cast
from six import PY3
try:
from typing import Protocol
except ImportError:
from typing_extensions import Protocol # type: ignore[misc]
from bytecode import Bytecode
from bytecode import Compare
from bytecode import CompilerFlags
from bytecode import Instr
from bytecode import Label
from .compat import PYTHON_VERSION_INFO as PY
class WrapperFunction(Protocol):
"""A wrapped function."""
__dd_wrapped__ = None # type: Optional[FunctionType]
__dd_wrappers__ = None # type: Optional[Dict[Any, Any]]
def __call__(self, *args, **kwargs):
pass
Wrapper = Callable[[FunctionType, Tuple[Any], Dict[str, Any]], Any]
def wrap_bytecode(wrapper, wrapped):
# type: (Wrapper, FunctionType) -> Bytecode
"""Wrap a function with a wrapper function.
The wrapper function expects the wrapped function as the first argument,
followed by the tuple of arguments and the dictionary of keyword arguments.
The nature of the wrapped function is also honored, meaning that a generator
function will return a generator function, and a coroutine function will
return a coroutine function, and so on. The signature is also preserved to
avoid breaking, e.g., usages of the ``inspect`` module.
"""
def compare_exc(label, lineno):
"""Compat helper for comparing exceptions."""
return (
Instr("COMPARE_OP", Compare.EXC_MATCH, lineno=lineno)
if PY < (3, 9)
else Instr("JUMP_IF_NOT_EXC_MATCH", label, lineno=lineno)
)
def jump_if_false(label, lineno):
"""Compat helper for jumping if false after comparing exceptions."""
return Instr("POP_JUMP_IF_FALSE", label, lineno=lineno) if PY < (3, 9) else Instr("NOP", lineno=lineno)
def end_finally(lineno):
"""Compat helper for ending finally blocks."""
if PY < (3, 9):
return Instr("END_FINALLY", lineno=lineno)
elif PY < (3, 10):
return Instr("RERAISE", lineno=lineno)
return Instr("RERAISE", 0, lineno=lineno)
code = wrapped.__code__
lineno = code.co_firstlineno
varargs = bool(code.co_flags & CompilerFlags.VARARGS)
varkwargs = bool(code.co_flags & CompilerFlags.VARKEYWORDS)
nargs = code.co_argcount
argnames = code.co_varnames[:nargs]
try:
kwonlyargs = code.co_kwonlyargcount
except AttributeError:
kwonlyargs = 0
kwonlyargnames = code.co_varnames[nargs : nargs + kwonlyargs]
varargsname = code.co_varnames[nargs + kwonlyargs] if varargs else None
varkwargsname = code.co_varnames[nargs + kwonlyargs + varargs] if varkwargs else None
# Push the wrapper function that is to be called and the wrapped function to
# be passed as first argument.
instrs = [
Instr("LOAD_CONST", wrapper, lineno=lineno),
Instr("LOAD_CONST", wrapped, lineno=lineno),
]
# Build the tuple of all the positional arguments
if nargs:
instrs.extend([Instr("LOAD_FAST", argname, lineno=lineno) for argname in argnames])
instrs.append(Instr("BUILD_TUPLE", nargs, lineno=lineno))
if varargs:
instrs.extend(
[
Instr("LOAD_FAST", varargsname, lineno=lineno),
Instr("INPLACE_ADD", lineno=lineno),
]
)
elif varargs:
instrs.append(Instr("LOAD_FAST", varargsname, lineno=lineno))
else:
instrs.append(Instr("BUILD_TUPLE", 0, lineno=lineno))
# Prepare the keyword arguments
if kwonlyargs:
for arg in kwonlyargnames:
instrs.extend(
[
Instr("LOAD_CONST", arg, lineno=lineno),
Instr("LOAD_FAST", arg, lineno=lineno),
]
)
instrs.append(Instr("BUILD_MAP", kwonlyargs, lineno=lineno))
if varkwargs:
instrs.extend(
[
Instr("DUP_TOP", lineno=lineno),
Instr("LOAD_ATTR", "update", lineno=lineno),
Instr("LOAD_FAST", varkwargsname, lineno=lineno),
Instr("CALL_FUNCTION", 1, lineno=lineno),
Instr("POP_TOP", lineno=lineno),
]
)
elif varkwargs:
instrs.append(Instr("LOAD_FAST", varkwargsname, lineno=lineno))
else:
instrs.append(Instr("BUILD_MAP", 0, lineno=lineno))
# Call the wrapper function with the wrapped function, the positional and
# keyword arguments, and return the result.
instrs.extend(
[
Instr("CALL_FUNCTION", 3, lineno=lineno),
Instr("RETURN_VALUE", lineno=lineno),
]
)
# If the function has special flags set, like the generator, async generator
# or coroutine, inject unraveling code before the return opcode.
if CompilerFlags.GENERATOR & code.co_flags and not (CompilerFlags.COROUTINE & code.co_flags):
stopiter = Label()
loop = Label()
genexit = Label()
exc = Label()
propagate = Label()
# DEV: This is roughly equivalent to
#
# __ddgen = wrapper(wrapped, args, kwargs)
# __ddgensend = __ddgen.send
# try:
# value = next(__ddgen)
# except StopIteration:
# return
# while True:
# try:
# tosend = yield value
# except GeneratorExit:
# return __ddgen.close()
# except:
# return __ddgen.throw(*sys.exc_info())
# try:
# value = __ddgensend(tosend)
# except StopIteration:
# return
#
instrs[-1:-1] = [
Instr("DUP_TOP", lineno=lineno),
Instr("STORE_FAST", "__ddgen", lineno=lineno),
Instr("LOAD_ATTR", "send", lineno=lineno),
Instr("STORE_FAST", "__ddgensend", lineno=lineno),
Instr("LOAD_CONST", next, lineno=lineno),
Instr("LOAD_FAST", "__ddgen", lineno=lineno),
loop,
Instr("SETUP_EXCEPT" if PY < (3, 8) else "SETUP_FINALLY", stopiter, lineno=lineno),
Instr("CALL_FUNCTION", 1, lineno=lineno),
Instr("POP_BLOCK", lineno=lineno),
Instr("SETUP_EXCEPT" if PY < (3, 8) else "SETUP_FINALLY", genexit, lineno=lineno),
Instr("YIELD_VALUE", lineno=lineno),
Instr("POP_BLOCK", lineno=lineno),
Instr("LOAD_FAST", "__ddgensend", lineno=lineno),
Instr("ROT_TWO", lineno=lineno),
Instr("JUMP_ABSOLUTE", loop, lineno=lineno),
stopiter, # except StpIteration:
Instr("DUP_TOP", lineno=lineno),
Instr("LOAD_CONST", StopIteration, lineno=lineno),
compare_exc(propagate, lineno),
jump_if_false(propagate, lineno),
Instr("POP_TOP", lineno=lineno),
Instr("POP_TOP", lineno=lineno),
Instr("POP_TOP", lineno=lineno),
Instr("LOAD_CONST", None, lineno=lineno),
Instr("RETURN_VALUE", lineno=lineno),
propagate,
end_finally(lineno),
Instr("LOAD_CONST", None, lineno=lineno),
Instr("RETURN_VALUE", lineno=lineno),
genexit, # except GeneratorExit:
Instr("DUP_TOP", lineno=lineno),
Instr("LOAD_CONST", GeneratorExit, lineno=lineno),
compare_exc(exc, lineno),
jump_if_false(exc, lineno),
Instr("POP_TOP", lineno=lineno),
Instr("POP_TOP", lineno=lineno),
Instr("POP_TOP", lineno=lineno),
Instr("LOAD_FAST", "__ddgen", lineno=lineno),
Instr("LOAD_ATTR", "close", lineno=lineno),
Instr("CALL_FUNCTION", 0, lineno=lineno),
Instr("RETURN_VALUE", lineno=lineno),
exc, # except:
Instr("POP_TOP", lineno=lineno),
Instr("POP_TOP", lineno=lineno),
Instr("POP_TOP", lineno=lineno),
Instr("LOAD_FAST", "__ddgen", lineno=lineno),
Instr("LOAD_ATTR", "throw", lineno=lineno),
Instr("LOAD_CONST", sys.exc_info, lineno=lineno),
Instr("CALL_FUNCTION", 0, lineno=lineno),
Instr("CALL_FUNCTION_VAR" if PY < (3, 6) else "CALL_FUNCTION_EX", 0, lineno=lineno),
]
elif PY3:
if CompilerFlags.COROUTINE & code.co_flags:
# DEV: This is just
# >>> return await wrapper(wrapped, args, kwargs)
instrs[-1:-1] = [
Instr("GET_AWAITABLE", lineno=lineno),
Instr("LOAD_CONST", None, lineno=lineno),
Instr("YIELD_FROM", lineno=lineno),
]
elif CompilerFlags.ASYNC_GENERATOR & code.co_flags:
stopiter = Label()
loop = Label()
genexit = Label()
exc = Label()
propagate = Label()
# DEV: This is roughly equivalent to
#
# __ddgen = wrapper(wrapped, args, kwargs)
# __ddgensend = __ddgen.asend
# try:
# value = await _ddgen.__anext__()
# except StopAsyncIteration:
# return
# while True:
# try:
# tosend = yield value
# except GeneratorExit:
# __ddgen.close()
# except:
# __ddgen.throw(*sys.exc_info())
# try:
# value = await __ddgensend(tosend)
# except StopAsyncIteration:
# return
#
instrs[-1:-1] = [
Instr("DUP_TOP", lineno=lineno),
Instr("STORE_FAST", "__ddgen", lineno=lineno),
Instr("LOAD_ATTR", "asend", lineno=lineno),
Instr("STORE_FAST", "__ddgensend", lineno=lineno),
Instr("LOAD_FAST", "__ddgen", lineno=lineno),
Instr("LOAD_ATTR", "__anext__", lineno=lineno),
Instr("CALL_FUNCTION", 0, lineno=lineno),
loop,
Instr("GET_AWAITABLE", lineno=lineno),
Instr("LOAD_CONST", None, lineno=lineno),
Instr("SETUP_EXCEPT" if PY < (3, 8) else "SETUP_FINALLY", stopiter, lineno=lineno),
Instr("YIELD_FROM", lineno=lineno),
Instr("POP_BLOCK", lineno=lineno),
Instr("SETUP_EXCEPT" if PY < (3, 8) else "SETUP_FINALLY", genexit, lineno=lineno),
Instr("YIELD_VALUE", lineno=lineno),
Instr("POP_BLOCK", lineno=lineno),
Instr("LOAD_FAST", "__ddgensend", lineno=lineno),
Instr("ROT_TWO", lineno=lineno),
Instr("CALL_FUNCTION", 1, lineno=lineno),
Instr("JUMP_ABSOLUTE", loop, lineno=lineno),
stopiter, # except StopAsyncIteration:
Instr("DUP_TOP", lineno=lineno),
Instr("LOAD_CONST", StopAsyncIteration, lineno=lineno),
compare_exc(propagate, lineno),
jump_if_false(propagate, lineno),
Instr("POP_TOP", lineno=lineno),
Instr("POP_TOP", lineno=lineno),
Instr("POP_TOP", lineno=lineno),
Instr("LOAD_CONST", None, lineno=lineno),
Instr("RETURN_VALUE", lineno=lineno),
propagate, # finally:
end_finally(lineno),
Instr("LOAD_CONST", None, lineno=lineno),
Instr("RETURN_VALUE", lineno=lineno),
genexit, # except GeneratorExit:
Instr("DUP_TOP", lineno=lineno),
Instr("LOAD_CONST", GeneratorExit, lineno=lineno),
compare_exc(exc, lineno),
jump_if_false(exc, lineno),
Instr("POP_TOP", lineno=lineno),
Instr("POP_TOP", lineno=lineno),
Instr("POP_TOP", lineno=lineno),
Instr("LOAD_FAST", "__ddgen", lineno=lineno),
Instr("LOAD_ATTR", "aclose", lineno=lineno),
Instr("CALL_FUNCTION", 0, lineno=lineno),
Instr("GET_AWAITABLE", lineno=lineno),
Instr("LOAD_CONST", None, lineno=lineno),
Instr("YIELD_FROM", lineno=lineno),
Instr("POP_EXCEPT", lineno=lineno),
Instr("RETURN_VALUE", lineno=lineno),
exc, # except:
Instr("POP_TOP", lineno=lineno),
Instr("POP_TOP", lineno=lineno),
Instr("POP_TOP", lineno=lineno),
Instr("LOAD_FAST", "__ddgen", lineno=lineno),
Instr("LOAD_ATTR", "athrow", lineno=lineno),
Instr("LOAD_CONST", sys.exc_info, lineno=lineno),
Instr("CALL_FUNCTION", 0, lineno=lineno),
Instr("CALL_FUNCTION_EX", 0, lineno=lineno),
Instr("GET_AWAITABLE", lineno=lineno),
Instr("LOAD_CONST", None, lineno=lineno),
Instr("YIELD_FROM", lineno=lineno),
Instr("POP_EXCEPT", lineno=lineno),
]
return Bytecode(instrs)
def wrap(f, wrapper):
# type: (FunctionType, Wrapper) -> WrapperFunction
"""Wrap a function with a wrapper.
The wrapper expects the function as first argument, followed by the tuple
of positional arguments and the dict of keyword arguments.
Note that this changes the behavior of the original function with the
wrapper function, instead of creating a new function object.
"""
wrapped = FunctionType(
f.__code__,
f.__globals__,
"<wrapped>",
f.__defaults__,
f.__closure__,
)
if PY3:
wrapped.__kwdefaults__ = f.__kwdefaults__
code = wrap_bytecode(wrapper, wrapped)
code.freevars = f.__code__.co_freevars
code.name = f.__code__.co_name
code.filename = f.__code__.co_filename
code.flags = f.__code__.co_flags
code.argcount = f.__code__.co_argcount
try:
code.posonlyargcount = f.__code__.co_posonlyargcount
except AttributeError:
pass
nargs = code.argcount
try:
code.kwonlyargcount = f.__code__.co_kwonlyargcount
nargs += code.kwonlyargcount
except AttributeError:
pass
nargs += bool(code.flags & CompilerFlags.VARARGS) + bool(code.flags & CompilerFlags.VARKEYWORDS)
code.argnames = f.__code__.co_varnames[:nargs]
f.__code__ = code.to_code()
wf = cast(WrapperFunction, f)
wf.__dd_wrapped__ = wrapped
return wf
def unwrap(wrapper):
# type: (WrapperFunction) -> FunctionType
"""Unwrap a wrapped function.
This is the reverse of :func:`wrap`.
"""
try:
wrapped = cast(FunctionType, wrapper.__dd_wrapped__)
assert wrapped, "Wrapper has wrapped function"
if wrapped.__name__ == "<wrapped>":
f = cast(FunctionType, wrapper)
f.__code__ = wrapped.__code__
del wrapper.__dd_wrapped__
except (IndexError, AttributeError):
return cast(FunctionType, wrapper)
return f
| 38.797468 | 111 | 0.578336 | 237 | 0.015465 | 0 | 0 | 0 | 0 | 0 | 0 | 4,616 | 0.301207 |
f37b98a0e9ce5d992118a7edfb6f656544e568ae | 1,354 | py | Python | setup.py | davidfraser/WSGIUtils | 5520779cae5a1032b24214c871860ca8cb4a30ed | [
"BSD-3-Clause"
] | null | null | null | setup.py | davidfraser/WSGIUtils | 5520779cae5a1032b24214c871860ca8cb4a30ed | [
"BSD-3-Clause"
] | 1 | 2018-08-14T08:48:03.000Z | 2018-08-14T08:48:03.000Z | setup.py | davidfraser/WSGIUtils | 5520779cae5a1032b24214c871860ca8cb4a30ed | [
"BSD-3-Clause"
] | 2 | 2019-02-15T19:34:03.000Z | 2019-06-29T18:24:13.000Z | #!/usr/bin/env python
import sys, os
sys.path.insert(0, os.path.join(os.getcwd(),'lib'))
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import wsgiutils
try:
os.remove ('MANIFEST')
except:
pass
with open(os.path.join(os.getcwd(), 'README.txt'), 'r') as _readme:
long_description = _readme.read()
setup(name="WSGIUtils",
version= wsgiutils.__version__,
description="WSGI Utils are a collection of useful libraries for use in a WSGI environnment.",
long_description=long_description,
author="Colin Stewart",
author_email="colin@owlfish.com",
license="BSD-3-Clause",
url="https://www.owlfish.com/software/wsgiutils/index.html",
project_urls={
"Source Code": "https://github.com/davidfraser/WSGIUtils/",
"Documentation": "https://www.owlfish.com/software/wsgiutils/documentation/index.html",
},
packages=[
'wsgiutils',
],
package_dir = {'': 'lib'},
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2 :: Only',
'Topic :: Internet :: WWW/HTTP :: WSGI',
],
)
| 27.08 | 95 | 0.702363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 781 | 0.576809 |
f37bd3c163dbc0eb3b950a6eec2cd48b6d1e5e78 | 410 | py | Python | doc/doc_updates.py | briandorsey/partisci | cb3c838e95edaf55a182c78007d993e3a995f599 | [
"BSD-2-Clause"
] | 5 | 2015-01-13T21:01:13.000Z | 2022-02-11T17:26:24.000Z | doc/doc_updates.py | briandorsey/partisci | cb3c838e95edaf55a182c78007d993e3a995f599 | [
"BSD-2-Clause"
] | null | null | null | doc/doc_updates.py | briandorsey/partisci | cb3c838e95edaf55a182c78007d993e3a995f599 | [
"BSD-2-Clause"
] | null | null | null | import os
import sys
# Put the bundled Python client on the import path before importing it.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../clients/python"))
import pypartisci

server, port = "localhost", 7777

apps = ["Demo App A",
        "Demo App B"]
hosts = ["host1.example.com",
         "host2.example.com"]
versions = ["1.0", "2.0"]

# Report every demo app from every host; hosts[i] reports versions[i].
for app in apps:
    for host, version in zip(hosts, versions):
        pypartisci.send_http(server, port, app, version, host)
| 20.5 | 80 | 0.639024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.24878 |
f37e6ce0df9b7a208b203c4bc70c01ede05e68fc | 21,140 | py | Python | axisutilities/axisremapper.py | coderepocenter/AxisUtilities | 7cf93a88ba4bb073466c2a54a97beccd9e78d2c3 | [
"Apache-2.0"
] | null | null | null | axisutilities/axisremapper.py | coderepocenter/AxisUtilities | 7cf93a88ba4bb073466c2a54a97beccd9e78d2c3 | [
"Apache-2.0"
] | 11 | 2019-11-15T19:18:48.000Z | 2019-12-17T18:56:24.000Z | axisutilities/axisremapper.py | coderepocenter/AxisUtilities | 7cf93a88ba4bb073466c2a54a97beccd9e78d2c3 | [
"Apache-2.0"
] | 1 | 2019-11-15T19:14:40.000Z | 2019-11-15T19:14:40.000Z | from __future__ import annotations
from typing import Iterable, Callable
import numpy as np
import dask.array as da
from numba import prange
from scipy.sparse import csr_matrix
from axisutilities import Axis
class AxisRemapper:
    """
    `AxisRemapper` facilitates conversion between two one-dimensional axes. Originally the idea started for performing
    various conversions between time axes. For example, let's say you have hourly data and you want to average it to
    daily data. Or you have daily data and you want to average it to weekly, monthly, or yearly data. Or maybe you
    want to calculate daily minimum and maximum from hourly data. However, since the same concept could be applied
    to any one-dimensional axis, the usage was generalized and the name was changed to `AxisRemapper`.

    `AxisRemapper` caches the bulk of the computations. Hence, once you create an object of the `AxisRemapper` you
    could reuse it and avoid re-doing certain computations, as long as the source/origin axis and the destination
    axis remain the same.

    `AxisRemapper` applies the calculation on multi-dimensional data as well. By default, it assumes that the axis is
    the first dimension. If that is not the case, you could define the dimension along which the conversion needs to
    happen.

    Currently it supports calculating `average`, `minimum`, `maximum`, or any user defined function (any Python
    Callable object).

    Examples:

        * Creating an `AxisRemapper` and calculating average:

        >>> from axisutilities import AxisRemapper
        >>> from axisutilities import DailyTimeAxisBuilder
        >>> from axisutilities import WeeklyTimeAxisBuilder
        >>> from datetime import date
        >>> daily_axis = DailyTimeAxisBuilder(
        ...     start_date=date(2019, 1, 1),
        ...     n_interval=14
        ... ).build()
        >>> weekly_axis = WeeklyTimeAxisBuilder(
        ...     start_date=date(2019, 1, 1),
        ...     n_interval=2
        ... ).build()

        Now we are ready to create an `AxisRemapper` object:

        >>> ac = AxisRemapper(from_axis=daily_axis, to_axis=weekly_axis)

        Let's create some random data:

        >>> # Creating some random data
        ... import numpy as np
        >>> daily_data = np.random.random((14,1))

        Now to convert from daily axis to weekly axis all we need to do is:

        >>> weekly_avg = ac.average(daily_data)
        >>> weekly_avg
        array([[0.71498815],
               [0.60443017]])

        Let's create another random data; but this time make it multi-dimensional. Note that the first dimension
        is the source axis.

        >>> # creating a multidimensional data
        ... daily_data = np.random.random((14, 3, 4, 5))

        Now we could convert this new data using the same `AxisRemapper` object that we created. No need to create
        a new one. We could reuse it as long as the source and destination axis have not changed.

        >>> weekly_avg = ac.average(daily_data)
        >>> weekly_avg.shape
        (2, 3, 4, 5)

        Lets create another multi-dimensional data where the first dimension is not the source axis:

        >>> # creating a multi-dimensional data with the axis being the last dimension
        ... daily_data = np.random.random((3, 4, 5, 14))

        You could still use the same `AxisRemapper`; All you need to do is to tell what dimension is the source axis:

        >>> weekly_avg = ac.average(daily_data,dimension=3)
        >>> weekly_avg.shape
        (3, 4, 5, 2)

        Similarly you could also calculate the weekly min and max:

        >>> # Calculating min and max:
        ... weekly_min = ac.min(data)
        >>> weekly_min
        array([[0.19497718],
               [0.014242  ]])
        >>> weekly_max = ac.max(data)
        >>> weekly_max
        array([[0.99156943],
               [0.64039361]])

        * Applying a user-defined function:

        >>> from axisutilities import AxisRemapper
        >>> from axisutilities import DailyTimeAxisBuilder
        >>> from axisutilities import WeeklyTimeAxisBuilder
        >>> from datetime import date
        >>> import numpy as np
        >>>
        >>> daily_axis = DailyTimeAxisBuilder(
        ...     start_date=date(2019, 1, 1),
        ...     n_interval=14
        ... ).build()
        >>>
        >>> weekly_axis = WeeklyTimeAxisBuilder(
        ...     start_date=date(2019, 1, 1),
        ...     n_interval=2
        ... ).build()
        >>>
        >>> ac = AxisRemapper(from_axis=daily_axis, to_axis=weekly_axis)
        >>>
        >>> def user_defined_function(data):
        ...     return np.nansum(data, axis=0) * 42
        ...
        >>> daily_data = np.random.random((3, 4, 5, 14))
        >>>
        >>> weekly_user_defined = ac.apply_function(daily_data, user_defined_function, dimension=3)

        * Creating Axis-Converter covering different periods: Although from- and to-axis could have different
          granularity, eg. one could be daily, another weekly; however, they both must cover the same period in total.
          For example, they both must start at January 1st, and end on May 6th. If you want to turn this check off,
          pass an extra argument, called `assure_no_bound_mismatch`, and set it to false.

        >>> from_axis = DailyTimeAxisBuilder(
        ...     start_date=date(2019, 1, 1),
        ...     n_interval=14
        ... ).build()
        >>> to_axis = WeeklyTimeAxisBuilder(
        ...     start_date=date(2019, 1, 1),
        ...     n_interval=3
        ... ).build()
        >>> # This will generate exception and it would fail:
        ... # tc = AxisRemapper(from_axis=from_axis, to_axis=to_axis)
        ... # instead use the following:
        ... tc = AxisRemapper(
        ...     from_axis=from_axis,
        ...     to_axis=to_axis,
        ...     assure_no_bound_mismatch=False
        ... )
    """

    @staticmethod
    def _assure_no_bound_missmatch(fromAxis: Axis, toAxis: Axis) -> bool:
        """Return True when both axes start and end on exactly the same bounds."""
        return (fromAxis.lower_bound[0, 0] == toAxis.lower_bound[0, 0]) and \
               (fromAxis.upper_bound[0, -1] == toAxis.upper_bound[0, -1])

    def __init__(self, **kwargs) -> None:
        """
        Create a remapper between two axes.

        :keyword from_axis: source :class:`Axis` (required).
        :keyword to_axis: destination :class:`Axis` (required).
        :keyword assure_no_bound_mismatch: when truthy (default), raise if the
            two axes do not span exactly the same overall interval.
        :raises TypeError: if the provided axes are not ``Axis`` instances.
        :raises ValueError: if an axis is missing, or the bounds mismatch.
        """
        if ("from_axis" in kwargs) and ("to_axis" in kwargs):
            from_ta = kwargs["from_axis"]
            to_ta = kwargs["to_axis"]

            if not (isinstance(from_ta, Axis) and isinstance(to_ta, Axis)):
                raise TypeError("provided from/to_axis must be of type TimeAxis.")

            self._m = to_ta.nelem
            self._n = from_ta.nelem
            # The sparse coverage/weight matrix is the expensive part; it is
            # computed once here and reused by every subsequent remap call.
            self._weight_matrix = self._get_coverage_csr_matrix(from_ta, to_ta)
            self._from_ta = from_ta
            self._to_ta = to_ta
        else:
            raise ValueError("Not enough information is provided to construct the TimeAxisRemapper.")

        if bool(kwargs.get("assure_no_bound_mismatch", True)) and \
                (not AxisRemapper._assure_no_bound_missmatch(self._from_ta, self._to_ta)):
            raise ValueError("from- and to-axis cover a different period. Although from- and to-axis could have "
                             "different granularity, eg. one could be daily, another weekly; however, they both must "
                             "cover the same period in total. For example, they both must start at January 1st, and end"
                             " on May 6th. If you want to turn this check off, pass an extra arguments, called "
                             "`assure_no_bound_mismatch` and set it to false")

    @property
    def from_nelem(self):
        """Number of elements on the source axis (read-only)."""
        return self._n

    @from_nelem.setter
    def from_nelem(self, v):
        # intentionally a no-op: the value is fixed at construction time
        pass

    @property
    def to_nelem(self):
        """Number of elements on the destination axis (read-only)."""
        return self._m

    @to_nelem.setter
    def to_nelem(self, v):
        # intentionally a no-op: the value is fixed at construction time
        pass

    @property
    def weights(self) -> csr_matrix:
        """A defensive copy of the sparse (m x n) coverage-weight matrix."""
        return self._weight_matrix.copy()

    @weights.setter
    def weights(self, v):
        # intentionally a no-op: the weight matrix is derived from the axes
        pass

    @property
    def from_axis(self):
        """The source axis this remapper was built from (read-only)."""
        return self._from_ta

    @from_axis.setter
    def from_axis(self, v):
        # intentionally a no-op: changing the axis would invalidate the weights
        pass

    @property
    def to_axis(self):
        """The destination axis this remapper was built from (read-only)."""
        return self._to_ta

    @to_axis.setter
    def to_axis(self, v):
        # intentionally a no-op: changing the axis would invalidate the weights
        pass

    @staticmethod
    def _prep_input_data(in_data: Iterable, time_dimension, n) -> (np.ndarray, tuple):
        """
        Normalize input to a 2D ``(n, -1)`` float view with the source axis first.

        :param in_data: Iterable castable to a numpy array.
        :param time_dimension: which dimension of ``in_data`` is the source axis.
        :param n: expected length of the source axis.
        :return: tuple of the reshaped 2D array (possibly a VIEW of the input)
            and the trailing shape needed to undo the reshape later.
        :raises TypeError: when ``in_data`` is not an Iterable.
        :raises ValueError: when the source dimension length is not ``n``.
        """
        if not isinstance(in_data, Iterable):
            raise TypeError("input data should be an Iterable that can be casted to numpy.ndarray.")

        in_data_copy = in_data
        if not isinstance(in_data_copy, np.ndarray):
            in_data_copy = np.asarray(in_data_copy, dtype="float64")

        if in_data_copy.ndim == 1:
            in_data_copy = in_data_copy.reshape((-1, 1))

        if in_data_copy.shape[time_dimension] != n:
            raise ValueError("The time dimension does not matches to that of the provided time converter.")

        if time_dimension != 0:
            in_data_copy = np.moveaxis(in_data_copy, time_dimension, 0)

        trailing_shape = in_data_copy.shape[1:]
        in_data_copy = in_data_copy.reshape((n, -1))
        return in_data_copy, trailing_shape

    @staticmethod
    def _prep_output_data(out_data: np.ndarray, time_dimension, trailing_shape: tuple):
        """Undo ``_prep_input_data``: restore trailing dims and axis position."""
        return np.moveaxis(out_data.reshape((out_data.shape[0], *trailing_shape)), 0, time_dimension)

    def average(self, from_data: Iterable, dimension=0):
        """
        Remap ``from_data`` onto the destination axis by weighted averaging.

        NaN entries are treated as missing and excluded from the average; a
        destination element with no valid contribution becomes NaN.

        :param from_data: numpy-castable data or a dask array.
        :param dimension: position of the source axis in ``from_data``.
        :return: the averaged data on the destination axis (eager for numpy
            input, lazy for dask input).
        """
        # NOTE: dask arrays must be checked FIRST. A `da.Array` is also an
        # `Iterable`, so the original ordering always took the eager branch
        # and the dask branch below was unreachable dead code.
        if isinstance(from_data, da.Array):
            shape = from_data.shape
            chunksize = from_data.chunksize
            if shape[dimension] != chunksize[dimension]:
                # the whole source axis must live in a single chunk so each
                # block sees every source element it needs
                new_chunksize = list(chunksize)
                new_chunksize[dimension] = shape[dimension]
                from_data = from_data.rechunk(tuple(new_chunksize))
            return from_data.map_blocks(self._average, weights=self._weight_matrix, dimension=dimension, dtype=from_data.dtype)
        elif isinstance(from_data, Iterable):
            return self._average(from_data, self._weight_matrix, dimension)
        else:
            raise NotImplementedError()

    @staticmethod
    def _average(from_data: Iterable, weights: csr_matrix, dimension=0) -> np.ndarray:
        """
        Eager NaN-aware weighted average along the source axis.

        Each output element is ``sum(w_i * x_i) / sum(w_i over non-NaN x_i)``;
        when every contributing input is NaN the sum of effective weights is 0
        and ``0 * inf`` yields NaN in the output.
        """
        from_data_copy, trailing_shape = AxisRemapper._prep_input_data(from_data, dimension, weights.shape[1])

        nan_mask = np.isnan(from_data_copy)
        non_nan_mask = np.ones(from_data_copy.shape, dtype=np.int8)
        non_nan_mask[nan_mask] = 0
        # Replace NaNs by zero in a NEW array. `_prep_input_data` may return a
        # view of the caller's buffer, and the original in-place assignment
        # (`from_data_copy[nan_mask] = 0`) silently mutated the input data.
        from_data_copy = np.where(nan_mask, 0.0, from_data_copy)

        inverse_sum_effective_weights = np.reciprocal(weights * non_nan_mask)

        output = AxisRemapper._prep_output_data(
            np.multiply(weights * from_data_copy, inverse_sum_effective_weights),
            dimension,
            trailing_shape
        )
        return output

    def apply_function(self, from_data: Iterable, func2apply: Callable, dimension=0):
        """
        Remap ``from_data`` by applying ``func2apply`` over each destination cell.

        :param from_data: numpy-castable data or a dask array.
        :param func2apply: callable reducing an ``(m, s)`` array over axis 0.
        :param dimension: position of the source axis in ``from_data``.
        :return: the remapped data (eager for numpy input, lazy for dask input).
        """
        # NOTE: same ordering fix as in `average` -- dask arrays are Iterable,
        # so the dask check must come first or it is never reached.
        if isinstance(from_data, da.Array):
            shape = from_data.shape
            chunksize = from_data.chunksize
            if shape[dimension] != chunksize[dimension]:
                new_chunksize = list(chunksize)
                new_chunksize[dimension] = shape[dimension]
                from_data = from_data.rechunk(tuple(new_chunksize))
            return from_data.map_blocks(self._apply_function, func2apply=func2apply, to_nelem=self.to_nelem, weights=self._weight_matrix, dimension=dimension, dtype=from_data.dtype)
        elif isinstance(from_data, Iterable):
            return self._apply_function(from_data, func2apply, self.to_nelem, self._weight_matrix, dimension)
        else:
            raise NotImplementedError()

    @staticmethod
    def _apply_function(data: Iterable, func2apply: Callable, to_nelem: int, weights: csr_matrix, dimension=0):
        """
        Applies a user-defined/provided function for the conversion.

        :param data: The data on the source-axis that needs to be converted to the destination axis using the
                     user-provided function.
        :param func2apply: The user provided function. This function should assume that it will receives a `m` by `s`
                           matrix and it should return `1` by `s` output data. It should also handle the `NaN` or
                           missing values properly.
        :param dimension: The dimension where the source axis is. By default, it is assumed that the first dimension
                          is the source axis.
        :return: a data with the same number of dimension of the input, where each element is the result of the user
                 defined function. All the dimensions are the same as the input data except the source axis. The source
                 axis is turned into the destination axis; which means, it's location in the dimension is the same, but
                 it's size has changed to reflect that of the destination axis. For example, if you have 4 dimensional
                 input, and the source axis is the second dimension, the output would be still 4 dimensional and the
                 destination axis would be still the second dimension. But the second dimension between the input and
                 output might have different numbers depending on the axis.

        Examples:

            * Let's say we have a daily data, and we want to calculate coefficient of variation (CV) for each month.
              This is the proper way of defining the function:

            >>> from axisutilities import DailyTimeAxisBuilder
            >>> from axisutilities import MonthlyTimeAxisBuilder
            >>> from axisutilities import AxisRemapper
            >>> from datetime import date
            >>>
            >>> # creating a daily axis with a span of one year
            ... daily_axis = DailyTimeAxisBuilder(
            ...     start_date=date(2019, 1, 1),
            ...     end_date=date(2020, 1, 1)
            ... ).build()
            >>>
            >>> # creating a monthly axis with a span of one year
            ... monthly_axis = MonthlyTimeAxisBuilder(
            ...     start_year=2019,
            ...     end_year=2019
            ... ).build()
            >>>
            >>> # constructing the AxisRemapper object that conversts
            ... # from the daily axis to the monthly axis.
            ... ac = AxisRemapper(from_axis=daily_axis, to_axis=monthly_axis)
            >>>
            >>> # creating some random data points
            ... from numpy.random import random
            >>> data = random((daily_axis.nelem, 90, 360))
            >>> print("data.shape: ", data.shape)
            data.shape:  (365, 90, 360)
            >>>
            >>> # now creating a function that calculates Coefficient of Variation (CV)
            ... import numpy as np
            >>> def cv(data):
            ...     return np.nanstd(data, axis=0) / np.nanmean(data, axis=0)
            ...
            >>> # now calculating the monthly CV
            ... monthly_cv = ac.apply_function(data, cv)
            >>> print("monthly_cv.shape: ", monthly_cv.shape)
            monthly_cv.shape:  (12, 90, 360)

            Note how cv function was provided.

            * Repeating the previous examples using lambda function: You do not need to have a named function to pass.
              You could create anonymous function using Lambda expressions:

            >>> monthly_cv_using_lambda = ac.apply_function(
            ...     data,
            ...     lambda e: np.nanstd(e, axis=0) / np.nanmean(e, axis=0)
            ... )
            >>> print("monthly_cv_using_lambda.shape: ", monthly_cv_using_lambda.shape)
            monthly_cv_using_lambda.shape:  (12, 90, 360)
            >>> np.min(monthly_cv_using_lambda - monthly_cv)
            0.0
            >>> np.max(monthly_cv_using_lambda - monthly_cv)
            0.0
        """
        data_copy, trailing_shape = AxisRemapper._prep_input_data(data, dimension, weights.shape[1])

        if isinstance(func2apply, Callable):
            import warnings
            warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
            output = _apply_function_core(
                to_nelem,
                weights,
                data_copy,
                func2apply
            )
        else:
            raise TypeError("func2apply must be a callable object that performs the calculation on axis=0.")

        return AxisRemapper._prep_output_data(
            output,
            dimension,
            trailing_shape
        )

    def min(self, data, dimension=0):
        """Remap by taking the NaN-ignoring minimum over each destination cell."""
        return self.apply_function(
            data,
            lambda e: np.nanmin(e, axis=0),
            dimension
        )

    def max(self, data, dimension=0):
        """Remap by taking the NaN-ignoring maximum over each destination cell."""
        return self.apply_function(
            data,
            lambda e: np.nanmax(e, axis=0),
            dimension
        )

    @staticmethod
    def _get_coverage_csr_matrix(from_ta: Axis, to_ta: Axis) -> csr_matrix:
        """
        Build the sparse ``(m, n)`` coverage matrix between the two axes, where
        entry ``(r, c)`` is the fraction of source interval ``c`` that falls
        inside destination interval ``r``.

        A destination interval covered by no source interval gets a single NaN
        stored in column 0; downstream code (``_average``/``_apply_function_core``)
        treats that as a "no data" sentinel.
        """
        row_idx, col_idx, weights = AxisRemapper._get_coverage(
            from_ta.lower_bound, from_ta.upper_bound,
            to_ta.lower_bound, to_ta.upper_bound
        )

        m = to_ta.nelem
        n = from_ta.nelem
        weights = csr_matrix((weights, (row_idx, col_idx)), shape=(m, n)).tolil()

        # Mark rows with zero total coverage with the NaN sentinel.
        # (Row-normalization is deliberately NOT done here; the consumers
        # normalize by the effective, NaN-aware weight sum themselves.)
        mask = np.asarray(weights.sum(axis=1)).flatten() == 0
        weights[mask, 0] = np.nan

        return weights.tocsr()

    @staticmethod
    def _get_coverage(
            from_lower_bound: np.ndarray,
            from_upper_bound: np.ndarray,
            to_lower_bound: np.ndarray,
            to_upper_bound: np.ndarray):
        """
        Compute the sparse overlap triplets between source and destination
        intervals.

        All four bounds must be of shape ``(1, k)``. Returns ``(row_idx,
        col_idx, weights)`` lists suitable for building a CSR matrix, where
        ``weights`` holds the fraction of each source interval's length that
        overlaps the destination interval.
        """
        m = to_lower_bound.size
        n = from_lower_bound.size

        # basic sanity checks:
        if (to_lower_bound.ndim != 2) or (to_lower_bound.shape[0] != 1):
            raise ValueError(f"to_lower_bound must be of shape (1,m), it's current shape is: {to_lower_bound.shape}.")

        if to_lower_bound.shape != to_upper_bound.shape:
            raise ValueError("to_lower_bound/upper_bound must have the same shape.")

        if (from_lower_bound.ndim != 2) or (from_lower_bound.shape[0] != 1):
            raise ValueError("from_lower_bound must be of shape (1,n).")

        if from_lower_bound.shape != from_upper_bound.shape:
            raise ValueError("from_lower_bound/upper_bound must have the same shape.")

        # TODO: turn this into cython so that is faster and/or use some sort of data structure to
        #       reduce its time-complexity from O(mn)
        # TODO: Move this to Interval; From OOP stand point it makes more sense to have some of these functionalities
        #       as part of that class/object.
        row_idx = []
        col_idx = []
        weights = []
        for r in range(m):
            toLB = to_lower_bound[0, r]
            toUB = to_upper_bound[0, r]

            for c in range(n):
                fromLB = from_lower_bound[0, c]
                fromUB = from_upper_bound[0, c]
                fromLength = fromUB - fromLB

                if (fromUB <= toLB) or (fromLB >= toUB):  # No coverage
                    continue
                elif (fromLB <= toLB) and (fromUB >= toLB) and (fromUB <= toUB):
                    # source interval straddles the destination's lower bound
                    row_idx.append(r)
                    col_idx.append(c)
                    weights.append((fromUB - toLB) / fromLength)
                elif (fromLB >= toLB) and (fromLB < toUB) and (fromUB >= toUB):
                    # source interval straddles the destination's upper bound
                    row_idx.append(r)
                    col_idx.append(c)
                    weights.append((toUB - fromLB) / fromLength)
                elif (fromLB >= toLB) and (fromUB <= toUB):
                    # source interval fully inside the destination interval
                    row_idx.append(r)
                    col_idx.append(c)
                    weights.append(1.0)
                elif (fromLB <= toLB) and (fromUB >= toUB):
                    # source interval fully contains the destination interval
                    row_idx.append(r)
                    col_idx.append(c)
                    weights.append((toUB - toLB) / fromLength)

        return row_idx, col_idx, weights
# @jit(parallel=True, forceobj=True, cache=True)
# @autojit
def _apply_function_core(n: int, _weight_matrix: csr_matrix, data_copy: np.ndarray, func: Callable) -> np.ndarray:
    """
    Apply ``func`` row-by-row over the sparse coverage pattern.

    For each of the ``n`` output rows, the CSR index structure of
    ``_weight_matrix`` selects which input rows of ``data_copy`` contribute;
    ``func`` then reduces that sub-matrix (along axis 0) into one output row.

    :param n: number of output rows (destination-axis elements).
    :param _weight_matrix: CSR matrix of shape ``(n, data_copy.shape[0])``.
        A row whose only stored entry is NaN marks an uncovered interval
        (see ``AxisRemapper._get_coverage_csr_matrix``).
    :param data_copy: 2D input data with the source axis on axis 0.
    :param func: callable reducing a ``(k, s)`` array to shape ``(s,)``.
    :return: ``(n, data_copy.shape[1])`` array; uncovered rows remain NaN.
    """
    output = np.full((n, data_copy.shape[1]), np.nan)
    # prange behaves like range when not jit-compiled; the commented-out numba
    # decorators above suggest this loop was meant to be parallelized.
    for r in prange(n):
        # indptr[r]:indptr[r+1] spans row r's stored entries in CSR layout.
        start = _weight_matrix.indptr[r]
        end = _weight_matrix.indptr[r + 1]
        # A single stored NaN entry flags "no source interval covers this
        # destination interval"; leave the pre-filled NaN output untouched.
        if not (np.isnan(_weight_matrix[r, 0]) and ((end - start) == 1)):
            row_mask = _weight_matrix.indices[start:end]
            output[r, :] = func(data_copy[row_mask, :])
    return output
| 39.514019 | 181 | 0.607332 | 20,360 | 0.963103 | 0 | 0 | 11,234 | 0.53141 | 0 | 0 | 11,766 | 0.556575 |
f37eb0ba03d92d73ef194fc0d664d1c011be9344 | 4,498 | py | Python | examples/__old/freeform_vault_tutorial.py | selinabitting/compas-RV2 | 0884cc00d09c8f4a75eb2b97614105e4c8bfd818 | [
"MIT"
] | 34 | 2020-04-27T13:54:38.000Z | 2022-01-17T19:16:27.000Z | examples/__old/freeform_vault_tutorial.py | selinabitting/compas-RV2 | 0884cc00d09c8f4a75eb2b97614105e4c8bfd818 | [
"MIT"
] | 306 | 2020-04-27T12:00:54.000Z | 2022-03-23T22:28:54.000Z | examples/__old/freeform_vault_tutorial.py | selinabitting/compas-RV2 | 0884cc00d09c8f4a75eb2b97614105e4c8bfd818 | [
"MIT"
] | 11 | 2020-06-30T08:23:40.000Z | 2022-02-01T20:47:39.000Z | from compas_rv2.skeleton import Skeleton
from compas_rv2.diagrams import FormDiagram # noqa F401
from compas_rv2.diagrams import ForceDiagram
from compas_rv2.diagrams import ThrustDiagram # noqa F401
from compas_rv2.rhino import RhinoSkeleton
from compas_rv2.rhino import RhinoFormDiagram
from compas_rv2.rhino import RhinoForceDiagram
from compas_rv2.rhino import RhinoThrustDiagram
from compas.geometry import subtract_vectors
from compas.geometry import scale_vector
from compas.geometry import Translation
from compas_cloud import Proxy
import compas_rhino
import rhinoscriptsyntax as rs
import time
# --------------------------------------------------------------------------
# settings for visulisation in Rhino
# --------------------------------------------------------------------------
# Rhino layer names and RGB colour tuples used when drawing each diagram
# (skeleton, form, force, thrust); keys suffixed with ":is_*" colour vertices
# or edges that carry that attribute flag.
settings = {
    "layers.skeleton": "RV2::Skeleton",
    "layers.form": "RV2::FormDiagram",
    "layers.force": "RV2::ForceDiagram",
    "layers.thrust": "RV2::ThrustNetwork",
    "color.form.vertices": (0, 255, 0),
    "color.form.vertices:is_fixed": (0, 255, 255),
    "color.form.vertices:is_external": (0, 0, 255),
    "color.form.vertices:is_anchor": (255, 255, 255),
    "color.form.edges": (0, 255, 0),
    "color.form.edges:is_external": (0, 0, 255),
    "color.thrust.vertices": (255, 0, 255),
    "color.thrust.vertices:is_fixed": (0, 255, 0),
    "color.thrust.vertices:is_anchor": (255, 0, 0),
    "color.thrust.edges": (255, 0, 255),
    "color.thrust.faces": (255, 0, 255),
    "color.force.vertices": (0, 255, 0),
    "color.force.vertices:is_fixed": (0, 255, 255),
    "color.force.edges": (0, 255, 0),
    "color.force.edges:is_external": (0, 0, 255),
}
# --------------------------------------------------------------------------
# create a Rhinoskeleton
# --------------------------------------------------------------------------
# Ask the user to pick the skeleton curves in Rhino, capture their endpoint
# coordinates, then delete the original Rhino objects.
guids = rs.GetObjects("select curves", filter=rs.filter.curve)
lines = compas_rhino.get_line_coordinates(guids)
rs.DeleteObjects(guids)
skeleton = Skeleton.from_skeleton_lines(lines)
rhinoskeleton = RhinoSkeleton(skeleton)
rhinoskeleton.draw_skeleton_branches()
# NOTE(review): "dynamic_draw" presumably lets the user set widths interactively -- confirm
rhinoskeleton.dynamic_draw_self()
# --------------------------------------------------------------------------
# modify skeleton
# --------------------------------------------------------------------------
# each edit is followed by a redraw so the user sees the updated state
rhinoskeleton.move_skeleton_vertex()
rhinoskeleton.draw_self()
rhinoskeleton.move_diagram_vertex()
rhinoskeleton.draw_self()
rhinoskeleton.diagram.subdivide()
rhinoskeleton.draw_self()
# --------------------------------------------------------------------------
# create form diagram, update form boundaries
# --------------------------------------------------------------------------
time.sleep(1)
form = rhinoskeleton.diagram.to_form()
# NOTE(review): feet=2 presumably adds two "feet" per boundary anchor -- confirm against compas_tna
form.update_boundaries(feet=2)
def move_diagram(diagram, distance=1.5):
    """Translate *diagram* in place along its bounding-box edge vector.

    The offset is the vector from the first to the second bounding-box
    corner, scaled by *distance*; the (mutated) diagram is returned so the
    call can be chained.
    """
    corner_a, corner_b = diagram.bounding_box()[0], diagram.bounding_box()[1]
    offset = scale_vector(subtract_vectors(corner_b, corner_a), distance)
    diagram.transform(Translation(offset))
    return diagram
# shift the form diagram aside so it does not overlap the skeleton drawing
form = move_diagram(form)
rhinoform = RhinoFormDiagram(form)
rhinoform.draw(settings)
# --------------------------------------------------------------------------
# create force diagram
# --------------------------------------------------------------------------
time.sleep(1)
force = ForceDiagram.from_formdiagram(form)
force = move_diagram(force)
rhinoforce = RhinoForceDiagram(force)
rhinoforce.draw(settings)
# --------------------------------------------------------------------------
# horizontal equilibrium
# --------------------------------------------------------------------------
time.sleep(1)
# compas_cloud Proxy runs the solver out of process; diagrams travel as
# plain data dicts and are written back after each solve
proxy = Proxy()
horizontal = proxy.package("compas_rv2.equilibrium.horizontal_nodal_proxy")
formdata, forcedata = horizontal(rhinoform.diagram.data, rhinoforce.diagram.data)
rhinoform.diagram.data = formdata
rhinoforce.diagram.data = forcedata
rhinoform.draw(settings)
rhinoforce.draw(settings)
# --------------------------------------------------------------------------
# vertical equilibrium, draw thrustnetwork
# --------------------------------------------------------------------------
time.sleep(1)
vertical = proxy.package("compas_tna.equilibrium.vertical_from_zmax_proxy")
rhinothrust = RhinoThrustDiagram(form)
# target maximum height of the thrust network
zmax = 4
formdata, scale = vertical(rhinoform.diagram.data, zmax)
# the solver returns the force-diagram scale alongside the updated form data
rhinoforce.diagram.attributes['scale'] = scale
rhinoform.diagram.data = formdata
rhinothrust.diagram.data = formdata
rhinothrust.draw(settings)
| 31.676056 | 81 | 0.5747 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,939 | 0.43108 |
f37eef2860b7b43e3ce0d56aad7e0ba20434a32f | 5,923 | py | Python | tasks/time-series/time-series-forecasting/a65761f6-78d4-4fa7-988c-4ac6e7c07421/src/runner.py | sujithvemi/ai-platform | 2c11fecb644be95fe9b0da450da05be8cfd00fa1 | [
"MIT"
] | 1 | 2020-08-29T18:49:53.000Z | 2020-08-29T18:49:53.000Z | tasks/time-series/time-series-forecasting/a65761f6-78d4-4fa7-988c-4ac6e7c07421/src/runner.py | sujithvemi/ai-platform | 2c11fecb644be95fe9b0da450da05be8cfd00fa1 | [
"MIT"
] | 10 | 2020-12-26T09:34:17.000Z | 2022-03-21T22:30:38.000Z | tasks/time-series/time-series-forecasting/a65761f6-78d4-4fa7-988c-4ac6e7c07421/src/runner.py | sujithvemi/ai-platform | 2c11fecb644be95fe9b0da450da05be8cfd00fa1 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import io
import requests
from datetime import timedelta
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import *
import matplotlib.pyplot as plt
import mlflow
import mlflow.sklearn
class ForecastRunner(object):
    """
    Train an XGBoost regressor on daily electricity consumption downloaded
    from ``url`` and forecast the 7 days starting at ``predicted_date``.

    The four hyper-parameter arguments describe the grid-search space as
    list literals (e.g. ``"[3, 5]"``) or real lists; each is normalized to a
    list of numbers in ``__init__``. Metrics and the fitted model are logged
    through MLflow.
    """

    def __init__(self, url, output_file, predicted_date, min_child_weight, colsample_bytree, max_depth, n_estimators):
        import ast  # local, stdlib-only: parses the list literals below

        self.url = url
        self.output_file = output_file
        self.predicted_date = predicted_date
        # ast.literal_eval replaces the original eval(): it accepts the same
        # "[...]" literals but cannot execute arbitrary code, so untrusted
        # CLI/config input can no longer run code here.
        self.min_child_weight = [float(i) for i in ast.literal_eval(str(min_child_weight))]
        self.colsample_bytree = [float(i) for i in ast.literal_eval(str(colsample_bytree))]
        self.max_depth = [int(i) for i in ast.literal_eval(str(max_depth))]
        self.n_estimators = [int(i) for i in ast.literal_eval(str(n_estimators))]

    def get_input(self):
        """
        Download the CSV at ``self.url`` and return a (Date, Value) frame.

        The raw file holds 15-minute interval columns (header on line 2);
        they are summed into a single daily ``Value`` column.
        """
        s = requests.get(self.url).content
        df = pd.read_csv(io.StringIO(s.decode('utf-8')), header=1)
        # sum along 15-min intervals and convert into daily values
        df['Value'] = df.drop(['Date', 'Values'], axis=1).sum(axis=1)
        df['Date'] = pd.to_datetime(df['Date'], format='%d/%m/%Y')
        input_data = df[['Date', 'Value']]
        return input_data

    def save_output(self, test, preds):
        """Write the predictions next to their dates as CSV to ``self.output_file``."""
        preds = preds.reset_index(drop=True)
        df_test = test.reset_index()[['Date']]
        prediction = df_test.join(preds)
        prediction.to_csv(self.output_file)

    @staticmethod
    def evaluation_metrics(y_true, y_pred):
        """Return ``(MAPE, RMSE, MAE, R^2)`` for the given actual/predicted values."""
        mape = np.mean(np.abs((np.array(y_true) - np.array(y_pred)) / np.array(y_true))) * 100
        rmse = np.sqrt(mean_squared_error(y_true, y_pred))
        mae = mean_absolute_error(y_true, y_pred)
        r2 = r2_score(y_true, y_pred)
        return mape, rmse, mae, r2

    @staticmethod
    def remove_outliers(data, fill=False, threshold=3.5):
        """
        Median Absolute Deviation (MAD) based outlier detection.

        Rows whose modified z-score exceeds ``threshold`` are set to NaN and,
        when ``fill`` is True, re-filled by polynomial interpolation;
        remaining NaN rows are dropped. NOTE: ``data`` is modified in place.

        :param data: pandas frame/series of numeric values.
        :param fill: when True, interpolate removed values instead of dropping.
        :param threshold: modified z-score cut-off (3.5 is the common default).
        """
        median = np.median(data.values, axis=0)
        diff = np.sum((data.values - median) ** 2, axis=-1)
        diff = np.sqrt(diff)
        med_abs_deviation = np.median(diff)
        # scale constant 0.6745
        modified_z_score = 0.6745 * diff / med_abs_deviation
        data[modified_z_score > threshold] = np.nan
        if fill:
            # fill by interpolation
            data = data.interpolate(method='polynomial', order=2)
        data = data.dropna()
        return data

    def prepare_data(self, df, fill=False):
        """
        Index by date, build mean-encoded calendar features and split into a
        training frame (outliers removed) and the 7-day test frame starting
        at ``self.predicted_date``.
        """
        df = df.set_index('Date')
        # get time features
        df['Year'] = df.index.year
        df['Month'] = df.index.month
        # DatetimeIndex.week was deprecated and then removed in modern pandas;
        # isocalendar() yields the same ISO week numbers. Fall back for pandas < 1.1.
        try:
            df['Week'] = df.index.isocalendar().week
        except AttributeError:
            df['Week'] = df.index.week
        df['DOW'] = df.index.weekday
        # encode time features with the mean of the target variable
        yearly_avg = dict(df.groupby('Year')['Value'].mean())
        df['year_avg'] = df['Year'].apply(lambda x: yearly_avg[x])
        monthly_avg = dict(df.groupby('Month')['Value'].mean())
        df['month_avg'] = df['Month'].apply(lambda x: monthly_avg[x])
        weekly_avg = dict(df.groupby('Week')['Value'].mean())
        df['week_avg'] = df['Week'].apply(lambda x: weekly_avg[x])
        dow_avg = dict(df.groupby('DOW')['Value'].mean())
        df['dow_avg'] = df['DOW'].apply(lambda x: dow_avg[x])
        df = df.drop(['Year', 'Month', 'Week', 'DOW'], axis=1)
        start_date = pd.to_datetime(self.predicted_date).date()
        end_date = start_date + timedelta(days=6)
        train = df.loc[df.index.date < start_date]
        # remove outliers from training set
        train = ForecastRunner.remove_outliers(train, fill)
        test = df.loc[(df.index.date >= start_date) & (df.index.date <= end_date)]
        return train, test

    def grid_search(self, xtr, ytr):
        """Run a grid search over the configured XGBoost hyper-parameter space."""
        gbm = xgb.XGBRegressor()
        reg_cv = GridSearchCV(gbm,
                              {"colsample_bytree": self.colsample_bytree, "min_child_weight": self.min_child_weight,
                               'max_depth': self.max_depth, 'n_estimators': self.n_estimators}, verbose=1)
        reg_cv.fit(xtr, ytr)
        return reg_cv

    @staticmethod
    def plot_result(y_true, y_pred):
        """Plot actual vs. predicted series and save the figure to plot.png."""
        plt.figure()  # fresh figure so repeated calls do not overlay lines
        plt.plot(y_true, label='Actual')
        plt.plot(y_pred, label='Predicted')
        plt.legend()
        plt.savefig('plot.png')
        plt.close()  # release the figure instead of leaking it

    def fit(self):
        """
        Gets data and preprocess by prepare_data() function
        Trains with the selected parameters from grid search and saves the model
        """
        data = self.get_input()
        df_train, df_test = self.prepare_data(data)
        xtr, ytr = df_train.drop(['Value'], axis=1), df_train['Value'].values
        xgbtrain = xgb.DMatrix(xtr, ytr)
        reg_cv = self.grid_search(xtr, ytr)
        param = reg_cv.best_params_
        bst = xgb.train(dtrain=xgbtrain, params=param)
        # save model to file
        mlflow.sklearn.save_model(bst, "model")
        return df_test

    def predict(self, df_test):
        """
        Makes prediction for the next 7 days electricity consumption.
        Logs MAPE/RMSE/R2/MAE to MLflow, saves the comparison plot and writes
        the forecast CSV via ``save_output``.
        """
        # load model from file
        loaded_model = mlflow.sklearn.load_model("model")
        # make predictions for test data
        xts, yts = df_test.drop(['Value'], axis=1), df_test['Value'].values
        p = loaded_model.predict(xgb.DMatrix(xts))
        prediction = pd.DataFrame({'Prediction': p})
        mape, rmse, mae, r2 = ForecastRunner.evaluation_metrics(yts, p)
        print('MAPE: {}'.format(mape))
        print('RMSE: {}'.format(rmse))
        print('R2: {}'.format(r2))
        print('MAE: {}'.format(mae))
        mlflow.log_metric("MAPE", mape)
        mlflow.log_metric("RMSE", rmse)
        mlflow.log_metric("R2", r2)
        mlflow.log_metric("MAE", mae)
        ForecastRunner.plot_result(yts, p)
        self.save_output(df_test, prediction)
f38046d7ad6e7e47819ddb5a3878c1a7e985261b | 24 | py | Python | configs.py | GavinLiu-AI/warden-bots | 749d2c16a724d5590b6616ae711c96c9c4b5a9cf | [
"MIT"
] | null | null | null | configs.py | GavinLiu-AI/warden-bots | 749d2c16a724d5590b6616ae711c96c9c4b5a9cf | [
"MIT"
] | null | null | null | configs.py | GavinLiu-AI/warden-bots | 749d2c16a724d5590b6616ae711c96c9c4b5a9cf | [
"MIT"
] | null | null | null | WAR_BOT_TOKEN = 'token'
| 12 | 23 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.291667 |
f3806a187a1195eae97ef27e1fd3f40966d4162c | 2,992 | py | Python | eigenface.py | lion-tohiro/MyEigenface | cc14e598dd83d0831c65c0521633f446ca4314a7 | [
"MIT"
] | null | null | null | eigenface.py | lion-tohiro/MyEigenface | cc14e598dd83d0831c65c0521633f446ca4314a7 | [
"MIT"
] | null | null | null | eigenface.py | lion-tohiro/MyEigenface | cc14e598dd83d0831c65c0521633f446ca4314a7 | [
"MIT"
] | null | null | null | from cv2 import cv2
import numpy as np
import sys
import os
from base import normalize
# some parameters of training and testing data
train_sub_count = 40   # number of subjects in the training set (folders s1..s40)
train_img_count = 5    # training images per subject (files 1.png..5.png)
total_face = 200       # train_sub_count * train_img_count flattened face vectors
row = 70               # expected image height in pixels -- TODO confirm all inputs are 70x70
col = 70               # expected image width in pixels
def eigenfaces_train(src_path):
    """
    Compute the eigenface basis from the training images under ``src_path``.

    Expects the layout ``src_path/s<i>/<j>.png`` for ``i`` in
    1..train_sub_count and ``j`` in 1..train_img_count, each a row x col
    grayscale image.

    :param src_path: root directory of the training face database.
    :return: tuple ``(img_mean, eigen_values, eigen_vectors,
        eigenfaces_weight)`` -- the mean face vector, eigenvalues sorted
        descending, the matching eigenvectors (columns), and each training
        face's projection weights onto the eigenface basis.
    """
    img_list = np.empty((row*col, total_face))
    count = 0
    # read all the faces and flatten them
    for i in range(1, train_sub_count+1):
        for j in range(1, train_img_count+1):
            img_path = src_path + "/s" + str(i) + "/" + str(j) + ".png"
            # NOTE(review): cv2.imread returns None for a missing/unreadable
            # file, which would break the column assignment below -- confirm
            # the dataset is complete before training.
            img = cv2.imread(img_path, 0)
            img_col = np.array(img).flatten()
            img_list[:, count] = img_col[:]
            count += 1
    # compute the average of the faces
    img_mean = np.sum(img_list, axis=1) / total_face
    diff = np.empty((row*col, total_face))
    # compute the difference matrix
    for i in range(0, total_face):
        diff[:, i] = img_list[:, i] - img_mean[:]
    # NOTE(review): this builds the full (row*col x row*col) covariance, NOT
    # the smaller A^T*A trick described in the comment block below (that
    # variant is the commented-out code). Also, np.mat/np.matrix are
    # deprecated in NumPy -- consider plain ndarrays with the @ operator.
    cov = np.mat(diff)*np.mat(diff.T) / total_face
    eigen_values, eigen_vectors = np.linalg.eigh(cov)
    # sort the eigenvalues and eigenvectors by desc
    sort_index = np.argsort(-eigen_values)
    eigen_values = eigen_values[sort_index]
    eigen_vectors = eigen_vectors[:, sort_index]
    # print(eigen_values)
    '''
    compute the coveriance matrix
    here we don't use original algrithom to avoid computing an 10000+ * 10000+ coveriance matrix later
    oringinal: cov = 1/m * A*A^T => it will be an 10000+ * 10000+ matrix
    when the dimension of the image (here we mean row*col) > the total number of the training images (here we mean total_face)
    (1)cov*v = A*A^T*v = e*v (e is eigenvalue of cov, v is eigenvector of cov) => original
    (2)let cov'*u = A^T*A*u = e*u
    thus, on both sides of the equation(2) left side multiplied by A, we can get the equation below
    (3)A*A^T*A*u = A*e2*u = e2*A*u
    compare (1) with (3), if u is eigenvector of cov' of eigenvalue e, we can find that A*u = v
    (e is not zero, cov and cov' have the same not-zero eigenvalues, but have different number of zero eigenvalue, it can be proofed)
    so we can compute A^T*A instead of A*A^T to simplify the computation (which will generate a matrix with only 200 * 200 data)
    cov = np.matrix(diff.T)*np.matrix(diff) / total_face
    # compute the eigen values and eigen vectors of cov
    eigen_values, vectors = np.linalg.eigh(cov)
    eigen_vectors = np.matrix(diff)*np.matrix(vectors)
    # sort the eigenvalues and eigenvectors by desc
    sort_index = np.argsort(-eigen_values)
    eigen_values = eigen_values[sort_index]
    eigen_vectors = eigen_vectors[:, sort_index]
    print(eigen_values)
    '''
    # for each image we compute the y (y = A^T * x, weight) and we will compare yf(the input image) with yf, find the nearest one
    eigenfaces_weight = np.matrix(eigen_vectors.T)*np.matrix(diff)
    return img_mean, eigen_values, eigen_vectors, eigenfaces_weight
f38371cb347fe1cc59a05a3c04244fc73c03cc52 | 820 | py | Python | app/request/migrations/0003_auto_20190924_2107.py | contestcrew/2019SeoulContest-Backend | 2e99cc6ec6a712911da3b79412ae84a9d35453e1 | [
"MIT"
] | null | null | null | app/request/migrations/0003_auto_20190924_2107.py | contestcrew/2019SeoulContest-Backend | 2e99cc6ec6a712911da3b79412ae84a9d35453e1 | [
"MIT"
] | 32 | 2019-08-30T13:09:28.000Z | 2021-06-10T19:07:56.000Z | app/request/migrations/0003_auto_20190924_2107.py | contestcrew/2019SeoulContest-Backend | 2e99cc6ec6a712911da3b79412ae84a9d35453e1 | [
"MIT"
] | 3 | 2019-09-19T10:12:50.000Z | 2019-09-30T15:59:13.000Z | # Generated by Django 2.2.5 on 2019-09-24 12:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``PoliceOffice`` model and attach it to ``Request``.

    Auto-generated by Django 2.2.5; keep edits minimal so the migration
    history stays reproducible.
    """

    # Must be applied after the previous migration of the `request` app.
    dependencies = [
        ('request', '0002_auto_20190924_1811'),
    ]

    operations = [
        # New lookup table for police offices.
        migrations.CreateModel(
            name='PoliceOffice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # verbose_name is Korean for "name".
                ('name', models.CharField(max_length=30, verbose_name='이름')),
            ],
        ),
        # Nullable FK from Request to PoliceOffice; deleting an office
        # nulls the reference instead of cascading (SET_NULL).
        # verbose_name is Korean for "police station".
        migrations.AddField(
            model_name='request',
            name='police_office',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='request.PoliceOffice', verbose_name='경찰서'),
        ),
    ]
| 30.37037 | 140 | 0.610976 | 704 | 0.848193 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.209639 |
f386e72db7f6ccd97e6e983df013a5674e86d24e | 10,472 | py | Python | mrmap/service/helper/ogc/layer.py | SvenTUM/mrmap | 307e120d0d846645b56fb8f4a7a979857c860c15 | [
"MIT"
] | null | null | null | mrmap/service/helper/ogc/layer.py | SvenTUM/mrmap | 307e120d0d846645b56fb8f4a7a979857c860c15 | [
"MIT"
] | null | null | null | mrmap/service/helper/ogc/layer.py | SvenTUM/mrmap | 307e120d0d846645b56fb8f4a7a979857c860c15 | [
"MIT"
] | null | null | null | from django.contrib.gis.geos import Polygon
from django.db import IntegrityError
from service.helper.enums import MetadataEnum, OGCOperationEnum, MetadataRelationEnum
from service.helper.epsg_api import EpsgApi
from service.models import Service, Metadata, Layer, Keyword, ReferenceSystem, Dimension, ServiceUrl
from service.settings import ALLOWED_SRS
from structure.models import MrMapGroup, MrMapUser
class OGCLayer:
def __init__(self, identifier=None, parent=None, title=None, queryable=False, opaque=False,
cascaded=False, abstract=None):
self.identifier = identifier
self.parent = parent
self.is_queryable = queryable
self.is_opaque = opaque
self.is_cascaded = cascaded
self.title = title
self.abstract = abstract
# capabilities
self.capability_keywords = []
self.capability_online_resource = None
self.capability_projection_system = []
self.capability_scale_hint = {
"min": 0,
"max": 0,
}
self.capability_bbox_lat_lon = {
"minx": 0,
"miny": 0,
"maxx": 0,
"maxy": 0,
}
self.capability_bbox_srs = {}
self.format_list = {}
self.get_capabilities_uri_GET = None
self.get_capabilities_uri_POST = None
self.get_map_uri_GET = None
self.get_map_uri_POST = None
self.get_feature_info_uri_GET = None
self.get_feature_info_uri_POST = None
self.describe_layer_uri_GET = None
self.describe_layer_uri_POST = None
self.get_legend_graphic_uri_GET = None
self.get_legend_graphic_uri_POST = None
self.get_styles_uri_GET = None
self.get_styles_uri_POST = None
self.operation_urls = [(OGCOperationEnum.GET_CAPABILITIES.value, 'get_capabilities_uri_GET', 'Get'),
(OGCOperationEnum.GET_CAPABILITIES.value, 'get_capabilities_uri_POST', 'Post'),
(OGCOperationEnum.GET_MAP.value, 'get_map_uri_GET', 'Get'),
(OGCOperationEnum.GET_MAP.value, 'get_map_uri_POST', 'Post'),
(OGCOperationEnum.GET_FEATURE_INFO.value, 'get_feature_info_uri_GET', 'Get'),
(OGCOperationEnum.GET_FEATURE_INFO.value, 'get_feature_info_uri_POST', 'Post'),
(OGCOperationEnum.DESCRIBE_LAYER.value, 'describe_layer_uri_GET', 'Get'),
(OGCOperationEnum.DESCRIBE_LAYER.value, 'describe_layer_uri_POST', 'Post'),
(OGCOperationEnum.GET_LEGEND_GRAPHIC.value, 'get_legend_graphic_uri_GET', 'Get'),
(OGCOperationEnum.GET_LEGEND_GRAPHIC.value, 'get_legend_graphic_uri_POST', 'Post'),
(OGCOperationEnum.GET_STYLES.value, 'get_styles_uri_GET', 'Get'),
(OGCOperationEnum.GET_STYLES.value, 'get_styles_uri_POST', 'Post')]
self.dimension_list = []
self.style = None
self.child_layers = []
self.iso_metadata = []
def create_layer_record(self, parent_service: Service, group: MrMapGroup, user: MrMapUser, epsg_api: EpsgApi, parent: Layer=None):
""" Transforms a OGCWebMapLayer object to Layer model (models.py)
Args:
parent_service (Service): The root or parent service which holds all these layers
group (MrMapGroup): The group that started the registration process
user (MrMapUser): The performing user
epsg_api (EpsgApi): A EpsgApi object
parent (Layer): The parent layer object to this layer
Returns:
nothing
"""
# Metadata
metadata = self._create_metadata_record(parent_service, group)
# Layer
layer = self._create_layer_record(
metadata,
parent_service,
group,
parent
)
# Additional records
self._create_additional_records(
metadata,
layer,
group,
epsg_api
)
# Final save before continue
metadata.save()
layer.save()
# Continue with child objects
for child in self.child_layers:
child.create_layer_record(
parent_service=parent_service,
group=group,
parent=layer,
user=user,
epsg_api=epsg_api
)
def _create_metadata_record(self, parent_service: Service, group: MrMapGroup):
""" Creates a Metadata record from the OGCLayer object
Args:
self (OGCLayer): The OGCLayer object (result of parsing)
parent_service (Service): The parent Service object
group (MrMapGroup): The creator/owner group
Returns:
metadata (Metadata): The persisted metadata object
"""
metadata = Metadata()
md_type = MetadataEnum.LAYER.value
metadata.metadata_type = md_type
metadata.title = self.title
metadata.abstract = self.abstract
metadata.online_resource = parent_service.metadata.online_resource
metadata.capabilities_original_uri = parent_service.metadata.capabilities_original_uri
metadata.identifier = self.identifier
metadata.contact = parent_service.metadata.contact
metadata.access_constraints = parent_service.metadata.access_constraints
metadata.is_active = False
metadata.created_by = group
# Save metadata to use id afterwards
metadata.save()
# create bounding box polygon
bounding_points = (
(float(self.capability_bbox_lat_lon["minx"]), float(self.capability_bbox_lat_lon["miny"])),
(float(self.capability_bbox_lat_lon["minx"]), float(self.capability_bbox_lat_lon["maxy"])),
(float(self.capability_bbox_lat_lon["maxx"]), float(self.capability_bbox_lat_lon["maxy"])),
(float(self.capability_bbox_lat_lon["maxx"]), float(self.capability_bbox_lat_lon["miny"])),
(float(self.capability_bbox_lat_lon["minx"]), float(self.capability_bbox_lat_lon["miny"]))
)
metadata.bounding_geometry = Polygon(bounding_points)
metadata.save()
return metadata
def _create_layer_record(self, metadata: Metadata, parent_service: Service, group: MrMapGroup, parent: Layer):
""" Creates a Layer record from the OGCLayer object
Args:
metadata (Metadata): The layer's metadata object
parent_service (Service): The parent Service object
group (MrMapGroup): The owner/creator group
parent (Layer): The parent layer object
Returns:
layer (Layer): The persisted layer object
"""
# Layer
layer = Layer()
layer.metadata = metadata
layer.identifier = self.identifier
layer.service_type = parent_service.service_type
layer.parent = parent
layer.parent_service = parent_service
layer.is_queryable = self.is_queryable
layer.is_cascaded = self.is_cascaded
layer.registered_by = group
layer.is_opaque = self.is_opaque
layer.scale_min = self.capability_scale_hint.get("min")
layer.scale_max = self.capability_scale_hint.get("max")
layer.bbox_lat_lon = metadata.bounding_geometry
layer.created_by = group
layer.published_for = parent_service.published_for
layer.parent_service = parent_service
# Save model so M2M relations can be used
layer.save()
operation_urls = []
for operation, parsed_operation_url, method in self.operation_urls:
# todo: optimize as bulk create
try:
operation_urls.append(ServiceUrl.objects.get_or_create(
operation=operation,
url=getattr(self, parsed_operation_url),
method=method
)[0])
except IntegrityError:
pass
layer.operation_urls.add(*operation_urls)
# If parent layer is a real layer, we add the current layer as a child to the parent layer
if layer.parent is not None:
layer.parent.children.add(layer)
if self.style is not None:
self.style.layer = layer
self.style.save()
if parent_service.root_layer is None:
# no root layer set yet
parent_service.root_layer = layer
parent_service.save()
layer.save()
return layer
def _create_additional_records(self, metadata: Metadata, layer: Layer, group: MrMapGroup, epsg_api: EpsgApi):
""" Creates additional records such as Keywords, ReferenceSystems, Dimensions, ...
Args:
metadata (Metadata): The layer's metadata object
layer (Layer): The Layer record object
group (MrMapGroup): The owner/creator group
epsg_api (EpsgApi): A epsg_api object
Returns:
"""
# Keywords
for kw in self.capability_keywords:
keyword = Keyword.objects.get_or_create(keyword=kw)[0]
metadata.keywords.add(keyword)
# handle reference systems
for sys in self.capability_projection_system:
parts = epsg_api.get_subelements(sys)
# check if this srs is allowed for us. If not, skip it!
if parts.get("code") not in ALLOWED_SRS:
continue
ref_sys = ReferenceSystem.objects.get_or_create(code=parts.get("code"), prefix=parts.get("prefix"))[0]
metadata.reference_system.add(ref_sys)
for iso_md in self.iso_metadata:
iso_md = iso_md.to_db_model(created_by=group)
metadata.add_metadata_relation(to_metadata=iso_md,
relation_type=MetadataRelationEnum.DESCRIBES.value,
origin=iso_md.origin)
# Dimensions
for dimension in self.dimension_list:
dim = Dimension.objects.get_or_create(
type=dimension.get("type"),
units=dimension.get("units"),
extent=dimension.get("extent"),
)[0]
layer.metadata.dimensions.add(dim)
| 40.276923 | 134 | 0.618506 | 10,064 | 0.961039 | 0 | 0 | 0 | 0 | 0 | 0 | 2,515 | 0.240164 |
f3878c596d313ee24b2d62403844d0ab0524f2b3 | 906 | py | Python | Sample/PyWebApi.IIS/json_fmtr.py | DataBooster/PyWebApi | d4afed2d6b05215302bfb316a61b36cf13fb74aa | [
"MIT"
] | 6 | 2020-04-10T22:39:38.000Z | 2022-01-21T18:07:08.000Z | Sample/PyWebApi.IIS/json_fmtr.py | DataBooster/PyWebApi | d4afed2d6b05215302bfb316a61b36cf13fb74aa | [
"MIT"
] | null | null | null | Sample/PyWebApi.IIS/json_fmtr.py | DataBooster/PyWebApi | d4afed2d6b05215302bfb316a61b36cf13fb74aa | [
"MIT"
] | 1 | 2021-01-26T23:08:42.000Z | 2021-01-26T23:08:42.000Z | # -*- coding: utf-8 -*-
"""json_fmtr.py
This module implements a MediaTypeFormatter with JSON response.
This module was originally shipped as an example code from https://github.com/DataBooster/PyWebApi, licensed under the MIT license.
Anyone who obtains a copy of this code is welcome to modify it for any purpose, and holds all rights to the modified part only.
The above license notice and permission notice shall be included in all copies or substantial portions of the Software.
"""
from jsonpickle import dumps
from pywebapi import MediaTypeFormatter
class JsonFormatter(MediaTypeFormatter):
"""description of class"""
@property
def supported_media_types(self):
return ['application/json', 'text/json']
def format(self, obj, media_type:str, **kwargs):
kwargs['unpicklable'] = kwargs.get('unpicklable', False)
return dumps(obj, **kwargs)
| 34.846154 | 135 | 0.730684 | 327 | 0.360927 | 0 | 0 | 94 | 0.103753 | 0 | 0 | 585 | 0.645695 |
f388170b0b0845168e9528e0556eaa6b27d8bd42 | 6,952 | py | Python | vad.py | zhuligs/Pallas | c8d77d0963c080fa7331560f1659001488b0328f | [
"MIT"
] | null | null | null | vad.py | zhuligs/Pallas | c8d77d0963c080fa7331560f1659001488b0328f | [
"MIT"
] | null | null | null | vad.py | zhuligs/Pallas | c8d77d0963c080fa7331560f1659001488b0328f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# import numpy as np
import itin
import sdata
import fppy
from copy import deepcopy as cp
from wrapdimer import get_rmode, get_0mode, get_mode
from zfunc import set_cell_from_vasp, write_cell_to_vasp
from vfunc import runvdim, goptv
# def con(reac, prod):
# mode = get_mode(reac, prod)
# sdd = runvdim(reac, mode)
# mode = -1.0 * get_mode(sdd, reac)
# newmin = goptv(sdd, mode)
# types = sdata.types()
# fp0 = reac.get_lfp()
def con(reac, prod):
    """Single connection attempt between reactant and product cells.

    Runs itin.ndimMax random-mode dimer searches (runvdim) from each
    endpoint, picks the reactant/product saddle pair with the smallest
    fingerprint distance, relaxes both with a zero mode (goptv), and
    returns (d, rbe, pbe, xsp, ysp, xspl, yspl):
      d          fingerprint distance between the two relaxed structures
      rbe, pbe   saddle energies relative to the reactant energy
      xsp, ysp   selected saddle cells (reactant/product side)
      xspl, yspl locally relaxed counterparts of xsp/ysp
    """
    # Dimer searches from both endpoints with random initial modes.
    rPool = []
    pPool = []
    for i in range(itin.ndimMax):
        print "ZLOG: R DIM", i
        mode = get_rmode()
        tcc = runvdim(reac, mode)
        rPool.append(tcc)
        print "ZLOG: E, DIR", tcc.get_e(), sdata.ddir
        print "ZLOG: P DIM", i
        mode = get_rmode()
        tcc = runvdim(prod, mode)
        pPool.append(tcc)
        print "ZLOG: E, DIR", tcc.get_e(), sdata.ddir
    # All-pairs fingerprint distances between the two saddle pools.
    dcompt = []
    for i in range(itin.ndimMax):
        xreac = cp(rPool[i])
        fpi = xreac.get_lfp()
        for j in range(itin.ndimMax):
            xprod = cp(pPool[j])
            fpj = xprod.get_lfp()
            (dist, m) = fppy.fp_dist(itin.ntyp, sdata.types, fpi, fpj)
            print "ZLOG: I %d J %d dist %8.6E" % (i, j, dist)
            dcompt.append([dist, [i, j]])
    # Pick the closest pair (smallest fingerprint distance).
    dcomp = sorted(dcompt, key=lambda x: x[0])
    print "ZLOG: shortest dim D %8.6E" % (dcomp[0][0])
    (ix, iy) = dcomp[0][1]
    print "ZLOG: ix iy", ix, iy
    xsp = cp(rPool[ix])
    ysp = cp(pPool[iy])
    fp1 = xsp.get_lfp()
    fp2 = ysp.get_lfp()
    (d1, m1) = fppy.fp_dist(itin.ntyp, sdata.types, fp1, fp2)
    print "ZLOG: CONF D: %8.6E" % (d1)
    # modex = -1*get_mode(xsp, reac)
    # modey = -1*get_mode(ysp, prod)
    # Zero initial mode is used for both relaxations.
    modex = get_0mode()
    xspl = goptv(xsp, modex)
    print "ZLOG: XSPL, E, DIR", xspl.get_e(), sdata.gdir
    yspl = goptv(ysp, modex)
    print "ZLOG: YSPL, E, DIR", yspl.get_e(), sdata.gdir
    # Barrier energies measured from the reactant energy.
    # NOTE(review): pbe is also measured from reac.get_e(), not
    # prod.get_e() -- confirm this is intended.
    rbe = xsp.get_e() - reac.get_e()
    pbe = ysp.get_e() - reac.get_e()
    fpxs = xspl.get_lfp()
    fpys = yspl.get_lfp()
    (d, m) = fppy.fp_dist(itin.ntyp, sdata.types, fpxs, fpys)
    print "ZLOG: DD: ", d
    return(d, rbe, pbe, xsp, ysp, xspl, yspl)
def con2(reac, prod):
    """Connection attempt that relaxes every saddle before pairing.

    Unlike con(), each dimer result is immediately relaxed (goptv along
    the damped reversed dimer mode) and the pair selection is done on the
    *relaxed* structures (rlool/plool). Returns (d, xsp, ysp, xspl, yspl):
      d          fingerprint distance of the closest relaxed pair
      xsp, ysp   the corresponding unrelaxed saddle cells
      xspl, yspl the relaxed cells used for the distance
    Note: no barrier energies are returned (contrast with con()).
    """
    rPool = []   # unrelaxed reactant-side saddles
    rlool = []   # relaxed reactant-side structures
    pPool = []   # unrelaxed product-side saddles
    plool = []   # relaxed product-side structures
    for i in range(itin.ndimMax):
        print "ZLOG: R DIM", i
        mode = get_rmode()
        tcc = runvdim(reac, mode)
        rPool.append(cp(tcc))
        print "ZLOG: DIM E, DIR", tcc.get_e(), sdata.ddir
        # Relax along the reversed (and damped, factor 0.1) dimer mode.
        modex = -1*get_mode(tcc, reac)*0.1
        tccc = goptv(tcc, modex)
        print "ZLOG: OPT E, DIR", tccc.get_e(), sdata.gdir
        rlool.append(cp(tccc))
        print "ZLOG: P DIM", i
        mode = get_rmode()
        tcc = runvdim(prod, mode)
        pPool.append(cp(tcc))
        print "ZLOG: DIM E, DIR", tcc.get_e(), sdata.ddir
        modey = -1*get_mode(tcc, prod)*0.1
        tccc = goptv(tcc, modey)
        print "ZLOG: OPT E, DIR", tccc.get_e(), sdata.gdir
        plool.append(tccc)
    # All-pairs fingerprint distances between the *relaxed* structures.
    dcompt = []
    for i in range(itin.ndimMax):
        xreac = cp(rlool[i])
        fpi = xreac.get_lfp()
        for j in range(itin.ndimMax):
            xprod = cp(plool[j])
            fpj = xprod.get_lfp()
            (dist, m) = fppy.fp_dist(itin.ntyp, sdata.types, fpi, fpj)
            print "ZLOG: I %d J %d dist %8.6E" % (i, j, dist)
            dcompt.append([dist, [i, j]])
    dcomp = sorted(dcompt, key=lambda x: x[0])
    print "ZLOG: shortest OPT D %8.6E" % (dcomp[0][0])
    (ix, iy) = dcomp[0][1]
    print "ZLOG: ix iy", ix, iy
    # Return both the saddles and their relaxed counterparts for the
    # closest pair.
    xsp = cp(rPool[ix])
    ysp = cp(pPool[iy])
    xspl = cp(rlool[ix])
    yspl = cp(plool[iy])
    d = dcomp[0][0]
    # fp1 = xsp.get_lfp()
    # fp2 = ysp.get_lfp()
    # (d1, m1) = fppy.fp_dist(itin.ntyp, sdata.types, fp1, fp2)
    # print "ZLOG: CONF D: %8.6E" % (d1)
    # # modex = -1*get_mode(xsp, reac)
    # # modey = -1*get_mode(ysp, prod)
    # modex = get_0mode()
    # xspl = goptv(xsp, modex)
    # print "ZLOG: XSPL, E, DIR", xspl.get_e(), sdata.gdir
    # yspl = goptv(ysp, modex)
    # print "ZLOG: YSPL, E, DIR", yspl.get_e(), sdata.gdir
    # rbe = xsp.get_e() - reac.get_e()
    # pbe = ysp.get_e() - reac.get_e()
    # fpxs = xspl.get_lfp()
    # fpys = yspl.get_lfp()
    # (d, m) = fppy.fp_dist(itin.ntyp, sdata.types, fpxs, fpys)
    # print "ZLOG: DD: ", d
    return(d, xsp, ysp, xspl, yspl)
def rcon(xreac, xprod):
dmax = itin.dist
dd = 1.0
rc = []
ist = 0
xfp0 = xreac.get_lfp()
yfp0 = xprod.get_lfp()
while dd > dmax:
ist += 1
if ist > 200:
break
(d, xsp, ysp, xspl, yspl) = con2(xreac, xprod)
xreac = cp(xspl)
xprod = cp(yspl)
rc.append([d, xsp, ysp, xspl, yspl])
dtt = []
for i in range(len(rc)):
xxl = rc[i][5]
fpxxl = xxl.get_lfp()
(dx, m) = fppy.fp_dist(itin.ntyp, sdata.types, fpxxl, yfp0)
dtt.append(dx)
print "ZLOG: I %d to PROD dist %8.6E" % (i, dx)
for j in range(len(rc)):
yyl = rc[j][6]
fpyyl = yyl.get_lfp()
(dy, m) = fppy.fp_dist(itin.ntyp, sdata.types, fpyyl, xfp0)
dtt.append(dy)
print "ZLOG: J %d to PROD dist %8.6E" % (j, dy)
(dt, m) = fppy.fp_dist(itin.ntyp, sdata.types, fpxxl, fpyyl)
print "ZLOG: CONT I %2d J %2d dist %8.6E" % (i, j, dt)
dtt.append(dt)
dd = min(dtt)
print "ZLOG: IST:", ist
print "ZLOG: DRP:", d, dd, rbe, pbe
print "ZLOG: X-S-E, X-L-E, Y-S-E, Y-L-E:", \
xsp.get_e(), xspl.get_e(), ysp.get_e(), yspl.get_e()
write_cell_to_vasp(xsp, "ixsp_" + str(ist) + ".vasp")
write_cell_to_vasp(xspl, "ixspl_" + str(ist) + ".vasp")
write_cell_to_vasp(ysp, "iysp_" + str(ist) + ".vasp")
write_cell_to_vasp(yspl, "iyspl_" + str(ist) + ".vasp")
return rc
def initrun():
    """Read and relax the two endpoint structures.

    Loads R.vasp/P.vasp, relaxes both with a zero initial mode, writes
    the relaxed cells to ROPT.vasp/POPT.vasp, initializes the global
    sdata.types, and logs the initial fingerprint distance and energies.

    Returns:
        (reac, prod): the relaxed reactant and product cells.
    """
    reac0 = set_cell_from_vasp('R.vasp')
    prod0 = set_cell_from_vasp('P.vasp')
    mode = get_0mode()
    reac = goptv(reac0, mode)
    prod = goptv(prod0, mode)
    write_cell_to_vasp(reac, 'ROPT.vasp')
    write_cell_to_vasp(prod, 'POPT.vasp')
    # Shared atom-type table used by all fingerprint computations.
    sdata.types = reac.get_types()
    fpr = reac.get_lfp()
    fpp = prod.get_lfp()
    (d, m) = fppy.fp_dist(itin.ntyp, sdata.types, fpr, fpp)
    print 'ZLOG: INIT DIST', d
    print 'ZLOG: REAC ENERGY', reac.get_e()
    print 'ZLOG: PROD ENERGY', prod.get_e()
    return (reac, prod)
def main():
    """Relax the endpoints, run the connection search, dump all results.

    Each rc record is [d, xsp, ysp, xspl, yspl] (see rcon/con2); the four
    cells of every record are written out as numbered VASP files.
    """
    (reac, prod) = initrun()
    rc = rcon(reac, prod)
    i = 0
    for x in rc:
        i += 1
        # NOTE(review): the "rbe, pbe" label is stale -- x[1] and x[2] are
        # the xsp/ysp cells from con2(), not barrier energies.
        print "ZZ# no, d, rbe, pbe", i, x[0], x[1], x[2]
        write_cell_to_vasp(x[1], 'xsp' + str(i) + '.vasp')
        write_cell_to_vasp(x[2], 'ysp' + str(i) + '.vasp')
        write_cell_to_vasp(x[3], 'xspl' + str(i) + '.vasp')
        write_cell_to_vasp(x[4], 'yspl' + str(i) + '.vasp')
# Script entry point.
if __name__ == '__main__':
    main()
| 28.260163 | 76 | 0.534091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,629 | 0.234321 |
f3884809bd62f0f776b8c21dd23802afacf22c1c | 349 | py | Python | EASTAR/main/templatetags/extra.py | DightMerc/EASTAR | 04a3578932f8b4b842e0898513ef279c2f750f48 | [
"Apache-2.0"
] | 1 | 2020-09-21T16:46:19.000Z | 2020-09-21T16:46:19.000Z | EASTAR/main/templatetags/extra.py | DightMerc/EASTAR | 04a3578932f8b4b842e0898513ef279c2f750f48 | [
"Apache-2.0"
] | null | null | null | EASTAR/main/templatetags/extra.py | DightMerc/EASTAR | 04a3578932f8b4b842e0898513ef279c2f750f48 | [
"Apache-2.0"
] | null | null | null | from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter
@stringfilter
def FindPhoto(value):
if "%photo%" in str(value):
return True
else:
return False
@register.filter
@stringfilter
def ReplacePhoto(value):
return value.replace("%photo%","")
| 18.368421 | 55 | 0.719198 | 0 | 0 | 0 | 0 | 229 | 0.65616 | 0 | 0 | 20 | 0.057307 |
f3896211c27faf122c9bf819667c176a83dab4ba | 1,058 | py | Python | influxable/db/function/transformations.py | AndyBryson/influxable | 8a2f798cc5ae12b04f803afc84d7e064a3afd250 | [
"MIT"
] | 30 | 2019-07-28T12:57:21.000Z | 2022-03-30T05:02:57.000Z | influxable/db/function/transformations.py | AndyBryson/influxable | 8a2f798cc5ae12b04f803afc84d7e064a3afd250 | [
"MIT"
] | 9 | 2020-04-23T11:29:29.000Z | 2022-02-04T09:15:16.000Z | influxable/db/function/transformations.py | AndyBryson/influxable | 8a2f798cc5ae12b04f803afc84d7e064a3afd250 | [
"MIT"
] | 5 | 2021-03-23T04:05:42.000Z | 2022-01-28T12:04:37.000Z | from . import _generate_function
Abs = _generate_function('ABS')
ACos = _generate_function('ACOS')
ASin = _generate_function('ASIN')
ATan = _generate_function('ATAN')
ATan2 = _generate_function('ATAN2')
Ceil = _generate_function('CEIL')
Cos = _generate_function('COS')
CumulativeSum = _generate_function('CUMULATIVE_SUM')
Derivative = _generate_function('DERIVATIVE')
Difference = _generate_function('DIFFERENCE')
Elapsed = _generate_function('ELAPSED')
Exp = _generate_function('EXP')
Floor = _generate_function('FLOOR')
Histogram = _generate_function('HISTOGRAM')
Ln = _generate_function('LN')
Log = _generate_function('LOG')
Log2 = _generate_function('LOG2')
Log10 = _generate_function('LOG10')
MovingAverage = _generate_function('MOVING_AVERAGE')
NonNegativeDerivative = _generate_function('NON_NEGATIVE_DERIVATIVE')
NonNegativeDifference = _generate_function('NON_NEGATIVE_DIFFERENCE')
Pow = _generate_function('POW')
Round = _generate_function('ROUND')
Sin = _generate_function('SIN')
Sqrt = _generate_function('SQRT')
Tan = _generate_function('TAN')
| 36.482759 | 69 | 0.797732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.216446 |
f38a2a1b6ca2d859f8e7abbf785a6ba3a4abb56a | 10,176 | py | Python | notebooks/pixel_cnn/pixelcnn_helpers.py | bjlkeng/sandbox | ba1fea113065256d4981a71f7b4bece7299effd1 | [
"MIT"
] | 158 | 2017-11-09T14:56:31.000Z | 2022-03-26T17:26:20.000Z | notebooks/pixel_cnn/pixelcnn_helpers.py | iivek/sandbox | c95653618b7be5022b0a8e217a4e5667badb2449 | [
"MIT"
] | 8 | 2017-11-28T11:14:46.000Z | 2021-05-03T00:23:57.000Z | notebooks/pixel_cnn/pixelcnn_helpers.py | iivek/sandbox | c95653618b7be5022b0a8e217a4e5667badb2449 | [
"MIT"
] | 77 | 2017-11-21T15:27:52.000Z | 2022-02-17T16:37:34.000Z | import math
import numpy as np
from keras import backend as K
from keras.layers import Conv2D, Concatenate, Activation, Add
from keras.engine import InputSpec
def logsoftmax(x):
''' Numerically stable log(softmax(x)) '''
m = K.max(x, axis=-1, keepdims=True)
return x - m - K.log(K.sum(K.exp(x - m), axis=-1, keepdims=True))
def pixelcnn_loss(target, output, img_rows, img_cols, img_chns, n_components):
''' Keras PixelCNN loss function. Use a lambda to fill in the last few
parameters
Args:
img_rows, img_cols, img_chns: image dimensions
n_components: number of mixture components
Returns:
log-loss
'''
assert img_chns == 3
# Extract out each of the mixture parameters (multiple of 3 b/c of image channels)
output_m = output[:, :, :, :3*n_components]
output_invs = output[:, :, :, 3*n_components:6*n_components]
output_logit_weights = output[:, :, :, 6*(n_components):]
# Repeat the target to match the number of mixture component shapes
x = K.reshape(target, (-1, img_rows, img_cols, img_chns))
slices = []
for c in range(img_chns):
slices += [x[:, :, :, c:c+1]] * n_components
x = K.concatenate(slices, axis=-1)
x_decoded_m = output_m
x_decoded_invs = output_invs
x_logit_weights = output_logit_weights
# Pixels rescaled to be in [-1, 1] interval
offset = 1. / 127.5 / 2.
centered_mean = x - x_decoded_m
cdfminus_arg = (centered_mean - offset) * K.exp(x_decoded_invs)
cdfplus_arg = (centered_mean + offset) * K.exp(x_decoded_invs)
cdfminus_safe = K.sigmoid(cdfminus_arg)
cdfplus_safe = K.sigmoid(cdfplus_arg)
# Generate the PDF (logistic) in case the `m` is way off (cdf is too small)
# pdf = e^(-(x-m)/s) / {s(1 + e^{-(x-m)/s})^2}
# logpdf = -(x-m)/s - log s - 2 * log(1 + e^(-(x-m)/s))
# = -mid_in - invs - 2 * softplus(-mid_in)
mid_in = centered_mean * K.exp(x_decoded_invs)
log_pdf_mid = -mid_in - x_decoded_invs - 2. * K.tf.nn.softplus(-mid_in)
# Use trick from PixelCNN++ implementation to protect against edge/overflow cases
# In extreme cases (cdfplus_safe - cdf_minus_safe < 1e-5), use the
# log_pdf_mid and assume that density is 1 pixel width wide (1/127.5) as
# the density: log(pdf * 1/127.5) = log(pdf) - log(127.5)
# Add on line of best fit (see notebooks/blog post) to the difference between
# edge case and the standard case
edge_case = log_pdf_mid - np.log(127.5) + 2.04 * x_decoded_invs - 0.107
# ln (sigmoid(x)) = x - ln(e^x + 1) = x - softplus(x)
# ln (1 - sigmoid(x)) = ln(1 / (1 + e^x)) = -softplus(x)
log_cdfplus = cdfplus_arg - K.tf.nn.softplus(cdfplus_arg)
log_1minus_cdf = -K.tf.nn.softplus(cdfminus_arg)
log_ll = K.tf.where(x <= -0.999, log_cdfplus,
K.tf.where(x >= 0.999, log_1minus_cdf,
K.tf.where(cdfplus_safe - cdfminus_safe > 1e-5,
K.log(K.maximum(cdfplus_safe - cdfminus_safe, 1e-12)),
edge_case)))
# x_weights * [sigma(x+0.5...) - sigma(x-0.5 ...) ]
# = log x_weights + log (...)
# Compute log(softmax(.)) directly here, instead of doing 2-step to avoid overflow
pre_result = logsoftmax(x_logit_weights) + log_ll
result = []
for chn in range(img_chns):
chn_result = pre_result[:, :, :, chn*n_components:(chn+1)*n_components]
v = K.logsumexp(chn_result, axis=-1)
result.append(v)
result = K.batch_flatten(K.stack(result, axis=-1))
return -K.sum(result, axis=-1)
def sigmoid(x):
# Protect overflow
if x < -20:
return 0.0
elif x > 20:
return 1.0
return 1 / (1 + math.exp(-x))
def logistic_cdf(x, loc, scale):
return sigmoid((x - loc) / scale)
def compute_pvals(m, invs):
pvals = []
for i in range(256):
if i == 0:
pval = logistic_cdf((0.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs))
elif i == 255:
pval = 1. - logistic_cdf((254.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs))
else:
pval = (logistic_cdf((i + 0.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs))
- logistic_cdf((i - 0.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs)))
pvals.append(pval)
return pvals
def compute_mixture(ms, invs, weights, n_comps):
components = []
for i in range(n_comps):
pvals = compute_pvals(ms[i], invs[i])
arr = np.array(pvals)
components.append(weights[i] * arr)
return np.sum(components, axis=0)
class PixelConv2D(Conv2D):
def __init__(self, ptype, *args, **kwargs):
# ptype corresponds to pixel type and mask type, e.g. ra, ga, ba, rb, gb, bb
assert ptype[0] in ['r', 'g', 'b'], ptype
assert ptype[1] in ['a', 'b'], ptype
self.ptype = ptype
super(PixelConv2D, self).__init__(*args, **kwargs)
def build_mask(self, kernel_shape):
# kernel_shape = kern_dim x kern_dim x total_filters
# = kern_dim x kern_dim x r_g_b_filters x filters_per_channel
assert kernel_shape[0] == kernel_shape[1], \
"{} must be equal in first two dims".format(kernel_shape)
assert kernel_shape[0] % 2 == 1, \
"{} must be odd size in first two dims".format(kernel_shape)
assert kernel_shape[2] % 3 == 0, \
"{} must be divisible by 3".format(kernel_shape)
data = np.ones(kernel_shape)
data.shape
mid = data.shape[0] // 2
if self.ptype[0] == 'r':
filt_prev = 0
filt_thres = int(data.shape[2] / 3)
elif self.ptype[0] == 'g':
filt_prev = int(data.shape[2] / 3)
filt_thres = int(2 * data.shape[2] / 3)
else:
assert self.ptype[0] == 'b', self.ptype
filt_prev = int(2 * data.shape[2] / 3)
filt_thres = data.shape[2]
for k1 in range(data.shape[0]):
for k2 in range(data.shape[1]):
for chan in range(data.shape[2]):
if (self.ptype[1] == 'a'
and filt_prev <= chan < filt_thres
and k1 == mid and k2 == mid):
# Handle the only difference between 'a' and 'b' ptypes
data[k1, k2, chan, :] = 0
elif k1 > mid or (k1 >= mid and k2 > mid) or chan >= filt_thres:
# Turn off anything:
# a) Below currrent pixel
# b) Past the current pixel (scanning left from right, up to down)
# c) In a later filter
data[k1, k2, chan, :] = 0
return K.constant(np.ravel(data), dtype='float32', shape=kernel_shape)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel_mask = self.build_mask(kernel_shape)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
masked_kernel = self.kernel * self.kernel_mask
outputs = K.conv2d(
inputs,
masked_kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def conv_block(input_tensor, filters, kernel_size, name, is_first=False):
outs = []
for t in ['rb', 'gb', 'bb']:
if is_first:
t = t[0] + 'a'
x = PixelConv2D(t, filters, kernel_size,
name='res' + name + t, padding='same')(input_tensor)
x = Activation('relu')(x)
outs.append(x)
return Concatenate()(outs)
def resnet_block(input_tensor, filters, stage, block, kernel=3):
name_base = str(stage) + block + '_branch'
filters1, filters2, filters3 = filters
x = input_tensor
x = conv_block(x, filters1, (1, 1), name=name_base + '_a-1x1')
x = conv_block(x, filters2, (kernel, kernel),
name=name_base + '_b-{}x{}'.format(kernel, kernel))
x = conv_block(x, filters3, (1, 1), name=name_base + '_c-1x1')
x = Add()([x, input_tensor])
return x
def final_block(input_tensor, filters, in_filters, name, kernel_size=(1, 1)):
outs = []
for t in ['rb', 'gb', 'bb']:
x = PixelConv2D(t, filters, kernel_size,
name='final' + name + '_' + t,
padding='same')(input_tensor)
x = Activation('relu')(x)
outs.append(x)
return Concatenate()(outs)
| 37.549815 | 100 | 0.554442 | 4,243 | 0.416961 | 0 | 0 | 0 | 0 | 0 | 0 | 2,147 | 0.210987 |
f38a6c2e71521c84f4388fefbf9286e8d814c7cf | 5,667 | py | Python | so_ana_util/common_types.py | HBernigau/StackOverflowAnalysis | 42dae7c0c23fcdeca23f8c770ad17b2eb341d091 | [
"MIT"
] | null | null | null | so_ana_util/common_types.py | HBernigau/StackOverflowAnalysis | 42dae7c0c23fcdeca23f8c770ad17b2eb341d091 | [
"MIT"
] | null | null | null | so_ana_util/common_types.py | HBernigau/StackOverflowAnalysis | 42dae7c0c23fcdeca23f8c770ad17b2eb341d091 | [
"MIT"
] | null | null | null | """
contains several global data classes (for log entries for example)
Author: `HBernigau <https://github.com/HBernigau>`_
Date: 01.2022
"""
import marshmallow_dataclass as mmdc
import marshmallow
from logging import StreamHandler
from typing import Any
from dataclasses import is_dataclass, dataclass, field
from datetime import datetime
import uuid
import warnings
import time
import logging
import os
import threading
UUID = mmdc.NewType("UUID", str, field=marshmallow.fields.UUID)
@dataclass
class LogEntry:
name: str
flow_run_id: str
task_name: str
task_slug: str
task_run_id: str
map_index: int
task_loop_count: int
task_run_count: int
thread: str
threadName: str
process: int
processName: str
exc_text: str
levelname: str
msg: str
timestamp: datetime = field(default_factory=datetime.now)
msg_uuid: UUID = field(default_factory=uuid.uuid4)
modus: str = field(default_factory=lambda:'test')
def fill_dc_from_obj(DC, obj: Any, add_dict: dict = None, excl_lst = None, get_default = None, None_2_default = True):
"""
Creates data class from fields in obj
:param DC: Any data class
:param obj: obj that contains values for parameters of data class (dictionary or object)
:param add_dict: additional parameter values (will overwrite values of obj. if present)
:param excl_lst: list with field names for fields of CD that should not be filled
:return: instance of DC with values of obj and add_dict
"""
def h1(obj, key, default = None):
if isinstance(obj, dict):
ret = obj.get(key, default)
else:
try:
ret = getattr(obj, key, default)
except:
raise ValueError(f'"get_item" not implemented for type="{type(obj)}"')
if None_2_default and ret is None:
return default
else:
return ret
if add_dict is None:
add_dict = {}
if excl_lst is None:
excl_lst = []
if get_default is None:
get_default=lambda x: None
if not is_dataclass(DC) and type(DC) is type:
raise ValueError(f'"DC" must be a valid dataclass.')
rel_val_dict = {key: h1(obj, key, get_default(key)) for key, value in DC.__dataclass_fields__.items() if not key in excl_lst}
rel_val_dict.update(add_dict)
return DC(**rel_val_dict)
def flatten_ls(ls, base=None):
if base is None:
base = []
if not (isinstance(ls, list)):
return base + [ls]
else:
for item in ls:
base += flatten_ls(item)
return base
def get_null_logger():
fb_null_logger = logging.getLogger('downloader logger')
fb_null_logger.setLevel(logging.DEBUG)
fb_null_logger.addHandler(logging.NullHandler())
return fb_null_logger
class TstHandler(StreamHandler):
def __init__(self):
self.reset_loglist()
super().__init__()
def reset_loglist(self):
self._log_list = []
@property
def log_list(self):
return self._log_list
def emit(self, record):
msg = self.format(record)
self._log_list.append({'formated_log_msg': msg, **record.__dict__})
class CustomLogHandler(StreamHandler):
    """Stores prefect logs in the project's postgresql data base.

    Each emitted record is converted to a LogEntry (via fill_dc_from_obj)
    and committed through a fresh session obtained from *get_session*.
    Writing is retried a few times before giving up.
    """
    # Number of attempts made to commit a log record before giving up.
    # The original loop made only 2 attempts while the warning message
    # claimed "trial i+1/3"; the loop now really tries MAX_TRIES times.
    MAX_TRIES = 3

    def __init__(self, get_session, modus, *args, **kwargs):
        # get_session: zero-argument callable returning a DB session
        # modus: free-form tag (e.g. 'test') stored with every entry
        self.get_session=get_session
        self.modus=modus
        super().__init__(*args, **kwargs)

    def emit(self, record):
        """Convert *record* to a LogEntry and persist it (best effort)."""
        resp = fill_dc_from_obj(DC=LogEntry,
                                obj=record,
                                excl_lst = ['timestamp', 'msg_uuid'],
                                add_dict={'modus': self.modus})
        # Fill in process/thread info that may be missing on the record.
        default_dict = {'thread': threading.get_ident(),
                        'threadName': threading.current_thread().name,
                        'process': os.getpid()
                        }
        for attr, attr_def_value in default_dict.items():
            if getattr(resp, attr, None) is None:
                setattr(resp, attr, attr_def_value)
        # The msg column is size-limited: coerce to str and truncate.
        if not isinstance(resp.msg, str):
            resp.msg = str(resp.msg)[:4094]
        else:
            resp.msg=resp.msg[:4094]
        for i in range(self.MAX_TRIES):
            try:
                session = self.get_session()
                session.add(resp)
                session.commit()
                # session.close() # can be omitted now...
                break
            except Exception as exc:
                warnings.warn(f'Error when logging: "{exc}" (trial {i+1}/{self.MAX_TRIES})')
                time.sleep(1.0)
        else:
            warnings.warn(f'Final result: Could not log record {record}')
def get_tst_logger(tst_handler):
    """Return a fresh 'tst_logger' Logger (INFO level) with *tst_handler*
    attached, unless a handler of the same type is already present."""
    logger = logging.Logger('tst_logger', level = logging.INFO)
    already_attached = any(type(h) is type(tst_handler) for h in logger.handlers)
    if not already_attached:
        logger.addHandler(tst_handler)
    return logger
def get_prod_logger(name, get_session, cust_formatting = None, modus='test'):
    """Return a production logger that writes both to the console (StreamHandler)
    and to the database (CustomLogHandler), each formatted with
    *cust_formatting* (a default format is used when None)."""
    logger = logging.Logger(name, level = logging.INFO)
    if cust_formatting is None:
        cust_formatting = '[%(asctime)s] %(levelname)s - %(name)s | %(message)s'
    required_handlers = [StreamHandler(), CustomLogHandler(get_session, modus=modus)]
    for candidate in required_handlers:
        # Skip handler types that are already attached.
        if any(type(h) is type(candidate) for h in logger.handlers):
            continue
        candidate.setFormatter(logging.Formatter(cust_formatting))
        logger.addHandler(candidate)
    return logger
| 31.137363 | 129 | 0.620787 | 2,347 | 0.414152 | 0 | 0 | 546 | 0.096347 | 0 | 0 | 1,076 | 0.189871 |
f38cd9bf72cf20ce46ec9a0d171610abb4fc724e | 544 | py | Python | main.py | traduttore/traduttore-model | c7c2f3eba11226a32bad547c4f89186afab676da | [
"MIT"
] | null | null | null | main.py | traduttore/traduttore-model | c7c2f3eba11226a32bad547c4f89186afab676da | [
"MIT"
] | null | null | null | main.py | traduttore/traduttore-model | c7c2f3eba11226a32bad547c4f89186afab676da | [
"MIT"
] | null | null | null | from run_translation.TestModelComputer import asl_translation
from run_translation.TextToSpeech import tts
from run_translation.RunPiModelStream import rasp_translation
# from run_translation.RunPiModelTesting import rasp_translation
from run_translation.TestModelComputerLetters import asl_translation_letters
# from run_translation.SpeechToText import stt
# Script entry point: run the camera-based translation (camera id 1).
# The commented-out calls are alternative pipelines kept for manual testing
# (Raspberry Pi stream translation, text-to-speech, speech-to-text).
if __name__ == "__main__":
    # sentence = rasp_translation()
    sentence = asl_translation(CAM_ID=1)
    # rasp_sentence = rasp_translation([])
    # tts(sentence)
# print(stt()) | 41.846154 | 76 | 0.816176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.400735 |
f38d5179a40a5708688c37698f18af00b47801df | 855 | py | Python | counting_elements.py | vyshuks/Leetcode-30-day-challenge | aee6050c14670468426b437353f1df99c408a5a9 | [
"MIT"
] | null | null | null | counting_elements.py | vyshuks/Leetcode-30-day-challenge | aee6050c14670468426b437353f1df99c408a5a9 | [
"MIT"
] | null | null | null | counting_elements.py | vyshuks/Leetcode-30-day-challenge | aee6050c14670468426b437353f1df99c408a5a9 | [
"MIT"
] | null | null | null | # Given an integer array arr, count element x such that x + 1 is also in arr.
# If there're duplicates in arr, count them seperately.
# Example 1:
# Input: arr = [1,2,3]
# Output: 2
# Explanation: 1 and 2 are counted cause 2 and 3 are in arr.
# Example 2:
# Input: arr = [1,1,3,3,5,5,7,7]
# Output: 0
# Explanation: No numbers are counted, cause there's no 2, 4, 6, or 8 in arr.
# Example 3:
# Input: arr = [1,3,2,3,5,0]
# Output: 3
# Explanation: 0, 1 and 2 are counted cause 1, 2 and 3 are in arr.
# Example 4:
# Input: arr = [1,1,2,2]
# Output: 2
# Explanation: Two 1s are counted cause 2 is in arr.
def count_elements(arr):
    """Count elements x in arr such that x + 1 is also present in arr.

    Duplicates are counted separately, e.g. [1, 1, 2, 2] -> 2 (both 1s
    count because 2 is present).

    :param arr: list of integers
    :return: number of qualifying elements
    """
    # A set gives O(1) membership tests; the original dict-of-1s was a
    # set in disguise.
    present = set(arr)
    return sum(1 for num in arr if num + 1 in present)
print(count_elements([1,1,2,2])) | 20.357143 | 77 | 0.596491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 585 | 0.684211 |
f38fbb026f37e5802966766250eb1e180269abf5 | 446 | py | Python | terrascript/data/logicmonitor.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | terrascript/data/logicmonitor.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | terrascript/data/logicmonitor.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z | # terrascript/data/logicmonitor.py
import terrascript
class logicmonitor_collectors(terrascript.Data):
    """Terrascript binding for the ``logicmonitor_collectors`` Terraform data source."""
    pass
class logicmonitor_dashboard(terrascript.Data):
    """Terrascript binding for the ``logicmonitor_dashboard`` Terraform data source."""
    pass
class logicmonitor_dashboard_group(terrascript.Data):
    """Terrascript binding for the ``logicmonitor_dashboard_group`` Terraform data source."""
    pass
class logicmonitor_device_group(terrascript.Data):
    """Terrascript binding for the ``logicmonitor_device_group`` Terraform data source."""
    pass
# Public API: one entry per data-source class defined above.
__all__ = [
    "logicmonitor_collectors",
    "logicmonitor_dashboard",
    "logicmonitor_dashboard_group",
    "logicmonitor_device_group",
]
| 16.518519 | 53 | 0.773543 | 234 | 0.524664 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.313901 |
f390563b3cfae7b34b4fd4558f37190e60715548 | 32,834 | py | Python | src/plotting_modules.py | kjdavidson/NoisePy | a7445dd2f68f64cb562d6a87096e5f12a2c3b612 | [
"MIT"
] | 74 | 2019-11-08T18:32:36.000Z | 2022-03-27T11:26:53.000Z | src/plotting_modules.py | kjdavidson/NoisePy | a7445dd2f68f64cb562d6a87096e5f12a2c3b612 | [
"MIT"
] | 23 | 2019-11-10T01:30:04.000Z | 2022-03-24T10:23:19.000Z | src/plotting_modules.py | kjdavidson/NoisePy | a7445dd2f68f64cb562d6a87096e5f12a2c3b612 | [
"MIT"
] | 36 | 2019-11-08T19:36:28.000Z | 2022-02-17T06:31:42.000Z | import os
import sys
import glob
import obspy
import scipy
import pyasdf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.fftpack import next_fast_len
from obspy.signal.filter import bandpass
'''
Ensemble of plotting functions to display intermediate/final waveforms from the NoisePy package.
by Chengxin Jiang @Harvard (May.04.2019)
Specifically, this plotting module includes the following functions:
    1) plot_waveform     -> display the downloaded waveform for a specific station
    2) plot_substack_cc  -> plot the 2D matrix of the CC functions for one time-chunk (e.g., 2 days)
    3) plot_substack_all -> plot the 2D matrix of the CC functions for all time-chunks (e.g., every 1 day in 1 year)
    4) plot_all_moveout  -> plot the moveout of the stacked CC functions over all time-chunks
'''
#############################################################################
###############PLOTTING FUNCTIONS FOR FILES FROM S0##########################
#############################################################################
def plot_waveform(sfile,net,sta,freqmin,freqmax,savefig=False,sdir=None):
    '''
    display the downloaded waveform for a given station
    PARAMETERS:
    -----------------------
    sfile: ASDF file containing all waveform data for one time-chunk
    net,sta: network and station name
    freqmin: min frequency to be filtered
    freqmax: max frequency to be filtered
    savefig: set True to save the figure instead of displaying it
             (only honored in the 3-component branch below)
    sdir: directory where the figure is saved (used when savefig=True)
    USAGE:
    -----------------------
    plot_waveform('temp.h5','CI','BLC',0.01,0.5)
    '''
    # open pyasdf file to read
    try:
        ds = pyasdf.ASDFDataSet(sfile,mode='r')
        sta_list = ds.waveforms.list()
    except Exception:
        print("exit! cannot open %s to read"%sfile);sys.exit()
    # check whether station exists
    tsta = net+'.'+sta
    if tsta not in sta_list:
        raise ValueError('no data for %s in %s'%(tsta,sfile))
    # one waveform tag per stored component; branch on the component count
    # (anything other than 1 or 3 components silently does nothing)
    tcomp = ds.waveforms[tsta].get_waveform_tags()
    ncomp = len(tcomp)
    if ncomp == 1:
        # single component: one band-pass-filtered panel
        # NOTE(review): this branch ignores savefig/sdir and always shows
        # the figure, unlike the 3-component branch -- confirm intended.
        tr = ds.waveforms[tsta][tcomp[0]]
        dt = tr[0].stats.delta
        npts = tr[0].stats.npts
        tt = np.arange(0,npts)*dt
        data = tr[0].data
        data = bandpass(data,freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
        plt.figure(figsize=(9,3))
        plt.plot(tt,data,'k-',linewidth=1)
        plt.title('T\u2080:%s %s.%s.%s @%5.3f-%5.2f Hz' % (tr[0].stats.starttime,net,sta,tcomp[0].split('_')[0].upper(),freqmin,freqmax))
        plt.xlabel('Time [s]')
        plt.ylabel('Amplitude')
        plt.tight_layout()
        plt.show()
    elif ncomp == 3:
        # three components: stacked panels sharing the same time axis
        tr = ds.waveforms[tsta][tcomp[0]]
        dt = tr[0].stats.delta
        npts = tr[0].stats.npts
        tt = np.arange(0,npts)*dt
        data = np.zeros(shape=(ncomp,npts),dtype=np.float32)
        # band-pass filter each component before plotting
        for ii in range(ncomp):
            data[ii] = ds.waveforms[tsta][tcomp[ii]][0].data
            data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
        plt.figure(figsize=(9,6))
        plt.subplot(311)
        plt.plot(tt,data[0],'k-',linewidth=1)
        plt.title('T\u2080:%s %s.%s @%5.3f-%5.2f Hz' % (tr[0].stats.starttime,net,sta,freqmin,freqmax))
        plt.legend([tcomp[0].split('_')[0].upper()],loc='upper left')
        plt.subplot(312)
        plt.plot(tt,data[1],'k-',linewidth=1)
        plt.legend([tcomp[1].split('_')[0].upper()],loc='upper left')
        plt.subplot(313)
        plt.plot(tt,data[2],'k-',linewidth=1)
        plt.legend([tcomp[2].split('_')[0].upper()],loc='upper left')
        plt.xlabel('Time [s]')
        plt.tight_layout()
        # save figure or just show
        if savefig:
            if not os.path.isdir(sdir):os.mkdir(sdir)
            outfname = sdir+'/{0:s}_{1:s}.{2:s}.pdf'.format(sfile.split('.')[0],net,sta)
            plt.savefig(outfname, format='pdf', dpi=400)
            plt.close()
        else:
            plt.show()
#############################################################################
###############PLOTTING FUNCTIONS FOR FILES FROM S1##########################
#############################################################################
def plot_substack_cc(sfile,freqmin,freqmax,disp_lag=None,savefig=True,sdir='./'):
    '''
    display the 2D matrix of the cross-correlation functions for a certain time-chunk.
    PARAMETERS:
    --------------------------
    sfile: cross-correlation functions outputed by S1
    freqmin: min frequency to be filtered
    freqmax: max frequency to be filtered
    disp_lag: time ranges for display
    USAGE:
    --------------------------
    plot_substack_cc('temp.h5',0.1,1,100,True,'./')
    Note: IMPORTANT!!!! this script only works for cross-correlation with sub-stacks being set to True in S1.
    '''
    # open data for read
    if savefig:
        if sdir==None:print('no path selected! save figures in the default path')
    try:
        ds = pyasdf.ASDFDataSet(sfile,mode='r')
        # extract common variables (taken from the first pair; assumed
        # identical across all pairs in the file)
        spairs = ds.auxiliary_data.list()
        path_lists = ds.auxiliary_data[spairs[0]].list()
        flag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['substack']
        dt = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['dt']
        maxlag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['maxlag']
    except Exception:
        print("exit! cannot open %s to read"%sfile);sys.exit()
    # only works for cross-correlation with substacks generated
    if not flag:
        raise ValueError('seems no substacks have been done! not suitable for this plotting function')
    # lags for display
    if not disp_lag:disp_lag=maxlag
    if disp_lag>maxlag:raise ValueError('lag excceds maxlag!')
    # t is the time labels for plotting
    t = np.arange(-int(disp_lag),int(disp_lag)+dt,step=int(2*int(disp_lag)/4))
    # windowing the data: sample indices of the displayed lag window
    indx1 = int((maxlag-disp_lag)/dt)
    indx2 = indx1+2*int(disp_lag/dt)+1
    # one figure per station pair and channel pair
    for spair in spairs:
        ttr = spair.split('_')
        net1,sta1 = ttr[0].split('.')
        net2,sta2 = ttr[1].split('.')
        for ipath in path_lists:
            chan1,chan2 = ipath.split('_')
            try:
                dist = ds.auxiliary_data[spair][ipath].parameters['dist']
                ngood= ds.auxiliary_data[spair][ipath].parameters['ngood']
                ttime= ds.auxiliary_data[spair][ipath].parameters['time']
                timestamp = np.empty(ttime.size,dtype='datetime64[s]')
            except Exception:
                print('continue! something wrong with %s %s'%(spair,ipath))
                continue
            # cc matrix: one row per sub-stack window
            data = ds.auxiliary_data[spair][ipath].data[:,indx1:indx2]
            nwin = data.shape[0]
            amax = np.zeros(nwin,dtype=np.float32)
            if nwin==0 or len(ngood)==1: print('continue! no enough substacks!');continue
            tmarks = []
            # load cc for each station-pair: band-pass, then normalize each
            # row by its maximum (amax keeps the pre-normalization peak)
            for ii in range(nwin):
                data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
                amax[ii] = max(data[ii])
                data[ii] /= amax[ii]
                timestamp[ii] = obspy.UTCDateTime(ttime[ii])
                tmarks.append(obspy.UTCDateTime(ttime[ii]).strftime('%H:%M:%S'))
            # plotting: label roughly 5 rows regardless of nwin
            if nwin>10:
                tick_inc = int(nwin/5)
            else:
                tick_inc = 2
            # top: waveform matrix; middle: mean stack; bottom: amp/ngood trends
            fig = plt.figure(figsize=(10,6))
            ax = fig.add_subplot(211)
            ax.matshow(data,cmap='seismic',extent=[-disp_lag,disp_lag,nwin,0],aspect='auto')
            ax.set_title('%s.%s.%s %s.%s.%s dist:%5.2fkm' % (net1,sta1,chan1,net2,sta2,chan2,dist))
            ax.set_xlabel('time [s]')
            ax.set_xticks(t)
            ax.set_yticks(np.arange(0,nwin,step=tick_inc))
            ax.set_yticklabels(timestamp[0:-1:tick_inc])
            ax.xaxis.set_ticks_position('bottom')
            ax1 = fig.add_subplot(413)
            ax1.set_title('stacked and filtered at %4.2f-%4.2f Hz'%(freqmin,freqmax))
            ax1.plot(np.arange(-disp_lag,disp_lag+dt,dt),np.mean(data,axis=0),'k-',linewidth=1)
            ax1.set_xticks(t)
            ax2 = fig.add_subplot(414)
            ax2.plot(amax/min(amax),'r-')
            ax2.plot(ngood,'b-')
            ax2.set_xlabel('waveform number')
            ax2.set_xticks(np.arange(0,nwin,step=tick_inc))
            ax2.set_xticklabels(tmarks[0:nwin:tick_inc])
            #for tick in ax[2].get_xticklabels():
            #    tick.set_rotation(30)
            ax2.legend(['relative amp','ngood'],loc='upper right')
            fig.tight_layout()
            # save figure or just show
            if savefig:
                if sdir==None:sdir = sfile.split('.')[0]
                if not os.path.isdir(sdir):os.mkdir(sdir)
                outfname = sdir+'/{0:s}.{1:s}.{2:s}_{3:s}.{4:s}.{5:s}.pdf'.format(net1,sta1,chan1,net2,sta2,chan2)
                fig.savefig(outfname, format='pdf', dpi=400)
                plt.close()
            else:
                fig.show()
def plot_substack_cc_spect(sfile,freqmin,freqmax,disp_lag=None,savefig=True,sdir='./'):
    '''
    display the 2D matrix of the cross-correlation functions (time domain and
    amplitude spectra) for a time-chunk.
    PARAMETERS:
    -----------------------
    sfile: cross-correlation functions outputed by S1
    freqmin: min frequency to be filtered
    freqmax: max frequency to be filtered
    disp_lag: time ranges for display
    USAGE:
    -----------------------
    plot_substack_cc('temp.h5',0.1,1,200,True,'./')
    Note: IMPORTANT!!!! this script only works for the cross-correlation with sub-stacks in S1.
    '''
    # open data for read
    if savefig:
        if sdir==None:print('no path selected! save figures in the default path')
    try:
        ds = pyasdf.ASDFDataSet(sfile,mode='r')
        # extract common variables (taken from the first pair)
        spairs = ds.auxiliary_data.list()
        path_lists = ds.auxiliary_data[spairs[0]].list()
        flag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['substack']
        dt = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['dt']
        maxlag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['maxlag']
    except Exception:
        print("exit! cannot open %s to read"%sfile);sys.exit()
    # only works for cross-correlation with substacks generated
    if not flag:
        raise ValueError('seems no substacks have been done! not suitable for this plotting function')
    # lags for display
    if not disp_lag:disp_lag=maxlag
    if disp_lag>maxlag:raise ValueError('lag excceds maxlag!')
    t = np.arange(-int(disp_lag),int(disp_lag)+dt,step=int(2*int(disp_lag)/4))
    indx1 = int((maxlag-disp_lag)/dt)
    indx2 = indx1+2*int(disp_lag/dt)+1
    # FFT length and positive-frequency axis for the spectrum panel
    nfft = int(next_fast_len(indx2-indx1))
    freq = scipy.fftpack.fftfreq(nfft,d=dt)[:nfft//2]
    for spair in spairs:
        ttr = spair.split('_')
        net1,sta1 = ttr[0].split('.')
        net2,sta2 = ttr[1].split('.')
        for ipath in path_lists:
            chan1,chan2 = ipath.split('_')
            try:
                dist = ds.auxiliary_data[spair][ipath].parameters['dist']
                ngood= ds.auxiliary_data[spair][ipath].parameters['ngood']
                ttime= ds.auxiliary_data[spair][ipath].parameters['time']
                timestamp = np.empty(ttime.size,dtype='datetime64[s]')
            except Exception:
                print('continue! something wrong with %s %s'%(spair,ipath))
                continue
            # cc matrix: one row per sub-stack window
            data = ds.auxiliary_data[spair][ipath].data[:,indx1:indx2]
            nwin = data.shape[0]
            amax = np.zeros(nwin,dtype=np.float32)
            spec = np.zeros(shape=(nwin,nfft//2),dtype=np.complex64)
            if nwin==0 or len(ngood)==1: print('continue! no enough substacks!');continue
            # load cc for each station-pair: spectrum is computed on the RAW
            # window; the time-domain row is then band-passed and normalized
            for ii in range(nwin):
                spec[ii] = scipy.fftpack.fft(data[ii],nfft,axis=0)[:nfft//2]
                spec[ii] /= np.max(np.abs(spec[ii]),axis=0)
                data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
                amax[ii] = max(data[ii])
                data[ii] /= amax[ii]
                timestamp[ii] = obspy.UTCDateTime(ttime[ii])
            # plotting: label roughly 5 rows regardless of nwin
            if nwin>10:
                tick_inc = int(nwin/5)
            else:
                tick_inc = 2
            # panel 0: time-domain matrix; panel 1: amplitude spectra;
            # panel 2: relative amplitude and window quality (ngood)
            fig,ax = plt.subplots(3,sharex=False)
            ax[0].matshow(data,cmap='seismic',extent=[-disp_lag,disp_lag,nwin,0],aspect='auto')
            ax[0].set_title('%s.%s.%s %s.%s.%s dist:%5.2f km' % (net1,sta1,chan1,net2,sta2,chan2,dist))
            ax[0].set_xlabel('time [s]')
            ax[0].set_xticks(t)
            ax[0].set_yticks(np.arange(0,nwin,step=tick_inc))
            ax[0].set_yticklabels(timestamp[0:-1:tick_inc])
            ax[0].xaxis.set_ticks_position('bottom')
            ax[1].matshow(np.abs(spec),cmap='seismic',extent=[freq[0],freq[-1],nwin,0],aspect='auto')
            ax[1].set_xlabel('freq [Hz]')
            ax[1].set_ylabel('amplitudes')
            ax[1].set_yticks(np.arange(0,nwin,step=tick_inc))
            ax[1].xaxis.set_ticks_position('bottom')
            ax[2].plot(amax/min(amax),'r-')
            ax[2].plot(ngood,'b-')
            ax[2].set_xlabel('waveform number')
            #ax[1].set_xticks(np.arange(0,nwin,int(nwin/5)))
            ax[2].legend(['relative amp','ngood'],loc='upper right')
            fig.tight_layout()
            # save figure or just show
            if savefig:
                if sdir==None:sdir = sfile.split('.')[0]
                if not os.path.isdir(sdir):os.mkdir(sdir)
                outfname = sdir+'/{0:s}.{1:s}.{2:s}_{3:s}.{4:s}.{5:s}.pdf'.format(net1,sta1,chan1,net2,sta2,chan2)
                fig.savefig(outfname, format='pdf', dpi=400)
                plt.close()
            else:
                fig.show()
#############################################################################
###############PLOTTING FUNCTIONS FOR FILES FROM S2##########################
#############################################################################
def plot_substack_all(sfile,freqmin,freqmax,ccomp,disp_lag=None,savefig=False,sdir=None):
    '''
    display the 2D matrix of the cross-correlation functions stacked for all time windows.
    PARAMETERS:
    ---------------------
    sfile: cross-correlation functions outputed by S2
    freqmin: min frequency to be filtered
    freqmax: max frequency to be filtered
    ccomp: cross component of the targeted cc functions
    disp_lag: time ranges for display
    savefig: set True to save the figure (in pdf format)
    sdir: directory to save the figure (if not provided, save to default dir)
    USAGE:
    ----------------------
    plot_substack_all('temp.h5',0.1,1,'ZZ',50,True,'./')
    '''
    # open data for read
    if savefig:
        if sdir==None:print('no path selected! save figures in the default path')
    paths = ccomp
    try:
        ds = pyasdf.ASDFDataSet(sfile,mode='r')
        # extract common variables (taken from the first data type)
        dtype_lists = ds.auxiliary_data.list()
        dt = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dt']
        dist = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dist']
        maxlag = ds.auxiliary_data[dtype_lists[0]][paths].parameters['maxlag']
    except Exception:
        print("exit! cannot open %s to read"%sfile);sys.exit()
    if len(dtype_lists)==1:
        raise ValueError('Abort! seems no substacks have been done')
    # lags for display
    if not disp_lag:disp_lag=maxlag
    if disp_lag>maxlag:raise ValueError('lag excceds maxlag!')
    t = np.arange(-int(disp_lag),int(disp_lag)+dt,step=int(2*int(disp_lag)/4))
    indx1 = int((maxlag-disp_lag)/dt)
    indx2 = indx1+2*int(disp_lag/dt)+1
    # other parameters to keep
    # NOTE: np.int/np.float (used here originally) were removed from NumPy
    # in 1.24 and raise AttributeError; the plain/sized types are equivalent.
    nwin = len(dtype_lists)-1
    data = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
    ngood= np.zeros(nwin,dtype=np.int16)
    ttime= np.zeros(nwin,dtype=np.int64)
    timestamp = np.empty(ttime.size,dtype='datetime64[s]')
    amax = np.zeros(nwin,dtype=np.float32)
    # NOTE(review): the loop skips the first TWO data types while nwin is
    # len-1 (the _spect variant skips only one) -- confirm which is intended.
    for ii,itype in enumerate(dtype_lists[2:]):
        # data-type names encode the window start time after a 1-char prefix
        timestamp[ii] = obspy.UTCDateTime(float(itype[1:]))
        try:
            ngood[ii] = ds.auxiliary_data[itype][paths].parameters['ngood']
            ttime[ii] = ds.auxiliary_data[itype][paths].parameters['time']
            #timestamp[ii] = obspy.UTCDateTime(ttime[ii])
            # cc matrix: band-pass then normalize each window by its peak
            data[ii] = ds.auxiliary_data[itype][paths].data[indx1:indx2]
            data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
            amax[ii] = np.max(data[ii])
            data[ii] /= amax[ii]
        except Exception as e:
            print(e);continue
    if len(ngood)==1:
        raise ValueError('seems no substacks have been done! not suitable for this plotting function')
    # plotting: label ~10 rows for large matrices, ~5 for medium ones
    if nwin>100:
        tick_inc = int(nwin/10)
    elif nwin>10:
        tick_inc = int(nwin/5)
    else:
        tick_inc = 2
    # top panel: waveform matrix; bottom: amplitude and ngood trends
    fig,ax = plt.subplots(2,sharex=False)
    ax[0].matshow(data,cmap='seismic',extent=[-disp_lag,disp_lag,nwin,0],aspect='auto')
    ax[0].set_title('%s dist:%5.2f km filtered at %4.2f-%4.2fHz' % (sfile.split('/')[-1],dist,freqmin,freqmax))
    ax[0].set_xlabel('time [s]')
    ax[0].set_ylabel('wavefroms')
    ax[0].set_xticks(t)
    ax[0].set_yticks(np.arange(0,nwin,step=tick_inc))
    ax[0].set_yticklabels(timestamp[0:nwin:tick_inc])
    ax[0].xaxis.set_ticks_position('bottom')
    ax[1].plot(amax/max(amax),'r-')
    ax[1].plot(ngood,'b-')
    ax[1].set_xlabel('waveform number')
    ax[1].set_xticks(np.arange(0,nwin,nwin//5))
    ax[1].legend(['relative amp','ngood'],loc='upper right')
    # save figure or just show
    if savefig:
        if sdir==None:sdir = sfile.split('.')[0]
        if not os.path.isdir(sdir):os.mkdir(sdir)
        outfname = sdir+'/{0:s}_{1:4.2f}_{2:4.2f}Hz.pdf'.format(sfile.split('/')[-1],freqmin,freqmax)
        fig.savefig(outfname, format='pdf', dpi=400)
        plt.close()
    else:
        fig.show()
def plot_substack_all_spect(sfile,freqmin,freqmax,ccomp,disp_lag=None,savefig=False,sdir=None):
    '''
    display the 2D matrix of the cross-correlation functions (time domain and
    amplitude spectra) stacked for all time windows.
    PARAMETERS:
    -----------------------
    sfile: cross-correlation functions outputed by S2
    freqmin: min frequency to be filtered
    freqmax: max frequency to be filtered
    ccomp: cross component of the targeted cc functions
    disp_lag: time ranges for display
    savefig: set True to save the figure (in pdf format)
    sdir: directory to save the figure (if not provided, save to default dir)
    USAGE:
    -----------------------
    plot_substack_all('temp.h5',0.1,1,'ZZ',50,True,'./')
    '''
    # open data for read
    if savefig:
        if sdir==None:print('no path selected! save figures in the default path')
    paths = ccomp
    try:
        ds = pyasdf.ASDFDataSet(sfile,mode='r')
        # extract common variables (taken from the first data type)
        dtype_lists = ds.auxiliary_data.list()
        dt = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dt']
        dist = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dist']
        maxlag = ds.auxiliary_data[dtype_lists[0]][paths].parameters['maxlag']
    except Exception:
        print("exit! cannot open %s to read"%sfile);sys.exit()
    if len(dtype_lists)==1:
        raise ValueError('Abort! seems no substacks have been done')
    # lags for display
    if not disp_lag:disp_lag=maxlag
    if disp_lag>maxlag:raise ValueError('lag excceds maxlag!')
    t = np.arange(-int(disp_lag),int(disp_lag)+dt,step=int(2*int(disp_lag)/4))
    indx1 = int((maxlag-disp_lag)/dt)
    indx2 = indx1+2*int(disp_lag/dt)+1
    # FFT length and positive-frequency axis for the spectrum panel
    nfft = int(next_fast_len(indx2-indx1))
    freq = scipy.fftpack.fftfreq(nfft,d=dt)[:nfft//2]
    # other parameters to keep
    # NOTE: np.int/np.float (used here originally) were removed from NumPy
    # in 1.24 and raise AttributeError; the plain/sized types are equivalent.
    nwin = len(dtype_lists)-1
    data = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
    spec = np.zeros(shape=(nwin,nfft//2),dtype=np.complex64)
    ngood= np.zeros(nwin,dtype=np.int16)
    ttime= np.zeros(nwin,dtype=np.int64)
    timestamp = np.empty(ttime.size,dtype='datetime64[s]')
    amax = np.zeros(nwin,dtype=np.float32)
    for ii,itype in enumerate(dtype_lists[1:]):
        # data-type names encode the window start time after a 1-char prefix
        timestamp[ii] = obspy.UTCDateTime(float(itype[1:]))
        try:
            ngood[ii] = ds.auxiliary_data[itype][paths].parameters['ngood']
            ttime[ii] = ds.auxiliary_data[itype][paths].parameters['time']
            #timestamp[ii] = obspy.UTCDateTime(ttime[ii])
            # cc matrix: spectrum on the raw window, then band-pass and
            # normalize the time-domain row by its peak
            tdata = ds.auxiliary_data[itype][paths].data[indx1:indx2]
            spec[ii] = scipy.fftpack.fft(tdata,nfft,axis=0)[:nfft//2]
            spec[ii] /= np.max(np.abs(spec[ii]))
            data[ii] = bandpass(tdata,freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
            amax[ii] = np.max(data[ii])
            data[ii] /= amax[ii]
        except Exception as e:
            print(e);continue
    if len(ngood)==1:
        raise ValueError('seems no substacks have been done! not suitable for this plotting function')
    # plotting: fixed row-label spacing
    tick_inc = 50
    # panel 0: time-domain matrix; panel 1: spectra; panel 2: amp/ngood trends
    fig,ax = plt.subplots(3,sharex=False)
    ax[0].matshow(data,cmap='seismic',extent=[-disp_lag,disp_lag,nwin,0],aspect='auto')
    ax[0].set_title('%s dist:%5.2f km' % (sfile.split('/')[-1],dist))
    ax[0].set_xlabel('time [s]')
    ax[0].set_ylabel('wavefroms')
    ax[0].set_xticks(t)
    ax[0].set_yticks(np.arange(0,nwin,step=tick_inc))
    ax[0].set_yticklabels(timestamp[0:nwin:tick_inc])
    ax[0].xaxis.set_ticks_position('bottom')
    ax[1].matshow(np.abs(spec),cmap='seismic',extent=[freq[0],freq[-1],nwin,0],aspect='auto')
    ax[1].set_xlabel('freq [Hz]')
    ax[1].set_ylabel('amplitudes')
    ax[1].set_yticks(np.arange(0,nwin,step=tick_inc))
    ax[1].set_yticklabels(timestamp[0:nwin:tick_inc])
    ax[1].xaxis.set_ticks_position('bottom')
    ax[2].plot(amax/max(amax),'r-')
    ax[2].plot(ngood,'b-')
    ax[2].set_xlabel('waveform number')
    ax[2].set_xticks(np.arange(0,nwin,nwin//15))
    ax[2].legend(['relative amp','ngood'],loc='upper right')
    # save figure or just show
    if savefig:
        if sdir==None:sdir = sfile.split('.')[0]
        if not os.path.isdir(sdir):os.mkdir(sdir)
        outfname = sdir+'/{0:s}.pdf'.format(sfile.split('/')[-1])
        fig.savefig(outfname, format='pdf', dpi=400)
        plt.close()
    else:
        fig.show()
def plot_all_moveout(sfiles,dtype,freqmin,freqmax,ccomp,dist_inc,disp_lag=None,savefig=False,sdir=None):
    '''
    display the moveout (2D matrix) of the cross-correlation functions stacked for all time chunks.
    PARAMETERS:
    ---------------------
    sfiles: list of cross-correlation files outputed by S2 (one per station pair)
    dtype: datatype either 'Allstack0pws' or 'Allstack0linear'
    freqmin: min frequency to be filtered
    freqmax: max frequency to be filtered
    ccomp: cross component
    dist_inc: distance bins to stack over
    disp_lag: lag times for displaying
    savefig: set True to save the figures (in pdf format)
    sdir: desired directory to save the figure (if not provided, save to default dir)
    USAGE:
    ----------------------
    plot_all_moveout(sfiles,'Allstack0pws',0.1,0.2,'ZZ',1,200,True,'./temp')
    '''
    # open data for read
    if savefig:
        if sdir==None:print('no path selected! save figures in the default path')
    path = ccomp
    # extract common variables from the first file
    try:
        ds = pyasdf.ASDFDataSet(sfiles[0],mode='r')
        dt = ds.auxiliary_data[dtype][path].parameters['dt']
        maxlag= ds.auxiliary_data[dtype][path].parameters['maxlag']
        stack_method = dtype.split('0')[-1]
    except Exception:
        print("exit! cannot open %s to read"%sfiles[0]);sys.exit()
    # lags for display
    if not disp_lag:disp_lag=maxlag
    if disp_lag>maxlag:raise ValueError('lag excceds maxlag!')
    t = np.arange(-int(disp_lag),int(disp_lag)+dt,step=(int(2*int(disp_lag)/4)))
    indx1 = int((maxlag-disp_lag)/dt)
    indx2 = indx1+2*int(disp_lag/dt)+1
    # cc matrix: one row per station pair
    nwin = len(sfiles)
    data = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
    dist = np.zeros(nwin,dtype=np.float32)
    ngood= np.zeros(nwin,dtype=np.int16)
    # load cc and parameter matrix
    for ii in range(len(sfiles)):
        sfile = sfiles[ii]
        ds = pyasdf.ASDFDataSet(sfile,mode='r')
        try:
            # load data to variables
            dist[ii] = ds.auxiliary_data[dtype][path].parameters['dist']
            ngood[ii]= ds.auxiliary_data[dtype][path].parameters['ngood']
            tdata = ds.auxiliary_data[dtype][path].data[indx1:indx2]
        except Exception:
            print("continue! cannot read %s "%sfile);continue
        data[ii] = bandpass(tdata,freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
    # average cc within each distance bin
    # NOTE(review): the parenthesization rounds BEFORE dividing by dist_inc
    # (int(round(max+0.51)/dist_inc)); confirm int(round((max+0.51)/dist_inc))
    # was not intended for dist_inc != 1.
    ntrace = int(np.round(np.max(dist)+0.51)/dist_inc)
    ndata = np.zeros(shape=(ntrace,indx2-indx1),dtype=np.float32)
    ndist = np.zeros(ntrace,dtype=np.float32)
    for td in range(0,ntrace-1):
        tindx = np.where((dist>=td*dist_inc)&(dist<(td+1)*dist_inc))[0]
        if len(tindx):
            ndata[td] = np.mean(data[tindx],axis=0)
            ndist[td] = (td+0.5)*dist_inc
    # keep only populated bins, then normalize each trace by its peak
    indx = np.where(ndist>0)[0]
    ndata = ndata[indx]
    ndist = ndist[indx]
    for ii in range(ndata.shape[0]):
        # (debug print of the per-trace peak removed)
        peak = np.max(np.abs(ndata[ii]))
        ndata[ii] /= peak
    # plotting figures
    fig,ax = plt.subplots()
    ax.matshow(ndata,cmap='seismic',extent=[-disp_lag,disp_lag,ndist[-1],ndist[0]],aspect='auto')
    ax.set_title('allstack %s @%5.3f-%5.2f Hz'%(stack_method,freqmin,freqmax))
    ax.set_xlabel('time [s]')
    ax.set_ylabel('distance [km]')
    ax.set_xticks(t)
    ax.xaxis.set_ticks_position('bottom')
    #ax.text(np.ones(len(ndist))*(disp_lag-5),dist[ndist],ngood[ndist],fontsize=8)
    # save figure or show
    if savefig:
        outfname = sdir+'/moveout_allstack_'+str(stack_method)+'_'+str(dist_inc)+'kmbin.pdf'
        fig.savefig(outfname, format='pdf', dpi=400)
        plt.close()
    else:
        fig.show()
def plot_all_moveout_1D_1comp(sfiles,sta,dtype,freqmin,freqmax,ccomp,disp_lag=None,savefig=False,sdir=None):
    '''
    display the moveout waveforms of the cross-correlation functions stacked for all time chunks.
    PARAMETERS:
    ---------------------
    sfiles: list of cross-correlation files outputed by S2 (one per station pair)
    sta: source station name
    dtype: datatype either 'Allstack0pws' or 'Allstack0linear'
    freqmin: min frequency to be filtered
    freqmax: max frequency to be filtered
    ccomp: cross component
    disp_lag: lag times for displaying
    savefig: set True to save the figures (in pdf format)
    sdir: desired directory to save the figure (if not provided, save to default dir)
    USAGE:
    ----------------------
    plot_all_moveout_1D_1comp(sfiles,'STA','Allstack0pws',0.1,0.2,'ZZ',200,True,'./temp')
    '''
    # open data for read
    if savefig:
        if sdir==None:print('no path selected! save figures in the default path')
    receiver = sta+'.h5'
    stack_method = dtype.split('_')[-1]
    # extract common variables from the first file
    try:
        ds = pyasdf.ASDFDataSet(sfiles[0],mode='r')
        dt = ds.auxiliary_data[dtype][ccomp].parameters['dt']
        maxlag= ds.auxiliary_data[dtype][ccomp].parameters['maxlag']
    except Exception:
        print("exit! cannot open %s to read"%sfiles[0]);sys.exit()
    # lags for display
    if not disp_lag:disp_lag=maxlag
    if disp_lag>maxlag:raise ValueError('lag excceds maxlag!')
    tt = np.arange(-int(disp_lag),int(disp_lag)+dt,dt)
    indx1 = int((maxlag-disp_lag)/dt)
    indx2 = indx1+2*int(disp_lag/dt)+1
    # load cc and parameter matrix; plot each pair offset by its distance
    mdist = 0
    for ii in range(len(sfiles)):
        sfile = sfiles[ii]
        # when *sta* is the receiver of the pair, the trace is time-reversed
        # so all traces share the same source convention
        iflip = 0
        treceiver = sfile.split('_')[-1]
        if treceiver == receiver:
            iflip = 1
        ds = pyasdf.ASDFDataSet(sfile,mode='r')
        try:
            # load data to variables
            dist = ds.auxiliary_data[dtype][ccomp].parameters['dist']
            ngood= ds.auxiliary_data[dtype][ccomp].parameters['ngood']
            tdata = ds.auxiliary_data[dtype][ccomp].data[indx1:indx2]
        except Exception:
            print("continue! cannot read %s "%sfile);continue
        # band-pass and peak-normalize, then plot at the pair's offset
        tdata = bandpass(tdata,freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
        tdata /= np.max(tdata,axis=0)
        if iflip:
            plt.plot(tt,np.flip(tdata,axis=0)+dist,'k',linewidth=0.8)
        else:
            plt.plot(tt,tdata+dist,'k',linewidth=0.8)
        plt.title('%s %s filtered @%4.1f-%4.1f Hz' % (sta,ccomp,freqmin,freqmax))
        plt.xlabel('time (s)')
        plt.ylabel('offset (km)')
        plt.text(maxlag*0.9,dist+0.5,receiver,fontsize=6)
        #----use to plot o times------
        # track the largest offset so the zero-time line spans all traces
        if mdist < dist:
            mdist = dist
    plt.plot([0,0],[0,mdist],'r--',linewidth=1)
    # save figure or show
    if savefig:
        outfname = sdir+'/moveout_'+sta+'_1D_'+str(stack_method)+'.pdf'
        plt.savefig(outfname, format='pdf', dpi=400)
        plt.close()
    else:
        plt.show()
def plot_all_moveout_1D_9comp(sfiles,sta,dtype,freqmin,freqmax,disp_lag=None,savefig=False,sdir=None):
    '''
    display the moveout waveforms of the cross-correlation functions stacked for
    all time chunks, one panel per cross component (3x3 grid).
    PARAMETERS:
    ---------------------
    sfiles: list of cross-correlation files outputed by S2 (one per station pair)
    sta: source station name
    dtype: datatype either 'Allstack0pws' or 'Allstack0linear'
    freqmin: min frequency to be filtered
    freqmax: max frequency to be filtered
    disp_lag: lag times for displaying
    savefig: set True to save the figures (in pdf format)
    sdir: desired directory to save the figure (if not provided, save to default dir)
    USAGE:
    ----------------------
    plot_all_moveout_1D_9comp(sfiles,'STA','Allstack0pws',0.1,0.2,200,True,'./temp')
    '''
    # open data for read
    if savefig:
        if sdir==None:print('no path selected! save figures in the default path')
    receiver = sta+'.h5'
    stack_method = dtype.split('_')[-1]
    # all nine rotated cross components, one subplot each
    ccomp = ['ZR','ZT','ZZ','RR','RT','RZ','TR','TT','TZ']
    # extract common variables from the first file
    try:
        ds = pyasdf.ASDFDataSet(sfiles[0],mode='r')
        dt = ds.auxiliary_data[dtype][ccomp[0]].parameters['dt']
        maxlag= ds.auxiliary_data[dtype][ccomp[0]].parameters['maxlag']
    except Exception:
        print("exit! cannot open %s to read"%sfiles[0]);sys.exit()
    # lags for display
    if not disp_lag:disp_lag=maxlag
    if disp_lag>maxlag:raise ValueError('lag excceds maxlag!')
    tt = np.arange(-int(disp_lag),int(disp_lag)+dt,dt)
    indx1 = int((maxlag-disp_lag)/dt)
    indx2 = indx1+2*int(disp_lag/dt)+1
    # load cc and parameter matrix; pairs beyond mdist km are skipped
    mdist = 80
    plt.figure(figsize=(14,10.5))
    for ic in range(len(ccomp)):
        comp = ccomp[ic]
        # 3x3 grid position as a subplot code string, e.g. '331'
        # NOTE(review): string subplot codes are deprecated in recent
        # matplotlib -- plt.subplot(3,3,ic+1) is the supported form.
        tmp = '33'+str(ic+1)
        plt.subplot(tmp)
        for ii in range(len(sfiles)):
            sfile = sfiles[ii]
            # when *sta* is the receiver of the pair, the trace is
            # time-reversed so all traces share the same source convention
            iflip = 0
            treceiver = sfile.split('_')[-1]
            if treceiver == receiver:
                iflip = 1
            ds = pyasdf.ASDFDataSet(sfile,mode='r')
            try:
                # load data to variables
                dist = ds.auxiliary_data[dtype][comp].parameters['dist']
                ngood= ds.auxiliary_data[dtype][comp].parameters['ngood']
                tdata = ds.auxiliary_data[dtype][comp].data[indx1:indx2]
            except Exception:
                print("continue! cannot read %s "%sfile);continue
            if dist>mdist:continue
            # band-pass and peak-normalize, then plot at the pair's offset
            tdata = bandpass(tdata,freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
            tdata /= np.max(tdata,axis=0)
            if iflip:
                plt.plot(tt,np.flip(tdata,axis=0)+dist,'k',linewidth=0.8)
            else:
                plt.plot(tt,tdata+dist,'k',linewidth=0.8)
            if ic==1:
                plt.title('%s filtered @%4.1f-%4.1f Hz' % (sta,freqmin,freqmax))
            plt.xlabel('time (s)')
            plt.ylabel('offset (km)')
            if ic==0:
                # reference moveout lines (2 km/s red, 1 km/s green)
                plt.plot([0,160],[0,80],'r--',linewidth=0.2)
                plt.plot([0,80],[0,80],'g--',linewidth=0.2)
            plt.text(disp_lag*1.1,dist+0.5,treceiver,fontsize=6)
        # zero-time marker and component label for this panel
        plt.plot([0,0],[0,mdist],'b--',linewidth=1)
        font = {'family': 'serif', 'color': 'red', 'weight': 'bold','size': 16}
        plt.text(disp_lag*0.65,80,comp,fontdict=font)
    plt.tight_layout()
    # save figure or show
    if savefig:
        outfname = sdir+'/moveout_'+sta+'_1D_'+str(stack_method)+'.pdf'
        plt.savefig(outfname, format='pdf', dpi=300)
        plt.close()
    else:
        plt.show()
| 39.895504 | 141 | 0.589663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10,614 | 0.323262 |
f39066e523946592eb25eb6c65542beaf6ed93ea | 195 | py | Python | version_2.0/cli.py | Isak-Landin/AlienWorldsBot | c8750229de4a7d4efe2950b7b89b7c457b956b5b | [
"MIT"
] | null | null | null | version_2.0/cli.py | Isak-Landin/AlienWorldsBot | c8750229de4a7d4efe2950b7b89b7c457b956b5b | [
"MIT"
] | null | null | null | version_2.0/cli.py | Isak-Landin/AlienWorldsBot | c8750229de4a7d4efe2950b7b89b7c457b956b5b | [
"MIT"
] | null | null | null | try:
import time
import traceback
from program_files.__main__ import main
if __name__ == '__main__':
main()
except:
print(traceback.print_exc())
time.sleep(10000) | 19.5 | 43 | 0.65641 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.051282 |
f393ca21257641b3a4a749444737ab5f7f35e5a8 | 13,255 | py | Python | zeugs/wz_table/spreadsheet_make.py | gradgrind/Zeugs | 56361a63f245ac15a8cd21c7316879dc944609db | [
"Apache-2.0"
] | null | null | null | zeugs/wz_table/spreadsheet_make.py | gradgrind/Zeugs | 56361a63f245ac15a8cd21c7316879dc944609db | [
"Apache-2.0"
] | null | null | null | zeugs/wz_table/spreadsheet_make.py | gradgrind/Zeugs | 56361a63f245ac15a8cd21c7316879dc944609db | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
wz_table/spreadsheet_make.py
Last updated: 2019-10-14
Create a new spreadsheet (.xlsx).
=+LICENCE=============================
Copyright 2017-2019 Michael Towers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=-LICENCE========================================
"""
import os, datetime
from collections import namedtuple
from openpyxl import Workbook
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl.styles import (NamedStyle, PatternFill, Alignment,
Protection, Font, Border, Side)
from openpyxl.utils import get_column_letter
from openpyxl.worksheet.properties import (WorksheetProperties,
PageSetupProperties)
class NewSpreadsheet:
    """Thin wrapper around an openpyxl workbook containing one worksheet.

    Provides 0-indexed cell access, style application via <TableStyle>
    objects, data validation, page set-up and saving to an .xlsx file.
    """
    # Number format applied to date cells by <setCell>.
    FORMAT_DATE = 'DD.MM.YYYY'
    def __init__ (self, sheetName=None):
        """Create the workbook with a single, optionally named, worksheet."""
        # Create the workbook and worksheet we'll be working with
        self._wb = Workbook ()
        self._ws = self._wb.active
        if sheetName:
            self._ws.title = sheetName
        self._unlocked = None   # cache for a <Protection> instance
    @staticmethod
    def cellName (row, col):
        """Return the spreadsheet name (e.g. 'B3') of 0-indexed (row, col).

        A <None> coordinate yields the placeholder '*r*' / '*c*', which can
        be filled in later by <completeCellNames>.
        """
        if row == None:
            r = '*r*'
        else:
            r = str (row+1)
        if col == None:
            c = '*c*'
        else:
            c = get_column_letter (col+1)
        return c + r
    @staticmethod
    def completeCellNames (rcstring, row=None, col=None):
        """Substitute the '*r*' / '*c*' placeholders in <rcstring> with the
        given 0-indexed row / column (left untouched when <None>).
        """
        if col != None:
            rcstring = rcstring.replace ('*c*', get_column_letter (col+1))
        if row != None:
            rcstring = rcstring.replace ('*r*', str (row+1))
        return rcstring
    def makeStyle (self, style):
        """Return the attributes of this style in the form needed for
        applying it to a cell. The result is cached in the style object.
        """
        cellstyle = style.cellStyle
        if cellstyle == None:
            cellstyle = {}
        cstyle = style.attributes
        # Font
        # <f> records whether any font attribute was supplied explicitly;
        # only then is a Font object added to the cell style.
        try:
            fontname = cstyle ['font']
            f = True
        except KeyError:
            fontname = 'Arial'
            f = False
        try:
            fontsize = int (cstyle ['size'])
            f = True
        except KeyError:
            fontsize = 12
        try:
            fontbold = bool (cstyle ['bold'])
            f = True
        except KeyError:
            fontbold = False
        try:
            fontital = bool (cstyle ['emph'])
            f = True
        except KeyError:
            fontital = False
        try:
            fontcol = cstyle ['fg']
            f = True
        except KeyError:
            fontcol = '000000'
            pass
        if f:
            cellstyle ['font'] = Font (name = fontname,
                    size = fontsize, bold = fontbold,
                    italic = fontital, color=fontcol)
        # "Number format"
        try:
            cellstyle ['number_format'] = cstyle ['number_format']
        except KeyError:
            pass
        # Alignment: letters b/m/t select a vertical alignment with rotated
        # (90 degree) text; l/c/r select a horizontal alignment.
        try:
            align = cstyle ['align']
            if align in 'bmt':
                # Vertical
                h = 'c'
                v = align
                rotate = 90
            else:
                h = align
                v = 'm'
                rotate = None
            cellstyle ['alignment'] = self.alignment (h=h, v=v,
                    rotate=rotate)
        except KeyError:
            pass
        # Border
        try:
            border = cstyle ['border']
            if border == 2:
                cellstyle ['border'] = self.border (left=0, right=0,
                        top=0, bottom=2)
            elif border == 1:
                cellstyle ['border'] = self.border ()
        except KeyError:
            pass
        # Background
        try:
            cellstyle ['fill'] = self.background (cstyle ['background'])
        except KeyError:
            pass
        # Validation is not really a style ...
        try:
            valid = cstyle ['valid']
            if valid:
                # The default is 'locked' so only if <valid> is present
                # is an action necessary.
                if not self._unlocked:
                    self._unlocked = Protection (locked=False)
                # Remove cell protection
                cellstyle ['protection'] = self._unlocked
                if type (valid) == list:
                    style.validation = self.dataValidation (valid)
        except KeyError:
            pass
        style.cellStyle = cellstyle
        return cellstyle
    def setCell (self, row, col, val, style=None, isDate=False):
        """Set the cell at the given coordinates to the given value.
        The coordinates start at 0.
        Style objects can be passed as additional arguments.
        """
        cell = self._ws.cell (row=row+1, column=col+1)
        if style:
            cellstyle = self.makeStyle (style)
            for k, v in cellstyle.items ():
                setattr (cell, k, v)
            if style.validation:
                style.validation.add (cell)
        if val != None:
            if isDate:
                # Set cell number format
                cell.number_format = self.FORMAT_DATE
                # Convert to <datetime.date> instance
                # (expects an ISO-like 'YYYY-MM-DD' string in <val>)
                cell.value = datetime.date (*[int (v) for v in val.split ('-')])
            else:
                # Workaround for probable bug in openpyxl:
                if isinstance (val, str) and type (val) != str:
                    val = str (val)
                cell.value = val
    def setWidth (self, col, width):
        """Set a column width in mm – probably very roughly.
        """
        # The internal width parameter is related to the width of the
        # 'Normal style font'! The conversion factor tries to compensate.
        self._ws.column_dimensions [get_column_letter (col+1)].width = width * 0.5
    def setHeight (self, row, height):
        """Set a row height in mm – probably very roughly.
        """
        # The internal height parameter is related to the height of the
        # 'Normal style font'! The conversion factor tries to compensate.
        self._ws.row_dimensions [row+1].height = height * 2.8
    def merge (self, row0, col0, height, width):
        """Merge the cell block starting at 0-indexed (row0, col0) spanning
        <height> rows and <width> columns.
        """
        self._ws.merge_cells (start_row=row0 + 1, start_column=col0 + 1,
                end_row=row0 + height, end_column=col0 + width)
    def dataValidation (self, valList, allow_blank=True):
        """Create a data-validation object with list validation.
        Instances are cached (per value list and blank policy) so that
        repeated calls reuse a single validator on this worksheet.
        """
        def newValidationList ():
            dv = DataValidation (type='list',
                    formula1 = '"' + ','.join (valList) + '"',
                    allow_blank = allow_blank)
            # Optionally set a custom error message
            #dv.error ='Your entry is not in the list'
            #dv.errorTitle = 'Invalid Entry'
            # Optionally set a custom prompt message
            #dv.prompt = 'Please select from the list'
            #dv.promptTitle = 'List Selection'
            # Add the data-validation object to the worksheet
            self._ws.add_data_validation (dv)
            return dv
        key = tuple (valList) + (allow_blank,)
        try:
            return self._vcache [key]
        except AttributeError:
            # No cache yet
            self._vcache = {}
        except KeyError:
            # No existing validation instance for this key
            pass
        dv = newValidationList ()
        self._vcache [key] = dv
        return dv
    def dataValidationLength (self, chars):
        """Create a data-validation object for a string with maximum
        length validation (chars >= 0) or exact length validation
        (-chars for chars < 0).
        """
        if chars < 0:
            op = 'equal'
            chars = - chars
        else:
            op = 'lessThanOrEqual'
        dv = DataValidation(type='textLength', operator=op, formula1=chars)
        # Optionally set a custom error message
        dv.error ='Entry is too long'
        # Add the data-validation object to the worksheet
        self._ws.add_data_validation (dv)
        return dv
    @staticmethod
    def background (colour):
        """Return a solid <PatternFill> for the given 'RRGGBB' colour."""
        return PatternFill (patternType='solid', fgColor=colour)
    @staticmethod
    def alignment (h=None, v=None, rotate=None, indent=None, wrap=None):
        """Build an <Alignment>: h in 'l'/'r'/'c', v in 't'/'b'/'m',
        rotate in degrees (0-180), plus optional indent and text wrapping.
        Unknown alignment letters are silently ignored.
        """
        al = Alignment ()
        if h:
            hal = {'l': 'left', 'r': 'right', 'c': 'center'}.get (h)
            if hal:
                al.horizontal = hal
        if v:
            val = {'t': 'top', 'b': 'bottom', 'm': 'center'}.get (v)
            if val:
                al.vertical = val
        if rotate:
            try:
                ral = int (rotate)
                if ral >=0 and ral <= 180:
                    al.textRotation = ral
            except:
                pass
        if indent != None:
            al.indent = float (indent)
        if wrap != None:
            al.wrapText = wrap
        return al
    @staticmethod
    def border (left=1, right=1, top=1, bottom=1):
        """Simple borders. Only supports definition of the sides and thickness.
        The value must lie in the range 0 – 3.
        """
        bstyle = [None, 'thin', 'medium', 'thick']
        return Border (
                left=Side (style=bstyle [left]),
                right=Side (style=bstyle [right]),
                top=Side (style=bstyle [top]),
                bottom=Side (style=bstyle [bottom]))
    def protectSheet (self, pw=None):
        """Enable worksheet protection, optionally with a password."""
        if pw:
            self._ws.protection.set_password (pw)
        else:
            self._ws.protection.enable ()
    def sheetProperties (self, paper='A4', landscape=False,
            fitWidth=False, fitHeight=False):
        """Configure paper size, orientation and fit-to-page printing."""
        if landscape:
            self._ws.page_setup.orientation = self._ws.ORIENTATION_LANDSCAPE
        self._ws.page_setup.paperSize = getattr (self._ws, 'PAPERSIZE_' + paper)
        # Property settings
        if fitWidth or fitHeight:
            wsprops = self._ws.sheet_properties
            wsprops.pageSetUpPr = PageSetupProperties(fitToPage=True,
                    autoPageBreaks=False)
            # self._ws.page_setup.fitToPage = True
            if not fitWidth:
                self._ws.page_setup.fitToWidth = False
            if not fitHeight:
                self._ws.page_setup.fitToHeight = False
    def freeze (self, row, col):
        """Freeze panes above and to the left of 0-indexed (row, col)."""
        self._ws.freeze_panes = self.cellName (row, col)
    def save (self, filepath):
        """Write the spreadsheet to a file.
        The ending '.xlsx' is added automatically if it is not present
        already.
        Return the full filepath.
        """
        fdir = os.path.dirname (filepath)
        fname = os.path.basename (filepath).rsplit ('.', 1) [0] + '.xlsx'
        fp = os.path.join (fdir, fname)
        self._wb.save (fp)
        return fp
class TableStyle:
    """A bundle of cell-style attributes consumed by <NewSpreadsheet>.

    Supported keyword attributes:
        font (str), size (int), bold (bool), emph (bool): font settings.
        fg (str): font colour as 'RRGGBB'.
        align (str): horizontal (l, c, r) OR vertical (b, m, t) alignment;
            vertical alignment is used for rotated text.
        background (str): cell colour as 'RRGGBB', default none.
        border (int): 0 = none, 1 = all sides, 2 = (thicker) underline.
        number_format (str): by default '@' (force text format).
        valid: <True> just unlocks the cell (removes protection);
            a list of strings additionally installs list validation.
    """
    def __init__ (self, base=None, **kargs):
        """Build the attribute mapping for this style.

        base: an existing <TableStyle> whose attributes are inherited;
            when given, the built-in defaults are NOT applied.
        kargs: attributes (see class docstring) overriding <base> or the
            defaults (border=1, number_format='@', align='c').
        """
        if base == None:
            merged = {}
            # No parent style: fall back to the standard defaults unless
            # the caller overrides them explicitly.
            kargs.setdefault ('border', 1)
            kargs.setdefault ('number_format', '@')
            kargs.setdefault ('align', 'c')
        else:
            merged = base.attributes.copy ()
        merged.update (kargs)
        self.attributes = merged
        # Filled lazily by <NewSpreadsheet.makeStyle> (style/validation cache).
        self.cellStyle = None
        self.validation = None
| 33.220551 | 82 | 0.530517 | 12,039 | 0.90785 | 0 | 0 | 1,844 | 0.139054 | 0 | 0 | 4,401 | 0.331875 |
f3940f471c47cc139263585f43ad154e0740108a | 8,509 | py | Python | pygpsnmea/kml.py | tww-software/py_gps_nmea | 8295d146014d4e8636e7ca05f7e843f023c67e45 | [
"MIT"
] | null | null | null | pygpsnmea/kml.py | tww-software/py_gps_nmea | 8295d146014d4e8636e7ca05f7e843f023c67e45 | [
"MIT"
] | null | null | null | pygpsnmea/kml.py | tww-software/py_gps_nmea | 8295d146014d4e8636e7ca05f7e843f023c67e45 | [
"MIT"
] | null | null | null | """
a parser to generate Keyhole Markup Language (KML) for Google Earth
"""
import datetime
import os
import re
# Validates 'YYYY/MM/DD HH:MM:SS' prefixes. Note that re.match only anchors
# at the start of the string, so trailing text (e.g. ' (estimated)') after a
# valid timestamp is still accepted.
DATETIMEREGEX = re.compile(
    r'\d{4}/(0[1-9]|1[0-2])/(0[1-9]|1[0-9]|2[0-9]|3[01]) '
    r'(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])')
class KMLOutputParser():
    """
    Class to parse KML into an output file.

    Attributes:
        kmldoc(list): list of strings to make up the doc.kml
        kmlfilepath(str): path to output KML file
        kmlheader(str): first part of a KML file
        placemarktemplate(str): template for a KML placemark (pin on map)
        lineplacemarktemplate(str): template for KML linestring (line on map)
    """

    def __init__(self, kmlfilepath):
        """
        Args:
            kmlfilepath(str): path the KML document will be written to
        """
        self.kmldoc = []
        self.kmlfilepath = kmlfilepath
        self.kmlheader = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>%s</name>
<open>1</open>"""
        self.placemarktemplate = """
<Placemark>
<name>%s</name>
<description>%s</description>
<TimeStamp>
<when>%s</when>
</TimeStamp>
<LookAt>
<longitude>%s</longitude>
<latitude>%s</latitude>
<altitude>%s</altitude>
<heading>-0</heading>
<tilt>0</tilt>
<range>500</range>
</LookAt>
<Point>
<coordinates>%s</coordinates>
</Point>
</Placemark>"""
        self.lineplacemarktemplate = """
<Placemark>
<name>%s</name>
<LineString>
<coordinates>%s</coordinates>
</LineString>
</Placemark>"""

    @staticmethod
    def format_kml_placemark_description(placemarkdict):
        """
        format html tags for inside a kml placemark from a dictionary

        Args:
            placemarkdict(dict): dictionary of information for a placemark

        Returns:
            description(str): the dictionary items formatted as HTML string
                              suitable to be in a KML placemark description
        """
        starttag = "<![CDATA["
        newlinetag = "<br  />\n"
        endtag = "]]>"
        descriptionlist = []
        descriptionlist.append(starttag)
        for item in placemarkdict:
            # Nested dicts become an upper-cased section heading followed by
            # 'KEY - value' lines; flat items become a single such line.
            if isinstance(placemarkdict[item], dict):
                descriptionlist.append(newlinetag)
                descriptionlist.append(item.upper())
                descriptionlist.append(newlinetag)
                for subitem in placemarkdict[item]:
                    descriptionlist.append(str(subitem).upper())
                    descriptionlist.append(' - ')
                    descriptionlist.append(str(placemarkdict[item][subitem]))
                    descriptionlist.append(newlinetag)
                continue
            descriptionlist.append(str(item).upper())
            descriptionlist.append(' - ')
            descriptionlist.append(str(placemarkdict[item]))
            descriptionlist.append(newlinetag)
        descriptionlist.append(endtag)
        description = ''.join(descriptionlist)
        return description

    def create_kml_header(self, name):
        """
        Write the first part of the KML output file.
        This only needs to be called once at the start of the kml file.

        Args:
            name(str): name to use for this kml document
        """
        self.kmldoc.append(self.kmlheader % (name))

    def add_kml_placemark(self, placemarkname, description, lon, lat,
                          altitude='0', timestamp=''):
        """
        Write a placemark to the KML file (a pin on the map!)

        Args:
            placemarkname(str): text that appears next to the pin on the map
            description(str): text that will appear in the placemark
            lon(str): longitude in decimal degrees
            lat(str): latitude in decimal degrees
            altitude(str): altitude in metres
            timestamp(str): time stamp in XML format
        """
        placemarkname = remove_invalid_chars(placemarkname)
        coords = lon + ',' + lat + ',' + altitude
        placemark = self.placemarktemplate % (
            placemarkname, description, timestamp, lon, lat,
            altitude, coords)
        self.kmldoc.append(placemark)

    def open_folder(self, foldername):
        """
        open a folder to store placemarks

        Args:
            foldername(str): the name of the folder
        """
        cleanfoldername = remove_invalid_chars(foldername)
        openfolderstr = "<Folder>\n<name>{}</name>".format(cleanfoldername)
        self.kmldoc.append(openfolderstr)

    def close_folder(self):
        """
        close the currently open folder
        """
        closefolderstr = "</Folder>"
        self.kmldoc.append(closefolderstr)

    def add_kml_placemark_linestring(self, placemarkname, coords):
        """
        Write a linestring to the KML file (a line on the map!)

        Args:
            placemarkname(str): name of the linestring
            coords(list): list of dicts containing Lat/Lon
        """
        placemarkname = remove_invalid_chars(placemarkname)
        newcoordslist = []
        for item in coords:
            lon = str(item['longitude'])
            lat = str(item['latitude'])
            try:
                alt = str(item['altitude (M)'])
            except KeyError:
                # No altitude recorded for this point - default to ground level.
                alt = '0'
            coordsline = '{},{},{}'.format(lon, lat, alt)
            newcoordslist.append(coordsline)
        placemark = self.lineplacemarktemplate % (placemarkname,
                                                  '\n'.join(newcoordslist))
        self.kmldoc.append(placemark)

    def close_kml_file(self):
        """
        Write the end of the KML file.
        This needs to be called once at the end of the file
        to ensure the tags are closed properly.
        """
        endtags = "\n</Document></kml>"
        self.kmldoc.append(endtags)

    def write_kml_doc_file(self):
        """
        write the tags to the kml doc.kml file
        """
        with open(self.kmlfilepath, 'w') as kmlout:
            for kmltags in self.kmldoc:
                kmlout.write(kmltags)
                kmlout.flush()
class LiveKMLMap(KMLOutputParser):
    """
    live plot positions on a map

    Writes a small 'open_this.kml' network-link file next to the output KML;
    Google Earth re-reads the linked KML every second to follow the position.
    """
    kmlnetlink = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<NetworkLink>
<name>Live GPS Positon</name>
<description>current GPS position</description>
<Link>
<href>{}</href>
<refreshVisibility>1</refreshVisibility>
<refreshMode>onInterval</refreshMode>
<refreshInterval>1</refreshInterval>
</Link>
</NetworkLink>
</kml>"""

    def __init__(self, kmlfilepath):
        """
        Args:
            kmlfilepath(str): path of the KML file the netlink will point to
        """
        super().__init__(kmlfilepath)
        outputpath = os.path.dirname(kmlfilepath)
        self.netlinkpath = os.path.join(outputpath, 'open_this.kml')

    def create_netlink_file(self):
        """
        write the netlink file
        """
        # self.netlinkpath is already a complete path; the old code wrapped
        # it in a redundant single-argument os.path.join call.
        with open(self.netlinkpath, 'w') as netlinkfile:
            netlinkfile.write(self.kmlnetlink.format(self.kmlfilepath))
class InvalidDateTimeString(Exception):
    """
    Raised when a timestamp string is not in the '%Y/%m/%d %H:%M:%S' format
    expected by convert_timestamp_to_kmltimestamp.
    """
def remove_invalid_chars(xmlstring):
    """
    remove invalid chars from a string

    Args:
        xmlstring(str): input string to clean

    Returns:
        cleanstring(str): return string with invalid chars replaced or removed
    """
    # '&' must be escaped first, otherwise the '&' in the other entities
    # would be double-escaped. The remaining substitutions run in order.
    substitutions = (
        ('&', '&amp;'),
        ('<', '&lt;'),
        ('>', '&gt;'),
        ('"', '&quot;'),
        ('\t', '    '),
        ('\n', ''),
    )
    cleanstring = xmlstring
    for target, replacement in substitutions:
        cleanstring = cleanstring.replace(target, replacement)
    return cleanstring
def convert_timestamp_to_kmltimestamp(timestamp):
    """
    convert the pygps timestamp string to one suitable for KML

    Args:
        timestamp(str): the timestamp string in the format '%Y/%m/%d %H:%M:%S',
                        optionally followed by the suffix ' (estimated)'

    Raises:
        InvalidDateTimeString: when the timestamp is not correctly formatted

    Returns:
        kmltimestamp(str): the timestamp in the format '%Y-%m-%dT%H:%M:%SZ'
    """
    if DATETIMEREGEX.match(timestamp):
        # BUGFIX: str.rstrip(' (estimated)') strips any trailing run of
        # those *characters* rather than the literal suffix, so remove the
        # suffix explicitly instead.
        suffix = ' (estimated)'
        if timestamp.endswith(suffix):
            timestamp = timestamp[:-len(suffix)]
        try:
            dtobj = datetime.datetime.strptime(timestamp, '%Y/%m/%d %H:%M:%S')
            return dtobj.strftime('%Y-%m-%dT%H:%M:%SZ')
        except ValueError as err:
            # The old message here was just 'wrong'; keep it consistent
            # with the format error below.
            raise InvalidDateTimeString(
                'timestamp must be %Y/%m/%d %H:%M:%S') from err
    raise InvalidDateTimeString('timestamp must be %Y/%m/%d %H:%M:%S')
| 31.868914 | 78 | 0.595135 | 6,796 | 0.798684 | 0 | 0 | 1,467 | 0.172406 | 0 | 0 | 4,169 | 0.489952 |
f396ce948df906683a7c38c39ca570318694f325 | 120 | py | Python | config_web/uberon.py | NikkiBytes/pending.api | 3c83bb8e413c3032a3a4539d19a779b5f0b67650 | [
"Apache-2.0"
] | 3 | 2019-02-17T23:36:35.000Z | 2022-03-01T16:43:06.000Z | config_web/uberon.py | NikkiBytes/pending.api | 3c83bb8e413c3032a3a4539d19a779b5f0b67650 | [
"Apache-2.0"
] | 56 | 2019-01-26T16:34:12.000Z | 2022-03-23T06:57:03.000Z | config_web/uberon.py | NikkiBytes/pending.api | 3c83bb8e413c3032a3a4539d19a779b5f0b67650 | [
"Apache-2.0"
] | 6 | 2020-10-22T17:37:54.000Z | 2022-03-01T16:56:55.000Z |
# Elasticsearch (ES_*) settings backing the 'uberon' pending API.
ES_HOST = 'localhost:9200'
ES_INDEX = 'pending-uberon'
ES_DOC_TYPE = 'anatomy'
# URL prefix / version segment for the API routes; version is unused here.
API_PREFIX = 'uberon'
API_VERSION = ''
| 15 | 27 | 0.716667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.425 |
f397defd4691690e72cce36ba5215724539cf0f1 | 4,596 | py | Python | tests/unittests/models/movinet/test_movinet.py | jaelgu/towhee | 34c79cf50831dc271ae0ab02f319f9e355c2d0bf | [
"Apache-2.0"
] | null | null | null | tests/unittests/models/movinet/test_movinet.py | jaelgu/towhee | 34c79cf50831dc271ae0ab02f319f9e355c2d0bf | [
"Apache-2.0"
] | null | null | null | tests/unittests/models/movinet/test_movinet.py | jaelgu/towhee | 34c79cf50831dc271ae0ab02f319f9e355c2d0bf | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import unittest
from towhee.models.movinet.config import _C
from towhee.models.movinet.movinet import MoViNet, create_model
class MovinetTest(unittest.TestCase):
    """
    Shape tests for the MoViNet A0-A5 variants, in both the standard and the
    causal (stream) configuration, plus the '2plus1d' convolution type.

    Each test feeds a random clip of shape (batch, channels, frames, height,
    width) = (1, 3, 1, 64, 64) through one model configuration and checks
    that the classifier emits logits of shape (1, 600).
    """

    def _assert_logits_shape(self, model):
        """
        Helper shared by all tests: run a dummy clip through <model> and
        verify that the output logits have shape (1, 600).
        """
        x = torch.randn(1, 3, 1, 64, 64)
        y = model(x)
        self.assertTrue(y.shape == torch.Size([1, 600]))

    def test_Movinet_A0(self):
        """
        Test MovinetA0 model.
        """
        self._assert_logits_shape(create_model(model_name='movineta0', causal=False))

    def test_Movinet_A0_stream(self):
        """
        Test MovinetA0 stream model.
        """
        self._assert_logits_shape(create_model(model_name='movineta0', causal=True))

    def test_Movinet_A1(self):
        """
        Test MovinetA1 model.
        """
        self._assert_logits_shape(create_model(model_name='movineta1', causal=False))

    def test_Movinet_A1_stream(self):
        """
        Test MovinetA1 stream model.
        """
        self._assert_logits_shape(create_model(model_name='movineta1', causal=True))

    def test_Movinet_A2(self):
        """
        Test MovinetA2 model.
        """
        self._assert_logits_shape(create_model(model_name='movineta2', causal=False))

    def test_Movinet_A2_stream(self):
        """
        Test MovinetA2 stream model.
        """
        self._assert_logits_shape(create_model(model_name='movineta2', causal=True))

    def test_Movinet_A3(self):
        """
        Test MovinetA3 model.
        """
        self._assert_logits_shape(create_model(model_name='movineta3', causal=False))

    def test_Movinet_A3_stream(self):
        """
        Test MovinetA3 stream model.
        """
        self._assert_logits_shape(create_model(model_name='movineta3', causal=True))

    def test_Movinet_A4(self):
        """
        Test MovinetA4 model.
        """
        self._assert_logits_shape(create_model(model_name='movineta4', causal=False))

    def test_Movinet_A4_stream(self):
        """
        Test MovinetA4 stream model.
        """
        self._assert_logits_shape(create_model(model_name='movineta4', causal=True))

    def test_Movinet_A5(self):
        """
        Test MovinetA5 model.
        """
        self._assert_logits_shape(create_model(model_name='movineta5', causal=False))

    def test_Movinet_A5_stream(self):
        """
        Test MovinetA5 stream model.
        """
        self._assert_logits_shape(create_model(model_name='movineta5', causal=True))

    def test_Movinet_A4_(self):
        """
        Test MovinetA4 2+1d model.
        """
        config = _C.MODEL.MoViNetA4
        self._assert_logits_shape(MoViNet(cfg=config, causal=True, conv_type='2plus1d'))

    def test_Movinet_A5_(self):
        """
        Test MovinetA5 2+1d model.
        """
        config = _C.MODEL.MoViNetA5
        self._assert_logits_shape(MoViNet(cfg=config, causal=False, conv_type='2plus1d'))
| 31.054054 | 75 | 0.595735 | 3,864 | 0.840731 | 0 | 0 | 0 | 0 | 0 | 0 | 1,390 | 0.302437 |
f39a1fae3017b69be241d72f355eabb23b20cb7e | 766 | py | Python | setup.py | btpka3/certbot-auto-dns-challenge | 6f0eee2c4a65380aaf51e439714b0c88820cd392 | [
"Apache-2.0"
] | null | null | null | setup.py | btpka3/certbot-auto-dns-challenge | 6f0eee2c4a65380aaf51e439714b0c88820cd392 | [
"Apache-2.0"
] | null | null | null | setup.py | btpka3/certbot-auto-dns-challenge | 6f0eee2c4a65380aaf51e439714b0c88820cd392 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
# Package metadata for the certbot-adc distribution: DNS-provider SDK
# dependencies plus the three hook/check scripts installed onto PATH.
setup(
    name='certbot_adc',
    version='0.1',
    description="perform certbot auto dns challenge with DNS provider's API",
    url='http://github.com/btpka3/certbot-auto-dns-challenge',
    author='btpka3',
    author_email='btpka3@163.com',
    license='Apache License v2.0',
    packages=['certbot_adc'],
    # Runtime dependencies: Aliyun and QCloud DNS SDKs, YAML config parsing
    # and e-mail address validation.
    install_requires=[
        "aliyun-python-sdk-core>=2.0.7",
        "aliyun-python-sdk-alidns>=2.0.7",
        "PyYAML>=3.12",
        "validate_email>=1.3",
        "qcloudapi-sdk-python>=2.0.9"
    ],
    # Executable helper scripts copied onto PATH at install time.
    scripts=[
        'bin/certbot-adc-check-conf',
        'bin/certbot-adc-manual-auth-hook',
        'bin/certbot-adc-manual-cleanup-hook',
    ],
    zip_safe=False
)
| 26.413793 | 77 | 0.612272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 459 | 0.599217 |
f39ab617903bf5c066a5df4506e51a1d5032af8b | 3,402 | py | Python | meta_learn/hyperparameter/hyperactive_wrapper.py | SimonBlanke/Meta-Learn | 099c12dd78c67216949f0773b5e642de0199d130 | [
"MIT"
] | 2 | 2018-04-11T08:44:52.000Z | 2018-04-11T19:40:49.000Z | meta_learn/hyperparameter/hyperactive_wrapper.py | SimonBlanke/meta-learn | 099c12dd78c67216949f0773b5e642de0199d130 | [
"MIT"
] | null | null | null | meta_learn/hyperparameter/hyperactive_wrapper.py | SimonBlanke/meta-learn | 099c12dd78c67216949f0773b5e642de0199d130 | [
"MIT"
] | null | null | null | import os
import glob
import hashlib
import inspect
from .collector import Collector
from ._meta_regressor import MetaRegressor
from ._recognizer import Recognizer
from ._predictor import Predictor
class HyperactiveWrapper:
    """Facade around the meta-learning pipeline: collects per-model-function
    metadata, trains a meta-regressor on it and predicts hyperparameters.

    Metadata and trained regressors are stored next to this module under
    meta_data/ and meta_regressor/ respectively, keyed by a SHA-1 hash of
    the model function's source code.
    """
    def __init__(self, search_config, meta_learn_path_alt=None):
        # NOTE(review): meta_learn_path_alt is accepted but never used; the
        # storage location is always derived from this module's own path.
        self.search_config = search_config
        current_path = os.path.realpath(__file__)
        meta_learn_path, _ = current_path.rsplit("/", 1)
        self.meta_data_path = meta_learn_path + "/meta_data/"
        self.meta_regressor_path = meta_learn_path + "/meta_regressor/"
        if not os.path.exists(self.meta_data_path):
            os.makedirs(self.meta_data_path)
        if not os.path.exists(self.meta_regressor_path):
            os.makedirs(self.meta_regressor_path)
    def get_func_metadata(self, model_func):
        """Load all stored metadata for <model_func> as (features, target);
        returns (None, None) when nothing has been collected yet.
        """
        self.collector = Collector()
        paths = glob.glob(self._get_func_file_paths(model_func))
        if len(paths) > 0:
            return self.collector._get_func_metadata(paths)
        else:
            return None, None
    def collect(self, X, y, _cand_):
        """Extract and persist metadata for candidate <_cand_> on (X, y)."""
        self.collector = Collector()
        path = self._get_file_path(X, y, _cand_.func_)
        self.collector.extract(X, y, _cand_, path)
    def retrain(self, model_func):
        """Re-fit and store the meta-regressor for <model_func>; silently a
        no-op when no metadata has been collected yet.
        """
        path = self._get_metaReg_file_path(model_func)
        meta_features, target = self.get_func_metadata(model_func)
        if meta_features is None or target is None:
            return
        self.regressor = MetaRegressor()
        self.regressor.fit(meta_features, target)
        self.regressor.store_model(path)
    def search(self, X, y, model_func):
        """Predict promising hyperparameters for <model_func> on (X, y) with
        the stored meta-regressor; returns (None, None) when none is stored.
        """
        path = self._get_metaReg_file_path(model_func)
        if not os.path.exists(path):
            return None, None
        # NOTE(review): <_cand_> is not defined in this scope, so this line
        # raises NameError at runtime. Recognizer presumably needs the
        # candidate object (as in collect()); confirm the intended argument.
        self.recognizer = Recognizer(_cand_)
        self.predictor = Predictor()
        self.predictor.load_model(path)
        X_test = self.recognizer.get_test_metadata([X, y])
        return self.predictor.search(X_test)
    def _get_hash(self, object):
        """Return the SHA-1 hex digest of a bytes-like <object>."""
        return hashlib.sha1(object).hexdigest()
    def _get_func_str(self, func):
        """Return the source code of <func> (used as its identity)."""
        return inspect.getsource(func)
    def _get_metaReg_file_path(self, model_func):
        """Path of the stored meta-regressor for <model_func>."""
        func_str = self._get_func_str(model_func)
        return self.meta_regressor_path + (
            "metamodel__func_hash="
            + self._get_hash(func_str.encode("utf-8"))
            + "__.csv"
        )
    def _get_func_file_paths(self, model_func):
        """Glob pattern matching every metadata file for <model_func>;
        creates the per-function directory as a side effect.
        """
        func_str = self._get_func_str(model_func)
        self.func_path = self._get_hash(func_str.encode("utf-8")) + "/"
        directory = self.meta_data_path + self.func_path
        if not os.path.exists(directory):
            os.makedirs(directory)
        return directory + ("metadata" + "*" + "__.csv")
    def _get_file_path(self, X_train, y_train, model_func):
        """Metadata file path for a (dataset, model_func) pair; hashes the
        feature and label arrays to identify the dataset.

        NOTE(review): _get_hash feeds X_train / y_train straight to
        hashlib.sha1, which requires a bytes-like object -- confirm what
        callers pass here.
        """
        func_str = self._get_func_str(model_func)
        feature_hash = self._get_hash(X_train)
        label_hash = self._get_hash(y_train)
        self.func_path = self._get_hash(func_str.encode("utf-8")) + "/"
        directory = self.meta_data_path + self.func_path
        if not os.path.exists(directory):
            os.makedirs(directory)
        return directory + (
            "metadata"
            + "__feature_hash="
            + feature_hash
            + "__label_hash="
            + label_hash
            + "__.csv"
        )
| 30.927273 | 71 | 0.643739 | 3,200 | 0.940623 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.047913 |
f39ac023b77aabbaf4afaccbd9f78e8fb65f2bd7 | 4,130 | py | Python | SIR_Model_Spread_of_Disease/SIR.py | Ukasz09/Machine-learning | fad267247a98099dd0647840fb2cbeb91ba63ef6 | [
"MIT"
] | 1 | 2020-04-18T11:30:26.000Z | 2020-04-18T11:30:26.000Z | SIR_Model_Spread_of_Disease/SIR.py | Ukasz09/Machine-learning | fad267247a98099dd0647840fb2cbeb91ba63ef6 | [
"MIT"
] | null | null | null | SIR_Model_Spread_of_Disease/SIR.py | Ukasz09/Machine-learning | fad267247a98099dd0647840fb2cbeb91ba63ef6 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# Time grid: simulate T time units with step size h (shared by the odeint
# and the manual Euler variants below).
T = 200
h = 1e-2
t = np.arange(start=0, stop=T + h, step=h)
# SIR rates: bet = infection (contact) rate, gam = recovery rate.
bet, gam = 0.15, 1 / 50
# TODO: change to a random initial value later
# S_pocz = np.random.uniform(0.7, 1)
S_start = 0.8
I_start = 1 - S_start
R_start = 0
N = S_start + I_start + R_start  # total population; conserved by the model
# using odeint
# ---------------------------------------------------------------------------------------------------------------------#
def two_diff_ode_equation(state, t, bet, gam):
    """Right-hand side of the two-equation SIR ODE system (S and I only;
    R follows from conservation). Signature as required by odeint.
    """
    S, I = state
    dS_dt = - bet * I * S / N
    dI_dt = bet * I * S / N - gam * I
    return [dS_dt, dI_dt]
def one_diff_equation_ode(state, t, bet, gam):
    """Right-hand side of the reduced single-equation SIR ODE for S, using
    the first integral of the system. Signature as required by odeint.
    """
    S = state[0]
    # Integration constant fixed by the initial conditions.
    integration_const = I_start - gam / bet * np.log(S_start) + S_start
    dS_dt = -bet / N * S * (gam / bet * np.log(S) - S + integration_const)
    return [dS_dt]
def calc_R(S_arr, I_arr):
    """Recover R(t) from conservation N = S + I + R at every time step."""
    num_steps = len(t)
    R_arr = np.zeros(num_steps)
    for idx in range(num_steps):
        R_arr[idx] = N - S_arr[idx] - I_arr[idx]
    return R_arr
def calc_I(S_arr):
    """Compute I(t) from S(t) via the first integral of the SIR system."""
    # Integration constant fixed by the initial conditions.
    integration_const = I_start - gam / bet * np.log(S_start) + S_start
    I_arr = np.zeros(len(t))
    for idx in range(len(I_arr)):
        I_arr[idx] = gam / bet * np.log(S_arr[idx]) - S_arr[idx] + integration_const
    return I_arr
def two_equation_ode_plot(t, sym, labelt='$t$', labels=['S', 'I', 'R']):
    """Plot the odeint solution: S and I columns from <sym>, R via calc_R.

    NOTE(review): the mutable default for <labels> is never mutated here,
    so it is harmless, but a tuple would be safer.
    """
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
    # plot drawing (S, I)
    for i in range(len(labels) - 1):
        ax.plot(t, sym[:, i], label=labels[i])
    # plot drawing (R)
    ax.plot(t, calc_R(sym[:, 0], sym[:, 1]), label=labels[2])
    ax.set_xlabel(labelt, fontsize=14)
    ax.set_ylabel('stan', fontsize=14)
    ax.set_ylim([0, 1])
    ax.legend()
    plt.show()
def one_equation_ode_plot(t, sym, labelt='$t$', labels=['S', 'I', 'R']):
    """Plot the single-equation odeint solution: S from <sym>, with I and R
    derived via calc_I / calc_R.
    """
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
    # plot drawing (S)
    ax.plot(t, sym[:, 0], label=labels[0])
    # plot drawing (I)
    # BUGFIX: this curve was labelled labels[2] ('R'), so the legend showed
    # S, R, R; the I curve must use labels[1].
    I_arr = calc_I(sym[:, 0])
    ax.plot(t, I_arr, label=labels[1])
    # plot drawing (R)
    ax.plot(t, calc_R(sym[:, 0], I_arr), label=labels[2])
    ax.set_xlabel(labelt, fontsize=14)
    ax.set_ylabel('stan', fontsize=14)
    ax.set_ylim([0, 1])
    ax.legend()
    plt.show()
def two_equation_ode_main():
    """Solve the two-equation SIR system with odeint and plot S, I, R."""
    initial_state = S_start, I_start
    solution = odeint(two_diff_ode_equation, initial_state, t, args=(bet, gam))
    two_equation_ode_plot(t, solution, labels=['S', 'I', 'R'])
def one_equation_ode_main():
    """Solve the reduced single-equation SIR ODE with odeint and plot."""
    solution = odeint(one_diff_equation_ode, S_start, t, args=(bet, gam))
    one_equation_ode_plot(t, solution, labels=['S', 'I', 'R'])
# using manual
# ---------------------------------------------------------------------------------------------------------------------#
# Trajectories for the hand-rolled Euler integration; index 0 holds the
# initial conditions, later entries are filled by the *_manual functions.
S = np.zeros(len(t))
S[0] = S_start
I = np.zeros(len(t))
I[0] = I_start
R = np.zeros(len(t))
R[0] = R_start
def two_diff_equation_manual():
    """Euler-integrate the two-equation SIR system in place (module-level
    S, I, R arrays).
    """
    for i in range(t.size - 1):
        S[i + 1] = S[i] + h * (- bet * I[i] * S[i] / N)
        # NOTE(review): the I update uses the freshly computed S[i + 1]
        # (a semi-implicit step) rather than S[i] -- confirm this is
        # intentional and not a typo.
        I[i + 1] = I[i] + h * (bet * I[i] * S[i + 1] / N - gam * I[i])
        R[i + 1] = N - S[i + 1] - I[i + 1]
def one_diff_equation_manual():
    """Euler-integrate the reduced single-equation SIR model for S in place,
    recovering I from the first integral and R from conservation.
    """
    # Integration constant fixed by the initial conditions.
    integration_const = I_start - gam / bet * np.log(S_start) + S_start
    for step in range(t.size - 1):
        dS = -bet / N * S[step] * (gam / bet * np.log(S[step]) - S[step] + integration_const)
        S[step + 1] = S[step] + h * dS
        I[step + 1] = gam / bet * np.log(S[step + 1]) - S[step + 1] + integration_const
        R[step + 1] = N - S[step + 1] - I[step + 1]
def equation_man_plot(t, sirList, labelt='$t$', labels=['S', 'I', 'R']):
    """Plot the manually integrated trajectories in <sirList> (one curve
    per entry, labelled pairwise by <labels>).
    """
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
    # plot drawing (R, S, I)
    for i in range(len(sirList)):
        ax.plot(t, sirList[i], label=labels[i])
    ax.set_xlabel(labelt, fontsize=14)
    ax.set_ylabel('stan', fontsize=14)
    ax.set_ylim([0, 1])
    ax.legend()
    plt.show()
def two_equation_man_main():
    """Run the manual Euler two-equation integration and plot S, I, R."""
    two_diff_equation_manual()
    equation_man_plot(t, [S, I, R], labels=['S', 'I', 'R'])
def one_equation_man_main():
    """Run the manual Euler single-equation integration and plot S, I, R."""
    one_diff_equation_manual()
    equation_man_plot(t, [S, I, R], labels=['S', 'I', 'R'])
if __name__ == "__main__":
# one_equation_ode_main()
# one_equation_man_main()
# two_equation_ode_main()
two_equation_man_main()
exit(0)
| 28.482759 | 120 | 0.542373 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 678 | 0.164165 |
f39c2a97e0f79f5846c7d80b8e4cc63e36f79092 | 2,309 | py | Python | Source/fcm_estimator.py | SamanKhamesian/Imputation-of-Missing-Values | 01bcce1033e311133529ac7519bf1eccb3adac67 | [
"Apache-2.0"
] | 4 | 2019-07-17T18:36:42.000Z | 2021-06-04T16:04:55.000Z | Source/fcm_estimator.py | SamanKhamesian/Imputation-of-Missing-Values | 01bcce1033e311133529ac7519bf1eccb3adac67 | [
"Apache-2.0"
] | 1 | 2019-10-01T05:04:24.000Z | 2019-10-11T12:38:26.000Z | Source/fcm_estimator.py | SamanKhamesian/Imputation-of-Missing-Values | 01bcce1033e311133529ac7519bf1eccb3adac67 | [
"Apache-2.0"
] | 2 | 2019-11-01T04:09:49.000Z | 2021-02-27T23:44:23.000Z | import numpy as np
from skfuzzy import cmeans
from config import NAN, FCMParam
class FCMeansEstimator:
    """Estimate missing feature values with fuzzy c-means clustering.

    Rows containing the NAN sentinel are excluded from clustering; each
    missing value is then reconstructed as a membership-weighted combination
    of the cluster centers computed from the complete rows only.
    """

    def __init__(self, c, m, data):
        self.c = c  # number of clusters
        self.m = m  # fuzzification exponent
        self.data = data
        self.complete_rows, self.incomplete_rows = self.__extract_rows()

    # Extract complete and incomplete rows
    def __extract_rows(self):
        """Split row indices into (complete, incomplete) arrays.

        A row is incomplete if any of its entries equals the NAN sentinel.
        """
        rows, columns = len(self.data), len(self.data[0])
        complete_rows, incomplete_rows = [], []
        for i in range(rows):
            for j in range(columns):
                if self.data[i][j] == NAN:
                    incomplete_rows.append(i)
                    break
            else:
                # BUGFIX: only rows with no NAN entry are complete. The
                # original appended every row here (the append was outside a
                # for/else), so incomplete rows leaked into the clustering
                # input used by estimate_missing_values().
                complete_rows.append(i)
        return np.array(complete_rows), np.array(incomplete_rows)

    # Estimate the missing values
    def estimate_missing_values(self):
        """Return one estimated value per incomplete row.

        NOTE(review): assumes exactly one missing entry per incomplete row —
        only the first NAN position is estimated; confirm against callers.
        """
        estimated_data = []
        complete_data = np.array([self.data[x] for x in self.complete_rows])
        centers, _, _, _, _, _, _ = cmeans(data=complete_data.transpose(), c=self.c, m=self.m, error=FCMParam.ERROR,
                                           maxiter=FCMParam.MAX_ITR, init=None)

        # Calculate distance between two points based on euclidean distance
        def calculate_distance(data_1, data_2):
            return np.linalg.norm(data_1 - data_2)

        # Calculate the membership value for given point
        def calculate_membership(dist_matrix, distance, m):
            numerator = np.power(distance, -2 / (1 - m))
            denominator = np.array([np.power(x, -2 / (1 - m)) for x in dist_matrix]).sum()
            return numerator / denominator

        for i in self.incomplete_rows:
            estimated = 0
            dist, membership_value = [], []
            # Position of the missing value within the row.
            miss_ind = np.where(self.data[i] == NAN)[0][0]
            # Distances to each center, ignoring the missing coordinate.
            for center in centers:
                dist.append(calculate_distance(data_1=np.delete(np.array(center), miss_ind),
                                               data_2=np.delete(np.array(self.data[i]), miss_ind)))
            for d in dist:
                membership_value.append(calculate_membership(dist, d, self.m))
            # Weighted mix of the centers' values at the missing coordinate.
            for k in range(self.c):
                estimated += centers[k][miss_ind] * membership_value[k]
            estimated_data.append(estimated)
        return np.array(estimated_data)
| 37.241935 | 116 | 0.585535 | 2,226 | 0.964054 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.078822 |
f39d173c9fbc317050ff95708eda00f0d96dd9ff | 3,444 | py | Python | assignments/06_common/common_expanded.py | xanoob/be434-fall-2021 | d06c2f105a03a48e4a2c4024151f24eb8dbab30e | [
"MIT"
] | null | null | null | assignments/06_common/common_expanded.py | xanoob/be434-fall-2021 | d06c2f105a03a48e4a2c4024151f24eb8dbab30e | [
"MIT"
] | null | null | null | assignments/06_common/common_expanded.py | xanoob/be434-fall-2021 | d06c2f105a03a48e4a2c4024151f24eb8dbab30e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Author : RoxanneB <RoxanneB@localhost>
Date : 2021-10-07
Purpose: Rock the Casbah
"""
import argparse
import sys
import string
from collections import defaultdict
# --------------------------------------------------
def get_args():
    """Define and parse the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Rock the Casbah',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Two required positional input files, opened for reading by argparse.
    parser.add_argument('infile1',
                        metavar='FILE',
                        type=argparse.FileType('rt'),
                        default=None,
                        help='Input file 1')

    parser.add_argument('infile2',
                        metavar='FILE',
                        type=argparse.FileType('rt'),
                        default=None,
                        help='Input file 2')

    # Optional output destination; falls back to stdout.
    parser.add_argument('-o',
                        '--outfile',
                        metavar='FILE',
                        type=argparse.FileType('wt'),
                        default=sys.stdout,
                        help='Optional output file')

    # Maximum Hamming distance; 0 means plain set intersection.
    parser.add_argument('-d',
                        '--distance',
                        metavar='int',
                        type=int,
                        default=0,
                        help='Calculate Hamming distance')

    return parser.parse_args()
# --------------------------------------------------
def flatlist(nested_list):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for inner in nested_list:
        flat.extend(inner)
    return flat
def lines_to_list(infile):
    """ put all words in a file into a list """
    # Split each line on whitespace and collect everything into one flat
    # list (the nested-list + flatten dance of the original, inlined).
    words = []
    for line in infile:
        words.extend(line.split())
    return words
def rm_punctuation(inlist):
    """ takes the output of lines_to_list, requires str module """
    # Build the translation table once, then strip all ASCII punctuation
    # from every word.
    table = str.maketrans('', '', string.punctuation)
    return [word.translate(table) for word in inlist]
def get_hamming(seq1, seq2):
    """Hamming-style distance between two sequences.

    Counts positional mismatches over the common prefix, plus the
    difference in length for unequal-length inputs.
    """
    overlap = min(len(seq1), len(seq2))
    distance = abs(len(seq1) - len(seq2))
    for pos in range(overlap):
        if seq1[pos] != seq2[pos]:
            distance += 1
    return int(distance)
def main():
    """Compare the word lists of the two input files and print the result."""
    args = get_args()
    words_a = rm_punctuation([token.lower() for token in lines_to_list(args.infile1)])
    words_b = rm_punctuation([token.lower() for token in lines_to_list(args.infile2)])

    if args.distance:
        # Group every cross-file word pair by its Hamming distance, then
        # report each group at or below the requested threshold.
        pairs_by_distance = defaultdict(list)
        for left in words_a:
            for right in words_b:
                pairs_by_distance[get_hamming(left, right)].append([left, right])
        for distance, pairs in pairs_by_distance.items():
            if distance <= args.distance:
                print(flatlist(pairs), file=args.outfile)
    else:
        # No distance requested: print the sorted common vocabulary.
        for word in sorted(set(words_a).intersection(words_b)):
            print(word, file=args.outfile)
# file_intersect = sorted(list(set(infile1_ls).intersection(infile2_ls)))
#
# # for word in file_intersect:
# # print(word, file=args.outfile)
# --------------------------------------------------
if __name__ == '__main__':
    main()  # script entry point: parse args, compare the two word lists
| 28.941176 | 93 | 0.539199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 957 | 0.277875 |
f3a12a458087d0fac906201bd400b84157ccb937 | 3,421 | py | Python | protocols/acars618.py | wagoodman/protocol-tools | 7daa021265beca9755fe0a96203801e2270f4a8b | [
"MIT"
] | 1 | 2022-01-29T00:28:50.000Z | 2022-01-29T00:28:50.000Z | protocols/acars618.py | wagoodman/protocol-tools | 7daa021265beca9755fe0a96203801e2270f4a8b | [
"MIT"
] | null | null | null | protocols/acars618.py | wagoodman/protocol-tools | 7daa021265beca9755fe0a96203801e2270f4a8b | [
"MIT"
] | null | null | null | from byteProtocol import *
import re
import time
class Acars618Support(object):
@staticmethod
def int(cls, value):
if isinstance(value, int):
value = str(value)
return value
@staticmethod
def tail(cls, value):
if not isinstance(value, str):
raise BadProtocolFieldType('tail', type(value), str)
return value.rjust(cls.assembleSpecFields['tail'].maxLen, ".")
@staticmethod
def msn(cls, value):
if isinstance(value, bool):
if value:
return MSN(useCache=False)
else:
return cls.assembleSpecFields['msn'].defaultValue
if not isinstance(value, str):
raise BadProtocolFieldType('msn', type(value), str)
return value
class Acars618Assembler(ByteProtocolAssembler):
    """Declarative field tables describing how an ACARS 618 message is
    assembled (field widths, accepted input types, defaults and the
    normalisation callbacks from Acars618Support)."""
    assembleSpecFields = {
        # name : FieldSpec( mn, mx , inputTypes, isRequired, defaultValue, assembleCallback)
        "soh": FieldSpec(1, 1, (str,), True, "\x01", None),
        "mode": FieldSpec(1, 1, (str, int), True, "2", Acars618Support.int),
        "tail": FieldSpec(7, 7, (str,), True, "N123456", Acars618Support.tail),
        "ack": FieldSpec(1, 1, (str,), True, "\x15", None),
        "label": FieldSpec(1, 1, (str,), True, "H1", None),
        "dbi": FieldSpec(1, 1, (str, int), False, "1", Acars618Support.int),
        "sot": FieldSpec(1, 1, (str,), True, "\x02", None),
        "msn": FieldSpec(4, 4, (str, bool), True, "M01A", Acars618Support.msn),
        "agency": FieldSpec(2, 2, (str,), True, "UA", None),
        "flight": FieldSpec(4, 4, (str, int), True, "9090", None),
        "text": FieldSpec(1, 220, (str,), False, None, None),
        "trailer": FieldSpec(1, 1, (str,), True, "\x03", None),
    }
    # Literal filler tokens that can be referenced between named fields.
    assembleGenFields = {"rn": "\r\n",
                         "sp": " ",
                         "dot": "."}
    # protocol specification in field order (all fields)
    assembleFieldOrder = ("soh", "mode", "tail", "ack", "label", "dbi", "sot",
                          "msn", "agency", "flight", "text", "trailer")
class Acars620Parser(ByteProtocolParser):
    """Regex field tables used to parse an ACARS 620 message off the wire.

    The combined pattern and compiled regex are built once at class
    definition time by initParserFields (from byteProtocol).
    """
    minLength = 20
    # Protocol specific fields of interest (to be named)
    parseSpecFields = {"soh": "\x01",
                       # NOTE(review): "tail" uses the range A-z (covers
                       # punctuation between 'Z' and 'a') — likely a typo
                       # for A-Z; confirm before changing.
                       "mode": "[!a-zA-Z0-9]{1}",
                       "tail": "[!a-zA-z0-9\.\-]{7}",
                       "ack": "[!a-zA-Z\x15\x06]{1}",
                       "label": "[!a-zA-Z0-9_\x7f]{2}",
                       "dbi": "[!a-zA-Z0-9]{0,1}",
                       "sot": "\x02",
                       "msn": "[!a-zA-Z0-9]{4}",
                       "agency": "[!a-zA-Z0-9$]{2}",
                       "flight": "[!a-zA-Z0-9 ]{4}",
                       "text": "[^\x03\x17]*",
                       "trailer": "[\x03\x17]{1}",
                       }
    # Protocol specific repeating fields (not to be named)
    parseGenFields = {}
    # protocol specification named + unnamed field order
    parseFieldOrder = ("soh", "mode", "tail", "ack", "label", "dbi", "sot",
                       "msn", "agency", "flight", "text", "trailer")
    # Fields which can be safely censored upon multiple reruns of the same test
    parseCensorFields = ("dbi", "msn")
    parseNamedFields, parsePattern, parseReObj = initParserFields(parseSpecFields, parseGenFields, parseFieldOrder)
class Acars618(Acars620Parser, Acars618Assembler):
    """Combined parser + assembler facade for the ACARS 618/620 link layer."""
    pass
| 36.010526 | 115 | 0.524408 | 3,360 | 0.982169 | 0 | 0 | 688 | 0.201111 | 0 | 0 | 983 | 0.287343 |
f3a248c8b3cb8bcd7112da99c041f910e5474413 | 7,018 | py | Python | db_env/tpch/tpch_stream/RefreshPair.py | Chotom/rl-db-indexing | 16eaf0a3e3aef83b3fd077111e922dea6dd6a1f3 | [
"MIT"
] | null | null | null | db_env/tpch/tpch_stream/RefreshPair.py | Chotom/rl-db-indexing | 16eaf0a3e3aef83b3fd077111e922dea6dd6a1f3 | [
"MIT"
] | null | null | null | db_env/tpch/tpch_stream/RefreshPair.py | Chotom/rl-db-indexing | 16eaf0a3e3aef83b3fd077111e922dea6dd6a1f3 | [
"MIT"
] | null | null | null | import datetime
from typing import Iterator, Tuple, List
import numpy as np
import pandas as pd
from mysql.connector import MySQLConnection
from mysql.connector.cursor import MySQLCursorBuffered
from db_env.tpch.config import DB_REFRESH_DIR
from db_env.tpch.tpch_stream.consts import LINEITEM_QUOTE_INDEX_LIST, ORDERS_QUOTE_INDEX_LIST, VALUE_SEP
from shared_utils.utils import create_logger
class RefreshPair:
    """Runs one TPC-H refresh pair (RF1 inserts + RF2 deletes) against a
    MySQL connection, timing each refresh function and recording the
    measurements in a pandas DataFrame."""

    # Iterator over the multi-statement INSERT queries built by load_data().
    __insert_queries_iter: Iterator[str]

    def __init__(self, logger_name: str, run_number: int, connection: MySQLConnection, cursor: MySQLCursorBuffered):
        self._log = create_logger(logger_name)
        self._connection = connection
        self._cursor = cursor
        self.__run_number = run_number
        self.__rf1_time = datetime.timedelta(0)
        self.__rf2_time = datetime.timedelta(0)
        # NOTE(review): np.dtype(str, np.timedelta64) passes timedelta64 as
        # the `align` argument of np.dtype — probably not what was intended;
        # confirm the desired column dtypes.
        self._df_measures = pd.DataFrame(columns=['name', 'time'], dtype=np.dtype(str, np.timedelta64))
        self._df_measures.set_index('name', inplace=True)

    def load_data(self):
        """Read the dbgen refresh files for this run number and pre-build
        the INSERT and DELETE query iterators."""
        self._log.info('Load refresh queries...')
        insert_queries = []
        # Load insert queries from files
        with open(f'{DB_REFRESH_DIR}/orders.tbl.u{self.__run_number}', 'r') as orders_file, \
                open(f'{DB_REFRESH_DIR}/lineitem.tbl.u{self.__run_number}', 'r') as lineitem_file:
            try:
                # read first lineitem query
                lineitem_row = next(lineitem_file)
                lineitem_id, lineitem_query = self._data_row_to_query(lineitem_row, 'lineitem',
                                                                      LINEITEM_QUOTE_INDEX_LIST)
            except StopIteration:
                raise Exception("Lineitem update file is empty.")
            for orders_row in orders_file:
                # read orders query
                orders_id, orders_query = self._data_row_to_query(orders_row, 'orders', ORDERS_QUOTE_INDEX_LIST)
                insert_queries.append(orders_query)
                # read next lineitem query if current orders query has lineitem children
                while orders_id == lineitem_id and (lineitem_row := next(lineitem_file, None)) is not None:
                    # add lineitem
                    insert_queries[-1] += lineitem_query
                    lineitem_id, lineitem_query = self._data_row_to_query(lineitem_row, 'lineitem',
                                                                          LINEITEM_QUOTE_INDEX_LIST)
            # save queries
            self.__insert_queries_iter = iter(insert_queries)
        # load delete queries
        with open(f'{DB_REFRESH_DIR}/delete.{self.__run_number}', 'r') as deletes_file:
            delete_queries = deletes_file.readlines()
            for i, delete_row in enumerate(delete_queries):
                delete_queries[i] = self._delete_row_to_query(delete_row)
            # save queries
            self.__delete_queries_iter = iter(delete_queries)
        self._log.info('Refresh queries loaded successfully...')

    def execute_pair(self):
        """Execute RF1 then RF2 and log their combined wall-clock time."""
        self._log.info(f'Run refresh pair {self.__run_number}...')
        self.execute_refresh_function1()
        self.execute_refresh_function2()
        self._log.info(
            f'Execution of refresh pair {self.__run_number} ended successful. Measured time: {self.__rf1_time + self.__rf2_time}')

    def execute_refresh_function1(self):
        """RF1: run all prepared INSERT statements, timing each commit."""
        self._log.info('Run refresh function 1...')
        # Execute insert queries
        for query in self.__insert_queries_iter:
            # Insert new order into `orders` and its items into `lineitem`
            cursors_generator = self._cursor.execute(query, multi=True)
            # Measure time for transaction
            start = datetime.datetime.now()
            # for _ in cursors_generator: pass # iterate over generated cursors to execute them and get the results
            cursors = [cur for cur in cursors_generator]
            self._connection.commit()
            time_delta = datetime.datetime.now() - start
            self.__rf1_time += time_delta
            # Print additional information
            self._log.debug(f'Time for {query} query: {time_delta}')
        self._df_measures = self._df_measures.append({'name': f'RF1_{self.__run_number}', 'time': self.__rf1_time},
                                                     ignore_index=True)
        self._log.info(f'Execution of refresh function 1 ended successful. Measured time: {self.__rf1_time}')

    def execute_refresh_function2(self):
        """RF2: run all prepared DELETE statements, timing each commit."""
        self._log.info('Run refresh function 2...')
        # Execute delete queries
        for query in self.__delete_queries_iter:
            # Delete order from `orders` and its items from `lineitem`
            cursors_generator = self._cursor.execute(query, multi=True)
            # Measure time for transaction
            start = datetime.datetime.now()
            # for _ in cursors_generator: pass # iterate over generated cursors to execute them and get the results
            cursors = [cur for cur in cursors_generator]
            self._connection.commit()
            time_delta = datetime.datetime.now() - start
            self.__rf2_time += time_delta
            # Print additional information
            self._log.debug(f'Time for {query} query: {time_delta}')
        self._df_measures = self._df_measures.append({'name': f'RF2_{self.__run_number}', 'time': self.__rf2_time},
                                                     ignore_index=True)
        self._log.info(f'Execution of refresh function 2 ended successful. Measured time: {self.__rf2_time}')

    @property
    def df_measures(self) -> pd.DataFrame:
        """:return: dataframe with measured queries and total_time of execution"""
        return self._df_measures.set_index('name')

    def _data_row_to_query(self, row: str, table_name: str, quoted_values_indexes: List[int]) -> Tuple[int, str]:
        """
        Convert string data into mysql insert query.
        :param row: data separated by '|' in string for query to insert
        :param table_name: table name to insert values
        :param quoted_values_indexes: indexes of varchar columns in table
        :return: tuple of record id and Mysql query to execute
        """
        # Remove last '|' or '\n' character and split into values
        values = row.rstrip('\n|').split('|')
        # Surround varchar values with quotes
        for index in quoted_values_indexes:
            values[index] = f"'{values[index]}'"
        return int(values[0]), f'INSERT INTO `{table_name}` VALUES ({VALUE_SEP.join(values)});'

    def _delete_row_to_query(self, delete_row: str) -> str:
        # Build the two-statement delete for one order key.
        id = delete_row.rstrip('\n|')
        # return f'DELETE FROM `orders`, `lineitem`' \
        #        f'USING `orders` INNER JOIN `lineitem` ON `orders`.`o_orderkey` = `l_orderkey`' \
        #        f'WHERE O_ORDERKEY = {id};'
        return f'DELETE FROM lineitem WHERE l_orderkey = {id}; DELETE FROM orders WHERE o_orderkey = {id};'
| 44.138365 | 130 | 0.635081 | 6,622 | 0.943574 | 0 | 0 | 186 | 0.026503 | 0 | 0 | 2,420 | 0.344828 |
f3a26881a4ca1cefb8ab84deb4c848ac0e3245ce | 285 | py | Python | zip_submission.py | RapidsAtHKUST/TriangleCounting | b63541f8f9f32d3cb2f52b9b8f07e5974238b6e1 | [
"MIT"
] | null | null | null | zip_submission.py | RapidsAtHKUST/TriangleCounting | b63541f8f9f32d3cb2f52b9b8f07e5974238b6e1 | [
"MIT"
] | null | null | null | zip_submission.py | RapidsAtHKUST/TriangleCounting | b63541f8f9f32d3cb2f52b9b8f07e5974238b6e1 | [
"MIT"
] | null | null | null | import datetime
import os
if __name__ == '__main__':
    # Timestamp the archive name down to the minute so successive runs do
    # not overwrite each other.
    date_str = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")
    print(date_str)
    os.system('zip -r tc-rapids-{}.zip triangle-counting technical_report.pdf -x *cmake-build-debug/* -x */CMake* -x *.idea/*'.format(date_str))
| 35.625 | 144 | 0.673684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.484211 |
f3a2c44f9a438f736d2343eb401aac7338e90dca | 382 | py | Python | binarysearch/loneInteger.py | Ry4nW/python-wars | 76e3fb24b7ae2abf35db592f1ad59cf8d5f9e508 | [
"MIT"
] | 1 | 2021-06-06T19:55:22.000Z | 2021-06-06T19:55:22.000Z | binarysearch/loneInteger.py | Ry4nW/python-wars | 76e3fb24b7ae2abf35db592f1ad59cf8d5f9e508 | [
"MIT"
] | 1 | 2022-01-20T19:20:33.000Z | 2022-01-20T23:51:46.000Z | binarysearch/loneInteger.py | Ry4nW/python-wars | 76e3fb24b7ae2abf35db592f1ad59cf8d5f9e508 | [
"MIT"
] | null | null | null | class Solution:
def solve(self, nums):
integersDict = {}
for i in range(len(nums)):
try:
integersDict[nums[i]] += 1
except:
integersDict[nums[i]] = 1
for integer in integersDict:
if integersDict[integer] != 3:
return integer
return nums[0]
| 19.1 | 42 | 0.455497 | 373 | 0.97644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f3a50a6379175fbc9b08a548137328cb017058f2 | 8,493 | py | Python | data/multimodal_miss_dataset.py | Norwa9/missing_modalities | 53cdcc1749eb5f0b74cc57b05ab3768b9c514385 | [
"MIT"
] | null | null | null | data/multimodal_miss_dataset.py | Norwa9/missing_modalities | 53cdcc1749eb5f0b74cc57b05ab3768b9c514385 | [
"MIT"
] | null | null | null | data/multimodal_miss_dataset.py | Norwa9/missing_modalities | 53cdcc1749eb5f0b74cc57b05ab3768b9c514385 | [
"MIT"
] | null | null | null | import os
import sys
sys.path.append("/data/luowei/MMIN")
import json
import random
import torch
import numpy as np
import h5py
from torch.nn.utils.rnn import pad_sequence
from torch.nn.utils.rnn import pack_padded_sequence
from data.base_dataset import BaseDataset
class MultimodalMissDataset(BaseDataset):
    """IEMOCAP multimodal dataset that simulates missing modalities
    (audio A / visual V / lexical L) for training and evaluation."""

    @staticmethod
    def modify_commandline_options(parser, isTrain=None):
        """Register the dataset-specific command-line options."""
        parser.add_argument('--cvNo', type=int, help='which cross validation set')
        parser.add_argument('--A_type', type=str, help='which audio feat to use')
        parser.add_argument('--V_type', type=str, help='which visual feat to use')
        parser.add_argument('--L_type', type=str, help='which lexical feat to use')
        parser.add_argument('--output_dim', type=int, help='how many label types in this dataset')
        parser.add_argument('--norm_method', type=str, choices=['utt', 'trn'], help='how to normalize input comparE feature')
        return parser

    def __init__(self, opt, set_name):
        ''' IEMOCAP dataset reader
            set_name in ['trn', 'val', 'tst']
        '''
        super().__init__(opt)
        # record & load basic settings
        cvNo = opt.cvNo
        self.set_name = set_name
        pwd = os.path.abspath(__file__)
        pwd = os.path.dirname(pwd)
        config = json.load(open(os.path.join(pwd, 'config', 'IEMOCAP_config.json')))
        self.norm_method = opt.norm_method
        # load feature
        self.A_type = opt.A_type
        self.all_A = h5py.File(os.path.join(config['feature_root'], 'A', f'{self.A_type}.h5'), 'r')
        if self.A_type == 'comparE':
            # Fold-specific mean/std for comparE feature normalisation.
            self.mean_std = h5py.File(os.path.join(config['feature_root'], 'A', 'comparE_mean_std.h5'), 'r')
            self.mean = torch.from_numpy(self.mean_std[str(cvNo)]['mean'][()]).unsqueeze(0).float()
            self.std = torch.from_numpy(self.mean_std[str(cvNo)]['std'][()]).unsqueeze(0).float()
        self.V_type = opt.V_type
        self.all_V = h5py.File(os.path.join(config['feature_root'], 'V', f'{self.V_type}.h5'), 'r')
        self.L_type = opt.L_type
        self.all_L = h5py.File(os.path.join(config['feature_root'], 'L', f'{self.L_type}.h5'), 'r')
        # load target
        label_path = os.path.join(config['target_root'], f'{cvNo}', f"{set_name}_label.npy")
        int2name_path = os.path.join(config['target_root'], f'{cvNo}', f"{set_name}_int2name.npy")
        self.label = np.load(label_path)
        self.label = np.argmax(self.label, axis=1)
        self.int2name = np.load(int2name_path)
        # make missing index
        if set_name != 'trn': # val && tst
            self.missing_index = torch.tensor([
                [1,0,0], # AZZ
                [0,1,0], # ZVZ
                [0,0,1], # ZZL
                [1,1,0], # AVZ
                [1,0,1], # AZL
                [0,1,1], # ZVL
                # [1,1,1] # AVL
            ] * len(self.label)).long()
            self.miss_type = ['azz', 'zvz', 'zzl', 'avz', 'azl', 'zvl'] * len(self.label)
        else: # trn
            self.missing_index = [
                [1,0,0], # AZZ
                [0,1,0], # ZVZ
                [0,0,1], # ZZL
                [1,1,0], # AVZ
                [1,0,1], # AZL
                [0,1,1], # ZVL
                # [1,1,1] # AVL
            ]
            self.miss_type = ['azz', 'zvz', 'zzl', 'avz', 'azl', 'zvl']
        # set collate function
        self.manual_collate_fn = True

    def __getitem__(self, index):
        if self.set_name != 'trn':
            # val && tst
            # for val/tst, walk the six missing-modality variants of each
            # utterance in a fixed order
            feat_idx = index // 6 # totally 6 missing types
            missing_index = self.missing_index[index]
            miss_type = self.miss_type[index]
        else:
            # trn
            # for trn, the missing pattern is drawn at random on every
            # __getitem__ call
            feat_idx = index
            missing_index = torch.tensor(random.choice(self.missing_index)).long() # one random missing pattern per access; over many epochs this behaves like a 6x larger dataset
            miss_type = random.choice(self.miss_type) # NOTE(review): miss_type is sampled independently of missing_index, so the two can disagree — looks like a bug in the original
        int2name = self.int2name[feat_idx][0].decode()
        label = torch.tensor(self.label[feat_idx])
        # process A_feat
        A_feat = torch.from_numpy(self.all_A[int2name][()]).float()
        if self.A_type == 'comparE':
            A_feat = self.normalize_on_utt(A_feat) if self.norm_method == 'utt' else self.normalize_on_trn(A_feat)
        # process V_feat
        V_feat = torch.from_numpy(self.all_V[int2name][()]).float()
        # process L_feat
        L_feat = torch.from_numpy(self.all_L[int2name][()]).float()
        return {
            # trn returns the complete (unmasked) features
            'A_feat': A_feat,
            'V_feat': V_feat,
            'L_feat': L_feat,
            'label': label,
            'int2name': int2name,
            'missing_index': missing_index,
            'miss_type': miss_type
        } if self.set_name == 'trn' else{
            # val/tst return the masked (missing) version
            'A_feat': A_feat * missing_index[0], # mask multiply: e.g. miss_index = [0,1,1] zeroes out A_feat
            'V_feat': V_feat * missing_index[1],
            'L_feat': L_feat * missing_index[2],
            'label': label,
            'int2name': int2name,
            'missing_index': missing_index,
            'miss_type': miss_type
        }

    def __len__(self):
        return len(self.missing_index) if self.set_name != 'trn' else len(self.label)

    def normalize_on_utt(self, features):
        """Z-normalise features with per-utterance mean/std."""
        mean_f = torch.mean(features, dim=0).unsqueeze(0).float()
        std_f = torch.std(features, dim=0).unsqueeze(0).float()
        std_f[std_f == 0.0] = 1.0
        features = (features - mean_f) / std_f
        return features

    def normalize_on_trn(self, features):
        """Z-normalise features with the training-fold mean/std."""
        features = (features - self.mean) / self.std
        return features

    # called once per batch of samples to pad variable-length sequences and
    # pack everything into batched tensors
    def collate_fn(self, batch):
        A = [sample['A_feat'] for sample in batch]
        V = [sample['V_feat'] for sample in batch]
        L = [sample['L_feat'] for sample in batch]
        lengths = torch.tensor([len(sample) for sample in A]).long()
        A = pad_sequence(A, batch_first=True, padding_value=0)
        V = pad_sequence(V, batch_first=True, padding_value=0)
        L = pad_sequence(L, batch_first=True, padding_value=0)
        label = torch.tensor([sample['label'] for sample in batch])
        int2name = [sample['int2name'] for sample in batch]
        missing_index = torch.cat([sample['missing_index'].unsqueeze(0) for sample in batch], axis=0)
        miss_type = [sample['miss_type'] for sample in batch]
        return {
            'A_feat': A,
            'V_feat': V,
            'L_feat': L,
            'label': label,
            'lengths': lengths,
            'int2name': int2name,
            'missing_index': missing_index,
            'miss_type': miss_type
        }
# run
# /data/luowei/anaconda3/envs/wav2clip_env/bin/python /data/luowei/MMIN/data/multimodal_miss_dataset.py
if __name__ == '__main__':
    # Minimal stand-in for the option object expected by the dataset.
    class test:
        cvNo = 1
        A_type = "comparE"
        V_type = "denseface"
        L_type = "bert_large"
        norm_method = 'trn'

    opt = test()
    print('Reading from dataset:')
    a = MultimodalMissDataset(opt, set_name='trn')
    # print()
    # data = next(iter(a))
    # for k, v in data.items():
    #     if k not in ['int2name', 'label']:
    #         print(k, v.shape, torch.sum(v))
    #     else:
    #         print(k, v)
    print('Reading from dataloader:')
    x = [a[i] for i in range(16)]
    print('each one:')
    for i, _x in enumerate(x):
        print(_x['missing_index'], _x['miss_type'])
        # sequence length and feature dimension differ across modalities
        print(_x['A_feat'].shape,_x['V_feat'].shape,_x['L_feat'].shape) # torch.Size([xx, 130]) torch.Size([50, 342]) torch.Size([22, 1024])
    # for i, _x in enumerate(x):
    #     print(i, ':')
    #     for k, v in _x.items():
    #         if k not in ['int2name', 'label', 'missing_index']:
    #             print(k, v.shape, torch.sum(v))
    #         else:
    #             print(k, v)
    # print('packed output')
    x = a.collate_fn(x)
    # for k, v in x.items():
    #     if k not in ['int2name', 'label', 'miss_type']:
    #         print(k, v.shape, torch.sum(v))
    #     else:
    #         print(k, v)
    print('collate_feat.shape:',x['L_feat'].shape)
| 39.686916 | 140 | 0.558695 | 7,083 | 0.808839 | 0 | 0 | 650 | 0.074226 | 0 | 0 | 2,841 | 0.324426 |
f3a82067b35984c55df054378d7da2b20c33f677 | 524 | py | Python | projects/admin.py | BridgesLab/Lab-Website | d6f6c9c068bbf668c253e5943d9514947023e66d | [
"CC0-1.0",
"MIT"
] | 6 | 2015-08-31T16:55:16.000Z | 2022-02-10T08:23:07.000Z | projects/admin.py | BridgesLab/Lab-Website | d6f6c9c068bbf668c253e5943d9514947023e66d | [
"CC0-1.0",
"MIT"
] | 30 | 2015-03-22T15:49:31.000Z | 2020-05-25T23:59:37.000Z | projects/admin.py | BridgesLab/Lab-Website | d6f6c9c068bbf668c253e5943d9514947023e66d | [
"CC0-1.0",
"MIT"
] | 6 | 2016-09-07T08:25:21.000Z | 2020-03-27T10:24:57.000Z | '''This package sets up the admin interface for the :mod:`papers` app.'''
from django.contrib import admin
from projects.models import Funding, FundingAgency
class FundingAdmin(admin.ModelAdmin):
    '''The :class:`~projects.models.Funding` model admin is the default.'''
    pass
# Make Funding rows editable in the Django admin site.
admin.site.register(Funding, FundingAdmin)
class FundingAgencyAdmin(admin.ModelAdmin):
    '''The :class:`~projects.models.FundingAgency` model admin is the default.'''
    pass
# Make FundingAgency rows editable in the Django admin site.
admin.site.register(FundingAgency, FundingAgencyAdmin)
| 37.428571 | 81 | 0.751908 | 264 | 0.503817 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.421756 |
f3a93a6c183cf430e924e42fd67f039a0f3d50e9 | 859 | py | Python | evap/evaluation/migrations/0032_populate_rating_answer_counters.py | JenniferStamm/EvaP | 1d71e4efcd34d01f28e30c6026c8dcc708921193 | [
"MIT"
] | null | null | null | evap/evaluation/migrations/0032_populate_rating_answer_counters.py | JenniferStamm/EvaP | 1d71e4efcd34d01f28e30c6026c8dcc708921193 | [
"MIT"
] | null | null | null | evap/evaluation/migrations/0032_populate_rating_answer_counters.py | JenniferStamm/EvaP | 1d71e4efcd34d01f28e30c6026c8dcc708921193 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def populateRatingAnswerCounters(apps, schema_editor):
    """Copy every Likert and Grade answer counter into the unified
    RatingAnswerCounter model introduced by the previous migration."""
    likert_model = apps.get_model('evaluation', 'LikertAnswerCounter')
    grade_model = apps.get_model('evaluation', 'GradeAnswerCounter')
    rating_model = apps.get_model('evaluation', 'RatingAnswerCounter')

    old_counters = list(likert_model.objects.all()) + list(grade_model.objects.all())
    for old in old_counters:
        rating_model.objects.create(
            question=old.question,
            contribution=old.contribution,
            answer=old.answer,
            count=old.count,
        )
class Migration(migrations.Migration):
    """Data migration: populate RatingAnswerCounter from the legacy
    Likert/Grade counter models."""
    dependencies = [
        ('evaluation', '0031_add_rating_answer_counter'),
    ]
    operations = [
        migrations.RunPython(populateRatingAnswerCounters),
    ]
| 34.36 | 148 | 0.756694 | 210 | 0.24447 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.192084 |
f3a9594235f744c013c4a756f64e59b619db23a2 | 2,523 | py | Python | core/entities/character_utils.py | jklemm/py-dnd | 9f20a0e4f484297e80170cb529c0af8da0cf1032 | [
"MIT"
] | 9 | 2015-04-16T20:13:20.000Z | 2021-11-29T18:56:16.000Z | core/entities/character_utils.py | jklemm/py-dnd | 9f20a0e4f484297e80170cb529c0af8da0cf1032 | [
"MIT"
] | 1 | 2015-04-16T20:12:43.000Z | 2015-04-18T12:25:48.000Z | core/entities/character_utils.py | jklemm/py-dnd | 9f20a0e4f484297e80170cb529c0af8da0cf1032 | [
"MIT"
] | 2 | 2021-11-27T23:49:52.000Z | 2021-11-29T18:56:19.000Z |
class CharacterRaceList(object):
    """String constants for every playable D&D 4e race."""
    DEVA = 'DEVA'
    DRAGONBORN = 'DRAGONBORN'
    DWARF = 'DWARF'
    ELADRIN = 'ELADRIN'
    ELF = 'ELF'
    GITHZERAI = 'GITHZERAI'
    GNOME = 'GNOME'
    GOLIATH = 'GOLIATH'
    HALFELF = 'HALFELF'
    HALFLING = 'HALFLING'
    HALFORC = 'HALFORC'
    HUMAN = 'HUMAN'
    MINOTAUR = 'MINOTAUR'
    SHARDMIND = 'SHARDMIND'
    SHIFTER = 'SHIFTER'
    TIEFLING = 'TIEFLING'
    WILDEN = 'WILDEN'


class CharacterClassList(object):
    """String constants for every playable D&D 4e class."""
    ARDENT = 'ARDENT'
    AVENGER = 'AVENGER'
    BARBARIAN = 'BARBARIAN'
    BARD = 'BARD'
    BATTLEMIND = 'BATTLEMIND'
    CLERIC = 'CLERIC'
    DRUID = 'DRUID'
    FIGHTER = 'FIGHTER'
    INVOKER = 'INVOKER'
    MONK = 'MONK'
    PALADIN = 'PALADIN'
    PSION = 'PSION'
    RANGER = 'RANGER'
    ROGUE = 'ROGUE'
    RUNEPRIEST = 'RUNEPRIEST'
    SEEKER = 'SEEKER'
    SHAMAN = 'SHAMAN'
    SORCERER = 'SORCERER'
    WARDEN = 'WARDEN'
    WARLOCK = 'WARLOCK'
    WARLORD = 'WARLORD'
    WIZARD = 'WIZARD'


class CharacterRoleList(object):
    """String constants for the four D&D 4e party roles."""
    CONTROLLER = 'CONTROLLER'
    DEFENDER = 'DEFENDER'
    LEADER = 'LEADER'
    STRIKER = 'STRIKER'
class AlignmentList(object):
    """String constants for the five D&D 4e alignments."""
    GOOD = 'GOOD'
    LAWFUL_GOOD = 'LAWFUL_GOOD'
    UNALIGNED = 'UNALIGNED'
    EVIL = 'EVIL'
    CHAOTIC_EVIL = 'CHAOTIC_EVIL'


class DeitiesList(object):
    """Maps each deity to its alignment (the attribute value IS the
    deity's alignment string from AlignmentList)."""
    ASMODEUS = AlignmentList.EVIL
    AVANDRA = AlignmentList.GOOD
    BAHAMUT = AlignmentList.LAWFUL_GOOD
    BANE = AlignmentList.EVIL
    CORELLON = AlignmentList.UNALIGNED
    ERATHIS = AlignmentList.UNALIGNED
    GRUUMSH = AlignmentList.CHAOTIC_EVIL
    IOUN = AlignmentList.UNALIGNED
    KORD = AlignmentList.UNALIGNED
    LOLTH = AlignmentList.CHAOTIC_EVIL
    MELORA = AlignmentList.UNALIGNED
    MORADIN = AlignmentList.LAWFUL_GOOD
    PELOR = AlignmentList.GOOD
    SEHANINE = AlignmentList.UNALIGNED
    THE_RAVEN_QUEEN = AlignmentList.UNALIGNED
    TIAMAT = AlignmentList.EVIL
    TOROG = AlignmentList.EVIL
    VECNA = AlignmentList.EVIL
    ZEHIR = AlignmentList.EVIL
class ScriptList(object):
    """String constants for the writing scripts of the D&D 4e setting."""
    COMMON = 'COMMON'
    RELLANIC = 'RELLANIC'
    IOKHARIC = 'IOKHARIC'
    DAVEK = 'DAVEK'
    BARAZHAD = 'BARAZHAD'
    SUPERNAL = 'SUPERNAL'


class LanguageList(object):
    """Maps each language to the script it is written in (the attribute
    value is the ScriptList constant)."""
    COMMON = ScriptList.COMMON
    DEEP_SPEECH = ScriptList.RELLANIC
    DRACONIC = ScriptList.IOKHARIC
    DWARVEN = ScriptList.DAVEK
    ELVEN = ScriptList.RELLANIC
    GIANT = ScriptList.DAVEK
    GOBLIN = ScriptList.COMMON
    PRIMORDIAL = ScriptList.BARAZHAD
    # NOTE(review): attribute name looks like a typo for SUPERNAL — renaming
    # would break existing callers, so it is only flagged here.
    SUPERNA = ScriptList.SUPERNAL
    ABYSSAL = ScriptList.BARAZHAD
| 24.259615 | 45 | 0.663496 | 2,503 | 0.992073 | 0 | 0 | 0 | 0 | 0 | 0 | 481 | 0.190646 |
f3aa23f0ec5b96698289637ee3c9f3e193d97695 | 1,054 | py | Python | src/main.py | NonAbelianCapu/Traffic_Sim | bdbb4a7f058d8622ed6e6d7128db681cdae39b1b | [
"MIT"
] | null | null | null | src/main.py | NonAbelianCapu/Traffic_Sim | bdbb4a7f058d8622ed6e6d7128db681cdae39b1b | [
"MIT"
] | null | null | null | src/main.py | NonAbelianCapu/Traffic_Sim | bdbb4a7f058d8622ed6e6d7128db681cdae39b1b | [
"MIT"
] | null | null | null | import sim
import utils
import numpy as np
import matplotlib.pyplot as plt
import argparse
def main():
    """Parse the CLI parameters, run the traffic simulation and plot it."""
    my_parser = argparse.ArgumentParser(description='Parameters for Simulation')
    my_parser.add_argument('-N', '--n_cars', type=int, action='store', help='Number of cars', default = 40)
    my_parser.add_argument('-L', '--length', type=int, action='store', help='Length of road', default = 250)
    my_parser.add_argument('-P', '--p_break', type=float, action='store', help='probability of stopping', default = 0.1)
    my_parser.add_argument('-S', '--steps', type=int, action='store', help='Steps of simulation', required = True)
    args = my_parser.parse_args()
    # (debug leftover `print(dir(args))` removed)
    N = args.n_cars
    L = args.length
    # Initial positions and velocities, filled in by the simulator.
    pos = np.zeros(N)
    vel = np.zeros(N)
    sim.populate_arrays(pos, vel, N)
    pos_list = sim.run_simulation(pos, vel, N, L, MAX_STEPS=args.steps, p=args.p_break)
    # NOTE(review): the flow window end is hard-coded to 250 even when
    # --length differs — confirm whether args.length was intended here.
    flow = utils.estimate_flow(pos_list, N, 0, 250)  # currently unused result
    sim_fig = utils.plot_simulation(pos_list)  # currently unused result
    plt.show()
if __name__ == '__main__':
    main()  # script entry point: parse args, simulate, plot
| 29.277778 | 120 | 0.681214 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.188805 |
f3aa39200a91e8ee89c1f49153a1bf66342a6c27 | 1,666 | py | Python | tests/functional/parser/schemas.py | n2N8Z/aws-lambda-powertools-python | 0cb5d506f534ac76b42f2d5959d93c7b2bb4d8e9 | [
"MIT-0"
] | null | null | null | tests/functional/parser/schemas.py | n2N8Z/aws-lambda-powertools-python | 0cb5d506f534ac76b42f2d5959d93c7b2bb4d8e9 | [
"MIT-0"
] | null | null | null | tests/functional/parser/schemas.py | n2N8Z/aws-lambda-powertools-python | 0cb5d506f534ac76b42f2d5959d93c7b2bb4d8e9 | [
"MIT-0"
] | null | null | null | from typing import Dict, List, Optional
from pydantic import BaseModel
from typing_extensions import Literal
from aws_lambda_powertools.utilities.parser.models import (
DynamoDBStreamChangedRecordModel,
DynamoDBStreamModel,
DynamoDBStreamRecordModel,
EventBridgeModel,
SnsModel,
SnsNotificationModel,
SnsRecordModel,
SqsModel,
SqsRecordModel,
)
class MyDynamoBusiness(BaseModel):
    """Business payload of a DynamoDB item: Message as an S-typed string,
    Id as an N-typed number."""
    Message: Dict[Literal["S"], str]
    Id: Dict[Literal["N"], int]


class MyDynamoScheme(DynamoDBStreamChangedRecordModel):
    """Stream change record whose New/Old images are typed business payloads."""
    NewImage: Optional[MyDynamoBusiness]
    OldImage: Optional[MyDynamoBusiness]


class MyDynamoDBStreamRecordModel(DynamoDBStreamRecordModel):
    """Stream record with the typed change payload above."""
    dynamodb: MyDynamoScheme


class MyAdvancedDynamoBusiness(DynamoDBStreamModel):
    """Full DynamoDB stream event with typed records."""
    Records: List[MyDynamoDBStreamRecordModel]
class MyEventbridgeBusiness(BaseModel):
    """Typed `detail` payload of an EventBridge event."""
    instance_id: str
    state: str


class MyAdvancedEventbridgeBusiness(EventBridgeModel):
    """EventBridge envelope whose detail is the typed payload above."""
    detail: MyEventbridgeBusiness


class MySqsBusiness(BaseModel):
    """Typed body of an SQS message."""
    message: str
    username: str


class MyAdvancedSqsRecordModel(SqsRecordModel):
    """SQS record keeping the raw string body."""
    body: str


class MyAdvancedSqsBusiness(SqsModel):
    """Full SQS event with typed records."""
    Records: List[MyAdvancedSqsRecordModel]
class MySnsBusiness(BaseModel):
message: str
username: str
class MySnsNotificationModel(SnsNotificationModel):
Message: str
class MyAdvancedSnsRecordModel(SnsRecordModel):
Sns: MySnsNotificationModel
class MyAdvancedSnsBusiness(SnsModel):
Records: List[MyAdvancedSnsRecordModel]
class MyKinesisBusiness(BaseModel):
message: str
username: str
class MyCloudWatchBusiness(BaseModel):
my_message: str
user: str
| 19.833333 | 61 | 0.779712 | 1,238 | 0.743097 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.003601 |
f3aaaccfebbaff94bdcc88b4d60c133a5920a086 | 387 | py | Python | src/350.py | hippieZhou/The-Way-Of-LeetCode | c63d777e01413726b6214c616c20c61f8e5b330b | [
"MIT"
] | null | null | null | src/350.py | hippieZhou/The-Way-Of-LeetCode | c63d777e01413726b6214c616c20c61f8e5b330b | [
"MIT"
] | null | null | null | src/350.py | hippieZhou/The-Way-Of-LeetCode | c63d777e01413726b6214c616c20c61f8e5b330b | [
"MIT"
] | null | null | null | # 给定两个数组,编写一个函数来计算它们的交集。
class Solution:
    """LeetCode 350: intersection of two arrays, with multiplicity."""

    def intersect(self, nums1: list, nums2: list) -> list:
        """Return the multiset intersection of ``nums1`` and ``nums2``.

        Each common element appears min(its count in nums1, its count in
        nums2) times.  Uses Counter's ``&`` (element-wise minimum of
        counts), which runs in O(n + m) instead of the previous
        O(n * m) pattern of calling ``list.count`` per distinct element;
        the leftover debug prints were removed.

        Args:
            nums1: First list of hashable values.
            nums2: Second list of hashable values.

        Returns:
            List with the common elements (order unspecified, as before).
        """
        from collections import Counter

        common = Counter(nums1) & Counter(nums2)  # min of the two counts
        return list(common.elements())
# Quick manual check of Solution.intersect with the canonical example.
sample_a = [1, 2, 2, 1]
sample_b = [2, 2]
result = Solution().intersect(sample_a, sample_b)
print(result)
| 20.368421 | 58 | 0.5323 | 271 | 0.62877 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.157773 |
f3ac47f0d2af46f81dd25d634efde4812cbc00a9 | 2,263 | py | Python | IATI2LOD/src/gather data scripts/DbpediaData.py | KasperBrandt/IATI2LOD | 3a4fbcbf59d324e948b14509f74c50633d36a497 | [
"MIT"
] | 1 | 2019-08-03T00:52:44.000Z | 2019-08-03T00:52:44.000Z | IATI2LOD/src/gather data scripts/DbpediaData.py | KasperBrandt/IATI2LOD | 3a4fbcbf59d324e948b14509f74c50633d36a497 | [
"MIT"
] | 1 | 2015-10-11T09:47:25.000Z | 2015-10-16T12:58:43.000Z | IATI2LOD/src/gather data scripts/DbpediaData.py | KasperBrandt/IATI2LOD | 3a4fbcbf59d324e948b14509f74c50633d36a497 | [
"MIT"
] | 1 | 2021-05-29T03:43:01.000Z | 2021-05-29T03:43:01.000Z | ## By Kasper Brandt
## Last updated on 26-05-2013
# Downloads the DBPedia Turtle data for every resource referenced by the
# local owl:sameAs mapping files, writes each one to dbpedia_folder, then
# records provenance for the whole batch via AddProvenance.
# NOTE(review): Python 2 script (urllib2, print statements).
import os, sys, datetime, urllib2, AddProvenance
from rdflib import Namespace, Graph

# Settings
dbpedia_folder = "/media/Acer/School/IATI-data/dataset/DBPedia/"
dbpedia_files = ["/media/Acer/School/IATI-data/mappings/DBPedia/dbpedia-countries-via-factbook.ttl",
                 "/media/Acer/School/IATI-data/mappings/DBPedia/dbpedia-organisations.ttl"]

if not os.path.isdir(dbpedia_folder):
    os.makedirs(dbpedia_folder)

# Provenance settings
Iati = Namespace("http://purl.org/collections/iati/")
start_time = datetime.datetime.now()
source_ttls = []

for dbpedia_file in dbpedia_files:
    with open(dbpedia_file, 'r') as f:
        for line in f:
            if "owl:sameAs" in line:
                # The object of the sameAs triple is either a full IRI
                # (<http://dbpedia.org/resource/X>) or a "dbpedia:" prefixed
                # name; reduce both forms to the bare resource name.
                line_list = line.split()
                if "<" in line_list[2]:
                    dbpedia_item = line_list[2].replace("<http://dbpedia.org/resource/","").replace(">","")
                else:
                    dbpedia_item = line_list[2].replace("dbpedia:", "")

                dbpedia_url = "http://dbpedia.org/data/" + dbpedia_item + ".ttl"
                source_ttls.append(dbpedia_url)

                # Fetch the resource's Turtle serialization from DBPedia.
                turtle_response = urllib2.urlopen(dbpedia_url)
                turtle_data = turtle_response.read()

                print "Retrieved data from " + dbpedia_url + ", writing to file..."

                with open(dbpedia_folder + dbpedia_item + ".ttl", 'w') as turtle_f:
                    turtle_f.write(turtle_data)

# Add provenance
print "Adding provenance..."

provenance = Graph()
provenance = AddProvenance.addProv(Iati,
                                   provenance,
                                   'DBPedia',
                                   start_time,
                                   source_ttls,
                                   ['DBPedia'],
                                   "gather%20data%20scripts/DbpediaData.py")

provenance_turtle = provenance.serialize(format='turtle')

with open(dbpedia_folder + 'provenance-dbpedia.ttl', 'w') as turtle_file_prov:
    turtle_file_prov.write(provenance_turtle)

print "Done!"
| 35.359375 | 107 | 0.550155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 607 | 0.268228 |
f3acee9d31ba13e3a43678346252052e3800ae55 | 4,435 | py | Python | mellow/core.py | unsonnet/mellow | 130cadbf3e44108bcff311889ce6c8d95fb914ab | [
"Apache-2.0"
] | null | null | null | mellow/core.py | unsonnet/mellow | 130cadbf3e44108bcff311889ce6c8d95fb914ab | [
"Apache-2.0"
] | null | null | null | mellow/core.py | unsonnet/mellow | 130cadbf3e44108bcff311889ce6c8d95fb914ab | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import warnings
import jax.numpy as np
import jax.ops as jo
import mellow.factory as factory
import mellow.ops as mo
class Network(object):
    """Homogenous feedforward neural network."""

    # NOTE(review): jax.ops.index_update / index_add (used below) were removed
    # in newer JAX releases — this presumably targets an older pinned JAX;
    # verify against the project's requirements.

    def __init__(self, inp, out, params, act):
        """Inits Network.

        Deduces the structure of a network represented by `inp`, `out`,
        and `params`. A weighted adjacency matrix and node vector of
        appropriate shape are constructed as well.

        Args:
            inp: Number of input nodes excluding bias unit.
            out: Number of output nodes.
            params: Sequence of weights.
            act: Activation function.

        Raises:
            ValueError: If a network structure cannot be found with the
                same number of arcs as weights in `params`.
        """
        inb = inp + 1 # Accounts for bias unit.
        hid = factory.depth(inb, out, params)

        with warnings.catch_warnings(): # Filters precision warnings.
            warnings.filterwarnings("ignore", message="Explicitly requested dtype.*")
            self.shape = np.array([inb, hid, out], dtype=int)

        # θ: weighted adjacency matrix; v: node (activation) vector template.
        self.θ = factory.adj_arr(self.shape, params)
        self.v = factory.nd_vect(self.shape)
        self.A = act

    def add_nd(self, params):
        """Adds a new hidden node to network.

        Inserts a node represented by `params` at the beginning of the
        hidden layer. Regarding the weighted adjacency matrix, arcs are
        assigned in row-major order such that it preserves topological
        ordering.

        Args:
            params: Sequence of weights.

        Raises:
            AttributeError: If insufficient number of weights is given.
        """
        inb, hid, _ = self.shape
        # A new node needs one weight per existing node: inb + hid + out.
        Σ = np.sum(self.shape)

        if Σ != len(params):
            msg = "{} weights required to add a node to a {} network, got {}."
            raise AttributeError(msg.format(Σ, self.shape, len(params)))

        self.shape = jo.index_add(self.shape, 1, 1)
        self.v = np.append(self.v, 0)

        # First inb weights become the new node's incoming column (padded
        # below), the rest its outgoing row (padded in front).
        col = np.pad(params[:inb], (0, hid), constant_values=0)
        row = np.pad(params[inb:], (1, 0), constant_values=0)
        self.θ = mo.insert(self.θ, 0, col, axis=1)
        self.θ = mo.insert(self.θ, inb, row, axis=0)

    def predict(self, z):
        """Produces a hypothesis from `z`.

        Args:
            z: Stacked input samples.

        Returns:
            Stacked output layers.

        Raises:
            AttributeError: If insufficient input is given.
        """
        return self.eval(self.θ, z)

    def eval(self, θ, z):
        """Evaluates network on input.

        Forward propagates data from `z` through network with arcs
        parameterized by `θ`.

        Args:
            θ: Topologically-sorted weighted adjacency matrix.
            z: Stacked input samples.

        Returns:
            Stacked output layers.

        Raises:
            AttributeError: If insufficient input is given.
            ValueError: If network cannot be parameterized by `θ`.
        """
        inb, _, out = self.shape
        v = self.reshape(z)

        if self.θ.shape != θ.shape:
            msg = "{} adj matrix required to parameterize a {} network, got {}."
            raise ValueError(msg.format(self.θ.shape, self.shape, θ.shape))

        # Activate hidden nodes in topological order: each node sees every
        # node that precedes it (inputs + earlier hidden nodes).
        for idx in range(inb, v.shape[-1]):
            Σ = np.dot(v[..., :idx], θ[:idx, idx - inb])
            v = jo.index_update(v, jo.index[..., idx], self.A(Σ))

        # Output layer is a plain linear readout of all node activations.
        return np.dot(v, θ[:, -out:])

    def reshape(self, z):
        """Formats input for network evaluation.

        Assigns data from `z` to input layer. Multiple node vectors are
        constructed if `z` represents a sequence of input samples.

        Args:
            z: Stacked input samples.

        Returns:
            Stacked node vectors.

        Raises:
            AttributeError: If input layers cannot be completely
                initialized.
        """
        inb, _, _ = self.shape

        if inb - 1 != np.transpose(z).shape[0]:
            msg = "{} values required per input sample, got {}."
            raise AttributeError(msg.format(inb - 1, np.transpose(z).shape[0]))

        rows = np.transpose(z)[..., None].shape[1] # Counts number of input samples.
        v = np.tile(self.v, (rows, 1))

        # Slot the samples into positions 1..inb (position 0 is the bias unit).
        return jo.index_update(v, jo.index[..., 1:inb], z)
| 31.013986 | 85 | 0.573168 | 4,309 | 0.967011 | 0 | 0 | 0 | 0 | 0 | 0 | 2,454 | 0.550718 |
f3ad16d950f69a4578e20241f5adeaaeb72687a9 | 566 | py | Python | PythonExercicios/ex082.py | lordvinick/Python | c03fd08d4c204104bf0196b0bd129427fd2067ae | [
"MIT"
] | null | null | null | PythonExercicios/ex082.py | lordvinick/Python | c03fd08d4c204104bf0196b0bd129427fd2067ae | [
"MIT"
] | null | null | null | PythonExercicios/ex082.py | lordvinick/Python | c03fd08d4c204104bf0196b0bd129427fd2067ae | [
"MIT"
] | null | null | null | print('\033[32m{:=^60}'.format('\033[36m Dividindo valores em várias listas \033[32m'))
# Read integers until the user answers 'N', then split the values into an
# even list and an odd list (all user-facing text stays in Portuguese).
lista = []
par = []
impar = []
continuar = True
while continuar:
    lista.append(int(input('\033[36mDigite um número: ')))
    resp = str(input('\033[32mQuer continuar? [S/N] ')).strip()[0]
    continuar = resp not in 'Nn'
for valor in lista:
    # Append each value to exactly one of the two partitions.
    (par if valor % 2 == 0 else impar).append(valor)
print('\033[36m-\033[32m='*40)
print(f'\033[34mA lista completa é {lista}')
print(f'A lista de pares é {par}')
print(f'A lista de impares é {impar}')
| 29.789474 | 87 | 0.60424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 255 | 0.446585 |
f3ad47eaa0299f9f0aeb207c1f9ec4e1799a981c | 1,337 | py | Python | test_api.py | cagcoach/aikapi | cbb796d736b7a37536672b0f1841014b9b7545bb | [
"MIT"
] | null | null | null | test_api.py | cagcoach/aikapi | cbb796d736b7a37536672b0f1841014b9b7545bb | [
"MIT"
] | null | null | null | test_api.py | cagcoach/aikapi | cbb796d736b7a37536672b0f1841014b9b7545bb | [
"MIT"
] | null | null | null | from PythonAPI.bam import BAM
# Smoke-test / exploration script for the BAM dataset API: the uncommented
# calls are the checks currently exercised; the commented ones are kept as a
# scratchpad of past experiments.
import numpy as np
dataset_dir = '/home/beatriz/Documentos/Work/final_datasets' # For Bea
# dataset_dir = '/home/almartmen/Github/aikapi' # For Alberto
dataset_name = '181129'
# Open the dataset; frames are handled as png images.
bam = BAM(dataset_dir, dataset_name, image_format='png')
# bam.unroll_videos()
# print(bam.get_persons_in_frame(800))
# print(bam.get_poses_in_frame(801))
# person3d = bam.get_person_in_frame(800, 1)
# print(person3d)
# pose3d = bam.get_pose_in_frame(799, 1)
# print(pose3d)
# print(bam.get_activities_for_person(2))
# print(bam.get_images_in_frame(1141))
# bam.unroll_videos(force=True, video=1)
# camera = bam.get_camera(3,1)
# points3d = np.array([[0.498339264765202, 3.2171029078369897, 1.5828869056621102]])
# points2d = camera.project_points(points3d)
# print(points2d)
# points2d_pose = camera.project_points(pose3d)
# print(points2d_pose)
# bam.unroll_videos()
# Dataset-level summary queries.
print(bam.get_total_cameras())
print(bam.get_total_frames())
print(bam.get_person_ids())
print(bam.get_static_object_ids())
# print(bam.get_activity_names())
# print(bam.get_annotations_for_person(19))
print(bam.get_persons_in_frame(1000))
# p = bam.get_annotations_for_person(1)
# print(p)
### OBJECTS
# print(bam.get_static_objects_in_frame(2))
# print(bam.get_static_object_in_frame(3, 21))
# print(bam.get_annotations_for_static_object(21))
| 26.74 | 84 | 0.76739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,001 | 0.748691 |
f3ae9409239312cf1f2c371fef7e0fa56ab7da38 | 2,268 | py | Python | account/management/commands/monthly_charging.py | coseasonruby/Gluu-Ecommerce-djagno-project | d196309bbd76571ee7793bd3de4342eb81f789e1 | [
"MIT"
] | null | null | null | account/management/commands/monthly_charging.py | coseasonruby/Gluu-Ecommerce-djagno-project | d196309bbd76571ee7793bd3de4342eb81f789e1 | [
"MIT"
] | null | null | null | account/management/commands/monthly_charging.py | coseasonruby/Gluu-Ecommerce-djagno-project | d196309bbd76571ee7793bd3de4342eb81f789e1 | [
"MIT"
] | null | null | null | import logging
import stripe
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from account import constants
from account.utils import send_billing_email, send_charging_failed_email
from payment.models import Payment
from payment.constants import INITIATED, PAID, FAILED
stripe.api_key = settings.STRIPE_API_KEY
logger = logging.getLogger('billing')
class Command(BaseCommand):
    """Management command: run the monthly Stripe charge for every payment
    still in the INITIATED state."""

    def handle(self, *args, **options):
        """Charge each initiated payment.

        On success the payment is marked PAID and a billing email is sent;
        on a known failure it is marked FAILED and the customer is notified;
        on an unexpected failure it stays INITIATED for a later retry.
        """
        payments = Payment.objects.filter(status=INITIATED)
        for payment in payments:
            # Bind before the try so the except blocks can always log it —
            # previously, if `payment.account` itself raised, `account` was
            # unbound and the handlers crashed with a NameError.
            account = None
            try:
                account = payment.account
                logger.info('Running monthly payment for {}'.format(account.get_name()))
                if payment.paid_amount > 0:
                    response = stripe.Charge.create(
                        amount=int(payment.paid_amount * 100),  # Stripe expects cents
                        currency='USD',
                        customer=account.stripe.customer_id,
                        # sic: the constant's misspelled name lives in account.constants
                        description=constants.CHARGE_DESCIRPTION.format(account.get_name())
                    )
                    payment.stripe_reference = response.id
                    send_billing_email(payment)
                # Zero-amount payments are marked paid without hitting Stripe.
                payment.status = PAID
                payment.save()
                logger.info('Payment successful')
            except ObjectDoesNotExist as e:
                payment.status = FAILED
                payment.save()
                logger.error('No card attached to account {}, {}'.format(account, e))
            except (stripe.error.CardError, stripe.error.StripeError) as e:
                payment.status = FAILED
                payment.save()
                # Look up the card that was declined so the email can show it.
                customer = stripe.Customer.retrieve(account.stripe.customer_id)
                card_details = customer.sources.retrieve(customer.default_source)
                send_charging_failed_email(payment, card_details.last4)
                logger.error('Card has been declined: account {}, {}'.format(account, e))
            except Exception as e:
                # Unexpected failure: log the full traceback; the payment is
                # left INITIATED so the next run retries it.
                logger.exception(e)
                logger.error('Billing failed: account {}, {}'.format(account, e))
| 32.4 | 92 | 0.588624 | 1,802 | 0.794533 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.07672 |
f3aec13ab37c42864f83f8353ef76bbf6e8e00c7 | 420 | py | Python | pokemon_combat/body_part.py | ryndovaira/telebot_fight_game | 660473fea0de9635935495b0b2fd827bb51c47c9 | [
"MIT"
] | null | null | null | pokemon_combat/body_part.py | ryndovaira/telebot_fight_game | 660473fea0de9635935495b0b2fd827bb51c47c9 | [
"MIT"
] | null | null | null | pokemon_combat/body_part.py | ryndovaira/telebot_fight_game | 660473fea0de9635935495b0b2fd827bb51c47c9 | [
"MIT"
] | null | null | null | from enum import Enum, auto
class BodyPart(Enum):
    """Target zones the fight game can select."""

    NOTHING = auto()  # nothing chosen yet (initial state)
    HEAD = auto()     # head
    BELLY = auto()    # belly
    LEGS = auto()     # legs

    @classmethod
    def min_index(cls):
        """Smallest selectable index (NOTHING is the pre-choice state)."""
        return cls.HEAD.value

    @classmethod
    def max_index(cls):
        """Largest selectable index."""
        return cls.LEGS.value

    @classmethod
    def has_item(cls, name):
        """Return True when *name* matches a declared member name."""
        return any(member_name == name for member_name in cls._member_names_)
| 20 | 52 | 0.619048 | 428 | 0.932462 | 0 | 0 | 215 | 0.46841 | 0 | 0 | 90 | 0.196078 |
f3b257822b7913a1facfcfc850069ed58c98159b | 57,211 | py | Python | labyrinth_8_rooms_quantum.py | katema-official/Tesi-laurea-triennale-Pisa-2021 | e174c3d1719de6e66ca39923f4d21f0bee321385 | [
"MIT"
] | 1 | 2021-09-05T08:35:40.000Z | 2021-09-05T08:35:40.000Z | labyrinth_8_rooms_quantum.py | katema-official/Tesi-laurea-triennale-Pisa-2021 | e174c3d1719de6e66ca39923f4d21f0bee321385 | [
"MIT"
] | null | null | null | labyrinth_8_rooms_quantum.py | katema-official/Tesi-laurea-triennale-Pisa-2021 | e174c3d1719de6e66ca39923f4d21f0bee321385 | [
"MIT"
] | 1 | 2021-09-06T09:39:25.000Z | 2021-09-06T09:39:25.000Z | import math
from random import *
from qiskit import *
from qiskit.tools.visualization import plot_histogram
import random
#Much is similar to the classical code; only the representation of the
#transition function changes, i.e. how the agent chooses the next room to enter.

#extra variables used to keep track of the overall situation
qc_list = []
counts_list = []

#dictionary of pairs <room content - corresponding 2-bit value>
room_types_dict = {"Enemies":"00", "Treasure":"01", "Shop":"10", "Boss":"11"}

#declare the qubits the circuit needs.
#since the choice is among three rooms, two qubits suffice to store
#the movement chosen by the agent. In particular:
#00 = go left
#01 = go to the center
#10 = go right
phi = QuantumRegister(2, "phi")

#two qubits for health, where 00 = 1, 01 = 2, 10 = 3 and 11 = 4
health = QuantumRegister(2, "health")

#two qubits for attack as well, same encoding as health
attack = QuantumRegister(2, "attack")

#one qubit to know whether the treasure was already seen, one for whether the
#shop was already seen, and one more working qubit to know whether the qubit
#saying the shop was seen is currently 1
treasure_seen = QuantumRegister(1, "if treasure seen")
shop_seen = QuantumRegister(1, "if shop seen")
shop_seen_true = QuantumRegister(1, "if shop seen = 1")

#two qubits describing the content of the room on the left
left_content = QuantumRegister(2, "left content")
#two qubits describing the content of the room in the center
center_content = QuantumRegister(2, "center content")
#two qubits describing the content of the room on the right
right_content = QuantumRegister(2, "right content")

#three qubits, each saying whether the left, center or right room has
#already been visited
left_seen = QuantumRegister(1, "left seen")
center_seen = QuantumRegister(1, "center seen")
right_seen = QuantumRegister(1, "right seen")

#three qubits flagging whether the treasure, the shop or the boss is adjacent
is_treasure_adjacent = QuantumRegister(1, "treasure adjacent")
is_shop_adjacent = QuantumRegister(1, "shop adjacent")
is_boss_adjacent = QuantumRegister(1, "boss adjacent")

#two extra working qubits used to choose between shop and boss when
#both are adjacent
shop_max_quality = QuantumRegister(1, "shop max quality")
boss_max_quality = QuantumRegister(1, "boss max quality")

#in the classical implementation a value is associated with every adjacent
#room. Depending on the adjacent rooms and the ones seen so far, both the
#shop and the boss can take one of four distinct values, stored here on two
#qubits each. Mind the encodings (compare with the classical code):
#SHOP
#00 = -1
#01 = 0
#10 = 6
#11 = 8
#BOSS
#00 = 0
#01 = 1
#10 = 3
#11 = 7
shop_quality = QuantumRegister(2, "shop quality")
boss_quality = QuantumRegister(2, "boss quality")

#do not forget the oracle qubit that performs the phase kickback
oracle_qubit = QuantumRegister(1, "oracle qubit")

#two classical bits to read which room the agent wants to explore next
measure_bits = ClassicalRegister(2, "next_room")

#one more thing to measure: whether the shop was seen. Not needed for the
#treasure (the agent always enters a treasure room on sight), but for the
#shop: if the agent has seen the treasure and the shop is adjacent, it marks
#the shop as seen anyway, since if it does not enter now it never will
#(once the treasure is taken, health can only go down).
shop_bit = ClassicalRegister(1, "shop_seen_bit")

#build the beginning of the circuit
qc_1 = QuantumCircuit(phi, health, attack, treasure_seen, shop_seen, shop_seen_true,
                      left_content, center_content, right_content, left_seen, center_seen,
                      right_seen, is_treasure_adjacent, is_shop_adjacent, is_boss_adjacent,
                      shop_max_quality, boss_max_quality, shop_quality, boss_quality,
                      oracle_qubit, measure_bits, shop_bit)
qc_1.draw('mpl')

#now we need functions that build the actual circuit. Three main ones:
#-one that initializes the circuit's qubits
#-the oracle, i.e. the transition function
#-the diffuser
#the oracle is itself split into several parts to ease reading:
#-a function for the treasure choice
#-a function for the shop choice
#-a function for the boss choice
#-a function that decides, when needed, whether shop or boss is better
def initialize_dungeon_circuit(qc, agent, adjacent_rooms):
    """Prepare the circuit's initial state from the agent's situation.

    Encodes into the module-level registers:
      * phi                       -> equal superposition over the movement qubits
      * health, attack            -> value v in {1..4} stored as the bits of v-1
      * treasure_seen / shop_seen -> the agent's flags (shop_seen_true mirrors
                                     shop_seen as a working copy)
      * left/center/right content -> 2-bit room codes from room_types_dict
      * left/center/right seen    -> whether each adjacent room was visited
      * oracle_qubit              -> |-> state, enabling phase kickback

    Args:
        qc: QuantumCircuit built over the module-level registers.
        agent: Exposes health_points / attack_points (ints in 1..4) and
            treasure_seen / shop_seen (bools).
        adjacent_rooms: The three rooms (left, center, right), each exposing
            `content` (a room_types_dict key) and `visited` (bool).
    """
    # Equal superposition over the two movement qubits.
    qc.h(phi)

    def _encode_value(register, value):
        # Store `value` as the 2-bit pattern of (value - 1): flip the
        # register qubit for each set bit.  Replaces the former if/elif
        # ladder, producing the identical gate sequence.
        code = value - 1
        if code & 0b01:
            qc.x(register[0])
        if code & 0b10:
            qc.x(register[1])

    _encode_value(health, agent.health_points)
    _encode_value(attack, agent.attack_points)

    # Flags for what the agent has already discovered.
    if agent.treasure_seen:
        qc.x(treasure_seen[0])
    if agent.shop_seen:
        qc.x(shop_seen[0])
        qc.cx(shop_seen[0], shop_seen_true[0])  # keep the working copy in sync

    # Contents of the three adjacent rooms as 2-bit codes; code strings are
    # MSB-first, so code[1] drives qubit 0 and code[0] drives qubit 1.
    for register, room in zip((left_content, center_content, right_content),
                              adjacent_rooms):
        code = room_types_dict[room.content]
        if code[1] == "1":
            qc.x(register[0])
        if code[0] == "1":
            qc.x(register[1])

    # Mark the adjacent rooms the agent already visited.
    for register, room in zip((left_seen, center_seen, right_seen),
                              adjacent_rooms):
        if room.visited:
            qc.x(register[0])

    # Finally put the oracle qubit into |-> for the phase kickback.
    qc.x(oracle_qubit)
    qc.h(oracle_qubit)
def treasure_logic():
    """Build the oracle's treasure sub-gate.

    For each direction: if the adjacent room contains the treasure
    (content == "01") and the treasure has not been seen yet, the movement
    state pointing at that room is phase-flipped through the oracle qubit,
    and is_treasure_adjacent is set.  The three directions previously
    repeated the same gate pattern verbatim; they are now driven by one
    loop that emits the identical gate sequence.

    Returns:
        A Gate named "Treasure logic" over the oracle's quantum registers.
    """
    qc = QuantumCircuit(phi, health, attack, treasure_seen, shop_seen, shop_seen_true,
                        left_content, center_content, right_content, left_seen, center_seen,
                        right_seen, is_treasure_adjacent, is_shop_adjacent, is_boss_adjacent,
                        shop_max_quality, boss_max_quality, shop_quality, boss_quality,
                        oracle_qubit)

    # Movement encoding: 00 = left, 01 = center, 10 = right.  For each case
    # we flip the phi qubits that must be 0 so the mcx can control on |11>.
    cases = (
        ((phi[0], phi[1]), left_content),    # phi == 00 -> flip both
        ((phi[1],),        center_content),  # phi == 01 -> flip the high qubit
        ((phi[0],),        right_content),   # phi == 10 -> flip the low qubit
    )
    for flips, content in cases:
        # Select "this movement, treasure here (01), treasure not yet seen".
        for qubit in flips:
            qc.x(qubit)
        qc.x(content[1])
        qc.x(treasure_seen[0])
        qc.mcx([phi[0], phi[1], content[0], content[1], treasure_seen[0]], oracle_qubit)
        qc.mcx([content[0], content[1], treasure_seen[0]], is_treasure_adjacent[0])
        # Uncompute the selection flips.
        qc.x(treasure_seen[0])
        qc.x(content[1])
        for qubit in flips:
            qc.x(qubit)

    gate = qc.to_gate()
    gate.name = "Treasure logic"
    return gate
def shop_logic():
    """Builds the oracle's shop sub-circuit and returns it as a gate.

    Sets is_shop_adjacent when any adjacent room contains the shop ("10")
    and writes the shop's worth into shop_quality (00 = -1, 01 = 0,
    10 = 6, 11 = 8).  Also records shop_seen when the treasure was already
    found and the shop is adjacent (now-or-never situation).
    """
    qc = QuantumCircuit(phi, health, attack, treasure_seen, shop_seen, shop_seen_true,
                        left_content, center_content, right_content, left_seen, center_seen,
                        right_seen, is_treasure_adjacent, is_shop_adjacent, is_boss_adjacent,
                        shop_max_quality, boss_max_quality, shop_quality, boss_quality,
                        oracle_qubit)

    #SHOP: I must figure out the value of the shop
    #...but first record whether the shop actually is adjacent
    #(shop code is "10": flip qubit 0 so the mcx can control on |11>)
    qc.x(left_content[0])
    qc.mcx([left_content[0], left_content[1]], is_shop_adjacent[0])
    qc.x(left_content[0])
    qc.x(center_content[0])
    qc.mcx([center_content[0], center_content[1]], is_shop_adjacent[0])
    qc.x(center_content[0])
    qc.x(right_content[0])
    qc.mcx([right_content[0], right_content[1]], is_shop_adjacent[0])
    qc.x(right_content[0])

    #as said: VALUE OF THE SHOP
    #by default, if the shop is not adjacent, its value stays -1. This
    #simplifies the circuit once we reach the enemies part.
    #-1 also with 1 health (default, no gates to apply)
    #0 if going to the shop would worsen my chance of victory. Happens only
    #with 2 health and 3 attack
    qc.x(health[1])
    qc.x(attack[0])
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[0])
    qc.x(attack[0])
    qc.x(health[1])

    #6 if going to the shop leaves my victory odds unchanged. Happens when:
    #health = 2 attack = 2
    #health = 3 attack = 3
    qc.x(health[1])
    qc.x(attack[1])
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[1])
    qc.x(attack[1])
    qc.x(health[1])
    qc.x(health[0])
    qc.x(attack[0])
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[1])
    qc.x(attack[0])
    qc.x(health[0])

    #8 if going to the shop strictly increases my victory odds.
    #Happens when:
    #health = 2 and attack = 1
    #health = 3 and (attack=1 or attack=2)
    #health = 4 and (attack=1 or attack=2 or attack=3)
    qc.x(health[1])
    qc.x(attack)
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[0])
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[1])
    qc.x(attack)
    qc.x(health[1])
    qc.x(health[0])
    qc.x(attack)
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[0])
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[1])
    qc.x(attack)
    qc.x(health[0])
    qc.x(health[0])
    qc.x(attack[1])
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[0])
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[1])
    qc.x(attack[1])
    qc.x(health[0])
    qc.x(attack)
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[0])
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[1])
    qc.x(attack)
    qc.x(attack[1])
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[0])
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[1])
    qc.x(attack[1])
    qc.x(attack[0])
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[0])
    qc.mcx([health[0], health[1], attack[0], attack[1], is_shop_adjacent[0]], shop_quality[1])
    qc.x(attack[0])

    #before moving on to the boss, check whether the shop is adjacent and the
    #treasure has already been seen. This matters for the boss decision: if I
    #have seen the treasure and the shop is next to me, it is now or never
    #for the shop.
    #so: if the treasure was seen, shop_seen is not yet marked 1, and the
    #shop is next to me, record that it has been seen
    qc.x(left_content[0])
    qc.x(shop_seen_true[0])
    qc.mcx([treasure_seen[0], shop_seen_true[0], left_content[0], left_content[1]],
           shop_seen[0])
    qc.x(shop_seen_true[0])
    qc.x(left_content[0])
    qc.x(center_content[0])
    qc.x(shop_seen_true[0])
    qc.mcx([treasure_seen[0], shop_seen_true[0], center_content[0], center_content[1]],
           shop_seen[0])
    qc.x(shop_seen_true[0])
    qc.x(center_content[0])
    qc.x(right_content[0])
    qc.x(shop_seen_true[0])
    qc.mcx([treasure_seen[0], shop_seen_true[0], right_content[0], right_content[1]],
           shop_seen[0])
    qc.x(shop_seen_true[0])
    qc.x(right_content[0])
    #this line resets the shop_seen_true qubit so it can be "recycled" later;
    #its use is explained at the end of this circuit, in the enemies part
    qc.cx(shop_seen[0], shop_seen_true[0])

    gate = qc.to_gate()
    gate.name = "Shop logic"
    return gate
def boss_logic():
    """Builds the oracle's boss sub-circuit and returns it as a gate.

    Sets is_boss_adjacent when any adjacent room contains the boss ("11")
    and writes the boss's worth into boss_quality (00 = 0, 01 = 1,
    10 = 3, 11 = 7).
    """
    qc = QuantumCircuit(phi, health, attack, treasure_seen, shop_seen, shop_seen_true,
                        left_content, center_content, right_content, left_seen, center_seen,
                        right_seen, is_treasure_adjacent, is_shop_adjacent, is_boss_adjacent,
                        shop_max_quality, boss_max_quality, shop_quality, boss_quality,
                        oracle_qubit)

    #BOSS: I must figure out the value of the boss
    #same reasoning as before: if the boss is not adjacent, its value is the
    #minimum possible (0)
    #so first check whether it is adjacent at all (content == "11")
    qc.mcx([left_content[0], left_content[1]], is_boss_adjacent[0])
    qc.mcx([center_content[0], center_content[1]], is_boss_adjacent[0])
    qc.mcx([right_content[0], right_content[1]], is_boss_adjacent[0])

    #so: VALUE OF THE BOSS
    #0 if I have not seen the treasure yet (default)
    #7 if I have seen both the treasure AND the shop
    qc.mcx([treasure_seen[0], shop_seen[0], is_boss_adjacent[0]], boss_quality[0])
    qc.mcx([treasure_seen[0], shop_seen[0], is_boss_adjacent[0]], boss_quality[1])

    #3 if I have seen the treasure, not the shop, and I have no interest in
    #going to the shop (because it would not raise my victory odds)
    qc.x(health)
    qc.x(shop_seen)
    qc.mcx([treasure_seen[0], shop_seen[0], health[0], health[1], is_boss_adjacent[0]],
           boss_quality[1])
    qc.x(shop_seen)
    qc.x(health)

    #1 if I have seen the treasure but, before facing the boss, I should
    #visit the shop first, since it would strictly raise my victory odds.
    #Happens when (as before):
    #health = 2 and attack = 1
    #health = 3 and (attack=1 or attack=2)
    #health = 4 and (attack=1 or attack=2 or attack=3)
    qc.x(health[1])
    qc.x(attack)
    qc.x(shop_seen)
    qc.mcx([treasure_seen[0], shop_seen[0], health[0], health[1], attack[0], attack[1],
            is_boss_adjacent[0]], boss_quality[0])
    qc.x(shop_seen)
    qc.x(attack)
    qc.x(health[1])
    qc.x(health[0])
    qc.x(attack)
    qc.x(shop_seen)
    qc.mcx([treasure_seen[0], shop_seen[0], health[0], health[1], attack[0], attack[1],
            is_boss_adjacent[0]], boss_quality[0])
    qc.x(shop_seen)
    qc.x(attack)
    qc.x(health[0])
    qc.x(health[0])
    qc.x(attack[1])
    qc.x(shop_seen)
    qc.mcx([treasure_seen[0], shop_seen[0], health[0], health[1], attack[0], attack[1],
            is_boss_adjacent[0]], boss_quality[0])
    qc.x(shop_seen)
    qc.x(attack[1])
    qc.x(health[0])
    qc.x(attack)
    qc.x(shop_seen)
    qc.mcx([treasure_seen[0], shop_seen[0], health[0], health[1], attack[0], attack[1],
            is_boss_adjacent[0]], boss_quality[0])
    qc.x(shop_seen)
    qc.x(attack)
    qc.x(attack[1])
    qc.x(shop_seen)
    qc.mcx([treasure_seen[0], shop_seen[0], health[0], health[1], attack[0], attack[1],
            is_boss_adjacent[0]], boss_quality[0])
    qc.x(shop_seen)
    qc.x(attack[1])
    qc.x(attack[0])
    qc.x(shop_seen)
    qc.mcx([treasure_seen[0], shop_seen[0], health[0], health[1], attack[0], attack[1],
            is_boss_adjacent[0]], boss_quality[0])
    qc.x(shop_seen)
    qc.x(attack[0])

    gate = qc.to_gate()
    gate.name = "Boss logic"
    return gate
def shop_vs_boss_logic():
qc = QuantumCircuit(phi, health, attack, treasure_seen, shop_seen, shop_seen_true,
left_content, center_content, right_content, left_seen, center_seen,
right_seen, is_treasure_adjacent, is_shop_adjacent, is_boss_adjacent,
shop_max_quality, boss_max_quality, shop_quality, boss_quality,
oracle_qubit)
#la scelta di andare nel negozio o dal boss dipende dal valore che ho assegnato a queste
#stanze e dal fatto che potrei averne di fianco una, l'altra o entrambe (ma in ogni caso
#NON avro' di fianco il tesoro, altrimenti andrei la'). Allora distinguo tre casi:
#-ho adiacente solo il negozio
#-ho adiacente solo il boss
#-ho adiacenti entrambi
#a seconda di quale caso si verifica e della qualita' che ho assegnato a queste due stanze,
#decidero' dove andare
#caso 1) ho adiacente solo il negozio
#se non ho il tesoro adiacente...
qc.x(is_treasure_adjacent[0])
#...e nemmeno il boss...
qc.x(is_boss_adjacent[0])
#...e ho il negozio a sinistra...
qc.x(left_content[0])
#...e il negozio vale piu' dei nemici (ovvero vale 6 o 8, che si traduce
#in shop_quality[1] = 1)...
#...allora ci vado
qc.x(phi)
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_boss_adjacent[0], left_content[0],
left_content[1], shop_quality[1]], oracle_qubit)
qc.x(phi)
qc.x(left_content[0])
qc.x(is_boss_adjacent[0])
qc.x(is_treasure_adjacent[0])
#ripetere per il centro e per la destra
qc.x(is_treasure_adjacent[0])
qc.x(is_boss_adjacent[0])
qc.x(center_content[0])
qc.x(phi[1])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_boss_adjacent[0], center_content[0],
center_content[1], shop_quality[1]], oracle_qubit)
qc.x(phi[1])
qc.x(center_content[0])
qc.x(is_boss_adjacent[0])
qc.x(is_treasure_adjacent[0])
qc.x(is_treasure_adjacent[0])
qc.x(is_boss_adjacent[0])
qc.x(right_content[0])
qc.x(phi[0])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_boss_adjacent[0], right_content[0],
right_content[1], shop_quality[1]], oracle_qubit)
qc.x(phi[0])
qc.x(right_content[0])
qc.x(is_boss_adjacent[0])
qc.x(is_treasure_adjacent[0])
#caso 2) ho adiacente solo il boss
#se non ho adiacente la stanza del tesoro...
qc.x(is_treasure_adjacent[0])
#...e nemmeno il negozio...
qc.x(is_shop_adjacent[0])
#...e ho il boss a sinistra (left_content = 11)...
#...e il boss vale piu' dei nemici (vero quando vale 3 o 7, ovvero boss_quality[1] = 1)...
#...allora ci vado
qc.x(phi)
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_shop_adjacent[0], left_content[0],
left_content[1], boss_quality[1]], oracle_qubit)
qc.x(phi)
qc.x(is_shop_adjacent[0])
qc.x(is_treasure_adjacent[0])
#ripetere per il centro e per la destra
qc.x(is_treasure_adjacent[0])
qc.x(is_shop_adjacent[0])
qc.x(phi[1])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_shop_adjacent[0], center_content[0],
center_content[1], boss_quality[1]], oracle_qubit)
qc.x(phi[1])
qc.x(is_shop_adjacent[0])
qc.x(is_treasure_adjacent[0])
qc.x(is_treasure_adjacent[0])
qc.x(is_shop_adjacent[0])
qc.x(phi[0])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_shop_adjacent[0], right_content[0],
right_content[1], boss_quality[1]], oracle_qubit)
qc.x(phi[0])
qc.x(is_shop_adjacent[0])
qc.x(is_treasure_adjacent[0])
#caso 3) ho adiacenti entrambi
#se non ho adiacente il tesoro...
qc.x(is_treasure_adjacent[0])
#(Questa volta, per cambiare, metto l'altra riga che flippa il valore del qubit
#is_treasure_adjacent[0] dopo aver compiuto TUTTE le operazioni, tanto deve essere
#sempre vero che il tesoro non e' adiacente.)
#...ma ho adiacenti sia il boss che il negozio (is_shop_adjacent = is_boss_adjacent = 1)...
#...allora devo capire chi ha piu' importanza tra loro due e la stanza del nemico
#Per farlo, devo analizzare il valore massimo che c'e' tra negozio e boss. Ricordiamo che:
#1) il negozio puo' avere come valori di qualita' 8, 6, 0 e -1
#2) il boss puo' avere come valori di qualita' 7, 3, 1 e 0
#Quindi: se il negozio vale 8 e ce l'ho a sinistra, al centro o a destra, ci vado.
qc.x(left_content[0])
qc.x(phi)
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_boss_adjacent[0], left_content[0],
left_content[1], shop_quality[0], shop_quality[1]], oracle_qubit)
qc.x(phi)
qc.x(left_content[0])
qc.x(center_content[0])
qc.x(phi[1])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_boss_adjacent[0], center_content[0],
center_content[1], shop_quality[0], shop_quality[1]], oracle_qubit)
qc.x(phi[1])
qc.x(center_content[0])
qc.x(right_content[0])
qc.x(phi[0])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_boss_adjacent[0], right_content[0],
right_content[1], shop_quality[0], shop_quality[1]], oracle_qubit)
qc.x(phi[0])
qc.x(right_content[0])
#Mi segno anche che shop_max_quality = 1, se effettivamente il negozio ha la qualita'
#piu' alta possibile
qc.mcx([shop_quality[0], shop_quality[1]], shop_max_quality[0])
#Se il boss vale 7 e il negozio NON vale 8 (ovvero vale meno del boss), vado dal boss
qc.x(phi)
qc.x(shop_max_quality[0])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_shop_adjacent[0], left_content[0],
left_content[1], boss_quality[0], boss_quality[1], shop_max_quality[0]],
oracle_qubit)
qc.x(shop_max_quality[0])
qc.x(phi)
qc.x(phi[1])
qc.x(shop_max_quality[0])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_shop_adjacent[0], center_content[0],
center_content[1], boss_quality[0], boss_quality[1], shop_max_quality[0]],
oracle_qubit)
qc.x(shop_max_quality[0])
qc.x(phi[1])
qc.x(phi[0])
qc.x(shop_max_quality[0])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_shop_adjacent[0], right_content[0],
right_content[1], boss_quality[0], boss_quality[1], shop_max_quality[0]],
oracle_qubit)
qc.x(shop_max_quality[0])
qc.x(phi[0])
#Anche qui mi segno se boss_max_quality = 1
qc.mcx([boss_quality[0], boss_quality[1]], boss_max_quality[0])
#Ora si applica lo stesso discorso per quando lo shop vale 6 e il boss NON vale 7
#(ovvero, vale comunque meno del negozio).
#In questo caso vado nel negozio
qc.x(phi)
qc.x(left_content[0])
qc.x(shop_quality[0])
qc.x(boss_max_quality[0])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_boss_adjacent[0], left_content[0],
left_content[1], shop_quality[0], shop_quality[1], boss_max_quality[0]],
oracle_qubit)
qc.x(boss_max_quality[0])
qc.x(shop_quality[0])
qc.x(left_content[0])
qc.x(phi)
qc.x(phi[1])
qc.x(center_content[0])
qc.x(shop_quality[0])
qc.x(boss_max_quality[0])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_boss_adjacent[0], center_content[0],
center_content[1], shop_quality[0], shop_quality[1], boss_max_quality[0]],
oracle_qubit)
qc.x(boss_max_quality[0])
qc.x(shop_quality[0])
qc.x(center_content[0])
qc.x(phi[1])
qc.x(phi[0])
qc.x(right_content[0])
qc.x(shop_quality[0])
qc.x(boss_max_quality[0])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_boss_adjacent[0], right_content[0],
right_content[1], shop_quality[0], shop_quality[1], boss_max_quality[0]],
oracle_qubit)
qc.x(boss_max_quality[0])
qc.x(shop_quality[0])
qc.x(right_content[0])
qc.x(phi[0])
#Infine, se lo shop non vale ne' 8 ne' 6, e il boss non vale 7 ma 3, allora va bene il boss
qc.x(phi)
qc.x(shop_quality[1])
qc.x(boss_quality[0])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_shop_adjacent[0], left_content[0],
left_content[1], shop_quality[1], boss_quality[0], boss_quality[1]], oracle_qubit)
qc.x(boss_quality[0])
qc.x(shop_quality[1])
qc.x(phi)
qc.x(phi[1])
qc.x(shop_quality[1])
qc.x(boss_quality[0])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_shop_adjacent[0], center_content[0],
center_content[1], shop_quality[1], boss_quality[0], boss_quality[1]], oracle_qubit)
qc.x(boss_quality[0])
qc.x(shop_quality[1])
qc.x(phi[1])
qc.x(phi[0])
qc.x(shop_quality[1])
qc.x(boss_quality[0])
qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], is_shop_adjacent[0], right_content[0],
right_content[1], shop_quality[1], boss_quality[0], boss_quality[1]], oracle_qubit)
qc.x(boss_quality[0])
qc.x(shop_quality[1])
qc.x(phi[0])
qc.x(is_treasure_adjacent[0])
gate = qc.to_gate()
gate.name = "Shop vs boss logic"
return gate
def enemies_logic():
    """Build the gate that handles the fallback choice: entering a room
    that contains enemies when no better alternative is adjacent."""
    qc = QuantumCircuit(phi, health, attack, treasure_seen, shop_seen, shop_seen_true,
                        left_content, center_content, right_content, left_seen, center_seen,
                        right_seen, is_treasure_adjacent, is_shop_adjacent, is_boss_adjacent,
                        shop_max_quality, boss_max_quality, shop_quality, boss_quality,
                        oracle_qubit)
    # ENEMIES: chosen only when there is no better alternative.
    # It can happen that all three adjacent rooms contain enemies and are
    # still unexplored (for example at the start of the exploration). In
    # that case the rooms are evaluated in a random order, because if every
    # such room could give a phase kickback to phi, the measured result
    # would be 11; exactly one of them must produce the kickback. To know
    # that an enemy room has already been chosen, a slightly odd but
    # qubit-saving procedure is used:
    # - to enter a room, shop_seen_true[0] must also be 0
    # - once a room has been chosen, shop_seen_true[0] is set to 1
    # - while that qubit is 1, shop_quality[1] is flipped so that the
    #   remaining enemy rooms are no longer considered
    # - finally shop_seen_true[0] is put back to 0
    directions = ["left", "center", "right"]
    random.shuffle(directions)
    for d in directions:
        if d == "left":
            # Explained for the "left" case; the other two are analogous.
            # If the treasure is not adjacent...
            qc.x(is_treasure_adjacent[0])
            # ...and the shop and the boss are both worth less than me
            # (i.e. the shop is worth -1 or 0 and the boss 0 or 1)...
            qc.x(shop_quality[1])
            qc.x(boss_quality[1])
            # ...and the room on the left has never been explored...
            qc.x(left_seen[0])
            # ...and the left room contains enemies...
            qc.x(left_content)
            # ...then going left may be of interest.
            qc.x(phi)
            # not(shop_seen_true[0]) is added as an extra control. A new
            # qubit such as enemies_chosen[0] could have been added, but it
            # was not, so that an old qubit can be recycled.
            # At this point of the circuit shop_seen_true[0] is used as
            # follows:
            # - if it is 0, no enemy room has been chosen to enter yet
            # - if it is 1, one has already been chosen
            # The choice of entering an enemy room is only considered when
            # no better room is available, and the circuit guarantees that
            # by this point shop_seen_true[0] is 1 in that case. The same
            # reasoning holds for all three cases.
            qc.x(shop_seen_true[0])
            qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], shop_quality[1], boss_quality[1],
                    left_seen[0], left_content[0], left_content[1], shop_seen_true[0]],
                   oracle_qubit)
            qc.x(shop_seen_true[0])
            # If going left was indeed chosen, record that fact by setting
            # the shop_seen_true[0] qubit to one...
            qc.mcx([is_treasure_adjacent[0], shop_quality[1], boss_quality[1], left_seen[0],
                    left_content[0], left_content[1]], shop_seen_true[0])
            # ...and flip the value of shop_quality[1] so that the next
            # enemy rooms are ignored.
            qc.cx(shop_seen_true[0], shop_quality[1])
            # Now restore shop_seen_true[0].
            qc.x(shop_quality[1])
            qc.mcx([is_treasure_adjacent[0], shop_quality[1], boss_quality[1], left_seen[0],
                    left_content[0], left_content[1]], shop_seen_true[0])
            qc.x(shop_quality[1])
            # Uncompute the temporary X flips applied above.
            qc.x(phi)
            qc.x(left_content)
            qc.x(left_seen[0])
            qc.x(boss_quality[1])
            qc.x(shop_quality[1])
            qc.x(is_treasure_adjacent[0])
        if d == "center":
            # Same pattern, going to the center room.
            qc.x(is_treasure_adjacent[0])
            qc.x(shop_quality[1])
            qc.x(boss_quality[1])
            qc.x(center_seen[0])
            qc.x(center_content)
            qc.x(phi[1])
            qc.x(shop_seen_true[0])
            qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], shop_quality[1], boss_quality[1],
                    center_seen[0], center_content[0], center_content[1], shop_seen_true[0]],
                   oracle_qubit)
            qc.x(shop_seen_true[0])
            qc.mcx([is_treasure_adjacent[0], shop_quality[1], boss_quality[1], center_seen[0],
                    center_content[0], center_content[1]], shop_seen_true[0])
            qc.cx(shop_seen_true[0], shop_quality[1])
            qc.x(shop_quality[1])
            qc.mcx([is_treasure_adjacent[0], shop_quality[1], boss_quality[1], center_seen[0],
                    center_content[0], center_content[1]], shop_seen_true[0])
            qc.x(shop_quality[1])
            qc.x(phi[1])
            qc.x(center_content)
            qc.x(center_seen[0])
            qc.x(boss_quality[1])
            qc.x(shop_quality[1])
            qc.x(is_treasure_adjacent[0])
        if d == "right":
            # Same pattern, going to the right room.
            qc.x(is_treasure_adjacent[0])
            qc.x(shop_quality[1])
            qc.x(boss_quality[1])
            qc.x(right_seen[0])
            qc.x(right_content)
            qc.x(phi[0])
            qc.x(shop_seen_true[0])
            qc.mcx([phi[0], phi[1], is_treasure_adjacent[0], shop_quality[1], boss_quality[1],
                    right_seen[0], right_content[0], right_content[1], shop_seen_true[0]],
                   oracle_qubit)
            qc.x(shop_seen_true[0])
            qc.mcx([is_treasure_adjacent[0], shop_quality[1], boss_quality[1], right_seen[0],
                    right_content[0], right_content[1]], shop_seen_true[0])
            qc.cx(shop_seen_true[0], shop_quality[1])
            qc.x(shop_quality[1])
            qc.mcx([is_treasure_adjacent[0], shop_quality[1], boss_quality[1], right_seen[0],
                    right_content[0], right_content[1]], shop_seen_true[0])
            qc.x(shop_quality[1])
            qc.x(phi[0])
            qc.x(right_content)
            qc.x(right_seen[0])
            qc.x(boss_quality[1])
            qc.x(shop_quality[1])
            qc.x(is_treasure_adjacent[0])
    gate = qc.to_gate()
    gate.name = "Enemies logic"
    return gate
# Define the oracle, i.e. the transition function that tells the agent
# which room to move into.
def oracle_function(qc):
    """Append the full Grover oracle to *qc*.

    The oracle is the composition of the five decision gates, applied in
    priority order: treasure, shop, boss, shop-vs-boss tie-breaking and
    finally the enemy-room fallback.
    """
    # Every sub-gate acts on the exact same ordered list of qubits.
    target_qubits = [
        phi[0], phi[1], health[0], health[1], attack[0], attack[1],
        treasure_seen, shop_seen, shop_seen_true, left_content[0],
        left_content[1], center_content[0], center_content[1],
        right_content[0], right_content[1], left_seen, center_seen,
        right_seen, is_treasure_adjacent, is_shop_adjacent,
        is_boss_adjacent, shop_max_quality, boss_max_quality,
        shop_quality[0], shop_quality[1], boss_quality[0],
        boss_quality[1], oracle_qubit,
    ]
    for build_gate in (treasure_logic, shop_logic, boss_logic,
                       shop_vs_boss_logic, enemies_logic):
        qc.append(build_gate(), target_qubits)
# And now the diffuser function.
def diffuser():
    """Build the Grover diffusion operator acting on the two phi qubits,
    using oracle_qubit for the phase kickback."""
    circuit = QuantumCircuit(phi, health, attack, treasure_seen, shop_seen, shop_seen_true,
                             left_content, center_content, right_content, left_seen, center_seen,
                             right_seen, is_treasure_adjacent, is_shop_adjacent, is_boss_adjacent,
                             shop_max_quality, boss_max_quality, shop_quality, boss_quality,
                             oracle_qubit)
    # H then X on phi, reflect about |11...1> via the oracle qubit,
    # then undo the X and H.
    circuit.h(phi)
    circuit.x(phi)
    circuit.mcx(phi, oracle_qubit)
    circuit.x(phi)
    circuit.h(phi)
    diffuser_gate = circuit.to_gate()
    diffuser_gate.name = "Diffuser"
    return diffuser_gate
# ----------VARIABLES THAT STORE THE RESULT OF AN EXPLORATION RUN----------
# Every finished Exploration summary gets appended here.
all_explorations_path = []
class Exploration:
    """Summary of a single dungeon run.

    Collects the sequence of visited room indices, the agent's final
    stats, and the final outcome (one of the ExplorationOutcome values).
    """

    def __init__(self):
        # Room indices (bit strings) in visiting order.
        self.rooms_explored = []
        # Final stats; -1 means "not recorded yet".
        self.health = -1
        self.attack = -1
        # Set to an ExplorationOutcome value when the run ends.
        self.outcome = None

    def print_exploration(self):
        """Print a human-readable summary of this exploration."""
        # f-string replaces the original chain of str() concatenations;
        # the printed text is byte-identical.
        print(
            f"In this exploration, the rooms explored were: \n{self.rooms_explored}"
            f"\n and the stats are: \n health = {self.health}"
            f"\n attack = {self.attack}\n and the outcome was: {self.outcome}"
        )
class ExplorationOutcome:
    """String constants naming the possible endings of an exploration."""

    BOSSDEFEATED = "Boss defeated!"
    BOSSKILLEDYOU = "The Boss killed you!"
    YOUDIED = "You died while exploring the dungeon!"
# ----------DUNGEON LOGIC----------
class RoomContent:
    """String constants naming what a room can contain."""

    ENEMIES = "Enemies"
    TREASURE = "Treasure"
    SHOP = "Shop"
    BOSS = "Boss"
class Room:
    """A dungeon room.

    Each room has an index (a three-bit string), a content label (one of
    the RoomContent values), a visited flag, and a quality score that the
    adventurer assigns during exploration (roughly, the probability of a
    favourable outcome in the room).
    """

    def __init__(self, index):
        self.index = index
        # Indices of the three rooms reachable from here (one bit flip away).
        self.adjacent_rooms = getAdjacentRooms(self.index)
        self.content = None
        self.visited = False
        # -10 is a sentinel meaning "quality not assigned yet".
        self.quality = -10

    def room_to_string(self):
        """Print the room index together with its content."""
        # f-string keeps the output identical to the original concatenation
        # (and, unlike "+", also works if content is still None).
        print(f"Stanza = {self.index}, content = {self.content}")
class Agent:
    # Set to True to get step-by-step console output during exploration.
    DEBUG = False
    # The agent is defined in terms of:
    # - the room it currently occupies
    # - whether it has seen the treasure
    # - whether it has seen the shop
    # - its health points
    # - its attack points
    # A room's quality can be read either as the probability of not taking
    # damage (enemy rooms) or as the probability of beating the boss (boss
    # room); it is always 100% for the treasure and variable for the shop.
    # The shop and the boss use the same quality formula, parameterised on
    # health points and attack points.
    def __init__(self, current_room, dungeon):
        self.current_room = current_room
        self.dungeon = dungeon
        self.treasure_seen = False
        self.shop_seen = False
        self.health_points = 4
        self.attack_points = 1

    # Iterate over the counts dictionary and return the room matching the
    # key with the highest count (i.e. where Grover's algorithm says the
    # agent should go). This function also updates the shop_seen flag.
    def choice_quantum_movement(self, counts, dungeon, current_room_index):
        choice = ""
        # NOTE(review): `max` shadows the builtin of the same name;
        # consider renaming to best_count.
        max = -1
        for k in counts.keys():
            if counts[k] > max:
                max = counts[k]
                choice = k
        # First character of the measured key is the shop_seen bit
        # (choice[1] is presumably the space qiskit inserts between
        # classical registers in counts keys - verify against the cregs).
        shop_seen_string = choice[0]
        if shop_seen_string == "1":
            if Agent.DEBUG:
                print("From the measure, I can tell i saw the shop!")
            self.shop_seen = True
        next_room_string = choice[2] + choice[3]
        # next_room_string simply says which room was chosen:
        # 00 = left   (flip the first index bit)
        # 01 = center (flip the second index bit)
        # 10 = right  (flip the third index bit)
        next_room_index = ""
        if next_room_string == "00":
            opposite_bit = opposite(current_room_index[0])
            next_room_index = opposite_bit + current_room_index[1] + current_room_index[2]
        elif next_room_string == "01":
            opposite_bit = opposite(current_room_index[1])
            next_room_index = current_room_index[0] + opposite_bit + current_room_index[2]
        elif next_room_string == "10":
            opposite_bit = opposite(current_room_index[2])
            next_room_index = current_room_index[0] + current_room_index[1] + opposite_bit
        else:
            # Measurement "11" matches no adjacent room: stay put so the
            # caller retries the circuit.
            if Agent.DEBUG:
                print("TOO MUCH NOISE IN SELECTING NEXT ROOM! Retry this room")
            next_room_index = current_room_index
        next_room = dungeon[next_room_index]
        return next_room

    # Function that implements the agent's whole exploration of the dungeon.
    def explore(self):
        # eo is the object that keeps the summary of this exploration.
        eo = Exploration()
        # Keep moving until the boss room is reached.
        while not self.current_room.content == RoomContent.BOSS:
            eo.rooms_explored.append(self.current_room.index)
            if(Agent.DEBUG):
                print("++++++++++++++++++++++++++++++++++++++++++++\n")
                print("Stanza = " + str(self.current_room.index))
                print("Contenuto = " + str(self.current_room.content))
                print("Vista = " + str(self.current_room.visited))
            # -----APPLY THE EFFECTS OF THE ROOM JUST ENTERED-----
            # Entering an enemy room flips a biased coin and may cost one
            # health point; the starting room "000" is exempt on its very
            # first visit.
            if (self.current_room.content == RoomContent.ENEMIES and not
                    (self.current_room.index == "000" and self.current_room.visited == False)):
                # Build the biased coin (r == 0 means damage taken).
                l = []
                r = 1
                if self.attack_points == 1:
                    # 25% chance of avoiding damage.
                    for i in range(25):
                        l.append(1)
                    for i in range(75):
                        l.append(0)
                    r = random.choice(l)
                if self.attack_points == 2:
                    # 50% chance of avoiding damage.
                    for i in range(50):
                        l.append(1)
                    for i in range(50):
                        l.append(0)
                    r = random.choice(l)
                if self.attack_points == 3:
                    # 75% chance of avoiding damage.
                    for i in range(75):
                        l.append(1)
                    for i in range(25):
                        l.append(0)
                    r = random.choice(l)
                if self.attack_points == 4:
                    # With 4 attack points no damage is ever taken.
                    r = 1
                if r == 0:
                    self.health_points -= 1
                    if Agent.DEBUG:
                        print("Damage taken! Argh!")
                elif Agent.DEBUG:
                    print("No damage taken! Phew!")
            # Entering the treasure room or the shop is recorded, and the
            # room's content is then replaced with enemies.
            if self.current_room.content == RoomContent.TREASURE:
                self.treasure_seen = True
                # Draw a random power-up: +1 or +2, to health or attack.
                values = [1, 2]
                power_up_types = ["health", "attack"]
                v = random.choice(values)
                p = random.choice(power_up_types)
                if Agent.DEBUG:
                    print("Power up found = " + str(v) + " " + p)
                # Health is capped at 4, so heal only when currently <= 3.
                if v == 1 and p == "health" and self.health_points <= 3:
                    self.health_points += 1
                    if Agent.DEBUG:
                        print("+1 h")
                if v == 2 and p == "health" and self.health_points <= 3:
                    if self.health_points <= 2:
                        if Agent.DEBUG:
                            print("+2 h")
                        self.health_points += 2
                    else:
                        # +2 would overshoot the cap: grant only +1.
                        if Agent.DEBUG:
                            print("+2 h, but really +1 h")
                        self.health_points += 1
                if v == 1 and p == "attack":
                    if Agent.DEBUG:
                        print("+1 a")
                    self.attack_points += 1
                if v == 2 and p == "attack":
                    if Agent.DEBUG:
                        print("+2 a")
                    self.attack_points += 2
                self.current_room.content = RoomContent.ENEMIES
            if self.current_room.content == RoomContent.SHOP:
                self.shop_seen = True
                # The shop trades one health point for one attack point.
                self.health_points -= 1
                self.attack_points += 1
                self.current_room.content = RoomContent.ENEMIES
            if Agent.DEBUG:
                print("Health = " + str(self.health_points))
                print("Attack = " + str(self.attack_points))
            # --------------------ROOM RESOLVED: DECIDE WHETHER THE AGENT
            # DIED; IF NOT, MARK THIS ROOM AS EXPLORED--------------------
            # If an enemy killed me, leave the loop.
            if self.health_points == 0:
                break
            # Record that the current room has been seen.
            self.current_room.visited = True
            # --------------------TRANSITION FUNCTION: THE LOGIC THAT PICKS
            # THE NEXT ROOM--------------------
            # Collect the adjacent rooms.
            adj_r_index = self.current_room.adjacent_rooms
            adjacent_rooms = []
            for i in adj_r_index:
                # NOTE(review): this reads the module-level `dungeon`, not
                # self.dungeon - confirm they are always the same object.
                adjacent_rooms.append(dungeon[i])
            # Because of noise the character might fail to decide on the
            # next room (the measurement would be 11, which matches no
            # adjacent room). While that is the case, re-run the circuit.
            new_room_chosen = False
            while not new_room_chosen:
                # Here starts the quantum part: build the circuit used for
                # the decision.
                qc_curr = QuantumCircuit(phi, health, attack, treasure_seen, shop_seen,
                    shop_seen_true, left_content, center_content, right_content, left_seen,
                    center_seen, right_seen, is_treasure_adjacent, is_shop_adjacent,
                    is_boss_adjacent, shop_max_quality, boss_max_quality, shop_quality,
                    boss_quality, oracle_qubit, measure_bits, shop_bit)
                # Initialize it from the agent's current knowledge.
                initialize_dungeon_circuit(qc_curr, self, adjacent_rooms)
                # Apply Grover's oracle.
                oracle_function(qc_curr)
                # Apply the diffuser.
                qc_curr.append(diffuser(), [phi[0], phi[1], health[0], health[1], attack[0],
                    attack[1], treasure_seen, shop_seen, shop_seen_true, left_content[0],
                    left_content[1], center_content[0], center_content[1], right_content[0],
                    right_content[1], left_seen, center_seen, right_seen, is_treasure_adjacent,
                    is_shop_adjacent, is_boss_adjacent, shop_max_quality, boss_max_quality,
                    shop_quality[0], shop_quality[1], boss_quality[0], boss_quality[1],
                    oracle_qubit])
                # Add the measurements.
                qc_curr.measure(phi, measure_bits)
                qc_curr.measure(shop_seen, shop_bit)
                # Keep the circuit in the list of circuits for later display.
                qc_list.append(qc_curr)
                # Execute the circuit.
                simulator = Aer.get_backend('qasm_simulator')
                result = execute(qc_curr, backend = simulator).result()
                counts = result.get_counts(qc_curr)
                # Keep this run's counts in the counts list as well.
                counts_list.append(counts)
                # Choose where to go.
                next_room = self.choice_quantum_movement(counts, self.dungeon,
                    self.current_room.index)
                if not next_room.index == self.current_room.index:
                    new_room_chosen = True
                    if Agent.DEBUG:
                        print("I chose a new room! I'm in room " + str(self.current_room.index) +
                            ", but I'm going to room " + str(next_room.index))
                else:
                    if Agent.DEBUG:
                        print("I didn't choose a new room! I'm gonna retry...")
            self.current_room = next_room
        # Loop finished: either the boss room was reached or the agent died.
        outcome = None
        if self.health_points == 0:
            outcome = ExplorationOutcome.YOUDIED
            if Agent.DEBUG:
                print("Sei morto!")
        else:
            # Victory chance is 5 * (1 + health) * attack, in percent.
            probability_of_victory = 5 * (1 + self.health_points) * self.attack_points
            if Agent.DEBUG:
                print("La tua probabilita' di vittoria, con " + str(self.health_points) +
                    " punti vita e " + str(self.attack_points) + " punti attacco, e' del " +
                    str(probability_of_victory) + " %")
            # Biased coin with probability_of_victory% ones.
            l = []
            for i in range(probability_of_victory):
                l.append(1)
            for i in range(100 - probability_of_victory):
                l.append(0)
            r = random.choice(l)
            if r == 1:
                outcome = ExplorationOutcome.BOSSDEFEATED
                if Agent.DEBUG:
                    print("Vittoria!")
            else:
                outcome = ExplorationOutcome.BOSSKILLEDYOU
                if Agent.DEBUG:
                    print("Il Boss ti ha ucciso!")
        # Append the final room too, so every run that ends at the boss
        # lists the boss room as its last entry.
        eo.rooms_explored.append(self.current_room.index)
        eo.health = self.health_points
        eo.attack = self.attack_points
        eo.outcome = outcome
        all_explorations_path.append(eo)
# The usual helper to invert a single bit (kept as a string).
def opposite(bit):
    """Return the flipped bit; "-1" for anything that is not "0" or "1"."""
    flips = {"0": "1", "1": "0"}
    return flips.get(bit, "-1")
# Method that, given a room index, returns all adjacent room indices.
def getAdjacentRooms(index):
    """Return the indices of the rooms adjacent to *index*.

    Two rooms are adjacent when their bit-string indices differ in exactly
    one position, so each single-bit flip of *index* yields one neighbour.

    Args:
        index: room index as a bit string (e.g. "010").

    Returns:
        list[str]: one flipped-bit index per position, in position order.
    """
    # The original inner loop reused the outer loop variable `i` and built
    # the string char by char; slicing avoids both issues.
    res = []
    for i, bit in enumerate(index):
        flipped = "0" if bit == "1" else "1"
        res.append(index[:i] + flipped + index[i + 1:])
    return res
# Function that, given a natural number and a bit count, encodes the number
# as a bit string of length number_of_bits.
def intToBinary(num, number_of_bits):
    """Encode the natural number *num* as a zero-padded binary string.

    Args:
        num: non-negative integer to encode.
        number_of_bits: length of the resulting bit string.

    Returns:
        str: *num* in binary, left-padded with zeros to *number_of_bits*.

    Raises:
        Exception: if *num* is negative or does not fit in the given bits
            (the original silently returned all zeros for negative input).
    """
    if num < 0 or num > pow(2, number_of_bits) - 1:
        raise Exception("Error: number " + str(num) + " cannot be represented with only " +
                        str(number_of_bits) + " bits")
    # format() with the "0Nb" spec replaces the hand-rolled power-of-two loop.
    return format(num, "0{}b".format(number_of_bits))
# Global map from room index (bit string) to its Room object; filled in by
# full_exploration().
dungeon = {}
def full_exploration():
    """Build the thesis test dungeon and run one full exploration.

    Populates the module-level `dungeon` dict (room index -> Room) and
    starts a fresh Agent in room "000".
    """
    # Content of each room in index order 000..111. The first room always
    # contains enemies; this is the sample dungeon used in the thesis.
    contents = [
        RoomContent.ENEMIES,   # 000 - starting room
        RoomContent.ENEMIES,   # 001
        RoomContent.ENEMIES,   # 010
        RoomContent.BOSS,      # 011
        RoomContent.ENEMIES,   # 100
        RoomContent.SHOP,      # 101
        RoomContent.TREASURE,  # 110
        RoomContent.ENEMIES,   # 111
    ]
    rooms = []
    for i, content in enumerate(contents):
        room = Room(intToBinary(i, 3))
        room.content = content
        # Redundant with Room.__init__, kept for parity with the original.
        room.visited = False
        rooms.append(room)
    if Agent.DEBUG:
        for room in rooms:
            print("contenuto della stanza " + str(room.index) + " = " + str(room.content) +
                  ", le sue stanze adiacenti sono " + str(room.adjacent_rooms))
    # Put the rooms in a dictionary so the agent can look their content up
    # more easily.
    for room in rooms:
        dungeon[room.index] = room
    if Agent.DEBUG:
        for room in rooms:
            room.room_to_string()
    a = Agent(dungeon["000"], dungeon)
    a.explore()
# For the thesis 100 explorations were run here as well, but that meant
# leaving the computer on all night. The count is set to 1 so that anyone
# who copy-pastes this code does not have to wait ~10 hours for it to
# finish, only a few minutes.
for k in range(1):
    full_exploration()
# Just to show something: draw the first circuit and its counts histogram.
qc_list[0].draw('mpl')
plot_histogram(counts_list[0])
# Setting this variable to True dumps the content of one of the path lists
# that were collected above.
visualize_a_list = True
if visualize_a_list:
    # Change the list assigned to f_list to inspect a different output.
    # Possible lists include:
    # all_explorations_path
    # explorations_path_boss_reached
    # explorations_path_high_stats
    # explorations_path_shortest
    f_list = all_explorations_path
    # 'with' guarantees the file is closed even if a write fails (the
    # original reader handle was never closed at all).
    with open("dungeon_summary.txt", "w") as f:
        for i in range(len(f_list)):
            # Zero-pad the exploration number to three digits so the rows
            # line up (same output as the original manual "00"/"0" writes).
            f.write(str(i).zfill(3) + "° exploration = " + str(f_list[i].rooms_explored))
            # Assume an agent never explores more than 20 rooms: pad the
            # room list so the stats columns stay aligned.
            blank_spaces = 20 - len(f_list[i].rooms_explored)
            for j in range(blank_spaces):
                f.write(" ")
            f.write(", h = " + str(f_list[i].health) + ", a = " + str(f_list[i].attack) +
                    ", outcome = " + str(f_list[i].outcome) + "\n")
    with open("dungeon_summary.txt", "r") as f:
        print(f.read())
| 38.039229 | 98 | 0.592229 | 14,126 | 0.246906 | 0 | 0 | 0 | 0 | 0 | 0 | 16,701 | 0.291914 |
f3b36f2c7d198610a8e506b544282c232463d80f | 890 | py | Python | setup.py | monk1337/amazon-denseclus | db891887a73210091f1b7454301efad60510e388 | [
"MIT-0"
] | 46 | 2021-08-04T13:21:57.000Z | 2022-03-15T11:13:35.000Z | setup.py | monk1337/amazon-denseclus | db891887a73210091f1b7454301efad60510e388 | [
"MIT-0"
] | 7 | 2021-08-09T13:26:44.000Z | 2022-02-12T03:36:32.000Z | setup.py | monk1337/amazon-denseclus | db891887a73210091f1b7454301efad60510e388 | [
"MIT-0"
] | 10 | 2021-08-07T06:22:14.000Z | 2022-01-29T21:58:29.000Z | #!/usr/bin/env/python3
import setuptools

# The README doubles as the long description shown on PyPI.
with open("README.md", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="Amazon DenseClus",
    version="0.0.19",
    author="Charles Frenzel",
    description="Dense Clustering for Mixed Data Types",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/awslabs/amazon-denseclus",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.7",
    license_files=("LICENSE",),
    # Runtime dependencies with minimum tested versions.
    install_requires=[
        "umap_learn>=0.5.1",
        "numpy>=1.20.2",
        "hdbscan>=0.8.27",
        "numba>=0.51.2",
        "pandas>=1.2.4",
        "scikit_learn>=0.24.2",
    ],
)
| 27.8125 | 56 | 0.617978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 414 | 0.465169 |
f3b46bdda72352593d198e93a82e51738a8a28a2 | 1,715 | py | Python | polog/tests/handlers/memory/test_saver.py | pomponchik/polog | 104c5068a65b0eaeab59327aac1a583e2606e77e | [
"MIT"
] | 30 | 2020-07-16T16:52:46.000Z | 2022-03-24T16:56:29.000Z | polog/tests/handlers/memory/test_saver.py | pomponchik/polog | 104c5068a65b0eaeab59327aac1a583e2606e77e | [
"MIT"
] | 6 | 2021-02-07T22:08:01.000Z | 2021-12-07T21:56:46.000Z | polog/tests/handlers/memory/test_saver.py | pomponchik/polog | 104c5068a65b0eaeab59327aac1a583e2606e77e | [
"MIT"
] | 4 | 2020-12-22T07:05:34.000Z | 2022-03-24T16:56:50.000Z | import pytest
from polog.handlers.memory.saver import memory_saver
from polog.core.log_item import LogItem
handler = memory_saver()
def test_singleton():
"""
Проверка, что memory_saver - синглтон.
"""
assert memory_saver() is memory_saver()
def test_add_empty_args():
"""
Проверка, что запись лога в память происходит.
"""
handler({'message': 'hello'})
assert handler.last['message'] == 'hello'
assert len(handler.all) > 0
def test_add_full_args():
"""
Проверка, что запись лога в память происходит.
"""
log = LogItem()
log.set_data({'message': 'hello'})
log.set_function_input_data((1, 2, 3), {'lol': 'kek'})
handler(log)
assert handler.last['message'] == 'hello'
assert handler.last.function_input_data.args == (1, 2, 3)
assert handler.last.function_input_data.kwargs == {'lol': 'kek'}
def test_clean():
"""
Проверка, что список логов очищается.
"""
log = LogItem()
log.set_data({'message': 'hello'})
handler(log)
handler.clean()
assert len(handler.all) == 0
assert handler.last is None
def test_add_to_all():
"""
Проверка, что список handler.all заполняется логами.
"""
log = LogItem()
log.set_data({'message': 'hello'})
handler.clean()
handler(log)
assert len(handler.all) > 0
def test_getargs():
"""
Проверка, что можно получить доступ к полям лога без обращения напрямую к словарю.
"""
log = LogItem()
log.set_data({'message': 'hello'})
handler.clean()
handler(log)
assert handler.last is not None
assert handler.last['message'] is not None
assert handler.last.fields['message'] == handler.last['message']
| 23.493151 | 86 | 0.641399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 784 | 0.4035 |
f3b7348c11a181769f9cbdb928c63709a1e219cd | 828 | py | Python | api/v2/serializers/fields/identity.py | simpsonw/atmosphere | 3a5203ef0b563de3a0e8c8c8715df88186532d7a | [
"BSD-3-Clause"
] | 197 | 2016-12-08T02:33:32.000Z | 2022-03-23T14:27:47.000Z | api/v2/serializers/fields/identity.py | simpsonw/atmosphere | 3a5203ef0b563de3a0e8c8c8715df88186532d7a | [
"BSD-3-Clause"
] | 385 | 2017-01-03T22:51:46.000Z | 2020-12-16T16:20:42.000Z | api/v2/serializers/fields/identity.py | benlazarine/atmosphere | 38fad8e4002e510e8b4294f2bb5bc75e8e1817fa | [
"BSD-3-Clause"
] | 50 | 2016-12-08T08:32:25.000Z | 2021-12-10T00:21:39.000Z | from rest_framework import exceptions, serializers
from api.v2.serializers.summaries import IdentitySummarySerializer
from core.models import Identity
class IdentityRelatedField(serializers.RelatedField):
    """DRF related field that renders an Identity through its summary serializer
    and resolves incoming data (a raw id, or a ``{"id": ...}`` dict) back to an
    Identity instance."""

    def get_queryset(self):
        # All identities are candidate values for this field.
        return Identity.objects.all()

    def to_representation(self, identity):
        """Serialize *identity* via the summary serializer, forwarding context."""
        serializer = IdentitySummarySerializer(identity, context=self.context)
        return serializer.data

    def to_internal_value(self, data):
        """Resolve *data* (an id, or a dict containing one) to an Identity.

        Raises:
            exceptions.ValidationError: when no matching Identity can be found.
        """
        queryset = self.get_queryset()
        if isinstance(data, dict):
            identity = data.get("id", None)
        else:
            identity = data
        try:
            return queryset.get(id=identity)
        except Exception:
            # .get() may raise DoesNotExist, MultipleObjectsReturned, or a
            # value-conversion error for malformed ids; surface them all as a
            # validation error.  Narrowed from a bare ``except:``, which also
            # swallowed SystemExit/KeyboardInterrupt.
            raise exceptions.ValidationError(
                "Identity with id '%s' does not exist." % identity
            )
| 31.846154 | 78 | 0.660628 | 674 | 0.81401 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.051932 |
f3b7884aafaa410b826e7b2d1ae2cd0d7f82d7f9 | 426 | py | Python | snippets/python/notification.py | Gautam-virmani/snippets | 54b06ec46ce8b4afba3bfaf57a42e71686fd1cbd | [
"MIT"
] | null | null | null | snippets/python/notification.py | Gautam-virmani/snippets | 54b06ec46ce8b4afba3bfaf57a42e71686fd1cbd | [
"MIT"
] | null | null | null | snippets/python/notification.py | Gautam-virmani/snippets | 54b06ec46ce8b4afba3bfaf57a42e71686fd1cbd | [
"MIT"
] | null | null | null | #Title: Notification Processor
#Tags:plyer,python
#Can process notification of your choice
#plyer:built in module help you to find more information
from plyer import notification
def notifyme(title, message):
    """Pop a desktop notification (5-second timeout) through plyer's backend."""
    options = {
        'title': title,
        'message': message,
        'app_icon': 'Write your icon address here',
        'timeout': 5,
    }
    notification.notify(**options)
notifyme("Title of notification box", "Message in notification")
| 21.3 | 64 | 0.713615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.535211 |
f3b7b87f4bd425920ed3e99f413e2bce0c1f1914 | 1,296 | py | Python | mysite/forms.py | donovan680/django-form-rendering | b075ab8904a8f5289f7c63bc62ff137313264a64 | [
"MIT"
] | 41 | 2017-10-11T13:24:22.000Z | 2021-01-01T06:39:53.000Z | mysite/forms.py | donovan680/django-form-rendering | b075ab8904a8f5289f7c63bc62ff137313264a64 | [
"MIT"
] | 8 | 2020-06-24T01:37:23.000Z | 2022-03-12T00:38:17.000Z | mysite/forms.py | sonkt22/lab_5_16521038_16520223 | 5801f594f7d3a0498d5f8790ccc6ba1e7947e42d | [
"MIT"
] | 17 | 2017-11-10T22:43:09.000Z | 2021-11-05T14:45:06.000Z | from django import forms
class ContactForm(forms.Form):
    """Basic contact form with a hidden ``source`` field."""

    name = forms.CharField(max_length=30)
    email = forms.EmailField(max_length=254)
    message = forms.CharField(
        max_length=2000,
        widget=forms.Textarea(),
        help_text='Write here your message!'
    )
    source = forms.CharField(
        max_length=50,
        widget=forms.HiddenInput()
    )

    def clean(self):
        """Reject submissions where name, email and message are all left empty."""
        cleaned_data = super().clean()
        user_fields = ('name', 'email', 'message')
        if not any(cleaned_data.get(field) for field in user_fields):
            raise forms.ValidationError('You have to write something!')
class ColorfulContactForm(forms.Form):
    """Contact form variant whose widgets carry inline (colored) styling."""
    # Blue-bordered text input with a placeholder.
    name = forms.CharField(
        max_length=30,
        widget=forms.TextInput(
            attrs={
                'style': 'border-color: blue;',
                'placeholder': 'Write your name here'
            }
        )
    )
    # Green-bordered email input.
    email = forms.EmailField(
        max_length=254,
        widget=forms.TextInput(attrs={'style': 'border-color: green;'})
    )
    # Orange-bordered textarea for the message body.
    message = forms.CharField(
        max_length=2000,
        widget=forms.Textarea(attrs={'style': 'border-color: orange;'}),
        help_text='Write here your message!'
    )
| 28.8 | 72 | 0.600309 | 1,265 | 0.97608 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.174383 |
f3b81f0da5b4d78d7f7a29ad65dbfe51dd9c3d39 | 2,411 | py | Python | modules/dialogue_importer.py | KAIST-AILab/PyOpenDial | c9bca653c18ccc082dc8b86b4a8feee9ed00a75b | [
"MIT"
] | 9 | 2019-09-23T01:56:43.000Z | 2022-03-13T17:58:40.000Z | modules/dialogue_importer.py | KAIST-AILab/PyOpenDial | c9bca653c18ccc082dc8b86b4a8feee9ed00a75b | [
"MIT"
] | 2 | 2019-11-18T17:02:30.000Z | 2021-07-14T15:47:08.000Z | modules/dialogue_importer.py | KAIST-AILab/PyOpenDial | c9bca653c18ccc082dc8b86b4a8feee9ed00a75b | [
"MIT"
] | 1 | 2022-02-08T06:41:19.000Z | 2022-02-08T06:41:19.000Z | import logging
from threading import Thread
from time import sleep
from multipledispatch import dispatch
from dialogue_state import DialogueState
from modules.dialogue_recorder import DialogueRecorder
from modules.forward_planner import ForwardPlanner
class DialogueImporter(Thread):
    """
    Functionality to import a previously recorded dialogue in the dialogue system. The
    import essentially "replays" the previous interaction, including all state update
    operations.
    """
    # logger
    log = logging.getLogger('PyOpenDial')
    def __init__(self, system, turns):
        """
        Creates a new dialogue importer attached to a particular dialogue system, and
        with an ordered list of turns (encoded by their dialogue state).
        :param system: the dialogue system
        :param turns: the sequence of turns
        """
        self.system = system
        self.turns = turns
        # Whether recorded system actions should be treated as Wizard-of-Oz examples.
        # NOTE(review): attribute name looks like it was meant to be "wizard_of_oz_mode".
        self.wizard_of_mode = False
    @dispatch(bool)
    def set_wizard_of_oz_mode(self, is_wizard_of_oz):
        """
        Sets whether the import should consider the system actions as "expert"
        Wizard-of-Oz actions to imitate.
        :param is_wizard_of_oz: whether the system actions are wizard-of-Oz examples
        """
        self.wizard_of_mode = is_wizard_of_oz
    @dispatch()
    def run(self):
        """Replay all recorded turns through the dialogue system.

        The Wizard-of-Oz branch is an unimplemented stub.  In normal mode the
        forward planner is detached while the turns are replayed, leftover
        action/utility nodes are removed, and the planner is re-attached.
        """
        if self.wizard_of_mode:
            # TODO: WizardLearner
            # self.system.attach_module(WizardLearner)
            # for turn in self.turns:
            #     self.add_turn(turn)
            pass
        else:
            self.system.detach_module(ForwardPlanner)
            for turn in self.turns:
                self.add_turn(turn)
            self.system.get_state().remove_nodes(self.system.get_state().get_action_node_ids())
            self.system.get_state().remove_nodes(self.system.get_state().get_utility_node_ids())
            self.system.attach_module(ForwardPlanner)
    @dispatch(DialogueState)
    def add_turn(self, turn):
        """Wait until the system is ready, then feed a copy of *turn* into it.

        Failures are logged instead of raised so a single bad turn does not
        abort the whole import.
        """
        try:
            # NOTE(review): "is_pauesd" looks like a typo for "is_paused" --
            # confirm against the dialogue-system API before renaming.
            while self.system.is_pauesd() or not self.system.get_module(DialogueRecorder).is_running():
                try:
                    # TODO: Thread
                    # NOTE(review): time.sleep() takes seconds, so this polls
                    # every 100 s; a 100 ms wait was presumably intended.
                    sleep(100)
                except:
                    pass
            self.system.add_content(turn.copy())
        except Exception as e:
            self.log.warning("could not add content: %s" % e)
| 33.486111 | 103 | 0.635421 | 2,153 | 0.89299 | 0 | 0 | 1,441 | 0.597677 | 0 | 0 | 865 | 0.358772 |
f3bd372022e0d53e9cbf680c7d0eed7c97dec5db | 1,652 | py | Python | flask_cm/pycompile.py | Ginnam/flask-onlineIDE | fdac6ac4c0cdc096450704b5a345acc6d0f9e5b5 | [
"MIT"
] | 1 | 2022-02-25T02:21:21.000Z | 2022-02-25T02:21:21.000Z | flask_cm/pycompile.py | Ginnam/flask-onlineIDE | fdac6ac4c0cdc096450704b5a345acc6d0f9e5b5 | [
"MIT"
] | null | null | null | flask_cm/pycompile.py | Ginnam/flask-onlineIDE | fdac6ac4c0cdc096450704b5a345acc6d0f9e5b5 | [
"MIT"
] | null | null | null | import os, sys, subprocess, tempfile, time
# Create a temporary working directory for compiled snippets; mkdtemp returns its path.
TempFile = tempfile.mkdtemp(suffix='_test', prefix='python_')
# File-name seed: millisecond timestamp captured at import time.
FileNum = int(time.time() * 1000)
# Path of the running Python interpreter, used to execute submitted code.
EXEC = sys.executable
# Get the Python version.
def get_version():
    """Return the interpreter version as a short 'python <major>.<minor>' string."""
    info = sys.version_info
    return "python %s.%s" % (info.major, info.minor)
# Build the .py module name for the temp script.
def get_pyname():
    """Return the module name for the temporary script, derived from the startup timestamp."""
    return 'test_%d' % FileNum
# Write the received code into a file.
def write_file(pyname, code):
    """Write *code* into ``<TempFile>/<pyname>.py`` and return the full path."""
    target = os.path.join(TempFile, '%s.py' % pyname)
    with open(target, 'w', encoding='utf-8') as handle:
        handle.write(code)
    print('file path: %s' % target)
    return target
# Decoding helper for raw subprocess output.
def decode(s):
    """Decode subprocess output bytes, preferring UTF-8 and falling back to GBK."""
    try:
        text = s.decode('utf-8')
    except UnicodeDecodeError:
        text = s.decode('gbk')
    return text
# Main execution function.
def main(code):
    """Execute *code* in a child Python process and return a result dict.

    The returned dict contains:
      * ``version`` -- short interpreter version string,
      * ``code``    -- 'Success' or 'Error',
      * ``output``  -- captured stdout/stderr of the child process.
    """
    r = dict()
    r["version"] = get_version()
    pyname = get_pyname()
    fpath = write_file(pyname, code)
    try:
        # check_output waits for the child to finish and returns everything it
        # wrote to stdout; stderr is folded into the same stream.  The 10 s
        # timeout guards against runaway user code.
        outdata = decode(subprocess.check_output([EXEC, fpath], stderr=subprocess.STDOUT, timeout=10))
    except subprocess.CalledProcessError as e:
        # Non-zero exit status: e.output holds the child's error text.
        r["code"] = 'Error'
        r["output"] = decode(e.output)
        return r
    except subprocess.TimeoutExpired:
        # Previously this propagated and crashed the caller; report it as an
        # ordinary error result instead.
        r["code"] = 'Error'
        r["output"] = 'Execution timed out after 10 seconds.'
        return r
    else:
        r['output'] = outdata
        r["code"] = "Success"
        return r
    finally:
        # Best-effort cleanup of the temp script.  The old code called exit(1)
        # here, which raised SystemExit inside the finally block and silently
        # discarded the function's return value -- ignore cleanup failures.
        try:
            os.remove(fpath)
        except OSError:
            pass
if __name__ == '__main__':
    # Smoke test: run two print statements through the sandboxed executor.
    code = "print(11);print(12)"
    print(main(code))
| 22.026667 | 102 | 0.601695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 675 | 0.352665 |
f3bfdc81eb1e2339210b6f5811b1e4a0893f03f6 | 218 | py | Python | flask_resultful_plugin/error.py | PushyZqin/flask-restful-plugin | 7a142de96500910f5f7648d5edf8986afaa72b70 | [
"MIT"
] | 2 | 2018-11-28T13:49:18.000Z | 2018-11-29T11:13:40.000Z | flask_resultful_plugin/error.py | PushyZqin/flask-restful-plugin | 7a142de96500910f5f7648d5edf8986afaa72b70 | [
"MIT"
] | null | null | null | flask_resultful_plugin/error.py | PushyZqin/flask-restful-plugin | 7a142de96500910f5f7648d5edf8986afaa72b70 | [
"MIT"
] | null | null | null | # encoding:utf-8
# 401 error
class UnauthorizedError(Exception):
    """Raised for HTTP 401 (unauthorized) situations."""
# 400 error
class BadRequestError(Exception):
    """Raised for HTTP 400 (bad request) situations."""
class MediaTypeError(Exception):
    """Raised when a request carries an unsupported media type."""
# Base (parent) exception
class RestfulException(Exception):
    """Intended base exception of the plugin.

    NOTE(review): the other error classes in this module derive directly from
    Exception rather than from this class -- confirm whether they should
    subclass it.
    """
f3c07f9cec57f76e746c76c17235957335504e0a | 986 | py | Python | wunderkafka/config/generated/enums.py | severstal-digital/wunderkafka | 8c56fa4559a8576af7f005fd916bf97127576278 | [
"Apache-2.0"
] | null | null | null | wunderkafka/config/generated/enums.py | severstal-digital/wunderkafka | 8c56fa4559a8576af7f005fd916bf97127576278 | [
"Apache-2.0"
] | null | null | null | wunderkafka/config/generated/enums.py | severstal-digital/wunderkafka | 8c56fa4559a8576af7f005fd916bf97127576278 | [
"Apache-2.0"
] | null | null | null | from enum import Enum
class BrokerAddressFamily(str, Enum):
any = 'any'
v4 = 'v4'
v6 = 'v6'
class SecurityProtocol(str, Enum):
plaintext = 'plaintext'
ssl = 'ssl'
sasl_plaintext = 'sasl_plaintext'
sasl_ssl = 'sasl_ssl'
class SslEndpointIdentificationAlgorithm(str, Enum):
none = 'none'
https = 'https'
class IsolationLevel(str, Enum):
read_uncommitted = 'read_uncommitted'
read_committed = 'read_committed'
class AutoOffsetReset(str, Enum):
smallest = 'smallest'
earliest = 'earliest'
beginning = 'beginning'
largest = 'largest'
latest = 'latest'
end = 'end'
error = 'error'
class CompressionCodec(str, Enum):
none = 'none'
gzip = 'gzip'
snappy = 'snappy'
lz4 = 'lz4'
zstd = 'zstd'
class CompressionType(str, Enum):
none = 'none'
gzip = 'gzip'
snappy = 'snappy'
lz4 = 'lz4'
zstd = 'zstd'
class QueuingStrategy(str, Enum):
fifo = 'fifo'
lifo = 'lifo'
| 17.607143 | 52 | 0.618661 | 940 | 0.953347 | 0 | 0 | 0 | 0 | 0 | 0 | 236 | 0.239351 |
f3c198c78e3e7bb246c315b241264dd26e8e6ee5 | 12,928 | py | Python | std/captum/21.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | std/captum/21.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | std/captum/21.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from transformers import BertTokenizer, BertForQuestionAnswering, BertConfig
from captum.attr import visualization as viz
from captum.attr import LayerConductance, LayerIntegratedGradients
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_path = "<PATH-TO-SAVED-MODEL>"
model = BertForQuestionAnswering.from_pretrained(model_path)
model.to(device)
model.eval()
model.zero_grad()
tokenizer = BertTokenizer.from_pretrained(model_path)
def predict(inputs, token_type_ids=None, position_ids=None, attention_mask=None):
output = model(
inputs,
token_type_ids=token_type_ids,
position_ids=position_ids,
attention_mask=attention_mask,
)
return output.start_logits, output.end_logits
def squad_pos_forward_func(inputs, token_type_ids=None, position_ids=None, attention_mask=None, position=0):
pred = predict(inputs, token_type_ids=token_type_ids, position_ids=position_ids, attention_mask=attention_mask)
pred = pred[position]
return pred.max(1).values
ref_token_id = tokenizer.pad_token_id
sep_token_id = tokenizer.sep_token_id
cls_token_id = tokenizer.cls_token_id
def construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id):
question_ids = tokenizer.encode(question, add_special_tokens=False)
text_ids = tokenizer.encode(text, add_special_tokens=False)
input_ids = [cls_token_id] + question_ids + [sep_token_id] + text_ids + [sep_token_id]
ref_input_ids = (
[cls_token_id]
+ [ref_token_id] * len(question_ids)
+ [sep_token_id]
+ [ref_token_id] * len(text_ids)
+ [sep_token_id]
)
return torch.tensor([input_ids], device=device), torch.tensor([ref_input_ids], device=device), len(question_ids)
def construct_input_ref_token_type_pair(input_ids, sep_ind=0):
seq_len = input_ids.size(1)
token_type_ids = torch.tensor([[0 if i <= sep_ind else 1 for i in range(seq_len)]], device=device)
ref_token_type_ids = torch.zeros_like(token_type_ids, device=device)
return token_type_ids, ref_token_type_ids
def construct_input_ref_pos_id_pair(input_ids):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
ref_position_ids = torch.zeros(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
ref_position_ids = ref_position_ids.unsqueeze(0).expand_as(input_ids)
return position_ids, ref_position_ids
def construct_attention_mask(input_ids):
return torch.ones_like(input_ids)
def construct_whole_bert_embeddings(
input_ids, ref_input_ids, token_type_ids=None, ref_token_type_ids=None, position_ids=None, ref_position_ids=None
):
input_embeddings = model.bert.embeddings(input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
ref_input_embeddings = model.bert.embeddings(
ref_input_ids, token_type_ids=ref_token_type_ids, position_ids=ref_position_ids
)
return input_embeddings, ref_input_embeddings
question, text = (
"What is important to us?",
"It is important to us to include, empower and support humans of all kinds.",
)
input_ids, ref_input_ids, sep_id = construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id)
token_type_ids, ref_token_type_ids = construct_input_ref_token_type_pair(input_ids, sep_id)
position_ids, ref_position_ids = construct_input_ref_pos_id_pair(input_ids)
attention_mask = construct_attention_mask(input_ids)
indices = input_ids[0].detach().tolist()
all_tokens = tokenizer.convert_ids_to_tokens(indices)
ground_truth = "to include, empower and support humans of all kinds"
ground_truth_tokens = tokenizer.encode(ground_truth, add_special_tokens=False)
ground_truth_end_ind = indices.index(ground_truth_tokens[-1])
ground_truth_start_ind = ground_truth_end_ind - len(ground_truth_tokens) + 1
start_scores, end_scores = predict(
input_ids, token_type_ids=token_type_ids, position_ids=position_ids, attention_mask=attention_mask
)
print("Question: ", question)
print("Predicted Answer: ", " ".join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores) + 1]))
lig = LayerIntegratedGradients(squad_pos_forward_func, model.bert.embeddings)
attributions_start, delta_start = lig.attribute(
inputs=input_ids,
baselines=ref_input_ids,
additional_forward_args=(token_type_ids, position_ids, attention_mask, 0),
return_convergence_delta=True,
)
attributions_end, delta_end = lig.attribute(
inputs=input_ids,
baselines=ref_input_ids,
additional_forward_args=(token_type_ids, position_ids, attention_mask, 1),
return_convergence_delta=True,
)
def summarize_attributions(attributions):
attributions = attributions.sum(dim=-1).squeeze(0)
attributions = attributions / torch.norm(attributions)
return attributions
attributions_start_sum = summarize_attributions(attributions_start)
attributions_end_sum = summarize_attributions(attributions_end)
start_position_vis = viz.VisualizationDataRecord(
attributions_start_sum,
torch.max(torch.softmax(start_scores[0], dim=0)),
torch.argmax(start_scores),
torch.argmax(start_scores),
str(ground_truth_start_ind),
attributions_start_sum.sum(),
all_tokens,
delta_start,
)
end_position_vis = viz.VisualizationDataRecord(
attributions_end_sum,
torch.max(torch.softmax(end_scores[0], dim=0)),
torch.argmax(end_scores),
torch.argmax(end_scores),
str(ground_truth_end_ind),
attributions_end_sum.sum(),
all_tokens,
delta_end,
)
print("\033[1m", "Visualizations For Start Position", "\033[0m")
viz.visualize_text([start_position_vis])
print("\033[1m", "Visualizations For End Position", "\033[0m")
viz.visualize_text([end_position_vis])
from IPython.display import Image
Image(filename="img/bert/visuals_of_start_end_predictions.png")
lig2 = LayerIntegratedGradients(
squad_pos_forward_func,
[
model.bert.embeddings.word_embeddings,
model.bert.embeddings.token_type_embeddings,
model.bert.embeddings.position_embeddings,
],
)
attributions_start = lig2.attribute(
inputs=(input_ids, token_type_ids, position_ids),
baselines=(ref_input_ids, ref_token_type_ids, ref_position_ids),
additional_forward_args=(attention_mask, 0),
)
attributions_end = lig2.attribute(
inputs=(input_ids, token_type_ids, position_ids),
baselines=(ref_input_ids, ref_token_type_ids, ref_position_ids),
additional_forward_args=(attention_mask, 1),
)
attributions_start_word = summarize_attributions(attributions_start[0])
attributions_end_word = summarize_attributions(attributions_end[0])
attributions_start_token_type = summarize_attributions(attributions_start[1])
attributions_end_token_type = summarize_attributions(attributions_end[1])
attributions_start_position = summarize_attributions(attributions_start[2])
attributions_end_position = summarize_attributions(attributions_end[2])
def get_topk_attributed_tokens(attrs, k=5):
values, indices = torch.topk(attrs, k)
top_tokens = [all_tokens[idx] for idx in indices]
return top_tokens, values, indices
top_words_start, top_words_val_start, top_word_ind_start = get_topk_attributed_tokens(attributions_start_word)
top_words_end, top_words_val_end, top_words_ind_end = get_topk_attributed_tokens(attributions_end_word)
top_token_type_start, top_token_type_val_start, top_token_type_ind_start = get_topk_attributed_tokens(
attributions_start_token_type
)
top_token_type_end, top_token_type_val_end, top_token_type_ind_end = get_topk_attributed_tokens(
attributions_end_token_type
)
top_pos_start, top_pos_val_start, pos_ind_start = get_topk_attributed_tokens(attributions_start_position)
top_pos_end, top_pos_val_end, pos_ind_end = get_topk_attributed_tokens(attributions_end_position)
df_start = pd.DataFrame(
{
"Word(Index), Attribution": [
"{} ({}), {}".format(word, pos, round(val.item(), 2))
for word, pos, val in zip(top_words_start, top_word_ind_start, top_words_val_start)
],
"Token Type(Index), Attribution": [
"{} ({}), {}".format(ttype, pos, round(val.item(), 2))
for ttype, pos, val in zip(top_token_type_start, top_token_type_ind_start, top_words_val_start)
],
"Position(Index), Attribution": [
"{} ({}), {}".format(position, pos, round(val.item(), 2))
for position, pos, val in zip(top_pos_start, pos_ind_start, top_pos_val_start)
],
}
)
df_start.style.apply(["cell_ids: False"])
df_end = pd.DataFrame(
{
"Word(Index), Attribution": [
"{} ({}), {}".format(word, pos, round(val.item(), 2))
for word, pos, val in zip(top_words_end, top_words_ind_end, top_words_val_end)
],
"Token Type(Index), Attribution": [
"{} ({}), {}".format(ttype, pos, round(val.item(), 2))
for ttype, pos, val in zip(top_token_type_end, top_token_type_ind_end, top_words_val_end)
],
"Position(Index), Attribution": [
"{} ({}), {}".format(position, pos, round(val.item(), 2))
for position, pos, val in zip(top_pos_end, pos_ind_end, top_pos_val_end)
],
}
)
df_end.style.apply(["cell_ids: False"])
["{}({})".format(token, str(i)) for i, token in enumerate(all_tokens)]
df_start
df_end
def squad_pos_forward_func2(input_emb, attention_mask=None, position=0):
pred = model(
inputs_embeds=input_emb,
attention_mask=attention_mask,
)
pred = pred[position]
return pred.max(1).values
layer_attrs_start = []
layer_attrs_end = []
token_to_explain = 23
layer_attrs_start_dist = []
layer_attrs_end_dist = []
input_embeddings, ref_input_embeddings = construct_whole_bert_embeddings(
input_ids,
ref_input_ids,
token_type_ids=token_type_ids,
ref_token_type_ids=ref_token_type_ids,
position_ids=position_ids,
ref_position_ids=ref_position_ids,
)
for i in range(model.config.num_hidden_layers):
lc = LayerConductance(squad_pos_forward_func2, model.bert.encoder.layer[i])
layer_attributions_start = lc.attribute(
inputs=input_embeddings, baselines=ref_input_embeddings, additional_forward_args=(attention_mask, 0)
)
layer_attributions_end = lc.attribute(
inputs=input_embeddings, baselines=ref_input_embeddings, additional_forward_args=(attention_mask, 1)
)
layer_attrs_start.append(summarize_attributions(layer_attributions_start).cpu().detach().tolist())
layer_attrs_end.append(summarize_attributions(layer_attributions_end).cpu().detach().tolist())
layer_attrs_start_dist.append(layer_attributions_start[0, token_to_explain, :].cpu().detach().tolist())
layer_attrs_end_dist.append(layer_attributions_end[0, token_to_explain, :].cpu().detach().tolist())
fig, ax = plt.subplots(figsize=(15, 5))
xticklabels = all_tokens
yticklabels = list(range(1, 13))
ax = sns.heatmap(np.array(layer_attrs_start), xticklabels=xticklabels, yticklabels=yticklabels, linewidth=0.2)
plt.xlabel("Tokens")
plt.ylabel("Layers")
plt.show()
fig, ax = plt.subplots(figsize=(15, 5))
xticklabels = all_tokens
yticklabels = list(range(1, 13))
ax = sns.heatmap(np.array(layer_attrs_end), xticklabels=xticklabels, yticklabels=yticklabels, linewidth=0.2)
plt.xlabel("Tokens")
plt.ylabel("Layers")
plt.show()
fig, ax = plt.subplots(figsize=(20, 10))
ax = sns.boxplot(data=layer_attrs_start_dist)
plt.xlabel("Layers")
plt.ylabel("Attribution")
plt.show()
fig, ax = plt.subplots(figsize=(20, 10))
ax = sns.boxplot(data=layer_attrs_end_dist)
plt.xlabel("Layers")
plt.ylabel("Attribution")
plt.show()
def pdf_attr(attrs, bins=100):
return np.histogram(attrs, bins=bins, density=True)[0]
layer_attrs_end_pdf = map(lambda layer_attrs_end_dist: pdf_attr(layer_attrs_end_dist), layer_attrs_end_dist)
layer_attrs_end_pdf = np.array(list(layer_attrs_end_pdf))
attr_sum = np.array(layer_attrs_end_dist).sum(-1)
layer_attrs_end_pdf_norm = np.linalg.norm(layer_attrs_end_pdf, axis=-1, ord=1)
layer_attrs_end_pdf = np.transpose(layer_attrs_end_pdf)
layer_attrs_end_pdf = np.divide(layer_attrs_end_pdf, layer_attrs_end_pdf_norm, where=layer_attrs_end_pdf_norm != 0)
fig, ax = plt.subplots(figsize=(20, 10))
plt.plot(layer_attrs_end_pdf)
plt.xlabel("Bins")
plt.ylabel("Density")
plt.legend(["Layer " + str(i) for i in range(1, 13)])
plt.show()
fig, ax = plt.subplots(figsize=(20, 10))
layer_attrs_end_pdf[layer_attrs_end_pdf == 0] = 1
layer_attrs_end_pdf_log = np.log2(layer_attrs_end_pdf)
entropies = -(layer_attrs_end_pdf * layer_attrs_end_pdf_log).sum(0)
plt.scatter(np.arange(12), attr_sum, s=entropies * 100)
plt.xlabel("Layers")
plt.ylabel("Total Attribution")
plt.show()
| 35.712707 | 117 | 0.75905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 797 | 0.061649 |
f3c21ba1dec0f31a9f84ecd704fb5126c483d973 | 1,593 | py | Python | examples/example_ME5ME6.py | bmachiel/python-substratestack | 5689cdbc8582dac7a6b567c03e871ebb3a89343a | [
"BSD-2-Clause"
] | 1 | 2020-03-10T14:46:47.000Z | 2020-03-10T14:46:47.000Z | examples/example_ME5ME6.py | bmachiel/python-substratestack | 5689cdbc8582dac7a6b567c03e871ebb3a89343a | [
"BSD-2-Clause"
] | null | null | null | examples/example_ME5ME6.py | bmachiel/python-substratestack | 5689cdbc8582dac7a6b567c03e871ebb3a89343a | [
"BSD-2-Clause"
] | 1 | 2020-03-10T14:48:49.000Z | 2020-03-10T14:48:49.000Z | #!/bin/env python
# import the technology's complete stack definition
from example import stack
# in order to decrease simulation times, some metal layers can be removed from
# the stack, allowing more oxide layers to be merged in the next step
stack.remove_metal_layer_by_name('PO1')
stack.remove_metal_layer_by_name('ME1')
stack.remove_metal_layer_by_name('ME2')
stack.remove_metal_layer_by_name('ME3')
stack.remove_metal_layer_by_name('ME4')
#stack.remove_metal_layer_by_name('ME5')
#stack.remove_metal_layer_by_name('ME6')
if __name__ == '__main__':
# Print the standardized stack to example_ME5ME6_std.pdf
stack.draw('example_ME5ME6_std', pages=3, single_page=True)
# Merge oxide layers to reduce the stack's complexity, decreasing simulation
# times
stack.simplify()
if __name__ == '__main__':
# Print the simplified stack to example_ME5ME6.pdf
stack.draw('example_ME5ME6', pages=3, single_page=True)
# Write out a Momentum subtrate definition file of the simplified stack
# write_momentum_substrate argument: filename (without extension),
# infinite ground plane
# NOTE: this might produce bad output when the stack has not been
# simplified before!
stack.write_momentum_substrate('example_ME5ME6', True)
# Write out a Sonnet project that includes the simplified subtrate stack
# write_sonnet_technology argument: filename (without extension)
# NOTE: this might produce bad output when the stack has not been
# simplified before!
stack.write_sonnet_technology('example_ME5ME6')
| 38.853659 | 78 | 0.755179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,103 | 0.692404 |
f3c32a1c29b23a9a39eedc92636dc8c119583df9 | 450 | py | Python | other/dingding/dingtalk/api/rest/OapiProjectPointAddRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | other/dingding/dingtalk/api/rest/OapiProjectPointAddRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | other/dingding/dingtalk/api/rest/OapiProjectPointAddRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | '''
Created by auto_sdk on 2020.12.24
'''
from dingtalk.api.base import RestApi
class OapiProjectPointAddRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.action_time = None
self.rule_code = None
self.rule_name = None
self.score = None
self.tenant_id = None
self.userid = None
self.uuid = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.project.point.add'
| 21.428571 | 42 | 0.733333 | 369 | 0.82 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.177778 |
f3c435d0d39f9e66d72d19cb75fb37c344115fe5 | 364 | py | Python | Q6.2_brain_teaser.py | latika18/learning | a57c9aacc0157bf7c318f46c1e7c4971d1d55aea | [
"Unlicense"
] | null | null | null | Q6.2_brain_teaser.py | latika18/learning | a57c9aacc0157bf7c318f46c1e7c4971d1d55aea | [
"Unlicense"
] | null | null | null | Q6.2_brain_teaser.py | latika18/learning | a57c9aacc0157bf7c318f46c1e7c4971d1d55aea | [
"Unlicense"
] | null | null | null | There is an 8x8 chess board in which two diagonally opposite corners have been cut off.
You are given 31 dominos, and a single domino can cover exactly two squares.
Can you use the 31 dominos to cover the entire board? Prove your answer (by providing an example, or showing why it’s impossible).
_
________________________________________________________________
| 60.666667 | 130 | 0.821429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f3c669bdb2eccb7f12b62cdf06d7c30a09c174eb | 1,128 | py | Python | make-web.py | blockulator/mpm-wasm | b88b12c27d0501ec2b1e793eee4dcbd6ee20d7c0 | [
"CC0-1.0"
] | 10 | 2019-04-24T15:19:22.000Z | 2021-01-06T22:09:18.000Z | make-web.py | binaryfoundry/mpm-wasm | b88b12c27d0501ec2b1e793eee4dcbd6ee20d7c0 | [
"CC0-1.0"
] | null | null | null | make-web.py | binaryfoundry/mpm-wasm | b88b12c27d0501ec2b1e793eee4dcbd6ee20d7c0 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
import os
import stat
from sys import platform
from shutil import rmtree
from subprocess import check_call
def get_platform_type():
if platform == "linux" or platform == "linux2" or platform == "darwin":
return "unix"
elif platform == "win32":
return "windows"
else:
raise ValueError("Unknown platform.")
def resolve_path(rel_path):
return os.path.abspath(os.path.join(os.path.dirname(__file__), rel_path))
def makedirs_silent(root):
try:
os.makedirs(root)
except:
pass
if __name__ == "__main__":
platform_type = get_platform_type()
if platform_type == "unix":
build_dir = resolve_path("bin/web")
elif platform_type == "windows":
build_dir = resolve_path(".\\bin\\web")
makedirs_silent(build_dir)
os.chdir(build_dir)
if platform_type == "unix":
os.system("emcmake cmake ../.. -DEMSCRIPTEN=ON -G \"Unix Makefiles\"")
os.system("make")
elif platform_type == "windows":
os.system("emcmake cmake ../.. -DEMSCRIPTEN=ON -G \"NMake Makefiles\"")
os.system("nmake")
| 26.232558 | 79 | 0.64273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 279 | 0.24734 |
f3c6b52efc5243517d9844c7baac03ac3dc8305c | 645 | py | Python | django_site/parser_vacancies/migrations/0005_vacancies_count.py | StGrail/v.it | 765cb720a16b0fff66e013b8b66a80d99168c16b | [
"MIT"
] | null | null | null | django_site/parser_vacancies/migrations/0005_vacancies_count.py | StGrail/v.it | 765cb720a16b0fff66e013b8b66a80d99168c16b | [
"MIT"
] | 1 | 2020-12-23T19:08:20.000Z | 2020-12-23T19:08:20.000Z | django_site/parser_vacancies/migrations/0005_vacancies_count.py | StGrail/v.it | 765cb720a16b0fff66e013b8b66a80d99168c16b | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2021-02-28 15:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parser_vacancies', '0004_auto_20210207_0052'),
]
operations = [
migrations.CreateModel(
name='Vacancies_count',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('data', models.DateField(null=True, unique=True)),
('added_today', models.IntegerField(null=True)),
('total_vacancies_count', models.IntegerField(null=True)),
],
),
]
| 28.043478 | 79 | 0.586047 | 552 | 0.855814 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.237209 |
f3c7491e9e71d4e9502531e23a18eb825a935574 | 1,906 | py | Python | online.py | abraker95/auto-loved-bot | 07e050a8804cea7614af917476c193797a453cb1 | [
"MIT"
] | null | null | null | online.py | abraker95/auto-loved-bot | 07e050a8804cea7614af917476c193797a453cb1 | [
"MIT"
] | null | null | null | online.py | abraker95/auto-loved-bot | 07e050a8804cea7614af917476c193797a453cb1 | [
"MIT"
] | null | null | null | import time
import requests
import json
class Online():
session = requests.session()
REQUEST_OK = 0 # Data can be handled
REQUEST_RETRY = 1 # Try getting data again
REQUEST_BAD = 2 # No point in trying, skip and go to next one
@staticmethod
def fetch_web_data(url):
response = Online.session.get(url, timeout=60*5)
# Common response if match is yet to exist
if 'Page Missing' in response.text:
return Online.REQUEST_RETRY, {}
# What to do with the data
status = Online.validate_response(response)
try: data = json.loads(response.text)
except: return status, {}
return status, data
@staticmethod
def validate_response(response):
if response.status_code == 200: return Online.REQUEST_OK # Ok
if response.status_code == 400: return Online.REQUEST_BAD # Unable to process request
if response.status_code == 401: return Online.REQUEST_BAD # Need to log in
if response.status_code == 403: return Online.REQUEST_BAD # Forbidden
if response.status_code == 404: return Online.REQUEST_BAD # Resource not found
if response.status_code == 405: return Online.REQUEST_BAD # Method not allowed
if response.status_code == 407: return Online.REQUEST_BAD # Proxy authentication required
if response.status_code == 408: return Online.REQUEST_RETRY # Request timeout
if response.status_code == 429: return Online.REQUEST_RETRY # Too many requests
if response.status_code == 500: return Online.REQUEST_RETRY # Internal server error
if response.status_code == 502: return Online.REQUEST_RETRY # Bad Gateway
if response.status_code == 503: return Online.REQUEST_RETRY # Service unavailable
if response.status_code == 504: return Online.REQUEST_RETRY # Gateway timeout
| 39.708333 | 100 | 0.681532 | 1,860 | 0.975866 | 0 | 0 | 1,635 | 0.857817 | 0 | 0 | 411 | 0.215635 |
f3c7ea76bf70fcfdcb695ac78bfffa2982783fbb | 5,674 | py | Python | env/lib/python3.6/site-packages/txaio/__init__.py | CanOzcan93/TriviaServer | 64bdffb91198a123860047ba46e3577078bdf2b8 | [
"MIT"
] | null | null | null | env/lib/python3.6/site-packages/txaio/__init__.py | CanOzcan93/TriviaServer | 64bdffb91198a123860047ba46e3577078bdf2b8 | [
"MIT"
] | 4 | 2020-02-12T03:17:43.000Z | 2022-02-11T03:43:30.000Z | env/lib/python3.6/site-packages/txaio/__init__.py | CanOzcan93/TriviaServer | 64bdffb91198a123860047ba46e3577078bdf2b8 | [
"MIT"
] | null | null | null | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
from txaio._version import __version__
from txaio.interfaces import IFailedFuture, ILogger
version = __version__
# This is the API
# see tx.py for Twisted implementation
# see aio.py for asyncio/trollius implementation
class _Config:
    """
    Container for every valid configuration option, exposed as a
    class-level attribute. For example, under asyncio:

    .. sourcecode:: python

        txaio.config.loop = asyncio.get_event_loop()

    ``loop`` is filled in automatically when one of the
    framework-specific modules is imported, but it may be replaced
    before any call into this library. At present only
    :meth:`call_later` consults it.

    With asyncio this should be an event loop (asyncio.get_event_loop
    is the default). With Twisted it should be a reactor instance (by
    default "from twisted.internet import reactor" happens on the
    first call_later invocation).
    """
    #: the event-loop (or reactor) object in use
    loop = None
# Public API of this package: _use_framework() copies each of these names
# (except the two use_* selectors) from the chosen framework module onto
# the txaio package itself.
__all__ = (
    'using_twisted',            # True if we're using Twisted
    'using_asyncio',            # True if we're using asyncio
    'use_twisted',              # sets the library to use Twisted, or exception
    'use_asyncio',              # sets the library to use asyncio, or exception
    'config',                   # the config instance, access via attributes
    'create_future',            # create a Future (can be already resolved/errored)
    'create_future_success',
    'create_future_error',
    'create_failure',           # return an object implementing IFailedFuture
    'as_future',                # call a method, and always return a Future
    'is_future',                # True for Deferreds in tx and Futures, @coroutines in asyncio
    'reject',                   # errback a Future
    'resolve',                  # callback a Future
    'add_callbacks',            # add callback and/or errback
    'gather',                   # return a Future waiting for several other Futures
    'is_called',                # True if the Future has a result
    'call_later',               # call the callback after the given delay seconds
    'failure_message',          # a printable error-message from a IFailedFuture
    'failure_traceback',        # returns a traceback instance from an IFailedFuture
    'failure_format_traceback', # a string, the formatted traceback
    'make_batched_timer',       # create BatchedTimer/IBatchedTimer instances
    'make_logger',              # creates an object implementing ILogger
    'start_logging',            # initializes logging (may grab stdin at this point)
    'set_global_log_level',     # Set the global log level
    'get_global_log_level',     # Get the global log level
    'add_log_categories',
    'IFailedFuture',            # describes API for arg to errback()s
    'ILogger',                  # API for logging
    'sleep',                    # little helper for inline sleeping
)
# Records which framework was selected via use_twisted()/use_asyncio();
# stays None until an explicit choice has been made.
_explicit_framework = None
def use_twisted():
    """Select Twisted as the framework txaio delegates to.

    Raises RuntimeError if a different framework was already chosen
    explicitly.
    """
    global _explicit_framework
    if _explicit_framework not in (None, 'twisted'):
        raise RuntimeError("Explicitly using '{}' already".format(_explicit_framework))
    _explicit_framework = 'twisted'

    from txaio import tx
    _use_framework(tx)

    import txaio
    txaio.using_twisted = True
    txaio.using_asyncio = False
def use_asyncio():
    """Select asyncio as the framework txaio delegates to.

    Raises RuntimeError if a different framework was already chosen
    explicitly.
    """
    global _explicit_framework
    if _explicit_framework not in (None, 'asyncio'):
        raise RuntimeError("Explicitly using '{}' already".format(_explicit_framework))
    _explicit_framework = 'asyncio'

    from txaio import aio
    _use_framework(aio)

    import txaio
    txaio.using_twisted = False
    txaio.using_asyncio = True
def _use_framework(module):
    """
    Internal helper: rebind this package's public API names onto the
    implementations provided by ``module``.
    """
    import txaio
    skip = ('use_twisted', 'use_asyncio')
    for name in __all__:
        if name in skip:
            continue
        setattr(txaio, name, getattr(module, name))
# Default to the "un-framework", which is neither asyncio nor twisted and
# just throws an exception -- this forces you to call .use_twisted()
# or .use_asyncio() before using the library.
from txaio import _unframework  # noqa
_use_framework(_unframework)
| 38.863014 | 94 | 0.669545 | 750 | 0.132182 | 0 | 0 | 0 | 0 | 0 | 0 | 3,927 | 0.692104 |
f3c92fc16f8069456d0e03c98aecac588427d163 | 631 | py | Python | ml/FirstTry.py | mzs9540/covid19 | efe8b6e243f576f728a91fc5cde00b1ac0990ac1 | [
"MIT"
] | 1 | 2020-04-27T15:20:15.000Z | 2020-04-27T15:20:15.000Z | ml/FirstTry.py | mzs9540/covid19 | efe8b6e243f576f728a91fc5cde00b1ac0990ac1 | [
"MIT"
] | null | null | null | ml/FirstTry.py | mzs9540/covid19 | efe8b6e243f576f728a91fc5cde00b1ac0990ac1 | [
"MIT"
] | 1 | 2020-05-30T13:55:22.000Z | 2020-05-30T13:55:22.000Z | import numpy as np
import pandas as pd
import matplotlib as plt
import seaborn as sns
# Load the case table (parsing the Date column) and drop rows that are
# entirely zero.
df = pd.read_csv('from_april.csv', parse_dates=['Date'])
df = df[(df.T != 0).any()]

# Restrict to India's rows and strip the location columns.
india = df[df['Country/Region'] == 'India']
india = india.drop(columns=['Country/Region', 'Province/State', 'Lat', 'Long'])

# One frame per metric.
india_confirmed = india.drop(columns=['Recovered', 'Deaths'])
india_deaths = india.drop(columns=['Recovered', 'Confirmed'])
india_recovered = india.drop(columns=['Confirmed', 'Deaths'])

# Write each metric out; only the confirmed file omits the index column,
# matching the original behaviour.
india_confirmed.to_csv('india_confirmed.csv', index=False)
india_deaths.to_csv('india_deaths.csv')
india_recovered.to_csv('india_recovered.csv')
| 30.047619 | 79 | 0.717908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.329635 |
f3c9330ad60935c8bba02ff2210990863eb4975a | 8,714 | py | Python | packetracer/ppcap.py | hatamiarash7/PacketTracer | a36aea8c5fb52c1950740dd086f44535989a6e40 | [
"MIT"
] | 3 | 2020-03-24T06:39:16.000Z | 2021-04-20T19:25:49.000Z | packetracer/ppcap.py | hatamiarash7/PacketTracer | a36aea8c5fb52c1950740dd086f44535989a6e40 | [
"MIT"
] | 1 | 2019-12-29T12:55:55.000Z | 2019-12-29T12:55:55.000Z | packetracer/ppcap.py | hatamiarash7/PacketTracer | a36aea8c5fb52c1950740dd086f44535989a6e40 | [
"MIT"
] | 7 | 2019-12-29T12:20:13.000Z | 2021-08-02T05:04:48.000Z | """
Packet read and write routines for pcap format.
See http://wiki.wireshark.org/Development/LibpcapFileFormat
"""
import logging
import types
from packetracer import packetracer
from packetracer.layer12 import ethernet, linuxcc, radiotap, btle, can
from packetracer.structcbs import pack_I, unpack_I_le
from packetracer.structcbs import pack_IIII, unpack_IIII, unpack_IIII_le
logger = logging.getLogger("packetracer")

# PCAP/TCPDump related
# PCAP file header

# File magic numbers
# pcap using microseconds resolution
TCPDUMP_MAGIC = 0xA1B2C3D4
TCPDUMP_MAGIC_SWAPPED = 0xD4C3B2A1
# pcap using nanoseconds resolution
TCPDUMP_MAGIC_NANO = 0xA1B23C4D
TCPDUMP_MAGIC_NANO_SWAPPED = 0x4D3CB2A1

PCAP_VERSION_MAJOR = 2
PCAP_VERSION_MINOR = 4

# Data-link types (libpcap DLT_* / linktype values)
DLT_NULL = 0
DLT_EN10MB = 1
DLT_EN3MB = 2
DLT_AX25 = 3
DLT_PRONET = 4
DLT_CHAOS = 5
DLT_IEEE802 = 6
DLT_ARCNET = 7
DLT_SLIP = 8
DLT_PPP = 9
DLT_FDDI = 10
DLT_PFSYNC = 18
DLT_IEEE802_11 = 105
DLT_LINUX_SLL = 113
DLT_PFLOG = 117
DLT_IEEE802_11_RADIO = 127
DLT_CAN_SOCKETCAN = 227
DLT_LINKTYPE_BLUETOOTH_LE_LL = 251
LINKTYPE_BLUETOOTH_LE_LL_WITH_PHDR = 256

# Maps a pcap linktype to the packetracer class used to parse packet bytes.
# Linktypes missing here resolve to None in pcap_cb_init_read.
PCAPTYPE_CLASS = {
    DLT_LINUX_SLL: linuxcc.LinuxCC,
    DLT_EN10MB: ethernet.Ethernet,
    DLT_CAN_SOCKETCAN: can.CAN,
    DLT_IEEE802_11_RADIO: radiotap.Radiotap,
    LINKTYPE_BLUETOOTH_LE_LL_WITH_PHDR: btle.BTLEHdr
}
class PcapFileHdr(packetracer.Packet):
    """pcap file header.

    24-byte global header written once at the start of a capture file;
    the field layout is consumed by the packetracer.Packet base class
    via __hdr__ (name, struct format, default).
    """
    # header length = 24
    __hdr__ = (
        ("magic", "I", TCPDUMP_MAGIC),           # magic number, also encodes timestamp resolution/endianness
        ("v_major", "H", PCAP_VERSION_MAJOR),
        ("v_minor", "H", PCAP_VERSION_MINOR),
        ("thiszone", "I", 0),
        ("sigfigs", "I", 0),
        ("snaplen", "I", 1500),                  # capture length limit per packet
        ("linktype", "I", 1),                    # DLT_* value; default 1 == DLT_EN10MB
    )
class PcapPktHdr(packetracer.Packet):
    """pcap packet header.

    16-byte per-packet record header preceding each packet's bytes.
    """
    # header length: 16
    __hdr__ = (
        ("tv_sec", "I", 0),
        # this can be either microseconds or nanoseconds: check magic number
        ("tv_usec", "I", 0),
        ("caplen", "I", 0),    # number of packet bytes actually stored in the file
        ("len", "I", 0),
    )
# PCAP callbacks
def pcap_cb_init_write(self, snaplen=1500, linktype=DLT_EN10MB, **initdata):
    """Write a nanosecond-resolution pcap file header and reset the clock."""
    self._timestamp = 0
    file_header = PcapFileHdr(magic=TCPDUMP_MAGIC_NANO, snaplen=snaplen, linktype=linktype)
    self._fh.write(file_header.bin())
def pcap_cb_write(self, bts, **metadata):
    """Append one packet record: 16-byte header followed by the raw bytes.

    Timestamp comes from metadata["ts"] (nanoseconds) when given,
    otherwise the previous timestamp advanced by 1,000,000 ns.
    """
    self._timestamp = metadata.get("ts", self._timestamp + 1000000)
    seconds = int(self._timestamp / 1000000000)
    nanos = self._timestamp - seconds * 1000000000
    # logger.debug("paket time sec/nsec: %d/%d", seconds, nanos)
    caplen = len(bts)
    self._fh.write(pack_IIII(seconds, nanos, caplen, caplen))
    self._fh.write(bts)
def pcap_cb_init_read(self, **initdata):
    """Detect and initialize reading of a pcap file.

    Reads the 24-byte global header, configures the timestamp
    resolution and the correct (endianness-aware) unpack callback,
    and picks the link-layer class for byte-to-packet conversion.

    return -- True if the file's magic matches a known pcap variant,
        False otherwise.
    """
    buf = self._fh.read(24)
    # file header is skipped per default (needed for __next__)
    self._fh.seek(24)
    # this is not needed anymore later on but we set it anyway
    fhdr = PcapFileHdr(buf)
    self._closed = False

    if fhdr.magic not in [TCPDUMP_MAGIC, TCPDUMP_MAGIC_NANO, TCPDUMP_MAGIC_SWAPPED, TCPDUMP_MAGIC_NANO_SWAPPED]:
        return False

    is_le = False

    # handle file types; _resolution_factor converts the sub-second field
    # to nanoseconds: 1000 for microsecond files, 1 for nanosecond files.
    if fhdr.magic == TCPDUMP_MAGIC:
        self._resolution_factor = 1000
        # Note: we could use PcapPktHdr/PcapLEPktHdr to parse pre-packetdata but calling unpack directly
        # greatly improves performance
        self._callback_unpack_meta = unpack_IIII
    elif fhdr.magic == TCPDUMP_MAGIC_NANO:
        self._resolution_factor = 1
        self._callback_unpack_meta = unpack_IIII
    elif fhdr.magic == TCPDUMP_MAGIC_SWAPPED:
        is_le = True
        self._resolution_factor = 1000
        self._callback_unpack_meta = unpack_IIII_le
    elif fhdr.magic == TCPDUMP_MAGIC_NANO_SWAPPED:
        is_le = True
        self._resolution_factor = 1
        self._callback_unpack_meta = unpack_IIII_le
    else:
        raise ValueError("invalid tcpdump header, magic value: %s" % fhdr.magic)

    linktype = fhdr.linktype if not is_le else unpack_I_le(pack_I(fhdr.linktype))[0]
    self._lowest_layer_new = PCAPTYPE_CLASS.get(linktype, None)

    def is_resolution_nano(obj):
        """return -- True if resolution is in Nanoseconds, False if microseconds."""
        # Fix: nanosecond files get _resolution_factor == 1 (see above);
        # the previous check "== 1000" returned True for MICROsecond files.
        return obj._resolution_factor == 1

    self.is_resolution_nano = types.MethodType(is_resolution_nano, self)
    return True
def pcap_cb_read(self):
    """Read one packet record.

    return -- (timestamp in nanoseconds, packet bytes); raises
    StopIteration at end of file.
    """
    # 16-byte per-packet header: seconds, sub-second fraction, caplen, len
    hdr = self._fh.read(16)
    if not hdr:
        raise StopIteration
    sec, frac, caplen, _ = self._callback_unpack_meta(hdr)
    payload = self._fh.read(caplen)
    return sec * 1000000000 + frac * self._resolution_factor, payload
def pcap_cb_btstopkt(self, meta, bts):
    """Decode raw packet bytes using the link-layer class chosen at init time."""
    factory = self._lowest_layer_new
    return factory(bts)
# PCAPNG related

# Generic/filetype invariant related
FILETYPE_PCAP = 0
FILETYPE_PCAPNG = 1

# Callback table per file type:
# type_id : [
#	cb_init_write(obj, **initdata),
#	cb_write(self, bytes, **metadata),
#	cb_init_read(obj, **initdata),
#	cb_read(self): metadata, bytes
#	cb_btstopkt(self, metadata, bytes): pkt
# ]
FILEHANDLER = {
    FILETYPE_PCAP: [
        pcap_cb_init_write, pcap_cb_write, pcap_cb_init_read, pcap_cb_read, pcap_cb_btstopkt
    ]
    # FILETYPE_PCAPNG : [
    #	None, None, None
    # ]
}
class FileHandler(object):
    """Thin wrapper around a file object, usable as a context manager."""

    def __init__(self, filename, accessmode):
        # Keep the raw handle plus an explicit closed flag for subclasses.
        self._fh = open(filename, accessmode)
        self._closed = False

    def close(self):
        """Mark the handler closed and release the underlying file."""
        self._closed = True
        self._fh.close()

    def flush(self):
        """Flush any buffered data to the underlying file."""
        self._fh.flush()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
class PcapHandler(FileHandler):
    """
    File handler that dispatches reading/writing of packet-capture files
    to the format-specific callbacks registered in FILEHANDLER.
    """
    MODE_READ = 1
    MODE_WRITE = 2

    def __init__(self, filename, mode, filetype=FILETYPE_PCAP, **initdata):
        """
        filename -- path to the capture file
        mode -- PcapHandler.MODE_READ or PcapHandler.MODE_WRITE
        filetype -- one of the FILEHANDLER keys (used directly for writing;
            reading auto-detects the format)
        """
        try:
            callbacks = FILEHANDLER[filetype]
        # Fix: dict lookup raises KeyError, not IndexError -- the original
        # "except IndexError" could never fire, so an unknown filetype
        # escaped as a bare KeyError instead of this message.
        except KeyError:
            raise Exception("unknown filehandler type for mode %d: %d" % (mode, filetype))

        if mode == PcapHandler.MODE_WRITE:
            super().__init__(filename, "wb")
            # run the format's init-write callback, then bind its write
            # callback as a bound method on this instance
            callbacks[0](self, **initdata)
            self.write = types.MethodType(callbacks[1], self)
        elif mode == PcapHandler.MODE_READ:
            super().__init__(filename, "rb")
            ismatch = False

            # Try every registered format until one recognizes the header.
            for pcaptype, callbacks in FILEHANDLER.items():
                self._fh.seek(0)
                # init callback
                ismatch = callbacks[2](self, **initdata)

                if ismatch:
                    # logger.debug("found handler for file: %x", pcaptype)
                    # read callback
                    self.__next__ = types.MethodType(callbacks[3], self)
                    # bytes-to-packet callback
                    self._btstopkt = types.MethodType(callbacks[4], self)
                    break

            if not ismatch:
                raise Exception("no matching handler found")
        else:
            raise Exception("wrong mode: %d" % mode)

    def read_packet(self, pktfilter=lambda pkt: True):
        """
        pktfilter -- filter as lambda function to match packets to be retrieved,
            return True to accept a specific packet.

        return -- (metadata, packet) if packet can be created from bytes
            else (metadata, bytes). For pcap/tcpdump metadata is a nanoseconds timestamp
        """
        while True:
            # until StopIteration
            meta, bts = self.__next__()

            try:
                pkt = self._btstopkt(meta, bts)
            except Exception as ex:
                logger.warning("could not create packets from bytes: %r", ex)
                return meta, bts

            if pktfilter(pkt):
                return meta, pkt

    def read_packet_iter(self, pktfilter=lambda pkt: True):
        """
        pktfilter -- filter as lambda function to match packets to be retrieved,
            return True to accept a specific packet.

        return -- iterator yielding (metadata, packet)
        """
        if self._closed:
            return

        while True:
            try:
                yield self.read_packet(pktfilter=pktfilter)
            # Fix: the original bare "except:" silently swallowed every
            # exception (including KeyboardInterrupt); only end-of-file
            # terminates the iterator, other errors now propagate.
            except StopIteration:
                return

    def __iter__(self):
        """
        return -- (metadata, bytes)
        """
        if self._closed:
            return

        while True:
            try:
                yield self.__next__()
            except StopIteration:
                break
class Writer(PcapHandler):
    """
    Simple pcap writer supporting pcap format.
    """
    def __init__(self, filename, filetype=FILETYPE_PCAP, **initdata):
        # Fix: forward filetype -- it was previously accepted but dropped,
        # so non-default formats silently fell back to FILETYPE_PCAP.
        super().__init__(filename, PcapHandler.MODE_WRITE, filetype=filetype, **initdata)
class Reader(PcapHandler):
    """
    Simple pcap file reader supporting pcap format.
    """
    def __init__(self, filename, filetype=FILETYPE_PCAP, **initdata):
        # Fix: forward filetype -- it was previously accepted but dropped.
        super().__init__(filename, PcapHandler.MODE_READ, filetype=filetype, **initdata)
| 28.477124 | 112 | 0.638742 | 4,319 | 0.495639 | 708 | 0.081249 | 0 | 0 | 0 | 0 | 2,238 | 0.256828 |
45ea50c656f2df404d63c9bcf2e207daad1a55b1 | 9,945 | py | Python | ExomeDepth/ed_csv_to_vcf.py | UMCUGenetics/Dx_resources | a85743a02cfff9307ab6773be5c08aa76be24979 | [
"MIT"
] | 1 | 2021-11-10T12:53:12.000Z | 2021-11-10T12:53:12.000Z | ExomeDepth/ed_csv_to_vcf.py | UMCUGenetics/Dx_resources | a85743a02cfff9307ab6773be5c08aa76be24979 | [
"MIT"
] | 21 | 2020-11-19T12:21:01.000Z | 2021-04-22T15:32:27.000Z | ExomeDepth/ed_csv_to_vcf.py | UMCUGenetics/Dx_resources | a85743a02cfff9307ab6773be5c08aa76be24979 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import sys
import os
import re
import subprocess
import argparse
import collections
import copy
import decimal
import vcf
import pysam
import pandas as pd
import settings
def cnv_locationtype(region, par1, par2):
    """Classify a CNV region as autosomal, chrX, chrX-PAR or chrY.

    region -- (chromosome, start, stop)
    par1, par2 -- (start, stop) of the two pseudo-autosomal regions on chrX
    return -- "chrXpar", "chrX", "chrY" or "auto"
    """
    chromosome = str(region[0]).upper()
    begin, end = int(region[1]), int(region[2])
    if chromosome == "Y":
        return "chrY"
    if chromosome != "X":
        return "auto"
    # "chrXpar" only when the CNV is fully nested in PAR1 or PAR2
    inside_par1 = begin >= par1[0] and end <= par1[1]
    inside_par2 = begin >= par2[0] and end <= par2[1]
    return "chrXpar" if (inside_par1 or inside_par2) else "chrX"
if __name__ == "__main__":
    # Command-line interface: converts an ExomeDepth CNV CSV into a VCF,
    # using a template VCF for header/record structure.
    parser = argparse.ArgumentParser()
    parser.add_argument('inputcsv', help='Full path to input CSV file')
    parser.add_argument('refset', help='Used reference set ID')
    parser.add_argument('model', help='Used model ID')
    parser.add_argument('gender', choices=['male', 'female'], help='Used gender')
    parser.add_argument('sampleid', help='sampleid name to be included in VCF')
    parser.add_argument('template', help='Full path to template VCF')
    parser.add_argument('runid', help='runid to be added to VCF metadata')
    parser.add_argument('--vcf_filename_suffix', help='suffix to be included in VCF filename. Do not include spaces or underscores in suffix')
    args = parser.parse_args()

    # Use the template's first record as a mutable prototype for every
    # output record; its FORMAT fields are blanked before use.
    vcf_reader = vcf.Reader(open(args.template, 'r'))
    format_keys = vcf_reader.formats.keys()
    record = vcf_reader.__next__()  # First record (dummy for all other records)
    new_record = copy.deepcopy(record)
    new_record.samples[0].data = collections.namedtuple('CallData', format_keys)  # For single sample VCF only!
    format_vals = [record.samples[0].data[vx] for vx in range(len(format_keys))]
    format_dict = dict(zip(format_keys, format_vals))
    for f in ['GT', 'CCN', 'BF', 'RT', 'CR', 'RS', 'IH', 'CM', 'PD', 'TC']:
        format_dict[f] = ""
    new_vals = [format_dict[x] for x in format_keys]
    new_record.samples[0].data = new_record.samples[0].data._make(new_vals)
    df_csv = pd.read_csv(args.inputcsv)
    vcf_reader.samples = [args.sampleid]  # Replace the template sampleid with the requested one

    """Add reference and ED reference set metadata."""
    vcf_reader.metadata['exomedepth_reference'] = [args.refset]
    vcf_reader.metadata['calling_model'] = [args.model]
    vcf_reader.metadata['gender_refset'] = [args.gender]
    vcf_reader.metadata['reference'] = "file:{}".format(settings.reference_genome)
    # Record the exact track-repository commit for provenance.
    dx_track_git = subprocess.getoutput("git --git-dir={repo}/.git log --pretty=oneline --decorate -n 1".format(repo=settings.reffile_repo))
    vcf_reader.metadata['track_repository'] = ["{0}:{1}".format(settings.reffile_repo, dx_track_git)]
    vcf_reader.metadata['runid'] = [args.runid]

    """Open reference genome fasta file"""
    reference_fasta = pysam.Fastafile(settings.reference_genome)

    # Output name: input CSV name minus its ".csv" extension, plus optional suffix.
    if args.vcf_filename_suffix:
        vcf_output_filename = "{input}_{vcf_filename_suffix}.vcf".format(input=args.inputcsv[0:-4], vcf_filename_suffix=args.vcf_filename_suffix)
    else:
        vcf_output_filename = "{input}.vcf".format(input=args.inputcsv[0:-4])
    with open(vcf_output_filename, 'w') as vcf_output_file:
        vcf_writer = vcf.Writer(vcf_output_file, vcf_reader)

        """Determine percentage DEL/(DEL+DUP) for all calls in VCF."""
        dels = 0
        dups = 0
        for index, row in df_csv.iterrows():
            if row['type'] == "deletion":
                dels += 1
            elif row['type'] == "duplication":
                dups += 1
        # NOTE(review): raises ZeroDivisionError when the CSV has no calls -- confirm upstream guarantees >= 1 call
        perc_del = "%.2f" % ((float(dels) / (float(dels) + float(dups))) * 100)
        total_calls = dels + dups

        for index, row in df_csv.iterrows():  # index not used as we only use single sample VCF
            """Change record fields."""
            new_record.CHROM = row['chromosome']
            new_record.POS = row['start']
            new_record.ID = "."
            row_type = str(row['type'])

            """Include reference genome base"""
            reference_base = reference_fasta.fetch(str(row['chromosome']), int(row['start']-1), int(row['start']))  # 0-based coordinates
            new_record.REF = reference_base

            """Write type of call."""
            if row_type == "duplication":
                new_record.ALT = ["<DUP>"]
                new_record.INFO['SVTYPE'] = "DUP"
            elif row_type == "deletion":
                new_record.ALT = ["<DEL>"]
                new_record.INFO['SVTYPE'] = "DEL"
            else:
                new_record.ALT = ["NaN"]
                new_record.INFO['SVTYPE'] = "NaN"

            """Add QUAL and Filter fields """
            new_record.QUAL = "1000"  # as STRING
            new_record.FILTER = "PASS"

            """Determine genotype."""
            ratio = row['reads.ratio']
            if str(ratio) == "inf":  # Rename infinity values to 99
                ratio = 99
            """Consider homozygous genotype only for deletion and with ratio <0.25."""
            if row_type.lower() == "deletion" and float(ratio) < float(settings.ratio_threshold_del):
                genotype = "1/1"
            else:  # Always het for duplication, and het for deletion if not < settings.ratio_threshold_del
                genotype = "0/1"

            """Determine copy number. Note this will not work for mosaik events"""
            par1 = settings.par1
            par2 = settings.par2
            normal_CN = settings.normal_CN
            region = [str(row['chromosome']), int(row['start']), int(row['end'])]
            locus_type = cnv_locationtype(region, par1, par2)
            normal_copy = float(normal_CN[args.gender][locus_type])
            calc_copynumber = normal_copy * float(ratio)
            # Estimate true copynumber by rounding to nearest integer
            copynumber = int(decimal.Decimal(calc_copynumber).quantize(decimal.Decimal('0'), rounding=decimal.ROUND_HALF_UP))

            if args.gender == "female" and locus_type == "chrY":
                """In case CNV is detected on chrY in female, correct for this"""
                print("WARNING: {sample} chromosome Y CNV detected (region = {region}) in female, calc_copynumber set to 0 (deletion call) or 1 (duplication call)".format(
                    sample = str(args.sampleid),
                    region = str("_".join(str(x) for x in region))
                ))
                # CNV CN is set to 1, could also be >1 (but makes no biological sense)
                if ratio > 1:
                    calc_copynumber = 1
                    genotype = "1/1"
                else:  # Assuming CNV is called by noise/mismapping on chrY, set CN to 0. Could result in masking contamination of male sample?
                    calc_copynumber = 0
                    genotype = "1/1"
            else:
                if row_type == "deletion" and calc_copynumber > normal_copy or row_type == "duplication" and calc_copynumber < normal_copy:
                    """ If calc_copynumber is opposite of expected CN for region, i.e. ratio 1.5 for a deletion"""
                    print("WARNING: {sample} CNV copynumber estimation {copynumber} does not match CNV type {rowtype} for region {region}".format(
                        sample = str(args.sampleid),
                        copynumber = str(float(calc_copynumber)),
                        rowtype = row_type,
                        region = str("_".join(str(x) for x in region))
                    ))
                    """Note: no correction here. should be bugfix in the ExomeDepth code"""
                if copynumber == int(normal_copy):
                    """ Estimated copynumber is similar to copyneutral """
                    print("WARNING: {sample} true copynumber for region {region} is same as normal CN > set to -1 for deletion, +1 for duplication".format(
                        sample = str(args.sampleid),
                        region = str("_".join(str(x) for x in region))
                    ))
                    if row_type == "deletion":  # If deletion correct copynumber with -1
                        copynumber -= 1
                        if copynumber == 0:
                            genotype = "1/1"
                    elif row_type == "duplication":  # If duplication correct copynumber with +1
                        copynumber += 1

            """Change INFO fields"""
            new_record.INFO['END'] = row['end']
            new_record.INFO['NTARGETS'] = row['nexons']
            new_record.INFO['SVLEN'] = int(row['end']) - int(row['start'])  # Input is assumed 0-based
            new_record.INFO['CN'] = copynumber
            call_conrad = row['Conrad.hg19']
            if str(call_conrad) == "nan":
                call_conrad = "NaN"
            new_record.INFO['cCNV'] = call_conrad

            """Change FORMAT fields"""
            # Blank all managed FORMAT fields first, then fill them per call.
            for f in ['GT', 'CCN', 'BF', 'RT', 'CR', 'RS', 'IH', 'CM', 'PD', 'TC']:
                format_dict[f] = ""
            format_dict['GT'] = str(genotype)
            format_dict['CCN'] = "%.2f" % (float(calc_copynumber))
            format_dict['BF'] = "%.2f" % (float(row['BF']))
            format_dict['RT'] = "%.2f" % (float(ratio))
            format_dict['CR'] = "%.4f" % (float(row['correlation']))
            format_dict['RS'] = row['refsize']
            format_dict['IH'] = "NaN"  # Inheritance is not built in yet
            format_dict['CM'] = args.model
            format_dict['PD'] = perc_del
            format_dict['TC'] = total_calls
            new_vals = [format_dict[x] for x in format_keys]
            new_record.samples[0].data = new_record.samples[0].data._make(new_vals)  # NOTE: GT must be first in order of metadata!
            vcf_writer.write_record(new_record)
        vcf_writer.flush()
    reference_fasta.close()
| 49.232673 | 171 | 0.578783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,195 | 0.321267 |
45ebe3749e2046bc703af338f5dbb735d4819c59 | 11,617 | py | Python | wallet.py | ock666/python-blockchain | 3729ba0b3de03bc0011b1cb5cb535573ca24b79c | [
"MIT"
] | null | null | null | wallet.py | ock666/python-blockchain | 3729ba0b3de03bc0011b1cb5cb535573ca24b79c | [
"MIT"
] | null | null | null | wallet.py | ock666/python-blockchain | 3729ba0b3de03bc0011b1cb5cb535573ca24b79c | [
"MIT"
] | null | null | null | import binascii
import json
import os
import requests
from time import time
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from Crypto.Hash import RIPEMD160
from Crypto.Signature import pkcs1_15
import Validation
import PySimpleGUI as sg
import hashlib
class Wallet:
    """GUI wallet: manages an RSA keypair on disk, builds/signs/broadcasts
    transactions, and queries a blockchain node over HTTP."""

    # Wall-clock time at class definition (not per-instance).
    unix_time = time()

    def __init__(self):
        # Generate a keypair on first run before anything reads it.
        if not os.path.isfile('data/wallet.json'):
            self.generate_wallet()
        self.nodes = []
        # GUI INIT
        layout = [[sg.Text('Please enter the address/ip and port of a known node')],
                  [sg.InputText()],
                  [sg.Submit(), sg.Cancel()]]
        window = sg.Window('Wallet waiting to connect...', layout)
        event, values = window.read()
        window.close()
        # variable inits
        self.node = values[0]
        self.nodes.append(self.node)
        sg.popup("Connecting to ", values[0])
        # get the chain from the blockchain node
        self.chain = self.get_chain()
        # load our wallet file
        wallet_file = json.load(open('data/wallet.json', 'r'))
        self.private_key = RSA.import_key(wallet_file['private key'])
        self.public_key = RSA.import_key(wallet_file['public key'])
        self.public_key_hex = wallet_file['public key hex']
        self.public_key_hash = wallet_file['public key hash']
        # if wallet doesn't exist we'll generate one
        # NOTE(review): this check duplicates the one at the top of __init__
        if not os.path.exists('data'):
            os.makedirs('data')
        if not os.path.isfile('data/wallet.json'):
            self.generate_wallet()

    # wallet file functions
    def generate_wallet(self):
        """Create a fresh 2048-bit RSA keypair and persist it to data/wallet.json.

        The wallet address (public key hash) is RIPEMD160(SHA256(pubkey hex)).
        """
        private_key = RSA.generate(2048)
        private_key_plain = private_key.export_key("PEM")
        public_key_plain = private_key.publickey().export_key("PEM")
        public_key = private_key.publickey().export_key("DER")
        public_key_hex = binascii.hexlify(public_key).decode("utf-8")
        public_key_hash = self.calculate_hash(self.calculate_hash(public_key_hex, hash_function="sha256"),
                                              hash_function="ripemd160")
        wallet_data = {
            'private key': private_key_plain.decode(),
            'public key': public_key_plain.decode(),
            'public key hex': public_key_hex,
            'public key hash': public_key_hash
        }
        self.write_json(wallet_data, 'w')

    def write_json(self, data, mode, filename='data/wallet.json'):
        """Serialize `data` as indented JSON to `filename` using file `mode`."""
        # opens the file in the given mode
        with open(filename, mode) as file:
            block_dict = json.dumps(data, indent=6)
            file.write(block_dict)

    # hash functions
    @staticmethod
    def hash(block):
        """Return the SHA-256 hex digest of a dict, with sorted keys."""
        # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes
        block_string = json.dumps(block, sort_keys=True).encode()
        return hashlib.sha256(block_string).hexdigest()

    def calculate_hash(self, data, hash_function):
        """Hash a UTF-8 string with "sha256" or "ripemd160"; returns hex digest.

        Returns None implicitly for any other hash_function value.
        """
        data = bytearray(data, "utf-8")
        if hash_function == "sha256":
            h = SHA256.new()
            h.update(data)
            return h.hexdigest()
        if hash_function == "ripemd160":
            h = RIPEMD160.new()
            h.update(data)
            return h.hexdigest()

    # functions for transactions
    def new_transaction(self, recipient, amount, unix_time):
        """Build, sign and (after GUI confirmation) broadcast a transaction.

        return -- "cancelled", "confirmed", or False when broadcasting failed.
        """
        sender = self.public_key_hash
        previous_block_hash = self.get_last_block_hash()
        # Fee is computed over the unsigned transaction's byte size.
        trans_data = {
            'sender': sender,
            'recipient': recipient,
            'amount': amount,
            'time_submitted': unix_time,
            'previous_block_hash': previous_block_hash,
            'public_key_hex': self.public_key_hex
        }
        total_bytes = self.calculate_bytes(trans_data)
        fee = self.calculate_fee(total_bytes)
        total_amount = amount + fee
        transaction = {
            'sender': sender,
            'recipient': recipient,
            'amount': amount,
            'fee': fee,
            'time_submitted': unix_time,
            'previous_block_hash': previous_block_hash,
            'public_key_hex': self.public_key_hex
        }
        # Hash the fee-inclusive transaction, then sign the hash-inclusive dict.
        hashed_trans = self.hash(transaction)
        trans_with_hash = {
            'sender': sender,
            'recipient': recipient,
            'amount': amount,
            'fee': fee,
            'time_submitted': trans_data['time_submitted'],
            'previous_block_hash': previous_block_hash,
            'public_key_hex': self.public_key_hex,
            'transaction_hash': hashed_trans
        }
        signed_trans = self.sign(trans_with_hash)
        full_transaction = {
            'sender': sender,
            'recipient': recipient,
            'amount': amount,
            'fee': fee,
            'time_submitted': trans_data['time_submitted'],
            'previous_block_hash': previous_block_hash,
            'public_key_hex': self.public_key_hex,
            'transaction_hash': hashed_trans,
            'signature': signed_trans
        }
        # Confirmation dialog showing amount, fee and total.
        confirmation_window_layout = [
            [sg.Text("Are you sure you want to send this Transaction?")],
            [[sg.Text("Recipient", justification='left'), sg.Text(recipient, justification='right')]],
            [[sg.Text("Amount to send: ", justification='left')], [sg.Text(amount, justification='right')]],
            [[sg.Text("Transaction Fee: ", justification='left')], sg.Text(fee, justification='right')],
            [[sg.Text("Total Amount: ", justification='left')], sg.Text(total_amount, justification='right')],
            [[sg.Button('Confirm')], [sg.Button('Exit')]]
        ]
        window = sg.Window('Python-blockchain Wallet', confirmation_window_layout)
        event, values = window.read()
        if event in (sg.WIN_CLOSED, 'Exit'):
            window.close()
            return "cancelled"
        if event in 'Confirm':
            if self.broadcast_transaction(full_transaction):
                self.chain = self.get_chain()
                window.close()
                return "confirmed"
            else:
                self.chain = self.get_chain()
                window.close()
                return False

    def broadcast_transaction(self, transaction):
        """POST the transaction to the known node(s).

        return -- True on HTTP 201, False otherwise.
        NOTE(review): returns after the FIRST node regardless of outcome.
        """
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        for node in self.nodes:
            response = requests.post(f'http://{node}/transactions/new', json=transaction, headers=headers)
            if response.status_code == 201:
                return True
            else:
                return False

    def sign_transaction_data(self, data):
        """Sign SHA256(sorted-key JSON of `data`) with the wallet's private key."""
        transaction_bytes = json.dumps(data, sort_keys=True).encode('utf-8')
        hash_object = SHA256.new(transaction_bytes)
        signature = pkcs1_15.new(self.private_key).sign(hash_object)
        return signature

    def calculate_bytes(self, transaction):
        """Return the ASCII-encoded JSON size of `transaction` in bytes."""
        tx_string = json.dumps(transaction)
        tx_bytes = tx_string.encode('ascii')
        return len(tx_bytes)

    def calculate_fee(self, tx_bytes_length):
        """Fee = 0.25 per KB over (payload bytes + 800 bytes signature/hash overhead)."""
        per_kb_fee = 0.25
        sig_hash_bytes = 800
        total = tx_bytes_length + sig_hash_bytes
        return (total / 1000) * per_kb_fee

    def sign(self, data):
        """Return the hex-encoded signature of `data`."""
        signature_hex = binascii.hexlify(self.sign_transaction_data(data)).decode("utf-8")
        return signature_hex

    # functions for getting blockchain data
    def get_balance(self):
        """Return this wallet's spendable balance from the cached chain (0 if none)."""
        chain_balance = Validation.enumerate_funds(self.public_key_hash, self.chain)
        if chain_balance > 0:
            return chain_balance
        # enumerate_funds signals "no funds" with False; 0 == False also matches.
        if chain_balance == False:
            return 0

    def get_block_height(self):
        """Return the index of the last block reported by the first responsive node."""
        for node in self.nodes:
            response = requests.get(f'http://{node}/chain')
            if response.status_code == 200:
                length = response.json()['length']
                chain = response.json()['chain']
                return chain[length - 1]['index']

    def get_last_block_hash(self):
        """Return the hash of the last block reported by the first responsive node."""
        for node in self.nodes:
            response = requests.get(f'http://{node}/chain')
            if response.status_code == 200:
                length = response.json()['length']
                chain = response.json()['chain']
                return chain[length - 1]['current_hash']

    def get_chain(self):
        """Fetch the full chain from the first responsive node (None on failure)."""
        for node in self.nodes:
            response = requests.get(f'http://{node}/chain')
            if response.status_code == 200:
                chain = response.json()['chain']
                return chain
wallet = Wallet()

# Main window layout.
layout = [
    [sg.Text('Welcome to the Python-blockchain wallet')],
    [sg.Text('Your blockchain address'), sg.Text(wallet.public_key_hash)],
    [sg.Text("Available Funds: "), sg.Text(wallet.get_balance(), key='-BALANCE-')],
    [sg.Button('Update Blockchain'), sg.Button('Transaction History')],
    [sg.Text("Address: "), sg.InputText(key='-ADDRESS-', size=(20, 20)), sg.Text("Amount: "),
     sg.InputText(key='-AMOUNT-', size=(8, 20)), sg.Button('Send Transaction')],
    [sg.Button('Exit')]
]
window = sg.Window('Python-blockchain Wallet', layout)

# GUI event loop.
# Bug fix: the original used ``event in 'Update Blockchain'`` etc.
# ``in`` on a string is a *substring* test (e.g. event 'Update' would also
# match), so exact equality is used here instead.
while True:
    event, values = window.read()
    if event in (sg.WIN_CLOSED, 'Exit'):
        break
    if event == 'Update Blockchain':
        wallet.chain = wallet.get_chain()
        # (a redundant extra get_balance() call was removed here)
        window['-BALANCE-'].update(wallet.get_balance())
    if event == 'Transaction History':
        # NOTE(review): the main window is closed here and never reopened,
        # so the next window.read() in the loop operates on a closed window.
        window.close()
        # code to find relevant transactions in the blockchain pertaining to our wallets address
        chain = wallet.get_chain()  # get the chain
        sent = []  # list for storing sent transactions
        received = []  # list for storing received transactions
        for block in chain:  # iterate through the blockchain
            for transaction in block['transactions']:
                # code to find received transactions
                if transaction['recipient'] == wallet.public_key_hash:
                    print("received: ", transaction)
                    received.append(transaction)
                # code to find sent transactions
                if transaction['sender'] == wallet.public_key_hash:
                    print("sent: ", transaction)
                    sent.append(transaction)
        sent_json = json.dumps(sent, indent=2)
        received_json = json.dumps(received, indent=2)
        transaction_window_layout = [
            [sg.Text("Sent Transactions:")],
            [sg.Multiline(sent_json, size=(100, 25))],
            [sg.Text("Received Transactions:")],
            [sg.Multiline(received_json, size=(100, 25))],
            [sg.Button('Exit')]
        ]
        transaction_window = sg.Window('Transaction History', transaction_window_layout)
        # Bug fix: the result was read into ``events`` but ``event`` (the
        # outer loop variable) was tested, so the history window was never
        # closed by its own 'Exit' button.
        history_event, _ = transaction_window.read()
        if history_event in (sg.WIN_CLOSED, 'Exit'):
            transaction_window.close()
    if event == 'Send Transaction':
        time = wallet.unix_time
        recipient = values['-ADDRESS-']
        amount = float(values['-AMOUNT-'])
        # Bug fix: new_transaction() was called twice (once per comparison),
        # re-opening the confirmation window and re-submitting the
        # transaction a second time. Call it once and branch on the result.
        result = wallet.new_transaction(recipient, amount, time)
        if result == "confirmed":
            sg.popup(
                'Transaction submitted and accepted by network...\nPlease wait for next block confirmation for transaction to confirm')
        elif result == "cancelled":
            sg.popup("Transaction Cancelled")
        else:
            sg.popup(
                'Transaction denied by network\nyou either have unconfirmed transactions in the mempool or insufficient balance.\nPlease try again')
window.close()
| 34.574405 | 148 | 0.597659 | 8,236 | 0.708961 | 0 | 0 | 250 | 0.02152 | 0 | 0 | 2,638 | 0.227081 |
45ed841d519de0d549307e7c5e5b5c1fb5b0def1 | 2,586 | py | Python | opps/core/tags/views.py | jeanmask/opps | 031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87 | [
"MIT"
] | 159 | 2015-01-03T16:36:35.000Z | 2022-03-29T20:50:13.000Z | opps/core/tags/views.py | jeanmask/opps | 031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87 | [
"MIT"
] | 81 | 2015-01-02T21:26:16.000Z | 2021-05-29T12:24:52.000Z | opps/core/tags/views.py | jeanmask/opps | 031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87 | [
"MIT"
] | 75 | 2015-01-23T13:41:03.000Z | 2021-09-24T03:45:23.000Z | # -*- encoding: utf-8 -*-
from django.utils import timezone
from django.contrib.sites.models import get_current_site
from django.conf import settings
from haystack.query import SearchQuerySet
from opps.views.generic.list import ListView
from opps.containers.models import Container
from opps.channels.models import Channel
from .models import Tag
USE_HAYSTACK = getattr(settings, 'OPPS_TAGS_USE_HAYSTACK', False)
class TagList(ListView):
    """List view of containers matching a tag, backed by Haystack or the DB."""
    model = Container

    def get_template_list(self, domain_folder="containers"):
        """Return candidate template paths for the tag listing page."""
        list_name = 'list_tags'
        templates = []
        wants_paginated = (
            self.request.GET.get('page') and
            self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP)
        if wants_paginated:
            templates.append('{0}/{1}_paginated.html'.format(domain_folder,
                                                             list_name))
        templates.append('{0}/{1}.html'.format(domain_folder, list_name))
        return templates

    def get_context_data(self, **kwargs):
        """Expose the requested tag and the site's homepage channel."""
        context = super(TagList, self).get_context_data(**kwargs)
        context['tag'] = self.kwargs['tag']
        current_site = get_current_site(self.request)
        context['channel'] = Channel.objects.get_homepage(current_site)
        return context

    def get_queryset(self):
        """Dispatch to the Haystack- or DB-backed queryset implementation."""
        self.site = get_current_site(self.request)
        # without the long_slug, the queryset will cause an error
        self.long_slug = 'tags'
        self.tag = self.kwargs['tag']
        if USE_HAYSTACK:
            return self.get_queryset_from_haystack()
        return self.get_queryset_from_db()

    def get_queryset_from_haystack(self):
        """Search all Container subclasses for the tag via Haystack."""
        models = Container.get_children_models()
        sqs = SearchQuerySet().models(*models).filter(
            tags=self.tag).order_by('-date_available')
        sqs.model = Container
        return sqs

    def get_queryset_from_db(self):
        """Collect published, available containers whose tags contain the tag name."""
        tag_rows = Tag.objects.filter(slug=self.tag).values_list('name') or []
        tag_names = [row[0] for row in tag_rows] if tag_rows else []
        matched_ids = []
        for name in tag_names:
            result = self.containers = self.model.objects.filter(
                site_domain=self.site,
                tags__contains=name,
                date_available__lte=timezone.now(),
                published=True
            )
            if result.exists():
                matched_ids.extend([item.id for item in result])
        # drop duplicates collected across overlapping tag names
        matched_ids = list(set(matched_ids))
        # grab the containers in a single query
        self.containers = self.model.objects.filter(id__in=matched_ids)
        return self.containers
| 31.156627 | 75 | 0.626837 | 2,165 | 0.8372 | 0 | 0 | 0 | 0 | 0 | 0 | 268 | 0.103635 |
45edca108e30053b7caf54d078e854708b738e3e | 300 | py | Python | Scripts/TK_xls-to-peaks.py | colinwalshbrown/CWB_utils | 86675812f9398845d1994b57500830e2c3dc6cc0 | [
"MIT"
] | null | null | null | Scripts/TK_xls-to-peaks.py | colinwalshbrown/CWB_utils | 86675812f9398845d1994b57500830e2c3dc6cc0 | [
"MIT"
] | null | null | null | Scripts/TK_xls-to-peaks.py | colinwalshbrown/CWB_utils | 86675812f9398845d1994b57500830e2c3dc6cc0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
if len(sys.argv) < 2:
print "usage: TK_xls-to-peaks.py <TK_xls>"
sys.exit(0)
for line in open(sys.argv[1]):
l = line[:-1].split()
for (i,x) in enumerate(l[4][:-1].split(",")):
print "\t".join((l[0],l[1],x,l[5][:-1].split(",")[i]))
| 21.428571 | 62 | 0.52 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.223333 |
45ee805f01f9aaeb166bd38905c0d475f3c26174 | 538 | py | Python | ibsng/handler/invoice/get_invoice_by_i_d.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 6 | 2018-03-06T10:16:36.000Z | 2021-12-05T12:43:10.000Z | ibsng/handler/invoice/get_invoice_by_i_d.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 3 | 2018-03-06T10:27:08.000Z | 2022-01-02T15:21:27.000Z | ibsng/handler/invoice/get_invoice_by_i_d.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 3 | 2018-01-06T16:28:31.000Z | 2018-09-17T19:47:19.000Z | """Get invoice by id API method."""
from ibsng.handler.handler import Handler
class getInvoiceByID(Handler):
    """Handler for the "get invoice by id" API method."""

    def setup(self, invoice_id):
        """Store the parameters required by this call.

        :param int invoice_id: ibsng invoice id
        :return: None
        :rtype: None
        """
        self.invoice_id = invoice_id

    def control(self):
        """Validate the stored parameters after setup.

        :return: None
        :rtype: None
        """
        self.is_valid(self.invoice_id, int)
| 21.52 | 47 | 0.598513 | 457 | 0.849442 | 0 | 0 | 0 | 0 | 0 | 0 | 300 | 0.557621 |
45eea81559471893b1def8629833f3b22ac4855f | 5,848 | py | Python | data/semialigned_dataset.py | jlim13/pytorch-CycleGAN-and-pix2pix | 15d0cb5b81034ade7e8f160c973bf119c118026f | [
"BSD-3-Clause"
] | null | null | null | data/semialigned_dataset.py | jlim13/pytorch-CycleGAN-and-pix2pix | 15d0cb5b81034ade7e8f160c973bf119c118026f | [
"BSD-3-Clause"
] | null | null | null | data/semialigned_dataset.py | jlim13/pytorch-CycleGAN-and-pix2pix | 15d0cb5b81034ade7e8f160c973bf119c118026f | [
"BSD-3-Clause"
] | null | null | null | import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import torchvision
import numpy as np
class SemiAlignedDataset(BaseDataset):
    """
    This dataset class loads a mixture of unpaired and paired datasets.

    It expects the following layout under the dataset root:

        unaligned/trainA, unaligned/trainB  -- unpaired images
        aligned/trainA, aligned/trainB      -- paired images, where a pair is
                                               identified by the parent
                                               directory name (label) of each
                                               image

    You can train the model with the dataset flag '--dataroot /path/to/data'.
    Similarly, you need to prepare the corresponding 'testA'/'testB'
    directories during test time.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)

        # unaligned (unpaired) data
        self.unaligned_dir_A = os.path.join(opt.dataroot, 'unaligned', opt.phase + 'A')
        self.unaligned_dir_B = os.path.join(opt.dataroot, 'unaligned', opt.phase + 'B')
        self.unaligned_A_paths = sorted(make_dataset(self.unaligned_dir_A, opt.max_dataset_size))
        self.unaligned_B_paths = sorted(make_dataset(self.unaligned_dir_B, opt.max_dataset_size))
        self.unaligned_A_size = len(self.unaligned_A_paths)
        self.unaligned_B_size = len(self.unaligned_B_paths)

        # aligned (paired) data
        self.aligned_dir_A = os.path.join(opt.dataroot, 'aligned', opt.phase + 'A')
        self.aligned_dir_B = os.path.join(opt.dataroot, 'aligned', opt.phase + 'B')
        self.aligned_A_paths = sorted(make_dataset(self.aligned_dir_A, opt.max_dataset_size))
        self.aligned_B_paths = sorted(make_dataset(self.aligned_dir_B, opt.max_dataset_size))
        self.aligned_A_size = len(self.aligned_A_paths)
        self.aligned_B_size = len(self.aligned_B_paths)

        # Map each label (parent directory name) to the B-domain images that
        # carry it, so __getitem__ can sample an aligned partner quickly.
        self.aligned_glossary = {}
        for im in self.aligned_B_paths:
            label = im.split('/')[-2]
            self.aligned_glossary.setdefault(label, []).append(im)

        btoA = self.opt.direction == 'BtoA'
        input_nc = self.opt.output_nc if btoA else self.opt.input_nc    # channels of input image
        output_nc = self.opt.input_nc if btoA else self.opt.output_nc  # channels of output image
        self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
        self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Each call randomly (50/50) draws either an unaligned pair or an
        aligned pair (B chosen among images sharing A's label).

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor)       -- an image in the input domain
            B (tensor)       -- its corresponding image in the target domain
            A_paths (str)    -- image paths
            B_paths (str)    -- image paths
        """
        flip = random.randint(0, 1)
        if flip == 0:  # unaligned pair
            A_path = self.unaligned_A_paths[index % self.unaligned_A_size]
            if self.opt.serial_batches:   # fixed pairs for deterministic runs
                index_B = index % self.unaligned_B_size
            else:                         # randomize B to avoid fixed pairs
                index_B = random.randint(0, self.unaligned_B_size - 1)
            B_path = self.unaligned_B_paths[index_B]
        else:  # aligned pair: B is drawn from the images sharing A's label
            A_path = self.aligned_A_paths[index % self.aligned_A_size]
            label = A_path.split('/')[-2]
            aligned_B_paths = self.aligned_glossary[label]
            if self.opt.serial_batches:
                # (a stray debug print was removed here)
                index_B = index % len(aligned_B_paths)
            else:
                index_B = random.randint(0, len(aligned_B_paths) - 1)
            B_path = aligned_B_paths[index_B]

        # Load and transform the chosen pair (identical for both branches).
        A_img = Image.open(A_path).convert('RGB')
        B_img = Image.open(B_path).convert('RGB')
        A = self.transform_A(A_img)
        B = self.transform_B(B_img)

        return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}

    def __len__(self):
        """Return the total number of images in the dataset.

        As we have two datasets with potentially different number of images,
        we take a maximum of the two unaligned sides.
        """
        return max(self.unaligned_A_size, self.unaligned_B_size)
| 47.16129 | 142 | 0.642613 | 5,657 | 0.967339 | 0 | 0 | 0 | 0 | 0 | 0 | 2,492 | 0.426129 |
45eef8d38d3d2882fbd9149df61dee967b85e748 | 1,193 | py | Python | iotcookbook/device/pi/neopixel/client.py | Weeshlow/crossbarexamples | e392ea2708f52ba322e63808041f9fe6dcfe893e | [
"Apache-2.0"
] | 2 | 2020-11-05T21:59:58.000Z | 2020-11-05T22:00:04.000Z | iotcookbook/device/pi/neopixel/client.py | Weeshlow/crossbarexamples | e392ea2708f52ba322e63808041f9fe6dcfe893e | [
"Apache-2.0"
] | null | null | null | iotcookbook/device/pi/neopixel/client.py | Weeshlow/crossbarexamples | e392ea2708f52ba322e63808041f9fe6dcfe893e | [
"Apache-2.0"
] | null | null | null | import time
import random
import Adafruit_ADS1x15
from neopixel import *
LED_COUNT = 8 # Number of LED pixels.
LED_PIN = 12 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 80 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
def show_level(strip, value):
    """Light the leading fraction *value* of the strip white, the rest off."""
    total = strip.numPixels()
    lit = int(round(float(total) * value))
    for pixel in range(total):
        color = Color(255, 255, 255) if pixel < lit else Color(0, 0, 0)
        strip.setPixelColor(pixel, color)
    strip.show()
if __name__ == '__main__':
    # Set up the ADC (sound/analog input) and the NeoPixel strip.
    adc = Adafruit_ADS1x15.ADS1015()
    strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
    strip.begin()
    # NOTE(review): 'while False' never executes, so this level-meter loop is
    # dead code — presumably a disabled demo ('while True' would run it
    # forever at ~50 Hz). Confirm intent before enabling.
    while False:
        c = 800.
        value = 0
        # Average two ADC readings, normalized against the 800-count baseline.
        for i in range(2):
            value += (c - float(adc.read_adc(0, gain=8))) / c
        show_level(strip, value / 2.)
        time.sleep(20./1000.)
    # Turn every pixel off before exiting.
    for i in range(strip.numPixels()):
        strip.setPixelColor(i, Color(0, 0, 0))
    strip.show()
| 29.825 | 96 | 0.677284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.248952 |
45f3c2d27aa4a16e31771b7d1672513375750f1f | 64 | py | Python | src/strokes.py | jafetimbre/pil-to-ps | 003361a5b49b212500be6e64a27211691c65cd7b | [
"MIT"
] | null | null | null | src/strokes.py | jafetimbre/pil-to-ps | 003361a5b49b212500be6e64a27211691c65cd7b | [
"MIT"
] | null | null | null | src/strokes.py | jafetimbre/pil-to-ps | 003361a5b49b212500be6e64a27211691c65cd7b | [
"MIT"
] | null | null | null |
def inner_stroke(im):
    """Apply an inner stroke effect to *im*. Stub: not implemented yet."""
    pass
def outer_stroke(im):
    """Apply an outer stroke effect to *im*. Stub: not implemented yet."""
    pass
| 9.142857 | 21 | 0.65625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
45f3dd0f0a21b65109b31f2a77f344a848021f3d | 3,118 | py | Python | find_sources.py | DarthPumpkin/github-search | 2dc8489380825b83e2d2773bc32d439bd30e1d00 | [
"MIT"
] | null | null | null | find_sources.py | DarthPumpkin/github-search | 2dc8489380825b83e2d2773bc32d439bd30e1d00 | [
"MIT"
] | 1 | 2018-05-07T10:05:34.000Z | 2018-05-17T23:52:12.000Z | find_sources.py | DarthPumpkin/github-search | 2dc8489380825b83e2d2773bc32d439bd30e1d00 | [
"MIT"
] | null | null | null | import requests
import json
import time
import os
import sys
# ANSI escape sequences used for colored progress output.
green = "\x1b[38;2;0;255;0m"        # 24-bit foreground: bright green
greenish = "\x1b[38;2;93;173;110m"  # 24-bit foreground: muted green
red = "\x1b[38;2;255;0;0m"          # 24-bit foreground: red
grey = "\x1b[38;2;193;184;192m"     # 24-bit foreground: light grey
reset = "\033[0m"                   # reset all attributes
clear_line = "\033[0K"              # erase from cursor to end of line
# Maximum repository size in megabytes
MAX_REPO_SIZE = 5


def load_cache():
    """Load all cached search results from the ``sources`` directory.

    Returns:
        list[dict]: unique repositories as ``{"url": ..., "score": ...}``,
        skipping repositories larger than ``MAX_REPO_SIZE`` megabytes.
        The score weighs stars twice as heavily as watchers, plus 1.
    """
    result = []
    seen = set()
    for filename in os.listdir("sources"):
        if not filename.endswith(".json"):
            continue
        # Bug fix: the original left the handle from open(...).read() open;
        # use a context manager so it is closed promptly.
        with open("sources/" + filename) as f:
            data = json.load(f)
        for item in data["items"]:
            size_in_mb = item["size"] / 1000  # API reports size in kilobytes
            if size_in_mb > MAX_REPO_SIZE:
                continue
            url = item["clone_url"]
            if url not in seen:
                seen.add(url)
                result.append({
                    "url": url,
                    "score": item["stargazers_count"] + 0.5 * item["watchers_count"] + 1,
                })
    return result
def create_cache():
    """Query the GitHub search API and cache each result page to disk.

    For every query in ``queries`` this walks result pages 1..99, saving each
    page as ``sources/query_<id>_page_<n>.json``. Pages already on disk are
    skipped, so the function is resumable. Failed requests are retried after
    a pause; the loop stops early when GitHub reports its 1000-result cap.
    """
    print(green + "Searching github..." + reset)
    if not os.path.exists("sources"):
        os.mkdir("sources")
    queries = ["utilities", "", "useful", "tools"]
    for query_id, query in enumerate(queries):
        page = 1
        print("\r" + clear_line + "\n" + greenish +
              "Searching using query '{}' (id: {})".format(query, query_id))
        while page < 100:
            path = "sources/query_{}_page_{}.json".format(query_id, page)
            if os.path.exists(path):
                # Cached page: skip the network round-trip entirely.
                print("\r" + clear_line + green +
                      "Skipping page {}, using data from cache...".format(page) + reset)
                page += 1
                continue
            else:
                print("\r" + clear_line + green +
                      "Searching page {}... ".format(page) + reset, end="")
                r = requests.get(
                    "https://api.github.com/search/repositories?q={}+language:java&sort=stars&order=desc&page={}".format(query, page))
                if not r.ok:
                    # GitHub search only exposes the first 1000 results; stop
                    # paging this query when that cap is reported.
                    if "Only the first 1000 search results are available" in r.text:
                        print(
                            "limit reached: only the first 1000 search results are available")
                        break
                    print(red + "Query failed\n" + r.text + reset)
                    print("Sleeping for some time before retrying")
                    time.sleep(10)
                    continue
                try:
                    data = json.loads(r.text)
                except Exception as e:
                    # Malformed body (e.g. truncated response): retry the page.
                    print("Json parsing failed")
                    print(e)
                    print("Sleeping for some time before retrying", end="")
                    time.sleep(10)
                    continue
                print(green + "done" + reset)
                with open(path, "w") as f:
                    f.write(json.dumps(data))
                page += 1
            # Github rate limit of 10 requests per minute
            print(grey + "Sleeping due to rate limit..." + reset, end="")
            sys.stdout.flush()
            time.sleep(60/10)
if __name__ == "__main__":
create_cache()
print(load_cache())
| 32.821053 | 130 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 894 | 0.286722 |
45f3fcad09fc42f40418fb6463c46682701c0e2e | 437 | py | Python | aproaches/edit_distance.py | tyomik-mnemonic/genling | a04c5ea39c85dadf2ceabbe145cc8af368feaf3d | [
"MIT"
] | null | null | null | aproaches/edit_distance.py | tyomik-mnemonic/genling | a04c5ea39c85dadf2ceabbe145cc8af368feaf3d | [
"MIT"
] | null | null | null | aproaches/edit_distance.py | tyomik-mnemonic/genling | a04c5ea39c85dadf2ceabbe145cc8af368feaf3d | [
"MIT"
] | null | null | null | import numpy as np
class JaroDist:
    """Accumulates per-position character matches between two words.

    Groundwork for a Jaro-style similarity: ``compare`` records a 1 in ``m``
    for every aligned position where the two words agree (0 otherwise) and
    stores half the number of matches in ``t``.
    """

    def __init__(self, w1: str, w2: str):
        # Bug fix: the original __init__ was missing ``self`` entirely,
        # so the class could never be instantiated.
        self.w1 = w1
        self.w2 = w2
        # Bug fix: ``m`` was initialized to None, making the later
        # append calls fail; start from an empty list instead.
        self.m: list = []
        self.t: float = None

    def compare(self):
        """Fill ``m`` with per-position match flags and set ``t``."""
        # Bug fix: the original read the free names ``w1``/``w2`` instead
        # of the instance attributes stored by __init__.
        for a, b in zip(self.w1, self.w2):
            self.m.append(1 if a == b else 0)
        self.t = sum(self.m) / 2
45f527d040e941bb168a70a411e833a07e6375f2 | 3,228 | py | Python | packages/plugins/minos-broker-kafka/tests/test_kafka/test_publisher.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | null | null | null | packages/plugins/minos-broker-kafka/tests/test_kafka/test_publisher.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | null | null | null | packages/plugins/minos-broker-kafka/tests/test_kafka/test_publisher.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import (
AsyncMock,
)
from aiokafka import (
AIOKafkaProducer,
)
from minos.common import (
MinosConfig,
)
from minos.networks import (
BrokerMessage,
BrokerMessageV1,
BrokerMessageV1Payload,
BrokerPublisher,
InMemoryBrokerPublisherQueue,
PostgreSqlBrokerPublisherQueue,
)
from minos.plugins.kafka import (
InMemoryQueuedKafkaBrokerPublisher,
KafkaBrokerPublisher,
PostgreSqlQueuedKafkaBrokerPublisher,
)
from tests.utils import (
CONFIG_FILE_PATH,
)
class TestKafkaBrokerPublisher(unittest.IsolatedAsyncioTestCase):
    """Unit tests for KafkaBrokerPublisher."""

    def test_is_subclass(self):
        """KafkaBrokerPublisher must implement the BrokerPublisher interface."""
        self.assertTrue(issubclass(KafkaBrokerPublisher, BrokerPublisher))

    def test_from_config(self):
        """from_config must pick up broker host/port from the minos config."""
        config = MinosConfig(CONFIG_FILE_PATH)
        publisher = KafkaBrokerPublisher.from_config(config)
        self.assertIsInstance(publisher, KafkaBrokerPublisher)
        self.assertEqual(config.broker.host, publisher.broker_host)
        self.assertEqual(config.broker.port, publisher.broker_port)

    async def test_client(self):
        """The underlying client must be an AIOKafkaProducer."""
        publisher = KafkaBrokerPublisher.from_config(CONFIG_FILE_PATH)
        self.assertIsInstance(publisher.client, AIOKafkaProducer)

    async def test_send(self):
        """send() must publish to the message topic with the avro-encoded body."""
        send_mock = AsyncMock()
        message = BrokerMessageV1("foo", BrokerMessageV1Payload("bar"))
        # Patch the producer's send_and_wait so no real Kafka is needed.
        async with KafkaBrokerPublisher.from_config(CONFIG_FILE_PATH) as publisher:
            publisher.client.send_and_wait = send_mock
            await publisher.send(message)
        self.assertEqual(1, send_mock.call_count)
        # First positional arg is the topic, second the serialized message.
        self.assertEqual("foo", send_mock.call_args.args[0])
        self.assertEqual(message, BrokerMessage.from_avro_bytes(send_mock.call_args.args[1]))

    async def test_setup_destroy(self):
        """Entering/leaving the context must start/stop the Kafka client once."""
        publisher = KafkaBrokerPublisher.from_config(CONFIG_FILE_PATH)
        start_mock = AsyncMock()
        stop_mock = AsyncMock()
        publisher.client.start = start_mock
        publisher.client.stop = stop_mock
        async with publisher:
            # Inside the context: started exactly once, not yet stopped.
            self.assertEqual(1, start_mock.call_count)
            self.assertEqual(0, stop_mock.call_count)
            # Reset so the post-exit assertions only see the teardown calls.
            start_mock.reset_mock()
            stop_mock.reset_mock()
        self.assertEqual(0, start_mock.call_count)
        self.assertEqual(1, stop_mock.call_count)
class TestPostgreSqlQueuedKafkaBrokerPublisher(unittest.IsolatedAsyncioTestCase):
    """Unit tests for the PostgreSQL-queued Kafka publisher variant."""

    def test_from_config(self):
        """from_config must build a Kafka impl backed by a PostgreSQL queue."""
        built = PostgreSqlQueuedKafkaBrokerPublisher.from_config(CONFIG_FILE_PATH)
        self.assertIsInstance(built, PostgreSqlQueuedKafkaBrokerPublisher)
        self.assertIsInstance(built.impl, KafkaBrokerPublisher)
        self.assertIsInstance(built.queue, PostgreSqlBrokerPublisherQueue)
class TestInMemoryQueuedKafkaBrokerPublisher(unittest.IsolatedAsyncioTestCase):
    """Unit tests for the in-memory-queued Kafka publisher variant."""

    def test_from_config(self):
        """from_config must build a Kafka impl backed by an in-memory queue."""
        built = InMemoryQueuedKafkaBrokerPublisher.from_config(CONFIG_FILE_PATH)
        self.assertIsInstance(built, InMemoryQueuedKafkaBrokerPublisher)
        self.assertIsInstance(built.impl, KafkaBrokerPublisher)
        self.assertIsInstance(built.queue, InMemoryBrokerPublisherQueue)
if __name__ == "__main__":
unittest.main()
| 33.625 | 93 | 0.746592 | 2,630 | 0.814746 | 0 | 0 | 0 | 0 | 1,255 | 0.388786 | 25 | 0.007745 |
45f53cf6771cde1fc2d9a9e66f10c9b1b627fc8f | 1,406 | py | Python | training.py | BertilBraun/Keras-Testing | d6326fd60b2ef66cef5918963735c09108c57a39 | [
"MIT",
"Unlicense"
] | null | null | null | training.py | BertilBraun/Keras-Testing | d6326fd60b2ef66cef5918963735c09108c57a39 | [
"MIT",
"Unlicense"
] | null | null | null | training.py | BertilBraun/Keras-Testing | d6326fd60b2ef66cef5918963735c09108c57a39 | [
"MIT",
"Unlicense"
] | null | null | null | from model import make_model, IMAGE_SIZE
from tensorflow.keras.preprocessing.image import ImageDataGenerator
model = make_model()

batch_size = 16

# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1./255)

# this is a generator that will read pictures found in
# subfolders of 'PokemonData/' (one subfolder per class), and indefinitely
# generate batches of augmented image data
train_generator = train_datagen.flow_from_directory(
    'PokemonData/',  # this is the target directory
    target_size=IMAGE_SIZE,  # all images will be resized to IMAGE_SIZE
    batch_size=batch_size,
    class_mode='categorical')  # NOTE(review): 'categorical' yields one-hot labels; the old comment claimed binary labels — confirm the model's loss matches

# this is a similar generator, for validation data
# NOTE(review): validation reads the SAME directory as training, so the
# validation metrics are computed on (augmentation-free) training images.
validation_generator = test_datagen.flow_from_directory(
    'PokemonData/',
    target_size=IMAGE_SIZE,
    batch_size=batch_size,
    class_mode='categorical')

# NOTE(review): fit_generator is deprecated in TF2 in favor of model.fit.
model.fit_generator(
    train_generator,
    steps_per_epoch=2000 // batch_size,
    epochs=50,
    validation_data=validation_generator,
    validation_steps=800 // batch_size)

# always save your weights after training or during training
model.save_weights('first_try.h5')
45f5aa52358da93645231378c0fbfaf7b12d6756 | 2,319 | py | Python | examples/custom_node.py | tomaszkurgan/sapling | 717681f04f9cb00a47322c5a16f052b8658d558b | [
"MIT"
] | null | null | null | examples/custom_node.py | tomaszkurgan/sapling | 717681f04f9cb00a47322c5a16f052b8658d558b | [
"MIT"
] | null | null | null | examples/custom_node.py | tomaszkurgan/sapling | 717681f04f9cb00a47322c5a16f052b8658d558b | [
"MIT"
] | null | null | null | import uuid
import sapling
# create your own node by inheritance from sapling.Node
class MyNode(sapling.Node):
def __init__(self, name, data=None, id=None):
self.id = id or uuid.uuid4()
super(MyNode, self).__init__(name, data=data)
# There are two ways to force the sapling.Tree to use your node's class:
# 1. create new tree class using sapling.Tree and your node class as bases
# Because sapling.Tree inherits from sapling.Node class, using dependency injection by inheritance
# gives your the possibility to override much of sapling.Tree features simultaneously with
# sapling.Node features, without any additional effort of overriding function directly in
# tree class.
# All nodes in tree, including tree-node itself, will get all features form your new
# node class.
# 2. pass your node class as parameter while constructing Tree
# Option 2. is monkey patch which create new class inherited from Tree class
# and new node class in runtime.
# Because of that, first solution is the prefered one.
class MyTree(sapling.Tree, MyNode):
    """Tree whose nodes (including the tree-node itself) are MyNode instances."""
    pass
if __name__ == '__main__':
    # Demo (Python 2 syntax): build a tree whose nodes are MyNode instances
    # via the subclassed MyTree, then via the node_cls parameter.
    t = MyTree('a')
    t.insert('a/b/c/d/e', force=True)
    t.insert('a/b/f/g', force=True)
    t.insert('a/b/h/i/j', force=True)
    t.insert('a/b/h/i/k', force=True)
    t.insert('a/b/h/i/l', force=True)
    t.insert('a/z', force=True)
    t.insert('a/zz/e/f/g', force=True)
    t.insert('a/zz/z/f/g', force=True)
    print t.printout()
    # all nodes in the tree are instances of MyNode
    for node in t:
        print node, type(node), node.id
    print '\n\n'
    # if you don't want to create new Tree class
    # use optional parameter while creating Tree instance
    # But remember - it's monkey patching
    t2 = sapling.Tree('a', node_cls=MyNode)
    t2.insert('a/b/c/d/e', force=True)
    t2.insert('a/b/f/g', force=True)
    t2.insert('a/b/h/i/j', force=True)
    t2.insert('a/b/h/i/k', force=True)
    t2.insert('a/b/h/i/l', force=True)
    print t2.printout()
    for node in t2:
        print node, type(node), node.id
    print '\n\n'
    # Show whether repeated node_cls construction reuses the patched class...
    t3 = sapling.Tree('a', node_cls=MyNode)
    print type(t3)
    t4 = sapling.Tree('a', node_cls=MyNode)
    print type(t4)
    print type(t2) is type(t4)
    # ...and compare against instances of the explicitly subclassed MyTree.
    t5 = MyTree('a')
    print type(t5)
    print type(t) is type(t5)
45f7d5e01697e17c5a199f0a10ec2c7a976ffda3 | 760 | py | Python | lesson_06/Classwork_03.py | rotorypower/lessons | bd8f0d54159023e677608f2104e38d82de388fe4 | [
"BSD-3-Clause"
] | null | null | null | lesson_06/Classwork_03.py | rotorypower/lessons | bd8f0d54159023e677608f2104e38d82de388fe4 | [
"BSD-3-Clause"
] | null | null | null | lesson_06/Classwork_03.py | rotorypower/lessons | bd8f0d54159023e677608f2104e38d82de388fe4 | [
"BSD-3-Clause"
] | null | null | null | """Написать функцию которая возвращают случайным образом одну карту из стандартной колоды в 36 карт,
где на первом месте номинал карты номинал (6 - 10, J, D, K, A),
а на втором название масти (Hearts, Diamonds, Clubs, Spades)."""
import random
"""faces = ["6", "7", "8", "9", "10", "J", "D", "K", "A"]
suits = ["Hearts", "Diamonds", "Clubs", "Spades"]
a = random.choice(faces)
b = random.choice(suits)
print(a)
print(b)
"""
def carddraw():
    """Return one random card [face, suit] from a 36-card deck."""
    faces = ["6", "7", "8", "9", "10", "J", "D", "K", "A"]
    suits = ["Hearts", "Diamonds", "Clubs", "Spades"]
    # Build the deck suit-major (same ordering as nested loops would give).
    deck = [[face, suit] for suit in suits for face in faces]
    return random.choice(deck)
if __name__ == "__main__":
print(carddraw()) | 26.206897 | 100 | 0.588158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 618 | 0.688196 |
45f963fdadc62c879fcb2ad403eb6e3254578fab | 5,911 | py | Python | permon/frontend/native/__init__.py | bminixhofer/permon | 47fe06ebdc8c0655f865d38f073bae63a1acbd4d | [
"MIT"
] | 25 | 2018-04-03T00:26:57.000Z | 2021-11-08T12:05:07.000Z | permon/frontend/native/__init__.py | bminixhofer/permon | 47fe06ebdc8c0655f865d38f073bae63a1acbd4d | [
"MIT"
] | 13 | 2018-03-31T11:57:00.000Z | 2021-03-08T23:04:38.000Z | permon/frontend/native/__init__.py | bminixhofer/permon | 47fe06ebdc8c0655f865d38f073bae63a1acbd4d | [
"MIT"
] | 6 | 2018-04-18T19:18:52.000Z | 2020-08-29T05:54:22.000Z | import sys
import os
import logging
import signal
from permon.frontend import Monitor, MonitorApp
# these modules will be imported later because PySide2
# might not be installed
QtWidgets = None
QtGui = None
QtCore = None
QtQuick = None
Qt = None
MonitorModel = None
SettingsModel = None
def import_delayed():
    """Perform the deferred Qt / permon-utils imports.

    PySide2 might not be installed, so these imports happen on demand and the
    imported modules are then injected into this module's global namespace
    (replacing the ``None`` placeholders defined at the top of the file).
    """
    from PySide2 import QtWidgets, QtGui, QtCore, QtQuick  # noqa: F401
    from PySide2.QtCore import Qt  # noqa: F401
    from permon.frontend.native.utils import (MonitorModel,  # noqa: F401
                                              SettingsModel)
    # Publish every name imported above as a module-level global.
    globals().update(locals().copy())
class NativeMonitor(Monitor):
    """Monitor for the native Qt frontend: keeps the latest measurement."""

    def __init__(self, stat, buffer_size, fps, color,
                 app, thickness):
        # ``thickness`` is accepted for interface compatibility but is not
        # forwarded to the base class.
        super(NativeMonitor, self).__init__(stat, buffer_size,
                                            fps, color, app)
        self.value = 0
        self.contributors = []

    def update(self):
        """Poll the stat once and store the value and its contributors."""
        if self.stat.has_contributor_breakdown:
            new_value, new_contributors = self.stat.get_stat()
        else:
            new_value = self.stat.get_stat()
            new_contributors = []

        self.value = new_value
        self.contributors = new_contributors
class NativeApp(MonitorApp):
# QApplication is a global singleton. It can only ever be instantiated once
qapp = None
fonts = None
def __init__(self, stats, colors, buffer_size=None, fps=None,
line_thickness=2):
buffer_size = buffer_size or 500
fps = fps or 10
self.line_thickness = line_thickness
super(NativeApp, self).__init__(stats, colors, buffer_size, fps)
def add_stat(self, stat, add_to_config=True):
monitor = NativeMonitor(stat,
color=self.next_color(),
buffer_size=self.buffer_size,
fps=self.fps,
app=self,
thickness=self.line_thickness)
self.monitor_model.addMonitor(monitor)
super(NativeApp, self).add_stat(stat, add_to_config=add_to_config)
def remove_stat(self, stat, remove_from_config=True):
monitor_of_stat = None
for monitor in self.monitors:
if isinstance(monitor.stat, stat):
monitor_of_stat = monitor
if monitor_of_stat is not None:
self.monitor_model.removeMonitor(monitor_of_stat)
super(NativeApp, self).remove_stat(
stat, remove_from_config=remove_from_config)
else:
logging.error(f'Removing {stat.tag} failed')
def initialize(self):
    """Build the Qt application, models, fonts, icons and QML view, then run it.

    Creates the monitor/settings models and wires their signals, ensures a
    ``QApplication`` exists, loads custom fonts and window icons, adds the
    initial stats, loads ``qml/view.qml`` and enters the Qt event loop.
    Blocks until the view is closed, then calls :meth:`quit`.
    """
    # create a MonitorModel to communicate with the QML view
    self.monitor_model = MonitorModel()
    self.monitors = self.monitor_model.monitors
    # create a SettingsModel to communicate with the settings drawer
    # in the QML view
    self.settings_model = SettingsModel(self)
    # connect the statAdded and statRemoved signals
    self.settings_model.statAdded.connect(self.add_stat)
    self.settings_model.statRemoved.connect(self.remove_stat)
    if self.qapp is None:
        self.qapp = QtWidgets.QApplication(sys.argv)
    # add custom fonts
    font_db = QtGui.QFontDatabase()
    font_paths = [
        self.get_asset_path('Raleway-Regular.ttf'),
        self.get_asset_path('RobotoMono-Regular.ttf'),
    ]
    for font_path in font_paths:
        font_id = font_db.addApplicationFont(font_path)
        if font_id == -1:
            # logging.warn is a deprecated alias; use logging.warning
            logging.warning(f'Could not load font ({font_path})')
    font = QtGui.QFont('Raleway')
    self.qapp.setFont(font)
    # set favicon
    icon_info = [
        ('icons/favicon-16x16.png', (16, 16)),
        ('icons/favicon-32x32.png', (32, 32)),
        ('icons/android-chrome-192x192.png', (192, 192)),
        ('icons/android-chrome-256x256.png', (256, 256)),
    ]
    app_icon = QtGui.QIcon()
    for path, size in icon_info:
        app_icon.addFile(
            self.get_asset_path(path), QtCore.QSize(*size))
    self.qapp.setWindowIcon(app_icon)
    for stat in self.initial_stats:
        self.add_stat(stat, add_to_config=False)
    view = QtQuick.QQuickView()
    view.setResizeMode(QtQuick.QQuickView.SizeRootObjectToView)
    root_context = view.rootContext()
    # make monitor model and settings model available in QML
    root_context.setContextProperty('monitorModel', self.monitor_model)
    root_context.setContextProperty('settingsModel', self.settings_model)
    # qml/view.qml is the root QML file
    qml_file = os.path.join(os.path.dirname(__file__), 'qml', 'view.qml')
    view.setSource(QtCore.QUrl.fromLocalFile(os.path.abspath(qml_file)))
    if view.status() == QtQuick.QQuickView.Error:
        sys.exit(-1)

    # parameter renamed from ``signal`` to ``signum`` so the handler does
    # not shadow the ``signal`` module
    def signal_handler(signum, frame):
        # the app can not gracefully quit
        # when there is a keyboard interrupt
        # because the QAbstractListModel catches all errors
        # in a part of its code
        print()
        os._exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    view.show()
    self.qapp.exec_()
    self.quit()
def quit(self):
    """Drop every model reference and stop the Qt event loop.

    Deleting everything that holds a reference to the monitors lets all
    stats be garbage collected, which also stops any threads they may be
    using.
    """
    # delete everything with a reference to monitors
    # so that all stats are deleted, and possible threads
    # they are using stopped
    del self.monitors
    del self.settings_model
    del self.monitor_model
    self.qapp.exit()
def make_available(self):
    """Check that PySide2 is installed, then run the deferred imports.

    ``verify_installed`` presumably aborts or raises when PySide2 is
    missing — TODO confirm against its definition elsewhere in the file.
    """
    self.verify_installed('PySide2')
    import_delayed()
| 34.567251 | 79 | 0.603451 | 5,298 | 0.896295 | 0 | 0 | 0 | 0 | 0 | 0 | 1,148 | 0.194214 |
45fba0d47dbe3aae5e278b953d7dc0590607ce49 | 2,631 | py | Python | experiments/ring_buffer.py | reip-project/reip-pipelines | c6a8341e963b73f6fd08d63513876590e5af3d62 | [
"BSD-3-Clause-Clear"
] | null | null | null | experiments/ring_buffer.py | reip-project/reip-pipelines | c6a8341e963b73f6fd08d63513876590e5af3d62 | [
"BSD-3-Clause-Clear"
] | null | null | null | experiments/ring_buffer.py | reip-project/reip-pipelines | c6a8341e963b73f6fd08d63513876590e5af3d62 | [
"BSD-3-Clause-Clear"
] | null | null | null | from interface import *
import numpy as np
import copy
class Pointer:
    """A monotonically increasing counter mapped onto a fixed-size ring.

    ``counter`` only ever grows; ``pos`` is the slot index inside the ring
    and ``loop`` the number of completed passes over it.
    """

    def __init__(self, ring_size):
        self.counter = 0            # total number of increments so far
        self.ring_size = ring_size  # number of slots in the ring

    @property
    def pos(self):
        """Current slot index inside the ring (0 <= pos < ring_size)."""
        return self.counter % self.ring_size

    @property
    def loop(self):
        """Number of full passes over the ring completed so far."""
        return self.counter // self.ring_size
class RingBuffer(Sink):
    """Fixed-capacity single-writer ring buffer with multiple readers.

    One extra slot is allocated because ``head == tail`` denotes an empty
    buffer.  Every attached reader owns a :class:`Pointer`; the tail only
    advances past slots that all readers have consumed.
    """

    def __init__(self, size, **kw):
        # need an extra slot because head == tail means empty
        self.size = size + 1
        self.slots = [None] * self.size
        self.head = Pointer(self.size)  # next slot to write
        self.tail = Pointer(self.size)  # oldest slot still referenced
        self.readers = []               # one Pointer per attached source
        super().__init__(**kw)

    def full(self):
        """Return True when no free slot remains.

        As a side effect, reclaims slots that every reader has already
        consumed: the tail is advanced to the slowest reader and the
        released slots are cleared so their contents can be garbage
        collected.
        """
        if len(self.readers) > 0:
            new_counter = min(reader.counter for reader in self.readers)
            for c in range(self.tail.counter, new_counter):
                self.slots[c % self.size] = None
            self.tail.counter = new_counter
        return (self.head.counter - self.tail.counter) >= (self.size - 1)

    def _put(self, buffer):
        """Store a ``(data, meta)`` pair at the head and advance it."""
        data, meta = buffer
        if isinstance(data, np.ndarray):
            # freeze the array so readers cannot mutate shared data
            data.flags.writeable = False
        self.slots[self.head.pos] = (data, ImmutableDict(meta))
        self.head.counter += 1

    def gen_source(self, **kw):
        """Create and register a new reader starting at the current tail."""
        self.readers.append(Pointer(self.size))
        self.readers[-1].counter = self.tail.counter
        return RingReader(self, len(self.readers) - 1, **kw)
class RingReader(Source):
    """A read cursor over a :class:`RingBuffer`, identified by ``id``."""

    def __init__(self, ring, id, **kw):
        self.ring = ring  # the RingBuffer being read
        self.id = id      # index into ring.readers for this cursor
        super().__init__(**kw)

    def empty(self):
        """True when this reader has consumed everything written so far."""
        return self.ring.head.counter == self.ring.readers[self.id].counter

    def last(self):
        """True when at most one unread item remains."""
        return (self.ring.head.counter - self.ring.readers[self.id].counter) <= 1

    def next(self):
        """Advance this reader past the current item."""
        self.ring.readers[self.id].counter += 1

    def _get(self):
        # returns the stored tuple itself (no copy); the data is shared
        # with the ring and must be treated as read-only
        return self.ring.slots[self.ring.readers[self.id].pos]
if __name__ == '__main__':
    # quick smoke test: one writer, three readers with different strategies
    rb = RingBuffer(100)
    print(issubclass(type(rb), Sink))
    r0 = rb.gen_source()
    r1 = rb.gen_source(strategy=Source.Skip, skip=1)
    r2 = rb.gen_source(strategy=Source.Latest)
    for i in range(10):
        rb.put((i, {}))
    print("r0:")
    while not r0.empty():
        print(r0.get()[0])
        r0.next()
    print("r1:")
    while not r1.empty():
        print(r1.get()[0])
        r1.next()
    print("r2:")
    while not r2.empty():
        print(r2.get()[0])
        r2.next()
| 26.31 | 82 | 0.58913 | 2,029 | 0.77119 | 0 | 0 | 148 | 0.056252 | 0 | 0 | 216 | 0.082098 |
45feb86155af725aa9e4cb373ec68bcdb40c838f | 837 | py | Python | solvcon/tests/test_numpy.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 16 | 2015-12-09T02:54:42.000Z | 2021-04-20T11:26:39.000Z | solvcon/tests/test_numpy.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 95 | 2015-12-09T00:49:40.000Z | 2022-02-14T13:34:55.000Z | solvcon/tests/test_numpy.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 13 | 2015-05-08T04:16:42.000Z | 2021-01-15T09:28:06.000Z | # -*- coding: UTF-8 -*-
from unittest import TestCase
class TestNumpy(TestCase):
    """Sanity checks of numpy's linear-algebra routines."""

    def test_dot(self):
        """Matrix-matrix and matrix-vector products match hand-computed results."""
        from numpy import array, dot
        A = array([[1, 2], [3, 4]], dtype='int32')
        B = array([[5, 6], [7, 8]], dtype='int32')
        R = array([[19, 22], [43, 50]], dtype='int32')
        for val in (dot(A, B) - R).flat:
            self.assertEqual(val, 0)
        u = array([1, 1], dtype='int32')
        Ru = array([3, 7], dtype='int32')
        for val in (dot(A, u) - Ru).flat:
            self.assertEqual(val, 0)

    def test_eig(self):
        """Eigendecomposition diagonalizes A: inv(V) @ A @ V equals diag(vals)."""
        from numpy import array, dot
        from numpy.linalg import eig, inv
        A = array([[1, 2], [3, 4]], dtype='int32')
        vals, mat = eig(A)
        lbd = dot(dot(inv(mat), A), mat)
        for i in range(2):
            self.assertAlmostEqual(vals[i], lbd[i, i], places=14)
| 31 | 64 | 0.523297 | 779 | 0.930705 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.077658 |
45ff2ecb9b4c51dd290f8df481731ee3dacbf736 | 67 | py | Python | localized_fields/__init__.py | GabLeRoux/django-localized-fields | f0ac0f7f2503317fde5d75ba8481e34db83512bd | [
"MIT"
] | null | null | null | localized_fields/__init__.py | GabLeRoux/django-localized-fields | f0ac0f7f2503317fde5d75ba8481e34db83512bd | [
"MIT"
] | null | null | null | localized_fields/__init__.py | GabLeRoux/django-localized-fields | f0ac0f7f2503317fde5d75ba8481e34db83512bd | [
"MIT"
] | null | null | null | default_app_config = 'localized_fields.apps.LocalizedFieldsConfig'
| 33.5 | 66 | 0.880597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.671642 |