blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42a59f892a98e74a4c42441549d2d609eed08529 | a1b892c0f5f8c5aa2c67b555b8d1d4b7727a86a4 | /HTML/outage/test.py | feaec475507a72c66bfa111e20a713c1f88284c0 | [] | no_license | Vivekdjango/outage | 60f463ae5294d2b33544a19bda34cc2c22dd42c8 | 20cfbc07e6714f0c8c7e685ea389f1b8ef1bfd53 | refs/heads/master | 2021-01-20T04:18:40.023340 | 2017-04-28T06:46:26 | 2017-04-28T06:46:26 | 89,675,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | import re
with open('/var/www/html/outage/Vivektest.html','r+') as f:
s=f.read()
d=re.search(r'Downtime</b>:([0-9]+)',s)
j='Downtime</b>:90'
x=d.group()
s=re.sub(x,j,s)
f.seek(0)
f.truncate()
with open('/var/www/html/outage/Vivektest.html','a') as g:
g.write(s)
g.close()
| [
"viveksinha@IC0532-L0.corp.inmobi.com"
] | viveksinha@IC0532-L0.corp.inmobi.com |
7d7403199232daf804abcef3c44475cc6d85bbdb | e55c876bf34e150cef8c4575f45481b567296a8c | /backend/manage.py | 7b441b75d60ab7bb439e9056593756e1a615208d | [] | no_license | crowdbotics-apps/testtttt5555-dev-15969 | 5c44f20694d6b99efb93d36241a38f7912afb2b5 | e092cd8b1ea34149ee2cfae1e9612f86a754932c | refs/heads/master | 2023-01-27T22:12:52.002176 | 2020-11-30T11:59:38 | 2020-11-30T11:59:38 | 317,209,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testtttt5555_dev_15969.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
97005734ec6b4c93adc78cd19c609fa7f49dac2c | f83ca7d939e8b7688445f336ceedcf871da3ed21 | /sysroot/usr/lib/python3/dist-packages/blinkt.py | 3dae99458928bbdd066f8966ee0d34197bd45404 | [] | no_license | FundamentalFrequency/juce-aarch64-linux-cross-compiler | 1c432d77b8bc7dd11692a9aef34408ed8baddf1f | 3fb7539e4459228231699efbe227e50638b620e4 | refs/heads/main | 2023-03-17T07:49:31.396127 | 2022-07-15T04:52:18 | 2022-07-15T04:52:18 | 496,806,629 | 13 | 4 | null | 2022-10-21T21:07:26 | 2022-05-26T23:57:36 | Python | UTF-8 | Python | false | false | 3,619 | py | import atexit
import time
import RPi.GPIO as GPIO
__version__ = '0.1.2'
DAT = 23
CLK = 24
NUM_PIXELS = 8
BRIGHTNESS = 7
pixels = [[0, 0, 0, BRIGHTNESS]] * NUM_PIXELS
_gpio_setup = False
_clear_on_exit = True
def _exit():
if _clear_on_exit:
clear()
show()
GPIO.cleanup()
def set_brightness(brightness):
"""Set the brightness of all pixels
:param brightness: Brightness: 0.0 to 1.0
"""
if brightness < 0 or brightness > 1:
raise ValueError("Brightness should be between 0.0 and 1.0")
for x in range(NUM_PIXELS):
pixels[x][3] = int(31.0 * brightness) & 0b11111
def clear():
"""Clear the pixel buffer"""
for x in range(NUM_PIXELS):
pixels[x][0:3] = [0, 0, 0]
def _write_byte(byte):
for x in range(8):
GPIO.output(DAT, byte & 0b10000000)
GPIO.output(CLK, 1)
time.sleep(0.0000005)
byte <<= 1
GPIO.output(CLK, 0)
time.sleep(0.0000005)
# Emit exactly enough clock pulses to latch the small dark die APA102s which are weird
# for some reason it takes 36 clocks, the other IC takes just 4 (number of pixels/2)
def _eof():
GPIO.output(DAT, 0)
for x in range(36):
GPIO.output(CLK, 1)
time.sleep(0.0000005)
GPIO.output(CLK, 0)
time.sleep(0.0000005)
def _sof():
GPIO.output(DAT, 0)
for x in range(32):
GPIO.output(CLK, 1)
time.sleep(0.0000005)
GPIO.output(CLK, 0)
time.sleep(0.0000005)
def show():
"""Output the buffer to Blinkt!"""
global _gpio_setup
if not _gpio_setup:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(DAT, GPIO.OUT)
GPIO.setup(CLK, GPIO.OUT)
atexit.register(_exit)
_gpio_setup = True
_sof()
for pixel in pixels:
r, g, b, brightness = pixel
_write_byte(0b11100000 | brightness)
_write_byte(b)
_write_byte(g)
_write_byte(r)
_eof()
def set_all(r, g, b, brightness=None):
"""Set the RGB value and optionally brightness of all pixels
If you don't supply a brightness value, the last value set for each pixel be kept.
:param r: Amount of red: 0 to 255
:param g: Amount of green: 0 to 255
:param b: Amount of blue: 0 to 255
:param brightness: Brightness: 0.0 to 1.0 (default around 0.2)
"""
for x in range(NUM_PIXELS):
set_pixel(x, r, g, b, brightness)
def get_pixel(x):
"""Get the RGB and brightness value of a specific pixel"""
r, g, b, brightness = pixels[x]
brightness /= 31.0
return r, g, b, round(brightness, 3)
def set_pixel(x, r, g, b, brightness=None):
"""Set the RGB value, and optionally brightness, of a single pixel
If you don't supply a brightness value, the last value will be kept.
:param x: The horizontal position of the pixel: 0 to 7
:param r: Amount of red: 0 to 255
:param g: Amount of green: 0 to 255
:param b: Amount of blue: 0 to 255
:param brightness: Brightness: 0.0 to 1.0 (default around 0.2)
"""
if brightness is None:
brightness = pixels[x][3]
else:
brightness = int(31.0 * brightness) & 0b11111
pixels[x] = [int(r) & 0xff, int(g) & 0xff, int(b) & 0xff, brightness]
def set_clear_on_exit(value=True):
"""Set whether Blinkt! should be cleared upon exit
By default Blinkt! will turn off the pixels on exit, but calling::
blinkt.set_clear_on_exit(False)
Will ensure that it does not.
:param value: True or False (default True)
"""
global _clear_on_exit
_clear_on_exit = value
| [
"stonepreston@protonmail.com"
] | stonepreston@protonmail.com |
0aa45401d8b9d7ecc694f70ae64fad64be567afa | c00a2490947ad10582b5d675f070ccb62b70901d | /testing/vivaldi_testing_base.gypi | 17dedd737c47f8dbfc349f283963feba7c8c0596 | [
"BSD-3-Clause"
] | permissive | teotikalki/vivaldi-source | 543d0ab336fb5784eaae1904457598f95f426186 | 22a46f2c969f6a0b7ca239a05575d1ea2738768c | refs/heads/master | 2021-01-23T01:17:34.305328 | 2016-04-29T20:28:18 | 2016-04-29T20:28:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | gypi | {
'type': 'none',
'target_conditions': [
['OS=="win"', {
# Allow running and debugging browser_tests from the testing directory of the MSVS solution view
'product_name':'<(_target_name).exe',
}],
],
}
| [
"jason@theograys.com"
] | jason@theograys.com |
93cdc0d21e1503e893733f61a9dd2b1977f3ea97 | 659a7a65c877f2eb0adbb6001a1f85f063d01acd | /mscreen/autodocktools_prepare_py3k/AutoDockTools/Utilities24/write_models_from_states.py | 3fab201f1f0771a18e5098623dd8904e1f25a07d | [
"MIT"
] | permissive | e-mayo/mscreen | da59771be250ebe341feb102e0cbf41aab70de43 | a50f0b2f7104007c730baa51b4ec65c891008c47 | refs/heads/main | 2023-06-21T17:47:06.519307 | 2021-08-09T16:06:29 | 2021-08-09T16:06:29 | 345,008,321 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,818 | py | #!/usr/bin/env python
#
#
#
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/Utilities24/write_models_from_states.py,v 1.4 2010/04/05 21:07:42 rhuey Exp $
#
# $Id: write_models_from_states.py,v 1.4 2010/04/05 21:07:42 rhuey Exp $
#
import os, sys, sys
from string import join
from MolKit import Read
from mglutil.math.statetocoords import StateToCoords
from AutoDockTools.Conformation import Conformation
from AutoDockTools.Docking import Docking
if __name__ == '__main__':
import sys
import getopt
def usage():
"Print helpful, accurate usage statement to stdout."
print("Usage: write_modes_from_states.py -l ligandfile -s statefile -o multimodelfile")
print()
print(" Description of command...")
print(" -l ligandfile name")
print(" -s statefile name")
print(" Optional parameters:")
print(" [-o] multimodel output filename ")
print(" [-S] single string replacing statefile eg:")
print(" 'State: 29.303 14.415 23.603 0.5609 0.4518 0.2662 -0.6406 -20.89 -0.65 81.86 -17.36 28.83 -10.80 -23.98 114.21'")
print(" [-e] statefile includes energy")
print(" [-z] use zero origin")
print(" [-i] interim state->apply quaternion before 'about' translation")
print(" [-n] index of energy on energy line: default is 8")
print(" [-v] verbose output")
# process command arguments
try:
opt_list, args = getopt.getopt(sys.argv[1:], 'l:S:s:o:n:ezivh')
except getopt.GetoptError as msg:
print('write_modes_from_states.py: %s' %msg)
usage()
sys.exit(2)
# initialize required parameters
#-l: ligandfile name
ligandfile = None
#-S: state
SINGLESTATE = None #0? or False?
#-s: statefile name
statefile = None
#-o multimodel outputfilename
outputfile = None
#-e states_have_energy
states_have_energy = 0
#-n index of energy
index_of_energy = 8
#-z use_zero_origin
use_zero_origin = False
#-i interim_state
interim_state = False
# initialize optional parameter
#-v verbose best only
verbose = False
#'s:o:evh'
for o, a in opt_list:
#print "o=", o, " a=", a
if o in ('-l', '--l'):
ligandfile = a
if verbose: print('set ligandfile to ', a)
if o in ('-S', '--S'):
SINGLESTATE = 1
if verbose: print('set SINGLESTATE to ', a)
#['30.691_15.206_23.914_-0.3807_0.0201_0.9215_0.0747_-7.06_93.27_127.02_-130.67_7.04_-95.44_-4.91_-126.85']"
if args[0].find('_')>-1:
args[0] = args[0].replace("_"," ")
states = ["State: " + join(args)]
if o in ('-s', '--s'):
statefile = a
if verbose: print('set statefile to ', a)
if o in ('-o', '--o'):
outputfile = a
if verbose: print('set outputfile to ', a)
if o in ('-e', '--e'):
states_have_energy = True
if verbose: print('set states_have_energy to ', states_have_energy)
if o in ('-n', '--n'):
index_of_energy = int(a)
if verbose: print('set index_of_energy to ', index_of_energy)
if o in ('-z', '--z'):
use_zero_origin = True
if verbose: print('set use_zero_origin to ', use_zero_origin)
if o in ('-i', '--i'):
interim_state = True
if verbose: print('set interim_state to ', interim_state)
if o in ('-v', '--v'):
verbose = True
if verbose: print('set verbose to ', True)
if o in ('-h', '--'):
usage()
sys.exit()
if not ligandfile:
print('write_modes_from_states.py: ligandfile must be specified.')
usage()
sys.exit()
if not statefile and not SINGLESTATE:
print('write_modes_from_states.py: SINGLESTATE or statefile must be specified.')
usage()
sys.exit()
if not outputfile:
if verbose: print('write_modes_from_states.py: outputfile not specified. Using stdout')
#usage()
#sys.exit()
lig = Read(ligandfile)
if not len(lig):
print("no ligand found in ", ligandfile)
sys.exit()
lig = lig[0]
if not hasattr(lig, 'ndihe'):
print(ligandfile + "molecule has no torsion tree")
sys.exit()
lig.buildBondsByDistance()
# add extra slot to ._coords for changing coordinates
lig.allAtoms.addConformation(lig.allAtoms.coords)
#?is this necessary
lig.allAtoms.setConformation(1)
ntors = lig.ndihe
length_of_state = 7+lig.ndihe
# @@ handle to the input ligLines
ligLines = lig.parser.allLines
#setup StateToCoords object
origin = lig.getCenter()
if use_zero_origin or interim_state:
origin = [0.,0.,0.]
#note: index of _coords to use is always 1
lig.stoc = StateToCoords(lig, origin, 1)
outptr = sys.stdout
if outputfile:
outptr = open(outputfile, 'w')
#if SINGLESTATE:
# eg:
#"State: 29.303 14.415 23.603 0.5609 0.4518 0.2662 -0.6406 -20.89 -0.65 81.86 -17.36 28.83 -10.80 -23.98 114.21"
#states = state.split()
#['State:', '29.303', '14.415', '23.603', '0.5609', '0.4518', '0.2662', '-0.6406', '-20.89', '-0.65', '81.86', '-17.36', '28.83', '-10.80', '-23.98', '114.21']
if statefile:
sptr = open(statefile)
states = sptr.readlines()
sptr.close()
if not len(states):
print("no states found in ", statefile)
sys.exit()
state_list = []
ctr = 1
count = len(states)
if not SINGLESTATE and states_have_energy:
for i in range(0,len(states),2):
sline = states[i]
eline =states[i+1]
#build a states from each of the lines in statefile
#State:\t 4.847 -2.386 14.760 -0.413 0.552 -0.724 4.257 58.27 -33.47 -87.92 134.64 -36.46 114.79 -44.86 -74.96 -118.53 77.29 139.08 78.23 -52.09 -12.69 35.08 -118.21 -175.94\n'
fl = list(map(float, sline.split()[1:]))
assert len(fl)==length_of_state
# 0 1 2 3 4 5 6 [7....
#[t1,t2,t3,q1,q2,q3,q4,tors1,tors2, tors3....
#
translation = fl[:3]
quaternion = [fl[6],fl[3:6]]
#energy = eline.split()[8]
energy = eline.split()[index_of_energy]
torsion_angles = []
if ntors>0:
torsion_angles = fl[7:]
newConf = Conformation(lig,origin,translation, quaternion, torsion_angles)
newCrds = newConf.getCoords()
if interim_state:
#here's where to add back the origin or ?
newCrds -= origin
#write some MODEL stuff then newCrds
ostr = "MODEL %d\n"%ctr
outptr.write(ostr)
ctr += 1
ostr = "REMARK AD4 RESULT: %s\n" %energy #put energy here...
outptr.write(ostr)
#lig.parser.write_with_new_coords(newCrds,outptr)
ct = 0
for l in ligLines:
if l.find("ATOM")==0 or l.find("HETATM")==0:
cc = newCrds[ct]
ct = ct + 1
new_l = l[:30]+"%8.3f%8.3f%8.3f" %(cc[0],cc[1],cc[2]) + l[54:]
else:
new_l = l
outptr.write(new_l)
if verbose: print("wrote ", outputfile)
ostr = "ENDMDL\n"
outptr.write(ostr)
i+=1
else:
for sline in states:
#build a state from each of the lines in states
#State:\t 4.847 -2.386 14.760 -0.413 0.552 -0.724 4.257 58.27 -33.47 -87.92 134.64 -36.46 114.79 -44.86 -74.96 -118.53 77.29 139.08 78.23 -52.09 -12.69 35.08 -118.21 -175.94\n'
fl = list(map(float, sline.split()[1:]))
assert len(fl)==length_of_state
# 0 1 2 3 4 5 6 [7....
#[t1,t2,t3,q1,q2,q3,q4,tors1,tors2, tors3....
#
translation = fl[:3]
# interpret axis_angle as quaternion x y z w
# use quaternion as w, (x,y,z) for mglutil/math/transformation.py class
quaternion = [fl[6],fl[3:6]]
torsion_angles = []
if ntors>0:
torsion_angles = fl[7:]
newConf = Conformation(lig,origin,translation, quaternion, torsion_angles)
newCrds = newConf.getCoords()
#write some MODEL stuff then newCrds
ostr = "MODEL %d\n"%ctr
outptr.write(ostr)
ctr += 1
ostr = "REMARK AD4 RESULT: n/a\n" #put energy here...
outptr.write(ostr)
#lig.parser.write_with_new_coords(newCrds,outptr)
ct = 0
for l in ligLines:
if l.find("ATOM")==0 or l.find("HETATM")==0:
cc = newCrds[ct]
ct = ct + 1
new_l = l[:30]+"%8.3f%8.3f%8.3f" %(cc[0],cc[1],cc[2]) + l[54:]
else:
new_l = l
outptr.write(new_l)
if verbose: print("wrote ", outputfile)
ostr = "ENDMDL\n"
outptr.write(ostr)
if verbose: print("Done!")
outptr.close()
# To execute this command type:
# write_modes_from_states.py -d docking_filename
# optional arguments
# -o outputfile_stem (default is ligandname)
| [
"eduardomayoyanes@gmail.com"
] | eduardomayoyanes@gmail.com |
5057692bd4eb3599bc2347781c35514eda74b72f | bdda88c9a9141e9873f871dea6a197a3c413aad4 | /last/wsgi.py | 01e630a2b2f7ee2e8f1bb214003fbde4dfcffb09 | [] | no_license | thienkimlove/python_last | 83e890e30ef3e4dbd7e063b7f11c5ae2b65c9f84 | f6685ed71c30196f40b122b2aefc35271802d092 | refs/heads/master | 2022-12-15T21:57:24.128016 | 2018-08-20T10:57:42 | 2018-08-20T10:57:42 | 120,873,885 | 0 | 0 | null | 2022-12-08T00:54:39 | 2018-02-09T07:42:28 | JavaScript | UTF-8 | Python | false | false | 385 | py | """
WSGI config for last project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "last.settings")
application = get_wsgi_application()
| [
"quan.dm@teko.vn"
] | quan.dm@teko.vn |
376f29ecfebbf1df2ed02a33676c3f737a6e6b60 | 7022c58c3affc4a31cb261a44cb42ff07088e654 | /modify_video.py | 64aef4b9fc60e71aaf8fbed518e654834661b99e | [
"MIT"
] | permissive | reading-stiener/Audio-to-audio-alignment-research | 9298c47c139240b8b2a4b80bfeffa6db6d278c8a | 8ea2789a760e63b92a3a2f14236a87417236e533 | refs/heads/main | 2023-02-12T14:05:25.948830 | 2020-12-22T05:53:11 | 2020-12-22T05:53:11 | 303,211,401 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | from moviepy.editor import VideoFileClip, concatenate_videoclips, vfx
from moviepy.video.fx.speedx import speedx
from pprint import pprint
import os
import json
def modify_video(folder, filename, mod_dict, audio_file=None):
with VideoFileClip(filename, audio=False) as videoclip:
cliplist = []
for changes in mod_dict:
ta, tb, rate = changes["original_start_time"], changes["original_start_time"]+changes["original_delta_time"], changes["rate"]
snippet = videoclip.subclip(ta, tb)
# applying speed effect and appending to clip list
cliplist.append(snippet.fx(speedx, rate))
modified_clip = concatenate_videoclips(cliplist)
inputfolder, inputfile = os.path.split(filename)
if audio_file:
modified_clip.write_videofile(os.path.join(folder, inputfile[:-4]+ "_"+ audio_file.split("_")[1]) + ".mp4")
else:
modified_clip.write_videofile(os.path.join(folder, inputfile))
if __name__ == "__main__":
filename = "/home/camel/Documents/Honors Thesis Research/Audio-to-audio-alignment-research/LSTM_dataset_4/violin/01_Jupiter_vn_vc/violin_1.mp4"
folder = "annotations"
with open("annotations/Jupiter_vn_vc.json") as f:
mod_dict = json.load(fp=f)
modify_video(folder, filename, mod_dict)
| [
"apradha1@conncoll.edu"
] | apradha1@conncoll.edu |
0e2abcb6c0c351b66ae5fb542aaaecce8c8f3fbf | b23b3a4cc7d4ebdf08d958af82128ba535b1402f | /Codeforces/T-primes.py | 8fc2cb522a317a237d883dfa154a807a34c8cb13 | [] | no_license | darshantak/Competitive-Programming | 8665cb1f837140275b6664464522ae942fb6ca50 | 1413d3cc9904b534178a5ac3e4dcd48733c9d26f | refs/heads/master | 2021-08-16T00:02:33.507276 | 2020-05-15T13:55:22 | 2020-05-15T13:55:22 | 180,850,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | import math
def SieveOfEratosthenes(n, prime,primesquare, a):
for i in range(2,n+1):
prime[i] = True
for i in range((n * n + 1)+1):
primesquare[i] = False
prime[1] = False
p = 2
while(p * p <= n):
if (prime[p] == True):
i = p * 2
while(i <= n):
prime[i] = False
i += p
p+=1
j = 0
for p in range(2,n+1):
if (prime[p]==True):
a[j] = p
primesquare[p * p] = True
j+=1
def countDivisors(n):
if (n == 1):
return 1
prime = [False]*(n + 2)
primesquare = [False]*(n * n + 2)
a = [0]*n
SieveOfEratosthenes(n, prime, primesquare, a)
ans = 1
i=0
while(1):
if(a[i] * a[i] * a[i] > n):
break
cnt = 1
while (n % a[i] == 0):
n = n / a[i]
cnt = cnt + 1
ans = ans * cnt
i+=1
n=int(n) dsd
if (prime[n]==True):
ans = ans * 2
elif (primesquare[n]==True):
ans = ans * 3
elif (n != 1):
ans = ans * 4
return ans
n=int(input())
x=list(map(int,input().split()))
for number in x:
temp=countDivisors(number)
if temp==3:
print("YES")
else:
print("NO")
| [
"30834020+darshantak@users.noreply.github.com"
] | 30834020+darshantak@users.noreply.github.com |
3754f58c4dc34461dbba2390774e8247149a0188 | 90c2619937019bb1145edfb2d9d6a7cdea460b57 | /src/538.py | 004d39d2e9e53a8ea9413a35ac09f724706a45d1 | [
"MIT"
] | permissive | zhaoyi3264/leetcode-solutions | 2d289a7e5c74cfe7f8b019c6056ce16485ae057b | 1a3a2d441cdd07a17e80b0ea43b7b266844f530c | refs/heads/main | 2023-06-03T11:35:25.054669 | 2021-06-28T02:58:07 | 2021-06-28T02:58:07 | 349,618,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
s = 0
def update(self, root):
if not root:
return
self.update(root.right)
self.s += root.val
root.val = self.s
self.update(root.left)
def convertBST(self, root: TreeNode) -> TreeNode:
self.update(root)
return root
| [
"zhaoyi3264@gmail.com"
] | zhaoyi3264@gmail.com |
c76ddadc6c35d5df98c2328ccbdfe66c0af64a20 | 8be967439ddf76eaad9e49fb9d8f18d832db5cf4 | /mmd/realtime.py | d2ada934da2d2003fb6abac9d7ea39210fb9fd41 | [
"BSD-3-Clause"
] | permissive | jjgoings/McMurchie-Davidson | b4acda0d0e49f96ec10bee4e8c58c0bf20d77f77 | 8c9d176204498655a358edf41698e59cf970a548 | refs/heads/master | 2023-02-07T21:13:50.285990 | 2023-02-05T04:32:59 | 2023-02-05T04:32:59 | 81,615,513 | 72 | 23 | BSD-3-Clause | 2023-02-05T04:33:00 | 2017-02-10T23:13:26 | Python | UTF-8 | Python | false | false | 7,265 | py | from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.linalg import expm
class RealTime(object):
"""Class for real-time routines"""
def __init__(self,mol,numsteps=1000,stepsize=0.1,field=0.0001,pulse=None):
self.mol = mol
self.field = field
self.stepsize = stepsize
self.numSteps = numsteps
self.time = np.arange(0,self.numSteps)*self.stepsize
if pulse:
self.pulse = pulse
else:
# zero pulse envelope
self.pulse = lambda t: 0.0
self.reset()
def reset(self):
"""Reset all time-dependent property arrays to empty, will also
re-do the SCF in order to set the reference back to ground state.
This will likely need to be changed in the future.
"""
self.mol.RHF(doPrint=False)
self.dipole = []
self.angmom = []
self.Energy = []
self.shape = []
def Magnus2(self,direction='x'):
"""Propagate in time using the second order explicit Magnus.
See: Blanes, Sergio, and Fernando Casas. A concise introduction
to geometric numerical integration. Vol. 23. CRC Press, 2016.
Magnus2 is Eq (4.61), page 128.
"""
self.reset()
self.mol.orthoDen()
self.mol.orthoFock()
h = -1j*self.stepsize
for idx,time in enumerate((self.time)):
if direction.lower() == 'x':
self.mol.computeDipole()
self.dipole.append(np.real(self.mol.mu[0]))
elif direction.lower() == 'y':
self.mol.computeDipole()
self.dipole.append(np.real(self.mol.mu[1]))
elif direction.lower() == 'z':
self.mol.computeDipole()
self.dipole.append(np.real(self.mol.mu[2]))
# record pulse envelope for later plotting, etc.
self.shape.append(self.pulse(time))
curDen = np.copy(self.mol.PO)
self.addField(time + 0.0*self.stepsize,direction=direction)
k1 = h*self.mol.FO
U = expm(k1)
self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
self.mol.updateFock()
self.addField(time + 1.0*self.stepsize,direction=direction)
L = 0.5*(k1 + h*self.mol.FO)
U = expm(L)
self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
self.mol.updateFock()
# density and Fock are done updating, wrap things up
self.mol.unOrthoFock()
self.mol.unOrthoDen()
self.mol.computeEnergy()
self.Energy.append(np.real(self.mol.energy))
def Magnus4(self,direction='x'):
"""Propagate in time using the fourth order explicit Magnus.
See: Blanes, Sergio, and Fernando Casas. A concise introduction
to geometric numerical integration. Vol. 23. CRC Press, 2016.
Magnus4 is Eq (4.62), page 128.
"""
self.reset()
self.mol.orthoDen()
self.mol.orthoFock()
h = -1j*self.stepsize
for idx,time in enumerate((self.time)):
if direction.lower() == 'x':
self.mol.computeDipole()
self.dipole.append(np.real(self.mol.mu[0]))
elif direction.lower() == 'y':
self.mol.computeDipole()
self.dipole.append(np.real(self.mol.mu[1]))
elif direction.lower() == 'z':
self.mol.computeDipole()
self.dipole.append(np.real(self.mol.mu[2]))
# record pulse envelope for later plotting, etc.
self.shape.append(self.pulse(time))
curDen = np.copy(self.mol.PO)
self.addField(time + 0.0*self.stepsize,direction=direction)
k1 = h*self.mol.FO
Q1 = k1
U = expm(0.5*Q1)
self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
self.mol.updateFock()
self.addField(time + 0.5*self.stepsize,direction=direction)
k2 = h*self.mol.FO
Q2 = k2 - k1
U = expm(0.5*Q1 + 0.25*Q2)
self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
self.mol.updateFock()
self.addField(time + 0.5*self.stepsize,direction=direction)
k3 = h*self.mol.FO
Q3 = k3 - k2
U = expm(Q1 + Q2)
self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
self.mol.updateFock()
self.addField(time + 1.0*self.stepsize,direction=direction)
k4 = h*self.mol.FO
Q4 = k4 - 2*k2 + k1
L = 0.5*Q1 + 0.25*Q2 + (1/3.)*Q3 - (1/24.)*Q4
L += -(1/48.)*self.mol.comm(Q1,Q2)
U = expm(L)
self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
self.mol.updateFock()
self.addField(time + 0.5*self.stepsize,direction=direction)
k5 = h*self.mol.FO
Q5 = k5 - k2
L = Q1 + Q2 + (2/3.)*Q3 + (1/6.)*Q4 - (1/6.)*self.mol.comm(Q1,Q2)
U = expm(L)
self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
self.mol.updateFock()
self.addField(time + 1.0*self.stepsize,direction=direction)
k6 = h*self.mol.FO
Q6 = k6 -2*k2 + k1
L = Q1 + Q2 + (2/3.)*Q5 + (1/6.)*Q6
L += -(1/6.)*self.mol.comm(Q1, (Q2 - Q3 + Q5 + 0.5*Q6))
U = expm(L)
self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
self.mol.updateFock()
# density and Fock are done updating, wrap things up
self.mol.unOrthoFock()
self.mol.unOrthoDen()
self.mol.computeEnergy()
self.Energy.append(np.real(self.mol.energy))
def addField(self,time,direction='x'):
""" Add the electric dipole contribution to the Fock matrix,
and then orthogonalize the results. The envelope (shape) of
the interaction with the electric field (self.pulse) needs
to be set externally in a job, since the desired pulse is
specific to each type of realtime simulation.
self.pulse: function of time (t) that returns the envelope
amplitude at a given time.
Example:
def gaussian(t):
envelope = np.exp(-(t**2))
return envelope
rt = RealTime(molecule, pulse=gaussian, field=0.001)
The above example would set up a realtime simulations with
the external field to have the gaussian envelope defined above
scaled by field=0.001.
"""
shape = self.pulse(time)
if direction.lower() == 'x':
self.mol.F += self.field*shape*(self.mol.M[0])
elif direction.lower() == 'y':
self.mol.F += self.field*shape*(self.mol.M[1])
elif direction.lower() == 'z':
self.mol.F += self.field*shape*(self.mol.M[2])
self.mol.orthoFock()
| [
"jjgoings@gmail.com"
] | jjgoings@gmail.com |
a5da9ca40cc5a5566e5d13d5e2df167a59f0917c | 88b7c57a0d9a7a3b28ebd9d6c12ecbbebc50e8a5 | /beep/wechat_callback/routing.py | 45e3e73b84586df877f05a8f9c0b838cde257889 | [] | no_license | largerbigsuper/beep | 71438a4c2feae1afd6ecd25899e95f441bf2165b | a5d84437d79f065cec168f68210c4344a60d08d1 | refs/heads/master | 2022-09-23T02:09:37.117676 | 2020-01-03T06:21:57 | 2020-01-03T06:21:57 | 209,052,138 | 0 | 0 | null | 2022-09-13T23:03:25 | 2019-09-17T12:47:26 | Python | UTF-8 | Python | false | false | 315 | py | from django.urls import path
from . import consumers_wehub, consumers_live, consumers_wehub_task
websocket_urlpatterns = [
path('ws/wehub/', consumers_wehub.WehubConsumer),
path('ws/wehub_task/', consumers_wehub_task.WehubTaskConsumer),
path('ws/live/<str:room_name>/', consumers_live.LiveConsumer),
] | [
"zaihuazhao@163.com"
] | zaihuazhao@163.com |
57609feaa868e5d5d230added1c8394bdd894701 | ed06ef44c944707276a2fca16d61e7820596f51c | /Python/build-array-where-you-can-find-the-maximum-exactly-k-comparisons.py | 1e62106f0b746ff6dded6ba1d710508c12ac3a25 | [] | no_license | sm2774us/leetcode_interview_prep_2021 | 15842bef80637c6ff43542ed7988ec4b2d03e82c | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | refs/heads/master | 2023-05-29T14:14:49.074939 | 2021-06-12T19:52:07 | 2021-06-12T19:52:07 | 374,725,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | # Time: O(n * m * k)
# Space: O(m * k)
class Solution(object):
def numOfArrays(self, n, m, k):
"""
:type n: int
:type m: int
:type k: int
:rtype: int
"""
MOD = 10**9 + 7
# dp[l][i][j] = number of ways of constructing array length l with max element i at search cost j
dp = [[[0]*(k+1) for _ in range(m+1)] for _ in range(2)]
# prefix_dp[l][i][j] = sum(dp[l][i][j] for i in [1..i])
prefix_dp = [[[0]*(k+1) for _ in range(m+1)] for _ in range(2)]
for i in range(1, m+1):
dp[1][i][1] = 1
prefix_dp[1][i][1] = (prefix_dp[1][i-1][1] + dp[1][i][1])%MOD
for l in range(2, n+1):
for i in range(1, m+1):
for j in range(1, k+1):
dp[l%2][i][j] = (i*dp[(l-1)%2][i][j]%MOD + prefix_dp[(l-1)%2][i-1][j-1])%MOD
prefix_dp[l%2][i][j] = (prefix_dp[l%2][i-1][j] + dp[l%2][i][j])%MOD
return prefix_dp[n%2][m][k]
| [
"sm2774us@gmail.com"
] | sm2774us@gmail.com |
9c32666c1d023909998ab37c378f153017c92d8e | bdd40ea113fdf2f04ef7d61a096a575322928d1d | /Rupesh/DjangoTutorial/ecomarce/analytics/admin.py | ab78365d3c71a629a921a000a91bb6fc59872b46 | [] | no_license | rupesh7399/rupesh | 3eebf924d33790c29636ad59433e10444b74bc2f | 9b746acf37ab357c147cdada1de5458c5fc64f53 | refs/heads/master | 2020-12-22T05:01:29.176696 | 2020-03-03T10:32:36 | 2020-03-03T10:32:36 | 202,111,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from django.contrib import admin
from .models import ObjectViewed, UserSession
admin.site.register(ObjectViewed)
admin.site.register(UserSession)
| [
"rupesh7399@gmail.com"
] | rupesh7399@gmail.com |
27c774ca6c97f31bc49ca5a0c1b98c71f35dbc89 | 00df54846f8ee079785e39844329cb764c52dcd4 | /message/views.py | bf0fc4d77a173b5e6b8958456a319309963758f5 | [] | no_license | idber/devops | 1d6dcae3f5bdd173b9f38985552d40bea191f0e0 | e0371c4ae7ae552489ea376ecdb72b8847fc41a8 | refs/heads/master | 2020-04-19T15:29:31.884030 | 2019-01-30T01:45:57 | 2019-01-30T01:45:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | from django.shortcuts import render
from message.models import Message
from django.http import Http404
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def log_audit(request):
'''
审计日志
'''
if request.user.is_superuser:
logs = Message.objects.all()[:300]
if request.method == 'GET':
if 'aid' in request.GET:
aid = request.get_full_path().split('=')[1]
log_detail = Message.objects.filter(id=aid)
data = {
'log_detail': log_detail,
'page_name': '日志明细'
}
return render(request, 'message/log_audit_detail.html',data)
data = {
'all_logs':logs,
'page_name':'审计日志'
}
return render(request, 'message/log_audit.html', data)
else:
raise Http404
| [
"ntuwang@126.com"
] | ntuwang@126.com |
b4a702b6ed8560ac00be41b9f452df78a097e8f9 | 8694f444cf64f28bd208a470551f2c267da040d6 | /spider_04_爬虫数据处理/douban案例/django_orm_usage/doubanBookSpider/spider/spider.py | a469fcf3edd508f0e6e9cb6f87952e5598216cf6 | [] | no_license | fadeawaylove/spider_improve | 2b52fa2b7c0fee990cc3442c7929a2a8eeb4d0f7 | da9c9e3c59c8dba235e9635d91bff6d4998e6588 | refs/heads/master | 2020-08-12T20:21:49.543179 | 2019-11-07T08:08:18 | 2019-11-07T08:08:18 | 214,837,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,380 | py | import requests
import lxml.etree
import re
import datetime
import random
from utils.spider_utils import get_free_proxies
class DoubanBook(object):
tag_list_url = "https://book.douban.com/tag/?icn=index-nav"
list_page_url = "https://book.douban.com/tag/{tag_name}?start={start}&type=T"
book_info = ["作者", "出版社", "出品方", "原作名", "译者", "出版年", "页数", "定价", "装帧", "丛书", "ISBN", "副标题"]
# 将book_info转换为中文
book_info_details = [("作者", "author"),
("出版社", "publisher"),
("出品方", "producer"),
("原作名", "original_title"),
("译者", "translator"),
("出版年", "publish_time"),
("页数", "page_number"),
("定价", "price"),
("装帧", "pack"),
("丛书", "series"),
("ISBN", "isbn"),
("副标题", "subtitle")]
request_header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36",
"Host": "book.douban.com",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-Site": "none",
"Sec-Fetch-User": "?1",
"Upgrade-Insecure-Requests": "1",
"Cookie": 'll="118282"; bid=sVgpNNcg4h0; _vwo_uuid_v2=DDF0192A91C8216C5A42BF421E30FB580|472b7c1a09d6b06a818bcbf1e7802f20; push_doumail_num=0; __utmv=30149280.20156; douban-fav-remind=1; dbcl2="201568471:ZpVhEkEQZ3I"; douban-profile-remind=1; ck=v-Tl; gr_user_id=fa3806ae-4b83-4ee8-a4ba-8a771cca6758; __utma=30149280.1614056093.1565774305.1568700472.1571111367.7; __utmc=30149280; __utmz=30149280.1571111367.7.7.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=81379588.116534101.1571111367.1571111367.1571111367.1; __utmc=81379588; __utmz=81379588.1571111367.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __yadk_uid=8njJt3CZEiFAL75t2C1mCl02qk13t4t1; push_noty_num=0; gr_cs1_be525764-c466-4c97-a8b6-a622ee68eab1=user_id%3A1; _pk_ref.100001.3ac3=%5B%22%22%2C%22%22%2C1571138283%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D7u5Jw1vq1YBlhMpYoZwEx7VUVd-BW1CTtLbUtNmWiRz1ZtjdbQEQQTw_sRjP76c0%26wd%3D%26eqid%3De3985ce400063ea7000000065da541c3%22%5D; _pk_ses.100001.3ac3=*; ap_v=0,6.0; gr_session_id_22c937bbd8ebd703f2d8e9445f7dfd03=abbd1d1e-54be-497b-a155-effa705c3b21; gr_cs1_abbd1d1e-54be-497b-a155-effa705c3b21=user_id%3A1; gr_session_id_22c937bbd8ebd703f2d8e9445f7dfd03_abbd1d1e-54be-497b-a155-effa705c3b21=true; _pk_id.100001.3ac3=bffae89b4350fc30.1571111367.3.1571139703.1571128736.'
}
https_proxies = {"https": random.choice(get_free_proxies()["https"])}
print(https_proxies)
def __init__(self, offset=20):
self.offset = offset
def get_tags(self):
resp = requests.get(self.tag_list_url)
print(resp.text)
html = lxml.etree.HTML(resp.content)
for tag in html.xpath("""//div[@class="article"]/div[2]/div"""):
big_tag = tag.xpath("./a/@name")
small_tag = tag.xpath("./table/tbody/tr/td/a/text()")
print(big_tag, small_tag)
yield big_tag, small_tag
def get_list_page(self, tag_name, page_num):
"""获取详情页地址"""
resp = requests.get(self.list_page_url.format(tag_name=tag_name, start=(page_num - 1) * self.offset),
proxies=self.https_proxies)
html = lxml.etree.HTML(resp.content)
detail_page_url_list = html.xpath("""//*[@id="subject_list"]/ul/li/div[1]/a/@href""")
has_next = True if html.xpath("""//span[@class="next"]/a/@href""") else False
# print(has_next, detail_page_url_list)
# print(has_next, resp.url)
return has_next, detail_page_url_list
def get_all_list_page(self, tag_name, page_num=0):
"""获取所有详情页的地址"""
hash_next, url_list = self.get_list_page(tag_name, page_num)
yield page_num, url_list
if hash_next:
yield from self.get_all_list_page(tag_name, page_num + 1)
def get_book_detail(self, url):
resp = requests.get(url, headers=self.request_header, proxies=self.https_proxies)
html = lxml.etree.HTML(resp.content)
book_info = html.xpath("""//div[@id="info"]//text()""")
book_info_str = "".join(book_info)
book_info_str = re.sub("\s", "", book_info_str)
# 作者:王朔出版社:云南人民出版社出版年:2004-9页数:224定价:20.00元装帧:平装丛书:王朔文集ISBN:9787222041226
book_detail = {}
for info, en_info in self.book_info_details:
temp_info = re.search(r"({}:.*?:)".format(info), book_info_str + ":")
if temp_info:
temp_info = temp_info.group(0)[0: -1]
temp_info = re.sub("{}$".format("$|".join(self.book_info)), "", temp_info)
_, content = temp_info.split(":")
book_detail[en_info] = content
else:
book_detail[en_info] = None
book_detail["title"] = html.xpath("""//div[@id="wrapper"]/h1/span/text()""")
book_detail["rating_num"] = html.xpath("""//div[@class="rating_self clearfix"]//strong/text()""")
book_detail["book_summary"] = html.xpath("""//div[@id="link-report"]/div[1]/div[@class="intro"]/p/text()""") \
or html.xpath("""//div[@class="related_info"]//div[@class="intro"]/p/text()""")
book_detail["author_summary"] = \
html.xpath("""//div[@class="related_info"]/div[@class="indent "]/div/div[@class="intro"]/p/text()""") or \
html.xpath("""//div[@class="related_info"]/div[2]/span/div/p/text()""") or \
html.xpath("""//*[@id="content"]/div/div[1]/div[3]/div[3]/span[1]/div/p/text()""")
return book_detail
def clean_detail(self, data):
# 1.去重,可根据ISBN
# 2.将数据处理为正确的格式
publish_time = data["publish_time"].split("-")
if len(publish_time) >= 2:
year, *m_d = publish_time
if len(m_d) == 1:
month = m_d[0]
day = 1
else:
month, day = m_d[0:2]
data["publish_time"] = datetime.date(int(year), int(month), int(day)).strftime("%Y-%m-%d")
else:
publish_time = datetime.datetime.strptime(data["publish_time"], "%Y年%m月")
data["publish_time"] = publish_time.strftime("%Y-%m-%d")
data["page_number"] = int(data["page_number"])
data["price"] = float(re.search("(.*)\w", data["price"]).group(1))
data["title"] = data["title"][0]
data["rating_num"] = float(data["rating_num"][0])
data["book_summary"] = "".join(data["book_summary"])
data["author_summary"] = "".join(data["author_summary"])
return data
def store_to_json(self, data, filename):
import json
with open(filename, "w") as file:
# ensure_ascii=False,保证存储进去后,不是unicode类型数据
file.write(json.dumps(data, indent=2, ensure_ascii=False))
if __name__ == '__main__':
douban = DoubanBook()
for x in douban.get_tags():
print(x)
| [
"dengrt@akulaku.com"
] | dengrt@akulaku.com |
9995f2b5def6c1301c8524c51013d6babad47b8d | 457c673c8c8d704ec150322e4eeee2fde4f827ca | /Python Fundamentals - January 2020/Basic_Syntax_Conditional_Statements_and_Loops_Exercise/03_Leonardo_DICaprio_Oscars.py | 57f953fa07bfb812d762ec8c1cb6bd4b23ec85cf | [] | no_license | xMrShadyx/SoftUni | 13c08d56108bf8b1ff56d17bb2a4b804381e0d4e | ce4adcd6e8425134d138fd8f4b6101d4eb1c520b | refs/heads/master | 2023-08-02T03:10:16.205251 | 2021-06-20T05:52:15 | 2021-06-20T05:52:15 | 276,562,926 | 5 | 1 | null | 2021-09-22T19:35:25 | 2020-07-02T06:07:35 | Python | UTF-8 | Python | false | false | 294 | py | oscar = int(input())
if oscar == 88:
print('Leo finally won the Oscar! Leo is happy')
elif oscar == 86:
print(f'Not even for Wolf of Wall Street?!')
elif oscar < 88 and 86 or oscar <= 88:
print("When will you give Leo an Oscar?")
elif oscar > 88:
print("Leo got one already!")
| [
"daredevil91138@gmail.com"
] | daredevil91138@gmail.com |
27a5c84cd1a806afab12078d6674fbb2f952c37e | 83b7bfc7c8db61ffe5490699347f11da2185287c | /env/bin/django-admin | 75649bf991bc1ca1deeb5f9666c7c52148410f21 | [] | no_license | Kennedy-Njeri/Geo-Django | aa8e428f63431ebf137f03b1f7a6ea9d716960c2 | 2924421ad543662e7718bfc2e3330d9e2d6e2d75 | refs/heads/master | 2020-05-09T15:10:47.000864 | 2019-04-14T20:57:17 | 2019-04-14T20:57:17 | 181,222,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | #!/Users/kennedy/Desktop/Geo-django/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"mistakenz123@gmail.com"
] | mistakenz123@gmail.com | |
ebbe6bda3d528d935983968f89a4a23146afb88b | 7b02a5580ff924dce93b85c8614ae5a1468cd15e | /experiment_brain_parcellation.py | 2dc8bc780bd44e4ad11bbf888998f913c2c10f2b | [] | no_license | dr-alok-tiwari/brain_segmentation | 96638aafd891718666e391dac10492b3d2b38e90 | 0b233a78cad1cff686637c90ef04abeef6d830d3 | refs/heads/master | 2023-08-09T15:24:58.312337 | 2015-01-11T17:07:43 | 2015-01-11T17:07:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,808 | py | __author__ = 'adeb'
from shutil import copy2
import inspect
import PIL
import pickle
from spynet.utils.utilities import analyse_classes
from data_brain_parcellation import DatasetBrainParcellation
from network_brain_parcellation import *
from spynet.models.network import *
from spynet.models.neuron_type import *
from spynet.data.dataset import *
from spynet.training.trainer import *
from spynet.training.monitor import *
from spynet.training.parameters_selector import *
from spynet.training.stopping_criterion import *
from spynet.training.cost_function import *
from spynet.training.learning_update import *
from spynet.experiment import Experiment
from spynet.utils.utilities import tile_raster_images
import theano
class ExperimentBrain(Experiment):
"""
Main experiment to train a network on a dataset
"""
def __init__(self, exp_name, data_path):
Experiment.__init__(self, exp_name, data_path)
def copy_file_virtual(self):
copy2(inspect.getfile(inspect.currentframe()), self.path)
def run(self):
###### Create the datasets
# aa = CostNegLLWeighted(np.array([0.9, 0.1]))
# e = theano.function(inputs=[], outputs=aa.test())
# print e()
## Load the data
training_data_path = self.data_path + "train.h5"
ds_training = DatasetBrainParcellation()
ds_training.read(training_data_path)
[ds_training, ds_validation] = ds_training.split_dataset_proportions([0.95, 0.05])
testing_data_path = self.data_path + "test.h5"
ds_testing = DatasetBrainParcellation()
ds_testing.read(testing_data_path)
## Display data sample
# image = PIL.Image.fromarray(tile_raster_images(X=ds_training.inputs[0:50],
# img_shape=(29, 29), tile_shape=(5, 10),
# tile_spacing=(1, 1)))
# image.save(self.path + "filters_corruption_30.png")
## Few stats about the targets
classes, proportion_class = analyse_classes(np.argmax(ds_training.outputs, axis=1), "Training data:")
print classes
## Scale some part of the data
print "Scaling"
s = Scaler([slice(-134, None, None)])
s.compute_parameters(ds_training.inputs)
s.scale(ds_training.inputs)
s.scale(ds_validation.inputs)
s.scale(ds_testing.inputs)
pickle.dump(s, open(self.path + "s.scaler", "wb"))
###### Create the network
net = NetworkUltimateConv()
net.init(33, 29, 5, 134, 135)
print net
###### Configure the trainer
# Cost function
cost_function = CostNegLL(net.ls_params)
# Learning update
learning_rate = 0.05
momentum = 0.5
lr_update = LearningUpdateGDMomentum(learning_rate, momentum)
# Create monitors and add them to the trainer
freq = 1
freq2 = 0.00001
# err_training = MonitorErrorRate(freq, "Train", ds_training)
# err_testing = MonitorErrorRate(freq, "Test", ds_testing)
err_validation = MonitorErrorRate(freq, "Val", ds_validation)
# dice_training = MonitorDiceCoefficient(freq, "Train", ds_training, 135)
dice_testing = MonitorDiceCoefficient(freq, "Test", ds_testing, 135)
# dice_validation = MonitorDiceCoefficient(freq, "Val", ds_validation, 135)
# Create stopping criteria and add them to the trainer
max_epoch = MaxEpoch(300)
early_stopping = EarlyStopping(err_validation, 10, 0.99, 5)
# Create the network selector
params_selector = ParamSelectorBestMonitoredValue(err_validation)
# Create the trainer object
batch_size = 200
t = Trainer(net, cost_function, params_selector, [max_epoch, early_stopping],
lr_update, ds_training, batch_size,
[err_validation, dice_testing])
###### Train the network
t.train()
###### Plot the records
# pred = np.argmax(t.net.predict(ds_testing.inputs, 10000), axis=1)
# d = compute_dice(pred, np.argmax(ds_testing.outputs, axis=1), 134)
# print "Dice test: {}".format(np.mean(d))
# print "Error rate test: {}".format(error_rate(np.argmax(ds_testing.outputs, axis=1), pred))
save_records_plot(self.path, [err_validation], "err", t.n_train_batches, "upper right")
# save_records_plot(self.path, [dice_testing], "dice", t.n_train_batches, "lower right")
###### Save the network
net.save_parameters(self.path + "net.net")
if __name__ == '__main__':
exp_name = "paper_ultimate_conv"
data_path = "./datasets/paper_ultimate_conv/"
exp = ExperimentBrain(exp_name, data_path)
exp.run() | [
"adbrebs@gmail.com"
] | adbrebs@gmail.com |
fa626b028d0775bee0feb369374052f9523ec263 | b44a984ac8cfd183e218d56e1ec5d0d3e72d20fd | /High_Frequency/BFS/Normal/Number of Big Islands/bfs.py | 7700361d8fde7e07bf4ccef36dd2c64d3a73a234 | [] | no_license | atomextranova/leetcode-python | 61381949f2e78805dfdd0fb221f8497b94b7f12b | 5fce59e6b9c4079b49e2cfb2a6d2a61a0d729c56 | refs/heads/master | 2021-07-15T20:32:12.592607 | 2020-09-21T00:10:27 | 2020-09-21T00:10:27 | 207,622,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | import collections
DIRECTIONS = [(0, -1), (0, 1), (-1, 0), (1, 0)]
class Solution:
"""
@param grid: a 2d boolean array
@param k: an integer
@return: the number of Islands
"""
def numsofIsland(self, grid, k):
# Write your code here
if not grid or not grid[0]:
return 0
self.col_len = len(grid)
self.row_len = len(grid[0])
self.visited = set()
result = 0
for i in range(self.col_len):
for j in range(self.row_len):
if self.is_valid(grid, i, j):
island_size = self.bfs(grid, i, j)
if island_size >= k:
result += 1
return result
def bfs(self, grid, i, j):
deque = collections.deque([(i, j)])
self.visited.add((i, j))
size = 0
cur = []
while deque:
x, y = deque.popleft()
cur.append((x, y))
size += 1
for dx, dy in DIRECTIONS:
new_x, new_y = x + dx, y + dy
if self.is_valid(grid, new_x, new_y):
deque.append((new_x, new_y))
self.visited.add((new_x, new_y))
return size
def is_valid(self, grid, i, j):
return -1 < i < self.col_len and -1 < j < self.row_len and (
i, j) not in self.visited and grid[i][j] == 1
sol = Solution()
sol.numsofIsland([[1,1,0,1,0],[0,0,0,1,1],[1,1,0,1,0],[1,1,0,0,0],[0,0,0,0,1]], 5) | [
"atomextranova@gmail.com"
] | atomextranova@gmail.com |
8ce4c2319c585a7dd079e69d0ca3dc8c5b98cd32 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/eqpt/ingrdroppkts.py | 478c00de06b7c4e986fd229f08ef660e7c8dc624 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,753 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class IngrDropPkts(Mo):
meta = StatsClassMeta("cobra.model.eqpt.IngrDropPkts", "Ingress Drop Packets")
counter = CounterMeta("error", CounterCategory.COUNTER, "packets", "Ingress Error Drop Packets")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "errorLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "errorCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "errorPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "errorMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "errorMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "errorAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "errorSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "errorBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "errorThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "errorTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "errorTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "errorRate"
meta._counters.append(counter)
counter = CounterMeta("lb", CounterCategory.COUNTER, "packets", "Ingress Load Balancer Drop Packets")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "lbLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "lbCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "lbPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "lbMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "lbMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "lbAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "lbSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "lbBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "lbThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "lbTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "lbTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "lbRate"
meta._counters.append(counter)
counter = CounterMeta("buffer", CounterCategory.COUNTER, "packets", "Ingress Buffer Drop Packets")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "bufferLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "bufferCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "bufferPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "bufferMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "bufferMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "bufferAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bufferSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "bufferBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bufferThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "bufferTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "bufferTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "bufferRate"
meta._counters.append(counter)
counter = CounterMeta("forwarding", CounterCategory.COUNTER, "packets", "Ingress Forwarding Drop Packets")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "forwardingLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "forwardingCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "forwardingPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "forwardingMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "forwardingMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "forwardingAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "forwardingSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "forwardingBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "forwardingThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "forwardingTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "forwardingTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "forwardingRate"
meta._counters.append(counter)
meta.isAbstract = True
meta.moClassName = "eqptIngrDropPkts"
meta.moClassName = "eqptIngrDropPkts"
meta.rnFormat = ""
meta.category = MoCategory.STATS_CURRENT
meta.label = "current Ingress Drop Packets stats"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.concreteSubClasses.add("cobra.model.eqpt.IngrDropPkts1qtr")
meta.concreteSubClasses.add("cobra.model.eqpt.IngrDropPkts5min")
meta.concreteSubClasses.add("cobra.model.eqpt.IngrDropPkts1mo")
meta.concreteSubClasses.add("cobra.model.eqpt.IngrDropPkts1h")
meta.concreteSubClasses.add("cobra.model.eqpt.IngrDropPkts15min")
meta.concreteSubClasses.add("cobra.model.eqpt.IngrDropPkts1w")
meta.concreteSubClasses.add("cobra.model.eqpt.IngrDropPkts1year")
meta.concreteSubClasses.add("cobra.model.eqpt.IngrDropPkts1d")
meta.rnPrefixes = [
]
prop = PropMeta("str", "bufferAvg", "bufferAvg", 8432, PropCategory.IMPLICIT_AVG)
prop.label = "Ingress Buffer Drop Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("bufferAvg", prop)
prop = PropMeta("str", "bufferBase", "bufferBase", 8427, PropCategory.IMPLICIT_BASELINE)
prop.label = "Ingress Buffer Drop Packets baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("bufferBase", prop)
prop = PropMeta("str", "bufferCum", "bufferCum", 8428, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Ingress Buffer Drop Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("bufferCum", prop)
prop = PropMeta("str", "bufferLast", "bufferLast", 8426, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Ingress Buffer Drop Packets current value"
prop.isOper = True
prop.isStats = True
meta.props.add("bufferLast", prop)
prop = PropMeta("str", "bufferMax", "bufferMax", 8431, PropCategory.IMPLICIT_MAX)
prop.label = "Ingress Buffer Drop Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bufferMax", prop)
prop = PropMeta("str", "bufferMin", "bufferMin", 8430, PropCategory.IMPLICIT_MIN)
prop.label = "Ingress Buffer Drop Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bufferMin", prop)
prop = PropMeta("str", "bufferPer", "bufferPer", 8429, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Ingress Buffer Drop Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("bufferPer", prop)
prop = PropMeta("str", "bufferRate", "bufferRate", 8437, PropCategory.IMPLICIT_RATE)
prop.label = "Ingress Buffer Drop Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("bufferRate", prop)
prop = PropMeta("str", "bufferSpct", "bufferSpct", 8433, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Ingress Buffer Drop Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("bufferSpct", prop)
prop = PropMeta("str", "bufferThr", "bufferThr", 8434, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Ingress Buffer Drop Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("bufferThr", prop)
prop = PropMeta("str", "bufferTr", "bufferTr", 8436, PropCategory.IMPLICIT_TREND)
prop.label = "Ingress Buffer Drop Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("bufferTr", prop)
prop = PropMeta("str", "bufferTrBase", "bufferTrBase", 8435, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Ingress Buffer Drop Packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("bufferTrBase", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "errorAvg", "errorAvg", 8459, PropCategory.IMPLICIT_AVG)
prop.label = "Ingress Error Drop Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("errorAvg", prop)
prop = PropMeta("str", "errorBase", "errorBase", 8454, PropCategory.IMPLICIT_BASELINE)
prop.label = "Ingress Error Drop Packets baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("errorBase", prop)
prop = PropMeta("str", "errorCum", "errorCum", 8455, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Ingress Error Drop Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("errorCum", prop)
prop = PropMeta("str", "errorLast", "errorLast", 8453, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Ingress Error Drop Packets current value"
prop.isOper = True
prop.isStats = True
meta.props.add("errorLast", prop)
prop = PropMeta("str", "errorMax", "errorMax", 8458, PropCategory.IMPLICIT_MAX)
prop.label = "Ingress Error Drop Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("errorMax", prop)
prop = PropMeta("str", "errorMin", "errorMin", 8457, PropCategory.IMPLICIT_MIN)
prop.label = "Ingress Error Drop Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("errorMin", prop)
prop = PropMeta("str", "errorPer", "errorPer", 8456, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Ingress Error Drop Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("errorPer", prop)
prop = PropMeta("str", "errorRate", "errorRate", 8464, PropCategory.IMPLICIT_RATE)
prop.label = "Ingress Error Drop Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("errorRate", prop)
prop = PropMeta("str", "errorSpct", "errorSpct", 8460, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Ingress Error Drop Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("errorSpct", prop)
prop = PropMeta("str", "errorThr", "errorThr", 8461, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Ingress Error Drop Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("errorThr", prop)
prop = PropMeta("str", "errorTr", "errorTr", 8463, PropCategory.IMPLICIT_TREND)
prop.label = "Ingress Error Drop Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("errorTr", prop)
prop = PropMeta("str", "errorTrBase", "errorTrBase", 8462, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Ingress Error Drop Packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("errorTrBase", prop)
prop = PropMeta("str", "forwardingAvg", "forwardingAvg", 8486, PropCategory.IMPLICIT_AVG)
prop.label = "Ingress Forwarding Drop Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("forwardingAvg", prop)
prop = PropMeta("str", "forwardingBase", "forwardingBase", 8481, PropCategory.IMPLICIT_BASELINE)
prop.label = "Ingress Forwarding Drop Packets baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("forwardingBase", prop)
prop = PropMeta("str", "forwardingCum", "forwardingCum", 8482, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Ingress Forwarding Drop Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("forwardingCum", prop)
prop = PropMeta("str", "forwardingLast", "forwardingLast", 8480, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Ingress Forwarding Drop Packets current value"
prop.isOper = True
prop.isStats = True
meta.props.add("forwardingLast", prop)
prop = PropMeta("str", "forwardingMax", "forwardingMax", 8485, PropCategory.IMPLICIT_MAX)
prop.label = "Ingress Forwarding Drop Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("forwardingMax", prop)
prop = PropMeta("str", "forwardingMin", "forwardingMin", 8484, PropCategory.IMPLICIT_MIN)
prop.label = "Ingress Forwarding Drop Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("forwardingMin", prop)
prop = PropMeta("str", "forwardingPer", "forwardingPer", 8483, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Ingress Forwarding Drop Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("forwardingPer", prop)
prop = PropMeta("str", "forwardingRate", "forwardingRate", 8491, PropCategory.IMPLICIT_RATE)
prop.label = "Ingress Forwarding Drop Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("forwardingRate", prop)
prop = PropMeta("str", "forwardingSpct", "forwardingSpct", 8487, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Ingress Forwarding Drop Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("forwardingSpct", prop)
prop = PropMeta("str", "forwardingThr", "forwardingThr", 8488, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Ingress Forwarding Drop Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("forwardingThr", prop)
prop = PropMeta("str", "forwardingTr", "forwardingTr", 8490, PropCategory.IMPLICIT_TREND)
prop.label = "Ingress Forwarding Drop Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("forwardingTr", prop)
prop = PropMeta("str", "forwardingTrBase", "forwardingTrBase", 8489, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Ingress Forwarding Drop Packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("forwardingTrBase", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "lbAvg", "lbAvg", 8513, PropCategory.IMPLICIT_AVG)
prop.label = "Ingress Load Balancer Drop Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("lbAvg", prop)
prop = PropMeta("str", "lbBase", "lbBase", 8508, PropCategory.IMPLICIT_BASELINE)
prop.label = "Ingress Load Balancer Drop Packets baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("lbBase", prop)
prop = PropMeta("str", "lbCum", "lbCum", 8509, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Ingress Load Balancer Drop Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("lbCum", prop)
prop = PropMeta("str", "lbLast", "lbLast", 8507, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Ingress Load Balancer Drop Packets current value"
prop.isOper = True
prop.isStats = True
meta.props.add("lbLast", prop)
prop = PropMeta("str", "lbMax", "lbMax", 8512, PropCategory.IMPLICIT_MAX)
prop.label = "Ingress Load Balancer Drop Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("lbMax", prop)
prop = PropMeta("str", "lbMin", "lbMin", 8511, PropCategory.IMPLICIT_MIN)
prop.label = "Ingress Load Balancer Drop Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("lbMin", prop)
prop = PropMeta("str", "lbPer", "lbPer", 8510, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Ingress Load Balancer Drop Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("lbPer", prop)
prop = PropMeta("str", "lbRate", "lbRate", 8518, PropCategory.IMPLICIT_RATE)
prop.label = "Ingress Load Balancer Drop Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("lbRate", prop)
prop = PropMeta("str", "lbSpct", "lbSpct", 8514, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Ingress Load Balancer Drop Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("lbSpct", prop)
prop = PropMeta("str", "lbThr", "lbThr", 8515, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Ingress Load Balancer Drop Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("lbThr", prop)
prop = PropMeta("str", "lbTr", "lbTr", 8517, PropCategory.IMPLICIT_TREND)
prop.label = "Ingress Load Balancer Drop Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("lbTr", prop)
prop = PropMeta("str", "lbTrBase", "lbTrBase", 8516, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Ingress Load Balancer Drop Packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("lbTrBase", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
63d0fc53f9cc76b62fdd92fc85ebfca1525ae1a9 | f07c7e3966de00005230ebe31ab0579b92b66872 | /math_utils/convolution.py | 6d414a7c9bd485bd3e9a29deee84ce7215587363 | [
"Apache-2.0"
] | permissive | Algomorph/LevelSetFusion-Python | 30d990228e3d63a40668ade58e7879ae6e581719 | 46625cd185da4413f9afaf201096203ee72d3803 | refs/heads/master | 2021-06-25T11:30:44.672555 | 2020-11-11T14:47:33 | 2020-11-11T14:47:33 | 152,263,399 | 12 | 2 | Apache-2.0 | 2019-05-30T23:12:33 | 2018-10-09T14:15:03 | Python | UTF-8 | Python | false | false | 7,026 | py | # ================================================================
# Created by Gregory Kramida on 9/18/18.
# Copyright (c) 2018 Gregory Kramida
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ================================================================
import numpy as np
from utils.sampling import get_focus_coordinates
from utils.printing import *
sobolev_kernel_1d = np.array([2.995900285895913839e-04,
4.410949535667896271e-03,
6.571318954229354858e-02,
9.956527948379516602e-01,
6.571318954229354858e-02,
4.410949535667896271e-03,
2.995900285895913839e-04])
def convolve_with_kernel_y(vector_field, kernel):
y_convolved = np.zeros_like(vector_field)
if len(vector_field.shape) == 3 and vector_field.shape[2] == 2:
for x in range(vector_field.shape[1]):
y_convolved[:, x, 0] = np.convolve(vector_field[:, x, 0], kernel, mode='same')
y_convolved[:, x, 1] = np.convolve(vector_field[:, x, 1], kernel, mode='same')
np.copyto(vector_field, y_convolved)
elif len(vector_field.shape) == 4 and vector_field.shape[3] == 3:
for z in range(vector_field.shape[2]):
for x in range(vector_field.shape[0]):
for i_val in range(3):
y_convolved[x, :, z, i_val] = np.convolve(vector_field[x, :, z, i_val], kernel, mode='same')
else:
raise ValueError("Can only process tensors with 3 dimensions (where last dimension is 2) or "
"tensors with 4 dimensions (where last dimension is 3), i.e. 2D & 3D vector fields")
return y_convolved
def convolve_with_kernel_x(vector_field, kernel):
x_convolved = np.zeros_like(vector_field)
if len(vector_field.shape) == 3 and vector_field.shape[2] == 2:
for y in range(vector_field.shape[0]):
x_convolved[y, :, 0] = np.convolve(vector_field[y, :, 0], kernel, mode='same')
x_convolved[y, :, 1] = np.convolve(vector_field[y, :, 1], kernel, mode='same')
elif len(vector_field.shape) == 4 and vector_field.shape[3] == 3:
for z in range(vector_field.shape[0]):
for y in range(vector_field.shape[1]):
for i_val in range(3):
x_convolved[z, y, :, i_val] = np.convolve(vector_field[z, y, :, i_val], kernel, mode='same')
else:
raise ValueError("Can only process tensors with 3 dimensions (where last dimension is 2) or "
"tensors with 4 dimensions (where last dimension is 3), i.e. 2D & 3D vector fields")
np.copyto(vector_field, x_convolved)
return x_convolved
def convolve_with_kernel_z(vector_field, kernel):
if len(vector_field.shape) != 4 or vector_field.shape[3] != 3:
raise ValueError("Can only process tensors with 4 dimensions (where last dimension is 3), i.e. 3D Vector field")
def convolve_with_kernel(vector_field, kernel=sobolev_kernel_1d, print_focus_coord_info=False):
x_convolved = np.zeros_like(vector_field)
y_convolved = np.zeros_like(vector_field)
z_convolved = None
if len(vector_field.shape) == 3 and vector_field.shape[2] == 2:
focus_coordinates = get_focus_coordinates()
for x in range(vector_field.shape[1]):
y_convolved[:, x, 0] = np.convolve(vector_field[:, x, 0], kernel, mode='same')
y_convolved[:, x, 1] = np.convolve(vector_field[:, x, 1], kernel, mode='same')
for y in range(vector_field.shape[0]):
x_convolved[y, :, 0] = np.convolve(y_convolved[y, :, 0], kernel, mode='same')
x_convolved[y, :, 1] = np.convolve(y_convolved[y, :, 1], kernel, mode='same')
if print_focus_coord_info:
new_gradient_at_focus = vector_field[focus_coordinates[1], focus_coordinates[0]]
print(
" H1 grad: {:s}[{:f} {:f}{:s}]".format(BOLD_GREEN, -new_gradient_at_focus[0], -new_gradient_at_focus[1],
RESET), sep='', end='')
np.copyto(vector_field, x_convolved)
elif len(vector_field.shape) == 4 and vector_field.shape[3] == 3:
z_convolved = np.zeros_like(vector_field)
for z in range(vector_field.shape[0]):
for y in range(vector_field.shape[1]):
for i_val in range(3):
x_convolved[z, y, :, i_val] = np.convolve(vector_field[z, y, :, i_val], kernel, mode='same')
for z in range(vector_field.shape[0]):
for x in range(vector_field.shape[2]):
for i_val in range(3):
y_convolved[z, :, x, i_val] = np.convolve(x_convolved[z, :, x, i_val], kernel, mode='same')
for y in range(vector_field.shape[1]):
for x in range(vector_field.shape[2]):
for i_val in range(3):
z_convolved[:, y, x, i_val] = np.convolve(y_convolved[:, y, x, i_val], kernel, mode='same')
np.copyto(vector_field, z_convolved)
else:
raise ValueError("Can only process tensors with 3 dimensions (where last dimension is 2) or "
"tensors with 4 dimensions (where last dimension is 3), i.e. 2D & 3D vector fields")
return vector_field
def convolve_with_kernel_preserve_zeros(vector_field, kernel=sobolev_kernel_1d, print_focus_coord_info=False):
x_convolved = np.zeros_like(vector_field)
y_convolved = np.zeros_like(vector_field)
focus_coordinates = get_focus_coordinates()
zero_check = np.abs(vector_field) < 1e-6
for x in range(vector_field.shape[1]):
y_convolved[:, x, 0] = np.convolve(vector_field[:, x, 0], kernel, mode='same')
y_convolved[:, x, 1] = np.convolve(vector_field[:, x, 1], kernel, mode='same')
y_convolved[zero_check] = 0.0
for y in range(vector_field.shape[0]):
x_convolved[y, :, 0] = np.convolve(y_convolved[y, :, 0], kernel, mode='same')
x_convolved[y, :, 1] = np.convolve(y_convolved[y, :, 1], kernel, mode='same')
x_convolved[zero_check] = 0.0
np.copyto(vector_field, x_convolved)
if print_focus_coord_info:
new_gradient_at_focus = vector_field[focus_coordinates[1], focus_coordinates[0]]
print(" H1 grad: {:s}[{:f} {:f}{:s}]".format(BOLD_GREEN, -new_gradient_at_focus[0], -new_gradient_at_focus[1],
RESET), sep='', end='')
return vector_field
| [
"algomorph@gmail.com"
] | algomorph@gmail.com |
7e4b361f79a43152d672caa1c83ae56bd44ff673 | 5a25edcf994a760688dc7c933e8071bf4ff24df3 | /exercises/en/exc_04_11_02.py | a9511ce0f53ab920097e6454be946cf6c9569440 | [
"CC-BY-NC-4.0",
"MIT"
] | permissive | heyMP/spacy-course | 8762990ed6179011680730d9c24d5d34c0a8d954 | 3740c717f0d1090b01c1b0fe23f8e30af3bf0101 | refs/heads/master | 2022-11-07T21:52:15.479840 | 2020-06-25T18:13:44 | 2020-06-25T18:13:44 | 275,202,487 | 1 | 0 | MIT | 2020-06-26T16:39:32 | 2020-06-26T16:39:31 | null | UTF-8 | Python | false | false | 419 | py | TRAINING_DATA = [
(
"Reddit partners with Patreon to help creators build communities",
{"entities": [(0, 6, "WEBSITE"), (21, 28, "WEBSITE")]},
),
("PewDiePie smashes YouTube record", {"entities": [____, (18, 25, "WEBSITE")]}),
(
"Reddit founder Alexis Ohanian gave away two Metallica tickets to fans",
{"entities": [(0, 6, "WEBSITE"), ____]},
),
# And so on...
]
| [
"ines@ines.io"
] | ines@ines.io |
b5b3be15067d366ee420348a2d7f641af1b2cf54 | d92db7dc61cd6ed719b691866d6512713c981a1b | /tests/transformations_test/RemoveEpsilonRules/Reverse/__init__.py | 015f6088ca4d2afda459d5ea45855316fe7366f6 | [
"MIT"
] | permissive | PatrikValkovic/grammpy | e4dfdc28f023415909a666581892a480392c7cb7 | 8308a1fd349bf9ea0d267360cc9a4ab20d1629e8 | refs/heads/master | 2021-07-24T12:05:13.685812 | 2021-06-10T12:50:41 | 2021-06-10T12:50:41 | 95,211,841 | 2 | 1 | null | 2017-07-29T10:44:43 | 2017-06-23T10:51:44 | Python | UTF-8 | Python | false | false | 110 | py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 03.09.2017 16:15
:Licence MIT
Part of grammpy
"""
| [
"patrik.valkovic@hotmail.cz"
] | patrik.valkovic@hotmail.cz |
e5536d37b56d755c780c4e6d352acd6422f843bd | 04a4d89bc7915e0624abf95651e5ad21d9ed6da2 | /base/src/cloudstrype/array.py | 8ecac8685d1afb3f2bf4bec40a582be19bf1dc0f | [] | no_license | btimby/cloudstrype-too | 5af8f8a4fecb60838093aafc6de44cab5bf5da7c | 6bc600de7d181c41a9d7e7cca557025c6aea16f2 | refs/heads/master | 2021-04-29T21:49:14.585978 | 2018-02-15T22:18:14 | 2018-02-15T22:18:14 | 121,624,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | from __future__ import absolute_import
import os
from .base import sync_wrapper
from .auth import AuthClient
ARRAY_URL = os.environ.get('CLOUDSTRYPE_ARRAY_URL', 'http://array/')
class ArrayClient(AuthClient):
base_url = ARRAY_URL
def __init__(self, *args, **kwargs, replicas=1):
super().__init__(*args, **kwargs)
self.replicas = replicas
async def put(self, chunk_id, params=None, replicas=None):
replicas = replicas or self.replicas
params = params or {}
params.setdefault('replicas', replicas)
return await self.request('POST', '/chunk/', params=params)
put_sync = sync_wrapper(put)
async def get(self, chunk_id, params=None):
return await self.request(
'GET', '/chunk/%s/' % chunk_id, params=params)
get_sync = sync_wrapper(get)
| [
"btimby@gmail.com"
] | btimby@gmail.com |
507a8251ec9f391f3544477ad1510d2654cb40f3 | 4c4c589f9047c60eb3d65d5a7fa86ded7c6c1d64 | /populators/create_all.py | 333b163df8a55cfe1deeb691c55c3af5c901374b | [] | no_license | navkal/el | 39a27e92283f922219cebffa3821806fe5cd8a5e | a4739dc33022fb1b4e9a6f71ef40c989896b08f5 | refs/heads/master | 2023-08-09T03:35:06.918760 | 2023-08-04T15:57:12 | 2023-08-04T15:57:12 | 237,359,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | # Copyright 2019 Energize Andover. All rights reserved.
import argparse
import os
import sys
sys.path.append( '../util' )
import util
# Main program
if __name__ == '__main__':
parser = argparse.ArgumentParser( description='Create all Andover databases' )
parser.add_argument( '-d', dest='debug', action='store_true', help='Generate debug versions of databases?' )
args = parser.parse_args()
PYTHON = 'python '
print( '\n=======> Master' )
os.system( PYTHON + 'master.py -o ../db/master.sqlite' )
print( '\n=======> Publish' )
os.system( PYTHON + 'publish.py -i ../db/master.sqlite -o ../db' )
if args.debug:
print( '\n=======> Master Debug' )
os.system( PYTHON + 'master.py -o ../db/master_debug.sqlite -d' )
print( '\n=======> Publish Debug' )
os.system( PYTHON + 'publish.py -i ../db/master_debug.sqlite -o ../db -d' )
util.report_elapsed_time()
| [
"navkal@hotmail.com"
] | navkal@hotmail.com |
b0d8569db3fe5071021907875f85cbb80e012dd1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03646/s658997973.py | aa705f075e79683fc7b7b695a6c8322e03dacc5a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | k = int(input())
n = 50
q, r = divmod(k, n)
A = [n-1]*n
for i in range(n):
A[i] += q
for i in range(r):
A[i] += n+1
for j in range(n):
A[j] -= 1
print(n)
print(*A)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4115eda73266412d21b877ad0115bc888ce614c4 | f9215dc23bf0ab8ef9730e78413e5844247d270b | /jarbas/core/tests/__init__.py | d5b296cc9396389d1a2b48c6d00078e0e120f390 | [
"MIT"
] | permissive | vitallan/jarbas | 6c873c98663e68e5d8c2192d0648c6870a01a4a1 | 2ffdccfec499c271bec13bf518c847d8d5210d94 | refs/heads/master | 2021-01-13T04:31:09.492131 | 2016-09-26T19:39:55 | 2016-09-26T21:27:52 | 69,503,820 | 1 | 0 | null | 2016-09-28T21:07:35 | 2016-09-28T21:07:35 | null | UTF-8 | Python | false | false | 1,606 | py | from datetime import date, datetime
sample_document_data = dict(
document_id=42,
congressperson_name='Roger That',
congressperson_id=1,
congressperson_document=2,
term=1970,
state='UF',
party='Partido',
term_id=3,
subquota_number=4,
subquota_description='Subquota description',
subquota_group_id=5,
subquota_group_description='Subquota group desc',
supplier='Acme',
cnpj_cpf='11111111111111',
document_number='6',
document_type=7,
issue_date='1970-01-01 00:00:00',
document_value=8.90,
remark_value=1.23,
net_value=4.56,
month=1,
year=1970,
installment=7,
passenger='John Doe',
leg_of_the_trip=8,
batch_number=9,
reimbursement_number=10,
reimbursement_value=11.12,
applicant_id=13
)
sample_activity_data = dict(
code='42',
description='So long, so long, and thanks for all the fish'
)
sample_supplier_data = dict(
cnpj='12.345.678/9012-34',
opening=date(1995, 9, 27),
legal_entity='42 - The answer to life, the universe, and everything',
trade_name="Don't panic",
name='Do not panic, sir',
type='BOOK',
status='OK',
situation='EXISTS',
situation_reason='Douglas Adams wrote it',
situation_date=date(2016, 9, 25),
special_situation='WE LOVE IT',
special_situation_date=date(1997, 9, 28),
responsible_federative_entity='Vogons',
address='Earth',
number='',
additional_address_details='',
neighborhood='',
zip_code='',
city='',
state='',
email='',
phone='',
last_updated=datetime.now(),
)
| [
"cuducos@gmail.com"
] | cuducos@gmail.com |
0ca467d38b50cb1b3fea439d181379282e495201 | e2cf46746537799f2584fa9bc3307c95f11768e3 | /flashsale/jimay/models/order.py | faa88c71549e4202226cdc4bc94d4af4f50fc0ef | [] | no_license | wahello/xiaolusys | 3c7801543d352a7a1b1825481982cea635ebcdd4 | 7296b9b68167001c91f4b07c1f8d441cc5653578 | refs/heads/master | 2020-03-30T03:39:26.485590 | 2018-08-21T06:23:05 | 2018-08-21T06:23:05 | 150,700,684 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,196 | py | # coding: utf8
from __future__ import absolute_import, unicode_literals
import datetime
import hashlib
from django.db import models, transaction
from django.dispatch import receiver
from django.utils.functional import cached_property
from signals.jimay import signal_jimay_agent_order_ensure, signal_jimay_agent_order_paid
from core.utils.unikey import uniqid
import logging
logger = logging.getLogger(__name__)
def gen_uuid_order_no():
return uniqid('%s%s' % (JimayAgentOrder.PREFIX_CODE, datetime.date.today().strftime('%y%m%d')))
class JimayAgentOrder(models.Model):
PREFIX_CODE = 'ad'
ST_CREATE = 0
ST_ENSURE = 1
ST_PAID = 2
ST_SEND = 3
ST_COMPLETED = 4
ST_CANCEL = 5
ST_CHOICES = (
(ST_CREATE, '已提交申请'),
(ST_ENSURE, '已确认订金'),
(ST_PAID, '已确认付款'),
(ST_SEND, '已打包出库'),
(ST_COMPLETED, '已签收完成'),
(ST_CANCEL, '已取消订货'),
)
UNSURE = 0
WEIXIN = 1
ALIPAY = 2
BANK = 3
CHANNEL_CHOICES = (
(UNSURE, '未知渠道'),
(WEIXIN, '个人微信'),
(ALIPAY, '个人支付宝'),
(BANK, '银行转账'),
)
buyer = models.ForeignKey('pay.Customer', verbose_name='原始用户')
order_no = models.CharField(max_length=24, default=gen_uuid_order_no, unique=True, verbose_name='订单编号')
title = models.CharField(max_length=64, blank=True, verbose_name='商品名称')
pic_path = models.CharField(max_length=256, blank=True, verbose_name='商品图片')
model_id = models.IntegerField(default=0, verbose_name='款式ID')
sku_id = models.IntegerField(default=0, verbose_name='SKUID')
num = models.IntegerField(default=0, verbose_name='数量')
total_fee = models.IntegerField(default=0, verbose_name='商品总价(分)', help_text='商品零售价')
payment = models.IntegerField(default=0, verbose_name='支付金额(分)', help_text='现默认由运营人员填写')
address = models.ForeignKey('pay.UserAddress', related_name='jimay_agent_manager', verbose_name='用户地址')
status = models.IntegerField(default=ST_CREATE, db_index=True, choices=ST_CHOICES, verbose_name='状态')
ensure_time = models.DateTimeField(blank=True, null=True, verbose_name='审核时间')
pay_time = models.DateTimeField(blank=True, null=True, verbose_name='付款时间')
channel = models.IntegerField(choices=CHANNEL_CHOICES, default=UNSURE, db_index=True ,verbose_name='支付渠道')
logistic = models.ForeignKey('logistics.LogisticsCompany', null=True, blank=True, verbose_name='物流公司')
logistic_no = models.CharField(max_length=32, blank=True, verbose_name='物流单号')
send_time = models.DateTimeField(blank=True, null=True, verbose_name='发货时间')
manager = models.ForeignKey('auth.user', blank=True, null=True, verbose_name='管理员')
sys_memo = models.CharField(max_length=512, blank=True, verbose_name='系统备注')
created = models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='创建日期')
modified = models.DateTimeField(auto_now=True, verbose_name='修改日期')
class Meta:
db_table = 'jimay_agentorder'
app_label = 'jimay'
verbose_name = '己美医学/订货记录'
verbose_name_plural = '己美医学/订货记录'
def __unicode__(self):
return '%s,%s' % (self.id, self.buyer)
@classmethod
def gen_unique_order_no(cls):
return gen_uuid_order_no()
@classmethod
def is_createable(cls, buyer):
return not cls.objects.filter(buyer=buyer,status=JimayAgentOrder.ST_CREATE).exists()
def save(self, *args, **kwargs):
if self.status == JimayAgentOrder.ST_ENSURE and not self.pay_time:
self.action_ensure(self.ensure_time or datetime.datetime.now())
if self.status in (JimayAgentOrder.ST_PAID, JimayAgentOrder.ST_SEND) and self.pay_time:
self.action_paid(self.pay_time or datetime.datetime.now())
resp = super(JimayAgentOrder, self).save(*args, **kwargs)
return resp
def is_cancelable(self):
return self.status == JimayAgentOrder.ST_CREATE
def set_status_canceled(self):
self.status = JimayAgentOrder.ST_CANCEL
def action_ensure(self, time_ensure):
""" 订单审核通过 """
transaction.on_commit(lambda: signal_jimay_agent_order_ensure.send_robust(
sender=JimayAgentOrder,
obj=self,
time_ensure=time_ensure
))
def action_paid(self, time_paid):
""" 订单支付通知 """
transaction.on_commit(lambda: signal_jimay_agent_order_paid.send_robust(
sender=JimayAgentOrder,
obj=self,
time_paid=time_paid
))
@receiver(signal_jimay_agent_order_ensure, sender=JimayAgentOrder)
def jimay_order_ensure_weixin_paynotify(sender, obj, time_ensure, **kwargs):
    """Queue a WeChat message with a payment QR code after an order is approved.

    Looks up the WeChat account configured for the Jimay public account and
    hands the notification off to an async task. Failures are logged and
    swallowed so a notification error never breaks the save()/commit path
    that emitted the signal.
    """
    try:
        from shopapp.weixin.models import WeiXinAccount
        from ..tasks import task_weixin_asynchronous_send_payqrcode
        from django.conf import settings
        wx_account = WeiXinAccount.objects.get(app_id=settings.WX_JIMAY_APPID)
        # NOTE(review): obj.payment is in fen; * 0.01 converts to yuan but the
        # float may render with artifacts (e.g. 0.30000000000000004) --
        # consider explicit formatting.
        task_weixin_asynchronous_send_payqrcode.delay(
            wx_account.account_id, obj.buyer.id,
            'wxpub',
            ('您的订货单已审核通过, 需支付金额:¥%s元, 请长按识别二维码转账, '
            +'转账时请备注: %s_的订货号_%s .(如果需要支付宝付款, 请点击菜单[己美医学]/[支付宝付款码])'
            ) % (obj.payment * 0.01, obj.buyer.mobile, obj.id)
        )
    except Exception as exc:  # was `except Exception, exc` -- Python-2-only syntax
        logger.error(str(exc), exc_info=True)
@receiver(signal_jimay_agent_order_paid, sender=JimayAgentOrder)
def jimay_order_paid_update_stat(sender, obj, time_paid, **kwargs):
    """Recompute the selling agent's sales statistics once an order is paid.

    The agent is matched to the order's buyer by mobile number; any error is
    logged and swallowed so statistics upkeep never breaks the payment flow.
    """
    try:
        from .stat import JimayAgentStat
        from .agent import JimayAgent
        # NOTE(review): .first() may return None when no agent matches the
        # mobile; calc_salenum_and_sales_by_agent must tolerate that (any
        # resulting error lands in the except below).
        agent = JimayAgent.objects.filter(mobile=obj.buyer.mobile).first()
        JimayAgentStat.calc_salenum_and_sales_by_agent(agent)
    except Exception as exc:  # was `except Exception, exc` -- Python-2-only syntax
        logger.error(str(exc), exc_info=True)
| [
"xiuqing.mei@xiaolu.so"
] | xiuqing.mei@xiaolu.so |
c9b6c3a1c4baa68286a7c7502f3b5d1a66d14d49 | 94f4bb0f6e43b2eb2f1bdb284a580b76121fa9af | /1109.py | ba76a0936d7d3434232ab867509244588d344dbe | [] | no_license | huosan0123/leetcode-py | f1ec8226bae732369d4e1989b99ab0ba4b4061c4 | 22794e5e80f534c41ff81eb40072acaa1346a75c | refs/heads/master | 2021-01-25T11:48:17.365118 | 2019-09-12T15:45:34 | 2019-09-12T15:45:34 | 93,934,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | class Solution(object):
def corpFlightBookings(self, bookings, n):
"""
:type bookings: List[List[int]]
:type n: int
:rtype: List[int]
"""
if not bookings:
return []
ans = [0] * n
for b in bookings:
ans[b[0]-1] += b[2]
if b[1] < n:
ans[b[1]] -= b[2]
for i in range(1, n):
ans[i] += ans[i-1]
return ans
| [
"noreply@github.com"
] | huosan0123.noreply@github.com |
2f47ca3bbbcb658c413b278edb19c876e0151737 | d5be2d0dadbe7c89642eadae595b6fb739ba1f63 | /Some_python/Genome_Sequencing_Bioinformatics_II-master/18.LinearSpectrum.py | 29c66e8db5505ea9a5774efd8b3e5647cca2b6db | [] | no_license | TomaszSzyborski/Bioinformatics_Specialization | af738599713cebef9d9fdb0265ec473a125df0d1 | 2ceb9d8595904da7dd5f718e82b786e3993957a8 | refs/heads/master | 2020-12-02T19:47:54.635971 | 2017-07-16T20:31:40 | 2017-07-16T20:31:40 | 96,391,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,519 | py | '''
LinearSpectrum(Peptide, AminoAcid, AminoAcidMass)
PrefixMass(0) ? 0
for i ? 1 to |Peptide|
for j ? 1 to 20
if AminoAcid(j) = i-th amino acid in Peptide
PrefixMass(i) ? PrefixMass(i ? 1) + AminoAcidMass(j)
LinearSpectrum ? a list consisting of the single integer 0
for i ? 0 to |Peptide| ? 1
for j ? i + 1 to |Peptide|
add PrefixMass(j) ? PrefixMass(i) to LinearSpectrum
return sorted list LinearSpectrum
CODE CHALLENGE: Implement LinearSpectrum.
Input: An amino acid string Peptide.
Output: The linear spectrum of Peptide.
Sample Input:
NQEL
Sample Output:
0 113 114 128 129 242 242 257 370 371 484
https://github.com/AnnaUfliand/Bioinformatics/blob/1a38fc077eaef5cf176fecf97153ad7f78f3deab/HW5/TheoreticalSpectrumOfLinearPeptide.py
'''
# Integer residue masses for the 20 standard amino acids
# (I/L and K/Q are indistinguishable by mass and share values).
masses = {'G': 57, 'A': 71, 'S': 87, 'P': 97, 'V': 99, 'T': 101, 'C': 103, 'I': 113, 'L': 113, 'N': 114, 'D': 115,
          'K': 128, 'Q': 128, 'E': 129, 'M': 131, 'H': 137, 'F': 147, 'R': 156, 'Y': 163, 'W': 186}
def linearSpectrum(peptide):
    """Return the sorted linear spectrum of *peptide*.

    The spectrum is 0 plus the mass of every contiguous subpeptide,
    computed as pairwise differences of prefix masses.
    Fix: prefix masses are built in one pass over the whole string, so an
    empty peptide now yields [0] instead of raising IndexError (the
    original indexed peptide[-1] unconditionally).
    Raises KeyError for characters that are not standard amino acids.
    """
    prefixMass = [0]
    for residue in peptide:
        prefixMass.append(prefixMass[-1] + masses[residue])
    spectrum = [0]
    for i in range(len(peptide) + 1):
        for j in range(i + 1, len(peptide) + 1):
            spectrum.append(prefixMass[j] - prefixMass[i])
    return sorted(spectrum)
peptide = 'VAQ'
print (*linearSpectrum(peptide), sep = ' ') | [
"tomasz.szyborski@gmail.com"
] | tomasz.szyborski@gmail.com |
6c9052cd8ec268fdb89ca4c446426047f5b2e64b | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_express_route_ports_locations_operations.py | 2cc07c596adf32b9e88c46b7057b896344f9f125 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 7,901 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
# Signature of the optional `cls` response hook every operation accepts:
# called with (raw pipeline response, deserialized body, a mapping).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsLocationsOperations:
    """ExpressRoutePortsLocationsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_03_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Exposed as a class attribute so callers can reach this operation
    # group's model types without importing the versioned package directly.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ExpressRoutePortsLocationListResult"]:
        """Retrieves all ExpressRoutePort peering locations. Does not return available bandwidths for each
        location. Available bandwidths can only be obtained when retrieving a specific peering
        location.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRoutePortsLocationListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.ExpressRoutePortsLocationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortsLocationListResult"]
        # Callers may extend/override the status-code -> exception mapping.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"
        # next_link is None for the first page; continuation URLs returned by
        # the service already embed the query string, so only headers are
        # attached in that branch.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserialize one page into (continuation token, async list of items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('ExpressRoutePortsLocationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Fetch a single page, mapping non-200 statuses to typed exceptions.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        # AsyncItemPaged calls get_next/extract_data lazily as the caller iterates.
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations'} # type: ignore
    async def get(
        self,
        location_name: str,
        **kwargs: Any
    ) -> "_models.ExpressRoutePortsLocation":
        """Retrieves a single ExpressRoutePort peering location, including the list of available
        bandwidths available at said peering location.
        :param location_name: Name of the requested ExpressRoutePort peering location.
        :type location_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRoutePortsLocation, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_03_01.models.ExpressRoutePortsLocation
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortsLocation"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'locationName': self._serialize.url("location_name", location_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ExpressRoutePortsLocation', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations/{locationName}'} # type: ignore
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
62fcf1cc3e7abf245e0eed7686b48be4857dcc31 | 1194cd5a8b92a4ff2b0480d31c7fd24a7ab46fe9 | /usr/lib/enigma2/python/Plugins/Extensions/TuneinRadio/addons/__init__.py | 7cb0114e35a01a88decc5d1328aa6b4253ecea33 | [] | no_license | linuxbox10/enigma2-plugin-extensions-tuneinradio | 8aaa41ee607758503f40d2ec9a85bdd6b4c6f1e0 | fb8609a72208f556cc172f31cdf1aae33b013148 | refs/heads/master | 2020-12-02T10:14:57.513546 | 2017-07-09T19:11:52 | 2017-07-09T19:11:52 | 96,705,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | # Embedded file name: /usr/lib/enigma2/python/Plugins/Extensions/TuneinRadio/addons/__init__.py
pass | [
"jaysmith940@hotmail.co.uk"
] | jaysmith940@hotmail.co.uk |
085d88ce79b4c09829e7c422a1acd0f34b144625 | 7e9248f3b79b2ea6698d873189de0c3422997033 | /backend/tasker_business/models.py | cce19087ab6df3c7583760945bc91ed79d08b103 | [] | no_license | crowdbotics-apps/rpa-19128 | cf2c05d0e5e82b9989b8b9694bd48802655e5b3f | 88e35e7b6249fa167bb3dea3730141cb277a8e09 | refs/heads/master | 2022-11-20T03:08:53.659668 | 2020-07-23T18:18:07 | 2020-07-23T18:18:07 | 282,024,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,558 | py | from django.conf import settings
from django.db import models
class Timeslot(models.Model):
    """Generated Model

    A concrete availability window: a calendar date plus start/end times.
    Linked to taskers through TaskerAvailability.timeslots (many-to-many).
    """
    date = models.DateField()
    start_time = models.TimeField()
    # NOTE(review): nothing here enforces end_time > start_time -- validate
    # at the form/serializer layer.
    end_time = models.TimeField()
class TaskerAvailability(models.Model):
    """Generated Model

    One availability record per tasker profile (one-to-one), holding the
    set of Timeslot windows the tasker has marked as open.
    """
    tasker = models.OneToOneField(
        "task_profile.TaskerProfile",
        on_delete=models.CASCADE,
        related_name="taskeravailability_tasker",
    )
    timeslots = models.ManyToManyField(
        "tasker_business.Timeslot", related_name="taskeravailability_timeslots",
    )
class BusinessPhoto(models.Model):
    """Generated Model

    A photo on a tasker's business profile, stored as an external URL with
    a free-text description (no file upload is handled by this model).
    """
    tasker = models.ForeignKey(
        "task_profile.TaskerProfile",
        on_delete=models.CASCADE,
        related_name="businessphoto_tasker",
    )
    # URL to the hosted image.
    photo = models.URLField()
    description = models.TextField()
class TaskerSkill(models.Model):
    """Generated Model

    A service a tasker offers: display name, numeric rate and the
    category/subcategory it belongs to. Deleting a Subcategory keeps the
    skill (SET_NULL); deleting the tasker or Category cascades.
    """
    tasker = models.ForeignKey(
        "task_profile.TaskerProfile",
        on_delete=models.CASCADE,
        related_name="taskerskill_tasker",
    )
    name = models.CharField(max_length=255,)
    # NOTE(review): FloatField for a monetary rate invites rounding issues;
    # DecimalField is the usual choice, but changing it requires a migration.
    rate = models.FloatField()
    description = models.TextField()
    category = models.ForeignKey(
        "task_category.Category",
        on_delete=models.CASCADE,
        related_name="taskerskill_category",
    )
    subcategory = models.ForeignKey(
        "task_category.Subcategory",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name="taskerskill_subcategory",
    )
# Create your models here.
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
024836a46dc4863b62f2a6f7b309595a72918541 | c78a3ff497590a813ed5b6f283afea4515b6fb49 | /aliyun-python-sdk-ocs/setup.py | 19b7b16ca47b919381a58083f81a360d3eae0dde | [
"Apache-2.0"
] | permissive | StarsHu/aliyun-openapi-python-sdk | 2a4af194882cd68f700f42413655616d1a78117b | 00df8731169b968f6f7837c89a00bcea441745e0 | refs/heads/master | 2021-07-02T23:28:20.603399 | 2017-09-25T08:59:38 | 2017-09-25T08:59:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,267 | py | #!/usr/bin/python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
from setuptools import setup, find_packages
import os
"""
setup module for ocs.
Created on 7/3/2015
@author: alex
"""
PACKAGE = "aliyunsdkocs"
NAME = "aliyun-python-sdk-ocs"
DESCRIPTION = "The ocs module of Aliyun Python sdk."
AUTHOR = "Aliyun"
AUTHOR_EMAIL = "aliyun-developers-efficiency@list.alibaba-inc.com"
URL = "http://develop.aliyun.com/sdk/python"
TOPDIR = os.path.dirname(__file__) or "."
VERSION = __import__(PACKAGE).__version__
# Read the long description from the README shipped next to this script.
# A context manager guarantees the handle is closed even if read() raises,
# replacing the manual open/try/finally/close sequence.
with open("README.rst") as desc_file:
    LONG_DESCRIPTION = desc_file.read()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache",
url=URL,
keywords=["aliyun","sdk","ocs"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=["aliyun-python-sdk-core>=2.0.2"],
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Topic :: Software Development",
)
) | [
"lijie.ma@alibaba-inc.com"
] | lijie.ma@alibaba-inc.com |
1f396a456bd9a3f856c99417b47ad3c1238279fc | f8da830331428a8e1bbeadf23345f79f1750bd98 | /msgraph-cli-extensions/beta/usersactions_beta/azext_usersactions_beta/vendored_sdks/usersactions/aio/operations_async/_user_onenote_notebook_section_page_parent_section_operations_async.py | 864979970170cae0e0a0df45aa5d8662690d9ec6 | [
"MIT"
] | permissive | ezkemboi/msgraph-cli | e023e1b7589461a738e42cbad691d9a0216b0779 | 2ceeb27acabf7cfa219c8a20238d8c7411b9e782 | refs/heads/main | 2023-02-12T13:45:03.402672 | 2021-01-07T11:33:54 | 2021-01-07T11:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,575 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
# Signature of the optional `cls` response hook every operation accepts:
# called with (raw pipeline response, deserialized body, a mapping).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UserOnenoteNotebookSectionPageParentSectionOperations:
    """UserOnenoteNotebookSectionPageParentSectionOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~users_actions.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def copy_to_notebook(
        self,
        user_id: str,
        notebook_id: str,
        onenote_section_id: str,
        onenote_page_id: str,
        id: Optional[str] = None,
        group_id: Optional[str] = None,
        rename_as: Optional[str] = None,
        site_collection_id: Optional[str] = None,
        site_id: Optional[str] = None,
        **kwargs
    ) -> "models.MicrosoftGraphOnenoteOperation":
        """Invoke action copyToNotebook.
        Invoke action copyToNotebook.
        :param user_id: key: id of user.
        :type user_id: str
        :param notebook_id: key: id of notebook.
        :type notebook_id: str
        :param onenote_section_id: key: id of onenoteSection.
        :type onenote_section_id: str
        :param onenote_page_id: key: id of onenotePage.
        :type onenote_page_id: str
        :param id:
        :type id: str
        :param group_id:
        :type group_id: str
        :param rename_as:
        :type rename_as: str
        :param site_collection_id:
        :type site_collection_id: str
        :param site_id:
        :type site_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MicrosoftGraphOnenoteOperation, or the result of cls(response)
        :rtype: ~users_actions.models.MicrosoftGraphOnenoteOperation
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenoteOperation"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        # Bundle the loose keyword arguments into the generated request-body schema.
        _body = models.Paths1Wtwkl0UsersUserIdOnenoteNotebooksNotebookIdSectionsOnenotesectionIdPagesOnenotepageIdParentsectionMicrosoftGraphCopytonotebookPostRequestbodyContentApplicationJsonSchema(id=id, group_id=group_id, rename_as=rename_as, site_collection_id=site_collection_id, site_id=site_id)
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.copy_to_notebook.metadata['url'] # type: ignore
        path_format_arguments = {
            'user-id': self._serialize.url("user_id", user_id, 'str'),
            'notebook-id': self._serialize.url("notebook_id", notebook_id, 'str'),
            'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
            'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        # Fix: the generator also emitted a second, redundant assignment of
        # the literal 'application/json' to 'Accept' right after this line;
        # the serialized value is identical, so the duplicate was removed.
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(_body, 'Paths1Wtwkl0UsersUserIdOnenoteNotebooksNotebookIdSectionsOnenotesectionIdPagesOnenotepageIdParentsectionMicrosoftGraphCopytonotebookPostRequestbodyContentApplicationJsonSchema')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('MicrosoftGraphOnenoteOperation', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    copy_to_notebook.metadata = {'url': '/users/{user-id}/onenote/notebooks/{notebook-id}/sections/{onenoteSection-id}/pages/{onenotePage-id}/parentSection/microsoft.graph.copyToNotebook'} # type: ignore
    async def copy_to_section_group(
        self,
        user_id: str,
        notebook_id: str,
        onenote_section_id: str,
        onenote_page_id: str,
        id: Optional[str] = None,
        group_id: Optional[str] = None,
        rename_as: Optional[str] = None,
        site_collection_id: Optional[str] = None,
        site_id: Optional[str] = None,
        **kwargs
    ) -> "models.MicrosoftGraphOnenoteOperation":
        """Invoke action copyToSectionGroup.
        Invoke action copyToSectionGroup.
        :param user_id: key: id of user.
        :type user_id: str
        :param notebook_id: key: id of notebook.
        :type notebook_id: str
        :param onenote_section_id: key: id of onenoteSection.
        :type onenote_section_id: str
        :param onenote_page_id: key: id of onenotePage.
        :type onenote_page_id: str
        :param id:
        :type id: str
        :param group_id:
        :type group_id: str
        :param rename_as:
        :type rename_as: str
        :param site_collection_id:
        :type site_collection_id: str
        :param site_id:
        :type site_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MicrosoftGraphOnenoteOperation, or the result of cls(response)
        :rtype: ~users_actions.models.MicrosoftGraphOnenoteOperation
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenoteOperation"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        # Bundle the loose keyword arguments into the generated request-body schema.
        _body = models.Paths1X4EkfuUsersUserIdOnenoteNotebooksNotebookIdSectionsOnenotesectionIdPagesOnenotepageIdParentsectionMicrosoftGraphCopytosectiongroupPostRequestbodyContentApplicationJsonSchema(id=id, group_id=group_id, rename_as=rename_as, site_collection_id=site_collection_id, site_id=site_id)
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.copy_to_section_group.metadata['url'] # type: ignore
        path_format_arguments = {
            'user-id': self._serialize.url("user_id", user_id, 'str'),
            'notebook-id': self._serialize.url("notebook_id", notebook_id, 'str'),
            'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
            'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        # Fix: redundant duplicate 'Accept' assignment removed (see copy_to_notebook).
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(_body, 'Paths1X4EkfuUsersUserIdOnenoteNotebooksNotebookIdSectionsOnenotesectionIdPagesOnenotepageIdParentsectionMicrosoftGraphCopytosectiongroupPostRequestbodyContentApplicationJsonSchema')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('MicrosoftGraphOnenoteOperation', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    copy_to_section_group.metadata = {'url': '/users/{user-id}/onenote/notebooks/{notebook-id}/sections/{onenoteSection-id}/pages/{onenotePage-id}/parentSection/microsoft.graph.copyToSectionGroup'} # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
b2873364981f6c89eed840b8be52ebb7fb2cd891 | 81f2d4aa3bfb216e04efec81c7f614603a8fd384 | /irekua/rest/utils/permissions.py | a579360cb99113d2a5632cf58fca0969b2adc678 | [] | no_license | CONABIO-audio/irekua | 44564020c342e8bd49a14707f206962869bc026d | 4531a6dbb8b0a0014567930a134bc4399c2c00d4 | refs/heads/master | 2022-12-10T09:43:05.866848 | 2019-10-17T16:18:21 | 2019-10-17T16:18:21 | 170,434,169 | 0 | 1 | null | 2022-12-08T01:44:04 | 2019-02-13T03:32:55 | Python | UTF-8 | Python | false | false | 1,680 | py | from rest_framework.permissions import BasePermission
from rest.permissions import IsAdmin
from rest.permissions import ReadOnly
class PermissionMapping(object):
    """Maps viewset action names to DRF permission classes.

    Unknown actions fall back to ``default`` (``IsAdmin | ReadOnly`` unless
    overridden), which is normalized to a list at construction time.
    """
    DEFAULT_PERMISSION = IsAdmin | ReadOnly
    def __init__(self, mapping=None, default=None):
        if mapping is None:
            mapping = {}
        assert isinstance(mapping, dict)
        self.permission_mapping = mapping
        if default is None:
            default = PermissionMapping.DEFAULT_PERMISSION
        if not isinstance(default, (tuple, list)):
            default = [default]
        self.default_permission = default
    def get_permissions(self, action):
        """Return the list of permission classes configured for ``action``.

        Single entries are wrapped in a list; missing actions yield the
        default permissions (EAFP: one dict lookup instead of two).
        """
        try:
            permissions = self.permission_mapping[action]
        except KeyError:
            return self.default_permission
        if not isinstance(permissions, (list, tuple)):
            return [permissions]
        return permissions
    def extend(self, additional_actions=None, **kwargs):
        """Return a new mapping with extra action entries merged in.

        Entries from ``additional_actions`` and then ``kwargs`` override
        existing ones. Fix: the extended mapping now keeps this instance's
        default permissions; previously it silently reverted to
        DEFAULT_PERMISSION.
        """
        extended_mapping = dict(self.permission_mapping)
        if additional_actions is not None:
            extended_mapping.update(additional_actions)
        extended_mapping.update(kwargs)
        return PermissionMapping(extended_mapping, default=self.default_permission)
class PermissionMappingMixin(object):
    """Adds permission resolution through a class-provided PermissionMapping.

    Subclasses must override ``permission_mapping``; ``get_permissions``
    instantiates the permission classes mapped to the current ``self.action``
    (returning [] when no action is resolved).
    """
    @property
    def permission_mapping(self):
        # Fix: a stray debug ``print(self.__name__)`` used to raise
        # AttributeError here (instances have no __name__) before the
        # intended NotImplementedError could be reached.
        raise NotImplementedError(
            '%s must define a permission_mapping' % type(self).__name__)
    def get_permissions(self):
        if self.action is None:
            # No action resolved for this request: nothing to check.
            return []
        permission_classes = self.permission_mapping.get_permissions(self.action)
        return [permission() for permission in permission_classes]
| [
"santiago.mbal@gmail.com"
] | santiago.mbal@gmail.com |
a900434d73f3b5d48843ddb0b816fc9d3d54df50 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/OpenPhish/Integrations/OpenPhish_v2/test_data/api_raw.py | 734dfd9cc498a4640f5a22d562a7bb1755d31082 | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 788 | py | RAW_DATA = 'https://cnannord.com/paypal/firebasecloud/83792/htmjrtfgdsaopjdnbhhdmmdgrhehnndnmmmbvvbnmn' \
'dmnbnnbbmnm/service/paypal\nhttp://payameghdir.ir/cxxc/Owa/\nhttps://fxsearchdesk.net/Client' \
'/tang/step4.html\nhttps://fxsearchdesk.net/Client/tang/step3.html\nhttps://fxsearchdesk.net/' \
'Client/tang/step2.html\nhttp://fxsearchdesk.net/Client/tang/step2.html\n' \
'http://fxsearchdesk.net/Client/tang/step3.html\nhttp://fxsearchdesk.net/Client/tang/step4.html\n' \
'https://fxsearchdesk.net/Client/tang\nhttp://fxsearchdesk.net/Client/tang/\n' \
'http://fxsearchdesk.net/Client/tang\nhttp://revisepayee.com/admin\n' \
'http://hmrc.resolutionfix.com/\nhttps://hmrc.resolutionfix.com/refund/details'
| [
"noreply@github.com"
] | demisto.noreply@github.com |
718fe8cfe0779e581eb25d464850b5df0c04d846 | f9f54c110fa422408e95deb077bbe594f8aec960 | /epikjjh/sort/sort.py | 63e3e33e2febd51074e1f7c02c8be30ff3a875ba | [
"MIT"
] | permissive | 15ers/Solve_Naively | 39f9dc0e96aef7d957dde33cd1353dd7671aeb9c | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | refs/heads/master | 2021-07-07T23:56:10.231601 | 2020-09-10T08:55:44 | 2020-09-10T08:55:44 | 184,999,228 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,092 | py | def selection_sort(arr: list):
for i in range(len(arr)-1, 0, -1):
max_index = i
for j in range(i):
if max_elem < arr[j]:
max_index = j
arr[i], arr[max_index] = arr[max_index], arr[i]
def insertion_sort(arr: list):
    """Sort *arr* in place in ascending order (stable, O(n^2) worst case).

    Improvement: breaks out of the inner walk as soon as the element stops
    swapping -- everything to its left is already sorted, so the original
    kept comparing with no effect. Output is unchanged.
    """
    for i in range(1, len(arr)):
        for j in range(i, 0, -1):
            if arr[j] < arr[j-1]:
                arr[j], arr[j-1] = arr[j-1], arr[j]
            else:
                break
def bubble_sort(arr: list):
    """Sort *arr* in place in ascending order (stable).

    Improvement: stops as soon as a full pass performs no swap, meaning the
    list is already sorted; the result is unchanged, only wasted passes are
    skipped (best case drops to O(n)).
    """
    for i in range(len(arr)-1, 0, -1):
        swapped = False
        for j in range(i):
            if arr[j+1] < arr[j]:
                arr[j], arr[j+1] = arr[j+1], arr[j]
                swapped = True
        if not swapped:
            return
def merge(arr: list, left: int, mid: int, right: int):
    """Merge the sorted runs arr[left:mid+1] and arr[mid+1:right+1] in place.

    Fix: uses '<=' so that on ties the element from the left run is taken
    first, making the merge (and hence merge_sort) stable. Leftover tails
    are copied with slice extends instead of index loops; at most one of
    the two slices is non-empty.
    """
    tmp = []
    left_index = left
    right_index = mid + 1
    while left_index <= mid and right_index <= right:
        if arr[left_index] <= arr[right_index]:
            tmp.append(arr[left_index])
            left_index += 1
        else:
            tmp.append(arr[right_index])
            right_index += 1
    tmp.extend(arr[left_index:mid + 1])
    tmp.extend(arr[right_index:right + 1])
    arr[left:right + 1] = tmp
def merge_sort(arr: list, left: int, right: int):
    """Recursively sort arr[left:right+1] in place using merge()."""
    if left >= right:
        # Sub-array of length 0 or 1 -- already sorted.
        return
    middle = (left + right) // 2
    merge_sort(arr, left, middle)
    merge_sort(arr, middle + 1, right)
    merge(arr, left, middle, right)
def quick_sort_outplace(arr: list, left: int, right: int):
    """Sort arr[left:right+1] via three-way quicksort with auxiliary lists.

    Partitions the slice into values below, equal to and above the middle
    element, writes them back in order, then recurses into the outer parts.
    """
    if left >= right:
        return
    pivot = arr[(left + right) // 2]
    smaller, equal, larger = [], [], []
    for value in arr[left:right + 1]:
        if value < pivot:
            smaller.append(value)
        elif value > pivot:
            larger.append(value)
        else:
            equal.append(value)
    arr[left:right + 1] = smaller + equal + larger
    quick_sort_outplace(arr, left, left + len(smaller) - 1)
    quick_sort_outplace(arr, right - len(larger) + 1, right)
def quick_sort_inplace(arr: list, left: int, right: int):
    """Sort arr[left..right] (inclusive) in place with quicksort.

    Uses a Hoare-style partition around the middle element's value. The
    cursor/recursion bookkeeping below is order-sensitive, so the logic
    is documented rather than restructured.
    """
    if left < right:
        low = left
        high = right
        # Pivot is the value at the midpoint of the current segment.
        pivot = arr[(low+high)//2]
        while low <= high:
            # Advance past elements already on the correct side of the pivot.
            while arr[low] < pivot:
                low += 1
            while arr[high] > pivot:
                high -= 1
            if low <= high:
                # Misplaced pair found: swap and move both cursors inward.
                arr[low], arr[high] = arr[high], arr[low]
                low, high = low + 1, high - 1
        # The cursors have crossed (high < low); the two recursions on
        # [left, low-1] and [low, right] together cover the whole segment.
        quick_sort_inplace(arr, left, low-1)
        quick_sort_inplace(arr, low, right)
def heapify(tree: list, idx: int, length: int):
    """Sift tree[idx] down so the subtree rooted at idx satisfies the
    max-heap property, considering only the first *length* entries.

    Children of node i live at 2i+1 (left) and 2i+2 (right).
    """
    largest = idx
    for child in (2 * idx + 1, 2 * idx + 2):
        if child < length and tree[child] > tree[largest]:
            largest = child
    if largest != idx:
        # A child is bigger: promote it and continue sifting down.
        tree[idx], tree[largest] = tree[largest], tree[idx]
        heapify(tree, largest, length)
def heap_sort(arr: list):
    """Sort *arr* in place with heapsort: build a max-heap, then repeatedly
    move the heap maximum to the end of the shrinking unsorted prefix."""
    n = len(arr)
    # Build the max-heap bottom-up, starting from the last internal node.
    for idx in range((n//2)-1, -1, -1):
        heapify(arr, idx, n)
    # Swap the current maximum (root) into its final position i; the
    # following re-heapify restores the heap over the remaining prefix.
    for i in range(n-1, 0, -1):
        arr[0], arr[i] = arr[i], arr[0]
heapify(arr, 0, i) | [
"epikjjh@gmail.com"
] | epikjjh@gmail.com |
6f78f852e6aca7f1c649ae0695b2151e991cabc2 | 2bacd64bd2679bbcc19379947a7285e7ecba35c6 | /1-notebook-examples/keras-udemy-course/cnn_class2/use_pretrained_weights_resnet.py | 48c6bc23ebedeb15802fec8078bb5d46dd73b5ff | [
"MIT"
] | permissive | vicb1/deep-learning | cc6b6d50ae5083c89f22512663d06b777ff8d881 | 23d6ef672ef0b3d13cea6a99984bbc299d620a73 | refs/heads/master | 2022-12-12T15:56:55.565836 | 2020-03-06T01:55:55 | 2020-03-06T01:55:55 | 230,293,726 | 0 | 0 | MIT | 2022-12-08T05:27:43 | 2019-12-26T16:23:18 | Jupyter Notebook | UTF-8 | Python | false | false | 4,849 | py | # https://deeplearningcourses.com/c/advanced-computer-vision
# https://www.udemy.com/advanced-computer-vision
# Transfer-learning script: freezes an ImageNet-pretrained ResNet50, adds a
# new softmax head sized to the dataset's class folders, trains the head with
# augmented image batches, then evaluates with confusion matrices and plots.
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future

from keras.layers import Input, Lambda, Dense, Flatten
from keras.models import Model
from keras.applications.resnet50 import ResNet50, preprocess_input
# from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib.pyplot as plt

from glob import glob

# re-size all the images to this
IMAGE_SIZE = [224, 224]  # feel free to change depending on dataset

# training config:
epochs = 16
batch_size = 32

# https://www.kaggle.com/paultimothymooney/blood-cells
train_path = '../large_files/blood_cell_images/TRAIN'
valid_path = '../large_files/blood_cell_images/TEST'

# https://www.kaggle.com/moltean/fruits
# train_path = '../large_files/fruits-360/Training'
# valid_path = '../large_files/fruits-360/Validation'
# train_path = '../large_files/fruits-360-small/Training'
# valid_path = '../large_files/fruits-360-small/Validation'

# useful for getting number of files
image_files = glob(train_path + '/*/*.jp*g')
valid_image_files = glob(valid_path + '/*/*.jp*g')

# useful for getting number of classes (one folder per class)
folders = glob(train_path + '/*')

# look at an image for fun
plt.imshow(image.load_img(np.random.choice(image_files)))
plt.show()

# Load the pretrained ResNet50 base without its classification top.
# (The original comment said "VGG"; the model used here is ResNet50.)
res = ResNet50(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# don't train existing weights — only the new head below is trainable
for layer in res.layers:
    layer.trainable = False

# our layers - you can add more if you want
x = Flatten()(res.output)
# x = Dense(1000, activation='relu')(x)
prediction = Dense(len(folders), activation='softmax')(x)

# create a model object
model = Model(inputs=res.input, outputs=prediction)

# view the structure of the model
model.summary()

# tell the model what cost and optimization method to use
model.compile(
    loss='categorical_crossentropy',
    optimizer='rmsprop',
    metrics=['accuracy']
)

# create an instance of ImageDataGenerator (data augmentation + ResNet50
# input preprocessing applied to every batch)
gen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.1,
    zoom_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
    preprocessing_function=preprocess_input
)

# test generator to see how it works and some other useful things
# get label mapping for confusion matrix plot later
test_gen = gen.flow_from_directory(valid_path, target_size=IMAGE_SIZE)
print(test_gen.class_indices)
labels = [None] * len(test_gen.class_indices)
for k, v in test_gen.class_indices.items():
    labels[v] = k

# should be a strangely colored image (due to VGG weights being BGR)
for x, y in test_gen:
    print("min:", x[0].min(), "max:", x[0].max())
    plt.title(labels[np.argmax(y[0])])
    plt.imshow(x[0])
    plt.show()
    break

# create generators
train_generator = gen.flow_from_directory(
    train_path,
    target_size=IMAGE_SIZE,
    shuffle=True,
    batch_size=batch_size,
)
valid_generator = gen.flow_from_directory(
    valid_path,
    target_size=IMAGE_SIZE,
    shuffle=True,
    batch_size=batch_size,
)

# fit the model (r.history holds per-epoch loss/accuracy for the plots below)
r = model.fit_generator(
    train_generator,
    validation_data=valid_generator,
    epochs=epochs,
    steps_per_epoch=len(image_files) // batch_size,
    validation_steps=len(valid_image_files) // batch_size,
)
def get_confusion_matrix(data_path, N):
    """Predict on up to N images under *data_path* and return the
    confusion matrix of true vs. predicted class indices.

    shuffle=False keeps predictions and targets aligned batch by batch
    (we need to see the data in the same order for both).
    """
    print("Generating confusion matrix", N)
    flow = gen.flow_from_directory(data_path, target_size=IMAGE_SIZE, shuffle=False, batch_size=batch_size * 2)
    predictions = np.array([])
    targets = np.array([])
    batch_count = 0
    for x, y in flow:
        batch_count += 1
        if batch_count % 50 == 0:
            print(batch_count)  # progress marker every 50 batches
        batch_pred = np.argmax(model.predict(x), axis=1)
        batch_true = np.argmax(y, axis=1)
        predictions = np.concatenate((predictions, batch_pred))
        targets = np.concatenate((targets, batch_true))
        if len(targets) >= N:
            # The generator loops forever; stop once N samples are seen.
            break
    return confusion_matrix(targets, predictions)
# Build confusion matrices over the full training and validation sets.
cm = get_confusion_matrix(train_path, len(image_files))
print(cm)
valid_cm = get_confusion_matrix(valid_path, len(valid_image_files))
print(valid_cm)

# plot some data

# loss curves per epoch
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.show()

# accuracies per epoch
plt.plot(r.history['acc'], label='train acc')
plt.plot(r.history['val_acc'], label='val acc')
plt.legend()
plt.show()

# Local helper module (not on PyPI) that renders a labeled matrix plot.
from util import plot_confusion_matrix
plot_confusion_matrix(cm, labels, title='Train confusion matrix')
plot_confusion_matrix(valid_cm, labels, title='Validation confusion matrix')
"vbajenaru@gmail.com"
] | vbajenaru@gmail.com |
b46ff85b0961b21961ea9d2a07ee248a8ad4b92e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_portico.py | 71356c0776ec5f684070f1cc1c5508df8c828aeb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py |
#class header
class _PORTICO():
def __init__(self,):
self.name = "PORTICO"
self.definitions = [u'a covered entrance to a building, usually a large and impressive building, that is supported by columns']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
23228fca858ae9f4969e14f3cfd351321ad5a08f | bc2ea53e1dbbda6818efae0e30c93498562f850a | /setup.py | 439d3a7315cf802b47906b6e1a22207c6dad985a | [
"BSD-2-Clause"
] | permissive | zmedico/gemato | fa47db3320824fca5ff5e1dfdd04391ff9c081e6 | 2a3c4354ba3c3515a86b81782ab30a34a14faea2 | refs/heads/master | 2021-10-23T13:16:10.822184 | 2019-03-16T07:51:59 | 2019-03-16T07:51:59 | 112,166,687 | 1 | 1 | null | 2017-11-27T08:09:08 | 2017-11-27T08:09:08 | null | UTF-8 | Python | false | false | 1,187 | py | #!/usr/bin/env python
# vim:fileencoding=utf-8
# (C) 2017-2018 Michał Górny <mgorny@gentoo.org>
# Licensed under the terms of 2-clause BSD license
from setuptools import setup
# Package metadata for gemato (Gentoo Manifest Tool).
setup(
    name='gemato',
    version='14.0',
    description='Gentoo Manifest Tool -- a stand-alone utility to verify and update Gentoo Manifest files',
    author='Michał Górny',
    author_email='mgorny@gentoo.org',
    license='BSD',
    url='http://github.com/mgorny/gemato',
    # Optional extras backfill hash/compression support on older Pythons:
    # blake2/sha3 before 3.6, bz2file/lzma backports on Python 2.
    extras_require={
        'blake2': ['pyblake2;python_version<"3.6"'],
        'bz2': ['bz2file;python_version<"3.0"'],
        'lzma': ['backports.lzma;python_version<"3.0"'],
        'sha3': ['pysha3;python_version<"3.6"'],
    },
    packages=['gemato'],
    # Console entry point: the `gemato` command dispatches to gemato.cli.
    entry_points={
        'console_scripts': [
            'gemato=gemato.cli:setuptools_main',
        ],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: Security :: Cryptography',
    ]
)
| [
"mgorny@gentoo.org"
] | mgorny@gentoo.org |
1a01c08c797dcb23bf43fe33e045c0e7ca633adf | dd59a809da984f59315110aa019eabfbbf1a547d | /submissions/AlexMiller/AlexMiller-stitch_and_average.py | 38ff80363396aa0910870fb9a96217aaaa6f5d93 | [] | no_license | tranhoangkhuongvn/dc-michelin-challenge | f679b55a0d595c56cbc3c82e8673fb1c49bdeb44 | a34e8183f0c04314ee433852d3567c6b88a3aee6 | refs/heads/master | 2020-04-18T10:05:50.661201 | 2016-10-15T20:01:49 | 2016-10-15T20:01:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | import pandas as pd
import pdb
import random
prefix = "/media/alex/HD/"
# prefix = "D:/"
prefix2 = "/media/alex/SSD/"
# prefix2 = "C:/"
#Read in the original DC dataset
df = pd.read_csv(prefix+"Documents/Data/Yelp/dc.csv",header=0,encoding="latin1")
#Keep only restaurant name, review date, price rating, average score, review count
df = df[["req.restaurant","date","price","avg.score","review.count"]]
#Read in the prediction vectors
pred = pd.read_csv(prefix+"git/word2vec.torch/prediction_vectors.csv",header=None)
pred.columns = ["star0","star1","star2","star3"]
#We never scrambled them, so we can keep them in the same order
dc = df.join(pred)
dc_means = dc.groupby('req.restaurant').mean()
dc_means['max'] = dc_means[['star0','star1','star2','star3']].idxmax(axis=1)
# Write csv
dc_means.to_csv(prefix2+"git/dc-michelin-challenge/submissions/AlexMiller/dc_predictions.csv")
| [
"alex.k.miller@gmail.com"
] | alex.k.miller@gmail.com |
e0b4271157ee723d721cd2ef40b941481e4d51da | f80ef3a3cf859b13e8af8433af549b6b1043bf6e | /pyobjc-framework-Quartz/Examples/Programming with Quartz/BasicDrawing/MyView.py | d13dea356b286df29b777944be4bb3413e728402 | [
"MIT"
] | permissive | ronaldoussoren/pyobjc | 29dc9ca0af838a56105a9ddd62fb38ec415f0b86 | 77b98382e52818690449111cd2e23cd469b53cf5 | refs/heads/master | 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 | Python | UTF-8 | Python | false | false | 4,743 | py | import AppDrawing
import Cocoa
import Quartz
import FrameworkTextDrawing
import FrameworkUtilities
import objc
import UIHandling
import PDFHandling
from objc import super
# XXX: Why are these global?
# NOTE(review): module-level shared UI state — the active sample drawing
# command and the currently pasted PDF document (None when nothing pasted).
_drawingCommand = UIHandling.kHICommandSimpleRect
_pdfDocument = None
class MyView(Cocoa.NSView):
    """Custom NSView that renders one of the sample drawing commands, or a
    PDF document pasted from the pasteboard.

    NOTE(review): the selected command and the pasted PDF live in the
    module-level globals _drawingCommand/_pdfDocument rather than on the
    instance, so all MyView instances share the same state.
    """

    # Interface Builder outlet: menu item for the active drawing command.
    currentMenuItem = objc.IBOutlet()

    def initWithFrame_(self, frameRect):
        """Designated initializer; resets the shared pasted-PDF state."""
        self = super().initWithFrame_(frameRect)
        if self is None:
            return None
        global _pdfDocument
        _pdfDocument = None
        return self

    # Disabled code path: would flip the view's coordinate system if enabled.
    if False:
        def isFlipped(self):
            return True

    def drawRect_(self, rect):
        """Draw the pasted PDF if present, else the current sample command."""
        context = Cocoa.NSGraphicsContext.currentContext().graphicsPort()
        if _pdfDocument is None:
            # Text commands render through the Cocoa text system; everything
            # else is dispatched to the Quartz drawing code in AppDrawing.
            if _drawingCommand in (
                UIHandling.kHICommandDrawNSString,
                UIHandling.kHICommandDrawNSLayoutMgr,
                UIHandling.kHICommandDrawCustomNSLayoutMgr,
            ):
                if _drawingCommand == UIHandling.kHICommandDrawNSString:
                    FrameworkTextDrawing.drawNSStringWithAttributes()
                elif _drawingCommand == UIHandling.kHICommandDrawNSLayoutMgr:
                    FrameworkTextDrawing.drawWithNSLayout()
                else:
                    FrameworkTextDrawing.drawWithCustomNSLayout()
            else:
                AppDrawing.DispatchDrawing(context, _drawingCommand)
        else:
            # Draw page 1 of the pasted PDF, anchored at the view origin.
            mediaRect = Quartz.CGPDFDocumentGetMediaBox(_pdfDocument, 1)
            mediaRect.origin.x = mediaRect.origin.y = 0
            Quartz.CGContextDrawPDFDocument(context, mediaRect, _pdfDocument, 1)

    @objc.IBAction
    def setDrawCommand_(self, sender):
        """Menu action: make the sender's tag the active drawing command."""
        global _drawingCommand, _pdfDocument
        newCommand = sender.tag()
        if _drawingCommand != newCommand:
            _drawingCommand = newCommand
            # The view needs to be redisplayed since there is a new drawing command.
            self.setNeedsDisplay_(True)
            # Disable previous menu item.
            if self.currentMenuItem is not None:
                self.currentMenuItem.setState_(Cocoa.NSOffState)
            # Update the current item.
            self.currentMenuItem = sender
            # Enable new menu item.
            self.currentMenuItem.setState_(Cocoa.NSOnState)
            # If we were showing a pasted document, let's get rid of it.
            if _pdfDocument:
                _pdfDocument = None

    def currentPrintableCommand(self):
        """Return a drawing command suitable for printing/exporting.

        The best representation for printing or exporting when the current
        command caches using a bitmap context or a layer is to not do any
        caching.
        """
        if _drawingCommand in (
            UIHandling.kHICommandDrawOffScreenImage,
            UIHandling.kHICommandDrawWithLayer,
        ):
            return UIHandling.kHICommandDrawNoOffScreenImage
        return _drawingCommand

    def print_(self, sender):
        """Print the view, temporarily swapping in a printable command."""
        global _drawingCommand
        savedDrawingCommand = _drawingCommand
        # Set the drawing command to be one that is printable.
        _drawingCommand = self.currentPrintableCommand()
        # Do the printing operation on the view.
        Cocoa.NSPrintOperation.printOperationWithView_(self).runOperation()
        # Restore the drawing command that was in effect before printing.
        _drawingCommand = savedDrawingCommand

    def acceptsFirstResponder(self):
        """Accept first-responder status so copy/paste actions reach us."""
        return True

    @objc.IBAction
    def copy_(self, sender):
        """Copy the current drawing to the pasteboard as PDF data."""
        FrameworkUtilities.addPDFDataToPasteBoard(_drawingCommand)

    @objc.IBAction
    def paste_(self, sender):
        """Paste PDF data from the pasteboard and display it."""
        global _pdfDocument
        newPDFDocument = PDFHandling.createNewPDFRefFromPasteBoard()
        if newPDFDocument is not None:
            _pdfDocument = newPDFDocument
            # The view needs to be redisplayed since there is
            # a new PDF document.
            self.setNeedsDisplay_(True)

    # Return the number of pages available for printing. For this
    # application it is always 1.
    def knowsPageRange_(self, aRange):
        return True, Cocoa.NSRange(1, 1)

    # Return the drawing rectangle for a particular page number.
    # For this application it is always the page width and height.
    def rectForPage_(self, page):
        pi = Cocoa.NSPrintOperation.currentOperation().printInfo()
        # Calculate the page height in points.
        paperSize = pi.paperSize()
        return Cocoa.NSMakeRect(0, 0, paperSize.width, paperSize.height)

    def validateMenuItem_(self, menuItem):
        """Keep menu state in sync: check only the active command's item."""
        if menuItem.tag() == _drawingCommand:
            self.currentMenuItem = menuItem
            menuItem.setState_(True)
        else:
            menuItem.setState_(False)
        return True
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
f426826bbfe2daf0908d2f859ebab5c6b49d17d6 | 70cdf0741a22c678401a306229003bf036ffe5a6 | /ocbind/bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/__init__.py | 369c2b9a761dcce70880524e2b59b6a685ba2aed | [] | no_license | zsblevins/nanog81-hackathon | 5001e034339d6b0c6452ae2474f06916bcd715cf | 1b64fd207dd69837f947094fbd6d6c1cea3a1070 | refs/heads/main | 2023-03-03T09:39:28.460000 | 2021-02-15T13:41:38 | 2021-02-15T13:41:38 | 336,698,856 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,432 | py | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import routes
class loc_rib(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-bgp - based on the path /bgp/rib/afi-safis/afi-safi/ipv4-unicast/loc-rib. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Container for the IPv4 BGP LOC-RIB data
"""
__slots__ = ('_path_helper', '_extmethods', '__routes',)
_yang_name = 'loc-rib'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['bgp', 'rib', 'afi-safis', 'afi-safi', 'ipv4-unicast', 'loc-rib']
def _get_routes(self):
"""
Getter method for routes, mapped from YANG variable /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container)
YANG Description: Enclosing container for list of routes in the routing
table.
"""
return self.__routes
def _set_routes(self, v, load=False):
"""
Setter method for routes, mapped from YANG variable /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_routes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_routes() directly.
YANG Description: Enclosing container for list of routes in the routing
table.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """routes must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)""",
})
self.__routes = t
if hasattr(self, '_set'):
self._set()
def _unset_routes(self):
self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
routes = __builtin__.property(_get_routes)
_pyangbind_elements = OrderedDict([('routes', routes), ])
from . import routes
class loc_rib(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-bgp-common - based on the path /bgp/rib/afi-safis/afi-safi/ipv4-unicast/loc-rib. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Container for the IPv4 BGP LOC-RIB data
"""
__slots__ = ('_path_helper', '_extmethods', '__routes',)
_yang_name = 'loc-rib'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['bgp', 'rib', 'afi-safis', 'afi-safi', 'ipv4-unicast', 'loc-rib']
def _get_routes(self):
"""
Getter method for routes, mapped from YANG variable /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container)
YANG Description: Enclosing container for list of routes in the routing
table.
"""
return self.__routes
def _set_routes(self, v, load=False):
"""
Setter method for routes, mapped from YANG variable /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_routes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_routes() directly.
YANG Description: Enclosing container for list of routes in the routing
table.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """routes must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)""",
})
self.__routes = t
if hasattr(self, '_set'):
self._set()
def _unset_routes(self):
self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
routes = __builtin__.property(_get_routes)
_pyangbind_elements = OrderedDict([('routes', routes), ])
from . import routes
class loc_rib(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-bgp-common-multiprotocol - based on the path /bgp/rib/afi-safis/afi-safi/ipv4-unicast/loc-rib. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Container for the IPv4 BGP LOC-RIB data
"""
__slots__ = ('_path_helper', '_extmethods', '__routes',)
_yang_name = 'loc-rib'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['bgp', 'rib', 'afi-safis', 'afi-safi', 'ipv4-unicast', 'loc-rib']
def _get_routes(self):
"""
Getter method for routes, mapped from YANG variable /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container)
YANG Description: Enclosing container for list of routes in the routing
table.
"""
return self.__routes
def _set_routes(self, v, load=False):
"""
Setter method for routes, mapped from YANG variable /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_routes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_routes() directly.
YANG Description: Enclosing container for list of routes in the routing
table.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """routes must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)""",
})
self.__routes = t
if hasattr(self, '_set'):
self._set()
def _unset_routes(self):
self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
routes = __builtin__.property(_get_routes)
_pyangbind_elements = OrderedDict([('routes', routes), ])
from . import routes
class loc_rib(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-bgp-common-structure - based on the path /bgp/rib/afi-safis/afi-safi/ipv4-unicast/loc-rib. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Container for the IPv4 BGP LOC-RIB data
"""
__slots__ = ('_path_helper', '_extmethods', '__routes',)
_yang_name = 'loc-rib'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['bgp', 'rib', 'afi-safis', 'afi-safi', 'ipv4-unicast', 'loc-rib']
def _get_routes(self):
"""
Getter method for routes, mapped from YANG variable /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container)
YANG Description: Enclosing container for list of routes in the routing
table.
"""
return self.__routes
def _set_routes(self, v, load=False):
"""
Setter method for routes, mapped from YANG variable /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_routes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_routes() directly.
YANG Description: Enclosing container for list of routes in the routing
table.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """routes must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)""",
})
self.__routes = t
if hasattr(self, '_set'):
self._set()
def _unset_routes(self):
self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
routes = __builtin__.property(_get_routes)
_pyangbind_elements = OrderedDict([('routes', routes), ])
from . import routes
class loc_rib(PybindBase):
  """
  Auto-generated by the PythonClass plugin for PYANG from YANG module
  openconfig-bgp-peer-group, at path
  /bgp/rib/afi-safis/afi-safi/ipv4-unicast/loc-rib.  Every member element
  of the container is exposed as a class variable with a specific YANG type.

  YANG Description: Container for the IPv4 BGP LOC-RIB data
  """
  __slots__ = ('_path_helper', '_extmethods', '__routes',)

  _yang_name = 'loc-rib'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # Default, empty 'routes' container (state data: config false).
    self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
    load = kwargs.pop("load", None)
    if not args:
      return
    if len(args) > 1:
      raise TypeError("cannot create a YANG container with >1 argument")
    # The source object must provide every element of this container.
    if not all(hasattr(args[0], name) for name in self._pyangbind_elements):
      raise ValueError("Supplied object did not have the correct attributes")
    for name in self._pyangbind_elements:
      source = getattr(args[0], name)
      if source._changed() is False:
        continue
      setter = getattr(self, "_set_%s" % name)
      if load is None:
        setter(source)
      else:
        setter(source, load=load)

  def _path(self):
    if hasattr(self, "_parent"):
      return self._parent._path() + [self._yang_name]
    return ['bgp', 'rib', 'afi-safis', 'afi-safi', 'ipv4-unicast', 'loc-rib']

  def _get_routes(self):
    """
    Getter method for routes, mapped from YANG variable
    /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container)

    YANG Description: Enclosing container for list of routes in the
    routing table.
    """
    return self.__routes

  def _set_routes(self, v, load=False):
    """
    Setter method for routes, mapped from YANG variable
    /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container).
    The node is config false, so 'routes' is exposed read-only and backends
    looking to populate this variable should call thisObj._set_routes()
    directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      wrapped = YANGDynClass(v,base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """routes must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)""",
        })
    self.__routes = wrapped
    if hasattr(self, '_set'):
      self._set()

  def _unset_routes(self):
    self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)

  # Read-only property: state data gets a getter only.
  routes = __builtin__.property(_get_routes)

  _pyangbind_elements = OrderedDict([('routes', routes), ])
from . import routes
class loc_rib(PybindBase):
  """
  Auto-generated by the PythonClass plugin for PYANG from YANG module
  openconfig-bgp-neighbor, at path
  /bgp/rib/afi-safis/afi-safi/ipv4-unicast/loc-rib.  Every member element
  of the container is exposed as a class variable with a specific YANG type.

  YANG Description: Container for the IPv4 BGP LOC-RIB data
  """
  __slots__ = ('_path_helper', '_extmethods', '__routes',)

  _yang_name = 'loc-rib'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # Default, empty 'routes' container (state data: config false).
    self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
    load = kwargs.pop("load", None)
    if not args:
      return
    if len(args) > 1:
      raise TypeError("cannot create a YANG container with >1 argument")
    # The source object must provide every element of this container.
    if not all(hasattr(args[0], name) for name in self._pyangbind_elements):
      raise ValueError("Supplied object did not have the correct attributes")
    for name in self._pyangbind_elements:
      source = getattr(args[0], name)
      if source._changed() is False:
        continue
      setter = getattr(self, "_set_%s" % name)
      if load is None:
        setter(source)
      else:
        setter(source, load=load)

  def _path(self):
    if hasattr(self, "_parent"):
      return self._parent._path() + [self._yang_name]
    return ['bgp', 'rib', 'afi-safis', 'afi-safi', 'ipv4-unicast', 'loc-rib']

  def _get_routes(self):
    """
    Getter method for routes, mapped from YANG variable
    /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container)

    YANG Description: Enclosing container for list of routes in the
    routing table.
    """
    return self.__routes

  def _set_routes(self, v, load=False):
    """
    Setter method for routes, mapped from YANG variable
    /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container).
    The node is config false, so 'routes' is exposed read-only and backends
    looking to populate this variable should call thisObj._set_routes()
    directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      wrapped = YANGDynClass(v,base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """routes must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)""",
        })
    self.__routes = wrapped
    if hasattr(self, '_set'):
      self._set()

  def _unset_routes(self):
    self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)

  # Read-only property: state data gets a getter only.
  routes = __builtin__.property(_get_routes)

  _pyangbind_elements = OrderedDict([('routes', routes), ])
from . import routes
class loc_rib(PybindBase):
  """
  Auto-generated by the PythonClass plugin for PYANG from YANG module
  openconfig-bgp-global, at path
  /bgp/rib/afi-safis/afi-safi/ipv4-unicast/loc-rib.  Every member element
  of the container is exposed as a class variable with a specific YANG type.

  YANG Description: Container for the IPv4 BGP LOC-RIB data
  """
  __slots__ = ('_path_helper', '_extmethods', '__routes',)

  _yang_name = 'loc-rib'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # Default, empty 'routes' container (state data: config false).
    self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
    load = kwargs.pop("load", None)
    if not args:
      return
    if len(args) > 1:
      raise TypeError("cannot create a YANG container with >1 argument")
    # The source object must provide every element of this container.
    if not all(hasattr(args[0], name) for name in self._pyangbind_elements):
      raise ValueError("Supplied object did not have the correct attributes")
    for name in self._pyangbind_elements:
      source = getattr(args[0], name)
      if source._changed() is False:
        continue
      setter = getattr(self, "_set_%s" % name)
      if load is None:
        setter(source)
      else:
        setter(source, load=load)

  def _path(self):
    if hasattr(self, "_parent"):
      return self._parent._path() + [self._yang_name]
    return ['bgp', 'rib', 'afi-safis', 'afi-safi', 'ipv4-unicast', 'loc-rib']

  def _get_routes(self):
    """
    Getter method for routes, mapped from YANG variable
    /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container)

    YANG Description: Enclosing container for list of routes in the
    routing table.
    """
    return self.__routes

  def _set_routes(self, v, load=False):
    """
    Setter method for routes, mapped from YANG variable
    /bgp/rib/afi_safis/afi_safi/ipv4_unicast/loc_rib/routes (container).
    The node is config false, so 'routes' is exposed read-only and backends
    looking to populate this variable should call thisObj._set_routes()
    directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      wrapped = YANGDynClass(v,base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """routes must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)""",
        })
    self.__routes = wrapped
    if hasattr(self, '_set'):
      self._set()

  def _unset_routes(self):
    self.__routes = YANGDynClass(base=routes.routes, is_container='container', yang_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=False)

  # Read-only property: state data gets a getter only.
  routes = __builtin__.property(_get_routes)

  _pyangbind_elements = OrderedDict([('routes', routes), ])
| [
"zblevins@netflix.com"
] | zblevins@netflix.com |
9a3ca5f038df2d91274264f5c5e0e84d04919a94 | 9745e5d8acae70bcdd7011cc1f81c65d3f5eed22 | /Interview Preparation Kit/Stacks and Queues /Min Max Riddle/solutions.py | fa525d9722e71608c301d368bff05d2bbd5b24a6 | [] | no_license | rinleit/hackerrank-solutions | 82d71b562d276ec846ab9a26b3e996c80172f51e | 519a714c5316892dce6bd056b14df5e222078109 | refs/heads/master | 2022-11-10T05:08:11.185284 | 2020-07-02T01:34:35 | 2020-07-02T01:34:35 | 254,403,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | #!/bin/python3
import os
import sys
# Complete the riddle function below.
def riddle(arr):
    """For every window size w in 1..len(arr), return the maximum over all
    windows of size w of the minimum element inside the window.

    Uses monotonic stacks to find, for each element, the widest window in
    which it is the minimum -- O(n) overall, replacing the previous O(n^2)
    scan-per-element implementation.

    Parameters
    ----------
    arr : list[int]
        The input array.

    Returns
    -------
    list[int]
        Element i is the answer for window size i + 1; empty for empty input.
    """
    n = len(arr)
    # left[i]: index of the previous element strictly smaller than arr[i] (-1 if none).
    left = [-1] * n
    stack = []
    for i in range(n):
        while stack and arr[stack[-1]] >= arr[i]:
            stack.pop()
        if stack:
            left[i] = stack[-1]
        stack.append(i)
    # right[i]: index of the next element smaller than OR EQUAL to arr[i] (n if none).
    # The asymmetric comparison gives exactly one element of any run of equal
    # minima its full span, so spans are never undercounted.
    right = [n] * n
    stack = []
    for i in range(n - 1, -1, -1):
        while stack and arr[stack[-1]] > arr[i]:
            stack.pop()
        if stack:
            right[i] = stack[-1]
        stack.append(i)
    # best[w] = max over elements whose maximal "I am the minimum" window has size w.
    best = [float("-inf")] * (n + 1)
    for i in range(n):
        span = right[i] - left[i] - 1
        best[span] = max(best[span], arr[i])
    # An element that is the minimum of a window of size w is also the minimum
    # of every smaller sub-window, so propagate maxima down to smaller sizes.
    for w in range(n - 1, 0, -1):
        best[w] = max(best[w], best[w + 1])
    return best[1:]
if __name__ == '__main__':
    # HackerRank harness: the answer is written to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())  # declared array length (riddle derives it from arr itself)
    arr = list(map(int, input().rstrip().split()))
    res = riddle(arr)
    fptr.write(' '.join(map(str, res)))
    fptr.write('\n')
    fptr.close()
| [
"rinle.it@gmail.com"
] | rinle.it@gmail.com |
4b5e7e285b8e0c0129a0dff3c8dc00f64d5ef802 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/paloaltonetworks/azure-mgmt-paloaltonetworksngfw/generated_samples/prefix_list_global_rulestack_get_maximum_set_gen.py | c8ef113d486bafe5a4bee4d324a8a7d46b6bcafb | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 1,652 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.paloaltonetworksngfw import PaloAltoNetworksNgfwMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-paloaltonetworksngfw
# USAGE
python prefix_list_global_rulestack_get_maximum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch one prefix list from a global rulestack and print the result."""
    mgmt_client = PaloAltoNetworksNgfwMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="SUBSCRIPTION_ID",
    )
    prefix_list = mgmt_client.prefix_list_global_rulestack.get(
        global_rulestack_name="praval",
        name="armid1",
    )
    print(prefix_list)
# x-ms-original-file: specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListGlobalRulestack_Get_MaximumSet_Gen.json
# Run the sample when executed directly as a script.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
217b70a3e8d3cb947f5ede6970b9ebb41ba81cc8 | 7778920b9f05fa25a76c4caf89c05838398a3f91 | /sciwing/metrics/token_cls_accuracy.py | f258efa2fed41e25349b896e41ee2c0131557373 | [
"MIT"
] | permissive | sean-dingxu/sciwing | 7c68101d579e031ecff20e1886911119b16861cf | 75eca1ea43be165eab20cf8bd81bbc19cecda74c | refs/heads/master | 2021-01-26T11:13:55.625792 | 2020-05-17T18:28:41 | 2020-05-17T18:28:41 | 243,418,557 | 0 | 0 | MIT | 2020-03-08T13:56:09 | 2020-02-27T03:07:19 | null | UTF-8 | Python | false | false | 12,061 | py | from typing import Dict, Union, Any, List, Optional
from sciwing.metrics.BaseMetric import BaseMetric
import wasabi
from sciwing.utils.common import merge_dictionaries_with_sum
import numpy as np
import pandas as pd
from sciwing.metrics.classification_metrics_utils import ClassificationMetricsUtils
import torch
from sciwing.utils.class_nursery import ClassNursery
from sciwing.data.datasets_manager import DatasetsManager
from sciwing.data.line import Line
from sciwing.data.seq_label import SeqLabel
from collections import defaultdict
class TokenClassificationAccuracy(BaseMetric, ClassNursery):
    """Precision/Recall/F-measure metric for token-level (sequence labelling)
    classification, tracked separately for every label namespace.

    Per-class true-positive/false-positive/false-negative counts are
    accumulated across batches by ``calc_metric`` and turned into per-class,
    macro and micro P/R/F values by ``get_metric``.
    """
    def __init__(
        self,
        datasets_manager: DatasetsManager = None,
        predicted_tags_namespace_prefix="predicted_tags",
    ):
        """
        Parameters
        ----------
        datasets_manager : DatasetsManager
            Supplies the label namespaces and the per-namespace numericalizers
            used to map gold labels to indices.
        predicted_tags_namespace_prefix : str
            ``model_forward_dict`` is expected to hold the predictions for a
            namespace under the key ``f"{prefix}_{namespace}"``.
        """
        super(TokenClassificationAccuracy, self).__init__(
            datasets_manager=datasets_manager
        )
        self.datasets_manager = datasets_manager
        self.label_namespaces = datasets_manager.label_namespaces
        self.predicted_tags_namespace_prefix = predicted_tags_namespace_prefix
        self.msg_printer = wasabi.Printer()
        self.classification_metrics_utils = ClassificationMetricsUtils()
        # a mapping between namespace and per-class tp/fp/fn/tn counters
        self.tp_counter: Dict[str, Dict[str, Any]] = defaultdict(dict)
        self.fp_counter: Dict[str, Dict[str, Any]] = defaultdict(dict)
        self.fn_counter: Dict[str, Dict[str, Any]] = defaultdict(dict)
        self.tn_counter: Dict[str, Dict[str, Any]] = defaultdict(dict)
    def calc_metric(
        self,
        lines: List[Line],
        labels: List[SeqLabel],
        model_forward_dict: Dict[str, Any],
    ) -> None:
        """Accumulate confusion-matrix counts for one batch.

        Parameters
        ----------------
        lines: List[Line]
            The list of lines
        labels: List[Label]
            The list of sequence labels
        model_forward_dict: Dict[str, Any]
            The model_forward_dict should have predicted tags for every namespace
            The predicted_tags are the best possible predicted tags for the batch
            They are List[List[int]] where the size is ``[batch_size, time_steps]``
        """
        # get true labels for all namespaces
        namespace_to_true_labels = defaultdict(list)
        namespace_to_true_labels_mask = defaultdict(list)
        namespace_to_pred_labels_mask = defaultdict(list)
        for namespace in self.label_namespaces:
            # List[List[int]]
            predicted_tags = model_forward_dict.get(
                f"{self.predicted_tags_namespace_prefix}_{namespace}"
            )
            max_length = max([len(tags) for tags in predicted_tags])  # max num tokens
            numericalizer = self.datasets_manager.namespace_to_numericalizer[namespace]
            pred_tags_mask = numericalizer.get_mask_for_batch_instances(
                instances=predicted_tags
            ).tolist()
            namespace_to_pred_labels_mask[namespace] = pred_tags_mask
            for label in labels:
                # Gold labels are numericalized and padded to the longest
                # predicted sequence so the two sides line up position-wise.
                true_labels = label.tokens[namespace]
                true_labels = [tok.text for tok in true_labels]
                true_labels = numericalizer.numericalize_instance(instance=true_labels)
                true_labels = numericalizer.pad_instance(
                    numericalized_text=true_labels,
                    max_length=max_length,
                    add_start_end_token=False,
                )
                labels_mask = numericalizer.get_mask_for_instance(
                    instance=true_labels
                ).tolist()
                namespace_to_true_labels[namespace].append(true_labels)
                namespace_to_true_labels_mask[namespace].append(labels_mask)
        for namespace in self.label_namespaces:
            labels_ = namespace_to_true_labels[namespace]
            labels_mask_ = namespace_to_true_labels_mask[namespace]
            pred_labels_mask_ = namespace_to_pred_labels_mask[namespace]
            # List[List[int]]
            predicted_tags = model_forward_dict.get(
                f"{self.predicted_tags_namespace_prefix}_{namespace}"
            )
            (
                confusion_mtrx,
                classes,
            ) = self.classification_metrics_utils.get_confusion_matrix_and_labels(
                true_tag_indices=labels_,
                predicted_tag_indices=predicted_tags,
                true_masked_label_indices=labels_mask_,
                pred_labels_mask=pred_labels_mask_,
            )
            # Diagonal = true positives; column/row sums minus the diagonal
            # give false positives / false negatives per class.
            tps = np.around(np.diag(confusion_mtrx), decimals=4)
            fps = np.around(np.sum(confusion_mtrx, axis=0) - tps, decimals=4)
            fns = np.around(np.sum(confusion_mtrx, axis=1) - tps, decimals=4)
            tps = tps.tolist()
            fps = fps.tolist()
            fns = fns.tolist()
            class_tps_mapping = dict(zip(classes, tps))
            class_fps_mapping = dict(zip(classes, fps))
            class_fns_mapping = dict(zip(classes, fns))
            # Fold this batch's counts into the running totals.
            self.tp_counter[namespace] = merge_dictionaries_with_sum(
                self.tp_counter.get(namespace, {}), class_tps_mapping
            )
            self.fp_counter[namespace] = merge_dictionaries_with_sum(
                self.fp_counter.get(namespace, {}), class_fps_mapping
            )
            self.fn_counter[namespace] = merge_dictionaries_with_sum(
                self.fn_counter.get(namespace, {}), class_fns_mapping
            )
    def get_metric(self) -> Dict[str, Union[Dict[str, float], float]]:
        """ Returns different values being tracked to calculate Precision Recall FMeasure
        Returns
        -------
        Dict[str, Any]
            Returns a dictionary with following key value pairs for every namespace
            precision: Dict[str, float]
                The precision for different classes
            recall: Dict[str, float]
                The recall values for different classes
            "fscore": Dict[str, float]
                The fscore values for different classes,
            num_tp: Dict[str, int]
                The number of true positives for different classes,
            num_fp: Dict[str, int]
                The number of false positives for different classes,
            num_fn: Dict[str, int]
                The number of false negatives for different classes
            "macro_precision": float
                The macro precision value considering all different classes,
            macro_recall: float
                The macro recall value considering all different classes
            macro_fscore: float
                The macro fscore value considering all different classes
            micro_precision: float
                The micro precision value considering all different classes,
            micro_recall: float
                The micro recall value considering all different classes.
            micro_fscore: float
                The micro fscore value considering all different classes
        """
        metrics = {}
        for namespace in self.label_namespaces:
            # Per-class precision/recall/fscore from the accumulated counters.
            (
                precision_dict,
                recall_dict,
                fscore_dict,
            ) = self.classification_metrics_utils.get_prf_from_counters(
                tp_counter=self.tp_counter[namespace],
                fp_counter=self.fp_counter[namespace],
                fn_counter=self.fn_counter[namespace],
            )
            # For a detailed discussion on micro vs macro scores see
            # https://datascience.stackexchange.com/questions/15989/micro-average-vs-macro-average-performance-in-a-multiclass-classification-settin
            # micro scores: computed from the pooled counts of all classes
            (
                micro_precision,
                micro_recall,
                micro_fscore,
            ) = self.classification_metrics_utils.get_micro_prf_from_counters(
                tp_counter=self.tp_counter[namespace],
                fp_counter=self.fp_counter[namespace],
                fn_counter=self.fn_counter[namespace],
            )
            # macro scores: unweighted average of the per-class values
            (
                macro_precision,
                macro_recall,
                macro_fscore,
            ) = self.classification_metrics_utils.get_macro_prf_from_prf_dicts(
                precision_dict=precision_dict,
                recall_dict=recall_dict,
                fscore_dict=fscore_dict,
            )
            metrics[namespace] = {
                "precision": precision_dict,
                "recall": recall_dict,
                "fscore": fscore_dict,
                "num_tp": self.tp_counter[namespace],
                "num_fp": self.fp_counter[namespace],
                "num_fn": self.fn_counter[namespace],
                "macro_precision": macro_precision,
                "macro_recall": macro_recall,
                "macro_fscore": macro_fscore,
                "micro_precision": micro_precision,
                "micro_recall": micro_recall,
                "micro_fscore": micro_fscore,
            }
        return metrics
    def report_metrics(self, report_type="wasabi") -> Any:
        """ Reports metrics in a printable format
        Parameters
        ----------
        report_type : type
            Select one of ``[wasabi, paper]``
            If wasabi, then we return a printable table that represents the
            precision recall and fmeasures for different classes
        """
        reports = {}
        for namespace in self.label_namespaces:
            if report_type == "wasabi":
                report = self.classification_metrics_utils.generate_table_report_from_counters(
                    tp_counter=self.tp_counter[namespace],
                    fp_counter=self.fp_counter[namespace],
                    fn_counter=self.fn_counter[namespace],
                    idx2labelname_mapping=self.datasets_manager.get_idx_label_mapping(
                        namespace
                    ),
                )
                reports[namespace] = report
        return reports
    def reset(self):
        # Drop all accumulated counts; calc_metric uses .get(namespace, {}) so
        # plain dicts are sufficient replacements for the initial defaultdicts.
        self.tp_counter = {}
        self.fp_counter = {}
        self.fn_counter = {}
        self.tn_counter = {}
    def print_confusion_metrics(
        self,
        predicted_tag_indices: List[List[int]],
        true_tag_indices: List[List[int]],
        labels_mask: Optional[torch.ByteTensor] = None,
    ) -> None:
        """ Prints confusion matrics for a batch of tag indices. It assumes that the batch
        is padded and every instance is of similar length
        Parameters
        ----------
        predicted_tag_indices : List[List[int]]
            Predicted tag indices for a batch of sentences
        true_tag_indices : List[List[int]]
            True tag indices for a batch of sentences
        labels_mask : Optional[torch.ByteTensor]
            The labels mask which has the same as ``true_tag_indices``.
            0 in a position indicates that there is no masking
            1 indicates that there is a masking
        """
        # Default: nothing is masked.
        if labels_mask is None:
            labels_mask = torch.zeros_like(torch.Tensor(true_tag_indices)).type(
                torch.bool
            )
        (
            confusion_mtrx,
            classes,
        ) = self.classification_metrics_utils.get_confusion_matrix_and_labels(
            predicted_tag_indices=predicted_tag_indices,
            true_tag_indices=true_tag_indices,
            true_masked_label_indices=labels_mask,
        )
        classes_with_names = classes
        # Prepend the class names as the first column of the printed matrix.
        confusion_mtrx = pd.DataFrame(confusion_mtrx)
        confusion_mtrx.insert(0, "class_name", classes_with_names)
        assert len(classes) == confusion_mtrx.shape[1] - 1
        header = [f"{class_}" for class_ in classes]
        header.insert(0, "pred(cols)/true(rows)")
        table = self.msg_printer.table(
            data=confusion_mtrx.values.tolist(), header=header, divider=True
        )
        print(table)
| [
"abhinav@comp.nus.edu.sg"
] | abhinav@comp.nus.edu.sg |
4948b993f20d3fd92127a96ca2711c9fa8bf6de4 | 7dbcbec8cfd75576d1be86270899b6642e0e9d70 | /testhtmlserv.py | b84d1716321ed64ef1a70b2c87ecb3637afdf4c0 | [] | no_license | huangnauh/learnpython | b72f43572cd42b962c11688c65437cca0b081f4f | e25f582a7811aa63de2f533736921529b600bcc8 | refs/heads/master | 2021-01-22T23:25:30.370990 | 2015-03-23T13:51:06 | 2015-03-23T13:51:06 | 23,954,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,619 | py | #!E:/python34/python.exe
#coding= utf-8
from http.server import BaseHTTPRequestHandler #BaseHTTPRequestHandler类细分到处理每个协议的方法,这里是‘GET’方法的例子
import urllib.parse
class GetHandler(BaseHTTPRequestHandler):
    """Demo handler: GET echoes request/server details; POST echoes the body to stdout."""
    def do_GET(self):  # override this method to handle GET requests
        parsed_path = urllib.parse.urlparse(self.path)
        message_parts = [  # build the list of lines to return
            'CLIENT VALUES:',  # client-side information
            'client_address=%s (%s)' % (self.client_address,
                                        self.address_string()),  # client address and port
            'command=%s' % self.command,  # the HTTP method; necessarily 'GET' here
            'path=%s' % self.path,  # the requested path
            'real path=%s' % parsed_path.path,  # path component as parsed by urlparse
            'query=%s' % parsed_path.query,  # query string as parsed by urlparse
            'request_version=%s' % self.request_version,  # HTTP protocol version of the request
            '',
            'SERVER VALUES:',  # server-side information
            'server_version=%s' % self.server_version,  # server software identification
            'sys_version=%s' % self.sys_version,  # Python version running the server
            'protocol_version=%s' % self.protocol_version,  # HTTP protocol version used by the server
            '',
            'HEADERS RECEIVED:',
        ]
        for name, value in sorted(self.headers.items()):  # append request headers (user-agent, host, ...)
            message_parts.append('%s=%s' % (name, value.rstrip()))
        message_parts.append('')
        message = '\r\n'.join(message_parts)
        self.send_response(200)  # respond with status 200 OK plus default headers
        self.end_headers()  # finish the header section
        self.wfile.write(message.encode('ascii'))  # send the response body
        return
    def do_POST(self):
        # Read exactly content-length bytes of the request body.
        datas = self.rfile.read(int(self.headers['content-length']))
        datas = urllib.parse.unquote(datas.decode('ascii'))  # URL-decode the payload
        print(datas)
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b'yes posted')
if __name__ == '__main__':
    from http.server import HTTPServer
    # Start an HTTP server on localhost:8080 handled by our GetHandler.
    server = HTTPServer(('localhost', 8080), GetHandler)
    print('Starting server, use <Ctrl-C> to stop')
    server.serve_forever()  # keep serving until interrupted
"huanglibo2010@gmail.com"
] | huanglibo2010@gmail.com |
22212b89866c682e600156e061acfef822671fd1 | a5d05e3cecfa6571016e41d19c982f1082714582 | /PROXYC.py | e125e22e4e8ce246a22c2fe83135f1a2f165446a | [] | no_license | Sanket-Mathur/CodeChef-Practice | 8ebc80eb9a32c90a5b3785348fca2048190dbeb0 | cba5bc2eaaf5489cbd8e85acaca6f82d223cff4f | refs/heads/master | 2023-08-08T05:59:42.755206 | 2021-09-26T12:44:15 | 2021-09-26T12:44:15 | 268,267,425 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | import math
for _ in range(int(input())):
N = int(input())
S = list(input())
Cp = S.count('P')
Ca = S.count('A')
req = math.ceil(N * 0.75)
if req <= Cp:
print(0)
else:
c = 0
for i in range(2,N-2):
if (S[i]=='A') and (S[i-1]=='P' or S[i-2]=='P') and (S[i+1]=='P' or S[i+2]=='P'):
c += 1
print(req-Cp if Cp+c>=req else -1)
| [
"rajeev.sanket@gmail.com"
] | rajeev.sanket@gmail.com |
785f17b8be057a2d4b4d69e3b7ba1879ff2d3dca | b1eac5e638273ddce5f7a9111676ecf1a7a0305a | /day1/selenium8.py | 5bfc2c56f17e9600606c08d8fb1b6aeb17298ec0 | [] | no_license | zhangbailong945/pachong | 081d4b79448ab01292d91011e6db4811784baa63 | 8af730989488ecfc09d40e96a4790ce1a6ce1714 | refs/heads/master | 2020-03-28T01:15:27.909917 | 2019-06-06T09:46:07 | 2019-06-06T09:46:07 | 147,490,458 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | #执行js
import sys,time
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
chrome=webdriver.Chrome()
chrome.get('https://www.baidu.com')
chrome.implicitly_wait(3)
try:
chrome.execute_script('alert("111111111")')
finally:
chrome.close() | [
"1207549344@qq.com"
] | 1207549344@qq.com |
d3daa7f23d6a4d10810242a59fcec06aa298a39e | 1676168244eed1c5610b2c1c38f692f89990b112 | /part3-python/Bigdata/Code09-01 화면크기 및 서브윈도.py | f4671848d6135e29810e9a23864dbe61fa9a5036 | [] | no_license | gtpgg1013/AI_docs | 351e83f986d66224c82fff2de944753c98336d03 | 43f8eed8b2732314bd40ed65e1d7eb44dd28fc04 | refs/heads/master | 2022-12-09T17:32:02.992554 | 2019-11-20T09:03:56 | 2019-11-20T09:03:56 | 182,927,565 | 1 | 0 | null | 2022-12-08T06:50:23 | 2019-04-23T03:54:56 | Jupyter Notebook | UTF-8 | Python | false | false | 23,641 | py | from tkinter import *
from tkinter.simpledialog import *
from tkinter.filedialog import *
import math
import os
import os.path
####################
#### 함수 선언부 ####
####################
# 메모리를 할당해서 리스트(참조)를 반환하는 함수
def malloc(h, w, initValue=0):
    """Allocate an h x w 2-D list (image buffer) with every cell set to initValue."""
    return [[initValue] * w for _ in range(h)]
# 파일을 메모리로 로딩하는 함수
def loadImage(fname) :
    """Load a square 8-bit grayscale RAW file into the global input buffers.

    Sets the globals inH/inW (side length derived from the file size) and
    inImage (2-D list of pixel values 0..255).
    """
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    fsize = os.path.getsize(fname)  # file size in bytes
    # RAW files here are square: width == height == sqrt(byte count).
    inH = inW = int(math.sqrt(fsize))
    ## allocate memory for the input image ##
    inImage=[]
    inImage=malloc(inH,inW)
    # file --> memory
    # BUG FIX: read from the 'fname' parameter instead of the global 'filename',
    # so the function also works when the global has not been set by openImage().
    # Reading the whole file once is also far faster than one read(1) per pixel.
    with open(fname, 'rb') as rFp:
        rawBytes = rFp.read()
    for i in range(inH) :
        for k in range(inW) :
            # Indexing a bytes object already yields an int in 0..255.
            inImage[i][k] = rawBytes[i * inW + k]
# 파일을 선택해서 메모리로 로딩하는 함수
# Ask the user for a file and load it into memory.
def openImage() :
    global window, canvas, paper, filename, inImage, outImage,inH, inW, outH, outW
    filename = askopenfilename(parent=window,
               filetypes=(("RAW 파일", "*.raw"), ("모든 파일", "*.*")))
    # The user cancelled the dialog -> nothing to load.
    if filename == '' or filename == None :
        return
    loadImage(filename)
    # Show the freshly loaded image via the identity transform.
    equalImage()
import struct
def saveImage() :
    # Write the current output image to disk as raw unsigned 8-bit bytes.
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    saveFp = asksaveasfile(parent=window, mode='wb',
               defaultextension='*.raw', filetypes=(("RAW 파일", "*.raw"), ("모든 파일", "*.*")))
    # The user cancelled the save dialog -> nothing to write.
    if saveFp == '' or saveFp == None :
        return
    for i in range(outH) :
        for k in range(outW) :
            saveFp.write(struct.pack('B', outImage[i][k]))  # one unsigned byte per pixel
    saveFp.close()
def displayImage() :
    # Render the global output image onto a fixed-size Tk canvas,
    # downsampling by 'step' when the image is larger than the view.
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    global VIEW_X, VIEW_Y
    if canvas != None : # a previous image is on screen -> tear it down first
        canvas.destroy()
    ## fixed view size: shrink the view to small images, sample large ones down
    if outH <= VIEW_Y or outW <= VIEW_X :
        VIEW_X = outW
        VIEW_Y = outH
        step = 1
    else :
        step = outW / VIEW_X
    window.geometry(str(int(VIEW_Y*1.2)) + 'x' + str(int(VIEW_X*1.2))) # the window frame
    canvas = Canvas(window, height=VIEW_Y, width=VIEW_X)
    paper = PhotoImage(height=VIEW_Y, width=VIEW_X)
    canvas.create_image((VIEW_Y // 2, VIEW_X // 2), image=paper, state='normal')
    ## previous approach kept for reference: resize the window to the image
    # window.geometry(str(outH) + 'x' + str(outW)) # 벽
    # canvas = Canvas(window, height=outH, width=outW) # 보드
    # paper = PhotoImage(height=outH, width=outW) # 빈 종이
    # canvas.create_image((outH//2, outW//2), image=paper, state='normal')
    # ## 출력영상 --> 화면에 한점씩 찍자.
    # for i in range(outH) :
    #     for k in range(outW) :
    #         r = g = b = outImage[i][k]
    #         paper.put("#%02x%02x%02x" % (r, g, b), (k, i))
    ## performance improvement: build one big put() string instead of per-pixel puts
    import numpy
    rgbStr = '' # accumulates the colour string for all sampled pixels
    for i in numpy.arange(0,outH, step) :
        tmpStr = ''
        for k in numpy.arange(0,outW, step) :
            i = int(i); k = int(k)
            r = g = b = outImage[i][k]  # grayscale: same value for R, G and B
            tmpStr += ' #%02x%02x%02x' % (r,g,b)
        rgbStr += '{' + tmpStr + '} '
    paper.put(rgbStr)
    # Wire up the pan gesture handlers.
    canvas.bind('<Button-1>', mouseClick)
    canvas.bind('<ButtonRelease-1>', mouseDrop)
    canvas.pack(expand=1, anchor=CENTER)
    status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH))
###############################################
##### 컴퓨터 비전(영상처리) 알고리즘 함수 모음 #####
###############################################
# 동일영상 알고리즘
def equalImage():
    """Identity transform: copy the input image to the output unchanged and redraw."""
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    # The output keeps the input geometry.
    outH, outW = inH, inW
    outImage = [row[:] for row in inImage]
    displayImage()
# 동일영상 알고리즘
def addImage():
    """Brighten/darken: add a user-chosen offset to every pixel, clamped to [0, 255]."""
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    # The output keeps the input geometry.
    outH, outW = inH, inW
    outImage = malloc(outH, outW)
    value = askinteger("밝게/어둡게", "값-->", minvalue=-255, maxvalue=255)
    for row in range(inH):
        for col in range(inW):
            # Clamp the shifted value into the valid 8-bit range.
            outImage[row][col] = min(255, max(0, inImage[row][col] + value))
    displayImage()
# 반전영상 알고리즘
def revImage():
    """Negative transform: map every pixel value v to 255 - v and redraw."""
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    outH, outW = inH, inW
    outImage = [[255 - pixel for pixel in row] for row in inImage]
    displayImage()
# 이진화 알고리즘
def bwImage():
    """Binarize: threshold at the image's mean gray level (above -> 255, else 0)."""
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    outH, outW = inH, inW
    # Mean gray level of the whole input image (integer division, as before).
    avg = sum(sum(row) for row in inImage) // (inW * inH)
    outImage = [[255 if pixel > avg else 0 for pixel in row] for row in inImage]
    displayImage()
# 파라볼라 알고리즘 with LUT
def paraImage() :
    """Parabola (cap) transform implemented through a 256-entry lookup table."""
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    outH = inH
    outW = inW
    outImage = malloc(outH, outW)
    # Pre-compute out = 255 - 255*(in/128 - 1)^2 for every possible gray level.
    LUT = [int(255 - 255 * math.pow(level / 128 - 1, 2)) for level in range(256)]
    # Remap each pixel through the table.
    for row in range(inH):
        for col in range(inW):
            outImage[row][col] = LUT[inImage[row][col]]
    displayImage()
# 상하반전 알고리즘
def upDownImage() :
    """Flip the image vertically (top row becomes bottom row)."""
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    outH = inH
    outW = inW
    outImage = malloc(outH, outW)
    # Write each source row into its mirrored destination row.
    for row in range(inH):
        dst = outImage[inH - 1 - row]
        for col in range(inW):
            dst[col] = inImage[row][col]
    displayImage()
# 화면이동 알고리즘
def moveImage() :
    """Arm pan mode: the next click-and-drag on the canvas translates the image."""
    global panYN
    canvas.configure(cursor='mouse')
    panYN = True
def mouseClick(event) :
    """Record the pan start point (only while pan mode is armed)."""
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    global sx,sy,ex,ey, panYN
    if not panYN:
        return
    sx = event.x
    sy = event.y
def mouseDrop(event) :
    """Finish a pan gesture: shift the image by the click-to-release vector.

    Fix: the destination bounds test compared the row index against outW and
    the column index against outH (swapped); harmless only for square images.
    """
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    global sx, sy, ex, ey, panYN
    if panYN == False :
        return
    ex = event.x; ey = event.y
    ## Output has the same dimensions as the input.
    outH = inH; outW = inW;
    ###### allocate output ################
    outImage = []; outImage = malloc(outH, outW)
    # Drag vector (how far the image content moves).
    mx = sx - ex; my = sy - ey
    for i in range(inH) :
        for k in range(inW) :
            # Row index tested against outH, column index against outW.
            if 0 <= i-my < outH and 0 <= k-mx < outW :
                outImage[i-my][k-mx] = inImage[i][k]
    panYN = False
    displayImage()
# 영상 축소 알고리즘
def zoomOutImage() :
    """Shrink the image by sampling every *scale*-th pixel (nearest neighbour)."""
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    scale = askinteger("축소", "값-->", minvalue=2, maxvalue=16)
    outH = inH // scale
    outW = inW // scale
    outImage = malloc(outH, outW)
    # Subsample on a regular grid.
    for row in range(outH):
        src = inImage[row * scale]
        for col in range(outW):
            outImage[row][col] = src[col * scale]
    displayImage()
# 영상 축소 알고리즘 (평균변환)
def zoomOutImage2() :
    """Shrink by block averaging: each scale x scale block becomes one output pixel.

    Fix: when the input size is not a multiple of *scale*, the original wrote to
    out-of-range output indices (i//scale could reach outH, raising IndexError).
    Accumulation is now limited to the pixels that actually map into the output,
    which also keeps the mean exact (every cell receives scale*scale samples).
    """
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    scale = askinteger("축소", "값-->", minvalue=2, maxvalue=16)
    ## Output size is the input size divided by the scale factor.
    outH = inH//scale; outW = inW//scale;
    ###### allocate output ################
    outImage = []; outImage = malloc(outH, outW)
    # Accumulate each block's pixel values into its output cell...
    for i in range(outH * scale) :
        for k in range(outW * scale) :
            outImage[i//scale][k//scale] += inImage[i][k]
    # ...then divide by the block size to get the mean.
    for i in range(outH):
        for k in range(outW):
            outImage[i][k] //= (scale*scale)
    displayImage()
# 영상 확대 알고리즘
def zoomInImage() :
    """Enlarge the image by pixel replication (nearest neighbour)."""
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    scale = askinteger("확대", "값-->", minvalue=2, maxvalue=8)
    outH = inH * scale
    outW = inW * scale
    outImage = malloc(outH, outW)
    # Each source pixel is repeated scale times in both directions.
    for row in range(outH):
        src = inImage[row // scale]
        for col in range(outW):
            outImage[row][col] = src[col // scale]
    displayImage()
# 영상 확대 알고리즘 (양선형 보간)
def zoomInImage2() :
    """Enlarge the image with bilinear interpolation.

    Fix: for border pixels (source index in the last row/column) the original's
    guard skipped refreshing the four neighbour samples, so the interpolation
    reused stale values from the previous pixel. Neighbour indices are now
    clamped to the image, so every output pixel is computed from valid samples.
    """
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    scale = askinteger("확대", "값-->", minvalue=2, maxvalue=8)
    ## Output size is the input size times the scale factor.
    outH = inH*scale; outW = inW*scale;
    ###### allocate output ################
    outImage = []; outImage = malloc(outH, outW)
    for i in range(outH) :
        for k in range(outW) :
            # Real-valued source position and its integer / fractional parts.
            rH = i / scale ; rW = k / scale
            iH = int(rH) ; iW = int(rW)
            x = rW - iW; y = rH - iH
            # Clamp the "next" indices so border pixels replicate the edge.
            iH2 = min(iH + 1, inH - 1)
            iW2 = min(iW + 1, inW - 1)
            C1 = inImage[iH][iW]        # top-left neighbour
            C2 = inImage[iH][iW2]       # top-right neighbour
            C3 = inImage[iH2][iW2]      # bottom-right neighbour
            C4 = inImage[iH2][iW]       # bottom-left neighbour
            newValue = C1*(1-y)*(1-x) + C2*(1-y)*x + C3*y*x + C4*y*(1-x)
            outImage[i][k] = int(newValue)
    displayImage()
# 영상 회전 알고리즘
def rotateImage() :
    """Rotate about the origin using forward mapping.

    Forward mapping can leave destination cells no source pixel lands on;
    those keep whatever value malloc initialised them with (presumably 0 —
    see rotateImage2 for the hole-free backward-mapped version).
    """
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    angle = askinteger("회전", "값-->", minvalue=1, maxvalue=360)
    outH = inH
    outW = inW
    outImage = malloc(outH, outW)
    radian = angle * math.pi / 180
    # Hoist the trig out of the pixel loop; values are identical per pixel.
    cosA = math.cos(radian)
    sinA = math.sin(radian)
    for i in range(inH):
        for k in range(inW):
            xd = int(cosA * i - sinA * k)
            yd = int(sinA * i + cosA * k)
            if 0 <= xd < inH and 0 <= yd < inW:
                outImage[xd][yd] = inImage[i][k]
    displayImage()
# 영상 회전 알고리즘 - 중심, 역방향
def rotateImage2() :
    """Rotate about the image centre using backward mapping (no holes)."""
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    angle = askinteger("회전", "값-->", minvalue=1, maxvalue=360)
    outH = inH
    outW = inW
    outImage = malloc(outH, outW)
    radian = angle * math.pi / 180
    cosA = math.cos(radian)
    sinA = math.sin(radian)
    # Rotation centre.
    cx = inW // 2
    cy = inH // 2
    # For every output pixel, look up the source pixel it came from.
    for i in range(outH):
        for k in range(outW):
            xd = int(cosA * (i - cx) - sinA * (k - cy)) + cx
            yd = int(sinA * (i - cx) + cosA * (k - cy)) + cy
            if 0 <= xd < outH and 0 <= yd < outW:
                outImage[i][k] = inImage[xd][yd]
            else:
                outImage[i][k] = 255  # outside the source -> white background
    displayImage()
# 히스토그램
import matplotlib.pyplot as plt
def histoImage() :
    """Plot the gray-level histograms of the input and output images with matplotlib."""
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    inCountList = [0] * 256
    outCountList = [0] * 256
    # Count the occurrences of each gray level (0..255) in both images.
    for row in range(inH):
        for col in range(inW):
            inCountList[inImage[row][col]] += 1
    for row in range(outH):
        for col in range(outW):
            outCountList[outImage[row][col]] += 1
    plt.plot(inCountList)
    plt.plot(outCountList)
    plt.show()
def histoImage2() :
    """Draw the output image's histogram in a hand-rolled Toplevel window.

    Fix: the normalisation divided by (max - min) of the counts, which raises
    ZeroDivisionError for a perfectly flat histogram; the denominator is now
    guarded (a flat histogram normalises to all zeros).
    """
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    outCountList = [0] * 256
    normalCountList = [0] * 256
    # Gray-level frequencies of the output image.
    for i in range(outH) :
        for k in range(outW) :
            outCountList[outImage[i][k]] += 1
    maxVal = max(outCountList); minVal = min(outCountList)
    High = 256
    # Normalise: (count - min) * High / (max - min); guard a flat histogram.
    denom = (maxVal - minVal) or 1
    for i in range(len(outCountList)) :
        normalCountList[i] = (outCountList[i] - minVal) * High / denom
    ## Render into a 256x256 sub-window, one column per gray level.
    subWindow = Toplevel(window)
    subWindow.geometry('256x256')
    subCanvas = Canvas(subWindow, width=256, height=256)
    subPaper = PhotoImage(width=256, height=256)
    subCanvas.create_image((256//2, 256//2), image=subPaper, state='normal')
    for i in range(len(normalCountList)) :
        for k in range(int(normalCountList[i])) :
            data = 0  # bars are drawn in black, bottom-up
            subPaper.put('#%02x%02x%02x' % (data, data, data), (i, 255-k))
    subCanvas.pack(expand=1, anchor=CENTER)
    subWindow.mainloop()
# 스트레칭 알고리즘
def stretchImage() :
    """Contrast stretch: map the input's [min, max] range linearly onto [0, 255].

    Fix: a constant (flat) image made (maxVal - minVal) zero and crashed with
    ZeroDivisionError; such images are now copied through unchanged.
    """
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    ## Output has the same dimensions as the input.
    outH = inH; outW = inW;
    ###### allocate output ################
    outImage = []; outImage = malloc(outH, outW)
    # Find the input's gray-level range.
    maxVal = minVal = inImage[0][0]
    for i in range(inH) :
        for k in range(inW) :
            if inImage[i][k] < minVal :
                minVal = inImage[i][k]
            elif inImage[i][k] > maxVal :
                maxVal = inImage[i][k]
    if maxVal == minVal :
        # Flat image: stretching is undefined, keep the pixels as they are.
        for i in range(inH) :
            for k in range(inW) :
                outImage[i][k] = inImage[i][k]
    else :
        for i in range(inH) :
            for k in range(inW) :
                outImage[i][k] = int(((inImage[i][k] - minVal) / (maxVal - minVal)) * 255)
    displayImage()
# 스트레칭 알고리즘
def endinImage() :
    """End-in search stretch: clip a user-chosen margin at both ends, then stretch.

    Fix: if the adjusted range collapsed (maxVal <= minVal after applying the
    margins, or a flat image) the original divided by zero or inverted the
    mapping; the degenerate case now falls back to a copy of the input.
    """
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    ## Output has the same dimensions as the input.
    outH = inH; outW = inW;
    ###### allocate output ################
    outImage = []; outImage = malloc(outH, outW)
    # Find the input's gray-level range.
    maxVal = minVal = inImage[0][0]
    for i in range(inH) :
        for k in range(inW) :
            if inImage[i][k] < minVal :
                minVal = inImage[i][k]
            elif inImage[i][k] > maxVal :
                maxVal = inImage[i][k]
    minAdd = askinteger("최소", "최소에서추가-->", minvalue=0, maxvalue=255)
    maxAdd = askinteger("최대", "최대에서감소-->", minvalue=0, maxvalue=255)
    # Narrow the range by the requested margins.
    minVal += minAdd
    maxVal -= maxAdd
    if maxVal <= minVal :
        # Degenerate range: no sensible stretch, copy the input through.
        for i in range(inH) :
            for k in range(inW) :
                outImage[i][k] = inImage[i][k]
        displayImage()
        return
    for i in range(inH) :
        for k in range(inW) :
            value = int(((inImage[i][k] - minVal) / (maxVal - minVal)) * 255)
            if value < 0 :
                value = 0
            elif value > 255 :
                value = 255
            outImage[i][k] = value
    displayImage()
# 평활화 알고리즘
def equalizeImage() :
    """Histogram equalisation via the normalised cumulative histogram."""
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    outH = inH
    outW = inW
    outImage = malloc(outH, outW)
    histo = [0] * 256
    sumHisto = [0] * 256
    normalHisto = [0] * 256
    # Gray-level frequencies.
    for row in range(inH):
        for col in range(inW):
            histo[inImage[row][col]] += 1
    # Running (cumulative) sum of the histogram.
    running = 0
    for level in range(len(histo)):
        running += histo[level]
        sumHisto[level] = running
    # Normalise the cumulative histogram onto 0..255.
    for level in range(len(sumHisto)):
        normalHisto[level] = int(sumHisto[level] / (inW * inH) * 255)
    # Remap every pixel through the equalisation table.
    for row in range(inH):
        for col in range(inW):
            outImage[row][col] = normalHisto[inImage[row][col]]
    displayImage()
## 엠보싱 처리
def embossImage() :
global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
## 중요! 코드. 출력영상 크기 결정 ##
outH = inH; outW = inW;
###### 메모리 할당 ################
outImage = []; outImage = malloc(outH, outW)
####### 진짜 컴퓨터 비전 알고리즘 #####
MSIZE = 3
mask = [ [-1, 0, 0],
[ 0, 0, 0],
[ 0, 0, 1] ]
## 임시 입력영상 메모리 확보
tmpInImage = malloc(inH+MSIZE-1, inW+MSIZE-1, 127)
tmpOutImage = malloc(outH, outW)
## 원 입력 --> 임시 입력
for i in range(inH) :
for k in range(inW) :
tmpInImage[i+MSIZE//2][k+MSIZE//2] = inImage[i][k]
## 회선연산
for i in range(MSIZE//2, inH + MSIZE//2) :
for k in range(MSIZE//2, inW + MSIZE//2) :
# 각 점을 처리.
S = 0.0
for m in range(0, MSIZE) :
for n in range(0, MSIZE) :
S += mask[m][n]*tmpInImage[i+m-MSIZE//2][k+n-MSIZE//2]
tmpOutImage[i-MSIZE//2][k-MSIZE//2] = S
## 127 더하기 (선택)
for i in range(outH) :
for k in range(outW) :
tmpOutImage[i][k] += 127
## 임시 출력 --> 원 출력
for i in range(outH):
for k in range(outW):
value = tmpOutImage[i][k]
if value > 255 :
value = 255
elif value < 0 :
value = 0
outImage[i][k] = int(value)
displayImage()
# 모핑 알고리즘
def morphImage() :
    """Cross-fade (morph) the current image into a second RAW image.

    Opens a second square RAW file, then a background thread blends the two
    images in 20 steps (weight shifting by 0.05 every 0.5 s), redrawing the
    canvas after each step.
    NOTE(review): the thread mutates the global outImage while the Tk main
    loop may read it — confirm this is acceptable for this app.
    """
    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW
    ## Output has the same dimensions as the (first) input image.
    outH = inH; outW = inW;
    ## Pick the second image to morph towards.
    filename2 = askopenfilename(parent=window,
                                filetypes=(("RAW 파일", "*.raw"), ("모든 파일", "*.*")))
    if filename2 == '' or filename2 == None:
        return
    fsize = os.path.getsize(filename2) # file size in bytes
    # RAW files are square 8-bit grayscale, so side length = sqrt(size).
    inH2 = inW2 = int(math.sqrt(fsize))
    ## Allocate memory for the second input image.
    inImage2 = []
    inImage2 = malloc(inH2, inW2)
    # File --> memory, one byte per pixel.
    with open(filename2, 'rb') as rFp:
        for i in range(inH2):
            for k in range(inW2):
                inImage2[i][k] = int(ord(rFp.read(1)))
    ###### allocate output ################
    outImage = []; outImage = malloc(outH, outW)
    ####### blend in a background thread #####
    # (A fixed-weight single blend via askinteger was removed in favour of the
    # animated cross-fade below.)
    import threading
    import time
    def morpFunc() :
        # w1 weights the original image, w2 the second one; they trade off
        # 0.05 per step over 20 animation frames.
        w1 = 1; w2 = 0
        for _ in range(20) :
            for i in range(inH) :
                for k in range(inW) :
                    newValue = int(inImage[i][k]*w1 + inImage2[i][k]*w2)
                    if newValue > 255 :
                        newValue = 255
                    elif newValue < 0 :
                        newValue = 0
                    outImage[i][k] =newValue
            displayImage()
            w1 -= 0.05; w2 += 0.05
            time.sleep(0.5)
    threading.Thread(target=morpFunc).start()
####################
#### Global variable declarations ####
####################
inImage, outImage = [], [] ; inH, inW, outH, outW = [0] * 4
window, canvas, paper = None, None, None
filename = ""
panYN = False  # True while a pan (move) gesture is armed
sx,sy,ex,ey = [0] * 4  # pan gesture start/end coordinates
VIEW_X, VIEW_Y = 512, 512 # on-screen display size (for output)
####################
#### Main code ####
####################
window = Tk()
window.geometry("500x500")
window.title("컴퓨터 비전(딥러닝 기법) ver 0.03")
# Status bar at the bottom showing image info.
status = Label(window, text='이미지 정보:', bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
## Menu bar setup
mainMenu = Menu(window)
window.config(menu=mainMenu)
# File menu: open / save.
fileMenu = Menu(mainMenu)
mainMenu.add_cascade(label="파일", menu=fileMenu)
fileMenu.add_command(label="파일 열기", command=openImage)
fileMenu.add_separator()
fileMenu.add_command(label="파일 저장", command=saveImage)
# Point-processing operations.
comVisionMenu1 = Menu(mainMenu)
mainMenu.add_cascade(label="화소점 처리", menu=comVisionMenu1)
comVisionMenu1.add_command(label="덧셈/뺄셈", command=addImage)
comVisionMenu1.add_command(label="반전하기", command=revImage)
comVisionMenu1.add_command(label="파라볼라", command=paraImage)
comVisionMenu1.add_separator()
comVisionMenu1.add_command(label="모핑", command=morphImage)
# Statistics-based operations.
comVisionMenu2 = Menu(mainMenu)
mainMenu.add_cascade(label="통계", menu=comVisionMenu2)
comVisionMenu2.add_command(label="이진화", command=bwImage)
comVisionMenu2.add_command(label="축소(평균변환)", command=zoomOutImage2)
comVisionMenu2.add_command(label="확대(양선형보간)", command=zoomInImage2)
comVisionMenu2.add_separator()
comVisionMenu2.add_command(label="히스토그램", command=histoImage)
comVisionMenu2.add_command(label="히스토그램(내꺼)", command=histoImage2)
comVisionMenu2.add_command(label="명암대비", command=stretchImage)
comVisionMenu2.add_command(label="End-In탐색", command=endinImage)
comVisionMenu2.add_command(label="평활화", command=equalizeImage)
# Geometric operations.
comVisionMenu3 = Menu(mainMenu)
mainMenu.add_cascade(label="기하학 처리", menu=comVisionMenu3)
comVisionMenu3.add_command(label="상하반전", command=upDownImage)
comVisionMenu3.add_command(label="이동", command=moveImage)
comVisionMenu3.add_command(label="축소", command=zoomOutImage)
comVisionMenu3.add_command(label="확대", command=zoomInImage)
comVisionMenu3.add_command(label="회전1", command=rotateImage)
comVisionMenu3.add_command(label="회전2(중심,역방향)", command=rotateImage2)
# Neighbourhood (area) operations.
comVisionMenu4 = Menu(mainMenu)
mainMenu.add_cascade(label="화소영역 처리", menu=comVisionMenu4)
comVisionMenu4.add_command(label="엠보싱", command=embossImage)
window.mainloop()
"gtpgg1013@gmail.com"
] | gtpgg1013@gmail.com |
2bae305f0500147fc5d51dff37b5e9d7fd5dce7b | e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67 | /azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/sql/operations/geo_backup_policies_operations.py | c92d279872efaa148b9ae1fbb6e5a3e1f681184a | [] | no_license | EnjoyLifeFund/macHighSierra-cellars | 59051e496ed0e68d14e0d5d91367a2c92c95e1fb | 49a477d42f081e52f4c5bdd39535156a2df52d09 | refs/heads/master | 2022-12-25T19:28:29.992466 | 2017-10-10T13:00:08 | 2017-10-10T13:00:08 | 96,081,471 | 3 | 1 | null | 2022-12-17T02:26:21 | 2017-07-03T07:17:34 | null | UTF-8 | Python | false | false | 12,711 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class GeoBackupPoliciesOperations(object):
    """GeoBackupPoliciesOperations operations.

    NOTE: this class was generated by the AutoRest code generator (see the
    file header); manual changes will be lost on regeneration.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version to use for the request. Constant value: "2014-04-01".
    :ivar geo_backup_policy_name: The name of the geo backup policy. Constant value: "Default".
    """
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Fixed constants baked into every request by this operation group.
        self.api_version = "2014-04-01"
        self.geo_backup_policy_name = "Default"
        self.config = config
    def create_or_update(
            self, resource_group_name, server_name, database_name, state, custom_headers=None, raw=False, **operation_config):
        """Updates a database geo backup policy.
        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param state: The state of the geo backup policy. Possible values
         include: 'Disabled', 'Enabled'
        :type state: str or :class:`GeoBackupPolicyState
         <azure.mgmt.sql.models.GeoBackupPolicyState>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: :class:`GeoBackupPolicy
         <azure.mgmt.sql.models.GeoBackupPolicy>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype: :class:`GeoBackupPolicy
         <azure.mgmt.sql.models.GeoBackupPolicy>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Only the policy state is settable; wrap it in the request model.
        parameters = models.GeoBackupPolicy(state=state)
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/geoBackupPolicies/{geoBackupPolicyName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'geoBackupPolicyName': self._serialize.url("self.geo_backup_policy_name", self.geo_backup_policy_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'GeoBackupPolicy')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)
        # 201 = created, 200 = updated in place; anything else is an error.
        if response.status_code not in [201, 200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 201:
            deserialized = self._deserialize('GeoBackupPolicy', response)
        if response.status_code == 200:
            deserialized = self._deserialize('GeoBackupPolicy', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def get(
            self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
        """Gets a geo backup policy.
        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: :class:`GeoBackupPolicy
         <azure.mgmt.sql.models.GeoBackupPolicy>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype: :class:`GeoBackupPolicy
         <azure.mgmt.sql.models.GeoBackupPolicy>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/geoBackupPolicies/{geoBackupPolicyName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'geoBackupPolicyName': self._serialize.url("self.geo_backup_policy_name", self.geo_backup_policy_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('GeoBackupPolicy', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def list_by_database(
            self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
        """Returns a list of geo backup policies.
        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of :class:`GeoBackupPolicy
         <azure.mgmt.sql.models.GeoBackupPolicy>`
        :rtype: :class:`GeoBackupPolicyPaged
         <azure.mgmt.sql.models.GeoBackupPolicyPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Paging callback: invoked by the Paged iterator for the first page
        # (next_link=None) and again for each follow-up page URL.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/geoBackupPolicies'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serverName': self._serialize.url("server_name", server_name, 'str'),
                    'databaseName': self._serialize.url("database_name", database_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Follow-up pages: the service supplies the complete URL.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.GeoBackupPolicyPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.GeoBackupPolicyPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
| [
"Raliclo@gmail.com"
] | Raliclo@gmail.com |
c6292bb43fa0041b229f80af33521753cd403b09 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03030/s045798606.py | 802f6b64e445e923406fd390ac302f6e446d0078 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | N = int(input())
# Read N records of (city name, score), remembering 1-based input order.
l = []
for i in range(N):
    s, p = input().split()
    l.append([s, int(p), i+1])
# Sort by city name ascending, then by score descending.
l = sorted(l, key=lambda x:(x[0], -x[1]))
# Output the original 1-based indices in the sorted order.
for i in range(N):
    print(l[i][2])
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
368593ff4cb7bb628c9d4940f35785de2bf17747 | 28d1f2444b6078ba2aae89a76cd6fd03a6f86d0c | /imadial-system/imadial/nlu/nlu.py | 7f55c2117559f45dfe4aeb23570bd2d28e863f59 | [] | no_license | tzuhsial/ImageEditingWithDialogue | a35b793ba5bde52b6c1570604cdf5cfbbe3835ab | 355f52ff6bb9e46e1beafaf336dadfc96537886c | refs/heads/master | 2020-05-30T00:19:02.980304 | 2020-02-22T23:40:04 | 2020-02-22T23:40:04 | 189,456,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,248 | py | import copy
import os
import re
import sys
import requests
from nltk import edit_distance
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from ..util import build_slot_dict
# Module-level NLP resources shared by the taggers below.
english_stopwords = stopwords.words('english')
stemmer = PorterStemmer()  # NOTE(review): appears unused in this module — confirm before removing
integer_pattern = r"-?\d+"  # matches (optionally signed) integers in free text
def NLUPortal(nlu_config):
    """Instantiate the NLU tagger class named by nlu_config['nlu'] with nlu_config['uri']."""
    tagger_cls = builder(nlu_config["nlu"])
    return tagger_cls(nlu_config["uri"])
class NLIETagger(object):
    """NLU front end backed by an external NLIE tagging service.

    Bare "yes"/"no" utterances are mapped locally to affirm/negate dialogue
    acts; everything else is POSTed to ``self.uri`` and the returned tags are
    converted into slot dicts via ``build_slot_dict``.
    """
    def __init__(self, uri):
        # Endpoint of the external tagger (receives {"sent": <utterance>}).
        self.uri = uri
    def reset(self):
        """Clear the last stored observation."""
        self.observation = {}
    def observe(self, observation):
        """Store the latest observation (expects a 'user_utterance' key)."""
        self.observation = observation
    def act(self):
        """Parse the stored observation and return it with a 'user_acts' list added."""
        sentence = self.observation.get("user_utterance", "")
        sentence = sentence.strip().lower()
        if sentence in ["yes", "no"]:
            nlu_act = self.act_confirm(sentence)
        else:
            nlu_act = self.act_inform(sentence)
        user_act = copy.deepcopy(self.observation)
        user_act['user_acts'] = [nlu_act]
        return user_act
    def act_inform(self, sentence):
        """Query the remote tagger and build an 'inform' act from its response.

        On any failure (network error, bad HTTP status, malformed payload) the
        act degrades to an 'inform' with an empty slot list.
        """
        data = {"sent": sentence}
        try:
            res = requests.post(self.uri, data=data)
            res.raise_for_status()
            tagged = res.json()
            slots = []
            # Track individual slots
            # Intent slot (always present in the tagger response).
            intent_value = tagged["intent"]
            slot = build_slot_dict('intent', intent_value, 1.0)
            slots.append(slot)
            # Attribute and referring expression: take the first candidate of each.
            individual_slots = ["attribute", "refer"]
            for slot_name in individual_slots:
                if len(tagged[slot_name]) > 0:
                    value = tagged[slot_name][0]
                    if slot_name == "refer":
                        # Referring expressions are tracked under the 'object' slot.
                        slot = build_slot_dict('object', value, 1.0)
                    else:
                        slot = build_slot_dict(slot_name, value, 1.0)
                    slots.append(slot)
            # The action word decides the sign of the adjustment value.
            value_sign = 1
            negative_action_words = ["decrease", "lower", "reduce", "drop"]
            if len(tagged["action"]) > 0:
                action_word = tagged["action"][0]
                if action_word in negative_action_words:
                    value_sign = -1
            if len(tagged["value"]) > 0:
                value = int(tagged["value"][0])
                # Only flip positive magnitudes; already-negative values keep their sign.
                if value_sign < 0 and value > 0:
                    value *= -1
                slot = build_slot_dict('adjust_value', value, 1.0)
                slots.append(slot)
        except Exception as e:
            print(e)
            slots = []
        nlu_act = {
            'dialogue_act': build_slot_dict('dialogue_act', 'inform', 1.0),
            'slots': slots
        }
        return nlu_act
    def act_confirm(self, sentence):
        """Map a bare yes/no utterance to an affirm/negate dialogue act."""
        if sentence == "yes":
            da = "affirm"
        elif sentence == "no":
            da = "negate"
        else:
            raise ValueError("Unknown confirm sentence: {}".format(sentence))
        nlu_act = {'dialogue_act': {
            "slot": "dialogue_act", "value": da, 'conf': 1.0}}
        return nlu_act
class EditmeTagger(object):
    """
    NLU front end backed by the external Editme IOB tagger service.

    Special utterances ("", "undo", "redo", "close", "yes", "no") are handled
    locally; everything else is POSTed to ``self.uri`` and the returned IOB
    tags are grouped and filtered into this domain's slots.
    """
    def __init__(self, uri):
        # Endpoint of the Editme tagger (receives {"sentence": <utterance>}).
        self.uri = uri
    def reset(self):
        """No per-dialogue state to clear."""
        pass
    def observe(self, observation):
        """Store the latest observation (expects a 'user_utterance' key)."""
        self.observation = observation
    def act(self):
        """Dispatch the stored utterance to the matching act builder."""
        sentence = self.observation.get("user_utterance", "")
        sentence = sentence.strip().rstrip(".")
        if sentence.lower() in ["", "undo", "redo", "close"]:
            nlu_act = self.act_inform(sentence)
        elif sentence.lower() in ["yes", "no"]:
            nlu_act = self.act_confirm(sentence)
        else:
            nlu_act = self.act_editme(sentence)
        act = copy.deepcopy(self.observation)
        act['user_acts'] = [nlu_act]
        return act
    def act_inform(self, intent):
        """Build a slot-less 'inform' act; a non-empty *intent* becomes the intent slot."""
        nlu_act = {
            'dialogue_act': build_slot_dict('dialogue_act', 'inform', 1.0),
            'slots': []
        }
        if intent != "":
            nlu_act['intent'] = build_slot_dict('intent', intent, 1.0)
        return nlu_act
    def act_confirm(self, sentence):
        """Map a bare yes/no utterance to an affirm/negate dialogue act."""
        if sentence.lower() == "yes":
            da = "affirm"
        elif sentence.lower() == "no":
            da = "negate"
        else:
            raise ValueError("Unknown confirms sentence: {}".format(sentence))
        nlu_act = {'dialogue_act': {
            "slot": "dialogue_act", "value": da, 'conf': 1.0}}
        return nlu_act
    def act_editme(self, sentence):
        """Tag *sentence* remotely, group the IOB output, and map it to domain slots."""
        data = {"sentence": sentence}
        # Query Editme Tagger
        res = requests.post(self.uri, data=data)
        obj = res.json()
        intent = obj["intent"]
        tags = obj["tags"]
        tokens = obj["tokens"]
        # Domain-specific override: these literal utterances are intents themselves.
        if sentence.strip().lower() in ["adjust", "undo", "redo", "close"]:
            intent = sentence.strip().lower()
        # Group consecutive IOB tags of the same label into one slot's token list.
        slots = []
        slot = {}
        # Simple extract the last word of each IOB tag
        # Group tag and words
        prev_iob = None  # NOTE(review): written but never read — confirm before removing
        prev_label = None
        for word, tag in zip(tokens, tags):
            if tag == 'O':
                continue
            iob, label = tag.split('-')  # e.g. "B-mask" -> ("B", "mask")
            if prev_label != None:
                # A new label or an explicit B- boundary starts a fresh group.
                if label != prev_label or iob == 'B':
                    if len(slot):
                        slots.append(slot)
                        slot = {}
            if label not in slot:
                slot[label] = list()
            slot[label].append(word)
            # Append only if you have a new occurence
            prev_label = label
            prev_iob = iob
        if len(slot):
            slots.append(slot)
        # Filter each group down to the slots this domain understands,
        # keeping only the last token of the group.
        updated_slots = []
        for slot in slots:
            slot_type, tokens = list(slot.items())[0]
            word = tokens[-1]
            if word in english_stopwords:
                continue
            if slot_type == "attribute": # attribute
                s = slot_type
                v = word
                matched = False
                # Normalise stemmed/partial attribute words to canonical names.
                if v.startswith('bright'):
                    v = "brightness"
                elif v.startswith('saturat'):
                    v = "saturation"
                elif v.startswith('light'):
                    v = "lightness"
                if v not in ["brightness", "contrast", "hue", "saturation", "lightness"]:
                    continue
            elif slot_type == "value": # adjust_value
                # Only the relative words "more"/"less" are accepted here;
                # explicit numbers are handled by the regex below.
                if word not in ["more", "less"]:
                    continue
                s = "adjust_value"
                v = {'more': 10, 'less': -10}.get(word)
            elif slot_type == "mask": # object
                s = "object"
                v = word
            else:
                continue
            slot_dict = {'slot': s, 'value': v, 'conf': 1.0}
            updated_slots.append(slot_dict)
        # Special case: an explicit (signed) integer anywhere in the sentence
        # becomes the adjust_value slot.
        matches = re.findall(integer_pattern, sentence)
        if len(matches):
            adjust_value_slot = {
                'slot': 'adjust_value', 'value': int(matches[0]), 'conf': 1.0}
            updated_slots.append(adjust_value_slot)
        intent_slot = {'slot': 'intent', 'value': intent, 'conf': 1.0}
        nlu_act = {
            'dialogue_act': {'slot': 'dialogue_act', 'value': 'inform', 'conf': 1.0},
            'intent': intent_slot,
            'slots': updated_slots
        }
        return nlu_act
def builder(string):
    """Look up an attribute (tagger class) of this module by its name."""
    this_module = sys.modules[__name__]
    return getattr(this_module, string)
if __name__ == "__main__":
nlu = EditmeTagger("http://localhost:2004/tag")
with open('testing.txt', 'r') as fin:
next(fin)
for line in fin.readlines():
pos, intent = line.strip().split('|')
pairs = pos.split()
sentence, tags = zip(*(pair.split("###") for pair in pairs))
sentence = ' '.join(sentence)
obsrv = {"user_utterance": sentence}
print(sentence)
nlu.observe(obsrv)
act = nlu.act()
| [
"iammrhelo@gmail.com"
] | iammrhelo@gmail.com |
8c32f32f1ae46ac6ef655a80201fa69829b661bb | b0839dfc309ef90ddfea3d902799c8eefb7a9641 | /Heisenberg.py | d5d833d466b9fe22983a116dc7f0f3e863c2f03d | [] | no_license | vishalpatil0/Heisenberg | 2b1ce030d295be691c32f7144205fc9444aebc80 | 94247dc2436a6e72366ffccc155e32751d0ab6db | refs/heads/main | 2023-02-09T13:54:59.149643 | 2021-01-05T11:47:09 | 2021-01-05T11:47:09 | 310,631,823 | 8 | 4 | null | null | null | null | UTF-8 | Python | false | false | 22,902 | py | import datetime,wikipedia,webbrowser,os,random,requests,pyautogui,playsound,subprocess,time
import urllib.request,bs4 as bs,sys,threading
import Annex,wolframalpha
from ttkthemes import themed_tk
from tkinter import ttk
import tkinter as tk
from tkinter import scrolledtext
from PIL import ImageTk,Image
import sqlite3,pyjokes,pywhatkit
from functools import partial
import getpass,calendar
try:
    # NOTE(review): the API key is hard-coded in source; consider moving it
    # to a config file or environment variable.
    # If construction fails, `app` is left undefined; the branches that use
    # it below wrap the calls in try/except, so this is a deliberate
    # best-effort setup.
    app=wolframalpha.Client("JPK4EE-L7KR3XWP9A") #API key for wolframalpha
except Exception as e:
    pass
#setting chrome path
chrome_path="C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s"
def there_exists(terms, query):
    """Return True if any phrase in *terms* occurs as a substring of *query*.

    Improvement over the original: uses any() and always returns a bool
    (the original returned True or fell through returning None; both are
    falsy/truthy-compatible for existing callers).
    """
    return any(term in query for term in terms)
def CommandsList():
    """Open "Commands List.txt" showing the phrases the assistant recognizes."""
    # os.startfile is Windows-only; opens the file with its default viewer.
    os.startfile('Commands List.txt')
def clearScreen():
    """Clear the scrollable text box of the GUI (menu "Clear Screen")."""
    # SR is the module-level Annex.SpeakRecog instance created in __main__.
    SR.scrollable_text_clearing()
def greet():
    """Speak a time-of-day-appropriate greeting followed by an introduction.

    Picks a random sentence from the sqlite table matching the current hour
    (goodmorning/goodafternoon/goodevening/night) in Heisenberg.db.

    Improvements over the original: the four copy-pasted query branches are
    collapsed into one code path, and the connection is closed even if the
    query or speech call raises (the original leaked it on exception).
    """
    hour = datetime.datetime.now().hour
    if 4 <= hour < 12:
        table = "goodmorning"
    elif 12 <= hour < 18:
        table = "goodafternoon"
    elif 18 <= hour < 21:
        table = "goodevening"
    else:
        table = "night"
    conn = sqlite3.connect('Heisenberg.db')
    try:
        mycursor = conn.cursor()
        # Table names cannot be bound as SQL parameters; `table` comes from
        # the fixed mapping above, so the interpolation is safe.
        mycursor.execute("select sentences from {}".format(table))
        result = mycursor.fetchall()
        SR.speak(random.choice(result)[0])
        conn.commit()
    finally:
        conn.close()
    SR.speak("\nMyself Heisenberg. How may I help you?")
def mainframe():
    """Main command-dispatch loop of the assistant.

    Listens for a voice query, lowercases it, matches it against known
    phrase lists and executes the corresponding task. Branches that launch
    an external application `break` out of the loop so the mic button must
    be pressed again to re-arm listening.

    Fix over the original: the 'show me results for' / 'start searching for'
    branches called the `pywhatkit` module itself (a TypeError at runtime);
    they now call `pywhatkit.search(...)` like the sibling branches.
    """
    SR.scrollable_text_clearing()
    greet()
    # Remembers the last joke request so "one more" style follow-ups work.
    query_for_future=None
    try:
        while(True):
            query=SR.takeCommand().lower() #converted the command in lower case of ease of matching
            #wikipedia search
            if there_exists(['search wikipedia for','from wikipedia'],query):
                SR.speak("Searching wikipedia...")
                if 'search wikipedia for' in query:
                    query=query.replace('search wikipedia for','')
                    results=wikipedia.summary(query,sentences=2)
                    SR.speak("According to wikipedia:\n")
                    SR.speak(results)
                elif 'from wikipedia' in query:
                    query=query.replace('from wikipedia','')
                    results=wikipedia.summary(query,sentences=2)
                    SR.speak("According to wikipedia:\n")
                    SR.speak(results)
            elif there_exists(['wikipedia'],query):
                SR.speak("Searching wikipedia....")
                query=query.replace("wikipedia","")
                results=wikipedia.summary(query,sentences=2)
                SR.speak("According to wikipedia:\n")
                SR.speak(results)
            #jokes
            elif there_exists(['tell me joke','tell me a joke','tell me some jokes','i would like to hear some jokes',"i'd like to hear some jokes",
                               'can you please tell me some jokes','i want to hear a joke','i want to hear some jokes','please tell me some jokes',
                               'would like to hear some jokes','tell me more jokes'],query):
                SR.speak(pyjokes.get_joke(language="en", category="all"))
                query_for_future=query
            elif there_exists(['one more','one more please','tell me more','i would like to hear more of them','once more','once again','more','again'],query) and (query_for_future is not None):
                SR.speak(pyjokes.get_joke(language="en", category="all"))
            #asking for name
            elif there_exists(["what is your name","what's your name","tell me your name",'who are you'],query):
                SR.speak("My name is Heisenberg and I'm here to serve you.")
            #How are you
            elif there_exists(['how are you'],query):
                conn = sqlite3.connect('Heisenberg.db')
                mycursor=conn.cursor()
                mycursor.execute('select sentences from howareyou')
                result=mycursor.fetchall()
                temporary_data=random.choice(result)[0]
                SR.updating_ST_No_newline(temporary_data+'😃\n')
                SR.nonPrintSpeak(temporary_data)
                conn.close()
            #what is my name
            elif there_exists(['what is my name','tell me my name',"i don't remember my name"],query):
                SR.speak("Your name is "+str(getpass.getuser()))
            #calendar
            elif there_exists(['show me calendar','display calendar'],query):
                SR.updating_ST(calendar.calendar(2021))
            #google, youtube and location
            #playing on youtube
            elif there_exists(['open youtube and play','on youtube'],query):
                if 'on youtube' in query:
                    SR.speak("Opening youtube")
                    pywhatkit.playonyt(query.replace('on youtube',''))
                else:
                    SR.speak("Opening youtube")
                    pywhatkit.playonyt(query.replace('open youtube and play ',''))
                break
            elif there_exists(['play some songs on youtube','i would like to listen some music','i would like to listen some songs','play songs on youtube'],query):
                SR.speak("Opening youtube")
                pywhatkit.playonyt('play random songs')
                break
            elif there_exists(['open youtube','access youtube'],query):
                SR.speak("Opening youtube")
                webbrowser.get(chrome_path).open("https://www.youtube.com")
                break
            elif there_exists(['open google and search','google and search'],query):
                url='https://google.com/search?q='+query[query.find('for')+4:]
                webbrowser.get(chrome_path).open(url)
                break
            #image search
            elif there_exists(['show me images of','images of','display images'],query):
                url="https://www.google.com/search?tbm=isch&q="+query[query.find('of')+3:]
                webbrowser.get(chrome_path).open(url)
                break
            elif there_exists(['search for','do a little searching for','show me results for','show me result for','start searching for'],query):
                SR.speak("Searching.....")
                if 'search for' in query:
                    SR.speak(f"Showing results for {query.replace('search for','')}")
                    pywhatkit.search(query.replace('search for',''))
                elif 'do a little searching for' in query:
                    SR.speak(f"Showing results for {query.replace('do a little searching for','')}")
                    pywhatkit.search(query.replace('do a little searching for',''))
                elif 'show me results for' in query:
                    SR.speak(f"Showing results for {query.replace('show me results for','')}")
                    # BUG FIX: was `pywhatkit(...)` (module is not callable).
                    pywhatkit.search(query.replace('show me results for',''))
                elif 'start searching for' in query:
                    SR.speak(f"Showing results for {query.replace('start searching for','')}")
                    # BUG FIX: was `pywhatkit(...)` (module is not callable).
                    pywhatkit.search(query.replace('start searching for',''))
                break
            elif there_exists(['open google'],query):
                SR.speak("Opening google")
                webbrowser.get(chrome_path).open("https://www.google.com")
                break
            elif there_exists(['find location of','show location of','find location for','show location for'],query):
                if 'of' in query:
                    url='https://google.nl/maps/place/'+query[query.find('of')+3:]+'/&'
                    webbrowser.get(chrome_path).open(url)
                    break
                elif 'for' in query:
                    url='https://google.nl/maps/place/'+query[query.find('for')+4:]+'/&'
                    webbrowser.get(chrome_path).open(url)
                    break
            elif there_exists(["what is my exact location","What is my location","my current location","exact current location"],query):
                url = "https://www.google.com/maps/search/Where+am+I+?/"
                webbrowser.get().open(url)
                SR.speak("Showing your current location on google maps...")
                break
            elif there_exists(["where am i"],query):
                Ip_info = requests.get('https://api.ipdata.co?api-key=test').json()
                loc = Ip_info['region']
                SR.speak(f"You must be somewhere in {loc}")
            #who is searcing mode
            elif there_exists(['who is','who the heck is','who the hell is','who is this'],query):
                query=query.replace("wikipedia","")
                results=wikipedia.summary(query,sentences=1)
                SR.speak("According to wikipdedia: ")
                SR.speak(results)
            #play music
            elif there_exists(['play music','play some music for me','like to listen some music'],query):
                SR.speak("Playing musics")
                music_dir='D:\\Musics\\vishal'
                songs=os.listdir(music_dir)
                # print(songs)
                indx=random.randint(0,50)
                os.startfile(os.path.join(music_dir,songs[indx]))
                break
            # top 5 news
            elif there_exists(['top 5 news','top five news','listen some news','news of today'],query):
                news=Annex.News(scrollable_text)
                news.show()
            #whatsapp message
            elif there_exists(['open whatsapp messeaging','send a whatsapp message','send whatsapp message','please send a whatsapp message'],query):
                whatsapp=Annex.WhatsApp(scrollable_text)
                whatsapp.send()
                del whatsapp
            #what is meant by
            elif there_exists(['what is meant by','what is mean by'],query):
                results=wikipedia.summary(query,sentences=2)
                SR.speak("According to wikipedia:\n")
                SR.speak(results)
            #taking photo
            elif there_exists(['take a photo','take a selfie','take my photo','take photo','take selfie','one photo please','click a photo'],query):
                takephoto=Annex.camera()
                Location=takephoto.takePhoto()
                os.startfile(Location)
                del takephoto
                SR.speak("Captured picture is stored in Camera folder.")
            #bluetooth file sharing
            elif there_exists(['send some files through bluetooth','send file through bluetooth','bluetooth sharing','bluetooth file sharing','open bluetooth'],query):
                SR.speak("Opening bluetooth...")
                os.startfile(r"C:\Windows\System32\fsquirt.exe")
                break
            #play game
            elif there_exists(['would like to play some games','play some games','would like to play some game','want to play some games','want to play game','want to play games','play games','open games','play game','open game'],query):
                SR.speak("We have 2 games right now.\n")
                SR.updating_ST_No_newline('1.')
                SR.speak("Stone Paper Scissor")
                SR.updating_ST_No_newline('2.')
                SR.speak("Snake")
                SR.speak("\nTell us your choice:")
                while(True):
                    query=SR.takeCommand().lower()
                    if ('stone' in query) or ('paper' in query):
                        SR.speak("Opening stone paper scissor...")
                        sps=Annex.StonePaperScissor()
                        sps.start(scrollable_text)
                        break
                    elif ('snake' in query):
                        SR.speak("Opening snake game...")
                        import Snake
                        Snake.start()
                        break
                    else:
                        SR.speak("It did not match the option that we have. \nPlease say it again.")
            #makig note
            elif there_exists(['make a note','take note','take a note','note it down','make note','remember this as note','open notepad and write'],query):
                SR.speak("What would you like to write down?")
                data=SR.takeCommand()
                n=Annex.note()
                n.Note(data)
                SR.speak("I have a made a note of that.")
                break
            #flipping coin
            elif there_exists(["toss a coin","flip a coin","toss"],query):
                moves=["head", "tails"]
                cmove=random.choice(moves)
                playsound.playsound('quarter spin flac.mp3')
                SR.speak("It's " + cmove)
            #time and date
            elif there_exists(['the time'],query):
                strTime =datetime.datetime.now().strftime("%H:%M:%S")
                SR.speak(f"Sir, the time is {strTime}")
            elif there_exists(['the date'],query):
                strDay=datetime.date.today().strftime("%B %d, %Y")
                SR.speak(f"Today is {strDay}")
            elif there_exists(['what day it is','what day is today','which day is today',"today's day name please"],query):
                SR.speak(f"Today is {datetime.datetime.now().strftime('%A')}")
            #opening software applications
            elif there_exists(['open chrome'],query):
                SR.speak("Opening chrome")
                os.startfile(r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe')
                break
            elif there_exists(['open notepad plus plus','open notepad++','open notepad ++'],query):
                SR.speak('Opening notepad++')
                os.startfile(r'C:\Program Files\Notepad++\notepad++.exe')
                break
            elif there_exists(['open notepad','start notepad'],query):
                SR.speak('Opening notepad')
                os.startfile(r'C:\Windows\notepad.exe')
                break
            elif there_exists(['open ms paint','open mspaint','open microsoft paint','start microsoft paint','start ms paint'],query):
                SR.speak("Opening Microsoft paint....")
                os.startfile('C:\Windows\System32\mspaint.exe')
                break
            elif there_exists(['show me performance of my system','open performance monitor','performance monitor','performance of my computer','performance of this computer'],query):
                os.startfile("C:\Windows\System32\perfmon.exe")
                break
            elif there_exists(['open snipping tool','snipping tool','start snipping tool'],query):
                SR.speak("Opening snipping tool....")
                os.startfile("C:\Windows\System32\SnippingTool.exe")
                break
            elif there_exists(['open code','open visual studio ','open vs code'],query):
                SR.speak("Opeining vs code")
                codepath=r"C:\Users\Vishal\AppData\Local\Programs\Microsoft VS Code\Code.exe"
                os.startfile(codepath)
                break
            elif there_exists(['open file manager','file manager','open my computer','my computer','open file explorer','file explorer','open this pc','this pc'],query):
                SR.speak("Opening File Explorer")
                os.startfile("C:\Windows\explorer.exe")
                break
            elif there_exists(['powershell'],query):
                SR.speak("Opening powershell")
                os.startfile(r'C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe')
                break
            elif there_exists(['cmd','command prompt','command prom','commandpromt',],query):
                SR.speak("Opening command prompt")
                os.startfile(r'C:\Windows\System32\cmd.exe')
                break
            elif there_exists(['open whatsapp'],query):
                SR.speak("Opening whatsApp")
                os.startfile(r'C:\Users\Vishal\AppData\Local\WhatsApp\WhatsApp.exe')
                break
            elif there_exists(['open settings','open control panel','open this computer setting Window','open computer setting Window' ,'open computer settings','open setting','show me settings','open my computer settings'],query):
                SR.speak("Opening settings...")
                os.startfile('C:\Windows\System32\control.exe')
                break
            elif there_exists(['open your setting','open your settings','open settiing window','show me setting window','open voice assistant settings'],query):
                SR.speak("Opening my Setting window..")
                sett_wind=Annex.SettingWindow()
                sett_wind.settingWindow(root)
                break
            elif there_exists(['open vlc','vlc media player','vlc player'],query):
                SR.speak("Opening VLC media player")
                os.startfile(r"C:\Program Files\VideoLAN\VLC\vlc.exe")
                break
            #password generator
            elif there_exists(['suggest me a password','password suggestion','i want a password'],query):
                m3=Annex.PasswordGenerator()
                m3.givePSWD(scrollable_text)
                del m3
            #screeshot
            elif there_exists(['take screenshot','take a screenshot','screenshot please','capture my screen'],query):
                SR.speak("Taking screenshot")
                SS=Annex.screenshot()
                SS.takeSS()
                SR.speak('Captured screenshot is saved in Screenshots folder.')
                del SS
            #voice recorder
            elif there_exists(['record my voice','start voice recorder','voice recorder'],query):
                VR=Annex.VoiceRecorer()
                VR.Record(scrollable_text)
                del VR
            #text to speech conversion
            elif there_exists(['text to speech','convert my notes to voice'],query):
                SR.speak("Opening Text to Speech mode")
                TS=Annex.TextSpeech()
                del TS
            #weather report
            elif there_exists(['weather report','temperature'],query):
                Weather=Annex.Weather()
                Weather.show(scrollable_text)
            #shutting down system
            elif there_exists(['exit','quit','shutdown','shut up','goodbye','shut down'],query):
                SR.speak("shutting down")
                sys.exit()
            elif there_exists(['none'],query):
                pass
            elif there_exists(['stop the flow','stop the execution','halt','halt the process','stop the process','stop listening','stop the listening'],query):
                SR.speak("Listening halted.")
                break
            #it will give online results for the query
            elif there_exists(['search something for me','to do a little search','search mode','i want to search something'],query):
                SR.speak('What you want me to search for?')
                query=SR.takeCommand()
                SR.speak(f"Showing results for {query}")
                try:
                    res=app.query(query)
                    SR.speak(next(res.results).text)
                except:
                    print("Sorry, but there is a little problem while fetching the result.")
            #what is the capital
            elif there_exists(['what is the capital of','capital of','capital city of'],query):
                try:
                    res=app.query(query)
                    SR.speak(next(res.results).text)
                except:
                    print("Sorry, but there is a little problem while fetching the result.")
            # NOTE(review): unreachable — the 'weather report'/'temperature'
            # branch above already matches every query containing 'temperature'.
            elif there_exists(['temperature'],query):
                try:
                    res=app.query(query)
                    SR.speak(next(res.results).text)
                except:
                    print("Internet Connection Error")
            elif there_exists(['+','-','*','x','/','plus','add','minus','subtract','divide','multiply','divided','multiplied'],query):
                try:
                    res=app.query(query)
                    SR.speak(next(res.results).text)
                except:
                    print("Internet Connection Error")
            else:
                SR.speak("Sorry it did not match with any commands that i'm registered with. Please say it again.")
    except Exception as e:
        # NOTE(review): swallows every error (including recognition failures)
        # silently; consider at least logging `e` for debugging.
        pass
def gen(n):
    """Yield the integers 0 .. n-1 one at a time (used to hand out thread IDs)."""
    yield from range(n)
class MainframeThread(threading.Thread):
    """Background thread that runs the assistant's command-dispatch loop."""

    def __init__(self, threadID, name):
        super().__init__()
        self.threadID = threadID
        self.name = name

    def run(self):
        # Delegates to the module-level dispatch loop.
        mainframe()
def Launching_thread():
    """Start the mainframe loop on a background thread (mic-button handler).

    Fix over the original: the ID generator was recreated on every call, so
    every launched thread got ID 0. A single generator is now kept on the
    function object so successive launches get distinct IDs.
    """
    if not hasattr(Launching_thread, "_thread_ids"):
        Launching_thread._thread_ids = gen(1000)
    global MainframeThread_object
    MainframeThread_object = MainframeThread(next(Launching_thread._thread_ids), "Mainframe")
    MainframeThread_object.start()
if __name__=="__main__":
    #tkinter code
    # Build the themed main window, centered on the screen at 745x360.
    root=themed_tk.ThemedTk()
    root.set_theme("winnative")
    root.geometry("{}x{}+{}+{}".format(745,360,int(root.winfo_screenwidth()/2 - 745/2),int(root.winfo_screenheight()/2 - 360/2)))
    root.resizable(0,0)
    root.title("Heisenberg")
    root.iconbitmap('Heisenberg.ico')
    root.configure(bg='#2c4557')
    # Read-only transcript area the assistant writes into.
    scrollable_text=scrolledtext.ScrolledText(root,state='disabled',height=15,width=87,relief='sunken',bd=5,wrap=tk.WORD,bg='#add8e6',fg='#800000')
    scrollable_text.place(x=10,y=10)
    # Microphone button image.
    mic_img=Image.open("Mic.png")
    mic_img=mic_img.resize((55,55),Image.ANTIALIAS)
    mic_img=ImageTk.PhotoImage(mic_img)
    Speak_label=tk.Label(root,text="SPEAK:",fg="#FFD700",font='"Times New Roman" 12 ',borderwidth=0,bg='#2c4557')
    Speak_label.place(x=250,y=300)
    """Setting up objects"""
    SR=Annex.SpeakRecog(scrollable_text) #Speak and Recognition class instance
    # Pressing the mic button launches the dispatch loop on a worker thread.
    Listen_Button=tk.Button(root,image=mic_img,borderwidth=0,activebackground='#2c4557',bg='#2c4557',command=Launching_thread)
    Listen_Button.place(x=330,y=280)
    # Menu bar: Help -> Commands List, Settings, Clear Screen.
    myMenu=tk.Menu(root)
    m1=tk.Menu(myMenu,tearoff=0) #tearoff=0 means the submenu can't be teared of from the window
    m1.add_command(label='Commands List',command=CommandsList)
    myMenu.add_cascade(label="Help",menu=m1)
    stng_win=Annex.SettingWindow()
    myMenu.add_cascade(label="Settings",command=partial(stng_win.settingWindow,root))
    myMenu.add_cascade(label="Clear Screen",command=clearScreen)
    root.config(menu=myMenu)
    root.mainloop()
"vishalgpatil10@gmail.com"
] | vishalgpatil10@gmail.com |
9f80173b3cf8f824c5a2554175f22621476761a1 | 7d9030094153bf363ba5690607bf496b0bb76009 | /script/Thread_pool.py | 44b8fbb80234a42e0b2260bf9ee1fedc71cfb46c | [] | no_license | stan12138/archive | 2773c309e59458000fb1eac44c6d3fc073bfc511 | 54478dc286712948913e3c9ca126015a8bb24bc8 | refs/heads/master | 2020-12-02T18:15:39.609495 | 2020-02-24T13:19:29 | 2020-02-24T13:19:29 | 96,503,611 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,537 | py | import threading
import queue
__all__ = ["Work","ThreadPool"]
class WorkDone(Exception) :
pass
class WorkThread(threading.Thread) :
def __init__(self,work_queue,result_queue,timeout) :
threading.Thread.__init__(self)
#self.setDaemon(True) 已经被废弃了
self.daemon = True
self.work_queue = work_queue
self.result_queue = result_queue
self.timeout = timeout
self.dismiss = threading.Event()
self.start()
def run(self) :
while True:
if self.dismiss.is_set() :
break
try :
work = self.work_queue.get(True,self.timeout)
except queue.Empty :
continue
else :
if self.dismiss.is_set() :
self.work_queue.put(work)
break
try :
result = work.work_func(*work.args)
#print('%s is done'%work.work_ID)
self.result_queue.put((work,result))
except :
pass
def set_dismiss(self) :
self.dismiss.set()
class Work():
    """A single unit of work for the thread pool: a callable, its positional
    arguments and a hashable identifier used to track completion."""
    def __init__(self, target=None, args=(), work_ID=None):
        """Store the callable/arguments and derive a hashable work ID.

        Fixes over the original: `args` defaults to an immutable tuple
        instead of a shared mutable list, `is None` replaces `== None`,
        and only TypeError (unhashable ID) is caught instead of a bare
        except.
        """
        if work_ID is None:
            # No ID supplied: fall back to this object's identity.
            self.work_ID = id(self)
        else:
            try:
                self.work_ID = hash(work_ID)
            except TypeError:
                print("workID must be hashable,this id can't use,we will set as default")
                self.work_ID = id(self)
        self.work_func = target
        self.args = args
    def __str__(self):
        return 'work thread id=%s args=%s'%(self.work_ID,self.args)
class ThreadPool(object):
    """Fixed-size pool of WorkThread workers fed through a work queue,
    with results collected on a result queue."""
    def __init__(self,worker_num,work_size=0,result_size=0,timeout=5) :
        # 0 queue size means unbounded.
        self.work_queue = queue.Queue(work_size)
        self.result_queue = queue.Queue(result_size)
        self.timeout = timeout
        self.workers = []
        self.dismiss_workers = []
        # Outstanding work keyed by work_ID; emptied as results arrive.
        self.work = {}
        self.creat_workers(worker_num)
    def creat_workers(self,num) :
        """Spawn `num` worker threads (they start immediately)."""
        for i in range(num) :
            self.workers.append(WorkThread(self.work_queue, self.result_queue, self.timeout))
    def dismiss_thread(self,num,do_join=False) :
        """Signal up to `num` workers to stop; join them now or defer."""
        dismiss_list = []
        num = min(num,len(self.workers))
        for i in range(num) :
            worker = self.workers.pop()
            worker.set_dismiss()
            dismiss_list.append(worker)
        print('stop %s work thread and leave %s thread.....'%(num,len(self.workers)))
        if do_join :
            for i in dismiss_list :
                i.join()
            print('join all dismiss thread already...')
        else :
            # Deferred join: caller must call join_dismiss_thread later.
            self.dismiss_workers.extend(dismiss_list)
    def join_dismiss_thread(self) :
        """Join all previously dismissed (not yet joined) workers."""
        for i in self.dismiss_workers :
            i.join()
        print('join %s dismiss workers already,now there are still %s workers...'%(len(self.dismiss_workers),len(self.workers)))
        self.dismiss_workers = []
    def put_work(self,work,block=True,timeout=None) :
        """Enqueue a Work instance; anything else is rejected with a message."""
        if isinstance(work,Work) :
            self.work_queue.put(work,block,timeout)
            self.work[work.work_ID] = work
        else :
            print('work must be Work class,put failure.....')
        #print('add one work')
    def get_all_result(self,block=False) :
        """Drain the result queue; raises WorkDone once no work is outstanding.

        NOTE(review): the bare except also hides KeyError from the `del`
        (e.g. duplicate work_IDs), not just queue.Empty.
        """
        while True:
            if not self.work :
                raise WorkDone
            try :
                work, result = self.result_queue.get(block=block)
                #print('got one result')
                del self.work[work.work_ID]
            except :
                break
    def stop(self) :
        """Dismiss and join every worker."""
        self.dismiss_thread(self.worker_num(),True)
        self.join_dismiss_thread()
    def worker_num(self) :
        """Number of active (non-dismissed) workers."""
        return len(self.workers)
    def wait(self) :
        """Block until every outstanding work item has produced a result."""
        while True:
            try:
                self.get_all_result(True)
            except WorkDone:
                print('work done!!!!')
                break
if __name__ == "__main__" :
    # Demo: 5 workers write 10 small text files, then the pool shuts down.
    import datetime
    def work(name,data) :
        # Write `data` to the file `name`.
        with open(name,'w') as fi :
            fi.write(data)
        print('write %s already'%name)
    main = ThreadPool(5)
    for i in range(10) :
        main.put_work(Work(target=work,args=(str(i)+'.txt','hello')))
    main.wait()
    main.stop()
| [
"ihnyi@qq.com"
] | ihnyi@qq.com |
263a0ba65383bbc06082fdef23c848663ec54781 | 45f6a4dfc837998565d4e4e4cde258a27fdbd424 | /learn_tu_you/wx_superboss/trunk/hall37-newfish/src/newfish/player/poseidon_player.py | bb1fbcf1975f01557c0d822235d2c2cc2b615f06 | [] | no_license | isoundy000/learn_python | c220966c42187335c5342269cafc6811ac04bab3 | fa1591863985a418fd361eb6dac36d1301bc1231 | refs/heads/master | 2022-12-29T10:27:37.857107 | 2020-10-16T03:52:44 | 2020-10-16T03:52:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Auther: houguangdong
# @Time: 2020/7/17
import time
from freetime.entity.msg import MsgPack
from freetime.util import log as ftlog
from poker.entity.dao import gamedata
from poker.entity.configure import gdata
from newfish.entity.msg import GameMsg
from newfish.entity import config, util, change_notify, weakdata
from newfish.entity.lotterypool import poseidon_lottery_pool
from newfish.entity.config import FISH_GAMEID, TOWERIDS
from newfish.player.multiple_player import FishMultiplePlayer
from newfish.room.poseidon_room import Tower
from newfish.entity.redis_keys import GameData, WeakData
from newfish.servers.util.rpc import user_rpc
class FishPoseidonPlayer(FishMultiplePlayer):
    """Player type for the Poseidon fishing room.

    Currently a pure placeholder: inherits all behaviour from
    FishMultiplePlayer with no overrides.
    """
    pass
"1737785826@qq.com"
] | 1737785826@qq.com |
b71b29353e59f08f5782750c9968d379ea377173 | ce522e5edb852562d688be96d0c15294a0d9e66b | /ecommerce/checkout/migrations/0002_auto_20170320_0235.py | 0a52143404eb6550a78302aa47ac334a1bba2924 | [] | no_license | juniorcarvalho/django-ecommerce | 62c67f57d615afa47fc77ca3f738e966616b36d3 | c6511aed95719a65f349bd7caec052515ddbbe39 | refs/heads/master | 2021-01-19T12:55:13.824402 | 2017-03-21T02:57:41 | 2017-03-21T02:57:41 | 82,350,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-20 05:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.5; do not edit by hand.
    dependencies = [
        ('catalog', '0001_initial'),
        ('checkout', '0001_initial'),
    ]
    # Enforce one cart item per (cart_key, product) pair.
    operations = [
        migrations.AlterUniqueTogether(
            name='cartitem',
            unique_together=set([('cart_key', 'product')]),
        ),
    ]
| [
"joseadolfojr@gmail.com"
] | joseadolfojr@gmail.com |
6bae73ca218abf294b04fd6534f4406c794145b2 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc095/A/4966582.py | 2037c5670eb3dba4a28fc15b2812759685ba5649 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | print(int(input().count("o"))*100+700) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
f91277476203cfe568c65fc4eb763e51affe8f00 | 7482abade21b37b188cd4d7636fdcc9b59927847 | /projekt/primer.py | 56468f6fffdd38197a69a1c1f88ee9397d4d684d | [
"MIT"
] | permissive | evadezelak/OPB | f25a4924c680b2ee85d8e81e55cab1cfa4bdc717 | 425533f41660353a52abed439c85efc5dd801273 | refs/heads/master | 2020-05-17T07:48:22.224513 | 2019-04-18T11:57:33 | 2019-04-18T11:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
# uvozimo bottle.py
from bottle import *
# uvozimo ustrezne podatke za povezavo
import auth_public as auth
# uvozimo psycopg2
import psycopg2, psycopg2.extensions, psycopg2.extras
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) # se znebimo problemov s šumniki
# odkomentiraj, če želiš sporočila o napakah
# debug(True)
@get('/static/<filename:path>')
def static(filename):
return static_file(filename, root='static')
@get('/')
def index():
cur.execute("SELECT * FROM oseba ORDER BY priimek, ime")
return template('komitenti.html', osebe=cur)
@get('/transakcije/:x/')
def transakcije(x):
    """List transactions whose amount exceeds the URL parameter `x`."""
    # `x` arrives as a string; int() both converts and validates it before
    # it is bound as a query parameter.
    cur.execute("SELECT * FROM transakcija WHERE znesek > %s ORDER BY znesek, id", [int(x)])
    return template('transakcije.html', x=x, transakcije=cur)
@get('/dodaj_transakcijo')
def dodaj_transakcijo():
    """Render an empty "add transaction" form."""
    return template('dodaj_transakcijo.html', znesek='', racun='', opis='', napaka=None)
@post('/dodaj_transakcijo')
def dodaj_transakcijo_post():
    """Insert a new transaction from the submitted form.

    On failure the form is re-rendered with the entered values and the
    error message; on success the browser is redirected to the front page.
    """
    form = request.forms
    amount = form.znesek
    account = form.racun
    note = form.opis
    try:
        cur.execute(
            "INSERT INTO transakcija (znesek, racun, opis) VALUES (%s, %s, %s)",
            (amount, account, note))
    except Exception as ex:
        return template('dodaj_transakcijo.html', znesek=amount, racun=account,
                        opis=note, napaka='Zgodila se je napaka: %s' % ex)
    redirect("/")
######################################################################
# Main program
# Connect to the database.
conn = psycopg2.connect(database=auth.db, host=auth.host, user=auth.user, password=auth.password)
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) # disable transactions (autocommit)
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# Start the server on port 8000, see http://localhost:8000/
run(host='localhost', port=8000)
| [
"janos.vidali@fmf.uni-lj.si"
] | janos.vidali@fmf.uni-lj.si |
574d4092f50b993d1f38e0a957a6acfa58a43c31 | fe54d59a1a030a9c1395f4f4d3ef2e2b2ec48343 | /build/scripts-2.7/manage.py | d56608cfd1ed804a07264990cbacd3890abd8d63 | [] | no_license | zbwzy/nailgun | 38a4198a0630a1608c14e55bee03b5ed04ded3e8 | 2eaeece03ebc53f48791db2aa8e7d24c010910f2 | refs/heads/master | 2022-09-25T09:03:33.296368 | 2016-02-23T09:32:55 | 2016-02-23T09:32:55 | 52,345,460 | 0 | 0 | null | 2022-09-16T17:45:43 | 2016-02-23T09:03:07 | Python | UTF-8 | Python | false | false | 8,813 | py | #!C:\Python27\python.exe
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import __main__
import argparse
import code
import os
import sys
def add_config_parameter(parser):
    """Attach the shared `-c/--config` option to *parser*."""
    parser.add_argument(
        '-c', '--config',
        dest='config_file',
        action='store',
        type=str,
        default=None,
        help='custom config file',
    )
def load_run_parsers(subparsers):
    """Register the `run` sub-command (serve the application locally)."""
    run_parser = subparsers.add_parser(
        'run', help='run application locally'
    )
    run_parser.add_argument(
        '-p', '--port', dest='port', action='store', type=str,
        help='application port', default='8000'
    )
    run_parser.add_argument(
        '-a', '--address', dest='address', action='store', type=str,
        help='application address', default='0.0.0.0'
    )
    run_parser.add_argument(
        '--fake-tasks', action='store_true', help='fake tasks'
    )
    run_parser.add_argument(
        '--fake-tasks-amqp', action='store_true',
        help='fake tasks with real AMQP'
    )
    run_parser.add_argument(
        '--keepalive',
        action='store_true',
        help='run keep alive thread'
    )
    # Shared -c/--config option.
    add_config_parameter(run_parser)
    run_parser.add_argument(
        '--fake-tasks-tick-count', action='store', type=int,
        help='Fake tasks tick count'
    )
    run_parser.add_argument(
        '--fake-tasks-tick-interval', action='store', type=int,
        help='Fake tasks tick interval in seconds'
    )
    run_parser.add_argument(
        '--authentication-method', action='store', type=str,
        help='Choose authentication type',
        choices=['none', 'fake', 'keystone'],
    )
def load_db_parsers(subparsers):
    """Register the database sub-commands: syncdb, dropdb and fixtures."""
    subparsers.add_parser(
        'syncdb', help='sync application database'
    )
    subparsers.add_parser(
        'dropdb', help='drop application database'
    )
    # fixtures
    loaddata_parser = subparsers.add_parser(
        'loaddata', help='load data from fixture'
    )
    loaddata_parser.add_argument(
        'fixture', action='store', help='json fixture to load'
    )
    dumpdata_parser = subparsers.add_parser(
        'dumpdata', help='dump models as fixture'
    )
    # NOTE(review): the implicit string concatenation below yields
    # "...underscored nameshould be used..." (missing space in help text).
    dumpdata_parser.add_argument(
        'model', action='store', help='model name to dump; underscored name'
        'should be used, e.g. network_group for NetworkGroup model'
    )
    subparsers.add_parser(
        'loaddefault',
        help='load data from default fixtures '
        '(settings.FIXTURES_TO_IPLOAD)'
    )
def load_alembic_parsers(migrate_parser):
    """Attach the alembic verbs (current/history/upgrade/...) to `migrate`."""
    alembic_parser = migrate_parser.add_subparsers(
        dest="alembic_command",
        help='alembic command'
    )
    # Read-only commands take no extra arguments.
    for name in ['current', 'history', 'branches']:
        parser = alembic_parser.add_parser(name)
    for name in ['upgrade', 'downgrade']:
        parser = alembic_parser.add_parser(name)
        parser.add_argument('--delta', type=int)
        parser.add_argument('--sql', action='store_true')
        parser.add_argument('revision', nargs='?')
    parser = alembic_parser.add_parser('stamp')
    parser.add_argument('--sql', action='store_true')
    parser.add_argument('revision')
    parser = alembic_parser.add_parser('revision')
    parser.add_argument('-m', '--message')
    parser.add_argument('--autogenerate', action='store_true')
    parser.add_argument('--sql', action='store_true')
def load_db_migrate_parsers(subparsers):
    """Register the `migrate` sub-command together with its alembic verbs."""
    parser = subparsers.add_parser('migrate', help='dealing with DB migration')
    load_alembic_parsers(parser)
def load_dbshell_parsers(subparsers):
    """Register the `dbshell` sub-command and give it the shared config flag."""
    add_config_parameter(subparsers.add_parser('dbshell', help='open database shell'))
def load_test_parsers(subparsers):
    """Register the bare `test` sub-command (runs the unit-test suite)."""
    subparsers.add_parser('test', help='run unit tests')
def load_shell_parsers(subparsers):
    """Register the `shell` sub-command (python REPL) with the config flag."""
    add_config_parameter(subparsers.add_parser('shell', help='open python REPL'))
def load_settings_parsers(subparsers):
    """Register the `dump_settings` sub-command (print settings as YAML)."""
    subparsers.add_parser('dump_settings', help='dump current settings to YAML')
def action_dumpdata(params):
    """Dump the model named in params.model as a fixture, then exit 0."""
    import logging
    # Silence warnings so only the fixture reaches stdout.
    logging.disable(logging.WARNING)
    from nailgun.db.sqlalchemy import fixman
    fixman.dump_fixture(params.model)
    sys.exit(0)
def action_loaddata(params):
    """Load the JSON fixture file named in params.fixture into the DB."""
    from nailgun.db.sqlalchemy import fixman
    from nailgun.logger import logger
    logger.info("Uploading fixture...")
    with open(params.fixture, "r") as fileobj:
        fixman.upload_fixture(fileobj)
    logger.info("Done")
def action_loaddefault(params):
    """Load the default fixtures (settings.FIXTURES_TO_UPLOAD)."""
    from nailgun.db.sqlalchemy import fixman
    from nailgun.logger import logger
    logger.info("Uploading fixture...")
    fixman.upload_fixtures()
    logger.info("Done")
def action_syncdb(params):
    """Create/sync the application database schema."""
    from nailgun.db import syncdb
    from nailgun.logger import logger
    logger.info("Syncing database...")
    syncdb()
    logger.info("Done")
def action_dropdb(params):
    """Drop the application database."""
    from nailgun.db import dropdb
    from nailgun.logger import logger
    logger.info("Dropping database...")
    dropdb()
    logger.info("Done")
def action_migrate(params):
    """Delegate the parsed alembic sub-command to the migration module."""
    from nailgun.db.migration import action_migrate_alembic
    action_migrate_alembic(params)
def action_test(params):
    """Run the unit-test suite."""
    from nailgun.logger import logger
    from nailgun.unit_test import TestRunner
    logger.info("Running tests...")
    TestRunner.run()
    logger.info("Done")
def action_dbshell(params):
    """Open an interactive psql shell against the configured database."""
    from nailgun.settings import settings
    if params.config_file:
        settings.update_from_file(params.config_file)
    args = ['psql']
    env = {}
    # Password is passed via the environment, never on the command line.
    if settings.DATABASE['passwd']:
        env['PGPASSWORD'] = settings.DATABASE['passwd']
    if settings.DATABASE['user']:
        args += ["-U", settings.DATABASE['user']]
    if settings.DATABASE['host']:
        args.extend(["-h", settings.DATABASE['host']])
    if settings.DATABASE['port']:
        args.extend(["-p", str(settings.DATABASE['port'])])
    args += [settings.DATABASE['name']]
    if os.name == 'nt':
        # Windows has no exec*; run psql as a child and exit with its status.
        sys.exit(os.system(" ".join(args)))
    else:
        # NOTE(review): execvpe replaces the whole environment with `env`
        # (only PGPASSWORD) — PATH etc. are dropped; confirm this is intended.
        os.execvpe('psql', args, env)
def action_dump_settings(params):
    """Write the current settings as YAML to stdout (``params`` is unused)."""
    from nailgun.settings import settings
    sys.stdout.write(settings.dump())
def action_shell(params):
    """Open an interactive Python shell with ``db`` and ``settings`` loaded.

    Prefers IPython when it is installed; otherwise falls back to the
    stdlib REPL with the same objects available as locals.
    """
    from nailgun.db import db
    from nailgun.settings import settings
    if params.config_file:
        settings.update_from_file(params.config_file)
    try:
        from IPython import embed
        embed()
    except ImportError:
        code.interact(local={'db': db, 'settings': settings})
def action_run(params):
    """Apply CLI parameters to the settings and start the nailgun app."""
    from nailgun.settings import settings
    settings.update({
        'LISTEN_PORT': int(params.port),
        'LISTEN_ADDRESS': params.address,
    })
    # Optional fake-task knobs: only override settings that were supplied.
    fake_task_attrs = ['FAKE_TASKS', 'FAKE_TASKS_TICK_COUNT',
                       'FAKE_TASKS_TICK_INTERVAL', 'FAKE_TASKS_AMQP']
    for attr in fake_task_attrs:
        value = getattr(params, attr.lower())
        if value is not None:
            settings.update({attr: value})
    if params.authentication_method:
        settings.AUTH.update(
            {'AUTHENTICATION_METHOD': params.authentication_method}
        )
    if params.config_file:
        settings.update_from_file(params.config_file)
    from nailgun.app import appstart
    appstart()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(
        dest="action", help='actions'
    )
    load_run_parsers(subparsers)
    load_db_parsers(subparsers)
    load_db_migrate_parsers(subparsers)
    load_dbshell_parsers(subparsers)
    load_test_parsers(subparsers)
    load_shell_parsers(subparsers)
    load_settings_parsers(subparsers)
    params, other_params = parser.parse_known_args()
    # Drop the subcommand name so tools that read sys.argv themselves
    # (e.g. nose in action_test) do not see it.
    sys.argv.pop(1)
    # Use a default of None: without it getattr raised AttributeError for an
    # unknown action and the print_help fallback below was unreachable.
    action = getattr(
        __main__,
        "action_{0}".format(params.action),
        None
    )
    if action:
        action(params)
    else:
        parser.print_help()
| [
"zhangbai2008@gmail.com"
] | zhangbai2008@gmail.com |
a03f17fd5bf1c66f472802a75980cdd2d516268c | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /gt9LLufDCMHKMioh2_7.py | 016938adf4ecabd120de337982e70522542a7c1a | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py |
def stutter(word):
    """Return *word* "stuttered": its first two letters twice, then 'word?'.

    PEP 8 (E731) prefers a named ``def`` over binding a lambda to a name.
    """
    return (word[:2] + "... ") * 2 + word + "?"
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
4e002e755d0c221b8396df31adcb81a4bffa5b2a | ba916d93dfb8074241b0ea1f39997cb028509240 | /python/sliding_window.py | 2279df01725d7098a84cbdb30106369deb9195a2 | [] | no_license | satojkovic/algorithms | ecc1589898c61d2eef562093d3d2a9a2d127faa8 | f666b215bc9bbdab2d2257c83ff1ee2c31c6ff8e | refs/heads/master | 2023-09-06T08:17:08.712555 | 2023-08-31T14:19:01 | 2023-08-31T14:19:01 | 169,414,662 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | def find_average_subarrays(k, arr):
result = []
for i in range(len(arr) - k + 1):
avr = sum(arr[i:i+k]) / k
result.append(avr)
return result
def find_average_subarrays_window(k, arr):
    """Sliding-window averages of every contiguous length-k subarray of arr."""
    averages = []
    running = 0.0
    for index, value in enumerate(arr):
        running += value
        if index >= k - 1:
            averages.append(running / k)
            # Evict the element leaving the window.
            running -= arr[index - k + 1]
    return averages
def test_find_average_subarrays():
    """Both implementations agree on normal, single, and too-short inputs."""
    cases = [
        (5, [1, 3, 2, 6, -1, 4, 1, 8, 2], [2.2, 2.8, 2.4, 3.6, 2.8]),
        (1, [10], [10]),
        (2, [123], []),
    ]
    for k, arr, expected in cases:
        assert find_average_subarrays(k, arr) == expected
        assert find_average_subarrays_window(k, arr) == expected
def max_sum_subarray(k, arr):
    """Maximum sum over all contiguous length-k windows of arr.

    Returns -sys.maxsize when arr has fewer than k elements.
    """
    import sys
    best = -sys.maxsize
    running, start = 0.0, 0
    for end in range(len(arr)):
        running += arr[end]
        if end >= k - 1:
            if running > best:
                best = running
            running -= arr[start]
            start += 1
    return best
def test_max_sum_subarray():
    """Spot-check positive, mixed-sign and all-negative inputs."""
    cases = [
        (3, [2, 1, 5, 1, 3, 2], 9),
        (2, [2, 3, 4, 1, 5], 7),
        (2, [1, 2, -3], 3),
        (2, [-3, 2, -5, -9], -1),
        (2, [1, 2, -3, 9], 6),
    ]
    for k, arr, expected in cases:
        assert max_sum_subarray(k, arr) == expected
def max_substring_with_k_distinct_chars(k, s):
    """Length of the longest substring of s with at most k distinct chars."""
    from collections import defaultdict
    counts = defaultdict(int)
    best = 0
    lo = 0
    for hi, ch in enumerate(s):
        counts[ch] += 1
        # Shrink from the left until at most k distinct characters remain.
        while len(counts) > k:
            counts[s[lo]] -= 1
            if counts[s[lo]] == 0:
                del counts[s[lo]]
            lo += 1
        best = max(best, hi - lo + 1)
    return best
def test_max_susbstring_with_k_distinct_chars():
    # (Function name keeps the original misspelling so the discovered
    # test name stays stable.)
    for k, s, expected in [(2, 'araaci', 4), (1, 'araaci', 2), (3, 'cbbebi', 5)]:
        assert max_substring_with_k_distinct_chars(k, s) == expected
| [
"satojkovic@gmail.com"
] | satojkovic@gmail.com |
dc0732bc17c1d9bd0f61168d8585bf9ebcd8dcc7 | 19ae613228d539deb768ece8b65e1f50a610bab6 | /pddl/pddl.py | 296e3e4b75acebd41edb2508ab767a5ada2e43be | [] | no_license | hyzcn/Bat-leth | cf7166c8c465bfabd3abf78ea712af95eff42ab3 | 1fc9c033d89f4d8a758f57e539622c4a36f1811b | refs/heads/master | 2021-06-07T20:54:21.554291 | 2016-10-16T01:04:21 | 2016-10-16T01:04:21 | 255,225,651 | 0 | 1 | null | 2020-04-13T03:50:15 | 2020-04-13T03:50:15 | null | UTF-8 | Python | false | false | 5,333 | py | #
# This file is part of pyperplan.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""
This module contains all data structures needed to represent a PDDL domain and
possibly a task definition.
"""
class Type:
    """A PDDL type, optionally derived from a parent type."""

    def __init__(self, name, parent):
        # PDDL type names are case-insensitive; normalize to lowercase.
        self.name = name.lower()
        self.parent = parent

    def __repr__(self):
        return self.name

    __str__ = __repr__
class Predicate:
    """A PDDL predicate: a name plus a typed parameter signature."""

    def __init__(self, name, signature):
        """
        name: the name of the predicate.
        signature: list of (parameter_name, [types]) tuples describing the
                   parameters and their type(s).
        """
        self.name = name
        self.signature = signature

    def __repr__(self):
        return self.name + str(self.signature)

    __str__ = __repr__
class Quantifier:
    """A quantified formula ('exists' or 'forall') over a scoped signature."""

    def __init__(self, name, signature, predicates):
        """
        name: 'exists' or 'forall'.
        signature: list of (name, [types]) tuples for the variables in scope.
        predicates: list of predicates the quantifier ranges over.
        """
        self.name = name
        self.signature = signature
        self.predicates = predicates

    def __repr__(self):
        return self.name + str(self.signature) + str(self.predicates)

    __str__ = __repr__
# Formula is unused right now!
#class Formula:
# def __init__(self, operator, operands=[]):
# # right now we only need AND
# self._operator = operator # 'AND' | 'OR' | 'NOT'
# self._operands = operands
#
# def getOperator(self):
# return self._operator
# operator = property(getOperator)
#
# def getOperands(self):
# return self._operands
# operands = property(getOperands)
class Effect:
    """The effect of an action: predicates it makes true or false."""

    def __init__(self):
        # addlist: predicates that hold after the action.
        # dellist: predicates that no longer hold after the action.
        self.addlist = set()
        self.dellist = set()
class Action:
    """A PDDL action (operator)."""

    def __init__(self, name, signature, precondition, effect, decomp=None):
        """
        name: identifier of the action.
        signature: list of (parameter_name, [types]) tuples.
        precondition: predicates that must hold before the action applies.
        effect: Effect instance specifying the action's postcondition.
        decomp: optional decomposition (HTN-style), defaults to None.
        """
        self.name = name
        self.signature = signature
        self.precondition = precondition
        self.effect = effect
        self.decomp = decomp
class Domain:
    """A PDDL domain definition."""

    def __init__(self, name, types, predicates, actions, constants=None):
        """
        name: the name of the domain.
        types: dict of typename -> Type instances in the domain.
        predicates: list of predicates in the domain.
        actions: list of actions in the domain.
        constants: dict of name -> type pairs of the domain's constants.
        """
        self.name = name
        self.types = types
        self.predicates = predicates
        self.actions = actions
        # Bug fix: the old default `constants={}` was a mutable default
        # argument shared by every instance; use None and build a fresh dict.
        self.constants = {} if constants is None else constants

    def __repr__(self):
        return ('< Domain definition: %s Predicates: %s Actions: %s '
                'Constants: %s >' % (self.name,
                                     [str(p) for p in self.predicates],
                                     [str(a) for a in self.actions],
                                     [str(c) for c in self.constants]))

    __str__ = __repr__
class Problem:
    """A PDDL planning problem, defined with respect to a domain."""

    def __init__(self, name, domain, objects, init, goal):
        """
        name: the name of the problem.
        domain: the Domain in which the problem has to be solved.
        objects: dict of name -> type for the objects used in the problem.
        init: list of predicates describing the initial state.
        goal: list of predicates describing the goal state.
        """
        self.name = name
        self.domain = domain
        self.objects = objects
        self.initial_state = init
        self.goal = goal

    def __repr__(self):
        return ('< Problem definition: %s '
                'Domain: %s Objects: %s Initial State: %s Goal State : %s >' %
                (self.name, self.domain.name,
                 [self.objects[o].name for o in self.objects],
                 [str(p) for p in self.initial_state],
                 [str(p) for p in self.goal]))

    __str__ = __repr__
| [
"drwiner131@gmail.com"
] | drwiner131@gmail.com |
b2a52cd3a7ad6f3b0a3ac55ff2c6147a0ded178e | b1303152c3977a22ff9a0192c0c32310e65a6d77 | /python/530.minimum-absolute-difference-in-bst.py | a3a1a96c14fc0afec10ff9a53dfcfe8ba839397c | [
"Apache-2.0"
] | permissive | stavanmehta/leetcode | 1b8da1c2bfacaa76ddfb96b8dbce03bf08c54c27 | 1224e43ce29430c840e65daae3b343182e24709c | refs/heads/master | 2021-07-15T16:02:16.107962 | 2021-06-24T05:39:14 | 2021-06-24T05:39:14 | 201,658,706 | 0 | 0 | Apache-2.0 | 2021-06-24T05:39:15 | 2019-08-10T16:59:32 | Java | UTF-8 | Python | false | false | 247 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def getMinimumDifference(self, root: TreeNode) -> int:
| [
"noreply@github.com"
] | stavanmehta.noreply@github.com |
eb96e3855d329c0fad9ec94c2f1a5316a047fe52 | 134ff3c0719d4c0022eb0fb7c859bdbff5ca34b2 | /desktop/core/ext-py/django_nose/django_nose/nose_runner.py | 3cb8d1e63c08da8d09673c4aa7912ef1358027e8 | [
"Apache-2.0"
] | permissive | civascu/hue | 22637f13a4cfc557716557661523131b6ac16da4 | 82f2de44789ff5a981ed725175bae7944832d1e9 | refs/heads/master | 2020-03-31T01:50:39.449966 | 2010-07-21T01:05:50 | 2010-07-21T01:07:15 | 788,284 | 0 | 0 | Apache-2.0 | 2019-02-04T07:03:12 | 2010-07-21T07:34:27 | Python | UTF-8 | Python | false | false | 2,660 | py | """
Django test runner that invokes nose.
Usage:
./manage.py test DJANGO_ARGS -- NOSE_ARGS
The 'test' argument, and any other args before '--', will not be passed
to nose, allowing django args and nose args to coexist.
You can use
NOSE_ARGS = ['list', 'of', 'args']
in settings.py for arguments that you always want passed to nose.
"""
import sys
from django.conf import settings
from django.db import connection
from django.test import utils
import nose
# Names of the optional per-app hook functions looked up in <app>.tests.
SETUP_ENV = 'setup_test_environment'
TEARDOWN_ENV = 'teardown_test_environment'
def get_test_enviroment_functions():
    """Collect per-app test-environment hooks.

    For every installed app, functions named by SETUP_ENV / TEARDOWN_ENV in
    its ``tests`` module are gathered so they can be called before and after
    running the tests.  Returns (setup_funcs, teardown_funcs).
    """
    setup_funcs, teardown_funcs = [], []
    for app_name in settings.INSTALLED_APPS:
        mod = __import__(app_name, None, None, ['tests'])
        tests = getattr(mod, 'tests', None)
        if tests is not None:
            if hasattr(tests, SETUP_ENV):
                setup_funcs.append(getattr(tests, SETUP_ENV))
            if hasattr(tests, TEARDOWN_ENV):
                teardown_funcs.append(getattr(tests, TEARDOWN_ENV))
    return setup_funcs, teardown_funcs
def setup_test_environment(setup_funcs):
    """Prepare Django's test environment, then run the app-level setup hooks."""
    utils.setup_test_environment()
    for hook in setup_funcs:
        hook()
def teardown_test_environment(teardown_funcs):
    """Tear down Django's test environment, then run app-level teardown hooks."""
    utils.teardown_test_environment()
    for hook in teardown_funcs:
        hook()
def run_tests_explicit(nose_args, verbosity=1, interactive=True):
    """Setup django and run nose with given arguments.

    Creates a throwaway test database, runs nose, then restores the original
    database.  Returns nose's boolean result (True if all tests passed).
    """
    setup_funcs, teardown_funcs = get_test_enviroment_functions()
    # Prepare django for testing.
    setup_test_environment(setup_funcs)
    old_db_name = settings.DATABASE_NAME
    connection.creation.create_test_db(verbosity, autoclobber=not interactive)
    # Pretend it's a production environment.
    settings.DEBUG = False
    ret = nose.run(argv=nose_args)
    # Clean up django: drop the test DB and restore the original name.
    connection.creation.destroy_test_db(old_db_name, verbosity)
    teardown_test_environment(teardown_funcs)
    return ret
def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
    """Calculates nose arguments and runs tests.

    test_labels and extra_tests exist to match Django's test-runner
    interface but are not used here; nose performs its own discovery.
    """
    nose_argv = ['nosetests']
    if hasattr(settings, 'NOSE_ARGS'):
        nose_argv.extend(settings.NOSE_ARGS)
    # Everything after '--' is passed to nose.
    if '--' in sys.argv:
        hyphen_pos = sys.argv.index('--')
        nose_argv.extend(sys.argv[hyphen_pos + 1:])
    if verbosity >= 1:
        print ' '.join(nose_argv)
    return run_tests_explicit(nose_argv, verbosity, interactive)
| [
"bcwalrus@cloudera.com"
] | bcwalrus@cloudera.com |
8dffd4a74543cb7509f054827c210076c6e09a40 | 7bd0954e956993df19d833810f9d71b60e2ebb9a | /test/matrix/test_LIGO_noise.py | 07feafe27f250c7076da65d882d3fdc1023562bf | [
"Apache-2.0"
] | permissive | aa158/phasor | 5ee0cec4f816b88b0a8ac298c330ed48458ec3f2 | fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d | refs/heads/master | 2021-10-22T09:48:18.556091 | 2019-03-09T18:56:05 | 2019-03-09T18:56:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from os import path
import numpy as np
import declarative
import numpy.testing as np_test
import pytest
from declarative.bunch import (
DeepBunch
)
from phasor.utilities.np import logspaced
from phasor import system
from ligo_sled import (
LIGOBasicOperation
)
import pickle
try:
    stresstest = pytest.mark.skipif(
        not pytest.config.getoption("--do-stresstest"),
        reason="need --do-stresstest option to run"
    )
except AttributeError:
    # Needed when importing outside a py.test session: pytest.config only
    # exists while a test run is active, so fall back to a no-op decorator.
    stresstest = lambda x : x
@stresstest
def test_LIGO_noise_inversion():
    """Stress test: repeatedly solve the LIGO model and compare the resulting
    noise-limited sensitivity against a pickled reference spectrum."""
    # Reference spectrum produced by a previous known-good run of this model.
    with open(path.join(path.split(__file__)[0], 'aLIGO_outspec.pckl'), 'rb') as F:
        output = declarative.Bunch(pickle.load(F))
    def test_inverse():
        # Build the detector configuration tree for one solve.
        db = DeepBunch()
        db.det.input.PSL.power.val = 27 * 7
        db.det.input.PSL.power.units = 'W'
        # NOTE(review): detune of 1064e-9 * .001 -- presumably a
        # 0.001-wavelength beamsplitter detuning; confirm.
        db.det.LIGO.S_BS_IX.L_detune.val = 1064e-9 * .001
        db.det.LIGO.S_BS_IX.L_detune.units = 'm'
        db.det.output.AS_efficiency_percent = 85
        db.environment.F_AC.frequency.val = logspaced(.5, 10000, 1000)
        sys = system.BGSystem(
            ctree = db,
            solver_name = 'loop_LUQ',
        )
        sys.own.det = LIGOBasicOperation()
        print(sys.det.LIGO.YarmDC.DC_readout)
        print(sys.det.LIGO.XarmDC.DC_readout)
        print(sys.det.LIGO.REFLDC.DC_readout)
        print(sys.det.LIGO.POPTrueDC.DC_readout)
        print(sys.det.output.ASPD_DC.DC_readout)
        readoutI = sys.det.output.ASPDHD_AC
        ASPDHD_AC_nls = readoutI.AC_noise_limited_sensitivity
        # Ratio of computed vs. reference sensitivity must be ~1 everywhere
        # (checked to 2 decimal places).
        rel = (ASPDHD_AC_nls / output.ASPDHD_AC_nls).real
        print("RELMINMAX: ", np.min(rel), np.max(rel))
        np_test.assert_almost_equal(
            rel, 1, 2
        )
    # Run the solve many times to exercise solver determinism/stability.
    for i in range(20):
        test_inverse()
| [
"Lee.McCuller@gmail.com"
] | Lee.McCuller@gmail.com |
c946e3d7d9f7bc6cd9acbc12ac3715f0fec5809c | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2and3/distutils/command/bdist.pyi | 616b93e4aa49694aba5a4a8b4e65e50adb80db1b | [] | no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | pyi | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-jedi-0.17.0-zugnvpgjfmuk5x4rfhhxlsknl2g226yt/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2and3/distutils/command/bdist.pyi | [
"mersoj@rpi.edu"
] | mersoj@rpi.edu |
db803a0586142b52f75809ffd21a1d35b32ff2a4 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2976/60705/263776.py | 3a732b098934518bb61117ea96d97db82cacde9c | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | short = input()
length = len(short)
k = 1
while k < 20:
try:
a = input()
except EOFError:
k += 1
continue
len_of_a = len(a)
# 删除短字符串
i = 0
while i + length <= len_of_a:
if short == a[i:i+length]:
a = a[0:i] + a[i+length:len(a)]
len_of_a -= length
i = -1
i += 1
# 删除空格
i = 0
while i < len_of_a:
if a[i] == " ":
a = a[0:i] + a[i + 1:len(a)]
len_of_a -= 1
i -= 1
i += 1
if a == 'printf("Hi")':
a = a.replace("H", "")
print(a)
k += 1
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
4adc28cf02ad73151e0d6daf883933c57205b21f | 7950c4faf15ec1dc217391d839ddc21efd174ede | /leetcode-cn/sword2offer/000剑指0_Offer_29._顺时针打印矩阵.py | 583332df292ce5dd4b00535341ccffb0c19c9cf1 | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | '''
执行用时:24 ms, 在所有 Python 提交中击败了87.79%的用户
内存消耗:13.5 MB, 在所有 Python 提交中击败了94.97%的用户
'''
class Solution(object):
    def spiralOrder(self, matrix):
        """
        Return the elements of the matrix in clockwise spiral order,
        shrinking the four boundaries after each full ring.

        :type matrix: List[List[int]]
        :rtype: List[int]
        """
        rows = len(matrix)
        cols = len(matrix[0]) if matrix else 0
        result = []
        top, bottom = 0, rows - 1
        left, right = 0, cols - 1
        while top <= bottom and left <= right:
            # Top row, left to right.
            result.extend(matrix[top][col] for col in range(left, right + 1))
            # Right column, top to bottom (excluding the corner just taken).
            result.extend(matrix[row][right] for row in range(top + 1, bottom + 1))
            # Only rings with height > 1 and width > 1 have a distinct
            # bottom row and left column.
            if top < bottom and left < right:
                result.extend(matrix[bottom][col] for col in range(right - 1, left - 1, -1))
                result.extend(matrix[row][left] for row in range(bottom - 1, top, -1))
            top += 1
            bottom -= 1
            left += 1
            right -= 1
        return result
'''
逆时针旋转 == (转置+倒序)
转置: zip
倒序: [:: -1]
执行用时:36 ms, 在所有 Python 提交中击败了36.27%的用户
内存消耗:14.1 MB, 在所有 Python 提交中击败了9.69%的用户
'''
class Solution(object):
    def spiralOrder(self, matrix):
        """
        Return the elements of the matrix in clockwise spiral order by
        peeling off the first row, then rotating the remainder
        counter-clockwise (transpose + reverse) and repeating.

        :type matrix: List[List[int]]
        :rtype: List[int]
        """
        spiral = []
        while matrix:
            spiral.extend(matrix[0])
            # list(...) is required on Python 3, where zip returns a lazy,
            # non-subscriptable iterator; on Python 2 it is a harmless copy.
            matrix = list(zip(*matrix[1:]))[::-1]
        return spiral
| [
"838255715@qq.com"
] | 838255715@qq.com |
413d10d141e21938b4e969cb4513dd7b41f93f96 | d54e1b89dbd0ec5baa6a018464a419e718c1beac | /Python from others/飞机大战/wk_11_事件退出事件.py | 60da8d84e1ea8c3b53ded2f4b9a0bad6839fe54c | [] | no_license | cjx1996/vscode_Pythoncode | eda438279b7318e6cb73211e26107c7e1587fdfb | f269ebf7ed80091b22334c48839af2a205a15549 | refs/heads/master | 2021-01-03T19:16:18.103858 | 2020-05-07T13:51:31 | 2020-05-07T13:51:31 | 240,205,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | import pygame
# Initialize all pygame modules
pygame.init()
# Create the game window (480x700)
screen = pygame.display.set_mode((480, 700))
# Draw the background image
bg = pygame.image.load("./images/background.png")
screen.blit(bg, (0, 0))
# Load the hero's plane image
hero = pygame.image.load("./images/me1.png")
# screen.blit(hero, (200, 500))
# All drawing can be flushed with a single update() call afterwards
# pygame.display.update()
# Create a clock object to cap the frame rate
clock = pygame.time.Clock()
# 1. Define a rect recording the plane's initial position/size
hero_rect = pygame.Rect(150, 300, 102, 126)
while True:
    # Cap how often the loop body executes (60 frames per second)
    clock.tick(60)
    # Listen for events
    for event in pygame.event.get():
        # Check whether this is the quit event
        if event.type == pygame.QUIT:
            print("游戏退出...")
            # quit() unloads all pygame modules
            pygame.quit()
            # exit() terminates the running program immediately
            exit()
    # 2. Update the plane's position (move up, wrap back to the bottom)
    hero_rect.y -= 1
    if hero_rect.y <= -126:
        hero_rect.y = 700
    # 3. Redraw the background and the plane with blit
    screen.blit(bg, (0, 0))
    screen.blit(hero, hero_rect)
    # 4. Refresh the display
    pygame.display.update()
# Unreachable: the loop above only ends via exit() in the event handler.
pygame.quit()
| [
"1121287904@qq.com"
] | 1121287904@qq.com |
023b73f0b93d17480fbbccec70ee83318541d482 | 876773d9c2f3945b37730416ae972ffe912d61af | /backend/manage.py | b04e2e78e5ebc452fbec2a899a8cf2e24ddfbf83 | [] | no_license | crowdbotics-apps/upfood-22998 | 955c21c85e1890108b656ca9451fe31bfdebb8c5 | 80f4e9e4969af8ee4b71b57490ae38212d046e53 | refs/heads/master | 2023-01-20T06:19:08.499602 | 2020-11-26T19:07:06 | 2020-11-26T19:07:06 | 316,316,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django command-line entry point: set the settings module and dispatch."""
    # Default settings module for this project; an externally-set
    # DJANGO_SETTINGS_MODULE takes precedence (setdefault).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "upfood_22998.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint: Django is missing or the venv is inactive.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
a4ee19fffd100a3fc05a6e021b724a07d4482aad | 459929ce79538ec69a6f8c32e608f4e484594d68 | /venv/Lib/site-packages/kubernetes/client/models/extensions_v1beta1_deployment_list.py | 10b69c3d65178e80e977c84bfa9479ff4e264369 | [] | no_license | yychai97/Kubernetes | ec2ef2a98a4588b7588a56b9d661d63222278d29 | 2955227ce81bc21f329729737b5c528b02492780 | refs/heads/master | 2023-07-02T18:36:41.382362 | 2021-08-13T04:20:27 | 2021-08-13T04:20:27 | 307,412,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,727 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ExtensionsV1beta1DeploymentList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'api_version': 'str',
        'items': 'list[ExtensionsV1beta1Deployment]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None):  # noqa: E501
        """ExtensionsV1beta1DeploymentList - a model defined in OpenAPI"""  # noqa: E501
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        # This model is not polymorphic, so there is no discriminator field.
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # 'items' is required: assigning through the property raises
        # ValueError when it is None.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this ExtensionsV1beta1DeploymentList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this ExtensionsV1beta1DeploymentList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this ExtensionsV1beta1DeploymentList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this ExtensionsV1beta1DeploymentList.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this ExtensionsV1beta1DeploymentList.  # noqa: E501

        Items is the list of Deployments.  # noqa: E501

        :return: The items of this ExtensionsV1beta1DeploymentList.  # noqa: E501
        :rtype: list[ExtensionsV1beta1Deployment]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this ExtensionsV1beta1DeploymentList.

        Items is the list of Deployments.  # noqa: E501

        :param items: The items of this ExtensionsV1beta1DeploymentList.  # noqa: E501
        :type: list[ExtensionsV1beta1Deployment]
        """
        # 'items' is the only required field of this model.
        if items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this ExtensionsV1beta1DeploymentList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this ExtensionsV1beta1DeploymentList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this ExtensionsV1beta1DeploymentList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this ExtensionsV1beta1DeploymentList.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this ExtensionsV1beta1DeploymentList.  # noqa: E501


        :return: The metadata of this ExtensionsV1beta1DeploymentList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this ExtensionsV1beta1DeploymentList.


        :param metadata: The metadata of this ExtensionsV1beta1DeploymentList.  # noqa: E501
        :type: V1ListMeta
        """

        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""
        # Recursively converts nested models, lists and dicts to plain dicts.
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ExtensionsV1beta1DeploymentList):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"49704239+yychai97@users.noreply.github.com"
] | 49704239+yychai97@users.noreply.github.com |
3aafcf9d843e58fbbf774ed45ecdf3dcfeaabe94 | f7db9442c0ba1f5c9f0b2c5fe0f1b0c10320b64a | /Warmup-1/near_hundred.py | f756ae999c0bdda305e96ae6fb20f6bd936a5169 | [] | no_license | antonioramos1/codingbat-python | aba5498fbf331ea47ce1b4178c6f7b8f81fc5908 | bfd02e338d886f8807dd3fb8a766d77a91e0ab50 | refs/heads/master | 2021-09-10T04:16:16.589515 | 2018-03-21T00:20:39 | 2018-03-21T00:20:39 | 126,085,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | def near_hundred(n):
return ((n >= 90 and n <= 110) or (n >= 190 and n <= 210))
| [
"antonioramosglz@gmail.com"
] | antonioramosglz@gmail.com |
fb1a0fe8be3323847c1589104e7ad955265f9f5a | 94487ea9d7d2bbdf46797fc5bf82fee45cf23db5 | /tests/python/unittest/test_tir_schedule_set_axis_separator.py | 102b3d1cd71062ee45a0b251ca94cf6c3217bb9c | [
"Apache-2.0",
"BSD-3-Clause",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | were/tvm | 9cc379dac1bcd9ae83b133a313db75f5a63640f6 | afb67e64a1891e1d1aab03c4614fca11473e7b27 | refs/heads/master | 2022-11-22T15:47:02.888421 | 2022-05-28T00:10:40 | 2022-05-28T00:10:40 | 146,328,333 | 3 | 0 | Apache-2.0 | 2018-08-27T17:03:20 | 2018-08-27T17:03:19 | null | UTF-8 | Python | false | false | 6,121 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.tir import IndexMap
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
# Input IR for the tests below: a two-stage elementwise pipeline
# (B = A * 2, then C = B + 1) over plain 128x128 buffers; the schedule
# under test rewrites the intermediate buffer B.
@T.prim_func
def element_wise(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
    B = T.alloc_buffer((128, 128), dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Expected IR after the transformation: identical to element_wise except
# that B is allocated with an axis separator between its two axes.
@T.prim_func
def element_wise_set_axis_separator(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
    B = T.alloc_buffer([128, 128], dtype="float32", axis_separators=[1])
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + T.float32(1)
# Expected IR when the axis separator is applied to the input buffer A
# (declared in the signature) rather than the intermediate allocation B.
@T.prim_func
def element_wise_set_axis_separator_input_buffer(A: T.Buffer(shape=(128, 128), dtype="float32", axis_separators=(1,)), C: T.Buffer[(128, 128), "float32"]) -> None:
    B = T.alloc_buffer([128, 128], dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + T.float32(1)
# Input IR variant where each block accesses B through a zero-dimensional
# match_buffer subregion, exercising separator propagation into matches.
@T.prim_func
def element_wise_subregion_match(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
    B = T.alloc_buffer((128, 128), dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion0 = T.match_buffer(B[i, j], [], offset_factor=1)
            B_subregion0[()] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion1 = T.match_buffer(B[i, j], [], offset_factor=1)
            C[vi, vj] = B_subregion1[()] + 1.0
@T.prim_func
def element_wise_subregion_match_set_axis_separator(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer([128, 128], dtype="float32", axis_separators=[1])
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion0 = T.match_buffer(B[i, j], [], dtype="float32", offset_factor=1, axis_separators=[1])
B_subregion0[()] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion1 = T.match_buffer(B[i, j], [], dtype="float32", offset_factor=1, axis_separators=[1])
C[vi, vj] = B_subregion1[()] + T.float32(1)
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
use_sugared_transform = tvm.testing.parameter(
by_dict={"set_axis_separators": False, "transform_layout_sugared": True}
)
def test_set_axis_separator(use_sugared_transform):
func = element_wise
s = tir.Schedule(func, debug_mask='all')
if use_sugared_transform:
s.set_axis_separator(s.get_block("B"), ("write",0), [1])
else:
s.transform_layout(block='B', buffer='B', index_map=lambda i,j: [i,IndexMap.AXIS_SEPARATOR,j])
tvm.ir.assert_structural_equal(element_wise_set_axis_separator, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_set_scope_fail_on_index_out_of_bound():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
with pytest.raises(AssertionError):
s.set_axis_separator(s.get_block("B"), ("write",1),[1])
with pytest.raises(AssertionError):
s.set_axis_separator(s.get_block("B"), ("read",-1),[1])
def test_set_axis_separator_input_buffer(use_sugared_transform):
func = element_wise
s = tir.Schedule(func, debug_mask='all')
if use_sugared_transform:
s.transform_layout(block='B', buffer='A', index_map=lambda i,j: [i,IndexMap.AXIS_SEPARATOR,j])
else:
s.set_axis_separator(s.get_block("B"), ("read",0), [1])
tvm.ir.assert_structural_equal(element_wise_set_axis_separator_input_buffer, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_set_axis_separator_subregion(use_sugared_transform):
func = element_wise_subregion_match
s = tir.Schedule(func, debug_mask='all')
if use_sugared_transform:
s.transform_layout(block='B', buffer='B', index_map=lambda i,j: [i,IndexMap.AXIS_SEPARATOR,j])
else:
s.set_axis_separator(s.get_block("B"), ("write",0), [1])
tvm.ir.assert_structural_equal(element_wise_subregion_match_set_axis_separator, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
if __name__ == "__main__":
tvm.testing.main()
| [
"noreply@github.com"
] | were.noreply@github.com |
b8f8c7887b4161a0796f663e1360ff23717fcf82 | 3e4b8fe54f11bf36f3615c21fdc1dca0ed00fe72 | /month01/code/day08/shopping.py | afbad4e6ba4720b418384e8528526145ab6bd682 | [] | no_license | leinian85/year2019 | 30d66b1b209915301273f3c367bea224b1f449a4 | 2f573fa1c410e9db692bce65d445d0543fe39503 | refs/heads/master | 2020-06-21T20:06:34.220046 | 2019-11-04T06:37:02 | 2019-11-04T06:37:02 | 197,541,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,483 | py | commodity_info = {
101: {"name": "屠龙刀", "price": 10000},
102: {"name": "倚天剑", "price": 10000},
103: {"name": "九阴白骨爪", "price": 8000},
104: {"name": "九阳神功", "price": 9000},
105: {"name": "降龙十八掌", "price": 8000},
106: {"name": "乾坤大挪移", "price": 10000}
}
order = {}
def commodity_list():
"""
显示商品明细
:return:
"""
for key, value in commodity_info.items():
print("编号:%d,名称:%s,单价:%d。" % (key, value["name"], value["price"]))
def buy():
"""
购买商品
:return:
"""
commodity_list()
while True:
cid = int(input("请输入商品编号:"))
if cid in commodity_info:
break
else:
print("该商品不存在")
count = int(input("请输入购买数量:"))
if cid not in order:
order[cid] = count
else:
order[cid] += count
print("添加到购物车。")
def shopping_list():
"""
计算购物的总金额
:return: 返回总金额
"""
total_money = 0
for cid,count in order.items():
commodity = commodity_info[cid]
print("商品:%s,单价:%d,数量:%d." % (commodity["name"], commodity["price"], count))
total_money += commodity["price"] * count
return total_money
def square():
"""
商品结算
:return:
"""
if bool(order):
total_money = shopping_list()
while True:
str_money = input("总价%d元,请输入金额:" % total_money)
if str_money == "":
str_out = input("退出请按'Y':")
if str_out== "Y":
order.clear()
break
else:
money = float(str_money)
if money >= total_money:
print("购买成功,找回:%d元。" % (money - total_money))
order.clear()
break
else:
print("金额不足.")
else:
print("你未购买任何物品,不需要结算")
def shopping():
"""
购物
:return:
"""
while True:
item = input("1键购买,2键结算。")
if item == "1":
buy()
elif item == "2":
square()
else:
break
shopping() | [
"42737521@qq.com"
] | 42737521@qq.com |
e606fb13a685824d01e0a0355fcc0f0aa2b9a8da | 6c42b234cba1f077dc306242ad1973d56f812343 | /beginner_tasks/strings.py | 82cd9c20d1567b20f2865c2fe8e03db530f63596 | [] | no_license | wencakisa/Python-Dev | 330a9ba3a8320f8e1fa5bfb86c85b24253361a6a | 511d307b6f64174002112cadcdbd0e23c1d69b70 | refs/heads/master | 2021-01-17T13:23:22.939323 | 2016-08-02T14:57:36 | 2016-08-02T14:57:36 | 59,685,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | import re
def slice_str(string, max_length):
if max_length < len(string):
return string[: max_length] + "..."
return string
def show_after(string, after):
return string[re.search(r'\s{}\s'.format(after), string).span()[1]:]
def main():
string = "This is soo difficult, I prefer playing WoW"
after = "is"
print(show_after(string, after))
if __name__ == '__main__':
main()
| [
"wencakisa@gmail.com"
] | wencakisa@gmail.com |
4e7130c92ca3727848a0d762125ea88d8243c716 | da052c0bbf811dc4c29a83d1b1bffffd41becaab | /core/stock_by_location/__openerp__.py | 9d07484e4fbf097c623c67fd130a188a963c3698 | [] | no_license | Muhammad-SF/Test | ef76a45ad28ac8054a4844f5b3826040a222fb6e | 46e15330b5d642053da61754247f3fbf9d02717e | refs/heads/main | 2023-03-13T10:03:50.146152 | 2021-03-07T20:28:36 | 2021-03-07T20:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 BrowseInfo(<http://www.browseinfo.in>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Stocks By Location',
'version': '1.1.3',
'category': 'Warehouse',
'sequence': 14,
'price': '25',
'currency': "EUR",
'summary': '',
'description': """
-Stock Balance by Location
-Stock Quantity by location
-Location based stock
-Display Product Quantity based on stock.
-Warehouse stock based on location
-Stock Quantity based on location
-Stock by location
-Stock qty by location
-Stock location
""",
'author': 'BrowseInfo',
'website': 'http://www.browseinfo.in',
'images': [],
'depends': ['base','sale','stock', 'inventory_reserved_available_qty'],
'data': [
'product.xml',
],
'installable': True,
'auto_install': False,
"images":['static/description/Banner.png'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"jbalu2801@gmail.com"
] | jbalu2801@gmail.com |
ddf9db09959a2290f75dfe464a502b92e03bf010 | ce285e8e855137888552e55083e19838fab3afda | /settings/common.py | 226e17ddba151670455e45a4f50c668643b365b0 | [] | no_license | bmarchenko/traveler | 4215b5596f2ea70796ea1ff4d21342aa3cf6ccff | 5af5a302677dd3037e7e1b52d7e5ef49dd41cf86 | refs/heads/master | 2016-08-11T20:33:16.410335 | 2012-11-10T22:48:19 | 2012-11-10T22:48:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,609 | py | # Django settings for your project.
import conf.environment
import os
LOCAL = False
SITE_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'wsgi.application'
ROOT_URLCONF = 'urls'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.\
os.path.join(SITE_ROOT, 'static'),
)
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates')
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'sdliu298sdf2398fqwf2089asdfasdfu098u'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"traveler.context_processors.nav_content",
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.comments',
'south',
'traveler',
'taggit',
'inlines',
'blog',
'hadrian.contrib.locations',
'gallery',
'sorl.thumbnail',
'django_extensions',
'bootstrap',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
}
}
}
| [
"dstegelman@gmail.com"
] | dstegelman@gmail.com |
cdfd9f0abdd1724bd2da56b313d4938752e38da7 | a691e764b10453c69e040abfa6841d25b622beba | /orquesta/composers/native.py | f025177d6f03821eb4e085cdc882ebde1fc3d98c | [
"Apache-2.0"
] | permissive | alertlogic/orquesta | ee0952c5e79663e4c928e6028e0cf514c55359d4 | 68fddf0ab312cca35616fcb3815966ab2fe83edb | refs/heads/master | 2023-08-15T23:01:10.836310 | 2021-10-14T16:20:49 | 2021-10-14T16:20:49 | 405,152,762 | 0 | 0 | Apache-2.0 | 2021-09-10T18:24:28 | 2021-09-10T16:56:38 | null | UTF-8 | Python | false | false | 4,312 | py | # Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from six.moves import queue
from orquesta.composers import base as comp_base
from orquesta import graphing
from orquesta.specs import native as native_specs
LOG = logging.getLogger(__name__)
class WorkflowComposer(comp_base.WorkflowComposer):
wf_spec_type = native_specs.WorkflowSpec
@classmethod
def compose(cls, spec):
if not cls.wf_spec_type:
raise TypeError("Undefined spec type for composer.")
if not isinstance(spec, cls.wf_spec_type):
raise TypeError('Unsupported spec type "%s".' % str(type(spec)))
return cls._compose_wf_graph(spec)
@classmethod
def _compose_wf_graph(cls, wf_spec):
if not isinstance(wf_spec, cls.wf_spec_type):
raise TypeError("Workflow spec is not typeof %s." % cls.wf_spec_type.__name__)
q = queue.Queue()
wf_graph = graphing.WorkflowGraph()
for task_name, condition, task_transition_item_idx in wf_spec.tasks.get_start_tasks():
q.put((task_name, []))
while not q.empty():
task_name, splits = q.get()
wf_graph.add_task(task_name)
if wf_spec.tasks.is_join_task(task_name):
task_spec = wf_spec.tasks[task_name]
barrier = "*" if task_spec.join == "all" else task_spec.join
wf_graph.set_barrier(task_name, value=barrier)
# Determine if the task is a split task and if it is in a cycle. If the task is a
# split task, keep track of where the split(s) occurs.
if wf_spec.tasks.is_split_task(task_name) and not wf_spec.tasks.in_cycle(task_name):
splits.append(task_name)
if splits:
wf_graph.update_task(task_name, splits=splits)
# Update task attributes if task spec has retry criteria.
task_spec = wf_spec.tasks.get_task(task_name)
if task_spec.has_retry():
retry_spec = {
"when": getattr(task_spec.retry, "when", None),
"count": getattr(task_spec.retry, "count", None),
"delay": getattr(task_spec.retry, "delay", None),
}
wf_graph.update_task(task_name, retry=retry_spec)
# Add task transition to the workflow graph.
next_tasks = wf_spec.tasks.get_next_tasks(task_name)
for next_task_name, condition, task_transition_item_idx in next_tasks:
if next_task_name == "retry":
retry_spec = {"when": condition or "<% completed() %>", "count": 3}
wf_graph.update_task(task_name, retry=retry_spec)
continue
if not wf_graph.has_task(next_task_name) or not wf_spec.tasks.in_cycle(
next_task_name
):
q.put((next_task_name, list(splits)))
crta = [condition] if condition else []
seqs = wf_graph.has_transition(
task_name, next_task_name, criteria=crta, ref=task_transition_item_idx
)
# Use existing transition if present otherwise create new transition.
if seqs:
wf_graph.update_transition(
task_name,
next_task_name,
key=seqs[0][2],
criteria=crta,
ref=task_transition_item_idx,
)
else:
wf_graph.add_transition(
task_name, next_task_name, criteria=crta, ref=task_transition_item_idx
)
return wf_graph
| [
"m4d.coder@gmail.com"
] | m4d.coder@gmail.com |
a5fc7a520f05a37155608a9bdf8777f0b5348450 | f33b30743110532ddae286ba1b34993e61669ab7 | /weekly contest/第180场周赛/2.py | ec84e43eebe3c1867ea8e5cce5a67373fe0b84be | [] | no_license | c940606/leetcode | fe9dcee7a5daa4d52999d5f53253dd6dd33c348b | 631df2ce6892a6fbb3e435f57e90d85f8200d125 | refs/heads/master | 2021-07-10T14:01:26.164966 | 2020-08-16T10:46:16 | 2020-08-16T10:46:16 | 186,588,449 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | from typing import List | [
"762307667@qq.com"
] | 762307667@qq.com |
c44a31bc672dd09d76b28c6336bbe521e2267744 | 409ce560793c070ef4211b99c5a4a5316a258c4f | /pylith/meshio/DataWriterVTK.py | 09ebe3ec0aea184c8dbba3a476889a1d9c618937 | [
"MIT"
] | permissive | calum-chamberlain/pylith | bb718bfb4305f03b45d42348e5d4fa5ed5f4a918 | 8712c39ade53c1cc5ac0e671e4296cee278c1dcf | refs/heads/master | 2020-12-06T17:15:08.638337 | 2016-05-15T20:30:28 | 2016-05-15T20:30:28 | 46,401,744 | 0 | 0 | null | 2016-05-15T20:30:29 | 2015-11-18T07:09:12 | C++ | UTF-8 | Python | false | false | 3,366 | py | #!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2015 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file pyre/meshio/DataWriterVTK.py
##
## @brief Python object for writing finite-element data to VTK file.
from DataWriter import DataWriter
from meshio import DataWriterVTK as ModuleDataWriterVTK
# DataWriterVTK class
class DataWriterVTK(DataWriter, ModuleDataWriterVTK):
"""
Python object for writing finite-element data to VTK file.
Inventory
\b Properties
@li \b filename Name of VTK file.
@li \b time_format C style format string for time stamp in filename.
@li \b time_constant Value used to normalize time stamp in filename.
\b Facilities
@li None
"""
# INVENTORY //////////////////////////////////////////////////////////
import pyre.inventory
filename = pyre.inventory.str("filename", default="output.vtk")
filename.meta['tip'] = "Name of VTK file."
timeFormat = pyre.inventory.str("time_format", default="%f")
timeFormat.meta['tip'] = "C style format string for time stamp in filename."
from pyre.units.time import second
timeConstant = pyre.inventory.dimensional("time_constant",
default=1.0*second,
validator=pyre.inventory.greater(0.0*second))
timeConstant.meta['tip'] = "Values used to normalize time stamp in filename."
precision = pyre.inventory.int("float_precision", default=6,
validator=pyre.inventory.greater(0))
precision.meta['tip'] = "Precision of floating point values in output."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="datawritervtk"):
"""
Constructor.
"""
DataWriter.__init__(self, name)
ModuleDataWriterVTK.__init__(self)
return
def initialize(self, normalizer):
"""
Initialize writer.
"""
DataWriter.initialize(self, normalizer, self.filename)
timeScale = normalizer.timeScale()
timeConstantN = normalizer.nondimensionalize(self.timeConstant, timeScale)
ModuleDataWriterVTK.filename(self, self.filename)
ModuleDataWriterVTK.timeScale(self, timeScale.value)
ModuleDataWriterVTK.timeFormat(self, self.timeFormat)
ModuleDataWriterVTK.timeConstant(self, timeConstantN)
ModuleDataWriterVTK.precision(self, self.precision)
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Configure object.
"""
try:
DataWriter._configure(self)
except ValueError, err:
aliases = ", ".join(self.aliases)
raise ValueError("Error while configuring VTK output "
"(%s):\n%s" % (aliases, err.message))
return
# FACTORIES ////////////////////////////////////////////////////////////
def data_writer():
"""
Factory associated with DataWriter.
"""
return DataWriterVTK()
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
f324e10f5052054d2e3506b49f197c47921214b8 | 8ec05f1d5800e0b98afa92367f74bed9f95e0ee9 | /venv/Scripts/autopep8-script.py | 4a622cfc0daaa5f5bad725211ff99e47d1376171 | [] | no_license | ayanchyaziz123/ecom-final-year-project | 28362922a88c71aba29d22f29c7f34e1cad6189f | d21fdd885b3b768935dc29171c5a6761c4b88e9c | refs/heads/master | 2023-08-12T17:10:23.826744 | 2021-10-06T12:36:17 | 2021-10-06T12:36:17 | 405,435,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!f:\proshop_django-master\venv\scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'autopep8==1.5.4','console_scripts','autopep8'
__requires__ = 'autopep8==1.5.4'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('autopep8==1.5.4', 'console_scripts', 'autopep8')()
)
| [
"aaziz9642@gmail.com"
] | aaziz9642@gmail.com |
bdcb618792133f600fa704d895b48da6883d5bea | 341dd7b46978fb898999233aa8aa5cede7e73e60 | /PathPlanning/RRTStarReedsShepp/rrt_star_reeds_shepp.py | d30001fb9c23e80aa3478aa0d67dcba5a7b8c651 | [
"MIT"
] | permissive | michaelchi08/PythonRobotics | 577d94a54ac5e5bec2199a109e3a9cbd74b24ee1 | d302e2756b7f2835b1a6524651e490525bd1c8c2 | refs/heads/master | 2021-05-09T20:28:36.179719 | 2018-01-23T00:28:14 | 2018-01-23T00:28:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,655 | py | """
Path Planning Sample Code with RRT for car like robot.
author: AtsushiSakai(@Atsushi_twi)
"""
import random
import math
import copy
import numpy as np
import reeds_shepp_path_planning
import matplotlib.pyplot as plt
show_animation = True
class RRT():
"""
Class for RRT Planning
"""
def __init__(self, start, goal, obstacleList, randArea,
goalSampleRate=10, maxIter=400):
"""
Setting Parameter
start:Start Position [x,y]
goal:Goal Position [x,y]
obstacleList:obstacle Positions [[x,y,size],...]
randArea:Ramdom Samping Area [min,max]
"""
self.start = Node(start[0], start[1], start[2])
self.end = Node(goal[0], goal[1], goal[2])
self.minrand = randArea[0]
self.maxrand = randArea[1]
self.goalSampleRate = goalSampleRate
self.maxIter = maxIter
self.obstacleList = obstacleList
def Planning(self, animation=True):
"""
Pathplanning
animation: flag for animation on or off
"""
self.nodeList = [self.start]
for i in range(self.maxIter):
rnd = self.get_random_point()
nind = self.GetNearestListIndex(self.nodeList, rnd)
newNode = self.steer(rnd, nind)
if self.CollisionCheck(newNode, self.obstacleList):
nearinds = self.find_near_nodes(newNode)
newNode = self.choose_parent(newNode, nearinds)
self.nodeList.append(newNode)
self.rewire(newNode, nearinds)
if animation and i % 5 == 0:
self.DrawGraph(rnd=rnd)
# generate coruse
lastIndex = self.get_best_last_index()
path = self.gen_final_course(lastIndex)
return path
def choose_parent(self, newNode, nearinds):
if len(nearinds) == 0:
return newNode
dlist = []
for i in nearinds:
tNode = self.steer(newNode, i)
if self.CollisionCheck(tNode, self.obstacleList):
dlist.append(tNode.cost)
else:
dlist.append(float("inf"))
mincost = min(dlist)
minind = nearinds[dlist.index(mincost)]
if mincost == float("inf"):
print("mincost is inf")
return newNode
newNode = self.steer(newNode, minind)
return newNode
def pi_2_pi(self, angle):
while(angle > math.pi):
angle = angle - 2.0 * math.pi
while(angle < -math.pi):
angle = angle + 2.0 * math.pi
return angle
def steer(self, rnd, nind):
curvature = 1.0
nearestNode = self.nodeList[nind]
px, py, pyaw, mode, clen = reeds_shepp_path_planning.reeds_shepp_path_planning(
nearestNode.x, nearestNode.y, nearestNode.yaw, rnd.x, rnd.y, rnd.yaw, curvature)
newNode = copy.deepcopy(nearestNode)
newNode.x = px[-1]
newNode.y = py[-1]
newNode.yaw = pyaw[-1]
newNode.path_x = px
newNode.path_y = py
newNode.path_yaw = pyaw
newNode.cost += clen
newNode.parent = nind
return newNode
def get_random_point(self):
if random.randint(0, 100) > self.goalSampleRate:
rnd = [random.uniform(self.minrand, self.maxrand),
random.uniform(self.minrand, self.maxrand),
random.uniform(-math.pi, math.pi)
]
else: # goal point sampling
rnd = [self.end.x, self.end.y, self.end.yaw]
node = Node(rnd[0], rnd[1], rnd[2])
return node
def get_best_last_index(self):
# print("get_best_last_index")
YAWTH = math.radians(3.0)
XYTH = 0.5
goalinds = []
for (i, node) in enumerate(self.nodeList):
if self.calc_dist_to_goal(node.x, node.y) <= XYTH:
goalinds.append(i)
# print("OK XY TH num is")
# print(len(goalinds))
# angle check
fgoalinds = []
for i in goalinds:
if abs(self.nodeList[i].yaw - self.end.yaw) <= YAWTH:
fgoalinds.append(i)
# print("OK YAW TH num is")
# print(len(fgoalinds))
if len(fgoalinds) == 0:
return None
mincost = min([self.nodeList[i].cost for i in fgoalinds])
for i in fgoalinds:
if self.nodeList[i].cost == mincost:
return i
return None
def gen_final_course(self, goalind):
path = [[self.end.x, self.end.y]]
while self.nodeList[goalind].parent is not None:
node = self.nodeList[goalind]
for (ix, iy) in zip(reversed(node.path_x), reversed(node.path_y)):
path.append([ix, iy])
# path.append([node.x, node.y])
goalind = node.parent
path.append([self.start.x, self.start.y])
return path
def calc_dist_to_goal(self, x, y):
return np.linalg.norm([x - self.end.x, y - self.end.y])
def find_near_nodes(self, newNode):
nnode = len(self.nodeList)
r = 50.0 * math.sqrt((math.log(nnode) / nnode))
# r = self.expandDis * 5.0
dlist = [(node.x - newNode.x) ** 2 +
(node.y - newNode.y) ** 2 +
(node.yaw - newNode.yaw) ** 2
for node in self.nodeList]
nearinds = [dlist.index(i) for i in dlist if i <= r ** 2]
return nearinds
def rewire(self, newNode, nearinds):
nnode = len(self.nodeList)
for i in nearinds:
nearNode = self.nodeList[i]
tNode = self.steer(nearNode, nnode - 1)
obstacleOK = self.CollisionCheck(tNode, self.obstacleList)
imporveCost = nearNode.cost > tNode.cost
if obstacleOK and imporveCost:
# print("rewire")
self.nodeList[i] = tNode
def DrawGraph(self, rnd=None):
"""
Draw Graph
"""
plt.clf()
if rnd is not None:
plt.plot(rnd.x, rnd.y, "^k")
for node in self.nodeList:
if node.parent is not None:
plt.plot(node.path_x, node.path_y, "-g")
# plt.plot([node.x, self.nodeList[node.parent].x], [
# node.y, self.nodeList[node.parent].y], "-g")
for (ox, oy, size) in self.obstacleList:
plt.plot(ox, oy, "ok", ms=30 * size)
reeds_shepp_path_planning.plot_arrow(
self.start.x, self.start.y, self.start.yaw)
reeds_shepp_path_planning.plot_arrow(
self.end.x, self.end.y, self.end.yaw)
plt.axis([-2, 15, -2, 15])
plt.grid(True)
plt.pause(0.01)
# plt.show()
# input()
def GetNearestListIndex(self, nodeList, rnd):
dlist = [(node.x - rnd.x) ** 2 +
(node.y - rnd.y) ** 2 +
(node.yaw - rnd.yaw) ** 2 for node in nodeList]
minind = dlist.index(min(dlist))
return minind
def CollisionCheck(self, node, obstacleList):
for (ox, oy, size) in obstacleList:
for (ix, iy) in zip(node.path_x, node.path_y):
dx = ox - ix
dy = oy - iy
d = dx * dx + dy * dy
if d <= size ** 2:
return False # collision
return True # safe
class Node():
"""
RRT Node
"""
def __init__(self, x, y, yaw):
self.x = x
self.y = y
self.yaw = yaw
self.path_x = []
self.path_y = []
self.path_yaw = []
self.cost = 0.0
self.parent = None
def main():
print("Start rrt start planning")
# ====Search Path with RRT====
# obstacleList = [
# (5, 5, 1),
# (3, 6, 2),
# (3, 8, 2),
# (3, 10, 2),
# (7, 5, 2),
# (9, 5, 2)
# ] # [x,y,size(radius)]
obstacleList = [
(5, 5, 1),
(4, 6, 1),
(4, 8, 1),
(4, 10, 1),
(6, 5, 1),
(7, 5, 1),
(8, 6, 1),
(8, 8, 1),
(8, 10, 1)
] # [x,y,size(radius)]
# Set Initial parameters
start = [0.0, 0.0, math.radians(0.0)]
goal = [6.0, 7.0, math.radians(90.0)]
rrt = RRT(start, goal, randArea=[-2.0, 15.0], obstacleList=obstacleList)
path = rrt.Planning(animation=show_animation)
# Draw final path
if show_animation:
rrt.DrawGraph()
plt.plot([x for (x, y) in path], [y for (x, y) in path], '-r')
plt.grid(True)
plt.pause(0.001)
plt.show()
if __name__ == '__main__':
main()
| [
"asakai.amsl+github@gmail.com"
] | asakai.amsl+github@gmail.com |
f004bf5cdc9d3ec9a3989848533cffae640eb624 | ced2dc1f67830f70bc8449b864a5ddf9858a6f76 | /CMSSW_9_4_12/src/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysispri_M4500_R_0-5.py | 73cdd810f68a21cb46fd834007aa58975b8517cb | [] | no_license | xdlyu/16_MINIAODV3 | 6e1c455a17e8453974b200c05da18a81386936fe | 5f506cb0e3411fe85bc0b86d6f9477ca7d46eea3 | refs/heads/master | 2020-12-13T03:46:54.895084 | 2020-01-17T14:47:22 | 2020-01-17T14:47:22 | 234,304,490 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py | from WMCore.Configuration import Configuration
name = 'WWW'
steam_dir = 'xulyu'
config = Configuration()
config.section_("General")
config.General.requestName = 'M4500_R0-5_off_pri'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt','L1PrefiringMaps_new.root']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.sendExternalFolder = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/VVV-4500-R05_hpp/qili-crab_VVV_MiniAOD_v1-05265a4e675f881e1e171fd06785811b/USER'
#config.Data.inputDBS = 'global'
config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =50
config.Data.totalUnits = -1
config.Data.publication = False
#config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'M4500_R0-1_off_pri'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"XXX@cern.ch"
] | XXX@cern.ch |
f41ed179960d7125eec63d2285d10a17156fb3c6 | c5148bc364dac753c0872bd5676027a30b260486 | /build/lib/biosteam/utils/piping.py | ed498bd6d23025192278e5db5152a86278742757 | [
"MIT",
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ecoent/biosteam | 86f47c713a2cae5d6261b6c2c7734ccf7a90fb4e | f1371386d089df3aa8ce041175f210c0318c1fe0 | refs/heads/master | 2021-02-24T14:10:23.158984 | 2020-03-05T03:43:17 | 2020-03-05T03:43:17 | 245,433,768 | 1 | 0 | NOASSERTION | 2020-03-06T13:59:27 | 2020-03-06T13:59:26 | null | UTF-8 | Python | false | false | 11,369 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 5 16:47:33 2018
This module includes classes and functions relating Stream objects.
@author: Yoel Cortes-Pena
"""
from thermosteam import Stream, MultiStream
__all__ = ('MissingStream', 'Ins', 'Outs', 'Sink', 'Source',
'as_stream', 'as_upstream', 'as_downstream')
isa = isinstance
# %% Utilities
def pipe_info(source, sink):
"""Return stream information header."""
# First line
if source is None:
source = ''
else:
source = f' from {repr(source)}'
if sink is None:
sink = ''
else:
sink = f' to {repr(sink)}'
return f"{source}{sink}"
def as_stream(stream):
if isa(stream, Stream):
return stream
elif isa(stream, str):
return Stream(stream)
elif stream is None:
return MissingStream()
def as_upstream(stream, sink):
    """Coerce `stream` with `as_stream` and attach `sink` as its sink."""
    stream = as_stream(stream)
    stream._sink = sink
    return stream
def as_downstream(stream, source):
    """Coerce `stream` with `as_stream` and attach `source` as its source."""
    stream = as_stream(stream)
    stream._source = source
    return stream
# %% Dummy Stream object
class MissingStream:
    """Falsey placeholder standing in for a not-yet-connected stream.

    Instances occupy Ins/Outs slots until a real Stream replaces them.
    Reading any attribute raises AttributeError so that accidental use
    of a placeholder fails loudly.
    """
    __slots__ = ('_source', '_sink')

    def __bool__(self):
        # A missing stream is always "empty"/false in boolean context.
        return False

    def __str__(self):
        return 'missing stream'

    def __repr__(self):
        return '<MissingStream>'

    def __getattr__(self, name):
        # Nothing meaningful can be read off a placeholder.
        raise AttributeError(str(self))
# %% Utilities
def n_missing(ub, N):
    """Return how many stream slots are vacant given an upper bound `ub`
    and the actual number of streams `N`.

    Raises
    ------
    ValueError
        If `N` exceeds `ub`. (Previously enforced with `assert`, which is
        silently stripped when Python runs with -O; an explicit raise keeps
        the size check active in optimized mode.)
    """
    if N > ub:
        raise ValueError(f"size of streams exceeds {ub}")
    return ub - N
MissingStream = MissingStream()
# %% List objects for input and output streams
class StreamSequence:
    """
    Abstract base class holding an ordered sequence of streams for a Unit.

    Subclasses must implement the docking hooks that maintain the
    back-references between a stream and the unit that owns it:

    * _dock(self, stream) -> Stream
    * _redock(self, stream) -> Stream
    * _undock(self, stream) -> None

    When `fixed_size` is true the sequence always holds exactly `size`
    entries and vacant positions contain the MissingStream singleton;
    otherwise the sequence grows and shrinks freely.
    """
    __slots__ = ('_size', '_streams', '_fixed_size')
    def __init__(self, size, streams, thermo, fixed_size):
        self._size = size
        self._fixed_size = fixed_size
        dock = self._dock
        redock = self._redock
        if streams == ():
            # No streams given at all: create `size` fresh, empty streams.
            self._streams = [dock(Stream(thermo=thermo)) for i in range(size)]
        else:
            if fixed_size:
                self._streams = [MissingStream] * size #: All input streams
                if streams:
                    if isa(streams, str):
                        # A single stream ID: make one new stream in slot 0.
                        self._streams[0] = dock(Stream(streams, thermo=thermo))
                    elif isa(streams, (Stream, MultiStream)):
                        # A single existing stream: adopt it into slot 0.
                        self._streams[0] = redock(streams)
                    else:
                        N = len(streams)
                        n_missing(size, N) # Assert size is not too big
                        # Mixed sequence of Stream objects and stream IDs.
                        self._streams[:N] = [redock(i) if isa(i, Stream)
                                             else dock(Stream(i, thermo=thermo)) for i in streams]
            else:
                if streams:
                    if isa(streams, str):
                        self._streams = [dock(Stream(streams, thermo=thermo))]
                    elif isa(streams, (Stream, MultiStream)):
                        self._streams = [redock(streams)]
                    else:
                        # Mixed sequence of Stream objects and stream IDs.
                        self._streams = [redock(i) if isa(i, Stream)
                                         else dock(Stream(i, thermo=thermo)) for i in streams]
                else:
                    self._streams = size * [MissingStream]
    def __add__(self, other):
        return self._streams + other
    def __radd__(self, other):
        return other + self._streams
    # Default docking hooks are no-ops; Ins/Outs override them to keep the
    # stream <-> unit back-references consistent.
    def _dock(self, stream): return stream
    def _redock(self, stream): return stream
    def _undock(self, stream): pass
    def _set_streams(self, slice, streams):
        """Replace the streams in `slice`, undocking the old and redocking all."""
        all_streams = self._streams
        for stream in all_streams[slice]: self._undock(stream)
        all_streams[slice] = streams
        # NOTE(review): this redocks *every* entry, including MissingStream
        # placeholders in fixed-size sequences -- confirm placeholders never
        # reach here, since redocking one would raise AttributeError.
        for stream in all_streams: self._redock(stream)
        if self._fixed_size:
            size = self._size
            N_streams = len(all_streams)
            if N_streams < size:
                N_missing = n_missing(size, N_streams)
                if N_missing:
                    # Pad back up to the fixed size with placeholders.
                    all_streams[N_streams: size] = (MissingStream,) * N_missing
    @property
    def size(self):
        # Current number of entries (placeholders included).
        return self._streams.__len__()
    def __len__(self):
        return self._streams.__len__()
    def _set_stream(self, int, stream):
        """Replace the stream at position `int`, keeping docking consistent."""
        self._undock(self._streams[int])
        self._redock(stream)
        self._streams[int] = stream
    def index(self, stream):
        return self._streams.index(stream)
    def pop(self, index):
        """Remove and return the stream at `index`.

        Fixed-size sequences leave a MissingStream placeholder behind
        instead of shrinking.
        """
        streams = self._streams
        if self._fixed_size:
            stream = streams[index]
            streams[index] = MissingStream
        else:
            stream = streams.pop(index)
        return stream
    def remove(self, stream):
        """Undock and remove `stream` (placeholder left behind if fixed-size)."""
        streams = self._streams
        self._undock(stream)
        if self._fixed_size:
            index = streams.index(stream)
            streams[index] = MissingStream
        else:
            streams.remove(stream)
    def clear(self):
        """Empty the sequence (refill with placeholders if fixed-size)."""
        if self._fixed_size:
            self._streams = [MissingStream] * self.size
        else:
            self._streams.clear()
    def __iter__(self):
        yield from self._streams
    def __getitem__(self, index):
        return self._streams[index]
    def __setitem__(self, index, item):
        if isa(index, int):
            self._set_stream(index, item)
        elif isa(index, slice):
            self._set_streams(index, item)
        else:
            raise TypeError(f"Only intergers and slices are valid indices for '{type(self).__name__}' objects")
    def __repr__(self):
        return repr(self._streams)
class Ins(StreamSequence):
    """Sequence of input streams belonging to a sink Unit."""
    # Bug fix: '_fixed_size' was redeclared here even though the base class
    # StreamSequence already slots it, creating a redundant shadowing slot
    # that wasted memory on every instance. Only '_sink' is new to Ins.
    __slots__ = ('_sink',)
    def __init__(self, sink, size, streams, thermo, fixed_size=True):
        self._sink = sink
        super().__init__(size, streams, thermo, fixed_size)
    @property
    def sink(self):
        """Unit that consumes these streams."""
        return self._sink
    def _dock(self, stream):
        # Freshly created stream: point it at this unit.
        stream._sink = self._sink
        return stream
    def _redock(self, stream):
        # Adopt an existing stream, detaching it from its previous sink.
        sink = stream._sink
        if sink:
            ins = sink._ins
            if ins is not self:
                # Remove from the old sink before taking ownership.
                ins.remove(stream)
                stream._sink = self._sink
        else:
            stream._sink = self._sink
        return stream
    def _undock(self, stream):
        stream._sink = None
class Outs(StreamSequence):
    """Sequence of output streams belonging to a source Unit."""
    __slots__ = ('_source',)

    def __init__(self, source, size, streams, thermo, fixed_size=True):
        self._source = source
        super().__init__(size, streams, thermo, fixed_size)

    @property
    def source(self):
        """Unit that produces these streams."""
        return self._source

    def _dock(self, stream):
        # Freshly created stream: point it at this unit.
        stream._source = self._source
        return stream

    def _redock(self, stream):
        # Adopt an existing stream, detaching it from its previous source.
        old_source = stream._source
        if not old_source:
            stream._source = self._source
            return stream
        old_outs = old_source._outs
        if old_outs is not self:
            # Remove from the old source before taking ownership.
            old_outs.remove(stream)
            stream._source = self._source
        return stream

    def _undock(self, stream):
        stream._source = None
# %% Sink and Source object for piping notation
class Sink:
    """Bind a stream to an inlet index for -pipe- notation.

    Created by ``stream - index``; subtracting a unit afterwards
    (``stream - index - unit``) stores the stream at ``unit.ins[index]``
    and evaluates to the unit so the chain can continue.

    Parameters
    ----------
    stream : Stream
    index : int

    Examples
    --------
    >>> stream = Stream('s1')
    >>> stream - 1
    <Sink: s1-1>
    >>> stream - 1 - Mixer('M1')   # connects s1 to M1.ins[1]
    """
    __slots__ = ('stream', 'index')

    def __init__(self, stream, index):
        self.stream = stream
        self.index = index

    def __sub__(self, unit):
        # `sink - unit`: plug the held stream into the unit's inlets.
        unit.ins[self.index] = self.stream
        return unit

    # `**` mirrors `-` so backward piping works too.
    __pow__ = __sub__

    def __repr__(self):
        return f'<{type(self).__name__}: {self.stream.ID}-{self.index}>'
class Source:
    """Bind a stream to an outlet index for -pipe- notation.

    Created by ``index ** stream``; a preceding unit
    (``unit ** index ** stream``) stores the stream at
    ``unit.outs[index]`` and evaluates to the unit.

    Parameters
    ----------
    stream : Stream
    index : int

    Examples
    --------
    >>> stream = Stream('s1')
    >>> 1 ** stream
    <Source: 1-s1>
    >>> Mixer('M1') ** 0 ** stream   # connects s1 to M1.outs[0]
    """
    __slots__ = ('stream', 'index')

    def __init__(self, stream, index):
        self.stream = stream
        self.index = index

    def __rsub__(self, unit):
        # `unit - source` (or `unit ** source`): register the stream
        # among the unit's outlets.
        unit.outs[self.index] = self.stream
        return unit

    # `**` mirrors `-` so both pipe spellings work.
    __rpow__ = __rsub__

    def __repr__(self):
        return f'<{type(self).__name__}: {self.index}-{self.stream.ID}>'
# %% Pipping
def __sub__(self, index):
    """Monkey-patched onto Stream: `stream - int` builds a Sink.

    Stream - Stream is rejected explicitly; any other right operand
    (e.g. a Unit) is delegated to its own __rsub__.
    """
    if isinstance(index, int):
        return Sink(self, index)
    elif isinstance(index, Stream):
        raise TypeError("unsupported operand type(s) for -: "
                        f"'{type(self)}' and '{type(index)}'")
    return index.__rsub__(self)
def __rsub__(self, index):
    """Monkey-patched onto Stream: `int - stream` (or `int ** stream`) builds a Source.

    Stream operands are rejected explicitly; any other left operand is
    delegated to its own __sub__.
    """
    if isinstance(index, int):
        return Source(self, index)
    elif isinstance(index, Stream):
        # Bug fix: the second line of this message was missing the f-prefix
        # (compare __sub__ above), so the type placeholders were emitted
        # literally instead of being interpolated.
        raise TypeError("unsupported operand type(s) for -: "
                        f"'{type(self)}' and '{type(index)}'")
    return index.__sub__(self)
# Attach the pipe-notation operators and convenience accessors to Stream.
Stream.__pow__ = Stream.__sub__ = __sub__ # Forward pipping
Stream.__rpow__ = Stream.__rsub__ = __rsub__ # Backward pipping
# Read-only views of the docking back-references maintained by Ins/Outs.
Stream.sink = property(lambda self: self._sink)
Stream.source = property(lambda self: self._source)
# One-line header ("Stream: ID from U1 to U2") used by stream representations.
Stream._basic_info = lambda self: (f"{type(self).__name__}: {self.ID or ''}"
                                   f"{pipe_info(self._source, self._sink)}\n")
| [
"yoelcortes@gmail.com"
] | yoelcortes@gmail.com |
db151470bbf8c4aa74cb965d7db73f15f4d712e4 | d8b259ea6401e435643a7b90365489f0ccef61b6 | /chapter5/poplib/poplib_gmail.py | 073067347352f2e51b8438815d313baaad07c4a1 | [
"MIT"
] | permissive | elgsaid/Learning-Python-Networking-Second-Edition | 08598637c2e8cdbebaf5ebf2c2c76cac96b0c76c | 39b68fbb936cf8fa2765c5819dcf0ce0a38a3b79 | refs/heads/master | 2020-04-29T11:10:02.791614 | 2019-03-15T09:17:24 | 2019-03-15T09:17:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | #!/usr/bin/env python3
"""Fetch all messages from a Gmail inbox over POP3/SSL, print basic headers,
dump plain-text bodies, and save GIF attachments to disk."""
import poplib
import getpass
from email.parser import Parser

mailbox = poplib.POP3_SSL('pop.gmail.com', 995)
mailbox.user('user@gmail.com')
password = getpass.getpass(prompt='Enter your password:')
mailbox.pass_(password)
# stat() -> (message count, mailbox size in octets)
EmailInformation = mailbox.stat()
print("Number of new emails: %s ", EmailInformation)
numberOfMails = EmailInformation[0]
num_messages = len(mailbox.list()[1])
parser = Parser()
for i in range(num_messages):
    print("Message number " + str(i + 1))
    print("--------------------")
    # retr() returns the message as a list of *byte* strings; decode them
    # before joining (the original joined bytes with a str separator,
    # which raises TypeError on Python 3).
    response, raw_lines, octets = mailbox.retr(i + 1)
    message = '\n'.join(line.decode('utf-8', 'replace') for line in raw_lines)
    # Bug fixes: Parser was never imported, and the instance was referenced
    # through the undefined name `p`.
    msg = parser.parsestr(message)
    print("From: " + msg["From"])
    print("To: " + msg["To"])
    print("Subject: " + msg["Subject"])
    print("ID: " + msg['message-id'])
    # Walk every MIME part so multipart messages are handled too; the
    # original inspected only the top-level content type and referenced an
    # undefined name `part` when saving attachments.
    for part in msg.walk():
        content_type = part.get_content_type()
        if ("text/plain" == str(content_type)):
            print(part.get_payload(decode=True))
        # If it is an image, the name of the file is extracted
        elif ("image/gif" == str(content_type)):
            file_name = part.get_filename()
            with open(file_name, 'wb') as fp:
                fp.write(part.get_payload(decode=True))
mailbox.quit()
| [
"jose-manuel.ortega-candel@capgemini.com"
] | jose-manuel.ortega-candel@capgemini.com |
c2e9579ce129c1425c3e34152fa1d73e81a4ab49 | 3b09dc4623dac559c85c0333526d55b0615d79d7 | /problems/160.py | 2954f908cc61c9c88f77a0e50b347db0751403d2 | [] | no_license | Asperas13/leetcode | 5d45bd65c490ada9b3cb2c33331a728eab2ef9b4 | 7f2f1d4f221925945328a355d653d9622107fae7 | refs/heads/master | 2021-09-28T15:54:54.761873 | 2020-05-05T15:29:48 | 2020-05-05T15:30:59 | 145,767,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
        """Return the node where two singly linked lists intersect, or None.

        Two-pointer technique: each pointer walks its own list and then
        switches to the head of the other list. Both pointers therefore
        cover lenA + lenB steps at most and meet at the first shared node,
        or at None when the lists do not intersect.

        O(lenA + lenB) time, O(1) extra space -- replaces the original
        dict-based approach, which kept every visited node in memory.
        """
        a, b = headA, headB
        while a is not b:
            # Switching lists at the end equalizes the path lengths, so the
            # pointers line up on the shared suffix (or both reach None).
            a = a.next if a is not None else headB
            b = b.next if b is not None else headA
        return a
| [
"ivan.pashnev@developex.com"
] | ivan.pashnev@developex.com |
09df7e7f68fd8ba2a32ed3f08fa5d77a2593c809 | e9ccc5228e8a4e404aa6e58efbba248a1aa21d5f | /server/sources.py | be7113c98923f89dc9a57907d16e916c00ac67a0 | [] | no_license | thisismyrobot/kindleclock | c65f0a50dc4d799593955584d44572ca28c729bd | 188dd4362691acb563d9f911bd896328c650a63f | refs/heads/master | 2016-08-05T04:48:38.051853 | 2013-01-28T23:55:07 | 2013-01-28T23:55:07 | 6,515,177 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,965 | py | import base64
import cgi
import datetime
import re
import time
import tools
import urllib
import urllib2
import xml.dom.minidom
def unreadgmail():
try:
auth = open("gmailauth.txt").read()
URL = 'https://gmail.google.com/gmail/feed/atom'
req = urllib2.Request(URL)
req.add_header('Authorization', 'Basic %s' % auth)
dom = xml.dom.minidom.parse(urllib2.urlopen(req))
count = int(dom.getElementsByTagName("fullcount")[0].lastChild.toxml())
cls = 'nounread'
if count > 0:
cls = 'unread'
else:
count = ''
return '{0}<img src="logo_emails.png" class="{1}"/>'.format(count, cls)
except:
pass
return '???'
def agenda():
""" Returns events from a google calendar URL. For instance you could use a
private one like:
https://www.google.com/calendar/feeds/[email address]/private-[stuff]/basic?[options]
The url is stored in calxmlurl.txt in the same folder as sources.py.
The options are whatever suits, I use:
orderby=starttime&sortorder=ascending&singleevents=true&futureevents=true&max-results=5
See the following for hints on options:
* https://developers.google.com/google-apps/calendar/v2/reference#Parameters
* https://developers.google.com/gdata/docs/2.0/reference#Queries
"""
try:
results = ""
URL = open("calxmlurl.txt").read()
dom = xml.dom.minidom.parse(urllib.urlopen(URL))
entries = dom.getElementsByTagName("entry")
for e in dom.getElementsByTagName("entry"):
# Parse out the event title
event = e.getElementsByTagName("title")[0].lastChild.toxml()\
.encode('ascii','ignore')
event = cgi.escape(
tools.unescape(tools.unescape(event)).encode('ascii'))
if len(event) > 20:
event = event[:17] + '...'
# Parse out the summary, this contains the start and end date/time
summary = e.getElementsByTagName("summary")[0].lastChild.toxml()\
.encode('ascii','ignore').split("\n")[0]
date = re.findall(
r'When:.*?[ ]([0-9]{1,2}[ ].*?[0-9]{4}).*?', summary)[0]
date = time.strptime(date, "%d %b %Y")
date = "%i%s" % (date.tm_mday, tools.ordinal(date.tm_mday))
times = re.findall(r'.*?([0-9]{2}:[0-9]{2}).*?', summary)
# Handle "All day" events
displaytime = "All day"
if len(times) > 0:
displaytime = times[0]
# Generate some HTML
results += "%s - <span class=\"dt\">%s, %s</span><br />" %\
(event, date, displaytime)
return results
except:
pass
return "???"
def forecast():
try:
URL = "ftp://ftp2.bom.gov.au/anon/gen/fwo/IDA00007.dat"
data = urllib.urlopen(URL).read()
temp = ""
for line in data.split("\n"):
if line.startswith("094029"):
if (line.split("#")[6] != ""):
temp = "Min: " + line.split("#")[6] + ", "
if (line.split("#")[7] != ""):
temp += "Max: " + line.split("#")[7]
if temp != "":
temp += "<br />"
for line in data.split("\n"):
if line.startswith("094029"):
return temp + line.split("#")[22]
except:
pass
return "???"
def temperature():
try:
URL = "http://www.bom.gov.au/fwo/IDT60901/IDT60901.94970.axf"
data = urllib.urlopen(URL).read()
for line in data.split("\n"):
if line.startswith("0,94970"):
return line.split(",")[7]
except:
pass
return "???" | [
"rwallhead@gmail.com"
] | rwallhead@gmail.com |
6d06e28c7bd02f094783d6e10e8a240deb8a4028 | d0d3697b723e11c33837b8de2d572a44b84a26db | /cruise_track_data_plotting.py | 8e0334b753d7f83150f732e2e9db4165f9c38eef | [
"MIT"
] | permissive | Swiss-Polar-Institute/science-data-utils | dc77e86a1cca0d7642daf97fa3e9045919efd867 | 6a85570ee586fa1ba1644ba2b1c9dea3a5257eae | refs/heads/master | 2022-08-09T10:02:13.678635 | 2021-11-29T20:03:47 | 2021-12-02T07:39:29 | 145,117,181 | 0 | 0 | MIT | 2022-06-21T21:37:22 | 2018-08-17T12:20:58 | Python | UTF-8 | Python | false | false | 3,863 | py | import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas
import datetime
#import seaborn as sns
def get_data_file(filepath, columns):
    """Read the named columns of a CSV file into a pandas DataFrame.

    The first row of the file is taken as the header row.
    """
    return pandas.read_csv(filepath, usecols=columns, header=0)
def plot_data_sources_from_file():
    """Plot Trimble GPS vs GLONASS cruise-track positions for 2017-01-02
    at one-second (top panel) and sixty-second (bottom panel) resolution.

    NOTE(review): input file paths are hard-coded to a local machine --
    consider parameterizing them.
    """
    # get some data from GPS
    filepath = '/home/jen/projects/ace_data_management/wip/cruise_track_data/ace_trimble_gps_2017-01-02.csv'
    columns = ['date_time', 'latitude', 'longitude', 'device_id']
    gps_data = get_data_file(filepath, columns)
    # Subsample every 60th row to get roughly one point per minute.
    sixty_sec_res_gps = gps_data.iloc[::60]
    # get some data from GLONASS
    filepath = '/home/jen/projects/ace_data_management/wip/cruise_track_data/ace_glonass_2017-01-02.csv'
    columns = ['date_time', 'latitude', 'longitude', 'device_id']
    glonass_data = get_data_file(filepath, columns)
    sixty_sec_res_glonass = glonass_data.iloc[::60]
    # Plot one second resolution data
    plt.subplot(211)
    plt.scatter(gps_data.longitude, gps_data.latitude, c="red", label="trimble")
    plt.scatter(glonass_data.longitude, glonass_data.latitude, c="green", label="glonass")
    plt.title("One-second resolution, 2017-01-02")
    plt.xlabel("Longitude, decimal degrees E")
    plt.ylabel("Latitude, decimal degrees N")
    plt.grid(True)
    plt.legend()
    # Plot sixty-second resolution data
    plt.subplot(212)
    plt.scatter(sixty_sec_res_gps.longitude, sixty_sec_res_gps.latitude, c="red", label="trimble")
    plt.scatter(sixty_sec_res_glonass.longitude, sixty_sec_res_glonass.latitude, c="green", label="glonass")
    plt.title("Sixty-second resolution, 2017-01-02")
    plt.xlabel("Longitude, decimal degrees E")
    plt.ylabel("Latitude, decimal degrees N")
    plt.grid(True)
    plt.legend()
    plt.tight_layout()
    plt.show()
def plot_data_sources_from_dataframe(dataframe, category):
    """Scatter-plot positions from `dataframe`, colour-coded by `category`.

    `dataframe` must have 'longitude' and 'latitude' columns; `category`
    names a column whose values drive the colour map.
    """
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.scatter(dataframe['longitude'], dataframe['latitude'], alpha=0.70, c=dataframe[category], cmap=cm.brg)
    plt.show()
# def get_flagged_glonass_data(filename, columns):
# # Get GLONASS data
# filepath = '/home/jen/projects/ace_data_management/wip/cruise_track_data/flagging_data_ace_glonass_2017-01-02.csv'
# columns = ['date_time', 'latitude', 'longitude', 'speed', 'device_id']
# glonass_data_flagged = get_data_file(filepath, columns)
# print(glonass_data_flagged.head(5))
#
# return glonass_data_flagged
def plot_speed(dataframe1, colour, legend_label):
    """Plot the speed of the vessel throughout the cruise to identify outlying speeds.

    Produces two figures: speed vs longitude (subsampled to ~1/minute),
    then a two-panel histogram of speed (full range and zoomed 0-20 kn).
    `dataframe1` must have 'longitude' and 'speed' columns.
    """
    # Plot speed data (every 60th row, i.e. roughly one point per minute)
    plt.scatter(dataframe1.iloc[::60].longitude, dataframe1.iloc[::60].speed, c=colour, label=legend_label)
    plt.title("Speed of vessel along track")
    plt.xlabel("Longitude")
    plt.ylabel("Speed of vessel, knots")
    plt.grid(True)
    plt.legend()
    plt.show()
    # Plot of frequency distribution of speed of vessel.
    plt.subplot(211)
    dataframe1['speed'].hist()
    plt.title("Frequency distribution of speed of vessel")
    plt.xlabel("Speed of vessel, knots")
    plt.ylabel("Count")
    plt.grid(True)
    # Zoomed-in view: 80 bins over 0-20 knots to show the bulk of the data.
    plt.subplot(212)
    dataframe1['speed'].hist(bins=80,range=[0,20])
    plt.title("Frequency distribution of speed of vessel")
    plt.xlabel("Speed of vessel, knots")
    plt.ylabel("Count")
    plt.grid(True)
    plt.tight_layout()
    plt.show()
# filepath = '/home/jen/projects/ace_data_management/wip/cruise_track_data/flagging_data_ace_trimble_gps_2017-01-02.csv'
# columns = ['date_time', 'latitude', 'longitude', 'speed', 'device_id']
# gps_data_flagged = get_data_file(filepath, columns)
# print(gps_data_flagged.head(5))
#
# plot_speed(gps_data_flagged, "red", "trimble")
| [
"jenny_t152@yahoo.co.uk"
] | jenny_t152@yahoo.co.uk |
705621984ef4661f63de4f2a9be8693afc845f01 | 3fd7adb56bf78d2a5c71a216d0ac8bc53485b034 | /tensorflow_data/sawyer/noup_28_dna5/conf.py | 04cd6eb5133f3a850c3be2f9893050d50ffa4d9b | [] | no_license | anair13/lsdc | 6d1675e493f183f467cab0bfe9b79a4f70231e4e | 7760636bea24ca0231b4f99e3b5e8290c89b9ff5 | refs/heads/master | 2021-01-19T08:02:15.613362 | 2017-05-12T17:13:54 | 2017-05-12T17:13:54 | 87,596,344 | 0 | 0 | null | 2017-04-08T00:18:55 | 2017-04-08T00:18:55 | null | UTF-8 | Python | false | false | 1,935 | py | import os
# Training configuration for the DNA video-prediction model (Sawyer data).
current_dir = os.path.dirname(os.path.realpath(__file__))
# tf record data location:
DATA_DIR = '/'.join(str.split(current_dir, '/')[:-3]) + '/pushing_data/sawyer_noup_29/train'
# local output directory
OUT_DIR = current_dir + '/modeldata'
# NOTE(review): mid-file import kept in place so DATA_DIR/OUT_DIR are
# defined first; moving it would change module import side-effect order.
from video_prediction.prediction_model_downsized_lesslayer import construct_model
configuration = {
'experiment_name': 'rndaction_var10',
'data_dir': DATA_DIR, # 'directory containing data.' ,
'output_dir': OUT_DIR, #'directory for model checkpoints.' ,
'current_dir': current_dir, #'directory for writing summary.' ,
'num_iterations': 50000, #'number of training iterations.' ,
'pretrained_model': '', # 'filepath of a pretrained model to resume training from.' ,
'sequence_length': 28, # 'sequence length, including context frames.' ,
'skip_frame': 1, # 'use ever i-th frame to increase prediction horizon' ,
'context_frames': 2, # of frames before predictions.' ,
'use_state': 1, #'Whether or not to give the state+action to the model' ,
'model': 'DNA', #'model architecture to use - CDNA, DNA, or STP' ,
'num_masks': 1, # 'number of masks, usually 1 for DNA, 10 for CDNA, STN.' ,
'schedsamp_k': 900.0, # 'The k hyperparameter for scheduled sampling -1 for no scheduled sampling.' ,
'train_val_split': 0.95, #'The percentage of files to use for the training set vs. the validation set.' ,
'batch_size': 32, #'batch size for training' ,
'learning_rate': 0.001, #'the base learning rate of the generator' ,
'visualize': '', #'load model from which to generate visualizations
'downsize': construct_model, #'create downsized model'
'file_visual': '', # datafile used for making visualizations
'penal_last_only': False, # penalize only the last state, to get sharper predictions
'dna_size': 5, #size of DNA kerns
'sawyer':'',
'numcam':2,
} | [
"frederik.ebert@mytum.de"
] | frederik.ebert@mytum.de |
1df51ff8f1c07cfd37c44135ee621e5a6a511252 | fe7133ea8e879631e63ef3c5312670464ae0970b | /email_test.py | f0d2e88d51707ef12a621e035d9e4ae58ea8429e | [] | no_license | jonathaw/general_scripts | 4f13c55d3544b829488c1d479c2feff1a6c26829 | 0cf47ab3ade55b9396cb5aea00e09dafd2694067 | refs/heads/master | 2021-01-17T02:50:54.936936 | 2017-03-19T17:18:44 | 2017-03-19T17:18:44 | 41,436,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | import smtplib
# Send a minimal test e-mail through a local SMTP server.
sender = 'jonathan.weinstein2012@gmail.com'
receivers = ['jonathan.weinstein2012@gmail.com']
# Bug fix: RFC 822 requires a blank line between the headers and the body;
# without it the body line was parsed as a (malformed) header.
message = """From: LSFManager <LSF@manager.com>
To: Me <jonathan.weinstein2012@gmail.com>
Subject: LSFManager Report

This is a test e-mail message.
"""
try:
    # Requires an MTA listening on localhost:25.
    smtpObj = smtplib.SMTP('localhost')
    smtpObj.sendmail(sender, receivers, message)
    print("Successfully sent email")
except (smtplib.SMTPException, OSError):
    # Narrowed from a bare `except:` so programming errors and
    # KeyboardInterrupt are no longer silently swallowed.
    print("Error: unable to send email")
"jonathan.weinstein@weizmann.ac.il"
] | jonathan.weinstein@weizmann.ac.il |
663ddf06b0dcc361f9b79ebadbd3809ffe539966 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/AlipayOpenAppSilanApigrayelevenQueryRequest.py | 6ae4d53b1a2df987c4f0ae79daab60b26f7e67a5 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 3,196 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenAppSilanApigrayelevenQueryRequest(object):
    """Request object for the `alipay.open.app.silan.apigrayeleven.query`
    OpenAPI endpoint.

    Generated-style SDK boilerplate: holds the business payload
    (`biz_model`) plus the common gateway parameters, and flattens them
    into the dict the API gateway expects via `get_params`.
    """
    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False
    # --- plain get/set properties for the common gateway parameters ---
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # Silently ignores non-dict values (SDK-wide convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        """Attach one extra user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Flatten the request into the parameter dict sent to the gateway.

        The biz_model is serialized to compact, key-sorted JSON; optional
        fields are included only when set.
        """
        params = dict()
        params[P_METHOD] = 'alipay.open.app.silan.apigrayeleven.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        """Return file-upload parameters; this request type has none."""
        multipart_params = dict()
        return multipart_params
return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
c509131d6d695838fd5f8caf0e0236271d308935 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ_16_2/16_2_1_Taizo_R1B_A.py | f57872090549f06b308b5880482ee39c7abee60d | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,168 | py |
# -*- coding: utf8 -*-
import sys
# inputFile = "A-small-attempt0.in"
# inputFile = "A-large-practice.in"
inputFile = "A-large.in"
f = open(inputFile)
sys.stdout = open(inputFile.replace(".in", ".txt"), 'w')
tc_num = int(f.readline().rstrip())
k1 = {"Z": ["ZERO", 0], "W": ["TWO", 2], "U": ["FOUR", 4], "X": ["SIX", 6], "G": ["EIGHT", 8]}
k2 = {"O": ["ONE", 1], "R": ["THREE", 3], "F": ["FIVE", 5], "S": ["SEVEN", 7]}
k3 = {"I": ["NINE", 9]}
for tc in range(tc_num):
s = f.readline().rstrip()
numbers = []
for k in k1:
# sys.stderr.write(k + "\n")
while k in s:
# sys.stderr.write(s + "\n")
for c in k1[k][0]:
s = s.replace(c, "", 1)
numbers.append(k1[k][1])
for k in k2:
while k in s:
for c in k2[k][0]:
s = s.replace(c, "", 1)
numbers.append(k2[k][1])
for k in k3:
while k in s:
for c in k3[k][0]:
s = s.replace(c, "", 1)
numbers.append(k3[k][1])
numbers.sort()
ans = ""
for n in numbers:
ans += str(n)
print("Case #" + str(tc + 1) + ": " + ans)
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
06b86c6227955f55df9ca40267865fd155d7cdd9 | 75e7093ba88fc8fb7c2787fc9b1f289f058f1807 | /reprounzip/setup.py | 80acb4c5d726fbd814e5e31c3c0d3e42347ffd71 | [
"BSD-3-Clause"
] | permissive | Aloma/reprozip | f8a6e7117d29d7b3a4477acf34f3e09993c7f235 | 449bebbcba0674467515383ecfbd6e9cee1f5dc1 | refs/heads/master | 2020-12-03T09:18:17.523917 | 2014-10-06T20:44:25 | 2014-10-06T20:44:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | import os
from setuptools import setup
import sys
# pip workaround
os.chdir(os.path.abspath(os.path.dirname(__file__)))
with open('README.rst') as fp:
description = fp.read()
req = [
'PyYAML',
'rpaths>=0.8']
if sys.version_info < (2, 7):
req.append('argparse')
setup(name='reprounzip',
version='0.4.1',
packages=['reprounzip', 'reprounzip.unpackers'],
entry_points={
'console_scripts': [
'reprounzip = reprounzip.main:main'],
'reprounzip.unpackers': [
'graph = reprounzip.unpackers.graph:setup',
'installpkgs = reprounzip.unpackers.default:setup_installpkgs',
'directory = reprounzip.unpackers.default:setup_directory',
'chroot = reprounzip.unpackers.default:setup_chroot']},
namespace_packages=['reprounzip', 'reprounzip.unpackers'],
install_requires=req,
description="Linux tool enabling reproducible experiments (unpacker)",
author="Remi Rampin, Fernando Chirigati, Dennis Shasha, Juliana Freire",
author_email='reprozip-users@vgc.poly.edu',
maintainer="Remi Rampin",
maintainer_email='remirampin@gmail.com',
url='http://vida-nyu.github.io/reprozip/',
long_description=description,
license='BSD',
keywords=['reprozip', 'reprounzip', 'reproducibility', 'provenance',
'vida', 'nyu'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Topic :: Scientific/Engineering',
'Topic :: System :: Archiving'])
| [
"remirampin@gmail.com"
] | remirampin@gmail.com |
d00d7f695d0fa7ea3119cd533450e09474399e48 | a66460a46611483dfbdc94c7996893f427e60d97 | /ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/avi/avi_ipaddrgroup.py | fd2cd3c38ff7a56e237e08e0d1bb627f9756c9f5 | [
"MIT"
] | permissive | otus-devops-2019-02/yyashkin_infra | 06b57807dde26f94f501828c07503d6bf1d70816 | 0cd0c003884155ac922e3e301305ac202de7028c | refs/heads/master | 2020-04-29T02:42:22.056724 | 2019-05-15T16:24:35 | 2019-05-15T16:24:35 | 175,780,718 | 0 | 0 | MIT | 2019-05-15T16:24:36 | 2019-03-15T08:37:35 | HCL | UTF-8 | Python | false | false | 4,970 | py | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_ipaddrgroup
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of IpAddrGroup Avi RESTful Object
description:
- This module is used to configure IpAddrGroup object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
addrs:
description:
- Configure ip address(es).
apic_epg_name:
description:
- Populate ip addresses from members of this cisco apic epg.
country_codes:
description:
- Populate the ip address ranges from the geo database for this country.
description:
description:
- User defined description for the object.
ip_ports:
description:
- Configure (ip address, port) tuple(s).
marathon_app_name:
description:
- Populate ip addresses from tasks of this marathon app.
marathon_service_port:
description:
- Task port associated with marathon service port.
- If marathon app has multiple service ports, this is required.
- Else, the first task port is used.
name:
description:
- Name of the ip address group.
required: true
prefixes:
description:
- Configure ip address prefix(es).
ranges:
description:
- Configure ip address range(s).
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the ip address group.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create an IP Address Group configuration
avi_ipaddrgroup:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
name: Client-Source-Block
prefixes:
- ip_addr:
addr: 10.0.0.0
type: V4
mask: 8
- ip_addr:
addr: 172.16.0.0
type: V4
mask: 12
- ip_addr:
addr: 192.168.0.0
type: V4
mask: 16
"""
RETURN = '''
obj:
description: IpAddrGroup (api/ipaddrgroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible module entry point: manage an Avi IpAddrGroup object.

    Builds the module argument spec, merges in the options common to all
    Avi modules, then hands control to the generic Avi API helper.
    """
    argument_specs = {
        'state': dict(default='present', choices=['absent', 'present']),
        'avi_api_update_method': dict(default='put', choices=['put', 'patch']),
        'avi_api_patch_op': dict(choices=['add', 'replace', 'delete']),
        'addrs': dict(type='list'),
        'apic_epg_name': dict(type='str'),
        'country_codes': dict(type='list'),
        'description': dict(type='str'),
        'ip_ports': dict(type='list'),
        'marathon_app_name': dict(type='str'),
        'marathon_service_port': dict(type='int'),
        'name': dict(type='str', required=True),
        'prefixes': dict(type='list'),
        'ranges': dict(type='list'),
        'tenant_ref': dict(type='str'),
        'url': dict(type='str'),
        'uuid': dict(type='str'),
    }
    # Fold in options shared by every Avi module (controller, username, ...).
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Delegate all create/update/delete handling to the generic helper.
    return avi_ansible_api(module, 'ipaddrgroup',
                           set())


if __name__ == '__main__':
    main()
| [
"theyashkins@gmail.com"
] | theyashkins@gmail.com |
c3227b65442886d4c177b72911ad5fcb24542c5d | c66c214f062bc4de08354bb15d4d7e343b6b7e4a | /custom_dists/gaussian_circle.py | b07fdde7579a75be61689138444384e4801054dd | [] | no_license | pinakm9/JKO | 1a297f58fd1630d33a2314c82e702208943098e8 | 88265e9f38040e3a6ec73edeec59c85b770bf0ed | refs/heads/master | 2023-05-10T11:03:00.482300 | 2021-06-12T19:42:48 | 2021-06-12T19:42:48 | 352,030,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,395 | py | import numpy as np
import scipy.stats as ss
import tensorflow as tf
import tensorflow_probability as tfp
class GaussianCircle:
    """
    Description:
        creates a multimodal distribution arranged on a circle uniformly
        using iid Gaussians (one mode per weight, all sharing one covariance)
    Args:
        cov: covariance matrix shared by every Gaussian mode
        weights: 1d array of (possibly unnormalized) mode weights
    """
    def __init__(self, cov, weights):
        self.cov = cov
        # normalize so the weights form a probability vector
        self.weights = weights / weights.sum()
        self.num_modes = len(weights)
        self.dim = cov.shape[0]
        # mode means sit on the unit circle in the first two coordinates;
        # any remaining coordinates stay at zero
        self.means = np.zeros((self.num_modes, self.dim))
        angle = 2.0 * np.pi / self.num_modes
        self.tf_probs = []
        scale_tril = tf.linalg.cholesky(cov)
        for i in range(self.num_modes):
            self.means[i, :2] = np.cos(i * angle), np.sin(i * angle)
            self.tf_probs.append(tfp.distributions.MultivariateNormalTriL(loc=self.means[i], scale_tril=scale_tril).prob)

    def sample(self, size):
        """
        Description:
            samples from the multimodal distribution
        Args:
            size: number of samples to be generated
        Returns:
            array of shape (size, dim) containing the generated samples
        """
        samples = np.zeros((size, self.dim))
        idx = np.random.choice(self.num_modes, size=size, replace=True, p=self.weights)
        # draw one batched Gaussian sample per mode instead of one RNG call
        # per sample (the previous per-sample loop was O(size) Python calls)
        for mode in range(self.num_modes):
            members = np.flatnonzero(idx == mode)
            if members.size:
                samples[members] = np.random.multivariate_normal(
                    mean=self.means[mode], cov=self.cov, size=members.size)
        return samples

    def pdf(self, x):
        """
        Description:
            computes probability density for given samples
        Args:
            x: samples at which the density is to be computed
        Returns:
            the computed (mixture) probability densities
        """
        probs = 0.0
        for i in range(self.num_modes):
            probs += self.weights[i] * ss.multivariate_normal.pdf(x, mean=self.means[i], cov=self.cov)
        return probs

    def prob(self, x):
        """
        Description:
            computes probability density for given samples in tensorflow format
        Args:
            x: samples at which the density is to be computed
        Returns:
            the computed probability densities as a (n, 1) tensor
        """
        probs = 0.0
        for i in range(self.num_modes):
            probs += self.weights[i] * self.tf_probs[i](x)
        return tf.reshape(probs, (-1, 1))
return tf.reshape(probs, (-1, 1)) | [
"pinakm9@gmail.com"
] | pinakm9@gmail.com |
94f6e3cf69a00aa6b1808778722130a275ee4713 | e4eabccc6d971289cf13653d1b6f290e39b870ab | /1407-group-the-people-given-the-group-size-they-belong-to/group-the-people-given-the-group-size-they-belong-to.py | b33c28a8c7eb9dab579c4c5b78fa1624574c7390 | [] | no_license | HEroKuma/leetcode | 128b38a9f559dc9e3f21c86a47ede67ad72f7675 | b3045aaedbe98eddc7e4e518a03a9337a63be716 | refs/heads/master | 2023-01-03T12:12:31.018717 | 2020-11-01T16:56:47 | 2020-11-01T16:56:47 | 260,488,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,650 | py | # There are n people that are split into some unknown number of groups. Each person is labeled with a unique ID from 0 to n - 1.
#
# You are given an integer array groupSizes, where groupSizes[i] is the size of the group that person i is in. For example, if groupSizes[1] = 3, then person 1 must be in a group of size 3.
#
# Return a list of groups such that each person i is in a group of size groupSizes[i].
#
# Each person should appear in exactly one group, and every person must be in a group. If there are multiple answers, return any of them. It is guaranteed that there will be at least one valid solution for the given input.
#
#
# Example 1:
#
#
# Input: groupSizes = [3,3,3,3,3,1,3]
# Output: [[5],[0,1,2],[3,4,6]]
# Explanation:
# The first group is [5]. The size is 1, and groupSizes[5] = 1.
# The second group is [0,1,2]. The size is 3, and groupSizes[0] = groupSizes[1] = groupSizes[2] = 3.
# The third group is [3,4,6]. The size is 3, and groupSizes[3] = groupSizes[4] = groupSizes[6] = 3.
# Other possible solutions are [[2,1,6],[5],[0,4,3]] and [[5],[0,6,2],[4,3,1]].
#
#
# Example 2:
#
#
# Input: groupSizes = [2,1,3,3,3,2]
# Output: [[1],[0,5],[2,3,4]]
#
#
#
# Constraints:
#
#
# groupSizes.length == n
# 1 <= n <= 500
# 1 <= groupSizes[i] <= n
#
#
class Solution:
    def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
        """Partition person ids 0..n-1 so that each person i ends up in a
        group of exactly groupSizes[i] members.

        People requesting the same size are bucketed together, then each
        bucket is chopped into consecutive chunks of that size.
        """
        buckets = {}
        for person, size in enumerate(groupSizes):
            buckets.setdefault(size, []).append(person)
        groups = []
        for size, members in buckets.items():
            for start in range(0, len(members), size):
                groups.append(members[start:start + size])
        return groups
| [
"zx8733520+github@gapp.nthu.edu.tw"
] | zx8733520+github@gapp.nthu.edu.tw |
6ac4b9f6d7a8296aec554296ab0dc68eaee88daa | eacfc1c0b2acd991ec2cc7021664d8e79c9e58f6 | /ccpnmr2.4/python/ccp/format/spectra/params/FactorisedParams.py | 9e8686207c6eec119c39f5eea89f9cd2b36199d8 | [] | no_license | edbrooksbank/ccpnmr2.4 | cfecb0896dcf8978d796e6327f7e05a3f233a921 | f279ca9bb2d972b1ce075dad5fcc16e6f4a9496c | refs/heads/master | 2021-06-30T22:29:44.043951 | 2019-03-20T15:01:09 | 2019-03-20T15:01:09 | 176,757,815 | 0 | 1 | null | 2020-07-24T14:40:26 | 2019-03-20T14:59:23 | HTML | UTF-8 | Python | false | false | 11,487 | py |
"""
======================COPYRIGHT/LICENSE START==========================
AzaraParams.py
Copyright (C) 2008 Wayne Boucher and Tim Stevens (University of Cambridge)
=======================================================================
This file contains reserved and/or proprietary information
belonging to the author and/or organisation holding the copyright.
It may not be used, distributed, modified, transmitted, stored,
or in any way accessed, except by members or employees of the CCPN,
and by these people only until 31 December 2008 and in accordance with
the guidelines of the CCPN.
A copy of this license can be found in ../../../license/CCPN.license.
======================COPYRIGHT/LICENSE END============================
for further information, please contact :
- CCPN website (http://www.ccpn.ac.uk/)
- email: ccpn@bioc.cam.ac.uk
- contact the authors: wb104@bioc.cam.ac.uk, tjs23@cam.ac.uk
=======================================================================
If you are using this software for academic purposes, we suggest
quoting the following references:
===========================REFERENCE START=============================
R. Fogh, J. Ionides, E. Ulrich, W. Boucher, W. Vranken, J.P. Linge, M.
Habeck, W. Rieping, T.N. Bhat, J. Westbrook, K. Henrick, G. Gilliland,
H. Berman, J. Thornton, M. Nilges, J. Markley and E. Laue (2002). The
CCPN project: An interim report on a data model for the NMR community
(Progress report). Nature Struct. Biol. 9, 416-418.
Wim F. Vranken, Wayne Boucher, Tim J. Stevens, Rasmus
H. Fogh, Anne Pajon, Miguel Llinas, Eldon L. Ulrich, John L. Markley, John
Ionides and Ernest D. Laue (2005). The CCPN Data Model for NMR Spectroscopy:
Development of a Software Pipeline. Proteins 59, 687 - 696.
===========================REFERENCE END===============================
"""
import os

from memops.universal.ElementTree import ElementTree as ET
from memops.universal import BlockData

from memops.general.Implementation import ApiError

from ccp.format.spectra.params.ExternalParams import ExternalParams
from ccp.format.spectra.params.ExternalParams import freqDimType, fidDimType
from ccp.util.Software import getMethod, getSoftware
class FactorisedParams(ExternalParams):
    """Spectrum parameter reader for 'Factorised' decomposition XML files.

    Parses a <Decomposition> document into the generic spectrum parameters
    defined by ExternalParams (one entry per <Axis>), and can import the
    <Component> peak data into a new PeakList on a CCPN DataSource.
    """

    format = 'Factorised'

    def __init__(self, parFile, **kw):
        # parFile: path of the Factorised XML parameter file to parse
        self.parFile = parFile
        self.dim = -1
        ExternalParams.__init__(self, **kw)

    # ExternalParams requires this to be defined
    def parseFile(self):
        """Parse self.parFile, filling self.topParams and the per-dimension
        arrays (npts, sf, sw, refppm, refpt, nuc, dimType, ...).

        Raises ApiError if the file cannot be read or a <Component> lacks a
        required attribute; returns silently (after a warning) if no
        <Decomposition> element is found.
        """
        # read file and get Decomposition node
        try:
            elementTree = ET.parse(self.parFile)
        except IOError as e:
            raise ApiError(str(e))

        topNode = elementTree.getroot()
        if topNode.tag == 'Decomposition':
            # Decomposition is document element
            decomposition = topNode
        else:
            # look for Decompositions directly within document element
            decompositions = list(topNode.findall('Decomposition'))
            if decompositions:
                if len(decompositions) > 1:
                    print ('WARNING, found %s Decompositions in file %s\n Using first one'
                           % (len(decompositions), self.parFile))
                decomposition = decompositions[0]
            else:
                print ('WARNING, no Decompositions found in file %s' % self.parFile)
                return

        # get top-level parameters from Decomposition node
        self.dataFile = self.parFile
        # endianness is irrelevant for an XML file - set to the model default
        # to avoid attempts to determine it from the data
        self.big_endian = True
        self.topParams['nodeTree'] = elementTree
        self.topParams['numShapes'] = int(decomposition.get('nshapes'))
        self.topParams['isReconstructable'] = (decomposition.get('reconstructable')
                                               in ('True', 'true'))
        self.topParams['isResolved'] = (decomposition.get('resolved')
                                        in ('True', 'true'))
        pulProgName = decomposition.get('refexperiment')
        if pulProgName is not None:
            self.pulProgName = pulProgName
            self.pulProgType = 'ccpn'
        # the following are not meaningful and/or use the defaults:
        # 'head', 'integer', 'numRecords'

        # get axes in shape number order (the 'a' attribute)
        ll = []
        for elem in decomposition.findall('Axis'):
            ll.append((int(elem.get('a')), elem))
        axes = [x[1] for x in sorted(ll, key=lambda tt: tt[0])]

        # get ndims and initialise the per-dimension arrays
        self.ndim = len(axes)
        self.initDims()

        # per-dimension parameters (from Axis):
        # 'npts', 'nuc', 'sw', 'sf', 'refppm', 'refpt', 'dimType'
        # The following is not relevant: 'block'
        # extra parameters 'name', 'isComplex', 'shapeSerial'
        for dim, axis in enumerate(axes):
            self.npts[dim] = npts = int(axis.get('size'))
            self.sf[dim] = float(axis.get('sfo'))
            swppm = float(axis.get('swppm'))
            self.sw[dim] = swppm * self.sf[dim]
            self.refppm[dim] = refppm = float(axis.get('startppm'))
            self.refpt[dim] = refpt = 1.0
            self.nuc[dim] = self.standardNucleusName(axis.get('nucleus'))

            origSize = axis.get('origsize')
            if origSize:
                self.numPointsOrig[dim] = numPointsOrig = int(origSize)
            else:
                numPointsOrig = npts

            if axis.get('domain').lower() == 'time':
                self.dimType[dim] = fidDimType
            else:
                self.dimType[dim] = freqDimType
            self.isComplex[dim] = (axis.get('type').lower() == 'complex')

            dimParams = self.dimParams[dim]
            dimParams['name'] = axis.get('name')
            dimParams['shapeSerial'] = int(axis.get('a'))

            # carrier ppm. Reset referencing, and handle point offset
            carppm = axis.get('carppm')
            if carppm:
                carppm = float(carppm)
                refpt = self.refpt[dim] = refpt + (refppm - carppm) * npts / swppm
                refppm = self.refppm[dim] = carppm

            # NB add 1.5 before int call to get nearest int
            # (pointOffset is always positive)
            pointOffset = int(numPointsOrig / 2 + 1.5 - refpt)
            if numPointsOrig == npts:
                if pointOffset > npts or pointOffset < 0:
                    # Likely numPointsOrig was not set correctly
                    pointOffset = 0
            self.pointOffset[dim] = pointOffset

        # get Component parameters, in component number ('c') order
        ll = []
        for elem in decomposition.findall('Component'):
            dd = {}
            index = elem.get('c')
            if index is None:
                raise ApiError(
                    "Incorrect component, lacks 'c' attribute: <Component %s >"
                    % elem.attrib
                )
            ampl = elem.get('ampl')
            if ampl is None:
                raise ApiError(
                    "Incorrect component, lacks 'ampl' attribute: <Component %s >"
                    % elem.attrib
                )
            else:
                dd['amplitude'] = float(ampl)
            regionId = elem.get('regionid')
            if regionId:
                dd['regionId'] = int(regionId)
            dd['status'] = elem.get('status')
            dd['annotation'] = elem.get('annotation')
            # sort numerically, not lexically ('10' < '2' as strings);
            # parsePeaks already requires 'c' to be an integer
            ll.append((int(index), dd))
        #
        self.topParams['componentList'] = [x[1] for x in
                                           sorted(ll, key=lambda tt: tt[0])]

    def parsePeaks(self, dataSource, software=None):
        """Parse peaks from the file and add them to a new PeakList on
        dataSource.

        Each <Component> contributes the cartesian product of its per-shape
        <Peaks> entries; a peak's height is the component amplitude times the
        product of the 1D peak intensities. Components missing peaks for any
        shape are skipped.
        """
        decomposition = self.topParams['nodeTree'].getroot()
        nshapes = int(decomposition.get('nshapes'))
        ncompStr = decomposition.get('ncomponents')
        if ncompStr:
            ncomp = int(ncompStr)
        else:
            # 'ncomponents' is optional; no consistency check if absent
            ncomp = None
        name = decomposition.get('name')
        peakList = dataSource.newPeakList(name=name)

        if nshapes != dataSource.numDim:
            raise Exception(
                "Reading peaks for %s:Not implemented yet: nshapes %s != numDims %s"
                % (dataSource, nshapes, dataSource.numDim)
            )

        else:
            # one shape per dimension. Add peaks
            if not software:
                software = getSoftware(dataSource.root, name='unknown',
                                       version='unknown')
            heightMethod = getMethod(software, task='find peak height',
                                     procedure='Factorised dataSource import')

            compFound = 0
            for elem in decomposition:
                if elem.tag == 'Component':
                    compFound += 1
                    ampl = float(elem.get('ampl'))
                    compNo = int(elem.get('c'))
                    shapePeaks = {}
                    lenArray = []
                    for ee2 in elem:
                        if ee2.tag == 'Shape':
                            a = int(ee2.get('a'))
                            data = None
                            for ee3 in ee2:
                                if ee3.tag == 'Peaks':
                                    data = unpackListElement(ee3)
                                    break
                            if data:
                                shapePeaks[a] = data
                                lenArray.append(len(data))
                            else:
                                # a shape with no picked peaks - skip component
                                break
                        else:
                            # unexpected child element - skip component
                            break
                    else:
                        # There were peaks picked for all shapes
                        # now make peaks
                        peaks1D = [x[1] for x in sorted(shapePeaks.items())]
                        nPeaks, cumul = BlockData.cumulativeArray(lenArray)
                        for index in range(nPeaks):
                            # 26 May 2011: WARNING: arrayOfIndex implementation was broken
                            # for Analysis usage so it was fixed but that might break below
                            arr = BlockData.arrayOfIndex(index, cumul)
                            height = ampl
                            peak = peakList.newPeak(componentNumbers=(compNo,))
                            peakDims = peak.sortedPeakDims()
                            for ii in range(nshapes):
                                peak1D = peaks1D[ii][arr[ii]]
                                height *= peak1D['intensity']
                                peakDim = peakDims[ii]
                                pos = peak1D.get('pos')
                                if pos is None:
                                    # no point position: fall back to ppm value
                                    peakDim.value = peak1D['posppm']
                                else:
                                    peakDim.position = pos

                                # set up dataDimRef: pick the one with the
                                # lowest expDimRef serial, if any exist
                                ll = [(x.expDimRef.serial, x)
                                      for x in peakDim.dataDim.dataDimRefs]
                                if ll:
                                    ll.sort()
                                    peakDim.dataDimRef = ll[0][1]
                                else:
                                    peakDim.dataDimRef = None

                            # read width
                            # NBNB TBD
                            # NBNB we are not using PeakBasic.pickPeak or any of the setup
                            # routines called by it, as we can assume that all peaks found
                            # are in the principal region.
                            peak.newPeakIntensity(intensityType='height', value=height,
                                                  method=heightMethod)

            if ncomp is not None and compFound != ncomp:
                print ("WARNING, %s components expected, %s found"
                       % (ncomp, compFound))
def unpackListElement(elem):
    """Unpack a list-type element into a list of dictionaries.

    The element carries a comma-separated column-name string in its 'list'
    attribute and whitespace-separated values in its text; each row of
    ncols values becomes one dict mapping column name to value. Values are
    coerced to int, then float, falling back to the raw string. Any trailing
    partial row is silently ignored (as before).

    Raises Exception if the column names are not unique.
    """
    keywds = [x.strip() for x in elem.get('list').split(',')]
    ncols = len(keywds)

    if len(set(keywds)) != len(keywds):
        # duplicate column names cannot be represented as dict keys
        raise Exception("duplicate keywords parsing not implemented yet")

    if elem.text:
        data = elem.text.split()
    else:
        data = []

    def _coerce(token):
        # int() / float() on a non-empty token can only raise ValueError here;
        # the previous bare 'except:' clauses were needlessly broad
        try:
            return int(token)
        except ValueError:
            try:
                return float(token)
            except ValueError:
                return token

    result = []
    nlines = len(data) // ncols
    for row in range(nlines):
        start = row * ncols
        result.append(dict((keywds[col], _coerce(data[start + col]))
                           for col in range(ncols)))
    #
    return result
| [
"ejb66@le.ac.uk"
] | ejb66@le.ac.uk |
aa09d6bdb5805a8c7fee8a75dfc9873d3a8b7afc | cefd6c17774b5c94240d57adccef57d9bba4a2e9 | /WebKit/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py | fcc76ca14127db2dbe9b959c1bd40143f03524e3 | [
"BSL-1.0"
] | permissive | adzhou/oragle | 9c054c25b24ff0a65cb9639bafd02aac2bcdce8b | 5442d418b87d0da161429ffa5cb83777e9b38e4d | refs/heads/master | 2022-11-01T05:04:59.368831 | 2014-03-12T15:50:08 | 2014-03-12T15:50:08 | 17,238,063 | 0 | 1 | BSL-1.0 | 2022-10-18T04:23:53 | 2014-02-27T05:39:44 | C++ | UTF-8 | Python | false | false | 2,323 | py | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
class AbstractSequencedCommand(Command):
    """Base class for tool commands implemented as a fixed StepSequence."""

    # Subclasses set this to the ordered list of step classes to run.
    steps = None

    def __init__(self):
        self._sequence = StepSequence(self.steps)
        Command.__init__(self, self._sequence.options())

    def _prepare_state(self, options, args, tool):
        """Hook for subclasses: build the initial state passed to the steps."""
        return None

    def execute(self, options, args, tool):
        try:
            state = self._prepare_state(options, args, tool)
        except ScriptError as error:
            _log.error(error.message_with_output())
            self._exit(error.exit_code or 2)
        self._sequence.run_and_handle_errors(tool, options, state)
| [
"adzhou@hp.com"
] | adzhou@hp.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.