hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
357951c0b40f80c53a3a5f44fa52511a12076619 | 6,426 | py | Python | python/utility_functions.py | stellarpower/vio_common | 5508203cbcc166cbcd34dc0a6f7852c73e2cff55 | [
"BSD-3-Clause"
] | 16 | 2017-06-02T07:22:31.000Z | 2022-03-23T02:39:39.000Z | python/utility_functions.py | stellarpower/vio_common | 5508203cbcc166cbcd34dc0a6f7852c73e2cff55 | [
"BSD-3-Clause"
] | 2 | 2020-08-10T04:01:35.000Z | 2021-01-18T08:21:17.000Z | python/utility_functions.py | stellarpower/vio_common | 5508203cbcc166cbcd34dc0a6f7852c73e2cff55 | [
"BSD-3-Clause"
] | 19 | 2017-08-03T02:23:11.000Z | 2021-09-22T02:17:46.000Z | import json
import os
import numpy as np
from numpy import genfromtxt
SECOND_TO_MILLIS = 1000
SECOND_TO_MICROS = 1000000
SECOND_TO_NANOS = 1000000000
TIME_UNIT_TO_DECIMALS = {'s': 0, "ms": 3, "us": 6, "ns": 9}
def parse_time(timestamp_str, time_unit):
    """
    Convert a timestamp string to a (secs, nsecs) integer pair
    (rospy-time style).

    If the string contains a decimal dot it is interpreted as a float
    number of seconds; otherwise it is interpreted as an integer count
    of *time_unit* units.

    :param timestamp_str: the timestamp text to parse
    :param time_unit: one of 's', 'ms', 'us', 'ns' (key into
        TIME_UNIT_TO_DECIMALS); only consulted when the string has no dot
    :return: tuple (secs, nsecs) of ints
    """
    secs = 0
    nsecs = 0
    if '.' in timestamp_str:
        if 'e' in timestamp_str:
            # Scientific notation (e.g. "1.5e9"): fall back to float math.
            stamp = float(timestamp_str)
            secs = int(stamp)
            nsecs = int(round((stamp - secs) * SECOND_TO_NANOS))
        else:
            # Split on the dot so the integral seconds stay exact.
            index = timestamp_str.find('.')
            if index == 0:
                # ".123" - fractional part only.
                nsecs = int(
                    round(float(timestamp_str[index:]) * SECOND_TO_NANOS))
            elif index == len(timestamp_str) - 1:
                # "123." - integral part only.
                secs = int(timestamp_str[:index])
            else:
                secs = int(timestamp_str[:index])
                nsecs = int(
                    round(float(timestamp_str[index:]) * SECOND_TO_NANOS))
        return secs, nsecs
    else:
        # No dot: the whole string is an integer count of time_unit units.
        decimal_count = TIME_UNIT_TO_DECIMALS[time_unit]
        if len(timestamp_str) <= decimal_count:
            # Fewer digits than one second's worth: purely fractional.
            return 0, int(timestamp_str) * 10**(9 - decimal_count)
        else:
            if decimal_count == 0:
                val = float(timestamp_str)
                return int(val), int(
                    (val - int(val)) * 10**(9 - decimal_count))
            else:
                # Split digits into whole seconds and sub-second remainder.
                return int(timestamp_str[0:-decimal_count]),\
                    int(timestamp_str[-decimal_count:]) * 10 ** \
                    (9 - decimal_count)
def is_float(element_str):
    """Return True when *element_str* parses as a Python float.

    Scientific notation such as ``30e5`` counts as a float, whereas
    suffixed literals such as ``2131F`` or ``2344f`` do not.
    """
    try:
        float(element_str)
    except ValueError:
        return False
    return True
def is_header_line(line):
    """Heuristically decide whether *line* is a header/comment line.

    A line counts as a header when it starts with a common comment
    marker ('%', '#', '//'), or when its first character is not a digit.
    """
    for marker in ('%', '#', '//'):
        if line.startswith(marker):
            return True
    # Data rows start with a number; anything else is treated as a header.
    return not line[0].isdigit()
def decide_delimiter(line):
    """Pick the delimiter for *line* from the common candidates.

    Returns whichever of ',' and ' ' occurs more often; ties resolve
    to ',' because it is listed first.
    """
    candidates = [',', ' ']
    # max() returns the first candidate reaching the highest count,
    # matching the original first-wins tie-break.
    return max(candidates, key=line.count)
def decide_time_index_and_unit(lines, delimiter):
    """
    Guess which column holds time, its unit, and where translation starts.

    Time and frame number are at index 0 and 1.
    Frame number may not exist.
    At least two lines are required to decide if frame number exists.
    Following the time or frame number is the tx ty tz and quaternions.
    Unit is decided as either nanosec or sec
    depending on if decimal dot is found.
    So the unit can be wrong if timestamps in units ns or ms are provided.

    :param lines: at least two delimited data lines from the file
    :param delimiter: column separator used inside each line
    :return: tuple (time column index, time unit 's'/'ns', tx column index)
    :raises ValueError: if fewer than two lines are supplied
    """
    if len(lines) < 2:
        raise ValueError("Not enough lines to determine time index")
    value_rows = []
    for line in lines:
        rags = line.rstrip(delimiter).split(delimiter)
        value = [float(rag) for rag in rags]
        value_rows.append(value)
    value_array = np.array(value_rows)
    # The difference of the last two rows tells which of the first two
    # columns are integer counters (frame ids, nanosecond stamps) and
    # which vary by fractional seconds.
    delta_row = value_array[-1, :] - value_array[-2, :]
    whole_number = [value.is_integer() for value in delta_row]
    if whole_number[0]:
        if whole_number[1]:
            # Both leading columns are integers: the faster-growing one
            # must be the nanosecond timestamp.
            if delta_row[0] < delta_row[1]: # frame id time[ns] tx[m] ty tz
                time_index = 1
                time_unit = 'ns'
                t_index = 2
            else: # time[ns] frame id tx ty tz
                time_index = 0
                time_unit = 'ns'
                t_index = 2
        else:
            # Large integer step implies nanoseconds; small implies the
            # first column is a frame counter. The 100 threshold is a
            # heuristic -- TODO confirm against expected frame rates.
            if delta_row[0] > 100: # time[ns] tx ty tz
                time_index = 0
                time_unit = 'ns'
                t_index = 1
            else: # frame id time[s] tx ty tz
                time_index = 1
                time_unit = 's'
                t_index = 2
    else:
        if whole_number[1]:
            # time[s] frame id tx ty tz
            time_index = 0
            time_unit = 's'
            t_index = 2
        else:
            # time[s] tx ty tz
            time_index = 0
            time_unit = 's'
            t_index = 1
    return time_index, time_unit, t_index
def normalize_quat_str(val_str_list):
    """Normalize a quaternion given as four decimal strings.

    If any component already carries more than 8 decimal digits the
    input is assumed to be high precision and is returned untouched;
    otherwise the values are scaled to unit norm and re-formatted.
    """
    decimal_digits = max(len(s) - s.find('.') - 1 for s in val_str_list)
    if decimal_digits > 8:
        return val_str_list
    quat = np.array([float(s) for s in val_str_list])
    quat /= np.linalg.norm(quat)
    # Only the first four components are formatted, as before.
    return ["{}".format(quat[j]) for j in range(4)]
def read_pose_from_json(pose_json):
    """Load a pose (translation + quaternion) from a JSON file.

    :param pose_json: path to a JSON file containing ``translation``
        with keys x/y/z and ``rotation`` with keys i/j/k/w
    :return: list [x, y, z, qx, qy, qz, qw] of floats
    """
    with open(pose_json, 'r') as stream:
        data = json.load(stream)
    translation = data['translation']
    rotation = data['rotation']
    return [
        float(translation['x']),
        float(translation['y']),
        float(translation['z']),
        float(rotation['i']),
        float(rotation['j']),
        float(rotation['k']),
        float(rotation['w']),
    ]
def interpolate_imu_data(time_gyro_array, time_accel_array):
    """Resample accelerometer readings at the gyroscope timestamps.

    :param time_gyro_array: rows of [time in sec, gx, gy, gz]
    :param time_accel_array: rows of [time in sec, ax, ay, az]
    :return: rows of [time in sec, gx, gy, gz, ax, ay, az]
    """
    gyro_times = time_gyro_array[:, 0]
    accel_times = time_accel_array[:, 0]
    # Linearly interpolate each accelerometer axis at the gyro epochs.
    interpolated = [
        np.interp(gyro_times, accel_times, time_accel_array[:, axis])
        for axis in range(1, 4)
    ]
    return np.column_stack(
        (time_gyro_array, interpolated[0], interpolated[1], interpolated[2]))
def load_advio_imu_data(file_csv):
    """Read an ADVIO-format IMU CSV file into a numpy array.

    :param file_csv: path to a headerless CSV whose rows are
        [time in sec, x, y, z]
    :return: numpy array of shape (n, 4)
    """
    return genfromtxt(file_csv, skip_header=0, delimiter=',')
def check_file_exists(filename):
    """Raise OSError when *filename* does not exist on disk."""
    if os.path.exists(filename):
        return
    raise OSError("{} does not exist".format(filename))
| 31.194175 | 77 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,538 | 0.23934 |
357a7c212d64ef469b6cfe0399c8c3f62da9f7bf | 7,160 | py | Python | app/models.py | nickspeal/net-zero-python-backend | a31139fe8d365abed4f48407a2f407a9b5ac7357 | [
"MIT"
] | null | null | null | app/models.py | nickspeal/net-zero-python-backend | a31139fe8d365abed4f48407a2f407a9b5ac7357 | [
"MIT"
] | null | null | null | app/models.py | nickspeal/net-zero-python-backend | a31139fe8d365abed4f48407a2f407a9b5ac7357 | [
"MIT"
] | null | null | null | from app import db
# Junction Tables for many-to-many relationships
campaign_users = db.Table('campaign_users',
    db.Column('campaign', db.Integer, db.ForeignKey('campaigns.id'), primary_key=True),
    # users.username is a String primary key, so this referencing column
    # must be String too (it was previously declared Integer, which
    # mismatched the referenced column's type).
    db.Column('user', db.String, db.ForeignKey('users.username'), primary_key=True),
)

campaign_vehicles = db.Table('campaign_vehicles',
    db.Column('campaign', db.Integer, db.ForeignKey('campaigns.id'), primary_key=True),
    db.Column('vehicle', db.Integer, db.ForeignKey('vehicles.id'), primary_key=True),
)
class Campaign(db.Model):
    ''' A Campaign is a context in which to tally a carbon footprint total.
    For example, a personal total, or an activity shared across multiple people
    '''
    __tablename__ = 'campaigns'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, unique=True)
    # Offsets applied to this campaign (presumably kg CO2 -- TODO confirm units).
    offsets_available = db.Column(db.Float, nullable=False, default=0.0)
    # Many-to-many links through the junction tables; each relationship also
    # installs a 'campaigns' backref on the other model.
    users = db.relationship('User', secondary=campaign_users, lazy='subquery', backref=db.backref('campaigns', lazy=True))
    vehicles = db.relationship('Vehicle', secondary=campaign_vehicles, lazy='subquery', backref=db.backref('campaigns', lazy=True))
    # Other resources
    # consumptions = TODO m2m relationship to consumptions
    # offsets = db.relationship('Offset', backref='campaign', lazy=True)
    def __repr__(self):
        return '<Campaign {}>'.format(self.name)
class User(db.Model):
    ''' A person with an account
    '''
    __tablename__ = 'users'
    # The username itself serves as the primary key (no numeric id column).
    username = db.Column(db.String, primary_key=True)
    # campaigns attribute is backreferenced
    def __repr__(self):
        return '<User {}>'.format(self.username)
class Vehicle(db.Model):
    ''' A Vehicle is a type of Resource
    Resources have an increasing counter (i.e. odometer, gas meter) that can be snapshotted over time to measure usage
    Usage can be converted into CO2 emitted according with some linear factors
    '''
    __tablename__ = 'vehicles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, nullable=False)
    units = db.Column(db.String, nullable=False, default='km') # Should be km for all vehicles for now. This column exists for extensibility to other resources.
    notes = db.Column(db.String, nullable=True)
    # Fuel consumption in litres per 100 km.
    fuel_l_per_100km = db.Column(db.Float, nullable=False, default=10.6) # Default average car 22 mpg
    # Estimated kg CO2 emitted while manufacturing the vehicle.
    carbon_to_manufacture = db.Column(db.Float, nullable=False, default=10000) # Default wild estimate based on quick search
    # Expected total lifetime mileage, used to amortize manufacturing carbon.
    expected_life_km = db.Column(db.Float, nullable=False, default=321868) # Default based on guess of 200k miles
    def get_carbon_per_unit(self):
        ''' Calculate the CO2 emission per mile of driving
        as the sum of contributions of burning gas and deprecating a vehicle
        that emitted lots of carbon during manufacure
        Units of kg CO2 per km
        '''
        GAL_PER_L = 0.2641729
        CARBON_PER_LITRE = 8.9 * GAL_PER_L # 8.9 kg CO2 per gallon of gas: https://www.epa.gov/greenvehicles/greenhouse-gas-emissions-typical-passenger-vehicle-0
        # Emissions from the fuel burned per km driven.
        gas_contribution = ( self.fuel_l_per_100km / 100.0 ) * CARBON_PER_LITRE
        # Amortized share of the manufacturing footprint per km of vehicle life.
        deprecation_contribution = self.carbon_to_manufacture / self.expected_life_km
        return gas_contribution + deprecation_contribution
    def __repr__(self):
        return '<Vehicle {}>'.format(self.id)
class ResourceMeasurement(db.Model):
    ''' This table stores the timeseries of all measurements for all resources (i.e. car odometer readings over time)
    '''
    __tablename__ = 'resource_measurements'
    id = db.Column(db.Integer, primary_key=True)
    # When the reading was taken.
    date = db.Column(db.DateTime, nullable=False)
    # The counter value at that time (e.g. odometer reading).
    value = db.Column(db.Float, nullable=False)
    # NOTE(review): despite the generic column name, only vehicles are
    # supported so far, hence the FK to 'vehicles'.
    resource = db.Column(db.Integer, db.ForeignKey('vehicles.id'), nullable=False)
## TODO FUTURE - Add additional models
# The following models are stubbed out, but likely don't work yet
# They're needed to complete a larger carbon footprint picture for a Campaign
# Add new consumptions to track each contribution to your carbon footprint (as discrete events)
# They can be of different types, (in an enum table) mostly just for future potential categorization features and stats across users
# Add Offsets each time you buy an offset and apply it to a campaign
# class Consumption(db.Model):
# ''' A Consumption is a thing that has a carbon footprint. I.e. a flight, or a cheeseburger, or a bonfire '''
# __tablename__ = 'consumptions'
# id = db.Column(db.Integer, primary_key=True)
# name = db.Column(db.String, nullable=False)
# date = db.Column(db.DateTime, nullable=False)
# category = # TODO foreign key
# quantity = db.Column(db.Float, nullable=False)
# units = db.Column(db.String, nullable=False)
# carbon_per_unit = db.Column(db.Float, nullable=False)
# # Footprint off the consumption can be derived from the product of quantity and carbon_per_unity
# def __repr__(self):
# return '<Consumption {}, {}>'.format(self.name, self.id)
# class ConsumptionCategories(db.Model):
# ''' Enumeration of categories that Consumptions can fall into. One category to many Consumptions.'''
# id = db.Column(db.Integer, primary_key=True)
# name = db.Column(db.String, nullable=False)
# def __repr__(self):
# return '<Consumption Category {}, {}>'.format(self.name, self.id)
# class Offset(db.Model):
# __tablename__ = 'offsets'
# id = db.Column(db.Integer, primary_key=True)
# name = db.Column(db.String, nullable=False)
# date = db.Column(db.DateTime, nullable=False)
# price_usd = db.Column(db.Float, nullable=True)
# carbon_offset_quantity = db.Column(db.Float, nullable=False)
# reference = db.Column(db.String)
# # Foreign key to Campaigns. Many offsets to one campaign.
# campaign = db.Column(db.Integer, db.ForeignKey('campaigns.id'), nullable=False)
# def __repr__(self):
# return '<Offset #{}: {} ({}) kg CO2>'.format(self.id, self.name, self.carbon_offset_quantity)
# ======================================================
# TODO FUTURE
# Potentially I might want to abstract the Vehicle Resource into a Resource base class that Vehicles and Utilities can extend
# # Abstract Class
# class Resource(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# carbon_per_unit = db.Column(db.Float, nullable=False)
# units = db.Column(db.String, nullable=False)
# notes = db.Column(db.String, nullable=True)
# def __repr__(self):
# return '<Resource {}>'.format(self.id)
# class Vehicle(Resource):
# __tablename__ = 'vehicles'
# fuel_l_per_100km = db.Column(db.Float, nullable=False)
# carbon_to_manufacture = db.Column(db.Float, nullable=False)
# expected_life_km = db.Column(db.Float, nullable=False)
# units = 'miles' # Somehow set this.
# def __repr__(self):
# return '<Vehicle {}>'.format(self.id)
# class Utility(Resource):
# __tablename__ = 'electric'
# username = db.Column(db.String, primary_key=True)
# def __repr__(self):
# return '<User {}>'.format(self.id) | 41.149425 | 161 | 0.691899 | 3,310 | 0.462291 | 0 | 0 | 0 | 0 | 0 | 0 | 4,873 | 0.680587 |
357adb90337719d5723ab2cf058c01616c052b6e | 806 | py | Python | course/views.py | author31/HongsBlog | a94dc56a05062b5b2bab3f28f84b7ede1ae44bf8 | [
"MIT"
] | null | null | null | course/views.py | author31/HongsBlog | a94dc56a05062b5b2bab3f28f84b7ede1ae44bf8 | [
"MIT"
] | null | null | null | course/views.py | author31/HongsBlog | a94dc56a05062b5b2bab3f28f84b7ede1ae44bf8 | [
"MIT"
] | null | null | null | from typing import List
from django.shortcuts import render
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from assignment.models import Assignment
from course.models import Course
class CourseListView(ListView):
    """List all Course records.

    NOTE(review): context_object_name is singular ('course') although the
    view supplies a list -- confirm the template relies on this name.
    """
    template_name = 'course/course_list.html'
    model = Course
    context_object_name = 'course'
class CourseDetailView(DetailView):
    """Show one course together with its assignments."""
    template_name = 'course/course_detail.html'
    model = Course
    context_object_name = 'course'
    def get(self, request, *args, **kwargs):
        # Remember the course pk so get_context_data can filter assignments.
        self.pk = kwargs["pk"]
        return super().get(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        # Expose the course's assignments alongside the default context.
        kwargs["assignment"] = Assignment.objects.filter(course__id=self.pk)
        return super().get_context_data(**kwargs)
| 29.851852 | 76 | 0.729529 | 568 | 0.704715 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.104218 |
357beddbd194c981ff7bfbba130b3a5fe87ac6d4 | 157 | py | Python | aldryn_search/apps.py | lab360-ch/aldryn-search | 15a319edac126aa1e44f22d34a7bcb5aec3e3dde | [
"BSD-3-Clause"
] | 11 | 2019-03-29T10:32:13.000Z | 2021-02-26T11:44:44.000Z | aldryn_search/apps.py | lab360-ch/aldryn-search | 15a319edac126aa1e44f22d34a7bcb5aec3e3dde | [
"BSD-3-Clause"
] | 23 | 2019-01-31T16:20:57.000Z | 2021-11-10T19:57:58.000Z | aldryn_search/apps.py | lab360-ch/aldryn-search | 15a319edac126aa1e44f22d34a7bcb5aec3e3dde | [
"BSD-3-Clause"
] | 23 | 2019-02-14T09:59:40.000Z | 2022-03-10T12:38:48.000Z | from django.apps import AppConfig
class AldrynSearchConfig(AppConfig):
    """Django app configuration for the aldryn_search package."""
    name = 'aldryn_search'
    def ready(self):
        # Importing conf at app startup applies the module's settings
        # defaults as a side effect of the import.
        from . import conf  # noqa
| 17.444444 | 36 | 0.687898 | 120 | 0.764331 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.133758 |
357c2647b8417e551b803cdb6ce940f55d5d1f67 | 606 | py | Python | tests/utils.py | openlobby/openlobby-server | b7a1a2b73e903c4da57970926844b0639dce5aae | [
"MIT"
] | 7 | 2017-11-23T15:24:50.000Z | 2018-11-29T21:47:55.000Z | tests/utils.py | openlobby/openlobby-server | b7a1a2b73e903c4da57970926844b0639dce5aae | [
"MIT"
] | 20 | 2018-02-21T22:25:42.000Z | 2020-06-05T17:22:36.000Z | tests/utils.py | openlobby/openlobby-server | b7a1a2b73e903c4da57970926844b0639dce5aae | [
"MIT"
] | 3 | 2018-03-08T10:05:01.000Z | 2018-08-16T14:36:28.000Z | from datetime import datetime
def strip_value(data, *path):
    """Replace the value at *path* inside *data* with ``"__STRIPPED__"``.

    Descends through nested dicts following *path*; when an intermediate
    value is a list, the remaining path is applied to every item.
    Returns the original value(s) that were replaced.
    Raises NotImplementedError for intermediate values that are neither
    dicts nor lists.
    """
    key, rest = path[0], path[1:]
    value = data.get(key)
    if not rest:
        data[key] = "__STRIPPED__"
        return value
    if isinstance(value, dict):
        return strip_value(value, *rest)
    if isinstance(value, list):
        return [strip_value(item, *rest) for item in value]
    raise NotImplementedError()
def dates_to_iso(data):
    """Convert every datetime value in *data* to its ISO-8601 string.

    Mutates the mapping in place and returns it for convenience.
    """
    for key in data:
        if isinstance(data[key], datetime):
            data[key] = data[key].isoformat()
    return data
| 25.25 | 67 | 0.589109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.023102 |
357cffec267dd0668f1c68a283ee49efc4b0ead9 | 3,077 | py | Python | datastructures/binarytree.py | tkaleas/python-sandbox | 37ebe92c5f89300e27803118259d16f62d67f612 | [
"MIT"
] | null | null | null | datastructures/binarytree.py | tkaleas/python-sandbox | 37ebe92c5f89300e27803118259d16f62d67f612 | [
"MIT"
] | null | null | null | datastructures/binarytree.py | tkaleas/python-sandbox | 37ebe92c5f89300e27803118259d16f62d67f612 | [
"MIT"
] | null | null | null | class Node(object):
def __init__(self, value):
self.value = value
self.left = None
self.right = None
#Binary Tree
class BinaryTree(object):
    """A plain binary tree supporting pre-order search and printing."""

    def __init__(self, root):
        self.root = Node(root)

    def search(self, find_val):
        """Return True if the value is in the tree, False otherwise."""
        return self.preorder_search(self.root, find_val)

    def print_tree(self):
        """Return all node values, pre-order, joined by '-'."""
        return self.preorder_print(self.root, "")[:-1]

    def preorder_search(self, start, find_val):
        """Recursive pre-order membership test starting at *start*."""
        if start is None:
            return False
        return (start.value == find_val
                or self.preorder_search(start.left, find_val)
                or self.preorder_search(start.right, find_val))

    def preorder_print(self, start, traversal):
        """Recursively append pre-order node values to *traversal*."""
        if start is None:
            return traversal
        traversal += str(start.value) + "-"
        traversal = self.preorder_print(start.left, traversal)
        return self.preorder_print(start.right, traversal)
# Binary Search Tree
class BST(object):
    """A binary search tree with insert, search and pre-order printing."""

    def __init__(self, root):
        self.root = Node(root)

    def insert(self, new_val):
        """Insert *new_val* into the tree; duplicates are ignored."""
        self.insert_helper(self.root, new_val)

    def search(self, find_val):
        """Return True if *find_val* is stored in the tree."""
        return self.search_helper(self.root, find_val)

    def search_helper(self, start, find_val):
        """Ordered recursive lookup beneath *start*."""
        if start.value == find_val:
            return True
        if find_val < start.value and start.left:
            return self.search_helper(start.left, find_val)
        if find_val > start.value and start.right:
            return self.search_helper(start.right, find_val)
        # Reached a missing child: the value is not in the tree.
        return False

    def insert_helper(self, start, new_val):
        """Walk down from *start* and attach *new_val* at an empty slot."""
        if new_val == start.value:
            return
        if new_val > start.value:
            if start.right:
                self.insert_helper(start.right, new_val)
            else:
                start.right = Node(new_val)
        else:
            if start.left:
                self.insert_helper(start.left, new_val)
            else:
                start.left = Node(new_val)

    def print_tree(self):
        """Return all node values, pre-order, joined by '-'."""
        return self.preorder_print(self.root, "")[:-1]

    def preorder_print(self, start, traversal):
        """Recursively append pre-order node values to *traversal*."""
        if start is None:
            return traversal
        traversal += str(start.value) + "-"
        traversal = self.preorder_print(start.left, traversal)
        return self.preorder_print(start.right, traversal)
| 32.734043 | 118 | 0.577836 | 3,030 | 0.984725 | 0 | 0 | 0 | 0 | 0 | 0 | 544 | 0.176796 |
357d8ee4029bbe48236c823d9888079f0ce3ef3f | 4,203 | py | Python | cs15211/StoneGame.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2021-07-05T01:53:30.000Z | 2021-07-05T01:53:30.000Z | cs15211/StoneGame.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | null | null | null | cs15211/StoneGame.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2018-01-08T07:14:08.000Z | 2018-01-08T07:14:08.000Z | __source__ = 'https://leetcode.com/problems/stone-game/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 877. Stone Game
#
# Alex and Lee play a game with piles of stones.
# There are an even number of piles arranged in a row,
# and each pile has a positive integer number of stones piles[i].
#
# The objective of the game is to end with the most stones.
# The total number of stones is odd, so there are no ties.
#
# Alex and Lee take turns, with Alex starting first.
# Each turn, a player takes the entire pile of stones from either the beginning
# or the end of the row. This continues until there are no more piles left,
# at which point the person with the most stones wins.
#
# Assuming Alex and Lee play optimally, return True if and only if Alex wins the game.
#
#
#
# Example 1:
#
# Input: [5,3,4,5]
# Output: true
# Explanation:
# Alex starts first, and can only take the first 5 or the last 5.
# Say he takes the first 5, so that the row becomes [3, 4, 5].
# If Lee takes 3, then the board is [4, 5], and Alex takes 5 to win with 10 points.
# If Lee takes the last 5, then the board is [3, 4], and Alex takes 4 to win with 9 points.
# This demonstrated that taking the first 5 was a winning move for Alex, so we return true.
#
#
# Note:
#
# 2 <= piles.length <= 500
# piles.length is even.
# 1 <= piles[i] <= 500
# sum(piles) is odd.
#
import unittest
class Solution(object):
    def stoneGame(self, piles):
        """
        :type piles: List[int]
        :rtype: bool
        """
        # With an even number of piles and an odd total (per the problem
        # constraints), Alex can commit to taking either all even-indexed
        # or all odd-indexed piles, whichever group sums higher, so the
        # first player always wins.
        return True
class SolutionDP(object):
    """Interval DP: margin[i][j] is the best score difference the
    current player can force on the sub-row piles[i..j]."""

    def stoneGame(self, piles):
        """
        :type piles: List[int]
        :rtype: bool
        """
        count = len(piles)
        margin = [[0] * count for _ in range(count)]
        # Base case: a lone pile goes entirely to the current player.
        for idx, pile in enumerate(piles):
            margin[idx][idx] = pile
        # Grow intervals; taking an end pile subtracts the opponent's
        # best margin on the remaining interval.
        for length in range(2, count + 1):
            for lo in range(count - length + 1):
                hi = lo + length - 1
                take_lo = piles[lo] - margin[lo + 1][hi]
                take_hi = piles[hi] - margin[lo][hi - 1]
                margin[lo][hi] = max(take_lo, take_hi)
        # Alex wins iff his forced margin over the whole row is positive.
        return margin[0][count - 1] > 0
class TestMethods(unittest.TestCase):
    # Placeholder sanity test; real validation happens on LeetCode.
    def test_Local(self):
        self.assertEqual(1, 1)
if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/stone-game/solution/
Approach 1: Dynamic Programming
Complexity Analysis
Time Complexity: O(N^2), where N is the number of piles.
Space Complexity: O(N^2), the space used storing the intermediate results of each subgame.
# 10ms 36.14%
class Solution {
public boolean stoneGame(int[] piles) {
int N = piles.length;
// dp[i+1][j+1] = the value of the game [piles[i], ..., piles[j]].
int[][] dp = new int[N+2][N+2];
for (int size = 1; size <= N; ++ size) {
for (int i = 0; i + size <= N; ++i) {
int j = i + size - 1;
int parity = ( j + i + N) % 2; // j - i - N; but +x = -x (mod 2)
if (parity == 1) {
dp[i + 1][j + 1] = Math.max(piles[i] + dp[i +2][j + 1], piles[j] + dp[i + 1][j]);
} else {
dp[i + 1][j + 1] = Math.min(-piles[i] + dp[i +2][j + 1], -piles[j] + dp[i + 1][j]);
}
}
}
return dp[1][N] > 0;
}
}
Approach 2: Mathematical
Complexity Analysis
Time and Space Complexity: O(1)
# 3ms 53.69%
class Solution {
public boolean stoneGame(int[] piles) {
return true;
}
}
# 2ms 99.64%
class Solution {
public boolean stoneGame(int[] piles) {
int left = 0;
int right = piles.length-1;
int alex = 0;
int lee = 0;
boolean alexTurn = true;
while (left < right) {
if (alexTurn) {
if (piles[left] > piles[right]) {
alex += piles[left];
left++;
} else {
alex += piles[right];
right--;
}
} else {
if (piles[left] > piles[right]) {
lee += piles[left];
left++;
} else {
lee += piles[right];
right--;
}
}
}
return alex > lee;
}
}
'''
| 28.02 | 103 | 0.524863 | 718 | 0.17083 | 0 | 0 | 0 | 0 | 0 | 0 | 3,496 | 0.831787 |
3580446fc9f2895b65f629897e85da7d65bcfe94 | 149 | py | Python | project_pawz/sponsorships/apps.py | rlaneyjr/project_pawz | 27f316ef35968ed1319ec0585a050ebed795763a | [
"MIT"
] | null | null | null | project_pawz/sponsorships/apps.py | rlaneyjr/project_pawz | 27f316ef35968ed1319ec0585a050ebed795763a | [
"MIT"
] | 13 | 2020-02-12T00:12:52.000Z | 2022-02-12T09:42:36.000Z | project_pawz/sponsorships/apps.py | rlaneyjr/project_pawz | 27f316ef35968ed1319ec0585a050ebed795763a | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class SponsorshipsAppConfig(AppConfig):
name = 'project_pawz.sponsorships'
verbose_name = "Sponsorships"
| 21.285714 | 39 | 0.778523 | 112 | 0.751678 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.275168 |
3580b7cb753fcaa31d0c440e5b6586620bfd111a | 745 | py | Python | assignment_solutions/6/is_all_upper.py | dannymeijer/level-up-with-python | 1bd1169aafd0fdc124984c30edc7f0153626cf06 | [
"MIT"
] | null | null | null | assignment_solutions/6/is_all_upper.py | dannymeijer/level-up-with-python | 1bd1169aafd0fdc124984c30edc7f0153626cf06 | [
"MIT"
] | null | null | null | assignment_solutions/6/is_all_upper.py | dannymeijer/level-up-with-python | 1bd1169aafd0fdc124984c30edc7f0153626cf06 | [
"MIT"
] | null | null | null | import re
only_letters = re.compile("[a-zA-Z]")
def is_all_upper(text: str) -> bool:
    """Return True when *text* contains at least one letter and every
    letter in it is uppercase.

    Strings without any letters (empty, whitespace-only, digits-only)
    yield False.
    """
    # str.isalpha()/upper() cover all Unicode letters, generalizing the
    # previous ASCII-only [a-zA-Z] regex check while preserving the
    # documented ASCII behavior.
    has_letters = any(char.isalpha() for char in text)
    return has_letters and text == text.upper()
if __name__ == '__main__':
    # Smoke-test the predicate when the module is run as a script.
    print("Example:")
    print(is_all_upper('ALL UPPER'))
    # These "asserts" are used for self-checking and not for an auto-testing
    assert is_all_upper('ALL UPPER') is True
    assert is_all_upper('all lower') is False
    assert is_all_upper('mixed UPPER and lower') is False
    assert is_all_upper('') is False
    assert is_all_upper(' ') is False
    assert is_all_upper('123') is False
    print("Coding complete? Click 'Check' to earn cool rewards!")
3581a840c4be8b7ea8177fe99a8cfde9dc61f37d | 1,661 | py | Python | osm/GeoFabrikSpider.py | TheGreatRefrigerator/openpoiservice | e345062c4eb887e661597f47cf167b793a586d49 | [
"Apache-2.0"
] | null | null | null | osm/GeoFabrikSpider.py | TheGreatRefrigerator/openpoiservice | e345062c4eb887e661597f47cf167b793a586d49 | [
"Apache-2.0"
] | null | null | null | osm/GeoFabrikSpider.py | TheGreatRefrigerator/openpoiservice | e345062c4eb887e661597f47cf167b793a586d49 | [
"Apache-2.0"
] | null | null | null | # sudo scrapy runspider GeoFabrikSpider.py
import scrapy
import os
import urlparse
from scrapy.selector import Selector
import subprocess
from time import sleep
class GeoFabrikSpider(scrapy.Spider):
    """Crawl download.geofabrik.de and fetch .osm.pbf extracts via wget.

    NOTE(review): this module uses the Python 2 ``urlparse`` module;
    under Python 3 the equivalent is ``urllib.parse``.
    """
    name = "geofabrik_spider"
    start_urls = ['https://download.geofabrik.de/']

    def parse(self, response):
        """Follow the continent pages listed in ``regions``."""
        # Only crawl these top-level regions; extend the list as needed.
        regions = ['Asia']
        # regions = ['Asia', 'Europe', 'North America']
        for region in response.css('.subregion'):
            subregion = region.css('a ::text').extract_first()
            if subregion in regions:
                next_page = region.css('a::attr(href)').extract_first()
                yield scrapy.Request(
                    urlparse.urljoin(response.url, next_page),
                    callback=self.fetch_sub_regions
                )

    def fetch_sub_regions(self, response):
        """Download each sub-region's .osm.pbf file unless already present."""
        sel = Selector(response)
        sub_regions = sel.xpath("//a[contains(text(),'[.osm.pbf]')]/@href").extract()
        for sub_region in sub_regions:
            osm_filename = sub_region.split('/')[1]
            # Compute the link up front: the yield below needs it even when
            # the file already exists (previously this raised a NameError
            # for already-downloaded files).
            download_link = urlparse.urljoin(response.url, sub_region)
            if os.path.exists(osm_filename):
                print('{} already downloaded'.format(osm_filename))
            else:
                print('Starting download of {}'.format(osm_filename))
                subprocess.call(['wget', download_link])
                sleep(120)  # few minutes
            yield {
                "subregion_link": download_link,
            }
35822f26b3ef7809b8f1f7729babe9f761bebf6a | 3,252 | py | Python | tests/basic.py | mkindahl/mysql-replicant-python | e627d29feae623f3f406ea472ca8cfae97abaa97 | [
"Apache-2.0"
] | null | null | null | tests/basic.py | mkindahl/mysql-replicant-python | e627d29feae623f3f406ea472ca8cfae97abaa97 | [
"Apache-2.0"
] | null | null | null | tests/basic.py | mkindahl/mysql-replicant-python | e627d29feae623f3f406ea472ca8cfae97abaa97 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2010, Mats Kindahl, Charles Bell, and Lars Thalmann
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# * Neither the name of Sun Microsystems nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SUN
# MICROSYSTEMS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import sys, os.path
# Make the repository root importable so the 'replicant' package resolves
# when these tests are run straight from the source tree.
rootpath = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
sys.path.append(rootpath)
import unittest
import replicant
class TestPosition(unittest.TestCase):
    "Test case for binlog positions class."

    def _checkPos(self, p, s):
        """Check that a position is valid, has the expected
        representation, and survives a round-trip between its string
        representation and the class."""
        from replicant import Position
        self.assertEqual(repr(p), s)
        self.assertEqual(p, eval(repr(p)))
        self.assertEqual(s, repr(eval(s)))

    def testSimple(self):
        from replicant import Position
        positions = [Position('master-bin.00001', 4711),
                     Position('master-bin.00001', 9393),
                     Position('master-bin.00002', 102)]

        strings = ["Position('master-bin.00001', 4711)",
                   "Position('master-bin.00001', 9393)",
                   "Position('master-bin.00002', 102)"]

        # range(len(...)-1) previously skipped the last entry in every
        # loop below; iterate over the full index range instead.
        for i in range(len(positions)):
            self._checkPos(positions[i], strings[i])

        # Check that comparison works as expected for every ordered pair.
        for i in range(len(positions)):
            for j in range(len(positions)):
                if i < j:
                    self.assertTrue(positions[i] < positions[j])
                elif i == j:
                    self.assertEqual(positions[i], positions[j])
                else:
                    self.assertTrue(positions[i] > positions[j])
def suite():
    """Return the module's test suite.

    Uses TestLoader.loadTestsFromTestCase; unittest.makeSuite has been
    deprecated since Python 3.2 and removed in Python 3.13.
    """
    return unittest.defaultTestLoader.loadTestsFromTestCase(TestPosition)
if __name__ == '__main__':
    # Run through the suite() factory so older runners discover the tests.
    unittest.main(defaultTest='suite')
| 41.164557 | 71 | 0.671279 | 1,369 | 0.420972 | 0 | 0 | 0 | 0 | 0 | 0 | 1,983 | 0.609779 |
358309e43cd19cc91ae52a7e9d812eb22388abf7 | 2,348 | py | Python | routa/test_levenshtein.py | piglaker/SpecialEdition | 172688ef111e1b5c62bdb1ba0a523a2654201b90 | [
"Apache-2.0"
] | 2 | 2022-01-06T07:41:50.000Z | 2022-01-22T14:18:51.000Z | routa/test_levenshtein.py | piglaker/SpecialEdition | 172688ef111e1b5c62bdb1ba0a523a2654201b90 | [
"Apache-2.0"
] | null | null | null | routa/test_levenshtein.py | piglaker/SpecialEdition | 172688ef111e1b5c62bdb1ba0a523a2654201b90 | [
"Apache-2.0"
] | null | null | null |
def iterative_levenshtein(string, target, costs=(1, 1, 1)):
    """Return the Levenshtein distance between `string` and `target`
    together with one edit script that achieves it.

    Args:
        string: Source sequence (any indexable sequence, e.g. str/list).
        target: Target sequence.
        costs: Tuple (d, i, s) with the costs of a deletion, an
            insertion, and a substitution, respectively.

    Returns:
        (distance, edits) where `edits` is a list of tuples:
        ("delete", element), ("insert", position, element) or
        ("substitution", old, new). Matching elements contribute no edit,
        so with unit costs len(edits) == distance.

    Fixes over the previous version:
    - Empty `string` or `target` no longer raises (the old code returned
      via loop variables that were never bound).
    - Boundary cells of the edit table are populated, so edit scripts
      passing through row/column 0 include their deletes/inserts.
    - Matching characters no longer record a spurious no-op
      "substitution" entry.
    """
    deletes, inserts, substitutes = costs
    rows = len(string) + 1
    cols = len(target) + 1
    dist = [[0] * cols for _ in range(rows)]
    edits = [[[] for _ in range(cols)] for _ in range(rows)]
    # Source prefixes become the empty target by deleting every element.
    for row in range(1, rows):
        dist[row][0] = row * deletes
        edits[row][0] = edits[row - 1][0] + [("delete", string[row - 1])]
    # Target prefixes are built from an empty source by insertions.
    for col in range(1, cols):
        dist[0][col] = col * inserts
        edits[0][col] = edits[0][col - 1] + [("insert", col, target[col - 1])]
    for col in range(1, cols):
        for row in range(1, rows):
            cost = 0 if string[row - 1] == target[col - 1] else substitutes
            best = min(dist[row - 1][col] + deletes,
                       dist[row][col - 1] + inserts,
                       dist[row - 1][col - 1] + cost)
            dist[row][col] = best
            # Record the edit matching the branch that produced `best`
            # (ties resolved delete > insert > diagonal, as before).
            if best == dist[row - 1][col] + deletes:
                edits[row][col] = edits[row - 1][col] + \
                    [("delete", string[row - 1])]
            elif best == dist[row][col - 1] + inserts:
                edits[row][col] = edits[row][col - 1] + \
                    [("insert", col, target[col - 1])]
            elif cost:
                edits[row][col] = edits[row - 1][col - 1] + \
                    [("substitution", string[row - 1], target[col - 1])]
            else:
                # Match: reuse the diagonal script unchanged.
                edits[row][col] = edits[row - 1][col - 1]
    return dist[-1][-1], edits[-1][-1]
# Demo: the implementation only uses len() and indexing, so lists of
# ints work as well as strings.
result, edits = iterative_levenshtein([1,2,3], [0,13])
print(result, edits)
| 35.044776 | 96 | 0.534072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 849 | 0.361584 |
3583587e857ba2e03f4dbec666f2d6bee6d2dba7 | 168 | py | Python | loggo2/__init__.py | bitpanda-labs/loggo2 | 0e0d157b12a34a737ddfc0c9083241a6db03b78a | [
"MIT"
] | 6 | 2018-05-15T13:30:02.000Z | 2022-03-08T16:08:05.000Z | loggo2/__init__.py | bitpanda-labs/loggo2 | 0e0d157b12a34a737ddfc0c9083241a6db03b78a | [
"MIT"
] | 102 | 2018-06-15T11:08:11.000Z | 2022-01-21T11:12:11.000Z | loggo2/__init__.py | bitpanda-labs/loggo | 0e0d157b12a34a737ddfc0c9083241a6db03b78a | [
"MIT"
] | 10 | 2018-05-15T11:01:14.000Z | 2022-02-06T20:58:56.000Z | from ._loggo2 import JsonLogFormatter, LocalLogFormatter, Loggo # noqa: F401
__version__ = "10.1.2" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT
| 42 | 88 | 0.767857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.5 |
3584dd069c4afaf6ad2ec3546a7187fbdbbed6aa | 1,762 | py | Python | aoc/day11/__init__.py | scorphus/advent-of-code-2020 | 12270ccc86475a18e587007da0fbfc6c9ef3a6a8 | [
"BSD-3-Clause"
] | 9 | 2020-12-04T17:40:49.000Z | 2022-01-08T03:14:21.000Z | aoc/day11/__init__.py | scorphus/advent-of-code-2020 | 12270ccc86475a18e587007da0fbfc6c9ef3a6a8 | [
"BSD-3-Clause"
] | 1 | 2021-02-12T20:49:33.000Z | 2021-02-12T20:49:33.000Z | aoc/day11/__init__.py | scorphus/advent-of-code-2020 | 12270ccc86475a18e587007da0fbfc6c9ef3a6a8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Advent of Code 2020
# https://github.com/scorphus/advent-of-code-2020
# Licensed under the BSD-3-Clause license:
# https://opensource.org/licenses/BSD-3-Clause
# Copyright (c) 2020, Pablo S. Blum de Aguiar <scorphus@gmail.com>
from aoc import strip
import copy
# The eight (row, column) offsets of a cell's neighbours, listed
# clockwise starting from "up" (row axis pointing down).
DIR = [
    (-1, 0),
    (-1, 1),
    (0, 1),
    (1, 1),
    (1, 0),
    (1, -1),
    (0, -1),
    (-1, -1),
]
def part1(lines, shortsight=True, least_occ=4):
    """Run the seating simulation until the layout stabilises and return
    the number of occupied seats.

    `shortsight` limits visibility to the eight adjacent cells and
    `least_occ` is the crowding threshold at which an occupied seat
    empties (part 2 reuses this with far sight and a threshold of 5).
    """
    seats = [list(line) for line in strip(lines)]
    total_occupied = 0
    while True:
        # The rules apply to all seats simultaneously, so scan a frozen
        # snapshot while mutating the live layout.
        snapshot = [row[:] for row in seats]
        changed_any = False
        for r, row in enumerate(snapshot):
            for c, cell in enumerate(row):
                if cell == "L":
                    if occupied(snapshot, r, c, shortsight) == 0:
                        seats[r][c] = "#"
                        total_occupied += 1
                        changed_any = True
                elif cell == "#":
                    if occupied(snapshot, r, c, shortsight) >= least_occ:
                        seats[r][c] = "L"
                        total_occupied -= 1
                        changed_any = True
        if not changed_any:
            return total_occupied
def part2(lines):
    # Part 2 is part 1 with far sight (shortsight=False) and a crowding
    # threshold of 5, so simply reuse the generalised implementation.
    return part1(lines, False, 5)
def occupied(grid, i, j, shortsight=True):
    """Count the occupied seats ("#") visible from cell (i, j).

    With `shortsight`, only the eight adjacent cells are examined;
    otherwise each direction is scanned through floor cells until the
    first seat (occupied or empty) or the edge of the grid.
    """
    height = len(grid)
    width = len(grid[0])
    seen = 0
    for di, dj in DIR:
        r, c = i + di, j + dj
        while 0 <= r < height and 0 <= c < width:
            cell = grid[r][c]
            if cell == "#":
                seen += 1
                break
            # Adjacent-only mode stops after one step; an empty seat
            # blocks the line of sight in either mode.
            if shortsight or cell == "L":
                break
            r += di
            c += dj
    return seen
| 25.911765 | 87 | 0.498297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 450 | 0.255392 |
35869169565f147aa6ca2effa9a220febbbdf872 | 3,349 | py | Python | analyses/ParamAnalyzer.py | ThinkNaive/Matrix-Vector-Multiplication | 9a00ba9e4d0d298ce4ff3bfae092f49571a56605 | [
"MIT"
] | null | null | null | analyses/ParamAnalyzer.py | ThinkNaive/Matrix-Vector-Multiplication | 9a00ba9e4d0d298ce4ff3bfae092f49571a56605 | [
"MIT"
] | null | null | null | analyses/ParamAnalyzer.py | ThinkNaive/Matrix-Vector-Multiplication | 9a00ba9e4d0d298ce4ff3bfae092f49571a56605 | [
"MIT"
] | null | null | null | # coding=utf-8
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
    # Experiment dimensions used when the .npy statistics were generated:
    # matrix row counts, column count, and iterations per run.
    rows = [10000]
    col = 10000
    iteration = 10
    # One entry per pre-computed run; 'id' selects the .npy file pair and
    # the remaining keys describe the coding-strategy parameters
    # (rep = replication, mds = MDS code, lt = LT/fountain code).
    params = [
        {'id': '01', 'strategy': 'rep', 'p': 10, 'repNum': 1},
        {'id': '02', 'strategy': 'rep', 'p': 10, 'repNum': 2},
        {'id': '03', 'strategy': 'mds', 'p': 10, 'k': 4},
        {'id': '04', 'strategy': 'mds', 'p': 10, 'k': 5},
        {'id': '05', 'strategy': 'mds', 'p': 10, 'k': 8},
        {'id': '06', 'strategy': 'mds', 'p': 10, 'k': 10},
        {'id': '07', 'strategy': 'lt', 'p': 10, 'c': 0.03, 'delta': 0.5, 'alpha': 1.25},
        {'id': '08', 'strategy': 'lt', 'p': 10, 'c': 0.03, 'delta': 0.5, 'alpha': 1.5},
        {'id': '09', 'strategy': 'lt', 'p': 10, 'c': 0.03, 'delta': 0.5, 'alpha': 2.0},
        {'id': '10', 'strategy': 'lt', 'p': 10, 'c': 0.03, 'delta': 0.5, 'alpha': 3.0}
    ]
    latency = []
    computation = []
    # Load the per-run measurements: *_Stop.npy holds completion times,
    # *_Comp.npy holds computation counts.
    for i, param in enumerate(params):
        comps = np.load('statistics/Param_' + param['strategy'] + '_' + param['id'] + '_Comp.npy')
        stops = np.load('statistics/Param_' + param['strategy'] + '_' + param['id'] + '_Stop.npy')
        latency.append(np.mean(stops))
        computation.append(np.sum(comps) / rows[0] / 10)  # comp/m, averaged per iteration
    color = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    marker = ['o', '^', 's', 'D', 'x', '*', '+']
    # Plot the total computation count (normalised by m) against latency,
    # one curve per strategy.
    fig = plt.figure(num=1, figsize=(6, 4), dpi=150)
    plt.title('Computation vs Latency')
    plt.xlabel('latency (s)')
    plt.ylabel('computation/$m$ (ratio)')
    plt.plot(latency[0:2], computation[0:2], color=color[0], label=params[0]['strategy'].upper(), marker=marker[0])
    plt.plot(latency[2:6], computation[2:6], color=color[1], label=params[2]['strategy'].upper(), marker=marker[1])
    plt.plot(latency[6:12], computation[6:12], color=color[2], label=params[6]['strategy'].upper(), marker=marker[2])
    # Annotate each data point with the parameter that produced it.
    for i, (x, y) in enumerate(zip(latency[0:2], computation[0:2])):
        plt.annotate(r'$r$=%s' % params[i]['repNum'], xy=(x, y), xytext=(0, 5), textcoords='offset points')
    for i, (x, y) in enumerate(zip(latency[2:6], computation[2:6])):
        plt.annotate(r'$k$=%s' % params[i + 2]['k'], xy=(x, y), xytext=(-10, 5), textcoords='offset points')
    # Disabled alternative: draw the LT curve in an inset sub-axes with an
    # arrow pointing at it.
    # plt.annotate('',
    #              xy=(3.6, 1.28),
    #              xytext=(3.45, 1.07),
    #              arrowprops=dict(arrowstyle='fancy',
    #                              color='#1E90FF',
    #                              connectionstyle=ConnectionStyle("Angle3, angleA=45, angleB=-100")))
    #
    # sub = fig.add_axes([0.25, 0.4, 0.25, 0.25])
    # sub.plot(latency[6:12], computation[6:12], color=color[2], label=params[6]['strategy'].upper(), marker=marker[2])
    # for i, (x, y) in enumerate(zip(latency[6:12], computation[6:12])):
    #     sub.annotate(r'$\alpha$=%s' % params[i + 6]['alpha'], xy=(x, y), xytext=(0, 5), textcoords='offset points')
    for i, (x, y) in enumerate(zip(latency[6:12], computation[6:12])):
        txtPos = (0, 5)
        # Run '09' overlaps its neighbour, so place its label below.
        if params[i + 6]['id'] == '09':
            txtPos = (0, -10)
        plt.annotate(r'$\alpha$=%s' % params[i + 6]['alpha'], xy=(x, y), xytext=txtPos, textcoords='offset points')
    plt.legend(loc='upper left')
    plt.savefig('figures/Param_ComputationVsLatency.svg', dpi=150, bbox_inches='tight')
    plt.show()
| 48.536232 | 119 | 0.521947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,438 | 0.425822 |
358826b9087dfd29b78b2c2b77da9c4af1e5706c | 2,077 | py | Python | MsgRoute/myBackend.py | zhouli1014/OurGame | 5c1aa81c928c23b3ac3ca8a447dd1da44d4de44a | [
"MIT"
] | 3 | 2018-08-11T14:47:11.000Z | 2018-12-06T09:21:01.000Z | MsgRoute/myBackend.py | zhouli1014/OurGame | 5c1aa81c928c23b3ac3ca8a447dd1da44d4de44a | [
"MIT"
] | null | null | null | MsgRoute/myBackend.py | zhouli1014/OurGame | 5c1aa81c928c23b3ac3ca8a447dd1da44d4de44a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys, time
from backend import daemon
import itchat
import time
from ipcqueue import posixmq
import logging
import datetime as dt
import threading
import time
# Per-run log file named "<script name>_<timestamp>.log" under logFileDir;
# everything at DEBUG level and above is written there.
logFileDir = "/opt/crontab/IpcToItchat/"
nowDateTime = dt.datetime.now().strftime('%Y%m%d%H%M%S')
pyFilename = sys.argv[0].split('/')[-1].split('.')[0]
logFileName = '{1}_{0}.log'.format(nowDateTime , pyFilename)
logging.basicConfig(level=logging.DEBUG, format='[%(levelname)-8s] [%(asctime)s]: %(message)s',\
        datefmt='%Y-%m-%d %H:%M:%S', filename=logFileDir + logFileName, filemode='w')
class MyDaemon(daemon):
    """Daemon that relays messages from a POSIX IPC queue to WeChat via
    itchat."""
    def run(self):
        """Daemon main loop: block on the '/ipcmsg' queue and forward
        each (type, text) message according to its numeric type tag."""
        logging.info('run begin...')
        q = posixmq.Queue('/ipcmsg')
        # Reuse the cached WeChat session created by the foreground login.
        itchat.load_login_status(fileDir='/opt/crontab/IpcToItchat/itchat.pkl');
        while True:
            # rcvMsg is a (type, text) pair: 1 = heartbeat, 2 = spider.
            rcvMsg = q.get()
            logging.debug('Get msg: {}'.format(rcvMsg))
            # NOTE(review): every message is sent to 'filehelper' here and
            # heartbeat messages are sent a second time below — confirm
            # the duplicate send is intentional.
            itchat.send(rcvMsg[1], 'filehelper')
            if int(rcvMsg[0]) == 1: # heartbeat
                itchat.send(rcvMsg[1], 'filehelper')
            if int(rcvMsg[0]) == 2: # spider: relay into the "liuyi" chatroom
                for room in itchat.get_chatrooms():
                    nickName = room['NickName']
                    if room['NickName'] == "liuyi":
                        author = itchat.search_chatrooms(userName=room['UserName'])
                        author.send(rcvMsg[1])
                        logging.debug('Send msg: {}'.format(rcvMsg[1]))
        # NOTE(review): unreachable — the `while True` loop above never
        # breaks or returns.
        logging.info('run exit')
if __name__ == "__main__":
    logging.info('Game start...')
    # Log in (or reuse the cached session) before daemonizing, so the
    # QR-code prompt happens in the foreground.
    itchat.auto_login(enableCmdQR=2, hotReload=True)
    # NOTE(review): this rebinds the imported `daemon` base-class name to
    # an instance — consider renaming one of the two to avoid confusion.
    daemon = MyDaemon('/tmp/daemon-example.pid')
    # Classic SysV-style control interface: start | stop | restart.
    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            daemon.start()
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart()
        else:
            print("Unknown command")
            sys.exit(2)
        sys.exit(0)
    else:
        print("usage: %s start|stop|restart" % sys.argv[0])
        sys.exit(2)
    # NOTE(review): unreachable — every branch above exits the process.
    logging.info('Game over.')
| 34.616667 | 96 | 0.569571 | 886 | 0.426577 | 0 | 0 | 0 | 0 | 0 | 0 | 458 | 0.22051 |
35884559fa63fe01a4c1157a7757aab150c568df | 3,979 | py | Python | mlbgame/data/people.py | trevor-viljoen/mlbgame3 | 73e79aebad751b36773f01c93f09a422f69ef9cc | [
"MIT"
] | 6 | 2018-03-23T02:51:10.000Z | 2021-06-15T21:26:51.000Z | mlbgame/data/people.py | trevor-viljoen/mlbgame3 | 73e79aebad751b36773f01c93f09a422f69ef9cc | [
"MIT"
] | null | null | null | mlbgame/data/people.py | trevor-viljoen/mlbgame3 | 73e79aebad751b36773f01c93f09a422f69ef9cc | [
"MIT"
] | 1 | 2019-02-23T07:11:51.000Z | 2019-02-23T07:11:51.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""mlbgame functions for the people API endpoints.
This module's functions gets the JSON payloads for the mlb.com games API
endpoints.
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
from mlbgame.data import request
def get_person(person_id, params=None):
    """Fetch the information for a single player from the people API.
    Args:
        person_id (int): Unique player identifier (required path
            parameter).
        params (dict, optional): Additional query parameters:
            person_ids (array[integer]): Comma delimited list of person
                IDs ("1234, 2345").
            season (string): Season of play.
            group (array[string]): Category of statistics to return;
                0: hitting, 1: pitching, 2: fielding, 3: running.
                (May not yet have any effect on this endpoint.)
            fields (array[string]): Comma delimited list of specific
                fields to be returned
                ("topLevelNode, childNode, attribute").
    Returns:
        The JSON payload, as returned by mlbgame.data.request.
    """
    # 7 selects the people API group in mlbgame.data.request — see that
    # module for the endpoint index mapping.
    return request(7, primary_key=person_id, params=params)
def get_current_game_stats(person_id, params=None):
    """Fetch a player's current-game status from the people API.
    Args:
        person_id (int): Unique player identifier (required path
            parameter).
        params (dict, optional): Additional query parameters:
            group (array[string]): Category of statistics to return;
                0: hitting, 1: pitching, 2: fielding, 3: running.
                (May not yet have any effect on this endpoint.)
            timecode (string): Return a snapshot of the data at the
                specified time, formatted "YYYYMMDD_HHMMSS".
            fields (array[string]): Comma delimited list of specific
                fields to be returned
                ("topLevelNode, childNode, attribute").
    Returns:
        The JSON payload, as returned by mlbgame.data.request.
    """
    # Same endpoint group as get_person, with the stats/game/current path.
    return request(7, 'stats/game/current', primary_key=person_id,
                   params=params)
def get_game_stats(person_id, game_pk, params=None):
    """Fetch a player's statistics for one specific game.
    Args:
        person_id (int): Unique player identifier (required path
            parameter).
        game_pk (int): Unique primary key of the game (required path
            parameter).
        params (dict, optional): Additional query parameters:
            group (array[string]): Category of statistics to return;
                0: hitting, 1: pitching, 2: fielding, 3: running.
                (May not yet have any effect on this endpoint.)
            fields (array[string]): Comma delimited list of specific
                fields to be returned
                ("topLevelNode, childNode, attribute").
    Returns:
        The JSON payload, as returned by mlbgame.data.request.
    """
    # Same endpoint group as get_person; game_pk is passed through as the
    # secondary key of the stats/game path.
    return request(7, 'stats/game', primary_key=person_id,
                   secondary_key=game_pk, params=params)
| 32.349593 | 79 | 0.642372 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,533 | 0.887912 |
35889badcfa8dc6a845da5eecd5c85fc2bdcc83d | 25,571 | py | Python | src/swadr.py | ericpruitt/swadr | 17ce19032ce9cd0ac2b640c5a54093ef9ff0effc | [
"BSD-2-Clause"
] | 2 | 2015-01-14T13:43:52.000Z | 2015-03-30T08:36:24.000Z | src/swadr.py | ericpruitt/swadr | 17ce19032ce9cd0ac2b640c5a54093ef9ff0effc | [
"BSD-2-Clause"
] | null | null | null | src/swadr.py | ericpruitt/swadr | 17ce19032ce9cd0ac2b640c5a54093ef9ff0effc | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import csv
import getopt
import io
import itertools
import logging
import numbers
import os
import re
import sqlite3
import string
import sys
import textwrap
import time
try:
import readline
except ImportError:
pass
try:
import wcwidth
WCWIDTH_SUPPORT = True
except ImportError:
WCWIDTH_SUPPORT = False
PYTHON_3 = sys.version_info >= (3, )
EXIT_GENERAL_FAILURE = 1
EXIT_DATABASE_ERROR = 2
__all__ = ["PYTHON_3", "EXIT_GENERAL_FAILURE", "EXIT_DATABASE_ERROR",
"SQLite3CSVImporter", "pretty_print_table", "query_split",
"metaquery_conversion", "sqlite3_repl", "WCWIDTH_SUPPORT"]
__license__ = "BSD 2-Clause"
class SQLite3CSVImporter:
    """Import CSV files into SQLite3 tables, sniffing the CSV dialect,
    header presence and column types automatically.

    Fixes over the previous version:
    - The generated INSERT statement now uses the *quoted* table name;
      it previously interpolated the raw name, which broke (or allowed
      SQL injection through) table names that need quoting.
    - ``re.sub`` in ``create_table`` received its flags as the positional
      ``count`` argument; flags are now passed by keyword.
    - ``loadfile`` on an empty CSV file is a no-op instead of raising.
    - Removed the unused ``colschema`` local in ``detect_types``.
    """

    sniffer = csv.Sniffer()
    # True on Python 3; selects text handling compatible with both major
    # Python versions.
    PY3 = sys.version_info >= (3,)
    # Candidate SQLite column types in decreasing order of strictness;
    # the first caster that accepts every sampled value wins.
    typemap = [
        ("INTEGER", int),
        ("REAL", float),
        ("TEXT", (lambda v: v.encode("utf-8")) if PY3 else unicode),
        ("BLOB", (lambda v: v.encode("utf-8", errors="surrogateescape"))
                 if PY3 else str),
    ]

    def __init__(self, dbc, ignore_errors=True, log_warnings=True):
        """
        Set up the importer around the open SQLite3 connection `dbc`.

        When `ignore_errors` is set, SQL errors encountered while
        inserting rows are ignored and, if `log_warnings` is also set, a
        warning containing information about the failed INSERT is logged.
        """
        self.dbc = dbc
        self.ignore_errors = ignore_errors
        self.log_warnings = log_warnings

    @classmethod
    def detect_types(cls, table):
        """
        Return a list with one SQL type name per column of `table` (a
        sequence of rows): the strictest entry of `typemap` whose caster
        accepts every non-empty value in the column.

        Raises:
            ValueError: If no caster accepts a column's values.
        """
        typedefs = list()
        for column in zip(*table):
            rows_with_content = [value for value in column if value]
            # An all-empty column carries no type information; probe with
            # "" so it falls through to TEXT.
            scanned_columns = rows_with_content or ("",)
            for typedef, caster in cls.typemap:
                try:
                    for value in scanned_columns:
                        caster(value)
                except Exception:
                    continue
                typedefs.append(typedef)
                break
            else:
                raise ValueError("Could not detect type of %r" % (column,))
        return typedefs

    @staticmethod
    def quote_identifier(identifier):
        """
        Return `identifier` as an ANSI-quoted SQL identifier (embedded
        double quotes are doubled).
        """
        return '"' + identifier.replace('"', '""') + '"'

    def create_table(self, tablename, types, columns=None, if_not_exists=True):
        """
        Create table `tablename` with one column per entry in `types`.

        Column names are taken from `columns` or generated automatically
        from the table name's first letter ("x1", "x2", ...). When
        `if_not_exists` is set, the "IF NOT EXISTS" infix is added to the
        CREATE TABLE statement.

        Raises:
            ValueError: If `types` is empty.
        """
        if not types:
            raise ValueError("Must specify types.")
        if not columns:
            # Use the table name's first letter (or "n") as the prefix
            # for generated column names.
            for char in tablename:
                if char.isalpha():
                    char = char.lower()
                    break
            else:
                char = "n"
            columns = (char + str(n) for n in itertools.count(1))
        else:
            # Restrict column identifiers to "word" characters and
            # disambiguate duplicates with a numeric suffix.
            _columns = list()
            for column in columns:
                # BUG FIX: flags were previously passed as re.sub's
                # positional `count` argument.
                word_column = re.sub(r"\W+", "_", column,
                                     flags=re.M | re.U).strip("_")
                column = word_column
                base = 1
                while column in _columns:
                    base += 1
                    column = word_column + "_" + str(base)
                _columns.append(column)
            columns = _columns
        columns = (self.quote_identifier(column) for column in columns)
        table = self.quote_identifier(tablename)
        body = ",\n  ".join(("%s %s" % (c, t) for c, t in zip(columns, types)))
        infix = "IF NOT EXISTS " if if_not_exists else ""
        cursor = self.dbc.cursor()
        cursor.execute("CREATE TABLE %s%s (\n  %s\n)" % (infix, table, body))

    def loadfile(self, filename, tablename, create_table=True):
        """
        Load CSV file `filename` into table `tablename`.

        When `create_table` is set, the CSV schema (dialect, header
        presence and column types) is detected from the first 20 records
        and the table is created if it does not already exist. Empty
        values are stored as NULL. Note: on Python 3 this method does
        **not** work on un-seekable files.
        """
        def csv_open(path):
            """
            Open `path` in the mode best suited to the csv module on this
            Python version.
            """
            if self.PY3:
                # https://docs.python.org/3/library/csv.html#csv.reader
                return open(path, newline="", errors="surrogateescape")
            else:
                return open(path, mode="rbU")

        with csv_open(filename) as iostream:
            # Use the first 20 lines to determine the CSV dialect.
            sample_lines = "".join(itertools.islice(iostream, 20))
            dialect = self.sniffer.sniff(sample_lines)
            # Python 2 supports unseekable files by buffering the sampled
            # data in a BytesIO object; on Python 3 the csv module needs
            # the real file object, so seeking is required.
            if self.PY3:
                sample_reader_io = iostream
            else:
                sample_reader_io = io.BytesIO(sample_lines)
            # Read the first 20 CSV records.
            sample_reader_io.seek(0)
            sample_reader = csv.reader(sample_reader_io, dialect)
            sample_rows = list(itertools.islice(sample_reader, 20))
            if not sample_rows:
                # Nothing to import and no rows to infer a schema from.
                return
            # A header row is assumed when excluding the first row
            # changes the detected column types.
            sample_reader_io.seek(0)
            types_with_row_one = self.detect_types(sample_rows)
            types_sans_row_one = self.detect_types(sample_rows[1:])
            has_header = types_sans_row_one != types_with_row_one
            types = types_sans_row_one or types_with_row_one
            if has_header:
                try:
                    next(sample_reader)
                except StopIteration:
                    pass
                first_line_number = 2
                columns = sample_rows[0]
            else:
                first_line_number = 1
                columns = None
            with self.dbc:
                cursor = self.dbc.cursor()
                if create_table:
                    self.create_table(tablename, columns=columns, types=types)
                stream_reader = csv.reader(iostream, dialect)
                rowgen = itertools.chain(sample_reader, stream_reader)
                table = self.quote_identifier(tablename)
                binds = ", ".join("?" * len(sample_rows[0]))
                # BUG FIX: the INSERT previously interpolated the raw,
                # unquoted `tablename`.
                query = "INSERT INTO %s VALUES (%s)" % (table, binds)
                try:
                    original_text_factory = self.dbc.text_factory
                    if not self.PY3:
                        self.dbc.text_factory = str
                    for lineno, row in enumerate(rowgen, first_line_number):
                        parameters = [val if val else None for val in row]
                        logging.debug("Inserting row: %r", parameters)
                        try:
                            cursor.execute(query, parameters)
                        except Exception as e:
                            if not self.ignore_errors or self.log_warnings:
                                # Append the file name and row number to
                                # the exception message for context.
                                if not e.args:
                                    e.args = ("", )
                                suffix = " (%s, row %d) " % (filename, lineno)
                                e.args = e.args[:-1] + (e.args[-1] + suffix,)
                            if not self.ignore_errors:
                                self.dbc.text_factory = original_text_factory
                                raise
                            elif self.log_warnings:
                                logging.warning("%s", e)
                finally:
                    self.dbc.text_factory = original_text_factory
def pretty_print_table(table, breakafter=(0,), dest=None, tabsize=8):
    """
    Pretty-print data from a table in a style similar to MySQL CLI. The
    `breakafter` option is used to determine where row-breaks should be
    inserted. When set to `False`, no breaks will be inserted after any
    rows. When set to `True`, a break is set after every row. The
    `breakafter` option can also be an iterable containing row numbers
    after which a break should be inserted; the immutable default `(0,)`
    breaks just after the header row (the previous mutable-list default
    was a shared-default-argument pitfall). Assuming the first entry in
    `table` is the tabular data's header:

    >>> table = [
    ...     ["Name", "Age", "Favorite Color"],
    ...     ["Bob", 10, "Blue"],
    ...     ["Rob", 25, "Red"],
    ...     ["Penny", 70, "Purple"]]
    >>> pretty_print_table(table)
    +-------+-----+----------------+
    | Name  | Age | Favorite Color |
    +-------+-----+----------------+
    | Bob   |  10 | Blue           |
    | Rob   |  25 | Red            |
    | Penny |  70 | Purple         |
    +-------+-----+----------------+

    None cells are rendered as "NULL"; columns containing numbers are
    right-aligned. By default, the table is printed to stdout, but this
    can be changed by providing a file-like object as the `dest`
    parameter. The `tabsize` parameter controls how many spaces tabs are
    expanded to.
    """
    # Determine how many terminal columns a string occupies. The optional
    # wcwidth module handles wide (e.g. CJK) characters; without it, fall
    # back to len, which is inaccurate for many non-Latin characters.
    try:
        from wcwidth import wcswidth as _wcswidth
    except ImportError:
        _wcswidth = None
    _py3 = sys.version_info >= (3,)
    if _wcswidth is None:
        textwidth = len
    elif _py3:
        def textwidth(text):
            length = _wcswidth(text)
            return len(text) if length == -1 else length
    else:
        def textwidth(text):
            if not isinstance(text, unicode):
                text = text.decode("utf-8", "replace")
            length = _wcswidth(text)
            return len(text) if length == -1 else length

    table = list(table)
    last = len(table) - 1
    colwidths = list()
    table_lines = list()
    # Columns holding only strings stay left-aligned; any numeric value
    # flips its column to right alignment.
    left_aligned = [True] * len(table[0]) if table else []
    for rowindex, row in enumerate(table):
        # Split each cell into lines.
        cells = list()
        for colindex, column in enumerate(row):
            if column is None:
                column = "NULL"
            else:
                if isinstance(column, numbers.Number):
                    left_aligned[colindex] = False
                if _py3 or not isinstance(column, unicode):
                    column = str(column)
                column = column.expandtabs(tabsize)
            cells.append(column.split("\n"))
        # Check if a row-break should be inserted after this row.
        separate = ((breakafter is True) or
                    (rowindex == last) or
                    (breakafter and rowindex in breakafter))
        # Find the tallest cell in the row.
        row_height = max(map(len, cells))
        # Update the column widths if any of the cells are wider than the
        # widest, previously encountered cell in each column.
        initialize = not table_lines
        for index, contents in enumerate(cells):
            width = max(map(textwidth, contents))
            if initialize:
                colwidths.append(width)
            else:
                colwidths[index] = max(width, colwidths[index])
            if initialize:
                table_lines.append([None])
            # Pad the line count of each cell to match row_height.
            cells[index] += [""] * (row_height - len(contents))
            # Add lines to the line table; None marks a row break.
            table_lines[index].extend(cells[index] + [None] * separate)
    # Transpose the table and print each row. Rows containing None mean a
    # horizontal separator should be printed instead.
    for row in zip(*table_lines):
        printcols = list()
        if row[0] is None:
            print("+-", end="", file=dest)
            for index, column in enumerate(row):
                printcols.append("-" * colwidths[index])
            print(*printcols, sep="-+-", end="-+\n", file=dest)
        else:
            print("| ", end="", file=dest)
            for index, column in enumerate(row):
                if not _py3 and isinstance(column, unicode):
                    column = column.encode("utf-8", "replace")
                padding = " " * (colwidths[index] - textwidth(column))
                if left_aligned[index]:
                    printcols.append(column + padding)
                else:
                    printcols.append(padding + column)
            print(*printcols, sep=" | ", end=" |\n", file=dest)
def query_split(text):
    """
    Yield the individual SQLite3 queries contained in `text`. The final
    query yielded may be an incomplete fragment; callers can check it
    with `sqlite3.complete_statement`.
    """
    # Accumulate pieces split on ";" (the separators are kept by the
    # capturing group) until they form a complete statement.
    pending = []
    for piece in re.split("(;)", text):
        pending.append(piece)
        candidate = "".join(pending).strip()
        if candidate and sqlite3.complete_statement(candidate):
            yield candidate
            pending = []
    # Whatever is left over is an unterminated fragment; yield it
    # unstripped, exactly as it appeared.
    leftover = "".join(pending)
    if leftover.strip():
        yield leftover
def metaquery_conversion(original_query, original_params=tuple()):
    """
    Convert queries matching various, normally unsupported grammars to
    queries SQLite3 understands, returning a `(query, parameters)` pair.
    Unrecognized queries are returned unchanged apart from having
    trailing whitespace and semicolons stripped. The currently supported
    grammars are as follows:

    - {DESC | DESCRIBE} table_name
    - SHOW CREATE TABLE table_name
    - SHOW TABLES
    """
    flags = re.IGNORECASE | re.MULTILINE
    # BUG FIX: the flag bits were previously passed as re.sub's
    # positional `count` argument, so they were silently ignored. Since
    # the pattern only strips the *end* of the query, anchor it with \Z
    # explicitly (MULTILINE here would corrupt ";\n" sequences inside
    # string literals).
    original_query = re.sub(r"[;\s]+\Z", "", original_query)
    match = re.match(r"DESC(?:RIBE)?\s+(\S+)$", original_query, flags)
    if match:
        query = "PRAGMA table_info(" + match.group(1) + ")"
        return query, original_params
    match = re.match(r"SHOW\s+CREATE\s+TABLE\s+(\S+)$", original_query, flags)
    if match:
        table = match.group(1)
        # Strip one level of backtick or double-quote identifier quoting.
        if table[0] in "`\"":
            table = table[1:-1]
        query = (
            "SELECT sql || ';' AS `SHOW CREATE TABLE` "
            "FROM sqlite_master WHERE tbl_name = ? "
            "COLLATE NOCASE"
        )
        # "SHOW CREATE TABLE ?" forwards the caller-supplied binds.
        if table == "?":
            params = original_params
        else:
            params = (table, )
        return query, params
    match = re.match(r"SHOW\s+TABLES$", original_query, flags)
    if match:
        query = (
            "SELECT tbl_name AS `Tables` "
            "FROM sqlite_master "
            "WHERE type = 'table'"
        )
        return query, original_params
    return original_query, original_params
def sqlite3_repl(connection, input_function=None, dest=None):
    """
    Interactive REPL loop for SQLite3 designed to emulate the MySQL CLI
    REPL. Ctrl+C clears the current line buffer, and Ctrl+D exits the loop.
    When an incomplete query spans multiple lines, the prompt will change
    to provide a hint to the user about what token is missing to terminate
    the query. This function accepts a SQLite3 connection instance.

    `input_function` defaults to the interactive input builtin and `dest`
    is the file-like object output is printed to (stdout when omitted).
    """
    # Prefer a monotonic clock for timing queries; time.monotonic is not
    # available before Python 3.3.
    try:
        clock = time.monotonic
    except AttributeError:
        clock = time.time
    if not input_function:
        input_function = input if PYTHON_3 else raw_input
    linebuffer = ""
    # isolation_level None puts the connection in autocommit mode so each
    # statement takes effect immediately, like the sqlite3 CLI; the
    # caller's setting is restored on exit.
    original_connection_isolation_level = connection.isolation_level
    connection.isolation_level = None
    cursor = connection.cursor()
    while True:
        prompt = "sqlite> "
        if linebuffer.strip():
            for query in query_split(linebuffer):
                params = tuple()
                if sqlite3.complete_statement(query):
                    try:
                        # Translate DESC/SHOW-style metaqueries first.
                        query, params = metaquery_conversion(query, params)
                        start = clock()
                        results = cursor.execute(query, params)
                        duration = clock() - start
                        if cursor.rowcount > -1:
                            # DML: report the affected-row count.
                            n = cursor.rowcount
                            s = "" if n == 1 else "s"
                            prefix = "Query OK, %d row%s affected" % (n, s)
                        elif cursor.description:
                            # SELECT-like: render the result set.
                            results = list(results)
                            n = len(results)
                            s = "" if n == 1 else "s"
                            prefix = "%d row%s in set" % (n, s)
                            headers = [d[0] for d in cursor.description]
                            tbl = [headers] + results
                            pretty_print_table(tbl, dest=dest)
                        else:
                            prefix = "Query OK, but no data returned"
                        if duration >= 0:
                            text = "%s (%0.2f sec)" % (prefix, duration)
                        else:
                            text = "%s (execution time unknown)" % (prefix,)
                    except sqlite3.Error as exc:
                        text = "%s" % exc
                    print(text, end="\n\n", file=dest)
                    linebuffer = ""
                elif query:
                    linebuffer = query
                    # Figure out what token is needed to complete the query and
                    # adjust the prompt accordingly (MySQL-style '>, "> hints).
                    terminators = (";", '"', "'", "`", '\\"', "\\'", "\\`")
                    for chars in terminators:
                        if sqlite3.complete_statement(query + chars + ";"):
                            prompt = "    " + chars[-1] + "> "
                            break
                    else:
                        prompt = "    -> "
        try:
            linebuffer += input_function(prompt) + "\n"
        except EOFError:
            # ^D to exit
            print("\n", end="", file=dest)
            connection.isolation_level = original_connection_isolation_level
            return
        except KeyboardInterrupt:
            # ^C to reset the line buffer
            linebuffer = ""
            print("\n", end="", file=dest)
def cli(argv, dest=None):
    """
    Command line interface for __file__
    Usage: __file__ [OPTIONS...] [QUERIES...]
    Any trailing, non-option arguments will be executed as SQLite3 queries
    after the data has been imported.
    Options:
    --help, -h            Show this documentation and exit.
    -A FILE, ..., -Z FILE All capital, single-letter options are used to load
                          the specified file into the SQLite3 database. If no
                          "--table" option has been specified immediately
                          preceding the option, the letter name will be used
                          as the table name; loading a file with "-A" will
                          populate the table "A". Similarly, the table schema
                          will be auto-detected when no "--schema" option
                          immediately precedes this option.
    --table=TABLE         Name of table used to store the contents of the
                          next specified CSV file.
    --invalid=METHOD      Determines how rows of invalid data handled. The
                          METHOD can be "warn", "ignore", or "fail" which
                          will cause the script to emit a warning and skip
                          the record, silently skip the record or terminate
                          script execution respectively. When unspecified,
                          defaults to "warn."
    --loglevel=LEVEL      Set logging verbosity level. In order from the
                          highest verbosity to the lowest verbosity, can be
                          one of "DEBUG", "INFO", "WARNING", "ERROR",
                          "CRITICAL". The default value is "WARNING."
    --pretty              Pretty-print results of queries passed as command
                          line arguments instead of tab-separating the
                          results.
    --database=FILE       Path of the SQLite3 database the queries should be
                          executed on. When unspecified, the data is stored
                          volatile memory and becomes inaccessible after the
                          program stops running.
    -i                    Enter interactive mode after importing data. When
                          the "--database" flag is not specified, this is
                          implied.
    -v                    Increase logging verbosity. Can be used repeatedly
                          to further increase verbosity.
    -q                    Decrease logging verbosity. Can be used repeatedly
                          to further decrease verbosity.
    """
    if PYTHON_3:
        letters = string.ascii_uppercase
    else:
        letters = string.uppercase
    # Every capital letter is a short option taking an argument
    # ("A:B:...:Z:"), plus the h/v/q/i flags.
    colopts = ":".join(letters) + ":hvqi"
    longopts = ["table=", "invalid=", "help", "pretty", "database="]
    options, arguments = getopt.gnu_getopt(argv[1:], colopts, longopts)
    if not argv[1:] or ("--help", "") in options or ("-h", "") in options:
        me = os.path.basename(argv[0] or __file__)
        # The usage text above doubles as the runtime help output.
        docstring = cli.__doc__.replace("__file__", me)
        print(textwrap.dedent(docstring).strip(), file=dest)
        sys.exit(0 if argv[1:] else 1)
    loglevels = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")
    loglevel = loglevels.index("WARNING")
    database = None
    prettify = False
    interact = False
    table = None
    loadfile_args = list()
    importer_kwargs = dict()
    for option, value in options:
        # Long options
        if option.startswith("--"):
            if option == "--invalid":
                if value not in ("ignore", "warn", "fail"):
                    raise getopt.GetoptError("Invalid value for --invalid")
                importer_kwargs["ignore_errors"] = value in ("ignore", "warn")
                importer_kwargs["log_warnings"] = value == "warn"
            elif option == "--table":
                # Remembered until the next -A..-Z file option consumes it.
                table = value
            elif option == "--loglevel":
                try:
                    loglevel = loglevels.index(value.upper())
                except ValueError:
                    raise getopt.GetoptError("Invalid log level '%s'" % value)
            elif option == "--pretty":
                prettify = True
            elif option == "--database":
                database = value
        # Logging verbosity modifiers and Interactivity
        elif option in ("-v", "-q", "-i"):
            if option == "-v":
                # Boolean arithmetic clamps the index at the list ends.
                loglevel -= loglevel > 0
            elif option == "-q":
                loglevel += loglevel < (len(loglevels) - 1)
            elif option == "-i":
                interact = True
        # All of the short options that accept arguments are just used for
        # table aliases
        else:
            loadfile_args.append((value, table or option[1]))
            table = None
    # An in-memory database would be unreachable after exit, so default
    # to interactive mode when no --database was given.
    if not interact and database is None:
        interact = True
    loglevel = loglevels[loglevel]
    logging.getLogger().setLevel(getattr(logging, loglevel))
    logging.debug("Log level set to %s.", loglevel)
    connection = sqlite3.connect(database or ":memory:")
    importer = SQLite3CSVImporter(dbc=connection, **importer_kwargs)
    for args in loadfile_args:
        importer.loadfile(*args)
    cursor = connection.cursor()
    for query in arguments:
        if len(arguments) > 1:
            logging.info("Executing '%s'", query)
        else:
            logging.debug("Executing '%s'", query)
        results = cursor.execute(query)
        if prettify:
            results = list(results)
            if results:
                headers = [d[0] for d in cursor.description]
                pretty_print_table([headers] + results, dest=dest)
        else:
            def printable(var):
                """
                Return print function-friendly variable.
                """
                if not PYTHON_3 and isinstance(var, unicode):
                    return var.encode("utf-8", "replace")
                else:
                    return var
            # Tab-separated output; NULLs are rendered as empty strings.
            for r in results:
                columns = ("" if c is None else printable(c) for c in r)
                print(*columns, sep="\t", file=dest)
    if interact:
        sqlite3_repl(connection, dest=dest)
def main():
    """Program entry point.

    Configures bare-message logging, runs the command-line interface on
    ``sys.argv``, and converts the known failure modes into exit codes.
    """
    logging.basicConfig(format="%(message)s")

    def abort(template, error, status):
        # Log the fatal condition, then terminate with the given exit status.
        logging.fatal(template, error)
        sys.exit(status)

    try:
        cli(sys.argv)
    except getopt.GetoptError as error:
        abort("Could not parse command line options: %s", error, EXIT_GENERAL_FAILURE)
    except sqlite3.DatabaseError as error:
        abort("Error updating database: %s", error, EXIT_DATABASE_ERROR)
    except EnvironmentError as error:
        abort("%s", error, EXIT_GENERAL_FAILURE)
# Run the CLI only when executed as a script (not when imported as a module).
if __name__ == "__main__":
    main()
| 36.322443 | 80 | 0.538383 | 7,680 | 0.30034 | 585 | 0.022877 | 1,089 | 0.042587 | 0 | 0 | 9,223 | 0.360682 |
3589c234fc1a0fe7e6d360402ae2ceaf2a97c3d8 | 726 | py | Python | django_analyses/filters/output/output_definition.py | TheLabbingProject/django_analyses | 08cac40a32754a265b37524f08ec6160c69ebea8 | [
"Apache-2.0"
] | 1 | 2020-12-30T12:43:34.000Z | 2020-12-30T12:43:34.000Z | django_analyses/filters/output/output_definition.py | TheLabbingProject/django_analyses | 08cac40a32754a265b37524f08ec6160c69ebea8 | [
"Apache-2.0"
] | 59 | 2019-12-25T13:14:56.000Z | 2021-07-22T12:24:46.000Z | django_analyses/filters/output/output_definition.py | TheLabbingProject/django_analyses | 08cac40a32754a265b37524f08ec6160c69ebea8 | [
"Apache-2.0"
] | 2 | 2020-05-24T06:44:27.000Z | 2020-07-09T15:47:31.000Z | """
Definition of an
:class:`~django_analyses.filters.output.output_definition.OutputDefinitionFilter`
for the :class:`~django_analyses.models.output.definitions.OutputDefinition`
model.
"""
from django_analyses.models.output.definitions.output_definition import \
OutputDefinition
from django_filters import rest_framework as filters
class OutputDefinitionFilter(filters.FilterSet):
    """FilterSet exposing queryable fields for the OutputDefinition model."""

    # Allows filtering definitions by any specification referencing them
    # (through the reverse ``specification_set`` relation).
    output_specification = filters.AllValuesFilter("specification_set")

    class Meta:
        model = OutputDefinition
        fields = ("key", "output_specification")
| 27.923077 | 90 | 0.774105 | 383 | 0.527548 | 0 | 0 | 0 | 0 | 0 | 0 | 396 | 0.545455 |
358a40fd5689697a1d8d1a2da43fe16bdb4a6fd3 | 87 | py | Python | mypage/apps.py | shotastage/neco-sys | 60816ceee3eaf36d8d278db72e741d45c3f4af41 | [
"MIT"
] | 2 | 2021-08-15T02:17:43.000Z | 2022-01-31T14:46:20.000Z | rainy_project/mypage/apps.py | habijung/WEB_HeavyReading_Rainy | 58b7429e88931175ddceab2c280161a267149a15 | [
"MIT"
] | 10 | 2020-02-12T00:37:45.000Z | 2022-03-03T21:58:40.000Z | rainy_project/mypage/apps.py | habijung/WEB_HeavyReading_Rainy | 58b7429e88931175ddceab2c280161a267149a15 | [
"MIT"
] | 2 | 2020-11-17T09:37:24.000Z | 2021-10-04T05:59:02.000Z | from django.apps import AppConfig
class MypageConfig(AppConfig):
    """Application configuration for the ``mypage`` Django app."""

    name = "mypage"
| 14.5 | 33 | 0.747126 | 50 | 0.574713 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.091954 |
358b78e3032d08e2e8e1264f9dd0d6ae5c3241bb | 3,225 | py | Python | additional.py | NatName/BD_2 | 67392c77af02e3e41075c3e79eeea0eb1991ffa1 | [
"MIT"
] | null | null | null | additional.py | NatName/BD_2 | 67392c77af02e3e41075c3e79eeea0eb1991ffa1 | [
"MIT"
] | null | null | null | additional.py | NatName/BD_2 | 67392c77af02e3e41075c3e79eeea0eb1991ffa1 | [
"MIT"
] | null | null | null | import psycopg2
class Additional(object):
    """Read-only helper queries against the demo PostgreSQL schema.

    Every method takes an open psycopg2 ``connection`` and returns fetched
    rows (or a scalar/boolean derived from them); nothing here commits or
    mutates data.

    All *values* are passed as query parameters (``%s``) so they cannot be
    used for SQL injection.  Table/column *identifiers* are still spliced in
    with ``str.format`` because placeholders cannot stand in for identifiers
    in psycopg2 — callers must only pass trusted table names.
    """

    @staticmethod
    def findExistRow(connection, tableName):
        """Return the ``<tableName>Id`` value of one existing row.

        NOTE(review): ``floor(random())`` is always 0 in PostgreSQL, so the
        OFFSET is constant and this effectively returns the first row, not a
        random one — confirm whether randomness was intended.
        """
        cursor = connection.cursor()
        cursor.execute("""SELECT "{0}Id" FROM public."{0}" OFFSET floor(random()) LIMIT 1;"""
                       .format(tableName))
        value = cursor.fetchall()
        return value[0][0]

    @staticmethod
    def findExistingId(connection, tableName, anyId):
        """Return True when ``tableName`` contains a row with id ``anyId``."""
        cursor = connection.cursor()
        cursor.execute("""SELECT "{0}Id" FROM public."{0}" WHERE "{0}Id"=%s;"""
                       .format(tableName), (anyId,))
        return len(cursor.fetchall()) != 0

    @staticmethod
    def findExistingIdOrderTable(connection, tableName, anyId):
        """Return True when the Order table references ``anyId`` via its
        ``<tableName>Id`` foreign-key column."""
        cursor = connection.cursor()
        cursor.execute("""SELECT "{0}Id" FROM public."Order" WHERE "{0}Id"=%s;"""
                       .format(tableName), (anyId,))
        return len(cursor.fetchall()) != 0

    @staticmethod
    def findWordInText(connection, words):
        """Full-text search: items whose description matches the tsquery ``words``."""
        cursor = connection.cursor()
        cursor.execute(
            """SELECT * FROM public."Item" WHERE to_tsvector("ItemDescriptions") @@ to_tsquery(%s);""",
            (words,))
        return cursor.fetchall()

    @staticmethod
    def findTextWithoutWord(connection, words):
        """Full-text search: items whose description does NOT match ``words``."""
        cursor = connection.cursor()
        # Negate the query by prefixing the tsquery NOT operator.
        cursor.execute(
            """SELECT * FROM public."Item" WHERE to_tsvector("ItemDescriptions") @@ to_tsquery(%s);""",
            ('!' + words,))
        return cursor.fetchall()

    @staticmethod
    def findRowBetweenNumbers(connection, first, second):
        """Orders joined with their items where ItemQuantity is in [first, second]."""
        cursor = connection.cursor()
        cursor.execute(
            """SELECT "OrderId", "CustomerId", "ShopId", "OrderDate", "Item"."ItemId", "ItemQuantity" FROM public."Order" INNER JOIN public."Item" ON "Order"."ItemId"="Item"."ItemId" WHERE
            "ItemQuantity" BETWEEN %s AND %s;""",
            (first, second))
        return cursor.fetchall()

    @staticmethod
    def findItemName(connection, name):
        """Orders joined with their items whose ItemName contains ``name``.

        ``%`` / ``_`` inside ``name`` act as LIKE wildcards, matching the
        original behaviour.
        """
        cursor = connection.cursor()
        cursor.execute(
            """SELECT "OrderId", "CustomerId", "ShopId", "OrderDate", "Item"."ItemId", "ItemName" FROM public."Order" INNER JOIN public."Item" ON "Order"."ItemId"="Item"."ItemId" WHERE
            "ItemName" LIKE %s;""",
            ('%' + name + '%',))
        return cursor.fetchall()

    @staticmethod
    def addLogicOperation(word):
        """Normalise a free-text boolean query into tsquery syntax.

        ``and``/``or`` become ``&``/``|`` and adjacent bare terms are joined
        with an implicit ``&``.

        NOTE(review): ``str.replace`` also rewrites 'and'/'or' occurring
        *inside* words (e.g. 'band' -> 'b&'); preserved as-is to keep
        behaviour identical.
        """
        word = word.strip().replace('and', '&').replace('or', '|')
        # Drop empty tokens (and any stray connectives) after splitting.
        tokens = [t for t in word.split(' ') if t not in ('', 'or', 'and')]
        desc = ''
        i = 0
        while i < len(tokens) - 1:
            if tokens[i] in ('|', '&'):
                desc += tokens[i] + ' '
            elif tokens[i + 1] in ('|', '&'):
                # An explicit operator follows; no implicit '&' needed.
                desc += tokens[i] + ' '
            else:
                desc += tokens[i] + ' & '
            i += 1
        desc += tokens[i]
        return desc
| 39.329268 | 202 | 0.548837 | 3,206 | 0.994109 | 0 | 0 | 3,133 | 0.971473 | 0 | 0 | 894 | 0.277209 |
358e7dc25cf28a3ce33d67b2d2dc79f0f2e5bd52 | 12,008 | py | Python | hanoi_window.py | SirIsaacNeutron/tower_of_hanoi | 18f7200b9b90b3117137c611fc31878405a17017 | [
"MIT"
] | null | null | null | hanoi_window.py | SirIsaacNeutron/tower_of_hanoi | 18f7200b9b90b3117137c611fc31878405a17017 | [
"MIT"
] | null | null | null | hanoi_window.py | SirIsaacNeutron/tower_of_hanoi | 18f7200b9b90b3117137c611fc31878405a17017 | [
"MIT"
] | null | null | null | """
Created on Mar 12, 2018
@author: SirIsaacNeutron
"""
import tkinter
import tkinter.messagebox
import hanoi
DEFAULT_FONT = ('Helvetica', 14)
class DiskDialog:
    """Modal dialog asking the player how many Disks per Tower to use.

    After :meth:`show` returns, ``exited_intentionally`` tells whether the
    player pressed "Exit Game", and ``num_disks_per_tower`` holds the
    accepted integer (or ``None`` when no valid value was ever entered,
    e.g. the window was closed via the title bar).
    """

    def __init__(self):
        self._dialog_window = tkinter.Toplevel()

        how_many_disks_label = tkinter.Label(
            master=self._dialog_window,
            text='How many Disks per Tower do you want?',
            font=DEFAULT_FONT)
        how_many_disks_label.grid(row=0, column=0, columnspan=2,
                                  padx=10, pady=10)

        self.disk_entry = tkinter.Entry(master=self._dialog_window, width=20,
                                        font=DEFAULT_FONT)
        self.disk_entry.grid(row=1, column=0, columnspan=2,
                             padx=10, pady=1)

        button_frame = tkinter.Frame(master=self._dialog_window)
        button_frame.grid(row=2, column=0, padx=10, pady=10)

        set_up_button = tkinter.Button(master=button_frame, text='Set Up Game',
                                       font=DEFAULT_FONT,
                                       command=self._on_set_up_button)
        set_up_button.grid(row=0, column=0, padx=10, pady=10)

        exit_button = tkinter.Button(master=button_frame, text='Exit Game',
                                     font=DEFAULT_FONT,
                                     command=self._on_exit_button)
        exit_button.grid(row=0, column=1, padx=10, pady=10)

        self.exited_intentionally = False  # Did the user click the exit button?

        # Defined up front so callers never hit AttributeError when the
        # dialog is closed without a valid entry (e.g. via the title bar).
        self.num_disks_per_tower = None

        # Shown when user input is invalid.  The game needs at least one
        # Disk, so the text says "greater than 0" (the previous wording
        # wrongly suggested 0 was acceptable while the check rejected it).
        self._error_message = 'You have to enter an integer greater than 0'

    def show(self) -> None:
        """Display the dialog modally until it is destroyed."""
        self._dialog_window.grab_set()
        self._dialog_window.wait_window()

    def _on_set_up_button(self) -> None:
        """Validate the entry; keep the dialog open on invalid input."""
        raw_value = self.disk_entry.get()
        try:
            disks = int(raw_value)
        except ValueError:  # Entry was a string or a decimal, not an integer
            tkinter.messagebox.showerror('Error', self._error_message + ', not text or decimals.')
            # Returning keeps self._dialog_window alive for another attempt.
            return None
        if disks <= 0:
            tkinter.messagebox.showerror('Error', self._error_message + '.')
            return None
        self.num_disks_per_tower = disks
        self._dialog_window.destroy()

    def _on_exit_button(self) -> None:
        """Close the dialog and record that the player chose to quit."""
        self._dialog_window.destroy()
        self.exited_intentionally = True
class HanoiWindow:
    """Main Tower of Hanoi window: a button bar, a status label, and a
    canvas on which the three Towers and their Disk sizes are drawn."""

    _BACKGROUND_COLOR = '#FFF3E6'  # Light beige

    def __init__(self):
        """Build the root window, widgets, and draw the empty Towers."""
        self._running = True
        self._root_window = tkinter.Tk()
        self._root_window.title('Tower of Hanoi')
        self._set_up_buttons()
        self._hanoi_canvas = tkinter.Canvas(master=self._root_window, width=500, height=400,
                                            background=HanoiWindow._BACKGROUND_COLOR)
        # Status line shown under the button bar; updated on every move.
        self._move_string = tkinter.StringVar()
        self._move_string.set('No move selected.')
        move_label = tkinter.Label(master=self._root_window, textvariable=self._move_string,
                                   font=DEFAULT_FONT)
        move_label.grid(row=2, column=0, padx=5, pady=5)
        # Note: row here depends on the tower_button_frame's row
        self._hanoi_canvas.grid(row=3, column=0, padx=10, pady=10)
        self._draw_towers()
        # Were the Disks already drawn? (Used to ensure Disk sizes are printed correctly)
        self._disks_already_drawn = False

    def _set_up_buttons(self) -> None:
        """Add buttons to the top of the window."""
        button_frame = tkinter.Frame(master=self._root_window)
        button_frame.grid(row=0, column=0, padx=10, pady=10)
        help_button = tkinter.Button(master=button_frame, text='Help', font=DEFAULT_FONT,
                                     command=self._on_help_button)
        help_button.pack(side=tkinter.LEFT)
        restart_button = tkinter.Button(master=button_frame, text='Restart', font=DEFAULT_FONT,
                                        command=self._on_restart_button)
        restart_button.pack(side=tkinter.LEFT)
        tower_one_button = tkinter.Button(master=button_frame, text='Tower 1',
                                          font=DEFAULT_FONT, command=self._on_tower_one)
        tower_one_button.pack(side=tkinter.LEFT)
        tower_two_button = tkinter.Button(master=button_frame, text='Tower 2',
                                          font=DEFAULT_FONT, command=self._on_tower_two)
        tower_two_button.pack(side=tkinter.LEFT)
        tower_three_button = tkinter.Button(master=button_frame, text='Tower 3',
                                            font=DEFAULT_FONT, command=self._on_tower_three)
        tower_three_button.pack(side=tkinter.LEFT)
        # Pending move selection: origin is picked first, destination second.
        self._origin = ''
        self._destination = ''

    def _on_tower_one(self) -> None:
        """Tower 1 button handler."""
        self._set_origin_and_or_destination('Tower 1')

    def _set_origin_and_or_destination(self, tower_str: str) -> None:
        """Set self._origin and/or self._destination to be some tower_str.

        The first click records the origin; the second click either performs
        the move or (when the same tower is clicked twice) cancels it.
        """
        TOWER_DICT = {'Tower 1': self._game.tower_one, 'Tower 2': self._game.tower_two,
                      'Tower 3': self._game.tower_three}
        if self._origin == '':
            self._origin = tower_str
            self._move_string.set('Moving from ' + self._origin + ' into... ')
        else:
            self._destination = tower_str
            if self._origin != self._destination and self._destination != '':
                self._make_move(TOWER_DICT)
            else:
                self._move_string.set('Move canceled.')
            # Either way the pending selection is consumed.
            self._origin = ''
            self._destination = ''

    def _make_move(self, tower_dict: dict) -> None:
        """Attempt the selected move, reporting game-rule errors in the
        status label, and redraw the Disks afterwards."""
        try:
            tower_dict[self._origin].move_disk_to(tower_dict[self._destination])
        except hanoi.InvalidMoveError:
            self._move_string.set("Invalid move! You can't put a bigger Disk on top of a "
                                  + 'smaller Disk.')
            return None
        except hanoi.InvalidFirstMoveError:
            self._move_string.set('Error: you have to make your first move from Tower 1!')
            return None
        except hanoi.NoDisksError:
            self._move_string.set('Error: ' + self._origin + ' has no Disks!')
            return None
        self._move_string.set('Moved from ' + self._origin + ' to ' + self._destination
                              + '.')
        self._game.num_moves_made += 1
        if self._game.is_over():
            self._move_string.set('Congratulations! You solved Tower of Hanoi!\n'
                                  + 'Moves Taken: ' + str(self._game.num_moves_made) + '\n'
                                  + 'Min. # of Moves Required: '
                                  + str(self._game.min_moves_required))
        self._draw_disks()

    def _draw_disks(self) -> None:
        """Render every Disk's size as text on its Tower, top to bottom."""
        tower_tower_xs = {self._tower_one_x: self._game.tower_one,
                          self._tower_two_x: self._game.tower_two,
                          self._tower_three_x: self._game.tower_three}
        # 'Disk_1', 'Disk_2', 'Disk_3', and so on.
        # We need underscores here because tags cannot contain whitespace.
        # These tags are used to prevent the Disk size text from overwriting itself
        # when the player makes moves.
        disk_tags = ['Disk_' + str(disk.size) for tower in tower_tower_xs.values()
                     for disk in tower if disk != hanoi.EMPTY]
        if self._disks_already_drawn:
            # Remove the previous rendering before drawing the new state.
            for tag in disk_tags:
                self._hanoi_canvas.delete(tag)
        current_tag_index = 0
        for tower_x, tower in tower_tower_xs.items():
            topmost_y = 35
            for disk in tower:
                if disk == hanoi.EMPTY:
                    # We have to increment topmost_y here in order to represent the Disks
                    # falling down as far as possible.
                    topmost_y += 15
                    continue
                else:
                    # We need to do 'tower_x + 5' here because tower_x is the x-coordinate
                    # of tower's upper-left corner. If we did not add 5 to tower_x, the text
                    # would be in the wrong place.
                    self._hanoi_canvas.create_text(tower_x + 5, topmost_y, anchor=tkinter.W,
                                                   font=DEFAULT_FONT,
                                                   text=str(disk.size),
                                                   tag=disk_tags[current_tag_index])
                    topmost_y += 15
                    current_tag_index += 1
        self._disks_already_drawn = True

    def _on_tower_two(self) -> None:
        """Tower 2 button handler."""
        self._set_origin_and_or_destination('Tower 2')

    def _on_tower_three(self) -> None:
        """Tower 3 button handler."""
        self._set_origin_and_or_destination('Tower 3')

    def _draw_towers(self) -> None:
        """Note: the width of each Tower is 25. The upper-left corner
        of each Tower has a y-coordinate of 25, and the upper-right corner
        a y-coordinate of 400.
        """
        TOWER_COLOR = 'white'
        self._tower_one_x = 50
        self._hanoi_canvas.create_rectangle(self._tower_one_x, 25, 75, 400, fill=TOWER_COLOR,
                                            tags='Tower 1')
        self._tower_two_x = 240
        self._hanoi_canvas.create_rectangle(self._tower_two_x, 25, 265, 400, fill=TOWER_COLOR,
                                            tags='Tower 2')
        self._tower_three_x = 425
        self._hanoi_canvas.create_rectangle(self._tower_three_x, 25, 450, 400, fill=TOWER_COLOR,
                                            tags='Tower 3')

    def run(self) -> None:
        """Run a session of Tower of Hanoi."""
        disk_dialog = DiskDialog()
        disk_dialog.show()
        # NOTE(review): if the dialog is closed via the window manager,
        # exited_intentionally stays False but num_disks_per_tower may never
        # have been set to a valid integer — confirm handling.
        if not disk_dialog.exited_intentionally:
            self._num_disks_per_tower = disk_dialog.num_disks_per_tower
            self._game = hanoi.Game(self._num_disks_per_tower)
            self._draw_disks()
            self._root_window.mainloop()

    def _on_help_button(self) -> None:
        """Show the rules plus UI instructions in a message box."""
        help_message = (hanoi.HELP_MESSAGE + '\n\nThe Towers are white rectangles, and the Disks are '
                        + "numbers that represent the Disks' sizes.\n\n"
                        + "To select a Tower to move from, click on one of the 'Tower' buttons. "
                        + "Then, to select the Tower to move to, click on another one of the 'Tower' buttons."
                        + " In short, the first Tower button you click is the Tower you're moving from,"
                        + " and the second is the one you're moving to. \n\nTo cancel a move from a Tower,"
                        + " click on the button of the Tower you're moving from again.")
        tkinter.messagebox.showinfo('Welcome to the Tower of Hanoi!',
                                    help_message)

    def _on_restart_button(self) -> None:
        """Start a fresh game with the same number of Disks."""
        self._game = hanoi.Game(self._num_disks_per_tower)
        self._move_string.set('Restarted the game.')
        self._draw_disks()
# Launch the game when this file is run as a script.
if __name__ == '__main__':
    HanoiWindow().run()
| 43.194245 | 110 | 0.553131 | 11,778 | 0.980846 | 0 | 0 | 0 | 0 | 0 | 0 | 2,555 | 0.212775 |
358ee60cb29f177fb65f6050d60d87a71d7179ec | 4,801 | py | Python | parser.py | PouletFreak/mailparser | 6877b879cbaaccb5e00491726ead740a42922ae3 | [
"MIT"
] | 1 | 2019-07-02T02:05:07.000Z | 2019-07-02T02:05:07.000Z | parser.py | PouletFreak/mailparser | 6877b879cbaaccb5e00491726ead740a42922ae3 | [
"MIT"
] | null | null | null | parser.py | PouletFreak/mailparser | 6877b879cbaaccb5e00491726ead740a42922ae3 | [
"MIT"
] | null | null | null | import email, json, os, re
import magic
import ssdeep
import hashlib
import datetime
def md5(fname):
    """Return the hex MD5 digest of the file at *fname*, streamed in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(fname, "rb") as stream:
        while True:
            block = stream.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def sha1(fname):
    """Return the hex SHA-1 digest of the file at *fname*, streamed in 4 KiB chunks."""
    digest = hashlib.sha1()
    with open(fname, "rb") as stream:
        while True:
            block = stream.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def sha256(fname):
    """Return the hex SHA-256 digest of the file at *fname*, streamed in 4 KiB chunks."""
    digest = hashlib.sha256()
    with open(fname, "rb") as stream:
        while True:
            block = stream.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def sha512(fname):
    """Return the hex SHA-512 digest of the file at *fname*, streamed in 4 KiB chunks."""
    digest = hashlib.sha512()
    with open(fname, "rb") as stream:
        while True:
            block = stream.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def main():
    """Parse a hard-coded .eml file, hash it, extract header/body metadata,
    save attachments to disk, and print everything as a JSON document.

    Attachments are written under ``./<first 10 chars of the filename>/``.
    """
    eml_path = '31a891f9e074c81b4688ac5b9faac9c1e3786a20'
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(eml_path, 'r') as handle:
        msg = email.message_from_file(handle)

    message_json = {}
    message_json['parsedate'] = str(datetime.datetime.now())
    message_json['filename'] = eml_path
    message_json['md5'] = md5(eml_path)
    message_json['sha1'] = sha1(eml_path)
    message_json['sha512'] = sha512(eml_path)
    message_json['sha256'] = sha256(eml_path)

    detach_dir = './' + message_json['filename'][0:10]
    if not os.path.exists(detach_dir):
        os.makedirs(detach_dir)

    scan_json = {}
    scan_json['Date'] = msg['Date']
    scan_json['From'] = msg['From']
    scan_json['Subject'] = msg['Subject']
    scan_json['To'] = msg['To']
    scan_json['Cc'] = msg['Cc']
    scan_json['Bcc'] = msg['Bcc']
    scan_json['References'] = msg['References']
    scan_json['body'] = ''
    scan_json['body_html'] = ''
    scan_json['xml'] = ''
    scan_json['email_addresses'] = []
    scan_json['ip_addresses'] = []
    scan_json['attachments'] = []
    message_json['scan'] = scan_json

    # Hoisted out of the loop: the patterns never change per part.
    application_pattern = re.compile('application/*')
    image_pattern = re.compile('image/*')
    audio_pattern = re.compile('audio/*')
    video_pattern = re.compile('video/*')

    attachment = {}
    # Hoisted: the original re-initialised this each iteration, so every
    # unnamed attachment collided on the same fallback filename.
    counter = 1
    for part in msg.walk():
        content_type = part.get_content_type()
        if content_type == 'text/plain':
            # Fill the main email part into the JSON object and search it
            # for valid email and IP addresses.
            mainpart = part.get_payload()
            scan_json['body'] += mainpart
            mail_matches = re.findall(r'[\w\.-]+@[\w\.-]+', mainpart)  # finds mail addresses in text
            for match in mail_matches:
                if match not in scan_json['email_addresses']:
                    scan_json['email_addresses'].append(match)
            ip_matches = re.findall(r'[0-9]+(?:\.[0-9]+){3}', mainpart)  # finds IP addresses in text
            for match in ip_matches:
                scan_json['ip_addresses'].append(match)
        if content_type == 'text/html':
            scan_json['body_html'] += part.get_payload()
        if content_type == 'text/xml':
            scan_json['xml'] += part.get_payload()
        if re.match(image_pattern, content_type) \
                or re.match(application_pattern, content_type) \
                or re.match(audio_pattern, content_type) \
                or re.match(video_pattern, content_type):
            filename = part.get_filename()
            if not filename:
                # Fallback name for unnamed parts; the original was missing
                # the dot ('part-001bin') and never advanced the counter.
                filename = 'part-%03d.%s' % (counter, 'bin')
                counter += 1
            att_path = os.path.join(detach_dir, filename)
            print(att_path)
            attachment['filepath'] = att_path
            attachment['filename'] = filename
            attachment['Type'] = content_type
            if not os.path.isfile(att_path):
                with open(att_path, 'wb') as fp:
                    fp.write(part.get_payload(decode=True))
            attachment['size'] = os.path.getsize(att_path)
            attachment['magic'] = magic.from_file(att_path, mime=True)
            try:
                attachment['ssdeep'] = ssdeep.hash_from_file(att_path)
            except:
                # Best effort: ssdeep may be missing/unsupported for this file.
                pass
            attachment['md5'] = md5(att_path)
            attachment['sha1'] = sha1(att_path)
            attachment['sha512'] = sha512(att_path)
            attachment['sha256'] = sha256(att_path)
            scan_json['attachments'].append(attachment)
            attachment = {}

    try:
        json_data = json.dumps(message_json, indent=4, sort_keys=True)
    except UnicodeDecodeError:
        # Fall back to raw (non-escaped) unicode output.
        json_data = json.dumps(message_json, indent=4, sort_keys=True, ensure_ascii=False)
    print(json_data)
# Script entry point.
if __name__ == '__main__':
    main()
358f891b1298dda3ec2ff6f47a9bf5842305d9ac | 5,937 | py | Python | analysis_vis/scripts/CovarEpi.py | arubenstein/deep_seq | 96c2bc131dc3bd3afb05486bfbc6f7297c57e604 | [
"BSD-2-Clause"
] | null | null | null | analysis_vis/scripts/CovarEpi.py | arubenstein/deep_seq | 96c2bc131dc3bd3afb05486bfbc6f7297c57e604 | [
"BSD-2-Clause"
] | null | null | null | analysis_vis/scripts/CovarEpi.py | arubenstein/deep_seq | 96c2bc131dc3bd3afb05486bfbc6f7297c57e604 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""Create edges and nodes from a list of sequences that are a given hamming distance apart"""
import itertools
import sys
import operator
import numpy as np
import argparse
from general_seq import conv
from general_seq import seq_IO
from plot import conv as pconv
import matplotlib.pyplot as plt
import math
import matplotlib
def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
    '''
    Offset the "center" of a colormap, which is useful for data with a
    negative min and positive max whose colormap midpoint should sit at zero.

    Parameters
    ----------
    cmap : the matplotlib colormap to be altered
    start : offset from the lowest point of the colormap's range,
        between 0.0 and `midpoint` (default 0.0, no lower offset)
    midpoint : the new center of the colormap, between 0.0 and 1.0
        (default 0.5, no shift); in general 1 - vmax/(vmax + abs(vmin)),
        e.g. for data in [-15, 5] with center 0 use 1 - 5/(5 + 15) = 0.75
    stop : offset from the highest point of the colormap's range,
        between `midpoint` and 1.0 (default 1.0, no upper offset)
    name : name under which the new colormap is registered
    '''
    # Sample the source colormap uniformly...
    sample_points = np.linspace(start, stop, 257)
    # ...but remap those samples onto a scale whose center is `midpoint`.
    shifted_points = np.hstack([
        np.linspace(0.0, midpoint, 128, endpoint=False),
        np.linspace(midpoint, 1.0, 129, endpoint=True),
    ])

    segments = {channel: [] for channel in ('red', 'green', 'blue', 'alpha')}
    for sample, shifted in zip(sample_points, shifted_points):
        red, green, blue, alpha = cmap(sample)
        segments['red'].append((shifted, red, red))
        segments['green'].append((shifted, green, green))
        segments['blue'].append((shifted, blue, blue))
        segments['alpha'].append((shifted, alpha, alpha))

    shifted_cmap = matplotlib.colors.LinearSegmentedColormap(name, segments)
    plt.register_cmap(cmap=shifted_cmap)
    return shifted_cmap
def plot_heatmap(ax, data, colormap, ticks, labels, xlabel, ylabel, title, vmin, vmax):
    """Draw ``data`` as a pseudocolor heatmap on ``ax`` and return the mesh.

    Major tick labels are blanked; ``labels`` are placed on minor ticks at
    the positions in ``ticks`` (cell centers) on both axes, and the tick
    marks themselves are hidden.

    Returns the object from ``ax.pcolor`` so the caller can attach a colorbar.
    """
    mesh = ax.pcolor(data, cmap=colormap, vmin=vmin, vmax=vmax)

    # Blank the default (major) labels and put the per-cell labels on minor
    # ticks so they sit at the cell centers.
    ax.set_xticklabels('')
    ax.set_yticklabels('')
    ax.set_xticks(ticks, minor=True)
    ax.set_yticks(ticks, minor=True)
    ax.set_xticklabels(labels, minor=True)
    ax.set_yticklabels(labels, minor=True)

    # Hide the tick marks (the original called this twice for the x axis;
    # once is sufficient).
    ax.xaxis.set_ticks_position('none')
    ax.yaxis.set_ticks_position('none')

    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    return mesh
def main(sequence_file):
    """Compute and plot pairwise epistasis heatmaps for the first 5 positions.

    For every pair of the first five sequence positions, a 20x20 matrix of
    log-epistasis values (one cell per amino-acid pair) is computed with
    ``conv.calc_epi_log`` and then row/column reweighted (rcw).  All pairs
    are drawn on a shared, percentile-clipped color scale, together with
    average/maximum panels and a standalone figure for positions (3, 4).
    """
    sequences = seq_IO.read_sequences(sequence_file)

    fig, axarr = pconv.create_ax(6, 2, shx=False, shy=False)
    fig2, axarr2 = pconv.create_ax(1, 1, shx=False, shy=False)

    ticks = [i + 0.5 for i in np.arange(0, 20)]
    aa_string = 'ACDEFGHIKLMNPQRSTVWY'

    # Center the blue-white-red map on 0 and compress its dynamic range.
    shrunk_cmap = shiftedColorMap(matplotlib.cm.bwr, start=0.25, midpoint=0.5,
                                  stop=0.75, name='shrunk')

    full_data = []       # one 20x20 rcw-epistasis matrix per position pair
    positions = []       # the (pos1, pos2) pair matching each matrix
    full_data_flat = []  # all cell values, for the global percentile clip

    for pos1, pos2 in itertools.combinations(range(0, 5), 2):
        data = np.zeros((20, 20))
        for ind1, aa1 in enumerate(aa_string):
            for ind2, aa2 in enumerate(aa_string):
                data[ind1, ind2] = conv.calc_epi_log(sequences, pos1, pos2, aa1, aa2)

        # Row/column-wise reweighting: divide each cell by the mean of its
        # row and column (the cell itself excluded), floored at 0.05.
        avg_pos1 = np.sum(data, axis=1)
        avg_pos2 = np.sum(data, axis=0)
        for ind1 in range(0, 20):   # range replaces Python-2-only xrange
            for ind2 in range(0, 20):
                p = (avg_pos1[ind1] + avg_pos2[ind2] - data[ind1, ind2]) / 19  # n-1 = 19
                p = p if p > 0.05 else 0.05  # min 0.05 for rcw
                data[ind1, ind2] = data[ind1, ind2] / p  # rcw

        full_data.append(data)
        positions.append((pos1, pos2))
        full_data_flat.extend(data.flatten())

    # Clip the shared color scale at the 99.9th percentile of all values.
    perc = np.percentile(full_data_flat, 99.9)

    for ind, (data, (pos1, pos2)) in enumerate(zip(full_data, positions)):
        if pos1 == 2 and pos2 == 3:
            # Standalone figure for the (3, 4) position pair.
            CS2 = plot_heatmap(axarr2[0, 0], data, shrunk_cmap, ticks, list(aa_string),
                               "position {0}".format(pos2 + 1), "position {0}".format(pos1 + 1),
                               "", vmin=-1.0 * perc, vmax=perc)
        # '//' keeps the row index an int (math.floor(ind/5) yields a float
        # under true division).
        y_ind = ind % 5
        x_ind = ind // 5
        CS = plot_heatmap(axarr[x_ind, y_ind], data, shrunk_cmap, ticks, list(aa_string),
                          "position {0}".format(pos2 + 1), "position {0}".format(pos1 + 1),
                          "MI: {0:.4f}".format(conv.covar_MI(sequences, pos1, pos2)),
                          vmin=-1.0 * perc, vmax=perc)

    average_data = np.mean(full_data, axis=0)
    max_data = np.max(full_data, axis=0)
    CS = plot_heatmap(axarr[0, 5], average_data, shrunk_cmap, ticks, list(aa_string),
                      "", "", "Averages", vmin=-1.0 * perc, vmax=perc)
    CS = plot_heatmap(axarr[1, 5], max_data, shrunk_cmap, ticks, list(aa_string),
                      "", "", "Maximums", vmin=-1.0 * perc, vmax=perc)

    # Shared colorbars on the right-hand side of each figure.
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    fig2.subplots_adjust(right=0.8)
    cbar_ax2 = fig2.add_axes([0.85, 0.15, 0.05, 0.7])
    plt.colorbar(CS, cax=cbar_ax)
    plt.colorbar(CS2, cax=cbar_ax2)

    pconv.save_fig(fig, sequence_file, "heatmap", 18, 6, tight=False, size=7)
    pconv.save_fig(fig2, sequence_file, "heatmap3_4", 4, 4, tight=False, size=10)
# Command-line entry point: parse the sequence-file path and run the analysis.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--sequence_file', '-d', help="text file which contains sequences")
    args = parser.parse_args()
    main(args.sequence_file)
| 36.648148 | 243 | 0.640391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,686 | 0.283982 |
359068a8c7d26ee7c6e843121d47283532279c4e | 1,664 | py | Python | Modules/DirectoryIndex.py | spanoselias/LazyReplicationTool | 8fdc968e4fdf82992b704e1c7422f3a5591798eb | [
"MIT"
] | null | null | null | Modules/DirectoryIndex.py | spanoselias/LazyReplicationTool | 8fdc968e4fdf82992b704e1c7422f3a5591798eb | [
"MIT"
] | null | null | null | Modules/DirectoryIndex.py | spanoselias/LazyReplicationTool | 8fdc968e4fdf82992b704e1c7422f3a5591798eb | [
"MIT"
] | null | null | null | import os
import pickle
from Utils import DirectoryUtils, IOUtils, LoggingUtils
# Write to the disk the structure such that will be
# persistent.
from Utils.FilesUtils import readConfigFile
def writePersistentStructure(filename, structure):
    """Pickle ``structure`` to ``filename`` so it persists across runs.

    Best-effort: on any failure a message is printed and the function
    returns normally.  (The original closed the handle *after* the except
    block, so a failed ``open`` raised NameError instead of being reported.)
    """
    try:
        with open(filename, "wb") as sink:
            pickle.dump(structure, sink)
    except Exception:
        print("Unable to write persistent data structure in the following location: {}".format(filename))
# Load a persistent data structure into memory.
def readPersistentStructure(filename):
    """Load a pickled structure from ``../Indexes/<filename>`` (relative to
    this module) back into memory.

    Returns the unpickled object, or ``None`` when the file cannot be read.
    (The original referenced names left unbound on failure, raising
    UnboundLocalError, and never closed the handle.)
    """
    dirname = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'Indexes'))
    path = os.path.join(dirname, filename)
    try:
        with open(path, "rb") as source:
            return pickle.load(source)
    except Exception:
        print("Unable to read persistent data structure in the following location: {}".format(path))
        return None
# Script body: when shouldRead is True, scan the configured source directory
# and persist its metadata index; otherwise reload the saved index and
# report how many entries it contains.
shouldRead = True
if shouldRead:
    conf = readConfigFile()
    readPath = conf['copyPath']
    destPath = conf['destPath']
    LoggingUtils.log('Start reading the following directory: {}'.format(readPath))
    # 'entries' avoids shadowing the builtin name used by the original.
    entries = DirectoryUtils.readDirectoryMetadataObj(readPath)
    print(len(entries))
    # Persist the index under ../Indexes relative to this module.
    index_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'Indexes'))
    index_path = os.path.join(index_dir, 'directoriesIdx.idx')
    writePersistentStructure(index_path, pickle.dumps(entries))
else:
    stored = readPersistentStructure('directoriesIdx.idx')
    print(len(pickle.loads(stored)))
| 28.689655 | 105 | 0.713942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 421 | 0.253005 |
35919643af15226e39bb83cf34dbdca479b35d9b | 5,046 | py | Python | perslay/utils.py | YuryUoL/perslay | b7f61ae60eaca110ef125d1c90685318ff934ae2 | [
"MIT"
] | null | null | null | perslay/utils.py | YuryUoL/perslay | b7f61ae60eaca110ef125d1c90685318ff934ae2 | [
"MIT"
] | null | null | null | perslay/utils.py | YuryUoL/perslay | b7f61ae60eaca110ef125d1c90685318ff934ae2 | [
"MIT"
] | null | null | null | """Module :mod:`perslay.utils` provide utils functions."""
# Authors: Mathieu Carriere <mathieu.carriere3@gmail.com>
# Theo Lacombe <theo.lacombe@inria.fr>
# Martin Royer <martin.royer@inria.fr>
# License: MIT
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import gudhi as gd
# Input utility functions for persistence diagrams
def diag_to_dict(diag_file, filts):
    """Convert a mapping of persistence diagrams into a dict of numpy arrays.

    ``diag_file`` maps a filtration name to a mapping whose keys are the
    string indices "0", "1", ... of individual diagrams (e.g. an open HDF5
    file — assumed structure, confirm against callers).  When ``filts`` is
    empty, every filtration in ``diag_file`` is converted.
    """
    if len(filts) == 0:
        filts = diag_file.keys()
    converted = dict()
    for filtration in filts:
        group = diag_file[filtration]
        converted[filtration] = [np.array(group[str(i)])
                                 for i in range(len(group.keys()))]
    return converted
# diagrams utils
def get_base_simplex(A):
    """Build the 1-skeleton of the graph with adjacency matrix ``A`` as a
    gudhi SimplexTree filtration.

    Every vertex, and every edge with a positive entry in the upper triangle
    of ``A``, is inserted with filtration value -1e10 so later assignments
    can only raise it.  Returns the tree's filtration.
    """
    n = A.shape[0]
    tree = gd.SimplexTree()
    for u in range(n):
        tree.insert([u], filtration=-1e10)
        for v in range(u + 1, n):
            if A[u, v] > 0:
                tree.insert([u, v], filtration=-1e10)
    return tree.get_filtration()
# graph utils
def hks_signature(eigenvectors, eigenvals, time):
    """Heat kernel signature at diffusion time ``time``:
    sum_k exp(-time * lambda_k) * phi_k(x)^2 for every vertex x."""
    # Scaling columns by the weights is equivalent to (and cheaper than)
    # multiplying by a diagonal matrix.
    weights = np.exp(-time * eigenvals)
    return (np.square(eigenvectors) * weights).sum(axis=1)
def apply_graph_extended_persistence(A, filtration_val, basesimplex):
    """Compute the extended persistence diagrams of a graph via a cone trick.

    ``A`` is the adjacency matrix, ``filtration_val`` holds either one value
    per vertex (1-d) or one value per edge (2-d), and ``basesimplex`` is the
    base filtration from ``get_base_simplex``.  Returns four (n, 2) arrays:
    (dgmOrd0, dgmExt0, dgmRel1, dgmExt1).
    """
    num_vertices = A.shape[0]
    (xs, ys) = np.where(np.triu(A))
    num_edges = len(xs)

    # Range of the filtration values (over vertices or over existing edges).
    if len(filtration_val.shape) == 1:
        min_val, max_val = filtration_val.min(), filtration_val.max()
    else:
        min_val = min([filtration_val[xs[i], ys[i]] for i in range(num_edges)])
        max_val = max([filtration_val[xs[i], ys[i]] for i in range(num_edges)])

    st = gd.SimplexTree()
    st.set_dimension(2)

    # Cone every base simplex over the extra vertex -2 (the cone apex).
    for simplex, filt in basesimplex:
        st.insert(simplex=simplex + [-2], filtration=-3)

    if len(filtration_val.shape) == 1:
        # Vertex-valued filtration: ascending part fa mapped into [-2, -1],
        # descending part fd into [1, 2] (or +/-0.5 when the range is flat).
        if max_val == min_val:
            fa = -.5 * np.ones(filtration_val.shape)
            fd = .5 * np.ones(filtration_val.shape)
        else:
            fa = -2 + (filtration_val - min_val) / (max_val - min_val)
            fd = 2 - (filtration_val - min_val) / (max_val - min_val)
        for vid in range(num_vertices):
            st.assign_filtration(simplex=[vid], filtration=fa[vid])
            st.assign_filtration(simplex=[vid, -2], filtration=fd[vid])
    else:
        # Edge-valued filtration: same [-2, -1] / [1, 2] rescaling, applied
        # to edges and their cones.
        if max_val == min_val:
            fa = -.5 * np.ones(filtration_val.shape)
            fd = .5 * np.ones(filtration_val.shape)
        else:
            fa = -2 + (filtration_val - min_val) / (max_val - min_val)
            fd = 2 - (filtration_val - min_val) / (max_val - min_val)
        for eid in range(num_edges):
            vidx, vidy = xs[eid], ys[eid]
            st.assign_filtration(simplex=[vidx, vidy], filtration=fa[vidx, vidy])
            st.assign_filtration(simplex=[vidx, vidy, -2], filtration=fd[vidx, vidy])
        # Non-isolated vertices inherit the minimum value of their edges.
        for vid in range(num_vertices):
            if len(np.where(A[vid, :] > 0)[0]) > 0:
                st.assign_filtration(simplex=[vid], filtration=min(fa[vid, np.where(A[vid, :] > 0)[0]]))
                st.assign_filtration(simplex=[vid, -2], filtration=min(fd[vid, np.where(A[vid, :] > 0)[0]]))

    st.make_filtration_non_decreasing()
    distorted_dgm = st.persistence()
    normal_dgm = dict()
    normal_dgm["Ord0"], normal_dgm["Rel1"], normal_dgm["Ext0"], normal_dgm["Ext1"] = [], [], [], []
    for point in range(len(distorted_dgm)):
        dim, b, d = distorted_dgm[point][0], distorted_dgm[point][1][0], distorted_dgm[point][1][1]
        # Classify by which half of the rescaled axis birth/death fall in:
        # both in [-2,-1] -> ordinary, both in [1,2] -> relative,
        # straddling -> extended.
        pt_type = "unknown"
        if (-2 <= b <= -1 and -2 <= d <= -1) or (b == -.5 and d == -.5):
            pt_type = "Ord" + str(dim)
        if (1 <= b <= 2 and 1 <= d <= 2) or (b == .5 and d == .5):
            pt_type = "Rel" + str(dim)
        if (-2 <= b <= -1 and 1 <= d <= 2) or (b == -.5 and d == .5):
            pt_type = "Ext" + str(dim)
        if np.isinf(d):
            continue
        else:
            # Undo the rescaling: map back into the original value range,
            # and store (birth, death) sorted ascending.
            b, d = min_val + (2 - abs(b)) * (max_val - min_val), min_val + (2 - abs(d)) * (max_val - min_val)
            if b <= d:
                normal_dgm[pt_type].append(tuple([distorted_dgm[point][0], tuple([b, d])]))
            else:
                normal_dgm[pt_type].append(tuple([distorted_dgm[point][0], tuple([d, b])]))

    # Strip dimensions; keep just the (birth, death) pairs per diagram type.
    dgmOrd0 = np.array([normal_dgm["Ord0"][point][1] for point in range(len(normal_dgm["Ord0"]))])
    dgmExt0 = np.array([normal_dgm["Ext0"][point][1] for point in range(len(normal_dgm["Ext0"]))])
    dgmRel1 = np.array([normal_dgm["Rel1"][point][1] for point in range(len(normal_dgm["Rel1"]))])
    dgmExt1 = np.array([normal_dgm["Ext1"][point][1] for point in range(len(normal_dgm["Ext1"]))])
    # Normalise empty diagrams to shape (0, 2) so downstream code can rely
    # on two columns.
    if dgmOrd0.shape[0] == 0:
        dgmOrd0 = np.zeros([0, 2])
    if dgmExt1.shape[0] == 0:
        dgmExt1 = np.zeros([0, 2])
    if dgmExt0.shape[0] == 0:
        dgmExt0 = np.zeros([0, 2])
    if dgmRel1.shape[0] == 0:
        dgmRel1 = np.zeros([0, 2])
    return dgmOrd0, dgmExt0, dgmRel1, dgmExt1
| 41.02439 | 109 | 0.589378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.078874 |
3591df80ea06a09fcf32bb3e38ed7f6312abf19d | 389 | py | Python | fabric/exceptions.py | lin-zh-cn/fabric | fd60d7e3d31eeddc2b90cfe367a573aff9f0540d | [
"BSD-2-Clause"
] | 1 | 2016-05-17T19:04:42.000Z | 2016-05-17T19:04:42.000Z | fabric/exceptions.py | offbyone/fabric | 4b1cd1f1326deee75d8699161e12d2da0cb09a1d | [
"BSD-2-Clause"
] | null | null | null | fabric/exceptions.py | offbyone/fabric | 4b1cd1f1326deee75d8699161e12d2da0cb09a1d | [
"BSD-2-Clause"
] | null | null | null | # TODO: this may want to move to Invoke if we can find a use for it there too?
# Or make it _more_ narrowly focused and stay here?
class NothingToDo(Exception):
    """Raised when a requested operation turns out to require no work at all."""
    pass
class GroupException(Exception):
    """
    Lightweight exception wrapper for `.GroupResult` when one contains errors.
    .. versionadded:: 2.0
    """
    def __init__(self, result):
        """Store the failing `.GroupResult` on ``self.result`` for caller inspection."""
        self.result = result
| 24.3125 | 78 | 0.686375 | 254 | 0.652956 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.632391 |
3591f068c0bebd3f9604752d46dc6a1ba19f8459 | 20,183 | py | Python | baselines/utils/agent_can_choose_helper.py | ClemenceLanfranchi/Flatland_project | 9ce4c80bb25f6cdca28cfd607ac733cb16688cc1 | [
"MIT"
] | null | null | null | baselines/utils/agent_can_choose_helper.py | ClemenceLanfranchi/Flatland_project | 9ce4c80bb25f6cdca28cfd607ac733cb16688cc1 | [
"MIT"
] | null | null | null | baselines/utils/agent_can_choose_helper.py | ClemenceLanfranchi/Flatland_project | 9ce4c80bb25f6cdca28cfd607ac733cb16688cc1 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from flatland.core.grid.grid4_utils import get_new_position
from flatland.envs.agent_utils import TrainState
from flatland.utils.rendertools import RenderTool, AgentRenderVariant
from utils.fast_methods import fast_count_nonzero, fast_argmax
class AgentCanChooseHelper:
def __init__(self):
self.render_debug_information = False
def reset(self, env):
self.env = env
if self.env is not None:
self.env.dev_obs_dict = {}
self.switches = {}
self.switches_neighbours = {}
self.switch_cluster = {}
self.switch_cluster_occupied = {}
self.switch_cluster_lock = {}
self.switch_cluster_grid = None
self.agent_positions = None
self.reset_swicht_cluster_lock()
self.reset_switch_cluster_occupied()
if self.env is not None:
self.find_all_cell_where_agent_can_choose()
self.calculate_agent_positions()
def get_agent_positions(self):
return self.agent_positions
def calculate_agent_positions(self):
self.agent_positions: np.ndarray = np.full((self.env.height, self.env.width), -1)
for agent_handle in self.env.get_agent_handles():
agent = self.env.agents[agent_handle]
if agent.state in [TrainState.MOVING, TrainState.STOPPED, TrainState.MALFUNCTION]:
position = agent.position
if position is None:
position = agent.initial_position
self.agent_positions[position] = agent_handle
def clear_switch_cluster_lock(self):
'''
clean up switch cluster lock
'''
self.switch_cluster_lock = {}
def clear_switch_cluster_occupied(self):
'''
clean up switch cluster occupied
'''
self.switch_cluster_occupied = {}
def lock_switch_cluster(self, handle, agent_pos, agent_dir):
'''
Lock the switch cluster if possible
:param handle: Agent handle
:param agent_pos: position to lock
:param agent_dir: direction
:return: True if lock is successfully done otherwise false (it might still have a lock)
'''
cluster_id, grid_cell_members = self.get_switch_cluster(agent_pos)
if cluster_id < 1:
return True
lock_handle = self.switch_cluster_lock.get(cluster_id, None)
if lock_handle is None:
self.switch_cluster_lock.update({cluster_id: handle})
return True
if lock_handle == handle:
return True
return False
def unlock_switch_cluster(self, handle, agent_pos, agent_dir):
'''
Lock the switch cluster if possible
:param handle: Agent handle
:param agent_pos: position to lock
:param agent_dir: direction
:return: True if unlock is successfully done otherwise false (it might still have a lock own by another agent)
'''
cluster_id, grid_cell_members = self.get_switch_cluster(agent_pos)
if cluster_id < 1:
return True
lock_handle = self.switch_cluster_lock.get(cluster_id, None)
if lock_handle == handle:
self.switch_cluster_lock.update({cluster_id, None})
return True
return False
def get_agent_position_and_direction(self, handle):
'''
Returns the agent position - if not yet started (active) it returns the initial position
:param handle: agent reference (handle)
:return: agent_pos, agent_dir, agent_state
'''
agent = self.env.agents[handle]
agent_pos = agent.position
agent_dir = agent.direction
if agent_pos is None:
agent_pos = agent.initial_position
agent_dir = agent.initial_direction
return agent_pos, agent_dir, agent.state, agent.target
def has_agent_switch_cluster_lock(self, handle, agent_pos=None, agent_dir=None):
'''
Checks if the agent passed by the handle has the switch cluster lock
:param handle: agent reference (handle)
:param agent_pos: position to check
:param agent_dir: direction property
:return: True if handle owns the lock otherwise false
'''
if agent_pos is None or agent_dir is None:
agent_pos, agent_dir, agent_state, agent_target = self.get_agent_position_and_direction(handle)
cluster_id, grid_cell_members = self.get_switch_cluster(agent_pos)
if cluster_id < 1:
return False
lock_handle = self.switch_cluster_lock.get(cluster_id, None)
return lock_handle == handle
def get_switch_cluster_occupiers_next_cell(self, handle, agent_pos, agent_dir):
'''
Returns all occupiers for the next cell
:param handle: agent reference (handle)
:param agent_pos: position to check
:param agent_dir: direction property
:return: a list of all agents (handles) which occupied the next cell switch cluster
'''
possible_transitions = self.env.rail.get_transitions(*agent_pos, agent_dir)
occupiers = []
for new_direction in range(4):
if possible_transitions[new_direction] == 1:
new_position = get_new_position(agent_pos, new_direction)
occupiers += self.get_switch_cluster_occupiers(handle,
new_position,
new_direction)
return occupiers
def mark_switch_next_cluster_occupied(self, handle):
agent_position, agent_direciton, agent_state, agent_target = \
self.get_agent_position_and_direction(handle)
possible_transitions = self.env.rail.get_transitions(*agent_position, agent_direciton)
for new_direction in range(4):
if possible_transitions[new_direction] == 1:
new_position = get_new_position(agent_position, new_direction)
self.mark_switch_cluster_occupied(handle, new_position, new_direction)
def can_agent_enter_next_cluster(self, handle):
agent_position, agent_direciton, agent_state, agent_target = \
self.get_agent_position_and_direction(handle)
occupiers = self.get_switch_cluster_occupiers_next_cell(handle,
agent_position,
agent_direciton)
if len(occupiers) > 0 and handle not in occupiers:
return False
return True
def get_switch_cluster_occupiers(self, handle, agent_pos, agent_dir):
'''
:param handle: agent reference (handle)
:param agent_pos: position to check
:param agent_dir: direction property
:return: a list of all agents (handles) which occupied the switch cluster
'''
cluster_id, grid_cell_members = self.get_switch_cluster(agent_pos)
if cluster_id < 1:
return []
return self.switch_cluster_occupied.get(cluster_id, [])
def mark_switch_cluster_occupied(self, handle, agent_pos, agent_dir):
'''
Add the agent handle to the switch cluster occupied data. Set the agent (handle) as occupier
:param handle: agent reference (handle)
:param agent_pos: position to check
:param agent_dir: direction property
:return:
'''
cluster_id, grid_cell_members = self.get_switch_cluster(agent_pos)
if cluster_id < 1:
return
agent_handles = self.switch_cluster_occupied.get(cluster_id, [])
agent_handles.append(handle)
self.switch_cluster_occupied.update({cluster_id: agent_handles})
def reset_swicht_cluster_lock(self):
'''
Reset the explicit lock data switch_cluster_lock
'''
self.clear_switch_cluster_lock()
def reset_switch_cluster_occupied(self, handle_only_active_agents=False):
'''
Reset the occupied flag by recomputing the switch_cluster_occupied map
:param handle_only_active_agents: if true only agent with status ACTIVE will be mapped
'''
self.clear_switch_cluster_occupied()
for handle in range(self.env.get_num_agents()):
agent_pos, agent_dir, agent_state, agent_target = self.get_agent_position_and_direction(handle)
if handle_only_active_agents:
if agent_state in [TrainState.MOVING, TrainState.STOPPED, TrainState.MALFUNCTION]:
self.mark_switch_cluster_occupied(handle, agent_pos, agent_dir)
else:
if agent_state < TrainState.DONE:
self.mark_switch_cluster_occupied(handle, agent_pos, agent_dir)
def get_switch_cluster(self, pos):
'''
Returns the switch cluster at position pos
:param pos: the position for which the switch cluster must be returned
:return: if the position is not None and the switch cluster are computed it returns the cluster_id and the
grid cell members otherwise -1 and an empty list
'''
if pos is None:
return -1, []
if self.switch_cluster_grid is None:
return -1, []
cluster_id = self.switch_cluster_grid[pos]
grid_cell_members = self.switch_cluster.get(cluster_id, [])
return cluster_id, grid_cell_members
def find_all_switches(self):
'''
Search the environment (rail grid) for all switch cells. A switch is a cell where more than one tranisation
exists and collect all direction where the switch is a switch.
'''
self.switches = {}
for h in range(self.env.height):
for w in range(self.env.width):
pos = (h, w)
for dir in range(4):
possible_transitions = self.env.rail.get_transitions(*pos, dir)
num_transitions = fast_count_nonzero(possible_transitions)
if num_transitions > 1:
directions = self.switches.get(pos, [])
directions.append(dir)
self.switches.update({pos: directions})
def find_all_switch_neighbours(self):
'''
Collect all cells where is a neighbour to a switch cell. All cells are neighbour where the agent can make
just one step and he stands on a switch. A switch is a cell where the agents has more than one transition.
'''
self.switches_neighbours = {}
for h in range(self.env.height):
for w in range(self.env.width):
# look one step forward
for dir in range(4):
pos = (h, w)
possible_transitions = self.env.rail.get_transitions(*pos, dir)
for d in range(4):
if possible_transitions[d] == 1:
new_cell = get_new_position(pos, d)
if new_cell in self.switches.keys():
directions = self.switches_neighbours.get(pos, [])
directions.append(dir)
self.switches_neighbours.update({pos: directions})
def find_cluster_label(self, in_label) -> int:
label = int(in_label)
while 0 != self.label_dict[label]:
label = self.label_dict[label]
return label
def union_cluster_label(self, root, slave) -> None:
root_label = self.find_cluster_label(root)
slave_label = self.find_cluster_label(slave)
if slave_label != root_label:
self.label_dict[slave_label] = root_label
def find_connected_clusters_and_label(self, binary_image):
padded_binary_image = np.pad(binary_image, ((1, 0), (1, 0)), 'constant', constant_values=(0, 0))
w = np.size(binary_image, 1)
h = np.size(binary_image, 0)
self.label_dict = [int(i) for i in np.zeros(w * h)]
label = 1
# first pass
for cow in range(1, h + 1):
for col in range(1, w + 1):
working_position = (cow, col)
working_pixel = padded_binary_image[working_position]
if working_pixel != 0:
left_pixel_pos = (cow, col - 1)
up_pixel_pos = (cow - 1, col)
left_pixel = padded_binary_image[left_pixel_pos]
up_pixel = padded_binary_image[up_pixel_pos]
# Use connections (rails) for clustering (only real connected pixels builds a real cluster)
if (cow < self.env.height) and (col < self.env.width):
left_ok = 0
up_ok = 0
# correct padded image position (railenv)
t_working_position = (working_position[0] - 1, working_position[1] - 1)
t_left_pixel_pos = (left_pixel_pos[0] - 1, left_pixel_pos[1] - 1)
t_up_pixel_pos = (up_pixel_pos[0] - 1, up_pixel_pos[1] - 1)
for direction_loop in range(4):
possible_transitions = self.env.rail.get_transitions(*t_working_position, direction_loop)
orientation = direction_loop
if fast_count_nonzero(possible_transitions) == 1:
orientation = fast_argmax(possible_transitions)
for dir_loop, new_direction in enumerate(
[(orientation + dir_loop) % 4 for dir_loop in range(-1, 3)]):
if possible_transitions[new_direction] == 1:
new_pos = get_new_position(t_working_position, new_direction)
if new_pos == t_left_pixel_pos:
left_ok = 1
if new_pos == t_up_pixel_pos:
up_ok = 1
left_pixel *= left_ok
up_pixel *= up_ok
# build clusters
if left_pixel == 0 and up_pixel == 0:
padded_binary_image[working_position] = label
label += 1
if left_pixel != 0 and up_pixel != 0:
smaller = left_pixel if left_pixel < up_pixel else up_pixel
bigger = left_pixel if left_pixel > up_pixel else up_pixel
padded_binary_image[working_position] = smaller
self.union_cluster_label(smaller, bigger)
if up_pixel != 0 and left_pixel == 0:
padded_binary_image[working_position] = up_pixel
if up_pixel == 0 and left_pixel != 0:
padded_binary_image[working_position] = left_pixel
for cow in range(1, h + 1):
for col in range(1, w + 1):
root = self.find_cluster_label(padded_binary_image[cow][col])
padded_binary_image[cow][col] = root
self.switch_cluster_grid = padded_binary_image[1:, 1:]
for h in range(self.env.height):
for w in range(self.env.width):
working_position = (h, w)
root = self.switch_cluster_grid[working_position]
if root > 0:
pos_data = self.switch_cluster.get(root, [])
pos_data.append(working_position)
self.switch_cluster.update({root: pos_data})
def cluster_all_switches(self):
info_image = np.zeros((self.env.height, self.env.width))
# for h in range(self.env.height):
# for w in range(self.env.width):
# # look one step forward
# if self.env.rail.grid[h][w] > 0:
# info_image[(h,w)] = -1
for key in self.switches.keys():
info_image[key] = 1
# build clusters
self.find_connected_clusters_and_label(info_image)
if self.render_debug_information:
# Setup renderer
env_renderer = RenderTool(self.env, gl="PGL",
agent_render_variant=AgentRenderVariant.AGENT_SHOWS_OPTIONS_AND_BOX)
env_renderer.set_new_rail()
env_renderer.render_env(
show=True,
frames=False,
show_observations=True,
show_predictions=False
)
plt.subplot(1, 2, 1)
plt.imshow(info_image)
plt.subplot(1, 2, 2)
plt.imshow(self.switch_cluster_grid)
plt.show()
plt.pause(0.01)
def find_all_cell_where_agent_can_choose(self):
'''
prepare the memory - collect all cells where the agent can choose more than FORWARD/STOP.
'''
self.find_all_switches()
self.find_all_switch_neighbours()
self.cluster_all_switches()
def check_agent_decision(self, position, direction):
'''
Decide whether the agent is
- on a switch
- at a switch neighbour (near to switch). The switch must be a switch where the agent has more option than
FORWARD/STOP
- all switch : doesn't matter whether the agent has more options than FORWARD/STOP
- all switch neightbors : doesn't matter the agent has more then one options (transistion) when he reach the
switch
:param position: (x,y) cell coordinate
:param direction: Flatland direction
:return: agents_on_switch, agents_near_to_switch, agents_near_to_switch_all, agents_on_switch_all
'''
agents_on_switch = False
agents_on_switch_all = False
agents_near_to_switch = False
agents_near_to_switch_all = False
if position in self.switches.keys():
agents_on_switch = direction in self.switches[position]
agents_on_switch_all = True
if position in self.switches_neighbours.keys():
new_cell = get_new_position(position, direction)
if new_cell in self.switches.keys():
if not direction in self.switches[new_cell]:
agents_near_to_switch = direction in self.switches_neighbours[position]
else:
agents_near_to_switch = direction in self.switches_neighbours[position]
agents_near_to_switch_all = direction in self.switches_neighbours[position]
return agents_on_switch, agents_near_to_switch, agents_near_to_switch_all, agents_on_switch_all
def requires_agent_decision(self):
'''
Returns for all agents its check_agent_decision values
:return: dicts with check_agent_decision values stored (each agents)
'''
agents_can_choose = {}
agents_on_switch = {}
agents_on_switch_all = {}
agents_near_to_switch = {}
agents_near_to_switch_all = {}
for a in range(self.env.get_num_agents()):
ret_agents_on_switch, ret_agents_near_to_switch, ret_agents_near_to_switch_all, ret_agents_on_switch_all = \
self.check_agent_decision(
self.env.agents[a].position,
self.env.agents[a].direction)
agents_on_switch.update({a: ret_agents_on_switch})
agents_on_switch_all.update({a: ret_agents_on_switch_all})
ready_to_depart = self.env.agents[a].state == TrainState.READY_TO_DEPART
agents_near_to_switch.update({a: (ret_agents_near_to_switch and not ready_to_depart)})
agents_can_choose.update({a: agents_on_switch[a] or agents_near_to_switch[a]})
agents_near_to_switch_all.update({a: (ret_agents_near_to_switch_all and not ready_to_depart)})
return agents_can_choose, agents_on_switch, agents_near_to_switch, agents_near_to_switch_all, agents_on_switch_all
| 45.152125 | 122 | 0.60769 | 19,886 | 0.985285 | 0 | 0 | 0 | 0 | 0 | 0 | 4,315 | 0.213794 |
359377901334427aec4295ca0684ad7dffa3d7ff | 182 | py | Python | server/app/api/weather/resources.py | WagnerJM/home_pod | f6a51e4956d5956a85084f637e267406f21df6df | [
"MIT"
] | null | null | null | server/app/api/weather/resources.py | WagnerJM/home_pod | f6a51e4956d5956a85084f637e267406f21df6df | [
"MIT"
] | null | null | null | server/app/api/weather/resources.py | WagnerJM/home_pod | f6a51e4956d5956a85084f637e267406f21df6df | [
"MIT"
] | null | null | null | from flask import request
from flask_restful import Resource
from flask_jwt_extended import get_jwt_claims, get_jwt_identity, jwt_required
from app.cache import redis_client
| 26 | 78 | 0.840659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
35939e520372bd241032dd0ca8f839c03e847bef | 1,792 | py | Python | demo/buf.py | uldisa/tuxedo-python | 59bd44ee9be1807b63599b48b3af9b4dc4ac4277 | [
"MIT"
] | 4 | 2019-11-05T17:44:29.000Z | 2022-03-21T08:51:14.000Z | demo/buf.py | uldisa/tuxedo-python | 59bd44ee9be1807b63599b48b3af9b4dc4ac4277 | [
"MIT"
] | 3 | 2020-12-15T19:39:38.000Z | 2021-11-22T20:54:13.000Z | demo/buf.py | uldisa/tuxedo-python | 59bd44ee9be1807b63599b48b3af9b4dc4ac4277 | [
"MIT"
] | 4 | 2020-12-13T17:02:21.000Z | 2021-12-15T22:46:00.000Z | #!/usr/bin/env python3
import tuxedo as t
if __name__ == '__main__':
    # Round-trip an FML32-style dict through tpexport/tpimport, in the default
    # (binary) representation and in string (TPEX_STRING) representation.
    buf = {'TA_CLASS': ['T_SVCGRP'], 'TA_OPERATION': ['GET']}
    assert t.tpimport(t.tpexport(buf)) == buf
    assert t.tpimport(t.tpexport(buf, t.TPEX_STRING), t.TPEX_STRING) == buf

    # Field-table helpers: name <-> id mapping and constructing ids from
    # (type, number) pairs.
    assert t.Fname32(t.Fldid32('TA_OPERATION')) == 'TA_OPERATION'
    assert t.Fldtype32(t.Fmkfldid32(t.FLD_STRING, 10)) == t.FLD_STRING
    assert t.Fldno32(t.Fmkfldid32(t.FLD_STRING, 10)) == 10

    # Bytes that are not valid UTF-8 survive the export/import round trip;
    # they come back as str and re-encode to the original bytes via
    # surrogateescape.
    binstr = b'\xc1 hello'
    binstr2 = t.tpimport(t.tpexport({'TA_OPERATION': binstr}))['TA_OPERATION'][0]
    assert binstr2.encode(errors='surrogateescape') == binstr
    t.tpexport({'TA_OPERATION': binstr2})
    binstr3 = t.tpimport(t.tpexport({'TA_OPERATION': binstr2}))['TA_OPERATION'][0]
    assert binstr3.encode(errors='surrogateescape') == binstr

    # Valid UTF-8 bytes and the equivalent unicode string both round-trip.
    utf8 = b'gl\xc4\x81\xc5\xbe\xc5\xa1\xc4\xb7\xc5\xab\xc5\x86r\xc5\xab\xc4\xb7\xc4\xabtis'
    s = t.tpimport(t.tpexport({'TA_OPERATION': utf8}))['TA_OPERATION'][0]
    assert s.encode('utf8') == utf8
    uni = 'gl\u0101\u017e\u0161\u0137\u016b\u0146r\u016b\u0137\u012btis'
    s = t.tpimport(t.tpexport({'TA_OPERATION': uni}))['TA_OPERATION'][0]
    assert s == uni

    # Fboolev32 boolean expressions: equality plus the %% / !% pattern
    # operators (patterns look regex-like and appear to match the whole
    # value — NOTE(review): confirm exact matching semantics in Tuxedo docs).
    assert t.Fboolev32({'TA_OPERATION': '123456789'}, "TA_OPERATION=='123456789'")
    assert not t.Fboolev32({'TA_OPERATION': '123456789'}, "TA_OPERATION=='1234567890'")
    assert t.Fboolev32({'TA_OPERATION': '123456789'}, "TA_OPERATION%%'.234.*'")
    assert not t.Fboolev32({'TA_OPERATION': '123456789'}, "TA_OPERATION%%'.123.*'")
    assert t.Fboolev32({'TA_OPERATION': '123456789'}, "TA_OPERATION!%'.123.*'")

    # Print buffers to stdout and read one back from stdin.
    import sys
    t.Ffprint32({'TA_OPERATION': '123456789'}, sys.stdout)
    t.Ffprint32({t.Fmkfldid32(t.FLD_STRING, 10): 'Dynamic field'}, sys.stdout)
    print(t.Fextread32(sys.stdin))
| 41.674419 | 92 | 0.671875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 713 | 0.397879 |
359544a5826b09544016d35aa73f09018ee2fb1c | 1,746 | py | Python | src/term/api/serializers.py | eeriksp/e-dhamma-backend | c473b4504fb79e109c20f8f7ebaf05608d0a48ce | [
"MIT"
] | 1 | 2018-05-03T19:31:57.000Z | 2018-05-03T19:31:57.000Z | src/term/api/serializers.py | eeriksp/e-dhamma-backend | c473b4504fb79e109c20f8f7ebaf05608d0a48ce | [
"MIT"
] | 18 | 2018-05-03T19:30:52.000Z | 2022-02-12T04:20:58.000Z | src/term/api/serializers.py | eeriksp/e-dhamma-backend | c473b4504fb79e109c20f8f7ebaf05608d0a48ce | [
"MIT"
] | 1 | 2018-11-15T19:15:34.000Z | 2018-11-15T19:15:34.000Z | from rest_framework import serializers
from ..models import Term, Meaning, Comment, Example
class CommentSerializer(serializers.ModelSerializer):
    """Serializes every field of a Comment."""
    class Meta:
        model = Comment
        fields = '__all__'
# class TranslatorsChatSerializer(serializers.ModelSerializer):
# class Meta:
# model = TranslatorsChat
# fields = '__all__'
class ExampleSerializer(serializers.ModelSerializer):
    """Serializes an Example: its id plus the original/translation text pair."""
    class Meta:
        model = Example
        fields = ('id', 'original', 'translation')
        depth = 1
class MeaningSerializer(serializers.ModelSerializer):
    """Full Meaning representation, including its nested Examples."""
    # Nested reverse relation: all Examples attached to this Meaning.
    example_set = ExampleSerializer(many=True)

    class Meta:
        model = Meaning
        fields = ('id', 'est', 'eng', 'root', 'rootLang',
                  'rootDescription', 'expl', 'further', 'example_set')
        depth = 1
class SingleTermSerializer(serializers.ModelSerializer):
    """Detail view of a single Term with its nested Meanings and comments."""
    # Nested reverse relation: all Meanings attached to this Term.
    meaning_set = MeaningSerializer(many=True)

    class Meta:
        model = Term
        fields = ('id', 'slug', 'pali', 'wordClass',
                  'gender', 'def_in_PLS_dict', 'meaning_set', 'comment_set')
        depth = 1
class MeaningForListSerializer(serializers.ModelSerializer):
    """Compact Meaning representation (translations only) for list views."""
    class Meta:
        model = Meaning
        fields = ('est', 'eng')
class TermListSerializer(serializers.ModelSerializer):
    """Compact Term representation for list views, with compact nested Meanings."""
    meaning_set = MeaningForListSerializer(many=True)

    class Meta:
        model = Term
        fields = ('id', 'slug', 'pali', 'meaning_set')
        # depth = 1
"""
Right now the serializer returns data in the wrong format; the correct format should be the following:
[
{
'id' = 1
'slug' = 'string'
'pali' = []
'est' = []
}
]
"""
| 25.304348 | 105 | 0.596793 | 1,489 | 0.852806 | 0 | 0 | 0 | 0 | 0 | 0 | 647 | 0.370561 |
359667e8aabcd97e7d2ce1d0e41dfa0d7e888640 | 302 | py | Python | archive/2017/week12/tasks/more_list_tasks/tail.py | YAtOff/python0 | b5af5004131d64dd52d42746eddb72b6c43a13c7 | [
"Apache-2.0"
] | 6 | 2017-11-08T14:04:39.000Z | 2019-03-24T22:11:04.000Z | archive/2016/week13/tasks/tail.py | YAtOff/python0 | b5af5004131d64dd52d42746eddb72b6c43a13c7 | [
"Apache-2.0"
] | null | null | null | archive/2016/week13/tasks/tail.py | YAtOff/python0 | b5af5004131d64dd52d42746eddb72b6c43a13c7 | [
"Apache-2.0"
def tail(xs):
    """
    Return a new list consisting of every element of *xs* except the first.

    The input list is not modified; an empty input simply yields an empty
    list (slicing never raises for out-of-range bounds).

    >>> tail([1, 2, 3])
    [2, 3]
    >>> tail(["Python"])
    []
    """
    return xs[1:]
| 21.571429 | 79 | 0.572848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 409 | 0.935927 |
35973f812223d4b16bcdd0f01ec5321169f6268b | 12,094 | py | Python | pool_automation/roles/aws_manage/library/stateful_set.py | Rob-S/indy-node | 0aefbda62c5a7412d7e03b2fb9795c500ea67e9f | [
"Apache-2.0"
] | 627 | 2017-07-06T12:38:08.000Z | 2022-03-30T13:18:43.000Z | pool_automation/roles/aws_manage/library/stateful_set.py | Rob-S/indy-node | 0aefbda62c5a7412d7e03b2fb9795c500ea67e9f | [
"Apache-2.0"
] | 580 | 2017-06-29T17:59:57.000Z | 2022-03-29T21:37:52.000Z | pool_automation/roles/aws_manage/library/stateful_set.py | Rob-S/indy-node | 0aefbda62c5a7412d7e03b2fb9795c500ea67e9f | [
"Apache-2.0"
] | 704 | 2017-06-29T17:45:34.000Z | 2022-03-30T07:08:58.000Z | #!/usr/bin/python
import re
from itertools import cycle
from collections import namedtuple, defaultdict, OrderedDict
import boto3
from ansible.module_utils.basic import AnsibleModule
# import logging
# boto3.set_stream_logger('', logging.DEBUG)
# Lightweight value objects passed between the helper functions below.
# HostInfo describes one reachable EC2 host (its ID tag, public IP and ssh user).
HostInfo = namedtuple('HostInfo', 'tag_id public_ip user')

# InstanceParams bundles all launch parameters taken from the Ansible module args.
InstanceParams = namedtuple(
    'InstanceParams',
    'project namespace group add_tags key_name security_group '
    'type_name market_spot spot_max_price ebs_volume_size ebs_volume_type')

# ManageResults is the outcome of manage_instances: a changed flag plus the
# lists of active and terminated hosts.
ManageResults = namedtuple('ManageResults', 'changed active terminated')
class AWSRegion(object):
    """Descriptor of a single AWS region: its code, human-readable location and a pricing flag."""
    def __init__(self, code, location, expensive=False):
        self.code = code            # region code, e.g. 'us-east-1'
        self.location = location    # human readable location, e.g. 'US East (N. Virginia)'
        self.expensive = expensive  # True for regions flagged as notably more expensive
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html
#
# prices:
# - https://aws.amazon.com/ec2/pricing/
# - https://www.concurrencylabs.com/blog/choose-your-aws-region-wisely/
#
# TODO automate or update periodically
# Ordered mapping of region code -> AWSRegion; the iteration order of this
# mapping is the order in which manage_instances visits regions.
AWS_REGIONS = OrderedDict([(r.code, r) for r in [
    AWSRegion('us-east-1', 'US East (N. Virginia)'),
    AWSRegion('us-east-2', 'US East (Ohio)'),
    AWSRegion('us-west-1', 'US West (N. California)'),
    AWSRegion('us-west-2', 'US West (Oregon)'),
    AWSRegion('ca-central-1', 'Canada (Central)'),
    AWSRegion('eu-central-1', 'EU (Frankfurt)'),
    AWSRegion('eu-west-1', 'EU (Ireland)'),
    AWSRegion('eu-west-2', 'EU (London)'),
    AWSRegion('eu-west-3', 'EU (Paris)'),
    AWSRegion('ap-northeast-1', 'Asia Pacific (Tokyo)'),
    AWSRegion('ap-northeast-2', 'Asia Pacific (Seoul)'),
    # some specific one, requires service subscriptions
    # (ClientError: An error occurred (OptInRequired) when calling the DescribeInstances operation)
    # AWSRegion('ap-northeast-3', 'Asia Pacific (Osaka-Local)'),
    AWSRegion('ap-southeast-1', 'Asia Pacific (Singapore)'),
    AWSRegion('ap-southeast-2', 'Asia Pacific (Sydney)'),
    AWSRegion('ap-south-1', 'Asia Pacific (Mumbai)'),
    AWSRegion('sa-east-1', 'South America (Sao Paulo)', True),
]])
# TODO
# - think about moving these module level funcitons into classes
# - cache results
def find_ubuntu_ami(ec2):
    """Return the id of the newest Canonical Ubuntu 16.04 HVM AMI, or None if none exist."""
    candidates = ec2.images.filter(
        Owners=['099720109477'],
        Filters=[
            {
                'Name': 'name',
                'Values': ['ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server*']
            }
        ])

    # Keep the image with the greatest creation date (ISO-8601 strings
    # compare chronologically); ties resolve to the last one seen, matching
    # a stable sort followed by taking the final element.
    latest = None
    for image in candidates:
        if latest is None or image.creation_date >= latest.creation_date:
            latest = image
    return latest.image_id if latest is not None else None
def find_instances(ec2, project, namespace, group=None):
    """Return instances tagged with project/namespace (and optionally group) that are not terminated or shutting down."""
    tag_filters = [
        {'Name': 'tag:Project', 'Values': [project]},
        {'Name': 'tag:Namespace', 'Values': [namespace]},
    ]
    if group is not None:
        tag_filters.append({'Name': 'tag:Group', 'Values': [group]})

    alive = []
    for instance in ec2.instances.filter(Filters=tag_filters):
        if instance.state['Name'] in ('terminated', 'shutting-down'):
            continue
        alive.append(instance)
    return alive
def valid_instances(regions, count):
    """Distribute instance IDs "1".."count" round-robin across *regions*; returns region -> list of id strings."""
    assignments = defaultdict(list)
    for instance_no, region in zip(range(1, count + 1), cycle(regions)):
        assignments[region].append(str(instance_no))
    return assignments
def get_tag(obj, name):
    """Return the value of the first tag on *obj* whose Key equals *name*, or None if absent."""
    matching = [tag['Value'] for tag in obj.tags if tag['Key'] == name]
    return matching[0] if matching else None
class AwsEC2Waiter(object):
    """ Base class for EC2 actors which calls long running async actions. """

    def __init__(self, ev_name):
        # region -> list of instances awaiting the event `ev_name`
        self._awaited = defaultdict(list)
        self._ev_name = ev_name

    @property
    def awaited(self):
        """Snapshot of the awaited instances as a plain dict (region -> instances)."""
        return dict(self._awaited)

    def _instance_region(self, instance):
        # TODO more mature would be to use
        # ec2.client.describe_availability_zones
        # and create a map av.zone -> region
        # The AZ string is the region code plus a single trailing letter.
        return instance.placement['AvailabilityZone'][:-1]

    def add_instance(self, instance, region=None):
        """Register *instance* to be awaited; autodetects the region when not given."""
        # fallback - autodetect placement region,
        # might lead to additional AWS API calls
        if not region:
            region = self._instance_region(instance)
        self._awaited[region].append(instance)

    def wait(self, update=True):
        """Block until all registered instances reach the awaited event, per region."""
        # BUGFIX: dict.iteritems() does not exist on Python 3; .items() works
        # on both Python 2 and 3. Iterate over a copy so entries can be
        # deleted while looping.
        for region, instances in dict(self._awaited).items():
            ec2cl = boto3.client('ec2', region_name=region)
            ec2cl.get_waiter(self._ev_name).wait(
                InstanceIds=[inst.id for inst in instances])
            if update:
                # Refresh cached instance attributes after the state change.
                for inst in instances:
                    inst.reload()
            del self._awaited[region]
class AwsEC2Terminator(AwsEC2Waiter):
    """ Helper class to terminate EC2 instances. """

    def __init__(self):
        super(AwsEC2Terminator, self).__init__('instance_terminated')

    def terminate(self, instance, region=None):
        """Request termination of *instance*, register it for awaiting and cancel any spot request."""
        instance.terminate()
        self.add_instance(instance, region)

        spot_request_id = instance.spot_instance_request_id
        if spot_request_id:
            target_region = region if region else self._instance_region(instance)
            ec2cl = boto3.client('ec2', region_name=target_region)
            ec2cl.cancel_spot_instance_requests(
                SpotInstanceRequestIds=[spot_request_id])
class AwsEC2Launcher(AwsEC2Waiter):
    """ Helper class to launch EC2 instances. """

    # Regexes for CamelCase -> snake_case conversion of boto option names.
    _camel_to_snake_re1 = re.compile('(.)([A-Z][a-z]+)')
    _camel_to_snake_re2 = re.compile('([a-z0-9])([A-Z])')

    def __init__(self):
        # TODO consider to use waiter for 'instance_status_ok'
        # if 'instance_running' is not enough in any circumstances
        super(AwsEC2Launcher, self).__init__('instance_running')

    @classmethod
    def _camel_to_snake(cls, camel_one):
        # borrowed from here:
        # https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
        return cls._camel_to_snake_re2.sub(
            r'\1_\2', cls._camel_to_snake_re1.sub(r'\1_\2', camel_one)).lower()

    def launch(self, params, count, region=None, ec2=None):
        """
        Launch *count* instances described by *params* (InstanceParams) in
        *region* and register them with the waiter. Returns the created
        boto3 instances.
        """
        def _get_options(opts_list, prefix=''):
            # Collect the non-None InstanceParams fields that correspond to
            # the given boto option names (prefix + snake_case(name)).
            res = {}
            for opt in opts_list:
                _opt = prefix + self._camel_to_snake(opt)
                if getattr(params, _opt) is not None:
                    res[opt] = getattr(params, _opt)
            return res

        if not ec2:
            ec2 = boto3.resource('ec2', region_name=region)

        spot_opts_list = (
            'MaxPrice',
        )

        # Note: default value type depends on region for API calls
        # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
        ebs_opts_list = (
            'VolumeSize',
            'VolumeType',
        )

        launch_spec = {
            'ImageId': find_ubuntu_ami(ec2),
            'KeyName': params.key_name,
            'SecurityGroups': [params.security_group],
            'InstanceType': params.type_name,
            'MinCount': count,
            'MaxCount': count,
            'TagSpecifications': [
                {
                    'ResourceType': rc_type,
                    'Tags': [
                        {
                            'Key': 'Project',
                            'Value': params.project
                        },
                        {
                            'Key': 'Namespace',
                            'Value': params.namespace
                        },
                        {
                            'Key': 'Group',
                            'Value': params.group
                        }
                    ] + [
                        # BUGFIX: .iteritems() is Python-2-only and raises
                        # AttributeError on Python 3; .items() works on both.
                        {'Key': k, 'Value': v}
                        for k, v in params.add_tags.items()
                    ]
                } for rc_type in ('instance', 'volume')
            ]
        }

        # ebs
        ebs_options = _get_options(ebs_opts_list, 'ebs_')
        if ebs_options:
            launch_spec['BlockDeviceMappings'] = [{
                'DeviceName': '/dev/sda1',
                'Ebs': ebs_options
            }]

        # spot
        if params.market_spot:
            launch_spec['InstanceMarketOptions'] = {
                'MarketType': 'spot',
                'SpotOptions': _get_options(spot_opts_list, 'spot_')
            }

        # tags
        instances = ec2.create_instances(**launch_spec)

        for i in instances:
            self.add_instance(i, region)

        return instances
def manage_instances(regions, params, count):
    """
    Reconcile running EC2 instances with the desired state: keep instances
    whose ID tag belongs to the round-robin assignment for their region,
    terminate the rest, and launch whatever is missing. Blocks until all
    launches and terminations complete.

    :param regions: list of region codes to distribute instances across
    :param params: InstanceParams with launch settings
    :param count: desired total number of instances
    :return: ManageResults(changed, active HostInfo list, terminated HostInfo list)
    """
    hosts = []
    terminated = []
    tag_ids = []
    changed = False

    def _host_info(inst):
        # Build a HostInfo record from a boto3 instance.
        return HostInfo(
            tag_id=get_tag(inst, 'ID'),
            public_ip=inst.public_ip_address,
            user='ubuntu')

    aws_launcher = AwsEC2Launcher()
    aws_terminator = AwsEC2Terminator()

    # Desired IDs per region, e.g. {'us-east-1': ['1', '3'], ...}
    valid_region_ids = valid_instances(regions, count)

    for region in AWS_REGIONS.keys():
        ec2 = boto3.resource('ec2', region_name=region)
        valid_ids = valid_region_ids[region]

        instances = find_instances(
            ec2, params.project, params.namespace, params.group)
        for inst in instances:
            tag_id = get_tag(inst, 'ID')
            if tag_id in valid_ids:
                # Already-running instance matches a desired ID: keep it.
                valid_ids.remove(tag_id)
                hosts.append(inst)
                aws_launcher.add_instance(inst, region)
            else:
                # Surplus or stale instance: terminate it.
                terminated.append(_host_info(inst))
                aws_terminator.terminate(inst, region)
                changed = True

        if valid_ids:
            # Launch the still-missing instances for this region.
            instances = aws_launcher.launch(
                params, len(valid_ids), region=region, ec2=ec2)
            for inst, tag_id in zip(instances, valid_ids):
                tag_ids.append((inst, tag_id))
                hosts.append(inst)
            changed = True

    aws_launcher.wait()

    # add tags based on id once instances are running
    for inst, tag_id in tag_ids:
        inst.create_tags(Tags=[
            {'Key': 'Name', 'Value': "{}-{}-{}-{}"
                .format(params.project,
                        params.namespace,
                        params.group,
                        tag_id.zfill(3)).lower()},
            {'Key': 'ID', 'Value': tag_id}])

    aws_terminator.wait()

    return ManageResults(
        changed,
        [_host_info(inst) for inst in hosts],
        terminated
    )
def run(module):
    """Translate Ansible module arguments into InstanceParams, reconcile instances and report results."""
    args = module.params

    instance_params = InstanceParams(
        project=args['project'],
        namespace=args['namespace'],
        group=args['group'],
        add_tags=args['add_tags'],
        key_name=args['key_name'],
        security_group=args['security_group'],
        type_name=args['instance_type'],
        market_spot=args['market_spot'],
        spot_max_price=args['spot_max_price'],
        ebs_volume_size=args['ebs_volume_size'],
        ebs_volume_type=args['ebs_volume_type'],
    )

    outcome = manage_instances(
        args['regions'], instance_params, args['instance_count'])

    module.exit_json(
        changed=outcome.changed,
        active=[host.__dict__ for host in outcome.active],
        terminated=[host.__dict__ for host in outcome.terminated]
    )
if __name__ == '__main__':
    # Ansible module argument specification; names mirror the InstanceParams
    # fields consumed by run() (instance_type maps to type_name, etc.).
    module_args = dict(
        regions=dict(type='list', required=True),
        project=dict(type='str', required=True),
        namespace=dict(type='str', required=True),
        group=dict(type='str', required=True),
        add_tags=dict(type='dict', required=False, default=dict()),
        key_name=dict(type='str', required=True),
        security_group=dict(type='str', required=True),
        instance_type=dict(type='str', required=True),
        instance_count=dict(type='int', required=True),
        market_spot=dict(type='bool', required=False, default=False),
        spot_max_price=dict(type='str', required=False, default=None),
        ebs_volume_size=dict(type='int', required=False, default=None),
        ebs_volume_type=dict(type='str', required=False, default=None),
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    run(module)
| 33.225275 | 112 | 0.588722 | 5,250 | 0.4341 | 0 | 0 | 387 | 0.031999 | 0 | 0 | 3,204 | 0.264925 |
3598073bb8b0c52a37225a3d3dc812d2999277d1 | 20,289 | py | Python | backend/hqlib/domain/measurement/metric.py | ICTU/quality-report | f6234e112228ee7cfe6476c2d709fe244579bcfe | [
"Apache-2.0"
] | 25 | 2016-11-25T10:41:24.000Z | 2021-07-03T14:02:49.000Z | backend/hqlib/domain/measurement/metric.py | ICTU/quality-report | f6234e112228ee7cfe6476c2d709fe244579bcfe | [
"Apache-2.0"
] | 783 | 2016-09-19T12:10:21.000Z | 2021-01-04T20:39:15.000Z | backend/hqlib/domain/measurement/metric.py | ICTU/quality-report | f6234e112228ee7cfe6476c2d709fe244579bcfe | [
"Apache-2.0"
] | 15 | 2015-03-25T13:52:49.000Z | 2021-03-08T17:17:56.000Z | """
Copyright 2012-2019 Ministerie van Sociale Zaken en Werkgelegenheid
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import cast, Dict, List, Optional, Type, Tuple, TYPE_CHECKING
import json
import re
import datetime
import functools
import logging
from hqlib import utils
from hqlib.typing import MetricParameters, MetricValue, DateTime, Number
from .metric_source import MetricSource
from .target import AdaptedTarget
if TYPE_CHECKING: # pragma: no cover
from ..software_development.project import Project # pylint: disable=unused-import
class ExtraInfo(object):
    """ Holds the extra metric information that gets serialized into the extra_info json tag. """

    def __init__(self, **kwargs):
        """ Initialize with column keys mapped to their header texts. """
        self.headers = kwargs
        self.title = None
        self.data = []

    def __add__(self, *args):
        """ Append rows to the table; values are matched to the column keys by position. """
        values = args[0] if isinstance(args[0], tuple) else args
        width = len(self.headers)
        # Consume the flat value sequence one full row (width values) at a time;
        # a trailing partial row is silently dropped, as before.
        for row_index in range(len(values) // width):
            start = width * row_index
            self.data.append(dict(zip(self.headers.keys(), values[start:start + width])))
        return self
class Metric(object):
    """ Base class for metrics. """
    # Class-level defaults; concrete metric subclasses override these.
    name: str = 'Subclass responsibility'
    template: str = '{name} heeft {value} {unit}.'
    norm_template: str = 'Subclass responsibility'
    unit: str = 'Subclass responsibility'  # Unit in plural, e.g. "lines of code"
    target_value: MetricValue = 'Subclass responsibility'
    low_target_value: MetricValue = 'Subclass responsibility'
    perfect_value: MetricValue = 'Subclass responsibility'
    missing_template: str = 'De {metric} van {name} kon niet gemeten worden omdat niet alle benodigde bronnen ' \
                            'beschikbaar zijn.'
    missing_source_template: str = 'De {metric} van {name} kon niet gemeten worden omdat de bron ' \
                                   '{metric_source_class} niet is geconfigureerd.'
    missing_source_id_template: str = 'De {metric} van {name} kon niet gemeten worden omdat niet alle benodigde ' \
                                      'bron-ids zijn geconfigureerd. Configureer ids voor de bron ' \
                                      '{metric_source_class}.'
    perfect_template: str = ''
    url_label_text: str = ''
    comment_url_label_text: str = ''
    metric_source_class: Type[MetricSource] = None
    extra_info_headers: Dict[str, str] = None

    def __init__(self, subject=None, project: 'Project' = None) -> None:
        """ Bind the metric to its subject and project, and resolve the first configured
            metric source that has a source id for the subject. """
        self._subject = subject
        self._project = project
        for source in self._project.metric_sources(self.metric_source_class):
            try:
                source_id = self._subject.metric_source_id(source)
            except AttributeError:
                # Subject has no metric_source_id method; try the next source.
                continue
            if source_id:
                self._metric_source = source
                self._metric_source_id, self._display_url = self.__separate_metric_source_links(source_id)
                break
        else:
            # No source matched; warn only when the metric actually needs one.
            if self.metric_source_class:
                logging.warning("Couldn't find metric source of class %s for %s", self.metric_source_class.__name__,
                                self.stable_id())
            self._metric_source = None
            self._metric_source_id = None
            self._display_url = None
        self.__id_string = self.stable_id()
        self._extra_info_data = list()
        # Imported here (not at module level), presumably to avoid a circular
        # import between this module and hqlib.metric_source -- confirm.
        from hqlib import metric_source
        history_sources = self._project.metric_sources(metric_source.History) if self._project else []
        self.__history = cast(metric_source.History, history_sources[0]) if history_sources else None

    def __separate_metric_source_links(self, values) -> tuple:
        """ Split the configured source id(s) into a (source id(s), display url(s)) pair,
            preserving the scalar/list shape of the input. """
        if not isinstance(values, list):
            return self.__split_source_and_display(values)
        else:
            source = []
            display = []
            for val in values:
                src, dsp = self.__split_source_and_display(val)
                source.append(src)
                display.append(dsp)
            return source, display

    @staticmethod
    def __split_source_and_display(val) -> tuple:
        """ Return the (source, display) pair for one configured value: a dict supplies
            both keys; any other value serves as both source and display. """
        return (val['source'], val['display']) if isinstance(val, dict) else (val, val)

    def format_text_with_links(self, text: str) -> str:
        """ Format a text paragraph with additional url. """
        return Metric.format_comment_with_links(text, self.url(), '')

    @staticmethod
    def format_comment_with_links(text: str, url_dict: Dict[str, str],  # pylint: disable=no-self-use
                                  url_label: str) -> str:
        """ Format a text paragraph with optional urls and label for the urls. """
        comment_text = Metric._format_links_in_comment_text(text)
        links = [
            str(utils.format_link_object(href, utils.html_escape(anchor))) for (anchor, href) in list(url_dict.items())
        ]
        if links:
            if url_label:
                url_label += ': '
            comment_text = '{0} [{1}{2}]'.format(comment_text, url_label, ', '.join(sorted(links)))
        return json.dumps(comment_text)[1:-1]  # Strip quotation marks

    @staticmethod
    def _format_links_in_comment_text(text: str) -> str:
        """ Replace plain urls in the text with {'href': ..., 'text': ...} link markup;
            newlines are flattened to spaces first. """
        url_pattern = re.compile(r'(?i)\b(http(?:s?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]|'
                                 r'\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|'
                                 r'[^\s`!()\[\]{};:\'".,<>?\xab\xbb\u201c\u201d\u2018\u2019]))')
        return re.sub(url_pattern, r"{'href': '\1', 'text': '\1'}", text.replace('\n', ' '))

    @classmethod
    def norm_template_default_values(cls) -> MetricParameters:
        """ Return the default values for parameters in the norm template. """
        return dict(unit=cls.unit, target=cls.target_value, low_target=cls.low_target_value)

    def is_applicable(self) -> bool:  # pylint: disable=no-self-use
        """ Return whether this metric applies to the specified subject. """
        return True

    @functools.lru_cache(maxsize=1024)
    def normalized_stable_id(self):
        """ Returns stable_id where non-alphanumerics are substituted by _ and codes of other characters are added. """
        return "".join([c if c.isalnum() else "_" for c in self.stable_id()]) + '_' + \
               "".join(['' if c.isalnum() else str(ord(c)) for c in self.stable_id()])

    @functools.lru_cache(maxsize=1024)
    def stable_id(self) -> str:
        """ Return an id that doesn't depend on numbering/order of metrics. """
        stable_id = self.__class__.__name__
        if not isinstance(self._subject, list):
            stable_id += self._subject.name() if self._subject else str(self._subject)
        return stable_id

    def set_id_string(self, id_string: str) -> None:
        """ Set the identification string. This can be set by a client since the identification of a metric may
            depend on the section the metric is reported in. E.g. A-1. """
        self.__id_string = id_string

    def id_string(self) -> str:
        """ Return the identification string of the metric. """
        return self.__id_string

    def target(self) -> MetricValue:
        """ Return the target value for the metric. If the actual value of the
            metric is below the target value, the metric is not green. """
        subject_target = self._subject.target(self.__class__) if hasattr(self._subject, 'target') else None
        return self.target_value if subject_target is None else subject_target

    def low_target(self) -> MetricValue:
        """ Return the low target value for the metric. If the actual value is below the low target value, the metric
            needs immediate action and its status/color is red. """
        subject_low_target = self._subject.low_target(self.__class__) if hasattr(self._subject, 'low_target') else None
        return self.low_target_value if subject_low_target is None else subject_low_target

    def __technical_debt_target(self):
        """ Return the reduced target due to technical debt for the subject. If the subject has technical debt and
            the actual value of the metric is below the technical debt target, the metric is red, else it is grey. """
        try:
            return self._subject.technical_debt_target(self.__class__)
        except AttributeError:
            return None

    @functools.lru_cache(maxsize=8 * 1024)
    def status(self) -> str:
        """ Return the status/color of the metric. """
        # Order matters: the first predicate that fires determines the status.
        for status_string, has_status in [('missing_source', self.__missing_source_configuration),
                                          ('missing', self._missing),
                                          ('grey', self.__has_accepted_technical_debt),
                                          ('red', self._needs_immediate_action),
                                          ('yellow', self._is_below_target),
                                          ('perfect', self.__is_perfect)]:
            if has_status():
                return status_string
        return 'green'

    def status_start_date(self) -> DateTime:
        """ Return since when the metric has the current status. """
        return self.__history.status_start_date(self.stable_id(), self.status()) \
            if self.__history else datetime.datetime.min

    def __has_accepted_technical_debt(self) -> bool:
        """ Return whether the metric is below target but above the accepted technical debt level. """
        technical_debt_target = self.__technical_debt_target()
        if technical_debt_target:
            return self._is_below_target() and self._is_value_better_than(technical_debt_target.target_value())
        return False

    def _missing(self) -> bool:
        """ Return whether the metric source is missing. """
        # -1 is the sentinel value subclasses return when measurement failed.
        return self.value() == -1

    def __missing_source_configuration(self) -> bool:
        """ Return whether the metric sources have been completely configured. """
        return self.__missing_source_class() or self.__missing_source_ids()

    def __missing_source_class(self) -> bool:
        """ Return whether a metric source class that needs to be configured for the metric to be measurable is
            available from the project. """
        return not self._project.metric_sources(self.metric_source_class) if self.metric_source_class else False

    def __missing_source_ids(self) -> bool:
        """ Return whether the metric source ids have been configured for the metric source class. """
        return bool(self.metric_source_class) and not self._get_metric_source_ids()

    def _needs_immediate_action(self) -> bool:
        """ Return whether the metric needs immediate action, i.e. its actual value is below its low target value. """
        return not self._is_value_better_than(self.low_target())

    def _is_below_target(self) -> bool:
        """ Return whether the actual value of the metric is below its target value. """
        return not self._is_value_better_than(self.target())

    def __is_perfect(self) -> bool:
        """ Return whether the actual value of the metric equals its perfect value,
            i.e. no further improvement is possible. """
        return self.value() == self.perfect_value

    def value(self) -> MetricValue:
        """ Return the actual value of the metric. """
        raise NotImplementedError

    def _is_value_better_than(self, target: MetricValue) -> bool:
        """ Return whether the actual value of the metric is better than the specified target value. """
        raise NotImplementedError

    def report(self, max_subject_length: int = 200) -> str:
        """ Return the actual value of the metric in the form of a short, mostly one sentence, report. """
        name = self.__subject_name()
        if len(name) > max_subject_length:
            name = name[:max_subject_length] + '...'
        logging.info('Reporting %s on %s', self.__class__.__name__, name)
        return self._get_template().format(**self._parameters())

    def _get_template(self) -> str:
        """ Return the template for the metric report. """
        if self.__missing_source_class():
            return self.missing_source_template
        if self.__missing_source_ids():
            return self.missing_source_id_template
        if self._missing():
            return self.missing_template
        if self.__is_perfect() and self.perfect_template:
            return self.perfect_template
        return self.template

    def _parameters(self) -> MetricParameters:
        """ Return the parameters for the metric report template and for the metric norm template. """
        return dict(name=self.__subject_name(),
                    metric=self.name[0].lower() + self.name[1:],
                    unit=self.unit,
                    target=self.target(),
                    low_target=self.low_target(),
                    value=self.value(),
                    metric_source_class=self.metric_source_class.__name__ if self.metric_source_class
                    else '<metric has no metric source defined>')

    def norm(self) -> str:
        """ Return a description of the norm for the metric. """
        try:
            return self.norm_template.format(**self._parameters())
        except KeyError as reason:
            class_name = self.__class__.__name__
            logging.critical('Key missing in %s parameters (%s) for norm template "%s": %s', class_name,
                             self._parameters(), self.norm_template, reason)
            raise

    def url(self) -> Dict[str, str]:
        """ Return a dictionary of urls for the metric. The key is the anchor, the value the url. """
        label = self._metric_source.metric_source_name if self._metric_source else 'Unknown metric source'
        urls = [url for url in self._metric_source_urls() if url]  # Weed out urls that are empty or None
        if len(urls) == 1:
            return {label: urls[0]}
        # Multiple urls: disambiguate the anchors with a (1/n) style counter.
        return {'{label} ({index}/{count})'.format(label=label, index=index, count=len(urls)): url
                for index, url in enumerate(urls, start=1)}

    def _metric_source_urls(self) -> List[str]:
        """ Return a list of metric source urls to be used to create the url dict. """
        if self._metric_source:
            if self._get_display_urls():
                return self._metric_source.metric_source_urls(*self._get_display_urls())
            return [self._metric_source.url()]
        return []

    def _get_display_urls(self) -> List[str]:
        """ Return the configured display url(s) as a list, with falsy entries removed. """
        ids = self._display_url if isinstance(self._display_url, list) else [self._display_url]
        return [id_ for id_ in ids if id_]

    def _get_metric_source_ids(self) -> List[str]:
        """ Allow for subclasses to override what the metric source id is. """
        ids = self._metric_source_id if isinstance(self._metric_source_id, list) else [self._metric_source_id]
        return [id_ for id_ in ids if id_]

    def comment(self) -> str:
        """ Return a comment on the metric. The comment is retrieved from either the technical debt or the subject. """
        comments = [comment for comment in (self.__non_default_target_comment(), self.__technical_debt_comment(),
                                            self.__subject_comment()) if comment]
        return ' '.join(comments)

    def __subject_comment(self) -> str:
        """ Return the comment of the subject about this metric, if any. """
        try:
            return self._subject.metric_options(self.__class__)['comment']
        except (AttributeError, TypeError, KeyError):
            return ''

    def __technical_debt_comment(self) -> str:
        """ Return the comment of the accepted technical debt, if any. """
        td_target = self.__technical_debt_target()
        return td_target.explanation(self.unit) if td_target else ''

    def __non_default_target_comment(self) -> str:
        """ Return a comment about a non-default target, if relevant. """
        return AdaptedTarget(self.low_target(), self.low_target_value).explanation(self.unit)

    def comment_urls(self) -> Dict[str, str]:  # pylint: disable=no-self-use
        """ Return the source for the comment on the metric. """
        return dict()

    def __history_records(self, method: callable) -> List[int]:
        """ Fetch history values via the given history-source method and round them
            to ints, keeping None entries for missing measurements. """
        history = method(self.stable_id()) if self.__history else []
        return [int(round(float(value))) if value is not None else None for value in history]

    def recent_history(self) -> List[int]:
        """ Return a list of recent values of the metric, to be used in e.g. a spark line graph. """
        return self.__history_records(self.__history.recent_history) if self.__history else []

    def long_history(self) -> List[int]:
        """ Return a long list of values of the metric, to be used in e.g. a spark line graph. """
        return self.__history_records(self.__history.long_history) if self.__history else []

    def get_recent_history_dates(self) -> str:
        """ Return a list of recent dates when report was generated. """
        return self.__history.get_dates() if self.__history else ""

    def get_long_history_dates(self) -> str:
        """ Return a long list of dates when report was generated. """
        return self.__history.get_dates(long_history=True) if self.__history else ""

    def y_axis_range(self) -> Tuple[int, int]:
        """ Return a two-tuple (min, max) for use in graphs. """
        history = [d for d in self.recent_history() if d is not None]
        if not history:
            return 0, 100
        minimum, maximum = min(history), max(history)
        # Widen a flat range by one on each side so the graph has some height.
        return (minimum - 1, maximum + 1) if minimum == maximum else (minimum, maximum)

    def numerical_value(self) -> Number:
        """ Return a numerical version of the metric value for use in graphs. By default this simply returns the
            regular value, assuming it is already numerical. Metrics that don't have a numerical value by default
            can override this method to convert the non-numerical value into a numerical value. """
        value = self.value()
        if isinstance(value, tuple):
            value = value[0]
        if isinstance(value, (int, float)):
            return value
        raise NotImplementedError

    def extra_info(self) -> Optional[ExtraInfo]:
        """ Method can be overridden by concrete metrics that fill extra info. """
        extra_info = None
        if self._metric_source and self.extra_info_headers:
            url_list = self.extra_info_rows()
            if url_list:
                extra_info = self.__create_extra_info(url_list)
        return extra_info if extra_info is not None and extra_info.data else None

    def extra_info_rows(self) -> List:
        """ Returns rows of extra info table. """
        return self._extra_info_data

    def __create_extra_info(self, url_list):
        """ Build an ExtraInfo table from the given rows, converting each item
            via convert_item_to_extra_info. """
        extra_info = ExtraInfo(**self.extra_info_headers)
        extra_info.title = self.url_label_text
        for item in url_list:
            extra_info += self.convert_item_to_extra_info(item)
        return extra_info

    @staticmethod
    def convert_item_to_extra_info(item):
        """ Method should transform an item to the form used in extra info. Should be overridden. """
        return item

    def __subject_name(self) -> str:
        """ Return the subject name, or a string representation if the subject has no name. """
        try:
            return self._subject.name()
        except AttributeError:
            return str(self._subject)
| 48.078199 | 119 | 0.642368 | 19,238 | 0.948199 | 0 | 0 | 3,193 | 0.157376 | 0 | 0 | 6,468 | 0.318793 |
35981b6b41348f376489c82ca46f8c08bcc7ebf0 | 3,564 | py | Python | examples/decrypt.py | joke325/Pyrop | 79669e3a3362180a239cd496513a60007a914e22 | [
"BSD-2-Clause"
] | null | null | null | examples/decrypt.py | joke325/Pyrop | 79669e3a3362180a239cd496513a60007a914e22 | [
"BSD-2-Clause"
] | null | null | null | examples/decrypt.py | joke325/Pyrop | 79669e3a3362180a239cd496513a60007a914e22 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2020 Janky <box@janky.tech>
# All right reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# Inspired by https://github.com/rnpgp/rnp/blob/master/src/examples/decrypt.c
from pyrop.bind import RopBind
from pyrop.error import RopError
message = "Dummy"
def example_pass_provider(session, app_ctx, key, pgp_context, buf_len):
    """Password callback for the RNP session.

    Maps the PGP context string to the password to use and returns a
    (found, password) pair; (False, None) when the context is unknown.
    """
    known_passwords = {
        'decrypt (symmetric)': 'encpassword',
        'decrypt': 'password',
    }
    password = known_passwords.get(pgp_context)
    if password is None:
        return False, None
    return True, password
def decrypt(rop, usekeys):
    """Decrypt "encrypted.asc" and store the result in the module-level `message`.

    Args:
        rop: an active RopBind instance.
        usekeys: when True, load the secret keyring ("secring.pgp") for
            public-key decryption; otherwise rely on the password provider only.

    Raises:
        RopError: when the keyring cannot be read or decryption fails.
    """
    alt = rop.tagging()
    try:
        # initialize FFI object
        ses = rop.create_session(rop.KEYSTORE_GPG, rop.KEYSTORE_GPG)
        # check whether we want to use key or password for decryption
        if usekeys:
            # Pre-initialize so the finally-clause cannot hit an unbound name
            # when create_input itself fails (bug fix: previously this raised
            # UnboundLocalError and masked the original error).
            keyfile = None
            try:
                # load secret keyring, as it is required for public-key decryption. However, you may
                # need to load public keyring as well to validate key's signatures.
                keyfile = rop.create_input(path="secring.pgp")
                # we may use secret=True and public=True as well
                ses.load_keys(rop.KEYSTORE_GPG, keyfile, secret=True)
            except RopError:
                print("Failed to read secring")
                raise
            finally:
                if keyfile is not None:
                    rop.drop(object_=keyfile)
        # set the password provider
        ses.set_pass_provider(example_pass_provider, None)
        try:
            # create file input and memory output objects for the encrypted message and decrypted
            # message
            input_ = rop.create_input(path="encrypted.asc")
            output = rop.create_output(max_alloc=0)
            ses.decrypt(input_, output)
            # get the decrypted message from the output structure
            buf = output.memory_get_str(False)
        except RopError:
            print("Public-key decryption failed")
            raise
        print("Decrypted message ({}):\n{}\n".format("with key" if usekeys else "with password", buf))
        global message
        message = buf
    finally:
        # Release every object created since the tagging point.
        rop.drop(from_=alt)
def execute():
    """Run both decryption variants: key-based first, then password-based."""
    rop = RopBind()
    try:
        for use_keys in (True, False):
            decrypt(rop, use_keys)
    finally:
        rop.close()


if __name__ == '__main__':
    execute()
| 36.742268 | 100 | 0.672559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,091 | 0.5867 |
359a0ef6c0200615c6b4cd4163ac0cfe31bb7efc | 970 | py | Python | squealy/urls.py | vaibhav-singh/squealy | abfe8fc5e7406987d84fb065bb70152d43f61250 | [
"MIT"
] | null | null | null | squealy/urls.py | vaibhav-singh/squealy | abfe8fc5e7406987d84fb065bb70152d43f61250 | [
"MIT"
] | null | null | null | squealy/urls.py | vaibhav-singh/squealy | abfe8fc5e7406987d84fb065bb70152d43f61250 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from . import views
urlpatterns = [
    # Swagger spec (open) and Swagger UI (login required).
    url(r'^swagger.json/$',views.swagger_json_api),
    url(r'^swagger/$', login_required(views.swagger)),
    # Authenticated JSON endpoints backing the dashboard.
    url(r'charts/$', login_required(views.ChartsLoaderView.as_view())),
    url(r'user/$', login_required(views.UserInformation.as_view())),
    url(r'filters/$', login_required(views.FilterLoaderView.as_view())),
    url(r'squealy/(?P<chart_url>[-\w]+)', login_required(views.ChartView.as_view())),
    url(r'filter-api/(?P<filter_url>[-\w]+)', login_required(views.FilterView.as_view())),
    url(r'databases/$', login_required(views.DatabaseView.as_view())),
    # Authoring interface; with and without a chart name / mode suffix.
    # NOTE(review): the class `[\w@%.\Wd]+` unions \w and \W, so it matches
    # almost any character -- presumably an intentional catch-all; confirm.
    url(r'^$', login_required(views.squealy_interface)),
    url(r'^(?P<chart_name>[\w@%.\Wd]+)/$', login_required(views.squealy_interface)),
    url(r'^(?P<chart_name>[\w@%.\Wd]+)/(?P<mode>\w+)$', login_required(views.squealy_interface)),
]
359a485e3ad209d745beb2991ca78d9f951ff276 | 1,023 | py | Python | src/jomiel_kore/version.py | guendto/jomiel-kore | 7bbb7193baed13d7bb7baacd6cf63b28f5ddf6ac | [
"Apache-2.0"
] | null | null | null | src/jomiel_kore/version.py | guendto/jomiel-kore | 7bbb7193baed13d7bb7baacd6cf63b28f5ddf6ac | [
"Apache-2.0"
] | null | null | null | src/jomiel_kore/version.py | guendto/jomiel-kore | 7bbb7193baed13d7bb7baacd6cf63b28f5ddf6ac | [
"Apache-2.0"
] | null | null | null | #
# jomiel-kore
#
# Copyright
# 2019-2020 Toni Gündoğdu
#
#
# SPDX-License-Identifier: Apache-2.0
#
"""TODO."""
try: # py38+
from importlib.metadata import version as metadata_version
from importlib.metadata import PackageNotFoundError
except ModuleNotFoundError:
from importlib_metadata import version as metadata_version
from importlib_metadata import PackageNotFoundError
def package_version(package_name, destination):
    """Look up the installed version of a package and append it to `destination`.

    Appends a ``(package_name, version)`` tuple; the version is the string
    ``"<unavailable>"`` when the package is not installed. (Bug fix: the old
    docstring claimed the function *returned* the version string; it returns
    None and stores the result instead.)

    Args:
        package_name (str): the package name to look up
        destination (list): the list to store the result (tuple) to
    """
    try:
        version = metadata_version(package_name)
    except PackageNotFoundError:
        version = "<unavailable>"
    if package_name == "pyzmq":
        # pyzmq wraps libzmq; report the native library version alongside it.
        from zmq import zmq_version
        version = "{} (libzmq version {})".format(
            version,
            zmq_version(),
        )
    destination.append((package_name, version))
# vim: set ts=4 sw=4 tw=72 expandtab:
| 22.733333 | 67 | 0.675464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.366829 |
359ae6ec741ebfaca3f4e8299fb7c7de7b44d9e6 | 883 | py | Python | gpdist/tanh_saturate.py | saalfeldlab/gunpowder-distances | eb7702ccc21900987bf9539ea0f1c75a4502ef50 | [
"MIT"
] | null | null | null | gpdist/tanh_saturate.py | saalfeldlab/gunpowder-distances | eb7702ccc21900987bf9539ea0f1c75a4502ef50 | [
"MIT"
] | null | null | null | gpdist/tanh_saturate.py | saalfeldlab/gunpowder-distances | eb7702ccc21900987bf9539ea0f1c75a4502ef50 | [
"MIT"
] | null | null | null | import logging
import numpy as np
from gunpowder.nodes.batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class TanhSaturate(BatchFilter):
    '''Saturate the values of an array to be floats between -1 and 1 by applying the tanh function.
    Args:
        array (:class:`ArrayKey`):
            The key of the array to modify.
        scale (scalar, optional):
            The factor to divide by before applying the tanh, controls how quickly the values
            saturate to -1, 1. Defaults to 1.
    '''
    def __init__(self, array, scale=None):
        self.array = array
        if scale is not None:
            self.scale = scale
        else:
            # No scale given: divide by 1, i.e. apply tanh directly.
            self.scale = 1.
    def process(self, batch, request):
        # Nothing to do when the requested array is absent from this batch.
        if self.array not in batch.arrays:
            return
        array = batch.arrays[self.array]
        # Saturate in place: tanh(data / scale) maps all values into (-1, 1).
        array.data = np.tanh(array.data/self.scale)
| 23.236842 | 112 | 0.628539 | 755 | 0.85504 | 0 | 0 | 0 | 0 | 0 | 0 | 345 | 0.390713 |
359bb85f1c025b4fbb8e2575afa0f0a3d21e6da8 | 2,497 | py | Python | app/api/users/views.py | msoedov/hackit | ebe1addbf7b9a457c6627b69223cc995a5d7ed5d | [
"MIT"
] | null | null | null | app/api/users/views.py | msoedov/hackit | ebe1addbf7b9a457c6627b69223cc995a5d7ed5d | [
"MIT"
] | null | null | null | app/api/users/views.py | msoedov/hackit | ebe1addbf7b9a457c6627b69223cc995a5d7ed5d | [
"MIT"
] | null | null | null | import os
from django.conf import settings
from django.http.response import HttpResponse
from django.shortcuts import get_object_or_404
from rest_framework.generics import ListAPIView, RetrieveUpdateAPIView, RetrieveAPIView
from rest_framework.pagination import PageNumberPagination
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from .models import File
from .serializers import FileSerializer
class AuthRequired(object):
    """Mixin enabling session/basic authentication and requiring a logged-in user."""
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)
class FilesListView(AuthRequired, ListAPIView):
    """
    A view that permits a GET to allow listing all the Files
    in the database
    Route - `/reviews`
    """
    serializer_class = FileSerializer
    pagination_class = PageNumberPagination
    def get_queryset(self):
        # Restrict the listing to files owned by the requesting user.
        return File.objects.filter(owner=self.request.user)
class FileView(AuthRequired, RetrieveUpdateAPIView):
    """
    A view that permits a GET to allow listing of a single File
    by providing their `id` as a parameter
    Route - `/reviews/:name`
    """
    serializer_class = FileSerializer
    pagination_class = None
    lookup_field = 'name'
    def get_queryset(self):
        # NOTE(review): returns a single model instance rather than a queryset,
        # and File.DoesNotExist would propagate as a 500 instead of a 404 --
        # confirm this matches the intended DRF usage.
        return File.objects.get(owner=self.request.user,
                                name=self.kwargs.get('name', ''))
    def put(self, request, *args, **kwargs):
        # Persist the raw request body to FILES_VOLUME/<name>, creating parent
        # directories as needed, then record/refresh the File row.
        name = kwargs.get('name')
        # NOTE(review): `name` comes from the URL and is interpolated into a
        # filesystem path; confirm the URL pattern prevents traversal ('..').
        file_path = "{}/{}".format(settings.FILES_VOLUME, name)
        dirname = os.path.dirname(os.path.realpath(file_path))
        os.makedirs(dirname, exist_ok=True)
        with open(file_path, 'wb') as fp:
            fp.write(request.body)
        file, created = File.objects.get_or_create(owner=request.user,
                                                   name=name)
        if not created:
            # Existing file overwritten: bump its revision counter.
            file.revision += 1
            file.save()
        return Response(data={})
class DownloadView(AuthRequired, RetrieveAPIView):
    """Return the stored file contents as a binary download for its owner."""
    def get(self, request, *args, **kwargs):
        name = kwargs.get('name')
        # 404 unless the requesting user owns a File record with this name.
        get_object_or_404(File, owner=self.request.user, name=name)
        # NOTE(review): `name` is interpolated into a filesystem path; confirm
        # the URL pattern prevents traversal sequences such as '..'.
        file_path = "{}/{}".format(settings.FILES_VOLUME, name)
        with open(file_path, 'rb') as fp:
            data = fp.read()
        return HttpResponse(content=data,
                            content_type='application/octet-stream')
| 33.293333 | 87 | 0.678414 | 1,950 | 0.780937 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.135362 |
359bd535f1f4c3e1aa4fe7671744773be664415f | 3,542 | py | Python | main.py | azerpas/OFFSPRING_RAFFLE_NIKE_OW | 83d27376739ea772731d6d289506480933225368 | [
"MIT"
] | 3 | 2017-11-04T16:52:27.000Z | 2019-11-25T22:48:20.000Z | main.py | azerpas/OFFSPRING_RAFFLE_NIKE_OW | 83d27376739ea772731d6d289506480933225368 | [
"MIT"
] | null | null | null | main.py | azerpas/OFFSPRING_RAFFLE_NIKE_OW | 83d27376739ea772731d6d289506480933225368 | [
"MIT"
] | null | null | null | import requests, json, time, random, datetime, threading, pickle
from termcolor import colored
sitekey = "6Ld-VBsUAAAAABeqZuOqiQmZ-1WAMVeTKjdq2-bJ"
def log(event):
    """Print a timestamped (HH:MM:SS) log line for the raffle bot."""
    timestamp = datetime.datetime.now().strftime("%H:%M:%S")
    print("Raffle OFF-S by Azerpas :: {} :: {}".format(timestamp, event))
class Raffle(object):
    """Automates OFFSPRING raffle sign-ups for the configured shoe competitions."""

    def __init__(self):
        # One HTTP session reused for every entry; cookies are cleared between entries.
        self.s = requests.session()
        self.shoes = [
            {"shoe_id":"8","shoe_name":"ZOOM VAPORFLY"},
            {"shoe_id":"7","shoe_name":"VAPOR MAX"}]
        self.url = "https://www.offspring.co.uk/view/component/entercompetition"

    def register(self,identity):
        """Enter `identity` into every configured shoe raffle.

        Captcha responses are read from HH:MM-named pickle files written by a
        separate captcha-scraper process; one response is consumed per entry.
        """
        # register to each shoes.
        for dshoes in self.shoes:
            print("Signin for: " + dshoes['shoe_name'])
            d = datetime.datetime.now().strftime('%H:%M')
            log("Getting Captcha")
            flag = False
            # Poll until the current minute's captcha file exists; the filename
            # is keyed by HH:MM, so it rolls over every minute.
            while flag != True:
                d = datetime.datetime.now().strftime('%H:%M')
                try:
                    file = open(str(d)+'.txt','r') #r as reading only
                    flag = True
                except IOError:
                    time.sleep(2)
                    log("No captcha available(1)")
                    flag = False
            try:
                FileList = pickle.load(file) #FileList the list where i want to pick out the captcharep
            except:
                # NOTE(review): bare except -- if this fires, FileList stays
                # unbound and the while-test below raises NameError; also,
                # pickle.load on a text-mode handle fails under Python 3,
                # so this script presumably targets Python 2 -- confirm.
                log("Can't open file")
            # Wait until the scraper has deposited at least one captcha response.
            while len(FileList) == 0: #if len(FileList) it will wait for captcha scraper
                d = datetime.datetime.now().strftime('%H:%M')
                try:
                    file = open(str(d)+'.txt','r')
                    FileList = pickle.load(file)
                    if FileList == []:
                        log("No captcha available(2)")
                        time.sleep(3)
                except IOError as e:
                    log("No file, waiting...")
                    print(e)
                    time.sleep(3)
            # Consume one captcha response and write the shrunken list back.
            # NOTE(review): the file handles opened above are never closed.
            captchaREP = random.choice(FileList)
            FileList.remove(captchaREP)
            file = open(str(d)+'.txt','w')
            pickle.dump(FileList,file)
            log("Captcha retrieved")
            # captcha
            headers = {
                "authority":"www.offspring.co.uk",
                "method":"POST",
                "path":"/view/component/entercompetition",
                "scheme":"https",
                "accept":"*/*",
                "accept-encoding":"gzip, deflate, br",
                "accept-language":"fr-FR,fr;q=0.8,en-US;q=0.6,en;q=0.4",
                "content-length":"624",
                "content-type":"application/x-www-form-urlencoded; charset=UTF-8",
                "origin":"https://www.offspring.co.uk",
                "referer":"https://www.offspring.co.uk/view/content/nikecompetition",
                "user-agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
                "x-requested-with":"XMLHttpRequest",}
            payload = {"firstName":identity['fname'],
                "lastName":identity['lname'],
                "competitionIDEntered":dshoes['shoe_id'],
                "competitionNameEntered":dshoes['shoe_name'],
                "emailAddress":identity['mail'],
                "phoneNumber":identity['phone'],
                "optIn":"false",
                "size":identity['shoesize'],
                "grecaptcharesponse":captchaREP,
            }
            req = self.s.post(self.url,headers=headers,data=payload)
            print(req)
            jsonn = json.loads(req.text)
            if req.status_code == 200:
                if jsonn['statusCode'] == "success":
                    print(colored('Successfully entered','red', attrs=['bold']))
            else:
                log("Failed to register for: " + identity['mail'])
            # Random delay between entries, then drop cookies for a clean session.
            sleep = random.uniform(2.3,2.9)
            log("Sleeping: " + str(sleep) + " seconds")
            time.sleep(sleep)
            self.s.cookies.clear()
if __name__ == "__main__":
    # Register every configured account for all shoe raffles, one by one.
    ra = Raffle()
    accounts = [
        # ENTER YOUR ACCOUNTS HERE
        {"fname":"pete","lname":"james","mail":"7768james@gmail.com","phone":"+33612334455","city":"London","zip":"HEC 178","shoesize":"10",},
    ]
    # captcha
    for i in accounts:
        ra.register(i)
359d7f659114e08efb5857c6c56f60369996d99c | 142 | py | Python | my_site/objects.py | mequetrefe-do-subtroco/web_constel_cont_ext | c720c66e6d394e9aad0ed7039e1ff44e4a1c61c1 | [
"MIT"
] | 1 | 2020-06-18T09:03:53.000Z | 2020-06-18T09:03:53.000Z | my_site/objects.py | mequetrefe-do-subtroco/web_constel_cont_ext | c720c66e6d394e9aad0ed7039e1ff44e4a1c61c1 | [
"MIT"
] | 33 | 2020-06-16T18:59:33.000Z | 2021-08-12T21:33:17.000Z | constel/objects.py | gabrielhjs/web_constel | 57b5626fb17b4fefc740cbe1ac95fd4ab90147bc | [
"MIT"
] | null | null | null |
class Button(object):
    """Simple value object holding the data needed to render a link button.

    Attributes
    ----------
    url : str
        Target URL of the button.
    label : str
        Text shown on the button.
    get : str
        Optional GET query-string payload (empty by default).
    """

    def __init__(self, url, label, get=''):
        # store everything as plain attributes; no validation is performed
        self.url, self.label, self.get = url, label, get
| 15.777778 | 43 | 0.542254 | 139 | 0.978873 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0.014085 |
359e4a1d1a339ad0c2dbb1f04272dbcd884b9fc0 | 163 | py | Python | HSTB/shared/settings.py | noaa-ocs-hydrography/shared | d2004e803c708dffa43d09d3ffea4e4045811b28 | [
"CC0-1.0"
] | null | null | null | HSTB/shared/settings.py | noaa-ocs-hydrography/shared | d2004e803c708dffa43d09d3ffea4e4045811b28 | [
"CC0-1.0"
] | null | null | null | HSTB/shared/settings.py | noaa-ocs-hydrography/shared | d2004e803c708dffa43d09d3ffea4e4045811b28 | [
"CC0-1.0"
] | null | null | null | from sys import platform
# sys.platform is 'win32' on Windows, 'linux' on Linux and 'darwin' on macOS.
# The original substring test ("'win' in platform") also matched 'darwin',
# wrongly selecting the Windows registry backend on macOS, so a prefix test
# is used instead.
if platform.startswith('win'):
    from .winreg import *
elif 'linux' in platform:
    from posixreg import *
    import posixreg

    # NOTE(review): calling the module's __init__ presumably performs one-time
    # registry setup -- confirm against posixreg's definition
    posixreg.__init__()
| 18.111111 | 25 | 0.730061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.07362 |
359f1c4eaa4c5057ef7d59000e8d7426e73f5504 | 13,956 | py | Python | src/skdh/features/core.py | PfizerRD/scikit-digital-health | f834a82d750d9e3cdd35f4f5692a0a388210b821 | [
"MIT"
] | 1 | 2022-03-31T20:56:49.000Z | 2022-03-31T20:56:49.000Z | src/skdh/features/core.py | PfizerRD/scikit-digital-health | f834a82d750d9e3cdd35f4f5692a0a388210b821 | [
"MIT"
] | null | null | null | src/skdh/features/core.py | PfizerRD/scikit-digital-health | f834a82d750d9e3cdd35f4f5692a0a388210b821 | [
"MIT"
] | null | null | null | """
Core functionality for feature computation
Lukas Adamowicz
Copyright (c) 2021. Pfizer Inc. All rights reserved.
"""
from abc import ABC, abstractmethod
from collections.abc import Iterator, Sequence
import json
from warnings import warn
from pandas import DataFrame
from numpy import float_, asarray, zeros, sum, moveaxis
__all__ = ["Bank"]
class ArrayConversionError(Exception):
    """Raised when an input signal cannot be coerced into a numpy array."""
def get_n_feats(size, index):
    """
    Count how many entries an index specification selects from an axis.

    Parameters
    ----------
    size : int
        Length of the axis being indexed.
    index : int, Iterator, Sequence, slice, or Ellipsis
        Index specification.

    Returns
    -------
    int or None
        Number of selected entries; None for unrecognized index types
        (mirrors the original fall-through behavior).
    """
    if isinstance(index, int):
        count = 1
    elif isinstance(index, (Iterator, Sequence)):
        count = len(index)
    elif isinstance(index, slice):
        # resolve the slice against the axis length to count its elements
        count = len(range(*index.indices(size)))
    elif index is Ellipsis:
        count = size
    else:
        count = None
    return count
def partial_index_check(index):
    """
    Validate a single index specification.

    Parameters
    ----------
    index : None, int, Iterator, Sequence, slice, or Ellipsis
        Index specification; None is mapped to Ellipsis.

    Returns
    -------
    object
        The validated index (Ellipsis if `index` was None).

    Raises
    ------
    IndexError
        If the index type is not supported (including strings, which would
        otherwise pass the Sequence check).
    """
    if index is None:
        return Ellipsis
    allowed_types = (int, Iterator, Sequence, type(...), slice)
    if not isinstance(index, allowed_types):
        raise IndexError(f"Index type ({type(index)}) not understood.")
    # str is a Sequence, so it must be rejected explicitly
    if isinstance(index, str):
        raise IndexError("Index type (str) not understood.")
    return index
def normalize_indices(nfeat, index):
    """
    Expand an index specification into one index per feature.

    Parameters
    ----------
    nfeat : int
        Number of features the indices must cover.
    index : None, int, slice, Ellipsis, Iterator, or Sequence
        Either a single index applied to every feature, an iterable of ints
        applied (as a whole) to every feature, or a sequence with one index
        entry per feature.

    Returns
    -------
    list
        List of ``nfeat`` index objects, one per feature.

    Raises
    ------
    IndexError
        If the index type cannot be interpreted.
    """
    if index is None:
        return [...] * nfeat
    elif not isinstance(index, (Iterator, Sequence)):  # slice, single integer, etc
        return [partial_index_check(index)] * nfeat
    elif all(isinstance(i, int) for i in index):  # iterable of ints
        return [index] * nfeat
    elif isinstance(index, Sequence):  # able to be indexed
        return [partial_index_check(i) for i in index]
    else:  # pragma: no cover
        # bug fix: the original *returned* the IndexError instance instead of
        # raising it, silently handing callers an exception object
        raise IndexError(f"Index type ({type(index)}) not understood.")
def normalize_axes(ndim, axis, ind_axis):
    """
    Normalize input axes to be positive/correct for how the swapping has to work.

    The caller rearranges the data with two ``moveaxis`` steps (computation
    axis to the end, then index axis to the front).  Because the computation
    axis is moved first, an index axis that lies *after* the computation axis
    shifts down by one position; this function performs that adjustment.

    Parameters
    ----------
    ndim : int
        Number of dimensions of the data.
    axis : int
        Axis along which features are computed. May be negative.
    ind_axis : {int, None}
        Axis holding the indices, or None if not applicable. May be negative.

    Returns
    -------
    axis : int
        Non-negative computation axis.
    ind_axis : {int, None}
        Non-negative, move-adjusted index axis, or None.

    Raises
    ------
    ValueError
        If ``axis`` and ``ind_axis`` refer to the same axis, including when
        one is given negatively (e.g. ``axis=-1`` with ``ind_axis=ndim - 1``).
    """
    # bug fix: normalize negative axes *before* the equality check; the
    # original compared the raw values only, so axis=-1 / ind_axis=ndim-1
    # slipped through and produced an invalid (ax, ia) pair
    ax = axis if axis >= 0 else ndim + axis
    ia = None if ind_axis is None else (ind_axis if ind_axis >= 0 else ndim + ind_axis)
    if ia is not None and ax == ia:
        raise ValueError("axis and index_axis cannot be the same")
    if ndim == 1:
        # a 1D signal has no separate index axis
        return 0, None
    if ia is None:
        return ax, None
    # the index axis is moved after the computation axis has already been
    # moved to the end, so index axes above the computation axis shift by one
    if ia > ax:
        ia -= 1
    return ax, ia
class Bank:
    """
    A feature bank object for ease in creating a table or pipeline of features to be computed.
    Parameters
    ----------
    bank_file : {None, path-like}, optional
        Path to a saved bank file to load. Optional
    Examples
    --------
    """

    # parallel lists: _indices[i] is the index specification for _feats[i]
    __slots__ = ("_feats", "_indices")

    def __str__(self):
        return "Bank"

    def __repr__(self):
        # one feature repr per line, tab-indented
        s = "Bank["
        for f in self._feats:
            s += f"\n\t{f!r},"
        s += "\n]"
        return s

    def __contains__(self, item):
        # membership relies on Feature.__eq__ (same name and parameters)
        return item in self._feats

    def __len__(self):
        return len(self._feats)

    def __init__(self, bank_file=None):
        # initialize some variables
        self._feats = []
        self._indices = []
        if bank_file is not None:
            self.load(bank_file)

    def add(self, features, index=None):
        """
        Add a feature or features to the pipeline.
        Parameters
        ----------
        features : {Feature, list}
            Single signal Feature, or list of signal Features to add to the feature Bank
        index : {int, slice, list}, optional
            Index to be applied to data input to each features. Either a index that will
            apply to every feature, or a list of features corresponding to each feature being
            added.
        """
        if isinstance(features, Feature):
            # duplicates are permitted, only warned about
            if features in self:
                warn(
                    f"Feature {features!s} already in the Bank, will be duplicated.",
                    UserWarning,
                )
            self._indices.append(partial_index_check(index))
            self._feats.append(features)
        elif all([isinstance(i, Feature) for i in features]):
            if any([ft in self for ft in features]):
                warn("Feature already in the Bank, will be duplicated.", UserWarning)
            self._indices.extend(normalize_indices(len(features), index))
            self._feats.extend(features)
        # NOTE(review): input that is neither a Feature nor an iterable of
        # Features falls through silently -- confirm this is intended

    def save(self, file):
        """
        Save the feature Bank to a file for a persistent object that can be loaded later to create
        the same Bank as before
        Parameters
        ----------
        file : path-like
            File to be saved to. Creates a new file or overwrites an existing file.
        """
        out = []
        for i, ft in enumerate(self._feats):
            # Ellipsis is not JSON serializable; stored as the string "Ellipsis"
            idx = "Ellipsis" if self._indices[i] is Ellipsis else self._indices[i]
            out.append(
                {ft.__class__.__name__: {"Parameters": ft._params, "Index": idx}}
            )
        with open(file, "w") as f:
            json.dump(out, f)

    def load(self, file):
        """
        Load a previously saved feature Bank from a json file.
        Parameters
        ----------
        file : path-like
            File to be read to create the feature Bank.
        """
        # the import must be here, otherwise a circular import error occurs
        from skdh.features import lib
        with open(file, "r") as f:
            feats = json.load(f)
        for ft in feats:
            # each entry maps a feature class name to its parameters and index
            name = list(ft.keys())[0]
            params = ft[name]["Parameters"]
            index = ft[name]["Index"]
            if index == "Ellipsis":
                index = Ellipsis
            # add it to the feature bank
            self.add(getattr(lib, name)(**params), index=index)

    def compute(
        self, signal, fs=1.0, *, axis=-1, index_axis=None, indices=None, columns=None
    ):
        """
        Compute the specified features for the given signal
        Parameters
        ----------
        signal : {array-like}
            Array-like signal to have features computed for.
        fs : float, optional
            Sampling frequency in Hz. Default is 1Hz
        axis : int, optional
            Axis along which to compute the features. Default is -1.
        index_axis : {None, int}, optional
            Axis corresponding to the indices specified in `Bank.add` or `indices`. Default is
            None, which assumes that this axis is not part of the signal. Note that setting this to
            None means values for `indices` or the indices set in `Bank.add` will be ignored.
        indices : {None, int, list-like, slice, ellipsis}, optional
            Indices to apply to the input signal. Either None, a integer, list-like, slice to apply
            to each feature, or a list-like of lists/objects with a 1:1 correspondence to the
            features present in the Bank. If provided, takes precedence over any values given in
            `Bank.add`. Default is None, which will use indices from `Bank.add`.
        columns : {None, list}, optional
            Columns to use if providing a dataframe. Default is None (uses all columns).
        Returns
        -------
        feats : numpy.ndarray
            Computed features.
        """
        # standardize the input signal
        if isinstance(signal, DataFrame):
            columns = columns if columns is not None else signal.columns
            x = signal[columns].values.astype(float_)
        else:
            try:
                x = asarray(signal, dtype=float_)
            except ValueError as e:
                raise ArrayConversionError("Error converting signal to ndarray") from e
        axis, index_axis = normalize_axes(x.ndim, axis, index_axis)
        if index_axis is None:
            # without an index axis every feature sees the full signal
            indices = [...] * len(self)
        else:
            if indices is None:
                indices = self._indices
            else:
                indices = normalize_indices(len(self), indices)
        # get the number of features that will results. Needed to allocate the feature array
        if index_axis is None:
            # don't have to move any other axes than the computation axis
            x = moveaxis(x, axis, -1)
            # number of feats is 1 per
            n_feats = [1] * len(self)
            feats = zeros((sum(n_feats),) + x.shape[:-1], dtype=float_)
        else:
            # move both the computation and index axis. do this in two steps to allow for undoing
            # just the index axis swap later. The index_axis has been adjusted appropriately
            # to match this axis move in 2 steps
            x = moveaxis(x, axis, -1)
            x = moveaxis(x, index_axis, 0)
            n_feats = []
            for ind in indices:
                n_feats.append(get_n_feats(x.shape[0], ind))
            feats = zeros((sum(n_feats),) + x.shape[1:-1], dtype=float_)
        feat_i = 0  # keep track of where in the feature array we are
        for i, ft in enumerate(self._feats):
            feats[feat_i : feat_i + n_feats[i]] = ft.compute(
                x[indices[i]], fs=fs, axis=-1
            )
            feat_i += n_feats[i]
        # Move the shape back to the correct one.
        # only have to do this if there is an index axis, because otherwise the array is still in
        # the same order as originally
        if index_axis is not None:
            feats = moveaxis(feats, 0, index_axis)  # undo the previous swap/move
        return feats
class Feature(ABC):
    """
    Base feature class.

    Subclasses store their constructor keyword arguments in ``_params``,
    which drives ``repr``, equality testing, and serialization by `Bank`.
    """

    __slots__ = ("_params",)

    def __init__(self, **params):
        self._params = params

    def __str__(self):
        return self.__class__.__name__

    def __repr__(self):
        # render as ClassName(key1=val1, key2=val2)
        arg_list = ", ".join(f"{k}={v!r}" for k, v in self._params.items())
        return f"{self.__class__.__name__}({arg_list})"

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        # equal iff same feature name and identical construction parameters
        return str(other) == str(self) and other._params == self._params

    @abstractmethod
    def compute(self, signal, fs=1.0, *, axis=-1):
        """
        Compute the signal feature.
        Parameters
        ----------
        signal : array-like
            Signal to compute the feature over.
        fs : float, optional
            Sampling frequency in Hz. Default is 1.0
        axis : int, optional
            Axis over which to compute the feature. Default is -1 (last dimension)
        Returns
        -------
        feat : numpy.ndarray
            ndarray of the computed feature
        """
        # coerce to a float ndarray and put the computation axis last for
        # subclasses that delegate to super().compute()
        data = asarray(signal, dtype=float_)
        return moveaxis(data, axis, -1)
| 37.718919 | 99 | 0.481227 | 8,580 | 0.614789 | 0 | 0 | 653 | 0.04679 | 0 | 0 | 8,028 | 0.575236 |
359f880a9b970ee0910222ff4448b1d006562320 | 4,013 | py | Python | src/bin/shipyard_airflow/tests/unit/control/test_actions_validations_id_api.py | openvdro/airship-shipyard | bae15294c534cf321f5c7ca37592dfa74c4ad7c2 | [
"Apache-2.0"
] | 12 | 2018-05-18T18:59:23.000Z | 2019-05-10T12:31:44.000Z | src/bin/shipyard_airflow/tests/unit/control/test_actions_validations_id_api.py | openvdro/airship-shipyard | bae15294c534cf321f5c7ca37592dfa74c4ad7c2 | [
"Apache-2.0"
] | 4 | 2021-07-28T14:36:57.000Z | 2022-03-22T16:39:23.000Z | src/bin/shipyard_airflow/tests/unit/control/test_actions_validations_id_api.py | openvdro/airship-shipyard | bae15294c534cf321f5c7ca37592dfa74c4ad7c2 | [
"Apache-2.0"
] | 9 | 2018-05-18T16:42:41.000Z | 2019-04-18T20:12:14.000Z | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch
import pytest
from shipyard_airflow.control.action.actions_validations_id_api import \
ActionsValidationsResource
from shipyard_airflow.errors import ApiError
from tests.unit.control import common
def actions_db(action_id):
"""
replaces the actual db call
"""
if action_id == 'error_it':
return None
else:
return {
'id': '59bb330a-9e64-49be-a586-d253bb67d443',
'name': 'dag_it',
'parameters': None,
'dag_id': 'did2',
'dag_execution_date': '2017-09-06 14:10:08.528402',
'user': 'robot1',
'timestamp': '2017-09-06 14:10:08.528402',
'context_marker': '8-4-4-4-12a'
}
def get_validations(validation_id):
"""
Stub to return validations
"""
if validation_id == '43':
return {
'id': '43',
'action_id': '59bb330a-9e64-49be-a586-d253bb67d443',
'validation_name': 'It has shiny goodness',
'details': 'This was not very shiny.'
}
else:
return None
class TestActionsValidationsResource():
@patch.object(ActionsValidationsResource, 'get_action_validation',
common.str_responder)
def test_on_get(self, api_client):
"""Validate the on_get method returns 200 on success"""
result = api_client.simulate_get(
"/api/v1.0/actions/123456/validations/123456",
headers=common.AUTH_HEADERS)
assert result.status_code == 200
def test_get_action_validation(self):
"""Tests the main response from get all actions"""
action_resource = ActionsValidationsResource()
# stubs for db
action_resource.get_action_db = actions_db
action_resource.get_validation_db = get_validations
validation = action_resource.get_action_validation(
action_id='59bb330a-9e64-49be-a586-d253bb67d443',
validation_id='43')
assert validation[
'action_id'] == '59bb330a-9e64-49be-a586-d253bb67d443'
assert validation['validation_name'] == 'It has shiny goodness'
with pytest.raises(ApiError) as api_error:
action_resource.get_action_validation(
action_id='59bb330a-9e64-49be-a586-d253bb67d443',
validation_id='not a chance')
assert 'Validation not found' in str(api_error)
with pytest.raises(ApiError) as api_error:
validation = action_resource.get_action_validation(
action_id='error_it', validation_id='not a chance')
assert 'Action not found' in str(api_error)
@patch('shipyard_airflow.db.shipyard_db.ShipyardDbAccess.get_action_by_id')
def test_get_action_db(self, mock_get_action_by_id):
action_resource = ActionsValidationsResource()
action_id = '123456789'
action_resource.get_action_db(action_id)
mock_get_action_by_id.assert_called_with(action_id=action_id)
@patch(
'shipyard_airflow.db.shipyard_db.ShipyardDbAccess.get_validation_by_id'
)
def test_get_validation_db(self, mock_get_tasks_by_id):
action_resource = ActionsValidationsResource()
validation_id = '123456'
action_resource.get_validation_db(validation_id)
mock_get_tasks_by_id.assert_called_with(validation_id=validation_id)
| 37.157407 | 79 | 0.676053 | 2,280 | 0.568154 | 0 | 0 | 1,105 | 0.275355 | 0 | 0 | 1,640 | 0.408672 |
35a03d99d3719963febefb8c4a03a9c25373c377 | 17,650 | py | Python | tests/test_lccv.py | fmohr/llcv | 22ba7b9712397f750150ed7e793245b5a0ae3fdf | [
"BSD-3-Clause"
] | 3 | 2021-11-30T18:58:58.000Z | 2022-03-07T20:40:41.000Z | tests/test_lccv.py | fmohr/llcv | 22ba7b9712397f750150ed7e793245b5a0ae3fdf | [
"BSD-3-Clause"
] | null | null | null | tests/test_lccv.py | fmohr/llcv | 22ba7b9712397f750150ed7e793245b5a0ae3fdf | [
"BSD-3-Clause"
] | null | null | null | import logging
import lccv
import numpy as np
import sklearn.datasets
from sklearn import *
import unittest
from parameterized import parameterized
import itertools as it
import time
from sklearn.experimental import enable_hist_gradient_boosting # noqa
import openml
import pandas as pd
def get_dataset(openmlid):
    """Download an OpenML dataset and return it as an (X, y) pair.

    Rows with missing values are dropped and categorical attributes are
    one-hot encoded.  If the one-hot expansion would exceed 10**5 columns,
    a sparse matrix is built instead of a dense array.

    Parameters
    ----------
    openmlid : int
        OpenML dataset id.

    Returns
    -------
    X : numpy.ndarray or scipy.sparse.lil_matrix
        Feature matrix.
    y : numpy.ndarray
        Target labels (the dataset's default target attribute).
    """
    ds = openml.datasets.get_dataset(openmlid)
    df = ds.get_data()[0].dropna()
    y = df[ds.default_target_attribute].values
    # estimate the size of the one-hot expansion of all categorical columns;
    # stop early once it is clearly too large for a dense representation
    categorical_attributes = df.select_dtypes(exclude=['number']).columns
    expansion_size = 1
    for att in categorical_attributes:
        expansion_size *= len(pd.unique(df[att]))
        if expansion_size > 10**5:
            break
    if expansion_size < 10**5:
        X = pd.get_dummies(df[[c for c in df.columns if c != ds.default_target_attribute]]).values.astype(float)
    else:
        # bug fix: lil_matrix was used without ever being imported, which
        # raised a NameError whenever the sparse branch was taken
        from scipy.sparse import lil_matrix
        print("creating SPARSE data")
        dfSparse = pd.get_dummies(df[[c for c in df.columns if c != ds.default_target_attribute]], sparse=True)
        print("dummies created, now creating sparse matrix")
        X = lil_matrix(dfSparse.shape, dtype=np.float32)
        for i, col in enumerate(dfSparse.columns):
            ix = dfSparse[col] != 0
            X[np.where(ix), i] = 1
        print("Done. shape is" + str(X.shape))
    return X, y
class TestLccv(unittest.TestCase):
    # cartesian product of these preprocessors and learners drives the
    # parameterized pipeline tests below; None means "no preprocessor"
    preprocessors = [None, sklearn.preprocessing.RobustScaler, sklearn.kernel_approximation.RBFSampler]
    learners = [sklearn.svm.LinearSVC, sklearn.tree.DecisionTreeClassifier, sklearn.tree.ExtraTreeClassifier, sklearn.linear_model.LogisticRegression, sklearn.linear_model.PassiveAggressiveClassifier, sklearn.linear_model.Perceptron, sklearn.linear_model.RidgeClassifier, sklearn.linear_model.SGDClassifier, sklearn.neural_network.MLPClassifier, sklearn.discriminant_analysis.LinearDiscriminantAnalysis, sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis, sklearn.naive_bayes.BernoulliNB, sklearn.naive_bayes.MultinomialNB, sklearn.neighbors.KNeighborsClassifier, sklearn.ensemble.ExtraTreesClassifier, sklearn.ensemble.RandomForestClassifier, sklearn.ensemble.GradientBoostingClassifier, sklearn.ensemble.GradientBoostingClassifier, sklearn.ensemble.HistGradientBoostingClassifier]

    # NOTE(review): declared without cls and without @classmethod; unittest
    # still invokes it via the class object, so the zero-argument call works
    def setUpClass():
        # setup logger for this test suite
        logger = logging.getLogger('lccv_test')
        logger.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        logger.addHandler(ch)
        # configure lccv logger (by default set to WARN, change it to DEBUG if tests fail)
        lccv_logger = logging.getLogger("lccv")
        lccv_logger.setLevel(logging.INFO)
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        lccv_logger.addHandler(ch)

    def setUp(self):
        # cache logger handles for the individual test methods
        self.logger = logging.getLogger("lccv_test")
        self.lccv_logger = logging.getLogger("lccv")

    def test_partition_train_test_data(self):
        # checks sizes and seed-reproducibility of the train/test partitioner
        self.logger.info("Start Test on Partitioning")
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        for seed in [0, 1, 2, 3, 4, 5]:
            self.logger.info(f"Run test for seed {seed}")
            n_te = 32
            n_tr = 150 - n_te
            f_tr, l_tr, f_te, l_te = lccv._partition_train_test_data(
                features, labels, n_te, seed)
            # check correct sizes
            self.assertEqual(f_tr.shape, (n_tr, 4))
            self.assertEqual(l_tr.shape, (n_tr, ))
            self.assertEqual(f_te.shape, (n_te, 4))
            self.assertEqual(l_te.shape, (n_te, ))
            # assume exact same test set, also when train set is double
            f_tr2, l_tr2, f_te2, l_te2 = lccv._partition_train_test_data(
                features, labels, n_te, seed)
            np.testing.assert_array_equal(f_te, f_te2)
            np.testing.assert_array_equal(l_te, l_te2)
            np.testing.assert_array_equal(f_tr, f_tr2)
            np.testing.assert_array_equal(l_tr, l_tr2)
            self.logger.info(f"Finished test for seed {seed}")

    def test_lccv_normal_function(self):
        # all anchors forced: expect results at every power-of-2 anchor plus
        # the full training size (135 = 90% of 150)
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        learner = sklearn.tree.DecisionTreeClassifier(random_state=42)
        self.logger.info(f"Starting test of LCCV on {learner.__class__.__name__}")
        _, _, res, _ = lccv.lccv(learner, features, labels, base=2, min_exp=4, enforce_all_anchor_evaluations=True, logger=self.lccv_logger)
        self.assertSetEqual(set(res.keys()), {16, 32, 64, 128, 135})
        for key, val in res.items():
            self.logger.info(f"Key: {key}, Val: {val}")
            self.assertFalse(np.isnan(val['conf'][0]))
            self.assertFalse(np.isnan(val['conf'][1]))
        self.logger.info(f"Finished test of LCCV on {learner.__class__.__name__}")

    def test_lccv_all_points_finish(self):
        # with a loose threshold r, all anchors should still be evaluated
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        learner = sklearn.tree.DecisionTreeClassifier(random_state=42)
        self.logger.info(f"Starting test of LCCV on {learner.__class__.__name__}")
        _, _, res, _ = lccv.lccv(learner, features, labels, r=0.05, base=2, min_exp=4, enforce_all_anchor_evaluations=True, logger=self.lccv_logger)
        self.assertSetEqual(set(res.keys()), {16, 32, 64, 128, 135})
        for key, val in res.items():
            self.logger.info(f"Key: {key}, Val: {val}")
            self.assertFalse(np.isnan(val['conf'][0]))
            self.assertFalse(np.isnan(val['conf'][1]))
        self.logger.info(f"Finished test of LCCV on {learner.__class__.__name__}")

    def test_lccv_all_points_skipped(self):
        # without forced evaluations some intermediate anchors are skipped
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        learner = sklearn.tree.DecisionTreeClassifier(random_state=42)
        self.logger.info(f"Starting test of LCCV on {learner.__class__.__name__}")
        _, _, res, _ = lccv.lccv(learner, features, labels, r=0.05, base=2, min_exp=4, enforce_all_anchor_evaluations=False, logger=self.lccv_logger)
        self.assertSetEqual(set(res.keys()), {16, 32, 64, 135})
        for key, val in res.items():
            self.logger.info(f"Key: {key}, Val: {val}")
            self.assertFalse(np.isnan(val['conf'][0]))
            self.assertFalse(np.isnan(val['conf'][1]))
        self.logger.info(f"Finished test of LCCV on {learner.__class__.__name__}")

    def test_lccv_pruning(self):
        # an unattainable threshold (r=-0.5) must cause early pruning, so the
        # larger anchors are never reached
        features, labels = sklearn.datasets.load_digits(return_X_y=True)
        learner = sklearn.tree.DecisionTreeClassifier(random_state=42)
        self.logger.info(f"Starting test of LCCV on {learner.__class__.__name__}")
        _, _, res, _ = lccv.lccv(learner, features, labels, r=-0.5, base=2, min_exp=4, enforce_all_anchor_evaluations=True, logger=self.lccv_logger)
        self.assertSetEqual(set(res.keys()), {16, 32, 64})
        for key, val in res.items():
            self.logger.info(f"Key: {key}, Val: {val}")
            self.assertFalse(np.isnan(val['conf'][0]))
            self.assertFalse(np.isnan(val['conf'][1]))
        self.logger.info(f"Finished test of LCCV on {learner.__class__.__name__}")

    """
    This test checks whether the results are equivalent to a 5CV or 10CV
    """
    @parameterized.expand(list(it.product(preprocessors, learners, [1464])))#[61])))
    def test_lccv_runtime_and_result_bare(self, preprocessor, learner, dataset):
        # compares 80%-LCCV against a 5-fold CV and 90%-LCCV against a
        # 10-fold CV on runtime and score, without an early-stopping threshold
        X, y = get_dataset(dataset)
        self.logger.info(f"Start Test LCCV when running with r=1.0 on dataset {dataset}")

        # configure pipeline
        steps = []
        if preprocessor is not None:
            pp = preprocessor()
            if "copy" in pp.get_params().keys():
                pp = preprocessor(copy=False)
            steps.append(("pp", pp))
        learner_inst = learner()
        if "warm_start" in learner_inst.get_params().keys(): # active warm starting if available, because this can cause problems.
            learner_inst = learner(warm_start=True)
        steps.append(("predictor", learner_inst))
        pl = sklearn.pipeline.Pipeline(steps)

        # do tests
        try:
            # run 5-fold CV
            self.logger.info("Running 5CV")
            start = time.time()
            score_5cv = 1 - np.mean(sklearn.model_selection.cross_validate(sklearn.base.clone(pl), X, y, cv=5)["test_score"])
            end = time.time()
            runtime_5cv = end - start
            self.logger.info(f"Finished 5CV within {runtime_5cv}s.")

            # run 80lccv
            self.logger.info("Running 80LCCV")
            start = time.time()
            score_80lccv = lccv.lccv(sklearn.base.clone(pl), X, y, target_anchor=.8, MAX_EVALUATIONS=5)[0]
            end = time.time()
            runtime_80lccv = end - start
            self.logger.info(f"Finished 80LCCV within {runtime_80lccv}s. Runtime diff was {np.round(runtime_5cv - runtime_80lccv, 1)}s. Performance diff was {np.round(score_5cv - score_80lccv, 2)}.")

            # check runtime and result
            tol = 0.1#0.05 if dataset != 61 else 0.1
            self.assertTrue(runtime_80lccv <= (runtime_5cv + 1), msg=f"Runtime of 80lccv was {runtime_80lccv}, which is more than the {runtime_5cv} of 5CV. Pipeline was {pl} and dataset {dataset}")
            self.assertTrue(np.abs(score_5cv - score_80lccv) <= tol, msg=f"Avg Score of 80lccv was {score_80lccv}, which deviates by more than {tol} from the {score_5cv} of 5CV. Pipeline was {pl} and dataset {dataset}")

            # run 10-fold CV
            self.logger.info("Running 10CV")
            start = time.time()
            score_10cv = 1 - np.mean(sklearn.model_selection.cross_validate(sklearn.base.clone(pl), X, y, cv=10)["test_score"])
            end = time.time()
            runtime_10cv = end - start
            self.logger.info(f"Finished 10CV within {runtime_10cv}s.")

            # run 90lccv
            self.logger.info("Running 90LCCV")
            start = time.time()
            score_90lccv = lccv.lccv(sklearn.base.clone(pl), X, y, target_anchor=.9)[0]
            end = time.time()
            runtime_90lccv = end - start
            self.logger.info(f"Finished 90LCCV within {runtime_90lccv}s. Runtime diff was {np.round(runtime_10cv - runtime_90lccv, 1)}s. Performance diff was {np.round(score_10cv - score_90lccv, 2)}.")

            # check runtime and result
            tol = .1# 0.05 if dataset != 61 else 0.1
            self.assertTrue(runtime_90lccv <= runtime_10cv + 1, msg=f"Runtime of 90lccv was {runtime_90lccv}, which is more than the {runtime_10cv} of 10CV. Pipeline was {pl} and dataset {dataset}")
            self.assertTrue(np.abs(score_10cv - score_90lccv) <= tol, msg=f"Avg Score of 90lccv was {score_90lccv}, which deviates by more than {tol} from the {score_10cv} of 10CV. Pipeline was {pl} and dataset {dataset}")
        except ValueError:
            print("Skipping case in which training is not possible!")

    """
    This test checks whether the results are equivalent to a 5CV or 10CV
    """
    @parameterized.expand(list(it.product(preprocessors, learners, [(61, 0.0), (1485, 0.2)])))
    def test_lccv_runtime_and_result_applied(self, preprocessor, learner, dataset):
        # same comparison as above, but with an early-stopping threshold r;
        # an LCCV result of NaN is acceptable only when the CV score is worse
        # than the threshold
        X, y = get_dataset(dataset[0])
        r = dataset[1]
        self.logger.info(f"Start Test LCCV when running with r={r} on dataset {dataset[0]} wither preprocessor {preprocessor} and learner {learner}")

        # configure pipeline
        steps = []
        if preprocessor is not None:
            pp = preprocessor()
            if "copy" in pp.get_params().keys():
                pp = preprocessor(copy=False)
            steps.append(("pp", pp))
        learner_inst = learner()
        if "warm_start" in learner_inst.get_params().keys(): # active warm starting if available, because this can cause problems.
            learner_inst = learner(warm_start=True)
        steps.append(("predictor", learner_inst))
        pl = sklearn.pipeline.Pipeline(steps)

        # do tests
        try:
            # run 5-fold CV
            self.logger.info("Running 5CV")
            start = time.time()
            score_5cv = 1 - np.mean(sklearn.model_selection.cross_validate(sklearn.base.clone(pl), X, y, cv=5)["test_score"])
            end = time.time()
            runtime_5cv = end - start
            self.logger.info(f"Finished 5CV within {round(runtime_5cv, 2)}s with score {np.round(score_5cv, 3)}.")

            # run 80lccv
            self.logger.info("Running 80LCCV")
            start = time.time()
            score_80lccv = lccv.lccv(sklearn.base.clone(pl), X, y, r=r, target_anchor=.8, MAX_EVALUATIONS=5)[0]
            end = time.time()
            runtime_80lccv = end - start
            self.logger.info(f"Finished 80LCCV within {round(runtime_80lccv, 2)}s with score {np.round(score_80lccv, 3)}. Runtime diff was {np.round(runtime_5cv - runtime_80lccv, 1)}s. Performance diff was {np.round(score_5cv - score_80lccv, 2)}.")

            # check runtime and result
            tol = 0.05 if dataset != 61 else 0.1
            self.assertTrue(runtime_80lccv <= 2 * (runtime_5cv + 1), msg=f"Runtime of 80lccv was {runtime_80lccv}, which is more than the {runtime_5cv} of 5CV. Pipeline was {pl}")
            if np.isnan(score_80lccv):
                self.assertTrue(score_5cv > r, msg=f"80lccv returned nan even though the {score_5cv} of 10CV is not worse than the threshold {r}. Pipeline was {pl} and dataset {dataset}")
            else:
                self.assertTrue(np.abs(score_5cv - score_80lccv) <= tol, msg=f"Avg Score of 80lccv was {score_80lccv}, which deviates by more than {tol} from the {score_5cv} of 5CV. Pipeline was {pl} and dataset {dataset}")

            # run 10-fold CV
            self.logger.info("Running 10CV")
            start = time.time()
            score_10cv = 1 - np.mean(sklearn.model_selection.cross_validate(sklearn.base.clone(pl), X, y, cv=10)["test_score"])
            end = time.time()
            runtime_10cv = end - start
            self.logger.info(f"Finished 10CV within {round(runtime_10cv, 2)}s with score {np.round(score_10cv, 3)}")

            # run 90lccv
            self.logger.info("Running 90LCCV")
            start = time.time()
            score_90lccv = lccv.lccv(sklearn.base.clone(pl), X, y, r=r, target_anchor=.9)[0]
            end = time.time()
            runtime_90lccv = end - start
            self.logger.info(f"Finished 90LCCV within {round(runtime_90lccv, 2)}s with score {np.round(score_90lccv, 3)}. Runtime diff was {np.round(runtime_10cv - runtime_90lccv, 1)}s. Performance diff was {np.round(score_10cv - score_90lccv, 2)}.")

            # check runtime and result
            tol = 0.05 if dataset != 61 else 0.1
            self.assertTrue(runtime_90lccv <= 2 * (runtime_10cv + 1), msg=f"Runtime of 90lccv was {runtime_90lccv}, which is more than the {runtime_10cv} of 10CV. Pipeline was {pl}")
            if np.isnan(score_90lccv):
                self.assertTrue(score_10cv > r, msg=f"90lccv returned nan even though the {score_10cv} of 10CV is not worse than the threshold {r}. Pipeline was {pl} and dataset {dataset}")
            else:
                self.assertTrue(np.abs(score_10cv - score_90lccv) <= tol, msg=f"Avg Score of 90lccv was {score_90lccv}, which deviates by more than {tol} from the {score_10cv} of 10CV. Pipeline was {pl} and dataset {dataset}")
        except ValueError:
            print("Skipping case in which training is not possible!")

    """
    This checks whether LCCV respects the timeout
    """
    @parameterized.expand(list(it.product(preprocessors, learners, [(61, 0.0), (1485, 0.2)])))
    def test_lccv_respects_timeouts(self, preprocessor, learner, dataset):
        # a 1.5s timeout is imposed; LCCV must return within that budget
        X, y = get_dataset(dataset[0])
        r = dataset[1]
        self.logger.info(f"Start Test LCCV when running with r={r} on dataset {dataset[0]} wither preprocessor {preprocessor} and learner {learner}")

        # configure pipeline
        steps = []
        if preprocessor is not None:
            pp = preprocessor()
            if "copy" in pp.get_params().keys():
                pp = preprocessor(copy=False)
            steps.append(("pp", pp))
        learner_inst = learner()
        if "warm_start" in learner_inst.get_params().keys(): # active warm starting if available, because this can cause problems.
            learner_inst = learner(warm_start=True)
        steps.append(("predictor", learner_inst))
        pl = sklearn.pipeline.Pipeline(steps)
        timeout = 1.5

        # do tests
        try:
            # run 80lccv
            self.logger.info("Running 80LCCV")
            start = time.time()
            score_80lccv = lccv.lccv(sklearn.base.clone(pl), X, y, r=r, target_anchor=.8, MAX_EVALUATIONS=5, timeout=timeout)[0]
            end = time.time()
            runtime_80lccv = end - start
            self.assertTrue(runtime_80lccv <= timeout, msg=f"Permitted runtime exceeded. Permitted was {timeout}s but true runtime was {runtime_80lccv}")
        except ValueError:
            print("Skipping case in which training is not possible!")
35a09d34a43d1ae1af511bf3da1674e129960148 | 4,099 | py | Python | plaso/formatters/firefox.py | cvandeplas/plaso | b625a2c267ed09505cfac84c9593d8c0922852b1 | [
"Apache-2.0"
] | 3 | 2016-03-11T02:47:08.000Z | 2016-12-24T03:19:27.000Z | plaso/formatters/firefox.py | cvandeplas/plaso | b625a2c267ed09505cfac84c9593d8c0922852b1 | [
"Apache-2.0"
] | null | null | null | plaso/formatters/firefox.py | cvandeplas/plaso | b625a2c267ed09505cfac84c9593d8c0922852b1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a formatter for the Mozilla Firefox history."""
from plaso.lib import errors
from plaso.formatters import interface
class FirefoxBookmarkAnnotationFormatter(interface.ConditionalEventFormatter):
  """Formats Firefox places.sqlite bookmark-annotation events."""

  DATA_TYPE = 'firefox:places:bookmark_annotation'

  SOURCE_SHORT = 'WEBHIST'
  SOURCE_LONG = 'Firefox History'

  # Long message: annotation content, bookmarked title and URL.
  FORMAT_STRING_PIECES = [
      u'Bookmark Annotation: [{content}]',
      u'to bookmark [{title}]',
      u'({url})']

  # Short message: bookmark title only.
  FORMAT_STRING_SHORT_PIECES = [u'Bookmark Annotation: {title}']
class FirefoxBookmarkFolderFormatter(interface.EventFormatter):
  """Formats Firefox places.sqlite bookmark-folder events."""

  DATA_TYPE = 'firefox:places:bookmark_folder'

  # The folder title is the entire message.
  FORMAT_STRING = u'{title}'

  SOURCE_SHORT = 'WEBHIST'
  SOURCE_LONG = 'Firefox History'
class FirefoxBookmarkFormatter(interface.ConditionalEventFormatter):
  """Formats Firefox places.sqlite URL-bookmark events."""

  DATA_TYPE = 'firefox:places:bookmark'

  SOURCE_SHORT = 'WEBHIST'
  SOURCE_LONG = 'Firefox History'

  # Long message: bookmark type, title and URL plus the places title and
  # the recorded visit count.
  FORMAT_STRING_PIECES = [
      u'Bookmark {type}',
      u'{title}',
      u'({url})',
      u'[{places_title}]',
      u'visit count {visit_count}']

  # Short message: title and URL only.
  FORMAT_STRING_SHORT_PIECES = [
      u'Bookmarked {title}',
      u'({url})']
class FirefoxPageVisitFormatter(interface.ConditionalEventFormatter):
  """Formatter for a Firefox places.sqlite page visited event.

  Adds a human-readable 'Transition: X' string, derived from the integer
  visit_type of the event, to the event's extra_string before delegating
  formatting to the conditional base class.
  """

  DATA_TYPE = 'firefox:places:page_visited'

  # Transitions defined in the source file:
  #   src/toolkit/components/places/nsINavHistoryService.idl
  # Also contains further explanation into what each of these settings mean.
  _URL_TRANSITIONS = {
      1: 'LINK',
      2: 'TYPED',
      3: 'BOOKMARK',
      4: 'EMBED',
      5: 'REDIRECT_PERMANENT',
      6: 'REDIRECT_TEMPORARY',
      7: 'DOWNLOAD',
      8: 'FRAMED_LINK',
  }
  # NOTE: the original code called _URL_TRANSITIONS.setdefault('UNKOWN'),
  # which only inserted a useless 'UNKOWN' -> None entry under a misspelled
  # *string* key. Lookups below use integer visit types, so that entry was
  # never read; it has been removed.

  # TODO: Make extra conditional formatting.
  FORMAT_STRING_PIECES = [
      u'{url}',
      u'({title})',
      u'[count: {visit_count}]',
      u'Host: {host}',
      u'{extra_string}']

  FORMAT_STRING_SHORT_PIECES = [u'URL: {url}']

  SOURCE_LONG = 'Firefox History'
  SOURCE_SHORT = 'WEBHIST'

  def GetMessages(self, event_object):
    """Return the message strings for the page-visit event.

    Raises:
      errors.WrongFormatter: if the event's data type does not match.
    """
    if self.DATA_TYPE != event_object.data_type:
      raise errors.WrongFormatter(u'Unsupported data type: {0:s}.'.format(
          event_object.data_type))

    # Unknown or missing visit types map to None and contribute no text.
    transition = self._URL_TRANSITIONS.get(
        getattr(event_object, 'visit_type', 0), None)

    if transition:
      transition_str = u'Transition: {0!s}'.format(transition)

    # NOTE: repeated calls keep appending to event_object.extra; each event
    # is expected to be formatted only once.
    if hasattr(event_object, 'extra'):
      if transition:
        event_object.extra.append(transition_str)
      event_object.extra_string = u' '.join(event_object.extra)
    elif transition:
      event_object.extra_string = transition_str

    return super(FirefoxPageVisitFormatter, self).GetMessages(event_object)
class FirefoxDowloadFormatter(interface.EventFormatter):
  """Formats Firefox downloads.sqlite download events.

  NOTE: the class name misspells 'Download'; it is kept as-is because other
  modules may reference the class under this exact name.
  """

  DATA_TYPE = 'firefox:downloads:download'

  SOURCE_SHORT = 'WEBHIST'
  SOURCE_LONG = 'Firefox History'

  FORMAT_STRING = (u'{url} ({full_path}). Received: {received_bytes} bytes '
                   u'out of: {total_bytes} bytes.')
  FORMAT_STRING_SHORT = u'{full_path} downloaded ({received_bytes} bytes)'
| 29.919708 | 78 | 0.704562 | 3,247 | 0.792144 | 0 | 0 | 0 | 0 | 0 | 0 | 2,200 | 0.536716 |
35a1f18f3c8d17f340bbcfd1a00b92ff04a898ed | 496 | py | Python | LeetCode/Unique Email Addresses.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | 13 | 2021-09-02T07:30:02.000Z | 2022-03-22T19:32:03.000Z | LeetCode/Unique Email Addresses.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | null | null | null | LeetCode/Unique Email Addresses.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | 3 | 2021-08-24T16:06:22.000Z | 2021-09-17T15:39:53.000Z | class Solution:
def getFormattedEMail(self, email):
userName, domain = email.split('@')
if '+' in userName:
userName = userName.split('+')[0]
if '.' in userName:
userName = ''.join(userName.split('.'))
return userName + '@' + domain
def numUniqueEmails(self, emails: List[str]) -> int:
emailsSet = set()
for email in emails:
emailsSet.add(self.getFormattedEMail(email))
return len(emailsSet) | 35.428571 | 56 | 0.5625 | 496 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.040323 |
35a2f5e7894ff096b0490a08a6c06f504ef9c5b2 | 1,175 | py | Python | algorithms/prims.py | karensuzue/Maze | a9c613323d24d115279b1892a33036acef7b715e | [
"MIT"
] | null | null | null | algorithms/prims.py | karensuzue/Maze | a9c613323d24d115279b1892a33036acef7b715e | [
"MIT"
] | null | null | null | algorithms/prims.py | karensuzue/Maze | a9c613323d24d115279b1892a33036acef7b715e | [
"MIT"
] | null | null | null | import random
from grid import Grid
from grid import Cell
class Prim():
    """Randomized Prim's-style maze generation over a rectangular cell grid."""

    def grid_to_list(self, grid):
        """
        Flatten the grid's 2-D cell matrix into a list, row by row.

        :param grid: a Grid object exposing ``rows``, ``cols`` and ``grid``
        :return: all cells in row-major order
        """
        # Comprehension instead of nested appends; also avoids shadowing the
        # builtin name ``list`` (the original bound a local called ``list``).
        return [grid.grid[r][c]
                for r in range(grid.rows)
                for c in range(grid.cols)]

    def generate(self, grid):
        """
        Carve a maze in-place: repeatedly pick a random cell already on the
        path and link it to a random neighbor until every cell is visited.

        :param grid: a Grid object
        :return: None; the grid's cells are linked in place
        """
        unvisited = self.grid_to_list(grid)
        start = grid.random_cell()
        path = [start]
        unvisited.remove(start)
        while unvisited:
            cell = random.choice(path)
            # NOTE: assumes every cell has at least one neighbor (true for
            # any grid with more than one cell).
            neighbor = random.choice(grid.get_neighbors(cell))
            if neighbor not in path:
                cell.link(neighbor)
                path.append(neighbor)
                unvisited.remove(neighbor)
35a50fd2c3fd502485183ee67073c6d3b767aa38 | 14,065 | py | Python | secfs/fs.py | quinnmagendanz/vFileSystem | 9a3c4b1d27a6325325a4f048f6a8fe93e5d871bf | [
"MIT"
] | null | null | null | secfs/fs.py | quinnmagendanz/vFileSystem | 9a3c4b1d27a6325325a4f048f6a8fe93e5d871bf | [
"MIT"
] | null | null | null | secfs/fs.py | quinnmagendanz/vFileSystem | 9a3c4b1d27a6325325a4f048f6a8fe93e5d871bf | [
"MIT"
] | null | null | null | # This file implements file system operations at the level of inodes.
import time
import secfs.crypto
import secfs.tables
import secfs.access
import secfs.store.tree
import secfs.store.block
from secfs.store.inode import Inode
from secfs.store.tree import Directory
from cryptography.fernet import Fernet
from secfs.types import I, Principal, User, Group
# usermap contains a map from user ID to their public key according to /.users
usermap = {}
# groupmap contains a map from group ID to the list of members according to /.groups
groupmap = {}
# owner is the user principal that owns the current share
owner = None
# root_i is the i of the root of the current share
root_i = None
def get_inode(i):
    """
    Shortcut for retrieving an inode given its i.

    Resolves i to its current inode hash and loads the Inode from the store.
    Raises LookupError if the i is not present in any itable.
    """
    ihash = secfs.tables.resolve(i)
    # `is None` instead of `== None`: identity test is the correct idiom and
    # avoids invoking a custom __eq__.
    if ihash is None:
        raise LookupError("asked to resolve i {}, but i does not exist".format(i))
    return Inode.load(ihash)
def init(owner, users, groups):
    """
    init will initialize a new share root as the given user principal. This
    includes setting up . and .. in the root directory, as well as adding the
    .users and .groups files that list trusted user public keys and group
    memberships respectively. This function will only allocate the share's
    root, but not map it to any particular share at the server. The new root's
    i is returned so that this can be done by the caller.
    """
    if not isinstance(owner, User):
        raise TypeError("{} is not a User, is a {}".format(owner, type(owner)))

    # Allocate the root directory inode (kind 0 == directory).
    node = Inode()
    node.kind = 0
    node.ex = True  # directories are marked executable (traversable)
    node.ctime = time.time()
    node.mtime = node.ctime

    ihash = secfs.store.block.store(node.bytes(), None) # inodes not encrypted
    root_i = secfs.tables.modmap(owner, I(owner), ihash)
    if root_i == None:
        raise RuntimeError

    # Link "." and ".." of the root back to the root itself, updating the
    # root's recorded hash after each tree mutation.
    new_ihash = secfs.store.tree.add(root_i, b'.', root_i)
    secfs.tables.modmap(owner, root_i, new_ihash)
    new_ihash = secfs.store.tree.add(root_i, b'..', root_i) # TODO(eforde): why would .. be mapped to root_i?
    secfs.tables.modmap(owner, root_i, new_ihash)
    print("CREATED ROOT AT", new_ihash)

    # Seed the share with the trusted-keys (.users) and group-membership
    # (.groups) files, pickled and stored unencrypted.
    init = {
        b".users": users,
        b".groups": groups,
    }

    import pickle
    for fn, c in init.items():
        bts = pickle.dumps(c)

        node = Inode()
        node.kind = 1  # regular file
        node.size = len(bts)
        # NOTE(review): mtime is copied from the fresh inode's ctime *before*
        # ctime is updated on the next line — the two assignments look
        # swapped relative to the root-inode setup above; confirm intent.
        node.mtime = node.ctime
        node.ctime = time.time()
        node.blocks = [secfs.store.block.store(bts, None)] # don't encrypt init
        ihash = secfs.store.block.store(node.bytes(), None) # inodes not encrypted

        i = secfs.tables.modmap(owner, I(owner), ihash)
        link(owner, i, root_i, fn)

    return root_i
def _create(parent_i, name, create_as, create_for, isdir, encrypt):
    """
    _create allocates a new file, and links it into the directory at parent_i
    with the given name. The new file is owned by create_for, but is created
    using the credentials of create_as. This distinction is necessary as a user
    principal is needed for the final i when creating a file as a group.

    :param parent_i: I of the directory that will hold the new entry
    :param name: entry name (bytes)
    :param create_as: User whose credentials perform the creation
    :param create_for: Principal (User or Group) that will own the new file
    :param isdir: True to create a directory, False for a regular file
    :param encrypt: True to encrypt the new file's contents
    :return: the I of the newly created file or directory
    """
    if not isinstance(parent_i, I):
        raise TypeError("{} is not an I, is a {}".format(parent_i, type(parent_i)))
    if not isinstance(create_as, User):
        raise TypeError("{} is not a User, is a {}".format(create_as, type(create_as)))
    if not isinstance(create_for, Principal):
        raise TypeError("{} is not a Principal, is a {}".format(create_for, type(create_for)))

    assert create_as.is_user() # only users can create
    assert create_as == create_for or create_for.is_group() # create for yourself or for a group

    if create_for.is_group() and create_for not in groupmap:
        raise PermissionError("cannot create for unknown group {}".format(create_for))

    # This check is performed by link() below, but better to fail fast
    if not secfs.access.can_write(create_as, parent_i):
        if parent_i.p.is_group():
            raise PermissionError("cannot create in group-writeable directory {0} as {1}; user is not in group".format(parent_i, create_as))
        else:
            raise PermissionError("cannot create in user-writeable directory {0} as {1}".format(parent_i, create_as))

    # TODO(eforde): encrypt if parent directory is encrypted
    # encrypt = encrypt or parent_i.encrypted

    # Build the inode; kind 0 == directory, kind 1 == regular file.
    node = Inode()
    node.encrypted = 1 if encrypt else 0
    node.ctime = time.time()
    node.mtime = node.ctime
    node.kind = 0 if isdir else 1
    node.ex = isdir  # directories are marked executable (traversable)

    # store the newly created inode on the server
    new_hash = secfs.store.block.store(node.bytes(), None) # inodes not encrypted
    # map the block to an i owned by create_for, created with credentials of create_as
    new_i = secfs.tables.modmap(create_as, I(create_for), new_hash)

    if isdir:
        # create . and .. if this is a directory
        table_key = secfs.tables.get_itable_key(create_for, create_as)
        new_ihash = secfs.store.tree.add(new_i, b'.', new_i, table_key)
        secfs.tables.modmap(create_as, new_i, new_ihash)
        new_ihash = secfs.store.tree.add(new_i, b'..', parent_i, table_key)
        secfs.tables.modmap(create_as, new_i, new_ihash)

    # link the new i into the directoy at parent_i with the given name
    link(create_as, new_i, parent_i, name)

    return new_i
def create(parent_i, name, create_as, create_for, encrypt):
    """
    Allocate a new regular file named `name` under `parent_i`.

    Thin wrapper around secfs.fs._create with isdir=False.
    """
    isdir = False
    return _create(parent_i, name, create_as, create_for, isdir, encrypt)
def mkdir(parent_i, name, create_as, create_for, encrypt):
    """
    Allocate a new directory named `name` under `parent_i`.

    Thin wrapper around secfs.fs._create with isdir=True.
    """
    isdir = True
    return _create(parent_i, name, create_as, create_for, isdir, encrypt)
def read(read_as, i, off, size):
    """
    Read reads [off:off+size] bytes from the file at i.

    Raises PermissionError if read_as may not read the file.
    """
    if not isinstance(i, I):
        raise TypeError("{} is not an I, is a {}".format(i, type(i)))
    if not isinstance(read_as, User):
        raise TypeError("{} is not a User, is a {}".format(read_as, type(read_as)))

    if not secfs.access.can_read(read_as, i):
        # Pick the denial message matching the owning principal's flavor.
        if i.p.is_group():
            denial = "cannot read from group-readable file {0} as {1}; user is not in group".format(i, read_as)
        else:
            denial = "cannot read from user-readable file {0} as {1}".format(i, read_as)
        raise PermissionError(denial)

    node = get_inode(i)
    table_key = secfs.tables.get_itable_key(i.p, read_as)
    contents = node.read(table_key)
    return contents[off:off + size]
def write(write_as, i, off, buf):
    """
    Write writes the given bytes into the file at i at the given offset.

    The write may extend the file when off + len(buf) exceeds the current
    size. Returns the number of bytes written (always len(buf)).
    """
    if not isinstance(i, I):
        raise TypeError("{} is not an I, is a {}".format(i, type(i)))
    if not isinstance(write_as, User):
        raise TypeError("{} is not a User, is a {}".format(write_as, type(write_as)))

    if not secfs.access.can_write(write_as, i):
        if i.p.is_group():
            raise PermissionError("cannot write to group-owned file {0} as {1}; user is not in group".format(i, write_as))
        else:
            raise PermissionError("cannot write to user-owned file {0} as {1}".format(i, write_as))

    node = get_inode(i)
    table_key = secfs.tables.get_itable_key(i.p, write_as)

    # TODO: this is obviously stupid -- should not get rid of blocks that haven't changed
    bts = node.read(table_key)

    # write also allows us to extend a file
    if off + len(buf) > len(bts):
        bts = bts[:off] + buf
    else:
        bts = bts[:off] + buf + bts[off+len(buf):]

    # update the inode: single data block holding the whole new contents,
    # encrypted with the itable key only when the inode is marked encrypted
    node.blocks = [secfs.store.block.store(bts, table_key if node.encrypted else None)]
    node.mtime = time.time()
    node.size = len(bts)

    # put new hash in tree
    new_hash = secfs.store.block.store(node.bytes(), None) # inodes not encrypted
    secfs.tables.modmap(write_as, i, new_hash)

    return len(buf)
def rename(parent_i_old, name_old, parent_i_new, name_new, rename_as):
    """
    Rename renames the given file in parent_i_old into parent_i_new as name_new.

    Returns the I of the moved file.
    """
    if not isinstance(parent_i_old, I):
        raise TypeError("{} is not an I, is a {}".format(parent_i_old, type(parent_i_old)))
    if not isinstance(parent_i_new, I):
        raise TypeError("{} is not an I, is a {}".format(parent_i_new, type(parent_i_new)))
    if not isinstance(rename_as, User):
        raise TypeError("{} is not a User, is a {}".format(rename_as, type(rename_as)))

    # NOTE(review): only the destination directory's writability is checked
    # here; removal from the source directory is not explicitly checked.
    if not secfs.access.can_write(rename_as, parent_i_new):
        raise PermissionError("no permission to rename {} to {} in new directory {}".format(name_old, name_new, parent_i_new))

    # Fetch i we're moving
    i = secfs.store.tree.find_under(parent_i_old, name_old, rename_as)

    # Remove i from old directory and record the old parent's new hash.
    table_key = secfs.tables.get_itable_key(parent_i_old.p, rename_as)
    new_ihash = secfs.store.tree.remove(parent_i_old, name_old, table_key)
    secfs.tables.modmap(rename_as, parent_i_old, new_ihash)

    # Add i to new directory and record the new parent's new hash.
    table_key = secfs.tables.get_itable_key(parent_i_new.p, rename_as)
    new_ihash = secfs.store.tree.add(parent_i_new, name_new, i, table_key)
    secfs.tables.modmap(rename_as, parent_i_new, new_ihash)

    return i
def unlink(parent_i, i, name, remove_as):
    """
    Unlink removes the given file from the parent_inode.

    :param parent_i: I of the directory holding the entry
    :param i: I of the file being removed
    :param name: entry name (bytes) under parent_i
    :param remove_as: User whose credentials perform the removal
    """
    if not isinstance(parent_i, I):
        raise TypeError("{} is not an I, is a {}".format(parent_i, type(parent_i)))
    if not isinstance(remove_as, User):
        raise TypeError("{} is not a User, is a {}".format(remove_as, type(remove_as)))

    assert remove_as.is_user() # only users can remove

    # NOTE(review): writability is checked on the file itself, not on the
    # parent directory — confirm this matches the intended access model.
    if not secfs.access.can_write(remove_as, i):
        if i.p.is_group():
            raise PermissionError("cannot remove group-owned file {0} as {1}; user is not in group".format(i, remove_as))
        else:
            raise PermissionError("cannot remove user-owned file {0} as {1}".format(i, remove_as))

    table_key = secfs.tables.get_itable_key(i.p, remove_as)
    # Drop the directory entry and record the parent's new hash.
    new_ihash = secfs.store.tree.remove(parent_i, name, table_key)
    secfs.tables.modmap(remove_as, parent_i, new_ihash)

    #TODO(magendanz) remove file and inode from server using secfs.store.blocks
    secfs.tables.remove(i)
def rmdir(parent_i, i, name, remove_as):
    """
    rmdir removes the given directory from the parent_inode as well as all
    subfiles.

    Returns the list of is that were removed: for a directory, every
    descendant plus the directory itself; for a regular file, a one-element
    list. Returning a list in both branches lets the recursive
    `sub_is += rmdir(...)` aggregation below work for mixed children.
    """
    if not isinstance(parent_i, I):
        raise TypeError("{} is not an I, is a {}".format(parent_i, type(parent_i)))
    if not isinstance(remove_as, User):
        raise TypeError("{} is not a User, is a {}".format(remove_as, type(remove_as)))

    assert remove_as.is_user() # only users can remove

    if not secfs.access.can_write(remove_as, i):
        if i.p.is_group():
            raise PermissionError("cannot remove group-owned file {0} as {1}; user is not in group".format(i, remove_as))
        else:
            raise PermissionError("cannot remove user-owned file {0} as {1}".format(i, remove_as))
    print("Permissions: {} can edit {} owned file".format(remove_as, i))

    table_key = secfs.tables.get_itable_key(i.p, remove_as)
    inode = get_inode(i)

    if inode.kind == 0:
        # Directory: remove every child recursively before the directory.
        sub_is = []
        dr = Directory(i, table_key)
        subfiles = [(sub_name, sub_i) for sub_name, sub_i in dr.children if ((sub_name != b'.') and (sub_name != b'..'))]
        print("Subfiles to try and rm {}".format(subfiles))
        # Verify we may delete every child *before* deleting anything, so a
        # permission failure does not leave the tree half-removed.
        for child_name, child_i in subfiles:
            print("Checking permissions. {} can edit {}".format(remove_as, child_i))
            if not secfs.access.can_write(remove_as, child_i):
                raise PermissionError("cannot remove group-owned file {0} as {1}; user is not in group".format(child_i, remove_as))
        for child_name, child_i in subfiles:
            print("Recusing to delete child {}".format(child_name))
            sub_is += rmdir(i, child_i, child_name, remove_as)
        # Drop the directory entry from the parent and record the new hash.
        new_ihash = secfs.store.tree.remove(parent_i, name, table_key)
        secfs.tables.modmap(remove_as, parent_i, new_ihash)
        #TODO(magendanz) remove file and inode from server using secfs.store.blocks
        secfs.tables.remove(i)
        sub_is.append(i)
        return sub_is
    else:
        # Regular file: delegate to unlink. BUG FIX: return a *list* — the
        # original returned the bare I, which broke the `sub_is += rmdir(...)`
        # concatenation in the recursive directory branch above.
        unlink(parent_i, i, name, remove_as)
        return [i]
def readdir(i, off, read_as):
    """
    Return a list of is in the directory at i.

    Each returned list item is a tuple of an i and an index. The index can be
    used to request a suffix of the list at a later time.
    """
    table_key = secfs.tables.get_itable_key(i.p, read_as)
    dr = Directory(i, table_key)
    # `is None` identity test instead of `== None`.
    if dr is None:
        return None
    # Loop variable renamed from `i` to `child`: the original shadowed the
    # directory parameter inside the comprehension.
    return [(child, index + 1) for index, child in enumerate(dr.children) if index >= off]
def link(link_as, i, parent_i, name):
    """
    Adds the given i into the given parent directory under the given name.

    :param link_as: User whose credentials perform the link
    :param i: I of the file/directory being linked
    :param parent_i: I of the directory receiving the entry
    :param name: entry name (bytes)
    """
    if not isinstance(parent_i, I):
        raise TypeError("{} is not an I, is a {}".format(parent_i, type(parent_i)))
    if not isinstance(i, I):
        raise TypeError("{} is not an I, is a {}".format(i, type(i)))
    if not isinstance(link_as, User):
        raise TypeError("{} is not a User, is a {}".format(link_as, type(link_as)))

    if not secfs.access.can_write(link_as, parent_i):
        if parent_i.p.is_group():
            raise PermissionError("cannot create in group-writeable directory {0} as {1}; user is not in group".format(parent_i, link_as))
        else:
            raise PermissionError("cannot create in user-writeable directory {0} as {1}".format(parent_i, link_as))

    table_key = secfs.tables.get_itable_key(parent_i.p, link_as)
    # Add the entry and record the parent directory's new hash.
    parent_ihash = secfs.store.tree.add(parent_i, name, i, table_key)
    secfs.tables.modmap(link_as, parent_i, parent_ihash)
| 40.650289 | 140 | 0.664842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,838 | 0.343974 |
35a5e76930a9f052656cb9ba7d71587f5d8d63d6 | 3,355 | py | Python | neurons/feynman/visualization.py | unconst/SimpleWord2Vec | d1af6993c1d6bca273a0c8d147132ee9867f5543 | [
"MIT"
] | 9 | 2019-12-18T10:20:15.000Z | 2021-03-18T00:07:28.000Z | neurons/feynman/visualization.py | unconst/SimpleWord2Vec | d1af6993c1d6bca273a0c8d147132ee9867f5543 | [
"MIT"
] | 5 | 2020-02-12T02:21:15.000Z | 2022-02-10T00:25:28.000Z | neurons/feynman/visualization.py | unconst/BitTensor | d1af6993c1d6bca273a0c8d147132ee9867f5543 | [
"MIT"
] | null | null | null | from __future__ import division
import io
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import numpy
import os
import tensorflow as tf
def figure_to_buff(figure):
    """Render the current matplotlib plot to an in-memory PNG buffer.

    The supplied `figure` is closed and inaccessible after this call.
    Note that plt.savefig writes the *current* figure, which is assumed
    to be `figure` here.
    """
    png_buffer = io.BytesIO()
    plt.savefig(png_buffer, format='png')
    # Closing prevents the figure from being displayed directly inside
    # the notebook.
    plt.close(figure)
    png_buffer.seek(0)
    return png_buffer
def generate_edge_weight_buffer(nodes):
    """Draw the node graph with stake-scaled nodes and weighted edges and
    return it as an in-memory PNG buffer.

    :param nodes: mapping whose values expose ``stake``, ``identity`` and
        ``edges`` (each edge a dict with 'first' and 'second' keys) — shape
        inferred from usage below; confirm against the caller.
    :return: io.BytesIO positioned at 0, containing the rendered PNG
    """
    b_nodes = list(nodes.values())
    print(b_nodes)
    G = nx.DiGraph()

    total_stake = sum([node.stake for node in b_nodes])

    # Build node sizes in proportion to stake held within the graph.
    node_sizes = []
    node_labels = {}
    for node in b_nodes:
        G.add_node(node.identity)
        node_sizes.append(25 + 500 * (node.stake / total_stake))
        node_labels[node.identity] = str(node.identity)

    # Edge colors (alphas and weight) reflect attribution wieghts of each
    # connection. Self-edges get an empty label.
    edge_colors = {}
    edge_labels = {}
    for node in b_nodes:
        for edge in node.edges:
            if (node.identity, edge['first']) not in edge_labels:
                G.add_edge(node.identity, edge['first'])
                edge_colors[(node.identity,
                             edge['first'])] = float(edge['second'])
                if node.identity != edge['first']:
                    edge_labels[(
                        node.identity,
                        edge['first'])] = "%.3f" % float(edge['second'])
                else:
                    edge_labels[(node.identity, edge['first'])] = ""

    # Set edge weights.
    for u, v, d in G.edges(data=True):
        d['weight'] = edge_colors[(u, v)]
    edges, weights = zip(*nx.get_edge_attributes(G, 'weight').items())

    # Clear Matplot lib buffer and create new figure.
    plt.cla()
    plt.clf()
    figure = plt.figure(figsize=(20, 15))

    pos = nx.layout.circular_layout(G)
    # NOTE: the parameter name `nodes` is rebound here to the drawn artist
    # collection; the original mapping is no longer accessible below.
    nodes = nx.draw_networkx_nodes(G,
                                   pos,
                                   node_size=node_sizes,
                                   node_color='blue')
    edges = nx.draw_networkx_edges(G,
                                   pos,
                                   arrowstyle='->',
                                   arrowsize=15,
                                   edge_color=weights,
                                   edge_cmap=plt.cm.Blues,
                                   width=5)
    edge_labels = nx.draw_networkx_edge_labels(G,
                                               pos,
                                               edge_labels=edge_labels,
                                               with_labels=True,
                                               label_pos=0.3)

    # Nudge label positions slightly above each node before drawing them.
    for node in b_nodes:
        pos[node.identity] = pos[node.identity] + numpy.array([0, 0.1])
    labels = nx.draw_networkx_labels(G, pos, node_labels)

    # Save the plot to a PNG in memory.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')

    # Closing the figure prevents it from being displayed directly inside
    # the notebook.
    plt.close(figure)
    buf.seek(0)
    return buf
35a69bb4fad1c93931ffcca42ca3cfdb048e79fa | 160 | py | Python | signalwire/relay/messaging/send_result.py | ramarketing/signalwire-python | c0663bdd0454faaa39f42af7c936cea1d43e1842 | [
"MIT"
] | 23 | 2018-12-19T14:48:18.000Z | 2022-01-11T03:58:36.000Z | signalwire/relay/messaging/send_result.py | ramarketing/signalwire-python | c0663bdd0454faaa39f42af7c936cea1d43e1842 | [
"MIT"
] | 13 | 2018-10-17T12:57:54.000Z | 2021-09-01T21:46:01.000Z | signalwire/relay/messaging/send_result.py | ramarketing/signalwire-python | c0663bdd0454faaa39f42af7c936cea1d43e1842 | [
"MIT"
] | 12 | 2020-01-21T14:29:43.000Z | 2022-01-11T07:48:06.000Z | class SendResult:
def __init__(self, result={}):
self.successful = result.get('code', None) == '200'
self.message_id = result.get('message_id', None)
| 32 | 55 | 0.66875 | 159 | 0.99375 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.14375 |
35a7208f14977580e5b6c2241d160acef1319811 | 610 | py | Python | harvester/letsdoit/discovery/sublist3r.py | Average-stu/osint | 21c3f2b5145fb82156a317f7728498f368f6f6bb | [
"MIT"
] | 2 | 2021-02-13T13:46:21.000Z | 2021-02-13T13:53:46.000Z | harvester/letsdoit/discovery/sublist3r.py | Average-stu/osint | 21c3f2b5145fb82156a317f7728498f368f6f6bb | [
"MIT"
] | null | null | null | harvester/letsdoit/discovery/sublist3r.py | Average-stu/osint | 21c3f2b5145fb82156a317f7728498f368f6f6bb | [
"MIT"
] | null | null | null | from typing import Type
from letsdoit.lib.core import *
class SearchSublist3r:
    """Query the sublist3r.com API for subdomains of a given domain."""

    def __init__(self, word):
        # word: the domain whose subdomains are enumerated.
        self.word = word
        # FIX: the original bound the builtin `list` *class* here instead of
        # an empty list, so get_hostnames() before a search returned a type.
        self.totalhosts: list = []
        self.proxy = False

    async def do_search(self):
        """Fetch the JSON list of subdomains and cache it on the instance."""
        url = f'https://api.sublist3r.com/search.php?domain={self.word}'
        response = await AsyncFetcher.fetch_all([url], json=True, proxy=self.proxy)
        self.totalhosts: list = response[0]

    async def get_hostnames(self) -> list:
        """Return the hostnames collected by the last do_search() call.

        Annotation fixed from Type[list] (the class of list) to list.
        """
        return self.totalhosts

    async def process(self, proxy=False):
        """Run the search, optionally through a proxy."""
        self.proxy = proxy
        await self.do_search()
| 26.521739 | 83 | 0.64918 | 551 | 0.903279 | 0 | 0 | 0 | 0 | 397 | 0.65082 | 58 | 0.095082 |
35a81e5b99ffdc95fe04f5cfea1745dceab51074 | 959 | py | Python | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/eieio/data_messages/eieio_16bit_with_payload/eieio_16bit_with_payload_timed_data_message.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | 2 | 2020-11-01T13:22:11.000Z | 2020-11-01T13:22:20.000Z | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/eieio/data_messages/eieio_16bit_with_payload/eieio_16bit_with_payload_timed_data_message.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | null | null | null | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/eieio/data_messages/eieio_16bit_with_payload/eieio_16bit_with_payload_timed_data_message.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | null | null | null | from spinnman.messages.eieio.eieio_type import EIEIOType
from spinnman.messages.eieio.data_messages.eieio_with_payload_data_message\
import EIEIOWithPayloadDataMessage
from spinnman.messages.eieio.data_messages.eieio_data_header\
import EIEIODataHeader
from spinnman.messages.eieio.data_messages.eieio_data_message\
import EIEIODataMessage
class EIEIO16BitWithPayloadTimedDataMessage(EIEIOWithPayloadDataMessage):
""" An EIEIO packet containing 16 bit events and payload where the\
payloads represent a timestamp for the events
"""
def __init__(self, count=0, data=None, offset=0):
EIEIOWithPayloadDataMessage.__init__(
self, EIEIODataHeader(EIEIOType.KEY_PAYLOAD_16_BIT, is_time=True,
count=count),
data, offset)
@staticmethod
def get_min_packet_length():
return EIEIODataMessage.min_packet_length(
EIEIOType.KEY_PAYLOAD_16_BIT)
| 38.36 | 77 | 0.746611 | 604 | 0.629823 | 0 | 0 | 139 | 0.144943 | 0 | 0 | 128 | 0.133472 |
35a90826145b4db8ac5f1bb20d348f0f7947e310 | 4,505 | py | Python | reliability/tasks/Apps.py | RH-ematysek/svt | 3c4f99d453c6956b434f1a90e0658a95f3fda0a4 | [
"Apache-2.0"
] | 115 | 2016-07-15T12:24:42.000Z | 2022-02-21T20:40:09.000Z | reliability/tasks/Apps.py | RH-ematysek/svt | 3c4f99d453c6956b434f1a90e0658a95f3fda0a4 | [
"Apache-2.0"
] | 452 | 2016-05-19T13:55:19.000Z | 2022-03-24T11:25:20.000Z | reliability/tasks/Apps.py | RH-ematysek/svt | 3c4f99d453c6956b434f1a90e0658a95f3fda0a4 | [
"Apache-2.0"
] | 112 | 2016-05-16T08:48:55.000Z | 2022-01-12T13:13:37.000Z | from .GlobalData import global_data
from .utils.oc import oc
import requests
import time
import logging
class App:
def __init__(self, deployment, project, template, build_config,route=""):
self.project = project
self.template = template
self.deployment = deployment
self.build_config = build_config
self.route = route
self.logger = logging.getLogger('reliability')
def build(self, kubeconfig):
(result, rc) = oc("start-build -n " + self.project + " " + self.build_config, kubeconfig)
if rc != 0:
self.logger.error("build_app: Failed to create app " + self.deployment + " in project " + self.project)
return "App build failed for build config : " + self.build_config
else:
with global_data.builds_lock:
global_data.total_build_count += 1
return "App build succeeded for build config : " + self.build_config
def visit(self):
visit_success = False
try:
r = requests.get("http://" + self.route + "/")
self.logger.info(str(r.status_code) + ": visit: " + self.route)
if r.status_code == 200:
visit_success = True
except Exception as e :
self.logger.error(f"visit: {self.route} Exception {e}")
return visit_success
def scale_up(self, kubeconfig):
(result, rc) = oc("scale --replicas=2 -n " + self.project + " dc/" + self.deployment, kubeconfig)
if rc !=0 :
self.logger.error("scale_up: Failed to scale up " + self.project + "." + self.deployment)
return "App scale up failed for deployment : " + self.deployment
else:
return "App scale up succeeded for deployment : " + self.deployment
def scale_down(self, kubeconfig):
(result, rc) = oc("scale --replicas=1 -n " + self.project + " dc/" + self.deployment, kubeconfig)
if rc !=0 :
self.logger.error("scale_down: Failed to scale down " + self.project + "." + self.deployment)
return "App scale down failed for deployment : " + self.deployment
else:
return "App scale down succeeded for deployment : " + self.deployment
class Apps:
def __init__(self):
self.failed_apps = 0
self.apps = {}
self.logger = logging.getLogger('reliability')
def add(self, app, kubeconfig):
(result, rc) = oc("new-app -n " + app.project + " --template " + app.template, kubeconfig)
if rc != 0:
self.logger.error("create_app: Failed to create app " + app.deployment + " in project " + app.project)
return None
else:
self.apps[app.project + "." + app.deployment] = app
(route,rc) = oc("get route --no-headers -n " + app.project + " | awk {'print $2'} | grep " + app.template, kubeconfig)
if rc == 0:
app.route = route.rstrip()
max_tries = 60
current_tries = 0
visit_success = False
while not visit_success and current_tries <= max_tries:
self.logger.info(app.template + " route not available yet, sleeping 10 seconds")
time.sleep(10)
current_tries += 1
visit_success = app.visit()
if not visit_success:
self.failed_apps += 1
self.logger.error("add_app: " + app.project + "." + app.deployment + " did not become available" )
return app
# removing an app just removes the dictionary entry, actual app removed by project deletion
def remove(self,app):
self.apps.pop(app.project + "." + app.deployment)
def simulate(self):
apps = {}
app1 = App('cakephp-mysql-example','cakephp-mysql-example-0','cakephp-mysql-example','cakephp-mysql-example')
self.apps[app1.project + "." + app1.deployment] = app1
# app2 = App('nodejs-mongodb-example','nodejs-mongodb-example-1','nodejs-mongodb-example','nodejs-mongodb-example')
# self.apps[app2.project + "." + app2.deployment] = app2
def init(self):
pass
all_apps=Apps()
if __name__ == "__main__":
app = App("cakephp-mysql-example", "t1", "cakephp-mysql-example","cakephp-mysql-example")
apps = Apps()
# apps.add(app)
# time.sleep(180)
app.visit()
app.scale_up()
time.sleep(30)
app.scale_down()
app.build()
| 40.954545 | 130 | 0.583796 | 4,092 | 0.908324 | 0 | 0 | 0 | 0 | 0 | 0 | 1,251 | 0.277691 |
35a90fa7fe750428ce519a6161eec0ec07750701 | 4,463 | py | Python | recognize.py | aerdem4/rock-paper-scissors | 0e520aa53d8cb146a8ab4f5fd1ebd823ffed3a4b | [
"MIT"
] | null | null | null | recognize.py | aerdem4/rock-paper-scissors | 0e520aa53d8cb146a8ab4f5fd1ebd823ffed3a4b | [
"MIT"
] | 1 | 2020-03-02T13:26:05.000Z | 2020-03-02T13:26:05.000Z | recognize.py | aerdem4/rock-paper-scissors | 0e520aa53d8cb146a8ab4f5fd1ebd823ffed3a4b | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from keras.models import load_model
bg = None
def run_avg(image, acc_weight):
    """Fold `image` into the global running-average background model.

    The first frame seeds the model; every later frame is blended in with
    weight `acc_weight`.
    """
    global bg
    if bg is not None:
        cv2.accumulateWeighted(image, bg, acc_weight)
    else:
        # First frame: initialize the background as a float copy.
        bg = image.copy().astype("float")
def segment(image, threshold=10):
global bg
diff = cv2.absdiff(bg.astype("uint8"), image)
thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]
thresholded = cv2.GaussianBlur(thresholded,(5,5),0)
cnts, _ = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) == 0:
return None
else:
segmented = max(cnts, key=cv2.contourArea)
return (thresholded, segmented)
#-------------------------------------------------------------------------------
# Main function
#-------------------------------------------------------------------------------
if __name__ == "__main__":
model = load_model("model.h5")
# initialize accumulated weight
accumWeight = 0.5
im_count = 0
# get the reference to the webcam
camera = cv2.VideoCapture(0)
x, y, r = 500, 900, 200
# region of interest (ROI) coordinates
top, right, bottom, left = x-r, y-r, x+r, y+r
# initialize num of frames
num_frames = 0
# calibration indicator
calibrated = False
# keep looping, until interrupted
while(True):
# get the current frame
(grabbed, frame) = camera.read()
# flip the frame so that it is not the mirror view
frame = cv2.flip(frame, 1)
# clone the frame
clone = frame.copy()
# get the height and width of the frame
(height, width) = frame.shape[:2]
# get the ROI
roi = frame[top:bottom, right:left]
# convert the roi to grayscale and blur it
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# to get the background, keep looking till a threshold is reached
# so that our weighted average model gets calibrated
if num_frames < 30:
run_avg(gray, accumWeight)
if num_frames == 1:
print "[STATUS] please wait! calibrating..."
elif num_frames == 29:
print "[STATUS] calibration successfull..."
else:
# segment the hand region
hand = segment(gray)
# check whether hand region is segmented
if hand is not None:
# if yes, unpack the thresholded image and
# segmented region
(thresholded, segmented) = hand
epsilon = 0.01*cv2.arcLength(segmented,True)
segmented = cv2.approxPolyDP(segmented,epsilon,True)
# draw the segmented region and display the frame
convex_hull = cv2.convexHull(segmented)
cv2.rectangle(clone, (left, top), (right, bottom), (0,0,0), thickness=cv2.cv.CV_FILLED)
cv2.drawContours(clone, [convex_hull + (right, top)], -1, (255, 0, 0), thickness=cv2.cv.CV_FILLED)
cv2.drawContours(clone, [segmented + (right, top)], -1, (0, 255, 255), thickness=cv2.cv.CV_FILLED)
preds = model.predict(cv2.resize(clone[top:bottom, right:left], (64, 64)).reshape((-1, 64, 64, 3)))[0]
index = np.argmax(preds)
text = ["rock", "paper", "scissors"][index] + " " + str(round(preds[index], 2))
cv2.putText(clone, text, (right, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2)
# draw the segmented hand
cv2.rectangle(clone, (left, top), (right, bottom), (0,255,0), 2)
# increment the number of frames
num_frames += 1
# display the frame with segmented hand
cv2.imshow("Video Feed", clone)
# observe the keypress by the user
keypress = cv2.waitKey(1) & 0xFF
# if the user pressed "q", then stop looping
path = None
if keypress == ord("r"):
path = "r" + str(im_count) + ".png"
elif keypress == ord("p"):
path = "p" + str(im_count) + ".png"
elif keypress == ord("s"):
path = "s" + str(im_count) + ".png"
if path is not None:
cv2.imwrite("data/" + path, clone[top:bottom, right:left])
print "saved", path
im_count += 1
# free up memory
camera.release()
cv2.destroyAllWindows()
| 33.556391 | 118 | 0.561058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,221 | 0.273583 |
35ab2a8e1ddb97b8c757c40675e5df5553af81af | 4,829 | py | Python | src/extract-data.py | SMTG-UCL/singlet-fission-screening | 69c85d672a98420397094e55bef98287879db685 | [
"MIT"
] | null | null | null | src/extract-data.py | SMTG-UCL/singlet-fission-screening | 69c85d672a98420397094e55bef98287879db685 | [
"MIT"
] | null | null | null | src/extract-data.py | SMTG-UCL/singlet-fission-screening | 69c85d672a98420397094e55bef98287879db685 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import unicode_literals
import os
import sys
import tarfile
import shutil
import tempfile
from contextlib import contextmanager
from pymatgen.io.gaussian import GaussianInput, GaussianOutput
from tinydb import TinyDB
@contextmanager
def cd(run_path, cleanup=lambda: True):
"""
Temporarily work in another directory, creating it if necessary.
"""
home = os.getcwd()
os.chdir(os.path.expanduser(run_path))
try:
yield
finally:
os.chdir(home)
cleanup()
@contextmanager
def tempdir():
"""
Temporarily work in temporary directory, deleting it aftewards.
"""
dirpath = tempfile.mkdtemp()
def cleanup():
shutil.rmtree(dirpath)
with cd(dirpath, cleanup):
yield dirpath
def extract_data_from_tar_file(tar_file):
with tarfile.open(tar_file, 'r:gz') as tar:
tar.extractall()
folder = tar_file.replace('.tar.gz', '')
with cd(folder):
tdout = GaussianOutput('td.log')
td_exit = tdout.read_excitation_energies()
td_triplet = [e for e in td_exit if 'triplet' in e[3].lower()][0][0]
td_singlet = [e for e in td_exit if 'singlet' in e[3].lower()][0][0]
tdaout = GaussianOutput('tda.log')
tda_exit = tdaout.read_excitation_energies()
tda_triplet = [e for e in tda_exit if 'triplet' in e[3].lower()][0][0]
tda_singlet = [e for e in tda_exit if 'singlet' in e[3].lower()][0][0]
nicssout = GaussianOutput('nics_singlet.log')
# occasionally some jobs fail here
if not nicssout.properly_terminated:
return False
nicss_mag = nicssout.read_magnetic_shielding()
nicss_six_ring_above = (abs(nicss_mag[-8]['isotropic']) +
abs(nicss_mag[-6]['isotropic']))/2
nicss_six_ring_below = (abs(nicss_mag[-7]['isotropic']) +
abs(nicss_mag[-5]['isotropic']))/2
nicss_five_ring_above = (abs(nicss_mag[-4]['isotropic']) +
abs(nicss_mag[-2]['isotropic']))/2
nicss_five_ring_below = (abs(nicss_mag[-3]['isotropic']) +
abs(nicss_mag[-1]['isotropic']))/2
nicstout = GaussianOutput('nics_triplet.log')
if not nicstout.properly_terminated:
return False
nicst_mag = nicstout.read_magnetic_shielding()
nicst_six_ring_above = (abs(nicst_mag[-8]['isotropic']) +
abs(nicst_mag[-6]['isotropic']))/2
nicst_six_ring_below = (abs(nicst_mag[-7]['isotropic']) +
abs(nicst_mag[-5]['isotropic']))/2
nicst_five_ring_above = (abs(nicst_mag[-4]['isotropic']) +
abs(nicst_mag[-2]['isotropic']))/2
nicst_five_ring_below = (abs(nicst_mag[-3]['isotropic']) +
abs(nicst_mag[-1]['isotropic']))/2
data = {'td_singlet': td_singlet, 'td_triplet': td_triplet,
'tda_singlet': tda_singlet, 'tda_triplet': tda_triplet,
'nicss_six_ring_above': nicss_six_ring_above,
'nicss_six_ring_below': nicss_six_ring_below,
'nicss_five_ring_above': nicss_five_ring_above,
'nicss_five_ring_below': nicss_five_ring_below,
'nicst_six_ring_above': nicst_six_ring_above,
'nicst_six_ring_below': nicst_six_ring_below,
'nicst_five_ring_above': nicst_five_ring_above,
'nicst_five_ring_below': nicst_five_ring_below}
return data
data_to_write = []
db = TinyDB(os.path.join('..', 'data', 'structures.json'))
systems = list(db.all())
done = 0
for i, system in enumerate(systems):
input_file = GaussianInput.from_dict(system['input'])
directory = input_file.title
tar_name = '{}.tar.gz'.format(directory)
tar_file = os.path.abspath(os.path.join('..', 'data', 'calculations', tar_name))
if os.path.isfile(tar_file):
# extract the data in a temp directory to avoid clobbering any data
with tempdir() as tmp_dir:
shutil.copy(tar_file, tmp_dir)
data = extract_data_from_tar_file(tar_name)
if not data:
print('{} did not finish correctly, skipping'.format(directory))
continue
data.update({'x_sub': system['x_sub'], 'y_sub': system['y_sub'],
'z_sub': system['z_sub'], 'nx': system['nx'],
'ny': system['ny'], 'title': system['title']})
data_to_write.append(data)
if i % 500 == 0:
done += 5
print('{}% completed'.format(done))
print('writing data')
db = TinyDB(os.path.join('..', 'data', 'calculated-data.json'))
db.insert_multiple(data_to_write)
| 37.726563 | 84 | 0.601781 | 0 | 0 | 501 | 0.103748 | 533 | 0.110375 | 0 | 0 | 1,034 | 0.214123 |
35acde1fca854d1c0727ab3afd45a6080b48ff76 | 14,007 | py | Python | src/pyprocessing/shapes.py | agarwalnaimish/pyprocessing | 46695c379c95a7b874f76257c37f44f4b59d39e9 | [
"MIT"
] | 3 | 2018-04-23T17:33:02.000Z | 2021-01-05T04:52:36.000Z | src/pyprocessing/shapes.py | agarwalnaimish/pyprocessing | 46695c379c95a7b874f76257c37f44f4b59d39e9 | [
"MIT"
] | 1 | 2018-04-23T17:37:37.000Z | 2018-04-23T17:37:37.000Z | src/pyprocessing/shapes.py | agarwalnaimish/pyprocessing | 46695c379c95a7b874f76257c37f44f4b59d39e9 | [
"MIT"
] | 2 | 2019-01-16T06:34:52.000Z | 2019-10-15T14:03:07.000Z | # coding: utf-8
# ************************
# SHAPE STUFF
# ************************
import ctypes
from pyglet.gl import *
from .globs import *
from .constants import *
from .pvector import *
from .primitives import _smoothFixHackBegin, _smoothFixHackEnd
from math import *
__all__ = ['beginShape', 'vertex', 'normal', 'bezierVertex', 'endShape',
'bezierDetail', 'bezierPoint', 'bezierTangent', 'bezierSample',
'bezier', 'curveTightness', 'curve', 'curveVertex', 'curvePoint',
'curveDetail',
'curveTangent', 'sphereDetail']
def beginShape(type=None):
"""Begins the drawing of a shape."""
shape.type = type
shape.vtx = [] # vertices drawn with vertex or sampled by curveVertex
shape.bez = [] # bezier vertices drawn with bezierVertex
shape.crv = [] # contents of the last three curveVertex calls
shape.nrm = [] # pairs (vtxindex,normal)
def vertex(x, y, z=0.0, u=0.0, v=0.0):
"""Adds a new vertex to the shape"""
if attrib.texture:
shape.vtx += [(x, y, z, u, v)]
else:
shape.vtx += [(x, y, z)]
def normal(x, y, z):
"""Sets the next vertex's normal"""
shape.nrm += [(len(shape.vtx), (x, y, z))]
def bezierVertex(*coords):
"""Generates a cubic bezier arc. Arguments are of the form
(cx1, cy1, cx2, cy2, x, y) or
(cx1, cy1, cz1, cx2, cy2, cz2, x, y, z), i.e. coordinates
for 3 control points in 2D or 3D. The first control point of the
arc is the last point of the previous arc or the last vertex.
"""
assert (len(coords) in (6, 9))
assert (len(shape.vtx) > 0)
# remember the index where the bezier control points will be stored
shape.bez.append(len(shape.vtx))
if len(coords) == 6:
shape.vtx += [coords[:2] + (0,), coords[2:4] + (0,), coords[4:6] + (0,)]
else:
shape.vtx += [coords[:3], coords[3:6], coords[6:9]]
def endShape(close=False):
"""Does the actual drawing of the shape."""
def computeNormal(p0, p1, p2):
"""Computes a normal for triangle p0-p1-p2."""
return (PVector(p1) - PVector(p0)).cross(PVector(p2) - PVector(p1))
if attrib.texture:
glEnable(GL_TEXTURE_2D)
if type(attrib.texture) == str:
image = pyglet.image.load(attrib.texture)
texture = image.get_texture()
else:
texture = attrib.texture.img.get_texture()
t = texture.tex_coords
glBindTexture(GL_TEXTURE_2D, texture.id)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
glTranslatef(0.0, t[7], 0.0);
glMatrixMode(GL_MODELVIEW);
if attrib.textureMode == IMAGE:
normx = image.width
normy = image.height
elif attrib.textureMode == NORMALIZED:
normx = normy = 1.0
if shape.type:
glBegin(shape.type)
else:
glBegin(GL_POLYGON)
for v in shape.vtx:
glTexCoord2f(v[3] * t[6] / normx, -v[4] * t[7] / normy)
glVertex3f(*v[:3])
glEnd()
attrib.texture = None
# Draw the interior of the shape
elif attrib.fillColor != None:
glColor4f(*attrib.fillColor)
# establish an initial normal vector
if shape.nrm != []:
inormal, normal = shape.nrm[0]
else:
inormal = len(shape.vtx)
if len(shape.vtx) >= 3:
normal = computeNormal(shape.vtx[0], shape.vtx[1], shape.vtx[2])
else:
normal = [0, 0, 1]
glNormal3f(*normal)
# Draw filled shape
if shape.type == None:
_smoothFixHackBegin()
# first create a tesselator object if none was defined yet
if shape.tess == None:
shape.tess = gl.gluNewTess()
# set up the tesselator callbacks
gluTessCallback(shape.tess, GLU_TESS_VERTEX,
ctypes.cast(glVertex3dv, ctypes.CFUNCTYPE(None)))
gluTessCallback(shape.tess, GLU_TESS_BEGIN,
ctypes.cast(glBegin, ctypes.CFUNCTYPE(None)))
gluTessCallback(shape.tess, GLU_TESS_END,
ctypes.cast(glEnd, ctypes.CFUNCTYPE(None)))
gluTessBeginPolygon(shape.tess, None)
gluTessBeginContour(shape.tess)
i = 0
n = len(shape.vtx)
shape.bez += [n]
b = 0
a = []
while i < n:
if i == shape.bez[b]:
for v in bezierSample(shape.vtx[i - 1], shape.vtx[i],
shape.vtx[i + 1], shape.vtx[i + 2]):
a += [(ctypes.c_double * 3)(*v)]
gluTessVertex(shape.tess, a[-1], a[-1])
b += 1
i += 3
else:
v = shape.vtx[i]
a += [(ctypes.c_double * 3)(*v)]
i += 1
gluTessVertex(shape.tess, a[-1], a[-1])
gluTessEndContour(shape.tess)
gluTessEndPolygon(shape.tess)
_smoothFixHackEnd()
else:
if shape.nrm != []:
# User supplied normals
inrm = 0
inormal, normal = shape.nrm[0]
glBegin(shape.type)
glNormal3f(*normal)
for i, v in enumerate(shape.vtx):
if i == inormal:
# load the next normal before proceeding
glNormal3f(*normal)
inrm += 1
if inrm < len(shape.nrm):
inormal, normal = shape.nrm[inrm]
glVertex3f(*v)
glEnd()
else:
# No normals were specified. Must compute normals on the fly
glBegin(shape.type)
for i, v in enumerate(shape.vtx):
if i + 2 < len(shape.vtx):
if shape.type == QUADS and i % 4 == 0 or \
shape.type == QUAD_STRIP and i % 2 == 0 or \
shape.type == TRIANGLES and i % 3 == 0 or \
shape.type == TRIANGLE_FAN and i > 1 or \
shape.type == TRIANGLE_STRIP and i > 1:
normal = computeNormal(shape.vtx[i],
shape.vtx[i + 1],
shape.vtx[i + 2])
glNormal3f(*normal)
glVertex3f(*v)
glEnd()
# Draw the outline of the shape
if attrib.strokeColor != None:
glColor4f(*attrib.strokeColor)
glPushAttrib(GL_POLYGON_BIT)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glLineWidth(attrib.strokeWeight)
if shape.type == None:
if close:
glBegin(GL_LINE_LOOP)
else:
glBegin(GL_LINE_STRIP)
i = 0
n = len(shape.vtx)
shape.bez += [n]
nextbez = shape.bez.pop(0)
while i < n:
if i == nextbez:
for v in bezierSample(shape.vtx[i - 1], shape.vtx[i],
shape.vtx[i + 1], shape.vtx[i + 2]):
glVertex3f(*v)
nextbez = shape.bez.pop(0)
i += 3
else:
v = shape.vtx[i]
i += 1
glVertex3f(*v)
glEnd()
else:
glBegin(shape.type)
for v in shape.vtx:
glVertex3f(*v)
glEnd()
glPopAttrib()
def bezierDetail(n=shape.bezierDetail):
"""Establishes the Bézier level of detail, i.e., the number of points
per Bézier curve segment."""
shape.bezierDetail = n
# precompute blending factors
shape.bezierBlend = []
for i in range(n + 1):
t = float(i) / n
u = 1 - t
shape.bezierBlend.append(
(u * u * u, 3 * u * u * t, 3 * t * t * u, t * t * t))
def bezierPoint(a, b, c, d, t):
"""Given the x or y coordinate of Bézier control points a,b,c,d and
the value of the t parameter, return the corresponding
coordinate of the point."""
u = 1.0 - t
return a * u * u * u + b * 3 * u * u * t + c * 3 * t * t * u + d * t * t * t
def bezierTangent(a, b, c, d, t):
"""Given the x or y coordinate of Bézier control points a,b,c,d and
the value of the t parameter, return the corresponding
coordinate of the tangent at that point."""
u = 1.0 - t
return -a * 3 * u * u + b * (9 * u * u - 6 * u) + c * (
6 * t - 9 * t * t) + d * 3 * t * t
def bezierSample(*p):
"""Returns a list of points for cubic bezier arc defined by the given
control points. The number of points is given by shape.bezierDetail."""
assert (len(p) == 4)
result = []
for b in shape.bezierBlend:
x, y, z = 0, 0, 0
for pi, bi in zip(p, b):
x += pi[0] * bi
y += pi[1] * bi
z += pi[2] * bi
result.append((x, y, z))
return result
def bezier(*coords):
"""Draws a cubic Bézier curve for the 4 control points."""
assert (len(coords) in (8, 12))
if len(coords) == 8:
ctrlpoints = coords[:2] + (0,) + coords[2:4] + (0,) + coords[4:6] + (
0,) + coords[6:] + (0,)
beginShape()
vertex(*ctrlpoints[0:3])
bezierVertex(*ctrlpoints[3:])
endShape()
class CatmullRomBlend:
"""Cubic Catmull Rom Blending"""
def __init__(self, tension=0.5):
self.tau = tension
def blendFactors(self, u):
"""Given a value for u, returns the blending factors for each
of the 4 control points."""
u2 = u * u
u3 = u2 * u
return [
-self.tau * u + 2 * self.tau * u2 - self.tau * u3,
1 + (self.tau - 3) * u2 + (2 - self.tau) * u3,
self.tau * u + (3 - 2 * self.tau) * u2 + (self.tau - 2) * u3,
-self.tau * u2 + self.tau * u3]
def tangentBlendFactors(self, u):
"""Given a value for u, returns the tangent blending factors for each
of the 4 control points."""
u2 = u * u
return [
-self.tau + 4 * self.tau * u - 3 * self.tau * u2,
(2 * self.tau - 6) * u + (6 - 3 * self.tau) * u2,
self.tau + (6 - 4 * self.tau) * u + (3 * self.tau - 6) * u2,
-2 * self.tau * u + 3 * self.tau * u2]
def blendPoint(self, u, p0, p1, p2, p3):
"""Returns the point obtained by blending pi with factor u."""
result = [0, 0, 0]
for b, p in zip(self.blendFactors(u), (p0, p1, p2, p3)):
for i, x in enumerate(p):
result[i] += p[i] * b
return result
def blendTangent(self, u, p0, p1, p2, p3):
"""Returns the curve tangent at the point obtained by blending pi with factor u."""
result = [0, 0, 0]
for b, p in zip(self.tangentBlendFactors(u), (p0, p1, p2, p3)):
for i, x in enumerate(p):
result[i] += p[i] * b
return result
def curveTightness(squishy):
"""Uses 'squishy' as the tension factor for the catmull-rom spline."""
shape.tension = (1 - squishy) / 2.0
def curveVertex(x, y, z=0):
"""Generates a cubic Catmull-Rom curve corresponding to interpolating
the three last points issued with earlier calls to curveVertex and this one.
"""
shape.crv.append((x, y, z))
if len(shape.crv) > 4:
shape.crv = shape.crv[-4:]
if len(shape.crv) == 4:
blend = CatmullRomBlend(shape.tension)
npts = shape.curveDetail
for i in range(npts + 1):
p = blend.blendPoint(float(i) / npts, *shape.crv)
vertex(*p)
def curve(*coords):
"""Generates a catmull-rom curve given 4 points. Takes either 8 numbers
for coordinates of 4 points in 2D or 12 numbers for 4 points in 3D"""
if len(coords) == 8:
p0, p1, p2, p3 = coords[0:2], coords[2:4], coords[4:6], coords[6:8]
else:
assert (len(coords) == 12)
p0, p1, p2, p3 = coords[0:3], coords[3:6], coords[6:9], coords[9:12]
blend = CatmullRomBlend(shape.tension)
beginShape()
npts = shape.curveDetail
for i in range(npts + 1):
p = blend.blendPoint(float(i) / npts, p0, p1, p2, p3)
vertex(*p)
endShape()
def curvePoint(a, b, c, d, t):
"""Evaluates the n'th coordinate of a cubic Catmull-Rom curve at parameter
t for control points having their n'th coordinate equal to a, b, c and d, respectively.
"""
blend = CatmullRomBlend(shape.tension)
return blend.blendPoint(t, (a,), (b,), (c,), (d,))[0]
def curveTangent(a, b, c, d, t):
"""Evaluates the n'th coordinate of the tangent at the point on a cubic Catmull-Rom
curve at parameter t for control points having their n'th coordinate equal to
a, b, c and d, respectively.
"""
blend = CatmullRomBlend(shape.tension)
return blend.blendTangent(t, (a,), (b,), (c,), (d,))[0]
def curveDetail(npts=shape.curveDetail):
"""Controls the number of samples per curve arc."""
shape.curveDetail = npts
def sphereDetail(*args):
"""Controls the how many segments are used per circle revolution while drawing a
sphere. The first and second parameters determine the number of segments used
longitudinally and latitudinally, respectively. If only one parameter is used, it
determines the total number of segments used per full circle revolution."""
if len(args) == 1:
shape.sphereDetail = (args[0], args[0])
elif len(args) == 2:
shape.sphereDetail = (args[0], args[1])
| 36.667539 | 92 | 0.520811 | 1,659 | 0.118399 | 0 | 0 | 0 | 0 | 0 | 0 | 3,539 | 0.252569 |
35adcf6adc687ab72f83ce164d7c0aba3ae2e753 | 22,531 | py | Python | ptpy/transports/usb.py | komodo108/sequoia-ptpy | 3395e68286ef0dc8a026e48a960ff4d0c65792a4 | [
"BSD-3-Clause"
] | null | null | null | ptpy/transports/usb.py | komodo108/sequoia-ptpy | 3395e68286ef0dc8a026e48a960ff4d0c65792a4 | [
"BSD-3-Clause"
] | null | null | null | ptpy/transports/usb.py | komodo108/sequoia-ptpy | 3395e68286ef0dc8a026e48a960ff4d0c65792a4 | [
"BSD-3-Clause"
] | null | null | null | '''This module implements the USB transport layer for PTP.
It exports the PTPUSB class. Both the transport layer and the basic PTP
implementation are Vendor agnostic. Vendor extensions should extend these to
support more operations.
'''
from __future__ import absolute_import
import atexit
import logging
import usb.core
import six
import array
from usb.util import (
endpoint_type, endpoint_direction, ENDPOINT_TYPE_BULK, ENDPOINT_TYPE_INTR,
ENDPOINT_OUT, ENDPOINT_IN,
)
from ..ptp import PTPError
from ..util import _main_thread_alive
from construct import (
Bytes, Container, Embedded, Enum, ExprAdapter, Int16ul, Int32ul, Pass,
Range, Struct,
)
from threading import Thread, Event, RLock
from six.moves.queue import Queue
from hexdump import hexdump
logger = logging.getLogger(__name__)
__all__ = ('USBTransport', 'find_usb_cameras')
__author__ = 'Luis Mario Domenzain'
PTP_USB_CLASS = 6
class find_class(object):
    '''Callable USB matcher for a device class, optionally filtered by a
    product-name substring.'''

    def __init__(self, class_, name=None):
        self._class = class_
        self._name = name

    def _name_matches(self, device):
        # No name constraint means any class match is accepted; otherwise
        # the device's product string must contain the requested name.
        if not self._name:
            return True
        return self._name in usb.util.get_string(device, device.iProduct)

    def __call__(self, device):
        # Match on the device descriptor itself...
        if device.bDeviceClass == self._class:
            return self._name_matches(device)
        # ...or on any interface descriptor of any configuration.
        for cfg in device:
            descriptor = usb.util.find_descriptor(
                cfg,
                bInterfaceClass=self._class
            )
            if descriptor is not None:
                return self._name_matches(device)
        return False
def find_usb_cameras(name=None):
    '''Return an iterator over USB devices advertising the PTP class.

    When *name* is given, only devices whose product string contains it
    are matched.
    '''
    matcher = find_class(PTP_USB_CLASS, name=name)
    return usb.core.find(find_all=True, custom_match=matcher)
class USBTransport(object):
'''Implement USB transport.'''
def __init__(self, *args, **kwargs):
device = kwargs.get('device', None)
'''Instantiate the first available PTP device over USB'''
logger.debug('Init USB')
self.__setup_constructors()
# If no device is specified, find all devices claiming to be Cameras
# and get the USB endpoints for the first one that works.
if device is None:
logger.debug('No device provided, probing all USB devices.')
if isinstance(device, six.string_types):
name = device
logger.debug(
'Device name provided, probing all USB devices for {}.'
.format(name)
)
device = None
else:
name = None
devs = (
[device] if (device is not None)
else find_usb_cameras(name=name)
)
self.__claimed = False
self.__acquire_camera(devs)
self.__event_queue = Queue()
self.__event_shutdown = Event()
# Locks for different end points.
self.__inep_lock = RLock()
self.__intep_lock = RLock()
self.__outep_lock = RLock()
# Slightly redundant transaction lock to avoid catching other request's
# response
self.__transaction_lock = RLock()
self.__event_proc = Thread(
name='EvtPolling',
target=self.__poll_events
)
self.__event_proc.daemon = False
atexit.register(self._shutdown)
self.__event_proc.start()
def __available_cameras(self, devs):
for dev in devs:
if self.__setup_device(dev):
logger.debug('Found USB PTP device {}'.format(dev))
yield
else:
message = 'No USB PTP device found.'
logger.error(message)
raise PTPError(message)
    def __acquire_camera(self, devs):
        '''From the cameras given, get the first one that does not fail.

        Iterates the ``__available_cameras`` generator (which populates
        ``self.__dev``/``self.__intf`` via ``__setup_device``), detaches any
        kernel driver, claims the interface and resets the device. Raises
        ``PTPError`` if every candidate fails to be claimed.
        '''
        for _ in self.__available_cameras(devs):
            # Stop system drivers
            try:
                if self.__dev.is_kernel_driver_active(
                        self.__intf.bInterfaceNumber):
                    try:
                        self.__dev.detach_kernel_driver(
                            self.__intf.bInterfaceNumber)
                    except usb.core.USBError:
                        message = (
                            'Could not detach kernel driver. '
                            'Maybe the camera is mounted?'
                        )
                        logger.error(message)
            except NotImplementedError as e:
                # Some platforms/backends do not implement kernel-driver
                # queries at all; that is not fatal.
                logger.debug('Ignoring unimplemented function: {}'.format(e))
            # Claim camera
            try:
                logger.debug('Claiming {}'.format(repr(self.__dev)))
                usb.util.claim_interface(self.__dev, self.__intf)
                self.__claimed = True
            except Exception as e:
                # Move on to the next candidate camera.
                logger.warn('Failed to claim PTP device: {}'.format(e))
                continue
            self.__dev.reset()
            break
        else:
            # for-else: executed only when no camera was claimed (no break).
            message = (
                'Could not acquire any camera.'
            )
            logger.error(message)
            raise PTPError(message)
def _shutdown(self):
logger.debug('Shutdown request')
self.__event_shutdown.set()
# Free USB resource on shutdown.
# Only join a running thread.
if self.__event_proc.is_alive():
self.__event_proc.join(2)
try:
if self.__claimed:
logger.debug('Release {}'.format(repr(self.__dev)))
usb.util.release_interface(self.__dev, self.__intf)
except Exception as e:
logger.warn(e)
# Helper methods.
# ---------------------
    def __setup_device(self, dev):
        '''Get endpoints for a device. True on success.

        On success ``self.__inep``/``__outep``/``__intep`` hold the bulk-in,
        bulk-out and interrupt-in endpoints, and ``self.__cfg``/``__dev``/
        ``__intf`` record where they were found. On failure the endpoint
        attributes are left as ``None``.
        '''
        self.__inep = None
        self.__outep = None
        self.__intep = None
        self.__cfg = None
        self.__dev = None
        self.__intf = None

        # Attempt to find the USB in, out and interrupt endpoints for a PTP
        # interface.
        for cfg in dev:
            for intf in cfg:
                if intf.bInterfaceClass == PTP_USB_CLASS:
                    for ep in intf:
                        ep_type = endpoint_type(ep.bmAttributes)
                        ep_dir = endpoint_direction(ep.bEndpointAddress)
                        # PTP over USB uses bulk IN/OUT for data and an
                        # interrupt IN endpoint for events.
                        if ep_type == ENDPOINT_TYPE_BULK:
                            if ep_dir == ENDPOINT_IN:
                                self.__inep = ep
                            elif ep_dir == ENDPOINT_OUT:
                                self.__outep = ep
                        elif ((ep_type == ENDPOINT_TYPE_INTR) and
                                (ep_dir == ENDPOINT_IN)):
                            self.__intep = ep
                # Accept an interface only when all three endpoints were
                # found; otherwise discard any partial set and keep probing.
                # (This check sits at the interface-loop level, but a complete
                # set returns immediately below, so a later non-PTP interface
                # can never be the one accepted.)
                if not (self.__inep and self.__outep and self.__intep):
                    self.__inep = None
                    self.__outep = None
                    self.__intep = None
                else:
                    logger.debug('Found {}'.format(repr(self.__inep)))
                    logger.debug('Found {}'.format(repr(self.__outep)))
                    logger.debug('Found {}'.format(repr(self.__intep)))
                    self.__cfg = cfg
                    self.__dev = dev
                    self.__intf = intf
                    return True
        return False
    def __setup_constructors(self):
        '''Set endianness and create transport-specific constructors.

        Builds the ``construct`` parsers/builders for the USB container
        framing: a 12-byte header (Length, Type, Code, TransactionID)
        followed by a payload.
        '''
        # Set endianness of constructors before using them.
        self._set_endian('little')

        self.__Length = Int32ul
        self.__Type = Enum(
            Int16ul,
            default=Pass,
            Undefined=0x0000,
            Command=0x0001,
            Data=0x0002,
            Response=0x0003,
            Event=0x0004,
        )
        # This is just a convenience constructor to get the size of a header.
        self.__Code = Int16ul
        self.__Header = Struct(
            'Length' / self.__Length,
            'Type' / self.__Type,
            'Code' / self.__Code,
            'TransactionID' / self._TransactionID,
        )
        # These are the actual constructors for parsing and building.
        self.__CommandHeader = Struct(
            'Length' / self.__Length,
            'Type' / self.__Type,
            'OperationCode' / self._OperationCode,
            'TransactionID' / self._TransactionID,
        )
        self.__ResponseHeader = Struct(
            'Length' / self.__Length,
            'Type' / self.__Type,
            'ResponseCode' / self._ResponseCode,
            'TransactionID' / self._TransactionID,
        )
        self.__EventHeader = Struct(
            'Length' / self.__Length,
            'Type' / self.__Type,
            'EventCode' / self._EventCode,
            'TransactionID' / self._TransactionID,
        )
        # Apparently nobody uses the SessionID field. Even though it is
        # specified in ISO15740:2013(E), no device respects it and the session
        # number is implicit over USB.
        # Zero to five parameters may follow a response/event header.
        self.__Param = Range(0, 5, self._Parameter)
        # Payload size is whatever the Length field advertises beyond the
        # fixed-size header.
        self.__CommandTransactionBase = Struct(
            Embedded(self.__CommandHeader),
            'Payload' / Bytes(
                lambda ctx, h=self.__Header: ctx.Length - h.sizeof()
            )
        )
        # On build, fill in Length from the payload size; parsing passes the
        # container through untouched.
        self.__CommandTransaction = ExprAdapter(
            self.__CommandTransactionBase,
            encoder=lambda obj, ctx, h=self.__Header: Container(
                Length=len(obj.Payload) + h.sizeof(),
                **obj
            ),
            decoder=lambda obj, ctx: obj,
        )
        self.__ResponseTransactionBase = Struct(
            Embedded(self.__ResponseHeader),
            'Payload' / Bytes(
                lambda ctx, h=self.__Header: ctx.Length - h.sizeof())
        )
        self.__ResponseTransaction = ExprAdapter(
            self.__ResponseTransactionBase,
            encoder=lambda obj, ctx, h=self.__Header: Container(
                Length=len(obj.Payload) + h.sizeof(),
                **obj
            ),
            decoder=lambda obj, ctx: obj,
        )
    def __parse_response(self, usbdata):
        '''Helper method for parsing USB data.

        Returns a ``Container`` holding the implicit ``SessionID`` and the
        ``TransactionID`` plus, depending on the transfer type:
        ``ResponseCode``/``Parameter`` for responses,
        ``EventCode``/``Parameter`` for events, or ``OperationCode``/``Data``
        for data phases.
        '''
        # Build up container with all PTP info.
        logger.debug('Transaction:')
        usbdata = bytearray(usbdata)
        # Hexdump at most the first 512 bytes to keep debug logs bounded.
        if logger.isEnabledFor(logging.DEBUG):
            for l in hexdump(
                    six.binary_type(usbdata[:512]),
                    result='generator'
            ):
                logger.debug(l)
        transaction = self.__ResponseTransaction.parse(usbdata)
        response = Container(
            SessionID=self.session_id,
            TransactionID=transaction.TransactionID,
        )
        logger.debug('Interpreting {} transaction'.format(transaction.Type))
        if transaction.Type == 'Response':
            response['ResponseCode'] = transaction.ResponseCode
            response['Parameter'] = self.__Param.parse(transaction.Payload)
        elif transaction.Type == 'Event':
            # Re-parse the header with the event layout to get the EventCode.
            event = self.__EventHeader.parse(
                usbdata[0:self.__Header.sizeof()]
            )
            response['EventCode'] = event.EventCode
            response['Parameter'] = self.__Param.parse(transaction.Payload)
        else:
            # Data phase: re-parse with the command layout for OperationCode
            # and keep the raw payload.
            command = self.__CommandHeader.parse(
                usbdata[0:self.__Header.sizeof()]
            )
            response['OperationCode'] = command.OperationCode
            response['Data'] = transaction.Payload
        return response
    def __recv(self, event=False, wait=False, raw=False):
        '''Helper method for receiving data.

        Reads from the interrupt endpoint when ``event`` is true, otherwise
        from the bulk-in endpoint. Returns the parsed response ``Container``,
        the raw byte array when ``raw`` is true, or ``None`` when an event
        read times out or nothing is available.

        NOTE(review): the ``wait`` parameter is accepted but unused here.
        '''
        # TODO: clear stalls automatically
        ep = self.__intep if event else self.__inep
        lock = self.__intep_lock if event else self.__inep_lock
        usbdata = array.array('B', [])
        with lock:
            tries = 0
            # Attempt to read a header
            while len(usbdata) < self.__Header.sizeof() and tries < 5:
                if tries > 0:
                    logger.debug('Data smaller than a header')
                    logger.debug(
                        'Requesting {} bytes of data'
                        .format(ep.wMaxPacketSize)
                    )
                try:
                    usbdata += ep.read(
                        ep.wMaxPacketSize
                    )
                except usb.core.USBError as e:
                    # Return None on timeout or busy for events
                    # (errno 110/16/5 are ETIMEDOUT/EBUSY/EIO on Linux).
                    if (
                            (e.errno is None and
                             ('timeout' in e.strerror.decode() or
                              'busy' in e.strerror.decode())) or
                            (e.errno == 110 or e.errno == 16 or e.errno == 5)
                    ):
                        if event:
                            return None
                        else:
                            logger.warning('Ignored exception: {}'.format(e))
                    else:
                        logger.error(e)
                        raise e
                tries += 1
            logger.debug('Read {} bytes of data'.format(len(usbdata)))

            if len(usbdata) == 0:
                if event:
                    return None
                else:
                    raise PTPError('Empty USB read')

            # Dump whatever was read if the header is still incomplete.
            if (
                    logger.isEnabledFor(logging.DEBUG) and
                    len(usbdata) < self.__Header.sizeof()
            ):
                logger.debug('Incomplete header')
                for l in hexdump(
                        six.binary_type(bytearray(usbdata)),
                        result='generator'
                ):
                    logger.debug(l)

            header = self.__ResponseHeader.parse(
                bytearray(usbdata[0:self.__Header.sizeof()])
            )
            if header.Type not in ['Response', 'Data', 'Event']:
                raise PTPError(
                    'Unexpected USB transfer type. '
                    'Expected Response, Event or Data but received {}'
                    .format(header.Type)
                )
            # Keep reading until the advertised transaction Length is in,
            # in chunks of at most 64 kB.
            while len(usbdata) < header.Length:
                usbdata += ep.read(
                    min(
                        header.Length - len(usbdata),
                        # Up to 64kB
                        64 * 2**10
                    )
                )
        if raw:
            return usbdata
        else:
            return self.__parse_response(usbdata)
def __send(self, ptp_container, event=False):
'''Helper method for sending data.'''
ep = self.__intep if event else self.__outep
lock = self.__intep_lock if event else self.__outep_lock
transaction = self.__CommandTransaction.build(ptp_container)
with lock:
try:
sent = 0
while sent < len(transaction):
sent = ep.write(
# Up to 64kB
transaction[sent:(sent + 64*2**10)]
)
except usb.core.USBError as e:
# Ignore timeout or busy device once.
if (
(e.errno is None and
('timeout' in e.strerror.decode() or
'busy' in e.strerror.decode())) or
(e.errno == 110 or e.errno == 16 or e.errno == 5)
):
logger.warning('Ignored USBError {}'.format(e.errno))
ep.write(transaction)
def __send_request(self, ptp_container):
'''Send PTP request without checking answer.'''
# Don't modify original container to keep abstraction barrier.
ptp = Container(**ptp_container)
# Send all parameters
#try:
# while not ptp.Parameter[-1]:
# ptp.Parameter.pop()
# if len(ptp.Parameter) == 0:
# break
#except IndexError:
# # The Parameter list is already empty.
# pass
# Send request
ptp['Type'] = 'Command'
ptp['Payload'] = self.__Param.build(ptp.Parameter)
self.__send(ptp)
def __send_data(self, ptp_container, data):
'''Send data without checking answer.'''
# Don't modify original container to keep abstraction barrier.
ptp = Container(**ptp_container)
# Send data
ptp['Type'] = 'Data'
ptp['Payload'] = data
self.__send(ptp)
@property
def _dev(self):
return None if self.__event_shutdown.is_set() else self.__dev
@_dev.setter
def _dev(self, value):
raise ValueError('Read-only property')
# Actual implementation
# ---------------------
def send(self, ptp_container, data):
'''Transfer operation with dataphase from initiator to responder'''
datalen = len(data)
logger.debug('SEND {} {} bytes{}'.format(
ptp_container.OperationCode,
datalen,
' ' + str(list(map(hex, ptp_container.Parameter)))
if ptp_container.Parameter else '',
))
with self.__transaction_lock:
self.__send_request(ptp_container)
self.__send_data(ptp_container, data)
# Get response and sneak in implicit SessionID and missing
# parameters.
response = self.__recv()
logger.debug('SEND {} {} bytes {}{}'.format(
ptp_container.OperationCode,
datalen,
response.ResponseCode,
' ' + str(list(map(hex, response.Parameter)))
if ptp_container.Parameter else '',
))
return response
    def recv(self, ptp_container):
        '''Transfer operation with dataphase from responder to initiator.

        Sends the request, reads an optional data phase followed by the
        response, and validates that all phases belong to the same
        session/transaction before attaching the payload to the response.
        '''
        logger.debug('RECV {}{}'.format(
            ptp_container.OperationCode,
            ' ' + str(list(map(hex, ptp_container.Parameter)))
            if ptp_container.Parameter else '',
        ))
        with self.__transaction_lock:
            self.__send_request(ptp_container)
            dataphase = self.__recv()
            # A container carrying 'Data' means the responder sent a data
            # phase first; otherwise what we just read is already the
            # final response.
            if hasattr(dataphase, 'Data'):
                response = self.__recv()
                # All three phases must agree on the session ID; on a
                # mismatch the device is reset to abort the broken
                # transaction before raising.
                if not (ptp_container.SessionID ==
                        dataphase.SessionID ==
                        response.SessionID):
                    self.__dev.reset()
                    raise PTPError(
                        'Dataphase session ID missmatch: {}, {}, {}.'
                        .format(
                            ptp_container.SessionID,
                            dataphase.SessionID,
                            response.SessionID
                        )
                    )
                # Likewise the transaction ID must match across phases.
                if not (ptp_container.TransactionID ==
                        dataphase.TransactionID ==
                        response.TransactionID):
                    self.__dev.reset()
                    raise PTPError(
                        'Dataphase transaction ID missmatch: {}, {}, {}.'
                        .format(
                            ptp_container.TransactionID,
                            dataphase.TransactionID,
                            response.TransactionID
                        )
                    )
                # And the data phase must echo the request's operation code.
                if not (ptp_container.OperationCode ==
                        dataphase.OperationCode):
                    self.__dev.reset()
                    raise PTPError(
                        'Dataphase operation code missmatch: {}, {}.'.
                        format(
                            ptp_container.OperationCode,
                            dataphase.OperationCode
                        )
                    )
                # Attach the received payload to the response container.
                response['Data'] = dataphase.Data
            else:
                response = dataphase
            logger.debug('RECV {} {}{}{}'.format(
                ptp_container.OperationCode,
                response.ResponseCode,
                ' {} bytes'.format(len(response.Data))
                if hasattr(response, 'Data') else '',
                ' ' + str(list(map(hex, response.Parameter)))
                if response.Parameter else '',
            ))
            return response
def mesg(self, ptp_container):
'''Transfer operation without dataphase.'''
logger.debug('MESG {}{}'.format(
ptp_container.OperationCode,
' ' + str(list(map(hex, ptp_container.Parameter)))
if ptp_container.Parameter else '',
))
with self.__transaction_lock:
self.__send_request(ptp_container)
# Get response and sneak in implicit SessionID and missing
# parameters for FullResponse.
response = self.__recv()
logger.debug('MESG {} {}{}'.format(
ptp_container.OperationCode,
response.ResponseCode,
' ' + str(list(map(hex, response.Parameter)))
if response.Parameter else '',
))
return response
def event(self, wait=False):
'''Check event.
If `wait` this function is blocking. Otherwise it may return None.
'''
evt = None
usbdata = None
if wait:
usbdata = self.__event_queue.get(block=True)
elif not self.__event_queue.empty():
usbdata = self.__event_queue.get(block=False)
if usbdata is not None:
evt = self.__parse_response(usbdata)
return evt
    def __poll_events(self):
        '''Poll events, adding them to a queue.

        Intended to run in a background thread; loops until shutdown is
        requested or the main thread exits.
        '''
        while not self.__event_shutdown.is_set() and _main_thread_alive():
            try:
                evt = self.__recv(event=True, wait=False, raw=True)
                if evt is not None:
                    logger.debug('Event queued')
                    self.__event_queue.put(evt)
            except usb.core.USBError as e:
                logger.error(
                    '{} polling exception: {}'.format(repr(self.__dev), e)
                )
                # check if disconnected (errno 19 == ENODEV): stop polling
                if e.errno == 19:
                    break
            except Exception as e:
                # Any other error is logged and polling continues.
                logger.error(
                    '{} polling exception: {}'.format(repr(self.__dev), e)
                )
| 37.42691 | 79 | 0.509609 | 21,448 | 0.951933 | 325 | 0.014425 | 185 | 0.008211 | 0 | 0 | 4,155 | 0.184413 |
35ae9be77ddd9d5c96ad4c48dcd4d7d31ac05858 | 4,598 | py | Python | cablegate/cable/models.py | h3/django-cablegate | bffa2970a1fb21717a48cfce76b8a24f909acab0 | [
"BSD-3-Clause"
] | 1 | 2016-04-03T03:15:48.000Z | 2016-04-03T03:15:48.000Z | cablegate/cable/models.py | h3/django-cablegate | bffa2970a1fb21717a48cfce76b8a24f909acab0 | [
"BSD-3-Clause"
] | null | null | null | cablegate/cable/models.py | h3/django-cablegate | bffa2970a1fb21717a48cfce76b8a24f909acab0 | [
"BSD-3-Clause"
] | 1 | 2019-07-31T06:02:12.000Z | 2019-07-31T06:02:12.000Z | import re
from operator import itemgetter
from django.conf import settings
from django.utils import simplejson
from django.db import models
from nltk.tokenize.simple import SpaceTokenizer
from nltk.stem import LancasterStemmer
# Common stop words excluded from the word-count statistics.
WORDS_IGNORED = (
    'after', 'that', 'with', 'which', 'into', 'when', 'than', 'them', 'there', 'threw',
)
# Whitespace tokenizer used to split cable content into words.
splitwords = SpaceTokenizer()
# http://code.google.com/p/nltk/source/browse/trunk/nltk/nltk/stem/lancaster.py
# Lancaster stemmer used to merge counts of inflected word forms.
stemmer = LancasterStemmer()
class Cable(models.Model):
    """A single diplomatic cable (Cablegate data set)."""
    id = models.AutoField(primary_key=True)
    # Date the cable was sent; may be unknown.
    date = models.DateTimeField(blank=True, null=True)
    # Cablegate reference identifier of the cable.
    refid = models.CharField(max_length=250)
    # Classification label (stored verbatim; lowercased for display).
    classification = models.CharField(max_length=250)
    # Originating station; destinations can be a longer free-text list.
    origin = models.CharField(max_length=250)
    destination = models.TextField(blank=True, null=True)
    # Raw header and body text of the cable.
    header = models.TextField(blank=True, null=True)
    content = models.TextField(blank=True, null=True)
    def __str__(self):
        return '<cable object #%s %s %s %s>' % (self.pk, self.date, self.classification, self.origin)
    def __unicode__(self):
        return '%s - %s - %s' % (self.date, self.classification.lower(), self.origin)
    class Meta:
        db_table = u'cable'
class CableMetadata(models.Model):
    """Derived statistics and geo data for one :class:`Cable`.

    Word statistics are computed lazily from ``cable.content`` and cached
    as JSON in the text fields below.
    """
    cable = models.OneToOneField(Cable)
    # Stats (JSON-encoded caches, filled lazily).
    words_freqdist = models.TextField(blank=True, null=True)
    words_count = models.TextField(blank=True, null=True)
    keywords = models.TextField(blank=True, null=True)
    names = models.TextField(blank=True, null=True)
    # Geo coordinates, stored as strings.
    origin_lat = models.CharField(max_length=250, blank=True, null=True)
    origin_lon = models.CharField(max_length=250, blank=True, null=True)
    destination_lat = models.CharField(max_length=250, blank=True, null=True)
    destination_lon = models.CharField(max_length=250, blank=True, null=True)
    def get_words_count(self, minlen=4, mincount=3):
        """
        Count the number of times each word has appeared.

        Returns a list of ``(label, count)`` pairs sorted by descending
        count, where ``label`` is either the word itself or ``"word/stem"``
        when stemmed occurrences were folded in. Only words longer than
        ``minlen`` characters with at least ``mincount`` occurrences are
        kept. The result is cached in ``self.words_count`` (JSON) and
        recomputed when the DEV setting is truthy.
        Based on http://code.google.com/p/nltk/source/browse/trunk/nltk/examples/school/words.py
        """
        if not self.words_count or getattr(settings, 'DEV', False):
            wordcounts = {}
            stems = {}
            out = []
            # Strip newlines, parentheses, dots, digits and "---" rulers.
            content = re.sub("\n|\(|\)|\.|\d+|---+", "", self.cable.content)
            words = splitwords.tokenize(content.lower())
            # Fold occurrences of each word's stem into the word itself.
            # NOTE(review): this mutates `words` while iterating over it,
            # so some entries may be skipped; kept as-is to preserve the
            # original counting behaviour.
            for word in words:
                stem = stemmer.stem(word)
                if stem != word:
                    occ = 0
                    while stem in words:
                        words.remove(stem)
                        occ = occ + 1
                    if occ > 0:
                        stems[word] = (stem, occ)
            for word in words:
                if len(word) > minlen and word not in WORDS_IGNORED:
                    if word not in wordcounts:
                        wordcounts[word] = 0
                    wordcounts[word] += 1
            for word in wordcounts:
                if word in stems:
                    wordcounts[word] = wordcounts[word] + stems[word][1]
            # Skim: keep only sufficiently frequent words.
            for word in wordcounts:
                if wordcounts[word] >= mincount:
                    label = word
                    if word in stems:
                        label = '%s/%s' % (word, stems[word][0])
                    out.append((label, wordcounts[word]))
            # Sort by descending count and cache.
            out = sorted(out, key=lambda i: i[1], reverse=True)
            self.words_count = simplejson.dumps(out)
            self.save()
        return simplejson.loads(self.words_count)
    def get_words_freqdist(self, num=25):
        """
        Returns the words and their cumulative frequency percentages,
        in order of decreasing frequency, as a dict of rank -> [word, "NN%"].
        ``num`` is currently unused; kept for backward compatibility.
        Based on http://code.google.com/p/nltk/source/browse/trunk/nltk/examples/school/words.py
        """
        if not self.words_freqdist:
            out = {}
            # Bugfix: get_words_count() returns a list of (label, count)
            # pairs, not a dict — the original code called .values()/.items()
            # on it and crashed with AttributeError. Convert to a dict first.
            counts = dict(self.get_words_count())
            total = sum(counts.values())
            cumulative = 0.0
            sorted_word_counts = sorted(counts.items(), key=itemgetter(1), reverse=True)
            for i in range(len(sorted_word_counts)):
                word, count = sorted_word_counts[i]
                cumulative += count * 100.0 / total
                out[i] = [word, '%3.2d%%' % cumulative]
            self.words_freqdist = out
            self.save()
        return self.words_freqdist
| 37.382114 | 101 | 0.565681 | 4,114 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 690 | 0.150065 |
35aec8279955fef5781af2402034c048af969760 | 618 | py | Python | ztools/Fs/__init__.py | ItsCinnabar/Mass_Custom_XCIs | 94551f43eeed3a6ab7a761fd41ff5500435910dc | [
"MIT"
] | 6 | 2019-05-08T18:52:48.000Z | 2019-10-01T06:58:20.000Z | ztools/Fs/__init__.py | ItsCinnabar/Mass_Custom_XCIs | 94551f43eeed3a6ab7a761fd41ff5500435910dc | [
"MIT"
] | null | null | null | ztools/Fs/__init__.py | ItsCinnabar/Mass_Custom_XCIs | 94551f43eeed3a6ab7a761fd41ff5500435910dc | [
"MIT"
] | 1 | 2020-01-01T07:44:06.000Z | 2020-01-01T07:44:06.000Z | from Fs.Xci import Xci
from Fs.pXci import uXci
from Fs.pXci import nXci
from Fs.Nca import Nca
from Fs.Nsp import Nsp
from Fs.Rom import Rom
from Fs.Nacp import Nacp
from Fs.Pfs0 import Pfs0
from Fs.Hfs0 import Hfs0
from Fs.Ticket import Ticket
from Fs.File import File
def factory(name):
    """Return a new handler object matching the file extension of `name`."""
    handlers = (
        ('.xci', Xci),
        ('.nsp', Nsp),
        ('.nsx', Nsp),
        ('.nca', Nca),
        ('.nacp', Nacp),
        ('.tik', Ticket),
        ('.hfs0', Hfs0),
    )
    for suffix, handler_class in handlers:
        if name.endswith(suffix):
            return handler_class()
    # Unknown extension: fall back to the generic file wrapper.
    return File()
35b0214bdc1e4c57ae70c73b860e17dcca99ae17 | 808 | py | Python | Days/Day 5 - Doesn't He Have Intern-Elves For This/Part 2.py | jamesjiang52/Advent-of-Code-2015 | 29c19eb11d9bde532bc2609c5b970a0a6924d11b | [
"MIT"
] | null | null | null | Days/Day 5 - Doesn't He Have Intern-Elves For This/Part 2.py | jamesjiang52/Advent-of-Code-2015 | 29c19eb11d9bde532bc2609c5b970a0a6924d11b | [
"MIT"
] | null | null | null | Days/Day 5 - Doesn't He Have Intern-Elves For This/Part 2.py | jamesjiang52/Advent-of-Code-2015 | 29c19eb11d9bde532bc2609c5b970a0a6924d11b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 3 10:27:50 2018
@author: James Jiang
"""
# Read the puzzle input, stripping trailing newlines. Using a context
# manager (unlike the original bare open()) guarantees the file handle
# is closed promptly instead of leaking until garbage collection.
with open('Data.txt') as data_file:
    all_lines = [line.rstrip('\n') for line in data_file]
def has_two_pairs(string):
    """Return True if some two-letter pair occurs twice without overlapping."""
    return any(
        string[i:i + 2] in string[:i] or string[i:i + 2] in string[i + 2:]
        for i in range(len(string) - 1)
    )
def has_repeat_with_space(string):
    """Return True if some letter repeats with exactly one letter between."""
    # Pair each character with the one two positions later.
    return any(left == right for left, right in zip(string, string[2:]))
def is_nice(string):
    """A string is nice when it has both a doubled pair and a spaced repeat."""
    return has_two_pairs(string) and has_repeat_with_space(string)
# Tally and report how many input strings qualify as nice.
count = sum(1 for string in all_lines if is_nice(string))
print(count)
| 21.837838 | 83 | 0.566832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.126238 |
35b02597463fbb5f50f5b66d1352a253be0edf70 | 4,708 | py | Python | loggerBot.py | jskrist/channelLogger | 42d5820d29ce9213c823d76dbdc748e288f45eb8 | [
"MIT"
] | null | null | null | loggerBot.py | jskrist/channelLogger | 42d5820d29ce9213c823d76dbdc748e288f45eb8 | [
"MIT"
] | null | null | null | loggerBot.py | jskrist/channelLogger | 42d5820d29ce9213c823d76dbdc748e288f45eb8 | [
"MIT"
] | null | null | null | import asyncio, discord, json
from discord.ext.commands import Bot
from discord.ext import commands
from tinydb import TinyDB, Query
from tinydb.operations import delete, increment
'''
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
SETUP
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
'''
# Create the bot; commands are prefixed with "!" and help is sent via PM.
bot = Bot(description="Channel Logger Bot by jskrist#3569", command_prefix="!", pm_help = True)
# Start or connect to a file-backed TinyDB database to log the messages.
db = TinyDB('data.json')
# Query objects for searching the database: `msg` for message-content
# lookups, `usr` for author lookups.
msg = Query()
usr = Query()
'''
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
HELPER FUNCTIONS
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
'''
def getPostingUsers():
    """Return the set of author names that appear in the message database."""
    # Set comprehension replaces the manual add-loop; semantics unchanged.
    return {entry['authorName'] for entry in db}
async def addMsgToDB(message):
    """Store a message in the database unless it is from this bot, empty,
    a command, or already stored.

    Bugfix: the original combined the checks with bitwise `&`, which does
    not short-circuit, so `message.content[0]` raised IndexError for
    messages with empty content (e.g. attachment-only posts).
    """
    # Ignore our own messages to avoid feedback loops, and skip empty
    # content before indexing into it.
    if message.author.id == bot.user.id or not message.content:
        return
    # Messages beginning with "!" or "]" are commands, not chat to log.
    if message.content.startswith(('!', ']')):
        return
    content = message.content.lower()
    # Only store the first occurrence of any given message text.
    if not db.search(msg.content == content):
        # Insert the content along with the posting user's name. Any other
        # data could be added to the database at this point.
        db.insert({'content': content, 'authorName': message.author.name})
'''
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
BOT EVENTS AND COMMANDS
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
'''
# Prints a message to the terminal/command window once the bot has
# connected, so you know it started correctly.
@bot.event
async def on_ready():
    print('Bot is up and running.')
# Runs for every message posted to the server; delegates storage (and all
# filtering) to addMsgToDB.
@bot.listen()
async def on_message(message):
    await addMsgToDB(message)
# when a message on the server is edited, this function is executed
@bot.listen()
async def on_message_edit(msgBefore, msgAfter):
    '''
    Called whenever a message on the server is edited.

    Updating the stored row in place could hide duplicates: if two users
    sent the same text and one of them edits theirs, rewriting the single
    stored entry would lose the fact that the original text was also
    posted by the other user.
    '''
    # In-place alternative (intentionally not used):
    # db.update({'content': msgAfter.content.lower()}, msg.content == msgBefore.content.lower())
    '''
    Instead, the edited message is simply added to the database as a new
    entry (duplicates are filtered inside addMsgToDB).
    '''
    await addMsgToDB(msgAfter)
@bot.command(pass_context=True)
async def printDB(context):
    # Send every database entry to the invoking channel, one Discord
    # message per entry — do not use on a large database. The backing
    # file is data.json (see the TinyDB setup near the top of this file).
    for item in db:
        await bot.send_message(context.message.channel, item)
@bot.command(pass_context=True)
async def stats(context):
    """Report how many logged messages each posting user has; this could
    be expanded with further per-user statistics."""
    channel = context.message.channel
    for user in getPostingUsers():
        message_count = len(db.search(msg.authorName == user))
        await bot.send_message(channel, '{0} has {1} messages'.format(user, message_count))
@bot.command(pass_context=True)
async def clearDB_all(context):
    # Remove ALL messages from the database (irreversible purge).
    db.purge()
@bot.command(pass_context=True)
async def clearDB_usr(context, User=""):
    # Remove all messages posted by the given user (exact author-name match).
    db.remove(usr.authorName == User)
@bot.command(pass_context=True)
async def clearDB_msg(context, Msg=""):
    # Remove the given message from the database if it exists. Matching is
    # case-insensitive because entries are stored lowercased (see addMsgToDB).
    db.remove(msg.content == Msg.lower())
'''
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
STARTING THE BOT
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
'''
# Read the bot token from botToken.txt, which should contain a single
# line of text (the token). Stray newlines are stripped.
with open('botToken.txt', 'r') as myfile:
    botToken = myfile.read().replace('\n', '')
# Start the bot; this call blocks until the bot shuts down.
bot.run(botToken)
| 37.664 | 105 | 0.647409 | 0 | 0 | 0 | 0 | 2,135 | 0.453483 | 2,694 | 0.572218 | 3,037 | 0.645072 |
35b0660ef14051f12a9964eb3155f5bf5f684943 | 2,080 | py | Python | roomBasedLightControl/roomBasedLightControl.py | pippyn/appdaemon-scripts | 615cdfeaaf039ffbe1be041eb07c35a2494f008d | [
"MIT"
] | null | null | null | roomBasedLightControl/roomBasedLightControl.py | pippyn/appdaemon-scripts | 615cdfeaaf039ffbe1be041eb07c35a2494f008d | [
"MIT"
] | null | null | null | roomBasedLightControl/roomBasedLightControl.py | pippyn/appdaemon-scripts | 615cdfeaaf039ffbe1be041eb07c35a2494f008d | [
"MIT"
] | null | null | null | import appdaemon.plugins.hass.hassapi as hass
import datetime
import globals
#
# App which turns on the light based on the room the user is currently in
#
#
# Args:
# room_sensor: the sensor which shows the room the user is in. example: sensor.mqtt_room_user_one
# entity: The entity which gets turned on by alexa/snips. example: input_boolean.room_based_light
# mappings:
# livingroom:
# room: name of the room
# entity: entity to turn on
#
# Release Notes
#
# Version 1.2:
# None Check
#
# Version 1.1:
# Using globals
#
# Version 1.0:
# Initial Version
class RoomBasedLightControl(hass.Hass):
    """Turn on the light in whichever room the user is currently in.

    Listens for state changes of a virtual switch (``entity``, toggled by
    Alexa/Snips); when it flips, looks up the user's current room via
    ``room_sensor`` and switches the entity mapped to that room on or off.
    """

    def initialize(self):
        """Read configuration, build the room -> entity map and register the listener."""
        self.listen_state_handle_list = []
        self.timer_handle_list = []
        self.room_sensor = globals.get_arg(self.args, "room_sensor")
        self.entity = globals.get_arg(self.args, "entity")
        self.mappings = self.args["mappings"]
        # Map each configured room name to the entity controlled in it.
        self.mappings_dict = {}
        for mapping in self.mappings:
            self.mappings_dict[self.mappings[mapping]["room"]] = self.mappings[mapping]["entity"]
        self.listen_state_handle_list.append(self.listen_state(self.state_change, self.entity))

    def state_change(self, entity, attributes, old, new, kwargs):
        """Mirror the virtual switch's new state onto the entity mapped to the user's room."""
        self.log("{} turned {}".format(self.friendly_name(self.entity), new))
        room = self.get_state(self.room_sensor)
        self.log("User is in room {}".format(room))
        mapped_entity = self.mappings_dict.get(room)
        self.log("Entity for that room is: {}".format(mapped_entity))
        # Idiom fix: `is not None` instead of `!= None`. If no mapping
        # exists for the current room, do nothing.
        if mapped_entity is not None:
            if new == "on":
                self.log("Turning {} on".format(mapped_entity))
                self.turn_on(mapped_entity)
            elif new == "off":
                self.log("Turning {} off".format(mapped_entity))
                self.turn_off(mapped_entity)

    def terminate(self):
        """Cancel all registered state listeners and timers on shutdown."""
        for listen_state_handle in self.listen_state_handle_list:
            self.cancel_listen_state(listen_state_handle)
        for timer_handle in self.timer_handle_list:
            self.cancel_timer(timer_handle)
35b0f708a1bc7275f2341dabd56cd8622b49aa36 | 4,074 | py | Python | Platypus StableSwap/emissions_rate.py | MattAHarrington/protocol-analysis | 50f15e186bb70b4c76c9e77c8fc832619382c8d0 | [
"MIT"
] | null | null | null | Platypus StableSwap/emissions_rate.py | MattAHarrington/protocol-analysis | 50f15e186bb70b4c76c9e77c8fc832619382c8d0 | [
"MIT"
] | null | null | null | Platypus StableSwap/emissions_rate.py | MattAHarrington/protocol-analysis | 50f15e186bb70b4c76c9e77c8fc832619382c8d0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
Calculating the emissions from deposits in Platypus stable accounts
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, EngFormatter, PercentFormatter
from strategy_const import *
from const import *
def boosted_pool_emission_rate(your_stable_deposit, vePTP_held, other_deposit_weights):
    ''' Share of the boosted pool's emissions earned by your position.

    Your weight is sqrt(deposit * vePTP); the share is that weight divided
    by the total of all deposit weights.
    '''
    my_weight = np.sqrt(your_stable_deposit * vePTP_held)
    return my_weight / other_deposit_weights
def base_pool_emission_rate(your_stable_deposit, other_stable_deposits):
    ''' Share of the base pool's emissions earned by your deposit,
    i.e. your deposit divided by the pool total.
    '''
    pool_total = your_stable_deposit + other_stable_deposits
    return your_stable_deposit / pool_total
# np.vectorize lets callers pass scalars or arrays/meshgrids for either
# argument (used by main() below with a meshgrid).
@np.vectorize
def total_emissions_rate(stable_bankroll,
                         ptp_marketbuy_proportion):
    '''Return the fraction of total PTP emissions this strategy would earn.

    :stable_bankroll: total USD value of the stables you'd invest in the Platypus protocol
    :ptp_marketbuy_proportion: proportion of stable_bankroll you'd use to marketbuy PTP for staking to vePTP

    Combines the boosted-pool and base-pool shares, weighted by the pool
    allocation constants defined in const/strategy_const. Note: despite the
    original wording, this returns an emissions *fraction*, not a number of
    PTP tokens.
    '''
    # PTP bought with the allotted slice of the bankroll, then staked to
    # accrue vePTP over the configured staking duration.
    n_PTP = (stable_bankroll * ptp_marketbuy_proportion) / PTP_PRICE
    n_vePTP = HOURS_SPENT_STAKING * HOURLY_STAKED_PTP_vePTP_YIELD * n_PTP
    stable_deposit = stable_bankroll * (1 - ptp_marketbuy_proportion)
    # calculating lower bound on total deposit weights:
    # assume all other deposits are from one wallet with all other staked PTP
    # and it's been staking as long as you have
    total_deposit_weights = GLOBAL_PTP_STAKED * HOURLY_STAKED_PTP_vePTP_YIELD * HOURS_SPENT_STAKING
    boosted = boosted_pool_emission_rate(stable_deposit, n_vePTP, total_deposit_weights)
    base = base_pool_emission_rate(stable_deposit, TVL - stable_deposit)
    return (BOOSTING_POOL_ALLOCATION * boosted) + (BASE_POOL_ALLOCATION * base)
def plot_2d_returns(stable_bankroll, ptp_proportion, returns_array, as_percents = True):
    """Use matplotlib to plot the slope of returns across different bankroll strategies.

    stable_bankroll / ptp_proportion: meshgrid axes (USD bankroll, fraction
    market-bought); returns_array: emissions fractions from
    total_emissions_rate. `as_percents` is currently unused — axes are
    always formatted as percentages.
    """
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(18,9))
    manifold = ax.plot_surface(stable_bankroll, ptp_proportion, returns_array,
                               cmap=cm.plasma, linewidth=0.5, antialiased=False)
    # labels, titles, and axes
    ax.set_title(f"Monthly Strategy Emissions given PTP staking for {round(HOURS_SPENT_STAKING / 24)} Days")
    ax.xaxis.set_major_formatter(EngFormatter(unit="$", places=1, sep="\N{THIN SPACE}"))
    ax.set_xlabel("Strategy Bankroll")
    ax.yaxis.set_major_formatter(PercentFormatter(xmax=1, decimals=1))
    ax.set_ylabel("Percent Market-Bought and Staked")
    ax.zaxis.set_major_locator(LinearLocator(9))
    ax.zaxis.set_major_formatter(PercentFormatter(xmax=1, decimals=4))
    ax.set_zlabel("Percent of Emissions for Strategy")
    # colorbar for scale
    fig.colorbar(manifold, shrink=0.5, aspect=5, format=PercentFormatter(xmax=1, decimals=4))
    plt.show()
def main():
    """Print the modeled market parameters, compute the emissions surface and plot it."""
    print(f"Emissions calculations consider PTP/USD: ${round(PTP_PRICE, 3)}\n" +
          f"Reflecting a FDMC of \t${round(FDMC / 10**6)}MM " +
          f"({round(PERCENT_COINS_CIRCULATING * 100)}% of coins available)\n" +
          f"and implying TVL of \t${round(TVL / 10**6)}MM " +
          f"(Mcap/TVL: {round(1 / TVL_TO_CMC_RATIO, 4)})\n" +
          f"with {round(GLOBAL_PTP_STAKED / 10**6, 2)}MM PTP staked for vePTP ({round(PERCENT_PTP_STAKED * 100)}%)")
    # Create the mesh and calculate return rates over the strategy grid
    # (ranges come from strategy_const).
    stable_bankroll, ptp_proportion = np.meshgrid(stable_deposit_range, ptp_market_buy_bankroll_proportion)
    returns = total_emissions_rate(stable_bankroll, ptp_proportion)
    # plotting time
    plot_2d_returns(stable_bankroll, ptp_proportion, returns)
# Script entry point.
if __name__ == '__main__':
    main()
| 42.884211 | 120 | 0.738095 | 0 | 0 | 0 | 0 | 1,188 | 0.291605 | 0 | 0 | 1,608 | 0.394698 |
35b1c053362f87cf922d6f371df237e7204dcea6 | 1,477 | py | Python | Tests/test_management_client.py | acronis/acronis-cyber-platform-python-samples | bfcbea06753a773ac889054f7e4dee2a6496a8de | [
"MIT"
] | 16 | 2019-10-12T12:14:57.000Z | 2021-11-29T12:06:58.000Z | Tests/test_management_client.py | acronis/acronis-cyber-platform-python-samples | bfcbea06753a773ac889054f7e4dee2a6496a8de | [
"MIT"
] | null | null | null | Tests/test_management_client.py | acronis/acronis-cyber-platform-python-samples | bfcbea06753a773ac889054f7e4dee2a6496a8de | [
"MIT"
] | 4 | 2019-10-18T17:16:14.000Z | 2021-11-16T20:35:43.000Z | """
@date 30.08.2019
@author Anna.Shavrina@acronis.com
@details :copyright: 2003–2019 Acronis International GmbH,
Rheinweg 9, 8200 Schaffhausen, Switzerland. All rights reserved.
"""
from ManagementAPI.ManagementClient.how_to_create_client import create_client
from ManagementAPI.ManagementClient.how_to_delete_client import delete_client
from ManagementAPI.ManagementClient.how_to_retrieve_client_info import \
get_client_info
from ManagementAPI.ManagementClient.how_to_update_client import update_client
from client import Client
from tools import GrantType
def test_create_and_delete_client():
    """A freshly created agent client should report its type and be deletable."""
    auth_client = Client(grant_type=GrantType.client_credentials)
    client_type = 'agent'
    created = create_client(auth_client, client_type)
    assert created['type'] == client_type
    assert delete_client(auth_client, created['client_id'])
def test_update_client_info(client):
    """Updating a client should persist the new agent type and hostname."""
    auth_client = Client(grant_type=GrantType.client_credentials)
    agent_type = 'BackUpAgent1'
    hostname = 'NewHost'
    updated = update_client(
        auth_client, client['client_id'], agent_type, hostname,
    )
    assert updated
    assert updated['data']['hostname'] == hostname
    assert updated['data']['agent_type'] == agent_type
def test_get_client_info(client):
    """Retrieving client info should echo back the queried client id."""
    auth_client = Client(grant_type=GrantType.client_credentials)
    info = get_client_info(auth_client, client['client_id'])
    assert info['client_id'] == client['client_id']
| 36.925 | 77 | 0.786053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 310 | 0.209601 |
35b5a9f36ac319cd4dda6dbca8387f49972a915d | 6,276 | py | Python | holide1/src_test/unittests/test_school_holidays.py | SmartDataInnovationLab/holide-library | 2c2b023659fbaf40474caf693b57aafc98f45f7b | [
"MIT"
] | 1 | 2020-11-30T19:34:37.000Z | 2020-11-30T19:34:37.000Z | holide1/src_test/unittests/test_school_holidays.py | SmartDataInnovationLab/holide-library | 2c2b023659fbaf40474caf693b57aafc98f45f7b | [
"MIT"
] | 2 | 2021-06-08T09:25:47.000Z | 2021-06-08T09:26:34.000Z | holide1/src_test/unittests/test_school_holidays.py | SmartDataInnovationLab/holide-library | 2c2b023659fbaf40474caf693b57aafc98f45f7b | [
"MIT"
] | 1 | 2021-05-09T22:07:25.000Z | 2021-05-09T22:07:25.000Z | #!/usr/bin/env python3
import os, sys, inspect
# Make the project root importable: this file lives two directories below
# it, so walk up twice (dirname applied twice) before extending sys.path —
# the `from src...` / `from src_test...` imports below rely on this.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
parentdir = os.path.dirname(parentdir)
sys.path.insert(0, parentdir)
import unittest
import datetime
from datetime import date
from src.holide import Holide
from src_test.unittests.testhelper import school_holiday_data
from src_test.unittests.testhelper import test_zipcodes_data
from src_test.unittests.testhelper import test_federal_state_data
class SchoolHolidaysTest(unittest.TestCase):
    """Checks Holide's school-holiday lookups against known holiday and
    non-holiday spans from the test fixtures.

    The eight near-identical loops of the original were factored into the
    private helpers below; the public test method names are unchanged.
    """

    def setUp(self):
        # Holide instance backed by the cached holiday data in testfiles/.
        self.holideo = Holide.from_path(parentdir + '/testfiles/cache.json')

    @staticmethod
    def _dates_in_span(span):
        """Yield every date from span['starts_on'] to span['ends_on'], inclusive."""
        day = span['starts_on']
        while day <= span['ends_on']:
            yield day
            day += datetime.timedelta(days=1)

    def _check_federal_states(self, span_key, expected, use_iso_code=False):
        """Assert the federal-state lookup is truthy/falsy for every day of
        every span under `span_key` ('holiday' or 'not-holiday')."""
        assert_fn = self.assertTrue if expected else self.assertFalse
        for row in school_holiday_data.federal_state_holiday:
            federal_state = row['federal_state']
            region = federal_state
            label = federal_state
            if use_iso_code:
                region = test_federal_state_data.federal_state_iso[federal_state]
                label = federal_state + ' / ' + region
            for span in row[span_key]:
                for day in self._dates_in_span(span):
                    assert_fn(
                        self.holideo.is_school_holiday_in_federal_state(day, region),
                        label + ': ' + str(day))

    def _check_zipcodes(self, span_key, expected, all_zipcodes=False):
        """Assert the zipcode lookup is truthy/falsy for every day of every
        span under `span_key`, for one or all zipcodes of each state."""
        assert_fn = self.assertTrue if expected else self.assertFalse
        for row in school_holiday_data.federal_state_holiday:
            federal_state = row['federal_state']
            zipcodes = test_zipcodes_data.get_zipcodes(federal_state)
            if not all_zipcodes:
                zipcodes = zipcodes[:1]
            for zipcode in zipcodes:
                for span in row[span_key]:
                    for day in self._dates_in_span(span):
                        assert_fn(
                            self.holideo.is_school_holiday_at_zipcode(day, zipcode),
                            federal_state + ' (' + str(zipcode) + '): ' + str(day))

    def test_holidays_federal_states(self):
        self._check_federal_states('holiday', True)

    def test_holidays_federal_state_codes(self):
        self._check_federal_states('holiday', True, use_iso_code=True)

    def test_holidays_zipcodes(self):
        self._check_zipcodes('holiday', True)

    def test_holidays_many_zipcodes(self):
        self._check_zipcodes('holiday', True, all_zipcodes=True)

    def test_no_holidays_federal_states(self):
        self._check_federal_states('not-holiday', False)

    def test_no_holidays_federal_state_codes(self):
        self._check_federal_states('not-holiday', False, use_iso_code=True)

    def test_no_holidays_zipcodes(self):
        self._check_zipcodes('not-holiday', False)

    def test_no_holidays_many_zipcodes(self):
        self._check_zipcodes('not-holiday', False, all_zipcodes=True)

    def test_zipcode_belongs_to_multiple_federal_states(self):
        # A zipcode spanning several federal states is ambiguous and must raise.
        for zip_code in test_zipcodes_data.more_than_one_federal_state:
            with self.assertRaises(Exception):
                self.holideo.is_school_holiday_at_zipcode(date(2019, 5, 5), zip_code)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| 39.721519 | 110 | 0.688177 | 5,704 | 0.908859 | 0 | 0 | 0 | 0 | 0 | 0 | 485 | 0.077279 |
35b60f638217a742f0dcf90d5b6242bfd1caa4d0 | 4,705 | py | Python | migrations/migrate.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | migrations/migrate.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | migrations/migrate.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | import pkgutil
import sys
from argparse import ArgumentParser
from importlib import import_module
from typing import Any, List, Optional, Type

from datastore.migrations import BaseMigration, MigrationException, PrintFunction, setup
class BadMigrationModule(MigrationException):
    """Raised when a module in the migrations package has no valid 'Migration' class."""
    pass
class InvalidMigrationCommand(MigrationException):
    """Raised by MigrationWrapper.execute_command() for an unknown command string."""
    def __init__(self, command: str) -> None:
        super().__init__(f"Invalid migration command: {command}")
class MigrationWrapper:
    """Thin facade over the datastore migration handler.

    Loads every migration class from the migrations package, registers
    them with a freshly set-up handler, and maps textual commands onto
    handler calls.
    """

    def __init__(self, verbose: bool = False, print_fn: PrintFunction = print) -> None:
        migrations = MigrationWrapper.load_migrations()
        self.handler = setup(verbose, print_fn)
        self.handler.register_migrations(*migrations)

    @staticmethod
    def load_migrations(
        base_migration_module_pypath: Optional[str] = None,  # annotation fixed: was `str = None`
    ) -> List[Type[BaseMigration]]:
        """Import every module of the migrations package and collect its Migration class.

        Raises BadMigrationModule if a module lacks a 'Migration' class or
        that class does not inherit from BaseMigration.
        """
        if not base_migration_module_pypath:
            # Derive the package path relative to this module; when run as a
            # script (__main__) fall back to the plain "migrations" package.
            base_module = __name__.rsplit(".", 1)[0]
            if base_module == "__main__":
                base_migration_module_pypath = "migrations"
            else:
                base_migration_module_pypath = base_module + ".migrations"
        base_migration_module = import_module(base_migration_module_pypath)
        # Only plain modules can contain migrations; sub-packages are skipped.
        module_names = {
            name
            for _, name, is_pkg in pkgutil.iter_modules(base_migration_module.__path__)  # type: ignore
            if not is_pkg
        }
        migration_classes: List[Type[BaseMigration]] = []
        for module_name in module_names:
            module_pypath = f"{base_migration_module_pypath}.{module_name}"
            migration_module = import_module(module_pypath)
            if not hasattr(migration_module, "Migration"):
                raise BadMigrationModule(
                    f"The module {module_pypath} does not have a class called 'Migration'"
                )
            migration_class = migration_module.Migration  # type: ignore
            if not issubclass(migration_class, BaseMigration):
                raise BadMigrationModule(
                    f"The class 'Migration' in module {module_pypath} does not inherit from 'BaseMigration'"
                )
            migration_classes.append(migration_class)
        return migration_classes

    def execute_command(self, command: str) -> Any:
        """Dispatch `command` to the handler; only 'stats' yields a value.

        Raises InvalidMigrationCommand for unknown commands.
        """
        # Dispatch table replaces the original if/elif chain.
        commands = {
            "migrate": self.handler.migrate,
            "finalize": self.handler.finalize,
            "reset": self.handler.reset,
            "clear-collectionfield-tables": self.handler.delete_collectionfield_aux_tables,
            "stats": self.handler.get_stats,
        }
        try:
            action = commands[command]
        except KeyError:
            raise InvalidMigrationCommand(command) from None
        return action()
def get_parser() -> ArgumentParser:
    """Build the command line parser for the migration tool.

    Provides a global ``--verbose`` flag and one sub-command per migration
    action (migrate, finalize, reset, clear-collectionfield-tables, stats).
    The chosen sub-command ends up in ``args.command`` (None if omitted).

    Fixes typos in the user-facing help text: "allying" -> "applying",
    "auxillary" -> "auxiliary", "offile" -> "offline".
    """
    parent_parser = ArgumentParser(
        description="Migration tool for applying migrations to the datastore."
    )
    parent_parser.add_argument(
        "--verbose",
        "-v",
        required=False,
        default=False,
        action="store_true",
        help="Enable verbose output",
    )
    subparsers = parent_parser.add_subparsers(title="commands", dest="command")
    subparsers.add_parser(
        "migrate",
        add_help=False,
        description="The migrate parser",
        help="Migrate the datastore.",
    )
    subparsers.add_parser(
        "finalize",
        add_help=False,
        description="The finalize parser",
        help="Finalize the datastore migrations.",
    )
    subparsers.add_parser(
        "reset",
        add_help=False,
        description="The reset parser",
        help="Reset all ongoing (not finalized) migrations.",
    )
    subparsers.add_parser(
        "clear-collectionfield-tables",
        add_help=False,
        description="The clear-collectionfield-tables parser",
        help="Clear all data from these auxiliary tables. Can be done to clean up diskspace, but only when the datastore is offline.",
    )
    subparsers.add_parser(
        "stats",
        add_help=False,
        description="The stats parser",
        help="Print some stats about the current migration state.",
    )
    return parent_parser
def main() -> int:
    """CLI entry point; returns a process exit code (0 on success)."""
    parser = get_parser()
    args = parser.parse_args()
    handler = MigrationWrapper(args.verbose)
    # Guard clause: bail out early when no sub-command was given.
    if not args.command:
        print("No command provided.\n")
        parser.print_help()
        return 1
    try:
        handler.execute_command(args.command)
    except InvalidMigrationCommand:
        print(f"Unknown command {args.command}\n")
        parser.print_help()
        return 1
    return 0
if __name__ == "__main__":
sys.exit(main())
| 32.902098 | 133 | 0.636557 | 2,523 | 0.536238 | 0 | 0 | 1,514 | 0.321785 | 0 | 0 | 1,039 | 0.220829 |
35b754ce093c02acd53d79d1aafbde7ead2584ed | 2,221 | py | Python | src/refactor/parallel.py | luislorenzom/b33th0v3n | cf2665a51ed6779093c273cf9d7c404dd9222493 | [
"MIT"
] | null | null | null | src/refactor/parallel.py | luislorenzom/b33th0v3n | cf2665a51ed6779093c273cf9d7c404dd9222493 | [
"MIT"
] | null | null | null | src/refactor/parallel.py | luislorenzom/b33th0v3n | cf2665a51ed6779093c273cf9d7c404dd9222493 | [
"MIT"
] | null | null | null | from types import FunctionType
import numpy as np
import pandas as pd
from functools import partial
from multiprocessing import Pool, cpu_count
def get_levenshtein_distance(str1: str, str2: str) -> float:
    """
    Computes the Levenshtein distance between two strings
    :param str1: first string
    :param str2: second string
    :return: the edit distance between the two params
    """
    rows = len(str1) + 1
    cols = len(str2) + 1
    dist = np.zeros((rows, cols))
    # Base cases: transforming to/from the empty string costs its length.
    dist[:, 0] = np.arange(rows)
    dist[0, :] = np.arange(cols)
    for r in range(1, rows):
        for c in range(1, cols):
            substitution_cost = 0 if str1[r - 1] == str2[c - 1] else 1
            dist[r, c] = min(
                dist[r - 1, c] + 1,                       # deletion
                dist[r - 1, c - 1] + substitution_cost,   # substitution / match
                dist[r, c - 1] + 1,                       # insertion
            )
    return dist[rows - 1, cols - 1]
def add_distance_column(filename: str, df: pd.DataFrame) -> pd.DataFrame:
    """
    Add new column to df which contains distance computed using filename
    :param filename: filename to compare to df
    :param df: df with artist or tracks names
    :return: df with new column (the input df is also modified in place)
    """
    # NOTE(review): applymap applies element-wise over the WHOLE frame, so
    # this assumes df has a single string column; assigning the resulting
    # frame to one column would not work for wider frames — confirm callers.
    df['distances'] = df.applymap(lambda x: get_levenshtein_distance(filename, x))
    return df
def parallelize_dataframe(df: pd.DataFrame, func: FunctionType, word: str, n_cores: int = cpu_count() - 1) -> pd.DataFrame:
    """
    Apply certain func against dataframe parallelling the application
    :param df: DataFrame which contains the required by func
    :param func: func that will be parallelize through df; it must be
        picklable (module-level) and accept (word, df_chunk)
    :param word: to compute the distance using
    :param n_cores: number of worker processes to parallelize the function
    :return: DataFrame after func applied
    """
    # NOTE(review): the default n_cores is evaluated once at import time.
    df_split = np.array_split(df, n_cores)  # TODO: add df length check to get n_cores
    pool = Pool(n_cores)
    # Bind the invariant first argument so pool.map only varies the chunk.
    f = partial(func, word)
    # Workers each process one chunk; concat restores a single frame.
    df = pd.concat(pool.map(f, df_split))
    pool.close()
    pool.join()
    return df
| 30.013514 | 123 | 0.594327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 791 | 0.356146 |
35b8e7d7bd8c548bc9dcbb5f851dbbc98f40925a | 3,540 | py | Python | pygbif/caching.py | bartaelterman/pygbif | 110e0cbcd8cb2b1813f6e27d66e95a95579d4643 | [
"MIT"
] | 37 | 2015-03-20T13:50:27.000Z | 2021-07-10T11:23:16.000Z | pygbif/caching.py | bartaelterman/pygbif | 110e0cbcd8cb2b1813f6e27d66e95a95579d4643 | [
"MIT"
] | 82 | 2015-10-30T06:12:48.000Z | 2021-07-13T12:20:57.000Z | pygbif/caching.py | bartaelterman/pygbif | 110e0cbcd8cb2b1813f6e27d66e95a95579d4643 | [
"MIT"
] | 12 | 2015-03-20T13:50:29.000Z | 2020-09-23T08:53:46.000Z | import requests_cache
import os.path
import tempfile
try:
from requests_cache import remove_expired_responses
except ModuleNotFoundError:
from requests_cache.core import remove_expired_responses
def caching(
    cache=False,
    name=None,
    backend="sqlite",
    expire_after=86400,
    allowable_codes=(200,),
    allowable_methods=("GET",),
):
    """
    pygbif caching management

    :param cache: [bool] if ``True`` all http requests are cached. if ``False``
        (default), no http requests are cached.
    :param name: [str] the cache name. when backend=sqlite, this is the path for
        the sqlite file, ignored if sqlite not used. if not set, the file is put
        in your temporary directory, and therefore is cleaned up/deleted after
        closing your python session
    :param backend: [str] the backend, one of:

     - ``sqlite`` sqlite database (default)
     - ``memory`` not persistent, stores all data in Python dict in memory
     - ``mongodb`` (experimental) MongoDB database (pymongo < 3.0 required)
     - ``redis`` stores all data on a redis data store (redis required)

    :param expire_after: [str] timedelta or number of seconds after cache will
        be expired or None (default) to ignore expiration. default: 86400
        seconds (24 hrs)
    :param allowable_codes: [tuple] limit caching only for response with this
        codes (default: 200)
    :param allowable_methods: [tuple] cache only requests of this methods
        (default: 'GET')
    :return: the selected cache settings, as a dict

    Note: setting cache=False will turn off caching, but the backend data still
    persists. thus, you can turn caching back on without losing your cache.
    this also means if you want to delete your cache you have to do it yourself.

    Note: on loading pygbif, we clean up expired responses
    """
    default_name = "pygbif_requests_cache"
    if not cache:
        requests_cache.uninstall_cache()
        cache_name = None
    else:
        # Fix: honor a user-supplied cache name. Previously the ``name``
        # parameter was ignored and the default name was always used, despite
        # the documented "set path to a sqlite file" usage.
        if name is not None:
            cache_name = name
        elif backend == "sqlite":
            # Default sqlite file lives in the temp dir so it disappears with
            # the session's temporary files.
            cache_name = os.path.join(tempfile.gettempdir(), default_name)
        else:
            cache_name = default_name
        requests_cache.install_cache(
            cache_name=cache_name, backend=backend, expire_after=expire_after
        )
    # Runs in both branches, matching the original behavior.
    remove_expired_responses()
    return {
        "cache": cache,
        "name": cache_name,
        "backend": backend,
        "expire_after": expire_after,
        "allowable_codes": allowable_codes,
        "allowable_methods": allowable_methods,
    }
| 34.705882 | 90 | 0.663559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,554 | 0.720655 |
35bbfe712541d7c2876a8f989c17d4e15afb5e72 | 1,441 | py | Python | mlir/lib/Bindings/Python/mlir/dialects/linalg/opdsl/lang/types.py | MochalovaAn/llvm | 528aa5ca4aa9df447dc3497ef19da3b124e88d7d | [
"Apache-2.0"
] | null | null | null | mlir/lib/Bindings/Python/mlir/dialects/linalg/opdsl/lang/types.py | MochalovaAn/llvm | 528aa5ca4aa9df447dc3497ef19da3b124e88d7d | [
"Apache-2.0"
] | null | null | null | mlir/lib/Bindings/Python/mlir/dialects/linalg/opdsl/lang/types.py | MochalovaAn/llvm | 528aa5ca4aa9df447dc3497ef19da3b124e88d7d | [
"Apache-2.0"
] | null | null | null | # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Facility for symbolically referencing type variables.
Type variables are instances of the TypeVar class, which is uniqued by name.
An "expando" accessor `TV` is provided that generates a named TypeVar for
any attribute access:
>>> TV.T
TypeVar(T)
>>> TV.T is TV.U
False
>>> TV.T is TV.T
True
"""
from enum import Enum
from typing import Dict
__all__ = [
"TypeVar",
"TV",
# TypeVar aliases.
"T",
"U",
"V",
]
class TypeVar:
    """A replaceable type variable.

    Instances are interned by name: constructing a TypeVar whose name already
    exists returns the existing instance.
    """

    ALL_TYPEVARS = dict()  # type: Dict[str, "TypeVar"]

    def __new__(cls, name: str):
        # EAFP: reuse an interned instance if one exists for this name.
        try:
            return cls.ALL_TYPEVARS[name]
        except KeyError:
            instance = super().__new__(cls)
            instance.name = name
            cls.ALL_TYPEVARS[name] = instance
            return instance

    def __repr__(self):
        return "TypeVar({})".format(self.name)

    @classmethod
    def create_expando(cls):
        """Create an expando class that creates unique type vars on attr access."""

        class ExpandoTypeVars:
            def __getattr__(self, attr_name):
                return cls(attr_name)

        return ExpandoTypeVars()
# Expando access via TV.foo
TV = TypeVar.create_expando()
# Some common type name aliases.
T = TV.T
U = TV.U
V = TV.V
| 20.585714 | 80 | 0.66898 | 665 | 0.461485 | 0 | 0 | 232 | 0.160999 | 0 | 0 | 811 | 0.562804 |
35bc18ea4360aa47732f68c69e53ac55b41e9de8 | 2,867 | py | Python | run_exp/run_theory.py | andeyeluguo/AI_physicist | b242204da5a284cd22175bae66e6b4f79814ceeb | [
"MIT"
] | 25 | 2019-10-22T16:49:45.000Z | 2021-12-21T03:53:59.000Z | run_exp/run_theory.py | andeyeluguo/AI_physicist | b242204da5a284cd22175bae66e6b4f79814ceeb | [
"MIT"
] | 1 | 2021-01-21T15:57:19.000Z | 2021-04-04T15:51:27.000Z | run_exp/run_theory.py | andeyeluguo/AI_physicist | b242204da5a284cd22175bae66e6b4f79814ceeb | [
"MIT"
] | 10 | 2019-10-30T03:42:32.000Z | 2022-03-18T14:20:48.000Z | import os, sys
exp_id=[
"exp1.0",
]
env_source=[
"file",
]
exp_mode = [
"continuous",
#"newb",
#"base",
]
num_theories_init=[
4,
]
pred_nets_neurons=[
8,
]
pred_nets_activation=[
"linear",
# "leakyRelu",
]
domain_net_neurons=[
8,
]
domain_pred_mode=[
"onehot",
]
mse_amp=[
1e-7,
]
simplify_criteria=[
'\("DLs",0,3,"relative"\)',
]
scheduler_settings=[
'\("ReduceLROnPlateau",40,0.1\)',
]
optim_type=[
'\("adam",5e-3\)',
]
optim_domain_type=[
'\("adam",1e-3\)',
]
reg_amp=[
1e-8,
]
reg_domain_amp = [
1e-5,
]
batch_size = [
2000,
]
loss_core = [
"DLs",
]
loss_order = [
-1,
]
loss_decay_scale = [
"None",
]
is_mse_decay = [
False,
]
loss_balance_model_influence = [
False,
]
num_examples = [
20000,
]
iter_to_saturation = [
5000,
]
MDL_mode = [
"both",
]
num_output_dims = [
2,
]
num_layers = [
3,
]
is_pendulum = [
False,
]
date_time = [
"10-9",
]
seed = [
0,
30,
60,
90,
120,
150,
180,
210,
240,
270,
]
def assign_array_id(array_id, param_list):
    """Decode *array_id* into one choice per parameter list (mixed radix)."""
    if not param_list:
        # Any remainder here means array_id exceeded the number of combinations.
        print("redundancy: {0}".format(array_id))
        return []
    options = param_list[-1]
    radix = len(options)
    chosen = options[array_id % radix]
    return assign_array_id(int(array_id / radix), param_list[:-1]) + [chosen]
array_id = int(sys.argv[1])
param_list = [exp_id,
env_source,
exp_mode,
num_theories_init,
pred_nets_neurons,
pred_nets_activation,
domain_net_neurons,
domain_pred_mode,
mse_amp,
simplify_criteria,
scheduler_settings,
optim_type,
optim_domain_type,
reg_amp,
reg_domain_amp,
batch_size,
loss_core,
loss_order,
loss_decay_scale,
is_mse_decay,
loss_balance_model_influence,
num_examples,
iter_to_saturation,
MDL_mode,
num_output_dims,
num_layers,
is_pendulum,
date_time,
seed,
]
param_chosen = assign_array_id(array_id, param_list)
exec_str = "python ../theory_learning/theory_exp.py"
for param in param_chosen:
exec_str += " {0}".format(param)
exec_str += " {0}".format(array_id)
print(param_chosen)
print(exec_str)
from shutil import copyfile
current_PATH = os.path.dirname(os.path.realpath(__file__))
def make_dir(filename):
    """Create the parent directory of *filename* if it does not exist.

    :param filename: path whose directory component should exist afterwards
    """
    # os is already imported at module level; the previous nested
    # import os / import errno were redundant.
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname):
        print("directory {0} does not exist, created.".format(dirname))
        # exist_ok=True replaces the hand-rolled EEXIST guard: it tolerates
        # the race where another process creates the directory between the
        # check above and this call.
        os.makedirs(dirname, exist_ok=True)
filename = "../data/" + "{0}_{1}/".format(param_chosen[0], param_chosen[-2])
make_dir(filename)
fc = "run_theory.py"
if not os.path.isfile(filename + fc):
copyfile(current_PATH + "/" + fc, filename + fc)
os.system(exec_str)
| 14.123153 | 89 | 0.647018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 365 | 0.127311 |
35bd4ab483f73871726c5a33d5a113b95bd2e29f | 10,304 | py | Python | lib/dataset/cao_cifar.py | jrcai/ACE | 1e2b04d1cf4bb517f107664ac489a1a96e95a4c1 | [
"MIT"
] | 18 | 2021-08-06T01:15:32.000Z | 2022-03-14T07:09:39.000Z | lib/dataset/cao_cifar.py | jrcai/BagofTricks-LT | d75b195367e3d535d316d134ec4bbef4bb7fcbdd | [
"MIT"
] | 2 | 2021-09-24T03:29:17.000Z | 2021-11-22T19:18:58.000Z | lib/dataset/cao_cifar.py | jrcai/BagofTricks-LT | d75b195367e3d535d316d134ec4bbef4bb7fcbdd | [
"MIT"
] | 2 | 2021-10-17T18:09:20.000Z | 2021-11-08T04:19:19.000Z | # To ensure fairness, we use the same code in LDAM (https://github.com/kaidic/LDAM-DRW) to produce long-tailed CIFAR datasets.
import torchvision
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
import random
import os
import cv2
import time
import json
import copy
from utils.utils import get_category_list
import math
class IMBALANCECIFAR10(torchvision.datasets.CIFAR10):
    """Long-tailed CIFAR-10 wrapper (imbalance generation follows LDAM-DRW).

    In "train" mode the per-class sample counts follow an exponential (or
    step) imbalance profile; in eval mode the full test set is used. Each
    entry of ``self.data`` is a dict {'image': ndarray, 'category_id': int}.
    """
    cls_num = 10  # number of classes in CIFAR-10

    def __init__(self, mode, cfg, root = '~/dataset/cifar', imb_type='exp',
                 transform=None, target_transform=None, download=True):
        # mode: "train" or anything else (treated as eval/test).
        train = True if mode == "train" else False
        super(IMBALANCECIFAR10, self).__init__(root, train, transform, target_transform, download)
        self.cfg = cfg
        self.train = train
        self.cfg = cfg  # NOTE(review): duplicate assignment, harmless
        self.input_size = cfg.INPUT_SIZE
        self.color_space = cfg.COLOR_SPACE
        print("Use {} Mode to train network".format(self.color_space))
        rand_number = cfg.DATASET.IMBALANCECIFAR.RANDOM_SEED
        if self.train:
            # Seed both RNGs so the imbalanced subset is reproducible.
            np.random.seed(rand_number)
            random.seed(rand_number)
            imb_factor = self.cfg.DATASET.IMBALANCECIFAR.RATIO
            img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type, imb_factor)
            self.gen_imbalanced_data(img_num_list)
            self.transform = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
            ])
        else:
            self.data_format_transform()
            self.transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
            ])
        self.data = self.all_info
        '''
        load the generated CAM-based dataset
        '''
        if self.cfg.DATASET.USE_CAM_BASED_DATASET and mode == 'train':
            assert os.path.isfile(self.cfg.DATASET.CAM_DATA_JSON_SAVE_PATH), \
                'the CAM-based generated json file does not exist!'
            self.data = json.load(open(self.cfg.DATASET.CAM_DATA_JSON_SAVE_PATH))
            new_data = []
            for info in self.data:
                # Entries without 'fpath' already carry the raw image array.
                if 'fpath' not in info:
                    new_data.append(copy.deepcopy(info))
                    continue
                img = self._get_image(info)
                new_data.append({
                    'image': img,
                    'category_id': info['category_id']
                })
            self.data = new_data
        print("{} Mode: Contain {} images".format(mode, len(self.data)))
        if self.cfg.TRAIN.SAMPLER.TYPE == "weighted sampler" and self.train:
            self.class_weight, self.sum_weight = self.get_weight(self.get_annotations(), self.cls_num)
            self.class_dict = self._get_class_dict()
            print('-'*20+'in imbalance cifar dataset'+'-'*20)
            print('class_dict is: ')
            print(self.class_dict)
            print('class_weight is: ')
            print(self.class_weight)
            num_list, cat_list = get_category_list(self.get_annotations(), self.cls_num, self.cfg)
            # Sampling distributions: by instance frequency, uniform per
            # class, and square-root-smoothed frequency.
            self.instance_p = np.array([num / sum(num_list) for num in num_list])
            self.class_p = np.array([1/self.cls_num for _ in num_list])
            num_list = [math.sqrt(num) for num in num_list]
            self.square_p = np.array([num / sum(num_list) for num in num_list])
            # NOTE(review): _get_class_dict() returns a single dict; unpacking
            # it into two names iterates its keys and raises ValueError unless
            # there are exactly two classes — confirm intended behavior.
            self.class_dict, self.origin_class_dict = self._get_class_dict()

    def update(self, epoch):
        """Advance the epoch counter and refresh the progressive sampling
        distribution (interpolates instance-balanced -> class-balanced)."""
        self.epoch = epoch
        print('epoch in dataset', self.epoch)
        if self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "progressive":
            self.progress_p = epoch/self.cfg.TRAIN.MAX_EPOCH * self.class_p + (1-epoch/self.cfg.TRAIN.MAX_EPOCH)*self.instance_p
            print('self.progress_p', self.progress_p)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        if self.cfg.TRAIN.SAMPLER.TYPE == "weighted sampler" and self.train:
            assert self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE in ["balance", 'square', 'progressive']
            # Re-sample the index according to the configured class
            # distribution instead of honoring the loader's index.
            if self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "balance":
                sample_class = random.randint(0, self.cls_num - 1)
            elif self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "square":
                sample_class = np.random.choice(np.arange(self.cls_num), p=self.square_p)
            else:
                sample_class = np.random.choice(np.arange(self.cls_num), p=self.progress_p)
            sample_indexes = self.class_dict[sample_class]
            index = random.choice(sample_indexes)
        img, target = self.data[index]['image'], self.data[index]['category_id']
        meta = dict()
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target, meta

    def get_img_num_per_cls(self, cls_num, imb_type, imb_factor):
        """Return per-class sample counts for the requested imbalance.

        'exp': counts decay exponentially from img_max down to
        img_max * imb_factor; 'step': first half of the classes keep img_max,
        second half get img_max * imb_factor; anything else: balanced.
        """
        img_max = len(self.data) / cls_num
        img_num_per_cls = []
        if imb_type == 'exp':
            for cls_idx in range(cls_num):
                num = img_max * (imb_factor**(cls_idx / (cls_num - 1.0)))
                img_num_per_cls.append(int(num))
        elif imb_type == 'step':
            for cls_idx in range(cls_num // 2):
                img_num_per_cls.append(int(img_max))
            for cls_idx in range(cls_num // 2):
                img_num_per_cls.append(int(img_max * imb_factor))
        else:
            img_num_per_cls.extend([int(img_max)] * cls_num)
        return img_num_per_cls

    def reset_epoch(self, cur_epoch):
        # Setter used by training loops that track the current epoch here.
        self.epoch = cur_epoch

    def _get_class_dict(self):
        """Map category_id -> list of sample indices in self.data."""
        class_dict = dict()
        for i, anno in enumerate(self.data):
            cat_id = anno["category_id"]
            if not cat_id in class_dict:
                class_dict[cat_id] = []
            class_dict[cat_id].append(i)
        return class_dict

    def get_weight(self, annotations, num_classes):
        """Return (per-class inverse-frequency weights, their sum)."""
        num_list = [0] * num_classes
        cat_list = []
        for anno in annotations:
            category_id = anno["category_id"]
            num_list[category_id] += 1
            cat_list.append(category_id)
        max_num = max(num_list)
        # Rarer classes get larger weights (max_count / class_count).
        class_weight = [max_num / i for i in num_list]
        sum_weight = sum(class_weight)
        return class_weight, sum_weight

    def _get_trans_image(self, img_idx):
        # Return the transformed image with a leading batch dimension.
        now_info = self.data[img_idx]
        img = now_info['image']
        img = Image.fromarray(img)
        return self.transform(img)[None, :, :, :]

    def get_num_classes(self):
        # Number of classes for this dataset variant.
        return self.cls_num

    def get_annotations(self):
        """Return [{'category_id': int}, ...] for every sample."""
        annos = []
        for d in self.all_info:
            annos.append({'category_id': int(d['category_id'])})
        return annos

    def imread_with_retry(self, fpath):
        """Read an image from disk, retrying on transient failures.

        NOTE(review): if cv2.imread returns None on every attempt, the loop
        falls through and this returns None implicitly — confirm callers.
        """
        retry_time = 10
        for k in range(retry_time):
            try:
                img =cv2.imread(fpath)
                if img is None:
                    print("img is None, try to re-read img")
                    continue
                return img#.convert('RGB')
            except Exception as e:
                if k == retry_time - 1:
                    assert False, "pillow open {} failed".format(fpath)
                time.sleep(0.1)

    def _get_image(self, now_info):
        # Load the image referenced by the annotation's 'fpath' field,
        # converting BGR (cv2 default) to RGB when configured.
        fpath = os.path.join(now_info["fpath"])
        img = self.imread_with_retry(fpath)
        if self.color_space == "RGB":
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img

    def gen_imbalanced_data(self, img_num_per_cls):
        """Subsample the balanced dataset to the given per-class counts."""
        new_data = []
        targets_np = np.array(self.targets, dtype=np.int64)
        classes = np.unique(targets_np)
        # np.random.shuffle(classes)
        self.num_per_cls_dict = dict()
        for the_class, the_img_num in zip(classes, img_num_per_cls):
            self.num_per_cls_dict[the_class] = the_img_num
            idx = np.where(targets_np == the_class)[0]
            # Random subset of this class, sized by the imbalance profile.
            np.random.shuffle(idx)
            selec_idx = idx[:the_img_num]
            for img in self.data[selec_idx, ...]:
                new_data.append({
                    'image': img,
                    'category_id': the_class
                })
        self.all_info = new_data

    def data_format_transform(self):
        """Convert the torchvision (data, targets) arrays to the list-of-dicts
        format used throughout this class (eval mode, no subsampling)."""
        new_data = []
        targets_np = np.array(self.targets, dtype=np.int64)
        assert len(targets_np) == len(self.data)
        for i in range(len(self.data)):
            new_data.append({
                'image': self.data[i],
                'category_id': targets_np[i],
            })
        self.all_info = new_data

    def __len__(self):
        # Number of (possibly subsampled) entries in this dataset.
        return len(self.data)
class IMBALANCECIFAR100(IMBALANCECIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
    This is a subclass of the `CIFAR10` Dataset.

    Only the archive metadata and the class count differ; all imbalance
    logic is inherited from IMBALANCECIFAR10.
    """
    base_folder = 'cifar-100-python'
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    # (archive member, md5) pairs checked by torchvision on load.
    train_list = [
        ['train', '16019d7e3df5f24257cddd939b257f8d'],
    ]
    test_list = [
        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
    ]
    meta = {
        'filename': 'meta',
        'key': 'fine_label_names',
        'md5': '7973b15100ade9c7d40fb424638fde48',
    }
    cls_num = 100
if __name__ == '__main__':
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = IMBALANCECIFAR100(root='/mnt/data3/zhangys/data/cifar', train=True,
download=True, transform=transform)
trainloader = iter(trainset)
data, label = next(trainloader)
| 36.8 | 128 | 0.58948 | 9,570 | 0.928766 | 0 | 0 | 0 | 0 | 0 | 0 | 1,519 | 0.147418 |
35be620af3553df16e0f3406f42cf348349c4566 | 6,428 | py | Python | fs.py | titouanc/docfub | a06fdb9100c85ac3d80aa6e3102029436963d27c | [
"MIT"
] | 1 | 2018-03-26T15:58:49.000Z | 2018-03-26T15:58:49.000Z | fs.py | titouanc/docfub | a06fdb9100c85ac3d80aa6e3102029436963d27c | [
"MIT"
] | 1 | 2021-06-01T22:25:28.000Z | 2021-06-01T22:25:28.000Z | fs.py | titouanc/docfub | a06fdb9100c85ac3d80aa6e3102029436963d27c | [
"MIT"
] | null | null | null | import os
import errno
import stat
import logging
from io import BytesIO
from time import time, mktime, strptime
from fuse import FuseOSError, Operations, LoggingMixIn
logger = logging.getLogger('dochub_fs')
def wrap_errno(func):
    """
    @brief Transform Exceptions happening inside func into meaningful
           errno if possible

    KeyError (missing node) maps to ENOENT and ValueError (invalid operation
    on a node) maps to EINVAL, the errno-style failures FUSE expects.
    """
    # functools.wraps preserves the wrapped callable's name/docstring, which
    # the bare wrapper previously discarded.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except KeyError:
            raise FuseOSError(errno.ENOENT)
        except ValueError:
            raise FuseOSError(errno.EINVAL)
    return wrapper
class Node:
    """
    @brief Map Dochub API nodes onto filesystem nodes.

    Takes a JSON serialized representation of Dochub objects,
    and expose useful attributes. The node kind is inferred from which keys
    are present in the serialized dict (see the is_* properties).
    """
    def __init__(self, serialized, fs):
        self.serialized = serialized
        self.fs = fs

    def sub_node(self, serialized):
        # Child nodes share this node's filesystem handle.
        return Node(serialized, self.fs)

    @property
    def is_category(self):
        # Categories carry both sub-categories and courses.
        return 'courses' in self.serialized and 'children' in self.serialized

    @property
    def is_course(self):
        return 'slug' in self.serialized

    @property
    def is_document(self):
        return 'votes' in self.serialized

    @property
    def is_dir(self):
        # Categories and courses are exposed as directories.
        return self.is_category or self.is_course

    @property
    def name(self):
        """Filesystem entry name derived from the serialized payload."""
        if self.is_course:
            return "{slug} {name}".format(**self.serialized)
        if self.is_document:
            return "{name}{file_type}".format(**self.serialized)
        return self.serialized['name']

    @property
    def size(self):
        # Directories report the conventional 4096 bytes.
        return self.serialized.get('file_size', 4096)

    @property
    def ctime(self):
        # Documents carry an ISO timestamp; other nodes use the mount time.
        if 'date' in self.serialized:
            t = strptime(self.serialized['date'], "%Y-%m-%dT%H:%M:%S.%fZ")
            return int(mktime(t))
        return self.fs.mount_time
    # Access and modification times are not tracked separately by the API.
    atime = ctime
    mtime = ctime

    def getattr(self):
        """Stat-like attribute dict: read-only, owned by the mounting user."""
        mode = (0o500|stat.S_IFDIR) if self.is_dir else (0o400|stat.S_IFREG)
        return {
            'st_mode': mode,
            'st_ctime': self.ctime,
            'st_mtime': self.mtime,
            'st_atime': self.atime,
            'st_nlink': 1,
            'st_uid': self.fs.uid,
            'st_gid': self.fs.gid,
            'st_size': self.size,
        }

    @property
    def children(self):
        """Directory entries as {name: Node}; raises ValueError on files.

        Course children are fetched lazily from the API on each access.
        """
        if not self.is_dir:
            raise ValueError(
                "Attempt to get direcctory children on non-directory %s" %
                self.serialized['name']
            )
        if self.is_category:
            children = self.serialized['children'] + self.serialized['courses']
        elif self.is_course:
            r = self.fs.api.get_course(self.serialized['slug'])
            children = r['document_set']
        return {child.name: child for child in map(self.sub_node, children)}

    @property
    def content(self):
        """Raw document bytes fetched from the API; ValueError on non-files."""
        if not self.is_document:
            raise ValueError(
                "Attempt to get file content on non-file %s" %
                self.serialized['name']
            )
        return self.fs.api.get_document(self.serialized['id'])

    def find(self, path):
        """Resolve *path* (a list of entry names) relative to this node."""
        if len(path) > 0:
            # KeyError here is translated to ENOENT by @wrap_errno callers.
            return self.children[path[0]].find(path[1:])
        else:
            return self
class DocumentUpload:
    """
    @brief A file created locally, buffered in memory until it is posted to
           the server (see do_upload).
    """

    def __init__(self, fs, course, name):
        self.fs = fs
        self.course = course
        # Split on the first dot only: "a.b.c" -> name "a", ext "b.c".
        self.name, self.ext = name.split('.', 1)
        self.io = BytesIO()
        now = time()
        self.ctime = now
        self.mtime = now
        self.atime = now

    @property
    def size(self):
        # The write position equals the number of bytes buffered so far.
        return self.io.tell()

    def getattr(self):
        """Stat-like attributes: a write-only regular file owned by the mounter."""
        return {
            'st_mode': 0o200 | stat.S_IFREG,
            'st_ctime': self.ctime,
            'st_mtime': self.mtime,
            'st_atime': self.atime,
            'st_nlink': 1,
            'st_uid': self.fs.uid,
            'st_gid': self.fs.gid,
            'st_size': self.size,
        }

    def do_upload(self):
        """Rewind the buffer and POST it to DocHub under its course."""
        self.io.seek(0)
        full_name = '.'.join([self.name, self.ext])
        self.fs.api.add_document(course_slug=self.course.serialized['slug'],
                                 name=self.name, file=self.io,
                                 filename=full_name)
def to_breadcrumbs(path):
    """Split *path* into its non-empty components, root first."""
    parts = []
    remainder = path
    while True:
        remainder, tail = os.path.split(remainder)
        if not tail:
            break
        # Prepend: os.path.split peels components off the right end.
        parts.insert(0, tail)
    return parts
class DochubFileSystem(LoggingMixIn, Operations):
    """
    @brief Implementation of filesystem operations

    Read access maps the DocHub tree onto directories/files; creating a file
    inside a course buffers the bytes and uploads them on release.
    """
    def __init__(self, api):
        self.api = api
        self.mount_time = int(time())
        self.uid, self.gid = os.getuid(), os.getgid()
        # path -> DocumentUpload for files currently being written.
        self.uploads = {}
        tree = self.api.get_tree()
        # The API is expected to return a single root node.
        assert len(tree) == 1
        self.tree = Node(tree[0], self)

    @wrap_errno
    def find_path(self, path):
        """Resolve a path to a Node or pending DocumentUpload (ENOENT if absent)."""
        if path in self.uploads:
            return self.uploads[path]
        return self.tree.find(to_breadcrumbs(path))

    def getattr(self, path, fh=None):
        return self.find_path(path).getattr()

    def readdir(self, path, fh=None):
        node = self.find_path(path)
        return ['.', '..'] + list(node.children.keys())

    def read(self, path, size, offset, fh=None):
        node = self.find_path(path)
        # node.content fetches the whole document; slice out what was asked.
        return node.content[offset:offset+size]

    def create(self, path, mode):
        """Start buffering a new document; only allowed inside a course."""
        directory, name = os.path.split(path)
        parent = self.find_path(directory)
        if not parent.is_course:
            raise Exception()
        if (mode & stat.S_IFREG):
            logger.info("Create file %s", path)
            self.uploads[path] = DocumentUpload(self, parent, name)
        # NOTE(review): returns a constant fake file handle.
        return 3

    def release(self, path, fh):
        """
        @brief When the file is closed, perform the actual upload to DocHub
        """
        # Empty buffers are silently dropped (never uploaded).
        if path in self.uploads and self.uploads[path].size > 0:
            upload = self.uploads.pop(path)
            upload.do_upload()

    def write(self, path, data, offset, fh=None):
        if path in self.uploads:
            upload = self.uploads[path]
            # Seek only on non-sequential writes.
            if offset != upload.size:
                upload.io.seek(offset)
            self.uploads[path].io.write(data)
            return len(data)
        # Writes to anything that is not a pending upload are rejected.
        return -1
| 28.192982 | 80 | 0.564872 | 5,629 | 0.8757 | 0 | 0 | 1,933 | 0.300716 | 0 | 0 | 1,053 | 0.163815 |
35beb2659c3525943e08592cd4e9ebc8b9fd9ed7 | 2,239 | py | Python | algolab_class_API/migrations/0011_auto_20190110_1307.py | KMU-algolab/algolab_class | fdf22cd10d5af71eae63e259c4f88f2b55b44ec7 | [
"MIT"
] | 1 | 2019-01-10T05:46:09.000Z | 2019-01-10T05:46:09.000Z | algolab_class_API/migrations/0011_auto_20190110_1307.py | KMU-algolab/algolab_class | fdf22cd10d5af71eae63e259c4f88f2b55b44ec7 | [
"MIT"
] | 7 | 2018-12-25T15:59:49.000Z | 2019-01-10T05:45:25.000Z | algolab_class_API/migrations/0011_auto_20190110_1307.py | KMU-algolab/algolab_class | fdf22cd10d5af71eae63e259c4f88f2b55b44ec7 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.4 on 2019-01-10 04:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (2019-01-10).

    Replaces the ``context``/``context_type`` columns on the board models
    with ``contents``/``contents_type`` and tightens several field
    definitions (write_time, the course manager FK, submit status choices).
    """

    dependencies = [
        ('algolab_class_API', '0010_submithistory'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='boardquestion',
            name='context',
        ),
        migrations.RemoveField(
            model_name='boardquestion',
            name='context_type',
        ),
        migrations.RemoveField(
            model_name='boardreply',
            name='context',
        ),
        migrations.AddField(
            model_name='boardquestion',
            name='contents',
            field=models.TextField(db_column='Contents', default='내용을 입력하세요.', verbose_name='내용'),
        ),
        migrations.AddField(
            model_name='boardquestion',
            name='contents_type',
            field=models.CharField(choices=[('NOTICE', '공지사항'), ('QUESTION', '질문')], db_column='ContentsType', default='QUESTION', max_length=10, verbose_name='글 종류'),
        ),
        migrations.AddField(
            model_name='boardreply',
            name='contents',
            field=models.TextField(db_column='Contents', default='내용을 입력하세요.', verbose_name='내용'),
        ),
        migrations.AlterField(
            model_name='boardquestion',
            name='write_time',
            field=models.DateTimeField(db_column='WriteTime', verbose_name='작성 시간'),
        ),
        migrations.AlterField(
            model_name='course',
            name='manager',
            field=models.ForeignKey(db_column='Manager', on_delete=django.db.models.deletion.DO_NOTHING, related_name='courseManager_set', to=settings.AUTH_USER_MODEL, verbose_name='교수자'),
        ),
        migrations.AlterField(
            model_name='submithistory',
            name='status',
            field=models.CharField(choices=[('NOT_SOLVED', 'NotSolved'), ('SOLVED', 'Solved'), ('COMPILE_ERROR', 'CompileError'), ('TIME_OVER', 'TimeOver'), ('RUNTIME_ERROR', 'RuntimeError'), ('SERVER_ERROR', 'ServerError')], db_column='Status', default='NOT_SOLVED', max_length=10, verbose_name='제출 결과'),
        ),
    ]
| 38.603448 | 305 | 0.604734 | 2,160 | 0.931436 | 0 | 0 | 0 | 0 | 0 | 0 | 717 | 0.309185 |
35befecc57c6a5bb3923fbaf46cad1525bfacb20 | 11,266 | py | Python | spiderpy/spiderapi.py | peternijssen/python-itho-daalderop-api | a4c2b1488e7f32b892004df1ccfeb902f8e19135 | [
"MIT"
] | null | null | null | spiderpy/spiderapi.py | peternijssen/python-itho-daalderop-api | a4c2b1488e7f32b892004df1ccfeb902f8e19135 | [
"MIT"
] | null | null | null | spiderpy/spiderapi.py | peternijssen/python-itho-daalderop-api | a4c2b1488e7f32b892004df1ccfeb902f8e19135 | [
"MIT"
] | null | null | null | """ Python wrapper for the Spider API """
from __future__ import annotations
import json
import logging
import time
from datetime import datetime, timedelta
from typing import Any, Dict, ValuesView
from urllib.parse import unquote
import requests
from spiderpy.devices.powerplug import SpiderPowerPlug
from spiderpy.devices.thermostat import SpiderThermostat
BASE_URL = "https://spider-api.ithodaalderop.nl"
AUTHENTICATE_URL = BASE_URL + "/api/tokens"
DEVICES_URL = BASE_URL + "/api/devices"
ENERGY_DEVICES_URL = BASE_URL + "/api/devices/energy/energyDevices"
POWER_PLUGS_URL = BASE_URL + "/api/devices/energy/smartPlugs"
ENERGY_MONITORING_URL = BASE_URL + "/api/monitoring/15/devices"
REFRESH_RATE = 120
_LOGGER = logging.getLogger(__name__)
class SpiderApi:
""" Interface class for the Spider API """
    def __init__(
        self, username: str, password: str, refresh_rate: int = REFRESH_RATE
    ) -> None:
        """ Constructor """
        # The API expects the username hex-encoded per character.
        # NOTE(review): lstrip("0x") strips a *character set*, not a prefix;
        # it works here only because hex() output has no leading-zero digit —
        # consider removeprefix("0x") instead.
        self._username = ""
        for char in username:
            self._username += hex(ord(char)).lstrip("0x")
        self._password = password
        self._thermostats: Dict[Any, Any] = {}  # device id -> SpiderThermostat
        self._power_plugs: Dict[Any, Any] = {}  # device id -> SpiderPowerPlug
        self._last_refresh: int = 0  # unix time of the last cache refresh
        self._access_token: str = ""
        self._refresh_token: str = ""
        # Start in the past so the first token check always re-authenticates.
        self._token_expires_at = datetime.now() - timedelta(days=1)
        self._token_expires_in = None
        self._refresh_rate: int = refresh_rate
    def update(self) -> None:
        """ Update the cache """
        current_time = int(time.time())
        # Throttle: only hit the API when the refresh window has elapsed.
        if current_time >= (self._last_refresh + self._refresh_rate):
            self.update_thermostats()
            self.update_power_plugs()
            self._last_refresh = current_time
    def update_thermostats(self) -> None:
        """ Retrieve thermostats """
        results = self._request_update(DEVICES_URL)
        # A failed request returns False; keep the previously cached devices.
        if results is False:
            return
        for thermostat in results:
            # NOTE(review): device type 105 appears to denote a thermostat —
            # confirm against the Spider API device-type list.
            if thermostat["type"] == 105:
                self._thermostats[thermostat["id"]] = SpiderThermostat(thermostat)
def get_thermostats(self) -> ValuesView[SpiderThermostat]:
""" Get all thermostats """
self.update()
return self._thermostats.values()
def get_thermostat(self, unique_id: str) -> SpiderThermostat | None:
""" Get a thermostat by id """
self.update()
if unique_id in self._thermostats:
return self._thermostats[unique_id]
return None
def set_temperature(self, thermostat: SpiderThermostat, temperature: float) -> bool:
""" Set the temperature. Unfortunately, the API requires the complete object"""
if thermostat.set_temperature(temperature):
url = DEVICES_URL + "/" + thermostat.id
try:
self._request_action(url, json.dumps(thermostat.data))
return True
except SpiderApiException:
_LOGGER.error(f"Unable to set temperature to {temperature}.")
return False
def set_operation_mode(
self, thermostat: SpiderThermostat, operation_mode: str
) -> bool:
""" Set the operation mode. Unfortunately, the API requires the complete object"""
if thermostat.set_operation_mode(operation_mode):
url = DEVICES_URL + "/" + thermostat.id
try:
self._request_action(url, json.dumps(thermostat.data))
return True
except SpiderApiException:
_LOGGER.error(
f"Unable to set operation mode to {operation_mode}. Is this operation mode supported?"
)
return False
def set_fan_speed(self, thermostat: SpiderThermostat, fan_speed: str) -> bool:
""" Set the fan speed. Unfortunately, the API requires the complete object"""
if thermostat.set_fan_speed(fan_speed):
url = DEVICES_URL + "/" + thermostat.id
try:
self._request_action(url, json.dumps(thermostat.data))
return True
except SpiderApiException:
_LOGGER.error(
f"Unable to set fan speed to {fan_speed}. Is this fan speed supported?"
)
return False
def update_power_plugs(self) -> None:
""" Retrieve power plugs """
results = self._request_update(ENERGY_DEVICES_URL)
if results is False:
return
for power_plug in results:
if power_plug["isSwitch"]:
today = (
datetime.today()
.replace(hour=00, minute=00, second=00)
.strftime("%s")
)
energy_url = (
ENERGY_MONITORING_URL
+ "/"
+ power_plug["energyDeviceId"]
+ "/?take=96&start="
+ str(today)
+ "000"
)
energy_results = self._request_update(energy_url)
if energy_results is False:
continue
try:
power_plug["todayUsage"] = float(
energy_results[0]["totalEnergy"]["normal"]
) + float(energy_results[0]["totalEnergy"]["low"])
except IndexError:
_LOGGER.error("Unable to get today energy usage for power plug")
self._power_plugs[power_plug["id"]] = SpiderPowerPlug(power_plug)
def get_power_plugs(self) -> ValuesView[SpiderPowerPlug]:
""" Get all power plugs """
self.update()
return self._power_plugs.values()
def get_power_plug(self, unique_id: str) -> SpiderPowerPlug | None:
""" Get a power plug by id """
self.update()
if unique_id in self._power_plugs:
return self._power_plugs[unique_id]
return None
def turn_power_plug_on(self, power_plug: SpiderPowerPlug) -> bool:
""" Turn the power_plug on"""
if power_plug.turn_on():
url = POWER_PLUGS_URL + "/" + power_plug.id + "/switch"
try:
self._request_action(url, "true")
return True
except SpiderApiException:
_LOGGER.error("Unable to turn power plug on.")
return False
def turn_power_plug_off(self, power_plug: SpiderPowerPlug) -> bool:
""" Turn the power plug off"""
if power_plug.turn_off():
url = POWER_PLUGS_URL + "/" + power_plug.id + "/switch"
try:
self._request_action(url, "false")
return True
except SpiderApiException:
_LOGGER.error("Unable to turn power plug off.")
return False
def _is_authenticated(self) -> bool:
""" Check if access token is expired """
if self._refresh_token == "":
self._request_login()
if datetime.now() > self._token_expires_at:
self._refresh_access_token()
return True
return False
def _request_action(self, url: str, data: str) -> None:
""" Perform a request to execute an action """
self._is_authenticated()
headers = {
"authorization": "Bearer " + self._access_token,
"Content-Type": "application/json",
"X-Client-Platform": "android-phone",
"X-Client-Version": "1.5.9 (3611)",
"X-Client-Library": "SpiderPy",
}
try:
response = requests.request("PUT", url, data=data, headers=headers)
except Exception as exception:
raise SpiderApiException(exception) from exception
if response.status_code == 401:
raise SpiderApiException("Access denied. Failed to refresh?")
if response.status_code != 200:
raise SpiderApiException(
f"Unable to perform action. Status code: {response.status_code}. Data: {data}"
)
def _request_update(self, url: str) -> Dict[Any, Any]:
""" Perform a request to update information """
self._is_authenticated()
headers = {
"authorization": "Bearer " + self._access_token,
"Content-Type": "application/json",
"X-Client-Platform": "android-phone",
"X-Client-Version": "1.5.9 (3611)",
"X-Client-Library": "SpiderPy",
}
try:
response = requests.request("GET", url, headers=headers)
except Exception as exception:
raise SpiderApiException(exception) from exception
if response.status_code == 401:
raise SpiderApiException("Access denied. Failed to refresh?")
if response.status_code != 200:
raise SpiderApiException(
f"Unable to request update. Status code: {response.status_code}"
)
return response.json()
def _request_login(self) -> None:
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"X-Client-Platform": "android-phone",
"X-Client-Version": "1.5.9 (3611)",
"X-Client-Library": "SpiderPy",
}
payload = {
"grant_type": "password",
"username": self._username,
"password": self._password,
}
try:
response = requests.request(
"POST", AUTHENTICATE_URL, data=payload, headers=headers
)
except Exception as exception:
raise UnauthorizedException(exception) from exception
if response.status_code != 200:
raise SpiderApiException(
f"Unable to request login. Status code: {response.status_code}"
)
data = response.json()
self._access_token = data["access_token"]
self._refresh_token = unquote(data["refresh_token"])
self._token_expires_in = data["expires_in"]
self._token_expires_at = datetime.now() + timedelta(
0, (int(data["expires_in"]) - 20)
)
def _refresh_access_token(self) -> None:
""" Refresh access_token """
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"X-Client-Platform": "android-phone",
"X-Client-Version": "1.5.9 (3611)",
"X-Client-Library": "SpiderPy",
}
payload = {"grant_type": "refresh_token", "refresh_token": self._refresh_token}
response = requests.request(
"POST", AUTHENTICATE_URL, data=payload, headers=headers
)
data = response.json()
if response.status_code != 200:
raise SpiderApiException(
f"Unable to refresh access token. Status code: {response.status_code}"
)
self._access_token = data["access_token"]
self._refresh_token = unquote(data["refresh_token"])
self._token_expires_in = data["expires_in"]
self._token_expires_at = datetime.now() + timedelta(
0, (int(data["expires_in"]) - 20)
)
class UnauthorizedException(Exception):
    """Raised when the login request could not be performed."""

    pass
class SpiderApiException(Exception):
    """Raised when a Spider API request fails or returns an error status."""

    pass
| 33.933735 | 106 | 0.582105 | 10,507 | 0.932629 | 0 | 0 | 0 | 0 | 0 | 0 | 2,550 | 0.226345 |
35bf68b8af9e43feff762af717348577fdc8b44c | 3,548 | py | Python | http_nudger/persister.py | askolosov/http-nudger | c483ffdef0df57ef625d0912cc2317d3c67f2303 | [
"MIT"
] | null | null | null | http_nudger/persister.py | askolosov/http-nudger | c483ffdef0df57ef625d0912cc2317d3c67f2303 | [
"MIT"
] | null | null | null | http_nudger/persister.py | askolosov/http-nudger | c483ffdef0df57ef625d0912cc2317d3c67f2303 | [
"MIT"
] | null | null | null | """
Persister module contains part of the http-nudger which consumes
records from Kafka and stores them into the database
"""
import json
import logging
from pathlib import Path
from typing import List
import aiokafka
import asyncpg
from .helpers import create_kafka_consumer, create_postgres_connection_pool
from .url_status import UrlStatus
logger = logging.getLogger(__name__)
# pylint: disable-msg=too-many-arguments
async def persister_loop(
    kafka_bootstrap_servers: str,
    kafka_topic: str,
    kafka_key: Path,
    kafka_cert: Path,
    kafka_ca: Path,
    kafka_consumer_group: str,
    postgres_host: str,
    postgres_port: int,
    postgres_db: str,
    postgres_user: str,
    postgres_password: str,
) -> None:
    """Run forever: consume URL statuses from Kafka and store them in Postgres.

    Creates the Kafka consumer and the Postgres connection pool, ensures the
    target table exists, then repeatedly consumes a batch, stores it, and
    commits the Kafka offsets.  Offsets are committed only after a successful
    store, so delivery is at-least-once.
    """
    kafka_consumer = create_kafka_consumer(
        kafka_bootstrap_servers,
        kafka_consumer_group,
        kafka_key,
        kafka_cert,
        kafka_ca,
    )
    pg_conn_pool = await create_postgres_connection_pool(
        postgres_host,
        postgres_port,
        postgres_db,
        postgres_user,
        postgres_password,
    )
    # "as kafka_consumer" rebinds the same object; the context managers
    # start/stop the consumer and hold one pooled connection for the loop.
    async with kafka_consumer as kafka_consumer, pg_conn_pool.acquire() as pg_conn:
        logger.info("Creating tables in the database...")
        await create_tables(pg_conn)
        kafka_consumer.subscribe(topics=[kafka_topic])
        while True:
            batch = await consume_batch(kafka_consumer)
            if batch:
                logger.info(
                    "A batch of %d URL statuses was consumed. Storing...", len(batch)
                )
                await store_batch(pg_conn, batch)
                await kafka_consumer.commit()
async def consume_batch(
    consumer: aiokafka.AIOKafkaConsumer, timeout: int = 10 * 1000
) -> List[UrlStatus]:
    """Poll Kafka once and return the parseable URL statuses.

    Messages that do not decode into a UrlStatus are logged and skipped.
    """

    def parse_or_none(message):
        # One bad message must not poison the whole batch.
        try:
            return UrlStatus.from_json(message.value)
        except (TypeError, json.JSONDecodeError, ValueError):
            logger.warning("Skipping message due to wrong format: %s", message)
            return None

    records = await consumer.getmany(timeout_ms=timeout)
    candidates = (
        parse_or_none(message)
        for messages in records.values()
        for message in messages
    )
    return [status for status in candidates if status is not None]
async def create_tables(conn: asyncpg.Connection) -> None:
    """Create the url_statuses table if it does not exist yet."""
    await conn.execute(
        """
        CREATE TABLE IF NOT EXISTS url_statuses(
            id serial PRIMARY KEY,
            timestamp timestamp with time zone,
            url text,
            status_code smallint,
            failure_reason text,
            response_time float,
            regexp text,
            regexp_matched bool
        )
        """
    )
async def store_batch(conn: asyncpg.Connection, batch: List[UrlStatus]) -> None:
    """Insert a batch of URL statuses in a single transaction.

    Uses executemany, which prepares the INSERT once and executes it for
    every row — resolving the old TODO about per-row INSERT inefficiency.
    """
    if not batch:
        return
    rows = [
        (
            url_status.timestamp,
            url_status.url,
            url_status.status_code,
            url_status.failure_reason,
            url_status.response_time,
            url_status.regexp,
            url_status.regexp_matched,
        )
        for url_status in batch
    ]
    async with conn.transaction():
        await conn.executemany(
            """
            INSERT INTO url_statuses(
                timestamp,
                url,
                status_code,
                failure_reason,
                response_time,
                regexp,
                regexp_matched
            ) VALUES($1, $2, $3, $4, $5, $6, $7)
            """,
            rows,
        )
| 29.081967 | 85 | 0.598365 | 0 | 0 | 0 | 0 | 0 | 0 | 3,112 | 0.877114 | 1,060 | 0.29876 |
35c0bcc2adb3ea68d0b4f4ffb1f220f03d52c1be | 724 | py | Python | var/spack/repos/builtin/packages/liblzf/package.py | BenWibking/spack | 49b3b43a4a9375210b578635d9240875a5f3106b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/liblzf/package.py | BenWibking/spack | 49b3b43a4a9375210b578635d9240875a5f3106b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/liblzf/package.py | joequant/spack | e028ee0d5903045e1cdeb57550cbff61f2ffb2fa | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Liblzf(AutotoolsPackage):
    """LibLZF is a very small data compression library.

    It consists of only two .c and two .h files and is very easy to incorporate into
    your own programs. The compression algorithm is very, very fast, yet still written
    in portable C."""

    homepage = "http://oldhome.schmorp.de/marc/liblzf.html"
    url = "http://dist.schmorp.de/liblzf/liblzf-3.6.tar.gz"

    # Checksum of the upstream release tarball, verified by Spack at fetch time.
    version('3.6', sha256='9c5de01f7b9ccae40c3f619d26a7abec9986c06c36d260c179cedd04b89fb46a')
| 36.2 | 93 | 0.740331 | 503 | 0.694751 | 0 | 0 | 0 | 0 | 0 | 0 | 603 | 0.832873 |
35c0d7b1584b734cc6675d562fe57b7a5059350f | 15,565 | py | Python | blaze/compute/tests/test_elwise_eval.py | talumbau/blaze | 66c9e61476f11d53f7b734664214537182397739 | [
"BSD-3-Clause"
] | 1 | 2018-01-24T08:54:04.000Z | 2018-01-24T08:54:04.000Z | blaze/compute/tests/test_elwise_eval.py | talumbau/blaze | 66c9e61476f11d53f7b734664214537182397739 | [
"BSD-3-Clause"
] | null | null | null | blaze/compute/tests/test_elwise_eval.py | talumbau/blaze | 66c9e61476f11d53f7b734664214537182397739 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from dynd import nd, ndt
import blaze
import unittest
import tempfile
import os, os.path
import glob
import shutil
import blaze
# Useful superclass for disk-based tests
class MayBePersistentTest(unittest.TestCase):
    """Base class that can back the test data with on-disk storage.

    Subclasses set ``disk`` to 'BLZ' or 'HDF5' to get three writable data
    descriptors (ddesc1..ddesc3); with the default None everything stays
    in memory.
    """

    disk = None

    def setUp(self):
        if self.disk == 'BLZ':
            prefix = 'blaze-' + self.__class__.__name__
            descriptors = []
            for _ in range(3):
                # mkdtemp only reserves a unique name; the directory itself
                # is removed so BLZ can create it in write mode.
                path = tempfile.mkdtemp(suffix='.blz', prefix=prefix)
                os.rmdir(path)
                descriptors.append(blaze.BLZ_DDesc(path, mode='w'))
            self.ddesc1, self.ddesc2, self.ddesc3 = descriptors
        elif self.disk == 'HDF5':
            prefix = 'hdf5-' + self.__class__.__name__
            descriptors = []
            for _ in range(3):
                handle, path = tempfile.mkstemp(suffix='.hdf5', prefix=prefix)
                os.close(handle)  # the descriptor reopens the file itself
                descriptors.append(blaze.HDF5_DDesc(path, "/earray", mode='w'))
            self.ddesc1, self.ddesc2, self.ddesc3 = descriptors
        else:
            self.ddesc1 = self.ddesc2 = self.ddesc3 = None

    def tearDown(self):
        if self.disk:
            for descriptor in (self.ddesc1, self.ddesc2, self.ddesc3):
                descriptor.remove()
# Check for arrays that fit in the chunk size
class evalTest(unittest.TestCase):
    """Checks _elwise_eval() on arrays that fit in the chunk size."""

    vm = "numexpr"  # if numexpr not available, it will fall back to python
    N = 1000

    def test00(self):
        """Testing elwise_eval() with only scalars and constants"""
        a = 3
        cr = blaze._elwise_eval("2 * a", vm=self.vm)
        # assertTrue replaces assert_, which was removed in Python 3.12.
        self.assertTrue(cr == 6, "eval does not work correctly")

    def test01(self):
        """Testing with only blaze arrays"""
        a, b = np.arange(self.N), np.arange(1, self.N+1)
        c = blaze.array(a)
        d = blaze.array(b)
        cr = blaze._elwise_eval("c * d", vm=self.vm)
        nr = a * b
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test02(self):
        """Testing with only numpy arrays"""
        a, b = np.arange(self.N), np.arange(1, self.N+1)
        cr = blaze._elwise_eval("a * b", vm=self.vm)
        nr = a * b
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test03(self):
        """Testing with only dynd arrays"""
        a, b = np.arange(self.N), np.arange(1, self.N+1)
        c = nd.array(a)
        d = nd.array(b)
        cr = blaze._elwise_eval("c * d", vm=self.vm)
        nr = a * b
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test04(self):
        """Testing with a mix of blaze, numpy and dynd arrays"""
        a, b = np.arange(self.N), np.arange(1, self.N+1)
        b = blaze.array(b)
        d = nd.array(a)
        cr = blaze._elwise_eval("a * b + d", vm=self.vm)
        nr = a * b + d
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test05(self):
        """Testing with a mix of scalars and blaze, numpy and dynd arrays"""
        a, b = np.arange(self.N), np.arange(1, self.N+1)
        b = blaze.array(b)
        d = nd.array(a)
        cr = blaze._elwise_eval("a * b + d + 2", vm=self.vm)
        nr = a * b + d + 2
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test06(self):
        """Testing reductions on blaze arrays"""
        if self.vm == "python":
            # The reductions does not work well using Blaze expressions yet
            return
        a, b = np.arange(self.N), np.arange(1, self.N+1)
        b = blaze.array(b)
        cr = blaze._elwise_eval("sum(b + 2)", vm=self.vm)
        nr = np.sum(b + 2)
        self.assertTrue(cr == nr, "eval does not work correctly")
# Check for arrays that fit in the chunk size
# Using the Python VM (i.e. Blaze machinery) here
class evalPythonTest(evalTest):
    """Same checks as evalTest, but forcing the Python (Blaze machinery) VM."""

    vm = "python"
# Check for arrays that are larger than a chunk
class evalLargeTest(evalTest):
    """Same checks as evalTest, with arrays larger than a chunk."""

    N = 10000
# Check for arrays that are larger than a chunk
# Using the Python VM (i.e. Blaze machinery) here
class evalPythonLargeTest(evalTest):
    """Arrays larger than a chunk, evaluated with the Python VM."""

    N = 10000
    vm = "python"
# Check for arrays stored on-disk, but fit in a chunk
# Check for arrays that fit in memory
class storageTest(MayBePersistentTest):
    """Checks for on-disk (BLZ) arrays that fit in a chunk."""

    N = 1000
    vm = "numexpr"
    disk = "BLZ"

    def test00(self):
        """Testing elwise_eval() with only blaze arrays"""
        a, b = np.arange(self.N), np.arange(1, self.N+1)
        c = blaze.array(a, ddesc=self.ddesc1)
        d = blaze.array(b, ddesc=self.ddesc2)
        cr = blaze._elwise_eval("c * d", vm=self.vm, ddesc=self.ddesc3)
        nr = a * b
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test01(self):
        """Testing elwise_eval() with blaze arrays and constants"""
        a, b = np.arange(self.N), np.arange(1, self.N+1)
        c = blaze.array(a, ddesc=self.ddesc1)
        d = blaze.array(b, ddesc=self.ddesc2)
        cr = blaze._elwise_eval("c * d + 1", vm=self.vm, ddesc=self.ddesc3)
        nr = a * b + 1
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test03(self):
        """Testing elwise_eval() with blaze and dynd arrays"""
        a, b = np.arange(self.N), np.arange(1, self.N+1)
        c = blaze.array(a, ddesc=self.ddesc1)
        d = nd.array(b)
        cr = blaze._elwise_eval("c * d + 1", vm=self.vm, ddesc=self.ddesc3)
        nr = a * b + 1
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test04(self):
        """Testing elwise_eval() with blaze, dynd and numpy arrays"""
        a, b = np.arange(self.N), np.arange(1, self.N+1)
        c = blaze.array(a, ddesc=self.ddesc1)
        d = nd.array(b)
        cr = blaze._elwise_eval("a * c + d", vm=self.vm, ddesc=self.ddesc3)
        nr = a * c + d
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test05(self):
        """Testing reductions on blaze arrays"""
        if self.vm == "python":
            # The reductions does not work well using Blaze expressions yet
            return
        a, b = np.arange(self.N), np.arange(1, self.N+1)
        b = blaze.array(b, ddesc=self.ddesc1)
        cr = blaze._elwise_eval("sum(b + 2)", vm=self.vm, ddesc=self.ddesc3)
        nr = np.sum(b + 2)
        # assertTrue replaces assert_, which was removed in Python 3.12.
        self.assertTrue(cr == nr, "eval does not work correctly")
# Check for arrays stored on-disk, but fit in a chunk
# Using the Python VM (i.e. Blaze machinery) here
class storagePythonTest(storageTest):
    """On-disk arrays that fit in a chunk, evaluated with the Python VM."""

    vm = "python"
# Check for arrays stored on-disk, but are larger than a chunk
class storageLargeTest(storageTest):
    """On-disk arrays larger than a chunk."""

    N = 10000
# Check for arrays stored on-disk, but are larger than a chunk
# Using the Python VM (i.e. Blaze machinery) here
class storagePythonLargeTest(storageTest):
    """On-disk arrays larger than a chunk, evaluated with the Python VM."""

    N = 10000
    vm = "python"
# Check for arrays stored on-disk, but fit in a chunk
class storageHDF5Test(storageTest):
    """On-disk arrays that fit in a chunk, backed by HDF5 storage."""

    disk = "HDF5"
# Check for arrays stored on-disk, but are larger than a chunk
class storageLargeHDF5Test(storageTest):
    """On-disk arrays larger than a chunk, backed by HDF5 storage."""

    N = 10000
    disk = "HDF5"
####################################
# Multidimensional tests start now
####################################
# Check for arrays that fit in a chunk
class evalMDTest(unittest.TestCase):
    """Multidimensional checks for arrays that fit in a chunk."""

    N = 10
    M = 100
    vm = "numexpr"

    def test00(self):
        """Testing elwise_eval() with only blaze arrays"""
        a = np.arange(self.N*self.M).reshape(self.N, self.M)
        b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
        c = blaze.array(a)
        d = blaze.array(b)
        cr = blaze._elwise_eval("c * d", vm=self.vm)
        nr = a * b
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test01(self):
        """Testing elwise_eval() with blaze arrays and scalars"""
        a = np.arange(self.N*self.M).reshape(self.N, self.M)
        b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
        c = blaze.array(a)
        d = blaze.array(b)
        cr = blaze._elwise_eval("c * d + 2", vm=self.vm)
        nr = a * b + 2
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test02(self):
        """Testing elwise_eval() with pure dynd arrays and scalars"""
        a = np.arange(self.N*self.M).reshape(self.N, self.M)
        b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
        c = nd.array(a)
        d = nd.array(b)
        cr = blaze._elwise_eval("c * d + 2", vm=self.vm)
        nr = a * b + 2
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test03(self):
        """Testing elwise_eval() with blaze and dynd arrays and scalars"""
        a = np.arange(self.N*self.M).reshape(self.N, self.M)
        b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
        c = blaze.array(a)
        d = nd.array(b)
        cr = blaze._elwise_eval("c * d + 2", vm=self.vm)
        nr = a * b + 2
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test04(self):
        """Testing reductions on blaze arrays"""
        if self.vm == "python":
            # The reductions does not work well using Blaze expressions yet
            return
        a = np.arange(self.N*self.M).reshape(self.N, self.M)
        b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
        b = blaze.array(b)
        cr = blaze._elwise_eval("sum(b + 2)", vm=self.vm)
        nr = np.sum(b + 2)
        # assertTrue replaces assert_, which was removed in Python 3.12.
        self.assertTrue(cr == nr, "eval does not work correctly")

    def test05(self):
        """Testing reductions on blaze arrays and axis=0"""
        if self.vm == "python":
            # The reductions does not work well using Blaze expressions yet
            return
        a = np.arange(self.N*self.M).reshape(self.N, self.M)
        b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
        b = blaze.array(b)
        cr = blaze._elwise_eval("sum(b + 2, axis=0)", vm=self.vm)
        nr = np.sum(b + 2, axis=0)
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test06(self):
        """Testing reductions on blaze arrays and axis=1"""
        if self.vm == "python":
            # The reductions does not work well using Blaze expressions yet
            return
        # Reductions over axis=1 are expected to be unimplemented.
        self.assertRaises(NotImplementedError,
                          blaze._elwise_eval, "sum([[1,2],[3,4]], axis=1)")
# Check for arrays that fit in a chunk
# Using the Python VM (i.e. Blaze machinery) here
class evalPythonMDTest(evalMDTest):
    """Multidimensional checks, evaluated with the Python VM."""

    vm = "python"
# Check for arrays that does not fit in a chunk
class evalLargeMDTest(evalMDTest):
    """Multidimensional checks for arrays that do not fit in a chunk."""

    N = 100
    M = 100
# Check for arrays that does not fit in a chunk, but using python VM
class evalPythonLargeMDTest(evalMDTest):
    """Larger-than-a-chunk multidimensional checks, using the Python VM."""

    N = 100
    M = 100
    vm = "python"
# Check for arrays that fit in a chunk (HDF5)
class evalMDHDF5Test(evalMDTest):
    """Multidimensional checks, nominally with HDF5 storage.

    NOTE(review): evalMDTest derives from unittest.TestCase and never reads
    ``disk``, so this attribute presumably has no effect here -- confirm.
    """

    disk = "HDF5"
# Check for arrays that does not fit in a chunk (HDF5)
class evalLargeMDHDF5Test(evalMDTest):
    """Larger-than-a-chunk multidimensional checks, nominally with HDF5.

    NOTE(review): evalMDTest never reads ``disk``; this attribute presumably
    has no effect here -- confirm.
    """

    N = 100
    M = 100
    disk = "HDF5"
# Check for arrays stored on-disk, but fit in a chunk
# Check for arrays that fit in memory
class storageMDTest(MayBePersistentTest):
    """Multidimensional checks for on-disk (BLZ) arrays that fit in a chunk."""

    N = 10
    M = 100
    vm = "numexpr"
    disk = "BLZ"

    def test00(self):
        """Testing elwise_eval() with only blaze arrays"""
        a = np.arange(self.N*self.M).reshape(self.N, self.M)
        b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
        c = blaze.array(a, ddesc=self.ddesc1)
        d = blaze.array(b, ddesc=self.ddesc2)
        cr = blaze._elwise_eval("c * d", vm=self.vm, ddesc=self.ddesc3)
        nr = a * b
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test01(self):
        """Testing elwise_eval() with blaze arrays and constants"""
        a = np.arange(self.N*self.M).reshape(self.N, self.M)
        b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
        c = blaze.array(a, ddesc=self.ddesc1)
        d = blaze.array(b, ddesc=self.ddesc2)
        cr = blaze._elwise_eval("c * d + 1", vm=self.vm, ddesc=self.ddesc3)
        nr = a * b + 1
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test03(self):
        """Testing elwise_eval() with blaze and dynd arrays"""
        a = np.arange(self.N*self.M).reshape(self.N, self.M)
        b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
        c = blaze.array(a, ddesc=self.ddesc1)
        d = nd.array(b)
        cr = blaze._elwise_eval("c * d + 1", vm=self.vm, ddesc=self.ddesc3)
        nr = a * b + 1
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test04(self):
        """Testing elwise_eval() with blaze, dynd and numpy arrays"""
        a = np.arange(self.N*self.M).reshape(self.N, self.M)
        b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
        c = blaze.array(a, ddesc=self.ddesc1)
        d = nd.array(b)
        cr = blaze._elwise_eval("a * c + d", vm=self.vm, ddesc=self.ddesc3)
        nr = a * c + d
        assert_array_equal(cr[:], nr, "eval does not work correctly")

    def test05(self):
        """Testing reductions on blaze arrays"""
        if self.vm == "python":
            # The reductions does not work well using Blaze expressions yet
            return
        a = np.arange(self.N*self.M).reshape(self.N, self.M)
        b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
        b = blaze.array(b, ddesc=self.ddesc1)
        cr = blaze._elwise_eval("sum(b + 2)", vm=self.vm, ddesc=self.ddesc3)
        nr = np.sum(b + 2)
        # assertTrue replaces assert_, which was removed in Python 3.12.
        self.assertTrue(cr == nr, "eval does not work correctly")

    def test06(self):
        """Testing reductions on blaze arrays and axis=0"""
        if self.vm == "python":
            # The reductions does not work well using Blaze expressions yet
            return
        a = np.arange(self.N*self.M).reshape(self.N, self.M)
        b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
        b = blaze.array(b, ddesc=self.ddesc1)
        cr = blaze._elwise_eval("sum(b, axis=0)",
                                vm=self.vm, ddesc=self.ddesc3)
        nr = np.sum(b, axis=0)
        assert_array_equal(cr, nr, "eval does not work correctly")
# Check for arrays stored on-disk, but fit in a chunk
# Using the Python VM (i.e. Blaze machinery) here
class storagePythonMDTest(storageMDTest):
    """On-disk multidimensional checks, evaluated with the Python VM."""

    vm = "python"
# Check for arrays stored on-disk, but are larger than a chunk
class storageLargeMDTest(storageMDTest):
    """On-disk multidimensional checks for arrays larger than a chunk."""

    N = 500
# Check for arrays stored on-disk, but are larger than a chunk
# Using the Python VM (i.e. Blaze machinery) here
class storagePythonLargeMDTest(storageMDTest):
    """Larger-than-a-chunk on-disk checks, evaluated with the Python VM."""

    N = 500
    vm = "python"
# Check for arrays stored on-disk, but fit in a chunk
class storageMDHDF5Test(storageMDTest):
    """On-disk multidimensional checks backed by HDF5 storage."""

    disk = "HDF5"
# Check for arrays stored on-disk, but are larger than a chunk
class storageLargeMDHDF5Test(storageMDTest):
    """Larger-than-a-chunk on-disk checks backed by HDF5 storage."""

    N = 500
    disk = "HDF5"
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 35.946882 | 76 | 0.595952 | 13,405 | 0.861227 | 0 | 0 | 0 | 0 | 0 | 0 | 4,826 | 0.310055 |
35c0ed329cd7d8d2056b7e34556321128f797142 | 1,457 | py | Python | torch3d/models/pointnet2.py | zhangmozhe/torch3d | d47e9b243e520f9c0c72a26c271d2c7ad242cb65 | [
"BSD-3-Clause"
] | null | null | null | torch3d/models/pointnet2.py | zhangmozhe/torch3d | d47e9b243e520f9c0c72a26c271d2c7ad242cb65 | [
"BSD-3-Clause"
] | null | null | null | torch3d/models/pointnet2.py | zhangmozhe/torch3d | d47e9b243e520f9c0c72a26c271d2c7ad242cb65 | [
"BSD-3-Clause"
] | 1 | 2020-06-03T15:19:25.000Z | 2020-06-03T15:19:25.000Z | import torch
import torch.nn as nn
from torch3d.nn import SetAbstraction
class PointNetSSG(nn.Module):
    """
    PointNet++ single-scale grouping architecture from the `"PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space" <https://arxiv.org/abs/1706.02413>`_ paper.

    Args:
        in_channels (int): Number of channels in the input point set
        num_classes (int): Number of classes in the dataset
        dropout (float, optional): Dropout rate in the classifier. Default: 0.5
    """  # noqa

    def __init__(self, in_channels, num_classes, dropout=0.5):
        super(PointNetSSG, self).__init__()
        # Three set-abstraction stages that progressively downsample the point
        # set while widening the feature channels; "+ 3" accounts for the xyz
        # coordinates concatenated to the features at each stage.
        # NOTE(review): the positional args presumably map to (in_channels,
        # mlp channels, points sampled, neighbors, radius) -- confirm against
        # torch3d.nn.SetAbstraction.
        self.sa1 = SetAbstraction(in_channels, [64, 64, 128], 512, 32, 0.2, bias=False)
        self.sa2 = SetAbstraction(128 + 3, [128, 128, 256], 128, 64, 0.4, bias=False)
        self.sa3 = SetAbstraction(256 + 3, [256, 512, 1024], 1, 128, 0.8, bias=False)
        # Classifier head: two linear layers with batch norm and dropout.
        self.mlp = nn.Sequential(
            nn.Linear(1024, 512, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Dropout(dropout),
            nn.Linear(512, 256, bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(dropout),
        )
        self.fc = nn.Linear(256, num_classes)

    def forward(self, x):
        # Hierarchical feature extraction down to a single abstracted point.
        x = self.sa1(x)
        x = self.sa2(x)
        x = self.sa3(x)
        # Drop the singleton point axis to get a flat feature vector.
        x = x.squeeze(2)
        x = self.mlp(x)
        x = self.fc(x)
        return x
| 35.536585 | 184 | 0.59094 | 1,381 | 0.947838 | 0 | 0 | 0 | 0 | 0 | 0 | 422 | 0.289636 |
35c12c665a38adb43714ee6a7e812bc5fee89003 | 325 | py | Python | tests/test_engine.py | znhv/winsio | 4d4e69961285ea3dcebc5ad6358e2d753d6b4f9d | [
"MIT"
] | null | null | null | tests/test_engine.py | znhv/winsio | 4d4e69961285ea3dcebc5ad6358e2d753d6b4f9d | [
"MIT"
] | null | null | null | tests/test_engine.py | znhv/winsio | 4d4e69961285ea3dcebc5ad6358e2d753d6b4f9d | [
"MIT"
] | null | null | null | import unittest.mock as mock
import pytest
from brainstorm.scripts import engine
from brainstorm.games import calc
# def test_player_ready(monkeypatch):
# with monkeypatch.context() as m:
# m.setattr('builtins.input', lambda prompt="": "y")
# result = engine.player_ready()
# assert result is True
| 25 | 60 | 0.707692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.621538 |
35c16cadc73c78b1e8e17420e27d77517793b258 | 1,328 | py | Python | Day01.py | peiliming007/PythonByJoker | f457706bcdc43c8743dd88a210c5ca0b28aabd21 | [
"Apache-2.0"
] | null | null | null | Day01.py | peiliming007/PythonByJoker | f457706bcdc43c8743dd88a210c5ca0b28aabd21 | [
"Apache-2.0"
] | null | null | null | Day01.py | peiliming007/PythonByJoker | f457706bcdc43c8743dd88a210c5ca0b28aabd21 | [
"Apache-2.0"
] | null | null | null | #1
celsius=float(input("请输入一个摄氏度:>>"))
fahrenheit=(9 / 5) *celsius + 32
print("华氏温度为:%.1f" % fahrenheit)
#2
radius=float(input("请输入圆柱体的半径:>>"))
length=float(input("请输入圆柱体的高:>>"))
area= radius*radius*3.14159265
volume=area*length
print("The area is %.4f" % area )
print("The volume is %.1f" % volume)
#3
feet=float(input("请输入需要转换的英里值:>>"))
meter=feet*0.305
print("对应的米数为: %.4f" % meter)
#4
M=float(input("Enter amount of water in kilogram: "))
initiaTemperature=float(input("Enter the initia temperature: "))
finalTemperature=float(input("Enter the final temperature: "))
Q= M *(finalTemperature -initiaTemperature)*4184
print("The energy needed is %.1f" % Q)
#5
balance=float(input("输入差额:>>"))
interestrate=float(input("输入年利率:>>"))
interestrate=balance*(interestrate/1200)
print("下月要付的利息为: %.5f" % interestrate)
#6
v0,v1,t=map(float,input('输入初始速度:,输入末速度:,输入占用的时间:').split(','))
a=(v1-v0)/t
print("平均加速度为:%.4f" % a)
#7
S=float(input("输入存款金额:>>"))
one=S *(1+0.00417)
two=(S+one)*(1+0.00417)
theer=(S+two)*(1+0.00417)
four=(S+theer)*(1+0.00417)
five=(S+four)*(1+0.00417)
six=(S+five)*(1+0.00417)
print("六个月后账户里的金额为:%.2f"% six)
#8
num=int(input("请输入一个0-1000的整数:>>"))
a=int(num%100)
b=int(a%10)
c=int(a/10)
d=int(num/100)
sum = b + c +d
print("各位数和为:", sum)
| 21.419355 | 65 | 0.637048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 651 | 0.413071 |
35c4595b0ebd1fa1dbe98d7023f5193c7b298c15 | 5,595 | py | Python | botx/bots/bots.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 13 | 2021-01-21T12:43:10.000Z | 2022-03-23T11:11:59.000Z | botx/bots/bots.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 259 | 2020-02-26T08:51:03.000Z | 2022-03-23T11:08:36.000Z | botx/bots/bots.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 5 | 2019-12-02T16:19:22.000Z | 2021-11-22T20:33:34.000Z | """Implementation for bot classes."""
import asyncio
from dataclasses import InitVar, field
from typing import Any, Callable, Dict, List
from weakref import WeakSet
from loguru import logger
from pydantic.dataclasses import dataclass
from botx import concurrency, exception_handlers, exceptions, shared, typing
from botx.bots.mixins import (
clients,
collectors,
exceptions as exception_mixin,
lifespan,
middlewares,
)
from botx.clients.clients import async_client, sync_client as synchronous_client
from botx.collecting.collectors.collector import Collector
from botx.dependencies.models import Depends
from botx.middlewares.authorization import AuthorizationMiddleware
from botx.middlewares.exceptions import ExceptionMiddleware
from botx.models import credentials, datastructures, menu
from botx.models.messages.message import Message
@dataclass(config=shared.BotXDataclassConfig)
class Bot( # noqa: WPS215
    collectors.BotCollectingMixin,
    clients.ClientsMixin,
    lifespan.LifespanMixin,
    middlewares.MiddlewareMixin,
    exception_mixin.ExceptionHandlersMixin,
):
    """Class that implements bot behaviour.

    Aggregates handler collecting, BotX HTTP clients, lifespan events,
    middlewares and exception handling into a single entry point.
    """
    # Init-only: background dependencies forwarded to the inner Collector.
    dependencies: InitVar[List[Depends]] = field(default=None)
    # Accounts (host, bot_id, signature) this bot is allowed to serve.
    bot_accounts: List[credentials.BotXCredentials] = field(default_factory=list)
    startup_events: List[typing.BotLifespanEvent] = field(default_factory=list)
    shutdown_events: List[typing.BotLifespanEvent] = field(default_factory=list)
    # The fields below are created in __post_init__, not by the constructor.
    client: async_client.AsyncClient = field(init=False)
    sync_client: synchronous_client.Client = field(init=False)
    collector: Collector = field(init=False)
    exception_middleware: ExceptionMiddleware = field(init=False)
    state: datastructures.State = field(init=False)
    dependency_overrides: Dict[Callable, Callable] = field(
        init=False,
        default_factory=dict,
    )
    # Weak references so finished handler tasks can be garbage collected.
    tasks: WeakSet = field(init=False, default_factory=WeakSet)
    async def __call__(self, message: Message) -> None:
        """Iterate through collector, find handler and execute it, running middlewares.
        Arguments:
            message: message that will be proceed by handler.
        """
        # Fire-and-forget: the task is only weakly tracked via self.tasks.
        self.tasks.add(asyncio.ensure_future(self.exception_middleware(message)))
    def __post_init__(self, dependencies: List[Depends]) -> None:
        """Initialize special fields.
        Arguments:
            dependencies: initial background dependencies for inner collector.
        """
        self.state = datastructures.State()
        self.client = async_client.AsyncClient()
        self.sync_client = synchronous_client.Client()
        self.collector = Collector(
            dependencies=dependencies,
            dependency_overrides_provider=self,
        )
        # The exception middleware wraps the collector so handler errors are
        # routed to the registered exception handlers.
        self.exception_middleware = ExceptionMiddleware(self.collector)
        self.add_exception_handler(
            exceptions.DependencyFailure,
            exception_handlers.dependency_failure_exception_handler,
        )
        self.add_exception_handler(
            exceptions.NoMatchFound,
            exception_handlers.no_match_found_exception_handler,
        )
        self.add_middleware(AuthorizationMiddleware)
    async def status(self, *args: Any, **kwargs: Any) -> menu.Status:
        """Generate status object that could be return to BotX API on `/status`.
        Arguments:
            args: additional positional arguments that will be passed to callable
                status function.
            kwargs: additional key arguments that will be passed to callable
                status function.
        Returns:
            Built status for returning to BotX API.
        """
        status = menu.Status()
        for handler in self.handlers:
            # include_in_status may be a plain bool or a (possibly async)
            # callable that decides visibility at request time.
            if callable(handler.include_in_status):
                include_in_status = await concurrency.callable_to_coroutine(
                    handler.include_in_status,
                    *args,
                    **kwargs,
                )
            else:
                include_in_status = handler.include_in_status
            if include_in_status:
                status.result.commands.append(
                    menu.MenuCommand(
                        description=handler.description or "",
                        body=handler.body,
                        name=handler.name,
                    ),
                )
        return status
    async def execute_command(self, message: dict) -> None:
        """Process data with incoming message and handle command inside.
        Arguments:
            message: incoming message to bot.
        """
        logger.bind(botx_bot=True, payload=message).debug("process incoming message")
        msg = Message.from_dict(message, self)
        # raise UnknownBotError if not registered.
        self.get_account_by_bot_id(msg.bot_id)
        await self(msg)
    async def authorize(self, *args: Any) -> None:
        """Process auth for each bot account."""
        for account in self.bot_accounts:
            try:
                token = await self.get_token(
                    account.host,
                    account.bot_id,
                    account.signature,
                )
            except (exceptions.BotXAPIError, exceptions.BotXConnectError) as exc:
                # Skip accounts that cannot be authorized; keep the rest usable.
                logger.bind(botx_bot=True).warning(
                    f"Credentials `host - {account.host}, " # noqa: WPS305
                    f"bot_id - {account.bot_id}` are invalid. "
                    f"Reason - {exc.message_template}",
                )
                continue
            account.token = token
| 36.331169 | 87 | 0.648794 | 4,685 | 0.837355 | 0 | 0 | 4,731 | 0.845576 | 2,719 | 0.48597 | 1,187 | 0.212154 |
35c7b5af3988bd0290ffa3910c3ac39d7b3a8d95 | 4,242 | py | Python | orka_inventory.py | jeff-vincent/orka-ansible-dynamic-inventory | 0a0e19597d5cd06e6bb0214a96826376dca76a1e | [
"MIT"
] | null | null | null | orka_inventory.py | jeff-vincent/orka-ansible-dynamic-inventory | 0a0e19597d5cd06e6bb0214a96826376dca76a1e | [
"MIT"
] | null | null | null | orka_inventory.py | jeff-vincent/orka-ansible-dynamic-inventory | 0a0e19597d5cd06e6bb0214a96826376dca76a1e | [
"MIT"
] | 1 | 2022-01-05T20:31:53.000Z | 2022-01-05T20:31:53.000Z | #!/usr/bin/python3
import argparse
import json
import os
import subprocess
class OrkaAnsibleInventory:
    """Builds an Ansible dynamic-inventory JSON document from Orka VM data.

    Typical flow: ``get_current_vm_data`` -> one of the filter methods
    (``get_deployed_vms`` / ``get_vm_by_host_name`` /
    ``get_name_contains_vms``) -> ``create_inventory``.
    """

    def __init__(self):
        self.vm_data = None          # raw VM resources from `orka vm list`
        self.filtered_data = None    # subset selected by the filter methods
        self.inventory = {
            'group': {'hosts': []},
            'vars': [],
            '_meta': {
                'hostvars': {}
            }
        }

    def get_current_vm_data(self):
        """Get current VM data related to the current CLI user.
        Note
        ----
        The user must be logged in to the Orka CLI.
        """
        completed_process = subprocess.run(
            ['orka', 'vm', 'list', '--json'],
            capture_output=True)
        dict_string = completed_process.stdout.decode('utf-8')
        data = json.loads(dict_string)
        self.vm_data = data['virtual_machine_resources']

    def get_deployed_vms(self):
        """Filter current VM data to isolate deployed VMs."""
        self.filtered_data = \
            [i for i in self.vm_data if i['vm_deployment_status'] == 'Deployed']

    def get_vm_by_host_name(self, host_name):
        """Filter current VM data to isolate named VM.
        Args:
            host_name: string: the VM name to match.
        """
        self.filtered_data = \
            [i for i in self.vm_data if host_name == i['status'][0]['virtual_machine_name']]

    def get_name_contains_vms(self, name_contains):
        """Filter current VM data to isolate VMs by partial name match.

        Operates on ``filtered_data`` (i.e. after ``get_deployed_vms``),
        matching case-insensitively.
        Args:
            name_contains: string: partial match sort key for deployed VMs.
        """
        nc = name_contains.lower()
        self.filtered_data = \
            [i for i in self.filtered_data if nc in i['status'][0]['virtual_machine_name'].lower()]

    def create_inventory(self):
        """Create the inventory object to return to Ansible.

        Printing the JSON document is part of the dynamic-inventory
        protocol: Ansible reads this script's stdout.
        """
        hosts = []
        ansible_ssh_user = os.environ.get('ANSIBLE_SSH_USER')
        ansible_ssh_pass = os.environ.get('ANSIBLE_SSH_PASS')
        for i in self.filtered_data:
            ip_address = i['status'][0]['virtual_machine_ip']
            hosts.append(ip_address)
            self.inventory['_meta']['hostvars'][ip_address] = \
                {'ansible_ssh_port': i['status'][0]['ssh_port'],
                 'ansible_ssh_user': ansible_ssh_user,
                 'ansible_ssh_pass': ansible_ssh_pass,
                 'ansible_connection': 'ssh'}
        self.inventory['group']['hosts'] = hosts
        print(json.dumps(self.inventory))
        return json.dumps(self.inventory)
def parse_args():
    """Parse the flags Ansible passes to a dynamic-inventory script.

    ``--list`` and ``--host`` are mutually exclusive, mirroring the
    dynamic-inventory protocol.
    """
    arg_parser = argparse.ArgumentParser()
    exclusive = arg_parser.add_mutually_exclusive_group()
    exclusive.add_argument(
        '--list', action='store_true', help='list deployed VMs')
    exclusive.add_argument(
        '--host', action='store', dest='host_name', help='get host by name')
    return arg_parser.parse_args()
def main(args, name_contains):
    """Dispatch on the parsed CLI flags and emit the matching inventory."""
    if args.host_name:
        scanner = OrkaAnsibleInventory()
        scanner.get_current_vm_data()
        scanner.get_vm_by_host_name(args.host_name)
        scanner.create_inventory()
    elif args.list:
        scanner = OrkaAnsibleInventory()
        scanner.get_current_vm_data()
        scanner.get_deployed_vms()
        # Optional narrowing via the ANSIBLE_NAME_CONTAINS environment value.
        if name_contains:
            scanner.get_name_contains_vms(name_contains)
        scanner.create_inventory()
    else:
        print('Warning: you must pass either `--list` or `--host <hostname>` argument.')
if __name__ == '__main__':
    # Script entry point: Ansible invokes this file directly with --list or
    # --host; ANSIBLE_NAME_CONTAINS optionally narrows the listed hosts.
    args = parse_args()
    name_contains = os.environ.get('ANSIBLE_NAME_CONTAINS')
    main(args, name_contains)
| 32.883721 | 99 | 0.610797 | 2,975 | 0.70132 | 0 | 0 | 0 | 0 | 0 | 0 | 1,579 | 0.37223 |
35c890b336369e56ae920d6e9a1f19392e8e1399 | 8,647 | py | Python | tests/test_release_summary.py | kids-first/kf-task-release-reports | 6c973fc82b7dab89c45832dd0b61a1ef85a5ebf7 | [
"Apache-2.0"
] | null | null | null | tests/test_release_summary.py | kids-first/kf-task-release-reports | 6c973fc82b7dab89c45832dd0b61a1ef85a5ebf7 | [
"Apache-2.0"
] | 11 | 2018-10-26T17:09:17.000Z | 2019-09-23T14:53:53.000Z | tests/test_release_summary.py | kids-first/kf-task-release-reports | 6c973fc82b7dab89c45832dd0b61a1ef85a5ebf7 | [
"Apache-2.0"
] | null | null | null | import boto3
import datetime
import requests
import pytest
from unittest.mock import patch
from reports.reporting import release_summary
from collections import Counter
from functools import partial
# Entity endpoints counted per study by the release-summary report.
ENTITIES = [
    'participants',
    'biospecimens',
    'phenotypes',
    'genomic-files',
    'study-files',
    'read-groups',
    'diagnoses',
    'sequencing-experiments',
    'families'
]
def test_get_studies(client, mocked_apis):
    """Studies, version, and state are read from the release coordinator."""
    # Removed two dead `db = boto3...` assignments (the first was immediately
    # overwritten and neither was used) and fixed a copy-pasted docstring.
    with patch('requests.get') as mock_request:
        mock_request.side_effect = mocked_apis
        studies, version, state = release_summary.get_studies('RE_00000000')
        assert studies == ['SD_00000000']
        assert version == '0.0.0'
        assert state == 'staged'
        mock_request.assert_called_with(
            'http://coordinator/releases/RE_00000000?limit=100',
            timeout=10)
@pytest.mark.parametrize("entity", ENTITIES)
def test_entity_counts(client, entity, mocked_apis):
    """Each entity endpoint reports exactly one item for the mocked study."""
    with patch('requests.get') as mock_request:
        mock_request.side_effect = mocked_apis
        r = release_summary.count_entity('SD_00000000', entity)
        assert r == 1
def test_count_study(client, mocked_apis):
    """Entities are counted within a single study."""
    with patch('requests.get') as mock_request:
        mock_request.side_effect = mocked_apis
        counts = release_summary.count_study('SD_00000000')
        assert counts == {entity: 1 for entity in ENTITIES}
def test_count_studies(client, mocked_apis):
    """Study counts are aggregated across studies."""
    with patch('requests.get') as mock_request:
        mock_request.side_effect = mocked_apis
        study_ids = ['SD_00000000', 'SD_00000001']
        per_study = {
            study: Counter(release_summary.count_study(study))
            for study in study_ids
        }
        totals = release_summary.collect_counts(per_study)
        assert totals == {key: 2 for key in ENTITIES + ['studies']}
def test_run(client, mocked_apis):
    """Running a release summary persists release and study rows to DynamoDB."""
    db = boto3.resource('dynamodb')
    release_table = db.Table('release-summary')
    study_table = db.Table('study-summary')
    with patch('requests.get') as mock_request:
        mock_request.side_effect = mocked_apis
        r = release_summary.run('TA_00000000', 'RE_00000000')
        assert all(k in r for k in ENTITIES)
        assert r['release_id'] == 'RE_00000000'
        assert r['task_id'] == 'TA_00000000'
        # One summary row per release and one per (release, study) pair.
        assert release_table.item_count == 1
        assert study_table.item_count == 1
        st = study_table.get_item(Key={
            'release_id': 'RE_00000000',
            'study_id': 'SD_00000000'
        })['Item']
        assert st['study_id'] == 'SD_00000000'
        assert st['version'] == '0.0.0'
        assert st['state'] == 'staged'
        assert all(st[k] == 1 for k in ENTITIES)
def test_get_report(client, mocked_apis):
    """ Test that api returns release summary """
    db = boto3.resource('dynamodb')
    table = db.Table('release-summary')
    with patch('requests.get') as mock_request:
        mock_request.side_effect = mocked_apis
        release_summary.run('TA_00000000', 'RE_00000000')
        assert table.item_count == 1
        resp = client.get('/reports/releases/RE_00000000')
        assert all(k in resp.json for k in ENTITIES)
        assert all(resp.json[k] == 1 for k in ENTITIES)
        assert resp.json['release_id'] == 'RE_00000000'
        # Fixed stray `resp .json` spacing and removed a trailing dead
        # assignment (`st = ...`) that asserted nothing.
        assert resp.json['task_id'] == 'TA_00000000'
        assert 'SD_00000000' in resp.json['study_summaries']
def test_report_not_found(client, mocked_apis):
    """An unknown release yields 404 with an explanatory message."""
    resp = client.get('/reports/releases/RE_XXXXXXXX')
    assert resp.status_code == 404
    assert 'could not find a report for release RE_' in resp.json['message']
def test_publish(client, mocked_apis):
    """ Test that release and study summary rows are updated upon publish """
    db = boto3.resource('dynamodb')
    release_table = db.Table('release-summary')
    study_table = db.Table('study-summary')
    def _test_summaries(state, version):
        # Shared assertions: both tables hold exactly one row with the
        # expected state/version for this release.
        assert release_table.item_count == 1
        re = release_table.get_item(Key={
            'release_id': 'RE_00000000',
            'study_id': 'SD_00000000'
        })['Item']
        assert re['release_id'] == 'RE_00000000'
        assert re['version'] == version
        assert re['state'] == state
        assert study_table.item_count == 1
        st = study_table.get_item(Key={
            'release_id': 'RE_00000000',
            'study_id': 'SD_00000000'
        })['Item']
        assert st['study_id'] == 'SD_00000000'
        assert st['version'] == version
        assert st['state'] == state
        assert all(st[k] == 1 for k in ENTITIES)
    with patch('requests.get') as mock_request:
        # The release has been run as candidate release 0.0.3
        mock_request.side_effect = partial(mocked_apis, version='0.0.3')
        r = release_summary.run('TA_00000000', 'RE_00000000')
        _test_summaries('staged', '0.0.3')
        # Now the release has been published and its version number bumped
        # in the coordinator to 0.1.0
        mock_request.side_effect = partial(mocked_apis, version='0.1.0')
        r = release_summary.publish('RE_00000000')
        _test_summaries('published', '0.1.0')
def test_publish_does_not_exist(client, mocked_apis):
    """
    Test behavior if a release is published and one of the summary rows
    do not exist
    """
    db = boto3.resource('dynamodb')
    release_table = db.Table('release-summary')
    study_table = db.Table('study-summary')
    with patch('requests.get') as mock_request:
        mock_request.side_effect = partial(mocked_apis, version='0.0.3')
        r = release_summary.run('TA_00000000', 'RE_00000000')
        assert release_table.item_count == 1
        assert study_table.item_count == 1
        # Now delete the summaries, as if it never existed
        release_table.delete_item(Key={
            'release_id': 'RE_00000000'
        })
        study_table.delete_item(Key={
            'release_id': 'RE_00000000',
            'study_id': 'SD_00000000'
        })
        assert release_table.item_count == 0
        assert study_table.item_count == 0
        # Publish the release
        mock_request.side_effect = partial(mocked_apis, version='0.1.0')
        r = release_summary.publish('RE_00000000')
        # There should still be no summary rows: publish must only update
        # existing rows, never recreate deleted ones.
        assert release_table.item_count == 0
        assert study_table.item_count == 0
def test_get_report_per_study(client, mocked_apis):
    """ Test that api returns release summary for specific study"""
    db = boto3.resource('dynamodb')
    table = db.Table('release-summary')
    with patch('requests.get') as mock_request:
        mock_request.side_effect = mocked_apis
        s = release_summary.run('TA_00000000', 'RE_00000000')
        assert table.item_count == 1
        resp = client.get('/reports/RE_00000000/SD_00000000')
        assert all(k in resp.json for k in ENTITIES)
        assert all(resp.json[k] == 1 for k in ENTITIES)
        assert resp.json['release_id'] == 'RE_00000000'
        assert resp .json['task_id'] == 'TA_00000000'
        assert 'SD_00000000' in resp.json['study_id']
def test_get_report_per_study_not_found(client):
    """404 and explanatory message when the study report is missing."""
    resp = client.get('/reports/RE_XXXXXXXX/SD_XXXXXXXX')
    assert resp.status_code == 404
    # The original asserted a bare string literal (always truthy) and left
    # the membership check on the next line as an unasserted expression.
    # Join the two fragments into one real assertion.
    assert ('could not find study'
            ' report for release RE_') in resp.json['message']
    assert 'and study id SD_' in resp.json['message']
def test_get_report_per_study_filter_by_state(client, mocked_apis):
    """API returns a study's release summaries filtered by release state."""
    db = boto3.resource('dynamodb')
    table = db.Table('release-summary')
    with patch('requests.get') as mock_request:
        mock_request.side_effect = mocked_apis
        release_summary.run('TA_00000000', 'RE_00000000')
        assert table.item_count == 1
        resp = client.get('/reports/studies/SD_00000000?state=staged')
        r1 = resp.json['releases'][0]['RE_00000000']
        assert all(k in r1 for k in ENTITIES)
        # Rejoined an assert that was awkwardly split mid-subscript.
        assert all(r1[k] == 1 for k in ENTITIES)
        assert r1['release_id'] == 'RE_00000000'
        assert r1['task_id'] == 'TA_00000000'
        assert 'SD_00000000' in r1['study_id']
def test_get_report_per_study_filter_by_state_not_found(client):
    """404 and explanatory message when no report matches the state filter."""
    resp = client.get('/reports/studies/SD_XXXXXXXX?state=published')
    assert resp.status_code == 404
    # The original asserted a bare string literal (always truthy) and left
    # the membership check unasserted; fold both into one assertion.
    assert ('could not find study'
            ' report for study SD_') in resp.json['message']
| 35.731405 | 77 | 0.663814 | 0 | 0 | 0 | 0 | 278 | 0.03215 | 0 | 0 | 2,831 | 0.327397 |
35cadb5be4b90796980534e95cf42d32733a5cb1 | 875 | py | Python | apps/payroll/models/employee.py | youssriaboelseod/pyerp | 9ef9873e2ff340010656f0c518bccf9d7a14dbaa | [
"MIT"
] | 115 | 2019-08-18T16:12:54.000Z | 2022-03-29T14:17:20.000Z | apps/payroll/models/employee.py | youssriaboelseod/pyerp | 9ef9873e2ff340010656f0c518bccf9d7a14dbaa | [
"MIT"
] | 22 | 2019-09-09T01:34:54.000Z | 2022-03-12T00:33:40.000Z | apps/payroll/models/employee.py | youssriaboelseod/pyerp | 9ef9873e2ff340010656f0c518bccf9d7a14dbaa | [
"MIT"
] | 83 | 2019-08-17T17:09:20.000Z | 2022-03-25T04:46:53.000Z | # Django Library
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
# Thirdparty Library
from apps.base.models import PyFather
# Tabla de Empleados
class PyEmployee(PyFather):
    """Payroll employee model.

    Field labels (``verbose_name``) are user-facing strings and stay in
    Spanish: Nombre = first name, Apellido Paterno/Materno = paternal /
    maternal surname, Teléfono = phone, Correo = e-mail.
    """
    name = models.CharField('Nombre', max_length=80)
    name2 = models.CharField('Segundo Nombre', max_length=80, blank=True)
    first_name = models.CharField('Apellido Paterno', max_length=80, blank=True)
    last_name = models.CharField('Apellido Materno', max_length=80, blank=True)
    phone = models.CharField('Teléfono', max_length=20, blank=True)
    email = models.CharField('Correo', max_length=40, blank=True)
    def get_absolute_url(self):
        """Return the canonical detail URL for this employee."""
        return reverse('payroll:employee-detail', kwargs={'pk': self.pk})
    def __str__(self):
        return format(self.name)
| 35 | 80 | 0.742857 | 614 | 0.700913 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.187215 |
35cba22e1908f51ae40f6b613a82f59b7cb71986 | 89 | py | Python | apps/players/apps.py | xeroz/admin-django | e10c5eb87f9aea7a714e5be2307969a3b998706c | [
"MIT"
] | 12 | 2018-03-20T21:38:53.000Z | 2021-10-31T10:00:12.000Z | apps/players/apps.py | xeroz/admin-django | e10c5eb87f9aea7a714e5be2307969a3b998706c | [
"MIT"
] | 79 | 2018-03-18T14:26:47.000Z | 2022-03-01T15:51:40.000Z | apps/players/apps.py | xeroz/admin-django | e10c5eb87f9aea7a714e5be2307969a3b998706c | [
"MIT"
] | 4 | 2018-05-18T15:39:56.000Z | 2020-10-29T09:28:41.000Z | from django.apps import AppConfig
class PlayersConfig(AppConfig):
    """Django application configuration for the `players` app."""
    name = 'players'
| 14.833333 | 33 | 0.752809 | 52 | 0.58427 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.101124 |
35cbb4dfe9729e65b9b76b8a0c6acb3a8449dd0a | 8,063 | py | Python | tas/__main__.py | lispsil/tas | cccc48b3de23050acd094d62b9dae9ff2db81ca8 | [
"MIT"
] | 1 | 2020-03-12T15:29:41.000Z | 2020-03-12T15:29:41.000Z | tas/__main__.py | lispsil/tas | cccc48b3de23050acd094d62b9dae9ff2db81ca8 | [
"MIT"
] | null | null | null | tas/__main__.py | lispsil/tas | cccc48b3de23050acd094d62b9dae9ff2db81ca8 | [
"MIT"
] | 1 | 2021-10-10T18:21:24.000Z | 2021-10-10T18:21:24.000Z | import os
import signal
import atexit
import json
import time
from pathlib import Path
import subprocess
import argparse
import pprint
from distutils.util import strtobool
children_pid = []
@atexit.register
def kill_child():
    """At interpreter exit, terminate any still-running child processes."""
    for child_pid in children_pid:
        try:
            os.kill(child_pid, signal.SIGTERM)
        except ProcessLookupError:
            # The child already exited on its own; nothing to clean up.
            # (The original raised here, turning a normal shutdown into a
            # traceback.)
            pass
# Only the leading positional command is parsed here; per-command flags are
# parsed later by get_args(), which reuses this parser as a parent.
cmd_parser = argparse.ArgumentParser(add_help=False)
cmd_parser.add_argument(
    "cmd", type=str, choices=["new", "del", "run", "set", "list"], help="Main command",
)
# parse_known_args so per-command options do not error out at this stage.
args = cmd_parser.parse_known_args()[0]
# Settings file lives in the user's home directory.
PATH_TO_SETTINGS = Path.home() / Path(".tas.json")
def get_default_settings():
    """Return the factory-default settings structure.

    Contains one namespace, one project template, and an empty project list.
    """
    default_namespace = {
        "name": "default",
        "path": str(Path.home() / Path("Documents/tas_projects/")),
    }
    default_template = {
        "name": "default",
        "actions": [
            {"type": "venv"},
            {"type": "dir", "path": "py"},
            {"type": "dir", "path": "py/src"},
            {"type": "dir", "path": "sql"},
            {"type": "dir", "path": "resources"},
            {"type": "file", "path": "README.md"},
            {"type": "requirements", "packages": ["jupyter"]},
            {"type": "file_link", "url": ""},
        ],
    }
    return {
        "namespaces": [default_namespace],
        "templates": [default_template],
        "projects": [],
    }
def load_settings():
    """Load settings from disk, falling back to defaults when absent."""
    if not PATH_TO_SETTINGS.exists():
        return get_default_settings()
    with PATH_TO_SETTINGS.open() as settings_file:
        return json.load(settings_file)
def save_settings():
    """Persist the in-memory `settings` to PATH_TO_SETTINGS as pretty JSON."""
    with PATH_TO_SETTINGS.open("w+") as settings_file:
        json.dump(settings, settings_file, ensure_ascii=False, indent=4)
def lookup_in_list_of_dicts(l, name, return_index=False):
    """Find the first dict in `l` whose 'name' key equals `name`.

    Returns the dict (or ``(index, dict)`` when `return_index` is true);
    when nothing matches, returns ``None`` (or ``(None, None)``).
    """
    for idx, entry in enumerate(l):
        if entry["name"] == name:
            return (idx, entry) if return_index else entry
    return (None, None) if return_index else None
def get_proj(args, should_exist, ns_path):
    """Resolve the project named `<namespace>.<name>` from settings.

    Raises when the project exists but should not (``new``) or is missing
    but should exist (``run`` / ``del``). Returns the stored project entry,
    or a fresh ``{'name', 'path'}`` dict for a not-yet-registered project.
    """
    proj_name = f"{args.namespace}.{args.name}"
    existing = lookup_in_list_of_dicts(settings["projects"], proj_name)
    if existing and not should_exist:
        raise Exception("Project already exists!")
    if not existing and should_exist:
        raise Exception("Project not found!")
    if existing:
        return existing
    return {"name": proj_name, "path": Path(ns_path) / Path(args.name)}
def get_args(cmd):
    """Build the full argument parser for subcommand `cmd` and parse sys.argv."""
    # TODO: create allowed combinations of args
    if cmd == "set":
        # set <namespace> <path>: create or repoint a namespace.
        args_parser = argparse.ArgumentParser(parents=[cmd_parser])
        args_parser.add_argument("namespace", type=str, help="Namespace")
        args_parser.add_argument("path", type=str, help="PosixPath")
    elif cmd == "del":
        # del <name> [-ns NS] <n|p>: delete a namespace or a project entry.
        args_parser = argparse.ArgumentParser(parents=[cmd_parser])
        args_parser.add_argument("name", type=str, help="Name of an object")
        args_parser.add_argument(
            "-namespace", "-ns", type=str, default="default", help="Namespace"
        )
        args_parser.add_argument("type", type=str, choices=["n", "p"], default="p")
    elif cmd == "list":
        # list <n|t|p|a>: namespaces, templates, projects, or all settings.
        args_parser = argparse.ArgumentParser(parents=[cmd_parser])
        args_parser.add_argument(
            "type", type=str, choices=["n", "t", "p", "a"], default="p"
        )
    elif cmd == "new":
        # new <name> [-t TEMPLATE] [-ns NS] [-p PATH]: scaffold a project.
        args_parser = argparse.ArgumentParser(parents=[cmd_parser])
        args_parser.add_argument("name", type=str, help="Name")
        args_parser.add_argument(
            "-template", "-t", type=str, default="default", help="Template"
        )
        args_parser.add_argument(
            "-namespace", "-ns", type=str, default="default", help="Namespace"
        )
        args_parser.add_argument("-path", "-p", type=str, help="PosixPath")
    elif cmd == "run":
        # run <name> [-ns NS]: launch jupyter inside the project's venv.
        args_parser = argparse.ArgumentParser(parents=[cmd_parser])
        args_parser.add_argument(
            "-namespace", "-ns", type=str, default="default", help="Namespace"
        )
        args_parser.add_argument("name", type=str, help="Project name")
    return args_parser.parse_args()
def interactive_y_n(question):
    """Prompt until the user answers yes or no; return True for yes.

    Replaces distutils.util.strtobool (distutils was removed in Python
    3.12) with an equivalent local mapping; the accepted tokens and the
    prompt/error strings are unchanged.
    """
    truthy = {"y", "yes", "t", "true", "on", "1"}
    falsy = {"n", "no", "f", "false", "off", "0"}
    while True:
        reply = str(input(question + " (y/n): ")).lower().strip()
        if reply in truthy:
            return True
        if reply in falsy:
            return False
        pprint.pprint("Please enter yes or no!")
settings = load_settings()
if __name__ == "__main__":
    extra_args = get_args(args.cmd)
    if args.cmd == "set":
        # TODO: make it interactive?
        ns_id, ns = lookup_in_list_of_dicts(
            settings["namespaces"], extra_args.namespace, return_index=True
        )
        if ns_id is not None:
            # Existing namespace: repoint its path.
            settings["namespaces"][ns_id] = {**ns, "path": extra_args.path}
        else:
            settings["namespaces"].append(
                {"name": extra_args.namespace, "path": extra_args.path}
            )
        save_settings()
    elif args.cmd == "del":
        # TODO: interactive and delete projects
        if extra_args.type == "n":
            target = "namespaces"
            # Bug fix: `args` (from cmd_parser) only carries `cmd`; the
            # object name lives on the per-command parse result.
            name = extra_args.name
        elif extra_args.type == "p":
            target = "projects"
            ns = lookup_in_list_of_dicts(settings["namespaces"], extra_args.namespace)
            proj = get_proj(extra_args, True, ns["path"])
            name = proj["name"]
        target_id, ns = lookup_in_list_of_dicts(
            settings[target], name, return_index=True
        )
        if target_id is None:
            raise Exception("No such name!")
        del settings[target][target_id]
        save_settings()
    elif args.cmd == "list":
        if extra_args.type == "n":
            pprint.pprint(settings["namespaces"])
        elif extra_args.type == "p":
            pprint.pprint(settings["projects"])
        elif extra_args.type == "t":
            pprint.pprint(settings["templates"])
        elif extra_args.type == "a":
            pprint.pprint(settings)
    elif args.cmd == "new":
        ns = lookup_in_list_of_dicts(settings["namespaces"], extra_args.namespace)
        proj = get_proj(extra_args, False, ns["path"])
        template = lookup_in_list_of_dicts(settings["templates"], extra_args.template)
        if proj["path"].exists():
            if not interactive_y_n("Path already exists. Should we proceed?"):
                exit()
        else:
            proj["path"].mkdir(parents=True)
        # Apply the template's actions; note the "file_link" action type in
        # the default template is not handled here (silently skipped).
        for action in template["actions"]:
            if action["type"] == "dir":
                (proj["path"] / Path(action["path"])).mkdir(
                    parents=False, exist_ok=True
                )
            elif action["type"] == "file":
                filepath = proj["path"] / Path(action["path"])
                filepath.touch()
            elif action["type"] == "requirements":
                os.chdir(proj["path"])
                subprocess.call(
                    [
                        "python",
                        "-m",
                        "venv",
                        "--system-site-packages",
                        str(proj["path"] / Path("env")),
                    ]
                )
                if action["packages"]:
                    # POSIX-only layout; Windows venvs use env/Scripts.
                    subprocess.call(
                        ["./env/bin/python", "-m", "pip", "install"]
                        + action["packages"]
                    )
                filepath = proj["path"] / Path("requirements.txt")
                with filepath.open("w+") as f:
                    f.write("\n".join(action["packages"]))
        settings["projects"].append({"name": proj["name"], "path": str(proj["path"])})
        save_settings()
    elif args.cmd == "run":
        ns = lookup_in_list_of_dicts(settings["namespaces"], extra_args.namespace)
        proj = get_proj(extra_args, True, ns["path"])
        os.chdir(Path(proj["path"]))
        child = subprocess.Popen(
            ["./env/bin/python", "-m", "jupyter", "notebook", "--log-level=0"]
        )
        # Track the pid so kill_child() can SIGTERM it at exit.
        children_pid.append(child.pid)
        time.sleep(2)
        while not interactive_y_n("Would you like to end?"):
            continue
| 36.156951 | 87 | 0.549423 | 0 | 0 | 0 | 0 | 112 | 0.013891 | 0 | 0 | 1,592 | 0.197445 |
35cc4a008448b3ebfc8b1a072bfd124465d46230 | 2,410 | py | Python | 1._metis_models_and_data_2019-12-16b/METIS source code/Indicators/kpis/Transmission usage.py | tamas-borbath/METIS | cf3dc0f0f96ccb6ad72ddf15ba7e31ddc6339036 | [
"CC-BY-4.0"
] | 1 | 2021-12-07T09:05:48.000Z | 2021-12-07T09:05:48.000Z | 1._metis_models_and_data_2019-12-16b/METIS source code/Indicators/kpis/Transmission usage.py | tamas-borbath/METIS_data_and_scripts | cf3dc0f0f96ccb6ad72ddf15ba7e31ddc6339036 | [
"CC-BY-4.0"
] | null | null | null | 1._metis_models_and_data_2019-12-16b/METIS source code/Indicators/kpis/Transmission usage.py | tamas-borbath/METIS_data_and_scripts | cf3dc0f0f96ccb6ad72ddf15ba7e31ddc6339036 | [
"CC-BY-4.0"
] | null | null | null | ########################################################
# Copyright (c) 2015-2017 by European Commission. #
# All Rights Reserved. #
########################################################
extends("BaseKPI.py")
"""
Transmission usage (%)
----------------------
Indexed by
* scope
* delivery point (dummy)
* energy
* test case
* transmission
The instant transmission usage of an interconnection is the ratio of electricity or gas flowing through the transmission over its capacity.
The KPI computes the yearly average value of instant transmission usage, for a given transmission:
.. math:: transmissionUsage_{transmission} = \\small \\frac{mean(instantTransmissionUsage^{transmission})}{installedCapacity^{transmission}} (\\%)
"""
def computeIndicator(context, indexFilter, paramsIndicator, kpiDict):
timeStepDuration = getTimeStepDurationInHours(context)
selectedScopes = indexFilter.filterIndexList(0, getScopes())
selectedEnergies = indexFilter.filterIndexList(1, getEnergies(context, includedEnergies = {ELECTRICITY, GAS}))
selectedTestCases = indexFilter.filterIndexList(2, context.getResultsIndexSet())
selectedAssets = indexFilter.filterIndexList(3, getAssets(context, includedTechnologies = TRANSMISSION_TYPES))
selectedAssetsByScope = getAssetsByScope(context, selectedScopes, includedAssetsName = selectedAssets)
capacitiesDict = getTransmissionCapacity(context, selectedScopes, selectedTestCases, selectedEnergies, selectedAssetsByScope)
transmissionDict = getTransmittedEnergy(context, selectedScopes, selectedTestCases, selectedEnergies, selectedAssetsByScope)
for index in capacitiesDict:
totalCapacity = capacitiesDict[index].getSumValue()
if totalCapacity != 0:
kpiDict[index] = 100 * transmissionDict[index].getSumValue() / totalCapacity
return kpiDict
def get_indexing(context):
baseIndexList = [getScopesIndexing(), getEnergiesIndexing(context, includedEnergies = {ELECTRICITY, GAS}), getTestCasesIndexing(context), getAssetsIndexing(context, includedTechnologies = TRANSMISSION_TYPES)]
return baseIndexList
IndicatorLabel = "Transmission usage"
IndicatorUnit = "%"
IndicatorDeltaUnit = "%"
IndicatorDescription = "Usage of a transmission"
IndicatorParameters = []
IndicatorIcon = ""
IndicatorCategory = "Results>Transmission"
IndicatorTags = "Power System, Gas System, Power Markets" | 41.551724 | 209 | 0.734855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 881 | 0.36556 |
35cc8dce8cfa78125ee76beb2078c100cbc1294f | 672 | py | Python | apps/bloguser/migrations/0003_auto_20180505_1717.py | dryprojects/MyBlog | ec04ba2bc658e96cddeb1d4766047ca8e89ff656 | [
"BSD-3-Clause"
] | 2 | 2021-08-17T13:29:21.000Z | 2021-09-04T05:00:01.000Z | apps/bloguser/migrations/0003_auto_20180505_1717.py | dryprojects/MyBlog | ec04ba2bc658e96cddeb1d4766047ca8e89ff656 | [
"BSD-3-Clause"
] | 1 | 2020-07-16T11:22:32.000Z | 2020-07-16T11:22:32.000Z | apps/bloguser/migrations/0003_auto_20180505_1717.py | dryprojects/MyBlog | ec04ba2bc658e96cddeb1d4766047ca8e89ff656 | [
"BSD-3-Clause"
] | 1 | 2020-09-18T10:41:59.000Z | 2020-09-18T10:41:59.000Z | # Generated by Django 2.0.3 on 2018-05-05 17:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bloguser', '0002_auto_20180504_1808'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='image_url',
field=models.CharField(default='', max_length=100, verbose_name='用户头像url'),
),
migrations.AlterField(
model_name='userprofile',
name='image',
field=models.ImageField(blank=True, default='bloguser/avatar.png', upload_to='bloguser/images/%Y/%m', verbose_name='用户头像'),
),
]
| 28 | 135 | 0.610119 | 595 | 0.864826 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.295058 |
35ccd2a1b21c453583e810926aac3c4fcc8ecf4b | 10,808 | py | Python | qrcodescanner.py | globophobe/pygame-qrcode-demo | 3a375b4e658712fed56df0005314bdd0d92ccfff | [
"MIT"
] | 3 | 2015-04-08T12:54:06.000Z | 2016-08-13T04:32:40.000Z | qrcodescanner.py | globophobe/qrcodescanner-base | 3a375b4e658712fed56df0005314bdd0d92ccfff | [
"MIT"
] | null | null | null | qrcodescanner.py | globophobe/qrcodescanner-base | 3a375b4e658712fed56df0005314bdd0d92ccfff | [
"MIT"
] | 3 | 2018-01-16T02:49:18.000Z | 2019-05-21T19:52:34.000Z | # -*- coding: utf-8 -*-
import os
import datetime
import logging
import requests
import numpy
import cv2
import zbar
from Queue import Queue
from threading import Thread
from PIL import Image
logger = logging.getLogger(__name__)
# Scratch directory for pictures queued for upload.
TEMP_DIR = os.path.join(os.getcwd(), 'temp')
def get_temp_dir():
    """Create TEMP_DIR if it doesn't exist and return its path."""
    # makedirs(exist_ok=True) is atomic with respect to the existence check,
    # avoiding the check-then-create race of the previous exists()/mkdir().
    os.makedirs(TEMP_DIR, exist_ok=True)
    return TEMP_DIR
def thumbnail(picture, size=0.50):
    """Shrink `picture` in place to `size` fraction of each dimension."""
    width, height = picture.size
    target = (int(width * size), int(height * size))
    # NOTE: Image.ANTIALIAS is deprecated/removed in newer Pillow releases.
    picture.thumbnail(target, Image.ANTIALIAS)
    return picture
def save_picture(picture, path, filename):
    """Save `picture` under `path` as an optimized JPEG; return the full path."""
    # StringIO proved unsatisfactory here (in-memory size did not match the
    # on-disk size), so the picture is written to the filesystem instead.
    destination = os.path.join(path, filename)
    picture.save(destination, optimize=True, format='JPEG')
    return destination
def delete_picture(path):
    """Best-effort removal of `path`; missing or locked files are ignored.

    Windows can refuse deletion of in-use files, hence the tolerance, but
    only OS-level errors are swallowed now — the original bare `except`
    also hid KeyboardInterrupt/SystemExit and programming errors.
    """
    try:
        os.remove(path)
    except OSError:
        pass
def prepare_msg(qrcode, picture, timestamp):
    """Prepare message to send to server.

    Returns (filename, form-data dict, files dict); the picture is written
    to the temp dir so the caller can delete it after the upload.
    """
    timestamp = datetime.datetime.strftime(timestamp, '%Y%m%d%H%M%S%f')
    filename = '{}.jpeg'.format(timestamp)
    temp_storage = save_picture(picture, get_temp_dir(), filename)
    data = dict(qrcode=qrcode, timestamp=timestamp)
    # NOTE(review): this maps 'picture' to the file *path* string, not an
    # open file object; requests would upload the path text rather than the
    # image bytes. Confirm against the server before changing.
    files = {'picture': temp_storage}
    return filename, data, files
def server_auth(queue, url, qrcode, picture, timestamp, timeout=5):
    """Send message to server for auth; put the parsed response on `queue`.

    On network timeout the queued response is ``{'network_timeout': True}``;
    on any other request failure it is ``None``. The temp picture is always
    deleted afterwards.
    """
    filename, data, files = prepare_msg(qrcode, picture, timestamp)
    try:
        # Bug fix: the post-request check read `logger.getEffectiveLevel >=
        # logging.INFO` (missing call parentheses), which raised a TypeError
        # that the broad `except` below silently turned into a None
        # response even when the POST succeeded. Evaluate the condition
        # once, with the call, and reuse it so `start` is always defined
        # when `end` is.
        profiling = logger.getEffectiveLevel() >= logging.INFO
        if profiling:
            # Profile the request
            start = datetime.datetime.now()
        r = requests.post(url, data=data, files=files, timeout=timeout)
        if profiling:
            end = datetime.datetime.now()
            elapsed_time = (end - start).total_seconds()
            logger.info('Elapsed time was {} seconds'.format(elapsed_time))
    except Exception as e:
        response = None
        # Did the request timeout?
        if isinstance(e, requests.exceptions.Timeout):
            response = dict(network_timeout=True)
    else:
        response = r.json()
    finally:
        delete_picture(os.path.join(get_temp_dir(), filename))
        queue.put(response)
class QRCodeScanner(object):
def __init__(
self,
url=None,
max_responses=2,
timeout=5,
ok_color=(0, 0, 255),
not_ok_color=(255, 0, 0),
box_width=1,
debug=False
):
self.url = url
self.timeout = timeout
self.max_responses
self.thread = None
self.queue = Queue()
# Init zbar.
self.scanner = zbar.ImageScanner()
# Disable all zbar symbols.
self.scanner.set_config(0, zbar.Config.ENABLE, 0)
# Enable QRCodes.
self.scanner.set_config(zbar.Symbol.QRCODE, zbar.Config.ENABLE, 1)
# Highlight scanned QR Codes.
self.ok_color = ok_color
self.not_ok_color = not_ok_color
self.box_width = box_width
self.successes = 0
self.debug = debug
    def main(self, frame, timestamp):
        """Per-frame pipeline: scan, authorize, annotate, drain responses."""
        self.before_zbar(timestamp)
        # self.zbar / after_zbar / process_results_from_queue are defined
        # outside this view; presumably they decode codes, draw highlights,
        # and consume server replies -- TODO confirm.
        frame, qrcodes = self.zbar(frame)
        if len(qrcodes) > 0:
            self.auth(frame, qrcodes, timestamp)
        frame = self.after_zbar(frame, qrcodes, timestamp)
        self.process_results_from_queue(timestamp)
        return frame
def auth(self, frame, qrcodes, timestamp):
"""Auth with server"""
if self.url is not None:
qrcode = self.get_next_qrcode(frame, qrcodes)
if qrcode is not None:
if len(self.responses) > self.max_responses:
frame = Image.fromarray(frame)
self.launch_thread(self.url, qrcode, frame, timestamp)
def get_next_qrcode(self, frame, qrcodes):
"""Returns the largest valid QR code, which is neither the
active QR code nor throttled"""
height, width = frame.shape[:2]
frame_size = width * height
target = None
targets = [
dict(
qrcode=qrcode,
size=self.qrcode_size(qrcodes[qrcode])
)
for qrcode in qrcodes
]
targets = sorted(targets, key=lambda k: k['size'])
for target in targets:
qrcode = target['qrcode']
qrcode_size = target['size'] / frame_size
qrcode_size = round(qrcode_size, 4)
if self.debug:
logger.info('QRcode percent of frame: {}%'.format(
qrcode_size
))
# Throttle requests for the same QR code.
if self.active_qrcode != qrcode:
# Throttle requests for cached QR codes.
if not self.is_qrcode_throttled(qrcode):
# Ensure the QR code is valid.
is_valid = self.is_valid_qrcode(qrcode)
if self.debug:
logger.info('QRcode is valid: {}'.format(is_valid))
if is_valid:
if self.max_qrcode_size > 0:
if qrcode_size > self.max_qrcode_size:
self.max_size_exceeded = True
break
if not self.max_size_exceeded:
return qrcode
def is_valid_qrcode(self, qrcode):
"""Intended to be overriden by subclass."""
return True if qrcode is not None else False
def is_qrcode_throttled(self, qrcode):
for throttle in (self.ok_throttle_dict, self.not_ok_throttle_dict):
if qrcode in throttle:
return True
def get_qrcode_size(self, qrcode):
contour = numpy.array(qrcode, dtype=numpy.int32)
return cv2.contourArea(contour)
def before_zbar(self, timestamp):
"""Remove expired QR codes from throttle dict"""
for throttle in (self.ok_throttle_dict, self.not_ok_throttle_dict):
delete = []
for qrcode in throttle:
expired = (throttle[qrcode] <= datetime.datetime.now())
if expired:
delete.append(qrcode)
for qrcode in delete:
del throttle[qrcode]
def zbar(self, frame):
"""Scan frame using ZBar"""
qrcodes = {}
# Convert to grayscale, as binarization requires
gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# Apply Otsu Binarization
_, threshold = cv2.threshold(
gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU
)
try:
# Convert to string, as ZBar requires
pil_image = Image.fromarray(threshold)
width, height = pil_image.size
raw = pil_image.tostring()
except:
logger.error('Error converting to PIL image')
else:
try:
image = zbar.Image(width, height, 'Y800', raw)
except:
logger.error('Error converting to ZBar image')
else:
self.scanner.scan(image)
for qrcode in image:
location = []
for point in qrcode.location:
location.append(point)
qrcodes[qrcode.data] = location
if self.debug:
self.successes += 1
if self.debug:
frame = cv2.cvtColor(threshold, cv2.COLOR_GRAY2RGB)
return frame, qrcodes
def after_zbar(self, frame, qrcodes, timestamp):
"""Intended to be overridden by subclass. Currently, draws boxes
around QR codes"""
frame = self.draw_boxes(qrcodes, frame)
return frame
def draw_box(self, frame, location, color, width):
"""Draw a box around around QR code"""
for index in range(len(location)):
if (index + 1) == len(location):
next_index = 0
else:
next_index = index + 1
# From OpenCV 3.0.0, cv2.LINE_AA was renamed cv2.CV_AA
if cv2.__version__ >= '3.0.0':
cv2.line(
frame,
location[index], location[next_index],
color,
width,
lineType=cv2.LINE_AA
)
else:
cv2.line(
frame,
location[index], location[next_index],
color,
width,
cv2.CV_AA
)
return frame
def is_thread_running(self):
"""Check if the thread is running"""
# Is a thread active?
if self.thread is not None:
if self.thread.is_alive():
return True
def launch_thread(self, url, qrcode, frame, timestamp):
"""Launch a thread to auth against server with requests library"""
try:
self.thread = Thread(
target=server_auth,
args=(
self.queue,
url,
qrcode,
Image.fromarray(frame),
timestamp
)
).start()
except:
logger.error('Thread failed to start')
else:
self.after_thread_started(qrcode, timestamp)
def after_thread_started(self, qrcode, timestamp):
"""Runs after thread is started. Throttles not OK results"""
# Throttle requests
self.not_ok_throttle_dict[qrcode] = (
timestamp + datetime.timedelta(seconds=self.not_ok_throttle)
)
self.active_qrcode = qrcode
logger.info('Sent QRcode to server {}'.format(self.active_qrcode))
def process_results_from_queue(self, timestamp):
"""Throttles OK results. Prepares response for GUI"""
if not self.queue.empty():
# Clear active qrcode
self.active_qrcode = None
response = self.queue.get()
if response is not None:
# Response is OK. Flag the QR code as OK, and throttle it
if 'qrcode' in response:
qrcode = response['qrcode']
ok_throttle = datetime.timedelta(seconds=self.ok_throttle)
self.ok_throttle_dict[qrcode] = timestamp + ok_throttle
self.responses.append(response)
| 34.864516 | 78 | 0.560696 | 8,291 | 0.767117 | 0 | 0 | 0 | 0 | 0 | 0 | 1,817 | 0.168116 |
35cdb055a4bb2320fd07ae79ac8bec7d003524cf | 4,495 | py | Python | rescape_region/schema_models/settings/settings_schema.py | calocan/rescape-region | 8cb0c4d5e2f4c92939eb9a30473808decdb4bef2 | [
"MIT"
] | 1 | 2021-05-08T12:04:23.000Z | 2021-05-08T12:04:23.000Z | rescape_region/schema_models/settings/settings_schema.py | rescapes/rescape-region | b247aa277928d126bcf020c8204994b00ae4c18d | [
"MIT"
] | 15 | 2021-04-06T18:05:04.000Z | 2022-03-12T00:22:35.000Z | rescape_region/schema_models/settings/settings_schema.py | rescapes/rescape-region | b247aa277928d126bcf020c8204994b00ae4c18d | [
"MIT"
] | null | null | null | import graphene
from django.db import transaction
from graphene import InputObjectType, Mutation, Field, ObjectType
from graphene_django.types import DjangoObjectType
from graphql_jwt.decorators import login_required
from rescape_graphene import REQUIRE, graphql_update_or_create, graphql_query, guess_update_or_create, \
CREATE, UPDATE, input_type_parameters_for_update_or_create, input_type_fields, merge_with_django_properties, \
DENY, resolver_for_dict_field
from rescape_graphene import enforce_unique_props
from rescape_graphene.graphql_helpers.schema_helpers import process_filter_kwargs, update_or_create_with_revision, \
top_level_allowed_filter_arguments
from rescape_graphene.schema_models.django_object_type_revisioned_mixin import reversion_and_safe_delete_types, \
DjangoObjectTypeRevisionedMixin
from rescape_python_helpers import ramda as R
from rescape_region.models.settings import Settings
from rescape_region.schema_models.scope.region.region_schema import RegionType
from .settings_data_schema import SettingsDataType, settings_data_fields
# Field metadata for the Settings model: which fields may appear in
# CREATE vs UPDATE mutations (REQUIRE/DENY), plus the revision and
# safe-delete bookkeeping fields mixed in at the end.
raw_settings_fields = dict(
    id=dict(create=DENY, update=REQUIRE),
    key=dict(create=REQUIRE),
    # This refers to the SettingsDataType, which is a representation of all the json fields of Settings.data
    data=dict(graphene_type=SettingsDataType, fields=settings_data_fields, default=lambda: dict()),
    **reversion_and_safe_delete_types
)
class SettingsType(DjangoObjectType, DjangoObjectTypeRevisionedMixin):
    """Graphene object type for the Settings django model.

    ``id`` is exposed from the model's primary key; revision and
    safe-delete fields come from DjangoObjectTypeRevisionedMixin.
    """
    id = graphene.Int(source='pk')
    class Meta:
        # Fields are auto-derived from the Settings django model.
        model = Settings
# Modify data field to use the resolver.
# There appears to be no way to attach a resolver at field-creation
# time, since graphene just reads the underlying Django model to
# generate the fields, so patch the generated field after the fact.
SettingsType._meta.fields['data'] = Field(
    SettingsDataType,
    resolver=resolver_for_dict_field
)
# Merge the hand-written field metadata above with properties derived
# from the django model.
settings_fields = merge_with_django_properties(SettingsType, raw_settings_fields)
# Mutation naming/resolution config consumed by rescape_graphene's
# graphql_update_or_create helper below.
settings_mutation_config = dict(
    class_name='Settings',
    crud={
        CREATE: 'createSettings',
        UPDATE: 'updateSettings'
    },
    resolve=guess_update_or_create
)
class SettingsQuery(ObjectType):
    """Top-level query type exposing the ``settings`` field."""
    settings = graphene.List(
        SettingsType,
        **top_level_allowed_filter_arguments(settings_fields, RegionType)
    )
    def resolve_settings(self, info, **kwargs):
        """Resolve settings rows, excluding soft-deleted ones by default."""
        # Soft-deleted rows are filtered out unless the caller's kwargs
        # say otherwise (caller values win in the merge).
        filter_kwargs = R.merge(dict(deleted__isnull=True), kwargs)
        expressions = process_filter_kwargs(Settings, **filter_kwargs)
        return Settings.objects.filter(*expressions)
class UpsertSettings(Mutation):
    """
    Abstract base class for the create/update Settings mutations.
    """
    settings = Field(SettingsType)
    @transaction.atomic
    @login_required
    def mutate(self, info, settings_data=None):
        """Create or update a Settings row from ``settings_data``."""
        # Updating ``data`` must not drop json keys already stored on
        # the row, so deep-merge the stored json with the incoming json
        # (new data gets priority).
        if R.has('id', settings_data) and R.has('data', settings_data):
            existing = Settings.objects.get(id=settings_data['id']).data
            settings_data['data'] = R.merge_deep(existing, settings_data['data'])
        # Make sure that all props are unique that must be, either by
        # modifying values or erring.
        unique_data = enforce_unique_props(settings_fields, settings_data)
        values = input_type_parameters_for_update_or_create(settings_fields, unique_data)
        settings, created = update_or_create_with_revision(Settings, values)
        return UpsertSettings(settings=settings)
class CreateSettings(UpsertSettings):
    """
    Create Settings mutation class.

    ``settings_data`` is a dynamically built InputObjectType exposing
    only the fields permitted for CREATE.
    """
    class Arguments:
        # Input type generated from settings_fields for the CREATE case.
        settings_data = type('CreateSettingsInputType', (InputObjectType,),
                             input_type_fields(settings_fields, CREATE, SettingsType))(required=True)
class UpdateSettings(UpsertSettings):
    """
    Update Settings mutation class.

    ``settings_data`` is a dynamically built InputObjectType exposing
    only the fields permitted for UPDATE.
    """
    class Arguments:
        # Input type generated from settings_fields for the UPDATE case.
        settings_data = type('UpdateSettingsInputType', (InputObjectType,),
                             input_type_fields(settings_fields, UPDATE, SettingsType))(required=True)
class SettingsMutation(graphene.ObjectType):
    """Root mutation type exposing createSettings and updateSettings."""
    create_settings = CreateSettings.Field()
    update_settings = UpdateSettings.Field()
# Pre-built client helpers: a mutation document for create/update and a
# query document for the 'settings' field.
graphql_update_or_create_settings = graphql_update_or_create(settings_mutation_config, settings_fields)
graphql_query_settings = graphql_query(SettingsType, settings_fields, 'settings')
| 36.844262 | 117 | 0.754171 | 2,301 | 0.511902 | 0 | 0 | 913 | 0.203115 | 0 | 0 | 784 | 0.174416 |
35d3caf546997ac4a3e9ab5b7bbe7a7b734d8898 | 72,977 | py | Python | juju/client/_client8.py | wallyworld/python-libjuju | 1de9ad1cea120a691041c243877a6316ba648900 | [
"Apache-2.0"
] | null | null | null | juju/client/_client8.py | wallyworld/python-libjuju | 1de9ad1cea120a691041c243877a6316ba648900 | [
"Apache-2.0"
] | 1 | 2019-02-11T10:00:15.000Z | 2019-02-11T10:06:20.000Z | juju/client/_client8.py | CanonicalBootStack/python-libjuju | 43fb846042f4cd7d31ae11b1542862477bd76d36 | [
"Apache-2.0"
] | null | null | null | # DO NOT CHANGE THIS FILE! This file is auto-generated by facade.py.
# Changes will be overwritten/lost when the file is regenerated.
from juju.client.facade import Type, ReturnMapping
from juju.client._definitions import *
class ApplicationFacade(Type):
name = 'Application'
version = 8
schema = {'definitions': {'AddApplicationUnits': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'attach-storage': {'items': {'type': 'string'},
'type': 'array'},
'num-units': {'type': 'integer'},
'placement': {'items': {'$ref': '#/definitions/Placement'},
'type': 'array'},
'policy': {'type': 'string'}},
'required': ['application',
'num-units',
'placement'],
'type': 'object'},
'AddApplicationUnitsResults': {'additionalProperties': False,
'properties': {'units': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['units'],
'type': 'object'},
'AddRelation': {'additionalProperties': False,
'properties': {'endpoints': {'items': {'type': 'string'},
'type': 'array'},
'via-cidrs': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['endpoints'],
'type': 'object'},
'AddRelationResults': {'additionalProperties': False,
'properties': {'endpoints': {'patternProperties': {'.*': {'$ref': '#/definitions/CharmRelation'}},
'type': 'object'}},
'required': ['endpoints'],
'type': 'object'},
'ApplicationCharmRelations': {'additionalProperties': False,
'properties': {'application': {'type': 'string'}},
'required': ['application'],
'type': 'object'},
'ApplicationCharmRelationsResults': {'additionalProperties': False,
'properties': {'charm-relations': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['charm-relations'],
'type': 'object'},
'ApplicationConfigSet': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'config': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'}},
'required': ['application', 'config'],
'type': 'object'},
'ApplicationConfigSetArgs': {'additionalProperties': False,
'properties': {'Args': {'items': {'$ref': '#/definitions/ApplicationConfigSet'},
'type': 'array'}},
'required': ['Args'],
'type': 'object'},
'ApplicationConfigUnsetArgs': {'additionalProperties': False,
'properties': {'Args': {'items': {'$ref': '#/definitions/ApplicationUnset'},
'type': 'array'}},
'required': ['Args'],
'type': 'object'},
'ApplicationConstraint': {'additionalProperties': False,
'properties': {'constraints': {'$ref': '#/definitions/Value'},
'error': {'$ref': '#/definitions/Error'}},
'required': ['constraints'],
'type': 'object'},
'ApplicationDeploy': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'attach-storage': {'items': {'type': 'string'},
'type': 'array'},
'channel': {'type': 'string'},
'charm-url': {'type': 'string'},
'config': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'config-yaml': {'type': 'string'},
'constraints': {'$ref': '#/definitions/Value'},
'devices': {'patternProperties': {'.*': {'$ref': '#/definitions/Constraints'}},
'type': 'object'},
'endpoint-bindings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'num-units': {'type': 'integer'},
'placement': {'items': {'$ref': '#/definitions/Placement'},
'type': 'array'},
'policy': {'type': 'string'},
'resources': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'series': {'type': 'string'},
'storage': {'patternProperties': {'.*': {'$ref': '#/definitions/Constraints'}},
'type': 'object'}},
'required': ['application',
'series',
'charm-url',
'channel',
'num-units',
'config-yaml',
'constraints'],
'type': 'object'},
'ApplicationDestroy': {'additionalProperties': False,
'properties': {'application': {'type': 'string'}},
'required': ['application'],
'type': 'object'},
'ApplicationExpose': {'additionalProperties': False,
'properties': {'application': {'type': 'string'}},
'required': ['application'],
'type': 'object'},
'ApplicationGet': {'additionalProperties': False,
'properties': {'application': {'type': 'string'}},
'required': ['application'],
'type': 'object'},
'ApplicationGetConfigResults': {'additionalProperties': False,
'properties': {'Results': {'items': {'$ref': '#/definitions/ConfigResult'},
'type': 'array'}},
'required': ['Results'],
'type': 'object'},
'ApplicationGetConstraintsResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ApplicationConstraint'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'ApplicationGetResults': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'application-config': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'charm': {'type': 'string'},
'config': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'constraints': {'$ref': '#/definitions/Value'},
'series': {'type': 'string'}},
'required': ['application',
'charm',
'config',
'constraints',
'series'],
'type': 'object'},
'ApplicationMetricCredential': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'metrics-credentials': {'items': {'type': 'integer'},
'type': 'array'}},
'required': ['application',
'metrics-credentials'],
'type': 'object'},
'ApplicationMetricCredentials': {'additionalProperties': False,
'properties': {'creds': {'items': {'$ref': '#/definitions/ApplicationMetricCredential'},
'type': 'array'}},
'required': ['creds'],
'type': 'object'},
'ApplicationOfferDetails': {'additionalProperties': False,
'properties': {'application-description': {'type': 'string'},
'bindings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'endpoints': {'items': {'$ref': '#/definitions/RemoteEndpoint'},
'type': 'array'},
'offer-name': {'type': 'string'},
'offer-url': {'type': 'string'},
'offer-uuid': {'type': 'string'},
'source-model-tag': {'type': 'string'},
'spaces': {'items': {'$ref': '#/definitions/RemoteSpace'},
'type': 'array'},
'users': {'items': {'$ref': '#/definitions/OfferUserDetails'},
'type': 'array'}},
'required': ['source-model-tag',
'offer-uuid',
'offer-url',
'offer-name',
'application-description'],
'type': 'object'},
'ApplicationSet': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'options': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'}},
'required': ['application', 'options'],
'type': 'object'},
'ApplicationSetCharm': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'channel': {'type': 'string'},
'charm-url': {'type': 'string'},
'config-settings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'config-settings-yaml': {'type': 'string'},
'force': {'type': 'boolean'},
'force-series': {'type': 'boolean'},
'force-units': {'type': 'boolean'},
'resource-ids': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'storage-constraints': {'patternProperties': {'.*': {'$ref': '#/definitions/StorageConstraints'}},
'type': 'object'}},
'required': ['application',
'charm-url',
'channel',
'force',
'force-units',
'force-series'],
'type': 'object'},
'ApplicationSetCharmProfile': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'charm-url': {'type': 'string'}},
'required': ['application',
'charm-url'],
'type': 'object'},
'ApplicationUnexpose': {'additionalProperties': False,
'properties': {'application': {'type': 'string'}},
'required': ['application'],
'type': 'object'},
'ApplicationUnset': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'options': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['application', 'options'],
'type': 'object'},
'ApplicationUpdate': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'charm-url': {'type': 'string'},
'constraints': {'$ref': '#/definitions/Value'},
'force': {'type': 'boolean'},
'force-charm-url': {'type': 'boolean'},
'force-series': {'type': 'boolean'},
'min-units': {'type': 'integer'},
'settings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'settings-yaml': {'type': 'string'}},
'required': ['application',
'charm-url',
'force-charm-url',
'force-series',
'force',
'settings-yaml'],
'type': 'object'},
'ApplicationsDeploy': {'additionalProperties': False,
'properties': {'applications': {'items': {'$ref': '#/definitions/ApplicationDeploy'},
'type': 'array'}},
'required': ['applications'],
'type': 'object'},
'CharmRelation': {'additionalProperties': False,
'properties': {'interface': {'type': 'string'},
'limit': {'type': 'integer'},
'name': {'type': 'string'},
'optional': {'type': 'boolean'},
'role': {'type': 'string'},
'scope': {'type': 'string'}},
'required': ['name',
'role',
'interface',
'optional',
'limit',
'scope'],
'type': 'object'},
'ConfigResult': {'additionalProperties': False,
'properties': {'config': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'error': {'$ref': '#/definitions/Error'}},
'required': ['config'],
'type': 'object'},
'Constraints': {'additionalProperties': False,
'properties': {'Count': {'type': 'integer'},
'Pool': {'type': 'string'},
'Size': {'type': 'integer'}},
'required': ['Pool', 'Size', 'Count'],
'type': 'object'},
'ConsumeApplicationArg': {'additionalProperties': False,
'properties': {'ApplicationOfferDetails': {'$ref': '#/definitions/ApplicationOfferDetails'},
'application-alias': {'type': 'string'},
'external-controller': {'$ref': '#/definitions/ExternalControllerInfo'},
'macaroon': {'$ref': '#/definitions/Macaroon'}},
'required': ['ApplicationOfferDetails'],
'type': 'object'},
'ConsumeApplicationArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/ConsumeApplicationArg'},
'type': 'array'}},
'type': 'object'},
'DestroyApplicationInfo': {'additionalProperties': False,
'properties': {'destroyed-storage': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'},
'destroyed-units': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'},
'detached-storage': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'}},
'type': 'object'},
'DestroyApplicationParams': {'additionalProperties': False,
'properties': {'application-tag': {'type': 'string'},
'destroy-storage': {'type': 'boolean'}},
'required': ['application-tag'],
'type': 'object'},
'DestroyApplicationResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'info': {'$ref': '#/definitions/DestroyApplicationInfo'}},
'type': 'object'},
'DestroyApplicationResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/DestroyApplicationResult'},
'type': 'array'}},
'type': 'object'},
'DestroyApplicationUnits': {'additionalProperties': False,
'properties': {'unit-names': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['unit-names'],
'type': 'object'},
'DestroyApplicationsParams': {'additionalProperties': False,
'properties': {'applications': {'items': {'$ref': '#/definitions/DestroyApplicationParams'},
'type': 'array'}},
'required': ['applications'],
'type': 'object'},
'DestroyConsumedApplicationParams': {'additionalProperties': False,
'properties': {'application-tag': {'type': 'string'}},
'required': ['application-tag'],
'type': 'object'},
'DestroyConsumedApplicationsParams': {'additionalProperties': False,
'properties': {'applications': {'items': {'$ref': '#/definitions/DestroyConsumedApplicationParams'},
'type': 'array'}},
'required': ['applications'],
'type': 'object'},
'DestroyRelation': {'additionalProperties': False,
'properties': {'endpoints': {'items': {'type': 'string'},
'type': 'array'},
'relation-id': {'type': 'integer'}},
'required': ['relation-id'],
'type': 'object'},
'DestroyUnitInfo': {'additionalProperties': False,
'properties': {'destroyed-storage': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'},
'detached-storage': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'}},
'type': 'object'},
'DestroyUnitParams': {'additionalProperties': False,
'properties': {'destroy-storage': {'type': 'boolean'},
'unit-tag': {'type': 'string'}},
'required': ['unit-tag'],
'type': 'object'},
'DestroyUnitResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'info': {'$ref': '#/definitions/DestroyUnitInfo'}},
'type': 'object'},
'DestroyUnitResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/DestroyUnitResult'},
'type': 'array'}},
'type': 'object'},
'DestroyUnitsParams': {'additionalProperties': False,
'properties': {'units': {'items': {'$ref': '#/definitions/DestroyUnitParams'},
'type': 'array'}},
'required': ['units'],
'type': 'object'},
'Entities': {'additionalProperties': False,
'properties': {'entities': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'}},
'required': ['entities'],
'type': 'object'},
'Entity': {'additionalProperties': False,
'properties': {'tag': {'type': 'string'}},
'required': ['tag'],
'type': 'object'},
'Error': {'additionalProperties': False,
'properties': {'code': {'type': 'string'},
'info': {'$ref': '#/definitions/ErrorInfo'},
'message': {'type': 'string'}},
'required': ['message', 'code'],
'type': 'object'},
'ErrorInfo': {'additionalProperties': False,
'properties': {'macaroon': {'$ref': '#/definitions/Macaroon'},
'macaroon-path': {'type': 'string'}},
'type': 'object'},
'ErrorResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'}},
'type': 'object'},
'ErrorResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ErrorResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'ExternalControllerInfo': {'additionalProperties': False,
'properties': {'addrs': {'items': {'type': 'string'},
'type': 'array'},
'ca-cert': {'type': 'string'},
'controller-alias': {'type': 'string'},
'controller-tag': {'type': 'string'}},
'required': ['controller-tag',
'controller-alias',
'addrs',
'ca-cert'],
'type': 'object'},
'LXDProfileUpgradeMessages': {'additionalProperties': False,
'properties': {'application': {'$ref': '#/definitions/Entity'},
'watcher-id': {'type': 'string'}},
'required': ['application',
'watcher-id'],
'type': 'object'},
'LXDProfileUpgradeMessagesResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'message': {'type': 'string'},
'unit-name': {'type': 'string'}},
'required': ['unit-name',
'message'],
'type': 'object'},
'LXDProfileUpgradeMessagesResults': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/LXDProfileUpgradeMessagesResult'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'Macaroon': {'additionalProperties': False, 'type': 'object'},
'NotifyWatchResult': {'additionalProperties': False,
'properties': {'NotifyWatcherId': {'type': 'string'},
'error': {'$ref': '#/definitions/Error'}},
'required': ['NotifyWatcherId'],
'type': 'object'},
'OfferUserDetails': {'additionalProperties': False,
'properties': {'access': {'type': 'string'},
'display-name': {'type': 'string'},
'user': {'type': 'string'}},
'required': ['user',
'display-name',
'access'],
'type': 'object'},
'Placement': {'additionalProperties': False,
'properties': {'directive': {'type': 'string'},
'scope': {'type': 'string'}},
'required': ['scope', 'directive'],
'type': 'object'},
'RelationSuspendedArg': {'additionalProperties': False,
'properties': {'message': {'type': 'string'},
'relation-id': {'type': 'integer'},
'suspended': {'type': 'boolean'}},
'required': ['relation-id',
'message',
'suspended'],
'type': 'object'},
'RelationSuspendedArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/RelationSuspendedArg'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'RemoteEndpoint': {'additionalProperties': False,
'properties': {'interface': {'type': 'string'},
'limit': {'type': 'integer'},
'name': {'type': 'string'},
'role': {'type': 'string'}},
'required': ['name',
'role',
'interface',
'limit'],
'type': 'object'},
'RemoteSpace': {'additionalProperties': False,
'properties': {'cloud-type': {'type': 'string'},
'name': {'type': 'string'},
'provider-attributes': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'provider-id': {'type': 'string'},
'subnets': {'items': {'$ref': '#/definitions/Subnet'},
'type': 'array'}},
'required': ['cloud-type',
'name',
'provider-id',
'provider-attributes',
'subnets'],
'type': 'object'},
'ScaleApplicationInfo': {'additionalProperties': False,
'properties': {'num-units': {'type': 'integer'}},
'required': ['num-units'],
'type': 'object'},
'ScaleApplicationParams': {'additionalProperties': False,
'properties': {'application-tag': {'type': 'string'},
'scale': {'type': 'integer'},
'scale-change': {'type': 'integer'}},
'required': ['application-tag',
'scale'],
'type': 'object'},
'ScaleApplicationResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'info': {'$ref': '#/definitions/ScaleApplicationInfo'}},
'type': 'object'},
'ScaleApplicationResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ScaleApplicationResult'},
'type': 'array'}},
'type': 'object'},
'ScaleApplicationsParams': {'additionalProperties': False,
'properties': {'applications': {'items': {'$ref': '#/definitions/ScaleApplicationParams'},
'type': 'array'}},
'required': ['applications'],
'type': 'object'},
'SetConstraints': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'constraints': {'$ref': '#/definitions/Value'}},
'required': ['application', 'constraints'],
'type': 'object'},
'StorageConstraints': {'additionalProperties': False,
'properties': {'count': {'type': 'integer'},
'pool': {'type': 'string'},
'size': {'type': 'integer'}},
'type': 'object'},
'StringResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'type': 'string'}},
'required': ['result'],
'type': 'object'},
'Subnet': {'additionalProperties': False,
'properties': {'cidr': {'type': 'string'},
'life': {'type': 'string'},
'provider-id': {'type': 'string'},
'provider-network-id': {'type': 'string'},
'provider-space-id': {'type': 'string'},
'space-tag': {'type': 'string'},
'status': {'type': 'string'},
'vlan-tag': {'type': 'integer'},
'zones': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['cidr',
'vlan-tag',
'life',
'space-tag',
'zones'],
'type': 'object'},
'UnitsResolved': {'additionalProperties': False,
'properties': {'all': {'type': 'boolean'},
'retry': {'type': 'boolean'},
'tags': {'$ref': '#/definitions/Entities'}},
'type': 'object'},
'UpdateSeriesArg': {'additionalProperties': False,
'properties': {'force': {'type': 'boolean'},
'series': {'type': 'string'},
'tag': {'$ref': '#/definitions/Entity'}},
'required': ['tag', 'force', 'series'],
'type': 'object'},
'UpdateSeriesArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/UpdateSeriesArg'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'Value': {'additionalProperties': False,
'properties': {'arch': {'type': 'string'},
'container': {'type': 'string'},
'cores': {'type': 'integer'},
'cpu-power': {'type': 'integer'},
'instance-type': {'type': 'string'},
'mem': {'type': 'integer'},
'root-disk': {'type': 'integer'},
'spaces': {'items': {'type': 'string'},
'type': 'array'},
'tags': {'items': {'type': 'string'},
'type': 'array'},
'virt-type': {'type': 'string'},
'zones': {'items': {'type': 'string'},
'type': 'array'}},
'type': 'object'}},
'properties': {'AddRelation': {'properties': {'Params': {'$ref': '#/definitions/AddRelation'},
'Result': {'$ref': '#/definitions/AddRelationResults'}},
'type': 'object'},
'AddUnits': {'properties': {'Params': {'$ref': '#/definitions/AddApplicationUnits'},
'Result': {'$ref': '#/definitions/AddApplicationUnitsResults'}},
'type': 'object'},
'CharmConfig': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ApplicationGetConfigResults'}},
'type': 'object'},
'CharmRelations': {'properties': {'Params': {'$ref': '#/definitions/ApplicationCharmRelations'},
'Result': {'$ref': '#/definitions/ApplicationCharmRelationsResults'}},
'type': 'object'},
'Consume': {'properties': {'Params': {'$ref': '#/definitions/ConsumeApplicationArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Deploy': {'properties': {'Params': {'$ref': '#/definitions/ApplicationsDeploy'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Destroy': {'properties': {'Params': {'$ref': '#/definitions/ApplicationDestroy'}},
'type': 'object'},
'DestroyApplication': {'properties': {'Params': {'$ref': '#/definitions/DestroyApplicationsParams'},
'Result': {'$ref': '#/definitions/DestroyApplicationResults'}},
'type': 'object'},
'DestroyConsumedApplications': {'properties': {'Params': {'$ref': '#/definitions/DestroyConsumedApplicationsParams'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'DestroyRelation': {'properties': {'Params': {'$ref': '#/definitions/DestroyRelation'}},
'type': 'object'},
'DestroyUnit': {'properties': {'Params': {'$ref': '#/definitions/DestroyUnitsParams'},
'Result': {'$ref': '#/definitions/DestroyUnitResults'}},
'type': 'object'},
'DestroyUnits': {'properties': {'Params': {'$ref': '#/definitions/DestroyApplicationUnits'}},
'type': 'object'},
'Expose': {'properties': {'Params': {'$ref': '#/definitions/ApplicationExpose'}},
'type': 'object'},
'Get': {'properties': {'Params': {'$ref': '#/definitions/ApplicationGet'},
'Result': {'$ref': '#/definitions/ApplicationGetResults'}},
'type': 'object'},
'GetCharmURL': {'properties': {'Params': {'$ref': '#/definitions/ApplicationGet'},
'Result': {'$ref': '#/definitions/StringResult'}},
'type': 'object'},
'GetConfig': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ApplicationGetConfigResults'}},
'type': 'object'},
'GetConstraints': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ApplicationGetConstraintsResults'}},
'type': 'object'},
'GetLXDProfileUpgradeMessages': {'properties': {'Params': {'$ref': '#/definitions/LXDProfileUpgradeMessages'},
'Result': {'$ref': '#/definitions/LXDProfileUpgradeMessagesResults'}},
'type': 'object'},
'ResolveUnitErrors': {'properties': {'Params': {'$ref': '#/definitions/UnitsResolved'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'ScaleApplications': {'properties': {'Params': {'$ref': '#/definitions/ScaleApplicationsParams'},
'Result': {'$ref': '#/definitions/ScaleApplicationResults'}},
'type': 'object'},
'Set': {'properties': {'Params': {'$ref': '#/definitions/ApplicationSet'}},
'type': 'object'},
'SetApplicationsConfig': {'properties': {'Params': {'$ref': '#/definitions/ApplicationConfigSetArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetCharm': {'properties': {'Params': {'$ref': '#/definitions/ApplicationSetCharm'}},
'type': 'object'},
'SetCharmProfile': {'properties': {'Params': {'$ref': '#/definitions/ApplicationSetCharmProfile'}},
'type': 'object'},
'SetConstraints': {'properties': {'Params': {'$ref': '#/definitions/SetConstraints'}},
'type': 'object'},
'SetMetricCredentials': {'properties': {'Params': {'$ref': '#/definitions/ApplicationMetricCredentials'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetRelationsSuspended': {'properties': {'Params': {'$ref': '#/definitions/RelationSuspendedArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Unexpose': {'properties': {'Params': {'$ref': '#/definitions/ApplicationUnexpose'}},
'type': 'object'},
'Unset': {'properties': {'Params': {'$ref': '#/definitions/ApplicationUnset'}},
'type': 'object'},
'UnsetApplicationsConfig': {'properties': {'Params': {'$ref': '#/definitions/ApplicationConfigUnsetArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Update': {'properties': {'Params': {'$ref': '#/definitions/ApplicationUpdate'}},
'type': 'object'},
'UpdateApplicationSeries': {'properties': {'Params': {'$ref': '#/definitions/UpdateSeriesArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'WatchLXDProfileUpgradeNotifications': {'properties': {'Params': {'$ref': '#/definitions/Entity'},
'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'}},
'type': 'object'}
@ReturnMapping(AddRelationResults)
async def AddRelation(self, endpoints):
'''
endpoints : typing.Sequence[str]
Returns -> typing.Mapping[str, ~CharmRelation]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='AddRelation',
version=8,
params=_params)
_params['endpoints'] = endpoints
reply = await self.rpc(msg)
return reply
@ReturnMapping(AddApplicationUnitsResults)
async def AddUnits(self, application, num_units, placement):
'''
application : str
num_units : int
placement : typing.Sequence[~Placement]
Returns -> typing.Sequence[str]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='AddUnits',
version=8,
params=_params)
_params['application'] = application
_params['num-units'] = num_units
_params['placement'] = placement
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationGetConfigResults)
async def CharmConfig(self, entities):
'''
entities : typing.Sequence[~Entity]
Returns -> typing.Sequence[~ConfigResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='CharmConfig',
version=8,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationCharmRelationsResults)
async def CharmRelations(self, application):
'''
application : str
Returns -> typing.Sequence[str]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='CharmRelations',
version=8,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def Consume(self, args):
'''
args : typing.Sequence[~ConsumeApplicationArg]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Consume',
version=8,
params=_params)
_params['args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def Deploy(self, applications):
'''
applications : typing.Sequence[~ApplicationDeploy]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Deploy',
version=8,
params=_params)
_params['applications'] = applications
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Destroy(self, application):
'''
application : str
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Destroy',
version=8,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(DestroyApplicationResults)
async def DestroyApplication(self, applications):
'''
applications : typing.Sequence[~DestroyApplicationParams]
Returns -> typing.Sequence[~DestroyApplicationResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyApplication',
version=8,
params=_params)
_params['applications'] = applications
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def DestroyConsumedApplications(self, applications):
'''
applications : typing.Sequence[~DestroyConsumedApplicationParams]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyConsumedApplications',
version=8,
params=_params)
_params['applications'] = applications
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def DestroyRelation(self, endpoints):
'''
endpoints : typing.Sequence[str]
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyRelation',
version=8,
params=_params)
_params['endpoints'] = endpoints
reply = await self.rpc(msg)
return reply
@ReturnMapping(DestroyUnitResults)
async def DestroyUnit(self, units):
'''
units : typing.Sequence[~DestroyUnitParams]
Returns -> typing.Sequence[~DestroyUnitResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyUnit',
version=8,
params=_params)
_params['units'] = units
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def DestroyUnits(self, unit_names):
'''
unit_names : typing.Sequence[str]
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyUnits',
version=8,
params=_params)
_params['unit-names'] = unit_names
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Expose(self, application):
'''
application : str
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Expose',
version=8,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationGetResults)
async def Get(self, application):
'''
application : str
Returns -> typing.Union[str, typing.Mapping[str, typing.Any], _ForwardRef('Value')]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Get',
version=8,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResult)
async def GetCharmURL(self, application):
'''
application : str
Returns -> typing.Union[_ForwardRef('Error'), str]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='GetCharmURL',
version=8,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationGetConfigResults)
async def GetConfig(self, entities):
'''
entities : typing.Sequence[~Entity]
Returns -> typing.Sequence[~ConfigResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='GetConfig',
version=8,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationGetConstraintsResults)
async def GetConstraints(self, entities):
'''
entities : typing.Sequence[~Entity]
Returns -> typing.Sequence[~ApplicationConstraint]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='GetConstraints',
version=8,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(LXDProfileUpgradeMessagesResults)
async def GetLXDProfileUpgradeMessages(self, application, watcher_id):
'''
application : Entity
watcher_id : str
Returns -> typing.Sequence[~LXDProfileUpgradeMessagesResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='GetLXDProfileUpgradeMessages',
version=8,
params=_params)
_params['application'] = application
_params['watcher-id'] = watcher_id
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def ResolveUnitErrors(self, all_, retry, tags):
'''
all_ : bool
retry : bool
tags : Entities
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='ResolveUnitErrors',
version=8,
params=_params)
_params['all'] = all_
_params['retry'] = retry
_params['tags'] = tags
reply = await self.rpc(msg)
return reply
@ReturnMapping(ScaleApplicationResults)
async def ScaleApplications(self, applications):
'''
applications : typing.Sequence[~ScaleApplicationParams]
Returns -> typing.Sequence[~ScaleApplicationResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='ScaleApplications',
version=8,
params=_params)
_params['applications'] = applications
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Set(self, application, options):
'''
application : str
options : typing.Mapping[str, str]
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Set',
version=8,
params=_params)
_params['application'] = application
_params['options'] = options
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetApplicationsConfig(self, args):
'''
args : typing.Sequence[~ApplicationConfigSet]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='SetApplicationsConfig',
version=8,
params=_params)
_params['Args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def SetCharm(self, application, channel, charm_url, config_settings, config_settings_yaml, force_series, force_units, resource_ids, storage_constraints):
'''
application : str
channel : str
charm_url : str
config_settings : typing.Mapping[str, str]
config_settings_yaml : str
force_series : bool
force_units : bool
resource_ids : typing.Mapping[str, str]
storage_constraints : typing.Mapping[str, ~StorageConstraints]
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='SetCharm',
version=8,
params=_params)
_params['application'] = application
_params['channel'] = channel
_params['charm-url'] = charm_url
_params['config-settings'] = config_settings
_params['config-settings-yaml'] = config_settings_yaml
_params['force-series'] = force_series
_params['force-units'] = force_units
_params['resource-ids'] = resource_ids
_params['storage-constraints'] = storage_constraints
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def SetCharmProfile(self, application, charm_url):
'''
application : str
charm_url : str
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='SetCharmProfile',
version=8,
params=_params)
_params['application'] = application
_params['charm-url'] = charm_url
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def SetConstraints(self, application, constraints):
'''
application : str
constraints : Value
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='SetConstraints',
version=8,
params=_params)
_params['application'] = application
_params['constraints'] = constraints
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetMetricCredentials(self, creds):
'''
creds : typing.Sequence[~ApplicationMetricCredential]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='SetMetricCredentials',
version=8,
params=_params)
_params['creds'] = creds
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetRelationsSuspended(self, args):
'''
args : typing.Sequence[~RelationSuspendedArg]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='SetRelationsSuspended',
version=8,
params=_params)
_params['args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Unexpose(self, application):
'''
application : str
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Unexpose',
version=8,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Unset(self, application, options):
'''
application : str
options : typing.Sequence[str]
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Unset',
version=8,
params=_params)
_params['application'] = application
_params['options'] = options
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def UnsetApplicationsConfig(self, args):
'''
args : typing.Sequence[~ApplicationUnset]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='UnsetApplicationsConfig',
version=8,
params=_params)
_params['Args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Update(self, application, charm_url, constraints, force_charm_url, force_series, min_units, settings, settings_yaml):
'''
application : str
charm_url : str
constraints : Value
force_charm_url : bool
force_series : bool
min_units : int
settings : typing.Mapping[str, str]
settings_yaml : str
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Update',
version=8,
params=_params)
_params['application'] = application
_params['charm-url'] = charm_url
_params['constraints'] = constraints
_params['force-charm-url'] = force_charm_url
_params['force-series'] = force_series
_params['min-units'] = min_units
_params['settings'] = settings
_params['settings-yaml'] = settings_yaml
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def UpdateApplicationSeries(self, args):
'''
args : typing.Sequence[~UpdateSeriesArg]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='UpdateApplicationSeries',
version=8,
params=_params)
_params['args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(NotifyWatchResult)
async def WatchLXDProfileUpgradeNotifications(self, tag):
'''
tag : str
Returns -> typing.Union[str, _ForwardRef('Error')]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='WatchLXDProfileUpgradeNotifications',
version=8,
params=_params)
_params['tag'] = tag
reply = await self.rpc(msg)
return reply
| 57.057858 | 163 | 0.357113 | 72,747 | 0.996848 | 0 | 0 | 18,398 | 0.252107 | 17,243 | 0.23628 | 25,949 | 0.355578 |
35d5d3f3a69a394f6e66a0e4e4556dcfbb654d90 | 4,637 | py | Python | dectate/app.py | morepath/dectate | 511920acbeba5b070c532c9b0fa54b53c2baeb0a | [
"BSD-3-Clause"
] | 23 | 2016-03-24T20:13:43.000Z | 2021-08-10T05:16:41.000Z | dectate/app.py | morepath/dectate | 511920acbeba5b070c532c9b0fa54b53c2baeb0a | [
"BSD-3-Clause"
] | 46 | 2016-03-29T14:03:39.000Z | 2021-04-18T14:39:54.000Z | dectate/app.py | morepath/dectate | 511920acbeba5b070c532c9b0fa54b53c2baeb0a | [
"BSD-3-Clause"
] | 6 | 2016-04-06T14:08:36.000Z | 2020-04-19T14:22:38.000Z | import sys
from .config import Configurable, Directive, commit, create_code_info
class Config:
    """Dumb container for committed configuration values.

    Attributes are installed on instances of this class as described by
    the :attr:`Action.config` class attribute of :class:`Action`.
    """
class AppMeta(type):
    """Dectate metaclass.

    Installs fresh ``config`` and ``dectate`` class attributes on every
    App subclass as it is defined.
    """
    def __new__(cls, name, bases, d):
        # Collect the configurables of all dectate-aware base classes so
        # the new class inherits their configuration.
        inherited = [base.dectate for base in bases if hasattr(base, "dectate")]
        config = Config()
        configurable = Configurable(inherited, config)
        d["config"] = config
        d["dectate"] = configurable
        app_class = super().__new__(cls, name, bases, d)
        # Give the configurable a back-reference to the class it serves.
        configurable.app_class = app_class
        return app_class
class App(metaclass=AppMeta):
    """A configurable application object.

    Frameworks subclass this and attach directives created with the
    :meth:`App.directive` decorator.

    Set the ``logger_name`` class attribute to change the logging prefix
    Dectate uses; by default it is ``"dectate.directive"``.
    """

    logger_name = "dectate.directive"
    """The prefix to use for directive debug logging."""

    dectate = None
    """The dectate Configurable instance for this class.

    Installed by the metaclass at class-definition time, i.e. at import
    time when the ``class`` statement subclassing :class:`dectate.App`
    runs.

    It tracks the registrations performed by directives as well as
    committed configurations.
    """

    config = None
    """Container holding the configuration after commit.

    Installed by the metaclass at class-definition time, but only filled
    once the configuration is committed.

    It is a very dumb object with no methods: just a bag of attributes
    carrying the real configuration result.
    """

    @classmethod
    def get_directive_methods(cls):
        """Yield ``(name, bound method)`` pairs for every directive method."""
        for attr_name in dir(cls):
            candidate = getattr(cls, attr_name)
            func = getattr(candidate, "__func__", None)
            # Only classmethods created by directive() carry action_factory.
            if func is not None and hasattr(func, "action_factory"):
                yield attr_name, candidate

    @classmethod
    def commit(cls):
        """Commit this class and any depending on it.

        Subclasses may override this when committing one class should
        commit others as well (e.g. mounted apps in Morepath); overrides
        should return an iterable of every class committed.

        :return: an iterable of committed classes
        """
        commit(cls)
        return [cls]

    @classmethod
    def is_committed(cls):
        """Report whether this app class was ever committed.

        :return: bool that is ``True`` when the app was committed before.
        """
        return cls.dectate.committed

    @classmethod
    def clean(cls):
        """Set or restore the pristine state of the class.

        Dectate itself only writes configuration into ``config``; override
        this classmethod when configuration also touches other aspects of
        the class that need resetting.
        """
def directive(action_factory):
    """Create a classmethod that hooks an action onto an application class.

    Pass in a :class:`dectate.Action` or :class:`dectate.Composite`
    subclass and attach the result as a class method to a
    :class:`dectate.App` subclass::

        class FooAction(dectate.Action):
            ...

        class MyApp(dectate.App):
            my_directive = dectate.directive(MyAction)

    It can also be used inline as a decorator::

        class MyApp(dectate.App):
            @directive
            class my_directive(dectate.Action):
                ...

    :param action_factory: an action class to use as the directive.
    :return: a class method that represents the directive.
    """
    def method(cls, *args, **kw):
        # Capture the caller's frame so errors can point at the use site.
        caller_frame = sys._getframe(1)
        return Directive(
            action_factory, create_code_info(caller_frame), cls, args, kw
        )

    # sphinxext and App.get_action_classes need to recognize this
    method.action_factory = action_factory
    method.__doc__ = action_factory.__doc__
    method.__module__ = action_factory.__module__
    return classmethod(method)
| 31.544218 | 78 | 0.665301 | 3,375 | 0.727841 | 294 | 0.063403 | 1,423 | 0.306879 | 0 | 0 | 3,148 | 0.678887 |
35d5e04e5892b72fc7057d291530a91e4883bc62 | 1,127 | py | Python | app/user/serializers.py | falleng0d/medicar-backend | 30bedff54ae84da7a67350852cd508c54e5bf6e7 | [
"MIT"
] | null | null | null | app/user/serializers.py | falleng0d/medicar-backend | 30bedff54ae84da7a67350852cd508c54e5bf6e7 | [
"MIT"
] | null | null | null | app/user/serializers.py | falleng0d/medicar-backend | 30bedff54ae84da7a67350852cd508c54e5bf6e7 | [
"MIT"
] | null | null | null | from collections import OrderedDict
from django.contrib.auth import get_user_model # If used custom user model
from rest_framework import serializers
# Resolve the active user model once at import time (honours a custom
# AUTH_USER_MODEL if the project defines one).
UserModel = get_user_model()
class UserSerializer(serializers.ModelSerializer):
    """Serializer for user accounts.

    The password is write-only, and ``email``/``first_name`` are omitted
    from the serialized output unless they hold a meaningful value.
    """

    password = serializers.CharField(write_only=True)

    def create(self, validated_data):
        """Create a user through the model manager so the password is hashed."""
        return UserModel.objects.create_user(
            username=validated_data['username'],
            password=validated_data['password'],
            email=validated_data.get('email', None),
            first_name=validated_data.get('first_name', ''),
        )

    def to_representation(self, instance):
        """Drop email/first_name fields that are empty or near-empty."""
        rep = super().to_representation(instance)
        kept = []
        for key in rep:
            if key not in ['email', 'first_name']:
                kept.append((key, rep[key]))
            elif rep[key] is not None and len(rep[key]) > 1:
                # NOTE(review): values of length 1 are dropped too -- this
                # mirrors the original `len(...) > 1` test; confirm intent.
                kept.append((key, rep[key]))
        return OrderedDict(kept)

    class Meta:
        model = UserModel
        fields = ("id", "username", "password", "email", "first_name")
35d619e412abb96bf3e95ecb361e6a948cab08cb | 2,542 | py | Python | easy-receptive-fields-pytorch/receptivefield/tests/test_pytorch.py | Swinsie/cv-rep-fork | 1c3454934645e3f9afa39ba90c3b216eb9ed7f75 | [
"Apache-2.0"
] | null | null | null | easy-receptive-fields-pytorch/receptivefield/tests/test_pytorch.py | Swinsie/cv-rep-fork | 1c3454934645e3f9afa39ba90c3b216eb9ed7f75 | [
"Apache-2.0"
] | null | null | null | easy-receptive-fields-pytorch/receptivefield/tests/test_pytorch.py | Swinsie/cv-rep-fork | 1c3454934645e3f9afa39ba90c3b216eb9ed7f75 | [
"Apache-2.0"
] | null | null | null | import pytest
import torch.nn as nn
from numpy.testing import assert_allclose
from receptivefield.pytorch import PytorchReceptiveField
from receptivefield.image import get_default_image
from receptivefield.types import ImageShape
class Linear(nn.Module):
    """Identity module used in place of a real activation function."""

    def forward(self, x):
        # Pass the input through untouched.
        return x
class SimpleVGG(nn.Module):
    """Small VGG-like CNN of three conv blocks.

    After a forward pass, ``feature_maps`` holds the output of each block
    so receptive fields can be probed per stage.
    """

    def __init__(self, disable_activations: bool = False):
        super(SimpleVGG, self).__init__()
        self.blocks = self._build_blocks(disable_activations)
        self.feature_maps = None

    def forward(self, x):
        # Record the output of every block while propagating the input.
        self.feature_maps = []
        for stage in self.blocks:
            for op in stage:
                x = op(x)
            self.feature_maps.append(x)
        return x

    def _build_blocks(self, disable_activations: bool):
        def act():
            # Identity stand-in keeps receptive-field analysis linear.
            return Linear() if disable_activations else nn.ReLU()

        def conv_block(c_in, c_out):
            return nn.Sequential(
                nn.Conv2d(c_in, c_out, kernel_size=3),
                act(),
                nn.Conv2d(c_out, c_out, kernel_size=3),
                act(),
                nn.AvgPool2d(kernel_size=2, stride=2),
            )

        return [conv_block(3, 64), conv_block(64, 128), conv_block(128, 256)]
def model_fn() -> nn.Module:
    """Build the SimpleVGG under test: eval mode, activations disabled."""
    net = SimpleVGG(disable_activations=True)
    net.eval()
    return net
def get_test_image(shape=(64, 64), tile_factor=0):
    """Return the library's default receptive-field test image."""
    return get_default_image(shape=shape, tile_factor=tile_factor)
def test_example_network():
    """Receptive-field sizes/strides for the three SimpleVGG blocks."""
    input_shape = [96, 96, 3]
    rf = PytorchReceptiveField(model_fn)
    rf_params = rf.compute(input_shape=ImageShape(*input_shape))

    # Block 1: two 3x3 convs + 2x2 pool.
    expected_size = 6
    assert_allclose(rf_params[0].rf.size, (expected_size, expected_size))
    assert_allclose(rf_params[0].rf.stride, (2, 2))

    # Block 2 grows the field by (2 + 2 + 1) taps at stride 2.
    expected_size += (2 + 2 + 1) * 2
    assert_allclose(rf_params[1].rf.size, (expected_size, expected_size))
    assert_allclose(rf_params[1].rf.stride, (4, 4))

    # Block 3 grows it by the same taps at stride 4.
    expected_size += (2 + 2 + 1) * 4
    assert_allclose(rf_params[2].rf.size, (expected_size, expected_size))
    assert_allclose(rf_params[2].rf.stride, (8, 8))
# Allow running this test module directly, outside a pytest invocation.
if __name__ == "__main__":
    pytest.main([__file__])
| 29.218391 | 87 | 0.616444 | 1,443 | 0.567663 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.018489 |
35d72c0843e71e8c73c42625fa71b7593cdf2c85 | 192 | py | Python | KSFGHAction/__init__.py | KOLANICH-GHActions/KSFGHAction.py | e1d54ae0043d93d8b190f8e758b978bf8e779c51 | [
"Unlicense"
] | null | null | null | KSFGHAction/__init__.py | KOLANICH-GHActions/KSFGHAction.py | e1d54ae0043d93d8b190f8e758b978bf8e779c51 | [
"Unlicense"
] | 3 | 2019-12-23T22:58:40.000Z | 2019-12-25T11:20:13.000Z | KSFGHAction/__init__.py | KOLANICH/KSFGHAction.py | e1d54ae0043d93d8b190f8e758b978bf8e779c51 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import typing
from .utils import ClassDictMeta
from .issueParser import *
from .linter import *
from miniGHAPI.GitHubAPI import *
from miniGHAPI.GHActionsEnv import *
| 19.2 | 36 | 0.791667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.114583 |
35da15160bebb0c093e96b03a913df011244fd8f | 831 | py | Python | model_zoo/jag_utils/python/build_inclusive_from_exclusive.py | jonesholger/lbann | 3214f189a1438565d695542e076c4fa8e7332d34 | [
"Apache-2.0"
] | 194 | 2016-07-19T15:40:21.000Z | 2022-03-19T08:06:10.000Z | model_zoo/jag_utils/python/build_inclusive_from_exclusive.py | jonesholger/lbann | 3214f189a1438565d695542e076c4fa8e7332d34 | [
"Apache-2.0"
] | 1,021 | 2016-07-19T12:56:31.000Z | 2022-03-29T00:41:47.000Z | model_zoo/jag_utils/python/build_inclusive_from_exclusive.py | jonesholger/lbann | 3214f189a1438565d695542e076c4fa8e7332d34 | [
"Apache-2.0"
] | 74 | 2016-07-28T18:24:00.000Z | 2022-01-24T19:41:04.000Z | import sys
# Convert an exclusion-style sample index into a CONDUIT_HDF5_INCLUSION
# index: every sample id in the mapping file is kept unless it appears in
# the exclusion set for its file.  (Python 2 script: uses print statements.)
if len(sys.argv) != 4 :
  print 'usage:', sys.argv[0], 'index_fn id_mapping_fn output_fn'
  exit(9)

# First input: the exclusion index.  Skip the first line, then keep the
# header line and the base-directory line so they can be echoed verbatim
# into the output file.
a = open(sys.argv[1])
a.readline()
header = a.readline()
dir = a.readline()

#build map: filename -> set of bad samples
mp = {}
mp_good = {}  # filename -> good-sample count, copied from the index
mp_bad = {}   # filename -> bad-sample count, copied from the index
for line in a :
  t = line.split()
  mp[t[0]] = set()
  mp_good[t[0]] = t[1]
  mp_bad[t[0]] = t[2]
  # Remaining tokens are the excluded sample ids for this file.
  for id in t[3:] :
    mp[t[0]].add(id)
a.close()

# Second input: the id-mapping file.  Emit each file's counts followed by
# only the ids that are NOT excluded; count how many were filtered out.
out = open(sys.argv[3], 'w')
out.write('CONDUIT_HDF5_INCLUSION\n')
out.write(header)
out.write(dir)
a = open(sys.argv[2])
bad = 0
for line in a :
  t = line.split()
  fn = t[0]
  out.write(fn + ' ' + mp_good[fn] + ' ' + mp_bad[fn] + ' ')
  for id in t[1:] :
    if id not in mp[fn] :
      out.write(id + ' ')
    else :
      bad += 1
  out.write('\n')
out.close()
print header
print 'num found bad:', bad
| 17.680851 | 65 | 0.56438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.174489 |
35db3c607e8460d5391f5e65c6e7e40b3c6921ac | 7,147 | py | Python | app/tests/functional/test_user.py | sun-fengcai/flask_template | fc6c5963ac9ac632c83d81c7b62ab74d7e99d02d | [
"MIT"
] | 3 | 2019-11-16T06:51:17.000Z | 2019-11-21T01:18:31.000Z | app/tests/functional/test_user.py | sun-fengcai/flask_template | fc6c5963ac9ac632c83d81c7b62ab74d7e99d02d | [
"MIT"
] | 27 | 2019-11-17T13:56:49.000Z | 2021-06-28T12:04:18.000Z | app/tests/functional/test_user.py | sun-fengcai/flask_template | fc6c5963ac9ac632c83d81c7b62ab74d7e99d02d | [
"MIT"
] | 1 | 2021-04-23T23:57:28.000Z | 2021-04-23T23:57:28.000Z | import json
from app import utils
def test_add_user(test_app, test_database):
    """POST /users with a valid payload creates the user."""
    client = test_app.test_client()
    payload = {"username": "onlinejudge95", "email": "onlinejudge95@gmail.com"}
    resp = client.post(
        "/users", data=json.dumps(payload), content_type="application/json"
    )
    assert resp.status_code == 201
    body = json.loads(resp.data.decode())
    assert "success" in body["status"]
    assert "onlinejudge95@gmail.com was added!" in body["message"]
    assert "public_id" in body["data"].keys()
def test_add_user_empty_data(test_app, test_database):
    """POST /users with an empty JSON body is rejected."""
    client = test_app.test_client()
    resp = client.post(
        "/users", data=json.dumps({}), content_type="application/json"
    )
    assert resp.status_code == 400
    body = json.loads(resp.data.decode())
    assert "fail" in body["status"]
    assert "Empty payload" in body["message"]
def test_add_user_invalid_payload(test_app, test_database):
    """POST /users without a username is rejected as invalid."""
    client = test_app.test_client()
    resp = client.post(
        "/users",
        data=json.dumps({"email": "mayankdcoder@gmail.com"}),
        content_type="application/json",
    )
    assert resp.status_code == 400
    body = json.loads(resp.data.decode())
    assert "fail" in body["status"]
    assert "Invalid payload" in body["message"]
def test_add_user_duplicate_email(test_app, test_database):
    """Registering the same email twice fails on the second attempt."""
    client = test_app.test_client()
    payload = json.dumps(
        {"username": "onlinejudge95", "email": "onlinejudge95@gmail.com"}
    )
    client.post("/users", data=payload, content_type="application/json")
    resp = client.post("/users", data=payload, content_type="application/json")
    assert resp.status_code == 400
    body = json.loads(resp.data.decode())
    assert "fail" in body["status"]
    assert (
        "User with email onlinejudge95@gmail.com already exists"
        in body["message"]
    )
def test_get_user(test_app, test_database):
    """GET /users/<id> returns the stored user."""
    public_id = utils.add_user(
        {"username": "onlinejudge95", "email": "onlinejudge95@gmail.com"}
    )
    client = test_app.test_client()
    resp = client.get(f"/users/{public_id}")
    assert resp.status_code == 200
    body = json.loads(resp.data.decode())
    assert "success" in body["status"]
    assert "onlinejudge95" in body["data"]["username"], body["data"]
    assert "onlinejudge95@gmail.com" in body["data"]["email"]
def test_get_user_invalid_id(test_app, test_database):
    """GET /users/<id> with an unknown id yields a 404."""
    client = test_app.test_client()
    resp = client.get("/users/123")
    assert resp.status_code == 404
    body = json.loads(resp.data.decode())
    assert "fail" in body["status"]
    assert "User with id 123 does not exists" in body["message"]
def test_get_users(test_app, test_database):
    """GET /users lists every stored user."""
    utils.recreate_db()
    utils.add_user(
        {"username": "mayankdcoder", "email": "mayankdcoder@gmail.com"}
    )
    utils.add_user(
        {"username": "mayankdcoder1", "email": "mayankdcoder1@gmail.com"}
    )
    client = test_app.test_client()
    resp = client.get("/users")
    assert resp.status_code == 200
    body = json.loads(resp.data.decode())
    assert "success" in body["status"]
    users = body["data"]["users"]
    assert len(users) == 2
    assert "mayankdcoder" in users[0]["username"]
    assert "mayankdcoder1" in users[1]["username"]
    assert "mayankdcoder@gmail.com" in users[0]["email"]
    assert "mayankdcoder1@gmail.com" in users[1]["email"]
def test_remove_user(test_app, test_database):
    """DELETE /users/<id> removes the user from the listing."""
    utils.recreate_db()
    public_id = utils.add_user(
        {"username": "removed", "email": "remove@gmail.com"}
    )
    client = test_app.test_client()

    before = client.get("/users")
    assert before.status_code == 200
    assert len(json.loads(before.data.decode())["data"]["users"]) == 1

    deletion = client.delete(f"/users/{public_id}")
    assert deletion.status_code == 200
    body = json.loads(deletion.data.decode())
    assert "remove@gmail.com was removed!" in body["message"]
    assert "success" in body["status"]

    after = client.get("/users")
    assert after.status_code == 200
    assert len(json.loads(after.data.decode())["data"]["users"]) == 0
def test_remove_user_incorrect_id(test_app, test_database):
    """DELETE /users/<id> with an unknown id yields a 404."""
    client = test_app.test_client()
    resp = client.delete("/users/999")
    assert resp.status_code == 404
    body = json.loads(resp.data.decode())
    assert "User with id 999 does not exists" in body["message"]
    assert "fail" in body["status"]
def test_update_user(test_app, test_database):
    """PUT /users/<id> updates username and email."""
    utils.recreate_db()
    public_id = utils.add_user(
        {"username": "update", "email": "update@gmail.com"}
    )
    client = test_app.test_client()

    update_resp = client.put(
        f"/users/{public_id}",
        data=json.dumps({"username": "me", "email": "me@gmail.com"}),
        content_type="application/json",
    )
    assert update_resp.status_code == 200
    body = json.loads(update_resp.data.decode())
    assert f"{public_id} was updated!" in body["message"]
    assert "success" in body["status"]

    fetch_resp = client.get(f"/users/{public_id}")
    assert fetch_resp.status_code == 200
    body = json.loads(fetch_resp.data.decode())
    assert "me" in body["data"]["username"], body["data"]
    assert "me@gmail.com" in body["data"]["email"]
    assert "success" in body["status"]
def test_update_user_wrong_permission(test_app, test_database):
    """PUT /users/<id> must not be able to change public_id."""
    utils.recreate_db()
    public_id = utils.add_user(
        {"username": "update", "email": "update@gmail.com"}
    )
    client = test_app.test_client()
    resp = client.put(
        f"/users/{public_id}",
        data=json.dumps({"public_id": "123"}),
        content_type="application/json",
    )
    assert resp.status_code == 403
    body = json.loads(resp.data.decode())
    assert "Can not modify public_id attribute" in body["message"]
    assert "fail" in body["status"]
def test_update_user_does_not_exist(test_app, test_database):
    """PUT /users/<id> with an unknown id yields a 404."""
    client = test_app.test_client()
    resp = client.put(
        "/users/999",
        data=json.dumps({"username": "me", "email": "me@testdriven.io"}),
        content_type="application/json",
    )
    assert resp.status_code == 404
    body = json.loads(resp.data.decode())
    assert "User with id 999 does not exists" in body["message"]
    assert "fail" in body["status"]
def test_update_user_empty_json(test_app, test_database):
    """PUT /users/<id> with an empty JSON body is rejected."""
    utils.recreate_db()
    public_id = utils.add_user(
        {"username": "update", "email": "update@gmail.com"}
    )
    client = test_app.test_client()
    resp = client.put(
        f"/users/{public_id}",
        data=json.dumps({}),
        content_type="application/json",
    )
    assert resp.status_code == 400
    body = json.loads(resp.data.decode())
    assert "Empty payload" in body["message"]
    assert "fail" in body["status"]
| 31.484581 | 78 | 0.646705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,018 | 0.282356 |
35dd29f78d96791ddc38a66b186a7265c181d8b6 | 15,814 | py | Python | MultiAV/MultiAV.py | Virag007/Multi-Malware-Detection-Engine-based-on-Blockchain | 80e91b2f2ec15d271141568451492fefd88f8ace | [
"MIT"
] | 2 | 2020-11-18T10:00:22.000Z | 2020-12-18T13:48:25.000Z | MultiAV/MultiAV.py | Virag007/Multi-Malware-Detection-Engine-based-on-Blockchain | 80e91b2f2ec15d271141568451492fefd88f8ace | [
"MIT"
] | null | null | null | MultiAV/MultiAV.py | Virag007/Multi-Malware-Detection-Engine-based-on-Blockchain | 80e91b2f2ec15d271141568451492fefd88f8ace | [
"MIT"
] | 1 | 2020-12-18T13:48:27.000Z | 2020-12-18T13:48:27.000Z | from threading import *
from tkinter import *
from tkinter.filedialog import askopenfilename
import tkinter, tkinter.scrolledtext
import os
import sys
import urllib.request
import glob
import time
import hashlib
import quarantaene
from vta import vtapi
import argparse
os_name = sys.platform
terminations = []
if "win" in os_name:
if not os.path.exists("MultiAV\\Quarantine\\"):
os.makedirs("MultiAV\\Quarantine\\")
quarantine_folder = "MultiAV\\Quarantine\\*"
file_to_quarantine = "MultiAV\\Quarantine\\"
transfer = os. getcwd() + "\\Transfer\\*"
else:
if not os.path.exists("MultiAV/Quarantine/"):
os.makedirs("MultiAV/Quarantine/")
quarantine_folder = "MultiAV/Quarantine/*"
file_to_quarantine = "MultiAV/Quarantine/"
transfer = os. getcwd() + "/Transfer/*"
main = None
update_button = None
details_button = None
scan_button = None
quit_button = None
b_delete = None
b_delete_all = None
b_restore = None
b_restore_all = None
b_add_file = None
text_box = None
li = None
file= None
def quarantine():
    """Refresh the quarantine listbox and wire up the management buttons.

    Clears the Tk listbox, re-globs the quarantine folder into the global
    ``terminations`` list, reports the contents in the log text box and
    binds the delete/restore/add buttons to button_action_handler().
    """
    global text_box
    global terminations
    global li
    global b_delete
    global b_delete_all
    global b_restore
    global b_restore_all
    global b_add_file
    k = 0
    # Empty the listbox entry by entry until an empty slot is reached.
    while True:
        tmp = len(li.get(k))
        if tmp == 0:
            break
        else:
            li.delete(0, tmp)
        k += 1
    li.update()
    # Re-read the quarantine folder from disk.
    terminations = glob.glob(quarantine_folder)
    if terminations == []:
        text_box.insert(END, "[ + ] No files in quarantine\n", "positive")
        text_box.tag_config('positive', foreground="green")
        text_box.see(END)
        text_box.update()
    else:
        text_box.insert(END, "[ + ] Files in quarantine:\n", "positive")
        text_box.tag_config('positive', foreground="green")
        text_box.see(END)
        text_box.update()
        # Log each quarantined file and add it to the listbox.
        for i in terminations:
            text_box.insert(END, "[ * ] " + i + "\n", "info")
            text_box.tag_config("info", background = "red")
            text_box.see(END)
            text_box.update()
            li.insert(END, i)
            li.update()
    # (Re)bind the quarantine management buttons to the dispatcher.
    b_delete_all["command"] =lambda:button_action_handler("delete_all")
    b_delete["command"] = lambda:button_action_handler("delete")
    b_restore["command"] = lambda:button_action_handler("restore")
    b_restore_all["command"] = lambda:button_action_handler("restore_all")
    b_add_file["command"] = lambda:button_action_handler("add_file")
def delete(file, ALL):
    """Permanently remove quarantined file(s) from disk.

    file: path of the quarantine entry to remove (ignored when ALL is 1).
    ALL:  1 -> delete every quarantined file, 0 -> delete only *file*.
    Updates the listbox/text box and refreshes the global ``terminations``.
    """
    global li
    global text_box
    global terminations
    if len(terminations) != 0:
        if ALL == 1:
            for i in range(len(terminations)):
                os.remove(terminations[i])
                text_box.insert(END, "[ + ] Deletion successful: \n" + terminations[i] + "\n", "positive")
                text_box.tag_config("positive", foreground="green")
                text_box.see(END)
                text_box.update()
                li.delete(0, len(terminations[i]))
                li.update()
        elif ALL == 0:
            os.remove(file)
            li.delete(ACTIVE, len(file))
            li.update()
            text_box.insert(END, "[ + ] Deletion successful:\n" + file + "\n", "positive")
            text_box.tag_config("positive", foreground="green")
            text_box.see(END)
            text_box.update()
        # Rebuild the listbox from whatever is still in the folder.
        terminations = glob.glob(quarantine_folder)
        for i in terminations:
            li.insert(END, i)
            li.update()
    else:
        text_box.insert(END, "[ - ] Unable to locate any files\n", "negative")
        text_box.tag_config("negative", foreground="red")
        text_box.see(END)
        text_box.update()
def restore(file, ALL):
    """Restore quarantined file(s) from the quarantine folder.

    file: quarantine entry to restore (ignored when ALL is 1).
    ALL:  1 -> restore every quarantined file, 0 -> restore only *file*.
    NOTE(review): quarantaene.decode_base64 presumably recreates the
    original file from the base64-encoded quarantine copy -- confirm in
    the quarantaene module.
    """
    global li
    global text_box
    global terminations
    if len(terminations) != 0:
        if ALL == 1:
            for i in range(len(terminations)):
                quarantaene.decode_base64(terminations[i])
                text_box.insert(END, "[ + ] Successfully restored\n" + terminations[i] + "\n", 'positive')
                text_box.tag_config('positive', foreground="green")
                text_box.see(END)
                text_box.update()
                li.delete(0, len(terminations[i]))
                li.update()
        elif ALL == 0:
            quarantaene.decode_base64(file)
            li.delete(ACTIVE, len(file))
            text_box.insert(END, "[ + ] Successfully restored\n" + file + "\n", "positive")
            text_box.tag_config("positive", foreground="green")
            text_box.see(END)
            text_box.update()
        # Rebuild the listbox from whatever is still in the folder.
        terminations = glob.glob(quarantine_folder)
        for i in terminations:
            li.insert(END, i)
            li.update()
    else:
        text_box.insert(END, "[ - ] Unable to locate any files\n", "negative")
        text_box.tag_config("negative", foreground="red")
        text_box.see(END)
        text_box.update()
def add_file_to_quarantine():
    """Let the user pick a file and move it into the quarantine folder.

    Encodes the chosen file via quarantaene.encode_base64, logs the action
    in the text box and rebuilds the quarantine listbox from the folder.
    """
    global li
    global terminations
    file = askopenfilename()
    quarantaene.encode_base64(file, file_to_quarantine)
    text_box.insert(END, "[ + ] Moved to quarantine:\n" + file + "\n", "positive")
    text_box.tag_config("positive", foreground="green")
    text_box.see(END)
    text_box.update()
    li.update()
    k = 0
    # Empty the listbox entry by entry until an empty slot is reached.
    while True:
        tmp = len(li.get(k))
        if tmp == 0:
            break
        else:
            li.delete(0, tmp)
        k += 1
    li.update()
    # Re-read the quarantine folder and repopulate the listbox.
    terminations = glob.glob(quarantine_folder)
    for i in terminations:
        li.insert(END, i)
        li.update()
def parse_options():
    """Build and parse the scanner's command line options.

    Returns an argparse.Namespace with attributes ``sfile`` (hash or path
    of a previously scanned file), ``file`` (path to scan) and ``verbose``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-F", "--results-file", dest="sfile",
        help="Get report of previously scanned file. If the "
             "given filename cannot be found/opened, we'll assume "
             "it's a hash.",
    )
    parser.add_argument("-f", "--file", dest="file", help="Scan file")
    parser.add_argument(
        "-v", "--verbose", dest="verbose",
        default=False, action="store_true",
        help="Print complete reply",
    )
    return parser.parse_args()
def automatic_scan(path):
    """Scan *path* with VirusTotal and quarantine it if any engine flags it.

    Used for the automatic scan of the newest file in the Transfer folder;
    behaves like scan() except the file path is given instead of chosen via
    a file dialog.  Stores the file's MD5 in the global ``md5hash`` (read
    later by detailedReport()) and reports progress in the global text box.

    path: path of the file to upload and check.
    Returns None early when hashing the file fails.
    """
    global text_box
    global md5hash
    match = False
    file = path
    start = time.time()
    text_box.insert(END, "[ * ] Scanning " + file + "\n")
    text_box.see(END)
    text_box.update()
    # Upload the file to VirusTotal for analysis.
    arg = parse_options()
    arg.file = file
    vt = vtapi(arg.verbose)
    vt.sendfile(arg.file)
    try:
        # Hash the file so its report can be fetched by MD5.
        # BUGFIX: use a context manager so the handle is also closed when
        # read()/hashing raises (the original leaked it on the error path).
        with open(file, "rb") as f:
            content = f.read()
        content = create_md5(content)
        md5hash = content.decode("utf-8")
        text_box.insert(END, "MD5-Hash: " + md5hash + "\n")
        text_box.see(END)
        text_box.update()
    except MemoryError:
        # File too large to read into memory in one piece.
        text_box.insert(END, "[ - ] Unable to create MD5-Hash:\n----->MemoryError!\n", 'negative')
        text_box.insert(END, "[ ! ] Only select files under 1 GB\n", "negative")
        text_box.tag_config('negative', foreground="red")
        text_box.see(END)
        text_box.update()
        return None
    except Exception:
        text_box.insert(END, "[ ! ] Unable to handle problem\n[ ! ] Try again/file might be corrupted\n", "negative")
        text_box.tag_config('negative', foreground="red")
        text_box.see(END)
        text_box.update()
        return None
    # Poll VirusTotal until a report is available (print_scan_results
    # returns 0 while the analysis is still pending).
    while True:
        scan_result = vt.print_scan_results(vt.results("file", md5hash))
        if scan_result != 0:
            un = scan_result.count("Clean")            # engines reporting clean
            line_count = scan_result.count("\n") - 2   # total engine lines in the report
            percent = 100 - ((un / line_count) * 100)  # detection ratio in percent
            if percent != 0:
                match = True
            break
        # BUGFIX: the original busy-waited at full speed; the public
        # VirusTotal API is rate limited (4 requests/minute), so pause
        # between polls instead of hammering it.
        time.sleep(15)
    text_box.insert(END, "[ * ] Scan duration: {0}\n".format(round(time.time()-start, 2)))
    text_box.see(END)
    text_box.update()
    if match == True:
        # At least one engine flagged the file: move it into quarantine.
        quarantaene.encode_base64(file, file_to_quarantine)
        text_box.insert(END, "[ ! ] Threat found: {0}%\n[ ! ] File was moved into quarantine\n".format(percent), "important")
        text_box.tag_config("important", foreground="red")
        text_box.see(END)
        text_box.update()
    else:
        text_box.insert(END, "[ + ] No threat was found\n", "positive")
        text_box.tag_config("positive", foreground="green")
        text_box.see(END)
        text_box.update()
def scan():
    """Ask the user for a file, scan it with VirusTotal, quarantine on hit.

    Opens a file dialog, uploads the file, hashes it (MD5 stored in the
    global ``md5hash`` for detailedReport()), polls VirusTotal for the
    report and moves the file into quarantine when any engine flags it.
    Progress and results are written to the global text box.

    Returns None early when hashing the file fails.
    """
    global text_box
    global md5hash
    match = False
    file = askopenfilename()
    start = time.time()
    text_box.insert(END, "[ * ] Scanning " + file + "\n")
    text_box.see(END)
    text_box.update()
    # Upload the file to VirusTotal for analysis.
    arg = parse_options()
    arg.file = file
    vt = vtapi(arg.verbose)
    vt.sendfile(arg.file)
    try:
        # Hash the file so its report can be fetched by MD5.
        # BUGFIX: use a context manager so the handle is also closed when
        # read()/hashing raises (the original leaked it on the error path).
        with open(file, "rb") as f:
            content = f.read()
        content = create_md5(content)
        md5hash = content.decode("utf-8")
        text_box.insert(END, "MD5-Hash: " + md5hash + "\n")
        text_box.see(END)
        text_box.update()
    except MemoryError:
        # File too large to read into memory in one piece.
        text_box.insert(END, "[ - ] Unable to create MD5-Hash:\n----->MemoryError!\n", 'negative')
        text_box.insert(END, "[ ! ] Only select files under 1 GB\n", "negative")
        text_box.tag_config('negative', foreground="red")
        text_box.see(END)
        text_box.update()
        return None
    except Exception:
        text_box.insert(END, "[ ! ] Unable to handle problem\n[ ! ] Try again/file might be corrupted\n", "negative")
        text_box.tag_config('negative', foreground="red")
        text_box.see(END)
        text_box.update()
        return None
    # Poll VirusTotal until a report is available (print_scan_results
    # returns 0 while the analysis is still pending).
    while True:
        scan_result = vt.print_scan_results(vt.results("file", md5hash))
        if scan_result != 0:
            un = scan_result.count("Clean")            # engines reporting clean
            line_count = scan_result.count("\n") - 2   # total engine lines in the report
            percent = 100 - ((un / line_count) * 100)  # detection ratio in percent
            if percent != 0:
                match = True
            break
        # BUGFIX: the original busy-waited at full speed; the public
        # VirusTotal API is rate limited (4 requests/minute), so pause
        # between polls instead of hammering it.
        time.sleep(15)
    text_box.insert(END, "[ * ] Scan duration: {0}\n".format(round(time.time()-start, 2)))
    text_box.see(END)
    text_box.update()
    if match == True:
        # At least one engine flagged the file: move it into quarantine.
        quarantaene.encode_base64(file, file_to_quarantine)
        text_box.insert(END, "[ ! ] Threat found: {0}%\n[ ! ] File was moved into quarantine\n".format(percent), "important")
        text_box.tag_config("important", foreground="red")
        text_box.see(END)
        text_box.update()
    else:
        text_box.insert(END, "[ + ] No threat was found\n", "positive")
        text_box.tag_config("positive", foreground="green")
        text_box.see(END)
        text_box.update()
def create_md5(content):
    """Return the hexadecimal MD5 digest of *content* as UTF-8 bytes."""
    digest = hashlib.md5(content).hexdigest()
    return digest.encode("utf-8")
def detailedReport():
    """Print the full per-engine VirusTotal report for the last scanned file.

    Reads the global ``md5hash`` set by scan()/automatic_scan(); calling
    this before any scan fails because md5hash does not exist yet.
    """
    global text_box
    global md5hash
    arg = parse_options()
    arg.sfile=md5hash
    vt = vtapi(arg.verbose)
    # Fetch the cached report by MD5 and dump it into the text box.
    scan_result1=vt.print_scan_results(vt.results("file", md5hash))
    text_box.insert(END, scan_result1)
    text_box.see(END)
    text_box.update()
def update():
    """Handler for the Update button; no update mechanism is implemented yet."""
    global text_box
def closing():
    """Destroy the main window and terminate the whole process."""
    main.destroy()
    sys.exit()
def button_action_handler(s):
    """Dispatch a GUI button press *s* to its worker on a background thread.

    Running the workers on threads keeps the Tk event loop responsive.
    "quarantine_button" toggles the visibility of the quarantine widgets
    instead of running a worker (and refreshes the list when showing them).
    """
    global text_box
    global b_delete
    global b_delete_all
    global b_restore
    global b_restore_all
    global b_add_file
    global li
    if s == "delete":
        tb = Thread(target=delete, args=(li.get(ACTIVE), 0))
        tb.start()
    if s == "delete_all":
        tb = Thread(target=delete, args=(0, 1))
        tb.start()
    if s == "restore":
        tb = Thread(target=restore, args=(li.get(ACTIVE), 0))
        tb.start()
    if s == "restore_all":
        tb = Thread(target=restore, args=(0, 1))
        tb.start()
    if s == "add_file":
        tb = Thread(target=add_file_to_quarantine)
        tb.start()
    # BUGFIX: the original tested "details_button" twice and therefore
    # started two detailedReport threads per click; the duplicate branch
    # has been removed.
    if s == "details_button":
        tb = Thread(target=detailedReport)
        tb.start()
    if s == "scan_button":
        tb = Thread(target=scan)
        tb.start()
    if s == "update_button":
        tb = Thread(target=update)
        tb.start()
    if s == "quarantine_button":
        if li.winfo_viewable() == 0:
            # Show the quarantine management widgets and refresh the list.
            b_delete.place(x = 605, y = 109)
            b_delete_all.place(x = 605, y = 134)
            b_restore.place(x = 605, y = 159)
            b_restore_all.place(x = 605, y = 184)
            b_add_file.place(x = 605, y = 209)
            li.place(x = 605, y = 0)
            tb = Thread(target=quarantine)
            tb.start()
        if li.winfo_viewable() == 1:
            # Hide them again when the list is currently visible.
            b_delete.place_forget()
            b_delete_all.place_forget()
            b_restore.place_forget()
            b_restore_all.place_forget()
            b_add_file.place_forget()
            li.place_forget()
    if s == "quit_button":
        tb = Thread(target=closing)
        tb.start()
def gui_thread():
    """Build the Tk main window, run the startup auto-scan, enter mainloop.

    Creates the action buttons, the scrolling log text box and the
    (initially hidden) quarantine listbox/buttons, publishes them via the
    module-level globals used by the worker callbacks, scans the newest
    file in the Transfer folder (if any), then hands control to Tk.
    """
    global main
    global update_button
    global details_button
    global scan_button
    global url_scan_button
    global url_scan_button
    global quit_button
    global text_box
    global li
    global b_delete
    global b_delete_all
    global b_restore
    global b_restore_all
    global b_add_file
    main = tkinter.Tk()
    main.title("MultiAV")
    main.wm_iconbitmap("")
    main.geometry("800x240")
    main.resizable(False, False)
    # Common button dimensions (German: hoehe = height, breite = width).
    hoehe = 2
    breite = 16
    scan_button = tkinter.Button(main,text = "Scan", command=lambda:button_action_handler("scan_button"), height = hoehe, width = breite)
    scan_button.grid(row = 0, column = 0)
    details_button = tkinter.Button(main,text = "Detailed Result", command=lambda:button_action_handler("details_button"), height = hoehe, width = breite)
    details_button.grid(row = 1, column = 0)
    update_button = tkinter.Button(main,text = "Update", command=lambda:button_action_handler("update_button"), height = hoehe, width = breite)
    update_button.grid(row = 2, column = 0)
    quarantine_button = tkinter.Button(main,text = "Quarantine", command=lambda:button_action_handler("quarantine_button"), height = hoehe, width = breite)
    quarantine_button.grid(row = 3, column = 0)
    quit_button = tkinter.Button(main,text = "Close", command=lambda:button_action_handler("quit_button"), height = hoehe, width = breite)
    quit_button.grid(row = 4, column = 0, sticky="w")
    # Quarantine management buttons; placed then hidden so their geometry
    # is known when button_action_handler toggles them back on.
    b_delete = tkinter.Button(main,text = "Remove current", height=0, width = 21, justify=CENTER)
    b_delete_all = tkinter.Button(main,text = "Remove all", height = 0, width = 21, justify=CENTER)
    b_restore = tkinter.Button(main,text = "Restore current", height=0, width = 21, justify=CENTER)
    b_restore_all = tkinter.Button(main,text = "Restore all", height = 0, width = 21, justify=CENTER)
    b_add_file = tkinter.Button(main,text = "Add file", height = 0, width = 21, justify=CENTER)
    b_delete.place(x = 605, y = 109)
    b_delete_all.place(x = 605, y = 134)
    b_restore.place(x = 605, y = 159)
    b_restore_all.place(x = 605, y = 184)
    b_add_file.place(x = 605, y = 209)
    b_delete.place_forget()
    b_delete_all.place_forget()
    b_restore.place_forget()
    b_restore_all.place_forget()
    b_add_file.place_forget()
    # Scrolling log area shared by every worker.
    text_box = tkinter.scrolledtext.ScrolledText(main)
    text_box.place(height = 240, width = 454,x = 153, y = 0)
    li = tkinter.Listbox(main, height=6, width = 24)
    li.place(x = 605, y = 0)
    li.place_forget()
    text_box.insert(END, "Your System is Protected\n", "VIP")
    text_box.tag_config("VIP", background='yellow')
    text_box.insert(END, "[ + ] Preparing the program\n", 'positive')
    text_box.tag_config('positive', foreground='green')
    text_box.see(END)
    text_box.update()
    # Automatically scan the newest file dropped into the Transfer folder.
    list_of_files = glob.glob(transfer)
    if(len(list_of_files)>0):
        latest_file = max(list_of_files, key=os.path.getctime)
        automatic_scan(latest_file)
    main.mainloop()
# Run the whole GUI on a separate thread at import time.
t_main = Thread(target=gui_thread)
t_main.start()
| 34.008602 | 155 | 0.608511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,533 | 0.160175 |
35de2fad41fb3b2060ce525003204693b12af0ed | 8,421 | py | Python | model.py | ishine/Speaker_Verification | 69935cf422c2fbfe1258c5b4e682c46cf07ec4f4 | [
"MIT"
] | 337 | 2018-11-12T14:52:43.000Z | 2022-03-09T09:23:33.000Z | model.py | ishine/Speaker_Verification | 69935cf422c2fbfe1258c5b4e682c46cf07ec4f4 | [
"MIT"
] | 27 | 2018-12-17T09:53:18.000Z | 2022-02-17T01:43:51.000Z | model.py | ishine/Speaker_Verification | 69935cf422c2fbfe1258c5b4e682c46cf07ec4f4 | [
"MIT"
] | 112 | 2018-11-29T05:29:31.000Z | 2022-03-23T09:23:19.000Z | import tensorflow as tf
import numpy as np
import os
import time
from utils import random_batch, normalize, similarity, loss_cal, optim
from configuration import get_config
from tensorflow.contrib import rnn
config = get_config()
def train(path):
    """Train the speaker-embedding LSTM and save checkpoints/logs under *path*.

    Builds a TF1 graph: a stacked LSTM embeds utterance batches of shape
    (time, N*M, 40 mels) into normalized d-vectors, a similarity matrix
    against per-speaker centroids feeds the loss selected by config.loss.
    The learning rate is halved every 10000 iterations; the model is
    checkpointed to <path>/Check_Point and summaries go to <path>/logs.

    path: output directory for checkpoints and TensorBoard logs.
    """
    tf.reset_default_graph()    # reset graph
    # draw graph
    batch = tf.placeholder(shape= [None, config.N*config.M, 40], dtype=tf.float32)  # input batch (time x batch x n_mel)
    lr = tf.placeholder(dtype= tf.float32)  # learning rate
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Scaling (w) and bias (b) of the similarity matrix; trained jointly.
    w = tf.get_variable("w", initializer= np.array([10], dtype=np.float32))
    b = tf.get_variable("b", initializer= np.array([-5], dtype=np.float32))
    # embedding lstm (3-layer default)
    with tf.variable_scope("lstm"):
        lstm_cells = [tf.contrib.rnn.LSTMCell(num_units=config.hidden, num_proj=config.proj) for i in range(config.num_layer)]
        lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells)    # define lstm op and variables
        outputs, _ = tf.nn.dynamic_rnn(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True)   # for TI-VS must use dynamic rnn
        embedded = outputs[-1]                            # the last ouput is the embedded d-vector
        embedded = normalize(embedded)                    # normalize
    print("embedded size: ", embedded.shape)
    # loss
    sim_matrix = similarity(embedded, w, b)
    print("similarity matrix size: ", sim_matrix.shape)
    loss = loss_cal(sim_matrix, type=config.loss)
    # optimizer operation
    trainable_vars= tf.trainable_variables()                # get variable list
    optimizer= optim(lr)                                    # get optimizer (type is determined by configuration)
    grads, vars= zip(*optimizer.compute_gradients(loss))    # compute gradients of variables with respect to loss
    grads_clip, _ = tf.clip_by_global_norm(grads, 3.0)      # l2 norm clipping by 3
    grads_rescale= [0.01*grad for grad in grads_clip[:2]] + grads_clip[2:]   # smaller gradient scale for w, b
    train_op= optimizer.apply_gradients(zip(grads_rescale, vars), global_step= global_step)   # gradient update operation
    # check variables memory
    variable_count = np.sum(np.array([np.prod(np.array(v.get_shape().as_list())) for v in trainable_vars]))
    print("total variables :", variable_count)
    # record loss
    loss_summary = tf.summary.scalar("loss", loss)
    merged = tf.summary.merge_all()
    saver = tf.train.Saver()
    # training session
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        os.makedirs(os.path.join(path, "Check_Point"), exist_ok=True)  # make folder to save model
        os.makedirs(os.path.join(path, "logs"), exist_ok=True)          # make folder to save log
        writer = tf.summary.FileWriter(os.path.join(path, "logs"), sess.graph)
        epoch = 0
        lr_factor = 1   # lr decay factor ( 1/2 per 10000 iteration)
        loss_acc = 0    # accumulated loss ( for running average of loss)
        for iter in range(config.iteration):
            # run forward and backward propagation and update parameters
            _, loss_cur, summary = sess.run([train_op, loss, merged],
                                  feed_dict={batch: random_batch(), lr: config.lr*lr_factor})
            loss_acc += loss_cur    # accumulated loss for each 100 iteration
            if iter % 10 == 0:
                writer.add_summary(summary, iter)   # write at tensorboard
            if (iter+1) % 100 == 0:
                print("(iter : %d) loss: %.4f" % ((iter+1),loss_acc/100))
                loss_acc = 0                        # reset accumulated loss
            if (iter+1) % 10000 == 0:
                lr_factor /= 2                      # lr decay
                print("learning rate is decayed! current lr : ", config.lr*lr_factor)
            if (iter+1) % 10000 == 0:
                saver.save(sess, os.path.join(path, "./Check_Point/model.ckpt"), global_step=iter//10000)
                print("model is saved!")
# Test Session
def test(path):
    """Evaluate a trained model: embed enrollment/verification batches and report EER.

    Rebuilds the LSTM graph, restores the checkpoint matching
    config.model_num from <path>/Check_Point, computes per-speaker
    centroids from the enrollment half of the batch and a similarity
    matrix for the verification half, then sweeps thresholds in
    [0.5, 1.0) to find the Equal Error Rate (where FAR == FRR).

    path: directory containing the Check_Point folder.
    Raises AssertionError when no checkpoint matches config.model_num.
    """
    tf.reset_default_graph()
    # draw graph
    enroll = tf.placeholder(shape=[None, config.N*config.M, 40], dtype=tf.float32)  # enrollment batch (time x batch x n_mel)
    verif = tf.placeholder(shape=[None, config.N*config.M, 40], dtype=tf.float32)   # verification batch (time x batch x n_mel)
    batch = tf.concat([enroll, verif], axis=1)
    # embedding lstm (3-layer default)
    with tf.variable_scope("lstm"):
        lstm_cells = [tf.contrib.rnn.LSTMCell(num_units=config.hidden, num_proj=config.proj) for i in range(config.num_layer)]
        lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells)    # make lstm op and variables
        outputs, _ = tf.nn.dynamic_rnn(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True)   # for TI-VS must use dynamic rnn
        embedded = outputs[-1]                            # the last ouput is the embedded d-vector
        embedded = normalize(embedded)                    # normalize
    print("embedded size: ", embedded.shape)
    # enrollment embedded vectors (speaker model)
    enroll_embed = normalize(tf.reduce_mean(tf.reshape(embedded[:config.N*config.M, :], shape= [config.N, config.M, -1]), axis=1))
    # verification embedded vectors
    verif_embed = embedded[config.N*config.M:, :]
    similarity_matrix = similarity(embedded=verif_embed, w=1., b=0., center=enroll_embed)
    saver = tf.train.Saver(var_list=tf.global_variables())
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        # load model
        print("model path :", path)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir=os.path.join(path, "Check_Point"))
        ckpt_list = ckpt.all_model_checkpoint_paths
        loaded = 0
        for model in ckpt_list:
            if config.model_num == int(model.split('-')[-1]):    # find ckpt file which matches configuration model number
                print("ckpt file is loaded !", model)
                loaded = 1
                saver.restore(sess, model)  # restore variables from selected ckpt file
                break
        if loaded == 0:
            raise AssertionError("ckpt file does not exist! Check config.model_num or config.model_path.")
        print("test file path : ", config.test_path)
        # return similarity matrix after enrollment and verification
        time1 = time.time() # for check inference time
        if config.tdsv:
            # text-dependent: fixed utterances with two noise variants
            S = sess.run(similarity_matrix, feed_dict={enroll:random_batch(shuffle=False, noise_filenum=1),
                                                       verif:random_batch(shuffle=False, noise_filenum=2)})
        else:
            # text-independent: later utterances of the same speakers
            S = sess.run(similarity_matrix, feed_dict={enroll:random_batch(shuffle=False),
                                                       verif:random_batch(shuffle=False, utter_start=config.M)})
        S = S.reshape([config.N, config.M, -1])
        time2 = time.time()
        np.set_printoptions(precision=2)
        print("inference time for %d utterences : %0.2fs"%(2*config.M*config.N, time2-time1))
        print(S)    # print similarity matrix
        # calculating EER
        diff = 1; EER=0; EER_thres = 0; EER_FAR=0; EER_FRR=0
        # through thresholds calculate false acceptance ratio (FAR) and false reject ratio (FRR)
        for thres in [0.01*i+0.5 for i in range(50)]:
            S_thres = S>thres
            # False acceptance ratio = false acceptance / mismatched population (enroll speaker != verification speaker)
            FAR = sum([np.sum(S_thres[i])-np.sum(S_thres[i,:,i]) for i in range(config.N)])/(config.N-1)/config.M/config.N
            # False reject ratio = false reject / matched population (enroll speaker = verification speaker)
            FRR = sum([config.M-np.sum(S_thres[i][:,i]) for i in range(config.N)])/config.M/config.N
            # Save threshold when FAR = FRR (=EER)
            if diff> abs(FAR-FRR):
                diff = abs(FAR-FRR)
                EER = (FAR+FRR)/2
                EER_thres = thres
                EER_FAR = FAR
                EER_FRR = FRR
        print("\nEER : %0.2f (thres:%0.2f, FAR:%0.2f, FRR:%0.2f)"%(EER,EER_thres,EER_FAR,EER_FRR))
| 49.828402 | 134 | 0.609785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,256 | 0.267902 |