hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2d70a8b78aca6d858249d4a0c40efeae5a1ad298 | 5,333 | py | Python | openfda/deploy/tests/adae/test_endpoint.py | hobochili/openfda | 9958c4bc3d04d2e9cfc75f9cd894ad07a45e9141 | [
"CC0-1.0"
] | 388 | 2015-01-09T18:50:35.000Z | 2022-03-24T10:15:23.000Z | openfda/deploy/tests/adae/test_endpoint.py | hobochili/openfda | 9958c4bc3d04d2e9cfc75f9cd894ad07a45e9141 | [
"CC0-1.0"
] | 150 | 2015-01-21T20:30:54.000Z | 2022-03-28T20:46:29.000Z | openfda/deploy/tests/adae/test_endpoint.py | hobochili/openfda | 9958c4bc3d04d2e9cfc75f9cd894ad07a45e9141 | [
"CC0-1.0"
] | 113 | 2015-01-31T21:24:16.000Z | 2022-01-30T15:17:28.000Z | # coding=utf-8
import inspect
import sys
from openfda.tests.api_test_helpers import *
def test_nullified_records():
    """Nullified (retracted) cases must not be returned by the API."""
    NULLIFIED = ['USA-FDACVM-2018-US-045311', 'USA-FDACVM-2018-US-048571', 'USA-FDACVM-2018-US-046672',
                 'USA-FDACVM-2017-US-070108', 'USA-FDACVM-2017-US-002864', 'USA-FDACVM-2017-US-002866',
                 'USA-FDACVM-2017-US-052458', 'USA-FDACVM-2017-US-055193', 'USA-FDACVM-2017-US-043931',
                 'USA-FDACVM-2018-US-002321', 'USA-FDACVM-2017-US-042492', 'USA-FDACVM-2018-US-044065'
                 ]
    for case in NULLIFIED:
        # Each nullified case id must yield an empty result set.
        meta, results = fetch(
            '/animalandveterinary/event.json?search=unique_aer_id_number:{0}'.format(case))
        eq_(results, None)
def test_single_ae_record():
    """Fetch one known adverse-event record and pin every exposed field."""
    meta, results = fetch(
        '/animalandveterinary/event.json?search=unique_aer_id_number:USA-USFDACVM-2015-US-094810')
    eq_(len(results), 1)

    ae = results[0]
    # Report identity, receiver and reporter details.
    eq_("USA-USFDACVM-2015-US-094810", ae["unique_aer_id_number"])
    eq_(None, ae.get("@id"))
    eq_("N141251", ae["report_id"])
    eq_("20150126", ae["original_receive_date"])
    eq_("Food and Drug Administration Center for Veterinary Medicine", ae["receiver"]["organization"])
    eq_("7500 Standish Place (HFV-210) Room N403", ae["receiver"]["street_address"])
    eq_("Rockville", ae["receiver"]["city"])
    eq_("MD", ae["receiver"]["state"])
    eq_("20855", ae["receiver"]["postal_code"])
    eq_("USA", ae["receiver"]["country"])
    eq_("Other", ae["primary_reporter"])
    eq_("Safety Issue", ae["type_of_information"])
    eq_("true", ae["serious_ae"])
    eq_("1", ae["number_of_animals_treated"])
    eq_("1", ae["number_of_animals_affected"])
    # Animal demographics (note: numeric values are serialized as strings).
    eq_("Dog", ae["animal"]["species"])
    eq_("Male", ae["animal"]["gender"])
    eq_("Neutered", ae["animal"]["reproductive_status"])
    eq_("NOT APPLICABLE", ae["animal"]["female_animal_physiological_status"])
    eq_("1.00", ae["animal"]["age"]["min"])
    eq_(None, ae["animal"]["age"].get("max"))
    eq_("Year", ae["animal"]["age"]["unit"])
    eq_("Measured", ae["animal"]["age"]["qualifier"])
    eq_("38.419", ae["animal"]["weight"]["min"])
    eq_(None, ae["animal"]["weight"].get("max"))
    eq_("Kilogram", ae["animal"]["weight"]["unit"])
    eq_("Measured", ae["animal"]["weight"]["qualifier"])
    eq_("false", ae["animal"]["breed"]["is_crossbred"])
    eq_("Retriever - Labrador", ae["animal"]["breed"]["breed_component"])
    # Outcome and pre-exposure health assessment.
    eq_("Recovered/Normal", ae["outcome"][0]["medical_status"])
    eq_("1", ae["outcome"][0]["number_of_animals_affected"])
    eq_("Good", ae["health_assessment_prior_to_exposure"]["condition"])
    eq_("Veterinarian", ae["health_assessment_prior_to_exposure"]["assessed_by"])
    eq_("20141222", ae["onset_date"])
    eq_({'value': '4', 'unit': 'Week'}, ae.get("duration"))
    # Reactions coded with VeDDRA terms.
    eq_("11", ae["reaction"][0]["veddra_version"])
    eq_("129", ae["reaction"][0]["veddra_term_code"])
    eq_("Vocalisation", ae["reaction"][0]["veddra_term_name"])
    eq_("1", ae["reaction"][0]["number_of_animals_affected"])
    eq_("Actual", ae["reaction"][0]["accuracy"])
    eq_("11", ae["reaction"][1]["veddra_version"])
    eq_("960", ae["reaction"][1]["veddra_term_code"])
    eq_("Pruritus", ae["reaction"][1]["veddra_term_name"])
    eq_("1", ae["reaction"][1]["number_of_animals_affected"])
    eq_("Actual", ae["reaction"][1]["accuracy"])
    eq_(None, ae.get("time_between_exposure_and_onset"))
    eq_("false", ae["treated_for_ae"])
    # Exactly one drug entry, with exposure, dose, lot and manufacturer details.
    eq_(1, len(ae["drug"]))
    eq_("20141222", ae["drug"][0]["first_exposure_date"])
    eq_("20141222", ae["drug"][0]["last_exposure_date"])
    eq_("Animal Owner", ae["drug"][0]["administered_by"])
    eq_("Topical", ae["drug"][0]["route"])
    eq_("1", ae["drug"][0]["dose"]["numerator"])
    eq_("tube", ae["drug"][0]["dose"]["numerator_unit"])
    eq_("1", ae["drug"][0]["dose"]["denominator"])
    eq_("dose", ae["drug"][0]["dose"]["denominator_unit"])
    eq_('false', ae["drug"][0].get("used_according_to_label"))
    eq_('Overdosed', ae["drug"][0].get("off_label_use"))
    eq_("false", ae["drug"][0]["previous_exposure_to_drug"])
    eq_(None, ae["drug"][0].get("previous_ae_to_drug"))
    eq_(None, ae["drug"][0].get("ae_abated_after_stopping_drug"))
    eq_(None, ae["drug"][0].get("ae_reappeared_after_resuming_drug"))
    eq_(None, ae["drug"][0].get("manufacturing_date"))
    eq_('KP09ECX KP09C4D', ae["drug"][0].get("lot_number"))
    eq_('2017-01', ae["drug"][0].get("lot_expiration"))
    eq_('000859-2339', ae["drug"][0].get("product_ndc"))
    eq_("MSK", ae["drug"][0]["brand_name"])
    eq_('Solution', ae["drug"][0]["dosage_form"])
    eq_("MSK", ae["drug"][0]["manufacturer"]["name"])
    eq_("USA-USFDACVM-N141251", ae["drug"][0]["manufacturer"]["registration_number"])
    eq_(None, ae["drug"][0].get("number_of_defective_items"))
    eq_(None, ae["drug"][0].get("number_of_items_returned"))
    eq_("QP54AB52", ae["drug"][0]["atc_vet_code"])
    # Active ingredient with its own dose breakdown.
    eq_("Imidacloprid", ae["drug"][0]["active_ingredients"][0]["name"])
    eq_("500", ae["drug"][0]["active_ingredients"][0]["dose"]["numerator"])
    eq_("Milligram", ae["drug"][0]["active_ingredients"][0]["dose"]["numerator_unit"])
    eq_("5", ae["drug"][0]["active_ingredients"][0]["dose"]["denominator"])
    eq_("mL", ae["drug"][0]["active_ingredients"][0]["dose"]["denominator_unit"])
if __name__ == '__main__':
    # Discover and run every test function defined in this module.
    all_functions = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
    for name, func in all_functions:
        # startswith() avoids accidentally running helpers that merely
        # contain "test_" somewhere in their name (the old find() check did).
        if name.startswith("test_"):
            func()
| 42.664 | 101 | 0.649916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,213 | 0.602475 |
2d72528a1f010274ea124fa51f712d501f63eda5 | 6,972 | py | Python | dutil.py | Zverik/podcast_duration | dad0719c3876c21c174d7a34678aa3abdefe39a4 | [
"WTFPL"
] | 1 | 2019-02-26T17:26:24.000Z | 2019-02-26T17:26:24.000Z | dutil.py | Zverik/podcast_duration | dad0719c3876c21c174d7a34678aa3abdefe39a4 | [
"WTFPL"
] | null | null | null | dutil.py | Zverik/podcast_duration | dad0719c3876c21c174d7a34678aa3abdefe39a4 | [
"WTFPL"
] | null | null | null | import requests
import re
import json
import datetime
import os
# strftime/strptime pattern used for all serialized dates (e.g. '2020-01-31').
DATE_FORMAT = '%Y-%m-%d'
# Local JSON cache file for scraped podcast episode lengths.
DATA_PATH = 'rupodcast_lengths.json'
def extract_hms(g):
    """Convert an (hours, minutes, seconds) string triple into total minutes."""
    hours, minutes, seconds = g[0], g[1], g[2]
    # An empty hours group counts as zero.
    return 60 * float(hours or 0) + float(minutes) + float(seconds) / 60
def extract_rss(dur):
    """Parse an itunes:duration string ("SS", "MM:SS" or "HH:MM:SS") into minutes."""
    parts = dur.split(':')
    # Left-pad with zeros so we always have (hours, minutes, seconds).
    parts = [0] * (3 - len(parts)) + parts
    return 60 * float(parts[0] or 0) + float(parts[1]) + float(parts[2]) / 60
def parse_soundcloud_date(m):
    """Placeholder SoundCloud date parser; date extraction is not supported."""
    return None
# English month abbreviation -> month number (1..12).
MONTHS = dict(zip(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'], range(1, 13)))
def parse_overcast_date(m):
    """Turn an Overcast match (month-abbrev, day, optional year) into (y, m, d).

    Returns None when the month abbreviation is unknown.
    """
    month = MONTHS.get(m[0])
    if month is None:
        return None
    # A missing year group means the episode was published this year.
    year = int(m[2]) if m[2] else datetime.date.today().year
    return year, month, int(m[1])
def parse_rss_date(m):
    """Turn an RSS pubDate match (day, month-abbrev, year) into (y, m, d) or None."""
    month = MONTHS.get(m[1])
    return (int(m[2]), month, int(m[0])) if month else None
class ByHandDurationParser:
    """Duck-typed stand-in for a compiled regex used by the 'byhand' extractor.

    findall() splits a comma-separated string and keeps pieces 1-5 chars long.
    """

    @staticmethod
    def findall(s):
        pieces = s.split(',')
        return [piece for piece in pieces if 1 <= len(piece) < 6]
# Per-source field extractors.  For every source name, a mapping of
#   result field -> (object with .findall(text), converter applied per match).
EXTRACTORS = {
    'byhand': {
        'duration': (ByHandDurationParser, lambda x: float(x)),
        # Dates entered by hand as YYMMDD (two-digit year, 2000-based).
        'date': (re.compile(r'(\d\d)(\d\d)(\d\d)'),
                 lambda m: (2000 + int(m[0]), int(m[1]), int(m[2]))),
    },
    'overcast': {
        'duration': (re.compile(r'•\s+(\d+) min'), lambda x: float(x)),
        'date': (re.compile(r'<div class="caption2[^"]+">\s+([A-Z][a-z]{2})\s+'
                            '(\d+)(?:, (\d{4}))?\s+(?:•[^<]+)?</div>', re.S),
                 parse_overcast_date),
    },
    'soundcloud': {
        # schema.org ISO-8601 duration embedded as PTxxHxxMxxS.
        'duration': (re.compile(r'meta itemprop="duration" content="PT(\d\d)H(\d\d)M(\d\d)S"'),
                     extract_hms),
        'date': (re.compile(r'<time pubdate>(\d{4})/(\d\d)/(\d\d) '),
                 lambda m: (int(m[0]), int(m[1]), int(m[2]))),
    },
    'vk': {
        # VK responses are matched as bytes, hence the .encode() on the pattern.
        'duration': (
            re.compile(r'<div class="[^"]*audio_row__duration[^"]*">'
                       '(?:(\d):)?(\d+):(\d+)</div>'.encode()),
            extract_hms),
    },
    'rss': {
        'duration': (re.compile(r'<itunes:duration>\s*([\d:]+)\s*</itunes:duration>', re.M | re.S),
                     extract_rss),
        'date': (re.compile(r'<pubDate>[^<\d]*(\d+) ([A-Z][a-z]{2}) (\d{4}) '), parse_rss_date),
    },
    'spotify': {
        'duration': (re.compile(r'<span class="total-duration">(?:(\d+):)?(\d+):(\d+)</span>'),
                     extract_hms),
        # Spotify shows MM/DD/YYYY; reorder into (year, month, day).
        'date': (re.compile(r'<span class="artists-albums">(\d\d)/(\d\d)/(\d{4})</span>'),
                 lambda m: (int(m[2]), int(m[0]), int(m[1]))),
    },
}
def download_data():
    """Download the podcast catalogue from russiancast.club."""
    response = requests.get('https://russiancast.club/data.json')
    return response.json()
def read_lengths():
    """Load cached lengths from DATA_PATH; return an empty dict when absent."""
    if os.path.exists(DATA_PATH):
        with open(DATA_PATH, 'r') as fh:
            return json.load(fh)
    return {}
def write_lengths(lengths):
    """Persist the lengths cache to DATA_PATH as JSON (non-ASCII kept as-is)."""
    with open(DATA_PATH, 'w') as fh:
        json.dump(lengths, fh, ensure_ascii=False)
def parse_text(ex_name, text):
    """Run every extractor for source ex_name over text.

    Returns {field: [converted match, ...]} with empty-match fields omitted.
    """
    parsed = {}
    for field, (pattern, convert) in EXTRACTORS[ex_name].items():
        matches = pattern.findall(text)
        if matches:
            parsed[field] = [convert(match) for match in matches]
    return parsed
def get_durations(podcast):
    """Fetch and parse durations/dates for a podcast from each web source.

    'vk' and 'byhand' are skipped: they are not fetched by URL here.
    """
    collected = {}
    for source in EXTRACTORS:
        if source in ('vk', 'byhand'):
            continue
        url = podcast.get(source)
        if not url:
            continue
        parsed = parse_text(source, requests.get(url).text)
        if parsed:
            collected[source] = parsed
    return collected
def find_longest_mins(lengths):
    """Pick the longest per-source duration list, keeping only values >= 1 minute."""
    filtered = [[minutes for minutes in entry.get('duration', []) if minutes >= 1]
                for entry in lengths.values()]
    # Stable sort: among equally long lists the last source wins, as before.
    filtered.sort(key=len)
    return filtered[-1]
def find_longest_dates(lengths):
    """Pick the per-source date list with the most entries."""
    date_lists = [entry.get('date', []) for entry in lengths.values()]
    date_lists.sort(key=len)
    return date_lists[-1]
def find_medians(mins):
    """Return (median, med_low, med_high) for a list of numbers.

    med_low/med_high trim a size-dependent number of outliers from each end.
    Fix: the original sorted the caller's list in place; we now work on a
    sorted copy so the argument is left untouched.
    """
    mins = sorted(mins)
    n = len(mins)
    if n == 1:
        median = round(mins[0])
    elif n % 2 == 0:
        median = round((float(mins[n // 2 - 1]) + mins[n // 2]) / 2.0)
    else:
        median = round(mins[n // 2])
    # How many entries to trim from each end before taking low/high bounds.
    if n <= 2:
        dmed = 0
    elif n <= 5:
        dmed = 1
    elif n <= 10:
        dmed = 2
    else:
        dmed = n // 5
    med_low = round(mins[dmed])
    med_high = round(mins[-1 - dmed])
    return median, med_low, med_high
def format_medians(median, med_low, med_high):
    """Render a duration (or a low-high range) as a human-readable Russian string."""

    def round_to_5(n):
        # Values of 14 minutes and above are rounded to the nearest 5.
        return n if n < 14 else round(n / 5.0) * 5

    def minutes_word(n):
        # Correct Russian plural form of "minute" for the number n.
        if n % 10 == 1 and n % 100 != 11:
            return 'минута'
        if n % 10 in (2, 3, 4) and n % 100 not in (12, 13, 14):
            return 'минуты'
        return 'минут'

    # A range is shown only when the spread is both wide and significant.
    wide_spread = med_high - med_low > 10 and med_high * 1.0 / med_low > 1.5
    if wide_spread:
        low, high = round_to_5(med_low), round_to_5(med_high)
        return '{}–{} {}'.format(low, high, minutes_word(high))
    single = round_to_5(median)
    return '{} {}'.format(single, minutes_word(single))
def get_latest_date(dates):
    """Format the most recent (y, m, d) tuple using DATE_FORMAT; None if empty."""
    if dates:
        return datetime.date(*max(dates)).strftime(DATE_FORMAT)
    return None
def get_median_interval(dates):
    """Median statistics of day-gaps between consecutive episodes, or None."""
    if len(dates) < 2:
        return None
    today = datetime.date.today()
    # Age in days of every episode, most recent first after sorting.
    ages = sorted((today - datetime.date(*d)).days for d in dates)
    gaps = [later - earlier for earlier, later in zip(ages, ages[1:])]
    gaps = [gap for gap in gaps if gap > 0]
    if not gaps:
        return None
    # Take last 20, so that format changes do not affect the result
    if len(gaps) > 20:
        gaps = gaps[:20]
    return find_medians(gaps)
def format_interval(median, med_low, med_high):
    """Describe release cadence in Russian for median/low/high day gaps.

    Returns '' when no median is available.
    Fix: the original had an unreachable trailing `return 'нерегулярно'`
    after an exhaustive if/else; the dead line has been removed.
    """
    if not median:
        return ''
    # Very large or very uneven gaps mean "irregular".
    if med_high > 40:
        return 'нерегулярно'
    if med_high > med_low * 3 and median > 14:
        return 'нерегулярно'
    if median == 1:
        return 'ежедневно'
    if median == 2:
        return 'через день'
    if 3 <= median <= 5:
        return 'дважды в неделю'
    if 6 <= median <= 9:
        return 'еженедельно'
    if 10 <= median <= 17:
        return 'раз в две недели'
    if 18 <= median <= 25:
        return 'раз в три недели'
    if 26 <= median <= 40:
        return 'ежемесячно'
    # median > 40 normally implies med_high > 40 (handled above); keep a
    # fallback so every numeric input still maps to a string.
    return 'реже раза в месяц'
def gen_additional_fields(lengths):
    """Derive summary fields (duration, active, frequency) from scraped lengths."""
    result = {}
    if not lengths:
        return result
    mins = find_longest_mins(lengths)
    if mins:
        result['duration'] = format_medians(*find_medians(mins))
    dates = find_longest_dates(lengths)
    if dates:
        meds = get_median_interval(dates)
        latest = get_latest_date(dates)
        # Days since the newest episode.
        age = (datetime.datetime.now() - datetime.datetime.strptime(
            latest, DATE_FORMAT)).days
        # Active if the latest episode is newer than a month (or than the
        # podcast's own typical interval spread, whichever is larger).
        result['active'] = age <= (32 if not meds else max(32, meds[1] + meds[2]))
        if meds:
            result['frequency'] = format_interval(*meds)
    return result
| 28 | 99 | 0.536862 | 125 | 0.017571 | 0 | 0 | 93 | 0.013073 | 0 | 0 | 1,317 | 0.185128 |
2d74ac43e77b3bc75d905f63bb070721d4e2b893 | 557 | py | Python | apps/rates/urls.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | apps/rates/urls.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | apps/rates/urls.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, include, url
from . import views
urlpatterns = [
url(r'^$', views.rate_list, name='list'),
url(r'^(?P<pk>\d+)/$', views.rate_as_field, name='as_field'),
url(r'^suggestions/$', views.suggestions, name='suggestions'),
url(r'^create/$', views.create_rate, name='create'),
#url(r'^user_rate/(?P<username>[\w\s@+.-]+)/$', views.edit_user_rate, name='edit_user_rate'),
url(r'^(?P<pk>\d+)/edit/$', views.edit_rate, name='edit'),
url(r'^(?P<pk>\d+)/delete/$', views.delete_rate, name='delete'),
]
| 42.846154 | 97 | 0.626571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.432675 |
2d7725aa76b9cb8657abbe0c436fc2ad37634dab | 1,100 | py | Python | icasf/utils.py | human-ai2025/Intent_detection_slot_filling | a1d590b85ceea15ac0c528a9092cd79786f82c7e | [
"MIT"
] | null | null | null | icasf/utils.py | human-ai2025/Intent_detection_slot_filling | a1d590b85ceea15ac0c528a9092cd79786f82c7e | [
"MIT"
] | null | null | null | icasf/utils.py | human-ai2025/Intent_detection_slot_filling | a1d590b85ceea15ac0c528a9092cd79786f82c7e | [
"MIT"
] | null | null | null | # importing libraries
import numpy as np
import pandas as pd
import random
import torch
def set_seeds(seed=1234):
    """Seed numpy, random and torch (CPU and CUDA) for reproducibility.

    Keyword Arguments:
        seed {int} -- seed applied to every RNG (default: {1234})
    """
    for seeder in (np.random.seed, random.seed,
                   torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    print("[INFO] THE SEED IS ", seed)
def set_device(cuda=True):
    """Select the torch device to use.

    Keyword Arguments:
        cuda {bool} -- request GPU when available; False forces CPU (default: {True})

    Returns:
        torch.device -- the selected device

    Fix: the original ignored the `cuda` flag entirely; it is now honored
    (default True keeps the old auto-detect behavior).
    """
    use_cuda = cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print("[INFO] THE DEVICE IS ", device)
    return device
# File locations for the ATIS dataset splits.  Per split: path1/path2/path3
# presumably hold input token sequences (seq.in), slot label sequences
# (seq.out) and intent labels (label) respectively -- TODO confirm with loader.
paths = {
    "train_path1": "data/atis/train/seq.in",
    "train_path2": "data/atis/train/seq.out",
    "train_path3": "data/atis/train/label",
    "valid_path1": "data/atis/dev/seq.in",
    "valid_path2": "data/atis/dev/seq.out",
    "valid_path3": "data/atis/dev/label",
    "test_path1": "data/atis/test/seq.in",
    "test_path2": "data/atis/test/seq.out",
    "test_path3": "data/atis/test/label"
}
| 26.190476 | 73 | 0.64 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 654 | 0.594545 |
2d7a3e516a3dc6332a0d0d4fc1a519e619d09c80 | 95 | py | Python | back/treatments/apps.py | EDario333/idia | 21cab7057f924c58ec098c27effcee1a8f0dc94e | [
"BSD-3-Clause"
] | null | null | null | back/treatments/apps.py | EDario333/idia | 21cab7057f924c58ec098c27effcee1a8f0dc94e | [
"BSD-3-Clause"
] | 5 | 2021-03-11T05:33:41.000Z | 2022-02-27T10:21:50.000Z | back/treatments/apps.py | EDario333/idia | 21cab7057f924c58ec098c27effcee1a8f0dc94e | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
class TreatmentsConfig(AppConfig):
name = 'treatments'
| 15.833333 | 34 | 0.768421 | 58 | 0.610526 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.126316 |
2d7be3678b02b1a94e7d1293d4c7eaa5d0726a38 | 1,602 | py | Python | Numbers/alarm.py | arindampradhan/Projects | 5677e05ea56ffea2334d65b275e8920b14980ac2 | [
"MIT"
] | 10 | 2016-07-11T22:09:46.000Z | 2021-12-22T18:59:07.000Z | Numbers/alarm.py | arindampradhan/Projects | 5677e05ea56ffea2334d65b275e8920b14980ac2 | [
"MIT"
] | 1 | 2015-02-26T17:00:13.000Z | 2015-02-26T17:00:13.000Z | Numbers/alarm.py | arindampradhan/Projects | 5677e05ea56ffea2334d65b275e8920b14980ac2 | [
"MIT"
] | 41 | 2015-03-04T00:14:53.000Z | 2022-01-19T14:16:24.000Z | """
Alarm Clock - A simple clock where it plays a sound after
X number of minutes/seconds or at a particular time.
Dependencies:
pyglet
pip install pyglet
"""
import time
import winsound
import pyglet
def play(hh, mm):
    """Block until the local time reaches hh:mm, then play the alarm sound.

    hh -- hour of day (0-23); mm -- minute (0-59).
    Fix: the original polled the clock in a tight loop at 100% CPU; we now
    sleep one second between checks, which cannot miss the target minute.
    """
    while True:
        now = time.localtime()
        if now.tm_hour == hh and now.tm_min == mm:
            song = pyglet.media.load('bin/sound.wav')
            song.play()  # play the sound
            pyglet.app.run()
            break
        time.sleep(1)  # poll once per second instead of busy-spinning
if __name__ == '__main__':
    # NOTE(review): Python 2 syntax -- print statement and input() (which
    # evaluates the typed expression); this script must run under Python 2.
    print """
    1. Play sound after X minutes
    2. Play sound at an exact time
    """
    choice = input('What do you want to do? ')
    if choice == 1:
        mins = input('How many minutes from now? ')
        hh_from_now = mins / 60  # if minutes > 60, this will adjust the hours
        mm_from_now = mins % 60  # and then the minutes
        cur_time = list(time.localtime())  # get the time right now
        hour = cur_time[3]  # find the current hour
        minute = cur_time[4]  # and the current minute
        # Wrap hours/minutes around the clock face.
        hh = (hour + hh_from_now+(minute+mm_from_now)/60) % 24  # cycle through the clock if hh > 24
        mm = (minute + mm_from_now) % 60  # cycle through the clock if mm > 60
        play(hh, mm)
    elif choice == 2:
        hh = input('What hour do you want to wake up (0-23)? ')
        mm = input('What minute do you want to wake up (0-59)? ')
        play(hh, mm)
| 32.04 | 100 | 0.581773 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 729 | 0.455056 |
2d7dcc785b405d3f6e78cbe6c3e04130c286b168 | 3,107 | py | Python | tests/test_in_serializers.py | expobrain/drf-compound-fields | c1ea78705fb460fabde48984a7d195d71adee68a | [
"BSD-3-Clause"
] | null | null | null | tests/test_in_serializers.py | expobrain/drf-compound-fields | c1ea78705fb460fabde48984a7d195d71adee68a | [
"BSD-3-Clause"
] | null | null | null | tests/test_in_serializers.py | expobrain/drf-compound-fields | c1ea78705fb460fabde48984a7d195d71adee68a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_in_serializers
-------------------
Tests of the fields cooperation in the serializer interfaces for serialization, de-serialization,
and validation.
"""
# Django settings:
# Minimal in-module configuration so the serializers import standalone.
import os
os.environ['DJANGO_SETTINGS_MODULE'] = __name__  # this module doubles as the settings module
from django.conf.global_settings import CACHES  # NOQA
SECRET_KEY = 's3cr3t'  # dummy value; Django refuses to start without a SECRET_KEY
from rest_framework import serializers
from rest_framework.compat import six
from drf_compound_fields.fields import DictField
from drf_compound_fields.fields import ListField
class ListSerializer(serializers.Serializer):
    """Serializer exposing a single optional list-of-emails field."""
    emails = ListField(serializers.EmailField(), required=False)
def test_non_list():
    """A scalar value for a list field must be rejected with a field error."""
    serializer = ListSerializer(data={'emails': 'notAList'})
    valid = serializer.is_valid()
    assert not valid, 'Non-list value should not be allowed'
    errors = serializer.errors
    assert 'emails' in errors, 'Non-list value should produce a field error'
    assert errors['emails'], 'Non-list value error should be non-empty'
def test_invalid_list_item():
    """An invalid item inside the list is reported, keyed by its position."""
    serializer = ListSerializer(data={'emails': ['some.where@out.there', 'notAnEmail']})
    assert not serializer.is_valid(), 'Invalid list-item should not be allowed'
    errors = serializer.errors
    assert 'emails' in errors, 'Invalid list-item should produce a field error'
    assert errors['emails'], 'Invalid list-item errors should be non-empty {0}'.format(
        errors['emails'])
    # The only error key is the index (1) of the bad email.
    assert list(six.iterkeys(errors['emails'][0])) == [1]
def test_empty_list():
    """An empty list is valid input for an optional list field."""
    assert ListSerializer(data={'emails': []}).is_valid(), 'Empty list should be allowed'
def test_valid_list():
    """A list of valid emails passes validation."""
    assert ListSerializer(data={'emails': ['some.where@out.there']}).is_valid(), \
        'Valid list should be allowed'
class DictSerializer(serializers.Serializer):
    """Serializer exposing a single optional dict-of-emails field."""
    emails = DictField(serializers.EmailField(), required=False)
def test_non_dict():
    """A scalar value for a dict field must be rejected with a field error."""
    serializer = DictSerializer(data={'emails': 'notADict'})
    valid = serializer.is_valid()
    assert not valid, 'Non-dict value should not be allowed'
    errors = serializer.errors
    assert 'emails' in errors, 'Non-dict value should produce a field error'
    assert errors['emails'], 'Non-dict value error should be non-empty'
def test_invalid_dict_value():
    """An invalid value inside the dict is reported, keyed by its dict key."""
    serializer = DictSerializer(data={'emails': {'a': 'some.where@out.there',
                                                 'b': 'notAnEmail'}})
    assert not serializer.is_valid(), 'Invalid dict-value should not be allowed'
    errors = serializer.errors
    assert 'emails' in errors, 'Invalid dict-value should produce a field error'
    assert errors['emails'], 'Invalid dict-value errors should be non-empty {0}'.format(
        errors['emails'])
    # The only error key is the dict key ('b') of the bad email.
    assert list(six.iterkeys(errors['emails'][0])) == ['b']
def test_empty_dict():
    """An empty dict is valid input for an optional dict field."""
    assert DictSerializer(data={'emails': {}}).is_valid(), 'Empty dict should be allowed'
def test_valid_dict():
    """A dict of valid emails passes validation."""
    serializer = DictSerializer(data={'emails': {'a': 'some.where@out.there'}})
    # Fix: assertion message typo 'shouild' -> 'should'.
    assert serializer.is_valid(), 'Valid dict should be allowed'
| 34.910112 | 99 | 0.717734 | 220 | 0.070808 | 0 | 0 | 0 | 0 | 0 | 0 | 1,219 | 0.39234 |
2d7e4675f7508f5659dc4db59ff7f4d23a5f9db7 | 642 | py | Python | reframe/utility/json.py | stevenvdb/reframe | be9de13dc16d2c9fb4b760e07ac986b04f8ed880 | [
"BSD-3-Clause"
] | null | null | null | reframe/utility/json.py | stevenvdb/reframe | be9de13dc16d2c9fb4b760e07ac986b04f8ed880 | [
"BSD-3-Clause"
] | null | null | null | reframe/utility/json.py | stevenvdb/reframe | be9de13dc16d2c9fb4b760e07ac986b04f8ed880 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import json
class _ReframeJsonEncoder(json.JSONEncoder):
    """JSON encoder that delegates to an object's __rfm_json_encode__ hook."""

    def default(self, obj):
        # Objects without the ReFrame hook fall back to the stock encoder
        # (which raises TypeError for unsupported types).
        if not hasattr(obj, '__rfm_json_encode__'):
            return json.JSONEncoder.default(self, obj)
        return obj.__rfm_json_encode__()
def dump(obj, fp, **kwargs):
    """json.dump that always serializes via the ReFrame-aware encoder."""
    options = dict(kwargs, cls=_ReframeJsonEncoder)
    return json.dump(obj, fp, **options)
def dumps(obj, **kwargs):
    """json.dumps that always serializes via the ReFrame-aware encoder."""
    options = dict(kwargs, cls=_ReframeJsonEncoder)
    return json.dumps(obj, **options)
| 25.68 | 76 | 0.705607 | 217 | 0.338006 | 0 | 0 | 0 | 0 | 0 | 0 | 220 | 0.342679 |
2d7f27789173a96e9e9a14e59877eda7d7c585f8 | 3,072 | py | Python | occo/enactor/upkeep.py | occopus/enactor | 01d5417b2239c3f6693d767224c3495089b8d9d3 | [
"Apache-2.0"
] | null | null | null | occo/enactor/upkeep.py | occopus/enactor | 01d5417b2239c3f6693d767224c3495089b8d9d3 | [
"Apache-2.0"
] | null | null | null | occo/enactor/upkeep.py | occopus/enactor | 01d5417b2239c3f6693d767224c3495089b8d9d3 | [
"Apache-2.0"
] | 3 | 2015-11-12T09:36:13.000Z | 2017-11-30T10:04:38.000Z | ### Copyright 2014, MTA SZTAKI, www.sztaki.hu
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
"""
Upkeep algorithms to be used before making an Enactor pass.
.. moduleauthor:: Adam Visegradi <adam.visegradi@sztaki.mta.hu>
"""
import occo.infobroker as ib
import occo.util.factory as factory
import occo.constants.status as nodestate
import logging
log = logging.getLogger('occo.upkeep')
datalog = logging.getLogger('occo.data.upkeep')
class Upkeep(factory.MultiBackend):
    """Abstract base class for upkeep algorithms run before an Enactor pass."""

    def __init__(self):
        # Info broker used by subclasses to query infrastructure state.
        self.infobroker = ib.main_info_broker

    def acquire_dynamic_state(self, infra_id):
        """Return the dynamic state of the infrastructure; subclasses must override."""
        raise NotImplementedError()
@factory.register(Upkeep, 'noop')
class DefaultUpkeep(Upkeep):
    """No-op upkeep: returns the infrastructure state without maintenance."""

    def acquire_dynamic_state(self, infra_id):
        # Third argument presumably requests fresh (non-cached) state -- confirm.
        return self.infobroker.get('infrastructure.state', infra_id, True)
@factory.register(Upkeep, 'basic')
class BasicUpkeep(Upkeep):
    """Upkeep that archives failed node instances and prunes lost ones."""

    def __init__(self):
        super(BasicUpkeep, self).__init__()
        import occo.infobroker
        # Data store used to persist failed nodes and record removals.
        self.uds = occo.infobroker.main_uds

    def is_failed(self, node):
        # True when the instance is in the FAIL state.
        return node['state'] == nodestate.FAIL

    def is_shutdown(self, node):
        # True when the instance has been shut down.
        return node['state'] == nodestate.SHUTDOWN

    def acquire_dynamic_state(self, infra_id):
        """Query infrastructure state; archive FAILed instances, and delete
        both FAILed and SHUTDOWN instances from the returned state.

        Returns (dynamic_state, failed_nodes).
        """
        log.debug('Acquiring state of %r', infra_id)
        dynamic_state = self.infobroker.get(
            'infrastructure.state', infra_id, True)
        datalog.debug('%r', dynamic_state)

        log.debug('Processing failed nodes in %r', infra_id)
        # Flatten {node_name: {node_id: instance}} into a list of instances.
        nodes = [node
                 for instances in list(dynamic_state.values())
                 for node in list(instances.values())]
        failed_nodes, remove_nodes = [], []
        for node in nodes:
            failed = self.is_failed(node)
            shutdown = self.is_shutdown(node)
            if failed or shutdown:
                if failed:
                    # Only FAILed nodes are archived; shutdown ones are just removed.
                    failed_nodes.append(node)
                remove_nodes.append(node)
                del dynamic_state[node['resolved_node_definition']['name']][node['node_id']]

        if len(failed_nodes)>0:
            log.info('Archiving failed instances of %r: %r',
                     infra_id, [i['node_id'] for i in failed_nodes])
            self.uds.store_failed_nodes(infra_id, *failed_nodes)

        if len(remove_nodes)>0:
            remove_ids = [i['node_id'] for i in remove_nodes]
            log.info('Removing lost instances from %r: %r',
                     infra_id, remove_ids)
            self.uds.remove_nodes(infra_id, *remove_ids)

        return dynamic_state, failed_nodes
| 35.310345 | 92 | 0.659831 | 2,037 | 0.663086 | 0 | 0 | 1,917 | 0.624023 | 0 | 0 | 1,028 | 0.334635 |
2d7ff8efa71157528cf59e7762c185eb4caf5b50 | 147 | py | Python | Chapter08/chapter8_sflowtool_1.py | stavsta/Mastering-Python-Networking-Second-Edition | 9999d2e415a1eb9c653ac3507500da7ddac2b556 | [
"MIT"
] | 107 | 2017-03-31T09:39:47.000Z | 2022-01-10T17:43:12.000Z | Chapter08/chapter8_sflowtool_1.py | muzhang90/Mastering-Python-Networking-Third-Edition | f8086fc9a28e441cf8c31099d16839c2e868c7fc | [
"MIT"
] | 3 | 2020-03-29T14:14:43.000Z | 2020-10-29T18:21:09.000Z | Chapter08/chapter8_sflowtool_1.py | muzhang90/Mastering-Python-Networking-Third-Edition | f8086fc9a28e441cf8c31099d16839c2e868c7fc | [
"MIT"
] | 98 | 2017-02-25T17:55:43.000Z | 2022-02-20T19:06:06.000Z | #!/usr/bin/env python3
import sys, re
# Filter sflowtool output arriving on stdin: echo only agent records.
for raw in iter(sys.stdin.readline, ''):
    # 'agent ' is a plain substring, so a regex search is unnecessary.
    if 'agent ' in raw:
        print(raw.strip())
| 14.7 | 41 | 0.605442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.217687 |
2d80556385c4d4bedc014437f9eac4454a0a8d16 | 1,277 | py | Python | examples/fuzz.py | defparam/haptyc | 9baecd797081bbf6edada2db9aad3b1fd3331190 | [
"Apache-2.0"
] | 74 | 2021-09-09T22:19:22.000Z | 2022-01-12T00:52:08.000Z | examples/fuzz.py | defparam/haptyc | 9baecd797081bbf6edada2db9aad3b1fd3331190 | [
"Apache-2.0"
] | null | null | null | examples/fuzz.py | defparam/haptyc | 9baecd797081bbf6edada2db9aad3b1fd3331190 | [
"Apache-2.0"
] | 15 | 2021-09-18T21:01:50.000Z | 2022-02-01T18:59:59.000Z | from haptyc import *
from base64 import b64encode, b64decode
import json
class TestLogic(Transform):
    """Haptyc transform defining the fuzz cases to generate per request."""

    #
    # test_h1: Decodes base64, fuzzes using random_insert, Re-encodes base64
    # Number of tests: 50
    #
    @ApplyIteration(50)
    def test_h1(self, data, state):
        data = b64decode(data)
        # Inject a single-quote at a random position to probe for injection.
        data = random_insert(data, list("'"))
        data = b64encode(data)
        return data

    #
    # test_jsonfuzz: Deserialize JSON
    #                Loop through every key
    #                Decodes base64
    #                fuzzes using random_insert
    #                Re-encodes base64
    #                Serialize JSON
    # Number of tests: 50
    #
    @ApplyIteration(50)
    def test_jsonfuzz(self, data, state):
        JA = json.loads(data)
        for key in JA:
            # Each JSON value is base64: decode, inject a special char, re-encode.
            JA[key] = b64encode(random_insert(b64decode(JA[key]), list("!@#$%^&*()")))
        return json.dumps(JA)
def queueRequests(target, wordlists):
    """Turbo Intruder entry point: queue one request per generated haptyc test."""
    engine = RequestEngine(endpoint=target.endpoint, concurrentConnections=1, requestsPerConnection=1, pipeline=0)
    # Iterating the TestLogic factory yields mutated copies of the base request.
    TestFactory = TestLogic(target.req)
    for test in TestFactory:
        engine.queue(test)
def handleResponse(req, interesting):
    """Turbo Intruder callback: record every completed request in the results table."""
    table.add(req)
| 31.146341 | 115 | 0.591229 | 877 | 0.686766 | 0 | 0 | 423 | 0.331245 | 0 | 0 | 365 | 0.285826 |
2d805588965de2f3350c8e40dca405d56b0fa7da | 1,271 | py | Python | nginx-with-mtls-and-appserver/appserver/app.py | fshmcallister/examples | e2052778cbd531bf716131b8311e87c4ee005f07 | [
"MIT"
] | 6 | 2019-10-18T13:32:46.000Z | 2020-06-20T17:45:43.000Z | nginx-with-mtls-and-appserver/appserver/app.py | fshmcallister/examples | e2052778cbd531bf716131b8311e87c4ee005f07 | [
"MIT"
] | 4 | 2019-07-18T16:05:57.000Z | 2021-09-23T23:27:06.000Z | nginx-with-mtls-and-appserver/appserver/app.py | fshmcallister/examples | e2052778cbd531bf716131b8311e87c4ee005f07 | [
"MIT"
] | 3 | 2019-07-15T13:16:49.000Z | 2020-01-09T09:39:33.000Z | import re
from flask import Flask, request
app = Flask(__name__)
def generate_whitelist():
    """Read /whitelist.txt and return entries ending in 'd.wott.local'."""
    with open('/whitelist.txt', 'r') as f:
        return [line.strip() for line in f.readlines()
                if line.strip().endswith('d.wott.local')]
def grant_client_access(headers):
    """Decide whether the mTLS client certificate grants access.

    We need to check for:
    * 'Ssl-Client-Verify' = 'SUCCESS'
    * 'Ssl-Client' = 'CN=x.d.wott.local,O=Web of Trusted Things, Ltd,ST=London,C=UK'

    Fix: the original crashed (TypeError/AttributeError) when the
    'Ssl-Client' header was missing or held no matching Common Name;
    such requests are now simply denied.
    """
    if headers.get('Ssl-Client-Verify') != 'SUCCESS':
        return False

    ssl_client = headers.get('Ssl-Client')
    if not ssl_client:
        # nginx did not forward a client-certificate subject.
        return False

    # Extract the Common Name from the certificate subject.
    match = re.match(
        r'.*CN=(.*.d.wott.local)',
        ssl_client,
        re.M | re.I
    )
    if match is None:
        # Subject has no *.d.wott.local Common Name.
        return False

    cn = match.group(1)
    print('Got request from {}'.format(cn))

    whitelist = generate_whitelist()
    print('Device whitelist: {}'.format(whitelist))

    # Match the device against the whitelist.
    if cn in whitelist:
        print('{} found in whitelist'.format(cn))
        return True
    return False
@app.route('/')
def hello_world():
    """Root endpoint: report whether the mTLS client was accepted."""
    granted = grant_client_access(request.headers)
    return 'Access granted!\n' if granted else 'Access denied!\n'
| 24.442308 | 88 | 0.610543 | 0 | 0 | 0 | 0 | 158 | 0.124312 | 0 | 0 | 455 | 0.357986 |
2d81d3087ce7b2f4ba15e8c7dc9496c1d42f9104 | 1,676 | py | Python | Database/flask-sqlalchemy/one_to_many.py | amamov/cs001 | 5753f28e74e2330837d22142cff4713801c77a2d | [
"MIT"
] | 5 | 2021-02-21T17:10:03.000Z | 2022-03-04T21:17:50.000Z | flask-sqlalchemy/one_to_many.py | amamov/pythonic | 95f8f7dca9d01f11ecdf4b26b46afe41dc20b0d0 | [
"MIT"
] | null | null | null | flask-sqlalchemy/one_to_many.py | amamov/pythonic | 95f8f7dca9d01f11ecdf4b26b46afe41dc20b0d0 | [
"MIT"
] | 3 | 2021-02-25T17:53:57.000Z | 2021-06-25T17:25:44.000Z | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from pathlib import Path
app = Flask(__name__)
BASE_DIR = Path(__file__).resolve().parent
DB_PATH = str(BASE_DIR / "one_to_many.sqlite")
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + DB_PATH
app.config["SQLALCHEMY_COMMIT_ON_SUBMIT"] = True
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
class Board(db.Model):
    # "Many" side of the one-to-many relationship: each Board belongs to one User.
    __tablename__ = "amamov_board"  # without __tablename__ it defaults to the lowercased class name
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(128))
    # FK to the owning user; rows are dropped by the DB when the user is deleted.
    writer_id = db.Column(
        db.Integer, db.ForeignKey("amamov_user.id", ondelete="CASCADE")
    )
    def __repr__(self):
        return f"<Board {self.title}>"
class User(db.Model):
    # "One" side of the relationship: a User owns many Board rows.
    __tablename__ = "amamov_user"
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(128))
    boards = db.relationship(
        "Board",  # which mapped class this relationship targets
        backref="writer",  # name under which Board refers back to its User
        lazy="dynamic",  # loading strategy: expose the collection as a query
        passive_deletes=True,
    )
    def __repr__(self):
        return f"<User {self.username}>"
# Create tables for the models above (no-op if they already exist).
db.create_all()
if __name__ == "__main__":
    app.run(host="localhost", port=5000, debug=True)
"""
amamov = User(username='amamov')
db.session.add(amamov)
db.session.commit()
board1 = Board(title='hello1', writer_id=amamov.id)
board2 = Board(title='hello2', writer=amamov) # User의 backref="writer"로 접근 가능
db.session.add_all([board1, board2])
db.session.commit()
amamov = User.query.filter(User.username=='amamov').first()
amamov.boards.all()
# [<Board hello1>, <Board hello2>]
"""
| 23.942857 | 77 | 0.679594 | 896 | 0.498886 | 0 | 0 | 0 | 0 | 0 | 0 | 883 | 0.491648 |
2d83d5ac7ec09bb8e96ad46997e6e2b1b848abb9 | 231 | py | Python | hcap_utils/contrib/material/views/__init__.py | fabiommendes/capacidade_hospitalar | 4f675b574573eb3f51e6be8a927ea230bf2712c7 | [
"MIT"
] | null | null | null | hcap_utils/contrib/material/views/__init__.py | fabiommendes/capacidade_hospitalar | 4f675b574573eb3f51e6be8a927ea230bf2712c7 | [
"MIT"
] | 31 | 2020-04-11T13:38:17.000Z | 2021-09-22T18:51:11.000Z | hcap_utils/contrib/material/views/__init__.py | fabiommendes/capacidade_hospitalar | 4f675b574573eb3f51e6be8a927ea230bf2712c7 | [
"MIT"
] | 1 | 2020-04-08T17:04:39.000Z | 2020-04-08T17:04:39.000Z | from .create_model_view import CreateModelView
from .delete_model_view import DeleteModelView
from .detail_model_view import DetailModelView
from .list_model_view import ListModelView
from .update_model_view import UpdateModelView
| 38.5 | 46 | 0.891775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2d849f6ef02f2ba2420e4777e5f788eedd4a8971 | 357 | py | Python | src/emr/scripts/util.py | anorth848/aws-data-analytics | 7721f1214f163a9a76f8e797edd3a1e300a5e2ab | [
"MIT"
] | 1 | 2022-02-18T22:28:36.000Z | 2022-02-18T22:28:36.000Z | src/emr/scripts/util.py | anorth848/aws-data-analytics | 7721f1214f163a9a76f8e797edd3a1e300a5e2ab | [
"MIT"
] | null | null | null | src/emr/scripts/util.py | anorth848/aws-data-analytics | 7721f1214f163a9a76f8e797edd3a1e300a5e2ab | [
"MIT"
] | null | null | null | import boto3
import logging
import json
def get_secret(secret):
    """Fetch a secret from AWS Secrets Manager and return it parsed as JSON."""
    logging.info(f'Retrieving secret {secret}')
    sm_client = boto3.client('secretsmanager')
    response = sm_client.get_secret_value(SecretId=secret)
    logging.debug(f'Retrieved Secret ARN {response["ARN"]} VersionId {response["VersionId"]}')
    return json.loads(response['SecretString'])
| 29.75 | 94 | 0.742297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.37535 |
2d860cb528497ffbf5b2990f064b4319234cb072 | 4,204 | py | Python | boids/boids.py | PaulAustin/sb7-pgz | fca3e50132b9d1894fb348b2082e83ce7b937b19 | [
"MIT"
] | 1 | 2022-02-21T15:54:01.000Z | 2022-02-21T15:54:01.000Z | boids/boids.py | PaulAustin/sb7-pgz | fca3e50132b9d1894fb348b2082e83ce7b937b19 | [
"MIT"
] | null | null | null | boids/boids.py | PaulAustin/sb7-pgz | fca3e50132b9d1894fb348b2082e83ce7b937b19 | [
"MIT"
] | 2 | 2020-11-21T16:34:22.000Z | 2021-01-27T10:30:34.000Z | # Ported from JavaSript version to Python and Pygame Zero
# Designed to work well with mu-editor environment.
#
# Original by Ben Eater at https://github.com/beneater/boids (MIT License)
# No endorsement implied.
import random
HEIGHT = 600             # window height
WIDTH = 700              # window width
MARGIN = 150             # distance from an edge at which boids start steering back
NUM_BOIDS = 100
VISUAL_RANGE = 70        # range of influence for most algorithms
SPEED_LIMIT_UPPER = 15   # boids can only fly so fast
SPEED_LIMIT_LOWER = 3    # a boid will fall if flying too slowly
SPEED_INIT = 15          # range for random initial velocity components
MIN_DISTANCE = 10        # the distance to stay away from other boids
AVOID_FACTOR = 0.05      # % location change if too close
CENTERING_FACTOR = 0.005 # % location change to pull to center
MATCHING_FACTOR = 0.015  # % velocity change if close
g_boids = []             # global flock; populated by init_boids() at module load
class Boid:
    # Plain record holding a boid's complex-valued position (loc), velocity
    # (vel) and position history; attributes are attached in init_boids().
    pass
def init_boids():
    """Create NUM_BOIDS boids at random positions with random velocities."""
    flock = []
    for _ in range(NUM_BOIDS):
        b = Boid()
        b.loc = complex(
            random.randint(0, WIDTH),
            random.randint(0, HEIGHT))
        b.vel = complex(
            random.randint(0, SPEED_INIT),
            random.randint(0, SPEED_INIT))
        b.history = []
        flock.append(b)
    return flock
def keep_within_bounds(boid):
    """Nudge a boid's velocity back toward the window once it nears an edge.

    If it gets too close to an edge, the velocity is pushed back inward.
    """
    x, y = boid.loc.real, boid.loc.imag
    if x < MARGIN:
        boid.vel += complex(0.4, 0)
    if x > WIDTH - MARGIN:
        boid.vel += complex(-0.4, 0)
    if y < MARGIN:
        boid.vel += complex(0, 0.4)
    if y > HEIGHT - MARGIN:
        boid.vel += complex(0, -0.4)
def fly_towards_center(boid):
    """Pull the boid slightly toward the centre of mass of boids in range."""
    total = 0 + 0j
    count = 0
    for other in g_boids:
        if abs(boid.loc - other.loc) < VISUAL_RANGE:
            total += other.loc
            count += 1
    if count > 0:
        # Move a small fraction of the way toward the neighbourhood centre.
        boid.loc += (total / count - boid.loc) * CENTERING_FACTOR
def avoid_others(boid):
    """Steer away from any other boid closer than MIN_DISTANCE."""
    push = 0 + 0j
    for other in g_boids:
        if other is not boid and abs(boid.loc - other.loc) < MIN_DISTANCE:
            push += boid.loc - other.loc
    boid.vel += push * AVOID_FACTOR
def match_velocity(boid):
    """Adjust velocity slightly toward the average velocity of neighbours."""
    total_vel = 0 + 0j
    count = 0
    for other in g_boids:
        if abs(boid.loc - other.loc) < VISUAL_RANGE:
            total_vel += other.vel
            count += 1
    if count > 0:
        boid.vel += (total_vel / count - boid.vel) * MATCHING_FACTOR
def limit_speed(boid):
    """Clamp the boid's speed into [SPEED_LIMIT_LOWER, SPEED_LIMIT_UPPER].

    Speed will naturally vary in flocking behavior, but real animals can't
    go arbitrarily fast (or slow).
    """
    speed = abs(boid.vel)
    # Bug fix: random initialisation can produce a (0, 0) velocity, and the
    # rescaling below would then divide by zero. A motionless boid has no
    # direction to rescale, so give it the minimum speed along +x.
    if speed == 0:
        boid.vel = complex(SPEED_LIMIT_LOWER, 0)
        return
    if speed > SPEED_LIMIT_UPPER:
        boid.vel = boid.vel / speed * SPEED_LIMIT_UPPER
    if speed < SPEED_LIMIT_LOWER:
        boid.vel = boid.vel / speed * SPEED_LIMIT_LOWER
    return
def draw_boid(boid):
    # Draw one boid: a green dot plus a short line along its velocity vector,
    # showing its heading ('screen' is injected by the Pygame Zero runtime).
    screen.draw.filled_circle((boid.loc.real, boid.loc.imag), 5, (0, 255, 0))
    tail = boid.loc + boid.vel * 1.8
    screen.draw.line(
        (boid.loc.real, boid.loc.imag),
        (tail.real, tail.imag),
        (0, 255, 0))
    # angle = math.atan2(boid.vel.real, boid.vel.imag)
    return
def draw():
    # Pygame Zero frame callback: clear to black, then draw every boid.
    screen.fill((0, 0, 0))
    for boid in g_boids:
        draw_boid(boid)
def update():
    """Pygame Zero tick callback: apply the flocking rules, then move boids."""
    for b in g_boids:
        # Apply rules
        fly_towards_center(b)
        avoid_others(b)
        match_velocity(b)
        limit_speed(b)
        keep_within_bounds(b)
        # Update the position based on the current velocity
        b.loc += b.vel
g_boids = init_boids()  # build the flock once at import; pgzero then drives update()/draw()
| 29.815603 | 77 | 0.620124 | 20 | 0.004757 | 0 | 0 | 0 | 0 | 0 | 0 | 1,231 | 0.292816 |
2d8645dc45088fa874c2aeccbf853372bdcf387c | 507 | py | Python | notebook/03-udacityIntroductionToMachineLearning/projects/datasets_questions/utils/read_names.py | EmanuelFontelles/machineLearning | 26e810c2dbc89c2076b312d02a957ab6f8dee7d8 | [
"MIT"
] | 2 | 2018-11-09T03:49:31.000Z | 2019-06-28T17:24:04.000Z | notebook/03-udacityIntroductionToMachineLearning/projects/datasets_questions/utils/read_names.py | lucasvt01/Machine_Learning | 6ee17061b77d23c966ad7700712938bfe00ef9c1 | [
"MIT"
] | null | null | null | notebook/03-udacityIntroductionToMachineLearning/projects/datasets_questions/utils/read_names.py | lucasvt01/Machine_Learning | 6ee17061b77d23c966ad7700712938bfe00ef9c1 | [
"MIT"
] | 2 | 2020-05-09T00:49:56.000Z | 2021-08-28T07:24:46.000Z | import pandas as pd
import sys
from os import system
# Make sibling project directories importable when this runs as a script.
sys.path.append('../final_project/')
sys.path.append('../')
def readNames(inputFile='new_poi_names.txt'):
    '''
    Read person-of-interest names from a semicolon-delimited text file.

    The first two lines of the file are skipped (header/comment rows) and
    the remaining rows are parsed into the columns 'Ans' and 'Name'.

    :param inputFile: path of the file to read
    :return: pandas DataFrame with columns ['Ans', 'Name']
    '''
    # Dropped the commented-out 'bash script.sh' invocation the original
    # carried here — dead code with no effect.
    data = pd.read_csv(inputFile, skiprows=2, delimiter=';', header=None, names=['Ans', 'Name'])
    return data
2d8781eedd833caad160a7f046c6482144d1ef2b | 1,270 | py | Python | launch/test/legacy/launch_counter.py | stonier/launch | e8704247708eb017c388aaf8606e9dbb6971239b | [
"Apache-2.0"
] | null | null | null | launch/test/legacy/launch_counter.py | stonier/launch | e8704247708eb017c388aaf8606e9dbb6971239b | [
"Apache-2.0"
] | null | null | null | launch/test/legacy/launch_counter.py | stonier/launch | e8704247708eb017c388aaf8606e9dbb6971239b | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tempfile import NamedTemporaryFile
from launch.legacy.exit_handler import ignore_exit_handler
from launch.legacy.output_handler import FileOutput
def launch(launch_descriptor, argv):
    """Register the sibling counter.py script as a managed launch process.

    Its stdout is redirected to a fresh temporary file and a non-zero exit
    status is ignored.
    """
    script_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'counter.py')
    with NamedTemporaryFile(mode='w', prefix='foo_', delete=False) as tmp:
        output_path = tmp.name
    launch_descriptor.add_process(
        cmd=[sys.executable, '-u', script_path, '--limit', '15', '--sleep', '0.5'],
        name='foo',
        output_handlers=[FileOutput(filename=output_path)],
        exit_handler=ignore_exit_handler,
    )
| 35.277778 | 89 | 0.734646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.507874 |
2d888527e706a87e3f98f2c6cc06bcf9c253256b | 4,833 | py | Python | exporter/config.py | TaIos/code_generator | d01eca1cf3506e2b24fdf537de44d38d0f1dab0d | [
"MIT"
] | 2 | 2021-02-12T12:07:26.000Z | 2022-02-10T23:49:43.000Z | exporter/config.py | TaIos/code_generator | d01eca1cf3506e2b24fdf537de44d38d0f1dab0d | [
"MIT"
] | 1 | 2020-12-05T10:38:31.000Z | 2020-12-05T10:38:31.000Z | exporter/config.py | TaIos/exporter | d01eca1cf3506e2b24fdf537de44d38d0f1dab0d | [
"MIT"
] | null | null | null | import pathlib
class ExporterConfig:
    """Holds the GitHub and GitLab API tokens used by the exporter."""
    def __init__(self, github_token, gitlab_token):
        self.github_token = github_token
        self.gitlab_token = gitlab_token
class ConfigLoader:
    @classmethod
    def load(cls, cfg):
        """
        Load and validate application configuration for GitHub and GitLab access
        :param cfg: :class:`ConfigParser` object containing GitHub and GitLab tokens
        :return: :class:`ExporterConfig` containing GitHub and Gitlab tokens
        """
        has_github = cfg.has_section('github')
        has_gitlab = cfg.has_section('gitlab')
        # Report missing sections first, then missing tokens, so the user
        # gets the most specific error available.
        if not has_github and not has_gitlab:
            raise ValueError("No section: 'github' and 'gitlab'")
        if not has_github:
            raise ValueError("No section: 'github'")
        if not has_gitlab:
            raise ValueError("No section: 'gitlab'")
        if not cfg.get('gitlab', 'token', fallback=None):
            raise ValueError("No 'token' in section 'gitlab'")
        if not cfg.get('github', 'token', fallback=None):
            raise ValueError("No 'token' in section 'github'")
        return ExporterConfig(
            github_token=cfg.get('github', 'token'),
            gitlab_token=cfg.get('gitlab', 'token')
        )
class LineParser:
    @classmethod
    def parse(cls, line):
        """
        Parse and validate splitted lines. Each line contains specification for each exported project,
        eg ``A -> B private``
        :param line: splitted line containing specification for exported project
        :return: parsed line
        """
        parts = line.split(' ')
        handlers = {
            1: cls._parse_line_with_split_len_1,
            2: cls._parse_line_with_split_len_2,
            3: cls._parse_line_with_split_len_3,
            4: cls._parse_line_with_split_len_4,
        }
        handler = handlers.get(len(parts))
        if handler is None:
            raise ValueError(f"Invalid number of entries on line '{line}'")
        return handler(parts)

    @staticmethod
    def _parse_line_with_split_len_1(s):
        # A single non-empty entry maps a project onto the same name.
        if s[0]:
            return [s[0], s[0]]
        raise ValueError(f"Empty line is not allowed.")

    @staticmethod
    def _parse_line_with_split_len_2(s):
        # "<name> <visibility>"
        if s[1] not in ('public', 'private'):
            raise ValueError(f"Invalid visibility specifier '{s[1]}'")
        if s[0]:
            return [s[0], s[0], s[1]]
        raise ValueError()

    @staticmethod
    def _parse_line_with_split_len_3(s):
        # "<src> -> <dst>"
        if s[1] != '->':
            raise ValueError(f"Invalid separator '{s[1]}'")
        if s[0] and s[2]:
            return [s[0], s[2]]
        raise ValueError()

    @staticmethod
    def _parse_line_with_split_len_4(s):
        # "<src> -> <dst> <visibility>"
        src, dst = LineParser._parse_line_with_split_len_3(s[:3])
        visibility = s[3]
        if visibility not in ('private', 'public'):
            raise ValueError(f"Invalid visibility specifier '{visibility}'")
        return [src, dst, visibility]
class ProjectLoader:
    @staticmethod
    def _check_unique_values(lines):
        # The second column is the GitHub repository name; duplicates collide.
        github_names = [entry[1] for entry in lines]
        if len(github_names) != len(set(github_names)):
            raise ValueError("GitHub names must be unique.")

    @classmethod
    def load_parsed(cls, lines):
        """
        Load and validate application projects configuration file
        :param lines: lines of projects configuration file
        :return: parsed and validated lines of projects configuration
        """
        parsed = [LineParser.parse(line) for line in lines]
        cls._check_unique_values(parsed)
        return parsed

    @classmethod
    def load(cls, project_file):
        """
        Load and validate application projects configuration file
        :param project_file: text input with projects file
        :return: same as :func:`load_parsed`
        """
        stripped = [raw.strip() for raw in project_file.read().splitlines()]
        if not stripped:
            raise ValueError("File is empty.")
        return cls.load_parsed(stripped)
class ProjectNormalizer:
    @classmethod
    def normalize(cls, projects, visibility):
        """
        Add default project visibility to parsed projects file if it is not already present.
        Eg add ``private`` or ``public``.
        :param projects: parsed projects file
        :param visibility: default visibility
        :return: normalized parsed project file, where each projects contains visibility
        """
        for index, entry in enumerate(projects):
            width = len(entry)
            if width == 1:
                projects[index] = [entry[0], entry[0], visibility]
            elif width == 2:
                projects[index] = [entry[0], entry[1], visibility]
            elif width == 3:
                continue  # already carries an explicit visibility
            else:
                raise ValueError(f"Line '{entry}'")
| 32.006623 | 102 | 0.589696 | 4,803 | 0.993793 | 0 | 0 | 4,508 | 0.932754 | 0 | 0 | 1,762 | 0.364577 |
2d8947d4985d3f74c9b311aff5ca0779c9ce0c9d | 3,613 | py | Python | mars/learn/tests/test_wrappers.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | 1 | 2022-02-02T03:03:48.000Z | 2022-02-02T03:03:48.000Z | mars/learn/tests/test_wrappers.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | null | null | null | mars/learn/tests/test_wrappers.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from ... import tensor as mt
from ..wrappers import ParallelPostFit
def test_parallel_post_fit_basic(setup):
    """ParallelPostFit mirrors the wrapped estimator and rejects missing methods."""
    raw_x, raw_y = make_classification(n_samples=1000)
    X = mt.tensor(raw_x, chunk_size=100)
    y = mt.tensor(raw_y, chunk_size=100)
    clf = ParallelPostFit(GradientBoostingClassifier())
    clf.fit(X, y)
    # Predictions come back lazily as Mars tensors.
    assert isinstance(clf.predict(X), mt.Tensor)
    assert isinstance(clf.predict_proba(X), mt.Tensor)
    got = clf.score(X, y)
    want = clf.estimator.score(X, y)
    assert got.fetch() == want
    reg = ParallelPostFit(LinearRegression())
    reg.fit(X, y)
    with pytest.raises(
        AttributeError, match="The wrapped estimator (.|\n)* 'predict_proba' method."
    ):
        reg.predict_proba(X)
def test_parallel_post_fit_predict(setup):
    """predict / predict_proba / predict_log_proba match the plain estimator."""
    raw_x, raw_y = make_classification(n_samples=1000)
    X = mt.tensor(raw_x, chunk_size=100)
    y = mt.tensor(raw_y, chunk_size=100)
    base = LogisticRegression(random_state=0, n_jobs=1, solver="lbfgs")
    wrap = ParallelPostFit(LogisticRegression(random_state=0, n_jobs=1, solver="lbfgs"))
    base.fit(X, y)
    wrap.fit(X, y)
    for method in ("predict", "predict_proba", "predict_log_proba"):
        np.testing.assert_allclose(getattr(wrap, method)(X), getattr(base, method)(X))
def test_parallel_post_fit_transform(setup):
    """transform through the wrapper stays close to the plain PCA transform."""
    raw_x, raw_y = make_classification(n_samples=1000)
    X = mt.tensor(raw_x, chunk_size=100)
    y = mt.tensor(raw_y, chunk_size=100)
    base = PCA(random_state=0)
    wrap = ParallelPostFit(PCA(random_state=0))
    base.fit(raw_x, raw_y)
    wrap.fit(X, y)
    got = base.transform(X)
    want = wrap.transform(X)
    np.testing.assert_allclose(got, want, atol=0.1)
def test_parallel_post_fit_multiclass(setup):
    """Multiclass predict / proba / log-proba agree with the wrapped estimator."""
    # Dead-code fix: the original built a binary dataset first and immediately
    # overwrote it; generate only the multiclass data that is actually used.
    raw_x, raw_y = make_classification(n_classes=3, n_informative=4)
    X = mt.tensor(raw_x, chunk_size=50)
    y = mt.tensor(raw_y, chunk_size=50)
    clf = ParallelPostFit(
        LogisticRegression(random_state=0, n_jobs=1, solver="lbfgs", multi_class="auto")
    )
    clf.fit(X, y)
    result = clf.predict(X)
    expected = clf.estimator.predict(X)
    np.testing.assert_allclose(result, expected)
    result = clf.predict_proba(X)
    expected = clf.estimator.predict_proba(X)
    np.testing.assert_allclose(result, expected)
    result = clf.predict_log_proba(X)
    expected = clf.estimator.predict_log_proba(X)
    np.testing.assert_allclose(result, expected)
| 33.453704 | 88 | 0.729588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 665 | 0.184058 |
2d8b618cd92e8a0d5a05adebae79ed5b9c19a3f8 | 620 | py | Python | setup.py | mayankgiri619/Speech-Recognizer-cum-Voice-Typing-Editor | dc5b0c9b783e105efe06220daf4db3bd8c84c87a | [
"MIT"
] | 3 | 2018-08-21T14:34:27.000Z | 2018-08-21T14:40:04.000Z | setup.py | mayankgiri619/Speech-Recognizer-cum-Voice-Typing-Editor | dc5b0c9b783e105efe06220daf4db3bd8c84c87a | [
"MIT"
] | null | null | null | setup.py | mayankgiri619/Speech-Recognizer-cum-Voice-Typing-Editor | dc5b0c9b783e105efe06220daf4db3bd8c84c87a | [
"MIT"
] | 3 | 2019-06-30T13:46:31.000Z | 2021-04-23T09:35:55.000Z | import cx_Freeze
import sys

# cx_Freeze needs the Win32GUI base on Windows to suppress the console window.
base = None
if sys.platform == 'win32':
    base = "Win32GUI"

# The single executable built from the Tkinter speech-recognizer script.
executables = [cx_Freeze.Executable("Speech_Recognizer.py", base=base, icon = "icon.ico")]

cx_Freeze.setup(
    name = "Speech Recognizer",
    author = "Mayank Kumar Giri",
    # Bundle required packages plus the icon/asset files next to the executable.
    options = {"build_exe":{"packages":["tkinter", "speech_recognition", "threading", "time"], "include_files":["icon.ico", "wait.ico", "mic.ico", "save.ico"]}},
    version = "1.0",
    description = "Speech Recognizer cum Text Editor that facilitates voice typing using Google Speech Recognition API",
    executables = executables
)
2d8f6e8ccc46303dc79e17c51afb21313146a6d3 | 7,101 | py | Python | checkers/checker_optimization_knapsack.py | aosokin/assignment_checker_email | ce6efe6902625a463e25e22add14d9616b2d13ee | [
"MIT"
] | 4 | 2016-07-03T18:51:45.000Z | 2021-10-13T12:00:47.000Z | checkers/checker_optimization_knapsack.py | aosokin/assignment_checker_email | ce6efe6902625a463e25e22add14d9616b2d13ee | [
"MIT"
] | null | null | null | checkers/checker_optimization_knapsack.py | aosokin/assignment_checker_email | ce6efe6902625a463e25e22add14d9616b2d13ee | [
"MIT"
] | null | null | null |
import sys
import numpy as np
import os
import math
import chardet
import io
COMPARISON_ACCURACY = 1e-2  # absolute tolerance used by are_equal_floats()

# Possible grades for a submission.
MAX_SCORE = 1.0
HALF_SCORE = 0.5
SUBMISSION_SCORE = 0.0

# Feedback templates keyed by awarded score; each takes the achieved
# objective value and the next threshold the submitter should reach.
MESSAGES_SCORE = {}
MESSAGES_SCORE[SUBMISSION_SCORE] = lambda value, value_to_get : \
    'Your submission output is good, but the solution objective value %.2f is insufficient for full credit. For a higher grade, you will need to improve the objective value to %.2f or better.'%(value, value_to_get)
MESSAGES_SCORE[HALF_SCORE] = lambda value, value_to_get : \
    'Good Optimization. Your algorithm does some basic optimization but your solution objective value %.2f can be improved significantly. For a higher grade, you will need to improve the objective value to %.2f or better.'%(value, value_to_get)
MESSAGES_SCORE[MAX_SCORE] = lambda value, value_to_get : \
    'Awesome Optimization! Your objective value %.2f is great and you get the full score.'%(value)

VALUE_TESTS = {}
# value for half mark, for the full mark, optimal value (can be replaced with a place-holder)
VALUE_TESTS['knapsack_test1.txt'.lower()] = [92000,99798,99798]
VALUE_TESTS['knapsack_test2.txt'.lower()] = [100062,100236,100236]
VALUE_TESTS['knapsack_test3.txt'.lower()] = [3966813,3967028,3967180]
VALUE_TESTS['knapsack_test4.txt'.lower()] = [109869,109899,109899]
VALUE_TESTS['knapsack_test5.txt'.lower()] = [1099870,1099881,1099893]

# the default weight for all the tests is 1.0
TEST_WEIGHT = {}

MODE = 'max' # 'min' or 'max'
def are_equal_floats(tested, correct):
    """True when the two floats differ by at most COMPARISON_ACCURACY."""
    difference = tested - correct
    return -COMPARISON_ACCURACY <= difference <= COMPARISON_ACCURACY
def assign_score(value, test_name):
    """Map an objective value to (score, value_to_improve) for a known test.

    Returns (None, None) for unknown test names. value_to_improve is the
    next threshold to reach, or None once full marks are awarded.
    """
    score = None
    value_to_improve = None
    thresholds = VALUE_TESTS.get(test_name.lower())
    if thresholds is not None:
        half_cut, full_cut = thresholds[0], thresholds[1]
        score = SUBMISSION_SCORE
        value_to_improve = half_cut
        # For 'max' problems larger objectives are better; otherwise smaller.
        if MODE == 'max':
            good_enough = lambda v, cut: v >= cut
        else:
            good_enough = lambda v, cut: v <= cut
        if good_enough(value, half_cut):
            score = HALF_SCORE
            value_to_improve = full_cut
            if good_enough(value, full_cut):
                score = MAX_SCORE
                value_to_improve = None
    return score, value_to_improve
def decode_lines(data_file):
    # Read the lines of data_file, dropping any non-ASCII characters.
    # detect encoding
    with open(data_file, 'rb') as file:
        raw = file.read(1024) # at most 1024 bytes are returned
        charenc = chardet.detect(raw)['encoding']
    input_data = []
    with io.open(data_file,'r', encoding=charenc) as file:
        for line in file:
            # NOTE(review): str.encode() returns *bytes* under Python 3, so the
            # returned list holds bytes objects; downstream split()/float()
            # relies on this — confirm the intended Python version.
            line = line.encode("ascii", "ignore")
            input_data.append(line)
    return input_data
def read_numbers(data_file):
    """Parse every whitespace-separated token of data_file into a float array."""
    input_data = decode_lines(data_file)
    numbers = np.array([])
    for i_line in range(len(input_data)):
        entries = input_data[i_line].split()
        entries = filter(None, entries) # remove empty entries
        # Bug fix: the original tested `x.lower != "inf"`, comparing the bound
        # *method* to a string (always True), so the "inf" branch could never
        # trigger. Call the method, and accept both str and bytes tokens
        # (decode_lines may yield bytes).
        line_numbers = [float("inf") if x.lower() in ("inf", b"inf") else float(x) for x in entries]
        numbers = np.append(numbers, line_numbers)
    return numbers
def read_data(data_file):
    """Read a knapsack instance: (item values, item sizes, capacity).

    Layout: item count, capacity, then (value, size) pairs per item.
    """
    numbers = read_numbers(data_file)
    num_items = int(numbers[0])
    capacity = float(numbers[1])
    value = np.zeros(num_items, dtype = 'float')
    size = np.zeros(num_items, dtype = 'float')
    for i_item in range(num_items):
        value[i_item] = float(numbers[2 + 2 * i_item])
        size[i_item] = float(numbers[3 + 2 * i_item])
    return value, size, capacity
def check_feasibility( submitted_solution, value, size, capacity ):
    """Validate a submitted knapsack solution vector.

    submitted_solution[0] is the claimed objective value; the remaining
    entries are 0/1 item selections. Returns (objective, message) on success
    or (None, message) when the vector is malformed or exceeds capacity.
    """
    n = len(value)
    claimed_value = submitted_solution[0]
    total_size = 0
    total_value = 0
    message = ''
    for i_item in range(n):
        selection = submitted_solution[i_item + 1]
        if are_equal_floats(selection, 1):
            total_value += value[i_item]
            total_size += size[i_item]
        elif not are_equal_floats(selection, 0):
            message += 'Value %f corresponding to item %d is not valid.'%(selection, i_item)
            message += ' Expecting 0 or 1.\n'
            return None, message
    if are_equal_floats(total_value, claimed_value):
        message += 'The produced configuration is feasible and the objective value is correct.\n'
    else:
        message += 'The value of the solution is computed incorrectly: %d instead of %d.'%(claimed_value, total_value)
        message += ' Using correct value %d.\n'%(total_value)
    if total_size > capacity:
        message += 'The submitted solution is not feasible: size %d exceeds capacity %d.\n'%(total_size, capacity)
        return None, message
    return total_value, message
if __name__ == '__main__':
    # Usage: checker.py <test_file> <correct_answer_file> <tested_answer_file>
    if len(sys.argv) == 4:
        test_file = sys.argv[1].strip()
        correct_file = sys.argv[2].strip()
        tested_file = sys.argv[3].strip()
        try:
            tested_numbers = read_numbers(tested_file)
        # NOTE(review): bare except — also swallows KeyboardInterrupt/SystemExit.
        except:
            print(0.0)
            print('Failed to read file', os.path.basename(tested_file))
            sys.exit(0)
        correct_numbers = read_numbers(correct_file)
        value, size, capacity = read_data(test_file)
        # The submission must have exactly as many entries as the reference.
        if len(tested_numbers) != len(correct_numbers):
            print(0.0)
            print('Infeasible answer')
            print('Wrong number of entries in file %s'%(os.path.basename(tested_file)), '(%d instead of %d).'%(len(tested_numbers), len(correct_numbers)))
        else:
            is_correct = True
            submitted_value, feasibility_message = check_feasibility( tested_numbers, value, size, capacity )
            if submitted_value is None:
                # Malformed or over-capacity solution: zero score plus diagnostics.
                print(0.0)
                print(feasibility_message, end='')
            else:
                test_name = os.path.basename(test_file)
                score, value_to_improve = assign_score(submitted_value, test_name)
                # NOTE(review): this print looks like leftover debug output —
                # it emits an extra line before the graded score below; confirm.
                print(score, value_to_improve)
                if score is not None:
                    if test_name.lower() in TEST_WEIGHT:
                        print(score * TEST_WEIGHT[test_name.lower()])
                    else:
                        print(score)
                    print(feasibility_message, end='')
                    print(MESSAGES_SCORE[score](submitted_value, value_to_improve))
                else:
                    print(0.0)
                    print('Could not grade your solution for unknown reason. Please, contact the instructors to resolve this issue.')
    else:
        print('Expecting 3 command line arguments: test_file, correct_answer, tested_answer')
| 37.373684 | 245 | 0.619068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,580 | 0.222504 |
2d8f8e28942f195a55b92faceed62f651846b560 | 332 | py | Python | src/485-max-consecutive-ones.py | sahilrider/LeetCode-Solutions | 9cac844c27b5dbf37a70c2981a09cd92457f7ff1 | [
"MIT"
] | 2 | 2020-03-06T11:44:25.000Z | 2020-03-13T20:07:48.000Z | src/485-max-consecutive-ones.py | sahilrider/LeetCode-Solutions | 9cac844c27b5dbf37a70c2981a09cd92457f7ff1 | [
"MIT"
] | null | null | null | src/485-max-consecutive-ones.py | sahilrider/LeetCode-Solutions | 9cac844c27b5dbf37a70c2981a09cd92457f7ff1 | [
"MIT"
] | null | null | null | '''https://leetcode.com/problems/max-consecutive-ones/'''
class Solution:
    def findMaxConsecutiveOnes(self, nums: List[int]) -> int:
        """Length of the longest run of consecutive non-zero entries in nums."""
        best = 0
        current = 0
        for value in nums:
            # Reset the running count on a zero, otherwise extend it.
            current = current + 1 if value else 0
            best = max(best, current)
        return best
2d8fae0946e143eccda5adf40ae6e9f29db66db6 | 1,508 | py | Python | setup.py | milnus/Phupa | 67822d445c0c235b4652d3a269e846d2babd1b9c | [
"MIT"
] | 1 | 2021-11-02T03:22:42.000Z | 2021-11-02T03:22:42.000Z | setup.py | milnus/Phupa | 67822d445c0c235b4652d3a269e846d2babd1b9c | [
"MIT"
] | 3 | 2021-04-15T01:22:31.000Z | 2022-01-28T02:33:06.000Z | setup.py | milnus/primer_site_extraction_software | 67822d445c0c235b4652d3a269e846d2babd1b9c | [
"MIT"
] | 1 | 2021-11-29T00:07:02.000Z | 2021-11-29T00:07:02.000Z | #!/usr/bin/env python
from setuptools import setup
# Long description shown on PyPI. Typo fixes applied: 'giv' -> 'give'.
LONG_DESCRIPTION = \
'''The program extracts regions of interest from Fasta or Genome Feature Format (GFF) genomes.
This is done given a set of seed sequences given as nucleotide strings in a multi-line fasta file.
The program can output fasta and GFF outputs or regions, and will give multiple outputs around regions and their evidence.
The program takes in multiple fasta or GFF files at a single time and can also take multiple seed sequence pairs.'''

setup(
    name='Magphi',
    version='0.1.6',
    author='Magnus Ganer Jespersen',
    author_email='magnus.ganer.j@gmail.com',
    packages=['Magphi'],
    package_dir={'Magphi': 'Magphi'},
    entry_points={
        'console_scripts': ['Magphi = Magphi.__main__:main']
    },
    url='https://github.com/milnus/Magphi',
    license='MIT license',
    # Typo fix: 'examnination' -> 'examination' (user-facing package text).
    description=('A bioinformatics tool allowing for examination and extraction of genomic features using seed sequences.'),
    long_description=LONG_DESCRIPTION,
    install_requires=['biopython==1.79',
                      'pybedtools'],
    keywords=['Genomic', 'extraction', 'bacteria', 'prokaryotes', 'bioinformatics'],
    classifiers=[
        'Programming Language :: Python :: 3.9',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Development Status :: 4 - Beta']
)
| 39.684211 | 125 | 0.68634 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,073 | 0.711538 |
2d902e63cf6b01f219487429775dd10a277526c1 | 56 | py | Python | tests/__init__.py | EmmanuelObo/python-coord | edcecc881c26b81f3a74db140fdb9dfb2106fb3d | [
"MIT"
] | null | null | null | tests/__init__.py | EmmanuelObo/python-coord | edcecc881c26b81f3a74db140fdb9dfb2106fb3d | [
"MIT"
] | 1 | 2021-03-25T22:00:56.000Z | 2021-03-25T22:00:56.000Z | tests/__init__.py | EmmanuelObo/python-coord | edcecc881c26b81f3a74db140fdb9dfb2106fb3d | [
"MIT"
] | null | null | null | from tests import test_bike
from tests import test_curb
| 18.666667 | 27 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2d90fd798725f293a72746db65ecf6534864d958 | 599 | py | Python | tranquil/solve.py | skyeto/actf2021 | f5f4e280ac459f8d01943f87512cf2b466588f50 | [
"MIT"
] | null | null | null | tranquil/solve.py | skyeto/actf2021 | f5f4e280ac459f8d01943f87512cf2b466588f50 | [
"MIT"
] | null | null | null | tranquil/solve.py | skyeto/actf2021 | f5f4e280ac459f8d01943f87512cf2b466588f50 | [
"MIT"
] | null | null | null | from pwn import *
from pwnlib.elf import *
## Get the offset
## Should be @ ► 0x401260 <vuln+92> ret <0x6161617461616173>
## cyclic -c amd64 -l 0x61616174
## 72
#payload = cyclic(100)
#p = process('./tranquil')
#gdb.attach(p, gdbscript="""
#continue
#""")
#print(p.readline())
#p.sendline(payload)
#print(p.readline())
## Overflow!
context.binary = e = ELF('./tranquil')
# Build a ROP chain that simply calls the binary's win() function.
rop = ROP(e)
rop.call('win')
# 72 bytes of padding reach the saved return address (offset found with the
# cyclic pattern experiment commented out above).
payload = cyclic(72)
payload += rop.chain()
io = remote('shell.actf.co', 21830)
print(io.recvline())
print(payload)
# Send the overflow, then a newline to complete the target's input read.
io.send(payload)
io.send('\n')
print(io.recvline())
print(io.recvline())
2d9380bab4266fca29ab688eb70ff5eb6fd17ab1 | 990 | py | Python | Matrix/Leetcode 909. Snakes and Ladders.py | kaizhengny/LeetCode | 67d64536ab80f4966699fe7460d165f2a98d6a82 | [
"MIT"
] | 31 | 2020-06-23T00:40:04.000Z | 2022-01-08T11:06:24.000Z | Matrix/Leetcode 909. Snakes and Ladders.py | kaizhengny/LeetCode | 67d64536ab80f4966699fe7460d165f2a98d6a82 | [
"MIT"
] | null | null | null | Matrix/Leetcode 909. Snakes and Ladders.py | kaizhengny/LeetCode | 67d64536ab80f4966699fe7460d165f2a98d6a82 | [
"MIT"
] | 7 | 2020-04-30T08:46:03.000Z | 2021-08-28T16:25:54.000Z | class Solution:
def snakesAndLadders(self, board: List[List[int]]) -> int:
n = len(board)
q = collections.deque()
q.append(1)
visited = set()
visited.add(1)
step = 0
while q:
size = len(q)
for _ in range(size):
num = q.popleft()
if num == n*n:
return step
for i in range(1,7):
if num+i > n*n:
break
nxt = self.getValue(board, num+i)
if nxt == -1:
nxt = num+i
if nxt not in visited:
q.append(nxt)
visited.add(nxt)
step += 1
return -1
def getValue(self, board, num):
n = len(board)
x = (num-1)//n
y = (num-1)%n
if x%2 == 1:
y = n-1-y
x = n-1-x
return board[x][y] | 28.285714 | 62 | 0.360606 | 990 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2d94558c7c873c9ba1bee3f6b9671d5e8e49c3c7 | 5,788 | py | Python | model.py | Vishal2188/TherISuRNet---A-Computationally-Efficient-Thermal-Image-Super-Resolution-Network | c2579121b39a536a946adc26c740dc9a7e32296b | [
"MIT"
] | 12 | 2021-03-31T10:10:13.000Z | 2022-02-24T11:11:16.000Z | model.py | Vishal2188/TherISuRNet---A-Computationally-Efficient-Thermal-Image-Super-Resolution-Network | c2579121b39a536a946adc26c740dc9a7e32296b | [
"MIT"
] | 6 | 2021-04-02T20:44:18.000Z | 2021-11-10T10:21:04.000Z | model.py | Vishal2188/TherISuRNet---A-Computationally-Efficient-Thermal-Image-Super-Resolution-Network | c2579121b39a536a946adc26c740dc9a7e32296b | [
"MIT"
] | 2 | 2021-04-15T11:44:05.000Z | 2021-09-01T15:28:10.000Z | import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
class Generator:
    """TherISuRNet generator graph (TensorFlow 1.x, graph mode).

    Builds a thermal-image super-resolution network: residual feature
    extraction with channel attention, two 2x sub-pixel upsampling stages
    (4x total), and a bicubic global-residual-learning (GRL) branch whose
    output is added to the network output in ThermalSR().
    """
    def __init__(self, learning_rate=1e-4, num_blocks=6):
        # Stored for use by external training code; neither attribute is
        # read inside this class.
        self.learning_rate = learning_rate
        self.num_blocks = num_blocks
    def pelu(self, x):
        """Parametric ELU activation with learnable alpha/beta (clamped >= 0.1).

        Variables live in a per-tensor scope derived from x.op.name, with
        AUTO_REUSE so repeated calls on the same op share parameters.
        """
        with tf.variable_scope(x.op.name + '_activation', initializer=tf.constant_initializer(1.0), reuse=tf.AUTO_REUSE):
            # NOTE(review): 'shape' is computed but never used.
            shape = x.get_shape().as_list()[1:]
            alpha = tf.get_variable('alpha', 1, constraint=lambda t: tf.maximum(t, 0.1))
            beta = tf.get_variable('beta', 1, constraint=lambda t: tf.maximum(t, 0.1))
            # Positive part scaled by alpha/beta; negative part is a scaled
            # exponential (ELU-like) — see the PELU formulation.
            positive = tf.nn.relu(x) * alpha / (beta + 1e-9)
            negative = alpha * (tf.exp((-tf.nn.relu(-x)) / (beta + 1e-9)) - 1)
            return negative + positive
    def adaptive_global_average_pool_2d(self, x):
        """Global average pool HxW down to 1x1 while keeping the channel axis."""
        c = x.get_shape()[-1]
        ADAP2d = tf.reshape(tf.reduce_mean(x, axis=[1, 2]), (-1, 1, 1, c))
        return ADAP2d
    def channel_attention(self, x, f, reduction):
        """Squeeze-and-excitation style channel attention.

        Pools globally, bottlenecks f -> f//reduction -> f with 1x1 convs,
        gates with a sigmoid, and rescales the input channel-wise.
        """
        skip_conn = tf.identity(x, name='identity')
        x = self.adaptive_global_average_pool_2d(x)
        x = tf.layers.conv2d(x, kernel_size=1, filters=f//reduction, strides=1, padding='same')
        x = self.pelu(x)
        x = tf.layers.conv2d(x, kernel_size=1, filters=f, strides=1, padding='same')
        x = tf.nn.sigmoid(x)
        CA = tf.multiply(skip_conn, x)
        return CA
    def ResidualBlock(self, x, kernel_size, filters, strides=1):
        """Residual block with three parallel densely-growing branches.

        Each branch runs 3 rounds of (3x3 conv, two 1x1 convs, channel
        attention) and concatenates each round's output back onto the
        branch input. The branches are concatenated, fused by a 1x1 conv,
        and added to the block input (skip).
        """
        x = tf.layers.conv2d(x, kernel_size=1, filters=filters, strides=1, padding='same')
        skip = x
        # Branch 1: dense concatenation growth.
        x1 = x
        for i in range(3):
            tm1 = slim.conv2d(x1, num_outputs=filters, kernel_size=[3, 3], stride=1)
            tm1 = self.pelu(tm1)
            tm1 = slim.conv2d(tm1, num_outputs=filters, kernel_size=[1, 1], stride=1)
            tm1 = self.pelu(tm1)
            tm1 = slim.conv2d(tm1, num_outputs=filters, kernel_size=[1, 1], stride=1)
            tm1 = self.channel_attention(tm1, f=filters, reduction=4)
            x1 = tf.concat([x1,tm1], axis=3)
        # Branch 2: same structure as branch 1 (separate parameters).
        x2 = x
        for i in range(3):
            tm2 = slim.conv2d(x2, num_outputs=filters, kernel_size=[3, 3], stride=1)
            tm2 = self.pelu(tm2)
            tm2 = slim.conv2d(tm2, num_outputs=filters, kernel_size=[1, 1], stride=1)
            tm2 = self.pelu(tm2)
            tm2 = slim.conv2d(tm2, num_outputs=filters, kernel_size=[1, 1], stride=1)
            tm2 = self.channel_attention(tm2, f=filters, reduction=4)
            x2 = tf.concat([x2,tm2], axis=3)
        # Branch 3: same structure as branch 1 (separate parameters).
        x3 = x
        for i in range(3):
            tm3 = slim.conv2d(x3, num_outputs=filters, kernel_size=[3, 3], stride=1)
            tm3 = self.pelu(tm3)
            tm3 = slim.conv2d(tm3, num_outputs=filters, kernel_size=[1, 1], stride=1)
            tm3 = self.pelu(tm3)
            tm3 = slim.conv2d(tm3, num_outputs=filters, kernel_size=[1, 1], stride=1)
            tm3 = self.channel_attention(tm3, f=filters, reduction=4)
            x3 = tf.concat([x3,tm3], axis=3)
        # Fuse the three branches and close the residual connection.
        x5 = tf.concat(values=[x1, x2, x3], axis=3, name='stack0')
        x6 = tf.layers.conv2d(x5, kernel_size=1, filters=filters, strides=strides, padding='same', use_bias=False)
        x7 = skip + x6
        return x7
    def Upsample2xBlock(self, x, kernel_size, filters, strides):
        """2x spatial upsampling: conv followed by depth_to_space (sub-pixel)."""
        # Commented-out alternative: nearest-neighbor resize upsampling.
        #size = tf.shape(x)
        #h = size[1]
        #w = size[2]
        #x = tf.image.resize_nearest_neighbor(x, size=[h * 3, w * 3], align_corners=False, name=None)
        x = tf.layers.conv2d(x, kernel_size=kernel_size, filters=filters, strides=strides, padding='same')
        x = tf.depth_to_space(x, 2)
        x = self.pelu(x)
        return x
    def ThermalSR(self, x, reuse=False, isTraining=True):
        """Build the full 4x super-resolution graph for input batch x.

        NOTE(review): 'isTraining' is accepted but not used in this method.
        """
        with tf.variable_scope("ThermalSR", reuse=reuse) as scope:
            x4 = tf.layers.conv2d(x, kernel_size=7, filters=64, strides=1, padding='same')
            x4 = self.pelu(x4)
            skip = x4
            # Global Residual Learning
            size = tf.shape(x)
            h = size[1]
            w = size[2]
            # GRL branch: bicubic 4x upscale of the raw input, refined by
            # 1x1 convs down to 3 channels; added to the output at the end.
            x_GRL = tf.image.resize_bicubic(x, size=[h * 4, w * 4], align_corners=False, name=None)
            x_GRL = tf.layers.conv2d(x_GRL, kernel_size=1, filters=64, strides=1, padding='same')
            x_GRL = self.pelu(x_GRL)
            x_GRL = tf.layers.conv2d(x_GRL, kernel_size=1, filters=16, strides=1, padding='same')
            x_GRL = self.pelu(x_GRL)
            x_GRL = tf.layers.conv2d(x_GRL, kernel_size=1, filters=3, strides=1, padding='same')
            x_GRL = self.pelu(x_GRL)
            # Low-resolution feature extraction: 4 chained residual blocks.
            for i in range(4):
                x4 = self.ResidualBlock(x4, kernel_size=1, filters=64, strides=1)
            x4 = tf.layers.conv2d(x4, kernel_size=1, filters=64, strides=1, padding='same', use_bias=False)
            x4 = self.pelu(x4)
            x4 = tf.concat([x4, skip], axis=3)
            x4 = tf.layers.conv2d(x4, kernel_size=3, filters=64, strides=1, padding='same', use_bias=False)
            x4 = self.pelu(x4)
            x4 = x4 + skip
            with tf.variable_scope('Upsamplingconv_stage_1'):
                xUP = self.Upsample2xBlock(x4, kernel_size=3, filters=64, strides = 1)
            xUP = tf.layers.conv2d(xUP, kernel_size=1, filters=64, strides=1, padding='same', use_bias=False)
            xUP = self.pelu(xUP)
            skip1 = xUP
            # NOTE(review): each iteration recomputes from xUP (not x5), so
            # this loop does not chain the residual blocks — confirm intent.
            for i in range(2):
                x5 = self.ResidualBlock(xUP, kernel_size=1, filters=32, strides=1)
            x5 = tf.layers.conv2d(x5, kernel_size=1, filters=32, strides=1, padding='same', use_bias=False)
            x5 = self.pelu(x5)
            x5 = tf.concat([x5, skip1], axis=3)
            x5 = tf.layers.conv2d(x5, kernel_size=3, filters=64, strides=1, padding='same', use_bias=False)
            x5 = self.pelu(x5)
            x5 = x5 + skip1
            with tf.variable_scope('Upsamplingconv_stage_2'):
                x6 = self.Upsample2xBlock(x5, kernel_size=3, filters=64, strides = 1)
            # Reconstruction head: reduce to a 3-channel image.
            x6 = tf.layers.conv2d(x6, kernel_size=3, filters=64, strides=1, padding='same', name='forward_4')
            x6 = self.pelu(x6)
            x6 = tf.layers.conv2d(x6, kernel_size=3, filters=3, strides=1, padding='same', name='forward_5')
            x6 = self.pelu(x6)
            # Add the global residual branch to form the final SR image.
            x_final = x6 + x_GRL
        return x_final
| 41.640288 | 117 | 0.636835 | 5,703 | 0.985314 | 0 | 0 | 0 | 0 | 0 | 0 | 383 | 0.066171 |
2d961bbd9e62206e0f980c876fa9c77bbf669f26 | 10,337 | py | Python | authordetect/tokenizers/nltk.py | fabianfallasmoya/authorship_classification | fbcc1d21407cd65b5909fc04209d4517e6a033a1 | [
"MIT"
] | 2 | 2020-10-16T19:27:21.000Z | 2021-11-04T15:06:54.000Z | authordetect/tokenizers/nltk.py | fabianfallasmoya/authorship_classification | fbcc1d21407cd65b5909fc04209d4517e6a033a1 | [
"MIT"
] | 24 | 2020-07-13T01:49:36.000Z | 2020-10-30T21:54:00.000Z | facet/tokenizer/nltk.py | edponce/FACET | 0dca2d728813a4865e72b2e8fd6b114a0c63d5b0 | [
"MIT"
] | null | null | null | import nltk
from .base import BaseTokenizer
from typing import (
Tuple,
Iterator,
)
__all__ = ['NLTKTokenizer']
class NLTKTokenizer(BaseTokenizer):
    """NLTK-based Treebank tokenizer.
    Args:
        sentencizer (str): Name of sentencizer for text.
        chunker (str): Phrase chunker where 'noun' uses nouns only,
            'noun_chunks' uses basic noun chunking, 'pos_chunks' uses
            parts-of-speech for chunking, None uses window-based
            tokenization. If chunking is enabled then 'window',
            'stopwords', and 'min_token_length' parameters are not used.
        tokenizer (str): Name of tokenizer for sentences.
        lemmatizer (str): Name of lemmatizer for tokens. None = disabled.
        language (str): Language to use for processing corpora.

    Spans yielded throughout this class are inclusive (begin, end) pairs;
    hence the recurring "end - 1" adjustments over NLTK's half-open spans.
    """
    NAME = 'nltk'
    # For reference only, these are the universal POS tags.
    # https://spacy.io/api/annotation#pos-universal
    _UNIVERSAL_POS_TAGS = (
        'ADJ', 'ADP', 'ADV', 'AUX', 'CONJ', 'CCONJ', 'DET', 'INTJ',
        'NOUN', 'NUM', 'PART', 'PRON', 'PROPN', 'PUNCT', 'SCONJ',
        'SYM', 'VERB', 'X', 'SPACE',
    )
    # Maps of user-facing option names to NLTK implementation classes.
    _SENTENCIZER_MAP = {
        'line': nltk.tokenize.LineTokenizer,
        'punctuation': nltk.tokenize.PunktSentenceTokenizer,
    }
    _TOKENIZER_MAP = {
        # NOTE: The following tokenizers raise 'NotImplementedError'
        # for 'span_tokenize()'.
        # 'nltk': nltk.tokenize.NLTKWordTokenizer,
        # 'toktok': nltk.tokenize.ToktokTokenizer,
        'treebank': nltk.tokenize.TreebankWordTokenizer,
        # NOTE: Will be deprecated in v3.2.5, NLTK recommends
        # nltk.parse.corenlp.CoreNLPTokenizer, but this does not exists.
        # 'stanford': nltk.tokenize.StanfordSegmenter,
        'punctuation': nltk.tokenize.WordPunctTokenizer,
        'space': nltk.tokenize.SpaceTokenizer,
        'whitespace': nltk.tokenize.WhitespaceTokenizer,
    }
    _LEMMATIZER_MAP = {
        'ci': nltk.stem.Cistem,
        'isri': nltk.stem.ISRIStemmer,
        'lancaster': nltk.stem.LancasterStemmer,
        'porter': nltk.stem.PorterStemmer,
        'snowball': nltk.stem.SnowballStemmer,
        'rslps': nltk.stem.RSLPStemmer,
        'wordnet': nltk.stem.WordNetLemmatizer,
    }
    def __init__(
        self,
        *,
        sentencizer: str = 'punctuation',
        chunker: str = None,
        tokenizer: str = 'treebank',
        lemmatizer: str = 'snowball',
        language: str = 'english',
        **kwargs,
    ):
        # Set class's stop words, then initialize base class to allow
        # customization of stop words.
        try:
            type(self)._STOPWORDS = set(nltk.corpus.stopwords.words(language))
        except ValueError as ex:
            # NOTE(review): 'raise ex(...)' calls the caught exception
            # instance — likely intended 'raise ValueError(...) from ex';
            # confirm before changing.
            raise ex(f"Model for NLTK language '{language}' is invalid.")
        super().__init__(**kwargs)
        # Dispatch table: the selected chunker becomes the tokenize()
        # implementation for this instance.
        chunker_func_map = {
            'nouns': self._tokenize_with_nouns,
            'noun_chunks': self._tokenize_with_noun_chunks,
            'pos_chunks': self._tokenize_with_pos_chunks,
            # Let base class handle tokenization window.
            None: super().tokenize,
        }
        self._sentencizer = type(self)._SENTENCIZER_MAP[sentencizer]()
        self._chunker = chunker_func_map[chunker]
        self._tokenizer = type(self)._TOKENIZER_MAP[tokenizer]()
        # NOTE: Need to set 'language' before '_get_lemmatizer()'.
        self._language = language
        self._lemmatizer = self._get_lemmatizer(lemmatizer)
        # Grammar used by _tokenize_with_noun_chunks: adjectives + noun.
        self._parser = nltk.RegexpParser('NP: {<ADJ>*<NOUN>}')
    def _get_lemmatizer(self, lemmatizer: str):
        """Instantiate the named stemmer/lemmatizer, or pass None through."""
        if lemmatizer is None:
            return lemmatizer
        # NOTE: This may trigger a LookupError if the stemmer/lemmatizer
        # resource is not found/installed.
        elif lemmatizer == 'snowball':
            _lemmatizer = (
                type(self)._LEMMATIZER_MAP[lemmatizer](self._language)
            )
        else:
            _lemmatizer = type(self)._LEMMATIZER_MAP[lemmatizer]()
            # NOTE: In NLTK, WordNetLemmatizer API differs from stemmers.
            if lemmatizer == 'wordnet':
                _lemmatizer.stem = _lemmatizer.lemmatize
        # NOTE: This may trigger a LookupError if the stemmer/lemmatizer
        # resource is not found/installed.
        _lemmatizer.stem('testing')
        return _lemmatizer
    def _pos_tag(
        self,
        text: Tuple[int, int, str],
    ) -> Iterator[Tuple[Tuple[int, int], Tuple[str, str]]]:
        """Parts-of-speech tagging."""
        spans = []
        tokens = []
        for begin, end in self._tokenizer.span_tokenize(text[2]):
            spans.append((text[0] + begin, text[0] + end - 1))
            tokens.append(text[2][begin:end])
        # NOTE: Language for nltk.pos_tag() is based on
        # ISO 639-2 (3 letter code). We take the first 3-letters of language
        # set for nltk.stopwords.words() although this is not always correct,
        # but we chose this approach for simplicity.
        yield from zip(
            spans,
            nltk.pos_tag(tokens, tagset='universal', lang=self._language[:3])
        )
    def _is_valid_token(self, token: str):
        """True if token meets the length threshold and is not a stop word."""
        return (
            len(token) >= self._min_token_length
            and token not in self._stopwords
        )
    def tokenize(self, text):
        """Tokenize text via the configured chunker; yields (begin, end, token)."""
        # NOTE: Support raw strings to allow invoking directly, that is,
        # it is not necessary to 'sentencize()' first.
        yield from self._chunker(
            (0, len(text) - 1, text)
            if isinstance(text, str)
            else text
        )
    def _sentencize(self, text):
        """Yield (begin, end, sentence) triples with inclusive end offsets."""
        yield from (
            (begin, end - 1, text[begin:end])
            for begin, end in self._sentencizer.span_tokenize(text)
        )
    def _lemmatize(self, text: str) -> str:
        """Stem/lemmatize text, or return it unchanged when disabled."""
        return (
            text
            if self._lemmatizer is None
            else self._lemmatize r is None else self._lemmatizer.stem(text)
        )
    def _tokenize(self, text: Tuple[int, int, str]):
        """Window-free tokenization of one sentence triple."""
        sentence = text[2]
        # for begin, end in self._tokenizer.span_tokenize(sentence):
        #     token = sentence[begin:end]
        #     if self._is_valid_token(token):
        #         yield (
        #             text[0] + begin,
        #             text[0] + end - 1,
        #             self._lemmatize(token),
        #         )
        yield from (
            (
                text[0] + begin,
                text[0] + end - 1,
                self._lemmatize(sentence[begin:end]),
            )
            for begin, end in self._tokenizer.span_tokenize(sentence)
            if self._is_valid_token(sentence[begin:end])
        )
    def _tokenize_with_nouns(self, text: Tuple[int, int, str]):
        """Tokenizer for single nouns."""
        def is_valid_pos(pos: str):
            return pos in ('NOUN', 'PROPN', 'X')
        yield from (
            (*span, self._lemmatize(token))
            for span, (token, pos) in self._pos_tag(text)
            if is_valid_pos(pos) and self._is_valid_token(token)
        )
    def _tokenize_with_noun_chunks(self, text: Tuple[int, int, str]):
        """Tokenizer for noun chunks."""
        def is_valid_pos(node: 'nltk.tree.Tree'):
            return isinstance(node, nltk.tree.Tree) and node.label() == 'NP'
        # Parser requires tags in an iterable, so we unpack them.
        spans, tags = zip(*self._pos_tag(text))
        # NOTE: Traverse parser tree assuming it has height = 3.
        spans = iter(spans)
        for node in self._parser.parse(tags):
            span = next(spans)
            if is_valid_pos(node):
                begin = span[0]
                # Consume the remaining spans covered by this NP subtree so
                # the yielded span covers the whole chunk.
                for _ in range(len(node) - 1):
                    span = next(spans)
                yield (
                    begin,
                    span[1],
                    ' '.join(map(lambda t: self._lemmatize(t[0]), node)),
                )
    def _tokenize_with_pos_chunks(self, text: Tuple[int, int, str]):
        """Phrase tokenizer with parts-of-speech tags for marking bounds."""
        def is_valid_pos(pos: str):
            return pos in (
                'ADJ', 'ADP', 'ADV', 'AUX', 'CONJ', 'DET', 'NOUN', 'PROPN',
                'PART', 'VERB', 'X',
            )
        def is_valid_begin_pos(pos: str):
            return pos in ('ADJ', 'ADV', 'DET', 'NOUN', 'PROPN', 'VERB', 'X')
        def is_valid_middle_pos(pos: str):
            return pos in (
                'ADJ', 'ADP', 'ADV', 'AUX', 'CONJ', 'DET', 'NOUN', 'PROPN',
                'PART', 'VERB', 'X',
            )
        def is_valid_end_pos(pos: str):
            return pos in ('NOUN', 'PROPN', 'VERB', 'X')
        # NOTE(review): if the tagged text yields no valid POS at all,
        # 'is_end_token' below is referenced before assignment — confirm.
        spans = []
        tokens = []
        for span, (token, pos) in self._pos_tag(text):
            if not is_valid_pos(pos):
                continue
            # Flag for not duplicating flush of a single token valid
            # as both, end and begin POS.
            is_end_token = False
            # Check for end token first:
            # Handle single word tokens
            # An end token can also be a begin token of another phrase
            if is_valid_end_pos(pos):
                # NOTE: Split based on chunk size to improve performance.
                if len(spans) == 0:
                    if self._is_valid_token(token):
                        is_end_token = True
                        yield (*span, self._lemmatize(token))
                else:
                    is_end_token = True
                    tokens.append(token)
                    yield (
                        spans[0][0],
                        span[1],
                        ' '.join(map(lambda t: self._lemmatize(t), tokens)),
                    )
                    spans = []
                    tokens = []
            if (
                is_valid_begin_pos(pos)
                or (len(tokens) > 0 and is_valid_middle_pos(pos))
            ):
                spans.append(span)
                tokens.append(token)
        # Use remaining chunk span if not a single end token
        if len(spans) > 0 and not is_end_token:
            yield (
                spans[0][0],
                spans[-1][1],
                ' '.join(map(lambda t: self._lemmatize(t), tokens)),
            )
        # spans = []
        # tokens = []
| 35.159864 | 78 | 0.546387 | 10,212 | 0.987908 | 5,592 | 0.540969 | 0 | 0 | 0 | 0 | 3,392 | 0.328142 |
2d96973e83e7a2807cf2780063d803366b4ad276 | 10,185 | py | Python | vcs/editors/marker.py | scottwittenburg/vcs | 5b9f17fb78f7ab186fc0132ab81ada043a7ba348 | [
"BSD-3-Clause"
] | 11 | 2018-10-10T03:14:33.000Z | 2022-01-05T14:18:15.000Z | vcs/editors/marker.py | scottwittenburg/vcs | 5b9f17fb78f7ab186fc0132ab81ada043a7ba348 | [
"BSD-3-Clause"
] | 196 | 2018-03-21T19:44:56.000Z | 2021-12-21T21:56:24.000Z | vcs/editors/marker.py | scottwittenburg/vcs | 5b9f17fb78f7ab186fc0132ab81ada043a7ba348 | [
"BSD-3-Clause"
] | 5 | 2019-12-09T21:54:45.000Z | 2022-03-20T04:22:14.000Z | from vcs import vtk_ui
from vcs.colorpicker import ColorPicker
from vcs.vtk_ui import behaviors
from vcs.VCS_validation_functions import checkMarker
import vtk
import vcs.vcs2vtk
from . import priority
import sys
class MarkerEditor(
        behaviors.ClickableMixin, behaviors.DraggableMixin, priority.PriorityEditor):
    """
    Editor for marker objects

    Ctrl + click to drop a new marker, toolbar to configure, priority, draggable + handles on each marker.
    """
    def __init__(self, interactor, marker, index, display, configurator):
        self.interactor = interactor
        self.marker = marker
        self.index = index
        self.configurator = configurator
        # VTK pipeline pieces for this marker, cached by the display backend.
        actors = display.backend["vtk_backend_marker_actors"][index]
        self.glyph, self.glyph_source, self.polydata, self.actor, self.geo = actors
        self.display = display
        # One draggable handle per (x, y) point of the marker group.
        self.handles = []
        for ind, x in enumerate(marker.x[index]):
            y = marker.y[index][ind]
            h = vtk_ui.Handle(
                self.interactor, (x, y), dragged=self.adjust, color=(
                    0, 0, 0), normalize=True)
            h.show()
            self.handles.append(h)
        # Toolbar with color, size, and type controls.
        self.toolbar = vtk_ui.toolbar.Toolbar(
            self.interactor,
            "Marker Options")
        self.toolbar.show()
        self.toolbar.add_button(["Change Color"], action=self.change_color)
        self.toolbar.add_slider_button(
            marker.size[index],
            1,
            300,
            "Marker Size",
            update=self.set_size)
        self.type_bar = self.toolbar.add_toolbar(
            "Marker Type",
            open_label="Change")
        # Both buttons get a placeholder entry at position 0 so that state 0
        # means "nothing selected".
        shapes = marker_shapes()
        shapes.insert(0, "Select Shape")
        self.shape_button = self.type_bar.add_button(
            shapes,
            action=self.change_shape)
        wmos = wmo_shapes()
        wmos.insert(0, "Select WMO Marker")
        self.wmo_button = self.type_bar.add_button(
            wmos,
            action=self.change_wmo)
        # Reflect the marker's current type in whichever button owns it.
        if self.marker.type[self.index] in shapes:
            self.shape_button.set_state(
                shapes.index(
                    self.marker.type[
                        self.index]))
        else:
            self.wmo_button.set_state(wmos.index(self.marker.type[self.index]))
        # Used to store the color picker when it's active
        self.picker = None
        prop = vtk.vtkTextProperty()
        prop.SetBackgroundColor(.87, .79, .55)
        prop.SetBackgroundOpacity(1)
        prop.SetColor(0, 0, 0)
        self.tooltip = vtk_ui.Label(
            self.interactor,
            "%s + Click to place new markers." %
            ("Cmd" if sys.platform == "darwin" else "Ctrl"),
            textproperty=prop)
        # Pin the tooltip to the top-left corner of the render window.
        self.tooltip.left = 0
        self.tooltip.top = self.interactor.GetRenderWindow(
        ).GetSize()[1] - self.tooltip.get_dimensions()[1]
        self.tooltip.show()
        super(MarkerEditor, self).__init__()
        self.register()
    def get_object(self):
        """Return the marker object being edited."""
        return self.marker
    def handle_click(self, point):
        """True if a click at point should be handled by this editor."""
        x, y = point
        # Control drops a new instance
        return self.in_bounds(x, y) or self.toolbar.in_toolbar(
            x, y) or self.current_modifiers()["control"]
    def is_object(self, marker):
        """True if this editor is editing the given marker."""
        return self.marker == marker
    def place(self):
        """Re-place handles and toolbar (e.g. after a window resize)."""
        for h in self.handles:
            h.place()
        self.toolbar.place()
    def render(self):
        """Queue a render through the shared vtk_ui manager."""
        from vcs.vtk_ui.manager import get_manager
        m = get_manager(self.interactor)
        m.queue_render()
    def update_shape(self):
        # Update the glyph for the marker to reflect the new shape
        self.glyph_source, self.polydata = vcs.vcs2vtk.prepGlyph(
            self.glyph, self.marker, self.index)
        self.display.backend["vtk_backend_marker_actors"][
            self.index] = (
            self.glyph,
            self.glyph_source,
            self.polydata,
            self.actor,
            self.geo)
        # Have to rescale the glyph now... work that out later with charles
        self.render()
    def change_shape(self, index):
        """Toolbar callback: select plain shape `index` (0 = placeholder)."""
        if index != 0:
            self.marker.type[self.index] = marker_shapes()[index - 1]
            self.wmo_button.set_state(0)
            self.update_shape()
        else:
            # Placeholder chosen: fall through to the first WMO marker.
            self.change_wmo(1)
    def change_wmo(self, index):
        """Toolbar callback: select WMO marker `index` (0 = placeholder)."""
        if index != 0:
            self.marker.type[self.index] = wmo_shapes()[index - 1]
            self.shape_button.set_state(0)
            self.update_shape()
        else:
            # Placeholder chosen: fall through to the first plain shape.
            self.change_shape(1)
    def set_size(self, size):
        """Slider callback: update marker size and rebuild the glyph."""
        self.marker.size[self.index] = size
        self.update_shape()
    def change_color(self, state):
        """Open (or re-focus) the color picker for this marker."""
        if self.picker:
            self.picker.make_current()
        else:
            self.picker = ColorPicker(
                500,
                500,
                self.marker.colormap,
                self.marker.color[
                    self.index],
                parent_interactor=self.interactor,
                on_save=self.set_color,
                on_cancel=self.cancel_color)
    def set_color(self, colormap, color):
        """Color picker save callback: apply colormap/color to the actor."""
        self.marker.colormap = colormap
        self.marker.color[self.index] = color
        del self.picker
        self.picker = None
        vcs.vcs2vtk.setMarkerColor(
            self.actor.GetProperty(),
            self.marker,
            self.marker.color[
                self.index])
        self.render()
    def cancel_color(self):
        """Color picker cancel callback: drop the picker reference."""
        del self.picker
        self.picker = None
    def click_release(self):
        """On Ctrl/Cmd+click release, drop a new marker point under the cursor."""
        x, y = self.event_position()
        if self.current_modifiers()["control"]:
            h = vtk_ui.Handle(
                self.interactor, (x, y), dragged=self.adjust, color=(
                    0, 0, 0), normalize=True)
            h.show()
            self.handles.append(h)
            self.marker.x[self.index].append(x)
            self.marker.y[self.index].append(y)
            self.sync_positions()
    def adjust(self, handle, dx, dy):
        """Handle drag callback: shift the matching marker point by (dx, dy)."""
        ind = self.handles.index(handle)
        self.marker.x[self.index][ind] += dx
        self.marker.y[self.index][ind] += dy
        self.sync_positions()
    def in_bounds(self, x, y):
        """True if (x, y) falls inside any point of this marker group."""
        w, h = self.interactor.GetRenderWindow().GetSize()
        return inside_marker(
            self.marker, x, y, w, h, index=self.index) is not None
    def right_release(self):
        """Right-click: delete the marker point under the cursor."""
        x, y = self.event_position()
        if self.in_bounds(x, y):
            points = list(zip(self.marker.x[self.index], self.marker.y[self.index]))
            size = self.marker.size[self.index]
            screen_width, screen_height = self.interactor.GetRenderWindow(
            ).GetSize()
            w, h = float(size) / screen_width, float(size) / screen_height
            # Find the first point whose bounding box contains the click;
            # in_bounds() above guarantees one exists.
            for ind, point in enumerate(points):
                m_x, m_y = point
                if x > m_x - w and x < m_x + w and y > m_y - h and y < m_y + h:
                    break
            del self.marker.x[self.index][ind]
            del self.marker.y[self.index][ind]
            self.handles[ind].detach()
            del self.handles[ind]
            # If the group is now empty, drop the whole group; if no groups
            # remain, tear down the editor entirely.
            if len(self.marker.x[self.index]) == 0:
                del self.marker.x[self.index]
                del self.marker.y[self.index]
                del self.marker.type[self.index]
                del self.marker.color[self.index]
                if len(self.marker.x) == 0:
                    self.delete()
                return
            self.sync_positions()
    def detach(self):
        """Tear down all UI pieces owned by this editor."""
        self.unregister()
        if self.picker:
            self.picker.close()
            self.picker = None
        self.toolbar.detach()
        for h in self.handles:
            h.detach()
        self.tooltip.detach()
    def delete(self):
        """Hide the actor and deactivate this editor in the configurator."""
        self.actor.SetVisibility(0)
        self.configurator.deactivate(self)
    def update_priority(self):
        """Move the actor to the render layer implied by marker.priority."""
        maxLayers = self.interactor.GetRenderWindow().GetNumberOfLayers()
        new_layer = self.marker.priority * 10000 + 1 + \
            self.configurator.displays.index(self.display)
        if new_layer + 1 > maxLayers:
            self.interactor.GetRenderWindow().SetNumberOfLayers(new_layer + 1)
        self.actor.SetLayerNumber(new_layer)
        self.render()
    def sync_positions(self):
        # Sync all points
        points = self.glyph.GetInput().GetPoints()
        for i, (x, y) in enumerate(
                zip(self.marker.x[self.index], self.marker.y[self.index])):
            if i == points.GetNumberOfPoints():
                points.InsertNextPoint(x, y, 0)
            else:
                points.SetPoint(i, x, y, 0)
        self.glyph.GetInput().Modified()
        self.render()
# Module-level memoization cache mapping marker type id -> validated name,
# shared by marker_shapes() and wmo_shapes().
__shape_cache = {}
def marker_shapes():
    """Return the names of all supported plain marker shapes.

    Probes marker type ids 1-19 through checkMarker(), skipping ids that
    fail validation (e.g. star), and memoizes results in the module-level
    __shape_cache.
    """
    names = []
    for type_id in range(1, 20):
        if type_id in __shape_cache:
            names.append(__shape_cache[type_id])
            continue
        try:
            name = checkMarker(None, "type", type_id)
        except ValueError:
            continue
        __shape_cache[type_id] = name
        names.append(name)
    return names
def wmo_shapes():
    """Return the names of all supported WMO marker types (ids 100-202).

    Probes each id through checkMarker(), skipping ids that fail
    validation, and memoizes results in the module-level __shape_cache.
    """
    names = []
    for type_id in range(100, 203):
        if type_id in __shape_cache:
            names.append(__shape_cache[type_id])
            continue
        try:
            name = checkMarker(None, "type", type_id)
        except ValueError:
            continue
        __shape_cache[type_id] = name
        names.append(name)
    return names
def inside_marker(marker, x, y, screen_width, screen_height, index=None):
    """Return the index of the first marker group containing (x, y), else None.

    Coordinates are normalized (0-1); each point's hit box is +/- size
    divided by the screen dimension in each direction. When `index` is
    given, only that group is tested.
    """
    indices = range(len(marker.x)) if index is None else [index]
    for ind in indices:
        size = marker.size[ind]
        half_w = float(size) / screen_width
        half_h = float(size) / screen_height
        for m_x, m_y in zip(marker.x[ind], marker.y[ind]):
            if m_x - half_w < x < m_x + half_w and m_y - half_h < y < m_y + half_h:
                return ind
    return None
| 30.494012 | 106 | 0.559352 | 8,575 | 0.841924 | 0 | 0 | 0 | 0 | 0 | 0 | 686 | 0.067354 |
2d97ba6d06d356159a1be569b516b82e7fd9edeb | 3,608 | py | Python | tinkoff/invest/_errors.py | forked-group/invest-python | 3398391f5bb4a52020c312855de175cfe8cdc021 | [
"Apache-2.0"
] | 41 | 2022-01-21T05:38:57.000Z | 2022-03-30T03:54:41.000Z | tinkoff/invest/_errors.py | forked-group/invest-python | 3398391f5bb4a52020c312855de175cfe8cdc021 | [
"Apache-2.0"
] | 20 | 2022-01-24T05:46:02.000Z | 2022-03-31T16:54:04.000Z | tinkoff/invest/_errors.py | forked-group/invest-python | 3398391f5bb4a52020c312855de175cfe8cdc021 | [
"Apache-2.0"
] | 15 | 2022-01-25T06:53:27.000Z | 2022-03-30T03:49:07.000Z | from functools import wraps
from typing import Any, Callable, TypeVar, cast
from grpc import Call, RpcError
from grpc.aio import AioRpcError
from .exceptions import AioRequestError, RequestError
from .logging import get_metadata_from_aio_error, get_metadata_from_call, log_error
TFunc = TypeVar("TFunc", bound=Callable[..., Any])
def handle_request_error(name: str):
    """Decorator factory: convert RpcError from a sync gRPC call into RequestError.

    Errors carrying Call metadata are logged under `name` (with tracking id
    when available) and re-raised as RequestError; other RpcErrors are
    re-raised unchanged.
    """
    def decorator(func: TFunc) -> TFunc:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                return func(*args, **kwargs)
            except RpcError as exc:
                if not isinstance(exc, Call):
                    raise
                metadata = get_metadata_from_call(exc)
                tracking_id = metadata.tracking_id if metadata else None
                log_error(
                    tracking_id,
                    name,
                    f"{exc.code().name} {exc.details()}",  # type:ignore
                )
                raise RequestError(
                    exc.code(), exc.details(), metadata  # type:ignore
                ) from exc
        return cast(TFunc, wrapper)
    return decorator
def handle_request_error_gen(name: str):
    """Decorator factory: like handle_request_error, for sync generator functions.

    Delegates to the wrapped generator with `yield from`; RpcErrors that
    carry Call metadata are logged and converted to RequestError, others
    are re-raised unchanged.
    """
    def decorator(func: TFunc) -> TFunc:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                yield from func(*args, **kwargs)
            except RpcError as exc:
                if not isinstance(exc, Call):
                    raise
                metadata = get_metadata_from_call(exc)
                tracking_id = metadata.tracking_id if metadata else None
                log_error(
                    tracking_id,
                    name,
                    f"{exc.code().name} {exc.details()}",  # type:ignore
                )
                raise RequestError(
                    exc.code(), exc.details(), metadata  # type:ignore
                ) from exc
        return cast(TFunc, wrapper)
    return decorator
def handle_aio_request_error(name: str):
    """Decorator factory: convert AioRpcError from an async gRPC call into AioRequestError.

    The error is logged under `name` (with tracking id when metadata is
    available) before being re-raised as AioRequestError.
    """
    def decorator(func: TFunc) -> TFunc:
        @wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                return await func(*args, **kwargs)
            except AioRpcError as exc:
                metadata = get_metadata_from_aio_error(exc)
                tracking_id = metadata.tracking_id if metadata else None
                log_error(
                    tracking_id,
                    name,
                    f"{exc.code().name} {exc.details()}",  # type:ignore
                )
                raise AioRequestError(
                    exc.code(), exc.details(), metadata  # type:ignore
                ) from exc
        return cast(TFunc, wrapper)
    return decorator
def handle_aio_request_error_gen(name: str):
    """Decorator factory: like handle_aio_request_error, for async generators.

    Re-yields every item from the wrapped async generator; AioRpcErrors
    are logged under `name` and converted to AioRequestError.
    """
    def decorator(func: TFunc) -> TFunc:
        @wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                async for item in func(*args, **kwargs):
                    yield item
            except AioRpcError as exc:
                metadata = get_metadata_from_aio_error(exc)
                tracking_id = metadata.tracking_id if metadata else None
                log_error(
                    tracking_id,
                    name,
                    f"{exc.code().name} {exc.details()}",  # type:ignore
                )
                raise AioRequestError(
                    exc.code(), exc.details(), metadata  # type:ignore
                ) from exc
        return cast(TFunc, wrapper)
    return decorator
| 33.407407 | 83 | 0.504435 | 0 | 0 | 1,658 | 0.459534 | 2,667 | 0.739191 | 1,225 | 0.339523 | 239 | 0.066242 |
2d97f03499e19a5c1f5c27d7a74182ab0d44f492 | 4,049 | py | Python | src/controllerarena/controllers/refVec.py | VerifiableRobotics/controller-arena | 4506ef47404de85ec0511594740e53c27a21ef88 | [
"BSD-3-Clause"
] | null | null | null | src/controllerarena/controllers/refVec.py | VerifiableRobotics/controller-arena | 4506ef47404de85ec0511594740e53c27a21ef88 | [
"BSD-3-Clause"
] | null | null | null | src/controllerarena/controllers/refVec.py | VerifiableRobotics/controller-arena | 4506ef47404de85ec0511594740e53c27a21ef88 | [
"BSD-3-Clause"
] | null | null | null | # code for python reference dipole vector field controller
# these functions require stuff
#from mathFuns import *
from numpy import *
from math import *
class refVec:
    """Reference dipole vector field controller for a unicycle-type robot.

    Poses are 3x1 numpy column arrays [[x], [y], [theta]]. The controller
    steers pose q toward q_d by aligning the heading with a dipole-shaped
    reference vector field; P/PI/PD/PID variants are selected at
    construction time.

    Fixes applied: Python 2 'print F' / 'print u' statements (syntax errors
    under Python 3) replaced with print() calls; '== None' replaced with
    'is None'.
    """

    def __init__(self, q_0, controller_flag):
        """Initialize controller state and gains.

        q_0: initial pose (3x1 column array), seeds the finite difference
            of q in get_control().
        controller_flag: 1=PID, 2=PI, 3=PD, anything else=P.
        """
        # Controller memory: previous field angle, previous pose, and
        # accumulated angular/position errors for the integral terms.
        self.phi_prev = None
        self.q_prev = q_0
        self.e_int_w = 0
        self.e_int_u = 0
        # Proportional gains: *_u acts on position, *_w on heading.
        self.k_p_u = 1
        self.k_p_w = 3
        if controller_flag == 1:  # PID
            self.k_i_w = 1
            self.k_i_u = 1
            self.k_d = -1  # the derivative gain acts only on the angle
        elif controller_flag == 2:  # PI
            self.k_i_w = 1
            self.k_i_u = 1
            self.k_d = 0
        elif controller_flag == 3:  # PD
            self.k_i_w = 0
            self.k_i_u = 0
            self.k_d = -1
        else:  # P
            self.k_i_w = 0
            self.k_i_u = 0
            self.k_d = 0

    def get_output(self, q_d, q, dt):
        """Return control [[v], [w]] driving pose q toward q_d over step dt."""
        F = self.get_vector_field(q, q_d)  # reference field at current pose
        u = self.get_control(q, q_d, F, dt)
        return u

    def get_vector_field(self, q, q_d):
        """Evaluate the dipole reference vector field at pose q.

        Returns the 2x1 column vector
        F = lambda*(r . dp)*dp - r*(dp . dp), where r is the desired
        heading direction, dp = position error, and lambda = 3.
        """
        lamb = 3
        theta_d = q_d[2][0]
        delta_p = q[0:2] - q_d[0:2]  # location - location_desired
        r = array([[cos(theta_d)], [sin(theta_d)]])
        F = lamb*(dot(transpose(r), delta_p)[0][0])*delta_p - r*(dot(transpose(delta_p), delta_p)[0][0])
        print(F)  # fixed: was Python 2 'print F'
        return F  # column vector

    def get_control(self, q, q_d, F, dt):
        """Compute control [[v], [w]] from the field value F.

        v: saturated (tanh) position error with sign chosen by the heading
           projection, plus an integral term.
        w: proportional heading-to-field-angle error plus integral and
           derivative terms.
        """
        delta_p = q[0:2] - q_d[0:2]  # location - location_desired
        self.e_int_w += self.sub_angles(q[2][0], q_d[2][0])*dt  # accumulate angular error
        self.e_int_u += linalg.norm(delta_p)*dt  # accumulate position error
        theta = q[2][0]
        # unpack gains
        k_p_u = self.k_p_u
        k_p_w = self.k_p_w
        k_i_w = self.k_i_w
        k_i_u = self.k_i_u
        k_d = self.k_d
        Fx = F[0][0]
        Fy = F[1][0]
        phi = atan2(Fy, Fx)  # direction of the reference field
        # Backward finite difference for phi_dot; on the first pass there
        # is no history, so the derivative term starts at zero.
        if self.phi_prev is None:  # fixed: was '== None'
            self.phi_prev = phi
        phi_dot = (phi - self.phi_prev)/dt
        self.phi_prev = phi
        # NOTE(review): q_dot is computed but unused — kept for parity.
        q_dot = (q - self.q_prev)/dt
        self.q_prev = q
        # controller
        v = -k_p_u*sign(dot(transpose(delta_p), array([[cos(theta)], [sin(theta)]]))[0][0])*tanh(linalg.norm(delta_p)**2) - k_i_u*self.e_int_u
        w = -k_p_w*self.sub_angles(theta, phi) - k_i_w*self.e_int_w - k_d*phi_dot
        u = array([[v], [w]])
        print(u)  # fixed: was Python 2 'print u'
        return u

    def update_state(self, q_d, q, dt):
        """Placeholder for a controller state update; intentionally a no-op."""
        pass

    def sub_angles(self, ang1, ang2):
        """Return ang1 - ang2 wrapped into (-pi, pi]."""
        return (ang1 - ang2 + pi) % (2*pi) - pi
# For future:
# pass r vector as parameter
# low pass filtering for derivatives (PD control?) [phidot]
# visual stuff
# global feedback plan is the ref vecf field
# controller is a function of vector field, but you can use a better controller to get better performance
| 34.313559 | 147 | 0.568288 | 3,619 | 0.893801 | 0 | 0 | 0 | 0 | 0 | 0 | 1,629 | 0.402322 |
2d986f68f511ab1e314274eb389796705ee7e764 | 7,066 | py | Python | script/trainer/trainer.py | Intelligent-Systems-Lab/ISL-BCFL | 42ceb86708a76e28b31c22b33c15ee9a6a745ec7 | [
"Apache-2.0"
] | null | null | null | script/trainer/trainer.py | Intelligent-Systems-Lab/ISL-BCFL | 42ceb86708a76e28b31c22b33c15ee9a6a745ec7 | [
"Apache-2.0"
] | null | null | null | script/trainer/trainer.py | Intelligent-Systems-Lab/ISL-BCFL | 42ceb86708a76e28b31c22b33c15ee9a6a745ec7 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader, TensorDataset, Dataset
from torch.utils.data.sampler import SubsetRandomSampler
from torch import optim
import pandas as pd
import sys
sys.path.append('./proto')
import trainer_pb2
import trainer_pb2_grpc
import time
from concurrent import futures
import logging
import grpc
import argparse
import base64
import io
import ipfshttpclient
torch.nn.Module.dump_patches = True
def fullmodel2base64(model):
    """Serialize a torch object (model/tensor) to a base64 ASCII string."""
    stream = io.BytesIO()
    torch.save(model, stream)
    return base64.b64encode(stream.getvalue()).decode()
def base642fullmodel(modbase64):
    """Inverse of fullmodel2base64: decode base64 and torch.load the object."""
    raw = base64.b64decode(modbase64.encode())
    return torch.load(io.BytesIO(raw))
# class Model(nn.Module):
# def __init__(self):
# super().__init__()
# self.hidden = nn.Linear(784, 20)
# self.output = nn.Linear(20, 10)
# def forward(self, x):
# x = self.hidden(x)
# x = torch.sigmoid(x)
# x = self.output(x)
# return x
class Model(nn.Module):
    """EMNIST classifier: two conv stacks (32 then 64 channels) followed by
    a two-layer MLP head producing 47 class logits.

    Expects input of shape (batch, 1, 28, 28); after the conv stacks the
    feature map is 64 x 3 x 3 = 576 values per sample.
    """

    def __init__(self):
        super().__init__()
        # Attribute names ('cnn', 'classifier') and layer order are kept so
        # state_dict keys stay compatible with existing checkpoints.
        conv_layers = [
            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Dropout(0.25),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout(0.25),
        ]
        self.cnn = nn.Sequential(*conv_layers)
        self.classifier = nn.Sequential(
            nn.Linear(576, 256),
            nn.Dropout(0.5),
            nn.Linear(256, 47),
        )

    def forward(self, x):
        features = self.cnn(x)
        flat = features.view(features.size(0), -1)  # flatten layer
        return self.classifier(flat)
class Trainer(trainer_pb2_grpc.TrainerServicer):
    """gRPC servicer that trains one epoch on request.

    Models are exchanged through IPFS: the request carries the IPFS hash
    of a base64-serialized model; the response carries the hash of the
    trained model.
    """
    def __init__(self, csvdata, device, batch):
        # Data loader built once from the CSV training set.
        self.dloader = getdataloader(csvdata, batch=batch)
        self.device = device
        # self.batch = batch
        # Block until the IPFS daemon is reachable, retrying every second.
        # NOTE(review): bare 'except' also swallows KeyboardInterrupt;
        # consider 'except Exception'.
        while True:
            try:
                self.client = ipfshttpclient.connect("/ip4/172.168.10.10/tcp/5001/http")
                break
            except:
                print("Waiting for ipfs services at : 172.168.10.10:5001")
                time.sleep(1)
    def Train(self, request, result):
        # NOTE(review): gRPC servicer methods receive (request, context);
        # the 'result' parameter is the RPC context here and is shadowed
        # by the local 'result' below — rename when convenient.
        #print(request.BaseModel)
        print("Training...")
        # Fetch the base model from IPFS, train one epoch, publish result.
        result = trainOneEp(self.client.cat(request.BaseModel).decode(), self.dloader, self.device)
        hashresult = self.client.add_str(result)
        return trainer_pb2.TrainResult(Round=request.Round, Result=hashresult)
def serve(data, port, dev, bat):
    """Start the gRPC Trainer service on 0.0.0.0:<port> and block forever.

    data: path to the CSV training set handed to Trainer.
    port: port number as a string.
    dev:  device selector ("GPU" or anything else for CPU).
    bat:  batch size for the data loader.
    """
    print("Read dataset : ", data)
    print("Using : ", dev)
    print("Port : ", port)
    time.sleep(2)
    pool = futures.ThreadPoolExecutor(max_workers=10)
    rpc_server = grpc.server(pool)
    servicer = Trainer(data, dev, bat)
    trainer_pb2_grpc.add_TrainerServicer_to_server(servicer, rpc_server)
    rpc_server.add_insecure_port('0.0.0.0:' + port)
    rpc_server.start()
    rpc_server.wait_for_termination()
def trainOneEp(bmodel, dloader, device):
    """Train the base64-encoded model for one epoch and re-encode it.

    Parameters
    ----------
    bmodel : str
        Base64-serialized model produced by fullmodel2base64().
    dloader : DataLoader
        Batches of (image, label) training data.
    device : str
        "GPU" trains on CUDA; anything else trains on CPU.

    Returns
    -------
    str
        The updated model, base64-serialized (always moved back to CPU first).
    """
    # Bug fix: the previous dead store `model = Model()` was immediately
    # overwritten by the deserialized model, so it was removed.
    model = base642fullmodel(bmodel)
    print(model)
    optimizer = optim.RMSprop(model.parameters(), lr=0.001)
    loss_function = nn.CrossEntropyLoss()
    if (device=="GPU"):
        model.cuda()
    model.train()
    for data, target in dloader:
        if (device=="GPU"):
            data = data.cuda()
            target = target.cuda()
        optimizer.zero_grad()
        output = model(data.float())
        loss = loss_function(output, target)
        loss.backward()
        optimizer.step()
    # Move back to CPU before serializing so the receiver never needs CUDA.
    if (device=="GPU"):
        model.cpu()
    bmodel_ = fullmodel2base64(model)
    return bmodel_
# def getdataloader(dset = '/home/tedbest/Documents/mnist_train_0.csv'):
# #print(dset)
# train = pd.read_csv(dset)
# train_labels = train['label'].values
# train_data = train.drop(labels = ['label'], axis = 1)
# train_data = train_data.values.reshape(-1,28, 28)
# train_images_tensor = torch.tensor(train_data)/255.0
# train_labels_tensor = torch.tensor(train_labels)
# mnist = TensorDataset(train_images_tensor, train_labels_tensor)
# trainloader = DataLoader(mnist, batch_size=256, shuffle= True)
# return trainloader
class MNISTDataset(Dataset):
    """MNIST dataset wrapping pre-loaded feature/target sequences.

    Parameters
    ----------
    feature : sequence
        Per-sample images (e.g. float tensors of shape (28, 28)).
    target : sequence
        Per-sample labels, indexable in step with `feature`.
    transform : callable, optional
        Applied to each feature once, at construction time.
    """
    def __init__(self, feature, target, transform=None):
        self.Y = target
        if transform is not None:
            self.X = [transform(feature[i]) for i in range(len(feature))]
        else:
            # Bug fix: previously self.X was left empty when no transform was
            # given, so the dataset silently reported length 0. Store the
            # features as-is in that case.
            self.X = list(feature)
    def __len__(self):
        return len(self.X)
    def __getitem__(self, idx):
        return self.X[idx], self.Y[idx]
def getdataloader(dset = './mnist_test.csv', batch=256):
    """Build a shuffled DataLoader of augmented MNIST images from a CSV file.

    The CSV is expected to have a 'label' column plus 784 pixel columns.
    A random affine jitter is applied once per image when the dataset is
    constructed.
    """
    frame = pd.read_csv(dset)
    labels = frame['label'].values
    pixels = frame.drop(labels = ['label'], axis = 1).values.reshape(-1,28, 28)
    featuresTrain = torch.from_numpy(pixels)
    targetsTrain = torch.from_numpy(labels)
    data_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomAffine(degrees=45, translate=(0.1, 0.1), scale=(0.8, 1.2)),
        transforms.ToTensor(),
    ])
    train_set = MNISTDataset(featuresTrain.float(), targetsTrain, transform=data_transform)
    return torch.utils.data.DataLoader(train_set, batch_size = batch, shuffle = True, num_workers=4)
if __name__ == '__main__':
    # Command-line entry point: parse options and start the trainer server.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default="/home/tedbest/Documents/mnist_train_0.csv")
    parser.add_argument('--port', type=str, default="63387")
    parser.add_argument('--device', type=str, default="CPU") # GPU/CPU
    parser.add_argument('--batch', type=int, default=256)
    parser.add_argument('-f')  # presumably absorbs Jupyter's "-f <kernel file>" flag — confirm
    args = parser.parse_args()
    if (args.device=="GPU"):
        if torch.cuda.is_available():
            print("GPU found.")
        else:
            print("GPU not found.")
            exit(1)  # bug fix: exit non-zero when the requested GPU is absent
    logging.basicConfig()
    # Corruption fix: dataset-dump statistics fused onto this line were removed.
    serve(args.data, args.port, args.device, args.batch)
2d987782f603dc9882b5729a821433527afffdcc | 1,475 | py | Python | polls/views.py | alejandro-medici/django_docker | 6fe847837c244f506bd35809f2444f182bfc6500 | [
"MIT"
] | 1 | 2021-12-02T22:44:24.000Z | 2021-12-02T22:44:24.000Z | polls/views.py | alejandro-medici/django_docker | 6fe847837c244f506bd35809f2444f182bfc6500 | [
"MIT"
] | null | null | null | polls/views.py | alejandro-medici/django_docker | 6fe847837c244f506bd35809f2444f182bfc6500 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from django.template.loader import get_template
from .models import Poll
from time import timezone
from datetime import date
# Create your views here.
def index(request):
    """Render the site index page.

    Bug fix: the previous code passed the Template object returned by
    get_template() into render(), but render() expects a template *name*
    (or list of names). The debug print of the template object was also
    removed. Pass the template name straight through instead.
    """
    return render(request, './index.html')
def detail(request, poll_id):
    """Placeholder detail view that echoes the requested poll id."""
    message = "You're looking at poll %s." % poll_id
    return HttpResponse(message)
def results(request, poll_id):
    """Placeholder results view that echoes the requested poll id."""
    message = "You're looking at the results of poll %s." % poll_id
    return HttpResponse(message)
# CRUD
def create(request):
    """Create a hard-coded sample poll and report its new id."""
    poll = Poll(question="What's up?", pub_date=date.today())
    poll.save()
    return HttpResponse("You're creating a new poll with id %s." % poll.id)
def update(request, id):
    """Update a poll's question from POST data; reject other methods.

    Non-POST requests get the same "must send data" response the view has
    always returned (status 200).
    """
    if request.method != 'POST':
        return HttpResponse("You must send data using POST")
    poll = Poll.objects.get(id=id)
    poll.question = request.POST['question']
    poll.save()
    return HttpResponse("You're updating poll %s." % id)
def delete(request, id):
    """Delete the poll with the given id and confirm."""
    Poll.objects.get(id=id).delete()
    return HttpResponse("You're deleting poll %s." % id)
def read(request, id):
    """Fetch one poll and echo its string representation."""
    poll = Poll.objects.get(id=id)
    return HttpResponse("You're reading a poll. %s " % poll)
#### SECURITY PENDING TOPICS
# Every connection to the backend should carry an auth token.
# Todo API/CRUD tiene que tener un limite de queries... | 30.102041 | 79 | 0.699661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 429 | 0.290847 |
2d9a25324f21790c34116f8d2994220e3c4fb627 | 3,644 | py | Python | train.py | lyth031/ptb_lm | 71f687fdf41c6b981a306269c1341ea8a8347bb6 | [
"MIT"
] | null | null | null | train.py | lyth031/ptb_lm | 71f687fdf41c6b981a306269c1341ea8a8347bb6 | [
"MIT"
] | null | null | null | train.py | lyth031/ptb_lm | 71f687fdf41c6b981a306269c1341ea8a8347bb6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import tensorflow as tf
import time
import input as ip
import lm
import config as cf
import numpy as np
def run_epoch(session, model, eval_op=None, verbose=False):
    """Run one full pass over the model's input data and return perplexity.

    Parameters:
        session: the TF session to run ops in.
        model: object exposing cost/initial_state/final_state and `inputs`
            (with num_slice, slice_size, batch_size).
        eval_op: optional op (e.g. the training op) to run alongside the
            cost fetches; None means evaluation only.
        verbose: if True, print perplexity and speed roughly every 10% of
            the epoch.
    """
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = session.run(model.initial_state)
    fetches = {"cost": model.cost, "final_state": model.final_state}
    if eval_op is not None:
        fetches["eval_op"] = eval_op
    for step in range(model.inputs.num_slice):
        feed_dict = {}
        # Feed the RNN state produced by the previous slice back in, so the
        # state is carried across the whole epoch.
        for i, (c, h) in enumerate(model.initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h
        vals = session.run(fetches, feed_dict)
        cost = vals["cost"]
        state = vals["final_state"]
        costs += cost
        iters += model.inputs.slice_size
        if verbose and step % (model.inputs.num_slice // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                (step * 1.0 / model.inputs.num_slice, np.exp(costs / iters),
                iters * model.inputs.batch_size /
                (time.time() - start_time)))
    return np.exp(costs / iters)
def main(_):
    """Build the train/valid/test graphs, then train and evaluate the LM.

    All three models share weights via variable_scope reuse; the test model
    uses batch_size=1 and num_steps=1 for exact evaluation.
    """
    config = cf.Config()
    eval_config = cf.Config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    train_data, valid_data, test_data, _ = ip.get_raw_data(config.data_path)
    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
        with tf.name_scope("Train"):
            train_input = ip.Input(config=config, data=train_data, name="TrainInput")
            with tf.variable_scope("Model", reuse=None, initializer=initializer):
                m = lm.LMModel(is_training=True, config=config, inputs=train_input)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Training Rate", m.lr)
        with tf.name_scope("Valid"):
            valid_input = ip.Input(config=config, data=valid_data, name="ValidInput")
            # reuse=True shares the variables created by the training model.
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mvalid = lm.LMModel(is_training=False, config=config, inputs=valid_input)
            tf.summary.scalar("Validation Loss", mvalid.cost)
        with tf.name_scope("Test"):
            test_input = ip.Input(config=eval_config, data=test_data, name="TestInput")
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mtest = lm.LMModel(is_training=False, config=eval_config, inputs=test_input)
        # Supervisor handles checkpointing/restoring under config.save_path.
        sv = tf.train.Supervisor(logdir=config.save_path)
        config_proto = tf.ConfigProto(allow_soft_placement=False)
        with sv.managed_session(config=config_proto) as session:
            for i in range(config.max_epoch):
                # Keep the LR constant for lr_const_epoch epochs, then decay.
                lr_decay = config.lr_decay ** max(i + 1 - config.lr_const_epoch, 0)
                m.update_lr(session, config.learning_rate * lr_decay)
                print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session, m, eval_op=m.optim, verbose=True)
                print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
                valid_perplexity = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
            test_perplexity = run_epoch(session, mtest)
            print("Test Perplexity: %.3f" % test_perplexity)
            print("Saving model to %s." % config.save_path)
            sv.saver.save(session, config.save_path, global_step=sv.global_step)
if __name__ == "__main__":
    # tf.app.run() parses TF flags and then invokes main(_).
    tf.app.run()
| 40.043956 | 92 | 0.621844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 385 | 0.105653 |
2d9c04e64991b8b9420152f31a373fa2bb0db9b8 | 9,312 | py | Python | PICdecompression.py | lukestaniscia/PIC | 65e46bad184a17827234fc94313509c456b81986 | [
"MIT"
] | 2 | 2022-01-28T01:33:36.000Z | 2022-02-13T11:07:22.000Z | PICdecompression.py | lukestaniscia/PIC | 65e46bad184a17827234fc94313509c456b81986 | [
"MIT"
] | null | null | null | PICdecompression.py | lukestaniscia/PIC | 65e46bad184a17827234fc94313509c456b81986 | [
"MIT"
] | null | null | null | # PIC Decompressor
# By: Luke Staniscia
#import used libraries/packages
import math
from PIL import Image, ImageOps, ImageEnhance
import time
def key0(x):
    """Sort key that orders records by their first element."""
    first, *_rest = x
    return first
def key1(x):
    """Composite sort key over a [x0, x1, bitstring] record.

    Orders by x[0], then x[1], then the integer value of the bitstring with
    its first two (encoding) bits skipped. Relies on the module-global
    ``maxBits`` being set during decompression.
    """
    prefix = x[0] * maxBits + x[1]
    payload = int(x[2][2:], 2)
    return prefix * 1800 + payload
def sph2Cart(cord, precision = 3):
    """Convert [azimuth, elevation, radius] (angles in degrees, elevation
    measured from the +z axis) to a rounded Cartesian [x, y, z] list."""
    az_rad = cord[0] * (math.pi / 180)
    elev_rad = cord[1] * (math.pi / 180)
    radius = cord[2]
    x = round(radius * math.sin(elev_rad) * math.cos(az_rad), precision)
    y = round(radius * math.sin(elev_rad) * math.sin(az_rad), precision)
    z = round(radius * math.cos(elev_rad), precision)
    return [x, y, z]
def reverseTranslate(cord, precision = 3):
    """Undo the centroid translation: add the module-global ``globalCentroid``
    back onto the 3-vector `cord`, rounding each component."""
    return [round(component + offset, precision)
            for component, offset in zip(cord, globalCentroid)]
def PICdecompress(path, epsilon = 0.25, returnStatistics = False, notificationFrequency = 500):
    """Decompress a PIC-compressed molecular structure back to .pdb/.cif.

    Reads <path>_parameters.bin, the <path>_img_<k>.png images and
    <path>_meta.txt, reconstructs the atom coordinates encoded in the image
    bit-stream, and writes <path>_decompressed.pdb or .cif.

    Parameters:
        path: file-path prefix shared by the compressed artifacts.
        epsilon: bytes per tenth of a degree used at compression time; must
            match the value used by the compressor.
        returnStatistics: if True, return [numImages, decompressionTime].
        notificationFrequency: progress is printed every this many items.
    """
    # Extract the bare filename (portion after the last "/") for logging.
    filename = ""
    i = len(path) - 1
    while path[i] != "/" and i > -1:
        filename = path[i] + filename
        i = i - 1
    print("##########$$$$$$$$$$########## DECOMPRESSING " + filename + " ##########$$$$$$$$$$##########")
    startTime = time.time() #start tracking decompression time
    global bytesPerTenthDegree
    bytesPerTenthDegree = epsilon
    print("READING PARAMETER FILE")
    # Read the whole parameter file as one bit string (8 bits per byte).
    parametersFile = open(path + "_parameters.bin", "rb")
    parameterBitsRead = ""
    byte = parametersFile.read(1)
    while byte:
        newBits = bin(byte[0])[2:]
        while len(newBits) < 8:
            newBits = "0" + newBits
        parameterBitsRead = parameterBitsRead + newBits
        byte = parametersFile.read(1)
    parametersFile.close()
    # Decode variable-length integers: the MSB of each byte marks the start
    # of a new value; the first payload bit of a value is its sign.
    firstByte = True
    tracker = 1
    parameters = []
    currentParameterBits = ""
    while len(parameterBitsRead) > 0:
        currentBits = parameterBitsRead[:8]
        parameterBitsRead = parameterBitsRead[8:]
        if currentBits[0] == "1" and firstByte == False: #new value started
            print("Reading Parameter " + str(tracker + 1))
            parameter = int(currentParameterBits[1:],2)
            if currentParameterBits[0] == "1":
                parameter = (-1)*parameter
            parameters = parameters + [parameter]
            currentParameterBits = currentBits[1:]
            tracker = tracker + 1
        else:
            currentParameterBits = currentParameterBits + currentBits[1:]
        firstByte = False
    print("Reading Parameter " + str(tracker + 1))
    parameter = int(currentParameterBits[1:],2)
    if currentParameterBits[0] == "1":
        parameter = (-1)*parameter
    parameters = parameters + [parameter]
    print("Adjusting and Assiging Parameters")
    for i in range(3):
        parameters[len(parameters)-1-i] = parameters[len(parameters)-1-i]/1000 #converting integer encoded values back to floats
    # Layout: [numImages, per-image cropping pairs..., centroid x, y, z].
    numImages = parameters[0]
    croppingParameters = parameters[1:len(parameters)-3]
    global globalCentroid
    globalCentroid = parameters[len(parameters)-3:len(parameters)]
    print("READING IMAGE FILES")
    # Load each PNG into a column-major list of pixel values.
    Images = []
    for k in range(numImages):
        print("Reading Image " + str(k + 1) + " of " + str(numImages))
        imageObject = Image.open(path + "_img_" + str(k+1) + ".png")
        imageWidth = imageObject.width
        imageHeight = imageObject.height
        Images = Images + [[[0 for row in range(imageHeight)] for col in range(imageWidth)]]
        pixels = imageObject.load()
        for i in range(imageWidth):
            for j in range(imageHeight):
                Images[k][i][j] = pixels[i,j]
    print("REVERSING CROPPING")
    for i in range(numImages):
        print("Reversing Horizontal Cropping on Image " + str(i + 1) + " of " + str(numImages))
        imageHeight = len(Images[i][0])
        Images[i] = [[0 for row in range(imageHeight)] for j in range(croppingParameters[2*i])] + Images[i] #adding back all black columns on the left and right sides of the image(s)
        if i == 0:
            imageWidth = len(Images[0])
        else:
            Images[i] = Images[i] + [[0 for row in range(imageHeight)] for j in range(imageWidth - len(Images[i]))]
        print("Reversing Vertical Cropping on Image " + str(i + 1) + " of " + str(numImages))
        for j in range(imageWidth):
            Images[i][j] = [0 for row in range(croppingParameters[2*i+1])] + Images[i][j] + [0 for row in range(math.floor(3600*bytesPerTenthDegree - imageHeight - croppingParameters[2*i+1]))] #adding back all black rows on the top and bottom sides of the image(s)
    print("EXTRACTING DATA FROM IMAGE")
    # Walk each column's bit string; a set bit starts a data point whose
    # second bit distinguishes short points from long (pointer-carrying) ones.
    tracker = 0
    queue = []
    global maxBits
    maxBits = len(Images[0][0])*8
    for image in Images:
        for i in range(len(image)):
            if (i + 1) % notificationFrequency == 0:
                print("Extracting Data from Image " + str(tracker + 1) + " of " + str(numImages) + "; Column " + str(i + 1) + " of " + str(len(image)))
            data = ""
            for j in range(len(image[0])):
                newBits = bin(image[i][j])[2:]
                while len(newBits) < 8:
                    newBits = "0" + newBits
                data = data + newBits
            j = 0
            while len(data) > 0:
                if data[0] == "0":
                    data = data[1:]
                    j = j + 1
                else:
                    if data[1] == "0": #short data point
                        queue = queue + [[i, j, data[0:2 + math.ceil(math.log(1801,2))]]]
                        data = data[2 + math.ceil(math.log(1801,2)):]
                        j = j + 2 + math.ceil(math.log(1801,2))
                    else: #long data point
                        queue = queue + [[i, j, data[0:2 + math.ceil(math.log(1801,2)) + math.ceil(math.log(maxBits,2))]]]
                        data = data[2 + math.ceil(math.log(1801,2)) + math.ceil(math.log(maxBits,2)):]
                        j = j + 2 + math.ceil(math.log(1801,2)) + math.ceil(math.log(maxBits,2))
        tracker = tracker + 1
    print("ADJUSTING EXTRACTED DATA")
    tracker = 0
    for data in queue:
        if (tracker + 1) % notificationFrequency == 0:
            print("Adjusting Data Point " + str(tracker + 1) + " of " + str(len(queue)))
        if data[2][1] == "0": #short data point
            data[1] = math.trunc(data[1] - data[1] % (bytesPerTenthDegree*8)) #small adjustment to primary intended position
        else: #long data point
            data[1] = math.trunc((data[1] - int(data[2][2:2 + math.ceil(math.log(maxBits,2))],2) - bytesPerTenthDegree*8) % maxBits) #adjust to primary intented position
            data[2] = data[2][0] + "0" + data[2][2 + math.ceil(math.log(maxBits,2)):] #remove pointer
        tracker = tracker + 1
    queue.sort(key = key1) #sort queue so data points are in a standardized order
    print("Reversing Transformations")
    # Convert each (column, bit offset, bits) record back into Cartesian
    # coordinates and format them as fixed-precision strings.
    tracker = 0
    maxLengthCartCords = [0, 0, 0]
    adjustedQueue = []
    for data in queue:
        if (tracker + 1) % notificationFrequency == 0:
            print("Writing Data Point " + str(tracker + 1) + " of " + str(len(queue)))
        r = round(data[0]/10,1)
        elev = round(int(data[2][2:],2)/10,1) #first two bits are encoding data
        az = round(data[1]/(8*10*bytesPerTenthDegree),1)
        cartCords = reverseTranslate(sph2Cart([az,elev,r]))
        for i in range(3):
            cord = str(round(cartCords[i],3))
            while len(cord.split(".")[1]) < 3:
                cord = cord + "0"
            cartCords[i] = cord
            if len(cord) > maxLengthCartCords[i]:
                maxLengthCartCords[i] = len(cord)
        adjustedQueue = adjustedQueue + [cartCords]
        tracker = tracker + 1
    queue = adjustedQueue
    print("WRITING DECOMPRESSED FILE")
    print("Recombining Metadata and Coordinates")
    metaFile = open(path + "_meta.txt", "r")
    firstMetaLine = metaFile.readline()
    # A leading "data_" record means the metadata came from a .cif file.
    if firstMetaLine[:5] == "data_":
        filenameExtension = ".cif"
    else:
        filenameExtension = ".pdb"
    tracker = 0
    i = 0
    writeQueue = [firstMetaLine]
    atomQueue = []
    if filenameExtension == ".pdb":
        # PDB: coordinates occupy fixed columns 31-54, right-justified to 8.
        for entry in metaFile:
            if (tracker + 1) % notificationFrequency == 0:
                print("Recombining Line #" + str(tracker + 1))
            if entry[:4] != "ATOM" and entry[:6] != "HETATM":
                writeQueue = writeQueue + [entry]
            else:
                writeQueue = writeQueue + [""]
                cartCords = queue[i]
                i = i + 1
                for j in range(3):
                    while len(cartCords[j]) < 8:
                        cartCords[j] = " " + cartCords[j]
                s = entry[:30]
                for j in range(3):
                    s = s + cartCords[j]
                s = s + entry[30:]
                atomQueue = atomQueue + [[int(entry[6:11]), s]]
            tracker = tracker + 1
    else:
        # CIF: whitespace-tokenize the line and splice the coordinates back
        # in before the 11th token, left-justified to the max column width.
        for entry in metaFile:
            if (tracker + 1) % notificationFrequency == 0:
                print("Recombining Line #" + str(tracker + 1))
            if entry[:4] != "ATOM" and entry[:6] != "HETATM":
                writeQueue = writeQueue + [entry]
            else:
                writeQueue = writeQueue + [""]
                cartCords = queue[i]
                i = i + 1
                for j in range(3):
                    while len(cartCords[j]) < maxLengthCartCords[j]:
                        cartCords[j] = cartCords[j] + " "
                tokens = entry.split(" ")
                previousToken = ""
                adjustedTokens = []
                for token in tokens:
                    if token == "":
                        previousToken = previousToken + " "
                    else:
                        adjustedTokens = adjustedTokens + [previousToken]
                        previousToken = token
                tokens = adjustedTokens[1:] + [previousToken]
                s = ""
                for j in range(len(tokens)):
                    if j != 10:
                        s = s + tokens[j] + " "
                    else:
                        for k in range(3):
                            s = s + cartCords[k] + " "
                        s = s + tokens[10] + " "
                s = s[:len(s) - 1]
                atomQueue = atomQueue + [[int(tokens[1]), s]]
            tracker = tracker + 1
    atomQueue.sort(key = key0)
    print("Writing Decompressed File")
    # Interleave the recombined atom lines (placeholders "") with the
    # untouched metadata lines, in original file order.
    tracker = 0
    i = 0
    decompressedFile = open(path + '_decompressed' + filenameExtension,'w')
    for entry in writeQueue:
        if (tracker + 1) % notificationFrequency == 0:
            print("Writing Line " + str(tracker + 1) + " of " + str(len(writeQueue)))
        if entry == "":
            decompressedFile.write(atomQueue[i][1])
            i = i + 1
        else:
            decompressedFile.write(entry)
        tracker = tracker + 1
    decompressedFile.close()
    endTime = time.time() #stop tracking decompression time
    if returnStatistics == True:
        print("COMPUTING DECOMPRESSION TIME")
        decompressionTime = endTime - startTime
        return [numImages, decompressionTime]
| 34.361624 | 255 | 0.641753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,500 | 0.161082 |
2d9ca014f6148bba68bdfa5b4c7582e543d8a366 | 320 | py | Python | FlyBIDS/utils.py | PennLINC/FlyBIDS | 0b44d624c75f537c668d75664c239c51100bdf8d | [
"MIT"
] | null | null | null | FlyBIDS/utils.py | PennLINC/FlyBIDS | 0b44d624c75f537c668d75664c239c51100bdf8d | [
"MIT"
] | null | null | null | FlyBIDS/utils.py | PennLINC/FlyBIDS | 0b44d624c75f537c668d75664c239c51100bdf8d | [
"MIT"
] | 1 | 2021-11-25T21:33:13.000Z | 2021-11-25T21:33:13.000Z | import re
def get_nested(dct, *keys):
    """Follow a chain of keys into nested mappings.

    Returns the value at dct[k0][k1]... or None as soon as a key is missing
    or an intermediate value is not subscriptable. With no keys, returns
    `dct` itself.
    """
    node = dct
    for key in keys:
        try:
            node = node[key]
        except (KeyError, TypeError):
            return None
    return node
def extract(string, pattern):
    """Return the first regex match of `pattern` in `string`, or '' if none."""
    match = re.search(pattern, string)
    return match.group(0) if match else ''
| 17.777778 | 38 | 0.553125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0.00625 |
2d9d64a82c22c23125bda210680688498f197ef6 | 16,493 | py | Python | bayes_optim/acquisition_optim/one_plus_one_cma_es.py | zdanial/Bayesian-Optimization | a4779e992da15d21fa3fc425293cfb1f2621f81f | [
"BSD-3-Clause"
] | null | null | null | bayes_optim/acquisition_optim/one_plus_one_cma_es.py | zdanial/Bayesian-Optimization | a4779e992da15d21fa3fc425293cfb1f2621f81f | [
"BSD-3-Clause"
] | null | null | null | bayes_optim/acquisition_optim/one_plus_one_cma_es.py | zdanial/Bayesian-Optimization | a4779e992da15d21fa3fc425293cfb1f2621f81f | [
"BSD-3-Clause"
] | null | null | null | import logging
from copy import copy
from typing import Callable, Dict, List, Union
import numpy as np
from scipy.linalg import solve_triangular
from ..search_space import RealSpace, SearchSpace
from ..utils import dynamic_penalty, get_logger, handle_box_constraint, set_bounds
# Type aliases used throughout this module for readability.
Vector = List[float]
Matrix = List[Vector]
__authors__ = ["Hao Wang"]
class OnePlusOne_CMA(object):
    """(1+1)-CMA-ES: elitist evolution strategy with success-rule step-size
    adaptation and evolution-path covariance update."""
    def __init__(
        self,
        search_space: SearchSpace,
        obj_fun: Callable,
        args: Dict = None,
        h: Callable = None,
        g: Callable = None,
        x0: Union[str, Vector, np.ndarray] = None,
        sigma0: Union[float] = None,
        C0: Union[Matrix, np.ndarray] = None,
        ftarget: Union[int, float] = None,
        max_FEs: Union[int, str] = np.inf,
        minimize: bool = True,
        n_restart: int = 0,
        xtol: float = 1e-4,
        ftol: float = 1e-4,
        verbose: bool = False,
        log_file: str = None,
        random_seed: int = 42,
        **kwargs,
    ):
        """Hereafter, we use the following customized
        types to describe the usage:
        - Vector = List[float]
        - Matrix = List[Vector]
        Parameters
        ----------
        dim : int
            Dimensionality of the search space.
        obj_fun : Callable
            The objective function to be minimized.
        args: Tuple
            The extra parameters passed to function `obj_fun`.
        h : Callable, optional
            The equality constraint function, by default None.
        g : Callable, optional
            The inequality constraint function, by default None.
        x0 : Union[str, Vector, np.ndarray], optional
            The initial guess (by default None) which must fall between lower
            and upper bounds, if non-infinite values are provided for `lb` and
            `ub`. Note that, `x0` must be provided when `lb` and `ub` both
            take infinite values.
        sigma0 : Union[float], optional
            The initial step size, by default None
        C0 : Union[Matrix, np.ndarray], optional
            The initial covariance matrix which must be positive definite,
            by default None. Any non-positive definite input will be ignored.
        lb : Union[float, str, Vector, np.ndarray], optional
            The lower bound of search variables. When it is not a `float`,
            it must have the same length as `upper`, by default `-np.inf`.
        ub : Union[float, str, Vector, np.ndarray], optional
            The upper bound of search variables. When it is not a `float`,
            it must have the same length as `lower`, by default `np.inf`.
        ftarget : Union[int, float], optional
            The target value to hit, by default None.
        max_FEs : Union[int, str], optional
            Maximal number of function evaluations to make, by default `np.inf`.
        minimize : bool, optional
            To minimize or maximize, by default True.
        xtol : float, optional
            Absolute error in xopt between iterations that is acceptable for
            convergence, by default 1e-4.
        ftol : float, optional
            Absolute error in func(xopt) between iterations that is acceptable
            for convergence, by default 1e-4.
        n_restart : int, optional
            The maximal number of random restarts to perform when stagnation is
            detected during the run. The random restart can be switched off by
            setting `n_restart` to zero (the default value).
        verbose : bool, optional
            Verbosity of the output, by default False.
        logger : str, optional
            Name of the logger file, by default None, which turns off the
            logging behaviour.
        random_seed : int, optional
            The seed for pseudo-random number generators, by default None.
        """
        assert isinstance(search_space, RealSpace)
        lb, ub = list(zip(*search_space.bounds))
        self.search_space = search_space
        self.dim: int = search_space.dim
        self.obj_fun: Callable = obj_fun
        self.h: Callable = h
        self.g: Callable = g
        self.minimize: bool = minimize
        self.ftarget: float = ftarget
        self.lb: np.ndarray = set_bounds(lb, self.dim)
        self.ub: np.ndarray = set_bounds(ub, self.dim)
        self.sigma = sigma0
        self.sigma0 = self.sigma
        self.args: Dict = args if args else {}
        self.n_restart: int = max(0, int(n_restart))
        self._restart: bool = False
        self.xopt: np.ndarray = None
        self.fopt: float = None
        self.fopt_penalized: float = None
        self.eval_count: int = 0
        self.iter_count: int = 0
        self.max_FEs: int = int(eval(max_FEs)) if isinstance(max_FEs, str) else max_FEs
        self._better = (lambda a, b: a <= b) if self.minimize else (lambda a, b: a >= b)
        self._init_aux_var(kwargs)
        self._init_covariance(C0)
        self._init_logging_var()
        self.stop_dict: Dict = {}
        self._exception: bool = False
        self.verbose: bool = verbose
        self.logger: logging.Logger = get_logger(
            logger_id=self.__class__.__name__, file=log_file, console=verbose
        )
        self.random_seed = random_seed
        # parameters for stopping criteria
        # NOTE: `self._delta_f = self.ftol / self._w ** (5 * self.dim)`
        # and `self._w = 0.9` lead to a tolerance of
        # ~`5 * self.dim` iterations of stagnation.
        self.xtol: float = xtol
        self.ftol: float = ftol
        self._w: float = 0.9
        self._delta_x: float = self.xtol / self._w ** (5 * self.dim)
        self._delta_f: float = self.ftol / self._w ** (5 * self.dim)
        self._stop: bool = False
        # set the initial search point
        self.x = x0
    def _init_aux_var(self, opts):
        """Initialize the strategy constants (success rule, learning rates)
        from `opts`, falling back to the defaults below."""
        self.prob_target = opts["p_succ_target"] if "p_succ_target" in opts else 2 / 11
        self.threshold = opts["p_threshold"] if "p_threshold" in opts else 0.44
        self.d = opts["d"] if "d" in opts else 1 + self.dim / 2
        self.ccov = opts["ccov"] if "ccov" in opts else 2 / (self.dim ** 2 + 6)
        self.cp = opts["cp"] if "cp" in opts else 1 / 12
        self.cc = opts["cc"] if "cc" in opts else 2 / (self.dim + 2)
        self.success_rate: float = self.prob_target
        self.pc: np.ndarray = np.zeros(self.dim)
        self._coeff: float = self.cc * (2 - self.cc)
    def _init_covariance(self, C):
        """Set the initial covariance (identity when C is None; otherwise
        delegate to the `C` setter, which ignores non-PSD input)."""
        if C is None:
            self._C = np.eye(self.dim)
            self._A = np.eye(self.dim)
        else:
            self.C = C
    def _init_logging_var(self):
        """Create the per-iteration history containers."""
        # parameters for logging the history
        self.hist_fopt: List = []
        self.hist_fopt_penalized: List = []
        self.hist_xopt: List = []
        self._hist_delta_x: List = []
        self._hist_delta_f: List = []
    @property
    def random_seed(self):
        """Seed for numpy's global PRNG (None leaves the PRNG untouched)."""
        return self._random_seed
    @random_seed.setter
    def random_seed(self, seed):
        if seed:
            self._random_seed = int(seed)
            if self._random_seed:
                # NOTE: this seeds numpy's *global* random state.
                np.random.seed(self._random_seed)
    @property
    def C(self):
        """The covariance matrix; assignments failing Cholesky are ignored."""
        return self._C
    @C.setter
    def C(self, C):
        if C is not None:
            try:
                A = np.linalg.cholesky(C)
                if np.all(np.isreal(A)):
                    # TODO: `_A` should be a private attribute
                    self._A = A
                    self._C = C
            except np.linalg.LinAlgError:
                # silently keep the previous covariance on non-PSD input
                pass
    @property
    def x(self):
        """The current parent/incumbent point."""
        return self._x
    @x.setter
    def x(self, x):
        # Setting `x` also evaluates it and feeds the result to `tell`.
        if x is not None:
            x = eval(x) if isinstance(x, str) else x
            x = np.asarray(x)
            assert np.all(x - self.lb >= 0)
            assert np.all(x - self.ub <= 0)
        else:
            # sample `x` u.a.r. in `[lb, ub]`
            assert all(~np.isinf(self.lb)) & all(~np.isinf(self.ub))
            x = (self.ub - self.lb) * np.random.rand(self.dim) + self.lb
        self._x = x
        y = self.evaluate(x)
        penalty = self.penalize(x)
        self.tell(x, y, penalty)
    @property
    def sigma(self):
        """The global step size."""
        return self._sigma
    @sigma.setter
    def sigma(self, sigma):
        if sigma is None:
            # default: one fifth of the widest box-constraint range
            assert all(~np.isinf(self.lb)) & all(~np.isinf(self.ub))
            sigma = np.max(self.ub - self.lb) / 5
        assert sigma > 0
        self._sigma = sigma
    def run(self):
        """Iterate until a stopping criterion fires; return (xopt, fopt, stop_dict)."""
        while not self._stop:
            self.step()
        return self.xopt, self.fopt, self.stop_dict
    def step(self):
        """One ask-evaluate-tell iteration plus logging/stop/restart checks."""
        x = self.ask()
        y = self.evaluate(x)
        self.tell(x, y, self.penalize(x))
        self.logging()
        self.check_stop()
        self.restart()
    def penalize(self, x: np.ndarray):
        """Calculate the dynamic penalty once the constraint functions are provided
        Parameters
        ----------
        x : np.ndarray
            the trial point to check against the constraints
        """
        return dynamic_penalty(x, self.iter_count + 1, self.h, self.g, minimize=self.minimize)
    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """Evaluate the objective at `x`, counting the function evaluation.

        NOTE(review): `fval` is only assigned when `self.args` is a
        list/tuple or a dict; __init__ guarantees a dict, so other types
        would raise UnboundLocalError here.
        """
        self.eval_count += 1
        if isinstance(self.args, (list, tuple)):
            fval = self.obj_fun(x, *self.args)
        elif isinstance(self.args, dict):
            fval = self.obj_fun(x, **self.args)
        return fval
    def restart(self):
        """Reset the strategy state for a random restart when one is pending."""
        if self._restart:
            self.logger.info("restarting... ")
            self.x = None
            self.sigma = self.sigma0
            self.pc = np.zeros(self.dim)
            self._C = np.eye(self.dim)
            self._A = np.eye(self.dim)
            # NOTE(review): these tolerances use a flat factor of 200, unlike
            # the `1 / w ** (5 * dim)` formula used in __init__ — confirm.
            self._delta_x = self.xtol * 200
            self._delta_f = self.ftol * 200
            self.stop_dict = {}
            self.n_restart -= 1
    def ask(self) -> np.ndarray:
        """The mutation operator
        Parameters
        ----------
        n_point : int, optional
            The number of mutants, which is always 1. This argument is only
            meant to keep the function interface consistant.
        Returns
        -------
        np.ndarray
            The mutation vector
        """
        z = np.random.randn(self.dim).dot(self._A.T)
        x = self._x + self.sigma * z
        x = handle_box_constraint(x, self.lb, self.ub)
        # rounding if a coarser numerical precision is provided
        x = self.search_space.round(x).ravel()
        # NOTE: experimental correction to the step-size when the box constraints are violated
        # self.sigma = np.min(np.abs((x - self._x) / z))
        return x
    def tell(self, x: np.ndarray, y: np.ndarray, penalty: float = 0):
        """Update the incumbent, step size and covariance from one evaluation.

        `penalty` is added to `y` for the selection decision; `xopt`/`fopt`
        are only updated by feasible (penalty == 0) improvements.
        """
        if self._stop:
            self.logger.info("The optimizer is stopped and `tell` should not be called.")
            return
        # TODO: this might not be necessary
        if hasattr(y, "__iter__"):
            y = y[0]
        if hasattr(penalty, "__iter__"):
            penalty = penalty[0]
        y_penalized = y + penalty
        if self.xopt is None:
            # first evaluation ever: just record it as the incumbent
            self.fopt = y
            self.fopt_penalized = y_penalized
            self.xopt = x
            return
        success = self._better(y_penalized, self.fopt_penalized)
        z = (x - self._x) / self._sigma
        self._update_step_size(success)
        # exponentially-decayed progress measures used by the stop criteria
        self._delta_f *= self._w
        self._delta_x *= self._w
        if success:
            self._delta_f += (1 - self._w) * abs(self.fopt_penalized - y_penalized)
            self._delta_x += (1 - self._w) * np.sqrt(sum((self._x - x) ** 2))
            self.fopt_penalized = y_penalized
            self._x = copy(x)
            self._update_covariance(z)
        if success and penalty == 0:
            self.xopt = copy(self._x)
            self.fopt = y
        self._handle_exception()
        self.iter_count += 1
        if self.verbose:
            self.logger.info(f"iteration {self.iter_count}")
            self.logger.info(f"fopt: {self.fopt}")
            if self.h is not None or self.g is not None:
                _penalty = (self.fopt - self.fopt_penalized) * (-1) ** self.minimize
                self.logger.info(f"penalty: {_penalty[0]:.4e}")
            self.logger.info(f"xopt: {self.xopt.tolist()}")
            self.logger.info(f"sigma: {self._sigma}\n")
    def logging(self):
        """Append the current incumbent to the history lists."""
        self.hist_fopt += [self.fopt]
        self.hist_xopt += [self.xopt.tolist()]
        self.hist_fopt_penalized += [self.fopt_penalized]
    def check_stop(self):
        """Populate `stop_dict` and set the stop/restart flags accordingly."""
        if self.ftarget is not None and self._better(self.fopt, self.ftarget):
            self.stop_dict["ftarget"] = self.fopt
        if self.eval_count >= self.max_FEs:
            self.stop_dict["FEs"] = self.eval_count
        # TODO: add this as an option: lower and upper bounds for regular sigmas
        if self.sigma < 1e-8 or self.sigma > 1e8:
            self.stop_dict["sigma"] = self.sigma
        if self._delta_f < self.ftol:
            self.stop_dict["ftol"] = self._delta_f
        if self._delta_x < self.xtol:
            self.stop_dict["xtol"] = self._delta_x
        if "ftarget" in self.stop_dict or "FEs" in self.stop_dict:
            # hard criteria always terminate; soft criteria may restart
            self._stop = True
        else:
            if self.n_restart > 0:
                self._restart = bool(self.stop_dict)
            else:
                self._stop = bool(self.stop_dict)
    def _update_covariance(self, z):
        """Rank-one covariance update driven by the evolution path `pc`."""
        if self.success_rate < self.threshold:
            self.pc = (1 - self.cc) * self.pc + np.sqrt(self._coeff) * z
            self._C = (1 - self.ccov) * self._C + self.ccov * np.outer(self.pc, self.pc)
        else:
            self.pc = (1 - self.cc) * self.pc
            self._C = (1 - self.ccov * (1 - self._coeff)) * self._C + self.ccov * np.outer(self.pc, self.pc)
        # symmetrize to counter numerical drift before re-factorizing
        self._C = np.triu(self._C) + np.triu(self._C, 1).T
        self._update_A(self._C)
    def _update_step_size(self, success):
        """1/5th-style success-rule adaptation of the step size."""
        prob_target = self.prob_target
        self.success_rate = (1 - self.cp) * self.success_rate + self.cp * success
        self._sigma *= np.exp((self.success_rate - prob_target) / (1 - prob_target) / self.d)
    def _update_A(self, C):
        """Refresh the Cholesky factor of `C`; flag failures for later reset."""
        if np.any(np.isinf(C)):
            self._exception = True
        else:
            try:
                A = np.linalg.cholesky(C)
                if np.any(~np.isreal(A)):
                    self._exception = True
                else:
                    self._A = A
            except np.linalg.LinAlgError:
                self._exception = True
    def _handle_exception(self):
        """Reset covariance/step size to defaults after numerical failures."""
        if self._sigma < 1e-8 or self._sigma > 1e8:
            self._exception = 1
        if self._exception:
            self._C = np.eye(self.dim)
            self.pc = np.zeros(self.dim)
            self._A = np.eye(self.dim)
            self._sigma = self.sigma0
            self._exception = False
class OnePlusOne_Cholesky_CMA(OnePlusOne_CMA):
    """(1+1)-Cholesky-CMA-ES improves its base class algorithm by taking advantage of
    Cholesky's decomposition to update the covariance, which is computationally cheaper
    """
    def _init_covariance(self, C):
        """Initialize the Cholesky factor `A` from `C`; fall back to the
        identity when `C` is None or not positive definite."""
        reset = False
        if C is not None:
            try:
                A = np.linalg.cholesky(C)
                if np.any(~np.isreal(A)):
                    reset = True
                else:
                    self.A = A
            except np.linalg.LinAlgError:
                reset = True
        if C is None or reset:
            self.A = np.eye(self.dim)
    @property
    def A(self):
        """Lower-triangular Cholesky factor of the covariance matrix."""
        return self._A
    @A.setter
    def A(self, A):
        # Setting A also caches its inverse via a triangular solve.
        assert np.all(np.triu(A, k=1).ravel() == 0)
        self._A = A
        self._A_inv = solve_triangular(A, np.eye(self.dim), lower=True)
    def _update_covariance(self, z):
        """Rank-one update applied directly to `A` and `A_inv`, avoiding a
        full Cholesky re-factorization of the covariance."""
        cb = self.ccov
        if self.success_rate < self.threshold:
            self.pc = (1 - self.cc) * self.pc + np.sqrt(self._coeff) * z
            ca = 1 - self.ccov
        else:
            self.pc = (1 - self.cc) * self.pc
            ca = (1 - self.ccov) + self.ccov * self.cc * (2 - self.cc)
        w = self.pc.dot(self._A_inv.T)
        w_ = w.dot(self._A_inv)
        L = np.sum(w ** 2)
        self._A += (np.sqrt(1 + L * cb / ca) - 1) / L * np.outer(self.pc, w)
        self._A *= np.sqrt(ca)
        self._A_inv -= (1 - 1 / np.sqrt(1 + L * cb / ca)) / L * np.outer(w, w_)
        self._A_inv *= 1 / np.sqrt(ca)
| 35.166311 | 108 | 0.560056 | 16,135 | 0.978294 | 0 | 0 | 1,771 | 0.107379 | 0 | 0 | 4,587 | 0.278118 |
2d9e596053cbf26c358f0b0d5fec8cd4ccabf164 | 114 | py | Python | app/models.py | emmapraise/tweekners | c4c1af7bbf606d1e1b031482ebe9bed8fb62292a | [
"MIT"
] | 1 | 2021-04-30T15:55:00.000Z | 2021-04-30T15:55:00.000Z | app/models.py | emmapraise/tweekners | c4c1af7bbf606d1e1b031482ebe9bed8fb62292a | [
"MIT"
] | null | null | null | app/models.py | emmapraise/tweekners | c4c1af7bbf606d1e1b031482ebe9bed8fb62292a | [
"MIT"
] | null | null | null | from . import db
class User(db.Model):
    """Data model for a user account (SQLAlchemy)."""
    # Table name in the database; column definitions presumably follow in
    # the full file (truncated here).
    __tablename__ = 'Users'
| 22.8 | 38 | 0.605263 | 88 | 0.77193 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.359649 |
2da001704782c80e590fd5c245e888c659cc6581 | 4,526 | py | Python | tests/tests/test_provides_depends.py | NilsOlavKJohansen/integration | f6f70ad03e2518d76b90ebbfe37ff309a3c442e7 | [
"Apache-2.0"
] | null | null | null | tests/tests/test_provides_depends.py | NilsOlavKJohansen/integration | f6f70ad03e2518d76b90ebbfe37ff309a3c442e7 | [
"Apache-2.0"
] | 66 | 2021-02-01T06:47:58.000Z | 2022-02-28T16:03:48.000Z | tests/tests/test_provides_depends.py | NilsOlavKJohansen/integration | f6f70ad03e2518d76b90ebbfe37ff309a3c442e7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import time
from .. import conftest
from ..common_setup import enterprise_no_client
from .common_update import update_image, common_update_procedure
from .mendertesting import MenderTesting
from ..MenderAPI import auth, devauth, deploy, image, logger
from testutils.infra.device import MenderDevice
class TestProvidesDependsEnterprise(MenderTesting):
    """Integration test for Mender artifact provides/depends matching.

    Runs against a live enterprise backend plus a dockerized client
    (supplied by the ``enterprise_no_client`` fixture).
    """
    def test_update_provides_depends(self, enterprise_no_client):
        """
        Perform three consecutive deployments: the first adds a virtual
        provides to the device, the second depends on that provides (and
        succeeds), and the third re-deploys the depends artifact after the
        provides has been consumed, so it must end with 'noartifact'.
        """
        # Create tenant user
        auth.reset_auth_token()
        auth.new_tenant("admin", "bob@builder.org", "secret-service", "enterprise")
        token = auth.current_tenant["tenant_token"]
        # Create client setup with tenant token
        enterprise_no_client.new_tenant_docker_client("mender-client", token)
        mender_device = MenderDevice(enterprise_no_client.get_mender_clients()[0])
        host_ip = enterprise_no_client.get_virtual_network_host_ip()
        # Wait for ssh to be open
        mender_device.ssh_is_opened()
        # Check that the device has authorized with the backend.
        devauth.get_devices(expected_devices=1)
        devauth.accept_devices(1)
        assert len(devauth.get_devices_status("accepted")) == 1
        # Update client with an artifact carrying a custom provides.
        def prepare_provides_artifact(artifact_file, artifact_id):
            cmd = (
                # Package tests folder in the artifact, just a random folder.
                "directory-artifact-gen -o %s -n %s -t docker-client -d /tmp/test_file_update_module tests -- --provides rootfs-image.directory.foo:bar"
                % (artifact_file, artifact_id)
            )
            logger.info("Executing: " + cmd)
            subprocess.check_call(cmd, shell=True)
            return artifact_file
        deployment_id, _ = common_update_procedure(
            make_artifact=prepare_provides_artifact,
            # We use verify_status=False, because update module updates are so
            # quick that it sometimes races past the 'inprogress' status without
            # the test framework having time to register it. That's not really
            # the part we're interested in though, so just skip it.
            verify_status=False,
        )
        deploy.check_expected_status("finished", deployment_id)
        # Issue another update which depends on the custom provides
        def prepare_depends_artifact(artifact_file, artifact_id):
            cmd = (
                # Package tests folder in the artifact, just a random folder.
                "directory-artifact-gen -o %s -n %s -t docker-client -d /tmp/test_file_update_module tests -- --depends rootfs-image.directory.foo:bar"
                % (artifact_file, artifact_id)
            )
            logger.info("Executing: " + cmd)
            subprocess.check_call(cmd, shell=True)
            return artifact_file
        deployment_id, _ = common_update_procedure(
            make_artifact=prepare_depends_artifact, verify_status=False,
        )
        deploy.check_expected_status("finished", deployment_id)
        # Issue a third update with the same update as previous, this time
        # with insufficient provides -> no artifact status
        deployment_id, _ = common_update_procedure(
            make_artifact=prepare_depends_artifact, verify_status=False
        )
        # Retry for at most 60 seconds checking for deployment status update
        stat = None
        noartifact = 0
        for i in range(60):
            time.sleep(1)
            stat = deploy.get_statistics(deployment_id)
            if stat.get("noartifact") == 1:
                noartifact = 1
                break
        assert stat is not None
        assert noartifact == 1
| 42.299065 | 152 | 0.670349 | 3,578 | 0.790544 | 0 | 0 | 0 | 0 | 0 | 0 | 2,008 | 0.443659 |
2da05b7bb91fd333642597816cb7392931c551a1 | 276 | py | Python | tests/_support/docstrings.py | techtonik/invoke | f776d27b3cac96f98dd5ad9d1bb3b41a3e54346a | [
"BSD-2-Clause"
] | null | null | null | tests/_support/docstrings.py | techtonik/invoke | f776d27b3cac96f98dd5ad9d1bb3b41a3e54346a | [
"BSD-2-Clause"
] | null | null | null | tests/_support/docstrings.py | techtonik/invoke | f776d27b3cac96f98dd5ad9d1bb3b41a3e54346a | [
"BSD-2-Clause"
] | null | null | null | from invoke import task
# Invoke task fixtures for docstring-handling tests.
# NOTE(review): the docstring bodies below (their exact text, line counts
# and leading whitespace) appear to be the data under test -- do not
# normalize or "fix" them.
@task
def no_docstring():
    pass
# One-line docstring with a dangling closing-quote line.
@task
def one_line():
    """foo
    """
# Two-line docstring body.
@task
def two_lines():
    """foo
    bar
    """
# Docstring whose first line is empty (leading-whitespace case).
@task
def leading_whitespace():
    """
    foo
    """
# Task registered under two extra alias names.
@task(aliases=('a', 'b'))
def with_aliases():
    """foo
    """
| 9.857143 | 25 | 0.514493 | 0 | 0 | 0 | 0 | 242 | 0.876812 | 0 | 0 | 75 | 0.271739 |
2da1a492db1b897c4bb9696be1e2b24cd6f6083c | 280 | py | Python | love_release/utils.py | toxinu/pylove-release | 92d879cbaa7cd6e5adfe21151617377bde7ba358 | [
"Zlib"
] | 1 | 2017-02-26T10:50:03.000Z | 2017-02-26T10:50:03.000Z | love_release/utils.py | toxinu/pylove-release | 92d879cbaa7cd6e5adfe21151617377bde7ba358 | [
"Zlib"
] | 1 | 2020-12-09T08:15:16.000Z | 2020-12-09T08:15:16.000Z | love_release/utils.py | toxinu/pylove-release | 92d879cbaa7cd6e5adfe21151617377bde7ba358 | [
"Zlib"
] | null | null | null | from subprocess import PIPE
from subprocess import Popen
def run(command):
    """Run *command* (an argv list) and capture its output.

    Returns a tuple ``(stdout, stderr, returncode)`` with both streams
    decoded as UTF-8.
    """
    assert (isinstance(command, list)), "Command must be a list"
    process = Popen(command, stdout=PIPE, stderr=PIPE)
    stdout_bytes, stderr_bytes = process.communicate()
    return (stdout_bytes.decode('utf-8'),
            stderr_bytes.decode('utf-8'),
            process.returncode)
| 28 | 64 | 0.689286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.135714 |
2da324e1630916481c6ac8f3476873373cb7e770 | 12,748 | py | Python | gender_converter/model/loss.py | roebel/DeepGC | 03eee63ff9d9f4daa34435ddca530b262f097ea6 | [
"MIT"
] | null | null | null | gender_converter/model/loss.py | roebel/DeepGC | 03eee63ff9d9f4daa34435ddca530b262f097ea6 | [
"MIT"
] | 1 | 2021-08-11T06:41:56.000Z | 2021-08-11T06:41:56.000Z | gender_converter/model/loss.py | roebel/DeepGC | 03eee63ff9d9f4daa34435ddca530b262f097ea6 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras.layers import Layer
from debugprint import print_debug
from .utils import get_mask_from_lengths
class ParrotLoss():
    """Multi-term loss for a voice-conversion model.

    Combines mel reconstruction (pre/post-net), a contrastive loss aligning
    text and mel hidden sequences, a speaker-encoder classification loss,
    an adversarial speaker loss on the linguistic space, and a text
    (phone) classification loss.  All weights come from ``hparams``.
    """
    def __init__(self, hparams):
        super(ParrotLoss, self).__init__()
        self.hidden_dim = hparams.encoder_embedding_dim
        self.mel_hidden_dim = hparams.mel_embedding_dim
        # Weights of the individual loss terms in the combined losses.
        self.contr_w = hparams.contrastive_loss_w
        self.spenc_w = hparams.speaker_encoder_loss_w
        self.texcl_w = hparams.text_classifier_loss_w
        self.spadv_w = hparams.speaker_adversial_loss_w
        self.spcla_w = hparams.speaker_classifier_loss_w
        self.n_symbols = hparams.n_symbols
        self.fine_tune = hparams.fine_tune
        # speaker classif logit from mel hidden can be at text rate or at frame rate
        self.spksclassif_at_mel_rate = hparams.spksclassif_at_mel_rate
        if 'speaker_adversial_loss_type' in hparams:
            # adversarial loss measure: one of 'l2', 'l1', 'KL'
            self.speaker_adversial_loss_type = hparams.speaker_adversial_loss_type
        else:
            # default (from the original paper/code)
            self.speaker_adversial_loss_type = 'l2'
        print_debug('spk adv loss type: ' + self.speaker_adversial_loss_type)
    def contrastive_loss(self, text_hidden, mel_hidden, mel_lengths, eps=1e-5):
        """
        Zhang's basic contrastive loss: pull corresponding text/mel frames
        together and push non-corresponding ones at least ``margin`` apart.
        Assumes text_hidden and mel_hidden are both [B, T, emb] at the same
        frame rate (T = mel frames) -- TODO confirm against the model.
        """
        # ### CONTRASTIVE LOSS
        n_frames = mel_hidden.shape[1]  # n_frames = T
        # 1) contrastive mask: valid-frame pairs only.
        # # [B, T] -> [B, T, T] (tile)
        contrast_mask1 = tf.tile(tf.expand_dims(get_mask_from_lengths(mel_lengths), axis=2), [1, 1, n_frames])
        # # [B, T] -> [B, T, T] (tile)
        contrast_mask2 = tf.tile(tf.expand_dims(get_mask_from_lengths(mel_lengths), axis=1), [1, n_frames, 1])
        # # [B, T, T]
        contrast_mask = tf.cast(contrast_mask1 & contrast_mask2, tf.float32)
        # L2-normalize both hidden sequences along the embedding axis
        # (eps avoids division by zero for all-zero frames).
        # text_hidden [B, T, emb_size]
        # mel_hidden [B, T, emb_size]
        text_hidden_normed = text_hidden / (tf.norm(text_hidden, axis=2, keepdims=True) + eps)
        mel_hidden_normed = mel_hidden / (tf.norm(mel_hidden, axis=2, keepdims=True) + eps)
        # Pairwise squared distances via (x - y) ** 2 = x ** 2 + y ** 2 - 2xy
        # [batch_size, T, 1]
        distance_matrix_xx = tf.reduce_sum(text_hidden_normed ** 2, axis=2, keepdims=True)
        distance_matrix_yy = tf.reduce_sum(mel_hidden_normed ** 2, axis=2)
        # [batch_size, 1, T]
        distance_matrix_yy = tf.expand_dims(distance_matrix_yy, axis=1)
        # [batch_size, T, T]
        distance_matrix_xy = text_hidden_normed @ tf.transpose(mel_hidden_normed, (0, 2, 1))
        # [batch_size, T, T]
        distance_matrix = distance_matrix_xx + distance_matrix_yy - 2 * distance_matrix_xy
        # Diagonal pairs are positives (minimize distance); off-diagonal
        # pairs are negatives (hinge with margin 1).
        identity_mat = tf.eye(distance_matrix.shape[1])
        margin = 1.
        contrast_loss = identity_mat * distance_matrix + \
            (1. - identity_mat) * tf.maximum(margin - distance_matrix, tf.zeros_like(distance_matrix))
        contrast_loss = tf.reduce_sum(contrast_loss*contrast_mask) / tf.reduce_sum(contrast_mask)
        return contrast_loss
    def compute_loss(self, model_outputs, targets, speaker_target, input_text=False, eps=1e-5):
        """Compute all loss terms and the two combined objectives.

        Returns ``(loss_list, accuracy_list, combined_loss1, combined_loss2)``
        where combined_loss1 drives the main model and combined_loss2 drives
        the (adversarial) speaker classifier.
        """
        (predicted_mel, predicted_mel_post, mel_lengths, text_lengths,
         speaker_logit_from_mel, speaker_logit_from_mel_hidden_text_or_mel_rate,
         expand_mat_padded, text_input_padded, text_hidden, mel_hidden, mel_hidden_text_or_mel_rate,
         text_logit_from_mel_hidden, text_target_text_level, mat_onehot_padded) = model_outputs
        mel_target = targets
        # Masked L1 reconstruction on pre-net and post-net mel predictions.
        mel_mask = get_mask_from_lengths(mel_lengths)
        mel_mask = tf.expand_dims(mel_mask, axis=1)
        # mel_mask = tf.keras.backend.cast(tf.tile(mel_mask, [1, mel_target.shape[1], 1]), dtype='float32')
        # replicate mel_mask over mel features axis
        mel_mask = tf.tile(tf.keras.backend.cast(mel_mask, dtype='float32'), [1, mel_target.shape[1], 1])
        # n_frames = mel_hidden.shape[1] # n_frames = T
        recon_loss = tf.reduce_sum(tf.abs(mel_target-predicted_mel)*mel_mask)/tf.reduce_sum(mel_mask)
        recon_post_loss = tf.reduce_sum(tf.abs(mel_target-predicted_mel_post)*mel_mask)/tf.reduce_sum(mel_mask)
        # contrastive loss
        contrast_loss = self.contrastive_loss(text_hidden, mel_hidden, mel_lengths, eps)
        if not self.fine_tune:
            # speaker classification loss from mel speaker space, at text frame rate
            # speaker_logit_from_mel_int = tf.cast(speaker_logit_from_mel, tf.int16)
            speaker_encoder_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\
                (speaker_target, speaker_logit_from_mel)
            predicted_speaker = tf.cast(tf.math.argmax(speaker_logit_from_mel, axis=1), dtype=tf.int16)
            speaker_encoder_acc = tf.reduce_sum(tf.cast((predicted_speaker == speaker_target), tf.float32)) \
                / speaker_target.shape[0]
        else:
            # Speaker encoder is frozen during fine-tuning: zero loss/acc.
            speaker_encoder_loss = tf.convert_to_tensor(0., dtype=tf.dtypes.float32)
            speaker_encoder_acc = tf.convert_to_tensor(0., dtype=tf.dtypes.float32)
        if self.fine_tune:
            # Fine-tuning uses a single-logit (2-speaker) classifier.
            n_speakers = 2
        else:
            n_speakers = speaker_logit_from_mel_hidden_text_or_mel_rate.shape[2]
        n_text_frames = speaker_logit_from_mel_hidden_text_or_mel_rate.shape[1]
        text_mask = get_mask_from_lengths(text_lengths)
        sc_mel_mask = get_mask_from_lengths(mel_lengths)  # mask for speaker classifier at mel rate
        if not self.fine_tune:
            text_mask = tf.expand_dims(text_mask, axis=1)
            sc_mel_mask = tf.expand_dims(sc_mel_mask, axis=1)
        text_mask_float = tf.keras.backend.cast(text_mask, dtype='float32')
        sc_mel_mask_float = tf.keras.backend.cast(sc_mel_mask, dtype='float32')
        # # speaker classification losses
        # # fader losses
        # speaker classification loss from mel linguistic space; the mask
        # rate follows hparams.spksclassif_at_mel_rate (mel vs text rate).
        if self.spksclassif_at_mel_rate:
            sc_mask_float = sc_mel_mask_float
        else:
            sc_mask_float = text_mask_float
        if self.fine_tune:
            # there is only 1 dimension for the speaker "code" (2 speakers!)
            # these two lines change
            speaker_logit_flatten = tf.keras.backend.flatten(speaker_logit_from_mel_hidden_text_or_mel_rate)
            predicted_speaker = tf.cast(speaker_logit_flatten > 0., dtype=tf.int16)
            speaker_target_ling = tf.tile(tf.expand_dims(speaker_target, axis=1), [1, n_text_frames])
            speaker_target_flatten = tf.keras.backend.flatten(speaker_target_ling)
            sc_mask_float = tf.keras.backend.flatten(sc_mask_float)
            speaker_classification_acc = tf.reduce_sum(tf.cast((predicted_speaker == speaker_target_flatten),
                                                               tf.float32) * sc_mask_float) \
                / tf.reduce_sum(sc_mask_float)
            # this line changes
            loss = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE, from_logits=True) \
                (speaker_target_flatten, speaker_logit_flatten)
            speaker_classification_loss = tf.reduce_sum(loss * sc_mask_float) / tf.reduce_sum(sc_mask_float)
            # speaker adversarial loss from mel hidden at frame rate:
            # push the classifier output towards indecision (p = 0.5).
            if self.speaker_adversial_loss_type == 'l2':
                loss = tf.math.pow(tf.abs(tf.nn.sigmoid(speaker_logit_flatten) - 0.5), 2)
            elif self.speaker_adversial_loss_type == 'l1':
                loss = tf.abs(tf.nn.sigmoid(speaker_logit_flatten) - 0.5)
            elif self.speaker_adversial_loss_type == 'KL':
                # use inverse Kullback-Leibler divergence for 2 speakers = 2 probabilities p and 1-p
                epsilon = 1e-12  # to avoid problems with log
                ref_prob = 1. / n_speakers
                target_prob = (1 - epsilon) * tf.nn.sigmoid(speaker_logit_flatten) + epsilon
                loss = (1-target_prob)*tf.math.log((1-target_prob)/ref_prob) + target_prob*tf.math.log(target_prob/ref_prob)
            speaker_adversial_loss = tf.reduce_sum(loss * sc_mask_float) / tf.reduce_sum(sc_mask_float)
        else:
            # Multi-speaker softmax classifier over the linguistic space.
            speaker_logit_flatten = tf.reshape(speaker_logit_from_mel_hidden_text_or_mel_rate, [-1, n_speakers])
            predicted_speaker = tf.cast(tf.math.argmax(speaker_logit_flatten, axis=1), dtype=tf.int16)
            speaker_target_ling = tf.tile(tf.expand_dims(speaker_target, axis=1), [1, n_text_frames])
            speaker_target_flatten = tf.keras.backend.flatten(speaker_target_ling)
            speaker_classification_acc = tf.reduce_sum(tf.cast((predicted_speaker == speaker_target_flatten),
                                                               tf.float32)*tf.keras.backend.flatten(sc_mask_float))\
                / tf.reduce_sum(sc_mask_float)
            loss = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE,
                                                                 from_logits=True)\
                (speaker_target_flatten, speaker_logit_flatten)
            speaker_classification_loss = tf.reduce_sum(loss*tf.keras.backend.flatten(sc_mask_float))\
                / tf.reduce_sum(sc_mask_float)
            # speaker adversarial loss from mel hidden at frame rate:
            # push the softmax towards the uniform distribution.
            flatten_target = 1. / n_speakers  # * tf.ones_like(speaker_logit_flatten)
            if self.speaker_adversial_loss_type == 'l2':
                loss = tf.math.pow(tf.abs(tf.nn.softmax(speaker_logit_flatten, axis=1) - flatten_target), 2)
            elif self.speaker_adversial_loss_type == 'l1':
                loss = tf.abs(tf.nn.softmax(speaker_logit_flatten, axis=1) - flatten_target)
            elif self.speaker_adversial_loss_type == 'KL':
                # use inverse Kullback-Leibler divergence
                epsilon = 1e-12  # to avoid problems with log
                ref_prob = 1. / n_speakers  # flatten_target
                target_prob = (1 - epsilon) * tf.nn.softmax(speaker_logit_flatten, axis=1) + epsilon
                loss = target_prob*tf.math.log(target_prob/ref_prob)
            # not sure of this (mask)
            mask = tf.reshape(tf.tile(tf.transpose(sc_mask_float, (0, 2, 1)),
                                      [1, 1, n_speakers]), [-1, n_speakers])
            speaker_adversial_loss = tf.reduce_sum(loss * mask) / tf.reduce_sum(mask)
        # text classification loss
        # text classification loss from mel hidden at text rate
        # compress from mel rate to text rate (normalize by the phone durations)
        text_logit_from_mel_hidden_text_rate = expand_mat_padded @ text_logit_from_mel_hidden
        # input the actual text at phone level rather than compress from mel level!
        text_logit_flatten = tf.reshape(text_logit_from_mel_hidden_text_rate, [-1, self.n_symbols])
        text_target_flatten = tf.keras.backend.flatten(text_target_text_level)
        predicted_text = tf.cast(tf.math.argmax(text_logit_flatten, axis=1), dtype=tf.int16)
        text_classification_acc = tf.reduce_sum(tf.cast((predicted_text == text_target_flatten),
                                                        tf.float32)*tf.keras.backend.flatten(text_mask_float))\
            / tf.reduce_sum(text_mask_float)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE,
                                                             from_logits=True)\
            (text_target_flatten, text_logit_flatten)
        text_classification_loss = tf.reduce_sum(loss*tf.keras.backend.flatten(text_mask_float)) / \
            tf.reduce_sum(text_mask_float)
        loss_list = [recon_loss, recon_post_loss, speaker_encoder_loss, speaker_classification_loss,
                     speaker_adversial_loss, text_classification_loss, contrast_loss]
        accuracy_list = [speaker_encoder_acc, speaker_classification_acc, text_classification_acc]
        # Main objective: reconstruction + weighted auxiliary terms.
        combined_loss1 = recon_loss + self.spenc_w * speaker_encoder_loss + self.spadv_w * speaker_adversial_loss + \
            self.texcl_w * text_classification_loss + self.contr_w * contrast_loss + recon_post_loss
        # self.contr_w * contrast_loss + \
        # + self.texcl_w * text_classification_loss + \
        # self.spadv_w * speaker_adversial_loss
        # Adversary objective: speaker classifier alone.
        combined_loss2 = self.spcla_w * speaker_classification_loss
        return loss_list, accuracy_list, combined_loss1, combined_loss2
| 56.15859 | 124 | 0.655005 | 12,603 | 0.988626 | 0 | 0 | 0 | 0 | 0 | 0 | 1,983 | 0.155554 |
2da34ae804d338824cab481265b913a1023b155d | 2,509 | py | Python | pkgs/nbconvert-4.1.0-py27_0/lib/python2.7/site-packages/nbconvert/filters/markdown.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 652 | 2015-07-26T00:00:17.000Z | 2022-02-24T18:30:04.000Z | pkgs/nbconvert-4.1.0-py27_0/lib/python2.7/site-packages/nbconvert/filters/markdown.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 8 | 2015-09-07T03:38:19.000Z | 2021-05-23T03:18:51.000Z | pkgs/nbconvert-4.1.0-py27_0/lib/python2.7/site-packages/nbconvert/filters/markdown.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 40 | 2015-07-24T19:45:08.000Z | 2021-11-01T14:54:56.000Z | """Markdown filters
This file contains a collection of utility filters for dealing with
markdown within Jinja templates.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import os
import subprocess
from io import TextIOWrapper, BytesIO
try:
from .markdown_mistune import markdown2html_mistune
except ImportError as e:
# store in variable for Python 3
_mistune_import_error = e
def markdown2html_mistune(source):
"""mistune is unavailable, raise ImportError"""
raise ImportError("markdown2html requires mistune: %s" % _mistune_import_error)
from nbconvert.utils.pandoc import pandoc
from nbconvert.utils.exceptions import ConversionException
from nbconvert.utils.version import check_version
from ipython_genutils.py3compat import cast_bytes
__all__ = [
'markdown2html',
'markdown2html_pandoc',
'markdown2html_mistune',
'markdown2latex',
'markdown2rst',
]
def markdown2latex(source, markup='markdown', extra_args=None):
"""Convert a markdown string to LaTeX via pandoc.
This function will raise an error if pandoc is not installed.
Any error messages generated by pandoc are printed to stderr.
Parameters
----------
source : string
Input string, assumed to be valid markdown.
markup : string
Markup used by pandoc's reader
default : pandoc extended markdown
(see http://johnmacfarlane.net/pandoc/README.html#pandocs-markdown)
Returns
-------
out : string
Output as returned by pandoc.
"""
return pandoc(source, markup, 'latex', extra_args=extra_args)
def markdown2html_pandoc(source, extra_args=None):
"""Convert a markdown string to HTML via pandoc"""
extra_args = extra_args or ['--mathjax']
return pandoc(source, 'markdown', 'html', extra_args=extra_args)
# The mistune renderer is the default, because it's simple to depend on it
markdown2html = markdown2html_mistune
def markdown2rst(source, extra_args=None):
"""Convert a markdown string to ReST via pandoc.
This function will raise an error if pandoc is not installed.
Any error messages generated by pandoc are printed to stderr.
Parameters
----------
source : string
Input string, assumed to be valid markdown.
Returns
-------
out : string
Output as returned by pandoc.
"""
return pandoc(source, 'markdown', 'rst', extra_args=extra_args)
| 28.511364 | 87 | 0.721403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,522 | 0.606616 |
2da37e3c9de143cff5afbbd07d8f713a7806daa6 | 3,006 | py | Python | tests/test_python_simulation_function.py | tanxicccc/rsopt | 8705e937f95a4bbe6ed3fb1a04b78f724a5f3931 | [
"Apache-2.0"
] | null | null | null | tests/test_python_simulation_function.py | tanxicccc/rsopt | 8705e937f95a4bbe6ed3fb1a04b78f724a5f3931 | [
"Apache-2.0"
] | null | null | null | tests/test_python_simulation_function.py | tanxicccc/rsopt | 8705e937f95a4bbe6ed3fb1a04b78f724a5f3931 | [
"Apache-2.0"
] | null | null | null | import unittest
import sys
import inspect
from unittest import mock
import numpy as np
import rsopt.libe_tools.simulation_functions.python_simulation_functions as pyfunc
import rsopt.optimizer as opt
# Stub out the `radia` extension module before rsopt's radia code imports it,
# so these tests run without the Radia binary installed.
radiamodule = mock.MagicMock()
sys.modules["radia"] = radiamodule
from rsopt.codes.radia.sim_functions import hybrid_undulator
from test_configuration import parameters_dict, settings_dict
# Captured only to assert against in the tests -- TODO confirm it is used;
# it is not referenced elsewhere in this chunk.
test_function_signature = inspect.signature(hybrid_undulator)
# Fixed 5-element design point used across the tests.
x_vec = [1, 2, 3, 4, 5]
# libEnsemble-style history array holding x_vec in field 'x'.
H = np.array([x_vec], dtype=[('x', float)])
# Simulation output spec: scalar 'f' plus a length-4 'fvec'.
sim_specs = {'out': [('f', float), ('fvec', float, 4)]}
class DummyJob:
    # Minimal stand-in for a job object; tests attach `.execute` manually.
    pass
class TestOptimizer(unittest.TestCase):
    """Unit tests for rsopt's PythonFunction simulation wrapper."""
    def setUp(self):
        # Fresh Optimizer configured from the shared parameter/setting dicts.
        self.optimizer = opt.Optimizer()
        self.optimizer.set_parameters(parameters_dict)
        self.optimizer.set_settings(settings_dict)
    def test_class_signature(self):
        # The wrapper's signature should merge parameters into settings.
        dummy_job = DummyJob()
        dummy_job.execute = None
        pf = pyfunc.PythonFunction(dummy_job, self.optimizer._config.parameters(job=0),
                                   self.optimizer._config.settings(job=0))
        base_signature = settings_dict.copy()
        pyfunc._merge_dicts(parameters_dict, base_signature)
        self.assertEqual(pf.signature.keys(), base_signature.keys())
    def test_x_from_H(self):
        # Extracting the design vector from the history array yields x_vec.
        test_x = pyfunc.get_x_from_H(H)
        self.assertTrue(np.all(test_x == x_vec))
    def test_compose_args(self):
        # compose_args should map x values onto parameter names in order.
        dummy_job = DummyJob()
        dummy_job.execute = None
        pf = pyfunc.PythonFunction(dummy_job, self.optimizer._config.parameters(job=0),
                                   self.optimizer._config.settings(job=0))
        _, kwargs = pf.compose_args(x_vec, pf.signature)
        for base_key, base_value in zip(parameters_dict.keys(), x_vec):
            self.assertEqual(kwargs[base_key], base_value)
    def test_function_call_function(self):
        # The objective is mocked to echo its kwargs so we can verify the
        # wrapper forwards the merged parameter/setting dict unchanged.
        objective = mock.MagicMock(name='hybrid_undulator',
                                   return_value=lambda *args, **kwargs: (args, kwargs))
        dummy_job = DummyJob()
        dummy_job.execute = objective()
        pf = pyfunc.PythonFunction(dummy_job, self.optimizer._config.parameters(job=0),
                                   self.optimizer._config.settings(job=0))
        kwargs = {key: i for i, key in enumerate(self.optimizer._config.get_parameters_list('get_parameter_names'))}
        pyfunc._merge_dicts(settings_dict, kwargs, depth=1)
        _, f = pf.call_function(kwargs)
        self.assertEqual(f.keys(), kwargs.keys())
    def test_format_evaluation(self):
        # A (scalar, vector) result must be packed into the sim_specs
        # structured-array fields 'f' and 'fvec'.
        dummy_job = DummyJob()
        dummy_job.execute = None
        pf = pyfunc.PythonFunction(dummy_job, self.optimizer._config.parameters(job=0),
                                   self.optimizer._config.settings(job=0))
        pf.sim_specs = sim_specs
        result = (x_vec[0], x_vec[1:])
        f = pf.format_evaluation(result)
        self.assertEqual(f['f'][0], x_vec[0])
        self.assertTrue(np.all(f['fvec'][0] == x_vec[1:]))
2da5fddf872be8bcdd6b64ff00d82f7bd32eb0ed | 10,646 | py | Python | ranker/views.py | shreyashc/firecube | 4e3da7b66398cafb54182afdf34bfe76fd160376 | [
"Apache-2.0"
] | 1 | 2020-06-23T13:04:07.000Z | 2020-06-23T13:04:07.000Z | ranker/views.py | shreyashc/firecube | 4e3da7b66398cafb54182afdf34bfe76fd160376 | [
"Apache-2.0"
] | 4 | 2021-03-30T13:41:15.000Z | 2021-06-04T23:28:51.000Z | ranker/views.py | shreyashc/firecube | 4e3da7b66398cafb54182afdf34bfe76fd160376 | [
"Apache-2.0"
] | null | null | null | import datetime
import json
import os
import random
import re
from urllib.parse import quote
import lxml
import pafy
import requests
import youtube_dl
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.http import (Http404, HttpResponse, HttpResponseRedirect,
JsonResponse)
from django.shortcuts import redirect, render, reverse
from django.template.defaultfilters import filesizeformat
from django.utils.http import urlencode
from django.views.decorators.csrf import csrf_exempt
from pydub import AudioSegment
from utils import ytscrapper
def home(request):
    # Landing page.
    return render(request, 'index.html')


def about(request):
    # Static "about" page.
    return render(request, 'about.html')
def movies(request):
    """Render IMDb's weekly trending movies/web-series lists (India + global).

    Scrapes both IMDb trending pages, extracts the titles, and renders them
    with the ``trending.html`` template.  Returns a plain
    ``HttpResponse("Server Error")`` when either page cannot be fetched.
    """
    def _scrape_trending(url):
        # One IMDb trending page -> [{'title': ...}, ...], or None on a
        # network failure.  (Previously only the second fetch was guarded,
        # so a network error on the first page crashed the view.)
        try:
            response = requests.get(url)
        except requests.RequestException:
            return None
        soup = BeautifulSoup(response.content, 'lxml')
        raw_items = soup.find_all(
            'div', {"class": "trending-list-rank-item-data-container"})
        # The third line of each entry's stripped text holds the title.
        return [{"title": item.text.strip().split("\n")[2]} for item in raw_items]

    trending_india = _scrape_trending("https://www.imdb.com/india/released/")
    trending_global = _scrape_trending("https://www.imdb.com/india/global/")
    if trending_india is None or trending_global is None:
        return HttpResponse("Server Error")
    context = {
        'title': "Trending",
        'local_list_name': "Trending Movies/Web Series (India)",
        'local_list': trending_india,
        'global_list_name': "Trending Movies/Web Series(Global)",
        'global_list': trending_global,
        'week': datetime.date.today(),
    }
    return render(request, 'trending.html', context)
def toptwohundred(request):
    """Scrape the current Billboard 200 albums chart and render it."""
    chart_url = "https://www.billboard.com/charts/billboard-200"
    try:
        response = requests.get(chart_url)
    except:
        return HttpResponse("Server error")
    soup = BeautifulSoup(response.content, 'lxml')
    # Each entry is a <span class="chart-element__information"> whose
    # stripped text starts with "<name>\n<artist>".
    entries = soup.find_all('span', {"class": "chart-element__information"})
    week_button = soup.find(
        'button', {"class": "date-selector__button button--link"})
    current_week = week_button.text.strip()
    chart = []
    for entry in entries[:201]:
        text_lines = entry.text.strip().split("\n")
        chart.append({"name": text_lines[0], "artist": text_lines[1]})
    context = {
        'song_list': chart,
        'week': current_week,
        'list_name': "billboard Top 200 Songs",
        'title': "Billboard 200",
    }
    return render(request, 'toptwohundred.html', context)
def hothundred(request):
    """Scrape the current Billboard Hot 100 songs chart and render it.

    Mirrors the Billboard-200 view; also adds the ``title`` context key for
    consistency with the other chart views (it was previously missing here).
    """
    urlOfHot100 = "https://www.billboard.com/charts/hot-100"
    try:
        requestOfHot100 = requests.get(urlOfHot100)
    except:
        return HttpResponse("server error")
    soupOfHot100 = BeautifulSoup(requestOfHot100.content, 'lxml')
    # Each entry is a <span class="chart-element__information"> whose
    # stripped text starts with "<name>\n<artist>".
    rawListOfHot100 = soupOfHot100.find_all(
        'span', {"class": "chart-element__information"})
    week = soupOfHot100.find(
        'button', {"class": "date-selector__button button--link"})
    current_week = week.text.strip()
    # [:201] is a defensive cap; the Hot 100 normally has 100 entries.
    finalHot100List = [
        {"name": song.text.strip().split("\n")[0],
         "artist": song.text.strip().split("\n")[1]}
        for song in rawListOfHot100[:201]
    ]
    context = {
        'song_list': finalHot100List,
        'week': current_week,
        'list_name': "billboard hot 100 Songs",
        # Added for consistency with the sibling chart views.
        'title': "Billboard Hot 100",
    }
    return render(request, 'toptwohundred.html', context)
def kannadatopfifty(request):
    """Scrape Gaana's Kannada top chart playlist and render it.

    NOTE(review): the scraped URL is Gaana's "top 20" playlist even though
    the page is labelled "Top 50" -- confirm which is intended.
    """
    url_for_kannada_topfifty_request = "https://gaana.com/playlist/gaana-dj-kannada-top-20"
    try:
        r = requests.get(url_for_kannada_topfifty_request)
    except:
        return HttpResponse("Server Error")
    try:
        soup = BeautifulSoup(r.content, 'lxml')
    except:
        # Fall back to the stdlib parser when lxml is unavailable/broken.
        soup = BeautifulSoup(r.content, 'html.parser')
    rawKanSongs = soup.find_all(
        'div', {"class": "playlist_thumb_det"})
    # Each playlist row carries anchors for the song and its artist(s).
    anchors_in_kan_songs = [
        song_div.find_all('a') for song_div in rawKanSongs
    ]
    final_kan_songs = [
        get_formatted_song(anchor_tags)
        for anchor_tags in anchors_in_kan_songs
    ]
    # (Removed a leftover debug print of the full song list.)
    context = {
        'song_list': final_kan_songs,
        'list_name': "Kannada Weekly Top 50 Songs",
        'week': datetime.date.today(),
        'title': 'Kannada Top 50'
    }
    return render(request, 'toptwohundred.html', context)
def hinditopfifty(request):
    """Scrape Gaana's Bollywood top-50 playlist and render it."""
    url_hindi_topfifty = "https://gaana.com/playlist/gaana-dj-bollywood-top-50-1"
    try:
        response = requests.get(url_hindi_topfifty)
    except:
        return HttpResponse("Server Error")
    try:
        soup = BeautifulSoup(response.content, 'lxml')
    except:
        # Fall back to the stdlib parser when lxml is unavailable/broken.
        soup = BeautifulSoup(response.content, 'html.parser')
    # (Removed an unused `date = datetime.date.today()` local.)
    rawHindiSongs = soup.find_all(
        'div', {"class": "playlist_thumb_det"})
    # Each playlist row carries anchors for the song and its artist(s).
    anchors_in_hindi_songs = [
        song_div.find_all('a') for song_div in rawHindiSongs
    ]
    final_hindi_songs = [
        get_formatted_song(anchor_tags)
        for anchor_tags in anchors_in_hindi_songs
    ]
    context = {
        'song_list': final_hindi_songs,
        'list_name': "Hindi Weekly Top 50 Songs",
        'week': datetime.date.today(),
        'title': 'Hindi Top 50'
    }
    return render(request, 'toptwohundred.html', context)
def ytredirect(request):
    # Redirect the browser to the first YouTube result for ?query=...
    video_name = str(request.GET['query'])
    redirect_url = ytscrapper.getYtUrl(video_name)
    if redirect_url is None:
        # ytscrapper signals failure by returning None.
        return HttpResponse("Server Busy! Please Try again")
    return HttpResponseRedirect(redirect_url)


def download_from_name(request):
    # Resolve ?query=... to a YouTube URL and forward to the downloader view.
    video_name = str(request.GET['query'])
    video_url = ytscrapper.getYtUrl(video_name)
    if video_url is None:
        return HttpResponse("Could Not Find Video")
    # Hand the resolved URL to ytdownloader via its query string.
    redirect_url = reverse('ytdownloader') + f'?video_url={video_url}'
    return redirect(redirect_url)


def youtube(request):
    # Form page where the user pastes a YouTube URL or search term.
    return render(request, 'youtube_from.html')
def ytdownloader(request):
    """List the downloadable video/audio streams for ?video_url=...

    Uses pafy to resolve the video and renders the stream tables plus video
    metadata with the ``download.html`` template; re-renders the input form
    with an error when pafy cannot parse the URL.
    """
    ytApiKey = settings.YT_API_KEY
    pafy.set_api_key(ytApiKey)
    video_url = request.GET['video_url']
    try:
        video = pafy.new(video_url)
    except:
        context = {
            'error': "invalid url"
        }
        return render(request, 'youtube_from.html', context)
    # Combined audio+video streams offered by pafy.
    video_audio_streams = [
        {
            'resolution': s.resolution.split("x")[1]+"p",  # 360p,720p..
            'extension': s.extension,
            'file_size': filesizeformat(s.get_filesize()),
            # The title is appended so later requests can recover it.
            'video_url': s.url + "&title=" + video.title
        }
        for s in video.streams
    ]
    # Audio-only streams.
    audio_streams = [
        {
            'bitrate': s.rawbitrate // 1000,  # bps -> kbps
            'extension': s.extension,
            'file_size': filesizeformat(s.get_filesize()),
            'video_url': s.url + "&title=" + video.title
        }
        for s in video.audiostreams
    ]
    context = {
        'streams': video_audio_streams,
        'audio_streams': audio_streams,
        'meta': {
            'title': video.title,
            # Force https on the thumbnail URL.
            'thumb': video.bigthumbhd.replace("http://", "https://"),
            'duration': video.duration,
            'published': video.published,
            'viewcount': video.viewcount,
            'videoid': video.videoid
        }
    }
    return render(request, 'download.html', context)
@csrf_exempt
def get_download_url(request):
    """Download the requested stream server-side and return its media path.

    Expects a JSON body with 'videoid', 'idx' (stream index) and
    'stream_type' ('audio-mp3' | 'audio' | 'video').  Responds with
    ``{'filepath': '/media/<file>'}`` or a 400 JSON error.
    """
    ytApiKey = settings.YT_API_KEY
    pafy.set_api_key(ytApiKey)
    data = request.body.decode('utf-8')
    req_data = json.loads(data)
    videoid = req_data['videoid']
    idx = int(req_data['idx'])
    stream_type = req_data['stream_type']
    try:
        video = pafy.new(videoid)
        if stream_type == 'audio-mp3':
            # Download the native audio stream, then transcode to MP3 at
            # the same bitrate via pydub/ffmpeg.
            stream = video.audiostreams[idx]
            _filename = video.title + \
                str(stream.rawbitrate // 1000) + "."+stream.extension
            _filename = normalizeFilename(_filename)
            filepath_temp = os.path.join(settings.MEDIA_ROOT, _filename)
            stream.download(filepath=filepath_temp, quiet=True)
            sound = AudioSegment.from_file(
                os.path.join(settings.MEDIA_ROOT, _filename))
            filepath_temp = os.path.join(
                settings.MEDIA_ROOT, _filename.replace("."+stream.extension, ".mp3"))
            sound.export(filepath_temp, format="mp3",
                         bitrate=str(stream.rawbitrate // 1000)+"K")
            # Rebind to the public /media/ URL path returned to the client.
            filepath_temp = "/media/" + \
                _filename.replace("."+stream.extension, ".mp3")
        elif stream_type == 'audio':
            # Audio stream in its native container, no transcoding.
            stream = video.audiostreams[idx]
            _filename = video.title + \
                str(stream.rawbitrate // 1000) + "."+stream.extension
            _filename = normalizeFilename(_filename)
            filepath_temp = os.path.join(settings.MEDIA_ROOT, _filename)
            stream.download(filepath=filepath_temp, quiet=True)
            filepath_temp = "/media/" + _filename
        elif stream_type == 'video':
            # Combined video+audio stream.
            stream = video.streams[idx]
            _filename = video.title + \
                stream.resolution.split("x")[1]+"p" + "." + stream.extension
            _filename = normalizeFilename(_filename)
            filepath_temp = os.path.join(settings.MEDIA_ROOT, _filename)
            # NOTE(review): quiet=False here (progress printed to stdout)
            # while the audio branches use quiet=True -- confirm intended.
            stream.download(filepath=filepath_temp, quiet=False)
            filepath_temp = "/media/" + _filename
    except Exception as e:
        print(e)
        return JsonResponse(status=400, data={'message': "could not find video/audio"})
    return JsonResponse({'filepath': filepath_temp})
def normalizeFilename(filename):
    """Strip characters that are illegal in file names (/ \\ : * ? " < > |)
    plus surrounding whitespace, and return the cleaned name."""
    illegal_chars = r"[\/\\\:\*\?\"\<\>\|]"
    cleaned = re.sub(illegal_chars, "", filename)
    return cleaned.strip()
def get_formatted_song(anchor_tags):
    """Collect song metadata from a list of anchor tags.

    A tag whose href contains 'song' supplies ``name``; one whose href
    contains 'artist' supplies ``artist``.  Later matches overwrite earlier
    ones, and a tag matching both substrings fills both fields.
    """
    song_info = {}
    for tag in anchor_tags:
        href = tag.get('href')
        if 'song' in href:
            song_info['name'] = tag.text
        if 'artist' in href:
            song_info['artist'] = tag.text
    return song_info
| 29.490305 | 91 | 0.629814 | 0 | 0 | 0 | 0 | 2,237 | 0.210126 | 0 | 0 | 2,077 | 0.195097 |
2da60021013430b40d9e59ed61bc0bac8ce56e08 | 416 | py | Python | spotdl/get-file-name.py | Shaxadhere/spotdl | fc7d587a86b886fa4e020ac825d1748a7776de32 | [
"MIT"
] | 25 | 2019-02-21T09:31:56.000Z | 2022-03-13T15:36:24.000Z | spotdl/get-file-name.py | Shaxadhere/spotdl | fc7d587a86b886fa4e020ac825d1748a7776de32 | [
"MIT"
] | 2 | 2019-09-02T20:04:44.000Z | 2019-12-27T22:13:13.000Z | spotdl/get-file-name.py | Shaxadhere/spotdl | fc7d587a86b886fa4e020ac825d1748a7776de32 | [
"MIT"
] | 9 | 2019-10-01T12:44:29.000Z | 2021-03-24T10:09:03.000Z | from spotdl import handle
from spotdl import const
from spotdl import downloader
import os
import sys
# Resolve the CLI arguments once; spotdl keeps them globally on ``const``.
const.args = handle.get_arguments(to_group=True)
# Build a downloader for the first requested song and derive the target
# file name from the refined track title plus the configured extension.
track = downloader.Downloader(raw_song=const.args.song[0])
track_title = track.refine_songname(track.content.title)
track_filename = track_title + const.args.output_ext
track_download_path = os.path.join(const.args.folder, track_filename)
print(track_filename) | 26 | 69 | 0.822115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2da6673de1f45443b481635cfc357d6aa84b8b6e | 10,180 | py | Python | jax/_src/numpy/ndarray.py | tianjuchen/jax | 4755dc3fee8a33030c0571ecf4217c5656e3170d | [
"Apache-2.0"
] | null | null | null | jax/_src/numpy/ndarray.py | tianjuchen/jax | 4755dc3fee8a33030c0571ecf4217c5656e3170d | [
"Apache-2.0"
] | null | null | null | jax/_src/numpy/ndarray.py | tianjuchen/jax | 4755dc3fee8a33030c0571ecf4217c5656e3170d | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ndarray is defined as an virtual abstract base class.
import abc
from typing import Any, Optional, Tuple, Union
from jax import core
from jax.interpreters import pxla
from jax._src import device_array
import numpy as np
class ArrayMeta(abc.ABCMeta):
  """Metaclass for overriding ndarray isinstance checks.

  Any object carrying an ``aval`` attribute whose aval is a
  ``core.UnshapedArray`` is treated as an instance; everything else falls
  back to the normal ABCMeta virtual-subclass check.
  """

  def __instancecheck__(self, instance):
    # Allow tracer instances with avals that are instances of UnshapedArray.
    # We could instead just declare Tracer an instance of the ndarray type, but
    # there can be traced values that are not arrays. The main downside here is
    # that isinstance(x, ndarray) might return true but
    # issubclass(type(x), ndarray) might return false for an array tracer.
    try:
      return (hasattr(instance, "aval") and
              isinstance(instance.aval, core.UnshapedArray))
    except AttributeError:
      # Bug fix: the original fell through here and implicitly returned None;
      # propagate the default ABCMeta result instead.
      return super().__instancecheck__(instance)
class ndarray(metaclass=ArrayMeta):
  """Abstract interface mirroring the ``np.ndarray`` surface for JAX arrays.

  This class is never instantiated (``__init__`` raises).  Concrete device
  arrays and tracers are registered as virtual subclasses (see the
  ``ndarray.register`` calls at module bottom), and instance checks are
  customized by :class:`ArrayMeta`.  Every method is an abstract stub; the
  signatures document the expected API of implementations.
  """
  dtype: np.dtype          # element type
  ndim: int                # number of dimensions
  shape: Tuple[int, ...]   # per-dimension sizes
  size: int                # total number of elements
  # Direct construction is deliberately forbidden.
  def __init__(self, shape, dtype=None, buffer=None, offset=0, strides=None,
               order=None):
    raise TypeError("jax.numpy.ndarray() should not be instantiated explicitly."
                    " Use jax.numpy.array, or jax.numpy.zeros instead.")
  # Container / indexing protocol.
  @abc.abstractmethod
  def __getitem__(self, key, indices_are_sorted=False,
                  unique_indices=False) -> Any: ...
  @abc.abstractmethod
  def __setitem__(self, key, value) -> Any: ...
  @abc.abstractmethod
  def __len__(self) -> Any: ...
  @abc.abstractmethod
  def __iter__(self) -> Any: ...
  @abc.abstractmethod
  def __reversed__(self) -> Any: ...
  # Comparisons
  @abc.abstractmethod
  def __lt__(self, other) -> Any: ...
  @abc.abstractmethod
  def __le__(self, other) -> Any: ...
  @abc.abstractmethod
  def __eq__(self, other) -> Any: ...
  @abc.abstractmethod
  def __ne__(self, other) -> Any: ...
  @abc.abstractmethod
  def __gt__(self, other) -> Any: ...
  @abc.abstractmethod
  def __ge__(self, other) -> Any: ...
  # Unary arithmetic
  @abc.abstractmethod
  def __neg__(self) -> Any: ...
  @abc.abstractmethod
  def __pos__(self) -> Any: ...
  @abc.abstractmethod
  def __abs__(self) -> Any: ...
  @abc.abstractmethod
  def __invert__(self) -> Any: ...
  # Binary arithmetic
  @abc.abstractmethod
  def __add__(self, other) -> Any: ...
  @abc.abstractmethod
  def __sub__(self, other) -> Any: ...
  @abc.abstractmethod
  def __mul__(self, other) -> Any: ...
  @abc.abstractmethod
  def __matmul__(self, other) -> Any: ...
  @abc.abstractmethod
  def __truediv__(self, other) -> Any: ...
  @abc.abstractmethod
  def __floordiv__(self, other) -> Any: ...
  @abc.abstractmethod
  def __mod__(self, other) -> Any: ...
  @abc.abstractmethod
  def __divmod__(self, other) -> Any: ...
  @abc.abstractmethod
  def __pow__(self, other) -> Any: ...
  @abc.abstractmethod
  def __lshift__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rshift__(self, other) -> Any: ...
  @abc.abstractmethod
  def __and__(self, other) -> Any: ...
  @abc.abstractmethod
  def __xor__(self, other) -> Any: ...
  @abc.abstractmethod
  def __or__(self, other) -> Any: ...
  # Reflected binary arithmetic (right-hand operand versions).
  @abc.abstractmethod
  def __radd__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rsub__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rmul__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rmatmul__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rtruediv__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rfloordiv__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rmod__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rdivmod__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rpow__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rlshift__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rrshift__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rand__(self, other) -> Any: ...
  @abc.abstractmethod
  def __rxor__(self, other) -> Any: ...
  @abc.abstractmethod
  def __ror__(self, other) -> Any: ...
  # Scalar conversions.
  @abc.abstractmethod
  def __bool__(self) -> Any: ...
  @abc.abstractmethod
  def __complex__(self) -> Any: ...
  @abc.abstractmethod
  def __int__(self) -> Any: ...
  @abc.abstractmethod
  def __float__(self) -> Any: ...
  @abc.abstractmethod
  def __round__(self, ndigits=None) -> Any: ...
  @abc.abstractmethod
  def __index__(self) -> Any: ...
  # np.ndarray methods:
  @abc.abstractmethod
  def all(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,
          keepdims=None) -> Any: ...
  @abc.abstractmethod
  def any(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,
          keepdims=None) -> Any: ...
  @abc.abstractmethod
  def argmax(self, axis: Optional[int] = None, out=None, keepdims=None) -> Any: ...
  @abc.abstractmethod
  def argmin(self, axis: Optional[int] = None, out=None, keepdims=None) -> Any: ...
  @abc.abstractmethod
  def argpartition(self, kth, axis=-1, kind='introselect', order=None) -> Any: ...
  @abc.abstractmethod
  def argsort(self, axis: Optional[int] = -1, kind='quicksort', order=None) -> Any: ...
  @abc.abstractmethod
  def astype(self, dtype) -> Any: ...
  @abc.abstractmethod
  def choose(self, choices, out=None, mode='raise') -> Any: ...
  @abc.abstractmethod
  def clip(self, a_min=None, a_max=None, out=None) -> Any: ...
  @abc.abstractmethod
  def compress(self, condition, axis: Optional[int] = None, out=None) -> Any: ...
  @abc.abstractmethod
  def conj(self) -> Any: ...
  @abc.abstractmethod
  def conjugate(self) -> Any: ...
  @abc.abstractmethod
  def copy(self) -> Any: ...
  @abc.abstractmethod
  def cumprod(self, axis: Optional[Union[int, Tuple[int, ...]]] = None,
              dtype=None, out=None) -> Any: ...
  @abc.abstractmethod
  def cumsum(self, axis: Optional[Union[int, Tuple[int, ...]]] = None,
             dtype=None, out=None) -> Any: ...
  @abc.abstractmethod
  def diagonal(self, offset=0, axis1: int = 0, axis2: int = 1) -> Any: ...
  @abc.abstractmethod
  def dot(self, b, *, precision=None) -> Any: ...
  @abc.abstractmethod
  def flatten(self) -> Any: ...
  @property
  @abc.abstractmethod
  def imag(self) -> Any: ...
  @abc.abstractmethod
  def item(self, *args) -> Any: ...
  @abc.abstractmethod
  def max(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,
          keepdims=None, initial=None, where=None) -> Any: ...
  @abc.abstractmethod
  def mean(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,
           out=None, keepdims=False, *, where=None,) -> Any: ...
  @abc.abstractmethod
  def min(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,
          keepdims=None, initial=None, where=None) -> Any: ...
  @property
  @abc.abstractmethod
  def nbytes(self) -> Any: ...
  @abc.abstractmethod
  def nonzero(self, *, size=None, fill_value=None) -> Any: ...
  @abc.abstractmethod
  def prod(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,
           out=None, keepdims=None, initial=None, where=None) -> Any: ...
  @abc.abstractmethod
  def ptp(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,
          keepdims=False,) -> Any: ...
  @abc.abstractmethod
  def ravel(self, order='C') -> Any: ...
  @property
  @abc.abstractmethod
  def real(self) -> Any: ...
  @abc.abstractmethod
  def repeat(self, repeats, axis: Optional[int] = None, *,
             total_repeat_length=None) -> Any: ...
  @abc.abstractmethod
  def reshape(self, *args, order='C') -> Any: ...
  @abc.abstractmethod
  def round(self, decimals=0, out=None) -> Any: ...
  @abc.abstractmethod
  def searchsorted(self, v, side='left', sorter=None) -> Any: ...
  @abc.abstractmethod
  def sort(self, axis: Optional[int] = -1, kind='quicksort', order=None) -> Any: ...
  @abc.abstractmethod
  def squeeze(self, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> Any: ...
  @abc.abstractmethod
  def std(self, axis: Optional[Union[int, Tuple[int, ...]]] = None,
          dtype=None, out=None, ddof=0, keepdims=False, *, where=None) -> Any: ...
  @abc.abstractmethod
  def sum(self, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,
          out=None, keepdims=None, initial=None, where=None) -> Any: ...
  @abc.abstractmethod
  def swapaxes(self, axis1: int, axis2: int) -> Any: ...
  @abc.abstractmethod
  def take(self, indices, axis: Optional[int] = None, out=None,
           mode=None) -> Any: ...
  @abc.abstractmethod
  def tobytes(self, order='C') -> Any: ...
  @abc.abstractmethod
  def tolist(self) -> Any: ...
  @abc.abstractmethod
  def trace(self, offset=0, axis1: int = 0, axis2: int = 1, dtype=None,
            out=None) -> Any: ...
  @abc.abstractmethod
  def transpose(self, *args) -> Any: ...
  @abc.abstractmethod
  def var(self, axis: Optional[Union[int, Tuple[int, ...]]] = None,
          dtype=None, out=None, ddof=0, keepdims=False, *, where=None) -> Any: ...
  @abc.abstractmethod
  def view(self, dtype=None, type=None) -> Any: ...
  # Even though we don't always support the NumPy array protocol, e.g., for
  # tracer types, for type checking purposes we must declare support so we
  # implement the NumPy ArrayLike protocol.
  def __array__(self) -> Any: ...
  # JAX extensions
  @property
  @abc.abstractmethod
  def at(self) -> Any: ...
  @property
  @abc.abstractmethod
  def aval(self) -> Any: ...
  @property
  @abc.abstractmethod
  def weak_type(self) -> bool: ...
# Register the concrete array implementations as virtual subclasses so that
# isinstance(x, ndarray) succeeds for device arrays and sharded device arrays.
ndarray.register(device_array.DeviceArray)
for t in device_array.device_array_types:
  ndarray.register(t)
ndarray.register(pxla._SDA_BASE_CLASS)
| 34.391892 | 87 | 0.654224 | 9,225 | 0.906189 | 0 | 0 | 7,547 | 0.741356 | 0 | 0 | 1,464 | 0.143811 |
2da77faddf4b0a9e2d1e82abb164f673b693fc5a | 2,047 | py | Python | tests/cursor_test.py | lunixbochs/bearfield | 1dd2f6932af900393ca764d8aa1ec6c043dd24ed | [
"BSD-3-Clause"
] | 1 | 2020-02-10T04:10:47.000Z | 2020-02-10T04:10:47.000Z | tests/cursor_test.py | lunixbochs/bearfield | 1dd2f6932af900393ca764d8aa1ec6c043dd24ed | [
"BSD-3-Clause"
] | null | null | null | tests/cursor_test.py | lunixbochs/bearfield | 1dd2f6932af900393ca764d8aa1ec6c043dd24ed | [
"BSD-3-Clause"
] | null | null | null | """Tests for the cursor module."""
from __future__ import absolute_import
from . import common
from bearfield import cursor, Document, Field, Query
class TestCursor(common.TestCase):
    """Test the Cursor class."""
    # Minimal document type bound to the 'test' connection for these tests.
    class Document(Document):
        class Meta:
            connection = 'test'
        index = Field(int)
        name = Field(str)
    def setUp(self):
        """Insert two fixture documents into a fresh 'cursor' collection."""
        super(TestCursor, self).setUp()
        self.collection = self.connection['cursor']
        self.docs = [
            {'index': 1, 'name': 'first'},
            {'index': 2, 'name': 'second'},
        ]
        for doc in self.docs:
            # insert() returns the generated _id; keep it for later asserts.
            doc['_id'] = self.collection.insert(doc)
    def test_connection(self):
        """Cursor.connection"""
        cur = cursor.Cursor(self.Document(), self.collection, None, None, False)
        self.assertEqual(cur.connection, self.connection, "cursor connection is incorrect")
    def test_find(self):
        """Cursor.find"""
        # find() on an existing cursor should AND the new query into the old.
        q1 = Query({'index': 1})
        q2 = Query({'name': 'first'})
        qr = q1 & q2
        cur = cursor.Cursor(self.Document(), self.collection, q1, None, False)
        cur = cur.find(q2)
        self.assertEqual(cur.query.criteria, qr.criteria, "cursor has invalid criteria")
    def test_getitem(self):
        """Cursor.__getitem___"""
        cur = cursor.Cursor(self.Document(), self.collection, {'index': 1}, None, False)
        doc = cur[0]
        have = doc._encode()
        want = {'_id': doc._id}
        want.update(self.docs[0])
        self.assertEqual(have, want, "returned document is incorrect")
    def test_iter(self):
        """Cursor.__iter__"""
        cur = cursor.Cursor(self.Document(), self.collection, {'index': 1}, None, False)
        it = cur.__iter__()
        self.assertIsInstance(it, cursor.Cursor, "returned value has invalid type")
    def test_close(self):
        """Cursor.close"""
        # Closing must be idempotent, and the cursor must stay usable for
        # operations (count) that re-query rather than walk the cursor.
        cur = cursor.Cursor(self.Document(), self.collection, {'index': 1}, None, False)
        cur.close()
        cur.count()
        cur.close()
| 32.492063 | 91 | 0.587201 | 1,896 | 0.926234 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.194431 |
2da7cbb4021480710bdd309c7b2cd4bb7eabafd6 | 1,417 | py | Python | probability_combinatorics/combinatoric.py | codecakes/random_games | 1e670021ec97a196726e937e658878dc63ba9d34 | [
"MIT"
] | null | null | null | probability_combinatorics/combinatoric.py | codecakes/random_games | 1e670021ec97a196726e937e658878dc63ba9d34 | [
"MIT"
] | null | null | null | probability_combinatorics/combinatoric.py | codecakes/random_games | 1e670021ec97a196726e937e658878dc63ba9d34 | [
"MIT"
] | null | null | null | from decimal import Decimal
from math import e, factorial
def combination(num, den):
    """
    Return nCr -- the binomial coefficient "num choose den" -- as a Decimal.

    Out-of-range arguments (den < 0 or den > num) yield Decimal(0), the
    standard convention for binomial coefficients; the original wrongly
    returned 1 in that case.  Uses range() (not xrange) so it also runs on
    Python 3.
    """
    if not (0 <= den <= num):
        return Decimal(0)
    # Exploit the symmetry C(n, r) == C(n, n - r) to keep the loops short.
    r = min(den, num - den)
    numerator = Decimal(1)
    denominator = Decimal(1)
    for i in range(r):
        numerator *= num - i       # falling factorial n * (n-1) * ...
        denominator *= r - i       # r!
    return numerator / denominator
def permutation(num, den):
    """
    Return nPr -- the number of ordered selections of den items from num --
    as a Decimal.

    Out-of-range arguments (den < 0 or den > num) yield Decimal(0); the
    original wrongly returned 1.  Uses range() so it also runs on Python 3.
    """
    if not (0 <= den <= num):
        return Decimal(0)
    result = Decimal(1)
    for i in range(den):
        result *= num - i          # n * (n-1) * ... * (n-r+1)
    return result
#more on Binomial P: http://stattrek.com/probability-distributions/binomial.aspx
def binomial_probability_distro(n, r, p):
    """Probability of exactly r successes in n Bernoulli trials with success probability p."""
    return float(combination(n, r)) * (p**r) * (1-p)**(n-r)
def expected_value(k, n, r, p):
    """Contribution of value ``k`` weighted by the Binomial(n, p) pmf at r.

    (Was a lambda assignment; a def keeps the same call signature while
    following PEP 8.)
    """
    return k * binomial_probability_distro(n, r, p)


def total_expected_value(n, p):
    """Mean of a Binomial(n, p) variable, summed term by term (equals n * p)."""
    # range() instead of xrange() keeps this working on Python 3 as well.
    return sum(expected_value(k, n, k, p) for k in range(1, n + 1))
def poisson_value(Y, r):
    """
    Poisson probability mass function.

    Given the expected value E(X) = Y (= n * p in the binomial limit,
    where p = Y/n), returns P(X = r) = Y**r / r! * e**-Y.
    """
    rate_term = float(Y ** r) / factorial(r)
    decay_term = e ** (-Y)
    return rate_term * decay_term
| 26.240741 | 88 | 0.531404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 348 | 0.245589 |
2da83aad3727d404e8eb161796f19ec8b932423c | 471 | py | Python | tests/conftest.py | bcsummers/falcon-provider-redis | fb2644530196448aff6c1924d8bd533f0ce4ee8c | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | bcsummers/falcon-provider-redis | fb2644530196448aff6c1924d8bd533f0ce4ee8c | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | bcsummers/falcon-provider-redis | fb2644530196448aff6c1924d8bd533f0ce4ee8c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Testing conf module."""
# third-party
import pytest
from falcon import testing
from .app import app_hook, app_middleware
@pytest.fixture
def client_hook() -> testing.TestClient:
    """Falcon test client wired to the hook-based app variant."""
    return testing.TestClient(app_hook)
@pytest.fixture
def client_middleware() -> testing.TestClient:
    """Falcon test client wired to the middleware-based app variant."""
    return testing.TestClient(app_middleware)
| 23.55 | 58 | 0.73673 | 0 | 0 | 0 | 0 | 316 | 0.670913 | 0 | 0 | 164 | 0.348195 |
2da8c8644c0e6cac3083e9093738eebd0e2b170b | 1,574 | py | Python | sortingview/SpikeSortingView/create_position_pdf_plot.py | garrettmflynn/sortingview | 0bb3df40d5d031ec651c4821f928787bbee71fbb | [
"Apache-2.0"
] | 2 | 2021-11-19T04:51:42.000Z | 2022-03-12T23:36:19.000Z | sortingview/experimental/SpikeSortingView/create_position_pdf_plot.py | magland/sortingview | 0b1be9d55048cd4b8a0b6b6733bd7d35cb440aa7 | [
"Apache-2.0"
] | 172 | 2021-05-10T17:39:15.000Z | 2022-03-18T21:46:15.000Z | sortingview/SpikeSortingView/create_position_pdf_plot.py | garrettmflynn/sortingview | 0bb3df40d5d031ec651c4821f928787bbee71fbb | [
"Apache-2.0"
] | 2 | 2021-08-29T20:13:57.000Z | 2022-03-12T23:36:34.000Z | from typing import List, Union
import numpy as np
from .Figure import Figure
def create_position_pdf_plot(*, start_time_sec: np.float32, sampling_frequency: np.float32, pdf: np.ndarray, label: str):
    """Build a PositionPdfPlot figure from a (time, position) pdf array.

    Each time row is rescaled to its own maximum and quantized to uint8
    percentages (0-100) to keep the serialized payload small.

    Args:
        start_time_sec: start time of the first row, in seconds.
        sampling_frequency: rows per second.
        pdf: 2-D array, shape (num_timepoints, num_positions); assumed
            non-negative -- TODO confirm with callers.
        label: figure label.
    """
    row_max = np.max(pdf, axis=1, keepdims=True)
    # Guard all-zero rows: the original divided by 0 there, producing NaNs
    # whose cast to uint8 is undefined; such rows now quantize to 0.
    safe_max = np.where(row_max > 0, row_max, 1)
    quantized = (pdf / safe_max * 100).astype(np.uint8)
    data = {
        'type': 'PositionPdfPlot',
        'pdf': quantized,
        'samplingFrequency': sampling_frequency,
        'startTimeSec': start_time_sec
    }
    return Figure(
        data=data,
        label=label
    )
def create_live_position_pdf_plot(*, start_time_sec: np.float32, end_time_sec: np.float32, sampling_frequency: np.float32, num_positions: int, pdf_object: dict, segment_size: int, multiscale_factor: int, label: str):
    """Build a LivePositionPdfPlot figure that references a server-side pdf
    object instead of embedding the data, for lazily-loaded display."""
    figure_data = {
        'type': 'LivePositionPdfPlot',
        'pdfObject': pdf_object,
        'startTimeSec': start_time_sec,
        'endTimeSec': end_time_sec,
        'numPositions': num_positions,
        'samplingFrequency': sampling_frequency,
        'segmentSize': segment_size,
        'multiscaleFactor': multiscale_factor
    }
    return Figure(data=figure_data, label=label)
# def _get_subsample_inds(timestamps: np.array, sampling_frequency: float):
# dt = 1 / sampling_frequency
# ret = []
# last_t = timestamps[0] - dt * 2
# for i in range(len(timestamps)):
# delta = timestamps[i] - last_t
# if delta >= dt * 0.95:
# ret.append(i)
# last_t = timestamps[i]
# return ret
| 31.48 | 216 | 0.622618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 575 | 0.365311 |
2da911fdfe3d966fb015f3569a027ce8ff19ecfb | 976 | py | Python | Prototype Pygame/arrays.py | KValexander/own_rts | 6bfeadb5c5d29461471c84d883b616117cea79f5 | [
"MIT"
] | null | null | null | Prototype Pygame/arrays.py | KValexander/own_rts | 6bfeadb5c5d29461471c84d883b616117cea79f5 | [
"MIT"
] | null | null | null | Prototype Pygame/arrays.py | KValexander/own_rts | 6bfeadb5c5d29461471c84d883b616117cea79f5 | [
"MIT"
] | null | null | null | # Connect files
from configs import *
# Arrays
items = []
selectedItems = []
# Interface arrays
buttons = []
surfaces = []
# Getting item
def getItemById(ident):
for item in items:
if item.id == ident:
return item
# Removing item
def removeItem(item):
items.remove(item)
# Removing items
def removeItems():
for item in selectedItems:
items.remove(item)
clearSelection()
# Adding items in selection items
def addSelection(item):
item.selected = True
selectedItems.append(item)
# Clear selected items
def clearSelection():
for item in items:
item.selected = False
selectedItems.clear()
# Clear buttons
def clearButtons():
buttons.clear()
# Clear surfaces
def clearSurfaces():
surfaces.clear()
# Import templates.  Bug fix: ``Soldier`` was referenced below but never
# imported, so case == "soldier" raised NameError (presumably templates
# defines Soldier -- confirm).
from templates import Worker, Soldier


def addItem(case, counter, x, y, faction):
    """Create a unit of the given kind ("worker" or "soldier") and register it.

    Raises ValueError for an unknown kind instead of the original
    UnboundLocalError on ``item``.
    """
    if case == "worker":
        item = Worker(counter, x, y, faction)
    elif case == "soldier":
        item = Soldier(counter, x, y, faction)
    else:
        raise ValueError("unknown item kind: %r" % (case,))
    items.append(item)
| 16.542373 | 42 | 0.71209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 220 | 0.22541 |
2daba1326e1f0508a50319a46a2356919a73deaf | 6,236 | py | Python | webapp/app.py | liaosvcaf/earlyAlertOfLaws | 9c348304c977bf8266f41a26e8ef1b9625bf2624 | [
"BSD-3-Clause"
] | null | null | null | webapp/app.py | liaosvcaf/earlyAlertOfLaws | 9c348304c977bf8266f41a26e8ef1b9625bf2624 | [
"BSD-3-Clause"
] | 32 | 2019-06-16T18:37:56.000Z | 2020-04-13T05:46:51.000Z | webapp/app.py | liaosvcaf/earlyAlertOfLaws | 9c348304c977bf8266f41a26e8ef1b9625bf2624 | [
"BSD-3-Clause"
] | 1 | 2019-06-19T19:26:47.000Z | 2019-06-19T19:26:47.000Z | import os
import markdown2
from forms import AddKeywordForm, SubscribeEmailForm, TimeWindowForm
from flask import (flash, render_template, request, escape, redirect, url_for,
session, abort)
from flask_paginate import Pagination, get_page_parameter
from parsing.notifications import send_email_subs_start_notification
from parsing.parsing_options import (email_server, email_acc, email_port,
email_pass)
from init_app import app
from models import Bill
def get_all_keywords():
    """Read keywords.txt and return its non-blank lines, stripped, in order."""
    with open('keywords.txt', 'r') as keyword_file:
        raw_lines = keyword_file.read().splitlines()
    keywords = []
    for line in raw_lines:
        stripped = line.strip()
        if stripped:
            keywords.append(stripped)
    return keywords
def delete_email_from_list(email):
    """Rewrite subscribed_emails.txt with every record for ``email`` removed.

    Records are colon-separated lines of the form ``email:keywords:window``;
    a line is dropped when its first field equals ``email``.
    """
    with open('subscribed_emails.txt', 'r') as subs_file:
        records = [entry.split(":") for entry in subs_file.read().splitlines()]
    kept = [":".join(fields) for fields in records if fields[0] != email]
    with open('subscribed_emails.txt', 'w') as subs_file:
        subs_file.write("\n".join(kept))
def subscribe_email(email, kws, time_limit):
    """Record a subscription as ``email:kws:time_limit`` in subscribed_emails.txt.

    Any existing record for the address is removed first, so re-subscribing
    replaces the previous keyword list and time window.
    """
    delete_email_from_list(email)
    with open('subscribed_emails.txt', 'a') as f:
        f.write("\n" + email + ":" + kws + ":" + time_limit + "\n")
def unsubscribe_email(email):
    """Remove every subscription record for ``email``."""
    delete_email_from_list(email)
@app.route('/')
def redirect_main_page():
    """Redirect the bare root URL to the catch-all search page."""
    return redirect(url_for('search', search='all'))
@app.route('/search/<search>', methods=['GET', 'POST'])
def search(search):
    """Paginated bill search page.

    ``search == 'all'`` expands to the full keyword list from keywords.txt;
    any other value searches that single keyword.
    """
    # time window in years
    time_window = "20y"
    # A per-session override (set on the configure page) takes precedence.
    session_tw = session.get("time_window", None)
    if session_tw:
        time_window = session_tw
    per_page = 10
    page = request.args.get(get_page_parameter(), type=int, default=1)
    offset = (page - 1) * per_page
    if search == 'all':
        query = get_all_keywords()
    else:
        query = [search]
    bills, total = Bill.get_monitoring_results(query, page=page,
                                               per_page=per_page,
                                               time_limit=time_window)
    pagination = Pagination(page=page, total=total, per_page=per_page,
                            offset=offset,
                            css_framework='bootstrap4')
    return render_template('results.html',
                           results=bills,
                           per_page=per_page,
                           page=page,
                           pagination=pagination,
                           escape=escape)
@app.route('/configure', methods=['GET', 'POST'])
def configure():
    """Keyword / time-window configuration page.

    POST handles three form actions:
      * ``add``       -- append a lower-cased keyword to keywords.txt
      * ``delete``    -- rewrite keywords.txt without the chosen keyword
      * ``change_tw`` -- store the search time window in the session
    GET (and every POST) then re-renders the page with the current keywords.
    """
    if request.method == 'POST':
        if request.form.get("action_type") == "add":
            new_keyword = request.form.get('new_kw')
            new_keyword = new_keyword.lower()
            try:
                with open('keywords.txt', 'a') as f:
                    f.write('\n' + new_keyword + '\n')
                flash(f'New keyword {new_keyword} added')
            except Exception as e:
                flash(f'Error adding new keyword: {e}')
        elif request.form.get("action_type") == "delete":
            kw_to_delete = request.form.get('name')
            try:
                kws = get_all_keywords()
                new_kws_list = list(filter(lambda kw: kw != kw_to_delete, kws))
                with open('keywords.txt', 'w') as f:
                    f.write('\n'.join(new_kws_list))
            except Exception as e:
                # typo fix: message previously read "Error deletuing keyword"
                flash("Error deleting keyword: " + str(e))
            # NOTE: the original reset ``kws = []`` here -- dead code, since
            # ``kws`` is unconditionally reassigned below before use.
        elif request.form.get("action_type") == "change_tw":
            time_window = request.form.get("window")
            session["time_window"] = time_window
    #add_new_kw_form = AddKeywordForm(request.form)
    add_new_kw_form = AddKeywordForm()
    try:
        kws = get_all_keywords()
        print("Keywords: ", kws)
    except Exception as e:
        flash("Error getting keywords: " + str(e))
        kws = []
    form_tw = TimeWindowForm()
    return render_template('configure.html',
                           keywords=kws,
                           form_add=add_new_kw_form,
                           form_tw=form_tw)
@app.route('/subscribe', methods=['GET', 'POST'])
def subscribe():
    """Email-subscription page.

    On POST, stores the address with its comma-separated keywords and time
    limit, then sends a confirmation email.  Always re-renders the form.
    """
    if request.method == 'POST':
        email = request.form.get('email')
        kws = request.form.get('kws')
        kws = [kw.strip() for kw in kws.split(",")]
        time_limit = request.form.get('time_limit')
        try:
            subscribe_email(email, ",".join(kws), time_limit)
            send_email_subs_start_notification(email, kws, time_limit, email_server,
                                               email_acc, email_port, email_pass)
        except Exception as e:
            flash('Error: ' + str(e))
        else:
            # typo fix: message previously read "Subscription successsful"
            flash('Subscription successful')
            flash("Check your email. If you didn't receive email, view spam folder")
    form = SubscribeEmailForm(request.form)
    #form.time_limit.choices = get_time_windows()
    return render_template('subscribe.html', form=form)
@app.route('/unsubs/<email>')
def unsubscribe(email):
    """Unsubscribe link target: drop every record for ``email``."""
    try:
        unsubscribe_email(email)
        # typo fix: message previously read "Unubscribed successsful"
        flash('Unsubscribed successfully')
    except Exception as e:
        flash('Error: ' + str(e))
    return render_template('unsubscribe.html')
@app.route('/links', methods=['GET'])
def links():
    """Render links.txt (comma-separated fields per line) as a links page.

    A missing or unreadable file yields an empty list rather than an error.
    """
    try:
        with open('links.txt', 'r') as f:
            lines = [line.strip() for line in f.readlines()]
        links = [line.split(',') for line in lines]
    except OSError:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; only file errors are expected here.
        links = []
    return render_template('links.html', links=links)
@app.route('/help', methods=['GET'])
def help_route():
    """Render help.md as HTML; respond 404 when the file is absent."""
    if not os.path.exists("help.md"):
        abort(404)
    with open("help.md", "r") as f:
        page_html = markdown2.markdown(f.read())
    return render_template('help.html', page_html=page_html)
@app.route('/bills/<bill_leginfo_id>')
def bill_info(bill_leginfo_id):
    """Detail page for one bill, looked up by its leginfo id."""
    bill = Bill.query.filter(Bill.leginfo_id==bill_leginfo_id).first()
    # NOTE(review): ``bill`` is None for an unknown id; the template
    # presumably copes with that -- confirm.
    return render_template('bill_page.html', bill=bill)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with a matching status code."""
    return render_template('404.html'), 404
if __name__ == '__main__':
    #app.run("localhost", port=8080)
    # Development entry point: listen on all interfaces, standard HTTP port.
    app.run("0.0.0.0", port=80)
| 36.899408 | 85 | 0.581944 | 0 | 0 | 0 | 0 | 4,835 | 0.775337 | 0 | 0 | 1,080 | 0.173188 |
2daf875f81bee0d399c3a9fc2f2a690e79795655 | 46,895 | py | Python | colossalai/nn/layer/parallel_1d/layers.py | jiangz17THU/ColossalAI | 354b7954d1fa6b21a5ba566f0d5ec099280ad315 | [
"Apache-2.0"
] | null | null | null | colossalai/nn/layer/parallel_1d/layers.py | jiangz17THU/ColossalAI | 354b7954d1fa6b21a5ba566f0d5ec099280ad315 | [
"Apache-2.0"
] | null | null | null | colossalai/nn/layer/parallel_1d/layers.py | jiangz17THU/ColossalAI | 354b7954d1fa6b21a5ba566f0d5ec099280ad315 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
from collections import OrderedDict
from typing import Callable, Tuple
import torch
import torch.nn.functional as F
from colossalai.communication import broadcast
from colossalai.context import ParallelMode, seed
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as env
from colossalai.kernel import LayerNorm
from colossalai.nn import init as init
from colossalai.registry import LAYERS
from colossalai.utils.checkpointing import (broadcast_state_dict, gather_tensor_parallel_state_dict,
partition_tensor_parallel_state_dict)
from colossalai.utils.cuda import get_current_device
from torch import Tensor
from torch.nn.parameter import Parameter
from ..vanilla import VanillaPatchEmbedding
from ..base_layer import ParallelLayer
from ..colossalai_layer._utils import ColossalaiModule
from ..utils import divide, set_tensor_parallel_attribute_by_partition
from ._utils import (gather_forward_split_backward, get_parallel_input, reduce_grad, reduce_input, set_parallel_input,
split_forward_gather_backward)
@LAYERS.register_module
class Linear1D(ColossalaiModule):
    r"""Linear layer for 1D parallelism.
    Args:
        in_features (int): size of each input sample.
        out_features (int): size of each output sample.
        bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        gather_output (bool, optional): Whether to call all-gather on output, defaults to False.
        skip_bias_add (bool, optional): If set to ``True``, it will skip bias add for linear layer,
            which is preserved for kernel fusion, defaults to False
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (:class:`typing.Callable`, optional):
            The initializer of bias, defaults to xavier uniform initializer.
    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 gather_output: bool = False,
                 skip_bias_add: bool = False,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        # Choose the parallel split based on whether the incoming tensor is
        # already partitioned: an unsplit input gets a column-parallel linear
        # (output dim sharded), a split input gets a row-parallel linear
        # (input dim sharded), so consecutive Linear1D layers compose.
        parallel_input = get_parallel_input()
        if not parallel_input:
            layer = Linear1D_Col(in_features,
                                 out_features,
                                 bias=bias,
                                 dtype=dtype,
                                 gather_output=gather_output,
                                 skip_bias_add=skip_bias_add,
                                 weight_initializer=weight_initializer,
                                 bias_initializer=bias_initializer)
        else:
            layer = Linear1D_Row(in_features,
                                 out_features,
                                 bias=bias,
                                 dtype=dtype,
                                 parallel_input=parallel_input,
                                 skip_bias_add=skip_bias_add,
                                 weight_initializer=weight_initializer,
                                 bias_initializer=bias_initializer)
        super().__init__(layer)
@LAYERS.register_module
class LayerNorm1D(ColossalaiModule):
    r"""
    Layer Normalization for colossalai
    :param normalized_shape: input shape from an expected input
        of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1]
        \times \ldots \times \text{normalized_shape}[-1]]`
        If a single integer is used, it is treated as a singleton list, and this module will
        normalize over the last dimension which is expected to be of that specific size.
    :type normalized_shape: int
    :param eps: a value added to the denominator for numerical stability, defaults to 1e-05
    :type eps: float, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    """
    def __init__(self, normalized_shape: int, eps=1e-05, dtype=None):
        # LayerNorm parameters are replicated (not sharded) across the
        # tensor-parallel group; checkpoint I/O below keeps them in sync.
        norm = LayerNorm(normalized_shape, eps=eps, device=get_current_device(), dtype=dtype)
        super().__init__(norm)
    def _load_from_state_dict(self, state_dict, prefix, *args):
        """Load weight/bias on tensor-parallel rank 0 only, then broadcast
        them to every rank of the 1D parallel group."""
        local_state = OrderedDict()
        weight_key = prefix + 'weight'
        bias_key = prefix + 'bias'
        if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
            # weight
            weight = state_dict.pop(weight_key, None)
            if weight is not None:
                local_state[weight_key] = weight
            # bias
            bias = state_dict.pop(bias_key, None)
            if bias is not None:
                local_state[bias_key] = bias
        local_state = broadcast_state_dict(local_state, ParallelMode.PARALLEL_1D)
        super()._load_from_state_dict(local_state, prefix, *args)
    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # Parameters are replicated, so only rank 0 writes the checkpoint
        # entries to avoid duplicates.
        if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
            super()._save_to_state_dict(destination, prefix, keep_vars)
@LAYERS.register_module
class Classifier1D(ParallelLayer):
    r"""RowLinear with given weight. Classifier of 1D parallelism.

    Args:
        in_features (int): size of each input sample.
        num_classes (int): number of classes.
        weight (:class:`torch.nn.Parameter`, optional): weight of the classifier, defaults to None.
        bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (:class:`typing.Callable`, optional):
            The initializer of bias, defaults to xavier uniform initializer.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(self,
                 in_features: int,
                 num_classes: int,
                 weight: Parameter = None,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        super().__init__()
        self.in_features = in_features
        self.num_classes = num_classes
        self.parallel_input = get_parallel_input()
        # Divide the weight matrix along the last dimension.
        self.input_size_per_partition = divide(in_features, gpc.tensor_parallel_size)
        # Parameters.
        # Initialize weight.
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        if weight is not None:
            # Reuse an externally supplied weight (e.g. tied with an embedding);
            # this layer then neither initializes nor checkpoints it.
            self.weight = weight
            self.has_weight = False
        else:
            self.weight = Parameter(torch.empty(self.num_classes, self.input_size_per_partition, **factory_kwargs))
            self.has_weight = True
        if bias:
            # Bias is replicated (full num_classes length), not partitioned.
            self.bias = Parameter(torch.empty(self.num_classes, **factory_kwargs))
        else:
            self.bias = None
        with seed(ParallelMode.TENSOR):
            self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
        set_parallel_input(False)
        env.vocab_parallel = False

    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        fan_in, fan_out = self.in_features, self.num_classes
        if self.has_weight:
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
        if self.bias is not None:
            bias_initializer(self.bias, fan_in=fan_in)
            # Keep the replicated bias identical on every 1D-parallel rank.
            broadcast(self.bias, gpc.get_ranks_in_group(ParallelMode.PARALLEL_1D)[0], ParallelMode.PARALLEL_1D)

    def _set_tensor_parallel_attributes(self):
        if self.has_weight:
            num_partition = gpc.get_world_size(ParallelMode.TENSOR)
            set_tensor_parallel_attribute_by_partition(self.weight, num_partition)

    def _load_from_state_dict(self, state_dict, prefix, *args):
        """Load the full checkpoint on rank 0, then partition the weight along its last dim."""
        local_state = OrderedDict()
        weight_key = prefix + 'weight'
        bias_key = prefix + 'bias'
        if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
            # weight
            if self.has_weight:
                weight = state_dict.pop(weight_key, None)
                if weight is not None:
                    local_state[weight_key] = weight
            # bias
            if self.bias is not None:
                bias = state_dict.pop(bias_key, None)
                if bias is not None:
                    local_state[bias_key] = bias
        # Weight is split along dim -1; bias is replicated (partition False).
        local_state = partition_tensor_parallel_state_dict(local_state,
                                                           ParallelMode.PARALLEL_1D,
                                                           dims={
                                                               weight_key: -1,
                                                               bias_key: 0
                                                           },
                                                           partition_states={
                                                               weight_key: True,
                                                               bias_key: False
                                                           })
        super()._load_from_state_dict(local_state, prefix, *args)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        """Gather the partitioned weight back into a full tensor before saving."""
        weight_key = prefix + 'weight'
        bias_key = prefix + 'bias'
        local_state = OrderedDict()
        if self.has_weight:
            local_state[weight_key] = self.weight
        if self.bias is not None:
            local_state[bias_key] = self.bias
        local_state = gather_tensor_parallel_state_dict(local_state,
                                                        ParallelMode.PARALLEL_1D,
                                                        dims={
                                                            weight_key: -1,
                                                            bias_key: 0
                                                        },
                                                        partition_states={
                                                            weight_key: True,
                                                            bias_key: False
                                                        },
                                                        keep_vars=keep_vars)
        destination.update(local_state)

    def forward(self, input_: Tensor) -> Tensor:
        # Set up backprop all-reduce.
        if self.parallel_input:
            # Input already split along the feature dim by an upstream layer.
            assert input_.shape[-1] == self.weight.shape[-1], \
                'Invalid shapes in Classifier1D forward: input={}, weight={}. Expected last dim of input {}.'.format(
                    input_.shape, self.weight.shape, self.weight.shape[-1])
            input_ = input_
        else:
            # Full input: split it here along the last dim for the row-parallel matmul.
            assert divide(input_.shape[-1], gpc.tensor_parallel_size) == self.weight.shape[-1], \
                'Invalid shapes in Classifier1D forward: input={}, weight={}. Expected last dim of input {}.'.format(
                    input_.shape, self.weight.shape, self.weight.shape[-1] * gpc.tensor_parallel_size)
            input_ = split_forward_gather_backward(input_, ParallelMode.PARALLEL_1D, dim=-1)
        output_parallel = F.linear(input_, self.weight)
        # Partial sums from each rank are all-reduced into the full logits.
        output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
        if self.bias is not None:
            output = output + self.bias
        return output
@LAYERS.register_module
class VocabParallelClassifier1D(ParallelLayer):
    r"""ColLinear with given weight. Classifier of 1D parallelism.

    Args:
        in_features (int): size of each input sample.
        num_classes (int): number of classes.
        weight (:class:`torch.nn.Parameter`, optional): weight of the classifier, defaults to None.
        bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (:class:`typing.Callable`, optional):
            The initializer of bias, defaults to xavier uniform initializer.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(self,
                 in_features: int,
                 num_classes: int,
                 weight: Parameter = None,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        super().__init__()
        self.in_features = in_features
        self.num_classes = num_classes
        self.parallel_input = get_parallel_input()
        # Unlike Classifier1D, the class (vocab) dimension is partitioned here,
        # so each rank holds num_classes / tp_size output rows.
        self.num_classes_per_partition = divide(num_classes, gpc.tensor_parallel_size)
        # Parameters.
        # Initialize weight.
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        if weight is not None:
            # Externally supplied (e.g. tied) weight: not initialized/saved by this layer.
            self.weight = weight
            self.has_weight = False
        else:
            self.weight = Parameter(torch.empty(self.num_classes_per_partition, self.in_features, **factory_kwargs))
            self.has_weight = True
        if bias:
            # Bias is partitioned along the class dim as well, so no broadcast is needed.
            self.bias = Parameter(torch.empty(self.num_classes_per_partition, **factory_kwargs))
        else:
            self.bias = None
        with seed(ParallelMode.TENSOR):
            self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
        set_parallel_input(False)
        env.vocab_parallel = True

    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        fan_in, fan_out = self.in_features, self.num_classes
        if self.has_weight:
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
        if self.bias is not None:
            bias_initializer(self.bias, fan_in=fan_in)

    def _set_tensor_parallel_attributes(self):
        num_partition = gpc.get_world_size(ParallelMode.TENSOR)
        if self.has_weight:
            set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
        if self.bias is not None:
            set_tensor_parallel_attribute_by_partition(self.bias, num_partition)

    def _load_from_state_dict(self, state_dict, prefix, *args):
        """Load the full checkpoint on rank 0, then partition weight and bias along dim 0."""
        local_state = OrderedDict()
        weight_key = prefix + 'weight'
        bias_key = prefix + 'bias'
        if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
            # weight
            if self.has_weight:
                weight = state_dict.pop(weight_key, None)
                if weight is not None:
                    local_state[weight_key] = weight
            # bias
            if self.bias is not None:
                bias = state_dict.pop(bias_key, None)
                if bias is not None:
                    local_state[bias_key] = bias
        # Both weight and bias are split along the class dim (dim 0).
        local_state = partition_tensor_parallel_state_dict(local_state,
                                                           ParallelMode.PARALLEL_1D,
                                                           dims={
                                                               weight_key: 0,
                                                               bias_key: 0
                                                           },
                                                           partition_states={
                                                               weight_key: True,
                                                               bias_key: True
                                                           })
        super()._load_from_state_dict(local_state, prefix, *args)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        """Gather the class-dim partitions back into full tensors before saving."""
        weight_key = prefix + 'weight'
        bias_key = prefix + 'bias'
        local_state = OrderedDict()
        if self.has_weight:
            local_state[weight_key] = self.weight
        if self.bias is not None:
            local_state[bias_key] = self.bias
        local_state = gather_tensor_parallel_state_dict(local_state,
                                                        ParallelMode.PARALLEL_1D,
                                                        dims={
                                                            weight_key: 0,
                                                            bias_key: 0
                                                        },
                                                        partition_states={
                                                            weight_key: True,
                                                            bias_key: True
                                                        },
                                                        keep_vars=keep_vars)
        destination.update(local_state)

    def forward(self, input_: Tensor) -> Tensor:
        assert input_.shape[-1] == self.weight.shape[-1], \
            'Invalid shapes in VocabParallelClassifier1D forward: input={}, weight={}. Expected last dim of input {}.'.format(
                input_.shape, self.weight.shape, self.weight.shape[-1])
        # Set up backprop all-reduce.
        input_parallel = reduce_grad(input_, ParallelMode.PARALLEL_1D)
        # Matrix multiply. Output stays partitioned along the class dim.
        output = F.linear(input_parallel, self.weight, self.bias)
        return output
@LAYERS.register_module
class Linear1D_Col(ParallelLayer):
    r"""Linear layer with column parallelism.

    The linear layer is defined as :math:`Y = XA + b`. A is parallelized along
    its second dimension as :math:`A = [A_1, ..., A_p]`.

    Args:
        in_features (int): size of each input sample.
        out_features (int): size of each output sample.
        bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        gather_output (bool, optional): If true, call all-gather on output and make Y available
                    to all GPUs, otherwise, every GPU will have its output
                    which is :math:`Y_i = XA_i`, defaults to False
        skip_bias_add (bool, optional): If set to ``True``, it will skip bias add for linear layer,
            which is preserved for kernel fusion, defaults to False
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (:class:`typing.Callable`, optional):
            The initializer of bias, defaults to xavier uniform initializer.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 gather_output: bool = False,
                 skip_bias_add: bool = False,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        super().__init__()
        # Keep input parameters
        self.in_features = in_features
        self.out_features = out_features
        self.gather_output = gather_output
        self.skip_bias_add = skip_bias_add
        if skip_bias_add and not bias:
            raise ValueError('cannot skip bias addition if bias is None')
        # Each rank owns out_features / tp_size columns of A (rows of the stored weight).
        self.out_features_per_partition = divide(out_features, gpc.tensor_parallel_size)
        # Parameters.
        # Initialize weight.
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        self.weight = Parameter(torch.empty(self.out_features_per_partition, self.in_features, **factory_kwargs))
        if bias:
            # Bias is partitioned along the output dim, matching the weight partition.
            self.bias = Parameter(torch.empty(self.out_features_per_partition, **factory_kwargs))
        else:
            self.bias = None
        with seed(ParallelMode.TENSOR):
            self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
        # If output stays split, downstream row-parallel layers can consume it directly.
        is_parallel_output = not self.gather_output
        set_parallel_input(is_parallel_output)

    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        fan_in, fan_out = self.in_features, self.out_features
        weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
        if self.bias is not None:
            bias_initializer(self.bias, fan_in=fan_in)

    def _set_tensor_parallel_attributes(self):
        num_partition = gpc.get_world_size(ParallelMode.TENSOR)
        set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
        if self.bias is not None:
            set_tensor_parallel_attribute_by_partition(self.bias, num_partition)

    def _load_from_state_dict(self, state_dict, prefix, *args):
        """Load the full checkpoint on rank 0, then partition weight and bias along dim 0."""
        local_state = OrderedDict()
        weight_key = prefix + 'weight'
        bias_key = prefix + 'bias'
        if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
            # weight
            weight = state_dict.pop(weight_key, None)
            if weight is not None:
                local_state[weight_key] = weight
            # bias
            if self.bias is not None:
                bias = state_dict.pop(bias_key, None)
                if bias is not None:
                    local_state[bias_key] = bias
        local_state = partition_tensor_parallel_state_dict(local_state,
                                                           ParallelMode.PARALLEL_1D,
                                                           dims={
                                                               weight_key: 0,
                                                               bias_key: 0
                                                           },
                                                           partition_states={
                                                               weight_key: True,
                                                               bias_key: True
                                                           })
        super()._load_from_state_dict(local_state, prefix, *args)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        """Gather the dim-0 partitions back into full tensors before saving."""
        weight_key = prefix + 'weight'
        bias_key = prefix + 'bias'
        local_state = OrderedDict({weight_key: self.weight})
        if self.bias is not None:
            local_state[bias_key] = self.bias
        local_state = gather_tensor_parallel_state_dict(local_state,
                                                        ParallelMode.PARALLEL_1D,
                                                        dims={
                                                            weight_key: 0,
                                                            bias_key: 0
                                                        },
                                                        partition_states={
                                                            weight_key: True,
                                                            bias_key: True
                                                        },
                                                        keep_vars=keep_vars)
        destination.update(local_state)

    # NOTE(review): the annotation promises a pair, but a single Tensor is
    # returned when skip_bias_add is False -- effectively Tensor | Tuple[Tensor, Tensor].
    def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]:
        assert input_.shape[-1] == self.weight.shape[-1], \
            'Invalid shapes in Linear1D_Col forward: input={}, weight={}. Expected last dim of input {}.'.format(
                input_.shape, self.weight.shape, self.weight.shape[-1])
        # Set up backprop all-reduce.
        input_parallel = reduce_grad(input_, ParallelMode.PARALLEL_1D)
        # Matrix multiply.
        bias = self.bias if not self.skip_bias_add else None
        output_parallel = F.linear(input_parallel, self.weight, bias)
        if self.gather_output:
            # All-gather across the partitions.
            output = gather_forward_split_backward(output_parallel, ParallelMode.PARALLEL_1D, dim=-1)
        else:
            output = output_parallel
        if self.skip_bias_add:
            return output, self.bias
        else:
            return output
@LAYERS.register_module
class Linear1D_Row(ParallelLayer):
    r""" Linear layer with row parallelism

    Args:
        in_features (int): size of each input sample.
        out_features (int): size of each output sample.
        bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        parallel_input (bool, optional): If set to ``True``, it's assumed that the input is split, defaults to False.
        skip_bias_add (bool, optional): If set to ``True``, it will skip bias add for linear layer,
            which is preserved for kernel fusion, defaults to False
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (:class:`typing.Callable`, optional):
            The initializer of bias, defaults to xavier uniform initializer.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 parallel_input: bool = True,
                 skip_bias_add: bool = False,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        super().__init__()
        # Keep input parameters
        self.in_features = in_features
        self.out_features = out_features
        self.parallel_input = parallel_input
        self.skip_bias_add = skip_bias_add
        if skip_bias_add and not bias:
            raise ValueError('cannot skip bias addition if bias is None')
        # Divide the weight matrix along the last dimension.
        self.input_size_per_partition = divide(in_features, gpc.tensor_parallel_size)
        # Parameters.
        # Initialize weight.
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        self.weight = Parameter(torch.empty(self.out_features, self.input_size_per_partition, **factory_kwargs))
        if bias:
            # Bias is replicated (full out_features length); kept in sync via broadcast below.
            self.bias = Parameter(torch.empty(self.out_features, **factory_kwargs))
        else:
            self.bias = None
        with seed(ParallelMode.TENSOR):
            self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
        # Row-parallel output is a full tensor after the all-reduce, not a partition.
        set_parallel_input(False)

    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        fan_in, fan_out = self.in_features, self.out_features
        weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
        if self.bias is not None:
            bias_initializer(self.bias, fan_in=fan_in)
            # Keep the replicated bias identical on every 1D-parallel rank.
            broadcast(self.bias, gpc.get_ranks_in_group(ParallelMode.PARALLEL_1D)[0], ParallelMode.PARALLEL_1D)

    def _set_tensor_parallel_attributes(self):
        num_partition = gpc.get_world_size(ParallelMode.TENSOR)
        set_tensor_parallel_attribute_by_partition(self.weight, num_partition)

    def _load_from_state_dict(self, state_dict, prefix, *args):
        """Load the full checkpoint on rank 0, then split the weight along its last dim."""
        local_state = OrderedDict()
        weight_key = prefix + 'weight'
        bias_key = prefix + 'bias'
        if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
            # weight
            weight = state_dict.pop(weight_key, None)
            if weight is not None:
                local_state[weight_key] = weight
            # bias
            if self.bias is not None:
                bias = state_dict.pop(bias_key, None)
                if bias is not None:
                    local_state[bias_key] = bias
        # Weight partitioned along dim -1; bias replicated (partition False).
        local_state = partition_tensor_parallel_state_dict(local_state,
                                                           ParallelMode.PARALLEL_1D,
                                                           dims={
                                                               weight_key: -1,
                                                               bias_key: 0
                                                           },
                                                           partition_states={
                                                               weight_key: True,
                                                               bias_key: False
                                                           })
        super()._load_from_state_dict(local_state, prefix, *args)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        """Gather the weight partitions along the last dim before saving."""
        weight_key = prefix + 'weight'
        bias_key = prefix + 'bias'
        local_state = OrderedDict({weight_key: self.weight})
        if self.bias is not None:
            local_state[bias_key] = self.bias
        local_state = gather_tensor_parallel_state_dict(local_state,
                                                        ParallelMode.PARALLEL_1D,
                                                        dims={
                                                            weight_key: -1,
                                                            bias_key: 0
                                                        },
                                                        partition_states={
                                                            weight_key: True,
                                                            bias_key: False
                                                        },
                                                        keep_vars=keep_vars)
        destination.update(local_state)

    # NOTE(review): returns (output, bias) when skip_bias_add is True even though
    # the annotation says Tensor.
    def forward(self, input_: Tensor) -> Tensor:
        # Set up backprop all-reduce.
        if self.parallel_input:
            # Input is already split along the feature dim by an upstream column-parallel layer.
            assert input_.shape[-1] == self.weight.shape[-1], \
                'Invalid shapes in Linear1D_Row forward: input={}, weight={}. Expected last dim of input {}.'.format(
                    input_.shape, self.weight.shape, self.weight.shape[-1])
            input_ = input_
        else:
            assert divide(input_.shape[-1], gpc.tensor_parallel_size) == self.weight.shape[-1], \
                'Invalid shapes in Linear1D_Row forward: input={}, weight={}. Expected last dim of input {}.'.format(
                    input_.shape, self.weight.shape, self.weight.shape[-1] * gpc.tensor_parallel_size)
            input_ = split_forward_gather_backward(input_, ParallelMode.PARALLEL_1D, dim=-1)
        output_parallel = F.linear(input_, self.weight)
        # Sum partial products from all ranks into the full output.
        output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
        if not self.skip_bias_add:
            if self.bias is not None:
                output = output + self.bias
            return output
        else:
            return output, self.bias
@LAYERS.register_module
class Embedding1D(ParallelLayer):
    r"""Embedding for 1D parallelism.

    Args:
        num_embeddings (int): number of embeddings.
        embedding_dim (int): dimension of embedding.
        padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient;
            therefore, the embedding vector at padding_idx is not updated during training,
            i.e. it remains as a fixed “pad”, defaults to None.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to normal initializer.

    The ``args`` and ``kwargs`` used in :class:`torch.nn.functional.embedding` should contain:
    ::

        max_norm (float, optional): If given, each embedding vector with norm larger than max_norm is
                    renormalized to have norm max_norm. Note: this will modify weight in-place.
        norm_type (float, optional): The p of the p-norm to compute for the max_norm option. Default 2.
        scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse
                    of frequency of the words in the mini-batch. Default False.
        sparse (bool, optional): If True, gradient w.r.t. weight will be a sparse tensor. Default False.

    More details about ``args`` and ``kwargs`` could be found in
    `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.functional.embedding.html#torch.nn.functional.embedding>`_.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_
    """

    def __init__(self,
                 num_embeddings: int,
                 embedding_dim: int,
                 padding_idx: int = None,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.normal_(),
                 *args,
                 **kwargs):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embed_dim = embedding_dim
        # The embedding (hidden) dimension is partitioned; the vocabulary is replicated.
        embed_dim_per_partition = divide(embedding_dim, gpc.tensor_parallel_size)
        self.padding_idx = padding_idx
        self.embed_args = args
        self.embed_kwargs = kwargs
        self.weight = Parameter(
            torch.empty((num_embeddings, embed_dim_per_partition), device=get_current_device(), dtype=dtype))
        self.reset_parameters(weight_initializer)
        self._set_tensor_parallel_attributes()
        set_parallel_input(False)

    def _set_tensor_parallel_attributes(self):
        set_tensor_parallel_attribute_by_partition(self.weight, gpc.tensor_parallel_size)

    def reset_parameters(self, weight_initializer) -> None:
        with seed(ParallelMode.TENSOR):
            fan_in, fan_out = self.num_embeddings, self.embed_dim
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        # Mirrors torch.nn.Embedding: the pad row starts (and stays) at zero.
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def _load_from_state_dict(self, state_dict, prefix, *args):
        """Load the full embedding table on rank 0, then split it along the hidden dim."""
        local_state = OrderedDict()
        weight_key = prefix + 'weight'
        if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
            # weight
            weight = state_dict.pop(weight_key, None)
            if weight is not None:
                local_state[weight_key] = weight
        local_state = partition_tensor_parallel_state_dict(local_state,
                                                           ParallelMode.PARALLEL_1D,
                                                           dims={weight_key: -1},
                                                           partition_states={weight_key: True})
        super()._load_from_state_dict(local_state, prefix, *args)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        """Gather the hidden-dim partitions into the full table before saving."""
        weight_key = prefix + 'weight'
        local_state = OrderedDict({weight_key: self.weight})
        local_state = gather_tensor_parallel_state_dict(local_state,
                                                        ParallelMode.PARALLEL_1D,
                                                        dims={weight_key: -1},
                                                        partition_states={weight_key: True},
                                                        keep_vars=keep_vars)
        destination.update(local_state)

    def forward(self, input_: Tensor) -> Tensor:
        # Each rank looks up its slice of the hidden dim, then the slices are gathered.
        output_parallel = F.embedding(input_, self.weight, self.padding_idx, *self.embed_args, **self.embed_kwargs)
        output = gather_forward_split_backward(output_parallel, ParallelMode.PARALLEL_1D, dim=-1)
        return output
@LAYERS.register_module
# NOTE(review): siblings in this file extend ParallelLayer; this one extends
# torch.nn.Module directly -- confirm whether that is intentional.
class VocabParallelEmbedding1D(torch.nn.Module):
    r"""Embedding parallelized in the vocabulary dimension.

    Args:
        num_embeddings (int): number of embeddings.
        embedding_dim (int): dimension of embedding.
        padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient;
            therefore, the embedding vector at padding_idx is not updated during training,
            i.e. it remains as a fixed “pad”, defaults to None.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to normal initializer.

    The ``args`` and ``kwargs`` used in :class:``torch.nn.functional.embedding`` should contain:
    ::

        max_norm (float, optional): If given, each embedding vector with norm larger than max_norm is
                    renormalized to have norm max_norm. Note: this will modify weight in-place.
        norm_type (float, optional): The p of the p-norm to compute for the max_norm option. Default 2.
        scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse
                    of frequency of the words in the mini-batch. Default False.
        sparse (bool, optional): If True, gradient w.r.t. weight will be a sparse tensor. Default False.

    More details about ``args`` and ``kwargs`` could be found in
    `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.functional.embedding.html#torch.nn.functional.embedding>`_.

    More details about initializer please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(self,
                 num_embeddings: int,
                 embedding_dim: int,
                 padding_idx: int = None,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.normal_(),
                 *args,
                 **kwargs):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embed_dim = embedding_dim
        self.padding_idx = padding_idx
        self.embed_args = args
        self.embed_kwargs = kwargs
        tensor_parallel_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
        tensor_parallel_rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
        # Each rank owns a contiguous [vocab_start_index, vocab_end_index) slice of the vocabulary.
        self.num_embeddings_per_partition = divide(num_embeddings, tensor_parallel_size)
        self.vocab_start_index = tensor_parallel_rank * self.num_embeddings_per_partition
        self.vocab_end_index = self.vocab_start_index + self.num_embeddings_per_partition
        self.weight = Parameter(
            torch.empty((self.num_embeddings_per_partition, self.embed_dim), device=get_current_device(), dtype=dtype))
        self.reset_parameters(weight_initializer)
        self._set_tensor_parallel_attributes()
        set_parallel_input(False)
        env.vocab_parallel = True

    def _set_tensor_parallel_attributes(self):
        set_tensor_parallel_attribute_by_partition(self.weight, gpc.tensor_parallel_size)

    def reset_parameters(self, weight_initializer) -> None:
        with seed(ParallelMode.TENSOR):
            fan_in, fan_out = self.num_embeddings, self.embed_dim
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        # Only the rank whose vocab slice contains padding_idx zeroes that (local) row.
        if self.padding_idx is not None and \
                self.padding_idx >= self.vocab_start_index and self.padding_idx < self.vocab_end_index:
            with torch.no_grad():
                self.weight[self.padding_idx - self.vocab_start_index].fill_(0)

    def _load_from_state_dict(self, state_dict, prefix, *args):
        """Load the full table on rank 0, then split it along the vocab dim (dim 0)."""
        local_state = OrderedDict()
        weight_key = prefix + 'weight'
        if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
            # weight
            weight = state_dict.pop(weight_key, None)
            if weight is not None:
                local_state[weight_key] = weight
        local_state = partition_tensor_parallel_state_dict(local_state,
                                                           ParallelMode.PARALLEL_1D,
                                                           dims={weight_key: 0},
                                                           partition_states={weight_key: True})
        super()._load_from_state_dict(local_state, prefix, *args)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        """Gather the vocab-dim partitions into the full table before saving."""
        weight_key = prefix + 'weight'
        local_state = OrderedDict({weight_key: self.weight})
        local_state = gather_tensor_parallel_state_dict(local_state,
                                                        ParallelMode.PARALLEL_1D,
                                                        dims={weight_key: 0},
                                                        partition_states={weight_key: True},
                                                        keep_vars=keep_vars)
        destination.update(local_state)

    def forward(self, input_: Tensor) -> Tensor:
        # Build the mask: tokens outside this rank's vocab slice.
        input_mask = (input_ < self.vocab_start_index) | (input_ >= self.vocab_end_index)
        # Mask the input: shift into local indices, clamping out-of-range tokens to 0.
        masked_input = input_.clone() - self.vocab_start_index
        masked_input[input_mask] = 0
        output_parallel = F.embedding(masked_input, self.weight, self.padding_idx, *self.embed_args,
                                      **self.embed_kwargs)
        # Mask the output embedding: out-of-slice rows contribute zeros to the sum.
        output_parallel[input_mask, :] = 0.
        # Reduce across all the model parallel GPUs.
        output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
        return output
@LAYERS.register_module
class Dropout1D(ParallelLayer):
    """Dropout layer of 1D parallelism.

    Args:
        p (float, optional): probability of an element to be zeroed, defaults 0.5.
        inplace (bool, optional): whether to do dropout in-place, default to be False.
    """

    def __init__(self, p: float = 0.5, inplace: bool = False):
        super().__init__()
        self.parallel_input = get_parallel_input()
        self.p = p
        self.inplace = inplace

    def forward(self, input_: Tensor) -> Tensor:
        # For a partitioned input, draw the dropout mask under the
        # tensor-parallel RNG state; otherwise use the default RNG state.
        if self.parallel_input:
            with seed(ParallelMode.TENSOR):
                return F.dropout(input_, self.p, self.training, self.inplace)
        return F.dropout(input_, self.p, self.training, self.inplace)
@LAYERS.register_module
class PatchEmbedding1D(ColossalaiModule):
    """
    2D Image to Patch Embedding

    :param img_size: image size
    :type img_size: int
    :param patch_size: patch size
    :type patch_size: int
    :param in_chans: number of channels of input image
    :type in_chans: int
    :param embed_size: size of embedding
    :type embed_size: int
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param flatten: whether to flatten output tensor, defaults to True
    :type flatten: bool, optional
    :param weight_initializer: The initializer of weight, defaults to kaiming uniform initializer
    :type weight_initializer: typing.Callable, optional
    :param bias_initializer: The initializer of bias, defaults to xavier uniform initializer
    :type bias_initializer: typing.Callable, optional
    :param position_embed_initializer: The initializer of position embedding, defaults to zero
    :type position_embed_initializer: typing.Callable, optional
    """

    def __init__(self,
                 img_size: int,
                 patch_size: int,
                 in_chans: int,
                 embed_size: int,
                 dtype: torch.dtype = None,
                 flatten: bool = True,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
                 position_embed_initializer: Callable = init.zeros_()):
        # The patch embedding itself is not tensor-parallel; the vanilla
        # implementation is wrapped and its parameters are replicated.
        embed = VanillaPatchEmbedding(img_size,
                                      patch_size,
                                      in_chans,
                                      embed_size,
                                      dtype=dtype,
                                      flatten=flatten,
                                      weight_initializer=weight_initializer,
                                      bias_initializer=bias_initializer,
                                      position_embed_initializer=position_embed_initializer)
        super().__init__(embed)

    def _load_from_state_dict(self, state_dict, prefix, *args):
        """Read all parameters on rank 0 and broadcast them to the other ranks."""
        local_state = OrderedDict()
        param_keys = [prefix + 'weight', prefix + 'bias', prefix + 'cls_token', prefix + 'pos_embed']
        if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
            for key in param_keys:
                param = state_dict.pop(key, None)
                if param is not None:
                    local_state[key] = param
        local_state = broadcast_state_dict(local_state, ParallelMode.PARALLEL_1D)
        super()._load_from_state_dict(local_state, prefix, *args)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # Parameters are replicated, so only tensor-parallel rank 0 saves them.
        if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
            super()._save_to_state_dict(destination, prefix, keep_vars)
| 47.803262 | 126 | 0.57906 | 45,440 | 0.968808 | 0 | 0 | 45,680 | 0.973925 | 0 | 0 | 12,278 | 0.261774 |
2dafc2ffcc26838e9b8053b355658613f3b90115 | 1,027 | py | Python | 3rdparty/wsgi_intercept/test/test_mechanoid.py | arda2525/fixofx | 1792d94697af682ca1d4a75cfefe98465d95a288 | [
"Apache-2.0"
] | 50 | 2015-01-01T00:14:04.000Z | 2020-11-26T04:44:30.000Z | 3rdparty/wsgi_intercept/test/test_mechanoid.py | arda2525/fixofx | 1792d94697af682ca1d4a75cfefe98465d95a288 | [
"Apache-2.0"
] | 3 | 2016-01-31T17:14:41.000Z | 2017-03-01T13:36:17.000Z | 3rdparty/wsgi_intercept/test/test_mechanoid.py | arda2525/fixofx | 1792d94697af682ca1d4a75cfefe98465d95a288 | [
"Apache-2.0"
] | 15 | 2015-10-29T09:04:21.000Z | 2022-01-19T17:33:25.000Z | #! /usr/bin/env python2.3
from wsgi_intercept.mechanoid_intercept import Browser
from nose.tools import with_setup
import wsgi_intercept
from wsgi_intercept import test_wsgi_app
###
_saved_debuglevel = None
def install(port=80):
    """Enable wsgi_intercept debug output and install an intercept for the test domain.

    Saves the previous ``wsgi_intercept.debuglevel`` so :func:`uninstall` can restore it.

    :param port: port to intercept on the fake domain (default 80).
    """
    # Bug fix: without the `global` declaration this assignment created a
    # local, so the module-level _saved_debuglevel stayed None and
    # uninstall() restored None instead of the original debuglevel.
    global _saved_debuglevel
    _saved_debuglevel, wsgi_intercept.debuglevel = wsgi_intercept.debuglevel, 1
    wsgi_intercept.add_wsgi_intercept('some_hopefully_nonexistant_domain', port, test_wsgi_app.create_fn)
def uninstall():
    # Restore the debuglevel saved by install().
    wsgi_intercept.debuglevel = _saved_debuglevel
@with_setup(install, uninstall)
def test_success():
    """Plain-HTTP fetch is served by the in-process WSGI intercept."""
    browser = Browser()
    browser.open('http://some_hopefully_nonexistant_domain:80/')
    assert test_wsgi_app.success()
@with_setup(install, uninstall)
def test_https_success():
    """HTTPS fetch (implicit port) is served by the intercept."""
    browser = Browser()
    browser.open('https://some_hopefully_nonexistant_domain/')
    assert test_wsgi_app.success()
@with_setup(lambda: install(443), uninstall)
def test_https_specific_port_success():
    """HTTPS fetch with an explicit :443 port is served by the intercept.

    Note: the final assert line was fused with extraction residue
    (``| 30.205882 | ...``), which made it a syntax error; this restores
    the plain assertion.
    """
    b = Browser()
    b.open('https://some_hopefully_nonexistant_domain:443/')
    assert test_wsgi_app.success()
2db05706caff34651b92ff77057d0e7d52474883 | 3,538 | py | Python | kneejerk/cli.py | NapsterInBlue/kneejerk | 8ea78e0e57c5078155983ac2bdf163ac1e14c639 | [
"MIT"
] | null | null | null | kneejerk/cli.py | NapsterInBlue/kneejerk | 8ea78e0e57c5078155983ac2bdf163ac1e14c639 | [
"MIT"
] | 15 | 2019-01-20T02:32:59.000Z | 2020-03-31T01:18:40.000Z | kneejerk/cli.py | NapsterInBlue/kneejerk | 8ea78e0e57c5078155983ac2bdf163ac1e14c639 | [
"MIT"
] | null | null | null | import click
import pathlib
import os
from kneejerk.image_server import score_images_in_dir
from kneejerk.data.saver import persist_scores, persist_metadata
from kneejerk.data.transfer import segment_data_from_csv, transfer_normalized_image_data
from kneejerk.data.utils import _get_classes, _get_max_image_dim, _ensure_path_exists
@click.group()
@click.pass_context
def main(ctx):
    """Root command group; seeds the click context with a shared state dict."""
    ctx.obj = {}
@main.command(help='Cycle through a directory and score images')
@click.option('--input_dir', '-i', help='Location of the images.',
              default='.')
@click.option('--output_dir', '-o', help='Location to output .csv file.',
              default='.')
@click.option('--shuffle', '-s', help='Shuffle served image order',
              default=1)
@click.option('--file-name', '-f', help='Name of .csv file',
              default='output.csv')
@click.option('--min', 'min_', help='Minimum acceptable score', default='0')
@click.option('--max', 'max_', help='Maximum acceptable score', default='1')
@click.option('--limit', '-l', help="Limit the number of images to serve")
@click.pass_context
def score(ctx, output_dir, input_dir, file_name, shuffle, min_, max_, limit):
    """Serve each image in input_dir for manual scoring and persist scores to a csv."""
    # Stash scoring bounds (and optional serve limit) on the shared click context.
    ctx.obj['min_val'] = min_
    ctx.obj['max_val'] = max_
    if limit:
        ctx.obj['limit'] = int(limit)
    # Ensure the output file name carries a .csv extension.
    if file_name[-4:] != '.csv':
        file_name += '.csv'
    input_dir = pathlib.Path(input_dir).resolve()
    output_dir = pathlib.Path(output_dir).resolve()
    click.echo(f'Input dir {input_dir}')
    click.echo(f'Output dir {output_dir}')
    output_path = output_dir.joinpath(file_name)
    fpaths, scores = score_images_in_dir(input_dir, shuffle_files=shuffle)
    # bit of helpful error handling if user doesn't provide any images
    # NOTE(review): this check runs after score_images_in_dir was already
    # called; presumably it should run before scoring -- confirm intent.
    for val in os.listdir(input_dir):
        if val[-3:].lower() in ['png', 'jpg']:
            break
    else:
        print("\n\nDidn't find image at directory:", input_dir)
    persist_scores(fpaths, scores, output_path)
@main.command(help='Use a kneejerk-generated csv to organize your files')
@click.option('--file_name', '-f', help='Name of .csv file', required=True)
@click.option('--consider_size', '-c', help='Consider the size of the images',
default=0)
@click.option('--rescale_len', '-r', help='Height/width to rescale the data to',
default=200)
@click.option('--trainpct', help='Percentage of data to train on',
default=.70)
@click.option('--testpct', help='Percentage of data to test on',
default=.20)
@click.option('--valpct', help='Percentage of data to validate on',
default=.10)
@click.pass_context
def transfer(ctx, file_name, consider_size, rescale_len, trainpct, testpct, valpct):
ctx.obj['file_name'] = file_name
ctx.obj['consider_size'] = consider_size
ctx.obj['rescale_len'] = rescale_len
ctx.obj['max_image_dim'] = _get_max_image_dim(file_name)
dirname = file_name[:-4]
ctx.obj['dirname'] = dirname
classes = _get_classes(file_name)
data_splits = ['train', 'test']
if valpct:
data_splits += ['val']
for split in data_splits:
for class_ in classes:
_ensure_path_exists(os.path.join(dirname, split, class_))
train, test, cross_val = segment_data_from_csv(trainpct, testpct, valpct)
transfer_normalized_image_data(train, 'train')
transfer_normalized_image_data(test, 'test')
if valpct:
transfer_normalized_image_data(cross_val, 'val')
persist_metadata()
if __name__ == '__main__':
main()
| 34.349515 | 88 | 0.672131 | 0 | 0 | 0 | 0 | 3,156 | 0.892029 | 0 | 0 | 1,005 | 0.284059 |
2db0fa793cb0563f92fe9fa1fd0a8a2de5faad8e | 7,277 | py | Python | magic.py | githoniel/ac7-ultrawide | 1e28fbc01ddca8c06fd43025f864c2a9b8b20fd3 | [
"MIT"
] | null | null | null | magic.py | githoniel/ac7-ultrawide | 1e28fbc01ddca8c06fd43025f864c2a9b8b20fd3 | [
"MIT"
] | null | null | null | magic.py | githoniel/ac7-ultrawide | 1e28fbc01ddca8c06fd43025f864c2a9b8b20fd3 | [
"MIT"
] | null | null | null | import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob
# Must be in game root folder.
if not os.path.isfile('Ace7Game.exe'):
wait = input('Ace7Game.exe not found in this folder. Press any key to close...')
sys.exit(0)
# Get resolution from OS.
u32 = ctypes.windll.user32
u32.SetProcessDPIAware()
[res_w, res_h] = [u32.GetSystemMetrics(0), u32.GetSystemMetrics(1)]
res_y = 1080
res_x = res_w * (res_y / res_h)
# Get confirmation from user.
print('Your screen size appears to be ' + str(res_w) + 'x' + str(res_h) + '.')
prompt = ''
while prompt.lower() != 'y':
prompt = input('Is that correct? Y to continue, N to cancel:')
if prompt.lower() == 'n':
print('Canceled.')
sys.exit(0)
# Determine FOV hex value.
print('Determining FOV hex value...')
if res_x in [2560, 2304]: # This value is for 2560x1080, 2560x1200 monitors.
fov_hex = 'AA05333C'
elif res_x in [2580, 2322]: # This value is for 3440x1440, 3440x1600 monitors.
fov_hex = 'EDD1333C'
elif res_x in [3840, 3456]: # This value is for dual 16:9, 16:10 monitors.
fov_hex = 'FCCF653C'
elif res_x in [5760, 5184]: # This value is for triple 16:9, 16:10 monitors.
fov_hex = '707B8B3C'
elif res_x in [1920, 1728]: # This value is for single 16:9, 16:10 monitors.
fov_hex = '35FA0E3C'
else:
print('Unknown resolution or aspect ratio. Quitting.')
sys.exit(0)
# Back up the game exe.
print('Backing up the game exe...')
if not os.path.isfile('Ace7Game.exe_orig'):
shutil.copy2('Ace7Game.exe','Ace7Game.exe_orig')
# Overwrite FOV value in game exe.
print('Modifying the game exe...')
with open('Ace7Game.exe','rb+') as exe:
exe.seek(int('DD52E2A', 16)) # address to remove black bars
exe.write(binascii.a2b_hex('00'))
exe.seek(int('258B4D8', 16)) # address of field of view
exe.write(binascii.a2b_hex(fov_hex))
exe.close()
# Check for 3Dmigoto zip file.
print('Checking for 3Dmigoto zip file...')
tdm_regex = '3Dmigoto-*.zip'
tdm_list = glob.glob(tdm_regex)
if not tdm_list:
print('3Dmigoto zip file not found. Quitting.')
sys.exit(0)
tdm_zip = tdm_list[0]
tdm_dir = tdm_zip[:tdm_zip.rfind('.')]
# Unpack 3Dmigoto.
print('Unpacking ' + tdm_zip + '...')
zip_ref = zipfile.ZipFile(tdm_zip, 'r')
zip_ref.extractall(tdm_dir)
zip_ref.close()
# Copy files from x64 folder to game root folder.
print('Installing 3Dmigoto...')
for item in os.listdir(tdm_dir + '/x64'):
tdm_item = tdm_dir + '/x64/' + item
try:
if not os.path.exists(item):
shutil.copytree(tdm_item, item)
except:
if not os.path.exists(item):
shutil.copy2(tdm_item, item)
# Create Mods folder if it doesn't exist.
if not os.path.isdir('Mods'):
os.mkdir('Mods')
# Set up shader filenames.
github_url = 'https://raw.githubusercontent.com/mpm11011/ac7-ultrawide/master/'
hud_filename = '9958a636cbef5557-ps_replace.txt'
map_filename = 'e6f41464a78a35c4-ps_replace.txt'
char_filename = 'f355a6eae7adfe8e-ps_replace.txt'
map_m7_filename = '27f3e07e177ddf67-ps_replace.txt'
char_m7_filename = 'f904af6042b80b52-ps_replace.txt'
mp_hud_filename = '6dcdbf6042a8a27a-ps_replace.txt'
mp_pause_filename = 'c75a35eef5821976-ps_replace.txt'
mp_map_filename = 'ec51646d13b1fd16-ps_replace.txt'
subtitles_filename = 'da86a094e768f000-vs_replace.txt'
subtitles_hud_checker = 'hudtextfix.ini'
# Download shaders.
print('Downloading shader files...')
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + hud_filename, 'ShaderFixes/' + hud_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + map_filename, 'ShaderFixes/' + map_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + char_filename, 'ShaderFixes/' + char_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + map_m7_filename, 'ShaderFixes/' + map_m7_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + char_m7_filename, 'ShaderFixes/' + char_m7_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + mp_hud_filename, 'ShaderFixes/' + mp_hud_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + mp_pause_filename, 'ShaderFixes/' + mp_pause_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + mp_map_filename, 'ShaderFixes/' + mp_map_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + subtitles_filename, 'ShaderFixes/' + subtitles_filename)
urllib.request.urlretrieve(github_url + 'Mods/' + subtitles_hud_checker, 'Mods/' + subtitles_hud_checker)
# Modify shader fix for resolution width.
print('Modifying shader files for resolution...')
delta_x = (res_x - 1920) / 3840 # divide by 1920, then divide by 2.
delta_x = round(delta_x, 4)
with open('ShaderFixes/' + hud_filename,'r+') as hud_file:
hud_file.seek(769) # number of bytes to line needing change
hud_file.write(' r1.x -= ' + str(delta_x) + ';')
hud_file.close()
with open('ShaderFixes/' + map_filename,'r+') as map_file:
map_file.seek(1035) # number of bytes to line needing change
map_file.write(' r0.x -= ' + str(delta_x) + ';')
map_file.close()
with open('ShaderFixes/' + char_filename,'r+') as char_file:
char_file.seek(1035) # number of bytes to line needing change
char_file.write(' r0.x -= ' + str(delta_x) + ';')
char_file.close()
with open('ShaderFixes/' + map_m7_filename,'r+') as map_m7_file:
map_m7_file.seek(1038) # number of bytes to line needing change
map_m7_file.write(' r1.x -= ' + str(delta_x) + ';')
map_m7_file.close()
with open('ShaderFixes/' + char_m7_filename,'r+') as char_m7_file:
char_m7_file.seek(1038) # number of bytes to line needing change
char_m7_file.write(' r1.x -= ' + str(delta_x) + ';')
char_m7_file.close()
with open('ShaderFixes/' + mp_hud_filename,'r+') as mp_hud_file:
mp_hud_file.seek(769) # number of bytes to line needing change
mp_hud_file.write(' r1.x -= ' + str(delta_x) + ';')
mp_hud_file.close()
with open('ShaderFixes/' + mp_pause_filename,'r+') as mp_pause_file:
mp_pause_file.seek(1108) # number of bytes to line needing change
mp_pause_file.write(' r0.x -= ' + str(delta_x) + ';')
mp_pause_file.close()
with open('ShaderFixes/' + mp_map_filename,'r+') as mp_map_file:
mp_map_file.seek(1108) # number of bytes to line needing change
mp_map_file.write(' r0.x -= ' + str(delta_x) + ';')
mp_map_file.close()
# Modifying subtitles fix for resolution width.
delta_o = 1 - ((16/9) * (res_h/res_w))
delta_o = round(delta_o, 4)
with open('ShaderFixes/' + subtitles_filename,'r+') as subtitles_file:
subtitles_file.seek(1368) # number of bytes to line needing change
subtitles_file.write(' o0.x+=' + str(delta_o) + ';')
subtitles_file.close()
# Disable shader hunting and enable Mods folder in config file.
print('Modifying d3dx.ini...')
with open('d3dx.ini','r+') as ini:
ini_data = ini.read()
ini.close()
ini_data = ini_data.replace(';include_recursive = Mods','include_recursive = Mods')
ini_data = ini_data.replace('hunting=1','hunting=0')
with open('d3dx.ini','w') as ini:
ini.write(ini_data);
ini.close()
wait = input('Script complete. Press any key to close.')
| 34.325472 | 113 | 0.694929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,995 | 0.411571 |
2db16944c7d0a8a83740304bb1e35db5c0789ebe | 5,202 | py | Python | DigitRecognition.py | Michael-Kidd/4th-Year---Emerging-Technology | ec24a32e08cff691cbfd888fe6ba92db0678d57f | [
"MIT"
] | null | null | null | DigitRecognition.py | Michael-Kidd/4th-Year---Emerging-Technology | ec24a32e08cff691cbfd888fe6ba92db0678d57f | [
"MIT"
] | null | null | null | DigitRecognition.py | Michael-Kidd/4th-Year---Emerging-Technology | ec24a32e08cff691cbfd888fe6ba92db0678d57f | [
"MIT"
] | null | null | null | # Tkinter is Python's de-facto standard GUI (Graphical User Interface) package.
import tkinter as tk
import keras as kr
import numpy as np
import matplotlib.pyplot as plt
import math
import sklearn.preprocessing as pre
import gzip
import PIL
from PIL import Image, ImageDraw
import os.path
width = 280
height = 280
center = height//2
white = (255, 255, 255)
black = (0,0,0)
def testImage(img):
global result, model
img = np.array(list(img)).reshape(1,784)
result.config(text='You Wrote the Number '+str(model.predict_classes(img)))
def nueralNet():
# global variables - in place of static variables
global model
# Read in images for training
with gzip.open('data/train-images-idx3-ubyte.gz', 'rb') as f:
train_img = f.read()
# read in labels for training
with gzip.open('data/train-labels-idx1-ubyte.gz', 'rb') as f:
train_lbl = f.read()
with gzip.open('data/t10k-images-idx3-ubyte.gz', 'rb') as f:
test_img = f.read()
with gzip.open('data/t10k-labels-idx1-ubyte.gz', 'rb') as f:
test_lbl = f.read()
# Add a hidden layer with 1000 neurons and an input layer with 784.
model.add(kr.layers.Dense(512, input_dim=784, activation="relu", kernel_initializer="normal"))
model.add(kr.layers.Dense(10, activation="softmax", kernel_initializer="normal"))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# reshape the images and labels.
train_img = ~np.array(list(train_img[16:])).reshape(60000, 1, 784).astype(np.uint8)
train_lbl = np.array(list(train_lbl[ 8:])).astype(np.uint8)
train_img = train_img/ 255
train_lbl = kr.utils.to_categorical(train_lbl)
# reshape the image array
inputs = train_img.reshape(60000, 784)
# Binarize labels in a one-vs-all fashion
encoder = pre.LabelBinarizer()
# Trains the model for a fixed number of epochs (iterations on a dataset).
encoder.fit(train_lbl)
outputs = encoder.transform(train_lbl)
# Train the model
model.fit(inputs, outputs, epochs=150, batch_size=100)
test_img = ~np.array(list(test_img[16:])).reshape(10000, 784).astype(np.uint8) / 255.0
test_lbl = np.array(list(test_lbl[ 8:])).astype(np.uint8)
saveModel()
def clearCanvas(event):
# global variables
global image1, draw
# clears the canvas seen by the user
cv.delete("all")
# clear the pillow image that is not seen by the user
image1 = PIL.Image.new("RGB", (width, height), black)
draw = ImageDraw.Draw(image1)
def save():
global image1
# resize the image so it matches the mnist data set conditions
img = image1.resize((28, 28), Image.BICUBIC)
# save the image
img.save("data/image.png")
# read back in the image,
# I chose to do it this way in case i wanted to give it an image
# or have the user do it
img = imageprepare('data/image.png')
# attempt to load the model data
loadModel()
# test our image
testImage(img)
def paint(event):
# creates a line using mouse events
x1, y1 = (event.x - 1), (event.y - 1)
x2, y2 = (event.x + 1), (event.y + 1)
# create a dot using these positions
# pillow image - not seen by user
cv.create_oval(x1, y1, x2, y2, fill="black",width=12)
# canvas image - seen by user
draw.line([x1, y1, x2, y2],fill="white",width=12)
def imageprepare(argv):
# read in an image and greyscale
im = Image.open(argv).convert('L')
# uncomment to view the incoming image
# im.show()
# get the data from the image
tv = list(im.getdata())
# normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.
tv = [(255 - x) * 1.0 / 255.0 for x in tv]
# return the image data
return tv
def saveModel():
global model
# save the current model
kr.models.save_model(
model,
"data/model.h5py",
overwrite=True,
include_optimizer=True
)
def loadModel():
global model
# if the model file exists load it
if os.path.isfile('data/model.h5py'):
model = kr.models.load_model('data/model.h5py')
else:
# if the file doesnt exist
# start the nueral network training curremntly set to 150 epochs
nueralNet()
# Start a neural network, building it by layers.
# using sequential model
model = kr.models.Sequential()
# new pillow image that later will be saved
image1 = PIL.Image.new("RGB", (width, height), black)
draw = ImageDraw.Draw(image1)
# set the position of the windows
root = tk.Tk()
root.geometry("+{xPos}+{yPos}".format(xPos = 0, yPos = 0))
# Tkinter create a canvas to draw on
cv = tk.Canvas(root, width=width, height=height, bg='white')
# pack the gui
cv.pack()
# left click
cv.bind("<B1-Motion>", paint)
# right click
cv.bind('<Button-3>', clearCanvas)
# create text and buttons
button = tk.Button(text="Check Number",command=save)
text1 = tk.Label(text="Left Click Draw")
text2 = tk.Label(text="Right Click Clear")
result = tk.Label(text="You have not Checked a Number yet")
# pack the canvas
text1.pack()
text2.pack()
button.pack()
result.pack()
root.mainloop() | 25.37561 | 98 | 0.660323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,024 | 0.389081 |
2db293b083a219201f316a4494a2574920bb381f | 1,007 | py | Python | sort/insertion.py | Wind2esg/python3sort | ead213f75a54da7d476865e3cf0a551ab0970d7d | [
"MIT"
] | 1 | 2018-01-30T04:44:05.000Z | 2018-01-30T04:44:05.000Z | sort/insertion.py | Wind2esg/python3sort | ead213f75a54da7d476865e3cf0a551ab0970d7d | [
"MIT"
] | null | null | null | sort/insertion.py | Wind2esg/python3sort | ead213f75a54da7d476865e3cf0a551ab0970d7d | [
"MIT"
] | null | null | null | # python3 sort <http://github.com/Wind2esg/python3sort>
# Copyright 2018 Wind2esg
# Released under the MIT license <http://github.com/Wind2esg/python3sort/LICENSE>
# Build a sorted range from 0 to i - 1, then try to find the position for the i item
# Because it is sorted in the range, when finding, compare the i item with the mid of the range
# insert the i item then update the sorted range from the position to i.
from _comparer import int_comparer
def find_posion(array, start, end, item, compare=int_comparer):
if compare(item, array[end // 2]) > 0:
start = end // 2
else:
end = end // 2
if start == end:
return start
else:
return find_posion(array, start, end)
def insertion(array, compare=int_comparer):
for i in range(1, len(array)):
position = find_posion(array, 0, i - 1, array[i])
tmp = array[i]
for j in range(i, position, -1):
array[j] = array[j - 1]
array[position] = tmp
return arrayy
| 32.483871 | 95 | 0.653426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 412 | 0.409136 |
2db2c86f651f174e6af10c9853850aca24373576 | 571 | py | Python | organization/urls.py | adwait-thattey/raygun_api | 9d5571de452fbf70d34b9583ebc42eb662292f61 | [
"MIT"
] | null | null | null | organization/urls.py | adwait-thattey/raygun_api | 9d5571de452fbf70d34b9583ebc42eb662292f61 | [
"MIT"
] | 7 | 2020-06-06T01:40:06.000Z | 2022-02-10T09:12:56.000Z | organization/urls.py | adwait-thattey/raygun_api | 9d5571de452fbf70d34b9583ebc42eb662292f61 | [
"MIT"
] | 1 | 2021-08-16T13:23:34.000Z | 2021-08-16T13:23:34.000Z | from django.urls import path
from .views import OrganizationView, ServiceView, ServiceListView
from registration import views
app_name = "organization"
urlpatterns = [
path('<org_name>/service/<ticket>/', ServiceView.as_view(), name='service_view_get'),
path('<org_name>/service/', ServiceView.as_view(), name='service_view_post'),
path('<org_name>/services/', ServiceListView.as_view(), name='service_view_list'),
path('<org_name>/', OrganizationView.as_view(), name='org_view_get'),
path('', OrganizationView.as_view(), name='org_view_post'),
]
| 35.6875 | 89 | 0.732049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.327496 |
2db31ca93e416c8f0b58659d1cea6bcc728c9c86 | 1,977 | py | Python | examples/layer_metrics.py | rolandproud/pyechometrics | 6036772b5e3acd329cb7bda84331c291e8fb334d | [
"MIT"
] | null | null | null | examples/layer_metrics.py | rolandproud/pyechometrics | 6036772b5e3acd329cb7bda84331c291e8fb334d | [
"MIT"
] | null | null | null | examples/layer_metrics.py | rolandproud/pyechometrics | 6036772b5e3acd329cb7bda84331c291e8fb334d | [
"MIT"
] | 1 | 2021-06-30T23:16:03.000Z | 2021-06-30T23:16:03.000Z | # -*- coding: utf-8 -*-
"""
Summarise Sound Scattering Layers (SSLs)
@author: Roland Proud
"""
## import packages
import matplotlib.pyplot as plt
import gzip
import pickle
import numpy as np
from pyechoplot.plotting import plot_pseudo_SSL, save_png_plot, plot_Sv
## import pyechometrics modules
from pyechometrics.metrics import stats, dims, nasc
## get Sv data and mask
def get_obj(filepath):
f = gzip.open(filepath,'rb')
obj = pickle.load(f,encoding = 'bytes')
f.close()
return obj
## noise_level
noise_level = -999
## read Sv
Sv18 = get_obj('./data/PS_Sv18.pklz')
## get SSL mask - see 'ident_SSLs' example in pyechomask
Sv18mask = get_obj('./data/SSL_flag_mask_18.pklz')
## plot
plt.figure(1)
plt.subplot(211)
plot_Sv(Sv18)
plt.subplot(212)
plot_Sv(Sv18,mask = Sv18mask)
plt.title('SSL identification - 18 kHz echosounder data')
plt.show()
## sample interval in meters for this echogram
sample_int = 0.2 ## in meters
## calculate NASC (include all SSLs)
NASC = nasc(Sv18, sample_int, mask = Sv18mask)
## plot NASC by ping
plt.plot(NASC)
plt.xlabel('ping')
plt.ylabel(r'NASC $m^2nmi^{-2}$')
plt.title('NASC values for SSLs')
plt.show()
## save plot
#save_png_plot('./','NASCexampleWiki')
## make binary mask for a single sound scattering layer (SSL) (Sv18mask == 2)
SSLmask = np.zeros(Sv18mask.shape)
SSLmask[Sv18mask == 2] = 1
## get SSL stats and dimensions
SSL_mean, SSL_median, SSL_std, n = stats(Sv18, mask = SSLmask)
mean_row, mean_height, mean_col, mean_length = dims(Sv18, mask = SSLmask)
## change row to depth
mean_depth = mean_row * sample_int
mean_height = mean_height * sample_int
## plot a pseudo SSL using metrics
## *assume single normal distribution
plot_pseudo_SSL(SSL_mean,SSL_std,mean_height,mean_depth)
plt.ylabel('depth (m)')
plt.xlabel('pings')
plt.title('pseudo DSL produced using summary metrics',fontsize = 16)
plt.show()
## save plot
#save_png_plot('./','exampleWiki')
| 22.724138 | 77 | 0.711179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 888 | 0.449165 |
2db77d4290e85a408a47162d591a4a6dfa173183 | 10,334 | py | Python | chb/graphics/DotCfg.py | orinatic/CodeHawk-Binary | 8b4fd728213e629736d5ece840ea3b43cea53f30 | [
"MIT"
] | null | null | null | chb/graphics/DotCfg.py | orinatic/CodeHawk-Binary | 8b4fd728213e629736d5ece840ea3b43cea53f30 | [
"MIT"
] | null | null | null | chb/graphics/DotCfg.py | orinatic/CodeHawk-Binary | 8b4fd728213e629736d5ece840ea3b43cea53f30 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020 Henny Sipma
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import chb.util.graphutil as UG
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple, TYPE_CHECKING
from chb.util.DotGraph import DotGraph
if TYPE_CHECKING:
import chb.app.CfgBlock
import chb.app.Function
import chb.app.Instruction
class DotCfg:
def __init__(
self,
graphname: str,
fn: "chb.app.Function.Function",
looplevelcolors: List[str] = [], # [ color numbers ]
showpredicates: bool = False, # show branch predicates on edges
showcalls: bool = False, # show call instrs on nodes
showinstr_opcodes: bool = False, # show all instrs on nodes
showinstr_text: bool = False, # show all instr annotations on nodes
showstores: bool = False, # show all STR and STRB and STRH instr annotations
mips: bool = False, # for mips subtract 4 from block end addr
sink: str = None, # restrict paths to basic block destination
segments: List[str] = [], # restrict paths to include these basic blocks
# replacement text for node and edge labels
replacements: Dict[str, str] = {}) -> None:
self.fn = fn
self.graphname = graphname
self.looplevelcolors = looplevelcolors
self.showpredicates = showpredicates
self.showcalls = showcalls
self.showinstr_opcodes = showinstr_opcodes
self.showinstr_text = showinstr_text
self.showstores = showstores
self.mips = mips
self.sink = sink
self.segments = segments
self.replacements = replacements
self.pathnodes: Set[str] = set([])
self.dotgraph = DotGraph(graphname)
def build(self) -> DotGraph:
if self.sink is not None:
self.restrict_nodes(self.sink)
elif len(self.segments) > 0:
self.restrict_paths(self.segments)
else:
self.pathnodes = set(self.fn.cfg.blocks.keys())
for n in self.fn.cfg.blocks:
self.add_cfg_node(n)
for e in self.fn.cfg.edges:
self.add_cfg_edge(e)
return self.dotgraph
def restrict_nodes(self, sink: str) -> None:
nodes = self.fn.cfg.blocks
edges = self.fn.cfg.edges # adjacency list n -> [ n ]
if sink not in nodes:
print('Sink ' + sink + ' not found in nodes')
self.pathnodes = set(nodes.keys())
return
g = UG.DirectedGraph(list(nodes.keys()), edges)
g.find_paths(self.fn.faddr, sink)
for p in g.paths:
print('Path: ' + str(p))
self.pathnodes = self.pathnodes.union(p)
if len(self.pathnodes) == 0:
self.pathnodes = set(nodes.keys())
def restrict_paths(self, segments: List[str]) -> None:
nodes = self.fn.cfg.blocks
edges = self.fn.cfg.edges
for b in segments:
if b not in list(nodes.keys()):
print('Segment ' + b + ' not found in nodes')
self.pathnodes = set(nodes.keys())
return
segments = [self.fn.faddr] + segments
g = UG.DirectedGraph(list(nodes.keys()), edges)
for i in range(len(segments) - 1):
src = segments[i]
dst = segments[i+1]
g.find_paths(src, dst)
for p in g.paths:
print('Path: ' + str(p))
self.pathnodes = self.pathnodes.union(p)
if len(self.pathnodes) == 0:
self.pathnodes = set(nodes.keys())
def get_branch_instruction(
self,
edge: str) -> "chb.app.Instruction.Instruction":
srcblock = self.fn.cfg.blocks[edge]
instraddr = srcblock.lastaddr
if instraddr.startswith('B'):
ctxtaddr = instraddr[2:].split('_')
iaddr_i = int(ctxtaddr[1], 16)
if self.mips:
iaddr_i -= 4 # delay slot
instraddr = 'B:' + ctxtaddr[0] + '_' + hex(iaddr_i)
else:
instraddr_i = int(instraddr, 16)
if self.mips:
instraddr_i -= 4 # take into account delay slot
instraddr = hex(instraddr_i)
return self.fn.instruction(instraddr)
def to_json(self) -> Dict[str, Any]:
d: Dict[str, Any] = {}
d['nodes'] = []
d['edges'] = {}
for n in self.fn.cfg.blocks:
d['nodes'].append(str(n))
for e in self.fn.cfg.edges:
d['edges'][str(e)] = {}
def default() -> None:
for tgt in self.fn.cfg.edges[e]:
d['edges'][str(e)][str(tgt)] = 'none'
if len(self.fn.cfg.edges[e]) > 1:
branchinstr = self.get_branch_instruction(e)
if branchinstr.is_branch_instruction:
ftconditions = branchinstr.ft_conditions
if len(ftconditions) > 1:
for i, tgt in enumerate(self.fn.cfg.edges[e]):
d['edges'][str(e)][str(tgt)] = ftconditions[i]
else:
default()
else:
default()
else:
default()
return d
def replace_text(self, txt: str) -> str:
result = txt
for src in sorted(self.replacements, key=lambda x: len(x), reverse=True):
result = result.replace(src, self.replacements[src])
return result
def add_cfg_node(self, n: str) -> None:
if n not in self.pathnodes:
return
basicblock = self.fn.block(str(n))
blocktxt = str(n)
color = 'lightblue'
if self.showinstr_opcodes:
instrs = basicblock.instructions.values()
pinstrs = [i.opcodetext for i in instrs]
blocktxt = (
blocktxt
+ "\\n"
+ "\\n".join(pinstrs))
elif self.showinstr_text:
instrs = basicblock.instructions.values()
pinstrs = [i.annotation for i in instrs]
blocktxt = (
blocktxt
+ "\\n"
+ "\\n".join(pinstrs))
elif self.showcalls or self.showstores:
if self.showcalls:
callinstrs = basicblock.call_instructions
pcallinstrs = [i.annotation for i in callinstrs]
print(' \n'.join([str(a) for a in pcallinstrs]))
if len(callinstrs) > 0:
blocktxt = (
blocktxt
+ '\\n'
+ '\\n'.join(pcallinstrs))
if self.showstores:
storeinstrs = basicblock.store_instructions
pstoreinstrs = [i.annotation for i in storeinstrs]
print(' \n'.join([str(a) for a in pstoreinstrs]))
if len(storeinstrs) > 0:
blocktxt = (
blocktxt
+ "\\n"
+ "\\n".join(pstoreinstrs))
if len(self.looplevelcolors) > 0:
looplevels = self.fn.cfg.loop_levels(n)
if len(looplevels) > 0:
level = len(looplevels)
if level > len(self.looplevelcolors):
color = self.looplevelcolors[-1]
else:
color = self.looplevelcolors[level-1]
# if n == self.fn.faddr:
# color = 'purple'
blocktxt = self.replace_text(blocktxt)
self.dotgraph.add_node(str(n), labeltxt=str(blocktxt), color=color)
def add_cfg_edge(self, e: str) -> None:
if e not in self.pathnodes:
return
def default() -> None:
for tgt in self.fn.cfg.edges[e]:
if tgt in self.pathnodes:
self.dotgraph.add_edge(str(e), str(tgt), labeltxt=None)
labeltxt: Optional[str] = None
if len(self.fn.cfg.edges[e]) > 1:
if self.showpredicates:
branchinstr = self.get_branch_instruction(e)
if branchinstr and branchinstr.is_branch_instruction:
ftconditions = branchinstr.ft_conditions
if len(ftconditions) == 2:
for i, tgt in enumerate(self.fn.cfg.edges[e]):
if tgt in self.pathnodes:
labeltxt = str(ftconditions[i])
labeltxt = self.replace_text(labeltxt)
self.dotgraph.add_edge(
str(e), str(tgt), labeltxt=labeltxt)
else:
default()
else:
default()
else:
default()
else:
default()
| 40.210117 | 89 | 0.533869 | 8,567 | 0.829011 | 0 | 0 | 0 | 0 | 0 | 0 | 2,211 | 0.213954 |
2db7c1f7bd22a70de17e35de2236e6155f5a96dc | 534 | py | Python | AEC.py | apayeur/GIF-Ca | 7ba9e715d79aa3a733f417f7dfce81842041e7ec | [
"MIT"
] | 1 | 2020-06-05T15:34:34.000Z | 2020-06-05T15:34:34.000Z | src/AEC.py | apayeur/GIF-K | 36228dfe5ffc2cded3f3224e289bd60e6bc7b93c | [
"MIT"
] | null | null | null | src/AEC.py | apayeur/GIF-K | 36228dfe5ffc2cded3f3224e289bd60e6bc7b93c | [
"MIT"
] | null | null | null | import abc
from Experiment import *
class AEC :
"""
Abstract class defining an interface for performing active electrode compensation.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def performAEC(self, experiment):
"""
This method should preprocess all the traces in the experiment and compute V given V_rec.
"""
@abc.abstractmethod
def plot(self):
"""
Plot the filter of AEC.
"""
| 18.413793 | 97 | 0.554307 | 464 | 0.868914 | 0 | 0 | 292 | 0.546816 | 0 | 0 | 258 | 0.483146 |
2db8d601711034292e85c17511fbcd5cd25b13fb | 6,126 | py | Python | locust/test/test_env.py | radhakrishnaakamat/locust | 51b1d5038a2be6e2823b2576c4436f2ff9f7c7c2 | [
"MIT"
] | 1 | 2022-02-26T00:17:46.000Z | 2022-02-26T00:17:46.000Z | locust/test/test_env.py | radhakrishnaakamat/locust | 51b1d5038a2be6e2823b2576c4436f2ff9f7c7c2 | [
"MIT"
] | 1 | 2020-12-29T04:26:09.000Z | 2020-12-29T04:26:09.000Z | locust/test/test_env.py | radhakrishnaakamat/locust | 51b1d5038a2be6e2823b2576c4436f2ff9f7c7c2 | [
"MIT"
] | 1 | 2022-02-25T14:23:40.000Z | 2022-02-25T14:23:40.000Z | from locust import (
constant,
)
from locust.env import Environment, LoadTestShape
from locust.user import (
User,
task,
)
from locust.user.task import TaskSet
from .testcases import LocustTestCase
from .fake_module1_for_env_test import MyUserWithSameName as MyUserWithSameName1
from .fake_module2_for_env_test import MyUserWithSameName as MyUserWithSameName2
class TestEnvironment(LocustTestCase):
def test_user_classes_count(self):
class MyUser1(User):
wait_time = constant(0)
@task
def my_task(self):
pass
class MyUser2(User):
wait_time = constant(0)
@task
def my_task(self):
pass
environment = Environment(user_classes=[MyUser1, MyUser2])
self.assertDictEqual({"MyUser1": MyUser1, "MyUser2": MyUser2}, environment.user_classes_by_name)
def test_user_classes_with_same_name_is_error(self):
with self.assertRaises(ValueError) as e:
environment = Environment(user_classes=[MyUserWithSameName1, MyUserWithSameName2])
self.assertEqual(
e.exception.args[0],
"The following user classes have the same class name: locust.test.fake_module1_for_env_test.MyUserWithSameName, locust.test.fake_module2_for_env_test.MyUserWithSameName",
)
    def test_assign_equal_weights(self):
        """Exercise Environment.assign_equal_weights across the different ways
        tasks can be declared: weighted @task decorators, nested TaskSets,
        task lists assigned as class attributes, and task dicts with weights.

        After assign_equal_weights, each user class should carry a flattened,
        de-duplicated task list covering every reachable task.
        """

        def verify_tasks(u, target_tasks):
            # After flattening: the task count matches, there are no duplicate
            # task names, and the set of names is exactly the expected one.
            self.assertEqual(len(u.tasks), len(target_tasks))
            tasks = [t.__name__ for t in u.tasks]
            self.assertEqual(len(tasks), len(set(tasks)))
            self.assertEqual(set(tasks), set(target_tasks))

        # Base case
        class MyUser1(User):
            wait_time = constant(0)

            @task(4)
            def my_task(self):
                pass

            @task(1)
            def my_task_2(self):
                pass

        environment = Environment(user_classes=[MyUser1])
        environment.assign_equal_weights()
        u = environment.user_classes[0]
        # Decorator weights (4 and 1) are discarded; both tasks appear once.
        verify_tasks(u, ["my_task", "my_task_2"])

        # Testing nested task sets
        class MyUser2(User):
            @task
            class TopLevelTaskSet(TaskSet):
                @task
                class IndexTaskSet(TaskSet):
                    @task(10)
                    def index(self):
                        self.client.get("/")

                    @task
                    def stop(self):
                        self.client.get("/hi")

                @task(2)
                def stats(self):
                    self.client.get("/stats/requests")

        environment = Environment(user_classes=[MyUser2])
        environment.assign_equal_weights()
        u = environment.user_classes[0]
        # Tasks from both nesting levels are flattened into one list.
        verify_tasks(u, ["index", "stop", "stats"])

        # Testing task assignment via instance variable
        def outside_task():
            pass

        def outside_task_2():
            pass

        class SingleTaskSet(TaskSet):
            tasks = [outside_task, outside_task, outside_task_2]

        class MyUser3(User):
            tasks = [SingleTaskSet, outside_task]

        environment = Environment(user_classes=[MyUser3])
        environment.assign_equal_weights()
        u = environment.user_classes[0]
        # outside_task is listed three times across MyUser3/SingleTaskSet but
        # must only appear once after de-duplication.
        verify_tasks(u, ["outside_task", "outside_task_2"])

        # Testing task assignment via dict
        class DictTaskSet(TaskSet):
            def dict_task_1():
                pass

            def dict_task_2():
                pass

            def dict_task_3():
                pass

            tasks = {
                dict_task_1: 5,
                dict_task_2: 3,
                dict_task_3: 1,
            }

        class MyUser4(User):
            tasks = [DictTaskSet, SingleTaskSet, SingleTaskSet]

        # Assign user tasks in dict
        environment = Environment(user_classes=[MyUser4])
        environment.assign_equal_weights()
        u = environment.user_classes[0]
        # Dict weights (5/3/1) are discarded; repeated SingleTaskSet entries
        # do not duplicate its tasks.
        verify_tasks(u, ["outside_task", "outside_task_2", "dict_task_1", "dict_task_2", "dict_task_3"])

        class MyUser5(User):
            tasks = {
                DictTaskSet: 5,
                SingleTaskSet: 3,
                outside_task: 6,
            }

        environment = Environment(user_classes=[MyUser5])
        environment.assign_equal_weights()
        u = environment.user_classes[0]
        # Same expectation when the top-level tasks attribute is itself a dict
        # mixing TaskSets and a plain function.
        verify_tasks(u, ["outside_task", "outside_task_2", "dict_task_1", "dict_task_2", "dict_task_3"])
def test_user_classes_with_zero_weight_are_removed(self):
class MyUser1(User):
wait_time = constant(0)
weight = 0
@task
def my_task(self):
pass
class MyUser2(User):
wait_time = constant(0)
weight = 1
@task
def my_task(self):
pass
environment = Environment(user_classes=[MyUser1, MyUser2])
self.assertEqual(len(environment.user_classes), 1)
self.assertIs(environment.user_classes[0], MyUser2)
def test_all_user_classes_with_zero_weight_raises_exception(self):
class MyUser1(User):
wait_time = constant(0)
weight = 0
@task
def my_task(self):
pass
class MyUser2(User):
wait_time = constant(0)
weight = 0
@task
def my_task(self):
pass
with self.assertRaises(ValueError) as e:
environment = Environment(user_classes=[MyUser1, MyUser2])
self.assertEqual(
e.exception.args[0],
"There are no users with weight > 0.",
)
def test_shape_class_attribute(self):
class SubLoadTestShape(LoadTestShape):
"""Inherited from locust.env.LoadTestShape"""
with self.assertRaisesRegex(
ValueError, r"instance of LoadTestShape or subclass LoadTestShape", msg="exception message is mismatching"
):
Environment(user_classes=[MyUserWithSameName1], shape_class=SubLoadTestShape)
| 30.326733 | 182 | 0.579987 | 5,751 | 0.938786 | 0 | 0 | 916 | 0.149527 | 0 | 0 | 735 | 0.11998 |
2db96537947c81899effbd609c7adfa473931eeb | 33 | py | Python | datasets/__init__.py | Masterchef365/pvcnn | db13331a46f672e74e7b5bde60e7bf30d445cd2d | [
"MIT"
] | 477 | 2019-12-10T01:03:43.000Z | 2022-03-28T14:10:08.000Z | datasets/__init__.py | chaomath/pvcnn | 8f07316611067e9a0e2df8b35e4a729a03e0806b | [
"MIT"
] | 57 | 2019-12-10T10:14:26.000Z | 2022-03-26T04:59:43.000Z | datasets/__init__.py | chaomath/pvcnn | 8f07316611067e9a0e2df8b35e4a729a03e0806b | [
"MIT"
] | 126 | 2019-12-10T07:59:50.000Z | 2022-03-12T07:21:19.000Z | from datasets.s3dis import S3DIS
| 16.5 | 32 | 0.848485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2dba6814e9a58be8a1046bd1b724d2d25bc21d0e | 1,930 | py | Python | schedule/main/utils/fetch_data.py | DSD-ESDC-EDSC/dynamic-org-chart-scripts | a2247ac85ddadc5b33256a501be16bf787b395b6 | [
"MIT"
] | null | null | null | schedule/main/utils/fetch_data.py | DSD-ESDC-EDSC/dynamic-org-chart-scripts | a2247ac85ddadc5b33256a501be16bf787b395b6 | [
"MIT"
] | null | null | null | schedule/main/utils/fetch_data.py | DSD-ESDC-EDSC/dynamic-org-chart-scripts | a2247ac85ddadc5b33256a501be16bf787b395b6 | [
"MIT"
] | null | null | null | import csv
from io import BytesIO
import pandas as pd
from urllib.request import urlopen
from zipfile import ZipFile
def fetch_geds(url, subset=None):
    '''
    Fetches the geds dataset from Canada's Open Data Portal.

    Args:
        url:
            A string containing the url to the Canada Open Data Portal web page
            that downloads a zipped csv containing the geds dataset.
        subset:
            A string containing the acronym found in the "Department Acronym"
            field in the geds dataframe (e.g. "ESDC-EDSC") - used to build the
            org chart tool for only a subset of geds.
    Returns:
        df:
            A pandas dataframe containing the original contents of the zipped
            csv file.
    '''
    # Download the archive; close the HTTP response as soon as it is consumed.
    with urlopen(url) as resp:
        zipped_file = ZipFile(BytesIO(resp.read()))
    # The archive contains a single member (e.g. 'gedsOpenData.csv'), so
    # namelist()[0] is the file name.
    member_name = zipped_file.namelist()[0]
    rows = []
    with zipped_file.open(member_name) as member:
        for raw_line in member:
            # Use the csv module to split the decoded line: it knows how to
            # handle commas nested inside double-quoted fields. next(..., [])
            # yields an empty row for blank lines instead of raising.
            decoded = raw_line.decode('ISO-8859-1')
            fields = next(csv.reader([decoded]), [])
            # There are a few observations (~90) that are not parsed into the
            # expected 44 columns - this needs to be investigated further.
            if len(fields) == 44:
                rows.append(fields)
    # First valid row is the header.
    df = pd.DataFrame(rows[1:], columns=rows[0])
    # Select a subset of the dataframe (if any)
    if subset is not None:
        df = df[df["Department Acronym"] == subset]
    return df
2dbaf351886ba4462165fc13883318f911a808c6 | 3,210 | py | Python | grid.py | Data-Mechanics/bps-simulated-students | eb96fcb1185c5b00b822a36ce74bcbead87d5e16 | [
"MIT"
] | 2 | 2018-02-14T01:33:39.000Z | 2019-04-03T07:08:15.000Z | grid.py | data-mechanics/bps-simulated-data | eb96fcb1185c5b00b822a36ce74bcbead87d5e16 | [
"MIT"
] | null | null | null | grid.py | data-mechanics/bps-simulated-data | eb96fcb1185c5b00b822a36ce74bcbead87d5e16 | [
"MIT"
] | 2 | 2018-02-14T01:33:46.000Z | 2019-04-03T07:08:17.000Z | """
grid.py
Module containing class for working with a street grid.
"""
import json
import geojson
import geopy.distance
import shapely.geometry
from geoql import geoql
import geoleaflet
import folium
import rtree
import networkx
from tqdm import tqdm
class Grid():
    """Street grid backed by a GeoJSON node/edge graph.

    Wraps a prepared road-segment GeoJSON with a networkx graph (for graph
    computations) and two R-trees (for fast nearest-node / edge lookups).
    """
    @staticmethod
    def prepare(file_segments, file_segments_filtered):
        '''
        Prepare a "clean" segments file given an input segments file:
        keep only 'Feature' entries, strip their properties, convert the
        result to a node/edge graph and write it out.
        '''
        segments = geoql.load(open(file_segments, 'r'))
        features = []
        for f in tqdm(segments.features, desc='Filtering road segments'):
            if f.type == 'Feature':
                # Drop all per-feature properties; only geometry is kept.
                f.properties = []
                features.append(f)
        segments.features = features
        segments = segments.node_edge_graph()
        # NOTE(review): file handles opened here (and in __init__) are never
        # closed explicitly -- consider 'with open(...)' blocks.
        segments.dump(open(file_segments_filtered, 'w'), sort_keys=True)
    @staticmethod
    def segments_networkx(segments):
        '''
        Convert a GeoJSON graph generated by the geoql function
        node_edge_graph() into a networkx representation. Point features
        become nodes; the coordinate runs of 'Feature' entries become chains
        of edges annotated with the source feature index and a distance.
        '''
        graph = networkx.Graph()
        for (j, feature) in tqdm(list(enumerate(segments['features'])), desc='Building segments graph'):
            if feature.type == "Point":
                (lon, lat) = feature.coordinates
                graph.add_node((lon, lat))
            elif feature.type == "Feature":
                coords = [tuple(c) for c in feature.geometry.coordinates]
                for i in range(len(coords)-1):
                    (s, t) = (coords[i], coords[i+1])
                    # NOTE(review): s and t are (lon, lat) tuples but geopy's
                    # distance functions expect (lat, lon) -- the computed
                    # edge distances may be wrong; confirm.
                    graph.add_edge(s, t, index=j, distance=geopy.distance.vincenty(s, t).miles)
        return graph
    @staticmethod
    def segments_rtree(segments):
        '''
        Build an R-tree using the GeoJSON road segments data. Separate
        trees are built for nodes and for edges. Entries are keyed by the
        feature's position in segments['features'].
        '''
        (nodes_rtree, edges_rtree) = (rtree.index.Index(), rtree.index.Index())
        for i in tqdm(range(len(segments['features'])), desc='Building segments R-tree'):
            feature = segments['features'][i]
            if feature.type == 'Point':
                (lon, lat) = feature.coordinates
                # Degenerate bounding box: a single point.
                nodes_rtree.insert(i, (lon, lat, lon, lat))
            elif feature.type == 'Feature':
                edges_rtree.insert(i, shapely.geometry.shape(feature['geometry']).bounds)
        return (nodes_rtree, edges_rtree)
    def __init__(self, file_path):
        """Load a prepared segments GeoJSON file and build all indexes.

        Args:
            file_path (str): path to a file produced by Grid.prepare().
        """
        self.segments = geojson.load(open(file_path, 'r'))
        self.graph = self.segments_networkx(self.segments)
        (rtree_nodes, rtree_edges) = self.segments_rtree(self.segments)
        self.rtree_nodes = rtree_nodes
        self.rtree_edges = rtree_edges
    def intersection_nearest(self, lon_lat):
        """Return the coordinates of the intersection (Point feature)
        nearest to the supplied (lon, lat) pair.
        """
        (lon, lat) = lon_lat
        index = next(self.rtree_nodes.nearest((lon,lat,lon,lat), 1))
        return self.segments['features'][index].coordinates
if __name__ == "__main__":
    # The following is used to generate the "prepared" road segment data.
    Grid.prepare('input/segments-boston.geojson', 'input/segments-prepared.geojson')
    # Optional: render the prepared segments to an HTML Leaflet map.
    #open('output/segments.html', 'w').write(geoleaflet.html(Grid('input/segments-prepared.geojson').segments))
## eof | 37.764706 | 111 | 0.62648 | 2,648 | 0.824922 | 0 | 0 | 2,110 | 0.657321 | 0 | 0 | 861 | 0.268224 |
2dbb30295e3227128b76736e47f8b0f48dfcbaa4 | 6,019 | py | Python | lost_ds/vis/vis.py | l3p-cv/lost_ds | 4a2f3ef027128b759d28e67cb1fdaa0a557e343c | [
"MIT"
] | 1 | 2022-03-30T11:29:57.000Z | 2022-03-30T11:29:57.000Z | lost_ds/vis/vis.py | l3p-cv/lost_ds | 4a2f3ef027128b759d28e67cb1fdaa0a557e343c | [
"MIT"
] | null | null | null | lost_ds/vis/vis.py | l3p-cv/lost_ds | 4a2f3ef027128b759d28e67cb1fdaa0a557e343c | [
"MIT"
] | null | null | null | import os
from tqdm import tqdm
from joblib import Parallel, delayed
try:
import seaborn as sns
except:
pass
import numpy as np
import cv2
from lost_ds.util import get_fs
from lost_ds.geometry.lost_geom import LOSTGeometries
from lost_ds.functional.api import remove_empty
def get_fontscale(fontscale, thickness, img_h, text_max_h_frac=0.04):
    """Resolve a concrete font scale.

    Numeric values are returned unchanged. The string 'auto' derives a scale
    from the image height via cv2 (target text height is a fraction of the
    image height, at least 10 px). Any other value is passed through.
    """
    if isinstance(fontscale, (int, float)):
        return fontscale
    if fontscale == 'auto':
        target_height = max(int(text_max_h_frac * img_h), 10)
        return cv2.getFontScaleFromHeight(cv2.FONT_HERSHEY_SIMPLEX,
                                          target_height,
                                          thickness)
    return fontscale
def get_thickness(line_thickness, img_h, thickness_max_h_frac=0.002):
    """Resolve a concrete line thickness.

    'auto' maps to a fraction of the image height (truncated to int); any
    other value is passed through unchanged.
    """
    if line_thickness != 'auto':
        return line_thickness
    return int(thickness_max_h_frac * img_h)
def vis_sample(img, df, line_thickness=3, color=(0, 0, 255),
               lbl_col='anno_lbl', lost_geometries:LOSTGeometries=None,
               blow_up=None, radius=2, fontscale=2):
    '''Draw the annotations contained in ``df`` onto ``img``.

    Args:
        img (np.ndarray): image to draw on
        df (pandas.DataFrame): annotations to visualize; rows with empty
            ``anno_data`` are dropped first
        line_thickness (int, dict of int): line thickness for annotations if
            int or dict for anno-type wise mapping like {dtype: thickness}
        color (tuple, dict of tuple): colors (B,G,R) for all annos if tuple
            or dict for labelwise mapping like {label: color}
        lbl_col (str): dataframe column holding the labels
        lost_geometries (LOSTGeometries): instance to reuse; a fresh one is
            created when None
        blow_up: unused placeholder (TODO: implement)
        radius (int): radius for point/circle annotations
        fontscale: font scale for labels, or 'auto'

    Returns:
        np.array: the image with the annotations painted on it.
    '''
    df = remove_empty(df, 'anno_data')
    if len(df) == 0:
        # Nothing to draw -- return the image untouched.
        return img
    geom = LOSTGeometries() if lost_geometries is None else lost_geometries
    confidences = list(df['anno_confidence']) if hasattr(df, 'anno_confidence') else None
    thickness = get_thickness(line_thickness, img.shape[0])
    # fontscale is derived from the *unclamped* thickness on purpose; the
    # clamp below only affects drawing.
    fontscale = get_fontscale(fontscale, thickness, img.shape[0])
    thickness = max(1, thickness)
    return geom.draw(img,
                     list(df['anno_data']),
                     confidences,
                     list(df[lbl_col]),
                     list(df['anno_dtype']),
                     list(df['anno_style']),
                     list(df['anno_format']),
                     thickness, fontscale, color,
                     radius)
def vis_and_store(df, out_dir, lbl_col='anno_lbl', color=(0, 0, 255),
                  line_thickness=2, fontscale=2, filesystem=None,
                  radius=2):
    '''Render annotations for every image referenced in ``df`` and store
    the results in ``out_dir`` (one output file per source image, keeping
    the original basename).

    Args:
        df (pd.DataFrame): dataset in lost format to visualize
        out_dir (str): directory to store the visualized annotations
        lbl_col (str): column containing the labels
        color (tuple, dict of tuple): colors (B,G,R) for all annos if tuple
            or dict for labelwise mapping like {label: color}
        line_thickness (int, dict of int): line thickness for annotations if
            int or dict for anno-type wise mapping like {dtype: thickness}
        fontscale: font scale for labels, or 'auto'
        filesystem (fsspec.filesystem, FileMan): filesystem to use. Use local
            if not initialized
        radius (int): radius to draw for points/circles
    '''
    fs = get_fs(filesystem)
    fs.makedirs(out_dir, exist_ok=True)

    def _render_one(img_path, img_annos):
        geom = LOSTGeometries()
        target = os.path.join(out_dir, os.path.basename(img_path))
        if not img_annos['anno_data'].notnull().any():
            # No annotation data at all -> a plain copy is sufficient.
            fs.copy(img_path, target)
            return
        painted = vis_sample(img=fs.read_img(img_path), df=img_annos,
                             line_thickness=line_thickness, color=color,
                             lbl_col=lbl_col, lost_geometries=geom,
                             radius=radius, fontscale=fontscale)
        fs.write_img(painted, target)

    groups = tqdm(df.groupby('img_path'), desc='visualize')
    Parallel(n_jobs=-1)(delayed(_render_one)(path, img_annos)
                        for path, img_annos in groups)
def vis_semantic_segmentation(df, out_dir, n_classes, palette='dark',
                              seg_path_col='seg_path', filesystem=None):
    """Visualize stored semantic segmentations by mapping each class id to a
    distinct color and writing the colored maps to ``out_dir``.

    Args:
        df (pandas.DataFrame): The DataFrame that contains the annotations to
            visualize.
        out_dir (str): path to store the colored images
        n_classes (int): number of classes occurring in the pixelmaps, i.e.
            the number of different colors needed for visualization
        palette (str): seaborn color palette e.g. 'dark', 'bright', 'pastel';
            refer https://seaborn.pydata.org/tutorial/color_palettes.html
        seg_path_col (str): column of ``df`` holding the pixelmap file paths
        filesystem (fsspec.filesystem, FileMan): filesystem to use. Use local
            if not initialized
    """
    fs = get_fs(filesystem)
    fs.makedirs(out_dir, exist_ok=True)
    # NOTE: `sns` comes from the optional seaborn import at module top; this
    # function raises NameError if seaborn is not installed.
    palette = sns.color_palette(palette, n_classes)
    # Convert 0..1 float RGB triples to 0..255 uint8 color values.
    palette = [(np.array(x)*255).astype(np.uint8) for x in palette]
    segmentations = df[seg_path_col].unique()
    def vis_seg(seg_path):
        seg = fs.read_img(seg_path)
        # Blank color canvas of shape (H, W, 3).
        vis = np.zeros(seg.shape[:2] + (3,))
        for i in range(n_classes):
            # assumes seg broadcasts against (H, W, 3), i.e. it is loaded as
            # (H, W, 3) with identical channels or (H, W, 1) -- TODO confirm
            vis = np.where(seg==i, palette[i], vis)
        fs.write_img(vis, os.path.join(out_dir, seg_path.split('/')[-1]))
    Parallel(n_jobs=-1)(delayed(vis_seg)(seg_path)
                        for seg_path in tqdm(segmentations, desc='vis sem. seg.'))
| 39.860927 | 81 | 0.615052 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,391 | 0.397242 |
2dbb32b6fb7e99eba9946c0267ff01781d71e612 | 16,638 | py | Python | cadnano/views/pathview/prexovermanager.py | mctrinh/cadnano2.5 | d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736 | [
"BSD-3-Clause"
] | 1 | 2022-03-27T14:37:32.000Z | 2022-03-27T14:37:32.000Z | cadnano/views/pathview/prexovermanager.py | mctrinh/cadnano2.5 | d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736 | [
"BSD-3-Clause"
] | null | null | null | cadnano/views/pathview/prexovermanager.py | mctrinh/cadnano2.5 | d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736 | [
"BSD-3-Clause"
] | 1 | 2021-01-22T02:29:38.000Z | 2021-01-22T02:29:38.000Z | from collections import deque
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QGraphicsRectItem
from cadnano.gui.palette import getNoPen
from cadnano.proxies.cnenum import StrandType
from .pathextras import PreXoverItem
class PreXoverManager(QGraphicsRectItem):
    """Manages the PreXoverItems shown in the path view.

    Items are pooled (``pxi_pool``) and recycled via ``getPoolItem`` instead
    of being created/destroyed per activation, which avoids issues with
    deleting animations.

    Attributes:
        active_pxis (dict): maps ``(is_fwd, idx)`` to the active PreXoverItem
        hovered_items (list): PreXoverItems currently in the hovered state
        HUE_FACTOR (float): scale applied to step size when spreading hues
        KEYMAP (dict): maps int 0-9 to the matching ``Qt.Key_<n>`` constant
        neighbor_prexover_items (dict): maps ``(id_num, is_fwd, idx)`` to a
            neighbor PreXoverItem (used to avoid creating duplicates)
        part_item (TYPE): parent part item this manager belongs to
        prexover_item_map (dict): maps ``(id_num, is_fwd, idx)`` to a tuple
            of ``(PreXoverItem, list of neighbor PreXoverItems)``
        pxi_pool (collections.deque): pool of reusable PreXoverItems
        virtual_helix_item (VirtualHelixItem): currently active virtual
            helix item, or None
    """
    HUE_FACTOR = 1.6
    KEYMAP = {i: getattr(Qt, 'Key_%d' % i) for i in range(10)}
    def __init__(self, part_item):
        """Create an (invisible, pen-less) manager item under ``part_item``.

        Args:
            part_item: parent part item in the path view scene
        """
        super(QGraphicsRectItem, self).__init__(part_item)
        self.part_item = part_item
        self.virtual_helix_item = None
        self.setPen(getNoPen())
        self._colors = []
        # dictionary of tuple of a (PreXoverItem, List[PreXoverItem])
        # for activating on hover events
        self.prexover_item_map = {}
        self.neighbor_prexover_items = {}  # just a dictionary of neighbors
        self.hovered_items = []
        self._key_press_dict = {}
        # for reuse of PreXoverItem objects
        self.pxi_pool = deque()
        self.active_pxis = {}
    # end def
    def __repr__(self):
        return "<{}>".format(self.__class__.__name__)
    ### ACCESSORS ###
    def window(self):
        """Return the main window this item belongs to.

        NOTE(review): ``self._parent`` is not assigned anywhere in this
        class -- confirm it is set externally, otherwise this raises
        AttributeError.
        """
        return self._parent.window()
    def virtualHelixItem(self):
        """Return the currently active VirtualHelixItem (or None)."""
        return self.virtual_helix_item
    # end def
    def addKeyPress(self, key_int, info):
        """Bind a digit key (0-9) to pre-crossover info.

        Args:
            key_int (int): digit 0-9; translated via ``KEYMAP`` to a Qt key
            info: payload later consumed by ``handlePreXoverKeyPress``
        """
        qtkey = self.KEYMAP[key_int]
        self._key_press_dict[qtkey] = info
    ### EVENT HANDLERS ###
    ### PRIVATE SUPPORT METHODS ###
    def updateBasesPerRepeat(self, step_size):
        """Recreate the per-step color list for the given step size.

        Args:
            step_size: bases per repeat; hue range is step_size*HUE_FACTOR
        """
        hue_scale = step_size*self.HUE_FACTOR
        self._colors = [QColor.fromHsvF(i / hue_scale, 0.75, 0.8).name()
                        for i in range(int(step_size))]
        # self.removeRepeats()
        # self.addRepeats()
    # end def
    def handlePreXoverKeyPress(self, key):
        """Install a crossover between the active base and the neighbor
        bound to ``key`` (see ``addKeyPress``), if both strands exist and
        neither already has a crossover at the relevant index.

        Args:
            key: Qt key code received from the view
        """
        # print("handling key", key, self.KEYMAP.get(key, None))
        if key not in self._key_press_dict:
            return
        # active item
        part = self.part_item.part()
        active_id_num, a_is_fwd, a_idx, a_to_id = part.active_base_info
        a_strand_type = StrandType.FWD if a_is_fwd else StrandType.REV
        neighbor_id_num, n_is_fwd, n_idx, n_to_id = self._key_press_dict[key]
        n_strand_type = StrandType.FWD if n_is_fwd else StrandType.REV
        if not part.hasStrandAtIdx(active_id_num, a_idx)[a_strand_type]:
            print("no active strand", key)
            return
        if not part.hasStrandAtIdx(neighbor_id_num, n_idx)[n_strand_type]:
            print("no neighbor strand", key)
            return
        a_strandset = part.getStrandSets(active_id_num)[a_strand_type]
        n_strandset = part.getStrandSets(neighbor_id_num)[n_strand_type]
        a_strand = a_strandset.getStrand(a_idx)
        n_strand = n_strandset.getStrand(n_idx)
        if a_strand.hasXoverAt(a_idx):
            return
        if n_strand.hasXoverAt(n_idx):
            return
        # SPECIAL CASE: neighbor already has a 3' end, and active has
        # a 5' end, so assume the user wants to install a returning xover
        if a_strand.idx5Prime() == a_idx and n_strand.idx3Prime() == n_idx:
            part.createXover(n_strand, n_idx, a_strand, a_idx)
            return
        # DEFAULT CASE: the active strand acts as strand5p,
        # install a crossover to the neighbor acting as strand3p
        if a_strand_type == n_strand_type:
            if a_is_fwd:
                if part.isAGreaterThanB_Z(active_id_num, a_idx,
                                          neighbor_id_num, n_idx):
                    part.createXover(n_strand, n_idx, a_strand, a_idx)
                else:
                    part.createXover(a_strand, a_idx, n_strand, n_idx)
            else:
                if part.isAGreaterThanB_Z(active_id_num, a_idx,
                                          neighbor_id_num, n_idx):
                    part.createXover(a_strand, a_idx, n_strand, n_idx)
                else:
                    part.createXover(n_strand, n_idx, a_strand, a_idx)
        else:
            part.createXover(a_strand, a_idx, n_strand, n_idx)
    # end def
    def updateTurnsPerRepeat(self):
        """Placeholder; not implemented."""
    # end def
    def part(self):
        """Return the model part via the parent item."""
        return self.parentItem().part()
    ### PUBLIC SUPPORT METHODS ###
    def getItem(self, id_num, is_fwd, idx):
        """Look up the (PreXoverItem, neighbor list) tuple for a base.

        Args:
            id_num (int): VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
            is_fwd (bool): True for the forward strand, False for reverse
            idx (int): the base index within the virtual helix

        Raises:
            KeyError: when no item was registered for that key.
        """
        return self.prexover_item_map[(id_num, is_fwd, idx)]
    # end def
    def clearPreXoverItems(self):
        """Shut down all active and neighbor PreXoverItems and return them
        to the pool; reset every lookup dictionary."""
        # self.deactivateNeighbors()
        self.hovered_items = []
        pxi_pool = self.pxi_pool
        active_pxis = self.active_pxis
        while active_pxis:
            k, x = active_pxis.popitem()
            x.shutdown()
            pxi_pool.append(x)
        self.prexover_item_map = {}
        for x in self.neighbor_prexover_items.values():
            x.shutdown()
            pxi_pool.append(x)
        self._key_press_dict = {}
        self.neighbor_prexover_items = {}
    # end def
    @staticmethod
    def getPoolItem(pool, cls, *args):
        """grab an item from a pool if there is one and reconfigure it
        otherwise, create a new object of type `cls`
        Useful to avoid issues with deleting animations

        Args:
            pool: deque of reusable items
            cls: class to instantiate when the pool is empty
            *args: constructor / resetItem arguments
        """
        if len(pool) > 0:
            item = pool.pop()
            item.resetItem(*args)
            return item
        else:
            return cls(*args)
    # end def
    def reset(self):
        """Clear all items and forget the active virtual helix."""
        self.clearPreXoverItems()
        self.virtual_helix_item = None
    # end def
    def activateVirtualHelix(self, virtual_helix_item, this_idx, per_neighbor_hits):
        """Populate self.prexover_item_map dictionary which maps a tuple
        of (id_num, is_fwd, idx) to a given PreXoverItem and a List of
        neighbor PreXoverItems. This also deactivates any previously active
        VirtualHelix.

        Args:
            virtual_helix_item (cadnano.guil.views.pathview.virtualhelixitem.VirtualHelixItem)
            this_idx (int): the base index within the virtual helix
            per_neighbor_hits: mapping of neighbor id_num to a pair of
                (fwd_axis_hits, rev_axis_hits); each hit is
                (active_idx, fwd_neighbor_idxs, rev_neighbor_idxs)
        """
        # print("ACTIVATING VH", virtual_helix_item.idNum())
        # 1. Clear all PreXoverItems
        self.clearPreXoverItems()
        pxis = self.prexover_item_map
        neighbor_pxis_dict = self.neighbor_prexover_items  # for avoiding duplicates
        part_item = self.part_item
        pxi_pool = self.pxi_pool
        getPoolItem = self.getPoolItem
        bpr = virtual_helix_item.getProperty('bases_per_repeat')
        self.virtual_helix_item = virtual_helix_item
        self.updateBasesPerRepeat(bpr)
        # the list of neighbors per strand
        id_num = virtual_helix_item.idNum()
        fwd_st_type, rev_st_type = True, False  # for clarity in the call to constructors
        # start, length = part_item.part().normalizedRange(id_num, this_idx)
        active_pxis = self.active_pxis
        # 1. Construct PXIs for the active virtual_helix_item
        for neighbor_id, hits in per_neighbor_hits.items():
            fwd_axis_hits, rev_axis_hits = hits
            # Track active and neighbor idxs in flat list
            # so we can look for idx pairs
            fwd_active_idxs = [i[0] for i in fwd_axis_hits]
            rev_active_idxs = [i[0] for i in rev_axis_hits]
            fwd_neighbor_idxs = [j for k in [i[2] for i in fwd_axis_hits] for j in k]
            rev_neighbor_idxs = [j for k in [i[1] for i in rev_axis_hits] for j in k]
            nvhi = part_item.idToVirtualHelixItem(neighbor_id)
            # n_step_size = nvhi.getProperty('bases_per_repeat')
            for idx, fwd_idxs, rev_idxs in fwd_axis_hits:
                # print("f fwd_active_idxs", fwd_active_idxs)
                # Adjacent active indices on the same strand, if any.
                nearby_idxs = []
                if idx-1 in fwd_active_idxs:
                    nearby_idxs.append(idx-1)
                if idx+1 in fwd_active_idxs:
                    nearby_idxs.append(idx+1)
                apxi = getPoolItem(pxi_pool,
                                   PreXoverItem,
                                   virtual_helix_item, fwd_st_type, idx,
                                   nearby_idxs, neighbor_id, self
                                   )
                # apxi = active_pxis[(fwd_st_type, idx)]
                apxi.enableActive(True, to_vh_id_num=neighbor_id)
                active_pxis[(fwd_st_type, idx)] = apxi
                neighbor_pxis = []
                pxis[(id_num, fwd_st_type, idx)] = (apxi, neighbor_pxis)
                # print("f fwd_neighbor_idxs", fwd_neighbor_idxs)
                for j in fwd_idxs:
                    nkey = (neighbor_id, fwd_st_type, j)
                    npxi = neighbor_pxis_dict.get(nkey)
                    if npxi is None:
                        nearby_idxs = []
                        if j-1 in rev_neighbor_idxs:
                            nearby_idxs.append(j-1)
                        if j+1 in rev_neighbor_idxs:
                            nearby_idxs.append(j+1)
                        npxi = getPoolItem(pxi_pool,
                                           PreXoverItem,
                                           nvhi, fwd_st_type, j,
                                           nearby_idxs, id_num, self
                                           )
                        neighbor_pxis_dict[nkey] = npxi
                    neighbor_pxis.append(npxi)
                # print("f rev_neighbor_idxs", rev_neighbor_idxs)
                for j in rev_idxs:
                    nkey = (neighbor_id, rev_st_type, j)
                    npxi = neighbor_pxis_dict.get(nkey)
                    if npxi is None:
                        nearby_idxs = []
                        if j-1 in fwd_neighbor_idxs:
                            nearby_idxs.append(j-1)
                        if j+1 in fwd_neighbor_idxs:
                            nearby_idxs.append(j+1)
                        npxi = getPoolItem(pxi_pool,
                                           PreXoverItem,
                                           nvhi, rev_st_type, j,
                                           nearby_idxs, id_num, self
                                           )
                        neighbor_pxis_dict[nkey] = npxi
                    neighbor_pxis.append(npxi)
            # Mirror of the loop above for the reverse strand hits.
            for idx, fwd_idxs, rev_idxs in rev_axis_hits:
                # print("r rev_active_idxs", rev_active_idxs)
                nearby_idxs = []
                if idx-1 in rev_active_idxs:
                    nearby_idxs.append(idx-1)
                if idx+1 in rev_active_idxs:
                    nearby_idxs.append(idx+1)
                apxi = getPoolItem(pxi_pool,
                                   PreXoverItem,
                                   virtual_helix_item, rev_st_type, idx,
                                   nearby_idxs, neighbor_id, self
                                   )
                # apxi = active_pxis[(rev_st_type, idx)]
                apxi.enableActive(True, to_vh_id_num=neighbor_id)
                active_pxis[(rev_st_type, idx)] = apxi
                neighbor_pxis = []
                pxis[(id_num, rev_st_type, idx)] = (apxi, neighbor_pxis)
                # print("r fwd_neighbor_idxs", fwd_neighbor_idxs)
                for j in fwd_idxs:
                    nkey = (neighbor_id, fwd_st_type, j)
                    npxi = neighbor_pxis_dict.get(nkey)
                    if npxi is None:
                        nearby_idxs = []
                        if j-1 in rev_neighbor_idxs:
                            nearby_idxs.append(j-1)
                        if j+1 in rev_neighbor_idxs:
                            nearby_idxs.append(j+1)
                        npxi = getPoolItem(pxi_pool,
                                           PreXoverItem,
                                           nvhi, fwd_st_type, j,
                                           nearby_idxs, id_num, self
                                           )
                        neighbor_pxis_dict[nkey] = npxi
                    neighbor_pxis.append(npxi)
                # print("r rev_neighbor_idxs", rev_neighbor_idxs)
                for j in rev_idxs:
                    nkey = (neighbor_id, rev_st_type, j)
                    npxi = neighbor_pxis_dict.get(nkey)
                    if npxi is None:
                        nearby_idxs = []
                        if j-1 in fwd_neighbor_idxs:
                            nearby_idxs.append(j-1)
                        if j+1 in fwd_neighbor_idxs:
                            nearby_idxs.append(j+1)
                        npxi = getPoolItem(pxi_pool,
                                           PreXoverItem,
                                           nvhi, rev_st_type, j,
                                           nearby_idxs, id_num, self
                                           )
                        neighbor_pxis_dict[nkey] = npxi
                    neighbor_pxis.append(npxi)
        # end for per_neighbor_hits
    # end def
    def activateNeighbors(self, id_num, is_fwd, idx):
        """Activate the neighbor PreXoverItems of a hovered base and bind
        digit shortcuts 0..n-1 to them.

        Args:
            id_num (int): VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
            is_fwd (bool): True for the forward strand, False for reverse
            idx (int): the base index within the virtual helix
        """
        # print("ACTIVATING neighbors", id_num, idx)
        item = self.prexover_item_map.get((id_num, is_fwd, idx))
        if item is None:
            apxi = self.active_pxis.get((is_fwd, idx))
            if apxi is not None:
                apxi.setActiveHovered(True)
                self.hovered_items.append(apxi)
        else:
            pxi, neighbor_list = item
            # print("Should have {} neighbors".format(len(neighbor_list)))
            for k, npxi in enumerate(neighbor_list):
                npxi.activateNeighbor(pxi, shortcut=str(k))
                self.addKeyPress(k, npxi.getInfo())
                self.hovered_items.append(npxi)
    # end def
    def deactivateNeighbors(self):
        """Clear the key bindings and un-hover all hovered items."""
        self._key_press_dict = {}
        while self.hovered_items:
            self.hovered_items.pop().deactivateNeighbor()
    def updateModelActiveBaseInfo(self, pre_xover_info):
        """Notify model of pre_xover_item hover state.

        Args:
            pre_xover_info (Tuple): from call to getInfo()
        """
        self.part_item.part().setActiveBaseInfo(pre_xover_info)
    # end def
    def isVirtualHelixActive(self, id_num):
        """Ask the model part whether the given virtual helix is active.

        Args:
            id_num (int): VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
        """
        return self.part_item.part().isVirtualHelixActive(id_num)
    # end def
# end class
| 36.486842 | 108 | 0.536783 | 16,362 | 0.983411 | 0 | 0 | 528 | 0.031735 | 0 | 0 | 4,808 | 0.288977 |
2dbba15b3067323a927d0aa7d2634d1361341f4c | 782 | py | Python | tests/api_test.py | pmav99/sysfacts | 09a5658a8f4e789db71844759dd4ae61369f4f4a | [
"MIT"
] | 2 | 2019-01-25T17:32:49.000Z | 2020-11-06T19:45:40.000Z | tests/api_test.py | pmav99/sysfacts | 09a5658a8f4e789db71844759dd4ae61369f4f4a | [
"MIT"
] | 3 | 2019-01-25T23:36:42.000Z | 2019-01-26T23:20:09.000Z | tests/api_test.py | pmav99/sysfacts | 09a5658a8f4e789db71844759dd4ae61369f4f4a | [
"MIT"
] | null | null | null | import json
import pytest
import sysfacts
@pytest.fixture(scope="module")
def collected_data():
    # Collect system facts once per test module (the call probes the whole
    # machine, so it is relatively expensive).
    return sysfacts.collect_facts()
def test_imports():
    # Smoke test: the public entry point must be importable from the
    # package root (the imported name is deliberately unused).
    from sysfacts import collect_facts
def test_return_type(collected_data):
    # collect_facts() must return a plain dict of fact categories.
    assert isinstance(collected_data, dict)
def test_keys_are_present(collected_data):
    # Exactly these top-level fact categories must be present -- no more,
    # no fewer.
    expected_keys = {
        "timestamp",
        "os_release",
        "lsb_release",
        "distro_release",
        "uname",
        "cpu_info",
        "memory_info",
        "swap_info",
        "cpu_usage",
    }
    actual_keys = set(collected_data.keys())
    # On failure, report exactly which keys differ.
    assert expected_keys == actual_keys, expected_keys.symmetric_difference(actual_keys)
def test_is_json_serializable(collected_data):
    # The whole fact dict must round-trip through the stdlib JSON encoder.
    serialized = json.dumps(collected_data)
    assert isinstance(serialized, str)
| 19.55 | 81 | 0.680307 | 0 | 0 | 0 | 0 | 89 | 0.113811 | 0 | 0 | 112 | 0.143223 |
2dbd516e74ed752446450ca98d09c2ca319fdd11 | 521 | py | Python | MerginLetters.py | SandraCoburn/python-code-challenges | 52ca026c02a45cadc890d01fc095d39d42b36d4c | [
"MIT"
] | null | null | null | MerginLetters.py | SandraCoburn/python-code-challenges | 52ca026c02a45cadc890d01fc095d39d42b36d4c | [
"MIT"
] | null | null | null | MerginLetters.py | SandraCoburn/python-code-challenges | 52ca026c02a45cadc890d01fc095d39d42b36d4c | [
"MIT"
] | null | null | null |
def mergingLetters(s, t):
    """Merge two strings by alternating their characters.

    Characters are taken alternately from ``s`` and ``t`` (``s`` first);
    once the shorter string is exhausted, the remainder of the longer one
    is appended.

    BUGFIX: the original looped only over ``t``, silently dropping the tail
    of ``s`` whenever ``s`` was longer (e.g. ('abc', 'x') -> 'ax' instead of
    'axbc'). Leftover debug print() calls were removed as well.

    Args:
        s (str): first string.
        t (str): second string.

    Returns:
        str: the alternating merge of ``s`` and ``t``.
    """
    merged = []
    for i in range(max(len(s), len(t))):
        if i < len(s):
            merged.append(s[i])
        if i < len(t):
            merged.append(t[i])
    return "".join(merged)
print(mergingLetters('abcd', 'jjjjjjj')) | 26.05 | 71 | 0.589251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.128599 |
2dbe52d295d10c6a1ea04833833e85f2bfabaaa8 | 2,696 | py | Python | loutilities/flask_helpers/decorators.py | louking/loutilities | 7a7bb27b09b8d6e3a411153b604858aaec397fc6 | [
"Apache-2.0"
] | 1 | 2020-03-16T12:47:08.000Z | 2020-03-16T12:47:08.000Z | loutilities/flask_helpers/decorators.py | louking/loutilities | 7a7bb27b09b8d6e3a411153b604858aaec397fc6 | [
"Apache-2.0"
] | 35 | 2015-07-11T14:57:30.000Z | 2022-03-12T00:53:44.000Z | loutilities/flask_helpers/decorators.py | louking/loutilities | 7a7bb27b09b8d6e3a411153b604858aaec397fc6 | [
"Apache-2.0"
] | null | null | null | '''
decorators - decorators to help with flask applications
'''
# standard
from datetime import timedelta
from functools import update_wrapper
# pypi
from flask import make_response, request, current_app
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    '''
    Decorator factory that adds CORS (``Access-Control-*``) headers to a
    Flask view (based on https://web.archive.org/web/20190128010149/http://flask.pocoo.org/snippets/56/).

    :param origin: '*' to allow all origins, otherwise a string with a URL or a list of URLs that might access the resource. If ``None``, no ``Access-Control-Allow-Origin`` header is emitted.
    :param methods: Optionally a list of methods that are allowed for this view. If not provided it will allow all methods that are implemented.
    :param headers: Optionally a list of headers that are allowed for this request.
    :param max_age: The number of seconds as integer or timedelta object for which the preflighted request is valid.
    :param attach_to_all: True if the decorator should add the access control headers to all HTTP methods or False if it should only add them to OPTIONS responses.
    :param automatic_options: If enabled the decorator will use the default Flask OPTIONS response and attach the headers there, otherwise the view function will be called to generate an appropriate response.
    '''
    # Normalize the configuration once, at decoration time.
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, str):
        headers = ', '.join(x.upper() for x in headers)
    # BUGFIX: guard against origin=None -- ', '.join(None) used to raise a
    # TypeError as soon as the decorator was applied with the default args.
    if origin is not None and not isinstance(origin, str):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        # BUGFIX: coerce to int so the header reads e.g. '21600' rather than
        # '21600.0' (Access-Control-Max-Age is delta-seconds, an integer).
        max_age = int(max_age.total_seconds())

    def get_methods():
        # Allowed methods: the explicit list, or whatever Flask reports in
        # its default OPTIONS response for this endpoint.
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp
            h = resp.headers
            if origin is not None:
                h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp

        # Tell Flask not to auto-answer OPTIONS; this decorator handles it.
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
2dbe7b6b088d005ebb2e9f0a7622bd914d341736 | 631 | py | Python | modules/secondarybase64_layer5.py | bobombobo/python-obfuscator | c92271a6a482d74d3ac5fc3525261f486914ce16 | [
"CC0-1.0"
] | 1 | 2021-05-18T16:04:31.000Z | 2021-05-18T16:04:31.000Z | modules/secondarybase64_layer5.py | bobombobo/python-obfuscator | c92271a6a482d74d3ac5fc3525261f486914ce16 | [
"CC0-1.0"
] | 1 | 2021-05-31T08:09:47.000Z | 2021-06-01T16:12:05.000Z | modules/secondarybase64_layer5.py | bobombobo/python-obfuscator | c92271a6a482d74d3ac5fc3525261f486914ce16 | [
"CC0-1.0"
] | null | null | null | def secondarybase64_layer5(nearing_the_end_script):
import base64
print("Secondary base64 encrypting")
joe = (nearing_the_end_script)
spliting = joe.encode('utf-8')
spliting = base64.b64encode(spliting)
spliting = spliting.decode('utf-8')
split_strings = []
n = int((len(spliting))/20)
for index in range(0, len(spliting), n):
split_strings.append(spliting[index : index + n])
lmaooo = ('"'+ '"+"'.join(split_strings) + '"')
dude_im_so_done_with_this = '''import base64;exec((base64.b64decode(({lmaooo}).encode('utf-8'))).decode('utf-8'))'''.format(lmaooo=lmaooo)
return(dude_im_so_done_with_this)
| 37.117647 | 140 | 0.700475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.22504 |
2dbea13bd2722e39d52ed915d89d5e1fea406ab7 | 458 | py | Python | Forms/InstagramLoginForm.py | CT83/PyMultiPoster | 2ecd96764f21a88486f87cc55099ddff8d106e4f | [
"Apache-2.0"
] | null | null | null | Forms/InstagramLoginForm.py | CT83/PyMultiPoster | 2ecd96764f21a88486f87cc55099ddff8d106e4f | [
"Apache-2.0"
] | null | null | null | Forms/InstagramLoginForm.py | CT83/PyMultiPoster | 2ecd96764f21a88486f87cc55099ddff8d106e4f | [
"Apache-2.0"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import PasswordField, SubmitField, StringField
from wtforms.validators import DataRequired, Length
class InstagramLoginForm(FlaskForm):
    """WTForms form that collects Instagram credentials for saving."""
    # Username is required and constrained to 6-20 characters.
    username = StringField('Instagram Username', validators=[DataRequired(),
                                                             Length(min=6, max=20)])
    # Password is required; no length constraint is applied here.
    password = PasswordField('Instagram Password', validators=[DataRequired()])
    submit = SubmitField('Save')
| 41.636364 | 84 | 0.679039 | 311 | 0.679039 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.100437 |
2dbf56a3c63f5d01e7044ebe76a8ea82211353a1 | 5,117 | py | Python | tests/flask/test_oauth2/test_token_introspection.py | tk193192/authlib | 4c60a628f64c6d385a06ea55e416092726b94d07 | [
"BSD-3-Clause"
] | 2 | 2021-04-26T18:17:37.000Z | 2021-04-28T21:39:45.000Z | tests/flask/test_oauth2/test_token_introspection.py | tk193192/authlib | 4c60a628f64c6d385a06ea55e416092726b94d07 | [
"BSD-3-Clause"
] | null | null | null | tests/flask/test_oauth2/test_token_introspection.py | tk193192/authlib | 4c60a628f64c6d385a06ea55e416092726b94d07 | [
"BSD-3-Clause"
] | 1 | 2019-10-07T02:01:48.000Z | 2019-10-07T02:01:48.000Z | from flask import json
from authlib.flask.oauth2.sqla import create_query_token_func
from authlib.oauth2.rfc7662 import IntrospectionEndpoint
from .models import db, User, Client, Token
from .oauth2_server import TestCase
from .oauth2_server import create_authorization_server
query_token = create_query_token_func(db.session, Token)
class MyIntrospectionEndpoint(IntrospectionEndpoint):
    """RFC 7662 token-introspection endpoint backed by the test SQLAlchemy models."""
    def query_token(self, token, token_type_hint, client):
        # Delegate lookup to the module-level query function created from
        # the Token model via create_query_token_func.
        return query_token(token, token_type_hint, client)
    def introspect_token(self, token):
        """Build the RFC 7662 introspection response payload for *token*."""
        user = User.query.get(token.user_id)
        return {
            # A revoked token is reported as inactive.
            "active": not token.revoked,
            "client_id": token.client_id,
            "username": user.username,
            "scope": token.scope,
            "sub": user.get_user_id(),
            "aud": token.client_id,
            "iss": "https://server.example.com/",
            "exp": token.get_expires_at(),
            "iat": token.issued_at,
        }
class IntrospectTokenTest(TestCase):
    """Integration tests for the /oauth/introspect endpoint."""
    def prepare_data(self):
        # Register the introspection endpoint and seed one user plus the
        # 'introspect-client' OAuth client used by all tests below.
        server = create_authorization_server(self.app)
        server.register_endpoint(MyIntrospectionEndpoint)
        user = User(username='foo')
        db.session.add(user)
        db.session.commit()
        client = Client(
            user_id=user.id,
            client_id='introspect-client',
            client_secret='introspect-secret',
            redirect_uri='http://a.b/c',
            scope='profile',
        )
        db.session.add(client)
        db.session.commit()
    def create_token(self):
        # Seed one valid bearer token ('a1' / refresh 'r1') for user id 1.
        token = Token(
            user_id=1,
            client_id='introspect-client',
            token_type='bearer',
            access_token='a1',
            refresh_token='r1',
            scope='profile',
            expires_in=3600,
        )
        db.session.add(token)
        db.session.commit()
    def test_invalid_client(self):
        """Missing or wrong client credentials must yield invalid_client."""
        self.prepare_data()
        # No Authorization header at all.
        rv = self.client.post('/oauth/introspect')
        resp = json.loads(rv.data)
        self.assertEqual(resp['error'], 'invalid_client')
        # Malformed Authorization header.
        headers = {'Authorization': 'invalid token_string'}
        rv = self.client.post('/oauth/introspect', headers=headers)
        resp = json.loads(rv.data)
        self.assertEqual(resp['error'], 'invalid_client')
        # Unknown client id with a valid secret.
        headers = self.create_basic_header(
            'invalid-client', 'introspect-secret'
        )
        rv = self.client.post('/oauth/introspect', headers=headers)
        resp = json.loads(rv.data)
        self.assertEqual(resp['error'], 'invalid_client')
        # Known client id with a wrong secret.
        headers = self.create_basic_header(
            'introspect-client', 'invalid-secret'
        )
        rv = self.client.post('/oauth/introspect', headers=headers)
        resp = json.loads(rv.data)
        self.assertEqual(resp['error'], 'invalid_client')
    def test_invalid_token(self):
        """Bad or missing token parameters must be rejected or inactive."""
        self.prepare_data()
        headers = self.create_basic_header(
            'introspect-client', 'introspect-secret'
        )
        # No 'token' parameter at all.
        rv = self.client.post('/oauth/introspect', headers=headers)
        resp = json.loads(rv.data)
        self.assertEqual(resp['error'], 'invalid_request')
        # Hint present but 'token' still missing.
        rv = self.client.post('/oauth/introspect', data={
            'token_type_hint': 'refresh_token',
        }, headers=headers)
        resp = json.loads(rv.data)
        self.assertEqual(resp['error'], 'invalid_request')
        # Unknown token_type_hint value.
        rv = self.client.post('/oauth/introspect', data={
            'token': 'a1',
            'token_type_hint': 'unsupported_token_type',
        }, headers=headers)
        resp = json.loads(rv.data)
        self.assertEqual(resp['error'], 'unsupported_token_type')
        # Nonexistent token is reported as inactive, not as an error.
        rv = self.client.post('/oauth/introspect', data={
            'token': 'invalid-token',
        }, headers=headers)
        resp = json.loads(rv.data)
        self.assertEqual(resp['active'], False)
        # 'a1' exists as an access token, so looking it up under the
        # refresh_token hint finds nothing (no token seeded yet anyway).
        rv = self.client.post('/oauth/introspect', data={
            'token': 'a1',
            'token_type_hint': 'refresh_token',
        }, headers=headers)
        resp = json.loads(rv.data)
        self.assertEqual(resp['active'], False)
    def test_introspect_token_with_hint(self):
        """A valid token introspected with an explicit access_token hint."""
        self.prepare_data()
        self.create_token()
        headers = self.create_basic_header(
            'introspect-client', 'introspect-secret'
        )
        rv = self.client.post('/oauth/introspect', data={
            'token': 'a1',
            'token_type_hint': 'access_token',
        }, headers=headers)
        self.assertEqual(rv.status_code, 200)
        resp = json.loads(rv.data)
        self.assertEqual(resp['client_id'], 'introspect-client')
    def test_introspect_token_without_hint(self):
        """A valid token must also be found when no hint is supplied."""
        self.prepare_data()
        self.create_token()
        headers = self.create_basic_header(
            'introspect-client', 'introspect-secret'
        )
        rv = self.client.post('/oauth/introspect', data={
            'token': 'a1',
        }, headers=headers)
        self.assertEqual(rv.status_code, 200)
        resp = json.loads(rv.data)
        self.assertEqual(resp['client_id'], 'introspect-client')
| 34.574324 | 67 | 0.604065 | 4,775 | 0.933164 | 0 | 0 | 0 | 0 | 0 | 0 | 1,079 | 0.210866 |
2dc102802d9ae34a2d04e2d9f5c68f1bb09bad45 | 7,031 | py | Python | modules/database.py | MrEluzium/UlvicationBot | 87a591726161166072f59306175c1c7ef5a7f713 | [
"Apache-2.0"
] | null | null | null | modules/database.py | MrEluzium/UlvicationBot | 87a591726161166072f59306175c1c7ef5a7f713 | [
"Apache-2.0"
] | null | null | null | modules/database.py | MrEluzium/UlvicationBot | 87a591726161166072f59306175c1c7ef5a7f713 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Артём Воронов
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlite3
import os
from modules.baselogger import get_logger
log = get_logger("db")
class DataBase:
    """Thin convenience wrapper around a single SQLite3 connection.

    All SQL statements are assembled by f-string interpolation of the caller's
    arguments.  NOTE(review): this means table/column names AND values are
    injected verbatim into the SQL text — callers must quote string values
    themselves, and untrusted input must never reach these methods (SQL
    injection risk); confirm every call site before exposing this class.
    """
    def __init__(self, path):
        """
        May create and connect to unique SQLite3 database
        :param path: Path to the db file
        """
        try:
            self.__path = path
            # Ensure the data/databases directory tree exists before
            # sqlite3.connect tries to create the file inside it.
            if not os.path.exists("data/databases"):
                if not os.path.exists("data"):
                    os.mkdir("data")
                os.mkdir("data/databases")
                log.info(f"Created dir: data/databases/")
            self.__conn = sqlite3.connect(self.__path)
            self.__cursor = self.__conn.cursor()
        except sqlite3.Error as e:
            # NOTE(review): on connection failure the error is only logged,
            # leaving the instance without __conn/__cursor; later method
            # calls would then raise AttributeError — confirm intended.
            log.error(e)
    def create_table(self, name, *args, id_replace=None, check_mode=False):
        """
        Create a new table
        :param name: Name of new table
        :param args: strings of columns for table. Def: id PRIMARY KEY UNIQUE NOT NULL
        :param id_replace: replace id column
        :param check_mode: If true, create new table only if it doesn't exist
        :return: bool
        """
        try:
            columns = ""
            for arg in args:
                columns = columns + ", " + arg
            if check_mode:
                name = "if not exists " + name  # if check_mode = True, we get "CREATE TABLE if not exists" execute
            if id_replace:
                self.__cursor.execute(f"""CREATE TABLE {name} ({id_replace}{columns});""")
            else:
                self.__cursor.execute(f"""CREATE TABLE {name} (id PRIMARY KEY UNIQUE NOT NULL{columns});""")
            self.__conn.commit()
            log.info(f"[{self.__path}] Create new table {name} with (id PRIMARY KEY UNIQUE NOT NULL{columns})")
            return True
        except sqlite3.Error as e:
            # "Already exists" errors for these known tables are expected on
            # restart and are reported quietly; anything else is logged as an
            # error.  NOTE(review): matching on the exact exception message
            # string is fragile across sqlite versions — confirm.
            handler_errors = ["table Guilds already exists", "table Members already exists", "table Orgs already exists", "table Shop already exists"]
            if str(e) in handler_errors:
                if str(e) == "table Guilds already exists":
                    log.info("Table Guilds already exists, reading guilds data.")
                return False
            log.error(e)
            return False
    def insert(self, table, columns="", values=""):
        """
        Represent an INSERT SQlite3 execute
        :param table: Table name
        :param columns: Columns you want to insert
        :param values: Values of inserting columns
        :return: bool
        """
        try:
            # Wrap the column list in parentheses only when one was given,
            # so an empty `columns` produces plain "INSERT INTO table VALUES(...)".
            if columns:
                columns = f"({columns})"
            self.__cursor.execute(
                f"""INSERT INTO {table}{columns} VALUES({values});""")
            self.__conn.commit()
            log.info(f"[{self.__path}] Insert row in {table}{columns} VALUES({values})")
            return True
        except sqlite3.Error as e:
            log.error(e)
            return False
    def update(self, table, key_column, key, column, value):
        """
        Represent an UPDATE SQlite3 execute
        :param table: Table name
        :param key_column: Column by which you want to search for an updating row
        :param key: Value of key column in the updating row
        :param column: Column you want to update
        :param value: New value for updating column
        :return: bool
        """
        try:
            self.__cursor.execute(
                f"""UPDATE {table} SET {column} = {value} where {key_column} = {key};""")
            self.__conn.commit()
            return True
        except sqlite3.Error as e:
            log.error(e)
            return False
    def delete(self, table, key_column, key):
        """
        Represent an DELETE SQlite3 execute
        :param table: Table name
        :param key_column: Column by which you want to search for an deleting row
        :param key: Value of key column in the deleting row
        :return: bool
        """
        try:
            self.__cursor.execute(f"""DELETE FROM {table} where {key_column} = {key};""")
            self.__conn.commit()
            return True
        except sqlite3.Error as e:
            log.error(e)
            return False
    def read(self, table, key_column, key, columns_to_read="*"):
        """
        Represent an SELECT.fetchone SQlite3 execute
        :param table: Table name
        :param key_column: Column by which you want to search for an deleting row
        :param key: Value of key column in the deleting row
        :param columns_to_read: Default set to all
        :return: result
        """
        try:
            result = self.__cursor.execute(f"""SELECT {columns_to_read} FROM {table} where {key_column} = {key};""").fetchone()
            return result
        except sqlite3.Error as e:
            log.error(e)
            return None
    def read_many(self, table, size, columns_to_read="*"):
        """
        Represent an SELECT.fetchmany SQlite3 execute
        :param table: Table name
        :param size: Number of selected rows
        :param columns_to_read: Default set to all
        :return: result
        """
        try:
            result = self.__cursor.execute(f"""SELECT {columns_to_read} FROM {table};""").fetchmany(size)
            return result
        except sqlite3.Error as e:
            log.error(e)
            return None
    def read_all(self, table, columns_to_read="*"):
        """
        Represent an SELECT.fetchall SQlite3 execute
        :param table: Table name
        :param columns_to_read: Default set to all
        :return: result
        """
        try:
            result = self.__cursor.execute(f"""SELECT {columns_to_read} FROM {table};""").fetchall()
            return result
        except sqlite3.Error as e:
            log.error(e)
            return None
    def read_all_by_order(self, table, order_key, columns_to_read="*", mod="ASC"):
        """
        Represent an ordered SELECT.fetchall SQlite3 execute
        :param table: Table name
        :param order_key: Column by which you want to sort
        :param columns_to_read: Default set to all
        :param mod: Sorting order: ASC (default) / DESC
        :return: result
        """
        try:
            result = self.__cursor.execute(f"SELECT {columns_to_read} FROM {table} ORDER BY {order_key} {mod};").fetchall()
            return result
        except sqlite3.Error as e:
            log.error(e)
            return None
| 37.005263 | 150 | 0.582563 | 6,360 | 0.903024 | 0 | 0 | 0 | 0 | 0 | 0 | 3,927 | 0.557575 |
2dc2a50bc68f5c4de74bd70134b3c6049945496f | 1,203 | py | Python | common-mk/mojom_bindings_generator_wrapper.py | strassek/chromiumos-platform2 | 12c953f41f48b8a6b0bd1c181d09bdb1de38325c | [
"BSD-3-Clause"
] | 4 | 2020-07-24T06:54:16.000Z | 2021-06-16T17:13:53.000Z | common-mk/mojom_bindings_generator_wrapper.py | strassek/chromiumos-platform2 | 12c953f41f48b8a6b0bd1c181d09bdb1de38325c | [
"BSD-3-Clause"
] | 1 | 2021-04-02T17:35:07.000Z | 2021-04-02T17:35:07.000Z | common-mk/mojom_bindings_generator_wrapper.py | strassek/chromiumos-platform2 | 12c953f41f48b8a6b0bd1c181d09bdb1de38325c | [
"BSD-3-Clause"
] | 1 | 2020-11-04T22:31:45.000Z | 2020-11-04T22:31:45.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Thin wrapper of Mojo's mojom_bindings_generator.py.
To generate C++ files from mojom, it is necessary to run
mojom_bindings_generator.py three times
- without --generate_non_variant_code or --generate_non_variant_code
- with --generate_non_variant_code only
- with both --generate_non_variant_code and --generate_message_ids
However, gni's "rule" does not support multiple "action"s. So, instead,
use this simple python wrapper.
Usage:
python mojom_bindings_generator_wrapper.py ${libbase_ver} \
${MOJOM_BINDINGS_GENERATOR} \
[... and more args/flags to be passed to the mojom_bindings_generator.py]
"""
from __future__ import print_function
import subprocess
import sys
def main(argv):
    """Run mojom_bindings_generator.py three times, once per variant mode.

    Per the module docstring: argv[1] is libbase_ver, argv[2:] is the
    generator command plus its flags.  The three extra flag sets below
    produce the variant code, the non-variant code, and the message-id
    code respectively.  Any failing invocation raises CalledProcessError.
    """
    base_command = argv[2:]
    flag_sets = (
        [],
        ['--generate_non_variant_code'],
        ['--generate_non_variant_code', '--generate_message_ids'],
    )
    for extra_flags in flag_sets:
        subprocess.check_call(base_command + extra_flags)
# Entry point: forward the raw command line to main().
if __name__ == '__main__':
    main(sys.argv)
| 30.075 | 77 | 0.73483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 922 | 0.766417 |
2dc525d7b867d2625c1781dd2d072f7fa9a50ee6 | 4,799 | py | Python | script.module.placenta/lib/resources/lib/sources/en/to_be_fixed/sitedown/onlinemovies.py | parser4life/tantrumrepo | 3b37145f4772409e538cbddb0b7aa23be525772a | [
"Beerware"
] | 1 | 2021-05-09T19:55:51.000Z | 2021-05-09T19:55:51.000Z | script.module.placenta/lib/resources/lib/sources/en/to_be_fixed/sitedown/onlinemovies.py | parser4life/tantrumrepo | 3b37145f4772409e538cbddb0b7aa23be525772a | [
"Beerware"
] | null | null | null | script.module.placenta/lib/resources/lib/sources/en/to_be_fixed/sitedown/onlinemovies.py | parser4life/tantrumrepo | 3b37145f4772409e538cbddb0b7aa23be525772a | [
"Beerware"
] | 2 | 2020-04-01T22:11:12.000Z | 2020-05-07T23:54:52.000Z | # NEEDS FIXING
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: MuadDib
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
    """Placenta scraper provider for onlinemovies.tube / watchonline.pro.

    Python 2 code (urllib.urlencode, urlparse module, str.encode on URLs).
    The file header marks it "NEEDS FIXING"; see note in sources() below.
    """
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['onlinemovies.tube', 'watchonline.pro']
        self.base_link = 'http://watchonline.pro'
    def movie(self, imdb, title, localtitle, aliases, year):
        # Encode the movie identity as a query string; None on any failure.
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Encode the show identity as a query string; None on any failure.
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Extend the show query string (from tvshow) with episode details.
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        """Scrape stream links for the item encoded in *url*.

        Returns a list of source dicts (possibly empty); all scraping
        failures are silently swallowed.
        """
        try:
            sources = []
            if url == None: return sources
            # A non-http url is a query string built by movie()/episode():
            # resolve it to a site page first.
            if not str(url).startswith('http'):
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                if 'tvshowtitle' in data:
                    url = '%s/episode/%s-s%02de%02d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                    year = re.findall('(\d{4})', data['premiered'])[0]
                    url = client.request(url, output='geturl')
                    if url == None: raise Exception()
                    r = client.request(url)
                    # Cross-check the page's year against the premiere year
                    # to avoid matching a wrong show.
                    y = client.parseDOM(r, 'span', attrs = {'class': 'date'})
                    y += [i for i in client.parseDOM(r, 'div', attrs = {'class': 'metadatac'}) if 'date' in i]
                    y = re.findall('(\d{4})', y[0])[0]
                    if not y == year: raise Exception()
                else:
                    #url = '%s/watch/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                    url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                    url = client.request(url, output='geturl')
                    if url == None: raise Exception()
                    r = client.request(url)
            else:
                url = urlparse.urljoin(self.base_link, url)
                r = client.request(url)
            # Each embedded iframe is a candidate player page.
            links = client.parseDOM(r, 'iframe', ret='src')
            for link in links:
                try:
                    url = link.replace('\/', '/')
                    url = client.replaceHTMLCodes(url)
                    url = 'http:' + url if url.startswith('//') else url
                    url = url.encode('utf-8')
                    if not '.php' in url: raise Exception()
                    r = client.request(url, timeout='10')
                    s = re.compile('<script>(.+?)</script>', re.DOTALL).findall(r)
                    # NOTE(review): `jsunpack` is never imported in this file,
                    # so this always raises NameError and is swallowed by the
                    # bare except below — packed scripts are never unpacked.
                    for i in s:
                        try: r += jsunpack.unpack(i)
                        except: pass
                    r = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)
                    for i in r:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass
            return sources
        except:
            return sources
    def resolve(self, url):
        # Resolve a gvideo link to a playable stream via directstream.
        return directstream.googlepass(url)
| 34.52518 | 182 | 0.470723 | 3,941 | 0.821213 | 0 | 0 | 0 | 0 | 0 | 0 | 1,289 | 0.268598 |
2dc6b64392d41e06755147486243708c0cbcc454 | 2,362 | py | Python | venv/src/pages/forms.py | ddelgadoJS/ProyectoWeb | f899c910bf16a79d5c3498bc6e8aa6b741fb56e1 | [
"MIT"
] | 1 | 2019-10-28T03:44:38.000Z | 2019-10-28T03:44:38.000Z | venv/src/pages/forms.py | ddelgadoJS/ProyectoWeb | f899c910bf16a79d5c3498bc6e8aa6b741fb56e1 | [
"MIT"
] | null | null | null | venv/src/pages/forms.py | ddelgadoJS/ProyectoWeb | f899c910bf16a79d5c3498bc6e8aa6b741fb56e1 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import *
class EmpresaCreateForm(forms.ModelForm):
    """ModelForm for creating an Empresa (company) record."""
    class Meta:
        model = Empresa
        fields = [
            'nombre',
            'description',
            'direccion',
            'horario',
            'telefono',
            'correo',
            'serv_origen',
            'serv_destino',
            'latitud',
            'longitud'
        ]
class EmpresaUpdateForm(forms.ModelForm):
    """ModelForm for editing an Empresa record (same fields as the create form)."""
    class Meta:
        model = Empresa
        fields = [
            'nombre',
            'description',
            'direccion',
            'horario',
            'telefono',
            'correo',
            'serv_origen',
            'serv_destino',
            'latitud',
            'longitud'
        ]
class UsuarioUpdateForm(forms.ModelForm):
    """ModelForm for editing the built-in Django User account fields."""
    class Meta:
        model = User
        fields = [
            'username',
            'email',
            'first_name',
            'last_name'
        ]
class RutaCreateForm(forms.ModelForm):
    """ModelForm for creating a Ruta (route) record, including endpoints."""
    class Meta:
        model = Ruta
        fields = [
            'empresa',
            'nombre',
            'description',
            'costo',
            'horario',
            'duracion_viaje',
            'inclusivo',
            'origen_latitud',
            'origen_longitud',
            'destino_latitud',
            'destino_longitud'
        ]
class RutaUpdateForm(forms.ModelForm):
    """ModelForm for editing a Ruta record (same fields as the create form)."""
    class Meta:
        model = Ruta
        fields = [
            'empresa',
            'nombre',
            'description',
            'costo',
            'horario',
            'duracion_viaje',
            'inclusivo',
            'origen_latitud',
            'origen_longitud',
            'destino_latitud',
            'destino_longitud'
        ]
class ParadaCreateForm(forms.ModelForm):
    """ModelForm for creating a Parada (stop) on a route."""
    class Meta:
        model = Parada
        fields = [
            'ruta',
            'nombre',
            'description',
            'horario',
            'latitud',
            'longitud'
        ]
class ParadaUpdateForm(forms.ModelForm):
    """ModelForm for editing a Parada record (same fields as the create form)."""
    class Meta:
        model = Parada
        fields = [
            'ruta',
            'nombre',
            'description',
            'horario',
            'latitud',
            'longitud'
] | 22.495238 | 54 | 0.453006 | 2,198 | 0.930567 | 0 | 0 | 0 | 0 | 0 | 0 | 642 | 0.271804 |
2dc76e500ca4b409bef1091d33217d948f97e80c | 5,639 | py | Python | tests/test_connections.py | iparaskev/py_connect | 43476cddfb25130d058fcf59928454f867af8feb | [
"BSD-3-Clause"
] | 5 | 2021-03-19T07:05:50.000Z | 2021-03-31T22:53:52.000Z | tests/test_connections.py | iparaskev/py_connect | 43476cddfb25130d058fcf59928454f867af8feb | [
"BSD-3-Clause"
] | null | null | null | tests/test_connections.py | iparaskev/py_connect | 43476cddfb25130d058fcf59928454f867af8feb | [
"BSD-3-Clause"
] | null | null | null | """test_connections.py"""
import unittest
import os
from py_connect import ConnectionsHandler
from py_connect import Board, Peripheral, SensorTypes
cons_path = \
"/".join(os.path.abspath(__file__).split("/")[:-2]) + "/test_connections/"
class TestConnection(unittest.TestCase):
    """Tests that ConnectionsHandler parses debug_connection.cd correctly."""
    def test_gpio_connection(self):
        """Board/peripheral identity, power pins and GPIO wiring for the sonar."""
        connections = ConnectionsHandler(cons_path + "debug_connection.cd")
        gpio_con = connections.connections["rpi_sonar"]
        gpio_con_2 = connections.connections["rpi_sonar_2"]
        # Good connection
        # Just check args
        self.assertEqual(gpio_con.name, "rpi_sonar", "Wrong connection name.")
        self.assertIsInstance(gpio_con.board, Board, "Should be board.")
        self.assertIsInstance(gpio_con.peripheral, Peripheral,
                              "Should be peripheral.")
        self.assertEqual(gpio_con.board.name, "rpi_3b_plus",
                         "Should be rpi_3b_plus")
        self.assertEqual(gpio_con.peripheral.name, "hc_sr04",
                         "Should be hc_sr04")
        # Check right power connections.
        self.assertEqual(gpio_con.power_connections[0].pin_1,
                         self.target(gpio_con.board.pins, "gnd_1"),
                         "Should be gnd_1.")
        self.assertEqual(gpio_con.power_connections[0].pin_2,
                         self.target(gpio_con.peripheral.pins, "gnd"),
                         "Should be gnd.")
        self.assertEqual(gpio_con.power_connections[1].pin_1,
                         self.target(gpio_con.board.pins, "power_5v_1"),
                         "Should be power_5v_1.")
        self.assertEqual(gpio_con.power_connections[1].pin_2,
                         self.target(gpio_con.peripheral.pins, "vcc"),
                         "Should be vcc.")
        # Check right hw_connections
        # NOTE(review): the assertion message says "bcm_23" but the expected
        # interface is "bcm_7" — message looks stale; confirm which is right.
        self.assertEqual(gpio_con.hw_connections[0].hwint_1,
                         self.target(gpio_con.board.hw_interfaces, "bcm_7"),
                         "Should be bcm_23")
        self.assertEqual(gpio_con.hw_connections[0].hwint_2,
                         self.target(gpio_con.peripheral.hw_interfaces, "echo"),
                         "Should be echo")
        self.assertEqual(gpio_con.hw_connections[1].hwint_1,
                         self.target(gpio_con.board.hw_interfaces, "bcm_24"),
                         "Should be bcm_24")
        self.assertEqual(gpio_con.hw_connections[1].hwint_2,
                         self.target(gpio_con.peripheral.hw_interfaces, "trigger"),
                         "Should be trigger.")
        # Different objects of same device
        self.assertNotEqual(gpio_con.peripheral, gpio_con_2.peripheral,
                            "Should be different.")
    def target(self, ls, name):
        # Helper: first element of `ls` whose .name matches, else None.
        for p in ls:
            if p.name == name:
                return p
    def test_i2c(self):
        """I2C wiring and slave address of the BME680 connection."""
        connections = ConnectionsHandler(cons_path + "debug_connection.cd")
        i2c_con = connections.connections["rpi_bme680"]
        self.assertEqual(i2c_con.hw_connections[0].hwint_1,
                         self.target(i2c_con.board.hw_interfaces, "i2c_1"),
                         "Should be i2c_1 of rpi.")
        self.assertEqual(i2c_con.hw_connections[0].hwint_2,
                         self.target(i2c_con.peripheral.hw_interfaces, "i2c_0"),
                         "Should be i2c_0 of bme.")
        self.assertEqual(i2c_con.hw_connections[0].slave_address, int(0x77),
                         "Should be 0x77.")
    def test_spi(self):
        """SPI wiring and chip-enable pin of the ICM connection."""
        connections = ConnectionsHandler(cons_path + "debug_connection.cd")
        spi_con = connections.connections["rpi_icm"]
        self.assertEqual(spi_con.hw_connections[0].hwint_1,
                         self.target(spi_con.board.hw_interfaces, "spi_0"),
                         "Should be spi_0 of rpi.")
        self.assertEqual(spi_con.hw_connections[0].hwint_2,
                         self.target(spi_con.peripheral.hw_interfaces, "spi_0"),
                         "Should be spi_0 of bme.")
        ce_index = spi_con.hw_connections[0].ce_index
        ce_pin = spi_con.hw_connections[0].hwint_1.ce[ce_index]
        self.assertEqual(ce_pin.name, "bcm_8", "Should be bcm_8.")
    def test_comm_endpoint(self):
        """Broker connection parameters and message entries of endpoints."""
        connections = ConnectionsHandler(cons_path + "debug_connection.cd")
        con = connections.connections["rpi_sonar"]
        # Simple communication checks
        self.assertEqual(con.com_endpoint.topic_name, "sonar_1.distance")
        self.assertEqual(con.com_endpoint.conn_params.username,
                         "testuser", "Should be testuser.")
        self.assertEqual(con.com_endpoint.conn_params.password,
                         "testuser", "Should be testuser.")
        self.assertEqual(con.com_endpoint.conn_params.host,
                         "r4a-platform.ddns.net", "Should be ...")
        self.assertEqual(con.com_endpoint.conn_params.port,
                         5782, "Should be 5782")
        self.assertEqual(con.com_endpoint.msg.msg_entries[0].type,
                         SensorTypes.DISTANCE)
        self.assertEqual(con.com_endpoint.msg.msg_entries[0].frequency, 10)
        con = connections.connections["rpi_bme680"]
        self.assertEqual(con.com_endpoint.msg.msg_entries[0].type,
                         SensorTypes.ENV)
        #self.assertEqual(con.com_endpoint.msg.msg_entries[1].type,
        #                 SensorTypes.HUMIDITY)
        #self.assertEqual(con.com_endpoint.msg.msg_entries[2].type,
        #                 SensorTypes.GAS)
# Allow running this test module directly with `python test_connections.py`.
if __name__ == "__main__":
    unittest.main()
| 45.845528 | 83 | 0.597446 | 5,343 | 0.947508 | 0 | 0 | 0 | 0 | 0 | 0 | 1,231 | 0.218301 |
2dc9476cbdfe54502530421ec62dd23ee2ee1011 | 4,624 | py | Python | p3iv_utils/src/p3iv_utils/coordinate_transformation.py | fzi-forschungszentrum-informatik/P3IV | 51784e6dc03dcaa0ad58a5078475fa4daec774bd | [
"BSD-3-Clause"
] | 4 | 2021-07-27T06:56:22.000Z | 2022-03-22T11:21:30.000Z | p3iv_utils/src/p3iv_utils/coordinate_transformation.py | fzi-forschungszentrum-informatik/P3IV | 51784e6dc03dcaa0ad58a5078475fa4daec774bd | [
"BSD-3-Clause"
] | null | null | null | p3iv_utils/src/p3iv_utils/coordinate_transformation.py | fzi-forschungszentrum-informatik/P3IV | 51784e6dc03dcaa0ad58a5078475fa4daec774bd | [
"BSD-3-Clause"
] | 1 | 2021-10-10T01:56:44.000Z | 2021-10-10T01:56:44.000Z | # This file is part of the P3IV Simulator (https://github.com/fzi-forschungszentrum-informatik/P3IV),
# copyright by FZI Forschungszentrum Informatik, licensed under the BSD-3 license (see LICENSE file in main directory)
import numpy as np
import lanelet2.geometry
from lanelet2.core import BasicPoint2d, LaneletSequence
from lanelet2.geometry import ArcCoordinates
class CoordinateTransform(object):
    """Transforms between Cartesian (x, y) and arc-length/Frenet (l, d) coordinates.

    The reference curve is a Lanelet2 2D linestring built from *centerline*,
    which may be a list of lanelets, a list/array of (x, y) points, or a
    Lanelet2 linestring (2D or 3D).
    """

    def __init__(self, centerline):
        """Build the internal 2D reference linestring from *centerline*.

        Raises:
            TypeError: if *centerline* is of an unsupported type.
        """
        if isinstance(centerline, (np.ndarray, list)):
            if isinstance(centerline[0], (lanelet2.core.Lanelet, lanelet2.core.ConstLanelet)):
                # Chain the lanelets and use the sequence's centerline.
                llt_sq = LaneletSequence(centerline)
                self._centerline = lanelet2.geometry.to2D(llt_sq.centerline)
            elif isinstance(centerline[0], (np.ndarray, list)):
                # Build a linestring from raw Cartesian (x, y) points.
                points = []
                for i, xy in enumerate(centerline):
                    points.append(lanelet2.core.Point3d(i, xy[0], xy[1], 0.0))
                ls = lanelet2.core.LineString3d(0, points)
                self._centerline = lanelet2.geometry.to2D(ls)
            else:
                raise TypeError
        elif isinstance(centerline, (lanelet2.core.LineString3d, lanelet2.core.ConstLineString3d)):
            # BUGFIX: the original referenced `llt_sq`, which is undefined in
            # this branch (NameError at runtime); project the given 3D
            # linestring itself instead.
            self._centerline = lanelet2.geometry.to2D(centerline)
        elif isinstance(centerline, (lanelet2.core.LineString2d, lanelet2.core.ConstLineString2d)):
            self._centerline = centerline
        else:
            raise TypeError

    def iterate(func):
        """Decorator: accept a single (2,) coordinate or an (N, 2) array.

        A single coordinate is reshaped to (1, 2) before calling *func*,
        and the result is flattened back to shape (2,) on the way out.
        """
        def wrapper(self, input_coordinates):
            input_coordinates = np.asarray(input_coordinates)
            single_point = False
            # BUGFIX: was `is 1` (identity comparison on an int, which emits
            # a SyntaxWarning on modern CPython and is not guaranteed).
            if len(input_coordinates.shape) == 1:
                input_coordinates = input_coordinates.reshape(-1, 2)
                single_point = True
            output_coordinates = func(self, input_coordinates)
            if single_point:
                # Reshape back to (2, )
                return output_coordinates[0]
            else:
                return output_coordinates
        return wrapper

    @iterate
    def xy2ld(self, input_coordinates):
        """Cartesian (x, y) -> arc coordinates (length, distance)."""
        output_coordinates = np.empty((len(input_coordinates), 2))
        for i in range(len(input_coordinates)):
            frenet = lanelet2.geometry.toArcCoordinates(
                self._centerline, self._convert2basicPoint2d(input_coordinates[i])
            )
            output_coordinates[i] = np.asarray([frenet.length, frenet.distance])
        return output_coordinates

    @iterate
    def ld2xy(self, input_coordinates):
        """Arc coordinates (length, distance) -> Cartesian (x, y)."""
        output_coordinates = np.empty((len(input_coordinates), 2))
        for i in range(len(input_coordinates)):
            cartesian = lanelet2.geometry.fromArcCoordinates(
                self._centerline, self._convert2arcCoordinates(input_coordinates[i])
            )
            output_coordinates[i] = np.asarray([cartesian.x, cartesian.y])
        return output_coordinates

    def expand(self, cartesian_position, longitudinal_position_arr, ignore_lateral_offset=False):
        """
        Given a motion profile in arc-length-coordinates, expand the dimension and transform it to Cartesian.

        Arguments
        ---------
        cartesian_position: np.ndarray
            Initial Cartesian coordinates [x, y]
        longitudinal_position_arr: np.ndarray
            Longitudinal position array (relative to the initial position)
        ignore_lateral_offset: bool
            Flag to ignore current lateral offset
        """
        # typecast if list
        longitudinal_position_arr = np.asarray(longitudinal_position_arr)
        offset_l, offset_d = self.xy2ld(cartesian_position)
        ld_array = np.zeros([len(longitudinal_position_arr), 2])
        ld_array[:, 0] = longitudinal_position_arr + offset_l
        if not ignore_lateral_offset:
            # Fade the current lateral offset linearly down to zero.
            ld_array[:, 1] = np.linspace(offset_d, 0.0, len(longitudinal_position_arr))
        return self.ld2xy(ld_array)

    @staticmethod
    def _convert2basicPoint2d(input_coordinates):
        """Typecast an (x, y) pair to a Lanelet2 BasicPoint2d."""
        cartesian = lanelet2.core.BasicPoint2d()
        cartesian.x, cartesian.y = np.asarray(input_coordinates, dtype=np.float64)
        return cartesian

    @staticmethod
    def _convert2arcCoordinates(input_coordinates):
        """Typecast a (length, distance) pair to Lanelet2 ArcCoordinates."""
        frenet = lanelet2.geometry.ArcCoordinates()
        frenet.length, frenet.distance = np.asarray(input_coordinates, dtype=np.float64)
        return frenet
return frenet
| 39.186441 | 118 | 0.640138 | 4,254 | 0.919983 | 0 | 0 | 1,533 | 0.331531 | 0 | 0 | 903 | 0.195285 |
2dc96003810a3b3ddd4e4137fa58deb740206c23 | 668 | py | Python | web/pyserver/workers/board/views/board.py | abhatikar/training_extensions | 1c96e0f5f39688f8b79735e8dfa90646afc3d5e6 | [
"Apache-2.0"
] | 2 | 2021-01-07T05:09:17.000Z | 2021-10-15T05:13:46.000Z | web/pyserver/workers/board/views/board.py | abhatikar/training_extensions | 1c96e0f5f39688f8b79735e8dfa90646afc3d5e6 | [
"Apache-2.0"
] | 9 | 2021-09-08T03:12:59.000Z | 2022-03-12T00:57:19.000Z | web/pyserver/workers/board/views/board.py | abhatikar/training_extensions | 1c96e0f5f39688f8b79735e8dfa90646afc3d5e6 | [
"Apache-2.0"
] | null | null | null | import aiohttp_cors
from aiohttp import web
from common.utils.run_cmd import run
class Tensorboard(web.View, aiohttp_cors.CorsViewMixin):
    """aiohttp view that (re)starts a TensorBoard process for a log folder."""
    async def get(self):
        """Kill any running TensorBoard, then start one on port 6006.

        Query parameter ``folder``: base directory containing ``tf_logs``.
        Always returns HTTP 200, regardless of command outcomes.
        """
        print("Start")
        folder = self.request.query.get("folder")
        # NOTE(review): `folder` is interpolated into a shell command below,
        # so an attacker-controlled value allows command injection — confirm
        # this endpoint is reachable only by trusted callers.
        cmd_check = 'ps a'
        o, e = await run(cmd_check)
        print(cmd_check)
        # Stop any TensorBoard instance already running.
        cmd_stop = 'pkill -f tensorboard'
        _, _ = await run(cmd_stop)
        print(cmd_stop)
        cmd_start = f'tensorboard --logdir {folder}/tf_logs --port 6006 --host idlp_tensorboard_worker'
        # forget=True presumably detaches the process so the response can
        # return immediately — confirm against common.utils.run_cmd.run.
        o, e = await run(cmd_start, forget=True)
        print(cmd_start)
        print("end")
        return web.Response(status=200)
| 29.043478 | 103 | 0.637725 | 583 | 0.872754 | 0 | 0 | 0 | 0 | 521 | 0.77994 | 131 | 0.196108 |
2dca7a410a4b4fc054c136085f57855c6543b78e | 716 | py | Python | molecule/default/tests/test_role.py | boutetnico/ansible-role-nodejs | de9d70556decbf76d9a939499c42edc85e2d65e8 | [
"MIT"
] | null | null | null | molecule/default/tests/test_role.py | boutetnico/ansible-role-nodejs | de9d70556decbf76d9a939499c42edc85e2d65e8 | [
"MIT"
] | null | null | null | molecule/default/tests/test_role.py | boutetnico/ansible-role-nodejs | de9d70556decbf76d9a939499c42edc85e2d65e8 | [
"MIT"
] | null | null | null | import pytest
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize('name', [
    # NOTE: ('nodejs') is a plain parenthesized string, not a 1-tuple;
    # parametrize receives it as a single value.
    ('nodejs'),
])
def test_packages_are_installed(host, name):
    """Assert that each expected OS package is installed on the host."""
    package = host.package(name)
    assert package.is_installed
@pytest.mark.parametrize('path,user,group', [
    ('/usr/bin/node', 'root', 'root'),
    ('/usr/bin/ncu', 'root', 'root'),
    ('/usr/bin/yarn', 'root', 'root'),
])
def test_binaries_are_installed(host, path, user, group):
    """Assert each expected binary exists as a regular file owned by root."""
    binary = host.file(path)
    assert binary.exists
    assert binary.is_file
    assert binary.user == user
    assert binary.group == group
2dcaf1c764e04a5631433a84042c46eeee80f0f9 | 28 | py | Python | losses/__init__.py | xieqk/SEF | 2163a159933d8d9ecad5ff9341cfa626f662a778 | [
"MIT"
] | null | null | null | losses/__init__.py | xieqk/SEF | 2163a159933d8d9ecad5ff9341cfa626f662a778 | [
"MIT"
] | null | null | null | losses/__init__.py | xieqk/SEF | 2163a159933d8d9ecad5ff9341cfa626f662a778 | [
"MIT"
] | null | null | null | from .ranking import Triplet | 28 | 28 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2dcbf62d4bf655fa54c52c4d475fe511450f1255 | 1,216 | py | Python | GTFtools.py | zhenglabuthscsa/PRADA2 | d050971dc6e0a8ab11f8575787b2f4848502ed21 | [
"MIT"
] | null | null | null | GTFtools.py | zhenglabuthscsa/PRADA2 | d050971dc6e0a8ab11f8575787b2f4848502ed21 | [
"MIT"
] | null | null | null | GTFtools.py | zhenglabuthscsa/PRADA2 | d050971dc6e0a8ab11f8575787b2f4848502ed21 | [
"MIT"
] | 1 | 2021-10-18T23:44:54.000Z | 2021-10-18T23:44:54.000Z | """
created by Juechen Yang at 1/8/19
"""
import os
import pandas as pd
def export_to_bed(gtf, intermediate_file_dir, lincRNA):
    """Write exon records from a parsed GTF table to a BED-like file.

    Selects exon rows of protein-coding genes (excluding chrM), optionally
    extended with the genes listed in ``intersect_total.txt``, converts the
    start coordinate from 1-based (GTF) to 0-based (BED), and writes the
    result to ``<intermediate_file_dir>/exon.bed`` (tab-separated, no
    header, no index).

    :param gtf: pandas DataFrame of a parsed GTF file; must contain the
        columns ``feature``, ``seqname``, ``gene_type``, ``gene_id``,
        ``gene_name``, ``start``, ``end`` and ``strand``.
    :param intermediate_file_dir: directory holding ``intersect_total.txt``
        (when ``lincRNA`` is truthy) and receiving ``exon.bed``.
    :param lincRNA: when truthy, also keep every row whose ``gene_id``
        appears in ``intersect_total.txt``.
    """
    columns = ['seqname', 'start', 'end', 'strand', 'gene_id',
               'gene_name', 'gene_type']
    # Exons of protein-coding genes; mitochondrial contig excluded.
    keep = (
        (gtf.feature == 'exon')
        & (gtf.seqname != 'chrM')
        & (gtf.gene_type == 'protein_coding')
    )
    if lincRNA:
        lincRNAIDs = pd.read_csv(
            os.path.join(intermediate_file_dir, 'intersect_total.txt'),
            names=['ids'], sep='\t')
        # NOTE(review): the lincRNA condition is OR-ed in unguarded, so
        # *every* row of a listed gene is kept (not only its exons). This
        # mirrors the original operator precedence; confirm it is intended.
        keep = keep | gtf.gene_id.isin(lincRNAIDs['ids'])
    # .copy() avoids the chained-assignment SettingWithCopyWarning the
    # previous in-place column mutation could trigger.
    exons = gtf.loc[keep, columns].copy()
    # GTF coordinates are 1-based inclusive; BED starts are 0-based.
    exons['start'] = exons['start'] - 1
    exons.to_csv(os.path.join(intermediate_file_dir, 'exon.bed'),
                 index=False, header=False, sep='\t')
def check_gtf_tsv(gtf_tsv, annotation_file):
    """Create the parsed-GTF TSV cache file if it does not already exist.

    :param gtf_tsv: path of the TSV cache file to check/create.
    :param annotation_file: source GTF annotation parsed on a cache miss.
    """
    if not os.path.isfile(gtf_tsv):
        # Imported lazily so gtfparse is only required on a cache miss.
        from gtfparse import read_gtf
        parsed_gtf = read_gtf(annotation_file)
        parsed_gtf.to_csv(gtf_tsv, sep='\t', index=None)
| 46.769231 | 133 | 0.563322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.222862 |
2dcc54fe3a394449dcf9c0f9b37aad39578e1f30 | 8,767 | py | Python | dlclabel/io.py | jonmatthis/napari-DeepLabCut | 90e2fd230d9f4ab493698c619e976235df4adfeb | [
"MIT"
] | null | null | null | dlclabel/io.py | jonmatthis/napari-DeepLabCut | 90e2fd230d9f4ab493698c619e976235df4adfeb | [
"MIT"
] | null | null | null | dlclabel/io.py | jonmatthis/napari-DeepLabCut | 90e2fd230d9f4ab493698c619e976235df4adfeb | [
"MIT"
] | null | null | null | import glob
import numpy as np
import os
import pandas as pd
import yaml
from dask_image.imread import imread
from dlclabel import misc
from itertools import groupby
from napari.layers import Shapes
from napari.plugins._builtins import napari_write_shapes
from napari.types import LayerData
from skimage.io import imsave
from skimage.util import img_as_ubyte
from typing import Any, Dict, List, Optional, Sequence, Union
SUPPORTED_IMAGES = "jpg", "jpeg", "png"


def handle_path(path: Union[str, Sequence[str]]) -> Union[str, Sequence[str]]:
    """Dispatch files in folder to the relevant plugin readers.

    When given a single directory (typically a DLC ``labeled-data`` folder),
    returns glob patterns for its images and, when present, its ``.h5``
    data file; otherwise returns the input path(s) unchanged as a list.

    :param path: a path string, or a list/tuple of path strings.
    :returns: a list of paths or glob patterns.
    :raises ValueError: if ``path`` is neither a string nor a list/tuple.
    :raises IOError: if a directory contains no supported image files.
    """
    # Validate the input type *before* using it; the original check ran
    # after a list comprehension had already consumed ``path``, so a bad
    # type raised TypeError and the ValueError branch was unreachable.
    if isinstance(path, str):
        paths = [path]
    elif isinstance(path, (tuple, list)):
        paths = list(path)
    else:
        raise ValueError("'path' argument must be a string, list, or tuple")
    paths = [os.fspath(p) for p in paths]
    # Test first whether a 'labeled-data' folder was passed in
    if len(paths) == 1:
        path = paths[0]
        if os.path.isdir(path):
            files = os.listdir(path)
            # Glob on the extension of the first supported image found.
            ext = next(
                (
                    os.path.splitext(file)[1]
                    for file in files
                    if any(file.endswith(e) for e in SUPPORTED_IMAGES)
                ),
                None,
            )
            if ext is None:
                raise IOError("No supported images were found.")
            globs = [os.path.join(path, f"*{ext}")]
            # Hand the data file to the HDF reader as well, when present.
            if any(file.endswith(".h5") for file in files):
                globs.append(os.path.join(path, "*.h5"))
            return globs
    return paths
def _populate_metadata(
    header: misc.DLCHeader,
    *,
    labels: Optional[Sequence[str]] = None,
    ids: Optional[Sequence[str]] = None,
    likelihood: Optional[Sequence[float]] = None,
    paths: Optional[List[str]] = None,
    size: Optional[int] = 8,
    pcutoff: Optional[float] = 0.6,
    colormap: Optional[str] = "viridis",
) -> Dict:
    """Build the keyword arguments for a napari keypoints Points layer.

    Missing labels/ids default to the header's bodyparts/individuals, and a
    missing likelihood defaults to all-ones (every point "valid").
    """
    labels = header.bodyparts if labels is None else labels
    ids = header.individuals if ids is None else ids
    likelihood = np.ones(len(labels)) if likelihood is None else likelihood

    # One color per bodypart and one per individual, from the same map.
    label_colors = misc.build_color_cycle(len(header.bodyparts), colormap)
    id_colors = misc.build_color_cycle(len(header.individuals), colormap)
    face_color_cycle_maps = {
        "label": dict(zip(header.bodyparts, label_colors)),
        "id": dict(zip(header.individuals, id_colors)),
    }

    properties = {
        "label": list(labels),
        "id": list(ids),
        "likelihood": likelihood,
        # Points under the likelihood cutoff get the "invalid" edge color.
        "valid": likelihood > pcutoff,
    }
    metadata = {
        "header": header,
        "face_color_cycle_maps": face_color_cycle_maps,
        "paths": paths or [],
    }
    return {
        "name": "keypoints",
        "text": "label",
        "properties": properties,
        "face_color_cycle": label_colors,
        "edge_color": "valid",
        "edge_color_cycle": ["black", "red"],
        "size": size,
        "metadata": metadata,
    }
def _load_config(config_path: str):
    """Parse a DLC project configuration YAML file into a dict."""
    with open(config_path) as cfg:
        return yaml.safe_load(cfg)
def read_config(configname: str) -> List[LayerData]:
    """Turn a DLC project config into an empty napari points layer spec.

    The layer carries the project's dot size, likelihood cutoff and
    colormap, and is named after the project scorer.
    """
    config = _load_config(configname)
    metadata = _populate_metadata(
        misc.DLCHeader.from_config(config),
        size=config["dotsize"],
        pcutoff=config["pcutoff"],
        colormap=config["colormap"],
    )
    metadata["name"] = f"CollectedData_{config['scorer']}"
    # No point data yet: the layer starts empty and is filled by the user.
    return [(None, metadata, "points")]
def read_images(path: Union[str, List[str]]) -> List[LayerData]:
    """Lazily load a folder of images as a single napari image layer."""
    if isinstance(path, list):
        # Collapse an explicit file list into a same-extension glob.
        first, ext = os.path.splitext(path[0])
        path = os.path.join(os.path.dirname(first), f"*{ext}")
    # Retrieve filepaths exactly as parsed by pims: keep only the last
    # three path components of each match.
    filepaths = [
        os.path.join(*filepath.rsplit(os.sep, 3)[1:])
        for filepath in sorted(glob.glob(path))
    ]
    return [
        (
            imread(path),
            {
                "name": "images",
                "metadata": {
                    "paths": filepaths,
                    "root": os.path.split(path)[0],
                },
            },
            "image",
        )
    ]
def read_hdf(filename: str) -> List[LayerData]:
    """Read DLC keypoint HDF file(s) into napari points layers.

    ``filename`` may contain glob wildcards; one layer is produced per
    matching file.

    :param filename: path (or glob pattern) of DLC ``.h5`` data files.
    :returns: list of ``(data, metadata, "points")`` tuples where ``data``
        is an ``(n, 3)`` array of ``(frame, y, x)`` rows.
    """
    layers = []
    # NOTE: the loop variable deliberately shadows the ``filename`` parameter.
    for filename in glob.glob(filename):
        temp = pd.read_hdf(filename)
        header = misc.DLCHeader(temp.columns)
        temp = temp.droplevel("scorer", axis=1)
        if "individuals" not in temp.columns.names:
            # Append a fake level to the MultiIndex
            # to make it look like a multi-animal DataFrame
            old_idx = temp.columns.to_frame()
            old_idx.insert(0, "individuals", "")
            temp.columns = pd.MultiIndex.from_frame(old_idx)
        # Long format: one row per (frame, individual, bodypart).
        df = temp.stack(["individuals", "bodyparts"]).reset_index()
        nrows = df.shape[0]
        data = np.empty((nrows, 3))
        image_paths = df["level_0"]
        if np.issubdtype(image_paths.dtype, np.number):
            # Frames already indexed numerically; no path mapping needed.
            image_inds = image_paths.values
            paths2inds = []
        else:
            image_inds, paths2inds = misc.encode_categories(image_paths, return_map=True)
        data[:, 0] = image_inds
        data[:, 1:] = df[["y", "x"]].to_numpy()
        metadata = _populate_metadata(
            header,
            labels=df["bodyparts"],
            ids=df["individuals"],
            likelihood=df.get("likelihood"),
            paths=list(paths2inds),
        )
        # Layer named after the file stem; root kept for writing back.
        metadata["name"] = os.path.split(filename)[1].split(".")[0]
        metadata["metadata"]["root"] = os.path.split(filename)[0]
        layers.append((data, metadata, "points"))
    return layers
def write_hdf(filename: str, data: Any, metadata: Dict) -> Optional[str]:
    """Persist a napari keypoints layer back to a DLC-style HDF file.

    Rebuilds the DLC MultiIndex DataFrame from the layer's data and
    properties. Machine predictions (layers whose name contains
    "machine") are merged into an existing ``CollectedData_*.h5`` when one
    exists; otherwise the scorer is read from the project ``config.yaml``.

    :param filename: unused for the output name; the file is written as
        ``<name>.h5`` inside ``metadata["metadata"]["root"]``.
    :param data: ``(n, 3)`` array of ``(frame, y, x)`` keypoint rows.
    :param metadata: layer metadata as produced by ``_populate_metadata``.
    :returns: the basename of the written HDF file.
    """
    # Columns are (x, y): take the data columns in reverse order.
    temp = pd.DataFrame(data[:, -1:0:-1], columns=["x", "y"])
    properties = metadata["properties"]
    meta = metadata["metadata"]
    temp["bodyparts"] = properties["label"]
    temp["individuals"] = properties["id"]
    temp["inds"] = data[:, 0].astype(int)
    temp["likelihood"] = properties["likelihood"]
    temp["scorer"] = meta["header"].scorer
    # Reshape long -> wide: one row per frame, DLC's column MultiIndex
    # (scorer, individuals, bodyparts, coords).
    df = temp.set_index(["scorer", "individuals", "bodyparts", "inds"]).stack()
    df.index = df.index.set_names("coords", -1)
    df = df.unstack(["scorer", "individuals", "bodyparts", "coords"])
    df.index.name = None
    # Single-animal data uses an empty individual id: drop the fake level.
    if not properties["id"][0]:
        df = df.droplevel("individuals", axis=1)
    df = df.reindex(meta["header"].columns, axis=1)
    if meta["paths"]:
        # Map numeric frame indices back to image paths.
        df.index = [meta["paths"][i] for i in df.index]
    name = metadata["name"]
    root = meta["root"]
    if "machine" in name:  # We are attempting to save refined model predictions
        df.drop("likelihood", axis=1, level="coords", inplace=True)
        header = misc.DLCHeader(df.columns)
        gt_file = ""
        for file in os.listdir(root):
            if file.startswith("CollectedData") and file.endswith("h5"):
                gt_file = file
                break
        if gt_file:  # Refined predictions must be merged into the existing data
            df_gt = pd.read_hdf(os.path.join(root, gt_file))
            new_scorer = df_gt.columns.get_level_values("scorer")[0]
            header.scorer = new_scorer
            df.columns = header.columns
            df = pd.concat((df, df_gt))
            # Existing ground-truth rows win over refined predictions.
            df = df[~df.index.duplicated(keep="first")]
            name = os.path.splitext(gt_file)[0]
        else:
            # Let us fetch the config.yaml file to get the scorer name...
            project_folder = root.rsplit(os.sep, 2)[0]
            config = _load_config(os.path.join(project_folder, "config.yaml"))
            new_scorer = config["scorer"]
            header.scorer = new_scorer
            df.columns = header.columns
            name = f"CollectedData_{new_scorer}"
    df.sort_index(inplace=True)
    filename = name + ".h5"
    df.to_hdf(os.path.join(root, filename), key="df_with_missing")
    return filename
def write_masks(foldername: str, data: Any, metadata: Dict) -> Optional[str]:
    """Rasterize polygon shapes to per-object binary PNG masks.

    One mask PNG is written per shape, named ``<image>_obj_<k>.png``; the
    raw polygon vertices are also saved to ``vertices.csv``.

    :returns: the output folder path.
    """
    folder, _ = os.path.splitext(foldername)
    os.makedirs(folder, exist_ok=True)
    filename = os.path.join(folder, "{}_obj_{}.png")
    shapes = Shapes(data, shape_type="polygon")
    meta = metadata["metadata"]
    # First column of each vertex array holds the frame index.
    frame_inds = [int(array[0, 0]) for array in data]
    # Number shapes 0, 1, ... within each consecutive run of equal frames;
    # assumes shapes arrive ordered by frame (itertools.groupby only
    # groups consecutive runs) -- TODO confirm callers guarantee this.
    shape_inds = []
    for _, group in groupby(frame_inds):
        shape_inds += range(sum(1 for _ in group))
    masks = shapes.to_masks(mask_shape=meta["shape"][1:])
    for n, mask in enumerate(masks):
        image_name = os.path.basename(meta["paths"][frame_inds[n]])
        output_path = filename.format(os.path.splitext(image_name)[0], shape_inds[n])
        imsave(output_path, img_as_ubyte(mask).squeeze(), check_contrast=False)
    napari_write_shapes(os.path.join(folder, "vertices.csv"), data, metadata)
    return folder
| 37.306383 | 89 | 0.603399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,502 | 0.171324 |
2dcde6f1aeb6275a870472422f2f2e32d94b6cf9 | 537 | py | Python | mathematics/reverse-integer.py | Neulana/leetcode | 734078378f125a24d049061a34995c90b15cac43 | [
"Apache-2.0"
] | 2 | 2018-12-20T13:58:16.000Z | 2019-11-12T09:42:07.000Z | mathematics/reverse-integer.py | Neulana/leetcode | 734078378f125a24d049061a34995c90b15cac43 | [
"Apache-2.0"
] | null | null | null | mathematics/reverse-integer.py | Neulana/leetcode | 734078378f125a24d049061a34995c90b15cac43 | [
"Apache-2.0"
] | null | null | null | """
题目:
给定一个 32 位有符号整数,将整数中的数字进行反转。
示例 1:
输入: 123
输出: 321
示例 2:
输入: -123
输出: -321
示例 3:
输入: 120
输出: 21
注意:
假设我们的环境只能存储 32 位有符号整数,其数值范围是 [−2**31, 2**31 − 1]。根据这个假设,如果反转后的整数溢出,则返回 0。
"""
class Solution(object):
    """LeetCode 7: reverse the decimal digits of a 32-bit signed integer."""

    def reverse(self, x):
        """Return ``x`` with its digits reversed, or 0 on 32-bit overflow.

        :type x: int
        :rtype: int
        """
        if x == 0:
            return 0
        sign = 1 if x > 0 else -1
        reversed_magnitude = int(str(abs(x))[::-1])
        result = sign * reversed_magnitude
        # Clamp to the signed 32-bit range per the problem statement.
        if result > 2 ** 31 - 1 or result < -(2 ** 31):
            return 0
        return result
| 14.916667 | 74 | 0.458101 | 342 | 0.470426 | 0 | 0 | 0 | 0 | 0 | 0 | 446 | 0.61348 |
2dcefc70c84d3f4061e0e716788bdf5dca8ba63f | 74 | py | Python | lib/datatools/build/__init__.py | JokerWDL/PyAnomaly | cf93437e5d7ae87fa916141cf4b5cc2e929b8199 | [
"Apache-2.0"
] | 1 | 2020-11-22T17:55:10.000Z | 2020-11-22T17:55:10.000Z | lib/datatools/build/__init__.py | JokerWDL/PyAnomaly | cf93437e5d7ae87fa916141cf4b5cc2e929b8199 | [
"Apache-2.0"
] | null | null | null | lib/datatools/build/__init__.py | JokerWDL/PyAnomaly | cf93437e5d7ae87fa916141cf4b5cc2e929b8199 | [
"Apache-2.0"
] | null | null | null | from .. import dataclass # trigger the register in the dataclass package
| 37 | 73 | 0.783784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.648649 |
2dcf7a1623416f9f81a559b63f8c49bc133ec199 | 46,393 | py | Python | src/github3/repos/branch.py | thebrid/github3.py | 1d2d0dbd67576e3b399ffdf21f29f85b467da245 | [
"BSD-3-Clause"
] | null | null | null | src/github3/repos/branch.py | thebrid/github3.py | 1d2d0dbd67576e3b399ffdf21f29f85b467da245 | [
"BSD-3-Clause"
] | null | null | null | src/github3/repos/branch.py | thebrid/github3.py | 1d2d0dbd67576e3b399ffdf21f29f85b467da245 | [
"BSD-3-Clause"
] | null | null | null | """Implementation of a branch on a repository."""
import typing as t
from . import commit
from .. import decorators
from .. import models
if t.TYPE_CHECKING:
from .. import apps as tapps
from .. import users as tusers
from . import orgs
class _Branch(models.GitHubCore):
    """A representation of a branch on a repository.

    See also https://developer.github.com/v3/repos/branches/

    This object has the following attributes:

    .. attribute:: commit

        A :class:`~github3.repos.commit.MiniCommit` representing the HEAD
        commit of this branch.

    .. attribute:: name

        The name of this branch.
    """

    # The Accept header will likely be removable once the feature is out of
    # preview mode. See: http://git.io/v4O1e
    PREVIEW_HEADERS = {"Accept": "application/vnd.github.loki-preview+json"}

    class_name = "Repository Branch"

    def _update_attributes(self, branch):
        self.commit = commit.MiniCommit(branch["commit"], self)
        self.name = branch["name"]
        # The branch payload carries no API URL of its own, so derive it
        # from the commit URL (".../repos/:owner/:repo/commit/:sha").
        base = self.commit.url.split("/commit", 1)[0]
        self._api = self._build_url("branches", self.name, base_url=base)

    def _repr(self):
        return f"<{self.class_name} [{self.name}]>"

    def latest_sha(self, differs_from=""):
        """Check if SHA-1 is the same as the remote branch.

        See: https://git.io/vaqIw

        :param str differs_from:
            (optional), sha to compare against
        :returns:
            string of the SHA or None
        :rtype:
            str on Python 3
        """
        # If-None-Match returns 200 instead of 304 value does not have quotes
        headers = {
            "Accept": "application/vnd.github.v3.sha",
            "If-None-Match": f'"{differs_from}"',
        }
        base = self._api.split("/branches", 1)[0]
        url = self._build_url("commits", self.name, base_url=base)
        resp = self._get(url, headers=headers)
        if self._boolean(resp, 200, 304):
            return resp.text
        return None

    @decorators.requires_auth
    def protection(self) -> "BranchProtection":
        """Retrieve the protections enabled for this branch.

        See:
        https://developer.github.com/v3/repos/branches/#get-branch-protection

        :returns:
            The protections enabled for this branch.
        :rtype:
            :class:`~github3.repos.branch.BranchProtection`
        """
        url = self._build_url("protection", base_url=self._api)
        resp = self._get(url)
        json = self._json(resp, 200)
        return BranchProtection(json, self)

    @decorators.requires_auth
    def protect(
        self,
        required_status_checks: t.Optional[t.Mapping[str, t.Any]],
        enforce_admins: t.Optional[bool],
        required_pull_request_reviews: t.Optional[t.Mapping[str, t.Any]],
        restrictions: t.Optional[t.Mapping[str, t.Sequence[str]]],
        required_linear_history: t.Optional[bool] = None,
        allow_force_pushes: t.Optional[bool] = None,
        allow_deletions: t.Optional[bool] = None,
        required_conversation_resolution: t.Optional[bool] = None,
    ) -> "BranchProtection":
        """Enable force push protection and configure status check enforcement.

        See also:
        https://docs.github.com/en/rest/reference/repos#update-branch-protection

        .. versionchanged:: 3.0.0

            The GitHub API changed since the last release this was updated in.
            As such the parameters have to change here.

        :param required_status_checks:
            Required. Require status checks to pass before merging. Set to null
            to disable.
        :param enforce_admins:
            Required. Enforce all configured restrictions for administrators.
            Set to true to enforce required status checks for repository
            administrators. Set to null to disable.
        :param required_pull_request_reviews:
            Required. Require at least one approving review on a pull request,
            before merging. Set to null to disable.
        :param restrictions:
            Required. Restrict who can push to the protected branch. User,
            app, and team restrictions are only available for
            organization-owned repositories. Set to null to disable.
        :param required_linear_history:
            Enforces a linear commit Git history, which prevents anyone from
            pushing merge commits to a branch. Set to true to enforce a linear
            commit history. Set to false to disable a linear commit Git
            history. Your repository must allow squash merging or rebase
            merging before you can enable a linear commit history. Default:
            false. For more information, see "Requiring a linear commit
            history" in the GitHub Help documentation.
        :param allow_force_pushes:
            Permits force pushes to the protected branch by anyone with write
            access to the repository. Set to true to allow force pushes. Set
            to false or null to block force pushes. Default: false. For more
            information, see "Enabling force pushes to a protected branch" in
            the GitHub Help documentation."
        :param allow_deletions:
            Allows deletion of the protected branch by anyone with write
            access to the repository. Set to false to prevent deletion of the
            protected branch. Default: false. For more information, see
            "Enabling force pushes to a protected branch" in the GitHub Help
            documentation.
        :param required_conversation_resolution:
            Requires all conversations on code to be resolved before a pull
            request can be merged into a branch that matches this rule. Set to
            false to disable. Default: false.
        :returns:
            BranchProtection if successful
        :rtype:
            :class:`BranchProtection`
        """
        # The four mandatory keys are always sent (possibly as null);
        # the optional flags are only included when explicitly provided.
        edit = {
            "required_status_checks": required_status_checks,
            "enforce_admins": enforce_admins,
            "required_pull_request_reviews": required_pull_request_reviews,
            "restrictions": restrictions,
        }
        if required_linear_history is not None:
            edit["required_linear_history"] = required_linear_history
        if allow_force_pushes is not None:
            edit["allow_force_pushes"] = allow_force_pushes
        if allow_deletions is not None:
            edit["allow_deletions"] = allow_deletions
        if required_conversation_resolution is not None:
            edit[
                "required_conversation_resolution"
            ] = required_conversation_resolution
        url = self._build_url("protection", base_url=self._api)
        resp = self._put(url, json=edit)
        json = self._json(resp, 200)
        return BranchProtection(json, self)

    @decorators.requires_auth
    def sync_with_upstream(self) -> t.Mapping[str, str]:
        """Synchronize this branch with the upstream.

        .. warning::

            This API endpoint is still in Beta per gitHub

        .. versionadded:: 3.0.0

        Sync a branch of a forked repository to keep it up-to-date with the
        upstream repository.

        See also:
        https://docs.github.com/en/rest/reference/repos#sync-a-fork-branch-with-the-upstream-repository

        :returns:
            The dictionary described in the documentation
        :rtype:
            dict
        """
        base = self._api.split("/branches", 1)[0]
        url = self._build_url("merge-upstream", base_url=base)
        json = self._json(self._post(url), 200)
        return json

    @decorators.requires_auth
    def unprotect(self) -> bool:
        """Disable protections on this branch."""
        return self._boolean(
            self._delete(self._build_url("protection", base_url=self._api)),
            200,
            403,
        )
class Branch(_Branch):
    """The representation of a branch returned in a collection.

    GitHub's API returns different amounts of information about repositories
    based upon how that information is retrieved. This object exists to
    represent the limited amount of information returned for a specific
    branch in a collection. For example, you would receive this class when
    calling :meth:`~github3.repos.repo.Repository.branches`. To provide a
    clear distinction between the types of branches, github3.py uses different
    classes with different sets of attributes.

    This object has the same attributes as a
    :class:`~github3.repos.branch.ShortBranch` as well as the following:

    .. attribute:: links

        The dictionary of URLs returned by the API as ``_links``.

    .. attribute:: protected

        A boolean attribute that describes whether this branch is protected or
        not.

    .. attribute:: original_protection

        .. versionchanged:: 1.1.0

            To support a richer branch protection API, this is the new name
            for the information formerly stored under the attribute
            ``protection``.

        A dictionary with details about the protection configuration of this
        branch.

    .. attribute:: protection_url

        The URL to access and manage details about this branch's protection.
    """

    class_name = "Repository Branch"

    def _update_attributes(self, branch):
        super()._update_attributes(branch)
        # The full payload carries a richer commit representation.
        self.commit = commit.ShortCommit(branch["commit"], self)
        #: Returns '_links' attribute.
        self.links = branch["_links"]
        #: Provides the branch's protection status.
        self.protected = branch["protected"]
        self.original_protection = branch["protection"]
        self.protection_url = branch["protection_url"]
        # Prefer the canonical API URL over the one derived by _Branch.
        if self.links and "self" in self.links:
            self._api = self.links["self"]
class ShortBranch(_Branch):
    """The representation of a branch returned in a collection.

    GitHub's API returns different amounts of information about repositories
    based upon how that information is retrieved. This object exists to
    represent the limited amount of information returned for a specific
    branch in a collection. For example, you would receive this class when
    calling :meth:`~github3.repos.repo.Repository.branches`. To provide a
    clear distinction between the types of branches, github3.py uses different
    classes with different sets of attributes.

    This object has the following attributes:

    .. attribute:: commit

        A :class:`~github3.repos.commit.MiniCommit` representation of the
        newest commit on this branch with the associated repository metadata.

    .. attribute:: name

        The name of this branch.
    """

    class_name = "Short Repository Branch"
    # refresh() resolves this short representation into a full Branch.
    _refresh_to = Branch

    @t.overload
    def refresh(self, conditional: bool = False) -> Branch:  # noqa: D102
        # NOTE(review): typing.overload replaces the decorated function with
        # a stub that raises NotImplementedError when called at runtime;
        # unless a concrete refresh() implementation follows elsewhere, this
        # shadows the inherited implementation -- confirm intended.
        ...
class BranchProtection(models.GitHubCore):
    """The representation of a branch's protection.

    .. seealso::

        `Branch protection API documentation`_
            GitHub's documentation of branch protection

    .. versionchanged:: 3.0.0

        Added ``required_linear_history``, ``allow_force_pushes``,
        ``allow_deletions``, and ``required_conversation_resolution``.

    This object has the following attributes:

    .. attribute:: enforce_admins

        A :class:`~github3.repos.branch.ProtectionEnforceAdmins` instance
        representing whether required status checks are required for admins.

    .. attribute:: restrictions

        A :class:`~github3.repos.branch.ProtectionRestrictions` representing
        who can push to this branch. Team and user restrictions are only
        available for organization-owned repositories.

    .. attribute:: required_pull_request_reviews

        A :class:`~github3.repos.branch.ProtectionRequiredPullRequestReviews`
        representing the protection provided by requiring pull request
        reviews.

    .. attribute:: required_status_checks

        A :class:`~github3.repos.branch.ProtectionRequiredStatusChecks`
        representing the protection provided by requiring status checks.

    .. attribute:: required_linear_history

        .. versionadded:: 3.0.0

        A :class:`~github3.repos.branch.ProtectionRequiredLinearHistory`
        representing the information returned by the API about this
        protection.

    .. attribute:: allow_force_pushes

        .. versionadded:: 3.0.0

        A :class:`~github3.repos.branch.ProtectionAllowForcePushes`
        representing the information returned by the API about this
        protection.

    .. attribute:: allow_deletions

        .. versionadded:: 3.0.0

        A :class:`~github3.repos.branch.ProtectionAllowDeletions`
        representing the information returned by the API about this
        protection.

    .. attribute:: required_conversation_resolution

        .. versionadded:: 3.0.0

        A
        :class:`~github3.repos.branch.ProtectionRequiredConversationResolution`
        representing the information returned by the API about this
        protection.

    .. links
    .. _Branch protection API documentation:
        https://developer.github.com/v3/repos/branches/#get-branch-protection
    """

    def _update_attributes(self, protection):
        self._api = protection["url"]

        def _set_conditional_attr(name, cls):
            # Store the raw value (possibly None/absent) and wrap it in the
            # richer helper class only when the API actually returned data.
            value = protection.get(name)
            setattr(self, name, value)
            if getattr(self, name):
                setattr(self, name, cls(value, self))

        _set_conditional_attr("enforce_admins", ProtectionEnforceAdmins)
        _set_conditional_attr("restrictions", ProtectionRestrictions)
        _set_conditional_attr(
            "required_pull_request_reviews",
            ProtectionRequiredPullRequestReviews,
        )
        _set_conditional_attr(
            "required_status_checks", ProtectionRequiredStatusChecks
        )
        _set_conditional_attr(
            "required_linear_history", ProtectionRequiredLinearHistory
        )
        _set_conditional_attr(
            "allow_force_pushes", ProtectionAllowForcePushes
        )
        # Bug fix: the API key is "allow_deletions"; the previous misspelled
        # key ("allow_deleteions") meant the documented attribute was never
        # populated with API data.
        _set_conditional_attr("allow_deletions", ProtectionAllowDeletions)
        # Deprecated alias: preserve the old (misspelled) attribute name for
        # any callers that relied on it.
        self.allow_deleteions = self.allow_deletions
        _set_conditional_attr(
            "required_conversation_resolution",
            ProtectionRequiredConversationResolution,
        )

    @decorators.requires_auth
    def update(
        self,
        enforce_admins=None,
        required_status_checks=None,
        required_pull_request_reviews=None,
        restrictions=None,
    ):
        """Enable force push protection and configure status check enforcement.

        See: http://git.io/v4Gvu

        :param str enforce_admins:
            (optional), Specifies the enforcement level of the status checks.
            Must be one of 'off', 'non_admins', or 'everyone'. Use `None` or
            omit to use the already associated value.
        :param list required_status_checks:
            (optional), A list of strings naming status checks that must pass
            before merging. Use `None` or omit to use the already associated
            value.
        :param obj required_pull_request_reviews:
            (optional), Object representing the configuration of Request Pull
            Request Reviews settings. Use `None` or omit to use the already
            associated value.
        :param obj restrictions:
            (optional), Object representing the configuration of Restrictions.
            Use `None` or omit to use the already associated value.
        :returns:
            Updated branch protection
        :rtype:
            :class:`~github3.repos.branch.BranchProtection`
        """
        # Snapshot the currently configured protections so omitted
        # parameters keep their existing values.
        current_status = {
            "enforce_admins": getattr(self.enforce_admins, "enabled", False),
            "required_status_checks": (
                self.required_status_checks.as_dict()
                if self.required_status_checks is not None
                else None
            ),
            "required_pull_request_reviews": (
                self.required_pull_request_reviews.as_dict()
                if self.required_pull_request_reviews is not None
                else None
            ),
            "restrictions": (
                self.restrictions.as_dict()
                if self.restrictions is not None
                else None
            ),
        }
        edit = {
            "enabled": True,
            "enforce_admins": (
                enforce_admins
                if enforce_admins is not None
                else current_status["enforce_admins"]
            ),
            "required_status_checks": (
                required_status_checks
                if required_status_checks is not None
                else current_status["required_status_checks"]
            ),
            "required_pull_request_reviews": (
                required_pull_request_reviews
                if required_pull_request_reviews is not None
                else current_status["required_pull_request_reviews"]
            ),
            "restrictions": (
                restrictions
                if restrictions is not None
                else current_status["restrictions"]
            ),
        }
        json = self._json(self._put(self._api, json=edit), 200)
        self._update_attributes(json)
        return self

    @decorators.requires_auth
    def delete(self) -> bool:
        """Remove branch protection.

        :returns:
            True if successful, False otherwise
        :rtype:
            bool
        """
        resp = self._delete(self._api)
        return self._boolean(resp, 204, 404)

    @decorators.requires_auth
    def requires_signatures(self) -> bool:
        """Check if commit signatures are presently required.

        :returns:
            True if enabled, False otherwise
        :rtype:
            bool
        """
        url = self._build_url("required_signatures", base_url=self._api)
        resp = self._get(url)
        if resp.status_code == 200:
            return resp.json()["enabled"]
        return False

    @decorators.requires_auth
    def require_signatures(self) -> bool:
        """Require commit signatures for commits to this branch.

        :returns:
            True if successful, False otherwise
        :rtype:
            bool
        """
        url = self._build_url("required_signatures", base_url=self._api)
        resp = self._post(url)
        return self._boolean(resp, 200, 404)

    @decorators.requires_auth
    def delete_signature_requirements(self) -> bool:
        """Stop requiring commit signatures for commits to this branch.

        :returns:
            True if successful, False otherwise
        :rtype:
            bool
        """
        url = self._build_url("required_signatures", base_url=self._api)
        resp = self._delete(url)
        return self._boolean(resp, 200, 404)
class ProtectionEnforceAdmins(models.GitHubCore):
    """The representation of a sub-portion of branch protection.

    .. seealso::

        `Branch protection API documentation`_
            GitHub's documentation of branch protection

        `Admin enforcement of protected branch`_
            GitHub's documentation of protecting a branch with admins

    This object has the following attributes:

    .. attribute:: enabled

        A boolean attribute indicating whether the ``enforce_admins``
        protection is enabled or disabled.

    .. links
    .. _Branch protection API documentation:
        https://developer.github.com/v3/repos/branches/#get-branch-protection
    .. _Admin enforcement of protected branch:
        https://developer.github.com/v3/repos/branches/#get-admin-enforcement-of-protected-branch
    """

    def _update_attributes(self, protection):
        self._api = protection["url"]
        self.enabled = protection["enabled"]

    @decorators.requires_auth
    def enable(self):
        """Enable Admin enforcement for protected branch.

        :returns: True if successful, False otherwise
        :rtype: bool
        """
        resp = self._post(self._api)
        return self._boolean(resp, 200, 404)

    @decorators.requires_auth
    def disable(self):
        """Disable Admin enforcement for protected branch.

        :returns: True if successful, False otherwise
        :rtype: bool
        """
        resp = self._delete(self._api)
        return self._boolean(resp, 204, 404)
class ProtectionRestrictions(models.GitHubCore):
"""The representation of a sub-portion of branch protection.
.. seealso::
`Branch protection API documentation`_
GitHub's documentation of branch protection
`Branch restriction documentation`_
GitHub's description of branch restriction
This object has the following attributes:
.. attribute:: original_teams
List of :class:`~github3.orgs.ShortTeam` objects representing
the teams allowed to push to the protected branch.
.. attribute:: original_users
List of :class:`~github3.users.ShortUser` objects representing
the users allowed to push to the protected branch.
.. attribute:: teams_url
The URL to retrieve the list of teams allowed to push to the
protected branch.
.. attribute:: users_url
The URL to retrieve the list of users allowed to push to the
protected branch.
.. links
.. _Branch protection API documentation:
https://developer.github.com/v3/repos/branches/#get-branch-protection
.. _Branch restriction documentation:
https://help.github.com/articles/about-branch-restrictions
"""
def _update_attributes(self, protection):
from .. import apps, orgs, users
self._api = protection["url"]
self.users_url = protection["users_url"]
self.teams_url = protection["teams_url"]
self.apps_url = protection.get("apps_url")
self.original_users = protection["users"]
if self.original_users:
self.original_users = [
users.ShortUser(user, self) for user in self.original_users
]
self.original_teams = protection["teams"]
if self.original_teams:
self.original_teams = [
orgs.ShortTeam(team, self) for team in self.original_teams
]
self.original_apps = protection.get("apps")
if self.original_apps:
self.original_apps = [
apps.App(app, self) for app in self.original_apps
]
@decorators.requires_auth
def add_teams(
self, teams: t.Sequence[str]
) -> t.Sequence["orgs.ShortTeam"]:
"""Add teams to the protected branch.
See:
https://developer.github.com/v3/repos/branches/#add-team-restrictions-of-protected-branch
.. warning::
This will not update the object to replace the ``original_teams``
attribute.
:param list teams:
The list of the team names to have access to interact with
protected branch.
:returns:
List of added teams
:rtype:
List[github3.orgs.ShortTeam]
"""
from .. import orgs
resp = self._post(self.teams_url, data=teams)
json = self._json(resp, 200)
return [orgs.ShortTeam(team, self) for team in json] if json else []
@decorators.requires_auth
def add_users(
self, users: t.Sequence[str]
) -> t.Sequence["tusers.ShortUser"]:
"""Add users to protected branch.
See
https://developer.github.com/v3/repos/branches/#add-user-restrictions-of-protected-branch
.. warning::
This will not update the object to replace the ``original_users``
attribute.
:param list users:
The list of the user logins to have access to interact with
protected branch.
:returns:
List of added users
:rtype:
List[github3.users.ShortUser]
"""
from .. import users as _users
json = self._json(self._post(self.users_url, data=users), 200)
return [_users.ShortUser(user, self) for user in json] if json else []
@decorators.requires_auth
def apps(self, number: int = -1) -> t.Generator["tapps.App", None, None]:
"""Retrieve current list of apps with access to the protected branch.
See
https://docs.github.com/en/rest/reference/repos#get-apps-with-access-to-the-protected-branch
.. warning::
This will not update the object to replace the ``original_apps``
attribute.
:param int number:
Limit the number of apps returned
:returns:
An iterator of apps
:rtype:
:class:`~github3.apps.App`
"""
from .. import apps
return self._iter(int(number), self.apps_url, apps.App)
@decorators.requires_auth
def add_app_restrictions(
self, apps: t.Sequence[t.Union["tapps.App", str]]
) -> t.List["tapps.App"]:
"""Grant app push access to the current branch.
See
https://docs.github.com/en/rest/reference/repos#add-app-access-restrictions
Per GitHub's documentation above:
Grants the specified apps push access for this branch. Only
installed GitHub Apps with write access to the contents permission
can be added as authorized actors on a protected branch.
:param list apps:
List of slugs of apps to grant push access to the protected
branch. If you pass a list of :class:`~github3.apps.App` then the
library will retrieve the slug for you.
:returns:
List of apps with push access to the protected branch
:rtype:
List[:class:`~github3.apps.App`]
"""
from .. import apps as _apps
apps = [getattr(a, "slug", a) for a in apps]
json = self._json(self._post(self.apps_url, data=apps), 200)
return [_apps.App(a, self) for a in json]
@decorators.requires_auth
def replace_app_restrictions(
self, apps: t.Sequence[t.Union["tapps.App", str]]
) -> t.List["tapps.App"]:
"""Replace existing app push access with only those specified.
See
https://docs.github.com/en/rest/reference/repos#set-app-access-restrictions
Per GitHub's documentation above:
Replaces the list of apps that have push access to this branch.
This removes all apps that previously had push access and grants
push access to the new list of apps. Only installed GitHub Apps
with write access to the contents permission can be added as
authorized actors on a protected branch.
:param list apps:
List of slugs of apps to grant push access to the protected
branch. If you pass a list of :class:`~github3.apps.App` then the
library will retrieve the slug for you.
:returns:
List of apps with push access to the protected branch
:rtype:
List[:class:`~github3.apps.App`]
"""
from .. import apps as _apps
apps = [getattr(a, "slug", a) for a in apps]
json = self._json(self._put(self.apps_url, data=apps), 200)
return [_apps.App(a, self) for a in json]
@decorators.requires_auth
def remove_app_restrictions(
self, apps: t.Sequence[t.Union["tapps.App", str]]
) -> t.List["tapps.App"]:
"""Remove the apps' push access to the protected branch.
See
https://docs.github.com/en/rest/reference/repos#remove-app-access-restrictions
:param list apps:
List of slugs of apps to revoke push access to the protected
branch. If you pass a list of :class:`~github3.apps.App` then the
library will retrieve the slug for you.
:returns:
List of apps that still have push access
:rtype:
List[:class:`~github3.apps.App`]
"""
from .. import apps as _apps
apps = [getattr(a, "slug", a) for a in apps]
json = self._json(self._delete(self.apps_url, data=apps), 200)
return [_apps.App(a, self) for a in json]
@decorators.requires_auth
def delete(self) -> bool:
"""Completely remove restrictions of the protected branch.
See
https://developer.github.com/v3/repos/branches/#remove-user-restrictions-of-protected-branch
:returns:
True if successful, False otherwise.
:rtype:
bool
"""
resp = self._delete(self._api)
return self._boolean(resp, 204, 404)
@decorators.requires_auth
def remove_teams(
self, teams: t.Sequence[str]
) -> t.Sequence["orgs.ShortTeam"]:
"""Remove teams from protected branch.
See
https://developer.github.com/v3/repos/branches/#remove-team-restrictions-of-protected-branch
:param list teams:
The list of the team names to stop having access to interact with
protected branch.
:returns:
List of removed teams
:rtype:
List[github3.orgs.ShortTeam]
"""
from .. import orgs
resp = self._delete(self.teams_url, json=teams)
json = self._json(resp, 200)
return [orgs.ShortTeam(team, self) for team in json] if json else []
@decorators.requires_auth
def remove_users(
self, users: t.Sequence[str]
) -> t.Sequence["tusers.ShortUser"]:
"""Remove users from protected branch.
See
https://developer.github.com/v3/repos/branches/#remove-user-restrictions-of-protected-branch
:param list users:
The list of the user logins to stop having access to interact with
protected branch.
:returns:
List of removed users
:rtype:
List[github3.users.ShortUser]
"""
resp = self._delete(self.users_url, json=users)
json = self._json(resp, 200)
from .. import users as _users
return [_users.ShortUser(user, self) for user in json] if json else []
@decorators.requires_auth
def replace_teams(
self, teams: t.Sequence[str]
) -> t.Sequence["orgs.ShortTeam"]:
"""Replace teams that will have access to protected branch.
See
https://developer.github.com/v3/repos/branches/#replace-team-restrictions-of-protected-branch
:param list teams:
The list of the team names to have access to interact with
protected branch.
:returns:
List of teams that now have access to the protected branch
:rtype:
List[github3.orgs.ShortTeam]
"""
from .. import orgs
resp = self._put(self.teams_url, json=teams)
json = self._json(resp, 200)
return [orgs.ShortTeam(team, self) for team in json] if json else []
@decorators.requires_auth
def replace_users(
self, users: t.Sequence[str]
) -> t.Sequence["tusers.ShortUser"]:
"""Replace users that will have access to protected branch.
See
https://developer.github.com/v3/repos/branches/#replace-user-restrictions-of-protected-branch
:param list users:
The list of the user logins to have access to interact with
protected branch.
:returns:
List of users that now have access to the protected branch
:rtype:
List[github3.users.ShortUser]
"""
users_resp = self._put(self.users_url, json=users)
return self._boolean(users_resp, 200, 404)
def teams(
self, number: int = -1
) -> t.Generator["orgs.ShortTeam", None, None]:
"""Retrieve an up-to-date listing of teams.
:returns:
An iterator of teams
:rtype:
:class:`~github3.orgs.ShortTeam`
"""
from .. import orgs
return self._iter(
int(number),
self.teams_url,
orgs.ShortTeam,
)
def users(
self, number: int = -1
) -> t.Generator["tusers.ShortUser", None, None]:
"""Retrieve an up-to-date listing of users.
:returns:
An iterator of users
:rtype:
:class:`~github3.users.ShortUser`
"""
from .. import users
return self._iter(int(number), self.users_url, users.ShortUser)
class ProtectionRequiredPullRequestReviews(models.GitHubCore):
    """The representation of a sub-portion of branch protection.
    .. seealso::
        `Branch protection API documentation`_
            GitHub's documentation of branch protection.
        `Branch Required Pull Request Reviews`_
            GitHub's documentation of required pull request review protections
    This object has the following attributes:
    .. attribute:: dismiss_stale_reviews
        A boolean attribute describing whether stale pull request reviews
        should be automatically dismissed by GitHub.
    .. attribute:: dismissal_restrictions
        If specified, a :class:`~github3.repos.branch.ProtectionRestrictions`
        object describing the dismissal restrictions for pull request reviews.
    .. attribute:: require_code_owner_reviews
        A boolean attribute describing whether to require "code owners" to
        review a pull request before it may be merged.
    .. attribute:: required_approving_review_count
        An integer describing the number (between 1 and 6) of reviews required
        before a pull request may be merged.
    .. links
    .. _Branch protection API documentation:
        https://developer.github.com/v3/repos/branches/#get-branch-protection
    .. _Branch Required Pull Request Reviews:
        https://developer.github.com/v3/repos/branches/#get-pull-request-review-enforcement-of-protected-branch
    """
    def _update_attributes(self, protection):
        # Endpoint used by update() and delete() below.
        self._api = protection["url"]
        self.dismiss_stale_reviews = protection["dismiss_stale_reviews"]
        # Use a temporary value to stay under line-length restrictions
        value = protection["require_code_owner_reviews"]
        self.require_code_owner_reviews = value
        # Use a temporary value to stay under line-length restrictions
        value = protection["required_approving_review_count"]
        self.required_approving_review_count = value
        # "dismissal_restrictions" is optional in the API payload.
        self.dismissal_restrictions = None
        if "dismissal_restrictions" in protection:
            self.dismissal_restrictions = ProtectionRestrictions(
                protection["dismissal_restrictions"], self
            )
    @decorators.requires_auth
    def update(
        self,
        dismiss_stale_reviews=None,
        require_code_owner_reviews=None,
        required_approving_review_count=None,
        dismissal_restrictions=None,
    ):
        """Update the configuration for the Required Pull Request Reviews.
        :param bool dismiss_stale_reviews:
            Whether or not to dismiss stale pull request reviews automatically
        :param bool require_code_owner_reviews:
            Blocks merging pull requests until code owners review them
        :param int required_approving_review_count:
            The number of reviewers required to approve pull requests.
            Acceptable values are between 1 and 6.
        :param dict dismissal_restrictions:
            An empty dictionary will disable this. This must have the
            following keys: ``users`` and ``teams`` each mapping to a list
            of user logins and team slugs respectively.
        :returns:
            A updated instance of the required pull request reviews.
        :rtype:
            :class:`~github3.repos.branch.ProtectionRequiredPullRequestReviews`
        """
        # Snapshot the current settings so that any parameter the caller
        # omits keeps its present value in the PATCH body built below.
        existing_values = {
            "dismiss_stale_reviews": self.dismiss_stale_reviews,
            "dismissal_restrictions": {
                # getattr with a default guards against
                # self.dismissal_restrictions being None.
                # NOTE(review): original_users/original_teams may themselves
                # be None if the API returned null lists; iterating would
                # then fail -- confirm the payload always uses lists.
                "users": [
                    getattr(u, "login", u)
                    for u in getattr(
                        self.dismissal_restrictions, "original_users", []
                    )
                ],
                "teams": [
                    getattr(t, "slug", t)
                    for t in getattr(
                        self.dismissal_restrictions, "original_teams", []
                    )
                ],
            },
            "require_code_owner_reviews": self.require_code_owner_reviews,
            "required_approving_review_count": (
                self.required_approving_review_count
            ),
        }
        # Prefer the explicit argument; fall back to the snapshot when the
        # argument was left as None.
        update_json = {
            "dismiss_stale_reviews": (
                dismiss_stale_reviews
                if dismiss_stale_reviews is not None
                else existing_values["dismiss_stale_reviews"]
            ),
            "require_code_owner_reviews": (
                require_code_owner_reviews
                if require_code_owner_reviews is not None
                else existing_values["require_code_owner_reviews"]
            ),
            "required_approving_review_count": (
                required_approving_review_count
                if required_approving_review_count is not None
                else existing_values["required_approving_review_count"]
            ),
            "dismissal_restrictions": (
                dismissal_restrictions
                if dismissal_restrictions is not None
                else existing_values["dismissal_restrictions"]
            ),
        }
        resp = self._patch(self._api, json=update_json)
        json = self._json(resp, 200)
        if json:
            self._update_attributes(json)
        return self
    @decorators.requires_auth
    def delete(self):
        """Remove the Required Pull Request Reviews.
        :returns:
            Whether the operation finished successfully or not
        :rtype:
            bool
        """
        resp = self._delete(self._api)
        return self._boolean(resp, 204, 404)
class ProtectionRequiredStatusChecks(models.GitHubCore):
    """The representation of a sub-portion of branch protection.
    .. seealso::
        `Branch protection API documentation`_
            GitHub's documentation of branch protection
        `Required Status Checks documentation`_
            GitHub's description of required status checks
        `Required Status Checks API documentation`_
            The API documentation for required status checks
    .. links
    .. _Branch protection API documentation:
        https://developer.github.com/v3/repos/branches/#get-branch-protection
    .. _Required Status Checks documentation:
        https://help.github.com/articles/about-required-status-checks
    .. _Required Status Checks API documentation:
        https://developer.github.com/v3/repos/branches/#get-required-status-checks-of-protected-branch
    """
    def _update_attributes(self, protection):
        # The API endpoint for this protection object; update()/delete()
        # below must use this attribute.
        self._api = protection["url"]
        self.strict = protection["strict"]
        self.original_contexts = protection["contexts"]
        self.contexts_url = protection["contexts_url"]
    @decorators.requires_auth
    def add_contexts(self, contexts):
        """Add contexts to the existing list of required contexts.
        See:
        https://developer.github.com/v3/repos/branches/#add-required-status-checks-contexts-of-protected-branch
        :param list contexts:
            The list of contexts to append to the existing list.
        :returns:
            The updated list of contexts.
        :rtype:
            list
        """
        resp = self._post(self.contexts_url, data=contexts)
        json = self._json(resp, 200)
        return json
    @decorators.requires_auth
    def contexts(self):
        """Retrieve the list of contexts required as status checks.
        See:
        https://developer.github.com/v3/repos/branches/#list-required-status-checks-contexts-of-protected-branch
        :returns:
            A list of context names which are required status checks.
        :rtype:
            list
        """
        resp = self._get(self.contexts_url)
        json = self._json(resp, 200)
        return json
    @decorators.requires_auth
    def remove_contexts(self, contexts):
        """Remove the specified contexts from the list of required contexts.
        See:
        https://developer.github.com/v3/repos/branches/#remove-required-status-checks-contexts-of-protected-branch
        :param list contexts:
            The context names to remove
        :returns:
            The updated list of contexts required as status checks.
        :rtype:
            list
        """
        resp = self._delete(self.contexts_url, json=contexts)
        json = self._json(resp, 200)
        return json
    @decorators.requires_auth
    def replace_contexts(self, contexts):
        """Replace the existing contexts required as status checks.
        See
        https://developer.github.com/v3/repos/branches/#replace-required-status-checks-contexts-of-protected-branch
        :param list contexts:
            The names of the contexts to be required as status checks
        :returns:
            The new list of contexts required as status checks.
        :rtype:
            list
        """
        resp = self._put(self.contexts_url, json=contexts)
        json = self._json(resp, 200)
        return json
    @decorators.requires_auth
    def delete_contexts(self, contexts):
        """Delete the contexts required as status checks.
        Like :meth:`remove_contexts` this issues a DELETE for the given
        contexts, but it returns a boolean instead of the updated list.
        See
        https://developer.github.com/v3/repos/branches/#remove-required-status-checks-contexts-of-protected-branch
        :param list contexts:
            The names of the contexts to no longer be required
        :returns:
            True if successful, False otherwise
        :rtype:
            bool
        """
        resp = self._delete(self.contexts_url, json=contexts)
        return self._boolean(resp, 204, 404)
    @decorators.requires_auth
    def update(self, strict=None, contexts=None):
        """Update required status checks for the branch.
        This requires admin or owner permissions to the repository and
        branch protection to be enabled.
        .. seealso::
            `API docs`_
                Description of how to update the required status checks.
        :param bool strict:
            Whether this should be strict protection or not.
        :param list contexts:
            A list of context names that should be required.
        :returns:
            A new instance of this class with the updated information
        :rtype:
            :class:`~github3.repos.branch.ProtectionRequiredStatusChecks`
        .. links
        .. _API docs:
            https://developer.github.com/v3/repos/branches/#update-required-status-checks-of-protected-branch
        """
        update_data = {}
        json = None
        if strict is not None:
            update_data["strict"] = strict
        if contexts is not None:
            update_data["contexts"] = contexts
        if update_data:
            # Bug fix: this previously PATCHed ``self.url``, which is never
            # assigned (_update_attributes stores the endpoint in
            # ``self._api``) and would raise AttributeError.
            resp = self._patch(self._api, json=update_data)
            json = self._json(resp, 200)
        if json is not None:
            self._update_attributes(json)
        return self
    @decorators.requires_auth
    def delete(self):
        """Remove required status checks from this branch.
        See:
        https://developer.github.com/v3/repos/branches/#remove-required-status-checks-of-protected-branch
        :returns:
            True if successful, False otherwise
        :rtype:
            bool
        """
        # Bug fix: ``self.url`` -> ``self._api`` (see update() above).
        resp = self._delete(self._api)
        return self._boolean(resp, 204, 404)
class ProtectionRequiredLinearHistory(models.GitHubCore):
    """The representation of a sub-portion of branch protection.
    .. seealso::
        `Branch protection API documentation`_
            GitHub's documentation of branch protection
    This object has the following attributes:
    .. attribute:: enabled
        A boolean attribute indicating whether the ``required_linear_history``
        protection is enabled or disabled.
    .. links
    .. _Branch protection API documentation:
        https://developer.github.com/v3/repos/branches/#get-branch-protection
    """
    def _update_attributes(self, protection):
        # Mirror the single "enabled" flag from the API payload.
        self.enabled = protection["enabled"]
class ProtectionAllowForcePushes(models.GitHubCore):
    """The representation of a sub-portion of branch protection.
    .. seealso::
        `Branch protection API documentation`_
            GitHub's documentation of branch protection
    This object has the following attributes:
    .. attribute:: enabled
        A boolean attribute indicating whether the ``allow_force_pushes``
        protection is enabled or disabled.
    .. links
    .. _Branch protection API documentation:
        https://developer.github.com/v3/repos/branches/#get-branch-protection
    """
    def _update_attributes(self, protection):
        # Mirror the single "enabled" flag from the API payload.
        self.enabled = protection["enabled"]
class ProtectionAllowDeletions(models.GitHubCore):
    """The representation of a sub-portion of branch protection.
    .. seealso::
        `Branch protection API documentation`_
            GitHub's documentation of branch protection
    This object has the following attributes:
    .. attribute:: enabled
        A boolean attribute indicating whether the ``allow_deletions``
        protection is enabled or disabled.
    .. links
    .. _Branch protection API documentation:
        https://developer.github.com/v3/repos/branches/#get-branch-protection
    """
    def _update_attributes(self, protection):
        # Mirror the single "enabled" flag from the API payload.
        self.enabled = protection["enabled"]
class ProtectionRequiredConversationResolution(models.GitHubCore):
    """The representation of a sub-portion of branch protection.
    .. seealso::
        `Branch protection API documentation`_
            GitHub's documentation of branch protection
    This object has the following attributes:
    .. attribute:: enabled
        A boolean attribute indicating whether the
        ``required_conversation_resolution`` protection is enabled or
        disabled.
    .. links
    .. _Branch protection API documentation:
        https://developer.github.com/v3/repos/branches/#get-branch-protection
    """
    def _update_attributes(self, protection):
        # Mirror the single "enabled" flag from the API payload.
        self.enabled = protection["enabled"]
| 34.673393 | 115 | 0.633371 | 46,106 | 0.993814 | 0 | 0 | 28,021 | 0.603992 | 0 | 0 | 29,540 | 0.636734 |
2dd0dbf3c9583810719a7c459b8980e6849b03e3 | 696 | py | Python | buildings/gui/menu_frame.py | strk/nz-buildings | 8dc8ee19d322837380bb4f016b01eccee2c1bd0a | [
"PostgreSQL",
"CC-BY-4.0"
] | 2 | 2020-02-21T00:46:31.000Z | 2020-08-17T14:22:19.000Z | buildings/gui/menu_frame.py | strk/nz-buildings | 8dc8ee19d322837380bb4f016b01eccee2c1bd0a | [
"PostgreSQL",
"CC-BY-4.0"
] | 243 | 2018-12-16T22:01:54.000Z | 2022-01-10T20:09:24.000Z | buildings/gui/menu_frame.py | strk/nz-buildings | 8dc8ee19d322837380bb4f016b01eccee2c1bd0a | [
"PostgreSQL",
"CC-BY-4.0"
] | 1 | 2020-03-24T10:35:43.000Z | 2020-03-24T10:35:43.000Z | # -*- coding: utf-8 -*-
import os.path
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import QFrame
from buildings.utilities.layers import LayerRegistry
# Get the path for the parent directory of this file.
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Load the Qt Designer .ui file that defines this frame's widgets.
FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), "menu_frame.ui"))
class MenuFrame(QFrame, FORM_CLASS):
    """Frame presenting the Buildings plugin menu inside the dock widget."""
    def __init__(self, dockwidget, parent=None):
        """Constructor."""
        # NOTE(review): ``dockwidget`` is accepted but never used here --
        # confirm whether callers rely on this parameter.
        super(MenuFrame, self).__init__(parent)
        self.setupUi(self)
        # Let the dashboard text area show the frame's own background.
        self.txt_dashboard.viewport().setAutoFillBackground(False)
        self.layer_registry = LayerRegistry()
| 27.84 | 88 | 0.719828 | 302 | 0.433908 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.156609 |
2dd15aa32b6924458b8fe69007859df334c4b9ae | 1,426 | py | Python | helpers.py | denizumutdereli/dialogflow_nlp_ai_powered_chat_bot | e624927527a32f21763d23cd585c868fe9190211 | [
"Apache-2.0"
] | 1 | 2022-01-13T00:35:20.000Z | 2022-01-13T00:35:20.000Z | helpers.py | denizumutdereli/dialogflow_nlp_ai_powered_chat_bot | e624927527a32f21763d23cd585c868fe9190211 | [
"Apache-2.0"
] | null | null | null | helpers.py | denizumutdereli/dialogflow_nlp_ai_powered_chat_bot | e624927527a32f21763d23cd585c868fe9190211 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import re
import emoji
import string
import time
import winsound
from bs4 import BeautifulSoup
from rich.progress import track
def clear():
    """Clear the terminal screen (``cls`` on Windows, ``clear`` elsewhere)."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def BeautifulSoupOp(text):
    """Strip anchors, spans and images from *text*, then return the
    remaining plain text with emoji and punctuation removed."""
    soup = BeautifulSoup(text.strip(), "html.parser")
    for tag_name in ('a', 'span', 'img'):
        for tag in soup.find_all(tag_name):
            tag.replaceWith('')
    return remove_emoji(soup.get_text())
def remove_emoji(lastMessage):
    """Return *lastMessage* with punctuation, emoji and redundant
    whitespace removed.

    :param str lastMessage: raw message text
    :returns: the cleaned, stripped message
    """
    # Bug fix (dead code removal): the original compiled a punctuation
    # pattern and called ``translator.sub(' ', lastMessage)`` without ever
    # using the result -- a pure no-op. The regex below already removes
    # punctuation, so those two lines were dropped.
    lastMessage = re.sub(r'[^\w\s]', '', lastMessage)
    # Explicitly strip emoji / pictograph code-point ranges (defensive;
    # most emoji are already removed by the [^\w\s] filter above).
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               u"\U00002702-\U000027B0"
                               u"\U000024C2-\U0001F251"
                               "]+", flags=re.UNICODE)
    lastMessage = emoji_pattern.sub(r'', lastMessage)
    # Collapse runs of spaces and trim both ends.
    lastMessage = re.sub(' +', ' ', lastMessage).strip()
    return lastMessage
def loading(rangeVal, description):
    """Show a rich progress bar that advances ``rangeVal`` times,
    sleeping 0.1s per step.

    :param int rangeVal: number of progress steps
    :param str description: label shown next to the progress bar
    """
    for step in track(range(rangeVal), description=description):
        time.sleep(0.1)
| 25.464286 | 64 | 0.674614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 419 | 0.293829 |
2dd1642cbad2de85e44adff8bc936f9819e814e3 | 3,749 | py | Python | gitver/config.py | movermeyer/gitver | 77d5a4420209a4ca00349b094eeca1f13e50d8e5 | [
"Apache-2.0"
] | 12 | 2015-03-31T18:19:42.000Z | 2019-08-14T10:56:00.000Z | gitver/config.py | movermeyer/gitver | 77d5a4420209a4ca00349b094eeca1f13e50d8e5 | [
"Apache-2.0"
] | 1 | 2015-04-21T12:44:40.000Z | 2019-07-15T07:12:15.000Z | gitver/config.py | movermeyer/gitver | 77d5a4420209a4ca00349b094eeca1f13e50d8e5 | [
"Apache-2.0"
] | 8 | 2016-03-25T16:43:13.000Z | 2021-11-26T10:44:57.000Z | #!/usr/bin/env python2
# coding=utf-8
"""
The default per-repository configuration
"""
import sys
import json
import string
from os.path import exists, dirname
from gitver.defines import CFGFILE
from termcolors import term, bold
default_config_text = """{
# automatically generated configuration file
#
# These defaults implement Semantic Versioning as described in the latest
# available documentation at http://semver.org/spec/v2.0.0.html
# by default, terminal output is NOT colorized for compatibility with older
# terminal emulators: you may enable this if you like a more modern look
"use_terminal_colors": false,
# prevent gitver from storing any information in its configuration directory
# if the .gitignore file doesn't exclude it from the repository
"safe_mode": true,
# default pre-release metadata when commit count > 0 AND
# no NEXT has been defined
"default_meta_pr_in_next_no_next": "NEXT",
# default pre-release metadata when commit count > 0
"default_meta_pr_in_next": "SNAPSHOT",
# default pre-release metadata prefix
"meta_pr_prefix": "-",
# default commit count prefix
"commit_count_prefix": ".",
# Python-based format string variable names are:
# maj, min, patch, rev, rev_prefix, meta_pr_prefix, meta_pr,
# commit_count_prefix, commit_count, build_id, build_id_full
#
# Note that prefixes will be empty strings if their valued counterpart
# doesn't have a meaningful value (i.e., 0 for commit count, no meta
# pre-release, ..)
# format string used to build the current version string when the
# commit count is 0
"format": "%(maj)s.%(min)s.%(patch)s%(rev_prefix)s%(rev)s%(meta_pr_prefix)s%(meta_pr)s",
# format string used to build the current version string when the
# commit count is > 0
"format_next": "%(maj)s.%(min)s.%(patch)s%(rev_prefix)s%(rev)s%(meta_pr_prefix)s%(meta_pr)s%(commit_count_prefix)s%(commit_count)s+%(build_id)s"
}"""
def remove_comments(text):
    """
    Removes line comments denoted by sub-strings starting with a '#'
    character from the specified string, construct a new text and returns it.
    """
    # Idiom fix: use the str.split method instead of the deprecated
    # ``string.split(text, '\n')`` module function (removed in Python 3);
    # the method form behaves identically on Python 2 and 3.
    data = text.split('\n')
    ret = ''
    for line in data:
        # Keep every line that is not a comment; note the surviving lines
        # are concatenated WITHOUT newlines, which is fine for JSON input.
        if not line.strip().startswith('#'):
            ret += line
    return ret
# Materialize the defaults: strip the inline comments from the template
# above, then parse the remaining text as JSON.
default_config = json.loads(remove_comments(default_config_text))
def create_default_configuration_file():
    """
    Creates a default configuration file from the default gitver's
    configuration text string in the predefined gitver's configuration
    directory.
    """
    # Never overwrite an existing configuration, and require the parent
    # configuration directory to already exist.
    if exists(CFGFILE) or not exists(dirname(CFGFILE)):
        return False
    with open(CFGFILE, 'w') as f:
        f.writelines(default_config_text)
    return True
def load_user_config():
    """
    Returns the gitver's configuration: tries to read the stored configuration
    file and merges it with the default one, ensuring a valid configuration is
    always returned.
    """
    try:
        with open(CFGFILE, 'r') as f:
            # Concatenate all non-comment lines, then parse the result as
            # JSON (mirrors remove_comments() above).
            data = ''
            for line in f:
                l = line.strip()
                if not l.startswith('#'):
                    data += l
            user = json.loads(data)
    except IOError:
        # No readable config file: fall back to the defaults only.
        user = dict()
    except (ValueError, KeyError) as v:
        # Malformed configuration is fatal: tell the user how to regenerate.
        # NOTE(review): ``v.message`` exists on Python 2 exceptions only;
        # this matches the file's python2 shebang but would fail on 3.
        term.err("An error occured parsing the configuration file \"" +
                 CFGFILE + "\": " + v.message +
                 "\nPlease check its syntax or rename it and generate the "
                 "default one with the " + bold("gitver init") + " command.")
        sys.exit(1)
    # merge user with defaults (user-provided keys win)
    return dict(default_config, **user)
| 31.771186 | 148 | 0.654308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,561 | 0.683115 |
2dd2a210c5e9a90b7a25257a08a7c3946145f326 | 3,153 | py | Python | oracle/TLOracle/property_R2_2.py | fatmaf/ROSMonitoring | 25eb3142d697d307bb2fd33af9bf4cd5191c5b42 | [
"MIT",
"BSD-3-Clause"
] | 11 | 2020-02-18T18:51:01.000Z | 2022-03-16T12:18:51.000Z | oracle/TLOracle/property_R2_2.py | fatmaf/ROSMonitoring | 25eb3142d697d307bb2fd33af9bf4cd5191c5b42 | [
"MIT",
"BSD-3-Clause"
] | 6 | 2020-02-03T15:34:49.000Z | 2020-02-05T15:58:37.000Z | oracle/TLOracle/property_R2_2.py | fatmaf/ROSMonitoring | 25eb3142d697d307bb2fd33af9bf4cd5191c5b42 | [
"MIT",
"BSD-3-Clause"
] | 2 | 2021-10-06T13:37:47.000Z | 2022-03-24T16:27:15.000Z | # MIT License
#
# Copyright (c) [2020] [Angelo Ferrando]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import oracle
# property to verify (temporal-logic formula evaluated by the oracle):
# at every point in the past, if the target is grasped then the d8 distance
# components stay below 0.1 and the other distance readings hold, and when
# the trigger is false the other-distances predicate must still hold.
PROPERTY = r'historically(({isTargetGrasped: true} -> {dx < 0.1, dy < 0.1, dz < 0.1, other_distances: true}) and ({trigger: false} -> {other_distances: true}))'
# predicates used in the property (initialization for time 0)
# Predicate valuations used in the property (empty until the first message).
predicates = dict(
)
# Additional predicates can be registered here, together with the logic in
# abstract_message() that derives them from incoming Json messages.
def abstract_message(message):
    """Fold a decoded Json message into the module-level ``predicates``."""
    distance_keys = ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8')
    if all(key in message for key in distance_keys):
        predicates['dx'] = message['d8']['x']
        predicates['dy'] = message['d8']['y']
        predicates['dz'] = message['d8']['z']
        # Every distance other than d8 must have at least one non-zero axis.
        predicates['other_distances'] = all(
            any(message[key][axis] != 0 for axis in ('x', 'y', 'z'))
            for key in distance_keys[:-1]
        )
    if 'isTargetGrasped' in message:
        predicates['isTargetGrasped'] = message['isTargetGrasped']
    return predicates
# This function has to be defined by the user depending on the property defined.
# In this case we have just implemented a simple and general function which
# updates the predicates if it finds the topic in the list of predicates.
# Since the property is defined on predicates, we need this function to update the
# predicates each time a message is observed. This abstraction of course is totally
# dependent on the specific application.
| 64.346939 | 623 | 0.693942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,268 | 0.719315 |
2dd344951d9aeadc827ae7e0e3e8b87cd96497d0 | 3,305 | py | Python | 3. Others/Python_OOP_Passenger_Registration.py | PurveshMakode24/snippets | ba360b363c7b98528d6b7320dcc446a9e3febd4b | [
"MIT"
] | 1 | 2018-12-25T21:10:02.000Z | 2018-12-25T21:10:02.000Z | 3. Others/Python_OOP_Passenger_Registration.py | PurveshMakode24/snippets | ba360b363c7b98528d6b7320dcc446a9e3febd4b | [
"MIT"
] | 2 | 2019-10-01T16:07:46.000Z | 2019-10-01T16:07:47.000Z | 3. Others/Python_OOP_Passenger_Registration.py | PurveshMakode24/snippets | ba360b363c7b98528d6b7320dcc446a9e3febd4b | [
"MIT"
] | 4 | 2020-09-01T02:22:44.000Z | 2020-10-07T12:14:58.000Z | from random import randint
import re; import json
class Passenger:
    """Plain data record for a single registered passenger."""
    # Fields are stored exactly as given; validation is performed
    # separately by ValidateField.
    def __init__(self, passengerId, passengerName, email, password, address, contact):
        self.passengerId = passengerId
        self.passengerName = passengerName
        self.email = email
        self.password = password
        self.address = address
        self.contact = contact
class ValidateField:
    """Field-level validators for a passenger record.

    Every ``check*`` method prints a human-readable message and returns
    ``None`` when its field is invalid, and returns ``True`` otherwise.
    """
    def __init__(self, passengerObj):
        self.passengerObj = passengerObj
    def checkPassengerName(self):
        name = self.passengerObj.passengerName
        # Letters and spaces only, at most 50 characters.
        if not "".join(name.split()).isalpha():
            print("\nEnter a valid name.")
            return None
        if len(name) > 50:
            print("\nName should not exceed 50 characters.")
            return None
        return True
    def checkEmail(self):
        regex = '^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,4}$'
        if re.match(regex, self.passengerObj.email):
            return True
        print("\nPlease enter a valid email.")
    def checkPassword(self):
        # Valid passwords have at least 8 characters.
        if len(self.passengerObj.password) > 7:
            return True
        print("\nPassword should be greater than 7.")
    def checkAddress(self):
        address = self.passengerObj.address
        if len(address) > 100:
            print("\nAddress should not exceeds 100 characters.")
        elif not len(address):
            print("\nAddress should not be empty.")
        else:
            return True
    def checkContact(self):
        if len(str(self.passengerObj.contact)) > 10:
            print("\nContact number should not exceeds 10 characters.")
        else:
            return True
def displayPassengers(passengerList):
    """Pretty-print every registered passenger as indented JSON."""
    if not passengerList:
        print("\nNo data found.")
        return
    print(json.dumps([p.__dict__ for p in passengerList], indent=4))
def registration(passengerList):
    """Interactively collect one passenger's details; if every field
    validates, append the new Passenger to ``passengerList``."""
    try:
        # randint(1000000, 9999999) can never be falsy, so the
        # ``or 0000000`` fallback is dead -- presumably defensive; verify.
        passengerId = int(randint(1000000,9999999) or 0000000)
        print("\nPassenger ID:", passengerId)
        passengerName = input("Enter the passenger name:")
        email = input("Enter email:")
        password = input("Enter password:")
        address = input("Enter address:").capitalize()
        contact = int(input("Enter contact number:"))
        passengerObj = Passenger(passengerId, passengerName, email, password, address, contact)
        v = ValidateField(passengerObj)
        # All validators must pass before the passenger is stored.
        if v.checkPassengerName() and v.checkEmail() and v.checkPassword() and v.checkAddress() and v.checkContact():
            passengerList.append(passengerObj)
            print("\nPassenger Registration is Sucessful!")
    except Exception as e:
        # Covers e.g. a non-numeric contact number (int() raises ValueError).
        print("Error:", e)
# Simple console menu loop; any choice other than 1 or 2 (including the
# empty-input default of -1) exits.
if __name__ == '__main__':
    print("="*52+"\nPASSENGER REGISTRATION\n"+"="*52)
    print("1. Enter 1 to register a passenger.")
    print("2. Enter 2 to display all the registered passengers.")
    print("3. Enter -1 to exit.\n"+"-"*52)
    passengerList = []
    while True:
        c = int(input("Enter you choice:") or -1)
        if c==1:
            registration(passengerList)
        elif c==2:
            displayPassengers(passengerList)
        else:
            break
        print("-"*52)
| 30.601852 | 117 | 0.586082 | 1,615 | 0.488654 | 0 | 0 | 0 | 0 | 0 | 0 | 673 | 0.203631 |
2dd39dc7304ad676d501a32885e968d0740bdaf7 | 1,379 | py | Python | software/examples/python/03-multiplexer/main.py | esysberlin/lufo-ifez-datenkonzentrator | f803724b316e2df46e69afab91afa22c64a8d920 | [
"MIT"
] | 2 | 2019-02-04T16:04:29.000Z | 2019-02-04T23:33:12.000Z | software/examples/python/03-multiplexer/main.py | esysberlin/lufo-ifez-datenkonzentrator | f803724b316e2df46e69afab91afa22c64a8d920 | [
"MIT"
] | null | null | null | software/examples/python/03-multiplexer/main.py | esysberlin/lufo-ifez-datenkonzentrator | f803724b316e2df46e69afab91afa22c64a8d920 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import random
from common import spi
def main():
    """Show the current multiplexer state, randomize every output, and show the result."""
    state = spi.parse_response(spi.query('multiplexer', 'get_outputs'))
    print("Current multiplexer state:")
    _print_state(state)
    print('')

    # Pick a fresh random input for every output channel.
    state = [_random_input() for _ in state]

    print('Setting multiplexer to {} ...'.format(state))
    response = spi.parse_response(spi.query('multiplexer', 'set_outputs', state))
    print('Response: {}'.format(response))
    print('')

    # Re-read the state to confirm the change took effect.
    state = spi.parse_response(spi.query('multiplexer', 'get_outputs'))
    print("New state:")
    _print_state(state)
# Input channel names selectable on the multiplexer (two banks, A and B).
_MULTIPLEXER_INPUTS = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8',
                       'B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8']
# Output channel names, indexed in the order the SPI 'get_outputs' query reports them.
_MULTIPLEXER_OUTPUTS = ['OUT_A', 'OUT_B']
def _print_state(spi_response):
    """Print, for each multiplexer output, which input it is connected to.

    'NC' in the SPI response means the output is not connected.
    """
    for index, state in enumerate(spi_response):
        name = _MULTIPLEXER_OUTPUTS[index]
        if state == 'NC':
            print('{} is not connected.'.format(name))
        else:
            print('{} is connected to {}.'.format(name, state))
def _random_input():
    """Return a uniformly random multiplexer input name."""
    # random.choice is the idiomatic, equivalent form of
    # seq[int(random.random() * len(seq))].
    return random.choice(_MULTIPLEXER_INPUTS)
# Run the demo only when executed directly as a script.
if __name__ == '__main__':
    main()
| 28.729167 | 88 | 0.641769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 350 | 0.253807 |
2dd735db2a02bcc45cec0aa0e3b52d0a3bae2b8e | 118 | py | Python | test/a.py | atsuoishimoto/pyjf3 | 6f4b22e24c8f3bae5120b00e1de86e66fabb1785 | [
"Unlicense"
] | null | null | null | test/a.py | atsuoishimoto/pyjf3 | 6f4b22e24c8f3bae5120b00e1de86e66fabb1785 | [
"Unlicense"
] | 2 | 2015-10-01T20:41:28.000Z | 2016-03-14T14:50:41.000Z | test/a.py | atsuoishimoto/pyjf3 | 6f4b22e24c8f3bae5120b00e1de86e66fabb1785 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
import pyjf
print repr(u'使'.encode('euc-jp'))
print repr(pyjf.sjistoeuc(u'使'.encode('sjis')))
| 23.6 | 47 | 0.644068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.401639 |
2dd802a334b7dcb54120950c7e4f4e6a2c7dc01e | 437 | py | Python | rl_sandbox/priors/uniform.py | chanb/rl_sandbox_public | e55f954a29880f83a5b0c3358badda4d900f1564 | [
"MIT"
] | 14 | 2020-11-09T22:05:37.000Z | 2022-02-11T12:41:33.000Z | rl_sandbox/priors/uniform.py | chanb/rl_sandbox_public | e55f954a29880f83a5b0c3358badda4d900f1564 | [
"MIT"
] | null | null | null | rl_sandbox/priors/uniform.py | chanb/rl_sandbox_public | e55f954a29880f83a5b0c3358badda4d900f1564 | [
"MIT"
] | null | null | null | import torch
from torch.distributions import Uniform
from rl_sandbox.constants import CPU
class UniformPrior:
    """Uniform prior over [low, high), with samples moved to a torch device."""

    def __init__(self, low, high, device=torch.device(CPU)):
        # Remember the target device so drawn samples can be moved onto it.
        self.device = device
        self.dist = Uniform(low=low, high=high)

    def sample(self, num_samples):
        """Draw samples with shape `num_samples` and place them on the device."""
        drawn = self.dist.rsample(sample_shape=num_samples)
        return drawn.to(self.device)

    def lprob(self, samples):
        """Return the log-density of `samples` under the uniform distribution."""
        return self.dist.log_prob(samples)
| 24.277778 | 74 | 0.709382 | 342 | 0.782609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2dd8addc613d001e1bcad53b369e5ef7379a46b5 | 2,759 | py | Python | preprocess/conll_to_factors.py | thilakshiK/wmt16-scripts | 7fa32971e2927c397434df2e0f171834abf0b07a | [
"MIT"
] | 132 | 2016-05-20T15:59:48.000Z | 2022-03-30T13:58:50.000Z | preprocess/conll_to_factors.py | chenyangh/wmt16-scripts | 9695851c150fedbac3418915b251a75292d7d527 | [
"MIT"
] | 14 | 2016-07-14T16:14:14.000Z | 2022-02-16T18:49:18.000Z | preprocess/conll_to_factors.py | chenyangh/wmt16-scripts | 9695851c150fedbac3418915b251a75292d7d527 | [
"MIT"
] | 66 | 2016-05-17T06:27:16.000Z | 2022-02-24T13:14:29.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Rico Sennrich
# Distributed under MIT license
# take conll file, and bpe-segmented text, and produce factored output
import sys
import re
from collections import namedtuple
# One token row of a 10-column CoNLL dependency parse.  Note: `tag` stores the
# file's second (fine-grained) tag column, and `proj_head`/`proj_func` fall
# back to `head`/`func` when the projective columns are unannotated ('_') —
# see read_sentences below.
Word = namedtuple(
    'Word',
    ['pos', 'word', 'lemma', 'tag', 'morph', 'head', 'func', 'proj_head', 'proj_func'])
def escape_special_chars(line):
    """Escape XML quote characters and the factor/syntax delimiters in `line`.

    '[' and ']' mark syntax non-terminals and '|' separates factors in the
    output format, so literal occurrences must be entity-escaped.
    """
    replacements = (
        ('\'', '&apos;'),   # xml
        ('"', '&quot;'),    # xml
        ('[', '&#91;'),     # syntax non-terminal
        (']', '&#93;'),     # syntax non-terminal
        ('|', '&#124;'),    # factor separator
    )
    for old, new in replacements:
        line = line.replace(old, new)
    return line
def read_sentences(fobj):
    """Yield sentences (lists of Word) from a 10-column CoNLL file object.

    A blank line terminates a sentence.  Rows are normally split on
    whitespace; when that does not yield ten columns (the word itself may be
    unicode whitespace), the row is re-split with a space/tab regex.
    Unannotated projective columns ('_') fall back to the plain
    head/function columns.
    """
    sentence = []
    for line in fobj:
        if line == "\n":
            yield sentence
            sentence = []
            continue
        try:
            (pos, word, lemma, tag, tag2, morph,
             head, func, proj_head, proj_func) = line.split()
        except ValueError:  # Word may be unicode whitespace.
            (pos, word, lemma, tag, tag2, morph,
             head, func, proj_head, proj_func) = re.split(' *\t*', line.strip())
        word = escape_special_chars(word)
        lemma = escape_special_chars(lemma)
        # Morphological features use ',' rather than '|' downstream.
        morph = morph.replace('|', ',')
        if proj_head == '_':
            # No projective annotation: reuse the plain dependency columns.
            proj_head = head
            proj_func = func
        # The fine-grained tag (tag2) is what gets stored as Word.tag.
        sentence.append(
            Word(int(pos), word, lemma, tag2, morph,
                 int(head), func, int(proj_head), proj_func))
def get_factors(sentence, idx):
    """Return the [lemma, POS tag, dependency function] factors of token `idx`."""
    token = sentence[idx]
    return [token.lemma, token.tag, token.func]
#text file that has been preprocessed and split with BPE
bpe_file = open(sys.argv[1])

#conll file with annotation of original corpus; mapping is done by index, so number of sentences and words (before BPE) must match
conll_file = open(sys.argv[2])

conll_sentences = read_sentences(conll_file)

for line in bpe_file:
    # BPE segmentation state of the current subword:
    # O = whole word, B = first subword, I = inner subword, E = last subword.
    state = "O"
    i = 0
    # Use the next() builtin instead of generator.next(): the method form is
    # Python-2 only, while next() works on both Python 2 and 3.
    sentence = next(conll_sentences)
    for word in line.split():
        # Subwords of one original token share that token's factors; `i` only
        # advances once the token's final subword (no '@@' marker) is seen.
        factors = get_factors(sentence, i)
        if word.endswith('@@'):
            if state == "O" or state == "E":
                state = "B"
            elif state == "B" or state == "I":
                state = "I"
        else:
            i += 1
            if state == "B" or state == "I":
                state = "E"
            else:
                state = "O"
        sys.stdout.write('|'.join([word, state] + factors) + ' ')
    sys.stdout.write('\n')
| 24.415929 | 130 | 0.507068 | 0 | 0 | 1,178 | 0.426966 | 0 | 0 | 0 | 0 | 630 | 0.228344 |
2dd8b59c63a68dfdad978b1ecaee6cd2d1f63d54 | 4,661 | py | Python | Training/Auto_Labelling/tests/test_utils.py | evamok/knowledge-extraction-recipes-forms | d42dc5a728d72fabc6a0c568834ef6786deb9116 | [
"MIT"
] | 93 | 2020-08-03T10:23:43.000Z | 2022-03-23T15:41:22.000Z | Training/Auto_Labelling/tests/test_utils.py | ciph3rwoman/knowledge-extraction-recipes-forms | ce1adbb0515d1b598ba89a4a06dd3cf554230565 | [
"MIT"
] | 11 | 2020-08-05T06:49:21.000Z | 2022-02-01T10:00:25.000Z | Training/Auto_Labelling/tests/test_utils.py | ciph3rwoman/knowledge-extraction-recipes-forms | ce1adbb0515d1b598ba89a4a06dd3cf554230565 | [
"MIT"
] | 39 | 2020-08-05T14:48:43.000Z | 2022-02-12T05:12:11.000Z | #!/usr/bin/python
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pytest
import os
import json
from mock import MagicMock, patch, mock_open
from shared_code import utils
def test_is_url_returns_true_when_url_passed_in():
    """A well-formed https URL must be recognised as a URL."""
    candidate = "https://fake-url.com/data.txt"
    assert utils.is_url(candidate) is True
def test_is_url_returns_false_when_filepath_passed_in():
    """A relative filesystem path must not be classified as a URL."""
    candidate = "./data.txt"
    assert utils.is_url(candidate) is False
def test_is_url_raises_exception_when_None_passed_in():
    """None input must raise a ValueError carrying a descriptive message."""
    with pytest.raises(ValueError) as excinfo:
        utils.is_url(None)
    assert "is_url input is None!" in str(excinfo.value)
def test_is_url_raises_exception_when_empty_string_passed_in():
    """Empty-string input must raise a ValueError carrying a descriptive message."""
    with pytest.raises(ValueError) as excinfo:
        utils.is_url("")
    assert "is_url input is empty string!" in str(excinfo.value)
@patch('shared_code.utils.get_lookup_fields_from_file')
def test_get_lookup_fields_loads_file_when_input_not_url(fake_get_lookup_fields_from_file):
    """A filesystem path should be routed to the file-based loader."""
    utils.get_lookup_fields("./data.txt")
    assert fake_get_lookup_fields_from_file.called
@patch('shared_code.utils.get_lookup_fields_from_url')
def test_get_lookup_fields_loads_from_url_when_input_is_url(fake_get_lookup_fields_from_url):
    """A URL input should be routed to the URL-based loader.

    Renamed: this function previously reused the name
    test_get_lookup_fields_loads_file_when_input_not_url, so Python
    redefined it and the file-loader test above was never collected.
    """
    #arrange
    input = "https://fake-url.com/data.txt"
    #act
    result = utils.get_lookup_fields(input)
    #assert
    assert fake_get_lookup_fields_from_url.called
def test_get_lookup_fields_raises_exception_when_None_passed_in():
    # NOTE(review): despite the name, the originally intended raising
    # behaviour (ValueError "is_url input is None!") is commented out; the
    # current contract is simply to return None for None input.
    assert utils.get_lookup_fields(None) is None
def test_get_lookup_fields_raises_exception_when_empty_string_passed_in():
    # NOTE(review): despite the name, the originally intended raising
    # behaviour (ValueError "is_url input is empty string!") is commented
    # out; the current contract is simply to return None for empty input.
    assert utils.get_lookup_fields("") is None
def test_get_lookup_fields_raises_exception_when_number_passed_in():
    # NOTE(review): despite the name, the originally intended raising
    # behaviour (ValueError "is_url input is numeric! Must be string") is
    # commented out; numeric input currently just yields None.
    assert utils.get_lookup_fields(124) is None
def test_get_lookup_fields_should_only_accept_strings():
    # NOTE(review): the originally intended raising behaviour
    # (ValueError "is_url input must be string") is commented out; a
    # non-string (callable) input currently just yields None.
    def not_a_string():
        print("a fake method")
    assert utils.get_lookup_fields(not_a_string) is None
@patch('requests.get')
def test_get_lookup_fields_requests_url_once(fake_requests_get):
    """Fetching lookup fields from a URL should issue exactly one GET for it.

    Renamed: this function previously shared the name
    test_get_lookup_fields_fetches_json_from_url with the def below, so it
    was shadowed and never collected by pytest.
    """
    #arrange
    input = "https://fake-url.com/data.txt"
    #act
    result = utils.get_lookup_fields(input)
    #assert
    fake_requests_get.assert_called_once_with(url=input)
def test_get_lookup_fields_reads_json_from_file():
    """Lookup fields should be parsed from a local JSON file.

    Renamed from test_get_lookup_fields_fetches_json_from_url: the body
    mocks builtins.open with a file path, not a URL, and the old name also
    collided with (and shadowed) the patched URL test above.
    """
    #arrange
    input = "data.txt"
    data = {
        "foo" : "bar"
    }
    json_text = json.dumps(data)
    #act
    with patch('builtins.open',
               mock_open(read_data=json_text),
               create=True):
        result = utils.get_lookup_fields(input)
    #assert
    assert result["foo"] == data["foo"]
def test_is_number_returns_true_when_int():
    """An int is a number."""
    assert utils.is_number(1) is True
def test_is_number_returns_true_when_float():
    """A float is a number."""
    assert utils.is_number(20.4) is True
def test_is_number_returns_true_when_complex():
    """A complex value is a number."""
    assert utils.is_number(1 + 2j) is True
def test_is_number_returns_false_when_string():
    """A non-numeric string must not be classified as a number.

    Fixed: the original body was a copy of the int test (input = 1,
    asserting True), so the string case this test is named after was
    never actually exercised.
    """
    #arrange
    input = "not a number"
    #act
    result = utils.is_number(input)
    #assert
    assert result is False
def test_is_number_returns_false_when_lambda():
    """A callable is not a number."""
    fn = lambda a : a + 10
    assert utils.is_number(fn) is False