max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
pyAI-OpenMV4/2.机器视觉/5.人脸检测/2.人脸追踪/face_tracking.py | 01studio-lab/MicroPython_Examples | 73 | 12763251 | # 人脸追踪例程
#
# 这个例程展示了如何使用关键特征来追踪一个已经使用Haar Cascade检测出来的人脸。
# 程序第一阶段先使用 Haar Cascade 找出人脸.然后使用关键特征来学习,最后不停的找这个人脸。
# 关键特征点可以用来追踪任何东西。
#
#翻译:01Studio
import sensor, time, image
# Reset the sensor and configure it for face detection.
sensor.reset()
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240)) # small window inside VGA (640x480) -- effectively a digital zoom
sensor.set_pixformat(sensor.GRAYSCALE)
# Let the sensor settle after configuration.
sensor.skip_frames(time = 2000)
# Load the Haar Cascade model for frontal faces.
# The default is 25 stages; fewer stages run faster but detect less reliably.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# Keypoints learned from the first detected face (None until a face is found).
kpts1 = None
# Stage 1: find a face with the Haar Cascade, then learn its keypoints.
while (kpts1 == None):
    img = sensor.snapshot()
    img.draw_string(0, 0, "Looking for a face...")
    # Find faces
    objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
    if objects:
        # Expand the detected ROI (x, y, w, h) by 31 pixels in every direction
        # so the learned keypoints include some context around the face.
        face = (objects[0][0]-31, objects[0][1]-31,objects[0][2]+31*2, objects[0][3]+31*2)
        # Learn keypoints from the expanded face ROI.
        kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, roi=face)
        # Outline the detected face.
        img.draw_rectangle(objects[0])
        # Show the learned keypoints.
        print(kpts1)
        img.draw_keypoints(kpts1, size=24)
        img = sensor.snapshot()
        # NOTE(review): on OpenMV firmware time.sleep() historically takes
        # milliseconds (a 2 s pause); on standard Python this would be 2000
        # seconds -- confirm against the firmware version in use.
        time.sleep(2000) # pause so the drawn keypoints can be inspected
# FPS clock
clock = time.clock()
# Stage 2: track the learned face by matching keypoints on every frame.
while (True):
    clock.tick()
    img = sensor.snapshot()
    # Extract keypoints from the current frame (whole image, normalized).
    kpts2 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, normalized=True)
    if (kpts2):
        # Match the current keypoints against the learned set kpts1.
        c=image.match_descriptor(kpts1, kpts2, threshold=85)
        match = c[6] # c[6] is the match count; larger means a stronger match.
        if (match>5): # treat more than 5 matched points as a successful match
            img.draw_rectangle(c[2:6])
            img.draw_cross(c[0], c[1], size=10)
            print(kpts2, "matched:%d dt:%d"%(match, c[7]))
    # Draw FPS
    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
| 3 | 3 |
lang/py/pylib/code/warnings/warnings_showwarning.py | ch1huizong/learning | 1 | 12763252 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 <NAME> All rights reserved.
#
"""
"""
#end_pymotw_header
import warnings
import logging
logging.basicConfig(level=logging.INFO)
def send_warnings_to_log(message, category, filename, lineno, file=None, line=None):
    """Redirect a warning to the logging module.

    Matches the full callback signature that the Python 3 ``warnings``
    machinery uses when invoking an overridden ``warnings.showwarning``:
    ``(message, category, filename, lineno, file, line)``. Without the
    ``line`` parameter, emitting any warning raises TypeError because the
    interpreter passes all six arguments positionally.

    :param message: the warning instance or message text
    :param category: the warning class (e.g. ``UserWarning``)
    :param filename: source file that triggered the warning
    :param lineno: line number that triggered the warning
    :param file: ignored; kept for signature compatibility
    :param line: ignored; kept for signature compatibility
    """
    logging.warning(
        '%s:%s: %s:%s' %
        (filename, lineno, category.__name__, message))
    return
# Keep a reference to the default handler (never restored in this example;
# reassign it to warnings.showwarning to route warnings back to stderr).
old_showwarning = warnings.showwarning
# Route all subsequent warnings through the logging module.
warnings.showwarning = send_warnings_to_log
# Trigger a warning to demonstrate the redirection.
warnings.warn('message')
| 2.34375 | 2 |
setup.py | stfc/HartreeParticleDSL | 0 | 12763253 | #!/usr/bin/env python3
"""Setup script. Used by easy_install and pip."""
import os
from setuptools import setup, find_packages
# Project root (the directory containing this setup.py) and the src/ layout root.
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
SRC_PATH = os.path.join(BASE_PATH, "src")
# Discover all packages under src/ (paired with package_dir={"": "src"} below).
PACKAGES = find_packages(where=SRC_PATH)
# Distribution metadata passed to setup().
NAME = 'HartreeParticleDSL'
AUTHOR = ("<NAME> <<EMAIL>>")
AUTHOR_EMAIL = '<EMAIL>'
URL = 'https://github.com/NYI'
DOWNLOAD_URL = 'https://github.com/NYI'
DESCRIPTION = ('HartreeParticleDSL - A Generic Particle DSL supporting a variety of backends')
LONG_DESCRIPTION = '''\
TBD
'''
LICENSE = ' TBD '
# Trove classifiers; see https://pypi.org/classifiers/ for the valid set.
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'Intended Audience :: Science/Research',
    'Natural Language :: English',
    'Programming Language :: Python',
    'Topic :: Scientific/Engineering',
    'Topic :: Software Development',
    'Topic :: Utilities',
    'Operating System :: POSIX',
    'Operating System :: Unix']
# Package version (pre-release alpha).
VERSION = '0.0.1a'
if __name__ == '__main__':
    def get_files(directory, install_path, valid_suffixes):
        '''Utility routine that creates a list of 2-tuples, each consisting of
        the target installation directory and a list of files
        (specified relative to the project root directory).

        :param str directory: the directory containing the required files.
        :param str install_path: the location where the files will be placed.
        :param valid_suffixes: the suffixes of the required files.
        :type valid_suffixes: [str]
        :returns: a list of 2-tuples, each consisting of the target \
            installation directory and a list of files (specified relative \
            to the project root directory).
        :rtype: [(str, [str])]
        '''
        examples = []
        for dirpath, _, filenames in os.walk(directory):
            # Skip private / machine-generated directories (e.g. __pycache__).
            if ("__" not in dirpath) and filenames:
                rel_path = os.path.relpath(dirpath, directory)
                # A generator inside any() short-circuits on the first matching
                # suffix (the original built a throwaway list on every file).
                files = [
                    os.path.join(os.path.basename(install_path),
                                 rel_path, filename)
                    for filename in filenames
                    if any(filename.endswith(suffix)
                           for suffix in valid_suffixes)]
                if files:
                    examples.append((os.path.join(install_path, rel_path),
                                     files))
        return examples
    # We have all of the example, tutorial and wrapper libraries files
    # listed in MANIFEST.in but unless we specify them in the data_files
    # argument of setup() they don't seem to get installed.
    # Since the data_files argument doesn't accept wildcards we have to
    # explicitly list every file we want.
    # INSTALL_PATH controls where the files will be installed.
    # VALID_SUFFIXES controls the type of files to include.
    EGS_DIR = os.path.join(BASE_PATH, "examples")
    INSTALL_PATH = os.path.join("share", "HartreeParticleDSL", "examples")
    VALID_SUFFIXES = ["90", "py", "md", ".c", ".cl", "Makefile", ".mk"]
    EXAMPLES = get_files(EGS_DIR, INSTALL_PATH, VALID_SUFFIXES)
    # Same treatment for the wrapper-library sources under lib/.
    LIBS_DIR = os.path.join(BASE_PATH, "lib")
    INSTALL_PATH = os.path.join("share", "HartreeParticleDSL", "lib")
    VALID_SUFFIXES = ["90", "sh", "py", "md", "Makefile", ".mk",
                      ".jinja", "doxyfile"]
    LIBS = get_files(LIBS_DIR, INSTALL_PATH, VALID_SUFFIXES)
    setup(
        name=NAME,
        version=VERSION,
        author=AUTHOR,
        author_email=(AUTHOR_EMAIL),
        license=LICENSE,
        url=URL,
        description=DESCRIPTION,
        long_description=LONG_DESCRIPTION,
        classifiers=CLASSIFIERS,
        packages=PACKAGES,
        package_dir={"": "src"},
        install_requires=['pyparsing', 'fparser==0.0.12', 'configparser',
                          'six'],
        # Optional dependency groups: pip install HartreeParticleDSL[doc] etc.
        extras_require={
            'dag': ["graphviz"],
            'doc': ["sphinx", "sphinxcontrib.bibtex < 2.0.0",
                    "sphinx_rtd_theme", "autoapi"],
            'psydata': ["Jinja2"],
            'test': ["pep8", "pylint", "pytest-cov", "pytest-pep8",
                     "pytest-pylint", "pytest-flakes", "pytest-pep257"],
        },
        include_package_data=True,
        # scripts=['bin/psyclone', 'bin/genkernelstub', 'bin/psyad'],
        data_files=LIBS
        )
| 2.171875 | 2 |
course_project/K33401/Zolotov_Pavel/backend/classified/migrations/0001_initial.py | pavel-zolotov/ITMO-ICT-Frontend-2021 | 4 | 12763254 | <filename>course_project/K33401/Zolotov_Pavel/backend/classified/migrations/0001_initial.py
# Generated by Django 4.0.1 on 2022-01-22 17:45
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the `classified` app.

    Creates Company, Industry, Job, Region and JobResponse, then wires up
    Company's foreign keys. Auto-generated by Django 4.0.1; applied
    migrations should not be edited retroactively.
    """
    # First migration of this app.
    initial = True
    dependencies = [
        # The user model is swappable, so depend on whichever is configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Company: created before Industry/Region, so those FKs are added below.
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                # Rating is constrained to the range [0, 5].
                ('rating', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)])),
                ('description', models.TextField(max_length=500)),
                ('verified', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Industry',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        # Job postings belong to a Company.
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('description', models.TextField(max_length=2000)),
                # Required experience is constrained to the range [0, 10] years.
                ('min_years_experience', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(10)])),
                ('remote_available', models.BooleanField(default=False)),
                ('open', models.BooleanField(default=True)),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classified.company')),
            ],
        ),
        migrations.CreateModel(
            name='Region',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        # A user's application to a Job (deleted with either the job or user).
        migrations.CreateModel(
            name='JobResponse',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(max_length=500)),
                ('cv', models.URLField()),
                ('created_at', models.DateTimeField()),
                ('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classified.job')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # FKs added after Company creation (Industry/Region are created above).
        migrations.AddField(
            model_name='company',
            name='industry',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classified.industry'),
        ),
        migrations.AddField(
            model_name='company',
            name='regions',
            field=models.ManyToManyField(to='classified.Region'),
        ),
    ]
| 1.773438 | 2 |
pyzipcin/__init__.py | ravigoel08/Zipcin | 4 | 12763255 | <gh_stars>1-10
from .modules import *
# NOTE(review): this triple-quoted string follows an import, so it is a plain
# string statement rather than the module docstring (__doc__ stays None);
# move it above the import if it is meant to document the package.
"""
decode: Convert PINCODE to Information(District, State etc.).
encode: Convert District to PINCODE.
validate: Verify if Pincode is Correct or Not
"""
# Package version string.
__version__ = "0.1.0"
# Public API: names re-exported from .modules by the star import above.
__all__ = [decode.__name__, encode.__name__, validate.__name__]
| 1.828125 | 2 |
greykite/common/features/timeseries_features.py | CaduceusInc/greykite | 1 | 12763256 | # BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# #ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: <NAME>, <NAME>, <NAME>, <NAME>
"""Functions to generate derived time features useful
in forecasting, such as growth, seasonality, holidays.
"""
import inspect
import math
import warnings
from datetime import datetime
import fbprophet.hdays as fbholidays
import holidays
import numpy as np
import pandas as pd
from scipy.special import expit
from greykite.common import constants as cst
def convert_date_to_continuous_time(dt):
    """Represents a datetime as a fractional year (one unit per year).

    Parameters
    ----------
    dt : datetime object
        The date to convert.

    Returns
    -------
    conti_date : `float`
        The date expressed in years, e.g. 2018.5 is mid-2018.
    """
    # Total days in this calendar year (365 or 366) via Dec 31's ordinal day.
    days_in_year = datetime(dt.year, 12, 31).timetuple().tm_yday
    day_of_year = dt.timetuple().tm_yday
    # Fraction of the year elapsed at this instant (day 1, 00:00:00 -> 0.0).
    elapsed = (day_of_year - 1
               + dt.hour / 24
               + dt.minute / 1440
               + dt.second / 86400)
    return dt.year + elapsed / float(days_in_year)
def get_default_origin_for_time_vars(df, time_col):
    """Sets default value for origin_for_time_vars.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Training data. A data frame which includes the timestamp and value columns.
    time_col : `str`
        The column name in `df` representing time for the time series data.

    Returns
    -------
    dt_continuous_time : `float`
        The time origin (the first timestamp of ``df``) used to create
        continuous variables for time.
    """
    # Positional access: `df[time_col][0]` is label-based and raises KeyError
    # when `df` has a non-default index (e.g. after filtering/slicing).
    date = pd.to_datetime(df[time_col].iloc[0])
    return convert_date_to_continuous_time(date)
def build_time_features_df(dt, conti_year_origin):
    """This function gets a datetime-like vector and creates new columns containing temporal
    features useful for time series analysis and forecasting e.g. year, week of year, etc.

    Parameters
    ----------
    dt : array-like (1-dimensional)
        A vector of datetime-like values
    conti_year_origin : float
        The origin used for creating continuous time.

    Returns
    -------
    time_features_df : `pandas.DataFrame`
        Dataframe with the following time features.

        * "datetime": `datetime.datetime` object, a combination of date and a time
        * "date": `datetime.date` object, date with the format (year, month, day)
        * "year": integer, year of the date e.g. 2018
        * "year_length": integer, number of days in the year e.g. 365 or 366
        * "quarter": integer, quarter of the date, 1, 2, 3, 4
        * "quarter_start": `pandas.DatetimeIndex`, date of beginning of the current quarter
        * "quarter_length": integer, number of days in the quarter, 90/91 for Q1, 91 for Q2, 92 for Q3 and Q4
        * "month": integer, month of the year, January=1, February=2, ..., December=12
        * "month_length": integer, number of days in the month, 28/ 29/ 30/ 31
        * "woy": integer, ISO 8601 week of the year where a week starts from Monday, 1, 2, ..., 53
        * "doy": integer, ordinal day of the year, 1, 2, ..., year_length
        * "doq": integer, ordinal day of the quarter, 1, 2, ..., quarter_length
        * "dom": integer, ordinal day of the month, 1, 2, ..., month_length
        * "dow": integer, day of the week, Monday=1, Tuesday=2, ..., Sunday=7
        * "str_dow": string, day of the week as a string e.g. "1-Mon", "2-Tue", ..., "7-Sun"
        * "str_doy": string, day of the year e.g. "2020-03-20" for March 20, 2020
        * "hour": integer, discrete hours of the datetime, 0, 1, ..., 23
        * "minute": integer, minutes of the datetime, 0, 1, ..., 59
        * "second": integer, seconds of the datetime, 0, 1, ..., 3599
        * "year_month": string, (year, month) e.g. "2020-03" for March 2020
        * "year_woy": string, (year, week of year) e.g. "2020_42" for 42nd week of 2020
        * "month_dom": string, (month, day of month) e.g. "02/20" for February 20th
        * "year_woy_dow": string, (year, week of year, day of week) e.g. "2020_03_6" for Saturday of 3rd week in 2020
        * "woy_dow": string, (week of year, day of week) e.g. "03_6" for Saturday of 3rd week
        * "dow_hr": string, (day of week, hour) e.g. "4_09" for 9am on Thursday
        * "dow_hr_min": string, (day of week, hour, minute) e.g. "4_09_10" for 9:10am on Thursday
        * "tod": float, time of day, continuous, 0.0 to 24.0
        * "tow": float, time of week, continuous, 0.0 to 7.0
        * "tom": float, standardized time of month, continuous, 0.0 to 1.0
        * "toq": float, time of quarter, continuous, 0.0 to 1.0
        * "toy": float, standardized time of year, continuous, 0.0 to 1.0
        * "conti_year": float, year in continuous time, eg 2018.5 means middle of the year 2018
        * "is_weekend": boolean, weekend indicator, True for weekend, else False
        * "dow_grouped": string, Monday-Thursday=1234-MTuWTh, Friday=5-Fri, Saturday=6-Sat, Sunday=7-Sun
        * "ct1": float, linear growth based on conti_year_origin, -infinity to infinity
        * "ct2": float, signed quadratic growth, -infinity to infinity
        * "ct3": float, signed cubic growth, -infinity to infinity
        * "ct_sqrt": float, signed square root growth, -infinity to infinity
        * "ct_root3": float, signed cubic root growth, -infinity to infinity
    """
    dt = pd.DatetimeIndex(dt)
    if len(dt) == 0:
        raise ValueError("Length of dt cannot be zero.")
    # basic time features
    date = dt.date
    year = dt.year
    # 365 for regular years, 366 for leap years (is_leap_year is boolean)
    year_length = (365.0 + dt.is_leap_year)
    quarter = dt.quarter
    month = dt.month
    month_length = dt.days_in_month
    # finds first day of quarter
    quarter_start = pd.DatetimeIndex(
        dt.year.map(str) + "-" + (3 * quarter - 2).map(int).map(str) + "-01")
    next_quarter_start = dt + pd.tseries.offsets.QuarterBegin(startingMonth=1)
    quarter_length = (next_quarter_start - quarter_start).days
    # finds offset from first day of quarter (rounds down to nearest day)
    doq = ((dt - quarter_start) / pd.to_timedelta("1D") + 1).astype(int)
    # week of year, "woy", follows ISO 8601:
    # - Week 01 is the week with the year's first Thursday in it.
    # - A week begins with Monday and ends with Sunday.
    # So the week number of the week that overlaps both years, is 1, 52, or 53,
    # depending on whether it has more days in the previous year or new year.
    # - e.g. Jan 1st, 2018 is Monday. woy of first 8 days = [1, 1, 1, 1, 1, 1, 1, 2]
    # - e.g. Jan 1st, 2019 is Tuesday. woy of first 8 days = [1, 1, 1, 1, 1, 1, 2, 2]
    # - e.g. Jan 1st, 2020 is Wednesday. woy of first 8 days = [1, 1, 1, 1, 1, 2, 2, 2]
    # - e.g. Jan 1st, 2015 is Thursday. woy of first 8 days = [1, 1, 1, 1, 2, 2, 2, 2]
    # - e.g. Jan 1st, 2021 is Friday. woy of first 8 days = [53, 53, 53, 1, 1, 1, 1, 1]
    # - e.g. Jan 1st, 2022 is Saturday. woy of first 8 days = [52, 52, 1, 1, 1, 1, 1, 1]
    # - e.g. Jan 1st, 2023 is Sunday. woy of first 8 days = [52, 1, 1, 1, 1, 1, 1, 1]
    woy = dt.strftime("%V").astype(int)
    doy = dt.dayofyear
    dom = dt.day
    dow = dt.strftime("%u").astype(int)
    str_dow = dt.strftime("%u-%a")  # e.g. 1-Mon, 2-Tue, ..., 7-Sun
    hour = dt.hour
    minute = dt.minute
    second = dt.second
    # grouped time feature
    str_doy = dt.strftime("%Y-%m-%d")  # e.g. 2020-03-20 for March 20, 2020
    year_month = dt.strftime("%Y-%m")  # e.g. 2020-03 for March 2020
    month_dom = dt.strftime("%m/%d")  # e.g. 02/20 for February 20th
    year_woy = dt.strftime("%Y_%V")  # e.g. 2020_42 for 42nd week of 2020
    year_woy_dow = dt.strftime("%Y_%V_%u")  # e.g. 2020_03_6 for Saturday of 3rd week in 2020
    # NOTE(review): "woy_dow" uses "%W" (weeks start Monday, numbered from 00)
    # while "woy" and "year_woy_dow" use ISO "%V"; the two numberings can
    # disagree near year boundaries -- confirm whether this is intentional.
    woy_dow = dt.strftime("%W_%u")  # e.g. 03_6 for Saturday of 3rd week
    dow_hr = dt.strftime("%u_%H")  # e.g. 4_09 for 9am on Thursday
    dow_hr_min = dt.strftime("%u_%H_%M")  # e.g. 4_09_10 for 9:10am on Thursday
    # derived time features
    tod = hour + (minute / 60.0) + (second / 3600.0)
    tow = dow - 1 + (tod / 24.0)
    tom = (dom - 1 + (tod / 24.0)) / month_length
    toq = (doq - 1 + (tod / 24.0)) / quarter_length
    # time of year, continuous, 0.0 to 1.0. e.g. Jan 1, 12 am = 0/365, Jan 2, 12 am = 1/365, ...
    # To handle leap years, Feb 28 = 58/365 - 59/365, Feb 29 = 59/365, Mar 1 = 59/365 - 60/365
    # offset term is nonzero only in leap years
    # doy_offset reduces doy by 1 from from Mar 1st (doy > 60)
    doy_offset = (year_length == 366) * 1.0 * (doy > 60)
    # tod_offset sets tod to 0 on Feb 29th (doy == 60)
    tod_offset = 1 - (year_length == 366) * 1.0 * (doy == 60)
    toy = (doy - 1 - doy_offset + (tod / 24.0) * tod_offset) / 365.0
    # year of date in continuous time, eg 2018.5 means middle of year 2018
    # this is useful for modeling features that do not care about leap year e.g. environmental variables
    conti_year = year + (doy - 1 + (tod / 24.0)) / year_length
    is_weekend = pd.Series(dow).apply(lambda x: x in [6, 7]).values  # weekend indicator
    # categorical var with levels (Mon-Thu, Fri, Sat, Sun), could help when training data are sparse.
    dow_grouped = pd.Series(str_dow).apply(lambda x: "1234-MTuWTh" if (x in ["1-Mon", "2-Tue", "3-Wed", "4-Thu"]) else x).values
    # growth terms, signed so that they are defined for dates before the origin
    ct1 = conti_year - conti_year_origin
    ct2 = signed_pow(ct1, 2)
    ct3 = signed_pow(ct1, 3)
    ct_sqrt = signed_pow(ct1, 1/2)
    ct_root3 = signed_pow(ct1, 1/3)
    # All keys must be added to constants.
    features_dict = {
        "datetime": dt,
        "date": date,
        "year": year,
        "year_length": year_length,
        "quarter": quarter,
        "quarter_start": quarter_start,
        "quarter_length": quarter_length,
        "month": month,
        "month_length": month_length,
        "woy": woy,
        "doy": doy,
        "doq": doq,
        "dom": dom,
        "dow": dow,
        "str_dow": str_dow,
        "str_doy": str_doy,
        "hour": hour,
        "minute": minute,
        "second": second,
        "year_month": year_month,
        "year_woy": year_woy,
        "month_dom": month_dom,
        "year_woy_dow": year_woy_dow,
        "woy_dow": woy_dow,
        "dow_hr": dow_hr,
        "dow_hr_min": dow_hr_min,
        "tod": tod,
        "tow": tow,
        "tom": tom,
        "toq": toq,
        "toy": toy,
        "conti_year": conti_year,
        "is_weekend": is_weekend,
        "dow_grouped": dow_grouped,
        "ct1": ct1,
        "ct2": ct2,
        "ct3": ct3,
        "ct_sqrt": ct_sqrt,
        "ct_root3": ct_root3,
    }
    df = pd.DataFrame(features_dict)
    return df
def add_time_features_df(df, time_col, conti_year_origin):
    """Augments a data frame with the time features of `build_time_features_df`.

    :param df: the input data frame
    :param time_col: the name of the time column of interest
    :param conti_year_origin: the origin of time for the continuous time variable
    :return: the same data frame (df) augmented with new columns
    """
    base = df.reset_index(drop=True)
    # Both frames share a fresh RangeIndex so the column-wise concat aligns rows.
    features = build_time_features_df(
        dt=base[time_col],
        conti_year_origin=conti_year_origin).reset_index(drop=True)
    return pd.concat([base, features], axis=1)
def get_holidays(countries, year_start, year_end):
    """Builds one holiday data frame per requested country for the period
    [year_start, year_end], using the pypi:fbprophet and pypi:holidays
    libraries (resembles `~fbprophet.make_holidays.make_holidays_df`).

    Parameters
    ----------
    countries : `list` [`str`]
        countries for which we need holidays
    year_start : `int`
        first year of interest, inclusive
    year_end : `int`
        last year of interest, inclusive

    Returns
    -------
    holiday_df_dict : `dict` [`str`, `pandas.DataFrame`]
        - key: country name
        - value: data frame with holidays for that country
        Each data frame has two columns: EVENT_DF_DATE_COL, EVENT_DF_LABEL_COL
    """
    years = list(range(year_start, year_end + 1))
    holiday_df_dict = {}
    for name in countries:
        try:
            # Prefer the fbprophet holiday set.
            if name in ("India", "IN"):
                # Suppress fbprophet's warning:
                # "We only support Diwali and Holi holidays from 2010 to 2025"
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    raw_holidays = getattr(fbholidays, name)(years=years)
            else:
                raw_holidays = getattr(fbholidays, name)(years=years)
        except AttributeError:
            # Country not in fbprophet; fall back to the pypi:holidays set.
            try:
                raw_holidays = getattr(holidays, name)(years=years)
            except AttributeError:
                raise AttributeError(f"Holidays in {name} are not currently supported!")
        frame = pd.DataFrame({
            cst.EVENT_DF_DATE_COL: list(raw_holidays.keys()),
            cst.EVENT_DF_LABEL_COL: list(raw_holidays.values())})
        frame[cst.EVENT_DF_DATE_COL] = pd.to_datetime(frame[cst.EVENT_DF_DATE_COL])
        holiday_df_dict[name] = frame
    return holiday_df_dict
def get_available_holiday_lookup_countries(countries=None):
    """Returns list of available countries for modeling holidays.

    :param countries: List[str]
        only look for available countries in this set
    :return: List[str]
        list of available countries for modeling holidays
    """
    def _country_classes(module):
        # Country classes defined directly in `module` (not re-exported ones).
        return {name for name, obj in inspect.getmembers(module, inspect.isclass)
                if obj.__module__ == module.__name__}

    available = _country_classes(fbholidays) | _country_classes(holidays)
    if countries is not None:
        available = available.intersection(set(countries))
    available.discard("HolidayBase")  # edge case, remove if found
    return sorted(available)
def get_available_holidays_in_countries(
        countries,
        year_start,
        year_end):
    """Returns a dictionary mapping each country to its holidays
    between the years specified.

    :param countries: List[str]
        countries for which we need holidays
    :param year_start: int
        first year of interest
    :param year_end: int
        last year of interest
    :return: Dict[str, List[str]]
        key: country name
        value: sorted list of unique holiday names in that country
        between [year_start, year_end]
    """
    holiday_dfs = get_holidays(countries, year_start, year_end)
    return {
        country: sorted(set(frame[cst.EVENT_DF_LABEL_COL].values))
        for country, frame in holiday_dfs.items()}
def get_available_holidays_across_countries(
        countries,
        year_start,
        year_end):
    """Returns a list of holidays that occur in any of the countries
    between the years specified.

    :param countries: List[str]
        countries for which we need holidays
    :param year_start: int
        first year of interest
    :param year_end: int
        last year of interest
    :return: List[str]
        sorted names of holidays in any of the countries
        between [year_start, year_end]
    """
    per_country = get_available_holidays_in_countries(
        countries=countries,
        year_start=year_start,
        year_end=year_end)
    # Union of every country's holiday names, de-duplicated.
    combined = set()
    for names in per_country.values():
        combined.update(names)
    return sorted(combined)
def add_daily_events(
        df,
        event_df_dict,
        date_col=cst.EVENT_DF_DATE_COL,
        regular_day_label=cst.EVENT_DEFAULT):
    """Adds one label column per key of ``event_df_dict`` to ``df``,
    joining each events frame on ``date_col``.

    Notes
    -----
    ``df[date_col]`` is converted to datetime in place before merging.

    Parameters
    ----------
    df : `pandas.DataFrame`
        The data frame which has a date column.
    event_df_dict : `dict` [`str`, `pandas.DataFrame`]
        A dictionary of data frames, each representing events data
        for the corresponding key.
        Values are DataFrames with two columns:

        - The first column contains the date. Must be at the same
          frequency as ``df[date_col]`` for proper join. Must be in a
          format recognized by `pandas.to_datetime`.
        - The second column contains the event label for each date.
    date_col : `str`
        Column name in ``df`` that contains the dates for joining against
        the events in ``event_df_dict``.
    regular_day_label : `str`
        The label used for regular days which are not "events".

    Returns
    -------
    df_daily_events : `pandas.DataFrame`
        An augmented data frame version of df with new label columns --
        one for each key of ``event_df_dict``.
    """
    df[date_col] = pd.to_datetime(df[date_col])
    for event_name, events in event_df_dict.items():
        events = events.copy()  # rename columns on a copy, not the caller's frame
        label_col = f"{cst.EVENT_PREFIX}_{event_name}"
        events.columns = [date_col, label_col]
        events[date_col] = pd.to_datetime(events[date_col])
        df = df.merge(events, on=date_col, how="left")
        # Dates with no event entry get the regular-day label.
        df[label_col] = df[label_col].fillna(regular_day_label)
    return df
def add_event_window(
        df,
        time_col,
        label_col,
        time_delta="1D",
        pre_num=1,
        post_num=1,
        events_name=""):
    """Creates shifted copies of an events data frame, before and after
    each event. For example, for the row '2019-12-25, Christmas' with
    ``pre_num=1`` and ``post_num=1`` this produces frames containing
    '2019-12-24, Christmas' and '2019-12-26, Christmas'.

    :param df: pd.DataFrame
        the events data frame with two columns 'time_col' and 'label_col'
    :param time_col: str
        The column with the timestamp of the events
        (daily or any other frequency).
    :param label_col: str
        the column with labels for the events
    :param time_delta: str
        the amount of the shift for each unit, e.g. "1D" for one day
    :param pre_num: int
        the number of shifted copies to add before each event
    :param post_num: int
        the number of shifted copies to add after each event
    :param events_name: str
        prefix for the dictionary keys; keys look like
        "<events_name>_minus_1", "<events_name>_plus_2", etc.
    :return: dict[key: pd.Dataframe]
        A dictionary of data frames, one per shift
        (pre_num + post_num entries in total).
    """
    shifted_frames = {}
    step = pd.to_timedelta(time_delta)
    for offset in range(1, pre_num + 1):
        frame = pd.DataFrame()
        frame[time_col] = df[time_col] - offset * step
        frame[label_col] = df[label_col]
        shifted_frames[f"{events_name}_minus_{offset:.0f}"] = frame
    for offset in range(1, post_num + 1):
        frame = pd.DataFrame()
        frame[time_col] = df[time_col] + offset * step
        frame[label_col] = df[label_col]
        shifted_frames[f"{events_name}_plus_{offset:.0f}"] = frame
    return shifted_frames
def get_evenly_spaced_changepoints_values(
        df,
        continuous_time_col="ct1",
        n_changepoints=2):
    """Partitions the data into n_changepoints + 1 equal segments and
    returns the continuous-time value at the left endpoint of each segment
    (the left-most segment gets no changepoint). Changepoints should be
    determined from training data.

    :param df: pd.DataFrame
        training dataset. contains continuous_time_col
    :param continuous_time_col: str
        name of continuous time column (e.g. conti_year, ct1)
    :param n_changepoints: int
        number of changepoints requested
    :return: np.array
        values of df[continuous_time_col] at the changepoints
    """
    if not n_changepoints > 0:
        raise ValueError("n_changepoints must be > 0")
    segment_size = df.shape[0] / (n_changepoints + 1)
    # Row positions of the segment boundaries (rounded down).
    positions = np.floor(np.arange(start=1, stop=n_changepoints + 1) * segment_size)
    return df[continuous_time_col][positions].values
def get_evenly_spaced_changepoints_dates(
        df,
        time_col,
        n_changepoints):
    """Partitions the data into n_changepoints + 1 equal segments and
    returns the dates at the segment boundaries. The first row's date is
    always included, followed by the n_changepoints boundary dates.
    Changepoints should be determined from training data.

    :param df: pd.DataFrame
        training dataset. contains time_col
    :param time_col: str
        name of time column
    :param n_changepoints: int
        number of changepoints requested
    :return: pd.Series
        values of df[time_col] at the changepoints
    """
    if not n_changepoints >= 0:
        raise ValueError("n_changepoints must be >= 0")
    segment_size = df.shape[0] / (n_changepoints + 1)
    boundary_positions = np.floor(np.arange(start=1, stop=n_changepoints + 1) * segment_size)
    # Prepend position 0 so the first observation is always returned.
    labels = df.index[np.concatenate([[0], boundary_positions.astype(int)])]
    return df.loc[labels, time_col]
def get_custom_changepoints_values(
        df,
        changepoint_dates,
        time_col=cst.TIME_COL,
        continuous_time_col="ct1"):
    """Returns the values of continuous_time_col at the
    requested changepoint_dates.

    :param df: pd.DataFrame
        training dataset. contains continuous_time_col and time_col
    :param changepoint_dates: Iterable[Union[int, float, str, datetime]]
        Changepoint dates, interpreted by pd.to_datetime.
        Changepoints are set at the closest time on or after these dates
        in the dataset
    :param time_col: str
        The column name in `df` representing time for the time series data
        The time column can be anything that can be parsed by pandas DatetimeIndex
    :param continuous_time_col: str
        name of continuous time column (e.g. conti_year, ct1)
    :return: np.array
        values of df[continuous_time_col] at the changepoints,
        or None if no requested date falls within the data range
    """
    timestamps = pd.to_datetime(df[time_col])
    requested = pd.to_datetime(changepoint_dates)
    # Snap each requested date forward to the first observed timestamp >= it;
    # dates past the last observation are dropped (not useful anyway).
    snapped = [timestamps[timestamps >= date].min()
               for date in requested
               if (timestamps >= date).any()]
    selected = df[timestamps.isin(snapped)][continuous_time_col].values
    return selected if selected.shape[0] > 0 else None
def get_changepoint_string(changepoint_dates):
"""Gets proper formatted strings for changepoint dates.
The default format is "_%Y_%m_%d_%H". When necessary, it appends "_%M" or "_%M_%S".
Parameters
----------
changepoint_dates : `list`
List of changepoint dates, parsable by `pandas.to_datetime`.
Returns
-------
date_strings : `list[`str`]`
List of string formatted changepoint dates.
"""
changepoint_dates = list(pd.to_datetime(changepoint_dates))
time_format = "_%Y_%m_%d_%H"
if any([stamp.second != 0 for stamp in changepoint_dates]):
time_format += "_%M_%S"
elif any([stamp.minute != 0 for stamp in changepoint_dates]):
time_format += "_%M"
date_strings = [date.strftime(time_format) for date in changepoint_dates]
return date_strings
def get_changepoint_features(
        df,
        changepoint_values,
        continuous_time_col="ct1",
        growth_func=None,
        changepoint_dates=None):
    """Returns features for growth terms with continuous time origins at
    the specified changepoint_values (locations).

    For each changepoint c, the feature value at continuous time t is
    ``growth_func(t - c) * I(t >= c)`` where I is the indicator function,
    i.e. growth as a function of time with the time origin at the changepoint.
    When ``growth_func(0) == 0`` the total effect is continuous in time; with
    the identity ``growth_func`` these terms form the basis of a continuous,
    piecewise linear trend whose fitted coefficients are slope changes.

    :param df: pd.Dataframe
        The dataset to make predictions. Contains column continuous_time_col.
    :param changepoint_values: array-like
        Changepoint values (on the same scale as df[continuous_time_col]).
        Should be determined from training data.
    :param continuous_time_col: Optional[str]
        Name of continuous time column in df; growth_func is applied to it.
        If None, uses "ct1" (linear growth).
    :param growth_func: Optional[callable]
        Growth function (scalar -> scalar). If None, uses the identity.
    :param changepoint_dates: Optional[list]
        Changepoint dates, parsable by `pandas.to_datetime`; used to build
        readable column-name suffixes.
    :return: pd.DataFrame, shape (df.shape[0], len(changepoints))
        Changepoint features, 0-indexed.
    """
    if continuous_time_col is None:
        continuous_time_col = "ct1"
    if growth_func is None:
        def growth_func(x):
            return x
    if changepoint_dates is not None:
        time_postfixes = get_changepoint_string(changepoint_dates)
    else:
        time_postfixes = [""] * len(changepoint_values)
    # Collect one Series per changepoint and concatenate once at the end;
    # calling pd.concat inside the loop would be quadratic in the number
    # of changepoints.
    changepoint_features = []
    for i, changepoint in enumerate(changepoint_values):
        time_feature = np.array(df[continuous_time_col]) - changepoint  # shifted time column (t - c_i)
        growth_term = np.array([growth_func(max(x, 0)) for x in time_feature])  # growth as a function of time
        time_feature_ind = time_feature >= 0  # Indicator(t >= c_i), lets changepoint take effect starting at c_i
        new_col = growth_term * time_feature_ind
        changepoint_features.append(
            pd.Series(new_col, name=f"{cst.CHANGEPOINT_COL_PREFIX}{i}{time_postfixes[i]}"))
    if not changepoint_features:
        return pd.DataFrame()
    return pd.concat(changepoint_features, axis=1)
def get_changepoint_values_from_config(
        changepoints_dict,
        time_features_df,
        time_col=cst.TIME_COL):
    """Applies the changepoint method specified in ``changepoints_dict`` and
    returns the changepoint values.

    :param changepoints_dict: Optional[Dict[str, any]]
        Changepoint configuration. Required key:
            "method": "uniform" (evenly spaced; also requires "n_changepoints")
            or "custom" (at given dates; also requires "dates", parsable by
            pd.to_datetime and snapped to the closest time on or after each
            date in the dataset).
        Optional keys:
            "continuous_time_col": column `growth_func` is applied to when
            generating changepoint features (typically matches the model's
            growth term).
            "growth_func": growth function (scalar -> scalar); identity when
            None.
    :param time_features_df: pd.Dataframe
        Training dataset; contains the continuous time column.
    :param time_col: str
        Name of the time column in ``time_features_df`` (anything pandas
        DatetimeIndex can parse). Only used by the "custom" method.
    :return: np.array
        Values of df[continuous_time_col] at the changepoints, or None when
        ``changepoints_dict`` is None (or "uniform" with 0 changepoints).
    """
    if changepoints_dict is None:
        return None
    valid_changepoint_methods = ["uniform", "custom"]
    changepoint_method = changepoints_dict.get("method")
    if changepoint_method is None:
        raise Exception("changepoint method must be specified")
    if changepoint_method not in valid_changepoint_methods:
        raise NotImplementedError(
            f"changepoint method {changepoint_method} not recognized. "
            f"Must be one of {valid_changepoint_methods}")
    continuous_time_col = changepoints_dict.get("continuous_time_col")
    changepoint_values = None
    if changepoint_method == "uniform":
        if changepoints_dict["n_changepoints"] > 0:
            kwargs = {}
            if continuous_time_col is not None:
                kwargs["continuous_time_col"] = continuous_time_col
            changepoint_values = get_evenly_spaced_changepoints_values(
                df=time_features_df,
                n_changepoints=changepoints_dict["n_changepoints"],
                **kwargs)
    else:  # changepoint_method == "custom"
        kwargs = {}
        if time_col is not None:
            kwargs["time_col"] = time_col
        if continuous_time_col is not None:
            kwargs["continuous_time_col"] = continuous_time_col
        changepoint_values = get_custom_changepoints_values(
            df=time_features_df,
            changepoint_dates=changepoints_dict["dates"],
            **kwargs)
    return changepoint_values
def get_changepoint_features_and_values_from_config(
        df,
        time_col,
        changepoints_dict=None,
        origin_for_time_vars=None):
    """Extracts changepoint features and values from config and input data.

    :param df: pd.DataFrame
        Training data; includes the timestamp and value columns.
    :param time_col: str
        Name of the time column in ``df`` (anything pandas DatetimeIndex can
        parse).
    :param changepoints_dict: Optional[Dict[str, any]]
        Changepoint configuration with keys "method" ("uniform" or "custom"),
        optional "continuous_time_col" and "growth_func", plus
        "n_changepoints" (uniform) or "dates" (custom). See
        `get_changepoint_values_from_config` for the full description.
    :param origin_for_time_vars: Optional[float]
        Time origin used to create continuous time variables; derived from
        ``df`` when None.
    :return: Dict[str, any]
        "changepoint_df": pd.DataFrame of changepoint features for the
            training data (None when there are no changepoints),
        "changepoint_values": changepoint locations on the continuous time
            scale (None when there are no changepoints),
        "continuous_time_col": echoed from the config, for reuse when
            generating changepoints at prediction time,
        "growth_func": echoed from the config, for reuse at prediction time,
        "changepoint_cols": list of changepoint feature column names
            (empty when there are no changepoints).
    """
    # extracts changepoint values
    if changepoints_dict is None:
        changepoint_values = None
        continuous_time_col = None
        growth_func = None
    else:
        if origin_for_time_vars is None:
            # Derive the continuous-time origin from the training data itself.
            origin_for_time_vars = get_default_origin_for_time_vars(df, time_col)
        time_features_df = build_time_features_df(
            df[time_col],
            conti_year_origin=origin_for_time_vars)
        changepoint_values = get_changepoint_values_from_config(
            changepoints_dict=changepoints_dict,
            time_features_df=time_features_df,
            time_col="datetime")  # datetime column generated by `build_time_features_df`
        continuous_time_col = changepoints_dict.get("continuous_time_col")
        growth_func = changepoints_dict.get("growth_func")
    # extracts changepoint column names
    if changepoint_values is None:
        changepoint_df = None
        changepoint_cols = []
    else:
        # NOTE(review): `changepoint_values is not None` implies
        # `changepoints_dict is not None`, so `time_features_df` is always
        # defined below and this first branch is effectively unreachable.
        if changepoints_dict is None:
            changepoint_dates = None
        elif changepoints_dict["method"] == "custom":
            changepoint_dates = list(pd.to_datetime(changepoints_dict["dates"]))
        elif changepoints_dict["method"] == "uniform":
            changepoint_dates = get_evenly_spaced_changepoints_dates(
                df=df,
                time_col=time_col,
                n_changepoints=changepoints_dict["n_changepoints"]
            ).tolist()[1:]  # the changepoint features does not include the growth term
        else:
            changepoint_dates = None
        changepoint_df = get_changepoint_features(
            df=time_features_df,
            changepoint_values=changepoint_values,
            continuous_time_col=continuous_time_col,
            growth_func=growth_func,
            changepoint_dates=changepoint_dates)
        changepoint_cols = list(changepoint_df.columns)
    return {
        "changepoint_df": changepoint_df,
        "changepoint_values": changepoint_values,
        "continuous_time_col": continuous_time_col,
        "growth_func": growth_func,
        "changepoint_cols": changepoint_cols
    }
def get_changepoint_dates_from_changepoints_dict(
        changepoints_dict,
        df=None,
        time_col=None):
    """Gets the changepoint dates from ``changepoints_dict``.

    Parameters
    ----------
    changepoints_dict : `dict` or `None`
        A ``changepoints_dict`` compatible with
        `~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`.
    df : `pandas.DataFrame` or `None`, default `None`
        The data df to put changepoints on (required for the "uniform" method).
    time_col : `str` or `None`, default `None`
        The column name of the time column in ``df`` (required for "uniform").

    Returns
    -------
    changepoint_dates : `list`
        List of changepoint dates, or None when the dict is missing/invalid.
    """
    if changepoints_dict is None:
        return None
    method = changepoints_dict.get("method")
    if method not in ("auto", "uniform", "custom"):
        return None
    if method == "custom":
        # changepoints_dict["dates"] is `Iterable`; convert to list.
        return list(changepoints_dict["dates"])
    if method == "uniform":
        if df is None or time_col is None:
            raise ValueError("When the method of ``changepoints_dict`` is 'uniform', ``df`` and "
                             "``time_col`` must be provided.")
        dates = get_evenly_spaced_changepoints_dates(
            df=df,
            time_col=time_col,
            n_changepoints=changepoints_dict["n_changepoints"]
        )
        # The result is a `pandas.Series`; drop the first entry (it belongs
        # to the growth term, not to a changepoint) and convert to list.
        return dates.tolist()[1:]
    # method == "auto": detected changepoints are not available here.
    raise ValueError("The method of ``changepoints_dict`` can not be 'auto'. "
                     "Please specify or detect change points first.")
def add_event_window_multi(
        event_df_dict,
        time_col,
        label_col,
        time_delta="1D",
        pre_num=1,
        post_num=1,
        pre_post_num_dict=None):
    """Adds shifted copies of events before and after the given events.

    For every events data frame in ``event_df_dict`` this produces, per shift,
    a data frame whose event dates are moved by multiples of ``time_delta``.
    E.g. from a row '2019-12-25, Christmas' with pre_num and post_num of 1 or
    more, data frames containing '2019-12-24, Christmas' and
    '2019-12-26, Christmas' are produced.

    Parameters
    ----------
    event_df_dict : `dict` [`str`, `pandas.DataFrame`]
        Events data frames, each with two columns: ``time_col`` and
        ``label_col``.
    time_col : `str`
        Column with the event timestamps (can be daily but does not have to be).
    label_col : `str`
        Column with the event labels.
    time_delta : `str`, default "1D"
        Amount of one shift step, e.g. '1D' for a one-day delta.
    pre_num : `int`, default 1
        Number of shifted copies added before each event.
    post_num : `int`, default 1
        Number of shifted copies added after each event.
    pre_post_num_dict : `dict` [`str`, (`int`, `int`)] or None, default None
        Per-key overrides of ``(pre_num, post_num)``, e.g.
        ``{"US": [1, 3], "India": [1, 2]}``. Keys not listed fall back to
        ``pre_num`` and ``post_num``.

    Returns
    -------
    df : `dict` [`str`, `pandas.DataFrame`]
        One data frame per shift; e.g. pre_num=2 and post_num=3 yields
        2 + 3 = 5 data frames per events key.
    """
    if pre_post_num_dict is None:
        pre_post_num_dict = {}
    shifted_df_dict = {}
    for events_key, event_df in event_df_dict.items():
        if events_key in pre_post_num_dict:
            pre_shifts = pre_post_num_dict[events_key][0]
            post_shifts = pre_post_num_dict[events_key][1]
        else:
            pre_shifts, post_shifts = pre_num, post_num
        shifted_df_dict.update(add_event_window(
            df=event_df,
            time_col=time_col,
            label_col=label_col,
            time_delta=time_delta,
            pre_num=pre_shifts,
            post_num=post_shifts,
            events_name=events_key))
    return shifted_df_dict
def get_fourier_col_name(k, col_name, function_name="sin", seas_name=None):
    """Builds the column name of one Fourier term, as produced by
    fourier_series_fcn.

    :param k: int
        Fourier order of the term.
    :param col_name: str
        Column in the dataframe used to generate the Fourier series.
    :param function_name: str
        Either "sin" or "cos".
    :param seas_name: Optional[str]
        Seasonality label appended to the generated name.
    :return: str
        Column name of the form ``{function_name}{k}_{col_name}[_{seas_name}]``.
    """
    # patsy doesn't allow "." in formula terms, so names use "_" separators
    # rather than quoting every Fourier term with "Q()".
    base = f"{function_name}{k:.0f}_{col_name}"
    return base if seas_name is None else f"{base}_{seas_name}"
def fourier_series_fcn(col_name, period=1.0, order=1, seas_name=None):
    """Generates a function which creates a Fourier series matrix for a column
    of an input df.

    :param col_name: str
        Column name in the dataframe used for generating the Fourier series.
        It needs to be a continuous variable.
    :param period: float
        The period of the Fourier series.
    :param order: int
        The order of the Fourier series (number of sin/cos pairs).
    :param seas_name: Optional[str]
        Appended to the new column names added for Fourier terms. Useful to
        distinguish multiple Fourier series on the same col_name with
        different periods.
    :return: callable
        A function mapping any DataFrame with a column named ``col_name`` to
        ``{"df": <DataFrame of Fourier terms>, "cols": <list of new names>}``.
    """
    def fs_func(df):
        if col_name not in df.columns:
            raise ValueError("The data frame does not have the column: " + col_name)
        x = np.array(df[col_name])
        # Angular frequency is loop-invariant: hoist it out of the per-order
        # loop instead of recomputing it for every k.
        omega = 2 * math.pi / period
        out_df = pd.DataFrame()
        out_cols = []
        for i in range(order):
            k = i + 1
            sin_col_name = get_fourier_col_name(
                k,
                col_name,
                function_name="sin",
                seas_name=seas_name)
            cos_col_name = get_fourier_col_name(
                k,
                col_name,
                function_name="cos",
                seas_name=seas_name)
            out_cols.append(sin_col_name)
            out_cols.append(cos_col_name)
            u = omega * k * x
            out_df[sin_col_name] = np.sin(u)
            out_df[cos_col_name] = np.cos(u)
        return {"df": out_df, "cols": out_cols}
    return fs_func
def fourier_series_multi_fcn(
        col_names,
        periods=None,
        orders=None,
        seas_names=None):
    """Generates a function which adds multiple Fourier series with multiple
    periods.

    Parameters
    ----------
    col_names : `list` [`str`]
        Column names used to generate Fourier series; each column can have
        its own period and order.
    periods : `list` [`float`] or None
        Period for each column in ``col_names`` (defaults to 1.0 each).
    orders : `list` [`int`] or None
        Order for each Fourier series (defaults to 1 each).
    seas_names : `list` [`str`] or None
        Labels appended to the Fourier series names. If None, ``col_names``
        are used directly.
    """
    n_series = len(col_names)
    if periods is None:
        periods = [1.0] * n_series
    if orders is None:
        orders = [1] * n_series
    if len(periods) != len(orders):
        raise ValueError("periods and orders must have the same length.")

    def fs_multi_func(df):
        out_df = None
        out_cols = []
        for idx, col_name in enumerate(col_names):
            seas_name = seas_names[idx] if seas_names is not None else None
            single_fcn = fourier_series_fcn(
                col_name=col_name,
                period=periods[idx],
                order=orders[idx],
                seas_name=seas_name)
            result = single_fcn(df)
            # pd.concat silently drops the initial None.
            out_df = pd.concat([out_df, result["df"]], axis=1)
            out_cols.extend(result["cols"])
        return {"df": out_df, "cols": out_cols}
    return fs_multi_func
def signed_pow(x, y):
    """Signed power: ``sign(x) * abs(x) ** y``.

    Raising ``abs(x)`` (rather than ``x``) keeps the result real for
    fractional exponents, and multiplying by ``sign(x)`` guarantees the
    function is non-decreasing, which is useful in many contexts,
    e.g. statistical modeling.

    :param x: the base number, any real number (scalar or array)
    :param y: the power, any real number
    :return: ``abs(x)`` raised to the power ``y``, multiplied by ``sign(x)``
    """
    magnitude = np.power(np.abs(x), y)
    return np.sign(x) * magnitude


def signed_pow_fcn(y):
    """Returns a one-argument function ``x -> signed_pow(x, y)``."""
    return lambda x: signed_pow(x, y)


# Common special cases: signed square root and signed square.
signed_sqrt = signed_pow_fcn(1 / 2)
signed_sq = signed_pow_fcn(2)
def logistic(x, growth_rate=1.0, capacity=1.0, floor=0.0, inflection_point=0.0):
    """Evaluates the logistic function
    ``floor + capacity / (1 + exp(-growth_rate * (x - inflection_point)))``.

    :param x: value at which to evaluate the logistic function
    :type x: float
    :param growth_rate: growth rate (steepness of the curve)
    :type growth_rate: float
    :param capacity: max value above ``floor`` (carrying capacity)
    :type capacity: float
    :param floor: min value (lower bound)
    :type floor: float
    :param inflection_point: the t value of the inflection point
    :type inflection_point: float
    :return: value of the logistic function at ``x``
    :rtype: float
    """
    z = growth_rate * (x - inflection_point)
    return floor + capacity * expit(z)


def get_logistic_func(growth_rate=1.0, capacity=1.0, floor=0.0, inflection_point=0.0):
    """Returns a one-argument logistic function with the given parameters
    fixed:

    ``f(t) = floor + capacity / (1 + exp(-growth_rate * (t - inflection_point)))``

    :param growth_rate: growth rate
    :type growth_rate: float
    :param capacity: max value (carrying capacity)
    :type capacity: float
    :param floor: min value (lower bound)
    :type floor: float
    :param inflection_point: the t value of the inflection point
    :type inflection_point: float
    :return: the logistic function with the specified parameters
    :rtype: callable
    """
    def _logistic_func(t):
        return logistic(t, growth_rate, capacity, floor, inflection_point)
    return _logistic_func
| 1.726563 | 2 |
bin/lib/releases.py | jfalcou/infra | 135 | 12763257 | <filename>bin/lib/releases.py<gh_stars>100-1000
from enum import Enum
from typing import Optional, Tuple
from attr import dataclass
@dataclass(frozen=True)
class Hash:
    """An immutable content hash, displayed in abbreviated head..tail form."""
    hash: str

    def __str__(self) -> str:
        head, tail = self.hash[:6], self.hash[-6:]
        return f'{head}..{tail}'
class VersionSource(Enum):
    """CI system a build came from; ordered so GitHub sorts after Travis."""
    value: Tuple[int, str]
    TRAVIS = (0, 'tr')
    GITHUB = (1, 'gh')

    def __lt__(self, other):
        # Tuple comparison: the integer rank decides the ordering.
        return self.value < other.value

    def __str__(self):
        # The short prefix used in serialized version strings, e.g. "gh".
        return f'{self.value[1]}'


@dataclass(frozen=True, repr=False)
class Version:
    """A build version: a source (CI system) plus a build number."""
    source: VersionSource
    number: int

    @staticmethod
    def from_string(version_str: str, assumed_source: VersionSource = VersionSource.GITHUB):
        """Parse 'tr-12' / 'gh-34' / bare '34' (uses ``assumed_source``)."""
        if '-' not in version_str:
            return Version(assumed_source, int(version_str))
        prefix, build_number = version_str.split('-')
        for candidate in VersionSource:
            if candidate.value[1] == prefix:
                return Version(candidate, int(build_number))
        raise RuntimeError(f'Unknown source {prefix}')

    def __str__(self):
        return f'{self.source}-{self.number}'

    def __repr__(self):
        return str(self)
@dataclass
class Release:
    # A single published release artifact and its metadata.
    # Parsed build version, e.g. "gh-123" (see Version).
    version: Version
    # Branch the release was built from.
    branch: str
    # Storage key of the release artifact -- presumably an object-store
    # (e.g. S3) path; confirm against the code that writes releases.
    key: str
    # Storage key of the companion "info" object for this release.
    info_key: str
    # Artifact size in bytes.
    size: int
    # Content hash of the artifact (see Hash).
    hash: Hash
    # Storage key of the static assets bundle, when one exists.
    static_key: Optional[str] = None
| 2.453125 | 2 |
agent_code/amy_santiago/train.py | stefanDeveloper/bomberman | 3 | 12763258 | <reponame>stefanDeveloper/bomberman<filename>agent_code/amy_santiago/train.py
import pickle
import random
import numpy as np
from collections import namedtuple, deque
from typing import List
import torch
from torch import optim
import torch.nn.functional as F
import events as e
# This is only an example!
import settings
from .callbacks import state_to_features, ACTIONS
from .model import DQN
from .replay_memory import ReplayMemory
# One SARS transition: (state, action, next_state, reward).
Transition = namedtuple('Transition',
                        ('state', 'action', 'next_state', 'reward'))

# Hyper parameters -- DO modify
TRANSITION_HISTORY_SIZE = 3  # keep only ... last transitions
RECORD_ENEMY_TRANSITIONS = 1.0  # record enemy transitions with probability ...
BATCH_SIZE = 128  # transitions sampled per optimization step
GAMMA = 0.999  # discount factor for future rewards
# NOTE(review): EPS_START/EPS_END/EPS_DECAY are not referenced in this file;
# presumably used for epsilon-greedy exploration in callbacks.py -- confirm.
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
TARGET_UPDATE = 4  # copy policy-net weights to the target net every N rounds

# Events
# Defined by ICEC 2019
LAST_MAN_STANDING = "LAST_MAN_STANDING"
CLOSER_TO_ENEMY = "CLOSER_TO_ENEMY"
CLOSEST_TO_ENEMY = "CLOSEST_TO_ENEMY"
FARTHER_TO_ENEMY = "FARTHER_TO_ENEMY"
DANGER_ZONE_BOMB = "DANGER_ZONE_BOMB"  # agent stands on a bomb's blast line
SAFE_CELL_BOMB = "SAFE_CELL_BOMB"  # agent is outside every bomb's blast line
ALREADY_VISITED_EVENT = "ALREADY_VISITED_EVENT"  # agent revisited a tile
def setup_training(self):
    """
    Initialise self for training purpose.

    This is called after `setup` in callbacks.py.

    :param self: This object is passed to all callbacks and you can set arbitrary values.
    """
    # Example: Setup an array that will note transition tuples
    # (s, a, r, s')
    self.transitions = deque(maxlen=TRANSITION_HISTORY_SIZE)
    # One network output per entry in ACTIONS (defined in callbacks.py).
    n_actions = 6
    # 867 is the expected feature-vector length from state_to_features --
    # TODO confirm (optimize_model reshapes to 578 below, which looks
    # inconsistent with this input width).
    self.policy_net = DQN(867, n_actions)
    self.target_net = DQN(867, n_actions)
    # The target net starts as an exact copy of the policy net and is kept
    # in eval mode; it is only refreshed every TARGET_UPDATE rounds.
    self.target_net.load_state_dict(self.policy_net.state_dict())
    self.target_net.eval()
    # 17x17 visit maps -- presumably the board size including border walls;
    # used for the ALREADY_VISITED_EVENT reward shaping. Confirm against
    # settings.
    self.visited = np.zeros((17, 17))
    self.visited_before = np.zeros((17, 17))
    self.steps_done = 0
    self.rounds = 0
    self.optimizer = optim.RMSprop(self.policy_net.parameters())
    self.memory = ReplayMemory(100000)  # replay buffer capacity
def game_events_occurred(self, old_game_state: dict, self_action: str, new_game_state: dict, events: List[str]):
    """
    Called once per step to allow intermediate rewards based on game events.

    When this method is called, self.events will contain a list of all game
    events relevant to your agent that occurred during the previous step. Consult
    settings.py to see what events are tracked. You can hand out rewards to your
    agent based on these events and your knowledge of the (new) game state.

    This is *one* of the places where you could update your agent.

    :param self: This object is passed to all callbacks and you can set arbitrary values.
    :param old_game_state: The state that was passed to the last call of `act`.
    :param self_action: The action that you took.
    :param new_game_state: The state the agent is in now.
    :param events: The events that occurred when going from `old_game_state` to `new_game_state`
    """
    self.logger.debug(f'Encountered game event(s) {", ".join(map(repr, events))} in step {new_game_state["step"]}')

    # Distances to enemies
    if old_game_state:
        # NOTE(review): pos_old is unpacked but never used below.
        _, _, _, pos_old = old_game_state["self"]
        _, _, _, pos_current = new_game_state["self"]

        # Bomb blast range
        # TODO What happens if two bombs are in reach of current position?
        current_bombs = new_game_state["bombs"]
        is_getting_bombed = False
        # For each bomb, walk up to BOMB_POWER tiles along each of the four
        # axis directions; a wall tile (field == -1) blocks the blast, so the
        # scan stops at the first wall. The flag is set when the agent's tile
        # lies on any blast line.
        for (x, y), countdown in current_bombs:
            for i in range(0, settings.BOMB_POWER + 1):
                if new_game_state['field'][x + i, y] == -1:
                    break
                # Check current position
                if pos_current == (x + i, y):
                    is_getting_bombed = True
            for i in range(0, settings.BOMB_POWER + 1):
                if new_game_state['field'][x - i, y] == -1:
                    break
                # Check current position
                if pos_current == (x - i, y):
                    is_getting_bombed = True
            for i in range(0, settings.BOMB_POWER + 1):
                if new_game_state['field'][x, y + i] == -1:
                    break
                # Check current position
                if pos_current == (x, y + i):
                    is_getting_bombed = True
            for i in range(0, settings.BOMB_POWER + 1):
                if new_game_state['field'][x, y - i] == -1:
                    break
                # Check current position
                if pos_current == (x, y - i):
                    is_getting_bombed = True
        # Reward shaping: penalize standing in a blast line, reward safe tiles.
        if is_getting_bombed:
            events.append(DANGER_ZONE_BOMB)
            self.logger.debug(f'Add game event {DANGER_ZONE_BOMB} in step {new_game_state["step"]}')
        else:
            events.append(SAFE_CELL_BOMB)
            self.logger.debug(f'Add game event {SAFE_CELL_BOMB} in step {new_game_state["step"]}')

        if self.visited_before[pos_current[0]][pos_current[1]] == 1:
            events.append(ALREADY_VISITED_EVENT)
            self.logger.debug(f'Add game event {ALREADY_VISITED_EVENT} in step {new_game_state["step"]}')
        # NOTE(review): this binds visited_before to the *same* array object
        # as visited (no copy), so the write on the next line marks both
        # arrays -- `self.visited.copy()` may have been intended; confirm.
        self.visited_before = self.visited
        self.visited[pos_current[0]][pos_current[1]] = 1

    if old_game_state is not None:
        # Store the transition (action encoded as its ACTIONS index) and run
        # one optimization step on the policy network.
        self.memory.push(state_to_features(old_game_state), [ACTIONS.index(self_action)],
                         state_to_features(new_game_state),
                         reward_from_events(self, events))
        optimize_model(self)

    # state_to_features is defined in callbacks.py
    # NOTE(review): here the action is stored as the raw string, unlike the
    # index encoding used for self.memory above -- confirm which encoding
    # consumers of self.transitions expect.
    self.transitions.append(
        Transition(state_to_features(old_game_state), self_action, state_to_features(new_game_state),
                   reward_from_events(self, events)))
def end_of_round(self, last_game_state: dict, last_action: str, events: List[str]):
    """
    Called at the end of each game or when the agent died to hand out final rewards.

    This is similar to reward_update. self.events will contain all events that
    occurred during your agent's final step.

    This is *one* of the places where you could update your agent.
    This is also a good place to store an agent that you updated.

    :param self: The same object that is passed to all of your callbacks.
    """
    self.logger.debug(f'Encountered event(s) {", ".join(map(repr, events))} in final step')
    # Record the terminal transition (next_state is None at episode end).
    # NOTE(review): the action is stored here as [ACTIONS.index(...)], while
    # game_events_occurred appends the raw action string to self.transitions
    # -- confirm which encoding is intended.
    self.transitions.append(
        Transition(state_to_features(last_game_state), [ACTIONS.index(last_action)], None,
                   reward_from_events(self, events)))
    # Reset the per-round visit maps used for reward shaping.
    self.visited = np.zeros((17, 17))
    self.visited_before = np.zeros((17, 17))
    # Store the model
    with open("my-saved-model.pt", "wb") as file:
        pickle.dump(self.policy_net, file)
    # Update the target network, copying all weights and biases in DQN
    if self.rounds % TARGET_UPDATE == 0:
        self.target_net.load_state_dict(self.policy_net.state_dict())
    self.steps_done = 0
    self.rounds += 1
def reward_from_events(self, events: List[str]) -> int:
    """
    *This is not a required function, but an idea to structure your code.*

    Maps each recognized game event to a reward, sums them for this step,
    and subtracts a small constant per-step penalty to discourage idling.
    """
    game_rewards = {
        e.COIN_COLLECTED: 1,
        e.KILLED_OPPONENT: 1,
        e.CRATE_DESTROYED: 1,
        e.KILLED_SELF: -25,
        e.INVALID_ACTION: -1,
        e.WAITED: -1,
    }
    # Events without an entry in game_rewards contribute nothing.
    reward_sum = sum(game_rewards.get(event, 0) for event in events)
    # Penalty per iteration
    reward_sum -= 0.1
    self.logger.info(f"Awarded {reward_sum} for events {', '.join(events)}")
    return reward_sum
def optimize_model(self):
    """Sample a minibatch from replay memory and take one DQN update step.

    Standard DQN: Q(s, a) from the policy net is regressed towards
    r + GAMMA * max_a' Q_target(s', a') using the Huber (smooth L1) loss.
    A no-op until the replay memory holds more than BATCH_SIZE transitions.
    """
    if len(self.memory) < BATCH_SIZE + 1:
        return
    transitions = self.memory.sample(BATCH_SIZE)
    # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for
    # detailed explanation). This converts batch-array of Transitions
    # to Transition of batch-arrays.
    batch = Transition(*zip(*transitions))
    # Compute a mask of non-final states and concatenate the batch elements
    # (a final state would've been the one after which simulation ended)
    non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                            batch.next_state)), dtype=torch.bool)
    state_batch = torch.tensor(batch.state).float()
    # NOTE(review): torch.tensor(batch.next_state) would fail if any sampled
    # next_state is None (terminal) -- this appears to rely on terminal
    # transitions never being sampled here; confirm.
    next_state_batch = torch.tensor(batch.next_state).float()
    reward_batch = torch.tensor(batch.reward).float()
    non_final_next_states = torch.cat([s for s in next_state_batch
                                       if s is not None])
    action_batch = torch.tensor(np.asarray(batch.action, dtype=np.int64))
    # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
    # columns of actions taken. These are the actions which would've been taken
    # for each batch state according to policy_net
    state_action_values = self.policy_net(state_batch).gather(1, action_batch)
    # Compute V(s_{t+1}) for all next states.
    # Expected values of actions for non_final_next_states are computed based
    # on the "older" target_net; selecting their best reward with max(1)[0].
    # This is merged based on the mask, such that we'll have either the expected
    # state value or 0 in case the state was final.
    next_state_values = torch.zeros(BATCH_SIZE)
    # NOTE(review): reshape(-1, 578) disagrees with the 867-feature width the
    # DQN was constructed with in setup_training -- confirm the true feature
    # size.
    next_state_values[non_final_mask] = self.target_net(non_final_next_states.reshape(-1, 578)).max(1)[0]
    # Compute the expected Q values
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch
    # Compute Huber loss
    loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
    # Optimize the model
    self.optimizer.zero_grad()
    loss.backward()
    # Clip gradients element-wise to [-1, 1] for training stability.
    for param in self.policy_net.parameters():
        param.grad.data.clamp_(-1, 1)
    self.optimizer.step()
| 2.4375 | 2 |
Backend/movie_reviews_back/api/views.py | Zhaisan/WEB-Project | 2 | 12763259 | <filename>Backend/movie_reviews_back/api/views.py
from django.http.response import JsonResponse
from api.models import *
from api.serializers import *
from rest_framework.decorators import api_view
from django.http.response import JsonResponse
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from django.shortcuts import Http404
from rest_framework.views import APIView
from rest_framework import status
# -------------------------------------FBV--------------------------------------
@api_view(['GET'])
def genres_list(request):
    """Return all genres as a serialized JSON list."""
    queryset = Genre.objects.all()
    return Response(GenreSerializer(queryset, many=True).data)
@api_view(['GET'])
def genres_movies(request, genre_id):
    """Return all movies belonging to the genre with id ``genre_id``.

    ``QuerySet.filter`` is lazy and never raises ``DoesNotExist`` (only
    ``QuerySet.get`` does), so the original try/except around it was dead
    code; an unknown genre simply yields an empty list.
    """
    movies = Movie.objects.filter(genre=genre_id)
    serializer = MovieSerializer(movies, many=True)
    return Response(serializer.data)
@api_view(['GET'])
def movies_list(request):
    """Return every movie in the database."""
    queryset = Movie.objects.all()
    return Response(MovieSerializer(queryset, many=True).data)
@api_view(['GET'])
def movies_detail(request, movie_id):
    """Return a single movie by id, or a 400 JSON error when it is missing."""
    try:
        movie = Movie.objects.get(id=movie_id)
    except Movie.DoesNotExist as exc:
        return JsonResponse({'message': str(exc)}, status=400)
    return Response(MovieSerializer(movie).data)
# ----------------------------------------CBV--------------------------------
class UsersListAPIView(APIView):
    """List every registered user."""

    def get(self, request):
        queryset = User.objects.all()
        payload = UserSerializer(queryset, many=True).data
        return Response(payload, status=status.HTTP_200_OK)
class UserDetailAPIView(APIView):
    """Retrieve a single user by primary key (404 when missing)."""

    def get_object(self, pk):
        try:
            return User.objects.get(id=pk)
        except User.DoesNotExist:
            raise Http404

    def get(self, request, pk=None):
        serializer = UserSerializer(self.get_object(pk))
        return Response(serializer.data)
class CommentsListAPIView(APIView):
    """List and create comments attached to a given movie."""

    def get_objects(self, movie_id):
        # QuerySet.filter is lazy and never raises DoesNotExist (only .get
        # does), so the original try/except -> Http404 here was dead code;
        # an unknown movie simply yields an empty queryset.
        return Comment.objects.filter(movie=movie_id)

    def get(self, request, movie_id=None):
        """Return all comments for the movie as a JSON list."""
        comments = self.get_objects(movie_id)
        serializer = CommentSerializer(comments, many=True)
        return Response(serializer.data)

    def post(self, request, movie_id):
        """Create a comment from the request body; 201 on success, 400 on
        validation failure."""
        serializer = CommentSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    # permission_classes = (IsAuthenticated,)
    # NOTE(review): if re-enabled, this must be a tuple/list -- the original
    # commented-out version lacked the trailing comma.
class CommentDetailAPIView(APIView):
    """Retrieve, update, or delete a single comment (404 when missing)."""

    def get_object(self, pk):
        try:
            return Comment.objects.get(id=pk)
        except Comment.DoesNotExist:
            raise Http404

    def get(self, request, movie_id=None, pk=None):
        return Response(CommentSerializer(self.get_object(pk)).data)

    def put(self, request, movie_id=None, pk=None):
        comment = self.get_object(pk)
        serializer = CommentSerializer(instance=comment, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, movie_id=None, pk=None):
        self.get_object(pk).delete()
        return Response({'message': 'deleted'}, status=204)
| 2.234375 | 2 |
matmodlab2/materials/vonmises.py | matmodlab/matmodlab2 | 6 | 12763260 | <reponame>matmodlab/matmodlab2<filename>matmodlab2/materials/vonmises.py
import numpy as np
from ..core.logio import logger
from ..core.material import Material
from ..core.tensor import VOIGT
ROOT2 = np.sqrt(2.)
ROOT3 = np.sqrt(3.)
ROOT23 = np.sqrt(2./3.)
TOLER = 1e-8
class VonMisesMaterial(Material):
    """Von Mises (J2) plasticity with combined isotropic/kinematic hardening."""
    name = 'vonmises'
    def __init__(self, **kwargs):
        '''Set up the von Mises material.

        Keyword parameters: K (bulk modulus), G (shear modulus), H (hardening
        modulus), Y0 (initial yield stress), BETA (fraction of hardening that
        is kinematic; the remainder is isotropic).
        '''
        # Check inputs
        K = kwargs.get('K', 0.)
        G = kwargs.get('G', 0.)
        H = kwargs.get('H', 0.)
        Y0 = kwargs.get('Y0', 0.)
        BETA = kwargs.get('BETA', 0.)
        # Collect every validation failure before aborting so the user sees
        # all problems at once.
        errors = 0
        if K <= 0.0:
            errors += 1
            logger.error('Bulk modulus K must be positive')
        if G <= 0.0:
            errors += 1
            logger.error('Shear modulus G must be positive')
        # Poisson's ratio implied by the elastic moduli; must lie in (-1, .5].
        nu = (3.0 * K - 2.0 * G) / (6.0 * K + 2.0 * G)
        if nu > 0.5:
            errors += 1
            logger.error('Poisson\'s ratio > .5')
        if nu < -1.0:
            errors += 1
            logger.error('Poisson\'s ratio < -1.')
        if nu < 0.0:
            logger.warn('negative Poisson\'s ratio')
        # A (near-)zero yield stress means "never yield": use a huge value.
        if abs(Y0) <= 1.E-12:
            Y0 = 1.0e99
        if errors:
            raise ValueError('stopping due to previous errors')
        self.params = {'K': K, 'G': G, 'Y0': Y0, 'H': H, 'BETA': BETA}
        # Register State Variables: equivalent plastic strain, current yield
        # stress, six backstress components, and the equivalent stress.
        self.sdv_names = ['EQPS', 'Y',
                          'BS_XX', 'BS_YY', 'BS_ZZ', 'BS_XY', 'BS_XZ', 'BS_YZ',
                          'SIGE']
        self.num_sdv = len(self.sdv_names)
    def sdvini(self, statev):
        """Return the initial state-variable vector (Y starts at Y0, rest zero)."""
        Y0 = self.params['Y0']
        return np.array([0.0, Y0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    def eval(self, time, dtime, temp, dtemp, F0, F,
             stran, d, stress, statev, **kwargs):
        """Compute updated stress given strain increment.

        Elastic trial stress followed by a plastic correction along the
        deviatoric direction N when the trial state exceeds the current
        yield stress (a return-mapping style update).  Returns
        (stress, statev, None); ``statev`` is updated in place.
        """
        # Helper mapping a state-variable name to its index in statev.
        idx = lambda x: self.sdv_names.index(x.upper())
        # Current backstress (note the XY/YZ/XZ ordering used here differs
        # from the XY/XZ/YZ ordering in sdv_names -- preserved as written).
        bs = np.array([statev[idx('BS_XX')],
                       statev[idx('BS_YY')],
                       statev[idx('BS_ZZ')],
                       statev[idx('BS_XY')],
                       statev[idx('BS_YZ')],
                       statev[idx('BS_XZ')]])
        yn = statev[idx('Y')]
        # Strain increment (Voigt scaling removed), split into volumetric and
        # deviatoric parts.
        de = d / VOIGT * dtime
        iso = de[:3].sum() / 3.0 * np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
        dev = de - iso
        # Elastic predictor.
        stress_trial = stress + 3.0 * self.params['K'] * iso + 2.0 * self.params['G'] * dev
        # Shifted (relative) stress and its equivalent measure.
        xi_trial = stress_trial - bs
        xi_trial_eqv = self.eqv(xi_trial)
        if xi_trial_eqv <= yn:
            # Still inside the yield surface: accept the elastic trial state.
            statev[idx('SIGE')] = xi_trial_eqv
            return stress_trial, statev, None
        else:
            # Unit deviatoric flow direction of the shifted stress.
            N = xi_trial - xi_trial[:3].sum() / 3.0 * np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
            N = N / (ROOT23 * xi_trial_eqv)
            # Equivalent plastic strain increment from consistency.
            deqps = (xi_trial_eqv - yn) / (3.0 * self.params['G'] + self.params['H'])
            dps = 1. / ROOT23 * deqps * N
            # Plastic corrector pulls the stress back toward the surface.
            stress_final = stress_trial - 2.0 * self.params['G'] / ROOT23 * deqps * N
            # Kinematic part of the hardening moves the backstress...
            bs = bs + 2.0 / 3.0 * self.params['H'] * self.params['BETA'] * dps
            statev[idx('EQPS')] += deqps
            # ...while the isotropic part grows the yield stress.
            statev[idx('Y')] += self.params['H'] * (1.0 - self.params['BETA']) * deqps
            statev[idx('BS_XX')] = bs[0]
            statev[idx('BS_YY')] = bs[1]
            statev[idx('BS_ZZ')] = bs[2]
            statev[idx('BS_XY')] = bs[3]
            statev[idx('BS_YZ')] = bs[4]
            statev[idx('BS_XZ')] = bs[5]
            statev[idx('SIGE')] = self.eqv(stress_final - bs)
            return stress_final, statev, None
    def eqv(self, sig):
        # Returns sqrt(3 * rootj2) = sig_eqv = q
        # (von Mises equivalent stress of the deviatoric part of sig, with
        # shear components in Voigt notation counted twice).
        s = sig - sig[:3].sum() / 3.0 * np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
        return 1. / ROOT23 * np.sqrt(np.dot(s[:3], s[:3]) + 2 * np.dot(s[3:], s[3:]))
| 2.25 | 2 |
week6/code6.py | suhas1999/EE2703 | 0 | 12763261 |
# coding: utf-8
# In[2]:
#start of code
#importing packages
import numpy as np
import scipy.signal as sp
import matplotlib.pyplot as plt
# In[3]:
def time_domain_output(f, H, t_start, t_end):
    """Simulate the LTI system ``H`` driven by the input samples ``f``.

    Parameters
    ----------
    f : array_like
        Input signal sampled on the internally built time grid; its length
        must equal ``10 * (t_end - t_start)``.
    H : scipy.signal.lti
        Transfer function of the system.
    t_start, t_end : number
        Simulation window.

    Returns
    -------
    numpy.ndarray
        System output at each time sample.
    """
    # np.linspace requires an integer sample count; the original passed
    # 10 * (t_end - t_start) directly, which fails on modern NumPy whenever
    # the window length is not an int.
    t = np.linspace(t_start, t_end, int(10 * (t_end - t_start)))
    _, y, _ = sp.lsim(H, f, t)
    return y
# In[4]:
t_start = 0
t_end = 100
t = np.linspace(t_start,t_end,10*(t_end-t_start))
f1 = np.cos(1.5*t) * np.exp(-0.5*t)
#d2y + 2.25y = x
H=sp.lti([1],[1,0,2.25])
# In[5]:
y1 = time_domain_output(f1,H,t_start,t_end)
# In[14]:
plt.plot(t,y1)
plt.xlabel(r"t ---------->",size=15)
plt.ylabel(r"x ---------->",size=15)
plt.title(r"System with decay = 0.5",size=20)
plt.show()
# In[16]:
f2 = np.cos(1.5*t) * np.exp(-0.05*t)
# In[17]:
y2 = time_domain_output(f2,H,t_start,t_end)
# In[18]:
plt.plot(t,y2)
plt.xlabel(r"t ---------->",size=15)
plt.ylabel(r"x ---------->",size=15)
plt.title(r"System with decay = 0.05",size=20)
plt.show()
# In[19]:
def input(freq,damp_fac):
    """Damped cosine cos(freq*t)*exp(-damp_fac*t) on 1000 samples over [0, 100].

    NOTE(review): the name shadows the builtin ``input()``; it is kept because
    the plotting code below calls it by this name, but renaming (e.g. to
    ``damped_cosine``) would be safer.
    """
    t = np.linspace(0,100,1000)
    return np.cos(freq*t) * np.exp(-damp_fac*t)
# In[25]:
n=5
t = np.linspace(0,100,1000)
freq_range =np.linspace(1.4,1.6,n)
for freq in freq_range:
plt.plot(t,time_domain_output(input(freq,0.05),H,0,100))
plt.xlabel("t -------->",size=15)
plt.ylabel("x -------->",size =15)
plt.title(r"System response with Different Frequencies",size=15)
plt.legend(["Freq = ${:.2f}$".format(f) for f in freq_range])
plt.show()
# In[62]:
w,S,phi=H.bode()
plt.semilogx(w,S)
plt.plot(1.5,28,"ro",label=r"Resonance Frequency")
plt.title(r"Magnitude Bode plot with resonance freq = 1.5",size=14)
plt.xlabel(r"Freq in rad/s log(w) -------->",size=15)
plt.ylabel("Mag in dB -------->",size =15)
plt.legend()
plt.show()
plt.semilogx(w,phi)
#plt.plot(1.5,28,"ro",label=r"Resonance Frequency")
plt.title(r"Phase Bode plot with resonance freq = 1.5",size=14)
plt.xlabel(r"Freq in rad/s log(w) -------->",size=15)
plt.ylabel("Phase in degrees -------->",size =15)
plt.show()
# In[11]:
#eqn1 -- dx2 + x-y = 0
#Eqn2 --dy2 + 2(y-x) = 0
# In[52]:
#form eqn1 y = dx2 + x
#eq2 -- dx4+3dx2=0
xs = sp.lti([1,0,2],[1,0,3,0])
ys = sp.lti([2],[1,0,3,0])
# In[53]:
t = np.linspace(0,20,200)
# In[54]:
t1,x = sp.impulse(xs,None,t)
t2,y = sp.impulse(ys,None,t)
# In[72]:
plt.plot(t1,x,label=r"x(t)")
plt.plot(t2,y,label=r"y(t)")
plt.legend()
plt.xlabel("t ---------------->",size=15)
plt.title("Coupled Equation Response",size=15)
plt.show()
# In[77]:
H_circ1 = sp.lti(np.poly1d([10**12]),np.poly1d([1,10**8,10**12]))
w1,S1,phi1=H_circ1.bode()
plt.semilogx(w1,S1)
plt.xlabel("Frequency in rad/s",size=15)
plt.ylabel("Magnitude in dB",size=15)
plt.title("Magnitude plot",size=15)
plt.grid(True)
plt.show()
plt.semilogx(w1,phi1)
plt.xlabel("Frequency in rad/s",size=15)
plt.ylabel("Phase in degrees",size=15)
plt.title("Phase plot",size=15)
plt.grid(True)
plt.show()
# In[79]:
t_steady = np.linspace(0,10**-2,10**5)
in_steady = np.cos(10**3 * t_steady) - np.cos(10**6 * t_steady)
# In[80]:
t1,y_steady,svec1=sp.lsim(H_circ1,in_steady,t_steady)
# In[91]:
plt.plot(t1,y_steady)
plt.title("Steady state Response")
plt.ylabel(r"$V_{o}(t) --->$",size=15)
plt.xlabel(r"$t --->$",size=15)
plt.show()
# In[93]:
t_trans = np.linspace(0,35*10**-6,30*10**2+1)
in_trans = np.cos(10**3 * t_trans) - np.cos(10**6 * t_trans)
# In[94]:
t2,y_trans,svec2 = sp.lsim(H_circ1,in_trans,t_trans)
# In[95]:
plt.plot(t2,y_trans)
plt.title("Transient Response")
plt.ylabel(r"$V_{o}(t) --->$",size=15)
plt.xlabel(r"$t --->$",size=15)
plt.show()
| 2.71875 | 3 |
tests/unit/states/test_ssh_known_hosts.py | yuriks/salt | 1 | 12763262 | <filename>tests/unit/states/test_ssh_known_hosts.py
# -*- coding: utf-8 -*-
'''
:codeauthor: <NAME> <<EMAIL>>
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import os
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import (
MagicMock,
patch
)
# Import Salt Libs
import salt.states.ssh_known_hosts as ssh_known_hosts
class SshKnownHostsTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.states.ssh_known_hosts
    '''
    def setup_loader_modules(self):
        # No extra dunder context is needed; patch.dict fills in __opts__ /
        # __salt__ per assertion below.
        return {ssh_known_hosts: {}}
    # 'present' function tests: 1
    def test_present(self):
        '''
        Test to verifies that the specified host is known by the specified user.
        '''
        name = 'github.com'
        user = 'root'
        key = '16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48'
        fingerprint = [key]
        # Baseline return dict; each scenario mutates 'comment'/'result'
        # before the assertion.
        ret = {'name': name,
               'changes': {},
               'result': False,
               'comment': ''}
        with patch.dict(ssh_known_hosts.__opts__, {'test': True}):
            # Argument-validation failures (no user + relative config,
            # key and fingerprint together, key without enc).
            with patch.object(os.path, 'isabs', MagicMock(return_value=False)):
                comt = ('If not specifying a "user", '
                        'specify an absolute "config".')
                ret.update({'comment': comt})
                self.assertDictEqual(ssh_known_hosts.present(name), ret)
            comt = ('Specify either "key" or "fingerprint", not both.')
            ret.update({'comment': comt})
            self.assertDictEqual(ssh_known_hosts.present(name, user, key=key,
                                                         fingerprint=[key]),
                                 ret)
            comt = ('Required argument "enc" if using "key" argument.')
            ret.update({'comment': comt})
            self.assertDictEqual(ssh_known_hosts.present(name, user, key=key),
                                 ret)
            # In test mode, check_known_host drives the predicted outcome:
            # 'exists' -> True, 'add'/'update' -> None (pending change).
            mock = MagicMock(side_effect=['exists', 'add', 'update'])
            with patch.dict(ssh_known_hosts.__salt__,
                            {'ssh.check_known_host': mock}):
                comt = ('Host github.com is already in .ssh/known_hosts')
                ret.update({'comment': comt, 'result': True})
                self.assertDictEqual(ssh_known_hosts.present(name, user), ret)
                comt = ('Key for github.com is set to be'
                        ' added to .ssh/known_hosts')
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(ssh_known_hosts.present(name, user), ret)
                comt = ('Key for github.com is set to be '
                        'updated in .ssh/known_hosts')
                ret.update({'comment': comt})
                self.assertDictEqual(ssh_known_hosts.present(name, user), ret)
        with patch.dict(ssh_known_hosts.__opts__, {'test': False}):
            # Real runs: the status returned by ssh.set_known_host decides
            # success ('exists'/'updated') vs failure ('error').
            result = {'status': 'exists', 'error': ''}
            mock = MagicMock(return_value=result)
            with patch.dict(ssh_known_hosts.__salt__,
                            {'ssh.set_known_host': mock}):
                comt = ('github.com already exists in .ssh/known_hosts')
                ret.update({'comment': comt, 'result': True})
                self.assertDictEqual(ssh_known_hosts.present(name, user), ret)
            result = {'status': 'error', 'error': ''}
            mock = MagicMock(return_value=result)
            with patch.dict(ssh_known_hosts.__salt__,
                            {'ssh.set_known_host': mock}):
                ret.update({'comment': '', 'result': False})
                self.assertDictEqual(ssh_known_hosts.present(name, user), ret)
            result = {'status': 'updated', 'error': '',
                      'new': [{'fingerprint': fingerprint, 'key': key}],
                      'old': ''}
            mock = MagicMock(return_value=result)
            with patch.dict(ssh_known_hosts.__salt__,
                            {'ssh.set_known_host': mock}):
                comt = ("{0}'s key saved to .ssh/known_hosts (key: {1})"
                        .format(name, key))
                ret.update({'comment': comt, 'result': True,
                            'changes': {'new': [{'fingerprint': fingerprint,
                                                 'key': key}], 'old': ''}})
                self.assertDictEqual(ssh_known_hosts.present(name, user,
                                                             key=key), ret)
                comt = ("{0}'s key saved to .ssh/known_hosts (fingerprint: {1})"
                        .format(name, fingerprint))
                ret.update({'comment': comt})
                self.assertDictEqual(ssh_known_hosts.present(name, user), ret)
    # 'absent' function tests: 1
    def test_absent(self):
        '''
        Test to verifies that the specified host is not known by the given user.
        '''
        name = 'github.com'
        user = 'root'
        ret = {'name': name,
               'changes': {},
               'result': False,
               'comment': ''}
        # Missing user + relative config is rejected up front.
        with patch.object(os.path, 'isabs', MagicMock(return_value=False)):
            comt = ('If not specifying a "user", '
                    'specify an absolute "config".')
            ret.update({'comment': comt})
            self.assertDictEqual(ssh_known_hosts.absent(name), ret)
        # No known-host entries -> nothing to do.
        mock = MagicMock(return_value=False)
        with patch.dict(ssh_known_hosts.__salt__,
                        {'ssh.get_known_host_entries': mock}):
            comt = ('Host is already absent')
            ret.update({'comment': comt, 'result': True})
            self.assertDictEqual(ssh_known_hosts.absent(name, user), ret)
        mock = MagicMock(return_value=True)
        with patch.dict(ssh_known_hosts.__salt__,
                        {'ssh.get_known_host_entries': mock}):
            # Test mode predicts the removal without performing it.
            with patch.dict(ssh_known_hosts.__opts__, {'test': True}):
                comt = ('Key for github.com is set to be'
                        ' removed from .ssh/known_hosts')
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(ssh_known_hosts.absent(name, user), ret)
            # Real runs: ssh.rm_known_host status decides the outcome.
            with patch.dict(ssh_known_hosts.__opts__, {'test': False}):
                result = {'status': 'error', 'error': ''}
                mock = MagicMock(return_value=result)
                with patch.dict(ssh_known_hosts.__salt__,
                                {'ssh.rm_known_host': mock}):
                    ret.update({'comment': '', 'result': False})
                    self.assertDictEqual(ssh_known_hosts.absent(name, user),
                                         ret)
                result = {'status': 'removed', 'error': '',
                          'comment': 'removed'}
                mock = MagicMock(return_value=result)
                with patch.dict(ssh_known_hosts.__salt__,
                                {'ssh.rm_known_host': mock}):
                    ret.update({'comment': 'removed', 'result': True,
                                'changes': {'new': None, 'old': True}})
                    self.assertDictEqual(ssh_known_hosts.absent(name, user),
                                         ret)
| 2.390625 | 2 |
src/f.py | kissy24/atcoder-past-exam | 0 | 12763263 | import re
def sort_by_double_camel(chars: str) -> str:
    """Split ``chars`` into double-camel fragments and sort them.

    A fragment is an uppercase letter, a (possibly empty) run of lowercase
    letters, and a closing uppercase letter.  Fragments are sorted
    case-insensitively in ascending order and concatenated into one string.

    Args:
        chars: the string to split and sort.

    Returns:
        The sorted double-camel-case fragments joined together.
    """
    fragments = re.findall(r"[A-Z][a-z]*[A-Z]", chars)
    fragments.sort(key=str.lower)
    return "".join(fragments)
def main():
    """Read one line from stdin, sort its double-camel fragments, print the result."""
    print(sort_by_double_camel(input()))
if __name__ == "__main__":
    # Run only when executed as a script, not when imported.
    main()
| 4.09375 | 4 |
src/webpub_manifest_parser/opds2/__init__.py | tdilauro/webpub-manifest-parser | 1 | 12763264 | <gh_stars>1-10
from webpub_manifest_parser.core import ManifestParser, ManifestParserFactory
from webpub_manifest_parser.opds2.registry import (
OPDS2CollectionRolesRegistry,
OPDS2LinkRelationsRegistry,
OPDS2MediaTypesRegistry,
)
from webpub_manifest_parser.opds2.semantic import OPDS2SemanticAnalyzer
from webpub_manifest_parser.opds2.syntax import OPDS2SyntaxAnalyzer
class OPDS2FeedParserFactory(ManifestParserFactory):
    """Factory creating OPDS 2.0 parser."""
    def create(self):
        """Create a new OPDS 2.0 parser.

        Wires the OPDS 2.0 registries (media types, link relations,
        collection roles) into a semantic analyzer, pairs it with the OPDS
        2.0 syntax analyzer, and returns the assembled ManifestParser.

        :return: OPDS 2.0 parser
        :rtype: Parser
        """
        # Registries supplying the vocabularies the semantic analyzer
        # validates documents against.
        media_types_registry = OPDS2MediaTypesRegistry()
        link_relations_registry = OPDS2LinkRelationsRegistry()
        collection_roles_registry = OPDS2CollectionRolesRegistry()
        syntax_analyzer = OPDS2SyntaxAnalyzer()
        semantic_analyzer = OPDS2SemanticAnalyzer(
            media_types_registry, link_relations_registry, collection_roles_registry
        )
        parser = ManifestParser(syntax_analyzer, semantic_analyzer)
        return parser
| 2.046875 | 2 |
python/coroutines/pyos5.py | ASMlover/study | 22 | 12763265 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2020 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from collections import deque
from typing import Any, Deque, Dict, Generator
TargetGenerator = Generator[Any, Any, Any]
class Task(object):
    """A coroutine wrapped with a unique task id for the scheduler."""

    # Monotonically increasing counter shared by every Task instance.
    taskid = 0

    def __init__(self, target: TargetGenerator):
        """Wrap *target* and assign it the next available task id."""
        Task.taskid = Task.taskid + 1
        self.tid = Task.taskid
        self.target = target
        # Value delivered into the coroutine on the next run(); starts as
        # None so the first send() primes the generator.
        self.sendval = None

    def run(self) -> Any:
        """Resume the coroutine, delivering the pending send value."""
        return self.target.send(self.sendval)
class SystemCall(object):
    """Base class for scheduler system calls.

    When a task yields a SystemCall, the scheduler attaches ``self.task``
    and ``self.sched`` and invokes ``handle()``; subclasses decide when the
    task gets rescheduled.
    """
    def handle(self):
        # Default: do nothing (the task is neither answered nor rescheduled).
        pass
class Scheduler(object):
    """Round-robin scheduler multiplexing Task-wrapped coroutines."""
    def __init__(self):
        # FIFO of tasks ready to run, and a map of all live tasks by tid.
        self.ready: Deque[Task] = deque()
        self.taskmap: Dict[int, Task] = {}
    def new(self, target: TargetGenerator) -> int:
        """Wrap *target* in a Task, register and schedule it; return its tid."""
        new_task = Task(target)
        self.taskmap[new_task.tid] = new_task
        self.schedule(new_task)
        return new_task.tid
    def schedule(self, task: Task) -> None:
        """Append *task* to the back of the ready queue."""
        self.ready.append(task)
    def exit(self, task: Task) -> None:
        """Remove a finished task from the live-task map."""
        print(f"Task {task.tid} terminated")
        del self.taskmap[task.tid]
    def mainloop(self) -> None:
        """Run tasks round-robin until every registered task has terminated."""
        while self.taskmap:
            task = self.ready.popleft()
            try:
                result = task.run()
                if isinstance(result, SystemCall):
                    # A yielded SystemCall executes on the task's behalf;
                    # the call itself decides when to reschedule the task.
                    result.task = task
                    result.sched = self
                    result.handle()
                    continue
            except StopIteration:
                # The coroutine ran to completion (or was closed).
                self.exit(task)
                continue
            self.schedule(task)
class GetTid(SystemCall):
    """System call: deliver the calling task its own task id."""
    def handle(self):
        # Answer the task with its tid and put it back on the ready queue.
        self.task.sendval = self.task.tid
        self.sched.schedule(self.task)
class NewTask(SystemCall):
    """System call: spawn *target* as a new task; the yield returns its tid."""
    def __init__(self, target: TargetGenerator):
        self.target = target
    def handle(self):
        # Register the child, hand its tid back to the caller, and resume it.
        tid = self.sched.new(self.target)
        self.task.sendval = tid
        self.sched.schedule(self.task)
class KillTask(SystemCall):
    """System call: close the coroutine of task *tid*; yields True on success."""
    def __init__(self, tid):
        self.tid = tid
    def handle(self):
        task = self.sched.taskmap.get(self.tid)
        if task:
            # close() raises GeneratorExit inside the target; the scheduler
            # reaps it via StopIteration the next time it runs.
            task.target.close()
            self.task.sendval = True
        else:
            # Unknown tid: report failure to the caller.
            self.task.sendval = False
        self.sched.schedule(self.task)
if __name__ == '__main__':
    # Demo: main() spawns foo(), lets it run five scheduler rounds, then
    # kills it via the KillTask system call.
    def foo():
        # First yield asks the scheduler for this task's id.
        mytid = yield GetTid()
        while True:
            print(f"I'm foo, {mytid}")
            yield
    def main():
        child = yield NewTask(foo())
        for _ in range(5):
            yield
        yield KillTask(child)
        print(f"main done")
    sched = Scheduler()
    sched.new(main())
    sched.mainloop()
| 1.8125 | 2 |
flaskr/cac.py | juliasliu/application | 0 | 12763266 | '''Python script to generate CAC'''
'''Authors - <NAME>
'''
import numpy as np
import pandas as pd
from datetime import datetime
import collections
from .helpers import *
class CAC:
    """Compute Customer Acquisition Cost (CAC), its trailing-twelve-month
    (TTM) variant, and year-over-year growth from three metric tables."""
    def __init__(self, fin_perf, oper_metrics, oth_metrics):
        # Inputs arrive as dict-like records; normalize to DataFrames.
        print("INIT CAC")
        self.fin_perf = pd.DataFrame(fin_perf)
        self.oper_metrics = pd.DataFrame(oper_metrics)
        self.oth_metrics = pd.DataFrame(oth_metrics)
    def run(self):
        """Run the full pipeline and return both tables as JSON-able records."""
        self.clean_inputs()
        print(self.fin_perf)
        print(self.oper_metrics)
        print(self.oth_metrics)
        self.ttm_cac()
        self.yoy_growth()
        self.clean_outputs()
        json = {
            "CAC & CAC TTM": self.cac_ttm.to_dict(orient='records'),
            "CAC YoY Growth": self.cac_yoy.to_dict(orient='records'),
        }
        return json
    def clean_inputs(self):
        """Index each table by its label column and coerce cells to decimals.

        ``filter_to_dec_list`` comes from .helpers; presumably it parses
        formatted strings into numbers -- TODO confirm.
        """
        self.fin_perf = self.fin_perf.copy()
        self.fin_perf.set_index("Financial Performance", inplace=True)
        self.fin_perf.apply(filter_to_dec_list)
        self.oper_metrics = self.oper_metrics.copy()
        self.oper_metrics.set_index("Operating Metrics", inplace=True)
        self.oper_metrics.apply(filter_to_dec_list)
        self.oth_metrics.set_index("Other Metrics", inplace=True)
        self.oth_metrics.apply(filter_to_dec_list)
    def clean_outputs(self):
        """Format the result tables for display: blanks for NaN, thousands
        separators for numbers, percent strings for the growth rows."""
        self.cac_ttm = self.cac_ttm.astype(object)
        self.cac_ttm.apply(nan_to_blank_list)
        self.cac_ttm = self.cac_ttm.apply(numbers_with_commas_list)
        self.cac_ttm = self.cac_ttm.drop(self.cac_ttm.columns[0], axis=1)
        self.cac_ttm.reset_index(inplace=True)
        self.cac_yoy = self.cac_yoy.astype(object)
        self.cac_yoy.apply(nan_to_blank_list)
        # Keep an unformatted copy so the percent rows are derived from the
        # raw decimals, not the comma-formatted strings.
        cac_yoy_copy = self.cac_yoy.copy()
        self.cac_yoy = self.cac_yoy.apply(numbers_with_commas_list)
        self.cac_yoy.loc['YoY growth'] = cac_yoy_copy.loc['YoY growth'].apply(dec_to_percents)
        self.cac_yoy.loc['YoY growth*'] = cac_yoy_copy.loc['YoY growth*'].apply(dec_to_percents)
        self.cac_yoy = self.cac_yoy.drop(self.cac_yoy.columns[0], axis=1)
        self.cac_yoy.reset_index(inplace=True)
        print("CAC & CAC TTM")
        print(self.cac_ttm)
        print("CAC YoY Growth")
        print(self.cac_yoy)
    def ttm_cac(self):
        """Build the monthly CAC table plus a 12-month trailing CAC row."""
        index = ["S&M", "Total Expense", "# of New Customers", "CAC", "TTM CAC"]
        self.cac_ttm = pd.DataFrame(index=np.arange(len(index)), columns=self.fin_perf.columns)
        self.cac_ttm.set_index(pd.Series(index, name=""), inplace=True)
        # S&M arrives negative and in thousands; flip sign and rescale.
        self.cac_ttm.loc['S&M'] = -self.fin_perf.loc['S&M']*1000
        self.cac_ttm.loc['Total Expense'] = self.cac_ttm.loc['S&M']
        self.cac_ttm.loc['# of New Customers'] = self.oper_metrics.loc['A']
        # Guard against divide-by-zero by mapping 0 customers to NaN.
        self.cac_ttm.loc['CAC'] = self.cac_ttm.loc['Total Expense'].div(self.cac_ttm.loc['# of New Customers'].replace({0:np.nan}))
        # TTM needs 12 months of history; earlier columns are marked N/A.
        self.cac_ttm.loc['TTM CAC'][:12] = ["N/A"]*12
        for i in range(12, self.cac_ttm.shape[1]):
            self.cac_ttm.loc['TTM CAC'][i] = self.cac_ttm.loc['Total Expense'].iloc[i-11:i+1].sum()/self.cac_ttm.loc['# of New Customers'].iloc[i-11:i+1].sum()
    def yoy_growth(self):
        """Build the YoY table: TTM CAC growth and avg-ARR-per-customer growth."""
        index = ["TTM CAC", "YoY growth", "Avg ARR Per Customer", "YoY growth*"]
        self.cac_yoy = pd.DataFrame(index=np.arange(len(index)), columns=self.fin_perf.columns)
        self.cac_yoy.set_index(pd.Series(index, name=""), inplace=True)
        self.cac_yoy.loc['TTM CAC'] = self.cac_ttm.loc['TTM CAC']
        # YoY of a TTM series needs 24 months of history; earlier cells NaN.
        self.cac_yoy.loc['YoY growth'].iloc[:min(self.cac_yoy.shape[1], 24)] = [float("NaN")]*min(self.cac_yoy.shape[1], 24)
        self.cac_yoy.loc['YoY growth*'].iloc[:min(self.cac_yoy.shape[1], 24)] = [float("NaN")]*min(self.cac_yoy.shape[1], 24)
        self.cac_yoy.loc['Avg ARR Per Customer'] = self.oth_metrics.loc['Avg ARR per Customer']
        if self.cac_yoy.shape[1] >= 24:
            # Ratio of each month to the same month one year earlier, minus 1.
            self.cac_yoy.loc['YoY growth'].iloc[24:] = list(self.cac_yoy.loc['TTM CAC'].iloc[24:].array/self.cac_yoy.loc['TTM CAC'].iloc[12:-12].array-1)
            self.cac_yoy.loc['YoY growth*'].iloc[24:] = list(self.cac_yoy.loc['Avg ARR Per Customer'].iloc[24:].array/self.cac_yoy.loc['Avg ARR Per Customer'].iloc[12:-12].array-1)
| 2.421875 | 2 |
tests/test_config.py | gaby/wireguard | 11 | 12763267 |
import pytest
from unittest.mock import (
call,
mock_open,
patch,
)
from subnet import ip_network, IPv4Network, IPv4Address
from wireguard import (
Config,
ServerConfig,
Peer,
Server,
)
from wireguard.utils import IPAddressSet
def test_basic_server():
    """A bare Server config contains only [Interface] + Address -- no peers,
    no optional interface settings."""
    subnet = '192.168.0.0/24'
    address = '192.168.0.1'
    server = Server(
        'test-server',
        subnet,
        address=address,
    )
    config = ServerConfig(server)
    wg_config = config.local_config
    config_lines = wg_config.split('\n')
    # Ensure that [Interface] is first in the config, allowing for blank lines before
    for line in config_lines:
        if line:
            assert line == '[Interface]'
            break
    # Check that these are on a line alone in the config output
    assert f'Address = {address}/24' in config_lines
    assert '# test-server' not in config_lines  # Should only be present in Peer section on remote
    assert '[Peer]' not in config_lines  # We haven't configured any peers, so this shouldn't exist
    # Check that these don't appear anywhere at all because of how basic this config is
    for option in ['DNS', 'PreUp', 'PostUp', 'PreDown', 'PostDown', 'SaveConfig', 'MTU', 'Table', 'AllowedIPs', 'Endpoint', 'PersistentKeepalive', 'PresharedKey', 'PublicKey']:
        assert f'{option} =' not in wg_config
def test_basic_peer():
    """A bare Peer config contains only [Interface] + a /32 Address."""
    address = '192.168.0.2'
    peer = Peer(
        'test-peer',
        address=address,
    )
    config = Config(peer)
    wg_config = config.local_config
    config_lines = wg_config.split('\n')
    # Ensure that [Interface] is first in the config, allowing for blank lines before
    for line in config_lines:
        if line:
            assert line == '[Interface]'
            break
    assert f'Address = {address}/32' in config_lines
    assert '# test-peer' not in config_lines  # Should only be present in Peer section on remote
    assert '[Peer]' not in config_lines  # We haven't configured any peers, so this shouldn't exist
    # Check that these don't appear anywhere at all because of how basic this config is
    for option in ['DNS', 'PreUp', 'PostUp', 'PreDown', 'PostDown', 'SaveConfig', 'MTU', 'Table', 'AllowedIPs', 'Endpoint', 'PersistentKeepalive', 'PresharedKey', 'PublicKey']:
        assert f'{option} =' not in wg_config
def test_inadmissible_non_peer():
    """Config() rejects objects that lack the peer duck-type attributes
    (``allowed_ips`` and ``public_key``)."""
    class NonPeer():
        attrib1 = IPAddressSet()
        attrib2 = 'something'
    with pytest.raises(ValueError) as exc:
        config = Config(NonPeer())
    assert 'provide a valid Peer' in str(exc.value)
def test_admissible_non_peer():
    """Any object exposing ``allowed_ips`` and ``public_key`` is accepted by
    Config() and shows up in the remote [Peer] section."""
    class NonPeer():
        allowed_ips = IPAddressSet()
        public_key = 'something'
    config = Config(NonPeer())
    # Local side: still just an [Interface] section.
    for line in config.local_config.split('\n'):
        if line:
            assert line == '[Interface]'
    assert '[Peer]' in config.remote_config
    assert 'PublicKey = something' in config.remote_config
def test_write_server_config_no_params():
    """With no arguments, write() uses the default wg0 paths under /etc/wireguard."""
    subnet = '192.168.0.0/24'
    address = '192.168.0.1'
    server = Server(
        'test-server',
        subnet,
        address=address,
    )
    # Patch open() so nothing is actually written to disk.
    with patch('builtins.open', mock_open()) as mo:
        server.config().write()
        mo.assert_has_calls([
            call('/etc/wireguard/wg0.conf', mode='w', encoding='utf-8'),
            call('/etc/wireguard/wg0-peers.conf', mode='w', encoding='utf-8'),
        ], any_order=True)
@pytest.mark.parametrize(
    ('interface', 'path', 'full_path', 'peers_full_path'),
    [
        (None, None, '/etc/wireguard/wg0.conf', '/etc/wireguard/wg0-peers.conf',),  # Default options
        ('wg3', None, '/etc/wireguard/wg3.conf', '/etc/wireguard/wg3-peers.conf',),
        (None, '/opt/my-wg-dir', '/opt/my-wg-dir/wg0.conf', '/opt/my-wg-dir/wg0-peers.conf',),
        ('wg1', '/opt/my-other-wg-dir', '/opt/my-other-wg-dir/wg1.conf', '/opt/my-other-wg-dir/wg1-peers.conf',),
    ])
def test_write_server_config(interface, path, full_path, peers_full_path):
    """Interface name and base path combine into the expected config paths,
    for both the server file and the peers file."""
    subnet = '192.168.0.0/24'
    address = '192.168.0.1'
    server = Server(
        'test-server',
        subnet,
        address=address,
        interface=interface
    )
    config = server.config()
    assert config.full_path(path) == full_path
    assert config.peers_full_path(path) == peers_full_path
    # Patch open() so nothing is actually written to disk.
    with patch('builtins.open', mock_open()) as mo:
        config.write(path)
        mo.assert_has_calls([
            call(full_path, mode='w', encoding='utf-8'),
            call(peers_full_path, mode='w', encoding='utf-8'),
        ], any_order=True)
def test_write_peer_config_no_params():
    """With no arguments, a Peer's write() uses the default wg0 path; peers
    have no companion *-peers.conf file."""
    address = '192.168.0.1'
    peer = Peer(
        'test-peer',
        address=address,
    )
    # Patch open() so nothing is actually written to disk.
    with patch('builtins.open', mock_open()) as mo:
        peer.config().write()
        mo.assert_has_calls([
            call('/etc/wireguard/wg0.conf', mode='w', encoding='utf-8'),
        ], any_order=True)
@pytest.mark.parametrize(
    ('interface', 'path', 'full_path',),
    [
        (None, None, '/etc/wireguard/wg0.conf',),  # Default options
        ('wg3', None, '/etc/wireguard/wg3.conf',),
        (None, '/opt/my-wg-dir', '/opt/my-wg-dir/wg0.conf',),
        ('wg1', '/opt/my-other-wg-dir', '/opt/my-other-wg-dir/wg1.conf',),
    ])
def test_write_peer_config(interface, path, full_path):
    """Interface name and base path combine into the expected peer config path."""
    address = '192.168.0.2'
    peer = Peer(
        'test-peer',
        address=address,
        interface=interface,
    )
    config = Config(peer)
    assert config.full_path(path) == full_path
    # Patch open() so nothing is actually written to disk.
    with patch('builtins.open', mock_open()) as mo:
        peer.config().write(path)
        mo.assert_has_calls([
            call(full_path, mode='w', encoding='utf-8'),
        ], any_order=True)
| 2.5 | 2 |
cloudwatch_metrics/cloudwatch_metrics/cloudwatch_metrics.py | tbma2014us/ops-tools | 2 | 12763268 | <gh_stars>1-10
import datetime
import logging
import os
import re
import signal
import subprocess
import sys
import threading
import time
from pprint import pformat
import argparse
import boto3
import botocore.client
import botocore.exceptions
import requests
__version__ = '1.0.0'
logger = logging.getLogger()
# noinspection PyTypeChecker
class ArgsParser(argparse.ArgumentParser):
    """Argument parser for the CloudWatch metrics daemon.

    Adds the service's options and post-processes the parsed namespace
    (log format, program name, interval in seconds).
    """
    def __init__(self, *args, **kwargs):
        kwargs.setdefault(
            'description',
            'Runs as a service. Every 5 minutes puts custom metrics into CloudWatch')
        argparse.ArgumentParser.__init__(self, *args, **kwargs)
        self.formatter_class = argparse.ArgumentDefaultsHelpFormatter
        self.epilog = '''
    Configure your AWS access using: IAM, ~root/.aws/credentials, ~root/.aws/config, /etc/boto.cfg,
    ~root/.boto, or AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables
    '''
        self.options = None
        self.add_argument('-p', '--profile', dest='profile', help='AWS profile to use')
        self.add_argument('-r', '--region', dest='region', default='us-west-2', help='AWS region to connect')
        self.add_argument('-i', '--interval', dest='interval', type=int, default=5, help='Sleep for that many minutes')
        self.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False, help='Be verbose')
    def error(self, message):
        # On bad arguments: print usage and exit with the conventional code 2.
        sys.stderr.write('Error: %s\n' % message)
        self.print_help()
        sys.exit(2)
    def parse_args(self, *args, **kwargs):
        """Parse and enrich the options namespace; also cached on self.options."""
        options = argparse.ArgumentParser.parse_args(self, *args, **kwargs)
        options.log_format = '[%(levelname)s] (%(process)d:%(filename)s:%(lineno)s) %(message)s'
        options.name = os.path.basename(__file__)
        # CLI takes minutes; the daemon loop works in seconds.
        options.interval *= 60
        self.options = options
        return options
def pick(iterable, _g, *_args):
    """Return one value per target in ``_args``.

    For each target (in order) the iterable is scanned and the first truthy
    result of ``_g(item, target)`` replaces that target in the output; when
    no item yields a truthy value, the target itself is kept.  Note that a
    one-shot iterator (e.g. a file object) keeps advancing across targets,
    which the /proc/meminfo parsing in collect_metrics relies on.
    """
    picked = list(_args)
    for position, target in enumerate(_args):
        found = next(
            (hit for hit in (_g(entry, target) for entry in iterable) if hit),
            None,
        )
        if found:
            picked[position] = found
    return picked
def collect_metrics():
    """Gather host metrics from /proc and lsof.

    Returns a list of (MetricName, (value, unit, dimensions)) tuples.  Each
    nested function is executed immediately by the @collect decorator, which
    derives the metric name from the function name.
    """
    data = list()
    def collect(f):
        # Not a real decorator: calls f() right away and appends each yielded
        # (value, unit, dimensions) tuple under the title-cased metric name.
        name = f.__name__.title().replace('_', '')
        for value in f():
            data.append((name, value))
    @collect
    def memory_utilization():
        with open('/proc/meminfo') as f:
            def match(line, item):
                meminfo_regex = re.compile(r'([A-Z][A-Za-z()_]+):\s+(\d+)(?: ([km]B))')
                name, amount, unit = meminfo_regex.match(line).groups()
                if name == item:
                    assert unit == 'kB'
                    return int(amount)
            # pick() consumes the file sequentially, so this depends on
            # meminfo listing these fields in this order.
            memtotal, memfree, buffers, _cached = pick(
                f, match, 'MemTotal', 'MemFree', 'Buffers', 'Cached'
            )
            inactive = (memfree + buffers + _cached) / float(memtotal)
            yield round(100 * (1 - inactive), 1), "Percent", ()
    @collect
    def disk_space_utilization():
        with open('/proc/mounts') as f:
            for line in f:
                # Only real block-device mounts (paths starting with '/').
                if not line.startswith('/'):
                    continue
                device, _path, filesystem, options = line.split(' ', 3)
                result = os.statvfs(_path)
                # Skip pseudo-filesystems that report zero total blocks.
                if not result.f_blocks:
                    continue
                free = result.f_bfree / float(result.f_blocks)
                yield round(100 * (1 - free), 1), "Percent", (
                    ("Filesystem", device),
                    ("MountPath", _path)
                )
    @collect
    def load_average():
        with open('/proc/loadavg') as f:
            line = f.read()
        # First field is the 1-minute load average.
        load = float(line.split(' ', 1)[0])
        yield round(load, 2), "Count", ()
    @collect
    def network_connections():
        i = 0
        # Last enumerate index equals the number of lines minus one, i.e. the
        # number of connection rows after the header line.
        with open('/proc/net/tcp') as f:
            for i, line in enumerate(f):
                pass
        yield i, "Count", (("Protocol", "TCP"), )
        with open('/proc/net/udp') as f:
            for i, line in enumerate(f):
                pass
        yield i, "Count", (("Protocol", "UDP"), )
    @collect
    def open_file_descriptor_count():
        # Count lsof output lines, killing the subprocess after 30 seconds.
        # NOTE(review): on Python 3, p.stdout.read() returns bytes, so
        # .count('\n') would raise -- this code appears to target Python 2.
        p = subprocess.Popen(['lsof'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        t = threading.Timer(30, p.kill)
        try:
            t.start()
            c = p.stdout.read().count('\n')
            exit_code = p.wait()
        finally:
            t.cancel()
        yield int(c) if exit_code == 0 else 0, "Count", ()
    return data
def metrics(_options):
    """Collect one batch of metrics and push it to CloudWatch.

    Exits the process if the EC2 instance-metadata endpoint is unreachable
    (i.e. we are not running on an EC2 instance).
    """
    session = boto3.session.Session(
        profile_name=_options.profile or None, region_name=_options.region)
    data = collect_metrics()
    try:
        # Note the trailing comma: dimensions is a 1-tuple containing the
        # ('InstanceId', <id>) pair, matching the loop below.
        dimensions = ('InstanceId',
                      requests.get('http://169.254.169.254/latest/meta-data/instance-id', timeout=3).text
                      ),
    except requests.exceptions.ConnectionError:
        raise SystemExit('Fatal Error: Not running on AWS EC2 instance')
    for dimension in dimensions:
        _options.verbose and logging.info('Collected metrics:\n' + pformat(data))
        submit_metrics(session, _options.verbose, data, "System/Linux", dimension)
        logger.info(
            'Submitted %d metrics for dimension System/Linux: %s' % (len(data), dimension[0])
        )
def submit_metrics(_session, verbose, data, namespace, *dimensions):
    """Translate collected metrics into the CloudWatch PutMetricData shape
    and submit them in one call.

    ``dimensions`` are prepended to each metric's own dimensions.  API
    errors are logged and swallowed so the daemon keeps running.
    """
    # Short connect timeout and no retries: a stuck submission should not
    # stall the collection loop.
    config = botocore.client.Config(connect_timeout=5, retries={'max_attempts': 0})
    metric_data = list()
    for name, (value, unit, metric_dimensions) in data:
        metric_dimensions = tuple(metric_dimensions)
        _dimensions = list()
        for j, (_name, _value) in enumerate(dimensions + metric_dimensions):
            _dimensions.append(
                {
                    'Name': _name,
                    'Value': _value
                }
            )
        metric_data.append(
            {
                'MetricName': name,
                'Dimensions': _dimensions,
                'Value': value,
                'Unit': unit,
            }
        )
    verbose and logging.info('Submitting metrics:\n' + pformat(metric_data))
    try:
        _session.client('cloudwatch', config=config).put_metric_data(
            Namespace=namespace,
            MetricData=metric_data
        )
    except botocore.exceptions.ClientError as e:
        logging.error(e)
def sigterm_handler(*args):
    """Signal handler: log the received signal by name and exit cleanly.

    Registered for SIGINT/SIGTERM in main(); ``args`` is the standard
    (signum, frame) pair.
    """
    # dict.iteritems() exists only on Python 2; items() works on both 2 and 3.
    sig_name = next(v for v, k in signal.__dict__.items() if k == args[0])
    logging.info('Exiting %s on %s' % (os.getpid(), sig_name))
    sys.exit(0)
def main(args=sys.argv[1:]):
    """Daemon entry point: parse options, install signal handlers, then
    collect-and-submit metrics forever, sleeping between rounds.

    Note: the ``args`` default is captured once at import time, as is usual
    for Python defaults.
    """
    my_parser = ArgsParser()
    options = my_parser.parse_args(args)
    for s in [signal.SIGINT, signal.SIGTERM]:
        # noinspection PyTypeChecker
        signal.signal(s, sigterm_handler)
    # Silence chatty AWS SDK loggers unless verbose was requested.
    for m in ['boto3', 'botocore']:
        not options.verbose and logging.getLogger(m).setLevel(logging.CRITICAL)
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=options.log_format)
    logging.info('Starting %s' % options.name)
    while True:
        # Aim the next run at a whole-minute boundary; fall back to the full
        # interval if a slow collection pushed us past the target time.
        next_run = datetime.datetime.now() + datetime.timedelta(seconds=options.interval)
        metrics(options)
        dt = next_run.replace(second=0, microsecond=0) - datetime.datetime.now()
        sleep_time = dt.seconds + dt.microseconds / 1e6 if dt.days >= 0 else options.interval
        options.verbose and logging.info('Sleeping for %s seconds' % sleep_time)
        time.sleep(sleep_time)
if __name__ == '__main__':
    main()
| 2.109375 | 2 |
src/common/database.py | asimonia/pricing-alerts | 0 | 12763269 | <reponame>asimonia/pricing-alerts
import pymongo
class Database:
    """Thin static wrapper around a single shared MongoDB connection."""

    URI = 'mongodb://127.0.0.1:27017'
    # Bound by initialize(); all static methods operate on this handle.
    DATABASE = None

    @staticmethod
    def initialize():
        """Connect to MongoDB and bind the shared 'fullstack' database."""
        client = pymongo.MongoClient(Database.URI)
        Database.DATABASE = client['fullstack']

    @staticmethod
    def insert(collection, data):
        """Insert one document (or a list of documents) into a collection."""
        # Collection.insert() was removed in PyMongo 4; use the explicit
        # single/bulk variants instead.
        if isinstance(data, (list, tuple)):
            Database.DATABASE[collection].insert_many(list(data))
        else:
            Database.DATABASE[collection].insert_one(data)

    @staticmethod
    def find(collection, query):
        """Find multiple document(s) within a collection"""
        return Database.DATABASE[collection].find(query)

    @staticmethod
    def find_one(collection, query):
        """Find one document within a collection"""
        return Database.DATABASE[collection].find_one(query)

    @staticmethod
    def update(collection, query, data):
        """Update or insert (upsert) a single document within a collection."""
        # Legacy Collection.update() applied $-operator payloads as updates
        # and plain documents as whole-document replacements; mirror that
        # with the modern API.
        if any(key.startswith('$') for key in data):
            Database.DATABASE[collection].update_one(query, data, upsert=True)
        else:
            Database.DATABASE[collection].replace_one(query, data, upsert=True)

    @staticmethod
    def remove(collection, query):
        """Remove all documents matching the query within a collection."""
        # Legacy Collection.remove() deleted every match by default.
        Database.DATABASE[collection].delete_many(query)
pyhole/plugins/search.py | jk0/pyhole | 12 | 12763270 | # Copyright 2010-2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pyhole Search Plugin"""
from BeautifulSoup import BeautifulSoup
from pyhole.core import plugin
from pyhole.core import request
from pyhole.core import utils
class Search(plugin.Plugin):
    """Provide access to search engines."""

    @plugin.hook_add_command("urban")
    @utils.require_params
    @utils.spawn
    def urban(self, message, params=None, **kwargs):
        """Search Urban Dictionary (ex: .urban <query>)."""
        url = "http://www.urbandictionary.com/define.php"
        response = request.get(url, params={"term": params})
        if response.status_code != 200:
            return

        soup = BeautifulSoup(response.content)
        try:
            meaning = soup.find("div", {"class": "meaning"}).text
            example = soup.find("div", {"class": "example"}).text
        except AttributeError:
            # No result markup on the page: report and bail out.  Without
            # this return the code fell through and hit an UnboundLocalError
            # on `meaning`/`example` below.
            message.dispatch("No results found: '%s'" % params)
            return

        meaning = utils.decode_entities(meaning)
        example = utils.decode_entities(example)
        message.dispatch("%s (ex: %s)" % (meaning, example))

    @plugin.hook_add_command("wikipedia")
    @utils.require_params
    @utils.spawn
    def wikipedia(self, message, params=None, **kwargs):
        """Search Wikipedia (ex: .wikipedia <query>)."""
        url = "https://en.wikipedia.org/w/api.php"
        response = request.get(url, params={
            "action": "query",
            "generator": "allpages",
            "gaplimit": 4,
            "gapfrom": params,
            "format": "json"
        })
        if response.status_code != 200:
            return

        # Dispatch a wiki URL for each page title returned.
        pages = response.json()["query"]["pages"]
        for page in pages.values():
            title = page["title"]
            title = title.replace(" ", "_")
            message.dispatch("http://en.wikipedia.org/wiki/%s" % title)
| 3.015625 | 3 |
hlwtadmin/management/commands/resolve_orgs_without_concerts.py | Kunstenpunt/havelovewilltravel | 1 | 12763271 | <filename>hlwtadmin/management/commands/resolve_orgs_without_concerts.py
from hlwtadmin.models import Artist, GigFinderUrl, GigFinder, ConcertAnnouncement, Venue, Location, Organisation, Country, Concert, RelationConcertOrganisation, Location
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Link organisations that currently have no concert relations to the
    concerts of their venue's concert announcements."""

    def add_arguments(self, parser):
        # This command takes no custom CLI arguments.
        pass

    def handle(self, *args, **options):
        # Organisations with no concert relation yet.
        for organisation in Organisation.objects.filter(relationconcertorganisation__organisation__isnull=True).distinct():
            # The organisation's venue (if any) links it to announcements.
            venue = Venue.objects.filter(organisation=organisation).first()
            if venue:
                cas = ConcertAnnouncement.objects.filter(raw_venue=venue)
                for ca in cas:
                    concert = ca.concert
                    if concert:
                        print(organisation, "has as concert", concert)
                        # NOTE(review): multiple announcements may share one
                        # concert, which would create duplicate relations
                        # here — consider get_or_create; confirm against the
                        # model's uniqueness constraints.
                        RelationConcertOrganisation.objects.create(
                            concert=concert,
                            organisation=organisation
                        )
| 2.25 | 2 |
modelchimp/migrations/0049_auto_20190516_0759.py | samzer/modelchimp-server | 134 | 12763272 | # Generated by Django 2.2 on 2019-05-16 07:59
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop unused metadata fields from the
    experiment model. Do not edit the operations by hand."""

    dependencies = [
        ('modelchimp', '0048_auto_20190515_1032'),
    ]

    operations = [
        # Columns no longer tracked on the experiment model.
        migrations.RemoveField(
            model_name='experiment',
            name='algorithm',
        ),
        migrations.RemoveField(
            model_name='experiment',
            name='features',
        ),
        migrations.RemoveField(
            model_name='experiment',
            name='platform',
        ),
        migrations.RemoveField(
            model_name='experiment',
            name='platform_library',
        ),
    ]
| 1.460938 | 1 |
pywraps/py_expr.py | cclauss/src | 7 | 12763273 | # --------------------------------------------------------------------------
#<pycode(py_expr)>
try:
    import types
    import ctypes

    # Callback for IDC func callback (On Windows, we use stdcall)
    # typedef error_t idaapi idc_func_t(idc_value_t *argv,idc_value_t *r);
    try:
        _IDCFUNC_CB_T = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)
    except AttributeError:
        # WINFUNCTYPE only exists on Windows; fall back to cdecl elsewhere.
        _IDCFUNC_CB_T = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)

    # A trampoline function that is called from idcfunc_t that will
    # call the Python callback with the argv and r properly serialized to python
    call_idc_func__ = ctypes.CFUNCTYPE(ctypes.c_long, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)(_ida_expr.py_get_call_idc_func())
except Exception:
    # ctypes (or the native trampoline) is unavailable; degrade gracefully.
    # Narrowed from a bare `except:` so Ctrl-C is not swallowed here.
    def call_idc_func__(*args):
        warning("IDC extensions need ctypes library in order to work")
        return 0
    _IDCFUNC_CB_T = None
# --------------------------------------------------------------------------
# Flags for IDC extension functions (mirror the EXTFUN_* constants).
EXTFUN_BASE = 0x0001
"""requires open database"""
EXTFUN_NORET = 0x0002
"""does not return. the interpreter may clean up its state before calling it."""
EXTFUN_SAFE = 0x0004
"""thread safe function. may be called"""
# --------------------------------------------------------------------------
class _IdcFunction(object):
    """
    Internal class that calls pyw_call_idc_func() with a context
    """
    def __init__(self, ctxptr):
        # Opaque native context pointer from pyw_register_idc_func().
        self.ctxptr = ctxptr
        # Take a reference to the ctypes callback
        # (note: this will create a circular reference)
        self.cb = _IDCFUNC_CB_T(self)

    # Raw C function pointer of the ctypes thunk, as an integer address.
    fp_ptr = property(lambda self: ctypes.cast(self.cb, ctypes.c_void_p).value)

    def __call__(self, args, res):
        # Forward the native call into the registered Python callback.
        return call_idc_func__(self.ctxptr, args, res)
# --------------------------------------------------------------------------
# Dictionary to remember IDC function names along with the context pointer
# retrieved by using the internal pyw_register_idc_func()
__IDC_FUNC_CTXS = {}
def del_idc_func(name):
    """
    Unregisters the specified IDC function

    @param name: IDC function name to unregister
    @return: Boolean - False if the name was not registered
    """
    # Atomically look up and remove the context (replaces the separate
    # get + del, and drops the unnecessary `global` statement: the dict
    # is only mutated, never rebound).
    f = __IDC_FUNC_CTXS.pop(name, None)
    if f is None:
        return False  # Not registered

    # Break the circular reference between the context and its ctypes thunk.
    del f.cb

    # Release the native context and unregister the function.
    return _ida_expr.pyw_unregister_idc_func(f.ctxptr)
# --------------------------------------------------------------------------
def add_idc_func(name, fp, args, defvals=None, flags=0):
    """
    Extends the IDC language by exposing a new IDC function that is backed up by a Python function

    @param name: IDC function name to expose
    @param fp: Python callable that will receive the arguments and return a tuple.
    @param args: Arguments. A tuple of idaapi.VT_XXX constants
    @param defvals: Optional sequence of default values for trailing arguments
    @param flags: IDC function flags. A combination of EXTFUN_XXX constants
    @return: Boolean
    """
    # 'defvals' defaults to None; normalize it to an empty tuple so the
    # length check below does not raise TypeError when it is omitted.
    if defvals is None:
        defvals = ()

    # Registering a function that is already registered?
    if name in __IDC_FUNC_CTXS:
        # Unregister it first
        del_idc_func(name)

    # Convert the tuple argument info to a string
    args = "".join([chr(x) for x in args])

    # Make sure we don't have an obvious discrepancy between
    # the number of args and the provided default values.
    if len(defvals) > len(args):
        return False

    vdefvals = idc_values_t()
    if not _ida_expr.pyw_convert_defvals(vdefvals, defvals):
        return False

    # Create a context
    ctxptr = _ida_expr.pyw_register_idc_func(name, args, fp)
    if ctxptr == 0:
        return False

    # Bind the context with the IdcFunc object
    f = _IdcFunction(ctxptr)

    # Remember the Python context
    __IDC_FUNC_CTXS[name] = f

    # Register IDC function with a callback
    return _ida_expr.py_add_idc_func(
        name,
        f.fp_ptr,
        args,
        vdefvals,
        flags)
#</pycode(py_expr)>
#<pycode_BC695(py_expr)>
# Backward-compatibility aliases: map the pre-7.0 (IDA 6.95) API names onto
# the current idc/expr functions so old scripts keep working.
Compile=compile_idc_file
CompileEx=compile_idc_file
CompileLine=compile_idc_text
VT_STR2=VT_STR
VarCopy=copy_idcv
VarDelAttr=del_idcv_attr
VarDeref=deref_idcv
VarFirstAttr=first_idcv_attr
def VarGetAttr(obj, attr, res, may_use_getattr=False):
    # Old API took the result last; new get_idcv_attr takes it first.
    return get_idcv_attr(res, obj, attr, may_use_getattr)
VarGetClassName=get_idcv_class_name
VarGetSlice=get_idcv_slice
VarInt64=idcv_int64
VarLastAttr=last_idcv_attr
VarMove=move_idcv
VarNextAttr=next_idcv_attr
VarObject=idcv_object
VarPrevAttr=prev_idcv_attr
VarPrint=print_idcv
VarRef=create_idcv_ref
VarSetAttr=set_idcv_attr
VarSetSlice=set_idcv_slice
VarString2=idcv_string
VarSwap=swap_idcvs
def calc_idc_expr(where, expr, res):
    # Argument order changed: result buffer moved to the front.
    return eval_idc_expr(res, where, expr)
def calcexpr(where, expr, res):
    return eval_expr(res, where, expr)
def dosysfile(complain_if_no_file, fname):
    return exec_system_script(fname, complain_if_no_file)
def execute(line):
    return eval_idc_snippet(None, line)
py_set_idc_func_ex=py_add_idc_func
def set_idc_func_ex(name, fp=None, args=(), flags=0):
    # Old entry point had no default-values parameter; pass an empty tuple.
    return add_idc_func(name, fp, args, (), flags)
| 2.078125 | 2 |
dfirtrack_artifacts/tests/artifact/test_artifact_creator_forms.py | stuhli/dfirtrack | 273 | 12763274 | from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_artifacts.forms import ArtifactCreatorForm
from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype
from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor
class ArtifactCreatorFormTestCase(TestCase):
    """artifact creator form tests"""

    @classmethod
    def setUpTestData(cls):
        """Create the fixture objects every test below relies on."""
        # create user
        test_user = User.objects.create_user(
            username='testuser_artifact_creator', password='<PASSWORD>'
        )
        # create system status and two systems
        systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1')
        System.objects.create(
            system_name='system_1',
            systemstatus=systemstatus_1,
            system_created_by_user_id=test_user,
            system_modified_by_user_id=test_user,
        )
        System.objects.create(
            system_name='system_2',
            systemstatus=systemstatus_1,
            system_created_by_user_id=test_user,
            system_modified_by_user_id=test_user,
        )
        # create tag color and two tags
        tagcolor_1 = Tagcolor.objects.create(tagcolor_name='tagcolor_1')
        Tag.objects.create(
            tag_name='tag_1',
            tagcolor=tagcolor_1,
        )
        Tag.objects.create(
            tag_name='tag_2',
            tagcolor=tagcolor_1,
        )
        # create artifact priority / status / types
        Artifactpriority.objects.create(artifactpriority_name='prio_1')
        Artifactstatus.objects.create(artifactstatus_name='artifactstatus_1')
        Artifacttype.objects.create(artifacttype_name='artifacttype_1')
        Artifacttype.objects.create(artifacttype_name='artifacttype_2')

    # --- fixture lookup helpers (deduplicate the repeated .objects.get calls) ---

    @staticmethod
    def _priority_id():
        """Primary key of the fixture artifact priority."""
        return Artifactpriority.objects.get(
            artifactpriority_name='prio_1'
        ).artifactpriority_id

    @staticmethod
    def _status_id():
        """Primary key of the fixture artifact status."""
        return Artifactstatus.objects.get(
            artifactstatus_name='artifactstatus_1'
        ).artifactstatus_id

    @staticmethod
    def _type_id(name):
        """Primary key of the named fixture artifact type."""
        return Artifacttype.objects.get(artifacttype_name=name).artifacttype_id

    @staticmethod
    def _system_id(name):
        """Primary key of the named fixture system."""
        return System.objects.get(system_name=name).system_id

    @staticmethod
    def _tag_id(name):
        """Primary key of the named fixture tag."""
        return Tag.objects.get(tag_name=name).tag_id

    def test_artifact_creator_artifactpriority_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(form.fields['artifactpriority'].label, 'Artifactpriority (*)')

    def test_artifact_creator_artifactstatus_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(form.fields['artifactstatus'].label, 'Artifactstatus (*)')

    def test_artifact_creator_artifacttype_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(
            form.fields['artifacttype'].label,
            'Artifacttypes (*) - Will also be set as artifact names',
        )

    def test_artifact_creator_system_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(form.fields['system'].label, 'Systems (*)')

    def test_artifact_creator_tag_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(form.fields['tag'].label, 'Tags')

    def test_artifact_creator_analysisresult_note_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(
            form.fields['artifact_note_analysisresult'].label, 'Analysis result'
        )

    def test_artifact_creator_external_note_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(form.fields['artifact_note_external'].label, 'External note')

    def test_artifact_creator_internal_note_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(form.fields['artifact_note_internal'].label, 'Internal note')

    def test_artifact_creator_name_choice_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(
            form.fields['alternative_artifact_name_choice'].label,
            'Use alternative artifact name',
        )

    def test_artifact_creator_name_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(
            form.fields['alternative_artifact_name'].label, 'Alternative artifact name'
        )

    def test_artifact_creator_source_path_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(
            form.fields['artifact_source_path'].label,
            'Artifact source path (attention: will be set for all artifacts regardless of type)',
        )

    def test_artifact_creator_form_empty(self):
        """test minimum form requirements / INVALID"""
        form = ArtifactCreatorForm(data={})
        self.assertFalse(form.is_valid())

    def test_artifact_creator_artifacttype_form_filled(self):
        """test minimum form requirements / INVALID"""
        # artifact types alone are not sufficient
        form = ArtifactCreatorForm(
            data={
                'artifacttype': [
                    self._type_id('artifacttype_1'),
                    self._type_id('artifacttype_2'),
                ],
            }
        )
        self.assertFalse(form.is_valid())

    def test_artifact_creator_artifactpriority_form_filled(self):
        """test minimum form requirements / INVALID"""
        # priority plus types is still not sufficient
        form = ArtifactCreatorForm(
            data={
                'artifactpriority': self._priority_id(),
                'artifacttype': [
                    self._type_id('artifacttype_1'),
                    self._type_id('artifacttype_2'),
                ],
            }
        )
        self.assertFalse(form.is_valid())

    def test_artifact_creator_artifactstatus_form_filled(self):
        """test minimum form requirements / INVALID"""
        # still missing the systems
        form = ArtifactCreatorForm(
            data={
                'artifactpriority': self._priority_id(),
                'artifactstatus': self._status_id(),
                'artifacttype': [
                    self._type_id('artifacttype_1'),
                    self._type_id('artifacttype_2'),
                ],
            }
        )
        self.assertFalse(form.is_valid())

    def test_artifact_creator_system_form_filled(self):
        """test minimum form requirements / VALID"""
        form = ArtifactCreatorForm(
            data={
                'artifactpriority': self._priority_id(),
                'artifactstatus': self._status_id(),
                'artifacttype': [
                    self._type_id('artifacttype_1'),
                    self._type_id('artifacttype_2'),
                ],
                'system': [
                    self._system_id('system_1'),
                    self._system_id('system_2'),
                ],
            }
        )
        self.assertTrue(form.is_valid())

    def test_artifact_creator_all_fields_form_filled(self):
        """test additional form content"""
        form = ArtifactCreatorForm(
            data={
                'artifactpriority': self._priority_id(),
                'artifactstatus': self._status_id(),
                'artifacttype': [
                    self._type_id('artifacttype_1'),
                    self._type_id('artifacttype_2'),
                ],
                'system': [
                    self._system_id('system_1'),
                    self._system_id('system_2'),
                ],
                'tag': [
                    self._tag_id('tag_1'),
                    self._tag_id('tag_2'),
                ],
                'artifact_note_analysisresult': 'lorem ipsum',
                'artifact_note_external': 'lorem ipsum',
                'artifact_note_internal': 'lorem ipsum',
                'artifact_source_path': 'evil.exe',
            }
        )
        self.assertTrue(form.is_valid())

    def test_artifact_creator_alternative_name_form_filled(self):
        """test custom field validation"""
        # alternative name without the checkbox -> invalid
        form = ArtifactCreatorForm(
            data={
                'artifactpriority': self._priority_id(),
                'artifactstatus': self._status_id(),
                'artifacttype': [
                    self._type_id('artifacttype_1'),
                ],
                'system': [
                    self._system_id('system_1'),
                ],
                'alternative_artifact_name': 'alternative name',
            }
        )
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['alternative_artifact_name'],
            ['Either both or neither of the fields is required.'],
        )

    def test_artifact_creator_alternative_choice_form_filled(self):
        """test custom field validation"""
        # checkbox without an alternative name -> invalid
        form = ArtifactCreatorForm(
            data={
                'artifactpriority': self._priority_id(),
                'artifactstatus': self._status_id(),
                'artifacttype': [
                    self._type_id('artifacttype_1'),
                ],
                'system': [
                    self._system_id('system_1'),
                ],
                'alternative_artifact_name_choice': True,
            }
        )
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['alternative_artifact_name'],
            ['Either both or neither of the fields is required.'],
        )

    def test_artifact_creator_alternative_both_form_filled(self):
        """test custom field validation"""
        # checkbox and alternative name together -> valid
        form = ArtifactCreatorForm(
            data={
                'artifactpriority': self._priority_id(),
                'artifactstatus': self._status_id(),
                'artifacttype': [
                    self._type_id('artifacttype_1'),
                ],
                'system': [
                    self._system_id('system_1'),
                ],
                'alternative_artifact_name_choice': True,
                'alternative_artifact_name': 'alternative name',
            }
        )
        self.assertTrue(form.is_valid())
| 2.359375 | 2 |
Core/Rate.py | sybila/eBCSgen | 1 | 12763275 | import numpy as np
import sympy
from lark import Transformer, Tree
from sortedcontainers import SortedList
from TS.State import Vector
class Rate:
    """Reaction rate expression, held either as a plain string or as a
    parsed lark Tree whose leaves may be Complexes, parameters or numbers."""

    def __init__(self, expression):
        # Either a str or a lark Tree (see __str__).
        self.expression = expression

    def __eq__(self, other):
        return self.expression == other.expression

    def __repr__(self):
        return str(self)

    def __str__(self):
        # Trees are flattened back to their token string.
        return self.expression if type(self.expression) == str else "".join(tree_to_string(self.expression))

    def __hash__(self):
        return hash(str(self))

    def vectorize(self, ordering: SortedList, definitions: dict) -> list:
        """
        Converts all occurrences of Complexes (resp. sub trees named agent)
        with its vector representation. These are directly replaced within
        the tree expression.

        Moreover, in the process parameters are replaces with their values
        (if given).

        :param ordering: given SortedList of Complexes
        :param definitions: dict of (param_name, value)
        :return: list of transformed States (just for testing)
        """
        vec = Vectorizer(ordering, definitions)
        self.expression = vec.transform(self.expression)
        return vec.visited

    def evaluate(self, state) -> float:
        """
        Evaluates all occurrences of States to a float using Evaluater.
        It is done as intersection of particular state with given state
        and sum of resulting elements.

        If the result is nan, None is returned instead.

        :param state: given state
        :return: Sympy object for expression representation
        """
        evaluater = Evaluater(state)
        result = evaluater.transform(self.expression)
        try:
            # Free parameters collected by the evaluater become sympy symbols.
            value = sympy.sympify("".join(tree_to_string(result)), locals=evaluater.locals)
            if value == sympy.nan:
                return None
            return value
        except TypeError:
            return None

    def to_symbolic(self):
        """
        Translates rate from vector representation to symbolic one
        as a sum of particular components.

        e.g. [1, 0, 1] -> (x_0 + x_2)
        """
        transformer = SymbolicAgents()
        self.expression = transformer.transform(self.expression)

    def reduce_context(self) -> 'Rate':
        """
        Reduces context of all Complexes to minimum.

        :return: new Rate with reduced context
        """
        transformer = ContextReducer()
        expression = transformer.transform(self.expression)
        return Rate(expression)

    def get_params_and_agents(self):
        """
        Extracts all agents (Complex objects) and params (strings) used in the rate expression.

        :return: set of agents and params
        """
        transformer = Extractor()
        transformer.transform(self.expression)
        return transformer.agents, transformer.params

    def evaluate_direct(self, values, params) -> float:
        """
        Evaluates the expression by substituting the given complex counts
        and parameter values directly.

        If the result is nan, None is returned instead.

        :param values: given mapping complex -> count
        :param params: mapping param name -> value
        :return: Sympy object for expression representation
        """
        evaluater = DirectEvaluater(values, params)
        result = evaluater.transform(self.expression)
        try:
            value = sympy.sympify("".join(tree_to_string(result)))
            if value == sympy.nan:
                return None
            return value
        except TypeError:
            return None
# Transformers for Tree
class ContextReducer(Transformer):
    """Tree transformer: replace each Complex by its minimal-context form."""
    def agent(self, matches):
        return Tree("agent", [matches[0].reduce_context()])
class SymbolicAgents(Transformer):
    """Tree transformer: replace each vectorized agent by its symbolic
    ODE sum string, e.g. "(x_0 + x_2)"."""
    def agent(self, vector):
        vector = "(" + vector[0].to_ODE_string() + ")"
        return Tree("agent", [vector])
class Vectorizer(Transformer):
    """Tree transformer: replaces Complex sub-trees by indicator Vectors
    over the given complex ordering and substitutes parameter values."""
    def __init__(self, ordering, definitions):
        # NOTE(review): super(Transformer, self) skips Transformer.__init__
        # and calls its parent's instead — looks unintended; confirm.
        super(Transformer, self).__init__()
        self.ordering = ordering
        self.definitions = definitions
        # Vectors produced so far (exposed for testing).
        self.visited = []
    def agent(self, complex):
        complex = complex[0]
        # Indicator vector: 1 wherever the ordering entry is compatible
        # with this complex.
        result = np.zeros(len(self.ordering))
        for i in range(len(self.ordering)):
            if complex.compatible(self.ordering[i]):
                result[i] = 1
        result = Vector(result)
        self.visited.append(result)
        return Tree("agent", [result])
    def rate_agent(self, matches):
        # Strip the surrounding rate_agent node; keep the agent child.
        return matches[1]
    def param(self, matches):
        # Substitute a known parameter value, else keep the param node.
        return self.definitions.get(str(matches[0]), Tree("param", matches))
class Evaluater(Transformer):
    """Tree transformer: evaluates agent vectors against a concrete state
    and collects free parameter symbols for sympy."""
    def __init__(self, state):
        # NOTE(review): super(Transformer, self) skips Transformer.__init__
        # — looks unintended; confirm.
        super(Transformer, self).__init__()
        self.state = state
        # name -> sympy.Symbol for parameters left undefined.
        self.locals = dict()
    def agent(self, state):
        # Intersection with the current state: elementwise product, summed.
        return sum(self.state.content * state[0])
    def param(self, matches):
        name = matches[0]
        self.locals[name] = sympy.Symbol(name)
        return name
class DirectEvaluater(Transformer):
    """Tree transformer: substitutes complex counts and parameter values
    directly into the expression tree."""
    def __init__(self, values, params):
        # NOTE(review): super(Transformer, self) skips Transformer.__init__
        # — looks unintended; confirm.
        super(Transformer, self).__init__()
        self.values = values
        self.params = params
    def rate_agent(self, matches):
        return Tree('fun', [matches[1]])
    def agent(self, matches):
        # Unknown complexes count as zero.
        return self.values.get(matches[0], 0)
    def param(self, matches):
        # Unknown parameters stay symbolic (by name).
        par = self.params.get(str(matches[0]), str(matches[0]))
        return Tree('fun', [par])
class Extractor(Transformer):
    """Tree transformer: collects all agents and parameter names used in
    the expression (tree is returned unchanged)."""
    def __init__(self):
        super(Extractor, self).__init__()
        self.agents = set()
        self.params = set()
    def agent(self, matches):
        self.agents.add(matches[0])
        return Tree("agent", matches)
    def param(self, matches):
        self.params.add(matches[0])
        return Tree("param", matches)
def tree_to_string(tree):
    """Flatten a lark Tree (or a leaf token) into a list of token strings.

    :param tree: a lark Tree or any leaf value
    :return: list of token strings in left-to-right order
    """
    if isinstance(tree, Tree):
        # Flat recursion; the original sum(list, []) concatenation was
        # quadratic in the number of children.
        return [token for child in tree.children for token in tree_to_string(child)]
    return [str(tree)]
| 2.75 | 3 |
python/src/gans/cycle_gan.py | d-ikeda-sakurasoft/deep-learning | 0 | 12763276 | import sys, time, os, json
import numpy as np
import matplotlib.pylab as plt
from PIL import Image
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from google.colab import drive
def Unet(img_shape):
    """Build a U-Net generator mapping img_shape -> img_shape (tanh output)."""
    def conv2d(x, filters):
        # Downsampling block: strided conv + LeakyReLU + instance norm.
        x = Conv2D(filters, 4, strides=2, padding='same')(x)
        x = LeakyReLU(0.2)(x)
        x = InstanceNormalization()(x)
        return x
    def deconv2d(x, contracting_path, filters, drop_rate=0):
        # Upsampling block with a skip connection to the contracting path.
        x = UpSampling2D(2)(x)
        x = Conv2D(filters, 4, padding='same', activation='relu')(x)
        if drop_rate:
            x = Dropout(drop_rate)(x)
        x = InstanceNormalization()(x)
        return Concatenate()([x, contracting_path])
    img = Input(img_shape)
    # Encoder
    c1 = conv2d(img, 32)
    c2 = conv2d(c1, 64)
    c3 = conv2d(c2, 128)
    # Bottleneck
    x = conv2d(c3, 256)
    # Decoder
    x = deconv2d(x, c3, 128)
    x = deconv2d(x, c2, 64)
    x = deconv2d(x, c1, 32)
    # Final upsample back to the input resolution; tanh output
    x = UpSampling2D(2)(x)
    x = Conv2D(img_shape[-1], 4, padding='same', activation='tanh')(x)
    return Model(img, x)
def Discriminator(img_shape):
    """Build a PatchGAN discriminator returning a grid of realness scores."""
    def d_layer(x, filters, bn=True):
        # Strided conv block; instance norm optional (off for first layer).
        x = Conv2D(filters, 4, strides=2, padding='same')(x)
        x = LeakyReLU(0.2)(x)
        if bn:
            x = InstanceNormalization()(x)
        return x
    img = Input(img_shape)
    # Downsample to the PatchGAN resolution
    x = d_layer(img, 64, False)
    x = d_layer(x, 128)
    x = d_layer(x, 256)
    x = d_layer(x, 512)
    # Per-patch validity score map
    x = Conv2D(1, 4, padding='same')(x)
    return Model(img, x)
def CycleGAN(gen_AB, gen_BA, disc_A, disc_B, img_shape):
    """Assemble the combined CycleGAN training model.

    Outputs: discriminator validity of both fakes, both cycle
    reconstructions, and both identity mappings.
    """
    img_A = Input(img_shape)
    img_B = Input(img_shape)
    # Translate each domain into the other.
    fake_B = gen_AB(img_A)
    fake_A = gen_BA(img_B)
    # Cycle consistency: translate back to the source domain.
    reconstr_A = gen_BA(fake_B)
    reconstr_B = gen_AB(fake_A)
    # Identity mapping: a generator fed its own target domain.
    img_A_id = gen_BA(img_A)
    img_B_id = gen_AB(img_B)
    # Discriminator scores for the generated images.
    valid_A = disc_A(fake_A)
    valid_B = disc_B(fake_B)
    return Model([img_A, img_B],
        [valid_A, valid_B, reconstr_A, reconstr_B, img_A_id, img_B_id])
def load_datasets(path, train_num, img_shape):
    """Memory-map a raw uint8 dataset file of shape (train_num, *img_shape)."""
    full_shape = (train_num,) + img_shape
    return np.memmap(path, dtype=np.uint8, mode="r", shape=full_shape)
def get_json(json_name, init_func):
    """Return the parsed contents of json_name, or init_func() if absent."""
    if not os.path.isfile(json_name):
        # No saved state yet - fall back to the caller-provided default.
        return init_func()
    with open(json_name) as f:
        return json.load(f)
def train():
    """End-to-end CycleGAN training loop (color domain A <-> grayscale domain B)."""
    # Mount Google Drive and create the working folders
    drive_root = '/content/drive'
    drive.mount(drive_root)
    datasets_dir = "%s/My Drive/datasets"%drive_root
    train_dir = "%s/My Drive/train/cycle128"%drive_root
    imgs_dir = "%s/imgs"%train_dir
    os.makedirs(imgs_dir, exist_ok=True)
    # Training data (memory-mapped)
    train_num = 30000
    test_num = 6000
    img_size = 128
    data_num = train_num + test_num
    img_shape = (img_size,img_size,3)
    train_A = load_datasets("%s/color%d_%d.npy"%(datasets_dir,img_size,data_num), data_num, img_shape)
    train_B = load_datasets("%s/gray%d_%d.npy"%(datasets_dir,img_size,data_num), data_num, (img_size,img_size))
    # Training schedule
    epochs = 200
    batch_size = 100
    batch_num = train_num // batch_size
    # Training state carried over from previous runs
    info_path = "%s/info.json"%train_dir
    info = get_json(info_path, lambda: {"epoch":0})
    last_epoch = info["epoch"]
    # PatchGAN label maps (all-ones for real, all-zeros for fake)
    patch_shape = (img_size//16, img_size//16, 1)
    real = np.ones((batch_size,) + patch_shape)
    fake = np.zeros((batch_size,) + patch_shape)
    # Models: resume from saved weights if present, else build fresh
    lambda_cycle = 10.0
    lambda_id = 0.1 * lambda_cycle
    opt = Adam(0.0002, 0.5)
    gen_AB_path = "%s/gen_AB.h5"%train_dir
    gen_BA_path = "%s/gen_BA.h5"%train_dir
    disc_A_path = "%s/disc_A.h5"%train_dir
    disc_B_path = "%s/disc_B.h5"%train_dir
    if os.path.isfile(disc_B_path):
        gen_AB = load_model(gen_AB_path, custom_objects={'InstanceNormalization': InstanceNormalization})
        gen_BA = load_model(gen_BA_path, custom_objects={'InstanceNormalization': InstanceNormalization})
        disc_A = load_model(disc_A_path, custom_objects={'InstanceNormalization': InstanceNormalization})
        disc_B = load_model(disc_B_path, custom_objects={'InstanceNormalization': InstanceNormalization})
        print_img(last_epoch, gen_BA, train_A, train_B, 0, train_num, "train", img_size)
        print_img(last_epoch, gen_BA, train_A, train_B, train_num, test_num, "test", img_size)
    else:
        gen_AB = Unet(img_shape)
        gen_BA = Unet(img_shape)
        disc_A = Discriminator(img_shape)
        disc_B = Discriminator(img_shape)
    disc_A.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
    disc_B.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
    # Freeze discriminators inside the combined generator-training model.
    disc_A.trainable = False
    disc_B.trainable = False
    cycle_gan = CycleGAN(gen_AB, gen_BA, disc_A, disc_B, img_shape)
    cycle_gan.compile(loss=['mse', 'mse', 'mae', 'mae', 'mae', 'mae'],
        loss_weights=[1, 1, lambda_cycle, lambda_cycle, lambda_id, lambda_id], optimizer=opt)
    # Epochs
    for e in range(last_epoch, epochs):
        start = time.time()
        # Mini-batches
        for i in range(batch_num):
            # Pick a random batch from each domain
            idx = np.random.choice(train_num, batch_size, replace=False)
            imgs_A = train_A[idx].astype(np.float32) / 255
            idx = np.random.choice(train_num, batch_size, replace=False)
            imgs_B = convert_rgb(train_B[idx]).astype(np.float32) / 255
            # Train the discriminators
            fake_B = gen_AB.predict(imgs_A)
            fake_A = gen_BA.predict(imgs_B)
            d_loss_real = disc_A.train_on_batch(imgs_A, real)
            d_loss_fake = disc_A.train_on_batch(fake_A, fake)
            d_loss_A = np.add(d_loss_real, d_loss_fake) * 0.5
            d_loss_real = disc_B.train_on_batch(imgs_B, real)
            d_loss_fake = disc_B.train_on_batch(fake_B, fake)
            d_loss_B = np.add(d_loss_real, d_loss_fake) * 0.5
            d_loss = np.add(d_loss_A, d_loss_B) * 0.5
            # Train the generators
            g_loss = cycle_gan.train_on_batch([imgs_A, imgs_B],
                [real, real, imgs_A, imgs_B, imgs_A, imgs_B])
            # Progress log (carriage return keeps it on one line)
            print("\repoch:%d/%d batch:%d/%d %ds d_loss:%s g_loss:%s" %
                (e+1,epochs, (i+1),batch_num, (time.time()-start), d_loss[0], g_loss[0]), end="")
            sys.stdout.flush()
        print()
        # Generate sample images
        if (e+1) % 10 == 0 or e == 0:
            print_img(e+1, gen_BA, train_A, train_B, 0, train_num, "train", img_size)
            print_img(e+1, gen_BA, train_A, train_B, train_num, test_num, "test", img_size)
        # Save weights and training state
        gen_AB.save(gen_AB_path)
        gen_BA.save(gen_BA_path)
        disc_A.save(disc_A_path)
        disc_B.save(disc_B_path)
        info["epoch"] += 1
        with open(info_path, "w") as f:
            json.dump(info, f)
def convert_rgb(train_B):
    """Expand a batch of single-channel images into 3-channel RGB arrays."""
    rgb_frames = []
    for frame in train_B:
        rgb_frames.append(np.asarray(Image.fromarray(frame).convert("RGB")))
    return np.array(rgb_frames)
def print_img(epoch, gen, train_A, train_B, offset, limit, title, img_size):
    """Render, show and save a comparison sheet: row 1 inputs (B), row 2
    real targets (A), row 3 generated A images.

    :param offset: start index into the datasets (0 for train split)
    :param limit: number of samples available from offset
    """
    # Pick random samples
    num = 10
    idx = np.random.choice(limit, num, replace=False) + offset
    imgs_A = train_A[idx]
    imgs_B = convert_rgb(train_B[idx])
    # Generate fake A images from B
    fake_A = gen.predict(imgs_B.astype(np.float32) / 255)
    fake_A = (fake_A * 255).clip(0).astype(np.uint8)
    # Concatenate into one image sheet
    imgs_A = np.concatenate(imgs_A, axis=1)
    imgs_B = np.concatenate(imgs_B, axis=1)
    fake_A = np.concatenate(fake_A, axis=1)
    imgs = np.concatenate((imgs_B,imgs_A,fake_A), axis=0)
    # Plot
    plt.figure(figsize=(20, 6))
    plt.title(title)
    plt.imshow(imgs)
    plt.axis('off')
    plt.show()
    # Save
    # NOTE(review): imgs_dir is a local of train(), not a module global —
    # this lookup fails with NameError when run as a plain script; confirm
    # and consider passing it in as a parameter.
    Image.fromarray(imgs).save("%s/cycle%d_%d_%s.png"%(imgs_dir,img_size,epoch,title))
# Entry point: run training.
train()
| 2.515625 | 3 |
python_patterns/factory_method.py | detector-m/AppPythonServer | 0 | 12763277 | <filename>python_patterns/factory_method.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@文件 :factory_method.py
@说明 :工厂方法模式(Factory Method)
https://www.cnblogs.com/taosiyu/p/11293949.html
@时间 :2020/08/14 22:33:15
@作者 :Riven
@版本 :1.0.0
'''
'''
工厂方法模式(Factory Method):
定义一个用于创建对象的接口(工厂接口),让子类决定实例化哪一个产品类。
角色:
抽象工厂角色(Creator)
具体工厂角色(Concrete Creator)
抽象产品角色(Product)
具体产品角色(Concrete Product)
工厂方法模式相比简单工厂模式将每个具体产品都对应了一个具体工厂。
适用场景:
需要生产多种、大量复杂对象的时候。
需要降低耦合度的时候。
当系统中的产品种类需要经常扩展的时候。
优点:
每个具体产品都对应一个具体工厂类,不需要修改工厂类代码
隐藏了对象创建的实现细节
缺点:
每增加一个具体产品类,就必须增加一个相应的具体工厂类
'''
from abc import ABCMeta, abstractclassmethod, abstractmethod
class Payment(metaclass=ABCMeta):
    """Abstract product: a payment channel."""

    # Use `abstractmethod`, not the deprecated `abstractclassmethod`: the
    # latter also wraps the method as a classmethod, which `pay(self, money)`
    # is not.
    @abstractmethod
    def pay(self, money):
        """Charge `money` through this payment channel."""
        pass
class Alipay(Payment):
    """Concrete product: pays through Alipay."""

    def pay(self, money):
        # Report the charge on stdout.
        msg = f'支付宝支付{money}元'
        print(msg)
class WeChatPay(Payment):
    """Concrete product: pays through WeChat Pay."""

    def pay(self, money):
        # Report the charge on stdout.
        msg = f'微信支付{money}元'
        print(msg)
class PaymentFactory(metaclass=ABCMeta):
    """Abstract factory: creates Payment products."""

    # `abstractmethod` instead of the deprecated `abstractclassmethod`, which
    # would additionally turn `create_payment(self)` into a classmethod.
    @abstractmethod
    def create_payment(self):
        """Return a new concrete Payment instance."""
        pass
class AlipayFactory(PaymentFactory):
    """Concrete factory producing Alipay products."""

    def create_payment(self):
        product = Alipay()
        return product
class WeChatPayFactory(PaymentFactory):
    """Concrete factory producing WeChat Pay products."""

    def create_payment(self):
        product = WeChatPay()
        return product
if __name__ == '__main__':
    # Demo: each concrete factory builds and exercises its own product.
    af = AlipayFactory()
    ali = af.create_payment()
    ali.pay(120)
    wf = WeChatPayFactory()
    wchat = wf.create_payment()
    wchat.pay(100)
| 3.5 | 4 |
transfermarkt/__init__.py | omerbd21/transfermarkt | 0 | 12763278 | <gh_stars>0
from .team import Team
from .player import Player | 1.148438 | 1 |
dynamic-programming/on_the_way_home.py | izan-majeed/archives | 0 | 12763279 | def ways(n, m):
grid = [[None]*m]*n
for i in range(m):
grid[n-1][i] = 1
for i in range(n):
grid[i][m-1] = 1
for i in range(n-2,-1,-1):
for j in range(m-2,-1,-1):
grid[i][j] = grid[i][j+1] + grid[i+1][j]
return grid[0][0]
if __name__ == "__main__":
    # Interactive driver: read t grid sizes and report the path count for each.
    t = int(input("Number of times you want to run this Program: "))
    for i in range(t):
        n, m = map(int, input(f"\n{i+1}. Grid Size (n*m): ").split())
        result = ways(n, m)
        print(f"There are {result} ways.\n")
05-Lists_Advanced/Exercises/7-Decipher_This.py | eclipse-ib/Software-University-Fundamentals_Module | 0 | 12763280 | message = input().split()
def asci_change(message):
numbers = [num for num in i if num.isdigit()]
numbers_in_chr = chr(int(''.join(numbers)))
letters = [letter for letter in i if not letter.isdigit()]
letters_split = ''.join(letters)
final_word = numbers_in_chr + str(letters_split)
return final_word
def index_change(message):
letters = [letter for letter in j]
letters[1], letters[-1] = letters[-1], letters[1]
letters_split = ''.join(letters)
return letters_split
index_i = 0
for i in message:
message[index_i] = asci_change(message)
index_i += 1
index_j = 0
for j in message:
message[index_j] = index_change(message)
index_j += 1
print(' '.join(message)) | 3.796875 | 4 |
cnn/struct/updatelist_module.py | hslee1539/GIS_GANs | 0 | 12763281 | <filename>cnn/struct/updatelist_module.py
from import_lib import lib
from cnn.struct.updateset_module import UpdateSet
from ctypes import Structure, c_int, POINTER, c_float
class _UpdateList(Structure):
    # ctypes mirror of the native cnn_UpdateList struct; the field order and
    # types must match the C declaration exactly.
    _fields_ = [
        ('sets', POINTER(UpdateSet)),
        ('setSize', c_int)
    ]
def _create(size):
    """Allocate a native update list with `size` slots via the C library."""
    return lib.cnn_create_updatelist(size)

def _release_deep(pUpdateList):
    """Free the native list together with everything it owns."""
    lib.cnn_release_updatelist_deep(pUpdateList)

def _getSets(self):
    # Property getter: dereference the struct pointer and return the sets array.
    return self.contents.sets

def _getSetSize(self):
    # Property getter for the number of sets.
    return self.contents.setSize

def _setSets(self, value):
    # Property setter; `value` should be a POINTER(UpdateSet).
    self.contents.sets = value

def _setSetSize(self, value):
    # Property setter for the number of sets.
    self.contents.setSize = value
# Declare the C function signatures so ctypes marshals arguments correctly.
lib.cnn_create_updatelist.argtypes = [c_int]
lib.cnn_create_updatelist.restype = POINTER(_UpdateList)
lib.cnn_release_updatelist_deep.argtypes = [POINTER(_UpdateList)]
# Public handle type: a pointer to the struct, enriched with Python-side
# properties and a factory method.
UpdateList = POINTER(_UpdateList)
UpdateList.__doc__ = 'cnn_UpdateList의 구조체 포인터인 클래스로, 프로퍼티와 메소드를 제공합니다.'
UpdateList.sets = property(_getSets, _setSets)
UpdateList.setSize = property(_getSetSize, _setSetSize)
UpdateList.create = staticmethod(_create)
UpdateList._release = _release_deep
| 2.4375 | 2 |
constants/download_data.py | zagaran/instant-census | 1 | 12763282 | <reponame>zagaran/instant-census
# strftime pattern with 12-hour clock and AM/PM suffix — presumably used to
# timestamp generated download files; confirm against callers.
TS_FORMAT = "%Y-%m-%d__%I-%M-%S%p"
# Sentinel string standing in for null values — NOTE(review): looks like it is
# consumed by a JavaScript frontend; verify before changing.
JAVASCRIPT_NULL_VALUE = "__NULL__"
privacy_preserving.py | jchenglin/CODE-FOR-FBDE-MODEL | 0 | 12763283 | <reponame>jchenglin/CODE-FOR-FBDE-MODEL
import os
import argparse
from utils import *
from FBDE import FBDE
"""parsing and configuration"""
def parse_args():
desc = "Tensorflow implementation of FBDE"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--phase', type=str, default='train', help='[train / test]')
parser.add_argument('--dataset', type=str, default='FBPP LEVEL II', help='dataset_name')
parser.add_argument('--decay_epoch', type=int, default=50, help='decay epoch')
parser.add_argument('--batch_size', type=int, default=1, help='The size of batch size')
parser.add_argument('--decay_flag', type=str2bool, default=True, help='The decay_flag')
parser.add_argument('--epoch', type=int, default=30, help='The number of epochs to run')
parser.add_argument('--save_freq', type=int, default=1000, help='The number of ckpt_save_freq')
parser.add_argument('--print_freq', type=int, default=500, help='The number of image_print_freq')
parser.add_argument('--iteration', type=int, default=10000, help='The number of training iterations')
parser.add_argument('--adv_weight', type=int, default=1, help='Weight about GAN')
parser.add_argument('--pixel_weight', type=int, default=10, help='weight about pixel loss')
parser.add_argument('--contrastive_weight', type=int, default=2, help='weight about contrastive loss')
parser.add_argument('--lr_for_g', type=float, default=0.0001, help='The learning rate')
parser.add_argument('--lr_for_d', type=float, default=0.0002, help='The learning rate')
parser.add_argument('--margin', type=float, default=0.1, help='The margin of triplets loss')
parser.add_argument('--nce_temp', type=float, default=0.07, help='The nce_temp for NCE loss')
parser.add_argument('--gan_type', type=str, default='lscut', help='[sgan/hinge/wgan/ragan/lscut/lsgan/pulsgan]')
parser.add_argument('--smoothing', type=str2bool, default=True, help='AdaLIN smoothing effect')
parser.add_argument('--num_patches', type=int, default=256, help='The num_patches size of contrastive loss')
parser.add_argument('--n_res', type=int, default=4, help='The number of resblock')
parser.add_argument('--n_critic', type=int, default=1, help='The number of critic')
parser.add_argument('--sn', type=str2bool, default=True, help='using spectral norm')
parser.add_argument('--ch', type=int, default=64, help='base channel number per layer')
parser.add_argument('--n_dis', type=int, default=4, help='The number of discriminator layer')
parser.add_argument('--img_size', type=int, default=256, help='The size of image')
parser.add_argument('--img_ch', type=int, default=3, help='The size of image channel')
parser.add_argument('--log_dir', type=str, default='logs', help='Directory name to save training logs')
parser.add_argument('--augment_flag', type=str2bool, default=True, help='Image augmentation use or not')
parser.add_argument('--result_dir', type=str, default='results', help='Directory name to save the generated images')
parser.add_argument('--checkpoint_dir', type=str, default='checkpoint', help='Directory name to save the checkpoints')
parser.add_argument('--sample_dir', type=str, default='samples', help='Directory name to save the samples on training')
return check_args(parser.parse_args())
"""checking arguments"""
def check_args(args):
# --checkpoint_dir
check_folder(args.checkpoint_dir)
# --result_dir
check_folder(args.result_dir)
# --result_dir
check_folder(args.log_dir)
# --sample_dir
check_folder(args.sample_dir)
# --epoch
try:
assert args.epoch >= 1
except:
print('number of epochs must be larger than or equal to one')
# --batch_size
try:
assert args.batch_size >= 1
except:
print('batch size must be larger than or equal to one')
return args
""" main """
def main():
# parse arguments
args = parse_args()
if args is None:
exit()
# open session
# os.environ["CUDA_VISIBLE_DEVICES"] = '1'
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
gan = FBDE(sess, args)
# build graph
gan.build_model()
# show network architecture
show_all_variables()
if args.phase == 'train' :
gan.train()
print(" [*] Training finished!")
if args.phase == 'test' :
gan.test()
print(" [*] Test finished!")
if __name__ == '__main__':
main() | 2.71875 | 3 |
shuffles.py | trigunshin/tsm | 0 | 12763284 | <reponame>trigunshin/tsm
class DE:
    """Disenchant-value model: break-even cost per unit of dust produced."""

    def __init__(self, reagent_per_craft, reagent_price, name="None"):
        self.reagent_per_craft = reagent_per_craft
        self.reagent_price = reagent_price
        self.name = name
        # Disenchant constants.
        self.dust_per_shard = 20
        self.base_dust_per_de = 10
        self.shard_percent = .13
        self.dust_percent = 1 - self.shard_percent

    def _results_per_destroy(self):
        """Expected dust per disenchant: dust roll plus shard-converted roll."""
        dust_part = self.dust_percent * self.base_dust_per_de
        shard_part = self.shard_percent * self.dust_per_shard
        return dust_part + shard_part

    def price(self):
        """Break-even reagent cost (in copper) per unit of dust."""
        # Kept as a method so reagent fields can be edited before pricing.
        reagents_per_dust = self.reagent_per_craft / self._results_per_destroy()
        return reagents_per_dust * self.reagent_price

    def p(self):
        """price() expressed in gold (copper / 100 / 100)."""
        return self.price() / 100 / 100
class Ore(DE):
    """Ore shuffle: each craft consumes 60 ore."""
    def __init__(self, reagent_price):
        # ignore 30/30 crafts, they can be reflected in price
        # Explicit base-class call (not super()): DE is an old-style class in
        # the Python 2 dialect this file is written in.
        DE.__init__(self, 60, reagent_price, 'ore')
class Ink(DE):
    """Ink shuffle: each craft consumes 20 inks, priced from herb cost."""
    def __init__(self, reagent_price):
        DE.__init__(self, 20, self.ink_price(reagent_price), 'ink')
    def ink_price(self, average_herb_cost):
        """Cost of one ink derived from the average herb cost."""
        # price * 5 herbs / 2.3 inks (err on underestimating x3 ink proc)
        return average_herb_cost * 5 / 2.3
def info():
    """Print break-even dust prices for a spread of ore and herb price points."""
    # note: 1000 ore ~= 200 dust
    o_90 = Ore(9000)
    o_80 = Ore(8000)
    o_70 = Ore(7000)
    o_60 = Ore(6000)
    ores = [o_90, o_80, o_70, o_60]
    i15 = Ink(15000)
    i105 = Ink(10500)
    i1 = Ink(10000)
    i09 = Ink(9000)
    inks = [i15, i105, i1, i09]
    for cur in [ores, inks]:
        # Iterate the objects directly: the original unpacked tuples from a
        # comprehension but printed the *leaked* loop variable `d`, so every
        # row showed the last element's name. print(...) with a single
        # %-formatted argument works identically in Python 2 and 3.
        for d in cur:
            print('name:%s\treagent: %i\tresult_cost:%f' % (d.name, d.reagent_price, d.p()))
# starflower @
# note: 1000 ore ~= 200 dust
# Module-level scratch run: re-evaluates the same ore/ink price points at
# import time (appears to be leftovers from an interactive session; the
# expressions' results are discarded).
o_90 = Ore(9000)
o_80 = Ore(8000)
o_70 = Ore(7000)
o_60 = Ore(6000)
possibilities = [o_90, o_80, o_70, o_60]
[d.p() for d in possibilities]
i15 = Ink(15000)
i105 = Ink(10500)
i1 = Ink(10000)
i09 = Ink(9000)
# NOTE(review): i105 is created but omitted from this list — confirm intent.
inks = [i15, i1, i09]
[d.p() for d in inks]
| 2.796875 | 3 |
fhi_lib/distance_estimator.py | yhsueh/FHI_RCNN | 0 | 12763285 | import argparse
import os
import sys
import cv2
import numpy as np
from matplotlib import pyplot as plt
from functools import cmp_to_key
from fhi_lib.geometry import Point, Line
class DistanceEstimator():
    """Estimates real-world distances along a panel from a single image.

    Two light-green reference scales are detected by colour; their corners
    define an image-space coordinate frame, and the projective cross-ratio
    invariant then maps an image point to a physical distance along the panel.
    """

    def __init__(self, img):
        self.img = img
        # Physical lengths — presumably millimetres; confirm with the rig spec.
        self.panel_length = 2235
        self.scale_length = 100

    def initialize(self):
        """Detect both scales and derive all reference points/lines from them."""
        self.__find_scales()
        self.__form_reference_points()
        self.__shift_accessory_coordinate_init()
        print('Estimator initialized')

    def initialize_with_pt(self, pt):
        """Like initialize(), but override the far vertical point with `pt`."""
        self.__find_scales()
        self.__form_reference_points()
        self.vertical_pt2 = Point(pt)
        self.__shift_accessory_coordinate_init()
        print('Estimator initialized')

    def display_reference_pts(self, img):
        """Draw the four reference points onto `img` (circles + centre dots)."""
        img = cv2.circle(img, self.origin.get_point_tuple(), 20, (0,0,0), 3)
        img = cv2.circle(img, self.horizontal_pt.get_point_tuple(), 20, (0,255,0), 3)
        img = cv2.circle(img, self.vertical_pt.get_point_tuple(), 20, (255,0,0), 3)
        img = cv2.circle(img, self.vertical_pt2.get_point_tuple(), 20, (255,0,0), 3)
        img = cv2.circle(img, self.origin.get_point_tuple(), 0, (0,0,255), 3)
        img = cv2.circle(img, self.horizontal_pt.get_point_tuple(), 0, (0,0,255), 3)
        img = cv2.circle(img, self.vertical_pt.get_point_tuple(), 0, (0,0,255), 3)
        img = cv2.circle(img, self.vertical_pt2.get_point_tuple(), 0, (0,0,255), 3)
        return img

    def estimate(self, pt_itr):
        """Return the estimated distance for image point `pt_itr` as a string."""
        img_intersection = self.__shift_accessory_coordinate(pt_itr)
        dist = self.__cross_ratio(img_intersection)
        caption = '{}\n'.format(int(dist))
        return caption

    def __find_scales(self):
        ### Image Processing, convert rgb to hsv and find the scale by its color ###
        blur = cv2.GaussianBlur(self.img, (5,5), 0)
        img_hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
        img_threshold = cv2.inRange(img_hsv, (45,20,230), (90,220,255))
        # Dilate-then-erode (morphological closing) to fill small holes.
        morphology_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
        dilation = cv2.dilate(img_threshold, morphology_kernel, iterations=3)
        thresh = cv2.erode(dilation, morphology_kernel, iterations=3)
        '''
        compare_img = np.hstack((img_threshold,thresh))
        plt.imshow(compare_img)
        plt.show()
        '''
        ### Crop the image as we know the scale is always on the left half of the image ###
        cropped_thresh = thresh[:, 0:int(thresh.shape[1]/2)]
        contours, _ = cv2.findContours(image=cropped_thresh,
                                       mode=cv2.RETR_EXTERNAL,
                                       method=cv2.CHAIN_APPROX_SIMPLE)
        ### Discard contours that are not quadrilaterals and smaller than 4000 pixels###
        result_contours = {}
        epsilon = 30
        minimal_area = 1000
        for contour in contours:
            contour_area = cv2.contourArea(contour)
            if contour_area > minimal_area:
                hull = cv2.convexHull(contour)
                approxCurve = cv2.approxPolyDP(hull, epsilon, True)
                if len(approxCurve) == 4:
                    result_contours.update({contour_area : [approxCurve, contour]})
        self.__verify_shape(result_contours)
        # sort the dictionary based on the size of the area
        result_contours = sorted(result_contours.items())
        # pick the contour with the largest area as near scale, and the second as far scale
        # NOTE(review): this assumes exactly two candidates survive filtering;
        # with fewer it raises IndexError and with more than two, indices 0/1
        # are the *smallest* areas, not the largest — confirm.
        self.near_scale = result_contours[1]
        self.far_scale = result_contours[0]

    def __verify_shape(self, result_contours):
        # For a parallel shape, the length of the two opposite sides should be approximately the same.
        tolerance = 0.55
        remove_keys = []
        for key in result_contours.keys():
            pts = result_contours[key][0]
            pts = pts[:,0,:]
            pt1 = Point(pts[0])
            pt2 = Point(pts[1])
            pt3 = Point(pts[2])
            pt4 = Point(pts[3])
            dist1_2 = pt1.get_distance(pt2).astype(np.int)
            dist3_4 = pt3.get_distance(pt4).astype(np.int)
            dist1_4 = pt1.get_distance(pt4).astype(np.int)
            dist2_3 = pt2.get_distance(pt3).astype(np.int)
            # Reject if either opposite-side pair differs by more than the
            # tolerance relative to the shorter side.
            if np.absolute(dist1_2 - dist3_4) / np.min([dist1_2, dist3_4])> tolerance:
                remove_keys.append(key)
                continue
            elif np.absolute(dist1_4 - dist2_3) / np.min([dist1_4, dist2_3])> tolerance:
                remove_keys.append(key)
                continue
        for remove_key in remove_keys:
            del result_contours[remove_key]

    def __form_reference_points(self):
        """Orient both scale quadrilaterals and pick the four reference corners."""
        self.near_scale[1][0] = self.near_scale[1][0][:,0,:]
        self.far_scale[1][0] = self.far_scale[1][0][:,0,:]
        self.far_scale[1][0] = self.__set_orientation_hull(self.far_scale[1][0])
        self.near_scale[1][0] = self.__set_orientation_hull(self.near_scale[1][0])
        self.origin = Point(self.near_scale[1][0][1])
        self.vertical_pt = Point(self.near_scale[1][0][0])
        self.horizontal_pt = Point(self.near_scale[1][0][3])
        self.vertical_pt2 = Point(self.far_scale[1][0][0])

    def __set_orientation_hull(self, scale):
        # Assuming the scale is placed on the left half of the image.
        # The first vertex should be top left. If it's not the case, then reorder the verticies.
        order = scale[:,0].argsort()
        if order[0].astype(int) == 0:
            ## 1 2 ##
            ## 0 3 ##
            # The first vertex is at bottom left instead of top left. Reorder the verticies.
            scale = scale[[1,0,3,2]]
        elif order[0].astype(int) == 1:
            ## 2 3 ##
            ## 1 0 ##
            # The first vertex is at bottom left instead of top left. Reorder the verticies.
            scale = scale[[2,1,0,3]]
        elif order[0].astype(int) == 2:
            ## 3 0 ##
            ## 2 1 ##
            scale = scale[[3,2,1,0]]
        elif order[0].astype(int) == 3:
            ## 0 1 ##
            ## 3 2 ##
            scale = scale[[0,3,2,1]]
        return scale

    def __shift_accessory_coordinate_init(self):
        """Precompute the two reference lines in math (y-up) coordinates."""
        math_origin = self.origin.switch_coordinate_system(self.img)
        math_horizontal_pt = self.horizontal_pt.switch_coordinate_system(self.img)
        math_vertical_pt2 = self.vertical_pt2.switch_coordinate_system(self.img)
        math_vertical_pt = self.vertical_pt.switch_coordinate_system(self.img)
        self.vertical_reference_line = Line(math_origin, math_vertical_pt2)
        self.horizontal_reference_line = Line(math_vertical_pt, math_horizontal_pt)

    def __shift_accessory_coordinate(self, pt):
        """Project `pt` along the horizontal direction onto the vertical line."""
        math_pt = pt.switch_coordinate_system(self.img)
        slope_proj, intercept_proj = math_pt.get_projected_line(self.horizontal_reference_line.get_slope())
        math_intersection = self.vertical_reference_line.calculate_intersection(slope_proj, intercept_proj)
        img_intersection = math_intersection.switch_coordinate_system(self.img)
        return img_intersection

    def __cross_ratio(self, intersection):
        ### AC*BD/(CD*AB) = A'C'*B'D'/(C'D'*A'B') ###
        # Image cross ratio
        # AB(scale_length): origin to vertical_pt (scale_pixel_dist)
        # CD: accessory_pt to vertical_pt2
        # BD: vertical_pt to vertical_pt2
        # AC(interested_length): origin to accessory_pt
        AB = self.origin.get_distance(self.vertical_pt.get_point())
        CD = intersection.get_distance(self.vertical_pt2.get_point())
        BD = self.vertical_pt.get_distance(self.vertical_pt2.get_point())
        AC = self.origin.get_distance(intersection.get_point())
        image_ratio = AC*BD/CD/AB
        # World cross ratio
        ABw = self.scale_length
        ADw = self.panel_length
        BDw = self.panel_length - self.scale_length
        # Solve the world cross-ratio equation for the unknown distance ACw.
        ACw = image_ratio*ABw*ADw/(BDw+image_ratio*ABw)
        return ACw
tests/test_scan.py | translationalneurosurgery/app-vernier | 2 | 12763286 | <reponame>translationalneurosurgery/app-vernier
from subprocess import Popen, PIPE
def test_scan():
    """`vernier-lsl --scan` should list available devices on stdout."""
    proc = Popen(["vernier-lsl", "--scan"], stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    banner = "Available devices. Default sensors are marked by *."
    assert banner in out.decode()
| 2.28125 | 2 |
tests/rekey.py | g-ongetta/CCF | 0 | 12763287 | <reponame>g-ongetta/CCF
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.ccf
import infra.notification
import suite.test_requirements as reqs
import infra.e2e_args
import time
from loguru import logger as LOG
@reqs.description("Rekey the ledger once")
@reqs.supports_methods("mkSign")
@reqs.at_least_n_nodes(1)
def test(network, args):
    """Rekey the ledger on the primary and wait for the sealed secrets."""
    primary, _ = network.find_primary()
    # Retrieve current index version to check for sealed secrets later
    with primary.node_client() as nc:
        check_commit = infra.checker.Checker(nc)
        res = nc.rpc("mkSign")
        check_commit(res, result=True)
        version_before_rekey = res.commit
    network.consortium.rekey_ledger(member_id=1, remote_node=primary)
    network.wait_for_sealed_secrets_at_version(version_before_rekey)
    return network
# Run some write transactions against the logging app
def record_transactions(primary, txs_count=1):
    """Record `txs_count` log entries on `primary` and check each commit.

    The original looped over range(1, txs_count), which runs txs_count - 1
    iterations and therefore recorded nothing at all for the default of 1.
    """
    with primary.node_client() as nc:
        check_commit = infra.checker.Checker(nc)
        with primary.user_client() as c:
            for i in range(1, txs_count + 1):
                check_commit(
                    c.rpc("LOG_record", {"id": i, "msg": f"entry #{i}"}), result=True
                )
def run(args):
    """Start a two-node local network, rekey it, and write txs before/after."""
    hosts = ["localhost", "localhost"]
    with infra.ccf.network(
        hosts, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb,
    ) as network:
        network.start_and_join(args)
        primary, _ = network.find_primary()
        # Transactions both before and after the rekey exercise old and new keys.
        record_transactions(primary)
        test(network, args)
        record_transactions(primary)
if __name__ == "__main__":
    args = infra.e2e_args.cli_args()
    # Use the Lua generic app when a script is supplied, the C++ logging app otherwise.
    args.package = args.app_script and "libluageneric" or "liblogging"
    run(args)
| 1.882813 | 2 |
tefla/core/encoder.py | mkulariya1/tefla | 40 | 12763288 | <filename>tefla/core/encoder.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from collections import namedtuple
import copy
import yaml
import numpy as np
from .layers import conv1d, avg_pool_1d
from .rnn_cell import LSTMCell, MultiRNNCell, ExtendedMultiRNNCell
from . import logger as log
import six
import tensorflow as tf
# Alias kept for backward compatibility with older tf.contrib layouts.
core_rnn_cell = tf.contrib.rnn
# Standard result bundle every encoder returns.
EncoderOutput = namedtuple("EncoderOutput",
                           "outputs final_state attention_values attention_values_length")
class abstractstaticmethod(staticmethod):
    """Decorates a method as abstract and static."""

    __slots__ = ()

    def __init__(self, function):
        super(abstractstaticmethod, self).__init__(function)
        # Mark the wrapped function so ABCMeta treats it as abstract.
        function.__isabstractmethod__ = True

    __isabstractmethod__ = True
@six.add_metaclass(abc.ABCMeta)
class Configurable(object):
    """Interface for all classes that are configurable via a parameters
    dictionary.

    Args:
        params: A dictionary of parameters.
        mode: A value in tf.contrib.learn.ModeKeys
    """

    def __init__(self, params, mode, reuse=None):
        # Type-check and default-fill the user params against default_params().
        self._params = _parse_params(params, self.default_params())
        self._mode = mode
        self._reuse = reuse
        self._print_params()

    def _print_params(self):
        """Logs parameter values."""
        classname = self.__class__.__name__
        log.info("Creating %s in mode=%s", classname, self._mode)
        log.info("\n%s", yaml.dump({classname: self._params}))

    @property
    def mode(self):
        """Returns a value in tf.contrib.learn.ModeKeys."""
        return self._mode

    @property
    def reuse(self):
        """Returns the variable-reuse flag passed at construction."""
        return self._reuse

    @property
    def params(self):
        """Returns a dictionary of parsed parameters."""
        return self._params

    @abstractstaticmethod
    def default_params():
        """Returns a dictionary of default parameters.

        The default parameters are used to define the expected type of
        passed parameters. Missing parameter values are replaced with the
        defaults returned by this method.
        """
        raise NotImplementedError
class GraphModule(object):
    """Convenience class that makes it easy to share variables. Each insance of
    this class creates its own set of variables, but each subsequent execution of
    an instance will re-use its variables.

    Graph components that define variables should inherit from this class
    and implement their logic in the `_build` method.
    """

    def __init__(self, name):
        """Initialize the module. Each subclass must call this constructor with a
        name.

        Args:
            name: Name of this module. Used for `tf.make_template`.
        """
        self.name = name
        # make_template ensures variables are created once and reused on
        # every subsequent call of this instance.
        self._template = tf.make_template(name, self._build, create_scope_now_=True)
        # Docstrings for the class should be the docstring for the _build method
        self.__doc__ = self._build.__doc__
        self.__call__.__func__.__doc__ = self._build.__doc__

    def _build(self, *args, **kwargs):
        """Subclasses should implement their logic here."""
        raise NotImplementedError

    def __call__(self, *args, **kwargs):
        return self._template(*args, **kwargs)

    def variable_scope(self):
        """Returns the proper variable scope for this module."""
        return tf.variable_scope(self._template.variable_scope)
@six.add_metaclass(abc.ABCMeta)
class Encoder(GraphModule, Configurable):
    """Abstract encoder class. All encoders should inherit from this.

    Args:
        params: A dictionary of hyperparameters for the encoder.
        name: A variable scope for the encoder graph.
    """

    def __init__(self, params, mode, reuse, name):
        GraphModule.__init__(self, name)
        Configurable.__init__(self, params, mode, reuse)

    def _build(self, inputs, *args, **kwargs):
        # Calling the module delegates straight to encode() under the template.
        return self.encode(inputs, *args, **kwargs)

    @abc.abstractmethod
    def encode(self, *args, **kwargs):
        """Encodes an input sequence.

        Args:
            inputs: The inputs to encode. A float32 tensor of shape [B, T, ...].
            sequence_length: The length of each input. An int32 tensor of shape [T].

        Returns:
            An `EncoderOutput` tuple containing the outputs and final state.
        """
        raise NotImplementedError
class ConvEncoder(Encoder):
    """A deep convolutional encoder, as described in
    https://arxiv.org/abs/1611.02344. The encoder supports optional positions
    embeddings.

    Params:
        attention_cnn.units: Number of units in `cnn_a`. Same in each layer.
        attention_cnn.kernel_size: Kernel size for `cnn_a`.
        attention_cnn.layers: Number of layers in `cnn_a`.
        embedding_dropout_keep_prob: Dropout keep probability
            applied to the embeddings.
        output_cnn.units: Number of units in `cnn_c`. Same in each layer.
        output_cnn.kernel_size: Kernel size for `cnn_c`.
        output_cnn.layers: Number of layers in `cnn_c`.
        position_embeddings.enable: If true, add position embeddings to the
            inputs before pooling.
        position_embeddings.combiner_fn: Function used to combine the
            position embeddings with the inputs. For example, `tensorflow.add`.
        position_embeddings.num_positions: Size of the position embedding matrix.
            This should be set to the maximum sequence length of the inputs.
    """

    def __init__(self, params, mode, reuse=None, name="conv_encoder"):
        super(ConvEncoder, self).__init__(params, mode, reuse, name)
        # NOTE(review): hard-coded to tf.multiply — the
        # "position_embeddings.combiner_fn" param is never consulted; confirm.
        self._combiner_fn = tf.multiply

    @staticmethod
    def default_params():
        return {
            "attention_cnn.units": 512,
            "attention_cnn.kernel_size": 3,
            "attention_cnn.layers": 15,
            "embedding_dropout_keep_prob": 0.8,
            "output_cnn.units": 256,
            "output_cnn.kernel_size": 3,
            "output_cnn.layers": 5,
            "position_embeddings.enable": True,
            "position_embeddings.combiner_fn": "tensorflow.multiply",
            "position_embeddings.num_positions": 100,
        }

    def encode(self, inputs, sequence_length):
        # Optionally combine position embeddings into the inputs.
        if self.params["position_embeddings.enable"]:
            positions_embed = _create_position_embedding(
                embedding_dim=inputs.get_shape().as_list()[-1],
                num_positions=self.params["position_embeddings.num_positions"],
                lengths=sequence_length,
                maxlen=tf.shape(inputs)[1])
            inputs = self._combiner_fn(inputs, positions_embed)
        # Apply dropout to embeddings
        inputs = tf.contrib.layers.dropout(
            inputs=inputs,
            keep_prob=self.params["embedding_dropout_keep_prob"],
            is_training=self.mode == tf.contrib.learn.ModeKeys.TRAIN)
        # cnn_a: the stack whose outputs feed attention.
        with tf.variable_scope("cnn_a"):
            cnn_a_output = inputs
            for layer_idx in range(self.params["attention_cnn.layers"]):
                with tf.variable_scope('conv1d_' + str(layer_idx), reuse=self._reuse):
                    next_layer = conv1d(
                        cnn_a_output,
                        self.params["attention_cnn.units"],
                        self._mode,
                        self._reuse,
                        filter_size=self.params["attention_cnn.kernel_size"],
                        padding="SAME",
                        activation=None)
                    # Add a residual connection, except for the first layer
                    if layer_idx > 0:
                        next_layer += cnn_a_output
                    cnn_a_output = tf.tanh(next_layer)
        # cnn_c: the stack whose outputs become the attention values.
        with tf.variable_scope("cnn_c"):
            cnn_c_output = inputs
            for layer_idx in range(self.params["output_cnn.layers"]):
                with tf.variable_scope('conv1d_o_' + str(layer_idx), reuse=self._reuse):
                    next_layer = conv1d(
                        cnn_c_output,
                        self.params["output_cnn.units"],
                        self._mode,
                        self._reuse,
                        filter_size=self.params["output_cnn.kernel_size"],
                        padding="SAME",
                        activation=None)
                    # Add a residual connection, except for the first layer
                    if layer_idx > 0:
                        next_layer += cnn_c_output
                    cnn_c_output = tf.tanh(next_layer)
        # Final state is the mean over time of the output stack.
        final_state = tf.reduce_mean(cnn_c_output, 1)
        return EncoderOutput(
            outputs=cnn_a_output,
            final_state=final_state,
            attention_values=cnn_c_output,
            attention_values_length=sequence_length)
class PoolingEncoder(Encoder):
    """An encoder that pools over embeddings, as described in
    https://arxiv.org/abs/1611.02344. The encoder supports optional positions
    embeddings and a configurable pooling window.

    Params:
        dropout_keep_prob: Dropout keep probability applied to the embeddings.
        pooling_fn: The 1-d pooling function to use, e.g.
            `tensorflow.layers.average_pooling1d`.
        pool_size: The pooling window, passed as `pool_size` to
            the pooling function.
        strides: The stride during pooling, passed as `strides`
            the pooling function.
        position_embeddings.enable: If true, add position embeddings to the
            inputs before pooling.
        position_embeddings.combiner_fn: Function used to combine the
            position embeddings with the inputs. For example, `tensorflow.add`.
        position_embeddings.num_positions: Size of the position embedding matrix.
            This should be set to the maximum sequence length of the inputs.
    """

    def __init__(self, params, mode, reuse, name="pooling_encoder"):
        super(PoolingEncoder, self).__init__(params, mode, reuse, name)
        # NOTE(review): both functions are hard-coded; the "pooling_fn" and
        # "position_embeddings.combiner_fn" params are never consulted — confirm.
        self._pooling_fn = avg_pool_1d
        self._combiner_fn = tf.multiply

    @staticmethod
    def default_params():
        return {
            "dropout_keep_prob": 0.8,
            "pooling_fn": "tensorflow.layers.average_pooling1d",
            "pool_size": 5,
            "strides": 1,
            "position_embeddings.enable": True,
            "position_embeddings.combiner_fn": "tensorflow.multiply",
            "position_embeddings.num_positions": 100,
        }

    def encode(self, inputs, sequence_length):
        # Optionally combine position embeddings into the inputs.
        if self.params["position_embeddings.enable"]:
            positions_embed = _create_position_embedding(
                embedding_dim=inputs.get_shape().as_list()[-1],
                num_positions=self.params["position_embeddings.num_positions"],
                lengths=sequence_length,
                maxlen=tf.shape(inputs)[1])
            inputs = self._combiner_fn(inputs, positions_embed)
        # Apply dropout
        inputs = tf.contrib.layers.dropout(
            inputs=inputs,
            keep_prob=self.params["dropout_keep_prob"],
            is_training=self.mode == tf.contrib.learn.ModeKeys.TRAIN)
        outputs = self._pooling_fn(
            inputs, filter_size=self.params["pool_size"], stride=self.params["strides"], padding="SAME")
        # Final state is the average representation of the pooled embeddings
        final_state = tf.reduce_mean(outputs, 1)
        return EncoderOutput(
            outputs=outputs,
            final_state=final_state,
            attention_values=inputs,
            attention_values_length=sequence_length)
class UnidirectionalRNNEncoder(Encoder):
    """A unidirectional RNN encoder. Stacking should be performed as part of the
    cell.

    Args:
        cell: An instance of tf.contrib.rnn.RNNCell
        name: A name for the encoder
    """

    def __init__(self, params, mode, reuse=None, name="forward_rnn_encoder"):
        super(UnidirectionalRNNEncoder, self).__init__(params, mode, reuse, name)
        # Dropout is forced off outside of training mode.
        self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)

    @staticmethod
    def default_params():
        return {
            "rnn_cell": _default_rnn_cell_params(),
            "init_scale": 0.04,
        }

    def encode(self, inputs, sequence_length, **kwargs):
        # Uniform initializer in [-init_scale, init_scale] for all variables.
        scope = tf.get_variable_scope()
        scope.set_initializer(
            tf.random_uniform_initializer(-self.params["init_scale"], self.params["init_scale"]))
        cell = _get_rnn_cell(**self.params["rnn_cell"])
        outputs, state = tf.nn.dynamic_rnn(
            cell=cell, inputs=inputs, sequence_length=sequence_length, dtype=tf.float32, **kwargs)
        return EncoderOutput(
            outputs=outputs,
            final_state=state,
            attention_values=outputs,
            attention_values_length=sequence_length)
class BidirectionalRNNEncoder(Encoder):
    """A bidirectional RNN encoder. Uses the same cell for both the forward and
    backward RNN. Stacking should be performed as part of the cell.

    Args:
        cell: An instance of tf.contrib.rnn.RNNCell
        name: A name for the encoder
    """

    def __init__(self, params, mode, reuse=None, name="bidi_rnn_encoder"):
        super(BidirectionalRNNEncoder, self).__init__(params, mode, reuse, name)
        # Dropout is forced off outside of training mode.
        self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)

    @staticmethod
    def default_params():
        return {
            "rnn_cell": _default_rnn_cell_params(),
            "init_scale": 0.04,
        }

    def encode(self, inputs, sequence_length, **kwargs):
        scope = tf.get_variable_scope()
        scope.set_initializer(
            tf.random_uniform_initializer(-self.params["init_scale"], self.params["init_scale"]))
        # Separate cell instances (and thus separate weights) per direction.
        cell_fw = _get_rnn_cell(**self.params["rnn_cell"])
        cell_bw = _get_rnn_cell(**self.params["rnn_cell"])
        outputs, states = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=cell_fw,
            cell_bw=cell_bw,
            inputs=inputs,
            sequence_length=sequence_length,
            dtype=tf.float32,
            **kwargs)
        # Concatenate outputs and states of the forward and backward RNNs
        outputs_concat = tf.concat(outputs, 2)
        return EncoderOutput(
            outputs=outputs_concat,
            final_state=states,
            attention_values=outputs_concat,
            attention_values_length=sequence_length)
class StackBidirectionalRNNEncoder(Encoder):
    """A stacked bidirectional RNN encoder. Uses the same cell for both the
    forward and backward RNN. Stacking should be performed as part of the cell.

    Args:
        cell: An instance of tf.contrib.rnn.RNNCell
        name: A name for the encoder
    """

    def __init__(self, params, mode, reuse=None, name="stacked_bidi_rnn_encoder"):
        super(StackBidirectionalRNNEncoder, self).__init__(params, mode, reuse, name)
        # Dropout is forced off outside of training mode.
        self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)

    @staticmethod
    def default_params():
        return {
            "rnn_cell": _default_rnn_cell_params(),
            "init_scale": 0.04,
        }

    def encode(self, inputs, sequence_length, **kwargs):
        scope = tf.get_variable_scope()
        scope.set_initializer(
            tf.random_uniform_initializer(-self.params["init_scale"], self.params["init_scale"]))
        cell_fw = _get_rnn_cell(**self.params["rnn_cell"])
        cell_bw = _get_rnn_cell(**self.params["rnn_cell"])
        # stack_bidirectional_dynamic_rnn expects one cell per layer, so
        # unpack any MultiRNNCell into its constituent layer cells.
        cells_fw = _unpack_cell(cell_fw)
        cells_bw = _unpack_cell(cell_bw)
        result = core_rnn_cell.stack_bidirectional_dynamic_rnn(
            cells_fw=cells_fw,
            cells_bw=cells_bw,
            inputs=inputs,
            dtype=tf.float32,
            sequence_length=sequence_length,
            **kwargs)
        outputs_concat, _output_state_fw, _output_state_bw = result
        final_state = (_output_state_fw, _output_state_bw)
        return EncoderOutput(
            outputs=outputs_concat,
            final_state=final_state,
            attention_values=outputs_concat,
            attention_values_length=sequence_length)
def _parse_params(params, default_params):
"""Parses parameter values to the types defined by the default parameters.
Default parameters are used for missing values.
"""
# Cast parameters to correct types
if params is None:
params = {}
result = copy.deepcopy(default_params)
for key, value in params.items():
# If param is unknown, drop it to stay compatible with past versions
if key not in default_params:
raise ValueError("%s is not a valid model parameter" % key)
# Param is a dictionary
if isinstance(value, dict):
default_dict = default_params[key]
if not isinstance(default_dict, dict):
raise ValueError("%s should not be a dictionary", key)
if default_dict:
value = _parse_params(value, default_dict)
else:
# If the default is an empty dict we do not typecheck it
# and assume it's done downstream
pass
if value is None:
continue
if default_params[key] is None:
result[key] = value
else:
result[key] = type(default_params[key])(value)
return result
def _unpack_cell(cell):
  """Return the list of per-layer cells wrapped by `cell`.

  stack_bidirectional_dynamic_rnn expects one cell per layer, so a
  MultiRNNCell is split into its component cells; any other cell is
  treated as a single-layer stack.
  """
  if not isinstance(cell, MultiRNNCell):
    return [cell]
  return cell._cells  # pylint: disable=protected-access
def _default_rnn_cell_params():
"""Creates default parameters used by multiple RNN encoders."""
return {
"cell_params": {
"num_units": 128,
"reuse": None,
"keep_prob": 0.8
},
"num_layers": 1,
"residual_connections": False,
"residual_combiner": "add",
"residual_dense": False
}
def _toggle_dropout(cell_params, mode):
  """Returns a deep copy of `cell_params` with dropout disabled unless training."""
  params_copy = copy.deepcopy(cell_params)
  is_training = mode == tf.contrib.learn.ModeKeys.TRAIN
  if not is_training:
    # keep_prob == 1.0 means no dropout during eval/inference.
    params_copy["cell_params"]["keep_prob"] = 1.0
  return params_copy
def _position_encoding(sentence_size, embedding_size):
"""Position Encoding described in section 4.1 of End-To-End Memory Networks
(https://arxiv.org/abs/1503.08895).
Args:
sentence_size: length of the sentence
embedding_size: dimensionality of the embeddings
Returns:
A numpy array of shape [sentence_size, embedding_size] containing
the fixed position encodings for each sentence position.
"""
encoding = np.ones((sentence_size, embedding_size), dtype=np.float32)
ls = sentence_size + 1
le = embedding_size + 1
for k in range(1, le):
for j in range(1, ls):
encoding[j - 1, k - 1] = (1.0 - j / float(ls)) - (k / float(le)) * (1. - 2. * j / float(ls))
return encoding
def _create_position_embedding(embedding_dim, num_positions, lengths, maxlen):
  """Creates position embeddings.

  Args:
    embedding_dim: Dimensionality of the embeddings. An integer.
    num_positions: The number of positions to be embedded. For example,
      if you have inputs of length up to 100, this should be 100. An integer.
    lengths: The lengths of the inputs to create position embeddings for.
      An int32 tensor of shape `[batch_size]`.
    maxlen: The maximum length of the input sequence to create position
      embeddings for. An int32 tensor.

  Returns:
    A tensor of shape `[batch_size, maxlen, embedding_dim]` that contains
    embeddings for each position. All elements past `lengths` are zero.
  """
  # Create constant position encodings
  position_encodings = tf.constant(
      _position_encoding(num_positions, embedding_dim), name="position_encoding")
  # Slice to size of current sequence
  pe_slice = position_encodings[:maxlen, :]
  # Replicate encodings for each element in the batch; wrapping pe_slice in a
  # list adds the leading batch dimension that tf.tile then expands.
  batch_size = tf.shape(lengths)[0]
  pe_batch = tf.tile([pe_slice], [batch_size, 1, 1])
  # Mask out positions that are padded (zero everything past each length).
  positions_mask = tf.sequence_mask(lengths=lengths, maxlen=maxlen, dtype=tf.float32)
  positions_embed = pe_batch * tf.expand_dims(positions_mask, 2)
  return positions_embed
def _get_rnn_cell(cell_params,
                  num_layers=1,
                  residual_connections=False,
                  residual_combiner="add",
                  residual_dense=False):
  """Creates a new RNN Cell.

  (Docstring corrected: the previous version documented `cell_class` and
  dropout parameters that are not part of this signature.)

  Args:
    cell_params: A dictionary of parameters passed to the LSTMCell constructor.
    num_layers: Number of layers. When greater than 1, the cells are wrapped
      in an `ExtendedMultiRNNCell`.
    residual_connections: If true, add residual connections
      between all cells
    residual_combiner: How residual inputs are combined ("add" by default) --
      semantics defined by ExtendedMultiRNNCell.
    residual_dense: Presumably whether residuals connect every layer to every
      other layer -- see ExtendedMultiRNNCell; TODO confirm.

  Returns:
    An instance of `tf.contrib.rnn.RNNCell`.
  """
  # Build one LSTM cell per layer, all with the same hyperparameters.
  cells = []
  for _ in range(num_layers):
    cell = LSTMCell(**cell_params)
    cells.append(cell)
  if len(cells) > 1:
    final_cell = ExtendedMultiRNNCell(
        cells=cells,
        residual_connections=residual_connections,
        residual_combiner=residual_combiner,
        residual_dense=residual_dense)
  else:
    # Single layer: return the bare cell without a multi-cell wrapper.
    final_cell = cells[0]
  return final_cell
| 2.046875 | 2 |
.venv/lib/python3.8/site-packages/pycrop/__init__.py | amon-wanyonyi/publication | 0 | 12763289 | from .tools import contain, cover
| 0.972656 | 1 |
Data Scientist Career Path/11. Foundations of Machine Learning Supervised Learning/4. Classification/1. KNN/2. distance2.py | myarist/Codecademy | 23 | 12763290 | <gh_stars>10-100
# Each movie is represented as [runtime in minutes, release year].
star_wars = [125, 1977]
raiders = [115, 1981]
mean_girls = [97, 2004]


def distance(movie1, movie2):
    """Return the Euclidean distance between two equal-length feature vectors.

    Generalized from the original two-feature version: it now works for any
    number of dimensions, so extra movie features can be added without
    changing this function. For two features the result is identical.
    """
    squared_difference = 0
    for value1, value2 in zip(movie1, movie2):
        squared_difference += (value1 - value2) ** 2
    return squared_difference ** 0.5


print(distance(star_wars, raiders))
print(distance(star_wars, mean_girls))
policygradientpytorch.py | biancanevo/deeprl-snes | 10 | 12763291 | <gh_stars>1-10
# Agent that learns how to play a SNES game by using Proximal Policy Optimization
import retro
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
import argparse
import skimage
from skimage import color
from collections import deque
import envs
# Environment definitions
eps = np.finfo(np.float32).eps.item()
# Initialize device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def make_env(game, state, rewardscaling=1, pad_action=None):
    """Build the Gym Retro environment with the project's wrappers applied.

    Wrapping order matters: reward scaling first, then button remapping,
    then frame skipping.
    """
    wrapped = envs.SkipFrames(
        envs.ButtonsRemapper(
            envs.RewardScaler(retro.make(game=game, state=state), rewardscaling),
            game),
        pad_action=pad_action)
    return wrapped
def prepro(image):
    """Preprocess a uint8 RGB frame into a zero-centered grayscale image.

    BUGFIX: the column slice had been corrupted into an IPv6-like token
    ("fd00:..."), which is a syntax error; restored to `::4`, matching the
    row slice, so both spatial dimensions are downsampled by a factor of 4.
    """
    image = image[::4, ::4, :]  # downsample by factor of 4
    image = color.rgb2gray(image)  # turn to grayscale (floats in [0, 1])
    return image - 0.5  # 0-center
def discount_rewards(r, gamma=0.99):
    """Compute discounted returns for a 1D array of per-step rewards.

    Element t of the result is r[t] + gamma*r[t+1] + gamma^2*r[t+2] + ...
    The output array has the dtype of `np.zeros_like(r)`.
    """
    discounted = np.zeros_like(r)
    running = 0
    # Accumulate from the last step backwards.
    for t in range(len(r) - 1, -1, -1):
        running = running * gamma + r[t]
        discounted[t] = running
    return discounted
class Policy(nn.Module):
    """Pytorch CNN implementing a Policy (actor-critic: action head + value head)."""
    # Number of discrete actions; overwritten per-instance in __init__.
    action_shape = []

    def __init__(self, env, game, windowlength=4):
        """Builds the network.

        Args:
            env: environment whose `action_space.n` sizes the action head.
            game: game name; only used to pick the dense-layer input size,
                which differs between "Snes" and "Genesis" frame shapes.
                NOTE(review): any other name leaves `self.dense` undefined.
            windowlength: number of stacked grayscale frames per input.
        """
        super(Policy, self).__init__()
        self.action_shape = env.action_space.n
        self.conv1 = nn.Conv2d(windowlength, 32, kernel_size=8, stride=2)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=2)
        self.bn3 = nn.BatchNorm2d(128)
        # Flattened conv-output size depends on the console's frame shape.
        if "Snes" in game:
            self.dense = nn.Linear(3840, 512)
        elif "Genesis" in game:
            self.dense = nn.Linear(5120, 512)
        # Actor-critic heads: action probabilities and scalar state value.
        self.actionshead = nn.Linear(512, self.action_shape)
        self.valuehead = nn.Linear(512, 1)

    def forward(self, x):
        """Returns (action probabilities, state value) for a batch of inputs."""
        x = F.selu(self.bn1((self.conv1(x))))
        x = F.selu(self.bn2((self.conv2(x))))
        x = F.selu(self.bn3((self.conv3(x))))
        x = F.selu(self.dense(x.view(x.size(0), -1)))
        # NOTE(review): F.softmax without an explicit dim relies on deprecated
        # implicit-dim behavior -- confirm dim=1 is intended.
        return F.softmax(self.actionshead(x)), self.valuehead(x)

    def _outdist(self, state):
        """Computes the probability distribution over output units for one state."""
        probs, _ = self(state.float().unsqueeze(0))
        return Categorical(probs)

    def select_action(self, state):
        """Selects an action following the policy.

        Returns the selected action and the log probability of that action
        being selected.
        """
        m = self._outdist(state)
        action = m.sample()
        return action, m.log_prob(action)

    def action_logprobs_value(self, state, action):
        """Returns the log probability of `action` at `state` under this policy.

        Also returns the value of the current state, for convenience.
        """
        probs, value = self(state.float().unsqueeze(0))
        m = Categorical(probs)
        return m.log_prob(action), value

    def value(self, state):
        """Estimates the value of the given state."""
        _, value = self(state.float().unsqueeze(0))
        return value

    def entropy(self, state):
        """Returns the entropy of the policy for a given state."""
        return torch.sum(self._outdist(state).entropy())
def runepisode(env, policy, episodesteps, render, windowlength=4):
    """Runs an episode under the given policy.

    Args:
        env: environment to step through.
        policy: Policy network providing `select_action`.
        episodesteps: maximum number of steps to run.
        render: whether to render each frame.
        windowlength: number of stacked processed frames fed to the policy.

    Returns the episode history: a list of tuples in the form
    (observation, processed observation, logprobabilities, action, reward, terminal).
    """
    observation = env.reset()
    x = prepro(observation)
    # Frame stack: seed the window by repeating the first frame.
    statesqueue = deque([x for _ in range(windowlength)], maxlen=windowlength)
    xbatch = np.stack(statesqueue, axis=0)
    history = []
    for _ in range(episodesteps):
        if render:
            env.render()
        st = torch.tensor(xbatch).to(device)
        action, p = policy.select_action(st)
        # `action` is a 1-element tensor; unwrap it for the environment.
        newobservation, reward, done, info = env.step(action.tolist()[0])
        history.append((observation, xbatch, p, action, reward, done))
        if done:
            break
        # Slide the frame window forward with the new processed frame.
        observation = newobservation
        x = prepro(observation)
        statesqueue.append(x)
        xbatch = np.stack(statesqueue, axis=0)
    return history
def experiencegenerator(env, policy, episodesteps=None, render=False, windowlength=4, verbose=True):
    """Generates experience from the environment.

    If the environment episode ends, it is reset to continue acquiring
    experience, so the generator never terminates on its own.

    Yields experiences as tuples in the form:
    (observation, processed observation, logprobabilities, action, reward,
    new observation, new processed observation, terminal)
    """
    # Generate experiences indefinitely
    episode = 0
    totalsteps = 0
    episoderewards = []
    while True:
        # Reinitialize environment and frame stack at episode start.
        observation = env.reset()
        x = prepro(observation)
        statesqueue = deque([x for _ in range(windowlength)], maxlen=windowlength)
        xbatch = np.stack(statesqueue, axis=0)
        step = 0
        # Per-episode reward accumulator.
        rewards = 0
        while True:
            if render:
                env.render()
            action, lp = policy.select_action(torch.tensor(xbatch).to(device))
            # Detach from torch: plain Python scalars are yielded downstream.
            action = int(action)
            lp = float(lp)
            newobservation, reward, done, info = env.step(action)
            x = prepro(newobservation)
            statesqueue.append(x)
            newxbatch = np.stack(statesqueue, axis=0)
            yield (observation, xbatch, lp, action, reward, newobservation, newxbatch, done)
            rewards += reward
            step += 1
            # Stop at episode end or at the optional per-episode step cap.
            if done or (episodesteps is not None and step >= episodesteps):
                break
            observation = newobservation
            xbatch = newxbatch
        totalsteps += step
        episoderewards.append(rewards)
        episode += 1
        if verbose:
            print(f"Episode {episode} end, {totalsteps} total steps performed. Reward {rewards:.2f}, "
                  f"100-episodes average reward {np.mean(episoderewards[-100:]):.2f}")
def loadnetwork(env, checkpoint, restart, game):
    """Loads the policy network from a checkpoint.

    Args:
        env: environment used to size a freshly created network.
        checkpoint: path to a serialized policy (torch.save format).
        restart: if True, ignore any checkpoint and build a new network.
        game: game name, forwarded to the Policy constructor.

    Returns:
        A Policy instance moved to the global computation device.
    """
    if restart:
        policy = Policy(env, game)
        print("Restarted policy network from scratch")
    else:
        try:
            policy = torch.load(checkpoint)
            print(f"Resumed checkpoint {checkpoint}")
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Fall back to a fresh network only on ordinary
        # load failures (missing or unreadable/corrupt checkpoint).
        except Exception:
            policy = Policy(env, game)
            print(f"Checkpoint {checkpoint} not found, created policy network from scratch")
    policy.to(device)
    return policy
def ppostep(policy, optimizer, states, actions, baseprobs, values, advantages, epscut, gradclip,
            valuecoef, entcoef):
    """Performs a step of Proximal Policy Optimization.

    Arguments:
        - policy: policy network to optimize.
        - optimizer: pytorch optimizer algorithm to use.
        - states: iterable of gathered experience states
        - actions: iterable of performed actions in the states
        - baseprobs: current base probabilities of performing those actions
        - values: estimated values for each state
        - advantages: estimated advantage values for those actions
        - epscut: epsilon cut for policy gradient update
        - gradclip: maximum norm for clipping gradients
        - valuecoef: weight of value function loss
        - entcoef: weight of entropy function loss

    Returns the value of the losses computed in the optimization step.

    Reference: https://arxiv.org/pdf/1707.06347.pdf
    """
    optimizer.zero_grad()
    # Compute action probabilities and state values for current network parameters
    logprobs, newvalues = zip(*[policy.action_logprobs_value(st, ac) for st, ac in zip(states, actions)])
    newvalues = torch.stack([x[0][0] for x in newvalues])
    # Policy Gradients loss: clipped surrogate objective on probability ratios.
    newprobs = [torch.exp(logp) for logp in logprobs]
    probratios = [newprob / prob for prob, newprob in zip(baseprobs, newprobs)]
    clippings = [torch.min(rt * adv, torch.clamp(rt, 1 - epscut, 1 + epscut) * adv)
                 for rt, adv in zip(probratios, advantages)]
    pgloss = -torch.cat(clippings).mean()
    # Entropy bonus (negated: maximizing entropy encourages exploration).
    entropyloss = - entcoef * torch.mean(torch.stack([policy.entropy(st) for st in states]))
    # Value estimation loss
    # (newvalue - [advantage + oldvalue])^2, that is, make new value closer to
    # the estimated error in the old estimate. A clipped version is included.
    returns = advantages + values
    valuelosses = (newvalues - returns) ** 2
    # BUGFIX: the change in value estimate must be clipped symmetrically to
    # [-epscut, epscut]; the original torch.clamp call only set a lower bound.
    newvalues_clipped = values + torch.clamp(newvalues - values, -epscut, epscut)
    valuelosses_clipped = (newvalues_clipped - returns) ** 2
    valueloss = valuecoef * 0.5 * torch.mean(torch.max(valuelosses, valuelosses_clipped))
    # Total loss
    loss = pgloss + valueloss + entropyloss
    # Optimizer step, with gradients clipped to a max norm to avoid explosion.
    # clip_grad_norm_ is the in-place, non-deprecated spelling.
    loss.backward()
    torch.nn.utils.clip_grad_norm_(policy.parameters(), gradclip)
    optimizer.step()
    return loss, pgloss, valueloss, entropyloss
def generalized_advantage_estimation(values, rewards, lastvalue, gamma, lam):
    """Computes a Generalized Advantage Estimator (GAE).

    This estimator allows computing advantages for any state, even if the
    current episode is unfinished. To do so, value function estimates are
    exploited.

    Arguments:
        - values: estimated values for each state (a torch tensor -- it is
          converted via `.tolist()` below; TODO confirm callers always pass
          tensors).
        - rewards: iterable of obtained rewards for those states
        - lastvalue: estimated value after all the steps above have been
          performed (used to bootstrap the final delta)
        - gamma: rewards discount parameter
        - lam: GAE discount parameter.
            For lam close to 1 we have high-variance low-bias estimates
            For lam close to 0 we have low-variance high-bias estimates

    Returns a float32 numpy array of normalized advantages.

    Reference: https://arxiv.org/pdf/1506.02438.pdf
    """
    # Append the bootstrap value so deltas can look one step ahead.
    allvalues = np.asarray(values.tolist() + [lastvalue])
    # TD residuals: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t).
    deltas = rewards + gamma * allvalues[1:] - allvalues[:-1]
    # GAE is the (gamma*lam)-discounted sum of the residuals.
    advantages = discount_rewards(deltas, gamma*lam)
    # Normalize advantages to zero mean / unit variance for stable updates.
    advantages = (advantages - np.mean(advantages)) / (np.std(advantages) + eps)
    return advantages.astype(np.float32)
def adjust_learning_rate(optimizer, lr):
    """Set the learning rate of every parameter group in `optimizer` to `lr`."""
    for group in optimizer.param_groups:
        group["lr"] = lr
def train(game, state=None, render=False, checkpoint='policygradient.pt', episodesteps=10000, maxsteps=50000000,
          restart=False, minibatchsize=128, nminibatches=32, optimizersteps=30, epscut_start=0.1, epscut_end=0,
          gradclip=0.5, valuecoef=1, entcoef=0.01, gamma=0.99, lam=0.95, lr_start=0.00025,
          lr_end=0, rewardscaling=1, pad_action=None):
    """Trains a policy network with PPO.

    Repeatedly gathers minibatchsize*nminibatches steps of experience,
    computes GAE advantages, then runs `optimizersteps` optimizer epochs of
    minibatched PPO updates. Both the clipping epsilon and the learning rate
    are linearly annealed from their *_start to *_end values over `maxsteps`.
    The policy is checkpointed every 10 update rounds and at the end.
    """
    env = make_env(game=game, state=state, rewardscaling=rewardscaling, pad_action=pad_action)
    policy = loadnetwork(env, checkpoint, restart, game)
    print(policy)
    print("device: {}".format(device))
    optimizer = optim.Adam(policy.parameters(), lr=lr_start)
    expgen = experiencegenerator(env, policy, episodesteps=episodesteps, render=render)
    totalsteps = 0
    networkupdates = 0
    while totalsteps < maxsteps:
        # Annealings: linear interpolation over total training steps.
        epscut = np.interp(totalsteps, [0, maxsteps], [epscut_start, epscut_end])
        lr = np.interp(totalsteps, [0, maxsteps], [lr_start, lr_end])
        adjust_learning_rate(optimizer, lr)
        # Gather experiences
        samples = [next(expgen) for _ in range(minibatchsize*nminibatches)]
        totalsteps += minibatchsize * nminibatches
        _, states, logprobs, actions, rewards, _, newstates, terminals = zip(*samples)
        probs = [np.exp(lp) for lp in logprobs]
        # Detached value estimates for every visited state, plus a bootstrap
        # value for the state after the last step.
        values = torch.stack([policy.value(torch.tensor(st).to(device)).detach()[0][0] for st in states])
        lastvalue = policy.value(torch.tensor(newstates[-1]).to(device)).detach()[0][0]
        # Compute advantages
        advantages = generalized_advantage_estimation(
            values=values,
            rewards=rewards,
            lastvalue=lastvalue,
            gamma=gamma,
            lam=lam
        )
        advantages = torch.tensor(advantages).to(device)
        print(f"Explored {minibatchsize*nminibatches} steps")
        # Optimizer epochs
        for optstep in range(optimizersteps):
            losseshistory = []
            # Random shuffle of experiences
            idx = np.random.permutation(range(len(samples)))
            # One step of SGD for each minibatch. NOTE(review): the inner
            # comprehensions reuse `i` as their loop variable, shadowing the
            # minibatch index -- harmless in Python 3 but easy to misread.
            for i in range(nminibatches):
                batchidx = idx[i*minibatchsize:(i+1)*minibatchsize]
                losses = ppostep(
                    policy=policy,
                    optimizer=optimizer,
                    states=[torch.tensor(states[i]).to(device) for i in batchidx],
                    actions=[torch.tensor(actions[i]).to(device) for i in batchidx],
                    baseprobs=[probs[i] for i in batchidx],
                    advantages=advantages[batchidx],
                    values=values[batchidx],
                    epscut=epscut,
                    gradclip=gradclip,
                    valuecoef=valuecoef,
                    entcoef=entcoef
                )
                losseshistory.append(losses)
            loss, pgloss, valueloss, entropyloss = map(torch.stack, zip(*losseshistory))
            print(f"Optimizer iteration {optstep+1}: loss {torch.mean(loss):.3f} (pg {torch.mean(pgloss):.3f} "
                  f"value {torch.mean(valueloss):.3f} entropy {torch.mean(entropyloss):.3f})")
        # Free the experience batch before gathering the next one.
        del states, rewards, probs, actions, terminals
        # Save policy network from time to time
        networkupdates += 1
        if not networkupdates % 10:
            torch.save(policy, checkpoint)
    # Save final model
    torch.save(policy, checkpoint)
def test(game, state=None, render=False, checkpoint='policygradient.pt', saveanimations=False,
         episodesteps=10000, pad_action=None):
    """Tests a previously trained network.

    Runs episodes forever (Ctrl-C to stop), printing per-episode rewards and
    optionally saving mp4 playthroughs of both the raw and the processed
    (grayscale frame-stack) observations.
    """
    env = make_env(game=game, state=state, pad_action=pad_action)
    policy = loadnetwork(env, checkpoint, False, game)
    print(policy)
    print("device: {}".format(device))
    episode = 0
    episoderewards = []
    while True:
        # Run episode
        history = runepisode(env, policy, episodesteps, render)
        observations, states, _, _, rewards, _ = zip(*history)
        episoderewards.append(np.sum(rewards))
        print(f"Episode {episode} end, reward {np.sum(rewards)}. 5-episodes average reward "
              f"{np.mean(episoderewards[-5:]):.0f}")
        # Save animation (if requested)
        if saveanimations:
            envs.saveanimation(list(observations), f"{checkpoint}_episode{episode}.mp4")
            # Undo the -0.5 centering (see prepro) before converting to uint8.
            envs.saveanimation([skimage.img_as_ubyte(color.gray2rgb(st[-1] + 0.5)) for st in states],
                               f"{checkpoint}_processed_episode{episode}.mp4")
        episode += 1
        del history, observations, states, rewards
if __name__ == "__main__":
    # Command-line entry point: parse arguments and dispatch to train() or test().
    parser = argparse.ArgumentParser(description='Agent that learns how to play a SNES game by using a simple Policy '
                                                 'Gradient method.')
    parser.add_argument('game', type=str, help='Game to play. Must be a valid Gym Retro game')
    parser.add_argument('state', type=str, help='State (level) of the game to play')
    parser.add_argument('checkpoint', type=str, help='Checkpoint file in which to save learning progress')
    parser.add_argument('--render', action='store_true', help='Render game while playing')
    parser.add_argument('--saveanimations', action='store_true', help='Save mp4 files with playthroughs')
    parser.add_argument('--test', action='store_true', help='Run in test mode (no policy updates)')
    parser.add_argument('--restart', action='store_true', help='Ignore existing checkpoint file, restart from scratch')
    parser.add_argument('--optimizersteps', type=int, default=30, help='Number of optimizer steps in each PPO update')
    parser.add_argument('--episodesteps', type=int, default=10000, help='Max number of steps to run in each episode')
    parser.add_argument('--maxsteps', type=int, default=50000000, help='Max number of training steps')
    parser.add_argument('--rewardscaling', type=float, default=0.01, help='Scaling of rewards in training')
    parser.add_argument('--padaction', type=int, default=None, help='Index of action used to pad skipped frames')
    #TODO: maybe we are using too small batches? Check https://github.com/ray-project/ray/blob/master/examples/carla/train_ppo.py#L47
    args = parser.parse_args()
    if args.test:
        test(args.game, args.state, render=args.render, saveanimations=args.saveanimations,
             checkpoint=args.checkpoint, episodesteps=args.episodesteps, pad_action=args.padaction)
    else:
        train(args.game, args.state, render=args.render, checkpoint=args.checkpoint, restart=args.restart,
              optimizersteps=args.optimizersteps, episodesteps=args.episodesteps, maxsteps=args.maxsteps,
              rewardscaling=args.rewardscaling, pad_action=args.padaction)
| 2.5 | 2 |
utils/types.py | fomula91/todoapp | 4 | 12763292 | <reponame>fomula91/todoapp
# Classes used for annotations: a way of declaring a type to describe the
# expected form of a function's or class's argument or return value.
# `ok` of type bool.
class BooleanOk:
    """Annotation marker for a boolean `ok` field."""
    @staticmethod
    def __type__():
        return bool
# `users` of type dict.
class DictionaryUser:
    """Annotation marker describing the field types of a user document."""
    @staticmethod
    def __type__():
        fields = {
            "_id": str,
            "user_id": str,
            "user_name": str,
            "user_passwd": str
        }
        return fields
# `payload` of type dict.
class DictionaryPayload:
    """Annotation marker describing the field types of a token payload."""
    @staticmethod
    def __type__():
        fields = {
            "user_id": str,
            "user_name": str,
        }
        return fields
# `token` of type str.
class StringToken:
    """Annotation marker for a string token value."""
    @staticmethod
    def __type__():
        return str
# `user_id` of type str.
class StringUserId:
    """Annotation marker for a string user id."""
    @staticmethod
    def __type__():
        return str
# `message` of type str.
class StringMessage:
    """Annotation marker for a string message."""
    @staticmethod
    def __type__():
        return str
# `objectId` of type str.
class StringObjectId:
    """Annotation marker for a string object id."""
    @staticmethod
    def __type__():
        return str
# `words` of type list.
# NOTE(review): despite the name and the comment above, __type__ returns
# `str`, not `list` -- presumably the element type of a list of words;
# confirm against callers before changing.
class ArrayWords:
    """Annotation marker for a words collection."""
    @staticmethod
    def __type__():
        return str
| 2.515625 | 3 |
algorithms/implementation/fair_rations.py | avenet/hackerrank | 0 | 12763293 | <filename>algorithms/implementation/fair_rations.py
def get_distribution_steps(distribution):
    """Return the minimum loaves needed to make every count even, or -1.

    Implements HackerRank "Fair Rations": whenever a person has an odd
    number of loaves, give one loaf to them and one to the next person
    (2 loaves per fix). If the last person would be left odd, no fair
    distribution exists and -1 is returned.

    Unlike the original implementation, the caller's list is not mutated.
    """
    counts = list(distribution)  # work on a copy; don't mutate the argument
    distributed_loaves = 0
    for i in range(len(counts)):
        if counts[i] % 2 == 0:
            continue
        if i + 1 == len(counts):
            # Odd remainder at the last person: impossible to even out.
            return -1
        # Give a loaf to person i (implicitly making them even) and to i+1.
        counts[i + 1] += 1
        distributed_loaves += 2
    return distributed_loaves
# Read the number of people (only consumed to advance past the first line).
N = int(input().strip())
# Read the initial loaf counts, one integer per person.
current_distribution = [
    int(B_temp)
    for B_temp
    in input().strip().split(' ')
]
steps = get_distribution_steps(current_distribution)
# HackerRank expects the loaf count, or the literal "NO" when no fair
# distribution is possible (signalled by -1).
print(steps if steps != -1 else 'NO')
| 3.765625 | 4 |
tests/som/test_utilities.py | ifsm/apollon | 0 | 12763294 | <gh_stars>0
import unittest
from hypothesis import strategies as hst
from hypothesis import given
import numpy as np
from scipy.spatial import distance
from apollon.som import utilities as asu
from apollon.types import SomDims
dimension = hst.integers(min_value=2, max_value=50)
som_dims = hst.tuples(dimension, dimension, dimension)
"""
class TestMatch(unittest.TestCase):
def setUp(self) -> None:
self.weights = np.random.rand(100, 5)
self.data = np.random.rand(200, 5)
def test_returns_tuple(self) -> None:
res = asu.match(self.weights, self.data, 2, 'euclidean')
self.assertIsInstance(res, tuple)
def test_elements_are_arrays(self) -> None:
bmu, err = asu.match(self.weights, self.data, 'euclidean')
self.assertIsInstance(bmu, np.ndarray)
self.assertIsInstance(err, np.ndarray)
def test_correct_ordering(self) -> None:
kth = 5
bmu, err = asu.match(self.weights, self.data, 'euclidean')
wdists = distance.cdist(self.weights, self.data)
kswd = wdists.sort(axis=0)[:kth, :]
"""
class TestDistribute(unittest.TestCase):
    """Tests for asu.distribute."""

    def setUp(self) -> None:
        self.n_units = 400
        self.bmu = np.random.randint(0, self.n_units, 100)

    def test_returns_dict(self) -> None:
        # BUGFIX: this method was named `returns_dict`, so unittest never
        # discovered or ran it; test methods must start with `test`.
        res = asu.distribute(self.bmu, self.n_units)
        self.assertIsInstance(res, dict)
class TestSampleHist(unittest.TestCase):
    """Tests for asu.sample_hist."""

    def setUp(self) -> None:
        pass

    @given(som_dims)
    def test_rows_are_stochastic(self, dims: SomDims) -> None:
        # Each sampled weight vector should be a probability distribution,
        # i.e. every row sums to 1 (up to floating-point tolerance).
        weights = asu.sample_hist(dims)
        comp = np.isclose(weights.sum(axis=1), 1)
        self.assertTrue(comp.all())
class TestSamplePca(unittest.TestCase):
    """Tests for asu.sample_pca."""

    def setUp(self) -> None:
        pass

    @given(som_dims)
    def test_x(self, dims: SomDims) -> None:
        # NOTE(review): this only checks that sample_pca runs without raising;
        # it makes no assertion about `weights`. Consider asserting the
        # expected shape or other properties.
        weights = asu.sample_pca(dims)
"""
class TestSelfOrganizingMap(unittest.TestCase):
def setUp(self):
self.weights = np.load('data/test_weights.npy')
self.inp = np.load('data/test_inp.npy')
def test_best_match_computation(self):
test_bmu = np.load('data/bmu_idx_euc.npy')
test_err = np.load('data/bmu_err_euc.npy')
bmu, err = utilities.best_match(self.weights, self.inp, 'euclidean')
self.assertTrue(np.array_equiv(test_bmu, bmu))
self.assertTrue(np.array_equiv(test_err, err))
"""
if __name__ == '__main__':
    # Run all discovered tests when executed directly.
    unittest.main()
| 2.296875 | 2 |
arch/cpu/arm/CMSIS/CMSIS/DSP/PythonWrapper/setup.py | Lkiraa/Contiki-ng | 41 | 12763295 | from distutils.core import setup, Extension
import glob
import numpy
import config
import sys
import os
from config import ROOT
# Header search paths: CMSIS-DSP public/private includes plus the wrapper sources.
includes = [os.path.join(ROOT,"Include"),os.path.join(ROOT,"PrivateInclude"),os.path.join("cmsisdsp_pkg","src")]

# Platform-specific compiler flags.
if sys.platform == 'win32':
    cflags = ["-DWIN",config.cflags,"-DUNALIGNED_SUPPORT_DISABLE"]
    # Custom because a customized arm_math.h is required to build on windows
    # since the visual compiler and the win platform are
    # not supported by default in arm_math.h
else:
    cflags = ["-Wno-unused-variable","-Wno-implicit-function-declaration",config.cflags,"-D__GNUC_PYTHON__"]

# For each CMSIS-DSP function group, collect every C source file and drop the
# group's "collector" translation unit (it #includes the other sources and
# would otherwise cause duplicate definitions).
transform = glob.glob(os.path.join(ROOT,"Source","TransformFunctions","*.c"))
#transform.remove(os.path.join(ROOT,"Source","TransformFunctions","arm_dct4_init_q15.c"))
#transform.remove(os.path.join(ROOT,"Source","TransformFunctions","arm_rfft_init_q15.c"))
transform.remove(os.path.join(ROOT,"Source","TransformFunctions","TransformFunctions.c"))

support = glob.glob(os.path.join(ROOT,"Source","SupportFunctions","*.c"))
support.remove(os.path.join(ROOT,"Source","SupportFunctions","SupportFunctions.c"))

fastmath = glob.glob(os.path.join(ROOT,"Source","FastMathFunctions","*.c"))
fastmath.remove(os.path.join(ROOT,"Source","FastMathFunctions","FastMathFunctions.c"))

filtering = glob.glob(os.path.join(ROOT,"Source","FilteringFunctions","*.c"))
filtering.remove(os.path.join(ROOT,"Source","FilteringFunctions","FilteringFunctions.c"))

matrix = glob.glob(os.path.join(ROOT,"Source","MatrixFunctions","*.c"))
matrix.remove(os.path.join(ROOT,"Source","MatrixFunctions","MatrixFunctions.c"))

statistics = glob.glob(os.path.join(ROOT,"Source","StatisticsFunctions","*.c"))
statistics.remove(os.path.join(ROOT,"Source","StatisticsFunctions","StatisticsFunctions.c"))

complexf = glob.glob(os.path.join(ROOT,"Source","ComplexMathFunctions","*.c"))
complexf.remove(os.path.join(ROOT,"Source","ComplexMathFunctions","ComplexMathFunctions.c"))

basic = glob.glob(os.path.join(ROOT,"Source","BasicMathFunctions","*.c"))
basic.remove(os.path.join(ROOT,"Source","BasicMathFunctions","BasicMathFunctions.c"))

controller = glob.glob(os.path.join(ROOT,"Source","ControllerFunctions","*.c"))
controller.remove(os.path.join(ROOT,"Source","ControllerFunctions","ControllerFunctions.c"))

common = glob.glob(os.path.join(ROOT,"Source","CommonTables","*.c"))
common.remove(os.path.join(ROOT,"Source","CommonTables","CommonTables.c"))

# The Python wrapper itself; only the single generated module source is built.
#modulesrc = glob.glob(os.path.join("cmsisdsp_pkg","src","*.c"))
modulesrc = []
modulesrc.append(os.path.join("cmsisdsp_pkg","src","cmsismodule.c"))

# Native extension combining all function groups plus the wrapper module.
module1 = Extension(config.extensionName,
                    sources = (support
                               + fastmath
                               + filtering
                               + matrix
                               + statistics
                               + complexf
                               + basic
                               + controller
                               + transform
                               + modulesrc
                               + common
                               )
                    ,
                    include_dirs = includes + [numpy.get_include()],
                    #extra_compile_args = ["-Wno-unused-variable","-Wno-implicit-function-declaration",config.cflags]
                    extra_compile_args = cflags
                    )

setup (name = config.setupName,
       version = '0.0.1',
       description = config.setupDescription,
       ext_modules = [module1],
       author = 'Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.',
       url="https://github.com/ARM-software/CMSIS_5",
       classifiers=[
           "Programming Language :: Python",
           "License :: OSI Approved :: Apache Software License",
           "Operating System :: OS Independent",
       ])
| 1.820313 | 2 |
tensorflow_privacy/privacy/keras_models/dp_keras_model_test.py | SoaringChicken/tensorflow-privacy | 0 | 12763296 | # Copyright 2021, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.keras_models import dp_keras_model
def get_data():
    """Return a single-example dataset with a known linear relationship.

    Labels follow data @ [[3], [1]] + 2, i.e. hidden weights [3, 1] and
    bias 2. With mean squared loss, we expect loss = 15^2 = 225, gradients
    of weights = [90, 120], and gradient of bias = 30.
    """
    features = np.array([[3, 4]])
    targets = np.matmul(features, [[3], [1]]) + 2
    return features, targets
class DPKerasModelTest(tf.test.TestCase, parameterized.TestCase):
def testBaseline(self):
    """Tests that DPSequential works when DP-SGD has no effect."""
    train_data, train_labels = get_data()
    # Simple linear model returns w * x + b.
    model = dp_keras_model.DPSequential(
        l2_norm_clip=1.0e9,  # effectively no clipping
        noise_multiplier=0.0,  # no noise
        layers=[
            tf.keras.layers.InputLayer(input_shape=(2,)),
            tf.keras.layers.Dense(
                1, kernel_initializer='zeros', bias_initializer='zeros')
        ])
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
    loss = tf.keras.losses.MeanSquaredError()
    model.compile(optimizer=optimizer, loss=loss)
    model.fit(train_data, train_labels, epochs=1, batch_size=1)
    model_weights = model.get_weights()
    # Check parameters are as expected, taking into account the learning rate.
    # One plain SGD step moves weights by lr * gradient: 0.01 * [90, 120]
    # and 0.01 * 30 (see get_data for the expected gradients).
    self.assertAllClose(model_weights[0], [[0.90], [1.20]])
    self.assertAllClose(model_weights[1], [0.30])
@parameterized.named_parameters(
    ('l2_norm_clip 10.0', 10.0),
    ('l2_norm_clip 40.0', 40.0),
    ('l2_norm_clip 200.0', 200.0),
)
def testClippingNorm(self, l2_norm_clip):
    """Tests that clipping norm works."""
    train_data, train_labels = get_data()
    # Simple linear model returns w * x + b.
    model = dp_keras_model.DPSequential(
        l2_norm_clip=l2_norm_clip,
        noise_multiplier=0.0,  # isolate clipping from noise
        layers=[
            tf.keras.layers.InputLayer(input_shape=(2,)),
            tf.keras.layers.Dense(
                1, kernel_initializer='zeros', bias_initializer='zeros')
        ])
    learning_rate = 0.01
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
    loss = tf.keras.losses.MeanSquaredError()
    model.compile(optimizer=optimizer, loss=loss)
    model.fit(train_data, train_labels, epochs=1, batch_size=1)
    model_weights = model.get_weights()
    # The full gradient [90, 120, 30] is rescaled so its joint L2 norm does
    # not exceed l2_norm_clip; the SGD step then applies the scaled gradient.
    unclipped_gradient = np.sqrt(90**2 + 120**2 + 30**2)
    scale = min(1.0, l2_norm_clip / unclipped_gradient)
    expected_weights = np.array([[90], [120]]) * scale * learning_rate
    expected_bias = np.array([30]) * scale * learning_rate
    # Check parameters are as expected, taking into account the learning rate.
    self.assertAllClose(model_weights[0], expected_weights)
    self.assertAllClose(model_weights[1], expected_bias)
def _compute_expected_gradients(self, data, labels, w, l2_norm_clip,
                                num_microbatches):
    """Computes the DP-SGD clipped gradient for a linear model in NumPy.

    Mirrors the DP pipeline: per-example gradients are averaged within each
    microbatch, each microbatch gradient is clipped to `l2_norm_clip`, and
    the clipped gradients are averaged across microbatches.
    """
    batch_size = data.shape[0]
    if num_microbatches is None:
        # Default: one microbatch per example.
        num_microbatches = batch_size
    preds = np.matmul(data, w)
    # Per-example gradient of the squared error w.r.t. w (sign convention
    # matches the Keras model under test -- presumably; TODO confirm).
    grads = 2 * data * (labels - preds)[:, np.newaxis]
    grads = np.reshape(grads,
                       [num_microbatches, batch_size // num_microbatches, -1])
    mb_grads = np.mean(grads, axis=1)
    mb_grad_norms = np.linalg.norm(mb_grads, axis=1)
    # Scale down any microbatch gradient whose L2 norm exceeds the bound.
    scale = np.minimum(l2_norm_clip / mb_grad_norms, 1.0)
    mb_grads = mb_grads * scale[:, np.newaxis]
    final_grads = np.mean(mb_grads, axis=0)
    return final_grads
@parameterized.named_parameters(
    ('mb_test 0', 1.0, None),
    ('mb_test 1', 1.0, 1),
    ('mb_test 2', 1.0, 2),
    ('mb_test 4', 1.0, 4),
)
def testMicrobatches(self, l2_norm_clip, num_microbatches):
    """Checks one DP-SGD step against the NumPy reference for several microbatch counts."""
    train_data = np.array([[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]])
    w = np.zeros((2))
    train_labels = np.array([1.0, 3.0, -2.0, -4.0])
    learning_rate = 1.0

    # Reference computation (see _compute_expected_gradients).
    expected_grads = self._compute_expected_gradients(train_data, train_labels,
                                                      w, l2_norm_clip,
                                                      num_microbatches)
    expected_weights = np.squeeze(learning_rate * expected_grads)

    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
    loss = tf.keras.losses.MeanSquaredError()
    # Simple linear model returns w * x + b.
    model = dp_keras_model.DPSequential(
        l2_norm_clip=l2_norm_clip,
        noise_multiplier=0.0,
        num_microbatches=num_microbatches,
        layers=[
            tf.keras.layers.InputLayer(input_shape=(2,)),
            tf.keras.layers.Dense(
                1, use_bias=False, kernel_initializer='zeros')
        ])
    model.compile(optimizer=optimizer, loss=loss)
    # shuffle=False so microbatch grouping matches the reference computation.
    model.fit(train_data, train_labels, epochs=1, batch_size=4, shuffle=False)

    model_weights = np.squeeze(model.get_weights())
    self.assertAllClose(model_weights, expected_weights)
  @parameterized.named_parameters(
      ('noise_multiplier 3 2 1', 3.0, 2.0, 1),
      ('noise_multiplier 5 4 1', 5.0, 4.0, 1),
      ('noise_multiplier 3 2 2', 3.0, 2.0, 2),
      ('noise_multiplier 5 4 2', 5.0, 4.0, 2),
      ('noise_multiplier 3 2 4', 3.0, 2.0, 4),
      ('noise_multiplier 5 4 4', 5.0, 4.0, 4),
  )
  def testNoiseMultiplier(self, l2_norm_clip, noise_multiplier,
                          num_microbatches):
    """Tests that the added DP noise has the expected standard deviation."""
    # The idea behind this test is to start with a model whose parameters
    # are set to zero. We then run one step of a model that produces
    # an un-noised gradient of zero, and then compute the standard deviation
    # of the resulting weights to see if it matches the expected standard
    # deviation.
    # Data is one example of length 1000, set to zero, with label zero.
    train_data = np.zeros((4, 1000))
    train_labels = np.array([0.0, 0.0, 0.0, 0.0])
    learning_rate = 1.0
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
    loss = tf.keras.losses.MeanSquaredError()
    # Simple linear model returns w * x + b.
    model = dp_keras_model.DPSequential(
        l2_norm_clip=l2_norm_clip,
        noise_multiplier=noise_multiplier,
        num_microbatches=num_microbatches,
        layers=[
            tf.keras.layers.InputLayer(input_shape=(1000,)),
        tf.keras.layers.Dense(
                1, kernel_initializer='zeros', bias_initializer='zeros')
        ])
    model.compile(optimizer=optimizer, loss=loss)
    model.fit(train_data, train_labels, epochs=1, batch_size=4)
    model_weights = model.get_weights()
    # 1000 kernel entries give a large enough sample for a std estimate.
    measured_std = np.std(model_weights[0])
    expected_std = l2_norm_clip * noise_multiplier / num_microbatches
    # Test standard deviation is close to l2_norm_clip * noise_multiplier.
    self.assertNear(measured_std, expected_std, 0.1 * expected_std)
  # Simple check to make sure dimensions are correct when output has
  # dimension > 1.
  @parameterized.named_parameters(
      ('mb_test None 1', None, 1),
      ('mb_test 1 2', 1, 2),
      ('mb_test 2 2', 2, 2),
      ('mb_test 4 4', 4, 4),
  )
  def testMultiDimensionalOutput(self, num_microbatches, output_dimension):
    """Smoke test: training must not crash for multi-dimensional outputs."""
    train_data = np.array([[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]])
    train_labels = np.array([0, 1, 1, 0])
    learning_rate = 1.0
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    # Huge clip bound and zero noise: DP machinery runs but does not
    # change the result; only shape handling is under test here.
    model = dp_keras_model.DPSequential(
        l2_norm_clip=1.0e9,
        noise_multiplier=0.0,
        num_microbatches=num_microbatches,
        layers=[
            tf.keras.layers.InputLayer(input_shape=(2,)),
            tf.keras.layers.Dense(
                output_dimension, use_bias=False, kernel_initializer='zeros')
        ])
    model.compile(optimizer=optimizer, loss=loss_fn)
    model.fit(train_data, train_labels, epochs=1, batch_size=4, shuffle=False)
  # Checks that calls to earlier API using `use_xla` as a positional argument
  # raise an exception.
  @parameterized.named_parameters(
      ('earlier API True', True),
      ('earlier API False', False),
  )
  def testEarlierAPIFails(self, use_xla):
    """Passing use_xla positionally (removed API) must raise ValueError."""
    with self.assertRaises(ValueError):
      _ = dp_keras_model.DPSequential(
          1.0e9,
          0.0,
          use_xla,
          layers=[
              tf.keras.layers.InputLayer(input_shape=(2,)),
              tf.keras.layers.Dense(
                  2, use_bias=False, kernel_initializer='zeros')
          ])
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
| 2.65625 | 3 |
transifex-data-fetcher.py | dhis2/translation-linker | 0 | 12763297 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: philld
"""
import os
import json
from transifex.api import transifex_api
print(os.getenv("PWD"))
# Authenticate against the Transifex API with the token from the environment.
transifex_api.setup(auth=os.getenv("TX_TOKEN"))
organization = transifex_api.Organization.get(slug="hisp-uio")
projects = organization.fetch('projects')
langs = set([])   # NOTE(review): currently unused
tr = {}           # NOTE(review): currently unused
lang_stats = {}     # version -> language -> total translated strings
lang_statsall = {}  # version -> project name -> resource slug -> language -> % translated
word_stats = {}     # NOTE(review): currently unused
projmap = {}        # project name -> project slug (APP projects only)
# DHIS2 versions we report on; everything else is ignored.
versions = ("29","30","31","32","33","34","35","36","37","master")
# feature-toggling.json maps a project homepage URL to extra versions that
# should also receive its "master" resource statistics.
ft = open('data/feature-toggling.json',)
togglers = json.load(ft)
ft.close()
langmap = {}  # language code -> human-readable language name
for l in transifex_api.Language.all():
    langmap[l.code] = l.name
for p in projects:
    # Only app projects are tracked; they are prefixed "APP:" or "APP-".
    if p.name[0:4] in ("APP:","APP-"):
        projmap[p.name] = p.slug
        print(p.name)
        resources = p.fetch('resources')
        for r in resources:
            # print('\t',r["name"], "[", r["slug"],"]")
            r_slug = r.attributes['slug']
            # Derive the DHIS2 version from the resource slug, e.g.
            # "v2-33--something" -> "33"; "master--x" -> "master".
            base_version = r_slug.split('--')[0].replace('2-','').replace('v','').replace('-x','')
            # print(r["slug"],' ---> ',version)
            version_list = [base_version]
            if base_version == 'master':
                # Feature-toggled apps count their master stats toward
                # additional released versions too.
                if p.homepage_url in togglers:
                    version_list += togglers[p.homepage_url]
            for version in version_list:
                if version in versions:
                    if version not in lang_stats:
                        lang_stats[version] = {}
                        lang_statsall[version] = {}
                    for s in transifex_api.ResourceLanguageStats.filter(project=p, resource=r):
                        # Language id looks like "l:<code>"; keep the code.
                        language = s.language.id.split(':')[1]
                        trans = s.attributes['translated_strings']
                        tot = s.attributes['total_strings']
                        if language in lang_stats[version]:
                            lang_stats[version][language] = lang_stats[version][language] + s.attributes['translated_strings']
                        else:
                            lang_stats[version][language] = s.attributes['translated_strings']
                        if p.name not in lang_statsall[version]:
                            lang_statsall[version][p.name] = {}
                        if r_slug not in lang_statsall[version][p.name]:
                            lang_statsall[version][p.name][r_slug] = {}
                        if tot == 0:
                            # Avoid dividing by zero for empty resources.
                            lang_statsall[version][p.name][r_slug][language] = "0%"
                        else:
                            lang_statsall[version][p.name][r_slug][language] = f"{trans/tot:.1%}"
# Build the language index from the "master" stats, sorted by display name.
mylangs = lang_stats["master"]
lango = {}
for l in mylangs:
    name = langmap[l]
    lango[l] = name
mysortedLangs = {k: v for k, v in sorted(mylangs.items(), key=lambda item: item[1],reverse=True)}
langos = {k: v for k, v in sorted(lango.items(), key=lambda item: item[1],reverse=False)}
stats = {"versions": versions, "overview" : lang_stats,"details":lang_statsall,"languages": langos, "projects":projmap }
# Emit as a JS assignment ("transifex = {...};") so a page can <script> it.
f = open("./data/transifex.json","w")
f.write("transifex = "+json.dumps(stats,indent=2)+";")
f.close()
| 1.984375 | 2 |
eta/modules/video_stream_info.py | saiprakash-c/504 | 0 | 12763298 | #!/usr/bin/env python
'''
A module for getting the stream info for a video.
Info:
type: eta.core.types.Module
version: 0.1.0
Copyright 2017-2018, Voxel51, LLC
voxel51.com
<NAME>, <EMAIL>
'''
# pragma pylint: disable=redefined-builtin
# pragma pylint: disable=unused-wildcard-import
# pragma pylint: disable=wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
# pragma pylint: enable=redefined-builtin
# pragma pylint: enable=unused-wildcard-import
# pragma pylint: enable=wildcard-import
import logging
import sys
from eta.core.config import Config
import eta.core.module as etam
import eta.core.video as etav
logger = logging.getLogger(__name__)
class VideoStreamInfoConfig(etam.BaseModuleConfig):
    '''Video stream info configuration settings.

    Attributes:
        data (DataConfig): one entry per input video to inspect
    '''

    def __init__(self, d):
        super(VideoStreamInfoConfig, self).__init__(d)
        # "data" is an array so one module run can process several videos.
        self.data = self.parse_object_array(d, "data", DataConfig)
class DataConfig(Config):
    '''Data configuration settings.

    Inputs:
        video (eta.core.types.Video): The input video

    Outputs:
        stream_info (eta.core.types.VideoStreamInfo): The video stream info
    '''

    def __init__(self, d):
        # Both fields are paths: an existing video and the JSON to write.
        self.video = self.parse_string(d, "video")
        self.stream_info = self.parse_string(d, "stream_info")
def _get_stream_info(stream_info_config):
    '''Probes each configured video and writes its stream info as JSON.'''
    for data_config in stream_info_config.data:
        logger.info("Reading stream info for %s", data_config.video)
        vsi = etav.VideoStreamInfo.build_for(data_config.video)
        vsi.write_json(data_config.stream_info)
def run(config_path, pipeline_config_path=None):
    '''Run the video_stream_info module.

    Args:
        config_path: path to a VideoStreamInfoConfig file
        pipeline_config_path: optional path to a PipelineConfig file
    '''
    stream_info_config = VideoStreamInfoConfig.from_json(config_path)
    # Standard ETA module setup (logging, pipeline wiring) before the work.
    etam.setup(stream_info_config, pipeline_config_path=pipeline_config_path)
    _get_stream_info(stream_info_config)
# CLI entry point: first argument is the config path, second (optional)
# the pipeline config path.
if __name__ == "__main__":
    run(*sys.argv[1:])
| 2.359375 | 2 |
src/extensions/cleanup/cleanup.py | mikeshardmind/salamander | 2 | 12763299 | # Copyright 2020-present <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import asyncio
import logging
from datetime import datetime, timedelta, timezone
import discord
from discord.ext import commands
from ...bot import Salamander, SalamanderContext, UserFeedbackError
from ...checks import admin, mod_or_perms
from ...utils import Waterfall
from ...utils.parsing import parse_positive_number, parse_snowflake
log = logging.getLogger("salamander.extensions.cleanup")
class Cleanup(commands.Cog):
    """Quick message cleanup"""

    @commands.max_concurrency(1, commands.BucketType.guild)
    @commands.bot_has_guild_permissions(manage_messages=True, read_message_history=True)
    @admin()
    @commands.command()
    async def removegone(self, ctx: SalamanderContext):
        """
        Removes messages from those who can no longer see the channel

        Can be used if handling deletion requests for privacy reasons

        Is intentionally very slow, limited to admins only, and can only run one at a time
        """
        assert not isinstance(ctx.channel, (discord.DMChannel, discord.PartialMessageable, discord.GroupChannel))
        if not await ctx.yes_or_no(
            "Are you sure you want to remove all messages from any user who cannot see this channel? (yes, no)",
            delete_on_return=True,
        ):
            return
        informational = await ctx.send("This may take a while, I'll inform you when it is done.")

        # Serialize deletion batches so overlapping waterfall flushes cannot
        # interleave API calls.
        lock = asyncio.Lock()

        async def safe_slow_delete(msgs):
            async with lock:
                if msgs:
                    if len(msgs) == 1:
                        try:
                            await msgs[0].delete()
                        except discord.NotFound:
                            pass
                        # BUG FIX: return here. The original fell through to
                        # the bulk path below and handed the already-deleted
                        # message to delete_messages, which (for a single
                        # message) performs a plain delete and raises NotFound.
                        return
                    # Bulk deletion only works for messages younger than 14
                    # days; some wiggle room included.
                    cutoff = datetime.now(timezone.utc) - timedelta(days=13, hours=22)
                    mass_deletable = []
                    for m in msgs:
                        if m.created_at > cutoff:
                            mass_deletable.append(m)
                        else:
                            # Too old for bulk delete; delete individually,
                            # slowly, to stay well under ratelimits.
                            try:
                                await m.delete()
                            except discord.NotFound:
                                pass
                            await asyncio.sleep(2)
                    if mass_deletable:
                        assert not isinstance(
                            ctx.channel, (discord.DMChannel, discord.PartialMessageable, discord.GroupChannel)
                        )
                        await ctx.channel.delete_messages(mass_deletable)
                        await asyncio.sleep(1)

        waterfall = Waterfall(12, 100, safe_slow_delete)
        try:
            waterfall.start()
            member_ids = {m.id for m in ctx.channel.members}
            async for msg in ctx.history(limit=None, before=informational):
                # artificial delay to avoid saturating ratelimits for something allowed to be a slow process
                # This one takes a hit once every 100 messages under the hood, making this ~ 8s/100m
                await asyncio.sleep(0.08)
                if msg.author.id not in member_ids:
                    waterfall.put(msg)
        except Exception as exc:
            log.exception("Error during removegone", exc_info=exc)
            await waterfall.stop(wait=True)
            await ctx.send(
                f"{ctx.author.mention} something went wrong during the "
                "message removal process. The error has been logged.",
                allowed_mentions=discord.AllowedMentions(users=[ctx.author]),
            )
        else:
            await waterfall.stop(wait=True)
            await ctx.send(
                f"{ctx.author.mention} The message removal process has finished.",
                allowed_mentions=discord.AllowedMentions(users=[ctx.author]),
            )

    @removegone.error
    async def concurrency_fail(self, ctx: SalamanderContext, exc: commands.CommandError):
        # Friendly message when max_concurrency blocks a second invocation.
        if isinstance(exc, commands.MaxConcurrencyReached):
            await ctx.send("That command is already running for a channel in this server.")

    @commands.bot_has_guild_permissions(manage_messages=True, read_message_history=True)
    @mod_or_perms(manage_messages=True)
    @commands.group()
    async def cleanup(self, ctx: SalamanderContext):
        """Message cleanup tools"""
        if ctx.invoked_subcommand is None:
            await ctx.send_help()

    @cleanup.command(name="number")
    async def cleanup_number(self, ctx: SalamanderContext, number):
        """Cleanup some number of messages within the last 10 days."""
        limit = parse_positive_number(number, 1e7)
        if not limit:
            raise UserFeedbackError(custom_message="You must provide a positive number of 1 million or less.")
        if limit > 100:
            # Large deletions get an explicit confirmation step.
            if not await ctx.yes_or_no(
                f"Are you sure you want to delete up to {limit} messages?",
                delete_on_return=True,
            ):
                return
        await self._cleanup(ctx, limit=limit)

    @cleanup.command(name="before")
    async def cleanup_before(self, ctx: SalamanderContext, before):
        """Cleanup messages before a specific message ID within the last 10 days."""
        snowflake = parse_snowflake(before)
        if not snowflake:
            raise UserFeedbackError(custom_message="That did not look like a valid message ID.")
        before_obj = discord.Object(id=snowflake)
        if before_obj.created_at < ctx.message.created_at - timedelta(days=10):
            raise UserFeedbackError(custom_message="This message is older than the 10 day cutoff.")
        if not await ctx.yes_or_no(
            "Are you sure you want to delete all the messages before this ID within the last 10 days?",
            delete_on_return=True,
        ):
            return
        await self._cleanup(ctx, before=before_obj)

    @cleanup.command(name="after")
    async def cleanup_after(self, ctx: SalamanderContext, after):
        """Cleanup all messages after a specific message ID within the last 10 days."""
        snowflake = parse_snowflake(after)
        if not snowflake:
            raise UserFeedbackError(custom_message="That did not look like a valid message ID.")
        after_obj = discord.Object(id=snowflake)
        if after_obj.created_at < ctx.message.created_at - timedelta(days=10):
            raise UserFeedbackError(custom_message="This message is older than the 10 day cutoff.")
        if not await ctx.yes_or_no(
            "Are you sure you want to delete all the messages after the provided message ID?",
            delete_on_return=True,
        ):
            return
        await self._cleanup(ctx, after=after_obj)

    @cleanup.command(name="between")
    async def cleanup_between(self, ctx: SalamanderContext, first, second):
        """
        Cleanup messages between two provided message IDs within the last 10 days.
        """
        snowflake = parse_snowflake(first)
        if not snowflake:
            raise UserFeedbackError(custom_message="The first provided ID did not look like a valid message ID.")
        first_obj = discord.Object(id=snowflake)
        if first_obj.created_at < ctx.message.created_at - timedelta(days=10):
            raise UserFeedbackError(custom_message="The first provided message ID is older than the 10 day cutoff.")
        # BUG FIX: the original re-parsed `first` here, so `second` was never
        # validated or used for the range end.
        snowflake = parse_snowflake(second)
        if not snowflake:
            raise UserFeedbackError(custom_message="The second provided ID did not look like a valid message ID.")
        second_obj = discord.Object(id=snowflake)
        if second_obj.created_at < ctx.message.created_at - timedelta(days=10):
            raise UserFeedbackError(custom_message="The second provided message ID is older than the 10 day cutoff.")
        # BUG FIX: was `second.obj.created_at`, an AttributeError on the
        # raw string argument.
        if second_obj.created_at < first_obj.created_at:
            raise UserFeedbackError(
                custom_message="The first message ID provided should be the earlier one. (Not continuing in case of accidental misuse.)"
            )
        if not await ctx.yes_or_no(
            "Are you sure you want to delete all the messages between the provided message IDs?",
            delete_on_return=True,
        ):
            return
        await self._cleanup(ctx, before=second_obj, after=first_obj)

    async def _cleanup(
        self,
        ctx: SalamanderContext,
        *,
        limit: int | None = None,
        before: discord.Message | discord.Object | None = None,
        after: discord.Message | discord.Object | None = None,
    ):
        """Shared deletion loop: walks history and bulk-deletes in chunks of 100."""
        assert not isinstance(ctx.channel, (discord.DMChannel, discord.PartialMessageable, discord.GroupChannel))
        # I think waterfall use might make sense here? IDK --Liz
        # Maybe, but I get the feeling it won't feel responsive enough. -- Sinbad
        to_delete = [ctx.message]
        before = before or ctx.message
        # Stop walking once messages are older than `after` (or 10 days).
        cutoff = after.created_at if after else ctx.message.created_at - timedelta(days=10)
        # Don't use after param, changes API behavior. Can add oldest_first=False,
        # but this will increase the needed underlying api calls.
        async for message in ctx.history(limit=limit, before=before):
            if message.created_at < cutoff:
                break
            if not message.pinned:
                to_delete.append(message)
            if len(to_delete) == 100:
                await ctx.channel.delete_messages(to_delete)
                to_delete = []
        if to_delete:
            if len(to_delete) == 1:
                # Why does discord's API care about this?
                await to_delete[0].delete()
            else:
                await ctx.channel.delete_messages(to_delete)
| 2.015625 | 2 |
bastion_ssh/plugins/ssh/ssh_transport.py | wcc526/bastion-ssh | 13 | 12763300 | <filename>bastion_ssh/plugins/ssh/ssh_transport.py<gh_stars>10-100
import paramiko
import socket
import time
import logging
from paramiko.ssh_exception import SSHException
import bastion_ssh.errors
LOG = logging.getLogger(__name__)
class SSHTransport(object):
    """ This object wraps a shell in yet another shell. When the shell is
    switched into "simulate" mode it can just print what would be done. """

    def __init__(self, context):
        """Open an SSH connection described by *context*, retrying refused
        connections with a linear backoff.

        Raises bastion_ssh.errors.ConnectionError on authentication failure
        or when every connection attempt is refused.
        """
        self.context = context
        self.connection_attempts = 20
        self.transport = None
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        for tries in range(self.connection_attempts):
            try:
                self.client.connect(hostname=self.context.hostname,
                                    username=self.context.username,
                                    port=self.context.port,
                                    allow_agent=True,
                                    look_for_keys=True)
                break
            # BUG FIX: the original used `except A, B:` which does not catch
            # both exception types -- in Python 2 it catches only A and binds
            # the instance to B (and it is a SyntaxError in Python 3). A
            # tuple catches both.
            except (paramiko.ssh_exception.AuthenticationException,
                    paramiko.ssh_exception.SSHException):
                LOG.warning("Unable to authenticate with remote server")
                raise bastion_ssh.errors.ConnectionError(
                    "Unable to authenticate with remote server")
            except (socket.error, EOFError):
                # Refused/dropped: back off a little longer each attempt.
                LOG.warning("connection refused. retrying.")
                time.sleep(tries + 1)
        else:
            # for/else: we never hit `break`, i.e. every attempt failed.
            self.client.close()
            raise bastion_ssh.errors.ConnectionError(
                "Connection refused %d times, giving up." % self.connection_attempts)
        self.transport = self.client.get_transport()
        # Keepalives prevent idle bastion sessions from being dropped.
        self.transport.set_keepalive(30)
        self.transport.use_compression(True)

    def __del__(self):
        # Best-effort cleanup of the underlying socket.
        self.client.close()
| 2.546875 | 3 |
python/scripts/create_synology_package.py | olefriis/simplepvr | 5 | 12763301 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from shutil import copytree, make_archive
import os
import sys
import shutil
def main(argv=None):
    # Build the Synology package tree under ./syno_package, then populate it
    # step by step: metadata, lifecycle scripts, sources, pip dependencies.
    syno_package_dir = "syno_package"
    build_dir = os.path.abspath(os.path.join(os.path.curdir, syno_package_dir))
    # Intentionally fails (OSError) if a previous build dir is still present.
    os.mkdir(build_dir)
    create_info(build_dir)
    copy_scripts(build_dir)
    download_src(build_dir)
    download_requirements(build_dir)
    package_dist(build_dir)
    print "Done"
def create_info(target_dir):
    # Write the Synology package INFO metadata file (key="value" lines).
    info_file = target_dir+"/INFO"
    with open(info_file, 'w') as f:
        f.write('package="SimplePVR"' + os.linesep)
        f.write('version="0.0.1"' + os.linesep)
        f.write('maintainer="<NAME> <<EMAIL>>"' + os.linesep)
        f.write('description="SimplePVR"' + os.linesep)
        f.write('arch="noarch"' + os.linesep)
def copy_scripts(target_dir):
    # Copy the package lifecycle scripts (templates/scripts) into the build.
    scripts_dir_name = "scripts"
    templates_dir = "templates/" + scripts_dir_name
    copytree(templates_dir, target_dir+"/"+scripts_dir_name)
def download_src(target_dir):
    # Clone the SimplePVR sources into <target_dir>/source.
    from subprocess import PIPE, call, check_call, check_output
    src_download_dir = os.path.join(target_dir, "source")
    shutil.rmtree(src_download_dir, ignore_errors=True)
    os.makedirs(src_download_dir)
    # git clone works relative to the cwd, so chdir in and back out.
    current_dir = os.curdir
    os.chdir(src_download_dir)
    check_call("git clone git://github.com/olefriis/simplepvr.git", shell=True)
    os.chdir(current_dir)
def download_requirements(target_dir):
from subprocess import PIPE, call, check_call, check_output
from psutil import Popen
from datetime import datetime
import time
dependencies_dir = os.path.join(target_dir, "dependencies")
os.makedirs(dependencies_dir)
requirements_file = os.path.join("simplepvr", "python", "simplepvr", "requirements.txt" )
command = "pip install --download-cache={} --ignore-installed --no-install -r {}".format(dependencies_dir, requirements_file)
src_dep_build_dir = os.path.join(target_dir, "source", "build")
if not os.path.exists(src_dep_build_dir):
os.makedirs(src_dep_build_dir)
print "Executing '{}'".format(command)
start = datetime.now()
#proc = Popen(command, close_fds=True, shell=True, stdout=sys.stdout, stderr=sys.stderr)
check_output(command, close_fds=True, shell=True)
print "pip executed in ",(datetime.now() - start).total_seconds(), " seconds"
# while proc.is_running():
# print "Running for ", (datetime.now() - start).total_seconds(), " seconds"
# time.sleep(1)
for f in os.listdir(src_dep_build_dir):
if os.path.isfile(f):
print "Deleting file: ", f
if os.path.isdir(f):
print "Dir: ", f
# os.unlink(f)
def package_dist(target_dir):
    # Placeholder: stages the python sources into tar_this, then raises
    # because the actual tarball packaging is not implemented yet.
    print "TODO - implement packaging of src + build-deps"
    dist_dir = os.path.join(target_dir, "tar_this")
    #shutil.rmtree(dist_dir, ignore_errors=True)
    source_dir = os.path.join(target_dir, "source", "simplepvr", "python")
    shutil.copytree(source_dir, dist_dir)
    raise Exception("TODO - implement packaging of src + build-deps")
# Script entry point; exit code is main()'s return value.
if __name__ == "__main__":
    sys.exit(main())
| 2.296875 | 2 |
fungit/commands/module/file.py | zlj-zz/pyzgit | 0 | 12763302 | class File:
def __init__(
self,
name: str,
display_str: str,
short_status: str,
has_staged_change: bool,
has_unstaged_change: bool,
tracked: bool,
deleted: bool,
added: bool,
has_merged_conflicts: bool,
has_inline_merged_conflicts: bool,
) -> None:
self.name = name
self.display_str = display_str
self.short_status = short_status
self.has_staged_change = has_staged_change
self.has_unstaged_change = has_unstaged_change
self.tracked = tracked
self.deleted = deleted
self.added = added
self.has_merged_conflicts = has_merged_conflicts
self.has_inline_merged_conflicts = has_inline_merged_conflicts
| 2.375 | 2 |
mythril/laser/smt/solver/independence_solver.py | RainOfPhone/mythril | 1 | 12763303 | import z3
from mythril.laser.smt.model import Model
from mythril.laser.smt.bool import Bool
from mythril.laser.smt.solver.solver_statistics import stat_smt_query
from typing import Set, Tuple, Dict, List, cast
def _get_expr_variables(expression: z3.ExprRef) -> List[z3.ExprRef]:
    """
    Gets the variables that make up the current expression

    Recurses the expression tree; leaves that are not numeric constants are
    treated as variables. Duplicates are preserved (callers dedupe via set()).
    :param expression:
    :return:
    """
    result = []
    if not expression.children() and not isinstance(expression, z3.BitVecNumRef):
        result.append(expression)
    for child in expression.children():
        c_children = _get_expr_variables(child)
        result.extend(c_children)
    return result
class DependenceBucket:
    """Bucket object to contain a set of conditions that are dependent on each other"""

    def __init__(self, variables=None, conditions=None):
        """
        Initializes a DependenceBucket object
        :param variables: Variables contained in the conditions
        :param conditions: The conditions that are dependent on each other
        """
        # `or []` gives each bucket its own list when None/empty is passed.
        self.variables = variables or []  # type: List[z3.ExprRef]
        self.conditions = conditions or []  # type: List[z3.ExprRef]
class DependenceMap:
    """DependenceMap object that maintains a set of dependence buckets, used to separate independent smt queries"""

    def __init__(self):
        """Initializes a DependenceMap object"""
        self.buckets = []  # type: List[DependenceBucket]
        # Maps str(variable) -> the bucket currently containing it.
        self.variable_map = {}  # type: Dict[str, DependenceBucket]

    def add_condition(self, condition: z3.BoolRef) -> None:
        """
        Add condition to the dependence map
        :param condition: The condition that is to be added to the dependence map
        """
        variables = set(_get_expr_variables(condition))
        relevant_buckets = set()
        # Find every existing bucket that shares a variable with this condition.
        for variable in variables:
            try:
                bucket = self.variable_map[str(variable)]
                relevant_buckets.add(bucket)
            except KeyError:
                continue
        new_bucket = DependenceBucket(variables, [condition])
        self.buckets.append(new_bucket)
        if relevant_buckets:
            # Merge buckets, and rewrite variable map accordingly
            relevant_buckets.add(new_bucket)
            new_bucket = self._merge_buckets(relevant_buckets)
        for variable in new_bucket.variables:
            self.variable_map[str(variable)] = new_bucket

    def _merge_buckets(self, bucket_list: Set[DependenceBucket]) -> DependenceBucket:
        """Merges the buckets in bucket list"""
        variables = []  # type: List[str]
        conditions = []  # type: List[z3.BoolRef]
        # Remove each source bucket and concatenate its contents.
        for bucket in bucket_list:
            self.buckets.remove(bucket)
            variables += bucket.variables
            conditions += bucket.conditions
        new_bucket = DependenceBucket(variables, conditions)
        self.buckets.append(new_bucket)
        return new_bucket
class IndependenceSolver:
    """An SMT solver object that uses independence optimization"""

    def __init__(self):
        """"""
        self.raw = z3.Solver()
        self.constraints = []
        self.models = []

    def set_timeout(self, timeout: int) -> None:
        """Sets the timeout that will be used by this solver, timeout is in
        milliseconds.

        :param timeout:
        """
        self.raw.set(timeout=timeout)

    def add(self, *constraints: Tuple[Bool]) -> None:
        """Adds the constraints to this solver.

        :param constraints: constraints to add
        """
        raw_constraints = [
            c.raw for c in cast(Tuple[Bool], constraints)
        ]  # type: List[z3.BoolRef]
        self.constraints.extend(raw_constraints)

    def append(self, *constraints: Tuple[Bool]) -> None:
        """Adds the constraints to this solver.

        :param constraints: constraints to add
        """
        raw_constraints = [
            c.raw for c in cast(Tuple[Bool], constraints)
        ]  # type: List[z3.BoolRef]
        self.constraints.extend(raw_constraints)

    @stat_smt_query
    def check(self) -> z3.CheckSatResult:
        """Returns z3 smt check result.

        Partitions the constraints into independent buckets (no shared
        variables) and checks each bucket separately; the conjunction is
        sat iff every bucket is sat.
        """
        dependence_map = DependenceMap()
        for constraint in self.constraints:
            dependence_map.add_condition(constraint)
        self.models = []
        for bucket in dependence_map.buckets:
            self.raw.reset()
            self.raw.append(*bucket.conditions)
            check_result = self.raw.check()
            if check_result == z3.sat:
                self.models.append(self.raw.model())
            else:
                # unsat/unknown in any bucket decides the whole query.
                return check_result
        return z3.sat

    def model(self) -> Model:
        """Returns z3 model for a solution."""
        return Model(self.models)

    def reset(self) -> None:
        """Reset this solver."""
        self.constraints = []

    def pop(self, num) -> None:
        """Discard the *num* most recently added constraints.

        BUG FIX: the previous implementation called
        ``self.constraints.pop(num)``, which removes the single constraint
        at *index* ``num`` instead of the last ``num`` constraints the
        documented contract requires.
        """
        if num > 0:
            del self.constraints[-num:]
| 2.375 | 2 |
2020/08/08.py | jackcogdill/Advent-of-Code-2019 | 0 | 12763304 | <reponame>jackcogdill/Advent-of-Code-2019
# Parse the puzzle input: one "(acc|jmp|nop) <signed int>" instruction per line.
with open('input') as f:
    instructions = []
    for line in f:
        op, arg = line.split()
        instructions.append((op, int(arg)))
# Part 1
executed = [False] * len(instructions)
accumulator = 0
i = 0
while i < len(instructions):
if executed[i]: break
executed[i] = True
op, n = instructions[i]
if op == 'acc':
accumulator += n
i += 1
elif op == 'jmp':
i += n
elif op == 'nop':
i += 1
else:
print('invalid operator')
exit(1)
print(accumulator)
# Part 2
# Part 2
# Find the single jmp<->nop swap that makes the program terminate.
mod = ['jmp', 'nop']
def fix(j):
    """Run the program with instruction j's jmp/nop swapped.

    Returns the final accumulator if the program terminates, or None if it
    loops forever. (BUG FIX: None replaces False as the failure sentinel so
    a legitimate accumulator value of 0 is not mistaken for failure, since
    `0 != False` is False.)
    """
    executed = [False] * len(instructions)
    accumulator = 0
    i = 0
    while i < len(instructions):
        if executed[i]:
            return None  # revisited an instruction: infinite loop
        executed[i] = True
        op, n = instructions[i]
        if i == j:
            op = mod[mod.index(op) ^ 1]  # Swap jmp <-> nop
        if op == 'acc':
            accumulator += n
            i += 1
        elif op == 'jmp':
            i += n
        elif op == 'nop':
            i += 1
        else:
            print('invalid operator')
            exit(1)
    return accumulator

for i, (op, _) in enumerate(instructions):
    if op in mod:  # only jmp/nop instructions can be swapped
        res = fix(i)
        if res is not None:
            print(res)
            break
| 3.65625 | 4 |
tunnel/config.py | zrthxn/tunnel-worker | 0 | 12763305 | <gh_stars>0
from typing import List
from yaml import load, loader
from os import environ
from satellite import Satellite
def build_satellites() -> List[Satellite]:
    """Build Satellite objects from CONFIG_FILE (YAML) if set, otherwise from
    individual environment variables; returns [] when neither is configured."""
    config_path = environ.get("CONFIG_FILE")
    if config_path != None:
        # YAML mode: one Satellite per entry under the "satellites" mapping.
        with open(config_path) as f:
            config = load(f, Loader=loader.SafeLoader)
            satellites = config["satellites"]
            built = []
            for sat in satellites.keys():
                dic = satellites[sat]
                built.append(
                    Satellite(
                        REMOTE_USER=key_or_none(dic, "REMOTE_USER"),
                        REMOTE_HOST=key_or_none(dic, "REMOTE_HOST"),
                        ACCESS_PORT=key_or_none(dic, "ACCESS_PORT"),
                        SSHKEY_FILE=key_or_none(dic, "SSHKEY_FILE"),
                        TUNNEL_PORT=key_or_none(dic, "TUNNEL_PORT"),
                        TUNNEL_MODE=key_or_none(dic, "TUNNEL_MODE"),
                        TARGET_HOST=key_or_none(dic, "TARGET_HOST"),
                        TARGET_PORT=key_or_none(dic, "TARGET_PORT"),
                        REMOTE_PING_PORT=key_or_none(dic, "REMOTE_PING_PORT"),
                        REMOTE_PING_VERB=key_or_none(dic, "REMOTE_PING_VERB"),
                        FAIL_STATUS=key_or_none(dic, "FAIL_STATUS"),
                    )
                )
            return built
    elif environ.get("REMOTE_USER") != None:
        # Env-var mode: a single Satellite configured from the environment.
        return [
            Satellite(
                REMOTE_USER=environ.get("REMOTE_USER"),
                REMOTE_HOST=environ.get("REMOTE_HOST"),
                ACCESS_PORT=environ.get("ACCESS_PORT"),
                SSHKEY_FILE=environ.get("SSHKEY_FILE"),
                TUNNEL_PORT=environ.get("TUNNEL_PORT"),
                TUNNEL_MODE=environ.get("TUNNEL_MODE"),
                TARGET_HOST=environ.get("TARGET_HOST"),
                TARGET_PORT=environ.get("TARGET_PORT"),
                REMOTE_PING_PORT=environ.get("REMOTE_PING_PORT"),
                REMOTE_PING_VERB=environ.get("REMOTE_PING_VERB"),
                FAIL_STATUS=environ.get("FAIL_STATUS"),
            )
        ]
    else:
        # Nothing configured at all.
        return []
def key_or_none(dict: dict, key: str):
    """Return ``dict[key]`` if present, otherwise ``None``.

    Note: the first parameter shadows the builtin ``dict``; the name is kept
    for backward compatibility with existing callers.
    """
    # dict.get already returns None for missing keys; no try/except needed.
    return dict.get(key)
alipay/aop/api/response/ZolozIdentificationUserWebQueryResponse.py | snowxmas/alipay-sdk-python-all | 213 | 12763306 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class ZolozIdentificationUserWebQueryResponse(AlipayResponse):
    """Response wrapper for zoloz.identification.user.web.query.

    Exposes the optional ``extern_info`` field from the API payload.
    """

    def __init__(self):
        super(ZolozIdentificationUserWebQueryResponse, self).__init__()
        self._extern_info = None

    @property
    def extern_info(self):
        # Raw extern_info value from the response; None until parsed.
        return self._extern_info

    @extern_info.setter
    def extern_info(self, value):
        self._extern_info = value

    def parse_response_content(self, response_content):
        # Let the base class parse the envelope, then pick out our field.
        response = super(ZolozIdentificationUserWebQueryResponse, self).parse_response_content(response_content)
        if 'extern_info' in response:
            self.extern_info = response['extern_info']
| 2.03125 | 2 |
tests/test_route.py | kr41/TraversalKit | 4 | 12763307 | <gh_stars>1-10
import re
from traversalkit.route import Node, Route
def test_node():
    # str()/repr()/type for each way a Node can be addressed:
    # fixed name, metaname placeholder, regex pattern, or wildcard.
    node = Node(object, name='foo')
    assert str(node) == 'foo'
    assert repr(node) == '<Node: foo>'
    assert node.type == 'single'
    node = Node(object, metaname='foo')
    assert str(node) == '{foo}'
    assert repr(node) == '<Node: {foo}>'
    assert node.type == 'set'
    node = Node(object, pattern=re.compile('.*'))
    assert str(node) == '{.*}'
    assert repr(node) == '<Node: {.*}>'
    assert node.type == 'set'
    node = Node(object)
    assert str(node) == '*'
    assert repr(node) == '<Node: *>'
    assert node.type == 'set'
def test_path():
    # A Route accumulates Nodes via +=; uri/repr/len track the growth.
    path = Route()
    assert path.uri == '*'
    assert repr(path) == '<Route: *>'
    assert len(path) == 0
    # Adding the root (empty-named) node.
    path += Node(object, name='')
    assert path.uri == '/'
    assert repr(path) == '<Route: />'
    assert len(path) == 1
    # Adding a list extends with multiple nodes at once.
    path += [Node(object, name='foo'), Node(object, metaname='bar')]
    assert path.uri == '/foo/{bar}/'
    assert repr(path) == '<Route: /foo/{bar}/>'
    assert len(path) == 3
| 2.953125 | 3 |
third-party/gtd/gtd/ml/tests/test_vocab.py | timpowellgit/phrasenode | 81 | 12763308 | import numpy as np
import pytest
from gtd.ml.vocab import SimpleVocab, SimpleEmbeddings
@pytest.fixture
def vocab():
    # Minimal three-token vocabulary fixture.
    return SimpleVocab(['a', 'b', 'c'])
@pytest.fixture
def embeds(vocab):
    # Identity-matrix embeddings: one one-hot row per vocabulary entry.
    array = np.eye(len(vocab))
    return SimpleEmbeddings(array, vocab)
class TestSimpleVocab(object):
    def test_save_load(self, vocab, tmpdir):
        # Round-trip: saving then loading must reproduce an equal vocab.
        path = str(tmpdir.join('vocab.txt'))
        vocab.save(path)
        new_vocab = SimpleVocab.load(path)
        assert vocab == new_vocab
Test/assi_test.py | shemaaali/lambdata-shimadaoud-ds18 | 0 | 12763309 | # Import a library related to my test called unittest
import unittest
from pandas import DataFrame
from lambdata.assi import add_state_names_column
class TestAssi(unittest.TestCase):
    def test_assi(self):
        # Input frame has a single "abbrev" column.
        df = DataFrame({"abbrev": ["CA", "CO", "CT", "DC", "TX"]})
        self.assertEqual(len(df.columns), 1)
        self.assertEqual(list(df.columns), ['abbrev'])
        self.assertEqual(df.iloc[3]["abbrev"],'DC')
        result_map = add_state_names_column(df)
        self.assertEqual(len(result_map.columns), 2)
        # BUG FIX: the result has two columns (asserted just above), so the
        # expected column list must include 'name'; the original asserted
        # ['abbrev'] which contradicts the length check and the lookup below.
        self.assertEqual(list(result_map.columns), ['abbrev', 'name'])
        self.assertEqual(result_map.iloc[3]["abbrev"], 'DC')
        self.assertEqual(result_map.iloc[3]["name"], 'Washington')
self.assertEqual(result_map.iloc[3]["name"], 'Washington')
if __name__ == '__main__':
unittest.main() | 3.125 | 3 |
project_handwritten-character-recognition-with-convolutional-neural-network-master/Codes/image_creation_in_directories.py | akash519-gif/Handwritten-letter-detection. | 0 | 12763310 | """
Here I am going to convert array to image from it's pixel value and put those images in their respective directory for
both in train and test set.
train set -------> [A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z]
test set -------> [A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z]
"""
# Import required packages
import os
import numpy as np
import cv2
# Label index -> capital letter (0='A' ... 25='Z').
word_dict = {i: chr(ord('A') + i) for i in range(26)}


def _images_creation(csv_name, out_dir_name):
    """Shared worker for test/train image creation.

    Reads ``label,p0,p1,...`` rows from *csv_name* (label is an int 0-25,
    pixels are 784 uint8 values) and writes each row as a 28x28 grayscale
    PNG under ``<cwd>/<out_dir_name>/<letter>/image_<row>.png``, creating
    the per-letter directory on first use.

    The previous version duplicated this entire body in both
    test_images_creation and train_images_creation, re-listed the parent
    directory for every row, and never closed the CSV file.
    """
    parent_dir = os.path.join(os.getcwd(), out_dir_name)
    # Ensure the parent exists so per-letter mkdir calls cannot fail on it.
    os.makedirs(parent_dir, exist_ok=True)
    count = 0
    with open(csv_name, 'r') as csv_file:
        for line in csv_file:
            row = line.split(',')
            # First column is the numeric label; the rest are pixel values.
            label = word_dict.get(int(row[0]))
            pixel = np.asarray(row[1:], dtype=np.uint8).reshape((28, 28, 1))
            path = os.path.join(parent_dir, label)
            count += 1
            if os.path.isdir(path):
                cv2.imwrite(f'{path}/image_{count}.png', pixel)
                print(f"{count} - not created directory only image add")
            else:
                try:
                    os.mkdir(path)
                except OSError as error:
                    # Mirror the original best-effort behaviour: report and
                    # still attempt the write.
                    print(error)
                cv2.imwrite(f'{path}/image_{count}.png', pixel)
                print(f"{count} - created directory and image add")


def test_images_creation():
    """Create test-set images from test.csv under ./test/<letter>/."""
    _images_creation('test.csv', 'test')


test_images_creation()


def train_images_creation():
    """Create training-set images from train.csv under ./train/<letter>/."""
    _images_creation('train.csv', 'train')


# train_images_creation()
| 3.109375 | 3 |
tests/__init__.py | szabosteve/eland | 335 | 12763311 | <filename>tests/__init__.py
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pandas as pd
from elasticsearch import Elasticsearch
from eland.common import es_version
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Define test files and indices
ELASTICSEARCH_HOST = os.environ.get(
"ELASTICSEARCH_URL", os.environ.get("ELASTICSEARCH_HOST", "http://localhost:9200")
)
# Define client to use in tests
ES_TEST_CLIENT = Elasticsearch(ELASTICSEARCH_HOST)
ES_VERSION = es_version(ES_TEST_CLIENT)
FLIGHTS_INDEX_NAME = "flights"
FLIGHTS_MAPPING = {
"mappings": {
"properties": {
"AvgTicketPrice": {"type": "float"},
"Cancelled": {"type": "boolean"},
"Carrier": {"type": "keyword"},
"Dest": {"type": "keyword"},
"DestAirportID": {"type": "keyword"},
"DestCityName": {"type": "keyword"},
"DestCountry": {"type": "keyword"},
"DestLocation": {"type": "geo_point"},
"DestRegion": {"type": "keyword"},
"DestWeather": {"type": "keyword"},
"DistanceKilometers": {"type": "float"},
"DistanceMiles": {"type": "float"},
"FlightDelay": {"type": "boolean"},
"FlightDelayMin": {"type": "integer"},
"FlightDelayType": {"type": "keyword"},
"FlightNum": {"type": "keyword"},
"FlightTimeHour": {"type": "float"},
"FlightTimeMin": {"type": "float"},
"Origin": {"type": "keyword"},
"OriginAirportID": {"type": "keyword"},
"OriginCityName": {"type": "keyword"},
"OriginCountry": {"type": "keyword"},
"OriginLocation": {"type": "geo_point"},
"OriginRegion": {"type": "keyword"},
"OriginWeather": {"type": "keyword"},
"dayOfWeek": {"type": "byte"},
"timestamp": {"type": "date", "format": "strict_date_hour_minute_second"},
}
}
}
FLIGHTS_FILE_NAME = ROOT_DIR + "/flights.json.gz"
FLIGHTS_DF_FILE_NAME = ROOT_DIR + "/flights_df.json.gz"
FLIGHTS_SMALL_INDEX_NAME = "flights_small"
FLIGHTS_SMALL_MAPPING = FLIGHTS_MAPPING
FLIGHTS_SMALL_FILE_NAME = ROOT_DIR + "/flights_small.json.gz"
ECOMMERCE_INDEX_NAME = "ecommerce"
ECOMMERCE_MAPPING = {
"mappings": {
"properties": {
"category": {"type": "text", "fields": {"keyword": {"type": "keyword"}}},
"currency": {"type": "keyword"},
"customer_birth_date": {"type": "date"},
"customer_first_name": {
"type": "text",
"fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
},
"customer_full_name": {
"type": "text",
"fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
},
"customer_gender": {"type": "text"},
"customer_id": {"type": "keyword"},
"customer_last_name": {
"type": "text",
"fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
},
"customer_phone": {"type": "keyword"},
"day_of_week": {"type": "keyword"},
"day_of_week_i": {"type": "integer"},
"email": {"type": "keyword"},
"geoip": {
"properties": {
"city_name": {"type": "keyword"},
"continent_name": {"type": "keyword"},
"country_iso_code": {"type": "keyword"},
"location": {"type": "geo_point"},
"region_name": {"type": "keyword"},
}
},
"manufacturer": {
"type": "text",
"fields": {"keyword": {"type": "keyword"}},
},
"order_date": {"type": "date"},
"order_id": {"type": "keyword"},
"products": {
"properties": {
"_id": {
"type": "text",
"fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
},
"base_price": {"type": "half_float"},
"base_unit_price": {"type": "half_float"},
"category": {
"type": "text",
"fields": {"keyword": {"type": "keyword"}},
},
"created_on": {"type": "date"},
"discount_amount": {"type": "half_float"},
"discount_percentage": {"type": "half_float"},
"manufacturer": {
"type": "text",
"fields": {"keyword": {"type": "keyword"}},
},
"min_price": {"type": "half_float"},
"price": {"type": "half_float"},
"product_id": {"type": "long"},
"product_name": {
"type": "text",
"fields": {"keyword": {"type": "keyword"}},
"analyzer": "english",
},
"quantity": {"type": "integer"},
"sku": {"type": "keyword"},
"tax_amount": {"type": "half_float"},
"taxful_price": {"type": "half_float"},
"taxless_price": {"type": "half_float"},
"unit_discount_amount": {"type": "half_float"},
}
},
"sku": {"type": "keyword"},
"taxful_total_price": {"type": "float"},
"taxless_total_price": {"type": "float"},
"total_quantity": {"type": "integer"},
"total_unique_products": {"type": "integer"},
"type": {"type": "keyword"},
"user": {"type": "keyword"},
}
}
}
ECOMMERCE_FILE_NAME = ROOT_DIR + "/ecommerce.json.gz"
ECOMMERCE_DF_FILE_NAME = ROOT_DIR + "/ecommerce_df.json.gz"
TEST_MAPPING1 = {
"mappings": {
"properties": {
"city": {"type": "text", "fields": {"raw": {"type": "keyword"}}},
"text": {
"type": "text",
"fields": {"english": {"type": "text", "analyzer": "english"}},
},
"origin_location": {
"properties": {
"lat": {
"type": "text",
"index_prefixes": {},
"fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
},
"lon": {
"type": "text",
"fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
},
}
},
"maps-telemetry": {
"properties": {
"attributesPerMap": {
"properties": {
"dataSourcesCount": {
"properties": {
"avg": {"type": "long"},
"max": {"type": "long"},
"min": {"type": "long"},
}
},
"emsVectorLayersCount": {
"dynamic": "true",
"properties": {
"france_departments": {
"properties": {
"avg": {"type": "float"},
"max": {"type": "long"},
"min": {"type": "long"},
}
}
},
},
}
}
}
},
"type": {"type": "keyword"},
"name": {"type": "text"},
"user_name": {"type": "keyword"},
"email": {"type": "keyword"},
"content": {"type": "text"},
"tweeted_at": {"type": "date"},
"dest_location": {"type": "geo_point"},
"my_join_field": {
"type": "join",
"relations": {"question": ["answer", "comment"], "answer": "vote"},
},
}
}
}
TEST_MAPPING1_INDEX_NAME = "mapping1"
TEST_MAPPING1_EXPECTED = {
"city": "text",
"city.raw": "keyword",
"content": "text",
"dest_location": "geo_point",
"email": "keyword",
"maps-telemetry.attributesPerMap.dataSourcesCount.avg": "long",
"maps-telemetry.attributesPerMap.dataSourcesCount.max": "long",
"maps-telemetry.attributesPerMap.dataSourcesCount.min": "long",
"maps-telemetry.attributesPerMap.emsVectorLayersCount.france_departments.avg": "float",
"maps-telemetry.attributesPerMap.emsVectorLayersCount.france_departments.max": "long",
"maps-telemetry.attributesPerMap.emsVectorLayersCount.france_departments.min": "long",
"my_join_field": "join",
"name": "text",
"origin_location.lat": "text",
"origin_location.lat.keyword": "keyword",
"origin_location.lon": "text",
"origin_location.lon.keyword": "keyword",
"text": "text",
"text.english": "text",
"tweeted_at": "date",
"type": "keyword",
"user_name": "keyword",
}
TEST_MAPPING1_EXPECTED_DF = pd.DataFrame.from_dict(
data=TEST_MAPPING1_EXPECTED, orient="index", columns=["es_dtype"]
)
TEST_MAPPING1_EXPECTED_SOURCE_FIELD_DF = TEST_MAPPING1_EXPECTED_DF.drop(
index=[
"city.raw",
"origin_location.lat.keyword",
"origin_location.lon.keyword",
"text.english",
]
)
TEST_MAPPING1_EXPECTED_SOURCE_FIELD_COUNT = len(
TEST_MAPPING1_EXPECTED_SOURCE_FIELD_DF.index
)
TEST_NESTED_USER_GROUP_INDEX_NAME = "nested_user_group"
TEST_NESTED_USER_GROUP_MAPPING = {
"mappings": {
"properties": {
"group": {"type": "keyword"},
"user": {
"properties": {
"first": {"type": "keyword"},
"last": {"type": "keyword"},
"address": {"type": "keyword"},
}
},
}
}
}
TEST_NESTED_USER_GROUP_DOCS = [
{
"_index": TEST_NESTED_USER_GROUP_INDEX_NAME,
"_source": {
"group": "amsterdam",
"user": [
{
"first": "Manke",
"last": "Nelis",
"address": ["Elandsgracht", "Amsterdam"],
},
{
"first": "Johnny",
"last": "Jordaan",
"address": ["Elandsstraat", "Amsterdam"],
},
],
},
},
{
"_index": TEST_NESTED_USER_GROUP_INDEX_NAME,
"_source": {
"group": "london",
"user": [
{"first": "Alice", "last": "Monkton"},
{"first": "Jimmy", "last": "White", "address": ["London"]},
],
},
},
{
"_index": TEST_NESTED_USER_GROUP_INDEX_NAME,
"_source": {"group": "new york", "user": [{"first": "Bill", "last": "Jones"}]},
},
]
| 1.882813 | 2 |
ucsmsdk/methodmeta/ApeGetIpFromSerialMeta.py | Kego/ucsmsdk | 78 | 12763312 | """This module contains the meta information of ApeGetIpFromSerial ExternalMethod."""
from ..ucscoremeta import MethodMeta, MethodPropertyMeta
method_meta = MethodMeta("ApeGetIpFromSerial", "apeGetIpFromSerial", "Version142b")

# Property metadata keyed by the python-side attribute name.
prop_meta = {
    "cookie": MethodPropertyMeta("Cookie", "cookie", "Xs:string", "Version142b", "InputOutput", False),
    "in_equipment_serial": MethodPropertyMeta("InEquipmentSerial", "inEquipmentSerial", "Xs:string", "Version142b", "Input", False),
    "out_ip_addr": MethodPropertyMeta("OutIpAddr", "outIpAddr", "AddressIPv4", "Version142b", "Output", False),
}

# Maps the XML/wire property name to the python attribute name.
prop_map = {
    "cookie": "cookie",
    "inEquipmentSerial": "in_equipment_serial",
    "outIpAddr": "out_ip_addr",
}
| 2.078125 | 2 |
terrascript/dnsimple/r.py | hugovk/python-terrascript | 0 | 12763313 | # terrascript/dnsimple/r.py
import terrascript
class dnsimple_record(terrascript.Resource):
    """Terrascript wrapper for the Terraform ``dnsimple_record`` resource."""
    pass
| 1.304688 | 1 |
src/tpot_caller.py | NMNS93/snakepot | 0 | 12763314 | <gh_stars>0
#!/usr/bin/env python3
"""tpot.py
Run tpot on an input training dataset."""
import sys
import os
import importlib.util
import joblib
import tempfile
from src.log import Logger
log = Logger('tpot')
import argparse
import pandas as pd
import numpy as np
from tpot import TPOTClassifier
class TPOTCleaner():
    """Extracts the import block and the ``exported_pipeline`` definition
    from a TPOT-generated script so the model can be rebuilt elsewhere.

    Robustness fixes over the original: sections that are not terminated
    by a blank line (e.g. the export block at end-of-file) no longer raise
    an uncaught ValueError, and a missing ``exported_pipeline`` line now
    raises a descriptive ValueError instead of a bare IndexError.
    """

    def __init__(self, tpot_file):
        with open(tpot_file, 'r') as f:
            self.lines = f.readlines()

    @staticmethod
    def _up_to_blank(lines):
        # Prefix of *lines* up to (excluding) the first blank line; all of
        # them if no blank line follows.
        try:
            return lines[:lines.index('\n')]
        except ValueError:
            return list(lines)

    @property
    def import_lines(self):
        """Lines from the top of the file up to the first blank line."""
        return self._up_to_blank(self.lines)

    @property
    def export_lines(self):
        """The ``exported_pipeline = ...`` block, up to the next blank line.

        Raises:
            ValueError: if no ``exported_pipeline = `` line is present.
        """
        for pos, line in enumerate(self.lines):
            if 'exported_pipeline = ' in line:
                return self._up_to_blank(self.lines[pos:])
        raise ValueError("no 'exported_pipeline = ' line found in TPOT output")

    def write_out(self, outdir):
        """Write imports + pipeline definition to *outdir* (a file path)."""
        with open(outdir, 'w') as f:
            f.write("".join(self.import_lines))
            f.write("".join(self.export_lines))
def main():
    """Run a time-boxed TPOT search and persist a refitted pipeline.

    Command-line flags:
        --training: path to a CSV whose first column is the row index.
        --target:   name of the label column inside the training CSV.
        --outdir:   directory that receives tpot_pipeline.py, tpot_pipe.py
                    and model.joblib.
        --max_time: TPOT optimisation budget in minutes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--training')
    parser.add_argument('--target')
    parser.add_argument('--outdir')
    parser.add_argument('--max_time', type=int)
    args = parser.parse_args()
    log.info('BEGIN')
    log.info('Loading data')
    # All columns are coerced to float64; the label column is split off.
    training = pd.read_csv(args.training, index_col=0, dtype=np.float64)
    X_train = training.drop(columns=[args.target]).to_numpy()
    y_train = training[args.target].to_numpy()

    # TPOT setup
    pipeline_optimizer = TPOTClassifier(max_time_mins=args.max_time, cv=10, n_jobs=-1,
                                        random_state=42, verbosity=2, memory='auto')

    # TPOT run
    log.info('Running TPOT')
    pipeline_optimizer.fit(X_train, y_train)
    pipeline_optimizer.export(f'{args.outdir}/tpot_pipeline.py')

    # Create python file for refitting model
    log.info('Cleaning TPOT output file')
    # Keep only the imports and the 'exported_pipeline' block from the
    # TPOT-generated script.
    tc = TPOTCleaner(f'{args.outdir}/tpot_pipeline.py')
    tc.write_out(f'{args.outdir}/tpot_pipe.py')

    # Refit model on training data and save
    log.info('Refitting model')
    # Dynamically import the cleaned file so its `exported_pipeline`
    # object can be refitted on the full training set.
    spec = importlib.util.spec_from_file_location("src", f"{args.outdir}/tpot_pipe.py")
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    model = mod.exported_pipeline
    model.fit(X_train, y_train)
    log.info('Saving model')
    joblib.dump(model, f'{args.outdir}/model.joblib')
    log.info('END')
if __name__=="__main__":
main() | 2.546875 | 3 |
Scripts/naive_bayes.py | MathewTWilliams/News-Category-Classifiers | 0 | 12763315 | <reponame>MathewTWilliams/News-Category-Classifiers
#Author: <NAME>
#Version: 06/24/2022
from sklearn.naive_bayes import ComplementNB
from utils import ClassificationModels, WordVectorModels, CV_BEST_DICT_KEY
from run_classification import run_classifier
from save_load_json import load_cv_result
# Parameter grid searched during cross validation for ComplementNB.
nb_param_grid = {
    # Additive smoothing strength: 0.001, 0.01, 0.1, 1.
    "alpha" : [10 ** i for i in range(-3,1)],
    # Whether to apply a second normalization of the weights.
    "norm" : [True, False]
}
def run_naive_bayes(vec_model_name):
    """Train a Complement Naive Bayes classifier for *vec_model_name*.

    The best hyper-parameters found during cross validation are loaded
    from disk, the classifier is built with them, and the classification
    results are saved to a json file via run_classifier.
    """
    best_params = load_cv_result(
        ClassificationModels.CNB.value, vec_model_name)[CV_BEST_DICT_KEY]
    classifier = ComplementNB(**best_params)

    details = {
        'Vector_Model': vec_model_name,
        'Model': ClassificationModels.CNB.value,
        CV_BEST_DICT_KEY: best_params,
    }

    run_classifier(vec_model_name, classifier, details)
if __name__ == "__main__":
run_naive_bayes(WordVectorModels.WORD2VEC.value)
run_naive_bayes(WordVectorModels.FASTTEXT.value)
run_naive_bayes(WordVectorModels.GLOVE.value)
| 3.140625 | 3 |
app/apps/api/fields.py | lawi21/escriptorium | 4 | 12763316 | <reponame>lawi21/escriptorium
from rest_framework import serializers
class DisplayChoiceField(serializers.ChoiceField):
    """ChoiceField that (de)serializes using the human-readable display value."""

    def to_representation(self, obj):
        # Blank values pass through untouched when blanks are allowed.
        if self.allow_blank and obj == '':
            return obj
        return self._choices[obj]

    def to_internal_value(self, data):
        # To support inserts with the display value.
        if self.allow_blank and data == '':
            return ''
        # Reverse lookup: first key whose display label matches wins.
        for key, label in self._choices.items():
            if label == data:
                return key
        self.fail('invalid_choice', input=data)
| 2.546875 | 3 |
config.py | kendallnguyen/Rik-with-Personalization | 0 | 12763317 | # encoding: utf-8
SECRET_KEY = 'a unique and long key'  # Session-signing key; override in production.
TITLE = 'Riki'  # Site title shown in page headers.
HISTORY_SHOW_MAX = 30  # Maximum number of history entries displayed at once.
PIC_BASE = '/static/content/'  # URL base for uploaded pictures.
CONTENT_DIR = '///D:\\School\\Riki\\content'  # Path of the wiki content store (Windows dev box).
USER_DIR = '///D:\\School\\Riki\\user'  # Path of the user database directory.
NUMBER_OF_HISTORY = 5  # Presumably revisions kept per page — TODO confirm vs HISTORY_SHOW_MAX.
PRIVATE = False  # When True, viewing pages requires login.
| 1.125 | 1 |
src/sentry/auth/utils.py | tobetterman/sentry | 1 | 12763318 | <filename>src/sentry/auth/utils.py
from __future__ import absolute_import
def is_active_superuser(user):
    """Return True if *user* should currently be treated as a superuser."""
    # TODO(dcramer): add VPN support via INTERNAL_IPS + ipaddr ranges
    return user.is_superuser
| 1.59375 | 2 |
mfi_customization/mfi/doctype/asset_movement.py | anuradha-88/mfi_customization | 0 | 12763319 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, bizmap technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
# lst = []
# for i in frappe.get_all('Task',filters,['asset']):
# lst.append(i.asset)
# return [(d,) for d in lst]
@frappe.whitelist()
def get_asset_filter(doctype, txt, searchfield, start, page_len, filters):
    """Link-field search query: return the asset attached to the chosen Task.

    The unused parameters are part of Frappe's standard search-query
    signature and must be kept for the framework to call this function.
    Returns a list of 1-tuples, as Frappe expects from query methods.
    """
    tasks = frappe.get_all('Task', {"name": filters.get("task")}, ['asset'])
    return [(task.get("asset"),) for task in tasks]
tests/core/test_record_components.py | ai-fast-track/mantisshrimp | 580 | 12763320 | import pytest
from icevision.all import *
@pytest.fixture
def dummy_class_map():
    """Two-class ClassMap with no reserved background class."""
    return ClassMap(["dummy-1", "dummy-2"], background=None)
@pytest.fixture
def dummy_class_map_elaborate():
    """Four-class ClassMap with no reserved background class."""
    return ClassMap(["dummy-1", "dummy-2", "dummy-3", "dummy-4"], background=None)
def test_classification_multilabel(dummy_class_map):
    """Multi-label records accept several label ids and one-hot encode all of them."""
    rec = BaseRecord([ClassificationLabelsRecordComponent(is_multilabel=True)])
    rec.classification.set_class_map(dummy_class_map)
    rec.classification.set_labels_by_id([0, 1])
    assert rec.classification.label_ids == [0, 1]
    assert (rec.classification.one_hot_encoded() == np.array([1, 1])).all()
@pytest.mark.parametrize(
    "label_ids",
    [
        ([0, 1]),
        ([0]),
    ],
)
def test_classification_single_label(dummy_class_map, label_ids):
    """Single-label records must reject more than one assigned label."""
    rec = BaseRecord([ClassificationLabelsRecordComponent(is_multilabel=False)])
    rec.classification.set_class_map(dummy_class_map)
    rec.classification.set_labels_by_id(label_ids)

    if len(label_ids) > 1:
        # label_ids == [0, 1]
        # Setting two labels when `is_multilabel=False` raises an error
        with pytest.raises(AutofixAbort):
            rec.classification._autofix()
    else:
        # label_ids == [0]
        # Only one label must be assigned
        assert all(rec.classification._autofix().values())
        assert rec.classification.one_hot_encoded().sum() == 1
@pytest.mark.parametrize(
    "label_ids",
    [
        ([0, 1, 2]),
        ([0, 1]),
        ([0]),
    ],
)
def test_one_hot_encodings(dummy_class_map_elaborate, label_ids):
    """One-hot encoding has one 1 per assigned label and 0 elsewhere."""
    rec = BaseRecord([ClassificationLabelsRecordComponent(is_multilabel=True)])
    rec.classification.set_class_map(dummy_class_map_elaborate)
    rec.classification.set_labels_by_id(label_ids)
    assert all(rec.classification._autofix().values())

    # Ensure we have the correct no. of labels and that they are indeed
    # one-hot encoded
    one_hot_values = rec.classification.one_hot_encoded()
    assert one_hot_values.sum() == len(label_ids)
    assert np.unique(one_hot_values).tolist() == [0, 1]
grr/server/grr_response_server/__init__.py | tsehori/grr | 1 | 12763321 | <reponame>tsehori/grr
#!/usr/bin/env python
# Lint as: python3
"""Server-specific GRR classes."""
| 1.09375 | 1 |
src/awkward1/_connect/_pandas.py | martindurant/awkward-1.0 | 0 | 12763322 | <filename>src/awkward1/_connect/_pandas.py<gh_stars>0
# BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import distutils.version
import numpy
import awkward1.layout
import awkward1._util
import awkward1.operations.convert
import awkward1.operations.structure
# Don't import 'pandas' until an Awkward Array is used in Pandas or register() is called.
def register():
    """Wire Awkward Array into pandas' extension-type machinery.

    Imports pandas lazily; on success, creates the AwkwardDtype extension
    dtype (once) and swaps PandasMixin's base class from the
    PandasNotImportedYet placeholder to pandas' real ExtensionArray.
    Silently does nothing if pandas is unavailable.
    """
    global AwkwardDtype
    try:
        pandas = get_pandas()
    except ImportError:
        pass
    else:
        if AwkwardDtype is None:
            get_dtype()
        if issubclass(PandasMixin, PandasNotImportedYet):
            # Retroactively make PandasMixin a real ExtensionArray subclass.
            PandasMixin.__bases__ = (pandas.api.extensions.ExtensionArray,)
# Module-level flag so the pandas version is only validated once.
checked_version = False

def get_pandas():
    """Import and return pandas, verifying (once) that it is >= 0.24.0.

    Raises:
        ImportError: if pandas is absent or too old for extension arrays.
    """
    import pandas
    global checked_version
    if not checked_version:
        if distutils.version.LooseVersion(pandas.__version__) < distutils.version.LooseVersion("0.24.0"):
            raise ImportError("cannot use Awkward Array with Pandas version {0} (at least 0.24.0 is required)".format(pandas.__version__))
        checked_version = True
    return pandas
# Populated lazily by get_dtype(); None until pandas interop is first used.
AwkwardDtype = None

def get_dtype():
    """Create (once) and return the AwkwardDtype pandas ExtensionDtype.

    Defined lazily inside this function because pandas must not be
    imported until Awkward/pandas interop is actually requested.  The
    `global` statement makes the class statement bind the module-level
    AwkwardDtype name.
    """
    import awkward1.highlevel
    pandas = get_pandas()
    global AwkwardDtype
    if AwkwardDtype is None:
        @pandas.api.extensions.register_extension_dtype
        class AwkwardDtype(pandas.api.extensions.ExtensionDtype):
            """Pandas extension dtype whose scalar type is awkward1.highlevel.Array."""
            name = "awkward1"
            type = awkward1.highlevel.Array
            kind = "O"
            base = numpy.dtype("O")
            @classmethod
            def construct_from_string(cls, string):
                # Pandas uses this to parse dtype="awkward1" strings.
                if string == cls.name:
                    return cls()
                else:
                    raise TypeError("cannot construct a {0} from {1}".format(cls, string))
            @classmethod
            def construct_array_type(cls):
                return awkward1.highlevel.Array
    return AwkwardDtype
class PandasNotImportedYet(object):
    """Placeholder base class used until pandas is actually imported."""
    pass
class NoFields(object):
    """Sentinel column label meaning "this array has no record fields"."""

    def __str__(self):
        return "(no fields)"

    def __eq__(self, other):
        # Any NoFields instance — and the NoFields class itself — compares equal.
        return isinstance(other, NoFields) or other is NoFields

    def __hash__(self):
        # All instances hash alike, so they collapse to one dict/set entry.
        return hash(NoFields)
class PandasMixin(PandasNotImportedYet):
    """Implements the pandas ExtensionArray interface for Awkward Arrays.

    The base class is swapped to pandas.api.extensions.ExtensionArray by
    register() the first time pandas interop is used.
    """

    @property
    def _typ(self):
        # Pandas sniffs this attribute to decide how to treat the object.
        register()
        return "dataframe"

    @property
    def columns(self):
        # Record arrays expose their field names as columns; non-record
        # arrays get the NoFields sentinel as their single column label.
        if self.layout.numfields >= 0:
            return self.layout.keys()
        else:
            return [NoFields()]

    def _ixs(self, i, axis):
        # Positional column access used internally by pandas.
        register()
        if self.layout.numfields >= 0:
            return get_pandas().Series(self[str(i)])
        else:
            return get_pandas().Series(self)

    # REQUIRED by Pandas:

    @classmethod
    def _from_sequence(cls, scalars, *args, **kwargs):
        # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray._from_sequence.html
        register()
        # dtype/copy are accepted for API compatibility but ignored.
        dtype, copy = awkward1._util.extra(args, kwargs, [
            ("dtype", None),
            ("copy", False)])
        return awkward1.operations.convert.fromiter(scalars)

    @classmethod
    def _from_factorized(cls, values, original):
        # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray._from_factorized.html
        register()
        raise NotImplementedError("_from_factorized")

    # __getitem__(self)
    # __len__(self)

    @property
    def dtype(self):
        # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.dtype.html
        register()
        return AwkwardDtype()

    @property
    def nbytes(self):
        # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.nbytes.html
        # NOTE(review): uses self._layout while most methods use self.layout
        # — confirm which accessor is canonical.
        return self._layout.nbytes

    @property
    def ndim(self):
        # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.nbytes.html
        return 1

    @property
    def shape(self):
        # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.shape.html
        return (len(self),)

    def isna(self):
        # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.isna.html
        register()
        return numpy.array(awkward1.operations.structure.isna(self))

    def take(self, indices, *args, **kwargs):
        # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.take.html
        allow_fill, fill_value = awkward1._util.extra(args, kwargs, [
            ("allow_fill", False),
            ("fill_value", None)])
        register()
        if allow_fill:
            # Pandas fill semantics: negative entries select the fill value
            # rather than counting from the end.
            indices = numpy.asarray(indices, dtype=numpy.int64)
            if fill_value is None:
                # Missing entries become None via an IndexedOptionArray.
                index = awkward1.layout.Index64(indices)
                layout = awkward1.layout.IndexedOptionArray64(index, self.layout, parameters=self.layout.parameters)
                return awkward1._util.wrap(layout, awkward1._util.behaviorof(self))
            else:
                # BUG FIX: `tags` was previously viewed as int8 *before* the
                # `index[~tags] = 0` assignment, so `~tags` produced the
                # integers -2/-1 and was used as (wrong) integer fancy
                # indices instead of a boolean mask.  Keep the mask boolean
                # until the Index8 is built.
                tags = indices >= 0
                index = indices.copy()
                index[~tags] = 0
                content0 = awkward1.operations.convert.fromiter([fill_value], highlevel=False)
                content1 = self.layout
                tags = awkward1.layout.Index8(tags.view(numpy.int8))
                index = awkward1.layout.Index64(index)
                layout = awkward1.layout.UnionArray8_64(tags, index, [content0, content1])
                return awkward1._util.wrap(layout, awkward1._util.behaviorof(self))
        else:
            return self[indices]

    def copy(self):
        # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.copy.html
        return awkward1._util.wrap(self._layout.deep_copy(copyarrays=True, copyindexes=True, copyidentities=True), awkward1._util.behaviorof(self))

    @classmethod
    def _concat_same_type(cls, to_concat):
        # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray._concat_same_type.html
        register()
        return awkward1.operations.structure.concatenate(to_concat)

    # RECOMMENDED for performance:

    # def fillna(self, *args, **kwargs):
    #     # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.fillna.html
    #     value, method, limit = awkward1._util.extra(args, kwargs, [
    #         ("value", None),
    #         ("method", None),
    #         ("limit", None)])
    #     register()
    #     raise NotImplementedError
    #
    # def dropna(self):
    #     # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.dropna.html
    #     register()
    #     raise NotImplementedError
    #
    # def unique(self):
    #     # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.unique.html
    #     register()
    #     raise NotImplementedError
    #
    # def factorize(self, na_sentinel):
    #     # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.factorize.html
    #     register()
    #     raise NotImplementedError
    #
    # def _values_for_factorize(self):
    #     # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray._values_for_factorize.html
    #     register()
    #     raise NotImplementedError
    #
    # def argsort(self, *args, **kwargs):
    #     # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.argsort.html
    #     ascending, kind = awkward1._util.extra(args, kwargs, [
    #         ("ascending", True),
    #         ("kind", "quicksort")]) # "quicksort", "mergesort", "heapsort"
    #     register()
    #     raise NotImplementedError
    #
    # def _values_for_argsort(self):
    #     # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray._values_for_argsort.html
    #     register()
    #     raise NotImplementedError
    #
    # def searchsorted(self, value, *args, **kwargs):
    #     # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray.searchsorted.html
    #     side, sorter = awkward1._util.extra(args, kwargs, [
    #         ("side", "left"),
    #         ("sorter", None)])
    #     register()
    #     raise NotImplementedError
    #
    # def _reduce(self, name, *args, **kwargs):
    #     # https://pandas.pydata.org/pandas-docs/version/1.0.0/reference/api/pandas.api.extensions.ExtensionArray._reduce.html
    #     skipna, = awkward1._util.extra(args, kwargs, [
    #         ("skipna", True)])
    #     register()
    #     raise NotImplementedError
def df(array, how="inner", levelname=lambda i: "sub"*i + "entry", anonymous="values"):
    """Merge the per-depth DataFrames produced by dfs() into one DataFrame.

    Frames are joined on their (Multi)Index using pandas.merge with the
    given *how* strategy; returns None for an empty frame list.
    """
    register()
    pandas = get_pandas()
    merged = None
    for frame in dfs(array, levelname=levelname, anonymous=anonymous):
        if merged is None:
            merged = frame
        else:
            merged = pandas.merge(
                merged, frame, how=how, left_index=True, right_index=True)
    return merged
def dfs(array, levelname=lambda i: "sub"*i + "entry", anonymous="values"):
    """Convert an Awkward Array into a list of pandas DataFrames.

    Nested lists are flattened level by level; each flattening adds one
    MultiIndex level (named by *levelname*).  Columns whose rows share the
    same index structure are gathered into the same DataFrame; leaf arrays
    without field names get the *anonymous* column label.
    """
    register()
    pandas = get_pandas()
    def recurse(layout, row_arrays, col_names):
        # Returns a list of (column data, row index arrays, column-name tuple).
        if layout.purelist_depth > 1:
            # Flatten one list level and extend every existing row index by
            # repeating it per inner-list length, plus a new inner position.
            offsets, flattened = layout.offsets_and_flatten(axis=1)
            offsets = numpy.asarray(offsets)
            starts, stops = offsets[:-1], offsets[1:]
            counts = stops - starts
            if awkward1._util.win:
                # Windows numpy defaults differ; keep the index dtype int32.
                counts = counts.astype(numpy.int32)
            if len(row_arrays) == 0:
                newrows = [numpy.repeat(numpy.arange(len(counts), dtype=counts.dtype), counts)]
            else:
                newrows = [numpy.repeat(x, counts) for x in row_arrays]
            # Position within the flattened inner lists.
            newrows.append(numpy.arange(offsets[-1], dtype=counts.dtype) - numpy.repeat(starts, counts))
            return recurse(flattened, newrows, col_names)
        elif isinstance(layout, awkward1.layout.RecordArray):
            # One recursion per field; field name is appended to the column tuple.
            return sum([recurse(layout.field(n), row_arrays, col_names + (n,)) for n in layout.keys()], [])
        else:
            try:
                return [(awkward1.operations.convert.tonumpy(layout), row_arrays, col_names)]
            except:
                # Fall back to the raw layout when a numpy view is impossible.
                return [(layout, row_arrays, col_names)]
    # NOTE(review): `behavior` is computed but never used below — confirm
    # whether it is dead code.
    behavior = awkward1._util.behaviorof(array)
    layout = awkward1.operations.convert.tolayout(array, allowrecord=True, allowother=False)
    if isinstance(layout, awkward1.layout.Record):
        layout2 = layout.array[layout.at : layout.at + 1]
    else:
        layout2 = layout
    tables = []
    last_row_arrays = None
    for column, row_arrays, col_names in recurse(layout2, [], ()):
        if isinstance(layout, awkward1.layout.Record):
            row_arrays = row_arrays[1:] # this Record was presented as a RecordArray of one element
        if len(col_names) == 0:
            columns = [anonymous]
        else:
            columns = pandas.MultiIndex.from_tuples([col_names])
        if last_row_arrays is not None and len(last_row_arrays) == len(row_arrays) and all(numpy.array_equal(x, y) for x, y in zip(last_row_arrays, row_arrays)):
            # Same index structure as the previous column: append to the
            # last table, padding the shallower MultiIndex with "" levels.
            oldcolumns = tables[-1].columns
            numold = len(oldcolumns.levels)
            numnew = len(columns.levels)
            maxnum = max(numold, numnew)
            if numold != maxnum:
                oldcolumns = pandas.MultiIndex.from_tuples([x + ("",)*(maxnum - numold) for x in oldcolumns])
                tables[-1].columns = oldcolumns
            if numnew != maxnum:
                # NOTE(review): padding uses (maxnum - numold) here; for the
                # *new* columns this looks like it should be (maxnum - numnew)
                # — confirm.
                columns = pandas.MultiIndex.from_tuples([x + ("",)*(maxnum - numold) for x in columns])
            newframe = pandas.DataFrame(data=column, index=tables[-1].index, columns=columns)
            tables[-1] = pandas.concat([tables[-1], newframe], axis=1)
        else:
            # New index structure: start a fresh table with a named MultiIndex.
            index = pandas.MultiIndex.from_arrays(row_arrays, names=[levelname(i) for i in range(len(row_arrays))])
            tables.append(pandas.DataFrame(data=column, index=index, columns=columns))
        last_row_arrays = row_arrays
    return tables
| 2.234375 | 2 |
poly/primaryremotedevice.py | firstone/RESTRemote | 6 | 12763323 | from poly.remotedevice import RemoteDevice
class PrimaryRemoteDevice(RemoteDevice):
    """Top-level node for a remote device; reports driver connectivity.

    The 'ST' driver is 1 when the underlying device driver reports
    connected, otherwise 0.
    """

    # ST (status) driver definition; uom 2 — presumably boolean, confirm
    # against the node-server profile.
    drivers = [ {'driver': 'ST', 'value': 0, 'uom': 2} ]

    def __init__(self, controller, address, driverName, deviceName,
            config, deviceDriver):
        # NOTE(review): `address` is passed twice — presumably once as the
        # primary node's address and once as this node's own address;
        # confirm against RemoteDevice.__init__'s signature.
        super(PrimaryRemoteDevice, self).__init__(controller, self, address,
            address, driverName, deviceName, config, deviceDriver)
        self.connected = False

    def start(self):
        """Start the underlying device driver and push the initial state."""
        self.deviceDriver.start()
        self.refresh_state()

    def refresh_state(self):
        """Re-query connectivity, update the ST driver, then delegate."""
        self.connected = self.deviceDriver.is_connected()
        self.setDriver('ST', 1 if self.connected else 0)
        super(PrimaryRemoteDevice, self).refresh_state()
| 2.6875 | 3 |
earth_enterprise/src/update_fusion_version.py | ezeeyahoo/earthenterprise | 2,661 | 12763324 | <reponame>ezeeyahoo/earthenterprise
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Update the Fusion version string.
It takes two flags --long long_version_name and --short short_version_name.
Then it opens all the files need to be updated and changes from current
long_version_name and short_version_name to these new values.
Note: The long version name needs to start with short version name.
Example Usage:
./update_fusion_version.py --long "3.2.0" --short "3.2"
"""
import datetime
import fileinput
import os
import sys
from pyglib import app
from pyglib import flags
FLAGS = flags.FLAGS

# Command-line flags: the new version strings to write into the tree.
# (Fixed: the help texts previously read "(e.g 3.2.0" with an unbalanced
# parenthesis and a missing period.)
flags.DEFINE_string('long',
                    '',
                    'Long version string for fusion (e.g. 3.2.0)')
flags.DEFINE_string('short',
                    '',
                    'Short version string for fusion (e.g. 3.2)')
def FindUpdateCurrentVersion(fusion_version_file, long_version, short_version,
                             year):
  """Find and update long and short version names in the fusion_version_file.
  Args:
    fusion_version_file: Absolute filename for fusion_version.txt
    long_version: The new long_version to update to.
    short_version: The new short_version to update to.
    year: The current year to be used in copyright statement.
  Returns:
    A couple of string (in a list) representing current long and short
    respectively.
  Raises:
    AssertionError: Whenever anything fails.
  """
  # Open the file for edit in Perforce ("g4 open") before rewriting it.
  cmd = 'cd %s; g4 open %s' % (os.path.dirname(fusion_version_file),
                               os.path.basename(fusion_version_file))
  if os.system(cmd):
    raise AssertionError('Cannot run command "%s"' % cmd)
  stage = 0  # not yet reached long_version
  # fileinput's inplace mode redirects stdout into the file, so every
  # `print` below emits a (possibly rewritten) line back into the file.
  for line in fileinput.FileInput(fusion_version_file, inplace=1):
    if stage == 0:
      if not line.startswith('#'):
        stage = 1  # long_version reached
        # line[:-1] strips the trailing newline; print re-adds one.
        old_long = line[:-1]
        print long_version
      else:
        # TODO: Create script to do this for all copyrights.
        if line.startswith('# Copyright'):
          print '# Copyright %d Google Inc. All Rights Reserved.' % year
        else:
          # Trailing comma: the line already carries its own newline.
          print line,
    elif stage == 1:
      old_short = line[:-1]
      print short_version
      stage = 2  # short version reached
    else:
      # Any line after the two version lines is an unexpected format.
      raise AssertionError('Cannot comprehend line "%s" in %s' % (
          line, fusion_version_file))
  return (old_long, old_short)
def ChangeVersionInInstallerFiles(
    common_prefix, old_long, new_long, old_short, new_short):
  """For installer xml files change from old version to new version.
  Args:
    common_prefix: Common root for all files to change.
    old_long: Current long version string.
    new_long: New long version string.
    old_short: Current short version string.
    new_short: New short version string.
  Raises:
    AssertionError: Whenever anything fails.
  """
  installer_files = ('installer/config/GoogleEarthInstaller.iap_xml',
                     'installer/config/GoogleFusionInstaller.iap_xml',
                     'installer/config/GoogleFusionToolsInstaller.iap_xml')
  old_long_cdata = 'CDATA[%s]' % (old_long)
  old_short_cdata = 'CDATA[%s]' % (old_short)
  new_long_cdata = 'CDATA[%s]' % (new_long)
  new_short_cdata = 'CDATA[%s]' % (new_short)
  long_key = 'CDATA[$LONG_VERSION$]'
  short_key = 'CDATA[$SHORT_VERSION$]'
  for file_name in installer_files:
    file_name = '%s/%s' % (common_prefix, file_name)
    # Open the file for edit in Perforce before rewriting it in place.
    cmd = 'cd %s; g4 open %s' % (os.path.dirname(file_name),
                                 os.path.basename(file_name))
    if os.system(cmd):
      raise AssertionError('Cannot run command "%s"' % cmd)
    # When a CDATA line holding the old version is seen, it is buffered
    # ("defered") until a later $LONG_VERSION$/$SHORT_VERSION$ key line
    # reveals which of the two replacements to apply to it.
    in_defered_mode = False
    defered_lines = []
    # fileinput with inplace=1 redirects stdout back into the file.
    for line in fileinput.FileInput(file_name, inplace=1):
      if not in_defered_mode:
        if line.find(old_long_cdata) >= 0 or line.find(old_short_cdata) >= 0:
          in_defered_mode = True
          defered_lines.append(line)
        else:
          # Plain lines get a straightforward long-version substitution.
          line = line.replace(old_long, new_long)
          print line,
      else:
        long_key_found = (line.find(long_key) >= 0)
        if long_key_found or (line.find(short_key) >= 0):
          # Flush: rewrite the first buffered line per the key found,
          # then emit the rest of the buffer and the key line unchanged.
          if long_key_found:
            print defered_lines[0].replace(old_long_cdata, new_long_cdata),
          else:
            print defered_lines[0].replace(old_short_cdata, new_short_cdata),
          for index in range(1, len(defered_lines)):
            print defered_lines[index],
          print line,
          defered_lines = []
          in_defered_mode = False
        else:
          defered_lines.append(line)
def main(argv):
  # Require: no positional args, both flags provided, and the long version
  # extending the short one (e.g. "3.2.0" starts with "3.2").
  if not (len(argv) == 1 and FLAGS.long and FLAGS.short and
          FLAGS.long.startswith(FLAGS.short)):
    sys.stderr.write('Wrong Usage of the script %s \n\n' % argv[0])
    sys.stderr.write(__doc__)
    sys.exit(-1)
  # The source-tree root is two directories above this script.
  script_path = os.path.abspath(argv[0])
  common_prefix = os.path.dirname(os.path.dirname(script_path))
  fusion_version_file = '%s/%s' % (common_prefix, 'src/fusion_version.txt')
  # First rewrite fusion_version.txt (capturing the old versions), then
  # propagate old -> new in the installer XML configs.
  (old_long, old_short) = FindUpdateCurrentVersion(fusion_version_file,
                                                   FLAGS.long, FLAGS.short,
                                                   datetime.datetime.now().year)
  ChangeVersionInInstallerFiles(
      common_prefix, old_long, FLAGS.long, old_short, FLAGS.short)


if __name__ == '__main__':
  app.run()
| 2.25 | 2 |
simple_queue/circular_queue.py | dhrubach/python-code-recipes | 0 | 12763325 | ###############################################################
# LeetCode Problem Number : 622
# Difficulty Level : Medium
# URL : https://leetcode.com/problems/design-circular-queue/
###############################################################
class CircularQueue:
    """Fixed-capacity FIFO queue backed by a plain list (ring buffer).

    `head` and `tail` hold slot indices into `data`; both are -1 while the
    queue is empty. The buffer wraps around via modulo arithmetic.
    """

    def __init__(self, size):
        """Create an empty ring buffer able to hold `size` items."""
        self.max_size = size
        # -1 in both pointers marks the empty state.
        self.head = -1
        self.tail = -1
        self.data = [[] for _ in range(size)]

    def isEmpty(self) -> bool:
        """Return True when no items are stored."""
        return self.computeSize() == 0

    def isFull(self) -> bool:
        """Return True when the buffer holds `max_size` items."""
        return self.computeSize() == self.max_size

    def enqueue(self, val: int) -> bool:
        """Append `val` at the tail; return False when the buffer is full."""
        if self.isFull():
            return False
        # Advance the tail one slot, wrapping past the end of the buffer.
        self.tail = (self.tail + 1) % self.max_size
        self.data[self.tail] = val
        if self.head == -1:
            # First element ever: head now points at the same slot as tail.
            self.head = self.tail
        return True

    def dequeue(self) -> bool:
        """Drop the oldest item; return False when already empty."""
        if self.isEmpty():
            return False
        self.data[self.head] = None
        if self.computeSize() == 1:
            # Removing the last element empties the queue entirely.
            self.head = -1
            self.tail = -1
        else:
            self.head = (self.head + 1) % self.max_size
        return True

    def front(self) -> int:
        """Return the oldest item, or -1 when the queue is empty."""
        return -1 if self.isEmpty() else self.data[self.head]

    def rear(self) -> int:
        """Return the newest item, or -1 when the queue is empty."""
        return -1 if self.isEmpty() else self.data[self.tail]

    def computeSize(self) -> int:
        """Return the number of items currently stored."""
        if self.head == -1:
            return 0
        if self.tail == self.head:
            return 1
        # A non-positive span means the tail wrapped past the buffer end,
        # so the element count must be shifted up by max_size.
        span = self.tail - self.head
        return span + 1 if span > 0 else span + 1 + self.max_size

    def executeCommands(self, command: str, operands: list):
        """Dispatch a named queue operation; test-harness helper only.

        Returns the operation's result, or None for an unknown command.
        """
        dispatch = {
            "enQueue": lambda: self.enqueue(operands[0]),
            "deQueue": self.dequeue,
            "Rear": self.rear,
            "Front": self.front,
            "isFull": self.isFull,
            "isEmpty": self.isEmpty,
        }
        action = dispatch.get(command)
        return action() if action is not None else None
| 4.0625 | 4 |
publishtimer/custom_exceptions.py | paragguruji/publishtimer | 0 | 12763326 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 27 19:05:53 2016
@author: <NAME>, <EMAIL>
"""
from werkzeug.exceptions import HTTPException
class WriteScheduleFailedError(HTTPException):
    """Exception to be raised when schedule is computed successfully by the publishtimer but save_schedule API returns failure response.
    The response includes error code and reason as returned by the save_schedule API and the schedule computed by publishtimer
    """
    def __init__(self, **kwargs):
        # 512 is a non-standard HTTP status used to signal upstream failure.
        self.code = 512
        # NOTE(review): assumes 'upstream_response' is always supplied and is
        # a requests-style response object (exposes .status_code/.reason);
        # if it is omitted, the formatting below raises AttributeError on
        # None — confirm callers always pass it.
        self.upstream_response = kwargs.get('upstream_response')
        self.computed_schedule = kwargs.get('computed_schedule', [])
        self.description = 'Error in writing computed schedule to profile \
        because request to WriteSchedule API failed with error code: ' + \
        str(self.upstream_response.status_code) + '; reason: ' + \
        str(self.upstream_response.reason)
        super(WriteScheduleFailedError,
              self).__init__(description=self.description,
                             response=self.upstream_response)
| 2 | 2 |
Farmacia/apps/compras/forms.py | cluco91/Django_Farmacia | 2 | 12763327 | from .models import * # Change as necessary
from django.forms import ModelForm
from django import forms
class TodoListForm(ModelForm):
    """Form for a purchase header (Cabecera).

    `trabajador` is excluded because it is assigned in the view from the
    logged-in user, not entered by the user.
    """
    class Meta:
        model = Cabecera
        exclude =('trabajador',)
        # Bootstrap styling for every rendered widget.
        widgets = {
            'codigo': forms.TextInput(attrs={'class': 'form-control'}),
            'distribuidor': forms.Select(attrs={'class': 'form-control'}),
            'laboratorio': forms.Select(attrs={'class': 'form-control'}),
        }
class TodoItemForm(forms.ModelForm):
    """Line-item form for a purchase detail row (DetalleCompra).

    `list` is excluded — presumably the parent header is attached in the
    view; confirm against the view code.
    """
    class Meta:
        model = DetalleCompra
        exclude = ('list',)
        widgets = {
            'medicamento': forms.Select(attrs={'class': 'form-control'}),
            'cantidad': forms.NumberInput(attrs={'class': 'form-control'}),
        }
class RangoForm (forms.Form):
    """Start/end date-range form used for report filtering.

    NOTE(review): 'data-date-format' only configures the client-side
    datepicker; Django's DateField parsing still follows its own input
    formats — confirm dd/mm/yyyy is accepted server-side.
    """
    fecha_i = forms.DateField(widget = forms.TextInput(attrs={'class':'form-control', 'id':'Fecha_i', 'data-date-format':'dd/mm/yyyy'}))
    fecha_f = forms.DateField(widget = forms.TextInput(attrs={'class':'form-control', 'id':'Fecha_f', 'data-date-format':'dd/mm/yyyy'}))
| 2.203125 | 2 |
an.py | Vladislav2018/soundclassification_analyzer | 0 | 12763328 | import pandas as pd
import datetime
import json
def parse_txt_report(path: str, path_tpl:str, separators: tuple = (':', ',')):
    """Parse a classification report into per-class hit/miss lists.

    Args:
        path: text report whose lines carry an actual class and a predicted
            class, delimited by `separators` (see slicing notes below).
        path_tpl: CSV template with 'actual_class' and 'predicted_class'
            columns mapping each actual class to its expected prediction.
        separators: (key separator, field separator) used when slicing.

    Returns:
        dict mapping template predicted_class -> list of 1/0 flags; also
        dumped as JSON to stat/corresponds<timestamp>.txt.
    """
    report = {}
    with open(path) as file:
        tpl = pd.read_csv(path_tpl)
        # One empty hit list per class declared in the template.
        for row in tpl['predicted_class']:
            report.update({row: []})
        for line in file:
            # NOTE(review): the +2 offsets assume each separator is followed
            # by exactly one space (': ', ', ') — confirm the report format.
            act_cls = line[line.find(separators[0])+2 : line.find(separators[1])]
            # The predicted class is whatever follows the second ':'.
            pred_class = line[line.find(separators[1])+2 : ]
            pred_class = pred_class[pred_class.find(separators[0]) + 2:]
            if pred_class.endswith('\n'):
                pred_class = pred_class[:-1]
            # Look up which prediction the template expects for this class.
            tpl_act_cls_row = tpl.loc[tpl['actual_class'] == act_cls]
            tpl_pred_class = tpl_act_cls_row['predicted_class'].values[0]
            if(pred_class == tpl_pred_class):
                report[tpl_pred_class].append(1)
            else:
                report[tpl_pred_class].append(0)
    # Persist the raw correspondence table with a timestamped file name.
    with open('stat/' + "corresponds" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.txt', 'w') as file:
        file.write(json.dumps(report))
    return report
def analyze(data):
    """Reduce per-class hit lists to per-class accuracy and dump as JSON.

    Args:
        data: dict mapping class name -> list of 1/0 hit flags, as
            produced by parse_txt_report.

    Returns:
        dict mapping class name -> accuracy in [0, 1]. A class with no
        recorded samples scores 0.0 (the original raised
        ZeroDivisionError in that case).

    Side effects:
        Writes the report to stat/short_report<timestamp>.txt (the stat/
        directory must already exist).
    """
    short_report = {}
    # Iterate (key, value) pairs directly instead of the original parallel
    # keys()/values() walk with a manual index counter.
    for cls, outcomes in data.items():
        short_report[cls] = sum(outcomes) / len(outcomes) if outcomes else 0.0
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    with open('stat/' + "short_report" + stamp + '.txt', 'w') as file:
        file.write(json.dumps(short_report))
    return short_report
if __name__ == '__main__':
    # Demo run: score the LSTM report against the bundled template.
    analyze(parse_txt_report("reports/report_lstm.txt", 'templates/sa_tpl.csv'))
lstchain/calib/camera/drs4.py | yukihok/cta-lstchain | 0 | 12763329 | import argparse
import numpy as np
from astropy.io import fits
from numba import jit
class DragonPedestal:
    """Accumulates per-capacitor pedestal statistics for one Dragon module.

    Each DRS4 channel has 4096 storage capacitors; samples are binned by
    absolute capacitor position so a mean pedestal can be computed per cell.
    """

    # Pixels served by a single Dragon module.
    n_pixels = 7
    # Samples in one readout window (region of interest).
    roisize = 40
    # Capacitors in the DRS4 ring buffer: 4 * 1024.
    size4drs = 4*1024
    # Gain-channel indices used throughout.
    high_gain = 0
    low_gain = 1

    def __init__(self):
        # First-capacitor id per gain channel (2 gains x 8 raw entries).
        self.first_capacitor = np.zeros((2, 8))
        # Running sums (turned into means by finalize_pedestal) and sample
        # counts, per gain / pixel / absolute capacitor position.
        self.meanped = np.zeros((2, self.n_pixels, self.size4drs))
        self.numped = np.zeros((2, self.n_pixels, self.size4drs))

    def fill_pedestal_event(self, event, nr):
        """Accumulate one pedestal event for module number `nr`."""
        first_cap = event.lst.tel[0].evt.first_capacitor_id[nr * 8:(nr + 1) * 8]
        # Dragon v5 board data format: raw entries 0-3 serve the high-gain
        # pixels (two pixels per entry), entries 4-7 the low-gain pixels.
        for i, j in zip([0, 1, 2, 3, 4, 5, 6], [0, 0, 1, 1, 2, 2, 3]):
            self.first_capacitor[self.high_gain, i] = first_cap[j]
        for i, j in zip([0, 1, 2, 3, 4, 5, 6], [4, 4, 5, 5, 6, 6, 7]):
            self.first_capacitor[self.low_gain, i] = first_cap[j]
        # Waveform indexed as [gain, pixel, sample] — assumed from the
        # indexing below; confirm against the event container layout.
        waveform = event.r0.tel[0].waveform[:, :, :]
        expected_pixel_id = event.lst.tel[0].svc.pixel_ids
        self._fill_pedestal_event_jit(nr, waveform, expected_pixel_id, self.first_capacitor, self.meanped, self.numped)

    @staticmethod
    @jit(parallel=True)
    def _fill_pedestal_event_jit(nr, waveform, expected_pixel_id, first_cap, meanped, numped):
        """Numba-compiled inner loop: bin samples 2..37 of every pixel by
        absolute capacitor position (first two / last two samples skipped)."""
        size4drs = 4096
        roisize = 40
        for i in range(0, 2):
            for j in range(0, 7):
                fc = int(first_cap[i, j])
                pixel = expected_pixel_id[nr*7 + j]
                posads0 = int((2+fc)%size4drs)
                if posads0 + 40 < 4096:
                    # Fast path: the window fits without wrapping the ring.
                    meanped[i, j, posads0:(posads0+36)] += waveform[i, pixel, 2:38]
                    numped[i, j, posads0:(posads0 + 36)] += 1
                else:
                    # Slow path: wrap each sample position individually.
                    for k in range(2, roisize-2):
                        posads = int((k+fc)%size4drs)
                        val = waveform[i, pixel, k]
                        meanped[i, j, posads] += val
                        numped[i, j, posads] += 1

    def finalize_pedestal(self):
        """Convert accumulated sums into per-capacitor means (in place).

        Cells never covered by an event have numped == 0, which makes the
        division fail/warn — hence the guidance to use more events.
        """
        try:
            self.meanped = self.meanped/self.numped
        except Exception as err:
            print("Not enough events to coverage all capacitor. Please use more events to create pedestal file.")
            print(err)
def get_first_capacitor(event, nr):
    """Return the (2, 8) first-capacitor id array for module `nr`.

    Row 0 holds the high-gain entries, row 1 the low-gain entries; only
    the first 7 columns (one per pixel) are populated.
    """
    fc = np.zeros((2, 8))
    raw = event.lst.tel[0].evt.first_capacitor_id[nr * 8:(nr + 1) * 8]
    # First capacitor order according Dragon v5 board data format:
    # raw entries 0-3 feed the high-gain pixels (two pixels per entry),
    # raw entries 4-7 feed the low-gain pixels.
    high_gain_src = (0, 0, 1, 1, 2, 2, 3)
    low_gain_src = (4, 4, 5, 5, 6, 6, 7)
    for pixel in range(7):
        fc[0, pixel] = raw[high_gain_src[pixel]]
        fc[1, pixel] = raw[low_gain_src[pixel]]
    return fc
ros/niryo_one_ros/niryo_one_tcp_server/clients/python/examples/simple_scripts/video_stream.py | paubrunet97/astrocytes | 5 | 12763330 | <filename>ros/niryo_one_ros/niryo_one_tcp_server/clients/python/examples/simple_scripts/video_stream.py
"""
This script allows to capture Niryo One's video streaming and to make some image processing on it
"""
# Imports
from niryo_one_tcp_client import *
from niryo_one_camera import *
# Set robot address (the Niryo One's LAN IP; adjust for your network).
robot_ip_address = "192.168.1.202"

# Set Observation Pose. It's where the robot will be placed for streaming
# (camera pointing down at the workspace; pitch 1.57 rad ~ 90 degrees).
observation_pose = PoseObject(
    x=0.2, y=0.0, z=0.34,
    roll=0, pitch=1.57, yaw=-0.2,
)
def video_stream(niryo_one_client):
    """Stream the robot camera, undistort frames and highlight markers.

    Loops until the user presses Escape or Q, then re-enables learning
    mode so the arm can be moved by hand.
    """
    # Getting calibration param (camera matrix + distortion coefficients)
    _, mtx, dist = niryo_one_client.get_calibration_object()
    # Moving to observation pose
    niryo_one_client.move_pose(*observation_pose.to_list())

    while "User do not press Escape neither Q":
        # Getting image
        status, img_compressed = niryo_one_client.get_img_compressed()
        if status is not True:
            print("error with Niryo One's service")
            break
        # Uncompressing image
        img_raw = uncompress_image(img_compressed)
        # Undistorting with the calibration parameters fetched above
        img_undistort = undistort_image(img_raw, mtx, dist)
        # Trying to find markers
        workspace_found, res_img_markers = debug_markers(img_undistort)
        # Trying to extract workspace if possible
        if workspace_found:
            img_workspace = extract_img_workspace(img_undistort, workspace_ratio=1.0)
        else:
            img_workspace = None

        # - Display
        # Concatenating raw image and undistorted image
        concat_ims = concat_imgs((img_raw, img_undistort))

        # Concatenating extracted workspace with markers annotation
        if img_workspace is not None:
            res_img_markers = concat_imgs((res_img_markers, resize_img(img_workspace, height=res_img_markers.shape[0])))

        # Showing images; the second call polls the keyboard for ~30 ms
        show_img("Images raw & undistorted", concat_ims, wait_ms=0)
        key = show_img("Markers", res_img_markers, wait_ms=30)
        if key in [27, ord("q")]:  # Will break loop if the user press Escape or Q
            break

    niryo_one_client.set_learning_mode(True)
if __name__ == '__main__':
    # Connect to robot
    client = NiryoOneClient()
    client.connect(robot_ip_address)
    # Calibrate robot if robot needs calibration
    client.calibrate(CalibrateMode.AUTO)
    # Launching main process (blocks until the user quits with Esc/Q)
    video_stream(client)
    # Releasing connection
    client.quit()
| 2.625 | 3 |
office365/sharepoint/sharing/sharingLinkInfo.py | rikeshtailor/Office365-REST-Python-Client | 544 | 12763331 | <gh_stars>100-1000
from office365.runtime.client_value import ClientValue
class SharingLinkInfo(ClientValue):
    """Specifies the information about the tokenized sharing link."""

    def __init__(self):
        super(SharingLinkInfo, self).__init__()
        # Every field starts unset; the server payload fills them in later.
        for field in ('AllowsAnonymousAccess', 'ApplicationId',
                      'CreatedBy', 'PasswordProtected'):
            setattr(self, field, None)
| 2.109375 | 2 |
setup.py | avanc/mopidy-headless | 6 | 12763332 | from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
    """Extract the package ``__version__`` string from *filename*.

    Scans for ``__name__ = 'value'`` style assignments and returns the
    value bound to ``__version__``.

    Raises:
        KeyError: if the file contains no ``__version__`` assignment.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original leaked it via open(...).read()).
    with open(filename) as fh:
        content = fh.read()
    metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content))
    return metadata['version']
# Package metadata. The version is read out of the package source so it
# only has to be maintained in one place (mopidy_headless/__init__.py).
setup(
    name='Mopidy-Headless',
    version=get_version('mopidy_headless/__init__.py'),
    url='',
    license='Apache License, Version 2.0',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Mopidy extension for controlling via input devices',
    long_description=open('README.md').read(),
    packages=find_packages(exclude=['tests', 'tests.*']),
    zip_safe=False,
    include_package_data=True,
    install_requires=[
        'setuptools',
        'Mopidy >= 0.19',
        'Pykka >= 1.1',
    ],
    entry_points={
        # Registers the extension with Mopidy's plugin loader.
        'mopidy.ext': [
            'headless = mopidy_headless:Extension',
        ],
    },
)
| 1.664063 | 2 |
ansible/roles/blade.cumulus/library/ssh_user_alias.py | ClashTheBunny/cmdb | 111 | 12763333 | <filename>ansible/roles/blade.cumulus/library/ssh_user_alias.py
#!/usr/bin/python
DOCUMENTATION = """
---
module: ssh_user_alias.py
short_description: Create alias for users in SSH authorized_keys
options:
user:
description:
- base user to make alias for
groups:
description:
- list of groups we want our aliases to be in
"""
import os
import re
from ansible.module_utils.basic import AnsibleModule
def main():
    """Create an alias account (in passwd/shadow/group) for every key owner
    listed in the base user's ~/.ssh/authorized_keys.

    Aliases share the base user's uid/gid/home/shell and are tagged with
    gecos "cmdb,,," so a later run can find and replace them.
    """
    module_args = dict(
        user=dict(type='str', required=True),
        groups=dict(type='list', elements='str', default=[])
    )
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )
    result = dict(
        changed=False
    )
    # `got` keeps the on-disk contents; `wanted` is rewritten below and
    # compared against `got` to decide whether anything changed.
    got = {}
    wanted = {}
    for source in ["/etc/passwd",
                   "/etc/shadow",
                   "/etc/group"]:
        with open(source) as f:
            got[source] = f.read()
            wanted[source] = got[source]
    base_uid = None
    to_remove = []
    # Handle /etc/passwd
    to_keep = []
    for line in wanted["/etc/passwd"].split("\n"):
        if not line:
            continue
        user, _, uid, gid, gecos, home, shell = line.split(":")
        if user == module.params["user"]:
            # Remember the base account's identity for the aliases.
            base_uid, base_gid, base_home, base_shell = uid, gid, home, shell
        elif gecos == "cmdb,,,":
            # Aliases created by a previous run are dropped and rebuilt.
            to_remove.append(user)
            continue
        to_keep.append(line)
    if base_uid is None:
        result["msg"] = "user {} not found in /etc/passwd".format(
            module.params["user"])
        module.fail_json(**result)
    # Get HOME/.ssh/authorized_keys
    to_add = []
    with open(os.path.join(base_home, ".ssh", "authorized_keys")) as f:
        for line in f:
            if not line:
                continue
            line = line.strip()
            # The third space-separated field is the key comment; comments
            # starting with lowercase letters are treated as alias names.
            user = line.split(" ", 2)[-1]
            if re.match(r"[a-z]+", user):
                to_add.append(user)
    # Add users
    for user in to_add:
        to_keep.append(":".join([user, "x", base_uid, base_gid,
                                 "cmdb,,,", base_home, base_shell]))
    wanted["/etc/passwd"] = "\n".join(to_keep) + "\n"
    # Handle /etc/shadow: entries with password field "cmdb" are ours.
    to_keep = []
    for line in wanted["/etc/shadow"].split("\n"):
        if not line:
            continue
        user, passwd, _, _, _, _, _, _, _ = line.split(":")
        if passwd != "cmdb":
            to_keep.append(line)
    for user in to_add:
        # Fixed last-change day count plus standard aging fields.
        to_keep.append(":".join([user, "cmdb", "18312", "0",
                                 "999999", "7", "", "", ""]))
    wanted["/etc/shadow"] = "\n".join(to_keep) + "\n"
    # Handle /etc/group: scrub stale aliases everywhere, add the new ones
    # to the requested groups.
    to_keep = []
    for line in wanted["/etc/group"].split("\n"):
        if not line:
            continue
        group, password, gid, users = line.split(":")
        users = [u for u in users.split(",")
                 if u and u not in to_remove]
        if group in module.params["groups"]:
            users.extend(to_add)
        users = ",".join(users)
        to_keep.append(":".join([group, password, gid, users]))
    wanted["/etc/group"] = "\n".join(to_keep) + "\n"
    if got != wanted:
        result['changed'] = True
        # Per-file before/after diff for Ansible's --diff output.
        result['diff'] = [
            dict(
                before_header=f,
                after_header=f,
                before=got[f],
                after=wanted[f])
            for f in got
        ]
    # In check mode (or when nothing changed) report without writing.
    if module.check_mode or not result['changed']:
        module.exit_json(**result)
    # Apply changes.
    for dest in wanted:
        with open(dest, "w") as f:
            f.write(wanted[dest])
    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| 2.375 | 2 |
setup.py | cloudify-incubator/cloudify-ecosystem-test | 1 | 12763334 | <filename>setup.py
from setuptools import setup, find_packages
setup(
    name='cloudify-ecosystem-test',
    version='2.5.35',
    license='LICENSE',
    packages=find_packages(),
    description='Stuff that Ecosystem Tests Use',
    entry_points={
        # Both the singular and plural command names run the same CLI.
        "console_scripts": [
            "ecosystem-test = ecosystem_tests.ecosystem_tests_cli.main:_ecosystem_test",
            "ecosystem-tests = ecosystem_tests.ecosystem_tests_cli.main:_ecosystem_test"
        ]
    },
    install_requires=[
        'testtools',
        'cloudify-common>=5.1.0',
        'PyGithub',
        'wagon>=0.10.0',
        'boto3',
        'urllib3>=1.25.4',
        'progressbar',
        'pyyaml',
        'requests',
        'click>7,<8',
        'nose>=1.3',
        'pytest==4.6.11'
    ]
)
| 1.421875 | 1 |
pioneer/das/tests/validate_imu_flow.py | leddartech/pioneer.das.api | 8 | 12763335 | #TODO: move this to pioneer.das.acquisition
from pioneer.das.api import platform
try:
import folium #pip3 install folium
except:
pass
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import tqdm
import utm
def easting_northing_from_lat_long(latitude, longitude):
    """Project a WGS84 latitude/longitude pair to UTM easting/northing."""
    # utm.from_latlon also returns the zone number and letter; drop them.
    return utm.from_latlon(latitude, longitude)[:2]
def distance_traj_step(easting, northing, t=None):
    """Per-step Euclidean distance (or speed) along a trajectory.

    Args:
        easting, northing: coordinate sequences of equal length.
        t: optional timestamps matching the coordinates; when given, each
            step distance is divided by its time delta, yielding a speed.

    Returns:
        numpy array of length len(easting) - 1.
    """
    d_e = np.diff(easting)
    d_n = np.diff(northing)
    dist = np.hypot(d_e, d_n)
    if t is None:
        # Bug fix: the original divided by d_t unconditionally, raising
        # UnboundLocalError whenever t was omitted (as find_trajectory_jump
        # does when calling this function).
        return dist
    return dist / np.diff(t)
def get_trajectory(pfsynch:platform.Synchronized,
                   ref_ts_sensor:str='flir_bfc_img',
                   imu_nav:str='sbgekinox_bcc_navposvel',
                   imu_nav:str='sbgekinox_bcc_navposvel'.__class__ and 'sbgekinox_bcc_navposvel'):
    '''simple: return easting, northing, points list, and time of the trajectory following the timestamps of ref_ts_sensor
    '''
def compute_neighbour_step_ratio(xt, yt, t, min_epsilon_precision=1e-5):
    """Left/right step-ratio smoothness score at each interior sample.

    For every interior point i, the (time-normalized) step toward the left
    neighbour is compared with the step toward the right neighbour, per
    axis: a smooth trajectory keeps both ratios close to 1. The two axis
    ratios are folded into one Euclidean norm per point.

    Args:
        xt, yt: coordinate sequences.
        t: timestamps matching xt/yt.
        min_epsilon_precision: floor applied to each step to avoid dividing
            by ~zero on stationary segments.

    Returns:
        numpy float array of len(xt) scores; both endpoints are 0.
    """
    step_ratio_norm = [0]
    for i in range(1, len(xt) - 1):
        d_t_l = np.abs(t[i-1] - t[i])
        d_t_r = np.abs(t[i+1] - t[i])
        d_xt_l = np.maximum(np.abs(xt[i-1] - xt[i]) / d_t_l, min_epsilon_precision)
        d_xt_r = np.maximum(np.abs(xt[i+1] - xt[i]) / d_t_r, min_epsilon_precision)
        d_yt_l = np.maximum(np.abs(yt[i-1] - yt[i]) / d_t_l, min_epsilon_precision)
        d_yt_r = np.maximum(np.abs(yt[i+1] - yt[i]) / d_t_r, min_epsilon_precision)
        step_ratio_xt = np.maximum(d_xt_l, d_xt_r) / np.minimum(d_xt_l, d_xt_r)
        step_ratio_yt = np.maximum(d_yt_l, d_yt_r) / np.minimum(d_yt_l, d_yt_r)
        step_ratio_norm.append((step_ratio_xt**2 + step_ratio_yt**2)**0.5)
    step_ratio_norm.append(0)
    # Bug fix: np.float was removed in NumPy 1.24 — use the builtin float.
    return np.array(step_ratio_norm, dtype=float)
def compute_standard_score(x, seq_memory: int=200, start_at_zero: bool=True, outliers_threshold: float=100.0):
    '''return the standard score based on a memory sequence of certain length.

    Each sample's z-score is computed against the mean/std of the previous
    `seq_memory` samples, excluding samples already flagged as outliers so
    a single jump does not inflate the window statistics.

    Args:
        x: 1-D sequence of values (e.g. per-step trajectory distances).
        seq_memory: length of the sliding history window.
        start_at_zero: when False, the first `seq_memory` scores are 0.
        outliers_threshold: z-score above which a sample is excluded from
            future window statistics.
    '''
    m = len(x)
    epsilon_ = 1e-4 # 0.1 mm precision
    z_score = []
    z_score.append(0)
    flag_outliers = np.zeros_like(x, dtype=bool)
    for mu in tqdm.tqdm(range(1, m)):
        # Window [a, b): up to seq_memory samples strictly before mu.
        a, b = np.maximum(mu - seq_memory, 0), mu
        if mu < seq_memory and not start_at_zero:
            z_score.append(0)
            continue
        # Outlier-flagged samples are dropped from the window statistics.
        window_seq = x[a:b][~flag_outliers[a:b]]
        # if mu > seq_memory and len(window_seq) < 0.25*seq_memory:
        #     z_score.append(0)
        #     continue
        seq_mean = np.mean(window_seq)
        seq_std = np.std(window_seq)
        # epsilon_ keeps the division finite on constant windows.
        z_ = np.abs((x[mu] - seq_mean)/(seq_std + epsilon_))
        if z_ > outliers_threshold:
            flag_outliers[mu] = 1
        z_score.append(np.copy(z_))
    return np.array(z_score)
def get_trajectory_standard_score(pfsynch:platform.Synchronized,
                                  ref_ts_sensor:str='flir_bfc_img',
                                  imu_nav:str='sbgekinox_bcc_navposvel',
                                  traj_seq_memory:int=200):
    '''estimation of the smoothness of a trajectory based on the standard score.

    Returns one z-score per trajectory sample; higher values indicate a
    discontinuity (jump) in the IMU positions.
    '''
    easting, northing, _, t = get_trajectory(pfsynch, ref_ts_sensor, imu_nav)
    traj_step = distance_traj_step(easting, northing, t)
    z_scores = np.zeros_like(easting)
    # Index 0 stays 0: there is no step before the first sample.
    z_scores[1:] = compute_standard_score(traj_step, traj_seq_memory-1, False)
    return z_scores
def get_trajectory_step_ratio(pfsynch:platform.Synchronized,
                              ref_ts_sensor:str='flir_bfc_img',
                              imu_nav:str='sbgekinox_bcc_navposvel',
                              traj_min_epsilon_precision:float=1e-6):
    '''estimation of the smoothness of the trajectory based on the ratio of left-right epsilons step

    Returns one left/right step-ratio score per trajectory sample (see
    compute_neighbour_step_ratio for the exact definition).
    '''
    easting, northing, _, t = get_trajectory(pfsynch, ref_ts_sensor, imu_nav)
    return compute_neighbour_step_ratio(easting, northing, t, traj_min_epsilon_precision)
def find_trajectory_jump(pfsynch:platform.Synchronized,
                         ref_ts_sensor:str='flir_bfc_img',
                         imu_nav:str='sbgekinox_bcc_navposvel',
                         traj_seq_memory:int=200,
                         traj_jump_threshold:float=15.5,
                         show_result:bool=True):
    '''Compute the list of ranges of intervall from pfsynch which are smooth according to traj_jump_threshold.

    NOTE: the return arity differs: (jump_flag, list_intervals, folium map)
    when show_result is True, otherwise (jump_flag, list_intervals) —
    callers must unpack accordingly.
    '''
    print('Computing trajectory')
    easting, northing, points, t = get_trajectory(pfsynch, ref_ts_sensor, imu_nav)
    # NOTE(review): no timestamps are passed here, so with the original
    # distance_traj_step this raises UnboundLocalError on d_t — confirm.
    traj_step = distance_traj_step(easting, northing)
    print('Validate trajectory')
    z_scores = np.zeros_like(easting)
    z_scores[1:] = compute_standard_score(traj_step, traj_seq_memory-1, False)
    jump_flag = (z_scores > traj_jump_threshold).astype(bool)
    list_intervals = []
    # Indices of all samples flagged as jumps.
    ids = np.arange(len(jump_flag))[jump_flag]
    for mu in range(len(ids)):
        if mu == 0:
            list_intervals.append([0 , ids[mu]-1])
            continue
        # Only gaps of at least traj_seq_memory samples count as a
        # usable smooth interval between two jumps.
        if ids[mu]-ids[mu-1] >= traj_seq_memory:
            list_intervals.append([ids[mu-1], ids[mu]-1])
    if show_result:
        t = np.arange(len(easting))
        fig, ax = plt.subplots(2, 1, figsize=(9,10))
        fig.suptitle('Trajectory positions and jumps')
        ax[0].scatter(t, easting)
        ax[0].scatter(t[jump_flag], easting[jump_flag], label='jump flags')
        ax[0].legend()
        ax[0].set_xlabel('Frame number')
        ax[0].set_ylabel('Easting')
        ax[1].scatter(t, northing)
        ax[1].scatter(t[jump_flag], northing[jump_flag], label='jump flags')
        ax[1].legend()
        ax[1].set_xlabel('Frame number')
        ax[1].set_ylabel('Northing')
        plt.show()
        # Interactive map centered on the first point, with one red
        # marker per detected IMU jump.
        my_map = folium.Map(location=points[0], zoom_start=15)
        folium.PolyLine(points).add_to(my_map)
        for mu in ids:
            folium.CircleMarker(
                location=points[mu],
                radius=5.5,
                popup='IMU jump: '+ str(mu),
                color='red',
                fill=True,
                fill_color='red'
            ).add_to(my_map)
        return jump_flag, list_intervals, my_map
    return jump_flag, list_intervals
if __name__ == '__main__':
    #example of use:
    #see this dataset:
    _dataset = '/nas/pixset/exportedDataset/20200610_195655_rec_dataset_quartier_pierre_exported'
    _ignore = ['radarTI_bfc']
    pf = platform.Platform(_dataset, ignore=_ignore)
    # get the platform synchronized:
    # NOTE(review): ' *_ftrr*' below carries a leading space — this looks
    # like a typo that would prevent the label from matching; confirm.
    sync_labels = ['*ech*', '*_img*', '*_trr*', '*_trf*',' *_ftrr*', '*xyzit-*']
    interp_labels = ['*_xyzit', 'sbgekinox_*', 'peakcan_*', '*temp', '*_pos*', '*_agc*']
    synch = pf.synchronized(sync_labels=sync_labels, interp_labels=interp_labels, tolerance_us=1e3)
    # show_result=True: returns a third value (the folium map).
    flags, inters, my_map = find_trajectory_jump(synch,
                                                 ref_ts_sensor='flir_bfc_img',
                                                 imu_nav='sbgekinox_bcc_navposvel',
                                                 traj_seq_memory=200,
                                                 traj_jump_threshold=4.0,
                                                 show_result=True)
    print('Intervals:', inters)
| 2.53125 | 3 |
mmdet/datasets/logos_dataset.py | guigarfr/Robust_Logo_Detection | 0 | 12763336 | import os.path as osp
import numpy as np
import mmcv
from . import XMLDataset
from .builder import DATASETS
import xml.etree.ElementTree as ET
from PIL import Image
@DATASETS.register_module()
class LogosDataset(XMLDataset):
    """Pascal-VOC-style logo dataset whose class list is discovered by
    scanning the annotation XMLs instead of being hard-coded."""

    def load_annotations(self, ann_file):
        """Load annotation from XML style ann_file.
        Args:
            ann_file (str): Path of XML file.
        Returns:
            list[dict]: Annotation info from XML file.
        """
        if not self.CLASSES:
            # Class names are collected on the fly while scanning.
            self.CLASSES = set()
        data_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = f'JPEGImages/{img_id}.jpg'
            xml_path = osp.join(self.img_prefix, 'Annotations',
                                f'{img_id}.xml')
            tree = ET.parse(xml_path)
            root = tree.getroot()
            # Get image size data
            size = root.find('size')
            if size is not None:
                width = int(size.find('width').text)
                height = int(size.find('height').text)
            else:
                # Fall back to reading the image itself when the XML has
                # no <size> element.
                img_path = osp.join(self.img_prefix, 'JPEGImages',
                                    '{}.jpg'.format(img_id))
                img = Image.open(img_path)
                width, height = img.size
            # Get object classes
            self.CLASSES |= {x.text for x in tree.findall("object/name")}
            data_infos.append(
                dict(id=img_id, filename=filename, width=width, height=height))
        # Sort so class indices are deterministic across runs.
        self.CLASSES = sorted(list(self.CLASSES))
        return data_infos

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in COCO protocol.
        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU threshold used for evaluating
                recalls/mAPs. If set to a list, the average of all IoUs will
                also be computed. Default: np.arange(0.5, 0.96, 0.05).
        Returns:
            dict[str, float]: COCO style evaluation metric.
        """
        # NOTE(review): only the metric names are validated here — no
        # scores are computed and an empty dict is always returned.
        # Confirm this stub is intentional.
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        return dict()
| 2.53125 | 3 |
Python/CeV/Exercicios/ex67.py | WerickL/Learning | 0 | 12763337 | <filename>Python/CeV/Exercicios/ex67.py
# Keep asking for numbers and print each one's multiplication table.
# A negative number ends the program; zero simply asks again.
while True:
    T = int(input('Digite um número para mostrar a tabuada:'))
    if T < 0:
        break
    if T > 0:
        # Print the 1..10 table, one product per line.
        for fator in range(1, 11):
            print(f'{T} X {fator} = {T * fator}')
print('Obrigado por usar a tabuada, volte sempre!')
| 4.125 | 4 |
brain/tests/model/test_Position.py | siddharthkundu/raspberry-pi-os-image-builder | 0 | 12763338 | import json
import random
import unittest
from model.position import Position
class PositionTest(unittest.TestCase):
    """Unit tests for Position JSON serialization."""

    def test_given_a_position_then_it_is_serializable(self):
        # Random coordinates ensure serialization is not value-dependent.
        coords = {axis: random.randint(1, 100) for axis in ('x', 'y', 'z')}
        position = Position(coords['x'], coords['y'], coords['z'])
        self.assertEqual(coords, json.loads(position.toJSON()))
| 3.25 | 3 |
simplegexf.py | demux/simplegexf | 6 | 12763339 | import os
from collections import OrderedDict, MutableMapping, MutableSequence
from operator import itemgetter, attrgetter
from copy import deepcopy
import xmltodict
TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<gexf xmlns="http://www.gexf.net/1.2draft"
xmlns:viz="http://www.gexf.net/1.1draft/viz"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd"
version="1.2"
></gexf>"""
class BaseElement:
    """Thin wrapper around an xmltodict-style OrderedDict node.

    Item access proxies to the underlying dict, and attribute access falls
    back to XML attributes (keys prefixed with '@').
    """

    def __init__(self, parent, data):
        self.parent = parent
        self.data = data or OrderedDict()

    def set(self, name, _text=None, **kwargs):
        """Create child element `name` with kwargs as XML attributes.

        Attribute values are stringified and sorted by name; `_text`, when
        given, becomes the element's text content. Returns the new element.
        """
        element = OrderedDict([('@%s' % k, str(kwargs[k])) for k in sorted(kwargs)])
        if _text:
            element['#text'] = _text
        self.data[name] = element
        return element

    def _mklst(self, tag, **kwargs):
        """Ensure a `<tag>s` wrapper exists holding a list of `<tag>` children."""
        try:
            el = self.data['%ss' % tag]
            # xmltodict parses a single child as a dict; normalize to list.
            if type(el[tag]).__name__ != 'list':
                el[tag] = [el[tag]]
        except (KeyError, TypeError):
            el = self.set('%ss' % tag, **kwargs)
            el[tag] = []
        return el

    def get(self, name, default=None):
        """Return the child/attribute `name`, or `default` when absent."""
        return self.data.get(name, default)

    def __getattribute__(self, attr):
        try:
            return super().__getattribute__(attr)
        except AttributeError:
            # Fall back to XML attributes stored under '@<name>'.
            try:
                return self.data['@%s' % attr]
            except KeyError:
                raise AttributeError(attr)

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        # Bug fix: the original assigned to the literal string 'key',
        # clobbering one shared entry instead of the requested key.
        self.data[key] = value

    def __delitem__(self, key):
        # Bug fix: the original deleted the literal 'key' entry.
        del self.data[key]
class Gexf(BaseElement):
    """A .gexf file on disk, parsed into (and serialized from) an
    xmltodict tree."""

    def __init__(self, path):
        self.path = os.path.realpath(path)
        try:
            # Use a context manager so the handle is closed promptly
            # (the original leaked the open file object).
            with open(self.path, 'r', encoding='utf-8') as fh:
                xml = fh.read()
        except IOError:
            # No file yet: start from the empty GEXF 1.2 skeleton.
            xml = TEMPLATE
        self.tree = xmltodict.parse(xml)
        self._mklst('graph')

    @property
    def data(self):
        # BaseElement reads/writes `self.data`; alias it to the <gexf> root.
        return self.tree['gexf']

    @data.setter
    def data(self, value):
        self.tree['gexf'] = value

    def write(self):
        """Serialize the tree back to ``self.path``."""
        with open(self.path, 'w+', encoding='utf-8') as fh:
            fh.write(str(self))

    @property
    def _graphs(self):
        # Raw xmltodict list backing the <graphs> wrapper.
        return self.data['graphs']['graph']

    @property
    def graphs(self):
        """All graphs, wrapped as Graph objects."""
        return [Graph(self, graph) for graph in self._graphs]

    def add_graph(self, **kwargs):
        """Append a new <graph> element with the given XML attributes."""
        el = OrderedDict([('@%s' % k, str(kwargs[k])) for k in sorted(kwargs)])
        self._graphs.append(el)
        return Graph(self, el)

    @property
    def clean_tree(self):
        # TODO: Remove all empty lists.
        return self.tree

    def __str__(self):
        return xmltodict.unparse(self.clean_tree, pretty=True)
# class GexfGraphs(MutableSequence):
class Graph(BaseElement):
    """A single <graph> element: nodes, edges and their attribute
    declarations."""

    def __init__(self, *args):
        super().__init__(*args)
        try:
            attr_wrapper_list = self.data['attributes']
            # Ensure `attr_wrapper_list` is a list:
            if type(attr_wrapper_list).__name__ != 'list':
                self.data['attributes'] = [self.data['attributes']]
                attr_wrapper_list = self.data['attributes']
        except (KeyError, TypeError):
            attr_wrapper_list = []
            self.data['attributes'] = attr_wrapper_list

        # GEXF declares node attributes and edge attributes in separate
        # <attributes class="..."> wrappers; create any that are missing.
        for _class in ['node', 'edge']:
            try:
                [attr_wrapper] = filter(lambda x: x['@class'] == _class,
                                        attr_wrapper_list)
            except ValueError:
                attr_wrapper = OrderedDict([
                    ('@class', _class),
                    ('attribute', [])
                ])
                attr_wrapper_list.append(attr_wrapper)

            # If there is only one attribute in the parsed data,
            # it will not be a list, so we need to fix that:
            if type(attr_wrapper['attribute']).__name__ != 'list':
                attr_wrapper['attribute'] = [attr_wrapper['attribute']]

        self._mklst('node')
        self._mklst('edge')
        self.edges = GraphEdges(self)

    @property
    def _nodes(self):
        # Raw xmltodict list backing the <nodes> wrapper.
        return self.data['nodes']['node']

    @_nodes.setter
    def _nodes(self, value):
        self.data['nodes']['node'] = value

    @property
    def _edges(self):
        # Raw xmltodict list backing the <edges> wrapper.
        return self.data['edges']['edge']

    @_edges.setter
    def _edges(self, value):
        self.data['edges']['edge'] = value

    @property
    def nodes(self):
        """All nodes, wrapped as Node objects."""
        return [Node(self, node) for node in self._nodes]

    def add_node(self, **kwargs):
        """Append a new <node> with the given XML attributes; return it."""
        el = OrderedDict([('@%s' % k, str(kwargs[k])) for k in sorted(kwargs)])
        self._nodes.append(el)
        return Node(self, el)

    def sort_nodes(self, key=None, attr=None, type_cast=int, reverse=False):
        """Sort nodes in place by a `key` callable or by attribute `attr`
        (cast with `type_cast` before comparison).

        NOTE(review): if neither `key` nor `attr` is given, `_key` is never
        bound and sorted() raises NameError — confirm callers always pass
        one of the two.
        """
        if key:
            _key = key
        elif attr:
            _key = lambda x: type_cast(x.attributes[attr])
        self._nodes = list(map(attrgetter('data'),
                               sorted(self.nodes, key=_key, reverse=reverse)))

    @property
    def _class_mapped_attributes(self):
        # {'node': [...attribute dicts...], 'edge': [...]}
        return {w['@class']: w['attribute'] for w in self.data['attributes']}

    def get_id_mapped_attributes(self, _class):
        """Map attribute id (int) -> {'title', 'type'} for `_class`."""
        return {int(attr['@id']): {
            'title': attr['@title'],
            'type': attr['@type']
        } for attr in self._class_mapped_attributes[_class]}

    def get_attributes(self, _class):
        """Map attribute title -> {'id', 'type'} for `_class`."""
        return OrderedDict([(attr['@title'], {
            'id': int(attr['@id']),
            'type': attr['@type']
        }) for attr in self._class_mapped_attributes[_class]])

    @property
    def node_attributes(self):
        return self.get_attributes('node')

    @property
    def edge_attributes(self):
        return self.get_attributes('edge')

    def define_attributes(self, attributes, _class='node'):
        """Declare (title, type) pairs, skipping titles already defined.
        Ids are assigned sequentially from the current declaration count."""
        defined = self.get_attributes(_class).keys()
        _attributes = self._class_mapped_attributes[_class]
        for attr, _type in attributes:
            if attr in defined:
                continue
            el = OrderedDict([
                ('@id', str(len(_attributes))),
                ('@title', str(attr)),
                ('@type', str(_type))
            ])
            _attributes.append(el)
class GraphEdges(MutableSequence):
    """List-like view over a Graph's raw edge elements, yielding Edge
    objects bound to the graph's attribute declarations."""

    def __init__(self, graph):
        self.graph = graph

    def __getitem__(self, index):
        # Build a throwaway Edge, then point it at the real backing data.
        edge = Edge(None, None)
        edge.data = self.graph._edges[index]
        edge._create_attributes(self.graph)
        return edge

    def __setitem__(self, index, edge):
        self.graph._edges[index] = edge.data

    def __delitem__(self, index):
        del self.graph._edges[index]

    def __len__(self):
        return len(self.graph._edges)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "<GraphEdges %s>" % self.graph.edges

    def insert(self, index, edge):
        """Insert `edge` (skipping duplicates, compared via Edge.__eq__)
        and renumber every edge's '@id' sequentially."""
        edge._create_attributes(self.graph)
        # No duplicates:
        if edge in self.graph.edges:
            return
        self.graph._edges.insert(index, edge.data)
        for i, el in enumerate(self.graph._edges):
            el['@id'] = str(i)

    def append(self, edge):
        self.insert(len(self.graph._edges), edge)
class Edge(BaseElement):
    """An <edge> element connecting a source node id to a target node id."""

    def __init__(self, source, target, id=None, type=None):
        self.data = OrderedDict([
            ('@id', str(id)),
            ('@source', str(source)),
            ('@target', str(target))
        ])
        if type:
            self.data['@type'] = str(type)
        self._mklst('attvalue')

    def _create_attributes(self, graph):
        # Attribute access needs the owning graph for id/title lookups, so
        # the mapping is attached only once the edge joins a graph.
        self._attributes = EdgeAttributes(graph, self)

    @property
    def attributes(self):
        try:
            return self._attributes
        except AttributeError:
            raise AttributeError('Attributes are not available before edge '
                                 'has been added to a graph')

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Fixed: the original mislabeled edges as "<Node ...>". Safe to
        # change because equality (below) only ever compares Edge to Edge.
        return '<Edge %s -> %s>' % (self.source, self.target)

    def __eq__(self, other):
        # String-based equality; also drives GraphEdges' duplicate check.
        return str(self) == str(other)
class Node(BaseElement):
    """A <node> element; attribute values are exposed via `self.attributes`."""

    def __init__(self, *args):
        super().__init__(*args)
        # Ensure the <attvalues> wrapper exists before NodeAttributes
        # starts reading it.
        self._mklst('attvalue')
        self.attributes = NodeAttributes(self.parent, self)
class NodeAttributes(MutableMapping):
    """Dict-like view over an element's <attvalue> children, keyed by the
    attribute *title* declared on the owning graph."""

    # Which <attributes class="..."> declarations to resolve titles against.
    _class = 'node'

    def __init__(self, graph, obj, *args, **kwargs):
        self.graph = graph
        self.obj = obj
        self.update(dict(*args, **kwargs))

    @property
    def _attvalues(self):
        # Raw xmltodict list of <attvalue> elements on the owning object.
        return self.obj.data['attvalues']['attvalue']

    @property
    def _mapped_attvalues(self):
        # attribute id (int) -> raw <attvalue> element
        return {int(v['@for']): v for v in self._attvalues}

    def __getitem__(self, key):
        tkey = self.__keytransform__(key)
        return self._mapped_attvalues[tkey]['@value']

    def __setitem__(self, key, value):
        tkey = self.__keytransform__(key)
        _value = str(value)
        # GEXF expects lowercase 'true'/'false' for boolean attributes.
        if self.graph.get_attributes(self._class)[key]['type'] == 'boolean':
            _value = _value.lower()
        try:
            self._mapped_attvalues[tkey]['@value'] = _value
        except KeyError:
            # Create a new <attvalue/> element:
            # XML Output:
            # <attvalue for="<tkey>" value="<value>"></attvalue>
            el = OrderedDict([('@for', str(tkey)), ('@value', _value)])
            self._attvalues.append(el)

    def __delitem__(self, key):
        # Should not have to find the actual index as the id should be the
        # same as the index:
        # NOTE(review): this deletes by list *position* equal to the
        # attribute id, which only holds when attvalues were appended in
        # id order — confirm before relying on it.
        tkey = self.__keytransform__(key)
        del self._attvalues[tkey]

    def __iter__(self):
        # Yield attribute *titles*, resolved from the ids present here.
        return iter(map(lambda id: self.graph.get_id_mapped_attributes(self._class)[id]['title'],
                        self._mapped_attvalues.keys()))

    def __len__(self):
        return len(self._attvalues)

    def __keytransform__(self, key):
        # title -> declared attribute id
        return int(self.graph.get_attributes(self._class)[key]['id'])
class EdgeAttributes(NodeAttributes):
    """Same mapping behaviour as NodeAttributes, but resolved against the
    graph's *edge* attribute declarations."""
    _class = 'edge'
| 2.578125 | 3 |
machin/frame/helpers/servers.py | lethaiq/machin | 0 | 12763340 | from typing import Callable, Any, List
from machin.parallel.distributed import (
get_world, get_cur_name
)
from machin.parallel.server import (
PushPullGradServerImpl,
PushPullModelServerImpl
)
from torch.optim import Adam
def grad_server_helper(model_creators: List[Callable],
                       optimizer: Any = Adam,
                       learning_rate: float = 1e-3):
    """
    Helper function for creating a tuple of grad servers,
    used by A3C, IMPALE, etc. This function requires all processes
    in the world to enter.

    Warning:
        You should never run this function twice!

    Args:
        model_creators: A list of model creator functions,
            each one corresponds to one gradient reduction server.
        optimizer: Optimizer type, default is Adam.
        learning_rate: Learning rate of the optimizer.

    Returns:
        A tuple of accessors to gradient servers, the tuple has the
        same size as ``model_creators``
    """
    # Note:
    # passing a list of creator functions instead of passing a list of models
    # directly is designed to remove the unnecessary model creation cost on
    # not-the-primary-reducer processes.
    DEFAULT_GROUP_NAME = "server_group"

    # create groups first
    world = get_world()
    server_group = world.create_rpc_group(DEFAULT_GROUP_NAME,
                                          world.get_members())

    # create servers
    # The first world member acts as the primary reducer, which owns the
    # models and applies the accumulated gradients.
    primary_reducer = world.get_members()[0]
    servers = [
        PushPullGradServerImpl("grad_server_" + str(i),
                               server_group,
                               primary_reducer=primary_reducer)
        for i in range(len(model_creators))
    ]
    if get_cur_name() == primary_reducer:
        # Only the primary reducer instantiates models and optimizers.
        for model_creator, server in zip(model_creators, servers):
            model = model_creator()
            server.manage_model(model,
                                optimizer(model.parameters(),
                                          lr=learning_rate))
            server.start()
    # Wait until every process reaches this point before fetching the
    # paired server accessors.
    server_group.barrier()
    servers = tuple(
        server_group.get_paired("grad_server_" + str(i)).to_here()
        for i in range(len(model_creators))
    )

    # accessors instead of actual implementation instance
    # will be returned because of __reduce__
    return servers
def model_server_helper(model_num: int):
    """
    Helper function for creating a tuple of model servers,
    used by APEX, etc. This function requires all processes
    in the world to enter.

    Warning:
        You should never run this function twice!

    Args:
        model_num: Number of model servers to create.

    Returns:
        A tuple of accessors to model servers, the size of tuple is
        ``model_num``
    """
    DEFAULT_GROUP_NAME = "server_group"

    # create groups first
    world = get_world()
    server_group = world.create_rpc_group(DEFAULT_GROUP_NAME,
                                          world.get_members())

    # create servers
    # In current implementation, only one process will initialize the server
    if get_cur_name() == world.get_members()[0]:
        for i in range(model_num):
            _server = PushPullModelServerImpl("model_server_" + str(i),
                                              server_group)

    # Wait until the servers are created and paired before fetching the
    # accessors on every process.
    server_group.barrier()
    servers = tuple(
        server_group.get_paired("model_server_" + str(i)).to_here()
        for i in range(model_num)
    )

    # accessors instead of actual implementation instance
    # will be returned because of __reduce__
    return servers
| 2.390625 | 2 |
my/google/paths.py | ktaranov/HPI | 1 | 12763341 | """
Module for locating and accessing [[https://takeout.google.com][Google Takeout]] data
"""
from dataclasses import dataclass
from typing import Optional
from my.config import google as user_config
from ..core.common import Paths
@dataclass
class google(user_config):
    """User-facing configuration block for Google Takeout processing."""
    # directory to unzipped takeout data
    takeout_path: Paths

    # this is the directory that my google drive gets mirrored to locally
    # when it detects a new takeout, it sends a warning, so I can run
    # the script to move it to takeout_path
    # see HPI/scripts/unzip_google_takeout
    google_drive_local_path: Optional[Paths]
from ..core.cfg import make_config
# Resolve the user's `google` config block into the module-wide config object.
config = make_config(google)
import warnings
from pathlib import Path
from typing import Iterable
from more_itertools import last
from ..core.common import get_files
from ..core.kompress import kexists
def get_takeouts(*, path: Optional[str] = None) -> Iterable[Path]:
    """Yield every takeout archive; if *path* is given, keep only the
    archives that contain that path."""
    check_for_new_takeouts()
    for archive in get_files(config.takeout_path):
        if path is not None and not kexists(archive, path):
            continue
        yield archive
def get_last_takeout(*, path: Optional[str] = None) -> Path:
    """Return the most recent takeout archive (optionally containing *path*).
    Raises if no takeouts are found (more_itertools.last on empty input)."""
    return last(get_takeouts(path=path))
# if there are any new takeouts, warn me
def check_for_new_takeouts():
    """Warn when the locally mirrored drive folder contains any files."""
    if not config.google_drive_local_path:
        return
    mirror = Path(config.google_drive_local_path).expanduser().absolute()
    new_takeouts = list(mirror.rglob("*"))
    if not new_takeouts:
        return
    # this may be temporary, once I'm confident the script works fine over
    # some period, I'll just automate this
    warnings.warn(
        f"Theres a new takeout at {new_takeouts[0]}, run ./scripts/unzip_google_takeout to update the data!"
    )
| 2.78125 | 3 |
pcg_libraries/src/pcg_gazebo/parsers/sdf/script.py | boschresearch/pcg_gazebo_pkgs | 42 | 12763342 | # Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLBase
from .name import Name
from .uri import URI
from .loop import Loop
from .delay_start import DelayStart
from .auto_start import AutoStart
from .trajectory import Trajectory
class Script(XMLBase):
    """SDF <script> element.

    Operates in one of two mutually exclusive modes:
      * ``material``: a named Gazebo material script plus its resource URIs.
      * ``actor``: animation playback settings (loop, delayed start,
        automatic start) and one or more trajectories.
    """
    _NAME = 'script'
    _TYPE = 'sdf'

    # Child element factories; each child's `mode` restricts it to one of
    # the modes listed in _MODES.
    _CHILDREN_CREATORS = dict(
        name=dict(creator=Name, mode='material', default=['default']),
        uri=dict(creator=URI, mode='material', default=['file://media/materials/scripts/gazebo.material'], n_elems='+', optional=True),
        loop=dict(creator=Loop, mode='actor', default=[True], optional=True),
        delay_start=dict(creator=DelayStart, mode='actor', default=[0], optional=True),
        auto_start=dict(creator=AutoStart, mode='actor', default=[True], optional=True),
        trajectory=dict(creator=Trajectory, mode='actor', optional=True, n_elems='+')
    )

    _MODES = ['material', 'actor']

    def __init__(self, mode='material'):
        XMLBase.__init__(self)
        self.reset(mode=mode)

    @property
    def name(self):
        # <name> child (material mode)
        return self._get_child_element('name')

    @name.setter
    def name(self, value):
        self._add_child_element('name', value)

    @property
    def loop(self):
        # <loop> child (actor mode)
        return self._get_child_element('loop')

    @loop.setter
    def loop(self, value):
        self._add_child_element('loop', value)

    @property
    def delay_start(self):
        # <delay_start> child (actor mode)
        return self._get_child_element('delay_start')

    @delay_start.setter
    def delay_start(self, value):
        self._add_child_element('delay_start', value)

    @property
    def auto_start(self):
        # <auto_start> child (actor mode)
        return self._get_child_element('auto_start')

    @auto_start.setter
    def auto_start(self, value):
        self._add_child_element('auto_start', value)

    @property
    def trajectories(self):
        # All <trajectory> children (actor mode)
        return self._get_child_element('trajectory')

    @property
    def uris(self):
        # All <uri> children (material mode)
        return self._get_child_element('uri')

    def add_uri(self, value):
        """Append another resource <uri>."""
        self._add_child_element('uri', value)

    def add_trajectory(self, value):
        """Append another <trajectory>."""
        self._add_child_element('trajectory', value)

    def is_valid(self):
        """Material-mode scripts must declare at least one URI."""
        if self.get_mode() == 'material':
            if self._get_child_element('uri') is None:
                print('No URIs found for script')
                return False
        return XMLBase.is_valid(self)
| 2.15625 | 2 |
boundaries/ocd-division/country:ca/csd:3520005/2010/definition.py | imhangoo/represent-canada-data | 0 | 12763343 | import re
from datetime import date
import boundaries
# Register the 2010 Toronto ward boundary set with the Represent boundary
# loader.  Ward ids are the shapefile's SCODE_NAME with any leading zero
# stripped; ward names come from the NAME attribute.
boundaries.register('Toronto wards (2010)',
    singular='Toronto ward',
    domain='Toronto, ON',
    last_updated=date(2018, 1, 16),
    name_func=boundaries.attr('NAME'),
    id_func=lambda f: re.sub(r'\A0', '', f.get('SCODE_NAME')),
    authority='City of Toronto',
    source_url='https://www.toronto.ca/city-government/data-research-maps/open-data/open-data-catalogue/#29b6fadf-0bd6-2af9-4a8c-8c41da285ad7',
    licence_url='https://www.toronto.ca/city-government/data-research-maps/open-data/open-data-licence/',
    data_url='http://opendata.toronto.ca/gcc/wards_may2010_wgs84.zip',
    encoding='iso-8859-1',
    extra={'division_id': 'ocd-division/country:ca/csd:3520005'},
)
| 1.921875 | 2 |
guides/migrations/0016_participant_attending.py | kesara/ietf-guides | 0 | 12763344 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-11-08 04:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the `attending` choice field (YES/NO, default NO) to Participant."""

    dependencies = [
        ('guides', '0015_auto_20200220_1619'),
    ]

    operations = [
        migrations.AddField(
            model_name='participant',
            name='attending',
            field=models.CharField(choices=[('YES', 'Yes'), ('NO', 'No')], default='NO', max_length=32, verbose_name='Will you be attending the next IETF?'),
        ),
    ]
| 1.757813 | 2 |
obb/views/platforms.py | marcmelchor/OBB-Train-Station | 0 | 12763345 | from obb.models.platform import Platform, Train
from django.shortcuts import get_object_or_404
from obb.serializers.serializers import PlatformSerializer
from rest_framework import viewsets
from rest_framework.response import Response
class PlatformViewSet(viewsets.ViewSet):
    """CRUD-style endpoints for station platforms.

    The create/update actions parse a space-separated command string from
    the request body, e.g. "name: <Platform_name> train_id: <Train_id>".
    """

    def list(self, request):
        """Return all platforms."""
        queryset = Platform.objects.all()
        serializer = PlatformSerializer(queryset, many=True)
        return Response(serializer.data)

    # Create a Platform, in Comment type "name: <Platform_name> train_id: <Train_id>" (USE QUOTES)
    def create(self, request):
        kwargs = {}
        # Expected token layout: ['name:', <name>, 'train_id:', <id>]
        data = request.data.split(' ')
        if len(data) > 1 and len(data[1]) > 0:
            kwargs.update({'name': data[1]})
        # Fixed: the original compared a str against 0 (TypeError on
        # Python 3) and called the non-existent str.is_numeric().
        if len(data) > 3 and data[3].isnumeric() and int(data[3]) > 0:
            queryset = Train.objects.all()
            train = get_object_or_404(queryset, pk=data[3])
            if type(train) is Train:
                kwargs.update({'train_id': train.id})
        platform = Platform(**kwargs)
        platform.save()
        serializer = PlatformSerializer(platform)
        return Response(serializer.data)

    def retrieve(self, request, pk=None):
        """Return a single platform or 404."""
        queryset = Platform.objects.all()
        platform = get_object_or_404(queryset, pk=pk)
        serializer = PlatformSerializer(platform)
        return Response(serializer.data)

    # Update a Platform, in Comment type "name: <Platform_name>" or "train: <Train_id>" (USE QUOTES)
    def update(self, request, pk=None):
        data = request.data.split(' ')
        platform = Platform.objects.get(pk=pk)
        if data[0] == "name":
            platform.name = data[1]
            platform.save()
        elif data[0] == "train":
            queryset = Train.objects.all()
            # 404s when the referenced train does not exist.
            train = get_object_or_404(queryset, pk=data[1])
            if type(train) is Train:
                platform.train_id = data[1]
                platform.save()
        return Response(PlatformSerializer(platform).data)
| 2.234375 | 2 |
data/external/repositories_2to3/141822/AXA_Telematics-master/Trip_Matching/RDP_Trip_Matching/find_matches.py | Keesiu/meta-kaggle | 0 | 12763346 | import csv
import os
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
#from sklearn.neighbors import NearestNeighbors
from path import Path
from vector_math import *
from find_matches import *
import search_matches
#********************
#**** this compares two sets of angles to see how close the two paths are
#********************
#@profile
def compare_two_sets_of_angles(path1, path2):
    """Compare the turn-angle signatures of two trips and record matches.

    All results are side effects on the two path objects:
      * on a confirmed alignment, appends the matching angle lists to
        ``path.comparison``, increments ``path.matched`` and sets
        ``path1.print_flag``;
      * otherwise, if at least two matched angles sit the same distance
        apart on both paths, promotes ``path.matched`` from
        "definitely not" (< 0) to "possible" (0).

    Returns None.
    """
    # Bail out unless both paths have extracted angles.
    # (The original tested `len(path.angles > 0)`, which only worked by
    # coincidence: len() of the elementwise boolean array equals len() of
    # the array itself.)
    if len(path2.angles) == 0:
        return
    path2_angles = path2.angles[:, 0:3]

    if len(path1.angles) == 0:
        return
    path1_angles = path1.angles[:, 0:3]

    # Tolerances for calling two angles "the same" feature.
    angle_tolerance = 4.2      # degrees
    distance_tolerance = 18.0  # presumably meters -- TODO confirm units

    # Compiled matcher over (angle, leg-distance, leg-distance) triplets.
    matching_angles, num_matched = search_matches.match_angles(
        path1_angles, path2_angles, angle_tolerance, distance_tolerance)
    match_comparison = matching_angles[0:num_matched, :]

    # Collect the indices of the exactly-matched angles on both paths.
    exact_match_cnt = 0
    matched_points1 = []
    matched_points2 = []
    for match_score in match_comparison:
        if match_score[0] == 1:
            exact_match_cnt += 1
            loc1 = match_score[1]
            loc2 = match_score[2]
            matched_points1.append(match_score[1])
            matched_points2.append(match_score[2])

    match_found = 0
    if exact_match_cnt >= 2:
        # [other route id, flag, last matched angle, *all matched angles]
        path1_matching = [path2.routeid, 1, loc1] + matched_points1
        path2_matching = [path1.routeid, 1, loc2] + matched_points2
        path1_matching_angle_list = path1_matching[3:]
        path2_matching_angle_list = path2_matching[3:]

        if exact_match_cnt >= 3:  # need at least 3 points to align paths
            # Try each matched angle pair as an alignment anchor until one
            # alignment scores as a match.
            for cnt, angle1 in enumerate(path1_matching_angle_list):
                angle2 = path2_matching_angle_list[cnt]
                if match_found == 0:
                    match_found = align_and_score_two_paths(
                        path1, path2, angle1, angle2,
                        path1_matching_angle_list, path2_matching_angle_list)

        if match_found == 1:
            # Remember that we matched and which RDP points lined up.
            path1.comparison.append(path1_matching)
            path2.comparison.append(path2_matching)
            if path1.matched < 0:
                path1.matched = 0
            if path2.matched < 0:
                path2.matched = 0
            path1.matched += 1
            path2.matched += 1
            path1.print_flag = 1

    # No confirmed match: check whether any two matched angles are the same
    # distance apart on both paths, which keeps the pair on the "possible"
    # list instead of "definitely not matched".
    if (path1.matched < 0 or path2.matched < 0) and exact_match_cnt >= 2:
        for cnt1 in range(0, len(path1_matching_angle_list) - 1):
            for cnt2 in range(cnt1 + 1, len(path1_matching_angle_list)):
                angle_id_1_1 = path1_matching_angle_list[cnt1]
                angle_id_1_2 = path1_matching_angle_list[cnt2]
                angle_id_2_1 = path2_matching_angle_list[cnt1]
                angle_id_2_2 = path2_matching_angle_list[cnt2]

                distance1 = path1.angle_distances[angle_id_1_1, angle_id_1_2]
                distance2 = path2.angle_distances[angle_id_2_1, angle_id_2_2]

                if distance1 != 0 and distance2 != 0 and abs(distance1 - distance2) < 30:
                    if path1.matched < 0:
                        path1.matched = 0
                    if path2.matched < 0:
                        path2.matched = 0
    return
#********************
#**** end this compares two sets of angles to see how close the two paths are
#********************
#********************
#**** x, y location of one RDP feature point
#********************
def get_RDP_xy(path, RDP_point):
    """Return the (x, y) coordinates of the given RDP feature point.

    `path.feature_loc[RDP_point, 2]` holds the index of that feature in
    `path.route`.
    """
    route_row = path.feature_loc[RDP_point, 2]
    return path.route[route_row, 0], path.route[route_row, 1]
# ****************************************************************************
# The three RDP point indices (before, center, after) of one angle
# ****************************************************************************
def get_RDP_point_from_angle(path, angle_num):
    """Return the (before, center, after) RDP point indices for an angle.

    Columns 3..5 of `path.angles` hold those three indices.
    """
    before, center, after = path.angles[angle_num, 3:6]
    return before, center, after
# ****************************************************************************
# The center RDP point index of one angle
# ****************************************************************************
def get_one_RDP_point_from_angle(path, angle_num):
    """Return only the center RDP point index of the given angle
    (column 4 of `path.angles`)."""
    center_point = path.angles[angle_num, 4]
    return center_point
#********************
#**** this aligns two paths and gives a score of that alignment
#********************
#@profile
def align_and_score_two_paths(path1, path2, angle1, angle2, path1_matching_angle_list, path2_matching_angle_list ):
# assign criteria for how closely we have to match teh vector and distance depending on how close the angle is
matching_criteria = [ [2.0, 4.5, 30.0], [3.0, 3.0, 20.0], [4.0, 2.5, 17.0], [15.0, 2.0, 15.0] ]
# find out which feature to center on for point 1
path1_rdp2 = get_one_RDP_point_from_angle(path1, angle1)
# find out which feature to center on for point 2
path2_rdp2 = get_one_RDP_point_from_angle(path2, angle2)
path1_rdp2_x, path1_rdp2_y = get_RDP_xy(path1, path1_rdp2)
path2_rdp2_x, path2_rdp2_y = get_RDP_xy(path2, path2_rdp2)
# center the path1
index_array = np.array([path1_rdp2_x, path1_rdp2_y])
path1.route = np.subtract(path1.route, index_array)
# center the path2
index_array = np.array([path2_rdp2_x, path2_rdp2_y])
path2.route = np.subtract(path2.route, index_array)
path1_rdp2_x, path1_rdp2_y = get_RDP_xy(path1, path1_rdp2)
path2_rdp2_x, path2_rdp2_y = get_RDP_xy(path2, path2_rdp2)
match_found = 0
# try aligning with the other RDP points
for cnt3, path1_aligning_angle in enumerate(path1_matching_angle_list):
good_angle_found_2 = 0
good_distance = 1
if (match_found ==0):
path2_aligning_angle = path2_matching_angle_list[cnt3] # find the MSE error between all of our points
# find out which feature to center on for point 1
path1_aligning_rdp2 = get_one_RDP_point_from_angle(path1, path1_aligning_angle)
# find out which feature to center on for point 2
path2_aligning_rdp2 = get_one_RDP_point_from_angle(path2, path2_aligning_angle)
path1_aligning_rdp2_x, path1_aligning_rdp2_y = get_RDP_xy(path1, path1_aligning_rdp2) #
path2_aligning_rdp2_x, path2_aligning_rdp2_y = get_RDP_xy(path2, path2_aligning_rdp2) #
distance1 = get_distance(path1_rdp2_x, path1_rdp2_y, path1_aligning_rdp2_x, path1_aligning_rdp2_y)
distance2 = get_distance(path2_rdp2_x, path2_rdp2_y, path2_aligning_rdp2_x, path2_aligning_rdp2_y)
if (match_found == 0 and abs(distance1 - distance2) < matching_criteria[0][2]+5 and
path1_rdp2 != path1_aligning_rdp2 and path2_rdp2 != path2_aligning_rdp2 and
path1_rdp2_x != path1_aligning_rdp2_x and path2_rdp2_x != path2_aligning_rdp2_x ):
path1_angle = np.arctan( (path1_rdp2_y-path1_aligning_rdp2_y) / (path1_rdp2_x-path1_aligning_rdp2_x) )
path2_angle = np.arctan( (path2_rdp2_y-path2_aligning_rdp2_y) / (path2_rdp2_x-path2_aligning_rdp2_x) )
path1.rotate_path(path1_angle)
path2.rotate_path(path2_angle) # rotate the paths to the same angle
path1_aligning_rdp2_x, path1_aligning_rdp2_y = get_RDP_xy(path1, path1_aligning_rdp2) #
path2_aligning_rdp2_x, path2_aligning_rdp2_y = get_RDP_xy(path2, path2_aligning_rdp2) #
# if the x signs values of our aligning points don't match, flip the x of number 2
if ( np.sign(path1_aligning_rdp2_x) != np.sign(path2_aligning_rdp2_x) ):
path2.flip_x_coords()
for rotation in range(0,2):
if ( rotation== 1 or rotation== 3): # on the second loop, flip the y coordinates of the second path
path2.flip_y_coords()
close_count = 0
good_angle_found = 0
close_list = []
close_list2 = []
close_list3 = []
for cnt, path1_angle in enumerate(path1_matching_angle_list):
path2_angle = path2_matching_angle_list[cnt] # find the MSE error between all of our points
path1_angle_degrees = path1.angles[path1_angle][0]
path2_angle_degrees = path2.angles[path2_angle][0]
angle_diff = abs(path1_angle_degrees - path2_angle_degrees)
distance_criteria = 30.0 # initially assume it needs to be within 10 meters
vector_criteria = 6.0 # assume it needs to be within 1 degrees
for criteria in matching_criteria:
if (angle_diff <= criteria[0]): # if the angle is less than the criteria, assign the distance and vector criteria
vector_criteria = criteria[1]
distance_criteria = criteria[2]
break
path1_test_rdp1, path1_test_rdp2, path1_test_rdp3 = get_RDP_point_from_angle(path1, path1_angle)
path2_test_rdp1, path2_test_rdp2, path2_test_rdp3 = get_RDP_point_from_angle(path2, path2_angle)
# get the location of the center points of the angle
path1_test_rdp2_x, path1_test_rdp2_y = get_RDP_xy(path1, path1_test_rdp2)
path2_test_rdp2_x, path2_test_rdp2_y = get_RDP_xy(path2, path2_test_rdp2)
# see how close the center points are
distance_off = get_distance(path1_test_rdp2_x, path1_test_rdp2_y, path2_test_rdp2_x, path2_test_rdp2_y)
# see how many points are close to matching, but make sure not to double count any
if ( distance_off < distance_criteria and path1_test_rdp2 not in close_list and path2_test_rdp2 not in close_list2):
if (path1_test_rdp1 < path1_test_rdp2):
path1_test_rdp1 = path1_test_rdp2 - 1
path1_test_rdp3 = path1_test_rdp2 + 1
else:
path1_test_rdp1 = path1_test_rdp2 + 1
path1_test_rdp3 = path1_test_rdp2 - 1
if (path2_test_rdp1 < path2_test_rdp2):
path2_test_rdp1 = path2_test_rdp2 - 1
path2_test_rdp3 = path2_test_rdp2 + 1
else:
path2_test_rdp1 = path2_test_rdp2 + 1
path2_test_rdp3 = path2_test_rdp2 - 1
# the the location of the rdp points adjacent to the center for each angle, to calculate vectors
path1_test_rdp1_x, path1_test_rdp1_y = get_RDP_xy(path1, path1_test_rdp1)
path1_test_rdp3_x, path1_test_rdp3_y = get_RDP_xy(path1, path1_test_rdp3)
path2_test_rdp1_x, path2_test_rdp1_y = get_RDP_xy(path2, path2_test_rdp1)
path2_test_rdp3_x, path2_test_rdp3_y = get_RDP_xy(path2, path2_test_rdp3)
# get the unit vectors for the path
path1_vector1 = [ path1_test_rdp2_x - path1_test_rdp1_x, path1_test_rdp2_y - path1_test_rdp1_y]
path1_vector2 = [ path1_test_rdp2_x - path1_test_rdp3_x, path1_test_rdp2_y - path1_test_rdp3_y]
path2_vector1 = [ path2_test_rdp2_x - path2_test_rdp1_x, path2_test_rdp2_y - path2_test_rdp1_y]
path2_vector2 = [ path2_test_rdp2_x - path2_test_rdp3_x, path2_test_rdp2_y - path2_test_rdp3_y]
# get the angle between path1 vector1 and path2 vector1 and 2
# and the angle between path2 vector2 and path2 vector1 and 2
angle1_1 = angle_between(path1_vector1, path2_vector1) * 57.2957795130823 # the angle of the angle in degrees
angle2_1 = angle_between(path1_vector2, path2_vector1) * 57.2957795130823 # the angle of the angle in degrees
angle1_2 = angle_between(path1_vector1, path2_vector2) * 57.2957795130823 # the angle of the angle in degrees
angle2_2 = angle_between(path1_vector2, path2_vector2) * 57.2957795130823 # the angle of the angle in degrees
not_a_match=1
# see if the first vector and the vector from path 2 are mostly aligned
if ( angle1_1 < vector_criteria or angle1_1 > (180-vector_criteria) or angle1_2 < vector_criteria or angle1_2 > (180-vector_criteria)):
# see if the second vector from path1 is mostly aligned with a vector from path 1
if ( angle2_1 < vector_criteria or angle2_1 > (180-vector_criteria) or angle2_2 < vector_criteria or angle2_2 > (180-vector_criteria)):
not_a_match=0 # this is a good enough match to continue
if (not_a_match ==0): # if the vectors are properly aligned
close_count += 1
close_list.append( path1_test_rdp2)
close_list2.append( path2_test_rdp2)
close_list3.append( [path1_test_rdp2, path2_test_rdp2] )
if (path1_angle_degrees < 135): # look for angles that aren't completely flat
good_angle_found =1
#if (path1_angle_degrees < 160): # look for angles that aren't completely flat
# good_angle_found_2 = 1
#if ( angle1_1 > 6 and angle1_1 < (180-6) and angle1_2 > 6 and angle1_2 < (180-6)):
# good_distance = 0
if ( close_count >= 3): # hold onto the lowest error case
#close_list3.sort()
#matching_distance_count = 0
#for rdp_cnt in range(0,len(close_list3)-1):
# rdp1_1 = close_list3[rdp_cnt][0] # get the path distance betwee these points
# rdp1_2 = close_list3[rdp_cnt+1][0]
#
# rdp2_1 = close_list3[rdp_cnt][1]
# rdp2_2 = close_list3[rdp_cnt+1][1]
#
# route_distance1 = path1.get_route_distance(int(path1.feature_loc[rdp1_1,2]), int(path1.feature_loc[rdp1_2,2]))
# route_distance2 = path2.get_route_distance(int(path2.feature_loc[rdp2_1,2]), int(path2.feature_loc[rdp2_2,2]))
#
# max_distance = max(route_distance1,route_distance2)
# min_distance = min(route_distance1,route_distance2)
#
# if (max_distance/min_distance < 1.25 or max_distance-min_distance < 20):
# matching_distance_count+=1
#
#if (matching_distance_count < 2):
# path1.print_flag = 1
matching_distance_count = 0
diff1 = max(close_list) - min(close_list)
diff2 = max(close_list2) - min(close_list2)
if (close_count >=5 or good_angle_found==1 or diff1 > 5 or diff2>5):
close_list3.sort()
matching_distance_count = 0
#print(path1.routeid, path2.routeid)
for rdp_cnt in range(0,len(close_list3)-1):
rdp1_1 = close_list3[rdp_cnt][0] # get the path distance betwee these points
rdp1_2 = close_list3[rdp_cnt+1][0]
rdp2_1 = close_list3[rdp_cnt][1]
rdp2_2 = close_list3[rdp_cnt+1][1]
#route_distance1 = path1.get_route_distance(int(path1.feature_loc[rdp1_1,2]), int(path1.feature_loc[rdp1_2,2]))
#route_distance2 = path2.get_route_distance(int(path2.feature_loc[rdp2_1,2]), int(path2.feature_loc[rdp2_2,2]))
path1_segment_start = int(path1.feature_loc[rdp1_1,2])
path1_segment_end = int(path1.feature_loc[rdp1_2,2])
path2_segment_start = int(path2.feature_loc[rdp2_1,2])
path2_segment_end = int(path2.feature_loc[rdp2_2,2])
max_distance = 0
max_distance = search_matches.max_distance_between_segments(path1.route, path2.route, path1_segment_start, path1_segment_end, \
path2_segment_start, path2_segment_end)
#print("Max distance is ",max_distance)
if ( max_distance < 18):
matching_distance_count+=1
#if (matching_distance_count < 2):
# path1.print_flag = 1
if (matching_distance_count >= 2):
# the current RDP has a problem with matching up gentle curves
# to combat this, we will look for either, 4 matching points, or 1 point with a sharp enough turn
# which I am starting to SWAG at 145 degrees, or that the three matching RDP points aren't all in a row
# for either path1 or path2
if (close_count >=5 or good_angle_found==1): # if we have at least 4 matches, or 1 of them was a good angle, count it
match_found = 1
#if (good_distance ==0):
# path1.print_flag = 1
# #print("here1")
return match_found
else:
diff1 = max(close_list) - min(close_list)
diff2 = max(close_list2) - min(close_list2)
if (diff1 > 5 or diff2>5): # if all of the RDP points aren't sequential then count it
match_found = 1
#if (good_distance ==0):
# path1.print_flag = 1
# #print("here2")
return match_found
return match_found
#********************
#**** this aligns and orients two matching paths the same before plotting and saving them to a file for viewing
#********************
def align_two_paths(path1, path2,driver_id,rdp_tolerance):
    """Align two already-matched paths, plot them together, and save the figure.

    Re-runs the alignment scoring over every pair of matched angles until one
    alignment succeeds, then plots both routes with the matched RDP feature
    points highlighted in red and saves the figure to the ``Test_Set``
    directory, named from the driver id, both route ids and the RDP tolerance.

    Parameters:
        path1, path2:   path objects with ``comparison``, ``route``,
                        ``feature_loc``, ``distance`` and ``routeid`` members
        driver_id:      identifier used in the output file name
        rdp_tolerance:  RDP simplification tolerance (meters), used in the
                        output file name only

    Returns: None (side effect: writes a PNG under ``Test_Set``).
    """
    # The last comparison row appears to hold the matched angle identifiers
    # from index 3 onward -- TODO confirm against where `comparison` is built.
    path1_matching_angle_list = path1.comparison[-1][3:]
    path2_matching_angle_list = path2.comparison[-1][3:]
    # loop through each of the angles that was a good match, and see which one
    # makes the lowest error when they are aligned; stop trying once one succeeds
    match_found = 0
    for cnt, angle1 in enumerate(path1_matching_angle_list):
        angle2 = path2_matching_angle_list[cnt]
        if (match_found ==0):
            match_found = align_and_score_two_paths(path1, path2, angle1, angle2, path1_matching_angle_list, path2_matching_angle_list )
            #print ("here2")
    #print("match_found is ",match_found)
    if (match_found == 1):
        # Compute the joint extents of both routes for the plot bounds.
        # if one path is a lot longer than the other, zoom in on the shorter one
        #if (path1.distance < path2.distance / 5.0 or path2.distance < path1.distance / 5.0):
        x1_max = np.amax ( path1.route[:,0] )
        x1_min = np.amin ( path1.route[:,0] )
        x2_max = np.amax ( path2.route[:,0] )
        x2_min = np.amin ( path2.route[:,0] )
        y1_max = np.amax ( path1.route[:,1] )
        y1_min = np.amin ( path1.route[:,1] )
        y2_max = np.amax ( path2.route[:,1] )
        y2_min = np.amin ( path2.route[:,1] )
        # Bounds padded by 500 units; used by the zoom branch below.
        x_upper_bound = min( x1_max, x2_max) + 500
        x_lower_bound = max( x1_min, x2_min) - 500
        y_upper_bound = min( y1_max, y2_max) + 500
        y_lower_bound = max( y1_min, y2_min) - 500
        # Alternative bounds clamped to +/-1000; currently only referenced by
        # the commented-out plt.axis() variant further down.
        x_upper_bound2 = min( x1_max + 250, x2_max + 250, 1000)
        x_lower_bound2 = max( x1_min - 250, x2_min - 250, -1000)
        y_upper_bound2 = min( y1_max + 250, y2_max + 250, 1000)
        y_lower_bound2 = max( y1_min - 250, y2_min - 250, -1000)
        # Plot both routes on a single figure.
        plt.figure()
        plt.plot(path1.route[:,0],path1.route[:,1],markersize=2.0)
        plt.plot(path2.route[:,0],path2.route[:,1],markersize=2.0)
        # Collect the (x, y) location of every matched RDP feature point so the
        # matches can be highlighted on the plot below.
        feature_list1 = []
        feature_list2 = []
        for cnt, path1_angle in enumerate(path1_matching_angle_list):
            path2_angle = path2_matching_angle_list[cnt] # find the MSE error between all of our points
            path1_test_rdp1, path1_test_rdp2, path1_test_rdp3 = get_RDP_point_from_angle(path1, path1_angle)
            path2_test_rdp1, path2_test_rdp2, path2_test_rdp3 = get_RDP_point_from_angle(path2, path2_angle)
            path1_test_rdp2_x, path1_test_rdp2_y = get_RDP_xy(path1, path1_test_rdp2)
            path2_test_rdp2_x, path2_test_rdp2_y = get_RDP_xy(path2, path2_test_rdp2)
            feature_list1.append( [path1_test_rdp2_x, path1_test_rdp2_y] )
            feature_list2.append( [path2_test_rdp2_x, path2_test_rdp2_y] )
        # #* Temporary debug block: refresh feature locations; the extracted
        # `*_features` arrays are only used by the commented-out scatter calls.
        path1.update_feature_loc()
        path2.update_feature_loc()
        path1_features = path1.feature_loc[:,0:2]
        path2_features = path2.feature_loc[:,0:2]
        #plt.scatter(path1_features[:,0],path1_features[:,1])
        #plt.scatter(path2_features[:,0],path2_features[:,1])
        # #* Temporary
        # (Disabled debug helpers: dump each path's angle list to CSV.)
        #file1 = open("test1.csv",'wb')
        #file1_csv = csv.writer(file1)
        #for angle in path1.angles:
        #    file1_csv.writerow(angle)
        #file1.close()
        #file2 = open("test2.csv",'wb')
        #file2_csv = csv.writer(file2)
        #for angle in path2.angles:
        #    file2_csv.writerow(angle)
        #file2.close()
        # Highlight the matched feature points in red.
        feature_list1 = np.array(feature_list1)
        plt.scatter(feature_list1[:,0],feature_list1[:,1],c='red')
        feature_list2 = np.array(feature_list2)
        plt.scatter(feature_list2[:,0],feature_list2[:,1],c='red')
        # NOTE(review): plt.show() before savefig() means the saved axis limits
        # are applied after the interactive window closes -- confirm intended.
        plt.show()
        #print ("here 3")
        # if one path is a lot longer than the other, zoom in on the shorter one
        if (path1.distance < path2.distance / 5.0 or path2.distance < path1.distance / 5.0):
            plt.axis( (x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound) )
        #else:
        #    plt.axis( (x_lower_bound2, x_upper_bound2, y_lower_bound2, y_upper_bound2) )
        #plt.show()
        plt.savefig("Test_Set\\Driver_" + str(driver_id)+"_" + str(path1.routeid) + "__" + str(path2.routeid) +"__"+ str(rdp_tolerance)+"m.png")
        #plt.savefig("Test_Set\\Driver_1_" + str(path2.routeid) + "__" + str(path1.routeid) +".png")
        plt.close()
    return
#********************
#**** end aligns and orients two matching paths the same before plotting and saving them to a file for viewing
#********************
| 3.09375 | 3 |
code/fastai_ext/fastai_ext/utils.py | jandremarais/TabularLearner | 14 | 12763347 | from matplotlib import pyplot as plt
from fastai.callback import Callback
from fastai.callbacks import hook_output
def request_lr(learn, **kwargs):
    """Run the LR finder, display the loss-vs-LR plot, and prompt the user.

    Extra keyword arguments are forwarded to ``learn.lr_find``.
    Returns the learning rate typed by the user, as a float.
    """
    learn.lr_find(**kwargs)
    learn.recorder.plot()  # suggestion=False
    plt.show()
    selected = input('Select LR: ')
    return float(selected)
def auto_lr(learn, **kwargs):
    """Run the LR finder, plot it, and return the minimum-gradient LR.

    Extra keyword arguments are forwarded to ``learn.lr_find``.
    """
    learn.lr_find(**kwargs)
    recorder = learn.recorder
    recorder.plot()
    return recorder.min_grad_lr
def transfer_from_dae(learn_cls, learn_dae):
    """Copy pretrained denoising-autoencoder weights into the classifier.

    Transfers the embedding layers, the continuous-feature batchnorm, and
    every hidden layer except the last one from ``learn_dae``'s model into
    ``learn_cls``'s model.
    """
    src = learn_dae.model
    dst = learn_cls.model
    dst.embeds.load_state_dict(src.embeds.state_dict())
    dst.bn_cont.load_state_dict(src.bn_cont.state_dict())
    dst.layers[:-1].load_state_dict(src.layers[:-1].state_dict())
def freeze_layer(m):
    """Disable gradient tracking on every parameter of module *m*."""
    for weight in m.parameters():
        weight.requires_grad = False
def freeze_but_last(learn):
    """Freeze the model's embeddings, continuous batchnorm, and all layers
    except the final one, leaving only the last layer trainable."""
    model = learn.model
    for section in (model.embeds, model.bn_cont, model.layers[:-1]):
        freeze_layer(section)
def unfreeze_all(learn):
    """Re-enable gradient tracking on every parameter of the learner's model."""
    for weight in learn.model.parameters():
        weight.requires_grad = True
class StoreHook(Callback):
    """Callback that records a module's forward-pass outputs.

    Registers an output hook on *module* and, at the end of every
    non-training (validation) batch, appends the hook's stored output
    to ``self.outputs``.
    """

    def __init__(self, module):
        super().__init__()
        self.custom_hook = hook_output(module)
        self.outputs = []

    def on_batch_end(self, train, **kwargs):
        # Only collect outputs for validation batches.
        if train:
            return
        self.outputs.append(self.custom_hook.stored)
| 2.375 | 2 |
scrape/apollo13/apollo13.py | jumpjack/ApolloTranscriptScraper | 1 | 12763348 | <reponame>jumpjack/ApolloTranscriptScraper
#!/usr/bin/env python3
import os, glob, re
import requests
from bs4 import BeautifulSoup
import json
"""
Apollo 13 Lunar Flight Journals:
https://www.hq.nasa.gov/alsj/main.html
Apollo 13 Lunar Surface Journals:
https://www.hq.nasa.gov/alsj/a13/a13.html
"""
SCRAPE_DIR = 'scrape'  # where the raw "Day X" HTML pages are saved
DATA_DIR = 'data'      # where the extracted dialogue (txt/json) is written

# Speaker labels recognized in the transcripts.  Tokens are matched against
# this list exactly when splitting the text into (speaker, dialogue) records,
# so each entry must match the transcript wording verbatim.
# Fix: the original list contained 'Lousma' twice; the duplicate is removed
# (membership tests are unaffected).
SPEAKERS = [
    'Public Affairs Office',
    'SC',
    'MS',
    'Fullerton',
    'Lovell',
    'Lovell (onboard)',
    'Lovell (on board)',
    'Haise',
    'Haise (onboard)',
    'Haise (on board)',
    'Swigert',
    'Swigert (onboard)',
    'Swigert (on board)',
    'NETWORK',
    'MILA',
    'Bermuda',
    'Canary',
    'Kerwin',
    'Brand',
    'Mattingly',
    'Lousma',
    'Unrecognized crewman',
    'Duke',
]
def apollo13_lfj_scrape_index():
    """
    Scrape the index of the Apollo 13 Lunar Flight Journal.

    Fetches the journal index page, collects a link for each "Day X"
    (and launch / Earth-orbit / transposition / SPS-troubleshooting)
    transcript page, downloads each page, and saves its visible text under
    SCRAPE_DIR for later processing.  Pages already on disk are skipped,
    so the function is safe to re-run.

    Returns: None (side effects: network requests and files written).
    """
    lfj_base_link = 'https://web.archive.org/web/20171225232131/https://history.nasa.gov/afj/ap13fj/'
    headers = {'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8'}
    # Make a soup from the index page HTML.
    r = requests.get(lfj_base_link, headers=headers)
    soup = BeautifulSoup(r.text, "lxml")
    # Extract each link whose text marks a transcript page.
    index_titles = ('Day ', 'Launch and Reaching', 'Earth Orbit',
                    'Transposition', 'SPS Troubleshooting')
    log_links = []
    for a_ in soup.find_all('a'):
        link_text = a_.get_text()
        if any(title in link_text for title in index_titles):
            log_links.append(lfj_base_link + a_.attrs['href'])
    # makedirs(exist_ok=True) avoids the check-then-create race of
    # `if not exists: mkdir`.
    os.makedirs(SCRAPE_DIR, exist_ok=True)
    # Follow those links and save each page's text to disk.
    for link in log_links:
        dest = os.path.join(SCRAPE_DIR, os.path.basename(link))
        if os.path.exists(dest):
            print("Skipping %s, file already exists..." % (dest))
            continue
        print("Scraping...")
        print("  Link: %s" % (link))
        print("  Target file: %s" % (dest))
        r = requests.get(link, headers=headers)
        soup = BeautifulSoup(r.content.decode('utf-8'), "lxml")
        with open(dest, 'w') as f:
            f.write(soup.text)
        print("Done.\n")
    print("Done scraping Apollo 13 Lunar Flight Journals.")
def apollo13_lfj_extract_dialogue():
    """
    Extract (speaker, dialogue) records from the saved "Day X" pages.

    Reads each scraped transcript under SCRAPE_DIR, tokenizes it into
    sentences, splits out speaker labels, strips mission timestamps, and
    writes the collected dialogue to DATA_DIR as both a line-delimited
    JSON text file and a pretty-printed JSON file.

    Returns: None (side effect: writes apollo_13_min.txt / apollo_13.json).
    """
    import nltk
    # A list of dictionaries with "speaker", "tokens" and "time" keys.
    all_the_dialogue = []
    # Synthetic hh:mm clock assigned to each dialogue record.
    hh = 0
    mm = 0
    lfj_files = glob.glob(os.path.join(SCRAPE_DIR, "0*.html"))
    # makedirs(exist_ok=True) avoids the check-then-create race.
    os.makedirs(DATA_DIR, exist_ok=True)
    # For each LFJ transcript (plain text), look for speaker/dialogue tokens.
    for lfj_file in lfj_files:
        print("Tokenizing...")
        print("  Target file: %s" % (lfj_file))
        with open(lfj_file, 'r') as f:
            html_doc = f.read()
        # (The original also built an unused BeautifulSoup object here.)
        # Tokenize by sentence, then split further on "speaker: text",
        # " - " separators and newlines, flattening after each split.
        tokens = nltk.tokenize.sent_tokenize(html_doc)
        tokens = [item for j in tokens for item in j.split(": ")]
        tokens = [item for j in tokens for item in j.split(" - ")]
        tokens = [item for j in tokens for item in j.split("\n")]
        # Remove double quotes.
        tokens = [j.replace('"', '') for j in tokens]
        # Drop audio-clip captions.
        tokens = [j for j in tokens if 'mp3 audio' not in j.lower()]
        tokens = [j for j in tokens if ' kb.' not in j.lower()]
        comm_break = 'comm break'
        # BUG FIX: the original filter compared the whole *list* to ''
        # (`if tokens != ''`, always true), so empty tokens were never
        # removed.  Compare each element instead.
        tokens = [j for j in tokens if j != '']
        # Strip mission timestamps from recognized speaker lines and find the
        # "last updated" footer so everything before it can be discarded.
        last_updated_index = 0
        for jj, tok in enumerate(tokens):
            if any(speaker in tok for speaker in SPEAKERS):
                stripped_tok = re.sub(r'[0-9]{3}:[0-9]{2}:[0-9]{2} ', '', tok)
                stripped_tok2 = re.sub(r'at [0-9]{3}:[0-9]{2}:[0-9x]{2}', '', stripped_tok)
                stripped_tok3 = re.sub(r' \(onboard\)', '', stripped_tok2)
                tokens[jj] = stripped_tok3
            if 'last updated' in tok.lower():
                last_updated_index = jj
        if last_updated_index != 0:
            tokens[0:last_updated_index + 1] = []
        # Walk the tokens: a recognized speaker starts a record, and the
        # following tokens (until a comm break or the next speaker) are
        # that speaker's dialogue.
        ii = 0
        while ii < len(tokens):
            if tokens[ii] in SPEAKERS:
                d = {}
                d['speaker'] = tokens[ii]
                ii += 1
                z = []
                while (ii < len(tokens)) and (comm_break not in tokens[ii].lower()) \
                        and (tokens[ii] not in SPEAKERS):
                    z.append(tokens[ii])
                    ii += 1
                d['tokens'] = z
                # NOTE(review): hh advances once per 60-record boundary while
                # mm resets on its own 60-step cycle, so this synthetic clock
                # drifts -- confirm whether this bookkeeping is intended.
                cc = len(all_the_dialogue)
                if ((mm + 1) % 60) == 0:
                    mm = 0
                if ((cc + 1) % 60) == 0:
                    hh += 1
                d['time'] = '%03d:%02d:00' % (hh, mm)
                all_the_dialogue.append(d)
                mm += 1
            # NOTE(review): this increment skips the token that ended the inner
            # loop -- intended for comm breaks, but it also drops a speaker
            # token when two speaker records are adjacent; confirm.
            ii += 1
        print("Done.")
    out_min = os.path.join(DATA_DIR, 'apollo_13_min.txt')
    out_nice = os.path.join(DATA_DIR, 'apollo_13.json')
    print("Saving tokens to file:")
    print("  Text: %s" % (out_min))
    print("  Json: %s" % (out_nice))
    with open(out_min, 'w') as f:
        for d in all_the_dialogue:
            f.write(json.dumps(d))
            f.write("\n")
    with open(out_nice, 'w') as f:
        json.dump(all_the_dialogue, f, indent=4)
    print("Done.\n")
    print("Done tokenizing Apollo 13 Lunar Flight Journals.")
def check_for_funky_unicode(txt):
    """
    Print the UTF-8 bytes, codepoint and Unicode name of every non-ASCII
    character found in *txt*, so new entries can be added to the
    replacement table in strip_funky_unicode() below.
    """
    import unicodedata
    for ch in txt:
        if ord(ch) < 127:
            continue
        print('{} U+{:04x} {}'.format(ch.encode('utf8'), ord(ch), unicodedata.name(ch)))
def strip_funky_unicode(txt):
    """
    Replace known non-ASCII characters in *txt* with ASCII equivalents.

    Parameters:
        txt: UTF-8 encoded bytes scraped from a journal page.

    Returns: UTF-8 encoded bytes with each listed character substituted.

    Improvements over the original: the text is decoded once, all
    replacements applied, and encoded once (instead of a full
    decode/replace/encode round-trip per table entry), and the duplicated
    MIDDLE DOT entry is removed -- both changes are behavior-identical.
    """
    # (character, Unicode name, ASCII replacement)
    unicode_key = [
        (u"\u2019", 'RIGHT SINGLE QUOTATION MARK', '\''),
        (u"\u2013", 'EN DASH', '-'),
        (u"\u00bd", 'VULGAR FRACTION ONE HALF', ' 1/2 '),
        (u"\u00be", 'VULGAR FRACTION THREE QUARTERS', ' 3/4 '),
        (u"\u201d", 'RIGHT DOUBLE QUOTATION MARK', '"'),
        (u"\u201c", 'LEFT DOUBLE QUOTATION MARK', '"'),
        (u"\u00b7", 'MIDDLE DOT', '.'),
        (u"\u00a9", 'COPYRIGHT SIGN', ' '),
        (u"\u00e9", 'LATIN SMALL LETTER E WITH ACUTE', 'e'),
        (u"\u00b0", 'DEGREE SIGN', 'o'),
    ]
    text = txt.decode("utf-8")
    for code, _name, symbol in unicode_key:
        text = text.replace(code, symbol)
    return text.encode("utf-8")
if __name__=="__main__":
    # First scrape the journal "Day X" pages to SCRAPE_DIR, then tokenize
    # them into the dialogue text/JSON files under DATA_DIR.
    apollo13_lfj_scrape_index()
    apollo13_lfj_extract_dialogue()
| 2.90625 | 3 |
ttsplay/__init__.py | yuj09161/word | 0 | 12763349 | __all__=['ttsmake'] | 1.09375 | 1 |
location_register/migrations/0022_auto_20210415_0936.py | OlexandrTopuzov/Data_converter | 0 | 12763350 | <gh_stars>0
# Generated by Django 3.0.7 on 2021-04-15 09:36
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration for the location_register app.

    Updates Meta options on the ``country`` model only: default ordering by
    ``name`` and human-readable names ("country" / "countries").
    """

    # Must be applied after migration 0021 of this app.
    dependencies = [
        ('location_register', '0021_auto_20210330_1650'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='country',
            options={'ordering': ('name',), 'verbose_name': 'country', 'verbose_name_plural': 'countries'},
        ),
    ]
| 1.445313 | 1 |