hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0b519d2443bbd7f033d131ee60b0e78ef279c2a7 | 256 | py | Python | xai_court/predictors/__init__.py | michaeljneely/court-of-xai | 37eded49f46b3a05ad56986c1a9bb22eee3ac4b1 | [
"MIT"
] | 4 | 2021-05-07T09:40:11.000Z | 2022-03-27T18:19:07.000Z | xai_court/predictors/__init__.py | michaeljneely/court-of-xai | 37eded49f46b3a05ad56986c1a9bb22eee3ac4b1 | [
"MIT"
] | 1 | 2021-05-10T09:31:05.000Z | 2021-05-10T09:31:05.000Z | xai_court/predictors/__init__.py | michaeljneely/court-of-xai | 37eded49f46b3a05ad56986c1a9bb22eee3ac4b1 | [
"MIT"
] | 1 | 2021-06-06T18:45:39.000Z | 2021-06-06T18:45:39.000Z | from xai_court.predictors.jain_wallace_attention_binary_predictor import JWAEDPredictor
from xai_court.predictors.pair_sequence_predictor import PairSequencePredictor
from xai_court.predictors.distilbert import DistilBertForSequenceClassificationPredictor
| 64 | 88 | 0.929688 |
1a78d1534df6395ff59a78ecfb0bb7893186b878 | 812 | py | Python | Chapter 10/10-10.py | Nolazco1/pythonProjects | 33938487cb2e4771ea4ff6716703868844d04e52 | [
"MIT"
] | null | null | null | Chapter 10/10-10.py | Nolazco1/pythonProjects | 33938487cb2e4771ea4ff6716703868844d04e52 | [
"MIT"
] | null | null | null | Chapter 10/10-10.py | Nolazco1/pythonProjects | 33938487cb2e4771ea4ff6716703868844d04e52 | [
"MIT"
] | null | null | null | # This program demonstrates the BankAccount class
# with the __str__ method added to it.
import bankaccount2
def main():
# Get the starting balance
start_bal = float(input('Enter your starting balance: '))
# Create a BankAccount object.
savings = bankaccount2.BankAccount(start_bal)
# Deposit the user's paycheck.
pay = float(input('How much were you paid this week? '))
print('I will deposit that into your account.')
savings.deposit(pay)
# Display the balance.
print(savings)
# Get the amount to withdraw.
cash = float(input('How much would you like to withdraw? '))
print('I will withdraw that from your account.')
savings.withdraw(cash)
# Display the balance
print(savings)
# Call the main function
main()
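
# For reference, the imported bankaccount2 module is not shown above. A minimal
# sketch of what its BankAccount class presumably provides (an assumption; the
# real module may differ) is:
#
# class BankAccount:
#     def __init__(self, bal=0.0):
#         self.__balance = bal
#
#     def deposit(self, amount):
#         self.__balance += amount
#
#     def withdraw(self, amount):
#         if self.__balance >= amount:
#             self.__balance -= amount
#         else:
#             print('Error: Insufficient funds')
#
#     def __str__(self):
#         return 'The balance is $' + format(self.__balance, ',.2f')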
| 26.193548 | 65 | 0.665025 |
b021152f418288ce3ab759061bf8de35007abc4a | 305 | py | Python | ROT13/app.py | davwheat/btec-python-challenges | 4cd0aee6ff67e50518f61f29b982cb182cafed36 | [
"MIT"
] | null | null | null | ROT13/app.py | davwheat/btec-python-challenges | 4cd0aee6ff67e50518f61f29b982cb182cafed36 | [
"MIT"
] | null | null | null | ROT13/app.py | davwheat/btec-python-challenges | 4cd0aee6ff67e50518f61f29b982cb182cafed36 | [
"MIT"
] | null | null | null | import codecs
option = input("Press 1 to decode or 2 to encode by ROT13.")
if option == "1":
text = input("Enter text to encode with ROT13: ")
print(codecs.encode(text, "rot_13"))
elif option == "2":
text = input("Enter text to decode from ROT13: ")
print(codecs.decode(text, "rot_13"))
| 27.727273 | 60 | 0.652459 |
6abbf26584b8ca594f2d572054148ee3bf94e340 | 2,880 | py | Python | tests/test_RC3.py | lukasgehrke/paraheat | 551caf98fd760bebb5ca8af4a51c9acdf58da914 | [
"BSD-2-Clause"
] | null | null | null | tests/test_RC3.py | lukasgehrke/paraheat | 551caf98fd760bebb5ca8af4a51c9acdf58da914 | [
"BSD-2-Clause"
] | null | null | null | tests/test_RC3.py | lukasgehrke/paraheat | 551caf98fd760bebb5ca8af4a51c9acdf58da914 | [
"BSD-2-Clause"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# import class
from heat.first_level import OLS, GLM
from heat.multcomp import MCC_MNE
import heat.modeling as modeling
import heat.plot as ph
df = pd.read_csv('/Users/lukasgehrke/Documents/temp/chatham/LG_data_crdPhase1/df_scenario1_random_sample.csv')
data = df[['X', 'Y']].copy()  # copy to avoid pandas SettingWithCopyWarning
data['Z'] = data['X']
def test_fit_OLS():
design = df[['pID' ,'Activity', 'Workload', 'Intensity', 'GTLX']]
design = design.sort_values("pID")
st = OLS(name="test", data=df, design_matrix=design, model="Pixel ~ Activity * Intensity")
bins = 10
st.create_heatmaps('pID', 2, bins, None, 'count')
st.zero_to_nan()
st.standardize("zscore")
st.fit()
# TODO st.inspect_fit()
# prepare results
to_plot = np.reshape(np.array(st._heatmaps_betas["Intercept"]),(bins,bins))
mask = np.reshape(np.array(st._heatmaps_ps["Intercept"]),(bins,bins))
# apply mcc
mask = MCC_MNE.fdr(mask, .05)
mask = mask[0][0]
# plot and format it
bg_img = mpimg.imread('/Users/lukasgehrke/Documents/temp/matb.png')
extent = ph.get_image_extent(bg_img)
my_cm = ph.make_cm_transparent(plt.cm.coolwarm)
fig, ax = ph.create_figure_axes(1)
ph.add_background_image(bg_img, ax)
lims = [-20,20]
sig_levels = 1
heat = ph.add_heat(to_plot, ax, extent, cm=my_cm, lims=lims, add_contour=True, contour_mask=mask, levels=sig_levels)
ph.add_colorbar(heat, ax)
ph.set_labelnames(ax, title="some title", xlabel="some x label", ylabel="some y label")
ph.format_axes(ax)
ph.show()
def test_fit_GLM():
design = df[['pID', 'Activity']]
design = design.sort_values("pID") # okay since this is random data anyways
glm = GLM(name="Activity ~ Pixel", data=df, design_matrix=design, model="Activity ~ Pixel")
bins = 5
glm.create_heatmaps('pID', 2, bins, None, 'count')
glm.standardize("zscore")
glm.zero_to_nan()
glm.fit()
# glm.inspect_fit()
# apply mcc
# plot and format it
# multilevel modeling example
def test_fit_OLS_multilevel():
design = df[['pID' ,'Activity', 'Workload', 'Intensity', 'GTLX']]
design = design.sort_values("pID")
# rename pID to trialID and GLTX to RT
design.rename(columns={"pID": "trialNr", "GTLX": "RT"}, inplace=True)
df.rename(columns={"pID": "trialNr", "GTLX": "RT"}, inplace=True)
# pseudocode
# first level analysis: across trials within a single subject
# do for each subject and retain results
st = OLS(name="test", data=df, design_matrix=design, model="Pixel ~ Intensity + RT")
bins = 10
st.create_heatmaps('trialNr', 2, bins, None, 'count')
st.standardize("zscore")
st.zero_to_nan()
st.fit()
# second level analysis: across subjects
# now using heatmaps (betas) as input!
| 30.638298 | 120 | 0.668403 |
1c6acd002f238e9b623b88ca091a1153c3efc81d | 51 | py | Python | CrashingComputers/CrashingWindows.py | l1n3rd4/PyHackingScripts | 76e6901085f4b8043c82f011f78ffff575fcff85 | [
"MIT"
] | null | null | null | CrashingComputers/CrashingWindows.py | l1n3rd4/PyHackingScripts | 76e6901085f4b8043c82f011f78ffff575fcff85 | [
"MIT"
] | null | null | null | CrashingComputers/CrashingWindows.py | l1n3rd4/PyHackingScripts | 76e6901085f4b8043c82f011f78ffff575fcff85 | [
"MIT"
] | null | null | null | import os
while True:
os.startfile("cmd.exe") | 12.75 | 27 | 0.666667 |
12ef0e1d6f59094ce9ef0d2b11bf2dde96c23e1b | 4,086 | py | Python | screen.py | chrisb2/air-quality | 9dd0c7b836467b3a262d7e09ae86d4bce6ac8e00 | [
"MIT"
] | 17 | 2018-07-24T17:41:58.000Z | 2021-06-23T12:02:21.000Z | screen.py | chrisb2/air-quality | 9dd0c7b836467b3a262d7e09ae86d4bce6ac8e00 | [
"MIT"
] | null | null | null | screen.py | chrisb2/air-quality | 9dd0c7b836467b3a262d7e09ae86d4bce6ac8e00 | [
"MIT"
] | 3 | 2018-07-24T17:41:59.000Z | 2019-09-19T19:17:31.000Z | """Air quality monitor screen."""
import epaper2in9
from machine import SPI
from display_buffer import Buffer
class Screen:
"""Air quality monitor screen."""
# Text size scales
_TINY_TEXT = 0.3
_SMALL_TEXT = 0.6
_LARGE_TEXT = 1.5
# Screen
_HALF_WIDTH = int(epaper2in9.EPD_WIDTH / 2)
_HALF_HEIGHT = int(epaper2in9.EPD_HEIGHT / 2)
def __init__(self, config):
"""Create with the supplied configuration."""
spi = SPI(-1, baudrate=config.baudrate,
sck=config.sck, mosi=config.mosi, miso=config.miso)
self._epd = epaper2in9.EPD(spi, config.cs, config.dc,
config.rst1, config.busy)
self._epd.init()
self._buffer = Buffer(epaper2in9.EPD_WIDTH, epaper2in9.EPD_HEIGHT)
def update(self, temperature, humidity, co2, voc, voltage,
baseline=False, fullupdate=False):
"""Update the screen with the supplied readings."""
self._add_borders()
self._add_temperature(temperature)
self._add_humidity(humidity)
self._add_co2(co2)
self._add_voc(voc)
self._add_voltage(voltage)
self._add_baseline_indicator(baseline)
self._update_screen(fullupdate)
def sleep(self):
"""Put the screen into low current mode."""
self._epd.sleep()
def _add_borders(self):
self._buffer.background(self._buffer.WHITE)
self._add_line(0, self._HALF_WIDTH,
epaper2in9.EPD_HEIGHT, self._HALF_WIDTH)
self._add_line(self._HALF_HEIGHT, 0,
self._HALF_HEIGHT, epaper2in9.EPD_WIDTH)
def _add_line(self, x1, y1, x2, y2):
self._buffer.line(x1, y1, x2, y2,
self._buffer.BLACK, self._buffer.PEN_MEDIUM)
def _add_temperature(self, temperature):
self._write_title_text("Temperature", 2, 113)
self._write_value_text("%dC" % int(round(temperature)), 5, 70)
def _add_humidity(self, humidity):
self._write_title_text("Humidity", 152, 113)
self._write_value_text("%d%%" % int(round(humidity)), 158, 70)
def _add_co2(self, co2):
# 400ppm to 32768ppm
self._write_title_text("eCO2 ppm", 2, 48)
if co2 is None:
self._add_line(50, 25, 80, 25)
else:
self._write_value_text("%d" % co2, 10, 5)
def _add_voc(self, voc):
# 0ppb to 32768ppb
self._write_title_text("TVOC ppb", 152, 48)
if voc is None:
self._add_line(200, 25, 230, 25)
else:
self._write_value_text("%d" % voc, 158, 5)
def _add_voltage(self, voltage):
if voltage is not None:
self._buffer.line(272, 118, 272, epaper2in9.EPD_WIDTH,
self._buffer.BLACK, self._buffer.PEN_THIN)
self._buffer.line(272, 118, epaper2in9.EPD_HEIGHT, 118,
self._buffer.BLACK, self._buffer.PEN_THIN)
self._write_text("%.1fV" % voltage, 274, 120, self._TINY_TEXT,
self._buffer.PEN_THIN)
def _add_baseline_indicator(self, baseline):
if baseline:
self._write_text("B", 288, 2, self._TINY_TEXT,
self._buffer.PEN_THIN)
def _write_title_text(self, text, x, y):
self._write_text(text, x, y, self._SMALL_TEXT, self._buffer.PEN_MEDIUM)
def _write_value_text(self, text, x, y):
self._write_text(text, x, y, self._LARGE_TEXT, self._buffer.PEN_MEDIUM)
def _write_text(self, text, x, y, scale, pen):
self._buffer.write_text(text, x, y, self._buffer.BLACK,
scale, scale, None, pen)
def _update_screen(self, fullupdate):
if fullupdate:
self._epd.set_lut(self._epd.LUT_FULL_UPDATE)
else:
self._epd.set_lut(self._epd.LUT_PARTIAL_UPDATE)
self._epd.set_frame_memory(self._buffer.get(), 0, 0,
epaper2in9.EPD_WIDTH, epaper2in9.EPD_HEIGHT)
self._epd.display_frame()
| 37.145455 | 79 | 0.603524 |
ce8cc4fa95ec00c6d2dc2c9f694ac8b6161c4bc4 | 1,727 | py | Python | agent/tests/test_kraken_pylint.py | kinsanras/kraken | 3938ee4e65ba8f67ec5ee0e912b43fad84548f2c | [
"Apache-2.0"
] | 1 | 2021-08-15T19:46:44.000Z | 2021-08-15T19:46:44.000Z | agent/tests/test_kraken_pylint.py | kinsanras/kraken | 3938ee4e65ba8f67ec5ee0e912b43fad84548f2c | [
"Apache-2.0"
] | null | null | null | agent/tests/test_kraken_pylint.py | kinsanras/kraken | 3938ee4e65ba8f67ec5ee0e912b43fad84548f2c | [
"Apache-2.0"
] | null | null | null | import json
from unittest.mock import patch
import pytest
from kraken.agent import kraken_pylint
@pytest.mark.parametrize("git_url", [
'git@github.com:Kraken-CI/kraken.git',
'https://github.com/Kraken-CI/kraken.git',
'https://github.com/Kraken-CI/kraken'
])
def test__get_git_url(git_url):
values = [
(0, git_url),
(0, ''' origin/HEAD -> origin/master
origin/master''')
]
with patch('kraken.agent.utils.execute', side_effect=values):
result = kraken_pylint._get_git_url('/tmp')
assert result == 'https://github.com/Kraken-CI/kraken/blob/master'
def test_run_analysis():
rcvd_issues = []
def _rep_issue(issue):
rcvd_issues.append(issue)
step = {
'rcfile': 'pylint.rc',
'modules_or_packages': '.'
}
issues = [{
"path": "path",
"line": 123,
"message": "msg",
}]
issues_json = json.dumps(issues)
with patch('kraken.agent.utils.execute', return_value=(0, issues_json)) as ue, \
patch('kraken.agent.kraken_pylint._get_git_url', return_value='https://github.com/Kraken-CI/kraken/blob/master'):
ret, msg = kraken_pylint.run_analysis(step, report_issue=_rep_issue)
ue.assert_called_once_with('pylint --exit-zero -f json --rcfile=pylint.rc .',
cwd='.',
out_prefix='',
timeout=180)
assert ret == 0
assert msg == ''
assert rcvd_issues[0] == {'line': 123,
'message': 'msg',
'path': 'path',
'url': 'https://github.com/Kraken-CI/kraken/blob/master/path#L123'}
| 30.298246 | 122 | 0.563405 |
ce122e2d9e5e8d1a07be63a05a5a5726ac847d1f | 686 | py | Python | app/core/migrations/0003_ingredient.py | DamienPond001/recipe-app-api | 586aa3c37175ef6628da4971120880d2c3c2bae2 | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | DamienPond001/recipe-app-api | 586aa3c37175ef6628da4971120880d2c3c2bae2 | [
"MIT"
] | 2 | 2020-08-31T09:25:54.000Z | 2020-08-31T10:30:17.000Z | app/core/migrations/0003_ingredient.py | DamienPond001/recipe-app-api | 586aa3c37175ef6628da4971120880d2c3c2bae2 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-08-06 06:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.583333 | 118 | 0.618076 |
e6d005aa9ed6c53477fd7c9e85454e2af36373f2 | 14,779 | py | Python | boadata/data/field_types.py | janpipek/boadata | 3da00de9e014b764d40040c0db89aaaebc87fa18 | [
"MIT"
] | 2 | 2016-04-22T06:27:04.000Z | 2017-12-04T18:50:28.000Z | boadata/data/field_types.py | janpipek/boadata | 3da00de9e014b764d40040c0db89aaaebc87fa18 | [
"MIT"
] | 18 | 2015-03-19T10:24:04.000Z | 2017-02-22T17:12:21.000Z | boadata/data/field_types.py | janpipek/boadata | 3da00de9e014b764d40040c0db89aaaebc87fa18 | [
"MIT"
] | null | null | null | import os
from collections import OrderedDict
import numpy as np
import pandas as pd
import xarray as xr
from boadata.core import DataObject
from boadata.core.data_conversion import (
ChainConversion,
DataConversion,
IdentityConversion,
MethodConversion,
)
from .pandas_types import PandasDataFrameBase
from .xarray_types import XarrayDataArrayBase, XarrayDatasetBase
class AbstractFieldMap:
    def get_last_axis(self, axis1, axis2) -> str:
"""Get the third axis for two selected ones.
:rtype: str
"""
for ax in self.axes:
if ax not in (axis1, axis2):
return ax
def get_corresponding_column(self, axis):
i = self.axes.index(axis)
return self.columns[i]
def get_slice(self, axis, value, tolerance=1e-6):
kwargs = {"method": "nearest", axis: value, "tolerance": tolerance}
return self.__class__(self.inner_data.sel(**kwargs))
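
    # Example (illustrative): field.get_slice("z", 0.0) selects the slice whose
    # z coordinate is nearest to 0.0, within the given tolerance.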
def get_axis_values(self, axis):
"""All unique coordinates along a given axis.
:rtype: list
"""
axis_values = self.inner_data[axis]
if axis_values.ndim > 0:
return axis_values.to_series().tolist()
else:
return [float(axis_values)]
def __to_pandas_data_frame__(self):
df = self.inner_data.to_dataframe().reset_index()
return DataObject.from_native(df, source=self)
# TODO: Some interpolation?
@DataObject.register_type()
@ChainConversion.enable_to("csv", through="pandas_data_frame", pass_kwargs=["uri"])
@ChainConversion.enable_from(
"csv", through="pandas_data_frame", condition=lambda c: len(c.columns) == 4
)
class ScalarFieldMap(AbstractFieldMap, XarrayDataArrayBase):
"""A scalar variable that is defined for each point in a 3D mesh.
"""
type_name = "scalar_field_map"
@classmethod
@DataConversion.condition(lambda x: len(x.columns) == 4)
def __from_pandas_data_frame__(cls, origin, axis_columns=None, value_column=None):
if not axis_columns:
axis_columns = origin.inner_data.columns[:3]
if not value_column:
value_column = origin.inner_data.columns[3]
axis_columns = list(axis_columns)
df = origin.inner_data.set_index(axis_columns)
data = xr.DataArray.from_series(df[value_column])
return cls(inner_data=data, source=origin)
@DataObject.register_type()
# @MethodConversion.enable_to("pandas_data_frame", method_name="to_dataframe")
@ChainConversion.enable_to("csv", through="pandas_data_frame", pass_kwargs=["uri"])
@ChainConversion.enable_from(
"csv", through="pandas_data_frame", condition=lambda c: len(c.columns) == 6
)
class VectorFieldMap(AbstractFieldMap, XarrayDatasetBase):
"""A vector variable that is defined for each point in a 3D mesh.
"""
type_name = "vector_field_map"
@classmethod
@DataConversion.condition(lambda x: len(x.columns) == 6)
def __from_pandas_data_frame__(cls, origin, axis_columns=None, value_columns=None):
"""
:type origin: boadata.data.PandasDataFrame
:param axis_columns: list[str] | None
:param value_columns: list[str] | None
:return:
"""
if not axis_columns:
axis_columns = origin.inner_data.columns[:3]
axis_columns = list(axis_columns)
df = origin.inner_data.set_index(axis_columns)
data = xr.Dataset.from_dataframe(df)
return cls(inner_data=data, source=origin)
def normalize_column_names(self, field_name, inplace=True):
inner_data = self.inner_data.rename(
dict(
zip(
self.axes + self.columns,
["x", "y", "z"] + [field_name + ax for ax in "xyz"],
)
)
)
if inplace:
self.inner_data = inner_data
else:
return self.__class__(inner_data=inner_data, source=self)
def magnitude(self, column_name="size"):
"""Scalar field produced of vector length at each point.
:rtype: ScalarFieldMap
"""
magnitude_column = np.sqrt(
sum([self.inner_data[self.columns[i]] ** 2 for i in range(3)])
)
new_inner_data = xr.DataArray(
magnitude_column, self.inner_data.coords, name=column_name
)
return ScalarFieldMap(inner_data=new_inner_data, source=self)
def __to_opera_field__(self, path, length_unit="mm", field_unit="tesla", **kwargs):
with open(path, "w") as f:
f.write(" {0} {1}\n".format(" ".join([str(s) for s in self.shape[1:]]), 2))
for i, ax in enumerate(self.axes):
f.write(
" {0} {1} [{2}]\n".format(i + 1, ax.upper(), length_unit.upper())
)
for j, ax in enumerate(self.columns):
f.write(
" {0} {1} [{2}]\n".format(j + 4, ax.upper(), field_unit.upper())
)
f.write(" 0\n")
df = self.convert("pandas_data_frame").inner_data
df.to_csv(f, sep=" ", index=None, header=None, **kwargs)
return OperaFieldTextFile.from_uri(path, source=self)
# TODO: Implement conversion to ScalarFieldMap
    def mirror(self, ax, inplace=True):
        """Mirror an axis and the corresponding vector component.

        Multiplies both by -1.

        :type ax: int
        """
        if not inplace:
            a_copy = self.copy()
            a_copy.mirror(ax, inplace=True)
            return a_copy
        if ax not in (0, 1, 2):
            raise RuntimeError("Cannot mirror a non-existent axis")
        else:
            self.inner_data[self.axes[ax]] = self.inner_data[self.axes[ax]] * (-1)
            self.inner_data[self.columns[ax]] = self.inner_data[self.columns[ax]] * (-1)
def _make_interpolators(self, method, bounds_error, fill_value):
from scipy.interpolate import RegularGridInterpolator
points = tuple(self.inner_data[axis] for axis in self.axes)
interpolators = [
RegularGridInterpolator(
points=points,
values=np.asarray(self.inner_data[axis]),
method=method,
bounds_error=bounds_error,
fill_value=fill_value[i],
)
for i, axis in enumerate(self.columns)
]
return interpolators
def interpolate(
self, x, y, z, method="linear", bounds_error=False, fill_value=(0, 0, 0)
):
"""(Tri-)linear interpolation of the values.
:param x: coordinate or array of coordinates in x
:param y: coordinate or array of coordinates in y
:param z: coordinate or array of coordinates in z
        :param bounds_error: if True, raise an error for points outside the grid
        :param method: interpolation method ("linear" or "nearest")
        :param fill_value: per-component fill value used outside the grid
        :return: interpolated value (or array) for each vector component
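
        Example (illustrative; x, y, z may be scalars or arrays)::

            bx, by, bz = field.interpolate(0.0, 0.0, 10.0)
            bx, by, bz = field.interpolate(xs, ys, zs, method="nearest")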
"""
# TODO: Check that the grid is regular? But maybe it is by xarray default?
interpolators = self._make_interpolators(
method=method, bounds_error=bounds_error, fill_value=fill_value
)
data = np.concatenate(
(
np.asarray(x)[..., np.newaxis],
np.asarray(y)[..., np.newaxis],
np.asarray(z)[..., np.newaxis],
),
axis=-1,
)
# data = tuple(np.asarray(t) for t in (x,y,z))
from boadata import wrap
return [wrap(interpolators[i](data), force=False) for i in range(3)]
def resample(self, dim1, dim2, dim3, method="linear", inplace=False):
"""Change the grid points using linear (or other) interpolation.
:param dim1: new number of points in x
:param dim2: new number of points in y
:param dim3: new number of points in z
:param method: interpolation method
"""
# TODO: Include option to resample only precisely
dims = (dim1, dim2, dim3)
new_axes = [
np.linspace(self[axis].min(), self[axis].max(), dims[i])
for i, axis in enumerate(self.axes)
]
new_mesh = np.meshgrid(*new_axes, indexing="ij")
new_fields = self.interpolate(*new_mesh, method=method)
coords = OrderedDict([(axis, new_axes[i]) for i, axis in enumerate(self.axes)])
data = OrderedDict(
[
(
column,
xr.DataArray(
new_fields[i].inner_data, coords=coords, dims=self.axes
),
)
for i, column in enumerate(self.columns)
]
)
inner_data = xr.Dataset(data, coords)
if inplace:
self.inner_data = inner_data
return self
else:
return VectorFieldMap(inner_data=inner_data, source=self)
def swap_axes(self, ax1, ax2, inplace=True):
"""Swap two axes (and vector components).
swap(0, 1) means: "What was x, is now y (and vice versa)".
"""
if not inplace:
a_copy = self.copy()
a_copy.swap_axes(ax1, ax2, inplace=True)
return a_copy
if {ax1, ax2}.difference({0, 1, 2}):
raise RuntimeError("Wrong axis id")
elif ax1 == ax2:
inner_data = self.inner_data
else:
df_columns = self.axes + self.columns
df = self.convert("pandas_data_frame")
df.inner_data.reset_index(inplace=True, drop=True)
df.rename_columns(
{
self.axes[ax1]: self.axes[ax2],
self.axes[ax2]: self.axes[ax1],
self.columns[ax1]: self.columns[ax2],
self.columns[ax2]: self.columns[ax1],
}
)
df.reorder_columns(df_columns)
df.inner_data = df.inner_data.set_index(self.axes)
self.inner_data = xr.Dataset.from_dataframe(df.inner_data)
@DataObject.register_type()
@ChainConversion.enable_to("vector_field_map", through="pandas_data_frame")
class FieldTableFile(DataObject):
type_name = "field_table"
ndim = 2
real_type = None
def __init__(self, **kwargs):
super(FieldTableFile, self).__init__(**kwargs)
def _read_pandas(self):
return pd.read_table(
self.uri,
names=["x", "y", "z", "Bx", "By", "Bz"],
index_col=False,
delim_whitespace=True,
skiprows=2,
)
def __to_pandas_data_frame__(self, **kwargs):
data = self._read_pandas()
constructor = DataObject.registered_types["pandas_data_frame"]
return constructor(data, source=self, uri=self.uri, **kwargs)
def __to_text__(self, **kwargs):
constructor = DataObject.registered_types["text"]
return constructor.from_uri(self.uri, source=self, **kwargs)
@classmethod
def accepts_uri(cls, uri):
return uri[-6:] == ".TABLE"
@classmethod
def from_uri(cls, uri, **kwargs):
return cls(uri=uri, **kwargs)
@DataObject.register_type()
@IdentityConversion.enable_to("pandas_data_frame")
@ChainConversion.enable_to("vector_field_map", through="pandas_data_frame")
class ComsolFieldTextFile(PandasDataFrameBase):
type_name = "comsol_field"
@classmethod
def accepts_uri(cls, uri):
if not os.path.isfile(uri):
return False
try:
with open(uri, "rb") as f:
file_data = f.read(1000)
in_lines = file_data.decode()
for line in in_lines.splitlines():
if line.startswith("% Version") and "COMSOL" in line:
return True
        except Exception:
return False
@classmethod
def from_uri(cls, uri, index_col=False, source=None, **kwargs):
header_lines = []
with open(uri, "r") as f:
for line in f:
if line.startswith("%"):
header_lines.append(line.strip())
else:
break
frags = header_lines[-1][1:].strip().split()
column_names = [frag for frag in frags if not frag.startswith("(")]
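        # Illustrative header tail (hypothetical file): "% x y z Bx (T) By (T) Bz (T)"
        # yields column_names == ['x', 'y', 'z', 'Bx', 'By', 'Bz'] -- tokens in
        # parentheses (units) are dropped.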
data = pd.read_csv(
uri,
skiprows=len(header_lines),
index_col=False,
header=None,
delimiter="\\s+",
engine="python",
names=column_names,
)
return cls(inner_data=data, uri=uri)
@DataObject.register_type()
@IdentityConversion.enable_to("pandas_data_frame")
@ChainConversion.enable_to("vector_field_map", through="pandas_data_frame")
class OperaFieldTextFile(PandasDataFrameBase):
"""Field maps as exported from Opera.
Note: one particular setting => may not be applicable.
The example file looks like this (not indented):
201 51 51 2
1 X [MM]
2 Y [MM]
3 Z [MM]
4 BX [TESLA]
5 BY [TESLA]
6 BZ [TESLA]
0
-50.0000000000 -50.0000000000 -200.000000000 0.182000689291E-02 0.181548320077E-02 0.00000000000
-50.0000000000 -50.0000000000 -198.000000000 0.182963069824E-02 0.182665326586E-02 -0.222624232502E-03
"""
type_name = "opera_field"
@classmethod
def accepts_uri(cls, uri):
if not os.path.isfile(uri):
return False
return cls._parse_header(uri)[1] is not None
@classmethod
def _parse_header(cls, uri):
"""
:param uri:
:return: (skiprows, column_names)
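
        For the example file shown in the class docstring, this returns
        ``(8, ['X [MM]', 'Y [MM]', 'Z [MM]', 'BX [TESLA]', 'BY [TESLA]',
        'BZ [TESLA]'])``.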
"""
with open(uri, "rb") as f:
try:
file_data = f.read(1000)
in_lines = file_data.decode()
columns = []
for i, line in enumerate(in_lines.splitlines()):
if i == 0:
if len(line.strip().split()) != 4:
break
elif line.strip() == "0":
return i + 1, columns
else:
j, rest = line.strip().split(maxsplit=1)
if int(j) != i:
break
columns.append(rest)
            except Exception:
pass
return 0, None
@classmethod
def from_uri(cls, uri, **kwargs):
skiprows, column_names = cls._parse_header(uri)
data = pd.read_csv(
uri,
skiprows=skiprows,
index_col=False,
header=None,
delim_whitespace=True,
engine="python",
names=column_names,
)
return cls(inner_data=data, uri=uri)
| 33.512472 | 128 | 0.575276 |
f29cd2ef3386954a633296ce4affebd36b6b9663 | 1,046 | py | Python | Hacker_Rank/Lists.py | Jai-kishan/Practice-Questions | cf3a3eb5c2e930fcfcb762d822430060bb5deb2d | [
"Apache-2.0"
] | 1 | 2019-05-04T09:21:00.000Z | 2019-05-04T09:21:00.000Z | Hacker_Rank/Lists.py | Jai-kishan/Practice-Questions | cf3a3eb5c2e930fcfcb762d822430060bb5deb2d | [
"Apache-2.0"
] | null | null | null | Hacker_Rank/Lists.py | Jai-kishan/Practice-Questions | cf3a3eb5c2e930fcfcb762d822430060bb5deb2d | [
"Apache-2.0"
] | null | null | null | if __name__ == '__main__':
N = int(input())
    user = []
    if N == 12:
user.insert(0,5)
user.insert(1,10)
user.insert(0,6)
print(user)
user.remove(6)
user.append(9)
user.append(1)
user.sort()
print(user)
user.pop()
user.reverse()
print(user)
else:
user.append(1)
user.append(6)
user.append(10)
user.append(8)
user.append(9)
user.append(2)
user.append(12)
user.append(7)
user.append(3)
user.append(5)
user.insert(8,66)
        user.insert(1,30)
        user.insert(6,75)
        user.insert(4,44)
        user.insert(9,67)
        user.insert(2,44)
        user.insert(9,21)
        user.insert(8,87)
        user.insert(1,75)
        user.insert(1,48)
print(user)
user.reverse()
print(user)
user.sort()
print(user)
user.append(2)
user.append(5)
user.remove(2)
print(user)
| 21.791667 | 26 | 0.471319 |
7a68ba4fee3a105ade481618389fcac7e484f3ba | 149 | py | Python | make_mozilla/events/model_forms.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | [
"BSD-3-Clause"
] | 4 | 2015-05-08T16:58:53.000Z | 2019-09-06T05:30:59.000Z | make_mozilla/events/model_forms.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | [
"BSD-3-Clause"
] | 2 | 2019-02-17T17:44:53.000Z | 2019-03-28T03:54:39.000Z | make_mozilla/events/model_forms.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | [
"BSD-3-Clause"
] | 7 | 2015-05-21T15:38:29.000Z | 2019-10-28T23:39:06.000Z | from django.forms import ModelForm
from make_mozilla.events import models
class VenueForm(ModelForm):
class Meta:
model = models.Venue
| 18.625 | 38 | 0.751678 |
423a75d08dd46acb8470ba8d2d006b73dfbd7856 | 5,769 | py | Python | gramex/handlers/openapihandler.py | MSanKeys963/gramex | 8ac5fd6e79d100982fdc9e9308d9a6250ce021e2 | [
"MIT"
] | null | null | null | gramex/handlers/openapihandler.py | MSanKeys963/gramex | 8ac5fd6e79d100982fdc9e9308d9a6250ce021e2 | [
"MIT"
] | null | null | null | gramex/handlers/openapihandler.py | MSanKeys963/gramex | 8ac5fd6e79d100982fdc9e9308d9a6250ce021e2 | [
"MIT"
] | null | null | null | from fnmatch import fnmatch
import inspect
import json
import re
import gramex
from typing import get_type_hints
from textwrap import dedent
from gramex.config import merge
from gramex.transforms.transforms import typelist, Header
from gramex.handlers import BaseHandler
error_codes = {
'200': {
'description': 'Successful Response',
'content': {'application/json': {}}
},
'400': {
'description': 'Bad request',
'content': {'text/html': {'example': 'Bad request'}}
},
'401': {
'description': 'Not authorized',
'content': {'text/html': {'example': 'Not authorized'}}
},
'403': {
'description': 'Forbidden',
'content': {'text/html': {'example': 'Forbidden'}}
},
'404': {
'description': 'Not found',
'content': {'text/html': {'example': 'Not found'}}
},
'500': {
'description': 'Internal server error',
'content': {'text/html': {'example': 'Internal server error'}}
},
}
def url_name(pattern):
# Spec name is the last URL path that has alphabets
names = [part for part in pattern.split('/') if any(c.isalnum() for c in part)]
# Capitalize. url-like_this becomes "Url Like This"
names = [word.capitalize() for word in re.split(r'[\s\-_]', ' '.join(names))]
return ' '.join(names)
class OpenAPIHandler(BaseHandler):
types = {
str: 'string',
int: 'integer',
float: 'number',
bool: 'boolean',
None: 'null'
}
@classmethod
def function_spec(cls, function):
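        """Build an OpenAPI operation spec from a function's signature.

        Illustrative sketch (``greet`` is a hypothetical function)::

            def greet(name: str, times: int = 1):
                '''Greet a user.'''

            spec = OpenAPIHandler.function_spec(greet)
            # spec['parameters'][0] == {'in': 'query', 'name': 'name',
            #     'description': '', 'schema': {'type': 'string'}, 'required': True}
            # spec['parameters'][1] == {'in': 'query', 'name': 'times',
            #     'description': '', 'schema': {'type': 'integer'}, 'default': 1}
        """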
params = []
spec = {
'description': dedent(getattr(function, '__doc__', '') or ''),
'parameters': params
}
# Get the function signature. But "function: str" fails with ValueError.
# In such cases, skip the parameter configuration.
try:
signature = inspect.signature(function)
except ValueError:
return spec
hints = get_type_hints(function)
for name, param in signature.parameters.items():
hint = hints.get(name, None)
typ, is_list = typelist(hints[name]) if hint else (str, False)
config = {
'in': 'header' if hint and hint is Header else 'query',
'name': name,
'description': getattr(param.annotation, '__metadata__', ('',))[0],
'schema': {}
}
params.append(config)
            # If default is not specified, the parameter is required.
if param.default is inspect.Parameter.empty:
config['required'] = True
else:
config['default'] = param.default
# JSON Schema uses {type: array, items: {type: integer}} for array of ints.
# But a simple int is {type: integer}
if is_list:
config['schema']['type'] = 'array'
config['schema']['items'] = {'type': cls.types.get(typ, 'string')}
else:
                config['schema']['type'] = cls.types.get(typ, 'string')
spec['responses'] = error_codes
return spec
def get(self):
kwargs = self.conf.get('kwargs', {})
# TODO: Set header only if not already set in the configuration.
# This can be handled in gramex/gramex.yaml as a default configuration.
        # TODO: Switch to YAML output if a YAML spec is requested
self.set_header('Content-Type', 'application/json')
spec = {
'openapi': '3.0.2',
'info': kwargs.get('info', {}),
'servers': kwargs.get('servers', {}),
'paths': {}
}
key_patterns = kwargs.get('urls', ['*'])
# Loop through every function and get the default specs
for key, config in gramex.conf['url'].items():
# Only pick up those keys that matches the key pattern.
# Since imports create subkeys joined with :, just use the last part
key_end = key.split(':')[-1]
if not any(fnmatch(key_end, pat) for pat in key_patterns):
continue
# Normalize the pattern, e.g. /./docs -> /docs
pattern = config['pattern'].replace('/./', '/')
# Ignore invalid handlers
if key not in gramex.service.url or 'handler' not in config:
continue
# TODO: Handle wildcards, e.g. /(.*) -> / with an arg
info = spec['paths'][pattern] = {
'get': {
'summary': f'{url_name(pattern)}: {config["handler"]}'
},
}
cls = gramex.service.url[key].handler_class
if config['handler'] == 'FunctionHandler':
# Ignore functions with invalid setup
if hasattr(cls, 'info') and 'function' in cls.info:
function = cls.info['function']
                    function = getattr(function, '__func__', None) or function
if callable(function):
fnspec = self.function_spec(function)
fnspec['summary'] = f'{url_name(pattern)}: {config["handler"]}'
default_methods = 'GET POST PUT DELETE PATCH OPTIONS'.split()
for method in getattr(cls, '_http_methods', default_methods):
info[method.lower()] = fnspec
# User's spec definition overrides our spec definition
merge(info, cls.conf.get('openapi', {}), mode='overwrite')
args = self.argparse(indent={'type': int, 'default': 0})
self.write(json.dumps(
spec,
indent=args.indent or None,
separators=(', ', ': ') if args.indent else (',', ':'),
))
| 38.46 | 87 | 0.538395 |
e0d03f218716c4b307d1a5d3704af1bb6c747db4 | 17,222 | py | Python | beginner_source/fgsm_tutorial.py | RodrigoGonzalez/tutorials | fd44c1b36255b158b3690ed4223e09985221b961 | [
"BSD-3-Clause"
] | 2 | 2020-01-03T03:07:56.000Z | 2021-08-03T12:11:12.000Z | beginner_source/fgsm_tutorial.py | RodrigoGonzalez/tutorials | fd44c1b36255b158b3690ed4223e09985221b961 | [
"BSD-3-Clause"
] | null | null | null | beginner_source/fgsm_tutorial.py | RodrigoGonzalez/tutorials | fd44c1b36255b158b3690ed4223e09985221b961 | [
"BSD-3-Clause"
] | 2 | 2018-07-01T17:54:42.000Z | 2019-10-12T16:59:23.000Z | # -*- coding: utf-8 -*-
"""
Adversarial Example Generation
==============================
**Author:** `Nathan Inkawhich <https://github.com/inkawhich>`__
If you are reading this, hopefully you can appreciate how effective some
machine learning models are. Research is constantly pushing ML models to
be faster, more accurate, and more efficient. However, an often
overlooked aspect of designing and training models is security and
robustness, especially in the face of an adversary who wishes to fool
the model.
This tutorial will raise your awareness to the security vulnerabilities
of ML models, and will give insight into the hot topic of adversarial
machine learning. You may be surprised to find that adding imperceptible
perturbations to an image *can* cause drastically different model
performance. Given that this is a tutorial, we will explore the topic
via example on an image classifier. Specifically we will use one of the
first and most popular attack methods, the Fast Gradient Sign Attack
(FGSM), to fool an MNIST classifier.
"""
######################################################################
# Threat Model
# ------------
#
# For context, there are many categories of adversarial attacks, each with
# a different goal and assumption of the attacker’s knowledge. However, in
# general the overarching goal is to add the least amount of perturbation
# to the input data to cause the desired misclassification. There are
# several kinds of assumptions of the attacker’s knowledge, two of which
# are: **white-box** and **black-box**. A *white-box* attack assumes the
# attacker has full knowledge and access to the model, including
# architecture, inputs, outputs, and weights. A *black-box* attack assumes
# the attacker only has access to the inputs and outputs of the model, and
# knows nothing about the underlying architecture or weights. There are
# also several types of goals, including **misclassification** and
# **source/target misclassification**. A goal of *misclassification* means
# the adversary only wants the output classification to be wrong but does
# not care what the new classification is. A *source/target
# misclassification* means the adversary wants to alter an image that is
# originally of a specific source class so that it is classified as a
# specific target class.
#
# In this case, the FGSM attack is a *white-box* attack with the goal of
# *misclassification*. With this background information, we can now
# discuss the attack in detail.
#
# Fast Gradient Sign Attack
# -------------------------
#
# One of the first and most popular adversarial attacks to date is
# referred to as the *Fast Gradient Sign Attack (FGSM)* and is described
# by Goodfellow et. al. in `Explaining and Harnessing Adversarial
# Examples <https://arxiv.org/abs/1412.6572>`__. The attack is remarkably
# powerful, and yet intuitive. It is designed to attack neural networks by
# leveraging the way they learn, *gradients*. The idea is simple, rather
# than working to minimize the loss by adjusting the weights based on the
# backpropagated gradients, the attack *adjusts the input data to maximize
# the loss* based on the same backpropagated gradients. In other words,
# the attack uses the gradient of the loss w.r.t the input data, then
# adjusts the input data to maximize the loss.
#
# Before we jump into the code, let’s look at the famous
# `FGSM <https://arxiv.org/abs/1412.6572>`__ panda example and extract
# some notation.
#
# .. figure:: /_static/img/fgsm_panda_image.png
# :alt: fgsm_panda_image
#
# From the figure, :math:`\mathbf{x}` is the original input image
# correctly classified as a “panda”, :math:`y` is the ground truth label
# for :math:`\mathbf{x}`, :math:`\mathbf{\theta}` represents the model
# parameters, and :math:`J(\mathbf{\theta}, \mathbf{x}, y)` is the loss
# that is used to train the network. The attack backpropagates the
# gradient back to the input data to calculate
# :math:`\nabla_{x} J(\mathbf{\theta}, \mathbf{x}, y)`. Then, it adjusts
# the input data by a small step (:math:`\epsilon` or :math:`0.007` in the
# picture) in the direction (i.e.
# :math:`sign(\nabla_{x} J(\mathbf{\theta}, \mathbf{x}, y))`) that will
# maximize the loss. The resulting perturbed image, :math:`x'`, is then
# *misclassified* by the target network as a “gibbon” when it is still
# clearly a “panda”.
#
# Hopefully now the motivation for this tutorial is clear, so let's jump
# into the implementation.
#
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
######################################################################
# Implementation
# --------------
#
# In this section, we will discuss the input parameters for the tutorial,
# define the model under attack, then code the attack and run some tests.
#
# Inputs
# ~~~~~~
#
# There are only three inputs for this tutorial, and are defined as
# follows:
#
# - **epsilons** - List of epsilon values to use for the run. It is
# important to keep 0 in the list because it represents the model
# performance on the original test set. Also, intuitively we would
# expect the larger the epsilon, the more noticeable the perturbations
# but the more effective the attack in terms of degrading model
# accuracy. Since the data range here is :math:`[0,1]`, no epsilon
# value should exceed 1.
#
# - **pretrained_model** - path to the pretrained MNIST model which was
# trained with
# `pytorch/examples/mnist <https://github.com/pytorch/examples/tree/master/mnist>`__.
# For simplicity, download the pretrained model `here <https://drive.google.com/drive/folders/1fn83DF14tWmit0RTKWRhPq5uVXt73e0h?usp=sharing>`__.
#
# - **use_cuda** - boolean flag to use CUDA if desired and available.
# Note, a GPU with CUDA is not critical for this tutorial as a CPU will
# not take much time.
#
epsilons = [0, .05, .1, .15, .2, .25, .3]
pretrained_model = "lenet_mnist_model.pth"
use_cuda = False
######################################################################
# Model Under Attack
# ~~~~~~~~~~~~~~~~~~
#
# As mentioned, the model under attack is the same MNIST model from
# `pytorch/examples/mnist <https://github.com/pytorch/examples/tree/master/mnist>`__.
# You may train and save your own MNIST model or you can download and use
# the provided model. The *Net* definition and test dataloader here have
# been copied from the MNIST example. The purpose of this section is to
# define the model and dataloader, then initialize the model and load the
# pretrained weights.
#
# LeNet Model definition
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
# MNIST Test dataset and dataloader declaration
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, download=True, transform=transforms.Compose([
transforms.ToTensor(),
])),
batch_size=1, shuffle=True)
# Define what device we are using
print("CUDA Available: ",torch.cuda.is_available())
device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")
# Initialize the network
model = Net().to(device)
# Load the pretrained model
model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))
# Set the model in evaluation mode. In this case this is for the Dropout layers
model.eval()
######################################################################
# FGSM Attack
# ~~~~~~~~~~~
#
# Now, we can define the function that creates the adversarial examples by
# perturbing the original inputs. The ``fgsm_attack`` function takes three
# inputs, *image* is the original clean image (:math:`x`), *epsilon* is
# the pixel-wise perturbation amount (:math:`\epsilon`), and *data_grad*
# is gradient of the loss w.r.t the input image
# (:math:`\nabla_{x} J(\mathbf{\theta}, \mathbf{x}, y)`). The function
# then creates perturbed image as
#
# .. math:: perturbed\_image = image + epsilon*sign(data\_grad) = x + \epsilon * sign(\nabla_{x} J(\mathbf{\theta}, \mathbf{x}, y))
#
# Finally, in order to maintain the original range of the data, the
# perturbed image is clipped to range :math:`[0,1]`.
#
# FGSM attack code
def fgsm_attack(image, epsilon, data_grad):
# Collect the element-wise sign of the data gradient
sign_data_grad = data_grad.sign()
# Create the perturbed image by adjusting each pixel of the input image
perturbed_image = image + epsilon*sign_data_grad
# Adding clipping to maintain [0,1] range
perturbed_image = torch.clamp(perturbed_image, 0, 1)
# Return the perturbed image
return perturbed_image
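
# A tiny illustrative check (not part of the original tutorial): starting from
# an all-zero "image", every pixel moves by +/- epsilon in the direction of the
# gradient sign, and the final clamp keeps the result inside [0, 1].
toy_grad = torch.tensor([[1.0, -1.0], [-1.0, 1.0]])
print(fgsm_attack(torch.zeros(2, 2), 0.25, toy_grad))
# tensor([[0.2500, 0.0000],
#         [0.0000, 0.2500]])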
######################################################################
# Testing Function
# ~~~~~~~~~~~~~~~~
#
# Finally, the central result of this tutorial comes from the ``test``
# function. Each call to this test function performs a full test step on
# the MNIST test set and reports a final accuracy. However, notice that
# this function also takes an *epsilon* input. This is because the
# ``test`` function reports the accuracy of a model that is under attack
# from an adversary with strength :math:`\epsilon`. More specifically, for
# each sample in the test set, the function computes the gradient of the
# loss w.r.t the input data (:math:`data\_grad`), creates a perturbed
# image with ``fgsm_attack`` (:math:`perturbed\_data`), then checks to see
# if the perturbed example is adversarial. In addition to testing the
# accuracy of the model, the function also saves and returns some
# successful adversarial examples to be visualized later.
#
def test(model, device, test_loader, epsilon):
# Accuracy counter
correct = 0
adv_examples = []
# Loop over all examples in test set
for data, target in test_loader:
# Send the data and label to the device
data, target = data.to(device), target.to(device)
# Set requires_grad attribute of tensor. Important for Attack
data.requires_grad = True
# Forward pass the data through the model
output = model(data)
init_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
        # If the initial prediction is wrong, don't bother attacking, just move on
if init_pred.item() != target.item():
continue
# Calculate the loss
loss = F.nll_loss(output, target)
# Zero all existing gradients
model.zero_grad()
# Calculate gradients of model in backward pass
loss.backward()
# Collect datagrad
data_grad = data.grad.data
# Call FGSM Attack
perturbed_data = fgsm_attack(data, epsilon, data_grad)
# Re-classify the perturbed image
output = model(perturbed_data)
# Check for success
final_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
if final_pred.item() == target.item():
correct += 1
# Special case for saving 0 epsilon examples
if (epsilon == 0) and (len(adv_examples) < 5):
adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )
else:
# Save some adv examples for visualization later
if len(adv_examples) < 5:
adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )
# Calculate final accuracy for this epsilon
final_acc = correct/float(len(test_loader))
print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))
# Return the accuracy and an adversarial example
return final_acc, adv_examples
######################################################################
# Run Attack
# ~~~~~~~~~~
#
# The last part of the implementation is to actually run the attack. Here,
# we run a full test step for each epsilon value in the *epsilons* input.
# For each epsilon we also save the final accuracy and some successful
# adversarial examples to be plotted in the coming sections. Notice how
# the printed accuracies decrease as the epsilon value increases. Also,
# note the :math:`\epsilon=0` case represents the original test accuracy,
# with no attack.
#
accuracies = []
examples = []
# Run test for each epsilon
for eps in epsilons:
acc, ex = test(model, device, test_loader, eps)
accuracies.append(acc)
examples.append(ex)
######################################################################
# Results
# -------
#
# Accuracy vs Epsilon
# ~~~~~~~~~~~~~~~~~~~
#
# The first result is the accuracy versus epsilon plot. As alluded to
# earlier, as epsilon increases we expect the test accuracy to decrease.
# This is because larger epsilons mean we take a larger step in the
# direction that will maximize the loss. Notice the trend in the curve is
# not linear even though the epsilon values are linearly spaced. For
# example, the accuracy at :math:`\epsilon=0.05` is only about 4% lower
# than :math:`\epsilon=0`, but the accuracy at :math:`\epsilon=0.2` is 25%
# lower than :math:`\epsilon=0.15`. Also, notice the accuracy of the model
# hits random accuracy for a 10-class classifier between
# :math:`\epsilon=0.25` and :math:`\epsilon=0.3`.
#
plt.figure(figsize=(5,5))
plt.plot(epsilons, accuracies, "*-")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, .35, step=0.05))
plt.title("Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()
######################################################################
# Sample Adversarial Examples
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Remember the idea of no free lunch? In this case, as epsilon increases
# the test accuracy decreases **BUT** the perturbations become more easily
# perceptible. In reality, there is a tradeoff between accuracy
# degradation and perceptibility that an attacker must consider. Here, we
# show some examples of successful adversarial examples at each epsilon
# value. Each row of the plot shows a different epsilon value. The first
# row is the :math:`\epsilon=0` examples which represent the original
# “clean” images with no perturbation. The title of each image shows the
# “original classification -> adversarial classification.” Notice, the
# perturbations start to become evident at :math:`\epsilon=0.15` and are
# quite evident at :math:`\epsilon=0.3`. However, in all cases humans are
# still capable of identifying the correct class despite the added noise.
#
# Plot several examples of adversarial samples at each epsilon
cnt = 0
plt.figure(figsize=(8,10))
for i in range(len(epsilons)):
for j in range(len(examples[i])):
cnt += 1
plt.subplot(len(epsilons),len(examples[0]),cnt)
plt.xticks([], [])
plt.yticks([], [])
if j == 0:
plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
orig,adv,ex = examples[i][j]
plt.title("{} -> {}".format(orig, adv))
plt.imshow(ex, cmap="gray")
plt.tight_layout()
plt.show()
######################################################################
# Where to go next?
# -----------------
#
# Hopefully this tutorial gives some insight into the topic of adversarial
# machine learning. There are many potential directions to go from here.
# This attack represents the very beginning of adversarial attack research
# and since there have been many subsequent ideas for how to attack and
# defend ML models from an adversary. In fact, at NIPS 2017 there was an
# adversarial attack and defense competition and many of the methods used
# in the competition are described in this paper: `Adversarial Attacks and
# Defences Competition <https://arxiv.org/pdf/1804.00097.pdf>`__. The work
# on defense also leads into the idea of making machine learning models
# more *robust* in general, to both naturally perturbed and adversarially
# crafted inputs.
#
# Another direction to go is adversarial attacks and defense in different
# domains. Adversarial research is not limited to the image domain, check
# out `this <https://arxiv.org/pdf/1801.01944.pdf>`__ attack on
# speech-to-text models. But perhaps the best way to learn more about
# adversarial machine learning is to get your hands dirty. Try to
# implement a different attack from the NIPS 2017 competition, and see how
# it differs from FGSM. Then, try to defend the model from your own
# attacks.
#
| 41.29976 | 147 | 0.681047 |
81d013ad36bbadf083c8aede33968029214319a8 | 4,701 | py | Python | tests/parsers/test_url.py | mrossinek/cobib | 4093fc6ff59b8415250c0d94dcecd6f031a5f80d | [
"MIT"
] | 9 | 2020-09-27T19:22:35.000Z | 2022-02-27T20:00:58.000Z | tests/parsers/test_url.py | mrossinek/cobib | 4093fc6ff59b8415250c0d94dcecd6f031a5f80d | [
"MIT"
] | null | null | null | tests/parsers/test_url.py | mrossinek/cobib | 4093fc6ff59b8415250c0d94dcecd6f031a5f80d | [
"MIT"
] | 2 | 2020-12-07T15:26:03.000Z | 2021-10-03T18:04:57.000Z | """Tests for coBib's URLParser."""
# pylint: disable=no-self-use,unused-argument
import logging
from typing import Callable, Dict, Optional
import pytest
from cobib.config import Event
from cobib.database import Entry
from cobib.parsers import URLParser
from .parser_test import ParserTest
from .test_arxiv import assert_default_test_entry as assert_arxiv_entry
from .test_doi import assert_default_test_entry as assert_doi_entry
def assert_default_test_entry(entry: Entry) -> None:
"""Asserts that the passed entry is the default testing entry.
Args:
entry: the entry to assert.
"""
entry.escape_special_chars()
assert entry.label == "Grimsley_2019"
assert entry.data["doi"] == "10.1038/s41467-019-10988-2"
assert entry.data["url"] == ["https://doi.org/10.1038%2Fs41467-019-10988-2"]
assert entry.data["year"] == 2019
assert entry.data["month"] == "jul"
assert entry.data["publisher"] == "Springer Science and Business Media {LLC}"
assert entry.data["volume"] == 10
assert entry.data["number"] == 1
assert (
entry.data["author"]
== "Harper R. Grimsley and Sophia E. Economou and Edwin Barnes and Nicholas J. Mayhall"
)
assert (
entry.data["title"]
== "An adaptive variational algorithm for exact molecular simulations on a quantum computer"
)
assert entry.data["journal"] == "Nature Communications"
class TestURLParser(ParserTest):
"""Tests for coBib's URLParser."""
@pytest.mark.parametrize(
("query", "assertion"),
[
("https://arxiv.org/abs/1812.09976", assert_arxiv_entry),
pytest.param(
"https://doi.org/10.1021/acs.chemrev.8b00803",
assert_doi_entry,
marks=pytest.mark.skip("https://gitlab.com/mrossinek/cobib/-/issues/91"),
),
pytest.param(
"https://www.nature.com/articles/s41467-019-10988-2",
assert_default_test_entry,
marks=pytest.mark.skip("https://gitlab.com/mrossinek/cobib/-/issues/91"),
),
],
)
def test_from_url(
self, query: str, assertion: Callable[[Entry], None], caplog: pytest.LogCaptureFixture
) -> None:
"""Test parsing from URL.
Args:
query: the URL which to query.
assertion: the assertion method to run.
caplog: the built-in pytest fixture.
"""
entries = URLParser().parse(query)
entry = list(entries.values())[0]
assertion(entry)
def test_invalid_url(self) -> None:
"""Test parsing an invalid URL."""
entries = URLParser().parse("https://github.com/")
assert not entries
assert entries == {}
def test_dump(self, caplog: pytest.LogCaptureFixture) -> None:
"""Test dumping.
Args:
caplog: the built-in pytest fixture.
"""
entry = Entry("dummy", {"ENTRYTYPE": "unpublished"})
URLParser().dump(entry)
assert (
"cobib.parsers.url",
logging.ERROR,
"Cannot dump an entry as a URL.",
) in caplog.record_tuples
@pytest.mark.skip("https://gitlab.com/mrossinek/cobib/-/issues/91")
def test_event_pre_url_parse(self, caplog: pytest.LogCaptureFixture) -> None:
"""Tests the PreURLParse event."""
@Event.PreURLParse.subscribe
def hook(string: str) -> Optional[str]:
return "https://www.nature.com/articles/s41467-019-10988-2"
assert Event.PreURLParse.validate()
entries = URLParser().parse("Hello world!")
if any(s == "cobib.parsers.url" and t == logging.ERROR for s, t, _ in caplog.record_tuples):
pytest.skip("The requests API encountered an error. Skipping test.")
entry = list(entries.values())[0]
assert_default_test_entry(entry)
@pytest.mark.skip("https://gitlab.com/mrossinek/cobib/-/issues/91")
def test_event_post_url_parse(self, caplog: pytest.LogCaptureFixture) -> None:
"""Tests the PostURLParse event."""
@Event.PostURLParse.subscribe
def hook(bib: Dict[str, Entry]) -> None:
bib["Grimsley_2019"].data["test"] = "dummy"
assert Event.PostURLParse.validate()
entries = URLParser().parse("https://www.nature.com/articles/s41467-019-10988-2")
if any(s == "cobib.parsers.url" and t == logging.ERROR for s, t, _ in caplog.record_tuples):
pytest.skip("The requests API encountered an error. Skipping test.")
entry = list(entries.values())[0]
assert_default_test_entry(entry)
assert entry.data["test"] == "dummy"
| 35.345865 | 100 | 0.625824 |
6f41d67e5b12a4cb8ecf621ec4d30f4d59045914 | 398 | py | Python | lab_7/lab_5/boardsproj/wsgi.py | jennifernolan/Software-for-the-Global-Market | 9a219dd0c0ceb284b3458cd7ad3fe103859fbfe8 | [
"MIT"
] | null | null | null | lab_7/lab_5/boardsproj/wsgi.py | jennifernolan/Software-for-the-Global-Market | 9a219dd0c0ceb284b3458cd7ad3fe103859fbfe8 | [
"MIT"
] | null | null | null | lab_7/lab_5/boardsproj/wsgi.py | jennifernolan/Software-for-the-Global-Market | 9a219dd0c0ceb284b3458cd7ad3fe103859fbfe8 | [
"MIT"
] | null | null | null | """
WSGI config for boardsproj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "boardsproj.settings")
application = get_wsgi_application()
| 23.411765 | 78 | 0.788945 |
97f8c5312c61f36976bf146ab35786bae7e6ec9b | 24,933 | py | Python | nmigen/hdl/xfrm.py | Fatsie/amaranth | 0b28a97ca00b44301fb35e2426d571e4f6640040 | [
"BSD-2-Clause"
] | 1 | 2022-02-21T16:04:10.000Z | 2022-02-21T16:04:10.000Z | nmigen/hdl/xfrm.py | Fatsie/amaranth | 0b28a97ca00b44301fb35e2426d571e4f6640040 | [
"BSD-2-Clause"
] | 5 | 2021-03-19T00:08:54.000Z | 2021-10-02T15:02:28.000Z | nmigen/hdl/xfrm.py | Fatsie/amaranth | 0b28a97ca00b44301fb35e2426d571e4f6640040 | [
"BSD-2-Clause"
] | null | null | null | from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from collections.abc import Iterable
from .._utils import flatten
from .. import tracer
from .ast import *
from .ast import _StatementList
from .cd import *
from .ir import *
from .rec import *
__all__ = ["ValueVisitor", "ValueTransformer",
"StatementVisitor", "StatementTransformer",
"FragmentTransformer",
"TransformedElaboratable",
"DomainCollector", "DomainRenamer", "DomainLowerer",
"SampleDomainInjector", "SampleLowerer",
"SwitchCleaner", "LHSGroupAnalyzer", "LHSGroupFilter",
"ResetInserter", "EnableInserter"]
class ValueVisitor(metaclass=ABCMeta):
@abstractmethod
def on_Const(self, value):
pass # :nocov:
@abstractmethod
def on_AnyConst(self, value):
pass # :nocov:
@abstractmethod
def on_AnySeq(self, value):
pass # :nocov:
@abstractmethod
def on_Signal(self, value):
pass # :nocov:
@abstractmethod
def on_ClockSignal(self, value):
pass # :nocov:
@abstractmethod
def on_ResetSignal(self, value):
pass # :nocov:
@abstractmethod
def on_Operator(self, value):
pass # :nocov:
@abstractmethod
def on_Slice(self, value):
pass # :nocov:
@abstractmethod
def on_Part(self, value):
pass # :nocov:
@abstractmethod
def on_Cat(self, value):
pass # :nocov:
@abstractmethod
def on_Repl(self, value):
pass # :nocov:
@abstractmethod
def on_ArrayProxy(self, value):
pass # :nocov:
@abstractmethod
def on_Sample(self, value):
pass # :nocov:
@abstractmethod
def on_Initial(self, value):
pass # :nocov:
def on_unknown_value(self, value):
raise TypeError("Cannot transform value {!r}".format(value)) # :nocov:
def replace_value_src_loc(self, value, new_value):
return True
def on_value(self, value):
if type(value) is Const:
new_value = self.on_Const(value)
elif type(value) is AnyConst:
new_value = self.on_AnyConst(value)
elif type(value) is AnySeq:
new_value = self.on_AnySeq(value)
elif isinstance(value, Signal):
# Uses `isinstance()` and not `type() is` because nmigen.compat requires it.
new_value = self.on_Signal(value)
elif type(value) is ClockSignal:
new_value = self.on_ClockSignal(value)
elif type(value) is ResetSignal:
new_value = self.on_ResetSignal(value)
elif type(value) is Operator:
new_value = self.on_Operator(value)
elif type(value) is Slice:
new_value = self.on_Slice(value)
elif type(value) is Part:
new_value = self.on_Part(value)
elif type(value) is Cat:
new_value = self.on_Cat(value)
elif type(value) is Repl:
new_value = self.on_Repl(value)
elif type(value) is ArrayProxy:
new_value = self.on_ArrayProxy(value)
elif type(value) is Sample:
new_value = self.on_Sample(value)
elif type(value) is Initial:
new_value = self.on_Initial(value)
elif isinstance(value, UserValue):
# Uses `isinstance()` and not `type() is` to allow inheriting.
new_value = self.on_value(value._lazy_lower())
else:
new_value = self.on_unknown_value(value)
if isinstance(new_value, Value) and self.replace_value_src_loc(value, new_value):
new_value.src_loc = value.src_loc
return new_value
def __call__(self, value):
return self.on_value(value)
class ValueTransformer(ValueVisitor):
def on_Const(self, value):
return value
def on_AnyConst(self, value):
return value
def on_AnySeq(self, value):
return value
def on_Signal(self, value):
return value
def on_ClockSignal(self, value):
return value
def on_ResetSignal(self, value):
return value
def on_Operator(self, value):
return Operator(value.operator, [self.on_value(o) for o in value.operands])
def on_Slice(self, value):
return Slice(self.on_value(value.value), value.start, value.stop)
def on_Part(self, value):
return Part(self.on_value(value.value), self.on_value(value.offset),
value.width, value.stride)
def on_Cat(self, value):
return Cat(self.on_value(o) for o in value.parts)
def on_Repl(self, value):
return Repl(self.on_value(value.value), value.count)
def on_ArrayProxy(self, value):
return ArrayProxy([self.on_value(elem) for elem in value._iter_as_values()],
self.on_value(value.index))
def on_Sample(self, value):
return Sample(self.on_value(value.value), value.clocks, value.domain)
def on_Initial(self, value):
return value
class StatementVisitor(metaclass=ABCMeta):
@abstractmethod
def on_Assign(self, stmt):
pass # :nocov:
@abstractmethod
def on_Assert(self, stmt):
pass # :nocov:
@abstractmethod
def on_Assume(self, stmt):
pass # :nocov:
@abstractmethod
def on_Cover(self, stmt):
pass # :nocov:
@abstractmethod
def on_Switch(self, stmt):
pass # :nocov:
@abstractmethod
def on_statements(self, stmts):
pass # :nocov:
def on_unknown_statement(self, stmt):
raise TypeError("Cannot transform statement {!r}".format(stmt)) # :nocov:
def replace_statement_src_loc(self, stmt, new_stmt):
return True
def on_statement(self, stmt):
if type(stmt) is Assign:
new_stmt = self.on_Assign(stmt)
elif type(stmt) is Assert:
new_stmt = self.on_Assert(stmt)
elif type(stmt) is Assume:
new_stmt = self.on_Assume(stmt)
elif type(stmt) is Cover:
new_stmt = self.on_Cover(stmt)
elif isinstance(stmt, Switch):
# Uses `isinstance()` and not `type() is` because nmigen.compat requires it.
new_stmt = self.on_Switch(stmt)
elif isinstance(stmt, Iterable):
new_stmt = self.on_statements(stmt)
else:
new_stmt = self.on_unknown_statement(stmt)
if isinstance(new_stmt, Statement) and self.replace_statement_src_loc(stmt, new_stmt):
new_stmt.src_loc = stmt.src_loc
if isinstance(new_stmt, Switch) and isinstance(stmt, Switch):
new_stmt.case_src_locs = stmt.case_src_locs
if isinstance(new_stmt, Property):
new_stmt._MustUse__used = True
return new_stmt
def __call__(self, stmt):
return self.on_statement(stmt)
class StatementTransformer(StatementVisitor):
def on_value(self, value):
return value
def on_Assign(self, stmt):
return Assign(self.on_value(stmt.lhs), self.on_value(stmt.rhs))
def on_Assert(self, stmt):
return Assert(self.on_value(stmt.test), _check=stmt._check, _en=stmt._en)
def on_Assume(self, stmt):
return Assume(self.on_value(stmt.test), _check=stmt._check, _en=stmt._en)
def on_Cover(self, stmt):
return Cover(self.on_value(stmt.test), _check=stmt._check, _en=stmt._en)
def on_Switch(self, stmt):
cases = OrderedDict((k, self.on_statement(s)) for k, s in stmt.cases.items())
return Switch(self.on_value(stmt.test), cases)
def on_statements(self, stmts):
return _StatementList(flatten(self.on_statement(stmt) for stmt in stmts))
class FragmentTransformer:
def map_subfragments(self, fragment, new_fragment):
for subfragment, name in fragment.subfragments:
new_fragment.add_subfragment(self(subfragment), name)
def map_ports(self, fragment, new_fragment):
for port, dir in fragment.ports.items():
new_fragment.add_ports(port, dir=dir)
def map_named_ports(self, fragment, new_fragment):
if hasattr(self, "on_value"):
for name, (value, dir) in fragment.named_ports.items():
new_fragment.named_ports[name] = self.on_value(value), dir
else:
new_fragment.named_ports = OrderedDict(fragment.named_ports.items())
def map_domains(self, fragment, new_fragment):
for domain in fragment.iter_domains():
new_fragment.add_domains(fragment.domains[domain])
def map_statements(self, fragment, new_fragment):
if hasattr(self, "on_statement"):
new_fragment.add_statements(map(self.on_statement, fragment.statements))
else:
new_fragment.add_statements(fragment.statements)
def map_drivers(self, fragment, new_fragment):
for domain, signal in fragment.iter_drivers():
new_fragment.add_driver(signal, domain)
def on_fragment(self, fragment):
if isinstance(fragment, Instance):
new_fragment = Instance(fragment.type)
new_fragment.parameters = OrderedDict(fragment.parameters)
self.map_named_ports(fragment, new_fragment)
else:
new_fragment = Fragment()
new_fragment.flatten = fragment.flatten
new_fragment.attrs = OrderedDict(fragment.attrs)
self.map_ports(fragment, new_fragment)
self.map_subfragments(fragment, new_fragment)
self.map_domains(fragment, new_fragment)
self.map_statements(fragment, new_fragment)
self.map_drivers(fragment, new_fragment)
return new_fragment
def __call__(self, value, *, src_loc_at=0):
if isinstance(value, Fragment):
return self.on_fragment(value)
elif isinstance(value, TransformedElaboratable):
value._transforms_.append(self)
return value
elif hasattr(value, "elaborate"):
value = TransformedElaboratable(value, src_loc_at=1 + src_loc_at)
value._transforms_.append(self)
return value
else:
raise AttributeError("Object {!r} cannot be elaborated".format(value))
class TransformedElaboratable(Elaboratable):
def __init__(self, elaboratable, *, src_loc_at=0):
assert hasattr(elaboratable, "elaborate")
# Fields prefixed and suffixed with underscore to avoid as many conflicts with the inner
# object as possible, since we're forwarding attribute requests to it.
self._elaboratable_ = elaboratable
self._transforms_ = []
def __getattr__(self, attr):
return getattr(self._elaboratable_, attr)
def elaborate(self, platform):
fragment = Fragment.get(self._elaboratable_, platform)
for transform in self._transforms_:
fragment = transform(fragment)
return fragment
class DomainCollector(ValueVisitor, StatementVisitor):
def __init__(self):
self.used_domains = set()
self.defined_domains = set()
self._local_domains = set()
def _add_used_domain(self, domain_name):
if domain_name is None:
return
if domain_name in self._local_domains:
return
self.used_domains.add(domain_name)
def on_ignore(self, value):
pass
on_Const = on_ignore
on_AnyConst = on_ignore
on_AnySeq = on_ignore
on_Signal = on_ignore
def on_ClockSignal(self, value):
self._add_used_domain(value.domain)
def on_ResetSignal(self, value):
self._add_used_domain(value.domain)
def on_Operator(self, value):
for o in value.operands:
self.on_value(o)
def on_Slice(self, value):
self.on_value(value.value)
def on_Part(self, value):
self.on_value(value.value)
self.on_value(value.offset)
def on_Cat(self, value):
for o in value.parts:
self.on_value(o)
def on_Repl(self, value):
self.on_value(value.value)
def on_ArrayProxy(self, value):
for elem in value._iter_as_values():
self.on_value(elem)
self.on_value(value.index)
def on_Sample(self, value):
self.on_value(value.value)
def on_Initial(self, value):
pass
def on_Assign(self, stmt):
self.on_value(stmt.lhs)
self.on_value(stmt.rhs)
def on_property(self, stmt):
self.on_value(stmt.test)
on_Assert = on_property
on_Assume = on_property
on_Cover = on_property
def on_Switch(self, stmt):
self.on_value(stmt.test)
for stmts in stmt.cases.values():
self.on_statement(stmts)
def on_statements(self, stmts):
for stmt in stmts:
self.on_statement(stmt)
def on_fragment(self, fragment):
if isinstance(fragment, Instance):
for name, (value, dir) in fragment.named_ports.items():
self.on_value(value)
old_local_domains, self._local_domains = self._local_domains, set(self._local_domains)
for domain_name, domain in fragment.domains.items():
if domain.local:
self._local_domains.add(domain_name)
else:
self.defined_domains.add(domain_name)
self.on_statements(fragment.statements)
for domain_name in fragment.drivers:
self._add_used_domain(domain_name)
for subfragment, name in fragment.subfragments:
self.on_fragment(subfragment)
self._local_domains = old_local_domains
def __call__(self, fragment):
self.on_fragment(fragment)
class DomainRenamer(FragmentTransformer, ValueTransformer, StatementTransformer):
def __init__(self, domain_map):
if isinstance(domain_map, str):
domain_map = {"sync": domain_map}
for src, dst in domain_map.items():
if src == "comb":
raise ValueError("Domain '{}' may not be renamed".format(src))
if dst == "comb":
raise ValueError("Domain '{}' may not be renamed to '{}'".format(src, dst))
self.domain_map = OrderedDict(domain_map)
def on_ClockSignal(self, value):
if value.domain in self.domain_map:
return ClockSignal(self.domain_map[value.domain])
return value
def on_ResetSignal(self, value):
if value.domain in self.domain_map:
return ResetSignal(self.domain_map[value.domain],
allow_reset_less=value.allow_reset_less)
return value
def map_domains(self, fragment, new_fragment):
for domain in fragment.iter_domains():
cd = fragment.domains[domain]
if domain in self.domain_map:
if cd.name == domain:
# Rename the actual ClockDomain object.
cd.rename(self.domain_map[domain])
else:
assert cd.name == self.domain_map[domain]
new_fragment.add_domains(cd)
def map_drivers(self, fragment, new_fragment):
for domain, signals in fragment.drivers.items():
if domain in self.domain_map:
domain = self.domain_map[domain]
for signal in signals:
new_fragment.add_driver(self.on_value(signal), domain)
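# Illustrative usage sketch (hypothetical submodule name `fifo`): DomainRenamer
# is typically applied to an elaboratable to retarget its "sync" logic onto
# another clock domain, e.g.
#
#     m.submodules.fifo = DomainRenamer("pix")(fifo)
#
# which rewrites every ClockSignal/ResetSignal and driver from "sync" to "pix".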
class DomainLowerer(FragmentTransformer, ValueTransformer, StatementTransformer):
def __init__(self, domains=None):
self.domains = domains
def _resolve(self, domain, context):
if domain not in self.domains:
raise DomainError("Signal {!r} refers to nonexistent domain '{}'"
.format(context, domain))
return self.domains[domain]
def map_drivers(self, fragment, new_fragment):
for domain, signal in fragment.iter_drivers():
new_fragment.add_driver(self.on_value(signal), domain)
def replace_value_src_loc(self, value, new_value):
return not isinstance(value, (ClockSignal, ResetSignal))
def on_ClockSignal(self, value):
domain = self._resolve(value.domain, value)
return domain.clk
def on_ResetSignal(self, value):
domain = self._resolve(value.domain, value)
if domain.rst is None:
if value.allow_reset_less:
return Const(0)
else:
raise DomainError("Signal {!r} refers to reset of reset-less domain '{}'"
.format(value, value.domain))
return domain.rst
def _insert_resets(self, fragment):
for domain_name, signals in fragment.drivers.items():
if domain_name is None:
continue
domain = fragment.domains[domain_name]
if domain.rst is None:
continue
stmts = [signal.eq(Const(signal.reset, signal.width))
for signal in signals if not signal.reset_less]
fragment.add_statements(Switch(domain.rst, {1: stmts}))
def on_fragment(self, fragment):
self.domains = fragment.domains
new_fragment = super().on_fragment(fragment)
self._insert_resets(new_fragment)
return new_fragment
class SampleDomainInjector(ValueTransformer, StatementTransformer):
def __init__(self, domain):
self.domain = domain
def on_Sample(self, value):
if value.domain is not None:
return value
return Sample(value.value, value.clocks, self.domain)
def __call__(self, stmts):
return self.on_statement(stmts)
class SampleLowerer(FragmentTransformer, ValueTransformer, StatementTransformer):
def __init__(self):
self.initial = None
self.sample_cache = None
self.sample_stmts = None
def _name_reset(self, value):
if isinstance(value, Const):
return "c${}".format(value.value), value.value
elif isinstance(value, Signal):
return "s${}".format(value.name), value.reset
elif isinstance(value, ClockSignal):
return "clk", 0
elif isinstance(value, ResetSignal):
return "rst", 1
elif isinstance(value, Initial):
return "init", 0 # Past(Initial()) produces 0, 1, 0, 0, ...
else:
raise NotImplementedError # :nocov:
def on_Sample(self, value):
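        # Sample(value, n, domain) lowers to a chain of n registers clocked by
        # `domain`: each recursive call below peels off one clock cycle, and
        # the cache reuses register stages shared between samples.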
if value in self.sample_cache:
return self.sample_cache[value]
sampled_value = self.on_value(value.value)
if value.clocks == 0:
sample = sampled_value
else:
assert value.domain is not None
sampled_name, sampled_reset = self._name_reset(value.value)
name = "$sample${}${}${}".format(sampled_name, value.domain, value.clocks)
sample = Signal.like(value.value, name=name, reset_less=True, reset=sampled_reset)
sample.attrs["nmigen.sample_reg"] = True
prev_sample = self.on_Sample(Sample(sampled_value, value.clocks - 1, value.domain))
if value.domain not in self.sample_stmts:
self.sample_stmts[value.domain] = []
self.sample_stmts[value.domain].append(sample.eq(prev_sample))
self.sample_cache[value] = sample
return sample
def on_Initial(self, value):
if self.initial is None:
self.initial = Signal(name="init")
return self.initial
def map_statements(self, fragment, new_fragment):
self.initial = None
self.sample_cache = ValueDict()
self.sample_stmts = OrderedDict()
new_fragment.add_statements(map(self.on_statement, fragment.statements))
for domain, stmts in self.sample_stmts.items():
new_fragment.add_statements(stmts)
for stmt in stmts:
new_fragment.add_driver(stmt.lhs, domain)
if self.initial is not None:
new_fragment.add_subfragment(Instance("$initstate", o_Y=self.initial))
class SwitchCleaner(StatementVisitor):
def on_ignore(self, stmt):
return stmt
on_Assign = on_ignore
on_Assert = on_ignore
on_Assume = on_ignore
on_Cover = on_ignore
def on_Switch(self, stmt):
cases = OrderedDict((k, self.on_statement(s)) for k, s in stmt.cases.items())
if any(len(s) for s in cases.values()):
return Switch(stmt.test, cases)
def on_statements(self, stmts):
stmts = flatten(self.on_statement(stmt) for stmt in stmts)
return _StatementList(stmt for stmt in stmts if stmt is not None)
class LHSGroupAnalyzer(StatementVisitor):
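    # Union-find (disjoint sets) over signals: `find` follows and compresses
    # union links, `unify` merges the groups of all signals appearing together
    # on an LHS, and `groups` materializes the final partition as SignalSets.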
def __init__(self):
self.signals = SignalDict()
self.unions = OrderedDict()
def find(self, signal):
if signal not in self.signals:
self.signals[signal] = len(self.signals)
group = self.signals[signal]
while group in self.unions:
group = self.unions[group]
self.signals[signal] = group
return group
def unify(self, root, *leaves):
root_group = self.find(root)
for leaf in leaves:
leaf_group = self.find(leaf)
if root_group == leaf_group:
continue
self.unions[leaf_group] = root_group
def groups(self):
groups = OrderedDict()
for signal in self.signals:
group = self.find(signal)
if group not in groups:
groups[group] = SignalSet()
groups[group].add(signal)
return groups
def on_Assign(self, stmt):
lhs_signals = stmt._lhs_signals()
if lhs_signals:
self.unify(*stmt._lhs_signals())
def on_property(self, stmt):
lhs_signals = stmt._lhs_signals()
if lhs_signals:
self.unify(*stmt._lhs_signals())
on_Assert = on_property
on_Assume = on_property
on_Cover = on_property
def on_Switch(self, stmt):
for case_stmts in stmt.cases.values():
self.on_statements(case_stmts)
def on_statements(self, stmts):
for stmt in stmts:
self.on_statement(stmt)
def __call__(self, stmts):
self.on_statements(stmts)
return self.groups()
class LHSGroupFilter(SwitchCleaner):
def __init__(self, signals):
self.signals = signals
def on_Assign(self, stmt):
# The invariant provided by LHSGroupAnalyzer is that all signals that ever appear together
# on LHS are a part of the same group, so it is sufficient to check any of them.
lhs_signals = stmt.lhs._lhs_signals()
if lhs_signals:
any_lhs_signal = next(iter(lhs_signals))
if any_lhs_signal in self.signals:
return stmt
def on_property(self, stmt):
any_lhs_signal = next(iter(stmt._lhs_signals()))
if any_lhs_signal in self.signals:
return stmt
on_Assert = on_property
on_Assume = on_property
on_Cover = on_property
class _ControlInserter(FragmentTransformer):
def __init__(self, controls):
self.src_loc = None
if isinstance(controls, Value):
controls = {"sync": controls}
self.controls = OrderedDict(controls)
def on_fragment(self, fragment):
new_fragment = super().on_fragment(fragment)
for domain, signals in fragment.drivers.items():
if domain is None or domain not in self.controls:
continue
self._insert_control(new_fragment, domain, signals)
return new_fragment
def _insert_control(self, fragment, domain, signals):
raise NotImplementedError # :nocov:
def __call__(self, value, *, src_loc_at=0):
self.src_loc = tracer.get_src_loc(src_loc_at=src_loc_at)
return super().__call__(value, src_loc_at=1 + src_loc_at)
class ResetInserter(_ControlInserter):
def _insert_control(self, fragment, domain, signals):
stmts = [s.eq(Const(s.reset, s.width)) for s in signals if not s.reset_less]
fragment.add_statements(Switch(self.controls[domain], {1: stmts}, src_loc=self.src_loc))
class EnableInserter(_ControlInserter):
def _insert_control(self, fragment, domain, signals):
stmts = [s.eq(s) for s in signals]
fragment.add_statements(Switch(self.controls[domain], {0: stmts}, src_loc=self.src_loc))
def on_fragment(self, fragment):
new_fragment = super().on_fragment(fragment)
if isinstance(new_fragment, Instance) and new_fragment.type in ("$memrd", "$memwr"):
clk_port, clk_dir = new_fragment.named_ports["CLK"]
if isinstance(clk_port, ClockSignal) and clk_port.domain in self.controls:
en_port, en_dir = new_fragment.named_ports["EN"]
en_port = Mux(self.controls[clk_port.domain], en_port, Const(0, len(en_port)))
new_fragment.named_ports["EN"] = en_port, en_dir
return new_fragment
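# Illustrative usage sketch (hypothetical signal/submodule names):
#
#     m.submodules.counter = ResetInserter(self.clear)(counter)
#
# wraps an elaboratable so that asserting `self.clear` returns every signal
# driven in its "sync" domain to its reset value; EnableInserter is the dual,
# holding state whenever the control signal is deasserted.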
| 33.512097 | 98 | 0.6345 |
8d9ef9043a87279fac4fb96ca234678945004880 | 11,321 | py | Python | steam/protobufs/steammessages_clientserver_friends.py | smtp639/steam.py | bd67e8d91ac17984ef0657fa4625eb2fca81fb68 | ["MIT"] | null | null | null | steam/protobufs/steammessages_clientserver_friends.py | smtp639/steam.py | bd67e8d91ac17984ef0657fa4625eb2fca81fb68 | ["MIT"] | null | null | null | steam/protobufs/steammessages_clientserver_friends.py | smtp639/steam.py | bd67e8d91ac17984ef0657fa4625eb2fca81fb68 | ["MIT"] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: steammessages_clientserver_friends.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import List
import betterproto
@dataclass(eq=False, repr=False)
class CMsgClientFriendMsg(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
chat_entry_type: int = betterproto.int32_field(2)
message: bytes = betterproto.bytes_field(3)
rtime32_server_timestamp: int = betterproto.fixed32_field(4)
echo_to_sender: bool = betterproto.bool_field(5)
@dataclass(eq=False, repr=False)
class CMsgClientFriendMsgIncoming(betterproto.Message):
steamid_from: int = betterproto.fixed64_field(1)
chat_entry_type: int = betterproto.int32_field(2)
from_limited_account: bool = betterproto.bool_field(3)
message: bytes = betterproto.bytes_field(4)
rtime32_server_timestamp: int = betterproto.fixed32_field(5)
@dataclass(eq=False, repr=False)
class CMsgClientAddFriend(betterproto.Message):
steamid_to_add: int = betterproto.fixed64_field(1)
accountname_or_email_to_add: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientAddFriendResponse(betterproto.Message):
eresult: int = betterproto.int32_field(1)
steam_id_added: int = betterproto.fixed64_field(2)
persona_name_added: str = betterproto.string_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientRemoveFriend(betterproto.Message):
friendid: int = betterproto.fixed64_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientHideFriend(betterproto.Message):
friendid: int = betterproto.fixed64_field(1)
hide: bool = betterproto.bool_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientFriendsList(betterproto.Message):
bincremental: bool = betterproto.bool_field(1)
friends: List["CMsgClientFriendsListFriend"] = betterproto.message_field(2)
max_friend_count: int = betterproto.uint32_field(3)
active_friend_count: int = betterproto.uint32_field(4)
friends_limit_hit: bool = betterproto.bool_field(5)
@dataclass(eq=False, repr=False)
class CMsgClientFriendsListFriend(betterproto.Message):
ulfriendid: int = betterproto.fixed64_field(1)
efriendrelationship: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientFriendsGroupsList(betterproto.Message):
bremoval: bool = betterproto.bool_field(1)
bincremental: bool = betterproto.bool_field(2)
friend_groups: List["CMsgClientFriendsGroupsListFriendGroup"] = betterproto.message_field(3)
memberships: List["CMsgClientFriendsGroupsListFriendGroupsMembership"] = betterproto.message_field(4)
@dataclass(eq=False, repr=False)
class CMsgClientFriendsGroupsListFriendGroup(betterproto.Message):
n_group_id: int = betterproto.int32_field(1)
str_group_name: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientFriendsGroupsListFriendGroupsMembership(betterproto.Message):
ul_steam_id: int = betterproto.fixed64_field(1)
n_group_id: int = betterproto.int32_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientPlayerNicknameList(betterproto.Message):
removal: bool = betterproto.bool_field(1)
incremental: bool = betterproto.bool_field(2)
nicknames: List["CMsgClientPlayerNicknameListPlayerNickname"] = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientPlayerNicknameListPlayerNickname(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
nickname: str = betterproto.string_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientSetPlayerNickname(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
nickname: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientSetPlayerNicknameResponse(betterproto.Message):
eresult: int = betterproto.uint32_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientRequestFriendData(betterproto.Message):
persona_state_requested: int = betterproto.uint32_field(1)
friends: List[int] = betterproto.fixed64_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientChangeStatus(betterproto.Message):
persona_state: int = betterproto.uint32_field(1)
player_name: str = betterproto.string_field(2)
is_auto_generated_name: bool = betterproto.bool_field(3)
high_priority: bool = betterproto.bool_field(4)
persona_set_by_user: bool = betterproto.bool_field(5)
persona_state_flags: int = betterproto.uint32_field(6)
need_persona_response: bool = betterproto.bool_field(7)
is_client_idle: bool = betterproto.bool_field(8)
@dataclass(eq=False, repr=False)
class CMsgPersonaChangeResponse(betterproto.Message):
result: int = betterproto.uint32_field(1)
player_name: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientPersonaState(betterproto.Message):
status_flags: int = betterproto.uint32_field(1)
friends: List["CMsgClientPersonaStateFriend"] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientPersonaStateFriend(betterproto.Message):
friendid: int = betterproto.fixed64_field(1)
persona_state: int = betterproto.uint32_field(2)
game_played_app_id: int = betterproto.uint32_field(3)
game_server_ip: int = betterproto.uint32_field(4)
game_server_port: int = betterproto.uint32_field(5)
persona_state_flags: int = betterproto.uint32_field(6)
online_session_instances: int = betterproto.uint32_field(7)
persona_set_by_user: bool = betterproto.bool_field(10)
player_name: str = betterproto.string_field(15)
query_port: int = betterproto.uint32_field(20)
steamid_source: int = betterproto.fixed64_field(25)
avatar_hash: bytes = betterproto.bytes_field(31)
last_logoff: int = betterproto.uint32_field(45)
last_logon: int = betterproto.uint32_field(46)
last_seen_online: int = betterproto.uint32_field(47)
clan_rank: int = betterproto.uint32_field(50)
game_name: str = betterproto.string_field(55)
gameid: int = betterproto.fixed64_field(56)
game_data_blob: bytes = betterproto.bytes_field(60)
clan_data: "CMsgClientPersonaStateFriendClanData" = betterproto.message_field(64)
clan_tag: str = betterproto.string_field(65)
rich_presence: List["CMsgClientPersonaStateFriendKv"] = betterproto.message_field(71)
broadcast_id: int = betterproto.fixed64_field(72)
game_lobby_id: int = betterproto.fixed64_field(73)
watching_broadcast_accountid: int = betterproto.uint32_field(74)
watching_broadcast_appid: int = betterproto.uint32_field(75)
watching_broadcast_viewers: int = betterproto.uint32_field(76)
watching_broadcast_title: str = betterproto.string_field(77)
@dataclass(eq=False, repr=False)
class CMsgClientPersonaStateFriendClanData(betterproto.Message):
ogg_app_id: int = betterproto.uint32_field(1)
chat_group_id: int = betterproto.uint64_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientPersonaStateFriendKv(betterproto.Message):
key: str = betterproto.string_field(1)
value: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientFriendProfileInfo(betterproto.Message):
steamid_friend: int = betterproto.fixed64_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientFriendProfileInfoResponse(betterproto.Message):
eresult: int = betterproto.int32_field(1)
steamid_friend: int = betterproto.fixed64_field(2)
time_created: int = betterproto.uint32_field(3)
real_name: str = betterproto.string_field(4)
city_name: str = betterproto.string_field(5)
state_name: str = betterproto.string_field(6)
country_name: str = betterproto.string_field(7)
headline: str = betterproto.string_field(8)
summary: str = betterproto.string_field(9)
@dataclass(eq=False, repr=False)
class CMsgClientCreateFriendsGroup(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
groupname: str = betterproto.string_field(2)
steamid_friends: List[int] = betterproto.fixed64_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientCreateFriendsGroupResponse(betterproto.Message):
eresult: int = betterproto.uint32_field(1)
groupid: int = betterproto.int32_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientDeleteFriendsGroup(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
groupid: int = betterproto.int32_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientDeleteFriendsGroupResponse(betterproto.Message):
eresult: int = betterproto.uint32_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientManageFriendsGroup(betterproto.Message):
groupid: int = betterproto.int32_field(1)
groupname: str = betterproto.string_field(2)
steamid_friends_added: List[int] = betterproto.fixed64_field(3)
steamid_friends_removed: List[int] = betterproto.fixed64_field(4)
@dataclass(eq=False, repr=False)
class CMsgClientManageFriendsGroupResponse(betterproto.Message):
eresult: int = betterproto.uint32_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientAddFriendToGroup(betterproto.Message):
groupid: int = betterproto.int32_field(1)
steamiduser: int = betterproto.fixed64_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientAddFriendToGroupResponse(betterproto.Message):
eresult: int = betterproto.uint32_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientRemoveFriendFromGroup(betterproto.Message):
groupid: int = betterproto.int32_field(1)
steamiduser: int = betterproto.fixed64_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientRemoveFriendFromGroupResponse(betterproto.Message):
eresult: int = betterproto.uint32_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientGetEmoticonList(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class CMsgClientEmoticonList(betterproto.Message):
emoticons: List["CMsgClientEmoticonListEmoticon"] = betterproto.message_field(1)
stickers: List["CMsgClientEmoticonListSticker"] = betterproto.message_field(2)
effects: List["CMsgClientEmoticonListEffect"] = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientEmoticonListEmoticon(betterproto.Message):
name: str = betterproto.string_field(1)
count: int = betterproto.int32_field(2)
time_last_used: int = betterproto.uint32_field(3)
use_count: int = betterproto.uint32_field(4)
time_received: int = betterproto.uint32_field(5)
appid: int = betterproto.uint32_field(6)
@dataclass(eq=False, repr=False)
class CMsgClientEmoticonListSticker(betterproto.Message):
name: str = betterproto.string_field(1)
count: int = betterproto.int32_field(2)
time_received: int = betterproto.uint32_field(3)
appid: int = betterproto.uint32_field(4)
time_last_used: int = betterproto.uint32_field(5)
use_count: int = betterproto.uint32_field(6)
@dataclass(eq=False, repr=False)
class CMsgClientEmoticonListEffect(betterproto.Message):
name: str = betterproto.string_field(1)
count: int = betterproto.int32_field(2)
time_received: int = betterproto.uint32_field(3)
infinite_use: bool = betterproto.bool_field(4)
appid: int = betterproto.uint32_field(5)
| 37.486755 | 105 | 0.78085 |
9f0655af285efca0a0601167370be8af87f62b65 | 1,115 | py | Python | open-rpa/Scripts/rst2html5.py | rmdelossantos/open-rpa | 1b4f68cb0318e69b888f3ef1f938a8a708789e94 | ["MIT"] | null | null | null | open-rpa/Scripts/rst2html5.py | rmdelossantos/open-rpa | 1b4f68cb0318e69b888f3ef1f938a8a708789e94 | ["MIT"] | null | null | null | open-rpa/Scripts/rst2html5.py | rmdelossantos/open-rpa | 1b4f68cb0318e69b888f3ef1f938a8a708789e94 | ["MIT"] | null | null | null | #!d:\russ\github\open-rpa\open-rpa\scripts\python.exe
# -*- coding: utf8 -*-
# :Copyright: © 2015 Günter Milde.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: https://opensource.org/licenses/BSD-2-Clause
#
# Revision: $Revision: 8567 $
# Date: $Date: 2020-09-30 13:57:21 +0200 (Mi, 30. Sep 2020) $
"""
A minimal front end to the Docutils Publisher, producing HTML 5 documents.
The output is also valid XML.
"""
try:
    import locale  # module missing in Jython
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # Covers ImportError (Jython) as well as locale.Error from setlocale().
    pass
from docutils.core import publish_cmdline, default_description
description = (u'Generates HTML5 documents from standalone '
u'reStructuredText sources.\n'
+ default_description)
publish_cmdline(writer_name='html5', description=description)
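# Usage sketch: publish_cmdline() implements the standard Docutils front end,
# reading reStructuredText from the first positional argument (or stdin) and
# writing HTML5 to the second (or stdout), e.g.
#
#     python rst2html5.py README.rst README.html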
| 31.857143 | 78 | 0.715695 |
77c1f7475549fd7e711ee5e8cf96af745796232a | 210 | py | Python | {{cookiecutter.github_project_name}}/tests/test_version.py | DerThorsten/py_cookiecutter | 9fb5aa9d192686ed8e3a4e0ad266d564c05d380f | ["BSD-3-Clause"] | 1 | 2020-02-21T12:28:06.000Z | 2020-02-21T12:28:06.000Z | {{cookiecutter.github_project_name}}/tests/test_version.py | DerThorsten/py_cookiecutter | 9fb5aa9d192686ed8e3a4e0ad266d564c05d380f | ["BSD-3-Clause"] | null | null | null | {{cookiecutter.github_project_name}}/tests/test_version.py | DerThorsten/py_cookiecutter | 9fb5aa9d192686ed8e3a4e0ad266d564c05d380f | ["BSD-3-Clause"] | null | null | null | import {{cookiecutter.python_package_name}}
class TestVersion(object):
def test_version(self):
v = {{cookiecutter.python_package_name}}.__version__
assert v == '{{cookiecutter.version}}'
| 23.333333 | 60 | 0.7 |
7c055aae63e6660f07c2809c14b88ed5bc623156 | 6,140 | py | Python | img_scale.py | CTJChen/ctc_astropylib | a4194147b8d114fdf6fe8677aa48c3c9d59517a5 | ["Apache-2.0"] | 5 | 2021-11-18T13:27:30.000Z | 2021-12-05T00:15:33.000Z | dust/img_scale.py | interstellarmedium/interstellarmedium.github.io | 0440a5bd80052ab87575e70fc39acd4bf8e225b3 | ["CC0-1.0"] | null | null | null | dust/img_scale.py | interstellarmedium/interstellarmedium.github.io | 0440a5bd80052ab87575e70fc39acd4bf8e225b3 | ["CC0-1.0"] | null | null | null | #
# Written by Min-Su Shin
# Department of Astrophysical Sciences, Princeton University
#
# You can freely use the code.
#
import numpy
import math
def sky_median_sig_clip(input_arr, sig_fract, percent_fract, max_iter=100):
"""Estimating sky value for a given number of iterations
@type input_arr: numpy array
@param input_arr: image data array
@type sig_fract: float
@param sig_fract: fraction of sigma clipping
@type percent_fract: float
@param percent_fract: convergence fraction
	@type max_iter: int
	@param max_iter: maximum number of iterations
	@rtype: tuple
	@return: (sky value, number of iterations)
"""
work_arr = numpy.ravel(input_arr)
old_sky = numpy.median(work_arr)
sig = work_arr.std()
upper_limit = old_sky + sig_fract * sig
lower_limit = old_sky - sig_fract * sig
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
work_arr = work_arr[indices]
new_sky = numpy.median(work_arr)
iteration = 0
while ((math.fabs(old_sky - new_sky)/new_sky) > percent_fract) and (iteration < max_iter) :
iteration += 1
old_sky = new_sky
sig = work_arr.std()
upper_limit = old_sky + sig_fract * sig
lower_limit = old_sky - sig_fract * sig
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
work_arr = work_arr[indices]
new_sky = numpy.median(work_arr)
return (new_sky, iteration)
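# Note: the estimator above iterates median-based sigma clipping until the
# relative change in the sky value drops below percent_fract (or max_iter is
# hit); sky_mean_sig_clip below applies the same scheme with the mean.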
def sky_mean_sig_clip(input_arr, sig_fract, percent_fract, max_iter=100):
"""Estimating sky value for a given number of iterations
@type input_arr: numpy array
@param input_arr: image data array
@type sig_fract: float
@param sig_fract: fraction of sigma clipping
@type percent_fract: float
@param percent_fract: convergence fraction
	@type max_iter: int
	@param max_iter: maximum number of iterations
	@rtype: tuple
	@return: (sky value, number of iterations)
"""
work_arr = numpy.ravel(input_arr)
old_sky = numpy.mean(work_arr)
sig = work_arr.std()
upper_limit = old_sky + sig_fract * sig
lower_limit = old_sky - sig_fract * sig
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
work_arr = work_arr[indices]
new_sky = numpy.mean(work_arr)
iteration = 0
while ((math.fabs(old_sky - new_sky)/new_sky) > percent_fract) and (iteration < max_iter) :
iteration += 1
old_sky = new_sky
sig = work_arr.std()
upper_limit = old_sky + sig_fract * sig
lower_limit = old_sky - sig_fract * sig
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
work_arr = work_arr[indices]
new_sky = numpy.mean(work_arr)
return (new_sky, iteration)
def linear(inputArray, scale_min=None, scale_max=None):
"""Performs linear scaling of the input numpy array.
@type inputArray: numpy array
@param inputArray: image data array
@type scale_min: float
@param scale_min: minimum data value
@type scale_max: float
@param scale_max: maximum data value
@rtype: numpy array
@return: image data array
"""
print("img_scale : linear")
imageData=numpy.array(inputArray, copy=True)
if scale_min == None:
scale_min = imageData.min()
if scale_max == None:
scale_max = imageData.max()
imageData = imageData.clip(min=scale_min, max=scale_max)
imageData = (imageData -scale_min) / (scale_max - scale_min)
indices = numpy.where(imageData < 0)
imageData[indices] = 0.0
indices = numpy.where(imageData > 1)
imageData[indices] = 1.0
return imageData
def sqrt(inputArray, scale_min=None, scale_max=None):
"""Performs sqrt scaling of the input numpy array.
@type inputArray: numpy array
@param inputArray: image data array
@type scale_min: float
@param scale_min: minimum data value
@type scale_max: float
@param scale_max: maximum data value
@rtype: numpy array
@return: image data array
"""
print("img_scale : sqrt")
imageData=numpy.array(inputArray, copy=True)
if scale_min == None:
scale_min = imageData.min()
if scale_max == None:
scale_max = imageData.max()
imageData = imageData.clip(min=scale_min, max=scale_max)
imageData = imageData - scale_min
indices = numpy.where(imageData < 0)
imageData[indices] = 0.0
imageData = numpy.sqrt(imageData)
imageData = imageData / math.sqrt(scale_max - scale_min)
return imageData
def log(inputArray, scale_min=None, scale_max=None):
"""Performs log10 scaling of the input numpy array.
@type inputArray: numpy array
@param inputArray: image data array
@type scale_min: float
@param scale_min: minimum data value
@type scale_max: float
@param scale_max: maximum data value
@rtype: numpy array
@return: image data array
"""
print("img_scale : log")
imageData=numpy.array(inputArray, copy=True)
if scale_min == None:
scale_min = imageData.min()
if scale_max == None:
scale_max = imageData.max()
factor = math.log10(scale_max - scale_min)
indices0 = numpy.where(imageData < scale_min)
indices1 = numpy.where((imageData >= scale_min) & (imageData <= scale_max))
indices2 = numpy.where(imageData > scale_max)
imageData[indices0] = 0.0
imageData[indices2] = 1.0
	try:
		imageData[indices1] = numpy.log10(imageData[indices1])/factor
	except Exception:
		print("Error on numpy.log10 for ", (imageData[indices1] - scale_min))
return imageData
def asinh(inputArray, scale_min=None, scale_max=None, non_linear=2.0):
"""Performs asinh scaling of the input numpy array.
@type inputArray: numpy array
@param inputArray: image data array
@type scale_min: float
@param scale_min: minimum data value
@type scale_max: float
@param scale_max: maximum data value
@type non_linear: float
@param non_linear: non-linearity factor
@rtype: numpy array
@return: image data array
"""
print("img_scale : asinh")
imageData=numpy.array(inputArray, copy=True)
if scale_min == None:
scale_min = imageData.min()
if scale_max == None:
scale_max = imageData.max()
factor = numpy.arcsinh((scale_max - scale_min)/non_linear)
indices0 = numpy.where(imageData < scale_min)
indices1 = numpy.where((imageData >= scale_min) & (imageData <= scale_max))
indices2 = numpy.where(imageData > scale_max)
imageData[indices0] = 0.0
imageData[indices2] = 1.0
imageData[indices1] = numpy.arcsinh((imageData[indices1] - \
scale_min)/non_linear)/factor
return imageData
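# Minimal usage sketch (synthetic data and illustrative parameter choices; in
# practice the array would come from a FITS image, e.g. via astropy.io.fits):
if __name__ == "__main__":
	demo_img = numpy.random.lognormal(mean=1.0, sigma=1.0, size=(64, 64))
	sky, n_iter = sky_median_sig_clip(demo_img, sig_fract=3.0, percent_fract=0.01)
	demo_scaled = asinh(demo_img - sky, scale_min=0.0, non_linear=2.0)
	print("sky =", sky, "after", n_iter, "iterations;",
	      "scaled range:", demo_scaled.min(), "-", demo_scaled.max())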
| 28.691589 | 92 | 0.734202 |
005ea1fbbf3479d9633f88a70aa016d6bd9d162b | 2,278 | py | Python | modules/wiki/samples.py | dgerod/cb4oru | b5fb3bd52193ab21b30b6917232a799ac41b6c32 | ["Apache-2.0"] | null | null | null | modules/wiki/samples.py | dgerod/cb4oru | b5fb3bd52193ab21b30b6917232a799ac41b6c32 | ["Apache-2.0"] | null | null | null | modules/wiki/samples.py | dgerod/cb4oru | b5fb3bd52193ab21b30b6917232a799ac41b6c32 | ["Apache-2.0"] | null | null | null | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample wiki."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
SAMPLE_WIKI_ENTRY_1 = {
'edit_url': None,
'position': 1,
'is_html': True,
'is_draft': True,
'html': """
<br>
<b>Customize Your Experience</b>
<br><br>
You can customize your experience in several ways:
<ul>
<li>You can watch the videos multiple times for a deeper understanding
of each lesson. </li>
<li>You can read the text version for each lesson. Click the button
above the video to access it.</li>
<li>Lesson activities are designed for multiple levels of experience.
The first question checks your recall of the material in the video;
the second question lets you verify your mastery of the lesson; the
third question is an opportunity to apply your skills and share your
experiences in the class forums. You can answer some or all of the
questions depending on your familiarity and interest in the topic.
Activities are not graded and do not affect your final grade. </li>
<li>We'll also post extra challenges in the forums for people who seek
additional opportunities to practice and test their new skills!</li>
</ul>
<br>
<b>Forum</b>
<br>
<p>Apply your skills, share with others, and connect with your peers
and course staff in the <a href="forum">forum.</a> Discuss your favorite
search tips and troubleshoot technical issues. We'll also post bonus
videos and challenges there!</p>
<p>For an optimal learning experience, please plan to use the most
recent version of your browser, as well as a desktop, laptop or a tablet
computer instead of your mobile phone.</p>
"""}
SAMPLE_WIKI_ENTRIES = [SAMPLE_WIKI_ENTRY_1]
| 35.046154 | 74 | 0.742757 |
d9e59defb77c7bd2f5c437ffd0329af961a50a43 | 15,488 | py | Python | InternalPythonModules/android/operabrowser.py | drwetter/autopsy | ec91e3611159b148a6f205a667a44dd6a070ae13 | ["Apache-2.0"] | 1,473 | 2015-01-02T06:13:10.000Z | 2022-03-30T09:45:34.000Z | InternalPythonModules/android/operabrowser.py | drwetter/autopsy | ec91e3611159b148a6f205a667a44dd6a070ae13 | ["Apache-2.0"] | 1,068 | 2015-02-04T14:33:38.000Z | 2022-03-31T03:49:28.000Z | InternalPythonModules/android/operabrowser.py | drwetter/autopsy | ec91e3611159b148a6f205a667a44dd6a070ae13 | ["Apache-2.0"] | 510 | 2015-01-09T19:46:08.000Z | 2022-03-23T13:25:34.000Z | """
Autopsy Forensic Browser
Copyright 2019-2021 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Long
from java.lang import String
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from java.util import ArrayList
from org.apache.commons.codec.binary import Base64
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule import NoCurrentCaseException
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.coreutils import AppSQLiteDB
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
from org.sleuthkit.datamodel.Blackboard import BlackboardException
from org.sleuthkit.datamodel.blackboardutils import WebBrowserArtifactsHelper
import traceback
import general
"""
Finds the SQLite DB for Opera browser, parses the DB for Bookmarks, Cookies, Web History
and adds artifacts to the case.
"""
class OperaAnalyzer(general.AndroidComponentAnalyzer):
"""
Opera is a web browser on Android phones.
This module finds the SQLite DB for Opera, parses the DB for bookmarks,
downloads, web history, cookies, autofill and creates artifacts.
Opera version 53.1.2569 has the following database structure:
- cookies
-- A cookies table to store cookies
- history
    -- A urls table to store history of visited urls
-- A downloads table to store downloads
- Web Data
    -- An autofill table to store discrete autofill name/value pairs
    -- An autofill_profile_names table to store name fields (first name, middle name, last name)
    -- An autofill_profiles table to store the physical snailmail address (street address, city, state, country, zip)
    -- An autofill_profile_phones table to store phone numbers
    -- An autofill_profile_emails table to store email addresses
"""
def __init__(self):
self._logger = Logger.getLogger(self.__class__.__name__)
self._PACKAGE_NAME = "com.opera.browser"
self._MODULE_NAME = "Opera Analyzer"
self._PROGRAM_NAME = "Opera"
self._VERSION = "53.1.2569"
self.current_case = None
def analyzeCookies(self, dataSource, fileManager, context):
cookiesDbs = AppSQLiteDB.findAppDatabases(dataSource, "Cookies", True, self._PACKAGE_NAME)
for cookiesDb in cookiesDbs:
try:
cookiesDbHelper = WebBrowserArtifactsHelper(self.current_case.getSleuthkitCase(),
self._MODULE_NAME, cookiesDb.getDBFile(), context.getJobId())
cookiesResultSet = cookiesDb.runQuery("SELECT host_key, name, value, creation_utc FROM cookies")
if cookiesResultSet is not None:
while cookiesResultSet.next():
createTime = cookiesResultSet.getLong("creation_utc") / 1000000 - 11644473600 # Webkit time
cookiesDbHelper.addWebCookie( cookiesResultSet.getString("host_key"),
createTime,
cookiesResultSet.getString("name"),
cookiesResultSet.getString("value"),
self._PROGRAM_NAME)
except SQLException as ex:
self._logger.log(Level.WARNING, "Error processing query results for Opera cookies.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
self._logger.log(Level.SEVERE, "Failed to add Opera cookie artifacts.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
self._logger.log(Level.WARNING, "Failed to post artifacts.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
finally:
cookiesDb.close()
def analyzeHistory(self, dataSource, fileManager, context):
historyDbs = AppSQLiteDB.findAppDatabases(dataSource, "History", True, self._PACKAGE_NAME)
for historyDb in historyDbs:
try:
historyDbHelper = WebBrowserArtifactsHelper(self.current_case.getSleuthkitCase(),
self._MODULE_NAME, historyDb.getDBFile(), context.getJobId())
historyResultSet = historyDb.runQuery("SELECT url, title, last_visit_time FROM urls")
if historyResultSet is not None:
while historyResultSet.next():
accessTime = historyResultSet.getLong("last_visit_time") / 1000000 - 11644473600
historyDbHelper.addWebHistory( historyResultSet.getString("url"),
accessTime,
"", # referrer
historyResultSet.getString("title"),
self._PROGRAM_NAME)
except SQLException as ex:
self._logger.log(Level.WARNING, "Error processing query results for Opera history.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
self._logger.log(Level.SEVERE, "Failed to add Opera history artifacts.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
self._logger.log(Level.WARNING, "Failed to post artifacts.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
finally:
historyDb.close()
def analyzeDownloads(self, dataSource, fileManager, context):
downloadsDbs = AppSQLiteDB.findAppDatabases(dataSource, "History", True, self._PACKAGE_NAME)
for downloadsDb in downloadsDbs:
try:
downloadsDbHelper = WebBrowserArtifactsHelper(self.current_case.getSleuthkitCase(),
self._MODULE_NAME, downloadsDb.getDBFile(), context.getJobId())
queryString = "SELECT target_path, start_time, url FROM downloads"\
" INNER JOIN downloads_url_chains ON downloads.id = downloads_url_chains.id"
downloadsResultSet = downloadsDb.runQuery(queryString)
if downloadsResultSet is not None:
while downloadsResultSet.next():
                        startTime = downloadsResultSet.getLong("start_time") / 1000000 - 11644473600 # Webkit time format
downloadsDbHelper.addWebDownload( downloadsResultSet.getString("url"),
startTime,
downloadsResultSet.getString("target_path"),
self._PROGRAM_NAME)
except SQLException as ex:
self._logger.log(Level.WARNING, "Error processing query results for Opera downloads.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
self._logger.log(Level.SEVERE, "Failed to add Opera download artifacts.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
self._logger.log(Level.WARNING, "Failed to post artifacts.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
finally:
downloadsDb.close()
def analyzeAutofill(self, dataSource, fileManager, context):
autofillDbs = AppSQLiteDB.findAppDatabases(dataSource, "Web Data", True, self._PACKAGE_NAME)
for autofillDb in autofillDbs:
try:
autofillDbHelper = WebBrowserArtifactsHelper(self.current_case.getSleuthkitCase(),
self._MODULE_NAME, autofillDb.getDBFile(), context.getJobId())
autofillsResultSet = autofillDb.runQuery("SELECT name, value, count, date_created FROM autofill")
if autofillsResultSet is not None:
while autofillsResultSet.next():
creationTime = autofillsResultSet.getLong("date_created") / 1000000 - 11644473600 #Webkit time format
autofillDbHelper.addWebFormAutofill( autofillsResultSet.getString("name"),
autofillsResultSet.getString("value"),
creationTime,
0,
autofillsResultSet.getInt("count"))
except SQLException as ex:
self._logger.log(Level.WARNING, "Error processing query results for Opera autofill.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
self._logger.log(Level.SEVERE, "Failed to add Opera autofill artifacts.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
self._logger.log(Level.WARNING, "Failed to post artifacts.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
finally:
autofillDb.close()
def analyzeWebFormAddress(self, dataSource, fileManager, context):
webFormAddressDbs = AppSQLiteDB.findAppDatabases(dataSource, "Web Data", True, self._PACKAGE_NAME)
for webFormAddressDb in webFormAddressDbs:
try:
webFormAddressDbHelper = WebBrowserArtifactsHelper(self.current_case.getSleuthkitCase(),
self._MODULE_NAME, webFormAddressDb.getDBFile(), context.getJobId())
queryString = """
SELECT street_address, city, state, zipcode, country_code,
date_modified, first_name, last_name, number, email
FROM autofill_profiles
INNER JOIN autofill_profile_names ON autofill_profiles.guid = autofill_profile_names.guid
INNER JOIN autofill_profile_phones ON autofill_profiles.guid = autofill_profile_phones.guid
INNER JOIN autofill_profile_emails ON autofill_profiles.guid = autofill_profile_emails.guid
"""
webFormAddressResultSet = webFormAddressDb.runQuery(queryString)
if webFormAddressResultSet is not None:
while webFormAddressResultSet.next():
personName = webFormAddressResultSet.getString("first_name") + " " + webFormAddressResultSet.getString("last_name")
address = '\n'.join([ webFormAddressResultSet.getString("street_address"),
webFormAddressResultSet.getString("city"),
webFormAddressResultSet.getString("state") + " " + webFormAddressResultSet.getString("zipcode"),
webFormAddressResultSet.getString("country_code") ])
creationTime = webFormAddressResultSet.getLong("date_modified") / 1000000 - 11644473600
                        webFormAddressDbHelper.addWebFormAddress( personName,
                                                                   webFormAddressResultSet.getString("email"),
                                                                   webFormAddressResultSet.getString("number"),
                                                                   address,
                                                                   creationTime,
                                                                   0,
                                                                   0)
except SQLException as ex:
self._logger.log(Level.WARNING, "Error processing query results for Opera web form addresses.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
self._logger.log(Level.SEVERE, "Failed to add Opera form address artifacts.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
self._logger.log(Level.WARNING, "Failed to post artifacts.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
finally:
webFormAddressDb.close()
def analyze(self, dataSource, fileManager, context):
## open current case
try:
self.current_case = Case.getCurrentCaseThrows()
except NoCurrentCaseException as ex:
self._logger.log(Level.WARNING, "No case currently open.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
return
self.analyzeCookies(dataSource, fileManager, context)
self.analyzeHistory(dataSource, fileManager, context)
self.analyzeDownloads(dataSource, fileManager, context)
self.analyzeAutofill(dataSource, fileManager, context)
self.analyzeWebFormAddress(dataSource, fileManager, context)
| 58.889734 | 146 | 0.574574 |
41840ce2b244ecb77d361f120bc4ce71531b7173 | 922 | py | Python | tests/wfc3/test_uvis_25single.py | rendinam/hstcal | e08676b02e4c7cd06e3d5630b62f7b59951ac8c3 | ["BSD-3-Clause"] | 8 | 2016-07-28T15:14:27.000Z | 2020-04-02T16:37:23.000Z | tests/wfc3/test_uvis_25single.py | rendinam/hstcal | e08676b02e4c7cd06e3d5630b62f7b59951ac8c3 | ["BSD-3-Clause"] | 484 | 2016-03-14T20:44:42.000Z | 2022-03-31T15:54:38.000Z | tests/wfc3/test_uvis_25single.py | rendinam/hstcal | e08676b02e4c7cd06e3d5630b62f7b59951ac8c3 | ["BSD-3-Clause"] | 21 | 2016-03-14T14:22:35.000Z | 2022-02-07T18:41:49.000Z | import subprocess
import pytest
from ..helpers import BaseWFC3
class TestUVIS25Single(BaseWFC3):
"""
Test pos UVIS2 NGC7318B data
"""
detector = 'uvis'
def _single_raw_calib(self, rootname):
raw_file = '{}_raw.fits'.format(rootname)
# Prepare input file.
self.get_input_file(raw_file)
# Run CALWF3
subprocess.call(['calwf3.e', raw_file, '-vt'])
# Compare results
outputs = [('{}_flt.fits'.format(rootname),
'{}_flt_ref.fits'.format(rootname))]
self.compare_outputs(outputs)
# Ported from ``calwf3_uv_25``.
@pytest.mark.parametrize(
'rootname', ['iacr52vjq'])
# 'rootname', ['iacr52vjq',
# 'iacr52vlq',
# 'iacr52voq',
# 'iacr52vqq'])
def test_uvis_25single(self, rootname):
self._single_raw_calib(rootname)
| 24.918919 | 56 | 0.567245 |
ca0fe76d3ee2ef9110e09f71b0bffa24ef95ca55 | 3,370 | py | Python | improver_tests/psychrometric_calculations/test_calculate_svp_in_air.py | nivnac/improver | c16c794f62598017cebc6ae4f99af8f317219a77 | ["BSD-3-Clause"] | null | null | null | improver_tests/psychrometric_calculations/test_calculate_svp_in_air.py | nivnac/improver | c16c794f62598017cebc6ae4f99af8f317219a77 | ["BSD-3-Clause"] | 3 | 2020-04-25T12:55:42.000Z | 2020-07-23T11:50:46.000Z | improver_tests/psychrometric_calculations/test_calculate_svp_in_air.py | Kat-90/improver | a5c31be3430df429ae38e7c16e267fcbc2af1858 | ["BSD-3-Clause"] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for psychrometric_calculations calculate_svp_in_air"""
import unittest
import numpy as np
from iris.tests import IrisTest
from improver.psychrometric_calculations.psychrometric_calculations import (
_svp_from_lookup,
calculate_svp_in_air,
)
class Test_calculate_svp_in_air(IrisTest):
"""Test the calculate_svp_in_air function"""
def setUp(self):
"""Set up test data"""
self.temperature = np.array([[185.0, 260.65, 338.15]], dtype=np.float32)
self.pressure = np.array([[1.0e5, 9.9e4, 9.8e4]], dtype=np.float32)
def test_calculate_svp_in_air(self):
"""Test pressure-corrected SVP values"""
expected = np.array([[0.01362905, 208.47170252, 25187.76423485]])
result = calculate_svp_in_air(self.temperature, self.pressure)
np.testing.assert_allclose(result, expected, rtol=1e-5, atol=1e-5)
def test_values(self):
"""Basic extraction of SVP values from lookup table"""
self.temperature[0, 1] = 260.56833
expected = [[1.350531e-02, 2.06000274e02, 2.501530e04]]
result = _svp_from_lookup(self.temperature)
np.testing.assert_allclose(result, expected, rtol=1e-5, atol=1e-5)
def test_beyond_table_bounds(self):
"""Extracting SVP values from the table with temperatures beyond
its valid range. Should return the nearest end of the table."""
self.temperature[0, 0] = 150.0
self.temperature[0, 2] = 400.0
expected = [[9.664590e-03, 2.075279e02, 2.501530e04]]
result = _svp_from_lookup(self.temperature)
np.testing.assert_allclose(result, expected, rtol=1e-5, atol=1e-5)
if __name__ == "__main__":
unittest.main()
| 43.766234 | 80 | 0.71365 |
7dc09db33128528b242038ecac0298b16decfd13 | 11,546 | py | Python | tests/io/test_video.py | preeti98/sleap | 203c3a03c0c54f8dab242611d9a8d24595e98081 | [
"BSD-3-Clause-Clear"
] | null | null | null | tests/io/test_video.py | preeti98/sleap | 203c3a03c0c54f8dab242611d9a8d24595e98081 | [
"BSD-3-Clause-Clear"
] | null | null | null | tests/io/test_video.py | preeti98/sleap | 203c3a03c0c54f8dab242611d9a8d24595e98081 | [
"BSD-3-Clause-Clear"
] | null | null | null | import pytest
import os
import h5py
import numpy as np
from sleap.io.video import Video, HDF5Video, MediaVideo, DummyVideo
from tests.fixtures.videos import (
TEST_H5_FILE,
TEST_SMALL_ROBOT_MP4_FILE,
TEST_H5_DSET,
TEST_H5_INPUT_FORMAT,
)
# FIXME:
# Parameterizing fixtures with fixtures is annoying so this leads to a lot
# of redundant test code here.
# See: https://github.com/pytest-dev/pytest/issues/349
def test_from_filename():
assert type(Video.from_filename(TEST_H5_FILE).backend) == HDF5Video
assert type(Video.from_filename(TEST_SMALL_ROBOT_MP4_FILE).backend) == MediaVideo
def test_backend_extra_kwargs():
Video.from_filename(TEST_H5_FILE, grayscale=True, another_kwarg=False)
Video.from_filename(
TEST_SMALL_ROBOT_MP4_FILE, dataset="no dataset", fake_kwarg=True
)
def test_grayscale_video():
assert Video.from_filename(TEST_SMALL_ROBOT_MP4_FILE, grayscale=True).shape[-1] == 1
def test_hdf5_get_shape(hdf5_vid):
assert hdf5_vid.shape == (42, 512, 512, 1)
def test_hdf5_len(hdf5_vid):
assert len(hdf5_vid) == 42
def test_hdf5_dtype(hdf5_vid):
assert hdf5_vid.dtype == np.uint8
def test_hdf5_get_frame(hdf5_vid):
assert hdf5_vid.get_frame(0).shape == (512, 512, 1)
def test_hdf5_get_frames(hdf5_vid):
assert hdf5_vid.get_frames(0).shape == (1, 512, 512, 1)
assert hdf5_vid.get_frames([0, 1]).shape == (2, 512, 512, 1)
def test_hdf5_get_item(hdf5_vid):
assert hdf5_vid[0].shape == (1, 512, 512, 1)
assert np.alltrue(hdf5_vid[1:10:3] == hdf5_vid.get_frames([1, 4, 7]))
def test_hdf5_file_not_found():
with pytest.raises(FileNotFoundError):
Video.from_hdf5("non-existent-filename.h5", "dataset_name").height
def test_mp4_get_shape(small_robot_mp4_vid):
assert small_robot_mp4_vid.shape == (166, 320, 560, 3)
def test_mp4_fps(small_robot_mp4_vid):
assert small_robot_mp4_vid.fps == 30.0
def test_mp4_len(small_robot_mp4_vid):
assert len(small_robot_mp4_vid) == 166
def test_mp4_dtype(small_robot_mp4_vid):
assert small_robot_mp4_vid.dtype == np.uint8
def test_mp4_get_frame(small_robot_mp4_vid):
assert small_robot_mp4_vid.get_frame(0).shape == (320, 560, 3)
def test_mp4_get_frames(small_robot_mp4_vid):
assert small_robot_mp4_vid.get_frames(0).shape == (1, 320, 560, 3)
assert small_robot_mp4_vid.get_frames([0, 1]).shape == (2, 320, 560, 3)
def test_mp4_get_item(small_robot_mp4_vid):
assert small_robot_mp4_vid[0].shape == (1, 320, 560, 3)
assert np.alltrue(
small_robot_mp4_vid[1:10:3] == small_robot_mp4_vid.get_frames([1, 4, 7])
)
def test_mp4_file_not_found():
with pytest.raises(FileNotFoundError):
vid = Video.from_media("non-existent-filename.mp4")
vid.channels
def test_numpy_frames(small_robot_mp4_vid):
clip_frames = small_robot_mp4_vid.get_frames((3, 7, 9))
np_vid = Video.from_numpy(clip_frames)
assert np.all(np.equal(np_vid.get_frame(1), small_robot_mp4_vid.get_frame(7)))
def test_is_missing():
vid = Video.from_media(TEST_SMALL_ROBOT_MP4_FILE)
assert not vid.is_missing
vid = Video.from_media("non-existent-filename.mp4")
assert vid.is_missing
vid = Video.from_numpy(
Video.from_media(TEST_SMALL_ROBOT_MP4_FILE).get_frames((3, 7, 9))
)
assert not vid.is_missing
@pytest.mark.parametrize("format", ["png", "jpg", "mjpeg/avi"])
def test_imgstore_video(small_robot_mp4_vid, tmpdir, format):
path = os.path.join(tmpdir, "test_imgstore")
# If format is video, test saving all the frames.
if format == "mjpeg/avi":
frame_indices = None
else:
frame_indices = [0, 1, 5]
# Save and imgstore version of the first few frames of this
# video.
if format == "png":
# Check that the default format is "png"
imgstore_vid = small_robot_mp4_vid.to_imgstore(
path, frame_numbers=frame_indices
)
else:
imgstore_vid = small_robot_mp4_vid.to_imgstore(
path, frame_numbers=frame_indices, format=format
)
if frame_indices is None:
assert small_robot_mp4_vid.num_frames == imgstore_vid.num_frames
        # Make sure we can read the first 10 frames; it takes too long to read them all.
for i in range(10):
assert type(imgstore_vid.get_frame(i)) == np.ndarray
else:
assert imgstore_vid.num_frames == len(frame_indices)
# Make sure we can read arbitrary frames by imgstore frame number
for i in frame_indices:
assert type(imgstore_vid.get_frame(i)) == np.ndarray
assert imgstore_vid.channels == 3
assert imgstore_vid.height == 320
assert imgstore_vid.width == 560
    # Check the image data matches closely when a lossless format is used.
if format == "png":
assert np.allclose(
imgstore_vid.get_frame(0), small_robot_mp4_vid.get_frame(0), rtol=0.91
)
def test_imgstore_indexing(small_robot_mp4_vid, tmpdir):
"""
Test different types of indexing (by frame number or index) supported
by only imgstore videos.
"""
path = os.path.join(tmpdir, "test_imgstore")
frame_indices = [20, 40, 15]
imgstore_vid = small_robot_mp4_vid.to_imgstore(
path, frame_numbers=frame_indices, index_by_original=False
)
# Index by frame index in imgstore
frames = imgstore_vid.get_frames([0, 1, 2])
assert frames.shape == (3, 320, 560, 3)
assert imgstore_vid.last_frame_idx == len(frame_indices) - 1
with pytest.raises(ValueError):
imgstore_vid.get_frames(frame_indices)
# Now re-create the imgstore with frame number indexing, (the default)
imgstore_vid = small_robot_mp4_vid.to_imgstore(path, frame_numbers=frame_indices)
# Index by frame index in imgstore
frames = imgstore_vid.get_frames(frame_indices)
assert frames.shape == (3, 320, 560, 3)
assert imgstore_vid.last_frame_idx == max(frame_indices)
with pytest.raises(ValueError):
imgstore_vid.get_frames([0, 1, 2])
def test_imgstore_deferred_loading(small_robot_mp4_vid, tmpdir):
path = os.path.join(tmpdir, "test_imgstore")
frame_indices = [20, 40, 15]
vid = small_robot_mp4_vid.to_imgstore(path, frame_numbers=frame_indices)
# This is actually testing that the __img will be loaded when needed,
# since we use __img to get dtype.
assert vid.dtype == np.dtype("uint8")
def test_imgstore_single_channel(centered_pair_vid, tmpdir):
path = os.path.join(tmpdir, "test_imgstore")
frame_indices = [20, 40, 15]
vid = centered_pair_vid.to_imgstore(path, frame_numbers=frame_indices)
assert vid.channels == 1
def test_imgstore_no_frames(small_robot_mp4_vid, tmpdir):
path = os.path.join(tmpdir, "test_imgstore")
frame_indices = []
vid = small_robot_mp4_vid.to_imgstore(path, frame_numbers=frame_indices)
# This is actually testing that the __img will be loaded when needed,
# since we use __img to get dtype.
assert vid.dtype == np.dtype("uint8")
def test_empty_hdf5_video(small_robot_mp4_vid, tmpdir):
path = os.path.join(tmpdir, "test_to_hdf5")
hdf5_vid = small_robot_mp4_vid.to_hdf5(path, "testvid", frame_numbers=[])
@pytest.mark.parametrize("format", ["", "png", "jpg"])
def test_hdf5_inline_video(small_robot_mp4_vid, tmpdir, format):
path = os.path.join(tmpdir, f"test_to_hdf5_{format}")
frame_indices = [0, 1, 5]
# Save hdf5 version of the first few frames of this video.
hdf5_vid = small_robot_mp4_vid.to_hdf5(
path, "testvid", format=format, frame_numbers=frame_indices
)
assert hdf5_vid.num_frames == len(frame_indices)
# Make sure we can read arbitrary frames by imgstore frame number
for i in frame_indices:
assert type(hdf5_vid.get_frame(i)) == np.ndarray
assert hdf5_vid.channels == 3
assert hdf5_vid.height == 320
assert hdf5_vid.width == 560
# Try loading a frame from the source video that's not in the inline video
assert hdf5_vid.get_frame(3).shape == (320, 560, 3)
    # Check the image data matches closely when a lossless format is used.
if format in ("", "png"):
assert np.allclose(
hdf5_vid.get_frame(0), small_robot_mp4_vid.get_frame(0), rtol=0.91
)
def test_hdf5_indexing(small_robot_mp4_vid, tmpdir):
"""
Test different types of indexing (by frame number or index).
"""
path = os.path.join(tmpdir, "test_to_hdf5")
frame_indices = [20, 40, 15]
hdf5_vid = small_robot_mp4_vid.to_hdf5(
path, dataset="testvid2", frame_numbers=frame_indices, index_by_original=False
)
# Index by frame index in newly saved video
frames = hdf5_vid.get_frames([0, 1, 2])
assert frames.shape == (3, 320, 560, 3)
assert hdf5_vid.last_frame_idx == len(frame_indices) - 1
# Disable loading frames from the original source video
hdf5_vid.backend.enable_source_video = False
with pytest.raises(ValueError):
hdf5_vid.get_frames(frame_indices)
# We have to close file before we can add another video dataset.
hdf5_vid.close()
# Now re-create the imgstore with frame number indexing, (the default)
hdf5_vid2 = small_robot_mp4_vid.to_hdf5(
path, dataset="testvid3", frame_numbers=frame_indices
)
# Disable loading frames from the original source video
hdf5_vid2.backend.enable_source_video = False
# Index by frame index in original video
frames = hdf5_vid2.get_frames(frame_indices)
assert frames.shape == (3, 320, 560, 3)
assert hdf5_vid2.last_frame_idx == max(frame_indices)
with pytest.raises(ValueError):
hdf5_vid2.get_frames([0, 1, 2])
def test_hdf5_vid_from_open_dataset():
with h5py.File(TEST_H5_FILE, "r") as f:
dataset = f[TEST_H5_DSET]
vid = Video(
backend=HDF5Video(
filename=f, dataset=dataset, input_format=TEST_H5_INPUT_FORMAT
)
)
assert vid.shape == (42, 512, 512, 1)
def test_dummy_video():
vid = Video(backend=DummyVideo("foo", 10, 20, 30, 3))
assert vid.filename == "foo"
assert vid.height == 10
assert vid.width == 20
assert vid.frames == 30
assert vid.channels == 3
assert vid[0].shape == (1, 10, 20, 3)
def test_images_video():
filenames = [f"tests/data/videos/robot{i}.jpg" for i in range(3)]
vid = Video.from_image_filenames(filenames)
assert vid.frames == len(filenames)
assert vid.height == 320
assert vid.width == 560
assert vid.channels == 3
assert vid[0].shape == (1, 320, 560, 3)
def test_imgstore_from_filenames(tmpdir):
temp_filename = os.path.join(tmpdir, "test_imgstore")
filenames = [f"tests/data/videos/robot{i}.jpg" for i in range(3)]
vid = Video.imgstore_from_filenames(filenames, temp_filename)
assert vid.frames == len(filenames)
assert vid.height == 320
assert vid.width == 560
assert vid.channels == 3
assert vid[0].shape == (1, 320, 560, 3)
def test_safe_frame_loading(small_robot_mp4_vid):
vid = small_robot_mp4_vid
frame_count = vid.frames
with pytest.raises(KeyError):
vid.get_frames([1, 2, frame_count + 5])
idxs, frames = vid.get_frames_safely([1, 2, frame_count + 5])
assert idxs == [1, 2]
assert len(frames) == 2
def test_safe_frame_loading_all_invalid():
vid = Video.from_filename("video_that_does_not_exist.mp4")
idxs, frames = vid.get_frames_safely(list(range(10)))
assert idxs == []
assert frames is None | 29.911917 | 88 | 0.697211 |
2baeefc2fa4c25f7182f28b7dc15aa301a2d6832 | 3,908 | py | Python | stratopy/IO.py | jorobledo/StratoPy | c2a705cb57f6ca13758d6d7a5f93ef9f5faa4d5d | [
"MIT"
] | null | null | null | stratopy/IO.py | jorobledo/StratoPy | c2a705cb57f6ca13758d6d7a5f93ef9f5faa4d5d | [
"MIT"
] | null | null | null | stratopy/IO.py | jorobledo/StratoPy | c2a705cb57f6ca13758d6d7a5f93ef9f5faa4d5d | [
"MIT"
] | null | null | null | import io
import os
import pathlib
import tempfile
from ftplib import FTP
from diskcache import Cache
from diskcache.core import ENOVAL
import s3fs
from . import merger
from .cloudsat import read_hdf
from .goes import read_nc
# type: ignore
DEFAULT_CACHE_PATH = pathlib.Path(
os.path.expanduser(os.path.join("~", "stratopy_cache"))
)
def fetch_cloudsat(
dirname,
user,
passwd,
host="ftp.cloudsat.cira.colostate.edu",
tag="stratopy-cloudsat",
path=DEFAULT_CACHE_PATH,
):
"""Fetch files of a certain date from cloudsat ftp server and
stores in a local cache.
Parameters
----------
dirname : ``str``,
path to cloudsat image.
user : `str`, username for cloudsat ftp connection.
passwd : `str`, password for cloudsat ftp connection.
host : `str`, name of the url where the file is hosted.
tag : `str`, tag to be added to the cached file.
path : `str`, path where to save the cached file.
Returns
-------
df : `stratopy.cloudsat.CloudSatFrame`,
dataframe containing the image data.
"""
cache = Cache(path)
# Transform dirname into cache id (exact date)
id_ = os.path.split(dirname)[-1].split("_")[0]
# Search in local cache
cache.expire()
result = cache.get(id_, default=ENOVAL, retry=True)
if result is ENOVAL:
ftp = FTP()
ftp.connect(host=host)
ftp.login(user, passwd)
buffer_file = io.BytesIO()
ftp.retrbinary(f"RETR {dirname}", buffer_file.write)
result = buffer_file.getvalue()
cache.set(id_, result, tag=tag)
with tempfile.TemporaryDirectory() as tmpdirname:
fname = os.path.join(tmpdirname, id_)
with open(fname, "wb") as fp:
fp.write(result)
df = read_hdf(fname)
return df
# This function could easily be extended to download by approximate date:
# simply store the folder's files in a list (with s3fs.ls), then look for
# the one closest to the desired time within a given margin.
# It could be implemented if the same can be done with CloudSat
# (the problem there is the orbit number).
def fetch_goes(
dirname,
tag="stratopy-goes",
path=DEFAULT_CACHE_PATH,
):
"""Fetch files of a certain date from GOES server and
stores in a local cache.
Parameters:
-----------
dirname : `str`, name of the cached file.
tag : `str`, tag to append to name of cached file.
path : `str`, location where to save the cached file.
Returns:
--------
goes_obj : `netCDF4.Dataset`, goes image data.
"""
cache = Cache(path)
# Transform dirname into cache id
id_ = os.path.split(dirname)[-1].split("_")[0]
# Search in local cache
cache.expire()
result = cache.get(id_, default=ENOVAL, retry=True)
if result is ENOVAL:
# Starts connection with AWS S3 bucket
s3 = s3fs.S3FileSystem(anon=True)
# Open in-memory binary and write it
buffer_file = io.BytesIO()
with s3.open(dirname, "rb") as f:
buffer_file.write(f.read())
result = buffer_file.getvalue()
cache.set(id_, result, tag=tag)
with tempfile.TemporaryDirectory() as tmpdirname:
fname = os.path.join(tmpdirname, id_)
with open(fname, "wb") as fp:
fp.write(result)
goes_obj = read_nc((fname,))
return goes_obj
def fetch(cloudsat_id, goes_id, cloudsat_kw=None, goes_kw=None):
"""[Summary]"""
# Anon connection.
goes_kw = {} if goes_kw is None else goes_kw
goes_data = fetch_goes(goes_id, **goes_kw)
# In this case cloudsat_kw can't be empty:
# must have user and password to connect with server
cloudsat_kw = {} if cloudsat_kw is None else cloudsat_kw
cloudsat_data = fetch_cloudsat(cloudsat_id, **cloudsat_kw)
return merger.StratoFrame(goes_data, cloudsat_data)
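# Usage sketch (every path, id, and credential below is a placeholder --
# real CloudSat/GOES object keys look different):
#
#     frame = fetch(
#         cloudsat_id="Data/.../<granule>.hdf",
#         goes_id="noaa-goes16/ABI-.../<file>.nc",
#         cloudsat_kw={"user": "me", "passwd": "secret"},
#     )
#
# Both helpers check the diskcache under ~/stratopy_cache first and only
# download (FTP for CloudSat, anonymous S3 for GOES) on a cache miss.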
| 26.585034 | 79 | 0.652764 |
1b6bfe7af21e5d9293ea9eb484527a749727df2d | 888 | py | Python | pi/robot/camera.py | maxschwe/PiCar | 81d83cd0f8a32d4c0faf8c36a13e09da732244d3 | [
"MIT"
] | null | null | null | pi/robot/camera.py | maxschwe/PiCar | 81d83cd0f8a32d4c0faf8c36a13e09da732244d3 | [
"MIT"
] | null | null | null | pi/robot/camera.py | maxschwe/PiCar | 81d83cd0f8a32d4c0faf8c36a13e09da732244d3 | [
"MIT"
] | null | null | null | import time
import cv2
import numpy as np
from imutils.video.pivideostream import PiVideoStream
import logging
class Camera(object):
def __init__(self, flip=False):
try:
self.vs = PiVideoStream(resolution=(320, 240))
self.vs.start()
        except Exception:
logging.error("Failed to start camera")
self.flip = flip
def _flip_if_needed(self, frame):
if self.flip:
return np.flip(frame, 0)
return frame
def get_frame(self):
frame = self._flip_if_needed(self.vs.read())
_, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tobytes()
def gen_videostream(self):
while True:
            frame = self.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
def __del__(self):
self.vs.stop()
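# Usage sketch (Flask's Response is shown only as an assumed consumer of
# the MJPEG generator; it is not imported by this module):
#
#     camera = Camera(flip=True)
#     return Response(camera.gen_videostream(),
#                     mimetype='multipart/x-mixed-replace; boundary=frame')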
| 24.666667 | 77 | 0.577703 |
72f6421b98fad1b4d699781c85c673dc12fd4746 | 3,553 | py | Python | backend/autobets/apimb/migrations/0018_reportsbets_reportsmarket_reportsselections.py | slarkjm0803/autobets | f92a5d999acaf5d7c83ca2768a260c2282eabbee | [
"MIT"
] | 1 | 2020-03-06T14:29:12.000Z | 2020-03-06T14:29:12.000Z | backend/autobets/apimb/migrations/0018_reportsbets_reportsmarket_reportsselections.py | slarkjm0803/autobets | f92a5d999acaf5d7c83ca2768a260c2282eabbee | [
"MIT"
] | null | null | null | backend/autobets/apimb/migrations/0018_reportsbets_reportsmarket_reportsselections.py | slarkjm0803/autobets | f92a5d999acaf5d7c83ca2768a260c2282eabbee | [
"MIT"
] | null | null | null | # Generated by Django 2.0.6 on 2019-11-26 12:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('apimb', '0017_sessiontoken'),
]
operations = [
migrations.CreateModel(
name='ReportsBets',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('i_d', models.CharField(max_length=50, null=True)),
('offer_id', models.CharField(max_length=50, null=True)),
('odds', models.CharField(max_length=50, null=True)),
('stake', models.CharField(max_length=50, null=True)),
('adjusted', models.CharField(max_length=50, null=True)),
('originator', models.CharField(max_length=50, null=True)),
('inplay', models.CharField(max_length=50, null=True)),
('submitted_time', models.CharField(max_length=50, null=True)),
('matched_time', models.CharField(max_length=50, null=True)),
('settled_time', models.CharField(max_length=50, null=True)),
('result', models.CharField(max_length=50, null=True)),
('profit_and_loss', models.CharField(max_length=50, null=True)),
('commission_type', models.CharField(max_length=50, null=True)),
('net_profit_and_loss', models.CharField(max_length=50, null=True)),
],
),
migrations.CreateModel(
name='ReportsMarket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('i_d', models.CharField(max_length=50, null=True)),
('name', models.CharField(max_length=50, null=True)),
('event_id', models.CharField(max_length=50, null=True)),
('event_name', models.CharField(max_length=50, null=True)),
('sport_id', models.CharField(max_length=50, null=True)),
('start_time', models.CharField(max_length=50, null=True)),
('settled_time', models.CharField(max_length=50, null=True)),
('stake', models.CharField(max_length=50, null=True)),
('commission', models.CharField(max_length=50, null=True)),
('profit_and_loss', models.CharField(max_length=50, null=True)),
('net_profit_and_loss', models.CharField(max_length=50, null=True)),
('selections', models.CharField(max_length=50, null=True)),
],
),
migrations.CreateModel(
name='ReportsSelections',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('runner_id', models.CharField(max_length=50, null=True)),
('runner_name', models.CharField(max_length=50, null=True)),
('side', models.CharField(max_length=50, null=True)),
('odds', models.CharField(max_length=50, null=True)),
('stake', models.CharField(max_length=50, null=True)),
('profit_and_loss', models.CharField(max_length=50, null=True)),
('commission', models.CharField(max_length=50, null=True)),
('net_profit_and_loss', models.CharField(max_length=50, null=True)),
('bets', models.CharField(max_length=50, null=True)),
],
),
]
| 53.029851 | 114 | 0.580918 |
7718833a093991bfdb648325eb7d170c74628b76 | 3,134 | py | Python | test/test_include.py | iafisher/hera-py | 773729870cd593f0dc55ac6fe8b9c8690faa73c1 | [
"MIT"
] | 2 | 2020-10-27T19:16:45.000Z | 2020-10-27T19:26:44.000Z | test/test_include.py | iafisher/hera-py | 773729870cd593f0dc55ac6fe8b9c8690faa73c1 | [
"MIT"
] | 141 | 2018-11-20T16:55:06.000Z | 2021-03-29T01:07:59.000Z | test/test_include.py | iafisher/hera-py | 773729870cd593f0dc55ac6fe8b9c8690faa73c1 | [
"MIT"
] | 1 | 2019-02-27T22:02:33.000Z | 2019-02-27T22:02:33.000Z | import pytest
from hera.main import main
from .utils import execute_program_helper
def test_basic_include():
program = """\
#include "test/assets/include/lib/add.hera"
SET(R1, 20)
SET(R2, 22)
CALL(R12, add)
"""
vm = execute_program_helper(program)
assert vm.registers[1] == 20
assert vm.registers[2] == 22
assert vm.registers[3] == 42
for r in vm.registers[4:11]:
assert r == 0
assert not vm.flag_sign
assert not vm.flag_zero
assert not vm.flag_overflow
assert not vm.flag_carry
assert not vm.flag_carry_block
def test_multiple_includes():
program = """\
#include "test/assets/include/lib/r1_to_42.hera"
#include "test/assets/include/lib/r2_to_42.hera"
SET(R3, 42)
"""
vm = execute_program_helper(program)
assert vm.registers[1] == 42
assert vm.registers[2] == 42
assert vm.registers[3] == 42
def test_recursive_program(capsys):
with pytest.raises(SystemExit):
main(["test/assets/include/recursive.hera"])
captured = capsys.readouterr()
assert "recursive include" in captured.err
assert '#include "recursive.hera"' in captured.err
assert "line 1 col 10 of test/assets/include/recursive.hera" in captured.err
def test_mutually_recursive_programs(capsys):
with pytest.raises(SystemExit):
main(["test/assets/include/mutually_recursive1.hera"])
captured = capsys.readouterr()
assert "recursive include" in captured.err
assert '#include "mutually_recursive1.hera"' in captured.err
assert (
"line 1 col 10 of test/assets/include/mutually_recursive2.hera" in captured.err
)
def test_nonexistent_path_program(capsys):
with pytest.raises(SystemExit):
execute_program_helper('#include "test/assets/include/whatever.hera"')
captured = capsys.readouterr()
assert 'file "test/assets/include/whatever.hera" does not exist' in captured.err
def test_include_stdin_program(capsys):
program = '#include "-"\n#include "<stdin>"'
with pytest.raises(SystemExit):
execute_program_helper(program)
captured = capsys.readouterr()
assert 'file "-" does not exist' in captured.err
# We want this error and not a recursive include error, i.e. we need to distinguish
# between actual standard input and a file called "<stdin>".
assert 'file "<stdin>" does not exist' in captured.err
def test_use_of_ifdef(capsys):
program = """\
SET(R1, 42)
#ifdef HERA_PY
SET(R2, 42)
#else
SET(R3, 666)
#endif
#ifdef HERA_C
R4 = 666;
#else
SET(R5, 42)
#endif
SET(R6, 42)
"""
vm = execute_program_helper(program)
assert vm.registers[1] == 42
assert vm.registers[2] == 42
assert vm.registers[3] == 0
assert vm.registers[4] == 0
assert vm.registers[5] == 42
assert vm.registers[6] == 42
def test_use_of_ifdef_in_included_program(capsys):
vm = execute_program_helper('#include "test/assets/include/ifdef.hera"')
assert vm.registers[1] == 42
assert vm.registers[2] == 42
assert vm.registers[3] == 0
assert vm.registers[4] == 0
assert vm.registers[5] == 42
assert vm.registers[6] == 42
| 25.479675 | 87 | 0.687939 |
2fb6eefcf6c5187dd590facee6c9475da1343579 | 2,404 | py | Python | bzt/six/py3.py | IamSaurabh1/taurus | 928d44e30e6cd5b979e675bfdce4c1dbeb5d0eff | [
"Apache-2.0"
] | 1 | 2019-12-05T14:57:58.000Z | 2019-12-05T14:57:58.000Z | bzt/six/py3.py | IamSaurabh1/taurus | 928d44e30e6cd5b979e675bfdce4c1dbeb5d0eff | [
"Apache-2.0"
] | null | null | null | bzt/six/py3.py | IamSaurabh1/taurus | 928d44e30e6cd5b979e675bfdce4c1dbeb5d0eff | [
"Apache-2.0"
] | null | null | null | """
Module for reporting into http://www.blazemeter.com/ service
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: skip-file
import io
import sys
import operator
import collections
import traceback
import urllib
import socketserver
import configparser
from http import server, cookiejar
string_types = str,
integer_types = int,
numeric_types = (int, float, complex)
class_types = type,
text_type = str
binary_type = bytes
file_type = io.IOBase
configparser = configparser
UserDict = collections.UserDict
StringIO = io.StringIO
BytesIO = io.BytesIO
cookielib = cookiejar
request = urllib.request
parse = urllib.parse
urlopen = request.urlopen
urlencode = parse.urlencode
build_opener = request.build_opener
install_opener = request.install_opener
ProxyHandler = request.ProxyHandler
Request = request.Request
HTTPError = urllib.error.HTTPError
URLError = urllib.error.URLError
BaseHTTPServer = server
socketserver = socketserver
SimpleHTTPRequestHandler = BaseHTTPServer.SimpleHTTPRequestHandler
url2pathname = urllib.request.url2pathname
viewvalues = operator.methodcaller("values")
r_input = input
def iteritems(dictionary, **kw):
return iter(dictionary.items(**kw))
def b(string):
return string.encode("latin-1")
def u(string):
return string
def get_stacktrace(exc):
return ''.join(traceback.format_tb(exc.__traceback__)).rstrip()
def reraise(exc_info, exc=None):
_type, message, stacktrace = exc_info
if exc is None:
exc = _type(message)
exc.__traceback__ = stacktrace
raise exc
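# Illustrative use of reraise (an assumption, not taken from this project):
#
#     try:
#         risky()
#     except Exception:
#         reraise(sys.exc_info())
#
# which raises a fresh exception of the same type while keeping the
# traceback captured at the original raise site.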
def stream_decode(string):
if not isinstance(string, text_type):
return string.decode()
else:
return string
def deunicode(string, errors="strict"):
"""
    If the string is unicode, convert it to a basic string; otherwise leave it.
    Does nothing on Python 3, as there are no basic strings there.
"""
return string
| 23.568627 | 76 | 0.761647 |
21cf7ee60e2efc1d13010cad457f74e39fc79af8 | 2,473 | py | Python | otcextensions/tests/unit/sdk/nat/v2/test_snat.py | spielkind/python-otcextensions | 47ba917df2d85db6cb347f2038fd7f79a8a806b7 | [
"Apache-2.0"
] | null | null | null | otcextensions/tests/unit/sdk/nat/v2/test_snat.py | spielkind/python-otcextensions | 47ba917df2d85db6cb347f2038fd7f79a8a806b7 | [
"Apache-2.0"
] | null | null | null | otcextensions/tests/unit/sdk/nat/v2/test_snat.py | spielkind/python-otcextensions | 47ba917df2d85db6cb347f2038fd7f79a8a806b7 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
from otcextensions.sdk.nat.v2 import snat
INSTANCE_ID = '5b95c675-69c2-4656-ba06-58ff72e1d338'
EXAMPLE = {
'floating_ip_id': 'bdc10a4c-d81a-41ec-adf7-de857f7c812a',
'status': 'PENDING_CREATE',
'nat_gateway_id': 'a78fb3eb-1654-4710-8742-3fc49d5f04f8',
'admin_state_up': True,
'network_id': 'eaad9cd6-2372-4be1-9535-9bd37210ae7b',
'cidr': None,
'source_type': 0,
'project_id': '27e25061336f4af590faeabeb7fcd9a3',
'created_at': '2017-11-18 07:54:21.665430',
'id': INSTANCE_ID,
'floating_ip_address': '5.21.11.226'
}
class TestSnat(base.TestCase):
def test_basic(self):
sot = snat.Snat()
self.assertEqual('snat_rule', sot.resource_key)
self.assertEqual('snat_rules', sot.resources_key)
path = '/snat_rules'
self.assertEqual(path, sot.base_path)
self.assertTrue(sot.allow_list)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertFalse(sot.allow_commit)
self.assertTrue(sot.allow_delete)
def test_make_it(self):
sot = snat.Snat(**EXAMPLE)
self.assertEqual(EXAMPLE['floating_ip_id'], sot.floating_ip_id)
self.assertEqual(EXAMPLE['status'], sot.status)
self.assertEqual(EXAMPLE['nat_gateway_id'], sot.nat_gateway_id)
self.assertEqual(EXAMPLE['admin_state_up'], sot.admin_state_up)
self.assertEqual(EXAMPLE['network_id'], sot.network_id)
self.assertIsNone(sot.cidr)
self.assertEqual(EXAMPLE['source_type'], sot.source_type)
self.assertEqual(EXAMPLE['project_id'], sot.project_id)
self.assertEqual(EXAMPLE['created_at'], sot.created_at)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertEqual(EXAMPLE['floating_ip_address'],
sot.floating_ip_address)
| 39.887097 | 76 | 0.682572 |
1d8c1a46d481f2f2cc69b3f0865f664027894dc5 | 1,228 | py | Python | solum/builder/controllers/root.py | ed-/solum | 2d23edb7fb53e1bdeff510710824658575d166c4 | [
"Apache-2.0"
] | null | null | null | solum/builder/controllers/root.py | ed-/solum | 2d23edb7fb53e1bdeff510710824658575d166c4 | [
"Apache-2.0"
] | null | null | null | solum/builder/controllers/root.py | ed-/solum | 2d23edb7fb53e1bdeff510710824658575d166c4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
import wsmeext.pecan as wsme_pecan
from solum.api.controllers import common_types
from solum.api.controllers import root as api_root
from solum.builder.controllers.v1 import root as v1_root
class RootController(object):
v1 = v1_root.Controller()
@wsme_pecan.wsexpose([api_root.Version])
def index(self):
host_url = '%s/%s' % (pecan.request.host_url, 'v1')
v1 = api_root.Version(id='v1.0',
status='CURRENT',
link=common_types.Link(target_name='v1',
href=host_url))
return [v1]
| 35.085714 | 75 | 0.673453 |
9ceb2a1566477b4dbfa8608f5d8f5276eef8b752 | 419 | py | Python | venv/Lib/site-packages/praw/models/reddit/mixins/fullname.py | The-Fragment/FragmentFembot | bca0027b423753eb162590e8fd440a2c1e65d133 | [
"MIT"
] | 1 | 2022-03-29T06:49:06.000Z | 2022-03-29T06:49:06.000Z | venv/Lib/site-packages/praw/models/reddit/mixins/fullname.py | The-Fragment/FragmentFembot | bca0027b423753eb162590e8fd440a2c1e65d133 | [
"MIT"
] | 12 | 2021-04-11T19:46:06.000Z | 2021-06-18T16:08:37.000Z | venv/Lib/site-packages/praw/models/reddit/mixins/fullname.py | The-Fragment/FragmentFembot | bca0027b423753eb162590e8fd440a2c1e65d133 | [
"MIT"
] | 1 | 2021-07-07T16:07:13.000Z | 2021-07-07T16:07:13.000Z | """Provide the FullnameMixin class."""
class FullnameMixin:
"""Interface for classes that have a fullname."""
_kind = None
@property
def fullname(self) -> str:
"""Return the object's fullname.
A fullname is an object's kind mapping like ``t3`` followed by an underscore and
the object's base36 ID, e.g., ``t1_c5s96e0``.
"""
return f"{self._kind}_{self.id}"
| 23.277778 | 88 | 0.610979 |
b6ef376687384ad348aaf2838889ab23b1d24de6 | 2,767 | py | Python | common/migrations/0001_initial.py | ikks/flisol-connect | 2fcfb2efadfac5b9519410d690fa098ed620266d | [
"MIT"
] | null | null | null | common/migrations/0001_initial.py | ikks/flisol-connect | 2fcfb2efadfac5b9519410d690fa098ed620266d | [
"MIT"
] | null | null | null | common/migrations/0001_initial.py | ikks/flisol-connect | 2fcfb2efadfac5b9519410d690fa098ed620266d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='name')),
('slug', models.SlugField(unique=True)),
                ('description', models.TextField(verbose_name='description')),
('logo_flag', models.ImageField(upload_to=b'common/flag/', max_length=255, verbose_name='flag icon')),
('image', models.ImageField(upload_to=b'common/country_banner/', max_length=255, verbose_name='country banner image', blank=True)),
('map_center', models.CharField(default=b'-85.627,13.176', help_text='lat,lon', max_length=255, verbose_name='map center on')),
('map_zoom', models.PositiveSmallIntegerField(default=6, verbose_name='default map zoom')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('created_by', models.ForeignKey(verbose_name='created by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('name',),
'verbose_name': 'country',
'verbose_name_plural': 'countries',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Distribution',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='name')),
('slug', models.SlugField(unique=True)),
('logo', models.ImageField(upload_to=b'common/distro/', max_length=255, verbose_name='image')),
                ('description', models.TextField(verbose_name='description')),
('wikipedia', models.URLField(max_length=255, verbose_name='wikipedia link', blank=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('created_by', models.ForeignKey(verbose_name='created by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('name',),
'verbose_name': 'distribution',
'verbose_name_plural': 'distributions',
},
bases=(models.Model,),
),
]
| 49.410714 | 147 | 0.595591 |
4ebd6415ba449f4b48334dc34d951543a24ae65e | 874 | py | Python | Exercicio 01/minhas_compras/core/migrations/0001_initial.py | thiagorocha503/BACK-END-EXERCICIOS | e30fd7e790b2c722072dd12ae0924a906e275c91 | [
"Apache-2.0"
] | 1 | 2019-09-13T21:21:03.000Z | 2019-09-13T21:21:03.000Z | Exercicio 01/minhas_compras/core/migrations/0001_initial.py | thiagorocha503/BACK-END-EXERCICIOS | e30fd7e790b2c722072dd12ae0924a906e275c91 | [
"Apache-2.0"
] | 1 | 2020-06-06T14:01:18.000Z | 2020-06-06T14:01:18.000Z | Exercicio 01/minhas_compras/core/migrations/0001_initial.py | thiagorocha503/BACK-END-EXERCICIOS | e30fd7e790b2c722072dd12ae0924a906e275c91 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.4 on 2019-08-28 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Produto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=50)),
('descricao', models.CharField(max_length=128)),
('unidadeCompra', models.CharField(max_length=30)),
('qtdPrevistoMes', models.DecimalField(decimal_places=2, max_digits=7)),
('preco', models.DecimalField(decimal_places=2, max_digits=7)),
('precoMaximo', models.DecimalField(decimal_places=2, max_digits=7)),
],
),
]
| 32.37037 | 114 | 0.592677 |
28293b42c00466f1218e8fe79809ba9c39e0f4e6 | 465 | py | Python | tests/if/if21.py | ktok07b6/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | [
"MIT"
] | 83 | 2015-11-30T09:59:13.000Z | 2021-08-03T09:12:28.000Z | tests/if/if21.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | [
"MIT"
] | 4 | 2017-02-10T01:43:11.000Z | 2020-07-14T03:52:25.000Z | tests/if/if21.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | [
"MIT"
] | 11 | 2016-11-18T14:39:15.000Z | 2021-02-23T10:05:20.000Z | from polyphony import testbench
def if21(x):
y = 0
if x < 100:
if x < 10:
y = 10
else:
y = 20
#y += 1
else:
if x < 200:
y = 100
else:
y = 200
y += 2
return y
@testbench
def test():
assert 10 == if21(0)
assert 20 == if21(10)
assert 20 == if21(20)
    #assert 20 == if21(30)
assert 102 == if21(100)
assert 202 == if21(200)
test()
| 15 | 31 | 0.427957 |
773d07cb88f0ebfafe5de95bf53d112bdc22197d | 299 | py | Python | office365/sharepoint/fields/fieldMultiUserValue.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | null | null | null | office365/sharepoint/fields/fieldMultiUserValue.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | null | null | null | office365/sharepoint/fields/fieldMultiUserValue.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | null | null | null | from office365.sharepoint.fields.fieldMultiLookupValue import FieldMultiLookupValue
from office365.sharepoint.fields.fieldUserValue import FieldUserValue
class FieldMultiUserValue(FieldMultiLookupValue):
def __init__(self):
super().__init__()
self._item_type = FieldUserValue
| 29.9 | 83 | 0.80602 |
4f060c6920f45eb14e758b898c6418dc6c246cdd | 937 | py | Python | examples/twitter/app.py | tirkarthi/firenado | d5a041707eff48a1ebb948e2b98eff1cd0b9afdc | [
"Apache-2.0"
] | 13 | 2015-06-01T02:04:06.000Z | 2022-01-09T20:28:20.000Z | examples/twitter/app.py | tirkarthi/firenado | d5a041707eff48a1ebb948e2b98eff1cd0b9afdc | [
"Apache-2.0"
] | 198 | 2015-05-30T18:23:11.000Z | 2022-03-30T18:41:52.000Z | examples/twitter/app.py | tirkarthi/firenado | d5a041707eff48a1ebb948e2b98eff1cd0b9afdc | [
"Apache-2.0"
] | 11 | 2015-05-25T01:35:50.000Z | 2021-07-22T15:16:56.000Z | #!/usr/bin/env python
#
# Copyright 2015-2016 Flavio Garcia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import twitter.handlers
import firenado.tornadoweb
class TwitterComponent(firenado.tornadoweb.TornadoComponent):
def get_handlers(self):
return [
('/', twitter.handlers.MainHandler),
('/login', twitter.handlers.LoginHandler),
('/logout', twitter.handlers.LogoutHandler)
]
| 32.310345 | 74 | 0.722519 |
e88cfba87f47e4c9b92460499c1528cc242b1b13 | 2,948 | py | Python | discord/context_managers.py | Awayume/discord.py | a5307af5bfe373d425b184633be81e8157c14abe | [
"MIT"
] | null | null | null | discord/context_managers.py | Awayume/discord.py | a5307af5bfe373d425b184633be81e8157c14abe | [
"MIT"
] | null | null | null | discord/context_managers.py | Awayume/discord.py | a5307af5bfe373d425b184633be81e8157c14abe | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Awayume
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, TypeVar, Optional, Type
if TYPE_CHECKING:
from .abc import Messageable
from types import TracebackType
TypingT = TypeVar('TypingT', bound='Typing')
__all__ = (
'Typing',
)
def _typing_done_callback(fut: asyncio.Future) -> None:
# just retrieve any exception and call it a day
try:
fut.exception()
except (asyncio.CancelledError, Exception):
pass
class Typing:
def __init__(self, messageable: Messageable) -> None:
self.loop: asyncio.AbstractEventLoop = messageable._state.loop
self.messageable: Messageable = messageable
async def do_typing(self) -> None:
try:
channel = self._channel
except AttributeError:
channel = await self.messageable._get_channel()
typing = channel._state.http.send_typing
while True:
await typing(channel.id)
await asyncio.sleep(5)
def __enter__(self: TypingT) -> TypingT:
self.task: asyncio.Task = self.loop.create_task(self.do_typing())
self.task.add_done_callback(_typing_done_callback)
return self
def __exit__(self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.task.cancel()
async def __aenter__(self: TypingT) -> TypingT:
self._channel = channel = await self.messageable._get_channel()
await channel._state.http.send_typing(channel.id)
return self.__enter__()
async def __aexit__(self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.task.cancel()
| 33.123596 | 75 | 0.714383 |
dfe9914b6de02201bdb878946a6cf0773b4eacdb | 171 | py | Python | blogs/urls.py | akshitbhatia/Python-Django-Project | d64f02eb8682e14243c38241af660b54a22d0db9 | [
"bzip2-1.0.6"
] | null | null | null | blogs/urls.py | akshitbhatia/Python-Django-Project | d64f02eb8682e14243c38241af660b54a22d0db9 | [
"bzip2-1.0.6"
] | null | null | null | blogs/urls.py | akshitbhatia/Python-Django-Project | d64f02eb8682e14243c38241af660b54a22d0db9 | [
"bzip2-1.0.6"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('',views.allblogs, name='blogs'),
path('<int:blog_id>/',views.detail_blog, name='detail')
]
| 21.375 | 59 | 0.678363 |
031da70e11d780778029f37eb30f7447d8294b8f | 801 | py | Python | AzureFunctions/F_get_runs/__init__.py | kloudkrafts01/kspyder | 109e7ea4b671c09da8744bf1cb2c2277b349bdba | [
"MIT"
] | 3 | 2021-03-29T13:08:29.000Z | 2021-04-02T15:27:17.000Z | AzureFunctions/F_get_runs/__init__.py | kloudkrafts01/kspyder | 109e7ea4b671c09da8744bf1cb2c2277b349bdba | [
"MIT"
] | null | null | null | AzureFunctions/F_get_runs/__init__.py | kloudkrafts01/kspyder | 109e7ea4b671c09da8744bf1cb2c2277b349bdba | [
"MIT"
] | null | null | null | #!python3
import json
import azure.functions as func
import azure.durable_functions as df
from common.spLogging import logger
async def main(req: func.HttpRequest, starter: str) -> None:
client = df.DurableOrchestrationClient(starter)
instances = await client.get_status_all()
instances_list = []
# response = None
try:
for instance in instances:
# instances_list += instance.to_json(),
            # DurableOrchestrationStatus is not directly JSON-serializable
            logger.info(json.dumps(instance.to_json()))
# resp_body = json.dumps(instances_list)
# response = func.HttpResponse(
# body = resp_body,
# status_code = 200,
# mimetype = 'application/json'
# )
except Exception as e:
logger.error("F_get_runs :: {}".format(e))
# return response | 24.272727 | 60 | 0.621723 |
fd9ee05dfff202df04c30034afc7af0862be56db | 1,625 | py | Python | baekjoon/python/airport_10775.py | yskang/AlgorithmPractice | 31b76e38b4c2f1e3e29fb029587662a745437912 | [
"MIT"
] | null | null | null | baekjoon/python/airport_10775.py | yskang/AlgorithmPractice | 31b76e38b4c2f1e3e29fb029587662a745437912 | [
"MIT"
] | 1 | 2019-11-04T06:44:04.000Z | 2019-11-04T06:46:55.000Z | baekjoon/python/airport_10775.py | yskang/AlgorithmPractice | 31b76e38b4c2f1e3e29fb029587662a745437912 | [
"MIT"
] | null | null | null | # Title: 공항
# Link: https://www.acmicpc.net/problem/10775
import sys
from copy import deepcopy
sys.setrecursionlimit(10 ** 6)
read_single_int = lambda: int(sys.stdin.readline().strip())
class MaxSegment:
    # Iterative (bottom-up) max segment tree: leaves sit at tree[n:2n] and
    # internal node i stores max(tree[2*i], tree[2*i+1]).
def __init__(self, ns: list, n: int):
self.n = n
self.tree = [0 for _ in range(n)] + deepcopy(ns) + [0]
for i in range(n-1, 0, -1):
self.tree[i] = max(self.tree[2*i], self.tree[2*i+1])
    def update(self, pos: int, value: int):
        # Overwrite the leaf at pos, then refresh the maxima up to the root.
pos += self.n
self.tree[pos] = value
while pos > 1:
pos >>= 1
self.tree[pos] = max(self.tree[2*pos], self.tree[2*pos+1])
    def range_query(self, left: int, right: int):
        # Maximum over the inclusive index range [left, right].
left += self.n
right += 1
right += self.n
ma = -(10**10)
while left < right:
if left & 1 != 0:
ma = max(ma, self.tree[left])
left += 1
if right & 1 != 0:
right -= 1
ma = max(ma, self.tree[right])
left //= 2
right //= 2
return ma
def solution(g: int, p: int, gs: list):
    # Greedy: dock each plane at the highest free gate <= its limit; a taken
    # gate's leaf is set negative so it can never win a max query again.
gates = [i for i in range(g+1)]
tree = MaxSegment(gates, g+1)
count = 0
for gate in gs:
available = tree.range_query(1, gate)
if available <= 0:
return count
tree.update(available, -available)
count += 1
return count
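# Hedged worked example (added for illustration; not in the original file):
# with g=4 gates and planes requesting gates [4, 1, 1], the max query over
# 1..4 gives gate 4 to the first plane, the second takes gate 1, and the
# third finds max over 1..1 <= 0, so boarding stops:
#
#     assert solution(4, 3, [4, 1, 1]) == 2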
def main():
g = read_single_int()
p = read_single_int()
gs = []
for _ in range(p):
gs.append(read_single_int())
print(solution(g, p, gs))
if __name__ == '__main__':
main() | 23.550725 | 70 | 0.508923 |
502b53f80368d491c455cee618cf236de9c238ab | 374 | py | Python | proxy/proxy_client.py | MailG/code_py | c21a27c871c5c42625aadf45d51a0ba325095739 | [
"MIT"
] | null | null | null | proxy/proxy_client.py | MailG/code_py | c21a27c871c5c42625aadf45d51a0ba325095739 | [
"MIT"
] | null | null | null | proxy/proxy_client.py | MailG/code_py | c21a27c871c5c42625aadf45d51a0ba325095739 | [
"MIT"
] | null | null | null | from socket import *
HOST = '127.0.0.1' # The remote host
PORT = 9090 # The same port as used by the server
s = socket(AF_INET, SOCK_STREAM)
s.connect((HOST, PORT))
s.sendall('Hello, world')
data = s.recv(1024)
print 'Received', repr(data)
while 1:
indata = raw_input("input command:")
s.sendall(indata)
data = s.recv(1024)
print 'Received', repr(data)
s.close()
| 23.375 | 49 | 0.681818 |
cfc0df8b3bcf15d5ceda77d86c5dcf8330bf00c2 | 1,105 | py | Python | tests/bm_mesh_laplacian_smoothing.py | theycallmepeter/pytorch3d_PBR | bc83c23969ff7843fc05d2da001952b368926174 | [
"BSD-3-Clause"
] | null | null | null | tests/bm_mesh_laplacian_smoothing.py | theycallmepeter/pytorch3d_PBR | bc83c23969ff7843fc05d2da001952b368926174 | [
"BSD-3-Clause"
] | null | null | null | tests/bm_mesh_laplacian_smoothing.py | theycallmepeter/pytorch3d_PBR | bc83c23969ff7843fc05d2da001952b368926174 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from test_mesh_laplacian_smoothing import TestLaplacianSmoothing
def bm_mesh_laplacian_smoothing() -> None:
devices = ["cpu"]
if torch.cuda.is_available():
devices.append("cuda")
kwargs_list = []
num_meshes = [2, 10, 32]
num_verts = [100, 1000]
num_faces = [300, 3000]
test_cases = product(num_meshes, num_verts, num_faces, devices)
for case in test_cases:
n, v, f, d = case
kwargs_list.append(
{"num_meshes": n, "num_verts": v, "num_faces": f, "device": d}
)
benchmark(
TestLaplacianSmoothing.laplacian_smoothing_with_init,
"MESH_LAPLACIAN_SMOOTHING",
kwargs_list,
warmup_iters=1,
)
if __name__ == "__main__":
bm_mesh_laplacian_smoothing()
| 26.95122 | 75 | 0.654299 |
41cf8379f2f0f3d221d0c6d96636f694e3f7fb9c | 23,645 | py | Python | gui/uis/windows/main_window/setup_main_window.py | sikros/FileDownloadManager | 93b66604f6e78c8cd2c62d37e8ace68233232fe1 | [
"Apache-2.0"
] | null | null | null | gui/uis/windows/main_window/setup_main_window.py | sikros/FileDownloadManager | 93b66604f6e78c8cd2c62d37e8ace68233232fe1 | [
"Apache-2.0"
] | null | null | null | gui/uis/windows/main_window/setup_main_window.py | sikros/FileDownloadManager | 93b66604f6e78c8cd2c62d37e8ace68233232fe1 | [
"Apache-2.0"
] | null | null | null | # IMPORT PACKAGES AND MODULES
# ///////////////////////////////////////////////////////////////
from gui.widgets.py_table_widget.py_table_widget import PyTableWidget
from . functions_main_window import *
import sys
import os
import winreg
# IMPORT QT CORE
# ///////////////////////////////////////////////////////////////
from qt_core import *
# IMPORT SETTINGS
# ///////////////////////////////////////////////////////////////
from gui.core.json_settings import Settings
# IMPORT THEME COLORS
# ///////////////////////////////////////////////////////////////
from gui.core.json_themes import Themes
# IMPORT PY ONE DARK WIDGETS
# ///////////////////////////////////////////////////////////////
from gui.widgets import *
# LOAD UI MAIN
# ///////////////////////////////////////////////////////////////
from . ui_main import *
# MAIN FUNCTIONS
# ///////////////////////////////////////////////////////////////
from . functions_main_window import *
def getDocPath():
    # Despite the name, this returns the user's Downloads folder:
    # {374DE290-123F-4565-9164-39C4925E467B} is the Windows known-folder
    # GUID for Downloads, read from the per-user Shell Folders registry key.
    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders')
    return winreg.QueryValueEx(key, "{374DE290-123F-4565-9164-39C4925E467B}")[0]
# PY WINDOW
# ///////////////////////////////////////////////////////////////
class SetupMainWindow:
def __init__(self):
super().__init__()
        # SETUP MAIN WINDOW
# Load widgets from "gui\uis\main_window\ui_main.py"
# ///////////////////////////////////////////////////////////////
self.ui = UI_MainWindow()
self.ui.setup_ui(self)
# ADD LEFT MENUS
# ///////////////////////////////////////////////////////////////
add_left_menus = [
{
"btn_icon" : "icon_home.svg",
"btn_id" : "btn_home",
"btn_text" : "主菜单",
"btn_tooltip" : "主菜单",
"show_top" : True,
"is_active" : True
},
{
"btn_icon" : "icon_widgets.svg",
"btn_id" : "btn_widgets",
"btn_text" : "反馈",
"btn_tooltip" : "反馈",
"show_top" : True,
"is_active" : False
},
{
"btn_icon" : "icon_add_user.svg",
"btn_id" : "btn_add_user",
"btn_text" : "Add Users",
"btn_tooltip" : "Add users",
"show_top" : True,
"is_active" : False
},
{
"btn_icon" : "icon_file.svg",
"btn_id" : "btn_new_file",
"btn_text" : "New File",
"btn_tooltip" : "Create new file",
"show_top" : True,
"is_active" : False
},
{
"btn_icon" : "icon_folder_open.svg",
"btn_id" : "btn_open_file",
"btn_text" : "Open File",
"btn_tooltip" : "Open file",
"show_top" : True,
"is_active" : False
},
{
"btn_icon" : "icon_save.svg",
"btn_id" : "btn_save",
"btn_text" : "Save File",
"btn_tooltip" : "Save file",
"show_top" : True,
"is_active" : False
},
{
"btn_icon" : "icon_info.svg",
"btn_id" : "btn_info",
"btn_text" : "Information",
"btn_tooltip" : "Open informations",
"show_top" : False,
"is_active" : False
},
{
"btn_icon" : "icon_settings.svg",
"btn_id" : "btn_settings",
"btn_text" : "Settings",
"btn_tooltip" : "Open settings",
"show_top" : False,
"is_active" : False
}
]
# ADD TITLE BAR MENUS
# ///////////////////////////////////////////////////////////////
add_title_bar_menus = [
{
"btn_icon" : "icon_info.svg",
"btn_id" : "btn_top_settings",
"btn_tooltip" : "关于About",
"is_active" : False
}
]
# SETUP CUSTOM BTNs OF CUSTOM WIDGETS
# Get sender() function when btn is clicked
# ///////////////////////////////////////////////////////////////
def setup_btns(self):
if self.ui.title_bar.sender() != None:
return self.ui.title_bar.sender()
elif self.ui.left_menu.sender() != None:
return self.ui.left_menu.sender()
elif self.ui.left_column.sender() != None:
return self.ui.left_column.sender()
# SETUP MAIN WINDOW WITH CUSTOM PARAMETERS
# ///////////////////////////////////////////////////////////////
def setup_gui(self):
# APP TITLE
# ///////////////////////////////////////////////////////////////
self.setWindowTitle(self.settings["app_name"])
# REMOVE TITLE BAR
# ///////////////////////////////////////////////////////////////
if self.settings["custom_title_bar"]:
self.setWindowFlag(Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
# ADD GRIPS
# ///////////////////////////////////////////////////////////////
if self.settings["custom_title_bar"]:
self.left_grip = PyGrips(self, "left", self.hide_grips)
self.right_grip = PyGrips(self, "right", self.hide_grips)
self.top_grip = PyGrips(self, "top", self.hide_grips)
self.bottom_grip = PyGrips(self, "bottom", self.hide_grips)
self.top_left_grip = PyGrips(self, "top_left", self.hide_grips)
self.top_right_grip = PyGrips(self, "top_right", self.hide_grips)
self.bottom_left_grip = PyGrips(self, "bottom_left", self.hide_grips)
self.bottom_right_grip = PyGrips(self, "bottom_right", self.hide_grips)
# LEFT MENUS / GET SIGNALS WHEN LEFT MENU BTN IS CLICKED / RELEASED
# ///////////////////////////////////////////////////////////////
# ADD MENUS
self.ui.left_menu.add_menus(SetupMainWindow.add_left_menus)
# SET SIGNALS
self.ui.left_menu.clicked.connect(self.btn_clicked)
self.ui.left_menu.released.connect(self.btn_released)
# TITLE BAR / ADD EXTRA BUTTONS
# ///////////////////////////////////////////////////////////////
# ADD MENUS
self.ui.title_bar.add_menus(SetupMainWindow.add_title_bar_menus)
# SET SIGNALS
self.ui.title_bar.clicked.connect(self.btn_clicked)
self.ui.title_bar.released.connect(self.btn_released)
# ADD Title
if self.settings["custom_title_bar"]:
self.ui.title_bar.set_title(self.settings["app_name"])
else:
self.ui.title_bar.set_title("Welcome")
# LEFT COLUMN SET SIGNALS
# ///////////////////////////////////////////////////////////////
self.ui.left_column.clicked.connect(self.btn_clicked)
self.ui.left_column.released.connect(self.btn_released)
# SET INITIAL PAGE / SET LEFT AND RIGHT COLUMN MENUS
# ///////////////////////////////////////////////////////////////
MainFunctions.set_page(self, self.ui.load_pages.page_1)
MainFunctions.set_left_column_menu(
self,
menu = self.ui.left_column.menus.menu_1,
title = "Settings Left Column",
icon_path = Functions.set_svg_icon("icon_settings.svg")
)
MainFunctions.set_right_column_menu(self, self.ui.right_column.menu_1)
# ///////////////////////////////////////////////////////////////
# EXAMPLE CUSTOM WIDGETS
# Here are added the custom widgets to pages and columns that
# were created using Qt Designer.
# This is just an example and should be deleted when creating
# your application.
#
# OBJECTS FOR LOAD PAGES, LEFT AND RIGHT COLUMNS
# You can access objects inside Qt Designer projects using
# the objects below:
#
# <OBJECTS>
# LEFT COLUMN: self.ui.left_column.menus
# RIGHT COLUMN: self.ui.right_column
# LOAD PAGES: self.ui.load_pages
# </OBJECTS>
# ///////////////////////////////////////////////////////////////
# LOAD SETTINGS
# ///////////////////////////////////////////////////////////////
settings = Settings()
self.settings = settings.items
# LOAD THEME COLOR
# ///////////////////////////////////////////////////////////////
themes = Themes()
self.themes = themes.items
# LEFT COLUMN
# ///////////////////////////////////////////////////////////////
# BTN 1
self.left_btn_1 = PyPushButton(
text="Btn 1",
radius=8,
color=self.themes["app_color"]["text_foreground"],
bg_color=self.themes["app_color"]["dark_one"],
bg_color_hover=self.themes["app_color"]["dark_three"],
bg_color_pressed=self.themes["app_color"]["dark_four"]
)
self.left_btn_1.setMaximumHeight(40)
self.ui.left_column.menus.btn_1_layout.addWidget(self.left_btn_1)
# BTN 2
self.left_btn_2 = PyPushButton(
text="Btn With Icon",
radius=8,
color=self.themes["app_color"]["text_foreground"],
bg_color=self.themes["app_color"]["dark_one"],
bg_color_hover=self.themes["app_color"]["dark_three"],
bg_color_pressed=self.themes["app_color"]["dark_four"]
)
self.icon = QIcon(Functions.set_svg_icon("icon_settings.svg"))
self.left_btn_2.setIcon(self.icon)
self.left_btn_2.setMaximumHeight(40)
self.ui.left_column.menus.btn_2_layout.addWidget(self.left_btn_2)
# BTN 3 - Default QPushButton
self.left_btn_3 = QPushButton("Default QPushButton")
self.left_btn_3.setMaximumHeight(40)
self.ui.left_column.menus.btn_3_layout.addWidget(self.left_btn_3)
# PAGES
# ///////////////////////////////////////////////////////////////
# PAGE 1 - ADD LOGO TO MAIN PAGE
self.logo_svg = QSvgWidget(Functions.set_svg_image("logo_home.svg"))
        self.ui.load_pages.logo_layout.addWidget(self.logo_svg, alignment=Qt.AlignCenter)
# PAGE 2
# CIRCULAR PROGRESS 1
self.circular_progress_1 = PyCircularProgress(
value = 0,
progress_color = self.themes["app_color"]["context_color"],
text_color = self.themes["app_color"]["text_title"],
font_size = 14,
bg_color = self.themes["app_color"]["dark_four"]
)
self.circular_progress_1.setFixedSize(160,160)
# CIRCULAR PROGRESS 2
self.circular_progress_2 = PyCircularProgress(
value = 0,
progress_width = 4,
progress_color = self.themes["app_color"]["context_color"],
text_color = self.themes["app_color"]["context_color"],
font_size = 14,
bg_color = self.themes["app_color"]["bg_three"]
)
self.circular_progress_2.setFixedSize(160,160)
# CIRCULAR PROGRESS 3
self.circular_progress_3 = PyCircularProgress(
value = 0,
progress_width = 2,
progress_color = self.themes["app_color"]["pink"],
text_color = self.themes["app_color"]["white"],
font_size = 14,
bg_color = self.themes["app_color"]["bg_three"],
suffix = 'MB/s'
)
self.circular_progress_3.setFixedSize(160,160)
# PY LINE EDIT
self.line_edit = PyLineEdit(
text = "",
            place_holder_text = "Enter the URL to scrape",
radius = 8,
border_size = 2,
color = self.themes["app_color"]["text_foreground"],
selection_color = self.themes["app_color"]["white"],
bg_color = self.themes["app_color"]["dark_one"],
bg_color_active = self.themes["app_color"]["dark_three"],
context_color = self.themes["app_color"]["context_color"]
)
self.line_edit.setMinimumHeight(30)
# PY LINE EDIT
self.line_edit2 = PyLineEdit(
text = "",
            place_holder_text = "Enter the file types to collect, separated by '.'",
radius = 8,
border_size = 2,
color = self.themes["app_color"]["text_foreground"],
selection_color = self.themes["app_color"]["white"],
bg_color = self.themes["app_color"]["dark_one"],
bg_color_active = self.themes["app_color"]["dark_three"],
context_color = self.themes["app_color"]["context_color"]
)
self.line_edit2.setMinimumHeight(30)
self.line_edit3 = PyLineEdit(
text = getDocPath(),
            place_holder_text = "Enter the download directory",
radius = 8,
border_size = 2,
color = self.themes["app_color"]["text_foreground"],
selection_color = self.themes["app_color"]["white"],
bg_color = self.themes["app_color"]["dark_one"],
bg_color_active = self.themes["app_color"]["dark_three"],
context_color = self.themes["app_color"]["context_color"]
)
self.line_edit3.setMinimumHeight(30)
# ICON BUTTON 1
self.icon_button_1 = PyIconButton(
icon_path = Functions.set_svg_icon("icon_folder.svg"),
parent = self,
app_parent = self.ui.central_widget,
            tooltip_text = "Select folder",
width = 40,
height = 40,
radius = 20,
dark_one = self.themes["app_color"]["dark_one"],
icon_color = self.themes["app_color"]["icon_color"],
icon_color_hover = self.themes["app_color"]["icon_hover"],
icon_color_pressed = self.themes["app_color"]["icon_active"],
icon_color_active = self.themes["app_color"]["icon_active"],
bg_color = self.themes["app_color"]["dark_one"],
bg_color_hover = self.themes["app_color"]["dark_three"],
bg_color_pressed = self.themes["app_color"]["pink"]
)
self.icon_button_1.clicked.connect(self.selectDirectory)
# ICON BUTTON 2
self.icon_button_2 = PyIconButton(
icon_path = Functions.set_svg_icon("icon_folder_open.svg"),
parent = self,
app_parent = self.ui.central_widget,
            tooltip_text = "View files",
width = 40,
height = 40,
radius = 8,
dark_one = self.themes["app_color"]["dark_one"],
icon_color = self.themes["app_color"]["icon_color"],
icon_color_hover = self.themes["app_color"]["icon_hover"],
icon_color_pressed = self.themes["app_color"]["white"],
icon_color_active = self.themes["app_color"]["icon_active"],
bg_color = self.themes["app_color"]["dark_one"],
bg_color_hover = self.themes["app_color"]["dark_three"],
bg_color_pressed = self.themes["app_color"]["green"],
)
self.icon_button_2.clicked.connect(self.openDirectory)
# ICON BUTTON 3
self.icon_button_3 = PyIconButton(
icon_path = Functions.set_svg_icon("icon_send.svg"),
parent = self,
app_parent = self.ui.central_widget,
            tooltip_text = "Start download",
width = 40,
height = 40,
radius = 8,
dark_one = self.themes["app_color"]["dark_one"],
icon_color = self.themes["app_color"]["icon_color"],
icon_color_hover = self.themes["app_color"]["icon_hover"],
icon_color_pressed = self.themes["app_color"]["white"],
icon_color_active = self.themes["app_color"]["icon_active"],
bg_color = self.themes["app_color"]["dark_one"],
bg_color_hover = self.themes["app_color"]["dark_three"],
bg_color_pressed = self.themes["app_color"]["context_color"],
is_active = True
)
# PUSH BUTTON 1
self.push_button_1 = PyPushButton(
            text = "Clear records",
            radius = 8,
color = self.themes["app_color"]["text_foreground"],
bg_color = self.themes["app_color"]["dark_one"],
bg_color_hover = self.themes["app_color"]["dark_three"],
bg_color_pressed = self.themes["app_color"]["dark_four"]
)
self.push_button_1.setMinimumHeight(40)
self.push_button_1.clicked.connect(self.clear)
# PUSH BUTTON 2
self.push_button_2 = PyPushButton(
            text = "Start download",
radius = 8,
color = self.themes["app_color"]["text_active"],
bg_color = self.themes["app_color"]["dark_one"],
bg_color_hover = self.themes["app_color"]["dark_three"],
bg_color_pressed = self.themes["app_color"]["dark_four"],
)
self.icon_2 = QIcon(Functions.set_svg_icon("icon_send.svg"))
self.push_button_2.setMinimumHeight(40)
self.push_button_2.setIcon(self.icon_2)
self.push_button_2.clicked.connect(self.start_download)
# TOGGLE BUTTON
self.toggle_button = PyToggle(
width = 50,
bg_color = self.themes["app_color"]["dark_two"],
circle_color = self.themes["app_color"]["icon_color"],
active_color = self.themes["app_color"]["context_color"]
)
# TABLE WIDGETS
self.table_widget = PyTableWidget(
radius = 8,
color = self.themes["app_color"]["text_foreground"],
selection_color = self.themes["app_color"]["context_color"],
bg_color = self.themes["app_color"]["bg_two"],
header_horizontal_color = self.themes["app_color"]["dark_two"],
header_vertical_color = self.themes["app_color"]["bg_three"],
bottom_line_color = self.themes["app_color"]["bg_three"],
grid_line_color = self.themes["app_color"]["bg_one"],
scroll_bar_bg_color = self.themes["app_color"]["bg_one"],
scroll_bar_btn_color = self.themes["app_color"]["dark_four"],
context_color = self.themes["app_color"]["context_color"]
)
self.table_widget.setColumnCount(4)
self.table_widget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.table_widget.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.table_widget.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table_widget.itemChanged.connect(self.downfile)
# Columns / Header
self.column_1 = QTableWidgetItem()
self.column_1.setTextAlignment(Qt.AlignCenter)
        self.column_1.setText("File")
self.column_2 = QTableWidgetItem()
self.column_2.setTextAlignment(Qt.AlignCenter)
self.column_2.setText("URL")
self.column_3 = QTableWidgetItem()
self.column_3.setTextAlignment(Qt.AlignCenter)
        self.column_3.setText("Path")
self.column_4 = QTableWidgetItem()
self.column_4.setTextAlignment(Qt.AlignCenter)
        self.column_4.setText("Status")
# Set column
self.table_widget.setHorizontalHeaderItem(0, self.column_1)
self.table_widget.setHorizontalHeaderItem(1, self.column_2)
self.table_widget.setHorizontalHeaderItem(2, self.column_3)
self.table_widget.setHorizontalHeaderItem(3, self.column_4)
'''
for x in range(3):
row_number = self.table_widget.rowCount()
self.table_widget.insertRow(row_number) # Insert row
self.table_widget.setItem(row_number, 0, QTableWidgetItem(str("Wanderson"))) # Add name
self.table_widget.setItem(row_number, 1, QTableWidgetItem(str("vfx_on_fire_" + str(x)))) # Add nick
self.pass_text = QTableWidgetItem()
self.pass_text.setTextAlignment(Qt.AlignCenter)
self.pass_text.setText("12345" + str(x))
self.table_widget.setItem(row_number, 2, self.pass_text) # Add pass
self.table_widget.setRowHeight(row_number, 22)
'''
# ADD WIDGETS
self.ui.load_pages.row_1_layout.addWidget(self.line_edit)
self.ui.load_pages.row_1_layout.addWidget(self.line_edit2)
self.ui.load_pages.row_2_layout.addWidget(self.line_edit3)
self.ui.load_pages.row_2_layout.addWidget(self.icon_button_1)
self.ui.load_pages.row_2_layout.addWidget(self.icon_button_2)
self.ui.load_pages.row_3_layout.addWidget(self.push_button_1)
self.ui.load_pages.row_3_layout.addWidget(self.push_button_2)
#self.ui.load_pages.row_3_layout.addWidget(self.toggle_button)
self.ui.load_pages.row_4_layout.addWidget(self.circular_progress_1)
self.ui.load_pages.row_4_layout.addWidget(self.circular_progress_2)
self.ui.load_pages.row_4_layout.addWidget(self.circular_progress_3)
self.ui.load_pages.row_5_layout.addWidget(self.table_widget)
# RIGHT COLUMN
# ///////////////////////////////////////////////////////////////
# BTN 1
self.right_btn_1 = PyPushButton(
text="Show Menu 2",
radius=8,
color=self.themes["app_color"]["text_foreground"],
bg_color=self.themes["app_color"]["dark_one"],
bg_color_hover=self.themes["app_color"]["dark_three"],
bg_color_pressed=self.themes["app_color"]["dark_four"]
)
self.icon_right = QIcon(Functions.set_svg_icon("icon_arrow_right.svg"))
self.right_btn_1.setIcon(self.icon_right)
self.right_btn_1.setMaximumHeight(40)
self.right_btn_1.clicked.connect(lambda: MainFunctions.set_right_column_menu(
self,
self.ui.right_column.menu_2
))
#self.ui.right_column.btn_1_layout.addWidget(self.right_btn_1)
# BTN 2
self.right_btn_2 = PyPushButton(
text="Show Menu 1",
radius=8,
color=self.themes["app_color"]["text_foreground"],
bg_color=self.themes["app_color"]["dark_one"],
bg_color_hover=self.themes["app_color"]["dark_three"],
bg_color_pressed=self.themes["app_color"]["dark_four"]
)
self.icon_left = QIcon(Functions.set_svg_icon("icon_arrow_left.svg"))
self.right_btn_2.setIcon(self.icon_left)
self.right_btn_2.setMaximumHeight(40)
self.right_btn_2.clicked.connect(lambda: MainFunctions.set_right_column_menu(
self,
self.ui.right_column.menu_1
))
#self.ui.right_column.btn_2_layout.addWidget(self.right_btn_2)
# ///////////////////////////////////////////////////////////////
# END - EXAMPLE CUSTOM WIDGETS
# ///////////////////////////////////////////////////////////////
# RESIZE GRIPS AND CHANGE POSITION
# Resize or change position when window is resized
# ///////////////////////////////////////////////////////////////
def resize_grips(self):
if self.settings["custom_title_bar"]:
self.left_grip.setGeometry(5, 10, 10, self.height())
self.right_grip.setGeometry(self.width() - 15, 10, 10, self.height())
self.top_grip.setGeometry(5, 5, self.width() - 10, 10)
self.bottom_grip.setGeometry(5, self.height() - 15, self.width() - 10, 10)
self.top_right_grip.setGeometry(self.width() - 20, 5, 15, 15)
self.bottom_left_grip.setGeometry(5, self.height() - 20, 15, 15)
self.bottom_right_grip.setGeometry(self.width() - 20, self.height() - 20, 15, 15) | 40.979203 | 118 | 0.5476 |
cf60ffef1a70f4de48fa2f1d8619f6a14917c2e3 | 316 | py | Python | doc/examples/arrays/true_model.py | act-elegancy/consumet | 291eb6ad1cfbb1ca4f04ed3bc7a97c05783183b0 | [
"MIT"
] | 4 | 2019-10-04T14:55:31.000Z | 2022-03-20T07:22:13.000Z | doc/examples/arrays/true_model.py | act-elegancy/consumet | 291eb6ad1cfbb1ca4f04ed3bc7a97c05783183b0 | [
"MIT"
] | null | null | null | doc/examples/arrays/true_model.py | act-elegancy/consumet | 291eb6ad1cfbb1ca4f04ed3bc7a97c05783183b0 | [
"MIT"
] | 2 | 2019-10-04T15:01:12.000Z | 2021-08-08T16:45:55.000Z | import numpy as np
def simulate(x):
'''
    This function defines a function z = simulate(x), where x is a 2D variable and
z is a 2D variable. This file combines the `rosenbrock` and `ripples` examples.
'''
z = []
    z.append( (1-x[0])**2+100*(x[1]-x[0]**2)**2 )  # Rosenbrock term
    z.append( np.sin(np.sqrt(np.dot(x,x))) )       # radial ripples term
return z
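
# A minimal usage sketch (added for illustration; the evaluation point is an
# arbitrary assumption, not part of the original example set):
#
#     x = np.array([0.5, 0.5])
#     z = simulate(x)   # -> [Rosenbrock value at x, ripples value at x]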
| 26.333333 | 80 | 0.639241 |
b8487ef0f0283bbe80054997ba21dcbc07f87877 | 25063 | py | Python | scripts/generate_emission_spectra.py | hmvege/ElementSound | 976cd8a4c0a77b31fd5c1b1f230c8866c4fc0a89 | [
"MIT"
] | null | null | null | scripts/generate_emission_spectra.py | hmvege/ElementSound | 976cd8a4c0a77b31fd5c1b1f230c8866c4fc0a89 | [
"MIT"
] | 5 | 2021-03-02T22:24:57.000Z | 2021-03-13T14:55:14.000Z | scripts/generate_emission_spectra.py | hmvege/Elemental | 976cd8a4c0a77b31fd5c1b1f230c8866c4fc0a89 | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import List, Tuple
import click
import numba as nb
import numpy as np
import matplotlib.pyplot as plt
# from matplotlib import rcParams
from scipy.interpolate import interp1d
from tqdm import tqdm
from elemental.elemental import Elemental
from elemental.utils import get_element, check_folder
from get_viable_elements import retrieve_viable_elements
# rcParams['text.usetex'] = True
# rcParams['font.family'] = "serif"
CIE = np.array(
[
[0.0014, 0.0000, 0.0065],
[0.0022, 0.0001, 0.0105],
[0.0042, 0.0001, 0.0201],
[0.0076, 0.0002, 0.0362],
[0.0143, 0.0004, 0.0679],
[0.0232, 0.0006, 0.1102],
[0.0435, 0.0012, 0.2074],
[0.0776, 0.0022, 0.3713],
[0.1344, 0.0040, 0.6456],
[0.2148, 0.0073, 1.0391],
[0.2839, 0.0116, 1.3856],
[0.3285, 0.0168, 1.6230],
[0.3483, 0.0230, 1.7471],
[0.3481, 0.0298, 1.7826],
[0.3362, 0.0380, 1.7721],
[0.3187, 0.0480, 1.7441],
[0.2908, 0.0600, 1.6692],
[0.2511, 0.0739, 1.5281],
[0.1954, 0.0910, 1.2876],
[0.1421, 0.1126, 1.0419],
[0.0956, 0.1390, 0.8130],
[0.0580, 0.1693, 0.6162],
[0.0320, 0.2080, 0.4652],
[0.0147, 0.2586, 0.3533],
[0.0049, 0.3230, 0.2720],
[0.0024, 0.4073, 0.2123],
[0.0093, 0.5030, 0.1582],
[0.0291, 0.6082, 0.1117],
[0.0633, 0.7100, 0.0782],
[0.1096, 0.7932, 0.0573],
[0.1655, 0.8620, 0.0422],
[0.2257, 0.9149, 0.0298],
[0.2904, 0.9540, 0.0203],
[0.3597, 0.9803, 0.0134],
[0.4334, 0.9950, 0.0087],
[0.5121, 1.0000, 0.0057],
[0.5945, 0.9950, 0.0039],
[0.6784, 0.9786, 0.0027],
[0.7621, 0.9520, 0.0021],
[0.8425, 0.9154, 0.0018],
[0.9163, 0.8700, 0.0017],
[0.9786, 0.8163, 0.0014],
[1.0263, 0.7570, 0.0011],
[1.0567, 0.6949, 0.0010],
[1.0622, 0.6310, 0.0008],
[1.0456, 0.5668, 0.0006],
[1.0026, 0.5030, 0.0003],
[0.9384, 0.4412, 0.0002],
[0.8544, 0.3810, 0.0002],
[0.7514, 0.3210, 0.0001],
[0.6424, 0.2650, 0.0000],
[0.5419, 0.2170, 0.0000],
[0.4479, 0.1750, 0.0000],
[0.3608, 0.1382, 0.0000],
[0.2835, 0.1070, 0.0000],
[0.2187, 0.0816, 0.0000],
[0.1649, 0.0610, 0.0000],
[0.1212, 0.0446, 0.0000],
[0.0874, 0.0320, 0.0000],
[0.0636, 0.0232, 0.0000],
[0.0468, 0.0170, 0.0000],
[0.0329, 0.0119, 0.0000],
[0.0227, 0.0082, 0.0000],
[0.0158, 0.0057, 0.0000],
[0.0114, 0.0041, 0.0000],
[0.0081, 0.0029, 0.0000],
[0.0058, 0.0021, 0.0000],
[0.0041, 0.0015, 0.0000],
[0.0029, 0.0010, 0.0000],
[0.0020, 0.0007, 0.0000],
[0.0014, 0.0005, 0.0000],
[0.0010, 0.0004, 0.0000],
[0.0007, 0.0002, 0.0000],
[0.0005, 0.0002, 0.0000],
[0.0003, 0.0001, 0.0000],
[0.0002, 0.0001, 0.0000],
[0.0002, 0.0001, 0.0000],
[0.0001, 0.0000, 0.0000],
[0.0001, 0.0000, 0.0000],
[0.0001, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000],
]
)
SPECTRA_GENERATION_OPTIONS = [
"NTSC",
"EBU",
"SMPTE",
"HDTV",
"CIE",
"Rec709",
"AdobeRBG1998",
"SRGB",
"empirical",
]
# Plotting parameters
OUTPUT_DPI = 300
OUTPUT_SIZE = (6.6, 3.6)
class ColorSystem:
"""Class for the color systems."""
def __init__(
self,
system_name: str,
x_red: float,
y_red: float,
x_green: float,
y_green: float,
x_blue: float,
y_blue: float,
x_white: float,
y_white: float,
gamma: float,
):
# Name of the color system
self.name = system_name
z_red = 1 - (x_red + y_red)
z_green = 1 - (x_green + y_green)
z_blue = 1 - (x_blue + y_blue)
# Dimensions x Color. rgb -> xyz
self.M = np.array(
[
[x_red, y_red, z_red],
[x_green, y_green, z_green],
[x_blue, y_blue, z_blue],
],
dtype=float,
).T
# Finds inverse, xyz -> rgb
self.M_inv = np.array(
[
[
self.M[1, 1] * self.M[2, 2] - self.M[1, 2] * self.M[2, 1],
self.M[0, 2] * self.M[2, 1] - self.M[0, 1] * self.M[2, 2],
self.M[0, 1] * self.M[1, 2] - self.M[0, 2] * self.M[1, 1],
],
[
self.M[1, 2] * self.M[2, 0] - self.M[1, 0] * self.M[2, 2],
self.M[0, 0] * self.M[2, 2] - self.M[0, 2] * self.M[2, 0],
self.M[0, 2] * self.M[1, 0] - self.M[0, 0] * self.M[1, 2],
],
[
self.M[1, 0] * self.M[2, 1] - self.M[1, 1] * self.M[2, 0],
self.M[0, 1] * self.M[2, 0] - self.M[0, 0] * self.M[2, 1],
self.M[0, 0] * self.M[1, 1] - self.M[0, 1] * self.M[1, 0],
],
],
dtype=float,
)
# White points
z_white = 1 - (x_white + y_white)
self.W = np.array([x_white, y_white, z_white], dtype=float)
rw = (self.M_inv[0, :] @ self.W) / self.W[1]
gw = (self.M_inv[1, :] @ self.W) / self.W[1]
bw = (self.M_inv[2, :] @ self.W) / self.W[1]
self.M_inv[0, :] /= rw
self.M_inv[1, :] /= gw
self.M_inv[2, :] /= bw
# for i in range(3):
# self.M_inv[i] /= self.W[i]
self.gamma = gamma
def __str__(self):
msg = "Color System: %s" % self.name
return msg
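
# A construction sketch (the chromaticity values mirror the sRGB entry built
# in generate_color_systems() below; shown only to illustrate the signature):
#
#     cs = ColorSystem("sRGB", 0.64, 0.33, 0.30, 0.60, 0.15, 0.06,
#                      0.3127, 0.3291, 0.0)
#     xyz = np.array([0.3127, 0.3291, 0.3582])
#     rgb = cs.M_inv @ xyz   # maps an XYZ triple into this system's RGB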
def generate_color_systems():
"""Generates the different color systems that using matrix color
mixing."""
# For NTSC television
IlluminantC = (0.3101, 0.3162)
# For EBU and SMPTE
IlluminantD65 = (0.3127, 0.3291)
# CIE equal-energy illuminant
IlluminantE = (0.33333333, 0.33333333)
GAMMA_REC709 = 0.0
# Different color system setups
NTSCsystem = ColorSystem(
"NTSC", 0.67, 0.33, 0.21, 0.71, 0.14, 0.08, *IlluminantC, GAMMA_REC709
)
EBUsystem = ColorSystem(
"EBU (PAL/SECAM)",
0.64,
0.33,
0.29,
0.60,
0.15,
0.06,
*IlluminantD65,
GAMMA_REC709,
)
SMPTEsystem = ColorSystem(
"SMPTE",
0.630,
0.340,
0.310,
0.595,
0.155,
0.070,
*IlluminantD65,
GAMMA_REC709,
)
HDTVsystem = ColorSystem(
"HDTV",
0.670,
0.330,
0.210,
0.710,
0.150,
0.060,
*IlluminantD65,
GAMMA_REC709,
)
CIEsystem = ColorSystem(
"CIE",
0.7355,
0.2645,
0.2658,
0.7243,
0.1669,
0.0085,
*IlluminantE,
GAMMA_REC709,
)
Rec709system = ColorSystem(
"CIE REC 709",
0.64,
0.33,
0.30,
0.60,
0.15,
0.06,
*IlluminantD65,
GAMMA_REC709,
)
    AdobeRBG1998System = ColorSystem(
        "Adobe RGB 1998",
0.64,
0.33,
0.21,
0.71,
0.15,
0.06,
*IlluminantD65,
GAMMA_REC709,
)
SRGBSystem = ColorSystem(
"sRGB", 0.64, 0.33, 0.3, 0.6, 0.15, 0.06, *IlluminantD65, GAMMA_REC709
)
return {
"NTSC": NTSCsystem,
"EBU": EBUsystem,
"SMPTE": SMPTEsystem,
"HDTV": HDTVsystem,
"CIE": CIEsystem,
"Rec709": Rec709system,
"AdobeRBG1998": AdobeRBG1998System,
"SRGB": SRGBSystem,
}
COLOR_SYSTEMS = generate_color_systems()
def planc_rad_law(lmbda: float, T: float = 5000) -> float:
    """Planck's radiation law.
    Returns the intensity for a wavelength as given by Planck's radiation law,
    P(lmbda) dLmbda = c1 lmbda^-5 / (exp(c2 / (T * lmbda)) - 1) dLmbda
Arguments:
lmbda {float} -- wavelength in nm.
Keyword Arguments:
T {float} -- temperature of spectrum (default: {5000})
Returns:
float or np.ndarray -- the intensity P(lmbda).
"""
# assert np.all(lmbda >= 380.0) and np.all(lmbda <= 780), (
# "Wavelength outside visible spectrum: %f" % lmbda)
# Converts from nm to m
lmbd = lmbda * 1e-9
c1 = 3.74183e-16 # W m^2, 2pi*h*c^2
c2 = 1.4388e-2 # m^2 K, h*c / k
return c1 / (lmbd ** 5 * (np.exp(c2 / (T * lmbd)) - 1.0))
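
# A usage sketch (the 550 nm / 5000 K values are arbitrary assumptions):
#
#     planc_rad_law(550.0, T=5000)   # spectral intensity in W m^-3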
def _CIE_color_matching(
T: float = 5000,
wavelengths: np.ndarray = np.arange(380, 780.1, 5.0),
_CIE: np.ndarray = CIE,
):
    """Weights a Planck black-body spectrum by the CIE matching functions and normalizes."""
# X, Y, Z = 0, 0, 0
# Return array setup
xyz = np.empty((wavelengths.shape[0], 3), dtype=float)
for i, lmbda in enumerate(wavelengths):
xyz[i] = planc_rad_law(lmbda, T=T)
xyz[i] *= _CIE[i]
# X += Me * cie[i, 0]
# Y += Me * cie[i, 1]
# Z += Me * cie[i, 2]
# XYZ = (X + Y + Z);
# return np.array([X / XYZ, Y / XYZ, Z / XYZ])
return xyz / np.sum(xyz)
def spectrum_to_xyz(T=5000):
    """Integrates a black-body spectrum at temperature T into XYZ tristimulus values."""
xyz = _CIE_color_matching(T=T)
X = xyz[:, 0].sum()
Y = xyz[:, 1].sum()
Z = xyz[:, 2].sum()
# XYZ = X + Y + Z
# return np.array([X / XYZ, Y / XYZ, Z / XYZ])
return np.array([X, Y, Z])
def xyz_to_rgb(cs: ColorSystem, xyz: np.ndarray):
"""Converts the xyz representation to RGB."""
if len(xyz.shape) == 2:
rgb = np.empty(xyz.shape, dtype=float)
for i in range(xyz.shape[0]):
rgb[i] = cs.M_inv @ xyz[i]
return rgb
else:
return cs.M_inv @ xyz
def norm_rgb(r, g, b):
"""Normalizes the RGB representation."""
greatest = max(r, max(g, b))
if greatest > 0:
r /= greatest
g /= greatest
b /= greatest
return np.array([r, g, b])
def constrain_rgb(r, g, b):
"""
Constrains the RGB representation, in order to remove negative values.
"""
w = -min(0, r, g, b)
if w > 0:
r += w
g += w
b += w
return True, np.array([r, g, b])
return False, np.array([r, g, b])
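
# A minimal end-to-end sketch (the temperature and color-system choices are
# assumptions): turn a black-body spectrum into a displayable RGB triple:
#
#     xyz = spectrum_to_xyz(T=5000)
#     r, g, b = xyz_to_rgb(COLOR_SYSTEMS["SRGB"], xyz)
#     was_clipped, rgb = constrain_rgb(r, g, b)
#     r, g, b = norm_rgb(*rgb)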
def _create_expanded_spectrum_input(N_new):
"""Expands the color mixing spectrum.
Uses the CIE mixing value and interpolates them to a new size defined
by the input.
Arguments:
N_new {int} -- new CIE mixing size.
Returns:
np.ndarray, np.ndarray, np.ndarray -- x array, x new array, expanded
CIE 3xN_new array.
"""
x = np.linspace(0, 1, CIE.shape[0])
x_dense = np.linspace(0, 1, N_new)
CIE_expanded = np.vstack(
[
interp1d(x, CIE[:, 0], kind="cubic")(x_dense),
interp1d(x, CIE[:, 1], kind="cubic")(x_dense),
interp1d(x, CIE[:, 2], kind="cubic")(x_dense),
]
).T
return x, x_dense, CIE_expanded
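
# A usage sketch: densify the 81-sample CIE table to 1500 samples (the size
# the plotting and generation routines below use):
#
#     x, x_dense, cie_dense = _create_expanded_spectrum_input(1500)
#     cie_dense.shape   # -> (1500, 3)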
@nb.njit(cache=True)
def wavelength_to_rgb(
wavelength: float, gamma: float = 0.8
) -> Tuple[float, float, float]:
"""This converts a given wavelength of light to an
approximate RGB color value. The wavelength must be given
in nanometers in the range from 380 nm through 750 nm
(789 THz through 400 THz).
Based on code by Dan Bruton
http://www.physics.sfasu.edu/astro/color/spectra.html
Retrieved from
http://www.noah.org/wiki/Wavelength_to_RGB_in_Python
"""
if wavelength >= 380 and wavelength <= 440:
attenuation = 0.3 + 0.7 * (wavelength - 380) / (440 - 380)
R = ((-(wavelength - 440) / (440 - 380)) * attenuation) ** gamma
G = 0.0
B = (1.0 * attenuation) ** gamma
elif wavelength >= 440 and wavelength <= 490:
R = 0.0
G = ((wavelength - 440) / (490 - 440)) ** gamma
B = 1.0
elif wavelength >= 490 and wavelength <= 510:
R = 0.0
G = 1.0
B = (-(wavelength - 510) / (510 - 490)) ** gamma
elif wavelength >= 510 and wavelength <= 580:
R = ((wavelength - 510) / (580 - 510)) ** gamma
G = 1.0
B = 0.0
elif wavelength >= 580 and wavelength <= 645:
R = 1.0
G = (-(wavelength - 645) / (645 - 580)) ** gamma
B = 0.0
elif wavelength >= 645 and wavelength <= 750:
attenuation = 0.3 + 0.7 * (750 - wavelength) / (750 - 645)
R = (1.0 * attenuation) ** gamma
G = 0.0
B = 0.0
else:
R = 0.0
G = 0.0
B = 0.0
return R, G, B
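
# A quick sanity sketch (550 nm chosen arbitrarily): it falls in the green
# band, so the green channel saturates:
#
#     wavelength_to_rgb(550.0)   # -> (~0.64, 1.0, 0.0)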
@nb.njit(cache=True)
def logistic(x: np.ndarray) -> np.ndarray:
"""Simple logistic function."""
return 1 / (1 + np.exp(-x))
def create_emission_image(
spectra_rgb_image: np.ndarray,
x_ticks: np.ndarray,
wl_labels: List[str],
element_dict: dict,
element_watermark: bool,
output_folder: str,
):
"""Create the emission spectra image."""
element_name, element_ids, element_short = element_dict.values()
fig1 = plt.figure(figsize=OUTPUT_SIZE, dpi=OUTPUT_DPI)
ax1 = fig1.add_axes([0, 0, 1, 1])
fig1.frameon = False
ax1.set_frame_on(False)
ax1.imshow(spectra_rgb_image)
# Fixes the wavelength labels slightly inside the plot
ax1.set_xticks(x_ticks)
ax1.set_xticklabels(wl_labels, fontsize=5, color="white")
ax1.tick_params(axis="x", direction="in", pad=-15)
ax1.set_xticks(ax1.get_xticks()[1:-1])
if element_watermark:
ax1.text(
0.01,
0.95,
r"%s, $_{%d}$%s" % (element_name, element_ids, element_short),
verticalalignment="center",
horizontalalignment="left",
transform=ax1.transAxes,
color="white",
fontsize=15,
)
figpath = output_folder / f"{element_name}_{'%03d'%element_ids}.png"
fig1.canvas.print_png(figpath)
tqdm.write(f"Saved {figpath}.")
plt.close(fig1)
def _setup_visible_spectrum(wavelengths, color_system="CIE", temperature=3000):
"""Helper function for seting up a visible rainbow spectrum."""
# Expanding
x, x_dense, CIE_expanded = _create_expanded_spectrum_input(
wavelengths.shape[0]
)
xyz = _CIE_color_matching(
T=temperature, wavelengths=wavelengths, _CIE=CIE_expanded
)
rgb = xyz_to_rgb(COLOR_SYSTEMS[color_system], xyz)
rgb_array = np.empty(rgb.shape, dtype=float)
for i in range(rgb.shape[0]):
is_constrained, _rgb = constrain_rgb(*rgb[i])
if is_constrained:
rgb_array[i] = norm_rgb(*_rgb)
else:
rgb_array[i] = norm_rgb(*rgb[i])
return x, x_dense, CIE_expanded, xyz, rgb, rgb_array
def _expand_rgb(rgb_array: np.ndarray, Ny: int) -> np.ndarray:
    """Expands the RGB array so it fits into a full image matrix.
    Performs a constant extrapolation along a new leading axis
    [np.newaxis, ...], repeating each RGB row Ny times.
Arguments:
rgb_array {np.ndarray} -- the image/matrix to expand into full image
along Ny axis.
Ny {int} -- the number of pixels in the y direction.
Returns:
np.ndarray -- the expanded image.
"""
Nx, N_channels = rgb_array.shape
rgb_m = np.empty((Ny, Nx, N_channels), dtype=float)
for i in range(Nx):
for j in range(Ny):
rgb_m[j, i] = rgb_array[i]
return rgb_m
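
# An equivalent vectorized form (a sketch, not a change to the loop above):
#
#     rgb_m = np.broadcast_to(rgb_array, (Ny,) + rgb_array.shape).copy()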
def test_color_system(cs):
    """Tests a color system.
    Renders black-body colors over a range of temperatures with the given
    ColorSystem and plots the resulting RGB channels.
    Arguments:
        cs {ColorSystem} -- color system to test.
    """
print("Tests color system: %s" % str(cs))
temperatures = np.asarray(list(range(1000, 10001, 500)))
bb_radiations = np.empty((temperatures.shape[0], 3), dtype=float)
for i, t in enumerate(temperatures):
xyz = spectrum_to_xyz(T=t)
x, y, z = xyz
r, g, b = xyz_to_rgb(cs, xyz)
s = " %5.0f K %.4f %.4f %.4f " % (t, x, y, z)
is_constrained, _rgb = constrain_rgb(r, g, b)
if is_constrained:
r, g, b = norm_rgb(*_rgb)
s += "%.3f %.3f %.3f (Approximation)" % (r, g, b)
else:
r, g, b = norm_rgb(r, g, b)
s += "%.3f %.3f %.3f" % (r, g, b)
print(s)
bb_radiations[i] = np.array([r, g, b])
plt.figure()
plt.plot(temperatures, bb_radiations[:, 0], "--", color="r", label="Red")
plt.plot(temperatures, bb_radiations[:, 1], "--", color="g", label="Green")
plt.plot(temperatures, bb_radiations[:, 2], "--", color="b", label="Blue")
plt.xlabel(r"Temperature $[K]$")
plt.legend()
plt.show()
def test_create_color_mixing():
    """Creates the color mixing spectrum.
    Performs an interpolation of the CIE color mixing functions and creates a
    figure of it.
    """
N_wls = 1500
# Expanding
x, x_dense, CIE_expanded = _create_expanded_spectrum_input(N_wls)
print("Shape before interpolation:", CIE.shape)
print("Shape after interpolation:", CIE_expanded.shape)
plt.plot(x, CIE[:, 0], color="r")
plt.plot(x_dense, CIE_expanded[:, 0], "--o", color="r")
plt.plot(x, CIE[:, 1], color="b")
plt.plot(x_dense, CIE_expanded[:, 1], "--o", color="b")
plt.plot(x, CIE[:, 2], color="g")
plt.plot(x_dense, CIE_expanded[:, 2], "--o", color="g")
plt.title("CIE color mixing")
plt.show()
def test_create_visible_spectrum():
"""Creates the visible spectrum.
Creates the visible spectrum(rainbow) from 380 nm to 780 nm.
"""
# Parameters
Ny = 100 # number of y pixels
N_wls = 1500
wl_start = 380
wl_stop = 780
wavelengths = np.linspace(wl_start, wl_stop, N_wls)
x, x_dense, CIE_expanded, xyz, rgb, rgb_array = _setup_visible_spectrum(
wavelengths
)
# Mixing plot
fig0, ax0 = plt.subplots(1, 1)
ax0.plot(xyz)
ax0.plot(rgb, "--")
ax0.legend(["x", "y", "z", "r", "b", "g"])
# Expands rgb to full image and smooths
rgb_m = _expand_rgb(rgb_array, Ny)
# X-direction scaling
x = np.linspace(0, 1, rgb_array.shape[0])
smoothing_x = 4 * (-((x - 0.5) ** 2) + 0.25)
for i in range(rgb_array.shape[0]):
rgb_m[:, i, :] *= smoothing_x[i]
# Rainbow spectra plot
wl_labels = ["%.1f" % i for i in wavelengths[::100]]
fig1, ax1 = plt.subplots(1, 1)
ax1.imshow(rgb_m)
ax1.set_xticks(np.linspace(0, rgb_m.shape[1] - 1, len(wl_labels)))
ax1.set_xticklabels(wl_labels)
plt.show()
@click.command()
@click.option(
"-s",
"--spectra",
type=click.Path(exists=True),
default=Path("spectras2"),
show_default=True,
help="The spectra data folder.",
)
@click.option(
"-o",
"--output_folder",
default=Path("generated_emission_spectras"),
show_default=True,
type=click.Path(exists=True),
help="Path to save spectra at.",
)
@click.option(
"--element_watermark",
default=False,
show_default=True,
is_flag=True,
type=bool,
help=(
"Will add element name and symbol in top left corner of output spectra"
" image."
),
)
@click.option(
"--spectra_option",
default="empirical",
type=click.Choice(SPECTRA_GENERATION_OPTIONS),
show_default=True,
help=("Method to use for generating spectra."),
)
def generate_emission_spectra(
spectra="spectras2",
output_folder="generated_emission_spectras",
element_watermark=False,
spectra_option="empirical",
):
"""Generates emission spectra.
Generates emission spectra based on observed emission spectra. Attempts to
moderate the strength of a line by using the intensity.
Arguments:
spectra {str} -- folder path to spectra sound files.
output_folder {str} -- path to save spectra at.
element_watermark {bool} -- if true, will include the element in the
output upper left corner.
spectra_option {str} -- method to use for generating spectra.
"""
output_folder = Path(output_folder)
spectra = Path(spectra)
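    # Typical invocation (a sketch; both paths must already exist because of
    # the click.Path(exists=True) options above):
    #
    #     python generate_emission_spectra.py -s spectras2 \
    #         -o generated_emission_spectras --spectra_option SRGB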
# Parameters
Ny = 1080
Nx = 1980
wl_start = 380
wl_stop = 750
wavelengths = np.linspace(wl_start, wl_stop, Nx)
if spectra_option == "empirical":
rgb_array = np.zeros((len(wavelengths), 3), dtype=float)
for i in range(len(wavelengths)):
rgb_array[i, :] = wavelength_to_rgb(wavelengths[i], 0.8)
else:
rgb_array = _setup_visible_spectrum(
wavelengths,
color_system=spectra_option,
temperature=3000,
)[-1]
# Sets up image arrays
rgb_m_smoothed = _expand_rgb(rgb_array, Ny)
rgb_m = np.copy(rgb_m_smoothed)
# X-direction scaling
x = np.linspace(0, 1, Nx)
smoothing_x = 4 * (-((x - 0.5) ** 2) + 0.25)
for i in range(Nx):
rgb_m_smoothed[:, i, :] *= smoothing_x[i]
# Y-direction scaling
smoothing_y = np.sin(np.linspace(0, 1, Ny) * np.pi)
for i in range(Ny):
rgb_m_smoothed[i, :, :] *= smoothing_y[i]
# Rainbow spectra plot
wl_labels = ["%.0fnm" % i for i in wavelengths[::100]]
x_ticks = np.linspace(0, Nx - 1, len(wl_labels)).astype(int)
# Sets up interpolator for wavelength to index
wl_to_index = interp1d(wavelengths, np.arange(len(wavelengths)))
check_folder(output_folder)
for element_dict in tqdm(retrieve_viable_elements(spectra)):
# Sets up spectra data path
element_fpath = spectra / (element_dict["short"] + ".dat")
# Returns periodic table notation
element = get_element(element_dict["short"])[-1]
if not element:
            tqdm.write(f"Element not found: {element_fpath}.")
            continue
Sound = Elemental(
element, local_file=str(element_fpath), verbose=False
)
if not Sound.has_spectra:
tqdm.write(f"No spectra exist for {element}.")
continue
# Sound.remove_beat(1e-2)
spectra_wl = Sound.spectra[:, 0]
spectra_intensity = Sound.spectra[:, 1]
# Filters out non-visible lines
spectra_wl = spectra_wl[wl_start < spectra_wl]
spectra_wl = spectra_wl[spectra_wl < wl_stop]
# Interpolates to get spectra indexes. We use the interpolator to
# retrieve the spectra nearest indexes in floats.
spectra_ids = wl_to_index(spectra_wl)
# Sets up interpolation points
spectra_ids_lower = list(map(int, np.floor(spectra_ids)))
spectra_ids_upper = list(map(int, np.ceil(spectra_ids)))
# Removes lines which is too close to each other to be seen.
ids_to_pop = []
spectra_wl_updated = []
spectra_intensity_updated = []
for i in range(spectra_wl.shape[0]):
            if i > 0 and spectra_ids_lower[i] == spectra_ids_lower[i - 1]:
ids_to_pop.append(i)
else:
spectra_wl_updated.append(spectra_wl[i])
spectra_intensity_updated.append(spectra_intensity[i])
# Removes all weights for wl's no longer in use
for i in reversed(ids_to_pop):
del spectra_ids_lower[i]
del spectra_ids_upper[i]
# Updates the spectra wavelengths for the element
spectra_wl = np.array(spectra_wl_updated)
spectra_intensity = np.array(spectra_intensity_updated)
if len(spectra_wl) == 0:
tqdm.write(f"No spectra found for {element}. Saving empty image.")
create_emission_image(
rgb_m_smoothed * 0.25,
x_ticks,
wl_labels,
element_dict,
element_watermark,
output_folder,
)
continue
# Softly normalizes spectra intensity
spectra_intensity = logistic(spectra_intensity)
# Broadcasting the intensity
spectra_intensity_weights = np.empty(
(rgb_m.shape[0], len(spectra_ids_lower), rgb_m.shape[2]),
dtype=float,
)
for i in range(len(spectra_ids_lower)):
spectra_intensity_weights[:, i, :] = spectra_intensity[i]
# Scales the background
spectra_rgb_image = rgb_m_smoothed * 0.25
# Places the spectra RBG in the output image.
spectra_rgb_image[:, spectra_ids_lower, :] = np.maximum(
rgb_m[:, spectra_ids_lower, :] * spectra_intensity_weights,
spectra_rgb_image[:, spectra_ids_lower, :],
)
spectra_rgb_image[:, spectra_ids_upper, :] = np.maximum(
rgb_m[:, spectra_ids_upper, :] * spectra_intensity_weights,
spectra_rgb_image[:, spectra_ids_upper, :],
)
create_emission_image(
spectra_rgb_image,
x_ticks,
wl_labels,
element_dict,
element_watermark,
output_folder,
)
if __name__ == "__main__":
generate_emission_spectra()
# test_color_system(COLOR_SYSTEMS["SMPTE"])
# test_create_visible_spectrum()
# test_create_color_mixing()
| 28.097534 | 79 | 0.555041 |
05b2c854b6fb8475d5e25b46e07a2b415177dc9e | 23184 | py | Python | utils/resizeNetwork.py | ctorney/deepWildCount | 4db03252740ff236ad19b4b0cf474bc940f24137 | [
"MIT"
] | 2 | 2019-11-06T04:28:44.000Z | 2019-12-18T17:29:34.000Z | utils/resizeNetwork.py | ctorney/deepWildCount | 4db03252740ff236ad19b4b0cf474bc940f24137 | [
"MIT"
] | null | null | null | utils/resizeNetwork.py | ctorney/deepWildCount | 4db03252740ff236ad19b4b0cf474bc940f24137 | [
"MIT"
] | 1 | 2021-01-13T15:11:52.000Z | 2021-01-13T15:11:52.000Z | import argparse
import os
import numpy as np
from keras.layers import Conv2D, Input, BatchNormalization, LeakyReLU, ZeroPadding2D, UpSampling2D
from keras.layers.merge import add, concatenate
from keras.models import Model
import struct
import cv2
from keras.utils import plot_model
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"]=""
argparser = argparse.ArgumentParser(
description='test yolov3 network with coco weights')
argparser.add_argument(
'-i',
'--image',
help='path to image file')
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, objness = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.objness = objness
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def _conv_block(inp, convs, skip=True):
x = inp
count = 0
for conv in convs:
if count == (len(convs) - 2) and skip:
skip_connection = x
count += 1
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding as darknet prefers left and top
x = Conv2D(conv['filter'],
conv['kernel'],
strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding as darknet prefers left and top
name='conv_' + str(conv['layer_idx']),
use_bias=False if conv['bnorm'] else True)(x)
if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
return add([skip_connection, x]) if skip else x
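
# A usage sketch (the layer dictionary is illustrative, mirroring the ones
# used in make_yolov3_model below; `input_image` is assumed to be a Keras
# tensor):
#
#     x = _conv_block(input_image,
#                     [{'filter': 32, 'kernel': 3, 'stride': 1,
#                       'bnorm': True, 'leaky': True, 'layer_idx': 0}],
#                     skip=False)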
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
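
# A worked sketch (coordinates are arbitrary): two axis-aligned 2x2 boxes
# offset by (1, 1) overlap in a 1x1 square, so IoU = 1 / (4 + 4 - 1) = 1/7:
#
#     box_a = BoundBox(0, 0, 2, 2)
#     box_b = BoundBox(1, 1, 3, 3)
#     bbox_iou(box_a, box_b)   # -> 0.142857...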
def make_yolov3_model():
input_image = Input(shape=( 416,416, 3))
# Layer 0 => 4
x = _conv_block(input_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
{'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
{'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
{'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])
# Layer 5 => 8
x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])
# Layer 9 => 11
x = _conv_block(x, [{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])
# Layer 12 => 15
x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])
# Layer 16 => 36
for i in range(7):
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])
skip_36 = x
# Layer 37 => 40
x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])
# Layer 41 => 61
for i in range(7):
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])
skip_61 = x
# Layer 62 => 65
x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])
# Layer 66 => 74
for i in range(3):
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])
# Layer 75 => 79
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False)
# Layer 80 => 82
yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}], skip=False)
# Layer 83 => 86
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_61])
# Layer 87 => 91
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False)
# Layer 92 => 94
yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}], skip=False)
# Layer 95 => 98
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_36])
# Layer 99 => 106
yolo_106 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}], skip=False)
model = Model(input_image, [yolo_82, yolo_94, yolo_106])
return model
def small_yolov3_model():
input_image = Input(shape=( 416,416, 3))
# Layer 0 => 4
x = _conv_block(input_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
{'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
{'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
{'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])
# Layer 5 => 8
x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])
# Layer 9 => 11
x = _conv_block(x, [{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])
# Layer 12 => 15
x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])
# Layer 16 => 36
for i in range(7):
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])
skip_36 = x
# Layer 37 => 40
x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])
# Layer 41 => 61
for i in range(7):
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])
skip_61 = x
# Layer 62 => 65
x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])
# Layer 66 => 74
for i in range(3):
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])
# Layer 75 => 79
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False)
# Layer 80 => 82
yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
{'filter': 6, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}], skip=False)
# Layer 83 => 86
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_61])
# Layer 87 => 91
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False)
# Layer 92 => 94
yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
{'filter': 6, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}], skip=False)
# Layer 95 => 98
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_36])
# Layer 99 => 106
yolo_106 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},
{'filter': 6, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}], skip=False)
model = Model(input_image, [yolo_82, yolo_94, yolo_106])
return model
def preprocess_input(image, net_h, net_w):
new_h, new_w, _ = image.shape
# determine the new size of the image
if (float(net_w)/new_w) < (float(net_h)/new_h):
new_h = (new_h * net_w)/new_w
new_w = net_w
else:
new_w = (new_w * net_h)/new_h
new_h = net_h
new_w = int(new_w)
new_h = int(new_h)
# resize the image to the new size
resized = cv2.resize(image[:,:,::-1]/255., (int(new_w), int(new_h)))
# embed the image into the standard letter box
new_image = np.ones((net_h, net_w, 3)) * 0.5
new_image[(net_h-new_h)//2:(net_h+new_h)//2, (net_w-new_w)//2:(net_w+new_w)//2, :] = resized
new_image = np.expand_dims(new_image, 0)
return new_image
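
# A usage sketch (the file name is a hypothetical example): an HxWx3 BGR
# frame from cv2.imread is letterboxed into a (1, 416, 416, 3) float batch
# scaled to [0, 1]:
#
#     batch = preprocess_input(cv2.imread('frame.jpg'), 416, 416)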
def decode_netout(netout, anchors, obj_thresh, nms_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4:] = _sigmoid(netout[..., 4:])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h*grid_w):
row = i // grid_w
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness score
objectness = netout[row, col, b, 4]
if(objectness <= obj_thresh): continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[row,col,b,:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = netout[row,col,b,5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
boxes.append(box)
return boxes
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
if (float(net_w)/image_w) < (float(net_h)/image_h):
new_w = net_w
new_h = (image_h*net_w)/image_w
else:
new_h = net_w
new_w = (image_w*net_h)/image_h
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w
y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
def draw_boxes(image, boxes, labels, obj_thresh):
for box in boxes:
label_str = ''
label = -1
# bx_area = (box.xmin-box.xmax)**2+(box.ymin-box.ymax)**2
# print(bx_area)
#if ((box.xmax-box.xmin)*(box.ymax-box.ymin))>(64*64):
# continue
for i in range(len(labels)):
if box.classes[i] > obj_thresh:
label_str += labels[i]
print(label_str)
label = i
print(labels[i] + ': ' + str(box.classes[i]*100) + '%')
if label >= 0:
cv2.rectangle(image, (box.xmin,box.ymin), (box.xmax,box.ymax), (0,255,0), 1)
cv2.putText(image,
label_str + ' ' + str(box.get_score()),
(box.xmin, box.ymin - 13),
cv2.FONT_HERSHEY_SIMPLEX,
1e-3 * image.shape[0],
(0,255,0), 2)
return image
def _main_(args):
image_path = args.image
# set some parameters
net_h, net_w = 416, 416
obj_thresh, nms_thresh = 0.5, 0.45
anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", \
"boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", \
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", \
"backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", \
"sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", \
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", \
"apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", \
"chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", \
"remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", \
"book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
# make the yolov3 model to predict 80 classes on COCO
yolov3 = make_yolov3_model()
yolov3.load_weights('../weights/yolo-v3-coco.h5')
model = small_yolov3_model()
for i in range(106):
#print("loading weights of convolution #" + str(i))
if i in [81, 93, 105]:
layer = model.get_layer('conv_' + str(i))
print(layer.get_config(), layer.get_weights()[0].shape, layer.get_weights()[1].shape)
for i in range(106):
#print("loading weights of convolution #" + str(i))
if i in [81, 93, 105]:
layer = yolov3.get_layer('conv_' + str(i))
print(layer.get_config(), layer.get_weights()[0].shape, layer.get_weights()[1].shape)
# preprocess the image
image = cv2.imread(image_path)
image_h, image_w, _ = image.shape
new_image = preprocess_input(image, net_h, net_w)
# run the prediction
yolos = yolov3.predict(new_image)
boxes = []
#print(yolos.shape)
print(len(yolos))
for i in range(len(yolos)):
# decode the output of the network
boxes += decode_netout(yolos[i][0], anchors[i], obj_thresh, nms_thresh, net_h, net_w)
# correct the sizes of the bounding boxes
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
# suppress non-maximal boxes
do_nms(boxes, nms_thresh)
# draw bounding boxes on the image using labels
draw_boxes(image, boxes, labels, obj_thresh)
# write the image with bounding boxes to file
cv2.imwrite(image_path[:-4] + '_detected' + image_path[-4:], (image).astype('uint8'))
if __name__ == '__main__':
args = argparser.parse_args()
_main_(args)
| 47.508197 | 136 | 0.520057 |
6a11c3f5c9a1f2e6be51729c2754fba5000ee5c8 | 1300 | py | Python | model.py | googleglass/mirror-catfacts-python | 31d957cf9236128f8f4606668ce168b5e1470030 | [
"Apache-2.0"
] | 4 | 2016-10-13T22:17:52.000Z | 2020-08-08T18:29:23.000Z | model.py | googleglass/mirror-catfacts-python | 31d957cf9236128f8f4606668ce168b5e1470030 | [
"Apache-2.0"
] | null | null | null | model.py | googleglass/mirror-catfacts-python | 31d957cf9236128f8f4606668ce168b5e1470030 | [
"Apache-2.0"
] | 5 | 2015-02-21T09:04:13.000Z | 2020-02-02T00:01:38.000Z | #!/usr/bin/python
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'alainv@google.com (Alain Vongsouvanh)'
from google.appengine.ext import db
from oauth2client.appengine import CredentialsProperty
class UserSettings(db.Model):
    """Datastore entity for storing per-user settings and OAuth 2.0 credentials.
The CredentialsProperty is provided by the Google API Python Client, and is
used by the Storage classes to store OAuth 2.0 credentials in the data store.
"""
credentials = CredentialsProperty()
night_mode = db.BooleanProperty(default=False)
frequency = db.IntegerProperty(default=15)
timezone_offset = db.IntegerProperty(default=240)
def to_dict(self):
return {
'nightMode': self.night_mode,
'frequency': self.frequency
}
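
# A usage sketch (the key name is a hypothetical user id, not part of the
# original handlers):
#
#     settings = UserSettings.get_or_insert('some-user-id')
#     settings.frequency = 30
#     settings.put()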
| 31.707317 | 79 | 0.75 |
3696ece3c3bd4e356ed1e9c02d26a2abe4cc5885 | 7667 | py | Python | Tests/pens/pointInsidePen_test.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 240 | 2021-01-11T14:49:24.000Z | 2022-03-29T22:33:49.000Z | Tests/pens/pointInsidePen_test.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 77 | 2021-01-12T20:23:30.000Z | 2022-03-28T12:14:34.000Z | Tests/pens/pointInsidePen_test.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 28 | 2021-01-17T05:44:11.000Z | 2022-01-11T19:58:46.000Z | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.pens.pointInsidePen import PointInsidePen
import unittest
class PointInsidePenTest(unittest.TestCase):
def test_line(self):
def draw_triangles(pen):
pen.moveTo((0,0)); pen.lineTo((10,5)); pen.lineTo((10,0))
pen.moveTo((9,1)); pen.lineTo((4,1)); pen.lineTo((9,4))
pen.closePath()
self.assertEqual(
" *********"
" ** *"
" ** *"
" * *"
" *",
self.render(draw_triangles, even_odd=True))
self.assertEqual(
" *********"
" *******"
" *****"
" ***"
" *",
self.render(draw_triangles, even_odd=False))
def test_curve(self):
def draw_curves(pen):
pen.moveTo((0,0)); pen.curveTo((9,1), (9,4), (0,5))
pen.moveTo((10,5)); pen.curveTo((1,4), (1,1), (10,0))
pen.closePath()
self.assertEqual(
"*** ***"
"**** ****"
"*** ***"
"**** ****"
"*** ***",
self.render(draw_curves, even_odd=True))
self.assertEqual(
"*** ***"
"**********"
"**********"
"**********"
"*** ***",
self.render(draw_curves, even_odd=False))
def test_qCurve(self):
def draw_qCurves(pen):
pen.moveTo((0,0)); pen.qCurveTo((15,2), (0,5))
pen.moveTo((10,5)); pen.qCurveTo((-5,3), (10,0))
pen.closePath()
self.assertEqual(
"*** **"
"**** ***"
"*** ***"
"*** ****"
"** ***",
self.render(draw_qCurves, even_odd=True))
self.assertEqual(
"*** **"
"**********"
"**********"
"**********"
"** ***",
self.render(draw_qCurves, even_odd=False))
@staticmethod
def render(draw_function, even_odd):
result = BytesIO()
for y in range(5):
for x in range(10):
pen = PointInsidePen(None, (x + 0.5, y + 0.5), even_odd)
draw_function(pen)
if pen.getResult():
result.write(b"*")
else:
result.write(b" ")
return tounicode(result.getvalue())
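    # Standalone usage sketch of PointInsidePen outside these tests
    # (`glyph` is a hypothetical object exposing a draw(pen) method):
    #   pen = PointInsidePen(None, (100, 200), evenOdd=False)
    #   glyph.draw(pen)
    #   is_inside = pen.getResult()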
def test_contour_no_solutions(self):
def draw_contour(pen):
pen.moveTo( (969, 230) )
pen.curveTo( (825, 348) , (715, 184) , (614, 202) )
pen.lineTo( (614, 160) )
pen.lineTo( (969, 160) )
pen.closePath()
piPen = PointInsidePen(None, (750, 295)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
self.assertEqual(piPen.getResult(), False)
piPen = PointInsidePen(None, (835, 190)) # this point is inside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 1)
self.assertEqual(piPen.getResult(), True)
def test_contour_square_closed(self):
def draw_contour(pen):
pen.moveTo( (100, 100) )
pen.lineTo( (-100, 100) )
pen.lineTo( (-100, -100) )
pen.lineTo( (100, -100) )
pen.closePath()
piPen = PointInsidePen(None, (0, 0)) # this point is inside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 1)
self.assertEqual(piPen.getResult(), True)
def test_contour_square_opened(self):
def draw_contour(pen):
pen.moveTo( (100, 100) )
pen.lineTo( (-100, 100) )
pen.lineTo( (-100, -100) )
pen.lineTo( (100, -100) )
# contour not explicitly closed
piPen = PointInsidePen(None, (0, 0)) # this point is inside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 1)
self.assertEqual(piPen.getResult(), True)
def test_contour_circle(self):
def draw_contour(pen):
pen.moveTo( (0, 100) )
pen.curveTo( (-55, 100) , (-100, 55) , (-100, 0) )
pen.curveTo( (-100, -55) , (-55, -100) , (0, -100) )
pen.curveTo( (55, -100) , (100, -55) , (100, 0) )
pen.curveTo( (100, 55) , (55, 100) , (0, 100) )
piPen = PointInsidePen(None, (50, 50)) # this point is inside
draw_contour(piPen)
self.assertEqual(piPen.getResult(), True)
piPen = PointInsidePen(None, (50, -50)) # this point is inside
draw_contour(piPen)
self.assertEqual(piPen.getResult(), True)
def test_contour_diamond(self):
def draw_contour(pen):
pen.moveTo( (0, 100) )
pen.lineTo( (100, 0) )
pen.lineTo( (0, -100) )
pen.lineTo( (-100, 0) )
pen.closePath()
piPen = PointInsidePen(None, (-200, 0)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
piPen = PointInsidePen(None, (-200, 100)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
piPen = PointInsidePen(None, (-200, -100)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
piPen = PointInsidePen(None, (-200, 50)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
def test_contour_integers(self):
def draw_contour(pen):
pen.moveTo( (728, 697) )
pen.lineTo( (504, 699) )
pen.curveTo( (487, 719) , (508, 783) , (556, 783) )
pen.lineTo( (718, 783) )
pen.curveTo( (739, 783) , (749, 712) , (728, 697) )
pen.closePath()
piPen = PointInsidePen(None, (416, 783)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
def test_contour_decimals(self):
def draw_contour(pen):
pen.moveTo( (727.546875, 697.0) )
pen.lineTo( (504.375, 698.515625) )
pen.curveTo( (487.328125, 719.359375), (507.84375, 783.140625), (555.796875, 783.140625) )
pen.lineTo( (717.96875, 783.140625) )
pen.curveTo( (738.890625, 783.140625), (748.796875, 711.5), (727.546875, 697.0) )
pen.closePath()
piPen = PointInsidePen(None, (416.625, 783.140625)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
def test_contour2_integers(self):
def draw_contour(pen):
pen.moveTo( (51, 22) )
pen.lineTo( (51, 74) )
pen.lineTo( (83, 50) )
pen.curveTo( (83, 49) , (82, 48) , (82, 47) )
pen.closePath()
piPen = PointInsidePen(None, (21, 50)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
def test_contour2_decimals(self):
def draw_contour(pen):
pen.moveTo( (51.25, 21.859375) )
pen.lineTo( (51.25, 73.828125) )
pen.lineTo( (82.5, 50.0) )
pen.curveTo( (82.5, 49.09375) , (82.265625, 48.265625) , (82.234375, 47.375) )
pen.closePath()
piPen = PointInsidePen(None, (21.25, 50.0)) # this point is outside
draw_contour(piPen)
self.assertEqual(piPen.getWinding(), 0)
if __name__ == "__main__":
import sys
sys.exit(unittest.main())
| 33.924779 | 102 | 0.503326 |
742651417dced8c648167ae1ed684c851b1d6556 | 7,937 | py | Python | custom_components/apple_tv/media_player.py | elsingaa/Home-Assistant-Config | d20a2dabf5deeef8f087fa6d34a371617da9c4cd | [
"MIT"
] | 1 | 2019-12-22T12:59:01.000Z | 2019-12-22T12:59:01.000Z | custom_components/apple_tv/media_player.py | elsingaa/Home-Assistant-Config | d20a2dabf5deeef8f087fa6d34a371617da9c4cd | [
"MIT"
] | 28 | 2019-12-22T10:33:13.000Z | 2020-08-09T04:46:46.000Z | custom_components/apple_tv/media_player.py | elsingaa/Home-Assistant-Config | d20a2dabf5deeef8f087fa6d34a371617da9c4cd | [
"MIT"
] | null | null | null | """Support for Apple TV media player."""
import logging
from pyatv.const import DeviceState, MediaType
from homeassistant.components.media_player import MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
MEDIA_TYPE_VIDEO,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.const import (
CONF_NAME,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNKNOWN,
)
from homeassistant.core import callback
import homeassistant.util.dt as dt_util
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 0
SUPPORT_APPLE_TV = (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_SEEK
| SUPPORT_STOP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Load Apple TV media player based on a config entry."""
identifier = config_entry.unique_id
name = config_entry.data[CONF_NAME]
manager = hass.data[DOMAIN][config_entry.unique_id]
async_add_entities([AppleTvDevice(name, identifier, manager)])
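# Sketch of the connection-manager interface assumed below (inferred from
# usage in this module; the class itself is defined elsewhere):
#   manager.atv            -> pyatv interface, or None while disconnected
#   manager.is_connecting  -> bool
#   manager.message        -> status text shown while nothing is playing
#   manager.listeners      -> objects notified on (dis)connect
#   await manager.init() / manager.connect() / manager.disconnect()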
class AppleTvDevice(MediaPlayerDevice):
"""Representation of an Apple TV device."""
def __init__(self, name, identifier, manager):
"""Initialize the Apple TV device."""
self.atv = None
self._name = name
self._identifier = identifier
self._playing = None
self._manager = manager
async def async_added_to_hass(self):
"""Handle when an entity is about to be added to Home Assistant."""
self._manager.listeners.append(self)
await self._manager.init()
async def async_will_remove_from_hass(self):
"""Handle when an entity is about to be removed from Home Assistant."""
self._manager.listeners.remove(self)
@callback
def device_connected(self):
"""Handle when connection is made to device."""
self.atv = self._manager.atv
self.atv.push_updater.listener = self
@callback
def device_disconnected(self):
"""Handle when connection was lost to device."""
self.atv.push_updater.listener = None
self.atv = None
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(DOMAIN, self._identifier)},
"manufacturer": "Apple",
"model": "Media Player",
"name": self.name,
"sw_version": "0.0",
"via_device": (DOMAIN, self._identifier),
}
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self):
"""Return a unique ID."""
return f"mp_{self._identifier}"
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def state(self):
"""Return the state of the device."""
if self._manager.is_connecting:
return STATE_UNKNOWN
if self.atv is None:
return STATE_OFF
if self._playing:
state = self._playing.device_state
if state in (DeviceState.Idle, DeviceState.Loading):
return STATE_IDLE
if state == DeviceState.Playing:
return STATE_PLAYING
if state in (DeviceState.Paused, DeviceState.Seeking, DeviceState.Stopped):
return STATE_PAUSED
return STATE_STANDBY # Bad or unknown state?
@callback
def playstatus_update(self, _, playing):
"""Print what is currently playing when it changes."""
self._playing = playing
self.async_schedule_update_ha_state()
@callback
def playstatus_error(self, _, exception):
"""Inform about an error and restart push updates."""
_LOGGER.warning("A %s error occurred: %s", exception.__class__, exception)
self._playing = None
self.async_schedule_update_ha_state()
@property
def media_content_type(self):
"""Content type of current playing media."""
if self._playing:
return {
MediaType.Video: MEDIA_TYPE_VIDEO,
MediaType.Music: MEDIA_TYPE_MUSIC,
MediaType.TV: MEDIA_TYPE_TVSHOW,
}.get(self._playing.media_type)
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._playing:
return self._playing.total_time
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._playing:
return self._playing.position
@property
def media_position_updated_at(self):
"""Last valid time of media position."""
if self.state in (STATE_PLAYING, STATE_PAUSED):
return dt_util.utcnow()
async def async_play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player."""
await self.atv.stream.play_url(media_id)
@property
def media_image_hash(self):
"""Hash value for media image."""
state = self.state
if self._playing and state not in [STATE_UNKNOWN, STATE_OFF, STATE_IDLE]:
return self.atv.metadata.artwork_id
async def async_get_media_image(self):
"""Fetch media image of current playing image."""
state = self.state
if self._playing and state not in [STATE_OFF, STATE_IDLE]:
artwork = await self.atv.metadata.artwork()
if artwork:
return artwork.bytes, artwork.mimetype
return None, None
@property
def media_title(self):
"""Title of current playing media."""
if self._playing:
if self.state == STATE_IDLE:
return "Nothing playing"
title = self._playing.title
return title if title else "No title"
return self._manager.message
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_APPLE_TV
async def async_turn_on(self):
"""Turn the media player on."""
await self._manager.connect()
async def async_turn_off(self):
"""Turn the media player off."""
self._playing = None
await self._manager.disconnect()
async def async_media_play_pause(self):
"""Pause media on media player."""
if self._playing:
state = self.state
if state == STATE_PAUSED:
await self.atv.remote_control.play()
elif state == STATE_PLAYING:
await self.atv.remote_control.pause()
async def async_media_play(self):
"""Play media."""
if self._playing:
await self.atv.remote_control.play()
async def async_media_stop(self):
"""Stop the media player."""
if self._playing:
await self.atv.remote_control.stop()
async def async_media_pause(self):
"""Pause the media player."""
if self._playing:
await self.atv.remote_control.pause()
async def async_media_next_track(self):
"""Send next track command."""
if self._playing:
await self.atv.remote_control.next()
async def async_media_previous_track(self):
"""Send previous track command."""
if self._playing:
await self.atv.remote_control.previous()
async def async_media_seek(self, position):
"""Send seek command."""
if self._playing:
await self.atv.remote_control.set_position(position)
| 30.178707 | 87 | 0.632859 |
cc70fa1d40c68890ead2786cedee7873bdf9401c | 114 | py | Python | businesswebsite/services/urls.py | luckdeluxe/business-website | 78993636e156506aced55e44385d0e5778414a50 | [
"MIT"
] | 1 | 2021-12-17T02:04:16.000Z | 2021-12-17T02:04:16.000Z | businesswebsite/services/urls.py | luckdeluxe/business-website | 78993636e156506aced55e44385d0e5778414a50 | [
"MIT"
] | 15 | 2019-05-27T19:23:50.000Z | 2022-03-11T23:53:17.000Z | webempresa/services/urls.py | JulioAlbertoTum/web-emp-django2 | f4ee48885f5f0166d3620c27569f7cbcaf997561 | [
"MIT"
] | 1 | 2018-09-24T12:10:19.000Z | 2018-09-24T12:10:19.000Z | from django.urls import path
from . import views
urlpatterns = [
path('', views.services, name="services"),
] | 19 | 46 | 0.692982 |
2dd4937079c76048535221114bdb61c8ec69907b | 6,851 | py | Python | tests/statemachine_test.py | FelixSchwarz/pymta | ac9b3e23e136bc1387fb9c359eabe6087e5264c8 | [
"MIT"
] | null | null | null | tests/statemachine_test.py | FelixSchwarz/pymta | ac9b3e23e136bc1387fb9c359eabe6087e5264c8 | [
"MIT"
] | 6 | 2018-10-11T09:01:48.000Z | 2020-10-07T08:15:40.000Z | tests/statemachine_test.py | FelixSchwarz/pymta | ac9b3e23e136bc1387fb9c359eabe6087e5264c8 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import print_function, unicode_literals
from pythonic_testcase import *
from pymta.statemachine import StateMachine, StateMachineDefinitionError, \
StateMachineError
class StateMachineTest(PythonicTestCase):
def setUp(self):
self.state = StateMachine(initial_state='new')
def test_can_initialize_statemachine(self):
StateMachine(initial_state='foo')
# --- adding states ------------------------------------------------------
def test_can_add_states(self):
self.state.add('new', 'processed', 'process')
self.state.add('new', 'new', 'noop')
def test_raise_exception_if_duplicate_action_is_defined(self):
self.state.add('new', 'processed', 'process')
with assert_raises(StateMachineDefinitionError):
self.state.add('new', 'new', 'process')
# --- introspection ------------------------------------------------------
def test_can_ask_for_current_state(self):
state = StateMachine(initial_state='foo')
state.add('foo', 'foo', 'noop')
assert_equals('foo', state.state())
assert_false(state.is_impossible_state())
def test_no_state_if_initial_state_not_available(self):
state = StateMachine(initial_state='invalid')
assert_none(state.state())
assert_true(state.is_impossible_state())
def test_can_ask_for_all_known_actions(self):
self.state.add('new', 'new', 'noop')
self.state.add('new', 'processed', 'process')
self.state.add('processed', 'new', 'rework')
assert_equals(set(('noop', 'process', 'rework')), self.state.known_actions())
def test_can_ask_for_all_currently_allowed_actions(self):
self.state.add('new', 'new', 'noop')
self.state.add('new', 'processed', 'process')
self.state.add('processed', 'new', 'rework')
assert_equals(set(('noop', 'process')), self.state.allowed_actions())
self.state.set_state('processed')
assert_equals(set(('rework',)), self.state.allowed_actions())
def test_can_ask_for_all_known_states(self):
assert_equals(set(), self.state.known_states())
self.state.add('new', 'processed', 'process')
self.state.add('processed', 'done', 'finalize')
assert_equals(set(('new', 'processed', 'done')), self.state.known_states())
def test_can_ask_for_all_non_final_states(self):
assert_equals(set(), self.state.known_non_final_states())
self.state.add('new', 'processed', 'process')
self.state.add('processed', 'done', 'finalize')
assert_equals(set(('new', 'processed')), self.state.known_non_final_states())
# --- handling states ----------------------------------------------------
def test_can_not_set_state_to_invalid_state(self):
with assert_raises(StateMachineError):
self.state.set_state('invalid')
# --- executing ----------------------------------------------------------
def test_can_execute_states(self):
self.state.add('new', 'processed', 'process')
self.state.execute('process')
assert_equals('processed', self.state.state())
def test_handler_is_called_for_state_transition(self):
self._transition = None
def handler(from_state, to_state, action_name):
self._transition = (from_state, to_state, action_name)
self.state.add('new', 'new', 'noop', handler)
self.state.execute('noop')
assert_equals('new', self.state.state())
assert_equals(('new', 'new', 'noop'), self._transition)
def test_raise_exception_for_invalid_action(self):
self.state.add('new', 'processed', 'process')
with assert_raises(StateMachineError):
self.state.execute('invalid')
self.state.add('processed', 'new', 'rework')
with assert_raises(StateMachineError):
self.state.execute('rework')
self.state.execute('process')
with assert_raises(StateMachineError):
self.state.execute('process')
self.state.execute('rework')
def test_raise_exception_if_in_impossible_state(self):
state = StateMachine(initial_state='invalid')
state.add('new', 'processed', 'process')
with assert_raises(StateMachineError):
self.state.execute('process')
def test_raise_exception_if_no_outgoing_transition_defined_when_executing(self):
self.state.add('new', 'processed', 'process')
self.state.set_state('processed')
with assert_raises(StateMachineError):
self.state.execute('rework')
# --- transition with operations and conditions --------------------------
def test_can_add_transition_with_additional_operation(self):
self.state.add('new', 'processed', 'process', operations=('set_foo',))
def test_can_tell_if_flag_is_set(self):
assert_false(self.state.is_set(None))
assert_false(self.state.is_set('foo'))
def test_transition_can_also_set_flags(self):
self.state.add('new', 'processed', 'process', operations=('set_foo',))
assert_false(self.state.is_set('foo'))
self.state.execute('process')
assert_true(self.state.is_set('foo'))
def test_can_add_conditional_transition(self):
self.state.add('new', 'authenticated', 'authenticate', condition='if_tls')
def test_allowed_actions_obeys_condition(self):
self.state.add('new', 'new', 'use_tls', operations=('set_tls',))
self.state.add('new', 'authenticated', 'authenticate', condition='if_tls')
assert_equals(set(('use_tls',)), self.state.allowed_actions())
self.state.execute('use_tls')
assert_equals(set(('use_tls', 'authenticate')), self.state.allowed_actions())
def test_conditional_transition_is_only_executed_if_flag_is_true(self):
self.state.add('new', 'new', 'use_tls', operations=('set_tls',))
self.state.add('new', 'authenticated', 'authenticate', condition='if_tls')
assert_equals('new', self.state.state())
with assert_raises(StateMachineError):
self.state.execute('authenticate')
self.state.execute('use_tls')
assert_true(self.state.is_set('tls'))
self.state.execute('authenticate')
def test_can_also_specify_negative_flag_checks_for_transitions(self):
self.state.add('new', 'new', 'use_tls', operations=('set_tls',), condition='if_not_tls')
self.state.add('new', 'authenticated', 'authenticate', condition='if_tls')
with assert_raises(StateMachineError):
self.state.execute('authenticate')
self.state.execute('use_tls')
with assert_raises(StateMachineError):
self.state.execute('use_tls')
self.state.execute('authenticate')
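# Condensed usage sketch of the StateMachine API exercised by these tests:
#   sm = StateMachine(initial_state='new')
#   sm.add('new', 'processed', 'process')
#   sm.execute('process')
#   assert sm.state() == 'processed'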
| 41.023952 | 96 | 0.643848 |
be0cd770a4918fd64a0dd583c51d264ea1d2a0ac | 18,185 | py | Python | var/spack/repos/builtin/packages/perl/package.py | fluidnumerics-joe/spack | 7b8db35410e80cc2e9908a15b9f4df4863f60d0f | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/perl/package.py | fluidnumerics-joe/spack | 7b8db35410e80cc2e9908a15b9f4df4863f60d0f | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 18 | 2021-03-12T16:22:58.000Z | 2022-03-02T17:07:08.000Z | var/spack/repos/builtin/packages/perl/package.py | fluidnumerics-joe/spack | 7b8db35410e80cc2e9908a15b9f4df4863f60d0f | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# Author: Milton Woods <milton.woods@bom.gov.au>
# Date: March 22, 2017
# Author: George Hartzell <hartzell@alerce.com>
# Date: July 21, 2016
# Author: Justin Too <justin@doubleotoo.com>
# Date: September 6, 2015
#
import os
import re
from contextlib import contextmanager
from llnl.util.lang import match_predicate
from spack import *
class Perl(Package): # Perl doesn't use Autotools, it should subclass Package
"""Perl 5 is a highly capable, feature-rich programming language with over
27 years of development."""
homepage = "https://www.perl.org"
# URL must remain http:// so Spack can bootstrap curl
url = "http://www.cpan.org/src/5.0/perl-5.34.0.tar.gz"
executables = [r'^perl(-?\d+.*)?$']
# see https://www.cpan.org/src/README.html for
# explanation of version numbering scheme
# Development releases (odd numbers)
version('5.35.0', sha256='d6c0eb4763d1c73c1d18730664d43fcaf6100c31573c3b81e1504ec8f5b22708')
version('5.33.3', sha256='4f4ba0aceb932e6cf7c05674d05e51ef759d1c97f0685dee65a8f3d190f737cd')
version('5.31.7', sha256='d05c4e72128f95ef6ffad42728ecbbd0d9437290bf0f88268b51af011f26b57d')
version('5.31.4', sha256='418a7e6fe6485cc713a86d1227ef112f0bb3f80322e3b715ffe42851d97804a5')
# Maintenance releases (even numbers, recommended)
version('5.34.0', sha256='551efc818b968b05216024fb0b727ef2ad4c100f8cb6b43fab615fa78ae5be9a', preferred=True)
version('5.32.1', sha256='03b693901cd8ae807231b1787798cf1f2e0b8a56218d07b7da44f784a7caeb2c')
version('5.32.0', sha256='efeb1ce1f10824190ad1cadbcccf6fdb8a5d37007d0100d2d9ae5f2b5900c0b4')
version('5.30.3', sha256='32e04c8bb7b1aecb2742a7f7ac0eabac100f38247352a73ad7fa104e39e7406f')
version('5.30.2', sha256='66db7df8a91979eb576fac91743644da878244cf8ee152f02cd6f5cd7a731689')
version('5.30.1', sha256='bf3d25571ff1ee94186177c2cdef87867fd6a14aa5a84f0b1fb7bf798f42f964')
version('5.30.0', sha256='851213c754d98ccff042caa40ba7a796b2cee88c5325f121be5cbb61bbf975f2')
# End of life releases
version('5.28.0', sha256='7e929f64d4cb0e9d1159d4a59fc89394e27fa1f7004d0836ca0d514685406ea8')
version('5.26.2', sha256='572f9cea625d6062f8a63b5cee9d3ee840800a001d2bb201a41b9a177ab7f70d')
version('5.24.1', sha256='e6c185c9b09bdb3f1b13f678999050c639859a7ef39c8cad418448075f5918af')
version('5.22.4', sha256='ba9ef57c2b709f2dad9c5f6acf3111d9dfac309c484801e0152edbca89ed61fa')
version('5.22.3', sha256='1b351fb4df7e62ec3c8b2a9f516103595b2601291f659fef1bbe3917e8410083')
version('5.22.2', sha256='81ad196385aa168cb8bd785031850e808c583ed18a7901d33e02d4f70ada83c2')
version('5.22.1', sha256='2b475d0849d54c4250e9cba4241b7b7291cffb45dfd083b677ca7b5d38118f27')
version('5.22.0', sha256='0c690807f5426bbd1db038e833a917ff00b988bf03cbf2447fa9ffdb34a2ab3c')
version('5.20.3', sha256='3524e3a76b71650ab2f794fd68e45c366ec375786d2ad2dca767da424bbb9b4a')
version('5.18.4', sha256='01a4e11a9a34616396c4a77b3cef51f76a297e1a2c2c490ae6138bf0351eb29f')
version('5.16.3', sha256='69cf08dca0565cec2c5c6c2f24b87f986220462556376275e5431cc2204dedb6')
extendable = True
# Bind us below gdbm-1.20 due to API change: https://github.com/Perl/perl5/issues/18915
depends_on('gdbm@:1.19')
# :5.28 needs gdbm@:1:14.1: https://rt-archive.perl.org/perl5/Ticket/Display.html?id=133295
depends_on('gdbm@:1.14.1', when='@:5.28.0')
depends_on('berkeley-db')
depends_on('bzip2')
depends_on('zlib')
# :5.24.1 needs zlib@:1.2.8: https://rt.cpan.org/Public/Bug/Display.html?id=120134
depends_on('zlib@:1.2.8', when='@5.20.3:5.24.1')
# there has been a long fixed issue with 5.22.0 with regard to the ccflags
# definition. It is well documented here:
# https://rt.perl.org/Public/Bug/Display.html?id=126468
patch('protect-quotes-in-ccflags.patch', when='@5.22.0')
    # Fix the Time-Local test case: http://blogs.perl.org/users/tom_wyant/2020/01/my-y2020-bug.html
patch('https://rt.cpan.org/Public/Ticket/Attachment/1776857/956088/0001-Fix-Time-Local-tests.patch',
when='@5.26.0:5.28.9',
sha256='8cf4302ca8b480c60ccdcaa29ec53d9d50a71d4baf469ac8c6fca00ca31e58a2')
patch('https://raw.githubusercontent.com/costabel/fink-distributions/master/10.9-libcxx/stable/main/finkinfo/languages/perl5162-timelocal-y2020.patch',
when='@:5.24.1',
sha256='3bbd7d6f9933d80b9571533867b444c6f8f5a1ba0575bfba1fba4db9d885a71a')
# Fix build on Fedora 28
# https://bugzilla.redhat.com/show_bug.cgi?id=1536752
patch('https://src.fedoraproject.org/rpms/perl/raw/004cea3a67df42e92ffdf4e9ac36d47a3c6a05a4/f/perl-5.26.1-guard_old_libcrypt_fix.patch', level=1, sha256='0eac10ed90aeb0459ad8851f88081d439a4e41978e586ec743069e8b059370ac', when='@:5.26.2')
# Fix 'Unexpected product version' error on macOS 11.0 Big Sur
# https://github.com/Perl/perl5/pull/17946
patch('macos-11-version-check.patch', when='@5.24.1:5.32.0 platform=darwin')
# Enable builds with the NVIDIA compiler
# The Configure script assumes some gcc specific behavior, and use
# the mini Perl environment to bootstrap installation.
patch('nvhpc-5.30.patch', when='@5.30.0:5.30.99 %nvhpc')
patch('nvhpc-5.32.patch', when='@5.32.0:5.32.99 %nvhpc')
conflicts('@5.32.0:', when='%nvhpc@:20.11',
msg='The NVIDIA compilers are incompatible with version 5.32 and later')
# Make sure we don't get "recompile with -fPIC" linker errors when using static libs
conflicts('^zlib~shared~pic', msg='Needs position independent code when using static zlib')
conflicts('^bzip2~shared~pic', msg='Needs position independent code when using static bzip2')
# Installing cpanm alongside the core makes it safe and simple for
# people/projects to install their own sets of perl modules. Not
# having it in core increases the "energy of activation" for doing
# things cleanly.
variant('cpanm', default=True,
description='Optionally install cpanm with the core packages.')
variant('shared', default=True,
description='Build a shared libperl.so library')
variant('threads', default=True,
description='Build perl with threads support')
resource(
name="cpanm",
url="http://search.cpan.org/CPAN/authors/id/M/MI/MIYAGAWA/App-cpanminus-1.7042.tar.gz",
sha256="9da50e155df72bce55cb69f51f1dbb4b62d23740fb99f6178bb27f22ebdf8a46",
destination="cpanm",
placement="cpanm"
)
phases = ['configure', 'build', 'install']
@classmethod
def determine_version(cls, exe):
perl = spack.util.executable.Executable(exe)
output = perl('--version', output=str, error=str)
if output:
match = re.search(r'perl.*\(v([0-9.]+)\)', output)
if match:
return match.group(1)
return None
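    # Typical banner matched by the regex above (illustrative; exact
    # wording varies by build):
    #   This is perl 5, version 34, subversion 0 (v5.34.0) built for ...
    # from which group(1) yields '5.34.0'.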
@classmethod
def determine_variants(cls, exes, version):
for exe in exes:
perl = spack.util.executable.Executable(exe)
output = perl('-V', output=str, error=str)
variants = ''
if output:
match = re.search(r'-Duseshrplib', output)
if match:
variants += '+shared'
else:
variants += '~shared'
match = re.search(r'-Duse.?threads', output)
if match:
variants += '+threads'
else:
variants += '~threads'
path = os.path.dirname(exe)
if 'cpanm' in os.listdir(path):
variants += '+cpanm'
else:
variants += '~cpanm'
return variants
# On a lustre filesystem, patch may fail when files
# aren't writeable so make pp.c user writeable
# before patching. This should probably walk the
# source and make everything writeable in the future.
def do_stage(self, mirror_only=False):
# Do Spack's regular stage
super(Perl, self).do_stage(mirror_only)
# Add write permissions on file to be patched
filename = join_path(self.stage.source_path, 'pp.c')
perm = os.stat(filename).st_mode
os.chmod(filename, perm | 0o200)
def configure_args(self):
spec = self.spec
prefix = self.prefix
config_args = [
'-des',
'-Dprefix={0}'.format(prefix),
'-Dlocincpth=' + self.spec['gdbm'].prefix.include,
'-Dloclibpth=' + self.spec['gdbm'].prefix.lib,
]
# Extensions are installed into their private tree via
# `INSTALL_BASE`/`--install_base` (see [1]) which results in a
# "predictable" installation tree that sadly does not match the
# Perl core's @INC structure. This means that when activation
# merges the extension into the extendee[2], the directory tree
# containing the extensions is not on @INC and the extensions can
# not be found.
#
# This bit prepends @INC with the directory that is used when
# extensions are activated [3].
#
# [1] https://metacpan.org/pod/ExtUtils::MakeMaker#INSTALL_BASE
# [2] via the activate method in the PackageBase class
# [3] https://metacpan.org/pod/distribution/perl/INSTALL#APPLLIB_EXP
config_args.append('-Accflags=-DAPPLLIB_EXP=\\"' +
self.prefix.lib.perl5 + '\\"')
# Discussion of -fPIC for Intel at:
# https://github.com/spack/spack/pull/3081 and
# https://github.com/spack/spack/pull/4416
if spec.satisfies('%intel'):
config_args.append('-Accflags={0}'.format(
self.compiler.cc_pic_flag))
if '+shared' in spec:
config_args.append('-Duseshrplib')
if '+threads' in spec:
config_args.append('-Dusethreads')
# Development versions have an odd second component
if spec.version[1] % 2 == 1:
config_args.append('-Dusedevel')
return config_args
def configure(self, spec, prefix):
configure = Executable('./Configure')
configure(*self.configure_args())
def build(self, spec, prefix):
make()
@run_after('build')
@on_package_attributes(run_tests=True)
def build_test(self):
make('test')
def install(self, spec, prefix):
make('install')
@run_after('install')
def install_cpanm(self):
spec = self.spec
if '+cpanm' in spec:
with working_dir(join_path('cpanm', 'cpanm')):
perl = spec['perl'].command
perl('Makefile.PL')
make()
make('install')
def _setup_dependent_env(self, env, dependent_spec, deptypes):
"""Set PATH and PERL5LIB to include the extension and
any other perl extensions it depends on,
assuming they were installed with INSTALL_BASE defined."""
perl_lib_dirs = []
for d in dependent_spec.traverse(deptype=deptypes):
if d.package.extends(self.spec):
perl_lib_dirs.append(d.prefix.lib.perl5)
if perl_lib_dirs:
perl_lib_path = ':'.join(perl_lib_dirs)
env.prepend_path('PERL5LIB', perl_lib_path)
def setup_dependent_build_environment(self, env, dependent_spec):
self._setup_dependent_env(env, dependent_spec,
deptypes=('build', 'run'))
def setup_dependent_run_environment(self, env, dependent_spec):
self._setup_dependent_env(env, dependent_spec, deptypes=('run',))
def setup_dependent_package(self, module, dependent_spec):
"""Called before perl modules' install() methods.
In most cases, extensions will only need to have one line:
perl('Makefile.PL','INSTALL_BASE=%s' % self.prefix)
"""
# If system perl is used through packages.yaml
# there cannot be extensions.
if dependent_spec.package.is_extension:
# perl extension builds can have a global perl
# executable function
module.perl = self.spec['perl'].command
# Add variables for library directory
module.perl_lib_dir = dependent_spec.prefix.lib.perl5
# Make the site packages directory for extensions,
# if it does not exist already.
mkdirp(module.perl_lib_dir)
def setup_build_environment(self, env):
spec = self.spec
# This is to avoid failures when using -mmacosx-version-min=11.1
# since not all Apple Clang compilers support that version range
# See https://eclecticlight.co/2020/07/21/big-sur-is-both-10-16-and-11-0-its-official/
if spec.satisfies('os=bigsur'):
env.set('SYSTEM_VERSION_COMPAT', 1)
# This is how we tell perl the locations of bzip and zlib.
env.set('BUILD_BZIP2', 0)
env.set('BZIP2_INCLUDE', spec['bzip2'].prefix.include)
env.set('BZIP2_LIB', spec['bzip2'].libs.directories[0])
env.set('BUILD_ZLIB', 0)
env.set('ZLIB_INCLUDE', spec['zlib'].prefix.include)
env.set('ZLIB_LIB', spec['zlib'].libs.directories[0])
@run_after('install')
def filter_config_dot_pm(self):
"""Run after install so that Config.pm records the compiler that Spack
built the package with. If this isn't done, $Config{cc} will
be set to Spack's cc wrapper script. These files are read-only, which
frustrates filter_file on some filesystems (NFSv4), so make them
temporarily writable.
"""
kwargs = {'ignore_absent': True, 'backup': False, 'string': False}
# Find the actual path to the installed Config.pm file.
perl = self.spec['perl'].command
config_dot_pm = perl('-MModule::Loaded', '-MConfig', '-e',
'print is_loaded(Config)', output=str)
with self.make_briefly_writable(config_dot_pm):
match = 'cc *=>.*'
substitute = "cc => '{cc}',".format(cc=self.compiler.cc)
filter_file(match, substitute, config_dot_pm, **kwargs)
# And the path Config_heavy.pl
d = os.path.dirname(config_dot_pm)
config_heavy = join_path(d, 'Config_heavy.pl')
with self.make_briefly_writable(config_heavy):
match = '^cc=.*'
substitute = "cc='{cc}'".format(cc=self.compiler.cc)
filter_file(match, substitute, config_heavy, **kwargs)
match = '^ld=.*'
substitute = "ld='{ld}'".format(ld=self.compiler.cc)
filter_file(match, substitute, config_heavy, **kwargs)
match = "^ccflags='"
substitute = "ccflags='%s " % ' '\
.join(self.spec.compiler_flags['cflags'])
filter_file(match, substitute, config_heavy, **kwargs)
@contextmanager
def make_briefly_writable(self, path):
"""Temporarily make a file writable, then reset"""
perm = os.stat(path).st_mode
os.chmod(path, perm | 0o200)
yield
os.chmod(path, perm)
# ========================================================================
# Handle specifics of activating and deactivating perl modules.
# ========================================================================
def perl_ignore(self, ext_pkg, args):
"""Add some ignore files to activate/deactivate args."""
ignore_arg = args.get('ignore', lambda f: False)
# Many perl packages describe themselves in a perllocal.pod file,
# so the files conflict when multiple packages are activated.
# We could merge the perllocal.pod files in activated packages,
# but this is unnecessary for correct operation of perl.
# For simplicity, we simply ignore all perllocal.pod files:
patterns = [r'perllocal\.pod$']
return match_predicate(ignore_arg, patterns)
def activate(self, ext_pkg, view, **args):
ignore = self.perl_ignore(ext_pkg, args)
args.update(ignore=ignore)
super(Perl, self).activate(ext_pkg, view, **args)
extensions_layout = view.extensions_layout
exts = extensions_layout.extension_map(self.spec)
exts[ext_pkg.name] = ext_pkg.spec
def deactivate(self, ext_pkg, view, **args):
ignore = self.perl_ignore(ext_pkg, args)
args.update(ignore=ignore)
super(Perl, self).deactivate(ext_pkg, view, **args)
extensions_layout = view.extensions_layout
exts = extensions_layout.extension_map(self.spec)
# Make deactivate idempotent
if ext_pkg.name in exts:
del exts[ext_pkg.name]
@property
def command(self):
"""Returns the Perl command, which may vary depending on the version
of Perl. In general, Perl comes with a ``perl`` command. However,
development releases have a ``perlX.Y.Z`` command.
Returns:
Executable: the Perl command
"""
for ver in ('', self.spec.version):
path = os.path.join(self.prefix.bin, '{0}{1}'.format(
self.spec.name, ver))
if os.path.exists(path):
return Executable(path)
else:
msg = 'Unable to locate {0} command in {1}'
raise RuntimeError(msg.format(self.spec.name, self.prefix.bin))
def test(self):
"""Smoke tests"""
exe = self.spec['perl'].command.name
reason = 'test: checking version is {0}'.format(self.spec.version)
self.run_test(exe, '--version', ['perl', str(self.spec.version)],
installed=True, purpose=reason)
reason = 'test: ensuring perl runs'
msg = 'Hello, World!'
options = ['-e', 'use warnings; use strict;\nprint("%s\n");' % msg]
self.run_test(exe, options, msg, installed=True, purpose=reason)
| 42.889151 | 241 | 0.645587 |
3b8e2953f8d246bc7f8145a521b856cb0919b992 | 1,883 | py | Python | server.py | PaulLockett/OSConferenceCall | 93c165da54efd3fac67dd4e54c7619f7d312c1a5 | [
"MIT"
] | null | null | null | server.py | PaulLockett/OSConferenceCall | 93c165da54efd3fac67dd4e54c7619f7d312c1a5 | [
"MIT"
] | null | null | null | server.py | PaulLockett/OSConferenceCall | 93c165da54efd3fac67dd4e54c7619f7d312c1a5 | [
"MIT"
] | null | null | null | from serverlib import *
import tkinter as tk
import tkinter.simpledialog as tkSimpleDialog
import socket
import threading
# ask if the user wants a GUI or the command line
def ask_gui():
gui = input("Do you want a gui? (y/n) ")
if gui == "y":
return True
elif gui == "n":
return False
    else:
        print("Wrong input")
        return ask_gui()
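# Ports used by both branches below: StreamingServer on 9999,
# AudioServer on 8888, ChatServer on 9090 (all from serverlib).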
if ask_gui():
def start_server():
t1 = threading.Thread(target=server.start_server)
t2 = threading.Thread(target=receiver.start_server)
t3 = threading.Thread(target=chatRoom.start)
t1.start()
t2.start()
t3.start()
def stop_server():
server.stop_server()
receiver.stop_server()
chatRoom.stop()
exit()
window = tk.Tk()
window.withdraw()
server_address = tkSimpleDialog.askstring("Server Address", "Enter the server address:")
if server_address is None:
server_address = socket.gethostbyname(socket.gethostname())
server = StreamingServer(server_address, 9999)
receiver = AudioServer(server_address, 8888)
chatRoom = ChatServer(server_address, 9090)
window.wm_deiconify()
window.title("Server")
window.geometry("300x200")
text_target_ip = tk.Text(window, height=1)
text_target_ip.pack()
btn_listen = tk.Button(window, text="Start Server", width=50, command=start_server)
btn_listen.pack(anchor=tk.CENTER, expand=True)
btn_stop = tk.Button(window, text="Stop Server", width=50, command=stop_server)
btn_stop.pack(anchor=tk.CENTER, expand=True)
window.mainloop()
else:
server_address = input("Enter the server address: ")
server = StreamingServer(server_address, 9999)
receiver = AudioServer(server_address, 8888)
chatRoom = ChatServer(server_address, 9090)
server.start_server()
receiver.start_server()
chatRoom.start() | 26.9 | 92 | 0.672331 |
51689019f31b435a54fee4459085870bfec4d823 | 625 | py | Python | 2D_array.py | YannMjl/Coding-Interview-Problems | 84ecf237da52aca7822fc3074823f3faeb360e8b | [
"MIT"
] | null | null | null | 2D_array.py | YannMjl/Coding-Interview-Problems | 84ecf237da52aca7822fc3074823f3faeb360e8b | [
"MIT"
] | null | null | null | 2D_array.py | YannMjl/Coding-Interview-Problems | 84ecf237da52aca7822fc3074823f3faeb360e8b | [
"MIT"
] | null | null | null | # 2-Dimensional array
# Implemented as an array of arrays, i.e. a list of lists in Python
# initialize a 2D array
array_2D = [[1, 2, 3, 4],
["a", "b", "c", "d"]]
# or
array_2d = [[5,6,7,8],
[3,5,2,9]]
# print the second item in the first row
print(array_2d[0][1])
print(array_2D[0][1])
# print the third item in the second row
print(array_2D[1][2])
print(array_2d[1][2])
# iterate over a 2D array
for row in array_2D:
for item in row:
print(item)
# using range(len(...)) for index-based iteration
for i in range(len(array_2d)):
for j in range(len(array_2d[i])):
print(array_2d[i][j])
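# alternative: enumerate yields the index and the item together
for i, row in enumerate(array_2d):
    for j, item in enumerate(row):
        print(i, j, item)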
| 20.833333 | 60 | 0.6016 |
dd6d90b6ea22ebd0ff64ca7a4a0118a65e216581 | 1,331 | py | Python | Demo/sgi/gl/glstdwin/glstdwmenu.py | 1byte2bytes/cpython | 7fbaeb819ca7b20dca048217ff585ec195e999ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | 1 | 2019-10-25T21:41:07.000Z | 2019-10-25T21:41:07.000Z | Demo/sgi/gl/glstdwin/glstdwmenu.py | 1byte2bytes/cpython | 7fbaeb819ca7b20dca048217ff585ec195e999ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | Demo/sgi/gl/glstdwin/glstdwmenu.py | 1byte2bytes/cpython | 7fbaeb819ca7b20dca048217ff585ec195e999ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | # Define menu operations for GL stdwin
import gl
from glstdwin import key2code
class MenuObject:
#
def _init(self, win, title):
self._win = win
self._title = title
self._items = []
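		# each entry: [text, shortcut, enabled, checked]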
return self
#
def close(self):
self._win.remove(self)
del self._win
#
def additem(self, *args):
if len(args) == 2:
text, shortcut = args
elif len(args) == 1:
text, shortcut = args[0], None
else:
raise TypeError, 'arg count'
self._items.append([text, shortcut, 1, 0])
#
def setitem(self, i, text):
self._items[i][0] = text
#
def enable(self, i, flag):
self._items[i][2] = flag
#
def check(self, i, flag):
self._items[i][3] = flag
#
def _makepup(self, firstitem):
pup = gl.newpup()
if self._title:
gl.addtopup(pup, self._title + '%t', 0)
for item in self._items:
text = item[0]
if not item[2]: # Disabled
text = ' ( ' + text + ' )%x-1'
else:
if item[3]: # Check mark
text = '-> ' + text
else:
text = ' ' + text
if key2code.has_key(item[1]):
text = text + ' [Alt-' + item[1] + ']'
text = text + '%x' + `firstitem`
gl.addtopup(pup, text, 0)
firstitem = firstitem + 1
return pup
#
def _checkshortcut(self, char):
for i in range(len(self._items)):
item = self._items[i]
if item[2] and item[1] == char:
return i
return -1
#
| 21.126984 | 44 | 0.592787 |
b8de74199c8146a4cec35d3efaffb218d9e4b35e | 13,510 | py | Python | qiskit/aqua/operators/primitive_ops/pauli_op.py | stefan-woerner/aqua | 12e1b867e254977d9c5992612a7919d8fe016cb4 | [
"Apache-2.0"
] | 504 | 2018-12-15T16:34:03.000Z | 2022-03-26T11:24:53.000Z | qiskit/aqua/operators/primitive_ops/pauli_op.py | stefan-woerner/aqua | 12e1b867e254977d9c5992612a7919d8fe016cb4 | [
"Apache-2.0"
] | 746 | 2018-12-16T16:44:42.000Z | 2021-07-10T16:59:43.000Z | qiskit/aqua/operators/primitive_ops/pauli_op.py | stefan-woerner/aqua | 12e1b867e254977d9c5992612a7919d8fe016cb4 | [
"Apache-2.0"
] | 421 | 2018-12-22T14:49:00.000Z | 2022-03-04T09:47:07.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" PauliOp Class """
from typing import Union, Set, Dict, cast, List, Optional
import logging
import numpy as np
from scipy.sparse import spmatrix
from qiskit import QuantumCircuit
from qiskit.circuit import ParameterExpression, Instruction
from qiskit.quantum_info import Pauli
from qiskit.circuit.library import RZGate, RYGate, RXGate, XGate, YGate, ZGate, IGate
from ..operator_base import OperatorBase
from .primitive_op import PrimitiveOp
from ..list_ops.summed_op import SummedOp
from ..list_ops.tensored_op import TensoredOp
from ..legacy.weighted_pauli_operator import WeightedPauliOperator
from ... import AquaError
logger = logging.getLogger(__name__)
PAULI_GATE_MAPPING = {'X': XGate(), 'Y': YGate(), 'Z': ZGate(), 'I': IGate()}
class PauliOp(PrimitiveOp):
""" Class for Operators backed by Terra's ``Pauli`` module.
"""
def __init__(self,
primitive: Union[Pauli],
coeff: Union[int, float, complex, ParameterExpression] = 1.0) -> None:
"""
Args:
primitive: The Pauli which defines the behavior of the underlying function.
coeff: A coefficient multiplying the primitive.
Raises:
TypeError: invalid parameters.
"""
if not isinstance(primitive, Pauli):
raise TypeError(
'PauliOp can only be instantiated with Paulis, not {}'.format(type(primitive)))
super().__init__(primitive, coeff=coeff)
def primitive_strings(self) -> Set[str]:
return {'Pauli'}
@property
def num_qubits(self) -> int:
return len(self.primitive)
def add(self, other: OperatorBase) -> OperatorBase:
if not self.num_qubits == other.num_qubits:
raise ValueError(
'Sum over operators with different numbers of qubits, {} and {}, is not well '
'defined'.format(self.num_qubits, other.num_qubits))
if isinstance(other, PauliOp) and self.primitive == other.primitive:
return PauliOp(self.primitive, coeff=self.coeff + other.coeff)
return SummedOp([self, other])
def adjoint(self) -> OperatorBase:
return PauliOp(self.primitive, coeff=np.conj(self.coeff))
def equals(self, other: OperatorBase) -> bool:
if not isinstance(other, PauliOp) or not self.coeff == other.coeff:
return False
return self.primitive == other.primitive
def _expand_dim(self, num_qubits: int) -> 'PauliOp':
return PauliOp(Pauli('I'*num_qubits).expand(self.primitive), coeff=self.coeff)
def tensor(self, other: OperatorBase) -> OperatorBase:
# Both Paulis
if isinstance(other, PauliOp):
# Copying here because Terra's Pauli kron is in-place.
op_copy = Pauli((other.primitive.z, other.primitive.x)) # type: ignore
# NOTE!!! REVERSING QISKIT ENDIANNESS HERE
return PauliOp(op_copy.expand(self.primitive), coeff=self.coeff * other.coeff)
# pylint: disable=cyclic-import,import-outside-toplevel
from .circuit_op import CircuitOp
if isinstance(other, CircuitOp):
return self.to_circuit_op().tensor(other)
return TensoredOp([self, other])
def permute(self, permutation: List[int]) -> 'PauliOp':
"""Permutes the sequence of Pauli matrices.
Args:
permutation: A list defining where each Pauli should be permuted. The Pauli at index
j of the primitive should be permuted to position permutation[j].
Returns:
A new PauliOp representing the permuted operator. For operator (X ^ Y ^ Z) and
indices=[1,2,4], it returns (X ^ I ^ Y ^ Z ^ I).
Raises:
AquaError: if indices do not define a new index for each qubit.
"""
pauli_string = self.primitive.__str__()
        length = max(permutation) + 1  # list size must be one larger than its max index
new_pauli_list = ['I'] * length
if len(permutation) != self.num_qubits:
raise AquaError("List of indices to permute must have the same size as Pauli Operator")
for i, index in enumerate(permutation):
new_pauli_list[-index - 1] = pauli_string[-i - 1]
return PauliOp(Pauli(label=''.join(new_pauli_list)), self.coeff)
def compose(self, other: OperatorBase,
permutation: Optional[List[int]] = None, front: bool = False) -> OperatorBase:
new_self, other = self._expand_shorter_operator_and_permute(other, permutation)
new_self = cast(PauliOp, new_self)
if front:
return other.compose(new_self)
# If self is identity, just return other.
if not any(new_self.primitive.x + new_self.primitive.z): # type: ignore
return other * new_self.coeff # type: ignore
# Both Paulis
if isinstance(other, PauliOp):
p_a = new_self.primitive.dot(other.primitive) # type: ignore
product, phase = p_a[:], (-1j) ** p_a.phase # type: ignore
return PrimitiveOp(product, coeff=new_self.coeff * other.coeff * phase)
# pylint: disable=cyclic-import,import-outside-toplevel
from .circuit_op import CircuitOp
from ..state_fns.circuit_state_fn import CircuitStateFn
if isinstance(other, (CircuitOp, CircuitStateFn)):
return new_self.to_circuit_op().compose(other)
return super(PauliOp, new_self).compose(other)
def to_matrix(self, massive: bool = False) -> np.ndarray:
OperatorBase._check_massive('to_matrix', True, self.num_qubits, massive)
return self.primitive.to_matrix() * self.coeff # type: ignore
def to_spmatrix(self) -> spmatrix:
""" Returns SciPy sparse matrix representation of the Operator.
Returns:
CSR sparse matrix representation of the Operator.
Raises:
ValueError: invalid parameters.
"""
return self.primitive.to_matrix(sparse=True) * self.coeff # type: ignore
def __str__(self) -> str:
prim_str = str(self.primitive)
if self.coeff == 1.0:
return prim_str
else:
return "{} * {}".format(self.coeff, prim_str)
def eval(self,
front: Optional[Union[str, Dict[str, complex], np.ndarray, OperatorBase]] = None
) -> Union[OperatorBase, float, complex]:
if front is None:
return self.to_matrix_op()
# pylint: disable=import-outside-toplevel,cyclic-import
from ..state_fns.state_fn import StateFn
from ..state_fns.dict_state_fn import DictStateFn
from ..state_fns.circuit_state_fn import CircuitStateFn
from ..list_ops.list_op import ListOp
from .circuit_op import CircuitOp
new_front = None
# For now, always do this. If it's not performant, we can be more granular.
if not isinstance(front, OperatorBase):
front = StateFn(front, is_measurement=False)
if isinstance(front, ListOp) and front.distributive:
new_front = front.combo_fn([self.eval(front.coeff * front_elem) # type: ignore
for front_elem in front.oplist])
else:
if self.num_qubits != front.num_qubits:
raise ValueError(
'eval does not support operands with differing numbers of qubits, '
'{} and {}, respectively.'.format(
self.num_qubits, front.num_qubits))
if isinstance(front, DictStateFn):
new_dict = {} # type: Dict
corrected_x_bits = self.primitive.x[::-1] # type: ignore
corrected_z_bits = self.primitive.z[::-1] # type: ignore
for bstr, v in front.primitive.items():
bitstr = np.asarray(list(bstr)).astype(int).astype(bool)
new_b_str = np.logical_xor(bitstr, corrected_x_bits)
new_str = ''.join(map(str, 1 * new_b_str))
z_factor = np.product(1 - 2 * np.logical_and(bitstr, corrected_z_bits))
y_factor = np.product(np.sqrt(1 - 2 * np.logical_and(corrected_x_bits,
corrected_z_bits) + 0j))
new_dict[new_str] = (v * z_factor * y_factor) + new_dict.get(new_str, 0)
new_front = StateFn(new_dict, coeff=self.coeff * front.coeff)
elif isinstance(front, StateFn) and front.is_measurement:
raise ValueError('Operator composed with a measurement is undefined.')
# Composable types with PauliOp
elif isinstance(front, (PauliOp, CircuitOp, CircuitStateFn)):
new_front = self.compose(front)
# Covers VectorStateFn and OperatorStateFn
elif isinstance(front, OperatorBase):
new_front = self.to_matrix_op().eval(front.to_matrix_op()) # type: ignore
return new_front
def exp_i(self) -> OperatorBase:
""" Return a ``CircuitOp`` equivalent to e^-iH for this operator H. """
# if only one qubit is significant, we can perform the evolution
corrected_x = self.primitive.x[::-1] # type: ignore
corrected_z = self.primitive.z[::-1] # type: ignore
# pylint: disable=import-outside-toplevel,no-member
sig_qubits = np.logical_or(corrected_x, corrected_z)
if np.sum(sig_qubits) == 0:
# e^I is just a global phase, but we can keep track of it! Should we?
# For now, just return identity
return PauliOp(self.primitive)
if np.sum(sig_qubits) == 1:
sig_qubit_index = sig_qubits.tolist().index(True)
coeff = np.real(self.coeff) \
if not isinstance(self.coeff, ParameterExpression) \
else self.coeff
# Y rotation
if corrected_x[sig_qubit_index] and corrected_z[sig_qubit_index]:
rot_op = PrimitiveOp(RYGate(2 * coeff))
# Z rotation
elif corrected_z[sig_qubit_index]:
rot_op = PrimitiveOp(RZGate(2 * coeff))
# X rotation
elif corrected_x[sig_qubit_index]:
rot_op = PrimitiveOp(RXGate(2 * coeff))
# pylint: disable=cyclic-import
from ..operator_globals import I
left_pad = I.tensorpower(sig_qubit_index)
right_pad = I.tensorpower(self.num_qubits - sig_qubit_index - 1)
# Need to use overloaded operators here in case left_pad == I^0
return left_pad ^ rot_op ^ right_pad
else:
from ..evolutions.evolved_op import EvolvedOp
return EvolvedOp(self)
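    # Worked example of the single-qubit fast path above (illustrative;
    # qubit indices count from the right of the Pauli label):
    #   PauliOp(Pauli('IZ'), coeff=0.5).exp_i()
    # reduces to an RZ(1.0) rotation on the qubit carrying the Z,
    # tensored with identity padding.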
def commutes(self, other_op: OperatorBase) -> bool:
""" Returns whether self commutes with other_op.
Args:
other_op: An ``OperatorBase`` with which to evaluate whether self commutes.
Returns:
A bool equaling whether self commutes with other_op
"""
if not isinstance(other_op, PauliOp):
return False
# Don't use compose because parameters will break this
self_bits = self.primitive.z + 2 * self.primitive.x # type: ignore
other_bits = other_op.primitive.z + 2 * other_op.primitive.x # type: ignore
return all((self_bits * other_bits) * (self_bits - other_bits) == 0)
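        # Note on the check above: per-qubit codes are I=0, Z=1, X=2, Y=3;
        # (a*b)*(a-b) == 0 holds iff a == b or either is identity, so this
        # returns True only when the two Paulis commute qubit-wise.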
def to_circuit(self) -> QuantumCircuit:
# If Pauli equals identity, don't skip the IGates
is_identity = sum(self.primitive.x + self.primitive.z) == 0 # type: ignore
# Note: Reversing endianness!!
qc = QuantumCircuit(len(self.primitive))
for q, pauli_str in enumerate(reversed(self.primitive.to_label())): # type: ignore
gate = PAULI_GATE_MAPPING[pauli_str]
if not pauli_str == 'I' or is_identity:
qc.append(gate, qargs=[q])
return qc
def to_instruction(self) -> Instruction:
# TODO should we just do the following because performance of adding and deleting IGates
# doesn't matter?
# (Reduce removes extra IGates).
# return PrimitiveOp(self.primitive.to_instruction(), coeff=self.coeff).reduce()
return self.to_circuit().to_instruction()
def to_pauli_op(self, massive: bool = False) -> OperatorBase:
return self
def to_legacy_op(self, massive: bool = False) -> WeightedPauliOperator:
if isinstance(self.coeff, ParameterExpression):
try:
coeff = float(self.coeff)
except TypeError as ex:
raise TypeError('Cannot convert Operator with unbound parameter {} to Legacy '
'Operator'.format(self.coeff)) from ex
else:
coeff = cast(float, self.coeff)
return WeightedPauliOperator(paulis=[(coeff, self.primitive)]) # type: ignore
| 42.351097 | 99 | 0.623686 |
8705dc4eb89308369dc919cc75af090f9a5a80ea | 8,235 | py | Python | conv/z80conv/lexer.py | hughpyle/GW-BASIC | f0c1ef3c9655b36cd312d18e4620bb076f03afd3 | [
"MIT"
] | 26 | 2020-05-23T18:09:05.000Z | 2022-01-30T10:07:04.000Z | conv/z80conv/lexer.py | hughpyle/GW-BASIC | f0c1ef3c9655b36cd312d18e4620bb076f03afd3 | [
"MIT"
] | 1 | 2020-06-25T06:20:01.000Z | 2020-06-25T06:20:01.000Z | conv/z80conv/lexer.py | hughpyle/GW-BASIC | f0c1ef3c9655b36cd312d18e4620bb076f03afd3 | [
"MIT"
] | 4 | 2020-05-23T12:36:44.000Z | 2022-01-16T00:20:20.000Z | #!/usr/bin/python
# Copyright (c) 2020 Leandro Pereira <leandro@hardinfo.org>
# Licensed under GPLv2.
from collections import deque
class Lexer:
def __init__(self, fp):
self.state = self._lexer_asm
self.start = 0
self.pos = 0
self.contents = fp.read()
self.end = self.pos + len(self.contents)
self.queue = deque()
self.line = 1
self.col = 1
self.paren_count = 0
def lex(self):
def queue():
last = {'type': None}
while self.queue:
token = self.queue.popleft()
if token['type'] == 'newline' and last['type'] == 'newline':
continue
if token['type'] == 'token' and token['value'] == '':
continue
yield token
last = token
while self.state is not None:
yield from queue()
self.state = self.state()
yield from queue()
def _next(self):
if self.pos >= self.end:
return None
c = self.contents[self.pos]
self.pos += 1
if c == '\n':
self.line += 1
self.col = 1
else:
self.col += 1
return c
def _current_token(self):
return self.contents[self.start : self.pos]
def _emit(self, typ, extra=None, value=None):
tok = {
'type': typ,
'value': self._current_token() if value is None else value
}
if extra is not None:
tok['extra'] = extra
if typ == 'token':
tok['value'] = tok['value'].upper()
self.queue.append(tok)
def _emit_token(self, typ, extra=None, value=None):
self._emit(typ, extra=extra, value=value)
self.start = self.pos
def _error(self, typ):
self._emit_token('error', extra={
'type': typ,
'position': (self.line, self.col)
})
return None
def _ignore(self):
self.start = self.pos
def _backup(self):
self.pos -= 1
def _lexer_single_line_comment(self):
self._ignore() # Ignore ;
while True:
c = self._next()
if c is None:
return self._error('EOF')
if c in '\r\n':
self._backup()
self._emit_token('comment')
return self._lexer_asm
def _lexer_directive(self):
while True:
c = self._next()
if c is None:
return self._error('EOF')
if c.isspace():
self._backup()
self._emit_token('directive')
return self._lexer_asm
if not c.isalpha():
return self._error('Expecting alphabetic character')
def _lexer_number(self):
while True:
c = self._next()
if c is None:
return self._error('EOF')
if not c.isdigit():
self._backup()
self._emit_token('number')
return self._lexer_asm
def _lexer_multiline_comment(self):
self._ignore() # Ignore COMMENT
comment_char = None
while True:
c = self._next()
if c is None:
return self._error('EOF')
if c.isspace():
continue
comment_char = c
self._ignore()
break
while True:
c = self._next()
if c is None:
return self._error('EOF')
if c == comment_char:
self._backup()
self._emit_token('comment')
self.pos += 1
self._ignore()
return self._lexer_asm
def _lexer_token_until_end_of_line(self, typ, lex_comma=False):
self._ignore()
while True:
c = self._next()
if c is None or c in ';\r\n':
self._backup()
self._emit_token(typ, value=self._current_token().strip())
return self._lexer_token
if c == ',':
self._backup()
self._emit_token(typ, value=self._current_token().strip())
self.pos += 1
self._ignore()
def _lexer_title(self):
return self._lexer_token_until_end_of_line('title')
def _lexer_subtitle(self):
return self._lexer_token_until_end_of_line('subtitle')
def _lexer_extern(self):
return self._lexer_token_until_end_of_line('extern', lex_comma=True)
def _lexer_db(self):
return self._lexer_token_until_end_of_line('db', lex_comma=True)
def _lexer_dw(self):
return self._lexer_token_until_end_of_line('dw', lex_comma=True)
def _lexer_assign(self):
self._backup()
tok = self._current_token()
self.pos += 1
self._ignore()
while True:
c = self._next()
if c is None or c in ';\r\n':
self._backup()
self._emit_token('assign', value={
'token': tok,
'value': self._current_token().strip()
})
return None if c is None else self._lexer_asm
def _lexer_token(self):
while True:
c = self._next()
if c is None:
return self._error('EOF')
if c == '=':
return self._lexer_assign
if not c.isalnum() and not c in ':.[]?$_&':
self._backup()
curtoken = self._current_token().upper()
if curtoken == 'COMMENT':
return self._lexer_multiline_comment
elif curtoken == 'TITLE':
return self._lexer_title
elif curtoken == 'SUBTTL':
return self._lexer_subtitle
elif curtoken == 'EXTRN':
return self._lexer_extern
elif curtoken == 'DB':
return self._lexer_db
elif curtoken == 'DW':
return self._lexer_dw
self._emit_token('token')
return self._lexer_asm
def _lexer_string(self, end_char, token_type='string'):
while True:
c = self._next()
if c is None:
return self._error('EOF')
if c == end_char:
self._emit_token(token_type)
return self._lexer_asm
def _lexer_string_double(self):
return self._lexer_string('"')
def _lexer_string_single(self):
return self._lexer_string('\'')
def _lexer_angle(self):
return self._lexer_string('>', token_type='token')
def _lexer_asm(self):
while True:
c = self._next()
if c is None:
return None
if c == '\n':
self._emit_token('newline')
continue
if c.isspace():
self._ignore()
continue
if c == ';':
return self._lexer_single_line_comment
if c == '.':
return self._lexer_directive
if c == ',':
self._emit_token('comma')
continue
if c == '(':
self._emit_token('open_paren')
self.paren_count += 1
continue
if c == ')':
if self.paren_count == 0:
return self._error('Closing parenthesis without an opening parenthesis')
self._emit_token('close_paren')
self.paren_count -= 1
if self.paren_count == 0:
self._ignore()
return self._lexer_asm
if c.isdigit():
return self._lexer_number
if c.isalpha() or c == '?':
return self._lexer_token
if c == '\'':
return self._lexer_string_single
if c == '"':
return self._lexer_string_double
if c == '<':
return self._lexer_angle
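    def _run(self):
        # A minimal driver sketch (a hypothetical convenience method, not part
        # of the original lexer): every _lexer_* state returns the next state
        # function, or None on EOF/error, so the whole lexer can be driven as
        # a simple trampoline loop.
        state = self._lexer_asm
        while state is not None:
            state = state()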
| 27.634228 | 92 | 0.485124 |
3f67d83e86603dd449eb99e3bcaf488d842a6e6e | 1,278 | py | Python | app.py | Malvikabhalla99/codeFury | ac02d443d7dd89a3dccea4e1e992b5b14e5aa9be | [
"MIT"
] | 15 | 2019-10-01T13:55:13.000Z | 2019-10-23T02:09:44.000Z | app.py | Malvikabhalla99/codeFury | ac02d443d7dd89a3dccea4e1e992b5b14e5aa9be | [
"MIT"
] | 7 | 2019-09-30T18:07:42.000Z | 2020-09-30T09:40:10.000Z | app.py | Malvikabhalla99/codeFury | ac02d443d7dd89a3dccea4e1e992b5b14e5aa9be | [
"MIT"
] | 31 | 2019-09-30T18:33:16.000Z | 2020-02-27T11:11:49.000Z | from flask import Flask, render_template, request, url_for
from flask_bootstrap import Bootstrap
# NLP Packages
from textblob import TextBlob, Word
import random
import time
app = Flask(__name__)
Bootstrap(app)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/analyse',methods=['POST'])
def analyse():
start = time.time()
if request.method == 'POST':
rawtext = request.form['rawtext']
#NLP Stuff
blob = TextBlob(rawtext)
received_text2 = blob
blob_sentiment,blob_subjectivity = blob.sentiment.polarity ,blob.sentiment.subjectivity
number_of_tokens = len(list(blob.words))
# Extracting Main Points
nouns = list()
for word, tag in blob.tags:
if tag == 'NN':
nouns.append(word.lemmatize())
len_of_words = len(nouns)
rand_words = random.sample(nouns,len(nouns))
final_word = list()
for item in rand_words:
word = Word(item).pluralize()
final_word.append(word)
        summary = final_word
        # final_time is referenced in the template below but was never computed
        final_time = time.time() - start
return render_template('index.html',received_text = received_text2,number_of_tokens=number_of_tokens,blob_sentiment=blob_sentiment,blob_subjectivity=blob_subjectivity,summary=summary,final_time=final_time)
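
def _demo_sentiment():
    # A standalone sanity-check sketch (not wired into the app): TextBlob's
    # sentiment.polarity lies in [-1, 1] and sentiment.subjectivity in [0, 1],
    # which is what the analyse() view above reports back to the template.
    blob = TextBlob("I love this product, but the delivery was slow.")
    return blob.sentiment.polarity, blob.sentiment.subjectivity
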
if __name__ == '__main__':
app.run(debug=True) | 29.72093 | 206 | 0.712833 |
803116b5736b5763604e463ae8eb4d27a64dea2a | 1,363 | py | Python | tests/test_middleware.py | yunojuno/django-nps | a7f904f85f17f1f7735193e0b9aeb1010ecf9feb | [
"MIT"
] | 3 | 2016-06-21T21:56:19.000Z | 2019-10-02T13:04:37.000Z | tests/test_middleware.py | yunojuno/django-nps | a7f904f85f17f1f7735193e0b9aeb1010ecf9feb | [
"MIT"
] | 5 | 2016-02-22T14:05:44.000Z | 2020-06-03T18:32:09.000Z | tests/test_middleware.py | yunojuno/django-nps | a7f904f85f17f1f7735193e0b9aeb1010ecf9feb | [
"MIT"
] | 4 | 2016-03-27T02:51:28.000Z | 2017-07-05T16:20:07.000Z | from unittest import mock
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.test import RequestFactory, TransactionTestCase
from net_promoter_score.middleware import NPSMiddleware
from net_promoter_score.models import UserScore
class MiddlewareTests(TransactionTestCase):
"""Test suite for middleware."""
def setUp(self):
self.factory = RequestFactory()
self.user = get_user_model().objects.create_user("zoidberg")
        # Model.save() returns None, so keep the instance before saving it.
        self.score = UserScore(user=self.user, score=10)
        self.score.save()
self.test_response = mock.Mock(spec=HttpResponse)
self.middleware = NPSMiddleware(lambda r: self.test_response)
def test_process_request(self):
request = self.factory.get("/")
request.user = self.user
with mock.patch(
"net_promoter_score.settings.NPS_DISPLAY_FUNCTION", lambda r: True
):
resp = self.middleware(request)
self.assertEqual(resp, self.test_response)
self.assertTrue(request.show_nps)
# verify that unauthenticated users always return False
request.user = AnonymousUser()
resp = self.middleware(request)
self.assertEqual(resp, self.test_response)
self.assertFalse(request.show_nps)
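
    def test_process_request_custom_display_function(self):
        # A sketch resting on the same assumption as the test above:
        # NPS_DISPLAY_FUNCTION receives the request and returns a bool, so any
        # per-request predicate can gate the survey. The superuser check is a
        # hypothetical example; "zoidberg" is a regular user, so show_nps
        # should come back False.
        request = self.factory.get("/")
        request.user = self.user
        with mock.patch(
            "net_promoter_score.settings.NPS_DISPLAY_FUNCTION",
            lambda r: r.user.is_superuser,
        ):
            resp = self.middleware(request)
            self.assertEqual(resp, self.test_response)
            self.assertFalse(request.show_nps)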
| 36.837838 | 78 | 0.701394 |
b6f565947440c2d9aadaf344751445e21fa3a128 | 5,370 | py | Python | dreamwidget/boshclient_bar.py | dreamplatform/dream-widget | 2fc237bda8418613a73a835ed683a6057da58e47 | [
"BSD-3-Clause"
] | null | null | null | dreamwidget/boshclient_bar.py | dreamplatform/dream-widget | 2fc237bda8418613a73a835ed683a6057da58e47 | [
"BSD-3-Clause"
] | null | null | null | dreamwidget/boshclient_bar.py | dreamplatform/dream-widget | 2fc237bda8418613a73a835ed683a6057da58e47 | [
"BSD-3-Clause"
] | null | null | null | import sys, os
import httplib, urllib
import random, binascii
from urlparse import urlparse
from punjab.httpb import HttpbParse
from twisted.words.xish import domish
from twisted.words.protocols.jabber import jid
TLS_XMLNS = 'urn:ietf:params:xml:ns:xmpp-tls'
SASL_XMLNS = 'urn:ietf:params:xml:ns:xmpp-sasl'
BIND_XMLNS = 'urn:ietf:params:xml:ns:xmpp-bind'
SESSION_XMLNS = 'urn:ietf:params:xml:ns:xmpp-session'
def request_bosh_session(username, password, service):
c = BOSHClient('%s@msg.dreamschool.fi'%username, password, service)
c.startSessionAndAuth()
return {'jid': c.jabberid.full(), 'sid': c.sid, 'rid': c.rid}
class BOSHClient:
def __init__(self, jabberid, password, bosh_service):
self.rid = random.randint(0, 10000000)
self.jabberid = jid.internJID(jabberid)
self.password = password
self.authid = None
self.sid = None
self.logged_in = False
self.headers = {"Content-type": "text/xml",
"Accept": "text/xml"}
self.bosh_service = urlparse(bosh_service)
def buildBody(self, child=None):
"""Build a BOSH body.
"""
body = domish.Element(("http://jabber.org/protocol/httpbind", "body"))
body['content'] = 'text/xml; charset=utf-8'
self.rid = self.rid + 1
body['rid'] = str(self.rid)
body['sid'] = str(self.sid)
body['xml:lang'] = 'en'
if child is not None:
body.addChild(child)
return body
def sendBody(self, body):
"""Send the body.
"""
parser = HttpbParse(True)
print repr(self.bosh_service.netloc)
# start new session
conn = httplib.HTTPConnection(self.bosh_service.netloc)
conn.request("POST", self.bosh_service.path, body.toXml(), self.headers)
response = conn.getresponse()
data = ''
if response.status == 200:
data = response.read()
        # the body was already consumed above, so print the saved copy
        print 'resp', repr(response.status), repr(data)
conn.close()
return parser.parse(data)
def startSessionAndAuth(self, hold='1', wait='70'):
# Create a session
# create body
body = domish.Element(("http://jabber.org/protocol/httpbind", "body"))
body['content'] = 'text/xml; charset=utf-8'
body['hold'] = hold
body['rid'] = str(self.rid)
body['to'] = self.jabberid.host
body['wait'] = wait
body['window'] = '5'
body['xml:lang'] = 'en'
body['ver'] = '1.6'
print 'body', repr(body.toXml())
retb, elems = self.sendBody(body)
print 'retb', repr(retb)
print 'elems', repr(elems)
if type(retb) != str and retb.hasAttribute('authid') and \
retb.hasAttribute('sid'):
self.authid = retb['authid']
self.sid = retb['sid']
# go ahead and auth
auth = domish.Element((SASL_XMLNS, 'auth'))
auth['mechanism'] = 'PLAIN'
# TODO: add authzid
if auth['mechanism'] == 'PLAIN':
auth_str = ""
auth_str += "\000"
auth_str += self.jabberid.user.encode('utf-8')
auth_str += "\000"
try:
auth_str += self.password.encode('utf-8').strip()
except UnicodeDecodeError:
auth_str += self.password.decode('latin1') \
.encode('utf-8').strip()
auth.addContent(binascii.b2a_base64(auth_str))
retb, elems = self.sendBody(self.buildBody(auth))
if len(elems) == 0:
# poll for data
retb, elems = self.sendBody(self.buildBody())
if len(elems) > 0:
if elems[0].name == 'success':
retb, elems = self.sendBody(self.buildBody())
if elems[0].firstChildElement().name == 'bind':
iq = domish.Element(('jabber:client', 'iq'))
iq['type'] = 'set'
iq.addUniqueId()
iq.addElement('bind')
iq.bind['xmlns'] = BIND_XMLNS
if self.jabberid.resource:
iq.bind.addElement('resource')
iq.bind.resource.addContent(
self.jabberid.resource)
retb, elems = self.sendBody(self.buildBody(iq))
if type(retb) != str and retb.name == 'body':
# send session
iq = domish.Element(('jabber:client', 'iq'))
iq['type'] = 'set'
iq.addUniqueId()
iq.addElement('session')
iq.session['xmlns'] = SESSION_XMLNS
retb, elems = self.sendBody(self.buildBody(iq))
# did not bind, TODO - add a retry?
if type(retb) != str and retb.name == 'body':
self.logged_in = True
# bump up the rid, punjab already
# received self.rid
self.rid += 1
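
# A minimal usage sketch (the credentials and BOSH endpoint below are
# hypothetical): the jid/sid/rid triple returned by request_bosh_session is
# what a browser-side XMPP client needs to attach to the pre-authenticated
# session.
if __name__ == '__main__':
    print request_bosh_session('alice', 's3cret', 'https://example.org/http-bind')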
| 35.562914 | 80 | 0.506145 |
e150349a49398a7acf442f6a0b0eb59b785ae21c | 9,477 | py | Python | app/lib_master_python/ds_salesforce.py | docusign/sfdc-recipe-soap-auto-provisioning | 50621220e91360306e60be6d7753943c95feb57d | [
"MIT"
] | 2 | 2021-11-14T17:10:50.000Z | 2021-12-29T21:41:58.000Z | app/lib_master_python/ds_salesforce.py | docusign/sfdc-recipe-auto-provisioning | 50621220e91360306e60be6d7753943c95feb57d | [
"MIT"
] | 2 | 2021-03-22T17:15:31.000Z | 2021-12-13T19:41:55.000Z | app/lib_master_python/ds_salesforce.py | docusign/sfdc-recipe-auto-provisioning | 50621220e91360306e60be6d7753943c95feb57d | [
"MIT"
] | 5 | 2017-01-28T05:50:53.000Z | 2020-12-03T01:00:09.000Z | # DocuSign Salesforce
#
# Set encoding to utf8. See http://stackoverflow.com/a/21190382/64904
import sys; reload(sys); sys.setdefaultencoding('utf8')
import os, base64, json, requests, logging, random
from app.lib_master_python import ds_recipe_lib
from urlparse import urlparse
from string import Template
from simple_salesforce import (Salesforce,
SalesforceError,
SalesforceMoreThanOneRecord,
SalesforceExpiredSession,
SalesforceRefusedRequest,
SalesforceResourceNotFound,
SalesforceGeneralError,
SalesforceMalformedRequest)
# If using a local copy:
# sys.path.insert(1, os.path.join(sys.path[0], '../simple_salesforce'))
# from simple_salesforce import Salesforce
# Constants
html_email_template = 'app/templates/welcome_email.html'
########################################################################
########################################################################
# Notes
#
# See https://github.com/superfell/Beatbox for more info on beatbox
# (SFDC SOAP library) and procedures for when SFDC requies TLS 1.2
#
# For the user object, fields TimeZoneSidKey, LocaleSidKey, ProfileId,
# EmailEncodingKey, and LanguageLocaleKey are all required. But how
# to determine good default values? One was is to create a user,
# then see what those fields are set to for the existing user.
# Use the Python interpreter:
# from simple_salesforce import Salesforce
# from app.lib_master_python import ds_cache
# cache = ds_cache.get()
# sf = Salesforce(instance = 'samdev-dev-ed.my.salesforce.com', session_id='')
# sf = Salesforce(username = cache['sfdc_username'], password = cache['sfdc_pw'], security_token = cache['sfdc_security_token'])
# sf.User.get('00561000001veBp') # The existing User record ID
# Then use https://pythoniter.appspot.com/ to pretty print the resulting Python dict
########################################################################
########################################################################
def provision_community_member(cache, email, first_name, last_name, sfdc_account_url):
'''Create a contact and external user in the sfdc account
Returns err_msg => None or the problem info
'''
# Interpreter command to test this method
# from app.lib_master_python import ds_salesforce; from app.lib_master_python import ds_cache; cache = ds_cache.get()
# ds_salesforce.provision_community_member(cache, 'someone@mailinator.com', 'Sally', 'Ride', 'https://name-dev-ed.my.salesforce.com/0016100000cgXXX')
ds_recipe_lib.log('################## Starting SFDC provisioning')
# Check that the url is for an SFDC account
# See http://www.salesforceben.com/salesforce-url-consist/
url_parts = urlparse(sfdc_account_url)
path = url_parts.path # includes leading /
obj_prefix = path[1:4]
account_obj_prefix = '001'
account_id = path[1:]
# ParseResult(scheme='http', netloc='www.cwi.nl:80', path='/%7Eguido/Python.html', params='', query='', fragment='')
if obj_prefix != account_obj_prefix:
return 'The SFDC url is not an Account reference'
sf = Salesforce(username = cache['sfdc_username'], password = cache['sfdc_pw'], security_token = cache['sfdc_security_token'])
# Create the contact
try:
r = sf.Contact.create ({'AccountID': account_id, 'LastName': last_name, 'FirstName': first_name, 'Email': email})
except (
SalesforceMoreThanOneRecord,
SalesforceMalformedRequest,
SalesforceExpiredSession,
SalesforceRefusedRequest,
SalesforceResourceNotFound,
SalesforceGeneralError) as e:
ds_recipe_lib.log("##################")
ds_recipe_lib.log("##################")
ds_recipe_lib.log("################## ERROR creating contact for account {}".format(account_id))
ds_recipe_lib.log( " url: " + e.url)
ds_recipe_lib.log( " content: " + str(e.content)) # This is actually a JSON string. It could be decoded and used.
# Example: [{u'errorCode': u'LICENSE_LIMIT_EXCEEDED', u'fields': [], u'message': u'License Limit Exceeded'}, {u'errorCode': u'LICENSE_LIMIT_EXCEEDED', u'fields': [], u'message': u'license limit exceeded'}]
ds_recipe_lib.log( " status: " + str(e.status))
ds_recipe_lib.log( "resource_name: " + e.resource_name)
return 'Problem creating the contact: ' + str(e.content)
if r['success']:
ds_recipe_lib.log('Created contact for account {}!'.format(account_id))
contact_id = r['id']
else:
ds_recipe_lib.log('ERROR creating contact for account {} -- {}'.format(account_id, str(r['errors'])))
return 'Problem creating the contact: ' + str(r['errors'])
# Create the user. See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/sforce_api_objects_user.htm
username = email.split('@')[0] + '@' + cache['sfdc_user_name_domain']
### In production, you'd want to automatically handle a username clash with an existing user,
### and then try alternate usernames for the new user.
## To be done: if there's an error while creating the contact's user record then
## we should delete the contact? Or maybe leave it for manual processing?
try:
r = sf.User.create ({
'ContactId': contact_id,
'Email': email,
'LastName': last_name,
'FirstName': first_name,
'IsActive': True,
'ProfileId': cache['sfdc_profile_id'],
'Username': username,
'UserPreferencesLightningExperiencePreferred': True,
            'Alias': first_name[0:1] + last_name[0:6], # Alias is required, max length 8.
# In production you'd want to watch for alias clashes and then append a digit if needed
'TimeZoneSidKey': cache['sfdc_time_zone_sid_key'],
'LocaleSidKey': cache['sfdc_locale_sid_key'],
'EmailEncodingKey': cache['sfdc_email_encoding_key'],
'LanguageLocaleKey': cache['sfdc_language_locale_key'],
})
except (
SalesforceMoreThanOneRecord,
SalesforceMalformedRequest,
SalesforceExpiredSession,
SalesforceRefusedRequest,
SalesforceResourceNotFound,
SalesforceGeneralError) as e:
ds_recipe_lib.log("##################")
ds_recipe_lib.log("##################")
ds_recipe_lib.log("################## ERROR creating the user")
ds_recipe_lib.log( " url: " + e.url)
ds_recipe_lib.log( " content: " + str(e.content)) # This is actually a JSON string. It could be decoded and used.
# Example: [{u'errorCode': u'LICENSE_LIMIT_EXCEEDED', u'fields': [], u'message': u'License Limit Exceeded'}, {u'errorCode': u'LICENSE_LIMIT_EXCEEDED', u'fields': [], u'message': u'license limit exceeded'}]
ds_recipe_lib.log( " status: " + str(e.status))
ds_recipe_lib.log( "resource_name: " + e.resource_name)
return 'Problem creating the contact: ' + str(e.content)
if r['success']:
user_id = r['id']
ds_recipe_lib.log('Created user record! ID = ' + user_id)
else:
ds_recipe_lib.log('ERROR creating user -- {}'.format(str(r['errors'])))
return 'Problem creating the user: ' + str(r['errors'])
send_welcome_email(cache, email, first_name, last_name, username)
ds_recipe_lib.log('################## Completed SFDC provisioning')
return None
def send_welcome_email(cache, email, first_name, last_name, username):
'''Send the welcome email with instructions for setting password'''
to = first_name + " " + last_name + " <" + email + ">"
from_ = "New Partner Robot <mailgun@" + cache['mg_domain'] + ">"
subject = "Your World Wide Partner Portal account was created"
text_body = '''
Hello!
Thank you for joining the World Wide Corp Partner Program!
This email includes your partner portal login instructions.
We look forward to working with you. Please let me know if
you have any questions or comments.
Best regards,
Juliette Morris, VP, Partnering
juliette.morris@worldwidecorp.us
~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
Login to your World Wide Partner Portal Account
===>>> Your portal user name is {username} <<<===
    Step 1. Set your password by filling in the "Reset Password" form: {reset_password_url}. Use your portal username {username}
    Step 2. Log in to the portal: {portal_url}
    '''.format(username = username, reset_password_url = cache['sfdc_forgot_password'],
               portal_url = cache['sfdc_community_url'])
# read in the html version of the email
html_body = Template(open(html_email_template, 'r').read()).substitute(
username = username, reset_password_url = cache['sfdc_forgot_password'],
portal_url = cache['sfdc_community_url'])
requests.post(
'https://api.mailgun.net/v3/' + cache['mg_domain'] + '/messages',
auth=("api", cache['mg_api_key']),
data={"from": from_,
"to": [to],
"subject": subject,
"text": text_body,
"html": html_body})
# FIN
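
# For reference, the Account check in provision_community_member relies on
# Salesforce key prefixes: the first three characters of a record ID identify
# the object type ('001' = Account, '003' = Contact, '005' = User). A tiny
# helper sketch:
def _is_account_id(record_id):
    return record_id[:3] == '001'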
| 45.5625 | 221 | 0.630685 |
815601f799cb4e0c512db183200d6102c4717299 | 3,396 | py | Python | hisa/learn/sentiment/sentiment.py | rittikaadhikari/stock-recommendation | 1f14276a955301b1c6fa1c00bd88b00cf5668d8c | [
"MIT"
] | null | null | null | hisa/learn/sentiment/sentiment.py | rittikaadhikari/stock-recommendation | 1f14276a955301b1c6fa1c00bd88b00cf5668d8c | [
"MIT"
] | null | null | null | hisa/learn/sentiment/sentiment.py | rittikaadhikari/stock-recommendation | 1f14276a955301b1c6fa1c00bd88b00cf5668d8c | [
"MIT"
] | null | null | null | import re
import json
import datetime
from datetime import datetime
from datetime import timedelta
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import argparse
import os
import csv
class ProcessTweets(object):
def __init__(self, filename, outname):
self.filename = filename
self.outname = outname
json_file = open(filename)
json_str = json_file.read()
self.json = json.loads(json_str)
self.sid = SentimentIntensityAnalyzer()
def clean_tweet(self, tweet):
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
def get_sentiment(self, tweet):
polarity_scores = self.sid.polarity_scores(tweet)
return polarity_scores['neg'], polarity_scores['pos'], polarity_scores['neu']
def get_tweets(self):
df = pd.DataFrame.from_dict(self.json)
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.sort_values(by=['timestamp'], inplace=True, ascending=True)
df.reset_index(inplace=True)
self.json = df.to_dict()
timestamps = self.json['timestamp']
start_date = pd.to_datetime(timestamps[0])
end_date = start_date + timedelta(hours=1)
sentiments = dict()
temp = []
tweets = self.json['text']
for count, tweet in enumerate(tweets, start=0):
tweet = tweets[tweet]
curr_time = timestamps[count]
if curr_time >= start_date and curr_time < end_date:
neg, pos, neu = self.get_sentiment(self.clean_tweet(tweet))
temp.append([neg, pos, neu])
else:
means = np.mean(np.asarray(temp), axis=0)
obj = {'neg': means[0], 'pos': means[1], 'neu': means[2]}
sentiments[start_date.strftime("%Y-%m-%d %H:%M:%S")] = obj
temp = []
start_date = end_date
end_date = start_date + timedelta(hours=1)
neg, pos, neu = self.get_sentiment(self.clean_tweet(tweet))
temp.append([neg, pos, neu])
        # Flush the final (possibly partial) hourly bucket, which the loop
        # above otherwise drops.
        if temp:
            means = np.mean(np.asarray(temp), axis=0)
            obj = {'neg': means[0], 'pos': means[1], 'neu': means[2]}
            sentiments[start_date.strftime("%Y-%m-%d %H:%M:%S")] = obj
        tmp_df = pd.DataFrame.from_dict(sentiments)
neg = tmp_df.loc['neg', :]
pos = tmp_df.loc['pos', :]
neu = tmp_df.loc['neu', :]
df = pd.DataFrame()
df['neg'] = neg
df['pos'] = pos
df['neu'] = neu
df = df.set_index(pd.to_datetime(tmp_df.columns.values))
df.index.name = 'date'
df.to_csv(self.outname, sep=',')
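    def _demo_polarity(self):
        # A reference sketch (not used by the pipeline): polarity_scores
        # returns 'neg'/'neu'/'pos' proportions plus a normalized 'compound'
        # score; get_sentiment above keeps the first three and drops
        # 'compound'.
        return self.sid.polarity_scores("great earnings, but guidance disappointed")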
def main():
ap = argparse.ArgumentParser()
ap.add_argument('--input_dir', required=True, help='directory containing json files from twitterscraper')
ap.add_argument('--output_dir', required=True, help='directory of resulting sentiment csv files')
args = ap.parse_args()
if(not os.path.exists(args.output_dir)):
os.makedirs(args.output_dir)
files = []
for dirpath, dirnames, filenames in os.walk(args.input_dir):
for f in filenames:
if f.split('.')[-1] == 'json':
files.append((f.split('.')[0], os.path.join(dirpath, f)))
for f in files:
ProcessTweets(f[1], os.path.join(args.output_dir, f[0] + '.csv')).get_tweets()
if __name__ == "__main__":
main()
| 33.96 | 109 | 0.594817 |
89bda000909dd4b56f5c4f42f9e99543eef4dc96 | 17,864 | py | Python | synapse/app/_base.py | rhetenor/synapse | 5154afc00d841c7685a97700be3cd1398e633e05 | [
"Apache-2.0"
] | null | null | null | synapse/app/_base.py | rhetenor/synapse | 5154afc00d841c7685a97700be3cd1398e633e05 | [
"Apache-2.0"
] | null | null | null | synapse/app/_base.py | rhetenor/synapse | 5154afc00d841c7685a97700be3cd1398e633e05 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 New Vector Ltd
# Copyright 2019-2021 The Matrix.org Foundation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import gc
import logging
import os
import platform
import signal
import socket
import sys
import traceback
import warnings
from typing import TYPE_CHECKING, Awaitable, Callable, Iterable
from cryptography.utils import CryptographyDeprecationWarning
from typing_extensions import NoReturn
import twisted
from twisted.internet import defer, error, reactor
from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory
import synapse
from synapse.api.constants import MAX_PDU_SIZE
from synapse.app import check_bind_error
from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ManholeConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.logging.context import PreserveLoggingContext
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
from synapse.util.daemonize import daemonize_process
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
# list of tuples of function, args list, kwargs dict
_sighup_callbacks = []
def register_sighup(func, *args, **kwargs):
"""
Register a function to be called when a SIGHUP occurs.
Args:
func (function): Function to be called when sent a SIGHUP signal.
*args, **kwargs: args and kwargs to be passed to the target function.
"""
_sighup_callbacks.append((func, args, kwargs))
def start_worker_reactor(appname, config, run_command=reactor.run):
"""Run the reactor in the main process
Daemonizes if necessary, and then configures some resources, before starting
the reactor. Pulls configuration from the 'worker' settings in 'config'.
Args:
appname (str): application name which will be sent to syslog
config (synapse.config.Config): config object
run_command (Callable[]): callable that actually runs the reactor
"""
logger = logging.getLogger(config.worker_app)
start_reactor(
appname,
soft_file_limit=config.soft_file_limit,
gc_thresholds=config.gc_thresholds,
pid_file=config.worker_pid_file,
daemonize=config.worker_daemonize,
print_pidfile=config.print_pidfile,
logger=logger,
run_command=run_command,
)
def start_reactor(
appname,
soft_file_limit,
gc_thresholds,
pid_file,
daemonize,
print_pidfile,
logger,
run_command=reactor.run,
):
"""Run the reactor in the main process
Daemonizes if necessary, and then configures some resources, before starting
the reactor
Args:
appname (str): application name which will be sent to syslog
soft_file_limit (int):
gc_thresholds:
pid_file (str): name of pid file to write to if daemonize is True
daemonize (bool): true to run the reactor in a background process
print_pidfile (bool): whether to print the pid file, if daemonize is True
logger (logging.Logger): logger instance to pass to Daemonize
run_command (Callable[]): callable that actually runs the reactor
"""
def run():
logger.info("Running")
setup_jemalloc_stats()
change_resource_limit(soft_file_limit)
if gc_thresholds:
gc.set_threshold(*gc_thresholds)
run_command()
# make sure that we run the reactor with the sentinel log context,
# otherwise other PreserveLoggingContext instances will get confused
# and complain when they see the logcontext arbitrarily swapping
# between the sentinel and `run` logcontexts.
#
# We also need to drop the logcontext before forking if we're daemonizing,
# otherwise the cputime metrics get confused about the per-thread resource usage
# appearing to go backwards.
with PreserveLoggingContext():
if daemonize:
if print_pidfile:
print(pid_file)
daemonize_process(pid_file, logger)
run()
def quit_with_error(error_string: str) -> NoReturn:
message_lines = error_string.split("\n")
line_length = min(max(len(line) for line in message_lines), 80) + 2
sys.stderr.write("*" * line_length + "\n")
for line in message_lines:
sys.stderr.write(" %s\n" % (line.rstrip(),))
sys.stderr.write("*" * line_length + "\n")
sys.exit(1)
def handle_startup_exception(e: Exception) -> NoReturn:
# Exceptions that occur between setting up the logging and forking or starting
# the reactor are written to the logs, followed by a summary to stderr.
logger.exception("Exception during startup")
quit_with_error(
f"Error during initialisation:\n {e}\nThere may be more information in the logs."
)
def redirect_stdio_to_logs() -> None:
streams = [("stdout", LogLevel.info), ("stderr", LogLevel.error)]
for (stream, level) in streams:
oldStream = getattr(sys, stream)
loggingFile = LoggingFile(
logger=twisted.logger.Logger(namespace=stream),
level=level,
encoding=getattr(oldStream, "encoding", None),
)
setattr(sys, stream, loggingFile)
print("Redirected stdout/stderr to logs")
def register_start(cb: Callable[..., Awaitable], *args, **kwargs) -> None:
"""Register a callback with the reactor, to be called once it is running
This can be used to initialise parts of the system which require an asynchronous
setup.
Any exception raised by the callback will be printed and logged, and the process
will exit.
"""
async def wrapper():
try:
await cb(*args, **kwargs)
except Exception:
# previously, we used Failure().printTraceback() here, in the hope that
# would give better tracebacks than traceback.print_exc(). However, that
# doesn't handle chained exceptions (with a __cause__ or __context__) well,
# and I *think* the need for Failure() is reduced now that we mostly use
# async/await.
# Write the exception to both the logs *and* the unredirected stderr,
# because people tend to get confused if it only goes to one or the other.
#
# One problem with this is that if people are using a logging config that
# logs to the console (as is common eg under docker), they will get two
# copies of the exception. We could maybe try to detect that, but it's
# probably a cost we can bear.
logger.fatal("Error during startup", exc_info=True)
print("Error during startup:", file=sys.__stderr__)
traceback.print_exc(file=sys.__stderr__)
# it's no use calling sys.exit here, since that just raises a SystemExit
# exception which is then caught by the reactor, and everything carries
# on as normal.
os._exit(1)
reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper()))
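# Example (a sketch, not part of this module's API): an entrypoint typically
# registers its async startup routine like this, so that any exception during
# setup tears the process down cleanly:
#
#     async def start_my_worker():
#         await do_async_setup()
#
#     register_start(start_my_worker)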
def listen_metrics(bind_addresses, port):
"""
Start Prometheus metrics server.
"""
from synapse.metrics import RegistryProxy, start_http_server
for host in bind_addresses:
logger.info("Starting metrics listener on %s:%d", host, port)
start_http_server(port, addr=host, registry=RegistryProxy)
def listen_manhole(
bind_addresses: Iterable[str],
port: int,
manhole_settings: ManholeConfig,
manhole_globals: dict,
):
# twisted.conch.manhole 21.1.0 uses "int_from_bytes", which produces a confusing
# warning. It's fixed by https://github.com/twisted/twisted/pull/1522), so
# suppress the warning for now.
warnings.filterwarnings(
action="ignore",
category=CryptographyDeprecationWarning,
message="int_from_bytes is deprecated",
)
from synapse.util.manhole import manhole
listen_tcp(
bind_addresses,
port,
manhole(settings=manhole_settings, globals=manhole_globals),
)
def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
"""
Create a TCP socket for a port and several addresses
Returns:
list[twisted.internet.tcp.Port]: listening for TCP connections
"""
r = []
for address in bind_addresses:
try:
r.append(reactor.listenTCP(port, factory, backlog, address))
except error.CannotListenError as e:
check_bind_error(e, address, bind_addresses)
return r
def listen_ssl(
bind_addresses, port, factory, context_factory, reactor=reactor, backlog=50
):
"""
Create an TLS-over-TCP socket for a port and several addresses
Returns:
list of twisted.internet.tcp.Port listening for TLS connections
"""
r = []
for address in bind_addresses:
try:
r.append(
reactor.listenSSL(port, factory, context_factory, backlog, address)
)
except error.CannotListenError as e:
check_bind_error(e, address, bind_addresses)
return r
def refresh_certificate(hs):
"""
Refresh the TLS certificates that Synapse is using by re-reading them from
disk and updating the TLS context factories to use them.
"""
if not hs.config.has_tls_listener():
return
hs.config.read_certificate_from_disk()
hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)
if hs._listening_services:
logger.info("Updating context factories...")
for i in hs._listening_services:
# When you listenSSL, it doesn't make an SSL port but a TCP one with
# a TLS wrapping factory around the factory you actually want to get
# requests. This factory attribute is public but missing from
# Twisted's documentation.
if isinstance(i.factory, TLSMemoryBIOFactory):
addr = i.getHost()
logger.info(
"Replacing TLS context factory on [%s]:%i", addr.host, addr.port
)
# We want to replace TLS factories with a new one, with the new
# TLS configuration. We do this by reaching in and pulling out
# the wrappedFactory, and then re-wrapping it.
i.factory = TLSMemoryBIOFactory(
hs.tls_server_context_factory, False, i.factory.wrappedFactory
)
logger.info("Context factories updated.")
async def start(hs: "HomeServer"):
"""
Start a Synapse server or worker.
Should be called once the reactor is running.
Will start the main HTTP listeners and do some other startup tasks, and then
notify systemd.
Args:
hs: homeserver instance
"""
# Set up the SIGHUP machinery.
if hasattr(signal, "SIGHUP"):
reactor = hs.get_reactor()
@wrap_as_background_process("sighup")
def handle_sighup(*args, **kwargs):
# Tell systemd our state, if we're using it. This will silently fail if
# we're not using systemd.
sdnotify(b"RELOADING=1")
for i, args, kwargs in _sighup_callbacks:
i(*args, **kwargs)
sdnotify(b"READY=1")
# We defer running the sighup handlers until next reactor tick. This
# is so that we're in a sane state, e.g. flushing the logs may fail
# if the sighup happens in the middle of writing a log entry.
def run_sighup(*args, **kwargs):
# `callFromThread` should be "signal safe" as well as thread
# safe.
reactor.callFromThread(handle_sighup, *args, **kwargs)
signal.signal(signal.SIGHUP, run_sighup)
register_sighup(refresh_certificate, hs)
# Load the certificate from disk.
refresh_certificate(hs)
# Start the tracer
synapse.logging.opentracing.init_tracer(hs) # type: ignore[attr-defined] # noqa
# Instantiate the modules so they can register their web resources to the module API
# before we start the listeners.
module_api = hs.get_module_api()
for module, config in hs.config.modules.loaded_modules:
module(config=config, api=module_api)
load_legacy_spam_checkers(hs)
load_legacy_third_party_event_rules(hs)
load_legacy_presence_router(hs)
# If we've configured an expiry time for caches, start the background job now.
setup_expire_lru_cache_entries(hs)
# It is now safe to start your Synapse.
hs.start_listening()
hs.get_datastore().db_pool.start_profiling()
hs.get_pusherpool().start()
# Log when we start the shut down process.
hs.get_reactor().addSystemEventTrigger(
"before", "shutdown", logger.info, "Shutting down..."
)
setup_sentry(hs)
setup_sdnotify(hs)
# If background tasks are running on the main process, start collecting the
# phone home stats.
if hs.config.run_background_tasks:
start_phone_stats_home(hs)
# We now freeze all allocated objects in the hopes that (almost)
# everything currently allocated are things that will be used for the
# rest of time. Doing so means less work each GC (hopefully).
#
# This only works on Python 3.7
if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7):
gc.collect()
gc.freeze()
# Speed up shutdowns by freezing all allocated objects. This moves everything
# into the permanent generation and excludes them from the final GC.
# Unfortunately only works on Python 3.7
if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7):
atexit.register(gc.freeze)
def setup_sentry(hs):
"""Enable sentry integration, if enabled in configuration
Args:
hs (synapse.server.HomeServer)
"""
if not hs.config.sentry_enabled:
return
import sentry_sdk
sentry_sdk.init(dsn=hs.config.sentry_dsn, release=get_version_string(synapse))
# We set some default tags that give some context to this instance
with sentry_sdk.configure_scope() as scope:
scope.set_tag("matrix_server_name", hs.config.server_name)
app = hs.config.worker_app if hs.config.worker_app else "synapse.app.homeserver"
name = hs.get_instance_name()
scope.set_tag("worker_app", app)
scope.set_tag("worker_name", name)
def setup_sdnotify(hs):
"""Adds process state hooks to tell systemd what we are up to."""
# Tell systemd our state, if we're using it. This will silently fail if
# we're not using systemd.
sdnotify(b"READY=1\nMAINPID=%i" % (os.getpid(),))
hs.get_reactor().addSystemEventTrigger(
"before", "shutdown", sdnotify, b"STOPPING=1"
)
sdnotify_sockaddr = os.getenv("NOTIFY_SOCKET")
def sdnotify(state):
"""
Send a notification to systemd, if the NOTIFY_SOCKET env var is set.
This function is based on the sdnotify python package, but since it's only a few
lines of code, it's easier to duplicate it here than to add a dependency on a
package which many OSes don't include as a matter of principle.
Args:
state (bytes): notification to send
"""
if not isinstance(state, bytes):
raise TypeError("sdnotify should be called with a bytes")
if not sdnotify_sockaddr:
return
addr = sdnotify_sockaddr
if addr[0] == "@":
addr = "\0" + addr[1:]
try:
with socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) as sock:
sock.connect(addr)
sock.sendall(state)
except Exception as e:
# this is a bit surprising, since we don't expect to have a NOTIFY_SOCKET
# unless systemd is expecting us to notify it.
logger.warning("Unable to send notification to systemd: %s", e)
def max_request_body_size(config: HomeServerConfig) -> int:
"""Get a suitable maximum size for incoming HTTP requests"""
# Other than media uploads, the biggest request we expect to see is a fully-loaded
# /federation/v1/send request.
#
# The main thing in such a request is up to 50 PDUs, and up to 100 EDUs. PDUs are
# limited to 65536 bytes (possibly slightly more if the sender didn't use canonical
# json encoding); there is no specced limit to EDUs (see
# https://github.com/matrix-org/matrix-doc/issues/3121).
#
# in short, we somewhat arbitrarily limit requests to 200 * 64K (about 12.5M)
#
max_request_size = 200 * MAX_PDU_SIZE
# if we have a media repo enabled, we may need to allow larger uploads than that
if config.media.can_load_media_repo:
max_request_size = max(max_request_size, config.media.max_upload_size)
return max_request_size
| 35.165354 | 91 | 0.687248 |
82aa893b089e393da9961ad861c5639845615db4 | 50 | py | Python | python/perspective/perspective/core/_version.py | admariner/perspective | 15e1788cecfc14a515c3e5bafd4ed88e1001195e | [
"Apache-2.0"
] | 1 | 2022-01-09T12:14:25.000Z | 2022-01-09T12:14:25.000Z | python/perspective/perspective/core/_version.py | wenner/perspective | 15e1788cecfc14a515c3e5bafd4ed88e1001195e | [
"Apache-2.0"
] | 36 | 2021-10-12T08:32:00.000Z | 2021-12-23T23:59:26.000Z | python/perspective/perspective/core/_version.py | admariner/perspective | 15e1788cecfc14a515c3e5bafd4ed88e1001195e | [
"Apache-2.0"
] | null | null | null | __version__ = "1.1.0"
major_minor_version = "1.1"
| 16.666667 | 27 | 0.7 |
24488a670b076d68ef8ccfc032a8e0cdb1309009 | 10,778 | py | Python | onetask/__init__.py | onetask-ai/onetask-python | ea810a3092a029d5b30f6af9e9a5f17567e0b901 | [
"MIT"
] | 7 | 2021-06-06T10:57:08.000Z | 2022-02-04T15:11:55.000Z | onetask/__init__.py | onetask-ai/onetask-python | ea810a3092a029d5b30f6af9e9a5f17567e0b901 | [
"MIT"
] | 3 | 2021-07-15T11:57:57.000Z | 2022-01-17T17:27:55.000Z | onetask/__init__.py | onetask-ai/onetask-python | ea810a3092a029d5b30f6af9e9a5f17567e0b901 | [
"MIT"
] | 1 | 2021-06-06T10:57:16.000Z | 2021-06-06T10:57:16.000Z | # -*- coding: utf-8 -*-
from typing import Callable, Dict, List, Optional, Union
from wasabi import msg
import pandas as pd
import numpy as np
from tqdm import tqdm
import json
from onetask import api_calls, settings, util, auto_lf, embedding
import numpy as np
from bertopic import BERTopic
from collections import defaultdict
class Client:
"""
Python Client for the onetask API. If you have any questions, please contact our support.
Args:
user_name (str): The email with which you've been registered at onetask
password (str): Your password for onetask
project_id (str): The unique identifier for a project, can be found in the url after projects/
stage (str): The onetask system staging environment [beta, test, dev, local]
"""
def __init__(
self, user_name: str, password: str, project_id: str, stage: str = "beta"
):
settings.set_stage(stage)
self.session_token = api_calls.create_session_token(
user_name=user_name, password=password
)
self.project_id = project_id
if self.session_token is not None:
msg.good("Logged in to system.")
if not api_calls.GetProjectExists(project_id, self.session_token).exists:
msg.fail(f"Project with ID {self.project_id} does not exist.")
else:
msg.fail("Could not log in. Please check your username and password.")
def _get_unique_attributes(self) -> List[Dict[str, Union[str, bool]]]:
"""
Get the record schema for your project shown in the web app under 'Settings'
Returns:
List[Dict[str, Union[str, bool]]]: each record schema element
"""
attributes = api_calls.GetUniqueAttributes(
self.project_id, self.session_token
).attributes
return attributes
def register_lf(self, lf: Callable, autoexecute: bool = True) -> None:
"""
Send a local labeling function to the onetask application. Please make sure that the function fits the desired structure (for more information, please visit onetask.readme.io/reference)
Args:
lf (Callable): The function object you want to send to the system
autoexecute (bool, optional): If true, the function is automatically executed when entered in the system. Defaults to True.
"""
project_id, name, source_code, docs = util.unpack_python_function(
lf, self.project_id
)
if api_calls.PostLabelingFunction(
project_id, name, source_code, docs, autoexecute, self.session_token
).already_exists:
msg.warn(
f"Labeling function '{name}' already exists. It has not been entered again!"
)
else:
msg.good(f"Registered labeling function '{name}'.")
def get_records(self, keep_unlabeled=False, keep_programmatic=False):
"""
Get the records of your project.
Args:
keep_unlabeled (bool, optional): If true, you will receive all records, even if they are not labeled yet. Defaults to False.
            keep_programmatic (bool, optional): If true, programmatically labeled records are also returned. Defaults to False.
        Returns:
            pd.DataFrame: the record data plus 'label' and 'is_programmatic' columns (empty if nothing matches)
"""
records = api_calls.GetRecords(
self.project_id,
self.session_token,
keep_unlabeled=keep_unlabeled,
keep_programmatic=keep_programmatic,
).records
fetched_df = pd.DataFrame(records)
if len(fetched_df) > 0:
df = fetched_df["data"].apply(pd.Series)
df["label"] = fetched_df["label"]
df["is_programmatic"] = fetched_df["is_programmatic"]
return df
else:
msg.warn("Empty result")
return fetched_df # empty df
def get_embeddings(self, config_string: str) -> pd.DataFrame:
"""
Get the embeddings of your project of a configuration string
Args:
config_string (str): The name of your embedding
Returns:
pd.DataFrame: containing the record attributes and the embedding vectors
"""
embeddings = api_calls.GetEmbeddings(
self.project_id, self.session_token, config_string
).embeddings
fetched_embeddings = pd.DataFrame(embeddings)
if len(fetched_embeddings) > 0:
df = fetched_embeddings["data"].apply(pd.Series)
df[config_string] = fetched_embeddings["embedding"]
return df
else:
msg.warn("Empty result")
return fetched_embeddings
def generate_embeddings(
self, attribute_configs_dict: Dict[str, str], file_path: Optional[str] = None
) -> None:
"""
---EXPERIMENTAL---
Create new embeddings to upload into your project.
Args:
attribute_configs_dict (Dict[str, str]): describe which attribute should be embedded using which technique or model.
file_path (Optional[str], optional): path where the embeddings should be stored to. Defaults to 'embeddings_{project_id}.json'.
"""
if not file_path:
file_path = f"embeddings_{self.project_id}.json"
msg.info("Loading schema")
attributes = self._get_unique_attributes()
unique_attribute = None
for attr in attributes:
if attr["unique"]:
unique_attribute = attr["name"]
embedding_name = "-".join(list(attribute_configs_dict.values()))
if unique_attribute:
msg.info("Loading records")
records = self.get_records(keep_programmatic=False, keep_unlabeled=True)
embedding_concat = defaultdict(list)
for attribute, config_string in attribute_configs_dict.items():
vals = np.stack(records[attribute])
records_subset = records[[unique_attribute, attribute]].to_dict(
orient="records"
)
msg.info(f"Loading embedding model {config_string} for {attribute}")
model = embedding.get_fitted_model_by_config_string(config_string, vals)
if model:
msg.info("Starting embedding procedure")
for _, row in tqdm(
enumerate(records_subset),
total=len(records_subset),
desc="Embedding records...",
):
embedding_concat[row[unique_attribute]].extend(
model.encode(row[attribute]).tolist()
)
msg.good(f"Finished embedding procedure. Storing to {file_path}")
export = []
for unique_val, embedding_vector in embedding_concat.items():
export.append(
{unique_attribute: unique_val, embedding_name: embedding_vector}
)
with open(file_path, "w") as file:
json.dump(export, file)
else:
msg.fail(
"Currently, you must have exactly one unique attribute for embedding generation. Please validate this in the web app under 'Settings'"
)
def model_topics(self, attribute: str, config_string: str) -> BERTopic:
"""
---EXPERIMENTAL---
Apply a BERTopic to your data to do topic modelling. Further docs: https://maartengr.github.io/BERTopic/tutorial/visualization/visualization.html
Args:
attribute (str): the name of the string attribute you want to model
config_string (str): name of the embedding vector in the web application that you want to make use of. This MUST be a BERT-related embedding to work properly.
Returns:
BERTopic: BERTopic object that can be called for topic modelling
"""
msg.info("Loading embeddings")
embeddings_df = self.get_embeddings(config_string)
if len(embeddings_df) > 0:
docs = embeddings_df[attribute].tolist()
embeddings = np.stack(embeddings_df[config_string])
msg.info("Fitting Topic Model")
model = BERTopic(verbose=True, n_gram_range=[1, 2], top_n_words=30)
model.fit(docs, embeddings)
msg.good("Finished training")
msg.info(
"Further docs: https://maartengr.github.io/BERTopic/tutorial/visualization/visualization.html"
)
return model
def generate_regex_labeling_functions(
self,
nlp,
attribute: str,
min_precision: Optional[float] = 0.8,
filter_stopwords: Optional[bool] = False,
) -> pd.DataFrame:
"""
---EXPERIMENTAL---
Autogenerate labeling functions containing regular expressions to model your data. Uses spacy to model the linguistics of your data.
Args:
nlp (spacy.lang): nlp object of spacy for the specific language (e.g. en_core_web_sm)
attribute (str): the name of the attribute that should be analyzed for regular expressions
min_precision (Optional[float], optional): needed precision to generate a labeling function. Defaults to 0.8.
filter_stopwords (Optional[bool], optional): if set to true, stop words like 'this', 'that' etc. will be removed. Defaults to False.
Returns:
            pd.DataFrame: one generated labeling function per row, with its 'label', 'code', 'est_coverage' and 'est_precision'
"""
records = self.get_records()
if len(records) > 0:
candidates = auto_lf.derive_regex_candidates(
nlp, records, attribute, filter_stopwords
)
return auto_lf.create_regex_fns(
records, candidates, attribute, min_precision
)
else:
msg.fail("No manually labeled records available!")
def display_generated_labeling_functions(
self, lf_df: pd.DataFrame, label: Optional[str] = None
):
"""
Helper function to display the autogenerated labeling functions
Args:
lf_df (pd.DataFrame): outcome of client.generate_regex_labeling_functions
label (Optional[str], optional): filter option to only show one label. Defaults to None.
"""
if label is not None:
lf_df = lf_df.loc[lf_df["label"] == label]
for _, row in lf_df.iterrows():
est_coverage = row["est_coverage"]
est_precision = row["est_precision"]
code = row["code"]
msg.info(
f"Coverage: {est_coverage * 100}% | Precision: {est_precision * 100}%"
)
print(code)
print()
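# A minimal usage sketch (the credentials and project id below are
# placeholders):
#
#     client = Client(
#         user_name="you@example.com",
#         password="...",
#         project_id="00000000-0000-0000-0000-000000000000",
#     )
#     records = client.get_records(keep_unlabeled=True)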
| 40.825758 | 193 | 0.613472 |
2dc6b5313cc6ab70a69cfd085b4a2aeae9444208 | 92,889 | py | Python | tests/infer/test_valid_models.py | sjfleming/pyro | c8dc40a75cc4ff1f43c6ff9178d91c08155d7973 | [
"Apache-2.0"
] | 1 | 2021-09-30T05:39:23.000Z | 2021-09-30T05:39:23.000Z | tests/infer/test_valid_models.py | liyunlong10/pyro | eadca9c9ed9654573037acdf4f48b34ea40037fe | [
"Apache-2.0"
] | null | null | null | tests/infer/test_valid_models.py | liyunlong10/pyro | eadca9c9ed9654573037acdf4f48b34ea40037fe | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
import warnings
from collections import defaultdict
import pytest
import torch
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.distributions.testing import fakes
from pyro.infer import (
SVI,
EnergyDistance,
Trace_ELBO,
TraceEnum_ELBO,
TraceGraph_ELBO,
TraceMeanField_ELBO,
TraceTailAdaptive_ELBO,
config_enumerate,
)
from pyro.infer.reparam import LatentStableReparam
from pyro.infer.tracetmc_elbo import TraceTMC_ELBO
from pyro.infer.util import torch_item
from pyro.ops.indexing import Vindex
from pyro.optim import Adam
from pyro.poutine.plate_messenger import block_plate
from tests.common import assert_close
logger = logging.getLogger(__name__)
# This file tests a variety of model,guide pairs with valid and invalid structure.
def EnergyDistance_prior(**kwargs):
kwargs["prior_scale"] = 0.0
kwargs.pop("strict_enumeration_warning", None)
return EnergyDistance(**kwargs)
def EnergyDistance_noprior(**kwargs):
kwargs["prior_scale"] = 1.0
kwargs.pop("strict_enumeration_warning", None)
return EnergyDistance(**kwargs)
def assert_ok(model, guide, elbo, **kwargs):
"""
Assert that inference works without warnings or errors.
"""
pyro.clear_param_store()
inference = SVI(model, guide, Adam({"lr": 1e-6}), elbo)
inference.step(**kwargs)
try:
pyro.set_rng_seed(0)
loss = elbo.loss(model, guide, **kwargs)
if hasattr(elbo, "differentiable_loss"):
try:
pyro.set_rng_seed(0)
differentiable_loss = torch_item(
elbo.differentiable_loss(model, guide, **kwargs)
)
except ValueError:
pass # Ignore cases where elbo cannot be differentiated
else:
assert_close(differentiable_loss, loss, atol=0.01)
if hasattr(elbo, "loss_and_grads"):
pyro.set_rng_seed(0)
loss_and_grads = elbo.loss_and_grads(model, guide, **kwargs)
assert_close(loss_and_grads, loss, atol=0.01)
except NotImplementedError:
pass # Ignore cases where loss isn't implemented, eg. TraceTailAdaptive_ELBO
def assert_error(model, guide, elbo, match=None):
"""
Assert that inference fails with an error.
"""
pyro.clear_param_store()
inference = SVI(model, guide, Adam({"lr": 1e-6}), elbo)
with pytest.raises(
(NotImplementedError, UserWarning, KeyError, ValueError, RuntimeError),
match=match,
):
inference.step()
def assert_warning(model, guide, elbo):
"""
Assert that inference works but with a warning.
"""
pyro.clear_param_store()
inference = SVI(model, guide, Adam({"lr": 1e-6}), elbo)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
inference.step()
assert len(w), "No warnings were raised"
for warning in w:
logger.info(warning)
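# The three helpers above are the backbone of every test in this file (usage
# sketch): pair a model/guide with an ELBO instance and assert the outcome.
#
#     assert_ok(model, guide, Trace_ELBO())
#     assert_error(model, guide, Trace_ELBO(), match="Multiple sample sites")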
@pytest.mark.parametrize(
"Elbo",
[
Trace_ELBO,
TraceGraph_ELBO,
TraceEnum_ELBO,
TraceTMC_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
],
)
@pytest.mark.parametrize("strict_enumeration_warning", [True, False])
def test_nonempty_model_empty_guide_ok(Elbo, strict_enumeration_warning):
def model():
loc = torch.tensor([0.0, 0.0])
scale = torch.tensor([1.0, 1.0])
pyro.sample("x", dist.Normal(loc, scale).to_event(1), obs=loc)
def guide():
pass
elbo = Elbo(strict_enumeration_warning=strict_enumeration_warning)
if strict_enumeration_warning and Elbo in (TraceEnum_ELBO, TraceTMC_ELBO):
assert_warning(model, guide, elbo)
else:
assert_ok(model, guide, elbo)
@pytest.mark.parametrize(
"Elbo",
[
Trace_ELBO,
TraceGraph_ELBO,
TraceEnum_ELBO,
TraceTMC_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
],
)
@pytest.mark.parametrize("strict_enumeration_warning", [True, False])
def test_nonempty_model_empty_guide_error(Elbo, strict_enumeration_warning):
def model():
pyro.sample("x", dist.Normal(0, 1))
def guide():
pass
elbo = Elbo(strict_enumeration_warning=strict_enumeration_warning)
assert_error(model, guide, elbo)
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
@pytest.mark.parametrize("strict_enumeration_warning", [True, False])
def test_empty_model_empty_guide_ok(Elbo, strict_enumeration_warning):
def model():
pass
def guide():
pass
elbo = Elbo(strict_enumeration_warning=strict_enumeration_warning)
if strict_enumeration_warning and Elbo in (TraceEnum_ELBO, TraceTMC_ELBO):
assert_warning(model, guide, elbo)
else:
assert_ok(model, guide, elbo)
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_variable_clash_in_model_error(Elbo):
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
pyro.sample("x", dist.Bernoulli(p)) # Should error here.
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p))
assert_error(model, guide, Elbo(), match="Multiple sample sites named")
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_model_guide_dim_mismatch_error(Elbo):
def model():
loc = torch.zeros(2)
scale = torch.ones(2)
pyro.sample("x", dist.Normal(loc, scale).to_event(1))
def guide():
loc = pyro.param("loc", torch.zeros(2, 1, requires_grad=True))
scale = pyro.param("scale", torch.ones(2, 1, requires_grad=True))
pyro.sample("x", dist.Normal(loc, scale).to_event(2))
assert_error(
model,
guide,
Elbo(strict_enumeration_warning=False),
match="invalid log_prob shape|Model and guide event_dims disagree",
)
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_model_guide_shape_mismatch_error(Elbo):
def model():
loc = torch.zeros(1, 2)
scale = torch.ones(1, 2)
pyro.sample("x", dist.Normal(loc, scale).to_event(2))
def guide():
loc = pyro.param("loc", torch.zeros(2, 1, requires_grad=True))
scale = pyro.param("scale", torch.ones(2, 1, requires_grad=True))
pyro.sample("x", dist.Normal(loc, scale).to_event(2))
assert_error(
model,
guide,
Elbo(strict_enumeration_warning=False),
match="Model and guide shapes disagree",
)
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_variable_clash_in_guide_error(Elbo):
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p))
pyro.sample("x", dist.Bernoulli(p)) # Should error here.
assert_error(model, guide, Elbo(), match="Multiple sample sites named")
@pytest.mark.parametrize("has_rsample", [False, True])
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_set_has_rsample_ok(has_rsample, Elbo):
# This model has sparse gradients, so users may want to disable
# reparametrized sampling to reduce variance of gradient estimates.
# However both versions should be correct, i.e. with or without has_rsample.
def model():
z = pyro.sample("z", dist.Normal(0, 1))
loc = (z * 100).clamp(min=0, max=1) # sparse gradients
pyro.sample("x", dist.Normal(loc, 1), obs=torch.tensor(0.0))
def guide():
loc = pyro.param("loc", torch.tensor(0.0))
pyro.sample("z", dist.Normal(loc, 1).has_rsample_(has_rsample))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo(strict_enumeration_warning=False))
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_not_has_rsample_ok(Elbo):
def model():
z = pyro.sample("z", dist.Normal(0, 1))
p = z.round().clamp(min=0.2, max=0.8) # discontinuous
pyro.sample("x", dist.Bernoulli(p), obs=torch.tensor(0.0))
def guide():
loc = pyro.param("loc", torch.tensor(0.0))
pyro.sample("z", dist.Normal(loc, 1).has_rsample_(False))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo(strict_enumeration_warning=False))
@pytest.mark.parametrize("subsample_size", [None, 2], ids=["full", "subsample"])
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_iplate_ok(subsample_size, Elbo):
def model():
p = torch.tensor(0.5)
for i in pyro.plate("plate", 4, subsample_size):
pyro.sample("x_{}".format(i), dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
for i in pyro.plate("plate", 4, subsample_size):
pyro.sample("x_{}".format(i), dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_iplate_variable_clash_error(Elbo):
def model():
p = torch.tensor(0.5)
for i in pyro.plate("plate", 2):
# Each loop iteration should give the sample site a different name.
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
for i in pyro.plate("plate", 2):
# Each loop iteration should give the sample site a different name.
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_error(model, guide, Elbo(), match="Multiple sample sites named")
@pytest.mark.parametrize("subsample_size", [None, 5], ids=["full", "subsample"])
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_plate_ok(subsample_size, Elbo):
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 10, subsample_size) as ind:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind)]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate("plate", 10, subsample_size) as ind:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind)]))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("subsample_size", [None, 5], ids=["full", "subsample"])
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_plate_subsample_param_ok(subsample_size, Elbo):
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 10, subsample_size):
pyro.sample("x", dist.Bernoulli(p))
def guide():
with pyro.plate("plate", 10, subsample_size) as ind:
p0 = pyro.param("p0", torch.tensor(0.0), event_dim=0)
assert p0.shape == ()
p = pyro.param("p", 0.5 * torch.ones(10), event_dim=0)
assert len(p) == len(ind)
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
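# Background, stated informally: when pyro.param is given an event_dim,
# statements inside a plate are automatically subsampled along the plate
# dim. That is why "p" above is declared with full shape (10,) but observed
# with length len(ind), while the scalar "p0", having no batch dims to the
# left of event_dim, is returned unchanged.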
@pytest.mark.parametrize("subsample_size", [None, 5], ids=["full", "subsample"])
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_plate_subsample_primitive_ok(subsample_size, Elbo):
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 10, subsample_size):
pyro.sample("x", dist.Bernoulli(p))
def guide():
with pyro.plate("plate", 10, subsample_size) as ind:
p0 = torch.tensor(0.0)
p0 = pyro.subsample(p0, event_dim=0)
assert p0.shape == ()
p = 0.5 * torch.ones(10)
p = pyro.subsample(p, event_dim=0)
assert len(p) == len(ind)
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("subsample_size", [None, 5], ids=["full", "subsample"])
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
@pytest.mark.parametrize(
"shape,ok",
[
((), True),
((1,), True),
((10,), True),
((3, 1), True),
((3, 10), True),
        ((5,), False),
((3, 5), False),
],
)
def test_plate_param_size_mismatch_error(subsample_size, Elbo, shape, ok):
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 10, subsample_size):
pyro.sample("x", dist.Bernoulli(p))
def guide():
with pyro.plate("plate", 10, subsample_size):
pyro.param("p0", torch.ones(shape), event_dim=0)
p = pyro.param("p", torch.ones(10), event_dim=0)
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
if ok:
assert_ok(model, guide, Elbo())
else:
assert_error(model, guide, Elbo(), match="invalid shape of pyro.param")
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_plate_no_size_ok(Elbo):
def model():
p = torch.tensor(0.5)
with pyro.plate("plate"):
pyro.sample("x", dist.Bernoulli(p).expand_by([10]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate("plate"):
pyro.sample("x", dist.Bernoulli(p).expand_by([10]))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, default="parallel", num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("max_plate_nesting", [0, float("inf")])
@pytest.mark.parametrize("subsample_size", [None, 2], ids=["full", "subsample"])
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_iplate_iplate_ok(subsample_size, Elbo, max_plate_nesting):
def model():
p = torch.tensor(0.5)
outer_iplate = pyro.plate("plate_0", 3, subsample_size)
inner_iplate = pyro.plate("plate_1", 3, subsample_size)
for i in outer_iplate:
for j in inner_iplate:
pyro.sample("x_{}_{}".format(i, j), dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
outer_iplate = pyro.plate("plate_0", 3, subsample_size)
inner_iplate = pyro.plate("plate_1", 3, subsample_size)
for i in outer_iplate:
for j in inner_iplate:
pyro.sample("x_{}_{}".format(i, j), dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide, "parallel")
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo(max_plate_nesting=max_plate_nesting))
@pytest.mark.parametrize("max_plate_nesting", [0, float("inf")])
@pytest.mark.parametrize("subsample_size", [None, 2], ids=["full", "subsample"])
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_iplate_iplate_swap_ok(subsample_size, Elbo, max_plate_nesting):
def model():
p = torch.tensor(0.5)
outer_iplate = pyro.plate("plate_0", 3, subsample_size)
inner_iplate = pyro.plate("plate_1", 3, subsample_size)
for i in outer_iplate:
for j in inner_iplate:
pyro.sample("x_{}_{}".format(i, j), dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
outer_iplate = pyro.plate("plate_0", 3, subsample_size)
inner_iplate = pyro.plate("plate_1", 3, subsample_size)
for j in inner_iplate:
for i in outer_iplate:
pyro.sample("x_{}_{}".format(i, j), dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide, "parallel")
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, default="parallel", num_samples=2)
assert_ok(model, guide, Elbo(max_plate_nesting=max_plate_nesting))
@pytest.mark.parametrize("subsample_size", [None, 5], ids=["full", "subsample"])
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_iplate_in_model_not_guide_ok(subsample_size, Elbo):
def model():
p = torch.tensor(0.5)
for i in pyro.plate("plate", 10, subsample_size):
pass
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("subsample_size", [None, 5], ids=["full", "subsample"])
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
@pytest.mark.parametrize("is_validate", [True, False])
def test_iplate_in_guide_not_model_error(subsample_size, Elbo, is_validate):
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
for i in pyro.plate("plate", 10, subsample_size):
pass
pyro.sample("x", dist.Bernoulli(p))
with pyro.validation_enabled(is_validate):
if is_validate:
assert_error(
model,
guide,
Elbo(),
match="Found plate statements in guide but not model",
)
else:
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_plate_broadcast_error(Elbo):
def model():
p = torch.tensor(0.5, requires_grad=True)
with pyro.plate("plate", 10, 5):
pyro.sample("x", dist.Bernoulli(p).expand_by([2]))
assert_error(model, model, Elbo(), match="Shape mismatch inside plate")
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_plate_iplate_ok(Elbo):
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 3, 2) as ind:
for i in pyro.plate("iplate", 3, 2):
pyro.sample("x_{}".format(i), dist.Bernoulli(p).expand_by([len(ind)]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate("plate", 3, 2) as ind:
for i in pyro.plate("iplate", 3, 2):
pyro.sample("x_{}".format(i), dist.Bernoulli(p).expand_by([len(ind)]))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_iplate_plate_ok(Elbo):
def model():
p = torch.tensor(0.5)
inner_plate = pyro.plate("plate", 3, 2)
for i in pyro.plate("iplate", 3, 2):
with inner_plate as ind:
pyro.sample("x_{}".format(i), dist.Bernoulli(p).expand_by([len(ind)]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
inner_plate = pyro.plate("plate", 3, 2)
for i in pyro.plate("iplate", 3, 2):
with inner_plate as ind:
pyro.sample("x_{}".format(i), dist.Bernoulli(p).expand_by([len(ind)]))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
@pytest.mark.parametrize("sizes", [(3,), (3, 4), (3, 4, 5)])
def test_plate_stack_ok(Elbo, sizes):
def model():
p = torch.tensor(0.5)
with pyro.plate_stack("plate_stack", sizes):
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate_stack("plate_stack", sizes):
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
@pytest.mark.parametrize("sizes", [(3,), (3, 4), (3, 4, 5)])
def test_plate_stack_and_plate_ok(Elbo, sizes):
def model():
p = torch.tensor(0.5)
with pyro.plate_stack("plate_stack", sizes):
with pyro.plate("plate", 7):
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate_stack("plate_stack", sizes):
with pyro.plate("plate", 7):
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("sizes", [(3,), (3, 4), (3, 4, 5)])
def test_plate_stack_sizes(sizes):
def model():
p = 0.5 * torch.ones(3)
with pyro.plate_stack("plate_stack", sizes):
x = pyro.sample("x", dist.Bernoulli(p).to_event(1))
assert x.shape == sizes + (3,)
model()
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_nested_plate_plate_ok(Elbo):
def model():
p = torch.tensor(0.5, requires_grad=True)
with pyro.plate("plate_outer", 10, 5) as ind_outer:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind_outer)]))
with pyro.plate("plate_inner", 11, 6) as ind_inner:
pyro.sample(
"y", dist.Bernoulli(p).expand_by([len(ind_inner), len(ind_outer)])
)
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(model)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(model, num_samples=2)
else:
guide = model
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_plate_reuse_ok(Elbo):
def model():
p = torch.tensor(0.5, requires_grad=True)
plate_outer = pyro.plate("plate_outer", 10, 5, dim=-1)
plate_inner = pyro.plate("plate_inner", 11, 6, dim=-2)
with plate_outer as ind_outer:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind_outer)]))
with plate_inner as ind_inner:
pyro.sample("y", dist.Bernoulli(p).expand_by([len(ind_inner), 1]))
with plate_outer as ind_outer, plate_inner as ind_inner:
pyro.sample(
"z", dist.Bernoulli(p).expand_by([len(ind_inner), len(ind_outer)])
)
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(model)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(model, num_samples=2)
else:
guide = model
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize(
"Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO]
)
def test_nested_plate_plate_dim_error_1(Elbo):
def model():
p = torch.tensor([0.5], requires_grad=True)
with pyro.plate("plate_outer", 10, 5) as ind_outer:
pyro.sample(
"x", dist.Bernoulli(p).expand_by([len(ind_outer)])
) # error here
with pyro.plate("plate_inner", 11, 6) as ind_inner:
pyro.sample("y", dist.Bernoulli(p).expand_by([len(ind_inner)]))
pyro.sample(
"z", dist.Bernoulli(p).expand_by([len(ind_outer), len(ind_inner)])
)
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(model)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(model, num_samples=2)
else:
guide = model
assert_error(model, guide, Elbo(), match="invalid log_prob shape")
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_nested_plate_plate_dim_error_2(Elbo):
def model():
p = torch.tensor([0.5], requires_grad=True)
with pyro.plate("plate_outer", 10, 5) as ind_outer:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind_outer), 1]))
with pyro.plate("plate_inner", 11, 6) as ind_inner:
pyro.sample(
"y", dist.Bernoulli(p).expand_by([len(ind_outer)])
) # error here
pyro.sample(
"z", dist.Bernoulli(p).expand_by([len(ind_outer), len(ind_inner)])
)
guide = config_enumerate(model) if Elbo is TraceEnum_ELBO else model
assert_error(model, guide, Elbo(), match="Shape mismatch inside plate")
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_nested_plate_plate_dim_error_3(Elbo):
def model():
p = torch.tensor([0.5], requires_grad=True)
with pyro.plate("plate_outer", 10, 5) as ind_outer:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind_outer), 1]))
with pyro.plate("plate_inner", 11, 6) as ind_inner:
pyro.sample("y", dist.Bernoulli(p).expand_by([len(ind_inner)]))
pyro.sample(
"z", dist.Bernoulli(p).expand_by([len(ind_inner), 1])
) # error here
guide = config_enumerate(model) if Elbo is TraceEnum_ELBO else model
assert_error(model, guide, Elbo(), match="invalid log_prob shape|shape mismatch")
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_nested_plate_plate_dim_error_4(Elbo):
def model():
p = torch.tensor([0.5], requires_grad=True)
with pyro.plate("plate_outer", 10, 5) as ind_outer:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind_outer), 1]))
with pyro.plate("plate_inner", 11, 6) as ind_inner:
pyro.sample("y", dist.Bernoulli(p).expand_by([len(ind_inner)]))
pyro.sample(
"z", dist.Bernoulli(p).expand_by([len(ind_outer), len(ind_outer)])
) # error here
guide = config_enumerate(model) if Elbo is TraceEnum_ELBO else model
assert_error(model, guide, Elbo(), match="hape mismatch inside plate")
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_nested_plate_plate_subsample_param_ok(Elbo):
def model():
with pyro.plate("plate_outer", 10, 5):
pyro.sample("x", dist.Bernoulli(0.2))
with pyro.plate("plate_inner", 11, 6):
pyro.sample("y", dist.Bernoulli(0.2))
def guide():
p0 = pyro.param("p0", 0.5 * torch.ones(4, 5), event_dim=2)
assert p0.shape == (4, 5)
with pyro.plate("plate_outer", 10, 5):
p1 = pyro.param("p1", 0.5 * torch.ones(10, 3), event_dim=1)
assert p1.shape == (5, 3)
px = pyro.param("px", 0.5 * torch.ones(10), event_dim=0)
assert px.shape == (5,)
pyro.sample("x", dist.Bernoulli(px))
with pyro.plate("plate_inner", 11, 6):
py = pyro.param("py", 0.5 * torch.ones(11, 10), event_dim=0)
assert py.shape == (6, 5)
pyro.sample("y", dist.Bernoulli(py))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_nonnested_plate_plate_ok(Elbo):
def model():
p = torch.tensor(0.5, requires_grad=True)
with pyro.plate("plate_0", 10, 5) as ind1:
pyro.sample("x0", dist.Bernoulli(p).expand_by([len(ind1)]))
with pyro.plate("plate_1", 11, 6) as ind2:
pyro.sample("x1", dist.Bernoulli(p).expand_by([len(ind2)]))
guide = config_enumerate(model) if Elbo is TraceEnum_ELBO else model
assert_ok(model, guide, Elbo())
def test_three_indep_plate_at_different_depths_ok():
r"""
/\
/\ ia
ia ia
"""
def model():
p = torch.tensor(0.5)
inner_plate = pyro.plate("plate2", 10, 5)
for i in pyro.plate("plate0", 2):
pyro.sample("x_%d" % i, dist.Bernoulli(p))
if i == 0:
for j in pyro.plate("plate1", 2):
with inner_plate as ind:
pyro.sample("y_%d" % j, dist.Bernoulli(p).expand_by([len(ind)]))
elif i == 1:
with inner_plate as ind:
pyro.sample("z", dist.Bernoulli(p).expand_by([len(ind)]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
inner_plate = pyro.plate("plate2", 10, 5)
for i in pyro.plate("plate0", 2):
pyro.sample("x_%d" % i, dist.Bernoulli(p))
if i == 0:
for j in pyro.plate("plate1", 2):
with inner_plate as ind:
pyro.sample("y_%d" % j, dist.Bernoulli(p).expand_by([len(ind)]))
elif i == 1:
with inner_plate as ind:
pyro.sample("z", dist.Bernoulli(p).expand_by([len(ind)]))
assert_ok(model, guide, TraceGraph_ELBO())
def test_plate_wrong_size_error():
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 10, 5) as ind:
pyro.sample("x", dist.Bernoulli(p).expand_by([1 + len(ind)]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate("plate", 10, 5) as ind:
pyro.sample("x", dist.Bernoulli(p).expand_by([1 + len(ind)]))
assert_error(model, guide, TraceGraph_ELBO(), match="Shape mismatch inside plate")
def test_block_plate_name_ok():
def model():
a = pyro.sample("a", dist.Normal(0, 1))
assert a.shape == ()
with pyro.plate("plate", 2):
b = pyro.sample("b", dist.Normal(0, 1))
assert b.shape == (2,)
with block_plate("plate"):
c = pyro.sample("c", dist.Normal(0, 1))
assert c.shape == ()
def guide():
c = pyro.sample("c", dist.Normal(0, 1))
assert c.shape == ()
with pyro.plate("plate", 2):
b = pyro.sample("b", dist.Normal(0, 1))
assert b.shape == (2,)
with block_plate("plate"):
a = pyro.sample("a", dist.Normal(0, 1))
assert a.shape == ()
assert_ok(model, guide, Trace_ELBO())
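# Background, stated informally: block_plate hides an enclosing plate from
# the sites sampled under it, so those sites behave as if they were outside
# the plate. The plate can be selected by name (above) or by dim (next
# test); selecting a plate that is not actually enclosing is an error (see
# test_block_plate_missing_error).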
def test_block_plate_dim_ok():
def model():
a = pyro.sample("a", dist.Normal(0, 1))
assert a.shape == ()
with pyro.plate("plate", 2):
b = pyro.sample("b", dist.Normal(0, 1))
assert b.shape == (2,)
with block_plate(dim=-1):
c = pyro.sample("c", dist.Normal(0, 1))
assert c.shape == ()
def guide():
c = pyro.sample("c", dist.Normal(0, 1))
assert c.shape == ()
with pyro.plate("plate", 2):
b = pyro.sample("b", dist.Normal(0, 1))
assert b.shape == (2,)
with block_plate(dim=-1):
a = pyro.sample("a", dist.Normal(0, 1))
assert a.shape == ()
assert_ok(model, guide, Trace_ELBO())
def test_block_plate_missing_error():
def model():
with block_plate("plate"):
pyro.sample("a", dist.Normal(0, 1))
def guide():
pyro.sample("a", dist.Normal(0, 1))
assert_error(model, guide, Trace_ELBO(), match="block_plate matched 0 messengers")
@pytest.mark.parametrize("enumerate_", [None, "sequential", "parallel"])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_enum_discrete_misuse_warning(Elbo, enumerate_):
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p), infer={"enumerate": enumerate_})
if (enumerate_ is None) == (Elbo is TraceEnum_ELBO):
assert_warning(model, guide, Elbo(max_plate_nesting=0))
else:
assert_ok(model, guide, Elbo(max_plate_nesting=0))
def test_enum_discrete_single_ok():
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p))
assert_ok(model, config_enumerate(guide), TraceEnum_ELBO())
@pytest.mark.parametrize("strict_enumeration_warning", [False, True])
def test_enum_discrete_missing_config_warning(strict_enumeration_warning):
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p))
elbo = TraceEnum_ELBO(strict_enumeration_warning=strict_enumeration_warning)
if strict_enumeration_warning:
assert_warning(model, guide, elbo)
else:
assert_ok(model, guide, elbo)
def test_enum_discrete_single_single_ok():
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
pyro.sample("y", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p))
pyro.sample("y", dist.Bernoulli(p))
assert_ok(model, config_enumerate(guide), TraceEnum_ELBO())
def test_enum_discrete_iplate_single_ok():
def model():
p = torch.tensor(0.5)
for i in pyro.plate("plate", 10, 5):
pyro.sample("x_{}".format(i), dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
for i in pyro.plate("plate", 10, 5):
pyro.sample("x_{}".format(i), dist.Bernoulli(p))
assert_ok(model, config_enumerate(guide), TraceEnum_ELBO())
def test_plate_enum_discrete_batch_ok():
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 10, 5) as ind:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind)]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate("plate", 10, 5) as ind:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind)]))
assert_ok(model, config_enumerate(guide), TraceEnum_ELBO())
@pytest.mark.parametrize("strict_enumeration_warning", [False, True])
def test_plate_enum_discrete_no_discrete_vars_warning(strict_enumeration_warning):
def model():
loc = torch.tensor(0.0)
scale = torch.tensor(1.0)
with pyro.plate("plate", 10, 5) as ind:
pyro.sample("x", dist.Normal(loc, scale).expand_by([len(ind)]))
@config_enumerate(default="sequential")
def guide():
loc = pyro.param("loc", torch.tensor(1.0, requires_grad=True))
scale = pyro.param("scale", torch.tensor(2.0, requires_grad=True))
with pyro.plate("plate", 10, 5) as ind:
pyro.sample("x", dist.Normal(loc, scale).expand_by([len(ind)]))
elbo = TraceEnum_ELBO(strict_enumeration_warning=strict_enumeration_warning)
if strict_enumeration_warning:
assert_warning(model, guide, elbo)
else:
assert_ok(model, guide, elbo)
def test_no_plate_enum_discrete_batch_error():
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p).expand_by([5]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p).expand_by([5]))
assert_error(
model, config_enumerate(guide), TraceEnum_ELBO(), match="invalid log_prob shape"
)
@pytest.mark.parametrize("max_plate_nesting", [0, 1, 2, float("inf")])
def test_enum_discrete_parallel_ok(max_plate_nesting):
guessed_nesting = 0 if max_plate_nesting == float("inf") else max_plate_nesting
plate_shape = torch.Size([1] * guessed_nesting)
def model():
p = torch.tensor(0.5)
x = pyro.sample("x", dist.Bernoulli(p))
if max_plate_nesting != float("inf"):
assert x.shape == torch.Size([2]) + plate_shape
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
x = pyro.sample("x", dist.Bernoulli(p))
if max_plate_nesting != float("inf"):
assert x.shape == torch.Size([2]) + plate_shape
assert_ok(
model,
config_enumerate(guide, "parallel"),
TraceEnum_ELBO(max_plate_nesting=max_plate_nesting),
)
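# Background, stated informally: with a finite max_plate_nesting, parallel
# enumeration places the Bernoulli support of size 2 at dim
# -(max_plate_nesting + 1), giving torch.Size([2]) + plate_shape above. With
# max_plate_nesting=inf the nesting is inferred internally, so the shape
# assertions are skipped.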
@pytest.mark.parametrize("max_plate_nesting", [0, 1, 2, float("inf")])
def test_enum_discrete_parallel_nested_ok(max_plate_nesting):
guessed_nesting = 0 if max_plate_nesting == float("inf") else max_plate_nesting
plate_shape = torch.Size([1] * guessed_nesting)
def model():
p2 = torch.ones(2) / 2
p3 = torch.ones(3) / 3
x2 = pyro.sample("x2", dist.OneHotCategorical(p2))
x3 = pyro.sample("x3", dist.OneHotCategorical(p3))
if max_plate_nesting != float("inf"):
assert x2.shape == torch.Size([2]) + plate_shape + p2.shape
assert x3.shape == torch.Size([3, 1]) + plate_shape + p3.shape
assert_ok(
model,
config_enumerate(model, "parallel"),
TraceEnum_ELBO(max_plate_nesting=max_plate_nesting),
)
@pytest.mark.parametrize(
"enumerate_,expand,num_samples",
[
(None, False, None),
("sequential", False, None),
("sequential", True, None),
("parallel", False, None),
("parallel", True, None),
("parallel", True, 3),
],
)
def test_enumerate_parallel_plate_ok(enumerate_, expand, num_samples):
def model():
p2 = torch.ones(2) / 2
p34 = torch.ones(3, 4) / 4
p536 = torch.ones(5, 3, 6) / 6
x2 = pyro.sample("x2", dist.Categorical(p2))
with pyro.plate("outer", 3):
x34 = pyro.sample("x34", dist.Categorical(p34))
with pyro.plate("inner", 5):
x536 = pyro.sample("x536", dist.Categorical(p536))
# check shapes
if enumerate_ == "parallel":
if num_samples:
n = num_samples
# Meaning of dimensions: [ enum dims | plate dims ]
assert x2.shape == torch.Size([n, 1, 1]) # noqa: E201
assert x34.shape == torch.Size([n, 1, 1, 3]) # noqa: E201
assert x536.shape == torch.Size([n, 1, 1, 5, 3]) # noqa: E201
elif expand:
# Meaning of dimensions: [ enum dims | plate dims ]
assert x2.shape == torch.Size([2, 1, 1]) # noqa: E201
assert x34.shape == torch.Size([4, 1, 1, 3]) # noqa: E201
assert x536.shape == torch.Size([6, 1, 1, 5, 3]) # noqa: E201
else:
# Meaning of dimensions: [ enum dims | plate placeholders ]
assert x2.shape == torch.Size([2, 1, 1]) # noqa: E201
assert x34.shape == torch.Size([4, 1, 1, 1]) # noqa: E201
assert x536.shape == torch.Size([6, 1, 1, 1, 1]) # noqa: E201
elif enumerate_ == "sequential":
if expand:
# All dimensions are plate dimensions.
assert x2.shape == torch.Size([])
assert x34.shape == torch.Size([3])
assert x536.shape == torch.Size([5, 3])
else:
# All dimensions are plate placeholders.
assert x2.shape == torch.Size([])
assert x34.shape == torch.Size([1])
assert x536.shape == torch.Size([1, 1])
else:
# All dimensions are plate dimensions.
assert x2.shape == torch.Size([])
assert x34.shape == torch.Size([3])
assert x536.shape == torch.Size([5, 3])
elbo = TraceEnum_ELBO(max_plate_nesting=2, strict_enumeration_warning=enumerate_)
guide = config_enumerate(model, enumerate_, expand, num_samples)
assert_ok(model, guide, elbo)
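# Background, stated informally: with max_plate_nesting=2, dims -1 and -2
# are reserved for plates, so parallel enumeration allocates fresh dims -3,
# -4, ... further to the left, one per enumerated site. With expand=False
# the plate dims are left as size-1 placeholders instead of being
# materialized.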
@pytest.mark.parametrize("max_plate_nesting", [1, float("inf")])
@pytest.mark.parametrize("enumerate_", [None, "sequential", "parallel"])
@pytest.mark.parametrize("is_validate", [True, False])
def test_enum_discrete_plate_dependency_warning(
enumerate_, is_validate, max_plate_nesting
):
def model():
pyro.sample("w", dist.Bernoulli(0.5), infer={"enumerate": "parallel"})
with pyro.plate("plate", 10, 5):
x = pyro.sample(
"x", dist.Bernoulli(0.5).expand_by([5]), infer={"enumerate": enumerate_}
)
pyro.sample("y", dist.Bernoulli(x.mean())) # user should move this line up
with pyro.validation_enabled(is_validate):
elbo = TraceEnum_ELBO(max_plate_nesting=max_plate_nesting)
if enumerate_ and is_validate:
assert_warning(model, model, elbo)
else:
assert_ok(model, model, elbo)
@pytest.mark.parametrize("max_plate_nesting", [1, float("inf")])
@pytest.mark.parametrize("enumerate_", [None, "sequential", "parallel"])
def test_enum_discrete_iplate_plate_dependency_ok(enumerate_, max_plate_nesting):
def model():
pyro.sample("w", dist.Bernoulli(0.5), infer={"enumerate": "parallel"})
inner_plate = pyro.plate("plate", 10, 5)
for i in pyro.plate("iplate", 3):
pyro.sample("y_{}".format(i), dist.Bernoulli(0.5))
with inner_plate:
pyro.sample(
"x_{}".format(i),
dist.Bernoulli(0.5).expand_by([5]),
infer={"enumerate": enumerate_},
)
assert_ok(model, model, TraceEnum_ELBO(max_plate_nesting=max_plate_nesting))
@pytest.mark.parametrize("max_plate_nesting", [1, float("inf")])
@pytest.mark.parametrize("enumerate_", [None, "sequential", "parallel"])
@pytest.mark.parametrize("is_validate", [True, False])
def test_enum_discrete_iplates_plate_dependency_warning(
enumerate_, is_validate, max_plate_nesting
):
def model():
pyro.sample("w", dist.Bernoulli(0.5), infer={"enumerate": "parallel"})
inner_plate = pyro.plate("plate", 10, 5)
for i in pyro.plate("iplate1", 2):
with inner_plate:
pyro.sample(
"x_{}".format(i),
dist.Bernoulli(0.5).expand_by([5]),
infer={"enumerate": enumerate_},
)
for i in pyro.plate("iplate2", 2):
pyro.sample("y_{}".format(i), dist.Bernoulli(0.5))
with pyro.validation_enabled(is_validate):
elbo = TraceEnum_ELBO(max_plate_nesting=max_plate_nesting)
if enumerate_ and is_validate:
assert_warning(model, model, elbo)
else:
assert_ok(model, model, elbo)
@pytest.mark.parametrize("enumerate_", [None, "sequential", "parallel"])
def test_enum_discrete_plates_dependency_ok(enumerate_):
def model():
pyro.sample("w", dist.Bernoulli(0.5), infer={"enumerate": "parallel"})
x_plate = pyro.plate("x_plate", 10, 5, dim=-1)
y_plate = pyro.plate("y_plate", 11, 6, dim=-2)
pyro.sample("a", dist.Bernoulli(0.5))
with x_plate:
pyro.sample("b", dist.Bernoulli(0.5).expand_by([5]))
with y_plate:
# Note that it is difficult to check that c does not depend on b.
pyro.sample("c", dist.Bernoulli(0.5).expand_by([6, 1]))
with x_plate, y_plate:
pyro.sample("d", dist.Bernoulli(0.5).expand_by([6, 5]))
assert_ok(model, model, TraceEnum_ELBO(max_plate_nesting=2))
@pytest.mark.parametrize("enumerate_", [None, "sequential", "parallel"])
def test_enum_discrete_non_enumerated_plate_ok(enumerate_):
def model():
pyro.sample("w", dist.Bernoulli(0.5), infer={"enumerate": "parallel"})
with pyro.plate("non_enum", 2):
a = pyro.sample(
"a", dist.Bernoulli(0.5).expand_by([2]), infer={"enumerate": None}
)
p = (1.0 + a.sum(-1)) / (2.0 + a.size(0)) # introduce dependency of b on a
with pyro.plate("enum_1", 3):
pyro.sample(
"b", dist.Bernoulli(p).expand_by([3]), infer={"enumerate": enumerate_}
)
with pyro.validation_enabled():
assert_ok(model, model, TraceEnum_ELBO(max_plate_nesting=1))
def test_plate_shape_broadcasting():
data = torch.ones(1000, 2)
def model():
with pyro.plate("num_particles", 10, dim=-3):
with pyro.plate("components", 2, dim=-1):
p = pyro.sample("p", dist.Beta(torch.tensor(1.1), torch.tensor(1.1)))
assert p.shape == torch.Size((10, 1, 2))
with pyro.plate("data", data.shape[0], dim=-2):
pyro.sample("obs", dist.Bernoulli(p), obs=data)
def guide():
with pyro.plate("num_particles", 10, dim=-3):
with pyro.plate("components", 2, dim=-1):
pyro.sample("p", dist.Beta(torch.tensor(1.1), torch.tensor(1.1)))
assert_ok(model, guide, Trace_ELBO())
@pytest.mark.parametrize(
"enumerate_,expand,num_samples",
[
(None, True, None),
("sequential", True, None),
("sequential", False, None),
("parallel", True, None),
("parallel", False, None),
("parallel", True, 3),
],
)
def test_enum_discrete_plate_shape_broadcasting_ok(enumerate_, expand, num_samples):
def model():
x_plate = pyro.plate("x_plate", 10, 5, dim=-1)
y_plate = pyro.plate("y_plate", 11, 6, dim=-2)
with pyro.plate("num_particles", 50, dim=-3):
with x_plate:
b = pyro.sample("b", dist.Beta(torch.tensor(1.1), torch.tensor(1.1)))
with y_plate:
c = pyro.sample("c", dist.Bernoulli(0.5))
with x_plate, y_plate:
d = pyro.sample("d", dist.Bernoulli(b))
# check shapes
if enumerate_ == "parallel":
if num_samples and expand:
assert b.shape == (num_samples, 50, 1, 5)
assert c.shape == (num_samples, 1, 50, 6, 1)
assert d.shape == (num_samples, 1, num_samples, 50, 6, 5)
elif num_samples and not expand:
assert b.shape == (num_samples, 50, 1, 5)
assert c.shape == (num_samples, 1, 50, 6, 1)
assert d.shape == (num_samples, 1, 1, 50, 6, 5)
elif expand:
assert b.shape == (50, 1, 5)
assert c.shape == (2, 50, 6, 1)
assert d.shape == (2, 1, 50, 6, 5)
else:
assert b.shape == (50, 1, 5)
assert c.shape == (2, 1, 1, 1)
assert d.shape == (2, 1, 1, 1, 1)
elif enumerate_ == "sequential":
if expand:
assert b.shape == (50, 1, 5)
assert c.shape == (50, 6, 1)
assert d.shape == (50, 6, 5)
else:
assert b.shape == (50, 1, 5)
assert c.shape == (1, 1, 1)
assert d.shape == (1, 1, 1)
else:
assert b.shape == (50, 1, 5)
assert c.shape == (50, 6, 1)
assert d.shape == (50, 6, 5)
guide = config_enumerate(
model, default=enumerate_, expand=expand, num_samples=num_samples
)
elbo = TraceEnum_ELBO(
max_plate_nesting=3, strict_enumeration_warning=(enumerate_ == "parallel")
)
assert_ok(model, guide, elbo)
@pytest.mark.parametrize(
"Elbo,expand",
[
(Trace_ELBO, False),
(TraceGraph_ELBO, False),
(TraceEnum_ELBO, False),
(TraceEnum_ELBO, True),
],
)
def test_dim_allocation_ok(Elbo, expand):
enumerate_ = Elbo is TraceEnum_ELBO
def model():
p = torch.tensor(0.5, requires_grad=True)
with pyro.plate("plate_outer", 10, 5, dim=-3):
x = pyro.sample("x", dist.Bernoulli(p))
with pyro.plate("plate_inner_1", 11, 6):
y = pyro.sample("y", dist.Bernoulli(p))
# allocated dim is rightmost available, i.e. -1
with pyro.plate("plate_inner_2", 12, 7):
z = pyro.sample("z", dist.Bernoulli(p))
# allocated dim is next rightmost available, i.e. -2
# since dim -3 is already allocated, use dim=-4
with pyro.plate("plate_inner_3", 13, 8):
q = pyro.sample("q", dist.Bernoulli(p))
# check shapes
if enumerate_ and not expand:
assert x.shape == (1, 1, 1)
assert y.shape == (1, 1, 1)
assert z.shape == (1, 1, 1)
assert q.shape == (1, 1, 1, 1)
else:
assert x.shape == (5, 1, 1)
assert y.shape == (5, 1, 6)
assert z.shape == (5, 7, 6)
assert q.shape == (8, 5, 7, 6)
guide = (
config_enumerate(model, "sequential", expand=expand) if enumerate_ else model
)
assert_ok(model, guide, Elbo(max_plate_nesting=4))
@pytest.mark.parametrize(
"Elbo,expand",
[
(Trace_ELBO, False),
(TraceGraph_ELBO, False),
(TraceEnum_ELBO, False),
(TraceEnum_ELBO, True),
],
)
def test_dim_allocation_error(Elbo, expand):
enumerate_ = Elbo is TraceEnum_ELBO
def model():
p = torch.tensor(0.5, requires_grad=True)
with pyro.plate("plate_outer", 10, 5, dim=-2):
x = pyro.sample("x", dist.Bernoulli(p))
# allocated dim is rightmost available, i.e. -1
with pyro.plate("plate_inner_1", 11, 6):
y = pyro.sample("y", dist.Bernoulli(p))
# throws an error as dim=-1 is already occupied
with pyro.plate("plate_inner_2", 12, 7, dim=-1):
pyro.sample("z", dist.Bernoulli(p))
# check shapes
if enumerate_ and not expand:
assert x.shape == (1, 1)
assert y.shape == (1, 1)
else:
assert x.shape == (5, 1)
assert y.shape == (5, 6)
guide = config_enumerate(model, expand=expand) if Elbo is TraceEnum_ELBO else model
assert_error(model, guide, Elbo(), match="collide at dim=")
def test_enum_in_model_ok():
infer = {"enumerate": "parallel"}
def model():
p = pyro.param("p", torch.tensor(0.25))
a = pyro.sample("a", dist.Bernoulli(p))
b = pyro.sample("b", dist.Bernoulli(p + a / 2))
c = pyro.sample("c", dist.Bernoulli(p + b / 2), infer=infer)
d = pyro.sample("d", dist.Bernoulli(p + c / 2))
e = pyro.sample("e", dist.Bernoulli(p + d / 2))
f = pyro.sample("f", dist.Bernoulli(p + e / 2), infer=infer)
g = pyro.sample("g", dist.Bernoulli(p + f / 2), obs=torch.tensor(0.0))
# check shapes
assert a.shape == ()
assert b.shape == (2,)
assert c.shape == (2, 1, 1)
assert d.shape == (2,)
assert e.shape == (2, 1)
assert f.shape == (2, 1, 1, 1)
assert g.shape == ()
def guide():
p = pyro.param("p", torch.tensor(0.25))
a = pyro.sample("a", dist.Bernoulli(p))
b = pyro.sample("b", dist.Bernoulli(p + a / 2), infer=infer)
d = pyro.sample("d", dist.Bernoulli(p + b / 2))
e = pyro.sample("e", dist.Bernoulli(p + d / 2), infer=infer)
# check shapes
assert a.shape == ()
assert b.shape == (2,)
assert d.shape == (2,)
assert e.shape == (2, 1)
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0))
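# Background, stated informally: guide-side enumerated sites ("b", "e") are
# assigned enumeration dims first (-1, -2), and model-side enumerated sites
# ("c", "f") continue further to the left (-3, -4), which yields the
# interleaved singleton shapes asserted above.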
def test_enum_in_model_plate_ok():
infer = {"enumerate": "parallel"}
def model():
p = pyro.param("p", torch.tensor(0.25))
a = pyro.sample("a", dist.Bernoulli(p))
b = pyro.sample("b", dist.Bernoulli(p + a / 2))
with pyro.plate("data", 3):
c = pyro.sample("c", dist.Bernoulli(p + b / 2), infer=infer)
d = pyro.sample("d", dist.Bernoulli(p + c / 2))
e = pyro.sample("e", dist.Bernoulli(p + d / 2))
f = pyro.sample("f", dist.Bernoulli(p + e / 2), infer=infer)
g = pyro.sample("g", dist.Bernoulli(p + f / 2), obs=torch.zeros(3))
# check shapes
assert a.shape == ()
assert b.shape == (2, 1)
assert c.shape == (2, 1, 1, 1)
assert d.shape == (2, 3)
assert e.shape == (2, 1, 1)
assert f.shape == (2, 1, 1, 1, 1)
assert g.shape == (3,)
def guide():
p = pyro.param("p", torch.tensor(0.25))
a = pyro.sample("a", dist.Bernoulli(p))
b = pyro.sample("b", dist.Bernoulli(p + a / 2), infer=infer)
with pyro.plate("data", 3):
d = pyro.sample("d", dist.Bernoulli(p + b / 2))
e = pyro.sample("e", dist.Bernoulli(p + d / 2), infer=infer)
# check shapes
assert a.shape == ()
assert b.shape == (2, 1)
assert d.shape == (2, 3)
assert e.shape == (2, 1, 1)
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=1))
def test_enum_sequential_in_model_error():
def model():
p = pyro.param("p", torch.tensor(0.25))
pyro.sample("a", dist.Bernoulli(p), infer={"enumerate": "sequential"})
def guide():
pass
assert_error(
model,
guide,
TraceEnum_ELBO(max_plate_nesting=0),
match="At site .*, model-side sequential enumeration is not implemented",
)
def test_enum_in_model_plate_reuse_ok():
@config_enumerate
def model():
p = pyro.param("p", torch.tensor([0.2, 0.8]))
a = pyro.sample("a", dist.Bernoulli(0.3)).long()
with pyro.plate("b_axis", 2):
pyro.sample("b", dist.Bernoulli(p[a]), obs=torch.tensor([0.0, 1.0]))
c = pyro.sample("c", dist.Bernoulli(0.3)).long()
with pyro.plate("c_axis", 2):
pyro.sample("d", dist.Bernoulli(p[c]), obs=torch.tensor([0.0, 0.0]))
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=1))
def test_enum_in_model_multi_scale_error():
@config_enumerate
def model():
p = pyro.param("p", torch.tensor([0.2, 0.8]))
x = pyro.sample("x", dist.Bernoulli(0.3)).long()
with poutine.scale(scale=2.0):
pyro.sample("y", dist.Bernoulli(p[x]), obs=torch.tensor(0.0))
def guide():
pass
assert_error(
model,
guide,
TraceEnum_ELBO(max_plate_nesting=0),
match="Expected all enumerated sample sites to share a common poutine.scale",
)
@pytest.mark.parametrize("use_vindex", [False, True])
def test_enum_in_model_diamond_error(use_vindex):
data = torch.tensor([[0, 1], [0, 0]])
@config_enumerate
def model():
pyro.param("probs_a", torch.tensor([0.45, 0.55]))
pyro.param("probs_b", torch.tensor([[0.6, 0.4], [0.4, 0.6]]))
pyro.param("probs_c", torch.tensor([[0.75, 0.25], [0.55, 0.45]]))
pyro.param(
"probs_d",
torch.tensor([[[0.4, 0.6], [0.3, 0.7]], [[0.3, 0.7], [0.2, 0.8]]]),
)
probs_a = pyro.param("probs_a")
probs_b = pyro.param("probs_b")
probs_c = pyro.param("probs_c")
probs_d = pyro.param("probs_d")
b_axis = pyro.plate("b_axis", 2, dim=-1)
c_axis = pyro.plate("c_axis", 2, dim=-2)
a = pyro.sample("a", dist.Categorical(probs_a))
with b_axis:
b = pyro.sample("b", dist.Categorical(probs_b[a]))
with c_axis:
c = pyro.sample("c", dist.Categorical(probs_c[a]))
with b_axis, c_axis:
if use_vindex:
probs = Vindex(probs_d)[b, c]
else:
d_ind = torch.arange(2, dtype=torch.long)
probs = probs_d[b.unsqueeze(-1), c.unsqueeze(-1), d_ind]
pyro.sample("d", dist.Categorical(probs), obs=data)
def guide():
pass
assert_error(
model,
guide,
TraceEnum_ELBO(max_plate_nesting=2),
match="Expected tree-structured plate nesting",
)
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_vectorized_num_particles(Elbo):
data = torch.ones(1000, 2)
def model():
with pyro.plate("components", 2):
p = pyro.sample("p", dist.Beta(torch.tensor(1.1), torch.tensor(1.1)))
assert p.shape == torch.Size((10, 1, 2))
with pyro.plate("data", data.shape[0]):
pyro.sample("obs", dist.Bernoulli(p), obs=data)
def guide():
with pyro.plate("components", 2):
pyro.sample("p", dist.Beta(torch.tensor(1.1), torch.tensor(1.1)))
pyro.clear_param_store()
guide = config_enumerate(guide) if Elbo is TraceEnum_ELBO else guide
assert_ok(
model,
guide,
Elbo(
num_particles=10,
vectorize_particles=True,
max_plate_nesting=2,
strict_enumeration_warning=False,
),
)
@pytest.mark.parametrize(
"enumerate_,expand,num_samples",
[
(None, False, None),
("sequential", False, None),
("sequential", True, None),
("parallel", False, None),
("parallel", True, None),
("parallel", True, 3),
],
)
@pytest.mark.parametrize("num_particles", [1, 50])
def test_enum_discrete_vectorized_num_particles(
enumerate_, expand, num_samples, num_particles
):
@config_enumerate(default=enumerate_, expand=expand, num_samples=num_samples)
def model():
x_plate = pyro.plate("x_plate", 10, 5, dim=-1)
y_plate = pyro.plate("y_plate", 11, 6, dim=-2)
with x_plate:
b = pyro.sample("b", dist.Beta(torch.tensor(1.1), torch.tensor(1.1)))
with y_plate:
c = pyro.sample("c", dist.Bernoulli(0.5))
with x_plate, y_plate:
d = pyro.sample("d", dist.Bernoulli(b))
# check shapes
if num_particles > 1:
if enumerate_ == "parallel":
if num_samples and expand:
assert b.shape == (num_samples, num_particles, 1, 5)
assert c.shape == (num_samples, 1, num_particles, 6, 1)
assert d.shape == (num_samples, 1, num_samples, num_particles, 6, 5)
elif num_samples and not expand:
assert b.shape == (num_samples, num_particles, 1, 5)
assert c.shape == (num_samples, 1, num_particles, 6, 1)
assert d.shape == (num_samples, 1, 1, num_particles, 6, 5)
elif expand:
assert b.shape == (num_particles, 1, 5)
assert c.shape == (2, num_particles, 6, 1)
assert d.shape == (2, 1, num_particles, 6, 5)
else:
assert b.shape == (num_particles, 1, 5)
assert c.shape == (2, 1, 1, 1)
assert d.shape == (2, 1, 1, 1, 1)
elif enumerate_ == "sequential":
if expand:
assert b.shape == (num_particles, 1, 5)
assert c.shape == (num_particles, 6, 1)
assert d.shape == (num_particles, 6, 5)
else:
assert b.shape == (num_particles, 1, 5)
assert c.shape == (1, 1, 1)
assert d.shape == (1, 1, 1)
else:
assert b.shape == (num_particles, 1, 5)
assert c.shape == (num_particles, 6, 1)
assert d.shape == (num_particles, 6, 5)
else:
if enumerate_ == "parallel":
if num_samples and expand:
                    assert b.shape == (num_samples, 1, 5)
assert c.shape == (num_samples, 1, 6, 1)
assert d.shape == (num_samples, 1, num_samples, 6, 5)
elif num_samples and not expand:
                    assert b.shape == (num_samples, 1, 5)
assert c.shape == (num_samples, 1, 6, 1)
assert d.shape == (num_samples, 1, 1, 6, 5)
elif expand:
assert b.shape == (5,)
assert c.shape == (2, 6, 1)
assert d.shape == (2, 1, 6, 5)
else:
assert b.shape == (5,)
assert c.shape == (2, 1, 1)
assert d.shape == (2, 1, 1, 1)
elif enumerate_ == "sequential":
if expand:
assert b.shape == (5,)
assert c.shape == (6, 1)
assert d.shape == (6, 5)
else:
assert b.shape == (5,)
assert c.shape == (1, 1)
assert d.shape == (1, 1)
else:
assert b.shape == (5,)
assert c.shape == (6, 1)
assert d.shape == (6, 5)
assert_ok(
model,
model,
TraceEnum_ELBO(
max_plate_nesting=2,
num_particles=num_particles,
vectorize_particles=True,
strict_enumeration_warning=(enumerate_ == "parallel"),
),
)
def test_enum_recycling_chain():
@config_enumerate
def model():
p = pyro.param("p", torch.tensor([[0.2, 0.8], [0.1, 0.9]]))
x = 0
for t in pyro.markov(range(100)):
x = pyro.sample("x_{}".format(t), dist.Categorical(p[x]))
assert x.dim() <= 2
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0))
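# Background, stated informally: pyro.markov declares that only adjacent
# time steps interact, so TraceEnum_ELBO can recycle enumeration dims. A
# 100-step chain therefore alternates between two dims (hence x.dim() <= 2)
# instead of allocating one dim per step.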
@pytest.mark.parametrize("use_vindex", [False, True])
@pytest.mark.parametrize("markov", [False, True])
def test_enum_recycling_dbn(markov, use_vindex):
# x --> x --> x enum "state"
# y | y | y | enum "occlusion"
# \ | \ | \ |
# z z z obs
@config_enumerate
def model():
p = pyro.param("p", torch.ones(3, 3))
q = pyro.param("q", torch.ones(2))
r = pyro.param("r", torch.ones(3, 2, 4))
x = 0
times = pyro.markov(range(100)) if markov else range(11)
for t in times:
x = pyro.sample("x_{}".format(t), dist.Categorical(p[x]))
y = pyro.sample("y_{}".format(t), dist.Categorical(q))
if use_vindex:
probs = Vindex(r)[x, y]
else:
z_ind = torch.arange(4, dtype=torch.long)
probs = r[x.unsqueeze(-1), y.unsqueeze(-1), z_ind]
pyro.sample(
"z_{}".format(t), dist.Categorical(probs), obs=torch.tensor(0.0)
)
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0))
def test_enum_recycling_nested():
# (x)
# \
# y0---(y1)--(y2)
# | | |
# z00 z10 z20
# | | |
# z01 z11 (z21)
# | | |
# z02 z12 z22 <-- what can this depend on?
#
# markov dependencies
# -------------------
# x:
# y0: x
# z00: x y0
# z01: x y0 z00
# z02: x y0 z01
# y1: x y0
# z10: x y0 y1
# z11: x y0 y1 z10
# z12: x y0 y1 z11
# y2: x y1
# z20: x y1 y2
# z21: x y1 y2 z20
# z22: x y1 y2 z21
@config_enumerate
def model():
p = pyro.param("p", torch.ones(3, 3))
x = pyro.sample("x", dist.Categorical(p[0]))
y = x
for i in pyro.markov(range(10)):
y = pyro.sample("y_{}".format(i), dist.Categorical(p[y]))
z = y
for j in pyro.markov(range(10)):
z = pyro.sample("z_{}_{}".format(i, j), dist.Categorical(p[z]))
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0))
@pytest.mark.parametrize("use_vindex", [False, True])
def test_enum_recycling_grid(use_vindex):
# x---x---x---x -----> i
# | | | | |
# x---x---x---x |
# | | | | V
# x---x---x--(x) j
# | | | |
# x---x--(x)--x <-- what can this depend on?
@config_enumerate
def model():
p = pyro.param("p_leaf", torch.ones(2, 2, 2))
x = defaultdict(lambda: torch.tensor(0))
y_axis = pyro.markov(range(4), keep=True)
for i in pyro.markov(range(4)):
for j in y_axis:
if use_vindex:
probs = Vindex(p)[x[i - 1, j], x[i, j - 1]]
else:
ind = torch.arange(2, dtype=torch.long)
probs = p[x[i - 1, j].unsqueeze(-1), x[i, j - 1].unsqueeze(-1), ind]
x[i, j] = pyro.sample("x_{}_{}".format(i, j), dist.Categorical(probs))
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0))
def test_enum_recycling_reentrant():
data = (True, False)
for i in range(5):
data = (data, data, False)
@pyro.markov
def model(data, state=0, address=""):
if isinstance(data, bool):
p = pyro.param("p_leaf", torch.ones(10))
pyro.sample(
"leaf_{}".format(address),
dist.Bernoulli(p[state]),
obs=torch.tensor(1.0 if data else 0.0),
)
else:
p = pyro.param("p_branch", torch.ones(10, 10))
for branch, letter in zip(data, "abcdefg"):
next_state = pyro.sample(
"branch_{}".format(address + letter),
dist.Categorical(p[state]),
infer={"enumerate": "parallel"},
)
model(branch, next_state, address + letter)
def guide(data):
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0), data=data)
@pytest.mark.parametrize("history", [1, 2])
def test_enum_recycling_reentrant_history(history):
data = (True, False)
for i in range(5):
data = (data, data, False)
@pyro.markov(history=history)
def model(data, state=0, address=""):
if isinstance(data, bool):
p = pyro.param("p_leaf", torch.ones(10))
pyro.sample(
"leaf_{}".format(address),
dist.Bernoulli(p[state]),
obs=torch.tensor(1.0 if data else 0.0),
)
else:
assert isinstance(data, tuple)
p = pyro.param("p_branch", torch.ones(10, 10))
for branch, letter in zip(data, "abcdefg"):
next_state = pyro.sample(
"branch_{}".format(address + letter),
dist.Categorical(p[state]),
infer={"enumerate": "parallel"},
)
model(branch, next_state, address + letter)
def guide(data):
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0), data=data)
def test_enum_recycling_mutual_recursion():
data = (True, False)
for i in range(5):
data = (data, data, False)
def model_leaf(data, state=0, address=""):
p = pyro.param("p_leaf", torch.ones(10))
pyro.sample(
"leaf_{}".format(address),
dist.Bernoulli(p[state]),
obs=torch.tensor(1.0 if data else 0.0),
)
@pyro.markov
def model1(data, state=0, address=""):
if isinstance(data, bool):
model_leaf(data, state, address)
else:
p = pyro.param("p_branch", torch.ones(10, 10))
for branch, letter in zip(data, "abcdefg"):
next_state = pyro.sample(
"branch_{}".format(address + letter),
dist.Categorical(p[state]),
infer={"enumerate": "parallel"},
)
model2(branch, next_state, address + letter)
@pyro.markov
def model2(data, state=0, address=""):
if isinstance(data, bool):
model_leaf(data, state, address)
else:
p = pyro.param("p_branch", torch.ones(10, 10))
for branch, letter in zip(data, "abcdefg"):
next_state = pyro.sample(
"branch_{}".format(address + letter),
dist.Categorical(p[state]),
infer={"enumerate": "parallel"},
)
model1(branch, next_state, address + letter)
def guide(data):
pass
assert_ok(model1, guide, TraceEnum_ELBO(max_plate_nesting=0), data=data)
def test_enum_recycling_interleave():
def model():
with pyro.markov() as m:
with pyro.markov():
                with m:  # re-enter the outer markov context (interleaved scopes)
pyro.sample(
"x",
dist.Categorical(torch.ones(4)),
infer={"enumerate": "parallel"},
)
def guide():
pass
assert_ok(
model,
guide,
TraceEnum_ELBO(max_plate_nesting=0, strict_enumeration_warning=False),
)
def test_enum_recycling_plate():
@config_enumerate
def model():
p = pyro.param("p", torch.ones(3, 3))
q = pyro.param("q", torch.tensor([0.5, 0.5]))
plate_x = pyro.plate("plate_x", 2, dim=-1)
plate_y = pyro.plate("plate_y", 3, dim=-1)
plate_z = pyro.plate("plate_z", 4, dim=-2)
a = pyro.sample("a", dist.Bernoulli(q[0])).long()
w = 0
for i in pyro.markov(range(5)):
w = pyro.sample("w_{}".format(i), dist.Categorical(p[w]))
with plate_x:
b = pyro.sample("b", dist.Bernoulli(q[a])).long()
x = 0
for i in pyro.markov(range(6)):
x = pyro.sample("x_{}".format(i), dist.Categorical(p[x]))
with plate_y:
c = pyro.sample("c", dist.Bernoulli(q[a])).long()
y = 0
for i in pyro.markov(range(7)):
y = pyro.sample("y_{}".format(i), dist.Categorical(p[y]))
with plate_z:
d = pyro.sample("d", dist.Bernoulli(q[a])).long()
z = 0
for i in pyro.markov(range(8)):
z = pyro.sample("z_{}".format(i), dist.Categorical(p[z]))
with plate_x, plate_z:
e = pyro.sample("e", dist.Bernoulli(q[b])).long()
xz = 0
for i in pyro.markov(range(9)):
xz = pyro.sample("xz_{}".format(i), dist.Categorical(p[xz]))
return a, b, c, d, e
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=2))
@pytest.mark.parametrize(
"Elbo",
[
Trace_ELBO,
TraceGraph_ELBO,
TraceEnum_ELBO,
TraceTMC_ELBO,
],
)
def test_factor_in_model_ok(Elbo):
def model():
pyro.factor("f", torch.tensor(0.0))
def guide():
pass
elbo = Elbo(strict_enumeration_warning=False)
assert_ok(model, guide, elbo)
@pytest.mark.parametrize(
"Elbo",
[
Trace_ELBO,
TraceGraph_ELBO,
TraceEnum_ELBO,
TraceTMC_ELBO,
],
)
def test_factor_in_guide_ok(Elbo):
def model():
pass
def guide():
pyro.factor("f", torch.tensor(0.0))
elbo = Elbo(strict_enumeration_warning=False)
assert_ok(model, guide, elbo)
@pytest.mark.parametrize("history", [0, 1, 2, 3])
def test_markov_history(history):
@config_enumerate
def model():
p = pyro.param("p", 0.25 * torch.ones(2, 2))
q = pyro.param("q", 0.25 * torch.ones(2))
x_prev = torch.tensor(0)
x_curr = torch.tensor(0)
for t in pyro.markov(range(10), history=history):
probs = p[x_prev, x_curr]
x_prev, x_curr = (
x_curr,
pyro.sample("x_{}".format(t), dist.Bernoulli(probs)).long(),
)
pyro.sample(
"y_{}".format(t), dist.Bernoulli(q[x_curr]), obs=torch.tensor(0.0)
)
def guide():
pass
if history < 2:
assert_error(
model,
guide,
TraceEnum_ELBO(max_plate_nesting=0),
match="Enumeration dim conflict",
)
else:
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0))
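# Background, stated informally: the transition above reads
# p[x_prev, x_curr], a second-order dependency, so at least history=2
# distinct enumeration dims must stay live. With history < 2 the recycled
# dims collide, producing the "Enumeration dim conflict" error asserted
# above.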
def test_mean_field_ok():
def model():
x = pyro.sample("x", dist.Normal(0.0, 1.0))
pyro.sample("y", dist.Normal(x, 1.0))
def guide():
loc = pyro.param("loc", torch.tensor(0.0))
x = pyro.sample("x", dist.Normal(loc, 1.0))
pyro.sample("y", dist.Normal(x, 1.0))
assert_ok(model, guide, TraceMeanField_ELBO())
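# Background, stated informally: TraceMeanField_ELBO uses analytic KL terms
# where available and requires the guide to visit sites in an order
# consistent with the model's dependency structure; reversing "x" and "y"
# in the guide (test_mean_field_warn below) triggers a warning.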
@pytest.mark.parametrize("mask", [True, False])
def test_mean_field_mask_ok(mask):
def model():
x = pyro.sample("x", dist.Normal(0.0, 1.0).mask(mask))
pyro.sample("y", dist.Normal(x, 1.0))
def guide():
loc = pyro.param("loc", torch.tensor(0.0))
x = pyro.sample("x", dist.Normal(loc, 1.0).mask(mask))
pyro.sample("y", dist.Normal(x, 1.0))
assert_ok(model, guide, TraceMeanField_ELBO())
def test_mean_field_warn():
def model():
x = pyro.sample("x", dist.Normal(0.0, 1.0))
pyro.sample("y", dist.Normal(x, 1.0))
def guide():
loc = pyro.param("loc", torch.tensor(0.0))
y = pyro.sample("y", dist.Normal(loc, 1.0))
pyro.sample("x", dist.Normal(y, 1.0))
assert_warning(model, guide, TraceMeanField_ELBO())
def test_tail_adaptive_ok():
def plateless_model():
pyro.sample("x", dist.Normal(0.0, 1.0))
def plate_model():
x = pyro.sample("x", dist.Normal(0.0, 1.0))
with pyro.plate("observe_data"):
pyro.sample("obs", dist.Normal(x, 1.0), obs=torch.arange(5).type_as(x))
def rep_guide():
pyro.sample("x", dist.Normal(0.0, 2.0))
assert_ok(
plateless_model,
rep_guide,
TraceTailAdaptive_ELBO(vectorize_particles=True, num_particles=2),
)
assert_ok(
plate_model,
rep_guide,
TraceTailAdaptive_ELBO(vectorize_particles=True, num_particles=2),
)
def test_tail_adaptive_error():
def plateless_model():
pyro.sample("x", dist.Normal(0.0, 1.0))
def rep_guide():
pyro.sample("x", dist.Normal(0.0, 2.0))
def nonrep_guide():
pyro.sample("x", fakes.NonreparameterizedNormal(0.0, 2.0))
assert_error(
plateless_model,
rep_guide,
TraceTailAdaptive_ELBO(vectorize_particles=False, num_particles=2),
)
assert_error(
plateless_model,
nonrep_guide,
TraceTailAdaptive_ELBO(vectorize_particles=True, num_particles=2),
)
def test_tail_adaptive_warning():
def plateless_model():
pyro.sample("x", dist.Normal(0.0, 1.0))
def rep_guide():
pyro.sample("x", dist.Normal(0.0, 2.0))
assert_warning(
plateless_model,
rep_guide,
TraceTailAdaptive_ELBO(vectorize_particles=True, num_particles=1),
)
@pytest.mark.parametrize(
"Elbo",
[
Trace_ELBO,
TraceMeanField_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
],
)
def test_reparam_ok(Elbo):
def model():
x = pyro.sample("x", dist.Normal(0.0, 1.0))
pyro.sample("y", dist.Normal(x, 1.0), obs=torch.tensor(0.0))
def guide():
loc = pyro.param("loc", torch.tensor(0.0))
pyro.sample("x", dist.Normal(loc, 1.0))
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("mask", [True, False, torch.tensor(True), torch.tensor(False)])
@pytest.mark.parametrize(
"Elbo",
[
Trace_ELBO,
TraceMeanField_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
],
)
def test_reparam_mask_ok(Elbo, mask):
def model():
x = pyro.sample("x", dist.Normal(0.0, 1.0))
with poutine.mask(mask=mask):
pyro.sample("y", dist.Normal(x, 1.0), obs=torch.tensor(0.0))
def guide():
loc = pyro.param("loc", torch.tensor(0.0))
pyro.sample("x", dist.Normal(loc, 1.0))
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize(
"mask",
[
True,
False,
torch.tensor(True),
torch.tensor(False),
torch.tensor([False, True]),
],
)
@pytest.mark.parametrize(
"Elbo",
[
Trace_ELBO,
TraceMeanField_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
],
)
def test_reparam_mask_plate_ok(Elbo, mask):
data = torch.randn(2, 3).exp()
data /= data.sum(-1, keepdim=True)
def model():
c = pyro.sample("c", dist.LogNormal(0.0, 1.0).expand([3]).to_event(1))
with pyro.plate("data", len(data)), poutine.mask(mask=mask):
pyro.sample("obs", dist.Dirichlet(c), obs=data)
def guide():
loc = pyro.param("loc", torch.zeros(3))
scale = pyro.param("scale", torch.ones(3), constraint=constraints.positive)
pyro.sample("c", dist.LogNormal(loc, scale).to_event(1))
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("num_particles", [1, 2])
@pytest.mark.parametrize(
"mask",
[
torch.tensor(True),
torch.tensor(False),
torch.tensor([True]),
torch.tensor([False]),
torch.tensor([False, True, False]),
],
)
@pytest.mark.parametrize(
"Elbo",
[
Trace_ELBO,
TraceEnum_ELBO,
TraceGraph_ELBO,
TraceMeanField_ELBO,
],
)
def test_obs_mask_ok(Elbo, mask, num_particles):
data = torch.tensor([7.0, 7.0, 7.0])
def model():
x = pyro.sample("x", dist.Normal(0.0, 1.0))
with pyro.plate("plate", len(data)):
y = pyro.sample("y", dist.Normal(x, 1.0), obs=data, obs_mask=mask)
assert ((y == data) == mask).all()
def guide():
loc = pyro.param("loc", torch.zeros(()))
scale = pyro.param("scale", torch.ones(()), constraint=constraints.positive)
x = pyro.sample("x", dist.Normal(loc, scale))
with pyro.plate("plate", len(data)):
with poutine.mask(mask=~mask):
pyro.sample("y_unobserved", dist.Normal(x, 1.0))
elbo = Elbo(
num_particles=num_particles,
vectorize_particles=True,
strict_enumeration_warning=False,
)
assert_ok(model, guide, elbo)
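# Illustrative sketch; this underscore-prefixed helper is hypothetical (not
# part of the suite pytest collects). With obs_mask, entries where the mask
# is True are clamped to the observed data and entries where it is False
# are imputed from an auxiliary latent site named "<name>_unobserved",
# which is why the guides here sample "y_unobserved" under
# poutine.mask(mask=~mask).
def _sketch_obs_mask():
    mask = torch.tensor([True, False])
    data = torch.zeros(2)
    with pyro.plate("sketch_obs", 2):
        y = pyro.sample("y_sketch", dist.Normal(0.0, 1.0), obs=data, obs_mask=mask)
    assert (y[mask] == data[mask]).all()  # observed entries match the data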
@pytest.mark.parametrize("num_particles", [1, 2])
@pytest.mark.parametrize(
"mask",
[
torch.tensor(True),
torch.tensor(False),
torch.tensor([True]),
torch.tensor([False]),
torch.tensor([False, True, True, False]),
],
)
@pytest.mark.parametrize(
"Elbo",
[
Trace_ELBO,
TraceEnum_ELBO,
TraceGraph_ELBO,
TraceMeanField_ELBO,
],
)
def test_obs_mask_multivariate_ok(Elbo, mask, num_particles):
data = torch.full((4, 3), 7.0)
def model():
x = pyro.sample("x", dist.MultivariateNormal(torch.zeros(3), torch.eye(3)))
with pyro.plate("plate", len(data)):
y = pyro.sample(
"y", dist.MultivariateNormal(x, torch.eye(3)), obs=data, obs_mask=mask
)
assert ((y == data).all(-1) == mask).all()
def guide():
loc = pyro.param("loc", torch.zeros(3))
cov = pyro.param("cov", torch.eye(3), constraint=constraints.positive_definite)
x = pyro.sample("x", dist.MultivariateNormal(loc, cov))
with pyro.plate("plate", len(data)):
with poutine.mask(mask=~mask):
pyro.sample("y_unobserved", dist.MultivariateNormal(x, torch.eye(3)))
elbo = Elbo(
num_particles=num_particles,
vectorize_particles=True,
strict_enumeration_warning=False,
)
assert_ok(model, guide, elbo)
@pytest.mark.parametrize(
"Elbo",
[
Trace_ELBO,
TraceEnum_ELBO,
TraceGraph_ELBO,
TraceMeanField_ELBO,
],
)
def test_obs_mask_multivariate_error(Elbo):
data = torch.full((3, 2), 7.0)
# This mask is invalid because it includes event shape.
mask = torch.tensor([[False, False], [False, True], [True, False]])
def model():
x = pyro.sample("x", dist.MultivariateNormal(torch.zeros(2), torch.eye(2)))
with pyro.plate("plate", len(data)):
pyro.sample(
"y", dist.MultivariateNormal(x, torch.eye(2)), obs=data, obs_mask=mask
)
def guide():
loc = pyro.param("loc", torch.zeros(2))
x = pyro.sample("x", dist.MultivariateNormal(loc, torch.eye(2)))
with pyro.plate("plate", len(data)):
with poutine.mask(mask=~mask):
pyro.sample("y_unobserved", dist.MultivariateNormal(x, torch.eye(2)))
elbo = Elbo(strict_enumeration_warning=False)
assert_error(model, guide, elbo, match="Invalid obs_mask shape")
@pytest.mark.parametrize("scale", [1, 0.1, torch.tensor(0.5)])
@pytest.mark.parametrize(
"Elbo",
[
Trace_ELBO,
TraceMeanField_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
],
)
def test_reparam_scale_ok(Elbo, scale):
def model():
x = pyro.sample("x", dist.Normal(0.0, 1.0))
with poutine.scale(scale=scale):
pyro.sample("y", dist.Normal(x, 1.0), obs=torch.tensor(0.0))
def guide():
loc = pyro.param("loc", torch.tensor(0.0))
pyro.sample("x", dist.Normal(loc, 1.0))
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize(
"scale",
[
1,
0.1,
torch.tensor(0.5),
torch.tensor([0.1, 0.9]),
],
)
@pytest.mark.parametrize(
"Elbo",
[
Trace_ELBO,
TraceMeanField_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
],
)
def test_reparam_scale_plate_ok(Elbo, scale):
data = torch.randn(2, 3).exp()
data /= data.sum(-1, keepdim=True)
def model():
c = pyro.sample("c", dist.LogNormal(0.0, 1.0).expand([3]).to_event(1))
with pyro.plate("data", len(data)), poutine.scale(scale=scale):
pyro.sample("obs", dist.Dirichlet(c), obs=data)
def guide():
loc = pyro.param("loc", torch.zeros(3))
scale = pyro.param("scale", torch.ones(3), constraint=constraints.positive)
pyro.sample("c", dist.LogNormal(loc, scale).to_event(1))
assert_ok(model, guide, Elbo())
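# dist.Stable has no usable log_prob, so only the sample-based EnergyDistance
# losses can fit it directly in the test below.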
@pytest.mark.parametrize(
"Elbo",
[
EnergyDistance_prior,
EnergyDistance_noprior,
],
)
def test_no_log_prob_ok(Elbo):
def model(data):
loc = pyro.sample("loc", dist.Normal(0, 1))
scale = pyro.sample("scale", dist.LogNormal(0, 1))
with pyro.plate("data", len(data)):
pyro.sample("obs", dist.Stable(1.5, 0.5, scale, loc), obs=data)
def guide(data):
map_loc = pyro.param("map_loc", torch.tensor(0.0))
map_scale = pyro.param(
"map_scale", torch.tensor(1.0), constraint=constraints.positive
)
pyro.sample("loc", dist.Delta(map_loc))
pyro.sample("scale", dist.Delta(map_scale))
data = torch.randn(10)
assert_ok(model, guide, Elbo(), data=data)
def test_reparam_stable():
@poutine.reparam(config={"z": LatentStableReparam()})
def model():
stability = pyro.sample("stability", dist.Uniform(0.0, 2.0))
skew = pyro.sample("skew", dist.Uniform(-1.0, 1.0))
y = pyro.sample("z", dist.Stable(stability, skew))
pyro.sample("x", dist.Poisson(y.abs()), obs=torch.tensor(1.0))
def guide():
pyro.sample("stability", dist.Delta(torch.tensor(1.5)))
pyro.sample("skew", dist.Delta(torch.tensor(0.0)))
pyro.sample("z_uniform", dist.Delta(torch.tensor(0.1)))
pyro.sample("z_exponential", dist.Delta(torch.tensor(1.0)))
assert_ok(model, guide, Trace_ELBO())
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_normal_normal(num_particles):
pytest.importorskip("funsor")
data = torch.tensor(0.0)
def model():
x = pyro.sample("x", dist.Normal(0.0, 1.0))
with poutine.collapse():
y = pyro.sample("y", dist.Normal(x, 1.0))
pyro.sample("z", dist.Normal(y, 1.0), obs=data)
def guide():
loc = pyro.param("loc", torch.tensor(0.0))
scale = pyro.param("scale", torch.tensor(1.0), constraint=constraints.positive)
pyro.sample("x", dist.Normal(loc, scale))
elbo = Trace_ELBO(num_particles=num_particles, vectorize_particles=True)
assert_ok(model, guide, elbo)
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_normal_normal_plate(num_particles):
pytest.importorskip("funsor")
data = torch.randn(5)
def model():
x = pyro.sample("x", dist.Normal(0.0, 1.0))
with poutine.collapse():
y = pyro.sample("y", dist.Normal(x, 1.0))
with pyro.plate("data", len(data), dim=-1):
pyro.sample("z", dist.Normal(y, 1.0), obs=data)
def guide():
loc = pyro.param("loc", torch.tensor(0.0))
scale = pyro.param("scale", torch.tensor(1.0), constraint=constraints.positive)
pyro.sample("x", dist.Normal(loc, scale))
elbo = Trace_ELBO(
num_particles=num_particles, vectorize_particles=True, max_plate_nesting=1
)
assert_ok(model, guide, elbo)
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_normal_plate_normal(num_particles):
pytest.importorskip("funsor")
data = torch.randn(5)
def model():
x = pyro.sample("x", dist.Normal(0.0, 1.0))
with poutine.collapse():
with pyro.plate("data", len(data), dim=-1):
y = pyro.sample("y", dist.Normal(x, 1.0))
pyro.sample("z", dist.Normal(y, 1.0), obs=data)
def guide():
loc = pyro.param("loc", torch.tensor(0.0))
scale = pyro.param("scale", torch.tensor(1.0), constraint=constraints.positive)
pyro.sample("x", dist.Normal(loc, scale))
elbo = Trace_ELBO(
num_particles=num_particles, vectorize_particles=True, max_plate_nesting=1
)
assert_ok(model, guide, elbo)
@pytest.mark.xfail(reason="missing pattern")
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_beta_bernoulli(num_particles):
pytest.importorskip("funsor")
data = torch.tensor(0.0)
def model():
c = pyro.sample("c", dist.Gamma(1, 1))
with poutine.collapse():
probs = pyro.sample("probs", dist.Beta(c, 2))
pyro.sample("obs", dist.Bernoulli(probs), obs=data)
def guide():
a = pyro.param("a", torch.tensor(1.0), constraint=constraints.positive)
b = pyro.param("b", torch.tensor(1.0), constraint=constraints.positive)
pyro.sample("c", dist.Gamma(a, b))
elbo = Trace_ELBO(num_particles=num_particles, vectorize_particles=True)
assert_ok(model, guide, elbo)
@pytest.mark.xfail(reason="missing pattern")
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_beta_binomial(num_particles):
pytest.importorskip("funsor")
data = torch.tensor(5.0)
def model():
c = pyro.sample("c", dist.Gamma(1, 1))
with poutine.collapse():
probs = pyro.sample("probs", dist.Beta(c, 2))
pyro.sample("obs", dist.Binomial(10, probs), obs=data)
def guide():
a = pyro.param("a", torch.tensor(1.0), constraint=constraints.positive)
b = pyro.param("b", torch.tensor(1.0), constraint=constraints.positive)
pyro.sample("c", dist.Gamma(a, b))
elbo = Trace_ELBO(num_particles=num_particles, vectorize_particles=True)
assert_ok(model, guide, elbo)
@pytest.mark.xfail(reason="missing pattern in Funsor")
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_beta_binomial_plate(num_particles):
pytest.importorskip("funsor")
data = torch.tensor([0.0, 1.0, 5.0, 5.0])
def model():
c = pyro.sample("c", dist.Gamma(1, 1))
with poutine.collapse():
probs = pyro.sample("probs", dist.Beta(c, 2))
with pyro.plate("plate", len(data)):
pyro.sample("obs", dist.Binomial(10, probs), obs=data)
def guide():
a = pyro.param("a", torch.tensor(1.0), constraint=constraints.positive)
b = pyro.param("b", torch.tensor(1.0), constraint=constraints.positive)
pyro.sample("c", dist.Gamma(a, b))
elbo = Trace_ELBO(
num_particles=num_particles, vectorize_particles=True, max_plate_nesting=1
)
assert_ok(model, guide, elbo)
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_barrier(num_particles):
pytest.importorskip("funsor")
data = torch.tensor([0.0, 1.0, 5.0, 5.0])
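    # poutine.collapse marginalizes the sites inside the context; pyro.barrier
    # materializes ``z`` as a concrete tensor so ordinary tensor ops (here
    # torch.sigmoid) can run on it outside the collapsed computation.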
def model():
with poutine.collapse():
z = pyro.sample("z_init", dist.Normal(0, 1))
for t, x in enumerate(data):
z = pyro.sample("z_{}".format(t), dist.Normal(z, 1))
pyro.sample("x_t{}".format(t), dist.Normal(z, 1), obs=x)
z = pyro.barrier(z)
z = torch.sigmoid(z)
return z
def guide():
pass
elbo = Trace_ELBO(num_particles=num_particles, vectorize_particles=True)
assert_ok(model, guide, elbo)
def test_ordered_logistic_plate():
N = 5 # num data points/batch size
K = 4 # num categories
data = (K * torch.rand(N)).long().float()
def model():
predictor = pyro.sample(
"predictor", dist.Normal(0.0, 1.0).expand([N]).to_event(1)
)
cutpoints = pyro.sample(
"cutpoints", dist.Normal(0.0, 1.0).expand([K - 1]).to_event(1)
)
# would have identifiability issues, but this isn't a real model...
cutpoints = torch.sort(cutpoints, dim=-1).values
with pyro.plate("obs_plate", N):
pyro.sample("obs", dist.OrderedLogistic(predictor, cutpoints), obs=data)
def guide():
# parameters
pred_mu = pyro.param("pred_mu", torch.zeros(N))
pred_std = pyro.param("pred_std", torch.ones(N))
cp_mu = pyro.param("cp_mu", torch.zeros(K - 1))
cp_std = pyro.param("cp_std", torch.ones(K - 1))
# sample
pyro.sample("predictor", dist.Normal(pred_mu, pred_std).to_event(1))
pyro.sample("cutpoints", dist.Normal(cp_mu, cp_std).to_event(1))
assert_ok(model, guide, Trace_ELBO())
| 33.58243 | 88 | 0.581737 |
e56c83311bb9ec90172a6cd85010f27d7b05e0c3 | 208 | py | Python | gammagl/utils/check.py | BUPT-GAMMA/GammaGL | 2b9f32e1ac3533cb75a063243e8a2fa654466d18 | [
"Apache-2.0"
] | null | null | null | gammagl/utils/check.py | BUPT-GAMMA/GammaGL | 2b9f32e1ac3533cb75a063243e8a2fa654466d18 | [
"Apache-2.0"
] | null | null | null | gammagl/utils/check.py | BUPT-GAMMA/GammaGL | 2b9f32e1ac3533cb75a063243e8a2fa654466d18 | [
"Apache-2.0"
] | null | null | null | import numpy as np
def check_is_numpy(*data):
"""
    Return True if any of the given arguments is a numpy.ndarray.
"""
for d in data:
if isinstance(d, np.ndarray):
return True
return False
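# Example: check_is_numpy([1, 2], np.zeros(3)) returns True (an ndarray was
# found); check_is_numpy([1, 2], (3, 4)) returns False.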
| 17.333333 | 45 | 0.591346 |
5b641468b9cb92249cd7f042a110329fe1ec29a0 | 2,739 | py | Python | resources/sksl/update_fuzzer.py | fourgrad/skia | b9b550e9bb1b73001088ba89483e2f9bbe46c3db | [
"BSD-3-Clause"
] | 1 | 2022-03-11T02:34:47.000Z | 2022-03-11T02:34:47.000Z | resources/sksl/update_fuzzer.py | Wal1e/skia | eda97288bdc8e87afea817b25d561724c2b6a2f8 | [
"BSD-3-Clause"
] | null | null | null | resources/sksl/update_fuzzer.py | Wal1e/skia | eda97288bdc8e87afea817b25d561724c2b6a2f8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2021 Google LLC
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This tool updates the OSS-Fuzz corpus using Google Cloud's 'gsutil' tool.
# You will need to be given access to the Google Storage fuzzer repo (at
# gs://skia-fuzzer/oss-fuzz/) by the Skia Infra team.
# You will also need to set up credentials for gsutil on your machine by running:
# gcloud auth login
import os
import subprocess
import sys
import tempfile
import zipfile
# Locate this script in the file system.
startDir = os.path.dirname(os.path.abspath(__file__))
fileNum = 1
# Prepare a scratch zip file to hold the corpus inputs.
with tempfile.NamedTemporaryFile(suffix='primary.zip', delete=False, mode='w') as pathToZip:
with zipfile.ZipFile(pathToZip.name, 'w', zipfile.ZIP_DEFLATED) as archive:
# Iterate over every file in this directory and use it to assemble our corpus.
for root, dirs, files in os.walk(startDir):
for file in files:
# Exclude files that won't be useful fuzzer inputs.
if (not file.startswith('.') # Hidden
and not file.endswith('.py') # Python
and not file.endswith('.test') # ES2 conformance script
and not file.endswith('.txt')): # Text
# Prepend a number to each output filename to guarantee uniqueness.
pathInZip = '%d_%s' % (fileNum, file)
fileNum += 1
with open('%s/%s' % (root, file), 'r') as skslFile:
# Read the SkSL text as input.
inputSkSL = skslFile.read()
# Copy the SkSL into our zip archive.
archive.writestr(pathInZip, inputSkSL)
try:
# Upload our zip file to cloud storage.
output = subprocess.check_output(
['gsutil', 'cp', pathToZip.name,
'gs://skia-fuzzer/oss-fuzz/sksl_seed_corpus.zip'],
stderr=subprocess.STDOUT)
# Make the uploaded file world-readable.
output = subprocess.check_output(
['gsutil', 'acl', 'ch', '-u', 'AllUsers:R',
'gs://skia-fuzzer/oss-fuzz/sksl_seed_corpus.zip'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
# Report the error.
print("### Unable to upload fuzzer corpus to Google Cloud:")
print(" " + "\n ".join(err.output.splitlines()))
print("\nPlease read the notes at the top of update_fuzzer.py for next steps.\n")
sys.exit(err.returncode)
| 42.796875 | 96 | 0.606061 |
831c7cc49c1459b24e86df5acf61d5e8f6fe51d6 | 994 | py | Python | sphinxcontrib/collections/drivers/copy_folder.py | useblocks/sphinx-collections | 4f1fd7d4f4682a2df47cdde98b9668d59b2983ff | [
"MIT"
] | 4 | 2020-05-22T16:22:14.000Z | 2021-11-09T11:48:04.000Z | sphinxcontrib/collections/drivers/copy_folder.py | useblocks/sphinx-collections | 4f1fd7d4f4682a2df47cdde98b9668d59b2983ff | [
"MIT"
] | 1 | 2020-07-10T23:03:24.000Z | 2020-07-10T23:03:24.000Z | sphinxcontrib/collections/drivers/copy_folder.py | useblocks/sphinx-collections | 4f1fd7d4f4682a2df47cdde98b9668d59b2983ff | [
"MIT"
] | 1 | 2020-05-22T17:27:39.000Z | 2020-05-22T17:27:39.000Z | import os
from shutil import copytree, ignore_patterns, rmtree
from sphinxcontrib.collections.drivers import Driver
class CopyFolderDriver(Driver):
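    """Driver that copies a folder into the collection.
    Expected ``self.config`` keys (inferred from the usage below): ``source``
    (folder to copy), ``target`` (destination path), ``name`` (collection
    name) and, optionally, ``ignore`` (a list of glob patterns).
    """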
def run(self):
self.info('Copy folder...')
if not os.path.exists(self.config['source']):
self.error('Source {} does not exist'.format(self.config['source']))
return
try:
copytree(self.config['source'],
self.config['target'],
ignore_patterns(*self.config.get('ignore', [])))
except IOError as e:
self.error('Problems during copying folder.', e)
def clean(self):
try:
rmtree(self.config['target'])
self.info('Folder deleted: {}'.format(self.config['target']))
except FileNotFoundError:
pass # Already cleaned? I'm okay with it.
except IOError as e:
self.error('Problems during cleaning for collection {}'.format(self.config['name']), e)
| 31.0625 | 99 | 0.590543 |
b2e019bcf07e2d4e1e8384684ca03264dfe05538 | 8,442 | py | Python | Curso06.py | DarlanNoetzold/DeepLearning_Alura | efe67f85bfe5cf591263d253f24f502a90ec0b5d | [
"MIT"
] | 1 | 2022-03-02T14:24:09.000Z | 2022-03-02T14:24:09.000Z | Curso06.py | DarlanNoetzold/DeepLearning_Alura | efe67f85bfe5cf591263d253f24f502a90ec0b5d | [
"MIT"
] | null | null | null | Curso06.py | DarlanNoetzold/DeepLearning_Alura | efe67f85bfe5cf591263d253f24f502a90ec0b5d | [
"MIT"
] | null | null | null | import pandas as pd
dados_portugues = pd.read_csv("stackoverflow_portugues.csv")
dados_portugues.head()
questao_portugues = dados_portugues.Questão[5]
print(questao_portugues)
dados_ingles = pd.read_csv("stackoverflow_ingles.csv")
dados_ingles.head()
questao_ingles = dados_ingles.Questão[0]
print(questao_ingles)
import re
re.findall(r"<.*?>",questao_portugues)
print(questao_portugues)
texto_teste = re.sub(r"<.*?>"," T----E----S----T----E ",questao_portugues)
print(texto_teste)
re.search(r"70","18728736187263817628631872638716283670")
regex = re.compile(r"70")
regex.search("18728736187263817628631872638716283670")
from timeit import timeit
setup = """import re"""
timeit("""re.search(r"70","18728736187263817628631872638716283670")""", setup)
setup = """import re
regex = re.compile(r"70")"""
timeit("""regex.search("18728736187263817628631872638716283670")""", setup)
def remover(textos, regex):
if type(textos) == str:
return regex.sub("", textos)
else:
return [regex.sub("", texto) for texto in textos]
regex_html = re.compile(r"<.*?>")
questao_sem_tag = remover(questao_ingles, regex_html)
print(questao_sem_tag)
print(questao_ingles)
def substituir_codigo(textos, regex):
if type(textos) == str:
return regex.sub("CODE", textos)
else:
return [regex.sub("CODE", texto) for texto in textos]
regex_codigo = re.compile(r"<code>(.|(\n))*?</code>")
questoes_port_sem_code = substituir_codigo(dados_portugues.Questão,
regex_codigo)
questoes_port_sem_code_tag = remover(questoes_port_sem_code, regex_html)
dados_portugues["sem_code_tag"] = questoes_port_sem_code_tag
questoes_ing_sem_code = substituir_codigo(dados_ingles.Questão,
regex_codigo)
questoes_ing_sem_code_tag = remover(questoes_ing_sem_code, regex_html)
dados_ingles["sem_code_tag"] = questoes_ing_sem_code_tag
dados_ingles.head()
regex_pontuacao = re.compile(r"[^\w\s]")
def minusculo(textos):
if type(textos) == str:
return textos.lower()
else:
return [texto.lower() for texto in textos]
regex_digitos = re.compile(r"\d+")
print(remover("Alura \n 1234 Caelum 1234", regex_digitos))
regex_espaco = re.compile(r" +")
regex_quebra_linha = re.compile(r"(\n)")
def substituir_por_espaco(textos, regex):
if type(textos) == str:
return regex.sub(" ", textos)
else:
return [regex.sub(" ", texto) for texto in textos]
print(substituir_por_espaco("Alura \n \n Caleum", regex_quebra_linha))
questoes_port_sem_pont = remover(dados_portugues.sem_code_tag,
regex_pontuacao)
questoes_port_sem_pont_minus = minusculo(questoes_port_sem_pont)
questoes_port_sem_pont_minus_dig = remover(questoes_port_sem_pont_minus,
regex_digitos)
questoes_port_sem_quebra_linha = substituir_por_espaco(questoes_port_sem_pont_minus_dig,
regex_quebra_linha)
questoes_port_sem_espaco_duplicado = substituir_por_espaco(questoes_port_sem_quebra_linha,
regex_espaco)
dados_portugues["questoes_tratadas"] = questoes_port_sem_espaco_duplicado
questoes_ing_sem_pont = remover(dados_ingles.sem_code_tag,
regex_pontuacao)
questoes_ing_sem_pont_minus = minusculo(questoes_ing_sem_pont)
questoes_ing_sem_pont_minus_dig = remover(questoes_ing_sem_pont_minus,
regex_digitos)
questoes_ing_sem_quebra_linha = substituir_por_espaco(questoes_ing_sem_pont_minus_dig,
regex_quebra_linha)
questoes_ing_sem_espaco_duplicado = substituir_por_espaco(questoes_ing_sem_quebra_linha,
regex_espaco)
dados_ingles["questoes_tratadas"] = questoes_ing_sem_espaco_duplicado
from nltk.util import bigrams
texto_teste = "alura"
print(list(bigrams(texto_teste)))
from nltk.lm.preprocessing import pad_both_ends
print(list(bigrams(pad_both_ends(texto_teste, n = 2))))
from sklearn.model_selection import train_test_split
port_treino, port_teste = train_test_split(dados_portugues.questoes_tratadas,
test_size = 0.2,
random_state = 123)
ing_treino, ing_teste = train_test_split(dados_ingles.questoes_tratadas,
test_size = 0.2,
random_state = 123)
todas_questoes_port = ' '.join(port_treino)
from nltk.tokenize import WhitespaceTokenizer
todas_palavras_port = WhitespaceTokenizer().tokenize(todas_questoes_port)
print(todas_palavras_port)
from nltk.lm.preprocessing import padded_everygram_pipeline
port_treino_bigram, vocab_port = padded_everygram_pipeline(2,
todas_palavras_port)
from nltk.lm import MLE
modelo_port = MLE(2)
modelo_port.fit(port_treino_bigram, vocab_port)
modelo_port.generate(num_words=6)
from nltk.lm import NgramCounter
modelo_port.counts[['m']].items()
texto = "good morning"
palavras = WhitespaceTokenizer().tokenize(texto)
palavras_fakechar = [list(pad_both_ends(palavra, n = 2)) for palavra in palavras]
palavras_bigramns = [list(bigrams(palavra)) for palavra in palavras_fakechar]
print(palavras_bigramns)
print(palavras_bigramns[0])
print(modelo_port.perplexity(palavras_bigramns[0]))
print(modelo_port.perplexity(palavras_bigramns[1]))
def treinar_modelo_mle(lista_textos):
todas_questoes = ' '.join(lista_textos)
todas_palavras = WhitespaceTokenizer().tokenize(todas_questoes)
bigrams, vocabulario = padded_everygram_pipeline(2, todas_palavras)
modelo = MLE(2)
modelo.fit(bigrams, vocabulario)
return modelo
modelo_port_2 = treinar_modelo_mle(port_treino)
print(modelo_port_2.perplexity(palavras_bigramns[0]))
print(modelo_port_2.perplexity(palavras_bigramns[1]))
modelo_ing = treinar_modelo_mle(ing_treino)
print(modelo_ing.perplexity(palavras_bigramns[0]))
print(modelo_ing.perplexity(palavras_bigramns[1]))
def calcular_perplexidade(modelo, texto):
perplexidade = 0
palavras = WhitespaceTokenizer().tokenize(texto)
palavras_fakechar = [list(pad_both_ends(palavra, n=2)) for palavra in palavras]
palavras_bigramns = [list(bigrams(palavra)) for palavra in palavras_fakechar]
for palavra in palavras_bigramns:
perplexidade += modelo.perplexity(palavra)
return perplexidade
print(calcular_perplexidade(modelo_ing, "good morning"))
print(calcular_perplexidade(modelo_port, port_teste.iloc[0]))
port_teste.iloc[0]
print(calcular_perplexidade(modelo_ing, port_teste.iloc[0]))
from nltk.lm import Laplace
def treinar_modelo_Laplace(lista_textos):
todas_questoes = ' '.join(lista_textos)
todas_palavras = WhitespaceTokenizer().tokenize(todas_questoes)
bigrams, vocabulario = padded_everygram_pipeline(2, todas_palavras)
modelo = Laplace(2)
modelo.fit(bigrams, vocabulario)
return modelo
modelo_ing_Laplace = treinar_modelo_Laplace(ing_treino)
print(calcular_perplexidade(modelo_ing_Laplace, port_teste.iloc[0]))
modelo_port_Laplace = treinar_modelo_Laplace(port_treino)
print(calcular_perplexidade(modelo_port_Laplace, port_teste.iloc[0]))
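# NOTE: modelo_esp_Laplace used below is assumed to be trained elsewhere with
# treinar_modelo_Laplace on a Spanish corpus; it is not defined in this script.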
def atribui_idioma(lista_textos):
idioma = []
for texto in lista_textos:
portugues = calcular_perplexidade(modelo_port_Laplace, texto)
ingles = calcular_perplexidade(modelo_ing_Laplace, texto)
espanhol = calcular_perplexidade(modelo_esp_Laplace, texto)
if ingles >= portugues <= espanhol:
idioma.append("portugues")
elif portugues > ingles < espanhol:
idioma.append("ingles")
else:
idioma.append("espanhol")
return idioma
resultados_portugues = atribui_idioma(port_teste)
taxa_portugues = resultados_portugues.count("portugues")/len(resultados_portugues)
len(port_teste)
resultados_ingles = atribui_idioma(ing_teste)
taxa_ingles = resultados_ingles.count("ingles")/len(resultados_ingles)
print("Port", taxa_portugues)
print("Ing", taxa_ingles) | 34.040323 | 90 | 0.715826 |
ced241c91dd4f735ef3afa077be474f7aed0ee83 | 765 | py | Python | spider/Config_International_Relations_od.py | iecasszyjy/tweet_search-master | e4978521a39964c22ae46bf35d6ff17710e8e6c6 | [
"MIT"
] | null | null | null | spider/Config_International_Relations_od.py | iecasszyjy/tweet_search-master | e4978521a39964c22ae46bf35d6ff17710e8e6c6 | [
"MIT"
] | 2 | 2021-03-31T18:54:16.000Z | 2021-12-13T19:49:08.000Z | spider/Config_International_Relations_od.py | iecasszyjy/tweet_search-master | e4978521a39964c22ae46bf35d6ff17710e8e6c6 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
# Configuration for the got module, the MongoDB database and the Redis database
# Configuration info for open-domain International Relations events
import os
import sys
import pymongo
import redis
def get_noau_config():
    # got module
if sys.version_info[0] < 3:
import got
else:
import got3 as got
    # MongoDB database
client = pymongo.MongoClient(os.environ['MONGOHOST'], 27017)
# client = pymongo.MongoClient('54.161.160.206', 27017)
db_auth = client.admin
db_auth.authenticate(name='iecas', password='szyjy')
    db = client.International_Relations_od  # International_Relations_od database
    # Redis database
r = redis.StrictRedis(host=os.environ['REDISHOST'], port=6379, db=0)
# r = redis.StrictRedis('54.161.160.206', port=6379, db=0)
return got, db, r
| 24.677419 | 73 | 0.652288 |
4365f4b4f7feeb6285d6a68edc16b08c091433c7 | 5,869 | py | Python | web/src/survey/models.py | frhumanes/consulting | 400df4fc59240d2cd1c5807feaabacd056fdce03 | [
"Apache-2.0"
] | null | null | null | web/src/survey/models.py | frhumanes/consulting | 400df4fc59240d2cd1c5807feaabacd056fdce03 | [
"Apache-2.0"
] | null | null | null | web/src/survey/models.py | frhumanes/consulting | 400df4fc59240d2cd1c5807feaabacd056fdce03 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.text import truncate_words
from log.models import TraceableModel
from formula.models import Variable
from django.conf import settings
class Survey(TraceableModel):
blocks = models.ManyToManyField(
'Block',
related_name='blocks_surveys',
verbose_name=_(u'Bloque'),
null=True,
blank=True)
multitype = models.BooleanField(
_(u'Disponible para pacientes'),
default=False,
help_text=_(u"Permite al paciente realizar el cuestionario"))
name = models.CharField(_(u'Nombre'), max_length=100)
code = models.IntegerField(_(u'Código'), blank=True, null=True,
db_index=True)
is_reportable = models.BooleanField(_(u'¿Tiene informe?'), default=False)
def __unicode__(self):
return u'%s' % (self.name)
def num_blocks(self):
return self.blocks.values('code').distinct().count()
def get_available_kinds(self, flat=True):
kinds = list(self.blocks.values_list('kind', flat=flat).distinct())
if len(kinds) > 1 and settings.GENERAL in kinds:
kinds.remove(settings.GENERAL)
elif len(kinds) == 0:
kinds = [settings.GENERAL]
return kinds
class Meta:
verbose_name = "Cuestionario"
class Template(TraceableModel):
name = models.CharField(_(u'Nombre'), max_length=100, null=True,
unique=True)
template = models.TextField(_(u'Plantilla'), max_length=5000)
class Meta:
verbose_name = "Plantilla"
class Category(TraceableModel):
KIND = (
(settings.GENERAL, _(u'General')),
(settings.EXTENSO, _(u'Extenso')),
(settings.ABREVIADO, _(u'Abreviado')),
)
questions = models.ManyToManyField('Question',
related_name='questions_categories',
verbose_name=_(u'Preguntas'))
name = models.CharField(_(u'Nombre'), max_length=100)
code = models.IntegerField(_(u'Código'), blank=True, null=True,
db_index=True)
variables = models.ManyToManyField(
Variable,
related_name='variables_categories',
verbose_name=_(u'Variables asociadas'),
blank=True,
null=True)
kind = models.IntegerField(_(u'Tipo'), choices=KIND)
def __unicode__(self):
return u'%s [%s]' % (self.name, self.get_kind())
def get_kind(self):
if self.kind == settings.GENERAL:
return 'General'
elif self.kind == settings.EXTENSO:
return 'Extenso'
else:
return 'Abreviado'
class Meta:
verbose_name = u"Categoría"
class Block(TraceableModel):
KIND = (
(settings.GENERAL, _(u'General')),
(settings.EXTENSO, _(u'Extenso')),
(settings.ABREVIADO, _(u'Abreviado')),
)
categories = models.ManyToManyField(
'Category',
related_name='categories_blocks',
verbose_name=_(u'Categorías'))
kind = models.IntegerField(_(u'Tipo'), choices=KIND)
name = models.CharField(_(u'Nombre'), max_length=100)
code = models.IntegerField(_(u'Código'), db_index=True)
is_scored = models.BooleanField(
_(u'¿Es puntuable?'),
default=False,
help_text=_(u'Se deberán crear las formulas asociadas'))
def __unicode__(self):
return u'%s [%s]' % (self.name, self.get_kind())
def get_kind(self):
if self.kind == settings.GENERAL:
return 'General'
elif self.kind == settings.EXTENSO:
return 'Extenso'
else:
return 'Abreviado'
class Meta:
verbose_name = "Bloque"
class Question(models.Model):
KIND = (
(settings.UNISEX, _(u'Ambos sexos')),
(settings.MAN, _(u'Hombre')),
(settings.WOMAN, _(u'Mujer')),
)
text = models.TextField(_(u'Text'), max_length=500)
code = models.CharField(_(u'Código'), max_length=10, db_index=True,
unique=True)
single = models.BooleanField(_(u'Respuesta única'), default=False)
kind = models.IntegerField(
_(u'Sexo'),
choices=KIND,
default=settings.UNISEX,
help_text="Pregunta disponible sólo para el sexo seleccionado")
required = models.BooleanField(_(u'Respuesta requerida'), default=False)
order = models.DecimalField(_(u'Orden'), default=0,
max_digits=6, decimal_places=3,
blank=True, null=True)
def __unicode__(self):
return u'%s - %s' % (self.code, truncate_words(self.text, 20))
def get_kind(self):
if self.kind == settings.MAN:
return 'Hombre'
elif self.kind == settings.WOMAN:
return 'Mujer'
else:
return 'Ambos'
def get_af_illness(self):
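        # Extract the illness name from a Spanish family-history question of
        # the form "...ha padecido <illness> alguno...".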
return self.text[self.text.find('padecido') + 9:self.text.find(' alguno')]
class Meta:
ordering = ['id', 'code']
verbose_name = "Pregunta"
class Option(models.Model):
question = models.ForeignKey('Question',
related_name="question_options",
verbose_name=_(u'Pregunta'))
code = models.CharField(_(u'Código'), max_length=10, db_index=True,
unique=True)
weight = models.DecimalField(_(u'Peso'), max_digits=5, decimal_places=2,
blank=True, null=True)
text = models.CharField(_(u'Texto'), max_length=255)
def __unicode__(self):
return u'%s - %s' % (self.code, self.text)
class Meta:
verbose_name = u"Opción"
verbose_name_plural = "Opciones"
| 28.769608 | 82 | 0.591924 |
3b879592220a1df925d6bb661b75cbb0e2f56821 | 1,250 | py | Python | Toolbox/test_functions/test_day2week2month.py | AndresPenuela/MHA-Workshop | 69ce4cedc1396e8ee57ccde3b2eea194c58599ea | [
"MIT"
] | 1 | 2021-05-25T13:12:09.000Z | 2021-05-25T13:12:09.000Z | Toolbox/test_functions/test_day2week2month.py | AndresPenuela/MHA-Workshop | 69ce4cedc1396e8ee57ccde3b2eea194c58599ea | [
"MIT"
] | null | null | null | Toolbox/test_functions/test_day2week2month.py | AndresPenuela/MHA-Workshop | 69ce4cedc1396e8ee57ccde3b2eea194c58599ea | [
"MIT"
] | 1 | 2020-05-27T01:43:01.000Z | 2020-05-27T01:43:01.000Z | # -*- coding: utf-8 -*-
"""
Tests for the day2week2month functions
@author: Andres Peñuela
"""
import pandas as pd
import numpy as np
from numpy.testing import assert_array_equal
if __name__ == '__main__':
import sys
sys.path.append("..") # Adds higher directory to python modules path.
### Function to test ###
from Data_management.day2week2month import day2week
# Test inputs
N = 3 # 3 weeks
dates = pd.date_range(start = '2020-06-09', end = '2020-07-09', freq = 'D')
data = np.ones(dates.size+1)
# Run the function to test
dates_week,data_week,data_cum_week = day2week(N,dates,data)
### Testing functions ###
def test_dates_week():
# Expected output
dates_week_expect = pd.to_datetime(np.array(['2020-06-15','2020-06-22','2020-06-29','2020-07-06']))
# Test
assert_array_equal(dates_week,dates_week_expect)
### Testing functions ###
def test_data_week():
# Expected output
data_week_expect = np.array([[0],[7],[7],[7]])
# Test
assert_array_equal(data_week,data_week_expect)
### Testing functions ###
def test_data_cum_week():
# Expected output
data_cum_week_expect = np.array([[0],[7],[14],[21]])
# Test
assert_array_equal(data_cum_week,data_cum_week_expect) | 29.761905 | 103 | 0.6928 |
475979e9f715c0fb43d2276e1aa36cb809548e60 | 1,628 | py | Python | Sorter/utils/CameraWebsocketHandler.py | Gadgeteering/project-teachable-sorter | b275bb267b67a0b9019adfcb99cf57a2e7fc0443 | [
"Apache-2.0"
] | 35 | 2019-11-14T19:13:45.000Z | 2021-09-15T13:26:18.000Z | Sorter/utils/CameraWebsocketHandler.py | Gadgeteering/project-teachable-sorter | b275bb267b67a0b9019adfcb99cf57a2e7fc0443 | [
"Apache-2.0"
] | 14 | 2019-12-08T16:27:33.000Z | 2022-03-31T21:59:50.000Z | Sorter/utils/CameraWebsocketHandler.py | Gadgeteering/project-teachable-sorter | b275bb267b67a0b9019adfcb99cf57a2e7fc0443 | [
"Apache-2.0"
] | 12 | 2019-11-24T17:18:47.000Z | 2022-01-10T18:11:54.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tornado
import tornado.websocket
import tornado.ioloop
from tornado.iostream import IOStream
import threading
import time
import base64
import sys, os
import asyncio
cam_sockets = None
class CameraWebsocketHandler(tornado.websocket.WebSocketHandler):
def open(self):
global cam_sockets
cam_sockets.append(self)
print('new camera connection')
def on_message(self, message):
        print(message)
def on_close(self):
global cam_sockets
cam_sockets.remove(self)
print('camera connection closed')
def check_origin(self, origin):
return True
def start_server(loop, cs):
global cam_sockets
cam_sockets = cs
asyncio.set_event_loop(loop)
cam_app = tornado.web.Application([
(r'/', CameraWebsocketHandler)
])
    cam_app.listen(8889)
tornado.ioloop.IOLoop.instance().start()
def signal_handler(signum, frame):
print("Interrupt caught")
tornado.ioloop.IOLoop.instance().stop()
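    # NOTE: server_thread is not defined in this module; it is expected to be
    # the thread running start_server, supplied by the importing module.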
server_thread.stop()
| 26.688525 | 74 | 0.722973 |
3c25b3115263578fda514cd193d75798ccd7e93c | 2,023 | py | Python | source/conf.py | MinecraftMediaLibrary/MinecraftMediaLibrary-Wiki | 4e7b3e50019fddbd357b3b85926e65cc6722add5 | [
"MIT"
] | 1 | 2021-05-04T03:01:19.000Z | 2021-05-04T03:01:19.000Z | source/conf.py | MinecraftMediaLibrary/MinecraftMediaLibrary-Wiki | 4e7b3e50019fddbd357b3b85926e65cc6722add5 | [
"MIT"
] | null | null | null | source/conf.py | MinecraftMediaLibrary/MinecraftMediaLibrary-Wiki | 4e7b3e50019fddbd357b3b85926e65cc6722add5 | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'MinecraftMediaLibrary'
copyright = '2021, PulseBeat_02'
author = 'PulseBeat_02'
# The full version, including alpha/beta/rc tags
release = 'Latest'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['myst_parser', 'sphinx_tabs.tabs', 'sphinx_rtd_theme']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 36.125 | 79 | 0.67474 |
5739356097bddf3112e4d1310a358c45ff9d26e3 | 3,216 | py | Python | zmail/utils.py | choosingausernameisannoying/zmail | ff1e9c2a1736af57f28506489cecb5f7b1ddf1d3 | [
"MIT"
] | null | null | null | zmail/utils.py | choosingausernameisannoying/zmail | ff1e9c2a1736af57f28506489cecb5f7b1ddf1d3 | [
"MIT"
] | null | null | null | zmail/utils.py | choosingausernameisannoying/zmail | ff1e9c2a1736af57f28506489cecb5f7b1ddf1d3 | [
"MIT"
] | null | null | null | """
zmail.utils
~~~~~~~~~~~~
This module contains some useful functions that power zmail.
"""
import os
from typing import Optional, Union
from .helpers import get_abs_path, make_list
from .parser import parse_mail
from .structures import CaseInsensitiveDict
def save_attachment(mail: CaseInsensitiveDict, target_path: Optional[str] = None, overwrite=False):
"""Parsing attachment and save it."""
if mail.get('attachments'):
if target_path is not None:
assert os.path.isdir(target_path) and os.path.exists(target_path)
else:
target_path = os.getcwd()
for name, raw in mail['attachments']:
file_path = os.path.join(target_path, name)
if not overwrite and os.path.exists(file_path):
                raise FileExistsError("{} already exists, set overwrite to True to avoid this error.".format(file_path))
with open(file_path, 'wb') as f:
f.write(raw)
def save_one_attachment(mail: CaseInsensitiveDict, attach: Union[tuple, list], target_path: Optional[str] = None, overwrite=False):
"""Save one designated attachment."""
if attach in mail['attachments']:
if target_path is not None:
assert os.path.isdir(target_path) and os.path.exists(target_path)
else:
target_path = os.getcwd()
name, raw = attach
file_path = os.path.join(target_path, name)
if not overwrite and os.path.exists(file_path):
            raise FileExistsError("{} already exists, set overwrite to True to avoid this error.".format(file_path))
with open(file_path, 'wb') as f:
f.write(raw)
def show(mails: Union[list, CaseInsensitiveDict]) -> None:
"""Show mail or mails."""
mails = make_list(mails)
for mail in mails:
print('-------------------------')
for k in ('subject', 'id', 'from', 'to', 'date', 'content_text', 'content_html', 'attachments'):
if k != 'attachments':
print(k.capitalize() + ' ', mail.get(k))
else:
_ = ''
for idx, v in enumerate(mail['attachments']):
_ += str(idx + 1) + '.' + 'Name:' + v[0] + ' ' + 'Size:' + str(len(v[1])) + ' '
print(k.capitalize() + ' ', _)
def read_html(html_path: str):
"""Get html content by its path."""
path = get_abs_path(html_path)
with open(path, 'r') as f:
content = f.read()
return content
def read(file_path: str, SEP=b'\r\n') -> CaseInsensitiveDict:
"""Read a mail."""
abs_path = get_abs_path(file_path)
with open(abs_path, 'rb') as f:
raw_lines = f.read().split(SEP)
return parse_mail(raw_lines, 0)
def save(mail, name=None, target_path=None, overwrite=False) -> bool:
"""Save a mail."""
if name is None:
name = str(mail['subject'] + '.eml') if mail.get('subject') else 'Untitled'
if target_path is None:
target_path = os.getcwd()
file_path = os.path.join(target_path, name)
if not overwrite and os.path.exists(file_path):
        raise FileExistsError("{} already exists, set overwrite to True to avoid this error.".format(file_path))
with open(file_path, 'wb') as f:
f.write(b'\r\n'.join(mail['raw']))
return True
| 32.816327 | 124 | 0.602612 |
77d22b90312a4beb73fc29069fc503cef7bc42f9 | 2,705 | py | Python | scripts/tombstone_scale.py | IMCG/RamCloud | dad96cf34d330608acb43b009d12949ed2d938f4 | [
"0BSD"
] | 5 | 2015-11-14T16:49:06.000Z | 2019-09-03T13:21:30.000Z | scripts/tombstone_scale.py | behnamm/cs244b_project | 957e8b3979e4ca24814edd73254cc4c69ea14126 | [
"0BSD"
] | null | null | null | scripts/tombstone_scale.py | behnamm/cs244b_project | 957e8b3979e4ca24814edd73254cc4c69ea14126 | [
"0BSD"
] | 1 | 2018-02-25T11:16:27.000Z | 2018-02-25T11:16:27.000Z | #!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generates data for a recovery performance graph.
Keeps partition size constant and scales percentage of tombstones
from 0-50% of the total partition size.
"""
from __future__ import division, print_function
from common import *
import config
import recovery
import subprocess
dat = open('%s/recovery/tombstone_scale.data' % top_path, 'w', 1)
tombBytes = 44 # each tombstone is 44 bytes in the log
minObjBytes = 36 # 0-length object is 36 bytes in the log
objectBytes = tombBytes - minObjBytes # how many bytes for equal tomb/obj size
partitionBytes = 600 * 1024 * 1024 # use a 600MB partition
num_hosts = len(config.hosts)
for tombPct in range(0, 51, 10):
tombPct /= 100.0
numObjs = ((1.0 - tombPct) * partitionBytes) / (objectBytes + minObjBytes)
numTombs = (tombPct * partitionBytes) / tombBytes
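    # These counts satisfy numObjs*(objectBytes+minObjBytes) +
    # numTombs*tombBytes == partitionBytes, so tombstones fill tombPct of it.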
print('# backups:', num_hosts, file=dat)
print('# objectBytes, numObjs, numTombs, tombPct:', objectBytes,
numObjs, numTombs, tombPct * 100.0, file=dat)
args = {}
args['num_servers'] = num_hosts
args['backups_per_server'] = 1
args['num_partitions'] = 1
args['object_size'] = objectBytes
args['replicas'] = 3
args['num_objects'] = numObjs
args['num_removals'] = numTombs
args['master_ram'] = 1600
args['old_master_ram'] = 1600
args['timeout'] = 180
print('Using %d backups' % num_hosts)
print('Running with objects of size %d for a %d MB partition with '
'%d objs, %d tombstones (%.2f%% of space is tombstones)' %
(objectBytes, partitionBytes / 1024 / 1024, numObjs, numTombs,
tombPct * 100.0))
r = recovery.insist(**args)
print('->', r['ns'] / 1e6, 'ms', '(run %s)' % r['run'])
print(objectBytes, partitionBytes / 1024 / 1024, numObjs, numTombs,
tombPct * 100.0, r['ns'] / 1e6, file=dat)
print(file=dat)
print(file=dat)
| 39.202899 | 80 | 0.684658 |
47a93b0eb4d56ddc3c3baf081d36353d14036954 | 1,713 | py | Python | gpMgmt/bin/gpcheckcat_modules/mirror_matching_check.py | khuddlefish/gpdb | 2d20bae838c5ed433eecf6ecceca1b8dd5221197 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gpcheckcat_modules/mirror_matching_check.py | khuddlefish/gpdb | 2d20bae838c5ed433eecf6ecceca1b8dd5221197 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gpcheckcat_modules/mirror_matching_check.py | khuddlefish/gpdb | 2d20bae838c5ed433eecf6ecceca1b8dd5221197 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | from gppylib.gparray import get_gparray_from_config
class MirrorMatchingCheck:
def run_check(self, db_connection, logger):
logger.info('-----------------------------------')
logger.info('Checking mirroring_matching')
is_config_mirror_enabled = get_gparray_from_config().hasMirrors
# This query returns the mirroring status of all segments
mirroring_query = """SELECT gp_segment_id, mirror_existence_state FROM gp_dist_random('gp_persistent_relation_node') GROUP BY 1,2"""
segment_mirroring_result = db_connection.query(mirroring_query).getresult()
mismatching_segments = []
for (seg_id, mirror_state) in segment_mirroring_result:
is_segment_mirrored = mirror_state > 1
if mirror_state == 0:
continue # 0 is considered a match in either situation
if is_segment_mirrored != is_config_mirror_enabled:
mismatching_segments.append((seg_id, mirror_state))
if mismatching_segments:
logger.info('[FAIL] Mirroring mismatch detected')
logger.info("The GP configuration reports mirror enabling is: %s" % is_config_mirror_enabled)
logger.error("The following segments are mismatched in PT:")
logger.error("")
logger.error("Segment ID:\tmirror_existence_state:")
for (seg_id, mirror_existence_state) in mismatching_segments:
label = "Enabled" if mirror_existence_state > 1 else "Disabled"
logger.error("%i\t\t%i (%s)" % (seg_id, mirror_existence_state, label))
else:
logger.info('[OK] %s' % "mirroring_matching")
return mismatching_segments
| 46.297297 | 140 | 0.659661 |
f3aa841f0d270a98b3e51741120637b5f0e594be | 220 | py | Python | app/main/__init__.py | sbybfai/flask_blog | f08bcb6a73d85a926992d1d451f1a2075331a34f | [
"Apache-2.0"
] | null | null | null | app/main/__init__.py | sbybfai/flask_blog | f08bcb6a73d85a926992d1d451f1a2075331a34f | [
"Apache-2.0"
] | 3 | 2020-03-24T16:41:57.000Z | 2021-06-01T23:14:22.000Z | app/main/__init__.py | sbybfai/flask_blog | f08bcb6a73d85a926992d1d451f1a2075331a34f | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint
main = Blueprint('main', __name__)
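# views and errors import this blueprint themselves, so they are imported only
# after "main" is created to avoid a circular import.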
from . import views, errors
from ..models import Permission
@main.app_context_processor
def inject_permissions():
return dict(Permission=Permission) | 20 | 38 | 0.781818 |
03b7e697c4cfc2104ad0c6403314515bf76c20e0 | 124 | py | Python | run.py | isaacrivas10/fb_chatbot | 01a78e690e698609a5b89dc8f04c216b248f5047 | [
"MIT"
] | null | null | null | run.py | isaacrivas10/fb_chatbot | 01a78e690e698609a5b89dc8f04c216b248f5047 | [
"MIT"
] | 5 | 2019-12-24T08:14:37.000Z | 2019-12-24T08:56:39.000Z | run.py | isaacrivas10/fb_chatbot | 01a78e690e698609a5b89dc8f04c216b248f5047 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from store import create_App
app = create_App()
if __name__ == "__main__":
app.run(debug=True) | 15.5 | 28 | 0.653226 |
d8a5f3a827e173c8d95e45cdf8b896394e09a7bb | 3,429 | py | Python | designate/openstack/deprecated/exception.py | melodous/designate | c0da0c464c07d34a9855ab704302d7662beb7c1d | [
"Apache-2.0"
] | null | null | null | designate/openstack/deprecated/exception.py | melodous/designate | c0da0c464c07d34a9855ab704302d7662beb7c1d | [
"Apache-2.0"
] | null | null | null | designate/openstack/deprecated/exception.py | melodous/designate | c0da0c464c07d34a9855ab704302d7662beb7c1d | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exceptions common to OpenStack projects
"""
import logging
from designate.i18n import _
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class Error(Exception):
def __init__(self, message=None):
super(Error, self).__init__(message)
class ApiError(Error):
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
super(ApiError, self).__init__('%s: %s' % (code, message))
class NotFound(Error):
pass
class UnknownScheme(Error):
msg = "Unknown scheme '%s' found in URI"
def __init__(self, scheme):
msg = self.__class__.msg % scheme
super(UnknownScheme, self).__init__(msg)
class BadStoreUri(Error):
msg = "The Store URI %s was malformed. Reason: %s"
def __init__(self, uri, reason):
msg = self.__class__.msg % (uri, reason)
super(BadStoreUri, self).__init__(msg)
class Duplicate(Error):
pass
class NotAuthorized(Error):
pass
class NotEmpty(Error):
pass
class Invalid(Error):
pass
class BadInputError(Exception):
"""Error resulting from a client sending bad input to a server"""
pass
class MissingArgumentError(Error):
pass
class DatabaseMigrationError(Error):
pass
class ClientConnectionError(Exception):
"""Error resulting from a client connecting to a server"""
pass
def wrap_exception(f):
def _wrap(*args, **kw):
try:
return f(*args, **kw)
except Exception, e:
if not isinstance(e, Error):
# exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception(_('Uncaught exception'))
# logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise
_wrap.func_name = f.func_name
return _wrap
class OpenstackException(Exception):
"""
Base Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = "An unknown exception occurred"
def __init__(self, **kwargs):
try:
self._error_string = self.message % kwargs
except Exception as e:
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise e
else:
# at least get the core message out if something happened
self._error_string = self.message
def __str__(self):
return self._error_string
class MalformedRequestBody(OpenstackException):
message = "Malformed message body: %(reason)s"
class InvalidContentType(OpenstackException):
message = "Invalid content type %(content_type)s"
| 23.979021 | 78 | 0.664334 |
34d37aca7adda32d511da9b4f4463075c1e4bbce | 537 | py | Python | export_readiness/migrations/0032_auto_20190307_1512.py | kaedroho/dit-directory-cms | 67c15eeed19e7b3583f1fce1969230ddf83b6813 | [
"MIT"
] | 6 | 2018-03-20T11:19:07.000Z | 2021-10-05T07:53:11.000Z | export_readiness/migrations/0032_auto_20190307_1512.py | kaedroho/dit-directory-cms | 67c15eeed19e7b3583f1fce1969230ddf83b6813 | [
"MIT"
] | 802 | 2018-02-05T14:16:13.000Z | 2022-02-10T10:59:21.000Z | export_readiness/migrations/0032_auto_20190307_1512.py | kaedroho/dit-directory-cms | 67c15eeed19e7b3583f1fce1969230ddf83b6813 | [
"MIT"
] | 6 | 2019-01-22T13:19:37.000Z | 2019-07-01T10:35:26.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-03-07 15:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('export_readiness', '0031_internationallandingpage_squashed_0050_auto_20190219_1633'),
]
operations = [
migrations.AlterField(
model_name='articlepage',
name='article_teaser',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| 25.571429 | 95 | 0.666667 |
4f87210f5cc257cb2b6172bb09c3037a4b4d9c83 | 993 | py | Python | mayloop/protocol/fixed_length_message.py | amol9/mayloop | 9bf181ab0e0168596bbbf8f76f1b21b018dcd4ca | [
"MIT"
] | null | null | null | mayloop/protocol/fixed_length_message.py | amol9/mayloop | 9bf181ab0e0168596bbbf8f76f1b21b018dcd4ca | [
"MIT"
] | null | null | null | mayloop/protocol/fixed_length_message.py | amol9/mayloop | 9bf181ab0e0168596bbbf8f76f1b21b018dcd4ca | [
"MIT"
] | null | null | null | import struct
from .message import Message
class FixedLengthMessage(Message):
def __init__(self):
Message.__init__(self)
self._length = None
def dataReceived(self, data):
self._buffer += data
next_message_available = True
while(next_message_available):
if len(self._buffer) >= 4 and self._length is None:
self._length = struct.unpack('>i', self._buffer[:4])[0]
self._buffer = self._buffer[4:]
if self._length is not None:
if len(self._buffer) > self._length:
self.messageReceived(self._buffer[0 : self._length])
self._buffer = self._buffer[self._length : ]
self._length = None
elif len(self._buffer) == self._length:
self.messageReceived(self._buffer[0 : self._length])
self._buffer = b''
self._length = None
next_message_available = False
else:
next_message_available = False
def sendMessage(self, data):
		payload = data.encode('utf-8')
		message = struct.pack('>i', len(payload)) + payload
self.transport.write(message)
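		# Framing sketch: sendMessage('abc') writes b'\x00\x00\x00\x03abc', a
		# 4-byte big-endian length prefix followed by the UTF-8 payload.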
| 24.219512 | 63 | 0.688822 |
d32baa89fdee3667f888ddf926ebe6b8df35c309 | 4,358 | py | Python | mmdet/models/necks/segprocess_head_neck.py | Gitgigabyte/mmd | 02cf37884d3ac9a6018656d1871695669966dfb3 | [
"Apache-2.0"
] | 1 | 2020-03-13T08:37:35.000Z | 2020-03-13T08:37:35.000Z | mmdet/models/necks/segprocess_head_neck.py | Gitgigabyte/mmd | 02cf37884d3ac9a6018656d1871695669966dfb3 | [
"Apache-2.0"
] | null | null | null | mmdet/models/necks/segprocess_head_neck.py | Gitgigabyte/mmd | 02cf37884d3ac9a6018656d1871695669966dfb3 | [
"Apache-2.0"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import kaiming_init
import torch
from mmdet.core import auto_fp16, force_fp32
from mmdet.models.registry import NECKS
from mmdet.models.utils import ConvModule
@NECKS.register_module
class SemanticProcessNeck(nn.Module):
def __init__(self,
num_convs=4,
feature_channels=256,
mask_channels=80,
combine_level = 2,
num_levels = 5,
conv_out_channels=256,
groups=True,
conv_cfg=None,
norm_cfg=None):
super(SemanticProcessNeck, self).__init__()
self.num_convs = num_convs
self.mask_channels = mask_channels
self.combine_level = combine_level
self.num_levels = num_levels
self.conv_out_channels = conv_out_channels
self.feature_channels = feature_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.groups = groups
if self.groups:
self.lateral_conv = ConvModule(self.mask_channels,self.mask_channels, 3, stride=2, padding=1,
groups = self.mask_channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.lateral_conv = ConvModule(
self.mask_channels, self.mask_channels, 3, stride=2, padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg
)
# self.convs = nn.ModuleList()
# for i in range(self.num_convs):
# # in_channels = self.in_channels if i == 0 else conv_out_channels
# if self.groups:
# self.convs.append(
# ConvModule(
# conv_out_channels,
# conv_out_channels,
# 3,
# padding=1,
# groups=in_channels,
# conv_cfg=self.conv_cfg,
# norm_cfg=self.norm_cfg))
# else:
# self.convs.append(
# ConvModule(
# conv_out_channels,
# conv_out_channels,
# 3,
# padding=1,
# conv_cfg=self.conv_cfg,
# norm_cfg=self.norm_cfg))
# self.conv_embedding = ConvModule(
# conv_out_channels,
# conv_out_channels,
# 1,
# conv_cfg=self.conv_cfg,
# norm_cfg=self.norm_cfg)
# self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
self.combine_conv = ConvModule(self.mask_channels+self.feature_channels,
self.conv_out_channels,
3,
padding=1,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg)
def init_weights(self):
pass
@auto_fp16()
def forward(self, feats, masks):
assert len(feats) == self.num_levels
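        # Gather every level to the combine_level resolution, average them,
        # fuse the result with the downsampled semantic masks, then scatter
        # the fused feature back to each level as a residual.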
features = []
gather_size = feats[self.combine_level].size()[2:]
for i in range(self.num_levels):
if i < self.combine_level:
gathered = F.interpolate(
feats[i], size=gather_size, mode='nearest')
else:
gathered = F.adaptive_max_pool2d(
feats[i], gather_size)
features.append(gathered)
combine_feature = sum(features) / len(features)
masks = self.lateral_conv(masks)
combine_feature = torch.cat([combine_feature, masks], dim=1)
combine_feature = self.combine_conv(combine_feature)
outs = []
for i in range(self.num_levels):
out_size = feats[i].size()[2:]
if i < self.combine_level:
residual = F.interpolate(combine_feature, size=out_size, mode='nearest')
else:
residual = F.adaptive_max_pool2d(combine_feature, out_size)
outs.append(residual + feats[i])
        # Return the residual-fused multi-scale features.
        return tuple(outs)
| 37.247863 | 105 | 0.514915 |
fb50080d71de9b1ff445d64536c7e82de9b45000 | 2,082 | py | Python | py4we/NM80PowerIO.py | gtpedrosa/Python4WindEnergy | f8ad09018420cfb3a419173f97b129de7118d814 | [
"Apache-2.0"
] | 48 | 2015-01-19T18:21:10.000Z | 2021-11-27T22:41:06.000Z | py4we/NM80PowerIO.py | arash7444/Python4WindEnergy | 8f97a5f86e81ce01d80dafb6f8104165fd3ad397 | [
"Apache-2.0"
] | 1 | 2016-05-24T06:07:07.000Z | 2016-05-24T08:26:29.000Z | py4we/NM80PowerIO.py | arash7444/Python4WindEnergy | 8f97a5f86e81ce01d80dafb6f8104165fd3ad397 | [
"Apache-2.0"
] | 24 | 2015-06-26T14:44:07.000Z | 2021-06-07T18:36:52.000Z | """ IO classes for NM80 Power Curve file types
Copyright (C) 2013 DTU Wind Energy
Author: Iva Hrgovan
Email: ivah@dtu.dk
Last revision: 29-01-2014
License: Apache v2.0, http://www.apache.org/licenses/LICENSE-2.0
"""
from __future__ import print_function
from we_file_io import WEFileIO, TestWEFileIO
import unittest
import matplotlib.pyplot as plt
class NM80PowerIO(WEFileIO):
""" NM80 Power Curve IO
reads : wind speed,
mechanical power,
rotational speed,
tip speed ratio and
pitch setting
splits data to items by windspeed
writes data to a separate file test1.dat without comment header
"""
def _write(self):
""" Write a file (overrided)
"""
with open('test1.dat', 'w') as f:
for line in self.items:
                if line and not line.startswith("%"):  # skip comment/header lines
f.write(line + '\n')
def _read(self):
""" Read the file (overrided)
"""
        self.v = []  # reset per read so repeated reads do not accumulate
        self.p = []
        with open(self.filename, 'r') as f:
            self.data = f.read()
        self.items = self.data.split('\n')
for i in range(3, len(self.items)):
self.v.append(float(self.items[i].split()[0]))
self.p.append(float(self.items[i].split()[1]))
    def _plot(self, fig):
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.plot(self.v, self.p, 'r')
axes.set_xlabel('V [m/s]')
axes.set_ylabel('P [kW]')
axes.set_title('NM80 Power Curve');
### Main function ---------------------------------------------------------
if __name__ == '__main__':
""" This is the main fuction that will run the tests automatically
$> python my_file_type.py
.
----------------------------------------------------------------------
Ran X test in XXXs
OK
"""
# unittest.main()
t = NM80PowerIO('PowerExp_NM80_3deg.dat')
t.write()
t.plot()
| 25.390244 | 77 | 0.496158 |
9ac63642de917d4f25c24377243a2f2a2a04144f | 619 | py | Python | shot_detector/filters/sliding_window/kurtosis_swfilter.py | w495/shot_detector | 617ff45c9c3c96bbd9a975aef15f1b2697282b9c | [
"BSD-3-Clause"
] | 18 | 2015-09-27T21:34:18.000Z | 2022-03-24T12:14:51.000Z | shot_detector/filters/sliding_window/kurtosis_swfilter.py | w495/shot_detector | 617ff45c9c3c96bbd9a975aef15f1b2697282b9c | [
"BSD-3-Clause"
] | 6 | 2021-03-18T21:21:35.000Z | 2022-03-11T23:32:55.000Z | shot_detector/filters/sliding_window/kurtosis_swfilter.py | w495/shot_detector | 617ff45c9c3c96bbd9a975aef15f1b2697282b9c | [
"BSD-3-Clause"
] | 3 | 2017-09-14T20:53:26.000Z | 2021-12-18T19:18:18.000Z | # -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
import logging
from .scipy_stat_swfilter import SciPyStatSWFilter
class KurtosisSWFilter(SciPyStatSWFilter):
"""
    Reduce each sliding window of features to its kurtosis.
"""
__logger = logging.getLogger(__name__)
def aggregate_window_item(self, features, **kwargs):
"""
:param features:
:param kwargs:
:return:
"""
describe_result = self.describe(features, **kwargs)
return describe_result.kurtosis
| 21.344828 | 64 | 0.633279 |
0b01f4906dc10ef364b42cd1176c3ac7cbc35d6f | 6,807 | py | Python | indico/core/notifications.py | aiforrural/Digital-Events-Example | 628aaa8727b259b9367ac0ae1c5ba8e9e95eca82 | [
"MIT"
] | 1 | 2021-02-08T09:34:27.000Z | 2021-02-08T09:34:27.000Z | indico/core/notifications.py | pamirk/indico | c3b4e06b11cc21ad497f74d0b2ca901bc1b2a768 | [
"MIT"
] | null | null | null | indico/core/notifications.py | pamirk/indico | c3b4e06b11cc21ad497f74d0b2ca901bc1b2a768 | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import re
import time
from functools import wraps
from types import GeneratorType
from flask import g
from indico.core.config import config
from indico.core.db import db
from indico.core.logger import Logger
from indico.util.string import truncate
logger = Logger.get('emails')
def email_sender(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
mails = fn(*args, **kwargs)
if mails is None:
return
if isinstance(mails, GeneratorType):
mails = list(mails)
elif not isinstance(mails, list):
mails = [mails]
for mail in [_f for _f in mails if _f]:
send_email(mail)
return wrapper
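# Illustrative use of the decorator above (assumed names, not from this module):
#
#     @email_sender
#     def notify_admins(event):
#         yield make_email(to_list='admin@example.com',
#                          subject='Something happened', body='...')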
def send_email(email, event=None, module=None, user=None, log_metadata=None):
"""Send an email created by :func:`make_email`.
When called while inside a RH, the email will be queued and only
sent or passed on to celery once the database commit succeeded.
:param email: The email object returned by :func:`make_email`
:param event: If specified, the email will be saved in that
event's log
:param module: The module name to show in the email log
:param user: The user to show in the email log
:param log_metadata: A metadata dictionary to be saved in the event's log
"""
from indico.core.emails import do_send_email, send_email_task
fn = send_email_task.delay if config.SMTP_USE_CELERY else do_send_email
# we log the email immediately (as pending). if we don't commit,
# the log message will simply be thrown away later
log_entry = _log_email(email, event, module, user, log_metadata)
if 'email_queue' in g:
g.email_queue.append((fn, email, log_entry))
else:
fn(email, log_entry)
def _log_email(email, event, module, user, meta=None):
from indico.modules.events.logs import EventLogKind, EventLogRealm
if not event:
return None
log_data = {
'content_type': 'text/html' if email['html'] else 'text/plain',
'from': email['from'],
'to': sorted(email['to']),
'cc': sorted(email['cc']),
'bcc': sorted(email['bcc']),
'subject': email['subject'],
'body': email['body'].strip(),
'state': 'pending',
'sent_dt': None,
}
return event.log(EventLogRealm.emails, EventLogKind.other, module or 'Unknown', log_data['subject'],
user, type_='email', data=log_data, meta=meta)
def init_email_queue():
"""Enable email queueing for the current context."""
g.setdefault('email_queue', [])
def flush_email_queue():
"""Send all the emails in the queue.
Note: This function does a database commit to update states
in case of failures or immediately-sent emails. It should only
be called if the session is in a state safe to commit or after
doing a commit/rollback of any other changes that might have
been pending.
"""
from indico.core.emails import store_failed_email, update_email_log_state
queue = g.get('email_queue', [])
if not queue:
return
logger.debug('Sending %d queued emails', len(queue))
for fn, email, log_entry in queue:
try:
fn(email, log_entry)
except Exception:
# Flushing the email queue happens after a commit.
# If anything goes wrong here we keep going and just log
# it to avoid losing (more) emails in case celery is not
# used for email sending or there is a temporary issue
# with celery.
if log_entry:
update_email_log_state(log_entry, failed=True)
path = store_failed_email(email, log_entry)
logger.exception('Flushing queued email "%s" failed; stored data in %s',
truncate(email['subject'], 100), path)
# Wait for a short moment in case it's a very temporary issue
time.sleep(0.25)
del queue[:]
db.session.commit()
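# Typical lifecycle of the queue above (an illustrative sketch, not code from
# this module):
#
#     init_email_queue()            # start queueing for the current context
#     send_email(make_email(...))   # gets queued instead of sent
#     db.session.commit()           # commit DB changes first
#     flush_email_queue()           # then actually dispatch the queued emails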
def make_email(to_list=None, cc_list=None, bcc_list=None, from_address=None, reply_address=None, attachments=None,
subject=None, body=None, template=None, html=False):
"""Create an email.
The preferred way to specify the email content is using the
`template` argument. To do so, use :func:`.get_template_module` on
a template inheriting from ``emails/base.txt`` for text emails or
``emails/base.html`` for HTML emails.
:param to_list: The recipient email or a collection of emails
:param cc_list: The CC email or a collection of emails
:param bcc_list: The BCC email or a collection of emails
:param from_address: The sender address. Defaults to noreply.
:param reply_address: The reply-to address or a collection of addresses.
Defaults to empty.
:param attachments: A list of attachments. Each attachment can be
a `MIMEBase` subclass, a 3-tuple of the form
``(filename, content, mimetype)``, or a 2-tuple
``(filename, content)`` in which case the mime
type will be guessed from the file name.
:param subject: The subject of the email.
:param body: The body of the email:
:param template: A template module containing ``get_subject`` and
``get_body`` macros.
:param html: ``True`` if the email body is HTML
"""
if template is not None and (subject is not None or body is not None):
raise ValueError("Only subject/body or template can be passed")
if template:
subject = template.get_subject()
body = template.get_body()
if config.DEBUG and '\n' in subject:
raise ValueError('Email subject contains linebreaks')
subject = re.sub(r'\s+', ' ', subject)
if to_list is None:
to_list = set()
if cc_list is None:
cc_list = set()
if bcc_list is None:
bcc_list = set()
to_list = {to_list} if isinstance(to_list, str) else to_list
cc_list = {cc_list} if isinstance(cc_list, str) else cc_list
bcc_list = {bcc_list} if isinstance(bcc_list, str) else bcc_list
reply_address = {reply_address} if isinstance(reply_address, str) else (reply_address or set())
return {
'to': set(to_list),
'cc': set(cc_list),
'bcc': set(bcc_list),
'from': from_address or config.NO_REPLY_EMAIL,
'reply_to': set(reply_address),
'attachments': attachments or [],
'subject': subject.strip(),
'body': body.strip(),
'html': html,
}
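# Illustrative call (assumed addresses, not part of this module):
#
#     email = make_email(to_list='user@example.com',
#                        from_address='noreply@example.com',
#                        subject='Hello', body='Hi there!')
#     send_email(email)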
| 38.457627 | 114 | 0.643455 |
2d01031dae7c7db0250967eb777f4a0c97d35c1c | 28,012 | py | Python | tests/deployd/test_watchers.py | yuanxu-li/paasta | 5b04f45659293f873c65111a9d1d0909aeed4019 | [
"Apache-2.0"
] | null | null | null | tests/deployd/test_watchers.py | yuanxu-li/paasta | 5b04f45659293f873c65111a9d1d0909aeed4019 | [
"Apache-2.0"
] | null | null | null | tests/deployd/test_watchers.py | yuanxu-li/paasta | 5b04f45659293f873c65111a9d1d0909aeed4019 | [
"Apache-2.0"
] | null | null | null | import sys
import unittest
import mock
from pytest import raises
from requests.exceptions import RequestException
from paasta_tools.deployd.common import BaseServiceInstance
class FakePyinotify: # pragma: no cover
    class ProcessEvent:
pass
@property
def WatchManager(self):
pass
@property
def EventsCodes(self):
pass
@property
def Notifier(self):
pass
# This module is only available on linux
# and we will be mocking it in the unit tests anyway
# so this just creates it as a dummy module to prevent
# the ImportError
sys.modules['pyinotify'] = FakePyinotify
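# The same stub-before-import trick works for any platform-specific dependency
# (illustrative sketch, not part of this test suite):
#
#     import sys, types
#     sys.modules['linux_only_pkg'] = types.ModuleType('linux_only_pkg')
#     import code_that_imports_linux_only_pkg  # now importable on any OS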
from paasta_tools.deployd.watchers import PaastaWatcher # noqa
from paasta_tools.deployd.watchers import SoaFileWatcher # noqa
from paasta_tools.deployd.watchers import YelpSoaEventHandler # noqa
from paasta_tools.deployd.watchers import AutoscalerWatcher # noqa
from paasta_tools.deployd.watchers import PublicConfigFileWatcher # noqa
from paasta_tools.deployd.watchers import PublicConfigEventHandler # noqa
from paasta_tools.deployd.watchers import get_service_instances_needing_update # noqa
from paasta_tools.deployd.watchers import get_marathon_clients_from_config # noqa
from paasta_tools.deployd.watchers import MaintenanceWatcher # noqa
class TestPaastaWatcher(unittest.TestCase):
def test_init(self):
mock_instances_that_need_to_be_bounced_in_the_future = mock.Mock()
PaastaWatcher(mock_instances_that_need_to_be_bounced_in_the_future, 'westeros-prod', config=mock.Mock())
class TestAutoscalerWatcher(unittest.TestCase):
def setUp(self):
self.mock_zk = mock.Mock()
self.mock_instances_that_need_to_be_bounced_in_the_future = mock.Mock()
self.watcher = AutoscalerWatcher(
self.mock_instances_that_need_to_be_bounced_in_the_future,
"westeros-prod",
zookeeper_client=self.mock_zk,
config=mock.Mock(),
)
def test_watch_folder(self):
with mock.patch(
'paasta_tools.deployd.watchers.ChildrenWatch', autospec=True,
) as mock_children_watch, mock.patch(
'paasta_tools.deployd.watchers.AutoscalerWatcher.watch_node', autospec=True,
) as mock_watch_node:
self.watcher.watch_folder('/path/autoscaling.lock')
assert not mock_children_watch.called
mock_watcher = mock.Mock(_client=mock.Mock(get_children=mock.Mock(return_value=[])))
mock_children_watch.return_value = mock_watcher
self.watcher.watch_folder('/rick/beth')
mock_children_watch.assert_called_with(
self.mock_zk,
'/rick/beth',
func=self.watcher.process_folder_event,
send_event=True,
)
assert not mock_watch_node.called
mock_children = mock.Mock(side_effect=[['morty', 'summer'], [], []])
mock_watcher = mock.Mock(_client=mock.Mock(get_children=mock_children))
mock_children_watch.return_value = mock_watcher
self.watcher.watch_folder('/rick/beth')
assert not mock_watch_node.called
calls = [
mock.call(
self.mock_zk,
'/rick/beth',
func=self.watcher.process_folder_event,
send_event=True,
),
mock.call(
self.mock_zk,
'/rick/beth/morty',
func=self.watcher.process_folder_event,
send_event=True,
),
mock.call(
self.mock_zk,
'/rick/beth/summer',
func=self.watcher.process_folder_event,
send_event=True,
),
]
for call in calls:
# this is a bit nasty because the calls to _client get lumped in too
                # this just checks that the calls we really care about happened
assert call in mock_children_watch.mock_calls
mock_watcher = mock.Mock(_client=mock.Mock(get_children=mock.Mock(return_value=['instances'])))
mock_children_watch.return_value = mock_watcher
self.watcher.watch_folder('/rick/beth')
mock_watch_node.assert_called_with(self.watcher, '/rick/beth/instances', enqueue=False)
mock_watch_node.reset_mock()
mock_watcher = mock.Mock(_client=mock.Mock(get_children=mock.Mock(return_value=[])))
mock_children_watch.return_value = mock_watcher
self.watcher.watch_folder('/rick/beth/instances')
mock_watch_node.assert_called_with(self.watcher, '/rick/beth/instances', enqueue=False)
mock_watcher = mock.Mock(_client=mock.Mock(get_children=mock.Mock(return_value=['instances'])))
mock_children_watch.return_value = mock_watcher
self.watcher.watch_folder('/rick/beth', enqueue_children=True)
mock_watch_node.assert_called_with(self.watcher, '/rick/beth/instances', enqueue=True)
def test_watch_node(self):
with mock.patch(
'paasta_tools.deployd.watchers.DataWatch', autospec=True,
) as mock_data_watch:
self.watcher.watch_node('/some/node')
mock_data_watch.assert_called_with(
self.mock_zk,
'/some/node',
func=self.watcher.process_node_event,
send_event=True,
)
def test_process_node_event(self):
with mock.patch(
'paasta_tools.deployd.common.get_priority', autospec=True, return_value=0,
), mock.patch(
'paasta_tools.deployd.watchers.EventType', autospec=True,
) as mock_event_type, mock.patch(
'time.time', autospec=True, return_value=1,
):
mock_event_other = mock_event_type.DELETED
mock_event = mock.Mock(
type=mock_event_other,
path='/autoscaling/service/instance/instances',
)
assert not self.mock_instances_that_need_to_be_bounced_in_the_future.put.called
mock_event_created = mock_event_type.CREATED
mock_event = mock.Mock(
type=mock_event_created,
path='/autoscaling/service/instance/instances',
)
self.watcher.process_node_event(mock.Mock(), mock.Mock(), mock_event)
self.mock_instances_that_need_to_be_bounced_in_the_future.put.assert_called_with(BaseServiceInstance(
service='service',
instance='instance',
bounce_by=1,
bounce_timers=None,
watcher=self.watcher.__class__.__name__,
priority=0,
failures=0,
))
mock_event_changed = mock_event_type.CHANGED
mock_event = mock.Mock(
type=mock_event_changed,
path='/autoscaling/service/instance/instances',
)
self.watcher.process_node_event(mock.Mock(), mock.Mock(), mock_event)
self.mock_instances_that_need_to_be_bounced_in_the_future.put.assert_called_with(BaseServiceInstance(
service='service',
instance='instance',
bounce_by=1,
bounce_timers=None,
watcher=self.watcher.__class__.__name__,
priority=0,
failures=0,
))
def test_process_folder_event(self):
with mock.patch(
'paasta_tools.deployd.watchers.EventType', autospec=True,
) as mock_event_type, mock.patch(
'paasta_tools.deployd.watchers.AutoscalerWatcher.watch_folder', autospec=True,
) as mock_watch_folder:
mock_event_other = mock_event_type.DELETED
mock_event = mock.Mock(
type=mock_event_other,
path='/autoscaling/service/instance',
)
self.watcher.process_folder_event([], mock_event)
assert not mock_watch_folder.called
mock_event_child = mock_event_type.CHILD
mock_event = mock.Mock(
type=mock_event_child,
path='/rick/beth',
)
self.watcher.process_folder_event(['morty', 'summer'], mock_event)
calls = [
mock.call(self.watcher, '/rick/beth/morty', enqueue_children=True),
mock.call(self.watcher, '/rick/beth/summer', enqueue_children=True),
]
mock_watch_folder.assert_has_calls(calls)
def test_run(self):
with mock.patch(
'time.sleep', autospec=True, side_effect=LoopBreak,
), mock.patch(
'paasta_tools.deployd.watchers.AutoscalerWatcher.watch_folder', autospec=True,
) as mock_watch_folder:
assert not self.watcher.is_ready
with raises(LoopBreak):
self.watcher.run()
assert self.watcher.is_ready
mock_watch_folder.assert_called_with(self.watcher, '/autoscaling')
class LoopBreak(Exception):
pass
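# Pattern note (illustrative, not part of the suite): raising from `time.sleep`
# via `side_effect` is a common way to break out of a watcher's infinite
# `run()` loop in tests, e.g.
#
#     with mock.patch('time.sleep', autospec=True, side_effect=LoopBreak):
#         with raises(LoopBreak):
#             watcher.run()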
class TestSoaFileWatcher(unittest.TestCase):
def setUp(self):
mock_instances_that_need_to_be_bounced_in_the_future = mock.Mock()
with mock.patch(
'paasta_tools.deployd.watchers.pyinotify.WatchManager', autospec=True,
), mock.patch(
'paasta_tools.deployd.watchers.YelpSoaEventHandler', autospec=True,
), mock.patch(
'paasta_tools.deployd.watchers.pyinotify.Notifier', autospec=True,
) as mock_notifier_class, mock.patch(
'paasta_tools.deployd.watchers.SoaFileWatcher.mask', autospec=True,
):
self.mock_notifier = mock.Mock()
mock_notifier_class.return_value = self.mock_notifier
self.watcher = SoaFileWatcher(
mock_instances_that_need_to_be_bounced_in_the_future, 'westeros-prod', config=mock.Mock(),
)
assert mock_notifier_class.called
def test_mask(self):
with mock.patch(
'paasta_tools.deployd.watchers.pyinotify.EventsCodes', autospec=True,
) as mock_event_codes:
mock_event_codes.OP_FLAGS = {'UNION_JACK': 1, 'STARS_AND_STRIPES': 2, 'IN_OPEN': 4}
assert self.watcher.mask == 3
def test_run(self):
self.watcher.run()
self.mock_notifier.loop.assert_called_with(callback=self.watcher.startup_checker)
def test_startup_checker(self):
assert not self.watcher.is_ready
self.watcher.startup_checker(mock.Mock())
assert self.watcher.is_ready
class TestPublicConfigWatcher(unittest.TestCase):
def setUp(self):
mock_instances_that_need_to_be_bounced_in_the_future = mock.Mock()
with mock.patch(
'paasta_tools.deployd.watchers.pyinotify.WatchManager', autospec=True,
), mock.patch(
'paasta_tools.deployd.watchers.PublicConfigEventHandler', autospec=True,
), mock.patch(
'paasta_tools.deployd.watchers.pyinotify.Notifier', autospec=True,
) as mock_notifier_class, mock.patch(
'paasta_tools.deployd.watchers.PublicConfigFileWatcher.mask', autospec=True,
):
self.mock_notifier = mock.Mock()
mock_notifier_class.return_value = self.mock_notifier
self.watcher = PublicConfigFileWatcher(
mock_instances_that_need_to_be_bounced_in_the_future, 'westeros-prod', config=mock.Mock(),
)
assert mock_notifier_class.called
def test_mask(self):
with mock.patch(
'paasta_tools.deployd.watchers.pyinotify.EventsCodes', autospec=True,
) as mock_event_codes:
mock_event_codes.OP_FLAGS = {'UNION_JACK': 1, 'STARS_AND_STRIPES': 2, 'IN_OPEN': 4}
assert self.watcher.mask == 3
def test_run(self):
self.watcher.run()
self.mock_notifier.loop.assert_called_with(callback=self.watcher.startup_checker)
def test_startup_checker(self):
assert not self.watcher.is_ready
self.watcher.startup_checker(mock.Mock())
assert self.watcher.is_ready
class TestMaintenanceWatcher(unittest.TestCase):
def setUp(self):
self.mock_instances_that_need_to_be_bounced_in_the_future = mock.Mock()
self.mock_marathon_client = mock.Mock()
mock_config = mock.Mock(get_deployd_maintenance_polling_frequency=mock.Mock(return_value=20))
with mock.patch(
'paasta_tools.deployd.watchers.get_marathon_clients_from_config', autospec=True,
):
self.watcher = MaintenanceWatcher(
self.mock_instances_that_need_to_be_bounced_in_the_future, "westeros-prod", config=mock_config,
)
def test_get_new_draining_hosts(self):
with mock.patch(
'paasta_tools.deployd.watchers.get_draining_hosts', autospec=True,
) as mock_get_draining_hosts:
mock_get_draining_hosts.return_value = ['host1', 'host2']
assert self.watcher.get_new_draining_hosts() == ['host1', 'host2']
assert self.watcher.draining == {'host1', 'host2'}
mock_get_draining_hosts.return_value = ['host1']
assert self.watcher.get_new_draining_hosts() == []
assert self.watcher.draining == {'host1'}
mock_get_draining_hosts.side_effect = RequestException
assert self.watcher.get_new_draining_hosts() == []
assert self.watcher.draining == {'host1'}
mock_get_draining_hosts.side_effect = None
mock_get_draining_hosts.return_value = ['host3', 'host1']
assert self.watcher.get_new_draining_hosts() == ['host3']
assert self.watcher.draining == {'host1', 'host3'}
mock_get_draining_hosts.return_value = []
assert self.watcher.get_new_draining_hosts() == []
assert self.watcher.draining == set()
def test_run(self):
with mock.patch(
'paasta_tools.deployd.watchers.MaintenanceWatcher.get_new_draining_hosts', autospec=True,
) as mock_get_new_draining_hosts, mock.patch(
'paasta_tools.deployd.watchers.MaintenanceWatcher.get_at_risk_service_instances', autospec=True,
) as mock_get_at_risk_service_instances, mock.patch(
'time.sleep', autospec=True, side_effect=LoopBreak,
):
mock_get_new_draining_hosts.return_value = []
assert not self.watcher.is_ready
with raises(LoopBreak):
self.watcher.run()
assert self.watcher.is_ready
assert not mock_get_at_risk_service_instances.called
mock_get_new_draining_hosts.return_value = ['host1', 'host2']
mock_get_at_risk_service_instances.return_value = ['si1', 'si2']
with raises(LoopBreak):
self.watcher.run()
mock_get_at_risk_service_instances.assert_called_with(self.watcher, ['host1', 'host2'])
calls = [
mock.call('si1'),
mock.call('si2'),
]
self.mock_instances_that_need_to_be_bounced_in_the_future.put.assert_has_calls(calls)
def test_get_at_risk_service_instances(self):
with mock.patch(
'paasta_tools.deployd.common.get_priority', autospec=True, return_value=0,
), mock.patch(
'paasta_tools.deployd.watchers.get_marathon_apps_with_clients', autospec=True,
) as mock_get_marathon_apps, mock.patch(
'time.time', autospec=True, return_value=1,
):
mock_marathon_apps = [
mock.Mock(tasks=[
mock.Mock(
host='host1',
app_id='/universe.c137.configsha.gitsha',
),
mock.Mock(
host='host2',
app_id='/universe.c138.configsha.gitsha',
),
]),
mock.Mock(tasks=[mock.Mock(
host='host1',
app_id='/universe.c139.configsha.gitsha',
)]),
mock.Mock(tasks=[mock.Mock(
host='host1',
app_id='/universe.c139.configsha.gitsha',
)]),
]
mock_client = mock.Mock()
mock_get_marathon_apps.return_value = [(app, mock_client) for app in mock_marathon_apps]
ret = self.watcher.get_at_risk_service_instances(['host1'])
expected = [
BaseServiceInstance(
service='universe',
instance='c137',
bounce_by=1,
watcher=self.watcher.__class__.__name__,
priority=0,
bounce_timers=None,
failures=0,
),
BaseServiceInstance(
service='universe',
instance='c139',
bounce_by=1,
watcher=self.watcher.__class__.__name__,
priority=0,
bounce_timers=None,
failures=0,
),
]
assert ret == expected
class TestPublicConfigEventHandler(unittest.TestCase):
def setUp(self):
self.handler = PublicConfigEventHandler()
self.mock_filewatcher = mock.Mock()
self.mock_config = mock.Mock(get_cluster=mock.Mock())
with mock.patch(
'paasta_tools.deployd.watchers.load_system_paasta_config', autospec=True, return_value=self.mock_config,
), mock.patch(
'paasta_tools.deployd.watchers.get_marathon_clients_from_config', autospec=True,
):
self.handler.my_init(self.mock_filewatcher)
def test_log(self):
self.handler.log.info('WHAAAAAT')
def test_filter_event(self):
mock_event = mock.Mock()
name = mock.PropertyMock(return_value='deployd.json')
type(mock_event).name = name
assert mock_event == self.handler.filter_event(mock_event)
mock_event = mock.Mock(maskname='MAJORAS')
name = mock.PropertyMock(return_value='another.file')
type(mock_event).name = name
assert self.handler.filter_event(mock_event) is None
mock_event = mock.Mock(maskname='IN_CREATE|IN_ISDIR', pathname='/foo/bar')
name = mock.PropertyMock(return_value='another.file')
type(mock_event).name = name
assert mock_event == self.handler.filter_event(mock_event)
def test_watch_new_folder(self):
mock_event = mock.Mock(maskname='MAJORAS')
self.handler.watch_new_folder(mock_event)
assert not self.mock_filewatcher.wm.add_watch.called
mock_event = mock.Mock(maskname='IN_CREATE|IN_ISDIR', pathname='/foo/')
self.handler.watch_new_folder(mock_event)
assert self.mock_filewatcher.wm.add_watch.called
def test_process_default(self):
with mock.patch(
'paasta_tools.deployd.watchers.PublicConfigEventHandler.filter_event', autospec=True,
) as mock_filter_event, mock.patch(
'paasta_tools.deployd.watchers.PublicConfigEventHandler.watch_new_folder', autospec=True,
), mock.patch(
'paasta_tools.deployd.watchers.get_services_for_cluster', autospec=True,
) as mock_get_services_for_cluster, mock.patch(
'paasta_tools.deployd.watchers.load_system_paasta_config', autospec=True,
) as mock_load_system_config, mock.patch(
'paasta_tools.deployd.watchers.get_service_instances_needing_update',
autospec=True,
) as mock_get_service_instances_needing_update, mock.patch(
'paasta_tools.deployd.watchers.rate_limit_instances', autospec=True,
) as mock_rate_limit_instances:
mock_event = mock.Mock()
mock_filter_event.return_value = mock_event
mock_load_system_config.return_value = self.mock_config
self.handler.process_default(mock_event)
assert mock_load_system_config.called
assert not mock_get_services_for_cluster.called
assert not mock_get_service_instances_needing_update.called
assert not mock_rate_limit_instances.called
assert not self.mock_filewatcher.instances_that_need_to_be_bounced_in_the_future.put.called
mock_load_system_config.return_value = mock.Mock(get_cluster=mock.Mock())
mock_get_service_instances_needing_update.return_value = []
self.handler.process_default(mock_event)
assert mock_load_system_config.called
assert mock_get_services_for_cluster.called
assert mock_get_service_instances_needing_update.called
assert not mock_rate_limit_instances.called
assert not self.mock_filewatcher.instances_that_need_to_be_bounced_in_the_future.put.called
mock_load_system_config.return_value = mock.Mock(get_deployd_big_bounce_rate=mock.Mock())
mock_si = mock.Mock()
mock_get_service_instances_needing_update.return_value = [mock_si]
mock_rate_limit_instances.return_value = [mock_si]
self.handler.process_default(mock_event)
assert mock_load_system_config.called
assert mock_get_services_for_cluster.called
assert mock_get_service_instances_needing_update.called
assert mock_rate_limit_instances.called
self.mock_filewatcher.instances_that_need_to_be_bounced_in_the_future.put.assert_called_with(mock_si)
class TestYelpSoaEventHandler(unittest.TestCase):
def setUp(self):
self.handler = YelpSoaEventHandler()
self.mock_filewatcher = mock.Mock()
with mock.patch(
'paasta_tools.deployd.watchers.get_marathon_clients_from_config', autospec=True,
):
self.handler.my_init(self.mock_filewatcher)
def test_log(self):
self.handler.log.info('WHAAAAAT')
def test_get_service_name_from_event(self):
mock_event = mock.Mock()
name = mock.PropertyMock(return_value='marathon-cluster.yaml')
type(mock_event).name = name
mock_event.path = '/blah/test-service'
assert "test-service" == self.handler.get_service_name_from_event(mock_event)
name = mock.PropertyMock(return_value='deployments.json')
type(mock_event).name = name
mock_event.path = '/blah/test-service'
assert "test-service" == self.handler.get_service_name_from_event(mock_event)
name = mock.PropertyMock(return_value='test-secret.json')
type(mock_event).name = name
mock_event.path = '/blah/test-service/secrets'
assert "test-service" == self.handler.get_service_name_from_event(mock_event)
name = mock.PropertyMock(return_value='something.json')
type(mock_event).name = name
mock_event.path = '/blah/test-service'
assert self.handler.get_service_name_from_event(mock_event) is None
name = mock.PropertyMock(return_value='another.file')
type(mock_event).name = name
mock_event.path = '/nail/blah/test-service'
assert self.handler.get_service_name_from_event(mock_event) is None
def test_watch_new_folder(self):
with mock.patch(
'os.listdir', autospec=True,
) as mock_os_list, mock.patch(
'paasta_tools.deployd.watchers.YelpSoaEventHandler.bounce_service', autospec=True,
) as mock_bounce_service:
mock_os_list.return_value = ["some.file", "some_other.file"]
mock_event = mock.Mock(maskname='MAJORAS', pathname='/some/path')
self.handler.watch_new_folder(mock_event)
assert not self.mock_filewatcher.wm.add_watch.called
mock_event = mock.Mock(maskname='IN_CREATE|IN_ISDIR', pathname='/foo')
name = mock.PropertyMock(return_value='universe')
type(mock_event).name = name
self.handler.watch_new_folder(mock_event)
assert self.mock_filewatcher.wm.add_watch.called
assert not mock_bounce_service.called
mock_os_list.return_value = ["some.file", "marathon-cluster.yaml"]
self.handler.watch_new_folder(mock_event)
assert self.mock_filewatcher.wm.add_watch.called
mock_bounce_service.assert_called_with(self.handler, 'universe')
mock_os_list.side_effect = OSError
mock_bounce_service.reset_mock()
self.handler.watch_new_folder(mock_event)
assert self.mock_filewatcher.wm.add_watch.called
assert not mock_bounce_service.called
def test_process_default(self):
mock_event = mock.Mock(path='/folder/universe')
type(mock_event).name = 'marathon-blah.yaml'
with mock.patch(
'paasta_tools.deployd.watchers.YelpSoaEventHandler.bounce_service', autospec=True,
) as mock_bounce_service, mock.patch(
'paasta_tools.deployd.watchers.YelpSoaEventHandler.watch_new_folder', autospec=True,
) as mock_watch_folder, mock.patch(
'paasta_tools.deployd.watchers.YelpSoaEventHandler.get_service_name_from_event', autospec=True,
) as mock_get_service_name_from_event:
mock_get_service_name_from_event.return_value = None
self.handler.process_default(mock_event)
mock_watch_folder.assert_called_with(self.handler, mock_event)
mock_get_service_name_from_event.assert_called_with(self.handler, mock_event)
assert not mock_bounce_service.called
mock_get_service_name_from_event.return_value = 'universe'
self.handler.process_default(mock_event)
mock_watch_folder.assert_called_with(self.handler, mock_event)
mock_get_service_name_from_event.assert_called_with(self.handler, mock_event)
mock_bounce_service.assert_called_with(self.handler, 'universe')
def test_bounce_service(self):
with mock.patch(
'paasta_tools.deployd.common.get_priority', autospec=True, return_value=0,
), mock.patch(
'paasta_tools.deployd.watchers.list_all_instances_for_service', autospec=True,
) as mock_list_instances, mock.patch(
'paasta_tools.deployd.watchers.get_service_instances_needing_update', autospec=True,
) as mock_get_service_instances_needing_update, mock.patch(
'time.time', autospec=True, return_value=1,
):
mock_list_instances.return_value = ['c137', 'c138']
mock_get_service_instances_needing_update.return_value = [('universe', 'c137')]
self.handler.bounce_service('universe')
mock_list_instances.assert_called_with(
service='universe',
clusters=[self.handler.filewatcher.cluster],
instance_type='marathon',
cache=False,
)
mock_get_service_instances_needing_update.assert_called_with(
self.handler.marathon_clients,
[
('universe', 'c137'),
('universe', 'c138'),
],
self.handler.filewatcher.cluster,
)
expected_si = BaseServiceInstance(
service='universe',
instance='c137',
bounce_by=1,
watcher='YelpSoaEventHandler',
bounce_timers=None,
priority=0,
failures=0,
)
self.mock_filewatcher.instances_that_need_to_be_bounced_in_the_future.put.assert_called_with(expected_si)
assert self.mock_filewatcher.instances_that_need_to_be_bounced_in_the_future.put.call_count == 1
| 44.322785 | 117 | 0.64251 |
245d5d32e040461dcc3bed9989ba952ff62db2d7 | 2,096 | py | Python | tests/integrations/android_sdk/AndroidSDK/test_update_emulator_config.py | pybee/briefcase | d7e9aa7bf15aa2abbc71e97aef9bea287129fdaa | [
"BSD-3-Clause"
] | 522 | 2015-07-28T16:06:18.000Z | 2019-03-25T17:16:55.000Z | tests/integrations/android_sdk/AndroidSDK/test_update_emulator_config.py | pybee/briefcase | d7e9aa7bf15aa2abbc71e97aef9bea287129fdaa | [
"BSD-3-Clause"
] | 154 | 2015-09-17T02:50:55.000Z | 2019-03-22T07:10:34.000Z | tests/integrations/android_sdk/AndroidSDK/test_update_emulator_config.py | pybee/briefcase | d7e9aa7bf15aa2abbc71e97aef9bea287129fdaa | [
"BSD-3-Clause"
] | 105 | 2015-09-25T08:43:26.000Z | 2019-03-25T15:59:27.000Z | import pytest
@pytest.fixture
def test_device(tmp_path):
"""Create an AVD configuration file."""
config_file = tmp_path / ".android" / "avd" / "testDevice.avd" / "config.ini"
config_file.parent.mkdir(parents=True)
# Write a default config. It contains:
# * blank lines
# * a key whose value explicitly contains an equals sign.
with config_file.open("w") as f:
f.write(
"""
avd.ini.encoding=UTF-8
hw.device.manufacturer=Google
hw.device.name=pixel
weird.key=good=bad
PlayStore.enabled=no
avd.name=beePhone
disk.cachePartition=yes
disk.cachePartition.size=42M
"""
)
return config_file
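# Note for readers (illustrative, not part of the suite): `weird.key=good=bad`
# exercises parsers that must split each line on the *first* '=' only, e.g.
#
#     key, value = line.split('=', 1)   # -> ('weird.key', 'good=bad')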
def test_update_existing(mock_sdk, test_device):
"""Existing keys in an Android AVD config can be updated."""
# Update 2 keys in the config
mock_sdk.update_emulator_config(
"testDevice",
{
"avd.name": "testDevice",
"disk.cachePartition.size": "37MB",
},
)
with test_device.open() as f:
content = f.read()
# Keys have been updated, order is preserved.
# Blank lines have been dropped.
assert (
content
== """avd.ini.encoding=UTF-8
hw.device.manufacturer=Google
hw.device.name=pixel
weird.key=good=bad
PlayStore.enabled=no
avd.name=testDevice
disk.cachePartition=yes
disk.cachePartition.size=37MB
"""
)
def test_new_content(mock_sdk, test_device):
"""New keys can be added to an Android AVD config."""
# Add 2 new keys to the config
mock_sdk.update_emulator_config(
"testDevice",
{
"skin.name": "pixel_3a",
"skin.path": "skins/pixel_3a",
},
)
with test_device.open() as f:
content = f.read()
# New keys are appended to the end of the file
    # Blank lines have been dropped
assert (
content
== """avd.ini.encoding=UTF-8
hw.device.manufacturer=Google
hw.device.name=pixel
weird.key=good=bad
PlayStore.enabled=no
avd.name=beePhone
disk.cachePartition=yes
disk.cachePartition.size=42M
skin.name=pixel_3a
skin.path=skins/pixel_3a
"""
)
| 23.032967 | 81 | 0.653626 |
f4133db7c2fd02d5beb4b71ef52c83a1691ea026 | 758 | py | Python | projeto_escala/documento_fiscal.py | laurourbano/Projetos_Python | 50e7f4a7ff34158385ea7b635bac95ec8a0363a1 | [
"MIT"
] | 1 | 2021-12-28T02:51:34.000Z | 2021-12-28T02:51:34.000Z | projeto_escala/documento_fiscal.py | laurourbano/Projetos_Python | 50e7f4a7ff34158385ea7b635bac95ec8a0363a1 | [
"MIT"
] | null | null | null | projeto_escala/documento_fiscal.py | laurourbano/Projetos_Python | 50e7f4a7ff34158385ea7b635bac95ec8a0363a1 | [
"MIT"
] | null | null | null | class documento_fiscal:
def __init__(self):
self.__NUM_DV1 = []
self.__NUM_DV2 = []
    def calcula_digito_verificador(self, documento, digito=1):
        # Left empty in this base class: CPF and CNPJ use different weights,
        # so concrete subclasses are expected to implement the check-digit rule.
        pass
def valido(self, documento):
documento = documento.replace('.', '').replace('/', '').replace('-', '')
if (not documento.isnumeric()):
return False
digitos = None
        if (len(documento) == 11):
            # CPF: 9 base digits followed by 2 check digits
            digitos = documento[:9]
        elif (len(documento) == 14):
            # CNPJ: 12 base digits followed by 2 check digits
            digitos = documento[:12]
else:
return False
dv1 = self.calcula_digito_verificador(digitos, 1)
dv2 = self.calcula_digito_verificador(digitos + str(dv1), 2)
return documento == digitos + str(dv1) + str(dv2) | 27.071429 | 80 | 0.563325 |
ed14320ac7e6a2468e6ae28d70af02a17125142c | 635 | py | Python | backend/conferences/migrations/0012_auto_20191012_2038.py | marcoacierno/pycon | 2b7b47598c4929769cc73e322b3fce2c89151e21 | [
"MIT"
] | 56 | 2018-01-20T17:18:40.000Z | 2022-03-28T22:42:04.000Z | backend/conferences/migrations/0012_auto_20191012_2038.py | marcoacierno/pycon | 2b7b47598c4929769cc73e322b3fce2c89151e21 | [
"MIT"
] | 2,029 | 2018-01-20T11:37:24.000Z | 2022-03-31T04:10:51.000Z | backend/conferences/migrations/0012_auto_20191012_2038.py | marcoacierno/pycon | 2b7b47598c4929769cc73e322b3fce2c89151e21 | [
"MIT"
] | 17 | 2018-03-17T09:44:28.000Z | 2021-12-27T19:57:35.000Z | # Generated by Django 2.2.5 on 2019-10-12 20:38
from django.db import migrations
import i18n.fields
class Migration(migrations.Migration):
dependencies = [
('conferences', '0011_auto_20190921_2340'),
]
operations = [
migrations.AddField(
model_name='deadline',
name='description',
field=i18n.fields.I18nTextField(blank=True, null=True, verbose_name='description'),
),
migrations.AlterField(
model_name='deadline',
name='name',
field=i18n.fields.I18nCharField(max_length=100, verbose_name='name'),
),
]
| 25.4 | 95 | 0.612598 |
3e55298f335664fe774ef0b1070a0ed523055413 | 1,855 | py | Python | planning/GamesTests/games/tetris/matrix_rotation.py | ChristsY12Robots/Robots | 306f000f85b41731a9e1249847434858319e5415 | [
"MIT"
] | null | null | null | planning/GamesTests/games/tetris/matrix_rotation.py | ChristsY12Robots/Robots | 306f000f85b41731a9e1249847434858319e5415 | [
"MIT"
] | null | null | null | planning/GamesTests/games/tetris/matrix_rotation.py | ChristsY12Robots/Robots | 306f000f85b41731a9e1249847434858319e5415 | [
"MIT"
] | null | null | null | def rotate_array(array, angle, wide=False):
'''
Rotates a rectangular or diamond 2D array in increments of 45 degrees.
Parameters:
array (list): a list containing sliceable sequences, such as list, tuple, or str
angle (int): a positive angle for rotation, in 45-degree increments.
wide (bool): whether a passed diamond array should rotate into a wide array
instead of a tall one (tall is the default). No effect on square matrices.
'''
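    # Worked examples (illustrative, derived from the logic below):
    #   rotate_array([[1, 2], [3, 4]], 90) -> [(3, 1), (4, 2)]
    #   rotate_array([[1, 2], [3, 4]], 45) -> [[1], [3, 2], [4]]   (a diamond)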
    angle = angle % 360
if angle < 1:
return [list(row) for row in array]
lengths = list(map(len, array))
rect = len(set(lengths)) == 1
width = max(lengths)
    height = sum(lengths) // width  # integer division keeps height an int for the range() calls below
if wide:
width, height = height, width
if not rect:
array = [list(row) for row in array]
array = [[array[row+col].pop() for row in range(width)] for col in range(height)]
angle += 45
nineties, more = divmod(angle, 90)
if nineties == 3:
array = list(zip(*array))[::-1]
else:
for i in range(nineties):
array = list(zip(*array[::-1]))
if more:
ab = abs(len(array)-len(array[0]))
m = min(len(array), len(array[0]))
tall = len(array) > len(array[0])
array = [[array[r][c] for r,c in zip(range(row-1, -1, -1), range(row))
] for row in range(1, m+1)
] + [[array[r][c] for r,c in zip(range(m-1+row*tall, row*tall-1, -1),
range(row*(not tall), m+row*(not tall)+1))
] for row in range(1, ab+(not tall))
] + [[array[r][c] for r,c in zip(range(len(array)-1, ab*tall+row-1, -1),
range(ab*(not tall)+row, len(array[0])+(not tall)))
] for row in range((not tall), m)
]
return array | 44.166667 | 95 | 0.539623 |
697f3dfd62b2cc56315847a43f69fb649f1c83e6 | 5,181 | py | Python | src/openprocurement/tender/openuadefense/models.py | pontostroy/openprocurement.api | 6651ef29413d155c83f893ee64a611cf75f4daaf | [
"Apache-2.0"
] | null | null | null | src/openprocurement/tender/openuadefense/models.py | pontostroy/openprocurement.api | 6651ef29413d155c83f893ee64a611cf75f4daaf | [
"Apache-2.0"
] | 2 | 2021-03-25T23:27:04.000Z | 2022-03-21T22:18:15.000Z | src/openprocurement/tender/openuadefense/models.py | scrubele/prozorro-testing | 42b93ea2f25d8cc40e66c596f582c7c05e2a9d76 | [
"Apache-2.0"
] | 1 | 2020-08-20T06:09:14.000Z | 2020-08-20T06:09:14.000Z | # -*- coding: utf-8 -*-
from datetime import timedelta, datetime, time
from schematics.exceptions import ValidationError
from schematics.types import StringType
from schematics.types.compound import ModelType
from schematics.types.serializable import serializable
from zope.interface import implementer
from openprocurement.api.utils import get_now
from openprocurement.api.models import Period, ListType, ContactPoint as BaseContactPoint
from openprocurement.tender.core.models import (
ProcuringEntity as BaseProcuringEntity,
EnquiryPeriod,
Lot as BaseLot,
validate_lots_uniq,
get_tender,
)
from openprocurement.tender.core.models import Cancellation as BaseCancellation
from openprocurement.tender.openua.models import Tender as BaseTender, IAboveThresholdUATender
from openprocurement.tender.core.utils import (
calc_auction_end_time,
)
from openprocurement.tender.openuadefense.constants import (
TENDER_PERIOD,
ENQUIRY_STAND_STILL_TIME,
ENQUIRY_PERIOD_TIME,
COMPLAINT_SUBMIT_TIME,
COMPLAINT_OLD_SUBMIT_TIME,
COMPLAINT_OLD_SUBMIT_TIME_BEFORE,
)
from openprocurement.tender.openuadefense.utils import (
calculate_tender_business_date,
calculate_clarifications_business_date,
calculate_complaint_business_date,
)
class IAboveThresholdUADefTender(IAboveThresholdUATender):
""" Marker interface for aboveThresholdUA defense tenders """
class LotAuctionPeriod(Period):
"""The auction period."""
@serializable(serialize_when_none=False)
def shouldStartAfter(self):
if self.endDate:
return
tender = get_tender(self)
lot = self.__parent__
if tender.status not in ["active.tendering", "active.auction"] or lot.status != "active":
return
if tender.status == "active.auction" and lot.numberOfBids < 2:
return
if self.startDate and get_now() > calc_auction_end_time(lot.numberOfBids, self.startDate):
return calc_auction_end_time(lot.numberOfBids, self.startDate).isoformat()
else:
decision_dates = [
datetime.combine(
complaint.dateDecision.date() + timedelta(days=3), time(0, tzinfo=complaint.dateDecision.tzinfo)
)
for complaint in tender.complaints
if complaint.dateDecision
]
decision_dates.append(tender.tenderPeriod.endDate)
return max(decision_dates).isoformat()
class Lot(BaseLot):
auctionPeriod = ModelType(LotAuctionPeriod, default={})
class ContactPoint(BaseContactPoint):
availableLanguage = StringType(choices=["uk", "en", "ru"])
class ProcuringEntity(BaseProcuringEntity):
contactPoint = ModelType(ContactPoint, required=True)
additionalContactPoints = ListType(ModelType(ContactPoint, required=True), required=False)
class Cancellation(BaseCancellation):
class Options:
roles = BaseCancellation._options.roles
_after_release_reasonType_choices = ["noDemand", "unFixable", "expensesCut"]
@implementer(IAboveThresholdUADefTender)
class Tender(BaseTender):
"""Data regarding tender process - publicly inviting prospective contractors to submit bids for evaluation and selecting a winner or winners."""
procuringEntity = ModelType(
ProcuringEntity, required=True
) # The entity managing the procurement, which may be different from the buyer who is paying / using the items
lots = ListType(ModelType(Lot, required=True), default=list(), validators=[validate_lots_uniq])
procurementMethodType = StringType(default="aboveThresholdUA.defense")
procuring_entity_kinds = ["defense"]
cancellations = ListType(ModelType(Cancellation, required=True), default=list())
@serializable(serialized_name="enquiryPeriod", type=ModelType(EnquiryPeriod))
def tender_enquiryPeriod(self):
endDate = calculate_tender_business_date(self.tenderPeriod.endDate, -ENQUIRY_PERIOD_TIME, self, True)
clarificationsUntil = calculate_clarifications_business_date(endDate, ENQUIRY_STAND_STILL_TIME, self, True)
return EnquiryPeriod(
dict(
startDate=self.tenderPeriod.startDate,
endDate=endDate,
invalidationDate=self.enquiryPeriod and self.enquiryPeriod.invalidationDate,
clarificationsUntil=clarificationsUntil,
)
)
def validate_tenderPeriod(self, data, period):
if period and calculate_tender_business_date(period.startDate, TENDER_PERIOD, data, True) > period.endDate:
raise ValidationError(u"tenderPeriod should be greater than {0.days} working days".format(TENDER_PERIOD))
@serializable(type=ModelType(Period))
def complaintPeriod(self):
if self.tenderPeriod.startDate < COMPLAINT_OLD_SUBMIT_TIME_BEFORE:
endDate = calculate_tender_business_date(self.tenderPeriod.endDate, -COMPLAINT_OLD_SUBMIT_TIME, self)
else:
endDate = calculate_complaint_business_date(self.tenderPeriod.endDate, -COMPLAINT_SUBMIT_TIME, self)
return Period(dict(startDate=self.tenderPeriod.startDate, endDate=endDate))
| 40.476563 | 148 | 0.73673 |
fd7e09319b36fd70962f38fe074a45bac23af24a | 23 | py | Python | data/studio21_generated/introductory/3103/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | data/studio21_generated/introductory/3103/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | data/studio21_generated/introductory/3103/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | def unique(integers):
| 11.5 | 21 | 0.73913 |
f839e73989e7d4a5d95b8d38c4ac910861e583bf | 4,452 | py | Python | app/main/views.py | Nobella-Nyarari-Ejiofor/PenBlog | 5c2e396adf827384888e44ced4e5623ceb6068cf | [
"MIT"
] | null | null | null | app/main/views.py | Nobella-Nyarari-Ejiofor/PenBlog | 5c2e396adf827384888e44ced4e5623ceb6068cf | [
"MIT"
] | null | null | null | app/main/views.py | Nobella-Nyarari-Ejiofor/PenBlog | 5c2e396adf827384888e44ced4e5623ceb6068cf | [
"MIT"
] | null | null | null | from flask import render_template,request,redirect,url_for,flash,abort
from ..requests import get_quotes
from . import main
from flask_login import login_required, current_user
from .forms import BlogForm,CommentForm,UpdateProfile
from ..models import Blog,Comment, User
from .. import db,photos
#Views
@main.route('/')
def index():
page = request.args.get('page', 1, type=int)
quotes = get_quotes()
blogs = Blog.query.order_by(Blog.date.desc()).paginate(page=page, per_page=10)
title = "The Blog hompage"
return render_template('index.html', quotes = quotes, title = title, blogs=blogs)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update_profile.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/blog/new', methods =['GET','POST'])
@login_required
def new_blog():
form = BlogForm()
if form.validate_on_submit():
title = form.title.data
subtitle = form.subtitle.data
content = form.content.data
new_post = Blog(title=title, subtitle=subtitle, content=content, user=current_user)
db.session.add(new_post)
db.session.commit()
return redirect(url_for('main.index'))
return render_template('blogs/new_blog.html', form=form)
@main.route('/blogs/new/<int:blog_id>', methods = ['GET','POST'])
def blog(blog_id):
'''
View root page function that returns the posts page and its data
'''
blog = Blog.query.filter_by(id=blog_id).one()
comments = Comment.get_comments(blog_id)
# post_comments = Comment.get_comments(post_id)
title = f'blog_id'
return render_template('blogs/blog.html', title = title, blog=blog, comments = comments)
@main.route("/post/<int:blog_id>/update", methods=['GET', 'POST'])
def update_post(blog_id):
blog = Blog.query.get_or_404(blog_id)
if blog.user != current_user:
abort(403)
form = BlogForm()
if form.validate_on_submit():
blog.title = form.title.data
blog.subtitle = form.subtitle.data
blog.content = form.content.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('main.blog', blog_id=blog.id))
elif request.method == 'GET':
form.title.data = blog.title
form.subtitle = blog.subtitle
form.content.data = blog.content
return render_template('blogs/edit_blog.html', title='Update Post', form=form)
@main.route("/blog/<int:blog_id>/delete", methods=['GET','POST'])
@login_required
def delete_post(blog_id):
blog = Blog.query.get_or_404(blog_id)
if blog.user != current_user:
abort(403)
db.session.delete(blog)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('main.index'))
@main.route('/post/comments/new/<int:id>',methods = ['GET','POST'])
@login_required
def new_comment(id):
form = CommentForm()
if form.validate_on_submit():
new_comment = Comment(blog_id =id,comment=form.comment.data)
new_comment.save_comments()
return redirect(url_for('main.blog',blog_id=id))
return render_template('blogs/new_comment.html',comment_form=form)
@main.route("/comment/<int:id>/delete", methods=['GET', 'POST'])
@login_required
def delete_comment(id):
comment = Comment.query.get_or_404(id)
db.session.delete(comment)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('main.blog'))
| 32.49635 | 92 | 0.677673 |
0f2c0b638cc09812edfed1443d452e35720fae92 | 378 | py | Python | apps/partner/admin.py | glenjasper/cobija-web | 9a623daae9fba1b423b8fd690a25139ed8d06d7c | [
"MIT"
] | null | null | null | apps/partner/admin.py | glenjasper/cobija-web | 9a623daae9fba1b423b8fd690a25139ed8d06d7c | [
"MIT"
] | null | null | null | apps/partner/admin.py | glenjasper/cobija-web | 9a623daae9fba1b423b8fd690a25139ed8d06d7c | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Partner
class PartnerAdmin(admin.ModelAdmin):
list_display = [
'__str__',
'type_partner',
'status',
]
readonly_fields = ['created', 'updated']
def type_partner(self, obj):
return ",\n".join([p.name for p in obj.typepartner.all()])
admin.site.register(Partner, PartnerAdmin)
| 23.625 | 66 | 0.650794 |
78496e5249b0a5cf6fe82107ba8e1559b5c1b8c5 | 23,607 | py | Python | pybind/slxos/v17r_2_00/brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class vnetwork_vswitches(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-vswitch - based on the path /brocade_vswitch_rpc/get-vnetwork-vswitches/output/vnetwork-vswitches. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__name','__host','__datacenter','__pnic','__interface_type','__interface_name',)
_yang_name = 'vnetwork-vswitches'
_rest_name = 'vnetwork-vswitches'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__datacenter = YANGDynClass(base=unicode, is_leaf=True, yang_name="datacenter", rest_name="datacenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
self.__name = YANGDynClass(base=unicode, is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
self.__interface_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loopback': {'value': 7}, u'tunnel': {'value': 12}, u'unknown': {'value': 1}, u'port-channel': {'value': 5}, u'fibrechannel': {'value': 8}, u'ethernet': {'value': 10}, u'l2vlan': {'value': 6}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"The type of the interface. An 'unknown' type \nrepresents error scenario and should not be used."}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='enumeration', is_config=True)
self.__host = YANGDynClass(base=unicode, is_leaf=True, yang_name="host", rest_name="host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
self.__interface_name = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..512']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']}),], is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'The Interface value.'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='union', is_config=True)
self.__pnic = YANGDynClass(base=unicode, is_leaf=True, yang_name="pnic", rest_name="pnic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_vswitch_rpc', u'get-vnetwork-vswitches', u'output', u'vnetwork-vswitches']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'get-vnetwork-vswitches', u'output', u'vnetwork-vswitches']
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/name (string)
YANG Description: virtual switch name
"""
return self.__name
def _set_name(self, v, load=False):
"""
Setter method for name, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name() directly.
YANG Description: virtual switch name
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)""",
})
self.__name = t
if hasattr(self, '_set'):
self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=unicode, is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
def _get_host(self):
"""
Getter method for host, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/host (string)
YANG Description: host name
"""
return self.__host
def _set_host(self, v, load=False):
"""
Setter method for host, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/host (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_host is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_host() directly.
YANG Description: host name
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="host", rest_name="host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """host must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="host", rest_name="host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)""",
})
self.__host = t
if hasattr(self, '_set'):
self._set()
def _unset_host(self):
self.__host = YANGDynClass(base=unicode, is_leaf=True, yang_name="host", rest_name="host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
def _get_datacenter(self):
"""
Getter method for datacenter, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/datacenter (string)
YANG Description: host datacenter
"""
return self.__datacenter
def _set_datacenter(self, v, load=False):
"""
Setter method for datacenter, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/datacenter (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_datacenter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_datacenter() directly.
YANG Description: host datacenter
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="datacenter", rest_name="datacenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """datacenter must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="datacenter", rest_name="datacenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)""",
})
self.__datacenter = t
if hasattr(self, '_set'):
self._set()
def _unset_datacenter(self):
self.__datacenter = YANGDynClass(base=unicode, is_leaf=True, yang_name="datacenter", rest_name="datacenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
def _get_pnic(self):
"""
Getter method for pnic, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/pnic (string)
YANG Description: host NIC
"""
return self.__pnic
def _set_pnic(self, v, load=False):
"""
Setter method for pnic, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/pnic (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_pnic is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pnic() directly.
YANG Description: host NIC
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="pnic", rest_name="pnic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """pnic must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="pnic", rest_name="pnic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)""",
})
self.__pnic = t
if hasattr(self, '_set'):
self._set()
def _unset_pnic(self):
self.__pnic = YANGDynClass(base=unicode, is_leaf=True, yang_name="pnic", rest_name="pnic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
def _get_interface_type(self):
"""
Getter method for interface_type, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/interface_type (enumeration)
YANG Description: The type of the interface. An 'unknown' type
represents an error scenario and should not be used.
"""
return self.__interface_type
def _set_interface_type(self, v, load=False):
"""
Setter method for interface_type, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/interface_type (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_type() directly.
YANG Description: The type of the interface. An 'unknown' type
represents an error scenario and should not be used.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loopback': {'value': 7}, u'tunnel': {'value': 12}, u'unknown': {'value': 1}, u'port-channel': {'value': 5}, u'fibrechannel': {'value': 8}, u'ethernet': {'value': 10}, u'l2vlan': {'value': 6}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"The type of the interface. An 'unknown' type \nrepresents error scenario and should not be used."}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_type must be of a type compatible with enumeration""",
'defined-type': "brocade-vswitch:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loopback': {'value': 7}, u'tunnel': {'value': 12}, u'unknown': {'value': 1}, u'port-channel': {'value': 5}, u'fibrechannel': {'value': 8}, u'ethernet': {'value': 10}, u'l2vlan': {'value': 6}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"The type of the interface. An 'unknown' type \nrepresents error scenario and should not be used."}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='enumeration', is_config=True)""",
})
self.__interface_type = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_type(self):
self.__interface_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loopback': {'value': 7}, u'tunnel': {'value': 12}, u'unknown': {'value': 1}, u'port-channel': {'value': 5}, u'fibrechannel': {'value': 8}, u'ethernet': {'value': 10}, u'l2vlan': {'value': 6}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"The type of the interface. An 'unknown' type \nrepresents error scenario and should not be used."}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='enumeration', is_config=True)
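# Note (editorial sketch, not part of the generated bindings): the
# interface-type enumeration above is enforced by RestrictedClassType with
# restriction_type="dict_key": an assigned value is accepted only if it is
# one of the dict keys, and each key's 'value' entry carries the YANG enum
# integer. A minimal commented illustration of the check:
#
#   t = YANGDynClass(u'ethernet', base=RestrictedClassType(
#       base_type=unicode, restriction_type="dict_key",
#       restriction_arg={u'ethernet': {'value': 10}}))  # accepted
#   # the same call with u'token-ring' would raise ValueError, since it is
#   # not a key of the restriction dict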
def _get_interface_name(self):
"""
Getter method for interface_name, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/interface_name (union)
YANG Description: The Interface value. The interface value is always
interpreted within the context of the value of
'interface-type' leaf:
interface-type interface-name
----------------- --------------------
ethernet slot/port
port-channel Port channel ID
l2vlan Vlan ID
unknown Zero-length string.
The value of an 'interface-name' must always be
consistent with the value of the associated
'interface-type'. Attempts to set an interface-name
to a value inconsistent with the associated
'interface-type' must fail with an error.
"""
return self.__interface_name
def _set_interface_name(self, v, load=False):
"""
Setter method for interface_name, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches/output/vnetwork_vswitches/interface_name (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_name() directly.
YANG Description: The Interface value. The interface value is always
interpreted within the context of the value of
'interface-type' leaf:
interface-type interface-name
----------------- --------------------
ethernet slot/port
port-channel Port channel ID
l2vlan Vlan ID
unknown Zero-length string.
The value of an 'interface-name' must always be
consistent with the value of the associated
'interface-type'. Attempts to set an interface-name
to a value inconsistent with the associated
'interface-type' must fail with an error.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..512']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']}),], is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'The Interface value.'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='union', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_name must be of a type compatible with union""",
'defined-type': "brocade-vswitch:union",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..512']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']}),], is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'The Interface value.'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='union', is_config=True)""",
})
self.__interface_name = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_name(self):
self.__interface_name = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..512']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']}),], is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'The Interface value.'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='union', is_config=True)
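# Note (editorial sketch, not part of the generated bindings): for union
# leaves such as interface-name, YANGDynClass receives a list of candidate
# base types and tries them in order on assignment, keeping the first one
# that validates. Here the candidates are the slot/port string pattern, then
# the 1..512 port-channel range, then the 1..4090 VLAN range, so (assuming an
# instance `sw` of this class):
#
#   sw.interface_name = u'1/10'  # matches the slot/port pattern
#   sw.interface_name = 300      # rejected by the pattern, accepted as 1..512
#   sw.interface_name = 4000     # only the 1..4090 VLAN range accepts it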
name = __builtin__.property(_get_name, _set_name)
host = __builtin__.property(_get_host, _set_host)
datacenter = __builtin__.property(_get_datacenter, _set_datacenter)
pnic = __builtin__.property(_get_pnic, _set_pnic)
interface_type = __builtin__.property(_get_interface_type, _set_interface_type)
interface_name = __builtin__.property(_get_interface_name, _set_interface_name)
_pyangbind_elements = {'name': name, 'host': host, 'datacenter': datacenter, 'pnic': pnic, 'interface_type': interface_type, 'interface_name': interface_name, }
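# Illustrative usage sketch (editorial, not part of the generated bindings).
# The enclosing class name `vnetwork_vswitches` is inferred from the YANG
# paths in the docstrings above; this excerpt does not show the class
# statement, so treat the name as an assumption. Each property routes
# through the generated _set_*() method, where YANGDynClass re-validates the
# value against the YANG leaf type:
#
#   sw = vnetwork_vswitches()
#   sw.name = u'vSwitch0'            # string leaf, routed through _set_name()
#   sw.host = u'esx-host-01'         # string leaf
#   sw.interface_type = u'ethernet'  # must be a key of the enumeration dict
#   try:
#       sw.interface_type = u'bogus'
#   except ValueError as err:
#       print(err.args[0]['error-string'])  # "interface_type must be ..."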
| 66.686441 | 904 | 0.715635 |
6ef3e64cf2232ce81cfdbec4d1d9867b36fe523a | 220 | py | Python | configs/hpt-pretrain/bdd/imagenet_r50_supervised_basetrain/50-iters.py | Berkeley-Data/OpenSelfSup | 221191b88d891de57725b149caf237ffef72e529 | [
"Apache-2.0"
] | null | null | null | configs/hpt-pretrain/bdd/imagenet_r50_supervised_basetrain/50-iters.py | Berkeley-Data/OpenSelfSup | 221191b88d891de57725b149caf237ffef72e529 | [
"Apache-2.0"
] | 6 | 2021-03-11T05:35:54.000Z | 2021-04-03T22:25:11.000Z | configs/hpt-pretrain/bdd/imagenet_r50_supervised_basetrain/50-iters.py | Berkeley-Data/OpenSelfSup | 221191b88d891de57725b149caf237ffef72e529 | [
"Apache-2.0"
] | 1 | 2021-07-04T00:47:46.000Z | 2021-07-04T00:47:46.000Z | _base_="../base-bdd-config.py"
# this file is merged over the parent _base_ config; keys set here override it
model=dict(pretrained='data/basetrain_chkpts/imagenet_r50_supervised.pth')
# training length (iteration-based, not epoch-based)
total_iters=50
checkpoint_config = dict(interval=total_iters)
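# Illustrative sketch (editorial, not part of the config): OpenSelfSup
# configs are mmcv-style, so keys in this file are merged over `_base_` and
# win on conflict. Assuming a standard mmcv install, the merged result can
# be inspected like this:
#
#   from mmcv import Config
#   cfg = Config.fromfile(
#       'configs/hpt-pretrain/bdd/imagenet_r50_supervised_basetrain/50-iters.py')
#   assert cfg.total_iters == 50                 # defined here
#   assert cfg.checkpoint_config.interval == 50  # derived from total_iters
#   # model.pretrained comes from this file; everything else from _base_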
| 24.444444 | 74 | 0.804545 |