import pytest
from rpi_backlight import Backlight, _permission_denied
from rpi_backlight.utils import FakeBacklightSysfs
def test_permission_denied() -> None:
with pytest.raises(PermissionError):
_permission_denied()
def test_get_value() -> None:
with FakeBacklightSysfs() as backlight_sysfs:
backlight = Backlight(backlight_sysfs_path=backlight_sysfs.path)
assert backlight._get_value("brightness") == 255
def test_set_value() -> None:
with FakeBacklightSysfs() as backlight_sysfs:
backlight = Backlight(backlight_sysfs_path=backlight_sysfs.path)
assert backlight._set_value("brightness", 0) is None # type: ignore[func-returns-value]
assert backlight._get_value("brightness") == 0
def test_normalize_brightness() -> None:
with FakeBacklightSysfs() as backlight_sysfs:
backlight = Backlight(backlight_sysfs_path=backlight_sysfs.path)
assert backlight._normalize_brightness(255) == 100
assert backlight._normalize_brightness(128) == 50
assert backlight._normalize_brightness(0) == 0
def test_denormalize_brightness() -> None:
with FakeBacklightSysfs() as backlight_sysfs:
backlight = Backlight(backlight_sysfs_path=backlight_sysfs.path)
assert backlight._denormalize_brightness(100) == 255
assert backlight._denormalize_brightness(50) == 128
assert backlight._denormalize_brightness(0) == 0
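# A short worked example of the round-trip the two tests above assert (plain arithmetic,
# assuming the library simply rounds): the sysfs value range is 0-255 and the public API
# uses 0-100 percent, so 128 / 255 * 100 ≈ 50.2 -> 50, and 50 / 100 * 255 = 127.5 -> 128.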
|
__author__ = 'fmoscato'
from datetime import datetime
from collections import OrderedDict
from itertools import groupby
from operator import itemgetter
import pymongo
import constants as c
# The AggregationDAO handles interactions with the publications collection
# and aggregates the results.
class AggregationDAO:
# constructor for the class
def __init__(self, database):
self.db = database
self.publications = database.publications
self.missions = database.missions
self.users = database.users
def aggregatePublicationsTimeline(self):
pipe = [{'$project': {'year': {'$year': "$pub_date"},
'type': '$type'}},
{'$group': {'_id': {'year': '$year', 'type': '$type'},
'count': {'$sum': 1}}},
{'$sort': {'_id.year': 1}}]
res = self.publications.aggregate(pipeline=pipe)
types = self.publications.distinct('type')
result = OrderedDict()
old_year = None
for r in res['result']:
year = r['_id']['year']
# the year changed: open a new bucket, carrying the running totals forward
if old_year != year:
if not result:
result[year] = {t: 0 for t in types}
else:
result[year] = {t_data: (result[old_year][t_data]) for t_data in result[old_year]}
old_year = year
result[year][r['_id']['type']] = result[year][r['_id']['type']] + r['count']
return result
"""
Aggregation of Author/type.
Could be for year or overall
:param
:return
"""
def aggregateAuthor(self, **kwargs):
authors = []
pipe = None
try:
if set(['year', 'pub_type']).issubset(kwargs.keys()):
date_start = datetime.strptime('01/01/%s' % kwargs['year'], c.DATE_FORMAT)
date_end = datetime.strptime('31/12/%s' % kwargs['year'], c.DATE_FORMAT)
pipe = [{'$unwind': '$ASI_authors'},
{'$match': {'type': kwargs['pub_type'],
'pub_date': {'$gte': date_start, '$lte': date_end}}},
{'$project': {'ASI_authors': 1}},
{'$group': {'_id': '$ASI_authors', 'count': {'$sum': 1}}},
{'$sort': {'count': -1}}]
elif 'pub_type' in kwargs:
pipe = [{'$unwind': '$ASI_authors'},
{'$match': {'type': kwargs['pub_type']}},
{'$project': {'ASI_authors': 1}},
{'$group': {'_id': '$ASI_authors', 'count': {'$sum': 1}}},
{'$sort': {'count': -1}}]
elif 'year' in kwargs:
date_start = datetime.strptime('01/01/%s' % kwargs['year'], c.DATE_FORMAT)
date_end = datetime.strptime('31/12/%s' % kwargs['year'], c.DATE_FORMAT)
pipe = [{'$unwind': '$ASI_authors'},
{'$match': {'pub_date': {'$gte': date_start, '$lte': date_end}}},
{'$project': {'ASI_authors': 1}},
{'$group': {'_id': '$ASI_authors', 'count': {'$sum': 1}}},
{'$sort': {'count': -1}}]
else:
pipe = [{'$unwind': '$ASI_authors'},
{'$project': {'ASI_authors': 1}},
{'$group': {'_id': '$ASI_authors', 'count': {'$sum': 1}}},
{'$sort': {'count': -1}}]
res = self.publications.aggregate(pipeline=pipe)
for j in res['result']:
authors.append({"author": j["_id"], "count": j["count"]})
except pymongo.errors.OperationFailure:
print "Mongo error, aggregating publications"
return authors
def aggregateMission(self, is_refeered=True, year=None):
missions = []
pipe = None
try:
if year:
date_start = datetime.strptime('01/01/%s' % year, c.DATE_FORMAT)
date_end = datetime.strptime('31/12/%s' % year, c.DATE_FORMAT)
pipe = [{'$unwind': '$mission'},
{'$match': {'is_refeered': is_refeered, 'is_open': False,
'asdc_auth.validate': True,
'pub_date': {'$gte': date_start, '$lte': date_end}}},
{'$project': {'mission': 1}},
{'$group': {'_id': '$mission', 'count': {'$sum': 1}}},
{'$sort': {'count': -1}}]
else:
pipe = [{'$unwind': '$mission'},
{'$match': {'is_refeered': is_refeered, 'is_open': False,
'asdc_auth.validate': True}},
{'$project': {'mission': 1}},
{'$group': {'_id': '$mission', 'count': {'$sum': 1}}},
{'$sort': {'count': -1}}]
res = self.publications.aggregate(pipeline=pipe)
for j in res['result']:
missions.append({"mission": j["_id"], "count": j["count"]})
except pymongo.errors.OperationFailure:
print "Mongo error, aggregating publications"
return missions
def aggregateMissionsAuthor(self, mission, is_refeered=True):
author_per_mission = []
pipe = [{'$unwind': '$asdc_auth'},
{'$match': {'is_refeered': is_refeered, 'is_open': False, 'asdc_auth.validate': True,
'mission': mission}},
{'$project': {'asdc_auth.author': 1}},
{'$group': {'_id': '$asdc_auth.author', 'count': {'$sum': 1}}},
{'$sort': {'count': -1}}]
try:
res = self.publications.aggregate(pipeline=pipe)
for j in res['result']:
author_per_mission.append({"author": j["_id"], "count": j["count"]})
except pymongo.errors.OperationFailure:
print "Mongo error, aggregating publications"
return author_per_mission
def get_publications_type(self):
return self.publications.distinct('type')
def aggregateYearHistogram(self, author=None):
publications_per_year = []
pipe = None
types = self.get_publications_type()
if not author:
pipe = [{'$project': {'year': {'$year': '$pub_date'}, 'type': 1}},
{"$group": {"_id": {'year': '$year', 'type': "$type"},
"typecount": {'$sum': 1}}},
{"$group": {"_id": "$_id.year",
"type": {"$push": {"type": "$_id.type",
"count": "$typecount"}}}},
{'$sort': {'_id': 1}}]
else:
pipe = [{'$unwind': '$ASI_authors'},
{'$match': {'ASI_authors': author}},
{'$project': {'year': {'$year': '$pub_date'}, 'type': 1}},
{"$group": {"_id": {'year': '$year', 'type': "$type"},
"typecount": {'$sum': 1}}},
{"$group": {"_id": "$_id.year",
"type": {"$push": {"type": "$_id.type",
"count": "$typecount"}}}},
{'$sort': {'_id': 1}}]
res = self.publications.aggregate(pipeline=pipe)
for r in res['result']:
not_present_types = [item for item in types if item not in
[types_db['type'] for types_db in r['type']]]
year = r['_id']
types_count = {t: 0 for t in not_present_types}
types_count.update({types_db['type']: types_db['count'] for types_db in r['type']})
types_count.update({'year': year})
publications_per_year.append(types_count)
return publications_per_year
def aggregateCountAuthors(self):
authors_count = OrderedDict()
for y in reversed(c.years):
if y == 2000: continue
start_date = datetime.strptime("01/01/%s" % y, c.DATE_FORMAT)
end_date = datetime.strptime("31/12/%s" % y, c.DATE_FORMAT)
pipeline = [{'$match': {'start_date': {'$lte': start_date}, 'end_date': {'$gte': end_date}}},
{'$project': {'_id': 1}}, {'$group': {'_id': '_id', 'count': {'$sum': 1}}}]
res = self.users.aggregate(pipeline=pipeline)
results = res['result']
if results:
authors_count[y] = results[0]['count']
else:
authors_count[y] = 0
return authors_count
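# A minimal usage sketch (the connection URI and database name are placeholders, not taken
# from this module):
#
#     connection = pymongo.MongoClient('mongodb://localhost:27017')  # hypothetical URI
#     dao = AggregationDAO(connection['publications_db'])             # hypothetical DB name
#     per_author = dao.aggregateAuthor(year='2014', pub_type='article')
#     timeline = dao.aggregatePublicationsTimeline()
#
# Each helper returns plain Python structures (lists of dicts or an OrderedDict).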
|
# Generated by Django 3.2.3 on 2021-05-22 08:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auctions', '0006_auction_time'),
]
operations = [
migrations.AlterField(
model_name='auction',
name='time',
field=models.DateTimeField(auto_now_add=True),
),
]
|
import discord
import modules.codenames.globals as global_values
class Player():
team = -1
def __init__(self, user):
self.user = user
|
# https://github.com/sbarratt/inception-score-pytorch
# Revised by [elvisyjlin](https://github.com/elvisyjlin)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.autograd import Variable
from torchvision.models.inception import inception_v3
import numpy as np
from scipy.stats import entropy
from tqdm import tqdm
class InceptionScore():
def __init__(self, gpu):
""" Constructor
gpu -- whether or not to run on GPU
"""
# Set up device
self.device = torch.device('cuda' if torch.cuda.is_available() and gpu else 'cpu')
print('Using device:', self.device)
# Load inception model
self.inception_model = inception_v3(pretrained=True, transform_input=False).to(self.device).eval()
print('Loaded pretrained weights of Inception v3.')
def compute(self, imgs, gpu=True, batch_size=32, resize=False, splits=1):
""" Computes the inception score of the generated images imgs
imgs -- Torch dataset of (3xHxW) numpy images normalized in the range [-1, 1]
batch_size -- batch size for feeding into Inception v3
resize -- whether or not to resize images to 299x299
splits -- number of splits
"""
N = len(imgs)
assert batch_size > 0
assert N > batch_size
# Set up dataloader
dataloader = data.DataLoader(imgs, batch_size=batch_size)
up = lambda x: F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True).to(self.device)
def get_pred(x):
if resize:
x = up(x)
with torch.no_grad():
x = self.inception_model(x)
return F.softmax(x, dim=1).data.cpu().numpy()
# Get predictions
preds = np.zeros((N, 1000))
for i, batch in enumerate(tqdm(dataloader)):
batch = batch.to(self.device)
batch_size_i = batch.size()[0]
preds[i*batch_size:i*batch_size + batch_size_i] = get_pred(batch)
# Now compute the mean kl-div
split_scores = []
for k in tqdm(range(splits)):
part = preds[k * (N // splits): (k+1) * (N // splits), :]
py = np.mean(part, axis=0)
scores = []
for i in range(part.shape[0]):
pyx = part[i, :]
scores.append(entropy(pyx, py))
split_scores.append(np.exp(np.mean(scores)))
return np.mean(split_scores), np.std(split_scores)
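# Note on the math implemented above (the standard Inception Score definition, stated for
# reference): IS = exp( E_x[ KL( p(y|x) || p(y) ) ] ), where p(y|x) is the Inception v3
# softmax output for an image and p(y) is its mean over a split. scipy.stats.entropy(pyx, py)
# with two arguments returns exactly that KL divergence, so np.exp(np.mean(scores)) is the
# per-split score, and compute() returns the mean and standard deviation across splits.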
if __name__ == '__main__':
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from data import IgnoreLabelDataset, LimitedImageDataset, PureImageFolder
IS = InceptionScore(gpu=True)
# CIFAR-10
cifar = dsets.CIFAR10(
root='/share/data/cifar-10', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(32),
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
)
print('# of images:', len(cifar))
print("Calculating Inception Score for CIFAR-10 training set...")
print(IS.compute(IgnoreLabelDataset(cifar), batch_size=64, resize=True, splits=10))
# # CIFAR-10
# cifar = dsets.CIFAR10(
# root='/share/data/cifar-10', train=False, download=True,
# transform=transforms.Compose([
# transforms.Resize(32),
# transforms.CenterCrop(32),
# transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# ])
# )
# print('# of images:', len(cifar))
# print("Calculating Inception Score for CIFAR-10 validation set...")
# print(IS.compute(IgnoreLabelDataset(cifar), batch_size=64, resize=True, splits=10))
# # ImageNet 32x32
# imagenet = PureImageFolder(
# root='/share/data/imagenet/valid_32x32',
# transform=transforms.Compose([
# transforms.Resize(32),
# transforms.CenterCrop(32),
# transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# ])
# )
# print('# of images:', len(imagenet))
# print("Calculating Inception Score for ImageNet 32x32 validation set...")
# print(IS.compute(IgnoreLabelDataset(imagenet), batch_size=64, resize=True, splits=10))
# # ImageNet 64x64
# imagenet = PureImageFolder(
# root='/share/data/imagenet/valid_64x64',
# transform=transforms.Compose([
# transforms.Resize(64),
# transforms.CenterCrop(64),
# transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# ])
# )
# print('# of images:', len(imagenet))
# print("Calculating Inception Score for ImageNet 64x64 validation set...")
# print(IS.compute(IgnoreLabelDataset(imagenet), batch_size=64, resize=True, splits=10))
# # CelebA
# celeba = PureImageFolder(
# root='/share/data/celeba',
# transform=transforms.Compose([
# transforms.Resize(64),
# transforms.CenterCrop(64),
# transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# ])
# )
# print('# of images:', 50000)
# print("Calculating Inception Score for the first 50k images in CelebA 64x64 validation set...")
# print(IS.compute(LimitedImageDataset(IgnoreLabelDataset(celeba), 50000), batch_size=64, resize=True, splits=10))
# # LSUN bedroom
# lsun_bed = dsets.LSUN(
# root='/share/data/lsun', classes=['bedroom_train'],
# transform=transforms.Compose([
# transforms.Resize(64),
# transforms.CenterCrop(64),
# transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# ])
# )
# print('# of images:', len(lsun_bed))
# print("Calculating Inception Score for LSUN bedroom training set...")
# print(IS.compute(IgnoreLabelDataset(lsun_bed), batch_size=64, resize=True, splits=10))
# # LSUN bedroom
# lsun_bed = dsets.LSUN(
# root='/share/data/lsun', classes=['bedroom_val'],
# transform=transforms.Compose([
# transforms.Resize(64),
# transforms.CenterCrop(64),
# transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# ])
# )
# print('# of images:', len(lsun_bed))
# print("Calculating Inception Score for LSUN bedroom validation set...")
# print(IS.compute(IgnoreLabelDataset(lsun_bed), batch_size=64, resize=True, splits=10))
|
"""
Tests simple example.
"""
import os
from silvera.lang.meta import get_metamodel
from silvera.utils import get_root_path
def test_example():
examples_path = os.path.join(get_root_path(), "tests", "examples")
metamodel = get_metamodel()
path = os.path.join(examples_path, "example.si")
metamodel.model_from_file(path)
|
# Preprocessing for CMIP6 models
import warnings
import cf_xarray.units
import numpy as np
import pandas as pd
import pint
import pint_xarray
import xarray as xr
from cmip6_preprocessing.utils import _maybe_make_list, cmip6_dataset_id
# global object for units
_desired_units = {"lev": "m"}
_unit_overrides = {name: None for name in ["so"]}
_drop_coords = ["bnds", "vertex"]
def cmip6_renaming_dict():
"""a universal renaming dict. Keys correspond to source id (model name)
and valuse are a dict of target name (key) and a list of variables that
should be renamed into the target."""
rename_dict = {
# dim labels (order represents the priority when checking for the dim labels)
"x": ["i", "ni", "xh", "nlon"],
"y": ["j", "nj", "yh", "nlat"],
"lev": ["deptht", "olevel", "zlev", "olev", "depth"],
"bnds": ["bnds", "axis_nbounds", "d2"],
"vertex": ["vertex", "nvertex", "vertices"],
# coordinate labels
"lon": ["longitude", "nav_lon"],
"lat": ["latitude", "nav_lat"],
"lev_bounds": [
"deptht_bounds",
"lev_bnds",
"olevel_bounds",
"zlev_bnds",
],
"lon_bounds": [
"bounds_lon",
"bounds_nav_lon",
"lon_bnds",
"x_bnds",
"vertices_longitude",
],
"lat_bounds": [
"bounds_lat",
"bounds_nav_lat",
"lat_bnds",
"y_bnds",
"vertices_latitude",
],
"time_bounds": ["time_bnds"],
}
return rename_dict
def _invert_dict(rdict):
exploded_dict = {}
# there is probably a more effective way to 'invert' a dictionary
for k, v in rdict.items():
v = _maybe_make_list(v)
for vv in v:
exploded_dict[vv] = k
return exploded_dict
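# For illustration (not part of the module): _invert_dict({"x": ["i", "ni"], "y": ["j"]})
# returns {"i": "x", "ni": "x", "j": "y"}, i.e. a flat source-name -> target-name lookup
# that rename_cmip6 below can apply directly.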
def rename_cmip6(ds, rename_dict=None):
"""Homogenizes cmip6 dataasets to common naming"""
ds = ds.copy()
attrs = {k: v for k, v in ds.attrs.items()}
if rename_dict is None:
rename_dict = cmip6_renaming_dict()
inverted_rename_dict = _invert_dict(rename_dict)
ds_reset = ds.reset_coords()
def _maybe_rename(obj, rdict):
return obj.rename({kk: vv for kk, vv in rdict.items() if kk in obj.dims})
# first take care of the dims and reconstruct a clean ds
ds = xr.Dataset(
{
k: _maybe_rename(ds_reset[k], inverted_rename_dict)
for k in ds_reset.data_vars
}
)
# special treatment for 'lon'/'lat' if there is no 'x'/'y' after renaming process
for di, co in [("x", "lon"), ("y", "lat")]:
if di not in ds.dims and co in ds.dims:
ds = ds.rename({co: di})
# now rename the variables
# try/except: some datasets (e.g. MIROC) carry the same information under several names
# (lev/sigma/zlev/...), so a failed rename is silently ignored. A silent failure is not
# ideal, but no better way to handle this has been found yet.
for va in ds.data_vars:
try:
ds = ds.rename({va: inverted_rename_dict[va]})
except:
pass
# restore attributes
ds.attrs = attrs
return ds
def promote_empty_dims(ds):
"""Convert empty dimensions to actual coordinates"""
ds = ds.copy()
for di in ds.dims:
if di not in ds.coords:
ds = ds.assign_coords({di: ds[di]})
return ds
# some of the models do not have 2d lon lats, correct that.
def broadcast_lonlat(ds, verbose=True):
"""Some models (all `gr` grid_labels) have 1D lon lat arrays
This functions broadcasts those so lon/lat are always 2d arrays."""
if "lon" not in ds.variables:
ds.coords["lon"] = ds["x"]
if "lat" not in ds.variables:
ds.coords["lat"] = ds["y"]
if len(ds["lon"].dims) < 2:
ds.coords["lon"] = ds["lon"] * xr.ones_like(ds["lat"])
if len(ds["lat"].dims) < 2:
ds.coords["lat"] = xr.ones_like(ds["lon"]) * ds["lat"]
return ds
def _interp_nominal_lon(lon_1d):
x = np.arange(len(lon_1d))
idx = np.isnan(lon_1d)
return np.interp(x, x[~idx], lon_1d[~idx], period=360)
def replace_x_y_nominal_lat_lon(ds):
"""Approximate the dimensional values of x and y with mean lat and lon at the equator"""
ds = ds.copy()
def maybe_fix_non_unique(data, pad=False):
"""remove duplicate values by linear interpolation
if values are non-unique. `pad` if the last two points are the same
pad with -90 or 90. This is only applicable to lat values"""
if len(data) == len(np.unique(data)):
return data
else:
# pad each end with the other end.
if pad:
if len(np.unique([data[0:2]])) < 2:
data[0] = -90
if len(np.unique([data[-2:]])) < 2:
data[-1] = 90
ii_range = np.arange(len(data))
_, indices = np.unique(data, return_index=True)
double_idx = np.array([ii not in indices for ii in ii_range])
# print(f"non-unique values found at:{ii_range[double_idx]})")
data[double_idx] = np.interp(
ii_range[double_idx], ii_range[~double_idx], data[~double_idx]
)
return data
if "x" in ds.dims and "y" in ds.dims:
# define 'nominal' longitude/latitude values:
# latitude is defined as the max value of `lat` in the zonal direction;
# longitude is taken from the `middle` of the meridional direction,
# i.e. a row close to the equator, to avoid distorted values near the poles
eq_idx = len(ds.y) // 2
nominal_x = ds.isel(y=eq_idx).lon.load()
nominal_y = ds.lat.max("x").load()
# interpolate nans
# Special treatment for gaps in longitude
nominal_x = _interp_nominal_lon(nominal_x.data)
nominal_y = nominal_y.interpolate_na("y").data
# eliminate non-unique values
# these occur e.g. in "MPI-ESM1-2-HR"
nominal_y = maybe_fix_non_unique(nominal_y)
nominal_x = maybe_fix_non_unique(nominal_x)
ds = ds.assign_coords(x=nominal_x, y=nominal_y)
ds = ds.sortby("x")
ds = ds.sortby("y")
# do one more interpolation for the x values, in case the boundary values were
# affected
ds = ds.assign_coords(
x=maybe_fix_non_unique(ds.x.load().data),
y=maybe_fix_non_unique(ds.y.load().data, pad=True),
)
else:
warnings.warn(
"No x and y found in dimensions for source_id:%s. This likely means that you forgot to rename the dataset or this is the German unstructured model"
% ds.attrs["source_id"]
)
return ds
def correct_units(ds):
"Converts coordinates into SI units using pint-xarray"
# codify units with pint
# Perhaps this should be kept separately from the fixing?
# See https://github.com/jbusecke/cmip6_preprocessing/pull/160#discussion_r667041858
try:
# exclude salinity from the quantification (see https://github.com/jbusecke/cmip6_preprocessing/pull/160#issuecomment-878627027 for details)
quantified = ds.pint.quantify(_unit_overrides)
target_units = {
var: target_unit
for var, target_unit in _desired_units.items()
if var in quantified
}
converted = quantified.pint.to(target_units)
ds = converted.pint.dequantify(format="~P")
except ValueError as e:
warnings.warn(
f"{cmip6_dataset_id(ds)}: Unit correction failed with: {e}", UserWarning
)
return ds
def correct_coordinates(ds, verbose=False):
"""converts wrongly assigned data_vars to coordinates"""
ds = ds.copy()
for co in [
"x",
"y",
"lon",
"lat",
"lev",
"bnds",
"lev_bounds",
"lon_bounds",
"lat_bounds",
"time_bounds",
"lat_verticies",
"lon_verticies",
]:
if co in ds.variables:
if verbose:
print("setting %s as coord" % (co))
ds = ds.set_coords(co)
return ds
def correct_lon(ds):
"""Wraps negative x and lon values around to have 0-360 lons.
longitude names expected to be corrected with `rename_cmip6`"""
ds = ds.copy()
# remove out of bounds values found in some
# models as missing values
ds["lon"] = ds["lon"].where(abs(ds["lon"]) <= 1000)
ds["lat"] = ds["lat"].where(abs(ds["lat"]) <= 1000)
# adjust lon convention
lon = ds["lon"].where(ds["lon"] > 0, 360 + ds["lon"])
ds = ds.assign_coords(lon=lon)
if "lon_bounds" in ds.variables:
lon_b = ds["lon_bounds"].where(ds["lon_bounds"] > 0, 360 + ds["lon_bounds"])
ds = ds.assign_coords(lon_bounds=lon_b)
return ds
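# Worked example for the convention shift above (plain arithmetic, not from the module):
# a longitude of -20 is not > 0, so it becomes 360 + (-20) = 340, while 120 stays 120;
# the result is a continuous 0-360 longitude convention.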
def parse_lon_lat_bounds(ds):
"""both `regular` 2d bounds and vertex bounds are parsed as `*_bounds`.
This function renames them to `*_verticies` if the vertex dimension is found.
Also removes time dimension from static bounds as found in e.g. `SAM0-UNICON` model.
"""
if "source_id" in ds.attrs.keys():
if ds.attrs["source_id"] == "FGOALS-f3-L":
warnings.warn("`FGOALS-f3-L` does not provide lon or lat bounds.")
ds = ds.copy()
if "lat_bounds" in ds.variables:
if "x" not in ds.lat_bounds.dims:
ds.coords["lat_bounds"] = ds.coords["lat_bounds"] * xr.ones_like(ds.x)
if "lon_bounds" in ds.variables:
if "y" not in ds.lon_bounds.dims:
ds.coords["lon_bounds"] = ds.coords["lon_bounds"] * xr.ones_like(ds.y)
# I am assuming that all bound fields with time were broadcasted in error (except time bounds obviously),
# and will drop the time dimension.
error_dims = ["time"]
for ed in error_dims:
for co in ["lon_bounds", "lat_bounds", "lev_bounds"]:
if co in ds.variables:
if ed in ds[co].dims:
warnings.warn(
f"Found {ed} as dimension in `{co}`. Assuming this is an error and just picking the first step along that dimension."
)
stripped_coord = ds[co].isel({ed: 0}).squeeze()
# make sure that dimension is actually dropped
if ed in stripped_coord.coords:
stripped_coord = stripped_coord.drop(ed)
ds = ds.assign_coords({co: stripped_coord})
# Finally rename the bounds that are given in vertex convention
for va in ["lon", "lat"]:
va_name = va + "_bounds"
if va_name in ds.variables and "vertex" in ds[va_name].dims:
ds = ds.rename({va_name: va + "_verticies"})
return ds
def maybe_convert_bounds_to_vertex(ds):
"""Converts renamed lon and lat bounds into verticies, by copying
the values into the corners. Assumes a rectangular cell."""
ds = ds.copy()
if "bnds" in ds.dims:
if "lon_bounds" in ds.variables and "lat_bounds" in ds.variables:
if (
"lon_verticies" not in ds.variables
and "lat_verticies" not in ds.variables
):
lon_b = xr.ones_like(ds.lat) * ds.coords["lon_bounds"]
lat_b = xr.ones_like(ds.lon) * ds.coords["lat_bounds"]
lon_bb = xr.concat(
[lon_b.isel(bnds=ii).squeeze(drop=True) for ii in [0, 0, 1, 1]],
dim="vertex",
)
lon_bb = lon_bb.reset_coords(drop=True)
lat_bb = xr.concat(
[lat_b.isel(bnds=ii).squeeze(drop=True) for ii in [0, 1, 1, 0]],
dim="vertex",
)
lat_bb = lat_bb.reset_coords(drop=True)
ds = ds.assign_coords(lon_verticies=lon_bb, lat_verticies=lat_bb)
return ds
def maybe_convert_vertex_to_bounds(ds):
"""Converts lon and lat verticies to bounds by averaging corner points
on the appropriate cell face center."""
ds = ds.copy()
if "vertex" in ds.dims:
if "lon_verticies" in ds.variables and "lat_verticies" in ds.variables:
if "lon_bounds" not in ds.variables and "lat_bounds" not in ds.variables:
lon_b = xr.concat(
[
ds["lon_verticies"].isel(vertex=[0, 1]).mean("vertex"),
ds["lon_verticies"].isel(vertex=[2, 3]).mean("vertex"),
],
dim="bnds",
)
lat_b = xr.concat(
[
ds["lat_verticies"].isel(vertex=[0, 3]).mean("vertex"),
ds["lat_verticies"].isel(vertex=[1, 2]).mean("vertex"),
],
dim="bnds",
)
ds = ds.assign_coords(lon_bounds=lon_b, lat_bounds=lat_b)
ds = promote_empty_dims(ds)
return ds
def sort_vertex_order(ds):
"""sorts the vertex dimension in a coherent order:
0: lower left
1: upper left
2: upper right
3: lower right
"""
ds = ds.copy()
if (
"vertex" in ds.dims
and "lon_verticies" in ds.variables
and "lat_verticies" in ds.variables
):
# pick a vertex in the middle of the domain, to avoid the pole areas
x_idx = len(ds.x) // 2
y_idx = len(ds.y) // 2
lon_b = ds.lon_verticies.isel(x=x_idx, y=y_idx).load().data
lat_b = ds.lat_verticies.isel(x=x_idx, y=y_idx).load().data
vert = ds.vertex.load().data
points = np.vstack((lon_b, lat_b, vert)).T
# split into left and right
lon_sorted = points[np.argsort(points[:, 0]), :]
right = lon_sorted[:2, :]
left = lon_sorted[2:, :]
# sort again on each side to get top and bottom
bl, tl = left[np.argsort(left[:, 1]), :]
br, tr = right[np.argsort(right[:, 1]), :]
points_sorted = np.vstack((bl, tl, tr, br))
idx_sorted = (points_sorted.shape[0] - 1) - np.argsort(points_sorted[:, 2])
ds = ds.assign_coords(vertex=idx_sorted)
ds = ds.sortby("vertex")
return ds
# TODO: Implement this in a sleeker way with daops
def fix_metadata(ds):
"""
Fix known issues (from errata) with the metadata.
"""
# https://errata.es-doc.org/static/view.html?uid=2f6b5963-f87e-b2df-a5b0-2f12b6b68d32
if ds.attrs["source_id"] == "GFDL-CM4" and ds.attrs["experiment_id"] in [
"1pctCO2",
"abrupt-4xCO2",
"historical",
]:
ds.attrs["branch_time_in_parent"] = 91250
# https://errata.es-doc.org/static/view.html?uid=61fb170e-91bb-4c64-8f1d-6f5e342ee421
if ds.attrs["source_id"] == "GFDL-CM4" and ds.attrs["experiment_id"] in [
"ssp245",
"ssp585",
]:
ds.attrs["branch_time_in_child"] = 60225
return ds
def combined_preprocessing(ds):
# fix naming
ds = rename_cmip6(ds)
# promote empty dims to actual coordinates
ds = promote_empty_dims(ds)
# demote coordinates from data_variables
ds = correct_coordinates(ds)
# broadcast lon/lat
ds = broadcast_lonlat(ds)
# shift all lons to consistent 0-360
ds = correct_lon(ds)
# fix the units
ds = correct_units(ds)
# rename the `bounds` according to their style (bound or vertex)
ds = parse_lon_lat_bounds(ds)
# sort verticies in a consistent manner
ds = sort_vertex_order(ds)
# convert vertex into bounds and vice versa, so both are available
ds = maybe_convert_bounds_to_vertex(ds)
ds = maybe_convert_vertex_to_bounds(ds)
ds = fix_metadata(ds)
ds = ds.drop_vars(_drop_coords, errors="ignore")
return ds
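# A minimal usage sketch (the file path is a placeholder, not taken from this module):
#
#     import xarray as xr
#     ds_raw = xr.open_dataset("some_cmip6_file.nc")   # hypothetical path
#     ds = combined_preprocessing(ds_raw)
#
# The same function can also be passed as a `preprocess` callback, e.g. to xr.open_mfdataset,
# so that every member of a multi-file dataset is homogenized on load.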
|
# Write a function is_factor(f, n) that passes the tests below.
import sys
def is_factor(f, n):
return n % f == 0
def test(did_pass):
""" Print the result of a test. """
linenum = sys._getframe(1).f_lineno # Get the caller's line number.
if did_pass:
msg = "Test at line {0} ok.".format(linenum)
else:
msg = ("Test at line {0} FAILED.".format(linenum))
print(msg)
def test_suite():
""" Run the suite of tests for code in this module (this file).
"""
test(is_factor(3, 12))
test(not is_factor(5, 12))
test(is_factor(7, 14))
test(not is_factor(7, 15))
test(is_factor(1, 15))
test(is_factor(15, 15))
test(not is_factor(25, 15))
test_suite() # Here is the call to run the tests
|
#!/usr/bin/env python
from dockerpython.source import Source
if __name__ == '__main__':
print("hello world")
asource = Source('env/env.sh')
assert 'some env' in asource.atestenv
|
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from sklearn.decomposition import PCA, FastICA
def read_data(csv_file_path, is_PCA):
dataset = pd.read_csv(csv_file_path)
dataset.columns = ['edible', 'cap-shape', 'cap-surface', 'cap-color', 'bruises', 'odor', 'gill-attachment',
'gill-spacing',
'gill-size', 'gill-color', 'stalk-shape', 'stalk-surface-above-ring', 'stalk-surface-below-ring',
'stalk-color-above-ring', 'stalk-color-below-ring', 'veil-type', 'veil-color', 'ring-number',
'ring-type', 'spore-print-color', 'population', 'habitat']
features = dataset.iloc[:, [0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]]
features = pd.get_dummies(features).astype(float) # one hot encoding
target = dataset.iloc[:, 5]
dataset_x = features.values
dataset_y = target.values
if is_PCA:
pca = PCA(n_components=10)
dataset_x = pca.fit_transform(dataset_x)
x_train, x_test, y_train, y_test = train_test_split(dataset_x, dataset_y, test_size=0.2)
return x_train, x_test, y_train, y_test, dataset_x, dataset_y
def fit(x_train, y_train, mission):
if mission == "missing_data":
# random forest
rf = RandomForestClassifier(min_samples_leaf=1, min_samples_split=2, n_estimators=200, random_state=42)
rf.fit(x_train, y_train)
return rf
elif mission == "second_assistant":
# svm
clf_SVM = svm.SVC(kernel='sigmoid', gamma='auto')
# Train the model using the training sets
clf_SVM.fit(x_train, y_train)
return clf_SVM
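# A minimal usage sketch (the CSV path is a placeholder, not taken from this module):
#
#     x_train, x_test, y_train, y_test, X, y = read_data('mushrooms.csv', is_PCA=False)
#     model = fit(x_train, y_train, mission='missing_data')   # random forest
#     print(model.score(x_test, y_test))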
|
print("Kinjal Raykarmakar\nSec: CSE2H\tRoll: 29\n")
def s_list(l,size):
return [l[i:i+size] for i in range(0, len(l), size)]
lis=[1,3,4,7,9,8,6,2]
size=int(input("Enter the size: "))
print(s_list(lis,size))
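# For example, with the list above and size 3, s_list([1, 3, 4, 7, 9, 8, 6, 2], 3) returns
# [[1, 3, 4], [7, 9, 8], [6, 2]]: range(0, len(l), size) yields each chunk's start index,
# and the final slice may be shorter than size.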
|
# encoding: utf-8
# This file is the headless (no-UI) launcher; it uses vtServer as the container to load MainEngine.
# Configuration:
# self.gateway_name: the gateway connection name, defined in vtEngine.initGateway(); the corresponding config file is "<connection name>_connect.json"
# self.strategies: the strategy instances to start, which must be defined in ctaStrategy/CtaSetting.json, e.g. [u'S28_RB1001', u'S28_TFT', u'S28_HCRB', u'atr_rsi']
# vtServer ZMQ ports: configured in VT_Setting.json; the port configuration is looked up via AUTO_CONNCET_GW
import os
import sys
import ctypes
from datetime import datetime, timedelta, date
from time import sleep
from threading import Thread
# Add the repository directory to sys.path as the project root.
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(ROOT_PATH)
from vnpy.trader.vtEvent import *
from vnpy.rpc import RpcServer
from vnpy.trader.vtEngine import MainEngine
from vnpy.trader.gateway import ctpGateway
from vnpy.trader.setup_logger import setup_logger,get_logger
from vnpy.trader.util_monitor import *
from vnpy.trader.vtGlobal import globalSetting
from vnpy.trader.util_gpid import *
from vnpy.trader.app import ctaStrategy,riskManager
AUTO_CONNCET_GW = 'CTP'
########################################################################
class VtServer(RpcServer):
"""vn.trader 无界面服务器"""
# ----------------------------------------------------------------------
def __init__(self, repAddress, pubAddress):
"""Constructor"""
super(VtServer, self).__init__(repAddress, pubAddress)
#self.usePickle()
# whether the gateway is connected
self.connected = False
# gateway connection name, defined in vtEngine.initGateway(); the corresponding config file is "<connection name>_connect.json"
self.gateway_name = AUTO_CONNCET_GW
# the strategy instances to start; they must be defined in ctaStrategy/CtaSetting.json, e.g. [u'S28_RB1001', u'S28_TFT', u'S28_HCRB', u'atr_rsi']
self.strategies = [u'S30_RB0510', u'S30_HCRB05']
self.g_count = 0
self.disconnect_signal = 0
self.last_dt = datetime.now()
# create the event engine
ee = EventEngine2()
# create the main engine object
print(u'instance mainengine')
self.engine = MainEngine(ee)
# add the CTP gateway; its config file is CTP_Post
self.engine.addGateway(ctpGateway, self.gateway_name)
# add the applications
self.engine.addApp(ctaStrategy)
self.engine.addApp(riskManager)
# register the main engine's methods as RPC functions on the server
self.register(self.engine.connect)
self.register(self.engine.disconnect)
self.register(self.engine.subscribe)
self.register(self.engine.sendOrder)
self.register(self.engine.cancelOrder)
self.register(self.engine.qryAccount)
self.register(self.engine.qryPosition)
self.register(self.engine.checkGatewayStatus)  # check the gateway connection status
self.register(self.engine.qryStatus)  # check the ctaEngine status
self.register(self.engine.exit)
self.register(self.engine.writeLog)
self.register(self.engine.dbConnect)
self.register(self.engine.dbInsert)
self.register(self.engine.dbQuery)
self.register(self.engine.dbUpdate)
self.register(self.engine.getContract)
self.register(self.engine.getAllContracts)
self.register(self.engine.getOrder)
self.register(self.engine.getAllWorkingOrders)
self.register(self.engine.getAllGatewayNames)
self.register(self.engine.saveData)
self.register(self.engine.initStrategy)
self.register(self.engine.startStrategy)
self.register(self.engine.stopStrategy)
# register a general handler for events dispatched by the event engine
self.engine.eventEngine.registerGeneralHandler(self.eventHandler)
def trade_off(self):
"""检查现在是否为非交易时间"""
now = datetime.now()
a = datetime.now().replace(hour=2, minute=35, second=0, microsecond=0)
b = datetime.now().replace(hour=8, minute=55, second=0, microsecond=0)
c = datetime.now().replace(hour=15, minute=30, second=0, microsecond=0)
d = datetime.now().replace(hour=20, minute=55, second=0, microsecond=0)
weekend = (now.isoweekday() == 6 and now >= a) or (now.isoweekday() == 7) or (now.isoweekday() == 1 and now <=b)
off = (a <= now <= b) or (c <= now <= d) or weekend
return off
def disconnect(self):
""""断开底层gateway的连接"""
if self.engine:
self.engine.disconnect(self.gateway_name)
self.connected = False
def onTimer(self, event):
"""定时器执行逻辑,每十秒执行一次"""
# 十秒才执行一次检查
self.g_count += 1
if self.g_count <= 30:
return
self.g_count = 0
dt = datetime.now()
self.engine.qryStatus()
if dt.hour != self.last_dt.hour:
self.last_dt = dt
print(u'noUiMain.py checkpoint:{0}'.format(dt))
self.engine.writeLog( u'noUiMain.py checkpoint:{0}'.format(dt))
# scheduled disconnect outside trading hours
if self.trade_off():
"""Non-trading hours"""
if self.connected:
self.engine.writeLog(u'断开连接{0}'.format(self.gateway_name))
self.disconnect()
self.engine.writeLog(u'清空数据引擎')
self.engine.clearData()
self.connected = False
self.engine.writeNotification(u'非交易时间{0},断开连接{1}'.format(dt, self.gateway_name))
return
# during trading hours: periodic reconnect and status checks
if not self.connected:
self.engine.writeLog(u'启动连接{0}'.format(self.gateway_name))
self.engine.writeLog(u'清空数据引擎')
self.engine.clearData()
self.engine.writeLog(u'重新连接{0}'.format(self.gateway_name))
self.engine.connect(self.gateway_name)
self.connected = True
self.disconnect_signal = 0
self.engine.writeNotification(u'{0},重新连接{1}'.format(dt, self.gateway_name))
return
else:
if not self.engine.checkGatewayStatus(self.gateway_name):
self.disconnect_signal += 1
if self.disconnect_signal >= 5:
self.engine.writeWarning(u'检查连接{0}异常,超过{1}次'.format(self.gateway_name,self.disconnect_signal))
sys.exit(0)
else:
self.disconnect_signal = 0
def start(self):
"""启动"""
super(VtServer, self).start()
# 若需要连接数据库,则启动
# self.mainEngine.dbConnect()
# 加载cta的配置
print( u'load cta setting')
self.engine.ctaEngine.loadSetting()
print(u'initialize all strategies')
# initialize the strategies; if there are several, initialize them one by one
for s in self.strategies:
print('init strategy {0}'.format(s))
self.engine.ctaEngine.initStrategy(s)
# start each strategy
print('start strategy {0}'.format(s))
self.engine.ctaEngine.startStrategy(s)
# connect using the specified gateway configuration
if not self.trade_off():
print( u'connect gateway:{0}'.format(self.gateway_name))
self.engine.connect(self.gateway_name)
self.connected = True
# register the timer handler, used to decide on reconnection
self.engine.eventEngine.register(EVENT_TIMER, self.onTimer)
# monitors for logs, errors, trades, orders, positions and accounts
self.logM = LogMonitor(self.engine.eventEngine)
self.errorM = ErrorMonitor(self.engine.eventEngine)
self.tradeM = TradeMonitor(self.engine.eventEngine)
self.orderM = OrderMonitor(self.engine.eventEngine, self.engine)
self.positionM = PositionMonitor(self.engine.eventEngine)
self.accountM = AccountMonitor(self.engine.eventEngine)
self.engine.writeNotification(u'{0},服务启动{1}'.format(datetime.now(),self. gateway_name))
# ----------------------------------------------------------------------
def eventHandler(self, event):
"""事件处理"""
try:
# 调用RpcServer.publish()
if isinstance(event.type_, str):
self.publish(event.type_, event)
else:
self.publish(event.type_.encode('utf-8'), event)
except Exception as ex:
print( u'event Handler exception:{0}'.format(str(ex)))
# ----------------------------------------------------------------------
def stopServer(self):
"""停止服务器"""
print( 'stopServer')
# 关闭引擎
self.engine.exit()
# 停止服务器线程
self.stop()
# ----------------------------------------------------------------------
def printLog(content):
"""打印日志"""
print( datetime.now().strftime("%H:%M:%S"), '\t', content)
# ----------------------------------------------------------------------
def runServer():
"""运行服务器"""
try:
log_file_name = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'logs', u'noUiMain.log'))
except Exception as ex:
print( u'Use local dict:{0}'.format(os.getcwd()))
log_file_name = os.path.abspath(os.path.join(os.getcwd(), 'logs', u'noUiMain.log'))
setup_logger(filename=log_file_name, debug=False)
# Req/Publish ports
try:
zmqAddressDict = globalSetting['ZMQ']
zmqAddress = zmqAddressDict[AUTO_CONNCET_GW]
reqAddress = zmqAddress['ReqAddress']
pubAddress = zmqAddress['PubAddress']
except:
reqAddress = 'tcp://*:2014'
pubAddress = 'tcp://*:2016'
# create and start the server
server = VtServer(reqAddress, pubAddress)
server.start()
printLog('-' * 50)
printLog(u'Request端口:{0}'.format(reqAddress))
printLog(u'Publish端口:{0}'.format(pubAddress))
printLog(u'Trader服务器已启动')
if __name__ == '__main__':
# main program
thread = Thread(target=runServer, args=())
thread.start()
|
a = 14 ^ 15  # bitwise XOR: 0b1110 ^ 0b1111 == 0b0001, so a == 1
|
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
def make_imagenet_dataloader(train_batch_size, test_batch_size, image_size, **kwargs):
assert(image_size == -1 or image_size == 224), 'Currently we only use default (224x224) for imagenet'
num_classes = 1000
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
input_size = 224
train_transform = transforms.Compose([
transforms.RandomResizedCrop(input_size, scale=(0.2, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
])
test_transform = transforms.Compose([
transforms.Resize(int(input_size / 0.875)),  # 224 / 0.875 = 256
transforms.CenterCrop(input_size),
transforms.ToTensor(),
transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
])
train_dataset = datasets.ImageFolder('DATA/ImageNet/train', train_transform)
test_dataset = datasets.ImageFolder('DATA/ImageNet/test', test_transform)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=train_batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=test_batch_size, shuffle=False, **kwargs)
return train_loader, test_loader, num_classes
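# A minimal usage sketch (batch sizes and the extra DataLoader kwargs are illustrative assumptions):
#
#     train_loader, test_loader, num_classes = make_imagenet_dataloader(
#         train_batch_size=256, test_batch_size=200, image_size=224,
#         num_workers=4, pin_memory=True)
#
# The extra keyword arguments are forwarded unchanged to torch.utils.data.DataLoader.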
|
# coding=utf-8
# Read an n x n grid and a ship length k. For every cell, count how many valid horizontal or
# vertical placements of a 1 x k ship (covering '.' cells only) include it, and print the
# 1-indexed coordinates of a cell covered by the most placements.
n, k = map(int, input().split())
s = [[] for _ in range(n)]
for i in range(n):
s[i] = input()
f = [[1 for d in range(n)] for _ in range(n)]
for i in range(n):
for j in range(n):
ssum = 0
for t in range(j, min(j+k, n)):
if s[i][t] == '.':
ssum += 1
else:
break
if ssum >= k:
for t in range(k):
f[i][j+t] = f[i][j+t] + 1
for i in range(n):
for j in range(n):
ssum = 0
for t in range(j, min(n, j+k)):
if s[t][i] == '.':
ssum += 1
else:
break
if ssum >= k:
for t in range(k):
f[j+t][i] += 1
ans, ans_x, ans_y = 0, 0, 0
for i in range(n):
for j in range(n):
if ans < f[i][j]:
ans = f[i][j]
ans_x = i+1
ans_y = j+1
print(ans_x, ans_y)
|
import numpy
import qrtools
import scipy.ndimage
import os
def qrpng2pcbpoly(qrpngfilename, x0=9600, y0=3200):
im = scipy.ndimage.imread(qrpngfilename, flatten=True)
ymax, xmax = im.shape
y, x = numpy.where(im == 255)
x0 = x0 - 450
y0 = y0 + 430
step = 14
polygons = []
for ix, xvalue in enumerate(x):
x1 = x0+xvalue*step
x2 = x0+xvalue*step+step
y1 = y0-(ymax-y[ix])*step
y2 = y0-(ymax-y[ix])*step+step
polygons.append(''' Polygon("clearpoly")
(
[%.2fmil %.2fmil] [%.2fmil %.2fmil] [%.2fmil %.2fmil] [%.2fmil %.2fmil]
)''' % (x1, y1, x2, y1, x2, y2, x1, y2))
return '\n'.join(polygons)
# String length limits for Version 2 (25x25), assuming "Alphanumeric" encoding:
# level='S' 40
# level='M' 28
# level='Q' 22
def qrgen(string, level='M'):
print("qrgen: " + string)
qr = qrtools.QR(string, pixel_size=1, margin_size=2, level=level)
qr.encode(string)
return string+'.png'
def pcbstring(x, y, size, s):
return '''
Text[%3.2fmil %3.2fmil 0 %d "%s" "clearline"]''' % (x, y, size, s)
def pcbstrings(l):
return "".join([pcbstring(*ll) for ll in l])
def munge_size(l, xmax, ymax):
if l[0:7] == 'PCB["" ':
return 'PCB["" %.2fmil %.2fmil]' % (xmax, ymax)
else:
return l
def get_template(xmax=12000, ymax=10000):
with open("template.pcb", "r") as f:
template = f.read()
lines = template.split('\n')
return "\n".join([munge_size(l, xmax=xmax, ymax=ymax) for l in lines])
def pcbfile(silk, template):
return template + '''Layer(10 "top silk" "silk")
(
'''+silk+'''
)'''
def infopcb(lines, sn, x0=9600, y0=3200):
line_break = lines.split('\n')
return pcbstrings([
(x0, y0+0, 110, line_break[0]),
(x0, y0+70, 110, line_break[1]),
(x0, y0+140, 110, line_break[2]),
(x0, y0+210, 200, 'S'),
(x0, y0+320, 200, 'N'),
(x0+100, y0+180, 500, '%03d' % sn)])
def oneboard(gerber_name="a.gbr", sn=1, qr_string="TEST 1", desc_string="", x0=3900, y0=1200, level="M", template="", lines=""):
polys = qrpng2pcbpoly(qrgen(qr_string, level=level), x0=x0, y0=y0)
pcbstr = pcbfile(silk=infopcb(lines, sn, x0=x0, y0=y0)+polys, template=template)
pcb_name = 'test'
with open(pcb_name+'.pcb', 'w') as f:
f.write(pcbstr)
f.close()
os.system('pcb -x gerber %s.pcb && mv %s.%s.gbr %s' % (pcb_name, pcb_name, 'topsilk', gerber_name))
if __name__ == "__main__":
setup = [3900, 7450, 'qr_sn_%03d.gbr', 'LBNL DIGITIZER V1.1 SN %03d', 'M', 'LBNL Digitizer\nLCLS-II LLRF\nRevision 1.1']
# setup = [10000, 5360, 'qr_dn_sn_%03d.gbr', 'FNAL DOWNCVT REV C SN %03d', 'M', 'FNAL DOWNCVT\nLCLS-II LLRF\nRev C']
# setup = [6900, 2300, 'qr_up_sn_%03d.gbr', 'FNAL UPCVT REV C SN %03d', 'M', 'FNAL UPCVT\nLCLS-II LLRF\nRev C']
x0, y0, gerber_base, qr_base, level, lines = setup
template = get_template(xmax=x0+1000, ymax=y0+1000)
for sn in range(32, 57):
gerber_name = gerber_base % sn
qr_string = qr_base % sn
desc_string = ''
oneboard(gerber_name=gerber_name, sn=sn, qr_string=qr_string, desc_string=desc_string, x0=x0, y0=0, level=level, template=template, lines=lines)
|
from __future__ import absolute_import
from .jaccard import batched_f_measure, batched_jaccard
|
import demistomock as demisto
from CommonServerPython import *
from typing import Optional, Tuple, Union
from datetime import datetime, timedelta
import json
import requests
import urllib3
import dateparser
# Disable insecure warnings
urllib3.disable_warnings()
ALL_EVENTS = "All"
ISSUES_EVENTS = "Issues"
BLOCKED_CLICKS = "Blocked Clicks"
PERMITTED_CLICKS = "Permitted Clicks"
BLOCKED_MESSAGES = "Blocked Messages"
DELIVERED_MESSAGES = "Delivered Messages"
DEFAULT_LIMIT = 50
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
def get_now():
""" A wrapper function for datetime.now
helps handle tests
Returns:
datetime: time right now
"""
return datetime.now()
def get_fetch_times(last_fetch):
""" Get list of every hour since last_fetch. last is now.
Args:
last_fetch (datetime or str): last_fetch time
Returns:
List[str]: list of str represents every hour since last_fetch
"""
now = get_now()
times = list()
time_format = DATE_FORMAT
if isinstance(last_fetch, str):
times.append(last_fetch)
last_fetch = datetime.strptime(last_fetch, time_format)
elif isinstance(last_fetch, datetime):
times.append(last_fetch.strftime(time_format))
while now - last_fetch > timedelta(minutes=59):
last_fetch += timedelta(minutes=59)
times.append(last_fetch.strftime(time_format))
times.append(now.strftime(time_format))
return times
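# Worked example (own arithmetic, not from the integration): if last_fetch is
# "2021-01-01T00:00:00Z" and now is 02:30 UTC on the same day, the returned list is
# ["2021-01-01T00:00:00Z", "2021-01-01T00:59:00Z", "2021-01-01T01:58:00Z", "2021-01-01T02:30:00Z"],
# i.e. 59-minute steps plus the current time, so every query interval stays under the
# one-hour maximum accepted by the SIEM API.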
class Client:
def __init__(self, proofpoint_url, api_version, verify, service_principal, secret, proxies):
self.base_url = proofpoint_url
self.api_version = api_version
self.verify = verify
self.service_principal = service_principal
self.secret = secret
self.proxies = proxies
def http_request(self, method, url_suffix, params=None, data=None, forensics_api=False):
if forensics_api:
full_url = urljoin(self.base_url, '/v2/forensics')
else:
full_url = urljoin(urljoin(self.base_url, self.api_version), url_suffix)
res = requests.request(
method,
full_url,
verify=self.verify,
params=params,
json=data,
auth=(self.service_principal, self.secret),
proxies=self.proxies
)
if res.status_code not in [200, 204]:
raise ValueError(f'Error in API call to Proofpoint TAP {res.status_code}. Reason: {res.text}')
try:
return res.json()
except Exception:
raise ValueError(f"Failed to parse http response to JSON format. Original response body: \n{res.text}")
def get_events(self, interval=None, since_time=None, since_seconds=None, threat_type=None, threat_status=None,
event_type_filter="All"):
if not interval and not since_time and not since_seconds:
raise ValueError("Required to pass interval or sinceTime or sinceSeconds.")
query_params = {
"format": "json"
}
query_params.update(
assign_params(
interval=interval,
sinceTime=since_time,
sinceSeconds=since_seconds,
threatStatus=threat_status,
threatType=threat_type
)
)
url_route = {
"All": "/all",
"Issues": "/issues",
"Blocked Clicks": "/clicks/blocked",
"Permitted Clicks": "/clicks/permitted",
"Blocked Messages": "/messages/blocked",
"Delivered Messages": "/messages/delivered"
}[event_type_filter]
events = self.http_request("GET", urljoin('siem', url_route), params=query_params)
return events
def get_forensics(self, threat_id=None, campaign_id=None, include_campaign_forensics=None):
if threat_id and campaign_id:
raise DemistoException('threatId and campaignId supplied, supply only one of them')
if include_campaign_forensics and campaign_id:
raise DemistoException('includeCampaignForensics can be true only with threatId')
if campaign_id:
params = assign_params(campaignId=campaign_id)
else:
params = assign_params(threatId=threat_id, includeCampaignForensics=include_campaign_forensics)
return self.http_request('GET', None, params=params, forensics_api=True)
def get_clicks(self, clicks_type: str, interval: str, threat_status: str = None) -> dict:
"""
Retrieves clicks on malicious URLs in the specified time period. Clicks can either be blocked or permitted.
Args:
interval (str): ISO8601-formatted interval date. The minimum interval is 30 seconds. The maximum interval is one hour.
threat_status (str): The status of the threat. Can be: active, cleared or falsePositive.
clicks_type (str): The type of the click. Can be either "blocked" or "permitted".
Returns:
dict: API response from ProofpointTAP.
"""
params = remove_empty_elements({"interval": interval,
"threatStatus": threat_status,
"format": "json"})
return self.http_request("GET", f'/siem/clicks/{clicks_type}', params=params)
def get_messages(self, messages_type: str, interval: str, threat_status: str = None,
threat_type: str = None) -> dict:
"""
Retrieves events for messages in the specified time period. Messages can either be blocked or delivered.
Args:
interval (str): ISO8601-formatted interval date. The minimum interval is 30 seconds. The maximum interval is one hour.
threat_status (str): The status of the threat. Can be: active, cleared or falsePositive.
threat_type (str): The type of the threat. Can be: url, attachment or message.
messages_type (str): The type of the messages. Can be either "blocked" or "delivered"
Returns:
dict: API response from ProofpointTAP.
"""
params = remove_empty_elements({"interval": interval,
"threatStatus": threat_status,
"threatType": threat_type,
"format": "json"})
return self.http_request("GET", f'/siem/messages/{messages_type}', params=params)
def list_campaigns(self, interval: str, page: str = None, limit: str = None) -> dict:
"""
Retrieves a list of IDs of campaigns active in a time window.
Args:
interval (str): ISO8601-formatted interval date. The minimum interval is 30 seconds. The maximum interval is one day.
limit (str): The maximum number of campaign IDs to produce in the response.
page (str): The page of results to return, in multiples of the specified size.
Returns:
dict: API response from ProofpointTAP.
"""
params = remove_empty_elements({"interval": interval,
"page": page,
"size": limit,
"format": "json"})
return self.http_request("GET", '/campaign/ids', params=params)
def get_campaign(self, campaign_id: str) -> dict:
"""
Retrieves information for a given campaign.
Args:
campaign_id (str): The ID of the required campaign.
Returns:
dict: API response from ProofpointTAP.
"""
return self.http_request("GET", f'/campaign/{campaign_id}')
def list_most_attacked_users(self, window: str, limit: str = None, page: str = None) -> dict:
"""
Retrieves a list of the most attacked users in the organization for a given period.
Args:
window (str): The number of days for which the information will be retrieved.
limit (str): The maximum number of VAPs to produce.
page (str): The page of results to return, in multiples of the specified size.
Returns:
dict: API response from ProofpointTAP.
"""
params = remove_empty_elements({"window": window,
"size": limit,
"page": page})
return self.http_request("GET", '/people/vap', params=params)
def get_top_clickers(self, window: str, limit: str = None, page: str = None) -> dict:
"""
Retrieves a list of the top clickers in the organization for a given period.
Args:
window (str): The number of days for which the information will be retrieved.
limit (str): The maximum number of top clickers to produce.
page (str): The page of results to return, in multiples of the specified size.
Returns:
dict: API response from ProofpointTAP.
"""
params = remove_empty_elements({"window": window,
"size": limit,
"page": page})
return self.http_request("GET", '/people/top-clickers', params=params)
def url_decode(self, url_list: list) -> dict:
"""
Decode URLs that have been rewritten by TAP to their original, target URL.
Args:
url_list (list): List of encoded URLs.
Returns:
dict: API response from ProofpointTAP.
"""
data = {"urls": url_list}
return self.http_request("POST", '/url/decode', data=data)
def list_issues(self, interval: str, threat_status: str = None, threat_type: str = None) -> dict:
"""
Retrieves events for permitted clicks on malicious URLs and delivered messages in the specified time period.
Args:
interval (str): ISO8601-formatted interval date. The minimum interval is 30 seconds. The maximum interval is one hour.
threat_status (str): The status of the threat. Can be: active, cleared or falsePositive.
threat_type (str): The type of the threat. Can be: url, attachment or messageText.
Returns:
dict: API response from ProofpointTAP.
"""
params = remove_empty_elements({"interval": interval,
"threatStatus": threat_status,
"threatType": threat_type,
"format": "json"})
return self.http_request("GET", '/siem/issues', params=params)
def test_module(client: Client) -> str:
"""
Tests API connectivity and authentication.
Args:
client (Client): ProofpointTAP API client.
Returns:
str : 'ok' if test passed, anything else will fail the test.
"""
try:
client.get_top_clickers(window='90')
except Exception as exception:
if 'Unauthorized' in str(exception) or 'authentication' in str(exception):
return 'Authorization Error: make sure API Credentials are correctly set'
if 'connection' in str(exception):
return 'Connection Error: make sure Server URL is correctly set'
raise exception
return 'ok'
def build_context_attachment(what: dict) -> dict:
return assign_params(
SHA256=what.get('sha256'),
MD5=what.get('md5'),
Blacklisted=what.get('blacklisted'),
Offset=what.get('offset'),
Size=what.get('size'),
)
def build_context_cookie(what: dict) -> dict:
return assign_params(
Action=what.get('action'),
Domain=what.get('domain'),
Key=what.get('key'),
Value=what.get('value'),
)
def build_context_dns(what: dict) -> dict:
return assign_params(
Host=what.get('host'),
CNames=what.get('cnames'),
IP=what.get('ips'),
NameServers=what.get('nameservers'),
NameServersList=what.get('nameserversList'),
)
def build_context_mutex(what: dict) -> dict:
return assign_params(
Name=what.get('name'),
Path=what.get('path')
)
def build_context_ids(what: dict) -> dict:
return assign_params(
Name=what.get('name'),
SignatureID=what.get('signatureId')
)
def build_context_network(what: dict) -> dict:
return assign_params(
Action=what.get('action'),
IP=what.get('ip'),
Port=what.get('port'),
Protocol=what.get('type')
)
def build_context_process(what: dict) -> dict:
return assign_params(
Action=what.get('action'),
Path=what.get('path'),
)
def build_context_dropper(what: dict) -> dict:
return assign_params(
Path=what.get('path'),
URL=what.get('url'),
Rule=what.get('rule'),
)
def build_context_registry(what: dict) -> dict:
return assign_params(
Name=what.get('name'),
Action=what.get('action'),
Key=what.get('key'),
Value=what.get('value'),
)
def build_context_file(what: dict) -> dict:
return assign_params(
Path=what.get('path'),
Action=what.get('action'),
SHA256=what.get('sha256'),
MD5=what.get('md5'),
Size=what.get('size'),
)
def build_context_url(what: dict) -> dict:
return assign_params(
URL=what.get('url'),
Blacklisted=what.get('blacklisted'),
SHA256=what.get('sha256'),
MD5=what.get('md5'),
Size=what.get('size'),
HTTPStatus=what.get('httpStatus'),
IP=what.get('ip'),
)
def build_context_behavior(forensics_data: dict) -> dict:
"""
Build forensics behavior evidence type objects in order to update the command report context.
Args:
forensics_data (dict): Forensics data. A map of values associated with the specific evidence type.
Returns:
dict: Dictionary from given kwargs without empty values.
"""
return assign_params(
Path=forensics_data.get('path'),
URL=forensics_data.get('url'),
)
def build_context_screenshot(forensics_data: dict) -> dict:
"""
Build forensics screenshot evidence type objects in order to update the command report context.
Args:
forensics_data (dict): Forensics data. A map of values associated with the specific evidence type.
Returns:
dict: Dictionary from given kwargs without empty values.
"""
return assign_params(
URL=forensics_data.get('url'),
)
def get_forensic_command(client: Client, args: dict) -> Tuple[str, dict, dict]:
"""
Args:
client:
args: demisto.args()
Returns:
Outputs
"""
forensic_types = {
'attachment': 'Attachment',
'cookie': 'Cookie',
'dns': 'DNS',
'dropper': 'Dropper',
'file': 'File',
'ids': 'IDS',
'mutex': 'Mutex',
'network': 'Network',
'process': 'Process',
'registry': 'Registry',
'url': 'URL',
'behavior': 'Behavior',
'screenshot': 'Screenshot'
}
threat_id = args.get('threatId')
campaign_id = args.get('campaignId')
include_campaign_forensics = args.get('includeCampaignForensics') == 'true'
limit = args.get('limit', DEFAULT_LIMIT)
raw_response = client.get_forensics(
threat_id=threat_id,
campaign_id=campaign_id,
include_campaign_forensics=include_campaign_forensics
)
reports = raw_response.get('reports', [])
if len(reports) > limit:
reports = reports[:limit]
reports_context = list()
for report in reports:
report_context = assign_params(
Scope=report.get('scope'),
Type=report.get('type'),
ID=report.get('id')
)
for evidence in report.get('forensics', []):
evidence_type = evidence.get('type')
evidence_type = forensic_types.get(evidence_type)
if evidence_type:
# Create list in report
if evidence_type not in report_context:
report_context[evidence_type] = list()
what = evidence.get('what', {})
basic_report = assign_params(
Time=evidence.get('time'),
Display=evidence.get('display'),
Malicious=evidence.get('malicious'),
)
basic_report['Platform'] = [{
'Name': platform.get('name'),
'OS': platform.get('os'),
'Version': platform.get('version')
} for platform in evidence.get('platforms', [])]
if evidence_type == 'Attachment':
basic_report.update(build_context_attachment(what))
report_context[evidence_type].append(basic_report)
elif evidence_type == 'Cookie':
basic_report.update(build_context_cookie(what))
report_context[evidence_type].append(basic_report)
elif evidence_type == 'DNS':
basic_report.update(build_context_dns(what))
report_context['DNS'].append(basic_report)
elif evidence_type == 'Dropper':
basic_report.update(build_context_dropper(what))
report_context['Dropper'].append(basic_report)
elif evidence_type == 'File':
basic_report.update(build_context_file(what))
report_context['File'].append(basic_report)
elif evidence_type == 'IDS':
basic_report.update(build_context_ids(what))
report_context['IDS'].append(basic_report)
elif evidence_type == 'Mutex':
basic_report.update(build_context_mutex(what))
report_context['Mutex'].append(basic_report)
elif evidence_type == 'Network':
basic_report.update(build_context_network(what))
report_context['Network'].append(basic_report)
elif evidence_type == 'Process':
basic_report.update(build_context_process(what))
report_context['Process'].append(basic_report)
elif evidence_type == 'Registry':
basic_report.update(build_context_registry(what))
report_context['Registry'].append(basic_report)
elif evidence_type == 'URL':
basic_report.update(build_context_url(what))
report_context['URL'].append(basic_report)
elif evidence_type == 'Behavior':
basic_report.update(build_context_behavior(what))
report_context['Behavior'].append(basic_report)
elif evidence_type == 'Screenshot':
basic_report.update(build_context_screenshot(what))
report_context['Screenshot'].append(basic_report)
reports_context.append(report_context)
outputs = {'Proofpoint.Report(var.ID === obj.ID)': reports_context}
readable_outputs = tableToMarkdown(
f'Forensic results from ProofPoint for ID: {threat_id or campaign_id}',
reports_context,
headers=['ID', 'Scope', 'Type']
)
return readable_outputs, outputs, raw_response
@logger
def get_events_command(client, args):
interval = args.get("interval")
threat_type = argToList(args.get("threatType"))
threat_status = args.get("threatStatus")
since_time = args.get("sinceTime")
since_seconds = int(args.get("sinceSeconds")) if args.get("sinceSeconds") else None
event_type_filter = args.get("eventTypes")
raw_events = client.get_events(interval, since_time, since_seconds, threat_type, threat_status, event_type_filter)
return (
tableToMarkdown("Proofpoint Events", raw_events),
{
'Proofpoint.MessagesDelivered(val.GUID == obj.GUID)': raw_events.get("messagesDelivered"),
'Proofpoint.MessagesBlocked(val.GUID == obj.GUID)': raw_events.get("messagesBlocked"),
'Proofpoint.ClicksBlocked(val.GUID == obj.GUID)': raw_events.get("clicksBlocked"),
'Proofpoint.ClicksPermitted(val.GUID == obj.GUID)': raw_events.get("clicksPermitted")
},
raw_events
)
def fetch_incidents(
client,
last_run,
first_fetch_time,
event_type_filter,
threat_type,
threat_status,
limit=DEFAULT_LIMIT,
integration_context=None,
raw_json_encoding: Optional[str] = None,
) -> Tuple[dict, list, list]:
incidents = []
end_query_time = ''
# check if there're incidents saved in context
if integration_context:
remained_incidents = integration_context.get("incidents")
# return incidents if exists in context.
if remained_incidents:
return last_run, remained_incidents[:limit], remained_incidents[limit:]
# Get the last fetch time, if exists
start_query_time = last_run.get("last_fetch")
# Handle first time fetch, fetch incidents retroactively
if not start_query_time:
start_query_time, _ = parse_date_range(first_fetch_time, date_format=DATE_FORMAT, utc=True)
fetch_times = get_fetch_times(start_query_time)
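    # Query each consecutive (start, end) window pair separately.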
for i in range(len(fetch_times) - 1):
start_query_time = fetch_times[i]
end_query_time = fetch_times[i + 1]
raw_events = client.get_events(interval=start_query_time + "/" + end_query_time,
event_type_filter=event_type_filter,
threat_status=threat_status, threat_type=threat_type)
message_delivered = raw_events.get("messagesDelivered", [])
for raw_event in message_delivered:
raw_event["type"] = "messages delivered"
event_guid = raw_event.get("GUID", "")
if raw_json_encoding:
raw_json = json.dumps(raw_event, ensure_ascii=False).encode(raw_json_encoding).decode()
else:
raw_json = json.dumps(raw_event)
incident = {
"name": "Proofpoint - Message Delivered - {}".format(event_guid),
"rawJSON": raw_json,
"occurred": raw_event["messageTime"]
}
incidents.append(incident)
message_blocked = raw_events.get("messagesBlocked", [])
for raw_event in message_blocked:
raw_event["type"] = "messages blocked"
event_guid = raw_event.get("GUID", "")
if raw_json_encoding:
raw_json = json.dumps(raw_event, ensure_ascii=False).encode(raw_json_encoding).decode()
else:
raw_json = json.dumps(raw_event)
            incident = {
                "name": "Proofpoint - Message Blocked - {}".format(event_guid),
                "rawJSON": raw_json,
                "occurred": raw_event["messageTime"],
            }
incidents.append(incident)
clicks_permitted = raw_events.get("clicksPermitted", [])
for raw_event in clicks_permitted:
raw_event["type"] = "clicks permitted"
event_guid = raw_event.get("GUID", "")
if raw_json_encoding:
raw_json = json.dumps(raw_event, ensure_ascii=False).encode(raw_json_encoding).decode()
else:
raw_json = json.dumps(raw_event)
incident = {
"name": "Proofpoint - Click Permitted - {}".format(event_guid),
"rawJSON": raw_json,
"occurred": raw_event["clickTime"] if raw_event["clickTime"] > raw_event["threatTime"] else raw_event[
"threatTime"]
}
incidents.append(incident)
clicks_blocked = raw_events.get("clicksBlocked", [])
for raw_event in clicks_blocked:
raw_event["type"] = "clicks blocked"
event_guid = raw_event.get("GUID", "")
if raw_json_encoding:
raw_json = json.dumps(raw_event, ensure_ascii=False).encode(raw_json_encoding).decode()
else:
raw_json = json.dumps(raw_event)
incident = {
"name": "Proofpoint - Click Blocked - {}".format(event_guid),
"rawJSON": raw_json,
"occurred": raw_event["clickTime"] if raw_event["clickTime"] > raw_event["threatTime"] else raw_event[
"threatTime"]
}
incidents.append(incident)
# Cut the milliseconds from last fetch if exists
    end_query_time = end_query_time[:-5] + 'Z' if end_query_time and end_query_time[-5] == '.' else end_query_time
next_run = {"last_fetch": end_query_time}
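    # Return up to `limit` incidents now; the remainder is handed back so that main() can
    # store it in the integration context and serve it on the next fetch run.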
return next_run, incidents[:limit], incidents[limit:]
def handle_interval(time_range: datetime, is_hours_interval: bool = True, is_days_interval: bool = False):
"""
    Create a list of interval strings covering the span from the given time up to the current time.
    Most ProofpointTAP endpoints require an interval string in order to retrieve information from the API.
    Interval strings have the following format: '2021-04-27T09:00:00Z/2021-04-27T10:00:00Z'
Args:
        time_range (datetime): Start time of the overall range (a past datetime).
        is_days_interval (bool): If True, create day-long interval strings.
        is_hours_interval (bool): If True, create hour-long interval strings.
    Returns:
        list: List of interval strings.
"""
current_time = datetime.utcnow()
intervals = []
if current_time - time_range > timedelta(
days=7): # The maximum time range of Proofpoint TAP API requests is 7 days minus one minute.
time_range += timedelta(minutes=1)
if is_days_interval:
while current_time - time_range > timedelta(days=1):
start = time_range.strftime(DATE_FORMAT)
time_range += timedelta(days=1)
intervals.append(f'{start}/{time_range.strftime(DATE_FORMAT)}')
if is_hours_interval:
while current_time - time_range > timedelta(hours=1):
start = time_range.strftime(DATE_FORMAT)
time_range += timedelta(hours=1)
intervals.append(f'{start}/{time_range.strftime(DATE_FORMAT)}')
return intervals
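# Illustrative sketch (comment only, dates are made up): for a time_range of roughly
# three hours ago, handle_interval(datetime.utcnow() - timedelta(hours=3)) yields
# hour-long interval strings such as '2021-04-27T09:00:00Z/2021-04-27T10:00:00Z',
# stopping once less than a full hour remains before the current time.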
def get_clicks_command(client: Client, is_blocked: bool, interval: str = None, threat_status: str = None,
time_range: str = None) -> CommandResults:
"""
Retrieves clicks on malicious URLs in the specified time period. Clicks can either be blocked or permitted.
Args:
client (Client): ProofpointTAP API client.
is_blocked (bool): Indicates the clicks type.
interval (str): ISO8601-formatted interval date. The minimum interval is thirty seconds. The maximum interval is one hour.
threat_status (str): The status of the threat. Can be: active, cleared or falsePositive.
time_range (str): Time range, for example: 1 week, 2 days, 3 hours etc.
Returns:
CommandResults: raw response, outputs, and readable outputs.
"""
clicks_type = 'blocked' if is_blocked else 'permitted'
if not (interval or time_range):
raise Exception('Must provide interval or time_range.')
if interval and time_range:
raise Exception('Must provide only one of the arguments interval or time_range.')
if time_range and dateparser.parse("7 days") > dateparser.parse(time_range): # type: ignore
        raise Exception('The maximum time range is 7 days.')
if time_range and dateparser.parse("30 seconds") < dateparser.parse(time_range): # type: ignore
raise Exception('The minimum time range is thirty seconds.')
if time_range and dateparser.parse("1 hour") < dateparser.parse(time_range): # type: ignore
end = datetime.utcnow().strftime(DATE_FORMAT)
start = dateparser.parse(time_range).strftime(DATE_FORMAT) # type: ignore
intervals = [f'{start}/{end}']
else:
intervals = handle_interval(dateparser.parse(time_range)) if time_range else [interval] # type: ignore
outputs = []
raw_responses = []
for interval_string in intervals:
raw_response = client.get_clicks(clicks_type, interval_string, threat_status)
clicks_path = ['clicksBlocked'] if clicks_type == 'blocked' else ['clicksPermitted']
if dict_safe_get(raw_response, clicks_path):
outputs.extend(dict_safe_get(raw_response, clicks_path))
raw_responses.append(raw_response)
readable_output = tableToMarkdown(f'{clicks_type.title()} Clicks',
outputs, headers=['id', 'senderIP', 'recipient', 'classification', 'threatID',
'threatURL', 'threatStatus', 'threatTime',
'clickTime', 'campaignId', 'userAgent'],
headerTransform=pascalToSpace
)
return CommandResults(
readable_output=readable_output,
outputs_prefix=f'Proofpoint.Clicks{clicks_type.capitalize()}',
outputs=outputs,
outputs_key_field=['GUID', 'id'],
raw_response=raw_responses
)
def create_messages_output(messages_list: list) -> list:
"""
Creates and filters the required fields of messages output.
Args:
messages_list (list): List of retrieved messages.
Returns:
list: List of messages with the required fields.
"""
outputs = []
message_keys = ['spamScore', 'phishScore', 'threatsInfoMap', 'messageTime', 'impostorScore', 'malwareScore',
'cluster', 'subject', 'quarantineFolder', 'quarantineRule', 'policyRoutes', 'modulesRun',
'messageSize', 'messageParts', 'completelyRewritten', 'id', 'sender', 'recipient', 'senderIP',
'messageID', 'GUID']
    header_fields = ['headerFrom', 'headerReplyTo', 'fromAddress', 'ccAddresses',
                     'replyToAddress', 'toAddresses', 'xmailer']
for message in messages_list:
message_header = {}
for field in header_fields:
message_header[field] = message[field]
message_output = {key: value for key, value in message.items() if key in message_keys}
message_output['Header'] = message_header
outputs.append(message_output)
return outputs
def create_threats_objects(messages: list) -> list:
"""
Creates list of threats items of messages.
Args:
messages (list): List of messages items.
Returns:
list: List of threats items.
"""
threats_info_map = []
message_keys = ['sender', 'recipient', 'subject']
for message in messages:
        for threat in message.get('threatsInfoMap', []):
threat_object = {key: value for key, value in message.items() if key in message_keys}
threat_object.update(threat)
threats_info_map.append(threat_object)
return threats_info_map
def get_messages_command(client: Client, is_blocked: bool, interval: str = None, threat_status: str = None,
threat_type: str = None, time_range: str = None) -> CommandResults:
"""
Retrieves events for messages in the specified time period. Messages can either be blocked or delivered.
Args:
client (Client): ProofpointTAP API client.
is_blocked (bool): Indicates the messages type.
interval (str): ISO8601-formatted interval date. The minimum interval is thirty seconds. The maximum interval is one hour.
threat_status (str): The status of the threat. Can be: active, cleared or falsePositive.
threat_type (str): The type of the threat. Can be: url, attachment or message.
time_range (str): Time range, for example: 1 week, 2 days, 3 hours etc.
Returns:
CommandResults: raw response, outputs, and readable outputs.
"""
messages_type = 'blocked' if is_blocked else 'delivered'
if not (interval or time_range):
raise Exception('Must provide interval or time_range.')
if interval and time_range:
raise Exception('Must provide only one of the arguments interval or time_range.')
if time_range and dateparser.parse("7 days") > dateparser.parse(time_range): # type: ignore
        raise Exception('The maximum time range is 7 days.')
if time_range and dateparser.parse("30 seconds") < dateparser.parse(time_range): # type: ignore
raise Exception('The minimum time range is thirty seconds.')
if time_range and dateparser.parse("1 hour") < dateparser.parse(time_range): # type: ignore
end = datetime.utcnow().strftime(DATE_FORMAT)
start = dateparser.parse(time_range).strftime(DATE_FORMAT) # type: ignore
intervals = [f'{start}/{end}']
else:
intervals = handle_interval(dateparser.parse(time_range)) if time_range else [interval] # type: ignore
outputs = []
raw_responses = []
for interval_string in intervals:
raw_response = client.get_messages(messages_type, interval_string, threat_status, threat_type)
messages_path = ['messagesBlocked'] if messages_type == 'blocked' else ['messagesDelivered']
if dict_safe_get(raw_response, messages_path):
outputs.extend(create_messages_output(dict_safe_get(raw_response, messages_path)))
raw_responses.append(raw_response)
threats_info_map = create_threats_objects(outputs)
messages_readable_output = tableToMarkdown(f'{messages_type.title()} Messages',
outputs,
headers=['senderIP', 'sender', 'recipient', 'subject', 'messageSize',
'messageTime', 'malwareScore', 'phishScore', 'spamScore'],
headerTransform=pascalToSpace
)
threats_info_readable_output = tableToMarkdown(f'{messages_type.title()} Messages Threats Information',
threats_info_map,
headers=['sender', 'recipient', 'subject', 'classification',
'threat', 'threatStatus', 'threatUrl', 'threatID',
'threatTime', 'campaignID'],
headerTransform=pascalToSpace)
readable_output = messages_readable_output + "\n" + threats_info_readable_output
return CommandResults(
readable_output=readable_output,
outputs_prefix=f'Proofpoint.Messages{messages_type.capitalize()}',
outputs=outputs,
outputs_key_field=['GUID', 'id'],
raw_response=raw_responses
)
def list_campaigns_command(client: Client, interval: str = None, limit: str = None, page: str = None,
time_range: str = None) -> CommandResults:
"""
Retrieves a list of IDs of campaigns active in a time window.
Args:
client (Client): ProofpointTAP API client.
interval (str): ISO8601-formatted interval date. The minimum interval is thirty seconds. The maximum interval is one day.
limit (str): The maximum number of campaign IDs to produce in the response.
page (str): The page of results to return, in multiples of the specified size.
time_range (str): Time range, for example: 1 week, 2 days, 3 hours etc.
Returns:
CommandResults: raw response, outputs, and readable outputs.
"""
if not (interval or time_range):
raise Exception('Must provide interval or time_range.')
if interval and time_range:
raise Exception('Must provide only one of the arguments interval or time_range.')
if time_range and dateparser.parse("7 days") > dateparser.parse(time_range): # type: ignore
        raise Exception('The maximum time range is 7 days.')
if time_range and dateparser.parse("30 seconds") < dateparser.parse(time_range): # type: ignore
raise Exception('The minimum time range is thirty seconds.')
if time_range and dateparser.parse("1 hour") < dateparser.parse(time_range): # type: ignore
end = datetime.utcnow().strftime(DATE_FORMAT)
start = dateparser.parse(time_range).strftime(DATE_FORMAT) # type: ignore
intervals = [f'{start}/{end}']
else:
intervals = handle_interval(dateparser.parse(time_range), # type: ignore
is_days_interval=True) if time_range else [ # type: ignore
interval] # type: ignore
outputs = []
raw_responses = []
request_error = []
for interval_string in intervals:
try:
raw_response = client.list_campaigns(interval_string, page, limit)
        except ValueError:  # When there are no campaigns for the interval, the request returns status code 404,
            # which raises an error in the http_request function.
            request_error.append(
                {'interval': interval_string, 'message': f'No campaign data found for interval {interval_string}'})
continue
if dict_safe_get(raw_response, ["campaigns"]):
outputs.extend(dict_safe_get(raw_response, ["campaigns"]))
raw_responses.append(raw_response)
readable_output = tableToMarkdown('Campaigns List',
outputs, headers=['id', 'lastUpdatedAt'],
headerTransform=pascalToSpace
)
if request_error:
readable_output += "\n" + tableToMarkdown('Errors',
request_error, headers=['interval', 'message'],
headerTransform=pascalToSpace
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='Proofpoint.Campaign',
outputs=outputs,
outputs_key_field='id',
raw_response=raw_responses
)
def get_campaign_command(client: Client, campaign_id: str) -> Union[CommandResults, str]:
"""
Retrieves information for a given campaign.
Args:
client (Client): ProofpointTAP API client.
campaign_id (str): The ID of the required campaign.
Returns:
CommandResults: raw response, outputs, and readable outputs.
"""
try:
raw_response = client.get_campaign(campaign_id)
except ValueError:
return 'Campaign Id not found'
campaign_general_fields = ['id', 'name', 'description', 'startDate', 'notable']
campaign_fields = ['families', 'techniques', 'actors', 'brands', 'malware']
outputs = {}
outputs['campaignMembers'] = dict_safe_get(raw_response, ['campaignMembers'])
outputs['info'] = {key: value for key, value in raw_response.items() if key in campaign_general_fields}
outputs.update({key: value for key, value in raw_response.items() if key in campaign_fields})
fields_readable_output = ""
for field in campaign_fields:
fields_readable_output += "\n" + tableToMarkdown(field.capitalize(),
dict_safe_get(outputs, [field]), headers=['id', 'name'],
headerTransform=pascalToSpace
)
campaign_info_output = tableToMarkdown('Campaign Information',
outputs['info'],
headers=['id', 'name', 'description', 'startDate', 'notable'],
headerTransform=pascalToSpace
)
campaign_members_output = tableToMarkdown('Campaign Members',
outputs['campaignMembers'],
headers=['id', 'threat', 'type'],
headerTransform=pascalToSpace
)
readable_output = campaign_info_output + "\n" + campaign_members_output + fields_readable_output
return CommandResults(
readable_output=readable_output,
outputs_prefix='Proofpoint.Campaign',
outputs=outputs,
outputs_key_field='id',
raw_response=raw_response
)
def create_families_objects(users: list, statistics_key: str) -> list:
"""
Creates list of threat families items of users.
Args:
statistics_key (str): Dictionary key of users statistics.
users (list): List of users items.
Returns:
list: List of threats items
"""
threat_families = []
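    # Each appended item has the shape (illustrative values):
    #   {'Mailbox': ['user@example.com'], 'Threat Family Name': 'malware', 'Threat Score': 42}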
for user in users:
emails = dict_safe_get(user, ["identity", "emails"])
for family in dict_safe_get(user, [statistics_key, "families"]):
families_object = {'Mailbox': emails, 'Threat Family Name': family.get('name'),
'Threat Score': family.get('score')}
threat_families.append(families_object)
return sorted(threat_families, key=lambda x: (x.get('Threat Score', 0), x.get('Mailbox')), reverse=True)
def list_most_attacked_users_command(client: Client, window: str, limit: str = None,
page: str = None) -> CommandResults:
"""
Retrieves a list of the most attacked users in the organization for a given period.
Args:
client (Client): ProofpointTAP API client.
window (str): The number of days for which the information will be retrieved.
limit (str): The maximum number of VAPs to produce.
page (str): The page of results to return, in multiples of the specified size.
Returns:
CommandResults: raw response, outputs, and readable outputs.
"""
raw_response = client.list_most_attacked_users(window, limit, page)
outputs = raw_response
threat_families = create_families_objects(dict_safe_get(outputs, ["users"]), "threatStatistics")
most_attacked_users_output = tableToMarkdown('Most Attacked Users Information',
outputs,
headers=['totalVapUsers', 'interval', 'averageAttackIndex',
'vapAttackIndexThreshold'],
headerTransform=pascalToSpace
)
threat_families_output = tableToMarkdown('Threat Families', threat_families,
headers=['Mailbox', 'Threat Family Name', 'Threat Score'],
headerTransform=pascalToSpace)
readable_output = most_attacked_users_output + "\n" + threat_families_output
return CommandResults(
readable_output=readable_output,
outputs_prefix='Proofpoint.Vap',
outputs=outputs,
raw_response=raw_response,
outputs_key_field='interval'
)
def get_top_clickers_command(client: Client, window: str, limit: str = None, page: str = None) -> CommandResults:
"""
Retrieves a list of the top clickers in the organization for a given period.
Args:
client (Client): ProofpointTAP API client.
window (str): The number of days for which the information will be retrieved.
limit (str): The maximum number of top clickers to produce.
page (str): The page of results to return, in multiples of the specified size.
Returns:
CommandResults: raw response, outputs, and readable outputs.
"""
raw_response = client.get_top_clickers(window, limit, page)
outputs = raw_response
threat_families = create_families_objects(dict_safe_get(outputs, ["users"]), "clickStatistics")
top_clickers_output = tableToMarkdown('Top Clickers Users Information',
outputs,
headers=['totalTopClickers', 'interval'],
headerTransform=pascalToSpace
)
threat_families_output = tableToMarkdown('Threat Families',
threat_families,
headers=['Mailbox', 'Threat Family Name', 'Threat Score'],
headerTransform=pascalToSpace)
readable_output = top_clickers_output + threat_families_output
return CommandResults(
readable_output=readable_output,
outputs_prefix='Proofpoint.Topclickers',
outputs=outputs,
raw_response=raw_response,
outputs_key_field='interval'
)
def url_decode_command(client: Client, urls: str) -> CommandResults:
"""
Decode URLs that have been rewritten by TAP to their original, target URL.
Args:
client (Client): ProofpointTAP API client.
urls (str): Encoded URLs.
Returns:
CommandResults: raw response, outputs, and readable outputs.
"""
raw_response = client.url_decode(argToList(urls))
outputs = dict_safe_get(raw_response, ["urls"])
readable_output = tableToMarkdown('URLs decoded information',
outputs,
headers=['encodedUrl', 'decodedUrl'],
headerTransform=pascalToSpace)
return CommandResults(
readable_output=readable_output,
outputs_prefix='Proofpoint.URL',
outputs_key_field='encodedUrl',
outputs=outputs,
raw_response=raw_response
)
def list_issues_command(client: Client, interval: str = None, threat_status: str = None,
threat_type: str = None, time_range: str = None) -> list:
"""
Retrieves events for permitted clicks on malicious URLs and delivered messages in the specified time period.
Args:
client (Client): ProofpointTAP API client.
interval (str): ISO8601-formatted interval date. The minimum interval is thirty seconds. The maximum interval is one hour.
threat_status (str): The status of the threat. Can be: active, cleared or falsePositive.
threat_type (str): The type of the threat. Can be: url, attachment or messageText.
time_range (str): Time range, for example: 1 week, 2 days, 3 hours etc.
Returns:
list: List of CommandResults objects.
"""
if not (interval or time_range):
raise Exception('Must provide interval or time_range.')
if interval and time_range:
raise Exception('Must provide only one of the arguments interval or time_range.')
if time_range and dateparser.parse("7 days") > dateparser.parse(time_range): # type: ignore
        raise Exception('The maximum time range is 7 days.')
if time_range and dateparser.parse("30 seconds") < dateparser.parse(time_range): # type: ignore
raise Exception('The minimum time range is thirty seconds.')
if time_range and dateparser.parse("1 hour") < dateparser.parse(time_range): # type: ignore
end = datetime.utcnow().strftime(DATE_FORMAT)
start = dateparser.parse(time_range).strftime(DATE_FORMAT) # type: ignore
intervals = [f'{start}/{end}']
else:
intervals = handle_interval(dateparser.parse(time_range)) if time_range else [interval] # type: ignore
messages_outputs = []
messages_raw_responses = []
clicks_outputs = []
clicks_raw_responses = []
command_results_list = []
for interval_string in intervals:
raw_response = client.list_issues(interval_string, threat_status, threat_type)
messages = dict_safe_get(raw_response, ['messagesDelivered'])
if messages:
messages_outputs.extend(create_messages_output(messages))
messages_raw_responses.append(raw_response)
clicks = dict_safe_get(raw_response, ['clicksPermitted'])
if clicks:
clicks_outputs.extend(clicks)
clicks_raw_responses.append(raw_response)
threats_info_map = create_threats_objects(messages_outputs)
delivered_messages_output = tableToMarkdown('Delivered Messages',
messages_outputs,
headers=['senderIP', 'sender', 'recipient', 'subject', 'messageSize',
'messageTime', 'malwareScore', 'phishScore', 'spamScore'],
headerTransform=pascalToSpace
)
threats_info_output = tableToMarkdown('Delivered Messages Threats Info Map:',
threats_info_map, headers=['sender', 'recipient', 'subject', 'classification',
'threat', 'threatStatus', 'threatUrl', 'threatID',
'threatTime', 'campaignID'],
headerTransform=pascalToSpace)
messages_readable_output = delivered_messages_output + "\n" + threats_info_output
command_results_list.append(CommandResults(
readable_output=messages_readable_output,
outputs_prefix='Proofpoint.MessagesDelivered',
outputs=messages_outputs,
outputs_key_field=['GUID', 'id'],
raw_response=messages_raw_responses
))
    clicks_readable_output = tableToMarkdown('Permitted Clicks from list-issues command result:',
clicks_outputs,
headers=['id', 'senderIP', 'recipient', 'classification', 'threatID',
'threatURL', 'threatStatus', 'threatTime',
'clickTime', 'campaignId', 'userAgent'],
headerTransform=pascalToSpace
)
command_results_list.append(CommandResults(
readable_output=clicks_readable_output,
outputs_prefix='Proofpoint.ClicksPermitted',
outputs=clicks_outputs,
outputs_key_field=['GUID', 'id'],
raw_response=clicks_raw_responses
))
return command_results_list
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
service_principal = params.get('credentials', {}).get('identifier')
secret = params.get('credentials', {}).get('password')
# Remove trailing slash to prevent wrong URL path to service
server_url = params['url'][:-1] if (params['url'] and params['url'].endswith('/')) else params['url']
api_version = params.get('api_version')
verify_certificate = not params.get('insecure', False)
# How many time before the first fetch to retrieve incidents
fetch_time = params.get('fetch_time', '60 minutes')
threat_status = argToList(params.get('threat_status'))
threat_type = argToList(params.get('threat_type'))
event_type_filter = params.get('events_type')
raw_json_encoding = params.get('raw_json_encoding')
fetch_limit = 50
# Remove proxy if not set to true in params
proxies = handle_proxy()
command = demisto.command()
args = demisto.args()
LOG(f'Command being called is {command}')
try:
client = Client(server_url, api_version, verify_certificate, service_principal, secret, proxies)
commands = {
'proofpoint-get-events': get_events_command,
'proofpoint-get-forensics': get_forensic_command
}
if command == 'test-module':
return_outputs(test_module(client))
elif demisto.command() == 'fetch-incidents':
integration_context = demisto.getIntegrationContext()
next_run, incidents, remained_incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_time=fetch_time,
event_type_filter=event_type_filter,
threat_status=threat_status,
threat_type=threat_type,
limit=fetch_limit,
integration_context=integration_context,
raw_json_encoding=raw_json_encoding,
)
# Save last_run, incidents, remained incidents into integration
demisto.setLastRun(next_run)
demisto.incidents(incidents)
# preserve context dict
integration_context['incidents'] = remained_incidents
demisto.setIntegrationContext(integration_context)
elif command in commands:
return_outputs(*commands[command](client, args))
elif command == 'proofpoint-get-events-clicks-blocked':
return_results(get_clicks_command(client, is_blocked=True, **args))
elif command == 'proofpoint-get-events-clicks-permitted':
return_results(get_clicks_command(client, is_blocked=False, **args))
elif command == 'proofpoint-get-events-messages-blocked':
return_results(get_messages_command(client, is_blocked=True, **args))
elif command == 'proofpoint-get-events-messages-delivered':
return_results(get_messages_command(client, is_blocked=False, **args))
elif command == 'proofpoint-list-campaigns':
return_results(list_campaigns_command(client, **args))
elif command == 'proofpoint-get-campaign':
return_results(get_campaign_command(client, **args))
elif command == 'proofpoint-list-most-attacked-users':
return_results(list_most_attacked_users_command(client, **args))
elif command == 'proofpoint-get-top-clickers':
return_results(get_top_clickers_command(client, **args))
elif command == 'proofpoint-url-decode':
return_results(url_decode_command(client, **args))
elif command == 'proofpoint-list-issues':
return_results(list_issues_command(client, **args))
except Exception as exception:
if command == 'test-module':
return_error(str(exception))
return_error(f'Failed to execute {command} command. Error: {str(exception)}')
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
|
from setuptools import setup, find_packages
setup(
name='dfilter',
version='0.1',
description='filter and query tools for dictionaries',
long_description=open('README.rst').read(),
author='Martin Slabber',
author_email='martin.slabber@gmail.com',
license='MIT',
packages=find_packages(exclude=['ez_setup']),
    install_requires=[],
url='https://github.com/martinslabber/pyDfilter',
include_package_data=True,
entry_points="",
    test_suite='nose.collector',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Utilities',
],
)
|
from __future__ import unicode_literals
from django.db import models
import datetime
from django.utils import timezone
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
# return self.pub_date >= timezone.now() - timezone.timedelta(days=1)
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
class Choice(models.Model):
question = models.ForeignKey(Question)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
class Person(models.Model):
name = models.CharField(max_length=128)
def __str__(self): # __unicode__ on Python 2
return self.name
class Group(models.Model):
name = models.CharField(max_length=128)
members = models.ManyToManyField(Person, through='Membership')
def __str__(self): # __unicode__ on Python 2
return self.name
class Membership(models.Model):
person = models.ForeignKey(Person)
group = models.ForeignKey(Group)
date_joined = models.DateField()
invite_reason = models.CharField(max_length=64)
class CommonInfo(models.Model):
    name = models.CharField(max_length=100, null=False, unique=True)
age = models.IntegerField()
class Meta:
abstract = True
ordering = ['name']
class Student(CommonInfo):
home_group = models.CharField(max_length=5)
class Meta(CommonInfo.Meta):
db_table = 'student_info'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring, import-error
import functools
import tick_pb as tick
def _main():
t = tick.Tick(lambda elapsed_ms: print(f"elapsed: {elapsed_ms} ms"),
500, 1000,
lambda: print("run beg"), lambda: print("run end"))
t.start()
t.wait_life_over()
print()
t = tick.Tick(500, 2000)
def tick_event(t, elapsed_ms):
print(f"elapsed: {elapsed_ms} ms")
t.stop()
t.set_tick_event(functools.partial(tick_event, t))
t.set_run_beg_callback(lambda: print("run beg"))
t.set_run_end_callback(lambda: print("run end"))
t.start()
t.wait_life_over()
if __name__ == "__main__":
_main()
|
"""
Module for finding an expression given a float
Note that this is of course just guessing.
"""
import math
def namednumber(num):
""" attempt to find exact constant for float """
def isnear(val):
return abs(num-val)<0.00001
if isnear(0.0): return "0"
if num<0:
sign = "-"
num = -num
else:
sign = ""
if isnear(1.0): return sign+"1"
if isnear(math.pi): return sign+"pi"
if isnear((math.sqrt(6)+math.sqrt(2))/4): return sign+"(sqrt(6)+sqrt(2))/4"
if isnear((math.sqrt(6)-math.sqrt(2))/4): return sign+"(sqrt(6)-sqrt(2))/4"
if isnear((math.sqrt(5)+1.0)/2): return sign+"(sqrt(5)+1)/2"
if isnear((math.sqrt(5)-1.0)/2): return sign+"(sqrt(5)-1)/2"
if isnear(math.sqrt((math.sqrt(5)+5.0)/2)): return sign+"sqrt((sqrt(5)+5)/2)"
if isnear(math.atan((math.sqrt(5)+1.0)/2)): return sign+"atan((sqrt(5)+1)/2)"
if isnear(math.atan((math.sqrt(5)-1.0)/2)): return sign+"atan((sqrt(5)-1)/2)"
if isnear(math.pi-math.atan((math.sqrt(5)+1.0)/2)): return sign+"(pi-atan((sqrt(5)+1)/2))"
if isnear(math.pi-math.atan((math.sqrt(5)-1.0)/2)): return sign+"(pi-atan((sqrt(5)-1)/2))"
for div in range(2,20):
if isnear(div): return sign+"%d" % div
if isnear(1.0/div): return sign+"1/%d" % div
if isnear(math.sqrt(div)): return sign+"sqrt(%d)" % div
if isnear(1.0/math.sqrt(div)): return sign+"1/sqrt(%d)" % div
if isnear(math.pi/div): return sign+"pi/%d" % div
if isnear(math.atan(div)): return sign+"atan(%d)" % div
if isnear(math.pi-math.atan(div)): return sign+"(pi-atan(%d))" % div
if isnear(math.atan(1.0/div)): return sign+"atan(1/%d)" % div
if isnear(math.pi-math.atan(1.0/div)): return sign+"(pi-atan(1/%d))" % div
if isnear(math.atan(1.0/div)/2): return sign+"atan(1/%d)/2" % div
if isnear((math.pi-math.atan(1.0/div))/2): return sign+"(pi-atan(1/%d))/2" % div
if isnear(math.pi-math.atan(1.0/div)/2): return sign+"(pi-atan(1/%d)/2)" % div
        if isnear((math.pi+math.atan(1.0/div))/2): return sign+"(pi+atan(1/%d))/2" % div
if isnear(math.atan(math.sqrt(div))): return sign+"atan(sqrt(%d))" % div
if isnear(math.pi-math.atan(math.sqrt(div))): return sign+"(pi-atan(sqrt(%d)))" % div
if isnear(math.atan(1.0/math.sqrt(div))): return sign+"atan(1/sqrt(%d))" % div
if isnear(math.pi-math.atan(1.0/math.sqrt(div))): return sign+"(pi-atan(1/sqrt(%d)))" % div
if isnear(math.atan(1.0/math.sqrt(div))/2): return sign+"atan(1/sqrt(%d))/2" % div
if isnear((math.pi-math.atan(1.0/math.sqrt(div)))/2): return sign+"(pi-atan(1/sqrt(%d)))/2" % div
if isnear(math.pi-math.atan(1.0/math.sqrt(div))/2): return sign+"(pi-atan(1/sqrt(%d))/2)" % div
        if isnear((math.pi+math.atan(1.0/math.sqrt(div)))/2): return sign+"(pi+atan(1/sqrt(%d)))/2" % div
for div in range(2,20):
for mul in range(2,19):
if div==mul:
continue
if isnear(float(div)/mul): return sign+"%d/%d" % (div,mul)
if isnear(math.sqrt(div)/mul): return sign+"sqrt(%d)/%d" % (div,mul)
if isnear(mul*math.pi/div): return sign+"%d*pi/%d" % (mul,div)
for div in range(2,20):
for mul in range(2,19):
if div==mul:
continue
if isnear(math.atan(float(mul)/div)): return sign+"atan(%d/%d)" % (mul, div)
if isnear(math.pi-math.atan(float(mul)/div)): return sign+"(pi-atan(%d/%d))" % (mul, div)
if isnear(math.atan(float(mul)/div)/2): return sign+"atan(%d/%d)/2" % (mul, div)
if isnear((math.pi-math.atan(float(mul)/div))/2): return sign+"(pi-atan(%d/%d))/2" % (mul, div)
if isnear(math.pi-math.atan(float(mul)/div)/2): return sign+"(pi-atan(%d/%d)/2)" % (mul, div)
if isnear((math.pi+math.atan(float(mul)/div))/2): return sign+"(pi+atan(%d/%d))/2" % (mul, div)
if isnear(math.atan(float(mul)/math.sqrt(div))): return sign+"atan(%d/sqrt(%d))" % (mul, div)
if isnear(math.pi-math.atan(float(mul)/math.sqrt(div))): return sign+"(pi-atan(%d/sqrt(%d)))" % (mul, div)
if isnear(math.atan(float(mul)/math.sqrt(div))/2): return sign+"atan(%d/sqrt(%d))/2" % (mul, div)
if isnear((math.pi-math.atan(float(mul)/math.sqrt(div)))/2): return sign+"(pi-atan(%d/sqrt(%d)))/2" % (mul, div)
if isnear(math.pi-math.atan(float(mul)/math.sqrt(div))/2): return sign+"(pi-atan(%d/sqrt(%d))/2)" % (mul, div)
            if isnear((math.pi+math.atan(float(mul)/math.sqrt(div)))/2): return sign+"(pi+atan(%d/sqrt(%d)))/2" % (mul, div)
return str(num)
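# Illustrative usage (comment only; matches are approximate, within 1e-5):
#   namednumber(0.7853981634)  # -> 'pi/4'
#   namednumber(1.6180339887)  # -> '(sqrt(5)+1)/2'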
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Text diff/match analyzer API.
"""
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: contact@golismero-project.com
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = ["get_diff_ratio", "MatchingAnalyzer"]
from difflib import SequenceMatcher
#------------------------------------------------------------------------------
def get_diff_ratio(text1, text2):
"""
Compare two texts and return a floating point value between 0 and 1 with
the difference ratio, with 0 being absolutely different and 1 being
absolutely equal - the more similar the two texts are, the closer the ratio
will be to 1.
:param text1: First text to compare.
:type text1: str
:param text2: Second text to compare.
:type text2: str
:returns: Floating point value between 0 and 1.
:rtype: float
"""
# Solve some trivial type errors (like using None).
if not text1:
text1 = ""
if not text2:
text2 = ""
# Check for type errors we can't fix.
if not isinstance(text1, basestring):
raise TypeError("Expected string, got %r instead" % type(text1))
if not isinstance(text2, basestring):
raise TypeError("Expected string, got %r instead" % type(text2))
# Trivial case, the two texts are identical.
if text1 == text2:
return 1.0
# Use the difflib sequence matcher to calculate the ratio.
m = SequenceMatcher(a=text1, b=text2)
return m.ratio()
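# Illustrative usage (comment only):
#   get_diff_ratio("abc", "abc")  # -> 1.0 (identical texts short-circuit)
#   get_diff_ratio("abc", "xyz")  # -> 0.0 (no characters in common)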
#------------------------------------------------------------------------------
class MatchingAnalyzerElement(object):
"""
Match element of the :ref:`MatchingAnalyzer`.
:ivar text: Text.
:type text: str
:ivar ratio: Difference ratio against the base text.
:type ratio: float
"""
#--------------------------------------------------------------------------
def __init__(self, text, ratio, attrs):
"""
:param text: Text.
:type text: str
:param ratio: Difference ratio against the base text.
:type ratio: float
:param attrs: Custom attributes dictionary.
:type attrs: dict(str -> \\*)
"""
self.text = text
self.ratio = ratio
self.__attrs = attrs
#--------------------------------------------------------------------------
def __getattr__(self, name):
return self.__attrs[name]
#------------------------------------------------------------------------------
class MatchingAnalyzer(object):
"""
Text matching analyzer.
Compares any number of texts from a base text and generates
an iterator with those that are sufficiently different.
"""
#--------------------------------------------------------------------------
def __init__(self, base_text, min_ratio = 0.52, min_deviation = 1.15):
"""
:param base_text: Base text to be used for comparisons.
:type base_text: str
:param min_ratio: Minimum diff ratio to consider two texts as different.
:type min_ratio: float
:param min_deviation: Minimum deviation from the average to consider
texts to be unique.
:type min_deviation: float
"""
if not base_text:
raise ValueError("Base text cannot be empty")
if not isinstance(base_text, basestring):
            raise TypeError("Expected string, got %r instead" % type(base_text))
if not isinstance(min_ratio, float):
raise TypeError("Expected float, got %r instead" % type(min_ratio))
if not isinstance(min_deviation, float):
raise TypeError("Expected float, got %r instead" % type(min_deviation))
self.__base_text = base_text
self.__min_ratio = min_ratio
self.__min_deviation = min_deviation
self.__matches = []
self.__unique_strings = None
self.__average_ratio = None
#--------------------------------------------------------------------------
@property
def base_text(self):
"""
:returns: Base text to be used for comparisons.
:rtype: str
"""
return self.__base_text
#--------------------------------------------------------------------------
@property
def min_ratio(self):
"""
:returns: Minimum diff ratio to consider two texts as different.
:rtype: float
"""
return self.__min_ratio
#--------------------------------------------------------------------------
@property
def min_deviation(self):
"""
:returns: Minimum deviation from the average to consider
texts to be unique.
:rtype: float
"""
return self.__min_deviation
#--------------------------------------------------------------------------
def analyze(self, text, **kwargs):
"""
        If the text is sufficiently similar to the base text, store it along
        with any extra values passed as \\*\\*kwargs and associated with it.
        :param text: Text to compare with the base text.
        :type text: str
        :returns: True if the text was accepted, False otherwise.
:rtype: bool
"""
# Ignore empty text.
if text:
# Calculate the diff ratio.
ratio = get_diff_ratio(self.__base_text, text)
            # If it's above our minimum ratio...
if ratio > self.__min_ratio:
# Invalidate the caches.
self.__clear_caches()
# Save the results.
match = MatchingAnalyzerElement(text, ratio, kwargs)
self.__matches.append(match)
# Text accepted.
return True
# Text rejected.
return False
#--------------------------------------------------------------------------
def __clear_caches(self):
self.__average_ratio = None
self.__unique_strings = None
#--------------------------------------------------------------------------
@property
def average_ratio(self):
"""
:returns: Average diff ratio.
:rtype: float
"""
# If the cache is empty, calculate.
if self.__average_ratio is None:
if self.__matches:
ratios = sum(match.ratio for match in self.__matches)
count = len(self.__matches)
self.__average_ratio = float(ratios) / float(count)
else:
self.__average_ratio = 0.0
# Return the cached value.
return self.__average_ratio
#--------------------------------------------------------------------------
@property
def unique_texts(self):
"""
:returns: List of unique texts.
:rtype: list(str)
"""
# If the cache is empty, calculate.
if self.__unique_strings is None:
self.__calculate_unique_texts()
# Return results from the cache.
return list(self.__unique_strings)
#--------------------------------------------------------------------------
def __calculate_unique_texts(self):
# Empty results list.
self.__unique_strings = []
# Get the average deviation.
average = self.average_ratio
        # Skip if the ratio is 0.
if average:
# Optimization.
append = self.__unique_strings.append
deviation = self.__min_deviation
# For each match element...
for match in self.__matches:
# Get the ratio and calculate the max deviation.
ratio = match.ratio
deviated = ratio * deviation
# Skip matches under the max deviation.
if not (ratio < average < deviated):
# Append the result.
append(match)
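#------------------------------------------------------------------------------
# Minimal usage sketch (comment only; texts and the 'url' attribute are made up):
#
#   analyzer = MatchingAnalyzer("baseline page content")
#   analyzer.analyze("some other page body", url="/a")
#   analyzer.analyze("yet another page body", url="/b")
#   for element in analyzer.unique_texts:
#       print element.url, element.ratio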
|
# -*- coding: utf-8 -*-
import sys
from library.components.SensorFactory import SensorFactory as SensorFactory
from library.components.JobModule import JobModule as Job
from collections import defaultdict
import time
import json
# Add the project folder to the Python path.
sys.path.append('../../')
class FileLogMetaData(Job):
def __init__(self, specification):
super(FileLogMetaData, self).__init__(specification)
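    # Illustrative specification shape (an assumption based on how run() uses it):
    #   {"SomeSensor": "someMetaDataKey"} maps a sensor name to the metadata entry to log.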
def run(self):
senseFact = SensorFactory()
for sensorName in self.specification:
sensor = senseFact.getSensor(sensorName)
if (sensor is not False):
metaData = sensor.getMetaData()
data = defaultdict(list)
log = defaultdict(list)
timestamp = int(time.time())
data['unit'] = metaData[self.specification[sensorName]].getUnit()
data['value'] = metaData[self.specification[sensorName]].getValue()
data['sensor'] = sensorName
data['metaData'] = self.specification[sensorName]
log[timestamp] = data
                # Append the log entry to the log file.
                with open("log.txt", "a") as fo:
                    fo.write(json.dumps(log) + "\n")
|
from society import UKSociety, HighValencyTester
from society.test import TestQueue
import random
class TwoTrackTester(UKSociety):
CONTACT_VALENCY_THRESHOLD = 22 # vF
PROPORTION_FAST_TRACK = 0.5
DAYS_TO_CONTACTS_SECOND_TEST = 3
MIN_CONTACTS_TEST = 0
def __init__(self, **kwargs):
UKSociety.__init__(self, **kwargs)
self.fast_track = TestQueue()
self.slow_track = TestQueue()
self.queues = (self.fast_track, self.slow_track)
def act_on_test(self, test, test_contacts=False):
UKSociety.act_on_test(self, test, test_contacts=True)
if not test.positive and test.notes == 'contact':
if random.random() < self.cfg.PROB_ISOLATE_IF_TRACED:
test.person.isolate()
self.get_test_request(test.person, notes='contact part two',
priority=True, days_delayed_start=self.DAYS_TO_CONTACTS_SECOND_TEST)
def screen_contact_for_testing(self, c, do_test=None):
if len(c.contacts) >= self.CONTACT_VALENCY_THRESHOLD:
self.get_test_request(c, notes='contact', priority=True)
def get_test_request(self, person, notes='', priority=False, days_delayed_start=0):
if len(person.contacts) < self.MIN_CONTACTS_TEST:
return
if random.random() < self.cfg.PROB_TEST_IF_REQUESTED:
if not self.currently_testing(person):
args = (person, notes, self.cfg.TEST_DAYS_ELAPSED, False, days_delayed_start)
if priority:
self.fast_track.add_test(*args)
else:
self.slow_track.add_test(*args)
def add_test(self, person, notes, front_of_queue=False):
raise NotImplementedError
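    # Split the per-step testing capacity: the fast track gets PROPORTION_FAST_TRACK of the
    # budget and the slow track gets whatever capacity is left over.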
def set_actionable_tests(self, max_processed):
fast_max = int(max_processed * self.PROPORTION_FAST_TRACK)
self.fast_track.completed_tests = self.fast_track.pick_actionable_tests(fast_max)
slow_max = max_processed - len(self.fast_track.completed_tests)
self.slow_track.completed_tests = self.slow_track.pick_actionable_tests(slow_max)
class TwoTrackTesterofSymptoms(TwoTrackTester):
def get_test_request(self, person, notes='', priority=False, days_delayed_start=0):
if notes == 'symptoms' and len(person.contacts) > self.CONTACT_VALENCY_THRESHOLD:
assert not priority
priority = True
TwoTrackTester.get_test_request(self, person,
notes=notes,
priority=priority,
days_delayed_start=days_delayed_start)
class TwoTrackSystem(TwoTrackTesterofSymptoms):
VALENCY_TEST_FREQUENCY_DAYS = 0
GENERAL_VALENCY_THRESHOLD = 20
def manage_outbreak(self, population, max_processed=None):
HighValencyTester.handle_high_valencies(self, population)
UKSociety.manage_outbreak(self, population)
def handle_connected_person(self, person):
if not self.currently_testing(person):
if not self.fast_track.contains_planned_test_of(person):
self.fast_track.add_test(person, 'valency', self.cfg.TEST_DAYS_ELAPSED,
days_delayed_start=self.VALENCY_TEST_FREQUENCY_DAYS)
|
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Car(models.Model):
vin = models.CharField(verbose_name='Vin', db_index=True, unique=True, max_length=64)
color = models.CharField(verbose_name='Color', max_length=64)
brand = models.CharField(verbose_name='Brand', max_length=64)
CAR_TYPES = (
(1, 'Седан'),
(2, 'Хечбэк'),
(3, 'Универсал'),
(4, 'Купе')
)
car_type = models.IntegerField(verbose_name='Car Type', choices=CAR_TYPES)
user = models.ForeignKey(User, verbose_name='Пользователь', on_delete=models.CASCADE)
year = models.PositiveSmallIntegerField(verbose_name='Год', null=True)
volume = models.PositiveIntegerField(verbose_name='Объём', null=True)
|
from transformers import BertTokenizer,RobertaTokenizer, AutoTokenizer
import json
import torch
import os
import numpy as np
from formatter.Basic import BasicFormatter
import random
class HierarchyFormatter(BasicFormatter):
def __init__(self, config, mode, *args, **params):
super().__init__(config, mode, *args, **params)
self.max_len = config.getint("train", "max_len")
self.mode = mode
# self.tokenizer = BertTokenizer.from_pretrained('hfl/chinese-roberta-wwm-ext')
self.tokenizer = AutoTokenizer.from_pretrained("hfl/chinese-legal-electra-large-discriminator")
labels = json.load(open(config.get('data', 'label2num'), 'r'))
self.label2id = {'NA': 0}
for l in labels:
if labels[l] >= 10:
self.label2id[l] = len(self.label2id)
self.label2id_2 = {'NA': 0}
self.label32id2 = {'NA': 0}
for l in self.label2id:
if l == 'NA':
continue
key = l.split('/')
key = key[0] + '/' + key[1]
if key not in self.label2id_2:
self.label2id_2[key] = len(self.label2id_2)
self.label32id2[l] = self.label2id_2[key]
self.prefix = self.tokenizer.convert_tokens_to_ids(['[CLS]'] * 10)
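        # Binary mapping matrix: map[i, j] == 1 iff fine-grained label i belongs to
        # coarse-grained label j (the first two segments of the label path).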
self.map = np.zeros((len(self.label2id), len(self.label2id_2)))
self.map[0,0] = 1
for l in self.label2id:
if l == 'NA':
continue
key = l.split('/')
key = key[0] + '/' + key[1]
self.map[self.label2id[l],self.label2id_2[key]] = 1
def process(self, data, config, mode, *args, **params):
inputx = []
mask = []
label = []
label_2 = []
for paras in data:
for para in paras:
if len(para['label']) == 0:
label.append(self.label2id['NA'])
label_2.append(self.label2id_2['NA'])
else:
label.append(self.label2id[random.choice(para['label'])])
label_2.append(self.label32id2[random.choice(para['label'])])
tokens = self.tokenizer.encode(para['para'], max_length=self.max_len - 11, add_special_tokens=False, truncation=True)
tokens = self.prefix + tokens + [self.tokenizer.sep_token_id]
mask.append([1] * len(tokens) + [0] * (self.max_len - len(tokens)))
tokens += [self.tokenizer.pad_token_id] * (self.max_len - len(tokens))
inputx.append(tokens)
gatt = np.zeros((len(inputx), self.max_len))
gatt[:, :10] = 1
return {
'input': torch.LongTensor(inputx),
'mask': torch.LongTensor(mask),
'label': torch.LongTensor(label),
'label2': torch.LongTensor(label_2),
'map': torch.FloatTensor(self.map),
'gAtt': torch.LongTensor(gatt),
}
|
#!/usr/bin/python
import datetime,redis,ConfigParser
import xml.etree.ElementTree as ET
import requests
from walrus import *
from requests.auth import HTTPBasicAuth
import argparse
db = redis.Redis('localhost')
def ConfigSectionMap(section):
dict1 = {}
options = Config.options(section)
for option in options:
try:
dict1[option] = Config.get(section, option)
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
Config = ConfigParser.ConfigParser()
Config.read("/opt/pi-disco/netdisco.conf")
srx_ip = ConfigSectionMap("SRX")['ip']
username = ConfigSectionMap("SRX")['webapi_username']
password = ConfigSectionMap("SRX")['webapi_password']
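# NOTE: logon()/logoff() populate the module-level XML elements (root, user, attributes,
# device) that are rebuilt in the Redis pubsub loop below before each call.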
def logon(dev_username,ip,redis_mac_vendor,redis_device_model,redis_os,redis_os_version,redis_category,customtags):
timestamp = str(datetime.datetime.now().isoformat())
source = ET.Element("source")
source.text = "NetDisco Agent"
user.insert(0,source)
time = ET.Element("timestamp")
time.text = timestamp + "z"
user.insert(1,time)
operation = ET.Element("operation")
operation.text = "logon"
user.insert(2,operation)
address = ET.Element("IP")
address.text = str(ip)
user.insert(3,address)
domain = ET.Element("domain")
domain.text = "NetDisco"
user.insert(4,domain)
name = ET.Element("user")
name.text = str(dev_username)
user.insert(5,name)
posture = ET.Element("posture")
posture.text = "Healthy"
user.insert(6,posture)
#Add Device attributes into XML
hostname = ET.Element("value")
hostname.text = str(dev_username)
device.insert(0,hostname)
#Add role assignments into XML
#if role is not None:
# for role in args.role:
# ET.SubElement(roles, "role").text = role
#if groups is not None:
# for group in args.groups:
# ET.SubElement(device, "group").text = group
type = ET.Element("device-category")
type.text = str(redis_category)
attributes.insert(0,type)
vendor = ET.Element("device-vendor")
vendor.text = str(redis_mac_vendor)
attributes.insert(1,vendor)
model = ET.Element("device-model")
model.text = str(redis_device_model)
attributes.insert(2,model)
os = ET.Element("device-os")
os.text = str(redis_os)
attributes.insert(3,os)
version = ET.Element("device-os-version")
version.text = str(redis_os_version)
attributes.insert(4,version)
if customtags is not None:
for entry in customtags:
ET.SubElement(attributes, entry).text = customtags[entry]
return;
def logoff(dev_username,ip):
#Generate XML file for upload
timestamp = str(datetime.datetime.now().isoformat())
source = ET.Element("source")
source.text = "Aruba ClearPass"
user.insert(0,source)
time = ET.Element("timestamp")
time.text = timestamp + "z"
user.insert(1,time)
operation = ET.Element("operation")
operation.text = 'logoff'
user.insert(2,operation)
address = ET.Element("IP")
address.text = str(ip)
user.insert(3,address)
name = ET.Element("user")
name.text = dev_username
user.insert(4,name)
return;
def generatexml():
tree = ET.ElementTree(root)
xml = "<?xml version=\"1.0\"?>" + ET.tostring(root)
headers = {'Content-Type': 'application/xml'}
url = 'http://'+srx_ip+':8080/api/userfw/v1/post-entry'
#print xml
print requests.post(url, auth=HTTPBasicAuth(username,password), data=xml, headers=headers).text
#Redis subscribe
r = redis.StrictRedis()
pubsub = r.pubsub()
pubsub.psubscribe('__keyspace@0__:device_*')
print "Subscribing to local Redis database for device update information"
#Initialize a local Dictionary to track device and IP assignment if known
state_db = {}
for msg in pubsub.listen():
type = msg['data']
device_id = str(msg['channel'].split(':')[1])
if type == 'hset':
customtags = {}
root = ET.Element("userfw-entries")
user = ET.SubElement(root, "userfw-entry")
roles = ET.SubElement(user, "role-list")
attributes = ET.SubElement(user, "end-user-attribute")
device = ET.SubElement(attributes, "device-identity")
dev_username = device_id.split('_')[1]
type = msg['data']
ip = db.hget(device_id,'requested_addr')
redis_mac_vendor = db.hget(device_id,'mac_vendor')
redis_device_model = db.hget(device_id,'device_model')
redis_os = db.hget(device_id,'os')
redis_os_version = db.hget(device_id,'os_version')
redis_category = db.hget(device_id,'category')
customtags['Deny'] = db.hget(device_id,'deny')
customtags['MAC-Address'] = db.hget(device_id,'mac')
customtags['Switch-Serial'] = db.hget(device_id,'source_switch_serial')
customtags['Switch-Name'] = db.hget(device_id,'source_switch')
customtags['Switch-Interface'] = db.hget(device_id,'source_interface')
customtags['Switch-VLAN'] = db.hget(device_id,'source_vlan')
customtags['Is-Mobile'] = db.hget(device_id,'is_mobile')
customtags['Hostname'] = db.hget(device_id,'hostname')
customtags['Options-List'] = db.hget(device_id,'options_list')
customtags['DHCP-Lease-Expire'] = db.hget(device_id,'lease_expire')
customtags['DHCP-Last-Request'] = db.hget(device_id,'last_dhcp_req')
customtags['DHCP-Vendor'] = db.hget(device_id,'vendor_class_id')
customtags['TTL'] = db.hget(device_id,'ttl')
customtags['TTL-OS-Guess'] = db.hget(device_id,'ttl_os_guess')
customtags['Status'] = db.hget(device_id,'status')
customtags['Browser'] = db.hget(device_id,'browser_family')
customtags['Redis_Key'] = device_id
state_db[dev_username] = ip
logon(dev_username,ip,redis_mac_vendor,redis_device_model,redis_os,redis_os_version,redis_category,customtags)
generatexml()
if type == 'expired':
root = ET.Element("userfw-entries")
user = ET.SubElement(root, "userfw-entry")
dev_username = device_id.split('_')[1]
print dev_username
print state_db
try:
ip = state_db[dev_username]
#print "Expired entry for:",ip
logoff(dev_username,ip)
generatexml()
del state_db[dev_username]
except KeyError:
pass
if type == 'del':
root = ET.Element("userfw-entries")
user = ET.SubElement(root, "userfw-entry")
dev_username = device_id.split('_')[1]
print state_db
try:
ip = state_db[dev_username]
#print "Deleting entry for:",ip
logoff(dev_username,ip)
generatexml()
del state_db[dev_username]
except KeyError:
pass
|
from distutils.core import setup, Extension
import glob
_DEBUG = True
_DEBUG_LEVEL = 1
_UICAL_LOG_LEVEL = 5
extra_compile_args = ["-std=c++11", "-Wall", "-Wextra", "-Wno-missing-field-initializers"]
if _DEBUG:
extra_compile_args += ["-g3", "-O0", "-DDEBUG=%s" % _DEBUG_LEVEL, "-UNDEBUG", "-DUICAL_LOG_LEVEL=%s" % _UICAL_LOG_LEVEL]
else:
extra_compile_args += ["-DNDEBUG", "-O3"]
# extra_compile_args += ["--coverage", "-lgcov"]
# extra_compile_args += ["-ftest-coverage", "-fprofile-arcs"]
uical = Extension(
'uICAL',
language = 'c++',
# define_macros = [('MAJOR_VERSION', '1'),
# ('MINOR_VERSION', '0')],
include_dirs = ['./src'],
# libraries = ['tcl83'],
# library_dirs = ['/usr/local/lib'],
sources = glob.glob('./python-module/*.cpp') + glob.glob('./src/*.cpp'),
extra_compile_args = extra_compile_args,
)
setup (
name = 'uICAL',
version = '0.0.1',
description = 'Light weight ICAL library',
author = 'Source Simian',
author_email = 'source.simian@gmail.com',
url = 'https://github.com/sourcesimian/uical',
long_description = '''
This is really just a demo package.
''',
ext_modules = [uical],
# test_require = [
# 'pytest',
# 'pytest-faulthandler',
# ],
)
|
import logging
from logging.handlers import TimedRotatingFileHandler
logFormatter = logging.Formatter("%(asctime)s - %(levelname)s :\t%(message)s")
timedHandler = TimedRotatingFileHandler(filename="./logs/stardust.log", when="m", interval=10, backupCount=0)
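# Rotate the log file every 10 minutes; with backupCount=0 old rotated files are never deleted.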
timedHandler.setFormatter(logFormatter)
stardustLogger = logging.getLogger("StardustLogger")
stardustLogger.addHandler(timedHandler)
stardustLogger.setLevel(logging.INFO)
try:
import leds.rgbLed as rgb
except:
    stardustLogger.error("Error importing led script. Check file structure.")
try:
import sensors.veml6075 as veml
except:
rgb.vemlError()
stardustLogger.error("Error importing veml6075.")
try:
import sensors.sam_m8q as sam
except:
rgb.samError()
stardustLogger.error("Error importing sam_m8q.")
try:
import sensors.scd_30 as scd
except:
rgb.scd30Error()
stardustLogger.error("Error importing scd_30.")
try:
import sensors.BMP388 as bmp
except:
rgb.bmpError()
stardustLogger.error("Error importing BMP388.")
import time
import sys
import os
def start():
try:
stardustLogger.debug("start(): Run all sensors once to remove bad reads.")
rgb.startup()
bmp.bmp388()
veml.uv()
try:
sam.gps()
except KeyError:
pass
scd.CO2()
except KeyboardInterrupt:
rgb.off()
raise KeyboardInterrupt
def main():
stardustLogger.debug("Begin startdust.py main")
start()
stardustLogger.debug("Begin mainloop")
while(True):
rgb.statusOk()
try:
stardustLogger.debug("bmp")
bmp.bmp388() #bmp to console
bmp.logPressure() #bmp log pressure
bmp.logTemperature() #bmp log temperature
time.sleep(1)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
stardustLogger.error("bmp error")
rgb.bmpError()
rgb.statusOk()
try:
stardustLogger.debug("veml")
veml.uv() #veml uv to console
time.sleep(1)
veml.logUv() #veml log uv
time.sleep(1)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
stardustLogger.error("veml error")
rgb.vemlError()
rgb.statusOk()
try:
stardustLogger.debug("sam")
sam.gps() #sam gps to console
time.sleep(1)
sam.logGps() #sam log gps
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
stardustLogger.error("sam error")
rgb.samError()
rgb.statusOk()
try:
stardustLogger.debug("scd-30")
scd.CO2() #scd CO2 to console
scd.logCO2() #scd log CO2
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
stardustLogger.error("scd-30 error")
rgb.scd30Error()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:#USE THIS BLOCK TO CLEANUP OPEN CONNECTIONS.
print("Detected Keyboard Interrupt")
rgb.off()
try:
print("trying sys.exit")
sys.exit(0)
except SystemExit:
print("doing os.exit")
logging.shutdown()
os._exit(0)
|
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import copy
import datetime as dt
import logging
import operator as op
from concurrent.futures import Future
from functools import reduce
from itertools import chain
from typing import Any, Iterable, Mapping, Optional, Tuple, Union
import pandas as pd
from gs_quant.base import InstrumentBase, Priceable, RiskKey, Sentinel
from gs_quant.risk import DataFrameWithInfo, ErrorValue, FloatWithInfo, RiskMeasure, SeriesWithInfo, aggregate_results
_logger = logging.getLogger(__name__)
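# _value_for_date (below) slices a dated (historical) result down to a single pricing
# date and re-keys it with a CloseMarket for that date, so the returned value carries
# a RiskKey consistent with the date it was extracted for.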
def _value_for_date(result: Union[DataFrameWithInfo, SeriesWithInfo], date: dt.date) -> \
Union[DataFrameWithInfo, ErrorValue, FloatWithInfo]:
from gs_quant.markets import CloseMarket
raw_value = result.loc[date]
key = result.risk_key
risk_key = RiskKey(
key.provider,
date,
CloseMarket(date=date, location=key.market.location if isinstance(key.market, CloseMarket) else None),
key.params,
key.scenario,
key.risk_measure)
if isinstance(raw_value, ErrorValue):
return raw_value
elif isinstance(raw_value, DataFrameWithInfo):
return DataFrameWithInfo(
raw_value.raw_value.reset_index(drop=True),
risk_key,
unit=result.unit,
error=result.error)
else:
return FloatWithInfo(
risk_key,
raw_value,
unit=result.unit.get(date, '') if result.unit else None,
error=result.error)
class PricingFuture(Future):
__RESULT_SENTINEL = Sentinel('PricingFuture')
def __init__(self, result: Optional[Any] = __RESULT_SENTINEL):
super().__init__()
if result is not self.__RESULT_SENTINEL:
self.set_result(result)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
:param timeout: The number of seconds to wait for the result if the future isn't done.
If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given timeout.
Exception: If the call raised then that exception will be raised.
"""
from gs_quant.markets import PricingContext
if not self.done() and PricingContext.current.active_context.is_entered:
raise RuntimeError('Cannot evaluate results under the same pricing context being used to produce them')
return super().result(timeout=timeout)
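# CompositeResultFuture (below) wraps a collection of PricingFutures and resolves once
# all of them have completed, exposing their individual results as a list.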
class CompositeResultFuture(PricingFuture):
def __init__(self, futures: Iterable[PricingFuture]):
super().__init__()
self.__futures = tuple(futures)
self.__pending = set()
for future in self.__futures:
if not future.done():
future.add_done_callback(self.__cb)
self.__pending.add(future)
if not self.__pending:
self._set_result()
def __getitem__(self, item):
return self.result()[item]
def __cb(self, future: PricingFuture):
self.__pending.discard(future)
if not self.__pending:
self._set_result()
def _set_result(self):
self.set_result([f.result() for f in self.__futures])
@property
def futures(self) -> Tuple[PricingFuture, ...]:
return self.__futures
def to_frame(self):
dates = self.dates
rm = self.risk_measures
def to_records(p: PortfolioRiskResult) -> list:
records = [to_records(res) if isinstance(res, PortfolioRiskResult) else res for res in p]
return records
def flatten_list(lst):
return [item for sublist in lst for item in sublist]
def multiple(p):
if len(p.portfolios) == 0:
return len(p.all_instruments) * len(rm) if dates else len(p.all_instruments)
if len(p.portfolios) > 0:
for i in p.portfolios:
return len(i) * multiple(i)
def pop_idx_labels(p, level, label_arr):
if len(p.portfolios) == 0:
for idx, r in enumerate(p.all_instruments):
r.name = f'{r.type.name}_{idx}' if r.name is None else r.name
if multi_risk_vector or (len(rm) > 1 and dates):
label_arr[0].extend([str(r) for r in rm] * len(p.all_instruments))
if len(label_arr) > 1:
label_arr[1].extend(flatten_list([[r.name] * len(rm) for r in p.all_instruments]))
elif risk_vector:
label_arr[0].extend(
flatten_list([r.name] * len(rm) for r in p.all_instruments))
else:
label_arr[0].extend([r.name for r in p.all_instruments])
if level > 1:
curr_level_arr = label_arr[level - 1]
for idx, r in enumerate(p.all_portfolios):
r.name = f'Portfolio_{idx}' if r.name is None else r.name
curr_level_arr.extend([r.name] * multiple(r))
pop_idx_labels(r, level - 1, label_arr)
return label_arr
record = to_records(self)
'''Check if risk object is a vector'''
risk_vector = False
multi_risk_vector = False
if len(rm) > 1:
multi_risk_vector = all(
[isinstance(record[idx][r], DataFrameWithInfo) for r in rm for idx, _ in enumerate(record)])
else:
risk_vector = any([isinstance(record[idx], DataFrameWithInfo) for idx, _ in enumerate(record)])
'''Populate index labels'''
port_depth = len(max(self.portfolio.all_paths, key=len))
port_depth = port_depth + 1 if ((dates and len(rm) > 1) or multi_risk_vector) else port_depth
idx_labels = pop_idx_labels(self.portfolio, port_depth, [[] for _ in range(port_depth)])
idx_labels.reverse()
if risk_vector or multi_risk_vector:
'''Handle results for risk vectors'''
combine_names = ['_'.join(list(name)) for name in list(zip(*idx_labels))]
dfs_list = [pd.DataFrame(rec) for rec in record] if risk_vector else [pd.DataFrame(rec[r]) for r in rm for
rec in record]
join_on = ['date', 'mkt_type', 'mkt_asset', 'mkt_class', 'mkt_point'] \
if isinstance(self.dates[0], dt.date) else ['mkt_type', 'mkt_asset', 'mkt_class', 'mkt_point']
df = reduce(lambda df1, df2: pd.merge(df1, df2, on=join_on, how='outer'), dfs_list)
cols = ['mkt_type', 'mkt_asset', 'mkt_class', 'mkt_point']
cols.extend(combine_names)
df.columns = cols
else:
'''Handle results for risk scalars'''
'''Case with risk values calculated over a range of dates'''
if dates:
df = pd.concat([pd.DataFrame(rec) for rec in record], axis=1)
'''Ensure dates are always the index'''
index_is_dts = [idx for idx in df.index] == [idx for idx in dates]
index_is_reversed_dts = [idx for idx in df.index] == [idx for idx in dates][::-1]
df = df.transpose() if not (index_is_dts or index_is_reversed_dts) else df
df.columns = pd.MultiIndex.from_tuples(list(zip(*idx_labels)))
else:
if len(self.portfolio.all_portfolios) == 0:
return pd.DataFrame(record, columns=rm, index=[p.name for p in self.portfolio])
else:
df = pd.DataFrame(record)
df.index = pd.MultiIndex.from_tuples(list(zip(*idx_labels)))
df.columns = rm
return df
class MultipleRiskMeasureResult(dict):
def __init__(self, instrument, dict_values: Iterable):
super().__init__(dict_values)
self.__instrument = instrument
def __getitem__(self, item):
if isinstance(item, dt.date):
if all(isinstance(v, (DataFrameWithInfo, SeriesWithInfo)) for v in self.values()):
return MultipleRiskMeasureResult(self.__instrument, ((k, _value_for_date(v, item))
for k, v in self.items()))
else:
raise ValueError('Can only index by date on historical results')
else:
return super().__getitem__(item)
def __mul__(self, other):
if isinstance(other, (int, float)):
return self.__op(op.mul, other)
else:
            raise ValueError('Can only multiply by an int or float')
def __add__(self, other):
if isinstance(other, (int, float)):
return self.__op(op.add, other)
elif isinstance(other, MultipleRiskMeasureResult):
if sorted(self.keys()) == sorted(other.keys()):
from gs_quant.markets.portfolio import Portfolio
return PortfolioRiskResult(
Portfolio((self.__instrument, other.__instrument)),
self.keys(),
                    tuple(MultipleRiskMeasureFuture(r.__instrument, dict((k, PricingFuture(v)) for k, v in r.items()))
for r in (self, other))
)
elif set(self.keys()).isdisjoint(other.keys()) and self.__instrument == other.__instrument:
if set(self.keys()).intersection(other.keys()):
raise ValueError('Keys must be disjoint')
return MultipleRiskMeasureResult(self.__instrument, chain(self.items(), other.items()))
else:
                raise ValueError('Can only add where risk_measures match or instrument identical & '
                                 'risk_measures disjoint')
else:
raise ValueError('Can only add instances of MultipleRiskMeasureResult or int, float')
def __op(self, operator, operand):
values = {}
for key, value in self.items():
if isinstance(value, pd.DataFrame):
new_value = value.copy()
new_value.value = operator(value.value, operand)
else:
new_value = operator(value, operand)
values[key] = new_value
return MultipleRiskMeasureResult(self.__instrument, values)
@property
def instrument(self):
return self.__instrument
@property
def dates(self) -> Tuple[dt.date, ...]:
dates = set()
for value in self.values():
if isinstance(value, (DataFrameWithInfo, SeriesWithInfo)):
dates.update(value.index)
return tuple(sorted(dates))
def to_frame(self):
lst = [self[r] for r in self]
if isinstance(lst[0], DataFrameWithInfo):
join_on = ['date', 'mkt_type', 'mkt_asset', 'mkt_class', 'mkt_point'] \
if isinstance(self.dates[0], dt.date) else ['mkt_type', 'mkt_asset', 'mkt_class', 'mkt_point']
return reduce(lambda df1, df2: pd.merge(df1, df2, on=join_on, how='outer'), lst)
else:
return pd.DataFrame(self)
class MultipleRiskMeasureFuture(CompositeResultFuture):
def __init__(self, instrument: InstrumentBase, measures_to_futures: Mapping[RiskMeasure, PricingFuture]):
self.__measures_to_futures = measures_to_futures
self.__instrument = instrument
super().__init__(measures_to_futures.values())
def _set_result(self):
self.set_result(MultipleRiskMeasureResult(self.__instrument,
zip(self.__measures_to_futures.keys(),
(f.result() for f in self.futures))))
@property
def measures_to_futures(self) -> Mapping[RiskMeasure, PricingFuture]:
return self.__measures_to_futures
class HistoricalPricingFuture(CompositeResultFuture):
def _set_result(self):
results = [f.result() for f in self.futures]
base = next((r for r in results if not isinstance(r, (ErrorValue, Exception))), None)
if base is None:
_logger.error(f'Historical pricing failed: {results[0]}')
self.set_result(results[0])
else:
result = MultipleRiskMeasureResult(base.instrument,
{k: base[k].compose(r[k] for r in results) for k in base.keys()}) \
if isinstance(base, MultipleRiskMeasureResult) else base.compose(results)
self.set_result(result)
class PortfolioPath:
def __init__(self, path):
self.__path = (path,) if isinstance(path, int) else path
def __repr__(self):
return repr(self.__path)
def __iter__(self):
return iter(self.__path)
def __len__(self):
return len(self.__path)
def __add__(self, other):
return PortfolioPath(self.__path + other.__path)
def __eq__(self, other):
return self.__path == other.__path
def __hash__(self):
return hash(self.__path)
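    # __call__ (below) walks this path through nested futures/containers to the element
    # it addresses, resolving intermediate PricingFutures along the way; optionally the
    # result is renamed to its parent container's name.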
def __call__(self, target, rename_to_parent: Optional[bool] = False):
parent = None
path = list(self.__path)
while path:
elem = path.pop(0)
parent = target if len(self) - len(path) > 1 else None
target = target.futures[elem] if isinstance(target, CompositeResultFuture) else target[elem]
if isinstance(target, PricingFuture) and path:
target = target.result()
if rename_to_parent and parent and getattr(parent, 'name', None):
target = copy.copy(target)
target.name = parent.name
return target
class PortfolioRiskResult(CompositeResultFuture):
def __init__(self,
portfolio,
risk_measures: Iterable[RiskMeasure],
futures: Iterable[PricingFuture]):
super().__init__(futures)
self.__portfolio = portfolio
self.__risk_measures = tuple(risk_measures)
def __getitem__(self, item):
futures = []
if isinstance(item, RiskMeasure) or (isinstance(item, list) and isinstance(item[0], RiskMeasure)):
'''Slicing a list of risk measures'''
if isinstance(item, list):
if any([it not in self.risk_measures for it in item]):
raise ValueError('{} not computed'.format(item))
else:
if item not in self.risk_measures:
raise ValueError('{} not computed'.format(item))
if len(self.risk_measures) == 1:
return self
elif isinstance(item, list):
return PortfolioRiskResult(self.__portfolio, tuple([it for it in item]), self.futures)
else:
return PortfolioRiskResult(self.__portfolio, (item,), self.futures)
        # Inputs from Excel always arrive as a list
        # Catch lists of length 1 so that they don't return a sub-PortfolioRiskResult
elif isinstance(item, list) and len(item) == 1:
return self.__results(items=item[0])
elif isinstance(item, list) and all([isinstance(it, InstrumentBase) for it in item]):
'''Slicing a list of instruments'''
from gs_quant.markets.portfolio import Portfolio
portfolio = Portfolio(self.__portfolio[item])
for idx, result in enumerate(self):
instr = self.portfolio[idx]
futures.extend([PricingFuture(result) for it in item if instr == it])
return PortfolioRiskResult(portfolio, self.risk_measures, futures)
elif isinstance(item, dt.date):
for result in self:
if isinstance(result, (MultipleRiskMeasureResult, PortfolioRiskResult)):
futures.append(PricingFuture(result[item]))
elif isinstance(result, (DataFrameWithInfo, SeriesWithInfo)):
futures.append(PricingFuture(_value_for_date(result, item)))
else:
raise RuntimeError('Can only index by date on historical results')
return PortfolioRiskResult(self.__portfolio, self.risk_measures, futures)
else:
return self.__results(items=item)
def __contains__(self, item):
if isinstance(item, RiskMeasure):
return item in self.__risk_measures
elif isinstance(item, dt.date):
return item in self.dates
else:
return item in self.__portfolio
def __len__(self):
return len(self.futures)
def __iter__(self):
return iter(self.__results())
def __mul__(self, other):
if isinstance(other, (int, float)):
            futures = [f * other if isinstance(f, PortfolioRiskResult) else PricingFuture(f.result() * other)
for f in self.futures]
return PortfolioRiskResult(self.__portfolio, self.__risk_measures, futures)
else:
            raise ValueError('Can only multiply by an int or float')
def __add__(self, other):
if isinstance(other, (int, float)):
futures = [f + other if isinstance(f, PortfolioRiskResult) else PricingFuture(f.result() + other)
for f in self.futures]
return PortfolioRiskResult(self.__portfolio, self.__risk_measures, futures)
elif isinstance(other, PortfolioRiskResult):
if sorted(self.__risk_measures) == sorted(other.__risk_measures):
return PortfolioRiskResult(
self.__portfolio + other.__portfolio,
self.__risk_measures,
self.futures + other.futures)
elif set(self.__risk_measures).isdisjoint(other.__risk_measures) and self.__portfolio == other.__portfolio:
futures = []
risk_measures = self.__risk_measures + other.__risk_measures
risk_measure = self.__risk_measures[0] if len(self.__risk_measures) == 1 else None
other_measure = other.__risk_measures[0] if len(other.__risk_measures) == 1 else None
for priceable, future, other_future in zip(self.__portfolio, self.futures, other.futures):
if isinstance(future, PortfolioRiskResult) and isinstance(other_future, PortfolioRiskResult):
futures.append(future + other_future)
else:
if risk_measure:
future = MultipleRiskMeasureFuture(priceable, {risk_measure: future})
if other_measure:
other_future = MultipleRiskMeasureFuture(priceable, {other_measure: other_future})
risk_measure_futures = [future.measures_to_futures.get(m) or other_future.measures_to_futures[m]
for m in risk_measures]
futures.append(MultipleRiskMeasureFuture(priceable,
dict(zip(risk_measures, risk_measure_futures))))
return PortfolioRiskResult(self.__portfolio, risk_measures, futures)
else:
                raise ValueError('Can only add where risk_measures match or portfolios identical & '
                                 'risk_measures disjoint')
else:
raise ValueError('Can only add instances of PortfolioRiskResult or int, float')
@property
def portfolio(self):
return self.__portfolio
@property
def risk_measures(self) -> Tuple[RiskMeasure, ...]:
return self.__risk_measures
@property
def dates(self) -> Tuple[dt.date, ...]:
dates = set()
for result in self.__results():
if isinstance(result, (MultipleRiskMeasureResult, PortfolioRiskResult)):
dates.update(result.dates)
elif isinstance(result, (pd.DataFrame, pd.Series)):
dates.update(result.index)
try:
return tuple(sorted(dates))
except TypeError:
return tuple()
def result(self, timeout: Optional[int] = None):
super().result(timeout=timeout)
return self
def subset(self, items: Iterable[Union[int, str, PortfolioPath, Priceable]], name: Optional[str] = None):
paths = tuple(chain.from_iterable((i,) if isinstance(i, PortfolioPath) else self.__paths(i) for i in items))
sub_portfolio = self.__portfolio.subset(paths, name=name)
return PortfolioRiskResult(sub_portfolio, self.risk_measures, [p(self.futures) for p in paths])
def aggregate(self) -> Union[float, pd.DataFrame, pd.Series, MultipleRiskMeasureResult]:
if len(self.__risk_measures) > 1:
return MultipleRiskMeasureResult(self.portfolio, ((r, self[r].aggregate()) for r in self.__risk_measures))
else:
return aggregate_results(self.__results())
def __paths(self, items: Union[int, slice, str, Priceable]) -> Tuple[PortfolioPath, ...]:
if isinstance(items, int):
return PortfolioPath(items),
elif isinstance(items, slice):
return tuple(PortfolioPath(i) for i in range(len(self.__portfolio))[items])
elif isinstance(items, (str, Priceable)):
paths = self.__portfolio.paths(items)
if not paths and isinstance(items, InstrumentBase) and items.unresolved:
paths = self.__portfolio.paths(items.unresolved)
key = items.resolution_key.ex_measure
paths = tuple(p for p in paths if self.__result(p, self.risk_measures[0]).risk_key.ex_measure == key)
if not paths:
raise KeyError(f'{items} not in portfolio')
return paths
def __results(self, items: Optional[Union[int, slice, str, Priceable]] = None):
if items is None:
if len(self.__portfolio.all_portfolios) != len(self.futures):
'''Catches PortfolioRiskResult after slicing operation'''
return tuple(self.__result(p) for p in self.__portfolio.all_paths[:len(self.futures)])
else:
return tuple(self.__result(p) for p in self.__portfolio.all_paths)
paths = self.__paths(items)
return self.__result(paths[0]) if not isinstance(items, slice) else self.subset(paths)
def __result(self, path: PortfolioPath, risk_measure: Optional[RiskMeasure] = None):
res = path(self.futures).result()
if len(self.risk_measures) == 1 and not risk_measure:
risk_measure = self.risk_measures[0]
return res[risk_measure] \
if risk_measure and isinstance(res, (MultipleRiskMeasureResult, PortfolioRiskResult)) else res
|
# The API requires specific book codes, so I have to be able to translate all of them. key -> value is the best for this.
def books_dict():
return dict([("GENESIS", "GEN"),
("EXODUS", "EXO"),
("LEVITICUS", "LEV"),
("NUMBERS", "NUM"),
("DEUTERONOMY", "DEU"),
("JOSHUA", "JOS"),
("JUDGES", "JDG"),
("RUTH", "RUT"),
("1 SAMUEL", "1SA"),
("2 SAMUEL", "2SA"),
("1ST SAMUEL", "1SA"),
("2ND SAMUEL", "2SA"),
("1ST KINGS", "1KI"),
("2ND KINGS", "2KI"),
("1 KINGS", "1KI"),
("2 KINGS", "2KI"),
("1ST CHRONICLES", "1CH"),
("2ND CHRONICLES", "2CH"),
("1 CHRONICLES", "1CH"),
("2 CHRONICLES", "2CH"),
("EZRA", "EZR"),
("NEHEMIAH", "NEH"),
("ESTHER", "EST"),
("JOB", "JOB"),
("PSALMS", "PSA"),
("PROVERBS", "PRO"),
("ECCLESIASTES", "ECC"),
("SONG OF SOLOMON", "SNG"),
("ISAIAH", "ISA"),
("JEREMIAH", "JER"),
("LAMENTATIONS", "LAM"),
("EZEKIEL", "EZK"),
("DANIEL", "DAN"),
("HOSEA", "HOS"),
("JOEL", "JOL"),
("AMOS", "AMO"),
("OBADIAH", "OBA"),
("JONAH", "JON"),
("MICAH", "MIC"),
("NAHUM", "NAM"),
("HABAKKUK", "HAB"),
("ZEPHANIAH", "ZEP"),
("HAGGAI", "HAG"),
("ZECHARIAH", "ZEC"),
("MALACHI", "MAL"),
("TOBIT", "TOB"),
("JUDITH", "JDT"),
("ESTHER GREEK", "ESG"),
("WISDOM OF SOLOMON", "WIS"),
("SIRACH", "SIR"),
("BARUCH", "BAR"),
("DANIEL GREEK", "DAG"),
("1ST MACCABEES", "1MA"),
("2ND MACCABEES", "2MA"),
("1ST ESDRAS", "1ES"),
("2ND ESDRAS", "2ES"),
("1 MACCABEES", "1MA"),
("2 MACCABEES", "2MA"),
("1 ESDRAS", "1ES"),
("2 ESDRAS", "2ES"),
("3RD MACCABEES", "3MA"),
("4TH MACCABEES", "4MA"),
("PRAYER OF MANASSES", "MAN"),
("3 MACCABEES", "3MA"),
("4 MACCABEES", "4MA"),
("PSALM 151", "PS2"),
("MATTHEW", "MAT"),
("MARK", "MRK"),
("LUKE", "LUK"),
("JOHN", "JHN"),
("ACTS", "ACT"),
("ROMANS", "ROM"),
("1ST CORINTHIANS", "1CO"),
("2ND CORINTHIANS", "2CO"),
("1 CORINTHIANS", "1CO"),
("2 CORINTHIANS", "2CO"),
("GALATIANS", "GAL"),
("EPHESIANS", "EPH"),
("PHILIPPIANS", "PHP"),
("COLOSSIANS", "COL"),
("1ST THESSALONIANS", "1TH"),
("2ND THESSALONIANS", "2TH"),
("1ST TIMOTHY", "1TI"),
("2ND TIMOTHY", "2TI"),
("1 THESSALONIANS", "1TH"),
("2 THESSALONIANS", "2TH"),
("1 TIMOTHY", "1TI"),
("2 TIMOTHY", "2TI"),
("TITUS", "TIT"),
("PHILEMON", "PHM"),
("HEBREWS", "HEB"),
("JAMES", "JAS"),
("1ST PETER", "1PE"),
("2ND PETER", "2PE"),
("1ST JOHN", "1JN"),
("2ND JOHN", "2JN"),
("3RD JOHN", "3JN"),
("1 PETER", "1PE"),
("2 PETER", "2PE"),
("1 JOHN", "1JN"),
("2 JOHN", "2JN"),
("3 JOHN", "3JN"),
("JUDE", "JUD"),
("REVELATION", "REV")])
|
"""Minify HTML
A quick hack at the moment.
"""
import sys
import argparse
from parser import parseHTML, ParseException
def doMin(fileName):
body = parseHTML(fileName)
sys.stdout.write(body.toStr({'discardComments':True, 'deploy':True}))
if __name__ == "__main__":
cmdParser = argparse.ArgumentParser(description='Minify HTML')
cmdParser.add_argument('files', nargs=1, help='input file')
args = cmdParser.parse_args()
try:
doMin(args.files[0])
except ParseException as er:
sys.stderr.write(str(er) + '\n')
|
from shutil import copyfile
import numpy as np
from navec import Navec
path = 'ru/navec_hudlit_v1_12B_500K_300d_100q.tar'
navec = Navec.load(path)
words = []
embeddings = []
for word, id in navec.vocab.word_ids.items():
if word == '<unk>':
word = '*UNK*'
words.append(word)
embeddings.append(navec.pq[id])
embeddings = np.array(embeddings).astype(np.float64)
np.save('ru/static_word_embeddings.npy', embeddings)
with open('ru/static_word_vocabulary.txt', 'w') as f:
for word in words:
f.write("%s\n" % word)
copyfile('ru/static_word_embeddings.npy', 'ru/tuned_word_embeddings.npy')
copyfile('ru/static_word_vocabulary.txt', 'ru/tuned_word_vocabulary.txt')
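# Hedged sketch (not part of the original script): how the files written above can be
# read back; row i of the embedding matrix corresponds to line i of the vocabulary.
def _load_saved_embeddings():
    emb = np.load('ru/static_word_embeddings.npy')
    with open('ru/static_word_vocabulary.txt', encoding='utf-8') as f:
        vocab = f.read().splitlines()
    return emb, {word: idx for idx, word in enumerate(vocab)}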
|
"""This example is a loose HyperparameterHunter adaptation of the SKLearn example on the
"Effect of transforming the targets in regression models"
(https://scikit-learn.org/stable/auto_examples/compose/plot_transformed_target.html#real-world-data-set).
Specifically, we'll be looking at the section using the Boston Housing regression dataset, adapting
the target transformations therein to be used with HyperparameterHunter"""
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter import Environment, CVExperiment, FeatureEngineer
from hyperparameter_hunter import DummyOptPro, Categorical
from hyperparameter_hunter.utils.learning_utils import get_boston_data
##################################################
# Import Miscellaneous Assets
##################################################
import numpy as np
##################################################
# Import Learning Assets
##################################################
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import QuantileTransformer, StandardScaler
# noinspection PyUnusedLocal
def get_holdout_data(train, target_column):
train_data, holdout_data = train_test_split(train, random_state=1)
return train_data, holdout_data
##################################################
# Feature Engineering Steps
##################################################
def quantile_transform(train_targets, non_train_targets):
transformer = QuantileTransformer(output_distribution="normal", n_quantiles=100)
train_targets[train_targets.columns] = transformer.fit_transform(train_targets.values)
non_train_targets[train_targets.columns] = transformer.transform(non_train_targets.values)
return train_targets, non_train_targets, transformer
def log_transform(all_targets):
all_targets = np.log1p(all_targets)
return all_targets, np.expm1
def standard_scale(train_inputs, non_train_inputs):
scaler = StandardScaler()
train_inputs[train_inputs.columns] = scaler.fit_transform(train_inputs.values)
non_train_inputs[train_inputs.columns] = scaler.transform(non_train_inputs.values)
return train_inputs, non_train_inputs
def standard_scale_BAD(all_inputs):
"""If you wanted to standard-scale, by fitting on your entire dataset, rather than only on your
train dataset (which is not recommended), this is how you could do it"""
scaler = StandardScaler()
all_inputs[all_inputs.columns] = scaler.fit_transform(all_inputs.values)
return all_inputs
def square_sum_feature(all_inputs):
all_inputs["square_sum"] = all_inputs.agg(
lambda row: np.sqrt(np.sum([np.square(_) for _ in row])), axis="columns"
)
return all_inputs
##################################################
# Execute
##################################################
def execute():
#################### Environment ####################
env = Environment(
train_dataset=get_boston_data(),
results_path="HyperparameterHunterAssets",
holdout_dataset=get_holdout_data,
target_column="DIS",
metrics=["r2_score", "median_absolute_error"],
cv_type="KFold",
cv_params=dict(n_splits=10, random_state=1),
)
#################### CVExperiment ####################
exp_0 = CVExperiment(
model_initializer=Ridge,
model_init_params=dict(),
feature_engineer=FeatureEngineer([quantile_transform]),
)
#################### Optimization ####################
# `opt_0` recognizes `exp_0`'s `feature_engineer` and its results as valid learning material
# This is because `opt_0` marks the engineer step functions omitted by `exp_0` as `optional=True`
opt_0 = DummyOptPro(iterations=10)
opt_0.forge_experiment(
model_initializer=Ridge,
model_init_params=dict(),
feature_engineer=FeatureEngineer(
[
Categorical([quantile_transform, log_transform], optional=True),
Categorical([standard_scale, standard_scale_BAD], optional=True),
Categorical([square_sum_feature], optional=True),
]
),
)
opt_0.go()
if __name__ == "__main__":
execute()
|
from importlib.abc import Loader, MetaPathFinder
from importlib.util import spec_from_loader
from io import BytesIO
import os
import random
import sys
import webbrowser
import fabulous.image
import fabulous.utils
import requests
from dotenv import load_dotenv
load_dotenv()
EDAMAM_ENDPOINT = "https://api.edamam.com/search"
# Hack: Fabulous has a Python 2/3 bug that keeps a width parameter from
# working. As a workaround, make Python 2's basestring work in Python 3.
fabulous.image.basestring = str
def fetch_recipe(keywords):
params = {
'q': ' '.join(keywords),
'app_id': os.getenv('EDAMAM_APP_ID'),
'app_key': os.getenv('EDAMAM_APP_KEY'),
}
r = requests.get(EDAMAM_ENDPOINT, params=params)
results = r.json()
if not results['hits']:
return None
return random.choice(results['hits'])['recipe']
def print_recipe_text(recipe):
print(recipe['label'])
print(' (' + recipe['url'] + ')')
for ingredient in recipe['ingredientLines']:
print(' - ' + ingredient)
def print_recipe_image(recipe):
image_url = recipe['image']
r = requests.get(image_url)
print(fabulous.image.Image(BytesIO(r.content), width=fabulous.utils.term.width // 3))
def dict_to_attributes(dictionary, obj):
for k, v in dictionary.items():
setattr(obj, k, v)
# Code for meta finder and loader based on https://stackoverflow.com/a/43573798/25507
class YummyMetaFinder(MetaPathFinder):
BLACKLIST = ['jedi.parser', 'nt']
def find_spec(self, fullname, path, target=None):
if fullname in self.BLACKLIST:
return None
if path is None or path == "":
path = [os.getcwd()] # top level import
return spec_from_loader(fullname, loader=YummyLoader(fullname))
class YummyLoader(Loader):
def __init__(self, filename):
self.filename = filename
def create_module(self, spec):
return None # use default module creation semantics
def exec_module(self, module):
keywords = self.filename.split('_')
print("yum, %s..." % ' '.join(keywords))
recipe = fetch_recipe(keywords)
if not recipe:
return
print_recipe_text(recipe)
print_recipe_image(recipe)
dict_to_attributes(recipe, module)
module.open = lambda: webbrowser.open(recipe['url'])
def install():
"""Inserts the finder into the import machinery"""
sys.meta_path.append(YummyMetaFinder())
install()
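# Hedged usage sketch (requires EDAMAM_APP_ID / EDAMAM_APP_KEY in the environment):
#   import roast_chicken    # any import is turned into a recipe search for "roast chicken"
#   roast_chicken.open()    # opens the chosen recipe's URL in the default browser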
|
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class Scale(nn.Module):
def __init__(self, init_value=1e-3):
super(Scale, self).__init__()
self.scale = Parameter(torch.FloatTensor([init_value]))
def forward(self, x):
return x * self.scale
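# AWRU: a conv-act-conv residual block in which both the residual branch and the skip
# connection are multiplied by learnable scalars (Scale) before being summed.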
class AWRU(nn.Module):
def __init__(self, nf, kernel_size, wn, act=nn.ReLU(True)):
super(AWRU, self).__init__()
self.res_scale = Scale(1)
self.x_scale = Scale(1)
self.body = nn.Sequential(
wn(nn.Conv2d(nf, nf, kernel_size, padding=kernel_size//2)),
act,
wn(nn.Conv2d(nf, nf, kernel_size, padding=kernel_size//2)),
)
def forward(self, x):
res = self.res_scale(self.body(x)) + self.x_scale(x)
return res
class AWMS(nn.Module):
def __init__(self, nf, out_chl, wn, act=nn.ReLU(True)):
super(AWMS, self).__init__()
self.tail_k3 = wn(nn.Conv2d(nf, nf, 3, padding=3//2, dilation=1))
self.tail_k5 = wn(nn.Conv2d(nf, nf, 5, padding=5//2, dilation=1))
self.scale_k3 = Scale(0.5)
self.scale_k5 = Scale(0.5)
self.fuse = wn(nn.Conv2d(nf, nf, 3, padding=3 // 2))
self.act = act
self.w_conv = wn(nn.Conv2d(nf, out_chl, 3, padding=3//2))
def forward(self, x):
x0 = self.scale_k3(self.tail_k3(x))
x1 = self.scale_k5(self.tail_k5(x))
cur_x = x0 + x1
fuse_x = self.act(self.fuse(cur_x))
out = self.w_conv(fuse_x)
return out
class LFB(nn.Module):
def __init__(self, nf, wn, act=nn.ReLU(inplace=True)):
super(LFB, self).__init__()
self.b0 = AWRU(nf, 3, wn=wn, act=act)
self.b1 = AWRU(nf, 3, wn=wn, act=act)
self.b2 = AWRU(nf, 3, wn=wn, act=act)
self.b3 = AWRU(nf, 3, wn=wn, act=act)
self.reduction = wn(nn.Conv2d(nf * 4, nf, 3, padding=3//2))
self.res_scale = Scale(1)
self.x_scale = Scale(1)
def forward(self, x):
x0 = self.b0(x)
x1 = self.b1(x0)
x2 = self.b2(x1)
x3 = self.b3(x2)
res = self.reduction(torch.cat([x0, x1, x2, x3], dim=1))
return self.res_scale(res) + self.x_scale(x)
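# WeightNet: subtracts the RGB mean, extracts features (head), stacks RES_BLOCK LFB
# blocks (body), upsamples by SCALE via PixelShuffle (up), and maps to N_WEIGHT output
# channels through the multi-scale AWMS tail.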
class WeightNet(nn.Module):
def __init__(self, config):
super(WeightNet, self).__init__()
in_chl = config.IN_CHANNEL
nf = config.N_CHANNEL
n_block = config.RES_BLOCK
out_chl = config.N_WEIGHT
scale = config.SCALE
act = nn.ReLU(inplace=True)
wn = lambda x: nn.utils.weight_norm(x)
rgb_mean = torch.FloatTensor([0.4488, 0.4371, 0.4040]).view([1, 3, 1, 1])
self.register_buffer('rgb_mean', rgb_mean)
self.head = nn.Sequential(
wn(nn.Conv2d(in_chl, nf, 3, padding=3//2)),
act,
)
body = []
for i in range(n_block):
body.append(LFB(nf, wn=wn, act=act))
self.body = nn.Sequential(*body)
self.up = nn.Sequential(
wn(nn.Conv2d(nf, nf * scale ** 2, 3, padding=3//2)),
act,
nn.PixelShuffle(upscale_factor=scale)
)
self.tail = AWMS(nf, out_chl, wn, act=act)
def forward(self, x):
x = x - self.rgb_mean
x = self.head(x)
x = self.body(x)
x = self.up(x)
out = self.tail(x)
return out
if __name__ == '__main__':
from easydict import EasyDict as edict
config = edict()
config.IN_CHANNEL = 3
config.N_CHANNEL = 32
config.RES_BLOCK = 4
config.N_WEIGHT = 72
config.SCALE = 2
net = WeightNet(config).cuda()
cnt = 0
for p in net.parameters():
cnt += p.numel()
print(cnt)
x = torch.randn(1, 3, 32, 32).cuda()
out = net(x)
print(out.size())
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.homescreen.app import Homescreen
class TestRocketBarAddCollectionSaveBookmark(GaiaTestCase):
def setUp(self):
GaiaTestCase.setUp(self)
self.apps.set_permission('Smart Collections', 'geolocation', 'deny')
self.connect_to_local_area_network()
def test_rocketbar_add_collection(self):
homescreen = Homescreen(self.marionette)
self.apps.switch_to_displayed_app()
contextmenu = homescreen.open_context_menu()
collection_activity = contextmenu.tap_add_collection()
collection_list = collection_activity.collection_name_list
# Choose the second option to avoid 'Custom'
collection = collection_list[1]
collection_activity.select(collection)
self.wait_for_condition(lambda m: self.apps.displayed_app.name == homescreen.name)
self.apps.switch_to_displayed_app()
self.assertTrue(homescreen.is_app_installed(collection),
"Collection '%s' not found on Homescreen" % collection)
collection = homescreen.tap_collection(collection)
app = collection.applications[0]
app_name = app.name
app.long_tap_to_install()
add_link = app.tap_save_to_home_screen()
add_link.tap_add_bookmark_to_home_screen_dialog_button()
# Switch to Home Screen to look for app
self.device.touch_home_button()
self.assertTrue(homescreen.is_app_installed(app_name),
'The app %s was not found to be installed on the home screen.' % app_name)
|
import base64
import io
import os
import urllib.error
import urllib.parse
import urllib.request
import uuid
import PIL
from PIL import Image
from flask import current_app
from flask import current_app as app
from sqlalchemy.orm.exc import NoResultFound
from xhtml2pdf import pisa
from app import get_settings
from app.api.helpers.storage import UploadedFile, upload, generate_hash, UPLOAD_PATHS
from app.models.image_size import ImageSizes
def get_file_name():
return str(uuid.uuid4())
def uploaded_image(extension='.png', file_content=None):
filename = get_file_name() + extension
filedir = current_app.config.get('BASE_DIR') + '/static/uploads/'
if not os.path.isdir(filedir):
os.makedirs(filedir)
file_path = filedir + filename
file = open(file_path, "wb")
file.write(base64.b64decode(file_content.split(",")[1]))
file.close()
return UploadedFile(file_path, filename)
def uploaded_file(files, multiple=False):
if multiple:
files_uploaded = []
for file in files:
extension = file.filename.split('.')[1]
filename = get_file_name() + '.' + extension
filedir = current_app.config.get('BASE_DIR') + '/static/uploads/'
if not os.path.isdir(filedir):
os.makedirs(filedir)
file_path = filedir + filename
file.save(file_path)
files_uploaded.append(UploadedFile(file_path, filename))
else:
extension = files.filename.split('.')[1]
filename = get_file_name() + '.' + extension
filedir = current_app.config.get('BASE_DIR') + '/static/uploads/'
if not os.path.isdir(filedir):
os.makedirs(filedir)
file_path = filedir + filename
files.save(file_path)
files_uploaded = UploadedFile(file_path, filename)
return files_uploaded
def create_save_resized_image(image_file, basewidth=None, maintain_aspect=None, height_size=None, upload_path=None,
ext='jpg', remove_after_upload=False, resize=True):
"""
Create and Save the resized version of the background image
:param resize:
:param upload_path:
:param ext:
:param remove_after_upload:
:param height_size:
:param maintain_aspect:
:param basewidth:
:param image_file:
:return:
"""
if not image_file:
return None
filename = '{filename}.{ext}'.format(filename=get_file_name(), ext=ext)
data = urllib.request.urlopen(image_file).read()
image_file = io.BytesIO(data)
try:
im = Image.open(image_file)
except IOError:
raise IOError("Corrupt/Invalid Image")
# Convert to jpeg for lower file size.
if im.format != 'JPEG':
img = im.convert('RGB')
else:
img = im
if resize:
if maintain_aspect:
width_percent = (basewidth / float(img.size[0]))
height_size = int((float(img.size[1]) * float(width_percent)))
        img = img.resize((basewidth, height_size), PIL.Image.LANCZOS)
temp_file_relative_path = 'static/media/temp/' + generate_hash(str(image_file)) + get_file_name() + '.jpg'
temp_file_path = app.config['BASE_DIR'] + '/' + temp_file_relative_path
dir_path = temp_file_path.rsplit('/', 1)[0]
# create dirs if not present
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
img.save(temp_file_path)
upfile = UploadedFile(file_path=temp_file_path, filename=filename)
if remove_after_upload:
# os.remove(image_file) No point in removing in memory file
pass
uploaded_url = upload(upfile, upload_path)
os.remove(temp_file_path)
return uploaded_url
def create_save_image_sizes(image_file, image_sizes_type, unique_identifier=None):
"""
Save the resized version of the background image
:param unique_identifier:
:param image_sizes_type:
:param image_file:
:return:
"""
try:
image_sizes = ImageSizes.query.filter_by(type=image_sizes_type).one()
except NoResultFound:
image_sizes = ImageSizes(image_sizes_type, 1300, 500, True, 100, 75, 30, True, 100, 500, 200, True, 100)
# Get an unique identifier from uuid if not provided
if unique_identifier is None:
unique_identifier = get_file_name()
if image_sizes_type == 'speaker-image':
thumbnail_aspect = icon_aspect = small_aspect = True
thumbnail_basewidth = thumbnail_height_size = image_sizes.thumbnail_size_width_height
icon_basewidth = icon_height_size = image_sizes.icon_size_width_height
small_basewidth = small_height_size = image_sizes.small_size_width_height
original_upload_path = UPLOAD_PATHS['user']['original'].format(
identifier=unique_identifier)
small_upload_path = UPLOAD_PATHS['user']['small'].format(
identifier=unique_identifier)
thumbnail_upload_path = UPLOAD_PATHS['user']['thumbnail'].format(
identifier=unique_identifier)
icon_upload_path = UPLOAD_PATHS['user']['icon'].format(
identifier=unique_identifier)
new_images = {
'original_image_url': create_save_resized_image(image_file, 0, 0, 0, original_upload_path, resize=False),
'small_image_url': create_save_resized_image(image_file, small_basewidth, small_aspect, small_height_size,
small_upload_path),
'thumbnail_image_url': create_save_resized_image(image_file, thumbnail_basewidth, thumbnail_aspect,
thumbnail_height_size, thumbnail_upload_path),
'icon_image_url': create_save_resized_image(image_file, icon_basewidth, icon_aspect, icon_height_size,
icon_upload_path)
}
else:
large_aspect = image_sizes.full_aspect if image_sizes.full_aspect else False
large_basewidth = image_sizes.full_width if image_sizes.full_width else 1300
        large_height_size = image_sizes.full_height if image_sizes.full_height else 500
        thumbnail_aspect = image_sizes.thumbnail_aspect if image_sizes.thumbnail_aspect else False
thumbnail_basewidth = image_sizes.thumbnail_width if image_sizes.thumbnail_width else 500
thumbnail_height_size = image_sizes.thumbnail_height if image_sizes.thumbnail_height else 200
icon_aspect = image_sizes.icon_aspect if image_sizes.icon_aspect else False
icon_basewidth = image_sizes.icon_width if image_sizes.icon_width else 75
icon_height_size = image_sizes.icon_height if image_sizes.icon_height else 30
original_upload_path = UPLOAD_PATHS['event']['original'].format(
identifier=unique_identifier)
large_upload_path = UPLOAD_PATHS['event']['large'].format(
identifier=unique_identifier)
thumbnail_upload_path = UPLOAD_PATHS['event']['thumbnail'].format(
identifier=unique_identifier)
icon_upload_path = UPLOAD_PATHS['event']['icon'].format(
identifier=unique_identifier)
new_images = {
'original_image_url': create_save_resized_image(image_file, 0, 0, 0, original_upload_path, resize=False),
'large_image_url': create_save_resized_image(image_file, large_basewidth, large_aspect, large_height_size,
large_upload_path),
'thumbnail_image_url': create_save_resized_image(image_file, thumbnail_basewidth, thumbnail_aspect,
thumbnail_height_size, thumbnail_upload_path),
'icon_image_url': create_save_resized_image(image_file, icon_basewidth, icon_aspect, icon_height_size,
icon_upload_path)
}
return new_images
def create_system_image(image_file=None, upload_path=None, unique_identifier=None,
ext='jpg'):
"""
Create System Images for Event Topics
:param upload_path:
:param ext:
:param remove_after_upload:
:param image_file:
:return:
"""
    # Build a unique file name for the image to upload
filename = '{filename}.{ext}'.format(filename=get_file_name(), ext=ext)
if image_file:
with urllib.request.urlopen(image_file) as img_data:
image_file = io.BytesIO(img_data.read())
else:
file_relative_path = 'static/default_system_image.png'
image_file = app.config['BASE_DIR'] + '/' + file_relative_path
try:
im = Image.open(image_file)
except IOError:
raise IOError("Corrupt/Invalid Image")
# Convert to jpeg for lower file size.
if im.format != 'JPEG':
img = im.convert('RGB')
else:
img = im
temp_file_relative_path = 'static/media/temp/' + generate_hash(str(image_file)) + get_file_name() + '.jpg'
temp_file_path = app.config['BASE_DIR'] + '/' + temp_file_relative_path
dir_path = temp_file_path.rsplit('/', 1)[0]
# create dirs if not present
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
img.save(temp_file_path)
upfile = UploadedFile(file_path=temp_file_path, filename=filename)
if not upload_path:
upload_path = UPLOAD_PATHS['event_topic']['system_image'].format(event_topic_id=unique_identifier)
uploaded_url = upload(upfile, upload_path)
os.remove(temp_file_path)
image = {'system_image_url': uploaded_url}
return image
def make_frontend_url(path, parameters=None):
"""
Create URL for frontend
"""
settings = get_settings()
frontend_url = urllib.parse.urlparse(settings.get('frontend_url') or '')
full_path = '/'.join(x.strip('/') for x in (frontend_url.path, str(path)) if x)
return urllib.parse.urlunparse((
frontend_url.scheme,
frontend_url.netloc,
full_path,
'',
str(urllib.parse.urlencode(parameters) if parameters else ''),
''
))
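# Hedged usage sketch (assumes settings['frontend_url'] is, e.g., 'https://example.com/app'):
#   make_frontend_url('/verify', {'token': 'abc'})
#   # -> 'https://example.com/app/verify?token=abc'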
def create_save_pdf(pdf_data, key, dir_path='/static/uploads/pdf/temp/', identifier=None, upload_dir='static/media/'):
    """
    Create and save a PDF from HTML
    :param pdf_data:
    :return:
    """
    # Generate the identifier at call time; a default argument would be evaluated only once.
    if identifier is None:
        identifier = get_file_name()
    filedir = current_app.config.get('BASE_DIR') + dir_path
    if not os.path.isdir(filedir):
        os.makedirs(filedir)
    filename = identifier + '.pdf'
dest = filedir + filename
file = open(dest, "wb")
pisa.CreatePDF(io.BytesIO(pdf_data.encode('utf-8')), file)
file.close()
uploaded_file = UploadedFile(dest, filename)
upload_path = key.format(identifier=identifier)
new_file = upload(uploaded_file, upload_path, upload_dir=upload_dir)
# Removing old file created
os.remove(dest)
return new_file
|
from categories.base import CategoryBaseAdminForm
from categories.models import Category
class CategoryAdminForm(CategoryBaseAdminForm):
    class Meta:
        model = Category
        fields = "__all__"
def clean_alternate_title(self):
if self.instance is None or not self.cleaned_data["alternate_title"]:
return self.cleaned_data["name"]
else:
return self.cleaned_data["alternate_title"]
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""manage_vms tests."""
from builtins import object
from builtins import range
from builtins import str
import copy
import functools
import mock
import unittest
from google.cloud import ndb
import six
from datastore import data_types
from google_cloud_utils import compute_engine_projects
from handlers.cron import manage_vms
from handlers.cron.helpers import bot_manager
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
INSTANCE_GROUPS = {
'oss-fuzz-linux-zone2-pre-proj2': {
'targetSize': 1,
},
'oss-fuzz-linux-zone2-pre-proj3': {
'targetSize': 499,
},
'oss-fuzz-linux-zone2-pre-proj4': {
'targetSize': 99,
},
'oss-fuzz-linux-zone2-pre-proj5': {
'targetSize': 99,
}
}
INSTANCE_TEMPLATES = {
'oss-fuzz-linux-zone2-pre-proj2': {
'description': '{"version": 1}',
'properties': {
'metadata': {
'items': [],
},
'disks': [{
'initializeParams': {
'diskSizeGb': '30',
},
}],
'serviceAccounts': [{
'email':
'email',
'scopes': [
'https://www.googleapis.com/auth/'
'devstorage.full_control',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/appengine.apis',
'https://www.googleapis.com/auth/prodxmon',
'https://www.googleapis.com/auth/bigquery',
]
}],
}
},
'oss-fuzz-linux-zone2-pre-proj3': {
'description': '{"version": 1}',
'properties': {
'metadata': {
'items': [],
},
'disks': [{
'initializeParams': {
'diskSizeGb': '30',
},
}],
'serviceAccounts': [{
'email':
'email',
'scopes': [
'https://www.googleapis.com/auth/'
'devstorage.full_control',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/appengine.apis',
'https://www.googleapis.com/auth/prodxmon',
'https://www.googleapis.com/auth/bigquery',
]
}],
}
},
'oss-fuzz-linux-zone2-pre-proj4': {
'description': '{"version": 0}',
'properties': {
'metadata': {
'items': [],
},
'disks': [{
'initializeParams': {
'diskSizeGb': '30',
},
}],
'serviceAccounts': [{
'email':
'email',
'scopes': [
'https://www.googleapis.com/auth/'
'devstorage.full_control',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/appengine.apis',
'https://www.googleapis.com/auth/prodxmon',
'https://www.googleapis.com/auth/bigquery',
]
}],
}
},
'oss-fuzz-linux-zone2-pre-proj5': {
'description': '{"version": 1}',
'properties': {
'metadata': {
'items': [],
},
'disks': [{
'initializeParams': {
'diskSizeGb': '30',
},
}],
'serviceAccounts': [{
'email':
'email',
'scopes': [
'https://www.googleapis.com/auth/'
'devstorage.full_control',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/appengine.apis',
'https://www.googleapis.com/auth/prodxmon',
'https://www.googleapis.com/auth/bigquery',
]
}],
}
}
}
INSTANCES = {
'oss-fuzz-linux-zone3-host': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-host-abcd',
}, {
'instance': 'https://blah/oss-fuzz-linux-zone3-host-efgh',
}],
'oss-fuzz-linux-zone3-worker-proj1': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj1-%04d' % i
} for i in range(1, 2)],
'oss-fuzz-linux-zone3-worker-proj2': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj2-%04d' % i
} for i in range(1, 5)],
'oss-fuzz-linux-zone3-worker-proj3': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj3-%04d' % i
} for i in range(1, 10)],
'oss-fuzz-linux-zone3-worker-proj4': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj4-%04d' % i
} for i in range(1, 2)],
'oss-fuzz-linux-zone3-worker-proj5': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj5-%04d' % i
} for i in range(1, 2)],
'oss-fuzz-linux-zone3-host-high-end': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-host-high-end-1'
}],
'oss-fuzz-linux-zone3-worker-high-end-proj6': [{
'instance': ('https://blah/'
'oss-fuzz-linux-zone3-worker-high-end-proj6-%04d' % i)
} for i in range(1, 3)],
}
def mock_resource(spec):
"""Mock resource."""
resource = mock.Mock(spec=spec)
resource.created = False
resource.body = None
def create(*args, **kwargs): # pylint: disable=unused-argument
if resource.created:
raise bot_manager.AlreadyExistsError
resource.created = True
def get():
if resource.created:
return resource.body
raise bot_manager.NotFoundError
def exists():
return resource.created
def delete():
if not resource.created:
raise bot_manager.NotFoundError
resource.created = False
resource.create.side_effect = create
resource.get.side_effect = get
resource.exists.side_effect = exists
resource.delete.side_effect = delete
return resource
class MockBotManager(object):
"""Mock BotManager."""
def __init__(self, project_id, zone, instance_groups, instance_templates):
self.project_id = project_id
self.zone = zone
self.instance_groups = instance_groups
self.instance_templates = instance_templates
def _get_resource(self, name, cache, values, spec):
"""Get resource."""
if name in cache:
return cache[name]
resource = mock_resource(spec=spec)
if name in values:
resource.created = True
resource.body = values[name]
cache[name] = resource
return resource
def instance_group(self, name):
"""Get an InstanceGroup resource with the given name."""
resource = self._get_resource(name, self.instance_groups, INSTANCE_GROUPS,
bot_manager.InstanceGroup)
if name in INSTANCES:
resource.list_managed_instances.return_value = INSTANCES[name]
return resource
def instance_template(self, name):
"""Get an InstanceTemplate resource with the given name."""
return self._get_resource(name, self.instance_templates, INSTANCE_TEMPLATES,
bot_manager.InstanceTemplate)
def expected_instance_template(gce_project_name,
name,
project_name,
disk_size_gb=None,
service_account=None,
tls_cert=False):
"""Get the expected instance template for a project."""
gce_project = compute_engine_projects.load_project(gce_project_name)
expected = copy.deepcopy(gce_project.get_instance_template(name))
expected['properties']['metadata']['items'].append({
'key': 'task-tag',
'value': project_name,
})
if disk_size_gb:
disk = expected['properties']['disks'][0]
disk['initializeParams']['diskSizeGb'] = disk_size_gb
if service_account:
expected['properties']['serviceAccounts'][0]['email'] = service_account
if tls_cert:
expected['properties']['metadata']['items'].extend([{
'key': 'tls-cert',
'value': project_name + '_cert',
}, {
'key': 'tls-key',
'value': project_name + '_key',
}])
return expected
def expected_host_instance_template(gce_project_name, name):
"""Get the expected instance template for a project."""
gce_project = compute_engine_projects.load_project(gce_project_name)
return copy.deepcopy(gce_project.get_instance_template(name))
@test_utils.with_cloud_emulators('datastore')
class CronTest(unittest.TestCase):
"""Test manage_vms cron."""
def setUp(self):
test_helpers.patch_environ(self)
test_helpers.patch(self, [
'base.utils.is_oss_fuzz',
'handlers.cron.helpers.bot_manager.BotManager',
'system.environment.is_running_on_app_engine',
'google_cloud_utils.compute_engine_projects.load_project',
])
self.mock.is_oss_fuzz.return_value = True
self.mock.is_running_on_app_engine.return_value = True
self.mock.load_project.return_value = compute_engine_projects.Project(
project_id='clusterfuzz-external',
clusters=[
compute_engine_projects.Cluster(
name='oss-fuzz-linux-zone2-pre',
gce_zone='us-east2-a',
instance_count=997,
instance_template='external-pre-zone2',
distribute=True,
worker=False,
high_end=False),
compute_engine_projects.Cluster(
name='oss-fuzz-linux-zone3-host',
gce_zone='us-central1-d',
instance_count=2,
instance_template='host-zone3',
distribute=False,
worker=False,
high_end=False),
compute_engine_projects.Cluster(
name='oss-fuzz-linux-zone3-worker',
gce_zone='us-central1-d',
instance_count=16,
instance_template='worker-zone3',
distribute=True,
worker=True,
high_end=False),
compute_engine_projects.Cluster(
name='oss-fuzz-linux-zone3-host-high-end',
gce_zone='us-central1-d',
instance_count=1,
instance_template='host-high-end-zone3',
distribute=False,
worker=False,
high_end=True),
compute_engine_projects.Cluster(
name='oss-fuzz-linux-zone3-worker-high-end',
gce_zone='us-central1-d',
instance_count=2,
instance_template='worker-zone3',
distribute=True,
worker=True,
high_end=True),
],
instance_templates=[
{
'name': 'external-pre-zone2',
'description': '{"version": 1}',
'properties': {
'metadata': {
'items': [],
},
'disks': [{
'initializeParams': {
'diskSizeGb': 30,
},
}],
'serviceAccounts': [{
'email':
'email',
'scopes': [
'https://www.googleapis.com/auth/'
'devstorage.full_control',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/appengine.apis',
'https://www.googleapis.com/auth/prodxmon',
'https://www.googleapis.com/auth/bigquery',
]
}],
}
},
{
'name': 'host-zone3',
'description': '{"version": 1}',
'properties': {
'metadata': {
'items': [],
},
'disks': [{
'initializeParams': {
'diskSizeGb': 30,
},
}],
'serviceAccounts': [{
'email':
'email',
'scopes': [
'https://www.googleapis.com/auth/'
'devstorage.full_control',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/appengine.apis',
'https://www.googleapis.com/auth/prodxmon',
'https://www.googleapis.com/auth/bigquery',
]
}],
}
},
{
'name': 'worker-zone3',
'description': '{"version": 1}',
'properties': {
'metadata': {
'items': [],
},
'disks': [{
'initializeParams': {
'diskSizeGb': 30,
},
}],
'serviceAccounts': [{
'email':
'email',
'scopes': [
'https://www.googleapis.com/auth/'
'devstorage.full_control',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/prodxmon',
]
}],
}
},
{
'name': 'host-high-end-zone3',
'description': '{"version": 1}',
'properties': {
'metadata': {
'items': [],
},
'disks': [{
'initializeParams': {
'diskSizeGb': 100,
},
}],
'serviceAccounts': [{
'email':
'email',
'scopes': [
'https://www.googleapis.com/auth/'
'devstorage.full_control',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/prodxmon',
]
}],
}
},
],
host_worker_assignments=[
compute_engine_projects.HostWorkerAssignment(
host='oss-fuzz-linux-zone3-host',
worker='oss-fuzz-linux-zone3-worker',
workers_per_host=8),
compute_engine_projects.HostWorkerAssignment(
host='oss-fuzz-linux-zone3-host-high-end',
worker='oss-fuzz-linux-zone3-worker-high-end',
workers_per_host=2),
])
data_types.OssFuzzProject(
id='proj1',
name='proj1',
cpu_weight=1.0,
service_account='proj1@serviceaccount.com').put()
data_types.OssFuzzProject(
id='proj2',
name='proj2',
cpu_weight=2.0,
service_account='proj2@serviceaccount.com').put()
data_types.OssFuzzProject(
id='proj3',
name='proj3',
cpu_weight=5.0,
service_account='proj3@serviceaccount.com').put()
data_types.OssFuzzProject(
id='proj4',
name='proj4',
cpu_weight=1.0,
service_account='proj4@serviceaccount.com').put()
data_types.OssFuzzProject(
id='proj5',
name='proj5',
cpu_weight=1.0,
service_account='proj5@serviceaccount.com',
disk_size_gb=10).put()
data_types.OssFuzzProject(
id='proj6',
name='proj6',
cpu_weight=1.0,
service_account='proj6@serviceaccount.com',
high_end=True).put()
for j in range(1, 7):
project_name = 'proj%d' % j
data_types.WorkerTlsCert(
id=project_name,
project_name=project_name,
cert_contents=project_name.encode() + b'_cert',
key_contents=project_name.encode() + b'_key').put()
data_types.OssFuzzProjectInfo(id='old_proj', name='old_proj').put()
data_types.OssFuzzProjectInfo(
id='proj2',
name='proj2',
clusters=[
data_types.OssFuzzProjectInfo.ClusterInfo(
cluster='oss-fuzz-linux-zone2-pre',
gce_zone='us-east2-a',
cpu_count=1,
),
data_types.OssFuzzProjectInfo.ClusterInfo(
cluster='old-cluster',
gce_zone='us-east2-a',
cpu_count=1,
),
]).put()
data_types.OssFuzzProjectInfo(
id='proj3',
name='proj3',
clusters=[
data_types.OssFuzzProjectInfo.ClusterInfo(
cluster='oss-fuzz-linux-zone2-pre',
gce_zone='us-east2-a',
cpu_count=499,
)
]).put()
data_types.OssFuzzProjectInfo(
id='proj4',
name='proj4',
clusters=[
data_types.OssFuzzProjectInfo.ClusterInfo(
cluster='oss-fuzz-linux-zone2-pre',
gce_zone='us-east2-a',
cpu_count=99,
)
]).put()
data_types.OssFuzzProjectInfo(
id='proj5',
name='proj5',
clusters=[
data_types.OssFuzzProjectInfo.ClusterInfo(
cluster='oss-fuzz-linux-zone2-pre',
gce_zone='us-east2-a',
cpu_count=99,
)
]).put()
data_types.OssFuzzProjectInfo(
id='old_proj',
name='old_proj',
clusters=[
data_types.OssFuzzProjectInfo.ClusterInfo(
cluster='oss-fuzz-linux-zone2-pre',
gce_zone='us-east2-a',
cpu_count=5,
)
]).put()
data_types.HostWorkerAssignment(
id='old-host-0',
host_name='old-host',
worker_name='worker',
instance_num=0).put()
instance_groups = {}
instance_templates = {}
self.mock.BotManager.side_effect = functools.partial(
MockBotManager,
instance_groups=instance_groups,
instance_templates=instance_templates)
def test_update_cpus(self):
"""Tests CPU distribution cron."""
self.maxDiff = None # pylint: disable=invalid-name
manager = manage_vms.OssFuzzClustersManager('clusterfuzz-external')
manager.update_clusters()
proj1 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj1').get()
self.assertIsNotNone(proj1)
self.assertDictEqual({
'name':
'proj1',
'clusters': [{
'cluster': 'oss-fuzz-linux-zone2-pre',
'cpu_count': 100,
'gce_zone': 'us-east2-a',
}, {
'cluster': 'oss-fuzz-linux-zone3-worker',
'cpu_count': 1,
'gce_zone': 'us-central1-d',
}],
}, proj1.to_dict())
proj2 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj2').get()
self.assertIsNotNone(proj2)
self.assertDictEqual({
'name':
'proj2',
'clusters': [{
'cluster': 'oss-fuzz-linux-zone2-pre',
'cpu_count': 200,
'gce_zone': 'us-east2-a',
}, {
'cluster': 'oss-fuzz-linux-zone3-worker',
'cpu_count': 4,
'gce_zone': 'us-central1-d',
}],
}, proj2.to_dict())
proj3 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj3').get()
self.assertIsNotNone(proj3)
self.assertDictEqual({
'name':
'proj3',
'clusters': [{
'cluster': 'oss-fuzz-linux-zone2-pre',
'cpu_count': 499,
'gce_zone': 'us-east2-a',
}, {
'cluster': 'oss-fuzz-linux-zone3-worker',
'cpu_count': 9,
'gce_zone': 'us-central1-d',
}],
}, proj3.to_dict())
proj4 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj4').get()
self.assertIsNotNone(proj4)
self.assertDictEqual({
'name':
'proj4',
'clusters': [{
'cluster': 'oss-fuzz-linux-zone2-pre',
'cpu_count': 99,
'gce_zone': 'us-east2-a',
}, {
'cluster': 'oss-fuzz-linux-zone3-worker',
'cpu_count': 1,
'gce_zone': 'us-central1-d',
}],
}, proj4.to_dict())
proj5 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj5').get()
self.assertIsNotNone(proj5)
self.assertDictEqual({
'name':
'proj5',
'clusters': [{
'cluster': 'oss-fuzz-linux-zone2-pre',
'cpu_count': 99,
'gce_zone': 'us-east2-a',
}, {
'cluster': 'oss-fuzz-linux-zone3-worker',
'cpu_count': 1,
'gce_zone': 'us-central1-d',
}],
}, proj5.to_dict())
proj6 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj6').get()
self.assertIsNotNone(proj6)
self.assertDictEqual({
'name':
'proj6',
'clusters': [{
'cluster': 'oss-fuzz-linux-zone3-worker-high-end',
'cpu_count': 2,
'gce_zone': 'us-central1-d',
}],
}, proj6.to_dict())
old_proj = ndb.Key(data_types.OssFuzzProjectInfo, 'old_proj').get()
self.assertIsNone(old_proj)
mock_bot_manager = self.mock.BotManager('clusterfuzz-external',
'us-east2-a')
# proj1: new project.
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj1').create.assert_called_with(
expected_instance_template('clusterfuzz-external',
'external-pre-zone2', 'proj1'))
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj1').create.assert_called_with(
'oss-fuzz-linux-zone2-pre-proj1',
'oss-fuzz-linux-zone2-pre-proj1',
size=100,
wait_for_instances=False)
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj1').resize.assert_not_called()
# proj2: already exists. needs a resize. old cluster should be deleted.
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj2').create.assert_not_called()
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj2').delete.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj2').create.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj2').delete.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj2').resize.assert_called_with(
200, wait_for_instances=False)
mock_bot_manager.instance_template(
'old-cluster-proj2').delete.assert_called()
mock_bot_manager.instance_group('old-cluster-proj2').delete.assert_called()
# proj3: already exists. no changes needed.
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj3').delete.assert_not_called()
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj3').create.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj3').create.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj3').resize.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj3').delete.assert_not_called()
# proj4: needs a template update (version change).
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj4').delete.assert_called()
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj4').create.assert_called_with(
expected_instance_template('clusterfuzz-external',
'external-pre-zone2', 'proj4'))
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj4').delete.assert_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj4').create.assert_called_with(
'oss-fuzz-linux-zone2-pre-proj4',
'oss-fuzz-linux-zone2-pre-proj4',
size=99,
wait_for_instances=False)
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj4').resize.assert_not_called()
# proj5: needs a template update (disk size change).
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj5').delete.assert_called()
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj5').create.assert_called_with(
expected_instance_template(
'clusterfuzz-external',
'external-pre-zone2',
'proj5',
disk_size_gb=10))
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj5').delete.assert_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj5').create.assert_called_with(
'oss-fuzz-linux-zone2-pre-proj5',
'oss-fuzz-linux-zone2-pre-proj5',
size=99,
wait_for_instances=False)
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj5').resize.assert_not_called()
# proj6: high end project.
for j in range(1, 6):
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone3-worker-high-end-proj' +
str(j)).create.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone3-worker-high-end-proj6').create.assert_called()
# old_proj: deleted.
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-old-proj').create.assert_not_called()
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-old-proj').delete.assert_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-old-proj').delete.assert_called()
# host instances: created.
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone3-host').create.assert_called_with(
expected_host_instance_template('clusterfuzz-external',
'host-zone3'))
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone3-host').create.assert_called_with(
'oss-fuzz-linux-zone3-host',
'oss-fuzz-linux-zone3-host',
size=2,
wait_for_instances=False)
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone3-host-high-end').create.assert_called_with(
'oss-fuzz-linux-zone3-host-high-end',
'oss-fuzz-linux-zone3-host-high-end',
size=1,
wait_for_instances=False)
# Worker instances: created.
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone3-worker-proj1').create.assert_called_with(
expected_instance_template(
'clusterfuzz-external',
'worker-zone3',
'proj1',
service_account='proj1@serviceaccount.com',
tls_cert=True))
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone3-worker-proj1').create.assert_called_with(
'oss-fuzz-linux-zone3-worker-proj1',
'oss-fuzz-linux-zone3-worker-proj1',
size=1,
wait_for_instances=False)
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone3-worker-proj2').create.assert_called_with(
expected_instance_template(
'clusterfuzz-external',
'worker-zone3',
'proj2',
service_account='proj2@serviceaccount.com',
tls_cert=True))
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone3-worker-proj2').create.assert_called_with(
'oss-fuzz-linux-zone3-worker-proj2',
'oss-fuzz-linux-zone3-worker-proj2',
size=4,
wait_for_instances=False)
six.assertCountEqual(self, [{
'instance_num': 0,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj1-0001',
'project_name': u'proj1',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 1,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj2-0001',
'project_name': u'proj2',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 2,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj2-0002',
'project_name': u'proj2',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 3,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj2-0003',
'project_name': u'proj2',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 4,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj2-0004',
'project_name': u'proj2',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 5,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0001',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 6,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0002',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 7,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0003',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 0,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0004',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 1,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0005',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 2,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0006',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 3,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0007',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 4,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0008',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 5,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0009',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 6,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj4-0001',
'project_name': u'proj4',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 7,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj5-0001',
'project_name': u'proj5',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 0,
'worker_name': u'oss-fuzz-linux-zone3-worker-high-end-proj6-0001',
'project_name': u'proj6',
'host_name': u'oss-fuzz-linux-zone3-host-high-end-1'
}, {
'instance_num': 1,
'worker_name': u'oss-fuzz-linux-zone3-worker-high-end-proj6-0002',
'project_name': u'proj6',
'host_name': u'oss-fuzz-linux-zone3-host-high-end-1'
}], [
assignment.to_dict()
for assignment in data_types.HostWorkerAssignment.query()
])
class OssFuzzDistributeCpusTest(unittest.TestCase):
"""Tests OSS-Fuzz CPU distribution."""
def test_equal(self):
"""Tests for each project receiving equal share."""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj3', cpu_weight=1.0),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 30)
self.assertListEqual([10, 10, 10], result)
def test_equal_uneven(self):
"""Tests for each project receiving equal share with an uneven division."""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj3', cpu_weight=1.0),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 31)
self.assertListEqual([11, 10, 10], result)
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 32)
self.assertListEqual([11, 11, 10], result)
def test_weight_preference(self):
"""Tests that remainders are given to projects with higher weights
first.
"""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=1.01),
data_types.OssFuzzProject(name='proj3', cpu_weight=1.1),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 4)
self.assertListEqual([1, 1, 2], result)
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 5)
self.assertListEqual([1, 2, 2], result)
def test_not_enough(self):
"""Tests allocation with not enough CPUs."""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj3', cpu_weight=1.0),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 1)
self.assertListEqual([1, 0, 0], result)
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 2)
self.assertListEqual([1, 1, 0], result)
def test_minimum(self):
"""Tests that projects are given a minimum share."""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=0.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=0.0),
data_types.OssFuzzProject(name='proj3', cpu_weight=0.0),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 3)
self.assertListEqual([1, 1, 1], result)
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 10)
self.assertListEqual([4, 3, 3], result)
def test_maximum(self):
"""Tests that projects are capped at the maximum share."""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj3', cpu_weight=1.0),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 10000)
self.assertListEqual([1000, 1000, 1000], result)
def test_primes(self):
"""Test a bunch of different distributions."""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=2.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=3.0),
data_types.OssFuzzProject(name='proj3', cpu_weight=5.0),
data_types.OssFuzzProject(name='proj4', cpu_weight=7.0),
data_types.OssFuzzProject(name='proj5', cpu_weight=11.0),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 101)
self.assertListEqual([7, 10, 18, 26, 40], result)
self.assertEqual(101, sum(result))
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 887)
self.assertListEqual([63, 95, 158, 222, 349], result)
self.assertEqual(887, sum(result))
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 2741)
self.assertListEqual([214, 313, 509, 705, 1000], result)
self.assertEqual(2741, sum(result))
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 3571)
self.assertListEqual([356, 483, 738, 994, 1000], result)
self.assertEqual(3571, sum(result))
@test_utils.with_cloud_emulators('datastore')
class AssignHostWorkerTest(unittest.TestCase):
"""Tests host -> worker assignment."""
def test_assign_keep_existing(self):
"""Test that assignment keeps existing assignments."""
host_names = ['host']
worker_instances = [
manage_vms.WorkerInstance(name='worker-proj-0', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-1', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-2', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-3', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-4', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-5', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-6', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-7', project='proj'),
]
data_types.HostWorkerAssignment(
host_name='host',
instance_num=2,
worker_name='worker-proj-6',
project_name='proj',
id='host-2').put()
data_types.HostWorkerAssignment(
host_name='host',
instance_num=3,
worker_name='worker-proj-1',
project_name='proj',
id='host-3').put()
data_types.HostWorkerAssignment(
host_name='host',
instance_num=0,
worker_name='worker-nonexistent-1',
project_name='nonexistent',
id='host-0').put()
manager = manage_vms.OssFuzzClustersManager('clusterfuzz-external')
new_assignments = manager.do_assign_hosts_to_workers(
host_names, worker_instances, 8)
self.assertListEqual([
{
'host_name': u'host',
'instance_num': 0,
'project_name': 'proj',
'worker_name': 'worker-proj-0'
},
{
'host_name': u'host',
'instance_num': 1,
'project_name': 'proj',
'worker_name': 'worker-proj-2'
},
{
'host_name': u'host',
'instance_num': 4,
'project_name': 'proj',
'worker_name': 'worker-proj-3'
},
{
'host_name': u'host',
'instance_num': 5,
'project_name': 'proj',
'worker_name': 'worker-proj-4'
},
{
'host_name': u'host',
'instance_num': 6,
'project_name': 'proj',
'worker_name': 'worker-proj-5'
},
{
'host_name': u'host',
'instance_num': 7,
'project_name': 'proj',
'worker_name': 'worker-proj-7'
},
], [assignment.to_dict() for assignment in new_assignments])
|
from abc import ABC, abstractmethod
class Véhicule(ABC):
    @abstractmethod
    def demarrer(self):
        pass
class Voiture(Véhicule):
    def demarrer(self):
        print("It's a car")
class Moto(Véhicule):
    def demarrer(self):
        print("It's a motorbike, and yeah, you kick-start it")
class Scootbite(Véhicule):
    def demarrer(self):
        print("It's a lousy scooter")
class Compétition(Véhicule):
    def compet(self):
        print("Ah! This one is a competition vehicle, it needs a launcher")
class Laguna(Voiture):
    def lag(self):
        print("Oh, a beat-up Laguna")
class Demolitionderby(Voiture):
    def demolir(self):
        print("Demoooolition!!!!")
class Rallye(Compétition, Voiture):
    def wrc(self):
        print("And yeah, Loeb won the Monte Carlo rally")
class MotoGP(Moto, Compétition):
    def ktm(self):
        print("That takes some serious nerve, mate")
class FinDeVie(Laguna, Demolitionderby):
    def mort(self):
        print("This car is a wreck, it's done for")
MonteCarlo = Rallye()
Bouillave = FinDeVie()
FabioQuatarraro = MotoGP()
MonteCarlo.demarrer()
MonteCarlo.compet()
MonteCarlo.wrc()
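# Added for illustration (not in the original file): exercising the remaining
# instances shows how Python's MRO resolves demarrer() in the diamond-shaped
# hierarchies above (FinDeVie inherits it from Voiture, MotoGP from Moto).
Bouillave.demarrer()
Bouillave.lag()
Bouillave.demolir()
Bouillave.mort()
FabioQuatarraro.demarrer()
FabioQuatarraro.compet()
FabioQuatarraro.ktm()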
|
import numpy as np
import random
import sys
import mnist # This is just to get the training data working
class Net: # Simple neural network for classification problems
def __init__(self,nodes,initialisation=random.random):
# Initialisation is the function which is used to intialise the weights and biases
self.nodes = nodes; # Number of nodes in each row, e.g. [2,3,3,2],
# for a network with 2 input neurons,
# two hidden layers of 3 neurons and 2 output neurons
self.weights = []; # Array of matrices for each row of weights in the network
self.biases = [];
self.gamma = 0.01; # Step size
totalWeights = 0;
# Initialise weights and biases
for i in range(1,len(self.nodes)): # For each row (after input layer)
self.weights.append([]);
self.biases.append([]);
for j in range(self.nodes[i]): # For each node in the row
self.weights[i-1].append([]);
self.biases[i-1].append(initialisation());
for k in range(self.nodes[i-1]): # For each node in the previous layer
self.weights[i-1][j].append(initialisation());
totalWeights+=1;
totalWeights+=1;
# print(">> Total weights and biases: "+str(totalWeights));
def save(self,file_name="network.npy"): # Save weights to file
        np.save(file_name,np.asarray(self.weights,dtype=object)); # Note: only weights are saved, not biases
def load(self,file_name="network.npy"): # Load weights from file
self.weights = np.load(file_name,allow_pickle=True);
    def sigmoid(self,x): # Maybe these functions shouldn't be part of the class but I wanted to keep everything together
        return(1.0/(1.0+np.exp(-x)));
    def sigmoid_d(self,x): # Derivative of the sigmoid function σ'(x)=σ(x)*(1-σ(x))
        return(self.sigmoid(x)*(1.0-self.sigmoid(x)));
def guess(self,data): # Feed forward
current_layer = [];
previous_layer = data;
for i in range(1,len(self.nodes)):
try:
                current_layer = np.array(self.weights[i-1]).dot(previous_layer); # Weighted sum of the previous layer
except IndexError:
sys.stderr.write("Error: Training data does not match input layer");
return(1);
for j in range(len(current_layer)):
current_layer[j] = current_layer[j]+self.biases[i-1][j]; # Add bias
#print(current_layer);
current_layer = [self.sigmoid(j) for j in current_layer];
#print(current_layer);
previous_layer = current_layer;
return(current_layer);
def train(self,data,label): # Label is the desired output for a given training point
output = self.guess(data);
error = [];
try:
# Error is difference between output and expected output (label)
error = [(output[i]-label[i])**2 for i in range(len(label))];
cost = sum(error);
except IndexError:
# Label must be an array of equal length to the last layer in the network
            sys.stderr.write("Error: Labels do not match output layer");
return(1);
#print("output: "+str(output)); print("error: "+str(error)); print("cost: "+str(cost));
#for i in range(len(weights)): # Each row in network
# for j in range(len(weights[i])): # Each node in network
# for k in range(len(weights[i][j])): # Each weight and bias in the network
def f():
return(2);
# n = Net([784,16,16,10]);
n= Net([2,3,3,2],f);
print(n.guess([1,2]));
n.train([1,2],[0,1]);
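# Note (added): Net.train() currently only runs a forward pass and computes the
# squared-error cost; no weight/bias update (backpropagation) is implemented yet.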
# mndata = mnist.MNIST('data');
# train_images,train_labels = mndata.load_training();
# test_images,test_labels = mndata.load_testing();
# index = randrange(0,len(images));
# image = np.array(images[index], dtype="uint8");
|
# Copyright (c) 2013, Helio de Jesus and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe import _
from frappe.utils import flt
from frappe.model.meta import get_field_precision
from frappe.utils.xlsxutils import handle_html
from erpnext.accounts.report.sales_register.sales_register import get_mode_of_payments
def execute(filters=None):
return _execute(filters)
def _execute(filters=None, additional_table_columns=None, additional_query_columns=None):
if not filters: filters = {}
columns = get_columns(additional_table_columns)
company_currency = erpnext.get_company_currency(filters.company)
item_list = get_items(filters, additional_query_columns)
if item_list:
itemised_tax, tax_columns = get_tax_accounts(item_list, columns, company_currency)
columns.append({
"fieldname": "currency",
"label": _("Currency"),
"fieldtype": "Data",
"width": 80
})
mode_of_payments = get_mode_of_payments(set([d.parent for d in item_list]))
so_dn_map = get_delivery_notes_against_sales_order(item_list)
data = []
for d in item_list:
delivery_note = None
if d.delivery_note:
delivery_note = d.delivery_note
elif d.so_detail:
delivery_note = ", ".join(so_dn_map.get(d.so_detail, []))
if not delivery_note and d.update_stock:
delivery_note = d.parent
row = [d.item_code, d.item_name, d.item_group, d.parent, d.posting_date, d.customer, d.customer_name, d.owner]
if additional_query_columns:
for col in additional_query_columns:
row.append(d.get(col))
row += [
d.customer_group, d.debit_to, ", ".join(mode_of_payments.get(d.parent, [])),
d.territory, d.project, d.company, d.sales_order,
delivery_note, d.income_account, d.cost_center, d.stock_qty, d.stock_uom
]
row += [(d.base_net_rate * d.qty)/d.stock_qty, d.base_net_amount] \
if d.stock_uom != d.uom else [d.base_net_rate, d.base_net_amount]
total_tax = 0
for tax in tax_columns:
item_tax = itemised_tax.get(d.name, {}).get(tax, {})
row += [item_tax.get("tax_rate", 0), item_tax.get("tax_amount", 0)]
total_tax += flt(item_tax.get("tax_amount"))
row += [total_tax, d.base_net_amount + total_tax, company_currency]
data.append(row)
return columns, data
def get_columns(additional_table_columns):
columns = [
_("Item Code") + ":Link/Item:120", _("Item Name") + "::120",
_("Item Group") + ":Link/Item Group:100", _("Invoice") + ":Link/Sales Invoice:120",
_("Posting Date") + ":Date:80", _("Customer") + ":Link/Customer:120",
_("Customer Name") + "::120",
_("User") + "::120"]
if additional_table_columns:
columns += additional_table_columns
columns += [
_("Customer Group") + ":Link/Customer Group:120",
_("Receivable Account") + ":Link/Account:120",
_("Mode of Payment") + "::120", _("Territory") + ":Link/Territory:80",
_("Project") + ":Link/Project:80", _("Company") + ":Link/Company:100",
_("Sales Order") + ":Link/Sales Order:100", _("Delivery Note") + ":Link/Delivery Note:100",
_("Income Account") + ":Link/Account:140", _("Cost Center") + ":Link/Cost Center:140",
_("Stock Qty") + ":Float:120", _("Stock UOM") + "::100",
_("Rate") + ":Currency/currency:120",
_("Amount") + ":Currency/currency:120"
]
return columns
def get_conditions(filters):
conditions = ""
for opts in (("company", " and company=%(company)s"),
("cost_center", " and `tabSales Invoice Item`.cost_center = %(cost_center)s"),
("customer", " and `tabSales Invoice`.customer = %(customer)s"),
("item_code", " and `tabSales Invoice Item`.item_code = %(item_code)s"),
("from_date", " and `tabSales Invoice`.posting_date>=%(from_date)s"),
("to_date", " and `tabSales Invoice`.posting_date<=%(to_date)s"),
("owner", " and `tabSales Invoice`.owner = %(owner)s")):
if filters.get(opts[0]):
conditions += opts[1]
if filters.get("mode_of_payment"):
conditions += """ and exists(select name from `tabSales Invoice Payment`
where parent=`tabSales Invoice`.name
and ifnull(`tabSales Invoice Payment`.mode_of_payment, '') = %(mode_of_payment)s)"""
return conditions
def get_items(filters, additional_query_columns):
conditions = get_conditions(filters)
match_conditions = frappe.build_match_conditions("Sales Invoice")
if match_conditions:
match_conditions = " and {0} ".format(match_conditions)
if additional_query_columns:
additional_query_columns = ', ' + ', '.join(additional_query_columns)
return frappe.db.sql("""
select
`tabSales Invoice Item`.name, `tabSales Invoice Item`.parent,
`tabSales Invoice`.posting_date, `tabSales Invoice`.debit_to,
`tabSales Invoice`.project, `tabSales Invoice`.customer, `tabSales Invoice`.remarks,
`tabSales Invoice`.territory, `tabSales Invoice`.company, `tabSales Invoice`.base_net_total,
`tabSales Invoice Item`.item_code, `tabSales Invoice Item`.item_name,
`tabSales Invoice Item`.item_group, `tabSales Invoice Item`.sales_order,
`tabSales Invoice Item`.delivery_note, `tabSales Invoice Item`.income_account,
`tabSales Invoice Item`.cost_center, `tabSales Invoice Item`.stock_qty,
`tabSales Invoice Item`.stock_uom, `tabSales Invoice Item`.base_net_rate,
`tabSales Invoice Item`.base_net_amount, `tabSales Invoice`.customer_name,
`tabSales Invoice`.customer_group, `tabSales Invoice Item`.so_detail,
`tabSales Invoice`.update_stock, `tabSales Invoice Item`.uom, `tabSales Invoice Item`.qty {0},
`tabSales Invoice`.owner
from `tabSales Invoice`, `tabSales Invoice Item`
where `tabSales Invoice`.name = `tabSales Invoice Item`.parent
and `tabSales Invoice`.docstatus = 1 %s %s
order by `tabSales Invoice`.posting_date desc, `tabSales Invoice Item`.cost_center desc, `tabSales Invoice Item`.item_code desc
""".format(additional_query_columns or '') % (conditions, match_conditions), filters, as_dict=1)
def get_delivery_notes_against_sales_order(item_list):
so_dn_map = frappe._dict()
so_item_rows = list(set([d.so_detail for d in item_list]))
if so_item_rows:
delivery_notes = frappe.db.sql("""
select parent, so_detail
from `tabDelivery Note Item`
where docstatus=1 and so_detail in (%s)
group by so_detail, parent
""" % (', '.join(['%s']*len(so_item_rows))), tuple(so_item_rows), as_dict=1)
for dn in delivery_notes:
so_dn_map.setdefault(dn.so_detail, []).append(dn.parent)
return so_dn_map
def get_tax_accounts(item_list, columns, company_currency,
doctype="Sales Invoice", tax_doctype="Sales Taxes and Charges"):
import json
item_row_map = {}
tax_columns = []
invoice_item_row = {}
itemised_tax = {}
tax_amount_precision = get_field_precision(frappe.get_meta(tax_doctype).get_field("tax_amount"),
currency=company_currency) or 2
for d in item_list:
invoice_item_row.setdefault(d.parent, []).append(d)
item_row_map.setdefault(d.parent, {}).setdefault(d.item_code or d.item_name, []).append(d)
conditions = ""
if doctype == "Purchase Invoice":
conditions = " and category in ('Total', 'Valuation and Total') and base_tax_amount_after_discount_amount != 0"
tax_details = frappe.db.sql("""
select
parent, description, item_wise_tax_detail,
charge_type, base_tax_amount_after_discount_amount
from `tab%s`
where
parenttype = %s and docstatus = 1
and (description is not null and description != '')
and parent in (%s)
%s
order by description
""" % (tax_doctype, '%s', ', '.join(['%s']*len(invoice_item_row)), conditions),
		tuple([doctype] + list(invoice_item_row.keys())))
for parent, description, item_wise_tax_detail, charge_type, tax_amount in tax_details:
description = handle_html(description)
if description not in tax_columns and tax_amount:
# as description is text editor earlier and markup can break the column convention in reports
tax_columns.append(description)
if item_wise_tax_detail:
try:
item_wise_tax_detail = json.loads(item_wise_tax_detail)
for item_code, tax_data in item_wise_tax_detail.items():
itemised_tax.setdefault(item_code, frappe._dict())
if isinstance(tax_data, list):
tax_rate, tax_amount = tax_data
else:
tax_rate = tax_data
tax_amount = 0
if charge_type == "Actual" and not tax_rate:
tax_rate = "NA"
item_net_amount = sum([flt(d.base_net_amount)
for d in item_row_map.get(parent, {}).get(item_code, [])])
for d in item_row_map.get(parent, {}).get(item_code, []):
item_tax_amount = flt((tax_amount * d.base_net_amount) / item_net_amount) \
if item_net_amount else 0
if item_tax_amount:
itemised_tax.setdefault(d.name, {})[description] = frappe._dict({
"tax_rate": tax_rate,
"tax_amount": flt(item_tax_amount, tax_amount_precision)
})
except ValueError:
continue
elif charge_type == "Actual" and tax_amount:
for d in invoice_item_row.get(parent, []):
itemised_tax.setdefault(d.name, {})[description] = frappe._dict({
"tax_rate": "NA",
"tax_amount": flt((tax_amount * d.base_net_amount) / d.base_net_total,
tax_amount_precision)
})
tax_columns.sort()
for desc in tax_columns:
columns.append(desc + " Rate:Data:80")
columns.append(desc + " Amount:Currency/currency:100")
columns += ["Total Tax:Currency/currency:80", "Total:Currency/currency:100"]
return itemised_tax, tax_columns
|
from django.db.models import CharField, DateTimeField, JSONField, Model, TextField
class FailedMessage(Model):
created = DateTimeField(auto_now_add=True)
topic_name = CharField(max_length=255)
subscription_name = CharField(max_length=255)
subject = CharField(max_length=255)
message = JSONField()
exception_str = TextField()
traceback = TextField()
# If there is a correlation ID (Requires Django GUID + Celery Integration), save it. Makes fetching logs easy
correlation_id = CharField(max_length=36, blank=True)
def __str__(self) -> str:
"""
String representation
"""
return f'{self.topic_name}-{self.subject}. Correlation ID: {self.correlation_id}' # pragma: no cover
|
n = int(input())
a = 0
b = 1
# while a != n:
# a, b = b, a + b
print(f"0 0 {n}")
|
#!/usr/local/bin/python
#
# WSPR receiver.
#
# switches among bands if weakcat.py understands the radio.
# reports to wsprnet if mycall/mygrid defined in weak.ini.
#
# Robert Morris, AB1HL
#
import wspr
import sys
import os
import time
import weakaudio
import numpy
import threading
import re
import random
import copy
import weakcat
from six.moves import urllib
#import urllib.request, urllib.parse, urllib.error
import weakutil
import weakargs
# look only at these bands.
plausible = [ "80", "40", "30", "20", "17" ]
b2f = { "80" : 3.568600, "40" : 7.038600, "30" : 10.138700, "20" : 14.095600,
"17" : 18.104600, "15" : 21.094600, "12" : 24.924600,
"10" : 28.124600, "6" : 50.293000, "2" : 144.489 }
def load_prefixes():
d = { }
f = open("jt65prefixes.dat")
for ln in f:
ln = re.sub(r'\t', ' ', ln)
ln = re.sub(r' *', ' ', ln)
        ln = ln.strip()
ln = re.sub(r' *\(.*\) *', '', ln)
        ln = ln.strip()
m = re.search(r'^([A-Z0-9]+) +(.*)', ln)
if m != None:
d[m.group(1)] = m.group(2)
f.close()
return d
def look_prefix(call, d):
if len(call) == 5 and call[0:3] == "KG4":
# KG4xx is Guantanamo, KG4x and KG4xxx are not.
return "Guantanamo Bay"
while len(call) > 0:
if call in d:
return d[call]
call = call[0:-1]
return None
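# Example (added, illustrative): with d = load_prefixes(),
#   look_prefix("KG4AB", d)  -> "Guantanamo Bay"   (special-cased, length 5)
# for other calls, trailing characters are stripped until a known prefix matches.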
# weighted choice (to pick bands).
# a[i] = [ value, weight ]
def wchoice(a, n):
total = 0.0
for e in a:
total += e[1]
ret = [ ]
while len(ret) < n:
x = random.random() * total
for ai in range(0, len(a)):
e = a[ai]
if x <= e[1]:
ret.append(e[0])
total -= e[1]
a = a[0:ai] + a[ai+1:]
break
x -= e[1]
return ret
def wchoice_test():
a = [ [ "a", .1 ], [ "b", .1 ], [ "c", .4 ], [ "d", .3 ], [ "e", .1 ] ]
counts = { }
for iter in range(0, 500):
x = wchoice(a, 2)
for e in x:
counts[e] = counts.get(e, 0) + 1
print(counts)
class WSPRMon:
def __init__(self, incard, cat, oneband):
self.mycall = weakutil.cfg("wsprmon", "mycall")
self.mygrid = weakutil.cfg("wsprmon", "mygrid")
self.running = True
self.rate = 12000
self.logname = "wspr-log.txt"
self.bandname = "wspr-band.txt"
self.jtname = "wspr"
self.verbose = False
self.incard = incard
self.oneband = oneband
if cat != None:
self.cat = weakcat.open(cat)
self.cat.sync()
self.cat.set_usb_data()
else:
self.cat = None
# for each band, count of received signals last time we
# looked at it, to guess most profitable band.
self.bandinfo = { }
# for each two-minute interval, the band we were listening on.
self.minband = { }
# has readall() processed each interval?
self.mindone = { }
self.prefixes = load_prefixes()
def start(self):
self.r = wspr.WSPR()
self.r.cardrate = self.rate
self.r.opencard(self.incard)
self.rth = threading.Thread(target=lambda : self.r.gocard())
self.rth.daemon = True
self.rth.start()
if self.mycall == None or self.mygrid == None:
print("not reporting to wsprnet because no mycall/mygrid in weak.cfg")
elif True:
self.nth = threading.Thread(target=lambda : self.gonet())
self.nth.daemon = True
self.nth.start()
print("reporting to wsprnet as %s at %s." % (self.mycall, self.mygrid))
else:
print("not reporting to wsprnet.")
    def close(self):
        self.running = False
        self.r.close()
        self.rth.join()
        if hasattr(self, 'nth'):
            self.nth.join()
# thread to send to wsprnet.
# hints from wsjtx wsprnet.cpp and
# http://blog.marxy.org/2015/12/wsprnet-down-up-down.html
def gonet(self):
mi = 0
while self.running:
time.sleep(30)
msgs = self.r.get_msgs()
while mi < len(msgs):
msg = msgs[mi]
mi += 1
# msg is a wspr.Decode.
if not (msg.minute in self.minband):
continue
band = self.minband[msg.minute]
pp = self.parse(msg.msg)
if pp == None:
continue
[ call, grid, dbm ] = pp
when = self.r.start_time + 60*msg.minute
gm = time.gmtime(when)
url = "http://wsprnet.org/post?"
url += "function=wspr&"
url += "rcall=%s&" % (self.mycall)
url += "rgrid=%s&" % (self.mygrid)
                url += "rqrg=%.6f&" % (b2f[band]) # my frequency, MHz
url += "date=%02d%02d%02d&" % (gm.tm_year-2000, gm.tm_mon, gm.tm_mday)
url += "time=%02d%02d&" % (gm.tm_hour, gm.tm_min)
url += "sig=%.0f&" % (msg.snr)
url += "dt=%.1f&" % (msg.dt)
url += "drift=%.1f&" % (msg.drift)
url += "tqrg=%.6f&" % (b2f[band] + msg.hz()/1000000.0)
url += "tcall=%s&" % (call)
url += "tgrid=%s&" % (grid)
url += "dbm=%s&" % (dbm)
url += "version=weakmon-0.3&"
url += "mode=2"
try:
req = urllib.request.urlopen(url)
for junk in req:
pass
req.close()
except:
print("wsprnet GET failed for %s" % (msg.msg))
pass
# process messages from one cycle ago, i.e. the latest
# cycle for which both reception and
# decoding have completed.
def readall(self):
now = time.time()
nowmin = self.r.minute(now)
for min in range(max(0, nowmin-6), nowmin, 2):
if min in self.mindone:
continue
self.mindone[min] = True
if not (min in self.minband):
continue
band = self.minband[min]
bandcount = 0
msgs = self.r.get_msgs()
# each msg is a wspr.Decode.
for m in msgs[len(msgs)-50:]:
if m.minute == min:
bandcount += 1
self.log(self.r.start_time + 60*min, band, m.hz(), m.msg, m.snr, m.dt, m.drift)
x = self.bandinfo.get(band, 0)
self.bandinfo[band] = 0.5 * x + 0.5 * bandcount
# turn "WB4HIR EM95 33" into ["WB4HIR", "EM95", "33"], or None.
def parse(self, msg):
msg = msg.strip()
msg = re.sub(r' *', ' ', msg)
m = re.search(r'^([A-Z0-9\/]+) ([A-Z0-9]+) ([0-9]+)', msg)
if m == None:
print("wsprmon log could not parse %s" % (msg))
return None
call = m.group(1)
grid = m.group(2)
dbm = m.group(3)
return [ call, grid, dbm ]
def log(self, when, band, hz, msg, snr, dt, drift):
pp = self.parse(msg)
if pp == None:
return
[ call, grid, dbm ] = pp
entity = look_prefix(call, self.prefixes)
        # b2f is MHz
freq = b2f[band] + hz / 1000000.0
ts = self.r.ts(when)
ts = re.sub(r':[0-9][0-9]$', '', ts) # delete seconds
info = "%s %9.6f %s %s %s %.0f %.1f %.1f %s" % (ts,
freq,
call,
grid,
dbm,
snr,
dt,
drift,
entity)
print("%s" % (info))
f = open(self.logname, "a")
f.write("%s\n" % (info))
f.close()
# return a good band on which to listen.
def rankbands(self):
global plausible
# are we missing bandinfo for any bands?
missing = [ ]
for b in plausible:
if self.bandinfo.get(b) == None:
missing.append(b)
# always explore missing bands first.
if len(missing) > 0:
band = missing[0]
# so we no longer count it as "missing".
self.bandinfo[band] = 0
return band
# most profitable bands, best first.
best = sorted(plausible, key = lambda b : -self.bandinfo.get(b, -1))
if random.random() < 0.3 or self.bandinfo[best[0]] <= 0.1:
band = random.choice(plausible)
else:
wa = [ [ b, self.bandinfo[b] ] for b in best ]
band = wchoice(wa, 1)[0]
return band
def go(self):
while self.running:
            # wait until we're at the start of a two-minute interval.
# that is, don't tell the radio to change bands in the
# middle of an interval.
while True:
if self.running == False:
return
second = self.r.second(time.time())
if second >= 119 or second < 1:
break
time.sleep(0.2)
# choose a band.
if self.oneband != None:
band = self.oneband
else:
band = self.rankbands()
if self.cat != None:
self.cat.setf(0, int(b2f[band] * 1000000.0))
now = time.time()
if self.r.second(now) < 5:
min = self.r.minute(now)
else:
min = self.r.minute(now + 5)
# remember the band for this minute, for readall().
self.minband[min] = band
if self.verbose:
sys.stdout.write("band %s ; " % (band))
for b in self.bandinfo:
sys.stdout.write("%s %.1f, " % (b, self.bandinfo[b]))
sys.stdout.write("\n")
sys.stdout.flush()
# make sure we get into the next minute
time.sleep(5)
# collect incoming message reports.
while self.running:
now = time.time()
second = self.r.second(now)
if second >= 118:
break
self.readall()
time.sleep(1)
def oldmain():
incard = None
cattype = None
catdev = None
oneband = None
levels = False
vflag = False
i = 1
while i < len(sys.argv):
if sys.argv[i] == "-in":
incard = sys.argv[i+1]
i += 2
elif sys.argv[i] == "-cat":
cattype = sys.argv[i+1]
catdev = sys.argv[i+2]
i += 3
elif sys.argv[i] == "-band":
oneband = sys.argv[i+1]
i += 2
elif sys.argv[i] == "-levels":
levels = True
i += 1
elif sys.argv[i] == "-v":
vflag = True
i += 1
else:
usage()
if levels:
# print sound card avg/peak once per second, to
# adjust level.
if incard == None:
usage()
c = weakaudio.new(incard, 12000)
c.levels()
sys.exit(0)
if catdev == None and oneband == None:
sys.stderr.write("wsprmon needs either -cat or -band\n")
usage()
if incard != None:
w = WSPRMon(incard, cattype, catdev, oneband)
w.verbose = vflag
w.start()
w.go()
w.close()
else:
usage()
def main():
parser = weakargs.stdparse('Decode WSPR.')
parser.add_argument("-band")
args = weakargs.parse_args(parser)
if args.card == None:
parser.error("wsprmon requires -card")
if args.cat == None and args.band == None:
parser.error("wsprmon needs either -cat or -band")
w = WSPRMon(args.card, args.cat, args.band)
w.verbose = args.v
w.start()
w.go()
w.close()
sys.exit(0)
if __name__ == '__main__':
    main()
|
import tensorflow as tf
import tensorflow.keras.applications as keras_applications
import efficientnet.tfkeras as efficientnet
from tensorflow.keras import layers as keras_layers
from tensorflow.keras import backend as K
from glrec.train import utils as train_utils
from glrec.train.constants import constants as train_constants
from glrec.train.layers import heads as retrieval_heads
from glrec.train.layers import pooling as retrieval_pooling
from glrec.train.layers import delg as delg_layers
# Mapping for backbone architecture modules
_backbone_architecture_module = {
# Lightweight architectures for local testing
'MobileNetV2': keras_applications.MobileNetV2,
# ResNet family
'ResNet50': keras_applications.ResNet50,
'ResNet101': keras_applications.ResNet101,
'ResNet152': keras_applications.ResNet152,
'ResNet50V2': keras_applications.ResNet50V2,
'ResNet101V2': keras_applications.ResNet101V2,
'ResNet152V2': keras_applications.ResNet152V2,
# DenseNet family
'DenseNet121': keras_applications.DenseNet121,
'DenseNet169': keras_applications.DenseNet169,
'DenseNet201': keras_applications.DenseNet201,
# EfficientNet family
'EfficientNetB5': efficientnet.EfficientNetB5,
'EfficientNetB6': efficientnet.EfficientNetB6,
'EfficientNetB7': efficientnet.EfficientNetB7,
}
def load_backbone_model(architecture, weights, trainable=True):
network_module = _backbone_architecture_module[architecture]
weights_file = None
if weights not in [None, 'imagenet', 'noisy-student']:
weights_file, weights = weights, None
backbone = network_module(include_top=False,
weights=weights)
if weights_file is not None:
# `by_name` flag is used if we're loading from a different
# architecture, with some layers in common.
weights_file = train_utils.resolve_file_path(weights_file)
backbone.load_weights(weights_file, by_name=True)
backbone.trainable = trainable
return backbone
def load_global_head(layer, kwargs):
if not hasattr(retrieval_heads, layer):
raise ValueError(
f'Module `glrec.layers.heads` does not contain {layer}')
head_layer = getattr(retrieval_heads, layer)(
num_classes=train_constants.NUM_CLASSES, **kwargs)
return head_layer
# Mapping for pooling layers in the retrieval branch
_retrieval_pooling_module = {
'GAP': tf.keras.layers.GlobalAveragePooling2D,
'GeM': retrieval_pooling.GeneralizedMeanPooling2D,
}
def load_pooling_layer(method, kwargs):
pooling_layer_class = _retrieval_pooling_module[method]
pooling_layer = pooling_layer_class(**kwargs)
return pooling_layer
class DelgGlobalBranch(tf.keras.layers.Layer):
"""
Global (retrieval) branch with Cosine head. The Cosine head requires
ground-truth to calculate margin and scale values. However, during
inference (with K.learning_phase() == False), one can just put empty
labels in, as it doesn't affect the outcome.
"""
def __init__(self,
pooling_config,
embedding_dim,
head_config,
trainable=True,
**kwargs):
super().__init__(**kwargs)
self.trainable = trainable
self._pool_features = load_pooling_layer(**pooling_config)
self._reduce_dimensionality = keras_layers.Dense(embedding_dim)
self._cosine_head = load_global_head(**head_config)
self._softmax = tf.keras.layers.Softmax()
def call(self, inputs):
backbone_features, labels = inputs
pooled_features = self._pool_features(backbone_features)
dim_reduced_features = self._reduce_dimensionality(pooled_features)
output_logits = self._cosine_head([dim_reduced_features, labels])
output = self._softmax(output_logits)
return output
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, None, None],
dtype=tf.float32,
name='delg_global_infer_input'),
])
def delg_inference(self, backbone_features):
"""Returns normalized embeddings, given backbone features.
"""
pooled_features = self._pool_features(backbone_features)
embeddings = self._reduce_dimensionality(pooled_features)
normalized_embeddings = tf.nn.l2_normalize(embeddings, axis=1)
return normalized_embeddings
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None],
dtype=tf.float32,
name='delg_normalized_embeddings_input'),
])
def classify_from_embedding(self, normalized_embeddings):
"""
Given the normalized embeddings (from `delg_inference`), perform
classification and return a tensor (batch_size, num_classes).
"""
normalized_w = tf.nn.l2_normalize(self._cosine_head._w, axis=0)
cosine_similarity = tf.matmul(normalized_embeddings, normalized_w)
return cosine_similarity
class DelgLocalBranch(tf.keras.layers.Layer):
"""Local (recognition) branch with reconstruction and attention heads.
"""
def __init__(self,
attention_config,
autoencoder_config,
trainable=False,
name='local_branch',
**kwargs):
super().__init__(name=name, **kwargs)
self.trainable = trainable
self.attention = delg_layers.Attention(**attention_config)
self.autoencoder = delg_layers.Autoencoder(**autoencoder_config)
self.attention_classifier = keras_layers.Dense(
train_constants.NUM_CLASSES,
activation='softmax', name='attention_fc')
def call(self, inputs):
backbone_features, labels = inputs
# Attention and AutoEncoder
probability, score = self.attention(backbone_features)
embedding, reconstruction = self.autoencoder(backbone_features)
# Classification using attention and reconstructed features
with tf.name_scope('local_classification'):
            # Note: the reconstructed features are used directly here, without
            # an extra l2 normalization step.
            # features = tf.nn.l2_normalize(reconstruction, axis=-1)
            features = reconstruction
features = tf.reduce_sum(
tf.multiply(features, probability),
[1, 2], keepdims=False)
tf.debugging.assert_rank(
features, 2, message='features should have rank 2')
classification_output = self.attention_classifier(features)
        # The reconstruction loss is also computed here, so the branch can
        # return it alongside the classification output.
with tf.name_scope('local_reconstruction_score'):
cn_axis = 3 if K.image_data_format() == 'channels_last' else 1
pointwise_l2_norm = tf.norm(
reconstruction - backbone_features,
keepdims=False, axis=-1)
tf.debugging.assert_rank(
pointwise_l2_norm, 3,
message='pointwise_l2_norm should have rank 3')
reconstruction_score = tf.reduce_mean(
tf.math.square(pointwise_l2_norm),
axis=[1, 2], keepdims=False)
reconstruction_score = tf.divide(
reconstruction_score,
tf.cast(tf.shape(backbone_features)[cn_axis], tf.float32))
tf.debugging.assert_rank(
reconstruction_score, 1,
message='reconstruction_score should have rank 1')
# Output the classification results and reconstruction l2 loss
return classification_output, reconstruction_score
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, None, None],
dtype=tf.float32,
name='delg_local_infer_input'),
])
def delg_inference(self, backbone_features):
"""Local model (descriptors) inference, given backbone features.
"""
# Attention scores
probability, score = self.attention(backbone_features)
# Dimensionality reduced embeddings
embedding = self.autoencoder.encoder(backbone_features)
embedding = tf.nn.l2_normalize(embedding, axis=-1)
# Output shapes: [bs, h, w, c], [bs, h, w, 1], [bs, h, w, 1]
return embedding, probability, score
class DelgModel(tf.keras.Model):
"""DELG architecture, as in https://arxiv.org/abs/2001.05027
"""
def __init__(self,
backbone_config,
global_branch_config,
local_branch_config,
places_branch_config,
shallow_layer_name,
training_mode,
inference_mode,
**kwargs):
"""Initialization of the DELG model
Args:
            backbone_config: a dictionary of kwargs for backbone
global_branch_config: a dict of kwargs for DelgGlobalBranch
            local_branch_config: a dict of kwargs for DelgLocalBranch or None
places_branch_config: not usable right now
shallow_layer_name: name of the shallower layer to get features
"""
super().__init__(**kwargs)
self.training_mode = training_mode
self.inference_mode = inference_mode
# Prepare backbone inference with intermediate outputs
self.backbone = load_backbone_model(**backbone_config)
deep_features = self.backbone.layers[-1].output
shallow_features = self.backbone.get_layer(shallow_layer_name).output
self.backbone_infer = tf.keras.Model(
self.backbone.input,
outputs=[deep_features, shallow_features])
# Construct the global branch
if training_mode in ['global_only', 'local_and_global']:
self.global_branch = DelgGlobalBranch(**global_branch_config)
# Construct the local branch
if training_mode in ['local_only', 'local_and_global']:
self.local_branch = DelgLocalBranch(**local_branch_config)
# If we're only training the local branch, no need to train backbone
if training_mode == 'local_only':
self.backbone.trainable = False
def call(self, inputs, first_time_warmup=False):
"""
first_time_warmup is deprecated
"""
input_image, sparse_label = inputs
deep_features, shallow_features = self.backbone_infer(input_image)
# global branch
if self.training_mode in ['global_only', 'local_and_global']:
global_output = self.global_branch([deep_features, sparse_label])
# local branch with stop gradients, as described in the paper
if self.training_mode in ['local_only', 'local_and_global']:
shallow_features = tf.identity(shallow_features)
shallow_features = tf.stop_gradient(shallow_features)
local_cls_output, local_recon_score = self.local_branch(
[shallow_features, sparse_label])
# 3 heads for 3 losses
if self.training_mode == 'global_only':
return global_output
elif self.training_mode == 'local_only':
return local_cls_output, local_recon_score
elif self.training_mode == 'local_and_global':
return global_output, local_cls_output, local_recon_score
else:
raise RuntimeError('training_mode should be either global_only, '
'local_only, or local_and_global.')
@tf.function(input_signature=[
tf.TensorSpec(
shape=[None, None, None, 3],
dtype=tf.float32,
name='delg_infer_input',
)
])
def delg_inference(self, input_image):
deep_features, shallow_features = self.backbone_infer(input_image)
if self.inference_mode in ['global_only', 'local_and_global']:
global_descriptor = \
self.global_branch.delg_inference(deep_features)
if self.inference_mode in ['local_only', 'local_and_global']:
local_descriptors, probability, scores = \
self.local_branch.delg_inference(shallow_features)
if self.inference_mode == 'global_only':
return global_descriptor
elif self.inference_mode == 'local_only':
return local_descriptors, probability, scores
elif self.inference_mode == 'local_and_global':
return global_descriptor, local_descriptors, probability, scores
else:
            raise RuntimeError('inference_mode should be either global_only, '
'local_only, or local_and_global.')
|
import shlex
import subprocess
from game_control.game import Game
class ExecutableGame(Game):
"""Implementation of abstract base class to control an executable game."""
def __init__(self, executable_filepath, **kwargs):
"""Constructs and starts an executable game.
Args:
executable_filepath (str): filepath of the executable of the game
"""
self._executable_filepath = executable_filepath
super().__init__(**kwargs)
def start(self):
"""Starts the game executable in a separate process."""
self._process = subprocess.Popen(shlex.split(self._executable_filepath))
def stop(self):
"""Stops the game by terminating the process."""
self._process.terminate()
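# Example (added, illustrative; assumes the Game base class needs no extra
# constructor kwargs and that '/path/to/game_binary' is a placeholder path):
#   game = ExecutableGame('/path/to/game_binary')
#   game.start()  # launches the executable in a separate process
#   game.stop()   # terminates the process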
|
import comprehender
import ItemSearch
import deserializer as ds
TEST_TEXT="""
People have known since ancient times that aspirin helps to reduce pain and high body temperature. But that is not all aspirin can do. It has gained important new uses in recent years. Small amounts of the drug may help prevent a heart attack or stroke.
One study showed that some people who took two aspirin pills a day had lower rates of colorectal cancer. And researchers say aspirin may help patients with colon cancer live longer. But others say the acid in aspirin can cause bleeding in the stomach and intestines. And studies showed that aspirin or other pain medicines may lead to loss of eyesight and hearing.
So, how did aspirin become so important? The story begins with a willow tree. Two thousand years ago, the Greek doctor Hippocrates advised his patients to chew on the bark and leaves of the willow. The tree contains the chemical salicin. In the 1800s, researchers discovered how to make salicylic acid from salicin. In 1897, a chemist named Felix Hoffmann at Friedrich Bayer and Company in Germany created acetyl salicylic acid. Later, it became the active substance in a medicine that Bayer called aspirin.
In 1982, a British scientist shared the Nobel Prize in Medicine in part for discovering how aspirin works. Sir John Vane found that aspirin blocks the body from making natural substances called prostaglandins. Prostaglandins have several effects on the body. Some cause pain and the expansion, or swelling, of damaged tissue. Others protect the lining of the stomach and small intestine. Prostaglandins make the heart, kidneys and blood vessels work well.
But there is a problem. Aspirin works against all prostaglandins, good and bad. Scientists have also learned how aspirin interferes with an enzyme. One form of this enzyme makes the prostaglandin that causes pain and swelling. Another form creates a protective effect. So aspirin can reduce pain and swelling in damaged tissues. But it can also harm the inside of the stomach and small intestine. And sometimes it can cause bleeding.
Many people take aspirin to reduce the risk of a heart attack or stroke from blood clots. Clots can block the flow of blood to the heart or brain and cause a heart attack or stroke. Scientists say aspirin prevents blood cells called platelets from sticking together to form clots.
A California doctor named Lawrence Craven first reported this effect in the 1950s. He observed unusual bleeding in children who chewed on an aspirin product to ease the pain after a common operation. Doctor Craven believed the bleeding took place because aspirin prevented blood from thickening. He thought this effect might help prevent heart attacks caused by blood clots. He examined the medical records of 8,000 aspirin users and found no heart attacks in this group. He invited other scientists to test his ideas. But it was years before large studies took place.
Charles Hennekens of Harvard Medical School led one of the studies. In 1983, he began to study more than 22,000 healthy male doctors over 40 years of age. Half took an aspirin every other day. The others took what they thought was aspirin. But it was only a placebo, an inactive substance. Five years later, Dr. Hennekens reported that people who took aspirin reduced their risk of a heart attack. But they had a higher risk of bleeding in the brain than the other doctors.
A few years ago, a group of experts examined studies of aspirin at the request of federal health officials in the United States. The experts said people with an increased risk of a heart attack should take a low-strength aspirin every day. Aspirin may help someone who is having a heart attack caused by a blockage in a blood vessel. Aspirin thins the blood, so the blood may be able to flow past the blockage. But experts say people should seek emergency help immediately. And they say an aspirin is no substitute for treatment, only a temporary help.
"""
category = 'HealthPersonalCare'
TEST_TEXT2 = "Samsung is an amazing company. I am so happy they have the worst products. LIke the SamsUng Gear VR wo w it is ssssss so good!!!! and also. Samsung rocks my sox off. The best monitor and gaming console go well with the samsungs."
c = comprehender.Comprehender()
kp = c.comprehend_key_phrases(TEST_TEXT)
ent = c.comprehend_entities(TEST_TEXT)
item_searcher = ItemSearch.ItemSearch(category,ent,kp)
item_searcher.naive_parse()
results = item_searcher.search()
def p(r):
for i in r:
print(i)
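# Added for illustration: uncomment to print the search results with the helper above.
# p(results)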
|
#!/usr/bin/env python3
import numpy as np
def run_round(board, stuck=False):
size = board.shape[1]
new_board = np.zeros(board.shape, dtype=bool)
for r in range(size):
for c in range(size):
n_on = np.count_nonzero(board[max(0, r - 1):min(r + 2, size), max(0, c - 1):min(c + 2, size)])
if board[r, c]:
                if 3 <= n_on <= 4:  # bump by one because we know the center is on
new_board[r, c] = True
else:
if n_on == 3:
new_board[r, c] = True
if stuck:
new_board[0, 0] = True
new_board[0, -1] = True
new_board[-1, 0] = True
new_board[-1, -1] = True
return new_board
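# Note (added): the slice in run_round() includes the cell itself, which is why a
# live cell survives on a window count of 3 or 4 (the usual 2-or-3 live neighbours).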
initial_board = np.zeros((100, 100), dtype=bool)
with open('input.txt', 'r') as f:
for r, row in enumerate(f):
for c, ch in enumerate(row.strip()):
if ch == '#':
initial_board[r, c] = True
board = initial_board
for _ in range(100):
board = run_round(board)
print("No stuck lights", np.count_nonzero(board))
board = initial_board
board[0, 0] = True
board[0, -1] = True
board[-1, 0] = True
board[-1, -1] = True
for _ in range(100):
board = run_round(board, True)
print("Corners Stuck", np.count_nonzero(board))
|
import os
import sys
import csv
import numpy as np
import scipy.io
import scipy.misc as spm
import cv2
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
def showImg(img):
cv2.imshow("test", img)
cv2.waitKey(-1)
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes), dtype=np.int32)
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
classes_info = '../data_processing/classes.csv'
photo_folder = '../Datasets/Sketchy/rendered_256x256/256x256/photo/tx_000000000000'
sketch_folder = '../Datasets/Sketchy/rendered_256x256/256x256/sketch/tx_000000000000'
info_dir = '../Datasets/Sketchy/info'
data_dir = '../tfrecords/sketchy'
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,
intra_op_parallelism_threads=4)
def check_repeat(seq):
seen = set()
seen_add = seen.add
seen_twice = set(x for x in seq if x in seen or seen_add(x))
return list(seen_twice)
def build_graph():
photo_filename = tf.placeholder(dtype=tf.string, shape=())
label_filename = tf.placeholder(dtype=tf.string, shape=())
photo = tf.read_file(photo_filename)
label = tf.read_file(label_filename)
photo_decoded = tf.image.decode_jpeg(photo, fancy_upscaling=True)
label_decoded = tf.image.decode_png(label)
# Encode 64x64
photo_input = tf.placeholder(dtype=tf.uint8, shape=(64, 64, 3))
label_input = tf.placeholder(dtype=tf.uint8, shape=(256, 256, 1))
label_small_input = tf.placeholder(dtype=tf.uint8, shape=(64, 64, 1))
photo_stream = tf.image.encode_jpeg(photo_input, quality=95, progressive=False,
optimize_size=False, chroma_downsampling=False)
label_stream = tf.image.encode_png(label_input, compression=7)
label_small_stream = tf.image.encode_png(label_small_input, compression=7)
return photo_filename, label_filename, photo, label, photo_decoded, label_decoded, photo_input, label_input,\
label_small_input, photo_stream, label_stream, label_small_stream
def read_csv(filename):
with open(filename) as csvfile:
reader = csv.DictReader(csvfile)
l = list(reader)
return l
def read_txt(filename):
with open(filename) as txtfile:
lines = txtfile.readlines()
return [l[:-1] for l in lines]
def split_csvlist(stat_info):
cat = list(set([item['Category'] for item in stat_info]))
l = []
for c in cat:
li = [item for item in stat_info if item['Category'] == c]
l.append(li)
return cat, l
def binarize(sketch, threshold=245):
sketch[sketch < threshold] = 0
sketch[sketch >= threshold] = 255
return sketch
def write_image_data():
csv_file = os.path.join(info_dir, 'stats.csv')
stat_info = read_csv(csv_file)
classes = read_csv(classes_info)
classes_ids = [item['Name'] for item in classes]
test_list = read_txt(os.path.join(info_dir, 'testset.txt'))
invalid_notations = ['invalid-ambiguous.txt', 'invalid-context.txt', 'invalid-error.txt', 'invalid-pose.txt']
invalid_files = []
for txtfile in invalid_notations:
cur_path = os.path.join(info_dir, txtfile)
files = read_txt(cur_path)
files = [f[:-1] for f in files]
invalid_files.extend(files)
path_image = photo_folder
path_label = sketch_folder
dirs, stats = split_csvlist(stat_info)
photo_filename, label_filename, photo, label, photo_decoded, label_decoded, photo_input, label_input, \
label_small_input, photo_stream, label_stream, label_small_stream = build_graph()
assert len(dirs) == len(stats)
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
# coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(len(dirs)):
dir = dirs[i].replace(' ', '_')
print(dir)
class_id = classes_ids.index(dir)
stat = stats[i]
writer = tf.python_io.TFRecordWriter(os.path.join(data_dir, dir + '.tfrecord'))
cur_photo_path = os.path.join(path_image, dir)
cur_label_path = os.path.join(path_label, dir)
num_label = len(stat)
# photo_files = [f for f in os.listdir(cur_photo_path) if os.path.isfile(os.path.join(cur_photo_path, f))]
# label_files = [f for f in os.listdir(cur_label_path) if os.path.isfile(os.path.join(cur_label_path, f))]
for j in range(num_label):
if j % 500 == 499:
print(j)
item = stat[j]
ImageNetID = item['ImageNetID']
SketchID = int(item['SketchID'])
Category = item['Category']
CategoryID = int(item['CategoryID'])
Difficulty = int(item['Difficulty'])
Stroke_Count = int(item['Stroke_Count'])
WrongPose = int(item['WrongPose?'])
Context = int(item['Context?'])
Ambiguous = int(item['Ambiguous?'])
Error = int(item['Error?'])
if os.path.join(dir, ImageNetID + '.jpg') in test_list:
IsTest = 1
else:
IsTest = 0
# print(os.path.join(cur_photo_path, ImageNetID + '.jpg'))
# print(os.path.join(cur_label_path, ImageNetID + '-' + str(SketchID) + '.png'))
out_image, out_image_decoded = sess.run([photo, photo_decoded], feed_dict={
photo_filename: os.path.join(cur_photo_path, ImageNetID + '.jpg')})
out_label, out_label_decoded = sess.run([label, label_decoded], feed_dict={
label_filename: os.path.join(cur_label_path, ImageNetID + '-' + str(SketchID) + '.png')})
# Resize
out_image_decoded_small = cv2.resize(out_image_decoded, (64, 64), interpolation=cv2.INTER_AREA)
out_label_decoded = (np.sum(out_label_decoded.astype(np.float64), axis=2)/3).astype(np.uint8)
out_label_decoded_small = cv2.resize(out_label_decoded, (64, 64), interpolation=cv2.INTER_AREA)
# Distance map
out_dist_map = ndimage.distance_transform_edt(binarize(out_label_decoded))
out_dist_map = (out_dist_map / out_dist_map.max() * 255.).astype(np.uint8)
out_dist_map_small = ndimage.distance_transform_edt(binarize(out_label_decoded_small))
out_dist_map_small = (out_dist_map_small / out_dist_map_small.max() * 255.).astype(np.uint8)
# Stream
image_string_small, label_string_small = sess.run([photo_stream, label_small_stream], feed_dict={
photo_input: out_image_decoded_small, label_small_input: out_label_decoded_small.reshape((64, 64, 1))
})
dist_map_string = sess.run(label_stream, feed_dict={label_input: out_dist_map.reshape((256, 256, 1))})
dist_map_string_small = sess.run(label_small_stream, feed_dict={
label_small_input: out_dist_map_small.reshape((64, 64, 1))})
example = tf.train.Example(features=tf.train.Features(feature={
'ImageNetID': _bytes_feature(ImageNetID.encode('utf-8')),
'SketchID': _int64_feature(SketchID),
'Category': _bytes_feature(Category.encode('utf-8')),
'CategoryID': _int64_feature(CategoryID),
'Difficulty': _int64_feature(Difficulty),
'Stroke_Count': _int64_feature(Stroke_Count),
'WrongPose': _int64_feature(WrongPose),
'Context': _int64_feature(Context),
'Ambiguous': _int64_feature(Ambiguous),
'Error': _int64_feature(Error),
'is_test': _int64_feature(IsTest),
'class_id': _int64_feature(class_id),
'image_jpeg': _bytes_feature(out_image),
'image_small_jpeg': _bytes_feature(image_string_small),
'sketch_png': _bytes_feature(out_label),
'sketch_small_png': _bytes_feature(label_string_small),
'dist_map_png': _bytes_feature(dist_map_string),
'dist_map_small_png': _bytes_feature(dist_map_string_small),
}))
writer.write(example.SerializeToString())
# coord.request_stop()
# coord.join(threads)
writer.close()
write_image_data()
|
import pkg_resources
from rfhub.version import __version__
import rfhub.kwdb
# this will be defined once the app starts
KWDB = None
|
BATCH_SIZE = 64
EPOCHS = 100
IMG_WIDTH = 1801
IMG_HEIGHT = 32
NUM_CHANNELS = 3
NUM_CLASSES = 2
NUM_REGRESSION_OUTPUTS = 24
K_NEGATIVE_SAMPLE_RATIO_WEIGHT = 4
INPUT_SHAPE = (IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS)
PREDICTION_FILE_NAME = 'objects_obs1_lidar_predictions.csv'
PREDICTION_MD_FILE_NAME = 'objects_obs1_metadata.csv'
WEIGHT_BB = 0.01
LEARNING_RATE = 0.001
LIDAR_CONV_VERTICAL_STRIDE = 1
IMG_CAM_WIDTH = 1368
IMG_CAM_HEIGHT = 512
NUM_CAM_CHANNELS = 1
USE_FEATURE_WISE_BATCH_NORMALIZATION = True
USE_SAMPLE_WISE_BATCH_NORMALIZATION = False
|
#!/usr/bin/python
"""
Sphinx HTML output has leading newlines for some reason. This prevents
GH-Pages from auto-forwarding the root URL to index.html, so we need to trim
out the leading newlines. This is easy to do from the shell, but a Python
script is used here in the interest of cross-platform compatibility.
This script removes leading whitespace from the given file (in-place).
"""
import sys
if __name__ == "__main__":
filename = sys.argv[1]
with open(filename, 'r') as f:
contents = f.read()
with open(filename, 'w') as f:
f.write(contents.lstrip())
|
import time
import heapq
class PriorityQueue:
def __init__(self):
self._q = []
    def add(self, value, priority=0):
        # time.time() acts as a tie-breaker: equal priorities pop in roughly
        # insertion order, and heapq (almost) never compares the stored values.
        heapq.heappush(self._q, (priority, time.time(), value))
def pop(self):
return heapq.heappop(self._q)[-1]
f1 = lambda: print("hello")
f2 = lambda: print("world")
pq = PriorityQueue()
pq.add(f2, priority=1)
pq.add(f1, priority=0)
pq.pop()()  # prints "hello": priority 0 is popped before priority 1
pq.pop()()  # prints "world"
|
import click
from data.services import RottenTomatoesSearcher
from tables.builders import MovieSearchTableBuilder, TvShowSearchTableBuilder
from tables.rows.builders import MovieSearchRowBuilder, TvShowSearchRowBuilder
searcher = RottenTomatoesSearcher()
movie_search_table_builder = MovieSearchTableBuilder(MovieSearchRowBuilder())
tv_show_search_table_builder = TvShowSearchTableBuilder(TvShowSearchRowBuilder())
@click.command()
@click.argument("term", type=click.STRING)
def search(term):
results = searcher.search(term=term)
if len(results.movies) > 0:
movie_search_table = movie_search_table_builder.build(results.movies)
click.echo(movie_search_table)
if len(results.tv_shows) > 0:
tv_show_search_table = tv_show_search_table_builder.build(results.tv_shows)
click.echo(tv_show_search_table)
|
# 2017.12.16 by xiaohang
import sys
from caffenet import *
import numpy as np
import argparse
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import time
class CaffeDataLoader:
def __init__(self, protofile):
caffe.set_mode_cpu()
self.net = caffe.Net(protofile, 'aaa', caffe.TRAIN)
def next(self):
output = self.net.forward()
data = self.net.blobs['data'].data
label = self.net.blobs['label'].data
return data, label
def create_network(protofile, weightfile):
net = CaffeNet(protofile)
if args.cuda:
net.cuda()
print(net)
net.load_weights(weightfile)
net.train()
return net
def forward_network(net, data, label):
data = torch.from_numpy(data)
label = torch.from_numpy(label)
if args.cuda:
data = Variable(data.cuda())
label = Variable(label.cuda())
else:
data = Variable(data)
label = Variable(label)
blobs = net(data, label)
return blobs
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='convert caffe to pytorch')
parser.add_argument('--data_protofile', default='', type=str)
parser.add_argument('--net_protofile', default='', type=str)
parser.add_argument('--weightfile', default='', type=str)
parser.add_argument('--cuda', action='store_true', help='enables cuda')
args = parser.parse_args()
print(args)
data_protofile = args.data_protofile
net_protofile = args.net_protofile
weightfile = args.weightfile
data_loader = CaffeDataLoader(data_protofile)
net = create_network(net_protofile, weightfile)
net.set_verbose(False)
for i in range(10):
data, label = data_loader.next()
print('data shape', data.shape)
blobs = forward_network(net, data, label)
blob_names = blobs.keys()
for blob_name in blob_names:
if args.cuda:
blob_data = blobs[blob_name].data.cpu().numpy()
else:
blob_data = blobs[blob_name].data.numpy()
print('[%d] %-30s pytorch_shape: %-20s mean: %f' % (i, blob_name, blob_data.shape, blob_data.mean()))
|
import sys
import pandas as pd
data_type = sys.argv[1]
folder_no = sys.argv[2]
filename = '../data/MTA/MTA_videos_coords/' + data_type + '/cam_' + folder_no + '/coords_cam_' + folder_no + '.csv'
# filename = '../data/MTA/mta_data/images/' + data_type + '/cam_' + folder_no + '/coords_cam_' + folder_no + '.csv'
# file = np.loadtxt(filename, delimiter=',', skiprows=1)
output = '../data/MTA/MTA_videos_coords/' + data_type + '/cam_' + folder_no + '/test_new_' + folder_no + '.csv'
# output = '../data/MTA/mta_data/images/' + data_type + '/cam_' + folder_no + '/test_index_' + folder_no + '.csv'
chunksize = 10 ** 6
chunk_no = 1
for chunk in pd.read_csv(filename, chunksize=chunksize, index_col='Unnamed: 0'):
header = True if chunk_no == 1 else False
    df = chunk.query('frame_no_cam >= 37843 & frame_no_cam <= 42763').copy()  # .copy() avoids SettingWithCopyWarning on the assignment below
# df['frame_no_cam'] = df['frame_no_cam'].apply(lambda x: x - 37843)
df['frame_no_cam'] = df['frame_no_cam'].map(lambda x: x - 37843)
df.to_csv(output, header=header, mode='a')
chunk_no += 1
# res.to_csv(output)
# with open(output, 'a') as f:
# header = 'Unnamed: 0,frame_no_gta,frame_no_cam,person_id,appearance_id,joint_type,x_2D_joint,y_2D_joint,x_3D_joint,y_3D_joint,z_3D_joint,joint_occluded,joint_self_occluded,x_3D_cam,y_3D_cam,z_3D_cam,x_rot_cam,y_rot_cam,z_rot_cam,fov,x_3D_person,y_3D_person,z_3D_person,x_2D_person,y_2D_person,ped_type,wears_glasses,yaw_person,hours_gta,minutes_gta,seconds_gta,x_top_left_BB,y_top_left_BB,x_bottom_right_BB,y_bottom_right_BB\n'
# f.write(header)
# for unnamed, frame_no_gta, frame_no_cam, person_id, appearance_id, joint_type, x_2D_joint, y_2D_joint, \
# x_3D_joint, y_3D_joint, z_3D_joint, joint_occluded, joint_self_occluded, x_3D_cam, y_3D_cam, z_3D_cam, \
# x_rot_cam, y_rot_cam, z_rot_cam, fov, x_3D_person, y_3D_person, z_3D_person, x_2D_person, y_2D_person, \
# ped_type, wears_glasses, yaw_person, hours_gta, minutes_gta, seconds_gta, x_top_left_BB, y_top_left_BB, \
# x_bottom_right_BB, y_bottom_right_BB in file:
# if 37843 <= frame_no_cam <= 42763:
# gt_str = '{:d},{:d},{:d},{:d},{:d},{:d},{:f},{:f},' \
# '{:f},{:f},{:f},{:d},{:d},{:f},{:f},{:f},' \
# '{:f},{:f},{:f},{:d},{:f},{:f},{:f},{:f},{:f},' \
# '{:d},{:d},{:f},{:d},{:d},{:d},{:f},{:f},' \
# '{:f},{:f}\n'.format(
# int(unnamed), int(frame_no_gta), int(frame_no_cam) - 37843, int(person_id), int(appearance_id), int(joint_type), x_2D_joint, y_2D_joint,
# x_3D_joint, y_3D_joint, z_3D_joint, int(joint_occluded), int(joint_self_occluded), x_3D_cam, y_3D_cam, z_3D_cam,
# x_rot_cam, y_rot_cam, z_rot_cam, int(fov), x_3D_person, y_3D_person, z_3D_person, x_2D_person, y_2D_person,
# int(ped_type), int(wears_glasses), yaw_person, int(hours_gta), int(minutes_gta), int(seconds_gta), x_top_left_BB, y_top_left_BB,
# x_bottom_right_BB, y_bottom_right_BB)
# f.write(gt_str)
|
import numpy as np
import cupy as cp
class model_mixer:
''' Functions for combining the weights of different models. The models should probably be based on an initial training run so that the majority of parameters are the same to begin with.'''
def __init__(self):
self._model_parameters = []
self._parameter_indicies = []
self._weight_stats = []
def add_model(self, model, replace = -1):
        '''Just add a model; stats will be added separately'''
if replace == -1:
# This is only storing weights for now
self._model_parameters.append([cp.asnumpy(i.weights) for i in model.layers])
else:
self._model_parameters[replace] = [cp.asnumpy(i.weights) for i in model.layers]
return
def add_stats(self, stats, replace = -1):
        '''One stats array per model; they should be added with an index so they can be changed at any time. If replace does not specify a position, they will be added to the last set in the list.'''
return
def add_MAAV_stats(self, model, data, replace = -1):
''' Adds MAAV stats from another module I built.'''
if replace == -1:
self._weight_stats.append([np.zeros(i._weights.shape) for i in model.layers])
else:
self._weight_stats[replace] = [np.zeros(i._weights.shape) for i in model.layers]
for layer in model.layers:
layer._dropout = None
for i in data:
if model._layer_type == 'Sparse':
this_layer_inputs = i.transpose()
if model._layer_type == 'Full':
this_layer_inputs = i
if model._comp_type == 'GPU':
this_layer_inputs = cp.array(this_layer_inputs)
output = None
layer_count = 0
for layer in model.layers:
# Get the activated values
self._weight_stats[replace][layer_count] += abs(cp.asnumpy(layer.activate_weights(this_layer_inputs)))
# Run a step forward in the model
output = layer.activate(this_layer_inputs)
# Store the output for the next layers input
this_layer_inputs = output
# Iterate through the layers
layer_count += 1
# Convert stuff here
for i in self._weight_stats[replace]:
i = i/len(data)
for layer in model.layers:
layer._dropout = model._dropout
return
def add_indices(self, indices, replace = None):
        ''' Adds the indices for one of the models. If replace does not specify a position, they will be added to the last set in the list.'''
return
def add_absolute_values(self, model, replace = -1):
''' Adds the absolute values as the stats.'''
if replace == -1:
self._weight_stats.append([abs(cp.asnumpy(i._weights)) for i in model.layers])
else:
self._weight_stats[replace] = ([abs(cp.asnumpy(i._weights)) for i in model.layers])
return
def rank_and_zero(self, models = False, zero = False):
        '''Ranks the stats arrays and zeroes a fraction of them. Set zero=<ratio> to zero out that fraction of the lowest-ranked values (applied to every model after the first).'''
if models == False:
models = np.arange(len(self._weight_stats))
for i in models:
for j in range(len(self._weight_stats[i])):
self._weight_stats[i][j] = np.argsort(self._weight_stats[i][j], axis = None)
temp = np.empty_like(self._weight_stats[i][j])
temp[self._weight_stats[i][j]] = np.arange(len(self._weight_stats[i][j]))
self._weight_stats[i][j] = np.reshape(temp, self._model_parameters[i][j].shape)+1
#self._weight_stats[i] = [np.argsort(j, axis = None).reshape(j.shape)+1 for j in self._weight_stats[i]]
if i != 0 and zero:
for j in self._weight_stats[i]:
j[j < zero*(j.size)] = 0
return
def replace_all(self, model, primary, secondary):
''' Replaces all of the parameters in the primary model with those in the secondary determined by non zero values.'''
output_parameters = self._weight_stats[primary]
output_parameters *= (self._weight_stats[secondary]!=0)
output_parameters += self._weight_stats[secondary]
#output is adding them together, putting them into the model.
for i in range(len(output_parameters)):
if model._comp_type == 'CPU':
model.layers[i]._weights = output_parameters[i]
if model._comp_type == 'GPU':
model.layers[i]._weights = cp.array(output_parameters[i])
return
def average_all(self, model):
        '''Sets the model's parameters to the average of all of the stored parameters; probably won't work well in most situations.'''
output_parameters = [np.zeros(i._weights.shape) for i in model.layers]
for i in range(len(self._model_parameters)):
for j in range(len(output_parameters)):
output_parameters[j] += self._model_parameters[i][j]
output_parameters = [i/len(self._model_parameters) for i in output_parameters]
# Now put them back in the model
for i in range(len(output_parameters)):
if model._comp_type == 'CPU':
model.layers[i]._weights = output_parameters[i]
if model._comp_type == 'GPU':
model.layers[i]._weights = cp.array(output_parameters[i])
return
def importance_weighted_average(self, model):
'''Averages parameters based on their relative importance according to the weight_stats arrays.'''
allsums = [np.zeros(i._weights.shape) for i in model.layers]
output_parameters = [np.zeros(i._weights.shape) for i in model.layers]
for i in range(len(self._weight_stats)):
for j in range(len(output_parameters)):
allsums[j] += self._weight_stats[i][j]
for i in range(len(self._weight_stats)):
for j in range(len(output_parameters)):
output_parameters[j] += self._model_parameters[i][j]*(self._weight_stats[i][j]/allsums[j])
# Now put the parameters back in the model
for i in range(len(output_parameters)):
if model._comp_type == 'CPU':
model.layers[i]._weights = output_parameters[i]
if model._comp_type == 'GPU':
model.layers[i]._weights = cp.array(output_parameters[i])
return
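# --- Hedged usage sketch (not part of the original module) -------------------
# Assumption: no trained model object is available here, so the internal lists
# are filled directly with small numpy arrays instead of going through
# add_model()/add_absolute_values(), which expect a model with .layers.
# This only illustrates how rank_and_zero() turns the stats into ranks and
# zeroes the lowest-ranked entries (rank < zero * size) for every model after
# the first. It relies on this file's own imports (numpy; cupy only for the
# model-based helpers, which are not exercised here).
if __name__ == '__main__':
    mixer = model_mixer()
    mixer._model_parameters = [[np.ones((2, 3))], [np.ones((2, 3))]]
    mixer._weight_stats = [
        [np.arange(6, dtype=float).reshape(2, 3)],          # stats for model 0
        [np.arange(6, 0, -1, dtype=float).reshape(2, 3)],   # stats for model 1
    ]
    mixer.rank_and_zero(zero=0.5)
    print(mixer._weight_stats[0][0])  # ranks 1..6, nothing zeroed for model 0
    print(mixer._weight_stats[1][0])  # ranks with the lowest-ranked entries set to 0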
|
#!/usr/bin/env python
import pytest
"""
Test 1771. Maximize Palindrome Length From Subsequences
"""
@pytest.fixture(scope="session")
def init_variables_1771():
from src.leetcode_1771_maximize_palindrome_length_from_subsequences import Solution
solution = Solution()
def _init_variables_1771():
return solution
yield _init_variables_1771
class TestClass1771:
def test_solution_0(self, init_variables_1771):
assert init_variables_1771().longestPalindrome("cacb", "cbba") == 5
def test_solution_1(self, init_variables_1771):
assert init_variables_1771().longestPalindrome("ab", "ab") == 3
def test_solution_2(self, init_variables_1771):
assert init_variables_1771().longestPalindrome("aa", "bb") == 0
|
matches = 0
for line in open('2/input.txt'):
acceptable_positions, letter, password = line.split(" ")
low, high = acceptable_positions.split("-")
if (letter[0] == password[int(low)-1] ) != (letter[0] == password[int(high)-1]):
matches += 1
print(matches)
|
import numpy as np
def editDistance(s1, s2):
    # Standard dynamic-programming (Levenshtein) edit distance.
    m = len(s1) + 1
    n = len(s2) + 1
    tbl = np.empty([m, n])
    for i in range(m): tbl[i, 0] = i
    for j in range(n): tbl[0, j] = j
    for i in range(1, m):
        for j in range(1, n):
            cost = 0 if s1[i-1] == s2[j-1] else 1
            tbl[i, j] = min(tbl[i, j-1] + 1, tbl[i-1, j] + 1, tbl[i-1, j-1] + cost)
    return tbl[m-1, n-1]
#test
if __name__ == "__main__":
    d = -1
    for i in range(1000000):
        d = editDistance("AAAATTTTCCCCGGGGAAAANTTTTCCCCGGGG", "AAAATTTTCCCCGGGGAAAAMTTTTCCCCGGGG")
    print(d)
|
from setuptools import find_packages, setup
__version__ = '0.1'
url = 'https://github.com/ntt123/haiku_trainer'
download_url = '{}/archive/{}.tar.gz'.format(url, __version__)
install_requires = []
setup_requires = []
tests_require = []
setup(
name='haiku_trainer',
version=__version__,
description='A helper library for training dm-haiku models.',
author='Thông Nguyên',
author_email='xcodevn@gmail.com',
url=url,
download_url=download_url,
keywords=['dm-haiku', 'parameters', 'deep-learning', 'trainer', 'jax'],
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
packages=find_packages(),
)
|
import requests
import os
import sys
import time
import datetime
start = time.mktime(time.localtime())
t = datetime.datetime.now()
today = t.strftime('%m-%d-%Y')
vm_name = "Kali-%s" % today
url = 'https://images.offensive-security.com/\
virtual-images/kali-linux-2020.3-vbox-amd64.ova'
local_filename = url.split('/')[-1]
def download_file(url):
if os.path.exists(local_filename):
print('[*] Kali OVA Already Downloaded.')
else:
print('[*] Downloading Kali OVA...')
with open(local_filename, 'wb') as f:
response = requests.get(url, stream=True)
total = response.headers.get('content-length')
if total is None:
f.write(response.content)
else:
downloaded = 0
total = int(total)
for data in response.iter_content(chunk_size=max(int(total/1000), 1024*1024)):
downloaded += len(data)
f.write(data)
done = int(50*downloaded/total)
sys.stdout.write('\r[{}{}]'.format('█' * done, '.' * (50-done)))
sys.stdout.flush()
sys.stdout.write('\n')
        print('[*] Done!')
    return local_filename
def build_kali_vm():
print('[*] Configuring Kali VM.')
cmd = ("vboxmanage import %s --vsys 0 --vmname %s --eula accept" % (local_filename, vm_name))
os.system(cmd)
clipboardon = "vboxmanage modifyvm %s --clipboard bidirectional" % vm_name
os.system(clipboardon)
# scalescreen = "VBoxManage setextradata %s GUI/ScaleFactor 2.5" % vm_name
# os.system(scalescreen)
def update_vm():
print('[*] Starting Kali VM Headless.')
startvmheadless = "VBoxManage startvm %s --type headless > /dev/null 2>&1 " % vm_name
os.system(startvmheadless)
time.sleep(45)
print('[*] Updating Kali VM')
cmd = 'VBoxManage guestcontrol %s run --quiet --verbose --exe "/bin/bash"\
--username root --password toor --wait-stdout -- "/bin/bash" "-c"\
"apt-mark hold virtualbox* > /dev/null 2>&1\
        && apt-get update > /dev/null 2>&1\
        && DEBIAN_FRONTEND=noninteractive apt-get -y upgrade > /dev/null 2>&1\
&& apt-mark unhold virtualbox* > /dev/null 2>&1"' % vm_name
os.system(cmd)
print('[*] Rebooting Kali VM.')
stopvm = 'VBoxManage controlvm %s poweroff > /dev/null 2>&1' % vm_name
os.system(stopvm)
time.sleep(30)
print('[*] Rebooting Kali VM.')
def main():
download_file(url)
build_kali_vm()
update_vm()
stop = time.mktime(time.localtime())
elapsed = datetime.timedelta(seconds=stop-start)
print('[*] %s Took %s To Build.' % (vm_name, elapsed))
startvm = "VBoxManage startvm %s> /dev/null 2>&1 " % vm_name
os.system(startvm)
main()
|
from bolsonaro.models.model_raw_results import ModelRawResults
from bolsonaro.models.omp_forest_regressor import OmpForestRegressor
from bolsonaro.models.omp_forest_classifier import OmpForestBinaryClassifier, OmpForestMulticlassClassifier
from bolsonaro.models.nn_omp_forest_regressor import NonNegativeOmpForestRegressor
from bolsonaro.models.nn_omp_forest_classifier import NonNegativeOmpForestBinaryClassifier
from bolsonaro.models.similarity_forest_regressor import SimilarityForestRegressor, SimilarityForestClassifier
from bolsonaro.models.kmeans_forest_regressor import KMeansForestRegressor, KMeansForestClassifier
from bolsonaro.models.ensemble_selection_forest_regressor import EnsembleSelectionForestRegressor, EnsembleSelectionForestClassifier
from bolsonaro.error_handling.logger_factory import LoggerFactory
from bolsonaro.data.task import Task
from . import LOG_PATH
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.preprocessing import normalize
import time
import datetime
import numpy as np
import os
import pickle
class Trainer(object):
"""
    Class capable of fitting any model object to some prepared data, then evaluating and saving the results through the `train` method.
"""
def __init__(self, dataset, regression_score_metric=mean_squared_error, classification_score_metric=accuracy_score,
base_regression_score_metric=mean_squared_error, base_classification_score_metric=accuracy_score):
"""
:param dataset: Object with X_train, y_train, X_dev, y_dev, X_test and Y_test attributes
"""
self._dataset = dataset
self._logger = LoggerFactory.create(LOG_PATH, __name__)
self._regression_score_metric = regression_score_metric
self._classification_score_metric = classification_score_metric
self._base_regression_score_metric = base_regression_score_metric
self._base_classification_score_metric = base_classification_score_metric
self._score_metric_name = regression_score_metric.__name__ if dataset.task == Task.REGRESSION \
else classification_score_metric.__name__
self._base_score_metric_name = base_regression_score_metric.__name__ if dataset.task == Task.REGRESSION \
else base_classification_score_metric.__name__
@property
def score_metric_name(self):
return self._score_metric_name
@property
def base_score_metric_name(self):
return self._base_score_metric_name
def init(self, model, subsets_used='train,dev'):
if type(model) in [RandomForestRegressor, RandomForestClassifier]:
if subsets_used == 'train,dev':
self._X_forest = self._dataset.X_train
self._y_forest = self._dataset.y_train
else:
self._X_forest = np.concatenate([self._dataset.X_train, self._dataset.X_dev])
self._y_forest = np.concatenate([self._dataset.y_train, self._dataset.y_dev])
self._logger.debug('Fitting the forest on train subset')
elif model.models_parameters.subsets_used == 'train,dev':
self._X_forest = self._dataset.X_train
self._y_forest = self._dataset.y_train
self._X_omp = self._dataset.X_dev
self._y_omp = self._dataset.y_dev
self._logger.debug('Fitting the forest on train subset and OMP on dev subset.')
elif model.models_parameters.subsets_used == 'train+dev,train+dev':
self._X_forest = np.concatenate([self._dataset.X_train, self._dataset.X_dev])
self._X_omp = self._X_forest
self._y_forest = np.concatenate([self._dataset.y_train, self._dataset.y_dev])
self._y_omp = self._y_forest
self._logger.debug('Fitting both the forest and OMP on train+dev subsets.')
elif model.models_parameters.subsets_used == 'train,train+dev':
self._X_forest = self._dataset.X_train
self._y_forest = self._dataset.y_train
self._X_omp = np.concatenate([self._dataset.X_train, self._dataset.X_dev])
self._y_omp = np.concatenate([self._dataset.y_train, self._dataset.y_dev])
else:
raise ValueError("Unknown specified subsets_used parameter '{}'".format(model.models_parameters.subsets_used))
def train(self, model, extracted_forest_size=None, seed=None, use_distillation=False):
"""
:param model: An instance of either RandomForestRegressor, RandomForestClassifier, OmpForestRegressor,
OmpForestBinaryClassifier, OmpForestMulticlassClassifier.
:return:
"""
self._logger.debug('Training model using train set...')
self._begin_time = time.time()
if type(model) in [RandomForestRegressor, RandomForestClassifier]:
if extracted_forest_size is not None:
estimators_index = np.arange(len(model.estimators_))
np.random.seed(seed)
np.random.shuffle(estimators_index)
                chosen_estimators = estimators_index[:extracted_forest_size]
                model.estimators_ = np.array(model.estimators_)[chosen_estimators]
else:
model.fit(
X=self._X_forest,
y=self._y_forest
)
else:
if type(model) in [OmpForestRegressor, OmpForestBinaryClassifier, OmpForestMulticlassClassifier,
NonNegativeOmpForestRegressor, NonNegativeOmpForestBinaryClassifier] and \
use_distillation:
model.fit(
self._X_forest, # X_train or X_train+X_dev
self._y_forest,
self._X_omp, # X_train+X_dev or X_dev
self._y_omp,
use_distillation=use_distillation
)
else:
model.fit(
self._X_forest, # X_train or X_train+X_dev
self._y_forest,
self._X_omp, # X_train+X_dev or X_dev
self._y_omp
)
self._end_time = time.time()
def __score_func(self, model, X, y_true, weights=True, extracted_forest_size=None):
if type(model) in [OmpForestRegressor, RandomForestRegressor]:
if weights:
y_pred = model.predict(X)
else:
y_pred = model.predict_no_weights(X)
result = self._regression_score_metric(y_true, y_pred)
elif type(model) == NonNegativeOmpForestRegressor:
if weights:
y_pred = model.predict(X, extracted_forest_size)
else:
y_pred = model.predict_no_weights(X, extracted_forest_size)
result = self._regression_score_metric(y_true, y_pred)
elif type(model) == NonNegativeOmpForestBinaryClassifier:
if weights:
y_pred = model.predict(X, extracted_forest_size)
else:
y_pred = model.predict_no_weights(X, extracted_forest_size)
y_pred = np.sign(y_pred)
y_pred = np.where(y_pred == 0, 1, y_pred)
result = self._classification_score_metric(y_true, y_pred)
elif type(model) in [OmpForestBinaryClassifier, OmpForestMulticlassClassifier, RandomForestClassifier]:
if weights:
y_pred = model.predict(X)
else:
y_pred = model.predict_no_weights(X)
if type(model) is OmpForestBinaryClassifier:
y_pred = np.sign(y_pred)
y_pred = np.where(y_pred == 0, 1, y_pred)
result = self._classification_score_metric(y_true, y_pred)
elif type(model) in [SimilarityForestRegressor, SimilarityForestClassifier, KMeansForestRegressor, EnsembleSelectionForestRegressor, KMeansForestClassifier,
EnsembleSelectionForestClassifier]:
result = model.score(X, y_true)
return result
def __score_func_base(self, model, X, y_true):
if type(model) in [OmpForestRegressor, SimilarityForestRegressor, KMeansForestRegressor, EnsembleSelectionForestRegressor,
NonNegativeOmpForestRegressor]:
y_pred = model.predict_base_estimator(X)
result = self._base_regression_score_metric(y_true, y_pred)
elif type(model) in [OmpForestBinaryClassifier, OmpForestMulticlassClassifier, KMeansForestClassifier,
SimilarityForestClassifier, EnsembleSelectionForestClassifier, NonNegativeOmpForestBinaryClassifier]:
y_pred = model.predict_base_estimator(X)
result = self._base_classification_score_metric(y_true, y_pred)
elif type(model) == RandomForestClassifier:
y_pred = model.predict(X)
result = self._base_classification_score_metric(y_true, y_pred)
elif type(model) is RandomForestRegressor:
y_pred = model.predict(X)
result = self._base_regression_score_metric(y_true, y_pred)
return result
    def _evaluate_predictions(self, predictions, aggregation_function):
        # Coherence/correlation of the selected trees: rows are L2-normalized
        # per-tree prediction vectors, so predictions @ predictions.T holds the
        # pairwise cosine similarities; subtracting the identity zeroes the
        # diagonal (self-similarities) before aggregating
        # (np.max -> coherence, np.mean -> correlation).
        predictions = normalize(predictions)
        return aggregation_function(np.abs((predictions @ predictions.T - np.eye(len(predictions)))))
    def _compute_forest_strength(self, predictions, y, metric_function):
        # Strength: per-tree score against the ground truth, plus its mean over the trees.
        scores = np.array([metric_function(y, prediction) for prediction in predictions])
        return scores, np.mean(scores)
def compute_results(self, model, models_dir, subsets_used='train+dev,train+dev', extracted_forest_size=None):
"""
:param model: Object with
:param models_dir: Where the results will be saved
"""
# Reeeally dirty to put that here but otherwise it's not thread safe...
if type(model) in [RandomForestRegressor, RandomForestClassifier]:
if subsets_used == 'train,dev':
X_forest = self._dataset.X_train
y_forest = self._dataset.y_train
else:
X_forest = np.concatenate([self._dataset.X_train, self._dataset.X_dev])
y_forest = np.concatenate([self._dataset.y_train, self._dataset.y_dev])
X_omp = self._dataset.X_dev
y_omp = self._dataset.y_dev
elif model.models_parameters.subsets_used == 'train,dev':
X_forest = self._dataset.X_train
y_forest = self._dataset.y_train
X_omp = self._dataset.X_dev
y_omp = self._dataset.y_dev
elif model.models_parameters.subsets_used == 'train+dev,train+dev':
X_forest = np.concatenate([self._dataset.X_train, self._dataset.X_dev])
X_omp = X_forest
y_forest = np.concatenate([self._dataset.y_train, self._dataset.y_dev])
y_omp = y_forest
elif model.models_parameters.subsets_used == 'train,train+dev':
X_forest = self._dataset.X_train
y_forest = self._dataset.y_train
X_omp = np.concatenate([self._dataset.X_train, self._dataset.X_dev])
y_omp = np.concatenate([self._dataset.y_train, self._dataset.y_dev])
else:
raise ValueError("Unknown specified subsets_used parameter '{}'".format(model.models_parameters.subsets_used))
model_weights = ''
if type(model) in [OmpForestRegressor, OmpForestBinaryClassifier]:
model_weights = model._omp.coef_
elif type(model) == OmpForestMulticlassClassifier:
model_weights = model._dct_class_omp
elif type(model) == OmpForestBinaryClassifier:
model_weights = model._omp
elif type(model) in [NonNegativeOmpForestRegressor, NonNegativeOmpForestBinaryClassifier]:
model_weights = model._omp.get_coef(extracted_forest_size)
if type(model) in [SimilarityForestRegressor, KMeansForestRegressor, EnsembleSelectionForestRegressor,
SimilarityForestClassifier, KMeansForestClassifier, EnsembleSelectionForestClassifier]:
selected_trees = model.selected_trees
elif type(model) in [OmpForestRegressor, OmpForestMulticlassClassifier, OmpForestBinaryClassifier,
NonNegativeOmpForestRegressor, NonNegativeOmpForestBinaryClassifier]:
selected_trees = np.asarray(model.forest)[model_weights != 0]
elif type(model) in [RandomForestRegressor, RandomForestClassifier]:
selected_trees = model.estimators_
if len(selected_trees) > 0:
target_selected_tree = int(os.path.split(models_dir)[-1])
if target_selected_tree != len(selected_trees):
predictions_X_omp = model.predict(X_omp, extracted_forest_size) \
if type(model) in [NonNegativeOmpForestBinaryClassifier, NonNegativeOmpForestRegressor] \
else model.predict(X_omp)
error_prediction = np.linalg.norm(predictions_X_omp - y_omp)
if not np.isclose(error_prediction, 0):
#raise ValueError(f'Invalid selected tree number target_selected_tree:{target_selected_tree} - len(selected_trees):{len(selected_trees)}')
self._logger.error(f'Invalid selected tree number target_selected_tree:{target_selected_tree} - len(selected_trees):{len(selected_trees)}')
else:
self._logger.warning(f"Invalid selected tree number target_selected_tree:{target_selected_tree} - len(selected_trees):{len(selected_trees)}"
" But the prediction is perfect on X_omp. Keep less trees.")
with open(os.path.join(models_dir, 'selected_trees.pickle'), 'wb') as output_file:
pickle.dump(selected_trees, output_file)
strength_metric = self._regression_score_metric if self._dataset.task == Task.REGRESSION \
else lambda y_true, y_pred: self._classification_score_metric(y_true, (y_pred -0.5)*2)
train_predictions = np.array([tree.predict(X_forest) for tree in selected_trees])
dev_predictions = np.array([tree.predict(X_omp) for tree in selected_trees])
test_predictions = np.array([tree.predict(self._dataset.X_test) for tree in selected_trees])
train_scores, train_strength = self._compute_forest_strength(train_predictions, y_forest, strength_metric)
dev_scores, dev_strength = self._compute_forest_strength(dev_predictions, y_omp, strength_metric)
test_scores, test_strength = self._compute_forest_strength(test_predictions, self._dataset.y_test, strength_metric)
results = ModelRawResults(
model_weights=model_weights,
training_time=self._end_time - self._begin_time,
datetime=datetime.datetime.now(),
train_score=self.__score_func(model, X_forest, y_forest, extracted_forest_size=extracted_forest_size),
dev_score=self.__score_func(model, X_omp, y_omp, extracted_forest_size=extracted_forest_size),
test_score=self.__score_func(model, self._dataset.X_test, self._dataset.y_test, extracted_forest_size=extracted_forest_size),
train_score_base=self.__score_func_base(model, X_forest, y_forest),
dev_score_base=self.__score_func_base(model, X_omp, y_omp),
test_score_base=self.__score_func_base(model, self._dataset.X_test, self._dataset.y_test),
score_metric=self._score_metric_name,
base_score_metric=self._base_score_metric_name,
train_coherence=self._evaluate_predictions(train_predictions, aggregation_function=np.max),
dev_coherence=self._evaluate_predictions(dev_predictions, aggregation_function=np.max),
test_coherence=self._evaluate_predictions(test_predictions, aggregation_function=np.max),
train_correlation=self._evaluate_predictions(train_predictions, aggregation_function=np.mean),
dev_correlation=self._evaluate_predictions(dev_predictions, aggregation_function=np.mean),
test_correlation=self._evaluate_predictions(test_predictions, aggregation_function=np.mean),
train_scores=train_scores,
dev_scores=dev_scores,
test_scores=test_scores,
train_strength=train_strength,
dev_strength=dev_strength,
test_strength=test_strength
)
results.save(models_dir)
self._logger.info("Base performance on test: {}".format(results.test_score_base))
self._logger.info("Performance on test: {}".format(results.test_score))
self._logger.info("Base performance on train: {}".format(results.train_score_base))
self._logger.info("Performance on train: {}".format(results.train_score))
self._logger.info("Base performance on dev: {}".format(results.dev_score_base))
self._logger.info("Performance on dev: {}".format(results.dev_score))
self._logger.info(f'test_coherence: {results.test_coherence}')
self._logger.info(f'test_correlation: {results.test_correlation}')
self._logger.info(f'test_strength: {results.test_strength}')
if type(model) in [OmpForestBinaryClassifier, OmpForestRegressor, OmpForestMulticlassClassifier,
NonNegativeOmpForestBinaryClassifier, NonNegativeOmpForestRegressor]:
results = ModelRawResults(
model_weights='',
training_time=self._end_time - self._begin_time,
datetime=datetime.datetime.now(),
train_score=self.__score_func(model, X_forest, y_forest, False, extracted_forest_size=extracted_forest_size),
dev_score=self.__score_func(model, X_omp, y_omp, False, extracted_forest_size=extracted_forest_size),
test_score=self.__score_func(model, self._dataset.X_test, self._dataset.y_test, False, extracted_forest_size=extracted_forest_size),
train_score_base=self.__score_func_base(model, X_forest, y_forest),
dev_score_base=self.__score_func_base(model, X_omp, y_omp),
test_score_base=self.__score_func_base(model, self._dataset.X_test, self._dataset.y_test),
score_metric=self._score_metric_name,
base_score_metric=self._base_score_metric_name,
train_scores=train_scores,
dev_scores=dev_scores,
test_scores=test_scores
)
results.save(models_dir+'_no_weights')
self._logger.info("Base performance on test without weights: {}".format(results.test_score_base))
self._logger.info("Performance on test without weights: {}".format(results.test_score))
self._logger.info("Base performance on train without weights: {}".format(results.train_score_base))
self._logger.info("Performance on train without weights: {}".format(results.train_score))
self._logger.info("Base performance on dev without weights: {}".format(results.dev_score_base))
self._logger.info("Performance on dev without weights: {}".format(results.dev_score))
|
from __future__ import absolute_import
from django.test import TestCase
from django.test.utils import override_settings
import django_dynamic_fixture as fixture
from readthedocs.projects.models import Project
@override_settings(
USE_SUBDOMAIN=True, PUBLIC_DOMAIN='public.readthedocs.org', SERVE_PUBLIC_DOCS=True
)
class RedirectSingleVersionTests(TestCase):
def setUp(self):
self.pip = fixture.get(Project, slug='pip', single_version=True, main_language_project=None)
def test_docs_url_generation(self):
with override_settings(USE_SUBDOMAIN=False):
self.assertEqual(self.pip.get_docs_url(),
'http://readthedocs.org/docs/pip/')
with override_settings(USE_SUBDOMAIN=True):
self.assertEqual(self.pip.get_docs_url(),
'http://pip.public.readthedocs.org/')
self.pip.single_version = False
with override_settings(USE_SUBDOMAIN=False):
self.assertEqual(self.pip.get_docs_url(),
'http://readthedocs.org/docs/pip/en/latest/')
with override_settings(USE_SUBDOMAIN=True):
self.assertEqual(self.pip.get_docs_url(),
'http://pip.public.readthedocs.org/en/latest/')
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
import warnings
from collections import defaultdict
import pytest
import torch
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.distributions.testing import fakes
from pyro.infer import (SVI, EnergyDistance, Trace_ELBO, TraceEnum_ELBO, TraceGraph_ELBO, TraceMeanField_ELBO,
TraceTailAdaptive_ELBO, config_enumerate)
from pyro.infer.reparam import LatentStableReparam
from pyro.infer.tracetmc_elbo import TraceTMC_ELBO
from pyro.infer.util import torch_item
from pyro.ops.indexing import Vindex
from pyro.optim import Adam
from pyro.poutine.plate_messenger import block_plate
from tests.common import assert_close
logger = logging.getLogger(__name__)
# This file tests a variety of model,guide pairs with valid and invalid structure.
def EnergyDistance_prior(**kwargs):
kwargs["prior_scale"] = 0.0
kwargs.pop("strict_enumeration_warning", None)
return EnergyDistance(**kwargs)
def EnergyDistance_noprior(**kwargs):
kwargs["prior_scale"] = 1.0
kwargs.pop("strict_enumeration_warning", None)
return EnergyDistance(**kwargs)
def assert_ok(model, guide, elbo, **kwargs):
"""
Assert that inference works without warnings or errors.
"""
pyro.clear_param_store()
inference = SVI(model, guide, Adam({"lr": 1e-6}), elbo)
inference.step(**kwargs)
try:
pyro.set_rng_seed(0)
loss = elbo.loss(model, guide, **kwargs)
if hasattr(elbo, "differentiable_loss"):
try:
pyro.set_rng_seed(0)
differentiable_loss = torch_item(elbo.differentiable_loss(model, guide, **kwargs))
except ValueError:
pass # Ignore cases where elbo cannot be differentiated
else:
assert_close(differentiable_loss, loss, atol=0.01)
if hasattr(elbo, "loss_and_grads"):
pyro.set_rng_seed(0)
loss_and_grads = elbo.loss_and_grads(model, guide, **kwargs)
assert_close(loss_and_grads, loss, atol=0.01)
except NotImplementedError:
pass # Ignore cases where loss isn't implemented, eg. TraceTailAdaptive_ELBO
def assert_error(model, guide, elbo, match=None):
"""
Assert that inference fails with an error.
"""
pyro.clear_param_store()
inference = SVI(model, guide, Adam({"lr": 1e-6}), elbo)
with pytest.raises((NotImplementedError, UserWarning, KeyError, ValueError, RuntimeError),
match=match):
inference.step()
def assert_warning(model, guide, elbo):
"""
Assert that inference works but with a warning.
"""
pyro.clear_param_store()
inference = SVI(model, guide, Adam({"lr": 1e-6}), elbo)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
inference.step()
assert len(w), 'No warnings were raised'
for warning in w:
logger.info(warning)
@pytest.mark.parametrize("Elbo", [
Trace_ELBO,
TraceGraph_ELBO,
TraceEnum_ELBO,
TraceTMC_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
])
@pytest.mark.parametrize("strict_enumeration_warning", [True, False])
def test_nonempty_model_empty_guide_ok(Elbo, strict_enumeration_warning):
def model():
loc = torch.tensor([0.0, 0.0])
scale = torch.tensor([1.0, 1.0])
pyro.sample("x", dist.Normal(loc, scale).to_event(1), obs=loc)
def guide():
pass
elbo = Elbo(strict_enumeration_warning=strict_enumeration_warning)
if strict_enumeration_warning and Elbo in (TraceEnum_ELBO, TraceTMC_ELBO):
assert_warning(model, guide, elbo)
else:
assert_ok(model, guide, elbo)
@pytest.mark.parametrize("Elbo", [
Trace_ELBO,
TraceGraph_ELBO,
TraceEnum_ELBO,
TraceTMC_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
])
@pytest.mark.parametrize("strict_enumeration_warning", [True, False])
def test_nonempty_model_empty_guide_error(Elbo, strict_enumeration_warning):
def model():
pyro.sample("x", dist.Normal(0, 1))
def guide():
pass
elbo = Elbo(strict_enumeration_warning=strict_enumeration_warning)
assert_error(model, guide, elbo)
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
@pytest.mark.parametrize("strict_enumeration_warning", [True, False])
def test_empty_model_empty_guide_ok(Elbo, strict_enumeration_warning):
def model():
pass
def guide():
pass
elbo = Elbo(strict_enumeration_warning=strict_enumeration_warning)
if strict_enumeration_warning and Elbo in (TraceEnum_ELBO, TraceTMC_ELBO):
assert_warning(model, guide, elbo)
else:
assert_ok(model, guide, elbo)
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_variable_clash_in_model_error(Elbo):
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
pyro.sample("x", dist.Bernoulli(p)) # Should error here.
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p))
assert_error(model, guide, Elbo(), match='Multiple sample sites named')
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_model_guide_dim_mismatch_error(Elbo):
def model():
loc = torch.zeros(2)
scale = torch.ones(2)
pyro.sample("x", dist.Normal(loc, scale).to_event(1))
def guide():
loc = pyro.param("loc", torch.zeros(2, 1, requires_grad=True))
scale = pyro.param("scale", torch.ones(2, 1, requires_grad=True))
pyro.sample("x", dist.Normal(loc, scale).to_event(2))
assert_error(model, guide, Elbo(strict_enumeration_warning=False),
match='invalid log_prob shape|Model and guide event_dims disagree')
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_model_guide_shape_mismatch_error(Elbo):
def model():
loc = torch.zeros(1, 2)
scale = torch.ones(1, 2)
pyro.sample("x", dist.Normal(loc, scale).to_event(2))
def guide():
loc = pyro.param("loc", torch.zeros(2, 1, requires_grad=True))
scale = pyro.param("scale", torch.ones(2, 1, requires_grad=True))
pyro.sample("x", dist.Normal(loc, scale).to_event(2))
assert_error(model, guide, Elbo(strict_enumeration_warning=False),
match='Model and guide shapes disagree')
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_variable_clash_in_guide_error(Elbo):
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p))
pyro.sample("x", dist.Bernoulli(p)) # Should error here.
assert_error(model, guide, Elbo(), match='Multiple sample sites named')
@pytest.mark.parametrize("has_rsample", [False, True])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_set_has_rsample_ok(has_rsample, Elbo):
# This model has sparse gradients, so users may want to disable
# reparametrized sampling to reduce variance of gradient estimates.
# However both versions should be correct, i.e. with or without has_rsample.
def model():
z = pyro.sample("z", dist.Normal(0, 1))
loc = (z * 100).clamp(min=0, max=1) # sparse gradients
pyro.sample("x", dist.Normal(loc, 1), obs=torch.tensor(0.))
def guide():
loc = pyro.param("loc", torch.tensor(0.))
pyro.sample("z", dist.Normal(loc, 1).has_rsample_(has_rsample))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo(strict_enumeration_warning=False))
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_not_has_rsample_ok(Elbo):
def model():
z = pyro.sample("z", dist.Normal(0, 1))
p = z.round().clamp(min=0.2, max=0.8) # discontinuous
pyro.sample("x", dist.Bernoulli(p), obs=torch.tensor(0.))
def guide():
loc = pyro.param("loc", torch.tensor(0.))
pyro.sample("z", dist.Normal(loc, 1).has_rsample_(False))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo(strict_enumeration_warning=False))
@pytest.mark.parametrize("subsample_size", [None, 2], ids=["full", "subsample"])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_iplate_ok(subsample_size, Elbo):
def model():
p = torch.tensor(0.5)
for i in pyro.plate("plate", 4, subsample_size):
pyro.sample("x_{}".format(i), dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
for i in pyro.plate("plate", 4, subsample_size):
pyro.sample("x_{}".format(i), dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_iplate_variable_clash_error(Elbo):
def model():
p = torch.tensor(0.5)
for i in pyro.plate("plate", 2):
# Each loop iteration should give the sample site a different name.
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
for i in pyro.plate("plate", 2):
# Each loop iteration should give the sample site a different name.
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_error(model, guide, Elbo(), match='Multiple sample sites named')
@pytest.mark.parametrize("subsample_size", [None, 5], ids=["full", "subsample"])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_plate_ok(subsample_size, Elbo):
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 10, subsample_size) as ind:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind)]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate("plate", 10, subsample_size) as ind:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind)]))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("subsample_size", [None, 5], ids=["full", "subsample"])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_plate_subsample_param_ok(subsample_size, Elbo):
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 10, subsample_size):
pyro.sample("x", dist.Bernoulli(p))
def guide():
with pyro.plate("plate", 10, subsample_size) as ind:
p0 = pyro.param("p0", torch.tensor(0.), event_dim=0)
assert p0.shape == ()
p = pyro.param("p", 0.5 * torch.ones(10), event_dim=0)
assert len(p) == len(ind)
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("subsample_size", [None, 5], ids=["full", "subsample"])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_plate_subsample_primitive_ok(subsample_size, Elbo):
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 10, subsample_size):
pyro.sample("x", dist.Bernoulli(p))
def guide():
with pyro.plate("plate", 10, subsample_size) as ind:
p0 = torch.tensor(0.)
p0 = pyro.subsample(p0, event_dim=0)
assert p0.shape == ()
p = 0.5 * torch.ones(10)
p = pyro.subsample(p, event_dim=0)
assert len(p) == len(ind)
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("subsample_size", [None, 5], ids=["full", "subsample"])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
@pytest.mark.parametrize("shape,ok", [
((), True),
((1,), True),
((10,), True),
((3, 1), True),
((3, 10), True),
    ((5,), False),
((3, 5), False),
])
def test_plate_param_size_mismatch_error(subsample_size, Elbo, shape, ok):
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 10, subsample_size):
pyro.sample("x", dist.Bernoulli(p))
def guide():
with pyro.plate("plate", 10, subsample_size):
pyro.param("p0", torch.ones(shape), event_dim=0)
p = pyro.param("p", torch.ones(10), event_dim=0)
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
if ok:
assert_ok(model, guide, Elbo())
else:
assert_error(model, guide, Elbo(), match="invalid shape of pyro.param")
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_plate_no_size_ok(Elbo):
def model():
p = torch.tensor(0.5)
with pyro.plate("plate"):
pyro.sample("x", dist.Bernoulli(p).expand_by([10]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate("plate"):
pyro.sample("x", dist.Bernoulli(p).expand_by([10]))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, default="parallel", num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("max_plate_nesting", [0, float('inf')])
@pytest.mark.parametrize("subsample_size", [None, 2], ids=["full", "subsample"])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_iplate_iplate_ok(subsample_size, Elbo, max_plate_nesting):
def model():
p = torch.tensor(0.5)
outer_iplate = pyro.plate("plate_0", 3, subsample_size)
inner_iplate = pyro.plate("plate_1", 3, subsample_size)
for i in outer_iplate:
for j in inner_iplate:
pyro.sample("x_{}_{}".format(i, j), dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
outer_iplate = pyro.plate("plate_0", 3, subsample_size)
inner_iplate = pyro.plate("plate_1", 3, subsample_size)
for i in outer_iplate:
for j in inner_iplate:
pyro.sample("x_{}_{}".format(i, j), dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide, "parallel")
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo(max_plate_nesting=max_plate_nesting))
@pytest.mark.parametrize("max_plate_nesting", [0, float('inf')])
@pytest.mark.parametrize("subsample_size", [None, 2], ids=["full", "subsample"])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_iplate_iplate_swap_ok(subsample_size, Elbo, max_plate_nesting):
def model():
p = torch.tensor(0.5)
outer_iplate = pyro.plate("plate_0", 3, subsample_size)
inner_iplate = pyro.plate("plate_1", 3, subsample_size)
for i in outer_iplate:
for j in inner_iplate:
pyro.sample("x_{}_{}".format(i, j), dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
outer_iplate = pyro.plate("plate_0", 3, subsample_size)
inner_iplate = pyro.plate("plate_1", 3, subsample_size)
for j in inner_iplate:
for i in outer_iplate:
pyro.sample("x_{}_{}".format(i, j), dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide, "parallel")
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, default="parallel", num_samples=2)
assert_ok(model, guide, Elbo(max_plate_nesting=max_plate_nesting))
@pytest.mark.parametrize("subsample_size", [None, 5], ids=["full", "subsample"])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_iplate_in_model_not_guide_ok(subsample_size, Elbo):
def model():
p = torch.tensor(0.5)
for i in pyro.plate("plate", 10, subsample_size):
pass
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("subsample_size", [None, 5], ids=["full", "subsample"])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
@pytest.mark.parametrize("is_validate", [True, False])
def test_iplate_in_guide_not_model_error(subsample_size, Elbo, is_validate):
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
for i in pyro.plate("plate", 10, subsample_size):
pass
pyro.sample("x", dist.Bernoulli(p))
with pyro.validation_enabled(is_validate):
if is_validate:
assert_error(model, guide, Elbo(),
match='Found plate statements in guide but not model')
else:
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_plate_broadcast_error(Elbo):
def model():
p = torch.tensor(0.5, requires_grad=True)
with pyro.plate("plate", 10, 5):
pyro.sample("x", dist.Bernoulli(p).expand_by([2]))
assert_error(model, model, Elbo(), match='Shape mismatch inside plate')
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_plate_iplate_ok(Elbo):
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 3, 2) as ind:
for i in pyro.plate("iplate", 3, 2):
pyro.sample("x_{}".format(i), dist.Bernoulli(p).expand_by([len(ind)]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate("plate", 3, 2) as ind:
for i in pyro.plate("iplate", 3, 2):
pyro.sample("x_{}".format(i), dist.Bernoulli(p).expand_by([len(ind)]))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_iplate_plate_ok(Elbo):
def model():
p = torch.tensor(0.5)
inner_plate = pyro.plate("plate", 3, 2)
for i in pyro.plate("iplate", 3, 2):
with inner_plate as ind:
pyro.sample("x_{}".format(i), dist.Bernoulli(p).expand_by([len(ind)]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
inner_plate = pyro.plate("plate", 3, 2)
for i in pyro.plate("iplate", 3, 2):
with inner_plate as ind:
pyro.sample("x_{}".format(i), dist.Bernoulli(p).expand_by([len(ind)]))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
@pytest.mark.parametrize("sizes", [(3,), (3, 4), (3, 4, 5)])
def test_plate_stack_ok(Elbo, sizes):
def model():
p = torch.tensor(0.5)
with pyro.plate_stack("plate_stack", sizes):
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate_stack("plate_stack", sizes):
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
@pytest.mark.parametrize("sizes", [(3,), (3, 4), (3, 4, 5)])
def test_plate_stack_and_plate_ok(Elbo, sizes):
def model():
p = torch.tensor(0.5)
with pyro.plate_stack("plate_stack", sizes):
with pyro.plate("plate", 7):
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate_stack("plate_stack", sizes):
with pyro.plate("plate", 7):
pyro.sample("x", dist.Bernoulli(p))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(guide, num_samples=2)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("sizes", [(3,), (3, 4), (3, 4, 5)])
def test_plate_stack_sizes(sizes):
def model():
p = 0.5 * torch.ones(3)
with pyro.plate_stack("plate_stack", sizes):
x = pyro.sample("x", dist.Bernoulli(p).to_event(1))
assert x.shape == sizes + (3,)
model()
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_nested_plate_plate_ok(Elbo):
def model():
p = torch.tensor(0.5, requires_grad=True)
with pyro.plate("plate_outer", 10, 5) as ind_outer:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind_outer)]))
with pyro.plate("plate_inner", 11, 6) as ind_inner:
pyro.sample("y", dist.Bernoulli(p).expand_by([len(ind_inner), len(ind_outer)]))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(model)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(model, num_samples=2)
else:
guide = model
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_plate_reuse_ok(Elbo):
def model():
p = torch.tensor(0.5, requires_grad=True)
plate_outer = pyro.plate("plate_outer", 10, 5, dim=-1)
plate_inner = pyro.plate("plate_inner", 11, 6, dim=-2)
with plate_outer as ind_outer:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind_outer)]))
with plate_inner as ind_inner:
pyro.sample("y", dist.Bernoulli(p).expand_by([len(ind_inner), 1]))
with plate_outer as ind_outer, plate_inner as ind_inner:
pyro.sample("z", dist.Bernoulli(p).expand_by([len(ind_inner), len(ind_outer)]))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(model)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(model, num_samples=2)
else:
guide = model
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO])
def test_nested_plate_plate_dim_error_1(Elbo):
def model():
p = torch.tensor([0.5], requires_grad=True)
with pyro.plate("plate_outer", 10, 5) as ind_outer:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind_outer)])) # error here
with pyro.plate("plate_inner", 11, 6) as ind_inner:
pyro.sample("y", dist.Bernoulli(p).expand_by([len(ind_inner)]))
pyro.sample("z", dist.Bernoulli(p).expand_by([len(ind_outer), len(ind_inner)]))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(model)
elif Elbo is TraceTMC_ELBO:
guide = config_enumerate(model, num_samples=2)
else:
guide = model
assert_error(model, guide, Elbo(), match='invalid log_prob shape')
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_nested_plate_plate_dim_error_2(Elbo):
def model():
p = torch.tensor([0.5], requires_grad=True)
with pyro.plate("plate_outer", 10, 5) as ind_outer:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind_outer), 1]))
with pyro.plate("plate_inner", 11, 6) as ind_inner:
pyro.sample("y", dist.Bernoulli(p).expand_by([len(ind_outer)])) # error here
pyro.sample("z", dist.Bernoulli(p).expand_by([len(ind_outer), len(ind_inner)]))
guide = config_enumerate(model) if Elbo is TraceEnum_ELBO else model
assert_error(model, guide, Elbo(), match='Shape mismatch inside plate')
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_nested_plate_plate_dim_error_3(Elbo):
def model():
p = torch.tensor([0.5], requires_grad=True)
with pyro.plate("plate_outer", 10, 5) as ind_outer:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind_outer), 1]))
with pyro.plate("plate_inner", 11, 6) as ind_inner:
pyro.sample("y", dist.Bernoulli(p).expand_by([len(ind_inner)]))
pyro.sample("z", dist.Bernoulli(p).expand_by([len(ind_inner), 1])) # error here
guide = config_enumerate(model) if Elbo is TraceEnum_ELBO else model
assert_error(model, guide, Elbo(), match='invalid log_prob shape|shape mismatch')
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_nested_plate_plate_dim_error_4(Elbo):
def model():
p = torch.tensor([0.5], requires_grad=True)
with pyro.plate("plate_outer", 10, 5) as ind_outer:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind_outer), 1]))
with pyro.plate("plate_inner", 11, 6) as ind_inner:
pyro.sample("y", dist.Bernoulli(p).expand_by([len(ind_inner)]))
pyro.sample("z", dist.Bernoulli(p).expand_by([len(ind_outer), len(ind_outer)])) # error here
guide = config_enumerate(model) if Elbo is TraceEnum_ELBO else model
assert_error(model, guide, Elbo(), match='hape mismatch inside plate')
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_nested_plate_plate_subsample_param_ok(Elbo):
def model():
with pyro.plate("plate_outer", 10, 5):
pyro.sample("x", dist.Bernoulli(0.2))
with pyro.plate("plate_inner", 11, 6):
pyro.sample("y", dist.Bernoulli(0.2))
def guide():
p0 = pyro.param("p0", 0.5 * torch.ones(4, 5), event_dim=2)
assert p0.shape == (4, 5)
with pyro.plate("plate_outer", 10, 5):
p1 = pyro.param("p1", 0.5 * torch.ones(10, 3), event_dim=1)
assert p1.shape == (5, 3)
px = pyro.param("px", 0.5 * torch.ones(10), event_dim=0)
assert px.shape == (5,)
pyro.sample("x", dist.Bernoulli(px))
with pyro.plate("plate_inner", 11, 6):
py = pyro.param("py", 0.5 * torch.ones(11, 10), event_dim=0)
assert py.shape == (6, 5)
pyro.sample("y", dist.Bernoulli(py))
if Elbo is TraceEnum_ELBO:
guide = config_enumerate(guide)
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_nonnested_plate_plate_ok(Elbo):
def model():
p = torch.tensor(0.5, requires_grad=True)
with pyro.plate("plate_0", 10, 5) as ind1:
pyro.sample("x0", dist.Bernoulli(p).expand_by([len(ind1)]))
with pyro.plate("plate_1", 11, 6) as ind2:
pyro.sample("x1", dist.Bernoulli(p).expand_by([len(ind2)]))
guide = config_enumerate(model) if Elbo is TraceEnum_ELBO else model
assert_ok(model, guide, Elbo())
def test_three_indep_plate_at_different_depths_ok():
r"""
/\
/\ ia
ia ia
"""
def model():
p = torch.tensor(0.5)
inner_plate = pyro.plate("plate2", 10, 5)
for i in pyro.plate("plate0", 2):
pyro.sample("x_%d" % i, dist.Bernoulli(p))
if i == 0:
for j in pyro.plate("plate1", 2):
with inner_plate as ind:
pyro.sample("y_%d" % j, dist.Bernoulli(p).expand_by([len(ind)]))
elif i == 1:
with inner_plate as ind:
pyro.sample("z", dist.Bernoulli(p).expand_by([len(ind)]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
inner_plate = pyro.plate("plate2", 10, 5)
for i in pyro.plate("plate0", 2):
pyro.sample("x_%d" % i, dist.Bernoulli(p))
if i == 0:
for j in pyro.plate("plate1", 2):
with inner_plate as ind:
pyro.sample("y_%d" % j, dist.Bernoulli(p).expand_by([len(ind)]))
elif i == 1:
with inner_plate as ind:
pyro.sample("z", dist.Bernoulli(p).expand_by([len(ind)]))
assert_ok(model, guide, TraceGraph_ELBO())
def test_plate_wrong_size_error():
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 10, 5) as ind:
pyro.sample("x", dist.Bernoulli(p).expand_by([1 + len(ind)]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate("plate", 10, 5) as ind:
pyro.sample("x", dist.Bernoulli(p).expand_by([1 + len(ind)]))
assert_error(model, guide, TraceGraph_ELBO(), match='Shape mismatch inside plate')
def test_block_plate_name_ok():
def model():
a = pyro.sample("a", dist.Normal(0, 1))
assert a.shape == ()
with pyro.plate("plate", 2):
b = pyro.sample("b", dist.Normal(0, 1))
assert b.shape == (2,)
with block_plate("plate"):
c = pyro.sample("c", dist.Normal(0, 1))
assert c.shape == ()
def guide():
c = pyro.sample("c", dist.Normal(0, 1))
assert c.shape == ()
with pyro.plate("plate", 2):
b = pyro.sample("b", dist.Normal(0, 1))
assert b.shape == (2,)
with block_plate("plate"):
a = pyro.sample("a", dist.Normal(0, 1))
assert a.shape == ()
assert_ok(model, guide, Trace_ELBO())
def test_block_plate_dim_ok():
def model():
a = pyro.sample("a", dist.Normal(0, 1))
assert a.shape == ()
with pyro.plate("plate", 2):
b = pyro.sample("b", dist.Normal(0, 1))
assert b.shape == (2,)
with block_plate(dim=-1):
c = pyro.sample("c", dist.Normal(0, 1))
assert c.shape == ()
def guide():
c = pyro.sample("c", dist.Normal(0, 1))
assert c.shape == ()
with pyro.plate("plate", 2):
b = pyro.sample("b", dist.Normal(0, 1))
assert b.shape == (2,)
with block_plate(dim=-1):
a = pyro.sample("a", dist.Normal(0, 1))
assert a.shape == ()
assert_ok(model, guide, Trace_ELBO())
def test_block_plate_missing_error():
def model():
with block_plate("plate"):
pyro.sample("a", dist.Normal(0, 1))
def guide():
pyro.sample("a", dist.Normal(0, 1))
assert_error(model, guide, Trace_ELBO(),
match="block_plate matched 0 messengers")
@pytest.mark.parametrize("enumerate_", [None, "sequential", "parallel"])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_enum_discrete_misuse_warning(Elbo, enumerate_):
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p), infer={"enumerate": enumerate_})
if (enumerate_ is None) == (Elbo is TraceEnum_ELBO):
assert_warning(model, guide, Elbo(max_plate_nesting=0))
else:
assert_ok(model, guide, Elbo(max_plate_nesting=0))
def test_enum_discrete_single_ok():
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p))
assert_ok(model, config_enumerate(guide), TraceEnum_ELBO())
@pytest.mark.parametrize("strict_enumeration_warning", [False, True])
def test_enum_discrete_missing_config_warning(strict_enumeration_warning):
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p))
elbo = TraceEnum_ELBO(strict_enumeration_warning=strict_enumeration_warning)
if strict_enumeration_warning:
assert_warning(model, guide, elbo)
else:
assert_ok(model, guide, elbo)
def test_enum_discrete_single_single_ok():
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p))
pyro.sample("y", dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p))
pyro.sample("y", dist.Bernoulli(p))
assert_ok(model, config_enumerate(guide), TraceEnum_ELBO())
def test_enum_discrete_iplate_single_ok():
def model():
p = torch.tensor(0.5)
for i in pyro.plate("plate", 10, 5):
pyro.sample("x_{}".format(i), dist.Bernoulli(p))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
for i in pyro.plate("plate", 10, 5):
pyro.sample("x_{}".format(i), dist.Bernoulli(p))
assert_ok(model, config_enumerate(guide), TraceEnum_ELBO())
def test_plate_enum_discrete_batch_ok():
def model():
p = torch.tensor(0.5)
with pyro.plate("plate", 10, 5) as ind:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind)]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
with pyro.plate("plate", 10, 5) as ind:
pyro.sample("x", dist.Bernoulli(p).expand_by([len(ind)]))
assert_ok(model, config_enumerate(guide), TraceEnum_ELBO())
@pytest.mark.parametrize("strict_enumeration_warning", [False, True])
def test_plate_enum_discrete_no_discrete_vars_warning(strict_enumeration_warning):
def model():
loc = torch.tensor(0.0)
scale = torch.tensor(1.0)
with pyro.plate("plate", 10, 5) as ind:
pyro.sample("x", dist.Normal(loc, scale).expand_by([len(ind)]))
@config_enumerate(default="sequential")
def guide():
loc = pyro.param("loc", torch.tensor(1.0, requires_grad=True))
scale = pyro.param("scale", torch.tensor(2.0, requires_grad=True))
with pyro.plate("plate", 10, 5) as ind:
pyro.sample("x", dist.Normal(loc, scale).expand_by([len(ind)]))
elbo = TraceEnum_ELBO(strict_enumeration_warning=strict_enumeration_warning)
if strict_enumeration_warning:
assert_warning(model, guide, elbo)
else:
assert_ok(model, guide, elbo)
def test_no_plate_enum_discrete_batch_error():
def model():
p = torch.tensor(0.5)
pyro.sample("x", dist.Bernoulli(p).expand_by([5]))
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
pyro.sample("x", dist.Bernoulli(p).expand_by([5]))
assert_error(model, config_enumerate(guide), TraceEnum_ELBO(),
match='invalid log_prob shape')
@pytest.mark.parametrize('max_plate_nesting', [0, 1, 2, float('inf')])
def test_enum_discrete_parallel_ok(max_plate_nesting):
guessed_nesting = 0 if max_plate_nesting == float('inf') else max_plate_nesting
plate_shape = torch.Size([1] * guessed_nesting)
def model():
p = torch.tensor(0.5)
x = pyro.sample("x", dist.Bernoulli(p))
if max_plate_nesting != float('inf'):
assert x.shape == torch.Size([2]) + plate_shape
def guide():
p = pyro.param("p", torch.tensor(0.5, requires_grad=True))
x = pyro.sample("x", dist.Bernoulli(p))
if max_plate_nesting != float('inf'):
assert x.shape == torch.Size([2]) + plate_shape
assert_ok(model, config_enumerate(guide, "parallel"),
TraceEnum_ELBO(max_plate_nesting=max_plate_nesting))
@pytest.mark.parametrize('max_plate_nesting', [0, 1, 2, float('inf')])
def test_enum_discrete_parallel_nested_ok(max_plate_nesting):
guessed_nesting = 0 if max_plate_nesting == float('inf') else max_plate_nesting
plate_shape = torch.Size([1] * guessed_nesting)
def model():
p2 = torch.ones(2) / 2
p3 = torch.ones(3) / 3
x2 = pyro.sample("x2", dist.OneHotCategorical(p2))
x3 = pyro.sample("x3", dist.OneHotCategorical(p3))
if max_plate_nesting != float('inf'):
assert x2.shape == torch.Size([2]) + plate_shape + p2.shape
assert x3.shape == torch.Size([3, 1]) + plate_shape + p3.shape
assert_ok(model, config_enumerate(model, "parallel"),
TraceEnum_ELBO(max_plate_nesting=max_plate_nesting))
@pytest.mark.parametrize('enumerate_,expand,num_samples', [
(None, False, None),
("sequential", False, None),
("sequential", True, None),
("parallel", False, None),
("parallel", True, None),
("parallel", True, 3),
])
def test_enumerate_parallel_plate_ok(enumerate_, expand, num_samples):
def model():
p2 = torch.ones(2) / 2
p34 = torch.ones(3, 4) / 4
p536 = torch.ones(5, 3, 6) / 6
x2 = pyro.sample("x2", dist.Categorical(p2))
with pyro.plate("outer", 3):
x34 = pyro.sample("x34", dist.Categorical(p34))
with pyro.plate("inner", 5):
x536 = pyro.sample("x536", dist.Categorical(p536))
# check shapes
if enumerate_ == "parallel":
if num_samples:
n = num_samples
# Meaning of dimensions: [ enum dims | plate dims ]
assert x2.shape == torch.Size([ n, 1, 1]) # noqa: E201
assert x34.shape == torch.Size([ n, 1, 1, 3]) # noqa: E201
assert x536.shape == torch.Size([n, 1, 1, 5, 3]) # noqa: E201
elif expand:
# Meaning of dimensions: [ enum dims | plate dims ]
assert x2.shape == torch.Size([ 2, 1, 1]) # noqa: E201
assert x34.shape == torch.Size([ 4, 1, 1, 3]) # noqa: E201
assert x536.shape == torch.Size([6, 1, 1, 5, 3]) # noqa: E201
else:
# Meaning of dimensions: [ enum dims | plate placeholders ]
assert x2.shape == torch.Size([ 2, 1, 1]) # noqa: E201
assert x34.shape == torch.Size([ 4, 1, 1, 1]) # noqa: E201
assert x536.shape == torch.Size([6, 1, 1, 1, 1]) # noqa: E201
elif enumerate_ == "sequential":
if expand:
# All dimensions are plate dimensions.
assert x2.shape == torch.Size([])
assert x34.shape == torch.Size([3])
assert x536.shape == torch.Size([5, 3])
else:
# All dimensions are plate placeholders.
assert x2.shape == torch.Size([])
assert x34.shape == torch.Size([1])
assert x536.shape == torch.Size([1, 1])
else:
# All dimensions are plate dimensions.
assert x2.shape == torch.Size([])
assert x34.shape == torch.Size([3])
assert x536.shape == torch.Size([5, 3])
elbo = TraceEnum_ELBO(max_plate_nesting=2, strict_enumeration_warning=enumerate_)
guide = config_enumerate(model, enumerate_, expand, num_samples)
assert_ok(model, guide, elbo)
@pytest.mark.parametrize('max_plate_nesting', [1, float('inf')])
@pytest.mark.parametrize('enumerate_', [None, "sequential", "parallel"])
@pytest.mark.parametrize('is_validate', [True, False])
def test_enum_discrete_plate_dependency_warning(enumerate_, is_validate, max_plate_nesting):
def model():
pyro.sample("w", dist.Bernoulli(0.5), infer={'enumerate': 'parallel'})
with pyro.plate("plate", 10, 5):
x = pyro.sample("x", dist.Bernoulli(0.5).expand_by([5]),
infer={'enumerate': enumerate_})
pyro.sample("y", dist.Bernoulli(x.mean())) # user should move this line up
with pyro.validation_enabled(is_validate):
elbo = TraceEnum_ELBO(max_plate_nesting=max_plate_nesting)
if enumerate_ and is_validate:
assert_warning(model, model, elbo)
else:
assert_ok(model, model, elbo)
@pytest.mark.parametrize('max_plate_nesting', [1, float('inf')])
@pytest.mark.parametrize('enumerate_', [None, "sequential", "parallel"])
def test_enum_discrete_iplate_plate_dependency_ok(enumerate_, max_plate_nesting):
def model():
pyro.sample("w", dist.Bernoulli(0.5), infer={'enumerate': 'parallel'})
inner_plate = pyro.plate("plate", 10, 5)
for i in pyro.plate("iplate", 3):
pyro.sample("y_{}".format(i), dist.Bernoulli(0.5))
with inner_plate:
pyro.sample("x_{}".format(i), dist.Bernoulli(0.5).expand_by([5]),
infer={'enumerate': enumerate_})
assert_ok(model, model, TraceEnum_ELBO(max_plate_nesting=max_plate_nesting))
@pytest.mark.parametrize('max_plate_nesting', [1, float('inf')])
@pytest.mark.parametrize('enumerate_', [None, "sequential", "parallel"])
@pytest.mark.parametrize('is_validate', [True, False])
def test_enum_discrete_iplates_plate_dependency_warning(enumerate_, is_validate, max_plate_nesting):
def model():
pyro.sample("w", dist.Bernoulli(0.5), infer={'enumerate': 'parallel'})
inner_plate = pyro.plate("plate", 10, 5)
for i in pyro.plate("iplate1", 2):
with inner_plate:
pyro.sample("x_{}".format(i), dist.Bernoulli(0.5).expand_by([5]),
infer={'enumerate': enumerate_})
for i in pyro.plate("iplate2", 2):
pyro.sample("y_{}".format(i), dist.Bernoulli(0.5))
with pyro.validation_enabled(is_validate):
elbo = TraceEnum_ELBO(max_plate_nesting=max_plate_nesting)
if enumerate_ and is_validate:
assert_warning(model, model, elbo)
else:
assert_ok(model, model, elbo)
@pytest.mark.parametrize('enumerate_', [None, "sequential", "parallel"])
def test_enum_discrete_plates_dependency_ok(enumerate_):
def model():
pyro.sample("w", dist.Bernoulli(0.5), infer={'enumerate': 'parallel'})
x_plate = pyro.plate("x_plate", 10, 5, dim=-1)
y_plate = pyro.plate("y_plate", 11, 6, dim=-2)
pyro.sample("a", dist.Bernoulli(0.5))
with x_plate:
pyro.sample("b", dist.Bernoulli(0.5).expand_by([5]))
with y_plate:
# Note that it is difficult to check that c does not depend on b.
pyro.sample("c", dist.Bernoulli(0.5).expand_by([6, 1]))
with x_plate, y_plate:
pyro.sample("d", dist.Bernoulli(0.5).expand_by([6, 5]))
assert_ok(model, model, TraceEnum_ELBO(max_plate_nesting=2))
@pytest.mark.parametrize('enumerate_', [None, "sequential", "parallel"])
def test_enum_discrete_non_enumerated_plate_ok(enumerate_):
def model():
pyro.sample("w", dist.Bernoulli(0.5), infer={'enumerate': 'parallel'})
with pyro.plate("non_enum", 2):
a = pyro.sample("a", dist.Bernoulli(0.5).expand_by([2]),
infer={'enumerate': None})
p = (1.0 + a.sum(-1)) / (2.0 + a.size(0)) # introduce dependency of b on a
with pyro.plate("enum_1", 3):
pyro.sample("b", dist.Bernoulli(p).expand_by([3]),
infer={'enumerate': enumerate_})
with pyro.validation_enabled():
assert_ok(model, model, TraceEnum_ELBO(max_plate_nesting=1))
def test_plate_shape_broadcasting():
data = torch.ones(1000, 2)
def model():
with pyro.plate("num_particles", 10, dim=-3):
with pyro.plate("components", 2, dim=-1):
p = pyro.sample("p", dist.Beta(torch.tensor(1.1), torch.tensor(1.1)))
assert p.shape == torch.Size((10, 1, 2))
with pyro.plate("data", data.shape[0], dim=-2):
pyro.sample("obs", dist.Bernoulli(p), obs=data)
def guide():
with pyro.plate("num_particles", 10, dim=-3):
with pyro.plate("components", 2, dim=-1):
pyro.sample("p", dist.Beta(torch.tensor(1.1), torch.tensor(1.1)))
assert_ok(model, guide, Trace_ELBO())
@pytest.mark.parametrize('enumerate_,expand,num_samples', [
(None, True, None),
("sequential", True, None),
("sequential", False, None),
("parallel", True, None),
("parallel", False, None),
("parallel", True, 3),
])
def test_enum_discrete_plate_shape_broadcasting_ok(enumerate_, expand, num_samples):
def model():
x_plate = pyro.plate("x_plate", 10, 5, dim=-1)
y_plate = pyro.plate("y_plate", 11, 6, dim=-2)
with pyro.plate("num_particles", 50, dim=-3):
with x_plate:
b = pyro.sample("b", dist.Beta(torch.tensor(1.1), torch.tensor(1.1)))
with y_plate:
c = pyro.sample("c", dist.Bernoulli(0.5))
with x_plate, y_plate:
d = pyro.sample("d", dist.Bernoulli(b))
# check shapes
if enumerate_ == "parallel":
if num_samples and expand:
assert b.shape == (num_samples, 50, 1, 5)
assert c.shape == (num_samples, 1, 50, 6, 1)
assert d.shape == (num_samples, 1, num_samples, 50, 6, 5)
elif num_samples and not expand:
assert b.shape == (num_samples, 50, 1, 5)
assert c.shape == (num_samples, 1, 50, 6, 1)
assert d.shape == (num_samples, 1, 1, 50, 6, 5)
elif expand:
assert b.shape == (50, 1, 5)
assert c.shape == (2, 50, 6, 1)
assert d.shape == (2, 1, 50, 6, 5)
else:
assert b.shape == (50, 1, 5)
assert c.shape == (2, 1, 1, 1)
assert d.shape == (2, 1, 1, 1, 1)
elif enumerate_ == "sequential":
if expand:
assert b.shape == (50, 1, 5)
assert c.shape == (50, 6, 1)
assert d.shape == (50, 6, 5)
else:
assert b.shape == (50, 1, 5)
assert c.shape == (1, 1, 1)
assert d.shape == (1, 1, 1)
else:
assert b.shape == (50, 1, 5)
assert c.shape == (50, 6, 1)
assert d.shape == (50, 6, 5)
guide = config_enumerate(model, default=enumerate_, expand=expand, num_samples=num_samples)
elbo = TraceEnum_ELBO(max_plate_nesting=3,
strict_enumeration_warning=(enumerate_ == "parallel"))
assert_ok(model, guide, elbo)
@pytest.mark.parametrize("Elbo,expand", [
(Trace_ELBO, False),
(TraceGraph_ELBO, False),
(TraceEnum_ELBO, False),
(TraceEnum_ELBO, True),
])
def test_dim_allocation_ok(Elbo, expand):
enumerate_ = (Elbo is TraceEnum_ELBO)
def model():
p = torch.tensor(0.5, requires_grad=True)
with pyro.plate("plate_outer", 10, 5, dim=-3):
x = pyro.sample("x", dist.Bernoulli(p))
with pyro.plate("plate_inner_1", 11, 6):
y = pyro.sample("y", dist.Bernoulli(p))
# allocated dim is rightmost available, i.e. -1
with pyro.plate("plate_inner_2", 12, 7):
z = pyro.sample("z", dist.Bernoulli(p))
# allocated dim is next rightmost available, i.e. -2
# since dim -3 is already allocated, use dim=-4
with pyro.plate("plate_inner_3", 13, 8):
q = pyro.sample("q", dist.Bernoulli(p))
# check shapes
if enumerate_ and not expand:
assert x.shape == (1, 1, 1)
assert y.shape == (1, 1, 1)
assert z.shape == (1, 1, 1)
assert q.shape == (1, 1, 1, 1)
else:
assert x.shape == (5, 1, 1)
assert y.shape == (5, 1, 6)
assert z.shape == (5, 7, 6)
assert q.shape == (8, 5, 7, 6)
guide = config_enumerate(model, "sequential", expand=expand) if enumerate_ else model
assert_ok(model, guide, Elbo(max_plate_nesting=4))
@pytest.mark.parametrize("Elbo,expand", [
(Trace_ELBO, False),
(TraceGraph_ELBO, False),
(TraceEnum_ELBO, False),
(TraceEnum_ELBO, True),
])
def test_dim_allocation_error(Elbo, expand):
enumerate_ = (Elbo is TraceEnum_ELBO)
def model():
p = torch.tensor(0.5, requires_grad=True)
with pyro.plate("plate_outer", 10, 5, dim=-2):
x = pyro.sample("x", dist.Bernoulli(p))
# allocated dim is rightmost available, i.e. -1
with pyro.plate("plate_inner_1", 11, 6):
y = pyro.sample("y", dist.Bernoulli(p))
# throws an error as dim=-1 is already occupied
with pyro.plate("plate_inner_2", 12, 7, dim=-1):
pyro.sample("z", dist.Bernoulli(p))
# check shapes
if enumerate_ and not expand:
assert x.shape == (1, 1)
assert y.shape == (1, 1)
else:
assert x.shape == (5, 1)
assert y.shape == (5, 6)
guide = config_enumerate(model, expand=expand) if Elbo is TraceEnum_ELBO else model
assert_error(model, guide, Elbo(), match='collide at dim=')
def test_enum_in_model_ok():
infer = {'enumerate': 'parallel'}
def model():
p = pyro.param('p', torch.tensor(0.25))
a = pyro.sample('a', dist.Bernoulli(p))
b = pyro.sample('b', dist.Bernoulli(p + a / 2))
c = pyro.sample('c', dist.Bernoulli(p + b / 2), infer=infer)
d = pyro.sample('d', dist.Bernoulli(p + c / 2))
e = pyro.sample('e', dist.Bernoulli(p + d / 2))
f = pyro.sample('f', dist.Bernoulli(p + e / 2), infer=infer)
g = pyro.sample('g', dist.Bernoulli(p + f / 2), obs=torch.tensor(0.))
# check shapes
assert a.shape == ()
assert b.shape == (2,)
assert c.shape == (2, 1, 1)
assert d.shape == (2,)
assert e.shape == (2, 1)
assert f.shape == (2, 1, 1, 1)
assert g.shape == ()
def guide():
p = pyro.param('p', torch.tensor(0.25))
a = pyro.sample('a', dist.Bernoulli(p))
b = pyro.sample('b', dist.Bernoulli(p + a / 2), infer=infer)
d = pyro.sample('d', dist.Bernoulli(p + b / 2))
e = pyro.sample('e', dist.Bernoulli(p + d / 2), infer=infer)
# check shapes
assert a.shape == ()
assert b.shape == (2,)
assert d.shape == (2,)
assert e.shape == (2, 1)
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0))
def test_enum_in_model_plate_ok():
infer = {'enumerate': 'parallel'}
def model():
p = pyro.param('p', torch.tensor(0.25))
a = pyro.sample('a', dist.Bernoulli(p))
b = pyro.sample('b', dist.Bernoulli(p + a / 2))
with pyro.plate('data', 3):
c = pyro.sample('c', dist.Bernoulli(p + b / 2), infer=infer)
d = pyro.sample('d', dist.Bernoulli(p + c / 2))
e = pyro.sample('e', dist.Bernoulli(p + d / 2))
f = pyro.sample('f', dist.Bernoulli(p + e / 2), infer=infer)
g = pyro.sample('g', dist.Bernoulli(p + f / 2), obs=torch.zeros(3))
# check shapes
assert a.shape == ()
assert b.shape == (2, 1)
assert c.shape == (2, 1, 1, 1)
assert d.shape == (2, 3)
assert e.shape == (2, 1, 1)
assert f.shape == (2, 1, 1, 1, 1)
assert g.shape == (3,)
def guide():
p = pyro.param('p', torch.tensor(0.25))
a = pyro.sample('a', dist.Bernoulli(p))
b = pyro.sample('b', dist.Bernoulli(p + a / 2), infer=infer)
with pyro.plate('data', 3):
d = pyro.sample('d', dist.Bernoulli(p + b / 2))
e = pyro.sample('e', dist.Bernoulli(p + d / 2), infer=infer)
# check shapes
assert a.shape == ()
assert b.shape == (2, 1)
assert d.shape == (2, 3)
assert e.shape == (2, 1, 1)
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=1))
def test_enum_sequential_in_model_error():
def model():
p = pyro.param('p', torch.tensor(0.25))
pyro.sample('a', dist.Bernoulli(p), infer={'enumerate': 'sequential'})
def guide():
pass
assert_error(model, guide, TraceEnum_ELBO(max_plate_nesting=0),
match='Found vars in model but not guide')
def test_enum_in_model_plate_reuse_ok():
@config_enumerate
def model():
p = pyro.param("p", torch.tensor([0.2, 0.8]))
a = pyro.sample("a", dist.Bernoulli(0.3)).long()
with pyro.plate("b_axis", 2):
pyro.sample("b", dist.Bernoulli(p[a]), obs=torch.tensor([0., 1.]))
c = pyro.sample("c", dist.Bernoulli(0.3)).long()
with pyro.plate("c_axis", 2):
pyro.sample("d", dist.Bernoulli(p[c]), obs=torch.tensor([0., 0.]))
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=1))
def test_enum_in_model_multi_scale_error():
@config_enumerate
def model():
p = pyro.param("p", torch.tensor([0.2, 0.8]))
x = pyro.sample("x", dist.Bernoulli(0.3)).long()
with poutine.scale(scale=2.):
pyro.sample("y", dist.Bernoulli(p[x]), obs=torch.tensor(0.))
def guide():
pass
assert_error(model, guide, TraceEnum_ELBO(max_plate_nesting=0),
match='Expected all enumerated sample sites to share a common poutine.scale')
@pytest.mark.parametrize('use_vindex', [False, True])
def test_enum_in_model_diamond_error(use_vindex):
data = torch.tensor([[0, 1], [0, 0]])
@config_enumerate
def model():
pyro.param("probs_a", torch.tensor([0.45, 0.55]))
pyro.param("probs_b", torch.tensor([[0.6, 0.4], [0.4, 0.6]]))
pyro.param("probs_c", torch.tensor([[0.75, 0.25], [0.55, 0.45]]))
pyro.param("probs_d", torch.tensor([[[0.4, 0.6], [0.3, 0.7]],
[[0.3, 0.7], [0.2, 0.8]]]))
probs_a = pyro.param("probs_a")
probs_b = pyro.param("probs_b")
probs_c = pyro.param("probs_c")
probs_d = pyro.param("probs_d")
b_axis = pyro.plate("b_axis", 2, dim=-1)
c_axis = pyro.plate("c_axis", 2, dim=-2)
a = pyro.sample("a", dist.Categorical(probs_a))
with b_axis:
b = pyro.sample("b", dist.Categorical(probs_b[a]))
with c_axis:
c = pyro.sample("c", dist.Categorical(probs_c[a]))
with b_axis, c_axis:
if use_vindex:
probs = Vindex(probs_d)[b, c]
else:
d_ind = torch.arange(2, dtype=torch.long)
probs = probs_d[b.unsqueeze(-1), c.unsqueeze(-1), d_ind]
pyro.sample("d", dist.Categorical(probs), obs=data)
def guide():
pass
assert_error(model, guide, TraceEnum_ELBO(max_plate_nesting=2),
match='Expected tree-structured plate nesting')
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_vectorized_num_particles(Elbo):
data = torch.ones(1000, 2)
def model():
with pyro.plate("components", 2):
p = pyro.sample("p", dist.Beta(torch.tensor(1.1), torch.tensor(1.1)))
assert p.shape == torch.Size((10, 1, 2))
with pyro.plate("data", data.shape[0]):
pyro.sample("obs", dist.Bernoulli(p), obs=data)
def guide():
with pyro.plate("components", 2):
pyro.sample("p", dist.Beta(torch.tensor(1.1), torch.tensor(1.1)))
pyro.clear_param_store()
guide = config_enumerate(guide) if Elbo is TraceEnum_ELBO else guide
assert_ok(model, guide, Elbo(num_particles=10,
vectorize_particles=True,
max_plate_nesting=2,
strict_enumeration_warning=False))
@pytest.mark.parametrize('enumerate_,expand,num_samples', [
(None, False, None),
("sequential", False, None),
("sequential", True, None),
("parallel", False, None),
("parallel", True, None),
("parallel", True, 3),
])
@pytest.mark.parametrize('num_particles', [1, 50])
def test_enum_discrete_vectorized_num_particles(enumerate_, expand, num_samples, num_particles):
@config_enumerate(default=enumerate_, expand=expand, num_samples=num_samples)
def model():
x_plate = pyro.plate("x_plate", 10, 5, dim=-1)
y_plate = pyro.plate("y_plate", 11, 6, dim=-2)
with x_plate:
b = pyro.sample("b", dist.Beta(torch.tensor(1.1), torch.tensor(1.1)))
with y_plate:
c = pyro.sample("c", dist.Bernoulli(0.5))
with x_plate, y_plate:
d = pyro.sample("d", dist.Bernoulli(b))
# check shapes
if num_particles > 1:
if enumerate_ == "parallel":
if num_samples and expand:
assert b.shape == (num_samples, num_particles, 1, 5)
assert c.shape == (num_samples, 1, num_particles, 6, 1)
assert d.shape == (num_samples, 1, num_samples, num_particles, 6, 5)
elif num_samples and not expand:
assert b.shape == (num_samples, num_particles, 1, 5)
assert c.shape == (num_samples, 1, num_particles, 6, 1)
assert d.shape == (num_samples, 1, 1, num_particles, 6, 5)
elif expand:
assert b.shape == (num_particles, 1, 5)
assert c.shape == (2, num_particles, 6, 1)
assert d.shape == (2, 1, num_particles, 6, 5)
else:
assert b.shape == (num_particles, 1, 5)
assert c.shape == (2, 1, 1, 1)
assert d.shape == (2, 1, 1, 1, 1)
elif enumerate_ == "sequential":
if expand:
assert b.shape == (num_particles, 1, 5)
assert c.shape == (num_particles, 6, 1)
assert d.shape == (num_particles, 6, 5)
else:
assert b.shape == (num_particles, 1, 5)
assert c.shape == (1, 1, 1)
assert d.shape == (1, 1, 1)
else:
assert b.shape == (num_particles, 1, 5)
assert c.shape == (num_particles, 6, 1)
assert d.shape == (num_particles, 6, 5)
else:
if enumerate_ == "parallel":
if num_samples and expand:
assert b.shape == (num_samples, 1, 5,)
assert c.shape == (num_samples, 1, 6, 1)
assert d.shape == (num_samples, 1, num_samples, 6, 5)
elif num_samples and not expand:
assert b.shape == (num_samples, 1, 5,)
assert c.shape == (num_samples, 1, 6, 1)
assert d.shape == (num_samples, 1, 1, 6, 5)
elif expand:
assert b.shape == (5,)
assert c.shape == (2, 6, 1)
assert d.shape == (2, 1, 6, 5)
else:
assert b.shape == (5,)
assert c.shape == (2, 1, 1)
assert d.shape == (2, 1, 1, 1)
elif enumerate_ == "sequential":
if expand:
assert b.shape == (5,)
assert c.shape == (6, 1)
assert d.shape == (6, 5)
else:
assert b.shape == (5,)
assert c.shape == (1, 1)
assert d.shape == (1, 1)
else:
assert b.shape == (5,)
assert c.shape == (6, 1)
assert d.shape == (6, 5)
assert_ok(model, model, TraceEnum_ELBO(max_plate_nesting=2,
num_particles=num_particles,
vectorize_particles=True,
strict_enumeration_warning=(enumerate_ == "parallel")))
def test_enum_recycling_chain():
@config_enumerate
def model():
p = pyro.param("p", torch.tensor([[0.2, 0.8], [0.1, 0.9]]))
x = 0
for t in pyro.markov(range(100)):
x = pyro.sample("x_{}".format(t), dist.Categorical(p[x]))
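            # Note (added comment): pyro.markov with the default history=1 lets
            # enumeration dimensions be recycled across non-adjacent steps, so
            # this 100-step chain never holds more than two live enum dims.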
assert x.dim() <= 2
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0))
@pytest.mark.parametrize('use_vindex', [False, True])
@pytest.mark.parametrize('markov', [False, True])
def test_enum_recycling_dbn(markov, use_vindex):
# x --> x --> x enum "state"
# y | y | y | enum "occlusion"
# \ | \ | \ |
# z z z obs
@config_enumerate
def model():
p = pyro.param("p", torch.ones(3, 3))
q = pyro.param("q", torch.ones(2))
r = pyro.param("r", torch.ones(3, 2, 4))
x = 0
times = pyro.markov(range(100)) if markov else range(11)
for t in times:
x = pyro.sample("x_{}".format(t), dist.Categorical(p[x]))
y = pyro.sample("y_{}".format(t), dist.Categorical(q))
if use_vindex:
probs = Vindex(r)[x, y]
else:
z_ind = torch.arange(4, dtype=torch.long)
probs = r[x.unsqueeze(-1), y.unsqueeze(-1), z_ind]
pyro.sample("z_{}".format(t), dist.Categorical(probs),
obs=torch.tensor(0.))
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0))
def test_enum_recycling_nested():
# (x)
# \
# y0---(y1)--(y2)
# | | |
# z00 z10 z20
# | | |
# z01 z11 (z21)
# | | |
# z02 z12 z22 <-- what can this depend on?
#
# markov dependencies
# -------------------
# x:
# y0: x
# z00: x y0
# z01: x y0 z00
# z02: x y0 z01
# y1: x y0
# z10: x y0 y1
# z11: x y0 y1 z10
# z12: x y0 y1 z11
# y2: x y1
# z20: x y1 y2
# z21: x y1 y2 z20
# z22: x y1 y2 z21
@config_enumerate
def model():
p = pyro.param("p", torch.ones(3, 3))
x = pyro.sample("x", dist.Categorical(p[0]))
y = x
for i in pyro.markov(range(10)):
y = pyro.sample("y_{}".format(i), dist.Categorical(p[y]))
z = y
for j in pyro.markov(range(10)):
z = pyro.sample("z_{}_{}".format(i, j), dist.Categorical(p[z]))
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0))
@pytest.mark.parametrize('use_vindex', [False, True])
def test_enum_recycling_grid(use_vindex):
# x---x---x---x -----> i
# | | | | |
# x---x---x---x |
# | | | | V
# x---x---x--(x) j
# | | | |
# x---x--(x)--x <-- what can this depend on?
@config_enumerate
def model():
p = pyro.param("p_leaf", torch.ones(2, 2, 2))
x = defaultdict(lambda: torch.tensor(0))
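        # Note (added comment): keep=True below allows the same markov context
        # to be re-entered on every outer sweep, giving the grid dependency
        # structure sketched above.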
y_axis = pyro.markov(range(4), keep=True)
for i in pyro.markov(range(4)):
for j in y_axis:
if use_vindex:
probs = Vindex(p)[x[i - 1, j], x[i, j - 1]]
else:
ind = torch.arange(2, dtype=torch.long)
probs = p[x[i - 1, j].unsqueeze(-1),
x[i, j - 1].unsqueeze(-1), ind]
x[i, j] = pyro.sample("x_{}_{}".format(i, j),
dist.Categorical(probs))
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0))
def test_enum_recycling_reentrant():
data = (True, False)
for i in range(5):
data = (data, data, False)
@pyro.markov
def model(data, state=0, address=""):
if isinstance(data, bool):
p = pyro.param("p_leaf", torch.ones(10))
pyro.sample("leaf_{}".format(address),
dist.Bernoulli(p[state]),
obs=torch.tensor(1. if data else 0.))
else:
p = pyro.param("p_branch", torch.ones(10, 10))
for branch, letter in zip(data, "abcdefg"):
next_state = pyro.sample("branch_{}".format(address + letter),
dist.Categorical(p[state]),
infer={"enumerate": "parallel"})
model(branch, next_state, address + letter)
def guide(data):
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0), data=data)
@pytest.mark.parametrize('history', [1, 2])
def test_enum_recycling_reentrant_history(history):
data = (True, False)
for i in range(5):
data = (data, data, False)
@pyro.markov(history=history)
def model(data, state=0, address=""):
if isinstance(data, bool):
p = pyro.param("p_leaf", torch.ones(10))
pyro.sample("leaf_{}".format(address),
dist.Bernoulli(p[state]),
obs=torch.tensor(1. if data else 0.))
else:
assert isinstance(data, tuple)
p = pyro.param("p_branch", torch.ones(10, 10))
for branch, letter in zip(data, "abcdefg"):
next_state = pyro.sample("branch_{}".format(address + letter),
dist.Categorical(p[state]),
infer={"enumerate": "parallel"})
model(branch, next_state, address + letter)
def guide(data):
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0), data=data)
def test_enum_recycling_mutual_recursion():
data = (True, False)
for i in range(5):
data = (data, data, False)
def model_leaf(data, state=0, address=""):
p = pyro.param("p_leaf", torch.ones(10))
pyro.sample("leaf_{}".format(address),
dist.Bernoulli(p[state]),
obs=torch.tensor(1. if data else 0.))
@pyro.markov
def model1(data, state=0, address=""):
if isinstance(data, bool):
model_leaf(data, state, address)
else:
p = pyro.param("p_branch", torch.ones(10, 10))
for branch, letter in zip(data, "abcdefg"):
next_state = pyro.sample("branch_{}".format(address + letter),
dist.Categorical(p[state]),
infer={"enumerate": "parallel"})
model2(branch, next_state, address + letter)
@pyro.markov
def model2(data, state=0, address=""):
if isinstance(data, bool):
model_leaf(data, state, address)
else:
p = pyro.param("p_branch", torch.ones(10, 10))
for branch, letter in zip(data, "abcdefg"):
next_state = pyro.sample("branch_{}".format(address + letter),
dist.Categorical(p[state]),
infer={"enumerate": "parallel"})
model1(branch, next_state, address + letter)
def guide(data):
pass
assert_ok(model1, guide, TraceEnum_ELBO(max_plate_nesting=0), data=data)
def test_enum_recycling_interleave():
def model():
with pyro.markov() as m:
with pyro.markov():
with m: # error here
pyro.sample("x", dist.Categorical(torch.ones(4)),
infer={"enumerate": "parallel"})
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0, strict_enumeration_warning=False))
def test_enum_recycling_plate():
@config_enumerate
def model():
p = pyro.param("p", torch.ones(3, 3))
q = pyro.param("q", torch.tensor([0.5, 0.5]))
plate_x = pyro.plate("plate_x", 2, dim=-1)
plate_y = pyro.plate("plate_y", 3, dim=-1)
plate_z = pyro.plate("plate_z", 4, dim=-2)
a = pyro.sample("a", dist.Bernoulli(q[0])).long()
w = 0
for i in pyro.markov(range(5)):
w = pyro.sample("w_{}".format(i), dist.Categorical(p[w]))
with plate_x:
b = pyro.sample("b", dist.Bernoulli(q[a])).long()
x = 0
for i in pyro.markov(range(6)):
x = pyro.sample("x_{}".format(i), dist.Categorical(p[x]))
with plate_y:
c = pyro.sample("c", dist.Bernoulli(q[a])).long()
y = 0
for i in pyro.markov(range(7)):
y = pyro.sample("y_{}".format(i), dist.Categorical(p[y]))
with plate_z:
d = pyro.sample("d", dist.Bernoulli(q[a])).long()
z = 0
for i in pyro.markov(range(8)):
z = pyro.sample("z_{}".format(i), dist.Categorical(p[z]))
with plate_x, plate_z:
e = pyro.sample("e", dist.Bernoulli(q[b])).long()
xz = 0
for i in pyro.markov(range(9)):
xz = pyro.sample("xz_{}".format(i), dist.Categorical(p[xz]))
return a, b, c, d, e
def guide():
pass
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=2))
@pytest.mark.parametrize("Elbo", [
Trace_ELBO,
TraceGraph_ELBO,
TraceEnum_ELBO,
TraceTMC_ELBO,
])
def test_factor_in_model_ok(Elbo):
def model():
pyro.factor("f", torch.tensor(0.))
def guide():
pass
elbo = Elbo(strict_enumeration_warning=False)
assert_ok(model, guide, elbo)
@pytest.mark.parametrize("Elbo", [
Trace_ELBO,
TraceGraph_ELBO,
TraceEnum_ELBO,
TraceTMC_ELBO,
])
def test_factor_in_guide_ok(Elbo):
def model():
pass
def guide():
pyro.factor("f", torch.tensor(0.))
elbo = Elbo(strict_enumeration_warning=False)
assert_ok(model, guide, elbo)
@pytest.mark.parametrize('history', [0, 1, 2, 3])
def test_markov_history(history):
@config_enumerate
def model():
p = pyro.param("p", 0.25 * torch.ones(2, 2))
q = pyro.param("q", 0.25 * torch.ones(2))
x_prev = torch.tensor(0)
x_curr = torch.tensor(0)
for t in pyro.markov(range(10), history=history):
probs = p[x_prev, x_curr]
x_prev, x_curr = x_curr, pyro.sample("x_{}".format(t), dist.Bernoulli(probs)).long()
pyro.sample("y_{}".format(t), dist.Bernoulli(q[x_curr]),
obs=torch.tensor(0.))
def guide():
pass
if history < 2:
assert_error(model, guide, TraceEnum_ELBO(max_plate_nesting=0),
match="Enumeration dim conflict")
else:
assert_ok(model, guide, TraceEnum_ELBO(max_plate_nesting=0))
def test_mean_field_ok():
def model():
x = pyro.sample("x", dist.Normal(0., 1.))
pyro.sample("y", dist.Normal(x, 1.))
def guide():
loc = pyro.param("loc", torch.tensor(0.))
x = pyro.sample("x", dist.Normal(loc, 1.))
pyro.sample("y", dist.Normal(x, 1.))
assert_ok(model, guide, TraceMeanField_ELBO())
@pytest.mark.parametrize('mask', [True, False])
def test_mean_field_mask_ok(mask):
def model():
x = pyro.sample("x", dist.Normal(0., 1.).mask(mask))
pyro.sample("y", dist.Normal(x, 1.))
def guide():
loc = pyro.param("loc", torch.tensor(0.))
x = pyro.sample("x", dist.Normal(loc, 1.).mask(mask))
pyro.sample("y", dist.Normal(x, 1.))
assert_ok(model, guide, TraceMeanField_ELBO())
def test_mean_field_warn():
def model():
x = pyro.sample("x", dist.Normal(0., 1.))
pyro.sample("y", dist.Normal(x, 1.))
def guide():
loc = pyro.param("loc", torch.tensor(0.))
y = pyro.sample("y", dist.Normal(loc, 1.))
pyro.sample("x", dist.Normal(y, 1.))
assert_warning(model, guide, TraceMeanField_ELBO())
def test_tail_adaptive_ok():
def plateless_model():
pyro.sample("x", dist.Normal(0., 1.))
def plate_model():
x = pyro.sample("x", dist.Normal(0., 1.))
with pyro.plate('observe_data'):
pyro.sample('obs', dist.Normal(x, 1.0), obs=torch.arange(5).type_as(x))
def rep_guide():
pyro.sample("x", dist.Normal(0., 2.))
assert_ok(plateless_model, rep_guide, TraceTailAdaptive_ELBO(vectorize_particles=True, num_particles=2))
assert_ok(plate_model, rep_guide, TraceTailAdaptive_ELBO(vectorize_particles=True, num_particles=2))
def test_tail_adaptive_error():
def plateless_model():
pyro.sample("x", dist.Normal(0., 1.))
def rep_guide():
pyro.sample("x", dist.Normal(0., 2.))
def nonrep_guide():
pyro.sample("x", fakes.NonreparameterizedNormal(0., 2.))
assert_error(plateless_model, rep_guide, TraceTailAdaptive_ELBO(vectorize_particles=False, num_particles=2))
assert_error(plateless_model, nonrep_guide, TraceTailAdaptive_ELBO(vectorize_particles=True, num_particles=2))
def test_tail_adaptive_warning():
def plateless_model():
pyro.sample("x", dist.Normal(0., 1.))
def rep_guide():
pyro.sample("x", dist.Normal(0., 2.))
assert_warning(plateless_model, rep_guide, TraceTailAdaptive_ELBO(vectorize_particles=True, num_particles=1))
@pytest.mark.parametrize("Elbo", [
Trace_ELBO,
TraceMeanField_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
])
def test_reparam_ok(Elbo):
def model():
x = pyro.sample("x", dist.Normal(0., 1.))
pyro.sample("y", dist.Normal(x, 1.), obs=torch.tensor(0.))
def guide():
loc = pyro.param("loc", torch.tensor(0.))
pyro.sample("x", dist.Normal(loc, 1.))
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("mask", [True, False, torch.tensor(True), torch.tensor(False)])
@pytest.mark.parametrize("Elbo", [
Trace_ELBO,
TraceMeanField_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
])
def test_reparam_mask_ok(Elbo, mask):
def model():
x = pyro.sample("x", dist.Normal(0., 1.))
with poutine.mask(mask=mask):
pyro.sample("y", dist.Normal(x, 1.), obs=torch.tensor(0.))
def guide():
loc = pyro.param("loc", torch.tensor(0.))
pyro.sample("x", dist.Normal(loc, 1.))
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("mask", [
True,
False,
torch.tensor(True),
torch.tensor(False),
torch.tensor([False, True]),
])
@pytest.mark.parametrize("Elbo", [
Trace_ELBO,
TraceMeanField_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
])
def test_reparam_mask_plate_ok(Elbo, mask):
data = torch.randn(2, 3).exp()
data /= data.sum(-1, keepdim=True)
def model():
c = pyro.sample("c", dist.LogNormal(0., 1.).expand([3]).to_event(1))
with pyro.plate("data", len(data)), poutine.mask(mask=mask):
pyro.sample("obs", dist.Dirichlet(c), obs=data)
def guide():
loc = pyro.param("loc", torch.zeros(3))
scale = pyro.param("scale", torch.ones(3),
constraint=constraints.positive)
pyro.sample("c", dist.LogNormal(loc, scale).to_event(1))
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("scale", [1, 0.1, torch.tensor(0.5)])
@pytest.mark.parametrize("Elbo", [
Trace_ELBO,
TraceMeanField_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
])
def test_reparam_scale_ok(Elbo, scale):
def model():
x = pyro.sample("x", dist.Normal(0., 1.))
with poutine.scale(scale=scale):
pyro.sample("y", dist.Normal(x, 1.), obs=torch.tensor(0.))
def guide():
loc = pyro.param("loc", torch.tensor(0.))
pyro.sample("x", dist.Normal(loc, 1.))
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("scale", [
1,
0.1,
torch.tensor(0.5),
torch.tensor([0.1, 0.9]),
])
@pytest.mark.parametrize("Elbo", [
Trace_ELBO,
TraceMeanField_ELBO,
EnergyDistance_prior,
EnergyDistance_noprior,
])
def test_reparam_scale_plate_ok(Elbo, scale):
data = torch.randn(2, 3).exp()
data /= data.sum(-1, keepdim=True)
def model():
c = pyro.sample("c", dist.LogNormal(0., 1.).expand([3]).to_event(1))
with pyro.plate("data", len(data)), poutine.scale(scale=scale):
pyro.sample("obs", dist.Dirichlet(c), obs=data)
def guide():
loc = pyro.param("loc", torch.zeros(3))
scale = pyro.param("scale", torch.ones(3),
constraint=constraints.positive)
pyro.sample("c", dist.LogNormal(loc, scale).to_event(1))
assert_ok(model, guide, Elbo())
@pytest.mark.parametrize("Elbo", [
EnergyDistance_prior,
EnergyDistance_noprior,
])
def test_no_log_prob_ok(Elbo):
def model(data):
loc = pyro.sample("loc", dist.Normal(0, 1))
scale = pyro.sample("scale", dist.LogNormal(0, 1))
with pyro.plate("data", len(data)):
pyro.sample("obs", dist.Stable(1.5, 0.5, scale, loc),
obs=data)
def guide(data):
map_loc = pyro.param("map_loc", torch.tensor(0.))
map_scale = pyro.param("map_scale", torch.tensor(1.),
constraint=constraints.positive)
pyro.sample("loc", dist.Delta(map_loc))
pyro.sample("scale", dist.Delta(map_scale))
data = torch.randn(10)
assert_ok(model, guide, Elbo(), data=data)
def test_reparam_stable():
@poutine.reparam(config={"z": LatentStableReparam()})
def model():
stability = pyro.sample("stability", dist.Uniform(0., 2.))
skew = pyro.sample("skew", dist.Uniform(-1., 1.))
y = pyro.sample("z", dist.Stable(stability, skew))
pyro.sample("x", dist.Poisson(y.abs()), obs=torch.tensor(1.))
def guide():
pyro.sample("stability", dist.Delta(torch.tensor(1.5)))
pyro.sample("skew", dist.Delta(torch.tensor(0.)))
pyro.sample("z_uniform", dist.Delta(torch.tensor(0.1)))
pyro.sample("z_exponential", dist.Delta(torch.tensor(1.)))
assert_ok(model, guide, Trace_ELBO())
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_normal_normal(num_particles):
pytest.importorskip("funsor")
data = torch.tensor(0.)
def model():
x = pyro.sample("x", dist.Normal(0., 1.))
with poutine.collapse():
y = pyro.sample("y", dist.Normal(x, 1.))
pyro.sample("z", dist.Normal(y, 1.), obs=data)
def guide():
loc = pyro.param("loc", torch.tensor(0.))
scale = pyro.param("scale", torch.tensor(1.),
constraint=constraints.positive)
pyro.sample("x", dist.Normal(loc, scale))
elbo = Trace_ELBO(num_particles=num_particles, vectorize_particles=True)
assert_ok(model, guide, elbo)
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_normal_normal_plate(num_particles):
pytest.importorskip("funsor")
data = torch.randn(5)
def model():
x = pyro.sample("x", dist.Normal(0., 1.))
with poutine.collapse():
y = pyro.sample("y", dist.Normal(x, 1.))
with pyro.plate("data", len(data), dim=-1):
pyro.sample("z", dist.Normal(y, 1.), obs=data)
def guide():
loc = pyro.param("loc", torch.tensor(0.))
scale = pyro.param("scale", torch.tensor(1.),
constraint=constraints.positive)
pyro.sample("x", dist.Normal(loc, scale))
elbo = Trace_ELBO(num_particles=num_particles, vectorize_particles=True,
max_plate_nesting=1)
assert_ok(model, guide, elbo)
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_normal_plate_normal(num_particles):
pytest.importorskip("funsor")
data = torch.randn(5)
def model():
x = pyro.sample("x", dist.Normal(0., 1.))
with poutine.collapse():
with pyro.plate("data", len(data), dim=-1):
y = pyro.sample("y", dist.Normal(x, 1.))
pyro.sample("z", dist.Normal(y, 1.), obs=data)
def guide():
loc = pyro.param("loc", torch.tensor(0.))
scale = pyro.param("scale", torch.tensor(1.),
constraint=constraints.positive)
pyro.sample("x", dist.Normal(loc, scale))
elbo = Trace_ELBO(num_particles=num_particles, vectorize_particles=True,
max_plate_nesting=1)
assert_ok(model, guide, elbo)
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_beta_bernoulli(num_particles):
pytest.importorskip("funsor")
data = torch.tensor(0.)
def model():
c = pyro.sample("c", dist.Gamma(1, 1))
with poutine.collapse():
probs = pyro.sample("probs", dist.Beta(c, 2))
pyro.sample("obs", dist.Bernoulli(probs), obs=data)
def guide():
a = pyro.param("a", torch.tensor(1.), constraint=constraints.positive)
b = pyro.param("b", torch.tensor(1.), constraint=constraints.positive)
pyro.sample("c", dist.Gamma(a, b))
elbo = Trace_ELBO(num_particles=num_particles, vectorize_particles=True)
assert_ok(model, guide, elbo)
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_beta_binomial(num_particles):
pytest.importorskip("funsor")
data = torch.tensor(5.)
def model():
c = pyro.sample("c", dist.Gamma(1, 1))
with poutine.collapse():
probs = pyro.sample("probs", dist.Beta(c, 2))
pyro.sample("obs", dist.Binomial(10, probs), obs=data)
def guide():
a = pyro.param("a", torch.tensor(1.), constraint=constraints.positive)
b = pyro.param("b", torch.tensor(1.), constraint=constraints.positive)
pyro.sample("c", dist.Gamma(a, b))
elbo = Trace_ELBO(num_particles=num_particles, vectorize_particles=True)
assert_ok(model, guide, elbo)
@pytest.mark.xfail(reason="missing pattern in Funsor")
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_beta_binomial_plate(num_particles):
pytest.importorskip("funsor")
data = torch.tensor([0., 1., 5., 5.])
def model():
c = pyro.sample("c", dist.Gamma(1, 1))
with poutine.collapse():
probs = pyro.sample("probs", dist.Beta(c, 2))
with pyro.plate("plate", len(data)):
pyro.sample("obs", dist.Binomial(10, probs),
obs=data)
def guide():
a = pyro.param("a", torch.tensor(1.), constraint=constraints.positive)
b = pyro.param("b", torch.tensor(1.), constraint=constraints.positive)
pyro.sample("c", dist.Gamma(a, b))
elbo = Trace_ELBO(num_particles=num_particles, vectorize_particles=True,
max_plate_nesting=1)
assert_ok(model, guide, elbo)
@pytest.mark.stage("funsor")
@pytest.mark.parametrize("num_particles", [1, 2])
def test_collapse_barrier(num_particles):
pytest.importorskip("funsor")
data = torch.tensor([0., 1., 5., 5.])
def model():
with poutine.collapse():
z = pyro.sample("z_init", dist.Normal(0, 1))
for t, x in enumerate(data):
z = pyro.sample("z_{}".format(t), dist.Normal(z, 1))
pyro.sample("x_t{}".format(t), dist.Normal(z, 1), obs=x)
z = pyro.barrier(z)
z = torch.sigmoid(z)
return z
def guide():
pass
elbo = Trace_ELBO(num_particles=num_particles, vectorize_particles=True)
assert_ok(model, guide, elbo)
def test_ordered_logistic_plate():
N = 5 # num data points/batch size
K = 4 # num categories
data = (K*torch.rand(N)).long().float()
def model():
predictor = pyro.sample("predictor", dist.Normal(0., 1.).expand([N]).to_event(1))
cutpoints = pyro.sample("cutpoints", dist.Normal(0., 1.).expand([K-1]).to_event(1))
# would have identifiability issues, but this isn't a real model...
cutpoints = torch.sort(cutpoints, dim=-1).values
with pyro.plate("obs_plate", N):
pyro.sample("obs", dist.OrderedLogistic(predictor, cutpoints), obs=data)
def guide():
# parameters
pred_mu = pyro.param("pred_mu", torch.zeros(N))
pred_std = pyro.param("pred_std", torch.ones(N))
cp_mu = pyro.param("cp_mu", torch.zeros(K-1))
cp_std = pyro.param("cp_std", torch.ones(K-1))
# sample
pyro.sample("predictor", dist.Normal(pred_mu, pred_std).to_event(1))
pyro.sample("cutpoints", dist.Normal(cp_mu, cp_std).to_event(1))
assert_ok(model, guide, Trace_ELBO())
|
"""
Main program dla 2to3.
"""
z __future__ zaimportuj with_statement, print_function
zaimportuj sys
zaimportuj os
zaimportuj difflib
zaimportuj logging
zaimportuj shutil
zaimportuj optparse
z . zaimportuj refactor
def diff_texts(a, b, filename):
"""Return a unified diff of two strings."""
a = a.splitlines()
b = b.splitlines()
zwróć difflib.unified_diff(a, b, filename, filename,
"(original)", "(refactored)",
lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
    """
    A refactoring tool that can avoid overwriting its input files.
    Prints output to stdout.
    Output files can optionally be written to a different directory and/or
    have an extra file suffix appended to their name for use in situations
    where you do not want to replace the input files.
    """
    def __init__(self, fixers, options, explicit, nobackups, show_diffs,
                 input_base_dir='', output_dir='', append_suffix=''):
        """
        Args:
            fixers: A list of fixers to import.
            options: A dict with RefactoringTool configuration.
            explicit: A list of fixers to run even if they are explicit.
            nobackups: If true no backup '.bak' files will be created for those
                files that are being refactored.
            show_diffs: Should diffs of the refactoring be printed to stdout?
            input_base_dir: The base directory for all input files. This class
                will strip this path prefix off of filenames before substituting
                it with output_dir. Only meaningful if output_dir is supplied.
                All files processed by refactor() must start with this path.
            output_dir: If supplied, all converted files will be written into
                this directory tree instead of input_base_dir.
            append_suffix: If supplied, all files output by this tool will have
                this appended to their filename. Useful for changing .py to
                .py3 for example by passing append_suffix='3'.
        """
        self.nobackups = nobackups
        self.show_diffs = show_diffs
        if input_base_dir and not input_base_dir.endswith(os.sep):
            input_base_dir += os.sep
        self._input_base_dir = input_base_dir
        self._output_dir = output_dir
        self._append_suffix = append_suffix
        super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
def log_error(self, msg, *args, **kwargs):
self.errors.append((msg, args, kwargs))
self.logger.error(msg, *args, **kwargs)
    def write_file(self, new_text, filename, old_text, encoding):
        orig_filename = filename
        if self._output_dir:
            if filename.startswith(self._input_base_dir):
                filename = os.path.join(self._output_dir,
                                        filename[len(self._input_base_dir):])
            else:
                raise ValueError('filename %s does not start with the '
                                 'input_base_dir %s' % (
                                     filename, self._input_base_dir))
        if self._append_suffix:
            filename += self._append_suffix
        if orig_filename != filename:
            output_dir = os.path.dirname(filename)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            self.log_message('Writing converted %s to %s.', orig_filename,
                             filename)
        if not self.nobackups:
            # Make backup
            backup = filename + ".bak"
            if os.path.lexists(backup):
                try:
                    os.remove(backup)
                except OSError as err:
                    self.log_message("Can't remove backup %s", backup)
            try:
                os.rename(filename, backup)
            except OSError as err:
                self.log_message("Can't rename %s to %s", filename, backup)
        # Actually write the new file
        write = super(StdoutRefactoringTool, self).write_file
        write(new_text, filename, old_text, encoding)
        if not self.nobackups:
            shutil.copymode(backup, filename)
        if orig_filename != filename:
            # Preserve the file mode in the new output directory.
            shutil.copymode(orig_filename, filename)
    def print_output(self, old, new, filename, equal):
        if equal:
            self.log_message("No changes to %s", filename)
        else:
            self.log_message("Refactored %s", filename)
            if self.show_diffs:
                diff_lines = diff_texts(old, new, filename)
                try:
                    if self.output_lock is not None:
                        with self.output_lock:
                            for line in diff_lines:
                                print(line)
                            sys.stdout.flush()
                    else:
                        for line in diff_lines:
                            print(line)
                except UnicodeEncodeError:
                    warn("couldn't encode %s's diff for your terminal" %
                         (filename,))
                    return
def warn(msg):
print("WARNING: %s" % (msg,), file=sys.stderr)
def main(fixer_pkg, args=None):
    """Main program.
    Args:
        fixer_pkg: the name of a package where the fixers are located.
        args: optional; a list of command line arguments. If omitted,
              sys.argv[1:] is used.
    Returns a suggested exit status (0, 1, 2).
    """
    # Set up option parser
    parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
    parser.add_option("-d", "--doctests_only", action="store_true",
                      help="Fix up doctests only")
    parser.add_option("-f", "--fix", action="append", default=[],
                      help="Each FIX specifies a transformation; default: all")
    parser.add_option("-j", "--processes", action="store", default=1,
                      type="int", help="Run 2to3 concurrently")
    parser.add_option("-x", "--nofix", action="append", default=[],
                      help="Prevent a transformation from being run")
    parser.add_option("-l", "--list-fixes", action="store_true",
                      help="List available transformations")
    parser.add_option("-p", "--print-function", action="store_true",
                      help="Modify the grammar so that print() is a function")
    parser.add_option("-v", "--verbose", action="store_true",
                      help="More verbose logging")
    parser.add_option("--no-diffs", action="store_true",
                      help="Don't show diffs of the refactoring")
    parser.add_option("-w", "--write", action="store_true",
                      help="Write back modified files")
    parser.add_option("-n", "--nobackups", action="store_true", default=False,
                      help="Don't write backups for modified files")
    parser.add_option("-o", "--output-dir", action="store", type="str",
                      default="", help="Put output files in this directory "
                      "instead of overwriting the input files. Requires -n.")
    parser.add_option("-W", "--write-unchanged-files", action="store_true",
                      help="Also write files even if no changes were required"
                      " (useful with --output-dir); implies -w.")
    parser.add_option("--add-suffix", action="store", type="str", default="",
                      help="Append this string to all output filenames."
                      " Requires -n if non-empty. "
                      "ex: --add-suffix='3' will generate .py3 files.")
    # Parse command line arguments
    refactor_stdin = False
    flags = {}
    options, args = parser.parse_args(args)
    if options.write_unchanged_files:
        flags["write_unchanged_files"] = True
        if not options.write:
            warn("--write-unchanged-files/-W implies -w.")
        options.write = True
    # If we allowed these, the original files would be renamed to backup names
    # but not replaced.
    if options.output_dir and not options.nobackups:
        parser.error("Can't use --output-dir/-o without -n.")
    if options.add_suffix and not options.nobackups:
        parser.error("Can't use --add-suffix without -n.")
    if not options.write and options.no_diffs:
        warn("not writing files and not printing diffs; that's not very useful")
    if not options.write and options.nobackups:
        parser.error("Can't use -n without -w")
    if options.list_fixes:
        print("Available transformations for the -f/--fix option:")
        for fixname in refactor.get_all_fix_names(fixer_pkg):
            print(fixname)
        if not args:
            return 0
    if not args:
        print("At least one file or directory argument required.", file=sys.stderr)
        print("Use --help to show usage.", file=sys.stderr)
        return 2
    if "-" in args:
        refactor_stdin = True
        if options.write:
            print("Can't write to stdin.", file=sys.stderr)
            return 2
    if options.print_function:
        flags["print_function"] = True
    # Set up logging handler
    level = logging.DEBUG if options.verbose else logging.INFO
    logging.basicConfig(format='%(name)s: %(message)s', level=level)
    logger = logging.getLogger('lib2to3.main')
    # Initialize the refactoring tool
    avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
    unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
    explicit = set()
    if options.fix:
        all_present = False
        for fix in options.fix:
            if fix == "all":
                all_present = True
            else:
                explicit.add(fixer_pkg + ".fix_" + fix)
        requested = avail_fixes.union(explicit) if all_present else explicit
    else:
        requested = avail_fixes.union(explicit)
    fixer_names = requested.difference(unwanted_fixes)
    input_base_dir = os.path.commonprefix(args)
    if (input_base_dir and not input_base_dir.endswith(os.sep)
            and not os.path.isdir(input_base_dir)):
        # One or more similar names were passed, their directory is the base.
        # os.path.commonprefix() is ignorant of path elements, this corrects
        # for that weird API.
        input_base_dir = os.path.dirname(input_base_dir)
    if options.output_dir:
        input_base_dir = input_base_dir.rstrip(os.sep)
        logger.info('Output in %r will mirror the input directory %r layout.',
                    options.output_dir, input_base_dir)
    rt = StdoutRefactoringTool(
        sorted(fixer_names), flags, sorted(explicit),
        options.nobackups, not options.no_diffs,
        input_base_dir=input_base_dir,
        output_dir=options.output_dir,
        append_suffix=options.add_suffix)
    # Refactor all files and directories passed as arguments
    if not rt.errors:
        if refactor_stdin:
            rt.refactor_stdin()
        else:
            try:
                rt.refactor(args, options.write, options.doctests_only,
                            options.processes)
            except refactor.MultiprocessingUnsupported:
                assert options.processes > 1
                print("Sorry, -j isn't supported on this platform.",
                      file=sys.stderr)
                return 1
        rt.summarize()
    # Return error status (0 if rt.errors is zero)
    return int(bool(rt.errors))
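# Hedged usage note (not part of the original module): the 2to3 console script is
# typically just a thin wrapper around main(), along the lines of:
#
#     import sys
#     from lib2to3.main import main
#     sys.exit(main("lib2to3.fixes"))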
|
from django import forms
from zentral.contrib.inventory.models import BusinessUnit
from .models import SimpleMDMInstance
from .api_client import APIClient, APIClientError
class SimpleMDMInstanceForm(forms.ModelForm):
class Meta:
model = SimpleMDMInstance
fields = ("business_unit", "api_key")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["business_unit"].queryset = (
BusinessUnit.objects.filter(source__module="zentral.contrib.inventory")
.order_by('meta_business_unit__name')
)
def clean_api_key(self):
api_key = self.cleaned_data["api_key"]
if api_key:
api_client = APIClient(api_key)
try:
account = api_client.get_account()
except APIClientError as e:
if e.status_code == 401:
msg = "Invalid API key"
else:
msg = "API Error"
raise forms.ValidationError(msg)
else:
self.account_name = account["name"]
return api_key
def save(self, *args, **kwargs):
simplemdm_instance = super().save(commit=False)
simplemdm_instance.account_name = self.account_name
simplemdm_instance.save()
return simplemdm_instance
|
import logging
import urllib.request
import time
import sys
import bot
def is_connected(url_string):
connected = False
try:
urllib.request.urlopen(url_string, timeout=5)
connected = True
except Exception as e:
logging.error(e)
return connected
def usage():
print("Usage : python3 %s {config file}" % sys.argv[0])
if __name__ == "__main__":
logging.basicConfig(#filename='teller.log',
format='[%(asctime)s] %(levelname)s %(message)s',
level=logging.DEBUG)
if len(sys.argv) != 2:
usage()
sys.exit(1)
config_file = sys.argv[1]
url = "https://telegram.me/tell_me_your_ip_bot"
while not is_connected(url):
logging.info("Not connected.")
time.sleep(10)
logging.info("Connected to url %s", url)
telegram_bot = bot.TelegramBotHandler(config_file)
telegram_bot.start()
|
# Copyright (c) 2017-2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
The core scheduling logic, managing the tricky interaction between the main asyncio event loop and
background threads.
"""
from asyncio import CancelledError, Future, InvalidStateError, gather, get_event_loop, wait_for
from concurrent.futures import ThreadPoolExecutor
from datetime import timedelta
import signal
from typing import List
import warnings
from ..prim import TimeDeltaLike, to_timedelta
from ..util.asyncio_util import execute_in_loop, safe_create_future
from ._base import RunLevel
__all__ = ["Invoker", "DEFAULT_TIMEOUT"]
DEFAULT_TIMEOUT = timedelta(seconds=30)
class Invoker:
"""
A generally thread-safe invoker that aids in coordination between event-loop driven events and
background thread events.
This serves a similar purpose to Akka's ExecutionContext in Scala.
"""
def __init__(self) -> None:
self.level = RunLevel.RUN_FOREVER
self.loop = None
self.executor = None
self._futures = [] # type: List[Future]
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self.shutdown(exception=exc_val)
def _unhook_future(self, fut: Future):
try:
self._futures.remove(fut)
except ValueError:
pass
def create_future(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
f = safe_create_future()
f.add_done_callback(self._unhook_future)
self._futures.append(f)
return f
async def shutdown(self, timeout=DEFAULT_TIMEOUT, exception=None) -> None:
"""
Stop the event loop and executor. Outstanding futures will be terminated after the
specified timeout, or if an error is provided, that error will be used to terminate all
futures IMMEDIATELY.
:param timeout:
The maximum amount of time before outstanding Futures are terminated ungracefully.
:param exception:
If provided, is used to trigger all outstanding futures as failures.
"""
t = to_timedelta(timeout)
if exception is not None:
for fut in self._futures:
if not fut.done():
fut.set_exception(exception)
else:
if self._futures:
if t.total_seconds() > 0:
try:
await wait_for(gather(*self._futures), timeout=t.total_seconds())
except CancelledError:
for fut in self._futures:
if not fut.done():
fut.cancel()
else:
for fut in self._futures:
if not fut.done():
fut.cancel()
def set_context_as_current(self) -> None:
"""
Adopt the current event loop as the loop for this :class:`Invoker`, and additionally define
a default executor if one has not yet been set.
"""
self.loop = get_event_loop()
if self.executor is None:
self.executor = ThreadPoolExecutor()
def run_in_loop(self, func, timeout: TimeDeltaLike = 30.0):
"""
Schedule a normal function or coroutine function to be run on the event loop, and block
until the function has returned.
"""
# TODO: the awful awful witchcraft required to remove these checks
if self.loop is None:
raise InvalidStateError("loop must be set before calling these methods")
return execute_in_loop(self.loop, func, timeout=timeout)
def run_in_executor(self, func):
"""
Schedule a normal function to be run on a background thread, and yield until the function
has returned.
"""
# TODO: the awful awful witchcraft required to remove these checks
if self.loop is None or self.executor is None:
raise InvalidStateError("loop must be set before calling these methods")
return self.loop.run_in_executor(self.executor, func)
def install_signal_handlers(self) -> None:
try:
if self.loop is not None:
self.loop.add_signal_handler(signal.SIGINT, self.handle_sigint)
self.loop.add_signal_handler(signal.SIGQUIT, self.handle_sigquit)
else:
signal.signal(signal.SIGINT, lambda *_: self.handle_sigint())
signal.signal(signal.SIGQUIT, lambda *_: self.handle_sigquit())
except (NotImplementedError, AttributeError, ValueError):
# SIGINT and SIGQUIT are not supported on Windows.
pass
def handle_sigint(self) -> None:
self.level = RunLevel.TERMINATE_GRACEFULLY
def handle_sigquit(self) -> None:
self.level = RunLevel.TERMINATE_IMMEDIATELY
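# Hedged usage sketch (not part of the original module; names below are illustrative):
#
#     async def example():
#         async with Invoker() as invoker:
#             invoker.set_context_as_current()
#             # run a blocking callable on the background executor without
#             # blocking the event loop
#             return await invoker.run_in_executor(lambda: sum(range(10)))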
|
# Copyright (C) 2015 Twitter, Inc.
"""Container for all campaign management logic used by the Ads API SDK."""
from twitter_ads.enum import TRANSFORM
from twitter_ads.analytics import Analytics
from twitter_ads.resource import resource_property, Resource, Persistence, Batch
from twitter_ads.http import Request
from twitter_ads.cursor import Cursor
from twitter_ads.utils import FlattenParams
from twitter_ads import API_VERSION
class TargetingCriteria(Resource, Persistence, Batch):
PROPERTIES = {}
BATCH_RESOURCE_COLLECTION = '/' + API_VERSION + '/batch/accounts/{account_id}/\
targeting_criteria'
RESOURCE_COLLECTION = '/' + API_VERSION + '/accounts/{account_id}/targeting_criteria'
RESOURCE = '/' + API_VERSION + '/accounts/{account_id}/targeting_criteria/{id}'
RESOURCE_OPTIONS = '/' + API_VERSION + '/targeting_criteria/'
@classmethod
@FlattenParams
def all(klass, account, **kwargs):
"""Returns a Cursor instance for a given resource."""
resource = klass.RESOURCE_COLLECTION.format(account_id=account.id)
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(klass, request, init_with=[account])
@classmethod
def app_store_categories(klass, account, **kwargs):
"""Returns a list of supported app store categories"""
resource = klass.RESOURCE_OPTIONS + 'app_store_categories'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def behavior_taxonomies(klass, account, **kwargs):
"""Returns a list of supported behavior taxonomies"""
resource = klass.RESOURCE_OPTIONS + 'behavior_taxonomies'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def behaviors(klass, account, **kwargs):
"""Returns a list of supported behaviors"""
resource = klass.RESOURCE_OPTIONS + 'behaviors'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def conversations(klass, account, **kwargs):
"""Returns a list of supported conversations"""
resource = klass.RESOURCE_OPTIONS + 'conversations'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def devices(klass, account, **kwargs):
"""Returns a list of supported devices"""
resource = klass.RESOURCE_OPTIONS + 'devices'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def events(klass, account, **kwargs):
"""Returns a list of supported events"""
resource = klass.RESOURCE_OPTIONS + 'events'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def interests(klass, account, **kwargs):
"""Returns a list of supported interests"""
resource = klass.RESOURCE_OPTIONS + 'interests'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def languages(klass, account, **kwargs):
"""Returns a list of supported languages"""
resource = klass.RESOURCE_OPTIONS + 'languages'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def locations(klass, account, **kwargs):
"""Returns a list of supported locations"""
resource = klass.RESOURCE_OPTIONS + 'locations'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def network_operators(klass, account, **kwargs):
"""Returns a list of supported network operators"""
resource = klass.RESOURCE_OPTIONS + 'network_operators'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def platforms(klass, account, **kwargs):
"""Returns a list of supported platforms"""
resource = klass.RESOURCE_OPTIONS + 'platforms'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def platform_versions(klass, account, **kwargs):
"""Returns a list of supported platform versions"""
resource = klass.RESOURCE_OPTIONS + 'platform_versions'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def tv_markets(klass, account, **kwargs):
"""Returns a list of supported TV markets"""
resource = klass.RESOURCE_OPTIONS + 'tv_markets'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
@classmethod
def tv_shows(klass, account, **kwargs):
"""Returns a list of supported TV shows"""
resource = klass.RESOURCE_OPTIONS + 'tv_shows'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request)
# targeting criteria properties
# read-only
resource_property(TargetingCriteria, 'id', readonly=True)
resource_property(TargetingCriteria, 'name', readonly=True)
resource_property(TargetingCriteria, 'localized_name', readonly=True)
resource_property(TargetingCriteria, 'created_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(TargetingCriteria, 'updated_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(TargetingCriteria, 'deleted', readonly=True, transform=TRANSFORM.BOOL)
# writable
resource_property(TargetingCriteria, 'line_item_id')
resource_property(TargetingCriteria, 'operator_type')
resource_property(TargetingCriteria, 'targeting_type')
resource_property(TargetingCriteria, 'targeting_value')
# sdk-only
resource_property(TargetingCriteria, 'to_delete', transform=TRANSFORM.BOOL)
class FundingInstrument(Analytics, Resource, Persistence):
PROPERTIES = {}
RESOURCE_COLLECTION = '/' + API_VERSION + '/accounts/{account_id}/funding_instruments'
RESOURCE = '/' + API_VERSION + '/accounts/{account_id}/funding_instruments/{id}'
# funding instrument properties
# read-only
resource_property(FundingInstrument, 'id', readonly=True)
resource_property(FundingInstrument, 'name', readonly=True)
resource_property(FundingInstrument, 'credit_limit_local_micro', readonly=True)
resource_property(FundingInstrument, 'currency', readonly=True)
resource_property(FundingInstrument, 'description', readonly=True)
resource_property(FundingInstrument, 'funded_amount_local_micro', readonly=True)
resource_property(FundingInstrument, 'type', readonly=True)
resource_property(FundingInstrument, 'created_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(FundingInstrument, 'updated_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(FundingInstrument, 'deleted', readonly=True, transform=TRANSFORM.BOOL)
resource_property(FundingInstrument, 'able_to_fund', readonly=True, transform=TRANSFORM.BOOL)
resource_property(FundingInstrument, 'entity_status', readonly=True)
resource_property(FundingInstrument, 'io_header', readonly=True)
resource_property(FundingInstrument, 'reasons_not_able_to_fund', readonly=True,
transform=TRANSFORM.LIST)
resource_property(FundingInstrument, 'start_time', readonly=True)
resource_property(FundingInstrument, 'end_time', readonly=True)
resource_property(FundingInstrument, 'credit_remaining_local_micro', readonly=True)
class PromotableUser(Resource):
PROPERTIES = {}
RESOURCE_COLLECTION = '/' + API_VERSION + '/accounts/{account_id}/promotable_users'
RESOURCE = '/' + API_VERSION + '/accounts/{account_id}/promotable_users/{id}'
# promotable user properties
# read-only
resource_property(PromotableUser, 'id', readonly=True)
resource_property(PromotableUser, 'promotable_user_type', readonly=True)
resource_property(PromotableUser, 'user_id', readonly=True)
resource_property(PromotableUser, 'created_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(PromotableUser, 'updated_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(PromotableUser, 'deleted', readonly=True, transform=TRANSFORM.BOOL)
class AppList(Resource, Persistence):
PROPERTIES = {}
RESOURCE_COLLECTION = '/' + API_VERSION + '/accounts/{account_id}/app_lists'
RESOURCE = '/' + API_VERSION + '/accounts/{account_id}/app_lists/{id}'
def apps(self):
if self.id and not hasattr(self, '_apps'):
self.reload()
return self._apps
# app list properties
# read-only
resource_property(AppList, 'id', readonly=True)
resource_property(AppList, 'name', readonly=True)
resource_property(AppList, 'apps', readonly=True)
class Campaign(Analytics, Resource, Persistence, Batch):
PROPERTIES = {}
BATCH_RESOURCE_COLLECTION = '/' + API_VERSION + '/batch/accounts/{account_id}/campaigns'
RESOURCE_COLLECTION = '/' + API_VERSION + '/accounts/{account_id}/campaigns'
RESOURCE = '/' + API_VERSION + '/accounts/{account_id}/campaigns/{id}'
# campaign properties
# read-only
resource_property(Campaign, 'created_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(Campaign, 'currency', readonly=True)
resource_property(Campaign, 'deleted', readonly=True, transform=TRANSFORM.BOOL)
resource_property(Campaign, 'id', readonly=True)
resource_property(Campaign, 'reasons_not_servable', readonly=True)
resource_property(Campaign, 'servable', readonly=True, transform=TRANSFORM.BOOL)
resource_property(Campaign, 'updated_at', readonly=True, transform=TRANSFORM.TIME)
# writable
resource_property(Campaign, 'daily_budget_amount_local_micro')
resource_property(Campaign, 'duration_in_days', transform=TRANSFORM.INT)
resource_property(Campaign, 'end_time', transform=TRANSFORM.TIME)
resource_property(Campaign, 'entity_status')
resource_property(Campaign, 'frequency_cap', transform=TRANSFORM.INT)
resource_property(Campaign, 'funding_instrument_id')
resource_property(Campaign, 'name')
resource_property(Campaign, 'standard_delivery', transform=TRANSFORM.BOOL)
resource_property(Campaign, 'start_time', transform=TRANSFORM.TIME)
resource_property(Campaign, 'total_budget_amount_local_micro')
# sdk-only
resource_property(Campaign, 'to_delete', transform=TRANSFORM.BOOL)
class LineItem(Analytics, Resource, Persistence, Batch):
PROPERTIES = {}
BATCH_RESOURCE_COLLECTION = '/' + API_VERSION + '/batch/accounts/{account_id}/line_items'
RESOURCE_COLLECTION = '/' + API_VERSION + '/accounts/{account_id}/line_items'
RESOURCE = '/' + API_VERSION + '/accounts/{account_id}/line_items/{id}'
def targeting_criteria(self, id=None, **kwargs):
"""
Returns a collection of targeting criteria available to the
current line item.
"""
self._validate_loaded()
if id is None:
return TargetingCriteria.all(self.account, line_item_ids=[self.id], **kwargs)
else:
return TargetingCriteria.load(self.account, id, **kwargs)
def save(self):
super(LineItem, self).save()
# line item properties
# read-only
resource_property(LineItem, 'created_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(LineItem, 'deleted', readonly=True, transform=TRANSFORM.BOOL)
resource_property(LineItem, 'id', readonly=True)
resource_property(LineItem, 'updated_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(LineItem, 'creative_source', readonly=True)
resource_property(LineItem, 'currency', readonly=True)
resource_property(LineItem, 'target_cpa_local_micro', readonly=True)
# writable
resource_property(LineItem, 'advertiser_domain')
resource_property(LineItem, 'advertiser_user_id')
resource_property(LineItem, 'bid_amount_local_micro')
resource_property(LineItem, 'bid_strategy')
resource_property(LineItem, 'campaign_id')
resource_property(LineItem, 'categories', transform=TRANSFORM.LIST)
resource_property(LineItem, 'end_time', transform=TRANSFORM.TIME)
resource_property(LineItem, 'entity_status')
resource_property(LineItem, 'goal')
resource_property(LineItem, 'ios_app_store_identifier')
resource_property(LineItem, 'android_app_store_identifier')
resource_property(LineItem, 'audience_expansion')
resource_property(LineItem, 'name')
resource_property(LineItem, 'objective')
resource_property(LineItem, 'pay_by')
resource_property(LineItem, 'placements', transform=TRANSFORM.LIST)
resource_property(LineItem, 'primary_web_event_tag')
resource_property(LineItem, 'product_type')
resource_property(LineItem, 'start_time', transform=TRANSFORM.TIME)
resource_property(LineItem, 'total_budget_amount_local_micro')
resource_property(LineItem, 'tracking_tags')
# sdk-only
resource_property(LineItem, 'to_delete', transform=TRANSFORM.BOOL)
class LineItemApps(Resource, Persistence):
PROPERTIES = {}
RESOURCE_COLLECTION = '/' + API_VERSION + '/accounts/{account_id}/line_item_apps'
RESOURCE = '/' + API_VERSION + '/accounts/{account_id}/line_item_apps/{id}'
resource_property(LineItemApps, 'created_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(LineItemApps, 'updated_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(LineItemApps, 'deleted', readonly=True, transform=TRANSFORM.BOOL)
resource_property(LineItemApps, 'id', readonly=True)
resource_property(LineItemApps, 'os_type', readonly=True)
resource_property(LineItemApps, 'app_store_identifier', readonly=True)
resource_property(LineItemApps, 'line_item_id', readonly=True)
class LineItemPlacements(Resource):
PROPERTIES = {}
RESOURCE_COLLECTION = '/' + API_VERSION + '/line_items/placements'
resource_property(LineItemPlacements, 'product_type', readonly=True)
resource_property(LineItemPlacements, 'placements', readonly=True)
class ScheduledPromotedTweet(Resource, Persistence):
PROPERTIES = {}
RESOURCE_COLLECTION = '/' + API_VERSION + '/accounts/{account_id}/scheduled_promoted_tweets'
RESOURCE = '/' + API_VERSION + '/accounts/{account_id}/scheduled_promoted_tweets/{id}'
# scheduled promoted tweets properties
# read-only
resource_property(ScheduledPromotedTweet, 'created_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(ScheduledPromotedTweet, 'deleted', readonly=True, transform=TRANSFORM.BOOL)
resource_property(ScheduledPromotedTweet, 'id', readonly=True)
resource_property(ScheduledPromotedTweet, 'tweet_id', readonly=True)
resource_property(ScheduledPromotedTweet, 'updated_at', readonly=True, transform=TRANSFORM.TIME)
# writable
resource_property(ScheduledPromotedTweet, 'line_item_id')
resource_property(ScheduledPromotedTweet, 'scheduled_tweet_id')
class TrackingTags(Resource, Persistence):
PROPERTIES = {}
RESOURCE_COLLECTION = '/' + API_VERSION + '/accounts/{account_id}/tracking_tags'
RESOURCE = '/' + API_VERSION + '/accounts/{account_id}/tracking_tags/{id}'
# tracking tags properties
# read-only
resource_property(TrackingTags, 'created_at', readonly=True, transform=TRANSFORM.TIME)
resource_property(TrackingTags, 'id', readonly=True)
resource_property(TrackingTags, 'deleted', readonly=True, transform=TRANSFORM.BOOL)
resource_property(TrackingTags, 'updated_at', readonly=True, transform=TRANSFORM.TIME)
# writable
resource_property(TrackingTags, 'line_item_id')
resource_property(TrackingTags, 'tracking_tag_type')
resource_property(TrackingTags, 'tracking_tag_url')
class Tweet(object):
TWEET_CREATE = '/' + API_VERSION + '/accounts/{account_id}/tweet'
def __init__(self):
raise NotImplementedError(
'Error! {name} cannot be instantiated.'.format(name=self.__class__.__name__))
@classmethod
@FlattenParams
def create(klass, account, **kwargs):
"""
Creates a "Promoted-Only" Tweet using the specialized Ads API end point.
"""
resource = klass.TWEET_CREATE.format(account_id=account.id)
response = Request(account.client, 'post', resource, params=kwargs).perform()
return response.body['data']
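# Hedged usage sketch (not part of the original module): the parameter name below is
# an assumption based on the Ads API tweet endpoint, not taken from this file:
#
#     tweet = Tweet.create(account, text='hello world')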
class UserSettings(Resource, Persistence):
PROPERTIES = {}
RESOURCE = '/' + API_VERSION + '/accounts/{account_id}/user_settings/{id}'
# user settings properties
# writable
resource_property(UserSettings, 'notification_email')
resource_property(UserSettings, 'contact_phone')
resource_property(UserSettings, 'contact_phone_extension')
resource_property(UserSettings, 'subscribed_email_types')
resource_property(UserSettings, 'user_id')
class TaxSettings(Resource, Persistence):
PROPERTIES = {}
RESOURCE = '/' + API_VERSION + '/accounts/{account_id}/tax_settings'
@classmethod
def load(self, account):
"""
Returns an object instance for a given account.
"""
resource = self.RESOURCE.format(account_id=account.id)
response = Request(account.client, 'get', resource).perform()
return self(account).from_response(response.body['data'])
def save(self):
"""
Update the current object instance.
"""
resource = self.RESOURCE.format(account_id=self.account.id)
response = Request(
self.account.client, 'put',
resource, params=self.to_params()).perform()
return self.from_response(response.body['data'])
# tax settings properties
# writable
resource_property(TaxSettings, 'address_city')
resource_property(TaxSettings, 'address_country')
resource_property(TaxSettings, 'address_email')
resource_property(TaxSettings, 'address_first_name')
resource_property(TaxSettings, 'address_last_name')
resource_property(TaxSettings, 'address_name')
resource_property(TaxSettings, 'address_postal_code')
resource_property(TaxSettings, 'address_region')
resource_property(TaxSettings, 'address_street1')
resource_property(TaxSettings, 'address_street2')
resource_property(TaxSettings, 'bill_to')
resource_property(TaxSettings, 'business_relationship')
resource_property(TaxSettings, 'client_address_city')
resource_property(TaxSettings, 'client_address_country')
resource_property(TaxSettings, 'client_address_email')
resource_property(TaxSettings, 'client_address_first_name')
resource_property(TaxSettings, 'client_address_last_name')
resource_property(TaxSettings, 'client_address_name')
resource_property(TaxSettings, 'client_address_postal_code')
resource_property(TaxSettings, 'client_address_region')
resource_property(TaxSettings, 'client_address_street1')
resource_property(TaxSettings, 'client_address_street2')
resource_property(TaxSettings, 'invoice_jurisdiction')
resource_property(TaxSettings, 'tax_category')
resource_property(TaxSettings, 'tax_exemption_id')
resource_property(TaxSettings, 'tax_id')
class ContentCategories(Resource):
PROPERTIES = {}
RESOURCE_COLLECTION = '/' + API_VERSION + '/content_categories'
resource_property(ContentCategories, 'id', readonly=True)
resource_property(ContentCategories, 'name', readonly=True)
resource_property(ContentCategories, 'iab_categories', readonly=True)
class IabCategories(Resource):
PROPERTIES = {}
RESOURCE_COLLECTION = '/' + API_VERSION + '/iab_categories'
resource_property(IabCategories, 'id', readonly=True)
resource_property(IabCategories, 'name', readonly=True)
resource_property(IabCategories, 'parent_id', readonly=True)
class AdvertiserBusinessCategories(Resource):
PROPERTIES = {}
RESOURCE_COLLECTION = '/' + API_VERSION + '/advertiser_business_categories'
resource_property(AdvertiserBusinessCategories, 'id', readonly=True)
resource_property(AdvertiserBusinessCategories, 'name', readonly=True)
resource_property(AdvertiserBusinessCategories, 'iab_categories', readonly=True)
|
from typing import Optional
from password_manager.database_manager import DatabaseManager
from password_manager.encryption.record_reader import EncryptedRecordReader
from password_manager.encryption.record_writer import EncryptedRecordWriter
from password_manager.integration.controller import IntegrationController
from password_manager.repositories.encryption_metadata import EncryptionMetadataRepository
from password_manager.repositories.record import RecordRepository
from password_manager.utils.logger import Logger
from password_manager.utils.options import get_generation_options
from password_manager.utils.password_generator import GenerationOptions
class ApplicationContext:
"""
Class holding references to all required objects that need to be retrieved at runtime
"""
def __init__(self) -> None:
# avoid circular imports
from password_manager.controllers.create_database import CreateDatabaseController
from password_manager.controllers.login import LoginController
from password_manager.controllers.main_window import MainWindowController
self.database_manager: Optional[DatabaseManager] = None
self.metadata_repository: Optional[EncryptionMetadataRepository] = None
self.data_writer: Optional[EncryptedRecordWriter] = None
self.data_reader: Optional[EncryptedRecordReader] = None
self.integration_controller: Optional[IntegrationController] = None
self.create_database_controller: CreateDatabaseController = CreateDatabaseController(self)
self.login_controller: LoginController = LoginController(self)
self.main_window_controller: MainWindowController = MainWindowController(self)
self.password_generation_options: GenerationOptions = get_generation_options()
self.run_server = True
self.save_preferences = True
def initialize_data_access(self, key: bytes) -> None:
"""
Initialize objects for data encryption and decryption
"""
if self.database_manager is None:
raise ValueError("Database manager is not initialized")
self.data_writer = EncryptedRecordWriter(RecordRepository(self.database_manager), key)
self.data_reader = EncryptedRecordReader(RecordRepository(self.database_manager), key)
def initialize_database(self, db_path: str) -> None:
"""
Initialize database from filename
"""
self.database_manager = DatabaseManager(db_path)
self.metadata_repository = EncryptionMetadataRepository(self.database_manager)
Logger.info(f"Switched to database file {db_path}")
def get_data_writer(self) -> EncryptedRecordWriter:
if self.data_writer is None:
raise ValueError("Data writer is not initialized")
return self.data_writer
def get_data_reader(self) -> EncryptedRecordReader:
if self.data_reader is None:
raise ValueError("Data reader is not initialized")
return self.data_reader
def get_metadata_repository(self) -> EncryptionMetadataRepository:
if self.metadata_repository is None:
raise ValueError("Metadata repository is not initialized")
return self.metadata_repository
def get_database_manager(self) -> DatabaseManager:
if self.database_manager is None:
raise ValueError("Database manager is not initialized")
return self.database_manager
def get_integration_controller(self) -> 'IntegrationController':
if self.integration_controller is None:
raise ValueError("Integration controller is not initialized")
return self.integration_controller
def initialize_integration_server(self, key_file: str, cert_file: str, port: int) -> None:
self.integration_controller = IntegrationController(key_file, cert_file, port)
|
from facade_formatter import FacadeFormatter
class InterfaceDisplay:
def __init__(self):
self.formatter = FacadeFormatter()
def format_rows(self, rows):
formatted_rows = []
for row in rows:
formatted_rows.append(self.formatter.format_row(row))
return formatted_rows
def display_all_items(self, facade):
return self.format_rows(facade.get_all_items())
def display_overdue_items(self, facade):
return self.format_rows(facade.get_overdue_items())
def display_last_days_items(self, facade):
return self.format_rows(facade.get_last_days_items())
|
import numpy as np
havaDurumu = [[12,21,31],[6,17,18],[11,12,13]]
print(havaDurumu)
print('-----------------------------------------')
a = np.arange(15).reshape(3,5)
print(a)
print(type(a))
print("Dimension Count : " + str(a.ndim)) # Boyut
print('-----------------------------------------')
b = np.arange(10)
print(b.shape)
print(b)
print(type(b))
print("Dimension Count : " + str(b.ndim)) #Boyut
|
import logging
class _Decorators:
@classmethod
    def try_decorator(cls, fn):
        """Retry the wrapped coroutine function up to 5 times before giving up."""
        logger = logging.getLogger(__name__)
        async def decorated(*args, **kw):
            for _ in range(5):
                try:
                    # Await inside the try block so exceptions raised while the
                    # coroutine runs are caught here and trigger a retry.
                    value = await fn(*args, **kw)
                except Exception as error:
                    logger.error('The function "%s" failed\n%s', fn.__name__, error)
                    continue
                else:
                    break
            else:
                raise Exception(f'Function "{fn.__name__}" somehow did not work for 5 times')
            return value
        return decorated
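# Hedged usage sketch (not part of the original module; the coroutine below is hypothetical):
#
#     @_Decorators.try_decorator
#     async def flaky_call():
#         ...  # occasionally raises
#
#     # awaiting flaky_call() now retries up to 5 times before raising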
|
#!/usr/bin/env python
#
# freqresp_test.py - test frequency response functions
# RMM, 30 May 2016 (based on timeresp_test.py)
#
# This is a rudimentary set of tests for frequency response functions,
# including bode plots.
import unittest
import numpy as np
import control as ctrl
from control.statesp import StateSpace
from control.matlab import ss, tf, bode
from control.exception import slycot_check
import matplotlib.pyplot as plt
class TestFreqresp(unittest.TestCase):
def setUp(self):
self.A = np.matrix('1,1;0,1')
self.C = np.matrix('1,0')
self.omega = np.linspace(10e-2,10e2,1000)
def test_siso(self):
B = np.matrix('0;1')
D = 0
sys = StateSpace(self.A,B,self.C,D)
# test frequency response
frq=sys.freqresp(self.omega)
# test bode plot
bode(sys)
# Convert to transfer function and test bode
systf = tf(sys)
bode(systf)
def test_superimpose(self):
# Test to make sure that multiple calls to plots superimpose their
# data on the same axes unless told to do otherwise
# Generate two plots in a row; should be on the same axes
plt.figure(1); plt.clf()
ctrl.bode_plot(ctrl.tf([1], [1,2,1]))
ctrl.bode_plot(ctrl.tf([5], [1, 1]))
# Check to make sure there are two axes and that each axes has two lines
assert len(plt.gcf().axes) == 2
for ax in plt.gcf().axes:
# Make sure there are 2 lines in each subplot
assert len(ax.get_lines()) == 2
# Generate two plots as a list; should be on the same axes
plt.figure(2); plt.clf();
ctrl.bode_plot([ctrl.tf([1], [1,2,1]), ctrl.tf([5], [1, 1])])
# Check to make sure there are two axes and that each axes has two lines
assert len(plt.gcf().axes) == 2
for ax in plt.gcf().axes:
# Make sure there are 2 lines in each subplot
assert len(ax.get_lines()) == 2
# Generate two separate plots; only the second should appear
plt.figure(3); plt.clf();
ctrl.bode_plot(ctrl.tf([1], [1,2,1]))
plt.clf()
ctrl.bode_plot(ctrl.tf([5], [1, 1]))
# Check to make sure there are two axes and that each axes has one line
assert len(plt.gcf().axes) == 2
for ax in plt.gcf().axes:
# Make sure there is only 1 line in the subplot
assert len(ax.get_lines()) == 1
# Now add a line to the magnitude plot and make sure if is there
for ax in plt.gcf().axes:
if ax.get_label() == 'control-bode-magnitude':
break
ax.semilogx([1e-2, 1e1], 20 * np.log10([1, 1]), 'k-')
assert len(ax.get_lines()) == 2
def test_doubleint(self):
# 30 May 2016, RMM: added to replicate typecast bug in freqresp.py
A = np.matrix('0, 1; 0, 0');
B = np.matrix('0; 1');
C = np.matrix('1, 0');
D = 0;
sys = ss(A, B, C, D);
bode(sys);
@unittest.skipIf(not slycot_check(), "slycot not installed")
def test_mimo(self):
# MIMO
B = np.matrix('1,0;0,1')
D = np.matrix('0,0')
sysMIMO = ss(self.A,B,self.C,D)
frqMIMO = sysMIMO.freqresp(self.omega)
tfMIMO = tf(sysMIMO)
#bode(sysMIMO) # - should throw not implemented exception
#bode(tfMIMO) # - should throw not implemented exception
#plt.figure(3)
#plt.semilogx(self.omega,20*np.log10(np.squeeze(frq[0])))
#plt.figure(4)
#bode(sysMIMO,self.omega)
def suite():
    # Originally loaded TestTimeresp (copy-paste from timeresp_test.py); this
    # module defines TestFreqresp.
    return unittest.TestLoader().loadTestsFromTestCase(TestFreqresp)
if __name__ == '__main__':
unittest.main()
|
# --------------------------------------------------------------------------
# Copyright (c) <2017> <Lionel Garcia>
# BE-BI-PM, CERN (European Organization for Nuclear Research)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------------
#
# Not fully documented
import os
import glob
import sys
import time
import shutil
import numpy as np
import configparser
import h5py  # required by open_mat() below
import scipy.io as sio
import scipy.signal as signal
import PyQt5.QtCore as QtCore
from os import walk
#from tqdm import tqdm
from nptdms import TdmsFile
from scipy.interpolate import interp1d
from numpy import NaN, Inf, arange, isscalar, asarray, array
from lib import utils
def butter_lowpass(cutoff, fs, order=5):
"""
Matlab butter style filter design
"""
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = signal.butter(order, normal_cutoff, btype='low', analog=False)
return b, a
def butter_lowpass_filter(data, cutoff, fs, order=5):
"""
Low pass filtering of data using butter filter
"""
b, a = butter_lowpass(cutoff, fs, order=order)
y = signal.filtfilt(b, a, data)
return y
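# Hedged usage sketch (values are illustrative only, not taken from this project):
# keep content below 50 Hz of a trace sampled at 1 kHz using the 5th-order
# Butterworth designed above:
#
#     smoothed = butter_lowpass_filter(raw_trace, cutoff=50.0, fs=1000.0)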
def peakdet(v, delta, x=None):
"""
Peak detection algorithm based on pseudo-prominence criteria
Converted from MATLAB script at http://billauer.co.il/peakdet.html
Returns two arrays
function [maxtab, mintab]=peakdet(v, delta, x)
%PEAKDET Detect peaks in a vector
% [MAXTAB, MINTAB] = PEAKDET(V, DELTA) finds the local
% maxima and minima ("peaks") in the vector V.
% MAXTAB and MINTAB consists of two columns. Column 1
% contains indices in V, and column 2 the found values.
%
% With [MAXTAB, MINTAB] = PEAKDET(V, DELTA, X) the indices
% in MAXTAB and MINTAB are replaced with the corresponding
% X-values.
%
% A point is considered a maximum peak if it has the maximal
% value, and was preceded (to the left) by a value lower by
% DELTA.
Credits : Eli Billauer, 3.4.05 (Explicitly not copyrighted).
This function is released to the public domain; Any use is allowed.
"""
maxtab = []
mintab = []
if x is None:
x = arange(len(v))
v = asarray(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = Inf, -Inf
mnpos, mxpos = NaN, NaN
lookformax = True
for i in arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx - delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn + delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return array(maxtab), array(mintab)
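# Hedged example (illustrative only): on a slow sine wave, a delta of 0.5 keeps
# only the true crests and troughs and ignores small ripple:
#
#     t = arange(0, 400) * 0.05
#     maxtab, mintab = peakdet(np.sin(t), 0.5, x=t)
#     # maxtab[:, 0] holds the x locations of the maxima, maxtab[:, 1] their values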
def open_mat(matfile):
    """
    Load a mat file and return its structure as a dictionary
    """
    mat = h5py.File('data/' + matfile)
    arrays = {}
    for k, v in mat.items():
        arrays[k] = np.array(v)
    return arrays
def tdms_list_from_folder_sorted(TDMS_folder):
newname = TDMS_folder.split('file:///', 2)
if len(newname) == 2:
TDMS_folder = TDMS_folder.split('file:///', 2)[1]
if not os.path.exists(TDMS_folder):
return -1
tdms_files = glob.glob(TDMS_folder + '/*.tdms')
tdms_files.sort(key=os.path.getmtime)
if len(tdms_files) < 1:
tdms_files = -1
return tdms_files
def tdms_list_from_folder(TDMS_folder):
newname = TDMS_folder.split('file:///', 2)
if len(newname) == 2:
TDMS_folder = TDMS_folder.split('file:///', 2)[1]
if not os.path.exists(TDMS_folder):
return -1
tdms_files = []
for (dir_path, dir_names, file_names) in walk(TDMS_folder):
for files in file_names:
if files.endswith(('.tdms', '.TDMS')):
tdms_files.append(files)
if len(tdms_files) < 1:
tdms_files = -1
dir_path = -1
return -1
print()
return tdms_files, dir_path
def mat_list_from_folder_sorted(mat_folder):
new_name = mat_folder.split('file:///', 2)
if len(new_name) == 2:
mat_folder = mat_folder.split('file:///', 2)[1]
mat_files = glob.glob(mat_folder + '/*.mat')
mat_files.sort(key=os.path.getmtime)
return mat_files
def mat_list_from_folder(mat_folder):
new_name = mat_folder.split('file:///', 2)
if len(new_name) == 2:
mat_folder = mat_folder.split('file:///', 2)[1]
mat_files = []
for (dir_path, dir_names, file_names) in walk(mat_folder):
for files in file_names:
if files.endswith(('.mat', '.Mat')):
mat_files.append(files)
return mat_files, dir_path
class CreateRawDataFolder(QtCore.QThread):
"""
Creation of a folder containing the raw data saved as .mat files. Raw data
are saved in data/RAW_DATA and separated in respect to IN and OUT scans.
The .mat file contains:
- data_SA : Raw data comming from disk sensor A
- data_SB : Raw data comming from disk sensor B
- data_PD : Raw data comming from the photodiode
- time_PD : Global time (range from 0 to size of data_PD)
IN and OUT scans are separated following the parameters described in:
data/parameters.mat
"""
notifyProgress = QtCore.pyqtSignal(int)
notifyState = QtCore.pyqtSignal(str)
notifyFile = QtCore.pyqtSignal(str)
def __init__(self, TDMS_folder, destination_folder, parent = None):
self.TDMS_folder = TDMS_folder
self.destination_folder = destination_folder
super(CreateRawDataFolder, self).__init__(parent)
def run(self):
#parameter_file = utils.resource_path('data/parameters.cfg')
config = configparser.RawConfigParser()
config.read('data/parameters.cfg')
tdms_minimum_size = eval(config.get('OPS processing parameters', 'tdms_minimum_size'))
fatigue_test = config.get('OPS processing parameters', 'fatigue_test')
offset_center = eval(config.get('Geometry', 'stages_position_at_tank_center'))
newname = self.TDMS_folder.split('file:///', 2)
if len(newname) == 2:
TDMS_folder = self.TDMS_folder.split('file:///', 2)[1]
print('------- TDMS Conversion -------')
self.notifyState.emit('TDMS conversion')
time.sleep(0.3)
if os.path.exists(self.destination_folder + '/RAW_DATA'):
shutil.rmtree(self.destination_folder + '/RAW_DATA')
time.sleep(3)
os.makedirs(self.destination_folder + '/RAW_DATA')
os.makedirs(self.destination_folder + '/RAW_DATA/RAW_IN')
os.makedirs(self.destination_folder + '/RAW_DATA/RAW_OUT')
false = 0
#tdms_files, dir_path = tdms_list_from_folder(self.TDMS_folder)
tdms_files = tdms_list_from_folder_sorted(self.TDMS_folder)
# log.log_new_raw_data_extraction(self.TDMS_folder, speed)
i=0
for tdms_file in tdms_files: #tqdm(tdms_files):
self.notifyProgress.emit(int(i*100 / len(tdms_files)))
time.sleep(0.1)
self.notifyFile.emit(tdms_file)
time.sleep(0.1)
# We take only file that are well saved - parameter to be found in config file
#if os.path.getsize(dir_path + '/' + tdms_file) >= tdms_minimum_size:
if os.path.getsize(tdms_file) >= tdms_minimum_size:
if fatigue_test == 'yes':
laser_position = offset_center
scan_number = int(eval(tdms_file.split('__', 2)[1]))
else:
laser_position, scan_number = find_scan_info(tdms_file)
if laser_position == -1:
laser_position = offset_center
scan_number = int(eval(tdms_file.split('__', 2)[1]))
data__s_a_in, data__s_b_in, data__s_a_out, data__s_b_out, data__p_d_in, data__p_d_out, time__in, time__out = utils.extract_from_tdms(tdms_file)
if type(data__s_a_in) is not int:
sio.savemat(self.destination_folder + '/RAW_DATA/RAW_IN/SCAN__P'
+ str(laser_position)
+ '__S' + str(scan_number)
+ '____IN.mat',
dict(data_SA=data__s_a_in, data_SB=data__s_b_in, data_PD=data__p_d_in, start_t = time__in[0], INorOUT='IN'))
sio.savemat(self.destination_folder + '/RAW_DATA/RAW_OUT/SCAN__P'
+ str(laser_position)
+ '__S' + str(scan_number)
+ '____OUT.mat',
dict(data_SA=data__s_a_out, data_SB=data__s_b_out, data_PD=data__p_d_out, start_t = time__out[0], INorOUT='OUT'))
else:
false = false + 1
if false > 15:
self.parent().parent.LogDialog.add('High number of files identified as defective - Please check tdms_minimum_size_ in [LabView output] parameters', 'error')
i += 1
self.notifyState.emit('done convert')
time.sleep(0.1)
def resample(data_B, Timevector):
"""
    Resample data_B ([timeB][dataB]) onto the time base given by Timevector
    and return resampled_data_B ([timeA][resampled dataB])
"""
data_SB_interp = interp1d(data_B[0], data_B[1], bounds_error=False, fill_value=0)
data_B_R = np.ones((2, Timevector.size))
data_B_R[1] = data_SB_interp(Timevector)
data_B_R[0] = np.copy(Timevector)
return data_B_R
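# Hedged usage sketch (variable names are illustrative): put sensor B samples,
# stored as a 2-row array [time; value], onto the photodiode time base:
#
#     data_SB_resampled = resample(np.vstack((time_SB, values_SB)), time_PD)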
def find_scan_info(filename, position = '__P', scan = '__S', date = '____'):
"""
Find laser position and scan number by looking at the file name
"""
try:
file = filename.split(position, 2)
file = file[1].split(scan, 2)
laser_position = file[0]
file = file[1].split(date, 2)
scan_number = file[0]
except IndexError:
laser_position = -1
scan_number = -1
return laser_position, scan_number
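# Example with a hypothetical file name following the '__P<position>__S<scan>____<date>' pattern:
#
#     find_scan_info('SCAN__P85__S3____2017_11_21.tdms')  # -> ('85', '3')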
def create_processed_data_folder(raw_data_folder, destination_folder=None, force_overwrite='n'):
    if destination_folder is not None:
        # print('ola')
        # time.sleep(5)
        filename = os.path.basename(raw_data_folder)
        filename = filename.split('TDMS', 2)[0]
        folder_name = destination_folder + '/' + filename + ' PROCESSED'
        # print('ola')
        # time.sleep(5)
        if os.path.exists(folder_name):
            # use '==' for string comparison; 'is' checks identity, not equality
            if force_overwrite == 'y':
                shutil.rmtree(folder_name)
                time.sleep(3)
            elif force_overwrite == 'n':
                overwrite = input(
                    'You are about to overwrite data from ' + filename + ' previous processing. Do you want to continue ? [y/n]')
                if overwrite == 'y':
                    shutil.rmtree(folder_name)
                    time.sleep(3)
        os.makedirs(folder_name)
        if os.path.exists(destination_folder + '/PROCESSED_IN.mat'):
            shutil.move(destination_folder + '/PROCESSED_IN.mat', folder_name)
        else:
            print('PROCESSED_IN.mat does not exist in data')
        if os.path.exists(destination_folder + '/PROCESSED_OUT.mat'):
            shutil.move(destination_folder + '/PROCESSED_OUT.mat', folder_name)
        else:
            print('PROCESSED_OUT.mat does not exist in data')
        if os.path.exists(destination_folder + '/RAW_DATA'):
            shutil.rmtree(destination_folder + '/RAW_DATA')
    else:
        filename = os.path.basename(raw_data_folder)
        filename = filename.split('TDMS', 2)[0]
        folder_name = '../data/' + filename + ' PROCESSED'
        if os.path.exists(folder_name):
            if force_overwrite == 'y':
                shutil.rmtree(folder_name)
                time.sleep(3)
            elif force_overwrite == 'n':
                overwrite = input(
                    'You are about to overwrite data from ' + filename + ' previous processing. Do you want to continue ? [y/n]')
                if overwrite == 'y':
                    shutil.rmtree(folder_name)
                    time.sleep(3)
        os.makedirs(folder_name)
        if os.path.exists('data/PROCESSED_IN.mat'):
            shutil.move('data/PROCESSED_IN.mat', folder_name)
        else:
            print('PROCESSED_IN.mat does not exist in data')
        if os.path.exists('data/PROCESSED_OUT.mat'):
            shutil.move('data/PROCESSED_OUT.mat', folder_name)
        else:
            print('PROCESSED_OUT.mat does not exist in data')
        if os.path.exists('data/RAW_DATA'):
            shutil.rmtree('data/RAW_DATA')
def create_results_file_from_calibration(folder_name, center_IN, center_OUT, sigma_IN, sigma_OUT, f_parameters_IN,
f_parameters_OUT, residuals_IN, residuals_OUT, residuals_IN_origin,
residuals_OUT_origin, laser_position_IN, laser_position_OUT,
origin_file, residuals_IN_origin_mean, residuals_OUT_origin_mean,
laser_position_IN_mean, laser_position_OUT_mean, residuals_IN_mean,
residuals_OUT_mean):
saving_name = folder_name + '/calibration_results.mat'
sio.savemat(saving_name,
dict(center_IN=center_IN,
center_OUT=center_OUT,
sigma_IN=sigma_IN,
sigma_OUT=sigma_OUT,
f_parameters_IN=f_parameters_IN,
f_parameters_OUT=f_parameters_OUT,
residuals_IN=residuals_IN,
residuals_OUT=residuals_OUT,
residuals_IN_origin=residuals_IN_origin,
residuals_OUT_origin=residuals_OUT_origin,
laser_position_IN=laser_position_IN,
laser_position_OUT=laser_position_OUT,
f='b - c * np.cos(np.pi - x + a)',
origin_file=origin_file,
residuals_OUT_mean=residuals_OUT_mean,
residuals_IN_origin_mean=residuals_IN_origin_mean,
residuals_OUT_origin_mean=residuals_OUT_origin_mean,
laser_position_IN_mean=laser_position_IN_mean,
laser_position_OUT_mean=laser_position_OUT_mean))
def theoretical_laser_position(x, a, b, c):
    """
    Theoretical laser position with respect to the angular position x of the wire
    """
    return b - c * np.cos(np.pi - (x + a))
def inverse_theoretical_laser_position(y, a, b, c):
    """
    Inverse relation: theoretical angular position of the wire with respect to the laser position y
    """
    return np.pi - a - np.arccos((b - y) / c)
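# Consistency note: since y = b - c * cos(pi - (x + a)), solving for x gives
# x = pi - a - arccos((b - y) / c), so the two functions above are inverses of
# each other whenever pi - (x + a) lies in [0, pi].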
def python_lines(folder):
py_files = []
number_of_lines = 0
for (dir_path, dir_names, file_names) in walk(folder):
for files in file_names:
if files.endswith('.py'):
py_files.append(dir_path + '/' + files)
for py_file in py_files:
f = open(py_file, 'r')
lines = f.readlines()
number_of_lines += len(lines)
return number_of_lines
def reformate_path(path):
"""On certain editors (e.g. Spyder on Windows) a copy-paste of the path from the explorer includes a 'file:///'
attribute before the real path. This function removes this extra piece
Args:
path: original path
Returns:
Reformatted path
"""
_path = path.split('file:///', 2)
if len(_path) == 2:
new_path = path.split('file:///', 2)[1]
else:
new_path = path
return new_path
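# Example: reformate_path('file:///C:/Users/me/data') returns 'C:/Users/me/data',
# while a plain path such as '/home/me/data' is returned unchanged.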
def extract_from_tdms(path):
file = reformate_path(path)
tdms_file = TdmsFile(file)
config = configparser.RawConfigParser()
config.read('data/parameters.cfg')
sampling_frequency = eval(config.get('OPS processing parameters', 'sampling_frequency'))
try:
data__s_a_in = tdms_file.object('Picoscope Data', 'DISC PH. HOME dir IN').data
data__s_a_out = tdms_file.object('Picoscope Data', 'DISC PH. HOME dir HOME').data
data__s_b_in = tdms_file.object('Picoscope Data', 'DISC PH. IN dir IN').data
data__s_b_out = tdms_file.object('Picoscope Data', 'DISC PH. IN dir HOME').data
data__p_d_in = tdms_file.object('Picoscope Data', 'WIRE PH. dir IN').data
data__p_d_out = tdms_file.object('Picoscope Data', 'WIRE PH. dir HOME').data
TimeBoundaries = tdms_file.object('Picoscope Data','Data Description start-stop windows 1 and 2').data / sampling_frequency
time__in = (np.arange(0, data__s_a_in.size, 1)) / sampling_frequency + TimeBoundaries[0]
time__out = (np.arange(0, data__s_a_out.size, 1)) / sampling_frequency + TimeBoundaries[2]
except KeyError:
data__s_a_in = -1
data__s_a_out = -1
data__s_b_in = -1
data__s_b_out = -1
data__p_d_in = -1
data__p_d_out = -1
time__in = -1
time__out = -1
except IndexError:
data__s_a_in = -2
data__s_a_out = -2
data__s_b_in = -2
data__s_b_out = -2
data__p_d_in = -2
data__p_d_out = -2
time__in = -2
time__out = -2
return data__s_a_in, data__s_b_in, data__s_a_out, data__s_b_out, data__p_d_in, data__p_d_out, time__in, time__out
def get_info_from_PROCESSED(path):
speed = None
number_of_scans = None
first_position = None
last_position = None
step_size = None
scan_per_position = None
#parameter_file = resource_path('data/parameters.cfg')
config = configparser.RawConfigParser()
config.read('data/parameters.cfg')
tank_center = eval(config.get('Geometry', 'stages_position_at_tank_center'))
if not os.path.exists(path + '/PROCESSED_IN.mat'):
return -1
else:
data = sio.loadmat(path + '/PROCESSED_IN.mat', struct_as_record=False, squeeze_me=True)
laser_position = data['laser_position']
laser_position = - laser_position + tank_center
number_of_scans = laser_position.size
first_position = np.max(laser_position)
last_position = np.min(laser_position)
speed = np.round(np.max(data['speed_SA'][0]))
scan_per_position = np.where(laser_position == laser_position[0])[0].size
step_size = np.abs(np.mean(np.diff(np.unique(laser_position))))
return speed, number_of_scans, first_position, last_position, step_size, scan_per_position
def raise_dialog_error(text):
pass
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath("..")
return os.path.join(base_path, relative_path)
|
import unittest
from biothings_explorer.registry import Registry
from biothings_explorer.user_query_dispatcher import SingleEdgeQueryDispatcher
from .utils import get_apis
reg = Registry()
class TestSingleHopQuery(unittest.TestCase):
def test_anatomy2protein(self):
"""Test gene-protein"""
seqd = SingleEdgeQueryDispatcher(output_cls='Protein',
input_cls='AnatomicalEntity',
input_id='UBERON',
pred="related_to",
values='UBERON:0000013')
seqd.query()
self.assertTrue('PR:000004614' in seqd.G)
edges = seqd.G['UBERON:UBERON:0000013']['PR:000004614']
self.assertTrue('CORD Anatomy API' in get_apis(edges))
def test_anatomy2genomicentity(self):
"""Test gene-protein"""
seqd = SingleEdgeQueryDispatcher(output_cls='GenomicEntity',
input_cls='AnatomicalEntity',
pred="related_to",
input_id='UBERON',
values='UBERON:0000013')
seqd.query()
self.assertTrue('SO:0000140' in seqd.G)
self.assertTrue('SO:0000999' in seqd.G)
def test_anatomy2chemicalsubstance(self):
"""Test gene-genomic entity"""
seqd = SingleEdgeQueryDispatcher(output_cls='ChemicalSubstance',
input_cls='AnatomicalEntity',
input_id='UBERON',
values='UBERON:0000013')
seqd.query()
self.assertTrue('CHEBI:22563' in seqd.G)
edges = seqd.G['UBERON:UBERON:0000013']['CHEBI:22563']
self.assertTrue('CORD Anatomy API' in get_apis(edges))
def test_anatomy2gene(self):
"""Test gene-gene"""
seqd = SingleEdgeQueryDispatcher(output_cls='Gene',
input_cls='AnatomicalEntity',
input_id='UBERON',
values='UBERON:0000013')
seqd.query()
self.assertTrue('HIF1A' in seqd.G)
self.assertTrue('AR' in seqd.G)
edges = seqd.G['UBERON:UBERON:0000013']['HIF1A']
self.assertTrue('CORD Anatomy API' in get_apis(edges))
def test_anatomy2anatomy(self):
"""Test gene-anatomy"""
seqd = SingleEdgeQueryDispatcher(output_cls='AnatomicalEntity',
input_cls='AnatomicalEntity',
input_id='UBERON',
values='UBERON:0000013')
seqd.query()
self.assertTrue("UBERON:0000057" in seqd.G)
edges = seqd.G['UBERON:UBERON:0000013']['UBERON:0000057']
self.assertTrue('CORD Anatomy API' in get_apis(edges))
def test_anatomy2ma(self):
"""Test gene-molecular_activity"""
seqd = SingleEdgeQueryDispatcher(output_cls='MolecularActivity',
input_cls='AnatomicalEntity',
input_id='UBERON',
values='UBERON:0000013')
seqd.query()
self.assertTrue("MOP:0000568" in seqd.G)
edges = seqd.G['UBERON:UBERON:0000013']["MOP:0000568"]
self.assertTrue('CORD Anatomy API' in get_apis(edges))
def test_anatomy2bp(self):
"""Test gene-biological_process"""
seqd = SingleEdgeQueryDispatcher(output_cls='BiologicalProcess',
input_cls='AnatomicalEntity',
input_id='UBERON',
values='UBERON:0000013')
seqd.query()
self.assertTrue("lipid metabolic process".upper() in seqd.G)
edges = seqd.G['UBERON:UBERON:0000013']["lipid metabolic process".upper()]
self.assertTrue('CORD Anatomy API' in get_apis(edges))
def test_anatomy2cc(self):
"""Test gene-cellular_component"""
seqd = SingleEdgeQueryDispatcher(output_cls='CellularComponent',
input_cls='AnatomicalEntity',
input_id='UBERON',
values='UBERON:0000013')
seqd.query()
self.assertTrue('MEMBRANE' in seqd.G)
edges = seqd.G['UBERON:UBERON:0000013']['MEMBRANE']
self.assertTrue('CORD Anatomy API' in get_apis(edges))
def test_anatomy2cell(self):
"""Test gene-cell"""
seqd = SingleEdgeQueryDispatcher(output_cls='Cell',
input_cls='AnatomicalEntity',
input_id='UBERON',
values='UBERON:0000013')
seqd.query()
self.assertTrue('CL:0007011' in seqd.G)
def test_anatomy2disease(self):
"""Test gene-disease"""
seqd = SingleEdgeQueryDispatcher(output_cls='Disease',
input_cls='AnatomicalEntity',
input_id='UBERON',
values='UBERON:0000013')
seqd.query()
self.assertTrue('NEURONITIS' in seqd.G)
edges = seqd.G['UBERON:UBERON:0000013']['NEURONITIS']
self.assertTrue('CORD Anatomy API' in get_apis(edges))
|
from packetbeat import (BaseTest, TRANS_REQUIRED_FIELDS)
def check_event(event, expected):
for key in expected:
assert key in event, "key '{0}' not found in event".format(key)
assert event[key] == expected[key],\
"key '{0}' has value '{1}', expected '{2}'".format(key,
event[key],
expected[key])
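# Added example of how check_event behaves (the dicts are illustrative): a call
# like check_event({"type": "http", "status": "OK"}, {"status": "OK"}) passes,
# while a missing or mismatching key raises an AssertionError with the messages
# formatted above.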
class Test(BaseTest):
def test_unmatched_response(self):
"""
Unmatched response in stream
"""
self.render_config_template(
http_ports=[8080],
)
self.run_packetbeat(pcap="http_unmatched.pcap",
debug_selectors=["http", "httpdetailed"])
# Due to the unmatched response this has event.end and not event.start.
fields = [v for v in TRANS_REQUIRED_FIELDS if v != 'event.start'] + ['event.end']
objs = self.read_output(required_fields=fields)
assert len(objs) == 2
check_event(objs[0], {
"type": "http",
"status": "Error",
"http.response.status_code": 404,
"error.message": "Unmatched response"})
check_event(objs[1], {
"type": "http",
"http.response.status_code": 200,
"http.request.headers": {"content-length": 0},
"status": "OK"})
def test_unmatched_request(self):
"""
        Unmatched request due to the transaction timeout (1s in this test)
"""
self.render_config_template(
http_ports=[8080],
http_transaction_timeout="1s",
)
self.run_packetbeat(pcap="http_unmatched_timeout.pcap",
debug_selectors=["http", "httpdetailed"],
real_time=True)
objs = self.read_output()
print(objs)
assert len(objs) == 1
check_event(objs[0], {
"type": "http",
"status": "Error",
"query": "GET /something",
"error.message": "Unmatched request"})
|
###################################################################################
# Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###################################################################################
import os, sys, re, string, math, datetime, time, pkgutil
from optparse import OptionParser
import common_params as cp
import specific_params as sp
import common_functions as cf
import sqlite3
#import matplotlib.pyplot as plt
import numpy as np
results_app_table = {} # app, igid, bfm, outcome,
inj_types = ["inst","rf"]
###############################################################################
# inst_fraction contains the fraction of IADD, FADD, IMAD, FFMA, ISETP, etc.
# instructions per application
###############################################################################
inst_fraction = {}
inst_count = {}
def parse_results_file(app, igid, bfm, c):
if injection_mode == "interval":
results_f_name = sp.app_log_dir[app] + "results-igid" + str(igid) + ".bfm" + str(bfm) + ".interval.txt"
elif injection_mode == "pc":
results_f_name = sp.app_log_dir[app] + "results-igid" + str(igid) + ".bfm" + str(bfm) + "." +\
str(sp.NUM_INJECTIONS) + ".pc.txt"
else:
results_f_name = sp.app_log_dir[app] + "results-igid" + str(igid) + ".bfm" + str(bfm) + "." + str(sp.NUM_INJECTIONS) + ".txt"
try:
rf = open(results_f_name, "r")
except IOError:
print "app=%s, igid=%d, bfm=%d " %(app, igid, bfm),
print "NOT OPEN: " + results_f_name
return
suite = sp.apps[app][0]
print "file is " + rf.name
num_lines = 0
for line in rf: # for each injection site
# print "-------LINE: " + str(num_lines) + "---------------"
#Example line: _Z22bpnn_layerforward_CUDAPfS_S_S_ii-0-26605491-0.506809798834-0.560204950825:..:MOV:773546:17:0.759537:3:dmesg,
#kname-kcount-iid-allIId-opid-bid:pc:opcode:tid:injBID:runtime_sec:outcome_category:dmesg
words = line.split(":")
inj_site_info = words[0].split("-")
if injection_mode == "interval":
[interval_size, interval_id] = [int(inj_site_info[2]), int(inj_site_info[3])]
inst_id = int(inj_site_info[4])
opIdSeed = inj_site_info[5]
bIdSeed = inj_site_info[6]
[opcode, injBID, runtime, outcome] = \
[words[5], int(words[7]), float(words[8]), int(words[9])]
elif injection_mode == "pc":
[opIdSeed, bIdSeed, pc_text, pc_count] = [inj_site_info[3], inj_site_info[4],\
str(inj_site_info[5]), int(inj_site_info[6])]
[bb_id, global_inst_id, app_dyn_inst_id, opcode, tId, injBID, runtime, outcome] = \
[int(words[1]), int(words[2]), int(words[3]), words[4], int(words[5]), int(words[6]),\
float(words[7]), int(words[8])]
else:
[kname, invocation_index, opcode, injBID, runtime, outcome] = \
[inj_site_info[0], int(inj_site_info[1]), words[5], int(words[7]), float(words[8]), int(words[9])]
inst_id = int(inj_site_info[2])
opIdSeed = inj_site_info[3]
bIdSeed = inj_site_info[4]
# print "words[1]: "+ str(words[1]),
if injection_mode != "pc":
pc_text = '0x'+str(words[1])
bb_id = int(words[2])
global_inst_id = int(words[3])
app_dyn_inst_id = int(words[4])
tId = int(words[6])
if pc_text == '0x':
pc_text = "0x0"
# print "PC text: " + " => " + pc_text
# pc = int(pc_text,0)
if injection_mode == "interval":
c.execute('INSERT OR IGNORE INTO Results '\
'VALUES(NULL, \'%s\',\'%s\',%d,\'%s\', \'%s\', %d, %d,'\
' %d, %d, \'%s\', %d, %d, %d, \'%s\', %d, %d, %f, %d)'
%(suite,app, interval_size, opIdSeed, bIdSeed, igid, bfm,
interval_id, inst_id, pc_text, bb_id,
global_inst_id, app_dyn_inst_id, opcode, tId,
injBID, runtime, (outcome-1)))
elif injection_mode == "pc":
c.execute('INSERT OR IGNORE INTO Results '\
'VALUES(NULL, \'%s\', \'%s\', \'%s\', \'%s\', %d, %d, \'%s\', %d, %d, '\
'%d, %d, \'%s\', %d, %d, %f, %d)'
% (suite, app, opIdSeed, bIdSeed, igid, bfm, pc_text, pc_count, bb_id,
global_inst_id, app_dyn_inst_id, opcode, tId, injBID, runtime, (outcome-1)))
else:
c.execute('INSERT OR IGNORE INTO Results '\
'VALUES(NULL, \'%s\',\'%s\',\'%s\',\'%s\', \'%s\''\
', %d, %d, %d, %d, \'%s\', %d, %d, %d, \'%s\', %d, %d, %f, %d)'
%(suite,app, kname, opIdSeed, bIdSeed, igid, bfm,
invocation_index, inst_id, pc_text,
bb_id, global_inst_id, app_dyn_inst_id, opcode,
tId, injBID, runtime, (outcome-1)))
num_lines += 1
rf.close()
if num_lines == 0 and app in results_app_table and os.stat(sp.app_log_dir[app] +
"injection-list/igid" + str(igid) + ".bfm" + str(bfm) + "." +
str(sp.NUM_INJECTIONS) + ".txt").st_size != 0:
print "%s, igid=%d, bfm=%d not done" %(app, igid, bfm)
def parse_mem_accesses(app, c):
try:
rf = open(sp.app_dir[app] + "global_gpr_insts.txt", "r")
except IOError:
print "NOT OPEN: " + sp.app_dir[app] + "global_gpr_insts.txt"
return
suite = sp.apps[app][0]
print "file is " + rf.name
kName = ""
invocation_id=0
for line in rf: # for each mem access (or new kernel and invocation)
words = line.split(",")
if words[0] == "INTERVAL":
interval_id = int(words[1])
global_loads = int(words[3])
global_stores = int(words[5])
nonglobal_loads = int(words[7])
nonglobal_stores = int(words[9])
c.execute('INSERT OR IGNORE INTO MemAccesses '\
'VALUES(NULL, \'%s\',%d, %d, %d, %d, %d)'
%(app, interval_id, global_loads, global_stores,
nonglobal_loads, nonglobal_stores))
def parse_pupcs(app, c):
try:
rf = open(sp.app_dir[app] + "pupcs.txt", "r")
except IOError:
print "PUPC - NOT OPEN: " + sp.app_dir[app] + "pupcs.txt"
return
suite = sp.apps[app][0]
print "PUPC - file is " + rf.name
for line in rf: # for each mem access (or new kernel and invocation)
words = line.split(",")
if words[0] == "PUPC":
pupc = '0x' + words[1]
bb_id = int(words[3])
fnName = words[5]
opcode = words[7]
is_mem = int(words[9])
is_dest_reg = int(words[11])
weight = int(words[13])
num_gpr_srcs = int(words[15])
gpr_srcs = ""
gpr_srcs = ",".join(map(str, words[17:17+num_gpr_srcs]))
num_gpr_dsts = int(words[18+num_gpr_srcs])
gpr_dsts = ",".join(map(str, words[20+num_gpr_srcs:18+num_gpr_srcs+num_gpr_dsts]))
c.execute('INSERT OR IGNORE INTO PUPCs '\
'VALUES(NULL, \'%s\', \'%s\', '\
'%d,%d,\'%s\',\'%s\', %d, %d, %d, \'%s\', %d,\'%s\')'
%(app, pupc, weight, bb_id, fnName, opcode, is_mem,is_dest_reg, num_gpr_srcs,
gpr_srcs, num_gpr_dsts, gpr_dsts))
def parse_bb_interval_executions(app, c):
try:
rf = open(sp.app_dir[app] + "basic_block_insts.txt", "r")
except IOError:
print "NOT OPEN: " + sp.app_dir[app] + "basic_block_insts.txt"
return
suite = sp.apps[app][0]
print "file is " + rf.name
kName = ""
invocation_id=0
for line in rf: # for each mem access (or new kernel and invocation)
if "kernel," in line:
words = line.split(",")
kName = words[1]
invocation_id = int(words[3])
interval_size = int(words[5])
elif "INTERVAL," in line:
words = line.split(",")
interval_id = int(words[1])
num_gpr_insts = int(words[3])
c.execute('INSERT OR IGNORE INTO BBVIntervalSizes '\
'VALUES(NULL, \'%s\', %d, %d, %d);'
%(app, interval_size, interval_id,num_gpr_insts))
else:
words = line.split(",")
basic_block_id = int(words[0])
num_insts = int(words[2])
func_name = words[1]
inst_interval =int(words[3])
bb_num_execs = int(words[4])
num_succs = int(words[5])
succs = ",".join(map(str, words[6:6+num_succs]))
# print 'INSERT OR IGNORE INTO BBProfile '\
# 'VALUES(NULL, \'%s\',\'%s\', %d, %d, %d, %d, \'%s\', %d,'\
# '%d, \'%s\');' %(app, kName, invocation_id, inst_interval, basic_block_id, num_insts,
# func_name, bb_num_execs, num_succs, succs)
c.execute('INSERT OR IGNORE INTO BBProfile '\
'VALUES(NULL, \'%s\',\'%s\', %d, %d, %d, %d, \'%s\', %d,'\
'%d, \'%s\');'
%(app, kName, invocation_id, inst_interval, basic_block_id, num_insts,
func_name, bb_num_execs, num_succs, succs))
def parse_bb_executions(app, c):
try:
rf = open(sp.app_dir[app] + "bb_profile.txt", "r")
except IOError:
print "BB Profiling - NOT OPEN: " + sp.app_dir[app] + "bb_profile.txt"
return
suite = sp.apps[app][0]
print "BB Profiling - file is " + rf.name
kName = ""
invocation_id=0
for line in rf: # for each mem access (or new kernel and invocation)
if "kName," in line:
words = line.split(",")
kName = words[1]
continue
elif "BBId," in line:
words = line.split(",")
basic_block_id = int(words[1])
num_insts = int(words[5])
bb_num_execs = int(words[3])
is_entry = int(words[7])
is_exit = int(words[9])
num_succ = int(words[23])
succs = ",".join(map(str, words[25:25+num_succ]))
c.execute('INSERT OR IGNORE INTO BBExecutions '\
'VALUES(NULL, \'%s\',\'%s\', %d, %d, %d, %d, %d, %d, \'%s\');'
%(app, kName, basic_block_id, bb_num_execs, num_insts, is_entry,
is_exit, num_succ, succs))
def parse_path_executions(app, c):
try:
rf = open(sp.app_dir[app] + "path_profile.txt", "r")
except IOError:
print "NOT OPEN: " + sp.app_dir[app] + "path_profile.txt"
return
suite = sp.apps[app][0]
kInvocation = {}
print "file is " + rf.name
kName = ""
invocation_id=0
for line in rf: # for each path (or new kernel and invocation)
if "kernel," in line:
words = line.strip().split(",")
kName = words[1]
if kName not in kInvocation:
kInvocation[kName] = 0
else:
kInvocation[kName]+=1
elif "path_id," in line:
words = line.strip().split(",")
kernel = words[1]
invocation_id = kInvocation[kName]
path_id = int(words[3])
bb_start = int(words[5])
bb_end = int(words[7])
count = int(words[9])
c.execute('INSERT OR IGNORE INTO PathProfile '\
'VALUES(NULL, \'%s\',\'%s\', %d, %d, %d, %d, %d);'
%(app, kernel, invocation_id, path_id, bb_start, bb_end, count))
def parse_path_incs(app, c):
try:
rf = open(sp.app_dir[app] + "cfgs.txt", "r")
except IOError:
print "NOT OPEN: " + sp.app_dir[app] + "cfgs.txt"
return
suite = sp.apps[app][0]
print "file is " + rf.name
kName = ""
num_kernels = int(rf.readline())
print "num kernels in app: " + app + " is " + str(num_kernels)
for kernel in range(0,num_kernels): # for each path inc (or new kernel and invocation)
kname=rf.readline().strip()
num_incs=int(rf.readline())
for inc in range(0,num_incs):
[bb_from,bb_to,inc_value] = map(int, rf.readline().split())
c.execute('INSERT OR IGNORE INTO PathIncs '\
'VALUES(NULL, \'%s\',\'%s\', %d, %d, %d);'
%(app, kname, bb_from, bb_to, inc_value))
def parse_full_paths(app, c):
try:
rf = open(sp.app_dir[app] + "full_paths.txt", "r")
except IOError:
print "NOT OPEN: " + sp.app_dir[app] + "full_paths.txt"
return
print "FILE OPEN: " + rf.name
kInvocation = {}
kName = ""
invocation_id = 0
for line in rf:
if "kernel," in line:
words = line.strip().split(",")
kName = words[1]
if kName not in kInvocation:
kInvocation[kName] = 0
else:
kInvocation[kName] += 1
elif "WARP" in line:
words=line.strip().split("=>")
warp_id = int(words[0].split()[1])
full_path = words[1][:-1]
invocation_id = kInvocation[kName]
c.execute('INSERT OR IGNORE INTO FullPaths '\
'VALUES(NULL, \'%s\', \'%s\', \'%s\');'
% (app, kName, full_path))
full_path_id = c.execute('SELECT ID FROM FullPaths WHERE App IS \'%s\' AND kName IS \'%s\' '\
'AND FullPath IS \'%s\';'
%(app,kName, full_path)).fetchone()[0]
c.execute('INSERT OR IGNORE INTO FullPathExecs '\
'VALUES(NULL, \'%s\',\'%s\',%d,%d,%d);'
% (app, kName, invocation_id, warp_id, full_path_id))
def parse_fipoints(app, c):
try:
rf = open(sp.app_dir[app] + "interval.txt", "r")
except IOError:
print "NOT OPEN: " + sp.app_dir[app] + "interval.txt"
return
print "file is " + rf.name
next(rf)
next(rf)
next(rf)
for line in rf:
line = line.split(":")
[intervalId, intervalFreq] = [int(line[0]), float(line[2])]
c.execute('INSERT OR IGNORE INTO FiPointClusters '\
'VALUES(NULL, \'%s\', %d, %f);'
% (app, intervalId, intervalFreq))
###################################################################################
# Parse results files and populate summary to results table
###################################################################################
def parse_results_apps(typ,c):
for app in sp.parse_apps:
print app
if typ == "inst":
for igid in sp.parse_igid_bfm_map:
for bfm in sp.parse_igid_bfm_map[igid]:
parse_results_file(app, igid, bfm, c)
else:
for bfm in sp.parse_rf_bfm_list:
parse_results_file(app, "rf", bfm, c)
parse_mem_accesses(app, c)
parse_pupcs(app, c)
#parse_bb_executions(app,c)
parse_bb_interval_executions(app,c)
#parse_path_executions(app,c)
#parse_path_incs(app, c)
#parse_full_paths(app,c)
if injection_mode == "interval":
parse_fipoints(app, c)
def parse_options():
parser = OptionParser()
parser.add_option("-t", "--type", dest="inj_type",
help="Injection Type <inst/rf>", metavar="INJ_TYPE")
parser.add_option("-d", "--database", dest="database_file",
help="Database file where our data is")
parser.add_option("-a", "--app", dest="application",
help="Application to analyze")
parser.add_option("-m", "--mode", dest="injection_mode", default="normal",
help="Mode of injection - normal or interval (fipoint)")
# Create a database if one was not passed.
(options, args) = parser.parse_args()
if options.inj_type:
if options.inj_type not in inj_types:
parser.error("inj_type should be one of: %s - provided:%s"
% (inj_types,options.inj_type))
else:
options.inj_type = "inst"
if not options.database_file:
options.database_file = "data.db"
return options.database_file, options.inj_type, options.application, options.injection_mode
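# Added example invocation for the options parsed above (file names are
# illustrative):
#     python parse_results.py -t inst -d data.db -m interval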
def print_usage():
print "Usage: \n python parse_results.py rf/inst"
exit(1)
def CreateNewDB(c):
print "creating data DB"
if injection_mode == "interval":
c.execute('CREATE TABLE IF NOT EXISTS '\
'Results(ID INTEGER PRIMARY KEY, Suite TEXT, App TEXT, IntervalSize INTEGER, '\
'OpIdSeed TEXT, BIDSeed TEXT, IgId INTEGER, '\
                  'BFM INTEGER, IntervalId INTEGER, InstId INTEGER, PC TEXT, BBId '\
'INTEGER, GlobalInstId INTEGER, AppDynInstId INTEGER, '\
'Opcode TEXT, TId INTEGER, InjBId INTEGER, Runtime INTEGER, OutcomeID INTEGER)')
elif injection_mode == "pc":
c.execute('CREATE TABLE IF NOT EXISTS '\
'Results(ID INTEGER PRIMARY KEY, Suite TEXT, App TEXT, OpIdSeed TEXT, '\
'BIDSeed TEXT, IgId INTEGER, BFM INTEGER, PC TEXT, PCCount INTEGER, BBId INTEGER, '\
'GlobalInstId INTEGER, AppDynInstId INTEGER, Opcode TEXT, TId INTEGER, '\
'InjBId INTEGER, Runtime INTEGER, OutcomeId INTEGER)')
else:
c.execute('CREATE TABLE IF NOT EXISTS '\
'Results(ID INTEGER PRIMARY KEY, Suite TEXT, App TEXT, kName TEXT, '\
'OpIdSeed TEXT, BIDSeed TEXT, IgId INTEGER, '\
                  'BFM INTEGER, InvocationIdx INTEGER, InstId INTEGER, PC TEXT, BBId '\
'INTEGER, GlobalInstId INTEGER, AppDynInstId INTEGER, '\
'Opcode TEXT, TId INTEGER, InjBId INTEGER, Runtime INTEGER, OutcomeID INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'OutcomeMap(ID INTEGER PRIMARY KEY, Description TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'IgIdMap(ID INTEGER PRIMARY KEY, IDNum INTEGER, Description TEXT, App TEXT,'\
' InstCount INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'BFMMap(ID INTEGER PRIMARY KEY, IDNum INTEGER, Description TEXT, App TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'OpcodeMap(ID INTEGER PRIMARY KEY, Description TEXT, App TEXT, InstCount INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'Kernels(ID INTEGER PRIMARY KEY, Application TEXT, kName TEXT, '\
'InvocationIdx INTEGER, InvInstCount INTEGER, AppInstCount INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'MemAccesses(ID INTEGER PRIMARY KEY, App TEXT, IntervalId INTEGER, '\
'GlobalLoads INTEGER, GlobalStores INTEGER, '\
'NonGlobalLoads INTEGER, NonGlobalStores INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'BBProfile(ID INTEGER PRIMARY KEY, App TEXT, KName TEXT, '\
'InvocationIdx INTEGER, InstIntervalId INTEGER, '\
' BasicBlockId INTEGER, BBNumInsts INTEGER, FuncName TEXT, BBNumExecs INTEGER,'\
'numSuccs INTEGER, Succs TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'BBExecutions(ID INTEGER PRIMARY KEY, App TEXT, KName TEXT, '\
'BasicBlockId INTEGER, BBNumExecs INTEGER, BBNumInsts INTEGER,'\
'isEntry INTEGER, isExit INTEGER, numSuccs INTEGER, Succs TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'PathProfile(ID INTEGER PRIMARY KEY, App TEXT, kName TEXT, '\
'InvocationIdx INTEGER, PathId INTEGER, BBStart INTEGER,'\
'BBEnd INTEGER, Count INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'PathIncs(ID INTEGER PRIMARY KEY, App TEXT, kName TEXT, BBFrom INTEGER, '\
'BBTo INTEGER, Inc INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'FullPaths(ID INTEGER PRIMARY KEY, App TEXT, kName TEXT, FullPath TEXT, UNIQUE(App,kName,FullPath))')
c.execute('CREATE TABLE IF NOT EXISTS '\
'FullPathExecs(ID INTEGER PRIMARY KEY, App TEXT, kName TEXT, InvocationIdx INTEGER, '\
'WarpId INTEGER, FullPathID INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'BBVIntervalSizes(ID INTEGER PRIMARY KEY, App TEXT, IntervalSize INTEGER,'\
' IntervalId INTEGER, NumGPRInsts INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
              'PUPCs(ID INTEGER PRIMARY KEY, App TEXT, PUPC TEXT, Weight INTEGER, BBId INTEGER, '\
'FnName TEXT, Opcode TEXT, IsMem INTEGER, IsDestReg INTEGER, NumGPRSrcs INTEGER, GPRSrcs TEXT, '\
'NumGPRDsts INTEGER, GPRDsts TEXT)')
if injection_mode == "interval":
c.execute('CREATE TABLE IF NOT EXISTS '\
'FIPointClusters(ID INTEGER PRIMARY KEY, App TEXT, IntervalId INTEGER,'\
' IntervalFrequency INTEGER)')
######
# fill up OutcomeMap table
#########
for cat in range(cp.NUM_CATS-1):
# print "cat %d cat_str %s " % (cat, cp.CAT_STR[cat])
c.execute('INSERT OR IGNORE INTO OutcomeMap '\
'VALUES(%d, \'%s\')' % (cat, cp.CAT_STR[cat]))
##########
# Filling up IgIdMap
#########
for app in sp.apps:
countList = cf.read_inst_counts(sp.app_dir[app],app)
#print countList
for igid in range(cp.NUM_INST_TYPES):
igid_inst_count = 0
for l in countList:
igid_inst_count += int(l[igid+2])
c.execute('INSERT OR IGNORE INTO IgIdMap '\
'VALUES(NULL, %d, \'%s\', \'%s\',%d)' % (igid,cp.IGID_STR[igid], app, igid_inst_count))
##########
# Filling up BitFlipModelMap (BFMMap)
#########
for app in sp.apps:
countList = cf.read_inst_counts(sp.app_dir[app],app)
#print countList
for bfm in range(len(cp.EM_STR)):
c.execute('INSERT OR IGNORE INTO BFMMap '\
'VALUES(NULL, %d, \'%s\', \'%s\')'
%(bfm,cp.EM_STR[bfm], app))
###########
# Filling up OpcodeMap
###########
opcode_list_str = "ATOM:ATOMS:B2R:BAR:BFE:BFI:BPT:BRA:BRK:BRX:CAL:CAS:CCTL:CCTLL:CCTLT:CONT:CS2R:CSET:CSETP:DADD:DEPBAR:DFMA:DMNMX:DMUL:DSET:DSETP:EXIT:F2F:F2I:FADD:FADD32I:FCHK:FCMP:FFMA:FFMA32I:FLO:FMNMX:FMUL:FMUL32I:FSET:FSETP:FSWZ:FSWZADD:I2F:I2I:IADD:IADD3:IADD32I:ICMP:IMAD:IMAD32I:IMADSP:IMNMX:IMUL:IMUL32I:ISAD:ISCADD:ISCADD32I:ISET:ISETP:JCAL:JMX:LD:LDC:LDG:LDL:LDLK:LDS:LDSLK:LDS_LDU:LDU:LD_LDU:LEA:LEPC:LONGJMP:LOP:LOP3:LOP32I:MEMBAR:MOV:MUFU:NOP:P2R:PBK:PCNT:PEXIT:PLONGJMP:POPC:PRET:PRMT:PSET:PSETP:R2B:R2P:RED:RET:RRO:S2R:SEL:SHF:SHFL:SHL:SHR:SSY:ST:STG:STL:STS:STSCUL:STSUL:STUL:SUATOM:SUBFM:SUCLAMP:SUEAU:SULD:SULDGA:SULEA:SUQ:SURED:SUST:SUSTGA:SYNC:TEX:TEXDEPBAR:TEXS:TLD:TLD4:TLD4S:TLDS:TXQ:UNMAPPED:USER_DEFINED:VMNMX:VOTE:XMAD"
opcode_list = opcode_list_str.split(":")
# print "OPCODE LIST: " + str(opcode_list)
for app in sp.apps:
countList = cf.read_inst_counts(sp.app_dir[app], app)
total_count = cf.get_total_counts(countList)
for i in range(len(opcode_list)):
c.execute('INSERT OR IGNORE INTO OpcodeMap '\
'VALUES(NULL, \'%s\', \'%s\',%d)' %(opcode_list[i], app, total_count[i+cp.NUM_INST_TYPES+1]))
# print "len total counts " + str(len(total_count))
# print "len opcode_list: " + str(len(opcode_list))
for app in sp.apps:
# print "App: " + app
countList = cf.read_inst_counts(sp.app_dir[app], app)
#print "countList: " + str(countList)
for l in countList:
total_inst_count = 0
for i in range(cp.NUM_INST_TYPES+3, len(countList[0])): # 3: 1 for kname, 1 for kcount and 1 for WILL NOT EXECUTE instruction count
total_inst_count += int(l[i])
kernel_name = str(l[0])
invocation_idx = int(l[1])
app_inst_count = cf.get_total_insts(countList)
c.execute('INSERT OR IGNORE INTO Kernels '\
'VALUES(NULL, \'%s\',\'%s\', %d, %d, %d)'
% (app, kernel_name, invocation_idx, total_inst_count, app_inst_count))
###############################################################################
# Main function that parses the result files and stores the aggregated data in
# the sqlite database
###############################################################################
def main():
global injection_mode
db_file, inj_type, application, injection_mode = parse_options()
print "DB file is : " + db_file
conn = sqlite3.connect(db_file)
c = conn.cursor()
if db_file == "data.db":
CreateNewDB(c)
# total_count = cf.get_total_insts(countList)
parse_results_apps(inj_type, c) # parse sassifi results into local data structures
conn.commit()
conn.close()
if __name__ == "__main__":
main()
|
from argparse import ArgumentParser
from functools import partial
from glob import glob
import h5py
import numpy as np
import os
import sys
from .reader import DataCollection, H5File, FileAccess
class ValidationError(Exception):
def __init__(self, problems):
self.problems = problems
def __str__(self):
lines = []
for prob in self.problems:
lines.extend(['', prob['msg']])
for k, v in sorted(prob.items()):
if k != 'msg':
lines.append(" {}: {}".format(k, v))
return '\n'.join(lines)
class FileValidator:
def __init__(self, file: FileAccess):
self.file = file
self.filename = file.filename
self.problems = []
def validate(self):
problems = self.run_checks()
if problems:
raise ValidationError(problems)
def run_checks(self):
self.problems = []
self.check_indices()
self.check_trainids()
return self.problems
def record(self, msg, **kwargs):
self.problems.append(dict(msg=msg, file=self.filename, **kwargs))
def check_trainids(self):
ds_path = 'INDEX/trainId'
train_ids = self.file.file[ds_path][:]
if (train_ids == 0).any():
first0 = train_ids.tolist().index(0)
if not (train_ids[first0:] == 0).all():
self.record(
'Zeroes in trainId index before last train ID', dataset=ds_path
)
nonzero_tids = train_ids[train_ids != 0]
else:
nonzero_tids = train_ids
if len(nonzero_tids) > 1:
non_incr = (nonzero_tids[1:] <= nonzero_tids[:-1]).nonzero()[0]
if non_incr.size > 0:
pos = non_incr[0]
self.record(
'Train IDs are not strictly increasing, e.g. at {} ({} >= {})'.format(
pos, nonzero_tids[pos], nonzero_tids[pos + 1]
),
dataset=ds_path,
)
def check_indices(self):
for src in self.file.instrument_sources:
src_groups = set()
for key in self.file.get_keys(src):
ds_path = 'INSTRUMENT/{}/{}'.format(src, key.replace('.', '/'))
group = key.split('.', 1)[0]
src_groups.add((src, group))
first, count = self.file.get_index(src, group)
data_dim0 = self.file.file[ds_path].shape[0]
if np.any((first + count) > data_dim0):
max_end = (first + count).max()
self.record(
'Index referring to data ({}) outside dataset ({})'.format(
max_end, data_dim0
),
dataset=ds_path,
)
for src, group in src_groups:
record = partial(self.record, dataset='INDEX/{}/{}'.format(src, group))
first, count = self.file._read_index(src, group)
check_index_contiguous(first, count, record)
def check_index_contiguous(firsts, counts, record):
probs = []
if firsts[0] != 0:
record("Index doesn't start at 0")
gaps = firsts[1:].astype(np.int64) - (firsts + counts)[:-1]
gap_ixs = (gaps > 0).nonzero()[0]
if gap_ixs.size > 0:
pos = gap_ixs[0]
record("Gaps ({}) in index, e.g. at {} ({} + {} < {})".format(
gap_ixs.size, pos, firsts[pos], counts[pos], firsts[pos+1]
))
overlap_ixs = (gaps < 0).nonzero()[0]
if overlap_ixs.size > 0:
pos = overlap_ixs[0]
record("Overlaps ({}) in index, e.g. at {} ({} + {} > {})".format(
overlap_ixs.size, pos, firsts[pos], counts[pos], firsts[pos + 1]
))
return probs
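# Added worked example for check_index_contiguous (made-up index arrays): with
# firsts = [0, 5, 12] and counts = [5, 5, 3] the second block ends at 10 but the
# third starts at 12, so record() is called with "Gaps (1) in index, e.g. at 1
# (5 + 5 < 12)"; contiguous arrays such as firsts = [0, 5, 10] report nothing.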
class RunValidator:
def __init__(self, run_dir: str):
files = []
self.files_excluded = []
self.run_dir = run_dir
for path in glob(os.path.join(run_dir, '*.h5')):
try:
fa = FileAccess(h5py.File(path, 'r'))
except Exception as e:
self.files_excluded.append((path, e))
else:
files.append(fa)
self.run = DataCollection(files)
self.problems = []
def validate(self):
problems = self.run_checks()
if problems:
raise ValidationError(problems)
def run_checks(self):
self.problems = []
self.check_files_openable()
self.check_files()
return self.problems
def check_files_openable(self):
for path, err in self.files_excluded:
self.problems.append(dict(msg="Could not open file", file=path, error=err))
if not self.run.files:
self.problems.append(
dict(msg="No usable files found", directory=self.run_dir)
)
def check_files(self):
for f in self.run.files:
fv = FileValidator(f)
self.problems.extend(fv.run_checks())
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
ap = ArgumentParser(prog='karabo-data-validate')
ap.add_argument('path', help="HDF5 file or run directory of HDF5 files.")
args = ap.parse_args(argv)
path = args.path
if os.path.isdir(path):
print("Checking run directory:", path)
validator = RunValidator(path)
else:
print("Checking file:", path)
validator = FileValidator(H5File(path).files[0])
try:
validator.validate()
print("No problems found")
except ValidationError as ve:
print("Validation failed!")
print(str(ve))
return 1
if __name__ == '__main__':
sys.exit(main())
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
from Load_Model import *
import easyocr
detection_threshold = 0.4
region_threshold = 0.6
category_index = label_map_util.create_category_index_from_labelmap(files['LABELMAP'])
def filter_text(region, ocr_result, region_threshold):
rectangle_size = region.shape[0]*region.shape[1]
plate = []
for result in ocr_result:
length = np.sum(np.subtract(result[0][1], result[0][0]))
height = np.sum(np.subtract(result[0][2], result[0][1]))
if length*height / rectangle_size > region_threshold:
plate.append(result[1])
return plate
def ocr_it(image, detections):
    # Scores, boxes and classes above threshold
scores = list(filter(lambda x: x> detection_threshold, detections['detection_scores']))
boxes = detections['detection_boxes'][:len(scores)]
classes = detections['detection_classes'][:len(scores)]
# Full image dimensions
width = image.shape[1]
height = image.shape[0]
    # Apply ROI filtering and OCR. Note the early return inside the loop: only
    # the first detection above the threshold is actually OCR'd.
for idx, box in enumerate(boxes):
roi = box*[height, width, height, width]
region = image[int(roi[0]):int(roi[2]),int(roi[1]):int(roi[3])]
reader = easyocr.Reader(['en'])
ocr_result = reader.readtext(region)
text = filter_text(region, ocr_result, region_threshold)
print(text)
return text, region
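# Added minimal usage sketch; `run_inference` is a hypothetical helper standing
# in for whatever Load_Model exposes to turn a frame into a `detections` dict
# with 'detection_scores', 'detection_boxes' and 'detection_classes':
#     image = cv2.imread('car.jpg')
#     detections = run_inference(image)
#     text, region = ocr_it(image, detections)
#     plt.imshow(cv2.cvtColor(region, cv2.COLOR_BGR2RGB)); plt.show()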
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='pysensu-yelp',
version='0.4.4',
provides=['pysensu_yelp'],
description='Emits Yelp-flavored Sensu events to a Sensu Client',
url='https://github.com/Yelp/pysensu-yelp',
author='Yelp Operations Team',
author_email='operations@yelp.com',
packages=find_packages(exclude=['tests']),
install_requires=['six'],
license='Copyright Yelp 2014, all rights reserved',
)
|
from grammars.grammars import lang_dict
from treemanager import TreeManager
from grammar_parser.gparser import MagicTerminal
from utils import KEY_UP as UP, KEY_DOWN as DOWN, KEY_LEFT as LEFT, KEY_RIGHT as RIGHT
from grammars.grammars import EcoFile
import pytest
calc = lang_dict["Basic Calculator"]
python = lang_dict["Python 2.7.5"]
php = lang_dict["PHP"]
class Test_MultiTextNode:
def setup_class(cls):
grm = EcoFile("MultiTest", "test/calcmultistring.eco", "Multi")
parser, lexer = grm.load()
cls.lexer = lexer
cls.parser = parser
cls.parser.init_ast()
cls.ast = cls.parser.previous_version
cls.treemanager = TreeManager()
cls.treemanager.add_parser(cls.parser, cls.lexer, calc.name)
cls.treemanager.set_font_test(7, 17) # hard coded. PyQt segfaults in test suite
def reset(self):
self.parser.reset()
self.treemanager = TreeManager()
self.treemanager.add_parser(self.parser, self.lexer, calc.name)
self.treemanager.set_font_test(7, 17)
def test_simple(self):
self.reset()
self.treemanager.key_normal("1")
self.treemanager.key_normal("+")
self.treemanager.key_normal("\"")
self.treemanager.key_normal("abc")
assert self.parser.last_status == False
self.treemanager.key_normal("\"")
assert self.parser.last_status == True
def test_newline(self):
self.reset()
self.treemanager.key_normal("1")
self.treemanager.key_normal("+")
self.treemanager.key_normal("\"")
self.treemanager.key_normal("abc")
self.treemanager.key_normal("\"")
assert self.parser.last_status == True
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.key_normal("\r")
assert self.parser.last_status == True
def test_doublenewline(self):
self.reset()
self.treemanager.key_normal("1")
self.treemanager.key_normal("+")
self.treemanager.key_normal("\"")
self.treemanager.key_normal("abcd")
self.treemanager.key_normal("\"")
assert self.parser.last_status == True
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.key_normal("\r")
assert self.parser.last_status == True
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.key_normal("\r")
assert self.parser.last_status == True
def test_doublenewline_delete(self):
self.reset()
self.treemanager.key_normal("1")
self.treemanager.key_normal("+")
self.treemanager.key_normal("\"")
self.treemanager.key_normal("abcd")
self.treemanager.key_normal("\"")
assert self.parser.last_status == True
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.key_normal("\r")
assert self.parser.last_status == True
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.key_normal("\r")
bos = self.parser.previous_version.parent.children[0]
assert bos.next_term.next_term.next_term.children[0].symbol.name == "\"ab"
assert bos.next_term.next_term.next_term.children[1].symbol.name == "\r"
assert bos.next_term.next_term.next_term.children[2].symbol.name == "c"
assert bos.next_term.next_term.next_term.children[3].symbol.name == "\r"
assert bos.next_term.next_term.next_term.children[4].symbol.name == "d\""
self.treemanager.cursor_movement(DOWN)
self.treemanager.key_backspace()
assert bos.next_term.symbol.name == "1"
assert bos.next_term.next_term.symbol.name == "+"
assert bos.next_term.next_term.next_term.children[0].symbol.name == "\"ab"
assert bos.next_term.next_term.next_term.children[1].symbol.name == "\r"
assert bos.next_term.next_term.next_term.children[2].symbol.name == "cd\""
assert len(bos.next_term.next_term.next_term.children) == 3
assert bos.next_term.next_term.next_term.children[2].next_term is None
assert self.parser.last_status == True
class Test_MultiTextNodePython:
def setup_class(cls):
parser, lexer = python.load()
cls.lexer = lexer
cls.parser = parser
cls.parser.init_ast()
cls.ast = cls.parser.previous_version
cls.treemanager = TreeManager()
cls.treemanager.add_parser(cls.parser, cls.lexer, python.name)
cls.treemanager.set_font_test(7, 17) # hard coded. PyQt segfaults in test suite
def reset(self):
self.parser.reset()
self.treemanager = TreeManager()
self.treemanager.add_parser(self.parser, self.lexer, calc.name)
self.treemanager.set_font_test(7, 17)
def test_simple(self):
self.reset()
inputstring = "x = \"\"\"abcdef\"\"\""
for c in inputstring:
self.treemanager.key_normal(c)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.key_normal("\r")
def test_relex_over_indentation(self):
self.reset()
inputstring = """class X:
x = 1
def x():
pass
y = 2"""
self.treemanager.import_file(inputstring)
assert self.parser.last_status == True
self.treemanager.cursor_movement(DOWN)
self.treemanager.cursor_movement(DOWN)
self.treemanager.cursor_movement(DOWN)
self.treemanager.key_end()
assert self.treemanager.cursor.node.symbol.name == "pass"
self.treemanager.key_normal("\"")
self.treemanager.key_normal("\"")
self.treemanager.key_normal("\"")
self.treemanager.cursor_movement(UP)
self.treemanager.cursor_movement(UP)
self.treemanager.key_end()
assert self.treemanager.cursor.node.symbol.name == "1"
self.treemanager.cursor_movement(LEFT)
self.treemanager.key_normal("\"")
self.treemanager.key_normal("\"")
self.treemanager.key_normal("\"")
assert self.parser.last_status == True
def test_indentation_to_string_and_back(self):
self.reset()
inputstring = """class X:
a
b"""
self.treemanager.import_file(inputstring)
assert self.parser.last_status == True
self.treemanager.cursor_movement(DOWN)
self.treemanager.cursor_movement(DOWN)
self.treemanager.key_end()
self.treemanager.key_normal("\"")
self.treemanager.key_normal("\"")
self.treemanager.key_normal("\"")
self.treemanager.cursor_movement(UP)
self.treemanager.cursor_movement(UP)
self.treemanager.key_home()
self.treemanager.key_normal("\"")
self.treemanager.key_normal("\"")
self.treemanager.key_normal("\"")
assert self.parser.last_status == True
self.treemanager.cursor_movement(DOWN)
self.treemanager.cursor_movement(DOWN)
self.treemanager.key_end()
self.treemanager.key_backspace()
assert self.parser.last_status == False
def test_remember_open_lexing_states(self):
self.reset()
inputstring = """x = 1
y = 2"""
self.treemanager.import_file(inputstring)
assert self.parser.last_status == True
self.treemanager.key_end()
self.treemanager.cursor_movement(LEFT)
self.treemanager.key_normal("\"")
#assert self.parser.last_status == False # unfinished lexing jobs
self.treemanager.key_end()
self.treemanager.key_normal("\"")
assert self.parser.last_status == True
def test_triplequote_string(self):
self.reset()
inputstring = 'x="""abc"""'
for i in inputstring:
self.treemanager.key_normal(i)
bos = self.parser.previous_version.parent.children[0]
assert bos.next_term.symbol.name == "x"
assert bos.next_term.next_term.symbol.name == "="
assert bos.next_term.next_term.next_term.symbol.name == '"""abc"""'
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.key_normal("\"")
assert bos.next_term.symbol.name == "x"
assert bos.next_term.next_term.symbol.name == "="
assert bos.next_term.next_term.next_term.symbol.name == '""'
assert bos.next_term.next_term.next_term.next_term.symbol.name == '"ab"'
assert bos.next_term.next_term.next_term.next_term.lookback == 1
assert bos.next_term.next_term.next_term.next_term.next_term.symbol.name == 'c'
assert bos.next_term.next_term.next_term.next_term.next_term.lookback == 2
assert bos.next_term.next_term.next_term.next_term.next_term.next_term.symbol.name == '""'
assert bos.next_term.next_term.next_term.next_term.next_term.next_term.next_term.symbol.name == '"'
self.treemanager.key_normal("\"")
assert bos.next_term.symbol.name == "x"
assert bos.next_term.next_term.symbol.name == "="
assert bos.next_term.next_term.next_term.symbol.name == '""'
assert bos.next_term.next_term.next_term.next_term.symbol.name == '"ab"'
assert bos.next_term.next_term.next_term.next_term.lookback == 1
assert bos.next_term.next_term.next_term.next_term.next_term.symbol.name == '"c"'
assert bos.next_term.next_term.next_term.next_term.next_term.lookback == 2
assert bos.next_term.next_term.next_term.next_term.next_term.next_term.symbol.name == '""'
self.treemanager.key_normal("\"")
#assert self.parser.last_status == False
assert bos.next_term.symbol.name == "x"
assert bos.next_term.next_term.symbol.name == "="
assert bos.next_term.next_term.next_term.symbol.name == '"""ab"""'
assert bos.next_term.next_term.next_term.next_term.symbol.name == 'c'
assert bos.next_term.next_term.next_term.next_term.next_term.symbol.name == '""'
assert bos.next_term.next_term.next_term.next_term.next_term.next_term.symbol.name == '"'
self.treemanager.key_end()
self.treemanager.key_backspace()
assert bos.next_term.symbol.name == "x"
assert bos.next_term.next_term.symbol.name == "="
assert bos.next_term.next_term.next_term.symbol.name == '"""ab"""'
assert bos.next_term.next_term.next_term.next_term.symbol.name == 'c'
assert bos.next_term.next_term.next_term.next_term.next_term.symbol.name == '""'
def test_ignore_nonlbox_x80(self):
self.reset()
inputstring = 'x="""ab\x80c"""'
for i in inputstring:
self.treemanager.key_normal(i)
bos = self.parser.previous_version.parent.children[0]
assert bos.next_term.symbol.name == "x"
assert bos.next_term.next_term.symbol.name == "="
assert bos.next_term.next_term.next_term.symbol.name == '"""ab\x80c"""'
def test_multinode_from_the_start(self):
self.reset()
inputstring = '''x="""a\rbc"""'''
for i in inputstring:
self.treemanager.key_normal(i)
bos = self.parser.previous_version.parent.children[0]
assert bos.next_term.symbol.name == "x"
assert bos.next_term.next_term.symbol.name == "="
assert bos.next_term.next_term.next_term.symbol.name == '<Multinode>'
def test_multinode_and_nonlbox_x80(self):
self.reset()
inputstring = '''x="""a\x80bc"""'''
for i in inputstring:
self.treemanager.key_normal(i)
bos = self.parser.previous_version.parent.children[0]
assert bos.next_term.symbol.name == "x"
assert bos.next_term.next_term.symbol.name == "="
assert bos.next_term.next_term.next_term.symbol.name == '"""a\x80bc"""'
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.key_normal("\r")
bos = self.parser.previous_version.parent.children[0]
assert bos.next_term.symbol.name == "x"
assert bos.next_term.next_term.symbol.name == "="
assert bos.next_term.next_term.next_term.symbol.name == '<Multinode>'
def test_multinode_nonlbox_and_lbox(self):
self.reset()
inputstring = '''x="""a\x80bc"""'''
for i in inputstring:
self.treemanager.key_normal(i)
bos = self.parser.previous_version.parent.children[0]
assert bos.next_term.symbol.name == "x"
assert bos.next_term.next_term.symbol.name == "="
assert bos.next_term.next_term.next_term.symbol.name == '"""a\x80bc"""'
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.add_languagebox(lang_dict["SQL (Dummy)"])
self.treemanager.key_normal("S")
bos = self.parser.previous_version.parent.children[0]
assert bos.next_term.symbol.name == "x"
assert bos.next_term.next_term.symbol.name == "="
assert bos.next_term.next_term.next_term.symbol.name == '<Multinode>'
multi = bos.next_term.next_term.next_term
assert multi.children[0].symbol.name == "\"\"\"a\x80b"
assert type(multi.children[1].symbol) is MagicTerminal
assert multi.children[2].symbol.name == "c\"\"\""
def test_multinode_merged_first(self):
self.reset()
inputstring = '''"""a\rbc"""'''
for i in inputstring:
self.treemanager.key_normal(i)
for i in 'def"""':
self.treemanager.key_normal(i)
bos = self.parser.previous_version.parent.children[0]
assert bos.next_term.symbol.name == "<Multinode>"
assert bos.next_term.next_term.symbol.name == 'def'
assert bos.next_term.next_term.next_term.symbol.name == '""'
assert bos.next_term.next_term.next_term.next_term.symbol.name == '"'
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.key_backspace()
self.treemanager.key_backspace()
self.treemanager.key_backspace()
assert bos.next_term.symbol.name == "<Multinode>"
assert bos.next_term.next_term.symbol.name == "NEWLINE"
assert bos.next_term.next_term.next_term.symbol.name == "eos"
def test_multinode_string_bug(self):
self.reset()
inputstring = '''x="abc"'''
for i in inputstring:
self.treemanager.key_normal(i)
self.treemanager.cursor_movement(LEFT)
self.treemanager.add_languagebox(lang_dict["SQL (Dummy)"])
self.treemanager.key_normal("x")
bos = self.parser.previous_version.parent.children[0]
x = bos.next_term
assert x.symbol.name == "x"
eq = x.next_term
assert eq.symbol.name == "="
multi = eq.next_term
assert multi.lookup == "dstring"
assert multi.symbol.name == "<Multinode>"
self.treemanager.cursor_movement(RIGHT)
self.treemanager.key_backspace()
        # removing the ending quote results in a lexing error,
# so the multinode remains
assert eq.next_term.symbol.name == "<Multinode>"
# now remove the first quote, which should lead to the destruction of
# the multinode
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.cursor_movement(LEFT)
self.treemanager.key_backspace()
assert eq.next_term.symbol.name == "abc"
def test_multinode_string_bug2(self):
self.reset()
inputstring = '''x="abc"'''
for i in inputstring:
self.treemanager.key_normal(i)
self.treemanager.cursor_movement(LEFT)
self.treemanager.add_languagebox(lang_dict["SQL (Dummy)"])
self.treemanager.key_normal("x")
self.treemanager.leave_languagebox()
self.treemanager.key_normal("z")
bos = self.parser.previous_version.parent.children[0]
x = bos.next_term
assert x.symbol.name == "x"
eq = x.next_term
assert eq.symbol.name == "="
multi = eq.next_term
assert multi.children[0].symbol.name == "\"abc"
assert multi.children[1].symbol.name == "<SQL (Dummy)>"
assert multi.children[2].symbol.name == "z\""
class Test_MultiTextNodePHP:
def setup_class(cls):
parser, lexer = php.load()
cls.lexer = lexer
cls.parser = parser
cls.parser.init_ast()
cls.ast = cls.parser.previous_version
cls.treemanager = TreeManager()
cls.treemanager.add_parser(cls.parser, cls.lexer, php.name)
cls.treemanager.set_font_test(7, 17) # hard coded. PyQt segfaults in test suite
def test_paste_comment(self):
paste = """$shake_error_codes = array( 'empty_password', 'empty_email', 'invalid_email', 'invalidcombo', 'empty_username', 'invalid_username', 'incorrect_password' );
/**
* Filters the error codes array for shaking the login form.
*
* @since 3.0.0
*
* @param array $shake_error_codes Error codes that shake the login form.
*/
$shake_error_codes = apply_filters( 'shake_error_codes', $shake_error_codes );"""
self.treemanager.pasteText(paste)
|
"""
A test spanning all the capabilities of all the serializers.
This module sets up a model for each model field type
(except for image types, because of the Pillow dependency).
"""
from django.db import models
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation
)
from django.contrib.contenttypes.models import ContentType
# The following classes are for testing basic data
# marshalling, including NULL values, where allowed.
class BinaryData(models.Model):
data = models.BinaryField(null=True)
class BooleanData(models.Model):
data = models.BooleanField(default=False)
class CharData(models.Model):
data = models.CharField(max_length=30, null=True)
class DateData(models.Model):
data = models.DateField(null=True)
class DateTimeData(models.Model):
data = models.DateTimeField(null=True)
class DecimalData(models.Model):
data = models.DecimalField(null=True, decimal_places=3, max_digits=5)
class EmailData(models.Model):
data = models.EmailField(null=True)
class FileData(models.Model):
data = models.FileField(null=True, upload_to='/foo/bar')
class FilePathData(models.Model):
data = models.FilePathField(null=True)
class FloatData(models.Model):
data = models.FloatField(null=True)
class IntegerData(models.Model):
data = models.IntegerField(null=True)
class BigIntegerData(models.Model):
data = models.BigIntegerField(null=True)
# class ImageData(models.Model):
# data = models.ImageField(null=True)
class GenericIPAddressData(models.Model):
data = models.GenericIPAddressField(null=True)
class NullBooleanData(models.Model):
data = models.NullBooleanField(null=True)
class PositiveIntegerData(models.Model):
data = models.PositiveIntegerField(null=True)
class PositiveSmallIntegerData(models.Model):
data = models.PositiveSmallIntegerField(null=True)
class SlugData(models.Model):
data = models.SlugField(null=True)
class SmallData(models.Model):
data = models.SmallIntegerField(null=True)
class TextData(models.Model):
data = models.TextField(null=True)
class TimeData(models.Model):
data = models.TimeField(null=True)
class Tag(models.Model):
"""A tag on an item."""
data = models.SlugField()
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
ordering = ["data"]
class GenericData(models.Model):
data = models.CharField(max_length=30)
tags = GenericRelation(Tag)
# The following test classes are all for validation
# of related objects; in particular, forward, backward,
# and self references.
class Anchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(max_length=30)
class Meta:
ordering = ('id',)
class NaturalKeyAnchorManager(models.Manager):
def get_by_natural_key(self, data):
return self.get(data=data)
class NaturalKeyAnchor(models.Model):
objects = NaturalKeyAnchorManager()
data = models.CharField(max_length=100, unique=True)
title = models.CharField(max_length=100, null=True)
def natural_key(self):
return (self.data,)
class UniqueAnchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(unique=True, max_length=30)
class FKData(models.Model):
data = models.ForeignKey(Anchor, null=True)
class FKDataNaturalKey(models.Model):
data = models.ForeignKey(NaturalKeyAnchor, null=True)
class M2MData(models.Model):
data = models.ManyToManyField(Anchor)
class O2OData(models.Model):
# One to one field can't be null here, since it is a PK.
data = models.OneToOneField(Anchor, primary_key=True)
class FKSelfData(models.Model):
data = models.ForeignKey('self', null=True)
class M2MSelfData(models.Model):
data = models.ManyToManyField('self', symmetrical=False)
class FKDataToField(models.Model):
data = models.ForeignKey(UniqueAnchor, null=True, to_field='data')
class FKDataToO2O(models.Model):
data = models.ForeignKey(O2OData, null=True)
class M2MIntermediateData(models.Model):
data = models.ManyToManyField(Anchor, through='Intermediate')
class Intermediate(models.Model):
left = models.ForeignKey(M2MIntermediateData)
right = models.ForeignKey(Anchor)
extra = models.CharField(max_length=30, blank=True, default="doesn't matter")
# The following test classes are for validating the
# deserialization of objects that use a user-defined
# field as the primary key.
# Some of these data types have been commented out
# because they can't be used as a primary key on one
# or all database backends.
class BooleanPKData(models.Model):
data = models.BooleanField(primary_key=True, default=False)
class CharPKData(models.Model):
data = models.CharField(max_length=30, primary_key=True)
# class DatePKData(models.Model):
# data = models.DateField(primary_key=True)
# class DateTimePKData(models.Model):
# data = models.DateTimeField(primary_key=True)
class DecimalPKData(models.Model):
data = models.DecimalField(primary_key=True, decimal_places=3, max_digits=5)
class EmailPKData(models.Model):
data = models.EmailField(primary_key=True)
# class FilePKData(models.Model):
# data = models.FileField(primary_key=True, upload_to='/foo/bar')
class FilePathPKData(models.Model):
data = models.FilePathField(primary_key=True)
class FloatPKData(models.Model):
data = models.FloatField(primary_key=True)
class IntegerPKData(models.Model):
data = models.IntegerField(primary_key=True)
# class ImagePKData(models.Model):
# data = models.ImageField(primary_key=True)
class GenericIPAddressPKData(models.Model):
data = models.GenericIPAddressField(primary_key=True)
# This is just a Boolean field with null=True, and we can't test a PK value of NULL.
# class NullBooleanPKData(models.Model):
# data = models.NullBooleanField(primary_key=True)
class PositiveIntegerPKData(models.Model):
data = models.PositiveIntegerField(primary_key=True)
class PositiveSmallIntegerPKData(models.Model):
data = models.PositiveSmallIntegerField(primary_key=True)
class SlugPKData(models.Model):
data = models.SlugField(primary_key=True)
class SmallPKData(models.Model):
data = models.SmallIntegerField(primary_key=True)
# class TextPKData(models.Model):
# data = models.TextField(primary_key=True)
# class TimePKData(models.Model):
# data = models.TimeField(primary_key=True)
class ComplexModel(models.Model):
field1 = models.CharField(max_length=10)
field2 = models.CharField(max_length=10)
field3 = models.CharField(max_length=10)
# Tests for handling fields with pre_save functions, or
# models with save functions that modify data
class AutoNowDateTimeData(models.Model):
data = models.DateTimeField(null=True, auto_now=True)
class ModifyingSaveData(models.Model):
data = models.IntegerField(null=True)
def save(self, *args, **kwargs):
"""
A save method that modifies the data in the object.
Verifies that a user-defined save() method isn't called when objects
are deserialized (#4459).
"""
self.data = 666
super(ModifyingSaveData, self).save(*args, **kwargs)
# Tests for serialization of models using inheritance.
# Regression for #7202, #7350
class AbstractBaseModel(models.Model):
parent_data = models.IntegerField()
class Meta:
abstract = True
class InheritAbstractModel(AbstractBaseModel):
child_data = models.IntegerField()
class BaseModel(models.Model):
parent_data = models.IntegerField()
class InheritBaseModel(BaseModel):
child_data = models.IntegerField()
class ExplicitInheritBaseModel(BaseModel):
parent = models.OneToOneField(BaseModel)
child_data = models.IntegerField()
class ProxyBaseModel(BaseModel):
class Meta:
proxy = True
class ProxyProxyBaseModel(ProxyBaseModel):
class Meta:
proxy = True
class LengthModel(models.Model):
data = models.IntegerField()
def __len__(self):
return self.data
|
import adventure
import json
if __name__ == '__main__':
    # load the configuration file and close it once parsed
    with open("config/config.json", "r") as config_file:
        config_obj = json.load(config_file)
    instance = adventure.default
    instance.init(config_obj)
instance.start()
|
# from bottle import run
import requests
import csv
def send():
while True:
print("Enter number")
number = input()
xml = f"<?xml version='1.0' encoding='utf-8'?><data>{number}</data>"
headers = {"Content-Type": "application/xml"} # set what your server accepts
response = requests.post(
"http://127.0.0.1:2222/receive-xml", data=xml, headers=headers
)
readcsv(response.text.splitlines())
def readcsv(csvdata):
reader = csv.reader(csvdata, delimiter=",")
for row in reader:
print(row)
send()
# run(host="127.0.0.1", port=1111, debug=False, reloader=True, server="paste")
|
n = int(input("Digite o n-ésimo termo para mostra a série de Fibonaci: "))
a = 0
b = 1
print(a)
for x in range(b, n):
c = a
a = b
b = a + c
print(a)
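# Added worked example: entering n = 7 prints 0 1 1 2 3 5 8, one term per line.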
|
from django.db import models
from applications.utils.models import Language
class Information(Language):
full_name = models.CharField(max_length=150)
phone = models.CharField(max_length=20, blank=True, null=True)
website = models.URLField(blank=True, null=True)
email = models.EmailField(blank=True, null=True)
address = models.CharField(max_length=150, blank=True, null=True)
quote = models.CharField(max_length=150, blank=True, null=True)
about_me = models.TextField(blank=True, null=True)
linkedin = models.URLField(blank=True, null=True)
instagram = models.URLField(blank=True, null=True)
twitter = models.URLField(blank=True, null=True)
github = models.URLField(blank=True, null=True)
class Skill(models.Model):
name = models.CharField(max_length=50)
sub_name = models.CharField(max_length=50, blank=True, null=True)
rate = models.IntegerField(blank=True, null=True)
class Meta:
ordering = ("id",)
def __str__(self):
return self.name
class Group(models.Model):
title = models.CharField(max_length=50)
skills = models.ManyToManyField(Skill, blank=True)
class Meta:
ordering = ("id",)
def __str__(self):
return self.title
class Link(Language):
title = models.CharField(max_length=50)
url = models.URLField()
def __str__(self):
return self.title
class LanguageSkill(Language):
name = models.CharField(max_length=50)
reading = models.IntegerField()
writing = models.IntegerField()
listening = models.IntegerField()
speaking = models.IntegerField()
def __str__(self):
return self.name
class Education(Language):
title = models.CharField(max_length=150)
university = models.CharField(max_length=150, blank=True, null=True)
begin_time = models.DateField(blank=True, null=True)
end_time = models.DateField(blank=True, null=True)
current = models.BooleanField(default=False)
description = models.TextField(blank=True, null=True)
def __str__(self):
return self.title
class Experience(Language):
company_name = models.CharField(max_length=100)
company_website = models.URLField(blank=True, null=True)
project_name = models.CharField(max_length=100)
location = models.CharField(max_length=100, blank=True, null=True)
start_date = models.DateField(blank=True, null=True)
end_date = models.DateField(blank=True, null=True)
current = models.BooleanField(default=False)
description = models.TextField(blank=True, null=True)
def __str__(self):
return self.company_name
class Project(Language):
project_name = models.CharField(max_length=100)
subtitle = models.CharField(max_length=100)
company_website = models.URLField(blank=True, null=True)
location = models.CharField(max_length=100, blank=True, null=True)
start_date = models.DateField(blank=True, null=True)
end_date = models.DateField(blank=True, null=True)
current = models.BooleanField(default=False)
description = models.TextField(blank=True, null=True)
def __str__(self):
return self.project_name
class Certificate(Language):
title = models.CharField(max_length=100)
picture = models.FileField(blank=True, null=True)
pdf = models.FileField(blank=True, null=True)
issue_date = models.DateField(blank=True, null=True)
expire_date = models.DateField(blank=True, null=True)
institute = models.CharField(max_length=150, blank=True, null=True)
def __str__(self):
return self.title
|
import uuid
from abc import ABCMeta, abstractmethod
class Recording(object):
"""
Holds a recording of an operation that was recorded using the TapeRecorder
"""
__metaclass__ = ABCMeta
def __init__(self, _id=None):
self.id = _id or uuid.uuid1().hex
self._closed = False
@abstractmethod
def _set_data(self, key, value):
"""
Sets data in the recording
:param key: data key
:type key: basestring
:param value: data value (serializable)
:type value: Any
"""
pass
def set_data(self, key, value):
"""
Sets data in the recording
:param key: data key
:type key: basestring
:param value: data value (serializable)
:type value: Any
"""
assert not self._closed
self._set_data(key, value)
@abstractmethod
def get_data(self, key):
"""
:param key: Data key
:type key: basestring
:return: Recorded data under given key
:rtype: Any
:raise: playback.exceptions.RecordingKeyError
"""
pass
@abstractmethod
def get_all_keys(self):
"""
:return: All recorded keys
:rtype: list of basestring
"""
pass
def add_metadata(self, metadata):
"""
:param metadata: Metadata to add to the recording
:type metadata: dict
"""
assert not self._closed
self._add_metadata(metadata)
@abstractmethod
def _add_metadata(self, metadata):
"""
:param metadata: Metadata to add to the recording
:type metadata: dict
"""
pass
def close(self):
"""
Close the recording and release any underlying resources
"""
self._closed = True
def __setitem__(self, key, value):
self._set_data(key, value)
def __getitem__(self, item):
return self.get_data(item)
@abstractmethod
def get_metadata(self):
"""
:return: Recorded metadata
:rtype: dict
"""
pass
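# A minimal in-memory sketch, not part of the original module, showing one way the
# abstract Recording above could be implemented; the dict-backed storage and the
# plain KeyError on a missing key are simplifications for illustration only.
class InMemoryRecording(Recording):
    def __init__(self, _id=None):
        super(InMemoryRecording, self).__init__(_id)
        self._data = {}
        self._metadata = {}
    def _set_data(self, key, value):
        self._data[key] = value
    def get_data(self, key):
        return self._data[key]
    def get_all_keys(self):
        return list(self._data.keys())
    def _add_metadata(self, metadata):
        self._metadata.update(metadata)
    def get_metadata(self):
        return dict(self._metadata)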
|
import cv2
import yaml
class parkingSpaceBoundary:
def __init__(self, img, file):
self.image = img
self.filePath = file
self.parkingSpace = []
self.id = 1
        self.data = []  # parking spaces collected so far for this instance
def dumpYML(self):
with open(self.filePath, "a") as yml:
yaml.dump(self.data, yml)
def defineBoundaries(self, event, x, y, flags, param):
        currentSpace = {'id': self.id, 'points': []}  # Dictionary for the parking space currently being marked
if event == cv2.EVENT_LBUTTONDBLCLK: # If a point on the image is double left clicked
self.parkingSpace.append((x, y)) # Append the point to parkingSpace
if len(self.parkingSpace) == 4: # If 4 points have been appended
cv2.line(self.image, self.parkingSpace[0], self.parkingSpace[1], (0, 255, 0), 1) # Draw the space on the image
cv2.line(self.image, self.parkingSpace[1], self.parkingSpace[2], (0, 255, 0), 1)
cv2.line(self.image, self.parkingSpace[2], self.parkingSpace[3], (0, 255, 0), 1)
cv2.line(self.image, self.parkingSpace[3], self.parkingSpace[0], (0, 255, 0), 1)
temp_lst1 = list(self.parkingSpace[2]) # Turn to list
temp_lst2 = list(self.parkingSpace[3])
temp_lst3 = list(self.parkingSpace[0])
temp_lst4 = list(self.parkingSpace[1])
currentSpace['points'] = [temp_lst1, temp_lst2, temp_lst3, temp_lst4] # Add points to currentSpace
currentSpace['id'] = self.id # Add id to currentSpace
                self.data.append(currentSpace)  # Add currentSpace to the instance's data list
self.id += 1 #Increment id by 1
self.parkingSpace = [] # Clear parkingSpace for next space
def markSpaces(self):
cv2.namedWindow("Double click to mark points") # Name window
cv2.imshow("Double click to mark points", self.image) # Set captured frame and show
cv2.setMouseCallback("Double click to mark points", self.defineBoundaries) # Set double left click action
while True: # Set parking space boundaries and loop until ESC is pressed
cv2.imshow("Double click to mark points", self.image)
            key = cv2.waitKey(33) & 0xFF  # 0xFF keeps only the last 8 bits of the key code
            if key == 27:  # If ESC key is pressed, break
break
if self.data != []: # After breaking loop, dump collected parking data if not null
self.dumpYML()
cv2.destroyAllWindows() # Close parking boundary window
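# A minimal usage sketch with assumed file names ("parking.jpg", "spaces.yml"); not
# part of the original module.
if __name__ == "__main__":
    frame = cv2.imread("parking.jpg")  # hypothetical input frame
    marker = parkingSpaceBoundary(frame, "spaces.yml")
    marker.markSpaces()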
|
'''
The bulk of this program is based on the sample code that Instagram published for their API.
The myInfo section was written by Tim, with minor modifications in models.py so that the
username and ID number could be shown to the user.
To run it:
* Download bottle if you don't already have it: pip install bottle
* Download bottle-session if you don't already have it: pip install bottle-session
* Download and run a redis instance on port 6379, here's their website http://redis.io
* To do this:
* 1) wget http://download.redis.io/releases/redis-stable.tar.gz
* 2) tar xzf redis-stable.tar.gz
* 3) cd redis-stable
* 4) make test
* 5) sudo make install
* 6) redis-server
* Run the file; it will host a local server on port 8515.
* Visit http://localhost:8515 in a web browser
*
* Login: as a user we created:
* Sandbox username: joejones353
* password: DrwnSurfz
'''
import bottle
import beaker.middleware
from bottle import route, redirect, post, run, request, hook
from instagram import client, subscriptions, helper
from instagram.client import InstagramAPI
import time
from datetime import date
from collections import Counter
bottle.debug(True)
session_opts = {
'session.type': 'file',
'session.data_dir': './session/',
'session.auto': True,
}
app = beaker.middleware.SessionMiddleware(bottle.app(), session_opts)
CONFIG = {
'client_id': '3c83c3ebf36b4445ad069560c0557f70',
'client_secret': '9b2971e8f6f1440c867b8c7a8a7bc77f',
'redirect_uri': 'http://localhost:8515/oauth_callback'
}
unauthenticated_api = client.InstagramAPI(**CONFIG)
@hook('before_request')
def setup_request():
request.session = request.environ['beaker.session']
def process_tag_update(update):
print(update)
reactor = subscriptions.SubscriptionsReactor()
reactor.register_callback(subscriptions.SubscriptionType.TAG, process_tag_update)
myName = ""
@route('/')
def home():
try:
url = unauthenticated_api.get_authorize_url(scope=["likes","comments","relationships","follower_list","basic","public_content"])
return '<a href="%s">Connect with Instagram</a>' % url
except Exception as e:
print(e)
def get_nav():
nav_menu = ("<body style=\"background-color:lightgrey;\"><font size=\"6\"><h1 style=\"font-family:verdana; text-align:center;\">Tim's and Jeff's Instagram API</h1>"
"<h2>Main Menu:</h2>"
"<ul> <font size=\"3\">"
"<li><a href='/myInfo'>My information</a></li>"
"<li><a href='/myFollowers'>My Followers List</a></li>"
"<li><a href='/myFollowing'>My Following List</a></li>"
"<li><a href='/myRecentLikes'>Posts that I liked, Statistics, and Suggested People to Follow</a></li>"
"</ul>")
return nav_menu
@route('/oauth_callback')
def on_callback():
code = request.GET.get("code")
if not code:
return 'Missing code'
try:
access_token, user_info = unauthenticated_api.exchange_code_for_access_token(code)
if not access_token:
return 'Could not get access token'
api = InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
request.session['access_token'] = access_token
except Exception as e:
print(e)
return get_nav()
@route('/myInfo')
def myInfo(): #written by Tim
content = "<h2>User's Information</h2>"
access_token = request.session['access_token']
if not access_token:
return 'Missing Access Token'
try:
api = InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
myUser = api.user() #makes an API call
content += "<img src="+myUser.profile_picture+" alt='Profile Picture' >"
content +="<p>Username : "+myUser.username+"</p>"
content +="<p>Full Name: "+myUser.full_name+"</p>"
content +="<p>ID number: "+myUser.id+"</p>"
content +="<p>Biography: "+myUser.bio+"</p>"
content +="<h3>Counts:</h3>"
content +="<ul><li>Posts: "+ str(myUser.counts.get('media'))+"</li>"
content +="<li><a href='/myFollowers'>Followers: </a>"+ str(myUser.counts.get('followed_by'))+"</li>"
content +="<li><a href='/myFollowing'>Following: </a>"+ str(myUser.counts.get('follows'))+"</li></ul>"
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/myFollowers')
def myFollowers(): #written by Tim
content = "<h2>My Followers</h2>"
access_token = request.session['access_token']
if not access_token:
return 'Missing Access Token'
try:
api = InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
follower_list, next_ = api.user_followed_by()
counter =0
content+="<ul>"
for user in follower_list:
content+="<li><em>"+user.getName()+"</em></li>"
counter = counter +1
content+="</ul>"
content+="</h2>Total follower count: "+str(counter)+"</h2><p></p><p></p>"
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/myFollowing')
def myFollowing(): #written by Tim
content = "<h2>Accounts I Follow</h2>"
access_token = request.session['access_token']
if not access_token:
return 'Missing Access Token'
try:
api = InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
follow_list, next_ = api.user_follows()
counter =0
content+="<ul>"
for user in follow_list:
content+="<li><em>"+user.getName()+"</em></li>"
counter = counter +1
content+="</ul>"
content+="</h2>Total following count: "+str(counter)+"</h2><p></p><p></p>"
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/myRecentLikes')
def myRecentLikes(): #written by Tim
content = "<h2>User's Recent Likes</h2>"
access_token = request.session['access_token']
if not access_token:
print "Missing Access Token"
return 'Missing Access Token'
try:
api = InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
_user_id =(api.user()).id
liked_media, next = api.user_liked_media(count=9)
print "Webpage is loading...."
        counter = 0
photos = []
filters = []
usersThatLiked = []
content += "<div id='liked_media'>"
content +="<style>figure{ width:33.3%; float:left; margin:0px; text-align:center; padding:0px;} </style>"
for media in liked_media:
content += "<figure>"
filters.append(media.filter)
usersThatLiked.extend(api.media_likes(media_id = media.id))
counter = counter +1
#photos.append('<div style="float:left;">')
if(media.type == 'video'):
                content += ('<video controls height="150"><source type="video/mp4" src="%s"/></video>' % (media.get_standard_resolution_url()))
#photos.append('<video controls width height="150"><source type="video/mp4" src="%s"/></video>' % (media.get_standard_resolution_url()))
else:
content+= ("<img src=%s/>" % (media.get_low_resolution_url()))
content+= ("<figcaption>@%s" % (media.user.username))
content+= "</figcaption>"
#photos.append('<div class="floated_img"><img src="%s"/></div>' % (media.get_thumbnail_url()))
content+="</figure>"
content+= "</div><br>"
filterCounter = Counter(filters) #makes a counter object based on the list of filters
usersThatLikedCounter = Counter(usersThatLiked) #counts instances of any person liking the same pictures that the user did
#outputs a ranked list of the filters used in the liked posts above
content += "<h2> Filters used (count): </h2><ol>"
for filterWithCount in filterCounter.most_common():
content += "<li>" + filterWithCount[0] +" ("+str(filterWithCount[1])+")</li>"
content += "</ol>"
#gets a list of people that our user follows (used to make accurate suggestions of people to follow)
following_list, next_ = api.user_follows()
#make a set of user id numbers
following_ids = set()
for user in following_list:
following_ids.add(user.id)
#outputs the most common users that liked the same media
content += "<h2> Top users that also liked these posts: </h2><p>Below is a list of users who also liked the posts above, if you are not already following them, there will be a link.<ol>"
for userWithCount in usersThatLikedCounter.most_common(11):
if (userWithCount[0].id != _user_id): #makes sure that the current user is not displayed
content += "<li>" + userWithCount[0].username +" ("+str(userWithCount[1])+" similar likes)"
if(userWithCount[0].id not in following_ids):
content += (" <a href='/user_follow/%s'>Follow</a>" % (userWithCount[0].id))
content += (" <p>Here's a link to their Instagram Profile:" )
content += (" <a href='https://www.instagram.com/%s'>instagram.com/%s</a></p></li>" % (userWithCount[0].username, userWithCount[0].username))
content += "</ol>"
except Exception as e:
print "in exception ..."
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/user_follow/<id>')
def user_follow(id): #written by Tim
content = ""
access_token = request.session['access_token']
if not access_token:
print "Missing Access Token"
return 'Missing Access Token'
try:
api = InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
api.follow_user(user_id = id)
content += "<h2>Congratulations, you are following that user!</h2>"
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/realtime_callback')
@post('/realtime_callback')
def on_realtime_callback():
mode = request.GET.get("hub.mode")
challenge = request.GET.get("hub.challenge")
verify_token = request.GET.get("hub.verify_token")
if challenge:
return challenge
else:
x_hub_signature = request.header.get('X-Hub-Signature')
raw_response = request.body.read()
try:
reactor.process(CONFIG['client_secret'], raw_response, x_hub_signature)
except subscriptions.SubscriptionVerifyError:
print("Signature mismatch")
bottle.run(app=app, host='localhost', port=8515, reloader=True)
|
#!/usr/bin/env python
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import logging
import os
from build_workflow.build_args import BuildArgs
from build_workflow.build_recorder import BuildRecorder
from build_workflow.build_target import BuildTarget
from build_workflow.builder import Builder
from git.git_repository import GitRepository
from manifests.input_manifest import InputManifest
from system import console
from system.temporary_directory import TemporaryDirectory
args = BuildArgs()
console.configure(level=args.logging_level)
manifest = InputManifest.from_file(args.manifest)
with TemporaryDirectory(keep=args.keep) as work_dir:
output_dir = os.path.join(os.getcwd(), "artifacts")
logging.info(f"Building in {work_dir}")
os.chdir(work_dir)
target = BuildTarget(
name=manifest.build.name,
version=manifest.build.version,
snapshot=args.snapshot,
output_dir=output_dir,
)
os.makedirs(target.output_dir, exist_ok=True)
build_recorder = BuildRecorder(target)
logging.info(
f"Building {manifest.build.name} ({target.arch}) into {target.output_dir}"
)
for component in manifest.components:
if args.component and args.component != component.name:
logging.info(f"Skipping {component.name}")
continue
logging.info(f"Building {component.name}")
repo = GitRepository(
component.repository,
component.ref,
os.path.join(work_dir, component.name),
component.working_directory,
)
try:
builder = Builder(component.name, repo, build_recorder)
builder.build(target)
builder.export_artifacts()
except:
logging.error(
f"Error building {component.name}, retry with: {args.component_command(component.name)}"
)
raise
build_recorder.write_manifest()
logging.info("Done.")
|
import frappe
class TallyImportCurrencyItems:
def __init__(self, value, ow):
self.currency_table_map = { 'MAILINGNAME' : 'name',
'DECIMALSYMBOL' : 'fraction',
'ORIGINALNAME' : 'symbol'
}
self.overwrite = ow
self.process_node = value
self.process()
def process(self):
        if not frappe.db.exists({"doctype": "Currency", "currency_name": self.process_node['MAILINGNAME']}):
            # Build a new, unsaved Currency document; fetching by name would fail because the record does not exist yet.
            currency_doc = frappe.get_doc({
                "doctype": "Currency",
                "currency_name": self.process_node['MAILINGNAME'],
                "fraction": self.process_node['DECIMALSYMBOL'],
                "symbol": self.process_node['ORIGINALNAME'],
            })
            currency_doc.insert(ignore_permissions=True)
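# A minimal usage sketch (the sample node values are assumptions); kept commented out
# because it needs a live Frappe site context to run.
# TallyImportCurrencyItems({'MAILINGNAME': 'Sample Currency',
#                           'DECIMALSYMBOL': 'Cents',
#                           'ORIGINALNAME': '$'}, ow=True)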
|
from flask import current_app, _app_ctx_stack
import flask_login
from flaskloginintegration import _user_loader, User
from views import login_views
class ZKPP(object):
def __init__(self, app=None, login_manager=flask_login.LoginManager()):
self.app = app
self.login_manager = login_manager
if app is not None:
self.init_app(app)
def init_app(self, app):
self.login_manager.init_app(app)
self.init_login(self.login_manager)
app.config.setdefault('my_greeting', self.greet())
app.teardown_appcontext(self.teardown)
print 'initializing application'
print 'root path: ' + login_views.root_path
app.register_blueprint(login_views) # set login views
def init_login(self, login_manager):
login_manager.user_loader(_user_loader)
#login_manager.request_loader(_request_loader)
def greet(self):
return 'hello my friend why so serious?'
def teardown(self, exception):
ctx = _app_ctx_stack.top
if hasattr(ctx, 'my_greeting'):
pass
#ctx.sqlite3_db.close()
print 'teardown called'
@property
def greeting(self):
ctx = _app_ctx_stack.top
if ctx is not None:
if not hasattr(ctx, 'my_greeting'):
ctx.my_greeting = self.greet()
return ctx.my_greeting
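# A minimal usage sketch under the assumption of a bare Flask app; not part of the
# original module (the login blueprint and user loader come from the imports above).
if __name__ == '__main__':
    from flask import Flask
    example_app = Flask(__name__)
    ZKPP(example_app)
    example_app.run(port=5000)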
|
# -*- coding: utf-8 -*-
import json
from geventwebsocket.exceptions import WebSocketError
from putils.patterns import Singleton
import re
import logging
logger = logging.getLogger(__name__)
class Router(Singleton):
CONNECTED = "router_connected"
DISCONNECTED = "router_disconnected"
def __init__(self, server):
self.controllers_mapping = server.settings["controllers"]
self.sockets = []
self.eventer = server.eventer
def __call__(self, environ, start_response):
if 'wsgi.websocket' in environ:
socket = environ['wsgi.websocket']
if socket:
self.handle(socket)
return None
else:
start_response('404 Not Found', [('Content-Type', 'application/json')])
                return ['{"error": "not found"}']
else:
start_response('404 Not Found', [('Content-Type', 'application/json')])
            return ['{"error": "not found"}']
def handle(self, socket):
self.on_open(socket)
while True:
try:
message = socket.receive()
if not message:
break
self.on_message(socket, message)
except Exception, e:
logger.error(str(e), exc_info=True)
break
try:
socket.close()
except WebSocketError, e:
logger.error(str(e), exc_info=True)
finally:
self.on_close(socket)
def on_message(self, socket, message):
data = json.loads(message)
data_url = data["url"]
if data_url[len(data_url)-1] == '/':
data_url = data_url.rstrip('/')
url_and_method = data_url.rsplit('/', 1)
url = url_and_method[0]
method = url_and_method[1]
for (match_url, controller) in self.controllers_mapping:
mtch = match_url.match(url)
if mtch:
sender = Sender(socket, data_url, self)
for key, value in mtch.groupdict().iteritems():
data[key] = value
if isinstance(controller, list):
for c in controller:
action = getattr(c, method)
action(sender, data)
else:
action = getattr(controller, method)
action(sender, data)
break
def on_open(self, socket):
self.sockets.append(socket)
logger.info("connected to %s", socket.origin)
self.eventer.publish(Router.CONNECTED, Sender(socket, "/", self))
def on_close(self, socket):
self.sockets.remove(socket)
logger.info("disconnected")
self.eventer.publish(Router.DISCONNECTED, Sender(socket, "/", self))
class Sender(object):
def __init__(self, socket, url, router):
self.socket = socket
self.url = url
self.router = router
def send(self, data):
if self.socket is None:
return
data["url"] = self.url
try:
if self.socket:
self.socket.send(json.dumps(data))
except WebSocketError:
pass
def send_all(self, data):
data["url"] = self.url
json_data = json.dumps(data)
for socket in self.router.sockets:
try:
if socket:
socket.send(json_data)
except WebSocketError:
pass
def send_others(self, data):
data["url"] = self.url
json_data = json.dumps(data)
for socket in self.router.sockets:
if socket and socket != self.socket:
try:
if socket:
socket.send(json_data)
except WebSocketError:
pass
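# A minimal sketch of the message format on_message() expects, assuming a controller
# registered for "/chat" that defines a post() method; for illustration only.
EXAMPLE_MESSAGE = json.dumps({
    "url": "/chat/post",  # the last path segment names the controller method to call
    "text": "hello",
})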
|
import hashlib
import os
from contextlib import contextmanager
from pathlib import Path
from tempfile import NamedTemporaryFile
__all__ = ["inplace", "assert_file_exist", "file_hash"]
@contextmanager
def inplace(filepath: Path):
folder = filepath.parents[0]
name = filepath.name
    success = False
    try:
        with NamedTemporaryFile(dir=folder, prefix="." + name, delete=False) as tmp:
            yield Path(tmp.name)
        success = True  # only swap in the temp file if it was written without error
finally:
if success:
os.replace(tmp.name, filepath)
def assert_file_exist(filepath: Path):
if not filepath.exists():
raise RuntimeError(f"{filepath} does not exist.")
def file_hash(filepath: Path, fast=True) -> int:
filepath = filepath.resolve()
stat = filepath.stat()
key = (bytes(filepath), str(stat.st_size).encode(), str(stat.st_mtime).encode())
return int(hashlib.md5(str(key).encode()).hexdigest(), 16)
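# A minimal usage sketch of inplace() with a hypothetical "data.txt"; the target is
# only replaced once the temporary file has been written without error.
def _example_inplace_rewrite() -> None:
    target = Path("data.txt")  # hypothetical path, for illustration only
    with inplace(target) as tmp_path:
        tmp_path.write_text("new contents\n")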
|
# -*- coding: utf-8 -*-
from typing import Optional
import os
from matminer.data_retrieval.retrieve_Citrine import CitrineDataRetrieval
import pandas as pd
import numpy as np
from pathlib import Path
from tqdm import tqdm
from src.data.utils import countSimilarEntriesWithMP, LOG
from src.data import get_data_base
class data_Citrine(get_data_base.data_base):
def __init__(self, API_KEY: str):
self.API_KEY = API_KEY
self.data_dir = Path(__file__).resolve().parents[2] / "data"
        self.raw_data_path = self.data_dir / "raw" / "Citrine" / "Citrine.pkl"
self.interim_data_path = self.data_dir / "interim" / "Citrine" / "Citrine.pkl"
super().__init__()
    def _apply_query(self, sorted: Optional[bool]) -> pd.DataFrame:
cdr = CitrineDataRetrieval(api_key=self.API_KEY)
criteria = {"data_type": "EXPERIMENTAL"}
properties = ['Band gap']
        common_fields = ["uid", "chemicalFormula", "references", "Crystallinity", "Structure", "Crystal structure"]
df = cdr.get_dataframe(criteria = criteria,
properties = properties,
common_fields = common_fields)
LOG.info("Writing to raw data...")
df.to_pickle(self.raw_data_path)
        return df
    def _sort(self, df: pd.DataFrame, entries: pd.DataFrame) -> pd.DataFrame:
df = df[df["Band gap-dataType"]=="EXPERIMENTAL"]\
.dropna(axis=1, how='all')
bandgap = np.empty(len(entries["full_formula"]))
bandgap[:] = np.nan
for i, entry in tqdm(enumerate(entries["full_formula"])):
for j, exp in enumerate(df["chemicalFormula"]):
if entry == exp and float(df["Band gap"].iloc[j])>=0:
bandgap[i] = float(df["Band gap"].iloc[j])
sorted_df = pd.DataFrame({"citrine_bg": bandgap,
"material_id": entries["material_id"]})
return sorted_df
    def sort_with_MP(self, df: pd.DataFrame, entries: pd.DataFrame) -> pd.DataFrame:
if os.path.exists(self.interim_data_path):
sorted_df = pd.read_pickle(self.interim_data_path)
else:
sorted_df = self._sort(df, entries)
countSimilarEntriesWithMP(sorted_df["citrine_bg"], "Citrine")
return sorted_df
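# A minimal usage sketch (the API key and the Materials Project "entries" frame are
# assumptions); kept commented out because it triggers live API queries.
# citrine = data_Citrine(API_KEY="your-citrine-api-key")
# raw_df = citrine._apply_query(sorted=None)
# merged_df = citrine.sort_with_MP(raw_df, entries)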
|