text stringlengths 4 1.02M | meta dict |
|---|---|
"""State and behavior for operation context."""
import time
# _interfaces is referenced from specification in this module.
from grpc.framework.core import _interfaces # pylint: disable=unused-import
from grpc.framework.interfaces.base import base
class OperationContext(base.OperationContext):
    """An implementation of interfaces.OperationContext."""

    def __init__(
            self, lock, termination_manager, transmission_manager,
            expiration_manager):
        """Constructor.

        Args:
          lock: The operation-wide lock.
          termination_manager: The _interfaces.TerminationManager for the
            operation.
          transmission_manager: The _interfaces.TransmissionManager for the
            operation.
          expiration_manager: The _interfaces.ExpirationManager for the
            operation.
        """
        self._lock = lock
        self._termination_manager = termination_manager
        self._transmission_manager = transmission_manager
        self._expiration_manager = expiration_manager

    def _abort(self, outcome):
        """Abort the operation with the given outcome, at most once."""
        with self._lock:
            if self._termination_manager.outcome is not None:
                # Already terminated; a second abort is a no-op.
                return
            self._termination_manager.abort(outcome)
            self._transmission_manager.abort(outcome)
            self._expiration_manager.terminate()

    def outcome(self):
        """See base.OperationContext.outcome for specification."""
        with self._lock:
            return self._termination_manager.outcome

    def add_termination_callback(self, callback):
        """See base.OperationContext.add_termination_callback."""
        with self._lock:
            existing_outcome = self._termination_manager.outcome
            if existing_outcome is not None:
                # Too late to register; hand the outcome back instead.
                return existing_outcome
            self._termination_manager.add_callback(callback)
            return None

    def time_remaining(self):
        """See base.OperationContext.time_remaining for specification."""
        with self._lock:
            remaining = self._expiration_manager.deadline() - time.time()
        return max(0.0, remaining)

    def cancel(self):
        """See base.OperationContext.cancel for specification."""
        self._abort(base.Outcome.CANCELLED)

    def fail(self, exception):
        """See base.OperationContext.fail for specification.

        Note: the exception object itself is not recorded; only a
        LOCAL_FAILURE outcome is propagated.
        """
        self._abort(base.Outcome.LOCAL_FAILURE)
| {
"content_hash": "fe58b9233b32175e59d1851613f7844d",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 80,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.7124364308830329,
"repo_name": "fichter/grpc",
"id": "24a12b612e50310c346d4d32413be22c87f29267",
"size": "3692",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/grpcio/grpc/framework/core/_context.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "3674346"
},
{
"name": "C#",
"bytes": "784596"
},
{
"name": "C++",
"bytes": "988770"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "196335"
},
{
"name": "Makefile",
"bytes": "2042006"
},
{
"name": "Objective-C",
"bytes": "243336"
},
{
"name": "PHP",
"bytes": "66550"
},
{
"name": "Protocol Buffer",
"bytes": "105002"
},
{
"name": "Python",
"bytes": "1286466"
},
{
"name": "Ruby",
"bytes": "348704"
},
{
"name": "Shell",
"bytes": "25777"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2016 Ryan Fan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
"""
import requests
from django.views.generic import TemplateView
import logging
import re
from lxml import html
logger = logging.getLogger(__name__)
class QueryView(TemplateView):
    """Demo view proxying a maintenance-fund query to szjw.changsha.gov.cn.

    The upstream site is a PHP application; its PHPSESSID cookie and the
    hidden 'xzyj' / '__hash__' form tokens are captured on first access and
    stashed in the Django session for the later query submission.
    """

    template_name = 'apps/wywxjj/query.html'

    def first_access(self):
        """Fetch the search page once to capture the session cookie and
        hidden form tokens into the Django session."""
        url = "http://szjw.changsha.gov.cn/index.php/home/index/searchdata/"
        s = requests.session()
        resp = s.get(url)
        cookies = s.cookies.get_dict()
        logger.debug(cookies)
        self.request.session['PHPSESSID'] = cookies.get('PHPSESSID', None)
        # Match against resp.text (decoded str), not resp.content (bytes):
        # a str pattern cannot be applied to bytes on Python 3.
        found = re.findall(r'name="xzyj" value="(.*)"', resp.text)
        if found:
            self.request.session['xzyj'] = found[0]
        found = re.findall(r'name="__hash__" value="(.*)"', resp.text)
        if found:
            self.request.session['__hash__'] = found[0]

    def refresh_vcode_image(self):
        """Download the current verification-code image for this session."""
        url = "http://szjw.changsha.gov.cn/index.php/home/common/verifycode.html"
        s = requests.session()
        resp = s.get(url, cookies={'PHPSESSID': self.request.session['PHPSESSID']})
        # The response is a JPEG: open the file in binary mode ('wb');
        # writing bytes to a text-mode file fails on Python 3.
        with open('/opt/wxgigo/static/wywxjj/query_vcode.jpg', 'wb') as f:
            f.write(resp.content)

    def get(self, request, *args, **kwargs):
        """Render the query page, priming the upstream session first."""
        context = self.get_context_data(**kwargs)
        # get PHPSESSID when first time to access
        self.first_access()
        # get vcode
        #self.refresh_vcode_image()
        return self.render_to_response(context)

    def parse_xq(self, xq_info):
        """Parse an HTML result table into a list of rows (cell texts),
        skipping the header row.

        Args:
          xq_info: HTML fragment containing the result table.

        Returns:
          list of lists, one inner list of cell text per data row.
        """
        rows = []
        root = html.fromstring(xq_info)
        # '//tr' selects all rows; the original '///tr' is not valid XPath
        # and raises an XPathEvalError in lxml.
        tr_list = root.xpath("//tr[position()>1]")
        for tr in tr_list:
            rows.append([td.text for td in tr.getchildren()])
        return rows
def query_szjw(request):
    """Submit the maintenance-fund search form upstream on behalf of the
    user, reusing the PHPSESSID / xzyj / __hash__ values captured earlier
    by QueryView.first_access().

    NOTE(review): this view currently returns None (no HttpResponse) and
    only logs the upstream reply — presumably test scaffolding; confirm
    before wiring it to a URL.
    """
    logger.debug(request.POST)
    url = "http://szjw.changsha.gov.cn/index.php/home/Searchdata/maintenancefund"
    s = requests.session()
    data = {
        'search8_idcard': request.POST.get('search8_idcard', None),
        'search8_name': request.POST.get('search8_name', None),
        'search8_room': request.POST.get('search8_room', None),
        'search8_verify_code': request.POST.get('search8_verify_code', None),
        'xzyj': request.session['xzyj'],
        '__hash__': request.session['__hash__']
    }
    logger.debug(request.session['PHPSESSID'])
    # This is a form submission: send the payload as a POST body.  The
    # original used s.get(url, data=...), which puts the form data where
    # the server does not look for it.
    resp = s.post(url, data=data, cookies={'PHPSESSID': request.session['PHPSESSID']})
    logger.debug(resp.content)
| {
"content_hash": "b47fbfc9a3dc82f18d0d34864cc8975b",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 85,
"avg_line_length": 37.135416666666664,
"alnum_prop": 0.6603085553997194,
"repo_name": "rfancn/wxgigo",
"id": "9c1d0bcf707a62ae5b518fe0a00aad7f020ef50f",
"size": "3611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/admin/apps/member/testviews.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2696"
},
{
"name": "HTML",
"bytes": "50544"
},
{
"name": "JavaScript",
"bytes": "4201"
},
{
"name": "Python",
"bytes": "356710"
},
{
"name": "Shell",
"bytes": "1220"
}
],
"symlink_target": ""
} |
from collections import Iterable
import numpy as np
from mordred import Result, Descriptor, error, is_missing
from nose.tools import eq_, ok_, raises
class Dummy1(Descriptor):
    # Stub descriptor whose calculation always succeeds with the value 1.
    def __str__(self):
        return "Dummy1"

    def parameters(self):
        # No parameters.
        return ()

    def calculate(self):
        return 1
class Dummy2(Descriptor):
    # Stub descriptor whose calculation always fails with ValueError.
    def __str__(self):
        return "Dummy2"

    def parameters(self):
        # No parameters.
        return ()

    def calculate(self):
        raise ValueError("error")
# Module-level fixture shared by every test below: index 0 holds the
# successful value 1 (from Dummy1), index 1 holds a stored error
# (from Dummy2).
result = Result(None, [1, error.Error(ValueError("error"), [])], [Dummy1(), Dummy2()])
def test_length():
    # The fixture holds exactly two descriptor results.
    length = len(result)
    eq_(length, 2)
def test_fill_missing():
    # fill_missing() replaces the stored error with NaN.
    filled = result.fill_missing()
    assert np.isnan(filled[1])
def test_drop_missing():
    # drop_missing() keeps only the one successful entry.
    remaining = result.drop_missing()
    eq_(len(remaining), 1)
def test_items():
    # Nose test generator: each yielded tuple runs as a separate check.
    iterator = result.items()
    yield ok_, isinstance(iterator, Iterable)
    pairs = list(iterator)
    yield eq_, len(pairs), 2
    yield eq_, len(pairs[0]), 2
    descriptor, value = pairs[0]
    yield ok_, isinstance(descriptor, Descriptor)
    yield ok_, isinstance(value, int)
def test_keys():
    # keys() yields the Descriptor objects themselves.
    iterator = result.keys()
    yield ok_, isinstance(iterator, Iterable)
    keys = list(iterator)
    yield eq_, len(keys), 2
    yield ok_, isinstance(keys[0], Descriptor)
def test_iter():
    # Plain iteration yields the calculated values.
    iterator = iter(result)
    yield ok_, isinstance(iterator, Iterable)
    values = list(iterator)
    yield eq_, len(values), 2
    yield ok_, isinstance(values[0], int)
def test_reversed():
    # reversed() iteration yields the same values in opposite order.
    iterator = reversed(result)
    yield ok_, isinstance(iterator, Iterable)
    values = list(iterator)
    yield eq_, len(values), 2
    yield ok_, isinstance(values[1], int)
def test_asdict_non_rawkey():
    # Default asdict() keys are descriptor names (strings).
    mapping = result.asdict()
    yield eq_, len(mapping), 2
    yield ok_, isinstance(mapping, dict)
    for key in mapping:
        yield ok_, isinstance(key, str)
def test_asdict_rawkey():
    # asdict(True) keys are the raw Descriptor objects.
    mapping = result.asdict(True)
    yield eq_, len(mapping), 2
    yield ok_, isinstance(mapping, dict)
    for key in mapping:
        yield ok_, isinstance(key, Descriptor)
def test_ix():
    # Positional access through the .ix indexer.
    indexer = result.ix
    yield eq_, 1, indexer[0]
    yield ok_, is_missing(indexer[1])
def test_name():
    # Name-based access through the .name indexer.
    by_name = result.name
    yield eq_, 1, by_name["Dummy1"]
    yield ok_, is_missing(by_name["Dummy2"])
def test_getitem():
    # __getitem__ accepts both positional (int) and name (str) keys; the
    # yield order matches the original assertion sequence.
    for good_key, missing_key in ((0, 1), ("Dummy1", "Dummy2")):
        yield eq_, 1, result[good_key]
        yield ok_, is_missing(result[missing_key])
@raises(TypeError)
def test_getitem_raise():
    # Keys that are neither int nor str are rejected.
    _ = result[1.2]
| {
"content_hash": "48c4d87d3203e817fdeebc0402a3a157",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 86,
"avg_line_length": 19.166666666666668,
"alnum_prop": 0.5982608695652174,
"repo_name": "mordred-descriptor/mordred",
"id": "4861e34a1599530b1aa289b31d3accb36249bed5",
"size": "2300",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mordred/tests/test_result_type.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "252306"
},
{
"name": "Shell",
"bytes": "4077"
}
],
"symlink_target": ""
} |
from django.contrib.auth import get_permission_codename
from wagtail.wagtailcore.models import Page
class PermissionHelper(object):
    """
    Provides permission-related helper functions to effectively control
    what a user can and can't do to instances of a 'typical' model, where
    permissions are granted model-wide.
    """

    def __init__(self, model):
        self.model = model
        self.opts = model._meta

    def _has_model_wide_permission(self, user, action):
        # Shared model-wide Django permission check ("app_label.codename").
        codename = get_permission_codename(action, self.opts)
        return user.has_perm("%s.%s" % (self.opts.app_label, codename))

    def has_add_permission(self, user):
        """
        For typical models, whether or not a user can add an object depends
        on their permissions on that model
        """
        return self._has_model_wide_permission(user, 'add')

    def has_edit_permission(self, user):
        """
        For typical models, whether or not a user can edit an object depends
        on their permissions on that model
        """
        return self._has_model_wide_permission(user, 'change')

    def has_delete_permission(self, user):
        """
        For typical models, whether or not a user can delete an object
        depends on their permissions on that model
        """
        return self._has_model_wide_permission(user, 'delete')

    def can_edit_object(self, user, obj):
        """
        Used from within templates to decide what functionality to allow
        for a specific object. For typical models, we just return the
        model-wide permission.
        """
        return self.has_edit_permission(user)

    def can_delete_object(self, user, obj):
        """
        Used from within templates to decide what functionality to allow
        for a specific object. For typical models, we just return the
        model-wide permission.
        """
        return self.has_delete_permission(user)

    def can_unpublish_object(self, user, obj):
        """
        'Unpublishing' isn't really a valid option for models not extending
        Page, so we always return False
        """
        return False

    def can_copy_object(self, user, obj):
        """
        'Copying' isn't really a valid option for models not extending
        Page, so we always return False
        """
        return False

    def allow_list_view(self, user):
        """
        For typical models, we only want to allow viewing of the list page
        if the user has permission to do something
        """
        return (
            self.has_add_permission(user) or
            self.has_edit_permission(user) or
            self.has_delete_permission(user)
        )
class PagePermissionHelper(PermissionHelper):
    """
    Provides permission-related helper functions to effectively control
    what a user can and can't do to instances of a model extending
    Wagtail's Page model. It differs wildly from ModelPermissionHelper,
    because model-wide permissions aren't really relevant. We generally
    need to determine things on an object-specific basis.
    """

    def has_add_permission(self, user):
        """
        For models extending Page, whether or not a page of this type can
        be added somewhere in the tree essentially determines the add
        permission, rather than actual model-wide permissions
        """
        return self.get_valid_parent_pages(user).count() > 0

    def get_valid_parent_pages(self, user):
        """
        Identifies possible parent pages for the current user by first
        looking at allowed_parent_page_types() on self.model to limit
        options to the correct type of page, then checking permissions on
        those individual pages to make sure we have permission to add a
        subpage to it.
        """
        # Begin with an empty queryset, then union in pages of each
        # permitted parent type.
        candidates = Page.objects.none()
        for parent_type in self.model.allowed_parent_page_types():
            candidates = candidates | Page.objects.type(parent_type.model_class())
        # Drop any candidate the user may not add a subpage beneath.
        for page in candidates.all():
            if not page.permissions_for_user(user).can_add_subpage():
                candidates = candidates.exclude(pk=page.pk)
        return candidates

    def can_edit_object(self, user, obj):
        return obj.permissions_for_user(user).can_edit()

    def can_delete_object(self, user, obj):
        return obj.permissions_for_user(user).can_delete()

    def can_unpublish_object(self, user, obj):
        checker = obj.permissions_for_user(user)
        # Only live pages can be unpublished.
        return obj.live and checker.can_unpublish()

    def can_copy_object(self, user, obj):
        destination = obj.get_parent()
        return destination.permissions_for_user(user).can_publish_subpage()

    def allow_list_view(self, user):
        """
        For models extending Page, permitted actions are determined by
        permissions on individual objects. Rather than check for change
        permissions on every object individually (which would be quite
        resource intensive), we simply always allow the list view to be
        viewed, and limit further functionality when relevant.
        """
        return True
| {
"content_hash": "f23242f469ce76541e763f78eed8874d",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 78,
"avg_line_length": 35.95364238410596,
"alnum_prop": 0.6323448148830355,
"repo_name": "tomdyson/wagtailmodeladmin",
"id": "d45ef04e7657bae3b27bf07594bcef8ef24bebec",
"size": "5429",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtailmodeladmin/permission_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2606"
},
{
"name": "HTML",
"bytes": "9727"
},
{
"name": "Python",
"bytes": "69701"
}
],
"symlink_target": ""
} |
from django.db import models
# Create your models here.
class Site(models.Model):
    """Master record for a photovoltaic power station."""
    # uint serial number
    sn = models.CharField(
        verbose_name='电站序列号',
        max_length=20)
    # short name
    name = models.CharField(
        verbose_name='电站名称',
        max_length=40)
    # total meters number
    meter_num = models.IntegerField(verbose_name='电表数量',)
    # total inverters number
    inv_num = models.IntegerField(verbose_name='逆变器数量',)
    # address name
    address = models.CharField(verbose_name='电站地址', max_length=100)
    # site register date (set once, when the row is created)
    create_date = models.DateTimeField(verbose_name='建站日期', auto_now_add=True)
    # site updated date (refreshed on every save)
    update_date = models.DateTimeField(verbose_name='更新日期', auto_now=True)
    # Amends (subsidy) price
    subsidy_price = models.FloatField(verbose_name='补贴电价',)
    # Selling to SGCC price
    selling_price = models.FloatField(verbose_name='售电电价',)
    # Selling to user price
    # user_price = models.ForeignKey('SellPrice')
    # The total generating capacity
    total_capacity = models.FloatField(verbose_name='总发电量',)
    # paid generating capacity
    paid_capacity = models.FloatField(verbose_name='已支付发电量',)
    # Site status choices: (stored two-letter code, human-readable label)
    SITE_STATUS = (
        ('RD', 'Ready'),
        ('RN', 'Running'),
        ('BD', 'Malfunction'),
        ('SD', 'Shut down')
    )
    status = models.CharField(
        verbose_name='电站状态',
        max_length=2, choices=SITE_STATUS, default='RN')
    # Site type choices: (stored one-letter code, human-readable label)
    SITE_TYPE = (
        ('A', 'User site'),
        ('B', 'Company site'),
        ('C', 'Cooperate site'),
        ('D', 'Other type')
    )
    mode = models.CharField(
        verbose_name='电站类型',
        max_length=1, choices=SITE_TYPE, default='A')
    # additional note, JSON format
    note = models.TextField(verbose_name='备注', blank=True)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = ('电站信息')
class SellPrice(models.Model):
    """Time-of-use selling price tier for a station."""
    site = models.ForeignKey('Site')
    # Price tier choices: (stored one-letter code, human-readable label)
    PRICE_SEL = (
        ('P', 'Peak Price'),
        ('F', 'Flat Price'),
        ('V', 'Valley Price')
    )
    price_type = models.CharField(verbose_name='电价类型', max_length=1, choices=PRICE_SEL, default='F')
    price = models.FloatField(verbose_name='电价',)
    # End of the time window this tier applies to.
    end_time = models.TimeField(verbose_name='统计截至时间',)

    class Meta:
        verbose_name = ('售电电价')

    def __str__(self):
        return '电价类型:%s, 电价: %f元, 截至时间:%s' % (self.price_type, self.price, self.end_time)
class SiteStatics(models.Model):
    """Daily/monthly/yearly settlement statistics for a station."""
    site = models.ForeignKey(Site)
    # total Generating capacity for the settlement period
    total_capacity = models.FloatField(verbose_name='日发电量',)
    # Capacity split by time-of-use tier (peak / flat / valley).
    peak_capacity = models.FloatField(verbose_name='峰值电量')
    flat_capacity = models.FloatField(verbose_name='平峰电量')
    valley_capacity = models.FloatField(verbose_name="峰谷电量")
    # Monetary amounts per tier, plus subsidy and total selling amounts.
    peak_sum = models.FloatField(verbose_name='峰值金额')
    flat_sum = models.FloatField(verbose_name='平峰金额')
    valley_sum = models.FloatField(verbose_name='峰谷金额')
    subsidy_sum = models.FloatField(verbose_name='补贴金额')
    selling_sum = models.FloatField(verbose_name='售电金额')
    # Settlement date this row covers.
    cal_date = models.DateField(verbose_name='结算日期')
    # Aggregation granularity: daily / monthly / yearly.
    STATIC_TYPE = (
        ('D', '日结算'),
        ('M', '月结算'),
        ('Y', '年结算')
    )
    static_type = models.CharField(max_length=1, default='D', verbose_name='统计类型', choices=STATIC_TYPE)
    # When this row was (re)computed.
    stamp = models.DateTimeField(auto_now=True, verbose_name='计算时间')

    class Meta:
        verbose_name = ('电站统计')

    def __str__(self):
        return '电站:%s, 类型:%s, 总电量:%f, 平峰:%f, %f, 峰值:%f, %f, 峰谷:%f, %f' % \
            (self.site.name, self.static_type, self.total_capacity, self.flat_capacity, \
            self.flat_sum, self.peak_capacity, self.peak_sum, self.valley_capacity, \
            self.valley_sum)
class SiteError(models.Model):
    """Fault / exception log entry for a station."""
    stamp = models.DateTimeField(auto_now=True)
    site = models.ForeignKey(Site)
    status = models.IntegerField(verbose_name='异常代码', blank=True)
    # snapshot of the uploaded log at the time of the fault
    snapshot = models.TextField(verbose_name='上传日志', blank=True)

    class Meta:
        verbose_name = ('电站故障记录')

    def __str__(self):
        # The class originally defined __str__ twice; the first version
        # referenced a non-existent `self.uptime` attribute and was dead
        # code (silently shadowed by this one), so it has been removed.
        return '电站:%s, 异常代码:%d' % (self.site.name, self.status)
class Env(models.Model):
    """Primary environment reading (irradiance / temperature) for a station."""
    site = models.ForeignKey('Site')
    solarIrr = models.FloatField(verbose_name='光照度', blank=True)
    temp = models.FloatField(verbose_name='温度', blank=True)
    stamp = models.DateTimeField(verbose_name='时间戳',)

    class Meta:
        verbose_name = ('环境信息')

    def __str__(self):
        # The class originally defined __str__ twice; the first version
        # ("Irradiance=%d") was dead code silently shadowed by this one,
        # so it has been removed.
        return '电站:%s, 光照:%f, 温度%s, 时间%s' % (self.site.name, self.solarIrr, self.temp, self.stamp)
class Env_minor(models.Model):
    """Secondary environment readings (humidity, wind, pressure) for a station."""
    site = models.ForeignKey('Site')
    humidity = models.FloatField(verbose_name='湿度', blank=True)
    windSpeed = models.FloatField(verbose_name='风速', blank=True)
    windDir = models.FloatField(verbose_name='风向', blank=True)
    atmo = models.FloatField(verbose_name='大气压', blank=True)
    # Temperature reported by the measuring instrument itself.
    tempPv = models.FloatField(verbose_name='仪器温度', blank=True)
    stamp = models.DateTimeField(verbose_name='时间戳',)

    def __str__(self):
        # NOTE(review): the format string prints 温度 (temperature) but is
        # given self.humidity — labels and values look mismatched; confirm.
        return '电站:%s, 温度:%f, 风速:%f, 风向:%f,大气压:%f, 仪器温度:%f, 时间:%s' % \
            (self.site.name, self.humidity, self.windSpeed, self.windDir, self.atmo,
            self.tempPv, self.stamp)

    class Meta:
        verbose_name = ('次要环境信息')
class Meter(models.Model):
    """Primary electricity-meter readings (energy totals) for a station."""
    site = models.ForeignKey('Site')
    typeID = models.IntegerField(verbose_name='电表ID',)
    index = models.SmallIntegerField(verbose_name='电表序号',)
    # Combined / forward / reverse active energy readings.
    ep = models.FloatField(verbose_name='组合有功电度', blank=True)
    epp = models.FloatField(verbose_name='正向有功电度', blank=True)
    epn = models.FloatField(verbose_name='负向有功电度', blank=True)
    stamp = models.DateTimeField(verbose_name='时间戳',)

    def __str__(self):
        return '电站:%s, ID:%d, 序号:%d, EP:%f, EPP:%f, EPN:%f, 时间:%s' % \
            (self.site.name, self.typeID, self.index, self.ep, self.epp, self.epn, self.stamp)

    class Meta:
        verbose_name = ('电表主要读数')
class Meter_minor(models.Model):
    """Secondary electricity-meter readings (per-phase electrical values)."""
    site = models.ForeignKey('Site')
    typeID = models.IntegerField(verbose_name='电表ID',)
    index = models.SmallIntegerField(verbose_name='电表序号',)
    stamp = models.DateTimeField(verbose_name='时间戳',)
    # Line-to-line voltages.
    uab = models.FloatField(verbose_name='AB相电压', blank=True)
    ubc = models.FloatField(verbose_name='BC相电压', blank=True)
    uca = models.FloatField(verbose_name='CA相电压', blank=True)
    # Per-phase voltages.
    ua = models.FloatField(verbose_name='A相电压', blank=True)
    ub = models.FloatField(verbose_name='B相电压', blank=True)
    uc = models.FloatField(verbose_name='C相电压', blank=True)
    # Per-phase currents.
    ia = models.FloatField(verbose_name='A相电流', blank=True)
    ib = models.FloatField(verbose_name='B相电流', blank=True)
    ic = models.FloatField(verbose_name='C相电流', blank=True)
    # Per-phase frequencies.
    fa = models.FloatField(verbose_name='A相频率', blank=True)
    fb = models.FloatField(verbose_name='B相频率', blank=True)
    fc = models.FloatField(verbose_name='C相频率', blank=True)
    # Active power per phase and total.
    pa = models.IntegerField(verbose_name='A相有功功率', blank=True)
    pb = models.IntegerField(verbose_name='B相有功功率', blank=True)
    pc = models.IntegerField(verbose_name='C相有功功率', blank=True)
    p = models.IntegerField(verbose_name='总有功功率', blank=True)
    # Reactive power per phase and total, apparent power, power factor.
    qa = models.IntegerField(verbose_name='A相无功功率', blank=True)
    qb = models.IntegerField(verbose_name='B相无功功率', blank=True)
    qc = models.IntegerField(verbose_name='C相无功功率', blank=True)
    q = models.IntegerField(verbose_name='总无功功率', blank=True)
    s = models.IntegerField(verbose_name='总视在功率', blank=True)
    pf = models.FloatField(verbose_name='功率因数', blank=True)
    # Reactive energy and the meter's own real-time clock string.
    eq = models.IntegerField(verbose_name='无功电度', blank=True)
    rtc = models.CharField(verbose_name='电表RTC', max_length=21, blank=True)

    def __str__(self):
        return '电站:%s, ID:%d, 序号:%d, UAB:%f, Ia:%f, Fa:%f, Stamp:%s' % \
            (self.site.name, self.typeID, self.index, self.uab, self.ia, self.fa, self.stamp)

    class Meta:
        verbose_name = ('电表次要读数')
class Inverter(models.Model):
    """Primary inverter readings (power and energy totals) for a station."""
    site = models.ForeignKey('Site')
    typeID = models.IntegerField(verbose_name='逆变器ID', )
    index = models.SmallIntegerField(verbose_name='逆变器序号')
    sn = models.CharField(verbose_name='逆变器序列号', max_length=20)
    # Total active power, daily energy, lifetime energy.
    pac = models.FloatField(verbose_name='总有功功率', blank=True)
    epd = models.FloatField(verbose_name='当日发电量', blank=True)
    ept = models.FloatField(verbose_name='总发电量', blank=True)
    stamp = models.DateTimeField(verbose_name='时间戳', )

    def __str__(self):
        return '电站:%s, ID:%d, 序号:%d, sn:%s, EPD:%f, EPT:%f, 时间:%s' % \
            (self.site.name, self.typeID, self.index, self.sn, self.epd, self.ept, self.stamp)

    class Meta:
        verbose_name = ('逆变器主要读数')
class Inverter_minor(models.Model):
    """Secondary inverter readings (per-phase values, DC strings, status)."""
    site = models.ForeignKey('Site')
    typeID = models.IntegerField(verbose_name='逆变器ID', )
    index = models.SmallIntegerField(verbose_name='逆变器序号', )
    sn = models.CharField(verbose_name='逆变器序列号', max_length=20)
    # Aggregate DC power, reactive power, power factor, grid frequency.
    pdc = models.FloatField(verbose_name='总直流功率', blank=True)
    qac = models.FloatField(verbose_name='总无功功率', blank=True)
    pf = models.FloatField(verbose_name='功率因子', blank=True)
    freq = models.FloatField(verbose_name='电网频率', blank=True)
    # Daily generating hours and lifetime running hours.
    etd = models.FloatField(verbose_name='当日发电时长', blank=True)
    ett = models.FloatField(verbose_name='总运行时长', blank=True)
    # Line-to-line voltages.
    uab = models.FloatField(verbose_name='AB线电压', blank=True)
    ubc = models.FloatField(verbose_name='BC线电压', blank=True)
    uca = models.FloatField(verbose_name='CA线电压', blank=True)
    # Per-phase voltages.
    ua = models.FloatField(verbose_name='A相电压', blank=True)
    ub = models.FloatField(verbose_name='B相电压', blank=True)
    uc = models.FloatField(verbose_name='C相电压', blank=True)
    # Per-phase currents.
    ia = models.FloatField(verbose_name='A相电流', blank=True)
    ib = models.FloatField(verbose_name='B相电流', blank=True)
    ic = models.FloatField(verbose_name='C相电流', blank=True)
    # Per-phase frequencies.
    fa = models.FloatField(verbose_name='A相频率', blank=True)
    fb = models.FloatField(verbose_name='B相频率', blank=True)
    fc = models.FloatField(verbose_name='C相频率', blank=True)
    # Per-phase power.
    pa = models.IntegerField(verbose_name='A相功率', blank=True)
    pb = models.IntegerField(verbose_name='B相功率', blank=True)
    pc = models.IntegerField(verbose_name='C相功率', blank=True)
    # DC voltage/current per PV string input (1-4).
    upv1 = models.FloatField(verbose_name='PV1直流电压', blank=True)
    ipv1 = models.FloatField(verbose_name='PV1直流电流', blank=True)
    upv2 = models.FloatField(verbose_name='PV2直流电压', blank=True)
    ipv2 = models.FloatField(verbose_name='PV2直流电流', blank=True)
    upv3 = models.FloatField(verbose_name='PV3直流电压', blank=True)
    ipv3 = models.FloatField(verbose_name='PV3直流电流', blank=True)
    upv4 = models.FloatField(verbose_name='PV4直流电压', blank=True)
    ipv4 = models.FloatField(verbose_name='PV4直流电流', blank=True)
    # Efficiency, cabinet temperature, status code, fault code string.
    eff = models.FloatField(verbose_name='逆变器效率', blank=True)
    tmp = models.FloatField(verbose_name='机箱内温度', blank=True)
    status = models.IntegerField(verbose_name='设备状态', blank=True)
    fcode = models.CharField(verbose_name='故障时间', max_length=21, blank=True)
    stamp = models.DateTimeField(verbose_name='时间戳',)
    rtc = models.CharField(verbose_name='设备RTC', max_length=21, blank=True)

    def __str__(self):
        return '电站:%s, ID:%d, 序号:%s, UAB:%f, Ia:%f, Fa:%f, Stamp:%s' % \
            (self.site.name, self.typeID, self.index, self.uab, self.ia, self.fa, self.stamp)

    class Meta:
        verbose_name = ('逆变器次要读数')
| {
"content_hash": "2d5736f12868d86d803657c4f07c573e",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 110,
"avg_line_length": 42.785185185185185,
"alnum_prop": 0.6528739612188366,
"repo_name": "mageelen/snow",
"id": "dabcf7303dce7382690eaff149dbf5d99845d65e",
"size": "12732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "device/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "107849"
},
{
"name": "Python",
"bytes": "57627"
}
],
"symlink_target": ""
} |
"""Support for the for Danfoss Air HRV sswitches."""
import logging
from homeassistant.components.switch import (
SwitchDevice)
from homeassistant.components.danfoss_air import DOMAIN \
as DANFOSS_AIR_DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Danfoss Air HRV switch platform."""
    from pydanfossair.commands import ReadCommand, UpdateCommand

    data = hass.data[DANFOSS_AIR_DOMAIN]

    # (name, state-read command, activate command, deactivate command)
    switch_specs = [
        ("Danfoss Air Boost",
         ReadCommand.boost,
         UpdateCommand.boost_activate,
         UpdateCommand.boost_deactivate),
    ]

    entities = [
        DanfossAir(data, name, state_cmd, on_cmd, off_cmd)
        for name, state_cmd, on_cmd, off_cmd in switch_specs
    ]
    add_entities(entities)
class DanfossAir(SwitchDevice):
    """Representation of a Danfoss Air HRV Switch."""

    def __init__(self, data, name, state_command, on_command, off_command):
        """Initialize the switch.

        Args:
          data: Shared Danfoss Air data/connection object.
          name: Display name for this switch entity.
          state_command: Command used to read the current state.
          on_command: Command sent to turn the switch on.
          off_command: Command sent to turn the switch off.
        """
        self._data = data
        self._name = name
        self._state_command = state_command
        self._on_command = on_command
        self._off_command = off_command
        self._state = None

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        _LOGGER.debug("Turning on switch with command %s", self._on_command)
        self._data.update_state(self._on_command, self._state_command)

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        # Fixed log-message typo: "Turning of switch" -> "Turning off switch".
        _LOGGER.debug("Turning off switch with command %s", self._off_command)
        self._data.update_state(self._off_command, self._state_command)

    def update(self):
        """Fetch new state data for the switch."""
        self._data.update()
        self._state = self._data.get_value(self._state_command)
        if self._state is None:
            _LOGGER.debug("Could not get data for %s", self._state_command)
| {
"content_hash": "e222ce11d09728a6cc07529462ec98b1",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 77,
"avg_line_length": 29.555555555555557,
"alnum_prop": 0.6221804511278195,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "ec85757be59dd18836b41c47c4f1c735c9a42974",
"size": "2128",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/danfoss_air/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
} |
"""
Testing that functions from compat work as expected
"""
from pandas.compat import (
range, zip, map, filter,
lrange, lzip, lmap, lfilter,
builtins
)
import unittest
import nose
import pandas.util.testing as tm
class TestBuiltinIterators(tm.TestCase):
    """Checks that pandas.compat's range/zip/map/filter return lazy
    iterators while the l-prefixed variants (lrange/lzip/...) return lists.

    NOTE: the trailing commas below (``actual = [...],``, ``lengths = 10,``)
    are deliberate: they build 1-tuples which are then extended with ``+=``,
    so ``actual``, ``expected`` and ``lengths`` are parallel tuples of
    test cases consumed together by ``check_result``.
    """

    def check_result(self, actual, expected, lengths):
        # Each case pairs an iterator result with its list counterpart;
        # both must have the expected length and identical contents.
        for (iter_res, list_res), exp, length in zip(actual, expected, lengths):
            self.assertNotIsInstance(iter_res, list)
            tm.assert_isinstance(list_res, list)
            iter_res = list(iter_res)  # materialize the lazy version
            self.assertEqual(len(list_res), length)
            self.assertEqual(len(iter_res), length)
            self.assertEqual(iter_res, exp)
            self.assertEqual(list_res, exp)

    def test_range(self):
        # Two cases: range(10) and range(1, 10, 2).
        actual1 = range(10)
        actual2 = lrange(10)
        actual = [actual1, actual2],
        expected = list(builtins.range(10)),
        lengths = 10,

        actual1 = range(1, 10, 2)
        actual2 = lrange(1, 10, 2)
        actual += [actual1, actual2],
        lengths += 5,
        expected += list(builtins.range(1, 10, 2)),
        self.check_result(actual, expected, lengths)

    def test_map(self):
        # Three-argument map over three parallel ranges.
        func = lambda x, y, z: x + y + z
        lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
        actual1 = map(func, *lst)
        actual2 = lmap(func, *lst)
        actual = [actual1, actual2],
        expected = list(builtins.map(func, *lst)),
        lengths = 10,
        self.check_result(actual, expected, lengths)

    def test_filter(self):
        # Identity predicate drops the single falsy element (0) -> 9 left.
        func = lambda x: x
        lst = list(builtins.range(10))
        actual1 = filter(func, lst)
        actual2 = lfilter(func, lst)
        actual = [actual1, actual2],
        lengths = 9,
        expected = list(builtins.filter(func, lst)),
        self.check_result(actual, expected, lengths)

    def test_zip(self):
        lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
        actual = [zip(*lst), lzip(*lst)],
        expected = list(builtins.zip(*lst)),
        lengths = 10,
        self.check_result(actual, expected, lengths)
if __name__ == '__main__':
    # Allow running this test module directly through nose.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   # '--with-coverage', '--cover-package=pandas.core'],
                   exit=False)
| {
"content_hash": "ef0fba203166bc91ebc8f373247019dc",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 80,
"avg_line_length": 33.34285714285714,
"alnum_prop": 0.5779777206512425,
"repo_name": "bdh1011/wau",
"id": "242b54c84d0ee94c87f7bd5eccbc4552637ef6dd",
"size": "2358",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/pandas/tests/test_compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1176"
},
{
"name": "C",
"bytes": "5022853"
},
{
"name": "C++",
"bytes": "43676"
},
{
"name": "CSS",
"bytes": "10359"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Groff",
"bytes": "7236"
},
{
"name": "HTML",
"bytes": "1709320"
},
{
"name": "JavaScript",
"bytes": "1200059"
},
{
"name": "Jupyter Notebook",
"bytes": "310219"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "Makefile",
"bytes": "112163"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Perl",
"bytes": "171375"
},
{
"name": "Python",
"bytes": "49407229"
},
{
"name": "Ruby",
"bytes": "58403"
},
{
"name": "Shell",
"bytes": "47672"
},
{
"name": "Smarty",
"bytes": "22599"
},
{
"name": "Tcl",
"bytes": "426334"
},
{
"name": "XSLT",
"bytes": "153073"
}
],
"symlink_target": ""
} |
"""Demo platform for the Device tracker component."""
import random
from homeassistant.components.device_tracker import DOMAIN
def setup_scanner(hass, config, see, discovery_info=None):
    """Set up the demo tracker."""

    def _jitter():
        """Return a small random coordinate offset."""
        magnitude = (random.randrange(500, 2000)) / 2e5
        return magnitude * random.choice((-1, 1))

    def _report_random(dev_id, name):
        """Report a randomized sighting for one device."""
        see(
            dev_id=dev_id,
            host_name=name,
            gps=(hass.config.latitude + _jitter(),
                 hass.config.longitude + _jitter()),
            gps_accuracy=random.randrange(50, 150),
            battery=random.randrange(10, 90),
        )

    def observe(call=None):
        """Observe three entities."""
        _report_random("demo_paulus", "Paulus")
        _report_random("demo_anne_therese", "Anne Therese")

    # One immediate round of sightings, plus one fixed device.
    observe()
    see(
        dev_id="demo_home_boy",
        host_name="Home Boy",
        gps=[hass.config.latitude - 0.00002, hass.config.longitude + 0.00002],
        gps_accuracy=20,
        battery=53,
    )
    # Re-run the random sightings on demand via the 'demo' service.
    hass.services.register(DOMAIN, "demo", observe)
    return True
| {
"content_hash": "e183d30bd20ac5648b614b536c618f04",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 84,
"avg_line_length": 27.829268292682926,
"alnum_prop": 0.5898334794040315,
"repo_name": "qedi-r/home-assistant",
"id": "fba8095efd64203f5ff02cfb44c8ebc2e50f3648",
"size": "1141",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/demo/device_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.sparse as sp
from menpo.shape import TriMesh, PointCloud
from menpo.transform import Translation, UniformScale, AlignmentSimilarity
from menpo3d.vtkutils import trimesh_to_vtk, VTKClosestPointLocator
from menpo3d.morphablemodel.shapemodel import ShapeModel
import os
import sys
from contextlib import contextmanager
import warnings
@contextmanager
def stdout_redirected(to=os.devnull):
    r"""
    Context manager that redirects the process-level stdout (file
    descriptor 1), including output written by C extensions, to *to*.

    import os

    with stdout_redirected(to=filename):
        print("from Python")
        os.system("echo non-Python applications are also supported")
    """
    # Capture the OS-level file descriptor behind Python's stdout.
    fd = sys.stdout.fileno()

    # assert that Python and C stdio write using the same file descriptor
    # assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stdout")) == fd == 1

    def redirect_stdout(to):
        # Order matters here: flush/close the Python-level object first,
        # then repoint the underlying descriptor, then rebuild sys.stdout
        # on top of the redirected descriptor.
        sys.stdout.close()  # + implicit flush()
        os.dup2(to.fileno(), fd)  # fd writes to 'to' file
        sys.stdout = os.fdopen(fd, 'w')  # Python writes to fd

    # Duplicate the original descriptor so it can be restored afterwards.
    with os.fdopen(os.dup(fd), 'w') as old_stdout:
        with open(to, 'w') as file:
            redirect_stdout(to=file)
        try:
            yield  # allow code to be run with the redirected stdout
        finally:
            # restore stdout.
            # buffering and flags such as CLOEXEC may be different
            redirect_stdout(to=old_stdout)
try:
    try:
        # First try the newer scikit-sparse namespace
        from sksparse.cholmod import cholesky_AAt
    except ImportError:
        # Fall back to the older scikits.sparse namespace
        from scikits.sparse.cholmod import cholesky_AAt
    # user has cholesky available - provide a fast solve
    def spsolve(sparse_X, dense_b):
        # Solve the normal equations (X^T X) w = X^T b via a sparse
        # Cholesky factorization of X X^T's transpose form.
        # wrap the cholesky call in a context manager that swallows the
        # low-level std-out to stop it from swamping our stdout (these low-level
        # prints come from METIS, but the solution behaves as normal)
        with stdout_redirected():
            factor = cholesky_AAt(sparse_X.T)
        # NOTE(review): .toarray() implies the solve result is sparse, i.e.
        # despite its name `dense_b` is expected to be a sparse matrix here
        # - confirm against callers.
        return factor(sparse_X.T.dot(dense_b)).toarray()
except ImportError:
    # fallback to (much slower) scipy solve
    from scipy.sparse.linalg import spsolve as scipy_spsolve
    def spsolve(sparse_X, dense_b):
        # Same normal-equations solve, without the suitesparse speed-up.
        warnings.warn("suitesparse is not installed - NICP will run "
                      "considerably (~5-10x) slower. If possible install "
                      "suitesparse.")
        return scipy_spsolve(sparse_X.T.dot(sparse_X),
                             sparse_X.T.dot(dense_b)).toarray()
def node_arc_incidence_matrix(source):
    """Build the node-arc (vertex-edge) incidence matrix of a mesh.

    Each row corresponds to one unique edge of ``source`` and contains
    -1 at the edge's first vertex and +1 at its second vertex.

    Returns the sparse COO incidence matrix together with the
    ``(n_edges, 2)`` array of unique edge vertex-index pairs.
    """
    unique_edge_pairs = source.unique_edge_indices()
    n_edges = unique_edge_pairs.shape[0]
    edge_ids = np.arange(n_edges)
    # Row index appears twice per edge: once for each endpoint.
    rows = np.concatenate((edge_ids, edge_ids))
    # First all start vertices (column 0), then all end vertices (column 1).
    cols = unique_edge_pairs.T.ravel()
    values = np.concatenate((-np.ones(n_edges), np.ones(n_edges)))
    return sp.coo_matrix((values, (rows, cols))), unique_edge_pairs
def validate_weights(label, weights, n_points, n_iterations=None,
                     verbose=False):
    """Validate a per-iteration weights list.

    Raises ValueError if the number of weights disagrees with
    ``n_iterations``, or if any per-vertex (ndarray) weight does not have
    shape ``(n_points,)``. Scalars and None entries are accepted as
    global weights.
    """
    if n_iterations is not None and len(weights) != n_iterations:
        raise ValueError('Invalid {label}: - due to other weights there are '
                         '{n_iterations} iterations but {n_weights} {label} '
                         'were provided'.format(label=label,
                                                n_iterations=n_iterations,
                                                n_weights=len(weights)))
    # Collect a description of every per-vertex weight with the wrong shape.
    bad_shapes = ['({}): {}'.format(index, weight.shape[0])
                  for index, weight in enumerate(weights)
                  if isinstance(weight, np.ndarray) and
                  weight.shape != (n_points,)]
    if verbose and len(weights) >= 1:
        # Report whether the first weight implies per-vertex or global mode.
        if isinstance(weights[0], np.ndarray):
            print('Using per-vertex {label}'.format(label=label))
        else:
            print('Using global {label}'.format(label=label))
    if bad_shapes:
        raise ValueError('Invalid {label}: expected shape ({n_points},) '
                         'got: {invalid_cases}'.format(
                             label=label, n_points=n_points,
                             invalid_cases=', '.join(bad_shapes)))
def non_rigid_icp(source, target, eps=1e-3, landmark_group=None,
                  stiffness_weights=None, data_weights=None,
                  landmark_weights=None, generate_instances=False,
                  verbose=False):
    """Non-rigidly deform ``source`` to optimally align with ``target``.

    Thin convenience wrapper: builds the NICP iteration generator and
    hands it to the handler, which either returns the generator itself
    (``generate_instances=True``) or exhausts it and returns only the
    final fitted mesh.
    """
    nicp_generator = non_rigid_icp_generator(
        source, target, eps=eps,
        landmark_group=landmark_group,
        landmark_weights=landmark_weights,
        stiffness_weights=stiffness_weights,
        data_weights=data_weights,
        verbose=verbose)
    # The handler decides whether the caller receives per-iteration
    # details or just the final result.
    return non_rigid_icp_generator_handler(nicp_generator,
                                           generate_instances)
def active_non_rigid_icp(model, target, eps=1e-3,
                         stiffness_weights=None, data_weights=None,
                         landmark_group=None, landmark_weights=None,
                         model_mean_landmarks=None,
                         generate_instances=False, verbose=False):
    """Active NICP: non-rigid ICP constrained to a shape model's subspace.

    Fits ``target`` starting from (an instance of) ``model``, projecting
    the deformed mesh back onto the model after every iteration so the
    result stays within the model's span.

    Raises ValueError if a landmark_group is given without
    model_mean_landmarks.
    """
    model_mean = model.mean()
    if landmark_group is not None:
        # user better have provided model landmarks!
        if model_mean_landmarks is None:
            raise ValueError(
                'For Active NICP with landmarks the model_mean_landmarks '
                'need to be provided.')
        shape_model = ShapeModel(model)
        source_lms = model_mean_landmarks
        target_lms = target.landmarks[landmark_group]
        # Index of the mean-mesh vertex nearest to each model landmark.
        model_lms_index = model_mean.distance_to(source_lms).argmin(axis=0)
        # Restrict the shape model to just the landmark vertices.
        shape_model_lms = shape_model.mask_points(model_lms_index)
        # Sim align the target lms to the mean before projecting
        target_lms_aligned = AlignmentSimilarity(target_lms,
                                                 source_lms).apply(target_lms)
        # project to learn the weights for the landmark model
        weights = shape_model_lms.project(target_lms_aligned,
                                          n_components=20)
        # use these weights on the dense shape model to generate an improved
        # instance
        source = model.instance(weights)
        # update the source landmarks (for the alignment below)
        source.landmarks[landmark_group] = PointCloud(source.points[
            model_lms_index])
    else:
        # Start from the mean of the model
        source = model_mean
    # project onto the shape model to restrict the basis
    def project_onto_model(instance):
        return model.reconstruct(instance)
    # call the generator version of NICP, always returning a generator
    generator = non_rigid_icp_generator(source, target, eps=eps,
                                        stiffness_weights=stiffness_weights,
                                        data_weights=data_weights,
                                        landmark_weights=landmark_weights,
                                        landmark_group=landmark_group,
                                        v_i_update_func=project_onto_model,
                                        verbose=verbose)
    # the handler decides whether the user get's details and each iteration
    # returned, or just the final result.
    return non_rigid_icp_generator_handler(generator, generate_instances)
def non_rigid_icp_generator_handler(generator, generate_instances):
    """Return either the NICP iteration generator or just the final mesh.

    Parameters
    ----------
    generator : generator
        Yields ``(instance, info_dict)`` pairs, one per NICP iteration.
    generate_instances : bool
        If True, hand the generator straight back so the caller can
        inspect per-iteration results. If False, exhaust it and return
        only the final fitted instance.

    Raises
    ------
    ValueError
        If ``generate_instances`` is False and the generator yields
        nothing. (The original code referenced an unbound local in this
        case, raising a confusing UnboundLocalError.)
    """
    if generate_instances:
        # the user wants to inspect results per-iteration - return the
        # iterator directly to them
        return generator
    # the user is not interested in per-iteration results. Exhaust the
    # generator ourselves and keep only the last yielded pair.
    last = None
    for last in generator:
        pass
    if last is None:
        raise ValueError('NICP generator yielded no iterations')
    # last is an (instance, info_dict) tuple - return only the instance.
    return last[0]
def non_rigid_icp_generator(source, target, eps=1e-3,
                            stiffness_weights=None, data_weights=None,
                            landmark_group=None, landmark_weights=None,
                            v_i_update_func=None, verbose=False):
    r"""
    Deforms the source trimesh to optimally align with the target.

    Generator form of non-rigid ICP: yields ``(instance, info_dict)``
    after every inner iteration, where ``instance`` is the current
    deformed mesh (restored to the original scale) and ``info_dict``
    carries per-iteration diagnostics (stiffness, masks, deltas, ...).

    Parameters
    ----------
    source, target : TriMesh
        The template to deform and the mesh to fit against.
    eps : float
        Convergence threshold on the normalised change in the transform.
    stiffness_weights, data_weights, landmark_weights : list or None
        Per-outer-iteration weights; scalars or per-vertex ndarrays
        (landmark weights are scalar only). Defaults are used when None.
    landmark_group : str or None
        Landmark group name used for the initial similarity alignment
        and the landmark constraint term.
    v_i_update_func : callable or None
        Optional hook applied to the deformed mesh each iteration
        (used by Active NICP to project back onto a shape model).
    verbose : bool
        Print progress information.
    """
    # If landmarks are provided, we should always start with a simple
    # AlignmentSimilarity between the landmarks to initialize optimally.
    if landmark_group is not None:
        if verbose:
            print("'{}' landmarks will be used as "
                  "a landmark constraint.".format(landmark_group))
            print("performing similarity alignment using landmarks")
        lm_align = AlignmentSimilarity(source.landmarks[landmark_group],
                                       target.landmarks[landmark_group]).as_non_alignment()
        source = lm_align.apply(source)
    # Scale factors completely change the behavior of the algorithm - always
    # rescale the source down to a sensible size (so it fits inside box of
    # diagonal 1) and is centred on the origin. We'll undo this after the fit
    # so the user can use whatever scale they prefer.
    tr = Translation(-1 * source.centre())
    sc = UniformScale(1.0 / np.sqrt(np.sum(source.range() ** 2)), 3)
    prepare = tr.compose_before(sc)
    source = prepare.apply(source)
    target = prepare.apply(target)
    # store how to undo the similarity transform
    restore = prepare.pseudoinverse()
    n_dims = source.n_dims
    # Homogeneous dimension (1 extra for translation effects)
    h_dims = n_dims + 1
    points, trilist = source.points, source.trilist
    n = points.shape[0]  # record number of points
    edge_tris = source.boundary_tri_index()
    M_s, unique_edge_pairs = node_arc_incidence_matrix(source)
    # weight matrix
    G = np.identity(n_dims + 1)
    M_kron_G_s = sp.kron(M_s, G)
    # build octree for finding closest points on target.
    target_vtk = trimesh_to_vtk(target)
    closest_points_on_target = VTKClosestPointLocator(target_vtk)
    # save out the target normals. We need them for the weight matrix.
    target_tri_normals = target.tri_normals()
    # init transformation
    X_prev = np.tile(np.zeros((n_dims, h_dims)), n).T
    v_i = points
    if stiffness_weights is not None:
        if verbose:
            print('using user-defined stiffness_weights')
        validate_weights('stiffness_weights', stiffness_weights,
                         source.n_points, verbose=verbose)
    else:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        stiffness_weights = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]
        if verbose:
            print('using default '
                  'stiffness_weights: {}'.format(stiffness_weights))
    n_iterations = len(stiffness_weights)
    if landmark_weights is not None:
        if verbose:
            print('using user defined '
                  'landmark_weights: {}'.format(landmark_weights))
    elif landmark_group is not None:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        landmark_weights = [5, 2, .5, 0, 0, 0, 0, 0]
        if verbose:
            print('using default '
                  'landmark_weights: {}'.format(landmark_weights))
    else:
        # no landmark_weights provided - no landmark_group in use. We still
        # need a landmark group for the iterator
        landmark_weights = [None] * n_iterations
    # We should definitely have some landmark weights set now - check the
    # number is correct.
    # Note we say verbose=False, as we have done custom reporting above, and
    # per-vertex landmarks are not supported.
    validate_weights('landmark_weights', landmark_weights, source.n_points,
                     n_iterations=n_iterations, verbose=False)
    if data_weights is not None:
        if verbose:
            print('using user-defined data_weights')
        validate_weights('data_weights', data_weights,
                         source.n_points, n_iterations=n_iterations,
                         verbose=verbose)
    else:
        data_weights = [None] * n_iterations
        if verbose:
            print('Not customising data_weights')
    # we need to prepare some indices for efficient construction of the D
    # sparse matrix.
    row = np.hstack((np.repeat(np.arange(n)[:, None], n_dims, axis=1).ravel(),
                     np.arange(n)))
    x = np.arange(n * h_dims).reshape((n, h_dims))
    col = np.hstack((x[:, :n_dims].ravel(),
                     x[:, n_dims]))
    o = np.ones(n)
    if landmark_group is not None:
        source_lm_index = source.distance_to(
            source.landmarks[landmark_group]).argmin(axis=0)
        target_lms = target.landmarks[landmark_group]
        U_L = target_lms.points
        n_landmarks = target_lms.n_points
        lm_mask = np.in1d(row, source_lm_index)
        col_lm = col[lm_mask]
        # pull out the rows for the lms - but the values are
        # all wrong! need to map them back to the order of the landmarks
        row_lm_to_fix = row[lm_mask]
        source_lm_index_l = list(source_lm_index)
        row_lm = np.array([source_lm_index_l.index(r) for r in row_lm_to_fix])
    for i, (alpha, beta, gamma) in enumerate(zip(stiffness_weights,
                                                 landmark_weights,
                                                 data_weights), 1):
        alpha_is_per_vertex = isinstance(alpha, np.ndarray)
        if alpha_is_per_vertex:
            # stiffness is provided per-vertex
            if alpha.shape[0] != source.n_points:
                raise ValueError()
            alpha_per_edge = alpha[unique_edge_pairs].mean(axis=1)
            alpha_M_s = sp.diags(alpha_per_edge).dot(M_s)
            alpha_M_kron_G_s = sp.kron(alpha_M_s, G)
        else:
            # stiffness is global - just a scalar multiply. Note that here
            # we don't have to recalculate M_kron_G_s
            alpha_M_kron_G_s = alpha * M_kron_G_s
        if verbose:
            a_str = (alpha if not alpha_is_per_vertex
                     else 'min: {:.2f}, max: {:.2f}'.format(alpha.min(),
                                                            alpha.max()))
            i_str = '{}/{}: stiffness: {}'.format(i, len(stiffness_weights), a_str)
            if landmark_group is not None:
                i_str += ' lm_weight: {}'.format(beta)
            print(i_str)
        j = 0
        while True:  # iterate until convergence
            j += 1  # track the iterations for this alpha/landmark weight
            # find nearest neighbour and the normals
            U, tri_indices = closest_points_on_target(v_i)
            # ---- WEIGHTS ----
            # 1.  Edges
            # Are any of the corresponding tris on the edge of the target?
            # Where they are we return a false weight (we *don't* want to
            # include these points in the solve)
            w_i_e = np.in1d(tri_indices, edge_tris, invert=True)
            # 2. Normals
            # Calculate the normals of the current v_i
            v_i_tm = TriMesh(v_i, trilist=trilist)
            v_i_n = v_i_tm.vertex_normals()
            # Extract the corresponding normals from the target
            u_i_n = target_tri_normals[tri_indices]
            # If the dot of the normals is lt 0.9 don't contrib to deformation
            w_i_n = (u_i_n * v_i_n).sum(axis=1) > 0.9
            # 3. Self-intersection
            # This adds approximately 12% to the running cost and doesn't seem
            # to be very critical in helping mesh fitting performance so for
            # now it's removed. Revisit later.
            # # Build an intersector for the current deformed target
            # intersect = build_intersector(to_vtk(v_i_tm))
            # # budge the source points 1% closer to the target
            # source = v_i + ((U - v_i) * 0.5)
            # # if the vector from source to target intersects the deformed
            # # template we don't want to include it in the optimisation.
            # problematic = [i for i, (s, t) in enumerate(zip(source, U))
            #                if len(intersect(s, t)[0]) > 0]
            # print(len(problematic) * 1.0 / n)
            # w_i_i = np.ones(v_i_tm.n_points, dtype=np.bool)
            # w_i_i[problematic] = False
            # Form the overall w_i from the normals, edge case
            # for now disable the edge constraint (it was noisy anyway)
            w_i = w_i_n
            # w_i = np.logical_and(w_i_n, w_i_e).astype(np.float)
            # we could add self intersection at a later date too...
            # w_i = np.logical_and(np.logical_and(w_i_n,
            #                                     w_i_e,
            #                                     w_i_i).astype(np.float)
            prop_w_i = (n - w_i.sum() * 1.0) / n
            prop_w_i_n = (n - w_i_n.sum() * 1.0) / n
            prop_w_i_e = (n - w_i_e.sum() * 1.0) / n
            # BUGFIX: test the per-iteration weight, not the list.
            # data_weights is always a list by this point, so the original
            # `if data_weights is not None:` was always true and multiplied
            # w_i by None (TypeError) whenever no data_weights were given.
            if gamma is not None:
                w_i = w_i * gamma
            # Build the sparse diagonal weight matrix
            # (np.float was removed from modern numpy - use builtin float,
            # which is what the alias always meant)
            W_s = sp.diags(w_i.astype(float)[None, :], [0])
            data = np.hstack((v_i.ravel(), o))
            D_s = sp.coo_matrix((data, (row, col)))
            to_stack_A = [alpha_M_kron_G_s, W_s.dot(D_s)]
            to_stack_B = [np.zeros((alpha_M_kron_G_s.shape[0], n_dims)),
                          U * w_i[:, None]]  # nullify nearest points by w_i
            if landmark_group is not None:
                D_L = sp.coo_matrix((data[lm_mask], (row_lm, col_lm)),
                                    shape=(n_landmarks, D_s.shape[1]))
                to_stack_A.append(beta * D_L)
                to_stack_B.append(beta * U_L)
            A_s = sp.vstack(to_stack_A).tocsr()
            B_s = sp.vstack(to_stack_B).tocsr()
            X = spsolve(A_s, B_s)
            # deform template
            v_i_prev = v_i
            v_i = D_s.dot(X)
            delta_v_i = v_i - v_i_prev
            if v_i_update_func:
                # custom logic is provided to update the current template
                # deformation. This is typically used by Active NICP.
                # take the v_i points matrix and convert back to a TriMesh in
                # the original space
                def_template = restore.apply(source.from_vector(v_i.ravel()))
                # perform the update
                updated_def_template = v_i_update_func(def_template)
                # convert back to points in the NICP space
                v_i = prepare.apply(updated_def_template.points)
            err = np.linalg.norm(X_prev - X, ord='fro')
            stop_criterion = err / np.sqrt(np.size(X_prev))
            if landmark_group is not None:
                src_lms = v_i[source_lm_index]
                # NOTE(review): sqrt is applied element-wise *before* the
                # sum, so this is a mean L1 (city-block) landmark error,
                # not mean Euclidean distance - preserved as-is, confirm
                # intent before changing.
                lm_err = np.sqrt((src_lms - U_L) ** 2).sum(axis=1).mean()
            if verbose:
                v_str = (' - {} stop crit: {:.3f} '
                         'total: {:.0%} norms: {:.0%} '
                         'edges: {:.0%}'.format(j, stop_criterion,
                                                prop_w_i, prop_w_i_n,
                                                prop_w_i_e))
                if landmark_group is not None:
                    v_str += ' lm_err: {:.4f}'.format(lm_err)
                print(v_str)
            X_prev = X
            # track the progress of the algorithm per-iteration
            info_dict = {
                'alpha': alpha,
                'iteration': j,
                'prop_omitted': prop_w_i,
                'prop_omitted_norms': prop_w_i_n,
                'prop_omitted_edges': prop_w_i_e,
                'delta': err,
                'mask_normals': w_i_n,
                'mask_edges': w_i_e,
                'mask_all': w_i,
                'nearest_points': restore.apply(U),
                'deformation_per_step': delta_v_i
            }
            current_instance = source.copy()
            current_instance.points = v_i.copy()
            if landmark_group:
                info_dict['beta'] = beta
                info_dict['lm_err'] = lm_err
                current_instance.landmarks[landmark_group] = PointCloud(src_lms)
            yield restore.apply(current_instance), info_dict
            if stop_criterion < eps:
                break
| {
"content_hash": "d4c00560601f9f2f791fa0e59941e330",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 91,
"avg_line_length": 41.72945891783567,
"alnum_prop": 0.5590452864620852,
"repo_name": "grigorisg9gr/menpo3d",
"id": "3cc66283ce36b5449840d6887f546188d38cd331",
"size": "20823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "menpo3d/correspond/nicp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "155"
},
{
"name": "Python",
"bytes": "387943"
},
{
"name": "Shell",
"bytes": "124"
}
],
"symlink_target": ""
} |
""" Sahana Eden Automated Tests - INV006 Create Catalog
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class CreateCatalog(SeleniumUnitTest):
    """Automated Selenium test INV006: create a supply Catalog."""
    def test_inv006_create_catalog(self):
        """
        @case: INV006
        @description: Create a Catalog
        @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
        @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
        """
        print "\n"
        # Login, if not-already done so
        self.login(account="admin", nexturl="supply/catalog/create")
        # Fill in and submit the supply_catalog create form.
        # Each tuple is (field, value[, widget-type]) - "option" marks a
        # select widget.
        self.create("supply_catalog",
                    [( "name",
                       "IFRC Food Catalogue" ),
                     ( "organisation_id",
                        "International Federation of Red Cross and Red Crescent Societies (IFRC)",
                        "option" ),
                     ( "comments",
                       "This is a test Catalogue")
                     ]
                     )
| {
"content_hash": "414b32fff62242bb1556d1a3c0f4dc8e",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 110,
"avg_line_length": 41.574074074074076,
"alnum_prop": 0.6418708240534521,
"repo_name": "madhurauti/Map-Polygon",
"id": "e05083cdce291bcb406c50ce57b3d7632ad453b5",
"size": "2270",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/tests/inv/create_catalog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "15527353"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "2202"
},
{
"name": "Python",
"bytes": "23300695"
},
{
"name": "Racket",
"bytes": "166"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.framework import core
# The OpTest framework requires static-graph mode.
paddle.enable_static()
# Shared RNG seed so NPU and CPU runs are directly comparable.
SEED = 2021
def gather_numpy(x, index, axis):
    """NumPy reference implementation of gather along ``axis``.

    Selects the slices of ``x`` indexed by ``index`` on the given axis.
    """
    # Bring the target axis to the front, fancy-index it, then move it back.
    moved = np.swapaxes(x, 0, axis)
    picked = moved[index, ...]
    return np.swapaxes(picked, 0, axis)
class TestGatherOp(OpTest):
    """Checks the NPU `gather` op's forward output and X-gradient."""
    def setUp(self):
        # Target the NPU backend for this OpTest case.
        self.set_npu()
        self.place = paddle.NPUPlace(0)
        self.op_type = "gather"
        # Subclasses override config() to vary shapes/dtypes.
        self.config()
        xnp = np.random.random(self.x_shape).astype(self.x_type)
        self.inputs = {
            'X': xnp,
            'Index': np.array(self.index).astype(self.index_type),
        }
        # Reference output: plain numpy fancy-indexing along axis 0.
        self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
    def set_npu(self):
        # Flag consumed by the OpTest framework to select the NPU device.
        self.__class__.use_npu = True
    def test_check_output(self):
        # Compare the op's forward result against the numpy reference.
        self.check_output_with_place(self.place)
    def test_check_grad(self):
        # Check the gradient w.r.t. X numerically.
        self.check_grad_with_place(
            self.place,
            ['X'],
            'Out',
            max_relative_error=0.006,
        )
    def config(self):
        """
        For multi-dimension input
        """
        self.x_shape = (10, 20)
        self.x_type = "float32"
        self.index = [1, 3, 5]
        self.index_type = "int32"
class TestCase1(TestGatherOp):
    """Variant of TestGatherOp exercising a 1-D input tensor."""
    def config(self):
        """
        For one dimension input
        """
        # A bare int shape means a 1-D tensor of length 100.
        self.x_shape = 100
        self.x_type = "float32"
        self.index = [1, 3, 5]
        self.index_type = "int32"
class API_TestGather(unittest.TestCase):
    """Exercises the paddle.gather Python API on NPU via static graphs."""
    def test_out1(self):
        # Old-style fluid.layers API with a fixed expected output.
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data1 = fluid.layers.data('data1', shape=[-1, 2], dtype='float32')
            index = fluid.layers.data('index', shape=[-1, 1], dtype='int32')
            out = paddle.gather(data1, index)
            place = paddle.NPUPlace(0)
            exe = fluid.Executor(place)
            # NOTE: `input` shadows the builtin - preserved as-is.
            input = np.array([[1, 2], [3, 4], [5, 6]])
            index_1 = np.array([1, 2])
            (result,) = exe.run(
                feed={"data1": input, "index": index_1}, fetch_list=[out]
            )
            expected_output = np.array([[3, 4], [5, 6]])
            np.testing.assert_allclose(result, expected_output, rtol=1e-5)
    def test_out2(self):
        # Newer paddle.static API, checked against the numpy reference
        # implementation (gather_numpy) instead of a literal.
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            x = paddle.fluid.data('x', shape=[-1, 2], dtype='float32')
            index = paddle.fluid.data('index', shape=[-1, 1], dtype='int32')
            out = paddle.gather(x, index)
            place = paddle.NPUPlace(0)
            exe = paddle.static.Executor(place)
            x_np = np.array([[1, 2], [3, 4], [5, 6]]).astype('float32')
            index_np = np.array([1, 1]).astype('int32')
            (result,) = exe.run(
                feed={"x": x_np, "index": index_np}, fetch_list=[out]
            )
            expected_output = gather_numpy(x_np, index_np, axis=0)
            np.testing.assert_allclose(result, expected_output, rtol=1e-5)
class TestGatherGrad(unittest.TestCase):
    """Trains a tiny gather+SGD graph and compares NPU vs CPU results."""
    def _test(self, run_npu=True):
        # Build a fresh program pair with fixed seeds so NPU and CPU runs
        # see identical initial state and data.
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED
        np.random.seed(SEED)
        a_np = np.random.random(size=(8192, 768)).astype('float32')
        index_np = np.random.randint(0, 8192, size=(1232, 1)).astype('int32')
        with paddle.static.program_guard(main_prog, startup_prog):
            a = paddle.static.data(name="a", shape=[8192, 768], dtype='float32')
            index = paddle.static.data(
                name="index", shape=[1232, 1], dtype='int32'
            )
            # Allow gradients to flow back into `a` through the gather.
            a.stop_gradient = False
            b = paddle.gather(a, index)
            loss = fluid.layers.reduce_mean(b)
            sgd = fluid.optimizer.SGD(learning_rate=0.01)
            sgd.minimize(loss)
        if run_npu:
            place = paddle.NPUPlace(0)
        else:
            place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        exe.run(startup_prog)
        print("Start run on {}".format(place))
        for epoch in range(100):
            pred_res, loss_res = exe.run(
                main_prog,
                feed={"a": a_np, "index": index_np},
                fetch_list=[b, loss],
            )
            if epoch % 10 == 0:
                print(
                    "Epoch {} | Prediction[0]: {}, Loss: {}".format(
                        epoch, pred_res[0], loss_res[0]
                    )
                )
        # Return the final prediction and loss for cross-device comparison.
        return pred_res, loss_res
    def test_npu(self):
        # The NPU run must agree with the CPU run to within tolerance.
        npu_pred, npu_loss = self._test(True)
        cpu_pred, cpu_loss = self._test(False)
        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-5)
        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-5)
if __name__ == "__main__":
    # Discover and run all test cases when executed as a script.
    unittest.main()
| {
"content_hash": "83b5124d552b551b2f7e84e8ca33e022",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 80,
"avg_line_length": 31.32919254658385,
"alnum_prop": 0.5329103885804917,
"repo_name": "luotao1/Paddle",
"id": "1d27eadbc12f3870e975a80b2a05b0a6adedbcf6",
"size": "5657",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
import sys
import os
from distutils.core import setup
try:
    # Python 3: run 2to3 over the (Python 2) sources at build time.
    from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
    # Python 2: the stock build_py needs no conversion.
    from distutils.command.build_py import build_py
# This ugly hack executes the first few lines of the module file to look up
# some common variables (__version__, __license__, module docstring). We
# cannot just import the module because it depends on other modules that
# might not be installed yet.
filename = os.path.join(os.path.dirname(__file__), 'bottle_mysql.py')
# Close the file deterministically instead of leaking the handle until GC.
with open(filename) as module_file:
    source = module_file.read().split('### CUT HERE')[0]
exec(source)
# Read the install requirements up front, closing the file handle (the
# original inlined open() leaked it until garbage collection).
with open("requirements.txt") as req_file:
    requirements = [i.strip() for i in req_file.readlines()]

setup(
    name = 'bottle-mysql',
    version = __version__,
    url = 'https://github.com/tg123/bottle-mysql',
    description = 'MySQL integration for Bottle.',
    long_description = __doc__,
    author = 'Michael Lustfield',
    author_email = 'dev@lustfield.net',
    license = __license__,
    platforms = 'any',
    py_modules = [
        'bottle_mysql'
    ],
    requires = requirements,
    classifiers = [
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    # Use the (possibly 2to3-converting) build_py selected above.
    cmdclass = {'build_py': build_py}
)
| {
"content_hash": "ac5b6ea23b571929f63d4a3dd9fdd5af",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 80,
"avg_line_length": 34.390243902439025,
"alnum_prop": 0.6524822695035462,
"repo_name": "tg123/bottle-mysql",
"id": "57d77adffe7c98aca4e16aded6b6e5d12a38068d",
"size": "1433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13717"
}
],
"symlink_target": ""
} |
class Market(object):
    """Abstract base for a crypto market/exchange connector.

    Subclasses are expected to override the stub methods below; the base
    implementations all return None.
    """

    def __init__(self):
        # NOTE(review): this stores the *module* name (__name__), not the
        # class name - preserved as-is, confirm this is intentional.
        self.name = __name__

    def get_order_book(self):
        """Fetch the current order book (stub)."""
        return None

    def get_account_info(self):
        """Fetch account/balance information (stub)."""
        return None

    def execute_order(self, orderType):
        """Submit an order of the given type (stub)."""
        return None
| {
"content_hash": "71b8e4c0a74483af0df0322a68d11855",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 39,
"avg_line_length": 18.25,
"alnum_prop": 0.547945205479452,
"repo_name": "lorganthesorn/CryptoArb",
"id": "ef3d22320a3964bbfc725f4ca1086dcc5db92722",
"size": "221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Markets/Market.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41639"
}
],
"symlink_target": ""
} |
"""change_setatbirth_for_workspace__model
Revision ID: e968d868a097
Revises: ec6018a5919f
Create Date: 2020-02-04 11:33:00.348870
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'e968d868a097'        # this migration's id
down_revision = 'ec6018a5919f'   # parent migration this applies on top of
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function by name."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function by name."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Widen sex_at_birth from smallint to JSON on both the live and
    # history tables so multiple values can be stored per researcher.
    op.execute("ALTER TABLE `workbench_researcher` MODIFY `sex_at_birth` JSON;")
    op.execute("ALTER TABLE `workbench_researcher_history` MODIFY `sex_at_birth` JSON;")
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Revert sex_at_birth back to smallint(6); note any JSON data stored
    # in the interim will not survive this conversion.
    op.execute("ALTER TABLE `workbench_researcher` MODIFY `sex_at_birth` smallint(6);")
    op.execute("ALTER TABLE `workbench_researcher_history` MODIFY `sex_at_birth` smallint(6);")
    # ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
| {
"content_hash": "825222b0c1c1b5182f7f4b89dc954776",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 95,
"avg_line_length": 26.38,
"alnum_prop": 0.6747536012130402,
"repo_name": "all-of-us/raw-data-repository",
"id": "52431a406c57ce34498f616632227821b2270bae",
"size": "1319",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/alembic/versions/e968d868a097_change_setatbirth_for_workspace__model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
"""
This file parses messages using functions defined in in the template's
parser.py
@copyright: 2012-14 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3Parsing",)
import sys
from gluon import current
# =============================================================================
class S3Parsing(object):
"""
Core Message Parsing Framework
- reusable functions
"""
# -------------------------------------------------------------------------
    @staticmethod
    def parser(function_name, message_id, **kwargs):
        """
        1st Stage Parser
        - called by msg.parse()

        Sets the appropriate Authorisation level (either from an alive
        session or by processing an in-message LOGIN request) and then
        calls the named parser function from the deployment template.

        @param function_name: name of the S3Parser method to invoke
        @param message_id: id of the msg_message record to parse
        """
        reply = None
        s3db = current.s3db
        # Retrieve Message
        table = s3db.msg_message
        message = current.db(table.message_id == message_id).select(limitby=(0, 1)
                                                                    ).first()
        # Normalise "Name <addr>" style sender strings to the bare address.
        from_address = message.from_address
        if "<" in from_address:
            from_address = from_address.split("<")[1].split(">")[0]
        email = S3Parsing.is_session_alive(from_address)
        if email:
            # Re-use the existing authenticated session.
            current.auth.s3_impersonate(email)
        else:
            # No live session: check if this message carries credentials.
            (email, password) = S3Parsing.parse_login(message)
            if email and password:
                current.auth.login_bare(email, password)
                expiration = current.session.auth["expiration"]
                table = s3db.msg_session
                table.insert(email = email,
                             expiration_time = expiration,
                             from_address = from_address)
                # NOTE(review): "succesful" typo is in the user-visible
                # reply string - left unchanged here as it is runtime text.
                reply = "Login succesful"
                # The message may have multiple purposes
                #return reply
        # Load the Parser template for this deployment
        template = current.deployment_settings.get_msg_parser()
        module_name = "applications.%s.private.templates.%s.parser" \
                      % (current.request.application, template)
        __import__(module_name)
        mymodule = sys.modules[module_name]
        S3Parser = mymodule.S3Parser()

        # Pass the message to the parser
        try:
            fn = getattr(S3Parser, function_name)
        except:
            current.log.error("Parser not found: %s" % function_name)
            return None
        reply = fn(message, **kwargs) or reply
        if not reply:
            return None
        # Send Reply
        current.msg.send(from_address, reply)
# -------------------------------------------------------------------------
@staticmethod
def parse_login(message):
"""
Authenticate a login request
"""
if not message:
return None, None
words = message.body.split(" ")
login = False
email = None
password = None
if "LOGIN" in [word.upper() for word in words]:
login = True
if len(words) == 2 and login:
password = words[1]
elif len(words) == 3 and login:
email = words[1]
password = words[2]
if login:
if password and not email:
email = message.from_address
return email, password
else:
return None, None
# ---------------------------------------------------------------------
@staticmethod
def is_session_alive(from_address):
"""
Check whether there is an alive session from the same sender
"""
email = None
now = current.request.utcnow
stable = current.s3db.msg_session
query = (stable.is_expired == False) & \
(stable.from_address == from_address)
records = current.db(query).select(stable.id,
stable.created_datetime,
stable.expiration_time,
stable.email,
)
for record in records:
time = record.created_datetime
time = time - now
time = time.total_seconds()
if time < record.expiration_time:
email = record.email
break
else:
record.update_record(is_expired = True)
return email
# ---------------------------------------------------------------------
@staticmethod
def lookup_person(address):
"""
Lookup a Person from an Email Address
"""
s3db = current.s3db
if "<" in address:
address = address.split("<")[1].split(">")[0]
ptable = s3db.pr_person
ctable = s3db.pr_contact
query = (ctable.value == address) & \
(ctable.contact_method == "EMAIL") & \
(ctable.pe_id == ptable.pe_id) & \
(ptable.deleted == False) & \
(ctable.deleted == False)
possibles = current.db(query).select(ptable.id,
limitby=(0, 2))
if len(possibles) == 1:
return possibles.first().id
return None
# ---------------------------------------------------------------------
@staticmethod
def lookup_human_resource(address):
"""
Lookup a Human Resource from an Email Address
"""
db = current.db
s3db = current.s3db
if "<" in address:
address = address.split("<")[1].split(">")[0]
hrtable = s3db.hrm_human_resource
ptable = db.pr_person
ctable = s3db.pr_contact
query = (ctable.value == address) & \
(ctable.contact_method == "EMAIL") & \
(ctable.pe_id == ptable.pe_id) & \
(ptable.id == hrtable.person_id) & \
(ctable.deleted == False) & \
(ptable.deleted == False) & \
(hrtable.deleted == False)
possibles = db(query).select(hrtable.id,
limitby=(0, 2))
if len(possibles) == 1:
return possibles.first().id
return None
# END =========================================================================
| {
"content_hash": "cce01ef8ae6e7c9ca7b2d84dca9681ce",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 82,
"avg_line_length": 34.716279069767445,
"alnum_prop": 0.5061629153269025,
"repo_name": "collective/eden",
"id": "e216e88cced3ef7a6122a13f4afde9f8b36cb460",
"size": "7533",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "modules/s3/s3parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2030949"
},
{
"name": "JavaScript",
"bytes": "19162817"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "666"
},
{
"name": "Python",
"bytes": "28358306"
},
{
"name": "Ruby",
"bytes": "2051"
},
{
"name": "Shell",
"bytes": "4846"
},
{
"name": "XSLT",
"bytes": "2644035"
}
],
"symlink_target": ""
} |
import random
import mock
import txamqp.client
from twisted.internet.defer import inlineCallbacks, Deferred
from twisted.internet.task import LoopingCall
from twisted.trial import unittest
from gtxamqp.factory import AmqpReconnectingFactory
from tests import utils
class ChannelClosedTests(unittest.TestCase):
    """Regression test: the AMQP factory must reconnect after a
    channel-level failure (txamqp ChannelClosed).

    A LoopingCall polls basic_get() in the background so that the first
    exception raised on the channel is captured in `exception_raised`.
    """
    # NOTE(review): not referenced within this test case — presumably a
    # convention shared with sibling tests; confirm before removing.
    disconnecting_period = 2
    def setUp(self):
        """Creates a factory/client pair and starts the polling consumer."""
        # Counters are initialised but not asserted in this test case.
        self.fetched_counter = 0
        self.published_counter = 0
        self.total_messages_to_send = 500
        self.tx = AmqpReconnectingFactory(**utils.generate_factory_config())
        self.client = self.tx.get_client(**utils.generate_client_config())
        # Fired at most once, with the first exception seen by the consumer.
        self.exception_raised = Deferred()
        self.consumer = LoopingCall(self.get_message_callback_on_exception, d=self.exception_raised)
        self.consumer.start(0.01)
    @inlineCallbacks
    def get_message_callback_on_exception(self, d):
        """Polls basic_get(); fires `d` with the first exception raised."""
        try:
            yield self.client.basic_get()
        except Exception as e:
            if not d.called:
                d.callback(e)
    @inlineCallbacks
    def test_reconnect_on_channel_closed(self):
        """Acking a non-existent delivery tag closes the channel; the
        factory must then call retry() (i.e. reconnect)."""
        # mock the retry function and wait for it to get called
        # it should get called only if an exception was raised
        self.tx.retry = mock.MagicMock()
        reconnection_occurred = Deferred()
        def retry_called(*x):
            # Delegate to the real retry, then record that the reconnect
            # happened after the exception was observed.
            AmqpReconnectingFactory.retry(self.tx)
            if self.exception_raised.called:
                reconnection_occurred.callback(None)
        self.tx.retry.side_effect = retry_called
        # when ack'ing a message that doesn't exist
        yield self.client.basic_ack(random.randint(2, 50))
        exc = yield self.exception_raised
        fmt= "expected exception {} to be raised, got: {}"
        self.assertIs(type(exc), txamqp.client.ChannelClosed,
                      fmt.format(txamqp.client.ChannelClosed, type(exc)))
        yield reconnection_occurred
    @inlineCallbacks
    def tearDown(self):
        """Tears down the factory and waits for the consumer loop to stop."""
        yield self.tx.teardown()
        stopper = self.consumer.deferred
        self.consumer.stop()
        yield stopper
| {
"content_hash": "52f245301e599ba676a5f3be1c9e9f36",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 100,
"avg_line_length": 33.74193548387097,
"alnum_prop": 0.6630019120458891,
"repo_name": "devsenexx/gtxamqp",
"id": "dd3cbf7ea5abdde4a48326d560074f9b8773870a",
"size": "2092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_channel_closed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75581"
},
{
"name": "Shell",
"bytes": "270"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
import shutil
import tempfile
import sys
import re
from io import StringIO
import pytest
from cwltool.main import main
import cwltool.process
from .util import get_data, needs_docker, temp_dir, windows_needs_docker
@needs_docker
def test_missing_enable_ext():
    """Deep-listing extension must be rejected without --enable-ext."""
    exit_code = main([get_data('tests/wf/listing_deep.cwl'),
                      get_data('tests/listing-job.yml')])
    assert exit_code != 0
@needs_docker
def test_listing_deep():
    """Deep directory listing succeeds when extensions are enabled."""
    exit_code = main(["--enable-ext",
                      get_data('tests/wf/listing_deep.cwl'),
                      get_data('tests/listing-job.yml')])
    assert exit_code == 0
@needs_docker
def test_listing_shallow():
    """Shallow listing must not be expanded into subdirectories.

    This fails on purpose: the workflow accesses a subdirectory listing
    the same way listing_deep does, but it shouldn't be expanded.
    """
    exit_code = main(["--enable-ext",
                      get_data('tests/wf/listing_shallow.cwl'),
                      get_data('tests/listing-job.yml')])
    assert exit_code != 0
@needs_docker
def test_listing_none():
    """No listing requested: accessing it must fail.

    This fails on purpose, because the workflow tries to access a listing
    that shouldn't be there.
    """
    exit_code = main(["--enable-ext",
                      get_data('tests/wf/listing_none.cwl'),
                      get_data('tests/listing-job.yml')])
    assert exit_code != 0
@needs_docker
def test_listing_v1_0():
    """Default behavior in CWL v1.0 is deep expansion."""
    exit_code = main([get_data('tests/wf/listing_v1_0.cwl'),
                      get_data('tests/listing-job.yml')])
    assert exit_code == 0
@pytest.mark.skip(reason="This is not the default behaviour yet")
@needs_docker
def test_listing_v1_1():
    """Default behavior in CWL v1.1 will be no expansion."""
    exit_code = main([get_data('tests/wf/listing_v1_1.cwl'),
                      get_data('tests/listing-job.yml')])
    assert exit_code != 0
@needs_docker
def test_double_overwrite(tmpdir):
    """The mut2 workflow updates the -a file in place twice (1 -> 3)."""
    with temp_dir() as tmp:
        target = os.path.join(tmp, "value")
        with open(target, "w") as handle:
            handle.write("1")
        args = ["--enable-ext", "--outdir", str(tmpdir),
                get_data('tests/wf/mut2.cwl'), "-a", target]
        assert main(args) == 0
        with open(target, "r") as handle:
            assert handle.read() == "3"
@needs_docker
def test_disable_file_overwrite_without_ext():
    """Without --enable-ext the file is updated in a copy, not in place."""
    with temp_dir() as tmp, temp_dir() as out:
        in_file = os.path.join(tmp, "value")
        out_file = os.path.join(out, "value")
        with open(in_file, "w") as handle:
            handle.write("1")
        assert main(["--outdir", out,
                     get_data('tests/wf/updateval.cwl'), "-r", in_file]) == 0
        with open(in_file, "r") as handle:
            assert handle.read() == "1"  # original untouched
        with open(out_file, "r") as handle:
            assert handle.read() == "2"  # updated copy in outdir
@needs_docker
def test_disable_dir_overwrite_without_ext():
    """Without --enable-ext the directory is updated in a copy, not in place."""
    with temp_dir() as tmp, temp_dir() as out:
        assert main(["--outdir", out,
                     get_data('tests/wf/updatedir.cwl'), "-r", tmp]) == 0
        assert not os.listdir(tmp)  # input dir untouched
        assert os.listdir(out)      # results written to outdir
@needs_docker
def test_disable_file_creation_in_outdir_with_ext():
    """In-place update with --leave-outputs must not copy into outdir."""
    with temp_dir() as tmp, temp_dir() as out:
        in_file = os.path.join(tmp, "value")
        out_file = os.path.join(out, "value")
        with open(in_file, "w") as handle:
            handle.write("1")
        args = ["--enable-ext", "--leave-outputs", "--outdir", out,
                get_data('tests/wf/updateval_inplace.cwl'), "-r", in_file]
        assert main(args) == 0
        with open(in_file, "r") as handle:
            assert handle.read() == "2"  # updated in place
        assert not os.path.exists(out_file)
@needs_docker
def test_disable_dir_creation_in_outdir_with_ext():
    """In-place directory update with --leave-outputs leaves outdir empty."""
    with temp_dir() as tmp, temp_dir() as out:
        args = ["--enable-ext", "--leave-outputs", "--outdir", out,
                get_data('tests/wf/updatedir_inplace.cwl'), "-r", tmp]
        assert main(args) == 0
        assert os.listdir(tmp)      # updated in place
        assert not os.listdir(out)  # nothing copied out
@needs_docker
def test_write_write_conflict():
    """Two steps updating the same file in place must fail overall."""
    with temp_dir('tmp') as tmp:
        target = os.path.join(tmp, "value")
        with open(target, "w") as handle:
            handle.write("1")
        assert main(["--enable-ext",
                     get_data('tests/wf/mut.cwl'), "-a", target]) != 0
        with open(target, "r") as handle:
            # Only the first update ran before the conflict was detected.
            assert handle.read() == "2"
@pytest.mark.skip(reason="This test is non-deterministic")
def test_read_write_conflict():
    """Reading and in-place writing the same file must fail."""
    with temp_dir('tmp') as tmp:
        target = os.path.join(tmp, "value")
        with open(target, "w") as handle:
            handle.write("1")
        assert main(["--enable-ext",
                     get_data('tests/wf/mut3.cwl'), "-a", target]) != 0
@needs_docker
def test_require_prefix_networkaccess():
    """NetworkAccess requires --enable-ext; the -fail variant always fails."""
    workflow = get_data('tests/wf/networkaccess.cwl')
    assert main(["--enable-ext", workflow]) == 0
    assert main([workflow]) != 0
    assert main(["--enable-ext",
                 get_data('tests/wf/networkaccess-fail.cwl')]) != 0
@needs_docker
def test_require_prefix_workreuse(tmpdir):
    """WorkReuse requires --enable-ext; the -fail variant always fails."""
    workflow = get_data('tests/wf/workreuse.cwl')
    assert main(["--enable-ext", '--outdir', str(tmpdir), workflow]) == 0
    assert main([workflow]) != 0
    assert main(["--enable-ext",
                 get_data('tests/wf/workreuse-fail.cwl')]) != 0
@windows_needs_docker
def test_require_prefix_timelimit():
    """ToolTimeLimit requires --enable-ext; the -fail variant always fails."""
    workflow = get_data('tests/wf/timelimit.cwl')
    assert main(["--enable-ext", workflow]) == 0
    assert main([workflow]) != 0
    assert main(["--enable-ext",
                 get_data('tests/wf/timelimit-fail.cwl')]) != 0
def test_warn_large_inputs():
    """Lowering FILE_COUNT_WARNING triggers the listing-size warning."""
    saved_threshold = cwltool.process.FILE_COUNT_WARNING
    try:
        stream = StringIO()
        cwltool.process.FILE_COUNT_WARNING = 3
        main([get_data('tests/wf/listing_v1_0.cwl'),
              get_data('tests/listing2-job.yml')],
             stderr=stream)
        flattened = re.sub("\n *", " ", stream.getvalue())
        assert ("Recursive directory listing has resulted in a large number "
                "of File objects" in flattened)
    finally:
        # Always restore the module-level threshold for other tests.
        cwltool.process.FILE_COUNT_WARNING = saved_threshold
| {
"content_hash": "4e113d39dd1d4b433e90117177de6626",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 134,
"avg_line_length": 32.964285714285715,
"alnum_prop": 0.5988237114997679,
"repo_name": "dleehr/cwltool",
"id": "58ab99f9ad343a9e43f29553e90c3e538ec7b82e",
"size": "6461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_ext.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "363"
},
{
"name": "Common Workflow Language",
"bytes": "227670"
},
{
"name": "Dockerfile",
"bytes": "791"
},
{
"name": "Java",
"bytes": "144"
},
{
"name": "JavaScript",
"bytes": "197079"
},
{
"name": "Makefile",
"bytes": "20194"
},
{
"name": "Python",
"bytes": "1650756"
},
{
"name": "Shell",
"bytes": "15892"
},
{
"name": "Tcl",
"bytes": "462"
}
],
"symlink_target": ""
} |
"""Functions to compute local metrics.
To compute the metrics for a set of sequences, the statistics are first computed
for each sequence, these are then summed and normalized.
Sequences are specified by (gt_id_subset, pr_id_subset, similarity):
gt_id_subset: List of integer arrays of groundtruth tracks in each frame.
pr_id_subset: List of integer arrays of predicted tracks in each frame.
similarity: List of 2D float arrays that give similarity between tracks.
The value `similarity[t][i, j]` gives the overlap of tracks `gt_id_subset[t][i]`
and `pr_id_subset[t][j]` in frame `t`.
IDs must be integers but do not need to be consecutive from zero.
"""
# Public API of this module; strict_stats and local_stats_at_int_horizons
# remain importable but are not exported via wildcard import.
__all__ = [
    'StatsEvaluator',
    'local_stats',
    'normalize',
    'normalize_diagnostics',
]
import collections
import itertools
from . import horizon_util
from . import util
import numpy as np
import pandas as pd
# Human-readable display names for metric keys (for tables/plots).
# NOTE(review): some keys ('mota', 'num_switches', ...) are not produced in
# this module — presumably consumed by other modules in the package; confirm
# before pruning.
NAMES = {
    'ada': 'DetF1',
    'ata': 'ATA',
    'ata_assoc': 'ATA / DetF1',
    'atr': 'Track Recall',
    'atp': 'Track Prec',
    'mota': 'MOTA',
    'idf1': 'IDF1',
    'idf1_assoc': 'IDF1 / DetF1',
    'recall': 'Det Recall',
    'precision': 'Det Prec',
    'det_f1': 'Det F1',
    'num_switches': 'Switches',
}

# Raw per-sequence statistic fields (summed across sequences, then
# normalized by normalize()).
FIELDS_STATS = [
    'gt_num_tracks',
    'pr_num_tracks',
    'gt_num_is_present',
    'pr_num_is_present',
    'num_frames',
    'track_tp',
    'idtp',
]

# Normalized metric fields derived from FIELDS_STATS.
FIELDS_METRICS = [
    'ata', 'atr', 'atp',
    'idf1', 'idr', 'idp',
]
def strict_stats(
    num_frames, gt_id_subset, pr_id_subset, similarity,
    similarity_threshold=0.5,
    with_diagnostics=True):
  """Computes stats for the strict metrics (infinite horizon).

  Equivalent to evaluating the entire sequence as one interval.

  Returns:
    pd.Series of summed statistics.
  """
  return StatsEvaluator(
      num_frames, gt_id_subset, pr_id_subset, similarity,
      similarity_threshold=similarity_threshold,
      with_diagnostics=with_diagnostics).strict()
def local_stats(
    num_frames, gt_id_subset, pr_id_subset, similarity, horizons,
    time_scale=1,
    similarity_threshold=0.5,
    with_diagnostics=True):
  """Returns pd.DataFrame of stats indexed by horizons in the given time-scale.

  Horizons are first converted to integer frame counts, evaluated at the
  unique frame horizons, then re-indexed by the requested horizons.
  """
  int_horizons = horizon_util.int_frame_horizon(
      horizons, num_frames, time_scale)
  table = local_stats_at_int_horizons(
      num_frames, gt_id_subset, pr_id_subset, similarity, int_horizons,
      similarity_threshold=similarity_threshold,
      with_diagnostics=with_diagnostics)
  requested_index = pd.Index(horizons, name='horizon')
  return table.loc[int_horizons].set_index(requested_index)
def local_stats_at_int_horizons(
    num_frames, gt_id_subset, pr_id_subset, similarity, horizons,
    similarity_threshold=0.5,
    with_diagnostics=True):
  """Returns pd.DataFrame of stats indexed by unique integer horizons.

  Horizons must be finite, non-negative and integer-valued.
  """
  horizons = np.atleast_1d(np.asarray(horizons))
  assert np.all(horizons >= 0)
  assert np.all(horizons < np.inf)
  assert np.all(horizons == horizons.astype(int))
  horizons = np.unique(horizons.astype(int))
  evaluator = StatsEvaluator(
      num_frames, gt_id_subset, pr_id_subset, similarity,
      similarity_threshold=similarity_threshold,
      with_diagnostics=with_diagnostics)
  rows = [evaluator.local(h) for h in horizons]
  return pd.DataFrame(rows, index=pd.Index(horizons, name='horizon'))
class StatsEvaluator:
  """Computes stats for local metrics efficiently at arbitrary horizons."""
  def __init__(self, num_frames, gt_id_subset, pr_id_subset, similarity,
               similarity_threshold=0.5, with_diagnostics=True):
    """Precomputes indicator arrays and their cumulative sums.

    The cumsums let any interval sum be obtained with two lookups, so each
    horizon query in local() costs O(num_frames) interval reductions.

    Args:
      num_frames: Total number of frames in the sequence.
      gt_id_subset: List (per frame) of integer arrays of groundtruth ids.
      pr_id_subset: List (per frame) of integer arrays of predicted ids.
      similarity: List (per frame) of 2D float similarity arrays.
      similarity_threshold: Minimum similarity for a pair to overlap.
      with_diagnostics: Whether to also precompute per-frame match
        indicators for diagnostic stats.
    """
    self.num_frames = num_frames
    self.with_diagnostics = with_diagnostics
    # Obtain indicators for presence, overlap, match.
    (gt_is_present, pr_is_present,
     overlap_pairs, overlap_occurs,
     match_pairs, match_occurs) = _indicator_arrays(
         num_frames, gt_id_subset, pr_id_subset, similarity,
         similarity_threshold=similarity_threshold,
         with_diagnostics=self.with_diagnostics)
    # Obtain indicators that are sufficient for computation of stats.
    overlap_indicators = _overlap_indicators(
        gt_is_present, pr_is_present, overlap_pairs, overlap_occurs)
    # Pre-compute cumulative sums of indicators for use by all horizons.
    cumsum_gt_is_present = _cumsum_with_zero(gt_is_present, axis=-1)
    cumsum_pr_is_present = _cumsum_with_zero(pr_is_present, axis=-1)
    overlap_cumsums = {k: _cumsum_with_zero(indicator, axis=-1)
                       for k, indicator in overlap_indicators.items()}
    if self.with_diagnostics:
      match_indicators = _match_indicators(
          gt_is_present, pr_is_present, match_pairs, match_occurs)
      match_cumsums = {k: _cumsum_with_zero(indicator, axis=-1)
                       for k, indicator in match_indicators.items()}
    self.gt_is_present = gt_is_present
    self.pr_is_present = pr_is_present
    self.cumsum_gt_is_present = cumsum_gt_is_present
    self.cumsum_pr_is_present = cumsum_pr_is_present
    self.overlap_pairs = overlap_pairs
    self.overlap_indicators = overlap_indicators
    self.overlap_cumsums = overlap_cumsums
    if self.with_diagnostics:
      self.match_pairs = match_pairs
      self.match_indicators = match_indicators
      self.match_cumsums = match_cumsums
  def strict(self):
    """Computes stats for strict."""
    # Compute metrics for full sequence: take sum over time axis.
    gt_num_is_present = self.gt_is_present.sum(axis=-1)
    pr_num_is_present = self.pr_is_present.sum(axis=-1)
    overlap_counts = {k: v.sum(axis=-1)
                      for k, v in self.overlap_indicators.items()}
    stats = _stats_from_overlap_counts(
        num_frames=self.num_frames,
        gt_num_is_present=gt_num_is_present,
        pr_num_is_present=pr_num_is_present,
        overlap_pairs=self.overlap_pairs,
        **{'overlap_num_' + k: v for k, v in overlap_counts.items()})
    if self.with_diagnostics:
      # Compute diagnostics for full sequence: take sum over time axis.
      match_counts = {k: v.sum(axis=-1)
                      for k, v in self.match_indicators.items()}
      stats.update(_diagnostic_stats_from_overlap_counts(
          gt_num_is_present=gt_num_is_present,
          pr_num_is_present=pr_num_is_present,
          match_pairs=self.match_pairs,
          **{'match_num_' + k: v for k, v in match_counts.items()}))
    stats = pd.Series(stats)
    return stats
  def local(self, horizon):
    """Returns stats for local metrics at finite horizon.

    Evaluates the window [t - horizon, t + horizon] (clipped to the
    sequence) around every frame t, then averages over all windows.

    Args:
      horizon: Non-negative integer horizon, in frames.
    """
    interval_stats = [None for _ in range(self.num_frames)]
    for t in range(self.num_frames):
      # Half-open interval [a, b) around frame t.
      a = max(t - horizon, 0)
      b = min(t + horizon + 1, self.num_frames)
      # Identify subset of tracks that are present in the interval.
      # (cumsum[b] - cumsum[a] is the interval sum of the indicator.)
      gt_num_is_present = (self.cumsum_gt_is_present[:, b] -
                           self.cumsum_gt_is_present[:, a])
      pr_num_is_present = (self.cumsum_pr_is_present[:, b] -
                           self.cumsum_pr_is_present[:, a])
      gt_mask = (gt_num_is_present > 0)
      pr_mask = (pr_num_is_present > 0)
      # Indices required for re-indexing.
      gt_subset, = gt_mask.nonzero()
      pr_subset, = pr_mask.nonzero()
      # Take sums over interval.
      overlap_mask = np.logical_and(gt_mask[self.overlap_pairs[:, 0]],
                                    pr_mask[self.overlap_pairs[:, 1]])
      overlap_counts = {k: cumsum[overlap_mask, b] - cumsum[overlap_mask, a]
                        for k, cumsum in self.overlap_cumsums.items()}
      # Compute metrics for this interval using overlap counts.
      curr_interval_stats = _stats_from_overlap_counts(
          num_frames=(b - a),
          gt_num_is_present=gt_num_is_present[gt_mask],
          pr_num_is_present=pr_num_is_present[pr_mask],
          overlap_pairs=_reindex_pairs(gt_subset, pr_subset,
                                       self.overlap_pairs[overlap_mask]),
          **{'overlap_num_' + k: v for k, v in overlap_counts.items()})
      if self.with_diagnostics:
        # Take sums over interval.
        match_mask = np.logical_and(gt_mask[self.match_pairs[:, 0]],
                                    pr_mask[self.match_pairs[:, 1]])
        match_counts = {k: cumsum[match_mask, b] - cumsum[match_mask, a]
                        for k, cumsum in self.match_cumsums.items()}
        # Compute diagnostics for this interval using per-frame matching.
        curr_interval_stats.update(_diagnostic_stats_from_overlap_counts(
            gt_num_is_present=gt_num_is_present[gt_mask],
            pr_num_is_present=pr_num_is_present[pr_mask],
            match_pairs=_reindex_pairs(gt_subset, pr_subset,
                                       self.match_pairs[match_mask]),
            **{'match_num_' + k: v for k, v in match_counts.items()}))
      interval_stats[t] = curr_interval_stats
    # Take mean over all windows.
    interval_stats = pd.DataFrame(interval_stats)
    stats = interval_stats.sum(axis=0) / self.num_frames
    return stats
def _indicator_dicts(num_frames, gt_id_subset, pr_id_subset, similarity,
                     similarity_threshold=0.5, with_diagnostics=True):
  """Builds boolean indicator time-series keyed by track id (or id pair).

  Returns:
    Tuple (gt_is_present, pr_is_present, overlap_occurs, match_occurs).
    The first two map track id -> bool array of length num_frames; the last
    two map (gt_id, pr_id) -> bool array of length num_frames.
  """
  gt_ids = sorted(set.union(*map(set, gt_id_subset)))
  pr_ids = sorted(set.union(*map(set, pr_id_subset)))

  def zeros():
    return np.zeros(num_frames, dtype=bool)

  # Construct indicator time-series for presence, overlap, per-frame match.
  gt_is_present = {track_id: zeros() for track_id in gt_ids}
  pr_is_present = {track_id: zeros() for track_id in pr_ids}
  overlap_occurs = collections.defaultdict(zeros)
  match_occurs = collections.defaultdict(zeros)
  for t in range(num_frames):
    gt_curr = gt_id_subset[t]
    pr_curr = pr_id_subset[t]
    # Require that IDs do not appear twice in the same frame.
    _assert_all_different(gt_curr)
    _assert_all_different(pr_curr)
    for track_id in gt_curr:
      gt_is_present[track_id][t] = 1
    for track_id in pr_curr:
      pr_is_present[track_id][t] = 1
    is_overlap = (similarity[t] >= similarity_threshold)
    for r, c in zip(*is_overlap.nonzero()):
      overlap_occurs[gt_curr[r], pr_curr[c]][t] = 1
    if with_diagnostics:
      # Solve for independent, per-frame correspondence.
      for r, c in util.match_detections(is_overlap, similarity[t]):
        match_occurs[gt_curr[r], pr_curr[c]][t] = 1
  # Plain dicts so that missing pairs raise KeyError downstream.
  return gt_is_present, pr_is_present, dict(overlap_occurs), dict(match_occurs)
def _indicator_arrays(num_frames, gt_id_subset, pr_id_subset, similarity,
                      similarity_threshold=0.5,
                      with_diagnostics=True):
  """Returns arrays containing indicator time-series.

  Converts the per-id dicts from _indicator_dicts into dense 2D arrays
  whose rows are ordered by sorted id (or sorted id pair), along with
  integer pair arrays re-indexed to those zero-based row orders.

  Returns:
    Tuple (gt_is_present, pr_is_present, overlap_pairs, overlap_occurs,
    match_pairs, match_occurs).
  """
  gt_is_present, pr_is_present, overlap_occurs, match_occurs = (
      _indicator_dicts(num_frames, gt_id_subset, pr_id_subset, similarity,
                       similarity_threshold=similarity_threshold,
                       with_diagnostics=with_diagnostics))
  # Fix a deterministic row order for the dense arrays.
  gt_ids = sorted(gt_is_present.keys())
  pr_ids = sorted(pr_is_present.keys())
  overlap_pairs = sorted(overlap_occurs.keys())
  match_pairs = sorted(match_occurs.keys())
  # Stack per-id series into [num_rows, num_frames] arrays (out= also
  # provides the correct empty shape when there are no rows).
  gt_is_present = _stack_maybe_empty(
      [gt_is_present[gt_id] for gt_id in gt_ids],
      out=np.empty([len(gt_ids), num_frames], dtype=bool))
  pr_is_present = _stack_maybe_empty(
      [pr_is_present[pr_id] for pr_id in pr_ids],
      out=np.empty([len(pr_ids), num_frames], dtype=bool))
  overlap_occurs = _stack_maybe_empty(
      [overlap_occurs[pair] for pair in overlap_pairs],
      out=np.empty([len(overlap_pairs), num_frames], dtype=bool))
  match_occurs = _stack_maybe_empty(
      [match_occurs[pair] for pair in match_pairs],
      out=np.empty([len(match_pairs), num_frames], dtype=bool))
  # Replace IDs with zero-based integers.
  overlap_pairs = _reindex_pairs(gt_ids, pr_ids, overlap_pairs)
  match_pairs = _reindex_pairs(gt_ids, pr_ids, match_pairs)
  return (gt_is_present, pr_is_present,
          overlap_pairs, overlap_occurs,
          match_pairs, match_occurs)
def _overlap_indicators(gt_is_present, pr_is_present,
overlap_pairs, overlap_occurs):
"""Returns indicators that are sufficient to describe track overlaps."""
# Ensure boolean type to use bitwise operators (&, |, ~).
gt_is_present = gt_is_present.astype(bool)
pr_is_present = pr_is_present.astype(bool)
overlap_occurs = overlap_occurs.astype(bool)
# Construct dict of indicator arrays required to compute metrics.
# Final axis of all arrays is time.
overlap_indicators = {}
overlap_indicators['occurs'] = overlap_occurs
overlap_indicators['either_is_present'] = np.logical_or(
gt_is_present[overlap_pairs[:, 0], :],
pr_is_present[overlap_pairs[:, 1], :])
return overlap_indicators
def _match_indicators(gt_is_present, pr_is_present,
                      match_pairs, match_occurs):
  """Returns indicators that are sufficient to describe track matches.

  All returned arrays have shape [num_pairs, num_frames]; the final axis
  is time.
  """
  num_gt, num_frames = gt_is_present.shape
  num_pr, _ = pr_is_present.shape
  match_occurs = match_occurs.astype(bool)
  match_indicators = {}
  match_indicators['occurs'] = match_occurs
  # Add indicators that are based on per-frame matches.
  # gt_has_some_match[i, t]: gt track i matched ANY pr track in frame t
  # (accumulated per-pair with in-place OR); symmetrically for pr.
  gt_has_some_match = np.zeros([num_gt, num_frames], dtype=bool)
  pr_has_some_match = np.zeros([num_pr, num_frames], dtype=bool)
  for (gt_id, pr_id), pair_match_occurs in zip(match_pairs, match_occurs):
    gt_has_some_match[gt_id] |= pair_match_occurs
    pr_has_some_match[pr_id] |= pair_match_occurs
  # Per-pair presence rows, gathered from per-track presence.
  match_gt_is_present = gt_is_present[match_pairs[:, 0]]
  match_pr_is_present = pr_is_present[match_pairs[:, 1]]
  # "Alone": one track of the pair is present while the other is absent.
  match_gt_is_alone = (match_gt_is_present & ~match_pr_is_present)
  match_pr_is_alone = (match_pr_is_present & ~match_gt_is_present)
  match_indicators['either_is_present'] = np.logical_or(
      match_gt_is_present, match_pr_is_present)
  # Split "alone" frames by whether the lone track matched some other
  # counterpart in that frame.
  match_indicators['gt_is_alone_with_match'] = (
      match_gt_is_alone & gt_has_some_match[match_pairs[:, 0]])
  match_indicators['gt_is_alone_sans_match'] = (
      match_gt_is_alone & ~gt_has_some_match[match_pairs[:, 0]])
  match_indicators['pr_is_alone_with_match'] = (
      match_pr_is_alone & pr_has_some_match[match_pairs[:, 1]])
  match_indicators['pr_is_alone_sans_match'] = (
      match_pr_is_alone & ~pr_has_some_match[match_pairs[:, 1]])
  return match_indicators
def _stats_from_overlap_counts(
    num_frames,
    gt_num_is_present,
    pr_num_is_present,
    overlap_pairs,
    overlap_num_occurs,
    overlap_num_either_is_present):
  """Obtains statistics for IDF1 and ATA given number of frames that overlap.

  Args:
    num_frames: Integer.
    gt_num_is_present: Integer array of shape [num_gt].
    pr_num_is_present: Integer array of shape [num_pr].
    overlap_pairs: Integer array of (gt, pr) pairs with shape [num_pairs, 2].
      Indices should be in [0, num_gt) and [0, num_pr) respectively.
    overlap_num_occurs: Integer array of shape [num_pairs].
      Number of frames where the pair of tracks satisfy overlap criterion.
    overlap_num_either_is_present: Integer array of shape [num_pairs].
      Number of frames where at least one track in the pair is present.
  Returns:
    Dict that maps field name to value.
  """
  sums = {}
  num_gt, = gt_num_is_present.shape
  num_pr, = pr_num_is_present.shape
  # Ensure counts are floats for division.
  overlap_num_occurs = overlap_num_occurs.astype(np.float64)
  overlap_num_either_is_present = (
      overlap_num_either_is_present.astype(np.float64))
  # Find correspondence for ATA.
  # Per-pair score: overlapping frames over frames where either is present
  # (a temporal intersection-over-union of the pair).
  overlap_pair_track_tp = overlap_num_occurs / overlap_num_either_is_present
  track_tp_matrix = _make_dense([num_gt, num_pr],
                                (overlap_pairs[:, 0], overlap_pairs[:, 1]),
                                overlap_pair_track_tp)
  # Minimize negated score == maximize total score over 1-1 correspondence.
  argmax = util.solve_assignment(-track_tp_matrix, exclude_zero=True)
  sums['track_tp'] = track_tp_matrix[argmax[:, 0], argmax[:, 1]].sum()
  # Find correspondence for IDF1.
  # IDF1 instead maximizes the total count of overlapping frames.
  num_overlap_matrix = _make_dense([num_gt, num_pr],
                                   (overlap_pairs[:, 0], overlap_pairs[:, 1]),
                                   overlap_num_occurs)
  argmax = util.solve_assignment(-num_overlap_matrix, exclude_zero=True)
  sums['idtp'] = num_overlap_matrix[argmax[:, 0], argmax[:, 1]].sum()
  sums['num_frames'] = num_frames
  sums['gt_num_tracks'] = num_gt
  sums['pr_num_tracks'] = num_pr
  sums['gt_num_is_present'] = np.sum(gt_num_is_present)
  sums['pr_num_is_present'] = np.sum(pr_num_is_present)
  return sums
def _diagnostic_stats_from_overlap_counts(
    gt_num_is_present,
    pr_num_is_present,
    match_pairs,
    match_num_occurs,
    match_num_either_is_present,
    match_num_gt_is_alone_with_match,
    match_num_pr_is_alone_with_match,
    match_num_gt_is_alone_sans_match,
    match_num_pr_is_alone_sans_match):
  """Obtains stats for diagnostics from independent per-frame correspondence.

  Args:
    gt_num_is_present: Integer array of shape [num_gt].
    pr_num_is_present: Integer array of shape [num_pr].
    match_pairs: Integer array of (gt, pr) pairs with shape [num_pairs, 2].
      Indices should be in [0, num_gt) and [0, num_pr) respectively.
    match_num_occurs: Integer array of shape [num_pairs].
      Number of frames where the pair of tracks are matched.
    match_num_either_is_present: Integer array of shape [num_pairs].
      Number of frames where at least one track in the pair is present.
    match_num_gt_is_alone_with_match: Integer array of shape [num_pairs].
      Number of frames where gt is present and matched to a different pr.
    match_num_pr_is_alone_with_match: Integer array of shape [num_pairs].
      Number of frames where pr is present and matched to a different gt.
    match_num_gt_is_alone_sans_match: Integer array of shape [num_pairs].
      Number of frames where gt is present and not matched to any pr.
    match_num_pr_is_alone_sans_match: Integer array of shape [num_pairs].
      Number of frames where pr is present and not matched to any gt.
  Returns:
    Dict that maps field name to value.
  """
  sums = {}
  num_gt = len(gt_num_is_present)
  num_pr = len(pr_num_is_present)
  # Ensure all counts are floats for division.
  gt_num_is_present = gt_num_is_present.astype(np.float64)
  pr_num_is_present = pr_num_is_present.astype(np.float64)
  match_num_occurs = match_num_occurs.astype(np.float64)
  match_num_either_is_present = match_num_either_is_present.astype(np.float64)
  match_num_gt_is_alone_with_match = (
      match_num_gt_is_alone_with_match.astype(np.float64))
  match_num_pr_is_alone_with_match = (
      match_num_pr_is_alone_with_match.astype(np.float64))
  match_num_gt_is_alone_sans_match = (
      match_num_gt_is_alone_sans_match.astype(np.float64))
  match_num_pr_is_alone_sans_match = (
      match_num_pr_is_alone_sans_match.astype(np.float64))
  # Total per-frame true-positive detections.
  sums['det_tp'] = np.sum(match_num_occurs)
  # Find optimal track correspondence using match instead of overlap.
  match_pair_approx_track_tp = match_num_occurs / match_num_either_is_present
  approx_track_tp_matrix = _make_dense(
      [num_gt, num_pr],
      (match_pairs[:, 0], match_pairs[:, 1]),
      match_pair_approx_track_tp)
  opt_pairs = util.solve_assignment(-approx_track_tp_matrix, exclude_zero=True)
  sums['track_tp_approx'] = (
      approx_track_tp_matrix[opt_pairs[:, 0], opt_pairs[:, 1]].sum())
  # Measure fraction of gt/pr track instead of fraction of union.
  num_match_matrix = _make_dense([num_gt, num_pr],
                                 (match_pairs[:, 0], match_pairs[:, 1]),
                                 match_num_occurs)
  # Per-track match counts: total over all partners (sum), best single
  # partner (max), and the partner chosen by the optimal assignment (opt).
  gt_sum_match = num_match_matrix.sum(axis=1)
  pr_sum_match = num_match_matrix.sum(axis=0)
  gt_max_match = num_match_matrix.max(axis=1, initial=0)
  pr_max_match = num_match_matrix.max(axis=0, initial=0)
  sums['gt_frac_det'] = np.sum(gt_sum_match / gt_num_is_present)
  sums['pr_frac_det'] = np.sum(pr_sum_match / pr_num_is_present)
  sums['gt_frac_max'] = np.sum(gt_max_match / gt_num_is_present)
  sums['pr_frac_max'] = np.sum(pr_max_match / pr_num_is_present)
  opt_num_match = num_match_matrix[opt_pairs[:, 0], opt_pairs[:, 1]]
  opt_num_gt_is_present = gt_num_is_present[opt_pairs[:, 0]]
  opt_num_pr_is_present = pr_num_is_present[opt_pairs[:, 1]]
  gt_opt_match = _make_dense([num_gt], opt_pairs[:, 0], opt_num_match)
  pr_opt_match = _make_dense([num_pr], opt_pairs[:, 1], opt_num_match)
  sums['gt_frac_opt'] = np.sum(gt_opt_match / gt_num_is_present)
  sums['pr_frac_opt'] = np.sum(pr_opt_match / pr_num_is_present)
  # Decompose coverage error: total (cover) = detection part (cover_det)
  # + association part (cover_ass); association further splits into
  # independent (sum vs max) and joint (max vs opt) components.
  sums['track_fn_cover'] = np.sum(1 - gt_opt_match / gt_num_is_present)
  sums['track_fp_cover'] = np.sum(1 - pr_opt_match / pr_num_is_present)
  sums['track_fn_cover_det'] = np.sum(1 - gt_sum_match / gt_num_is_present)
  sums['track_fp_cover_det'] = np.sum(1 - pr_sum_match / pr_num_is_present)
  sums['track_fn_cover_ass'] = np.sum((gt_sum_match - gt_opt_match) /
                                      gt_num_is_present)
  sums['track_fp_cover_ass'] = np.sum((pr_sum_match - pr_opt_match) /
                                      pr_num_is_present)
  sums['track_fn_cover_ass_indep'] = np.sum((gt_sum_match - gt_max_match) /
                                            gt_num_is_present)
  sums['track_fp_cover_ass_indep'] = np.sum((pr_sum_match - pr_max_match) /
                                            pr_num_is_present)
  sums['track_fn_cover_ass_joint'] = np.sum((gt_max_match - gt_opt_match) /
                                            gt_num_is_present)
  sums['track_fp_cover_ass_joint'] = np.sum((pr_max_match - pr_opt_match) /
                                            pr_num_is_present)
  # Find `union` component.
  # Look up, for each optimally-assigned pair, its row in the match arrays.
  pair_to_match_index = dict(zip(map(tuple, match_pairs), itertools.count()))
  opt_to_match_index = np.array(
      [pair_to_match_index[tuple(pair)] for pair in opt_pairs], dtype=int)
  opt_num_either_is_present = match_num_either_is_present[opt_to_match_index]
  opt_gt_acc_cover = opt_num_match / opt_num_gt_is_present
  opt_pr_acc_cover = opt_num_match / opt_num_pr_is_present
  # Difference between normalizing by own presence vs union presence.
  opt_gt_err_union = opt_num_match * (1 / opt_num_gt_is_present -
                                      1 / opt_num_either_is_present)
  opt_pr_err_union = opt_num_match * (1 / opt_num_pr_is_present -
                                      1 / opt_num_either_is_present)
  sums['track_fn_union'] = np.sum(opt_gt_err_union)
  sums['track_fp_union'] = np.sum(opt_pr_err_union)
  # Decomposition of `union` into `union_det` and `union_ass`.
  opt_num_gt_is_alone_with_match = (
      match_num_gt_is_alone_with_match[opt_to_match_index])
  opt_num_pr_is_alone_with_match = (
      match_num_pr_is_alone_with_match[opt_to_match_index])
  opt_num_gt_is_alone_sans_match = (
      match_num_gt_is_alone_sans_match[opt_to_match_index])
  opt_num_pr_is_alone_sans_match = (
      match_num_pr_is_alone_sans_match[opt_to_match_index])
  opt_gt_err_union_det = (
      opt_gt_acc_cover * (opt_num_pr_is_alone_with_match /
                          opt_num_either_is_present))
  opt_pr_err_union_det = (
      opt_pr_acc_cover * (opt_num_gt_is_alone_with_match /
                          opt_num_either_is_present))
  opt_gt_err_union_ass = (
      opt_gt_acc_cover * (opt_num_pr_is_alone_sans_match /
                          opt_num_either_is_present))
  opt_pr_err_union_ass = (
      opt_pr_acc_cover * (opt_num_gt_is_alone_sans_match /
                          opt_num_either_is_present))
  sums['track_fn_union_det'] = np.sum(opt_gt_err_union_det)
  sums['track_fp_union_det'] = np.sum(opt_pr_err_union_det)
  sums['track_fn_union_ass'] = np.sum(opt_gt_err_union_ass)
  sums['track_fp_union_ass'] = np.sum(opt_pr_err_union_ass)
  return sums
def normalize(stats):
  """Computes normalized tracking metrics from accumulated statistic sums.

  Diagnostic metrics are appended when the diagnostic sums are present
  (detected via the 'track_tp_approx' column).

  Args:
    stats: pd.DataFrame (one row per sequence) or pd.Series of raw sums.

  Returns:
    pd.DataFrame of metrics, or a pd.Series if a Series was given.
  """
  was_series = isinstance(stats, pd.Series)
  if was_series:
    # Promote the Series to a one-row DataFrame for uniform handling.
    stats = pd.DataFrame.from_records([stats])
  assert isinstance(stats, pd.DataFrame)
  metrics = pd.DataFrame(index=stats.index)
  mean_num_tracks = 0.5 * (stats['gt_num_tracks'] + stats['pr_num_tracks'])
  metrics['ata'] = stats['track_tp'] / mean_num_tracks
  metrics['atr'] = stats['track_tp'] / stats['gt_num_tracks']
  metrics['atp'] = stats['track_tp'] / stats['pr_num_tracks']
  mean_num_present = 0.5 * (stats['gt_num_is_present'] +
                            stats['pr_num_is_present'])
  metrics['idf1'] = stats['idtp'] / mean_num_present
  metrics['idr'] = stats['idtp'] / stats['gt_num_is_present']
  metrics['idp'] = stats['idtp'] / stats['pr_num_is_present']
  # Compute normalized diagnostics if present.
  if 'track_tp_approx' in stats:
    metrics = metrics.join(normalize_diagnostics(stats))
  if was_series:
    metrics = metrics.squeeze(axis=0)
  return metrics
def normalize_diagnostics(stats):
  """Returns pd.DataFrame or pd.Series of diagnostic metrics from stats.

  Normalizes the diagnostic error sums (cover/union decompositions of the
  track-level errors) by the relevant track counts and groups them by cause
  of error (detection FN/FP, association split/merge).

  Args:
    stats: pd.DataFrame or pd.Series containing the diagnostic sum columns
      ('track_tp_approx', 'track_fn_cover*', 'track_fp_cover*',
      'track_fn_union*', 'track_fp_union*') plus track counts.
  """
  squeeze = False
  if isinstance(stats, pd.Series):
    # Create trivial DataFrame with single row.
    stats = pd.DataFrame.from_records([stats])
    squeeze = True
  assert isinstance(stats, pd.DataFrame)
  metrics = pd.DataFrame(index=stats.index)
  # Approximate ATA and its recall/precision, using the approximate TP sum.
  metrics['ata_approx'] = stats['track_tp_approx'] / (0.5 * (
      stats['gt_num_tracks'] + stats['pr_num_tracks']))
  metrics['atr_approx'] = stats['track_tp_approx'] / stats['gt_num_tracks']
  metrics['atp_approx'] = stats['track_tp_approx'] / stats['pr_num_tracks']
  # Symmetric (F-measure style) error decomposition, normalized by the total
  # number of tracks (gt + pr).
  error = pd.DataFrame(index=stats.index)
  error['det_fn'] = stats['track_fn_cover_det'] + stats['track_fp_union_det']
  error['det_fp'] = stats['track_fp_cover_det'] + stats['track_fn_union_det']
  error['ass_split'] = (stats['track_fn_cover_ass_indep'] +
                        stats['track_fp_cover_ass_joint'] +
                        stats['track_fp_union_ass'])
  error['ass_merge'] = (stats['track_fp_cover_ass_indep'] +
                        stats['track_fn_cover_ass_joint'] +
                        stats['track_fn_union_ass'])
  error = error.div(stats['gt_num_tracks'] + stats['pr_num_tracks'], axis=0)
  metrics = metrics.join(error.add_prefix('ata_error_'))
  # Recall-side errors: FN sums normalized by the number of gt tracks.
  error_gt = stats[[
      'track_fn_cover',
      'track_fn_cover_det',
      'track_fn_cover_ass',
      'track_fn_cover_ass_indep',
      'track_fn_cover_ass_joint',
      'track_fn_union',
      'track_fn_union_det',
      'track_fn_union_ass',
  ]].div(stats['gt_num_tracks'], axis=0)
  error_gt.columns = error_gt.columns.str.replace('^track_fn_', '', regex=True)
  # Group by cause of error.
  cause_gt = pd.DataFrame({
      'det_fn': error_gt['cover_det'],
      'det_fp': error_gt['union_det'],
      'ass_split': error_gt['cover_ass_indep'],
      'ass_merge': error_gt['cover_ass_joint'] + error_gt['union_ass'],
  })
  metrics = metrics.join(error_gt.add_prefix('atr_error_'))
  metrics = metrics.join(cause_gt.add_prefix('atr_error_'))
  # Precision-side errors: FP sums normalized by the number of pr tracks.
  error_pr = stats[[
      'track_fp_cover',
      'track_fp_cover_det',
      'track_fp_cover_ass',
      'track_fp_cover_ass_indep',
      'track_fp_cover_ass_joint',
      'track_fp_union',
      'track_fp_union_det',
      'track_fp_union_ass',
  ]].div(stats['pr_num_tracks'], axis=0)
  error_pr.columns = error_pr.columns.str.replace('^track_fp_', '', regex=True)
  # Group by cause of error. (Swap FN/FP and split/merge.)
  cause_pr = pd.DataFrame({
      'det_fp': error_pr['cover_det'],
      'det_fn': error_pr['union_det'],
      'ass_merge': error_pr['cover_ass_indep'],
      'ass_split': error_pr['cover_ass_joint'] + error_pr['union_ass'],
  })
  metrics = metrics.join(error_pr.add_prefix('atp_error_'))
  metrics = metrics.join(cause_pr.add_prefix('atp_error_'))
  if squeeze:
    metrics = metrics.squeeze(axis=0)
  return metrics
def _reindex_pairs(gt_ids, pr_ids, pairs):
"""Re-indexes subsets of integers as consecutive integers from 0."""
if not len(pairs): # pylint: disable=g-explicit-length-test
return np.empty([0, 2], dtype=int)
gt_map = dict(zip(gt_ids, itertools.count()))
pr_map = dict(zip(pr_ids, itertools.count()))
# Will raise KeyError if id was not present.
return np.array([(gt_map[gt_id], pr_map[pr_id]) for gt_id, pr_id in pairs],
dtype=int)
def _cumsum_with_zero(x, axis):
"""Like np.cumsum() but adds a zero at the start."""
x = np.asarray(x)
zero_shape = list(x.shape)
zero_shape[axis] = 1
s = np.cumsum(x, axis=axis)
return np.concatenate([np.zeros(zero_shape, s.dtype), s], axis=axis)
def _make_dense(shape, keys, values, default=0, dtype=None):
"""Creates a dense matrix from (i, j) keys and scalar values."""
if dtype is None:
values = np.asarray(values)
dtype = values.dtype
x = np.full(shape, default, dtype=dtype)
x[keys] = values
return x
def _assert_all_different(xs):
if len(xs) != len(set(xs)):
raise ValueError('elements are not all different', xs)
def _stack_maybe_empty(elems, axis=0, out=None):
"""Like np.stack() but permits elems to be empty if out is empty."""
if elems:
return np.stack(elems, axis=axis, out=out)
else:
assert np.size(out) == 0, 'output is not empty'
return out
| {
"content_hash": "e5138f0026547fbeb3c6a7d621bee707",
"timestamp": "",
"source": "github",
"line_count": 697,
"max_line_length": 80,
"avg_line_length": 42.12338593974175,
"alnum_prop": 0.6396117166212534,
"repo_name": "google-research/localmot",
"id": "da1dcac55b359dc68c23415bf2fd5e3cde4fe6df",
"size": "29936",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "localmot/metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "97600"
}
],
"symlink_target": ""
} |
from pandac.PandaModules import Point3, VBase4
from direct.fsm.FSM import FSM
from direct.interval.IntervalGlobal import Sequence, Parallel, ActorInterval, Func, Wait, ParticleInterval, Track, LerpColorScaleInterval, LerpScaleInterval, LerpHprInterval
from direct.task.Task import Task
from toontown.battle import BattleParticles
from toontown.battle import MovieUtil
from toontown.minigame.MazeSuit import MazeSuit
from CogdoMazeGameObjects import CogdoMazeSplattable
import CogdoMazeGameGlobals as Globals
import random
class CogdoMazeSuit(MazeSuit, FSM, CogdoMazeSplattable):
    """A Cog suit in the Cogdo maze minigame.

    Combines maze-walking behavior (MazeSuit), a finite-state machine (FSM)
    and gag-splattability (CogdoMazeSplattable). Broadcasts messenger events
    when hit by a gag, when it dies and each time it thinks (moves a tile).
    """
    GagHitEventName = 'CogdoMazeSuit_GagHit'
    DeathEventName = 'CogdoMazeSuit_Death'
    ThinkEventName = 'CogdoMazeSuit_Think'

    def __init__(self, serialNum, maze, randomNumGen, difficulty, startTile, cogdoSuitType, walkAnimName = None):
        """Configures the suit from Globals.SuitData[cogdoSuitType]."""
        data = Globals.SuitData[cogdoSuitType]
        MazeSuit.__init__(self, serialNum, maze, randomNumGen, data['cellWalkPeriod'], difficulty, data['dnaName'], startTile=startTile, walkSameDirectionProb=Globals.SuitWalkSameDirectionProb, walkTurnAroundProb=Globals.SuitWalkTurnAroundProb, uniqueRandomNumGen=False, walkAnimName=walkAnimName)
        FSM.__init__(self, 'CogdoMazeSuit')
        CogdoMazeSplattable.__init__(self, self.suit, '%s-%i' % (Globals.SuitCollisionName, self.serialNum), 1.5)
        # 'scale' is optional per suit type. (Was data.has_key('scale'):
        # dict.has_key() is Python-2-only; `in` behaves identically.)
        if 'scale' in data:
            self.suit.setScale(data['scale'])
        self.hp = data['hp']
        self.type = cogdoSuitType
        self.memos = data['memos']
        self.deathSuit = self.suit.getLoseActor()
        self.deathSuit.pose('lose', 0)
        BattleParticles.loadParticles()
        self._initSfx()

    def _initSfx(self):
        """Creates the death sound interval (spin then explosion)."""
        audioMgr = base.cogdoGameAudioMgr
        self._deathSoundIval = Sequence(audioMgr.createSfxIval('cogSpin', duration=1.6, startTime=0.6, volume=0.8, source=self.deathSuit), audioMgr.createSfxIval('cogDeath', volume=0.32, source=self.deathSuit))

    def _destroySfx(self):
        if self._deathSoundIval.isPlaying():
            self._deathSoundIval.finish()
        del self._deathSoundIval

    def destroy(self):
        """Releases particles, events, sfx and base-class resources."""
        BattleParticles.unloadParticles()
        self.ignoreAll()
        self._destroySfx()
        CogdoMazeSplattable.destroy(self)
        MazeSuit.destroy(self)

    def handleEnterSphere(self, collEntry):
        # COLLISION_EVENT_NAME is inherited -- presumably from MazeSuit;
        # TODO(review): confirm the defining base class.
        messenger.send(self.COLLISION_EVENT_NAME, [self.type, self.serialNum])

    def gameStart(self, gameStartTime):
        """Starts listening for gag hits and announces the initial tile."""
        MazeSuit.gameStart(self, gameStartTime)
        self.accept(Globals.GagCollisionName + '-into-' + self.gagCollisionName, self.handleGagHit)
        messenger.send(self.ThinkEventName, [self, self.TX, self.TY])

    def initCollisions(self):
        MazeSuit.initCollisions(self)
        self.collNodePath.setScale(0.75)
        self.accept(self.uniqueName('again' + self.COLL_SPHERE_NAME), self.handleEnterSphere)

    def think(self, curTic, curT, unwalkables):
        """Advances maze walking, then broadcasts the current tile."""
        MazeSuit.think(self, curTic, curT, unwalkables)
        messenger.send(self.ThinkEventName, [self, self.TX, self.TY])

    def handleGagHit(self, collEntry):
        gagNodePath = collEntry.getFromNodePath().getParent()
        messenger.send(self.GagHitEventName, [self.type, self.serialNum, gagNodePath])

    def _getSuitAnimationIval(self, animName, startFrame = 0, duration = 1, partName = None, nextState = None):
        """Builds an interval playing animName over `duration` seconds,
        optionally requesting `nextState` on the FSM when it finishes."""
        totalFrames = self.suit.getNumFrames(animName)
        frames = totalFrames - 1 - startFrame
        frameRate = self.suit.getFrameRate(animName)
        newRate = frames / duration
        playRate = newRate / frameRate
        ival = Sequence(ActorInterval(self.suit, animName, startTime=startFrame / newRate, endTime=totalFrames / newRate, playRate=playRate, partName=partName))
        if nextState is not None:

            def done():
                self.request(nextState)

            ival.append(Func(done))
        return ival

    def hitByGag(self):
        """Applies one point of gag damage; explodes at zero HP."""
        self.hp = self.hp - 1
        self.doSplat()
        if self.hp <= 0:
            self.explode()

    def explode(self):
        self.doDeathTrack()
        messenger.send(self.DeathEventName, [self.type, self.serialNum])

    def doDeathTrack(self):
        """Plays the death sequence: lose actor, gear particles, explosion, sfx."""

        def removeDeathSuit(suit, deathSuit):
            if not deathSuit.isEmpty():
                deathSuit.detachNode()
            suit.cleanupLoseActor()

        # Swap the live suit for the 'lose' actor in place.
        self.deathSuit.reparentTo(self.suit.getParent())
        self.deathSuit.setScale(self.suit.getScale())
        self.deathSuit.setPos(render, self.suit.getPos(render))
        self.deathSuit.setHpr(render, self.suit.getHpr(render))
        self.suit.hide()
        self.collNodePath.reparentTo(self.deathSuit)
        gearPoint = Point3(0, 0, self.suit.height / 2.0 + 2.0)
        smallGears = BattleParticles.createParticleEffect(file='gearExplosionSmall')
        singleGear = BattleParticles.createParticleEffect('GearExplosion', numParticles=1)
        smallGearExplosion = BattleParticles.createParticleEffect('GearExplosion', numParticles=10)
        bigGearExplosion = BattleParticles.createParticleEffect('BigGearExplosion', numParticles=30)
        smallGears.setPos(gearPoint)
        singleGear.setPos(gearPoint)
        smallGearExplosion.setPos(gearPoint)
        bigGearExplosion.setPos(gearPoint)
        smallGears.setDepthWrite(False)
        singleGear.setDepthWrite(False)
        smallGearExplosion.setDepthWrite(False)
        bigGearExplosion.setDepthWrite(False)
        suitTrack = Sequence(Func(self.collNodePath.stash), ActorInterval(self.deathSuit, 'lose', startFrame=80, endFrame=140), Func(removeDeathSuit, self.suit, self.deathSuit, name='remove-death-suit'))
        explosionTrack = Sequence(Wait(1.5), MovieUtil.createKapowExplosionTrack(self.deathSuit, explosionPoint=gearPoint))
        gears1Track = Sequence(ParticleInterval(smallGears, self.deathSuit, worldRelative=0, duration=4.3, cleanup=True), name='gears1Track')
        gears2MTrack = Track((0.0, explosionTrack), (0.7, ParticleInterval(singleGear, self.deathSuit, worldRelative=0, duration=5.7, cleanup=True)), (5.2, ParticleInterval(smallGearExplosion, self.deathSuit, worldRelative=0, duration=1.2, cleanup=True)), (5.4, ParticleInterval(bigGearExplosion, self.deathSuit, worldRelative=0, duration=1.0, cleanup=True)), name='gears2MTrack')

        def removeParticle(particle):
            if particle and hasattr(particle, 'renderParent'):
                particle.cleanup()
                del particle

        removeParticles = Sequence(Func(removeParticle, smallGears), Func(removeParticle, singleGear), Func(removeParticle, smallGearExplosion), Func(removeParticle, bigGearExplosion))
        self.deathTrack = Sequence(Parallel(suitTrack, gears2MTrack, gears1Track, self._deathSoundIval), removeParticles)
        self.deathTrack.start()
class CogdoMazeSlowMinionSuit(CogdoMazeSuit):
    """Slow minion Cog with a simple Normal/Attack state machine."""

    def __init__(self, serialNum, maze, randomNumGen, difficulty, startTile = None):
        CogdoMazeSuit.__init__(self, serialNum, maze, randomNumGen, difficulty, startTile, Globals.SuitTypes.SlowMinion)
        self.defaultTransitions = {'Off': ['Normal'],
         'Normal': ['Attack', 'Off'],
         'Attack': ['Normal']}

    def gameStart(self, gameStartTime):
        CogdoMazeSuit.gameStart(self, gameStartTime)
        self.request('Normal')

    def enterNormal(self):
        self.startWalkAnim()

    def exitNormal(self):
        pass

    def enterAttack(self, elapsedTime):
        # Play the finger-wag attack for 2 seconds, then return to Normal.
        self._attackIval = self._getSuitAnimationIval('finger-wag', duration=2.0, nextState='Normal')
        self._attackIval.start(elapsedTime)

    def filterAttack(self, request, args):
        """Swallows re-entrant Attack requests while already attacking;
        defers everything else to the default filter.

        (Removed an unreachable trailing `return None`: both branches of the
        original if/else already returned.)
        """
        if request == 'Attack':
            return None
        return self.defaultFilter(request, args)

    def exitAttack(self):
        self._attackIval.pause()
        del self._attackIval
class CogdoMazeFastMinionSuit(CogdoMazeSuit):
    """Fast minion Cog; behavior comes entirely from CogdoMazeSuit with the
    FastMinion stats from Globals.SuitData."""

    def __init__(self, serialNum, maze, randomNumGen, difficulty, startTile = None):
        CogdoMazeSuit.__init__(self, serialNum, maze, randomNumGen, difficulty, startTile, Globals.SuitTypes.FastMinion)
class CogdoMazeBossSuit(CogdoMazeSuit):
    """Boss Cog: stomps while walking (shaking the screen), spins and blinks
    when hit, and exposes a helper to pick a random walkable tile nearby."""
    BlinkTaskName = 'CogdoMazeBossBlinkTask'
    ShakeTaskName = 'CogdoMazeBossShakeTask'
    StartWalkTaskName = 'CogdoMazeBossStartWalkTask'
    ShakeEventName = 'CogdoMazeSuitShake'

    def __init__(self, serialNum, maze, randomNumGen, difficulty, startTile = None):
        CogdoMazeSuit.__init__(self, serialNum, maze, randomNumGen, difficulty, startTile, Globals.SuitTypes.Boss, walkAnimName='stomp')
        self.dropTimer = 0
        # Half a cell per walk period; used to scale the stomp animation rate.
        self._walkSpeed = float(self.maze.cellWidth) / self.cellWalkDuration * 0.5

    def _initSfx(self):
        """Adds the boss stomp loop and angry hit sfx on top of the base sfx."""
        CogdoMazeSuit._initSfx(self)
        audioMgr = base.cogdoGameAudioMgr
        self._stompSfxIval = audioMgr.createSfxIval('cogStomp', source=self.suit, cutoff=Globals.BossStompSfxCutoff, volume=0.3)
        self._hitSfx = audioMgr.createSfx('bossCogAngry', self.suit)

    def _destroySfx(self):
        del self._hitSfx
        if self._stompSfxIval.isPlaying():
            self._stompSfxIval.finish()
        del self._stompSfxIval
        CogdoMazeSuit._destroySfx(self)

    def spin(self):
        # One full spin per Globals.BossSpinCount, easing out.
        part = self.suit
        time = Globals.BossSpinTime
        degrees = 360 * Globals.BossSpinCount
        spinIval = LerpHprInterval(part, time, (self.suit.getH() + degrees, 0, 0), blendType='easeOut')
        spinIval.start()

    def hitByGag(self):
        """Reacts to a gag hit: spins/blinks while above 1 HP, stops blinking
        on the final hit, then applies base-class damage handling."""
        if self.hp >= 2:
            self._hitSfx.play()
            self.spin()
            self.suit.setColorScale(Globals.BlinkColor)
            self.__startBlinkTask()
        elif self.hp == 1:
            self.__stopBlinkTask()
        CogdoMazeSuit.hitByGag(self)

    def gameStart(self, gameStartTime):
        CogdoMazeSuit.gameStart(self, gameStartTime)

    def startWalkAnim(self):
        # Loop the stomp sub-range and sync its play rate to walk speed.
        self.suit.loop(self._walkAnimName, fromFrame=43, toFrame=81)
        self.suit.setPlayRate(self._walkSpeed * Globals.BossCogStompAnimationPlayrateFactor, self._walkAnimName)
        self.__startShakeTask()

    def destroy(self):
        CogdoMazeSuit.destroy(self)
        self.__stopShakeTask()
        self.__stopBlinkTask()

    def pickRandomValidSpot(self, r = 5):
        """Returns a random walkable [x, y] tile within r tiles of the boss.
        NOTE(review): the range excludes TX+r / TY+r on the high side --
        presumably intentional, but confirm."""
        validSpots = []
        for x in xrange(self.TX - r, self.TX + r):
            for y in xrange(self.TY - r, self.TY + r):
                if self.maze.isWalkable(x, y):
                    validSpots.append([x, y])
        return self.rng.choice(validSpots)

    def __startShakeTask(self):
        self.__stopShakeTask()
        taskMgr.doMethodLater(Globals.BossShakeTime, self.__shake, self.uniqueName(CogdoMazeBossSuit.ShakeTaskName))
        self.bossShakeLastTime = 0

    def __stopShakeTask(self):
        taskMgr.remove(self.uniqueName(CogdoMazeBossSuit.ShakeTaskName))

    def __shake(self, task):
        # Periodic task: emit a shake event and stomp sfx every BossShakeTime.
        if task.time - self.bossShakeLastTime > Globals.BossShakeTime:
            self.suit.setPlayRate(self._walkSpeed * Globals.BossCogStompAnimationPlayrateFactor, self._walkAnimName)
            self._stompSfxIval.start()
            messenger.send(self.ShakeEventName, [self, Globals.BossShakeStrength])
            self.bossShakeLastTime = task.time
        return task.cont

    def __startBlinkTask(self):
        self.__stopBlinkTask()
        taskMgr.doMethodLater(Globals.BlinkFrequency, self.__blink, CogdoMazeBossSuit.BlinkTaskName)

    def __stopBlinkTask(self):
        taskMgr.remove(CogdoMazeBossSuit.BlinkTaskName)

    def __blink(self, task):
        # Pulse the color scale white -> BlinkColor; Task.again reschedules.
        blink = Sequence(LerpColorScaleInterval(self.suit, Globals.BlinkSpeed, VBase4(1.0, 1.0, 1.0, 1.0)), LerpColorScaleInterval(self.suit, Globals.BlinkSpeed, Globals.BlinkColor))
        blink.start()
        return Task.again
| {
"content_hash": "1430eb1ca9dea03ba0acab1c2160916f",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 380,
"avg_line_length": 44.49242424242424,
"alnum_prop": 0.6877234803337307,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "9b5edbfc737bfe0f915819d6113814a3d6a65f30",
"size": "11746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/cogdominium/CogdoMazeSuits.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
} |
"""Utilities for unit-testing Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import threading
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def get_test_data(train_samples,
                  test_samples,
                  input_shape,
                  num_classes,
                  random_seed=None):
  """Generates a synthetic classification dataset split into train and test.

  Each class gets a random template; samples are the class template plus
  unit-variance Gaussian noise.

  Arguments:
    train_samples: Integer, how many training samples to generate.
    test_samples: Integer, how many test samples to generate.
    input_shape: Tuple of integers, shape of the inputs.
    num_classes: Integer, number of classes for the data and targets.
    random_seed: Integer, random seed used by numpy to generate data.

  Returns:
    A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
  """
  if random_seed is not None:
    np.random.seed(random_seed)
  total = train_samples + test_samples
  # One random template per class (same np.random call order as before).
  templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
  y = np.random.randint(0, num_classes, size=(total,))
  x = np.zeros((total,) + input_shape, dtype=np.float32)
  for i, label in enumerate(y):
    x[i] = templates[label] + np.random.normal(loc=0, scale=1., size=input_shape)
  return ((x[:train_samples], y[:train_samples]),
          (x[train_samples:], y[train_samples:]))
@test_util.use_deterministic_cudnn
def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
               input_data=None, expected_output=None,
               expected_output_dtype=None, expected_output_shape=None,
               validate_training=True, adapt_data=None):
  """Test routine for a layer with a single input and single output.
  Arguments:
    layer_cls: Layer class object.
    kwargs: Optional dictionary of keyword arguments for instantiating the
      layer.
    input_shape: Input shape tuple.
    input_dtype: Data type of the input data.
    input_data: Numpy array of input data.
    expected_output: Numpy array of the expected output.
    expected_output_dtype: Data type expected for the output.
    expected_output_shape: Shape tuple for the expected shape of the output.
    validate_training: Whether to attempt to validate training on this layer.
      This might be set to False for non-differentiable layers that output
      string or integer values.
    adapt_data: Optional data for an 'adapt' call. If None, adapt() will not
      be tested for this layer. This is only relevant for PreprocessingLayers.
  Returns:
    The output data (Numpy array) returned by the layer, for additional
    checks to be done by the calling code.
  Raises:
    ValueError: if `input_shape is None`.
  """
  if input_data is None:
    if input_shape is None:
      raise ValueError('input_shape is None')
    if not input_dtype:
      input_dtype = 'float32'
    input_data_shape = list(input_shape)
    # Replace unknown (None) dimensions with small random concrete sizes.
    for i, e in enumerate(input_data_shape):
      if e is None:
        input_data_shape[i] = np.random.randint(1, 4)
    input_data = 10 * np.random.random(input_data_shape)
    # Center float inputs around zero.
    if input_dtype[:5] == 'float':
      input_data -= 0.5
    input_data = input_data.astype(input_dtype)
  elif input_shape is None:
    input_shape = input_data.shape
  if input_dtype is None:
    input_dtype = input_data.dtype
  if expected_output_dtype is None:
    expected_output_dtype = input_dtype
  # instantiation
  kwargs = kwargs or {}
  layer = layer_cls(**kwargs)
  # Test adapt, if data was passed.
  if adapt_data is not None:
    layer.adapt(adapt_data)
  # test get_weights , set_weights at layer level
  weights = layer.get_weights()
  layer.set_weights(weights)
  # test and instantiation from weights
  if 'weights' in tf_inspect.getargspec(layer_cls.__init__):
    kwargs['weights'] = weights
    layer = layer_cls(**kwargs)
  # test in functional API
  x = keras.layers.Input(shape=input_shape[1:], dtype=input_dtype)
  y = layer(x)
  if keras.backend.dtype(y) != expected_output_dtype:
    raise AssertionError('When testing layer %s, for input %s, found output '
                         'dtype=%s but expected to find %s.\nFull kwargs: %s' %
                         (layer_cls.__name__,
                          x,
                          keras.backend.dtype(y),
                          expected_output_dtype,
                          kwargs))
  def assert_shapes_equal(expected, actual):
    """Asserts that the output shape from the layer matches the actual shape."""
    if len(expected) != len(actual):
      raise AssertionError(
          'When testing layer %s, for input %s, found output_shape='
          '%s but expected to find %s.\nFull kwargs: %s' %
          (layer_cls.__name__, x, actual, expected, kwargs))
    for expected_dim, actual_dim in zip(expected, actual):
      # Normalize Dimension objects to plain ints/None before comparing.
      if isinstance(expected_dim, tensor_shape.Dimension):
        expected_dim = expected_dim.value
      if isinstance(actual_dim, tensor_shape.Dimension):
        actual_dim = actual_dim.value
      if expected_dim is not None and expected_dim != actual_dim:
        raise AssertionError(
            'When testing layer %s, for input %s, found output_shape='
            '%s but expected to find %s.\nFull kwargs: %s' %
            (layer_cls.__name__, x, actual, expected, kwargs))
  if expected_output_shape is not None:
    assert_shapes_equal(tensor_shape.TensorShape(expected_output_shape),
                        y.shape)
  # check shape inference
  model = keras.models.Model(x, y)
  computed_output_shape = tuple(
      layer.compute_output_shape(
          tensor_shape.TensorShape(input_shape)).as_list())
  computed_output_signature = layer.compute_output_signature(
      tensor_spec.TensorSpec(shape=input_shape, dtype=input_dtype))
  actual_output = model.predict(input_data)
  actual_output_shape = actual_output.shape
  assert_shapes_equal(computed_output_shape, actual_output_shape)
  assert_shapes_equal(computed_output_signature.shape, actual_output_shape)
  if computed_output_signature.dtype != actual_output.dtype:
    raise AssertionError(
        'When testing layer %s, for input %s, found output_dtype='
        '%s but expected to find %s.\nFull kwargs: %s' %
        (layer_cls.__name__, x, actual_output.dtype,
         computed_output_signature.dtype, kwargs))
  if expected_output is not None:
    np.testing.assert_allclose(actual_output, expected_output,
                               rtol=1e-3, atol=1e-6)
  # test serialization, weight setting at model level
  model_config = model.get_config()
  recovered_model = keras.models.Model.from_config(model_config)
  if model.weights:
    weights = model.get_weights()
    recovered_model.set_weights(weights)
    output = recovered_model.predict(input_data)
    np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-6)
  # test training mode (e.g. useful for dropout tests)
  # Rebuild the model to avoid the graph being reused between predict() and
  # train_on_batch(). See b/120160788 for more details. This should be
  # mitigated after 2.0.
  if validate_training:
    model = keras.models.Model(x, layer(x))
    if _thread_local_data.run_eagerly is not None:
      model.compile(
          'rmsprop',
          'mse',
          weighted_metrics=['acc'],
          run_eagerly=should_run_eagerly())
    else:
      model.compile('rmsprop', 'mse', weighted_metrics=['acc'])
    model.train_on_batch(input_data, actual_output)
  # test as first layer in Sequential API
  layer_config = layer.get_config()
  layer_config['batch_input_shape'] = input_shape
  layer = layer.__class__.from_config(layer_config)
  # Test adapt, if data was passed.
  if adapt_data is not None:
    layer.adapt(adapt_data)
  model = keras.models.Sequential()
  model.add(layer)
  actual_output = model.predict(input_data)
  actual_output_shape = actual_output.shape
  for expected_dim, actual_dim in zip(computed_output_shape,
                                      actual_output_shape):
    if expected_dim is not None:
      if expected_dim != actual_dim:
        raise AssertionError(
            'When testing layer %s **after deserialization**, '
            'for input %s, found output_shape='
            '%s but expected to find inferred shape %s.\nFull kwargs: %s' %
            (layer_cls.__name__,
             x,
             actual_output_shape,
             computed_output_shape,
             kwargs))
  if expected_output is not None:
    np.testing.assert_allclose(actual_output, expected_output,
                               rtol=1e-3, atol=1e-6)
  # test serialization, weight setting at model level
  model_config = model.get_config()
  recovered_model = keras.models.Sequential.from_config(model_config)
  if model.weights:
    weights = model.get_weights()
    recovered_model.set_weights(weights)
    output = recovered_model.predict(input_data)
    np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-6)
  # for further checks in the caller function
  return actual_output
_thread_local_data = threading.local()
_thread_local_data.model_type = None
_thread_local_data.run_eagerly = None
_thread_local_data.experimental_run_tf_function = None
_thread_local_data.saved_model_format = None
@tf_contextlib.contextmanager
def model_type_scope(value):
  """Sets the thread-local model type to `value` for the duration of the scope.

  The previous model type is restored when the scope exits, even on error.

  Arguments:
    value: model type value
  Yields:
    The provided value.
  """
  saved = _thread_local_data.model_type
  _thread_local_data.model_type = value
  try:
    yield value
  finally:
    # Put back whatever was active before this scope was entered.
    _thread_local_data.model_type = saved
@tf_contextlib.contextmanager
def run_eagerly_scope(value):
  """Sets the thread-local run-eagerly flag to `value` within the scope.

  The previous flag is restored when the scope exits, even on error.

  Arguments:
    value: Bool specifying if we should run models eagerly in the active test.
      Should be True or False.
  Yields:
    The provided value.
  """
  saved = _thread_local_data.run_eagerly
  _thread_local_data.run_eagerly = value
  try:
    yield value
  finally:
    # Put back whatever was active before this scope was entered.
    _thread_local_data.run_eagerly = saved
def should_run_eagerly():
  """Returns whether the models we are testing should be run eagerly."""
  value = _thread_local_data.run_eagerly
  if value is None:
    raise ValueError('Cannot call `should_run_eagerly()` outside of a '
                     '`run_eagerly_scope()` or `run_all_keras_modes` '
                     'decorator.')
  # Eager mode is only meaningful when eager execution is actually enabled.
  return value and context.executing_eagerly()
@tf_contextlib.contextmanager
def experimental_run_tf_function_scope(value):
  """Sets the thread-local experimental_run_tf_function flag within the scope.

  The previous flag is restored when the scope exits, even on error.

  Arguments:
    value: Bool specifying if we should run models with default distribution
      in the active test. Should be True or False.
  Yields:
    The provided value.
  """
  saved = _thread_local_data.experimental_run_tf_function
  _thread_local_data.experimental_run_tf_function = value
  try:
    yield value
  finally:
    # Put back whatever was active before this scope was entered.
    _thread_local_data.experimental_run_tf_function = saved
def should_run_tf_function():
  """Returns whether the models we are testing should be run distributed."""
  value = _thread_local_data.experimental_run_tf_function
  if value is None:
    raise ValueError(
        'Cannot call `should_run_tf_function()` outside of a '
        '`experimental_run_tf_function_scope()` or `run_all_keras_modes` '
        'decorator.')
  return value and context.executing_eagerly()
@tf_contextlib.contextmanager
def saved_model_format_scope(value):
  """Sets the thread-local saved-model format to `value` within the scope.

  The previous format is restored when the scope exits, even on error.

  Arguments:
    value: saved model format value
  Yields:
    The provided value.
  """
  saved = _thread_local_data.saved_model_format
  _thread_local_data.saved_model_format = value
  try:
    yield value
  finally:
    # Put back whatever was active before this scope was entered.
    _thread_local_data.saved_model_format = saved
def get_saved_model_format():
  """Gets the saved model format that should be tested."""
  fmt = _thread_local_data.saved_model_format
  if fmt is None:
    raise ValueError(
        'Cannot call `get_saved_model_format()` outside of a '
        '`saved_model_format_scope()` or `run_with_all_saved_model_formats` '
        'decorator.')
  return fmt
def get_save_format():
  """Gets the save format that should be used in the active test scope.

  Raises:
    ValueError: if called outside of a `saved_model_format_scope()`.
  """
  if _thread_local_data.saved_model_format is None:
    # Bug fix: the message previously named `get_saved_model_format()`
    # (copy-paste from the sibling function) instead of this function.
    raise ValueError(
        'Cannot call `get_save_format()` outside of a '
        '`saved_model_format_scope()` or `run_with_all_saved_model_formats` '
        'decorator.')
  return _thread_local_data.saved_model_format
def get_model_type():
  """Gets the model type that should be tested."""
  model_type = _thread_local_data.model_type
  if model_type is None:
    raise ValueError('Cannot call `get_model_type()` outside of a '
                     '`model_type_scope()` or `run_with_all_model_types` '
                     'decorator.')
  return model_type
def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
  """Builds a two-layer Sequential MLP classifier.

  Uses sigmoid output for binary (num_classes == 1), softmax otherwise.
  """
  out_activation = 'sigmoid' if num_classes == 1 else 'softmax'
  model = keras.models.Sequential()
  if input_dim:
    hidden = keras.layers.Dense(num_hidden, activation='relu',
                                input_dim=input_dim)
  else:
    hidden = keras.layers.Dense(num_hidden, activation='relu')
  model.add(hidden)
  model.add(keras.layers.Dense(num_classes, activation=out_activation))
  return model
def get_small_functional_mlp(num_hidden, num_classes, input_dim):
  """Builds a two-layer functional-API MLP classifier."""
  out_activation = 'sigmoid' if num_classes == 1 else 'softmax'
  inputs = keras.Input(shape=(input_dim,))
  hidden = keras.layers.Dense(num_hidden, activation='relu')(inputs)
  outputs = keras.layers.Dense(num_classes, activation=out_activation)(hidden)
  return keras.Model(inputs, outputs)
class _SmallSubclassMLP(keras.Model):
  """A small MLP implemented as a Keras subclass model."""

  def __init__(self, num_hidden, num_classes):
    super(_SmallSubclassMLP, self).__init__()
    out_activation = 'sigmoid' if num_classes == 1 else 'softmax'
    # Attribute names layer_a/layer_b are kept for weight-name compatibility.
    self.layer_a = keras.layers.Dense(num_hidden, activation='relu')
    self.layer_b = keras.layers.Dense(num_classes, activation=out_activation)

  def call(self, inputs, **kwargs):
    return self.layer_b(self.layer_a(inputs))
class _SmallSubclassMLPCustomBuild(keras.Model):
  """A subclass-model small MLP that creates its layers in a custom build."""

  def __init__(self, num_hidden, num_classes):
    super(_SmallSubclassMLPCustomBuild, self).__init__()
    self.layer_a = None
    self.layer_b = None
    self.num_hidden = num_hidden
    self.num_classes = num_classes

  def build(self, input_shape):
    # Layers are created lazily, once the input shape is known.
    self.layer_a = keras.layers.Dense(self.num_hidden, activation='relu')
    out_activation = 'sigmoid' if self.num_classes == 1 else 'softmax'
    self.layer_b = keras.layers.Dense(self.num_classes,
                                      activation=out_activation)

  def call(self, inputs, **kwargs):
    return self.layer_b(self.layer_a(inputs))
def get_small_subclass_mlp(num_hidden, num_classes):
  """Returns a small subclass-style MLP (see `_SmallSubclassMLP`)."""
  return _SmallSubclassMLP(num_hidden, num_classes)
def get_small_subclass_mlp_with_custom_build(num_hidden, num_classes):
  """Returns a small subclass MLP with a custom build method."""
  return _SmallSubclassMLPCustomBuild(num_hidden, num_classes)
def get_small_mlp(num_hidden, num_classes, input_dim):
  """Builds a small MLP of the type selected by `get_model_type`."""
  builders = {
      'subclass':
          lambda: get_small_subclass_mlp(num_hidden, num_classes),
      'subclass_custom_build':
          lambda: get_small_subclass_mlp_with_custom_build(
              num_hidden, num_classes),
      'sequential':
          lambda: get_small_sequential_mlp(num_hidden, num_classes, input_dim),
      'functional':
          lambda: get_small_functional_mlp(num_hidden, num_classes, input_dim),
  }
  model_type = get_model_type()
  if model_type not in builders:
    raise ValueError('Unknown model type {}'.format(model_type))
  return builders[model_type]()
class _SubclassModel(keras.Model):
  """A Keras subclass model built from a flat list of layers."""

  def __init__(self, layers, *args, **kwargs):
    """Instantiate a model.

    Args:
      layers: a list of layers to be added to the model.
      *args: Model's args
      **kwargs: Model's keyword args, at most one of
        input_tensor -> the input tensor required for ragged/sparse input.
    """
    input_tensor = kwargs.pop('input_tensor', None)
    super(_SubclassModel, self).__init__(*args, **kwargs)
    # Clone/build don't support a list-of-layers attribute on subclassed
    # models, so each layer is attached as its own attribute instead.
    for index, layer in enumerate(layers):
      setattr(self, self._layer_name_for_i(index), layer)
    self.num_layers = len(layers)
    if input_tensor is not None:
      self._set_inputs(input_tensor)

  def _layer_name_for_i(self, i):
    # Attribute name under which layer number `i` is stored.
    return 'layer{}'.format(i)

  def call(self, inputs, **kwargs):
    outputs = inputs
    for index in range(self.num_layers):
      outputs = getattr(self, self._layer_name_for_i(index))(outputs)
    return outputs
class _SubclassModelCustomBuild(keras.Model):
  """A Keras subclass model that defers layer creation to build()."""

  def __init__(self, layer_generating_func, *args, **kwargs):
    super(_SubclassModelCustomBuild, self).__init__(*args, **kwargs)
    self.all_layers = None
    self._layer_generating_func = layer_generating_func

  def build(self, input_shape):
    # Materialize the layers only once the input shape is known.
    self.all_layers = list(self._layer_generating_func())

  def call(self, inputs, **kwargs):
    outputs = inputs
    for layer in self.all_layers:
      outputs = layer(outputs)
    return outputs
def get_model_from_layers(layers,
                          input_shape=None,
                          input_dtype=None,
                          name=None,
                          input_ragged=None,
                          input_sparse=None):
  """Builds a model from a sequence of layers.

  Args:
    layers: The layers used to build the network.
    input_shape: Shape tuple of the input or 'TensorShape' instance.
    input_dtype: Datatype of the input.
    name: Name for the model.
    input_ragged: Boolean, whether the input data is a ragged tensor.
    input_sparse: Boolean, whether the input data is a sparse tensor.

  Returns:
    A Keras model.
  """

  def make_input():
    # All flavors share the same Input configuration.
    return keras.Input(
        shape=input_shape,
        dtype=input_dtype,
        ragged=input_ragged,
        sparse=input_sparse)

  model_type = get_model_type()
  if model_type == 'subclass':
    # Ragged/sparse inputs require an explicit input tensor on subclass
    # models so that _set_inputs can be called.
    input_tensor = make_input() if (input_ragged or input_sparse) else None
    return _SubclassModel(layers, name=name, input_tensor=input_tensor)

  if model_type == 'subclass_custom_build':
    return _SubclassModelCustomBuild(lambda: layers, name=name)

  if model_type == 'sequential':
    model = keras.models.Sequential(name=name)
    if input_shape:
      model.add(
          keras.layers.InputLayer(
              input_shape=input_shape,
              dtype=input_dtype,
              ragged=input_ragged,
              sparse=input_sparse))
    for layer in layers:
      model.add(layer)
    return model

  if model_type == 'functional':
    if not input_shape:
      raise ValueError('Cannot create a functional model from layers with no '
                       'input shape.')
    inputs = make_input()
    outputs = inputs
    for layer in layers:
      outputs = layer(outputs)
    return keras.Model(inputs, outputs, name=name)

  raise ValueError('Unknown model type {}'.format(model_type))
class _MultiIOSubclassModel(keras.Model):
  """Multi IO Keras subclass model."""

  def __init__(self, branch_a, branch_b, shared_input_branch=None,
               shared_output_branch=None):
    super(_MultiIOSubclassModel, self).__init__()
    self._shared_input_branch = shared_input_branch
    self._branch_a = branch_a
    self._branch_b = branch_b
    self._shared_output_branch = shared_output_branch

  def call(self, inputs, **kwargs):
    if self._shared_input_branch:
      # Single input: push it through the shared trunk, then fan out.
      for layer in self._shared_input_branch:
        inputs = layer(inputs)
      out_a = inputs
      out_b = inputs
    else:
      out_a, out_b = inputs

    for layer in self._branch_a:
      out_a = layer(out_a)
    for layer in self._branch_b:
      out_b = layer(out_b)

    outputs = [out_a, out_b]
    if self._shared_output_branch:
      # Merge the two intermediate results into a single output.
      for layer in self._shared_output_branch:
        outputs = layer(outputs)
    return outputs
class _MultiIOSubclassModelCustomBuild(keras.Model):
  """Multi IO Keras subclass model that uses a custom build method."""

  def __init__(self, branch_a_func, branch_b_func,
               shared_input_branch_func=None,
               shared_output_branch_func=None):
    """Instantiate the model.

    Args:
      branch_a_func: zero-arg callable returning the layers of branch a.
      branch_b_func: zero-arg callable returning the layers of branch b.
      shared_input_branch_func: optional zero-arg callable returning the
        layers of the shared input trunk, or None.
      shared_output_branch_func: optional zero-arg callable returning the
        layers of the shared output head, or None.
    """
    super(_MultiIOSubclassModelCustomBuild, self).__init__()
    self._shared_input_branch_func = shared_input_branch_func
    self._branch_a_func = branch_b_func if False else branch_a_func
    self._branch_a_func = branch_a_func
    self._branch_b_func = branch_b_func
    self._shared_output_branch_func = shared_output_branch_func

    self._shared_input_branch = None
    self._branch_a = None
    self._branch_b = None
    self._shared_output_branch = None

  def build(self, input_shape):
    # Bug fix: the optional branch callables default to None, so calling
    # them unconditionally raised TypeError. Check for None first; also
    # skip branches whose callable returns a falsy value (as before).
    if (self._shared_input_branch_func is not None and
        self._shared_input_branch_func()):
      self._shared_input_branch = self._shared_input_branch_func()
    self._branch_a = self._branch_a_func()
    self._branch_b = self._branch_b_func()
    if (self._shared_output_branch_func is not None and
        self._shared_output_branch_func()):
      self._shared_output_branch = self._shared_output_branch_func()

  def call(self, inputs, **kwargs):
    if self._shared_input_branch:
      for layer in self._shared_input_branch:
        inputs = layer(inputs)
      a = inputs
      b = inputs
    else:
      a, b = inputs
    for layer in self._branch_a:
      a = layer(a)
    for layer in self._branch_b:
      b = layer(b)
    # Return a list for consistency with _MultiIOSubclassModel (the
    # original returned a bare tuple here).
    outs = [a, b]
    if self._shared_output_branch:
      for layer in self._shared_output_branch:
        outs = layer(outs)
    return outs
def get_multi_io_model(
    branch_a,
    branch_b,
    shared_input_branch=None,
    shared_output_branch=None):
  """Builds a multi-io model that contains two branches.

  The produced model will be of the type specified by `get_model_type`.

  To build a two-input, two-output model:
    Specify a list of layers for branch a and branch b, but do not specify any
    shared input branch or shared output branch. The resulting model will apply
    each branch to a different input, to produce two outputs.

    The first value in branch_a must be the Keras 'Input' layer for branch a,
    and the first value in branch_b must be the Keras 'Input' layer for
    branch b.

    example usage:
    ```
    branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
    branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]

    model = get_multi_io_model(branch_a, branch_b)
    ```

  To build a two-input, one-output model:
    Specify a list of layers for branch a and branch b, and specify a
    shared output branch. The resulting model will apply
    each branch to a different input. It will then apply the shared output
    branch to a tuple containing the intermediate outputs of each branch,
    to produce a single output. The first layer in the shared_output_branch
    must be able to merge a tuple of two tensors.

    The first value in branch_a must be the Keras 'Input' layer for branch a,
    and the first value in branch_b must be the Keras 'Input' layer for
    branch b.

    example usage:
    ```
    input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
    input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
    shared_output_branch = [Concatenate(), Dense(), Dense()]

    model = get_multi_io_model(input_branch_a, input_branch_b,
                               shared_output_branch=shared_output_branch)
    ```
  To build a one-input, two-output model:
    Specify a list of layers for branch a and branch b, and specify a
    shared input branch. The resulting model will take one input, and apply
    the shared input branch to it. It will then respectively apply each branch
    to that intermediate result in parallel, to produce two outputs.

    The first value in the shared_input_branch must be the Keras 'Input' layer
    for the whole model. Branch a and branch b should not contain any Input
    layers.

    example usage:
    ```
    shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()]
    output_branch_a = [Dense(), Dense()]
    output_branch_b = [Dense(), Dense()]

    model = get_multi_io_model(output_branch_a, output_branch_b,
                               shared_input_branch=shared_input_branch)
    ```

  Args:
    branch_a: A sequence of layers for branch a of the model.
    branch_b: A sequence of layers for branch b of the model.
    shared_input_branch: An optional sequence of layers to apply to a single
      input, before applying both branches to that intermediate result. If set,
      the model will take only one input instead of two. Defaults to None.
    shared_output_branch: An optional sequence of layers to merge the
      intermediate results produced by branch a and branch b. If set,
      the model will produce only one output instead of two. Defaults to None.

  Returns:
    A multi-io model of the type specified by `get_model_type`, specified
    by the different branches.
  """
  # Extract the functional inputs from the layer lists
  if shared_input_branch:
    inputs = shared_input_branch[0]
    shared_input_branch = shared_input_branch[1:]
  else:
    inputs = branch_a[0], branch_b[0]
    branch_a = branch_a[1:]
    branch_b = branch_b[1:]

  model_type = get_model_type()
  if model_type == 'subclass':
    # Subclass models build their own graph; the Input layers stripped above
    # are simply not used.
    return _MultiIOSubclassModel(branch_a, branch_b, shared_input_branch,
                                 shared_output_branch)

  if model_type == 'subclass_custom_build':
    return _MultiIOSubclassModelCustomBuild((lambda: branch_a),
                                            (lambda: branch_b),
                                            (lambda: shared_input_branch),
                                            (lambda: shared_output_branch))

  if model_type == 'sequential':
    raise ValueError('Cannot use `get_multi_io_model` to construct '
                     'sequential models')

  if model_type == 'functional':
    if shared_input_branch:
      # One input feeding a shared trunk, then two parallel branches.
      a_and_b = inputs
      for layer in shared_input_branch:
        a_and_b = layer(a_and_b)
      a = a_and_b
      b = a_and_b
    else:
      a, b = inputs
    for layer in branch_a:
      a = layer(a)
    for layer in branch_b:
      b = layer(b)
    outputs = a, b

    if shared_output_branch:
      # Merge the two branch outputs into a single output.
      for layer in shared_output_branch:
        outputs = layer(outputs)

    return keras.Model(inputs, outputs)

  raise ValueError('Unknown model type {}'.format(model_type))
# Maps lowercase optimizer names to the corresponding Keras v2 optimizer
# classes. Used by `get_v2_optimizer` below.
_V2_OPTIMIZER_MAP = {
    'adadelta': adadelta_v2.Adadelta,
    'adagrad': adagrad_v2.Adagrad,
    'adam': adam_v2.Adam,
    'adamax': adamax_v2.Adamax,
    'nadam': nadam_v2.Nadam,
    'rmsprop': rmsprop_v2.RMSprop,
    'sgd': gradient_descent_v2.SGD
}
def get_v2_optimizer(name, **kwargs):
  """Get the v2 optimizer requested.

  This is only necessary until v2 are the default, as we are testing in Eager,
  and Eager + v1 optimizers fail tests. When we are in v2, the strings alone
  should be sufficient, and this mapping can theoretically be removed.

  Args:
    name: string name of Keras v2 optimizer.
    **kwargs: any kwargs to pass to the optimizer constructor.

  Returns:
    Initialized Keras v2 optimizer.

  Raises:
    ValueError: if an unknown name was passed.
  """
  try:
    # Only the lookup belongs in the try block: a KeyError raised inside the
    # optimizer constructor must not be misreported as an unknown name.
    optimizer_cls = _V2_OPTIMIZER_MAP[name]
  except KeyError:
    raise ValueError(
        'Could not find requested v2 optimizer: {}\nValid choices: {}'.format(
            name, list(_V2_OPTIMIZER_MAP.keys())))
  return optimizer_cls(**kwargs)
def get_expected_metric_variable_names(var_names, name_suffix=''):
  """Returns expected metric variable names given names and prefix/suffix."""
  if tf2.enabled() or context.executing_eagerly():
    # In V1 eager mode and V2 variable names are not made unique.
    suffix = ''
  else:
    # In V1 graph mode variable names are made unique using a suffix.
    suffix = name_suffix
  return [n + suffix + ':0' for n in var_names]
def enable_v2_dtype_behavior(fn):
  """Decorator for enabling the layer V2 dtype behavior on a test."""
  return _set_v2_dtype_behavior(fn, enabled=True)
def disable_v2_dtype_behavior(fn):
  """Decorator for disabling the layer V2 dtype behavior on a test."""
  return _set_v2_dtype_behavior(fn, enabled=False)
def _set_v2_dtype_behavior(fn, enabled):
  """Returns version of 'fn' that runs with v2 dtype behavior on or off."""

  @functools.wraps(fn)
  def wrapper(*args, **kwargs):
    # Save the global flag, override it for the call, and always restore it.
    saved = base_layer_utils.V2_DTYPE_BEHAVIOR
    base_layer_utils.V2_DTYPE_BEHAVIOR = enabled
    try:
      return fn(*args, **kwargs)
    finally:
      base_layer_utils.V2_DTYPE_BEHAVIOR = saved

  return tf_decorator.make_decorator(fn, wrapper)
| {
"content_hash": "bc623aacd7b7566cbf35ae3a5e6dd608",
"timestamp": "",
"source": "github",
"line_count": 871,
"max_line_length": 88,
"avg_line_length": 34.88978185993111,
"alnum_prop": 0.6671163907993024,
"repo_name": "ppwwyyxx/tensorflow",
"id": "e4c2406399f40e13cd887d3cd64fc2cc075c962f",
"size": "31078",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/testing_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45318"
},
{
"name": "C",
"bytes": "796611"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76521274"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952883"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1254789"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297774"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38709528"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7469"
},
{
"name": "Shell",
"bytes": "643731"
},
{
"name": "Smarty",
"bytes": "34743"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
} |
from threading import Lock
import numpy as np
import cv2
import time
class FaceDetector():
    """Runs a face detector repeatedly over frames and stores the results.

    Thread-safety: `self.faces` is only read/written while holding
    `self.faceLock`; use `processFaces` to inspect or replace the list from
    another thread.
    """

    def __init__(self, mainObj, detector, queue = None, extrap = 0):
        # mainObj supplies frames (getCopyOfFrame) and the window size.
        # detector is a callable taking a frame and returning (x, y, w, h)
        # rectangles.
        # queue, when set, is waited on before each detection pass --
        # presumably a threading.Condition signalled by the frame producer;
        # TODO confirm with the caller.
        # extrap is the fractional padding added around each detected face.
        self.faceLock = Lock()
        self.mainObj = mainObj
        self.quit = False
        self.detector = detector
        self.faces = []
        self.extrap = extrap
        self.queue = queue
        self.lastTime = 0

    def __call__(self):
        # Entry point when the object is used as a thread target.
        self.faceLoop()
        print 'Detector exiting'

    def faceLoop(self):
        """Detect faces in a loop until stop() is called."""
        while not self.quit:
            if self.queue is not None:
                # Block until the producer signals a new frame.
                with self.queue:
                    self.queue.wait()
            frame = self.mainObj.getCopyOfFrame()
            startTime = time.clock()
            newFaces = self.detector(frame)
            (maxw, maxh) = self.mainObj.getWindowSize()
            for index, (x, y, w, h) in enumerate(newFaces):
                # Pad each box by extrap * size on every side, clamped to
                # the window bounds, then store it back as (x, y, w, h).
                extraw = self.extrap * w
                extrah = self.extrap * h
                left = np.int0(max(x - extraw, 0))
                right = np.int0(min(x + w + extraw, maxw))
                top = np.int0(max(y - extrah, 0))
                bottom = np.int0(min(y + h + extrah, maxh))
                newFaces[index] = (left, top, right - left, bottom - top)
            # Append (not replace) under the lock; consumers prune via
            # processFaces.
            with self.faceLock:
                self.faces += list(newFaces)
            self.lastTime = time.clock() - startTime

    def processFaces(self, process):
        """Atomically replace the face list with process(current_faces)."""
        with self.faceLock:
            self.faces = process(self.faces)

    def stop(self):
        # Ask faceLoop to exit after its current iteration.
        self.quit = True
| {
"content_hash": "f96a4fd8b652e0c2a217bf69fc3e7d4c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 73,
"avg_line_length": 32.851063829787236,
"alnum_prop": 0.5272020725388601,
"repo_name": "lehtolav/distributed-face-recognition",
"id": "aee7b2a676930054f18b0e1e115efdf46bd2791b",
"size": "1829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pi side/detectors/facedetector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31184"
},
{
"name": "Shell",
"bytes": "84"
}
],
"symlink_target": ""
} |
"""Brings up a set of Global Registry nodes along with databases.
They can create separate clusters.
"""
import copy
import json
import common
import docker
def _tweak_config(config, name, uid):
    """Returns a deep copy of `config` narrowed to the single node `name`,
    with node names made unique using `uid`."""
    node_cfg = copy.deepcopy(config)
    node_cfg['nodes'] = {'node': node_cfg['nodes'][name]}

    sys_config = node_cfg['nodes']['node']['sys.config']
    sys_config['db_nodes'] = [
        common.format_nodename(db_node, uid)
        for db_node in sys_config['db_nodes']
    ]

    vm_args = node_cfg['nodes']['node']['vm.args']
    vm_args['name'] = common.format_nodename(vm_args['name'], uid)

    return node_cfg
def _node_up(image, bindir, uid, config, dns_servers):
    """Starts one Global Registry node plus its DB node in docker containers.

    Returns a dict with the started container ids and the node names.
    """
    node_name = config['nodes']['node']['vm.args']['name']
    cookie = config['nodes']['node']['vm.args']['setcookie']
    db_nodes = config['nodes']['node']['sys.config']['db_nodes']

    (gr_name, sep, gr_hostname) = node_name.partition('@')
    gr_dockername = common.format_dockername(gr_name, uid)

    # Generate the node's release config inside the container, then run it.
    gr_command = '''set -e
cat <<"EOF" > /tmp/gen_dev_args.json
{gen_dev_args}
EOF
escript bamboos/gen_dev/gen_dev.escript /tmp/gen_dev_args.json
/root/bin/node/bin/globalregistry console'''
    gr_command = gr_command.format(
        gen_dev_args=json.dumps({'globalregistry': config}))

    # Start DB node for current GR instance.
    # Currently, only one DB node for GR is allowed, because we are using links.
    # It's impossible to create a bigcouch cluster with docker's links.
    db_node = db_nodes[0]
    (db_name, sep, db_hostname) = db_node.partition('@')
    db_dockername = common.format_dockername(db_name, uid)

    # Patch bigcouch's config in-place so its Erlang node name and cookie
    # match what the GR node expects, then start it.
    db_command = '''echo '[httpd]' > /opt/bigcouch/etc/local.ini
echo 'bind_address = 0.0.0.0' >> /opt/bigcouch/etc/local.ini
sed -i 's/-name bigcouch/-name {name}@{host}/g' /opt/bigcouch/etc/vm.args
sed -i 's/-setcookie monster/-setcookie {cookie}/g' /opt/bigcouch/etc/vm.args
/opt/bigcouch/bin/bigcouch'''
    db_command = db_command.format(name=db_name, host=db_hostname,
                                   cookie=cookie)

    # DB container must be up first: the GR container links against it.
    bigcouch = docker.run(
        image='onedata/bigcouch',
        detach=True,
        name=db_dockername,
        hostname=db_hostname,
        command=db_command)

    gr = docker.run(
        image=image,
        hostname=gr_hostname,
        detach=True,
        interactive=True,
        tty=True,
        workdir='/root/build',
        name=gr_dockername,
        volumes=[(bindir, '/root/build', 'ro')],
        dns_list=dns_servers,
        link={db_dockername: db_hostname},
        command=gr_command)

    return {
        'docker_ids': [bigcouch, gr],
        'gr_db_nodes': ['{0}@{1}'.format(db_name, db_hostname)],
        'gr_nodes': ['{0}@{1}'.format(gr_name, gr_hostname)]
    }
def up(image, bindir, dns, uid, config_path):
    """Brings up all configured Global Registry nodes (with DBs and DNS)."""
    config = common.parse_json_file(config_path)['globalregistry']
    config['config']['target_dir'] = '/root/bin'

    # One narrowed config per declared node.
    configs = [_tweak_config(config, node, uid) for node in config['nodes']]

    dns_servers, output = common.set_up_dns(dns, uid)
    for node_config in configs:
        node_out = _node_up(image, bindir, uid, node_config, dns_servers)
        common.merge(output, node_out)

    return output
| {
"content_hash": "552c029d8a0e2fa75e049ebf738dde7a",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 80,
"avg_line_length": 32.791666666666664,
"alnum_prop": 0.6188055908513341,
"repo_name": "xorver/oneprovider_ccm",
"id": "7d5db407e9baebe5ae35463f137c5d157d47e6bc",
"size": "3148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bamboos/docker/environment/globalregistry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Erlang",
"bytes": "107505"
},
{
"name": "JavaScript",
"bytes": "413"
},
{
"name": "Makefile",
"bytes": "2086"
},
{
"name": "Python",
"bytes": "35840"
},
{
"name": "Shell",
"bytes": "24675"
}
],
"symlink_target": ""
} |
"""Unittests for mysql.connector.cursor
"""
import new
import itertools
from decimal import Decimal
import time
import datetime
import inspect
import re
import tests
from mysql.connector import (connection, cursor, conversion, protocol,
utils, errors, constants)
class TestsCursor(tests.MySQLConnectorTests):
def tearDown(self):
if hasattr(self, 'c') and isinstance(self.c, cursor.MySQLCursor):
self.c.close()
if hasattr(self, 'connection') and\
isinstance(self.connection, connection.MySQLConnection):
self.connection.close()
def _test_execute_setup(self,connection,
tbl="myconnpy_cursor", engine="MyISAM"):
self._test_execute_cleanup(connection, tbl)
stmt_create = """CREATE TABLE %s
(col1 INT, col2 VARCHAR(30), PRIMARY KEY (col1))
ENGINE=%s""" % (tbl,engine)
try:
cursor = connection.cursor()
cursor.execute(stmt_create)
except (StandardError), e:
self.fail("Failed setting up test table; %s" % e)
cursor.close()
def _test_execute_cleanup(self, connection, tbl="myconnpy_cursor"):
stmt_drop = """DROP TABLE IF EXISTS %s""" % (tbl)
try:
cursor = connection.cursor()
cursor.execute(stmt_drop)
except (StandardError), e:
self.fail("Failed cleaning up test table; %s" % e)
cursor.close()
class CursorModule(tests.MySQLConnectorTests):
    """
    Tests for the cursor module functions and attributes
    """

    def test_RE_SQL_INSERT_VALUES(self):
        # Each case pairs the VALUES-group the regex must capture with the
        # statement it is extracted from.
        pattern = cursor.RE_SQL_INSERT_VALUES
        cases = [
            ("(%s, %s)",
             "INSERT INTO t1 VALUES (%s, %s)"),
            ("( %s, \n %s )",
             "INSERT INTO t1 VALUES ( %s, \n %s )"),
            ("(%(c1)s, %(c2)s)",
             "INSERT INTO t1 VALUES (%(c1)s, %(c2)s)"),
            ("(\n%(c1)s\n, \n%(c2)s\n)",
             "INSERT INTO t1 VALUES \n(\n%(c1)s\n, \n%(c2)s\n)"),
            ("( %(c1)s , %(c2)s )",
             "INSERT INTO t1 VALUES ( %(c1)s , %(c2)s ) ON DUPLICATE"),
        ]

        for expected, stmt in cases:
            match = re.search(pattern, stmt)
            self.assertEqual(expected, match.group(1))
class CursorBaseTests(tests.MySQLConnectorTests):
    # Tests for cursor.CursorBase: the abstract DB-API 2.0 cursor interface.

    def setUp(self):
        self.c = cursor.CursorBase()

    def test___init__(self):
        # Defaults mandated for a fresh cursor (PEP 249: rowcount -1,
        # arraysize 1, no description yet).
        exp = {
            '_description': None,
            '_rowcount': -1,
            '_last_insert_id' : None,
            'arraysize': 1,
        }

        for key, value in exp.items():
            self.assertEqual(value, getattr(self.c, key),
                             msg="Default for '%s' did not match." % key)

    def test_callproc(self):
        """CursorBase object callproc()-method"""
        self.checkMethod(self.c,'callproc')
        try:
            self.c.callproc('foo', args=(1,2,3))
        except (SyntaxError, TypeError):
            self.fail("Cursor callproc(): wrong arguments")

    def test_close(self):
        """CursorBase object close()-method"""
        self.checkMethod(self.c,'close')

    def test_execute(self):
        """CursorBase object execute()-method"""
        self.checkMethod(self.c,'execute')
        try:
            self.c.execute('select', params=(1,2,3))
        except (SyntaxError, TypeError):
            self.fail("Cursor execute(): wrong arguments")

    def test_executemany(self):
        """CursorBase object executemany()-method"""
        self.checkMethod(self.c,'executemany')
        try:
            self.c.executemany('select', [()])
        except (SyntaxError, TypeError):
            self.fail("Cursor executemany(): wrong arguments")

    def test_fetchone(self):
        """CursorBase object fetchone()-method"""
        self.checkMethod(self.c,'fetchone')

    def test_fetchmany(self):
        """CursorBase object fetchmany()-method"""
        self.checkMethod(self.c,'fetchmany')
        try:
            self.c.fetchmany(size=1)
        except (SyntaxError, TypeError):
            self.fail("Cursor fetchmany(): wrong arguments")

    def test_fetchall(self):
        """CursorBase object fetchall()-method"""
        self.checkMethod(self.c,'fetchall')

    def test_nextset(self):
        """CursorBase object nextset()-method"""
        self.checkMethod(self.c,'nextset')

    def test_setinputsizes(self):
        """CursorBase object setinputsizes()-method"""
        self.checkMethod(self.c,'setinputsizes')
        try:
            self.c.setinputsizes((1,))
        except (SyntaxError, TypeError):
            self.fail("CursorBase setinputsizes(): wrong arguments")

    def test_setoutputsize(self):
        """CursorBase object setoutputsize()-method"""
        self.checkMethod(self.c,'setoutputsize')
        try:
            self.c.setoutputsize(1,column=None)
        except (SyntaxError, TypeError):
            self.fail("CursorBase setoutputsize(): wrong arguments")

    def test_description(self):
        # description mirrors _description and must be read-only.
        self.assertEqual(None, self.c.description)
        self.assertEqual(self.c._description, self.c.description)
        self.c._description = 'ham'
        self.assertEqual('ham', self.c.description)
        try:
            self.c.description = 'spam'
        except AttributeError:
            # Expected: assigning to the property must fail.
            pass
        else:
            self.fail('CursorBase.description is not read-only')

    def test_rowcount(self):
        # rowcount mirrors _rowcount and must be read-only.
        self.assertEqual(-1, self.c.rowcount)
        self.assertEqual(self.c._rowcount, self.c.rowcount)
        self.c._rowcount = 2
        self.assertEqual(2, self.c.rowcount)
        try:
            self.c.rowcount = 3
        except AttributeError:
            # Expected: assigning to the property must fail.
            pass
        else:
            self.fail('CursorBase.rowcount is not read-only')

    def test_last_insert_id(self):
        # lastrowid mirrors _last_insert_id and must be read-only.
        self.assertEqual(None, self.c.lastrowid)
        self.assertEqual(self.c._last_insert_id, self.c.lastrowid)
        self.c._last_insert_id = 2
        self.assertEqual(2, self.c.lastrowid)
        try:
            self.c.lastrowid = 3
        except AttributeError:
            # Expected: assigning to the property must fail.
            pass
        else:
            self.fail('CursorBase.lastrowid is not read-only')
class MySQLCursorTests(TestsCursor):
def setUp(self):
self.c = cursor.MySQLCursor(connection=None)
self.connection = None
def test_init(self):
"""MySQLCursor object init"""
try:
c = cursor.MySQLCursor(connection=None)
except (SyntaxError, TypeError), e:
self.fail("Failed initializing MySQLCursor; %s" % e)
exp = {
'_connection' : None,
'_stored_results' : [],
'_nextrow' : (None, None),
'_warnings' : None,
'_warning_count' : 0,
'_executed' : None,
'_executed_list' : [],
}
for key, value in exp.items():
self.assertEqual(value, getattr(c, key),
msg="Default for '%s' did not match." % key)
self.assertRaises(errors.InterfaceError, cursor.MySQLCursor,
connection='foo')
    def test__set_connection(self):
        """MySQLCursor object _set_connection()-method"""
        self.checkMethod(self.c, '_set_connection')

        # Anything but a MySQLConnection instance must be rejected.
        self.assertRaises(errors.InterfaceError,
                          self.c._set_connection, 'foo')
        # A real connection is accepted; the cursor can then be closed.
        self.connection = connection.MySQLConnection(**self.getMySQLConfig())
        self.c._set_connection(self.connection)
        self.c.close()
    def test__reset_result(self):
        """MySQLCursor object _reset_result()-method"""
        self.checkMethod(self.c,'_reset_result')

        def reset(self):
            # Marker attribute so we can verify _reset_result() called reset().
            self._test = "Reset called"
        # Monkey-patch reset() onto the instance (Python 2 `new` module).
        self.c.reset = new.instancemethod(reset, self.c, cursor.MySQLCursor)

        # Attributes that _reset_result() must restore to their defaults.
        exp = {
            'rowcount': -1,
            '_stored_results' : [],
            '_nextrow' : (None, None),
            '_warnings' : None,
            '_warning_count' : 0,
            '_executed' : None,
            '_executed_list' : [],
        }

        self.c._reset_result()
        for key, value in exp.items():
            self.assertEqual(value, getattr(self.c, key),
                             msg="'%s' was not reset." % key)

        # MySQLCursor._reset_result() must call MySQLCursor.reset()
        self.assertEqual('Reset called', self.c._test)
def test__have_unread_result(self):
"""MySQLCursor object _have_unread_result()-method"""
self.checkMethod(self.c, '_have_unread_result')
class FakeConnection(object):
def __init__(self):
self.unread_result = False
self.c = cursor.MySQLCursor()
self.c._connection = FakeConnection()
self.c._connection.unread_result = True
self.assertTrue(self.c._have_unread_result())
self.c._connection.unread_result = False
self.assertFalse(self.c._have_unread_result())
    def test_next(self):
        """MySQLCursor object next()-method"""
        self.checkMethod(self.c,'next')

        self.connection = connection.MySQLConnection(**self.getMySQLConfig())
        self.c = cursor.MySQLCursor(self.connection)
        # Nothing executed yet: iterating must stop immediately.
        self.assertRaises(StopIteration,self.c.next)
        self.c.execute("SELECT SHA1('myconnpy')")
        # SHA-1 digest of the literal string 'myconnpy'.
        exp = (u'c5e24647dbb63447682164d81b34fe493a83610b',)
        self.assertEqual(exp,self.c.next())
        self.c.close()
def test_close(self):
"""MySQLCursor object close()-method"""
self.checkMethod(self.c, 'close')
self.assertEqual(False, self.c.close(),
"close() should return False with no connection")
self.assertEqual(None, self.c._connection)
    def test__process_params(self):
        """MySQLCursor object _process_params()-method"""
        self.checkMethod(self.c,'_process_params')

        # Non-sequence and empty inputs must be rejected.
        self.assertRaises(errors.ProgrammingError,self.c._process_params,'foo')
        self.assertRaises(errors.ProgrammingError,self.c._process_params,())

        st_now = time.localtime()
        # One value per supported Python type...
        data = (
            None,
            int(128),
            long(1281288),
            float(3.14),
            Decimal('3.14'),
            'back\slash',
            'newline\n',
            'return\r',
            "'single'",
            '"double"',
            'windows\032',
            str("Strings are sexy"),
            u'\u82b1',
            datetime.datetime(2008, 5, 7, 20, 01, 23),
            datetime.date(2008, 5, 7),
            datetime.time(20, 03, 23),
            st_now,
            datetime.timedelta(hours=40,minutes=30,seconds=12),
        )
        # ...and the escaped/quoted SQL literal each must be converted to.
        exp = (
            'NULL',
            '128',
            '1281288',
            '3.14',
            "'3.14'",
            "'back\\\\slash'",
            "'newline\\n'",
            "'return\\r'",
            "'\\'single\\''",
            '\'\\"double\\"\'',
            "'windows\\\x1a'",
            "'Strings are sexy'",
            "'\xe8\x8a\xb1'",
            "'2008-05-07 20:01:23'",
            "'2008-05-07'",
            "'20:03:23'",
            "'%s'" % time.strftime('%Y-%m-%d %H:%M:%S',st_now),
            "'40:30:12'",
        )

        self.connection = connection.MySQLConnection(**self.getMySQLConfig())
        self.c = self.connection.cursor()
        self.assertEqual((),self.c._process_params(()),
                         "_process_params() should return a tuple")
        res = self.c._process_params(data)
        for (i,v) in enumerate(exp):
            self.assertEqual(v,res[i])
        self.c.close()
    def test__process_params_dict(self):
        """MySQLCursor object _process_params_dict()-method"""
        self.checkMethod(self.c,'_process_params')

        # Non-sequence and empty inputs must be rejected.
        self.assertRaises(errors.ProgrammingError,self.c._process_params,'foo')
        self.assertRaises(errors.ProgrammingError,self.c._process_params,())

        st_now = time.localtime()
        # Same conversion matrix as test__process_params, but keyed by name.
        data = {
            'a' : None,
            'b' : int(128),
            'c' : long(1281288),
            'd' : float(3.14),
            'e' : Decimal('3.14'),
            'f' : 'back\slash',
            'g' : 'newline\n',
            'h' : 'return\r',
            'i' : "'single'",
            'j' : '"double"',
            'k' : 'windows\032',
            'l' : str("Strings are sexy"),
            'm' : u'\u82b1',
            'n' : datetime.datetime(2008, 5, 7, 20, 01, 23),
            'o' : datetime.date(2008, 5, 7),
            'p' : datetime.time(20, 03, 23),
            'q' : st_now,
            'r' : datetime.timedelta(hours=40,minutes=30,seconds=12),
        }
        exp = {
            'a' : 'NULL',
            'b' : '128',
            'c' : '1281288',
            'd' : '3.14',
            'e' : "'3.14'",
            'f' : "'back\\\\slash'",
            'g' : "'newline\\n'",
            'h' : "'return\\r'",
            'i' : "'\\'single\\''",
            'j' : '\'\\"double\\"\'',
            'k' : "'windows\\\x1a'",
            'l' : "'Strings are sexy'",
            'm' : "'\xe8\x8a\xb1'",
            'n' : "'2008-05-07 20:01:23'",
            'o' : "'2008-05-07'",
            'p' : "'20:03:23'",
            'q' : "'%s'" % time.strftime('%Y-%m-%d %H:%M:%S',st_now),
            'r' : "'40:30:12'",
        }

        self.connection = connection.MySQLConnection(**self.getMySQLConfig())
        self.c = self.connection.cursor()
        self.assertEqual({},self.c._process_params_dict({}),
                         "_process_params_dict() should return a dict")
        self.assertEqual(exp,self.c._process_params_dict(data))
        self.c.close()
    def test__fetch_warnings(self):
        """MySQLCursor object _fetch_warnings()-method"""
        self.checkMethod(self.c,'_fetch_warnings')

        # Without a connection, fetching warnings must fail.
        self.assertRaises(errors.InterfaceError,self.c._fetch_warnings)

        config = self.getMySQLConfig()
        config['get_warnings'] = True

        self.connection = connection.MySQLConnection(**config)
        self.c = self.connection.cursor()
        # 'a' + 'b' forces the server to emit two truncation warnings.
        self.c.execute("SELECT 'a' + 'b'")
        self.c.fetchone()
        exp = [
            (u'Warning', 1292L, u"Truncated incorrect DOUBLE value: 'a'"),
            (u'Warning', 1292L, u"Truncated incorrect DOUBLE value: 'b'")
        ]
        self.assertTrue(self.cmpResult(exp, self.c._fetch_warnings()))
        self.assertEqual(len(exp), self.c._warning_count)
    def test__handle_noresultset(self):
        """MySQLCursor object _handle_noresultset()-method"""
        self.checkMethod(self.c,'_handle_noresultset')

        # None is not a valid OK-packet payload.
        self.assertRaises(errors.ProgrammingError,
                          self.c._handle_noresultset, None)
        # Simulated OK-packet fields for a statement without a result set.
        data = {
            'affected_rows':1,
            'insert_id':10,
            'warning_count': 100,
            'server_status': 8,
        }
        self.connection = connection.MySQLConnection(**self.getMySQLConfig())
        self.c = self.connection.cursor()
        self.c._handle_noresultset(data)
        # The OK-packet fields must be mirrored onto the cursor.
        self.assertEqual(data['affected_rows'],self.c.rowcount)
        self.assertEqual(data['insert_id'], self.c._last_insert_id)
        self.assertEqual(data['warning_count'],self.c._warning_count)

        self.c.close()
    def test__handle_result(self):
        """MySQLCursor object _handle_result()-method"""
        self.connection = connection.MySQLConnection(**self.getMySQLConfig())
        self.c = self.connection.cursor()

        # Anything that isn't a proper result dict must be rejected.
        self.assertRaises(errors.InterfaceError, self.c._handle_result, None)
        self.assertRaises(errors.InterfaceError, self.c._handle_result,
                          'spam')
        self.assertRaises(errors.InterfaceError, self.c._handle_result,
                          { 'spam':5 })

        # Case 0: OK packet (no result set); case 1: a column result set.
        cases = [
            { 'affected_rows': 99999,
              'insert_id': 10,
              'warning_count': 100,
              'server_status': 8,
            },
            { 'eof': {'status_flag': 0, 'warning_count': 0},
              'columns': [('1', 8, None, None, None, None, 0, 129)]
            },
        ]
        # OK packet: rowcount set, nothing left unread on the wire.
        self.c._handle_result(cases[0])
        self.assertEqual(cases[0]['affected_rows'], self.c.rowcount)
        self.assertFalse(self.c._connection.unread_result)
        self.assertFalse(self.c._have_unread_result())

        # Result set: description set, rows still pending on the connection.
        self.c._handle_result(cases[1])
        self.assertEqual(cases[1]['columns'], self.c.description)
        self.assertTrue(self.c._connection.unread_result)
        self.assertTrue(self.c._have_unread_result())
    def test_execute(self):
        """MySQLCursor object execute()-method"""
        self.checkMethod(self.c,'execute')

        # Executing nothing returns None without error.
        self.assertEqual(None, self.c.execute(None, None))

        config = self.getMySQLConfig()
        config['get_warnings'] = True

        self.connection = connection.MySQLConnection(**config)
        self.c = self.connection.cursor()

        # Placeholder count and parameter count must match in both directions.
        self.assertRaises(errors.ProgrammingError,self.c.execute,
                          'SELECT %s,%s,%s', ('foo','bar',))
        self.assertRaises(errors.ProgrammingError,self.c.execute,
                          'SELECT %s,%s', ('foo','bar','foobar'))

        # Statement with server warnings: warnings must be collected.
        self.c.execute("SELECT 'a' + 'b'")
        self.c.fetchone()
        exp = [
            (u'Warning', 1292L, u"Truncated incorrect DOUBLE value: 'a'"),
            (u'Warning', 1292L, u"Truncated incorrect DOUBLE value: 'b'")
        ]
        self.assertTrue(self.cmpResult(exp, self.c._warnings))

        self.c.execute("SELECT BINARY 'myconnpy'")
        exp = [(u'myconnpy',)]
        self.assertEqual(exp, self.c.fetchall())
        self.c.close()

        tbl = 'myconnpy_cursor'
        self._test_execute_setup(self.connection,tbl)
        stmt_insert = "INSERT INTO %s (col1,col2) VALUES (%%s,%%s)" % (tbl)
        self.c = self.connection.cursor()
        res = self.c.execute(stmt_insert, (1,100))
        self.assertEqual(None, res, "Return value of execute() is wrong.")
        stmt_select = "SELECT col1,col2 FROM %s ORDER BY col1" % (tbl)
        self.c.execute(stmt_select)
        self.assertEqual([(1L, u'100')],
                         self.c.fetchall(),"Insert test failed")

        # Named (dict-style) parameters.
        data = {'id': 2}
        stmt = "SELECT * FROM %s WHERE col1 <= %%(id)s" % tbl
        self.c.execute(stmt, data)
        self.assertEqual([(1L, u'100')],self.c.fetchall())

        self._test_execute_cleanup(self.connection,tbl)
        self.c.close()
    def test_executemany(self):
        """MySQLCursor object executemany()-method

        Checks argument validation, that SELECT result sets are discarded,
        and that rowcount accumulates over all parameter sets for
        INSERT/SELECT/DELETE statements.
        """
        self.checkMethod(self.c,'executemany')
        # A None statement with an empty sequence is a no-op.
        self.assertEqual(None, self.c.executemany(None, []))
        config = self.getMySQLConfig()
        config['get_warnings'] = True
        self.connection = connection.MySQLConnection(**config)
        self.c = self.connection.cursor()
        # Invalid parameter containers must raise.
        self.assertRaises(errors.InterfaceError, self.c.executemany,
            'foo', None)
        self.assertRaises(errors.ProgrammingError, self.c.executemany,
            'foo', 'foo')
        self.assertEqual(None, self.c.executemany('foo', []))
        self.assertRaises(errors.ProgrammingError, self.c.executemany,
            'foo', ['foo'])
        self.assertRaises(errors.ProgrammingError,self.c.executemany,
            'SELECT %s', [('foo',), 'foo'])
        # executemany() over a SELECT discards the result sets.
        self.c.executemany("SELECT SHA1(%s)", [('foo',),('bar',)])
        self.assertEqual(None,self.c.fetchone())
        self.c.close()
        tbl = 'myconnpy_cursor'
        self._test_execute_setup(self.connection,tbl)
        stmt_insert = "INSERT INTO %s (col1,col2) VALUES (%%s,%%s)" % (tbl)
        stmt_select = "SELECT col1,col2 FROM %s ORDER BY col1" % (tbl)
        self.c = self.connection.cursor()
        # rowcount is the total over all executed parameter sets.
        self.c.executemany(stmt_insert,[(1,100),(2,200),(3,300)])
        self.assertEqual(3, self.c.rowcount)
        self.c.executemany("SELECT %s",[('f',),('o',),('o',)])
        self.assertEqual(3, self.c.rowcount)
        data = [{'id':2},{'id':3}]
        stmt = "SELECT * FROM %s WHERE col1 <= %%(id)s" % tbl
        self.c.executemany(stmt, data)
        # 2 rows match id<=2 plus 3 rows match id<=3.
        self.assertEqual(5, self.c.rowcount)
        self.c.execute(stmt_select)
        self.assertEqual([(1L, u'100'), (2L, u'200'), (3L, u'300')],
            self.c.fetchall(), "Multi insert test failed")
        data = [{'id':2},{'id':3}]
        stmt = "DELETE FROM %s WHERE col1 = %%(id)s" % tbl
        self.c.executemany(stmt,data)
        self.assertEqual(2,self.c.rowcount)
        self._test_execute_cleanup(self.connection, tbl)
        self.c.close()
def test_fetchwarnings(self):
"""MySQLCursor object fetchwarnings()-method"""
self.checkMethod(self.c,'fetchwarnings')
self.assertEqual(None,self.c.fetchwarnings(),
"There should be no warnings after initiating cursor.")
exp = ['A warning']
self.c._warnings = exp
self.c._warning_count = len(self.c._warnings)
self.assertEqual(exp,self.c.fetchwarnings())
self.c.close()
def test_stored_results(self):
"""MySQLCursor object stored_results()-method"""
self.checkMethod(self.c, 'stored_results')
self.assertEqual([], self.c._stored_results)
self.assertTrue(hasattr(self.c.stored_results(), '__iter__'))
self.c._stored_results.append('abc')
self.assertEqual('abc', self.c.stored_results().next())
try:
result = self.c.stored_results().next()
except StopIteration:
pass
except:
self.fail("StopIteration not raised")
    def _test_callproc_setup(self, connection):
        """Create the stored procedures used by test_callproc().

        sp_1 multiplies two INTs into an OUT parameter; sp_2 does the
        same but also emits two intermediate result sets; sp_3
        concatenates two strings.  Existing procedures are dropped first.
        """
        self._test_callproc_cleanup(connection)
        stmt_create1 = (
            "CREATE PROCEDURE myconnpy_sp_1"
            "(IN pFac1 INT, IN pFac2 INT, OUT pProd INT) "
            "BEGIN SET pProd := pFac1 * pFac2; END;")
        stmt_create2 = (
            "CREATE PROCEDURE myconnpy_sp_2"
            "(IN pFac1 INT, IN pFac2 INT, OUT pProd INT) "
            "BEGIN SELECT 'abc'; SELECT 'def'; SET pProd := pFac1 * pFac2; END;"
        )
        stmt_create3 = (
            "CREATE PROCEDURE myconnpy_sp_3"
            "(IN pStr1 VARCHAR(20), IN pStr2 VARCHAR(20), "
            "OUT pConCat VARCHAR(100)) "
            "BEGIN SET pConCat := CONCAT(pStr1, pStr2); END;")
        try:
            cursor = connection.cursor()
            cursor.execute(stmt_create1)
            cursor.execute(stmt_create2)
            cursor.execute(stmt_create3)
        except errors.Error, e:
            # Any server-side failure means the fixture is unusable.
            self.fail("Failed setting up test stored routine; %s" % e)
        cursor.close()
    def _test_callproc_cleanup(self, connection):
        """Drop the stored procedures created by _test_callproc_setup()."""
        sp_names = ('myconnpy_sp_1', 'myconnpy_sp_2', 'myconnpy_sp_3')
        stmt_drop = "DROP PROCEDURE IF EXISTS %s"
        try:
            cursor = connection.cursor()
            for sp_name in sp_names:
                cursor.execute(stmt_drop % sp_name)
        except errors.Error, e:
            self.fail("Failed cleaning up test stored routine; %s" % e)
        cursor.close()
    def test_callproc(self):
        """MySQLCursor object callproc()-method

        Covers argument validation, OUT-parameter round trips for INT and
        VARCHAR procedures, and collection of intermediate result sets in
        _stored_results.
        """
        self.checkMethod(self.c,'callproc')
        # Bad arguments are rejected before touching the server.
        self.assertRaises(ValueError, self.c.callproc, None)
        self.assertRaises(ValueError, self.c.callproc, 'sp1', None)
        config = self.getMySQLConfig()
        config['get_warnings'] = True
        self.connection = connection.MySQLConnection(**config)
        self._test_callproc_setup(self.connection)
        self.c = self.connection.cursor()
        # sp_1 returns only the OUT parameter; no stored result sets.
        exp = (5, 4, 20)
        result = self.c.callproc('myconnpy_sp_1', (exp[0], exp[1], 0))
        self.assertEqual([], self.c._stored_results)
        self.assertEqual(exp, result)
        # sp_2 additionally emits two result sets which are stored.
        exp = (6, 5, 30)
        result = self.c.callproc('myconnpy_sp_2', (exp[0], exp[1], 0))
        self.assertTrue(isinstance(self.c._stored_results, list))
        self.assertEqual(exp, result)
        exp_results = [
            ('abc',),
            ('def',)
            ]
        for result, exp in itertools.izip(self.c.stored_results(),
                                          iter(exp_results)):
            self.assertEqual(exp, result.fetchone())
        # sp_3 works with string IN/OUT parameters.
        exp = ('ham', 'spam', 'hamspam')
        result = self.c.callproc('myconnpy_sp_3', (exp[0], exp[1], ''))
        self.assertTrue(isinstance(self.c._stored_results, list))
        self.assertEqual(exp, result)
        self._test_callproc_cleanup(self.connection)
        self.c.close()
    def test_fetchone(self):
        """MySQLCursor object fetchone()-method

        fetchone() returns None before any statement and after the result
        set is exhausted, and one converted row otherwise.
        """
        self.checkMethod(self.c,'fetchone')
        self.assertEqual(None,self.c.fetchone())
        self.connection = connection.MySQLConnection(**self.getMySQLConfig())
        self.c = self.connection.cursor()
        self.c.execute("SELECT SHA1('myconnpy')")
        exp = (u'c5e24647dbb63447682164d81b34fe493a83610b',)
        self.assertEqual(exp, self.c.fetchone())
        # Result exhausted: fetchone() goes back to returning None.
        self.assertEqual(None,self.c.fetchone())
        self.c.close()
    def test_fetchmany(self):
        """MySQLCursor object fetchmany()-method

        Inserts 10 rows and fetches them back in batches of 4/3/3; a
        further fetchmany() on the exhausted result returns [].
        """
        self.checkMethod(self.c,'fetchmany')
        self.assertEqual([],self.c.fetchmany())
        self.connection = connection.MySQLConnection(**self.getMySQLConfig())
        tbl = 'myconnpy_fetch'
        self._test_execute_setup(self.connection,tbl)
        stmt_insert = "INSERT INTO %s (col1,col2) VALUES (%%s,%%s)" % (tbl)
        stmt_select = "SELECT col1,col2 FROM %s ORDER BY col1 DESC" % (tbl)
        self.c = self.connection.cursor()
        nrRows = 10
        data = [ (i,"%s" % (i*100)) for i in range(0,nrRows)]
        self.c.executemany(stmt_insert,data)
        self.c.execute(stmt_select)
        # Rows come back in descending col1 order (see stmt_select).
        exp = [(9L, u'900'), (8L, u'800'), (7L, u'700'), (6L, u'600')]
        rows = self.c.fetchmany(4)
        self.assertTrue(self.cmpResult(exp,rows),
            "Fetching first 4 rows test failed.")
        exp = [(5L, u'500'), (4L, u'400'), (3L, u'300')]
        rows = self.c.fetchmany(3)
        self.assertTrue(self.cmpResult(exp,rows),
            "Fetching next 3 rows test failed.")
        exp = [(2L, u'200'), (1L, u'100'), (0L, u'0')]
        rows = self.c.fetchmany(3)
        self.assertTrue(self.cmpResult(exp,rows),
            "Fetching next 3 rows test failed.")
        # Nothing left to fetch.
        self.assertEqual([],self.c.fetchmany())
        self._test_execute_cleanup(self.connection,tbl)
        self.c.close()
    def test_fetchall(self):
        """MySQLCursor object fetchall()-method

        fetchall() without a prior execute() raises InterfaceError,
        returns [] for an empty result, and otherwise all rows at once.
        """
        self.checkMethod(self.c,'fetchall')
        self.assertRaises(errors.InterfaceError,self.c.fetchall)
        self.connection = connection.MySQLConnection(**self.getMySQLConfig())
        tbl = 'myconnpy_fetch'
        self._test_execute_setup(self.connection,tbl)
        stmt_insert = "INSERT INTO %s (col1,col2) VALUES (%%s,%%s)" % (tbl)
        stmt_select = "SELECT col1,col2 FROM %s ORDER BY col1 ASC" % (tbl)
        self.c = self.connection.cursor()
        self.c.execute("SELECT * FROM %s" % tbl)
        self.assertEqual([],self.c.fetchall(),
            "fetchall() with empty result should return []")
        nrRows = 10
        data = [ (i,"%s" % (i*100)) for i in range(0,nrRows) ]
        self.c.executemany(stmt_insert,data)
        self.c.execute(stmt_select)
        self.assertTrue(self.cmpResult(data,self.c.fetchall()),
            "Fetching all rows failed.")
        # After fetchall() the result set is exhausted.
        self.assertEqual(None,self.c.fetchone())
        self._test_execute_cleanup(self.connection,tbl)
        self.c.close()
    def test_raise_on_warning(self):
        """With raise_on_warnings set, server warnings become exceptions."""
        self.connection = connection.MySQLConnection(**self.getMySQLConfig())
        self.connection.raise_on_warnings = True
        self.c = self.connection.cursor()
        try:
            # 'a' + 'b' triggers truncation warnings on the server.
            self.c.execute("SELECT 'a' + 'b'")
            self.c.fetchall()
        except errors.Error:
            pass
        else:
            self.fail("Did not get exception while raising warnings.")
    def test__unicode__(self):
        """MySQLCursor object __unicode__()-method

        The representation includes the executed statement, truncated to
        30 characters with a trailing '..' for long statements.
        """
        self.assertEqual("MySQLCursor: (Nothing executed yet)",
            "%s" % self.c.__unicode__())
        self.connection = connection.MySQLConnection(**self.getMySQLConfig())
        self.c = self.connection.cursor()
        self.c.execute("SELECT VERSION()")
        self.c.fetchone()
        self.assertEqual("MySQLCursor: SELECT VERSION()",
            "%s" % self.c.__unicode__())
        # Long statements are shown truncated.
        stmt= "SELECT VERSION(),USER(),CURRENT_TIME(),NOW(),SHA1('myconnpy')"
        self.c.execute(stmt)
        self.c.fetchone()
        self.assertEqual("MySQLCursor: %s.." % stmt[:30],
            "%s" % self.c.__unicode__())
        self.c.close()
def test__str__(self):
self.assertEqual("'MySQLCursor: (Nothing executed yet)'",
"%s" % self.c.__str__())
    def test_column_names(self):
        """column_names exposes the result's column labels/aliases."""
        self.connection = connection.MySQLConnection(**self.getMySQLConfig())
        self.c = self.connection.cursor()
        # Unaliased expressions use their own text as column name ('123').
        stmt = "SELECT NOW() as now, 'The time' as label, 123 FROM dual"
        exp = (u'now', u'label', u'123')
        self.c.execute(stmt)
        self.c.fetchone()
        self.assertEqual(exp, self.c.column_names)
        self.c.close()
def test_statement(self):
self.c = cursor.MySQLCursor()
exp = 'SELECT * FROM ham'
self.c._executed = exp
self.assertEqual(exp, self.c.statement)
self.c._executed = ' ' + exp + ' '
self.assertEqual(exp, self.c.statement)
def test_with_rows(self):
self.c = cursor.MySQLCursor()
self.assertFalse(self.c.with_rows)
self.c._description = ('ham','spam')
self.assertTrue(self.c.with_rows)
class MySQLCursorBufferedTests(TestsCursor):
    """Tests for MySQLCursorBuffered, which fetches all rows at execute()."""
    def setUp(self):
        # A buffered cursor detached from any connection.
        self.c = cursor.MySQLCursorBuffered(connection=None)
        self.connection = None
    def test_init(self):
        """MySQLCursorBuffered object init"""
        try:
            c = cursor.MySQLCursorBuffered(connection=None)
        except (SyntaxError, TypeError), e:
            self.fail("Failed initializing MySQLCursorBuffered; %s" % e)
        # Anything that is not a connection object must be rejected.
        self.assertRaises(errors.InterfaceError,cursor.MySQLCursorBuffered,
            connection='foo')
    def test__next_row(self):
        """MySQLCursorBuffered object _next_row-attribute"""
        self.checkAttr(self.c,'_next_row',0)
    def test__rows(self):
        """MySQLCursorBuffered object _rows-attribute"""
        self.checkAttr(self.c,'_rows',None)
    def test_execute(self):
        """MySQLCursorBuffered object execute()-method
        """
        self.checkMethod(self.c,'execute')
        self.assertEqual(None, self.c.execute(None, None))
        config = self.getMySQLConfig()
        config['buffered'] = True
        config['get_warnings'] = True
        self.connection = connection.MySQLConnection(**config)
        self.c = self.connection.cursor()
        # buffered=True makes the connection hand out buffered cursors.
        self.assertEqual(True,isinstance(self.c,cursor.MySQLCursorBuffered))
        self.c.execute("SELECT 1")
        # All rows are fetched eagerly into _rows.
        self.assertEqual([('1',)], self.c._rows)
    def test_raise_on_warning(self):
        """Warnings raise immediately at execute() for buffered cursors."""
        config = self.getMySQLConfig()
        config['buffered'] = True
        config['raise_on_warnings'] = True
        self.connection = connection.MySQLConnection(**config)
        self.c = self.connection.cursor()
        try:
            self.c.execute("SELECT 'a' + 'b'")
        except errors.Error:
            pass
        else:
            self.fail("Did not get exception while raising warnings.")
    def test_with_rows(self):
        """with_rows reflects whether buffered rows are present."""
        c = cursor.MySQLCursorBuffered()
        self.assertFalse(c.with_rows)
        c._rows = [('ham',)]
        self.assertTrue(c.with_rows)
class MySQLCursorRawTests(TestsCursor):
    """Tests for raw cursors: column values are returned unconverted."""
    def setUp(self):
        # raw=True makes the connection skip Python type conversion.
        config = self.getMySQLConfig()
        config.update(raw=True)
        self.connection = connection.MySQLConnection(**config)
        self.c = self.connection.cursor()
    def tearDown(self):
        self.c.close()
        self.connection.close()
    def test_fetchone(self):
        """Raw cursor fetchone() returns raw (string) column values."""
        self.checkMethod(self.c,'fetchone')
        # Nothing executed yet: fetchone() yields None.
        self.assertEqual(None,self.c.fetchone())
        self.c.execute("SELECT 1, 'string', MAKEDATE(2010,365), 2.5")
        raw_row = ('1','string','2010-12-31', '2.5')
        self.assertEqual(raw_row, self.c.fetchone())
class MySQLCursorRawBufferedTests(TestsCursor):
    """Tests for cursors that are both raw and buffered."""
    def setUp(self):
        # Combine raw mode (no conversion) with eager buffering.
        config = self.getMySQLConfig()
        config.update(raw=True, buffered=True)
        self.connection = connection.MySQLConnection(**config)
        self.c = self.connection.cursor()
    def tearDown(self):
        self.c.close()
        self.connection.close()
    def test_fetchone(self):
        """Raw buffered cursor fetchone() returns raw column values."""
        self.checkMethod(self.c,'fetchone')
        self.assertEqual(None,self.c.fetchone())
        self.c.execute("SELECT 1, 'string', MAKEDATE(2010,365), 2.5")
        raw_row = ('1','string','2010-12-31', '2.5')
        self.assertEqual(raw_row, self.c.fetchone())
    def test_fetchall(self):
        """Raw buffered cursor fetchall() returns all raw rows."""
        self.checkMethod(self.c,'fetchall')
        # fetchall() before any execute() is an error.
        self.assertRaises(errors.InterfaceError,self.c.fetchall)
        self.c.execute("SELECT 1, 'string', MAKEDATE(2010,365), 2.5")
        raw_rows = [('1','string','2010-12-31', '2.5')]
        self.assertEqual(raw_rows, self.c.fetchall())
| {
"content_hash": "d53956b927b97926e4a5befd42c32f5b",
"timestamp": "",
"source": "github",
"line_count": 942,
"max_line_length": 80,
"avg_line_length": 36.07643312101911,
"alnum_prop": 0.5470809792843692,
"repo_name": "rcosnita/fantastico",
"id": "99ac25b34443cb2e23dd2d22c02e8dc94868f359",
"size": "35114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "virtual_env/libs/mysql-connector/python2/tests/test_cursor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6802"
},
{
"name": "Python",
"bytes": "2168052"
},
{
"name": "Shell",
"bytes": "13309"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from pyspark.ml.evaluation import RegressionEvaluator,Evaluator
from math import sqrt
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import cosine_similarity
class EvaluadorRMSE(Evaluator):
    """Robust RMSE evaluator.

    Same as RegressionEvaluator with metric='rmse', except that rows
    without a prediction (NaN) are discarded instead of poisoning the
    score.
    """
    def __init__(self, predictionCol, targetCol):
        super(EvaluadorRMSE, self).__init__()
        # Names of the prediction and ground-truth columns.
        self.predictionCol = predictionCol
        self.targetCol = targetCol
    def _evaluate(self, dataset):
        # Delegate to the module-level rmse() helper, which drops NaNs.
        error = rmse(dataset, self.predictionCol, self.targetCol)
        print("Error: {}".format(error))
        return error
    def isLargerBetter(self):
        # Lower RMSE is better.
        return False
class ModelBasedALS(object):
"""
Envoltorio para la clase ALS de ml de Spark.
Da soporte a los metodos de ALS de mllib
"""
def __init__(self,modelALS):
super(ModelBasedALS, self).__init__()
"""
Parametros
----------
modelALS : objeto entrenado de pyspark.ml.recommendation.ALS
"""
self.userIndex,self.userFactors = self.toArray(modelALS.userFactors)
self.itemIndex,self.itemFactors = self.toArray(modelALS.itemFactors)
self.prediccion=pd.DataFrame(data=self.userFactors.dot(self.itemFactors.T),columns=self.itemIndex,index=self.userIndex)
self.relacion_index_user=dict(zip(self.userIndex,range(len(self.userIndex))))
self.relacion_index_item=dict(zip(self.itemIndex,range(len(self.itemIndex))))
def predictAll(self,user_item:pd.DataFrame,tag_prediccion='prediccion'):
"""
Devuelve todas las predicciones dado el par (user,item)
"""
estimaciones=[]
for tupla in user_item.values:
try:
estimacion=self.prediccion.iloc[self.relacion_index_user[tupla[0]],self.relacion_index_item[tupla[1]]]
estimaciones.append(estimacion)
except:
estimaciones.append(np.nan)
user_item[tag_prediccion]=estimaciones
return user_item
def recommendProducts(self,user:int,n:int=3):
"""
Devuelve el top de productos recomendados para el usuario
"""
usuario=self.prediccion.loc[user]
usuario.sort(ascending=False)
return usuario.iloc[:n]
def recommendUsers(self,product:int,n:int=3):
"""
Devuelve el top de los usuarios de un producto
"""
productos=self.prediccion.loc[:,product]
productos.sort(ascending=False)
return productos.iloc[:n]
@staticmethod
def toArray(datos):
indices=[]
lista=[]
aaa=datos.rdd.map(lambda l:(l.id,l.features)).collect()
for tupla in aaa:
indices.append(tupla[0])
lista.append(tupla[1])
return indices,np.array(lista)
def rmse(dataset, predictionCol, targetCol):
    """Root-mean-squared error between two columns of `dataset`,
    ignoring rows removed by dropna() (i.e. missing predictions)."""
    pares = np.array(dataset.dropna().map(
        lambda fila: [fila[predictionCol], fila[targetCol]]).collect())
    # Equivalent to sklearn's mean_squared_error: mean of squared diffs.
    return sqrt(np.mean((pares[:, 0] - pares[:, 1]) ** 2))
| {
"content_hash": "31224e037aac3b4ba48367520c0d1a3b",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 127,
"avg_line_length": 33.484848484848484,
"alnum_prop": 0.633182503770739,
"repo_name": "pvalienteverde/MeetUpIntroMLySistemasRecomendacion",
"id": "1f502a260e37ea2896b670eb893bfbf528d7c51d",
"size": "3315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/SistemasRecomendacion/CollaborativeFiltering.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "355317"
},
{
"name": "Python",
"bytes": "8487"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None  # Lazily-loaded, cached (X, y) subset of the Boston dataset.
# Estimators whose transform()/predict() semantics differ from the rest
# (they return both X and Y from "transform").
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
# Regressors that accept multi-output targets (y of shape (n, n_outputs)).
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
                'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
                'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
                'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
                'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
                'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
                'RANSACRegressor', 'RadiusNeighborsRegressor',
                'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
    """Yield the checks that apply to every (non-meta) estimator.

    `name` is the estimator class name; it drives the per-estimator
    exemptions below.
    """
    yield check_estimators_dtypes
    yield check_fit_score_takes_y
    yield check_dtype_object
    yield check_estimators_fit_returns_self
    # Check that all estimator yield informative messages when
    # trained on empty datasets
    yield check_estimators_empty_data_messages
    if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
        # SpectralEmbedding is non-deterministic,
        # see issue #4236
        # cross-decomposition's "transform" returns X and Y
        yield check_pipeline_consistency
    if name not in ['Imputer']:
        # Test that all estimators check their input for NaN's and infs
        yield check_estimators_nan_inf
    if name not in ['GaussianProcess']:
        # FIXME!
        # in particular GaussianProcess!
        yield check_estimators_overwrite_params
    if hasattr(Estimator, 'sparsify'):
        yield check_sparsify_coefficients
    yield check_estimator_sparse_data
    # Test that estimators can be pickled, and once pickled
    # give the same answer as before.
    yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    """Yield the checks specific to classifiers."""
    # test classifiers can handle non-array data
    yield check_classifier_data_not_an_array
    # test classifiers trained on a single label always return this label
    yield check_classifiers_one_label
    yield check_classifiers_classes
    yield check_estimators_partial_fit_n_features
    # basic consistency testing
    yield check_classifiers_train
    if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
        # TODO some complication with -1 label
            and name not in ["DecisionTreeClassifier",
                             "ExtraTreeClassifier"]):
        # We don't raise a warning in these classifiers, as
        # the column y interface is used by the forests.
        yield check_supervised_y_2d
    # test if NotFittedError is raised
    yield check_estimators_unfitted
    if 'class_weight' in Classifier().get_params().keys():
        yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
    """Yield the checks specific to regressors."""
    # TODO: test with intercept
    # TODO: test with multiple responses
    # basic testing
    yield check_regressors_train
    yield check_regressor_data_not_an_array
    yield check_estimators_partial_fit_n_features
    yield check_regressors_no_decision_function
    yield check_supervised_y_2d
    if name != 'CCA':
        # check that the regressor handles int input
        yield check_regressors_int
    # Test if NotFittedError is raised
    yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
    """Yield the checks specific to transformers."""
    # All transformers should either deal with sparse data or raise an
    # exception with type TypeError and an intelligible error message
    if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
                    'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
        yield check_transformer_data_not_an_array
    # these don't actually fit the data, so don't raise errors
    if name not in ['AdditiveChi2Sampler', 'Binarizer',
                    'FunctionTransformer', 'Normalizer']:
        # basic tests
        yield check_transformer_general
        yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
    """Yield the checks specific to clustering estimators."""
    yield check_clusterer_compute_labels_predict
    if name not in ('WardAgglomeration', "FeatureAgglomeration"):
        # this is clustering on the features
        # let's not test that here.
        yield check_clustering
        yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
    """Yield every check applicable to `Estimator`, picked by its mixins."""
    for check in _yield_non_meta_checks(name, Estimator):
        yield check
    if issubclass(Estimator, ClassifierMixin):
        for check in _yield_classifier_checks(name, Estimator):
            yield check
    if issubclass(Estimator, RegressorMixin):
        for check in _yield_regressor_checks(name, Estimator):
            yield check
    if issubclass(Estimator, TransformerMixin):
        for check in _yield_transformer_checks(name, Estimator):
            yield check
    if issubclass(Estimator, ClusterMixin):
        for check in _yield_clustering_checks(name, Estimator):
            yield check
    # Degenerate-shape checks apply to every estimator.
    yield check_fit2d_predict1d
    yield check_fit2d_1sample
    yield check_fit2d_1feature
    yield check_fit1d_1feature
    yield check_fit1d_1sample
def check_estimator(Estimator):
    """Check if estimator adheres to sklearn conventions.
    This estimator will run an extensive test-suite for input validation,
    shapes, etc.
    Additional tests for classifiers, regressors, clustering or transformers
    will be run if the Estimator class inherits from the corresponding mixin
    from sklearn.base.
    Parameters
    ----------
    Estimator : class
        Class to check.
    """
    # Bug fix: `Estimator` is the class itself, so `Estimator.__class__`
    # is its *metaclass* (e.g. `type` or `ABCMeta`) and its __name__ was
    # never the estimator's name -- breaking all the name-based
    # exemptions in the _yield_* helpers.  Use the class's own __name__.
    name = Estimator.__name__
    check_parameters_default_constructible(name, Estimator)
    for check in _yield_all_checks(name, Estimator):
        check(name, Estimator)
def _boston_subset(n_samples=200):
    """Return a cached, shuffled, standardized Boston subset (X, y)."""
    global BOSTON
    if BOSTON is None:
        # Load once, then reuse for every subsequent call.
        dataset = load_boston()
        X, y = shuffle(dataset.data, dataset.target, random_state=0)
        X = StandardScaler().fit_transform(X[:n_samples])
        BOSTON = X, y[:n_samples]
    return BOSTON
def set_fast_parameters(estimator):
    """Shrink iteration/ensemble parameters so the checks run fast.

    Mutates `estimator` in place; each branch targets a known
    slow-by-default estimator family.
    """
    # speed up some estimators
    params = estimator.get_params()
    if ("n_iter" in params
            and estimator.__class__.__name__ != "TSNE"):
        estimator.set_params(n_iter=5)
    if "max_iter" in params:
        # NMF
        if estimator.max_iter is not None:
            estimator.set_params(max_iter=min(5, estimator.max_iter))
        # LinearSVR
        if estimator.__class__.__name__ == 'LinearSVR':
            estimator.set_params(max_iter=20)
    if "n_resampling" in params:
        # randomized lasso
        estimator.set_params(n_resampling=5)
    if "n_estimators" in params:
        # especially gradient boosting with default 100
        estimator.set_params(n_estimators=min(5, estimator.n_estimators))
    if "max_trials" in params:
        # RANSAC
        estimator.set_params(max_trials=10)
    if "n_init" in params:
        # K-Means
        estimator.set_params(n_init=2)
    if estimator.__class__.__name__ == "SelectFdr":
        # be tolerant of noisy datasets (not actually speed)
        estimator.set_params(alpha=.5)
    if estimator.__class__.__name__ == "TheilSenRegressor":
        estimator.max_subpopulation = 100
    if isinstance(estimator, BaseRandomProjection):
        # Due to the jl lemma and often very few samples, the number
        # of components of the random matrix projection will be probably
        # greater than the number of features.
        # So we impose a smaller number (avoid "auto" mode)
        estimator.set_params(n_components=1)
    if isinstance(estimator, SelectKBest):
        # SelectKBest has a default of k=10
        # which is more feature than we have in most case.
        estimator.set_params(k=1)
class NotAnArray(object):
    """An object that is convertible to an array via the __array__ hook."""
    def __init__(self, data):
        # Wrapped array-like payload handed back on conversion.
        self.data = data
    def __array__(self, dtype=None):
        # numpy invokes this from np.array()/np.asarray().
        return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
    """Fit/predict on every scipy sparse format.

    An estimator may reject sparse input, but only by raising a
    TypeError whose message mentions 'sparse'.
    """
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    # Make the matrix ~80% zeros so it is genuinely sparse.
    X[X < .8] = 0
    X_csr = sparse.csr_matrix(X)
    y = (4 * rng.rand(40)).astype(np.int)
    for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
        X = X_csr.asformat(sparse_format)
        # catch deprecation warnings
        with warnings.catch_warnings():
            if name in ['Scaler', 'StandardScaler']:
                # centering would densify the data; disable it
                estimator = Estimator(with_mean=False)
            else:
                estimator = Estimator()
        set_fast_parameters(estimator)
        # fit and predict
        try:
            estimator.fit(X, y)
            if hasattr(estimator, "predict"):
                pred = estimator.predict(X)
                assert_equal(pred.shape, (X.shape[0],))
            if hasattr(estimator, 'predict_proba'):
                probs = estimator.predict_proba(X)
                # y has 4 classes (values 0..3), hence 4 columns.
                assert_equal(probs.shape, (X.shape[0], 4))
        except TypeError as e:
            if 'sparse' not in repr(e):
                print("Estimator %s doesn't seem to fail gracefully on "
                      "sparse data: error message state explicitly that "
                      "sparse input is not supported if this is not the case."
                      % name)
                raise
        except Exception:
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: it should raise a TypeError if sparse input "
                  "is explicitly not supported." % name)
            raise
def check_dtype_object(name, Estimator):
    # check that estimators treat dtype object as numeric if possible
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10).astype(object)
    y = (X[:, 0] * 4).astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    with warnings.catch_warnings():
        estimator = Estimator()
        set_fast_parameters(estimator)
        # Object-dtype numeric data must fit/predict/transform cleanly.
        estimator.fit(X, y)
        if hasattr(estimator, "predict"):
            estimator.predict(X)
        if hasattr(estimator, "transform"):
            estimator.transform(X)
        # Object-dtype y may be rejected, but only with the canonical
        # "Unknown label type" message.
        try:
            estimator.fit(X, y.astype(object))
        except Exception as e:
            if "Unknown label type" not in str(e):
                raise
        # A truly non-numeric value must produce a clear TypeError.
        X[0, 0] = {'foo': 'bar'}
        msg = "argument must be a string or a number"
        assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
    # check by fitting a 2d array and predicting with a 1d array:
    # a 1d sample should trigger a DeprecationWarning (or ValueError).
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20, 3))
    y = X[:, 0].astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    # Keep decompositions/clusterers valid for tiny data.
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    set_random_state(estimator, 1)
    estimator.fit(X, y)
    for method in ["predict", "transform", "decision_function",
                   "predict_proba"]:
        if hasattr(estimator, method):
            try:
                assert_warns(DeprecationWarning,
                             getattr(estimator, method), X[0])
            except ValueError:
                pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array with only 1 sample: either it works or it
    # raises an informative ValueError -- it must not crash otherwise.
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(1, 10))
    y = X[:, 0].astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    set_random_state(estimator, 1)
    try:
        estimator.fit(X, y)
    except ValueError:
        pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array with only 1 feature: either it works or it
    # raises an informative ValueError -- it must not crash otherwise.
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(10, 1))
    y = X[:, 0].astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    set_random_state(estimator, 1)
    try:
        estimator.fit(X, y)
    except ValueError:
        pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
    # check fitting 1d array with 1 feature: either it works or it raises
    # an informative ValueError -- it must not crash otherwise.
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20))
    y = X.astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    set_random_state(estimator, 1)
    try:
        estimator.fit(X, y)
    except ValueError:
        pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d X with a single-element y: either it works or it
    # raises an informative ValueError -- it must not crash otherwise.
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20))
    y = np.array([1])
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    set_random_state(estimator, 1)
    try:
        estimator.fit(X, y)
    except ValueError :
        pass
def check_transformer_general(name, Transformer):
    """Run _check_transformer on blob data, as arrays and as lists."""
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # Shift to non-negative values (required by e.g. NMF/chi2 kernels).
    X -= X.min()
    _check_transformer(name, Transformer, X, y)
    _check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
    """Transformers must accept array-likes that are not ndarrays."""
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    this_X = NotAnArray(X)
    this_y = NotAnArray(np.asarray(y))
    _check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
    """transform() before fit() must raise (AttributeError or ValueError)."""
    X, y = _boston_subset()
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
    """Core transformer consistency check.

    Verifies that fit_transform and fit().transform() agree, that
    consecutive fit_transform calls agree, that the output sample count
    matches the input, and that a transposed X is rejected.
    """
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)
    n_samples, n_features = np.asarray(X).shape
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    set_random_state(transformer)
    set_fast_parameters(transformer)
    # fit
    if name in CROSS_DECOMPOSITION:
        # Cross-decomposition needs a 2-column y; perturb one column so
        # the two targets are not identical.
        y_ = np.c_[y, y]
        y_[::2, 1] *= 2
    else:
        y_ = y
    transformer.fit(X, y_)
    X_pred = transformer.fit_transform(X, y=y_)
    if isinstance(X_pred, tuple):
        # e.g. cross-decomposition returns (x_scores, y_scores).
        for x_pred in X_pred:
            assert_equal(x_pred.shape[0], n_samples)
    else:
        # check for consistent n_samples
        assert_equal(X_pred.shape[0], n_samples)
    if hasattr(transformer, 'transform'):
        if name in CROSS_DECOMPOSITION:
            X_pred2 = transformer.transform(X, y_)
            X_pred3 = transformer.fit_transform(X, y=y_)
        else:
            X_pred2 = transformer.transform(X)
            X_pred3 = transformer.fit_transform(X, y=y_)
        if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
            for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
                assert_array_almost_equal(
                    x_pred, x_pred2, 2,
                    "fit_transform and transform outcomes not consistent in %s"
                    % Transformer)
                assert_array_almost_equal(
                    x_pred, x_pred3, 2,
                    "consecutive fit_transform outcomes not consistent in %s"
                    % Transformer)
        else:
            assert_array_almost_equal(
                X_pred, X_pred2, 2,
                "fit_transform and transform outcomes not consistent in %s"
                % Transformer)
            assert_array_almost_equal(
                X_pred, X_pred3, 2,
                "consecutive fit_transform outcomes not consistent in %s"
                % Transformer)
            assert_equal(len(X_pred2), n_samples)
            assert_equal(len(X_pred3), n_samples)
        # raises error on malformed input for transform
        if hasattr(X, 'T'):
            # If it's not an array, it does not have a 'T' property
            assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
    # On 32bit Python these estimators are known to produce
    # non-deterministic output; skip them there.
    if _is_32bit() and name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA'):
        # FIXME: try to isolate a minimalistic reproduction case only
        # depending on numpy & scipy, and/or generate a test dataset that
        # does not cause such unstable behaviors.
        raise SkipTest(name + ' is non deterministic on 32bit Python')
    # check that make_pipeline(est) gives same score as est
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X -= X.min()  # some estimators require non-negative input
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    pipeline = make_pipeline(estimator)
    estimator.fit(X, y)
    pipeline.fit(X, y)
    for func_name in ("score", "fit_transform"):
        func = getattr(estimator, func_name, None)
        if func is None:
            continue
        func_pipeline = getattr(pipeline, func_name)
        # the bare estimator and the single-step pipeline must agree
        assert_array_almost_equal(func(X, y), func_pipeline(X, y))
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
    # All estimators must accept an optional ``y`` in fit/score-like
    # methods so they can be used inside a Pipeline.
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 3))
    y = multioutput_estimator_convert_y_2d(name, np.arange(10) % 3)
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    for func_name in ("fit", "score", "partial_fit", "fit_predict",
                      "fit_transform"):
        func = getattr(estimator, func_name, None)
        if func is None:
            continue
        func(X, y)
        # the argument right after (self, X) must be named y (or Y)
        args = inspect.getargspec(func).args
        assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
    # Fitting and predicting must work on float32/float64/int32/int64 input.
    rnd = np.random.RandomState(0)
    X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
    X_train_64 = X_train_32.astype(np.float64)
    X_train_int_64 = X_train_32.astype(np.int64)
    X_train_int_32 = X_train_32.astype(np.int32)
    y = multioutput_estimator_convert_y_2d(name, X_train_int_64[:, 0])
    methods = ["predict", "transform", "decision_function", "predict_proba"]
    for X_train in (X_train_32, X_train_64, X_train_int_64, X_train_int_32):
        # catch deprecation warnings raised during construction and fit
        with warnings.catch_warnings(record=True):
            estimator = Estimator()
            set_fast_parameters(estimator)
            set_random_state(estimator, 1)
            estimator.fit(X_train, y)
            for method in methods:
                if hasattr(estimator, method):
                    getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
    """Check that fitting on empty data raises an informative ValueError."""
    e = Estimator()
    set_fast_parameters(e)
    set_random_state(e, 1)
    X_zero_samples = np.empty(0).reshape(0, 3)
    # The precise message can change depending on whether X or y is
    # validated first. Let us test the type of exception only:
    assert_raises(ValueError, e.fit, X_zero_samples, [])
    X_zero_features = np.empty(0).reshape(3, 0)
    # the following y should be accepted by both classifiers and regressors
    # and ignored by unsupervised models
    y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
    # BUG FIX: use a raw string -- the pattern contains regex escapes such
    # as \( and \d which are invalid escape sequences in a plain literal.
    msg = r"0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
    assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
    """Check that fit/predict/transform reject NaN and inf in the input.

    Each call on corrupted data must raise a ValueError whose repr
    mentions 'inf' or 'NaN'; any other outcome fails the check.
    """
    rnd = np.random.RandomState(0)
    X_train_finite = rnd.uniform(size=(10, 3))
    X_train_nan = rnd.uniform(size=(10, 3))
    X_train_nan[0, 0] = np.nan
    X_train_inf = rnd.uniform(size=(10, 3))
    X_train_inf[0, 0] = np.inf
    y = np.ones(10)
    y[:5] = 0
    y = multioutput_estimator_convert_y_2d(name, y)
    error_string_fit = "Estimator doesn't check for NaN and inf in fit."
    error_string_predict = ("Estimator doesn't check for NaN and inf in"
                            " predict.")
    error_string_transform = ("Estimator doesn't check for NaN and inf in"
                              " transform.")
    for X_train in [X_train_nan, X_train_inf]:
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            estimator = Estimator()
            set_fast_parameters(estimator)
            set_random_state(estimator, 1)
            # try to fit
            try:
                estimator.fit(X_train, y)
            except ValueError as e:
                if 'inf' not in repr(e) and 'NaN' not in repr(e):
                    print(error_string_fit, Estimator, e)
                    traceback.print_exc(file=sys.stdout)
                    raise e
            except Exception as exc:
                print(error_string_fit, Estimator, exc)
                traceback.print_exc(file=sys.stdout)
                raise exc
            else:
                raise AssertionError(error_string_fit, Estimator)
            # actually fit on finite data so predict/transform can be tested
            estimator.fit(X_train_finite, y)
            # predict
            if hasattr(estimator, "predict"):
                try:
                    estimator.predict(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_predict, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_predict, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    # BUG FIX: re-raise -- previously the unexpected error
                    # was swallowed, which also skipped the AssertionError
                    # in the else clause, silently passing the check.
                    raise exc
                else:
                    raise AssertionError(error_string_predict, Estimator)
            # transform
            if hasattr(estimator, "transform"):
                try:
                    estimator.transform(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_transform, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_transform, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    # BUG FIX: re-raise (same reason as in the predict case)
                    raise exc
                else:
                    raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
    """Test that we can pickle all estimators"""
    check_methods = ["predict", "transform", "decision_function",
                     "predict_proba"]
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    # some estimators can't do features less than 0
    X -= X.min()
    # some estimators only take multioutputs
    y = multioutput_estimator_convert_y_2d(name, y)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        estimator = Estimator()
        set_random_state(estimator)
        set_fast_parameters(estimator)
        estimator.fit(X, y)
        # record the output of every available check method before pickling
        result = {method: getattr(estimator, method)(X)
                  for method in check_methods if hasattr(estimator, method)}
        # round-trip through pickle and compare each method's output
        unpickled_estimator = pickle.loads(pickle.dumps(estimator))
        for method, expected in result.items():
            unpickled_result = getattr(unpickled_estimator, method)(X)
            assert_array_almost_equal(expected, unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
    # partial_fit must reject a later call with a different number of
    # features than the first call saw.
    if not hasattr(Alg, 'partial_fit'):
        return
    X, y = make_blobs(n_samples=50, random_state=1)
    X -= X.min()
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        alg = Alg()
        set_fast_parameters(alg)
        if isinstance(alg, ClassifierMixin):
            # classifiers need the full label set up front
            alg.partial_fit(X, y, classes=np.unique(y))
        else:
            alg.partial_fit(X, y)
    assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
    """Common sanity checks for clustering algorithms.

    Fits on array and list input, checks the shape of ``labels_``, its
    agreement with the ground truth, and that ``fit_predict`` matches
    ``labels_`` for deterministic algorithms.
    """
    X, y = make_blobs(n_samples=50, random_state=1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    n_samples, n_features = X.shape
    # catch deprecation and neighbors warnings
    with warnings.catch_warnings(record=True):
        alg = Alg()
        set_fast_parameters(alg)
        if hasattr(alg, "n_clusters"):
            alg.set_params(n_clusters=3)
        set_random_state(alg)
        if name == 'AffinityPropagation':
            alg.set_params(preference=-100)
            alg.set_params(max_iter=100)
        # fit
        alg.fit(X)
        # with lists
        alg.fit(X.tolist())
    assert_equal(alg.labels_.shape, (n_samples,))
    pred = alg.labels_
    assert_greater(adjusted_rand_score(pred, y), 0.4)
    # fit another time with ``fit_predict`` and compare results
    # BUG FIX: compare strings with ``==``, not the identity operator
    # ``is`` (which only works by CPython interning accident).
    if name == 'SpectralClustering':
        # there is no way to make Spectral clustering deterministic :(
        return
    set_random_state(alg)
    with warnings.catch_warnings(record=True):
        pred2 = alg.fit_predict(X)
    assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
    """Check that predict is invariant of compute_labels"""
    X, y = make_blobs(n_samples=20, random_state=0)
    clusterer = Clusterer()
    # only relevant for clusterers exposing compute_labels (MiniBatchKMeans)
    if not hasattr(clusterer, "compute_labels"):
        return
    if hasattr(clusterer, "random_state"):
        clusterer.set_params(random_state=0)
    X_pred1 = clusterer.fit(X).predict(X)
    clusterer.set_params(compute_labels=False)
    X_pred2 = clusterer.fit(X).predict(X)
    assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
    error_string_fit = "Classifier can't train when only one class is present."
    error_string_predict = ("Classifier can't predict when only one class is "
                            "present.")
    rnd = np.random.RandomState(0)
    X_train = rnd.uniform(size=(10, 3))
    X_test = rnd.uniform(size=(10, 3))
    y = np.ones(10)  # a single class in the training labels
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        set_fast_parameters(classifier)
        # Fitting may legitimately refuse a single-class problem with a
        # ValueError mentioning 'class'; any other failure is an error.
        try:
            classifier.fit(X_train, y)
        except ValueError as e:
            if 'class' in repr(e):
                return  # informative refusal: acceptable behavior
            print(error_string_fit, Classifier, e)
            traceback.print_exc(file=sys.stdout)
            raise e
        except Exception as exc:
            print(error_string_fit, Classifier, exc)
            traceback.print_exc(file=sys.stdout)
            raise exc
        # if fitting succeeded, predict must return the single class
        try:
            assert_array_equal(classifier.predict(X_test), y)
        except Exception as exc:
            print(error_string_predict, Classifier, exc)
            raise exc
def check_classifiers_train(name, Classifier):
    """Fit on a multi-class and a binary blob problem and check the
    predict / decision_function / predict_proba contracts."""
    X_m, y_m = make_blobs(n_samples=300, random_state=0)
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = StandardScaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]
    for (X, y) in [(X_m, y_m), (X_b, y_b)]:
        classes = np.unique(y)
        n_classes = len(classes)
        n_samples, n_features = X.shape
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
            if name in ['BernoulliNB', 'MultinomialNB']:
                X -= X.min()  # these require non-negative input
            set_fast_parameters(classifier)
            set_random_state(classifier)
            # raises error on malformed input for fit
            assert_raises(ValueError, classifier.fit, X, y[:-1])
            # fit
            classifier.fit(X, y)
            # with lists
            classifier.fit(X.tolist(), y.tolist())
            assert_true(hasattr(classifier, "classes_"))
            y_pred = classifier.predict(X)
            assert_equal(y_pred.shape, (n_samples,))
            # training set performance
            if name not in ['BernoulliNB', 'MultinomialNB']:
                assert_greater(accuracy_score(y, y_pred), 0.83)
            # raises error on malformed input for predict
            assert_raises(ValueError, classifier.predict, X.T)
            if hasattr(classifier, "decision_function"):
                try:
                    # decision_function agrees with predict
                    decision = classifier.decision_function(X)
                    # BUG FIX: compare ints with ``==`` rather than ``is``
                    # (identity only works for CPython's small-int cache)
                    if n_classes == 2:
                        assert_equal(decision.shape, (n_samples,))
                        dec_pred = (decision.ravel() > 0).astype(np.int)
                        assert_array_equal(dec_pred, y_pred)
                    if (n_classes == 3
                            and not isinstance(classifier, BaseLibSVM)):
                        # 1on1 of LibSVM works differently
                        assert_equal(decision.shape, (n_samples, n_classes))
                        assert_array_equal(np.argmax(decision, axis=1), y_pred)
                    # raises error on malformed input for decision_function
                    # (this assertion was previously duplicated verbatim)
                    assert_raises(ValueError,
                                  classifier.decision_function, X.T)
                except NotImplementedError:
                    pass
            if hasattr(classifier, "predict_proba"):
                # predict_proba agrees with predict
                y_prob = classifier.predict_proba(X)
                assert_equal(y_prob.shape, (n_samples, n_classes))
                assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
                # check that probas for all classes sum to one
                assert_array_almost_equal(np.sum(y_prob, axis=1),
                                          np.ones(n_samples))
                # raises error on malformed input for predict_proba
                # (this assertion was previously duplicated verbatim)
                assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
    """Check if self is returned when calling fit"""
    X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
    y = multioutput_estimator_convert_y_2d(name, y)
    X -= X.min()  # some estimators want non-negative input
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    # ``fit`` must return the very same object, not a copy
    assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
    """Check that predict raises an exception in an unfitted estimator.
    Unfitted estimators should raise either AttributeError or ValueError.
    The specific exception type NotFittedError inherits from both and can
    therefore be adequately raised for that purpose.
    """
    # Common test for Regressors as well as Classifiers
    X, y = _boston_subset()
    with warnings.catch_warnings(record=True):
        est = Estimator()
    msg = "fit"
    # every prediction-like method must refuse to run before fit
    for method in ('predict', 'decision_function', 'predict_proba',
                   'predict_log_proba'):
        if hasattr(est, method):
            assert_raise_message((AttributeError, ValueError), msg,
                                 getattr(est, method), X)
def check_supervised_y_2d(name, Estimator):
    if "MultiTask" in name:
        # These only work on 2d, so this test makes no sense
        return
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 3))
    y = np.arange(10) % 3
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        estimator = Estimator()
        set_fast_parameters(estimator)
        set_random_state(estimator)
        # fit with a 1d y and remember the predictions
        estimator.fit(X, y)
        y_pred = estimator.predict(X)
        set_random_state(estimator)
        # Check that when a 2D y is given, a DataConversionWarning is
        # raised
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always", DataConversionWarning)
            warnings.simplefilter("ignore", RuntimeWarning)
            estimator.fit(X, y[:, np.newaxis])
            y_pred_2d = estimator.predict(X)
        msg = "expected 1 DataConversionWarning, got: %s" % (
            ", ".join(str(w_x) for w_x in w))
        if name not in MULTI_OUTPUT:
            # check that we warned if we don't support multi-output
            assert_greater(len(w), 0, msg)
            assert_true("DataConversionWarning('A column-vector y"
                        " was passed when a 1d array was expected" in msg)
        # predictions must be unchanged up to the extra axis
        assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
    X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    y_names = np.array(["one", "two", "three"])[y]
    # run once with str labels and once with object-dtype labels
    for y_names in [y_names, y_names.astype('O')]:
        # TODO: LabelPropagation/LabelSpreading have some complication
        # with the -1 label, so keep the integer targets for them
        y_ = y if name in ("LabelPropagation", "LabelSpreading") else y_names
        classes = np.unique(y_)
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
            if name == 'BernoulliNB':
                classifier.set_params(binarize=X.mean())
            set_fast_parameters(classifier)
            set_random_state(classifier)
        # fit and check that predict reproduces every training class
        classifier.fit(X, y_)
        y_pred = classifier.predict(X)
        # training set performance
        assert_array_equal(np.unique(y_), np.unique(y_pred))
        if np.any(classifier.classes_ != classes):
            print("Unexpected classes_ attribute for %r: "
                  "expected %s, got %s" %
                  (classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
    # Fitting on integer targets must match fitting on the same targets
    # cast to float.
    X, _ = _boston_subset()
    X = X[:50]
    rnd = np.random.RandomState(0)
    y = multioutput_estimator_convert_y_2d(name, rnd.randint(3, size=X.shape[0]))
    rnd = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        # separate estimators to control random seeds
        regressor_1 = Regressor()
        regressor_2 = Regressor()
        for regressor in (regressor_1, regressor_2):
            set_fast_parameters(regressor)
            set_random_state(regressor)
        if name in CROSS_DECOMPOSITION:
            y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]).T
        else:
            y_ = y
        # fit one model on int targets and the other on float targets
        regressor_1.fit(X, y_)
        pred1 = regressor_1.predict(X)
        regressor_2.fit(X, y_.astype(np.float))
        pred2 = regressor_2.predict(X)
        assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
    X, y = _boston_subset()
    y = StandardScaler().fit_transform(y.reshape(-1, 1))  # X is already scaled
    y = y.ravel()
    y = multioutput_estimator_convert_y_2d(name, y)
    rnd = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        regressor = Regressor()
        set_fast_parameters(regressor)
        if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
            # linear regressors need to set alpha, but not generalized CV ones
            regressor.alpha = 0.01
        if name == 'PassiveAggressiveRegressor':
            regressor.C = 0.01
        # raises error on malformed input for fit
        assert_raises(ValueError, regressor.fit, X, y[:-1])
        # cross-decomposition models want a 2-column multi-output target
        y_ = (np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]).T
              if name in CROSS_DECOMPOSITION else y)
        # fit on arrays and on plain lists
        set_random_state(regressor)
        regressor.fit(X, y_)
        regressor.fit(X.tolist(), y_.tolist())
        y_pred = regressor.predict(X)
        assert_equal(y_pred.shape, y_.shape)
        # TODO: find out why PLS and CCA fail. RANSAC is random
        # and furthermore assumes the presence of outliers, hence
        # skipped
        if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
            print(regressor)
            assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
    # regressors exposing classifier-only methods must emit a
    # DeprecationWarning when those methods are called
    rng = np.random.RandomState(0)
    X = rng.normal(size=(10, 4))
    y = multioutput_estimator_convert_y_2d(name, X[:, 0])
    regressor = Regressor()
    set_fast_parameters(regressor)
    if hasattr(regressor, "n_components"):
        # FIXME CCA, PLS is not robust to rank 1 effects
        regressor.n_components = 1
    regressor.fit(X, y)
    for func_name in ("decision_function", "predict_proba",
                      "predict_log_proba"):
        func = getattr(regressor, func_name, None)
        if func is None:
            # doesn't have function
            continue
        # has function. Should raise deprecation warning
        assert_warns_message(DeprecationWarning, func_name, func, X)
def check_class_weight_classifiers(name, Classifier):
    if name == "NuSVC":
        # the sparse version has a parameter that doesn't do anything
        raise SkipTest
    if name.endswith("NB"):
        # NaiveBayes classifiers have a somewhat different interface.
        # FIXME SOON!
        raise SkipTest
    for n_centers in [2, 3]:
        # create a very noisy dataset
        X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                            random_state=0)
        n_centers = len(np.unique(y_train))
        # overweight class 0: the classifier should then (almost) always
        # predict it
        class_weight = ({0: 1000, 1: 0.0001} if n_centers == 2
                        else {0: 1000, 1: 0.0001, 2: 0.0001})
        with warnings.catch_warnings(record=True):
            classifier = Classifier(class_weight=class_weight)
            if hasattr(classifier, "n_iter"):
                classifier.set_params(n_iter=100)
            if hasattr(classifier, "min_weight_fraction_leaf"):
                classifier.set_params(min_weight_fraction_leaf=0.01)
        set_random_state(classifier)
        classifier.fit(X_train, y_train)
        y_pred = classifier.predict(X_test)
        assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
                                            X_test, y_test, weights):
    # 'balanced' class weights must improve the weighted f1-score compared
    # to fitting with no class weighting at all
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        if hasattr(classifier, "n_iter"):
            classifier.set_params(n_iter=100)
    set_random_state(classifier)
    classifier.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)
    classifier.set_params(class_weight='balanced')
    classifier.fit(X_train, y_train)
    y_pred_balanced = classifier.predict(X_test)
    score_plain = f1_score(y_test, y_pred, average='weighted')
    score_balanced = f1_score(y_test, y_pred_balanced, average='weighted')
    assert_greater(score_balanced, score_plain)
def check_class_weight_balanced_linear_classifier(name, Classifier):
    """Test class weights with non-contiguous class labels."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = np.array([1, 1, 1, -1, -1])
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        if hasattr(classifier, "n_iter"):
            # This is a very small dataset, default n_iter are likely to prevent
            # convergence
            classifier.set_params(n_iter=1000)
        set_random_state(classifier)
        # Let the model compute the class frequencies
        classifier.set_params(class_weight='balanced')
        coef_balanced = classifier.fit(X, y).coef_.copy()
        # Count each label occurrence to reweight manually
        n_samples = len(y)
        n_classes = float(len(np.unique(y)))
        class_weight = {label: n_samples / (np.sum(y == label) * n_classes)
                        for label in (1, -1)}
        classifier.set_params(class_weight=class_weight)
        coef_manual = classifier.fit(X, y).coef_.copy()
        # manual reweighting must reproduce the 'balanced' coefficients
        assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
    X, y = make_blobs(random_state=0, n_samples=9)
    y = multioutput_estimator_convert_y_2d(name, y)
    # some want non-negative input
    X -= X.min()
    with warnings.catch_warnings(record=True):
        # catch deprecation warnings
        estimator = Estimator()
        set_fast_parameters(estimator)
        set_random_state(estimator)
        # Make a physical copy of the orginal estimator parameters before fitting.
        original_params = deepcopy(estimator.get_params())
        # Fit the model
        estimator.fit(X, y)
        # Compare the state of the model parameters with the original parameters
        new_params = estimator.get_params()
        for param_name, original_value in original_params.items():
            new_value = new_params[param_name]
            # Constructor parameters must never be changed or mutated by
            # fit.  NOTE(review): the check compares the builtin hash() of
            # each value (an earlier comment mentioned joblib.hash, but
            # that is not what is called here), so every parameter value
            # must be hashable.  The only exception to the immutability
            # rule would be a RandomState instance, but the random_state
            # params were fixed to integer seeds above.
            assert_equal(hash(new_value), hash(original_value),
                         "Estimator %s should not change or mutate "
                         " the parameter %s from %s to %s during fit."
                         % (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
    # sparsify() must convert coef_ to a sparse matrix without changing
    # predictions, and the sparsified estimator must survive pickling.
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
                  [-1, -2], [2, 2], [-2, -2]])
    y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
    est = Estimator()
    est.fit(X, y)
    pred_orig = est.predict(X)
    # test sparsify with dense inputs
    est.sparsify()
    assert_true(sparse.issparse(est.coef_))
    assert_array_equal(est.predict(X), pred_orig)
    # pickle and unpickle with sparse coef_
    est = pickle.loads(pickle.dumps(est))
    assert_true(sparse.issparse(est.coef_))
    assert_array_equal(est.predict(X), pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
    # run the not-an-array check on a tiny classification problem
    X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
    y = multioutput_estimator_convert_y_2d(name, [1, 1, 1, 2, 2, 2])
    check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
    # run the not-an-array check on a small regression subset
    X, y = _boston_subset(n_samples=50)
    y = multioutput_estimator_convert_y_2d(name, y)
    check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
    if name in CROSS_DECOMPOSITION:
        raise SkipTest
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        # separate estimators to control random seeds
        estimator_1 = Estimator()
        estimator_2 = Estimator()
        for est in (estimator_1, estimator_2):
            set_fast_parameters(est)
            set_random_state(est)
        # wrap the data in a minimal array-like that is not an ndarray
        X_ = NotAnArray(np.asarray(X))
        y_ = NotAnArray(np.asarray(y))
        # predictions must match those obtained on the raw data
        estimator_1.fit(X_, y_)
        pred1 = estimator_1.predict(X_)
        estimator_2.fit(X, y)
        pred2 = estimator_2.predict(X)
        assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
    # meta-estimators need a base estimator to be constructible
    classifier = LDA()
    # test default-constructibility
    # get rid of deprecation warnings
    with warnings.catch_warnings(record=True):
        if name in META_ESTIMATORS:
            estimator = Estimator(classifier)
        else:
            estimator = Estimator()
        # test cloning
        clone(estimator)
        # test __repr__
        repr(estimator)
        # test that set_params returns self
        assert_true(estimator.set_params() is estimator)
        # test if init does nothing but set parameters
        # this is important for grid_search etc.
        # We get the default parameters from init and then
        # compare these against the actual values of the attributes.
        # this comes from getattr. Gets rid of deprecation decorator.
        init = getattr(estimator.__init__, 'deprecated_original',
                       estimator.__init__)
        try:
            args, varargs, kws, defaults = inspect.getargspec(init)
        except TypeError:
            # init is not a python function.
            # true for mixins
            return
        params = estimator.get_params()
        # drop 'self' (plus the required base-estimator argument for
        # meta-estimators); everything remaining must carry a default
        args = args[2:] if name in META_ESTIMATORS else args[1:]
        if not args:
            return
        assert_equal(len(args), len(defaults))
        for arg, default in zip(args, defaults):
            assert_in(type(default), [str, int, float, bool, tuple, type(None),
                                      np.float64, types.FunctionType, Memory])
            if arg not in params.keys():
                # deprecated parameter, not in get_params
                assert_true(default is None)
                continue
            if isinstance(params[arg], np.ndarray):
                assert_array_equal(params[arg], default)
            else:
                assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    """Return ``y`` reshaped to 2-D for the MultiTask* estimators.

    Those estimators raise ValueError on 1-D targets; every other
    estimator gets ``y`` back unchanged.
    """
    multitask = ('MultiTaskElasticNetCV', 'MultiTaskLassoCV',
                 'MultiTaskLasso', 'MultiTaskElasticNet')
    if name in multitask:
        return y[:, np.newaxis]
    return y
def check_non_transformer_estimators_n_iter(name, estimator,
                                            multi_output=False):
    # Iterative solvers must expose an ``n_iter_`` attribute > 0 after fit
    # (i.e. they ran for at least one iteration).
    iris = load_iris()
    X, y_ = iris.data, iris.target
    if multi_output:
        y_ = y_[:, np.newaxis]
    set_random_state(estimator, 0)
    if name == 'AffinityPropagation':
        estimator.fit(X)  # unsupervised: fit takes no y
    else:
        estimator.fit(X, y_)
    assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
    # Iterative transformers must expose ``n_iter_`` > 1 after fit.
    if name in CROSS_DECOMPOSITION:
        # Check using default data
        X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
        y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
    else:
        X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                           random_state=0, n_features=2, cluster_std=0.1)
        X -= X.min() - 0.1
    set_random_state(estimator, 0)
    estimator.fit(X, y_)
    if name in CROSS_DECOMPOSITION:
        # cross-decomposition models report one n_iter per component
        for n_iter in estimator.n_iter_:
            assert_greater(n_iter, 1)
    else:
        assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
    """Check that get_params(deep=False) is a subset of get_params(deep=True)."""

    class T(BaseEstimator):
        """Mock classifier
        """

        def __init__(self):
            pass

        def fit(self, X, y):
            return self

    if name in ('FeatureUnion', 'Pipeline'):
        e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
        # BUG FIX: a missing comma previously concatenated the two names
        # into the single string 'GridSearchCVRandomizedSearchCV', turning
        # the membership test into a substring match.
        return
    else:
        e = estimator()
    shallow_params = e.get_params(deep=False)
    deep_params = e.get_params(deep=True)
    assert_true(all(item in deep_params.items()
                    for item in shallow_params.items()))
| {
"content_hash": "96d69a28f5c03c622ed4b1d34d1e24c4",
"timestamp": "",
"source": "github",
"line_count": 1444,
"max_line_length": 81,
"avg_line_length": 35.887119113573405,
"alnum_prop": 0.6183786495822157,
"repo_name": "anurag313/scikit-learn",
"id": "2ff07f13dae40edf984341596dd268a0af39c88f",
"size": "51821",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/utils/estimator_checks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1388"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5373693"
},
{
"name": "Shell",
"bytes": "4031"
}
],
"symlink_target": ""
} |
"""
nimsdata.medimg.nimsnifti
=========================
NIMSNifti provides NIfTI writing capabilities for MR datasets read by any subclass of NIMSMRReader.
Provides NIfTI specifics; inherits from NIMSMRReader and NIMSMRWriter.
"""
import os
import bson
import logging
import nibabel
import json
import numpy as np
import medimg
# Module-level logger, named after this module.
log = logging.getLogger(__name__)
# NIFTI1-style slice order codes:
SLICE_ORDER_UNKNOWN = 0
SLICE_ORDER_SEQ_INC = 1  # sequential, ascending
SLICE_ORDER_SEQ_DEC = 2  # sequential, descending
SLICE_ORDER_ALT_INC = 3  # interleave, ascending, starting at 1st slice
SLICE_ORDER_ALT_DEC = 4  # interleave, descending, starting at last slice
SLICE_ORDER_ALT_INC2 = 5  # interleave, ascending, starting at 2nd slice
SLICE_ORDER_ALT_DEC2 = 6  # interleave, decreasing, starting at 2nd to last slice
class NIMSNiftiError(medimg.MedImgError):
    """Raised for failures while reading or writing NIfTI files."""
    pass
class NIMSNifti(medimg.MedImgReader, medimg.MedImgWriter):
"""
Read elements of a NIMSData subclass.
    Dataset must have a 'data' attribute that contains voxel data in a np.ndarray. Dataset must
    also contain metadata attributes that define the NIMS-required attributes.
Parameters
----------
path : str
filepath of input nifti, in .nii or .nii.gz format.
load_data : bool [default False]
        attempt to load all data. has no effect.
Returns
-------
None : NoneType
Raises
------
NIMSNiftiError
TODO: explain plz.
"""
domain = u'mr'
filetype = u'nifti'
state = ['orig']
    def __init__(self, path, load_data=False):
        """Open the NIfTI file at *path*; optionally load voxel data now."""
        super(NIMSNifti, self).__init__(path, load_data)
        log.debug('reading %s' % path)
        try:
            # TODO: load sorting/identification header
            self.nifti = nibabel.load(path)
        except Exception as e:
            # re-raise any nibabel failure as the module-specific error type
            raise NIMSNiftiError(e)
        # TODO: read metadata from nifti extension header
        if load_data:
            self.load_data()
    def load_data(self):
        """Load voxel data and affine matrices from the wrapped nibabel image."""
        super(NIMSNifti, self).load_data()
        log.debug('loading data...')
        # single unlabeled volume, keyed by the empty string
        self.data = {'': self.nifti.get_data()}
        self.qto_xyz = self.nifti.get_affine()
        self.sform = self.nifti.get_sform()
        self.qform = self.nifti.get_qform()
        self.scan_type = 'unknown' # FIXME
    @property
    def nims_group(self):
        # group identifier comes straight from the parsed metadata
        return self.metadata.group
    @property
    def nims_project(self):
        # project identifier comes straight from the parsed metadata
        return self.metadata.project
    @property
    def nims_session(self):
        # exam UID with dots replaced so it is safe to use as an identifier
        return self.metadata.exam_uid.replace('.', '_')
    @property
    def nims_session_name(self):
        # only the first series/acquisition of an exam names the session
        return self.metadata.timestamp.strftime('%Y-%m-%d %H:%M') if self.metadata.series_no == 1 and self.metadata.acq_no == 1 else None
    @property
    def nims_session_subject(self):
        # subject code comes straight from the parsed metadata
        return self.metadata.subj_code
    @property
    def nims_acquisition(self):
        # acquisition identifier comes straight from the parsed metadata
        return self.metadata.acquisition
    @property
    def nims_acquisition_name(self):
        # intentionally unset for niftis; the property yields None
        pass
    @property
    def nims_acquisition_description(self):
        # intentionally unset for niftis; the property yields None
        pass
    @property
    def nims_file_name(self):
        # file name is the acquisition id suffixed with the filetype label
        return self.nims_acquisition + '_' + self.filetype
    @property
    def nims_file_ext(self):
        # extension of the stored file; '.tgz' presumably because outputs
        # are bundled into a tarball -- TODO confirm against the writer
        return '.tgz'
    @property
    def nims_file_domain(self):
        # class-level domain label (u'mr')
        return self.domain
    @property
    def nims_file_type(self):
        # class-level filetype label (u'nifti')
        return self.filetype
    @property
    def nims_file_kinds(self):
        # scan_type is set by load_data() (currently always 'unknown')
        return self.scan_type
    @property
    def nims_file_state(self):
        # class-level processing state label (['orig'])
        return self.state
    @property
    def nims_timestamp(self):  # FIXME: should return UTC time and timezone
        # NOTE(review): reads self.timestamp (not self.metadata.timestamp);
        # presumably set by the reader base class -- verify.  The offset is
        # hard-coded to UTC-7 ('pacific'), ignoring DST.
        return self.timestamp.replace(tzinfo=bson.tz_util.FixedOffset(-7 * 60, 'pacific'))  # FIXME: use pytz
    @property
    def nims_timezone(self):
        # timezone is not tracked for niftis
        return None
@classmethod
def write(cls, metadata, imagedata, outbase, voxel_order=None):
    """
    Write the metadata and imagedata to niftis.
    Constructs a description from the metadata, and applies as much metadata into the nifti
    header as is applicable. Creates .bvec and .bval files if bvecs and bvals exist.
    Parameters
    ----------
    metadata : object
        fully loaded instance of a NIMSReader.
    imagedata : dict or string containing a path to a dir containing nifti(s)
        dictionary of np.darrays. label suffix as keys, with np.darrays as values.
    outbase : str
        output name prefix.
    voxel_order : str [default None]
        three character string indicating the voxel order, ex. 'LPS'.
    Returns
    -------
    results : list
        list of files written.
    Raises
    ------
    NIMSDataError
        metadata or data is None.
    """
    if isinstance(imagedata, basestring):
        from glob import glob
        from shutil import copyfile
        # imagedata is a directory containing nifti(s): copy any matching
        # pre-built niftis next to outbase instead of rebuilding them.
        log.info('Loading files from %s' % imagedata)
        niftis = glob(imagedata + '/' + str(metadata.exam_no) + '_' + str(metadata.series_no) + '_' + str(metadata.acq_no) + '*.nii.gz')
        results = []
        for f in niftis:
            filepath = os.path.join(os.path.dirname(outbase), os.path.basename(f))
            copyfile(f, filepath)
            results.append(filepath)
    else:
        # Delegate to the base class first (presumably for validation /
        # common handling -- TODO confirm against the superclass).
        super(NIMSNifti, cls).write(metadata, imagedata, outbase, voxel_order)  # XXX FAIL! unexpected imagedata = None
        results = []
        for data_label, data in imagedata.iteritems():
            if data is None:
                continue
            if voxel_order:
                data, qto_xyz = cls.reorder_voxels(data, metadata.qto_xyz, voxel_order)
            else:
                qto_xyz = metadata.qto_xyz
            outname = outbase + data_label
            log.debug('creating nifti for %s' % data_label)
            # TODO: nimsmrdata.adjust_bvecs to use affine from after reorient
            # NOTE(review): the .bval/.bvec files are named from outbase (not
            # outname), so they are rewritten once per data label.
            if metadata.is_dwi and metadata.bvals is not None and metadata.bvecs is not None:
                filepath = outbase + '.bval'
                with open(filepath, 'w') as bvals_file:
                    bvals_file.write(' '.join(['%0.1f' % value for value in metadata.bvals]))
                log.debug('generated %s' % os.path.basename(filepath))
                filepath = outbase + '.bvec'
                with open(filepath, 'w') as bvecs_file:
                    bvecs_file.write(' '.join(['%0.4f' % value for value in metadata.bvecs[0, :]]) + '\n')
                    bvecs_file.write(' '.join(['%0.4f' % value for value in metadata.bvecs[1, :]]) + '\n')
                    bvecs_file.write(' '.join(['%0.4f' % value for value in metadata.bvecs[2, :]]) + '\n')
                log.debug('generated %s' % os.path.basename(filepath))
            # write nifti
            nifti = nibabel.Nifti1Image(data, None)
            nii_header = nifti.get_header()
            nifti.update_header()  # XXX are data and header ever "non-harmonious"
            num_slices = data.shape[2]  # Don't trust metatdata.num_slices; might not match the # acquired.
            nii_header.set_xyzt_units('mm', 'sec')
            nii_header.set_qform(qto_xyz, 'scanner')
            nii_header.set_sform(qto_xyz, 'scanner')
            nii_header.set_dim_info(*([1, 0, 2] if metadata.phase_encode == 0 else [0, 1, 2]))
            nii_header['slice_start'] = 0
            nii_header['slice_end'] = num_slices - 1
            nii_header.set_slice_duration(metadata.slice_duration)
            nii_header['slice_code'] = metadata.slice_order
            # Display window: clip to the 10th/99.5th percentile of (the
            # magnitude of) the data.
            if np.iscomplexobj(data):
                clip_vals = np.percentile(np.abs(data), (10.0, 99.5))
            else:
                clip_vals = np.percentile(data, (10.0, 99.5))
            nii_header.structarr['cal_min'] = clip_vals[0]
            nii_header.structarr['cal_max'] = clip_vals[1]
            nii_header.set_data_dtype(data.dtype)
            # Stuff some extra data into the description field (max of 80 chars)
            # Other unused fields: nii_header['data_type'] (10 chars), nii_header['db_name'] (18 chars),
            # Fall back to neutral defaults for any metadata field that is
            # missing/falsy so the format string below never fails.
            te = 0 if not metadata.te else metadata.te
            ti = 0 if not metadata.ti else metadata.ti
            flip_angle = 0 if not metadata.flip_angle else metadata.flip_angle
            effective_echo_spacing = 0. if not metadata.effective_echo_spacing else metadata.effective_echo_spacing
            acquisition_matrix = [0, 0] if metadata.acquisition_matrix == (None, None) else metadata.acquisition_matrix
            mt_offset_hz = 0. if not metadata.mt_offset_hz else metadata.mt_offset_hz
            phase_encode_undersample = 1 if not metadata.phase_encode_undersample else metadata.phase_encode_undersample
            slice_encode_undersample = 1 if not metadata.slice_encode_undersample else metadata.slice_encode_undersample
            nii_header['descrip'] = 'te=%.2f;ti=%.0f;fa=%.0f;ec=%.4f;acq=[%s];mt=%.0f;rp=%.1f;' % (
                te * 1000.,
                ti * 1000.,
                flip_angle,
                effective_echo_spacing * 1000.,
                ','.join(map(str, acquisition_matrix)),
                mt_offset_hz,
                1. / phase_encode_undersample,
            )
            if '3D' in (metadata.acquisition_type or ''):
                nii_header['descrip'] = str(nii_header['descrip']) + 'rs=%.1f' % (1. / slice_encode_undersample)
            if metadata.phase_encode_direction != None:
                nii_header['descrip'] = str(nii_header['descrip']) + 'pe=%d' % (metadata.phase_encode_direction)
            if metadata.is_fastcard:
                nii_header['descrip'] = str(nii_header['descrip']) + 'ves=%f;ve=%d' % (metadata.velocity_encode_scale or 0., metadata.velocity_encoding or 0)
            nii_header['pixdim'][4] = metadata.tr  # XXX pixdim[4] = TR, even when non-timeseries. not nifti compliant
            filepath = outname + '.nii.gz'
            nibabel.save(nifti, filepath)
            results.append(filepath)
            log.info('generated %s' % os.path.basename(filepath))
    # Sidecar JSON with the raw metadata, when the reader provides it.
    if hasattr(metadata, 'md_json'):
        filepath = outbase + '.json'
        with open(filepath, 'w') as fp:
            json.dump(metadata.md_json, fp, indent=2, sort_keys=True)
        log.info('generated %s' % os.path.basename(filepath))
        results.append(filepath)
        log.info('json file %s' % filepath)
    return results
write = NIMSNifti.write
| {
"content_hash": "8a25a8cf40ff0bf97f2edb8dd1865e36",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 161,
"avg_line_length": 37.52613240418118,
"alnum_prop": 0.5717734447539461,
"repo_name": "cni/nimsdata",
"id": "c4fa9306f96f17781800a17f38346e939e38b938",
"size": "10850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "medimg/nimsnifti.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "222096"
},
{
"name": "Shell",
"bytes": "956"
}
],
"symlink_target": ""
} |
import socket
import time

# Minimal manual test client: pushes a tiny XML stream at a locally running
# server and closes the connection.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', 5228))
sock.send("<stream>")
for _ in range(3):
    sock.send("<test/>")
sock.send("</stream>")
sock.close()
| {
"content_hash": "af9f2dbcaaacb9192c2b9fb41a7d46b7",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 53,
"avg_line_length": 18.307692307692307,
"alnum_prop": 0.634453781512605,
"repo_name": "sezuan/SleekXMPP",
"id": "50eb6c500eb127043820ed23b72a9d6d296d3501",
"size": "238",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "sleekxmpp/xmlstream/testclient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "188559"
}
],
"symlink_target": ""
} |
"""Support for HomeKit Controller air quality sensors."""
import logging
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
from homeassistant.components.air_quality import AirQualityEntity
from homeassistant.core import callback
from . import KNOWN_DEVICES, HomeKitEntity
_LOGGER = logging.getLogger(__name__)
AIR_QUALITY_TEXT = {
0: "unknown",
1: "excellent",
2: "good",
3: "fair",
4: "inferior",
5: "poor",
}
class HomeAirQualitySensor(HomeKitEntity, AirQualityEntity):
    """Representation of a HomeKit Controller Air Quality sensor."""

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        # Deprecation notice is logged once per entity, on add.
        _LOGGER.warning(
            "The homekit_controller air_quality entity has been "
            "deprecated and will be removed in 2021.12.0"
        )
        await super().async_added_to_hass()

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Whether or not to enable this entity by default."""
        # This entity is deprecated, so don't enable by default
        return False

    def get_characteristic_types(self):
        """Define the homekit characteristics the entity cares about."""
        return [
            CharacteristicsTypes.AIR_QUALITY,
            CharacteristicsTypes.DENSITY_PM25,
            CharacteristicsTypes.DENSITY_PM10,
            CharacteristicsTypes.DENSITY_OZONE,
            CharacteristicsTypes.DENSITY_NO2,
            CharacteristicsTypes.DENSITY_SO2,
            CharacteristicsTypes.DENSITY_VOC,
        ]

    @property
    def particulate_matter_2_5(self):
        """Return the particulate matter 2.5 level."""
        return self.service.value(CharacteristicsTypes.DENSITY_PM25)

    @property
    def particulate_matter_10(self):
        """Return the particulate matter 10 level."""
        return self.service.value(CharacteristicsTypes.DENSITY_PM10)

    @property
    def ozone(self):
        """Return the O3 (ozone) level."""
        return self.service.value(CharacteristicsTypes.DENSITY_OZONE)

    @property
    def sulphur_dioxide(self):
        """Return the SO2 (sulphur dioxide) level."""
        return self.service.value(CharacteristicsTypes.DENSITY_SO2)

    @property
    def nitrogen_dioxide(self):
        """Return the NO2 (nitrogen dioxide) level."""
        return self.service.value(CharacteristicsTypes.DENSITY_NO2)

    @property
    def air_quality_text(self):
        """Return the Air Quality Index (AQI) as human-readable text."""
        air_quality = self.service.value(CharacteristicsTypes.AIR_QUALITY)
        return AIR_QUALITY_TEXT.get(air_quality, "unknown")

    @property
    def volatile_organic_compounds(self):
        """Return the volatile organic compounds (VOC) level."""
        return self.service.value(CharacteristicsTypes.DENSITY_VOC)

    @property
    def extra_state_attributes(self):
        """Return the device state attributes."""
        data = {"air_quality_text": self.air_quality_text}
        # NOTE: a falsy VOC reading (0 or None) is omitted from attributes.
        if voc := self.volatile_organic_compounds:
            data["volatile_organic_compounds"] = voc
        return data
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Homekit air quality sensor."""
    hkid = config_entry.data["AccessoryPairingID"]
    conn = hass.data[KNOWN_DEVICES][hkid]

    @callback
    def async_add_service(service):
        # Only claim air-quality services; returning False lets other
        # platforms handle the service.
        if service.short_type != ServicesTypes.AIR_QUALITY_SENSOR:
            return False
        info = {"aid": service.accessory.aid, "iid": service.iid}
        async_add_entities([HomeAirQualitySensor(conn, info)], True)
        return True

    conn.add_listener(async_add_service)
| {
"content_hash": "7bdc96a41841e84eacbd89546746c19b",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 74,
"avg_line_length": 32.89380530973451,
"alnum_prop": 0.6655905299973096,
"repo_name": "aronsky/home-assistant",
"id": "df5a89f179ec4840a648e24cc74af589362d66fb",
"size": "3717",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homekit_controller/air_quality.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
def read(*path):
    """Read a file located relative to this source file and return its text."""
    full_path = os.path.join(os.path.dirname(__file__), *path)
    with open(full_path, encoding="utf8") as fp:
        return fp.read()
# Standard setuptools metadata for the rucola-permalinks plugin; the long
# description is read from README.rst at build time via read() above.
setup(
    name="rucola-permalinks",
    version='0.0.1',
    license="MIT",
    description="A Rucola plugin used to create custom permalinks for site pages",
    long_description=read("README.rst"),
    author="Kasper Minciel",
    author_email="kasper.minciel@gmail.com",
    url="https://github.com/lecnim/rucola-permalinks",
    py_modules=['rucola_permalinks'],
    include_package_data=True,
    zip_safe=False,
    test_suite='tests',
    classifiers=[
        "Environment :: Plugins"
    ],
    install_requires=["rucola"]
)
| {
"content_hash": "862420311ecf162ea55c9038266ce62d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 82,
"avg_line_length": 22.11111111111111,
"alnum_prop": 0.6256281407035176,
"repo_name": "lecnim/rucola-permalinks",
"id": "b77fc5f8e5dd851c7dc80e6c675e8a94db42880c",
"size": "796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4308"
}
],
"symlink_target": ""
} |
import amulet
import unittest
class TestDeploy(unittest.TestCase):
    """
    Trivial deployment test for Apache Flume HDFS.
    This charm cannot do anything useful by itself, so integration testing
    is done in the bundle.
    """

    def test_deploy(self):
        # Deploy the charm and wait for it to settle; success is simply
        # reaching a stable state within the timeouts.
        self.d = amulet.Deployment(series='xenial')
        self.d.add('flume-hdfs', 'cs:~bigdata-dev/xenial/apache-flume-hdfs')
        self.d.setup(timeout=900)
        self.d.sentry.wait(timeout=1800)
        self.unit = self.d.sentry['flume-hdfs'][0]
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "2caa17fb6dc92ac0adf1fde7f900648f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 27.142857142857142,
"alnum_prop": 0.6473684210526316,
"repo_name": "juju-solutions/layer-apache-flume-hdfs",
"id": "c8041dbb150b398d8935d2448839dc9cdc9aae05",
"size": "594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/01-basic-deployment.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1964"
},
{
"name": "Shell",
"bytes": "509"
}
],
"symlink_target": ""
} |
"""
I/O for boolean regulatory networks.
"""
import numpy as N, matplotlib as M, pylab as P, logging, subprocess, os
from cookbook.pylab_utils import layout_sub_plot, pylab_ioff
from . import network, chow_liu_trees
from .analysis import aggregate_possible_regulations, aggregate_possible_thetas, aggregate_possible_inputs
from .analysis import count_possible_regulations, count_possible_thetas, count_possible_inputs
from .analysis import analyse_dependencies
import networkx as nx
def output_file(options, filename):
    """Build the full path of *filename* inside the configured output directory."""
    out_dir = options.output_dir
    return os.path.join(out_dir, filename)
def ensure_dir_exists(dir):
    """Make sure a directory exists, making it and its parents if necessary.

    Uses EAFP rather than check-then-create to avoid the race where another
    process creates the directory between the existence test and makedirs().
    """
    try:
        os.makedirs(dir)
    except OSError:
        # Directory already exists (or a concurrent creator won the race):
        # that is fine. Anything else (permissions, a file in the way) is
        # re-raised for the caller.
        if not os.path.isdir(dir):
            raise
def configure_matplotlib_for_tex():
    "Set up matplotlib parameters for plotting using TeX."
    # An unconditional ``raise ValueError('')`` used to sit here, making the
    # entire body unreachable (apparently a leftover way of disabling the
    # function). Removed so the function actually applies the rc settings.
    fig_width_pt = 345.0  # Get this from LaTeX using \showthe\columnwidth
    inches_per_pt = 1.0/72.27               # Convert pt to inch
    golden_mean = (N.sqrt(5)-1.0)/2.0       # Aesthetic ratio
    fig_width = fig_width_pt*inches_per_pt  # width in inches
    fig_height = fig_width*golden_mean      # height in inches
    fig_size = (fig_width, fig_height)
    params = {
        'backend'          : 'ps',
        'axes.labelsize'   : 6,
        'axes.titlesize'   : 6,
        'text.fontsize'    : 6,
        'legend.fontsize'  : 6,
        'xtick.labelsize'  : 4,
        'ytick.labelsize'  : 4,
        'xtick.direction'  : 'out',
        'ytick.direction'  : 'out',
        'xtick.major.size' : 0,
        'xtick.minor.size' : 0,
        'ytick.major.size' : 0,
        'ytick.minor.size' : 0,
        'text.usetex'      : True,
        'figure.figsize'   : fig_size
    }
    P.rcParams.update(params)
def rgb_as_string(rgb):
    "@return: A string representing the rgb colour."
    # Scale each channel from [0, 1] to [0, 255], truncate to int, and
    # render as a hex colour.
    r, g, b = (rgb * 255).astype(int)
    return '#%02X%02X%02X' % (r, g, b)
class GraphBuilder(object):
    """
    Builds graphs representing the information in the meta data.

    Uses the Boost Graph Library (BGL) python bindings; if they cannot be
    imported, self.graph is left as None and callers are expected to check it.
    """

    def __init__(self, meta_data, options):
        self.options = options
        try:
            import boost.graph as bgl
        except ImportError:
            logging.warning('Cannot import boost.graph python bindings. Will not create network of graph.')
            self.graph = None
            return
        self.graph = bgl.Digraph()
        # add vertices: one per gene, carrying name/position/colour/shape maps
        self.name_map = self.graph.add_vertex_property('node_id', 'string')
        self.position_map = self.graph.add_vertex_property('pos', 'string')
        if not options.black_and_white:
            self.color_map = self.graph.add_vertex_property('color', 'string')
            #self.fillcolor_map = self.graph.add_vertex_property('fillcolor', 'string')
        self.shape_map = self.graph.add_vertex_property('shape', 'string')
        self.style_map = self.graph.add_vertex_property('style', 'string')
        self.vertices = [self.graph.add_vertex() for g in xrange(meta_data.G)]
        for g, (v, gene) in enumerate(zip(self.vertices, meta_data.genes)):
            self.name_map[v] = gene
            if g in meta_data.graph_positions:  # fix position of node if required
                self.position_map[v] = '%s,%s!' % meta_data.graph_positions[g]
            if not options.black_and_white:
                #self.fillcolor_map[v] = rgb_as_string(meta_data.colours[g])
                self.color_map[v] = rgb_as_string(meta_data.colours[g])
            if g in meta_data.external_inputs:
                self.shape_map[v] = "box"  # external inputs are different shape
            else:
                self.shape_map[v] = "circle"
            #self.style_map[v] = "filled"
        # edge property maps used by add_edge below
        self.arrowhead_map = self.graph.add_edge_property('arrowhead', 'string')
        self.arrowsize_map = self.graph.add_edge_property('arrowsize', 'float')
        self.edgestyle_map = self.graph.add_edge_property('style', 'string')

    def add_edge(self, src, dst, activatory):
        # Add a regulatory edge; arrow style distinguishes activation
        # (normal arrow) from repression (tee).
        e = self.graph.add_edge(self.vertices[src], self.vertices[dst])
        if activatory:
            self.arrowhead_map[e] = 'normal'
            self.arrowsize_map[e] = 1.5
            self.edgestyle_map[e] = '-open triangle 90'
        else:
            self.arrowhead_map[e] = 'tee'
            self.arrowsize_map[e] = 1.5
            self.edgestyle_map[e] = '-triangle 90 reversed'
        return e
class NetworkXGraphBuilder(object):
    """
    Builds graphs representing the information in the meta data using Python package networkx.
    """

    def __init__(self, meta_data, options):
        self.options = options
        self.graph = nx.MultiDiGraph()
        # add vertices: one node per gene, keyed by gene index
        for g, gene in enumerate(meta_data.genes):
            attributes = {
                'label' : gene.name,
                # 'style' : 'filled',
            }
            if gene.position:  # fix position of node if required
                attributes['pos'] = '%s,%s!' % gene.position
            if not options.black_and_white:
                attributes['color'] = rgb_as_string(N.asarray(gene.color))
            if g in meta_data.external_inputs:
                attributes['shape'] = "box"  # external inputs are different shape
            else:
                attributes['shape'] = "circle"
            self.graph.add_node(g, **attributes)

    def add_edge(self, src, dst, activatory, dashed):
        # Edge attributes encode activation vs repression; a dashed edge
        # marks a regulation that may also be absent.
        if activatory:
            attributes = NetworkXGraphBuilder._activatory_attributes.copy()
        else:
            attributes = NetworkXGraphBuilder._repressive_attributes.copy()
        if not self.options.use_latex:
            attributes['style'] = ''  # only use style for latex output
        if dashed:
            attributes['style'] += ',dashed'
        self.graph.add_edge(src, dst, **attributes)

    # Template attribute dicts copied per-edge by add_edge above.
    _activatory_attributes = {
        'arrowhead' : 'normal',
        'arrowsize' : 1.5,
        'style'     : '-open triangle 90'
    }
    _repressive_attributes = {
        'arrowhead' : 'tee',
        'arrowsize' : 1.5,
        'style'     : '-triangle 90 reversed'
    }
def graph_network(net, options):
    "Create a networkx graph of the network."
    # NOTE: the docstring previously said "BGL graph", but this function
    # builds a NetworkXGraphBuilder graph.
    builder = NetworkXGraphBuilder(net.meta_data, options)
    if builder.graph:
        # One edge per non-zero entry of the connectivity matrix J;
        # positive entries are activatory, negative repressive.
        for src, dst in zip(*N.asarray(net.J).nonzero()):
            builder.add_edge(src, dst, net.J[src, dst] > 0, False)
    return builder.graph
def graph_restrictions(meta_data, options, possible_Js=None):
    "Create a networkx graph of the possible networks."
    # Identity test, not '==': 'None == possible_Js' performs an element-wise
    # comparison when a numpy array is passed, and truth-testing the
    # resulting array raises ValueError.
    if possible_Js is None:
        possible_Js = meta_data.possible_Js
    builder = NetworkXGraphBuilder(meta_data, options)
    # add edges
    if builder.graph:
        for src in xrange(meta_data.G):
            for dst in xrange(meta_data.G):
                Js = possible_Js[src, dst]
                # A regulation that may also be absent (0 allowed) is dashed.
                dashed = 0 in Js
                # add edges
                for J in Js:
                    if 0 != J:
                        activatory = J > 0
                        builder.add_edge(src, dst, activatory, dashed)
    return builder.graph
# Shell command templates for rendering the DOT output: dot2tex produces a
# TikZ figure for LaTeX inclusion; neato lays out / renders to image formats.
_dot2tex_cmd = 'dot2tex --autosize --crop --prog=neato -ftikz --nodeoptions "ultra thick,minimum size=1cm" --figonly --tikzedgelabels'
_neato_cmd = 'neato -Nfontsize=16 -Nwidth=1 -Nheight=1 -Nfixedsize=true -Npenwidth=3 -s.4'
def write_graph(graph, name, options):
    "Write the graph as a DOT file and render it via neato (and TikZ/LaTeX if requested)."
    import networkx as nx  # NOTE(review): shadows the module-level import; harmless but redundant
    dot_file = '%s.dot' % name
    nx.write_dot(graph, dot_file)
    if options.use_latex:
        logging.info('Plotting figures using dot2tex and LaTeX tikz package.')
        subprocess.check_call('%s %s > %s.tex' % (_dot2tex_cmd, dot_file, name), shell=True)
    # Render one image per requested output format (e.g. svg, png).
    for format in options.formats:
        subprocess.check_call('%s -T%s %s > %s.%s' % (_neato_cmd, format, dot_file, name, format), shell=True)
def plot_network_realisation(net, X, xlabel=False, ylabel=False):
    """Plot the realisation of the network.

    X is a (time, gene) on/off matrix; each 'on' cell is painted with the
    gene's colour, 'off' cells stay white.
    """
    colours = N.ones((X.shape[0], net.meta_data.G, 3))
    for g, (x_col, gene) in enumerate(zip(X.T, net.meta_data.genes)):
        for t, x in enumerate(x_col):
            if x:
                colours[t,g] = gene.color
    P.imshow(colours, interpolation='nearest')
    # linewidth = .2
    # for t in xrange(X.shape[0]+1):
    #     P.axhline(y=t-.5, xmin=0, xmax=X.shape[1], color='white', lw=linewidth)
    # for g in xrange(X.shape[1]+1):
    #     P.axvline(x=g-.5, ymin=0, ymax=X.shape[0], color='white', lw=linewidth)
    P.xlim((-.5, X.shape[1]-.5))
    P.ylim((X.shape[0]-.5, -.5))
    axes = P.gca()
    if xlabel:
        # Label columns with gene names, rotated for readability.
        P.xticks(range(net.meta_data.G), [gene.name for gene in net.meta_data.genes])
        P.setp(P.gca().get_xticklabels(), rotation=45, horizontalalignment='right', fontsize=7)
        for line in axes.get_xticklines():
            line.set_visible(False)
    else:
        P.xticks([], [])
    if ylabel:
        P.ylabel('time', fontsize=8)
        P.setp(P.gca().get_yticklabels(), fontsize=8)
        for line in axes.get_yticklines():
            line.set_visible(False)
    else:
        P.yticks([], [])
def regulation_as_str(r):
    """Convert a regulatory value to a one-character string (+/0/-)."""
    # NOTE(review): -5 appears to be the encoding used for repression here;
    # confirm against the values actually stored in possible_Js -- -1 would
    # be the more usual convention.
    if -5 == r:
        return '-'
    elif 0 == r:
        return '0'
    elif 1 == r:
        return '+'
    else:
        raise ValueError('Unknown regulation value.')
def centre_string(s, width):
    """Pad *s* with spaces so it is centred in a field of *width* characters.

    Any odd leftover space goes after the string. Uses floor division so the
    result is identical under Python 2 and 3 ('/' on ints yields a float in
    Python 3, which would break the ' ' * pre repetition).
    """
    length = len(s)
    pre = (width - length) // 2
    post = width - pre - length
    return '%s%s%s' % (' ' * pre, s, ' ' * post)
_matrix_entry_width = 6
def regulatory_matrix_as_string(possible_regulations):
    """Render each cell's set of possible regulation values as a centred string."""
    rows, cols = possible_regulations.shape
    str_possible_regulations = N.empty(possible_regulations.shape, dtype=object)
    for g1 in xrange(rows):
        for g2 in xrange(cols):
            values = sorted(possible_regulations[g1, g2])
            cell = '/'.join(regulation_as_str(v) for v in values)
            str_possible_regulations[g1, g2] = centre_string(cell, _matrix_entry_width)
    return str_possible_regulations
def add_gene_headers_to_matrix(genes, matrix, width=5):
    """Return a copy of *matrix* with an extra first row/column of gene names.

    Headers are padded to _matrix_entry_width like the matrix cells
    themselves (the *width* parameter is currently unused).
    """
    n_rows, n_cols = matrix.shape
    result = N.empty((n_rows + 1, n_cols + 1), dtype=object)
    headers = [centre_string(gene.name, _matrix_entry_width) for gene in genes]
    result[0, 0] = ' ' * _matrix_entry_width
    result[0, 1:] = headers
    result[1:, 0] = headers
    result[1:, 1:] = matrix
    return result
def summarise_meta_data(meta_data):
    "Log some information about the network constraint meta data."
    logging.info('Have %d genes called: %s', meta_data.G, ','.join(gene.name for gene in meta_data.genes))
    logging.info(
        'The possible regulatory relationships are:\n%s',
        str(add_gene_headers_to_matrix(meta_data.genes, regulatory_matrix_as_string(meta_data.possible_Js.T)))
    )
    # Per-gene summary: genes that are not external inputs have an initial
    # state and a set of possible constitutive-expression thresholds.
    for g, (gene, thetas, initial_state) in enumerate(zip(meta_data.genes, meta_data.possible_thetas, meta_data.initial_states)):
        if g not in meta_data.external_inputs:
            logging.info(
                'Gene %7s : initial state %s : constitutive expression %7s',
                gene.name, initial_state, thetas
            )
    # External inputs are driven by input functions with their own parameters.
    for g, gene in enumerate(meta_data.genes):
        if g in meta_data.external_inputs:
            logging.info(
                '%7s is an external input with possible input parameters: %s',
                gene.name, ','.join(map(str, meta_data.possible_input_params[g]))
            )
    logging.info('The conditions to test are: %s', ', '.join(c.name for c in meta_data.conditions))
def summarise_possible_networks(meta_data, networks):
    "Log some information about the possible networks."
    #
    # regulations
    #
    possible_regulations = aggregate_possible_regulations(meta_data, networks)
    str_possible_regulations = regulatory_matrix_as_string(possible_regulations)
    logging.info(
        'Consistent regulatory relationships in the networks are:\n%s',
        str(add_gene_headers_to_matrix(meta_data.genes, str_possible_regulations.T))
    )
    regulation_counts = count_possible_regulations(meta_data, networks)
    # Only report regulations that vary across the consistent networks.
    # NOTE(review): 100*c/len(networks) relies on Python 2 integer division.
    for regulatee, regulatee_counts in zip(meta_data.genes, regulation_counts.T):
        for regulator, counts in zip(meta_data.genes, regulatee_counts):
            if len(counts) > 1:
                logging.info(
                    'How often %7s regulates %7s %s',
                    regulator.name,
                    regulatee.name,
                    ' '.join('%s : %2d%%' % (regulation_as_str(r), 100*c/len(networks)) for r, c in counts.iteritems())
                )
    #
    # thetas
    #
    possible_thetas = aggregate_possible_thetas(meta_data, networks)
    theta_counts = count_possible_thetas(meta_data, networks)
    for gene, thetas in zip(meta_data.genes, possible_thetas):
        logging.info('Consistent constitutive expression levels for %7s are %s', gene.name, ','.join(map(str, thetas)))
    for gene, counts in zip(meta_data.genes, theta_counts):
        if len(counts) > 1:
            logging.info(
                'Counts for constitutive expression levels for %7s are %s',
                gene.name,
                ' '.join('%s : %2d%%' % (t, 100*c/len(networks)) for t, c in counts.iteritems())
            )
    #
    # input parameters
    #
    possible_inputs = aggregate_possible_inputs(meta_data, networks)
    input_counts = count_possible_inputs(meta_data, networks)
    for g, input_fn in meta_data.external_inputs.iteritems():
        gene = meta_data.genes[g]
        logging.info(
            'Consistent input parameters for the external input of %7s are: %s',
            gene.name, ','.join(map(str, possible_inputs[g]))
        )
    for g, input_fn in meta_data.external_inputs.iteritems():
        if len(input_counts[g]) > 1:
            gene = meta_data.genes[g]
            logging.info(
                'Counts for input parameters for the external input of %7s are %s',
                gene.name,
                ' '.join('%s : %2d%%' % (i, 100*c/len(networks)) for i, c in input_counts[g].iteritems())
            )
    #
    # Dependencies where mutual information is positive
    #
    networks_as_features, T, edges = analyse_dependencies(networks)
    for u, v, data in edges:
        # Edge weights are negated mutual information (for minimum-spanning
        # tree construction), so recover I by negation.
        I = -data['weight']
        if I > 0.:
            feature1, x1 = network.which_network_feature(meta_data, u)
            feature2, x2 = network.which_network_feature(meta_data, v)
            logging.info(
                'Mutual information between %s:%s and %s:%s is %.2f',
                network.feature_string(feature1), x1, network.feature_string(feature2), x2, I
            )
            logging.debug(chow_liu_trees.marginal_pair_distribution(networks_as_features, u, v))
        else:
            # NOTE(review): breaking on the first non-positive MI assumes
            # 'edges' is sorted by decreasing mutual information -- confirm
            # against analyse_dependencies().
            break
    return possible_regulations
@pylab_ioff
def plot_network_realisations(net):
    """Plot the network realisations over all the conditions.

    One subplot per condition; returns the created figure.
    """
    num_cols = len(net.meta_data.conditions)
    fig = P.figure(figsize=(num_cols + 1, 3))
    for i, condition in enumerate(net.meta_data.conditions):
        P.subplot(1, num_cols, i+1)
        P.title(condition.name, fontsize=10)
        X, mismatches = network.evaluate_condition(net, condition)
        logging.debug('Condition: %6s; mismatches=%2d', condition, mismatches)
        # Only the first subplot carries the time axis label.
        plot_network_realisation(net, X, xlabel=True, ylabel=0==i)
    #P.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
    P.tight_layout()
    return fig
| {
"content_hash": "5c2be5c53842b48431250a5d86bba696",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 134,
"avg_line_length": 37.110328638497656,
"alnum_prop": 0.593965462711114,
"repo_name": "JohnReid/pybool",
"id": "0385feffc676f73c83de6e90b1b40d03d771f3b1",
"size": "15860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pybool/io.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "4455"
},
{
"name": "Python",
"bytes": "120383"
},
{
"name": "Shell",
"bytes": "2388"
}
],
"symlink_target": ""
} |
"""
kinto
Kinto is a minimalist JSON storage service with synchronisation and sharing abilities. It is meant to be easy to use and easy to self-host. **Limitations of this OpenAPI specification:** 1. Validation on OR clauses is not supported (e.g. provide `data` or `permissions` in patch operations). 2. [Filtering](http://kinto.readthedocs.io/en/stable/api/1.x/filtering.html) is supported on any field by using `?{prefix}{field_name}={value}`. 3. [Backoff headers](http://kinto.readthedocs.io/en/stable/api/1.x/backoff.html) may occur with any response, but they are only present if the server is under in heavy load, so we cannot validate them on every request. They are listed only on the default error message. 4. [Collection schemas](http://kinto.readthedocs.io/en/stable/api/1.x/collections.html#collection-json-schema) can be provided when defining a collection, but they are not validated by this specification.
OpenAPI spec version: 1.13
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.collection_permissions import CollectionPermissions
class TestCollectionPermissions(unittest.TestCase):
    """ CollectionPermissions unit test stubs """

    def setUp(self):
        # No fixtures required: the test only constructs a model object.
        pass

    def tearDown(self):
        pass

    def testCollectionPermissions(self):
        """
        Test CollectionPermissions
        """
        # Generated stub: passes as long as the model can be instantiated.
        model = swagger_client.models.collection_permissions.CollectionPermissions()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "f0c8a0bd0e1257df1997cf5cce112162",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 938,
"avg_line_length": 44.35294117647059,
"alnum_prop": 0.7316534040671971,
"repo_name": "gabisurita/kinto-codegen-tutorial",
"id": "984300d05ab6a4ccad63cdea74fda74777fea938",
"size": "2279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-client/test/test_collection_permissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "95504"
},
{
"name": "Python",
"bytes": "662063"
},
{
"name": "Shell",
"bytes": "3259"
}
],
"symlink_target": ""
} |
"""
sphinx.builders.devhelp
~~~~~~~~~~~~~~~~~~~~~~~
Build HTML documentation and Devhelp_ support files.
.. _Devhelp: http://live.gnome.org/devhelp
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import absolute_import
import re
import gzip
from os import path
from docutils import nodes
from sphinx import addnodes
from sphinx.util.osutil import make_filename
from sphinx.builders.html import StandaloneHTMLBuilder
# Prefer the stdlib ElementTree; fall back to lxml's compatible etree API
# if the stdlib module is unavailable.
try:
    import xml.etree.ElementTree as etree
except ImportError:
    import lxml.etree as etree
class DevhelpBuilder(StandaloneHTMLBuilder):
    """
    Builder that also outputs GNOME Devhelp file.
    """
    name = 'devhelp'

    # don't copy the reST source
    copysource = False
    supported_image_types = ['image/png', 'image/gif', 'image/jpeg']

    # don't add links
    add_permalinks = False
    # don't add sidebar etc.
    embedded = True

    def init(self):
        # Initialise the HTML builder, then force plain .html suffixes.
        StandaloneHTMLBuilder.init(self)
        self.out_suffix = '.html'
        self.link_suffix = '.html'

    def handle_finish(self):
        self.build_devhelp(self.outdir, self.config.devhelp_basename)

    def build_devhelp(self, outdir, outname):
        """Write the gzipped Devhelp XML index (<outname>.devhelp.gz)."""
        self.info('dumping devhelp index...')

        # Basic info
        root = etree.Element('book',
                             title=self.config.html_title,
                             name=self.config.project,
                             link="index.html",
                             version=self.config.version)
        tree = etree.ElementTree(root)

        # TOC: walk the resolved master toctree and mirror it as nested
        # <sub> elements under <chapters>.
        chapters = etree.SubElement(root, 'chapters')
        tocdoc = self.env.get_and_resolve_doctree(
            self.config.master_doc, self, prune_toctrees=False)

        def write_toc(node, parent):
            if isinstance(node, addnodes.compact_paragraph) or \
               isinstance(node, nodes.bullet_list):
                for subnode in node:
                    write_toc(subnode, parent)
            elif isinstance(node, nodes.list_item):
                item = etree.SubElement(parent, 'sub')
                for subnode in node:
                    write_toc(subnode, item)
            elif isinstance(node, nodes.reference):
                parent.attrib['link'] = node['refuri']
                parent.attrib['name'] = node.astext()

        def istoctree(node):
            return isinstance(node, addnodes.compact_paragraph) and \
                'toctree' in node

        for node in tocdoc.traverse(istoctree):
            write_toc(node, chapters)

        # Index: one <function> element per index entry; entries with
        # multiple targets get numbered "[i] title" variants.
        functions = etree.SubElement(root, 'functions')
        index = self.env.create_index(self)

        def write_index(title, refs, subitems):
            if len(refs) == 0:
                pass  # entry without a target: emit nothing
            elif len(refs) == 1:
                etree.SubElement(functions, 'function',
                                 name=title, link=refs[0][1])
            else:
                for i, ref in enumerate(refs):
                    etree.SubElement(functions, 'function',
                                     name="[%d] %s" % (i, title),
                                     link=ref[1])
            if subitems:
                # Prefix sub-entries with the parent title (parenthetical
                # qualifiers stripped).
                parent_title = re.sub(r'\s*\(.*\)\s*$', '', title)
                for subitem in subitems:
                    write_index("%s %s" % (parent_title, subitem[0]),
                                subitem[1], [])

        for (key, group) in index:
            for title, (refs, subitems, key) in group:
                write_index(title, refs, subitems)

        # Dump the XML file
        xmlfile = path.join(outdir, outname + '.devhelp.gz')
        with gzip.open(xmlfile, 'w') as f:
            tree.write(f, 'utf-8')
def setup(app):
    """Register the devhelp builder and its config value with Sphinx."""
    # The devhelp builder extends the standalone HTML builder.
    app.setup_extension('sphinx.builders.html')
    app.add_builder(DevhelpBuilder)
    app.add_config_value('devhelp_basename', lambda self: make_filename(self.project), None)
    metadata = {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
    return metadata
| {
"content_hash": "ea74e1d4e3b6e2e24cbbd4a9535f4685",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 92,
"avg_line_length": 31.12878787878788,
"alnum_prop": 0.5536626916524702,
"repo_name": "axbaretto/beam",
"id": "0849a72ea5121acaf470c2ba3cd37696b300368e",
"size": "4133",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/builders/devhelp.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "1339873"
},
{
"name": "C++",
"bytes": "1132901"
},
{
"name": "CSS",
"bytes": "124283"
},
{
"name": "Dockerfile",
"bytes": "23950"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2795906"
},
{
"name": "Groovy",
"bytes": "187109"
},
{
"name": "HTML",
"bytes": "238575"
},
{
"name": "Java",
"bytes": "39085315"
},
{
"name": "JavaScript",
"bytes": "1221326"
},
{
"name": "Jupyter Notebook",
"bytes": "7396"
},
{
"name": "Makefile",
"bytes": "354938"
},
{
"name": "Python",
"bytes": "51449019"
},
{
"name": "Roff",
"bytes": "70716"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "351541"
},
{
"name": "TeX",
"bytes": "70920"
},
{
"name": "Thrift",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import os
import sys
import logging
import datetime
from dateutil import parser
from volttron.platform.vip.agent import Agent, Core, PubSub, RPC, compat
from volttron.platform.agent import utils
from volttron.platform.agent.utils import (get_aware_utc_now, format_timestamp)
from .helpers import *
from .measurement_type import MeasurementType
from .measurement_unit import MeasurementUnit
from .meter_point import MeterPoint
from .market import Market
from .market_state import MarketState
from .neighbor import Neighbor
from .local_asset import LocalAsset
from .local_asset_model import LocalAssetModel
from .myTransactiveNode import myTransactiveNode
from .neighbor_model import NeighborModel
from .temperature_forecast_model import TemperatureForecastModel
from .solar_pv_resource import SolarPvResource
from .solar_pv_resource_model import SolarPvResourceModel
from .openloop_pnnl_load_predictor import OpenLoopPnnlLoadPredictor
from .vertex import Vertex
from .timer import Timer
# Module-level logging setup and version constant.
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '0.1'  # module version string
class CampusAgent(Agent, myTransactiveNode):
    """VOLTTRON agent modelling the campus node of a transactive network.

    The campus sits between one upstream supplier neighbor (the city, "CoR")
    and any number of downstream building neighbors. It subscribes to the
    city's supply signals and the buildings' demand signals, balances a
    single day-ahead market, and re-publishes transactive signals up and
    down the hierarchy.
    """
    def __init__(self, config_path, **kwargs):
        """Load configuration and initialise topics and the simulation clock.

        Args:
            config_path: Path to the agent's JSON/YAML config file.
        """
        Agent.__init__(self, **kwargs)
        myTransactiveNode.__init__(self)
        self.config_path = config_path
        self.config = utils.load_config(config_path)
        # Basic agent/market parameters from config (some keys are required
        # and will raise TypeError via float(None) if absent).
        self.name = self.config.get('name')
        self.market_cycle_in_min = int(self.config.get('market_cycle_in_min', 60))
        self.duality_gap_threshold = float(self.config.get('duality_gap_threshold', 0.01))
        self.building_names = self.config.get('buildings', [])
        self.building_powers = self.config.get('building_powers')
        self.db_topic = self.config.get("db_topic", "tnc")
        self.PV_max_kW = float(self.config.get("PV_max_kW"))
        self.city_loss_factor = float(self.config.get("city_loss_factor"))
        self.demand_threshold_coef = float(self.config.get('demand_threshold_coef'))
        self.monthly_peak_power = float(self.config.get('monthly_peak_power'))
        self.neighbors = []
        # Pub/sub topic templates; "{}" placeholders are filled per building
        # name where applicable.
        self.city_supply_topic = "{}/city/campus/supply".format(self.db_topic)
        self.building_demand_topic = "/".join([self.db_topic, "{}/campus/demand"])
        self.campus_demand_topic = "{}/campus/city/demand".format(self.db_topic)
        self.campus_supply_topic = "/".join([self.db_topic, "campus/{}/supply"])
        self.solar_topic = "/".join([self.db_topic, "campus/pv"])
        self.system_loss_topic = "{}/{}/system_loss".format(self.db_topic, self.name)
        self.dc_threshold_topic = "{}/{}/dc_threshold_topic".format(self.db_topic, self.name)
        self.price_topic = "{}/{}/marginal_prices".format(self.db_topic, self.name)
        # NOTE(review): `timedelta` is not defined by the visible imports
        # (`import datetime` only); presumably `from .helpers import *`
        # re-exports it — verify.
        self.reschedule_interval = timedelta(minutes=10, seconds=1)
        # Simulation clock: Timer abstracts real vs. accelerated simulated time.
        self.simulation = self.config.get('simulation', False)
        self.simulation_start_time = parser.parse(self.config.get('simulation_start_time'))
        self.simulation_one_hour_in_seconds = int(self.config.get('simulation_one_hour_in_seconds'))
        # NOTE(review): `datetime.now()` likewise relies on the star-import
        # providing the `datetime` class (not the module) — verify.
        Timer.created_time = datetime.now()
        Timer.simulation = self.simulation
        Timer.sim_start_time = self.simulation_start_time
        Timer.sim_one_hr_in_sec = self.simulation_one_hour_in_seconds
    @Core.receiver('onstart')
    def onstart(self, sender, **kwargs):
        """On agent start: build model objects and subscribe to signals."""
        # Add other objects: assets, services, neighbors
        self.init_objects()
        # Subscriptions
        self.vip.pubsub.subscribe(peer='pubsub',
                                  prefix=self.city_supply_topic,
                                  callback=self.new_supply_signal)
        # One demand subscription per configured building.
        for bldg in self.building_names:
            self.vip.pubsub.subscribe(peer='pubsub',
                                      prefix=self.building_demand_topic.format(bldg),
                                      callback=self.new_demand_signal)
    def new_demand_signal(self, peer, sender, bus, topic, headers, message):
        """Handle a demand signal published by a downstream building.

        The message carries the building's name ('source'), its demand
        curves, and cycle/convergence flags. The matching neighbor model
        receives the curves and one balancing iteration is triggered.
        """
        _log.debug("At {}, {} receives new demand records: {}".format(Timer.get_cur_time(),
                                                                      self.name, message))
        building_name = message['source']
        demand_curves = message['curves']
        start_of_cycle = message['start_of_cycle']
        fail_to_converged = message['fail_to_converged']
        # The sender must match exactly one configured building neighbor.
        neighbors = [n for n in self.neighbors if n.name == building_name]
        if len(neighbors) == 1:
            neighbor = neighbors[0]
            neighbor.model.receive_transactive_signal(self, demand_curves)
            self.balance_market(1, start_of_cycle, fail_to_converged, neighbor)
        else:
            # Zero or duplicate matches indicate a configuration error.
            _log.error("{}: There are {} building(s) with name {}."
                       .format(self.name, len(neighbors), building_name))
            _log.error("Neighbors are: {}".format([x.name for x in self.neighbors]))
            _log.error("Message is: {}".format(message))
            _log.error("Check value of 'name' key in the config file for building {}.".format(building_name))
    def new_supply_signal(self, peer, sender, bus, topic, headers, message):
        """Handle a supply signal published by the upstream city node.

        Supply curves are forwarded to the city neighbor model; balancing
        only runs when a new market cycle starts.
        """
        _log.debug("At {}, {} receives new supply records: {}".format(Timer.get_cur_time(),
                                                                      self.name, message))
        source = message['source']
        supply_curves = message['curves']
        start_of_cycle = message['start_of_cycle']
        fail_to_converged = message['fail_to_converged']
        self.city.model.receive_transactive_signal(self, supply_curves)
        if start_of_cycle:
            self.balance_market(1, start_of_cycle, fail_to_converged)
    def balance_market(self, run_cnt, start_of_cycle=False, fail_to_converged=False, fail_to_converged_neighbor=None):
        """Run one balancing iteration and propagate transactive signals.

        Args:
            run_cnt: Iteration counter; used to limit rescheduled reruns.
            start_of_cycle: True when a new market cycle (hour) begins.
            fail_to_converged: Convergence flag from the triggering signal.
            fail_to_converged_neighbor: Neighbor that failed to converge, if any.
        """
        market = self.markets[0]  # Assume only 1 TNS market per node
        market.signal_new_data = True
        market.balance(self)  # Assume only 1 TNS market per node
        if market.converged:
            _log.debug("TNS market {} balanced successfully.".format(market.name))
            # Sum all the powers as will be needed by the net supply/demand curve.
            market.assign_system_vertices(self)
            # Send only if either of the 2 conditions below occurs:
            # 1) Model balancing did not converge
            # 2) A new cycle (ie. begin of hour)
            for n in self.neighbors:
                # If the neighbor failed to converge (eg., building1 failed to converge)
                if n == fail_to_converged_neighbor and n is not None:
                    n.model.prep_transactive_signal(market, self)
                    # Demand goes upstream (city); supply goes downstream.
                    topic = self.campus_demand_topic
                    if n != self.city:
                        topic = self.campus_supply_topic.format(n.name)
                    n.model.send_transactive_signal(self, topic, start_of_cycle)
                    _log.debug("NeighborModel {} sent records.".format(n.model.name))
                else:
                    # Always send signal downstream at the start of a new cyle
                    if start_of_cycle:
                        if n != self.city:
                            n.model.prep_transactive_signal(market, self)
                            topic = self.campus_supply_topic.format(n.name)
                            n.model.send_transactive_signal(self, topic, start_of_cycle)
                            _log.debug("NeighborModel {} sent records.".format(n.model.name))
                    else:
                        _log.debug("Not start of cycle. Check convergence for neighbor {}.".format(n.model.name))
                        n.model.check_for_convergence(market)
                        if not n.model.converged:
                            n.model.prep_transactive_signal(market, self)
                            topic = self.campus_demand_topic
                            if n != self.city:
                                topic = self.campus_supply_topic.format(n.name)
                            n.model.send_transactive_signal(self, topic, start_of_cycle)
                            _log.debug("NeighborModel {} sent records.".format(n.model.name))
                        else:
                            _log.debug("{} ({}) did not send records due to check_for_convergence()."
                                       .format(n.model.name, self.name))
            # Schedule rerun balancing if not in simulation mode
            if not self.simulation:
                # For start_of_cyle=True, the code above always send signal to neighbors so don't need to reschedule
                # Schedule rerun if any neighbor is not converged
                if not start_of_cycle:
                    if not all([n.model.converged for n in self.neighbors]):
                        dt = datetime.now()
                        # Schedule to rerun after 5 minutes if it is in the same hour and is the first reschedule
                        # NOTE(review): reschedule_interval is 10 min 1 s, not
                        # 5 min as this comment says — confirm intent.
                        next_run_dt = dt + self.reschedule_interval
                        if dt.hour == next_run_dt.hour and run_cnt >= 1:
                            _log.debug("{} reschedule to run at {}".format(self.name, next_run_dt))
                            self.core.schedule(next_run_dt, self.balance_market, run_cnt + 1)
            prices = market.marginalPrices
            # There is a case where the balancing happens at the end of the hour and continues to the next hour, which
            # creates 26 values. Get the last 25 values.
            prices = prices[-25:]
            prices = [x.value for x in prices]
            self.vip.pubsub.publish(peer='pubsub',
                                    topic=self.price_topic,
                                    message={'prices': prices,
                                             'current_time': format_timestamp(Timer.get_cur_time())
                                             }
                                    )
        else:
            # Balancing failed: push the demand signal back to the city.
            _log.debug("Market balancing sub-problem failed.")
            self.city.model.prep_transactive_signal(market, self)
            self.city.model.send_transactive_signal(self, self.campus_demand_topic, start_of_cycle)
    def init_objects(self):
        """Create the meter, weather service, assets, market and neighbors."""
        # Add meter
        meter = MeterPoint()
        meter.measurementType = MeasurementType.PowerReal
        meter.name = 'CampusElectricityMeter'
        meter.measurementUnit = MeasurementUnit.kWh
        self.meterPoints.append(meter)
        # Add weather forecast service
        weather_service = TemperatureForecastModel(self.config_path, self)
        self.informationServiceModels.append(weather_service)
        # Add inelastive asset
        inelastive_load = LocalAsset()
        inelastive_load.name = 'InelasticBuildings'  # Campus buildings that are not responsive
        inelastive_load.maximumPower = 0  # Remember that a load is a negative power [kW]
        inelastive_load.minimumPower = -2 * 8200  # Assume twice the average PNNL load [kW]
        # Add inelastive asset model
        inelastive_load_model = OpenLoopPnnlLoadPredictor(weather_service)
        inelastive_load_model.name = 'InelasticBuildingsModel'
        inelastive_load_model.engagementCost = [0, 0, 0]  # Transition costs irrelevant
        inelastive_load_model.defaultPower = -6000  # [kW]
        inelastive_load_model.defaultVertices = [Vertex(0, 0, -6000.0, 1)]
        # Cross-reference asset & asset model
        inelastive_load_model.object = inelastive_load
        inelastive_load.model = inelastive_load_model
        # Add solar PV asset
        solar_pv = SolarPvResource()
        solar_pv.maximumPower = self.PV_max_kW  # [avg.kW]
        solar_pv.minimumPower = 0.0  # [avg.kW]
        solar_pv.name = 'SolarPv'
        solar_pv.description = '120 kW solar PV site on the campus'
        # Add solar PV asset model
        solar_pv_model = SolarPvResourceModel()
        solar_pv_model.cloudFactor = 1.0  # dimensionless
        solar_pv_model.engagementCost = [0, 0, 0]
        solar_pv_model.name = 'SolarPvModel'
        solar_pv_model.defaultPower = 0.0  # [avg.kW]
        solar_pv_model.defaultVertices = [Vertex(0, 0, 30.0, True)]
        solar_pv_model.costParameters = [0, 0, 0]
        # Subscribes the model to the PV power topic.
        solar_pv_model.inject(self, power_topic=self.solar_topic)
        # Cross-reference asset & asset model
        solar_pv.model = solar_pv_model
        solar_pv_model.object = solar_pv
        # Add inelastive and solar_pv as campus' assets
        self.localAssets.extend([inelastive_load, solar_pv])
        # Add Market
        market = Market()
        market.name = 'dayAhead'
        market.commitment = False
        market.converged = False
        market.defaultPrice = 0.04  # [$/kWh]
        market.dualityGapThreshold = self.duality_gap_threshold  # [0.02 = 2#]
        market.initialMarketState = MarketState.Inactive
        market.marketOrder = 1  # This is first and only market
        market.intervalsToClear = 1  # Only one interval at a time
        market.futureHorizon = timedelta(hours=24)  # Projects 24 hourly future intervals
        market.intervalDuration = timedelta(hours=1)  # [h] Intervals are 1 h long
        market.marketClearingInterval = timedelta(hours=1)  # [h]
        market.marketClearingTime = Timer.get_cur_time().replace(hour=0,
                                                                 minute=0,
                                                                 second=0,
                                                                 microsecond=0)  # Aligns with top of hour
        market.nextMarketClearingTime = market.marketClearingTime + timedelta(hours=1)
        self.markets.append(market)
        # City object
        city = Neighbor()
        city.name = 'CoR'
        city.description = 'City of Richland (COR) electricity supplier node'
        city.maximumPower = 20000  # Remember loads have negative power [avg.kW]
        city.minimumPower = 0  # [avg.kW]
        city.lossFactor = self.city_loss_factor
        # City model
        city_model = NeighborModel()
        city_model.name = 'CoR_Model'
        city_model.location = self.name
        city_model.transactive = True
        city_model.defaultPower = 10000  # [avg.kW]
        city_model.defaultVertices = [Vertex(0.046, 160, 0, True),
                                      Vertex(0.048,
                                             160 + city.maximumPower * (0.046 + 0.5 * (0.048 - 0.046)),
                                             city.maximumPower, True)]
        city_model.costParameters = [0, 0, 0]
        city_model.demand_threshold_coef = self.demand_threshold_coef
        city_model.demandThreshold = self.monthly_peak_power
        # Subscribes the model to loss/demand-charge topics.
        city_model.inject(self,
                          system_loss_topic=self.system_loss_topic,
                          dc_threshold_topic=self.dc_threshold_topic)
        # Cross-reference object & model
        city_model.object = city
        city.model = city_model
        self.city = city
        # Add city as campus' neighbor
        self.neighbors.append(city)
        # Add buildings
        for bldg_name in self.building_names:
            bldg_neighbor = self.make_bldg_neighbor(bldg_name)
            self.neighbors.append(bldg_neighbor)
    def make_bldg_neighbor(self, name):
        """Create and return a building neighbor with its model attached.

        Args:
            name: Building name; must be a key of `building_powers`, whose
                value supplies [maximumPower, minimumPower].
        """
        bldg_powers = self.building_powers[name]
        # Create neighbor
        bldg = Neighbor()
        bldg.name = name
        bldg.maximumPower = bldg_powers[0]  # Remember loads have negative power [avg.kW]
        bldg.minimumPower = bldg_powers[1]  # [avg.kW]
        _log.debug("{} has minPower of {} and maxPower of {}".format(bldg.name,
                                                                     bldg.minimumPower, bldg.maximumPower))
        # Create neighbor model
        bldg_model = NeighborModel()
        bldg_model.name = name + '_Model'
        bldg_model.location = self.name
        bldg_model.convergenceThreshold = 0.02
        bldg_model.friend = True
        bldg_model.transactive = True
        bldg_model.costParameters = [0, 0, 0]
        # This is different building to building
        bldg_model.defaultPower = bldg.minimumPower/2  # bldg_powers[2] # [avg.kW]
        bldg_model.defaultVertices = [Vertex(float("inf"), 0, bldg_model.defaultPower, True)]
        # Cross reference object & model
        bldg.model = bldg_model
        bldg_model.object = bldg
        return bldg
def main(argv=sys.argv):
    """Main entry point: launch CampusAgent via the VOLTTRON VIP runner.

    Any unhandled exception is logged with its traceback instead of being
    propagated, so the process terminates cleanly.
    """
    try:
        utils.vip_main(CampusAgent)
    except Exception:
        # The previously bound exception variable was unused;
        # _log.exception records the active traceback automatically.
        _log.exception('unhandled exception')
if __name__ == '__main__':
    # Entry point for script; main() returns None, so the exit status is 0.
    sys.exit(main())
| {
"content_hash": "4fece059a738d72dbb46cfd954b266dc",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 118,
"avg_line_length": 47.91379310344828,
"alnum_prop": 0.5906801007556675,
"repo_name": "VOLTTRON/volttron-applications",
"id": "29e75de5f2748d3c86957859709c587c28ef0894",
"size": "19566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TNSAgent/tns/campus_agent.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "221216"
},
{
"name": "CSS",
"bytes": "36026"
},
{
"name": "Gherkin",
"bytes": "18993"
},
{
"name": "Gnuplot",
"bytes": "2486"
},
{
"name": "HTML",
"bytes": "105555"
},
{
"name": "JavaScript",
"bytes": "815273"
},
{
"name": "Makefile",
"bytes": "2413"
},
{
"name": "Objective-C",
"bytes": "1042"
},
{
"name": "Python",
"bytes": "5294800"
},
{
"name": "Shell",
"bytes": "6202"
}
],
"symlink_target": ""
} |
import operator
from collections import namedtuple
import numpy as np
from allel.chunked import util as _util
from allel.abc import ArrayWrapper, DisplayAsTable
from allel.model.ndarray import subset as _numpy_subset, NumpyRecArrayWrapper
def store(data, arr, start=0, stop=None, offset=0, blen=None):
    """Copy `data` block-wise into `arr`.

    Blocks of at most `blen` rows are read from `data[start:stop]` and
    written into `arr` starting at index `offset`.
    """
    # resolve block length and clamp the stop bound to the data length
    blen = _util.get_blen_array(data, blen)
    stop = len(data) if stop is None else min(stop, len(data))
    if stop - start < 0:
        raise ValueError('invalid stop/start')
    # stream blocks across, advancing the write cursor as we go
    cursor = offset
    for lo in range(start, stop, blen):
        hi = min(lo + blen, stop)
        arr[cursor:cursor + (hi - lo)] = data[lo:hi]
        cursor += hi - lo
def copy(data, start=0, stop=None, blen=None, storage=None, create='array',
         **kwargs):
    """Copy `data` block-wise into a new array.

    The first block creates the output via the storage layer's `create`
    factory; subsequent blocks are appended.
    """
    # resolve storage backend and block length
    storage = _util.get_storage(storage)
    blen = _util.get_blen_array(data, blen)
    stop = len(data) if stop is None else min(stop, len(data))
    length = stop - start
    if length < 0:
        raise ValueError('invalid stop/start')
    # stream blocks into the output, creating it lazily on the first block
    result = None
    for lo in range(start, stop, blen):
        block = data[lo:min(lo + blen, stop)]
        if result is None:
            result = getattr(storage, create)(block, expectedlen=length, **kwargs)
        else:
            result.append(block)
    return result
def copy_table(tbl, start=0, stop=None, blen=None, storage=None,
               create='table', **kwargs):
    """Copy `tbl` block-wise into a new table.

    Column lists of at most `blen` rows are copied per block; the output
    table is created lazily from the first block.
    """
    # validate the table-like input and resolve storage/block length
    names, columns = _util.check_table_like(tbl)
    storage = _util.get_storage(storage)
    blen = _util.get_blen_table(tbl, blen)
    n_rows = len(columns[0])
    stop = n_rows if stop is None else min(stop, n_rows)
    length = stop - start
    if length < 0:
        raise ValueError('invalid stop/start')
    # stream row-blocks of all columns into the output
    result = None
    for lo in range(start, stop, blen):
        hi = min(lo + blen, stop)
        blocks = [c[lo:hi] for c in columns]
        if result is None:
            result = getattr(storage, create)(blocks, names=names,
                                              expectedlen=length, **kwargs)
        else:
            result.append(blocks)
    return result
def map_blocks(data, f, blen=None, storage=None, create='array', **kwargs):
    """Apply function `f` block-wise over `data`.

    `data` may be a single array-like or a tuple of equal-length
    array-likes, in which case aligned blocks from each are passed to `f`.
    """
    storage = _util.get_storage(storage)
    multiple = isinstance(data, tuple)
    # for multiple inputs, use the largest per-input block length
    if multiple:
        blen = max(_util.get_blen_array(d, blen) for d in data)
    else:
        blen = _util.get_blen_array(data, blen)
    if multiple:
        _util.check_equal_length(*data)
        length = len(data[0])
    else:
        length = len(data)
    # apply f to each (set of) block(s), creating the output lazily
    result = None
    for lo in range(0, length, blen):
        hi = min(lo + blen, length)
        blocks = [d[lo:hi] for d in data] if multiple else [data[lo:hi]]
        res = f(*blocks)
        if result is None:
            result = getattr(storage, create)(res, expectedlen=length, **kwargs)
        else:
            result.append(res)
    return result
def reduce_axis(data, reducer, block_reducer, mapper=None, axis=None,
                blen=None, storage=None, create='array', **kwargs):
    """Apply an operation to `data` that reduces over one or more axes.

    Parameters
    ----------
    data : array_like
        Input data, iterated block-wise along the first dimension.
    reducer : callable
        Reduction applied to each block, e.g. np.amax; receives `axis`.
    block_reducer : callable
        Binary operation combining per-block results when the reduction
        spans the first dimension, e.g. np.maximum.
    mapper : callable, optional
        Transformation applied to each block before reduction.
    axis : int or tuple of int, optional
        Axis or axes to reduce over; None reduces over everything.
    """
    # setup
    storage = _util.get_storage(storage)
    blen = _util.get_blen_array(data, blen)
    length = len(data)
    # normalise axis arg
    if isinstance(axis, int):
        axis = (axis,)
    # deal with 'out' kwarg if supplied, can arise if a chunked array is
    # passed as an argument to numpy.sum(), see also
    # https://github.com/cggh/scikit-allel/issues/66
    kwarg_out = kwargs.pop('out', None)
    if kwarg_out is not None:
        raise ValueError('keyword argument "out" is not supported')
    if axis is None or 0 in axis:
        # two-step reduction: reduce each block, then fold the per-block
        # results together with block_reducer
        out = None
        for i in range(0, length, blen):
            j = min(i+blen, length)
            block = data[i:j]
            if mapper:
                block = mapper(block)
            res = reducer(block, axis=axis)
            if out is None:
                out = res
            else:
                out = block_reducer(out, res)
        if np.isscalar(out):
            return out
        elif len(out.shape) == 0:
            # zero-dimensional array: unwrap to a plain scalar
            return out[()]
        else:
            return getattr(storage, create)(out, **kwargs)
    else:
        # first dimension is preserved, no need to reduce blocks
        out = None
        for i in range(0, length, blen):
            j = min(i+blen, length)
            block = data[i:j]
            if mapper:
                block = mapper(block)
            r = reducer(block, axis=axis)
            if out is None:
                out = getattr(storage, create)(r, expectedlen=length, **kwargs)
            else:
                out.append(r)
        return out
def amax(data, axis=None, mapper=None, blen=None, storage=None,
         create='array', **kwargs):
    """Compute the maximum value."""
    # per-block maxima are folded together with np.maximum
    return reduce_axis(data, reducer=np.amax, block_reducer=np.maximum,
                       mapper=mapper, axis=axis, blen=blen, storage=storage,
                       create=create, **kwargs)
def amin(data, axis=None, mapper=None, blen=None, storage=None,
         create='array', **kwargs):
    """Compute the minimum value."""
    # per-block minima are folded together with np.minimum
    return reduce_axis(data, reducer=np.amin, block_reducer=np.minimum,
                       mapper=mapper, axis=axis, blen=blen, storage=storage,
                       create=create, **kwargs)
# noinspection PyShadowingBuiltins
def asum(data, axis=None, mapper=None, blen=None, storage=None,
         create='array', **kwargs):
    """Compute the sum."""
    # per-block sums are folded together with np.add
    return reduce_axis(data, reducer=np.sum, block_reducer=np.add,
                       mapper=mapper, axis=axis, blen=blen, storage=storage,
                       create=create, **kwargs)
def count_nonzero(data, mapper=None, blen=None, storage=None,
                  create='array', **kwargs):
    """Count the number of non-zero elements."""
    # per-block counts are folded together with np.add
    return reduce_axis(data, reducer=np.count_nonzero, block_reducer=np.add,
                       mapper=mapper, blen=blen, storage=storage,
                       create=create, **kwargs)
def compress(condition, data, axis=0, out=None, blen=None, storage=None, create='array',
             **kwargs):
    """Return selected slices of an array along given axis."""
    # `out` exists purely for numpy API compatibility
    if out is not None:
        raise NotImplementedError('out argument is not supported')
    storage = _util.get_storage(storage)
    blen = _util.get_blen_array(data, blen)
    length = len(data)
    nnz = count_nonzero(condition)
    if axis == 0:
        # row selection: condition must align with the rows of data
        _util.check_equal_length(data, condition)
        result = None
        for lo in range(0, length, blen):
            hi = min(lo + blen, length)
            cond_block = np.asarray(condition[lo:hi])
            # skip reading the data block entirely when nothing is selected
            if np.any(cond_block):
                selected = np.compress(cond_block, np.asarray(data[lo:hi]), axis=0)
                if result is None:
                    result = getattr(storage, create)(selected, expectedlen=nnz, **kwargs)
                else:
                    result.append(selected)
        return result
    if axis == 1:
        # column selection: same condition applied to every block
        condition = np.asanyarray(condition)
        result = None
        for lo in range(0, length, blen):
            hi = min(lo + blen, length)
            selected = np.compress(condition, np.asarray(data[lo:hi]), axis=1)
            if result is None:
                result = getattr(storage, create)(selected, expectedlen=length,
                                                  **kwargs)
            else:
                result.append(selected)
        return result
    raise NotImplementedError('axis not supported: %s' % axis)
def take(data, indices, axis=0, out=None, mode='raise', blen=None, storage=None,
         create='array', **kwargs):
    """Take elements from an array along an axis."""
    # `out` exists purely for numpy API compatibility
    if out is not None:
        raise NotImplementedError('out argument is not supported')
    length = len(data)
    if axis == 0:
        indices = np.asanyarray(indices)
        # only strictly increasing indices are supported for row take
        if np.any(indices[1:] <= indices[:-1]):
            raise NotImplementedError(
                'indices must be strictly increasing'
            )
        # delegate to compress() with an equivalent boolean mask
        mask = np.zeros((length,), dtype=bool)
        mask[indices] = True
        return compress(mask, data, axis=0, blen=blen, storage=storage,
                        create=create, **kwargs)
    if axis == 1:
        storage = _util.get_storage(storage)
        blen = _util.get_blen_array(data, blen)
        # column take: apply the same indices to every block
        result = None
        for lo in range(0, length, blen):
            hi = min(lo + blen, length)
            taken = np.take(data[lo:hi], indices, axis=1, mode=mode)
            if result is None:
                result = getattr(storage, create)(taken, expectedlen=length,
                                                  **kwargs)
            else:
                result.append(taken)
        return result
    raise NotImplementedError('axis not supported: %s' % axis)
def compress_table(condition, tbl, axis=None, out=None, blen=None, storage=None,
                   create='table', **kwargs):
    """Return selected rows of a table."""
    # only row-wise selection makes sense for a table
    if axis is not None and axis != 0:
        raise NotImplementedError('only axis 0 is supported')
    # `out` exists purely for numpy API compatibility
    if out is not None:
        raise NotImplementedError('out argument is not supported')
    storage = _util.get_storage(storage)
    names, columns = _util.check_table_like(tbl)
    blen = _util.get_blen_table(tbl, blen)
    _util.check_equal_length(columns[0], condition)
    length = len(columns[0])
    nnz = count_nonzero(condition)
    # select rows block-by-block across all columns
    result = None
    for lo in range(0, length, blen):
        hi = min(lo + blen, length)
        cond_block = condition[lo:hi]
        # skip reading the column blocks entirely when nothing is selected
        if np.any(cond_block):
            selected = [np.compress(cond_block, c[lo:hi], axis=0)
                        for c in columns]
            if result is None:
                result = getattr(storage, create)(selected, names=names,
                                                  expectedlen=nnz, **kwargs)
            else:
                result.append(selected)
    return result
def take_table(tbl, indices, axis=None, out=None, mode='raise', blen=None, storage=None,
               create='table', **kwargs):
    """Return selected rows of a table."""
    # only row-wise selection makes sense for a table
    if axis is not None and axis != 0:
        raise NotImplementedError('only axis 0 is supported')
    # `out` and `mode` exist purely for numpy API compatibility
    if out is not None:
        raise NotImplementedError('out argument is not supported')
    if mode is not None and mode != 'raise':
        raise NotImplementedError('only mode=raise is supported')
    names, columns = _util.check_table_like(tbl)
    length = len(columns[0])
    indices = np.asanyarray(indices)
    # only strictly increasing indices are supported
    if np.any(indices[1:] <= indices[:-1]):
        raise NotImplementedError(
            'indices must be strictly increasing'
        )
    # delegate to compress_table() with an equivalent boolean mask
    mask = np.zeros((length,), dtype=bool)
    mask[indices] = True
    return compress_table(mask, tbl, blen=blen, storage=storage,
                          create=create, **kwargs)
def subset(data, sel0=None, sel1=None, blen=None, storage=None, create='array',
           **kwargs):
    """Return selected rows and columns of an array."""
    # TODO refactor sel0 and sel1 normalization with ndarray.subset
    storage = _util.get_storage(storage)
    blen = _util.get_blen_array(data, blen)
    length = len(data)
    sel0 = None if sel0 is None else np.asanyarray(sel0)
    sel1 = None if sel1 is None else np.asanyarray(sel1)
    # normalise dim 0 selection to a boolean mask
    if sel0 is not None and sel0.dtype.kind != 'b':
        mask = np.zeros(length, dtype=bool)
        mask[sel0] = True
        sel0 = mask
    # normalise dim 1 selection to an index array
    if sel1 is not None and sel1.dtype.kind == 'b':
        sel1, = np.nonzero(sel1)
    # degenerate cases reduce to simpler operations
    if sel0 is None and sel1 is None:
        return copy(data, blen=blen, storage=storage, create=create, **kwargs)
    if sel1 is None:
        return compress(sel0, data, axis=0, blen=blen, storage=storage,
                        create=create, **kwargs)
    if sel0 is None:
        return take(data, sel1, axis=1, blen=blen, storage=storage,
                    create=create, **kwargs)
    # general case: block-wise row selection combined with column take
    sel0_nnz = count_nonzero(sel0)
    result = None
    for lo in range(0, length, blen):
        hi = min(lo + blen, length)
        row_mask = sel0[lo:hi]
        # skip reading the data block entirely when no rows are selected
        if np.any(row_mask):
            selected = _numpy_subset(data[lo:hi], row_mask, sel1)
            if result is None:
                result = getattr(storage, create)(selected, expectedlen=sel0_nnz,
                                                  **kwargs)
            else:
                result.append(selected)
    return result
def concatenate_table(tup, blen=None, storage=None, create='table', **kwargs):
    """Stack tables in sequence vertically (row-wise)."""
    storage = _util.get_storage(storage)
    if not isinstance(tup, (tuple, list)):
        raise ValueError('expected tuple or list, found %r' % tup)
    if len(tup) < 2:
        raise ValueError('expected two or more tables to stack')
    # total row count across all input tables
    expectedlen = sum(len(t) for t in tup)
    result = None
    names = None
    for table in tup:
        tblen = _util.get_blen_table(table, blen)
        # names from the first table constrain all subsequent tables
        names, tcolumns = _util.check_table_like(table, names=names)
        tlen = len(tcolumns[0])
        for lo in range(0, tlen, tblen):
            hi = min(lo + tblen, tlen)
            blocks = [c[lo:hi] for c in tcolumns]
            if result is None:
                result = getattr(storage, create)(blocks, names=names,
                                                  expectedlen=expectedlen,
                                                  **kwargs)
            else:
                result.append(blocks)
    return result
def concatenate(tup, axis=0, blen=None, storage=None, create='array', **kwargs):
    """Concatenate arrays."""
    storage = _util.get_storage(storage)
    if not isinstance(tup, (tuple, list)):
        raise ValueError('expected tuple or list, found %r' % tup)
    if len(tup) < 2:
        raise ValueError('expected two or more arrays')
    if axis != 0:
        # delegate to map_blocks, concatenating aligned blocks along `axis`
        def join(*blocks):
            return np.concatenate(blocks, axis=axis)
        return map_blocks(tup, join, blen=blen, storage=storage, create=create,
                          **kwargs)
    # axis 0: stream every array's blocks into one output in order
    expectedlen = sum(len(a) for a in tup)
    result = None
    for a in tup:
        ablen = _util.get_blen_array(a, blen)
        for lo in range(0, len(a), ablen):
            block = a[lo:min(lo + ablen, len(a))]
            if result is None:
                result = getattr(storage, create)(block, expectedlen=expectedlen, **kwargs)
            else:
                result.append(block)
    return result
def binary_op(data, op, other, blen=None, storage=None, create='array',
              **kwargs):
    """Compute a binary operation block-wise over `data`."""
    # unwrap zero-dimensional arrays to plain scalars
    if hasattr(other, 'shape') and len(other.shape) == 0:
        other = other[()]
    if np.isscalar(other):
        # broadcast the scalar operand against each block
        return map_blocks(data, lambda block: op(block, other),
                          blen=blen, storage=storage, create=create, **kwargs)
    elif len(data) == len(other):
        # element-wise over two equal-length operands
        return map_blocks((data, other), lambda a, b: op(a, b),
                          blen=blen, storage=storage, create=create, **kwargs)
    else:
        raise NotImplementedError('argument type not supported')
# based on bcolz.chunked_eval
def _get_expression_variables(expression, vm):
cexpr = compile(expression, '<string>', 'eval')
if vm == 'numexpr':
# Check that var is not a numexpr function here. This is useful for
# detecting unbound variables in expressions. This is not necessary
# for the 'python' engine.
from numexpr.expressions import functions as numexpr_functions
return [var for var in cexpr.co_names
if var not in ['None', 'False', 'True'] and
var not in numexpr_functions]
else:
return [var for var in cexpr.co_names
if var not in ['None', 'False', 'True']]
# based on bcolz.chunked_eval
def eval_table(tbl, expression, vm='python', blen=None, storage=None,
               create='array', vm_kwargs=None, **kwargs):
    """Evaluate `expression` against columns of a table."""
    storage = _util.get_storage(storage)
    names, columns = _util.check_table_like(tbl)
    length = len(columns[0])
    vm_kwargs = dict() if vm_kwargs is None else vm_kwargs
    # select the evaluation engine
    if vm == 'numexpr':
        import numexpr
        evaluate = numexpr.evaluate
    elif vm == 'python':
        # noinspection PyUnusedLocal
        def evaluate(expr, local_dict=None, **kw):
            # takes no keyword arguments
            return eval(expr, dict(), local_dict)
    else:
        raise ValueError('expected vm either "numexpr" or "python"')
    # determine which columns the expression actually references
    variables = _get_expression_variables(expression, vm)
    required_columns = {v: columns[names.index(v)] for v in variables}
    # determine block size for evaluation from the required columns only
    blen = _util.get_blen_table(required_columns, blen=blen)
    # evaluate block-by-block, binding column slices as local variables
    result = None
    for lo in range(0, length, blen):
        hi = min(lo + blen, length)
        local_vars = {v: c[lo:hi] for v, c in required_columns.items()}
        res = evaluate(expression, local_dict=local_vars, **vm_kwargs)
        if result is None:
            result = getattr(storage, create)(res, expectedlen=length, **kwargs)
        else:
            result.append(res)
    return result
class ChunkedArrayWrapper(ArrayWrapper):
"""Wrapper class for chunked array-like data.
Parameters
----------
data : array_like
Data to be wrapped. May be a Zarr array, h5py dataset, or
anything providing a similar interface.
"""
def __init__(self, data):
data = _util.ensure_array_like(data)
super(ChunkedArrayWrapper, self).__init__(data)
@property
def caption(self):
r = '<%s' % type(self).__name__
r += ' shape=%s' % str(self.shape)
r += ' dtype=%s' % str(self.dtype)
if self.chunks is not None:
r += ' chunks=%s' % str(self.chunks)
if self.nbytes:
r += '\n nbytes=%s' % _util.human_readable_size(self.nbytes)
if self.cbytes:
r += ' cbytes=%s' % _util.human_readable_size(self.cbytes)
if self.cratio:
r += ' cratio=%.1f' % self.cratio
if self.compression:
r += '\n compression=%s' % self.compression
if self.compression_opts is not None:
r += ' compression_opts=%s' % self.compression_opts
values_cls = type(self.values)
r += '\n values=%s.%s' % (values_cls.__module__, values_cls.__name__)
r += '>'
return r
def __repr__(self):
return self.caption
@property
def nbytes(self):
return _util.get_nbytes(self.values)
@property
def cbytes(self):
return _util.get_cbytes(self.values)
@property
def compression(self):
return _util.get_compression(self.values)
@property
def compression_opts(self):
return _util.get_compression_opts(self.values)
@property
def shuffle(self):
return _util.get_shuffle(self.values)
@property
def chunks(self):
return _util.get_chunks(self.values)
@property
def cratio(self):
nbytes = self.nbytes
cbytes = self.cbytes
if nbytes and cbytes:
return nbytes / cbytes
return None
# outputs from these methods are not wrapped
store = store
count_nonzero = count_nonzero
def map_blocks(self, f, blen=None, storage=None, create='array', **kwargs):
out = map_blocks(self, f, blen=blen, storage=storage, create=create, **kwargs)
return ChunkedArrayWrapper(out)
def map_blocks_method(self, method_name, kwargs=None, **storage_kwargs):
if kwargs is None:
kwargs = dict()
def f(block):
method = getattr(block, method_name)
return method(**kwargs)
out = self.map_blocks(f, **storage_kwargs)
return out
def copy(self, start=0, stop=None, blen=None, storage=None, create='array',
**kwargs):
out = copy(self, start=start, stop=stop, blen=blen, storage=storage,
create=create, **kwargs)
# can always wrap this as sub-class
return type(self)(out)
def binary_op(self, op, other, blen=None, storage=None, create='array',
**kwargs):
out = binary_op(self, op, other, blen=blen, storage=storage,
create=create, **kwargs)
return ChunkedArrayWrapper(out)
    # rich comparisons and arithmetic are all implemented block-wise via
    # binary_op; each returns a ChunkedArrayWrapper of element-wise results
    def __eq__(self, other, **kwargs):
        return self.binary_op(operator.eq, other, **kwargs)
    def __ne__(self, other, **kwargs):
        return self.binary_op(operator.ne, other, **kwargs)
    def __lt__(self, other, **kwargs):
        return self.binary_op(operator.lt, other, **kwargs)
    def __gt__(self, other, **kwargs):
        return self.binary_op(operator.gt, other, **kwargs)
    def __le__(self, other, **kwargs):
        return self.binary_op(operator.le, other, **kwargs)
    def __ge__(self, other, **kwargs):
        return self.binary_op(operator.ge, other, **kwargs)
    def __add__(self, other, **kwargs):
        return self.binary_op(operator.add, other, **kwargs)
    def __floordiv__(self, other, **kwargs):
        return self.binary_op(operator.floordiv, other, **kwargs)
    def __mod__(self, other, **kwargs):
        return self.binary_op(operator.mod, other, **kwargs)
    def __mul__(self, other, **kwargs):
        return self.binary_op(operator.mul, other, **kwargs)
    def __pow__(self, other, **kwargs):
        return self.binary_op(operator.pow, other, **kwargs)
    def __sub__(self, other, **kwargs):
        return self.binary_op(operator.sub, other, **kwargs)
    def __truediv__(self, other, **kwargs):
        return self.binary_op(operator.truediv, other, **kwargs)
    def __and__(self, other, **kwargs):
        return self.binary_op(operator.and_, other, **kwargs)
    def __lshift__(self, other, **kwargs):
        return self.binary_op(operator.lshift, other, **kwargs)
    def __or__(self, other, **kwargs):
        return self.binary_op(operator.or_, other, **kwargs)
    def __rshift__(self, other, **kwargs):
        return self.binary_op(operator.rshift, other, **kwargs)
    def __xor__(self, other, **kwargs):
        return self.binary_op(operator.xor, other, **kwargs)
    def compress(self, condition, axis=0, out=None, **kwargs):
        """Select elements where boolean `condition` is true along `axis`."""
        out = compress(condition, self.values, axis=axis, out=out, **kwargs)
        return ChunkedArrayWrapper(out)
    def take(self, indices, axis=0, out=None, **kwargs):
        """Select elements at the given `indices` along `axis`."""
        out = take(self.values, indices, axis=axis, out=out, **kwargs)
        return ChunkedArrayWrapper(out)
    def subset(self, sel0=None, sel1=None, **kwargs):
        """Select a subset along the first (`sel0`) and/or second (`sel1`) axes."""
        out = subset(self.values, sel0, sel1, **kwargs)
        return ChunkedArrayWrapper(out)
    def concatenate(self, others, axis=0, **kwargs):
        """Concatenate this array with one or more others along `axis`."""
        if not isinstance(others, (tuple, list)):
            others = others,
        tup = (self,) + tuple(others)
        out = concatenate(tup, axis=axis, **kwargs)
        return ChunkedArrayWrapper(out)
def max(self, axis=None, **kwargs):
out = amax(self, axis=axis, **kwargs)
if np.isscalar(out):
return out
else:
return ChunkedArrayWrapper(out)
def min(self, axis=None, **kwargs):
out = amin(self, axis=axis, **kwargs)
if np.isscalar(out):
return out
else:
return ChunkedArrayWrapper(out)
def sum(self, axis=None, **kwargs):
out = asum(self, axis=axis, **kwargs)
if np.isscalar(out):
return out
else:
return ChunkedArrayWrapper(out)
class ChunkedTableWrapper(DisplayAsTable):
    """Wrapper class for chunked table-like data.
    Parameters
    ----------
    data: table_like
        Data to be wrapped. May be a tuple or list of columns (array-like),
        a dict mapping names to columns, h5py group, numpy recarray, or
        anything providing a similar interface.
    names : sequence of strings
        Column names.
    """
    # class used to wrap numpy recarray slices returned by __getitem__
    array_cls = NumpyRecArrayWrapper
    # noinspection PyMissingConstructor
    def __init__(self, data, names=None):
        names, columns = _util.check_table_like(data, names=names)
        # skip super-class constructor because we are more flexible about type of values here
        self._values = data
        self._names = names
        self._columns = columns
        # namedtuple type used to return single rows from __getitem__
        self.rowcls = namedtuple('row', names)
    @property
    def names(self):
        """Sequence of column names."""
        return self._names
    @property
    def columns(self):
        """Sequence of column data objects, parallel to :attr:`names`."""
        return self._columns
    def __getitem__(self, item):
        """Index by column name, row index, row slice, or sequence of column names."""
        if isinstance(item, str):
            # item is column name, return column
            idx = self._names.index(item)
            return ChunkedArrayWrapper(self._columns[idx])
        elif isinstance(item, int):
            # item is row index, return row
            return self.rowcls(*(col[item] for col in self._columns))
        elif isinstance(item, slice):
            # item is row slice, return numpy recarray
            start = 0 if item.start is None else item.start
            if start < 0:
                raise ValueError('negative indices not supported')
            stop = len(self) if item.stop is None else item.stop
            stop = min(stop, len(self))
            step = 1 if item.step is None else item.step
            outshape = (stop - start) // step
            out = np.empty(outshape, dtype=self.dtype)
            for n, c in zip(self._names, self._columns):
                out[n] = c[start:stop:step]
            out = out.view(np.recarray)
            if self.array_cls is not None:
                out = self.array_cls(out)
            return out
        elif isinstance(item, (list, tuple)) and \
                all(isinstance(i, str) for i in item):
            # item is sequence of column names, return table
            columns = [self._columns[self._names.index(n)] for n in item]
            return type(self)(columns, names=item)
        else:
            raise IndexError('item not supported for indexing: %s' % repr(item))
    def __array__(self, *args):
        """NumPy array protocol: materialise the whole table, optionally cast."""
        a = np.asanyarray(self[:])
        if args:
            a = a.astype(args[0])
        return a
    def __getattr__(self, item):
        # expose columns as attributes; anything else is deferred upwards
        if item in self._names:
            idx = self._names.index(item)
            return ChunkedArrayWrapper(self._columns[idx])
        else:
            # NOTE(review): assumes DisplayAsTable defines __getattr__ -- confirm
            return super(ChunkedTableWrapper, self).__getattr__(item)
    @property
    def caption(self):
        """Summary string used for display and by __repr__."""
        r = '<%s' % type(self).__name__
        r += ' shape=%s' % str(self.shape)
        r += ' dtype=%s' % str(self.dtype)
        if self.nbytes:
            r += '\n nbytes=%s' % _util.human_readable_size(self.nbytes)
        if self.cbytes:
            r += ' cbytes=%s' % _util.human_readable_size(self.cbytes)
        if self.cratio:
            r += ' cratio=%.1f' % self.cratio
        values_cls = type(self.values)
        r += '\n values=%s.%s' % (values_cls.__module__, values_cls.__name__)
        r += '>'
        return r
    def __repr__(self):
        return self.caption
    def __len__(self):
        # table length is the row count, i.e. length of the first column
        return len(self._columns[0])
    @property
    def shape(self):
        """Table shape: a 1-tuple of the row count."""
        return len(self),
    @property
    def ndim(self):
        return len(self.shape)
    @property
    def dtype(self):
        """Structured dtype assembled from the columns' names, dtypes and shapes."""
        items = []
        for n, c in zip(self._names, self._columns):
            # need to account for multidimensional columns
            t = (n, c.dtype) if len(c.shape) == 1 else \
                (n, c.dtype, c.shape[1:])
            items.append(t)
        return np.dtype(items)
    @property
    def nbytes(self):
        """Total uncompressed size in bytes, or None if unavailable for any column."""
        cols_nbytes = [_util.get_nbytes(c) for c in self._columns]
        if all(cols_nbytes):
            return sum(cols_nbytes)
        return None
    @property
    def cbytes(self):
        """Total compressed size in bytes, or None if unavailable for any column."""
        cols_cbytes = [_util.get_cbytes(c) for c in self._columns]
        if all(cols_cbytes):
            return sum(cols_cbytes)
        return None
    @property
    def cratio(self):
        """Compression ratio nbytes/cbytes, or None if either is unavailable."""
        nbytes = self.nbytes
        cbytes = self.cbytes
        if nbytes and cbytes:
            return nbytes / cbytes
        return None
    def copy(self, start=0, stop=None, blen=None, storage=None,
             create='table', **kwargs):
        """Copy rows [start, stop) into new storage; returns a wrapped table."""
        out = copy_table(self, start=start, stop=stop, blen=blen,
                         storage=storage, create=create, **kwargs)
        # can always wrap this
        return type(self)(out, names=self._names)
    def eval(self, expression, **kwargs):
        """Evaluate `expression` against the columns; returns a wrapped array."""
        out = eval_table(self, expression, **kwargs)
        return ChunkedArrayWrapper(out)
    def query(self, expression, vm='python', blen=None, storage=None, create='table',
              vm_kwargs=None, **kwargs):
        """Select the rows for which `expression` evaluates true."""
        condition = self.eval(expression, vm=vm, blen=blen, storage=storage, create='array',
                              vm_kwargs=vm_kwargs)
        out = self.compress(condition, blen=blen, storage=storage, create=create, **kwargs)
        # should already be wrapped
        return out
    def compress(self, condition, axis=None, out=None, blen=None, storage=None, create='table',
                 **kwargs):
        """Select rows where boolean `condition` is true; returns a wrapped table."""
        out = compress_table(condition, self, axis=axis, out=out, blen=blen, storage=storage,
                             create=create, **kwargs)
        return type(self)(out)
    def take(self, indices, axis=None, out=None, mode='raise', blen=None, storage=None,
             create='table', **kwargs):
        """Select rows at the given indices; returns a wrapped table."""
        out = take_table(self, indices, axis=axis, out=out, mode=mode, blen=blen,
                         storage=storage, create=create, **kwargs)
        return type(self)(out)
| {
"content_hash": "1ecc0b48e2b0700c3ee5269c93c67808",
"timestamp": "",
"source": "github",
"line_count": 959,
"max_line_length": 95,
"avg_line_length": 32.74452554744526,
"alnum_prop": 0.569900006369021,
"repo_name": "cggh/scikit-allel",
"id": "2d72effa0717b2c2acbeba1576089c69cc38787c",
"size": "31426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allel/chunked/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "194890"
},
{
"name": "Jupyter Notebook",
"bytes": "14432699"
},
{
"name": "Python",
"bytes": "960034"
}
],
"symlink_target": ""
} |
def partition(arr, l, r):
    """Lomuto partition of arr[l..r] around pivot arr[r] (in place).

    Elements <= pivot end up to the pivot's left; returns the pivot's
    final index.
    """
    x = arr[r]
    cur = l
    for i in range(l, r):
        if arr[i] <= x:
            arr[cur], arr[i] = arr[i], arr[cur]
            cur += 1
    arr[cur], arr[r] = arr[r], arr[cur]
    return cur
# finds the kth position (of the sorted array)
# in a given unsorted array i.e this function
# can be used to find both kth largest and
# kth smallest element in the array.
# ASSUMPTION: all elements in arr[] are distinct
def kthSmallest(arr, l, r, k):
    """Return the k-th smallest element of arr[l..r] (k is 1-based) via quickselect.

    Partially reorders arr in place. For an out-of-range k the C-style
    INT_MAX sentinel is returned.
    """
    # if k is smaller than number of elements in array
    if k > 0 and k <= r - l + 1:
        # Partition the array around the last element and get the pivot's
        # position in the sorted order
        index = partition(arr, l, r)
        # pivot landed exactly on the k-th position
        if index - l == k - 1:
            return arr[index]
        # k-th smallest lies in the left subarray
        if index - l > k - 1:
            return kthSmallest(arr, l, index - 1, k)
        # otherwise recur for the right subarray, adjusting k
        return kthSmallest(arr, index + 1, r,
                           k - index + l - 1)
    # BUG FIX: the original returned the undefined name INT_MAX, which raised
    # NameError; sys.maxsize plays the role of C's INT_MAX sentinel here.
    import sys
    return sys.maxsize
# Driver Code
arr = [ 10, 4, 5, 11, 6, 26, 8 ]
n = len(arr)
k = 3
# sorted arr is [4, 5, 6, 8, 10, 11, 26], so the 3rd smallest is 6
print("K-th smallest element is ", end = "")
print(kthSmallest(arr, 0, n - 1, k))
| {
"content_hash": "4e56bacc83c052575faca4d8a729d879",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 49,
"avg_line_length": 24.458333333333332,
"alnum_prop": 0.5979557069846678,
"repo_name": "teckoo/teckoo.github.io",
"id": "21e4763bfac090cd090dcbf605d373dcd29c295b",
"size": "1378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_posts/coding/summary/sort/quick_select.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14507"
},
{
"name": "HTML",
"bytes": "33249"
},
{
"name": "JavaScript",
"bytes": "18896"
},
{
"name": "Python",
"bytes": "20621"
},
{
"name": "Ruby",
"bytes": "124"
}
],
"symlink_target": ""
} |
# Diagnostic package __init__: prints module identity at import time so the
# surrounding test suite can observe how the package gets initialised.
print("__name__ is ", __name__)
print("__package__ is ", __package__)
import sys
# the package should already be registered in sys.modules by the time its
# __init__ body runs
print("From sys.modules", sys.modules["kitty"])
from kitty.speak.hello import speak
| {
"content_hash": "31d6d79fc3e3865e9d3707467e132185",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.6526946107784432,
"repo_name": "tempbottle/Nuitka",
"id": "542e87ea9c7566297b80d9ce9005da2ec5dd5459",
"size": "1062",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/packages/sub_package/kitty/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5518"
},
{
"name": "Batchfile",
"bytes": "1810"
},
{
"name": "C",
"bytes": "36149"
},
{
"name": "C++",
"bytes": "433315"
},
{
"name": "Python",
"bytes": "4356577"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
} |
import csv
from csv import excel_tab
from sklearn.feature_extraction.text import TfidfVectorizer as TFIDF
from sklearn.naive_bayes import MultinomialNB as MNB
from sklearn.linear_model import LogisticRegression as LR
from sklearn.cross_validation import cross_val_score
from bs4 import BeautifulSoup
import cPickle
import numpy as np
# 'Blog author gender classification data set associated with the paper
# (Mukherjee and Liu, EMNLP-2010)'
# from http://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html
class Detective_(object):
    """Predict the gender of a blog author from text.

    Uses a TF-IDF vectorizer plus a logistic-regression classifier trained on
    the blog author gender data set cited in the module header, with the
    fitted model and vocabulary persisted under pickles/.
    """
    def __init__(self, gender_data='texts/blog-gender-dataset.csv'):
        self.gender_data = gender_data
        # previously trained classifier and vocabulary, loaded from disk
        self.clf = self.load_pickle('classifier')
        self.vocab = self.load_pickle('vocab')
    def vectorize(self, X, vocab_=None):
        """Return (tf-idf feature matrix, feature names) for documents in X."""
        stopwords = open('texts/stopwords.txt').read().lower().split()
        vec = TFIDF(
            analyzer='word',
            stop_words=stopwords,
            encoding='latin-1',
            vocabulary=vocab_
        )
        print "Building X, Y..."
        X = vec.fit_transform(X).toarray()
        return X, vec.get_feature_names()
    def fit_classifier(self, X, y):
        """Fit a logistic-regression classifier, printing the 10-fold CV score."""
        # clf = MNB(alpha=1E-2)
        clf = LR()
        print "Running cross-validation."
        score = np.mean(cross_val_score(clf, X, y, cv=10))
        print score
        return clf.fit(X, y)
    def read_gender_data_file(self):
        """Parse the CSV data set into (texts, labels); labels are 'M'/'F'."""
        lines = csv.reader(open(self.gender_data, 'rU'), dialect=excel_tab)
        X, y = [], []
        labels = ['M', 'F']
        print "Reading in files"
        for line in lines:
            line = [i for i in line[0].split(',') if len(i)]
            if len(line):
                # the trailing field is the gender label; the rest is the text
                g = line.pop().strip().upper()
                if g in labels:
                    y.append(g)
                    X.append(" ".join(line))
        print "Read in files"
        return X, y
    def train_teller(self):
        """Train the model; return ((clf, 'classifier'), (vocab, 'vocab')) pairs."""
        X, y = self.read_gender_data_file()
        Y = np.array(y)
        X, vocab = self.vectorize(X)
        clf = self.fit_classifier(X, Y)
        print "Finishing fitting classifier"
        return (clf, 'classifier'), (vocab, 'vocab')
    def pickle_prediction_tools(self):
        """Train and persist the classifier and vocabulary under pickles/."""
        for el in self.train_teller():
            pickle_file = open('pickles/%s' % el[1], 'wb')
            cPickle.dump(el[0], pickle_file)
            pickle_file.close()
            print "Finished pickling", el[1]
    def load_pickle(self, item):
        """Load and return a previously pickled object from pickles/<item>."""
        pickle_file = open('pickles/%s' % str(item), 'rb')
        X = cPickle.load(pickle_file)
        pickle_file.close()
        print item, 'pickle loaded'
        return X
    def prettify_prediction(self, sample, pred, prob, top_fts):
        """Format a readable prediction via the texts/prediction.txt template."""
        genders = {"M": 'man', "F": 'woman'}
        snip = self.get_snippet(sample).encode('utf-8')
        with open('texts/prediction.txt', 'r') as f:
            p = f.read()
        prediction = p.format(
            snip, str("%.2f" % prob)[2:], genders[pred], top_fts
        )
        return prediction
    def show_most_informative_features(self, n=20):
        u"""Code adapted from stack overflow discussion;
        http://stackoverflow.com/questions/11116697/
        how-to-get-most-informative-features-for-scikit-learn-classifiers"""
        coefs_with_fns = sorted(zip(self.clf.coef_[0], self.vocab))
        top = zip(coefs_with_fns[:n], coefs_with_fns[:-(n + 1):-1])
        for (coef_1, fn_1), (coef_2, fn_2) in top:
            print "\t%.4f\t%-15s\t\t%.4f\t%-15s" % (coef_1, fn_1, coef_2, fn_2)
    def show_features_from_sample(self, sample, pred, n=10):
        """Return up to n+1 words from `sample` most indicative of the predicted gender."""
        coefs_with_fns = sorted(zip(self.clf.coef_[0], self.vocab))
        top = zip(coefs_with_fns, coefs_with_fns[::-1])
        out, sample_w = [], sample.split()
        for (coef_1, fn_1), (coef_2, fn_2) in top:
            # assumes higher coefficients indicate 'M' -- matches the ordering
            # used by show_most_informative_features; TODO confirm
            w = fn_2 if pred[0] == 'M' else fn_1
            if w in sample_w:
                out.append(w)
            if len(out) > n:
                break
        return ", ".join(out)
    def get_snippet(self, sample):
        """Return the first and last 15 words of the (HTML) sample, joined by an ellipsis."""
        bits = BeautifulSoup(sample).get_text().split()
        first, last = " ".join(bits[:15]), " ".join(bits[-15:])
        return " [ . . . ] ".join([first, last])
    def test_teller(self, sample):
        """Predict the author gender of `sample`; return a formatted report."""
        test_x, vocab_ = self.vectorize([sample], self.vocab)
        pred = self.clf.predict(test_x)
        prob = max(self.clf.predict_proba(test_x)[0])
        top_fts = self.show_features_from_sample(sample, pred)
        print zip(self.clf.classes_, self.clf.predict_proba(test_x)[0])
        return self.prettify_prediction(sample, pred[0], prob, top_fts)
if __name__ == '__main__':
    ft = Detective_()
    # ft.pickle_prediction_tools()
    # ft.train_teller()
    # default action: print the most informative features of the loaded model
    ft.show_most_informative_features()
| {
"content_hash": "f7d30390fb832a7922c7ee29be96b519",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 37.24409448818898,
"alnum_prop": 0.5714587737843552,
"repo_name": "corinnelhh/text_detective",
"id": "0dbe71de6d43210ead6e47cb1c5ad3cfbad08ff7",
"size": "4730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "detective.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "5754"
}
],
"symlink_target": ""
} |
import ipaddress
import logging
import re
from collections import Counter
from typing import Callable, List, Collection, Union, Tuple, Optional, Dict, Pattern, Any
from . import connectors
from .command_handlers import OTCommandHandler, OtCliCommandRunner, OtbrSshCommandRunner, OtbrAdbCommandRunner
from .connectors import Simulator
from .errors import UnexpectedCommandOutput, ExpectLineTimeoutError, CommandError, InvalidArgumentsError
from .types import ChildId, Rloc16, Ip6Addr, ThreadState, PartitionId, DeviceMode, RouterId, SecurityPolicy, Ip6Prefix, \
RouterTableEntry, NetifIdentifier
from .utils import match_line, constant_property
class OTCI(object):
"""
This class represents an OpenThread Controller Interface instance that provides versatile interfaces to
manipulate an OpenThread device.
"""
DEFAULT_EXEC_COMMAND_RETRY = 4 # A command is retried 4 times if failed.
__exec_command_retry = DEFAULT_EXEC_COMMAND_RETRY
    def __init__(self, otcmd: OTCommandHandler):
        """
        This method initializes an OTCI instance.
        :param otcmd: An OpenThread Command Handler instance to execute OpenThread CLI commands.
        """
        self.__otcmd: OTCommandHandler = otcmd
        # per-instance logger, named after this controller's repr
        self.__logger = logging.getLogger(name=str(self))
    def __repr__(self):
        """Gets the string representation of the OTCI instance."""
        return repr(self.__otcmd)
    def wait(self, duration: float, expect_line: Optional[Union[str, Pattern, Collection[Any]]] = None):
        """Wait for a given duration.
        :param duration: The duration (in seconds) wait for.
        :param expect_line: The line expected to output if given.
            Raise ExpectLineTimeoutError if expect_line is not found within the given duration.
        """
        self.log('info', "wait for %.3f seconds", duration)
        if expect_line is None:
            self.__otcmd.wait(duration)
        else:
            success = False
            # poll in 1-second slices so output can be checked as it arrives
            while duration > 0:
                output = self.__otcmd.wait(1)
                if any(match_line(line, expect_line) for line in output):
                    success = True
                    break
                duration -= 1
            if not success:
                raise ExpectLineTimeoutError(expect_line)
    def close(self):
        """Close the OTCI instance."""
        self.__otcmd.close()
    def execute_command(self,
                        cmd: str,
                        timeout: float = 10,
                        silent: bool = False,
                        already_is_ok: bool = True) -> List[str]:
        """Execute an OpenThread CLI command, retrying on failure.

        The command is attempted up to __exec_command_retry + 1 times with a
        2-second wait between attempts; the last failure is re-raised.
        """
        for i in range(self.__exec_command_retry + 1):
            try:
                return self.__execute_command(cmd, timeout, silent, already_is_ok=already_is_ok)
            except Exception:
                # brief back-off before retrying; re-raise on the final attempt
                self.wait(2)
                if i == self.__exec_command_retry:
                    raise
    def __execute_command(self,
                          cmd: str,
                          timeout: float = 10,
                          silent: bool = False,
                          already_is_ok: bool = True) -> List[str]:
        """Execute the OpenThread CLI command (single attempt).
        :param cmd: The command to execute.
        :param timeout: The command timeout.
        :param silent: Whether to run the command silent without logging.
        :returns: The command output as a list of lines, with the trailing
            'Done' status line stripped.
        :raises CommandError: if the command did not terminate successfully.
        """
        if not silent:
            self.log('info', '> %s', cmd)
        output = self.__otcmd.execute_command(cmd, timeout)
        if not silent:
            for line in output:
                self.log('info', '%s', line)
        if cmd in ('reset', 'factoryreset'):
            # reset commands do not terminate with a 'Done' status line
            return output
        if output[-1] == 'Done' or (already_is_ok and output[-1] == 'Error 24: Already'):
            output = output[:-1]
            return output
        else:
            raise CommandError(cmd, output)
    def set_execute_command_retry(self, n: int):
        """Set how many times execute_command retries a failed command (0 disables retries)."""
        assert n >= 0
        self.__exec_command_retry = n
    def shell(self, cmd: str, timeout: float = 10):
        """Run a command in the host shell and return its output lines."""
        self.log('info', '# %s', cmd)
        output = self.__otcmd.shell(cmd, timeout=timeout)
        for line in output:
            self.log('info', '%s', line)
        return output
    def set_logger(self, logger: logging.Logger):
        """Set the logger for the OTCI instance, or None to disable logging."""
        self.__logger = logger
    def log(self, level, fmt, *args, **kwargs):
        """Log a message at the given level, prefixed with this instance's repr."""
        if self.__logger is not None:
            getattr(self.__logger, level)('(%s) ' + fmt, repr(self), *args, **kwargs)
    def set_line_read_callback(self, callback: Optional[Callable[[str], Any]]):
        """Set the callback that will be called for each line output by the CLI."""
        self.__otcmd.set_line_read_callback(callback)
    #
    # Constant properties
    #
    @constant_property
    def version(self):
        """Returns the firmware version. (e.g. "OPENTHREAD/20191113-01411-gb2d66e424-dirty; SIMULATION; Nov 14 2020 14:24:38")"""
        return self.__parse_str(self.execute_command('version'))
    @constant_property
    def thread_version(self):
        """Get the Thread Version number."""
        return self.__parse_int(self.execute_command('thread version'))
    @constant_property
    def api_version(self):
        """Get the API version number (0 for devices predating `version api`)."""
        try:
            return self.__parse_int(self.execute_command('version api'))
        except ValueError:
            # If the device does not have `version api` command, it will print the firmware version, which would lead to ValueError.
            return 0
    #
    # Basic device operations
    #
    def ifconfig_up(self):
        """Bring up the IPv6 interface."""
        self.execute_command('ifconfig up')
    def ifconfig_down(self):
        """Bring down the IPv6 interface."""
        self.execute_command('ifconfig down')
    def get_ifconfig_state(self) -> bool:
        """Get the status of the IPv6 interface (True if up)."""
        return self.__parse_values(self.execute_command('ifconfig'), up=True, down=False)
    def thread_start(self):
        """Enable Thread protocol operation and attach to a Thread network."""
        self.execute_command('thread start')
    def thread_stop(self):
        """Disable Thread protocol operation and detach from a Thread network."""
        self.execute_command('thread stop')
    def reset(self):
        """Signal a platform reset."""
        self.execute_command('reset')
    def factory_reset(self):
        """Delete all stored settings, and signal a platform reset."""
        self.execute_command('factoryreset')
    #
    # Network Operations
    #
    # matches the final statistics line printed by `ping`, e.g.
    # "3 packets transmitted, 3 packets received. Round-trip min/avg/max = 1/1.0/2 ms."
    _PING_STATISTICS_PATTERN = re.compile(
        r'^(?P<transmitted>\d+) packets transmitted, (?P<received>\d+) packets received.(?: Packet loss = (?P<loss>\d+\.\d+)%.)?(?: Round-trip min/avg/max = (?P<min>\d+)/(?P<avg>\d+\.\d+)/(?P<max>\d+) ms.)?$'
    )
    def ping(self,
             ip: str,
             size: int = 8,
             count: int = 1,
             interval: float = 1,
             hoplimit: int = 64,
             timeout: float = 3) -> Dict:
        """Send an ICMPv6 Echo Request.
        The default arguments are consistent with https://github.com/openthread/openthread/blob/main/src/core/utils/ping_sender.hpp.
        :param ip: The target IPv6 address to ping.
        :param size: The number of data bytes in the payload. Default is 8.
        :param count: The number of ICMPv6 Echo Requests to be sent. Default is 1.
        :param interval: The interval between two consecutive ICMPv6 Echo Requests in seconds. The value may have fractional form, for example 0.5. Default is 1.
        :param hoplimit: The hoplimit of ICMPv6 Echo Request to be sent. Default is 64. See OPENTHREAD_CONFIG_IP6_HOP_LIMIT_DEFAULT in src/core/config/ip6.h.
        :param timeout: The maximum duration in seconds for the ping command to wait after the final echo request is sent. Default is 3.
        :returns: A dict of parsed ping statistics; empty if no statistics line was printed.
        """
        cmd = f'ping {ip} {size} {count} {interval} {hoplimit} {timeout}'
        # extra slack beyond the theoretical completion time of the ping run
        timeout_allowance = 3
        lines = self.execute_command(cmd, timeout=(count - 1) * interval + timeout + timeout_allowance)
        statistics = {}
        for line in lines:
            m = OTCI._PING_STATISTICS_PATTERN.match(line)
            if m is not None:
                if m.group('transmitted') is not None:
                    statistics['transmitted_packets'] = int(m.group('transmitted'))
                    statistics['received_packets'] = int(m.group('received'))
                if m.group('loss') is not None:
                    statistics['packet_loss'] = float(m.group('loss')) / 100
                if m.group('min') is not None:
                    statistics['round_trip_time'] = {
                        'min': int(m.group('min')),
                        'avg': float(m.group('avg')),
                        'max': int(m.group('max'))
                    }
        return statistics
    def ping_stop(self):
        """Stop sending ICMPv6 Echo Requests."""
        self.execute_command('ping stop')
    def discover(self, channel: Optional[int] = None) -> List[Dict[str, Any]]:
        """Perform an MLE Discovery operation."""
        return self.__scan_networks('discover', channel)
    def scan(self, channel: Optional[int] = None) -> List[Dict[str, Any]]:
        """Perform an IEEE 802.15.4 Active Scan."""
        return self.__scan_networks('scan', channel)
    def __scan_networks(self, cmd: str, channel: Optional[int] = None) -> List[Dict[str, Any]]:
        """Run a scan/discover command and parse its result table into dicts."""
        if channel is not None:
            cmd += f' {channel}'
        output = self.execute_command(cmd, timeout=10)
        if len(output) < 2:
            raise UnexpectedCommandOutput(output)
        networks = []
        for line in output[2:]:
            fields = line.strip().split('|')
            try:
                _, J, netname, extpanid, panid, extaddr, ch, dbm, lqi, _ = fields
            except Exception:
                # tolerate rows that do not match the expected column layout
                logging.warning('ignored output: %r', line)
                continue
            networks.append({
                'joinable': bool(int(J)),
                'network_name': netname.strip(),
                'extpanid': extpanid,
                'panid': int(panid, 16),
                'extaddr': extaddr,
                'channel': int(ch),
                'dbm': int(dbm),
                'lqi': int(lqi),
            })
        return networks
def scan_energy(self, duration: float = None, channel: int = None) -> Dict[int, int]:
"""Perform an IEEE 802.15.4 Energy Scan."""
cmd = 'scan energy'
if duration is not None:
cmd += f' {duration * 1000:d}'
if channel is not None:
cmd += f' {channel}'
output = self.execute_command(cmd, timeout=10)
if len(output) < 2:
raise UnexpectedCommandOutput(output)
channels = {}
for line in output[2:]:
fields = line.strip().split('|')
_, Ch, RSSI, _ = fields
channels[int(Ch)] = int(RSSI)
return channels
    def mac_send_data_request(self):
        """Instruct an Rx-Off-When-Idle device to send a Data Request mac frame to its parent."""
        self.execute_command('mac send datarequest')
    def mac_send_empty_data(self):
        """Instruct an Rx-Off-When-Idle device to send an Empty Data mac frame to its parent."""
        self.execute_command('mac send emptydata')
    # TODO: discover
    # TODO: dns resolve <hostname> [DNS server IP] [DNS server port]
    # TODO: fake /a/an <dst-ipaddr> <target> <meshLocalIid>
    # TODO: sntp query
    #
    # Set or get device/network parameters
    #
    def get_mode(self) -> str:
        """Get the Thread Device Mode value.
        -: no flags set (rx-off-when-idle, minimal Thread device, stable network data)
        r: rx-on-when-idle
        d: Full Thread Device
        n: Full Network Data
        """
        return self.__parse_str(self.execute_command('mode'))
    def set_mode(self, mode: str):
        """Set the Thread Device Mode value.
        -: no flags set (rx-off-when-idle, minimal Thread device, stable network data)
        r: rx-on-when-idle
        d: Full Thread Device
        n: Full Network Data
        """
        # DeviceMode validates/normalises the flag string before sending
        self.execute_command(f'mode {DeviceMode(mode)}')
    def get_extaddr(self) -> str:
        """Get the IEEE 802.15.4 Extended Address."""
        return self.__parse_extaddr(self.execute_command('extaddr'))
    def set_extaddr(self, extaddr: str):
        """Set the IEEE 802.15.4 Extended Address (validated as 64-bit hex)."""
        self.__validate_hex64b(extaddr)
        self.execute_command(f'extaddr {extaddr}')
    def get_eui64(self) -> str:
        """Get the factory-assigned IEEE EUI-64."""
        return self.__parse_eui64(self.execute_command('eui64'))
    def set_extpanid(self, extpanid: str):
        """Set the Thread Extended PAN ID value."""
        self.__validate_extpanid(extpanid)
        self.execute_command(f'extpanid {extpanid}')
    def get_extpanid(self) -> str:
        """Get the Thread Extended PAN ID value."""
        return self.__parse_extpanid(self.execute_command('extpanid'))
    def set_channel(self, ch: int):
        """Set the IEEE 802.15.4 Channel value."""
        self.execute_command('channel %d' % ch)
    def get_channel(self) -> int:
        """Get the IEEE 802.15.4 Channel value."""
        return self.__parse_int(self.execute_command('channel'))
    def get_preferred_channel_mask(self) -> int:
        """Get preferred channel mask."""
        return self.__parse_int(self.execute_command('channel preferred'))
    def get_supported_channel_mask(self) -> int:
        """Get supported channel mask."""
        return self.__parse_int(self.execute_command('channel supported'))
    def get_panid(self) -> int:
        """Get the IEEE 802.15.4 PAN ID value (output is parsed as hexadecimal)."""
        return self.__parse_int(self.execute_command('panid'), 16)
    def set_panid(self, panid: int):
        """Set the IEEE 802.15.4 PAN ID value."""
        self.execute_command('panid %d' % panid)
    def set_network_name(self, name: str):
        """Set network name (escaped before being sent to the CLI)."""
        self.execute_command('networkname %s' % self.__escape_escapable(name))
    def get_network_name(self) -> str:
        """Get network name."""
        return self.__parse_str(self.execute_command('networkname'))
    def get_network_key(self) -> str:
        """Get the network key."""
        return self.__parse_network_key(self.execute_command(self.__detect_networkkey_cmd()))
    def set_network_key(self, networkkey: str):
        """Set the network key."""
        self.__validate_network_key(networkkey)
        # the command name differs between OpenThread API versions
        cmd = self.__detect_networkkey_cmd()
        self.execute_command(f'{cmd} {networkkey}')
    def get_key_sequence_counter(self) -> int:
        """Get the Thread Key Sequence Counter."""
        return self.__parse_int(self.execute_command('keysequence counter'))
    def set_key_sequence_counter(self, counter: int):
        """Set the Thread Key Sequence Counter."""
        self.execute_command(f'keysequence counter {counter}')
    def get_key_sequence_guard_time(self) -> int:
        """Get Thread Key Switch Guard Time (in hours)."""
        return self.__parse_int(self.execute_command('keysequence guardtime'))
    def set_key_sequence_guard_time(self, hours: int):
        """Set Thread Key Switch Guard Time (in hours); 0 means Thread Key Switch immediately if key index match."""
        self.execute_command(f'keysequence guardtime {hours}')
def get_cca_threshold(self) -> int:
"""Get the CCA threshold in dBm measured at antenna connector per IEEE 802.15.4 - 2015 section 10.1.4."""
output = self.execute_command(f'ccathreshold')
val = self.__parse_str(output)
if not val.endswith(' dBm'):
raise UnexpectedCommandOutput(output)
return int(val[:-4])
def set_cca_threshold(self, val: int):
"""Set the CCA threshold measured at antenna connector per IEEE 802.15.4 - 2015 section 10.1.4."""
self.execute_command(f'ccathreshold {val}')
    def get_promiscuous(self) -> bool:
        """Get radio promiscuous property."""
        return self.__parse_Enabled_or_Disabled(self.execute_command('promiscuous'))
    def enable_promiscuous(self):
        """Enable radio promiscuous operation and print raw packet content."""
        self.execute_command('promiscuous enable')
    def disable_promiscuous(self):
        """Disable radio promiscuous operation."""
        self.execute_command('promiscuous disable')
    def get_txpower(self) -> int:
        """Get the transmit power in dBm.
        :raises UnexpectedCommandOutput: if the output does not end with ' dBm'.
        """
        line = self.__parse_str(self.execute_command('txpower'))
        if not line.endswith(' dBm'):
            raise UnexpectedCommandOutput([line])
        return int(line.split()[0])
    def set_txpower(self, val: int):
        """Set the transmit power in dBm."""
        self.execute_command(f'txpower {val}')
    # TODO: fem
    # TODO: fem lnagain
    # TODO: fem lnagain <LNA gain>
    # TODO: mac retries direct
    # TODO: mac retries direct
    # TODO: mac retries indirect
    # TODO: mac retries indirect <number>
    #
    # Basic Node states and properties
    #
    def get_state(self) -> ThreadState:
        """Get the current Thread state."""
        return ThreadState(self.__parse_str(self.execute_command('state')))
    def set_state(self, state: str):
        """Try to switch to state detached, child, router or leader."""
        self.execute_command(f'state {state}')
    def get_rloc16(self) -> int:
        """Get the Thread RLOC16 value."""
        return self.__parse_int(self.execute_command('rloc16'), 16)
    def get_router_id(self) -> int:
        """Get the Thread Router ID value."""
        # the router ID occupies the upper bits of the RLOC16 above the 10-bit child ID
        return self.get_rloc16() >> 10
    def prefer_router_id(self, routerid: int):
        """Prefer a Router ID when solicit router id from Leader."""
        self.execute_command(f'preferrouterid {routerid}')
    def is_singleton(self) -> bool:
        """Return True if the device's 'singleton' command reports true."""
        return self.__parse_values(self.execute_command('singleton'), true=True, false=False)
    #
    # RCP related utilities
    #
    def get_rcp_version(self) -> str:
        """Get the version string reported by the 'rcp version' command."""
        return self.__parse_str(self.execute_command('rcp version'))
    #
    # Unsecure port utilities
    #
    def get_unsecure_ports(self) -> List[int]:
        """Get all ports from the allowed unsecured port list."""
        return self.__parse_int_list(self.execute_command('unsecureport get'))
    def add_unsecure_port(self, port: int):
        """Add a port to the allowed unsecured port list."""
        self.execute_command(f'unsecureport add {port}')
    def remove_unsecure_port(self, port: int):
        """Remove a port from the allowed unsecured port list."""
        self.execute_command(f'unsecureport remove {port}')
    def clear_unsecure_ports(self):
        """Remove all ports from the allowed unsecured port list."""
        self.execute_command('unsecureport remove all')
    #
    # Leader configurations
    #
    def get_preferred_partition_id(self) -> PartitionId:
        """Get the preferred Thread Leader Partition ID."""
        return PartitionId(self.__parse_int(self.execute_command(self.__get_partition_preferred_cmd())))
    def set_preferred_partition_id(self, parid: int):
        """Set the preferred Thread Leader Partition ID."""
        self.execute_command(f'{self.__get_partition_preferred_cmd()} {parid}')
    def __get_partition_preferred_cmd(self) -> str:
        """Return the version-appropriate CLI command for the preferred partition ID."""
        return 'partitionid preferred' if self.api_version >= 51 else 'leaderpartitionid'
    def get_leader_weight(self) -> int:
        """Get the Thread Leader Weight."""
        return self.__parse_int(self.execute_command('leaderweight'))
    def set_leader_weight(self, weight: int):
        """Set the Thread Leader Weight."""
        self.execute_command(f'leaderweight {weight}')
    # maps the labels printed by the 'leaderdata' CLI command to the keys
    # returned by get_leader_data
    __LEADER_DATA_KEY_MAP = {
        'Partition ID': 'partition_id',
        'Weighting': 'weight',
        'Data Version': 'data_ver',
        'Stable Data Version': 'stable_data_ver',
        'Leader Router ID': 'leader_id',
    }
    def get_leader_data(self) -> Dict[str, int]:
        """Get the Thread Leader Data.
        :raises UnexpectedCommandOutput: if an output line's label is not recognised.
        """
        data = {}
        output = self.execute_command('leaderdata')
        try:
            for line in output:
                k, v = line.split(': ')
                data[OTCI.__LEADER_DATA_KEY_MAP[k]] = int(v)
        except KeyError:
            raise UnexpectedCommandOutput(output)
        return data
#
# Router configurations
#
    def get_router_selection_jitter(self) -> int:
        """Get the ROUTER_SELECTION_JITTER value."""
        return self.__parse_int(self.execute_command('routerselectionjitter'))
    def set_router_selection_jitter(self, jitter: int):
        """Set the ROUTER_SELECTION_JITTER value."""
        self.execute_command(f'routerselectionjitter {jitter}')
    def get_network_id_timeout(self) -> int:
        """Get the NETWORK_ID_TIMEOUT parameter used in the Router role."""
        return self.__parse_int(self.execute_command('networkidtimeout'))
    def set_network_id_timeout(self, timeout: int):
        """Set the NETWORK_ID_TIMEOUT parameter used in the Router role."""
        self.execute_command(f'networkidtimeout {timeout}')
    def get_parent_priority(self) -> int:
        """Get the assigned parent priority value, -2 means not assigned."""
        return self.__parse_int(self.execute_command('parentpriority'))
    def set_parent_priority(self, priority: int):
        """Set the assigned parent priority value: 1, 0, -1 or -2."""
        self.execute_command(f'parentpriority {priority}')
    def get_router_upgrade_threshold(self) -> int:
        """Get the ROUTER_UPGRADE_THRESHOLD value."""
        return self.__parse_int(self.execute_command('routerupgradethreshold'))
    def set_router_upgrade_threshold(self, threshold: int):
        """Set the ROUTER_UPGRADE_THRESHOLD value."""
        self.execute_command(f'routerupgradethreshold {threshold}')
    def get_router_downgrade_threshold(self) -> int:
        """Get the ROUTER_DOWNGRADE_THRESHOLD value."""
        return self.__parse_int(self.execute_command('routerdowngradethreshold'))
    def set_router_downgrade_threshold(self, threshold: int):
        """Set the ROUTER_DOWNGRADE_THRESHOLD value."""
        self.execute_command(f'routerdowngradethreshold {threshold}')
    def get_router_eligible(self) -> bool:
        """Indicates whether the router role is enabled or disabled."""
        return self.__parse_Enabled_or_Disabled(self.execute_command('routereligible'))
    def enable_router_eligible(self):
        """Enable the router role."""
        self.execute_command('routereligible enable')
    def disable_router_eligible(self):
        """Disable the router role."""
        self.execute_command('routereligible disable')
def get_router_list(self) -> List[RouterId]:
"""Get allocated Router IDs."""
line = self.__parse_str(self.execute_command('router list'))
return list(map(RouterId, line.strip().split()))
    def get_router_table(self) -> Dict[RouterId, RouterTableEntry]:
        """Get the table of routers, keyed by Router ID."""
        output = self.execute_command('router table')
        if len(output) < 2:
            raise UnexpectedCommandOutput(output)
        #
        # Example output:
        #
        # | ID | RLOC16 | Next Hop | Path Cost | LQ In | LQ Out | Age | Extended MAC |
        # +----+--------+----------+-----------+-------+--------+-----+------------------+
        # | 21 | 0x5400 | 21 | 0 | 3 | 3 | 5 | d28d7f875888fccb |
        # | 56 | 0xe000 | 56 | 0 | 0 | 0 | 182 | f2d92a82c8d8fe43 |
        # Done
        #
        headers = self.__split_table_row(output[0])
        table = {}
        # output[1] is the `+---+` separator row; data rows start at output[2].
        for line in output[2:]:
            line = line.strip()
            if not line:
                continue
            fields = self.__split_table_row(line)
            if len(fields) != len(headers):
                raise UnexpectedCommandOutput(output)
            col = lambda colname: self.__get_table_col(colname, headers, fields)
            id = col('ID')
            table[RouterId(id)] = router = RouterTableEntry({
                'id': RouterId(id),
                'rloc16': Rloc16(col('RLOC16'), 16),
                'next_hop': int(col('Next Hop')),
                'path_cost': int(col('Path Cost')),
                'lq_in': int(col('LQ In')),
                'lq_out': int(col('LQ Out')),
                'age': int(col('Age')),
                'extaddr': col('Extended MAC'),
            })
            if 'Link' in headers:
                router['link'] = int(col('Link'))
            else:
                # support older version of OT which does not output `Link` field
                router['link'] = self.get_router_info(router['id'], silent=True)['link']
        return table
def get_router_info(self, id: int, silent: bool = False) -> RouterTableEntry:
cmd = f'router {id}'
info = {}
output = self.execute_command(cmd, silent=silent)
items = [line.strip().split(': ') for line in output]
headers = [h for h, _ in items]
fields = [f for _, f in items]
col = lambda colname: self.__get_table_col(colname, headers, fields)
return RouterTableEntry({
'id': RouterId(id),
'rloc16': Rloc16(col('Rloc'), 16),
'alloc': int(col('Alloc')),
'next_hop': int(col('Next Hop'), 16) >> 10, # convert RLOC16 to Router ID
'link': int(col('Link')),
})
#
# Router utilities: Child management
#
    def get_child_table(self) -> Dict[ChildId, Dict[str, Any]]:
        """Get the table of attached children, keyed by Child ID."""
        output = self.execute_command('child table')
        if len(output) < 2:
            raise UnexpectedCommandOutput(output)
        #
        # Example output:
        # | ID | RLOC16 | Timeout | Age | LQ In | C_VN |R|D|N|Ver|CSL|QMsgCnt| Extended MAC |
        # +-----+--------+------------+------------+-------+------+-+-+-+---+---+-------+------------------+
        # | 1 | 0xc801 | 240 | 24 | 3 | 131 |1|0|0| 3| 0 | 0 | 4ecede68435358ac |
        # | 2 | 0xc802 | 240 | 2 | 3 | 131 |0|0|0| 3| 1 | 0 | a672a601d2ce37d8 |
        # Done
        #
        headers = self.__split_table_row(output[0])
        table = {}
        # output[1] is the `+---+` separator row; data rows start at output[2].
        for line in output[2:]:
            line = line.strip()
            if not line:
                continue
            fields = self.__split_table_row(line)
            col = lambda colname: self.__get_table_col(colname, headers, fields)
            id = int(col("ID"))
            # The R/D/N flag columns encode the child's device mode string.
            r, d, n = int(col("R")), int(col("D")), int(col("N"))
            mode = DeviceMode(f'{"r" if r else ""}{"d" if d else ""}{"n" if n else ""}')
            child = {
                'id': ChildId(id),
                'rloc16': Rloc16(col('RLOC16'), 16),
                'timeout': int(col('Timeout')),
                'age': int(col('Age')),
                'lq_in': int(col('LQ In')),
                'c_vn': int(col('C_VN')),
                'mode': mode,
                'extaddr': col('Extended MAC')
            }
            # Optional columns — guarded because not every OT version prints them.
            if 'Ver' in headers:
                child['ver'] = int(col('Ver'))
            if 'CSL' in headers:
                child['csl'] = bool(int(col('CSL')))
            if 'QMsgCnt' in headers:
                child['qmsgcnt'] = int(col('QMsgCnt'))
            table[ChildId(id)] = child
        return table
#
# DNS server & client utilities
#
_IPV6_SERVER_PORT_PATTERN = re.compile(r'\[(.*)\]:(\d+)')
    def dns_get_config(self):
        """Get DNS client query config.

        Returns:
            Dict with any of: 'server' as (Ip6Addr, port), 'response_timeout',
            'max_tx_attempts', 'recursion_desired' — only keys the CLI reported.
        """
        output = self.execute_command('dns config')
        config = {}
        for line in output:
            k, v = line.split(': ')
            if k == 'Server':
                # NOTE(review): re.match() returns None on an unexpected server
                # format, which would raise AttributeError here — confirm the
                # CLI always prints `[addr]:port`.
                ip, port = re.match(OTCI._IPV6_SERVER_PORT_PATTERN, v).groups()
                config['server'] = (Ip6Addr(ip), int(port))
            elif k == 'ResponseTimeout':
                # Strips a 3-character unit suffix (presumably ' ms') — TODO confirm.
                config['response_timeout'] = int(v[:-3])
            elif k == 'MaxTxAttempts':
                config['max_tx_attempts'] = int(v)
            elif k == 'RecursionDesired':
                config['recursion_desired'] = (v == 'yes')
            else:
                logging.warning("dns config ignored: %s", line)
        return config
    def dns_set_config(self,
                       server: Tuple[Union[str, ipaddress.IPv6Address], int],
                       response_timeout: Union[int, None] = None,
                       max_tx_attempts: Union[int, None] = None,
                       recursion_desired: Union[bool, None] = None):
        """Set DNS client query config.

        Args:
            server: (address, port) of the DNS server.
            response_timeout: Query response timeout; may only be given alone
                or together with the later parameters.
            max_tx_attempts: Maximum transmit attempts; requires
                `response_timeout` to be given as well.
            recursion_desired: Whether recursion is desired; requires
                `max_tx_attempts` to be given as well.
        """
        cmd = f'dns config {str(server[0])} {server[1]}'
        if response_timeout is not None:
            cmd += f' {response_timeout}'
        # The CLI takes these as positional arguments, so each optional value
        # requires all earlier ones to be present.
        assert max_tx_attempts is None or response_timeout is not None, "must specify `response_timeout` if `max_tx_attempts` is specified."
        if max_tx_attempts is not None:
            cmd += f' {max_tx_attempts}'
        assert recursion_desired is None or max_tx_attempts is not None, 'must specify `max_tx_attempts` if `recursion_desired` is specified.'
        if recursion_desired is not None:
            cmd += f' {1 if recursion_desired else 0}'
        self.execute_command(cmd)
def dns_get_compression(self) -> bool:
"""Get DNS compression mode."""
return self.__parse_Enabled_or_Disabled(self.execute_command('dns compression'))
def dns_enable_compression(self):
"""Enable DNS compression mode."""
self.execute_command('dns compression enable')
def dns_disable_compression(self):
"""Disable DNS compression mode."""
self.execute_command('dns compression disable')
    def dns_browse(self, service: str) -> List[Dict]:
        """Browse DNS service instances.

        Returns:
            One dict per discovered instance, with SRV (port/priority/weight),
            host/address, TXT data and the associated TTLs.
        """
        cmd = f'dns browse {service}'
        # 30.0 is presumably a longer command timeout for slow browsing —
        # confirm against execute_command's signature.
        output = '\n'.join(self.execute_command(cmd, 30.0))
        result = []
        for ins, port, priority, weight, srv_ttl, hostname, address, aaaa_ttl, txt_data, txt_ttl in re.findall(
                r'(.*?)\s+Port:(\d+), Priority:(\d+), Weight:(\d+), TTL:(\d+)\s*Host:(\S+)\s+HostAddress:(\S+) TTL:(\d+)\s+TXT:(\[.*?\]) TTL:(\d+)',
                output):
            result.append({
                'instance': ins,
                'service': service,
                'port': int(port),
                'priority': int(priority),
                'weight': int(weight),
                'host': hostname,
                'address': Ip6Addr(address),
                'txt': self.__parse_srp_server_service_txt(txt_data),
                'srv_ttl': int(srv_ttl),
                'txt_ttl': int(txt_ttl),
                'aaaa_ttl': int(aaaa_ttl),
            })
        return result
    def dns_resolve(self, hostname: str) -> List[Dict]:
        """Resolve a DNS host name.

        Returns:
            A list of {'address': Ip6Addr, 'ttl': int} dicts.
        """
        cmd = f'dns resolve {hostname}'
        output = self.execute_command(cmd, 30.0)
        # Only the first line is parsed; it is assumed to look like
        # `<name> - <addr> TTL:<ttl> <addr> TTL:<ttl> ...` where address and
        # TTL tokens alternate after the ` - ` separator.
        dns_resp = output[0]
        addrs = dns_resp.strip().split(' - ')[1].split(' ')
        ips = [Ip6Addr(item.strip()) for item in addrs[::2]]
        ttls = [int(item.split('TTL:')[1]) for item in addrs[1::2]]
        return [{
            'address': ip,
            'ttl': ttl,
        } for ip, ttl in zip(ips, ttls)]
    def dns_resolve_service(self, instance: str, service: str) -> Dict:
        """Resolve a service instance.

        Raises:
            CommandError: if the output does not match the expected format.
        """
        instance = self.__escape_escapable(instance)
        cmd = f'dns service {instance} {service}'
        output = self.execute_command(cmd, 30.0)
        m = re.match(
            r'.*Port:(\d+), Priority:(\d+), Weight:(\d+), TTL:(\d+)\s+Host:(.*?)\s+HostAddress:(\S+) TTL:(\d+)\s+TXT:(\[.*?\]) TTL:(\d+)',
            '\t'.join(output))
        if m:
            port, priority, weight, srv_ttl, hostname, address, aaaa_ttl, txt_data, txt_ttl = m.groups()
            return {
                'instance': instance,
                'service': service,
                'port': int(port),
                'priority': int(priority),
                'weight': int(weight),
                'host': hostname,
                'address': Ip6Addr(address),
                'txt': self.__parse_srp_server_service_txt(txt_data),
                'srv_ttl': int(srv_ttl),
                'txt_ttl': int(txt_ttl),
                'aaaa_ttl': int(aaaa_ttl),
            }
        else:
            raise CommandError(cmd, output)
#
# SRP server & client utilities
#
    def srp_server_get_state(self) -> str:
        """Get the SRP server state."""
        return self.__parse_str(self.execute_command('srp server state'))
    def srp_server_enable(self):
        """Enable SRP server."""
        self.execute_command('srp server enable')
    def srp_server_disable(self):
        """Disable SRP server."""
        self.execute_command('srp server disable')
    def srp_server_get_domain(self) -> str:
        """Get the SRP server domain."""
        return self.__parse_str(self.execute_command('srp server domain'))
    def srp_server_set_domain(self, domain: str):
        """Set the SRP server domain."""
        self.execute_command(f'srp server domain {domain}')
    def srp_server_get_hosts(self) -> List[Dict]:
        """Get SRP server registered hosts."""
        return self.__parse_srp_server_hosts(self.execute_command('srp server host'))
    def srp_server_get_services(self) -> List[Dict]:
        """Get SRP server registered services."""
        output = self.execute_command('srp server service')
        return self.__parse_srp_server_services(output)
    def __parse_srp_server_hosts(self, output: List[str]) -> List[Dict]:
        """Parse `srp server host` output into a list of host dicts.

        An unindented line starts a new host entry; indented `key: value`
        lines fill in that entry's fields.
        """
        result = []
        info = None
        for line in output:
            if not line.startswith(' '):
                info = {'host': line}
                result.append(info)
            else:
                # NOTE(review): assumes the first output line is a host name —
                # an indented first line would hit `info` while it is still None.
                k, v = line.strip().split(': ')
                if k == 'deleted':
                    if v not in ('true', 'false'):
                        raise UnexpectedCommandOutput(output)
                    info['deleted'] = (v == 'true')
                elif k == 'addresses':
                    # addresses are printed as `[ip1, ip2, ...]`
                    if not v.startswith('[') or not v.endswith(']'):
                        raise UnexpectedCommandOutput(output)
                    v = v[1:-1]
                    info['addresses'] = list(map(Ip6Addr, v.split(', ')))
                else:
                    raise UnexpectedCommandOutput(output)
        return result
    def __parse_srp_server_services(self, output: List[str]) -> List[Dict]:
        """Parse `srp server service` output into a list of service dicts.

        An unindented line starts a new service entry; indented `key: value`
        lines fill in that entry's fields.
        """
        result = []
        info = None
        for line in output:
            if not line.startswith(' '):
                info = {'instance': line}
                result.append(info)
            else:
                k, v = line.strip().split(': ')
                if k == 'deleted':
                    if v not in ('true', 'false'):
                        raise UnexpectedCommandOutput(output)
                    info['deleted'] = (v == 'true')
                elif k == 'addresses':
                    # addresses are printed as `[ip1, ip2, ...]`
                    if not v.startswith('[') or not v.endswith(']'):
                        raise UnexpectedCommandOutput(output)
                    v = v[1:-1]
                    info['addresses'] = list(map(Ip6Addr, v.split(', ')))
                elif k == 'subtypes':
                    # literal `(null)` means the service has no subtypes
                    info[k] = list() if v == '(null)' else list(v.split(','))
                elif k in ('port', 'weight', 'priority', 'ttl', 'lease', 'key-lease'):
                    info[k] = int(v)
                elif k in ('host',):
                    info[k] = v
                elif k == 'TXT':
                    info['txt'] = self.__parse_srp_server_service_txt(v)
                else:
                    raise UnexpectedCommandOutput(output)
        return result
def __parse_srp_server_service_txt(self, txt: str) -> Dict[str, Union[bytes, bool]]:
# example value: [txt11=76616c3131, txt12=76616c3132]
assert txt.startswith('[') and txt.endswith(']')
txt_dict = {}
for entry in txt[1:-1].split(', '):
if not entry:
continue
equal_pos = entry.find('=')
if equal_pos != -1:
k, v = entry[:equal_pos], entry[equal_pos + 1:]
txt_dict[k] = bytes(int(v[i:i + 2], 16) for i in range(0, len(v), 2))
else:
txt_dict[entry] = True
return txt_dict
def srp_server_get_lease(self) -> Tuple[int, int, int, int]:
"""Get SRP server LEASE & KEY-LEASE range (in seconds)."""
lines = self.execute_command(f'srp server lease')
return tuple([int(line.split(':')[1].strip()) for line in lines])
def srp_server_set_lease(self, min_lease: int, max_lease: int, min_key_lease: int, max_key_lease: int):
"""Configure SRP server LEASE & KEY-LEASE range (in seconds)."""
self.execute_command(f'srp server lease {min_lease} {max_lease} {min_key_lease} {max_key_lease}')
def srp_client_get_state(self) -> bool:
"""Get SRP client state."""
return self.__parse_Enabled_or_Disabled(self.execute_command('srp client state'))
def srp_client_start(self, server_ip: Union[str, ipaddress.IPv6Address], server_port: int):
"""Start SRP client."""
self.execute_command(f'srp client start {str(server_ip)} {server_port}')
def srp_client_stop(self):
"""Stop SRP client."""
self.execute_command('srp client stop')
def srp_client_get_autostart(self) -> bool:
"""Get SRP client autostart mode."""
return self.__parse_Enabled_or_Disabled(self.execute_command('srp client autostart'))
def srp_client_enable_autostart(self):
"""Enable SRP client autostart mode."""
self.execute_command('srp client autostart enable')
def srp_client_disable_autostart(self):
"""Disable SRP client autostart mode."""
self.execute_command('srp client autostart disable')
def srp_client_get_callback(self) -> bool:
"""Get SRP client callback mode."""
return self.__parse_Enabled_or_Disabled(self.execute_command('srp client callback'))
def srp_client_enable_callback(self):
"""Enable SRP client callback mode."""
self.execute_command('srp client callback enable')
def srp_client_disable_callback(self):
"""Disable SRP client callback mode."""
self.execute_command('srp client callback disable')
    def srp_client_set_host_name(self, name: str):
        """Set SRP client host name."""
        self.execute_command(f'srp client host name {name}')
    def srp_client_get_host(self) -> Dict:
        """Get SRP client host info (name, state and registered addresses)."""
        output = self.__parse_str(self.execute_command('srp client host'))
        return self.__parse_srp_client_host(output)
    # Matches `name:"<host>", state:<state>, addrs:[ip1, ip2, ...]`;
    # the name may also be the literal `(null)`.
    _SRP_CLIENT_HOST_PATTERN = re.compile(r'name:("(.*)"|(\(null\))), state:(\S+), addrs:\[(.*)\]')
    def __parse_srp_client_host(self, line: str) -> Dict:
        """Parse one `srp client host` output line into a dict."""
        m = re.match(OTCI._SRP_CLIENT_HOST_PATTERN, line)
        if not m:
            raise UnexpectedCommandOutput([line])
        # group 2 is the quoted host name; it is None when the name is `(null)`.
        _, host, _, state, addrs = m.groups()
        return {
            'host': host or '',
            'state': state,
            'addresses': [Ip6Addr(ip) for ip in addrs.split(', ')] if addrs else [],
        }
    def srp_client_get_host_name(self) -> str:
        """Get SRP client host name ('' when unset)."""
        name = self.__parse_str(self.execute_command('srp client host name'))
        # The CLI prints the literal `(null)` when no host name is configured.
        return name if name != '(null)' else ''
    def srp_client_get_host_addresses(self) -> List[Ip6Addr]:
        """Get SRP client host addresses."""
        return self.__parse_ip6addr_list(self.execute_command('srp client host address'))
    def srp_client_set_host_addresses(self, *addrs: Union[str, ipaddress.IPv6Address]):
        """Set SRP client host addresses."""
        self.execute_command(f'srp client host address {" ".join(map(str, addrs))}')
    def srp_client_get_host_state(self):
        """Get SRP client host state."""
        return self.__parse_str(self.execute_command('srp client host state'))
    def srp_client_remove_host(self, remove_key_lease=False):
        """Remove SRP client host.

        Args:
            remove_key_lease: Also remove the host's key lease when True.
        """
        cmd = 'srp client host remove'
        if remove_key_lease:
            cmd += ' 1'
        self.execute_command(cmd)
    def srp_client_get_services(self) -> List[Dict]:
        """Get SRP client services."""
        output = self.execute_command('srp client service')
        return [self.__parse_srp_client_service(line) for line in output]
    # Matches one `srp client service` line; see __parse_srp_client_service
    # for an example of the expected format.
    _SRP_CLIENT_SERVICE_PATTERN = re.compile(
        r'instance:"(.*)", name:"(.*)", state:(\S+), port:(\d+), priority:(\d+), weight:(\d+)')
    def __parse_srp_client_service(self, line: str) -> Dict:
        """Parse one `srp client service` output line into a dict."""
        # e.g. instance:"ins2", name:"_meshcop._udp", state:ToAdd, port:2000, priority:2, weight:2
        m = OTCI._SRP_CLIENT_SERVICE_PATTERN.match(line)
        if m is None:
            raise UnexpectedCommandOutput([line])
        instance, service, state, port, priority, weight = m.groups()
        port, priority, weight = int(port), int(priority), int(weight)
        return {
            'instance': instance,
            'service': service,
            'state': state,
            'port': port,
            'priority': priority,
            'weight': weight,
        }
    def srp_client_add_service(self,
                               instance: str,
                               service: str,
                               port: int,
                               priority: int = 0,
                               weight: int = 0,
                               txt: Dict[str, Union[str, bytes, bool]] = None):
        """Register a service with the SRP client.

        Args:
            instance: Service instance name.
            service: Service name.
            port: Service port number.
            priority: SRV record priority.
            weight: SRV record weight.
            txt: Optional TXT entries; a True value means a value-less key.
        """
        instance = self.__escape_escapable(instance)
        cmd = f'srp client service add {instance} {service} {port} {priority} {weight}'
        if txt:
            cmd += f' {self.__txt_to_hex(txt)}'
        self.execute_command(cmd)
def srp_client_remove_service(self, instance: str, service: str):
"""Remove a service from SRP client."""
self.execute_command(f'srp client service remove {instance} {service}')
def srp_client_clear_service(self, instance: str, service: str):
"""Remove a service from SRP client without notifying the SRP server."""
self.execute_command(f'srp client service clear {instance} {service}')
def srp_client_get_key_lease_interval(self) -> int:
"""Get SRP client key lease interval (in seconds)."""
return self.__parse_int(self.execute_command('srp client keyleaseinterval'))
def srp_client_set_key_lease_interval(self, interval: int):
"""Set SRP client key lease interval (in seconds)."""
self.execute_command(f'srp client keyleaseinterval {interval}')
def srp_client_get_lease_interval(self) -> int:
"""Get SRP client lease interval (in seconds)."""
return self.__parse_int(self.execute_command('srp client leaseinterval'))
def srp_client_set_lease_interval(self, interval: int):
"""Set SRP client lease interval (in seconds)."""
self.execute_command(f'srp client leaseinterval {interval}')
    def srp_client_get_server(self) -> Tuple[Ip6Addr, int]:
        """Get the SRP server (IP, port)."""
        result = self.__parse_str(self.execute_command('srp client server'))
        # NOTE(review): re.match() returns None on an unexpected format, which
        # would raise AttributeError here — confirm the CLI prints `[addr]:port`.
        ip, port = re.match(OTCI._IPV6_SERVER_PORT_PATTERN, result).groups()
        return Ip6Addr(ip), int(port)
def srp_client_get_service_key(self) -> bool:
"""Get SRP client "service key record inclusion" mode."""
return self.__parse_Enabled_or_Disabled(self.execute_command('srp client service key'))
def srp_client_enable_service_key(self):
"""Enable SRP client "service key record inclusion" mode."""
self.execute_command('srp client service key enable')
def srp_client_disable_service_key(self):
"""Disable SRP client "service key record inclusion" mode."""
self.execute_command('srp client service key disable')
def __split_table_row(self, row: str) -> List[str]:
if not (row.startswith('|') and row.endswith('|')):
raise ValueError(row)
fields = row.split('|')
fields = [x.strip() for x in fields[1:-1]]
return fields
def __get_table_col(self, colname: str, headers: List[str], fields: List[str]) -> str:
return fields[headers.index(colname)]
def get_child_list(self) -> List[ChildId]:
"""Get attached Child IDs."""
line = self.__parse_str(self.execute_command(f'child list'))
return [ChildId(id) for id in line.strip().split()]
    def get_child_info(self, child: Union[ChildId, Rloc16]) -> Dict[str, Any]:
        """Get info about one attached child via the `child <id>` command."""
        output = self.execute_command(f'child {child}')
        info = {}
        for line in output:
            k, v = line.split(': ')
            if k == 'Child ID':
                info['id'] = int(v)
            elif k == 'Rloc':
                info['rloc16'] = int(v, 16)
            elif k == 'Ext Addr':
                info['extaddr'] = v
            elif k == 'Mode':
                info['mode'] = DeviceMode(v)
            elif k == 'Net Data':
                info['c_vn'] = int(v)
            elif k == 'Timeout':
                info['timeout'] = int(v)
            elif k == 'Age':
                info['age'] = int(v)
            elif k == 'Link Quality In':
                info['lq_in'] = int(v)
            elif k == 'RSSI':
                info['rssi'] = int(v)
            else:
                # Unknown fields are logged and skipped rather than rejected.
                self.log('warning', "Child info %s: %s ignored", k, v)
        return info
    def get_child_ipaddrs(self) -> Dict[Rloc16, List[Ip6Addr]]:
        """Get the list of IP addresses stored for MTD children.
        Note: Each MTD child may have multiple IP addresses.
        """
        output = self.execute_command('childip')
        ipaddrs = {}
        # Each line is `<rloc16>: <ip>`; a child appears once per address.
        for line in output:
            rloc16, ip = line.split(': ')
            rloc16 = Rloc16(rloc16, 16)
            ipaddrs.setdefault(rloc16, []).append(Ip6Addr(ip.strip()))
        return ipaddrs
#
# Child configurations
#
    def get_max_children(self) -> int:
        """Get the Thread maximum number of allowed children."""
        return self.__parse_int(self.execute_command('childmax'))
    def set_max_children(self, val: int):
        """Set the Thread maximum number of allowed children."""
        self.execute_command(f'childmax {val}')
    def get_child_ip_max(self) -> int:
        """Get the maximum number of IP addresses that each MTD child may register with this device as parent."""
        return self.__parse_int(self.execute_command('childip max'))
    def set_child_ip_max(self, val: int):
        """Set the maximum number of IP addresses that each MTD child may register with this device as parent."""
        self.execute_command(f'childip max {val}')
    def get_child_timeout(self) -> int:
        """Get the Thread Child Timeout value."""
        return self.__parse_int(self.execute_command('childtimeout'))
    def set_child_timeout(self, timeout: int):
        """Set the Thread Child Timeout value."""
        self.execute_command('childtimeout %d' % timeout)
    def get_child_supervision_interval(self) -> int:
        """Get the Child Supervision Interval value."""
        return self.__parse_int(self.execute_command('childsupervision interval'))
    def set_child_supervision_interval(self, val: int):
        """Set the Child Supervision Interval value.
        This command can only be used with FTD devices.
        """
        self.execute_command(f'childsupervision interval {val}')
def get_child_supervision_check_timeout(self) -> int:
"""Get the Child Supervision Check Timeout value."""
return self.__parse_int(self.execute_command('childsupervision checktimeout'))
def set_child_supervision_check_timeout(self, val: int):
"""Set the Child Supervision Check Timeout value."""
self.execute_command(f'childsupervision checktimeout {val}')
#
# Neighbor management
#
def get_neighbor_list(self) -> List[Rloc16]:
"""Get a list of RLOC16 of neighbors"""
line = self.__parse_str(self.execute_command('neighbor list')).strip()
return [Rloc16(id, 16) for id in line.split()]
    def get_neighbor_table(self) -> Dict[Rloc16, Dict[str, Any]]:
        """Get the neighbor table, keyed by neighbor RLOC16."""
        output = self.execute_command('neighbor table')
        if len(output) < 2:
            raise UnexpectedCommandOutput(output)
        #
        # Example output:
        #
        # | Role | RLOC16 | Age | Avg RSSI | Last RSSI |R|D|N| Extended MAC |
        # +------+--------+-----+----------+-----------+-+-+-+------------------+
        # | C | 0xcc01 | 96 | -46 | -46 |1|1|1| 1eb9ba8a6522636b |
        # | R | 0xc800 | 2 | -29 | -29 |1|1|1| 9a91556102c39ddb |
        # | R | 0xf000 | 3 | -28 | -28 |1|1|1| 0ad7ed6beaa6016d |
        # Done
        #
        headers = self.__split_table_row(output[0])
        table = {}
        # output[1] is the `+---+` separator row; data rows start at output[2].
        for line in output[2:]:
            line = line.strip()
            if not line:
                continue
            fields = self.__split_table_row(line)
            col = lambda colname: self.__get_table_col(colname, headers, fields)
            role = col('Role')
            is_router = role == 'R'
            # The R/D/N flag columns encode the neighbor's device mode string.
            r, d, n = int(col('R')), int(col('D')), int(col('N'))
            mode = DeviceMode(f'{"r" if r else ""}{"d" if d else ""}{"n" if n else ""}')
            rloc16 = Rloc16(col('RLOC16'), 16)
            table[rloc16] = {
                'is_router': is_router,
                'rloc16': rloc16,
                'age': int(col('Age')),
                'avg_rssi': int(col('Avg RSSI')),
                'last_rssi': int(col('Last RSSI')),
                'mode': mode,
                'extaddr': col('Extended MAC'),
            }
        return table
#
# SED/SSED configuration
#
def get_poll_period(self) -> int:
"""Get the customized data poll period of sleepy end device (milliseconds).
Only for Reference Device."""
return self.__parse_int(self.execute_command('pollperiod'))
def set_poll_period(self, poll_period: int):
"""Set the customized data poll period (in milliseconds) for sleepy end device.
Only for Reference Device."""
self.execute_command(f'pollperiod {poll_period}')
# TODO: csl
# TODO: csl channel <channel>
# TODO: csl period <period>
# TODO: csl timeout <timeout>
    # `csl` prints the period as `<n>(in units of 10 symbols), <m>ms`; capture <n>.
    _CSL_PERIOD_PATTERN = re.compile(r'(\d+)\(in units of 10 symbols\), \d+ms')
    # `csl` prints the timeout as `<n>s`; capture the seconds value.
    _CSL_TIMEOUT_PATTERN = re.compile(r'(\d+)s')
    def get_csl_config(self) -> Dict[str, int]:
        """Get the CSL configuration.

        Returns:
            Dict with any of 'channel', 'timeout' (seconds) and 'period'
            (units of 10 symbols) that the CLI reported.
        """
        output = self.execute_command('csl')
        cfg = {}
        for line in output:
            k, v = line.split(': ')
            if k == 'Channel':
                cfg['channel'] = int(v)
            elif k == 'Timeout':
                # NOTE(review): .match() returns None if the value format
                # differs, which would raise AttributeError here.
                cfg['timeout'] = int(OTCI._CSL_TIMEOUT_PATTERN.match(v).group(1))
            elif k == 'Period':
                cfg['period'] = int(OTCI._CSL_PERIOD_PATTERN.match(v).group(1))
            else:
                logging.warning("Ignore unknown CSL parameter: %s: %s", k, v)
        return cfg
def config_csl(self, channel: int = None, period: int = None, timeout: int = None):
"""Configure CSL parameters.
:param channel: Set CSL channel.
:param period: Set CSL period in units of 10 symbols. Disable CSL by setting this parameter to 0.
:param timeout: Set the CSL timeout in seconds.
"""
if channel is None and period is None and timeout is None:
raise InvalidArgumentsError("Please specify at least 1 parameter to configure.")
if channel is not None:
self.execute_command(f'csl channel {channel}')
if period is not None:
self.execute_command(f'csl period {period}')
if timeout is not None:
self.execute_command(f'csl timeout {timeout}')
#
# Leader utilities
#
def get_context_id_reuse_delay(self) -> int:
"""Get the CONTEXT_ID_REUSE_DELAY value."""
return self.__parse_int(self.execute_command('contextreusedelay'))
def set_context_id_reuse_delay(self, val: int):
"""Set the CONTEXT_ID_REUSE_DELAY value."""
self.execute_command(f'contextreusedelay {val}')
def release_router_id(self, routerid: int):
"""Release a Router ID that has been allocated by the device in the Leader role."""
self.execute_command(f'releaserouterid {routerid}')
# Time Sync utilities
# TODO: networktime
# TODO: networktime <timesyncperiod> <xtalthreshold>
# TODO: delaytimermin
# TODO: delaytimermin <delaytimermin>
#
# Commissioniner operations
#
    def commissioner_start(self):
        """Start the Commissioner role."""
        self.execute_command('commissioner start')
    def commissioner_stop(self):
        """Stop the Commissioner role."""
        self.execute_command('commissioner stop')
    def get_commissioiner_state(self) -> str:
        """Get current Commissioner state (active or petitioning or disabled).

        NOTE: the method name misspells "commissioner"; it is kept as-is for
        backward compatibility with existing callers.
        """
        return self.__parse_str(self.execute_command('commissioner state'))
    def get_commissioner_session_id(self) -> int:
        """Get current commissioner session id."""
        return self.__parse_int(self.execute_command('commissioner sessionid'))
    def commissioner_add_joiner(self, pskd, eui64=None, discerner=None, timeout=None):
        """Add a Joiner entry.

        :param pskd: Pre-Shared Key for the Joiner.
        :param eui64: The IEEE EUI-64 of the Joiner or '*' to match any Joiner
        :param discerner: The Joiner discerner in format number/length.
        :param timeout: Joiner timeout in seconds.
        """
        # Exactly one of eui64 / discerner must be supplied (XOR check).
        if (eui64 is not None) == (discerner is not None):
            raise InvalidArgumentsError("Please specify eui64 or discerner, but not both.")
        if eui64 is not None and eui64 != '*':
            self.__validate_extaddr(eui64)
        cmd = f'commissioner joiner add {eui64 or discerner} {pskd}'
        if timeout is not None:
            cmd += f' {timeout}'
        self.execute_command(cmd)
    def commissioner_remove_jointer(self, eui64=None, discerner=None):
        """Remove a Joiner entry.

        NOTE: the method name misspells "joiner"; it is kept as-is for
        backward compatibility with existing callers.

        :param eui64: The IEEE EUI-64 of the Joiner or '*' to match any Joiner
        :param discerner: The Joiner discerner in format number/length.
        """
        # Exactly one of eui64 / discerner must be supplied (XOR check).
        if (eui64 is not None) == (discerner is not None):
            raise InvalidArgumentsError("Please specify eui64 or discerner, but not both.")
        if eui64 is not None and eui64 != '*':
            self.__validate_extaddr(eui64)
        self.execute_command(f'commissioner joiner remove {eui64 or discerner}')
    def set_commissioner_provisioning_url(self, url: str):
        """Set the Commissioner provisioning URL."""
        self.execute_command(f'commissioner provisioningurl {url}')
# TODO: commissioner announce
# TODO: commissioner energy
# TODO: commissioner mgmtget
# TODO: commissioner mgmtset
# TODO: commissioner panid
#
# Joiner operations
#
def joiner_start(self, psk: str, provisioning_url: str = None):
"""Start the Joiner."""
cmd = f'joiner start {psk}'
if provisioning_url is not None:
cmd += f' {provisioning_url}'
self.execute_command(cmd)
def joiner_stop(self):
"""Stop the Joiner role."""
self.execute_command('joiner stop')
def get_joiner_id(self) -> str:
"""Get the Joiner ID."""
return self.__parse_joiner_id(self.execute_command('joiner id'))
def get_joiner_port(self) -> int:
"""Get the Joiner port."""
return self.__parse_int(self.execute_command(f'joinerport'))
def set_joiner_port(self, port: int):
"""Set the Joiner port."""
self.execute_command(f'joinerport {port}')
# TODO: joiner discerner
#
# Network Data utilities
#
    def get_local_prefixes(self) -> List[Tuple[Ip6Prefix, str, str, Rloc16]]:
        """Get prefixes from local Network Data."""
        output = self.execute_command('prefix')
        return self.__parse_prefixes(output)
    def __parse_prefixes(self, output: List[str]) -> List[Tuple[Ip6Prefix, str, str, Rloc16]]:
        """Parse prefix lines into (prefix, flags, preference, rloc16) tuples."""
        prefixes = []
        for line in output:
            # Local Network Data lines may carry a leading '- ' marker.
            if line.startswith('- '):
                line = line[2:]
            prefix, flags, prf, rloc16 = line.split()[:4]
            prefixes.append((Ip6Prefix(prefix), flags, prf, Rloc16(rloc16, 16)))
        return prefixes
    def add_prefix(self, prefix: str, flags='paosr', prf='med'):
        """Add a valid prefix to the Network Data."""
        self.execute_command(f'prefix add {prefix} {flags} {prf}')
    def remove_prefix(self, prefix: str):
        """Invalidate a prefix in the Network Data."""
        self.execute_command(f'prefix remove {prefix}')
    def register_network_data(self):
        """Register the local Network Data (issues the `netdata register` command)."""
        self.execute_command('netdata register')
    def get_network_data(self) -> Dict[str, List]:
        """Get Thread Network Data as {'prefixes', 'routes', 'services'} lists.

        Raises:
            UnexpectedCommandOutput: if the output does not start with a
                `Prefixes:` section header.
        """
        output = self.execute_command('netdata show')
        netdata = {}
        if output.pop(0) != 'Prefixes:':
            raise UnexpectedCommandOutput(output)
        # Collect lines up to the `Routes:` header.
        # NOTE(review): pop(0) raises IndexError if a section header is missing.
        prefixes_output = []
        while True:
            line = output.pop(0)
            if line == 'Routes:':
                break
            else:
                prefixes_output.append(line)
        netdata['prefixes'] = self.__parse_prefixes(prefixes_output)
        # Collect lines up to the `Services:` header.
        routes_output = []
        while True:
            line = output.pop(0)
            if line == 'Services:':
                break
            else:
                routes_output.append(line)
        netdata['routes'] = self.__parse_routes(routes_output)
        # Everything remaining belongs to the services section.
        netdata['services'] = self.__parse_services(output)
        return netdata
def get_prefixes(self) -> List[Tuple[Ip6Prefix, str, str, Rloc16]]:
"""Get network prefixes from Thread Network Data."""
network_data = self.get_network_data()
return network_data['prefixes']
def get_routes(self) -> List[Tuple[str, bool, str, Rloc16]]:
"""Get routes from Thread Network Data."""
network_data = self.get_network_data()
return network_data['routes']
def get_services(self) -> List[Tuple[int, bytes, bytes, bool, Rloc16]]:
"""Get services from Thread Network Data"""
network_data = self.get_network_data()
return network_data['services']
    def __parse_services(self, output: List[str]) -> List[Tuple[int, bytes, bytes, bool, Rloc16]]:
        """Parse service lines into (enterprise_number, service_data, server_data, stable, rloc16)."""
        services = []
        for line in output:
            line = line.split()
            enterprise_number, service_data, server_data = line[:3]
            # An optional literal 's' field before the RLOC16 marks a stable entry.
            if line[3] == 's':
                stable, rloc16 = True, line[4]
            else:
                stable, rloc16 = False, line[3]
            enterprise_number = int(enterprise_number)
            service_data = self.__hex_to_bytes(service_data)
            server_data = self.__hex_to_bytes(server_data)
            rloc16 = Rloc16(rloc16, 16)
            services.append((enterprise_number, service_data, server_data, stable, rloc16))
        return services
def get_network_data_bytes(self) -> bytes:
"""Get the raw Network Data."""
hexstr = self.__parse_str(self.execute_command('netdata show -x'))
return bytes(int(hexstr[i:i + 2], 16) for i in range(0, len(hexstr), 2))
    def get_local_routes(self) -> List[Tuple[str, bool, str, Rloc16]]:
        """Get routes from local Network Data."""
        return self.__parse_routes(self.execute_command('route'))
    def __parse_routes(self, output: List[str]) -> List[Tuple[str, bool, str, Rloc16]]:
        """Parse route lines into (prefix, stable, preference, rloc16) tuples."""
        routes = []
        for line in output:
            line = line.split()
            # An optional literal 's' after the prefix marks a stable route.
            if line[1] == 's':
                prefix, _, prf, rloc16 = line
                stable = True
            else:
                prefix, prf, rloc16 = line
                stable = False
            rloc16 = Rloc16(rloc16, 16)
            routes.append((prefix, stable, prf, rloc16))
        return routes
    def add_route(self, prefix: str, stable=True, prf='med'):
        """Add a valid external route to the Network Data."""
        cmd = f'route add {prefix}'
        if stable:
            cmd += ' s'
        cmd += f' {prf}'
        self.execute_command(cmd)
    def remove_route(self, prefix: str):
        """Invalidate an external route in the Network Data."""
        self.execute_command(f'route remove {prefix}')
    def add_service(self, enterprise_number: int, service_data: Union[str, bytes], server_data: Union[str, bytes]):
        """Add service to the Network Data.

        enterpriseNumber: IANA enterprise number
        serviceData: hex-encoded binary service data
        serverData: hex-encoded binary server data
        """
        service_data = self.__validate_hex_or_bytes(service_data)
        server_data = self.__validate_hex_or_bytes(server_data)
        self.execute_command(f'service add {enterprise_number} {service_data} {server_data}')
    def remove_service(self, enterprise_number, service_data):
        """Remove service from Network Data.

        enterpriseNumber: IANA enterprise number
        serviceData: hex-encoded binary service data
        """
        service_data = self.__validate_hex_or_bytes(service_data)
        self.execute_command(f'service remove {enterprise_number} {service_data}')
#
# Dataset management
#
def dataset_init_buffer(self, get_active_dataset=False, get_pending_dataset=False):
    """Initialize the operational dataset buffer.

    get_active_dataset: initialize the buffer from the Active Operational Dataset.
    get_pending_dataset: initialize the buffer from the Pending Operational Dataset.

    If neither flag is set, a new random dataset is generated.

    Raises InvalidArgumentsError if both flags are set.
    """
    if get_active_dataset and get_pending_dataset:
        raise InvalidArgumentsError("Can not specify both `get_active_dataset` and `get_pending_dataset`.")
    # Fix: the command strings had useless f-prefixes (no placeholders).
    if get_active_dataset:
        self.execute_command('dataset init active')
    elif get_pending_dataset:
        self.execute_command('dataset init pending')
    else:
        self.execute_command('dataset init new')
def dataset_commit_buffer(self, dataset: str):
    """Commit the operational dataset buffer as the Active or Pending dataset.

    dataset: 'active' or 'pending'.

    Raises InvalidArgumentsError for any other dataset name.
    """
    if dataset in ('active', 'pending'):
        cmd = f'dataset commit {dataset}'
    else:
        # Typo fix in error message: "Unkonwn" -> "Unknown".
        raise InvalidArgumentsError(f'Unknown dataset: {dataset}')
    self.execute_command(cmd)
def dataset_clear_buffer(self):
    """Reset operational dataset buffer."""
    self.execute_command('dataset clear')
def get_dataset(self, dataset: str = 'buffer'):
    """Get an operational dataset as a parsed dict.

    dataset: 'active', 'pending', or 'buffer' (the dataset buffer, default).

    Raises InvalidArgumentsError for any other dataset name.
    """
    if dataset in ('active', 'pending'):
        cmd = f'dataset {dataset}'
    elif dataset == 'buffer':
        cmd = 'dataset'
    else:
        # Typo fix in error message: "Unkonwn" -> "Unknown".
        raise InvalidArgumentsError(f'Unknown dataset: {dataset}')
    output = self.execute_command(cmd)
    return self.__parse_dataset(output)
def __parse_dataset(self, output: List[str]) -> Dict[str, Any]:
    """Parse `dataset` command output into a dict with snake_case keys."""
    # Example output:
    #
    # Active Timestamp: 1
    # Channel: 22
    # Channel Mask: 0x07fff800
    # Ext PAN ID: 5c93ae980ff22d35
    # Mesh Local Prefix: fdc7:55fe:6363:bd01::/64
    # Network Key: d1a8348d59fb1fac1d6c4f95007d487a
    # Network Name: OpenThread-7caa
    # PAN ID: 0x7caa
    # PSKc: 167d89fd169e439ca0b8266de248090f
    # Security Policy: 0, onrc
    dataset = {}
    for line in output:
        line = line.split(': ')
        # Re-join the tail in case the value itself contains ': '.
        key, val = line[0], ': '.join(line[1:])
        if key == 'Active Timestamp':
            dataset['active_timestamp'] = int(val)
        elif key == 'Channel':
            dataset['channel'] = int(val)
        elif key == 'Channel Mask':
            dataset['channel_mask'] = int(val, 16)
        elif key == 'Ext PAN ID':
            dataset['extpanid'] = val
        elif key == 'Mesh Local Prefix':
            dataset['mesh_local_prefix'] = val
        elif key in ('Network Key', 'Master Key'):
            # 'Master Key' is printed by builds predating the
            # networkkey rename (see __detect_networkkey_cmd).
            dataset['networkkey'] = val
        elif key == 'Network Name':
            dataset['network_name'] = val
        elif key == 'PAN ID':
            dataset['panid'] = int(val, 16)
        elif key == 'PSKc':
            dataset['pskc'] = val
        elif key == 'Security Policy':
            # Accept either ', ' or a plain space between rotation time
            # and flags.
            rotation_time, flags = val.split(', ') if ', ' in val else val.split(' ')
            rotation_time = int(rotation_time)
            dataset['security_policy'] = SecurityPolicy(rotation_time, flags)
        else:
            # NOTE(review): unrecognized keys raise; pending datasets
            # presumably print additional fields (e.g. pending timestamp,
            # delay) — confirm this parser is only fed active-style output.
            raise UnexpectedCommandOutput(output)
    return dataset
def get_dataset_bytes(self, dataset: str) -> bytes:
    """Get the Active or Pending Operational Dataset as raw bytes.

    dataset: 'active' or 'pending'.

    Raises InvalidArgumentsError for any other dataset name.
    """
    if dataset in ('active', 'pending'):
        cmd = f'dataset {dataset} -x'
    else:
        # Typo fix in error message: "Unkonwn" -> "Unknown".
        raise InvalidArgumentsError(f'Unknown dataset: {dataset}')
    hexstr = self.__parse_str(self.execute_command(cmd))
    return self.__hex_to_bytes(hexstr)
def set_dataset_bytes(self, dataset: str, data: bytes) -> None:
    """Set the Active or Pending Operational Dataset from raw bytes.

    dataset: 'active' or 'pending'.
    data: the TLV-encoded dataset.

    Raises InvalidArgumentsError for any other dataset name.
    """
    if dataset in ('active', 'pending'):
        cmd = f'dataset set {dataset} {self.__bytes_to_hex(data)}'
    else:
        # Typo fix in error message: "Unkonwn" -> "Unknown".
        raise InvalidArgumentsError(f'Unknown dataset: {dataset}')
    self.execute_command(cmd)
def dataset_set_buffer(self,
                       active_timestamp: Optional[int] = None,
                       channel: Optional[int] = None,
                       channel_mask: Optional[int] = None,
                       extpanid: Optional[str] = None,
                       mesh_local_prefix: Optional[str] = None,
                       network_key: Optional[str] = None,
                       network_name: Optional[str] = None,
                       panid: Optional[int] = None,
                       pskc: Optional[str] = None,
                       security_policy: Optional[tuple] = None,
                       pending_timestamp: Optional[int] = None):
    """Set fields of the operational dataset buffer.

    Only arguments that are not None are written; each field is set via
    its own `dataset <field>` command.
    """
    if active_timestamp is not None:
        self.execute_command(f'dataset activetimestamp {active_timestamp}')
    if channel is not None:
        self.execute_command(f'dataset channel {channel}')
    if channel_mask is not None:
        self.execute_command(f'dataset channelmask {channel_mask}')
    if extpanid is not None:
        self.execute_command(f'dataset extpanid {extpanid}')
    if mesh_local_prefix is not None:
        self.execute_command(f'dataset meshlocalprefix {mesh_local_prefix}')
    if network_key is not None:
        # Older builds use `masterkey` instead of `networkkey` (see
        # __detect_networkkey_cmd).
        nwk_cmd = self.__detect_networkkey_cmd()
        self.execute_command(f'dataset {nwk_cmd} {network_key}')
    if network_name is not None:
        self.execute_command(f'dataset networkname {self.__escape_escapable(network_name)}')
    if panid is not None:
        self.execute_command(f'dataset panid {panid}')
    if pskc is not None:
        self.execute_command(f'dataset pskc {pskc}')
    if security_policy is not None:
        # security_policy is a (rotation_time, flags) pair.
        rotation_time, flags = security_policy
        self.execute_command(f'dataset securitypolicy {rotation_time} {flags}')
    if pending_timestamp is not None:
        self.execute_command(f'dataset pendingtimestamp {pending_timestamp}')
# TODO: dataset mgmtgetcommand
# TODO: dataset mgmtsetcommand
# TODO: dataset set <active|pending> <dataset>
#
# Allowlist management
#
def enable_allowlist(self):
self.execute_command(f'macfilter addr {self.__detect_allowlist_cmd()}')
def disable_allowlist(self):
self.execute_command('macfilter addr disable')
def add_allowlist(self, addr: str, rssi: int = None):
cmd = f'macfilter addr add {addr}'
if rssi is not None:
cmd += f' {rssi}'
self.execute_command(cmd)
def remove_allowlist(self, addr: str):
self.execute_command(f'macfilter addr remove {addr}')
def clear_allowlist(self):
self.execute_command('macfilter addr clear')
def set_allowlist(self, allowlist: Collection[Union[str, Tuple[str, int]]]):
self.clear_allowlist()
if allowlist is None:
self.disable_allowlist()
else:
self.enable_allowlist()
for item in allowlist:
if isinstance(item, str):
self.add_allowlist(item)
else:
addr, rssi = item[0], item[1]
self.add_allowlist(addr, rssi)
# TODO: denylist
# TODO: macfilter rss
# TODO: macfilter rss add <extaddr> <rss>
# TODO: macfilter rss add-lqi <extaddr> <lqi>
# TODO: macfilter rss remove <extaddr>
# TODO: macfilter rss clear
def __detect_allowlist_cmd(self):
    """Return the CLI keyword for the address filter allow mode.

    API version >= 28 uses 'allowlist'; the escape sequence below spells
    the legacy pre-rename keyword for older builds.
    """
    if self.api_version >= 28:
        return 'allowlist'
    else:
        return '\x77\x68\x69\x74\x65\x6c\x69\x73\x74'
def __detect_networkkey_cmd(self) -> str:
return 'networkkey' if self.api_version >= 126 else 'masterkey'
#
# Unicast Addresses management
#
def add_ipaddr(self, ip: Union[str, ipaddress.IPv6Address]):
"""Add an IPv6 address to the Thread interface."""
self.execute_command(f'ipaddr add {ip}')
def del_ipaddr(self, ip: Union[str, ipaddress.IPv6Address]):
"""Delete an IPv6 address from the Thread interface."""
self.execute_command(f'ipaddr del {ip}')
def get_ipaddrs(self) -> Tuple[Ip6Addr]:
"""Get all IPv6 addresses assigned to the Thread interface."""
return tuple(map(Ip6Addr, self.execute_command('ipaddr')))
def has_ipaddr(self, ip: Union[str, ipaddress.IPv6Address]):
"""Check if a IPv6 address was added to the Thread interface."""
return ip in self.get_ipaddrs()
def get_ipaddr_mleid(self) -> Ip6Addr:
"""Get Thread Mesh Local EID address."""
return self.__parse_ip6addr(self.execute_command('ipaddr mleid'))
def get_ipaddr_linklocal(self) -> Ip6Addr:
"""Get Thread link-local IPv6 address."""
return self.__parse_ip6addr(self.execute_command('ipaddr linklocal'))
def get_ipaddr_rloc(self) -> Ip6Addr:
"""Get Thread Routing Locator (RLOC) address."""
return self.__parse_ip6addr(self.execute_command('ipaddr rloc'))
#
# Multicast Addresses management
#
def add_ipmaddr(self, ip: Union[str, ipaddress.IPv6Address]):
"""Subscribe the Thread interface to the IPv6 multicast address."""
self.execute_command(f'ipmaddr add {ip}')
def del_ipmaddr(self, ip: Union[str, ipaddress.IPv6Address]):
"""Unsubscribe the Thread interface to the IPv6 multicast address."""
self.execute_command(f'ipmaddr del {ip}')
def get_ipmaddrs(self) -> Tuple[Ip6Addr]:
"""Get all IPv6 multicast addresses subscribed to the Thread interface."""
return tuple(map(Ip6Addr, self.execute_command('ipmaddr')))
def has_ipmaddr(self, ip: Union[str, ipaddress.IPv6Address]):
"""Check if a IPv6 multicast address was subscribed by the Thread interface."""
return ip in self.get_ipmaddrs()
def get_ipmaddr_promiscuous(self) -> bool:
"""Get multicast promiscuous mode."""
return self.__parse_Enabled_or_Disabled(self.execute_command("ipmaddr promiscuous"))
def enable_ipmaddr_promiscuous(self):
"""Enable multicast promiscuous mode."""
self.execute_command('ipmaddr promiscuous enable')
def disable_ipmaddr_promiscuous(self):
"""Disable multicast promiscuous mode."""
self.execute_command('ipmaddr promiscuous disable')
def get_ipmaddr_llatn(self) -> Ip6Addr:
"""Get Link Local All Thread Nodes Multicast Address"""
return self.__parse_ip6addr(self.execute_command('ipmaddr llatn'))
def get_ipmaddr_rlatn(self) -> Ip6Addr:
"""Get Realm Local All Thread Nodes Multicast Address"""
return self.__parse_ip6addr(self.execute_command('ipmaddr rlatn'))
#
# Backbone Router Utilities
#
# TODO: bbr mgmt ...
def enable_backbone_router(self):
    """Enable Backbone Router Service for Thread 1.2 FTD.

    SRV_DATA.ntf would be triggered for attached device if there is no
    Backbone Router Service in Thread Network Data.
    """
    self.execute_command('bbr enable')
def disable_backbone_router(self):
    """Disable Backbone Router Service for Thread 1.2 FTD.

    SRV_DATA.ntf would be triggered if Backbone Router is Primary state.
    """
    self.execute_command('bbr disable')
def get_backbone_router_state(self) -> str:
"""Get local Backbone state (Disabled or Primary or Secondary) for Thread 1.2 FTD."""
return self.__parse_str(self.execute_command('bbr state'))
def get_primary_backbone_router_info(self) -> Optional[dict]:
"""Show current Primary Backbone Router information for Thread 1.2 device."""
output = self.execute_command('bbr')
if len(output) < 1:
raise UnexpectedCommandOutput(output)
line = output[0]
if line == 'BBR Primary: None':
return None
if line != 'BBR Primary:':
raise UnexpectedCommandOutput(output)
# Example output:
# BBR Primary:
# server16: 0xE400
# seqno: 10
# delay: 120 secs
# timeout: 300 secs
dataset = {}
for line in output[1:]:
key, val = line.split(':')
key, val = key.strip(), val.strip()
if key == 'server16':
dataset[key] = int(val, 16)
elif key == 'seqno':
dataset[key] = int(val)
elif key == 'delay':
if not val.endswith(' secs'):
raise UnexpectedCommandOutput(output)
dataset[key] = int(val.split()[0])
elif key == 'timeout':
if not val.endswith(' secs'):
raise UnexpectedCommandOutput(output)
dataset[key] = int(val.split()[0])
else:
raise UnexpectedCommandOutput(output)
return dataset
def register_backbone_router_dataset(self):
    """Register Backbone Router Service for Thread 1.2 FTD.

    SRV_DATA.ntf would be triggered for attached device.
    """
    self.execute_command('bbr register')
def get_backbone_router_config(self) -> dict:
    """Show local Backbone Router configuration for Thread 1.2 FTD.

    Returns a dict with 'seqno', 'delay' and 'timeout' (both in seconds).
    """
    output = self.execute_command('bbr config')
    # Example output:
    # seqno: 10
    # delay: 120 secs
    # timeout: 300 secs
    config = {}
    for line in output:
        key, val = line.split(':')
        key, val = key.strip(), val.strip()
        if key == 'seqno':
            config[key] = int(val)
        elif key in ('delay', 'timeout'):
            # Fix: validate the stripped value rather than the raw line,
            # consistent with get_primary_backbone_router_info and robust
            # against trailing whitespace on the line.
            if not val.endswith(' secs'):
                raise UnexpectedCommandOutput(output)
            config[key] = int(val.split()[0])
        else:
            raise UnexpectedCommandOutput(output)
    return config
def set_backbone_router_config(self, seqno: int = None, delay: int = None, timeout: int = None):
"""Configure local Backbone Router configuration for Thread 1.2 FTD.
Call register_backbone_router_dataset() to explicitly register Backbone Router service to Leader for Secondary Backbone Router.
"""
if seqno is None and delay is None and timeout is None:
raise InvalidArgumentsError("Please specify seqno or delay or timeout")
cmd = 'bbr config'
if seqno is not None:
cmd += f' seqno {seqno}'
if delay is not None:
cmd += f' delay {delay}'
if timeout is not None:
cmd += f' timeout {timeout}'
self.execute_command(cmd)
def get_backbone_router_jitter(self) -> int:
"""Get jitter (in seconds) for Backbone Router registration for Thread 1.2 FTD."""
return self.__parse_int(self.execute_command('bbr jitter'))
def set_backbone_router_jitter(self, val: int):
"""Set jitter (in seconds) for Backbone Router registration for Thread 1.2 FTD."""
self.execute_command(f'bbr jitter {val}')
def backbone_router_get_multicast_listeners(self) -> List[Tuple[Ip6Addr, int]]:
"""Get Backbone Router Multicast Listeners."""
listeners = []
for line in self.execute_command('bbr mgmt mlr listener'):
ip, timeout = line.split()
listeners.append((Ip6Addr(ip), int(timeout)))
return listeners
#
# Thread 1.2 and DUA/MLR utilities
#
def get_domain_name(self) -> str:
"""Get the Thread Domain Name for Thread 1.2 device."""
return self.__parse_str(self.execute_command('domainname'))
def set_domain_name(self, name: str):
"""Set the Thread Domain Name for Thread 1.2 device."""
self.execute_command('domainname %s' % self.__escape_escapable(name))
# TODO: dua iid
# TODO: dua iid <iid>
# TODO: dua iid clear
# TODO: mlr reg <ipaddr> ... [timeout]
#
# Link metrics management
#
# TODO: linkmetrics mgmt <ipaddr> forward <seriesid> [ldraX][pqmr]
# TODO: linkmetrics probe <ipaddr> <seriesid> <length>
# TODO: linkmetrics query <ipaddr> single [pqmr]
# TODO: linkmetrics query <ipaddr> forward <seriesid>
# TODO: linkquality <extaddr>
# TODO: linkquality <extaddr> <linkquality>
#
#
# Logging
#
def get_log_level(self) -> int:
"""Get the log level."""
return self.__parse_int(self.execute_command('log level'))
def set_log_level(self, level: int):
"""Set the log level."""
self.execute_command(f'log level {level}')
#
# Device performance related information
#
def get_message_buffer_info(self) -> dict:
"""Get the current message buffer information."""
output = self.execute_command('bufferinfo')
info = {}
def _parse_val(val):
vals = val.split()
return int(vals[0]) if len(vals) == 1 else tuple(map(int, vals))
for line in output:
key, val = line.split(':')
key, val = key.strip(), val.strip()
info[key.replace(' ', '_')] = _parse_val(val)
return info
@constant_property
def counter_names(self):
"""Get the supported counter names."""
return tuple(self.execute_command('counters'))
def get_counter(self, name: str) -> Counter:
    """Get the values of a counter by its name (see `counter_names`)."""
    output = self.execute_command(f'counters {name}')
    counter = Counter()
    for line in output:
        k, v = line.strip().split(': ')
        counter[k] = int(v)
    return counter
def reset_counter(self, name: str):
"""Reset the counter value."""
self.execute_command(f'counters {name} reset')
def get_eidcache(self) -> Dict[Ip6Addr, Rloc16]:
"""Get the EID-to-RLOC cache entries."""
output = self.execute_command('eidcache')
cache = {}
for line in output:
ip, rloc16, _ = line.split(" ", 2)
cache[Ip6Addr(ip)] = Rloc16(rloc16, 16)
return cache
#
# UDP utilities
#
def udp_open(self):
"""Opens the example socket."""
self.execute_command('udp open')
def udp_close(self):
    """Closes the example socket."""
    self.execute_command('udp close')
def udp_bind(self, ip: str, port: int, netif: NetifIdentifier = NetifIdentifier.THERAD):
"""Assigns a name (i.e. IPv6 address and port) to the example socket.
:param ip: the IPv6 address or the unspecified IPv6 address (::).
:param port: the UDP port
"""
bindarg = ''
if netif == NetifIdentifier.UNSPECIFIED:
bindarg += ' -u'
elif netif == NetifIdentifier.BACKBONE:
bindarg += ' -b'
self.execute_command(f'udp bind{bindarg} {ip} {port}')
def udp_connect(self, ip: str, port: int):
"""Specifies the peer with which the socket is to be associated.
ip: the peer's IPv6 address.
port: the peer's UDP port.
"""
self.execute_command(f'udp connect {ip} {port}')
def udp_send(self, ip: str = None, port: int = None, text: str = None, random_bytes: int = None, hex: str = None):
"""Send a few bytes over UDP.
ip: the IPv6 destination address.
port: the UDP destination port.
type: the type of the message: _ -t: text payload in the value, same as without specifying the type. _ -s: autogenerated payload with specified length indicated in the value.
* -x: binary data in hexadecimal representation in the value.
"""
if (ip is None) != (port is None):
raise InvalidArgumentsError("Please specify both `ip` and `port`.")
if (text is not None) + (random_bytes is not None) + (hex is not None) != 1:
raise InvalidArgumentsError("Please specify `text` or `random_bytes` or `hex`.")
cmd = 'udp send'
if ip is not None:
cmd += f' {ip} {port}'
if text is not None:
cmd += f' -t {text}'
elif random_bytes is not None:
cmd += f' -s {random_bytes}'
elif hex is not None:
self.__validate_hex(hex)
cmd += f' -x {hex}'
self.execute_command(cmd)
def udp_get_link_security(self) -> bool:
"""Gets whether the link security is enabled or disabled."""
return self.__parse_Enabled_or_Disabled(self.execute_command('udp linksecurity'))
def udp_enable_link_security(self):
"""Enable link security."""
self.execute_command('udp linksecurity enable')
def udp_disable_link_security(self):
"""Disable link security."""
self.execute_command('udp linksecurity disable')
def netstat(self) -> List[Tuple[Tuple[Ip6Addr, int], Tuple[Ip6Addr, int]]]:
cmd = 'netstat'
output = self.execute_command(cmd)
if len(output) < 2:
raise UnexpectedCommandOutput(output)
socks = []
for line in output[2:]:
_, sock_addr, peer_addr = line.strip().split('|')[:3]
sock_addr = self.__parse_socket_addr(sock_addr.strip())
peer_addr = self.__parse_socket_addr(peer_addr.strip())
socks.append((sock_addr, peer_addr))
return socks
@staticmethod
def __parse_socket_addr(addr: str) -> Tuple[Ip6Addr, int]:
    """Split 'ip:port' (or '[ip]:port') into an (Ip6Addr, port) pair.

    A '*' port is reported as 0.
    """
    host, port = addr.rsplit(':', 1)
    # Strip the brackets around a literal IPv6 address.
    if host.startswith('[') and host.endswith(']'):
        host = host[1:-1]
    port_num = 0 if port == '*' else int(port)
    return Ip6Addr(host), port_num
#
# CoAP CLI (test) utilities
#
def coap_start(self):
"""Starts the application coap service."""
self.execute_command('coap start')
def coap_stop(self):
"""Stops the application coap service."""
self.execute_command('coap stop')
def coap_get(self, addr: str, uri_path: str, type: str = "con"):
cmd = f'coap get {addr} {uri_path} {type}'
self.execute_command(cmd)
def coap_put(self, addr: str, uri_path: str, type: str = "con", payload: str = None):
cmd = f'coap put {addr} {uri_path} {type}'
if payload is not None:
cmd += f' {payload}'
self.execute_command(cmd)
def coap_post(self, addr: str, uri_path: str, type: str = "con", payload: str = None):
cmd = f'coap post {addr} {uri_path} {type}'
if payload is not None:
cmd += f' {payload}'
self.execute_command(cmd)
def coap_delete(self, addr: str, uri_path: str, type: str = "con", payload: str = None):
cmd = f'coap delete {addr} {uri_path} {type}'
if payload is not None:
cmd += f' {payload}'
self.execute_command(cmd)
def coap_get_test_resource_path(self) -> str:
"""Gets the URI path for the test resource."""
return self.__parse_str(self.execute_command('coap resource'))
def coap_set_test_resource_path(self, path: str):
"""Sets the URI path for the test resource."""
self.execute_command(f'coap resource {path}')
def coap_test_set_resource_content(self, content: str):
"""Sets the content sent by the test resource. If a CoAP client is observing the resource, a notification is sent to that client."""
self.execute_command(f'coap set {content}')
# TODO: coap observe <address> <uri-path> [type]
# TODO: coap cancel
# TODO: coap parameters <type> ["default"|<ack_timeout> <ack_random_factor_numerator> <ack_random_factor_denominator> <max_retransmit>]
# TODO: CoAP Secure utilities
#
# Other TODOs
#
# TODO: netstat
# TODO: networkdiagnostic get <addr> <type> ..
# TODO: networkdiagnostic reset <addr> <type> ..
# TODO: parent
# TODO: pskc [-p] <key>|<passphrase>
#
#
# Private methods
#
def __parse_str(self, output: List[str]) -> str:
    """Expect exactly one line of output and return it."""
    if len(output) == 1:
        return output[0]
    raise UnexpectedCommandOutput(output)
def __parse_int_list(self, output: List[str]) -> List[int]:
line = self.__parse_str(output)
return list(map(int, line.strip().split()))
def __parse_ip6addr(self, output: List[str]) -> Ip6Addr:
return Ip6Addr(self.__parse_str(output))
def __parse_ip6addr_list(self, output: List[str]) -> List[Ip6Addr]:
return [Ip6Addr(line) for line in output]
def __parse_int(self, output: List[str], base=10) -> int:
if len(output) != 1:
raise UnexpectedCommandOutput(output)
return int(output[0], base)
def __parse_network_key(self, output: List[str]) -> str:
networkkey = self.__parse_str(output)
try:
self.__validate_network_key(networkkey)
except ValueError:
raise UnexpectedCommandOutput(output)
return networkkey
def __validate_network_key(self, networkkey: str):
if len(networkkey) != 32:
raise ValueError(networkkey)
int(networkkey, 16)
def __parse_hex64b(self, output: List[str]) -> str:
extaddr = self.__parse_str(output)
try:
self.__validate_hex64b(extaddr)
except ValueError:
raise UnexpectedCommandOutput(output)
return extaddr
__parse_extaddr = __parse_hex64b
__parse_extpanid = __parse_hex64b
__parse_eui64 = __parse_hex64b
__parse_joiner_id = __parse_hex64b
def __validate_hex64b(self, extaddr: str):
if len(extaddr) != 16:
raise ValueError(extaddr)
self.__validate_hex(extaddr)
def __validate_hex(self, hexstr: str):
if len(hexstr) % 2 != 0:
raise ValueError(hexstr)
for i in range(0, len(hexstr), 2):
int(hexstr[i:i + 2], 16)
__validate_extaddr = __validate_hex64b
__validate_extpanid = __validate_hex64b
def __parse_Enabled_or_Disabled(self, output: List[str]) -> bool:
return self.__parse_values(output, Enabled=True, Disabled=False)
def __parse_values(self, output: List[str], **vals) -> Any:
val = self.__parse_str(output)
if val not in vals:
raise UnexpectedCommandOutput(output)
return vals[val]
def __validate_hex_or_bytes(self, data: Union[str, bytes]) -> str:
if isinstance(data, bytes):
return ''.join('%02x' % c for c in data)
else:
self.__validate_hex(data)
return data
def __hex_to_bytes(self, hexstr: str) -> bytes:
    """Decode a hex string to bytes after validating it."""
    self.__validate_hex(hexstr)
    # Convert one two-character chunk (one octet) at a time.
    octets = [int(hexstr[pos:pos + 2], 16) for pos in range(0, len(hexstr), 2)]
    return bytes(octets)
def __bytes_to_hex(self, data: bytes) -> str:
    """Encode bytes as a lowercase hex string."""
    return data.hex()
def __escape_escapable(self, s: str) -> str:
    """Escape CLI escapable characters in the given string."""
    # The backslash must be handled first so that the backslashes added
    # for the other characters are not themselves re-escaped.
    for ch in ('\\', ' ', '\t', '\r', '\n'):
        s = s.replace(ch, '\\' + ch)
    return s
def __txt_to_hex(self, txt: Dict[str, Union[str, bytes, bool]]) -> str:
    """Encode a TXT dict as hex using length-prefixed `key=value` entries.

    A value of True encodes the bare key (a boolean attribute).
    """
    encoded = bytearray()
    for key, value in txt.items():
        assert '=' not in key, 'TXT key must not contain `=`'
        if isinstance(value, str):
            entry = f'{key}={value}'.encode('utf8')
        elif isinstance(value, bytes):
            entry = f'{key}='.encode('utf8') + value
        else:
            assert value is True, 'TXT val must be str or bytes or True'
            entry = key.encode('utf8')
        assert len(entry) <= 255, 'TXT entry is too long'
        # Each entry is prefixed with its single-byte length.
        encoded.append(len(entry))
        encoded += entry
    return encoded.hex()
def connect_cli_sim(executable: str, nodeid: int, simulator: Optional[Simulator] = None) -> OTCI:
    """Create an OTCI instance driving a simulated OT CLI node.

    executable: path to the OT CLI executable.
    nodeid: the node id in the simulation.
    simulator: the Simulator managing virtual time, if any.
    """
    cli_handler = connectors.OtCliSim(executable, nodeid, simulator=simulator)
    cmd_handler = OtCliCommandRunner(cli_handler)
    return OTCI(cmd_handler)
def connect_cli_serial(dev: str, baudrate=115200) -> OTCI:
    """Create an OTCI instance driving an OT CLI device over a serial port.

    dev: the serial device path.
    baudrate: the serial baud rate.
    """
    cli_handler = connectors.OtCliSerial(dev, baudrate)
    cmd_handler = OtCliCommandRunner(cli_handler)
    return OTCI(cmd_handler)
def connect_ncp_sim(executable: str, nodeid: int, simulator: Optional[Simulator] = None) -> OTCI:
    """Create an OTCI instance driving a simulated OT NCP node via spinel-cli.

    executable: path to the OT NCP executable.
    nodeid: the node id in the simulation.
    simulator: the Simulator managing virtual time, if any.
    """
    ncp_handler = connectors.OtNcpSim(executable, nodeid, simulator=simulator)
    cmd_handler = OtCliCommandRunner(ncp_handler, is_spinel_cli=True)
    return OTCI(cmd_handler)
def connect_otbr_ssh(host: str, port: int = 22, username='pi', password='raspberry', sudo=True) -> OTCI:
    """Create an OTCI instance driving an OTBR host over SSH.

    host: the OTBR host address.
    port: the SSH port.
    username, password: the SSH credentials.
    sudo: forwarded to OtbrSshCommandRunner.
    """
    cmd_handler = OtbrSshCommandRunner(host, port, username, password, sudo=sudo)
    return OTCI(cmd_handler)
def connect_otbr_adb(host: str, port: int = 5555) -> OTCI:
    """Create an OTCI instance driving an OTBR device over ADB.

    host: the device address.
    port: the ADB TCP port.
    """
    cmd_handler = OtbrAdbCommandRunner(host, port)
    return OTCI(cmd_handler)
def connect_cmd_handler(cmd_handler: OTCommandHandler) -> OTCI:
    """Wrap an existing OTCommandHandler in an OTCI instance."""
    return OTCI(cmd_handler)
| {
"content_hash": "328376b31988f7fc2d3396e4bfbb8424",
"timestamp": "",
"source": "github",
"line_count": 2468,
"max_line_length": 208,
"avg_line_length": 36.593598055105346,
"alnum_prop": 0.573881943906193,
"repo_name": "srickardti/openthread",
"id": "ed000dd8414fc7ca62da05d2896a7491f37f4b2f",
"size": "91917",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "tools/otci/otci/otci.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2610"
},
{
"name": "C",
"bytes": "1586867"
},
{
"name": "C++",
"bytes": "8331824"
},
{
"name": "CMake",
"bytes": "109816"
},
{
"name": "Dockerfile",
"bytes": "10410"
},
{
"name": "M4",
"bytes": "32369"
},
{
"name": "Makefile",
"bytes": "192208"
},
{
"name": "Python",
"bytes": "4622817"
},
{
"name": "Shell",
"bytes": "165383"
}
],
"symlink_target": ""
} |
from statsmodels.compat.pandas import Appender, is_int_index, to_numpy
from abc import ABC, abstractmethod
import datetime as dt
from typing import Hashable, List, Optional, Sequence, Set, Tuple, Type, Union
import numpy as np
import pandas as pd
from scipy.linalg import qr
from statsmodels.iolib.summary import d_or_f
from statsmodels.tools.validation import (
bool_like,
float_like,
required_int_like,
string_like,
)
from statsmodels.tsa.tsatools import freq_to_period
DateLike = Union[dt.datetime, pd.Timestamp, np.datetime64]
IntLike = Union[int, np.integer]
START_BEFORE_INDEX_ERR = """\
start is less than the first observation in the index. Values can only be \
created for observations after the start of the index.
"""
class DeterministicTerm(ABC):
    """Abstract Base Class for all Deterministic Terms"""

    # Set _is_dummy if the term is a dummy variable process
    _is_dummy = False

    @property
    def is_dummy(self) -> bool:
        """Flag indicating whether the values produced are dummy variables"""
        return self._is_dummy

    @abstractmethod
    def in_sample(self, index: Sequence[Hashable]) -> pd.DataFrame:
        """
        Produce deterministic trends for in-sample fitting.

        Parameters
        ----------
        index : index_like
            An index-like object. If not an index, it is converted to an
            index.

        Returns
        -------
        DataFrame
            A DataFrame containing the deterministic terms.
        """

    @abstractmethod
    def out_of_sample(
        self,
        steps: int,
        index: Sequence[Hashable],
        forecast_index: Optional[Sequence[Hashable]] = None,
    ) -> pd.DataFrame:
        """
        Produce deterministic trends for out-of-sample forecasts

        Parameters
        ----------
        steps : int
            The number of steps to forecast
        index : index_like
            An index-like object. If not an index, it is converted to an
            index.
        forecast_index : index_like
            An Index or index-like object to use for the forecasts. If
            provided must have steps elements.

        Returns
        -------
        DataFrame
            A DataFrame containing the deterministic terms.
        """

    @abstractmethod
    def __str__(self) -> str:
        """A meaningful string representation of the term"""

    def __hash__(self) -> int:
        # Hash on the concrete class name plus the equality attributes so
        # that terms that compare equal also hash equally.
        name: Tuple[Hashable, ...] = (type(self).__name__,)
        return hash(name + self._eq_attr)

    @property
    @abstractmethod
    def _eq_attr(self) -> Tuple[Hashable, ...]:
        """tuple of attributes that are used for equality comparison"""

    @staticmethod
    def _index_like(index: Sequence[Hashable]) -> pd.Index:
        # Pass pandas Indexes through; otherwise try to coerce, mapping any
        # conversion failure to a uniform TypeError.
        if isinstance(index, pd.Index):
            return index
        try:
            return pd.Index(index)
        except Exception:
            raise TypeError("index must be a pandas Index or index-like")

    @staticmethod
    def _extend_index(
        index: pd.Index,
        steps: int,
        forecast_index: Optional[Sequence[Hashable]] = None,
    ) -> pd.Index:
        """Extend the forecast index"""
        # An explicitly provided forecast_index takes precedence, but must
        # contain exactly `steps` entries.
        if forecast_index is not None:
            forecast_index = DeterministicTerm._index_like(forecast_index)
            assert isinstance(forecast_index, pd.Index)
            if forecast_index.shape[0] != steps:
                raise ValueError(
                    "The number of values in forecast_index "
                    f"({forecast_index.shape[0]}) must match steps ({steps})."
                )
            return forecast_index
        if isinstance(index, pd.PeriodIndex):
            # Continue from the period after the last observation.
            return pd.period_range(
                index[-1] + 1, periods=steps, freq=index.freq
            )
        elif isinstance(index, pd.DatetimeIndex) and index.freq is not None:
            # Generate two dates starting at the last observation and keep
            # the second, i.e. the first out-of-sample timestamp.
            next_obs = pd.date_range(index[-1], freq=index.freq, periods=2)[1]
            return pd.date_range(next_obs, freq=index.freq, periods=steps)
        elif isinstance(index, pd.RangeIndex):
            assert isinstance(index, pd.RangeIndex)
            try:
                step = index.step
                start = index.stop
            except AttributeError:
                # TODO: Remove after pandas min ver is 1.0.0+
                step = index[-1] - index[-2] if len(index) > 1 else 1
                start = index[-1] + step
            stop = start + step * steps
            return pd.RangeIndex(start, stop, step=step)
        elif is_int_index(index) and np.all(np.diff(index) == 1):
            # An integer index with unit increments extends like a range.
            idx_arr = np.arange(index[-1] + 1, index[-1] + steps + 1)
            return pd.Index(idx_arr)
        # default range index
        import warnings

        warnings.warn(
            "Only PeriodIndexes, DatetimeIndexes with a frequency set, "
            "RangesIndexes, and Index with a unit increment support "
            "extending. The index is set will contain the position relative "
            "to the data length.",
            UserWarning,
            stacklevel=2,
        )
        nobs = index.shape[0]
        return pd.RangeIndex(nobs + 1, nobs + steps + 1)

    def __repr__(self) -> str:
        # Include the object id so distinct instances are distinguishable.
        return self.__str__() + f" at 0x{id(self):0x}"

    def __eq__(self, other: object) -> bool:
        # Equal iff `other` is an instance of this term's type and all
        # equality attributes match element-wise.
        if isinstance(other, type(self)):
            own_attr = self._eq_attr
            oth_attr = other._eq_attr
            if len(own_attr) != len(oth_attr):
                return False
            return all([a == b for a, b in zip(own_attr, oth_attr)])
        else:
            return False
class TimeTrendDeterministicTerm(DeterministicTerm, ABC):
    """Abstract Base Class for all Time Trend Deterministic Terms"""

    def __init__(self, constant: bool = True, order: int = 0) -> None:
        self._constant = bool_like(constant, "constant")
        self._order = required_int_like(order, "order")

    @property
    def constant(self) -> bool:
        """Flag indicating that a constant is included"""
        return self._constant

    @property
    def order(self) -> int:
        """Order of the time trend"""
        return self._order

    @property
    def _columns(self) -> List[str]:
        # Friendly names for the low powers; higher powers get a generic
        # "trend**k" label.
        columns = []
        trend_names = {1: "trend", 2: "trend_squared", 3: "trend_cubed"}
        if self._constant:
            columns.append("const")
        for power in range(1, self._order + 1):
            if power in trend_names:
                columns.append(trend_names[power])
            else:
                columns.append(f"trend**{power}")
        return columns

    def _get_terms(self, locs: np.ndarray) -> np.ndarray:
        """Compute the trend columns from an (n, 1) array of locations.

        Column j is locs**p where p is 0 for the constant (if included)
        followed by 1, ..., order for the trend terms.
        """
        nterms = int(self._constant) + self._order
        terms = np.tile(locs, (1, nterms))
        power = np.zeros((1, nterms), dtype=int)
        power[0, int(self._constant) :] = np.arange(1, self._order + 1)
        terms **= power
        return terms

    def __str__(self) -> str:
        terms = []
        if self._constant:
            terms.append("Constant")
        if self._order:
            # Bug fix: the included powers are 1..order, so report
            # self._order (was off by one: self._order + 1).
            terms.append(f"Powers 1 to {self._order}")
        if not terms:
            terms = ["Empty"]
        terms_str = ",".join(terms)
        return f"TimeTrend({terms_str})"
class TimeTrend(TimeTrendDeterministicTerm):
    """
    Constant and time trend deterministic terms

    Parameters
    ----------
    constant : bool
        Flag indicating whether a constant should be included.
    order : int
        A non-negative int containing the powers to include (1, 2, ..., order).

    See Also
    --------
    DeterministicProcess
    Seasonality
    Fourier
    CalendarTimeTrend

    Examples
    --------
    >>> from statsmodels.datasets import sunspots
    >>> from statsmodels.tsa.deterministic import TimeTrend
    >>> data = sunspots.load_pandas().data
    >>> trend_gen = TimeTrend(True, 3)
    >>> trend_gen.in_sample(data.index)
    """

    def __init__(self, constant: bool = True, order: int = 0) -> None:
        super().__init__(constant, order)

    @classmethod
    def from_string(cls, trend: str) -> "TimeTrend":
        """
        Create a TimeTrend from a string description.

        Provided for compatibility with common string names.

        Parameters
        ----------
        trend : {"n", "c", "t", "ct", "ctt"}
            The string representation of the time trend. The terms are:

            * "n": No trend terms
            * "c": A constant only
            * "t": Linear time trend only
            * "ct": A constant and a time trend
            * "ctt": A constant, a time trend and a quadratic time trend

        Returns
        -------
        TimeTrend
            The TimeTrend instance.
        """
        constant = trend.startswith("c")
        order = 0
        if "tt" in trend:
            order = 2
        elif "t" in trend:
            order = 1
        return cls(constant=constant, order=order)

    @Appender(DeterministicTerm.in_sample.__doc__)
    def in_sample(
        self, index: Union[Sequence[Hashable], pd.Index]
    ) -> pd.DataFrame:
        index = self._index_like(index)
        nobs = index.shape[0]
        # In-sample observation locations are numbered from 1.
        locs = np.arange(1, nobs + 1, dtype=np.double)[:, None]
        terms = self._get_terms(locs)
        return pd.DataFrame(terms, columns=self._columns, index=index)

    @Appender(DeterministicTerm.out_of_sample.__doc__)
    def out_of_sample(
        self,
        steps: int,
        index: Union[Sequence[Hashable], pd.Index],
        forecast_index: Optional[Sequence[Hashable]] = None,
    ) -> pd.DataFrame:
        index = self._index_like(index)
        nobs = index.shape[0]
        fcast_index = self._extend_index(index, steps, forecast_index)
        # Forecast locations continue the in-sample numbering at nobs + 1.
        locs = np.arange(nobs + 1, nobs + steps + 1, dtype=np.double)[:, None]
        terms = self._get_terms(locs)
        return pd.DataFrame(terms, columns=self._columns, index=fcast_index)

    @property
    def _eq_attr(self) -> Tuple[Hashable, ...]:
        return self._constant, self._order
class Seasonality(DeterministicTerm):
    """
    Seasonal dummy deterministic terms

    Parameters
    ----------
    period : int
        The length of a full cycle. Must be >= 2.
    initial_period : int
        The seasonal index of the first observation. 1-indexed so must
        be in {1, 2, ..., period}.

    See Also
    --------
    DeterministicProcess
    TimeTrend
    Fourier
    CalendarSeasonality

    Examples
    --------
    Solar data has an 11-year cycle

    >>> from statsmodels.datasets import sunspots
    >>> from statsmodels.tsa.deterministic import Seasonality
    >>> data = sunspots.load_pandas().data
    >>> seas_gen = Seasonality(11)
    >>> seas_gen.in_sample(data.index)

    To start at a season other than 1

    >>> seas_gen = Seasonality(11, initial_period=4)
    >>> seas_gen.in_sample(data.index)
    """

    # Seasonal columns are indicator variables; this flag is read when
    # deciding whether to drop a column to avoid collinearity with a
    # constant term.
    _is_dummy = True

    def __init__(self, period: int, initial_period: int = 1) -> None:
        self._period = required_int_like(period, "period")
        self._initial_period = required_int_like(
            initial_period, "initial_period"
        )
        if period < 2:
            raise ValueError("period must be >= 2")
        if not 1 <= self._initial_period <= period:
            raise ValueError("initial_period must be in {1, 2, ..., period}")

    @property
    def period(self) -> int:
        """The period of the seasonality"""
        return self._period

    @property
    def initial_period(self) -> int:
        """The seasonal index of the first observation"""
        return self._initial_period

    @classmethod
    def from_index(
        cls, index: Union[Sequence[Hashable], pd.DatetimeIndex, pd.PeriodIndex]
    ) -> "Seasonality":
        """
        Construct a seasonality directly from an index using its frequency.

        Parameters
        ----------
        index : {DatetimeIndex, PeriodIndex}
            An index with its frequency (`freq`) set.

        Returns
        -------
        Seasonality
            The initialized Seasonality instance.
        """
        index = cls._index_like(index)
        if isinstance(index, pd.PeriodIndex):
            freq = index.freq
        elif isinstance(index, pd.DatetimeIndex):
            # Fall back to the inferred frequency when freq is not set.
            freq = index.freq if index.freq else index.inferred_freq
        else:
            raise TypeError("index must be a DatetimeIndex or PeriodIndex")
        if freq is None:
            raise ValueError("index must have a freq or inferred_freq set")
        period = freq_to_period(freq)
        return cls(period=period)

    @property
    def _eq_attr(self) -> Tuple[Hashable, ...]:
        # Attributes that define equality/hashing for this term.
        return self._period, self._initial_period

    def __str__(self) -> str:
        return f"Seasonality(period={self._period})"

    @property
    def _columns(self) -> List[str]:
        period = self._period
        columns = []
        for i in range(1, period + 1):
            columns.append(f"s({i},{period})")
        return columns

    @Appender(DeterministicTerm.in_sample.__doc__)
    def in_sample(
        self, index: Union[Sequence[Hashable], pd.Index]
    ) -> pd.DataFrame:
        index = self._index_like(index)
        nobs = index.shape[0]
        period = self._period
        term = np.zeros((nobs, period))
        # Observation 0 belongs to season initial_period; each subsequent
        # observation advances one season, wrapping modulo period.  The
        # strided assignment sets every period-th row in one shot.
        offset = self._initial_period - 1
        for i in range(period):
            col = (i + offset) % period
            term[i::period, col] = 1
        return pd.DataFrame(term, columns=self._columns, index=index)

    @Appender(DeterministicTerm.out_of_sample.__doc__)
    def out_of_sample(
        self,
        steps: int,
        index: Union[Sequence[Hashable], pd.Index],
        forecast_index: Optional[Sequence[Hashable]] = None,
    ) -> pd.DataFrame:
        index = self._index_like(index)
        fcast_index = self._extend_index(index, steps, forecast_index)
        nobs = index.shape[0]
        period = self._period
        term = np.zeros((steps, period))
        # Continue the seasonal cycle from position nobs of the in-sample
        # data so forecasts stay aligned with the historical seasons.
        offset = self._initial_period - 1
        for i in range(period):
            col_loc = (nobs + offset + i) % period
            term[i::period, col_loc] = 1
        return pd.DataFrame(term, columns=self._columns, index=fcast_index)
class FourierDeterministicTerm(DeterministicTerm, ABC):
    """Abstract Base Class for all Fourier Deterministic Terms"""

    def __init__(self, order: int) -> None:
        # "terms" is the name reported by validation errors.
        self._order = required_int_like(order, "terms")

    @property
    def order(self) -> int:
        """The order of the Fourier terms included"""
        return self._order

    def _get_terms(self, locs: np.ndarray) -> np.ndarray:
        """Return the sin/cos pairs evaluated at the supplied locations."""
        scaled = 2 * np.pi * locs.astype(np.double)
        out = np.empty((scaled.shape[0], 2 * self._order))
        # Columns alternate sin/cos for harmonics k = 1, ..., order.
        for k in range(1, self._order + 1):
            out[:, 2 * k - 2] = np.sin(k * scaled)
            out[:, 2 * k - 1] = np.cos(k * scaled)
        return out
class Fourier(FourierDeterministicTerm):
    r"""
    Fourier series deterministic terms

    Parameters
    ----------
    period : float
        The length of a full cycle. May be non-integral, e.g., 365.25 for
        daily data with an annual cycle.
    order : int
        The number of Fourier components to include. Must satisfy
        2 * order <= period.

    See Also
    --------
    DeterministicProcess
    TimeTrend
    Seasonality
    CalendarFourier

    Notes
    -----
    Both a sine and a cosine term are included for each i=1, ..., order

    .. math::

       f_{i,s,t} & = \sin\left(2 \pi i \times \frac{t}{m} \right)  \\
       f_{i,c,t} & = \cos\left(2 \pi i \times \frac{t}{m} \right)

    where m is the length of the period.

    Examples
    --------
    Solar data has an 11-year cycle

    >>> from statsmodels.datasets import sunspots
    >>> from statsmodels.tsa.deterministic import Fourier
    >>> data = sunspots.load_pandas().data
    >>> fourier_gen = Fourier(11, order=2)
    >>> fourier_gen.in_sample(data.index)
    """

    # Fourier columns are continuous-valued, not indicator variables.
    _is_dummy = False

    def __init__(self, period: float, order: int):
        super().__init__(order)
        self._period = float_like(period, "period")
        if 2 * self._order > self._period:
            raise ValueError("2 * order must be <= period")

    @property
    def period(self) -> float:
        """The period of the Fourier terms"""
        return self._period

    @property
    def _columns(self) -> List[str]:
        period = self._period
        # d_or_f presumably renders integral periods without a decimal
        # point -- see the helper's definition to confirm.
        fmt_period = d_or_f(period).strip()
        columns = []
        for i in range(1, self._order + 1):
            for typ in ("sin", "cos"):
                columns.append(f"{typ}({i},{fmt_period})")
        return columns

    @Appender(DeterministicTerm.in_sample.__doc__)
    def in_sample(
        self, index: Union[Sequence[Hashable], pd.Index]
    ) -> pd.DataFrame:
        index = self._index_like(index)
        nobs = index.shape[0]
        # t/m for t = 0, 1, ..., nobs-1; _get_terms applies 2*pi*i.
        terms = self._get_terms(np.arange(nobs) / self._period)
        return pd.DataFrame(terms, index=index, columns=self._columns)

    @Appender(DeterministicTerm.out_of_sample.__doc__)
    def out_of_sample(
        self,
        steps: int,
        index: Union[Sequence[Hashable], pd.Index],
        forecast_index: Optional[Sequence[Hashable]] = None,
    ) -> pd.DataFrame:
        index = self._index_like(index)
        fcast_index = self._extend_index(index, steps, forecast_index)
        nobs = index.shape[0]
        # Continue the in-sample clock: t = nobs, ..., nobs + steps - 1.
        terms = self._get_terms(np.arange(nobs, nobs + steps) / self._period)
        return pd.DataFrame(terms, index=fcast_index, columns=self._columns)

    @property
    def _eq_attr(self) -> Tuple[Hashable, ...]:
        # Attributes that define equality/hashing for this term.
        return self._period, self._order

    def __str__(self) -> str:
        return f"Fourier(period={self._period}, order={self._order})"
class CalendarDeterministicTerm(DeterministicTerm, ABC):
    """Abstract Base Class for calendar deterministic terms"""

    def __init__(self, freq: str) -> None:
        try:
            # Round-trip through date_range to normalize and validate the
            # frequency string into a pandas offset object.
            index = pd.date_range("2020-01-01", freq=freq, periods=1)
            self._freq = index.freq
        except ValueError:
            raise ValueError("freq is not understood by pandas")

    @property
    def freq(self) -> str:
        """The frequency of the deterministic terms"""
        return self._freq.freqstr

    def _compute_ratio(
        self, index: Union[pd.DatetimeIndex, pd.PeriodIndex]
    ) -> np.ndarray:
        # Fraction of the current freq-period elapsed at each time stamp:
        # (time since period start) / (length of the period).
        if isinstance(index, pd.PeriodIndex):
            index = index.to_timestamp()
        delta = index - index.to_period(self._freq).to_timestamp()
        pi = index.to_period(self._freq)
        # Period length computed per-element so irregular period lengths
        # (e.g. months) are handled correctly.
        gap = (pi + 1).to_timestamp() - pi.to_timestamp()
        return to_numpy(delta) / to_numpy(gap)

    def _check_index_type(
        self,
        index: pd.Index,
        allowed: Union[Type, Tuple[Type, ...]] = (
            pd.DatetimeIndex,
            pd.PeriodIndex,
        ),
    ) -> Union[pd.DatetimeIndex, pd.PeriodIndex]:
        # Raise an informative TypeError unless index is one of the
        # allowed pandas index types.
        if isinstance(allowed, type):
            allowed = (allowed,)
        if not isinstance(index, allowed):
            if len(allowed) == 1:
                allowed_types = "a " + allowed[0].__name__
            else:
                # Build "A, B, and C"-style list of allowed type names.
                allowed_types = ", ".join(a.__name__ for a in allowed[:-1])
                if len(allowed) > 2:
                    allowed_types += ","
                allowed_types += " and " + allowed[-1].__name__
            msg = (
                f"{type(self).__name__} terms can only be computed from "
                f"{allowed_types}"
            )
            raise TypeError(msg)
        assert isinstance(index, (pd.DatetimeIndex, pd.PeriodIndex))
        return index
class CalendarFourier(CalendarDeterministicTerm, FourierDeterministicTerm):
    r"""
    Fourier series deterministic terms based on calendar time

    Parameters
    ----------
    freq : str
        A string convertible to a pandas frequency.
    order : int
        The number of Fourier components (sine/cosine pairs) to include.

    See Also
    --------
    DeterministicProcess
    CalendarTimeTrend
    CalendarSeasonality
    Fourier

    Notes
    -----
    Both a sine and a cosine term are included for each i=1, ..., order

    .. math::

       f_{i,s,t} & = \sin\left(2 \pi i \tau_t \right)  \\
       f_{i,c,t} & = \cos\left(2 \pi i \tau_t \right)

    where m is the length of the period and :math:`\tau_t` is the frequency
    normalized time. For example, when freq is "D" then an observation with
    a timestamp of 12:00:00 would have :math:`\tau_t=0.5`.

    Examples
    --------
    Here we simulate irregularly spaced hourly data and construct the calendar
    Fourier terms for the data.

    >>> import numpy as np
    >>> import pandas as pd
    >>> base = pd.Timestamp("2020-1-1")
    >>> gen = np.random.default_rng()
    >>> gaps = np.cumsum(gen.integers(0, 1800, size=1000))
    >>> times = [base + pd.Timedelta(gap, unit="s") for gap in gaps]
    >>> index = pd.DatetimeIndex(pd.to_datetime(times))

    >>> from statsmodels.tsa.deterministic import CalendarFourier
    >>> cal_fourier_gen = CalendarFourier("D", 2)
    >>> cal_fourier_gen.in_sample(index)
    """

    def __init__(self, freq: str, order: int) -> None:
        # super() only reaches CalendarDeterministicTerm.__init__ in the
        # MRO, so the Fourier base must be initialized explicitly.  It
        # validates and stores ``order``, so no further assignment is
        # needed here (a redundant re-validation was removed).
        super().__init__(freq)
        FourierDeterministicTerm.__init__(self, order)

    @property
    def _columns(self) -> List[str]:
        # Labels of the form sin(i,freq=D) / cos(i,freq=D).
        columns = []
        for i in range(1, self._order + 1):
            for typ in ("sin", "cos"):
                columns.append(f"{typ}({i},freq={self._freq.freqstr})")
        return columns

    @Appender(DeterministicTerm.in_sample.__doc__)
    def in_sample(
        self, index: Union[Sequence[Hashable], pd.Index]
    ) -> pd.DataFrame:
        index = self._index_like(index)
        index = self._check_index_type(index)
        # tau_t: elapsed fraction of the current period at each stamp.
        ratio = self._compute_ratio(index)
        terms = self._get_terms(ratio)
        return pd.DataFrame(terms, index=index, columns=self._columns)

    @Appender(DeterministicTerm.out_of_sample.__doc__)
    def out_of_sample(
        self,
        steps: int,
        index: Union[Sequence[Hashable], pd.Index],
        forecast_index: Optional[Sequence[Hashable]] = None,
    ) -> pd.DataFrame:
        index = self._index_like(index)
        fcast_index = self._extend_index(index, steps, forecast_index)
        self._check_index_type(fcast_index)
        assert isinstance(fcast_index, (pd.DatetimeIndex, pd.PeriodIndex))
        ratio = self._compute_ratio(fcast_index)
        terms = self._get_terms(ratio)
        return pd.DataFrame(terms, index=fcast_index, columns=self._columns)

    @property
    def _eq_attr(self) -> Tuple[Hashable, ...]:
        # Attributes that define equality/hashing for this term.
        return self._freq.freqstr, self._order

    def __str__(self) -> str:
        # NOTE(review): reports "Fourier(...)" rather than
        # "CalendarFourier(...)"; kept unchanged for compatibility.
        return f"Fourier(freq={self._freq.freqstr}, order={self._order})"
class CalendarSeasonality(CalendarDeterministicTerm):
    """
    Seasonal dummy deterministic terms based on calendar time

    Parameters
    ----------
    freq : str
        The frequency of the seasonal effect.
    period : str
        The pandas frequency string describing the full period.

    See Also
    --------
    DeterministicProcess
    CalendarTimeTrend
    CalendarFourier
    Seasonality

    Examples
    --------
    Here we simulate irregularly spaced data (in time) and hourly seasonal
    dummies for the data.

    >>> import numpy as np
    >>> import pandas as pd
    >>> base = pd.Timestamp("2020-1-1")
    >>> gen = np.random.default_rng()
    >>> gaps = np.cumsum(gen.integers(0, 1800, size=1000))
    >>> times = [base + pd.Timedelta(gap, unit="s") for gap in gaps]
    >>> index = pd.DatetimeIndex(pd.to_datetime(times))

    >>> from statsmodels.tsa.deterministic import CalendarSeasonality
    >>> cal_seas_gen = CalendarSeasonality("H", "D")
    >>> cal_seas_gen.in_sample(index)
    """

    # Seasonal columns are indicator variables.
    _is_dummy = True

    # Supported period -> {freq: number of dummies in one full period},
    # e.g. a weekly period sampled hourly has 24 * 7 dummies.
    _supported = {
        "W": {"H": 24 * 7, "B": 5, "D": 7},
        "D": {"H": 24},
        "Q": {"M": 3},
        "A": {"M": 12, "Q": 4},
    }

    def __init__(self, freq: str, period: str) -> None:
        # All freqs that appear under any supported period.
        freq_options: Set[str] = set()
        freq_options.update(
            *[list(val.keys()) for val in self._supported.values()]
        )
        period_options = list(self._supported.keys())

        freq = string_like(
            freq, "freq", options=tuple(freq_options), lower=False
        )
        period = string_like(
            period, "period", options=period_options, lower=False
        )
        if freq not in self._supported[period]:
            raise ValueError(
                f"The combination of freq={freq} and "
                f"period={period} is not supported."
            )
        super().__init__(freq)
        self._period = period
        # Strip any anchor suffix (e.g. "W-SUN" -> "W") for table
        # lookups and column labels.
        self._freq_str = self._freq.freqstr.split("-")[0]

    @property
    def freq(self) -> str:
        """The frequency of the deterministic terms"""
        return self._freq.freqstr

    @property
    def period(self) -> str:
        """The full period"""
        return self._period

    def _weekly_to_loc(
        self, index: Union[pd.DatetimeIndex, pd.PeriodIndex]
    ) -> np.ndarray:
        # Position within a week for each observation, in units of freq.
        if self._freq.freqstr == "H":
            return index.hour + 24 * index.dayofweek
        elif self._freq.freqstr == "D":
            return index.dayofweek
        else:  # "B"
            # Business-day freq: verify the index contains no weekend days.
            bdays = pd.bdate_range("2000-1-1", periods=10).dayofweek.unique()
            loc = index.dayofweek
            if not loc.isin(bdays).all():
                raise ValueError(
                    "freq is B but index contains days that are not business "
                    "days."
                )
            return loc

    def _daily_to_loc(
        self, index: Union[pd.DatetimeIndex, pd.PeriodIndex]
    ) -> np.ndarray:
        # Hour of the day (0-23) within a daily period.
        return index.hour

    def _quarterly_to_loc(
        self, index: Union[pd.DatetimeIndex, pd.PeriodIndex]
    ) -> np.ndarray:
        # Month within the quarter (0, 1 or 2).
        return (index.month - 1) % 3

    def _annual_to_loc(
        self, index: Union[pd.DatetimeIndex, pd.PeriodIndex]
    ) -> np.ndarray:
        # Month (0-11) or quarter (0-3) within the year.
        if self._freq.freqstr == "M":
            return index.month - 1
        else:  # "Q"
            return index.quarter - 1

    def _get_terms(
        self, index: Union[pd.DatetimeIndex, pd.PeriodIndex]
    ) -> np.ndarray:
        # One-hot encode each observation's location within the period.
        if self._period == "D":
            locs = self._daily_to_loc(index)
        elif self._period == "W":
            locs = self._weekly_to_loc(index)
        elif self._period == "Q":
            locs = self._quarterly_to_loc(index)
        else:  # "A":
            locs = self._annual_to_loc(index)
        full_cycle = self._supported[self._period][self._freq_str]
        terms = np.zeros((locs.shape[0], full_cycle))
        terms[np.arange(locs.shape[0]), locs] = 1
        return terms

    @property
    def _columns(self) -> List[str]:
        columns = []
        count = self._supported[self._period][self._freq_str]
        for i in range(count):
            columns.append(
                f"s({self._freq_str}={i + 1}, period={self._period})"
            )
        return columns

    @Appender(DeterministicTerm.in_sample.__doc__)
    def in_sample(
        self, index: Union[Sequence[Hashable], pd.Index]
    ) -> pd.DataFrame:
        index = self._index_like(index)
        index = self._check_index_type(index)
        terms = self._get_terms(index)
        return pd.DataFrame(terms, index=index, columns=self._columns)

    @Appender(DeterministicTerm.out_of_sample.__doc__)
    def out_of_sample(
        self,
        steps: int,
        index: Union[Sequence[Hashable], pd.Index],
        forecast_index: Optional[Sequence[Hashable]] = None,
    ) -> pd.DataFrame:
        index = self._index_like(index)
        fcast_index = self._extend_index(index, steps, forecast_index)
        self._check_index_type(fcast_index)
        assert isinstance(fcast_index, (pd.DatetimeIndex, pd.PeriodIndex))
        terms = self._get_terms(fcast_index)
        return pd.DataFrame(terms, index=fcast_index, columns=self._columns)

    @property
    def _eq_attr(self) -> Tuple[Hashable, ...]:
        # Attributes that define equality/hashing for this term.
        return self._period, self._freq_str

    def __str__(self) -> str:
        return f"Seasonal(freq={self._freq_str})"
class CalendarTimeTrend(CalendarDeterministicTerm, TimeTrendDeterministicTerm):
    r"""
    Constant and time trend deterministic terms based on calendar time

    Parameters
    ----------
    freq : str
        A string convertible to a pandas frequency.
    constant : bool
        Flag indicating whether a constant should be included.
    order : int
        A non-negative int containing the powers to include (1, 2, ..., order).
    base_period : {str, pd.Timestamp}, default None
        The base period to use when computing the time stamps. This value is
        treated as 1 and so all other time indices are defined as the number
        of periods since or before this time stamp. If not provided, defaults
        to pandas base period for a PeriodIndex.

    See Also
    --------
    DeterministicProcess
    CalendarFourier
    CalendarSeasonality
    TimeTrend

    Notes
    -----
    The time stamp, :math:`\tau_t`, is the number of periods that have elapsed
    since the base_period. :math:`\tau_t` may be fractional.

    Examples
    --------
    Here we simulate irregularly spaced hourly data and construct the calendar
    time trend terms for the data.

    >>> import numpy as np
    >>> import pandas as pd
    >>> base = pd.Timestamp("2020-1-1")
    >>> gen = np.random.default_rng()
    >>> gaps = np.cumsum(gen.integers(0, 1800, size=1000))
    >>> times = [base + pd.Timedelta(gap, unit="s") for gap in gaps]
    >>> index = pd.DatetimeIndex(pd.to_datetime(times))

    >>> from statsmodels.tsa.deterministic import CalendarTimeTrend
    >>> cal_trend_gen = CalendarTimeTrend("D", True, order=1)
    >>> cal_trend_gen.in_sample(index)

    Next, we normalize using the first time stamp

    >>> cal_trend_gen = CalendarTimeTrend("D", True, order=1,
    ...                                   base_period=index[0])
    >>> cal_trend_gen.in_sample(index)
    """

    def __init__(
        self,
        freq: str,
        constant: bool = True,
        order: int = 0,
        *,
        base_period: Optional[Union[str, DateLike]] = None,
    ) -> None:
        # CalendarDeterministicTerm validates/stores the frequency; the
        # time-trend base must be initialized explicitly (multiple
        # inheritance, super() only reaches the first parent).
        super().__init__(freq)
        TimeTrendDeterministicTerm.__init__(
            self, constant=constant, order=order
        )
        # i8 ordinal of the reference period; 0 leaves the pandas epoch
        # as the implicit base period.
        self._ref_i8 = 0
        if base_period is not None:
            pr = pd.period_range(base_period, periods=1, freq=self._freq)
            self._ref_i8 = pr.asi8[0]
        self._base_period = None if base_period is None else str(base_period)

    @property
    def base_period(self) -> Optional[str]:
        """The base period"""
        return self._base_period

    @classmethod
    def from_string(
        cls,
        freq: str,
        trend: str,
        base_period: Optional[Union[str, DateLike]] = None,
    ) -> "CalendarTimeTrend":
        """
        Create a CalendarTimeTrend from a string description.

        Provided for compatibility with common string names.

        Parameters
        ----------
        freq : str
            A string convertible to a pandas frequency.
        trend : {"n", "c", "t", "ct", "ctt"}
            The string representation of the time trend. The terms are:

            * "n": No trend terms
            * "c": A constant only
            * "t": Linear time trend only
            * "ct": A constant and a time trend
            * "ctt": A constant, a time trend and a quadratic time trend

        base_period : {str, pd.Timestamp}, default None
            The base period to use when computing the time stamps. This value
            is treated as 1 and so all other time indices are defined as the
            number of periods since or before this time stamp. If not
            provided, defaults to pandas base period for a PeriodIndex.

        Returns
        -------
        CalendarTimeTrend
            The CalendarTimeTrend instance.
        """
        constant = trend.startswith("c")
        order = 0
        if "tt" in trend:
            order = 2
        elif "t" in trend:
            order = 1
        return cls(freq, constant, order, base_period=base_period)

    def _terms(
        self, index: Union[pd.DatetimeIndex, pd.PeriodIndex], ratio: np.ndarray
    ) -> pd.DataFrame:
        """Build the trend terms from integer period counts plus the
        fractional within-period ratio."""
        if isinstance(index, pd.DatetimeIndex):
            index = index.to_period(self._freq)
        index_i8 = index.asi8
        # Shift so the reference period maps to 1 (time is 1-based).
        index_i8 = index_i8 - self._ref_i8 + 1
        time = index_i8.astype(np.double) + ratio
        time = time[:, None]
        terms = self._get_terms(time)
        return pd.DataFrame(terms, columns=self._columns, index=index)

    @Appender(DeterministicTerm.in_sample.__doc__)
    def in_sample(
        self, index: Union[Sequence[Hashable], pd.Index]
    ) -> pd.DataFrame:
        index = self._index_like(index)
        index = self._check_index_type(index)
        ratio = self._compute_ratio(index)
        return self._terms(index, ratio)

    @Appender(DeterministicTerm.out_of_sample.__doc__)
    def out_of_sample(
        self,
        steps: int,
        index: Union[Sequence[Hashable], pd.Index],
        forecast_index: Optional[Sequence[Hashable]] = None,
    ) -> pd.DataFrame:
        index = self._index_like(index)
        fcast_index = self._extend_index(index, steps, forecast_index)
        self._check_index_type(fcast_index)
        assert isinstance(fcast_index, (pd.PeriodIndex, pd.DatetimeIndex))
        ratio = self._compute_ratio(fcast_index)
        return self._terms(fcast_index, ratio)

    @property
    def _eq_attr(self) -> Tuple[Hashable, ...]:
        # Attributes that define equality/hashing for this term.
        attr: Tuple[Hashable, ...] = (
            self._constant,
            self._order,
            self._freq.freqstr,
        )
        if self._base_period is not None:
            attr += (self._base_period,)
        return attr

    def __str__(self) -> str:
        value = TimeTrendDeterministicTerm.__str__(self)
        value = "Calendar" + value[:-1] + f", freq={self._freq.freqstr})"
        if self._base_period is not None:
            # BUG FIX: the base_period field was previously concatenated
            # directly after freq with no ", " separator.
            value = value[:-1] + f", base_period={self._base_period})"
        return value
class DeterministicProcess:
    """
    Container class for deterministic terms.

    Directly supports constants, time trends, and either seasonal dummies or
    fourier terms for a single cycle. Additional deterministic terms beyond
    the set that can be directly initialized through the constructor can be
    added.

    Parameters
    ----------
    index : {Sequence[Hashable], pd.Index}
        The index of the process. Should usually be the "in-sample" index when
        used in forecasting applications.
    period : {float, int}, default None
        The period of the seasonal or fourier components. Must be an int for
        seasonal dummies. If not provided, freq is read from index if
        available.
    constant : bool, default False
        Whether to include a constant.
    order : int, default 0
        The order of the time trend to include. For example, 2 will include
        both linear and quadratic terms. 0 excludes time trend terms.
    seasonal : bool = False
        Whether to include seasonal dummies
    fourier : int = 0
        The order of the fourier terms to include.
    additional_terms : Sequence[DeterministicTerm]
        A sequence of additional deterministic terms to include in the process.
    drop : bool, default False
        A flag indicating to check for perfect collinearity and to drop any
        linearly dependent terms.

    See Also
    --------
    TimeTrend
    Seasonality
    Fourier
    CalendarTimeTrend
    CalendarSeasonality
    CalendarFourier

    Examples
    --------
    >>> from statsmodels.tsa.deterministic import DeterministicProcess
    >>> from pandas import date_range
    >>> index = date_range("2000-1-1", freq="M", periods=240)

    First a determinstic process with a constant and quadratic time trend.

    >>> dp = DeterministicProcess(index, constant=True, order=2)
    >>> dp.in_sample().head(3)
                const  trend  trend_squared
    2000-01-31    1.0    1.0            1.0
    2000-02-29    1.0    2.0            4.0
    2000-03-31    1.0    3.0            9.0

    Seasonal dummies are included by setting seasonal to True.

    >>> dp = DeterministicProcess(index, constant=True, seasonal=True)
    >>> dp.in_sample().iloc[:3,:5]
                const  s(2,12)  s(3,12)  s(4,12)  s(5,12)
    2000-01-31    1.0      0.0      0.0      0.0      0.0
    2000-02-29    1.0      1.0      0.0      0.0      0.0
    2000-03-31    1.0      0.0      1.0      0.0      0.0

    Fourier components can be used to alternatively capture seasonal patterns,

    >>> dp = DeterministicProcess(index, constant=True, fourier=2)
    >>> dp.in_sample().head(3)
                const  sin(1,12)  cos(1,12)  sin(2,12)  cos(2,12)
    2000-01-31    1.0   0.000000   1.000000   0.000000        1.0
    2000-02-29    1.0   0.500000   0.866025   0.866025        0.5
    2000-03-31    1.0   0.866025   0.500000   0.866025       -0.5

    Multiple Seasonalities can be captured using additional terms.

    >>> from statsmodels.tsa.deterministic import Fourier
    >>> index = date_range("2000-1-1", freq="D", periods=5000)
    >>> fourier = Fourier(period=365.25, order=1)
    >>> dp = DeterministicProcess(index, period=3, constant=True,
    ...                           seasonal=True, additional_terms=[fourier])
    >>> dp.in_sample().head(3)
                const  s(2,3)  s(3,3)  sin(1,365.25)  cos(1,365.25)
    2000-01-01    1.0     0.0     0.0       0.000000       1.000000
    2000-01-02    1.0     1.0     0.0       0.017202       0.999852
    2000-01-03    1.0     0.0     1.0       0.034398       0.999408
    """

    def __init__(
        self,
        index: Union[Sequence[Hashable], pd.Index],
        *,
        period: Optional[Union[float, int]] = None,
        constant: bool = False,
        order: int = 0,
        seasonal: bool = False,
        fourier: int = 0,
        additional_terms: Sequence[DeterministicTerm] = (),
        drop: bool = False,
    ):
        if not isinstance(index, pd.Index):
            index = pd.Index(index)
        self._index = index
        self._deterministic_terms: List[DeterministicTerm] = []
        # Set by _validate_index: whether out-of-sample extension of the
        # index is supported, and the index frequency when available.
        self._extendable = False
        self._index_freq = None
        self._validate_index()
        period = float_like(period, "period", optional=True)
        self._constant = constant = bool_like(constant, "constant")
        self._order = required_int_like(order, "order")
        self._seasonal = seasonal = bool_like(seasonal, "seasonal")
        self._fourier = required_int_like(fourier, "fourier")
        additional_terms = tuple(additional_terms)
        self._cached_in_sample = None
        self._drop = bool_like(drop, "drop")
        self._additional_terms = additional_terms
        if constant or order:
            self._deterministic_terms.append(TimeTrend(constant, order))
        if seasonal and fourier:
            # NOTE(review): the message below likely intends "cannot be
            # initialized" -- confirm and fix the wording upstream.
            raise ValueError(
                """seasonal and fourier can be initialized through the \
constructor since these will be necessarily perfectly collinear. Instead, \
you can pass additional components using the additional_terms input."""
            )
        if (seasonal or fourier) and period is None:
            # NOTE(review): the inner check is redundant; the outer
            # condition already guarantees period is None here.
            if period is None:
                self._period = period = freq_to_period(self._index_freq)
        if seasonal:
            period = required_int_like(period, "period")
            self._deterministic_terms.append(Seasonality(period))
        elif fourier:
            period = float_like(period, "period")
            assert period is not None
            self._deterministic_terms.append(Fourier(period, order=fourier))
        for term in additional_terms:
            if not isinstance(term, DeterministicTerm):
                # NOTE(review): "subsclasses" is a typo ("subclasses") in
                # this user-facing message.
                raise TypeError(
                    "All additional terms must be instances of subsclasses "
                    "of DeterministicTerm"
                )
            if term not in self._deterministic_terms:
                self._deterministic_terms.append(term)
            else:
                raise ValueError(
                    "One or more terms in additional_terms has been added "
                    "through the parameters of the constructor. Terms must "
                    "be unique."
                )
        self._period = period
        # Columns kept after collinearity dropping; populated by
        # in_sample() and reused by out_of_sample().
        self._retain_cols: Optional[List[Hashable]] = None

    @property
    def index(self) -> pd.Index:
        """The index of the process"""
        return self._index

    @property
    def terms(self) -> List[DeterministicTerm]:
        """The deterministic terms included in the process"""
        return self._deterministic_terms

    def _adjust_dummies(self, terms: List[pd.DataFrame]) -> List[pd.DataFrame]:
        """Drop the first column of dummy term blocks that would be
        collinear with a constant or with an earlier dummy block."""
        has_const: Optional[bool] = None
        for dterm in self._deterministic_terms:
            if isinstance(dterm, (TimeTrend, CalendarTimeTrend)):
                has_const = has_const or dterm.constant
        if has_const is None:
            # No trend term present: detect a constant-valued, nonzero
            # column in any of the generated term blocks.
            has_const = False
            for term in terms:
                const_col = (term == term.iloc[0]).all() & (term.iloc[0] != 0)
                has_const = has_const or const_col.any()
        drop_first = has_const
        for i, dterm in enumerate(self._deterministic_terms):
            is_dummy = dterm.is_dummy
            if is_dummy and drop_first:
                # drop first
                terms[i] = terms[i].iloc[:, 1:]
            # Any dummy block makes later dummy blocks collinear with it,
            # so they must also drop a column.
            drop_first = drop_first or is_dummy
        return terms

    def _remove_zeros_ones(self, terms: pd.DataFrame) -> pd.DataFrame:
        """Remove all-zero columns and all but the first constant column."""
        all_zero = np.all(terms == 0, axis=0)
        if np.any(all_zero):
            terms = terms.loc[:, ~all_zero]
        is_constant = terms.max(axis=0) == terms.min(axis=0)
        if np.sum(is_constant) > 1:
            # Retain first
            const_locs = np.where(is_constant)[0]
            is_constant[const_locs[:1]] = False
            terms = terms.loc[:, ~is_constant]
        return terms

    @Appender(DeterministicTerm.in_sample.__doc__)
    def in_sample(self) -> pd.DataFrame:
        # Results are cached since the in-sample terms are fixed.
        if self._cached_in_sample is not None:
            return self._cached_in_sample
        index = self._index
        if not self._deterministic_terms:
            return pd.DataFrame(np.empty((index.shape[0], 0)), index=index)
        raw_terms = []
        for term in self._deterministic_terms:
            raw_terms.append(term.in_sample(index))

        raw_terms = self._adjust_dummies(raw_terms)
        terms: pd.DataFrame = pd.concat(raw_terms, axis=1)
        terms = self._remove_zeros_ones(terms)
        if self._drop:
            # Use a pivoted QR to determine the numerical rank, then
            # search for the left-most subset of columns that spans the
            # column space, preferring original column order.
            terms_arr = to_numpy(terms)
            res = qr(terms_arr, mode="r", pivoting=True)
            r = res[0]
            p = res[-1]
            abs_diag = np.abs(np.diag(r))
            tol = abs_diag[0] * terms_arr.shape[1] * np.finfo(float).eps
            rank = int(np.sum(abs_diag > tol))
            rpx = r.T @ terms_arr
            keep = [0]
            last_rank = 1
            # Find the left-most columns that produce full rank
            for i in range(1, terms_arr.shape[1]):
                curr_rank = np.linalg.matrix_rank(rpx[: i + 1, : i + 1])
                if curr_rank > last_rank:
                    keep.append(i)
                    last_rank = curr_rank
                if curr_rank == rank:
                    break
            if len(keep) == rank:
                terms = terms.iloc[:, keep]
            else:
                # Fall back to the QR pivot ordering when the left-most
                # search did not recover a full-rank subset.
                terms = terms.iloc[:, np.sort(p[:rank])]
        self._retain_cols = terms.columns
        self._cached_in_sample = terms
        return terms

    @Appender(DeterministicTerm.out_of_sample.__doc__)
    def out_of_sample(
        self,
        steps: int,
        forecast_index: Optional[Union[Sequence[Hashable], pd.Index]] = None,
    ) -> pd.DataFrame:
        steps = required_int_like(steps, "steps")
        if self._drop and self._retain_cols is None:
            # Populate _retain_cols so the same columns are retained.
            self.in_sample()
        index = self._index
        if not self._deterministic_terms:
            # NOTE(review): this returns an empty frame shaped by the
            # in-sample index; for out-of-sample it should presumably
            # have ``steps`` rows on the forecast index -- TODO confirm.
            return pd.DataFrame(np.empty((index.shape[0], 0)), index=index)
        raw_terms = []
        for term in self._deterministic_terms:
            raw_terms.append(term.out_of_sample(steps, index, forecast_index))
        terms: pd.DataFrame = pd.concat(raw_terms, axis=1)
        assert self._retain_cols is not None
        if terms.shape[1] != len(self._retain_cols):
            terms = terms[self._retain_cols]
        return terms

    def _extend_time_index(
        self,
        stop: pd.Timestamp,
    ) -> Union[pd.DatetimeIndex, pd.PeriodIndex]:
        """Extend the process index up to and including ``stop``."""
        index = self._index
        if isinstance(index, pd.PeriodIndex):
            return pd.period_range(index[0], end=stop, freq=index.freq)
        return pd.date_range(start=index[0], end=stop, freq=self._index_freq)

    def _range_from_range_index(self, start: int, stop: int) -> pd.DataFrame:
        """Terms for positions [start, stop) of a Range/integer index."""
        index = self._index
        is_int64_index = is_int_index(index)
        assert isinstance(index, pd.RangeIndex) or is_int64_index
        if start < index[0]:
            raise ValueError(START_BEFORE_INDEX_ERR)
        if isinstance(index, pd.RangeIndex):
            idx_step = index.step
        else:
            # Plain integer indexes: infer the step from the data.
            idx_step = np.diff(index).max() if len(index) > 1 else 1
        if idx_step != 1 and ((start - index[0]) % idx_step) != 0:
            raise ValueError(
                f"The step of the index is not 1 (actual step={idx_step})."
                " start must be in the sequence that would have been "
                "generated by the index."
            )
        if is_int64_index:
            new_idx = pd.Index(np.arange(start, stop))
        else:
            new_idx = pd.RangeIndex(start, stop, step=idx_step)
        if new_idx[-1] <= self._index[-1]:
            # In-sample only
            in_sample = self.in_sample()
            in_sample = in_sample.loc[new_idx]
            return in_sample
        elif new_idx[0] > self._index[-1]:
            # Out of-sample only
            next_value = index[-1] + idx_step
            if new_idx[0] != next_value:
                # Forecast from the first post-sample value, then subset
                # to the requested locations.
                tmp = pd.RangeIndex(next_value, stop, step=idx_step)
                oos = self.out_of_sample(tmp.shape[0], forecast_index=tmp)
                return oos.loc[new_idx]
            return self.out_of_sample(new_idx.shape[0], forecast_index=new_idx)
        # Using some from each in and out of sample
        in_sample_loc = new_idx <= self._index[-1]
        in_sample_idx = new_idx[in_sample_loc]
        out_of_sample_idx = new_idx[~in_sample_loc]
        in_sample_exog = self.in_sample().loc[in_sample_idx]
        oos_exog = self.out_of_sample(
            steps=out_of_sample_idx.shape[0], forecast_index=out_of_sample_idx
        )
        return pd.concat([in_sample_exog, oos_exog], axis=0)

    def _range_from_time_index(
        self, start: pd.Timestamp, stop: pd.Timestamp
    ) -> pd.DataFrame:
        """Terms for the inclusive span [start, stop] of a time index."""
        index = self._index
        if isinstance(self._index, pd.PeriodIndex):
            # Align the endpoints with the PeriodIndex frequency.
            if isinstance(start, pd.Timestamp):
                start = start.to_period(freq=self._index_freq)
            if isinstance(stop, pd.Timestamp):
                stop = stop.to_period(freq=self._index_freq)
        if start < index[0]:
            raise ValueError(START_BEFORE_INDEX_ERR)
        if stop <= self._index[-1]:
            return self.in_sample().loc[start:stop]
        new_idx = self._extend_time_index(stop)
        oos_idx = new_idx[new_idx > index[-1]]
        oos = self.out_of_sample(oos_idx.shape[0], oos_idx)
        if start >= oos_idx[0]:
            return oos.loc[start:stop]
        # Combine the in-sample and out-of-sample segments.
        both = pd.concat([self.in_sample(), oos], axis=0)
        return both.loc[start:stop]

    def _int_to_timestamp(self, value: int, name: str) -> pd.Timestamp:
        """Map an integer position to its time stamp, extending past the
        end of the index when the position is out of sample."""
        if value < 0:
            raise ValueError(f"{name} must be non-negative.")
        if value < self._index.shape[0]:
            return self._index[value]
        # Number of periods needed beyond the final in-sample location.
        add_periods = value - (self._index.shape[0] - 1) + 1
        index = self._index
        if isinstance(self._index, pd.PeriodIndex):
            pr = pd.period_range(
                index[-1], freq=self._index_freq, periods=add_periods
            )
            return pr[-1].to_timestamp()
        dr = pd.date_range(
            index[-1], freq=self._index_freq, periods=add_periods
        )
        return dr[-1]

    def range(
        self,
        start: Union[IntLike, DateLike, str],
        stop: Union[IntLike, DateLike, str],
    ) -> pd.DataFrame:
        """
        Deterministic terms spanning a range of observations

        Parameters
        ----------
        start : {int, str, dt.datetime, pd.Timestamp, np.datetime64}
            The first observation.
        stop : {int, str, dt.datetime, pd.Timestamp, np.datetime64}
            The final observation. Inclusive to match most prediction
            function in statsmodels.

        Returns
        -------
        DataFrame
            A data frame of deterministic terms
        """
        if not self._extendable:
            raise TypeError(
                """The index in the deterministic process does not \
support extension. Only PeriodIndex, DatetimeIndex with a frequency, \
RangeIndex, and integral Indexes that start at 0 and have only unit \
differences can be extended when producing out-of-sample forecasts.
"""
            )
        if type(self._index) in (pd.RangeIndex,) or is_int_index(self._index):
            start = required_int_like(start, "start")
            stop = required_int_like(stop, "stop")
            # Add 1 to ensure that the end point is inclusive
            stop += 1
            return self._range_from_range_index(start, stop)
        # Time-based index: convert integer positions to time stamps and
        # normalize everything else through pd.Timestamp.
        if isinstance(start, (int, np.integer)):
            start = self._int_to_timestamp(start, "start")
        else:
            start = pd.Timestamp(start)
        if isinstance(stop, (int, np.integer)):
            stop = self._int_to_timestamp(stop, "stop")
        else:
            stop = pd.Timestamp(stop)
        return self._range_from_time_index(start, stop)

    def _validate_index(self) -> None:
        """Record the index frequency and whether it can be extended."""
        if isinstance(self._index, pd.PeriodIndex):
            self._index_freq = self._index.freq
            self._extendable = True
        elif isinstance(self._index, pd.DatetimeIndex):
            self._index_freq = self._index.freq or self._index.inferred_freq
            self._extendable = self._index_freq is not None
        elif isinstance(self._index, pd.RangeIndex):
            self._extendable = True
        elif is_int_index(self._index):
            # Integer indexes are only extendable when they look like
            # 0, 1, 2, ... with unit steps.
            self._extendable = self._index[0] == 0 and np.all(
                np.diff(self._index) == 1
            )

    def apply(self, index):
        """
        Create an identical deterministic process with a different index

        Parameters
        ----------
        index : index_like
            An index-like object. If not an index, it is converted to an
            index.

        Returns
        -------
        DeterministicProcess
            The deterministic process applied to a different index
        """
        return DeterministicProcess(
            index,
            period=self._period,
            constant=self._constant,
            order=self._order,
            seasonal=self._seasonal,
            fourier=self._fourier,
            additional_terms=self._additional_terms,
            drop=self._drop,
        )
| {
"content_hash": "50c99c8609e579ada516068ecd841d63",
"timestamp": "",
"source": "github",
"line_count": 1480,
"max_line_length": 79,
"avg_line_length": 34.8445945945946,
"alnum_prop": 0.5739189451231336,
"repo_name": "bashtage/statsmodels",
"id": "b75c3dd8766eec32ee42ba3c75a9565ca6bec858",
"size": "51570",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "statsmodels/tsa/deterministic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "625"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "Cython",
"bytes": "225838"
},
{
"name": "Fortran",
"bytes": "16671"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "100525"
},
{
"name": "Python",
"bytes": "14433387"
},
{
"name": "R",
"bytes": "106569"
},
{
"name": "Shell",
"bytes": "25329"
},
{
"name": "Stata",
"bytes": "50129"
}
],
"symlink_target": ""
} |
import os
import sys


def main():
    """Run Django's command-line utility using this project's settings."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ReaderSS.settings")
    # Imported lazily so importing this module never requires Django.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
| {
"content_hash": "41b8ed5fb29bbf215702cad7ba62e8bf",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.7117903930131004,
"repo_name": "polarkac/ReaderSS",
"id": "691e57ec89689f1280e8e0b80328a7e8af4003ee",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ReaderSS/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "202"
},
{
"name": "Python",
"bytes": "20080"
}
],
"symlink_target": ""
} |
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponseRedirect
from django.views.generic import list_detail
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from basic.profiles.models import *
from basic.profiles.forms import *
def profile_list(request):
    # Paginated generic list view over every profile.
    queryset = Profile.objects.all()
    return list_detail.object_list(
        request,
        queryset=queryset,
        paginate_by=20,
    )
profile_list.__doc__ = list_detail.object_list.__doc__
def profile_detail(request, username):
    """Display a single user's profile.

    Raises Http404 both for an unknown username and for a user with no
    profile row.
    """
    try:
        user = User.objects.get(username__iexact=username)
    except User.DoesNotExist:
        raise Http404
    try:
        profile = Profile.objects.get(user=user)
    except Profile.DoesNotExist:
        # Previously an uncaught DoesNotExist here produced a 500 for any
        # user without a profile; treat it as a 404 like the user lookup.
        raise Http404
    context = {'object': profile}
    return render_to_response('profiles/profile_detail.html', context, context_instance=RequestContext(request))
@login_required
def profile_edit(request, template_name='profiles/profile_form.html'):
    """Edit the current user's profile, account details, services and links.

    GET renders unbound forms; POST validates all four forms together and
    redirects to the profile detail page on success.
    """
    # Test the HTTP method rather than the truthiness of request.POST:
    # a POST submission with no form fields has an empty (falsy) QueryDict
    # and would previously have been handled like a GET.
    if request.method == 'POST':
        profile = Profile.objects.get(user=request.user)
        profile_form = ProfileForm(request.POST, request.FILES, instance=profile)
        user_form = UserForm(request.POST, instance=request.user)
        service_formset = ServiceFormSet(request.POST, instance=profile)
        link_formset = LinkFormSet(request.POST, instance=profile)
        if profile_form.is_valid() and user_form.is_valid() and service_formset.is_valid() and link_formset.is_valid():
            profile_form.save()
            user_form.save()
            service_formset.save()
            link_formset.save()
            return HttpResponseRedirect(reverse('profile_detail', kwargs={'username': request.user.username}))
        else:
            # Re-render with the bound forms so validation errors display.
            context = {
                'profile_form': profile_form,
                'user_form': user_form,
                'service_formset': service_formset,
                'link_formset': link_formset
            }
    else:
        profile = Profile.objects.get(user=request.user)
        service_formset = ServiceFormSet(instance=profile)
        link_formset = LinkFormSet(instance=profile)
        context = {
            'profile_form': ProfileForm(instance=profile),
            'user_form': UserForm(instance=request.user),
            'service_formset': service_formset,
            'link_formset': link_formset
        }
return render_to_response(template_name, context, context_instance=RequestContext(request)) | {
"content_hash": "4415217fb3094ccc3789d48c4f8ae54e",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 119,
"avg_line_length": 39.292307692307695,
"alnum_prop": 0.6714956930305404,
"repo_name": "rg3915/django-basic-apps",
"id": "612f0661d77f399ecc04e2602ab292b68168933b",
"size": "2554",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "basic/profiles/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1122"
},
{
"name": "HTML",
"bytes": "90745"
},
{
"name": "JavaScript",
"bytes": "1420"
},
{
"name": "Python",
"bytes": "195862"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import unittest
from airflow import configuration, DAG
from airflow.contrib.operators import mlengine_operator_utils
from airflow.contrib.operators.mlengine_operator_utils import create_evaluate_ops
from airflow.exceptions import AirflowException
from airflow.version import version
from mock import ANY
from mock import patch
# Fixed execution date used for every DAG constructed in these tests.
DEFAULT_DATE = datetime.datetime(2017, 6, 6)
# Airflow version as a label-safe string, e.g. '1.9.0' -> 'v1-9-0'.
TEST_VERSION = 'v{}'.format(version.replace('.', '-').replace('+', '-'))
class CreateEvaluateOpsTest(unittest.TestCase):
    """Tests for mlengine_operator_utils.create_evaluate_ops."""
    # A batch-prediction job spec that deliberately omits the model origin
    # (uri/name/version); tests copy and complete it as required.
    INPUT_MISSING_ORIGIN = {
        'dataFormat': 'TEXT',
        'inputPaths': ['gs://legal-bucket/fake-input-path/*'],
        'outputPath': 'gs://legal-bucket/fake-output-path',
        'region': 'us-east1',
        'versionName': 'projects/test-project/models/test_model/versions/test_version',
    }
    # A successful MLEngine job response missing 'predictionInput'; tests
    # fill that key in before using it as a mocked create_job return value.
    SUCCESS_MESSAGE_MISSING_INPUT = {
        'jobId': 'eval_test_prediction',
        'predictionOutput': {
            'outputPath': 'gs://fake-output-path',
            'predictionCount': 5000,
            'errorCount': 0,
            'nodeHours': 2.78
        },
        'state': 'SUCCEEDED'
    }
    def setUp(self):
        # Build a DAG whose default_args supply the model origin implicitly.
        super(CreateEvaluateOpsTest, self).setUp()
        configuration.load_test_config()
        self.dag = DAG(
            'test_dag',
            default_args={
                'owner': 'airflow',
                'start_date': DEFAULT_DATE,
                'end_date': DEFAULT_DATE,
                'project_id': 'test-project',
                'region': 'us-east1',
                'model_name': 'test_model',
                'version_name': 'test_version',
            },
            schedule_interval='@daily')
        # The metric_fn is serialized (dill + base64) by the module under
        # test; encode it the same way for the assertion below.
        self.metric_fn = lambda x: (0.1,)
        self.metric_fn_encoded = mlengine_operator_utils.base64.b64encode(
            mlengine_operator_utils.dill.dumps(self.metric_fn, recurse=True))
    def testSuccessfulRun(self):
        # Exercise all three returned operators (prediction, summary,
        # validation) with their external hooks mocked out.
        input_with_model = self.INPUT_MISSING_ORIGIN.copy()
        pred, summary, validate = create_evaluate_ops(
            task_prefix='eval-test',
            batch_prediction_job_id='eval-test-prediction',
            data_format=input_with_model['dataFormat'],
            input_paths=input_with_model['inputPaths'],
            prediction_path=input_with_model['outputPath'],
            metric_fn_and_keys=(self.metric_fn, ['err']),
            validate_fn=(lambda x: 'err=%.1f' % x['err']),
            dag=self.dag)
        # 1) Prediction operator should submit the expected MLEngine job.
        with patch('airflow.contrib.operators.mlengine_operator.'
                   'MLEngineHook') as mock_mlengine_hook:
            success_message = self.SUCCESS_MESSAGE_MISSING_INPUT.copy()
            success_message['predictionInput'] = input_with_model
            hook_instance = mock_mlengine_hook.return_value
            hook_instance.create_job.return_value = success_message
            result = pred.execute(None)
            mock_mlengine_hook.assert_called_with('google_cloud_default', None)
            hook_instance.create_job.assert_called_once_with(
                'test-project',
                {
                    'jobId': 'eval_test_prediction',
                    'predictionInput': input_with_model,
                },
                ANY)
            self.assertEqual(success_message['predictionOutput'], result)
        # 2) Summary operator should launch the Dataflow summary job with
        # the encoded metric_fn and metric keys.
        with patch('airflow.contrib.operators.dataflow_operator.'
                   'DataFlowHook') as mock_dataflow_hook:
            hook_instance = mock_dataflow_hook.return_value
            hook_instance.start_python_dataflow.return_value = None
            summary.execute(None)
            mock_dataflow_hook.assert_called_with(
                gcp_conn_id='google_cloud_default', delegate_to=None, poll_sleep=10)
            hook_instance.start_python_dataflow.assert_called_once_with(
                'eval-test-summary',
                {
                    'prediction_path': 'gs://legal-bucket/fake-output-path',
                    'labels': {'airflow-version': TEST_VERSION},
                    'metric_keys': 'err',
                    'metric_fn_encoded': self.metric_fn_encoded,
                },
                'airflow.contrib.operators.mlengine_prediction_summary',
                ['-m'])
        # 3) Validation operator should read the summary JSON from GCS and
        # return the validate_fn's result.
        with patch('airflow.contrib.operators.mlengine_operator_utils.'
                   'GoogleCloudStorageHook') as mock_gcs_hook:
            hook_instance = mock_gcs_hook.return_value
            hook_instance.download.return_value = '{"err": 0.9, "count": 9}'
            result = validate.execute({})
            hook_instance.download.assert_called_once_with(
                'legal-bucket', 'fake-output-path/prediction.summary.json')
            self.assertEqual('err=0.9', result)
    def testFailures(self):
        # A DAG without model defaults, so the model origin must be passed
        # explicitly -- used to provoke the validation errors below.
        dag = DAG(
            'test_dag',
            default_args={
                'owner': 'airflow',
                'start_date': DEFAULT_DATE,
                'end_date': DEFAULT_DATE,
                'project_id': 'test-project',
                'region': 'us-east1',
            },
            schedule_interval='@daily')
        input_with_model = self.INPUT_MISSING_ORIGIN.copy()
        other_params_but_models = {
            'task_prefix': 'eval-test',
            'batch_prediction_job_id': 'eval-test-prediction',
            'data_format': input_with_model['dataFormat'],
            'input_paths': input_with_model['inputPaths'],
            'prediction_path': input_with_model['outputPath'],
            'metric_fn_and_keys': (self.metric_fn, ['err']),
            'validate_fn': (lambda x: 'err=%.1f' % x['err']),
            'dag': dag,
        }
        # No model origin at all.
        with self.assertRaisesRegexp(ValueError, 'Missing model origin'):
            _ = create_evaluate_ops(**other_params_but_models)
        # Conflicting origins: uri + name, then uri + version.
        with self.assertRaisesRegexp(ValueError, 'Ambiguous model origin'):
            _ = create_evaluate_ops(model_uri='abc', model_name='cde',
                                    **other_params_but_models)
        with self.assertRaisesRegexp(ValueError, 'Ambiguous model origin'):
            _ = create_evaluate_ops(model_uri='abc', version_name='vvv',
                                    **other_params_but_models)
        # Non-callable metric_fn / validate_fn must be rejected.
        with self.assertRaisesRegexp(AirflowException,
                                     '`metric_fn` param must be callable'):
            params = other_params_but_models.copy()
            params['metric_fn_and_keys'] = (None, ['abc'])
            _ = create_evaluate_ops(model_uri='gs://blah', **params)
        with self.assertRaisesRegexp(AirflowException,
                                     '`validate_fn` param must be callable'):
            params = other_params_but_models.copy()
            params['validate_fn'] = None
            _ = create_evaluate_ops(model_uri='gs://blah', **params)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "f01ae7ce5cbbe9dda92d00fea72666b1",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 87,
"avg_line_length": 40.88165680473373,
"alnum_prop": 0.5656390215660733,
"repo_name": "yati-sagade/incubator-airflow",
"id": "c8f6fb5544f64a572b81e0dcf70fd81d79ce6768",
"size": "7695",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/contrib/operators/test_mlengine_operator_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57054"
},
{
"name": "HTML",
"bytes": "152530"
},
{
"name": "JavaScript",
"bytes": "1364571"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2828163"
},
{
"name": "Shell",
"bytes": "34436"
}
],
"symlink_target": ""
} |
from google.cloud import contentwarehouse_v1
# NOTE(review): only the "[END ...]" region tag is present below; the
# matching "[START ...]" tag appears to have been dropped -- confirm.
async def sample_create_document_link():
    """Create a document link using the async DocumentLinkService client."""
    # Create a client
    client = contentwarehouse_v1.DocumentLinkServiceAsyncClient()
    # Initialize request argument(s)
    request = contentwarehouse_v1.CreateDocumentLinkRequest(
        parent="parent_value",
    )
    # Make the request
    response = await client.create_document_link(request=request)
    # Handle the response
    print(response)
# [END contentwarehouse_v1_generated_DocumentLinkService_CreateDocumentLink_async]
| {
"content_hash": "4db0ad7cc29762528308d073624eeca3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 82,
"avg_line_length": 28.05263157894737,
"alnum_prop": 0.7467166979362101,
"repo_name": "googleapis/google-cloud-python",
"id": "af966edbb46db53d1b4dcf3646c10b10106ac7ab",
"size": "1953",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "packages/google-cloud-contentwarehouse/samples/generated_samples/contentwarehouse_v1_generated_document_link_service_create_document_link_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2895"
},
{
"name": "Python",
"bytes": "5620713"
},
{
"name": "Shell",
"bytes": "51704"
}
],
"symlink_target": ""
} |
"""Fetch a new MANIFEST and install it if its hash checks out."""
import sys

from get_manifest import *
from check_hash import *

# Download the candidate manifest named on the command line.
get_MANIFEST(sys.argv[1])
if check_hashes("MANIFEST", "MANIFEST_temp"):
    # Context managers guarantee both files are flushed and closed even on
    # error; the original leaked both handles (the write was never closed).
    with open("MANIFEST_temp", 'rb') as temp, open("MANIFEST", 'wb') as manifest:
        manifest.write(temp.read())
| {
"content_hash": "e94a09a615f151b1dde076af292f00c1",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 45,
"avg_line_length": 23.09090909090909,
"alnum_prop": 0.6653543307086615,
"repo_name": "monzum/tuf-legacy",
"id": "9250e7b92f3e7173e3e555d6899eae5813d84b5d",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/client/get_files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "109811"
},
{
"name": "Python",
"bytes": "703260"
},
{
"name": "Shell",
"bytes": "867"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from core import views as core_views
from api.resources import OrderableResource, PostResource, SearchResource
from core.sitemap import IndexSitemap, BlogSitemap, StaticSitemap
# Sitemap sections served from /sitemap.xml (see the entry in urlpatterns).
sitemaps = {"index": IndexSitemap, "blog": BlogSitemap, "static": StaticSitemap}
urlpatterns = [
    url(r"^$", core_views.index, name="index"),
    url(r"^email/(?P<email_id>\d+)/$", core_views.email, name="email"),
    # Flat pages rendered by a single static view, keyed by "page".
    url(r"^about/", core_views.static, {"page": "about"}, name="about"),
    url(r"^resume/", core_views.static, {"page": "resume"}, name="resume"),
    url(r"^copyrights/", core_views.static, {"page": "copyrights"}, name="copyrights"),
    # API urls
    url(r"^api/posts/?$", PostResource.as_view()),
    url(r"^api/orderable/?$", OrderableResource.as_view()),
    url(r"^api/search/?$", SearchResource.as_view()),
    # Blog urls
    url(r"^blog/", include("blog.urls"), name="blog"),
    # Gallery urls
    url(r"^gallery/", include("gallery.urls")),
    # Profile urls
    url(r"^profile/", include("profiles.urls")),
    # URL shortener
    url(r"^sr/", include("shortener.urls")),
    # Admin urls
    url(r"^dashboard/", admin.site.urls),
    # Sitemap
    url(
        r"^sitemap\.xml$",
        sitemap,
        {"sitemaps": sitemaps},
        name="django.contrib.sitemaps.views.sitemap",
    ),
]
if settings.DEBUG:
    from django.conf.urls.static import static
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    # Serve static and media files from development server
    # (in production the web server is expected to serve these instead).
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"content_hash": "e42b24289dc6335d056491ffd5b85657",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 87,
"avg_line_length": 35.84,
"alnum_prop": 0.6702008928571429,
"repo_name": "manti-by/m2",
"id": "a8570421d0a1589026d3f875a94e6220bc248ed5",
"size": "1792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/core/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "13672"
},
{
"name": "Batchfile",
"bytes": "518"
},
{
"name": "CSS",
"bytes": "32328"
},
{
"name": "HTML",
"bytes": "53"
},
{
"name": "JavaScript",
"bytes": "39463"
},
{
"name": "PHP",
"bytes": "562276"
},
{
"name": "Shell",
"bytes": "15560"
}
],
"symlink_target": ""
} |
"""
Submitted on behalf of a third-party: sqlalchemy
"""
from sqlalchemy.ext.mutable import Mutable
class MutableDict(Mutable, dict):
    """A dictionary type that implements :class:`.Mutable`.

    Mutations made through ``__setitem__``, ``__delitem__`` and ``clear``
    notify SQLAlchemy's change-tracking machinery via ``changed()``.

    .. versionadded:: 0.8
    """
    # NOTE(review): other mutators (``update``, ``setdefault``, ``pop``,
    # ``popitem``) are inherited from dict and do NOT emit change events
    # here -- confirm this matches the vendored sqlalchemy version.
    def __setitem__(self, key, value):
        """Detect dictionary set events and emit change events."""
        dict.__setitem__(self, key, value)
        self.changed()
    def __delitem__(self, key):
        """Detect dictionary del events and emit change events."""
        dict.__delitem__(self, key)
        self.changed()
    def clear(self):
        # dict.clear bypasses __delitem__, so emit one change event after.
        dict.clear(self)
        self.changed()
    @classmethod
    def coerce(cls, key, value):
        """Convert plain dictionary to MutableDict."""
        if not isinstance(value, MutableDict):
            if isinstance(value, dict):
                return MutableDict(value)
            # Not a dict at all: defer to the base class's coercion.
            return Mutable.coerce(key, value)
        else:
            return value
    def __getstate__(self):
        # Pickle as a plain dict so listener state is not carried along.
        return dict(self)
    def __setstate__(self, state):
        self.update(state)
| {
"content_hash": "a8edd34c30da4ee0908773fe2ed6125b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 66,
"avg_line_length": 25.523809523809526,
"alnum_prop": 0.5886194029850746,
"repo_name": "redhat-openstack/heat",
"id": "8646a624071f57c2eabad948f8c3658e50a49f7d",
"size": "1903",
"binary": false,
"copies": "2",
"ref": "refs/heads/f22-patches",
"path": "heat/db/sqlalchemy/mutable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4827027"
},
{
"name": "Shell",
"bytes": "26720"
}
],
"symlink_target": ""
} |
from nose.tools import eq_
from kitsune.kbadge.tests import AwardFactory, BadgeFactory
from kitsune.sumo.tests import LocalizingClient, TestCase
from kitsune.sumo.urlresolvers import reverse
class AwardsListTests(TestCase):
    client = LocalizingClient()

    def test_list_empty(self):
        """The awards list view renders when no awards exist."""
        response = self.client.get(reverse('badger.awards_list'), follow=True)
        eq_(200, response.status_code)

    def test_list_with_awards(self):
        """Each award's recipient and detail URL appear in the list page."""
        badge = BadgeFactory()
        awards = [
            AwardFactory(description=text, badge=badge)
            for text in (u'A1 AWARD', u'A2 AWARD', u'A3 AWARD')
        ]
        response = self.client.get(reverse('badger.awards_list'), follow=True)
        eq_(200, response.status_code)
        for award in awards:
            self.assertContains(response, award.user.username)
            self.assertContains(response, award.get_absolute_url())
class AwardDetailsTests(TestCase):
    def test_details_page(self):
        """Smoke test: the award detail template renders successfully."""
        award = AwardFactory(description=u'A1 AWARD')
        response = self.client.get(award.get_absolute_url(), follow=True)
        eq_(200, response.status_code)
| {
"content_hash": "c37def7d64035d7c1e0242ebeaebb218",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 74,
"avg_line_length": 37.21621621621622,
"alnum_prop": 0.6833696441539578,
"repo_name": "mythmon/kitsune",
"id": "30124edd064dca042b11a9e802b370924a67df67",
"size": "1377",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "kitsune/kbadge/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "281386"
},
{
"name": "HTML",
"bytes": "624493"
},
{
"name": "JavaScript",
"bytes": "750034"
},
{
"name": "Python",
"bytes": "2721930"
},
{
"name": "Shell",
"bytes": "10281"
},
{
"name": "Smarty",
"bytes": "2062"
}
],
"symlink_target": ""
} |
from GestureAgents.Recognizer import Recognizer, newHypothesis
from GestureAgents.Agent import Agent
import math
from GestureAgentsTUIO.Gestures2D.RecognizerTap import RecognizerTap
def build_and_register_DT(RTKlass=RecognizerTap):
    """Build and return a double-tap recognizer class parameterised on the
    tap recognizer `RTKlass`.

    The generated recognizer accepts two taps that occur within
    ``self.time`` seconds and ``self.maxd`` distance of each other.
    """
    class RecognizerDT_Test(Recognizer):
        rtotal = 0
        def __init__(self, system):
            Recognizer.__init__(self, system)
            self.RTKlass = RTKlass
            self.agent = None
            self.firstap = None
            self.secondtap = None
            # Wake up when a new tap agent appears in the system.
            self.register_event(
                self.system.newAgent(RTKlass), RecognizerDT_Test.EventNewAgent)
            # Max seconds between the taps, and max distance between them.
            self.time = 0.3
            self.maxd = 0.1
        @newHypothesis
        def EventNewAgent(self, Tap):
            # First tap agent seen: announce the double-tap hypothesis and
            # wait for the tap event itself.
            self.agent = self.make_DoubleTapAgent()
            self.agent.pos = Tap.pos
            self.announce()
            self.unregister_event(self.system.newAgent(RTKlass))
            self.register_event(Tap.newTap, RecognizerDT_Test.FirstTap)
        def FirstTap(self, Tap):
            # First tap confirmed: wait (at most self.time) for a second
            # tap agent, and take ownership of the first tap.
            self.firstap = Tap
            self.unregister_event(Tap.newTap)
            self.register_event(
                self.system.newAgent(RTKlass), RecognizerDT_Test.EventNewAgent2)
            self.expire_in(self.time)
            self.acquire(Tap)
        @newHypothesis
        def EventNewAgent2(self, Tap):
            # Second tap agent: it must land close enough to the first tap.
            if self.dist(Tap.pos, self.firstap.pos) > self.maxd:
                self.fail(cause="Max distance")
            else:
                self.unregister_event(self.system.newAgent(RTKlass))
                self.register_event(Tap.newTap, RecognizerDT_Test.SecondTap)
        def SecondTap(self, Tap):
            # Second tap confirmed: re-check the distance, complete the
            # gesture and defeat all competing hypotheses.
            if self.dist(Tap.pos, self.firstap.pos) > self.maxd:
                self.fail(cause="Max distance")
            else:
                self.secondtap = Tap
                self.unregister_event(Tap.newTap)
                self.cancel_expire()
                self.acquire(Tap)
                self.complete()
                #print "I win",self
                #print self.agent.newDoubleTap.registered
                #import pdb; pdb.set_trace()
                self.fail_all_others()
        def execute(self):
            # Fire the double-tap event at the second tap's position.
            #print "I execute",self
            self.agent.pos = self.secondtap.pos
            #print self.agent.newDoubleTap.registered
            self.agent.newDoubleTap(self.agent)
            self.finish()
        def duplicate(self):
            # Clone this hypothesis, carrying over the taps seen so far.
            d = self.get_copy(self.system)
            d.firstap = self.firstap
            d.secondtap = self.secondtap
            return d
        # def fail(self, cause="Unknown"):
        #     print "RecognizerDT_Test(",self,") fail, cause="+cause
        #     #raise Exception("RecognizerDT_Test fail")
        #     Recognizer.fail(self)
        @staticmethod
        def dist(a, b):
            # Euclidean distance between two 2D points.
            dx, dy = (a[0] - b[0], a[1] - b[1])
            return math.sqrt(dx ** 2 + dy ** 2)
        def make_DoubleTapAgent(self):
            # Agent exposing the single "newDoubleTap" event.
            a = Agent(self,("newDoubleTap",))
            return a
    return RecognizerDT_Test
| {
"content_hash": "dc16fe90cb37a5887db5fdef44f80afb",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 80,
"avg_line_length": 34.72727272727273,
"alnum_prop": 0.5605366492146597,
"repo_name": "chaosct/GestureAgents",
"id": "35887c9bfbf7f063528483182f70d72a270d3a39",
"size": "3103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master2",
"path": "Apps/TestComposition/RecognizerDT_Test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "351"
},
{
"name": "Python",
"bytes": "315289"
}
],
"symlink_target": ""
} |
"""
Created on Thu Mar 9 21:15:08 2017
@author: franklin
"""
import xlrd
datafile = "data/2013_ERCOT_Hourly_Load_Data.xls"
def parse_file(datafile):
    # Python 2 demo script: reads an .xls workbook and prints several ways
    # of inspecting its first sheet, returning all cell values.
    workbook = xlrd.open_workbook(datafile)
    sheet = workbook.sheet_by_index(0)
    # Read the whole sheet into a list of row lists.
    data = [[sheet.cell_value(r, col)
            for col in range(sheet.ncols)]
            for r in range(sheet.nrows)]
    print "\nList Comprehension"
    print "data[3][2]:",
    print data[3][2]
    # Demonstrate per-cell access; only row 50 is printed.
    print "\nCells in a nested loop:"
    for row in range(sheet.nrows):
        for col in range(sheet.ncols):
            if row == 50:
                print sheet.cell_value(row, col),
    ### other useful methods:
    print "\nROWS, COLUMNS, and CELLS:"
    print "Number of rows in the sheet:",
    print sheet.nrows
    print "Type of data in cell (row 3, col 2):",
    print sheet.cell_type(3, 2)
    print "Value in cell (row 3, col 2):",
    print sheet.cell_value(3, 2)
    print "Get a slice of values in column 3, from rows 1-3:"
    print sheet.col_values(3, start_rowx=1, end_rowx=4)
    # Excel stores dates as floats; xldate_as_tuple decodes them.
    print "\nDATES:"
    print "Type of data in cell (row 1, col 0):",
    print sheet.cell_type(1, 0)
    exceltime = sheet.cell_value(1, 0)
    print "Time in Excel format:",
    print exceltime
    print "Convert time to a Python datetime tuple, from the Excel float:",
    print xlrd.xldate_as_tuple(exceltime, 0)
    return data
data = parse_file(datafile) | {
"content_hash": "a39708f08e98e7d4a83c36f0115c7bbf",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 75,
"avg_line_length": 26.943396226415093,
"alnum_prop": 0.6134453781512605,
"repo_name": "franklinsales/udacity-data-analyst-nanodegree",
"id": "2f8804c56f9baa77ac097d5e9fb9255d3da5f075",
"size": "1475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project3/class-works/data-wrangling/data-extract-fundamentals/2013_ERCOT_Hourly_Load_Data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "598280"
},
{
"name": "Jupyter Notebook",
"bytes": "197341"
},
{
"name": "Python",
"bytes": "71271"
}
],
"symlink_target": ""
} |
"""
USAGE: %(program)s WIKI_XML_DUMP OUTPUT
Converts articles from a Wikipedia dump to a file containing the texts from the
articles. A single line is an article, articles are separated by a newline.
Note: doesn't support lemmatization.
Adapted from:
- http://textminingonline.com/training-word2vec-model-on-english-wikipedia-by-gensim
See also:
- https://github.com/piskvorky/gensim/blob/develop/gensim/scripts/make_wikicorpus.py
"""
import logging
import os.path
import sys
from gensim.corpora import WikiCorpus
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("Running %s", ' '.join(sys.argv))
    # Check and process input arguments.
    if len(sys.argv) < 3:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp = sys.argv[1:3]
    # Lemmatization is only available for English.
    # Don't construct a dictionary because we're not using it.
    wiki = WikiCorpus(inp, lemmatize=False, dictionary={})
    # Initialise the article counter up front: previously `n` was only
    # assigned inside the loop, so an empty dump raised NameError at the
    # final log statement.
    n = 0
    with open(outp, 'w') as output:
        for i, text in enumerate(wiki.get_texts()):
            if sys.version_info.major < 3:
                # NOTE(review): joining unicode(text) joins the characters
                # of the list's repr, not the tokens -- looks wrong; confirm
                # before relying on the Python 2 path.
                output.write(" ".join(unicode(text)) + "\n")
            else:
                output.write(" ".join(text) + "\n")
            if i > 0 and i % 10000 == 0:
                logger.info("Saved %s articles", i)
            n = i
    logger.info("Finished saving %s articles", n)
| {
"content_hash": "3305b24f66a4695973f13185f3e14585",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 84,
"avg_line_length": 32.583333333333336,
"alnum_prop": 0.6406649616368286,
"repo_name": "hgrif/wiki-word2vec",
"id": "a2d3608392b0ba677889a13a083a2e3484dba07e",
"size": "1611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "process_wiki.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "604"
},
{
"name": "Python",
"bytes": "3108"
}
],
"symlink_target": ""
} |
"""This command deals with the fact that the NHS mutates its
prescribing identifiers periodically, making tracking changes through
time very difficult.
As of 2017 (but this is expected to change within the next year), NHS
England uses a derivative of the BNF (British National Formulary)
codes to identify each presentation dispensed, called the NHS Pseudo
Classification.
Unfortunately, both the BNF and the NHS make changes to codes
periodically. Sometimes a chemical gets a new code, or sometimes it
moves to a new section. Because the BNF code includes the section in
the first few characters of the code, just reclassifying a drug means
its unique identifier has changed. This makes tracking that drug
through time impossible.
The situation is further complicated that the BNF no longer maintains
its old classification, so the Pseudo codes now used by the NHS no
longer necessarily correspond with official BNF codes at all.
The situation is expected to improve with the introduction of ePACT2
and the moving of prescribing data to use SNOMED CT codes as per dm+d.
For the time being, this method aims to normalise all codes in our dataset
so that prescribing is always indexed by the most recent version of
the Pseudo BNF Code.
We achieve this by applying a mapping of old code to new code which
has been applied annually by NHSBSA to create their Pseudo code list.
This mapping has been supplied to us in private correspondence with
the NHS BSA, and is reproduced (with some corrections to obvious
typos, etc) in the files accompanying this module.
The normalisation process is as follows:
For each old code -> new code mapping, in reverse order of date
(i.e. starting with the most recent mappings):
* If the code is at the section, paragraph, chemical or product level,
mark our internal corresponding model for that classification as no
longer current
* Find every presentation matching the new code (or classification),
and ensure a presentation exists matching the old code. Create a
reference to the new presentation code from the old one.
* Create a table of mappings from old codes to the most recent current
  code (taking into account multiple code changes)
* Replace all the codes that have new normalised versions in all local
version of the prescribing data. (If this command ever needs
running again, some time could be saved by applying this only to
prescribing data downloaded since the last this command was run)
* Iterate over all known BNF codes, sections, paragraphs etc, looking
for codes which have never been prescribed, and mark these as not
current. This is necessary because sometimes our mappings involve a
chemical-level change without making this explicit (i.e. a 15
character BNF code mapping has been supplied, but in fact it's the
Chemical part of the code that has changed). In these cases, we
can't tell if the Chemical is now redundant without checking to see
if there is any other prescribing under that code. This process
also has the useful side effect of removing the (many thousands of)
codes that have never actually been prescribed, and are therefore
unhelpful noise in our user interface.
* The problem with this approach is that recently added codes may
not yet have prescribing, but may do so at some point in the
future. Therefore, there is a `refresh_class_currency` method
within the `import_hscic_prescribing` management command, which
iterates over all sections, paragraphs, chemicals and products
currently listed as not current, and checks to see if there has
been any prescribing this month.
This command should in theory only have to be run once a year, as
we've been told mappings only happen this frequently. And in theory,
2017 is the last year of using BNF codes.
"""
import csv
import glob
import logging
import re
import tempfile
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.db import connection
from django.db import transaction
from frontend.models import Chemical
from frontend.models import Presentation
from frontend.models import Product
from frontend.models import Section
from frontend import bq_schemas as schemas
from gcutils.bigquery import Client, TableExporter
logger = logging.getLogger(__name__)
def create_code_mapping(filenames):
    """Mark superseded BNF classifications and link old codes to new ones.

    Given a list of filenames containing tab-delimited old->new BNF code
    changes:

    * find the matching entity in our local database (e.g. Section or
      Presentation, etc) and mark the old version as no-longer-current;
    * ensure a Presentation exists for each old code, with a reference
      (by BNF code) to its replacement.
    """
    # Delete previously generated "old version" presentations so re-running
    # this command is idempotent.
    Presentation.objects.filter(replaced_by__isnull=False).delete()
    for f in filenames:
        # One transaction per input file; the context manager guarantees the
        # file handle is closed (the original open() leaked it).
        with transaction.atomic():
            with open(f, "r") as mapping_file:
                for line in mapping_file:
                    if not line.strip():
                        continue  # skip blank lines
                    if "\t" not in line:
                        raise CommandError("Input lines must be tab delimited: %s" % line)
                    prev_code, next_code = line.split("\t")
                    prev_code = prev_code.strip()
                    next_code = next_code.strip()
                    if not re.match(r"^[0-9A-Z]+$", next_code):
                        # Skip 'withdrawn' &c
                        continue
                    # Code length encodes the classification level.
                    if len(prev_code) <= 7:  # section, subsection, paragraph
                        Section.objects.filter(bnf_id__startswith=prev_code).update(
                            is_current=False
                        )
                    elif len(prev_code) == 9:
                        Chemical.objects.filter(bnf_code=prev_code).update(is_current=False)
                    elif len(prev_code) == 11:
                        Product.objects.filter(bnf_code=prev_code).update(is_current=False)
                    # For every presentation under the *new* code, make sure
                    # an equivalent presentation exists under the *old* code
                    # pointing at its replacement.
                    matches = Presentation.objects.filter(bnf_code__startswith=next_code)
                    for row in matches:
                        replaced_by_id = row.pk
                        old_bnf_code = prev_code + replaced_by_id[len(prev_code):]
                        try:
                            old_version = Presentation.objects.get(pk=old_bnf_code)
                        except Presentation.DoesNotExist:
                            old_version = row
                            old_version.pk = None  # allows us to clone
                        old_version.bnf_code = old_bnf_code
                        old_version.replaced_by_id = replaced_by_id
                        old_version.save()
def create_bigquery_table():
    """Create a table in bigquery of all BNF codes for presentations that
    are no longer current, along with the BNF code of their latest
    incarnation.
    """
    # output a row for each presentation and its ultimate replacement
    with tempfile.NamedTemporaryFile(mode="r+") as csv_file:
        writer = csv.writer(csv_file)
        for p in Presentation.objects.filter(replaced_by__isnull=False):
            # NOTE(review): `current_version` presumably follows the
            # replaced_by chain to the latest incarnation -- confirm in
            # the Presentation model.
            writer.writerow([p.bnf_code, p.current_version.bnf_code])
        csv_file.seek(0)
        client = Client("hscic")
        table = client.get_or_create_table("bnf_map", schemas.BNF_MAP_SCHEMA)
        table.insert_rows_from_csv(csv_file.name, schemas.BNF_MAP_SCHEMA)
def write_zero_prescribing_codes_table(level):
    """Given a BNF `level` (`section`, `chapter`, `paragraph`, etc), write
    a table in bigquery listing all such levels that have zero prescribing.

    Returns a bigquery Table.
    """
    logger.info("Scanning %s to see if it has zero prescribing" % level)
    # RIGHT JOIN against the full BNF list keeps codes with no matching
    # prescribing rows; HAVING COUNT(...) = 0 then selects exactly those.
    sql = """
    SELECT
      bnf.%s
    FROM
      {hscic}.normalised_prescribing AS prescribing
    RIGHT JOIN
      {hscic}.bnf bnf
    ON
      prescribing.bnf_code = bnf.presentation_code
    WHERE (
      bnf.presentation_code NOT LIKE '2%%' -- appliances, etc
    )
    GROUP BY
      bnf.%s
    HAVING
      COUNT(prescribing.bnf_code) = 0
    """ % (
        level,
        level,
    )
    client = Client("tmp_eu")
    table = client.get_table("unused_codes_%s" % level)
    table.insert_rows_from_query(sql)
    return table
def get_csv_of_empty_classes_for_level(level):
    """Using BigQuery, make a CSV of BNF codes at the given level
    (e.g. `section`, `paragraph`) that have never had any prescribing.

    Returns a path to the CSV.
    """
    temp_table = write_zero_prescribing_codes_table(level)
    # Export via Cloud Storage, then download to a local temp file.
    storage_prefix = "tmp/{}".format(temp_table.table_id)
    exporter = TableExporter(temp_table, storage_prefix)

    logger.info("Copying %s to %s" % (temp_table.table_id, storage_prefix))
    exporter.export_to_storage()

    path = "/%s/%s.csv" % (tempfile.gettempdir(), temp_table.table_id)
    logger.info("Downloading %s to %s" % (storage_prefix, path))
    with open(path, "w") as f:
        exporter.download_from_storage_and_unzip(f)
    return path
def cleanup_empty_classes():
    """In BigQuery, find all BNF classes/levels (e.g. `section`,
    `paragraph`) that have never had any prescribing, and mark their
    corresponding entities in our local database as not current.
    """
    # (BigQuery column to scan, local model, model field holding the code)
    classes = [
        ("section_code", Section, "bnf_id"),
        ("para_code", Section, "bnf_id"),
        ("chemical_code", Chemical, "bnf_code"),
        ("product_code", Product, "bnf_code"),
        ("presentation_code", Presentation, "bnf_code"),
    ]
    for class_column, model, bnf_field in classes:
        csv_path = get_csv_of_empty_classes_for_level(class_column)
        # Lazy %-style args: the logging module formats only if emitted.
        logger.info("Marking all classes in %s as not current", csv_path)
        with transaction.atomic():
            with open(csv_path, "r") as f:
                reader = csv.reader(f)
                next(reader)  # skip header
                for row in reader:
                    code = row[0]
                    kwargs = {bnf_field: code}
                    try:
                        obj = model.objects.get(**kwargs)
                        obj.is_current = False
                        obj.save()
                    except model.DoesNotExist:
                        # Reasons this might happen without cause for alarm:
                        #
                        # * We don't create paragraphs ending in
                        #   zero, as these data properly belong with
                        #   their section;
                        #
                        # * We don't currently import appliances and similar
                        #
                        # FIX: `logger.warn` is a deprecated alias of
                        # `logger.warning`; use the documented spelling.
                        logger.warning("Couldn't find %s(pk=%s)", model.__name__, code)
def update_existing_prescribing():
    """For every child table of the prescribing table, update all the data
    so that the BNF codes are always normalised to the current BNF
    code.
    """
    # NOTE(review): codes are spliced into the SQL with %-formatting
    # rather than bound parameters. They come from our own Presentation
    # table rather than user input, so injection looks impractical here,
    # but confirm before exposing this path to external data.
    update_sql = """
    UPDATE %s
    SET presentation_code = '%s'
    WHERE presentation_code = '%s'"""
    # List every child (partition) table inheriting from the main
    # prescription table in postgres.
    tables_sql = """
    SELECT
      c.relname AS child
    FROM
      pg_inherits
    JOIN pg_class AS c
      ON (inhrelid=c.oid)
    JOIN pg_class AS p
      ON (inhparent=p.oid)
    WHERE p.relname = 'frontend_prescription'"""

    with connection.cursor() as cursor:
        cursor.execute(tables_sql)
        for row in cursor.fetchall():
            table_name = row[0]
            with transaction.atomic():
                for p in Presentation.objects.filter(replaced_by__isnull=False):
                    cursor.execute(
                        update_sql
                        % (table_name, p.current_version.bnf_code, p.bnf_code)
                    )
class Command(BaseCommand):
    """Imports presentation replacements and normalises historic data.

    Pipeline: build the old->new code mapping locally, mirror it to
    BigQuery, rewrite historic prescribing rows to current codes, then
    retire BNF classes that have never had any prescribing.
    """

    args = ""
    help = "Imports presentation replacements."

    def add_arguments(self, parser):
        parser.add_argument(
            "filenames",
            nargs="*",
            # FIX: help text previously said `presentation_commands/`,
            # but handle() actually reads `presentation_replacements/`.
            help="This argument only exists for tests. Normally the command "
            "is expected to work on the contents of `presentation_replacements/`",
        )

    def handle(self, *args, **options):
        if options["filenames"]:
            filenames = reversed(sorted(options["filenames"]))
        else:
            # Reverse-lexicographic order -- presumably newest-first for
            # date-named files; confirm against the data directory.
            filenames = reversed(
                sorted(
                    glob.glob(
                        "frontend/management/commands/"
                        "presentation_replacements/*.txt"
                    )
                )
            )
        create_code_mapping(filenames)
        create_bigquery_table()
        update_existing_prescribing()
        cleanup_empty_classes()
| {
"content_hash": "37450ce0749a9ee6b0265044bd1038bb",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 88,
"avg_line_length": 38.67601246105919,
"alnum_prop": 0.6364881192106323,
"repo_name": "ebmdatalab/openprescribing",
"id": "bc449e0cce43af436411c2d99bce7ec72acc6b01",
"size": "12415",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "openprescribing/frontend/management/commands/generate_presentation_replacements.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "345"
},
{
"name": "CSS",
"bytes": "22395"
},
{
"name": "Dockerfile",
"bytes": "1091"
},
{
"name": "HTML",
"bytes": "515967"
},
{
"name": "JavaScript",
"bytes": "216030"
},
{
"name": "Jupyter Notebook",
"bytes": "112739"
},
{
"name": "Less",
"bytes": "14803"
},
{
"name": "Makefile",
"bytes": "170"
},
{
"name": "Python",
"bytes": "1606002"
},
{
"name": "Shell",
"bytes": "6166"
}
],
"symlink_target": ""
} |
import chainer
import chainer.testing
import chainer.testing.attr
import chainermn
import mock
import numpy as np
import unittest
class ExampleModel(chainer.Chain):
    """Small three-layer linear chain used as the optimization target."""

    def __init__(self):
        super(ExampleModel, self).__init__()
        with self.init_scope():
            self.a = chainer.links.Linear(2, 3)
            self.b = chainer.links.Linear(3, 4)
            self.c = chainer.links.Linear(4, 5)
class TestMultiNodeOptimizer(unittest.TestCase):
    """Tests that the multi-node optimizer averages gradients over ranks."""

    def setUp(self) -> None:  # noqa: intentionally absent; fixtures below
        pass

    def setup_cpu(self):
        # Seed parameters with rank-dependent values so the
        # post-allreduce average is predictable.
        self.comm = chainermn.create_communicator('naive')
        self.target = ExampleModel()
        self.target.a.W.data[:] = self.comm.rank
        self.target.b.W.data[:] = self.comm.rank + 1
        self.target.c.W.data[:] = self.comm.rank + 2
        self.target.a.W.grad[:] = 0
        self.target.b.W.grad[:] = 0
        self.target.c.W.grad[:] = 0
        self.actual_optimizer = chainer.GradientMethod()
        # Replace update-rule creation with MagicMock so that calls to
        # update_rule.update() can be asserted on later.
        self.actual_optimizer.create_update_rule = mock.MagicMock

    def setup_gpu(self, device=None):
        # NOTE: the `device` argument is immediately overwritten by the
        # communicator's intra_rank.
        self.comm = chainermn.create_communicator('hierarchical')
        device = self.comm.intra_rank
        chainer.cuda.get_device_from_id(device).use()
        self.target = ExampleModel()
        self.target.to_gpu()
        self.target.a.W.data[:] = self.comm.rank
        self.target.b.W.data[:] = self.comm.rank + 1
        self.target.c.W.data[:] = self.comm.rank + 2
        self.target.a.W.grad[:] = 0
        self.target.b.W.grad[:] = 0
        self.target.c.W.grad[:] = 0
        self.actual_optimizer = chainer.GradientMethod()
        self.actual_optimizer.create_update_rule = mock.MagicMock

    def test_update_with_cpu(self):
        self.setup_cpu()
        self.optimizer = chainermn.create_multi_node_optimizer(
            self.actual_optimizer, self.comm)
        self.optimizer.setup(self.target)
        # After the first update the wrapped optimizer has not stepped
        # yet (t is still 0).
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 0)

        self.optimizer.target.a.W.grad[:] = self.comm.rank
        self.optimizer.target.b.W.grad[:] = self.comm.rank + 1
        self.optimizer.target.c.W.grad[:] = self.comm.rank + 2
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 1)
        self.optimizer.target.a.W.update_rule.update.assert_called_once_with(
            self.optimizer.target.a.W)
        self.optimizer.target.b.W.update_rule.update.assert_called_once_with(
            self.optimizer.target.b.W)
        self.optimizer.target.c.W.update_rule.update.assert_called_once_with(
            self.optimizer.target.c.W)

        # Mean of ranks 0..size-1 is (size - 1) / 2; each gradient must
        # equal that mean plus its per-link offset.
        base = (self.comm.size - 1.0) / 2
        chainer.testing.assert_allclose(self.optimizer.target.a.W.grad,
                                        (base + 0) * np.ones((3, 2)))
        chainer.testing.assert_allclose(self.optimizer.target.b.W.grad,
                                        (base + 1) * np.ones((4, 3)))
        chainer.testing.assert_allclose(self.optimizer.target.c.W.grad,
                                        (base + 2) * np.ones((5, 4)))

    @chainer.testing.attr.gpu
    def test_update_with_gpu(self):
        # Same scenario as test_update_with_cpu, but on GPU.
        self.setup_gpu()
        self.optimizer = chainermn.create_multi_node_optimizer(
            self.actual_optimizer, self.comm)
        self.optimizer.setup(self.target)
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 0)

        self.optimizer.target.a.W.grad[:] = self.comm.rank
        self.optimizer.target.b.W.grad[:] = self.comm.rank + 1
        self.optimizer.target.c.W.grad[:] = self.comm.rank + 2
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 1)
        self.optimizer.target.a.W.update_rule.update.assert_called_once_with(
            self.optimizer.target.a.W)
        self.optimizer.target.b.W.update_rule.update.assert_called_once_with(
            self.optimizer.target.b.W)
        self.optimizer.target.c.W.update_rule.update.assert_called_once_with(
            self.optimizer.target.c.W)

        base = (self.comm.size - 1.0) / 2
        chainer.testing.assert_allclose(self.optimizer.target.a.W.grad,
                                        (base + 0) * np.ones((3, 2)))
        chainer.testing.assert_allclose(self.optimizer.target.b.W.grad,
                                        (base + 1) * np.ones((4, 3)))
        chainer.testing.assert_allclose(self.optimizer.target.c.W.grad,
                                        (base + 2) * np.ones((5, 4)))
class DynamicExampleModel(chainer.Chain):
    """Two-layer chain; tests add a third link to it at runtime."""

    def __init__(self):
        super(DynamicExampleModel, self).__init__()
        with self.init_scope():
            self.a = chainer.links.Linear(2, 3)
            self.b = chainer.links.Linear(3, 4)
class TestMultiNodeOptimizerWithDynamicModel(unittest.TestCase):
    """Tests the multi-node optimizer when links are added after setup."""

    def setup_cpu(self):
        # Rank-dependent seed values make the averaged result predictable.
        self.comm = chainermn.create_communicator('naive')
        self.target = DynamicExampleModel()
        self.target.a.W.data[:] = self.comm.rank
        self.target.b.W.data[:] = self.comm.rank + 1
        self.target.a.W.grad[:] = 0
        self.target.b.W.grad[:] = 0
        self.actual_optimizer = chainer.GradientMethod()
        # MagicMock update rules let the tests assert on update() calls.
        self.actual_optimizer.create_update_rule = mock.MagicMock

    def setup_gpu(self, device=None):
        # NOTE: the `device` argument is immediately overwritten by the
        # communicator's intra_rank.
        self.comm = chainermn.create_communicator('hierarchical')
        device = self.comm.intra_rank
        chainer.cuda.get_device_from_id(device).use()
        self.target = DynamicExampleModel()
        self.target.to_gpu()
        self.target.a.W.data[:] = self.comm.rank
        self.target.b.W.data[:] = self.comm.rank + 1
        self.target.a.W.grad[:] = 0
        self.target.b.W.grad[:] = 0
        self.actual_optimizer = chainer.GradientMethod()
        self.actual_optimizer.create_update_rule = mock.MagicMock

    def test_update_with_cpu(self):
        self.setup_cpu()
        self.optimizer = chainermn.create_multi_node_optimizer(
            self.actual_optimizer, self.comm)
        self.optimizer.setup(self.target)
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 0)

        # Grow the model after the optimizer was set up; only rank 0
        # initializes c's weights, so the re-setup must broadcast them.
        with self.target.init_scope():
            self.target.c = chainer.links.Linear(4, 4)
        if self.comm.rank == 0:
            self.target.c.W.data[:] = self.comm.rank + 2
        self.optimizer.setup(self.target)
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 0)

        # Every rank must now hold identical weights for c.
        send_buf = chainer.cuda.to_cpu(self.optimizer.target.c.W.data)
        recv_buf = self.comm.mpi_comm.allgather(send_buf)
        for i in range(1, self.comm.size):
            chainer.testing.assert_allclose(recv_buf[0], recv_buf[i])

        self.optimizer.target.a.W.grad[:] = self.comm.rank
        self.optimizer.target.b.W.grad[:] = self.comm.rank + 1
        self.optimizer.target.c.W.grad[:] = self.comm.rank + 2
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 1)
        self.optimizer.target.a.W.update_rule.update.assert_called_once_with(
            self.optimizer.target.a.W)
        self.optimizer.target.b.W.update_rule.update.assert_called_once_with(
            self.optimizer.target.b.W)
        self.optimizer.target.c.W.update_rule.update.assert_called_once_with(
            self.optimizer.target.c.W)

        # Mean of ranks 0..size-1 is (size - 1) / 2.
        base = (self.comm.size - 1.0) / 2
        chainer.testing.assert_allclose(self.optimizer.target.a.W.grad,
                                        (base + 0) * np.ones((3, 2)))
        chainer.testing.assert_allclose(self.optimizer.target.b.W.grad,
                                        (base + 1) * np.ones((4, 3)))
        chainer.testing.assert_allclose(self.optimizer.target.c.W.grad,
                                        (base + 2) * np.ones((4, 4)))

    @chainer.testing.attr.gpu
    def test_update_with_gpu(self):
        # Same scenario as test_update_with_cpu, but on GPU.
        self.setup_gpu()
        self.optimizer = chainermn.create_multi_node_optimizer(
            self.actual_optimizer, self.comm)
        self.optimizer.setup(self.target)
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 0)

        with self.target.init_scope():
            c = chainer.links.Linear(4, 4)
            c.to_gpu()
            self.target.c = c
        if self.comm.rank == 0:
            self.target.c.W.data[:] = self.comm.rank + 2
        self.optimizer.setup(self.target)
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 0)

        send_buf = chainer.cuda.to_cpu(self.optimizer.target.c.W.data)
        recv_buf = self.comm.mpi_comm.allgather(send_buf)
        for i in range(1, self.comm.size):
            chainer.testing.assert_allclose(recv_buf[0], recv_buf[i])

        self.optimizer.target.a.W.grad[:] = self.comm.rank
        self.optimizer.target.b.W.grad[:] = self.comm.rank + 1
        self.optimizer.target.c.W.grad[:] = self.comm.rank + 2
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 1)
        self.optimizer.target.a.W.update_rule.update.assert_called_once_with(
            self.optimizer.target.a.W)
        self.optimizer.target.b.W.update_rule.update.assert_called_once_with(
            self.optimizer.target.b.W)
        self.optimizer.target.c.W.update_rule.update.assert_called_once_with(
            self.optimizer.target.c.W)

        base = (self.comm.size - 1.0) / 2
        chainer.testing.assert_allclose(self.optimizer.target.a.W.grad,
                                        (base + 0) * np.ones((3, 2)))
        chainer.testing.assert_allclose(self.optimizer.target.b.W.grad,
                                        (base + 1) * np.ones((4, 3)))
        chainer.testing.assert_allclose(self.optimizer.target.c.W.grad,
                                        (base + 2) * np.ones((4, 4)))
| {
"content_hash": "aafcc9628a717091157e11c9cf8c0dce",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 77,
"avg_line_length": 43.488789237668165,
"alnum_prop": 0.597030315528975,
"repo_name": "rezoo/chainer",
"id": "0c4b7483dc5de39be3dcc68463afcc252b969bcd",
"size": "9698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/chainermn_tests/optimizer_tests/test_multi_node_optimizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "Dockerfile",
"bytes": "1238"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "4367165"
}
],
"symlink_target": ""
} |
"""SOAP protocol implementation, dispatchers and client stub."""
import logging
import string
import httpx
from . import core, namespaces as ns, soap11, soap12, wsa
from .utils import uncapitalize
SOAP_HTTP_Transport = ns.wsdl_soap_http
logger = logging.getLogger('soapfish')
class SOAPVersion:
    """Maps SOAP envelope/binding namespaces onto the protocol modules."""

    SOAP11 = soap11
    SOAP12 = soap12

    @classmethod
    def get_version(cls, namespace):
        """Return the protocol module whose envelope or binding namespace
        matches *namespace*; raise ValueError for anything else."""
        if namespace in (cls.SOAP11.ENVELOPE_NAMESPACE, cls.SOAP11.BINDING_NAMESPACE):
            return cls.SOAP11
        if namespace in (cls.SOAP12.ENVELOPE_NAMESPACE, cls.SOAP12.BINDING_NAMESPACE):
            return cls.SOAP12
        raise ValueError(f"SOAP version with namespace '{namespace}' is not supported.")

    @classmethod
    def get_version_name(cls, namespace):
        """Return the module name of the SOAP version for *namespace*."""
        version = cls.get_version(namespace)
        return version.__name__

    @classmethod
    def get_version_from_xml(cls, xml):
        """Detect the SOAP version of a parsed WSDL from its binding element."""
        nsmap = {'wsdl': ns.wsdl, 'soap12': ns.wsdl_soap12}
        has_soap12_binding = xml.xpath('wsdl:binding/soap12:binding', namespaces=nsmap)
        return cls.SOAP12 if has_soap12_binding else cls.SOAP11
class Service:
    """Describe service aggregating information required for dispatching and WSDL generation."""

    def __init__(self, targetNamespace, location, schemas, methods, version=SOAPVersion.SOAP11, name='Service',
                 input_header=None, output_header=None, use_wsa=False):
        self.name = name
        self.targetNamespace = targetNamespace
        self.location = location
        self.schemas = schemas
        self.methods = methods
        self.version = version
        self.use_wsa = use_wsa
        # WS-Addressing supplies default header types unless overridden.
        if use_wsa:
            if input_header is None:
                input_header = wsa.WSAsaHeader
            if output_header is None:
                output_header = wsa.WSAHeader
        self.input_header = input_header
        self.output_header = output_header

    def get_method(self, operationName):
        """Return the first method whose operationName matches."""
        matching = (m for m in self.methods if m.operationName == operationName)
        return next(matching)

    def find_element_by_name(self, name):
        """Search every schema for an element called *name*; None if absent."""
        for schema in self.schemas:
            found = schema.get_element_by_name(name)
            if found is not None:
                return found
        return None

    def route(self, operationName):
        """Return a decorator that binds a Python function to service method."""
        method = self.get_method(operationName)

        def wrapper(func):
            method.function = func
            return func
        return wrapper
class Stub:
    """Client stub. Handles only document style calls."""

    SERVICE = None
    SCHEME = 'http'
    HOST = 'www.example.net'

    def __init__(self, username=None, password=None, service=None, location=None):
        # `location` may be None (URL built from service.location via
        # string.Template with SCHEME/HOST), a ready-made URL string, or
        # a callable(template, context) returning the URL.
        self.username = username
        self.password = password
        self.service = service if service else self.SERVICE
        context = {'scheme': self.SCHEME, 'host': self.HOST}
        if location is None:
            location = lambda template, context: string.Template(template).safe_substitute(**context)
        if callable(location):
            self.location = location(self.service.location, context)
        elif isinstance(location, str):
            self.location = location
        else:
            raise TypeError('Expected string or callable for location.')

    def _handle_response(self, method, http_headers, content):
        """Parse a SOAP response body; raise core.SOAPError on a Fault."""
        soap = self.service.version
        envelope = soap.Envelope.parsexml(content)
        # Only parse the header when the method declares an output header.
        if envelope.Header and method and method.output_header:
            response_header = envelope.Header.parse_as(method.output_header)
        else:
            response_header = None
        if envelope.Body.Fault:
            code, message, actor = soap.parse_fault_message(envelope.Body.Fault)
            error = core.SOAPError(code=code, message=message, actor=actor)
            raise error
        # NOTE(review): past this point `method` is assumed non-None even
        # though the header branch above tolerates None -- confirm callers.
        if isinstance(method.output, str):
            _type = self.service.find_element_by_name(method.output)._type.__class__
        else:
            _type = method.output
        body = envelope.Body.parse_as(_type)
        return core.SOAPResponse(body, soap_header=response_header)

    def call(self, operationName, parameter, header=None):
        """Invoke *operationName* with *parameter* over HTTP; return the
        parsed core.SOAPResponse."""
        soap = self.service.version
        method = self.service.get_method(operationName)
        # Document style: the body tag is the declared input name, or the
        # uncapitalized class name of the parameter object.
        tagname = method.input if isinstance(method.input, str) else uncapitalize(parameter.__class__.__name__)
        auth = (self.username, self.password) if self.username else None
        data = soap.Envelope.response(tagname, parameter, header=header)
        headers = soap.build_http_request_headers(method.soapAction)
        logger.info("Call '%s' on '%s'", operationName, self.location)
        logger.debug('Request Headers: %s', headers)
        logger.debug('Request Envelope: %s', data)
        r = httpx.post(self.location, auth=auth, headers=headers, data=data)
        logger.debug('Response Headers: %s', r.headers)
        logger.debug('Response Envelope: %s', r.content)
        return self._handle_response(method, r.headers, r.content)
| {
"content_hash": "c0a88d1002a377f6c27a44ddf9cf9637",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 111,
"avg_line_length": 36.457142857142856,
"alnum_prop": 0.6490987460815048,
"repo_name": "soapteam/soapfish",
"id": "9dd053f2ff73215943f6f25490c3658b401c3c57",
"size": "5104",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "soapfish/soap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jinja",
"bytes": "17316"
},
{
"name": "Python",
"bytes": "245622"
}
],
"symlink_target": ""
} |
try:
from tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
except ImportError:
from Tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
from PIL import Image, ImageTk, ImageEnhance
import sys
#
# enhancer widget
class Enhance(Frame):
    """Tk frame displaying *image* with a slider driving *enhancer*.

    Args:
        master: parent Tk widget.
        image: PIL image to preview.
        name: label shown next to the slider.
        enhancer: ImageEnhance factory (e.g. ImageEnhance.Color).
        lo, hi: slider range.
    """

    def __init__(self, master, image, name, enhancer, lo, hi):
        Frame.__init__(self, master)

        # set up the image
        self.tkim = ImageTk.PhotoImage(image.mode, image.size)
        self.enhancer = enhancer(image)
        self.update("1.0")  # normalize

        # image window
        Label(self, image=self.tkim).pack()

        # scale
        s = Scale(self, label=name, orient=HORIZONTAL,
                  from_=lo, to=hi, resolution=0.01,
                  command=self.update)
        s.set(self.value)
        s.pack()

    def update(self, value):
        # FIX: the Scale widget's command passes its position as a numeric
        # string; float() parses it directly -- eval() gave the same result
        # but executed arbitrary expressions.
        self.value = float(value)
        self.tkim.paste(self.enhancer.enhance(self.value))
#
# main
root = Tk()
# load the image named on the command line and shrink it for preview
im = Image.open(sys.argv[1])
im.thumbnail((200, 200))
# one slider window per enhancement dimension, all acting on `im`
Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()
root.mainloop()
| {
"content_hash": "f4a61c5397612cf43b2ded9f8aecd419",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 80,
"avg_line_length": 26.64,
"alnum_prop": 0.6291291291291291,
"repo_name": "insta-code1/Instafit-ecommerce-Django",
"id": "4ef479a28cd3795d405dcd88dfa52f478210e939",
"size": "1522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/enhancer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1350"
},
{
"name": "Batchfile",
"bytes": "870"
},
{
"name": "CSS",
"bytes": "43857"
},
{
"name": "HTML",
"bytes": "23288"
},
{
"name": "JavaScript",
"bytes": "79496"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "81309"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from typing import Dict, Type
from .base import CustomerCustomizerServiceTransport
from .grpc import CustomerCustomizerServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = (
    OrderedDict()
)  # type: Dict[str, Type[CustomerCustomizerServiceTransport]]
_transport_registry["grpc"] = CustomerCustomizerServiceGrpcTransport

# Public names exported by this sub-package.
__all__ = (
    "CustomerCustomizerServiceTransport",
    "CustomerCustomizerServiceGrpcTransport",
)
| {
"content_hash": "90ae7912238047d9146b2c8b88717af5",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 68,
"avg_line_length": 30.6875,
"alnum_prop": 0.8044806517311609,
"repo_name": "googleads/google-ads-python",
"id": "323aab7586df512c4fefbfca1e42d8ebe5c7b966",
"size": "1091",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v10/services/services/customer_customizer_service/transports/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
import signal
class TimeoutFunctionException(Exception):
    """Raised when a wrapped call exceeds its allotted time."""
class TimeoutFunction:
    """Wrap a callable so that invoking it aborts after ``timeout`` seconds.

    Relies on SIGALRM, so it only works on Unix, in the main thread, and
    with whole-second timeouts.
    """

    def __init__(self, function, timeout):
        self.timeout = timeout
        self.function = function

    def handle_timeout(self, signum, frame):
        # SIGALRM handler: surface the alarm as an exception.
        raise TimeoutFunctionException()

    def __call__(self, *args, **kwargs):
        previous_handler = signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.timeout)
        try:
            return self.function(*args, **kwargs)
        finally:
            # Always restore the prior handler and cancel the alarm.
            signal.signal(signal.SIGALRM, previous_handler)
            signal.alarm(0)
| {
"content_hash": "10169ab8df02778c77ef6790750b15c4",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 40.43478260869565,
"alnum_prop": 0.432258064516129,
"repo_name": "mredar/oac-ead-to-pdf",
"id": "b2e88ed3e29a7fdb2962a4c3eed61e2ae67c8e11",
"size": "930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "timeout.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "89119"
},
{
"name": "Shell",
"bytes": "13283"
},
{
"name": "XSLT",
"bytes": "253002"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.settings import WAF_ATTACK_VECTORS
__product__ = "dotDefender (Applicure Technologies)"
def detect(get_page):
    """Check whether the target sits behind the dotDefender WAF.

    Probes the target with each attack vector and inspects the response
    headers for the "X-dotDefender-denied" marker.

    Args:
        get_page: callable issuing a request; returns (page, headers, code).

    Returns:
        True if the dotDefender denial header was observed, else False.
    """
    retval = False

    for vector in WAF_ATTACK_VECTORS:
        _, headers, _ = get_page(get=vector)
        # FIX: the loop previously assigned `retVal` while initializing and
        # returning `retval`, so the function always returned False.
        retval = headers.get("X-dotDefender-denied", "") == "1"
        if retval:
            break

    return retval
| {
"content_hash": "a2a0d907f38504fdd80cb4688948cb52",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 63,
"avg_line_length": 25.105263157894736,
"alnum_prop": 0.6540880503144654,
"repo_name": "JeyZeta/Dangerous",
"id": "9cc66e18b758352168371c4e26fb950be6d5483d",
"size": "500",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Dangerous/Golismero/tools/sqlmap/waf/dotdefender.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
} |
"""Main test class for app_cli"""
from src import app
class TestApp(object):
    """Smoke tests for the app module's name accessors."""

    def setup_method(self, _):
        # run before each test: take a fresh reference to the module
        self.app = app

    def test_function(self):
        self.app.set_name("test")
        # NOTE(review): the trailing "| {" on the next line does not parse
        # as Python and looks like unrelated fused-in data -- verify the
        # original file upstream.
        assert self.app.get_name() == "test" | {
"content_hash": "4f61ddd3fb3dd71b4949afb2e45f5b49",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 44,
"avg_line_length": 21.90909090909091,
"alnum_prop": 0.6016597510373444,
"repo_name": "kefniark/turnkey-tools",
"id": "1656216cd9c102f6cd8c4d0f456ef1aada443ce5",
"size": "241",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15865"
}
],
"symlink_target": ""
} |
from pygbe.util import an_solution
from convergence import (run_convergence, picklesave, pickleload,
report_results, mesh)
def _run_case(test_outputs, test_name, param, problem_folder, description):
    """Run one convergence case unless its results are already cached.

    Results are stored in ``test_outputs[test_name]`` as
    [N, iterations, Esolv, Esurf, Ecoul, Time] and pickled to disk.
    """
    print(description)
    if test_name not in test_outputs:
        N, iterations, Esolv, Esurf, Ecoul, Time = run_convergence(
            mesh, test_name, problem_folder, param)
        test_outputs[test_name] = [N, iterations, Esolv, Esurf, Ecoul, Time]
        picklesave(test_outputs)


def main():
    """Run the molecule_neumann regression test and report its error
    against the analytical solution."""
    print('{:-^60}'.format('Running molecule_neumann test'))

    try:
        test_outputs = pickleload()
    except FileNotFoundError:
        test_outputs = {}

    problem_folder = 'input_files'
    param = 'sphere_fine.param'

    # The three sub-problems whose energies are combined into the
    # interaction energy below (previously three copy-pasted sections).
    cases = [
        ('molecule_neumann', 'Runs for molecule + set phi/dphi surface'),
        ('molecule_single_center', 'Runs for isolated molecule'),
        ('neumann_surface', 'Runs for isolated surface'),
    ]
    for test_name, description in cases:
        _run_case(test_outputs, test_name, param, problem_folder, description)

    # Load results for analysis
    Esolv, Esurf, Ecoul = test_outputs['molecule_neumann'][2:5]
    Esolv_mol, Esurf_mol, Ecoul_mol = test_outputs['molecule_single_center'][2:5]
    Esolv_surf, Esurf_surf, Ecoul_surf = test_outputs['neumann_surface'][2:5]

    Time = test_outputs['molecule_neumann'][-1]
    Time_mol = test_outputs['molecule_single_center'][-1]
    Time_surf = test_outputs['neumann_surface'][-1]
    N, iterations = test_outputs['molecule_neumann'][:2]

    # Interaction energy: combined run minus the two isolated runs.
    Einter = (Esolv + Esurf + Ecoul - Esolv_surf - Esurf_mol - Ecoul_mol -
              Esolv_mol - Esurf_surf - Ecoul_surf)
    total_time = Time + Time_mol + Time_surf

    analytical = an_solution.molecule_constant_charge(1., -80 * 1., 5., 4.,
                                                      12., 0.125, 4., 80.)

    error = abs(Einter - analytical) / abs(analytical)

    report_results(error, N, iterations, Einter, analytical, total_time,
                   test_name='molecule neumann')
if __name__ == "__main__":
    from check_for_meshes import check_mesh

    # test mesh archive -- presumably downloaded only when missing
    # locally; see check_for_meshes for the exact behaviour.
    mesh_file = 'https://zenodo.org/record/55349/files/pygbe_regresion_test_meshes.zip'
    folder_name = 'regresion_tests_meshes'
    rename_folder = 'geometry'
    size = '~10MB'
    check_mesh(mesh_file, folder_name, rename_folder, size)
    # NOTE(review): the trailing "| {" below does not parse as Python and
    # looks like unrelated fused-in data -- verify the original upstream.
    main() | {
"content_hash": "2d17024badb6d390678df45bc01b8ba4",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 87,
"avg_line_length": 37.59493670886076,
"alnum_prop": 0.6148148148148148,
"repo_name": "barbagroup/pygbe",
"id": "852a5f81d670c55c6cda3662e55426abff7425a6",
"size": "2970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/convergence_tests/molecule_neumann.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "217312"
},
{
"name": "C++",
"bytes": "72804"
},
{
"name": "Dockerfile",
"bytes": "2273"
},
{
"name": "Jupyter Notebook",
"bytes": "2311"
},
{
"name": "Python",
"bytes": "566209"
},
{
"name": "TeX",
"bytes": "6758"
}
],
"symlink_target": ""
} |
from apgl.graph.DictGraph import DictGraph
from apgl.util.Util import Util
import unittest
import numpy
import logging
import numbers
import numpy.testing as nptst
class DictGraphTest(unittest.TestCase):
    def setUp(self):
        # Undirected fixture: edges among vertices 0-4; vertex 5 present
        # but isolated, with value None.
        self.graph = DictGraph()
        self.graph.addEdge(0, 1, 1)
        self.graph.addEdge(1, 3, 1)
        self.graph.addEdge(0, 2, 2)
        self.graph.addEdge(2, 3, 5)
        self.graph.addEdge(0, 4, 1)
        self.graph.addEdge(3, 4, 1)
        self.graph.setVertex(5, None)

        # Directed twin of the same topology; vertex 5 carries value 1.
        self.graph2 = DictGraph(False)
        self.graph2.addEdge(0, 1, 1)
        self.graph2.addEdge(1, 3, 1)
        self.graph2.addEdge(0, 2, 2)
        self.graph2.addEdge(2, 3, 5)
        self.graph2.addEdge(0, 4, 1)
        self.graph2.addEdge(3, 4, 1)
        self.graph2.setVertex(5, 1)
    def testInit(self):
        """Constructing an empty DictGraph should not raise."""
        dictGraph = DictGraph()
def testAddEdge(self):
dictGraph = DictGraph()
dictGraph.addEdge("A", "B", [1,2,3])
dictGraph.addEdge("A", "C", "HelloThere")
dictGraph.addEdge(12, 8, [1,2,3, 12])
self.assertEquals(dictGraph.getEdge("A", "B"), [1,2,3])
self.assertEquals(dictGraph.getEdge("B", "A"), [1,2,3])
self.assertEquals(dictGraph.getEdge("A", "C"), "HelloThere")
self.assertEquals(dictGraph.getEdge("C", "A"), "HelloThere")
self.assertEquals(dictGraph.getEdge(12, 8), [1,2,3, 12])
self.assertEquals(dictGraph.getEdge(8, 12), [1,2,3, 12])
dictGraph.addEdge(2, 8)
dictGraph = DictGraph(False)
dictGraph.addEdge("A", "B", [1,2,3])
dictGraph.addEdge("A", "C", "HelloThere")
dictGraph.addEdge(12, 8, [1,2,3, 12])
self.assertEquals(dictGraph.getEdge("A", "B"), [1,2,3])
self.assertEquals(dictGraph.getEdge("B", "A"), None)
self.assertEquals(dictGraph.getEdge("A", "C"), "HelloThere")
self.assertEquals(dictGraph.getEdge("C", "A"), None)
self.assertEquals(dictGraph.getEdge(12, 8), [1,2,3, 12])
self.assertEquals(dictGraph.getEdge(8, 12), None)
dictGraph.addEdge(2, 8)
#Test directed graphs
def testRemoveEdge(self):
dictGraph = DictGraph()
dictGraph.addEdge(1, 2, 12)
dictGraph.addEdge(1, 3, 18)
dictGraph.addEdge(3, 4, 1)
self.assertEquals(dictGraph.getEdge(1, 2), 12)
self.assertEquals(dictGraph.getEdge(1, 3), 18)
self.assertEquals(dictGraph.getEdge(3, 4), 1)
dictGraph.removeEdge(1, 3)
self.assertEquals(dictGraph.getEdge(1, 3), None)
self.assertEquals(dictGraph.getEdge(1, 2), 12)
self.assertEquals(dictGraph.getEdge(3, 4), 1)
#Some tests on directed graphs
dictGraph = DictGraph(False)
dictGraph.addEdge(1, 2, 12)
dictGraph.addEdge(2, 1, 12)
dictGraph.removeEdge(1, 2)
self.assertEquals(dictGraph.getEdge(1, 2), None)
self.assertEquals(dictGraph.getEdge(2, 1), 12)
def testIsUndirected(self):
dictGraph = DictGraph(True)
self.assertEquals(dictGraph.isUndirected(), True)
dictGraph = DictGraph(False)
self.assertEquals(dictGraph.isUndirected(), False)
def testGetNumEdges(self):
dictGraph = DictGraph(True)
self.assertEquals(dictGraph.getNumEdges(), 0)
dictGraph.addEdge(1, 2, 12)
dictGraph.addEdge(1, 3, 18)
dictGraph.addEdge(3, 4, 1)
self.assertEquals(dictGraph.getNumEdges(), 3)
dictGraph.addEdge(3, 4, 1)
self.assertEquals(dictGraph.getNumEdges(), 3)
dictGraph.addEdge(3, 5, 1)
self.assertEquals(dictGraph.getNumEdges(), 4)
dictGraph.addEdge(3, 3, 1)
self.assertEquals(dictGraph.getNumEdges(), 5)
#Identical tests with directed graphs
dictGraph = DictGraph(False)
self.assertEquals(dictGraph.getNumEdges(), 0)
dictGraph.addEdge(1, 2, 12)
dictGraph.addEdge(1, 3, 18)
dictGraph.addEdge(3, 4, 1)
self.assertEquals(dictGraph.getNumEdges(), 3)
dictGraph.addEdge(3, 4, 1)
self.assertEquals(dictGraph.getNumEdges(), 3)
dictGraph.addEdge(3, 5, 1)
self.assertEquals(dictGraph.getNumEdges(), 4)
dictGraph.addEdge(3, 3, 1)
self.assertEquals(dictGraph.getNumEdges(), 5)
def testGetEdge(self):
dictGraph = DictGraph(True)
dictGraph.addEdge(1, 2, 12)
self.assertEquals(dictGraph.getEdge(1, 2), 12)
self.assertEquals(dictGraph.getEdge(2, 1), 12)
self.assertEquals(dictGraph.getEdge(2, 2), None)
self.assertRaises(ValueError, dictGraph.getEdge, 5, 8)
dictGraph = DictGraph(False)
dictGraph.addEdge(1, 2, 12)
self.assertEquals(dictGraph.getEdge(1, 2), 12)
self.assertEquals(dictGraph.getEdge(2, 1), None)
def testGetNeighbours(self):
dictGraph = DictGraph(True)
dictGraph.addEdge(1, 2, 12)
dictGraph.addEdge(1, 3, 18)
dictGraph.addEdge(1, 4, 1)
dictGraph.addEdge(3, 4, 1)
dictGraph.addEdge(2, 2, 1)
dictGraph.setVertex(5, 12)
self.assertEquals(dictGraph.neighbours(1), [2, 3, 4])
self.assertEquals(dictGraph.neighbours(3), [1, 4])
self.assertEquals(dictGraph.neighbours(2), [1, 2])
self.assertEquals(dictGraph.neighbours(5), [])
#Directed graphs
dictGraph = DictGraph(False)
dictGraph.addEdge(1, 2, 12)
dictGraph.addEdge(1, 3, 18)
dictGraph.addEdge(1, 4, 1)
dictGraph.addEdge(3, 4, 1)
dictGraph.addEdge(2, 2, 1)
dictGraph.setVertex(5, 12)
self.assertEquals(dictGraph.neighbours(1), [2,3,4])
self.assertEquals(dictGraph.neighbours(3), [4])
self.assertEquals(dictGraph.neighbours(2), [2])
self.assertEquals(dictGraph.neighbours(5), [])
def testGetVertex(self):
dictGraph = DictGraph(True)
dictGraph.addEdge(1, 2, 12)
dictGraph.addEdge(1, 3, 18)
dictGraph.setVertex(5, 12)
self.assertEquals(dictGraph.getVertex(1), None)
self.assertEquals(dictGraph.getVertex(2), None)
self.assertEquals(dictGraph.getVertex(3), None)
self.assertEquals(dictGraph.getVertex(5), 12)
self.assertRaises(ValueError, dictGraph.getVertex, 4)
#Directed graphs
dictGraph = DictGraph(False)
dictGraph.addEdge(1, 2, 12)
dictGraph.addEdge(1, 3, 18)
dictGraph.setVertex(5, 12)
self.assertEquals(dictGraph.getVertex(1), None)
self.assertEquals(dictGraph.getVertex(2), None)
self.assertEquals(dictGraph.getVertex(3), None)
self.assertEquals(dictGraph.getVertex(5), 12)
self.assertRaises(ValueError, dictGraph.getVertex, 4)
def testAddVertex(self):
dictGraph = DictGraph(True)
dictGraph.addEdge(1, 2, 12)
dictGraph.addEdge(1, 3, 18)
dictGraph.setVertex(5, 12)
self.assertEquals(dictGraph.getVertex(5), 12)
dictGraph.setVertex(5, 22)
self.assertEquals(dictGraph.getVertex(5), 22)
dictGraph.addEdge(5, 11, 18)
self.assertEquals(dictGraph.getVertex(5), 22)
def testGetAllVertexIds(self):
dictGraph = DictGraph(True)
dictGraph.addEdge(1, 2, 12)
dictGraph.addEdge(1, 3, 18)
dictGraph.setVertex(5, 12)
self.assertEquals(dictGraph.getAllVertexIds(), [1, 2, 3, 5])
def testGetAllEdges(self):
dictGraph = DictGraph(True)
dictGraph.setVertex(5, 12)
dictGraph.addEdge(1, 2, 12)
dictGraph.addEdge(1, 3, 18)
edges = dictGraph.getAllEdges()
self.assertEquals(len(edges), 2)
self.assertTrue((1,2) in edges)
self.assertTrue((1,3) in edges)
dictGraph = DictGraph(False)
dictGraph.setVertex(5, 12)
dictGraph.addEdge(1, 2, 12)
dictGraph.addEdge(2, 1, 12)
dictGraph.addEdge(1, 3, 18)
edges = dictGraph.getAllEdges()
self.assertEquals(len(edges), 3)
self.assertTrue((1,2) in edges)
self.assertTrue((2,1) in edges)
self.assertTrue((1,3) in edges)
        def testDensity(self):
                # density = |E| / max possible edges: n(n-1)/2 = 45 for an
                # undirected graph on 10 vertices, n(n-1) = 90 for a directed one.
                numVertices = 10
                graph = DictGraph(True)
                for i in range(numVertices):
                        graph.setVertex(i, 0)
                graph.addEdge(0, 1)
                self.assertEquals(graph.density(), float(1)/45)
                graph.addEdge(0, 2)
                self.assertEquals(graph.density(), float(2)/45)
                graph = DictGraph(False)
                for i in range(numVertices):
                        graph.setVertex(i, 0)
                graph.addEdge(0, 1)
                self.assertEquals(graph.density(), float(1)/90)
                graph.addEdge(0, 2)
                self.assertEquals(graph.density(), float(2)/90)
                #Test a graph with 1 vertex
                # A single vertex has density 0 without edges and 1 with its
                # self-loop (the only edge possible).
                graph = DictGraph(True)
                graph.setVertex(0, 12)
                self.assertEquals(graph.density(), 0)
                graph.addEdge(0, 0)
                self.assertEquals(graph.density(), 1)
def testSetVertices(self):
graph = DictGraph()
vertexIndices = [1, 2, 3]
vertices = ["a", "b", "c"]
graph.setVertices(vertexIndices, vertices)
vertexIndices2 = graph.getAllVertexIds()
vertices2 = graph.getVertices(vertexIndices2)
self.assertEquals(vertexIndices, vertexIndices2)
self.assertEquals(vertices, vertices2)
def testGetWeightMatrix(self):
graph = DictGraph()
graph.addEdge("a", "b")
graph.addEdge("a", "c")
graph.addEdge("a", "d")
graph.addEdge("d", "e")
W = graph.getWeightMatrix()
keys = graph.getAllVertexIds()
for i in range(len(keys)):
for j in range(len(keys)):
if W[i, j] == 1:
self.assertEquals(graph.getEdge(keys[i], keys[j]), 1)
else:
self.assertEquals(graph.getEdge(keys[i], keys[j]), None)
#Try a directed graph
graph = DictGraph(False)
graph.addEdge("a", "b")
graph.addEdge("a", "c")
graph.addEdge("a", "d")
graph.addEdge("d", "e")
W = graph.getWeightMatrix()
for i in range(len(keys)):
for j in range(len(keys)):
if W[i, j] == 1:
self.assertEquals(graph.getEdge(keys[i], keys[j]), 1)
else:
self.assertEquals(graph.getEdge(keys[i], keys[j]), None)
def testGetSparseWeightMatrix(self):
graph = DictGraph()
graph.addEdge("a", "b")
graph.addEdge("a", "c")
graph.addEdge("a", "d", "blah")
graph.addEdge("d", "e", -1.1)
graph.addEdge("c", "b", 2)
W = graph.getSparseWeightMatrix()
keys = graph.getAllVertexIds()
for i in range(len(keys)):
for j in range(len(keys)):
if graph.edgeExists(keys[i], keys[j]) and not isinstance(graph.getEdge(keys[i], keys[j]), numbers.Number):
self.assertEquals(1, W[i, j])
elif W[i, j] != 0:
self.assertEquals(graph.getEdge(keys[i], keys[j]), W[i, j])
else:
self.assertEquals(graph.getEdge(keys[i], keys[j]), None)
#Try a directed graph
graph = DictGraph(False)
graph.addEdge("a", "b")
graph.addEdge("a", "c", "test")
graph.addEdge("a", "d")
graph.addEdge("d", "e")
graph.addEdge("c", "a", 0.1)
W = graph.getSparseWeightMatrix()
for i in range(len(keys)):
for j in range(len(keys)):
if graph.edgeExists(keys[i], keys[j]) and not isinstance(graph.getEdge(keys[i], keys[j]), numbers.Number):
self.assertEquals(1, W[i, j])
elif W[i, j] != 0:
self.assertEquals(graph.getEdge(keys[i], keys[j]), W[i, j])
else:
self.assertEquals(graph.getEdge(keys[i], keys[j]), None)
def testGetAllEdgeIndices(self):
graph = DictGraph()
graph.addEdge("a", "b")
graph.addEdge("a", "c")
graph.addEdge("a", "d")
graph.addEdge("d", "e")
edgeIndices = graph.getAllEdgeIndices()
keys = graph.getAllVertexIds()
self.assertEquals(edgeIndices.shape[0], graph.getNumEdges())
for i in range(edgeIndices.shape[0]):
self.assertTrue(graph.getEdge(keys[int(edgeIndices[i, 0])], keys[edgeIndices[i, 1]]) == 1)
graph = DictGraph(False)
graph.addEdge("a", "b")
graph.addEdge("b", "a")
graph.addEdge("a", "c")
graph.addEdge("a", "d")
graph.addEdge("d", "e")
edgeIndices = graph.getAllEdgeIndices()
keys = graph.getAllVertexIds()
self.assertEquals(edgeIndices.shape[0], graph.getNumEdges())
for i in range(edgeIndices.shape[0]):
self.assertTrue(graph.getEdge(keys[int(edgeIndices[i, 0])], keys[edgeIndices[i, 1]]) == 1)
def testGetItem(self):
graph = DictGraph()
graph.addEdge(1, 1, 0.1)
graph.addEdge(1, 3, 0.5)
graph.addEdge(2, 4, 1)
graph.addEdge(2, 3, 2)
graph.setVertex(0, "abc")
self.assertEquals(graph[1,1], 0.1)
self.assertEquals(graph[1,3], 0.5)
def testSetItem(self):
graph = DictGraph()
graph.addEdge(1, 1, 0.1)
graph.addEdge(1, 3, 0.5)
self.assertEquals(graph[1,3], 0.5)
graph[1, 3] = 2
self.assertEquals(graph[1,3], 2)
def testAddEdges(self):
graph = DictGraph()
edgeList = [(1, 2), (2, 1), (5, 2), (8, 8)]
graph.addEdges(edgeList)
self.assertEquals(graph.getNumEdges(), 3)
self.assertEquals(graph.getEdge(1, 2), 1)
self.assertEquals(graph.getEdge(5, 2), 1)
self.assertEquals(graph.getEdge(2, 1), 1)
self.assertEquals(graph.getEdge(8, 8), 1)
edgeValues = [1, 2, 3, 4]
graph.addEdges(edgeList, edgeValues)
self.assertEquals(graph.getEdge(1, 2), 2)
self.assertEquals(graph.getEdge(5, 2), 3)
self.assertEquals(graph.getEdge(2, 1), 2)
self.assertEquals(graph.getEdge(8, 8), 4)
#Now test directed graphs
graph = DictGraph(False)
graph.addEdges(edgeList)
self.assertEquals(graph.getNumEdges(), 4)
self.assertEquals(graph.getEdge(1, 2), 1)
self.assertEquals(graph.getEdge(5, 2), 1)
self.assertEquals(graph.getEdge(2, 1), 1)
self.assertEquals(graph.getEdge(8, 8), 1)
edgeValues = [1, 2, 3, 4]
graph.addEdges(edgeList, edgeValues)
self.assertEquals(graph.getEdge(1, 2), 1)
self.assertEquals(graph.getEdge(5, 2), 3)
self.assertEquals(graph.getEdge(2, 1), 2)
self.assertEquals(graph.getEdge(8, 8), 4)
def testSubgraph(self):
graph = DictGraph()
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(1, 2)
graph.addEdge(2, 3)
graph.setVertex(0, "abc")
graph.setVertex(3, "cde")
self.assertEquals(graph.getNumEdges(), 5)
subgraph = graph.subgraph([0, 1, 2])
self.assertEquals(subgraph.getNumVertices(), 3)
self.assertEquals(subgraph.getNumEdges(), 3)
self.assertEquals(subgraph.isUndirected(), True)
self.assertEquals(subgraph.getEdge(0, 1), 1)
self.assertEquals(subgraph.getEdge(0, 2), 1)
self.assertEquals(subgraph.getEdge(1, 2), 1)
self.assertEquals(subgraph.getVertex(0), "abc")
#Check the original graph is fine
self.assertEquals(graph.getNumVertices(), 4)
self.assertEquals(graph.getNumEdges(), 5)
self.assertEquals(graph.getVertex(0), "abc")
self.assertEquals(graph.getVertex(3), "cde")
#Now a quick test for directed graphs
graph = DictGraph(False)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(1, 2)
graph.addEdge(2, 3)
subgraph = graph.subgraph([0, 1, 2])
self.assertEquals(subgraph.getNumEdges(), 3)
self.assertEquals(subgraph.isUndirected(), False)
self.assertEquals(subgraph.getEdge(0, 1), 1)
self.assertEquals(subgraph.getEdge(0, 2), 1)
self.assertEquals(subgraph.getEdge(1, 2), 1)
def testNeighbourOf(self):
graph = DictGraph(True)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(1, 2)
graph.addEdge(2, 3)
for i in range(4):
self.assertEquals(graph.neighbours(i), graph.neighbourOf(i))
#Now test directed graph
graph = DictGraph(False)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(1, 2)
graph.addEdge(2, 3)
self.assertEquals(graph.neighbourOf(0), [])
self.assertEquals(graph.neighbourOf(1), [0])
self.assertEquals(graph.neighbourOf(2), [0,1])
self.assertEquals(graph.neighbourOf(3), [0, 2])
def testOutDegreeSequence(self):
graph = DictGraph(True)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(1, 2)
graph.addEdge(2, 3)
degSeq, vertices = graph.outDegreeSequence()
self.assertTrue((degSeq == numpy.array([ 3, 2, 3, 2.])).all())
self.assertTrue(vertices == [0, 1, 2, 3])
#Test results on a directed graph
graph = DictGraph(False)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(1, 2)
graph.addEdge(2, 3)
degSeq, vertices = graph.outDegreeSequence()
self.assertTrue((degSeq == numpy.array([ 3, 1, 1, 0])).all())
def testInDegreeSequence(self):
graph = DictGraph(True)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(1, 2)
graph.addEdge(2, 3)
degSeq, vertices = graph.inDegreeSequence()
self.assertTrue((degSeq == numpy.array([ 3, 2, 3, 2.])).all())
self.assertTrue(vertices == [0, 1, 2, 3])
#Test results on a directed graph
graph = DictGraph(False)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(1, 2)
graph.addEdge(2, 3)
degSeq, vertices = graph.inDegreeSequence()
self.assertTrue((degSeq == numpy.array([ 0, 1, 2, 2])).all())
def testVertexExists(self):
graph = DictGraph(False)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(1, 2)
graph.addEdge(2, 3)
self.assertTrue(graph.vertexExists(0))
self.assertTrue(graph.vertexExists(1))
self.assertTrue(graph.vertexExists(2))
self.assertTrue(graph.vertexExists(3))
self.assertFalse(graph.vertexExists(4))
def testRemoveVertex(self):
graph = DictGraph()
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(1, 2)
graph.addEdge(2, 3)
graph.addEdge(3, 4)
graph.removeVertex(4)
self.assertFalse(graph.vertexExists(4))
self.assertFalse(graph.edgeExists(3, 4))
graph.removeVertex(3)
self.assertFalse(graph.vertexExists(3))
self.assertFalse(graph.edgeExists(2, 3))
self.assertFalse(graph.edgeExists(0, 3))
graph.removeVertex(2)
self.assertFalse(graph.vertexExists(2))
self.assertFalse(graph.edgeExists(1, 2))
self.assertFalse(graph.edgeExists(0, 2))
self.assertTrue(graph.getAllVertexIds() == [0, 1])
self.assertTrue(graph.getAllEdges() == [(0, 1)])
#Try directed graph
graph = DictGraph(False)
graph.addEdge(0, 1)
graph.addEdge(1, 0)
graph.addEdge(0, 3)
graph.addEdge(1, 2)
graph.addEdge(2, 3)
graph.addEdge(3, 4)
graph.removeVertex(0)
self.assertFalse(graph.vertexExists(0))
self.assertFalse(graph.edgeExists(0, 1))
self.assertFalse(graph.edgeExists(0, 3))
self.assertFalse(graph.edgeExists(1, 0))
graph.removeVertex(2)
self.assertFalse(graph.vertexExists(2))
self.assertFalse(graph.edgeExists(1, 2))
self.assertFalse(graph.edgeExists(2, 3))
self.assertTrue(graph.getAllVertexIds() == [1, 3, 4])
self.assertTrue(graph.getAllEdges() == [(3, 4)])
def testToSparseGraph(self):
graph = DictGraph()
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(1, 2)
graph.addEdge(2, 3)
graph.addEdge(3, 4)
graph2 = graph.toSparseGraph()
self.assertEquals(graph2[0, 1], 1)
self.assertEquals(graph2[0, 2], 1)
self.assertEquals(graph2[0, 3], 1)
self.assertEquals(graph2[2, 1], 1)
self.assertEquals(graph2[2, 3], 1)
self.assertEquals(graph2[3, 4], 1)
        def testDepthFirstSearch(self):
                # DFS visit order from several start vertices; vertices 4 and 5
                # form a separate component, so searches never cross between
                # the two components.
                graph = DictGraph()
                graph.addEdge(0, 1)
                graph.addEdge(1, 2)
                graph.addEdge(1, 3)
                graph.addEdge(2, 6)
                graph.addEdge(4, 5)
                self.assertEquals(graph.depthFirstSearch(0), [0,1,2,6,3])
                self.assertEquals(graph.depthFirstSearch(1), [1,0,2,6,3])
                self.assertEquals(graph.depthFirstSearch(6), [6,2,1,0,3])
                self.assertEquals(graph.depthFirstSearch(4), [4, 5])
                self.assertEquals(graph.depthFirstSearch(5), [5, 4])
        def testBreadthFirstSearch(self):
                # BFS visit order (level by level) from several start vertices;
                # vertices 4 and 5 form a separate component.
                graph = DictGraph()
                graph.addEdge(0, 1)
                graph.addEdge(0, 7)
                graph.addEdge(7, 8)
                graph.addEdge(7, 9)
                graph.addEdge(1, 2)
                graph.addEdge(1, 3)
                graph.addEdge(2, 6)
                graph.addEdge(4, 5)
                self.assertEquals(graph.breadthFirstSearch(0), [0,1, 7,2,3,8,9,6])
                self.assertEquals(graph.breadthFirstSearch(1), [1,0,2,3,7,6,8,9])
                self.assertEquals(graph.breadthFirstSearch(6), [6, 2,1,0,3,7,8,9])
                self.assertEquals(graph.breadthFirstSearch(4), [4, 5])
                self.assertEquals(graph.breadthFirstSearch(5), [5, 4])
                self.assertEquals(graph.breadthFirstSearch(7), [7, 0, 8, 9, 1, 2, 3, 6])
def testDegreeSequence(self):
graph = DictGraph()
graph.setVertex("a", 10)
graph["b", "c"] = 1
graph["b", "d"] = 1
graph["d", "e"] = 1
graph["e", "e"] = 1
degreeDict = {}
degreeDict2 = {"a": 0, "b": 2, "c": 1, "d": 2, "e": 3}
for i, id in enumerate(graph.getAllVertexIds()):
degreeDict[id] = graph.degreeSequence()[i]
self.assertEquals(degreeDict, degreeDict2)
def testGetNumDirEdges(self):
graph = DictGraph()
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 2, 0.1)
self.assertTrue(graph.getNumDirEdges() == 4)
graph.addEdge(1, 1)
self.assertTrue(graph.getNumDirEdges() == 5)
graph = DictGraph(False)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
self.assertTrue(graph.getNumDirEdges() == 2)
graph.addEdge(1, 1)
self.assertTrue(graph.getNumDirEdges() == 3)
def testDijkstrasAlgorithm(self):
graph = DictGraph()
graph.addEdge(0, 1, 1)
graph.addEdge(1, 2, 1)
graph.addEdge(1, 3, 1)
graph.addEdge(2, 4, 1)
graph.setVertex(4, 1)
self.assertTrue((graph.dijkstrasAlgorithm(0) == numpy.array([0, 1, 2, 2, 3])).all())
self.assertTrue((graph.dijkstrasAlgorithm(1) == numpy.array([1, 0, 1, 1, 2])).all())
self.assertTrue((graph.dijkstrasAlgorithm(2) == numpy.array([2, 1, 0, 2, 1])).all())
self.assertTrue((graph.dijkstrasAlgorithm(3) == numpy.array([2, 1, 2, 0, 3])).all())
self.assertTrue((graph.dijkstrasAlgorithm(4) == numpy.array([3, 2, 1, 3, 0])).all())
#Test a graph which has an isolated node
graph = DictGraph()
graph.setVertex(5, 1)
graph.addEdge(0, 1, 1)
graph.addEdge(1, 2, 1)
graph.addEdge(1, 3, 1)
self.assertTrue((graph.dijkstrasAlgorithm(0) == numpy.array([0, 1, 2, 2, numpy.inf])).all())
#Test a graph in a ring
graph = DictGraph()
graph.addEdge(0, 1, 1)
graph.addEdge(1, 2, 1)
graph.addEdge(2, 3, 1)
graph.addEdge(3, 4, 1)
graph.addEdge(4, 0, 1)
self.assertTrue((graph.dijkstrasAlgorithm(0) == numpy.array([0, 1, 2, 2, 1])).all())
#Try case in which vertex ids are not numbers
graph = DictGraph()
graph.addEdge("a", "b", 1)
graph.addEdge("b", "c", 1)
graph.addEdge("b", "d", 1)
graph.addEdge("c", "e", 1)
inds = Util.argsort(graph.getAllVertexIds())
self.assertTrue((graph.dijkstrasAlgorithm("a")[inds] == numpy.array([0, 1, 2, 2, 3])).all())
self.assertTrue((graph.dijkstrasAlgorithm("b")[inds] == numpy.array([1, 0, 1, 1, 2])).all())
self.assertTrue((graph.dijkstrasAlgorithm("c")[inds] == numpy.array([2, 1, 0, 2, 1])).all())
self.assertTrue((graph.dijkstrasAlgorithm("d")[inds] == numpy.array([2, 1, 2, 0, 3])).all())
self.assertTrue((graph.dijkstrasAlgorithm("e")[inds] == numpy.array([3, 2, 1, 3, 0])).all())
def testAdjacencyList(self):
graph = DictGraph()
graph.addEdge("a", "b", 1)
graph.addEdge("b", "c", 1)
graph.addEdge("b", "d", 1)
graph.addEdge("c", "e", 1)
graph.setVertex("f", 1)
neighbourIndices, neighbourWeights = graph.adjacencyList()
vertexIds = graph.getAllVertexIds()
for i in range(len(neighbourIndices)):
for k, j in enumerate(neighbourIndices[i]):
self.assertTrue(graph.edgeExists(vertexIds[i], vertexIds[j]))
self.assertEquals(graph[vertexIds[i], vertexIds[j]], neighbourWeights[i][k])
def testFindAllDistances(self):
P = self.graph.findAllDistances()
P2 = numpy.zeros((self.graph.size, self.graph.size))
P2[0, :] = numpy.array([0, 1, 2, 2, 1, numpy.inf])
P2[1, :] = numpy.array([1, 0, 3, 1, 2, numpy.inf])
P2[2, :] = numpy.array([2, 3, 0, 4, 3, numpy.inf])
P2[3, :] = numpy.array([2, 1, 4, 0, 1, numpy.inf])
P2[4, :] = numpy.array([1, 2, 3, 1, 0, numpy.inf])
P2[5, :] = numpy.array([numpy.inf, numpy.inf, numpy.inf, numpy.inf, numpy.inf, 0])
self.assertTrue((P == P2).all())
#Now test the directed graph
P = self.graph2.findAllDistances()
P2 = numpy.zeros((self.graph.size, self.graph.size))
P2[0, :] = numpy.array([0, 1, 2, 2, 1, numpy.inf])
P2[1, :] = numpy.array([numpy.inf, 0, numpy.inf, 1, 2, numpy.inf])
P2[2, :] = numpy.array([numpy.inf, numpy.inf, 0, 5, 6, numpy.inf])
P2[3, :] = numpy.array([numpy.inf, numpy.inf, numpy.inf, 0, 1, numpy.inf])
P2[4, :] = numpy.array([numpy.inf, numpy.inf, numpy.inf, numpy.inf, 0, numpy.inf])
P2[5, :] = numpy.array([numpy.inf, numpy.inf, numpy.inf, numpy.inf, numpy.inf, 0])
self.assertTrue((P == P2).all())
        def testToIGraph(self):
                # Skip quietly when python-igraph is not installed.
                try:
                        import igraph
                except ImportError as error:
                        logging.debug(error)
                        return
                graph = DictGraph()
                graph["a", "b"] = 1
                graph["b", "c"] = 2
                ig = graph.toIGraph()
                self.assertEquals(len(ig.vs), 3)
                # NOTE(review): assumes igraph indices 0..2 map to "a","b","c" in
                # insertion order, and that edge weights collapse to 1 — confirm
                # against toIGraph's implementation.
                self.assertEquals(ig[0, 2], 1)
                self.assertEquals(ig[1, 2], 1)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| {
"content_hash": "e02b5b5b21d24cd01f16b0de547430d1",
"timestamp": "",
"source": "github",
"line_count": 831,
"max_line_length": 123,
"avg_line_length": 33.61492178098676,
"alnum_prop": 0.5748908140617169,
"repo_name": "charanpald/APGL",
"id": "de17a81602564b3e1029e9e52fb18ab8651bcb01",
"size": "27934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apgl/graph/test/DictGraphTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "532692"
}
],
"symlink_target": ""
} |
import functools
import datetime
import mock
from factory import SubFactory
from factory.fuzzy import FuzzyDateTime, FuzzyAttribute, FuzzyChoice
from mock import patch, Mock
import factory
import pytz
from factory.django import DjangoModelFactory
from django.utils import timezone
from django.db.utils import IntegrityError
from faker import Factory
from modularodm.exceptions import NoResultsFound
from website import settings
from website.notifications.constants import NOTIFICATION_TYPES
from website.util import permissions
from website.project.licenses import ensure_licenses
from website.project.model import ensure_schemas
from website.archiver import ARCHIVER_SUCCESS
from website.identifiers.utils import parse_identifiers
from framework.auth.core import Auth
from osf import models
from osf.models.sanctions import Sanction
from osf.utils.names import impute_names_model
from osf.modm_compat import Q
from addons.osfstorage.models import OsfStorageFile
# Shared Faker instance used by FakeList and the factories below.
fake = Factory.create()
# Shadow the imported helper with a quiet (warn=False) variant for tests.
ensure_licenses = functools.partial(ensure_licenses, warn=False)
def get_default_metaschema():
    """Return the first MetaSchema, creating the default schemas on demand.

    This needs to be a function so it runs after the test database is set up.
    """
    try:
        return models.MetaSchema.find()[0]
    except IndexError:
        ensure_schemas()
    return models.MetaSchema.find()[0]
def FakeList(provider, n, *args, **kwargs):
    """Return a list of ``n`` values from the named faker provider."""
    generate = getattr(fake, provider)
    return [generate(*args, **kwargs) for _ in range(n)]
class UserFactory(DjangoModelFactory):
    """Factory for registered, confirmed OSFUser accounts."""
    # TODO: Change this to only generate long names and see what breaks
    fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
    username = factory.Faker('email')
    # All factory users share one known password so tests can authenticate.
    password = factory.PostGenerationMethodCall('set_password',
                                                'queenfan86')
    is_registered = True
    is_claimed = True
    date_confirmed = factory.Faker('date_time_this_decade', tzinfo=pytz.utc)
    merged_by = None
    verification_key = None
    class Meta:
        model = models.OSFUser
    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        # 'emails' is a related manager that can only be populated once the
        # instance has a primary key, so pull it out of the kwargs first.
        emails = kwargs.pop('emails', [])
        instance = super(DjangoModelFactory, cls)._build(target_class, *args, **kwargs)
        if emails:
            # Save for M2M population
            instance.set_unusable_password()
            instance.save()
            for email in emails:
                instance.emails.create(address=email)
        return instance
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        emails = kwargs.pop('emails', [])
        instance = super(DjangoModelFactory, cls)._create(target_class, *args, **kwargs)
        if emails and not instance.pk:
            # Save for M2M population
            instance.set_unusable_password()
            instance.save()
            for email in emails:
                instance.emails.create(address=email)
        return instance
    @factory.post_generation
    def set_names(self, create, extracted):
        # Derive the split name fields (given/family/etc.) from fullname.
        parsed = impute_names_model(self.fullname)
        for key, value in parsed.items():
            setattr(self, key, value)
        if create:
            self.save()
    @factory.post_generation
    def set_emails(self, create, extracted):
        # Ensure the username is also registered as an email address.
        if not self.emails.filter(address=self.username).exists():
            if not self.id:
                if create:
                    # Perform implicit save to populate M2M
                    self.save()
                else:
                    # This might lead to strange behavior
                    return
            self.emails.create(address=str(self.username).lower())
class AuthUserFactory(UserFactory):
    """A user that automatically has an api key, for quick authentication.
    Example: ::
        user = AuthUserFactory()
        res = self.app.get(url, auth=user.auth) # user is "logged in"
    """
    @factory.post_generation
    def add_auth(self, create, extracted):
        # Expose a (username, password) basic-auth tuple matching the shared
        # factory password set in UserFactory.
        self.auth = (self.username, 'queenfan86')
class AuthFactory(factory.base.Factory):
    """Factory producing Auth wrappers around a freshly created user."""
    class Meta:
        model = Auth
    user = factory.SubFactory(UserFactory)
class UnregUserFactory(DjangoModelFactory):
    """Factory for unregistered (placeholder) users created through
    ``OSFUser.create_unregistered``.

    Extra kwargs are applied to the instance with ``setattr``.
    """
    email = factory.Faker('email')
    fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
    date_registered = factory.Faker('date_time', tzinfo=pytz.utc)
    class Meta:
        model = models.OSFUser
    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        '''Build an object without saving it.'''
        ret = target_class.create_unregistered(email=kwargs.pop('email'), fullname=kwargs.pop('fullname'))
        for key, val in kwargs.items():
            setattr(ret, key, val)
        return ret
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # Identical to _build plus a save; delegate to avoid duplication.
        ret = cls._build(target_class, *args, **kwargs)
        ret.save()
        return ret
class UnconfirmedUserFactory(DjangoModelFactory):
    """Factory for a user that has not yet confirmed their primary email
    address (username).
    """
    class Meta:
        model = models.OSFUser
    username = factory.Faker('email')
    fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
    password = 'lolomglgt'
    @classmethod
    def _build(cls, target_class, username, password, fullname):
        '''Build an object without saving it.'''
        instance = target_class.create_unconfirmed(
            username=username, password=password, fullname=fullname
        )
        instance.date_registered = fake.date_time(tzinfo=pytz.utc)
        return instance
    @classmethod
    def _create(cls, target_class, username, password, fullname):
        # Identical to _build plus a save; delegate to avoid duplication.
        instance = cls._build(target_class, username, password, fullname)
        instance.save()
        return instance
class BaseNodeFactory(DjangoModelFactory):
    """Common defaults for Node factories: faked title/description, a fresh
    authenticated creator, and the current time as creation date."""
    title = factory.Faker('catch_phrase')
    description = factory.Faker('sentence')
    date_created = factory.LazyFunction(timezone.now)
    creator = factory.SubFactory(AuthUserFactory)
    class Meta:
        model = models.Node
class ProjectFactory(BaseNodeFactory):
    """A top-level node with category 'project'."""
    category = 'project'
class ProjectWithAddonFactory(ProjectFactory):
    """Factory for a project that has an addon. The addon will be added to
    both the Node and the creator records. ::
        p = ProjectWithAddonFactory(addon='github')
        p.get_addon('github') # => github node settings object
        p.creator.get_addon('github') # => github user settings object
    """
    # TODO: Should use mock addon objects
    @staticmethod
    def _attach_addon(instance, addon):
        # Enable the addon on both the node and its creator (shared by
        # _build and _create, which previously duplicated this).
        auth = Auth(user=instance.creator)
        instance.add_addon(addon, auth)
        instance.creator.add_addon(addon)
    @classmethod
    def _build(cls, target_class, addon='s3', *args, **kwargs):
        '''Build an object without saving it.'''
        instance = ProjectFactory._build(target_class, *args, **kwargs)
        cls._attach_addon(instance, addon)
        return instance
    @classmethod
    def _create(cls, target_class, addon='s3', *args, **kwargs):
        instance = ProjectFactory._create(target_class, *args, **kwargs)
        cls._attach_addon(instance, addon)
        instance.save()
        return instance
class NodeFactory(BaseNodeFactory):
    """A component: a 'hypothesis'-category node with a parent project."""
    category = 'hypothesis'
    parent = factory.SubFactory(ProjectFactory)
class InstitutionFactory(DjangoModelFactory):
    """Institution with faked branding, URLs and domain lists."""
    name = factory.Faker('company')
    login_url = factory.Faker('url')
    logout_url = factory.Faker('url')
    # NOTE(review): FakeList runs once at import time, so every Institution
    # built by this factory shares the same domains/email_domains lists —
    # confirm that is intended.
    domains = FakeList('url', n=3)
    email_domains = FakeList('domain_name', n=1)
    logo_name = factory.Faker('file_name')
    class Meta:
        model = models.Institution
class NodeLicenseRecordFactory(DjangoModelFactory):
    """NodeLicenseRecord defaulting to the canonical 'No license' license."""
    year = factory.Faker('year')
    copyright_holders = FakeList('name', n=3)
    class Meta:
        model = models.NodeLicenseRecord
    @classmethod
    def _create(cls, *args, **kwargs):
        # Make sure the canonical licenses exist before looking one up.
        try:
            models.NodeLicense.find_one(
                Q('name', 'eq', 'No license')
            )
        except NoResultsFound:
            ensure_licenses()
        # Respect an explicitly supplied node_license; otherwise default.
        kwargs['node_license'] = kwargs.get(
            'node_license',
            models.NodeLicense.find_one(
                Q('name', 'eq', 'No license')
            )
        )
        return super(NodeLicenseRecordFactory, cls)._create(*args, **kwargs)
class NodeLogFactory(DjangoModelFactory):
    """NodeLog defaulting to a 'file_added' action by a fresh user."""
    class Meta:
        model = models.NodeLog
    action = 'file_added'
    user = SubFactory(UserFactory)
class PrivateLinkFactory(DjangoModelFactory):
    """Non-anonymous view-only link with a random name and md5 key."""
    class Meta:
        model = models.PrivateLink
    name = factory.Faker('word')
    key = factory.Faker('md5')
    anonymous = False
    creator = factory.SubFactory(UserFactory)
class CollectionFactory(DjangoModelFactory):
    """A plain (non-bookmark) Collection."""
    class Meta:
        model = models.Collection
    is_bookmark_collection = False
    title = factory.Faker('catch_phrase')
    creator = factory.SubFactory(UserFactory)
class BookmarkCollectionFactory(CollectionFactory):
    """The user's special bookmark collection."""
    is_bookmark_collection = True
class RegistrationFactory(BaseNodeFactory):
    """Registers a (possibly freshly created) project, attaches a sanction,
    and unless ``archive=True`` short-circuits archiving to success."""
    creator = None
    # Default project is created if not provided
    category = 'project'
    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        # Registrations involve several saves/side effects, so building
        # without persistence is unsupported.
        raise Exception('Cannot build registration without saving.')
    @classmethod
    def _create(cls, target_class, project=None, is_public=False,
                schema=None, data=None,
                archive=False, embargo=None, registration_approval=None, retraction=None,
                *args, **kwargs):
        # Resolve the registering user: explicit 'user'/'creator' kwargs win,
        # then the supplied project's creator, else a fresh user.
        user = None
        if project:
            user = project.creator
        user = kwargs.pop('user', None) or kwargs.get('creator') or user or UserFactory()
        kwargs['creator'] = user
        # Original project to be registered
        project = project or target_class(*args, **kwargs)
        if project.has_permission(user, 'admin'):
            project.add_contributor(
                contributor=user,
                permissions=permissions.CREATOR_PERMISSIONS,
                log=False,
                save=False
            )
        project.save()
        # Default registration parameters
        schema = schema or get_default_metaschema()
        data = data or {'some': 'data'}
        auth = Auth(user=user)
        register = lambda: project.register_node(
            schema=schema,
            auth=auth,
            data=data
        )
        def add_approval_step(reg):
            # Attach whichever sanction was supplied; otherwise require (and
            # self-authorize) a plain approval by the creator.
            if embargo:
                reg.embargo = embargo
            elif registration_approval:
                reg.registration_approval = registration_approval
            elif retraction:
                reg.retraction = retraction
            else:
                reg.require_approval(reg.creator)
            reg.save()
            reg.sanction.add_authorizer(reg.creator, reg)
            reg.sanction.save()
        # Registration normally enqueues celery work; patch it out.
        with patch('framework.celery_tasks.handlers.enqueue_task'):
            reg = register()
            add_approval_step(reg)
        if not archive:
            # Skip real archiving: mark the archive job finished/successful
            # and approve the sanction directly.
            with patch.object(reg.archive_job, 'archive_tree_finished', Mock(return_value=True)):
                reg.archive_job.status = ARCHIVER_SUCCESS
                reg.archive_job.save()
                reg.sanction.state = Sanction.APPROVED
                reg.sanction.save()
        # models.ArchiveJob(
        #     src_node=project,
        #     dst_node=reg,
        #     initiator=user,
        # )
        if is_public:
            reg.is_public = True
        reg.save()
        return reg
class WithdrawnRegistrationFactory(BaseNodeFactory):
    """Retracts an existing registration and approves the retraction.

    Pass ``registration=...`` (required); ``user`` defaults to the
    registration's creator. Returns the Retraction (withdrawal) object.
    """
    @classmethod
    def _create(cls, *args, **kwargs):
        registration = kwargs.pop('registration', None)
        registration.is_public = True
        user = kwargs.pop('user', registration.creator)
        registration.retract_registration(user)
        withdrawal = registration.retraction
        # list(...) so this works on Python 3, where dict.values() returns a
        # view that does not support indexing.
        token = list(withdrawal.approval_state.values())[0]['approval_token']
        # Avoid hitting the search index during approval.
        with patch('osf.models.AbstractNode.update_search'):
            withdrawal.approve_retraction(user, token)
            withdrawal.save()
        return withdrawal
class SanctionFactory(DjangoModelFactory):
    """Abstract base for sanction factories: creates the sanction plus a
    registration wired to it, and leaves it UNAPPROVED unless approve=True."""
    class Meta:
        abstract = True
    @classmethod
    def _create(cls, target_class, initiated_by=None, approve=False, *args, **kwargs):
        user = kwargs.pop('user', None) or UserFactory()
        kwargs['initiated_by'] = initiated_by or user
        sanction = super(SanctionFactory, cls)._create(target_class, *args, **kwargs)
        # Key the sanction under its model-specific name (e.g. 'embargo') so
        # RegistrationFactory attaches it as the right attribute.
        reg_kwargs = {
            'creator': user,
            'user': user,
            sanction.SHORT_NAME: sanction
        }
        RegistrationFactory(**reg_kwargs)
        if not approve:
            sanction.state = Sanction.UNAPPROVED
            sanction.save()
        return sanction
class RetractionFactory(SanctionFactory):
    """Retraction sanction plus its backing registration."""
    class Meta:
        model = models.Retraction
    user = factory.SubFactory(UserFactory)
class EmbargoFactory(SanctionFactory):
    """Embargo sanction plus its backing registration."""
    class Meta:
        model = models.Embargo
    user = factory.SubFactory(UserFactory)
class RegistrationApprovalFactory(SanctionFactory):
    """RegistrationApproval sanction plus its backing registration."""
    class Meta:
        model = models.RegistrationApproval
    user = factory.SubFactory(UserFactory)
class EmbargoTerminationApprovalFactory(DjangoModelFactory):
    """Requests early termination of an (approved) embargo on a registration
    and returns the resulting approval object."""
    # NOTE(review): FACTORY_STRATEGY is the legacy factory_boy spelling
    # (newer releases use Meta.strategy) — confirm the pinned version uses it.
    FACTORY_STRATEGY = factory.base.CREATE_STRATEGY
    @classmethod
    def create(cls, registration=None, user=None, embargo=None, *args, **kwargs):
        # Fill in whichever of registration/user/embargo were not supplied.
        if registration:
            if not user:
                user = registration.creator
        else:
            user = user or UserFactory()
            if not embargo:
                embargo = EmbargoFactory(state=models.Sanction.APPROVED, approve=True)
                registration = embargo._get_registration()
            else:
                registration = RegistrationFactory(creator=user, user=user, embargo=embargo)
        # Patch out the notification emails sent when approval is requested.
        with mock.patch('osf.models.sanctions.TokenApprovableSanction.ask', mock.Mock()):
            approval = registration.request_embargo_termination(Auth(user))
            return approval
class DraftRegistrationFactory(DjangoModelFactory):
    """DraftRegistration derived from a (possibly freshly created) project."""
    class Meta:
        model = models.DraftRegistration
    @classmethod
    def _create(cls, *args, **kwargs):
        """Create a draft, defaulting the branched-from project, initiator,
        schema and metadata when not supplied.

        Bug fix: the original caught IndexError and ran ensure_schemas() but
        never assigned ``registration_schema``, leaving it None; the lookup
        is now retried after the schemas are created.
        """
        branched_from = kwargs.get('branched_from')
        initiator = kwargs.get('initiator')
        registration_schema = kwargs.get('registration_schema')
        registration_metadata = kwargs.get('registration_metadata')
        if not branched_from:
            project_params = {}
            if initiator:
                project_params['creator'] = initiator
            branched_from = ProjectFactory(**project_params)
        initiator = branched_from.creator
        if not registration_schema:
            try:
                registration_schema = models.MetaSchema.find()[0]
            except IndexError:
                # Fresh test DB: create the default schemas, then retry.
                ensure_schemas()
                registration_schema = models.MetaSchema.find()[0]
        registration_metadata = registration_metadata or {}
        draft = models.DraftRegistration.create_from_node(
            branched_from,
            user=initiator,
            schema=registration_schema,
            data=registration_metadata,
        )
        return draft
class CommentFactory(DjangoModelFactory):
    """Comment targeting a node's guid by default; replies inherit the root
    target from their parent comment."""
    class Meta:
        model = models.Comment
    content = factory.Sequence(lambda n: 'Comment {0}'.format(n))
    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        # Default node/user/target/content when not supplied.
        node = kwargs.pop('node', None) or NodeFactory()
        user = kwargs.pop('user', None) or node.creator
        target = kwargs.pop('target', None) or models.Guid.load(node._id)
        content = kwargs.pop('content', None) or 'Test comment.'
        instance = target_class(
            node=node,
            user=user,
            target=target,
            content=content,
            *args, **kwargs
        )
        # A reply (target is another comment) shares its parent's root;
        # a top-level comment is its own root.
        if isinstance(target.referent, target_class):
            instance.root_target = target.referent.root_target
        else:
            instance.root_target = target
        return instance
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # Identical construction to _build (previously duplicated), plus save.
        instance = cls._build(target_class, *args, **kwargs)
        instance.save()
        return instance
class SubjectFactory(DjangoModelFactory):
    text = factory.Sequence(lambda n: 'Example Subject #{}'.format(n))
    class Meta:
        model = models.Subject
    @classmethod
    def _create(cls, target_class, parent=None, provider=None, bepress_subject=None, *args, **kwargs):
        """Create a Subject, defaulting the provider to 'osf'.

        Non-osf providers are given a fabricated bepress (osf) subject when
        one is not supplied.
        """
        provider = provider or models.PreprintProvider.objects.first() or PreprintProviderFactory(_id='osf')
        if provider._id != 'osf' and not bepress_subject:
            osf = models.PreprintProvider.load('osf') or PreprintProviderFactory(_id='osf')
            bepress_subject = SubjectFactory(provider=osf)
        try:
            ret = super(SubjectFactory, cls)._create(target_class, parent=parent, provider=provider, bepress_subject=bepress_subject, *args, **kwargs)
        except IntegrityError:
            # Text collided with an existing row: reuse that row.
            # NOTE(review): assumes 'text' was passed explicitly when the
            # collision happens; otherwise this raises KeyError -- confirm.
            ret = models.Subject.objects.get(text=kwargs['text'])
        if parent:
            # NOTE(review): parent is assigned but not saved here -- confirm
            # callers save afterwards when they rely on it.
            ret.parent = parent
        return ret
class PreprintProviderFactory(DjangoModelFactory):
    """Factory for PreprintProvider with faked branding fields."""
    name = factory.Faker('company')
    description = factory.Faker('bs')
    external_url = factory.Faker('url')
    class Meta:
        model = models.PreprintProvider
def sync_set_identifiers(preprint):
    """Synchronously assign DOI/ARK identifier values to *preprint*.

    Fakes a successful EZID response for the preprint's guid, parses it with
    parse_identifiers, and stores the resulting values on the preprint.
    """
    success_line = '{doi}osf.io/{guid} | {ark}osf.io/{guid}'.format(
        doi=settings.DOI_NAMESPACE, ark=settings.ARK_NAMESPACE, guid=preprint._id
    )
    fake_ezid_response = {
        'response': {'success': success_line},
        'already_exists': False,
    }
    parsed = parse_identifiers(fake_ezid_response)
    preprint.set_identifier_values(doi=parsed['doi'], ark=parsed['ark'])
class PreprintFactory(DjangoModelFactory):
    """Factory for PreprintService objects, including a primary file upload
    and (by default) publication."""
    class Meta:
        model = models.PreprintService
    doi = factory.Sequence(lambda n: '10.123/{}'.format(n))
    provider = factory.SubFactory(PreprintProviderFactory)
    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        # Unsaved preprint bound to a (possibly fresh) project and provider.
        creator = kwargs.pop('creator', None) or UserFactory()
        project = kwargs.pop('project', None) or ProjectFactory(creator=creator)
        provider = kwargs.pop('provider', None) or PreprintProviderFactory()
        instance = target_class(node=project, provider=provider)
        return instance
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # NOTE(review): this patcher is started but never stopped, so
        # on_preprint_updated.s stays patched for the rest of the process --
        # confirm that is intended.
        update_task_patcher = mock.patch('website.preprints.tasks.on_preprint_updated.s')
        update_task_patcher.start()
        finish = kwargs.pop('finish', True)
        is_published = kwargs.pop('is_published', True)
        instance = cls._build(target_class, *args, **kwargs)
        doi = kwargs.pop('doi', None)
        license_details = kwargs.pop('license_details', None)
        filename = kwargs.pop('filename', None) or 'preprint_file.txt'
        subjects = kwargs.pop('subjects', None) or [[SubjectFactory()._id]]
        instance.node.preprint_article_doi = doi
        user = kwargs.pop('creator', None) or instance.node.creator
        if not instance.node.is_contributor(user):
            instance.node.add_contributor(
                contributor=user,
                permissions=permissions.CREATOR_PERMISSIONS,
                log=False,
                save=True
            )
        # Attach a primary file to the node's osfstorage addon.
        preprint_file = OsfStorageFile.create(
            node=instance.node,
            path='/{}'.format(filename),
            name=filename,
            materialized_path='/{}'.format(filename))
        preprint_file.save()
        from addons.osfstorage import settings as osfstorage_settings
        preprint_file.create_version(user, {
            'object': '06d80e',
            'service': 'cloud',
            osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
        }, {
            'size': 1337,
            'contentType': 'img/png'
        }).save()
        if finish:
            auth = Auth(user)
            instance.set_primary_file(preprint_file, auth=auth, save=True)
            instance.set_subjects(subjects, auth=auth)
            if license_details:
                instance.set_preprint_license(license_details, auth=auth)
            create_task_patcher = mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.s')
            mock_create_identifier = create_task_patcher.start()
            if is_published:
                # NOTE(review): sync_set_identifiers(instance) is executed
                # immediately here; its return value (None) becomes the
                # side_effect. Looks like a deliberate "run now" hack -- confirm.
                mock_create_identifier.side_effect = sync_set_identifiers(instance)
            instance.set_published(is_published, auth=auth)
            create_task_patcher.stop()
        if not instance.is_published:
            # NOTE(review): flag name suggests this marks the node for later
            # cleanup of unpublished preprints -- confirm against model code.
            instance.node._has_abandoned_preprint = True
        instance.node.save()
        instance.save()
        return instance
class TagFactory(DjangoModelFactory):
    """Factory for (non-system) Tag objects with a faked name."""
    class Meta:
        model = models.Tag
    name = factory.Faker('word')
    system = False
class ApiOAuth2PersonalTokenFactory(DjangoModelFactory):
    """Factory for personal OAuth2 tokens with full read/write scopes."""
    class Meta:
        model = models.ApiOAuth2PersonalToken
    owner = factory.SubFactory(UserFactory)
    scopes = 'osf.full_write osf.full_read'
    name = factory.Sequence(lambda n: 'Example OAuth2 Personal Token #{}'.format(n))
class ApiOAuth2ApplicationFactory(DjangoModelFactory):
    """Factory for registered OAuth2 applications with placeholder URLs."""
    class Meta:
        model = models.ApiOAuth2Application
    owner = factory.SubFactory(UserFactory)
    name = factory.Sequence(lambda n: 'Example OAuth2 Application #{}'.format(n))
    home_url = 'ftp://ftp.ncbi.nlm.nimh.gov/'
    callback_url = 'http://example.uk'
class AlternativeCitationFactory(DjangoModelFactory):
    """Factory for AlternativeCitation; persists a citation from name/text kwargs."""
    class Meta:
        model = models.AlternativeCitation

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        citation = target_class(
            name=kwargs.get('name'),
            text=kwargs.get('text'),
        )
        citation.save()
        return citation
class ForkFactory(DjangoModelFactory):
    """Factory that forks an existing project (the 'project' kwarg is required)."""
    class Meta:
        model = models.Node

    @classmethod
    def _create(cls, *args, **kwargs):
        source = kwargs.pop('project', None)
        # Note: the default below is evaluated eagerly, so a missing project
        # raises AttributeError here, matching previous behavior.
        forking_user = kwargs.pop('user', source.creator)
        fork_title = kwargs.pop('title', None)
        fork = source.fork_node(auth=Auth(forking_user), title=fork_title)
        fork.save()
        return fork
class IdentifierFactory(DjangoModelFactory):
    """Factory for Identifier rows, defaulting the category to 'carpid'."""
    class Meta:
        model = models.Identifier
    referent = factory.SubFactory(RegistrationFactory)
    value = factory.Sequence(lambda n: 'carp:/2460{}'.format(n))

    @classmethod
    def _create(cls, *args, **kwargs):
        kwargs.setdefault('category', 'carpid')
        return super(IdentifierFactory, cls)._create(*args, **kwargs)
class NodeRelationFactory(DjangoModelFactory):
    """Factory for a parent/child NodeRelation between two fresh nodes."""
    class Meta:
        model = models.NodeRelation
    child = factory.SubFactory(NodeFactory)
    parent = factory.SubFactory(NodeFactory)
class ExternalAccountFactory(DjangoModelFactory):
    """Factory for ExternalAccount rows bound to the fake 'mock2' provider."""
    class Meta:
        model = models.ExternalAccount
    oauth_key = 'some-silly-key'
    oauth_secret = 'some-super-secret'
    provider = 'mock2'
    provider_id = factory.Sequence(lambda n: 'user-{0}'.format(n))
    provider_name = 'Fake Provider'
    display_name = factory.Sequence(lambda n: 'user-{0}'.format(n))
    profile_url = 'http://wutwut.com/'
    refresh_token = 'some-sillier-key'
class MockOAuth2Provider(models.ExternalProvider):
    """Stub ExternalProvider ('mock2') with hard-coded OAuth2 endpoints,
    matching ExternalAccountFactory above."""
    name = "Mock OAuth 2.0 Provider"
    short_name = "mock2"
    client_id = "mock2_client_id"
    client_secret = "mock2_client_secret"
    auth_url_base = "https://mock2.com/auth"
    callback_url = "https://mock2.com/callback"
    auto_refresh_url = "https://mock2.com/callback"
    refresh_time = 300
    expiry_time = 9001
    def handle_callback(self, response):
        # Ignores the response entirely; always reports the same provider id.
        return {
            'provider_id': 'mock_provider_id'
        }
class NotificationSubscriptionFactory(DjangoModelFactory):
    """Factory for NotificationSubscription with no defaults beyond the model's."""
    class Meta:
        model = models.NotificationSubscription
def make_node_lineage():
    """Create a four-node parent/child chain and return their ids, root first."""
    chain = [NodeFactory()]
    for _ in range(3):
        chain.append(NodeFactory(parent=chain[-1]))
    return [node._id for node in chain]
class NotificationDigestFactory(DjangoModelFactory):
    """Factory for NotificationDigest with fuzzed timestamp/lineage/type."""
    timestamp = FuzzyDateTime(datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC))
    node_lineage = FuzzyAttribute(fuzzer=make_node_lineage)
    user = factory.SubFactory(UserFactory)
    # NOTE(review): on Python 3 .keys() is a view, not a sequence; confirm
    # FuzzyChoice accepts it here.
    send_type = FuzzyChoice(choices=NOTIFICATION_TYPES.keys())
    message = fake.text(max_nb_chars=2048)
    event = fake.text(max_nb_chars=50)
    class Meta:
        model = models.NotificationDigest
class ConferenceFactory(DjangoModelFactory):
    """Factory for active Conference meetings; admins default to one new user."""
    class Meta:
        model = models.Conference
    endpoint = factory.Sequence(lambda n: 'conference{0}'.format(n))
    name = factory.Faker('catch_phrase')
    active = True
    is_meeting = True
    @factory.post_generation
    def admins(self, create, extracted, **kwargs):
        # 'extracted' is the value passed as admins=...; fall back to one user.
        self.admins = extracted or [UserFactory()]
class SessionFactory(DjangoModelFactory):
    """Factory for Session objects, optionally pre-authenticated for a user."""
    class Meta:
        model = models.Session

    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        auth_user = kwargs.pop('user', None)
        session = target_class(*args, **kwargs)
        if auth_user:
            # Populate the auth keys the app reads from session data.
            session.data.update({
                'auth_user_username': auth_user.username,
                'auth_user_id': auth_user._primary_key,
                'auth_user_fullname': auth_user.fullname,
            })
        return session

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        session = cls._build(target_class, *args, **kwargs)
        session.save()
        return session
class ArchiveJobFactory(DjangoModelFactory):
    """Factory for ArchiveJob with no defaults beyond the model's."""
    class Meta:
        model = models.ArchiveJob
| {
"content_hash": "34f8c290f030ba4cc799f1a53bbbe1ae",
"timestamp": "",
"source": "github",
"line_count": 818,
"max_line_length": 150,
"avg_line_length": 32.52444987775061,
"alnum_prop": 0.6349934222890434,
"repo_name": "cwisecarver/osf.io",
"id": "a02c05726754b96a7b3520161dcd1c766ea9b3fd",
"size": "26629",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "osf_tests/factories.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "144027"
},
{
"name": "HTML",
"bytes": "217501"
},
{
"name": "JavaScript",
"bytes": "1712859"
},
{
"name": "Mako",
"bytes": "622293"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7621431"
}
],
"symlink_target": ""
} |
"""Strategy combinations for combinations.combine()."""
import sys
import unittest
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.distribute import central_storage_strategy
from tensorflow.python.distribute import cluster_resolver
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy as mirrored_lib
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import one_device_strategy as one_device_lib
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute import sharded_variable
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import remote
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util as framework_test_util
from tensorflow.python.platform import flags
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import server_lib
from tensorflow.python.util.tf_export import tf_export
# Prefix under which names below are exported via tf_export.
_TF_INTERNAL_API_PREFIX = "__internal__.distribute.combinations."
# Process-wide TPU connection state shared by _create_tpu_strategy calls.
_did_connect_to_cluster = False
_topology = None
# Shorthand for the extended collective-all-reduce implementation class.
CollectiveAllReduceExtended = (
    collective_all_reduce_strategy.CollectiveAllReduceExtended)
def _version_chooser(tf1_cls, tf2_cls):
def creator(*args, **kwargs):
if tf2.enabled():
return tf2_cls(*args, **kwargs)
return tf1_cls(*args, **kwargs)
return creator
# Strategy aliases that resolve to the V1 or V2 class at call time,
# depending on tf2.enabled() (see _version_chooser above).
MirroredStrategy = _version_chooser(mirrored_lib.MirroredStrategyV1,
                                    mirrored_lib.MirroredStrategy)
CentralStorageStrategy = _version_chooser(
    central_storage_strategy.CentralStorageStrategyV1,
    central_storage_strategy.CentralStorageStrategy)
OneDeviceStrategy = _version_chooser(one_device_lib.OneDeviceStrategyV1,
                                     one_device_lib.OneDeviceStrategy)
# Only V2 CollectiveAllReduceStrategy combinations are supported.
CollectiveAllReduceStrategy = (
    collective_all_reduce_strategy.CollectiveAllReduceStrategy)
# pylint: disable=missing-docstring
def _get_tpu_strategy_creator(steps_per_run,
                              use_single_core=False,
                              enable_packed_variable=False,
                              enable_spmd_xla_paritioning=False,
                              **kwargs):
  """Returns a no-arg callable that resolves a TPU and builds a TPUStrategy."""
  def _create_tpu_strategy():
    FLAGS = flags.FLAGS  # pylint: disable=invalid-name
    # Connection and topology are process-wide, so they live in module globals.
    global _did_connect_to_cluster
    global _topology
    try:
      # Attempt to locally discover the TPU. This will fail for Cloud TPU, in
      # which case we fall back to the values passed as flags.
      resolver = tpu_cluster_resolver.TPUClusterResolver()
      did_automatically_resolve = True
    except ValueError:
      did_automatically_resolve = False
      # These flags will be defined by tpu_test_wrapper.py.
      resolver = tpu_cluster_resolver.TPUClusterResolver(
          tpu=hasattr(FLAGS, "tpu") and FLAGS.tpu or "",
          zone=hasattr(FLAGS, "zone") and FLAGS.zone or None,
          project=hasattr(FLAGS, "project") and FLAGS.project or None,
      )
    # Only connect once per process, rather than per test method.
    if not _did_connect_to_cluster:
      if getattr(FLAGS, "tpu", "") or did_automatically_resolve:
        remote.connect_to_cluster(resolver)
        _did_connect_to_cluster = True
      _topology = tpu_strategy_util.initialize_tpu_system(resolver)
    device_assignment = None
    if use_single_core:
      device_assignment = device_assignment_lib.DeviceAssignment(
          _topology,
          core_assignment=device_assignment_lib.SINGLE_CORE_ASSIGNMENT)
    # Steps per run is only supported in TF 1.x
    if tf2.enabled():
      strategy = tpu_lib.TPUStrategyV2(
          resolver,
          device_assignment,
          experimental_spmd_xla_partitioning=enable_spmd_xla_paritioning,
          **kwargs)
    else:
      strategy = tpu_lib.TPUStrategyV1(resolver, steps_per_run,
                                       device_assignment, **kwargs)
    if enable_packed_variable and enable_spmd_xla_paritioning:
      raise ValueError("Packed Variable is not compatiable with SPMD mode")
    strategy._enable_packed_variable_in_eager_mode = enable_packed_variable  # pylint: disable=protected-access
    return strategy
  return _create_tpu_strategy
def _mirrored_strategy_with_collective_key_base(devices):
  """Creates a MirroredStrategy after bumping the collective key base.

  Bumping the base on both the V1 and V2 classes keeps collective keys from
  colliding across strategies created in the same process.
  """
  for strategy_cls in (mirrored_lib.MirroredStrategyV1,
                       mirrored_lib.MirroredStrategy):
    strategy_cls._collective_key_base += 100000  # pylint: disable=protected-access
  return MirroredStrategy(devices)
def _mirrored_strategy_with_no_merge_call(devices):
  """Creates a MirroredStrategy (bumped key base) with merge_call stubbed out."""
  for strategy_cls in (mirrored_lib.MirroredStrategyV1,
                       mirrored_lib.MirroredStrategy):
    strategy_cls._collective_key_base += 100000  # pylint: disable=protected-access
  strategy = MirroredStrategy(devices)
  # Stub out merge call usage.
  strategy.extended._use_merge_call = lambda: False  # pylint: disable=protected-access
  return strategy
def _get_multi_worker_mirrored_creator(required_gpus, use_merge_call=True):
  """Returns a creator building MultiWorkerMirroredStrategy from TF_CONFIG."""
  def _create_multi_worker_mirrored():
    tf_config = cluster_resolver.TFConfigClusterResolver()
    master = tf_config.master()
    if tf_config.rpc_layer:
      # Strip off the rpc_layer suffix.
      master = master[len("%s://" % tf_config.rpc_layer):]
    resolver = cluster_resolver.SimpleClusterResolver(
        cluster_spec=tf_config.cluster_spec(),
        task_type=tf_config.task_type,
        task_id=tf_config.task_id,
        master=master,
        environment=tf_config.environment,
        num_accelerators={"GPU": required_gpus},
        rpc_layer=tf_config.rpc_layer or "grpc",
    )
    # Disable health check and coordination service. We don't have a reliable
    # way to shutdown the strategy (and thus the strategy health check or
    # coordination service heartbeat) at the end of a test. Turning on the
    # strategy health check or coordination service heartbeat causes some
    # flakiness since we re-create part of the server when creating a strategy,
    # and our tests are capable of handling failures.
    CollectiveAllReduceExtended._enable_check_health = False  # pylint: disable=protected-access
    context.context().configure_coordination_service(service_type="")
    # Always create the strategy in eager mode so that it starts the server and
    # configures the eager context. The eager context can no longer be
    # configured after initialization.
    with context.eager_mode():
      strategy = CollectiveAllReduceStrategy(cluster_resolver=resolver)
    if not use_merge_call:
      strategy.extended._use_merge_call = lambda: False  # pylint: disable=protected-access
    # TODO(b/152320929): Wait for the cluster before proceeding, otherwise
    # collectives may hang if any worker launches collectives before the chief
    # creates the strategy.
    try:
      multi_process_runner.get_barrier().wait()
    except ValueError:
      # If the creator is called in the main process,
      # multi_process_runner.get_barrier() raises ValueError, which is safe to
      # ignore.
      pass
    return strategy
  def skip_if_cannot_start_grpc_server():
    # Wrapper converting gRPC server start failures (under bazel) into skips.
    try:
      return _create_multi_worker_mirrored()
    except errors.UnknownError as e:
      if "Could not start gRPC server" in e.message and (
          len(sys.argv) >= 1 and "bazel" in sys.argv[0]):
        raise unittest.SkipTest("Cannot start std servers.")
      else:
        raise
  return skip_if_cannot_start_grpc_server
# Due to b/195615322, FixedShardsPartitioner will wrongly partition
# RNG state, so we use MinSizePartitioner as the default. Maximum RNG
# state size is int64[3] which is 8 * 3 bytes, so we set
# min_shard_bytes to 8 * 3 + 1.
# Used as the default variable_partitioner for the PS strategies below.
DEFAULT_PARTITIONER = sharded_variable.MinSizePartitioner(
    min_shard_bytes=8 * 3 + 1, max_shards=2)
def _get_ps_strategy_creator(num_workers,
                             num_ps,
                             required_gpus=0,
                             variable_partitioner=DEFAULT_PARTITIONER):
  """Returns a creator for ParameterServerStrategyV2 test clusters.

  Under XLA an in-process cluster is used; otherwise the cluster comes from
  TF_CONFIG, and worker/ps tasks block in a server loop instead of returning.
  """
  def _create_ps_strategy(resolver, variable_partitioner):
    return parameter_server_strategy_v2.ParameterServerStrategyV2(
        resolver, variable_partitioner=variable_partitioner)
  def _create_parameter_server():
    if framework_test_util.is_xla_enabled():
      # To address test failures resulting in XLA with MultiProcessRunner,
      # continue to use in-process cluster for XLA tests.
      cluster_def = multi_worker_test_base.create_in_process_cluster(
          num_workers=num_workers, num_ps=num_ps, rpc_layer="grpc")
      resolver = cluster_resolver.SimpleClusterResolver(
          server_lib.ClusterSpec(cluster_def),
          num_accelerators={"GPU": required_gpus},
          rpc_layer="grpc")
      return _create_ps_strategy(resolver, variable_partitioner)
    else:
      tf_config = cluster_resolver.TFConfigClusterResolver()
      cluster_def = tf_config.cluster_spec().as_dict()
      if not cluster_def:
        # When MultiProcessRunner cluster is used, the cluster is not created
        # initially when the decorator is called. When the test runs, initially
        # this method is invoked via decorator before setting up the
        # MultiProcessRunner with worker and ps in the combinations.py. After
        # setup is done, the subprocess invokes this method again to get
        # strategy object. We return None strategy when the main thread invokes
        # this method before setting up cluster.
        # Returning None is fine here, since this thread will proceed to create
        # MultiProcessRunner and invoke tests with decorator inside
        # subprocesses.
        return None
      # MultiProcessRunner is already setup and this method is invoked from a
      # subprocess running the actual test.
      resolver = cluster_resolver.SimpleClusterResolver(
          server_lib.ClusterSpec(cluster_def),
          num_accelerators={"GPU": required_gpus},
          task_type=tf_config.task_type,
          task_id=tf_config.task_id,
          environment=tf_config.environment,
          rpc_layer=tf_config.rpc_layer or "grpc")
      if tf_config.task_type in ("worker", "ps"):
        worker_config = config_pb2.ConfigProto()
        worker_config.inter_op_parallelism_threads = 4  # max num_workers + 1
        try:
          server = server_lib.Server(
              cluster_def,
              job_name=tf_config.task_type,
              task_index=tf_config.task_id,
              protocol="grpc",
              config=worker_config)
        except errors.UnknownError as e:
          if "Could not start gRPC server" in e.message:
            raise unittest.SkipTest("Cannot start std servers.")
          else:
            raise
        # Blocking the process that starts a server from exiting.
        server.join()
      return _create_ps_strategy(resolver, variable_partitioner)
  return _create_parameter_server
def _deferred_pool_runner(has_chief,
num_workers,
initializer=None,
share_gpu=True):
"""Returns a callable that returns the pool runner.
It creates the pool runner only upon first invocation. This avoids creating it
when this file is imported.
Args:
has_chief: whether there should be a chief.
num_workers: the number of workers excluding the chief.
initializer: initializer of each process.
share_gpu: whether to share GPU between the workers.
Returns:
A callable that returns the runner.
"""
container = []
def get_or_create():
if not container:
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=has_chief,
num_workers=num_workers,
num_ps=0,
has_eval=False)
runner = multi_process_runner.MultiProcessPoolRunner(
cluster_spec, initializer=initializer, share_gpu=share_gpu)
container.append(runner)
return container[0]
return get_or_create
# We need to create the strategy in the initializer to start the server before
# any test runs.
_two_worker_pool = _deferred_pool_runner(
has_chief=True,
num_workers=1,
initializer=_get_multi_worker_mirrored_creator(required_gpus=0))
# Two-worker pool where each worker gets it's own GPU. Useful for testing MWMS
# on a single host.
_two_worker_pool_noshare = _deferred_pool_runner(
has_chief=True,
num_workers=1,
initializer=_get_multi_worker_mirrored_creator(required_gpus=0),
share_gpu=False)
_four_worker_pool = _deferred_pool_runner(
has_chief=True,
num_workers=3,
initializer=_get_multi_worker_mirrored_creator(required_gpus=0))
# pylint: disable=g-long-lambda
default_strategy = combinations.NamedDistribution(
"Default",
distribution_strategy_context._get_default_strategy, # pylint: disable=protected-access
required_gpus=None)
one_device_strategy = combinations.NamedDistribution(
"OneDeviceCPU", lambda: OneDeviceStrategy("/cpu:0"), required_gpus=None)
one_device_strategy_gpu = combinations.NamedDistribution(
"OneDeviceGPU", lambda: OneDeviceStrategy("/gpu:0"), required_gpus=1)
one_device_strategy_on_worker_1 = combinations.NamedDistribution(
"OneDeviceOnWorker1CPU",
lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/cpu:0"),
required_gpus=None)
one_device_strategy_gpu_on_worker_1 = combinations.NamedDistribution(
"OneDeviceOnWorker1GPU",
lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/gpu:0"),
required_gpus=1)
tpu_strategy = combinations.NamedDistribution(
"TPU", _get_tpu_strategy_creator(steps_per_run=2), required_tpu=True)
tpu_strategy_packed_var = combinations.NamedDistribution(
"TPUPackedVar",
_get_tpu_strategy_creator(steps_per_run=2, enable_packed_variable=True),
required_tpu=True)
tpu_strategy_spmd = combinations.NamedDistribution(
"TPUUseSPMD",
_get_tpu_strategy_creator(
steps_per_run=2, enable_spmd_xla_paritioning=True),
required_tpu=True)
tpu_strategy_one_step = combinations.NamedDistribution(
"TPUOneStep", _get_tpu_strategy_creator(steps_per_run=1), required_tpu=True)
tpu_strategy_one_core = combinations.NamedDistribution(
"TPUOneCore",
_get_tpu_strategy_creator(steps_per_run=2, use_single_core=True),
required_tpu=True)
tpu_strategy_one_step_one_core = combinations.NamedDistribution(
"TPUOneStepOneCore",
_get_tpu_strategy_creator(steps_per_run=1, use_single_core=True),
required_tpu=True)
cloud_tpu_strategy = combinations.NamedDistribution(
"CloudTPU",
_get_tpu_strategy_creator(steps_per_run=2),
required_tpu=True,
use_cloud_tpu=True)
mirrored_strategy_with_one_cpu = combinations.NamedDistribution(
"Mirrored1CPU",
lambda: _mirrored_strategy_with_collective_key_base(["/cpu:0"]))
mirrored_strategy_with_one_gpu = combinations.NamedDistribution(
"Mirrored1GPU",
lambda: _mirrored_strategy_with_collective_key_base(["/gpu:0"]),
required_gpus=1)
mirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
"MirroredCPUAndGPU",
lambda: _mirrored_strategy_with_collective_key_base(["/gpu:0", "/cpu:0"]),
required_gpus=1)
mirrored_strategy_with_two_cpus = combinations.NamedDistribution(
"Mirrored2CPUs",
lambda: _mirrored_strategy_with_collective_key_base(["/cpu:0", "/cpu:1"]),
required_gpus=0)
mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
"Mirrored2GPUs",
lambda: _mirrored_strategy_with_collective_key_base(["/gpu:0", "/gpu:1"]),
required_gpus=2)
mirrored_strategy_with_two_gpus_no_merge_call = combinations.NamedDistribution(
"Mirrored2GPUsNoMergeCall",
lambda: _mirrored_strategy_with_no_merge_call(["/gpu:0", "/gpu:1"]),
required_physical_gpus=2)
# Should call set_virtual_cpus_to_at_least(3) in your test's setUp methods.
# Deprecated, use mirrored_strategy_with_two_cpus instead.
mirrored_strategy_with_cpu_1_and_2 = combinations.NamedDistribution(
"Mirrored2CPU",
lambda: _mirrored_strategy_with_collective_key_base(["/cpu:1", "/cpu:2"]))
mirrored_strategy_with_cpu_1_and_2.__doc__ = (
"""Mirrored strategy with 2 virtual CPUs.
Should set up logical devices before use
""")
central_storage_strategy_with_two_gpus = combinations.NamedDistribution(
"CentralStorage2GPUs",
lambda: CentralStorageStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
central_storage_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
"CentralStorageCPUAndGPU",
lambda: CentralStorageStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
# chief + 1 worker, with CPU.
multi_worker_mirrored_2x1_cpu = combinations.NamedDistribution(
"MultiWorkerMirrored2x1CPU",
_get_multi_worker_mirrored_creator(required_gpus=0),
has_chief=True,
num_workers=1,
pool_runner_fn=_two_worker_pool,
no_xla=True,
)
# chief + 1 worker, with 1 GPU each.
multi_worker_mirrored_2x1_gpu = combinations.NamedDistribution(
"MultiWorkerMirrored2x1GPU",
_get_multi_worker_mirrored_creator(required_gpus=1),
has_chief=True,
num_workers=1,
required_gpus=1,
pool_runner_fn=_two_worker_pool,
share_gpu=False,
)
# Same as above, but not sharing the GPU between the workers.
multi_worker_mirrored_2x1_gpu_noshare = combinations.NamedDistribution(
"MultiWorkerMirrored2x1GPUNoShare",
_get_multi_worker_mirrored_creator(required_gpus=1),
has_chief=True,
num_workers=1,
required_gpus=1,
pool_runner_fn=_two_worker_pool_noshare,
share_gpu=False,
)
# chief + 1 worker, with 2 GPU each.
multi_worker_mirrored_2x2_gpu = combinations.NamedDistribution(
"MultiWorkerMirrored2x2GPU",
_get_multi_worker_mirrored_creator(required_gpus=2),
has_chief=True,
num_workers=1,
required_gpus=2,
pool_runner_fn=_two_worker_pool,
no_xla=True,
)
multi_worker_mirrored_2x2_gpu_no_merge_call = combinations.NamedDistribution(
"MultiWorkerMirrored2x2GPUNoMergeCall",
_get_multi_worker_mirrored_creator(required_gpus=2, use_merge_call=False),
has_chief=True,
num_workers=1,
required_physical_gpus=2,
pool_runner_fn=_two_worker_pool,
no_xla=True,
)
# chief + 3 workers, with CPU.
multi_worker_mirrored_4x1_cpu = combinations.NamedDistribution(
"MultiWorkerMirrored4x1CPU",
_get_multi_worker_mirrored_creator(required_gpus=0),
has_chief=True,
num_workers=3,
pool_runner_fn=_four_worker_pool,
no_xla=True,
)
def parameter_server_strategy_fn(name,
                                 num_workers,
                                 num_ps,
                                 required_gpus=0,
                                 variable_partitioner=DEFAULT_PARTITIONER):
  """Builds a NamedDistribution for a parameter-server test cluster."""
  strategy_creator = _get_ps_strategy_creator(
      num_workers=num_workers,
      num_ps=num_ps,
      required_gpus=required_gpus,
      variable_partitioner=variable_partitioner)
  return combinations.NamedDistribution(
      name,
      strategy_creator,
      required_gpus=required_gpus,
      num_workers=num_workers,
      has_chief=True,
      num_ps=num_ps)
# Canonical parameter-server combinations used across distribute tests.
parameter_server_strategy_3worker_2ps_cpu = parameter_server_strategy_fn(
    "ParameterServer3Worker2PSCPU", num_workers=3, num_ps=2)
parameter_server_strategy_1worker_2ps_cpu = parameter_server_strategy_fn(
    "ParameterServer1Worker2PSCPU", num_workers=1, num_ps=2)
parameter_server_strategy_3worker_2ps_1gpu = parameter_server_strategy_fn(
    "ParameterServer3Worker2PS1GPU", num_workers=3, num_ps=2, required_gpus=1)
parameter_server_strategy_1worker_2ps_1gpu = parameter_server_strategy_fn(
    "ParameterServer1Worker2PS1GPU", num_workers=1, num_ps=2, required_gpus=1)
graph_and_eager_modes = ["graph", "eager"]
# TODO(crccw): remove after tf-nightly picks up the new API.
def set_virtual_cpus_to_at_least(num_virtual_cpus):
  """Deprecated shim: delegates to test_util.set_logical_devices_to_at_least."""
  test_util.set_logical_devices_to_at_least("CPU", num_virtual_cpus)
# Strategy groupings commonly passed to combinations.combine().
strategies_minus_tpu = [
    default_strategy,
    one_device_strategy,
    one_device_strategy_gpu,
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
    central_storage_strategy_with_gpu_and_cpu,
]
strategies_minus_default_and_tpu = [
    one_device_strategy,
    one_device_strategy_gpu,
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
]
tpu_strategies = [
    tpu_strategy,  # steps_per_run=2
    tpu_strategy_one_step,
    tpu_strategy_packed_var,
    cloud_tpu_strategy,
]
all_strategies_minus_default = strategies_minus_default_and_tpu + tpu_strategies
all_strategies = strategies_minus_tpu + tpu_strategies
two_replica_strategies = [
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
    multi_worker_mirrored_2x1_cpu,
    multi_worker_mirrored_2x1_gpu,
    tpu_strategy,  # steps_per_run=2
    tpu_strategy_one_step,
    central_storage_strategy_with_gpu_and_cpu,
]
four_replica_strategies = [
    multi_worker_mirrored_2x2_gpu,
    multi_worker_mirrored_4x1_cpu,
]
# TODO(b/159831907): replace with two_replica_strategies after the tests using
# it work with MWMS.
multidevice_strategies = [
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
    tpu_strategy,  # steps_per_run=2
    tpu_strategy_one_step
]
multiworker_strategies = [
    multi_worker_mirrored_2x1_cpu, multi_worker_mirrored_2x1_gpu,
    multi_worker_mirrored_2x2_gpu
]
def strategy_minus_tpu_combinations():
return combinations.combine(
distribution=strategies_minus_tpu, mode=["graph", "eager"])
def tpu_strategy_combinations():
return combinations.combine(distribution=tpu_strategies, mode=["graph"])
def all_strategy_combinations():
return strategy_minus_tpu_combinations() + tpu_strategy_combinations()
def all_strategy_minus_default_and_tpu_combinations():
return combinations.combine(
distribution=[
one_device_strategy, one_device_strategy_gpu,
mirrored_strategy_with_gpu_and_cpu, mirrored_strategy_with_two_gpus
],
mode=["graph", "eager"])
def all_strategy_combinations_minus_default():
  """All non-default combinations: non-TPU (graph+eager) plus TPU (graph)."""
  combined = all_strategy_minus_default_and_tpu_combinations()
  combined += tpu_strategy_combinations()
  return combined
# Export each strategy combination above as a TF-internal API symbol
# (v1=[] keeps these names out of the TF 1.x namespace).
tf_export(
    _TF_INTERNAL_API_PREFIX + "central_storage_strategy_with_gpu_and_cpu",
    v1=[]).export_constant(__name__,
                           "central_storage_strategy_with_gpu_and_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "central_storage_strategy_with_two_gpus",
    v1=[]).export_constant(__name__, "central_storage_strategy_with_two_gpus")
tf_export(
    _TF_INTERNAL_API_PREFIX + "cloud_tpu_strategy",
    v1=[]).export_constant(__name__, "cloud_tpu_strategy")
tf_export(
    _TF_INTERNAL_API_PREFIX + "default_strategy",
    v1=[]).export_constant(__name__, "default_strategy")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_cpu_1_and_2",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_cpu_1_and_2")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_gpu_and_cpu",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_gpu_and_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_one_cpu",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_one_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_one_gpu",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_one_gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_two_gpus",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_two_gpus")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_two_gpus_no_merge_call",
    v1=[]).export_constant(__name__,
                           "mirrored_strategy_with_two_gpus_no_merge_call")
tf_export(
    _TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_cpu",
    v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_gpu",
    v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_gpu_noshare",
    v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_gpu_noshare")
tf_export(
    _TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x2_gpu",
    v1=[]).export_constant(__name__, "multi_worker_mirrored_2x2_gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x2_gpu_no_merge_call",
    v1=[]).export_constant(__name__,
                           "multi_worker_mirrored_2x2_gpu_no_merge_call")
tf_export(
    _TF_INTERNAL_API_PREFIX + "one_device_strategy",
    v1=[]).export_constant(__name__, "one_device_strategy")
tf_export(
    _TF_INTERNAL_API_PREFIX + "one_device_strategy_gpu",
    v1=[]).export_constant(__name__, "one_device_strategy_gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "tpu_strategy",
    v1=[]).export_constant(__name__, "tpu_strategy")
tf_export(
    _TF_INTERNAL_API_PREFIX + "parameter_server_strategy_3worker_2ps_cpu",
    v1=[]).export_constant(__name__,
                           "parameter_server_strategy_3worker_2ps_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "parameter_server_strategy_1worker_2ps_cpu",
    v1=[]).export_constant(__name__,
                           "parameter_server_strategy_1worker_2ps_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "parameter_server_strategy_3worker_2ps_1gpu",
    v1=[]).export_constant(__name__,
                           "parameter_server_strategy_3worker_2ps_1gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "parameter_server_strategy_1worker_2ps_1gpu",
    v1=[]).export_constant(__name__,
                           "parameter_server_strategy_1worker_2ps_1gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "tpu_strategy_one_core",
    v1=[]).export_constant(__name__, "tpu_strategy_one_core")
tf_export(
    _TF_INTERNAL_API_PREFIX + "tpu_strategy_packed_var",
    v1=[]).export_constant(__name__, "tpu_strategy_packed_var")
| {
"content_hash": "89d868fd6775a2f02a5b98b68bca31aa",
"timestamp": "",
"source": "github",
"line_count": 665,
"max_line_length": 111,
"avg_line_length": 39.57142857142857,
"alnum_prop": 0.6967129013870416,
"repo_name": "yongtang/tensorflow",
"id": "595155988d03953397554614094bc959884e0503",
"size": "27004",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/strategy_combinations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1368342"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125162438"
},
{
"name": "CMake",
"bytes": "179878"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2118448"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792868"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11205807"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300198"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42642473"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621427"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14607"
},
{
"name": "Starlark",
"bytes": "7577804"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""This module contains a Google Dataprep operator."""
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.dataprep import GoogleDataprepHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class DataprepGetJobsForJobGroupOperator(BaseOperator):
    """
    Get information about the batch jobs within a Cloud Dataprep job.

    API documentation https://clouddataprep.com/documentation/api#section/Overview

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:DataprepGetJobsForJobGroupOperator`

    :param dataprep_conn_id: The connection ID to use when connecting to Dataprep
    :param job_id: The ID of the job that will be requested
    """

    template_fields: Sequence[str] = ("job_id",)

    def __init__(self, *, dataprep_conn_id: str = "dataprep_default", job_id: int, **kwargs) -> None:
        super().__init__(**kwargs)
        # Store the plain string; the original wrapped it in a one-element
        # tuple (`(dataprep_conn_id,)`), which broke any non-default usage.
        self.dataprep_conn_id = dataprep_conn_id
        self.job_id = job_id

    def execute(self, context: 'Context') -> dict:
        """Fetch the jobs for this job group and return the API response dict."""
        self.log.info("Fetching data for job with id: %d ...", self.job_id)
        # Use the configured connection; previously "dataprep_default" was
        # hard-coded here, silently ignoring dataprep_conn_id.
        hook = GoogleDataprepHook(dataprep_conn_id=self.dataprep_conn_id)
        response = hook.get_jobs_for_job_group(job_id=self.job_id)
        return response
class DataprepGetJobGroupOperator(BaseOperator):
    """
    Get the specified job group.

    A job group is a job that is executed from a specific node in a flow.

    API documentation https://clouddataprep.com/documentation/api#section/Overview

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:DataprepGetJobGroupOperator`

    :param dataprep_conn_id: The connection ID to use when connecting to Dataprep
    :param job_group_id: The ID of the job group that will be requested
    :param embed: Comma-separated list of objects to pull in as part of the response
    :param include_deleted: if set to "true", will include deleted objects
    """
    template_fields: Sequence[str] = ("job_group_id", "embed")
    def __init__(
        self,
        *,
        dataprep_conn_id: str = "dataprep_default",
        job_group_id: int,
        embed: str,
        include_deleted: bool,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.dataprep_conn_id: str = dataprep_conn_id
        self.job_group_id = job_group_id
        self.embed = embed
        self.include_deleted = include_deleted
    def execute(self, context: 'Context') -> dict:
        """Fetch the job group via the Dataprep hook and return the API response."""
        self.log.info("Fetching data for job with id: %d ...", self.job_group_id)
        hook = GoogleDataprepHook(dataprep_conn_id=self.dataprep_conn_id)
        response = hook.get_job_group(
            job_group_id=self.job_group_id,
            embed=self.embed,
            include_deleted=self.include_deleted,
        )
        return response
class DataprepRunJobGroupOperator(BaseOperator):
    """
    Create a ``jobGroup``, which launches the specified job as the authenticated user.

    This performs the same action as clicking on the Run Job button in the application.

    To get recipe_id please follow the Dataprep API documentation
    https://clouddataprep.com/documentation/api#operation/runJobGroup

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:DataprepRunJobGroupOperator`

    :param dataprep_conn_id: The Dataprep connection ID
    :param body_request: Passed as the body_request to GoogleDataprepHook's run_job_group,
        where it's the identifier for the recipe to run
    """
    template_fields: Sequence[str] = ("body_request",)
    def __init__(self, *, dataprep_conn_id: str = "dataprep_default", body_request: dict, **kwargs) -> None:
        super().__init__(**kwargs)
        self.body_request = body_request
        self.dataprep_conn_id = dataprep_conn_id
    def execute(self, context: "Context") -> dict:
        """Launch the job group run and return the API response dict."""
        self.log.info("Creating a job...")
        hook = GoogleDataprepHook(dataprep_conn_id=self.dataprep_conn_id)
        response = hook.run_job_group(body_request=self.body_request)
        return response
| {
"content_hash": "1c2096cc8fdce65be03343da0b1d1a4b",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 108,
"avg_line_length": 37.944954128440365,
"alnum_prop": 0.664651837524178,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "54f76dc381b479095d014d6ebc45b254ae5012e4",
"size": "4923",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/operators/dataprep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
class StripeError(Exception):
    """Base class for all Stripe errors.

    Carries the raw HTTP response details (body, status, parsed JSON, headers)
    alongside the message, and extracts the Stripe ``request-id`` header so it
    can be included in ``str()`` output for support/debugging.
    """

    def __init__(self, message=None, http_body=None, http_status=None,
                 json_body=None, headers=None):
        super(StripeError, self).__init__(message)

        # Normalize a bytes body to text; keep a placeholder if it is not
        # valid UTF-8. Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        if http_body and hasattr(http_body, 'decode'):
            try:
                http_body = http_body.decode('utf-8')
            except UnicodeDecodeError:
                http_body = ('<Could not decode body as utf-8. '
                             'Please report to support@stripe.com>')

        self.http_body = http_body
        self.http_status = http_status
        self.json_body = json_body
        self.headers = headers or {}
        self.request_id = self.headers.get('request-id', None)

    def __str__(self):
        msg = super(StripeError, self).__str__()
        if self.request_id is not None:
            return "Request {0}: {1}".format(self.request_id, msg)
        else:
            return msg
class APIError(StripeError):
    """Generic server-side API error."""
    pass
class APIConnectionError(StripeError):
    """Raised when the HTTP connection to the API could not be made."""
    pass
class CardError(StripeError):
    """Error for a declined/invalid card; exposes the offending param and code."""

    def __init__(self, message, param, code, http_body=None,
                 http_status=None, json_body=None, headers=None):
        super(CardError, self).__init__(
            message, http_body=http_body, http_status=http_status,
            json_body=json_body, headers=headers)
        self.param = param
        self.code = code
class InvalidRequestError(StripeError):
    """Error for a malformed request; exposes the offending parameter name."""

    def __init__(self, message, param, http_body=None,
                 http_status=None, json_body=None, headers=None):
        super(InvalidRequestError, self).__init__(
            message, http_body=http_body, http_status=http_status,
            json_body=json_body, headers=headers)
        self.param = param
class AuthenticationError(StripeError):
    """Raised when the API key is missing or invalid."""
    pass
class RateLimitError(StripeError):
    """Raised when too many requests hit the API too quickly."""
    pass
| {
"content_hash": "f87ad707a7d64587e60b39063425861f",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 70,
"avg_line_length": 28.14516129032258,
"alnum_prop": 0.5805157593123209,
"repo_name": "alexmic/stripe-python",
"id": "c3415d7a280d020a231bbafe2ed5c71d1cb7e214",
"size": "1758",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stripe/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "161153"
}
],
"symlink_target": ""
} |
__author__ = 'Steven LI'
from test_steps import *
def my_add(*args):
    """Return the sum of all positional arguments (0 when called with none)."""
    return sum(args)
#############################################################################
## Note the random 1-3 second sleep in my_mul below: it exists to
## demonstrate the timing-related check options (duration, timeout, repeat).
def my_mul(*args):
    """Return the product of all arguments, after a random 1-3s delay.

    The sleep is deliberate: it exercises the timing-related check options
    (duration/timeout/repeat) demonstrated in test_check_options.
    """
    import time, random
    time.sleep(random.uniform(1,3))
    ret = 1
    for i in args:
        ret *= i
    return ret
def test_check_options():
    """Demonstrate each keyword option accepted by check(), one per call."""
    test_logger.info("To show the check with options functions")
    ## check("expr1 op expr2", globals=globals(), locals=locals(), **kwargs)
    ## **kwargs means options, we will see them one by one here
    # duration: the step will last at least duration seconds, sleep if return earlier
    check("my_add(3,4,5) == my_mul(3,4)", duration=3)
    # warning: pass this check anyway, but log a warning message
    check("my_add(3,4,6) == 12", warning=True)
    # timeout: if the step does not return in timeout seconds, it will fail
    check("my_mul(3,4,5) > 20", timeout=1, warning=True)
    # exception: expected there is an exception in the step, or fail
    check("my_mul(2,3)/my_add() == 5", exception=ZeroDivisionError)
    # passdesc/faildesc: the description for pass/fail to replace the code_string in the log
    check("my_mul(2,3)*my_add(1) >= 5", passdesc="result is 5, pass", faildesc="result <= 5")
    # xfail: expected fail, that is reverse the result
    check("my_mul(2,3)*my_add(3) < 5", xfail=True)
    # skip: skip this case, do not execute it, just pass
    check("my_mul(4,5)*5 == 0", skip=True)
    # repeat: repeat the step another second if the condition is not satisfied, until time out in 'repeat' seconds
    check("my_add(3,4) >= 11", repeat=5)
################ the log sample of above function test - test_check_options #######################
## Got the following log from he default log file test_20150117_1707.log
'''
2015-01-17 17:07:26,548 - INFO - To show the check with options functions
2015-01-17 17:07:29,549 - INFO - ------------------------------------------------------
2015-01-17 17:07:29,550 - INFO - Func test_check_options in file: /TestSteps/test_examples/test_lesson3_options.py
2015-01-17 17:07:29,550 - INFO - Check-1: my_add(3,4,5) == my_mul(3,4) -PASS- 12 == 12 - sleep 1 seconds (-d 3 set)
2015-01-17 17:07:29,550 - INFO - Check-2: my_add(3,4,6) == 12 -PASS- 13 == 12
2015-01-17 17:07:29,550 - WARNING - ^^^ condition not met (pass due to -w option set) ^^^
2015-01-17 17:07:30,551 - INFO - Check-3: my_mul(3,4,5) > 20 -PASS- - Step Timeout (-t 1 set)
2015-01-17 17:07:30,552 - WARNING - ^^^ condition not met (pass due to -w option set) ^^^
2015-01-17 17:07:33,481 - INFO - Check-4: my_mul(2,3)/my_add() == 5 -PASS- - exception: <class 'ZeroDivisionError'> caught
2015-01-17 17:07:35,560 - INFO - Check-5: result is 5, pass -PASS- 6 >= 5
2015-01-17 17:07:38,415 - DEBUG - -vvv- reverse the result (due to -x option set) -vvv-
2015-01-17 17:07:38,416 - INFO - Check-6: my_mul(2,3)*my_add(3) < 5 -PASS- 18 < 5 - Original result: False (-x option set)
2015-01-17 17:07:38,416 - INFO - Check-7: my_mul(4,5)*5 == 0 -PASS- - SKIPPED (-s option set)
2015-01-17 17:07:43,420 - DEBUG - -vv- Results(-r 5 set) { 1:<7 >= 11> 2:<7 >= 11> 3:<7 >= 11> 4:<7 >= 11> 5:<7 >= 11> } -vv-
2015-01-17 17:07:43,420 - ERROR - Check-8: my_add(3,4) >= 11 -FAIL- 7 >= 11 - tried 5 times in 5 seconds
'''
### If you review the logs, you can see additional information (including info and debug info)
### provided for the tester's information.
#
################### Supported options by default #######################################################
# timeout=30, fail if the step could not complete in 30 seconds
# repeat=20, repeat in another second if fail until pass, timeout in 20s
# duration=15, stay in this step for 15 seconds, even it completed shortly
# xfail=True, expected failure, report pass when fail, vice versa
# warning=True, Pass the step anyway, but log a warning message if the condition is not met
# skip=True, just skip this case.
# exception=NameError, expected exception will be raised. pass if so, or fail
# passdesc="the string to log if passed" (replace the code_string in the log)
# faildesc="the string to log if failed" (replace the code_string in the log)
#
## All these options let you write a scenario in one line that is easy to understand.
## Can we write the code even shorter? The answer is YES: the checks() function
## makes it simpler and provides more functionality.
## See lesson 4 for more.
if __name__ == '__main__':
test_check_options() | {
"content_hash": "2abee7dcc77854f3ed62f8f5efd2c34a",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 133,
"avg_line_length": 52.84090909090909,
"alnum_prop": 0.6236559139784946,
"repo_name": "steven004/TestSteps",
"id": "73051a26fba714fb192c73007d4b9fcf17b38c33",
"size": "4650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_examples/test_lesson3_options.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68390"
}
],
"symlink_target": ""
} |
import cocos
from package.helper import ProjectHelper
class FrameworkSet(cocos.CCPlugin):
    """cocos-console plugin implementing the ``set-framework`` command."""

    @staticmethod
    def plugin_name():
        # The command name as typed on the cocos command line.
        return "set-framework"

    @staticmethod
    def brief_description():
        # Localized one-line description shown in the command listing.
        return cocos.MultiLanguage.get_string('FRAMEWORK_SET_BRIEF')

    # parse arguments
    def parse_args(self, argv):
        from argparse import ArgumentParser
        parser = ArgumentParser(prog="cocos %s" % self.__class__.plugin_name(),
                                description=self.__class__.brief_description())
        parser.add_argument("name", metavar="NAME", help=cocos.MultiLanguage.get_string('FRAMEWORK_SET_ARG_NAME'))
        return parser.parse_args(argv)

    def run(self, argv):
        """Set framework NAME at version "1.0" on the current project."""
        args = self.parse_args(argv)
        name = args.name
        project = ProjectHelper.get_current_project()
        # Version is hard-coded to "1.0" here.
        ProjectHelper.set_framework(project, name, "1.0")
| {
"content_hash": "748909dc6e97eaf56c95273320bfbaf1",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 114,
"avg_line_length": 30.93103448275862,
"alnum_prop": 0.6477146042363434,
"repo_name": "stars2014/quick-ng",
"id": "7d20fad6e5116a333ad9eaeb0544eeb8380850e5",
"size": "898",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tools/cocos2d-console/plugins/framework/framework_set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11465"
},
{
"name": "C",
"bytes": "7452017"
},
{
"name": "C++",
"bytes": "16653757"
},
{
"name": "GLSL",
"bytes": "55973"
},
{
"name": "Java",
"bytes": "304337"
},
{
"name": "Lua",
"bytes": "2503533"
},
{
"name": "Makefile",
"bytes": "42188"
},
{
"name": "Objective-C",
"bytes": "478830"
},
{
"name": "Objective-C++",
"bytes": "358187"
},
{
"name": "Python",
"bytes": "30661"
},
{
"name": "Shell",
"bytes": "14028"
}
],
"symlink_target": ""
} |
import simplejson
import time
import psycopg2
import config
from geojson.examples import SimpleWebFeature
from shapely.geometry import asShape
from geoalchemy2.types import Geography
#trim off excess spaces from strings
def trim_property(property):
    """Strip surrounding whitespace from unicode values; pass others through.

    NOTE(review): Python 2 only (`unicode` builtin). Byte strings (`str`)
    are left untrimmed -- confirm that is intended.
    """
    if isinstance(property, unicode):
        return property.strip()
    return property
#get the fields from the properties-dict, based on the table definition
def map_fields(properties, columns):
    """Build a dict of the requested columns from a feature's properties,
    trimming string values via trim_property."""
    return dict((column, trim_property(properties[column])) for column in columns)
def createSimpleWebFeature(o):
    """Convert a GeoJSON feature dict into a SimpleWebFeature.

    On KeyError/TypeError (missing 'geometry'/'properties' keys, or o not
    dict-like) the input object is returned unchanged -- callers must be
    prepared for either return type.
    """
    try:
        g = o['geometry']
        p = o['properties']
        return SimpleWebFeature(None,
            {'type': str(g.get('type')),
             'coordinates': g.get('coordinates', [])},
            title=p.get('title'),
            summary=p.get('summary'),
            link=str(p.get('link')))
    except (KeyError, TypeError):
        pass
    return o
def create_insert(fields, table_name):
    """Build a parameterized INSERT statement for one feature row.

    `fields` must contain a "geog" key holding WKT; every other key becomes a
    plain %s placeholder while the geography value is wrapped in
    ST_GeogFromText(). Returns a (query, values) pair for cursor.execute.

    NOTE(review): column and table names are interpolated directly into the
    SQL text -- acceptable only because they come from the table definition,
    never from user input.
    """
    query = "INSERT INTO " + table_name + " ("
    placeholders = "VALUES("
    values = ()
    # .items() instead of the Python2-only .iteritems(): identical behavior
    # on Python 2, and it keeps this function valid under Python 3 as well.
    for key, value in fields.items():
        if key != "geog":
            query += str(key) + ", "
            placeholders += "%s, "
            values += (value,)
    # The geography column always goes last.
    query += "geog)"
    placeholders += "ST_GeogFromText(%s))"
    values += (fields["geog"],)
    return query + " " + placeholders, values
def map_ssr_fields(ssr_table):
    """Names of the plain data columns: primary keys and Geography columns
    are excluded."""
    return [column.name for column in ssr_table.columns
            if not column.primary_key and not isinstance(column.type, Geography)]
def ssr_2_postgis(file, ssr_table):
    """Load line-delimited GeoJSON features from `file` into `ssr_table`.

    Returns (num_features_read, elapsed_seconds). Lines that fail JSON
    parsing are silently skipped; IntegrityErrors are printed and the row is
    skipped. NOTE(review): `file` shadows the Python 2 builtin of the same
    name, and DB credentials come from the module-level `config`.
    """
    table_name = ssr_table.name
    ssr_fields = map_ssr_fields(ssr_table)
    connection = psycopg2.connect("dbname=" + config.database["dbname"] + " user=" + config.database["user"])
    cur = connection.cursor()
    start_time = time.time()
    num_features_read = 0
    with open(file) as infile:
        for line in infile:
            try:
                feature = simplejson.loads(line)
                swf = createSimpleWebFeature(feature)
                geom = asShape(swf.geometry)
                fields = map_fields(feature["properties"], ssr_fields)
                fields["geog"] = geom.wkt
                # `fields` is rebound from dict to the values tuple here.
                query, fields = create_insert(fields, table_name)
                try:
                    cur.execute(query, fields)
                    num_features_read+=1
                except psycopg2.IntegrityError, e:
                    print e
                if num_features_read%10000==0:
                    print "read %i features" % num_features_read
                # Commit after every successfully parsed line.
                connection.commit()
            except simplejson.decoder.JSONDecodeError:
                pass
    connection.commit()
    cur.close()
    connection.close()
    elapsed_time = time.time() - start_time
return num_features_read, elapsed_time | {
"content_hash": "5848f21f22c4752746cfeb54afb7b4c1",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 109,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.5851063829787234,
"repo_name": "atlefren/ssr_loader",
"id": "b2f4789402da6a5d8fd13391e3ed91221cbd3a72",
"size": "2914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ssr_2_postgis.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8696"
}
],
"symlink_target": ""
} |
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.normalized_env import normalize
from sandbox.rocky.tf.envs.base import TfEnv
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from sandbox.rocky.tf.algos.trpo import TRPO
from rllab.misc.instrument import run_experiment_lite
from rllab.envs.gym_env import GymEnv
import sys
from rllab.misc.instrument import VariantGenerator, variant
class VG(VariantGenerator):
    """Hyperparameter grid: 3 step sizes x 5 seeds = 15 variants."""

    @variant
    def step_size(self):
        # TRPO KL step sizes to sweep over.
        return [0.01, 0.05, 0.1]

    @variant
    def seed(self):
        # Five seeds per step size for variance estimation.
        return [1, 11, 21, 31, 41]
def run_task(vv):
    """Train TRPO on HalfCheetah-v1 using the step size from variant dict `vv`."""
    env = TfEnv(normalize(GymEnv('HalfCheetah-v1', record_video=False, record_log=False)))
    policy = GaussianMLPPolicy(
        env_spec=env.spec,
        # The neural network policy should have two hidden layers, each with 32 hidden units.
        hidden_sizes=(32, 32),
        name="policy"
    )
    baseline = LinearFeatureBaseline(env_spec=env.spec)
    algo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=4000,
        max_path_length=100,
        n_itr=40,
        discount=0.99,
        step_size=vv["step_size"],
        # Uncomment both lines (this and the plot parameter below) to enable plotting
        # plot=True,
    )
    algo.train()
# Launch one experiment per variant (remote EC2 mode), then exit the launcher.
variants = VG().variants()
for v in variants:
    run_experiment_lite(
        run_task,
        exp_prefix="first_exp",
        # Number of parallel workers for sampling
        n_parallel=1,
        # Only keep the snapshot parameters for the last iteration
        snapshot_mode="last",
        # Specifies the seed for the experiment. If this is not provided, a random seed
        # will be used
        seed=v["seed"],
        # mode="local",
        mode="ec2",
        variant=v,
        # plot=True,
        # terminate_machine=False,
    )
sys.exit()
| {
"content_hash": "b5bd31da18173b35e61396466c848074",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 93,
"avg_line_length": 26.65277777777778,
"alnum_prop": 0.6435643564356436,
"repo_name": "brain-research/mirage-rl-qprop",
"id": "2e163b868a7a2b1e65d9a5eac82fa2d9616ae0b7",
"size": "1919",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/cluster_gym_mujoco_demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8270"
},
{
"name": "Dockerfile",
"bytes": "2310"
},
{
"name": "HTML",
"bytes": "14896"
},
{
"name": "JavaScript",
"bytes": "28156"
},
{
"name": "Jupyter Notebook",
"bytes": "151886"
},
{
"name": "Mako",
"bytes": "3714"
},
{
"name": "Python",
"bytes": "1831569"
},
{
"name": "Ruby",
"bytes": "12147"
},
{
"name": "Shell",
"bytes": "13760"
}
],
"symlink_target": ""
} |
from django.db import models
import django_filters
from django_filters.rest_framework import FilterSet
from django.core.exceptions import ValidationError
from .fields import CPUField, MemoryField
# front-end API types (value, human-readable label) pairs for model choices
TYPE_CHOICES = [("string", "String values"), ("float", "Float values"),
                ("boolean", "Boolean values"), ("integer", "Integer values"),
                ("path", "Path values"), ("unextpath", "Unextracted path values")]

# table of equivalence between front-end API types and back-end types
TYPES = {'string': 'str', 'integer': 'int', 'float': 'float', 'boolean': 'bool',
         'path': 'path', 'unextpath': 'unextpath'}

# plugin kind choices; used by PluginMeta.type below
PLUGIN_TYPE_CHOICES = [("ds", "Data synthesis"), ("fs", "Feed synthesis"),
                       ("ts", "Topology synthesis")]
class ComputeResource(models.Model):
    """
    Model class that defines a remote compute resource for plugins.
    """
    creation_date = models.DateTimeField(auto_now_add=True)
    # NOTE(review): auto_now_add sets this once at creation and never on
    # subsequent saves -- if it should track modifications, auto_now=True was
    # presumably intended. Left unchanged to preserve behavior.
    modification_date = models.DateTimeField(auto_now_add=True)
    name = models.CharField(max_length=100, unique=True)
    compute_url = models.URLField(max_length=300)
    compute_auth_url = models.URLField(max_length=350, blank=True)
    compute_user = models.CharField(max_length=32)
    # NOTE(review): stored in plain text -- confirm secret handling happens
    # elsewhere.
    compute_password = models.CharField(max_length=100)
    compute_auth_token = models.CharField(max_length=500, blank=True,
                                          default='initial_token')
    description = models.CharField(max_length=600, blank=True)
    max_job_exec_seconds = models.IntegerField(blank=True, default=-1)  # unlimited

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        """
        Overridden to save a default value for the auth url.
        """
        if not self.compute_auth_url:
            self.compute_auth_url = str(self.compute_url) + 'auth-token/'  # set default
        super().save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        """
        Overridden to only allow the delete if no plugin would be left without a
        compute resource after the operation.

        Raises ValidationError listing the blocking plugin IDs otherwise.
        """
        plg_ids = self.get_plugins_with_self_as_single_compute_resource()
        if plg_ids:
            plg_ids.sort()
            msg = "Can not delete compute resource '%s'. Please first register the " \
                  "following plugins with another compute resource. Plugin IDs: %s"
            raise ValidationError(msg % (self.name, plg_ids))
        # Forward the caller's arguments; the original accepted *args/**kwargs
        # but silently dropped them (inconsistent with save() above).
        super().delete(*args, **kwargs)

    def get_plugins_with_self_as_single_compute_resource(self):
        """
        Custom method to get the list of plugin ids for the plugins that are only
        registered with this single compute resource.
        """
        return [pl.id for pl in self.plugins.all() if pl.compute_resources.count() == 1]
class ComputeResourceFilter(FilterSet):
    """
    Filter class for the ComputeResource model.
    """
    name = django_filters.CharFilter(field_name='name', lookup_expr='icontains')
    name_exact = django_filters.CharFilter(field_name='name', lookup_expr='exact')
    description = django_filters.CharFilter(field_name='description',
                                            lookup_expr='icontains')
    # Matches compute resources registered with a given plugin (reverse
    # relation through Plugin.compute_resources).
    plugin_id = django_filters.CharFilter(field_name='plugins__id',
                                          lookup_expr='exact')

    class Meta:
        model = ComputeResource
        fields = ['id', 'name', 'name_exact', 'description', 'plugin_id']
class PluginMeta(models.Model):
    """
    Model class that defines the meta info for a plugin that is the same across
    plugin's versions.
    """
    creation_date = models.DateTimeField(auto_now_add=True)
    # NOTE(review): auto_now_add never updates this field on save -- confirm
    # whether auto_now was intended for a modification timestamp.
    modification_date = models.DateTimeField(auto_now_add=True)
    name = models.CharField(max_length=100, unique=True)
    title = models.CharField(max_length=400, blank=True)
    stars = models.IntegerField(default=0)
    public_repo = models.URLField(max_length=300, blank=True)
    license = models.CharField(max_length=50, blank=True)
    # One of PLUGIN_TYPE_CHOICES ('ds', 'fs', 'ts'); defaults to 'ds'.
    type = models.CharField(choices=PLUGIN_TYPE_CHOICES, default='ds', max_length=4)
    icon = models.URLField(max_length=300, blank=True)
    category = models.CharField(max_length=100, blank=True)
    authors = models.CharField(max_length=200, blank=True)
    documentation = models.CharField(max_length=800, blank=True)

    class Meta:
        # Group by plugin type, newest first within each type.
        ordering = ('type', '-creation_date',)

    def __str__(self):
        return str(self.name)
class PluginMetaFilter(FilterSet):
    """
    Filter class for the PluginMeta model.
    """
    min_creation_date = django_filters.IsoDateTimeFilter(field_name='creation_date',
                                                         lookup_expr='gte')
    max_creation_date = django_filters.IsoDateTimeFilter(field_name='creation_date',
                                                         lookup_expr='lte')
    name = django_filters.CharFilter(field_name='name', lookup_expr='icontains')
    name_exact = django_filters.CharFilter(field_name='name', lookup_expr='exact')
    title = django_filters.CharFilter(field_name='title', lookup_expr='icontains')
    category = django_filters.CharFilter(field_name='category', lookup_expr='icontains')
    type = django_filters.CharFilter(field_name='type', lookup_expr='exact')
    authors = django_filters.CharFilter(field_name='authors', lookup_expr='icontains')
    name_title_category = django_filters.CharFilter(method='search_name_title_category')
    name_authors_category = django_filters.CharFilter(
        method='search_name_authors_category')

    def search_name_title_category(self, queryset, name, value):
        """
        Custom method to get a filtered queryset with all plugins for which name or title
        or category matches the search value.

        (`name` is the filter's field name, part of the django-filter method
        signature; it is unused here.)
        """
        # construct the full lookup expression.
        lookup = models.Q(name__icontains=value)
        lookup = lookup | models.Q(title__icontains=value)
        lookup = lookup | models.Q(category__icontains=value)
        return queryset.filter(lookup)

    def search_name_authors_category(self, queryset, name, value):
        """
        Custom method to get a filtered queryset with all plugins for which name or author
        or category matches the search value.
        """
        # construct the full lookup expression.
        lookup = models.Q(name__icontains=value)
        lookup = lookup | models.Q(authors__icontains=value)
        lookup = lookup | models.Q(category__icontains=value)
        return queryset.filter(lookup)

    class Meta:
        model = PluginMeta
        fields = ['id', 'name', 'name_exact', 'title', 'category', 'type', 'authors',
                  'min_creation_date', 'max_creation_date', 'name_title_category',
                  'name_authors_category']
class Plugin(models.Model):
    """
    Model class that defines the versioned plugin.

    A Plugin row is one concrete version of a PluginMeta, registered with one
    or more compute resources.
    """
    # default resource limits inserted at registration time
    defaults = {
        'min_cpu_limit': 1000,  # in millicores
        'min_memory_limit': 200,  # in Mi
        'max_limit': 2147483647  # maxint
    }
    creation_date = models.DateTimeField(auto_now_add=True)
    meta = models.ForeignKey(PluginMeta, on_delete=models.CASCADE, related_name='plugins')
    version = models.CharField(max_length=10)
    dock_image = models.CharField(max_length=500)
    execshell = models.CharField(max_length=50)
    selfpath = models.CharField(max_length=512)
    selfexec = models.CharField(max_length=50)
    description = models.CharField(max_length=2000, blank=True)
    min_gpu_limit = models.IntegerField(null=True, blank=True, default=0)
    max_gpu_limit = models.IntegerField(null=True, blank=True, default=0)
    min_number_of_workers = models.IntegerField(null=True, blank=True, default=1)
    max_number_of_workers = models.IntegerField(null=True, blank=True,
                                                default=defaults['max_limit'])
    min_cpu_limit = CPUField(null=True, blank=True,
                             default=defaults['min_cpu_limit'])  # In millicores
    max_cpu_limit = CPUField(null=True, blank=True,
                             default=defaults['max_limit'])  # In millicores
    min_memory_limit = MemoryField(null=True, blank=True,
                                   default=defaults['min_memory_limit'])  # In Mi
    max_memory_limit = MemoryField(null=True, blank=True,
                                   default=defaults['max_limit'])  # In Mi
    compute_resources = models.ManyToManyField(ComputeResource, related_name='plugins')

    class Meta:
        # A meta can only have one row per version and per docker image.
        unique_together = [['meta', 'version'], ['meta', 'dock_image']]
        ordering = ('meta', '-creation_date',)

    def __str__(self):
        return self.meta.name

    def get_plugin_parameter_names(self):
        """
        Custom method to get the list of plugin parameter names.
        """
        return [param.name for param in self.parameters.all()]

    def get_registered_compute_resources(self):
        # Names of all compute resources this plugin version is registered with.
        return [cr.name for cr in self.compute_resources.all()]
    # Admin list-display metadata for the method above.
    get_registered_compute_resources.admin_order_field = 'id'
    get_registered_compute_resources.short_description = 'Associated compute resources'
class PluginFilter(FilterSet):
    """
    Filter class for the Plugin model.
    """
    min_creation_date = django_filters.IsoDateTimeFilter(field_name="creation_date",
                                                         lookup_expr='gte')
    max_creation_date = django_filters.IsoDateTimeFilter(field_name="creation_date",
                                                         lookup_expr='lte')
    # Name/title/category/type live on the related PluginMeta row.
    name = django_filters.CharFilter(field_name='meta__name', lookup_expr='icontains')
    name_exact = django_filters.CharFilter(field_name='meta__name', lookup_expr='exact')
    title = django_filters.CharFilter(field_name='meta__title', lookup_expr='icontains')
    category = django_filters.CharFilter(field_name='meta__category',
                                         lookup_expr='icontains')
    type = django_filters.CharFilter(field_name='meta__type', lookup_expr='exact')
    description = django_filters.CharFilter(field_name='description',
                                            lookup_expr='icontains')
    name_title_category = django_filters.CharFilter(method='search_name_title_category')
    compute_resource_id = django_filters.CharFilter(field_name='compute_resources__id',
                                                    lookup_expr='exact')

    def search_name_title_category(self, queryset, name, value):
        """
        Custom method to get a filtered queryset with all plugins for which name or title
        or category matches the search value.

        (`name` is the filter's field name, part of the django-filter method
        signature; it is unused here.)
        """
        # construct the full lookup expression.
        lookup = models.Q(meta__name__icontains=value)
        lookup = lookup | models.Q(meta__title__icontains=value)
        lookup = lookup | models.Q(meta__category__icontains=value)
        return queryset.filter(lookup)

    class Meta:
        model = Plugin
        fields = ['id', 'name', 'name_exact', 'version', 'dock_image', 'type', 'category',
                  'min_creation_date', 'max_creation_date', 'title', 'description',
                  'name_title_category', 'compute_resource_id']
class PluginParameter(models.Model):
    """
    Model class that defines a plugin parameter.
    """
    name = models.CharField(max_length=50)
    # Command-line flags for the parameter; short_flag is optional.
    flag = models.CharField(max_length=52)
    short_flag = models.CharField(max_length=52, blank=True)
    # argparse-style action string (default 'store').
    action = models.CharField(max_length=20, default='store')
    optional = models.BooleanField(default=False)
    type = models.CharField(choices=TYPE_CHOICES, default='string', max_length=10)
    help = models.TextField(blank=True)
    # Whether the parameter should be exposed in user interfaces.
    ui_exposed = models.BooleanField(default=True)
    plugin = models.ForeignKey(Plugin, on_delete=models.CASCADE,
                               related_name='parameters')

    class Meta:
        ordering = ('plugin',)

    def __str__(self):
        return self.name

    def get_default(self):
        """
        Custom method to get the default parameter instance regardless of its type.
        """
        # The Default*Parameter models use related_name '<type>_default'
        # (e.g. 'string_default'); returns None when no default is set.
        default_attr_name = '%s_default' % self.type
        return getattr(self, default_attr_name, None)
class DefaultStrParameter(models.Model):
    """
    Model class that defines a default value for a plugin parameter of type string.
    """
    value = models.CharField(max_length=600, blank=True)
    # related_name follows the '<type>_default' pattern relied on by
    # PluginParameter.get_default.
    plugin_param = models.OneToOneField(PluginParameter, on_delete=models.CASCADE,
                                        related_name='string_default')

    def __str__(self):
        return self.value
class DefaultIntParameter(models.Model):
    """
    Model class that defines a default value for a plugin parameter of type integer.
    """
    value = models.IntegerField()
    # related_name follows the '<type>_default' pattern relied on by
    # PluginParameter.get_default.
    plugin_param = models.OneToOneField(PluginParameter, on_delete=models.CASCADE,
                                        related_name='integer_default')

    def __str__(self):
        return str(self.value)
class DefaultFloatParameter(models.Model):
    """
    Model class that defines a default value for a plugin parameter of type float.
    """
    value = models.FloatField()
    # related_name follows the '<type>_default' pattern relied on by
    # PluginParameter.get_default.
    plugin_param = models.OneToOneField(PluginParameter, on_delete=models.CASCADE,
                                        related_name='float_default')

    def __str__(self):
        return str(self.value)
class DefaultBoolParameter(models.Model):
    """
    Model class that defines a default value for a plugin parameter of type boolean.
    """
    value = models.BooleanField()
    # related_name follows the '<type>_default' pattern relied on by
    # PluginParameter.get_default.
    plugin_param = models.OneToOneField(PluginParameter, on_delete=models.CASCADE,
                                        related_name='boolean_default')

    def __str__(self):
        return str(self.value)
| {
"content_hash": "8d2d5cead7a20aee5db1bf352ff4cc2a",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 90,
"avg_line_length": 43.024767801857585,
"alnum_prop": 0.6309275383176225,
"repo_name": "FNNDSC/ChRIS_ultron_backEnd",
"id": "65a2fb1d95b8b8d7e632998a1de93ccdece0ad7f",
"size": "13898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chris_backend/plugins/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3051"
},
{
"name": "HTML",
"bytes": "2839"
},
{
"name": "JavaScript",
"bytes": "262"
},
{
"name": "Python",
"bytes": "978019"
},
{
"name": "Shell",
"bytes": "74679"
}
],
"symlink_target": ""
} |
try:
    from nengo.exceptions import ConfigError
except ImportError:
    # Older nengo releases raised KeyError from config lookups instead of
    # providing a dedicated ConfigError type.
    ConfigError = KeyError


def getconfig(config, object, name, default=None):
    """Get a configuration parameter that may or may not have been added to the
    config.

    Returns `default` either when `object` has no configuration at all or when
    the parameter `name` has not been set on it.
    """
    try:
        params = config[object]
        return getattr(params, name, default)
    except ConfigError:
        # This implies that the configuration hasn't been enabled.
        return default
| {
"content_hash": "6f1c88f843a2181f27182d0d684441c7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 28.466666666666665,
"alnum_prop": 0.6908665105386417,
"repo_name": "project-rig/nengo_spinnaker",
"id": "05411a4573f4dedca6344ac7718c1fdcb43c4f88",
"size": "427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nengo_spinnaker/utils/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "156127"
},
{
"name": "C++",
"bytes": "4428"
},
{
"name": "Makefile",
"bytes": "3057"
},
{
"name": "Python",
"bytes": "609080"
}
],
"symlink_target": ""
} |
"""A task where the goal is to move the hand close to a target prop or site."""
import collections
from dm_control import composer
from dm_control.composer import initializers
from dm_control.composer.observation import observable
from dm_control.composer.variation import distributions
from dm_control.entities import props
from dm_control.manipulation.shared import arenas
from dm_control.manipulation.shared import cameras
from dm_control.manipulation.shared import constants
from dm_control.manipulation.shared import observations
from dm_control.manipulation.shared import registry
from dm_control.manipulation.shared import robots
from dm_control.manipulation.shared import tags
from dm_control.manipulation.shared import workspaces
from dm_control.utils import rewards
import numpy as np
# Placement specification for one task variant: where the target may spawn,
# where the tool center point (TCP) may spawn, and where the arm is mounted.
_ReachWorkspace = collections.namedtuple(
    '_ReachWorkspace', ['target_bbox', 'tcp_bbox', 'arm_offset'])

# Ensures that the props are not touching the table before settling.
_PROP_Z_OFFSET = 0.001

# Workspace for the Duplo-brick variant: the brick spawns on the table
# surface, the TCP starts in a region above it.
_DUPLO_WORKSPACE = _ReachWorkspace(
    target_bbox=workspaces.BoundingBox(
        lower=(-0.1, -0.1, _PROP_Z_OFFSET),
        upper=(0.1, 0.1, _PROP_Z_OFFSET)),
    tcp_bbox=workspaces.BoundingBox(
        lower=(-0.1, -0.1, 0.2),
        upper=(0.1, 0.1, 0.4)),
    arm_offset=robots.ARM_OFFSET)

# Workspace for the fixed-site variant: target and TCP share the same
# 3-D spawn region.
_SITE_WORKSPACE = _ReachWorkspace(
    target_bbox=workspaces.BoundingBox(
        lower=(-0.2, -0.2, 0.02),
        upper=(0.2, 0.2, 0.4)),
    tcp_bbox=workspaces.BoundingBox(
        lower=(-0.2, -0.2, 0.02),
        upper=(0.2, 0.2, 0.4)),
    arm_offset=robots.ARM_OFFSET)

# Radius of the target site; the reward in `Reach.get_reward` saturates once
# the TCP is within this distance of the target.
_TARGET_RADIUS = 0.05
class Reach(composer.Task):
  """Bring the hand close to a target prop or site."""

  def __init__(
      self, arena, arm, hand, prop, obs_settings, workspace, control_timestep):
    """Initializes a new `Reach` task.

    Args:
      arena: `composer.Entity` instance.
      arm: `robot_base.RobotArm` instance.
      hand: `robot_base.RobotHand` instance.
      prop: `composer.Entity` instance specifying the prop to reach to, or None
        in which case the target is a fixed site whose position is specified by
        the workspace.
      obs_settings: `observations.ObservationSettings` instance.
      workspace: `_ReachWorkspace` specifying the placement of the prop and TCP.
      control_timestep: Float specifying the control timestep in seconds.
    """
    self._arena = arena
    self._arm = arm
    self._hand = hand
    # Attach the hand to the arm, then mount the arm in the arena at the
    # workspace-specified offset.
    self._arm.attach(self._hand)
    self._arena.attach_offset(self._arm, offset=workspace.arm_offset)
    self.control_timestep = control_timestep

    # Randomizes the tool center point pose within the workspace at episode
    # start, with the hand oriented downwards.
    self._tcp_initializer = initializers.ToolCenterPointInitializer(
        self._hand, self._arm,
        position=distributions.Uniform(*workspace.tcp_bbox),
        quaternion=workspaces.DOWN_QUATERNION)

    # Add custom camera observable.
    self._task_observables = cameras.add_camera_observables(
        arena, obs_settings, cameras.FRONT_CLOSE)

    target_pos_distribution = distributions.Uniform(*workspace.target_bbox)
    self._prop = prop
    if prop:
      # The prop itself is used to visualize the target location.
      self._make_target_site(parent_entity=prop, visible=False)
      self._target = self._arena.add_free_entity(prop)
      self._prop_placer = initializers.PropPlacer(
          props=[prop],
          position=target_pos_distribution,
          quaternion=workspaces.uniform_z_rotation,
          settle_physics=True)
    else:
      # No prop: the target is a visible fixed site repositioned each episode.
      self._target = self._make_target_site(parent_entity=arena, visible=True)
      self._target_placer = target_pos_distribution

    # Expose the target's position as an observable.
    obs = observable.MJCFFeature('pos', self._target)
    obs.configure(**obs_settings.prop_pose._asdict())
    self._task_observables['target_position'] = obs

    # Add sites for visualizing the prop and target bounding boxes.
    workspaces.add_bbox_site(
        body=self.root_entity.mjcf_model.worldbody,
        lower=workspace.tcp_bbox.lower, upper=workspace.tcp_bbox.upper,
        rgba=constants.GREEN, name='tcp_spawn_area')
    workspaces.add_bbox_site(
        body=self.root_entity.mjcf_model.worldbody,
        lower=workspace.target_bbox.lower, upper=workspace.target_bbox.upper,
        rgba=constants.BLUE, name='target_spawn_area')

  def _make_target_site(self, parent_entity, visible):
    """Adds a spherical target site to `parent_entity` and returns it."""
    return workspaces.add_target_site(
        body=parent_entity.mjcf_model.worldbody,
        radius=_TARGET_RADIUS, visible=visible,
        rgba=constants.RED, name='target_site')

  @property
  def root_entity(self):
    return self._arena

  @property
  def arm(self):
    return self._arm

  @property
  def hand(self):
    return self._hand

  @property
  def task_observables(self):
    return self._task_observables

  def get_reward(self, physics):
    """Returns a shaped reward based on TCP-to-target distance.

    The reward is 1 within `_TARGET_RADIUS` of the target and falls off
    smoothly over a margin of the same size.
    """
    hand_pos = physics.bind(self._hand.tool_center_point).xpos
    target_pos = physics.bind(self._target).xpos
    distance = np.linalg.norm(hand_pos - target_pos)
    return rewards.tolerance(
        distance, bounds=(0, _TARGET_RADIUS), margin=_TARGET_RADIUS)

  def initialize_episode(self, physics, random_state):
    """Randomizes grasp, TCP pose and target/prop placement for an episode."""
    self._hand.set_grasp(physics, close_factors=random_state.uniform())
    self._tcp_initializer(physics, random_state)
    if self._prop:
      self._prop_placer(physics, random_state)
    else:
      physics.bind(self._target).pos = (
          self._target_placer(random_state=random_state))
def _reach(obs_settings, use_site):
  """Configure and instantiate a `Reach` task.

  Args:
    obs_settings: An `observations.ObservationSettings` instance.
    use_site: Boolean, if True then the target will be a fixed site, otherwise
      it will be a moveable Duplo brick.

  Returns:
    An instance of `reach.Reach`.
  """
  if use_site:
    workspace = _SITE_WORKSPACE
    prop = None
  else:
    workspace = _DUPLO_WORKSPACE
    prop = props.Duplo(observable_options=observations.make_options(
        obs_settings, observations.FREEPROP_OBSERVABLES))
  return Reach(
      arena=arenas.Standard(),
      arm=robots.make_arm(obs_settings=obs_settings),
      hand=robots.make_hand(obs_settings=obs_settings),
      prop=prop,
      obs_settings=obs_settings,
      workspace=workspace,
      control_timestep=constants.CONTROL_TIMESTEP)
@registry.add(tags.FEATURES, tags.EASY)
def reach_duplo_features():
  """Reach to a Duplo brick with perfect feature observations."""
  return _reach(obs_settings=observations.PERFECT_FEATURES, use_site=False)
@registry.add(tags.VISION, tags.EASY)
def reach_duplo_vision():
  """Reach to a Duplo brick with vision observations."""
  return _reach(obs_settings=observations.VISION, use_site=False)
@registry.add(tags.FEATURES, tags.EASY)
def reach_site_features():
  """Reach to a fixed site with perfect feature observations."""
  return _reach(obs_settings=observations.PERFECT_FEATURES, use_site=True)
@registry.add(tags.VISION, tags.EASY)
def reach_site_vision():
  """Reach to a fixed site with vision observations."""
  return _reach(obs_settings=observations.VISION, use_site=True)
| {
"content_hash": "7b9a5fc8683ae7bc903c082a3f0b7caf",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 80,
"avg_line_length": 35.03589743589744,
"alnum_prop": 0.6995023419203747,
"repo_name": "deepmind/dm_control",
"id": "b2c8d4d2ff07be3044aadf11d220a4ad5279ef41",
"size": "7499",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dm_control/manipulation/reach.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "136624"
},
{
"name": "Python",
"bytes": "2097331"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the 'form' FK from Preset and the 'preset' FK from PresetArg
    # (the migration file is named 0006_drop_preset_fields).

    dependencies = [
        ('formidable', '0005_conditions_default'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='preset',
            name='form',
        ),
        migrations.RemoveField(
            model_name='presetarg',
            name='preset',
        ),
    ]
| {
"content_hash": "9ab62633700724a2d35dd74079e7dbb1",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 50,
"avg_line_length": 20.31578947368421,
"alnum_prop": 0.5362694300518135,
"repo_name": "novafloss/django-formidable",
"id": "e56960ba17fe7870d7092326218d0055b205a3b0",
"size": "386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "formidable/migrations/0006_drop_preset_fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3427"
},
{
"name": "Python",
"bytes": "108345"
}
],
"symlink_target": ""
} |
import pytest
import aiohttp
from aiohttp import content_disposition_filename, parse_content_disposition
class TestParseContentDisposition:
# http://greenbytes.de/tech/tc2231/
def test_parse_empty(self) -> None:
disptype, params = parse_content_disposition(None)
assert disptype is None
assert {} == params
def test_inlonly(self) -> None:
disptype, params = parse_content_disposition("inline")
assert "inline" == disptype
assert {} == params
def test_inlonlyquoted(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition('"inline"')
assert disptype is None
assert {} == params
def test_semicolon(self) -> None:
disptype, params = parse_content_disposition(
'form-data; name="data"; filename="file ; name.mp4"'
)
assert disptype == "form-data"
assert params == {"name": "data", "filename": "file ; name.mp4"}
def test_inlwithasciifilename(self) -> None:
disptype, params = parse_content_disposition('inline; filename="foo.html"')
assert "inline" == disptype
assert {"filename": "foo.html"} == params
def test_inlwithfnattach(self) -> None:
disptype, params = parse_content_disposition(
'inline; filename="Not an attachment!"'
)
assert "inline" == disptype
assert {"filename": "Not an attachment!"} == params
def test_attonly(self) -> None:
disptype, params = parse_content_disposition("attachment")
assert "attachment" == disptype
assert {} == params
def test_attonlyquoted(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition('"attachment"')
assert disptype is None
assert {} == params
def test_attonlyucase(self) -> None:
disptype, params = parse_content_disposition("ATTACHMENT")
assert "attachment" == disptype
assert {} == params
def test_attwithasciifilename(self) -> None:
disptype, params = parse_content_disposition('attachment; filename="foo.html"')
assert "attachment" == disptype
assert {"filename": "foo.html"} == params
def test_inlwithasciifilenamepdf(self) -> None:
disptype, params = parse_content_disposition('attachment; filename="foo.pdf"')
assert "attachment" == disptype
assert {"filename": "foo.pdf"} == params
def test_attwithasciifilename25(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="0000000000111111111122222"'
)
assert "attachment" == disptype
assert {"filename": "0000000000111111111122222"} == params
def test_attwithasciifilename35(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="00000000001111111111222222222233333"'
)
assert "attachment" == disptype
assert {"filename": "00000000001111111111222222222233333"} == params
def test_attwithasciifnescapedchar(self) -> None:
disptype, params = parse_content_disposition(
r'attachment; filename="f\oo.html"'
)
assert "attachment" == disptype
assert {"filename": "foo.html"} == params
def test_attwithasciifnescapedquote(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename=""quoting" tested.html"'
)
assert "attachment" == disptype
assert {"filename": '"quoting" tested.html'} == params
@pytest.mark.skip("need more smart parser which respects quoted text")
def test_attwithquotedsemicolon(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="Here\'s a semicolon;.html"'
)
assert "attachment" == disptype
assert {"filename": "Here's a semicolon;.html"} == params
def test_attwithfilenameandextparam(self) -> None:
disptype, params = parse_content_disposition(
'attachment; foo="bar"; filename="foo.html"'
)
assert "attachment" == disptype
assert {"filename": "foo.html", "foo": "bar"} == params
def test_attwithfilenameandextparamescaped(self) -> None:
disptype, params = parse_content_disposition(
'attachment; foo=""\\";filename="foo.html"'
)
assert "attachment" == disptype
assert {"filename": "foo.html", "foo": '"\\'} == params
def test_attwithasciifilenameucase(self) -> None:
disptype, params = parse_content_disposition('attachment; FILENAME="foo.html"')
assert "attachment" == disptype
assert {"filename": "foo.html"} == params
def test_attwithasciifilenamenq(self) -> None:
disptype, params = parse_content_disposition("attachment; filename=foo.html")
assert "attachment" == disptype
assert {"filename": "foo.html"} == params
def test_attwithtokfncommanq(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"attachment; filename=foo,bar.html"
)
assert disptype is None
assert {} == params
def test_attwithasciifilenamenqs(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"attachment; filename=foo.html ;"
)
assert disptype is None
assert {} == params
def test_attemptyparam(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition("attachment; ;filename=foo")
assert disptype is None
assert {} == params
def test_attwithasciifilenamenqws(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"attachment; filename=foo bar.html"
)
assert disptype is None
assert {} == params
def test_attwithfntokensq(self) -> None:
disptype, params = parse_content_disposition("attachment; filename='foo.html'")
assert "attachment" == disptype
assert {"filename": "'foo.html'"} == params
def test_attwithisofnplain(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo-ä.html"'
)
assert "attachment" == disptype
assert {"filename": "foo-ä.html"} == params
def test_attwithutf8fnplain(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo-ä.html"'
)
assert "attachment" == disptype
assert {"filename": "foo-ä.html"} == params
def test_attwithfnrawpctenca(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo-%41.html"'
)
assert "attachment" == disptype
assert {"filename": "foo-%41.html"} == params
def test_attwithfnusingpct(self) -> None:
disptype, params = parse_content_disposition('attachment; filename="50%.html"')
assert "attachment" == disptype
assert {"filename": "50%.html"} == params
def test_attwithfnrawpctencaq(self) -> None:
disptype, params = parse_content_disposition(
r'attachment; filename="foo-%\41.html"'
)
assert "attachment" == disptype
assert {"filename": r"foo-%41.html"} == params
def test_attwithnamepct(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo-%41.html"'
)
assert "attachment" == disptype
assert {"filename": "foo-%41.html"} == params
def test_attwithfilenamepctandiso(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="ä-%41.html"'
)
assert "attachment" == disptype
assert {"filename": "ä-%41.html"} == params
def test_attwithfnrawpctenclong(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo-%c3%a4-%e2%82%ac.html"'
)
assert "attachment" == disptype
assert {"filename": "foo-%c3%a4-%e2%82%ac.html"} == params
def test_attwithasciifilenamews1(self) -> None:
disptype, params = parse_content_disposition('attachment; filename ="foo.html"')
assert "attachment" == disptype
assert {"filename": "foo.html"} == params
def test_attwith2filenames(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename="foo.html"; filename="bar.html"'
)
assert disptype is None
assert {} == params
def test_attfnbrokentoken(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"attachment; filename=foo[1](2).html"
)
assert disptype is None
assert {} == params
def test_attfnbrokentokeniso(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"attachment; filename=foo-ä.html"
)
assert disptype is None
assert {} == params
def test_attfnbrokentokenutf(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"attachment; filename=foo-ä.html"
)
assert disptype is None
assert {} == params
def test_attmissingdisposition(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition("filename=foo.html")
assert disptype is None
assert {} == params
def test_attmissingdisposition2(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition("x=y; filename=foo.html")
assert disptype is None
assert {} == params
def test_attmissingdisposition3(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'"foo; filename=bar;baz"; filename=qux'
)
assert disptype is None
assert {} == params
def test_attmissingdisposition4(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"filename=foo.html, filename=bar.html"
)
assert disptype is None
assert {} == params
def test_emptydisposition(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition("; filename=foo.html")
assert disptype is None
assert {} == params
def test_doublecolon(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
": inline; attachment; filename=foo.html"
)
assert disptype is None
assert {} == params
def test_attandinline(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"inline; attachment; filename=foo.html"
)
assert disptype is None
assert {} == params
def test_attandinline2(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"attachment; inline; filename=foo.html"
)
assert disptype is None
assert {} == params
def test_attbrokenquotedfn(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename="foo.html".txt'
)
assert disptype is None
assert {} == params
def test_attbrokenquotedfn2(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition('attachment; filename="bar')
assert disptype is None
assert {} == params
def test_attbrokenquotedfn3(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo"bar;baz"qux'
)
assert disptype is None
assert {} == params
def test_attmultinstances(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"attachment; filename=foo.html, attachment; filename=bar.html"
)
assert disptype is None
assert {} == params
def test_attmissingdelim(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"attachment; foo=foo filename=bar"
)
assert disptype is None
assert {} == params
def test_attmissingdelim2(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"attachment; filename=bar foo=foo"
)
assert disptype is None
assert {} == params
def test_attmissingdelim3(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition("attachment filename=bar")
assert disptype is None
assert {} == params
def test_attreversed(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"filename=foo.html; attachment"
)
assert disptype is None
assert {} == params
def test_attconfusedparam(self) -> None:
disptype, params = parse_content_disposition("attachment; xfilename=foo.html")
assert "attachment" == disptype
assert {"xfilename": "foo.html"} == params
def test_attabspath(self) -> None:
disptype, params = parse_content_disposition('attachment; filename="/foo.html"')
assert "attachment" == disptype
assert {"filename": "foo.html"} == params
def test_attabspathwin(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="\\foo.html"'
)
assert "attachment" == disptype
assert {"filename": "foo.html"} == params
def test_attcdate(self) -> None:
disptype, params = parse_content_disposition(
'attachment; creation-date="Wed, 12 Feb 1997 16:29:51 -0500"'
)
assert "attachment" == disptype
assert {"creation-date": "Wed, 12 Feb 1997 16:29:51 -0500"} == params
def test_attmdate(self) -> None:
disptype, params = parse_content_disposition(
'attachment; modification-date="Wed, 12 Feb 1997 16:29:51 -0500"'
)
assert "attachment" == disptype
assert {"modification-date": "Wed, 12 Feb 1997 16:29:51 -0500"} == params
def test_dispext(self) -> None:
disptype, params = parse_content_disposition("foobar")
assert "foobar" == disptype
assert {} == params
def test_dispextbadfn(self) -> None:
disptype, params = parse_content_disposition(
'attachment; example="filename=example.txt"'
)
assert "attachment" == disptype
assert {"example": "filename=example.txt"} == params
def test_attwithisofn2231iso(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=iso-8859-1''foo-%E4.html"
)
assert "attachment" == disptype
assert {"filename*": "foo-ä.html"} == params
def test_attwithfn2231utf8(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''foo-%c3%a4-%e2%82%ac.html"
)
assert "attachment" == disptype
assert {"filename*": "foo-ä-€.html"} == params
def test_attwithfn2231noc(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=''foo-%c3%a4-%e2%82%ac.html"
)
assert "attachment" == disptype
assert {"filename*": "foo-ä-€.html"} == params
def test_attwithfn2231utf8comp(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''foo-a%cc%88.html"
)
assert "attachment" == disptype
assert {"filename*": "foo-ä.html"} == params
@pytest.mark.skip("should raise decoding error: %82 is invalid for latin1")
def test_attwithfn2231utf8_bad(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=iso-8859-1''foo-%c3%a4-%e2%82%ac.html"
)
assert "attachment" == disptype
assert {} == params
@pytest.mark.skip("should raise decoding error: %E4 is invalid for utf-8")
def test_attwithfn2231iso_bad(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=utf-8''foo-%E4.html"
)
assert "attachment" == disptype
assert {} == params
def test_attwithfn2231ws1(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename *=UTF-8''foo-%c3%a4.html"
)
assert "attachment" == disptype
assert {} == params
def test_attwithfn2231ws2(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*= UTF-8''foo-%c3%a4.html"
)
assert "attachment" == disptype
assert {"filename*": "foo-ä.html"} == params
def test_attwithfn2231ws3(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename* =UTF-8''foo-%c3%a4.html"
)
assert "attachment" == disptype
assert {"filename*": "foo-ä.html"} == params
def test_attwithfn2231quot(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=\"UTF-8''foo-%c3%a4.html\""
)
assert "attachment" == disptype
assert {} == params
def test_attwithfn2231quot2(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
'attachment; filename*="foo%20bar.html"'
)
assert "attachment" == disptype
assert {} == params
def test_attwithfn2231singleqmissing(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8'foo-%c3%a4.html"
)
assert "attachment" == disptype
assert {} == params
@pytest.mark.skip("urllib.parse.unquote is tolerate to standalone % chars")
def test_attwithfn2231nbadpct1(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''foo%"
)
assert "attachment" == disptype
assert {} == params
@pytest.mark.skip("urllib.parse.unquote is tolerate to standalone % chars")
def test_attwithfn2231nbadpct2(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''f%oo.html"
)
assert "attachment" == disptype
assert {} == params
def test_attwithfn2231dpct(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''A-%2541.html"
)
assert "attachment" == disptype
assert {"filename*": "A-%41.html"} == params
def test_attwithfn2231abspathdisguised(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''%5cfoo.html"
)
assert "attachment" == disptype
assert {"filename*": "\\foo.html"} == params
def test_attfncont(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename*0="foo."; filename*1="html"'
)
assert "attachment" == disptype
assert {"filename*0": "foo.", "filename*1": "html"} == params
def test_attfncontqs(self) -> None:
disptype, params = parse_content_disposition(
r'attachment; filename*0="foo"; filename*1="\b\a\r.html"'
)
assert "attachment" == disptype
assert {"filename*0": "foo", "filename*1": "bar.html"} == params
def test_attfncontenc(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*0*=UTF-8" 'foo-%c3%a4; filename*1=".html"'
)
assert "attachment" == disptype
assert {"filename*0*": "UTF-8" "foo-%c3%a4", "filename*1": ".html"} == params
def test_attfncontlz(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename*0="foo"; filename*01="bar"'
)
assert "attachment" == disptype
assert {"filename*0": "foo", "filename*01": "bar"} == params
def test_attfncontnc(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename*0="foo"; filename*2="bar"'
)
assert "attachment" == disptype
assert {"filename*0": "foo", "filename*2": "bar"} == params
def test_attfnconts1(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename*0="foo."; filename*2="html"'
)
assert "attachment" == disptype
assert {"filename*0": "foo.", "filename*2": "html"} == params
def test_attfncontord(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename*1="bar"; filename*0="foo"'
)
assert "attachment" == disptype
assert {"filename*0": "foo", "filename*1": "bar"} == params
def test_attfnboth(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo-ae.html";' " filename*=UTF-8''foo-%c3%a4.html"
)
assert "attachment" == disptype
assert {"filename": "foo-ae.html", "filename*": "foo-ä.html"} == params
def test_attfnboth2(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''foo-%c3%a4.html;" ' filename="foo-ae.html"'
)
assert "attachment" == disptype
assert {"filename": "foo-ae.html", "filename*": "foo-ä.html"} == params
def test_attfnboth3(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*0*=ISO-8859-15''euro-sign%3d%a4;"
" filename*=ISO-8859-1''currency-sign%3d%a4"
)
assert "attachment" == disptype
assert {
"filename*": "currency-sign=¤",
"filename*0*": "ISO-8859-15''euro-sign%3d%a4",
} == params
def test_attnewandfn(self) -> None:
disptype, params = parse_content_disposition(
'attachment; foobar=x; filename="foo.html"'
)
assert "attachment" == disptype
assert {"foobar": "x", "filename": "foo.html"} == params
def test_attrfc2047token(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
"attachment; filename==?ISO-8859-1?Q?foo-=E4.html?="
)
assert disptype is None
assert {} == params
def test_attrfc2047quoted(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="=?ISO-8859-1?Q?foo-=E4.html?="'
)
assert "attachment" == disptype
assert {"filename": "=?ISO-8859-1?Q?foo-=E4.html?="} == params
def test_bad_continuous_param(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*0=foo bar"
)
assert "attachment" == disptype
assert {} == params
class TestContentDispositionFilename:
    # Filename-extraction scenarios adapted from
    # http://greenbytes.de/tech/tc2231/

    def test_no_filename(self) -> None:
        assert content_disposition_filename({}) is None
        assert content_disposition_filename({"foo": "bar"}) is None

    def test_filename(self) -> None:
        assert content_disposition_filename({"filename": "foo.html"}) == "foo.html"

    def test_filename_ext(self) -> None:
        assert content_disposition_filename({"filename*": "файл.html"}) == "файл.html"

    def test_attfncont(self) -> None:
        parts = {"filename*0": "foo.", "filename*1": "html"}
        assert content_disposition_filename(parts) == "foo.html"

    def test_attfncontqs(self) -> None:
        parts = {"filename*0": "foo", "filename*1": "bar.html"}
        assert content_disposition_filename(parts) == "foobar.html"

    def test_attfncontenc(self) -> None:
        # The "*0*" segment carries its own charset prefix.
        parts = {"filename*0*": "UTF-8''foo-%c3%a4", "filename*1": ".html"}
        assert content_disposition_filename(parts) == "foo-ä.html"

    def test_attfncontlz(self) -> None:
        # The leading-zero "*01" segment is not a valid continuation index.
        parts = {"filename*0": "foo", "filename*01": "bar"}
        assert content_disposition_filename(parts) == "foo"

    def test_attfncontnc(self) -> None:
        # A gap in the continuation sequence stops the reassembly.
        parts = {"filename*0": "foo", "filename*2": "bar"}
        assert content_disposition_filename(parts) == "foo"

    def test_attfnconts1(self) -> None:
        # Without a "*0" segment there is no filename at all.
        parts = {"filename*1": "foo", "filename*2": "bar"}
        assert content_disposition_filename(parts) is None

    def test_attfnboth(self) -> None:
        # The extended "filename*" form wins over the plain one.
        parts = {"filename": "foo-ae.html", "filename*": "foo-ä.html"}
        assert content_disposition_filename(parts) == "foo-ä.html"

    def test_attfnboth3(self) -> None:
        parts = {
            "filename*0*": "ISO-8859-15''euro-sign%3d%a4",
            "filename*": "currency-sign=¤",
        }
        assert content_disposition_filename(parts) == "currency-sign=¤"

    def test_attrfc2047quoted(self) -> None:
        parts = {"filename": "=?ISO-8859-1?Q?foo-=E4.html?="}
        assert content_disposition_filename(parts) == "=?ISO-8859-1?Q?foo-=E4.html?="
| {
"content_hash": "80d159a67361290294307e5eb69c69b7",
"timestamp": "",
"source": "github",
"line_count": 699,
"max_line_length": 88,
"avg_line_length": 39.35765379113018,
"alnum_prop": 0.6072480098869543,
"repo_name": "KeepSafe/aiohttp",
"id": "9516751cba92782f804d5fa5ec4e08d4e504909c",
"size": "27548",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_multipart_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "4890"
},
{
"name": "Makefile",
"bytes": "3179"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "1236385"
},
{
"name": "Shell",
"bytes": "2309"
}
],
"symlink_target": ""
} |
"""
Forum attachments abstract models
=================================
This module defines abstract models provided by the ``forum_attachments`` application.
"""
import os
from django.db import models
from django.utils.translation import gettext_lazy as _
from machina.conf import settings as machina_settings
def get_attachment_file_upload_to(instance, filename):
    """ Delegates the computation of the upload path to the attachment instance. """
    upload_path = instance.get_file_upload_to(filename)
    return upload_path
class AbstractAttachment(models.Model):
    """ Represents a post attachment. An attachment is always linked to a post. """

    # Parent post; deleting the post cascades and removes its attachments.
    post = models.ForeignKey(
        'forum_conversation.Post', related_name='attachments', on_delete=models.CASCADE,
        verbose_name=_('Post'),
    )

    # Uploaded file; the final storage path is computed by get_file_upload_to().
    file = models.FileField(upload_to=get_attachment_file_upload_to, verbose_name=_('File'))

    # Optional user-supplied description of the attachment.
    # NOTE(review): null=True on a CharField allows two distinct "empty"
    # states (NULL and ''); Django convention is blank=True only. Changing
    # it would require a migration -- confirm before touching.
    comment = models.CharField(max_length=255, verbose_name=_('Comment'), blank=True, null=True)

    class Meta:
        abstract = True
        app_label = 'forum_attachments'
        verbose_name = _('Attachment')
        verbose_name_plural = _('Attachments')

    def __str__(self):
        # An attachment is displayed through the subject of its parent post.
        return '{}'.format(self.post.subject)

    @property
    def filename(self):
        """ Returns the filename of the considered attachment. """
        return os.path.basename(self.file.name)

    def get_file_upload_to(self, filename):
        """ Returns the path to upload the associated file to. """
        return os.path.join(machina_settings.ATTACHMENT_FILE_UPLOAD_TO, filename)
| {
"content_hash": "daf366facca7378534278fd511771800",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 96,
"avg_line_length": 32.583333333333336,
"alnum_prop": 0.6649616368286445,
"repo_name": "ellmetha/django-machina",
"id": "65bbe994dbf4f416348042030eee50010ef9aeb9",
"size": "1564",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "machina/apps/forum_conversation/forum_attachments/abstract_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "455"
},
{
"name": "HTML",
"bytes": "129594"
},
{
"name": "JavaScript",
"bytes": "6073"
},
{
"name": "Makefile",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "743877"
},
{
"name": "SCSS",
"bytes": "8981"
}
],
"symlink_target": ""
} |
import types
import typing


# A package name paired with the normalized string used for comparisons,
# so two spellings of the same package compare (and hash) as equal.
PackageName = typing.NamedTuple("PackageName", [
    ("Name", str),
    ("CompareString", str)
])


def _package_name_repr(self):
    """Display the human-readable name only."""
    return self.Name


def _package_name_eq(self, other):
    """Two PackageNames are equal when their normalized strings match."""
    return isinstance(other, PackageName) and self.CompareString == other.CompareString


def _package_name_hash(self):
    """Hash must be consistent with __eq__: hash the normalized string."""
    return hash(self.CompareString)


# Attach the dunders as plain functions on the class; Python binds them to
# the instance automatically on lookup.  The previous code wrapped them in
# types.MethodType(fn), which raises TypeError at import time because
# MethodType requires both a function AND an instance.
PackageName.__repr__ = _package_name_repr
PackageName.__eq__ = _package_name_eq
PackageName.__hash__ = _package_name_hash


if __name__ == '__main__':
    packageName = PackageName("Guwei", "guwei")
    print(packageName)
    import re
    # Raw string so '\S' is unambiguously a regex escape, not a string escape.
    re.compile(r"^(\S+)\s*$")
| {
"content_hash": "e2e368e35e3bca3c59bc96d59b94a438",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 132,
"avg_line_length": 33.578947368421055,
"alnum_prop": 0.6724137931034483,
"repo_name": "GuRenYiBeiAiHuangLiang/ScriptTool",
"id": "3876c85aed62e1dc804a0cfe4ac3d7c2b614f700",
"size": "639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Core/Common/domain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5935"
}
],
"symlink_target": ""
} |
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from ...sipmessaging import SIPHeaderField
from ...sipmessaging import classproperty
class PrivacySIPHeaderField(SIPHeaderField):
    """Concrete header-field class for the SIP "Privacy" header."""

    # noinspection PyNestedDecorators
    @classproperty
    @classmethod
    def canonical_field_name(cls):
        # Canonical on-the-wire name used for this header field.
        return 'Privacy'

    @classmethod
    def new_for_attributes(cls, field_name="Privacy", field_value_string=""):
        """Factory: build an instance via the generic name/value constructor."""
        return cls.new_for_field_name_and_value_string(field_name=field_name, field_value_string=field_value_string)

    @property
    def is_privacy(self):
        # Type-test flag; presumably the SIPHeaderField base class answers
        # False for this on other header types -- confirm in the base class.
        return True
| {
"content_hash": "f9fcde1bf8c36d1e9c01c7a23b88ec50",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 116,
"avg_line_length": 27.608695652173914,
"alnum_prop": 0.7275590551181103,
"repo_name": "bobjects/BobStack",
"id": "f99754b601240744a30b504826761006be6bf396",
"size": "635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bobstack/sipmessaging/concreteheaderfields/privacySIPHeaderField.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "59"
},
{
"name": "Perl",
"bytes": "576"
},
{
"name": "Python",
"bytes": "922808"
},
{
"name": "Shell",
"bytes": "13516"
}
],
"symlink_target": ""
} |
import copy
import mock
from tempest.lib.services.compute import base_compute_client
from tempest.lib.services.compute import services_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestServicesClient(base.BaseServiceTest):
    """Unit tests for the compute ServicesClient.

    Each test stubs the underlying RestClient HTTP verb with a canned
    response body and verifies the client method passes it through,
    both as str and as bytes bodies.
    """

    # Canned response body for GET /os-services.
    FAKE_SERVICES = {
        "services":
        [{
            "status": "enabled",
            "binary": "nova-conductor",
            "zone": "internal",
            "state": "up",
            "updated_at": "2015-08-19T06:50:55.000000",
            "host": "controller",
            "disabled_reason": None,
            "id": 1
        }]
    }

    # Canned response body for the enable/disable PUT calls.
    FAKE_SERVICE = {
        "service":
        {
            "status": "enabled",
            "binary": "nova-conductor",
            "host": "controller"
        }
    }

    # Canned response for the microversion 2.11 forced-down update.
    FAKE_UPDATE_FORCED_DOWN = {
        "service":
        {
            "forced_down": True,
            "binary": "nova-conductor",
            "host": "controller"
        }
    }

    # Canned response for the microversion 2.53 PUT /os-services/{id}.
    FAKE_UPDATE_SERVICE = {
        "service": {
            "id": "e81d66a4-ddd3-4aba-8a84-171d1cb4d339",
            "binary": "nova-compute",
            "disabled_reason": "test2",
            "host": "host1",
            "state": "down",
            "status": "disabled",
            "updated_at": "2012-10-29T13:42:05.000000",
            "forced_down": False,
            "zone": "nova"
        }
    }

    def setUp(self):
        """Create a ServicesClient wired to a fake auth provider."""
        super(TestServicesClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = services_client.ServicesClient(
            fake_auth, 'compute', 'regionOne')
        # Ensure microversion patches applied per-test are always undone.
        self.addCleanup(mock.patch.stopall)

    def test_list_services_with_str_body(self):
        self.check_service_client_function(
            self.client.list_services,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_SERVICES)

    def test_list_services_with_bytes_body(self):
        self.check_service_client_function(
            self.client.list_services,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_SERVICES, to_utf=True)

    def _test_enable_service(self, bytes_body=False):
        # Shared body for the str/bytes enable_service variants.
        self.check_service_client_function(
            self.client.enable_service,
            'tempest.lib.common.rest_client.RestClient.put',
            self.FAKE_SERVICE,
            bytes_body,
            host="nova-conductor", binary="controller")

    def test_enable_service_with_str_body(self):
        self._test_enable_service()

    def test_enable_service_with_bytes_body(self):
        self._test_enable_service(bytes_body=True)

    def _test_disable_service(self, bytes_body=False):
        # Same as enable, but the canned service reports "disable".
        fake_service = copy.deepcopy(self.FAKE_SERVICE)
        fake_service["service"]["status"] = "disable"
        self.check_service_client_function(
            self.client.disable_service,
            'tempest.lib.common.rest_client.RestClient.put',
            fake_service,
            bytes_body,
            host="nova-conductor", binary="controller")

    def test_disable_service_with_str_body(self):
        self._test_disable_service()

    def test_disable_service_with_bytes_body(self):
        self._test_disable_service(bytes_body=True)

    def _test_log_reason_disabled_service(self, bytes_body=False):
        # disable_log_reason additionally records a free-form reason.
        resp_body = copy.deepcopy(self.FAKE_SERVICE)
        resp_body['service']['disabled_reason'] = 'test reason'
        self.check_service_client_function(
            self.client.disable_log_reason,
            'tempest.lib.common.rest_client.RestClient.put',
            resp_body,
            bytes_body,
            host="nova-conductor",
            binary="controller",
            disabled_reason='test reason')

    def _test_update_service(self, bytes_body=False, status=None,
                             disabled_reason=None, forced_down=None):
        # Microversion 2.53 update: only the supplied fields are sent,
        # and the canned response is patched to mirror them.
        resp_body = copy.deepcopy(self.FAKE_UPDATE_SERVICE)
        kwargs = {}
        if status is not None:
            kwargs['status'] = status
        if disabled_reason is not None:
            kwargs['disabled_reason'] = disabled_reason
        if forced_down is not None:
            kwargs['forced_down'] = forced_down
        resp_body['service'].update(kwargs)

        self.check_service_client_function(
            self.client.update_service,
            'tempest.lib.common.rest_client.RestClient.put',
            resp_body,
            bytes_body,
            service_id=resp_body['service']['id'],
            **kwargs)

    def test_log_reason_disabled_service_with_str_body(self):
        self._test_log_reason_disabled_service()

    def test_log_reason_disabled_service_with_bytes_body(self):
        self._test_log_reason_disabled_service(bytes_body=True)

    def _test_update_forced_down(self, bytes_body=False):
        # Microversion 2.11 path: forced-down flag keyed by host/binary.
        self.check_service_client_function(
            self.client.update_forced_down,
            'tempest.lib.common.rest_client.RestClient.put',
            self.FAKE_UPDATE_FORCED_DOWN,
            bytes_body,
            host="nova-conductor",
            binary="controller",
            forced_down=True)

    @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
                       new_callable=mock.PropertyMock(return_value='2.11'))
    def test_update_forced_down_with_str_body(self, _):
        self._test_update_forced_down()

    @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
                       new_callable=mock.PropertyMock(return_value='2.11'))
    def test_update_forced_down_with_bytes_body(self, _):
        self._test_update_forced_down(bytes_body=True)

    @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
                       new_callable=mock.PropertyMock(return_value='2.53'))
    def test_update_service_disable_scheduling_with_str_body(self, _):
        self._test_update_service(status='disabled',
                                  disabled_reason='maintenance')

    @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
                       new_callable=mock.PropertyMock(return_value='2.53'))
    def test_update_service_disable_scheduling_with_bytes_body(self, _):
        self._test_update_service(status='disabled',
                                  disabled_reason='maintenance',
                                  bytes_body=True)

    @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
                       new_callable=mock.PropertyMock(return_value='2.53'))
    def test_update_service_enable_scheduling_with_str_body(self, _):
        self._test_update_service(status='enabled')

    @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
                       new_callable=mock.PropertyMock(return_value='2.53'))
    def test_update_service_enable_scheduling_with_bytes_body(self, _):
        self._test_update_service(status='enabled', bytes_body=True)

    @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
                       new_callable=mock.PropertyMock(return_value='2.53'))
    def test_update_service_forced_down_with_str_body(self, _):
        self._test_update_service(forced_down=True)

    @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
                       new_callable=mock.PropertyMock(return_value='2.53'))
    def test_update_service_forced_down_with_bytes_body(self, _):
        self._test_update_service(forced_down=True, bytes_body=True)
| {
"content_hash": "3f039d67966888a41a9b1c0cb5bbc5b5",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 75,
"avg_line_length": 37.07960199004975,
"alnum_prop": 0.5972091775124111,
"repo_name": "masayukig/tempest",
"id": "ba432e39e188d85251e481874e601dd7b58029c3",
"size": "8084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/tests/lib/services/compute/test_services_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4682048"
},
{
"name": "Shell",
"bytes": "12734"
}
],
"symlink_target": ""
} |
import time
import oauth_req
import simplejson
import ratelimit
def getSearch(query, search_type, filename_to_write, friends_dir, count = 100):
    """Page through the Twitter search API for a query, appending rows to CSV.

    Args:
        query: search query string (assumed already URL-encoded -- TODO confirm)
        search_type: Twitter "result_type" value (e.g. recent/popular/mixed)
        filename_to_write: CSV file parsed tweet rows are appended to
        friends_dir: directory for per-author friend lists (currently unused;
            the storeFriends call below is commented out)
        count: page size requested from the API per request
    """
    # import packages
    # NOTE(review): csv and urllib appear unused in this function.
    import csv
    import urllib
    import save_csv
    # get Twitter API keys
    key = ratelimit.getKeys(1)
    consumer_key, consumer_secret, kunci, rahsia = ratelimit.getKeyData(key)
    next_page = ''
    loop = True
    # Column headers for the output CSV (first column is the query itself).
    headers = ['url', 'tweet_id', 'tweet_text', 'timestamp', 'author_id', 'rt_status_tweet_id', 'rt_status_user_id', 'ent_mentions_user_id',
               'in_reply_tweet_id', 'in_reply_user_id']
    # NOTE(review): seen_ids is never used -- dead variable.
    seen_ids = set()
    # Loop until Twitter stops returning a next_results page token.
    while loop == True:
        #print consumer_key, consumer_secret, kunci, rahsia
        if next_page:
            # next_page already carries the full query string from Twitter.
            url2 = "https://api.twitter.com/1.1/search/tweets.json%s" % (next_page)
            next_page = ''
        else:
            url2 = "https://api.twitter.com/1.1/search/tweets.json?q=%s&result_type=%s&count=%d" % (query, search_type, count)
        api_response2 = oauth_req.OauthReq(url2, consumer_key, consumer_secret, kunci, rahsia)
        if api_response2[0]['status'] == '200' and api_response2[1]:
            arrays_to_write = list()
            json_response = simplejson.loads(api_response2[1])
            for status in json_response['statuses']:
                # parse status
                new_status = parseStatus(status)
                # Prepend the query so every row records what it matched.
                new_status_q = [query]
                new_status_q.extend(new_status)
                if new_status and new_status_q:
                    arrays_to_write.append(new_status_q)
                # store friends for each tweet's author if not taken yet
                #storeFriends(new_status_q[4], friends_dir)
            if arrays_to_write:
                save_csv.appendCSV(arrays_to_write, filename_to_write, headers)
            else:
                print "no tweets for %s" % query
            if 'next_results' in json_response['search_metadata']:
                next_page = json_response['search_metadata']['next_results']
            # NOTE(review): the triple-quoted string below is commented-out
            # debug code left as a no-op expression statement.
            '''print "next_page: %s" % next_page
        else:
            print "no more next_page"
        #print i, next_page'''
        elif api_response2[0]['status'] == '403' and api_response2[1]:
            # Forbidden: surface the API's error message and fall through.
            json_response = simplejson.loads(api_response2[1])
            print "error = %s" % (json_response['error'])
        if next_page == '':
            loop = False
def parseStatus(status):
    """Flatten one tweet JSON object into a list of CSV-ready fields.

    Relations that are absent (retweet source, mentions, reply target)
    are encoded as the string '0' so every row has the same shape.
    """
    tweet_id = status['id_str']
    tweet_text = status['text'].encode('utf-8')
    created = time.strptime(status['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
    timestamp = time.strftime('%d-%m-%Y %H:%M:%S', created)
    author_id = status['user']['id_str']

    # Retweet source, if any.
    retweeted = status.get('retweeted_status')
    if retweeted:
        rt_status_tweet_id = retweeted['id_str']
        rt_status_user_id = retweeted['user']['id_str']
    else:
        rt_status_tweet_id = '0'
        rt_status_user_id = '0'

    # Comma-joined list of mentioned user ids, in order of appearance.
    mentions = status['entities']['user_mentions']
    if mentions:
        ent_mentions_user_id = ','.join(mtn['id_str'] for mtn in mentions)
    else:
        ent_mentions_user_id = '0'

    # Reply target (tweet and user), '0' when not a reply.
    in_reply_tweet_id = status['in_reply_to_status_id_str'] or '0'
    in_reply_user_id = status['in_reply_to_user_id_str'] or '0'

    return [tweet_id, tweet_text, timestamp, author_id, rt_status_tweet_id,
            rt_status_user_id, ent_mentions_user_id, in_reply_tweet_id,
            in_reply_user_id]
def storeFriends(auth_id, friends_dir):
    """Fetch and persist the friend (following) ids for one author.

    Skips authors whose friends file already exists in friends_dir.
    Paginates through the friends/ids endpoint with Twitter cursors and
    writes the collected id set via raw.saveRawSet.
    """
    import os
    import raw
    # look for auth's total friends
    # NOTE(review): the backslash path separator is Windows-specific.
    if os.path.isfile("%s\\%s.csv" % (friends_dir, auth_id)) == False:
        print "getting friends for auth: %s at %s" % (auth_id, time.strftime("%H:%M:%S"))
        # haven't seen this auth yet, so grab friends list
        current_friends = set()
        cursor = "-1"
        exit_loop = False
        # paginating loop
        while exit_loop == False:
            # only proceed if there's no rate limiting
            key = ratelimit.getKeys(1, f = 'friends')
            consumer_key, consumer_secret, kunci, rahsia = ratelimit.getKeyData(key)
            # get followers for auth_id
            url = 'https://api.twitter.com/1.1/friends/ids.json?user_id=%s&cursor=%s&stringify_ids=true' % (auth_id, cursor)
            api_response = oauth_req.OauthReq(url, consumer_key, consumer_secret, kunci, rahsia)
            if api_response[0]['status'] == '200' and api_response[1]:
                json_response = simplejson.loads(api_response[1])
                if json_response['ids']:
                    if current_friends:
                        # if paginating
                        current_friends.update(set(json_response['ids']))
                    else:
                        # on 1st block, no paginating yet
                        current_friends = set(json_response['ids'])
                    if json_response['next_cursor_str'] != '0':
                        # if paginating, change cursor to next page
                        cursor = json_response['next_cursor_str']
                    else:
                        # no more IDs, break loop
                        exit_loop = True
                else:
                    # no followers
                    exit_loop = True
            else:
                # error, or not authorized (protected)
                exit_loop = True
        # store followers for auth_id
        raw.saveRawSet(current_friends, "%s\\%s.csv" % (friends_dir, auth_id), "friends")
| {
"content_hash": "4accac8a72d31845445d08fcfcb1a7ed",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 140,
"avg_line_length": 36.14792899408284,
"alnum_prop": 0.5338025863480111,
"repo_name": "coolster1/dark-rt-toolkit",
"id": "5139cab6e336537d97ce893cf562b786f7afd30d",
"size": "6109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tk1-random/c_get_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "214022"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import cgi
import codecs
import errno
import functools
from multiprocessing import cpu_count
import os.path
import re
import shutil
import sys
from pygments import highlight
from pygments.lexers.c_cpp import CppLexer
from pygments.formatters import HtmlFormatter
import optpmap
import optrecord
desc = '''Generate HTML output to visualize optimization records from the YAML files
generated with -fsave-optimization-record and -fdiagnostics-show-hotness.
The tools requires PyYAML and Pygments Python packages.'''
# This allows passing the global context to the child processes.
class Context:
    """Cross-file rendering state shared with worker processes.

    Attributes:
        caller_loc: Maps function names to the source location of the call
            site where inlining happened (used to link inline contexts).
    """

    def __init__(self, caller_loc=None):
        # Build a fresh dict per instance: a mutable default argument
        # (`caller_loc=dict()`) would be shared by every Context created
        # with the default value.
        self.caller_loc = {} if caller_loc is None else caller_loc


context = Context()
def suppress(remark):
    """Return True for Swift-stdlib remarks that should be hidden.

    Filters out specialization remarks on "Swift." functions and inlining
    remarks whose callee is in the Swift standard library.
    """
    name = remark.Name
    if name == 'sil.Specialized':
        target = remark.getArgDict()['Function'][0]
        return target.startswith('\"Swift.')
    if name == 'sil.Inlined':
        callee = remark.getArgDict()['Callee'][0]
        return callee.startswith(('\"Swift.', '\"specialized Swift.'))
    return False
class SourceFileRenderer:
    """Renders one annotated source file into an HTML page.

    The page lists each source line (optionally syntax-highlighted with
    Pygments' C++ lexer) followed by the optimization remarks attached to
    that line.
    """

    def __init__(self, source_dir, output_dir, filename, no_highlight):
        self.filename = filename
        existing_filename = None
        # The remark's path may already resolve as given; otherwise try it
        # relative to source_dir.
        if os.path.exists(filename):
            existing_filename = filename
        else:
            fn = os.path.join(source_dir, filename)
            if os.path.exists(fn):
                existing_filename = fn

        self.no_highlight = no_highlight
        self.stream = codecs.open(os.path.join(output_dir, optrecord.html_file_name(filename)), 'w', encoding='utf-8')
        if existing_filename:
            self.source_stream = open(existing_filename)
        else:
            # Source could not be located: emit a stub page instead.
            self.source_stream = None
            print('''
<html>
<h1>Unable to locate file {}</h1>
</html>
            '''.format(filename), file=self.stream)

        self.html_formatter = HtmlFormatter(encoding='utf-8')
        self.cpp_lexer = CppLexer(stripnl=False)

    def render_source_lines(self, stream, line_remarks):
        """Emit one table row per source line, then its remark rows."""
        file_text = stream.read()

        if self.no_highlight:
            html_highlighted = file_text.decode('utf-8')
        else:
            html_highlighted = highlight(
                file_text,
                self.cpp_lexer,
                self.html_formatter)

            # Note that the API is different between Python 2 and 3.  On
            # Python 3, pygments.highlight() returns a bytes object, so we
            # have to decode.  On Python 2, the output is str but since we
            # support unicode characters and the output streams is unicode we
            # decode too.
            html_highlighted = html_highlighted.decode('utf-8')

            # Take off the header and footer, these must be
            # reapplied line-wise, within the page structure
            html_highlighted = html_highlighted.replace('<div class="highlight"><pre>', '')
            html_highlighted = html_highlighted.replace('</pre></div>', '')

        for (linenum, html_line) in enumerate(html_highlighted.split('\n'), start=1):
            print(u'''
<tr>
<td><a name=\"L{linenum}\">{linenum}</a></td>
<td></td>
<td></td>
<td><div class="highlight"><pre>{html_line}</pre></div></td>
</tr>'''.format(**locals()), file=self.stream)

            # Interleave the remarks recorded for this line number.
            for remark in line_remarks.get(linenum, []):
                if not suppress(remark):
                    self.render_inline_remarks(remark, html_line)

    def render_inline_remarks(self, r, line):
        """Emit one remark row, indented to the remark's source column."""
        inlining_context = r.DemangledFunctionName
        dl = context.caller_loc.get(r.Function)
        if dl:
            # Link the inline context back to the caller's location.
            dl_dict = dict(list(dl))
            link = optrecord.make_link(dl_dict['File'], dl_dict['Line'] - 2)
            inlining_context = "<a href={link}>{r.DemangledFunctionName}</a>".format(**locals())

        # Column is the number of characters *including* tabs, keep those and
        # replace everything else with spaces.
        indent = line[:max(r.Column, 1) - 1]
        indent = re.sub('\S', ' ', indent)

        print(u'''
<tr>
<td></td>
<td>{r.RelativeHotness}</td>
<td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
<td><pre style="display:inline">{indent}</pre><span class=\"column-entry-yellow\"> {r.message} </span></td>
<td class=\"column-entry-yellow\">{inlining_context}</td>
</tr>'''.format(**locals()), file=self.stream)

    def render(self, line_remarks):
        """Write the full page; no-op when the source file was not found."""
        if not self.source_stream:
            return

        print('''
<html>
<title>{}</title>
<meta charset="utf-8" />
<head>
<link rel='stylesheet' type='text/css' href='style.css'>
</head>
<body>
<div class="centered">
<table class="source">
<thead>
<tr>
<th style="width: 2%">Line</td>
<th style="width: 3%">Hotness</td>
<th style="width: 10%">Optimization</td>
<th style="width: 70%">Source</td>
<th style="width: 15%">Inline Context</td>
</tr>
</thead>
<tbody>'''.format(os.path.basename(self.filename)), file=self.stream)
        self.render_source_lines(self.source_stream, line_remarks)

        print('''
</tbody>
</table>
</body>
</html>''', file=self.stream)
class IndexRenderer:
    """Writes index.html: a table of remarks linking into the per-file pages.

    Args:
        output_dir: Directory index.html is created in.
        should_display_hotness: When True the caller passes remarks sorted
            hottest-first and only the hottest ones are listed.
        max_hottest_remarks_on_index: Cap on entries when sorting by hotness.
    """

    def __init__(self, output_dir, should_display_hotness, max_hottest_remarks_on_index):
        self.stream = codecs.open(os.path.join(output_dir, 'index.html'), 'w', encoding='utf-8')
        self.should_display_hotness = should_display_hotness
        self.max_hottest_remarks_on_index = max_hottest_remarks_on_index

    def render_entry(self, r, odd):
        """Emit one table row for remark *r*; *odd* alternates row styling."""
        # cgi.escape() was deprecated in Python 3.2 and removed in 3.8;
        # prefer html.escape() and fall back only on Python 2.
        # quote=False matches cgi.escape's default of leaving quote
        # characters untouched.
        try:
            from html import escape as html_escape
        except ImportError:  # Python 2
            from cgi import escape as html_escape
        escaped_name = html_escape(r.DemangledFunctionName, quote=False)
        print(u'''
<tr>
<td class=\"column-entry-{odd}\"><a href={r.Link}>{r.DebugLocString}</a></td>
<td class=\"column-entry-{odd}\">{r.RelativeHotness}</td>
<td class=\"column-entry-{odd}\">{escaped_name}</td>
<td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
</tr>'''.format(**locals()), file=self.stream)

    def render(self, all_remarks):
        """Emit the index page for *all_remarks* (already sorted by caller)."""
        print('''
<html>
<meta charset="utf-8" />
<head>
<link rel='stylesheet' type='text/css' href='style.css'>
</head>
<body>
<div class="centered">
<table>
<tr>
<td>Source Location</td>
<td>Hotness</td>
<td>Function</td>
<td>Pass</td>
</tr>''', file=self.stream)

        # Only cap the list when remarks are hotness-sorted; otherwise show all.
        max_entries = None
        if self.should_display_hotness:
            max_entries = self.max_hottest_remarks_on_index

        for i, remark in enumerate(all_remarks[:max_entries]):
            if not suppress(remark):
                self.render_entry(remark, i % 2)
        print('''
</table>
</body>
</html>''', file=self.stream)
def _render_file(source_dir, output_dir, ctx, no_highlight, entry):
    """Worker entry point: render a single source file to HTML.

    *entry* is a ``(filename, remarks)`` pair.  This may run in a child
    process, so the shared Context is re-installed into this process's
    module-global ``context`` before rendering.
    """
    global context
    context = ctx
    filename, remarks = entry
    SourceFileRenderer(source_dir, output_dir, filename, no_highlight).render(remarks)
def map_remarks(all_remarks):
    """Populate context.caller_loc from successful inlining remarks.

    Maps each inlined function's name to the DebugLoc of its caller so the
    per-line renderer can link an inline context back to the call site.
    """
    # Set up a map between function names and their source location for
    # function where inlining happened
    for remark in optrecord.itervalues(all_remarks):
        if isinstance(remark, optrecord.Passed) and remark.Pass == "inline" and remark.Name == "Inlined":
            for arg in remark.Args:
                arg_dict = dict(list(arg))
                caller = arg_dict.get('Caller')
                if caller:
                    try:
                        context.caller_loc[caller] = arg_dict['DebugLoc']
                    except KeyError:
                        # Caller argument without a DebugLoc; nothing to map.
                        pass
def generate_report(all_remarks,
                    file_remarks,
                    source_dir,
                    output_dir,
                    no_highlight,
                    should_display_hotness,
                    max_hottest_remarks_on_index,
                    num_jobs,
                    should_print_progress):
    """Render the index page plus one HTML page per annotated source file.

    Creates output_dir if needed, writes index.html, copies the static
    stylesheet, then renders the per-file pages in parallel via optpmap.
    """
    try:
        os.makedirs(output_dir)
    except OSError as e:
        # A pre-existing output directory is fine; anything else is fatal.
        if e.errno == errno.EEXIST and os.path.isdir(output_dir):
            pass
        else:
            raise

    if should_print_progress:
        print('Rendering index page...')
    # With hotness data, list the hottest remarks first; otherwise order by
    # source position for a stable, browsable index.
    if should_display_hotness:
        sorted_remarks = sorted(optrecord.itervalues(all_remarks), key=lambda r: (r.Hotness, r.File, r.Line, r.Column, r.PassWithDiffPrefix, r.yaml_tag, r.Function), reverse=True)
    else:
        sorted_remarks = sorted(optrecord.itervalues(all_remarks), key=lambda r: (r.File, r.Line, r.Column, r.PassWithDiffPrefix, r.yaml_tag, r.Function))
    IndexRenderer(output_dir, should_display_hotness, max_hottest_remarks_on_index).render(sorted_remarks)

    # Ship the static stylesheet next to the generated pages.
    shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "style.css"), output_dir)

    # Bind the per-run arguments; pmap supplies each (filename, remarks) pair.
    _render_file_bound = functools.partial(_render_file, source_dir, output_dir, context, no_highlight)
    if should_print_progress:
        print('Rendering HTML files...')
    optpmap.pmap(_render_file_bound,
                 file_remarks.items(),
                 num_jobs,
                 should_print_progress)
def main():
    """Command-line entry point: parse args, gather remarks, emit HTML."""
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument(
        'yaml_dirs_or_files',
        nargs='+',
        help='List of optimization record files or directories searched '
             'for optimization record files.')
    parser.add_argument(
        '--output-dir',
        '-o',
        default='html',
        help='Path to a directory where generated HTML files will be output. '
             'If the directory does not already exist, it will be created. '
             '"%(default)s" by default.')
    parser.add_argument(
        '--jobs',
        '-j',
        default=None,
        type=int,
        help='Max job count (defaults to %(default)s, the current CPU count)')
    parser.add_argument(
        '--source-dir',
        '-s',
        default='',
        help='set source directory')
    parser.add_argument(
        '--no-progress-indicator',
        '-n',
        action='store_true',
        default=False,
        help='Do not display any indicator of how many YAML files were read '
             'or rendered into HTML.')
    parser.add_argument(
        '--max-hottest-remarks-on-index',
        default=1000,
        type=int,
        help='Maximum number of the hottest remarks to appear on the index page')
    parser.add_argument(
        '--no-highlight',
        action='store_true',
        default=False,
        help='Do not use a syntax highlighter when rendering the source code')
    parser.add_argument(
        '--demangler',
        help='Set the demangler to be used (defaults to %s)' % optrecord.Remark.default_demangler)

    # Do not make this a global variable. Values needed to be propagated through
    # to individual classes and functions to be portable with multiprocessing across
    # Windows and non-Windows.
    args = parser.parse_args()

    print_progress = not args.no_progress_indicator
    if args.demangler:
        optrecord.Remark.set_demangler(args.demangler)

    files = optrecord.find_opt_files(*args.yaml_dirs_or_files)
    if not files:
        parser.error("No *.opt.yaml files found")
        # NOTE(review): parser.error() raises SystemExit(2), so this
        # sys.exit(1) is unreachable dead code.
        sys.exit(1)

    all_remarks, file_remarks, should_display_hotness = \
        optrecord.gather_results(files, args.jobs, print_progress)

    # Build the caller-location map before rendering so inline contexts link.
    map_remarks(all_remarks)

    generate_report(all_remarks,
                    file_remarks,
                    args.source_dir,
                    args.output_dir,
                    args.no_highlight,
                    should_display_hotness,
                    args.max_hottest_remarks_on_index,
                    args.jobs,
                    print_progress)
| {
"content_hash": "b7f643f595fcb044070377f2c06d0bf9",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 179,
"avg_line_length": 33.97360703812317,
"alnum_prop": 0.6100129477772982,
"repo_name": "endlessm/chromium-browser",
"id": "4887043e0f9650c7a83f0bc6e7edd13615e5c682",
"size": "11611",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "third_party/swiftshader/third_party/llvm-7.0/llvm/tools/opt-viewer/opt-viewer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
# NOTE(review): pyangbind auto-generated binding. A second class also named
# "state" (generated from openconfig-network-instance-l2 for the same YANG
# path) appears later in this module and shadows this definition at import
# time.
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/afi-safi/af/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: This container defines AFI-SAFI State information
    """
    # Restrict instances to the generated leaves plus pyangbind bookkeeping
    # attributes; also avoids allocating a per-instance __dict__.
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__afi_name",
        "__safi_name",
        "__metric",
        "__enabled",
    )
    # YANG node name and the pyangbind construct that generated this class.
    _yang_name = "state"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # afi-name: identityref restricted to the IPV4/IPV6 identities from
        # openconfig-isis-types, accepted with or without the
        # "oc-isis-types:" prefix. config false (state leaf).
        self.__afi_name = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={
                    "IPV4": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                    "oc-isis-types:IPV4": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                    "IPV6": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                    "oc-isis-types:IPV6": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                },
            ),
            is_leaf=True,
            yang_name="afi-name",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="identityref",
            is_config=False,
        )
        # safi-name: identityref restricted to the UNICAST/MULTICAST
        # identities, with or without the "oc-isis-types:" prefix.
        self.__safi_name = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={
                    "UNICAST": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                    "oc-isis-types:UNICAST": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                    "MULTICAST": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                    "oc-isis-types:MULTICAST": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                },
            ),
            is_leaf=True,
            yang_name="safi-name",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="identityref",
            is_config=False,
        )
        # metric: uint32 constrained to 0..4294967295, default value 10.
        self.__metric = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            default=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            )(
                10
            ),
            is_leaf=True,
            yang_name="metric",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=False,
        )
        # enabled: plain boolean leaf.
        self.__enabled = YANGDynClass(
            base=YANGBool,
            is_leaf=True,
            yang_name="enabled",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=False,
        )
        # A single positional argument acts as a copy source: it must expose
        # every generated element, and only values reported as changed are
        # copied in via the _set_* methods (forwarding the optional ``load``
        # keyword when supplied).
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Skip elements whose value was never explicitly changed.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Delegate to the parent when attached to a tree; otherwise return
        # this container's absolute YANG path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "interfaces",
                "interface",
                "levels",
                "level",
                "afi-safi",
                "af",
                "state",
            ]
    def _get_afi_name(self):
        """
        Getter method for afi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/afi_name (identityref)
        YANG Description: Address-family type.
        """
        return self.__afi_name
    def _set_afi_name(self, v, load=False):
        """
        Setter method for afi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/afi_name (identityref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_afi_name is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_afi_name() directly.
        YANG Description: Address-family type.
        """
        # NOTE(review): values carrying a _utype hook are re-cast through it
        # before validation — presumably union normalisation; confirm.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Re-wrap the value in this leaf's YANGDynClass; an incompatible
        # value surfaces as a structured ValueError below.
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={
                        "IPV4": {
                            "@module": "openconfig-isis-types",
                            "@namespace": "http://openconfig.net/yang/isis-types",
                        },
                        "oc-isis-types:IPV4": {
                            "@module": "openconfig-isis-types",
                            "@namespace": "http://openconfig.net/yang/isis-types",
                        },
                        "IPV6": {
                            "@module": "openconfig-isis-types",
                            "@namespace": "http://openconfig.net/yang/isis-types",
                        },
                        "oc-isis-types:IPV6": {
                            "@module": "openconfig-isis-types",
                            "@namespace": "http://openconfig.net/yang/isis-types",
                        },
                    },
                ),
                is_leaf=True,
                yang_name="afi-name",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="identityref",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """afi_name must be of a type compatible with identityref""",
                    "defined-type": "openconfig-network-instance:identityref",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:IPV4': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'IPV6': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:IPV6': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}},), is_leaf=True, yang_name="afi-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
                }
            )
        self.__afi_name = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_afi_name(self):
        # Restore the leaf to a fresh default-valued wrapper.
        self.__afi_name = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={
                    "IPV4": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                    "oc-isis-types:IPV4": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                    "IPV6": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                    "oc-isis-types:IPV6": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                },
            ),
            is_leaf=True,
            yang_name="afi-name",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="identityref",
            is_config=False,
        )
    def _get_safi_name(self):
        """
        Getter method for safi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/safi_name (identityref)
        YANG Description: Subsequent address-family type.
        """
        return self.__safi_name
    def _set_safi_name(self, v, load=False):
        """
        Setter method for safi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/safi_name (identityref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_safi_name is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_safi_name() directly.
        YANG Description: Subsequent address-family type.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Revalidate by rebuilding the leaf wrapper around the new value.
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={
                        "UNICAST": {
                            "@module": "openconfig-isis-types",
                            "@namespace": "http://openconfig.net/yang/isis-types",
                        },
                        "oc-isis-types:UNICAST": {
                            "@module": "openconfig-isis-types",
                            "@namespace": "http://openconfig.net/yang/isis-types",
                        },
                        "MULTICAST": {
                            "@module": "openconfig-isis-types",
                            "@namespace": "http://openconfig.net/yang/isis-types",
                        },
                        "oc-isis-types:MULTICAST": {
                            "@module": "openconfig-isis-types",
                            "@namespace": "http://openconfig.net/yang/isis-types",
                        },
                    },
                ),
                is_leaf=True,
                yang_name="safi-name",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="identityref",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """safi_name must be of a type compatible with identityref""",
                    "defined-type": "openconfig-network-instance:identityref",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'UNICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:UNICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'MULTICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:MULTICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}},), is_leaf=True, yang_name="safi-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
                }
            )
        self.__safi_name = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_safi_name(self):
        # Restore the leaf to a fresh default-valued wrapper.
        self.__safi_name = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={
                    "UNICAST": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                    "oc-isis-types:UNICAST": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                    "MULTICAST": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                    "oc-isis-types:MULTICAST": {
                        "@module": "openconfig-isis-types",
                        "@namespace": "http://openconfig.net/yang/isis-types",
                    },
                },
            ),
            is_leaf=True,
            yang_name="safi-name",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="identityref",
            is_config=False,
        )
    def _get_metric(self):
        """
        Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/metric (uint32)
        YANG Description: ISIS metric value(default=10).
        """
        return self.__metric
    def _set_metric(self, v, load=False):
        """
        Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/metric (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_metric is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_metric() directly.
        YANG Description: ISIS metric value(default=10).
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Revalidate by rebuilding the leaf wrapper around the new value.
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..4294967295"]},
                    int_size=32,
                ),
                default=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..4294967295"]},
                    int_size=32,
                )(
                    10
                ),
                is_leaf=True,
                yang_name="metric",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint32",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """metric must be of a type compatible with uint32""",
                    "defined-type": "uint32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(10), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
                }
            )
        self.__metric = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_metric(self):
        # Restore the leaf to a fresh wrapper carrying the default of 10.
        self.__metric = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            default=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            )(
                10
            ),
            is_leaf=True,
            yang_name="metric",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=False,
        )
    def _get_enabled(self):
        """
        Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/enabled (boolean)
        YANG Description: When set to true, the functionality within which this leaf is
        defined is enabled, when set to false it is explicitly disabled.
        """
        return self.__enabled
    def _set_enabled(self, v, load=False):
        """
        Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/enabled (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_enabled is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_enabled() directly.
        YANG Description: When set to true, the functionality within which this leaf is
        defined is enabled, when set to false it is explicitly disabled.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Revalidate by rebuilding the leaf wrapper around the new value.
        try:
            t = YANGDynClass(
                v,
                base=YANGBool,
                is_leaf=True,
                yang_name="enabled",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="boolean",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """enabled must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
                }
            )
        self.__enabled = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_enabled(self):
        # Restore the leaf to a fresh default-valued wrapper.
        self.__enabled = YANGDynClass(
            base=YANGBool,
            is_leaf=True,
            yang_name="enabled",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=False,
        )
    # Leaves are exposed as read-only properties (state container: every
    # leaf is config false); population goes through the _set_* methods.
    afi_name = __builtin__.property(_get_afi_name)
    safi_name = __builtin__.property(_get_safi_name)
    metric = __builtin__.property(_get_metric)
    enabled = __builtin__.property(_get_enabled)
    # Ordered registry of this container's elements, consumed by PybindBase
    # and by the copy-constructor logic in __init__.
    _pyangbind_elements = OrderedDict(
        [
            ("afi_name", afi_name),
            ("safi_name", safi_name),
            ("metric", metric),
            ("enabled", enabled),
        ]
    )
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/afi-safi/af/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines AFI-SAFI State information
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__afi_name",
"__safi_name",
"__metric",
"__enabled",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__afi_name = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
self.__safi_name = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="safi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
default=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
)(
10
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
self.__enabled = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"levels",
"level",
"afi-safi",
"af",
"state",
]
def _get_afi_name(self):
"""
Getter method for afi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/afi_name (identityref)
YANG Description: Address-family type.
"""
return self.__afi_name
def _set_afi_name(self, v, load=False):
"""
Setter method for afi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/afi_name (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_afi_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_afi_name() directly.
YANG Description: Address-family type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """afi_name must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:IPV4': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'IPV6': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:IPV6': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}},), is_leaf=True, yang_name="afi-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__afi_name = t
if hasattr(self, "_set"):
self._set()
def _unset_afi_name(self):
self.__afi_name = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
def _get_safi_name(self):
"""
Getter method for safi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/safi_name (identityref)
YANG Description: Subsequent address-family type.
"""
return self.__safi_name
def _set_safi_name(self, v, load=False):
"""
Setter method for safi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/safi_name (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_safi_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_safi_name() directly.
YANG Description: Subsequent address-family type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="safi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """safi_name must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'UNICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:UNICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'MULTICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:MULTICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}},), is_leaf=True, yang_name="safi-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__safi_name = t
if hasattr(self, "_set"):
self._set()
def _unset_safi_name(self):
self.__safi_name = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="safi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/metric (uint32)
YANG Description: ISIS metric value(default=10).
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/metric (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
YANG Description: ISIS metric value(default=10).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
default=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
)(
10
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """metric must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(10), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__metric = t
if hasattr(self, "_set"):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
default=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
)(
10
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/enabled (boolean)
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
    # Reset the leaf to a fresh (default/unset) wrapped instance with the
    # same YANG metadata used in _set_enabled.
    self.__enabled = YANGDynClass(
        base=YANGBool,
        is_leaf=True,
        yang_name="enabled",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="boolean",
        is_config=False,
    )
# Expose each generated leaf through a getter-only property (this is a
# state container — is_config=False — so no setter is attached).
afi_name = __builtin__.property(_get_afi_name)
safi_name = __builtin__.property(_get_safi_name)
metric = __builtin__.property(_get_metric)
enabled = __builtin__.property(_get_enabled)

# Ordered registry of this container's child elements; pyangbind uses it
# for iteration and serialization.
_pyangbind_elements = OrderedDict(
    [
        ("afi_name", afi_name),
        ("safi_name", safi_name),
        ("metric", metric),
        ("enabled", enabled),
    ]
)
| {
"content_hash": "421b6755678d35fd6c2304fd7c060152",
"timestamp": "",
"source": "github",
"line_count": 1117,
"max_line_length": 881,
"avg_line_length": 43.127126230975826,
"alnum_prop": 0.52054055176136,
"repo_name": "napalm-automation/napalm-yang",
"id": "01bdc7b49155afe165e04be2208e379d10df75be",
"size": "48197",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/state/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
} |
import unittest
class TestModules(unittest.TestCase):
def test_variance_decomposition_import(self):
try:
import limix.modules.varianceDecomposition as vd
except ImportError:
self.fail()
# Allow running this test module directly with `python test_modules.py`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "3f4c09e7d13d3db22b89e344c953060c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 60,
"avg_line_length": 25.363636363636363,
"alnum_prop": 0.6344086021505376,
"repo_name": "PMBio/limix",
"id": "7fd1577f9d5180378f14f3a80d3d7dc3fed9bc0e",
"size": "279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "limix/test/test_modules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "C",
"bytes": "1550482"
},
{
"name": "C++",
"bytes": "8073525"
},
{
"name": "CMake",
"bytes": "21097"
},
{
"name": "Fortran",
"bytes": "363470"
},
{
"name": "M4",
"bytes": "16520"
},
{
"name": "Makefile",
"bytes": "11605"
},
{
"name": "Matlab",
"bytes": "25435"
},
{
"name": "PowerShell",
"bytes": "3104"
},
{
"name": "Python",
"bytes": "1704175"
},
{
"name": "Roff",
"bytes": "66747"
},
{
"name": "Shell",
"bytes": "15645"
},
{
"name": "TeX",
"bytes": "26251"
}
],
"symlink_target": ""
} |
import docker
from docker import errors
import mock
import six
from magnum.common import docker_utils
from magnum.common import exception
from magnum.conductor.handlers import docker_conductor
from magnum import objects
from magnum.objects import fields
from magnum.tests import base
class TestDockerHandler(base.BaseTestCase):
    """Unit tests for the docker conductor handler.

    setUp patches ``docker_utils.docker_for_container`` so every handler
    call under test talks to ``self.mock_docker`` (a MagicMock) instead of
    a real docker client.
    """

    def setUp(self):
        super(TestDockerHandler, self).setUp()
        self.conductor = docker_conductor.Handler()
        dfc_patcher = mock.patch.object(docker_utils,
                                        'docker_for_container')
        docker_for_container = dfc_patcher.start()
        # The handler uses docker_for_container as a context manager; make
        # entering it hand back our mock client.
        self.dfc_context_manager = docker_for_container.return_value
        self.mock_docker = mock.MagicMock()
        self.dfc_context_manager.__enter__.return_value = self.mock_docker
        self.addCleanup(dfc_patcher.stop)

    @mock.patch.object(docker_utils, 'is_docker_api_version_atleast')
    def _test_container_create(self, container_dict, expected_kwargs,
                               mock_version, expected_image='test_image',
                               expected_tag='some_tag',
                               api_version='1.18'):
        """Common driver for the container_create tests.

        Builds a mock container from ``container_dict``, calls
        container_create, and asserts the docker client pulled
        ``expected_image``/``expected_tag`` and created the container
        with ``expected_kwargs``.
        """
        # Only versions above 1.18 report the "at least" API check as True.
        mock_version.return_value = (float(api_version) > 1.18)
        name = container_dict.pop('name')
        mock_container = mock.MagicMock(**container_dict)
        # 'name' is reserved by MagicMock's constructor, so attach it as a
        # property on the mock's type instead.
        type(mock_container).name = mock.PropertyMock(return_value=name)
        container = self.conductor.container_create(
            None, mock_container)
        utf8_image = self.conductor._encode_utf8(mock_container.image)
        self.mock_docker.inspect_image.assert_called_once_with(utf8_image)
        self.mock_docker.pull.assert_called_once_with(expected_image,
                                                      tag=expected_tag)
        self.mock_docker.create_container.assert_called_once_with(
            mock_container.image, **expected_kwargs)
        self.assertEqual(fields.ContainerStatus.STOPPED, container.status)

    def test_container_create(self):
        container_dict = {
            'name': 'some-name',
            'uuid': 'some-uuid',
            'image': 'test_image:some_tag',
            'command': None,
            'memory': None,
            'environment': None,
        }
        expected_kwargs = {
            'name': 'some-name',
            'hostname': 'some-uuid',
            'command': None,
            'mem_limit': None,
            'environment': None,
        }
        self._test_container_create(container_dict, expected_kwargs)

    def test_container_create_api_1_19(self):
        # From docker API 1.19 on, the memory limit is passed inside
        # host_config instead of mem_limit.
        container_dict = {
            'name': 'some-name',
            'uuid': 'some-uuid',
            'image': 'test_image:some_tag',
            'command': None,
            'memory': '100m',
            'environment': None,
        }
        expected_kwargs = {
            'name': 'some-name',
            'hostname': 'some-uuid',
            'command': None,
            'host_config': {'Memory': 100 * 1024 * 1024},
            'environment': None,
        }
        self._test_container_create(container_dict, expected_kwargs,
                                    api_version='1.19')

    def test_container_create_with_command(self):
        container_dict = {
            'name': 'some-name',
            'uuid': 'some-uuid',
            'image': 'test_image:some_tag',
            'command': 'env',
            'memory': None,
            'environment': None,
        }
        expected_kwargs = {
            'name': 'some-name',
            'hostname': 'some-uuid',
            'command': 'env',
            'mem_limit': None,
            'environment': None,
        }
        self._test_container_create(container_dict, expected_kwargs)

    def test_container_create_with_memory(self):
        container_dict = {
            'name': 'some-name',
            'uuid': 'some-uuid',
            'image': 'test_image:some_tag',
            'command': None,
            'memory': '512m',
            'environment': None,
        }
        expected_kwargs = {
            'name': 'some-name',
            'hostname': 'some-uuid',
            'command': None,
            'mem_limit': '512m',
            'environment': None,
        }
        self._test_container_create(container_dict, expected_kwargs)

    def test_container_create_with_environment(self):
        container_dict = {
            'name': 'some-name',
            'uuid': 'some-uuid',
            'image': 'test_image:some_tag',
            'command': None,
            'memory': '512m',
            'environment': {'key1': 'val1', 'key2': 'val2'},
        }
        expected_kwargs = {
            'name': 'some-name',
            'hostname': 'some-uuid',
            'command': None,
            'mem_limit': '512m',
            'environment': {'key1': 'val1', 'key2': 'val2'},
        }
        self._test_container_create(container_dict, expected_kwargs)

    def test_encode_utf8_unicode(self):
        image = 'some_image:some_tag'
        unicode_image = six.u(image)
        utf8_image = self.conductor._encode_utf8(unicode_image)
        self.assertEqual(unicode_image.encode('utf-8'), utf8_image)

    @mock.patch.object(errors.APIError, '__str__')
    def test_container_create_with_failure(self, mock_init):
        mock_container = mock.MagicMock()
        mock_container.image = 'test_image:some_tag'
        mock_init.return_value = 'hit error'
        # A failing pull must surface as ContainerException and must stop
        # create_container from ever being called.
        self.mock_docker.pull = mock.Mock(
            side_effect=errors.APIError('Error', '', ''))
        self.assertRaises(exception.ContainerException,
                          self.conductor.container_create,
                          None, mock_container)
        self.mock_docker.pull.assert_called_once_with(
            'test_image',
            tag='some_tag')
        self.assertFalse(self.mock_docker.create_container.called)
        mock_init.assert_called_with()
        self.assertEqual(fields.ContainerStatus.ERROR,
                         mock_container.status)

    def test_find_container_by_name_not_found(self):
        # A 404 from docker is translated into an empty lookup result.
        mock_docker = mock.MagicMock()
        fake_response = mock.MagicMock()
        fake_response.content = 'not_found'
        fake_response.status_code = 404
        mock_docker.list_instances.side_effect = errors.APIError(
            'not_found', fake_response)
        ret = self.conductor._find_container_by_name(mock_docker, '1')
        self.assertEqual({}, ret)

    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_delete(self, mock_find_container):
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        self.conductor.container_delete(None, mock_container_uuid)
        self.mock_docker.remove_container.assert_called_once_with(
            mock_docker_id)
        mock_find_container.assert_called_once_with(self.mock_docker,
                                                    mock_container_uuid)

    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_delete_with_container_not_exist(self,
                                                       mock_find_container):
        # Deleting an unknown container is a silent no-op.
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = {}
        mock_find_container.return_value = mock_docker_id
        res = self.conductor.container_delete(None, mock_container_uuid)
        self.assertIsNone(res)
        self.assertFalse(self.mock_docker.remove_container.called)
        mock_find_container.assert_called_once_with(self.mock_docker,
                                                    mock_container_uuid)

    @mock.patch.object(errors.APIError, '__str__')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_delete_with_failure(self, mock_find_container,
                                           mock_init):
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        mock_init.return_value = 'hit error'
        self.mock_docker.remove_container = mock.Mock(
            side_effect=errors.APIError('Error', '', ''))
        self.assertRaises(exception.ContainerException,
                          self.conductor.container_delete,
                          None, mock_container_uuid)
        self.mock_docker.remove_container.assert_called_once_with(
            mock_docker_id)
        mock_find_container.assert_called_once_with(self.mock_docker,
                                                    mock_container_uuid)
        mock_init.assert_called_with()

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_action(self, mock_find_container, mock_get_by_uuid):
        # _container_action looks up the docker func by name and stores the
        # requested status on the container object.
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        self.conductor._container_action(None, mock_container_uuid,
                                         'fake-status', 'fake-func')
        self.assertEqual('fake-status', mock_container.status)

    def _test_container(self, action, docker_func_name, expected_status,
                        mock_find_container, mock_get_by_uuid):
        """Drive a lifecycle action and assert the docker call + status."""
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        action_func = getattr(self.conductor, action)
        action_func(None, mock_container_uuid)
        docker_func = getattr(self.mock_docker, docker_func_name)
        docker_func.assert_called_once_with(mock_docker_id)
        mock_find_container.assert_called_once_with(self.mock_docker,
                                                    mock_container_uuid)
        self.assertEqual(expected_status, mock_container.status)

    @mock.patch.object(errors.APIError, '__str__')
    def _test_container_with_failure(
            self, action, docker_func_name, mock_find_container, mock_init):
        """Drive a lifecycle action whose docker call raises APIError."""
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        mock_init.return_value = 'hit error'
        setattr(self.mock_docker, docker_func_name, mock.Mock(
            side_effect=errors.APIError('Error', '', '')))
        self.assertRaises(exception.ContainerException,
                          getattr(self.conductor, action),
                          None, mock_container_uuid)
        docker_func = getattr(self.mock_docker, docker_func_name)
        docker_func.assert_called_once_with(mock_docker_id)
        mock_find_container.assert_called_once_with(self.mock_docker,
                                                    mock_container_uuid)
        mock_init.assert_called_with()

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_reboot(self, mock_find_container, mock_get_by_uuid):
        self._test_container(
            'container_reboot', 'restart', fields.ContainerStatus.RUNNING,
            mock_find_container, mock_get_by_uuid)

    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_reboot_with_failure(self, mock_find_container):
        self._test_container_with_failure(
            'container_reboot', 'restart', mock_find_container)

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_start(self, mock_find_container, mock_get_by_uuid):
        self._test_container(
            'container_start', 'start', fields.ContainerStatus.RUNNING,
            mock_find_container, mock_get_by_uuid)

    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_start_with_failure(self, mock_find_container):
        self._test_container_with_failure(
            'container_start', 'start', mock_find_container)

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_stop(self, mock_find_container, mock_get_by_uuid):
        self._test_container(
            'container_stop', 'stop', fields.ContainerStatus.STOPPED,
            mock_find_container, mock_get_by_uuid)

    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_stop_with_failure(self, mock_find_container):
        self._test_container_with_failure(
            'container_stop', 'stop', mock_find_container)

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_pause(self, mock_find_container, mock_get_by_uuid):
        self._test_container(
            'container_pause', 'pause', fields.ContainerStatus.PAUSED,
            mock_find_container, mock_get_by_uuid)

    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_pause_with_failure(self, mock_find_container):
        self._test_container_with_failure(
            'container_pause', 'pause', mock_find_container)

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_unpause(self, mock_find_container, mock_get_by_uuid):
        self._test_container(
            'container_unpause', 'unpause', fields.ContainerStatus.RUNNING,
            mock_find_container, mock_get_by_uuid)

    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_unpause_with_failure(self, mock_find_container):
        self._test_container_with_failure(
            'container_unpause', 'unpause', mock_find_container)

    def _test_container_show(
            self, mock_find_container, mock_get_by_uuid, container_detail=None,
            expected_status=None, mock_docker_id='2703ef2b705d'):
        """Drive container_show; optionally stub inspect_container output
        and check the resulting container status."""
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_find_container.return_value = mock_docker_id
        if container_detail is not None:
            self.mock_docker.inspect_container.return_value = container_detail
        self.conductor.container_show(None, mock_container_uuid)
        if mock_docker_id:
            # inspect_container is only reached when the lookup succeeded.
            self.mock_docker.inspect_container.assert_called_once_with(
                mock_docker_id)
        mock_find_container.assert_called_once_with(self.mock_docker,
                                                    mock_container_uuid)
        if expected_status is not None:
            self.assertEqual(expected_status, mock_container.status)

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_show(self, mock_find_container, mock_get_by_uuid):
        self._test_container_show(mock_find_container, mock_get_by_uuid)

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_show_with_running_state(self, mock_find_container,
                                               mock_get_by_uuid):
        mock_container_detail = {'State': {'Error': '',
                                           'Running': True,
                                           'Paused': False}}
        self._test_container_show(
            mock_find_container, mock_get_by_uuid, mock_container_detail,
            fields.ContainerStatus.RUNNING)

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_show_with_stop_state(self, mock_find_container,
                                            mock_get_by_uuid):
        mock_container_detail = {'State': {'Error': '',
                                           'Running': False,
                                           'Paused': False}}
        self._test_container_show(
            mock_find_container, mock_get_by_uuid, mock_container_detail,
            fields.ContainerStatus.STOPPED)

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_show_with_pause_state(self, mock_find_container,
                                             mock_get_by_uuid):
        mock_container_detail = {'State': {'Error': '',
                                           'Running': True,
                                           'Paused': True}}
        self._test_container_show(
            mock_find_container, mock_get_by_uuid, mock_container_detail,
            fields.ContainerStatus.PAUSED)

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_show_with_error_status(self, mock_find_container,
                                              mock_get_by_uuid):
        mock_container_detail = {'State': {'Error': True,
                                           'Running': False,
                                           'Paused': False}}
        self._test_container_show(
            mock_find_container, mock_get_by_uuid, mock_container_detail,
            fields.ContainerStatus.ERROR)

    def _test_container_show_with_failure(
            self, mock_find_container, mock_get_by_uuid, error,
            assert_raise=True, expected_status=None):
        """Drive container_show when inspect_container raises APIError.

        ``error`` is what str(APIError) returns; a '404' message is
        expected to be swallowed (assert_raise=False).
        """
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        # NOTE(review): this uuid differs from the one used by every other
        # test ('-1d6b02' vs '-16b02'); harmless here since it is only a
        # lookup key, but it looks like a typo.
        mock_container_uuid = 'd545a92d-609a-428f-8edb-1d6b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        with mock.patch.object(errors.APIError, '__str__',
                               return_value=error) as mock_init:
            self.mock_docker.inspect_container = mock.Mock(
                side_effect=errors.APIError('Error', '', ''))
            if assert_raise:
                self.assertRaises(exception.ContainerException,
                                  self.conductor.container_show,
                                  None, mock_container_uuid)
            else:
                self.conductor.container_show(None, mock_container_uuid)
            self.mock_docker.inspect_container.assert_called_once_with(
                mock_docker_id)
            mock_find_container.assert_called_once_with(self.mock_docker,
                                                        mock_container_uuid)
            mock_init.assert_called_with()
            if expected_status is not None:
                self.assertEqual(expected_status, mock_container.status)

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_show_with_failure(self, mock_find_container,
                                         mock_get_by_uuid):
        self._test_container_show_with_failure(
            mock_find_container, mock_get_by_uuid, error='hit error')

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_show_with_not_found(self, mock_find_container,
                                           mock_get_by_uuid):
        self._test_container_show_with_failure(
            mock_find_container, mock_get_by_uuid, error='404 error',
            assert_raise=False, expected_status=fields.ContainerStatus.ERROR)

    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_show_with_not_found_from_docker(self,
                                                       mock_find_container,
                                                       mock_get_by_uuid):
        self._test_container_show(
            mock_find_container, mock_get_by_uuid, mock_docker_id={},
            expected_status=fields.ContainerStatus.ERROR)

    def _test_container_exec(self, mock_find_container, docker_version='1.2.2',
                             deprecated=False):
        """Drive container_exec against old (execute) or new
        (exec_create/exec_start) docker-py APIs."""
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        # NOTE(review): module attribute patched in place and never
        # restored — could leak into other tests.
        docker.version = docker_version
        mock_find_container.return_value = mock_docker_id
        mock_create_res = mock.MagicMock()
        self.mock_docker.exec_create.return_value = mock_create_res
        self.conductor.container_exec(None, mock_container_uuid, 'ls')
        if deprecated:
            self.mock_docker.execute.assert_called_once_with(
                mock_docker_id, 'ls')
        else:
            self.mock_docker.exec_create.assert_called_once_with(
                mock_docker_id, 'ls', True, True, False)
            self.mock_docker.exec_start.assert_called_once_with(
                mock_create_res, False, False, False)
        mock_find_container.assert_called_once_with(self.mock_docker,
                                                    mock_container_uuid)

    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_exec(self, mock_find_container):
        self._test_container_exec(mock_find_container)

    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_exec_deprecated(self, mock_find_container):
        self._test_container_exec(
            mock_find_container, docker_version='0.7.0', deprecated=True)

    def _test_container_exec_with_failure(
            self, mock_find_container, docker_version='1.2.2',
            deprecated=False):
        """Drive container_exec when the docker exec call raises APIError."""
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        # NOTE(review): same unrestored docker.version patch as above.
        docker.version = docker_version
        mock_find_container.return_value = mock_docker_id
        with mock.patch.object(errors.APIError, '__str__',
                               return_value='hit error') as mock_init:
            if deprecated:
                self.mock_docker.execute = mock.Mock(
                    side_effect=errors.APIError('Error', '', ''))
            else:
                self.mock_docker.exec_create = mock.Mock(
                    side_effect=errors.APIError('Error', '', ''))
            self.assertRaises(exception.ContainerException,
                              self.conductor.container_exec,
                              None, mock_container_uuid, 'ls')
            if deprecated:
                self.mock_docker.execute.assert_called_once_with(
                    mock_docker_id, 'ls')
            else:
                self.mock_docker.exec_create.assert_called_once_with(
                    mock_docker_id, 'ls', True, True, False)
            mock_find_container.assert_called_once_with(self.mock_docker,
                                                        mock_container_uuid)
            mock_init.assert_called_with()

    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_exec_with_failure(self, mock_find_container):
        self._test_container_exec_with_failure(mock_find_container)

    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_exec_deprecated_with_failure(self, mock_find_container):
        self._test_container_exec_with_failure(
            mock_find_container, docker_version='0.7.0', deprecated=True)

    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_logs(self, mock_find_container):
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        self.conductor.container_logs(None, mock_container_uuid)
        self.mock_docker.logs.assert_called_once_with(
            mock_docker_id)
        mock_find_container.assert_called_once_with(self.mock_docker,
                                                    mock_container_uuid)

    @mock.patch.object(errors.APIError, '__str__')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    def test_container_logs_with_failure(self, mock_find_container, mock_init):
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        mock_init.return_value = 'hit error'
        self.mock_docker.logs = mock.Mock(
            side_effect=errors.APIError('Error', '', ''))
        self.assertRaises(exception.ContainerException,
                          self.conductor.container_logs,
                          None, mock_container_uuid)
        self.mock_docker.logs.assert_called_once_with(
            mock_docker_id)
        mock_find_container.assert_called_once_with(self.mock_docker,
                                                    mock_container_uuid)
        mock_init.assert_called_with()

    def test_container_common_exception(self):
        # Any unexpected error while obtaining the docker client must be
        # wrapped in ContainerException by every public handler method.
        self.dfc_context_manager.__enter__.side_effect = Exception("So bad")
        for action in ('container_exec', 'container_logs', 'container_show',
                       'container_delete', 'container_create',
                       '_container_action'):
            func = getattr(self.conductor, action)
            self.assertRaises(exception.ContainerException,
                              func, None, None)
| {
"content_hash": "549f20b767b1b480a4d6b1619ca1958e",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 79,
"avg_line_length": 47.93211009174312,
"alnum_prop": 0.594265589710217,
"repo_name": "jay-lau/magnum",
"id": "1d4e40dbd3bfe74699f61c0ccf6a15c091499091",
"size": "26719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magnum/tests/unit/conductor/handlers/test_docker_conductor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "Python",
"bytes": "393112"
}
],
"symlink_target": ""
} |
class HttpResponseError(Exception):
    """Raised when an HTTP response carries an error status."""

    def __init__(self, code, reason):
        #: numeric status code (example: 404)
        self.status_code = code
        #: title-cased reason phrase (example: "Not Found")
        self.reason = reason.title()
        #: human-readable summary (example: "404 - Not Found")
        self.msg = "{0} - {1}".format(self.status_code, self.reason)

    def __str__(self):
        return self.msg
class StatusCode(object):
    """Wraps an HTTP status code and its reason phrase."""

    # Client (4xx) and server (5xx) codes treated as failures.
    http_errors = (400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411,
                   412, 413, 414, 415, 416, 417, 500, 501, 502, 503, 504, 505)

    def __init__(self, status_code, reason):
        #: message for the response (example: Success)
        self.reason = reason
        #: code of the response (example: 200)
        self.code = status_code

    def __cmp__(self, other):
        # Legacy Py2-style comparison: 0 when the codes match, 1 otherwise.
        return 0 if self.code == other else 1

    def is_valid_response(self):
        """
        Returns ``True`` if the response is valid (:attr:`code` < 400).

        Otherwise, raises an :class:`HttpResponseError <splinter.request_handler.status_code.HttpResponseError>`
        exception.
        """
        if self.code in self.http_errors:
            raise HttpResponseError(self.code, self.reason)
        return True

    def is_success(self):
        """
        Returns ``True`` if the response was succed, otherwise, returns ``False``.
        """
        return self.code not in self.http_errors
| {
"content_hash": "7ae8aa79c9ab170dfd70ff8cfd3d6270",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 112,
"avg_line_length": 30.056603773584907,
"alnum_prop": 0.5693659761456371,
"repo_name": "softak/webfaction_demo",
"id": "29603c2461a16a8900337e5794b5e2556b03fcec",
"size": "1618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/splinter/request_handler/status_code.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "33283"
},
{
"name": "JavaScript",
"bytes": "984889"
},
{
"name": "Python",
"bytes": "8055804"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
} |
from core.database.generator import *
from core.Outputs.C import *
from core.Outputs.Cplusplus import *
from core.Outputs.Dll import *
from core.Outputs.exe import *
from core.Outputs.python import *
from core.Outputs.txt import *
# Generate a windows "messagebox" shellcode with the given (dummy) payload
# text, then wrap it into a Windows executable.
shellcode = generator("windows", "messagebox", "asdasdyrkrykrk")
ExeFile(shellcode, "windows")
# Alternative output kept for reference: dump the raw shellcode instead.
#RawFile(shellcode)
| {
"content_hash": "b310fc0143a7fd09be5ff1cfce6a1144",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 18.736842105263158,
"alnum_prop": 0.7584269662921348,
"repo_name": "roissy/l0l",
"id": "93abf673ba30cf1501bb60252bb6740b265ee926",
"size": "364",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/Outputs/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9337"
},
{
"name": "C++",
"bytes": "74201"
},
{
"name": "Makefile",
"bytes": "1159"
},
{
"name": "Python",
"bytes": "95905"
},
{
"name": "Shell",
"bytes": "54"
}
],
"symlink_target": ""
} |
import unittest
import discretize
import discretize
from SimPEG import maps
from SimPEG import simulation
import numpy as np
class TestTimeSimulation(unittest.TestCase):
    """Checks for BaseTimeSimulation's time-stepping bookkeeping."""

    def setUp(self):
        grid = discretize.TensorMesh([10, 10])
        self.sim = simulation.BaseTimeSimulation(grid)

    def test_timeProblem_setTimeSteps(self):
        """(dt, count) tuples expand to a flat array of step sizes."""
        self.sim.time_steps = [(1e-6, 3), 1e-5, (1e-4, 2)]
        expected = np.r_[1e-6, 1e-6, 1e-6, 1e-5, 1e-4, 1e-4]
        self.assertTrue(np.all(self.sim.time_steps == expected))
        # Assigning the already-expanded array must round-trip unchanged.
        self.sim.time_steps = expected
        self.assertTrue(np.all(self.sim.time_steps == expected))
        self.assertTrue(self.sim.nT == 6)
        self.assertTrue(np.all(self.sim.times == np.r_[0, expected].cumsum()))
# Allow running this test module directly with `python test_problem.py`.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "e2713125ec6ed00fb0b13be82dad381e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 76,
"avg_line_length": 28.321428571428573,
"alnum_prop": 0.6431273644388399,
"repo_name": "simpeg/simpeg",
"id": "65fa1aa39f7a76a82edb85a2a451fc70631af1db",
"size": "793",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/base/test_problem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "3476002"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
@click.command()
@click_util.use_args
def shell(args):
"""Start a Python shell for API use.
**NOTE:** This is a developer feature.
"""
from . import shell_impl
shell_impl.main(args)
| {
"content_hash": "a3e69670d871fe8430ac28cdc49605cb",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 42,
"avg_line_length": 16.94736842105263,
"alnum_prop": 0.6770186335403726,
"repo_name": "guildai/guild",
"id": "cd9d7404576d2217a984bc9ed9aa8ceb502187d0",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guild/commands/shell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "416"
},
{
"name": "JavaScript",
"bytes": "29682"
},
{
"name": "Makefile",
"bytes": "2621"
},
{
"name": "Python",
"bytes": "736181"
},
{
"name": "Shell",
"bytes": "1074"
},
{
"name": "Vue",
"bytes": "48469"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.contrib.auth import get_user_model
from django.core import mail
from django.utils.translation import ugettext as _
from django.test.utils import override_settings
from django.core.urlresolvers import NoReverseMatch
from ....core.tests import utils
from ..forms import RegistrationForm, ResendActivationForm, LoginForm
from ..backends import EmailAuthBackend
from ...utils.tokens import UserActivationTokenGenerator
from ...models import UserProfile
from .urls import CustomRegisterForm
# Resolve the active user model once at import time (honours a custom
# AUTH_USER_MODEL if the project defines one).
User = get_user_model()
class UserViewTest(TestCase):
def setUp(self):
    """Create the users, category and topics shared by these tests."""
    # Start from an empty cache — presumably so rate-limit counters from
    # other tests don't leak in (see the rate-limit tests below).
    cache.clear()
    self.user = utils.create_user()
    self.user2 = utils.create_user()
    self.category = utils.create_category()
    self.topic = utils.create_topic(self.category, user=self.user2)
    self.topic2 = utils.create_topic(self.category)
def test_login_email(self):
    """A user can authenticate using the email address as username."""
    login_url = reverse('spirit:user:auth:login')

    # The login page renders for anonymous visitors.
    self.assertEqual(self.client.get(login_url).status_code, 200)

    # Posting email + password logs in and redirects to profile update.
    response = self.client.post(
        login_url, {'username': self.user.email, 'password': "bar"})
    self.assertRedirects(
        response, reverse('spirit:user:update'), status_code=302)
def test_login_redirect(self):
    """An already-authenticated user is redirected off the login page."""
    utils.login(self)
    login_url = reverse('spirit:user:auth:login')

    # Default target: the user's own profile.
    self.assertRedirects(
        self.client.get(login_url),
        self.user.st.get_absolute_url(),
        status_code=302)

    # An explicit ?next= wins over the default target.
    self.assertRedirects(
        self.client.get(login_url + '?next=/fakepath/'),
        '/fakepath/',
        status_code=302,
        target_status_code=404)
def test_register(self):
    """A new account can be registered; logged-in users are redirected."""
    register_url = reverse('spirit:user:auth:register')

    # The registration form renders for anonymous visitors.
    self.assertEqual(self.client.get(register_url).status_code, 200)

    # A valid submission creates the account and redirects to login.
    payload = {
        'username': 'uniquefoo',
        'email': 'some@some.com',
        'email2': 'some@some.com',
        'password': 'pass',
    }
    self.assertRedirects(
        self.client.post(register_url, payload),
        reverse('spirit:user:auth:login'),
        status_code=302)

    # Logged-in users are bounced to their profile update page.
    utils.login(self)
    self.assertRedirects(
        self.client.get(register_url),
        reverse('spirit:user:update'),
        status_code=302)
def test_register_email_sent(self):
    """Registering sends exactly one activation email."""
    form_data = {'username': 'uniquefoo', 'email': 'some@some.com',
                 'email2': 'some@some.com', 'password': 'pass'}
    response = self.client.post(reverse('spirit:user:auth:register'), form_data)
    self.assertEqual(response.status_code, 302)
    # Use assertEqual: assertEquals is a deprecated alias, and the rest of
    # this test class already uses assertEqual.
    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].subject, _("User activation"))
def test_register_next_logged_in(self):
    """?next= is honoured when a logged-in user hits the register page."""
    utils.login(self)
    url = reverse('spirit:user:auth:register') + "?next=/fakepath/"
    self.assertRedirects(
        self.client.get(url), '/fakepath/',
        status_code=302, target_status_code=404)
@override_settings(ROOT_URLCONF='spirit.user.auth.tests.urls')
def test_register_custom_form(self):
    """Both GET and an (empty) POST must use the custom form class."""
    url = reverse('spirit:user:auth:register')
    for response in (self.client.get(url), self.client.post(url, {})):
        self.assertIsInstance(response.context['form'], CustomRegisterForm)
def test_login_rate_limit(self):
"""
test rate limit 5/5m
"""
form_data = {'username': self.user.email, 'password': "badpassword"}
for attempt in range(5):
url = reverse('spirit:user:auth:login')
response = self.client.post(url, form_data)
self.assertTemplateUsed(response, 'spirit/user/auth/login.html')
url = reverse('spirit:user:auth:login') + "?next=/path/"
response = self.client.post(url, form_data)
self.assertRedirects(response, url, status_code=302)
def test_custom_reset_password(self):
"""
test rate limit 5/5m
"""
form_data = {'email': "bademail@bad.com", }
for attempt in range(5):
response = self.client.post(reverse('spirit:user:auth:password-reset'), form_data)
expected_url = reverse("spirit:user:auth:password-reset-done")
self.assertRedirects(response, expected_url, status_code=302)
response = self.client.post(reverse('spirit:user:auth:password-reset'), form_data)
expected_url = reverse("spirit:user:auth:password-reset")
self.assertRedirects(response, expected_url, status_code=302)
def test_password_reset_confirm(self):
"""
test access
"""
response = self.client.get(
reverse(
'spirit:user:auth:password-reset-confirm',
kwargs={'uidb64': 'f-a-k-e', 'token': 'f-a-k-e'}
)
)
self.assertEqual(response.status_code, 200)
def test_admin_login(self):
"""
Redirect to regular user login (optional)
make sure you added:
admin.site.login = login_required(admin.site.login)
to urls.py (the one in your project's root)
"""
# TODO: document that devs should be doing this.
try:
url = reverse('admin:login')
except NoReverseMatch:
return
response = self.client.get(url)
expected_url = reverse("spirit:user:auth:login") + "?next=" + reverse('admin:login')
self.assertRedirects(response, expected_url, status_code=302)
def test_registration_activation(self):
"""
registration activation
"""
self.user.st.is_verified = False
self.user.is_active = False
self.user.save()
token = UserActivationTokenGenerator().generate(self.user)
response = self.client.get(
reverse(
'spirit:user:auth:registration-activation',
kwargs={'pk': self.user.pk, 'token': token}
)
)
expected_url = reverse("spirit:user:auth:login")
self.assertRedirects(response, expected_url, status_code=302)
self.assertTrue(User.objects.get(pk=self.user.pk).is_active)
def test_registration_activation_invalid(self):
"""
Activation token should not work if user is verified
ActiveUserMiddleware required
"""
self.user.st.is_verified = False
token = UserActivationTokenGenerator().generate(self.user)
utils.login(self)
User.objects.filter(pk=self.user.pk).update(is_active=False)
UserProfile.objects.filter(user__pk=self.user.pk).update(is_verified=True)
response = self.client.get(
reverse(
'spirit:user:auth:registration-activation',
kwargs={'pk': self.user.pk, 'token': token}
)
)
expected_url = reverse("spirit:user:auth:login")
self.assertRedirects(response, expected_url, status_code=302)
self.assertFalse(User.objects.get(pk=self.user.pk).is_active)
def test_resend_activation_email(self):
"""
resend_activation_email
"""
user = utils.create_user(password="foo")
form_data = {'email': user.email,
'password': "foo"}
response = self.client.post(reverse('spirit:user:auth:resend-activation'),
form_data)
expected_url = reverse("spirit:user:auth:login")
self.assertRedirects(response, expected_url, status_code=302)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].subject, _("User activation"))
# get
response = self.client.get(reverse('spirit:user:auth:resend-activation'))
self.assertEquals(response.status_code, 200)
def test_resend_activation_email_invalid_previously_logged_in(self):
"""
resend_activation_email invalid if is_verified was set
"""
user = utils.create_user(password="foo")
user.st.is_verified = True
user.st.save()
form_data = {'email': user.email,
'password': "foo"}
response = self.client.post(reverse('spirit:user:auth:resend-activation'),
form_data)
self.assertEquals(response.status_code, 302)
self.assertEquals(len(mail.outbox), 0)
def test_resend_activation_email_invalid_email(self):
"""
resend_activation_email invalid password
"""
utils.create_user(password="foo")
form_data = {'email': "bad@foo.com", }
response = self.client.post(reverse('spirit:user:auth:resend-activation'),
form_data)
self.assertEquals(response.status_code, 302)
self.assertEquals(len(mail.outbox), 0)
def test_resend_activation_email_redirect_logged(self):
"""
resend_activation_email redirect to profile if user is logged in
"""
utils.login(self)
response = self.client.get(reverse('spirit:user:auth:resend-activation'))
expected_url = reverse("spirit:user:update")
self.assertRedirects(response, expected_url, status_code=302)
def test_logout(self):
"""
should log out on POST only
"""
utils.login(self)
# get should display confirmation message
response = self.client.get(reverse('spirit:user:auth:logout'))
self.assertEqual(response.status_code, 200)
self.assertTrue(self.client.session.items())
# post should log out the user (clear the session)
response = self.client.post(reverse('spirit:user:auth:logout'))
expected_url = "/"
self.assertRedirects(response, expected_url, status_code=302)
self.assertFalse(self.client.session.items())
# next
utils.login(self)
self.assertTrue(self.client.session.items())
response = self.client.post(reverse('spirit:user:auth:logout') + '?next=/fakepath/')
self.assertRedirects(response, '/fakepath/', status_code=302, target_status_code=404)
self.assertFalse(self.client.session.items())
    def test_logout_anonymous_redirect(self):
        """
        Anonymous users hitting the logout view are redirected to the
        login page (or to ?next= when provided).
        """
        # redirect to login if user is anonymous
        response = self.client.get(reverse('spirit:user:auth:logout'))
        expected_url = reverse("spirit:user:auth:login")
        self.assertRedirects(response, expected_url, status_code=302)
        # next if user is anonymous
        response = self.client.get(reverse('spirit:user:auth:logout') + '?next=/fakepath/')
        self.assertRedirects(response, '/fakepath/', status_code=302, target_status_code=404)
class UserFormTest(TestCase):
    """Tests for the registration, resend-activation and login forms."""

    def setUp(self):
        cache.clear()
        self.user = utils.create_user()

    def test_registration(self):
        """
        register
        """
        form_data = {'username': 'foo', 'email': 'foo@foo.com',
                     'email2': 'foo@foo.com', 'password': 'pass'}
        form = RegistrationForm(data=form_data)
        self.assertTrue(form.is_valid())

    def test_registration_login(self):
        """
        Register and login
        """
        form_data = {'username': 'foo', 'email': 'foo@foo.com',
                     'email2': 'foo@foo.com', 'password': 'pass'}
        form = RegistrationForm(data=form_data)
        self.assertTrue(form.is_valid())
        user = form.save()
        # new accounts start inactive until the activation link is used
        self.assertFalse(user.is_active)
        user.is_active = True
        user.save()
        utils.login(self, user=user, password='pass')  # Asserts if can't login

    def test_registration_email_required(self):
        """
        Registration should require the email field
        """
        form_data = {'username': 'foo',
                     'password': 'pass'}
        form = RegistrationForm(data=form_data)
        self.assertFalse(form.is_valid())
        self.assertIn('email', form.errors)

    def test_registration_invalid(self):
        """
        invalid email and user
        """
        User.objects.create_user(username="foo", password="bar", email="foo@foo.com")
        form_data = {'username': 'foo', 'email': 'foo@foo.com',
                     'email2': 'foo@foo.com', 'password': 'pass'}
        form = RegistrationForm(data=form_data)
        self.assertFalse(form.is_valid())
        self.assertNotIn('username', form.cleaned_data)
        self.assertNotIn('foo@foo.com', form.cleaned_data)

    def test_registration_honeypot(self):
        """
        registration honeypot
        """
        form_data = {'username': 'foo', 'email': 'foo@foo.com',
                     'email2': 'foo@foo.com', 'password': 'pass',
                     'honeypot': 'im a robot'}
        form = RegistrationForm(data=form_data)
        self.assertFalse(form.is_valid())
        self.assertNotIn('honeypot', form.cleaned_data)

    def test_registration_email_duplication(self):
        """
        register, don't allow email duplication
        """
        utils.create_user(email='duplicated@bar.com')
        form_data = {'username': 'foo', 'email': 'duplicated@bar.com',
                     'email2': 'duplicated@bar.com', 'password': 'pass'}
        form = RegistrationForm(data=form_data)
        self.assertFalse(form.is_valid())
        self.assertNotIn('email', form.cleaned_data)

    @override_settings(ST_UNIQUE_EMAILS=False)
    def test_registration_email_duplication_allowed(self):
        """
        Duplicated email allowed
        """
        utils.create_user(email='duplicated@bar.com')
        form_data = {'username': 'foo', 'email': 'duplicated@bar.com',
                     'email2': 'duplicated@bar.com', 'password': 'pass'}
        form = RegistrationForm(data=form_data)
        self.assertTrue(form.is_valid())

    def test_registration_email_confirmation(self):
        """
        Confirmation email should match email
        """
        form_data = {'username': 'foo', 'email': 'foo@bar.com',
                     'email2': 'foofoo@bar.com', 'password': 'pass'}
        form = RegistrationForm(data=form_data)
        self.assertFalse(form.is_valid())
        self.assertNotIn('email2', form.cleaned_data)

    @override_settings(ST_CASE_INSENSITIVE_EMAILS=True)
    def test_registration_email_confirmation_case_insensitive(self):
        """
        Confirmation email should match email ignoring case
        """
        form_data = {'username': 'foo', 'email': 'FOO@bar.com',
                     'email2': 'FOO@BAR.COM', 'password': 'pass'}
        form = RegistrationForm(data=form_data)
        self.assertTrue(form.is_valid())

    @override_settings(ST_CASE_INSENSITIVE_EMAILS=False)
    def test_registration_email_confirmation_case_sensitive(self):
        """
        Confirmation email must match email exactly
        """
        form_data = {'username': 'foo', 'email': 'FOO@bar.com',
                     'email2': 'FOO@BAR.COM', 'password': 'pass'}
        form = RegistrationForm(data=form_data)
        self.assertFalse(form.is_valid())
        self.assertNotIn('email2', form.cleaned_data)

    def test_resend_activation_email(self):
        """
        resend activation
        """
        user = utils.create_user(email="newfoo@bar.com")
        form_data = {'email': 'newfoo@bar.com', }
        form = ResendActivationForm(form_data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.get_user(), user)

    def test_resend_activation_email_invalid_email(self):
        """
        resend activation invalid
        """
        form_data = {'email': 'bad@bar.com', }
        form = ResendActivationForm(form_data)
        self.assertFalse(form.is_valid())

    def test_resend_activation_email_duplication(self):
        """
        Send email to the first *not verified* user found
        """
        utils.create_user(email="duplicated@bar.com")
        user2 = utils.create_user(email="duplicated@bar.com")
        user3 = utils.create_user(email="duplicated@bar.com")
        form_data = {'email': 'duplicated@bar.com', }
        form = ResendActivationForm(form_data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.get_user(), user3)
        # once user3 is verified, the next unverified user is picked
        user3.st.is_verified = True
        user3.st.save()
        form = ResendActivationForm(form_data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.get_user(), user2)

    @override_settings(ST_CASE_INSENSITIVE_EMAILS=True)
    def test_resend_activation_email_case_insensitive(self):
        """
        Should lower the email before checking it
        """
        user = utils.create_user(email="newfoo@bar.com")
        form_data = {'email': 'NeWfOO@bAr.COM', }
        form = ResendActivationForm(form_data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.get_user(), user)

    @override_settings(ST_CASE_INSENSITIVE_EMAILS=False)
    def test_resend_activation_email_case_sensitive(self):
        """
        Should NOT lower the email before checking it
        """
        utils.create_user(email="newfoo@bar.com")
        form_data = {'email': 'NeWfOO@bAr.COM', }
        form = ResendActivationForm(form_data)
        self.assertFalse(form.is_valid())
        self.assertRaises(AttributeError, form.get_user)

    def test_login(self):
        """
        Should login the user
        """
        utils.create_user(username="foobar", password="foo")
        form_data = {'username': "foobar", 'password': "foo"}
        form = LoginForm(data=form_data)
        self.assertTrue(form.is_valid())

    def test_login_email(self):
        """
        Should login the user by email
        """
        utils.create_user(email="foobar@bar.com", password="foo")
        form_data = {'username': "foobar@bar.com", 'password': "foo"}
        form = LoginForm(data=form_data)
        self.assertTrue(form.is_valid())

    @override_settings(ST_CASE_INSENSITIVE_EMAILS=False)
    def test_login_email_case_sensitive(self):
        """
        Should NOT login when the email case does not match
        """
        utils.create_user(email="foobar@bar.com", password="foo")
        form_data = {'username': "FOOBAR@bar.com", 'password': "foo"}
        form = LoginForm(data=form_data)
        self.assertFalse(form.is_valid())

    @override_settings(ST_CASE_INSENSITIVE_EMAILS=True)
    def test_login_email_case_insensitive(self):
        """
        Should login the user by email ignoring case
        """
        # Renamed from test_login_email_case_sensitive: the original name
        # duplicated the method above, so the case-sensitive test never ran.
        utils.create_user(email="foobar@bar.com", password="foo")
        form_data = {'username': "FOOBAR@bar.com", 'password': "foo"}
        form = LoginForm(data=form_data)
        self.assertTrue(form.is_valid())

    def test_login_invalid(self):
        """
        Should not login invalid user
        """
        form = LoginForm(data={})
        self.assertFalse(form.is_valid())

    def test_login_password_invalid(self):
        """
        Should not login on a wrong password
        """
        utils.create_user(username="foobar", password="foo")
        form_data = {'username': "foobar", 'password': "bad"}
        form = LoginForm(data=form_data)
        self.assertFalse(form.is_valid())

    def test_login_username_invalid(self):
        """
        Should not login an unknown username
        """
        utils.create_user(username="foobar", password="foo")
        form_data = {'username': "bad", 'password': "foo"}
        form = LoginForm(data=form_data)
        self.assertFalse(form.is_valid())
class UserBackendTest(TestCase):
    """Tests for the email-based authentication backend."""

    def setUp(self):
        cache.clear()
        self.user = utils.create_user(email="foobar@bar.com", password="bar")

    def test_email_auth_backend(self):
        """Authenticates by email and returns the matching user."""
        result = EmailAuthBackend().authenticate(
            username="foobar@bar.com", password="bar")
        self.assertEqual(result, self.user)

    def test_email_auth_backend_email_duplication(self):
        """
        it should NOT authenticate when the email is not unique (current behaviour, sorry)
        """
        utils.create_user(email="duplicated@bar.com", password="foo")
        utils.create_user(email="duplicated@bar.com", password="foo2")
        result = EmailAuthBackend().authenticate(
            username="duplicated@bar.com", password="foo")
        self.assertIsNone(result)

    @override_settings(ST_CASE_INSENSITIVE_EMAILS=True)
    def test_email_auth_backend_case_insensitive(self):
        """Email case is ignored when the setting is enabled."""
        result = EmailAuthBackend().authenticate(
            username="FooBar@bAr.COM", password="bar")
        self.assertEqual(result, self.user)

    @override_settings(ST_CASE_INSENSITIVE_EMAILS=False)
    def test_email_auth_backend_case_sensitive(self):
        """Email case must match exactly when the setting is disabled."""
        result = EmailAuthBackend().authenticate(
            username="FooBar@bAr.COM", password="bar")
        self.assertIsNone(result)
| {
"content_hash": "a25e03224a9f513321441c07cc9a67f7",
"timestamp": "",
"source": "github",
"line_count": 568,
"max_line_length": 94,
"avg_line_length": 38.25,
"alnum_prop": 0.6084875264659855,
"repo_name": "david30907d/feedback_django",
"id": "61258ef5365f9b1ccb29792b32854ca6ed34aa27",
"size": "21751",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "example/spirit/user/auth/tests/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "266788"
},
{
"name": "CoffeeScript",
"bytes": "222884"
},
{
"name": "HTML",
"bytes": "384426"
},
{
"name": "JavaScript",
"bytes": "61542"
},
{
"name": "Python",
"bytes": "1277926"
}
],
"symlink_target": ""
} |
"""Synthetic BERT data loader."""
from typing import Mapping, Any
import numpy as np
from load_test.data import data_loader
class SyntheticBertLoader(data_loader.DataLoader):
  """A simple dataloader that creates synthetic BERT samples."""

  def __init__(
      self,
      seq_length: int = 384,
      use_v2_feature_names: bool = True,
      **kwargs: Mapping[str, Any]):
    """Initializes the loader.

    Args:
      seq_length: Length of every generated feature vector.
      use_v2_feature_names: Whether to emit the v2 feature keys
        ('input_word_ids'/'input_type_ids') instead of the v1 keys
        ('input_ids'/'segment_ids').
      **kwargs: Unused; accepted for interface compatibility.
    """
    self.seq_length = seq_length
    if use_v2_feature_names:
      self.input_word_ids_field = 'input_word_ids'
      self.input_type_ids_field = 'input_type_ids'
    else:
      self.input_word_ids_field = 'input_ids'
      self.input_type_ids_field = 'segment_ids'
    # The mask feature name is the same in both naming schemes.
    self.input_mask_ids_field = 'input_mask'

  # Fixed: annotation previously used `np.array` (a factory function),
  # not the `np.ndarray` type.
  def get_sample(self, index: int) -> Mapping[str, np.ndarray]:
    """Generates a synthetic BERT query.

    Args:
      index: Ignored; every sample is identical.

    Returns:
      A dict mapping each feature name to a fresh all-ones int32 vector
      of length `seq_length`.
    """
    del index
    ones_seq = np.ones(self.seq_length, dtype=np.int32)
    return {
        self.input_word_ids_field: np.copy(ones_seq),
        self.input_type_ids_field: np.copy(ones_seq),
        self.input_mask_ids_field: np.copy(ones_seq),
    }
| {
"content_hash": "9a6593d495b9de1cfc2d32989760b336",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 64,
"avg_line_length": 31.78787878787879,
"alnum_prop": 0.6501429933269781,
"repo_name": "tensorflow/tpu",
"id": "1a97c656766157fe59dbfac6ab2c67fe79bb3e63",
"size": "1738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/experimental/inference/load_test/data/synthetic_bert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "754301"
},
{
"name": "Dockerfile",
"bytes": "2734"
},
{
"name": "Go",
"bytes": "226317"
},
{
"name": "Jupyter Notebook",
"bytes": "56231509"
},
{
"name": "Makefile",
"bytes": "2369"
},
{
"name": "Python",
"bytes": "3444271"
},
{
"name": "Shell",
"bytes": "21032"
},
{
"name": "Starlark",
"bytes": "164"
}
],
"symlink_target": ""
} |
"""MaskedAutoregressiveFlow bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import core as layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import template as template_ops
from tensorflow.python.ops import variable_scope as variable_scope_lib
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
# Public API of this module.
__all__ = [
    "MaskedAutoregressiveFlow",
    "masked_autoregressive_default_template",
    "masked_dense",
]
class MaskedAutoregressiveFlow(bijector.Bijector):
  """Affine MaskedAutoregressiveFlow bijector for vector-valued events.

  The affine autoregressive flow [(Papamakarios et al., 2016)][3] provides a
  relatively simple framework for user-specified (deep) architectures to learn
  a distribution over vector-valued events. Regarding terminology,

  "Autoregressive models decompose the joint density as a product of
  conditionals, and model each conditional in turn. Normalizing flows
  transform a base density (e.g. a standard Gaussian) into the target density
  by an invertible transformation with tractable Jacobian."
  [(Papamakarios et al., 2016)][3]

  In other words, the "autoregressive property" is equivalent to the
  decomposition, `p(x) = prod{ p(x[i] | x[0:i]) : i=0, ..., d }`. The provided
  `shift_and_log_scale_fn`, `masked_autoregressive_default_template`, achieves
  this property by zeroing out weights in its `masked_dense` layers.

  In the `tfp` framework, a "normalizing flow" is implemented as a
  `tfp.bijectors.Bijector`. The `forward` "autoregression"
  is implemented using a `tf.while_loop` and a deep neural network (DNN) with
  masked weights such that the autoregressive property is automatically met in
  the `inverse`.

  A `TransformedDistribution` using `MaskedAutoregressiveFlow(...)` uses the
  (expensive) forward-mode calculation to draw samples and the (cheap)
  reverse-mode calculation to compute log-probabilities. Conversely, a
  `TransformedDistribution` using `Invert(MaskedAutoregressiveFlow(...))` uses
  the (expensive) forward-mode calculation to compute log-probabilities and the
  (cheap) reverse-mode calculation to compute samples. See "Example Use"
  [below] for more details.

  Given a `shift_and_log_scale_fn`, the forward and inverse transformations are
  (a sequence of) affine transformations. A "valid" `shift_and_log_scale_fn`
  must compute each `shift` (aka `loc` or "mu" in [Germain et al. (2015)][1])
  and `log(scale)` (aka "alpha" in [Germain et al. (2015)][1]) such that each
  are broadcastable with the arguments to `forward` and `inverse`, i.e., such
  that the calculations in `forward`, `inverse` [below] are possible.

  For convenience, `masked_autoregressive_default_template` is offered as a
  possible `shift_and_log_scale_fn` function. It implements the MADE
  architecture [(Germain et al., 2015)][1]. MADE is a feed-forward network that
  computes a `shift` and `log(scale)` using `masked_dense` layers in a deep
  neural network. Weights are masked to ensure the autoregressive property. It
  is possible that this architecture is suboptimal for your task. To build
  alternative networks, either change the arguments to
  `masked_autoregressive_default_template`, use the `masked_dense` function to
  roll-out your own, or use some other architecture, e.g., using `tf.layers`.

  Warning: no attempt is made to validate that the `shift_and_log_scale_fn`
  enforces the "autoregressive property".

  Assuming `shift_and_log_scale_fn` has valid shape and autoregressive
  semantics, the forward transformation is

  ```python
  def forward(x):
    y = zeros_like(x)
    event_size = x.shape[-1]
    for _ in range(event_size):
      shift, log_scale = shift_and_log_scale_fn(y)
      y = x * math_ops.exp(log_scale) + shift
    return y
  ```

  and the inverse transformation is

  ```python
  def inverse(y):
    shift, log_scale = shift_and_log_scale_fn(y)
    return (y - shift) / math_ops.exp(log_scale)
  ```

  Notice that the `inverse` does not need a for-loop. This is because in the
  forward pass each calculation of `shift` and `log_scale` is based on the `y`
  calculated so far (not `x`). In the `inverse`, the `y` is fully known, thus is
  equivalent to the scaling used in `forward` after `event_size` passes, i.e.,
  the "last" `y` used to compute `shift`, `log_scale`. (Roughly speaking, this
  also proves the transform is bijective.)

  #### Examples

  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions
  tfb = tfp.bijectors

  dims = 5

  # A common choice for a normalizing flow is to use a Gaussian for the base
  # distribution. (However, any continuous distribution would work.) E.g.,
  maf = tfd.TransformedDistribution(
      distribution=tfd.Normal(loc=0., scale=1.),
      bijector=tfb.MaskedAutoregressiveFlow(
          shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
              hidden_layers=[512, 512])),
      event_shape=[dims])

  x = maf.sample()  # Expensive; uses `tf.while_loop`, no Bijector caching.
  maf.log_prob(x)   # Almost free; uses Bijector caching.
  maf.log_prob(0.)  # Cheap; no `tf.while_loop` despite no Bijector caching.

  # [Papamakarios et al. (2016)][3] also describe an Inverse Autoregressive
  # Flow [(Kingma et al., 2016)][2]:
  iaf = tfd.TransformedDistribution(
      distribution=tfd.Normal(loc=0., scale=1.),
      bijector=tfb.Invert(tfb.MaskedAutoregressiveFlow(
          shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
              hidden_layers=[512, 512]))),
      event_shape=[dims])

  x = iaf.sample()  # Cheap; no `tf.while_loop` despite no Bijector caching.
  iaf.log_prob(x)   # Almost free; uses Bijector caching.
  iaf.log_prob(0.)  # Expensive; uses `tf.while_loop`, no Bijector caching.

  # In many (if not most) cases the default `shift_and_log_scale_fn` will be a
  # poor choice. Here's an example of using a "shift only" version and with a
  # different number/depth of hidden layers.
  shift_only = True
  maf_no_scale_hidden2 = tfd.TransformedDistribution(
      distribution=tfd.Normal(loc=0., scale=1.),
      bijector=tfb.MaskedAutoregressiveFlow(
          tfb.masked_autoregressive_default_template(
              hidden_layers=[32],
              shift_only=shift_only),
          is_constant_jacobian=shift_only),
      event_shape=[dims])
  ```

  #### References

  [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:
       Masked Autoencoder for Distribution Estimation. In _International
       Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509

  [2]: Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya
       Sutskever, and Max Welling. Improving Variational Inference with Inverse
       Autoregressive Flow. In _Neural Information Processing Systems_, 2016.
       https://arxiv.org/abs/1606.04934

  [3]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
       Autoregressive Flow for Density Estimation. In _Neural Information
       Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
  """

  @deprecation.deprecated(
      "2018-10-01", "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               shift_and_log_scale_fn,
               is_constant_jacobian=False,
               validate_args=False,
               unroll_loop=False,
               name=None):
    """Creates the MaskedAutoregressiveFlow bijector.

    Args:
      shift_and_log_scale_fn: Python `callable` which computes `shift` and
        `log_scale` from both the forward domain (`x`) and the inverse domain
        (`y`). Calculation must respect the "autoregressive property" (see class
        docstring). Suggested default
        `masked_autoregressive_default_template(hidden_layers=...)`. Typically
        the function contains `tf.Variables` and is wrapped using
        `tf.compat.v1.make_template`. Returning `None` for either (both)
        `shift`, `log_scale` is equivalent to (but more efficient than)
        returning zero.
      is_constant_jacobian: Python `bool`. Default: `False`. When `True` the
        implementation assumes `log_scale` does not depend on the forward domain
        (`x`) or inverse domain (`y`) values. (No validation is made;
        `is_constant_jacobian=False` is always safe but possibly computationally
        inefficient.)
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      unroll_loop: Python `bool` indicating whether the `tf.while_loop` in
        `_forward` should be replaced with a static for loop. Requires that the
        final dimension of `x` be known at graph construction time. Defaults to
        `False`.
      name: Python `str`, name given to ops managed by this object.
    """
    name = name or "masked_autoregressive_flow"
    self._shift_and_log_scale_fn = shift_and_log_scale_fn
    self._unroll_loop = unroll_loop
    super(MaskedAutoregressiveFlow, self).__init__(
        forward_min_event_ndims=1,
        is_constant_jacobian=is_constant_jacobian,
        validate_args=validate_args,
        name=name)

  def _forward(self, x):
    """Runs the forward autoregression.

    Applies `shift_and_log_scale_fn` to the partial result `event_size`
    times, either as an unrolled Python loop (`unroll_loop=True`) or as a
    `tf.while_loop`.
    """
    if self._unroll_loop:
      event_size = tensor_shape.dimension_value(
          x.shape.with_rank_at_least(1)[-1])
      if event_size is None:
        raise ValueError(
            "The final dimension of `x` must be known at graph construction "
            "time if `unroll_loop=True`. `x.shape: %r`" % x.shape)
      y = array_ops.zeros_like(x, name="y0")
      for _ in range(event_size):
        shift, log_scale = self._shift_and_log_scale_fn(y)
        # next_y = scale * x + shift
        next_y = x
        if log_scale is not None:
          next_y *= math_ops.exp(log_scale)
        if shift is not None:
          next_y += shift
        y = next_y
      return y
    event_size = array_ops.shape(x)[-1]
    # If the event size is available at graph construction time, we can inform
    # the graph compiler of the maximum number of steps. If not,
    # static_event_size will be None, and the maximum_iterations argument will
    # have no effect.
    static_event_size = tensor_shape.dimension_value(
        x.shape.with_rank_at_least(1)[-1])
    y0 = array_ops.zeros_like(x, name="y0")
    # call the template once to ensure creation
    _ = self._shift_and_log_scale_fn(y0)
    def _loop_body(index, y0):
      """While-loop body for autoregression calculation."""
      # Set caching device to avoid re-getting the tf.Variable for every while
      # loop iteration.
      with variable_scope_lib.variable_scope(
          variable_scope_lib.get_variable_scope()) as vs:
        if vs.caching_device is None:
          vs.set_caching_device(lambda op: op.device)
        shift, log_scale = self._shift_and_log_scale_fn(y0)
      # y = scale * x + shift; `None` from the fn means "skip that term".
      y = x
      if log_scale is not None:
        y *= math_ops.exp(log_scale)
      if shift is not None:
        y += shift
      return index + 1, y
    _, y = control_flow_ops.while_loop(
        cond=lambda index, _: index < event_size,
        body=_loop_body,
        loop_vars=(0, y0),
        maximum_iterations=static_event_size)
    return y

  def _inverse(self, y):
    """Inverts the affine transform: x = (y - shift) * exp(-log_scale)."""
    shift, log_scale = self._shift_and_log_scale_fn(y)
    x = y
    if shift is not None:
      x -= shift
    if log_scale is not None:
      x *= math_ops.exp(-log_scale)
    return x

  def _inverse_log_det_jacobian(self, y):
    """Returns -sum(log_scale) over the event axis (0 when no scale)."""
    _, log_scale = self._shift_and_log_scale_fn(y)
    if log_scale is None:
      return constant_op.constant(0., dtype=y.dtype, name="ildj")
    return -math_ops.reduce_sum(log_scale, axis=-1)
# Mask types for the autoregressive masks below: "exclusive" skips the first
# block of rows (zeroing the block diagonal, used for the first MADE layer);
# "inclusive" keeps it.
MASK_INCLUSIVE = "inclusive"
MASK_EXCLUSIVE = "exclusive"
@deprecation.deprecated(
    "2018-10-01", "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.contrib.distributions`.",
    warn_once=True)
def _gen_slices(num_blocks, n_in, n_out, mask_type=MASK_EXCLUSIVE):
  """Generate the slices for building an autoregressive mask."""
  # TODO(b/67594795): Better support of dynamic shape.
  block_in = n_in // num_blocks
  block_out = n_out // num_blocks
  # Exclusive masks skip the first block of rows (zeroed block diagonal).
  row = block_out if mask_type == MASK_EXCLUSIVE else 0
  col = 0
  slices = []
  for _ in range(num_blocks):
    slices.append([slice(row, None), slice(col, col + block_in)])
    col += block_in
    row += block_out
  return slices
@deprecation.deprecated(
    "2018-10-01", "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.contrib.distributions`.",
    warn_once=True)
def _gen_mask(num_blocks,
              n_in,
              n_out,
              mask_type=MASK_EXCLUSIVE,
              dtype=dtypes.float32):
  """Generate the mask for building an autoregressive dense layer."""
  # TODO(b/67594795): Better support of dynamic shape.
  mask = np.zeros([n_out, n_in], dtype=dtype.as_numpy_dtype())
  # Mark every (row, col) slice produced for this mask type with ones.
  for row_slice, col_slice in _gen_slices(num_blocks, n_in, n_out,
                                          mask_type=mask_type):
    mask[row_slice, col_slice] = 1
  return mask
@deprecation.deprecated(
    "2018-10-01", "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.contrib.distributions`.",
    warn_once=True)
def masked_dense(inputs,
                 units,
                 num_blocks=None,
                 exclusive=False,
                 kernel_initializer=None,
                 reuse=None,
                 name=None,
                 *args,
                 **kwargs):
  """A autoregressively masked dense layer.

  Analogous to `tf.compat.v1.layers.dense`.

  See [Germain et al. (2015)][1] for detailed explanation.

  Arguments:
    inputs: Tensor input.
    units: Python `int` scalar representing the dimensionality of the output
      space.
    num_blocks: Python `int` scalar representing the number of blocks for the
      MADE masks.
    exclusive: Python `bool` scalar representing whether to zero the diagonal of
      the mask, used for the first layer of a MADE.
    kernel_initializer: Initializer function for the weight matrix. If `None`
      (default), weights are initialized using the
      `tf.glorot_random_initializer`.
    reuse: Python `bool` scalar representing whether to reuse the weights of a
      previous layer by the same name.
    name: Python `str` used to describe ops managed by this function.
    *args: `tf.compat.v1.layers.dense` arguments.
    **kwargs: `tf.compat.v1.layers.dense` keyword arguments.

  Returns:
    Output tensor.

  Raises:
    NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
      graph execution.

  #### References

  [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:
       Masked Autoencoder for Distribution Estimation. In _International
       Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
  """
  # TODO(b/67594795): Better support of dynamic shape.
  input_depth = tensor_shape.dimension_value(
      inputs.shape.with_rank_at_least(1)[-1])
  if input_depth is None:
    raise NotImplementedError(
        "Rightmost dimension must be known prior to graph execution.")
  # _gen_mask builds an [n_out, n_in] matrix; transpose to the dense
  # kernel's [in, out] layout.
  mask = _gen_mask(num_blocks, input_depth, units,
                   MASK_EXCLUSIVE if exclusive else MASK_INCLUSIVE).T
  if kernel_initializer is None:
    kernel_initializer = init_ops.glorot_normal_initializer()
  def masked_initializer(shape, dtype=None, partition_info=None):
    # Zero out masked weights at initialization time.
    return mask * kernel_initializer(shape, dtype, partition_info)
  with ops.name_scope(name, "masked_dense", [inputs, units, num_blocks]):
    # The mask is applied both at init and as a constraint so masked
    # entries stay zero throughout training.
    layer = layers.Dense(
        units,
        kernel_initializer=masked_initializer,
        kernel_constraint=lambda x: mask * x,
        name=name,
        dtype=inputs.dtype.base_dtype,
        _scope=name,
        _reuse=reuse,
        *args,
        **kwargs)
    return layer.apply(inputs)
@deprecation.deprecated(
    "2018-10-01", "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.contrib.distributions`.",
    warn_once=True)
def masked_autoregressive_default_template(
    hidden_layers,
    shift_only=False,
    activation=nn_ops.relu,
    log_scale_min_clip=-5.,
    log_scale_max_clip=3.,
    log_scale_clip_gradient=False,
    name=None,
    *args,
    **kwargs):
  """Build the Masked Autoregressive Density Estimator (Germain et al., 2015).

  This will be wrapped in a make_template to ensure the variables are only
  created once. It takes the input and returns the `loc` ("mu" in [Germain et
  al. (2015)][1]) and `log_scale` ("alpha" in [Germain et al. (2015)][1]) from
  the MADE network.

  Warning: This function uses `masked_dense` to create randomly initialized
  `tf.Variables`. It is presumed that these will be fit, just as you would any
  other neural architecture which uses `tf.compat.v1.layers.dense`.

  #### About Hidden Layers

  Each element of `hidden_layers` should be greater than the `input_depth`
  (i.e., `input_depth = tf.shape(input)[-1]` where `input` is the input to the
  neural network). This is necessary to ensure the autoregressivity property.

  #### About Clipping

  This function also optionally clips the `log_scale` (but possibly not its
  gradient). This is useful because if `log_scale` is too small/large it might
  underflow/overflow making it impossible for the `MaskedAutoregressiveFlow`
  bijector to implement a bijection. Additionally, the `log_scale_clip_gradient`
  `bool` indicates whether the gradient should also be clipped. The default does
  not clip the gradient; this is useful because it still provides gradient
  information (for fitting) yet solves the numerical stability problem. I.e.,
  `log_scale_clip_gradient = False` means
  `grad[exp(clip(x))] = grad[x] exp(clip(x))` rather than the usual
  `grad[clip(x)] exp(clip(x))`.

  Args:
    hidden_layers: Python `list`-like of non-negative integer, scalars
      indicating the number of units in each hidden layer. Default: `[512, 512]`.
    shift_only: Python `bool` indicating if only the `shift` term shall be
      computed. Default: `False`.
    activation: Activation function (callable). Explicitly setting to `None`
      implies a linear activation.
    log_scale_min_clip: `float`-like scalar `Tensor`, or a `Tensor` with the
      same shape as `log_scale`. The minimum value to clip by. Default: -5.
    log_scale_max_clip: `float`-like scalar `Tensor`, or a `Tensor` with the
      same shape as `log_scale`. The maximum value to clip by. Default: 3.
    log_scale_clip_gradient: Python `bool` indicating that the gradient of
      `tf.clip_by_value` should be preserved. Default: `False`.
    name: A name for ops managed by this function. Default:
      "masked_autoregressive_default_template".
    *args: `tf.compat.v1.layers.dense` arguments.
    **kwargs: `tf.compat.v1.layers.dense` keyword arguments.

  Returns:
    shift: `Float`-like `Tensor` of shift terms (the "mu" in
      [Germain et al. (2015)][1]).
    log_scale: `Float`-like `Tensor` of log(scale) terms (the "alpha" in
      [Germain et al. (2015)][1]).

  Raises:
    NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
      graph execution.

  #### References

  [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:
       Masked Autoencoder for Distribution Estimation. In _International
       Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
  """
  name = name or "masked_autoregressive_default_template"
  with ops.name_scope(name, values=[log_scale_min_clip, log_scale_max_clip]):

    def _fn(x):
      """MADE parameterized via `masked_autoregressive_default_template`."""
      # TODO(b/67594795): Better support of dynamic shape.
      input_depth = tensor_shape.dimension_value(
          x.shape.with_rank_at_least(1)[-1])
      if input_depth is None:
        raise NotImplementedError(
            "Rightmost dimension must be known prior to graph execution.")
      # Preserve the incoming (possibly dynamic) shape for the final reshape.
      input_shape = (
          np.int32(x.shape.as_list())
          if x.shape.is_fully_defined() else array_ops.shape(x))
      for i, units in enumerate(hidden_layers):
        x = masked_dense(
            inputs=x,
            units=units,
            num_blocks=input_depth,
            # Only the first MADE layer zeroes the mask diagonal.
            exclusive=True if i == 0 else False,
            activation=activation,
            *args,
            **kwargs)
      # Final linear layer emits shift (and log_scale unless shift_only).
      x = masked_dense(
          inputs=x,
          units=(1 if shift_only else 2) * input_depth,
          num_blocks=input_depth,
          activation=None,
          *args,
          **kwargs)
      if shift_only:
        x = array_ops.reshape(x, shape=input_shape)
        return x, None
      x = array_ops.reshape(
          x, shape=array_ops.concat([input_shape, [2]], axis=0))
      shift, log_scale = array_ops.unstack(x, num=2, axis=-1)
      # Choose plain clipping vs. gradient-preserving clipping (see docstring).
      which_clip = (
          math_ops.clip_by_value
          if log_scale_clip_gradient else _clip_by_value_preserve_grad)
      log_scale = which_clip(log_scale, log_scale_min_clip, log_scale_max_clip)
      return shift, log_scale

    return template_ops.make_template(name, _fn)
@deprecation.deprecated(
    "2018-10-01", "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.contrib.distributions`.",
    warn_once=True)
def _clip_by_value_preserve_grad(x, clip_value_min, clip_value_max, name=None):
  """Clips `x` to [clip_value_min, clip_value_max] with an identity gradient.

  The forward value equals `clip_by_value(x, ...)`, but the clipped residual is
  wrapped in `stop_gradient`, so backprop sees the identity function.
  """
  with ops.name_scope(name, "clip_by_value_preserve_grad",
                      [x, clip_value_min, clip_value_max]):
    clipped = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
    # x + stop_gradient(clipped - x): value == clipped, gradient == d/dx x.
    return x + array_ops.stop_gradient(clipped - x)
| {
"content_hash": "b4ea6f149ce73bb76e45fbda254a9896",
"timestamp": "",
"source": "github",
"line_count": 564,
"max_line_length": 80,
"avg_line_length": 41.07446808510638,
"alnum_prop": 0.6781490114823449,
"repo_name": "chemelnucfin/tensorflow",
"id": "88855b27fd3dbdb73b9acd3415e8e393313aad41",
"size": "23855",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "16146"
},
{
"name": "C",
"bytes": "825231"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "75313939"
},
{
"name": "CMake",
"bytes": "207856"
},
{
"name": "Dockerfile",
"bytes": "80130"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "881711"
},
{
"name": "Jupyter Notebook",
"bytes": "1113647"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "853297"
},
{
"name": "Makefile",
"bytes": "109340"
},
{
"name": "Objective-C",
"bytes": "105235"
},
{
"name": "Objective-C++",
"bytes": "258793"
},
{
"name": "PHP",
"bytes": "38007"
},
{
"name": "Pascal",
"bytes": "3741"
},
{
"name": "Pawn",
"bytes": "14380"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "50825074"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "532610"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
} |
from oscar.apps.shipping import apps
class ShippingConfig(apps.ShippingConfig):
    """App config subclassing Oscar's shipping app config.

    Overrides `name` so Django registers the app under the local
    'shipping' package instead of Oscar's default dotted path.
    """
    name = 'shipping'
| {
"content_hash": "61229cf30fb646a757dcb440188a3897",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 42,
"avg_line_length": 20.8,
"alnum_prop": 0.7692307692307693,
"repo_name": "django-oscar/django-oscar",
"id": "f146c3fc93aa79e6fd9f7442c06d1b8bee4f8fc1",
"size": "104",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/_site/shipping/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "529"
},
{
"name": "HTML",
"bytes": "565297"
},
{
"name": "JavaScript",
"bytes": "41944"
},
{
"name": "Makefile",
"bytes": "4234"
},
{
"name": "Python",
"bytes": "2261460"
},
{
"name": "SCSS",
"bytes": "21815"
},
{
"name": "Shell",
"bytes": "308"
}
],
"symlink_target": ""
} |
'''
用法例子:
class ExampleDocument( Document ):
name = StringField( required=True, max_length=10 )
email = EmailField( required=True )
password = IntField( max=10000000 )
meta = {
'collection':'example'
}
@gen.engine
def your_method( arg, callback ):
result = yield motor.Op( .... )
callback( result, None )
class ExampleHandler( BaseHandler ):
@asynchronous
@gen.engine
def get( self ):
result = yield motor.Op( ExampleDocument().your_method, arg )
......
'''
import motor
from tornado import gen
from asytormongo.field import *
from exceptions import Exception
from bson.dbref import DBRef
from bson.objectid import ObjectId
# Module-level Motor handle for the "Qingcong" database; the client connects
# synchronously at import time (open_sync), so importing this module requires
# a reachable MongoDB instance.
db = motor.MotorClient().open_sync().Qingcong
class Document( object ):
    """Minimal asynchronous ODM base class backed by Motor (Python 2 / tornado).

    Subclasses declare fields as class attributes (instances of BaseField
    subclasses) and may set ``meta = {'collection': <name>}`` to choose the
    MongoDB collection; otherwise the subclass name is used.
    """

    def __init__( self ):
        # Class attributes declared on the concrete Document subclass.
        self.class_attr = self.__class__.__dict__
        # Collection name: meta['collection'] when present, else the class name.
        self.collection_name = self.meta[ 'collection' ] if 'meta' in self.class_attr and 'collection' in self.meta else self.__class__.__name__
        # The Motor collection handle (collection names are lower-cased).
        # NOTE(review): getattr(db, self.collection_name.lower()) would be
        # safer than eval here; the name comes from class metadata, not users,
        # so this is a style concern rather than an injection risk.
        self.collection = eval( 'db.' + self.collection_name.lower() )
        # Names of the declared field attributes on this document class.
        self.field_list = [ attr for attr in self.class_attr if isinstance( self.class_attr[ attr ], BaseField ) ]

    @gen.engine
    def insert( self, document, callback=None ):
        # First check that the incoming document conforms to the declared fields.
        for field_name in document:
            if field_name in self.field_list:
                # Run the field's own validation against the supplied value.
                setattr( self.class_attr[ field_name ], 'value', document[ field_name ] )
                getattr( self.class_attr[ field_name ], 'validate', None )()
            else:
                raise Exception, 'cann\'t find %s in %s' % ( field_name, self.__class__.__name__ )
            # Enforce uniqueness for fields declared with unique=True.
            if getattr( self.class_attr[ field_name ], 'unique', False ):
                result = yield motor.Op( self.collection.find_one, { field_name: document[ field_name ] } )
                if result:
                    raise Exception, '%s is unique!' % field_name
        # Enforce presence of all fields declared with required=True.
        for field_name in self.field_list:
            if getattr( self.class_attr[ field_name ], 'required', False ) and field_name not in document:
                raise Exception, '%s is required!' % field_name
        result = yield motor.Op( self.collection.insert, document )
        callback( result, None )

    @gen.engine
    def find_one( self, condition, callback=None ):
        # Async find_one; delivers (result, error) through the callback.
        result = yield motor.Op( self.collection.find_one, condition )
        callback( result, None )

    @gen.engine
    def update( self, condition, data, callback=None ):
        # Async update of documents matching `condition` with `data`.
        result = yield motor.Op( self.collection.update, condition, data )
        callback( result, None )

    @gen.engine
    def remove( self, condition, callback=None ):
        # Async removal of documents matching `condition`.
        result = yield motor.Op( self.collection.remove, condition )
        callback( result, None )

    # Fetch the document referenced by a DBRef.
    @staticmethod
    @gen.engine
    def translate_one_dbref( dbref, callback=None ):
        collection = eval( 'db.' + dbref.collection )
        result = yield motor.Op( collection.find_one, { '_id': ObjectId( dbref.id ) } )
        callback( result, None )

    # Resolve every DBRef-valued field of one document in place.
    @staticmethod
    @gen.engine
    def translate_dbref_in_one_document( document, callback=None ):
        for field in document:
            if isinstance( document[ field ], DBRef ):
                document[ field ] = yield motor.Op( Document.translate_one_dbref, document[ field ] )
        callback( document, None )

    # Resolve DBRef-valued fields for every document in a list.
    @staticmethod
    @gen.engine
    def translate_dbref_in_document_list( document_list, callback=None ):
        for document in document_list:
            document = yield motor.Op( Document.translate_dbref_in_one_document, document )
        callback( document_list, None )
| {
"content_hash": "e046008943c11ca33f0f0a64e974e115",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 138,
"avg_line_length": 31.833333333333332,
"alnum_prop": 0.6983711460151251,
"repo_name": "shiyanhui/Asytormongo",
"id": "689ca6c5aa93f7dde1386ed7edc17abdd240f960",
"size": "3623",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "asytormongo/document.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22710"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from django.conf import settings
from django.core.paginator import Paginator
from django.views.generic import FormView
from django.views.generic.edit import FormMixin
from django.views.generic.list import MultipleObjectMixin
from .forms import FacetedSearchForm, ModelSearchForm
from .query import SearchQuerySet
# Default page size for paginated search results; overridable through the
# HAYSTACK_SEARCH_RESULTS_PER_PAGE Django setting.
RESULTS_PER_PAGE = getattr(settings, 'HAYSTACK_SEARCH_RESULTS_PER_PAGE', 20)
class SearchMixin(MultipleObjectMixin, FormMixin):
    """
    Mixin that folds Haystack search support into Django class-based views.

    It provides the same end result as Haystack's classic function-style
    search view, but follows the normal Django CBV flow instead:

      1. build the search form from the request,
      2. run the search to obtain a result queryset,
      3. hand the form, query and results to the template context.
    """

    template_name = 'search/search.html'
    load_all = True
    form_class = ModelSearchForm
    queryset = SearchQuerySet()
    context_object_name = None
    paginate_by = RESULTS_PER_PAGE
    paginate_orphans = 0
    paginator_class = Paginator
    page_kwarg = 'page'
    form_name = 'form'
    search_field = 'q'
    object_list = None

    def get_form_kwargs(self):
        """Assemble the keyword arguments the search form is built with."""
        form_kwargs = {
            'initial': self.get_initial(),
            'searchqueryset': self.get_queryset(),
            'load_all': self.load_all,
        }
        # Search forms are GET-driven: bind the query-string data when present.
        if self.request.method == 'GET':
            form_kwargs['data'] = self.request.GET
        return form_kwargs

    def form_invalid(self, form):
        """Re-render the page with the unfiltered queryset."""
        ctx = self.get_context_data(**{
            self.form_name: form,
            'object_list': self.get_queryset(),
        })
        return self.render_to_response(ctx)

    def form_valid(self, form):
        """Execute the search and render its results."""
        self.queryset = form.search()
        ctx = self.get_context_data(**{
            self.form_name: form,
            'query': form.cleaned_data.get(self.search_field),
            'object_list': self.queryset,
        })
        return self.render_to_response(ctx)
class FacetedSearchMixin(SearchMixin):
    """
    A mixin that allows adding in a Haystack search functionality with search
    faceting.

    Set `facet_fields` to an iterable of field names to facet on; facet counts
    for the current queryset are exposed to the template context as 'facets'.
    """
    form_class = FacetedSearchForm
    facet_fields = None

    def get_form_kwargs(self):
        """Extend the form kwargs with the facets selected via the query string."""
        kwargs = super(FacetedSearchMixin, self).get_form_kwargs()
        kwargs.update({
            'selected_facets': self.request.GET.getlist("selected_facets")
        })
        return kwargs

    def get_context_data(self, **kwargs):
        """Expose the facet counts of the current queryset to the template."""
        context = super(FacetedSearchMixin, self).get_context_data(**kwargs)
        context.update({'facets': self.queryset.facet_counts()})
        return context

    def get_queryset(self):
        """Return the base queryset with faceting applied for each facet field."""
        qs = super(FacetedSearchMixin, self).get_queryset()
        # Guard against the default `facet_fields = None`: iterating None would
        # raise TypeError. Unset/empty simply means "no faceting".
        for field in (self.facet_fields or ()):
            qs = qs.facet(field)
        return qs
class SearchView(SearchMixin, FormView):
    """A view class for searching a Haystack managed search index."""

    def get(self, request, *args, **kwargs):
        """Bind the search form from the GET data and dispatch on validity."""
        form = self.get_form(self.get_form_class())
        if not form.is_valid():
            return self.form_invalid(form)
        return self.form_valid(form)
class FacetedSearchView(FacetedSearchMixin, SearchView):
    """
    A view class for searching a Haystack managed search index with
    facets.
    """
    # All behavior is inherited from FacetedSearchMixin + SearchView.
    pass
| {
"content_hash": "c192ea6131956f33b36ec8da52e788cf",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 82,
"avg_line_length": 29.574626865671643,
"alnum_prop": 0.6376482462780722,
"repo_name": "sgaist/django-haystack",
"id": "016ca0c65bb804eea1fc32c264059405001dcd23",
"size": "3982",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "haystack/generic_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1431"
},
{
"name": "Python",
"bytes": "875819"
},
{
"name": "Shell",
"bytes": "1961"
}
],
"symlink_target": ""
} |
from flask import request
def pass_token(fn):
    """Decorator that passes the request's bearer token to the wrapped view.

    Reads the ``Authorization`` header of the current Flask request (format
    ``"<auth-type> <token>"``, split on the first whitespace) and injects the
    token into the wrapped function as the ``sess_token`` keyword argument.

    :param fn: view function to wrap
    :return: wrapped function
    """
    from functools import wraps

    # functools.wraps preserves fn's __name__/__doc__; without it every
    # decorated view would be named "wrapper", which breaks Flask's
    # endpoint naming when more than one view uses this decorator.
    @wraps(fn)
    def wrapper(*args, **kwargs):
        auth_type, token = request.headers['Authorization'].split(None, 1)
        kwargs.update({
            "sess_token": token,
        })
        return fn(*args, **kwargs)
    return wrapper
| {
"content_hash": "0a697fd803a849e58ae14c23f888ddef",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 74,
"avg_line_length": 21.352941176470587,
"alnum_prop": 0.5647382920110193,
"repo_name": "pablodiguerero/asterisk.api",
"id": "e14f6e5929ea87771acf8a5af9690d77305c495e",
"size": "457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/flask/decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "112289"
}
],
"symlink_target": ""
} |
def configuration(parent_package='io', top_path=None):
    """Return the numpy.distutils configuration for the ``arff`` subpackage."""
    from numpy.distutils.misc_util import Configuration
    cfg = Configuration('arff', parent_package, top_path)
    # The tests data dir is intentionally not registered here:
    # cfg.add_data_dir('tests')
    return cfg
if __name__ == '__main__':
    # Allow building/installing this subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| {
"content_hash": "e8e68054987a48269b80c0f7fe7899af",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 38.111111111111114,
"alnum_prop": 0.6909620991253644,
"repo_name": "huard/scipy-work",
"id": "3ae197f026df39ebb2e4d1cf2469016466ef81b3",
"size": "366",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/io/arff/setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from google.analytics import admin_v1alpha
async def sample_update_data_stream():
    """Issue an UpdateDataStream RPC with an empty request and print the result."""
    admin_client = admin_v1alpha.AnalyticsAdminServiceAsyncClient()
    update_request = admin_v1alpha.UpdateDataStreamRequest()
    # Await the RPC and show the server's response.
    print(await admin_client.update_data_stream(request=update_request))
# [END analyticsadmin_v1alpha_generated_AnalyticsAdminService_UpdateDataStream_async]
| {
"content_hash": "3e581b52eb6bbf21eeb1927c6c6ba22f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 85,
"avg_line_length": 27.055555555555557,
"alnum_prop": 0.7515400410677618,
"repo_name": "googleapis/python-analytics-admin",
"id": "686fdd1a8f40b607b01791efae2aee09a4c7846c",
"size": "1901",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/analyticsadmin_v1alpha_generated_analytics_admin_service_update_data_stream_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "5576405"
},
{
"name": "Shell",
"bytes": "30687"
}
],
"symlink_target": ""
} |
import pytest
import sys
import time
from test_base_class import TestBaseClass
# Skip the whole module when the aerospike bindings are not installed.
aerospike = pytest.importorskip("aerospike")
try:
    from aerospike.exception import *
except:
    # Fail fast with an actionable message (Python 2 print statement).
    print "Please install aerospike python client."
    sys.exit(1)
class TestLList(TestBaseClass):
pytestmark = pytest.mark.skipif(
TestBaseClass.has_ldt_support() == False,
reason="LDTs are not enabled for namespace 'test'")
llist_integer = None
llist_string = None
client = None
key1 = None
key2 = None
def setup_class(self):
print "setup class invoked..."
hostlist, user, password = TestBaseClass.get_hosts()
config = {'hosts': hostlist}
if user == None and password == None:
self.client = aerospike.client(config).connect()
else:
self.client = aerospike.client(config).connect(user, password)
TestLList.key1 = ('test', 'demo', 'integer_llist_ky')
TestLList.llist_integer = TestLList.client.llist(TestLList.key1,
'integer_bin')
TestLList.key2 = ('test', 'demo', 'string_llist_ky')
TestLList.llist_string = TestLList.client.llist(TestLList.key2,
'string_bin')
TestLList.key3 = ('test', 'demo', 'float_llist_ky')
TestLList.llist_float = TestLList.client.llist(TestLList.key3,
'float_bin')
def teardown_class(self):
print "teardown class invoked..."
try:
TestLList.llist_integer.destroy()
TestLList.llist_string.destroy()
TestLList.list_float.destroy()
except:
pass
self.client.close()
#Add() - Add an object to the llist.
#Get() - Get an object from the llist.
#Size() - Get the current item count of the llist.
def test_llist_add_get_size_positive(self):
"""
Invoke add() an object to LList.
"""
assert 0 == TestLList.llist_integer.add(11)
assert [11] == TestLList.llist_integer.get(11)
assert 0 == TestLList.llist_string.add("abc")
assert ['abc'] == TestLList.llist_string.get('abc')
assert 1 == TestLList.llist_integer.size()
#Add() - Add() unsupported type data to llist.
def test_llist_add_float_positive(self):
"""
Invoke add() float type data.
"""
rec = {"pi": 3.14}
try:
TestLList.llist_float.add(rec)
except LDTKeyFunctionNotFound as exception:
assert exception.code == 1433
assert exception.msg == "LDT-Key Field Not Found"
#Add() - Add() without any mandatory parameters.
def test_llist_no_parameter_negative(self):
"""
Invoke add() without any mandatory parameters.
"""
with pytest.raises(TypeError) as typeError:
TestLList.llist_integer.add()
#Add_many() - Add a list of objects to the set.
def test_llist_add_many_positive(self):
"""
Invoke add_many() to add a list of objects to the set.
"""
policy = {'timeout': 7000}
assert 0 == TestLList.llist_integer.add_many([122, 56, 871], policy)
assert [122] == TestLList.llist_integer.get(122)
assert [56] == TestLList.llist_integer.get(56)
assert [871] == TestLList.llist_integer.get(871)
#Get() - Get without any mandatory parameters.
def test_llist_get_element_negative(self):
"""
Invoke get() without any mandatory parameters.
"""
with pytest.raises(TypeError) as typeError:
TestLList.llist_integer.get()
#Remove() and Get()- Remove an object from the set and get non-existent element.
def test_llist_remove_positive(self):
"""
Invoke remove() to remove element.
"""
assert 0 == TestLList.llist_string.add('remove')
assert 0 == TestLList.llist_string.remove('remove')
try:
TestLList.llist_string.get('remove')
except UDFError as exception:
assert exception.code == 100L
except LargeItemNotFound as exception:
assert exception.code == 125L
#Remove() - Remove non-existent object from the llist.
def test_llist_remove_element_negative(self):
"""
Invoke remove() to remove non-existent element.
"""
try:
TestLList.llist_string.remove('kk')
except UDFError as exception:
assert exception.code == 100L
except LargeItemNotFound as exception:
assert exception.code == 125L
#Destroy() - Delete the entire LList(LDT Remove).
def test_llist_destroy_positive(self):
"""
Invoke destroy() to delete entire LDT.
"""
key = ('test', 'demo', 'remove')
llist = self.client.llist(key, 'llist_add')
try:
llist.add(876)
except:
pass
assert 0 == llist.destroy()
def test_llist_ldt_initialize_negative(self):
"""
Initialize ldt with wrong key.
"""
key = ('test', 'demo', 12.3)
try:
llist = self.client.llist(key, 'ldt_stk')
except ParamError as exception:
assert exception.code == -2
assert exception.msg == "Parameters are incorrect"
def test_llist_find_first_positive_without_policy(self):
"""
Invoke find_first() to access elements
"""
elements_list = TestLList.llist_integer.find_first(2)
assert elements_list == [11, 56]
def test_llist_find_first_positive(self):
"""
Invoke find_first() to access elements
"""
elements_list = TestLList.llist_integer.find_first(2, {'timeout': 1000})
assert elements_list == [11, 56]
def test_llist_find_first_count_large_positive(self):
"""
Invoke find_first() to access elements with a larger count
"""
elements_list = TestLList.llist_integer.find_first(10, {'timeout': 1000})
assert elements_list == [11, 56, 122, 871]
def test_llist_find_first_count_negative(self):
"""
Invoke find_first() to access elements with a negative count
"""
elements_list = TestLList.llist_integer.find_first(-8, {'timeout': 1000})
assert elements_list == [11, 56, 122, 871]
def test_llist_find_last_positive_without_policy(self):
"""
Invoke find_last() to access elements
"""
elements_list = TestLList.llist_integer.find_last(2)
assert elements_list == [871, 122]
def test_llist_find_last_positive(self):
"""
Invoke find_last() to access elements
"""
elements_list = TestLList.llist_integer.find_last(2, {'timeout': 1000})
assert elements_list == [871, 122]
def test_llist_find_last_count_large(self):
"""
Invoke find_last() to access elements
"""
elements_list = TestLList.llist_integer.find_last(15, {'timeout': 1000})
assert elements_list == [871, 122, 56, 11]
def test_llist_find_last_count_negative(self):
"""
Invoke find_last() to access elements
"""
elements_list = TestLList.llist_integer.find_last(-2, {'timeout': 1000})
assert elements_list == [871, 122, 56, 11]
def test_llist_find_last_no_params(self):
"""
Invoke find_last() to access elements
"""
with pytest.raises(TypeError) as typeError:
TestLList.llist_integer.find_last()
assert "Required argument 'count' (pos 1) not found" in typeError.value
def test_llist_find_last_no_parameters_negative(self):
"""
Invoke find_last() to access elements
"""
with pytest.raises(TypeError) as typeError:
TestLList.llist_integer.find_last()
assert "Required argument 'count' (pos 1) not found" in typeError.value
def test_llist_find_from_positive_without_policy(self):
"""
Invoke find_from() to access elements from a given key
"""
elements_list = TestLList.llist_integer.find_from(56, 2)
assert elements_list == [56, 122]
def test_llist_find_from_positive(self):
"""
Invoke find_from() to access elements from a given key
"""
elements_list = TestLList.llist_integer.find_from(56, 2, {'timeout': 1000})
assert elements_list == [56, 122]
def test_llist_find_from_positive_non_existent_key(self):
"""
Invoke find_from() to access elements from a non-existent key
"""
elements_list = TestLList.llist_integer.find_from(21, 2, {'timeout': 1000})
assert elements_list == [56, 122]
def test_llist_range_limit_positive_without_policy(self):
"""
Invoke range_limit() to access elements
"""
elements_list = TestLList.llist_integer.range_limit(56, 871, 2, None, None)
assert elements_list == [56, 122, 871]
def test_llist_range_limit_positive(self):
"""
Invoke range_limit() to access elements
"""
elements_list = TestLList.llist_integer.range_limit(56, 871, 2, None, None, {'timeout': 1000})
assert elements_list == [56, 122, 871]
def test_llist_range_limit_negative_keys(self):
"""
Invoke range_limit() to access elements with negative keys
"""
elements_list = TestLList.llist_integer.range_limit(-56, -871, 2, None, None, {'timeout': 1000})
assert elements_list == []
def test_llist_range_limit_larger_count_positive(self):
"""
Invoke range_limit() to access elements with larger count than list
size
"""
elements_list = TestLList.llist_integer.range_limit(56, 871, 8, None, None, {'timeout': 1000})
assert elements_list == [56, 122, 871]
def test_llist_range_limit_count_negative(self):
"""
Invoke range_limit() to access elements
"""
elements_list = TestLList.llist_integer.range_limit(56, 871, -2, None, None, {'timeout': 1000})
assert elements_list == [56, 122, 871]
def test_llist_set_page_size_without_policy(self):
#Invoke set_page_size() to set page size of ldt bin.
assert 0 == TestLList.llist_integer.set_page_size(8192)
def test_llist_set_page_size(self):
#Invoke set_page_size() to set page size of ldt bin.
assert 0 == TestLList.llist_integer.set_page_size(8192, {'timeout': 0})
def test_llist_set_page_size_string_negative(self):
with pytest.raises(TypeError) as typeError:
TestLList.llist_integer.set_page_size("8192", {'timeout': 0})
assert "an integer is required" in typeError.value
""" Causes db to shutdown
def test_llist_find_from_positive_negative_count(self):
#Invoke find_from() to access elements with a negative count
elements_list = TestLList.llist_integer.find_from(56, -2, {'timeout': 1000})
assert elements_list == [56, 122]
"""
| {
"content_hash": "a6cf4ab62c463a94a3b381a421489a5a",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 104,
"avg_line_length": 35.04347826086956,
"alnum_prop": 0.5875576036866359,
"repo_name": "arthurprs/aerospike-client-python",
"id": "d1e546222cdd97446eeca16ea82061ec6a61fd06",
"size": "11309",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_llist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "649569"
},
{
"name": "Lua",
"bytes": "6124"
},
{
"name": "Python",
"bytes": "523698"
},
{
"name": "Shell",
"bytes": "13148"
}
],
"symlink_target": ""
} |
"""Unit tests for UnboundedThreadPoolExecutor."""
# pytype: skip-file
from __future__ import absolute_import
import itertools
import threading
import time
import traceback
import unittest
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
from apache_beam.utils import thread_pool_executor
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
class UnboundedThreadPoolExecutorTest(unittest.TestCase):
  """Exercises UnboundedThreadPoolExecutor: shutdown, worker reuse, errors."""

  def setUp(self):
    # Guards _worker_idents, which worker threads append to concurrently.
    self._lock = threading.Lock()
    self._worker_idents = []

  def append_and_sleep(self, sleep_time):
    """Record the current worker thread's ident, then sleep `sleep_time`s."""
    with self._lock:
      self._worker_idents.append(threading.current_thread().ident)
    time.sleep(sleep_time)

  def raise_error(self, message):
    """Task helper used to test exception propagation through futures."""
    raise ValueError(message)

  def test_shutdown_with_no_workers(self):
    # Entering and exiting the context manager with no work must not hang.
    with UnboundedThreadPoolExecutor():
      pass

  def test_shutdown_with_fast_workers(self):
    futures = []
    with UnboundedThreadPoolExecutor() as executor:
      for _ in range(0, 5):
        futures.append(executor.submit(self.append_and_sleep, 0.01))

    for future in futures:
      future.result(timeout=10)

    with self._lock:
      self.assertEqual(5, len(self._worker_idents))

  def test_shutdown_with_slow_workers(self):
    # 1s tasks: shutdown must still wait for all of them to run.
    futures = []
    with UnboundedThreadPoolExecutor() as executor:
      for _ in range(0, 5):
        futures.append(executor.submit(self.append_and_sleep, 1))

    for future in futures:
      future.result(timeout=10)

    with self._lock:
      self.assertEqual(5, len(self._worker_idents))

  def test_worker_reuse(self):
    futures = []
    with UnboundedThreadPoolExecutor() as executor:
      for _ in range(0, 5):
        futures.append(executor.submit(self.append_and_sleep, 0.01))
      # Let the first batch finish so its workers become idle and reusable.
      time.sleep(3)
      for _ in range(0, 5):
        futures.append(executor.submit(self.append_and_sleep, 0.01))

    for future in futures:
      future.result(timeout=10)

    with self._lock:
      self.assertEqual(10, len(self._worker_idents))
      # Fewer distinct idents than tasks proves at least one thread was reused.
      self.assertTrue(len(set(self._worker_idents)) < 10)

  def test_exception_propagation(self):
    with UnboundedThreadPoolExecutor() as executor:
      future = executor.submit(self.raise_error, 'footest')

    try:
      future.result()
    except Exception:
      message = traceback.format_exc()
    else:
      raise AssertionError('expected exception not raised')

    # The traceback must carry both the message and the raising frame.
    self.assertIn('footest', message)
    self.assertIn('raise_error', message)

  def test_map(self):
    with UnboundedThreadPoolExecutor() as executor:
      executor.map(self.append_and_sleep, itertools.repeat(0.01, 5))

    with self._lock:
      self.assertEqual(5, len(self._worker_idents))

  def test_shared_shutdown_does_nothing(self):
    # The shared singleton ignores shutdown(); it must remain usable after.
    thread_pool_executor.shared_unbounded_instance().shutdown()
    futures = []
    with thread_pool_executor.shared_unbounded_instance() as executor:
      for _ in range(0, 5):
        futures.append(executor.submit(self.append_and_sleep, 0.01))

    for future in futures:
      future.result(timeout=10)

    with self._lock:
      self.assertEqual(5, len(self._worker_idents))
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| {
"content_hash": "048f8683d3017c7cbd25e35952c6d900",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 78,
"avg_line_length": 27.93859649122807,
"alnum_prop": 0.6888540031397175,
"repo_name": "iemejia/incubator-beam",
"id": "b9251cad00a15ed4f8034adc8b4539031efb0191",
"size": "3970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/utils/thread_pool_executor_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration ----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
#              'sphinx.ext.todo', 'sphinx.ext.coverage']
#extensions = ['sphinx.ext.autodoc',
#              'sphinx.ext.todo',
#              # 'sphinx.ext.intersphinx',
#              'sphinx.ext.coverage']
#
#todo_include_todos = True

# Add any paths that contain templates here, relative to this directory.
templates_path = []
# CI publish builds (HUDSON_PUBLISH_DOCS set) additionally overlay the '_ga'
# template directory before the regular templates.
if os.getenv('HUDSON_PUBLISH_DOCS'):
    templates_path = ['_ga', '_templates']
else:
    templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'keystone-voms'
copyright = u'2012, Spanish National Research Council'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
from keystone_voms.version import version_info
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The short X.Y version.
version = version_info.version_string()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['old']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['keystone.']
# -- Options for man page output --------------------------------------------

# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# man_pages = []

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme_path = ["."]
html_theme = 'haiku'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['images']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
# Derive the "last updated" stamp from the latest git commit.  The previous
# ``os.popen(git_cmd).read()`` form had no error handling: when building from
# a tarball (no git checkout) it silently embedded empty/garbage output.  Run
# the command explicitly and fall back to an empty stamp on failure.  The
# command string is a trusted constant, so ``shell=True`` is acceptable here.
import subprocess
try:
    html_last_updated_fmt = subprocess.check_output(
        git_cmd, shell=True).decode('utf-8', 'replace').strip()
except (OSError, subprocess.CalledProcessError):
    html_last_updated_fmt = ''

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'keystonevomsdoc'
# -- Options for LaTeX output -------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples (source
# start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'keystone-voms.tex', u'Keystone VOMS module Documentation',
     u'IFCA - CSIC', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for Texinfo output -----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'keystone-voms', u'Keystone VOMS module Documentation',
     u'IFCA - CSIC', 'keystone-voms', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
# Cross-project reference targets for the :mod:`sphinx.ext.intersphinx`
# extension (only effective if that extension is enabled above).
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
                       'nova': ('http://nova.openstack.org', None),
                       'swift': ('http://swift.openstack.org', None),
                       'glance': ('http://glance.openstack.org', None)}
| {
"content_hash": "de366496d995f76a75b16e94a6508b24",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 79,
"avg_line_length": 33.32,
"alnum_prop": 0.6879951980792317,
"repo_name": "IFCA/keystone-voms",
"id": "1ce0f8523d62d1f6ccb8ef5911fd463e4b784114",
"size": "8752",
"binary": false,
"copies": "3",
"ref": "refs/heads/stable/newton",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "86950"
}
],
"symlink_target": ""
} |
"""
Colour Analysis
===============
Defines the *Colour - Analysis* main class:
- :class:`ColourAnalysis`
"""
from __future__ import division, unicode_literals
import json
import os
from collections import OrderedDict, deque, namedtuple
from itertools import cycle
import numpy as np
from vispy.scene import SceneCanvas
from colour import RGB_COLOURSPACES
from colour.utilities import is_string
from colour_analysis import __application_name__, __version__
from colour_analysis.constants import (DEFAULT_FAILSAFE_IMAGE, SETTINGS_FILE,
REFERENCE_COLOURSPACES)
from colour_analysis.views import (ConsoleView, DiagramView, GamutView,
ImageView)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'Sequence', 'Action', 'ViewPreset', 'LayoutPreset', 'ColourAnalysis'
]
Sequence = namedtuple('Sequence', ('modifiers', 'key'))
"""
Defines a modifier and key keyboard sequence.
Sequence : namedtuple
"""
Action = namedtuple('Action', ('name', 'description', 'sequence'))
"""
Defines a user action / interaction associated with a :class:`Sequence`.
Actions are name bound to methods affixed with *_action* in
:class:`ColourAnalysis` or its children views. For example an action
named *toggle_blacks_clamp* will be bound to available
*toggle_blacks_clamp_action* methods in :class:`ColourAnalysis` or its children
views.
Action : namedtuple
"""
ViewPreset = namedtuple('ViewPreset', ('name', 'description', 'view', 'row',
'column', 'row_span', 'column_span'))
"""
Defines a view preset used with :class:`LayoutPreset` describing the location
of the view in the layout grid.
ViewPreset : namedtuple
"""
LayoutPreset = namedtuple('LayoutPreset', ('name', 'description', 'views'))
"""
Defines a layout preset describing which views are added to the
:class:`ColourAnalysis` class.
LayoutPreset : namedtuple
"""
def _assert_string_like(name, value):
    """Assert that *value*, for attribute *name*, is a string-like object."""
    assert is_string(value), (('"{0}" attribute: "{1}" is not a '
                               '"string" like object!').format(name, value))


def _assert_bool(name, value):
    """Assert that *value*, for attribute *name*, is a ``bool`` instance."""
    assert isinstance(value, bool), (
        '"{0}" attribute: "{1}" is not a "bool" instance!'.format(
            name, value))


class ColourAnalysis(SceneCanvas):
    """
    Defines *Colour - Analysis* artist, a class inheriting from
    :class:`vispy.scene.SceneCanvas`.

    Parameters
    ----------
    image : array_like, optional
        Image to analyse.
    image_path : unicode, optional
        Image path; shown in the window title and validated for existence.
    input_colourspace : unicode, optional
        :class:`colour.RGB_Colourspace` class instance name defining the
        `image` argument colourspace; any key of
        :attr:`colour.RGB_COLOURSPACES` such as 'ITU-R BT.709', 'ACEScg'
        or 'sRGB'.
    input_oecf : unicode, optional
        See `input_colourspace` argument for possible values.
        :class:`colour.RGB_Colourspace` class instance name defining the image
        opto-electronic transfer function.
    input_linear : bool, optional
        Is input image linear.
    reference_colourspace : unicode, optional
        **{'CIE XYZ', 'CIE xyY', 'CIE Lab', 'CIE Luv', 'CIE UCS', 'CIE UVW',
        'IPT', 'Hunter Lab', 'Hunter Rdab'}**,
        Reference colourspace to use for colour conversions / transformations.
    correlate_colourspace : unicode, optional
        See `input_colourspace` argument for possible values, default value is
        *ACEScg*.
        :class:`colour.RGB_Colourspace` class instance name defining the
        comparison / correlate colourspace.
    settings : dict, optional
        Settings for the :class:`ColourAnalysis` class and its children views;
        loaded from ``SETTINGS_FILE`` when not given.
    layout : unicode, optional
        Layout the :class:`ColourAnalysis` class will use.

    Attributes
    ----------
    image
    image_path
    input_colourspace
    input_oecf
    input_linear
    reference_colourspace
    correlate_colourspace
    settings
    layout
    actions
    console_view
    gamut_view
    image_view
    diagram_view
    clamp_blacks
    clamp_whites

    Methods
    -------
    on_key_press
    cycle_correlate_colourspace_action
    cycle_reference_colourspace_action
    toggle_blacks_clamp_action
    toggle_whites_clamp_action
    """

    def __init__(self,
                 image=None,
                 image_path=None,
                 input_colourspace='ITU-R BT.709',
                 input_oecf='ITU-R BT.709',
                 input_linear=True,
                 reference_colourspace='CIE xyY',
                 correlate_colourspace='ACEScg',
                 settings=None,
                 layout='layout_1'):
        self._initialised = False

        # Resolve the settings *before* first use: the previous implementation
        # dereferenced ``settings['scene_canvas']`` before handling the
        # ``None`` default, so constructing with default arguments crashed.
        # Use a context manager so the settings file handle is closed.
        if settings is None:
            with open(SETTINGS_FILE) as settings_file:
                settings = json.load(settings_file)

        title = '{0} - {1}'.format(__application_name__, __version__)
        SceneCanvas.__init__(
            self,
            keys='interactive',
            title=('{0} - {1}'.format(title, image_path)
                   if image_path is not None else title),
            size=settings['scene_canvas']['size'],
            bgcolor=settings['scene_canvas']['scene_canvas_background_colour'],
            config={'samples': settings['scene_canvas']['samples']})

        self.unfreeze()

        self._image = None
        self.image = image if image is not None else DEFAULT_FAILSAFE_IMAGE
        self._image_path = None
        self.image_path = image_path
        self._input_colourspace = None
        self.input_colourspace = input_colourspace
        self._input_oecf = None
        self.input_oecf = input_oecf
        self._input_linear = None
        self.input_linear = input_linear
        self._reference_colourspace = None
        self.reference_colourspace = reference_colourspace
        self._correlate_colourspace = None
        self.correlate_colourspace = correlate_colourspace
        self._settings = settings
        self._layout = None
        self.layout = layout

        self._clamp_blacks = False
        self._clamp_whites = False

        self._layout_presets = OrderedDict()
        self._actions = {}

        self._console_view = None
        self._gamut_view = None
        self._image_view = None
        self._diagram_view = None
        self._views = None

        self._grid = None

        # Colourspaces cycled through by the *cycle_correlate_colourspace*
        # action; a few legacy aliases are excluded.
        self._RGB_colourspaces_cycle = cycle([
            c for c in sorted(RGB_COLOURSPACES)
            if c not in ('aces', 'adobe1998', 'prophoto')
        ])

        # Rotate so that cycling starts just after the current reference
        # colourspace.
        reference_colourspaces_deque = deque(REFERENCE_COLOURSPACES)
        reference_colourspaces_deque.rotate(
            -REFERENCE_COLOURSPACES.index(self._reference_colourspace) - 1)
        self._reference_colourspaces_cycle = cycle(
            reference_colourspaces_deque)

        self._create_layout_presets()
        self._create_actions()
        self._create_views()
        self._layout_views()

        self.show()

        self._initialised = True

    @property
    def image(self):
        """Getter for the analysed image (array_like)."""
        return self._image

    @image.setter
    def image(self, value):
        """Setter for the analysed image; pushes it to the children views."""
        if value is not None:
            assert isinstance(value, (tuple, list, np.ndarray, np.matrix)), ((
                '"{0}" attribute: "{1}" is not a "tuple", "list", "ndarray" '
                'or "matrix" instance!').format('image', value))
        self._image = value

        # The views only exist once construction has completed.
        if self._initialised:
            self._propagate_image_to_views()

    @property
    def image_path(self):
        """Getter for the image path (unicode)."""
        return self._image_path

    @image_path.setter
    def image_path(self, value):
        """Setter for the image path; the file must exist on disk."""
        if value is not None:
            _assert_string_like('image_path', value)
            assert os.path.exists(value), (
                '"{0}" input image doesn\'t exists!'.format(value))
        self._image_path = value

    @property
    def input_colourspace(self):
        """Getter for the input colourspace name (unicode)."""
        return self._input_colourspace

    @input_colourspace.setter
    def input_colourspace(self, value):
        """Setter for the input colourspace; must be a factory colourspace."""
        if value is not None:
            _assert_string_like('input_colourspace', value)
            assert value in RGB_COLOURSPACES, (
                '"{0}" colourspace not found in factory RGB colourspaces: '
                '"{1}".').format(value, ', '.join(
                    sorted(RGB_COLOURSPACES.keys())))
        self._input_colourspace = value

    @property
    def input_oecf(self):
        """Getter for the input OECF name (unicode)."""
        return self._input_oecf

    @input_oecf.setter
    def input_oecf(self, value):
        """Setter for the input OECF; must match a factory colourspace."""
        if value is not None:
            _assert_string_like('input_oecf', value)
            assert value in RGB_COLOURSPACES, (
                '"{0}" OECF is not associated with any factory '
                'RGB colourspaces: "{1}".').format(value, ', '.join(
                    sorted(RGB_COLOURSPACES.keys())))
        self._input_oecf = value

    @property
    def input_linear(self):
        """Getter for the input linearity flag (bool)."""
        return self._input_linear

    @input_linear.setter
    def input_linear(self, value):
        """Setter for the input linearity flag."""
        if value is not None:
            _assert_bool('input_linear', value)
        self._input_linear = value

    @property
    def reference_colourspace(self):
        """Getter for the reference colourspace name (unicode)."""
        return self._reference_colourspace

    @reference_colourspace.setter
    def reference_colourspace(self, value):
        """Setter for the reference colourspace; must be a factory one."""
        if value is not None:
            _assert_string_like('reference_colourspace', value)
            # NOTE: ``REFERENCE_COLOURSPACES`` is a sequence (it is indexed
            # with ``.index()`` in ``__init__``); the previous error message
            # called ``.keys()`` on it and raised ``AttributeError`` instead
            # of the intended ``AssertionError``.
            assert value in REFERENCE_COLOURSPACES, (
                '"{0}" reference colourspace not found in factory reference '
                'colourspaces: "{1}".').format(value, ', '.join(
                    sorted(REFERENCE_COLOURSPACES)))
        self._reference_colourspace = value

    @property
    def correlate_colourspace(self):
        """Getter for the correlate colourspace name (unicode)."""
        return self._correlate_colourspace

    @correlate_colourspace.setter
    def correlate_colourspace(self, value):
        """Setter for the correlate colourspace; must be a factory one."""
        if value is not None:
            _assert_string_like('correlate_colourspace', value)
            assert value in RGB_COLOURSPACES, (
                '"{0}" colourspace not found in factory RGB colourspaces: '
                '"{1}".').format(value, ', '.join(
                    sorted(RGB_COLOURSPACES.keys())))
        self._correlate_colourspace = value

    @property
    def settings(self):
        """Getter for the settings dict (read-only)."""
        return self._settings

    @property
    def layout(self):
        """Getter for the layout name (unicode)."""
        return self._layout

    @layout.setter
    def layout(self, value):
        """Setter for the layout name."""
        if value is not None:
            _assert_string_like('layout', value)
        self._layout = value

    @property
    def actions(self):
        """Getter for the actions dict (read-only)."""
        return self._actions

    @property
    def console_view(self):
        """Getter for the console view (ViewBox, read-only)."""
        return self._console_view

    @property
    def gamut_view(self):
        """Getter for the gamut view (ViewBox, read-only)."""
        return self._gamut_view

    @property
    def image_view(self):
        """Getter for the image view (ViewBox, read-only)."""
        return self._image_view

    @property
    def diagram_view(self):
        """Getter for the diagram view (ViewBox, read-only)."""
        return self._diagram_view

    @property
    def clamp_blacks(self):
        """Getter for the blacks clamping flag (bool)."""
        return self._clamp_blacks

    @clamp_blacks.setter
    def clamp_blacks(self, value):
        """Setter for the blacks clamping flag; refreshes the views."""
        if value is not None:
            _assert_bool('clamp_blacks', value)
        self._clamp_blacks = value
        self._propagate_image_to_views()

    @property
    def clamp_whites(self):
        """Getter for the whites clamping flag (bool)."""
        return self._clamp_whites

    @clamp_whites.setter
    def clamp_whites(self, value):
        """Setter for the whites clamping flag; refreshes the views."""
        if value is not None:
            _assert_bool('clamp_whites', value)
        self._clamp_whites = value
        self._propagate_image_to_views()

    def on_key_press(self, event):
        """
        Reimplements :meth:`vispy.scene.SceneCanvas.on_key_press` method and
        triggers the various actions defined by :class:`ColourAnalysis` class
        and its children views.

        Parameters
        ----------
        event : Object
            Event.
        """
        key = event.key.name.lower()
        modifiers = sorted(
            [modifier.name.lower() for modifier in event.modifiers])
        for action in self._actions.values():
            if (key == action.sequence.key
                    and modifiers == sorted(action.sequence.modifiers)):
                # Dispatch to ``<name>_action`` methods on the canvas and on
                # every view that implements them.
                method = '{0}_action'.format(action.name)
                if hasattr(self, method):
                    getattr(self, method)()
                for view in self._views:
                    if hasattr(view, method):
                        getattr(view, method)()

    def _propagate_image_to_views(self):
        """Pushes the (possibly clamped) image to every view exposing one."""
        image = self._create_image()
        for view in self._views:
            if hasattr(view, 'image'):
                view.image = image

    def _create_layout_presets(self):
        """
        Creates the layout presets from :attr:`ColourAnalysis.settings`
        attribute *layouts* key value.

        Notes
        -----
        -   There is no way to change the current layout at the moment.
        """
        layouts = self._settings['layouts']
        for layout in layouts:
            views = {}
            for name, view in layout['views'].items():
                views[name] = ViewPreset(
                    name=view['name'],
                    description=view['description'],
                    view=view['view'],
                    row=view['row'],
                    column=view['column'],
                    row_span=view['row_span'],
                    column_span=view['column_span'])

            self._layout_presets[layout['name']] = LayoutPreset(
                name=layout['name'],
                description=layout['description'],
                views=views)

    def _create_actions(self):
        """
        Creates the actions from :attr:`ColourAnalysis.settings` attribute
        *actions* key value.
        """
        self._actions = {}

        # Default to an empty dict: the previous ``()`` default crashed on
        # ``.items()`` whenever the *actions* key was absent.
        for name, action in self._settings.get('actions', {}).items():
            if action.get('sequence') is not None:
                sequence = Sequence(
                    modifiers=action.get('sequence').get('modifiers', ()),
                    key=action.get('sequence').get('key'))
            else:
                sequence = Sequence(modifiers=(), key=None)

            self._actions[name] = Action(
                name=action.get('name'),
                description=action.get('description'),
                sequence=sequence)

    def _create_views(self):
        """
        Creates the views from :attr:`ColourAnalysis.settings` attribute
        value.
        """
        background_colour = (
            self._settings['scene_canvas']['views_background_colour'])
        border_colour = self._settings['scene_canvas']['views_border_colour']

        self._console_view = ConsoleView(
            scene_canvas=self,
            text_color=(0.8, 0.8, 0.8),
            font_size=10.0,
            bgcolor=background_colour,
            border_color=border_colour)

        views = self._layout_presets.get(self._layout).views.values()
        views = [view.view for view in views]

        if 'gamut_view' in views:
            self._gamut_view = GamutView(
                scene_canvas=self,
                image=self._image,
                input_colourspace=self._input_colourspace,
                reference_colourspace=self._reference_colourspace,
                correlate_colourspace=self._correlate_colourspace,
                settings=self._settings,
                bgcolor=background_colour,
                border_color=border_colour)

        if 'image_view' in views:
            self._image_view = ImageView(
                scene_canvas=self,
                image=self._image,
                input_colourspace=self._input_colourspace,
                correlate_colourspace=self._correlate_colourspace,
                bgcolor=background_colour,
                border_color=border_colour)

        if 'diagram_view' in views:
            self._diagram_view = DiagramView(
                scene_canvas=self,
                image=self._image,
                input_colourspace=self._input_colourspace,
                correlate_colourspace=self._correlate_colourspace,
                bgcolor=background_colour,
                border_color=border_colour)

        self._views = (self._console_view, self._gamut_view, self._image_view,
                       self._diagram_view)

    def _layout_views(self):
        """
        Layout the views according to :attr:`ColourAnalysis.layout` attribute
        value.
        """
        self._grid = self.central_widget.add_grid()

        layout = self._layout_presets.get(self._layout)
        for view_preset in layout.views.values():
            view = getattr(self, '{0}'.format(view_preset.view))
            if view is None:
                continue
            self._grid.add_widget(
                view,
                row=view_preset.row,
                col=view_preset.column,
                row_span=view_preset.row_span,
                col_span=view_preset.column_span)

    def _create_image(self):
        """
        Creates the image used by the views according to
        :attr:`ColourAnalysis.clamp_blacks` and
        :attr:`ColourAnalysis.clamp_whites` attributes values.

        Returns
        -------
        ndarray
            Image
        """
        image = self._image

        if self._clamp_blacks:
            image = np.clip(image, 0, np.inf)

        if self._clamp_whites:
            image = np.clip(image, -np.inf, 1)

        return image

    def cycle_correlate_colourspace_action(self):
        """
        Defines the slot triggered by the *cycle_correlate_colourspace*
        action.

        Returns
        -------
        bool
            Definition success.
        """
        self._correlate_colourspace = next(self._RGB_colourspaces_cycle)

        for view in self._views:
            if hasattr(view, 'correlate_colourspace'):
                view.correlate_colourspace = self._correlate_colourspace

        return True

    def cycle_reference_colourspace_action(self):
        """
        Defines the slot triggered by the *cycle_reference_colourspace*
        action.

        Returns
        -------
        bool
            Definition success.
        """
        self._reference_colourspace = next(self._reference_colourspaces_cycle)

        for view in self._views:
            if hasattr(view, 'reference_colourspace'):
                view.reference_colourspace = self._reference_colourspace

        return True

    def toggle_blacks_clamp_action(self):
        """
        Defines the slot triggered by the *toggle_blacks_clamp* action.

        Returns
        -------
        bool
            Definition success.
        """
        self.clamp_blacks = not self.clamp_blacks
        return True

    def toggle_whites_clamp_action(self):
        """
        Defines the slot triggered by the *toggle_whites_clamp* action.

        Returns
        -------
        bool
            Definition success.
        """
        self.clamp_whites = not self.clamp_whites
        return True
| {
"content_hash": "ddf51e3fc899504215095675a3e73bc4",
"timestamp": "",
"source": "github",
"line_count": 873,
"max_line_length": 79,
"avg_line_length": 28.668957617411227,
"alnum_prop": 0.536039635608119,
"repo_name": "colour-science/colour-analysis",
"id": "59ffae8f3e8b8ed816547e6a3d505a4c4a2d2233",
"size": "25075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "colour_analysis/analysis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "161677"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.