hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c6b70da5ea5a9e6511345209e484cf5db4d2d48c | 18,889 | py | Python | blender_bindings/material_loader/shaders/source1_shaders/heroes_hair.py | anderlli0053/SourceIO | 3c0c4839939ce698439987ac52154f89ee2f5341 | ["MIT"] | null | null | null | blender_bindings/material_loader/shaders/source1_shaders/heroes_hair.py | anderlli0053/SourceIO | 3c0c4839939ce698439987ac52154f89ee2f5341 | ["MIT"] | null | null | null | blender_bindings/material_loader/shaders/source1_shaders/heroes_hair.py | anderlli0053/SourceIO | 3c0c4839939ce698439987ac52154f89ee2f5341 | ["MIT"] |
import numpy as np
from typing import Iterable
import bpy
from ...shader_base import Nodes
from ..source1_shader_base import Source1ShaderBase
class HeroesArmor(Source1ShaderBase):
SHADER: str = 'heroes_hair'
@property
def bumpmap(self):
texture_path = self._vmt.get_string('$bumpmap', None)
if texture_path is not None:
image = self.load_texture_or_default(texture_path, (0.5, 0.5, 1.0, 1.0))
image = self.convert_normalmap(image)
image.colorspace_settings.is_data = True
image.colorspace_settings.name = 'Non-Color'
return image
return None
@property
def basetexture(self):
texture_path = self._vmt.get_string('$basetexture', None)
if texture_path is not None:
return self.load_texture_or_default(texture_path, (0.3, 0, 0.3, 1.0))
return None
@property
def hairmask(self):
texture_path = self._vmt.get_string('$hairmask', None)
if texture_path is not None:
return self.load_texture_or_default(texture_path, (0.3, 0, 0.3, 1.0))
return None
@property
def selfillummask(self):
texture_path = self._vmt.get_string('$selfillummask', None)
if texture_path is not None:
image = self.load_texture_or_default(texture_path, (0.0, 0.0, 0.0, 1.0))
image.colorspace_settings.is_data = True
image.colorspace_settings.name = 'Non-Color'
return image
return None
@property
def phongexponenttexture(self):
texture_path = self._vmt.get_string('$phongexponenttexture', None)
if texture_path is not None:
image = self.load_texture_or_default(texture_path, (0.5, 0.0, 0.0, 1.0))
image.colorspace_settings.is_data = True
image.colorspace_settings.name = 'Non-Color'
return image
return None
@property
def color2(self):
color_value, value_type = self._vmt.get_vector('$color2', None)
if color_value is None:
return None
divider = 255 if value_type is int else 1
color_value = list(map(lambda a: a / divider, color_value))
if len(color_value) == 1:
color_value = [color_value[0], color_value[0], color_value[0]]
return self.ensure_length(color_value, 4, 1.0)
@property
def color(self):
color_value, value_type = self._vmt.get_vector('$color', None)
if color_value is None:
return None
divider = 255 if value_type is int else 1
color_value = list(map(lambda a: a / divider, color_value))
if len(color_value) == 1:
color_value = [color_value[0], color_value[0], color_value[0]]
return self.ensure_length(color_value, 4, 1.0)
@property
def translucent(self):
return self._vmt.get_int('$translucent', 0) == 1
@property
def alphatest(self):
return self._vmt.get_int('$alphatest', 0) == 1
@property
def alphatestreference(self):
return self._vmt.get_float('$alphatestreference', 0.5)
@property
def allowalphatocoverage(self):
return self._vmt.get_int('$allowalphatocoverage', 0) == 1
@property
def additive(self):
return self._vmt.get_int('$additive', 0) == 1
@property
def phong(self):
return self._vmt.get_int('$phong_enable', 0) == 1
@property
def selfillum(self):
return self._vmt.get_int('$selfillum', 0) == 1
@property
def basealphaenvmapmask(self):
return self._vmt.get_int('$basealphaenvmapmask', 1) == 1
@property
def basemapalphaphongmask(self):
return self._vmt.get_int('$basemapalphaphongmask', 0) == 1
@property
def normalmapalphaphongmask(self):
return self._vmt.get_int('$normalmapalphaphongmask', 1) == 1
@property
def normalmapalphaenvmapmask(self):
return self._vmt.get_int('$normalmapalphaenvmapmask', 0) == 1
@property
def envmap(self):
return self._vmt.get_string('$envmap', None) is not None
@property
def envmapmask(self):
texture_path = self._vmt.get_string('$envmapmask', None)
if texture_path is not None:
image = self.load_texture_or_default(texture_path, (1, 1, 1, 1.0))
image.colorspace_settings.is_data = True
image.colorspace_settings.name = 'Non-Color'
return image
return None
@property
def envmaptint(self):
color_value, value_type = self._vmt.get_vector('$envmaptint', [1, 1, 1])
divider = 255 if value_type is int else 1
color_value = list(map(lambda a: a / divider, color_value))
if len(color_value) == 1:
color_value = [color_value[0], color_value[0], color_value[0]]
return self.ensure_length(color_value, 4, 1.0)
@property
def phongfresnelranges(self):
value, value_type = self._vmt.get_vector('$phongfresnelranges', None)
if value is not None:
divider = 255 if value_type is int else 1
value = list(map(lambda a: a / divider, value))
return self.ensure_length(value, 3, 0.1)
return None
@property
def phongexponent(self):
value = self._vmt.get_float('$phongexponent', None)
return value
@property
def phongboost(self):
value = self._vmt.get_float('$phongboost', 1)
return value
@property
def phongalbedotint(self):
return self._vmt.get_int('$phongalbedotint', 1) == 1
@property
def phongtint(self):
color_value, value_type = self._vmt.get_vector('$phongtint', None)
if color_value is None:
return None
divider = 255 if value_type is int else 1
color_value = list(map(lambda a: a / divider, color_value))
if len(color_value) == 1:
color_value = [color_value[0], color_value[0], color_value[0]]
return self.ensure_length(color_value, 4, 1.0)
def create_nodes(self, material_name):
if super().create_nodes(material_name) in ['UNKNOWN', 'LOADED']:
return
if self._vmt.get('proxies', None):
proxies = self._vmt.get('proxies')
for proxy_name, proxy_data in proxies.items():
if proxy_name == 'selectfirstifnonzero':
result_var = proxy_data.get('resultvar')
src1_var = proxy_data.get('srcvar1')
src2_var = proxy_data.get('srcvar2')
src1_value, src1_type = self._vmt.get_vector(src1_var, [0])
if all([val > 0 for val in src1_value]):
self._vmt[result_var] = self._vmt[src1_var]
else:
self._vmt[result_var] = self._vmt[src2_var]
material_output = self.create_node(Nodes.ShaderNodeOutputMaterial)
material_output.location = [250, 0]
parentnode = material_output
if self.alphatest or self.translucent:
if self.translucent:
self.bpy_material.blend_method = 'BLEND'
else:
self.bpy_material.blend_method = 'HASHED'
self.bpy_material.shadow_method = 'HASHED'
if self.use_bvlg_status:
self.do_arrange = False
if self.alphatest or self.translucent:
alphatest_node = self.create_node_group("$alphatest", [250, 0])
parentnode = alphatest_node
material_output.location = [450, 0]
alphatest_node.inputs['$alphatestreference [value]'].default_value = self.alphatestreference
alphatest_node.inputs['$allowalphatocoverage [boolean]'].default_value = self.allowalphatocoverage
self.connect_nodes(alphatest_node.outputs['BSDF'], material_output.inputs['Surface'])
group_node = self.create_node_group("VertexLitGeneric", [-200, 0])
self.connect_nodes(group_node.outputs['BSDF'], parentnode.inputs[0])
if self.basetexture:
basetexture_node = self.create_and_connect_texture_node(self.basetexture,
group_node.inputs['$basetexture [texture]'],
name='$basetexture')
basetexture_node.location = [-800, 0]
if self.basealphaenvmapmask:
self.connect_nodes(basetexture_node.outputs['Alpha'],
group_node.inputs['envmapmask [basemap texture alpha]'])
if self.basemapalphaphongmask:
self.connect_nodes(basetexture_node.outputs['Alpha'],
group_node.inputs['phongmask [bumpmap texture alpha]'])
if self.alphatest:
self.connect_nodes(basetexture_node.outputs['Alpha'],
alphatest_node.inputs['Alpha [basemap texture alpha]'])
if self.color or self.color2:
group_node.inputs['$color2 [RGB field]'].default_value = self.color or self.color2
if self.envmap:
group_node.inputs['$envmap [boolean]'].default_value = 1
if self.envmaptint:
group_node.inputs['$envmaptint [RGB field]'].default_value = self.envmaptint
if self.bumpmap:
bumpmap_node = self.create_and_connect_texture_node(self.bumpmap,
group_node.inputs['$bumpmap [texture]'],
name='$bumpmap')
bumpmap_node.location = [-800, -220]
if self.normalmapalphaenvmapmask:
self.connect_nodes(bumpmap_node.outputs['Alpha'],
group_node.inputs['envmapmask [basemap texture alpha]'])
elif self.normalmapalphaphongmask and not self.basemapalphaphongmask:
self.connect_nodes(bumpmap_node.outputs['Alpha'],
group_node.inputs['phongmask [bumpmap texture alpha]'])
if self.phong:
group_node.inputs['$phong [bool]'].default_value = 1
if self.phongboost:
group_node.inputs['$phongboost [value]'].default_value = self.phongboost
if self.phongexponent:
group_node.inputs['$phongexponent [value]'].default_value = self.phongexponent
elif self.phongexponenttexture:
phongexponent_group_node = self.create_node_group('$phongexponenttexture splitter', [-500, -300])
self.connect_nodes(phongexponent_group_node.outputs['$phongexponent [value]'],
group_node.inputs['$phongexponent [value]'])
self.connect_nodes(phongexponent_group_node.outputs['rimlight mask'],
group_node.inputs['rimlight mask'])
phongexponenttexture_node = self.create_and_connect_texture_node(self.phongexponenttexture,
phongexponent_group_node.inputs[
'$phongexponenttexture [texture]'],
phongexponent_group_node.inputs[
'alpha'],
name='$phongexponenttexture')
phongexponenttexture_node.location = [-800, -470]
if self.phongalbedotint is not None and not self.phongtint:
phongexponent_group_node.location = [-550, -300]
phongalbedo_node = self.create_node_group("$phongalbedotint", [-350, -345])
self.connect_nodes(phongexponent_group_node.outputs['phongalbedotint amount'],
phongalbedo_node.inputs['phongalbedotint amount'])
self.connect_nodes(phongalbedo_node.outputs['$phongtint [RGB field]'],
group_node.inputs['$phongtint [RGB field]'])
if self.basetexture is not None:
self.connect_nodes(basetexture_node.outputs['Color'],
phongalbedo_node.inputs['$basetexture [texture]'])
else:
group_node.inputs['$phongexponent [value]'].default_value = 10
if self.phongtint is not None:
group_node.inputs['$phongtint [RGB field]'].default_value = self.phongtint
if self.phongfresnelranges:
group_node.inputs['$phongfresnelranges [value field]'].default_value = self.phongfresnelranges
if self.selfillum:
group_node.inputs['$selfillum [bool]'].default_value = 1
if self.selfillummask:
selfillummask_node = self.create_and_connect_texture_node(self.selfillummask, group_node.inputs[
'$selfillummask [texture alpha]'])
selfillummask_node.location = [-500, -510]
elif self.basetexture is not None:
self.connect_nodes(basetexture_node.outputs['Alpha'],
group_node.inputs['$selfillummask [texture alpha]'])
else:
shader = self.create_node(Nodes.ShaderNodeBsdfPrincipled, self.SHADER)
self.connect_nodes(shader.outputs['BSDF'], material_output.inputs['Surface'])
basetexture = self.basetexture
if basetexture:
basetexture_node = self.create_node(Nodes.ShaderNodeTexImage, '$basetexture')
basetexture_node.image = basetexture
basetexture_node.id_data.nodes.active = basetexture_node
if self.color or self.color2:
color_mix = self.create_node(Nodes.ShaderNodeMixRGB)
color_mix.blend_type = 'MULTIPLY'
self.connect_nodes(basetexture_node.outputs['Color'], color_mix.inputs['Color1'])
color_mix.inputs['Color2'].default_value = (self.color or self.color2)
color_mix.inputs['Fac'].default_value = 1.0
self.connect_nodes(color_mix.outputs['Color'], shader.inputs['Base Color'])
else:
self.connect_nodes(basetexture_node.outputs['Color'], shader.inputs['Base Color'])
if self.translucent or self.alphatest:
self.connect_nodes(basetexture_node.outputs['Alpha'], shader.inputs['Alpha'])
if self.additive:
basetexture_invert_node = self.create_node(Nodes.ShaderNodeInvert)
basetexture_additive_mix_node = self.create_node(Nodes.ShaderNodeMixRGB)
self.insert_node(basetexture_node.outputs['Color'], basetexture_additive_mix_node.inputs['Color1'],
basetexture_additive_mix_node.outputs['Color'])
basetexture_additive_mix_node.inputs['Color2'].default_value = (1.0, 1.0, 1.0, 1.0)
self.connect_nodes(basetexture_node.outputs['Color'], basetexture_invert_node.inputs['Color'])
self.connect_nodes(basetexture_invert_node.outputs['Color'], shader.inputs['Transmission'])
self.connect_nodes(basetexture_invert_node.outputs['Color'],
basetexture_additive_mix_node.inputs['Fac'])
if self.hairmask:
self.create_texture_node(self.hairmask, 'hairmask')
bumpmap = self.bumpmap
if bumpmap:
bumpmap_node = self.create_node(Nodes.ShaderNodeTexImage, '$bumpmap')
bumpmap_node.image = bumpmap
normalmap_node = self.create_node(Nodes.ShaderNodeNormalMap)
self.connect_nodes(bumpmap_node.outputs['Color'], normalmap_node.inputs['Color'])
self.connect_nodes(normalmap_node.outputs['Normal'], shader.inputs['Normal'])
if self.selfillum:
selfillummask = self.selfillummask
basetexture_node = self.get_node('$basetexture')
if selfillummask is not None:
selfillummask_node = self.create_node(Nodes.ShaderNodeTexImage, '$selfillummask')
selfillummask_node.image = selfillummask
if 'Emission Strength' in shader.inputs:
self.connect_nodes(selfillummask_node.outputs['Color'], shader.inputs['Emission Strength'])
else:
if 'Emission Strength' in shader.inputs:
self.connect_nodes(basetexture_node.outputs['Alpha'], shader.inputs['Emission Strength'])
self.connect_nodes(basetexture_node.outputs['Color'], shader.inputs['Emission'])
if not self.phong:
shader.inputs['Specular'].default_value = 0
elif self.phongboost is not None:
shader.inputs['Specular'].default_value = self.clamp_value(self.phongboost / 64)
phongexponenttexture = self.phongexponenttexture
if self.phongexponent is not None and phongexponenttexture is None:
shader.inputs['Roughness'].default_value = self.clamp_value(self.phongexponent / 256)
elif self.phongexponenttexture is not None:
phongexponenttexture_node = self.create_node(Nodes.ShaderNodeTexImage, '$phongexponenttexture')
phongexponenttexture_node.image = phongexponenttexture
phongexponenttexture_split_node = self.create_node(Nodes.ShaderNodeSeparateRGB)
self.connect_nodes(phongexponenttexture_node.outputs['Color'],
phongexponenttexture_split_node.inputs['Image'])
phongexponenttexture_r_invert_node = self.create_node(Nodes.ShaderNodeInvert)
self.connect_nodes(phongexponenttexture_split_node.outputs['R'],
phongexponenttexture_r_invert_node.inputs['Color'])
self.connect_nodes(phongexponenttexture_split_node.outputs['G'],
shader.inputs['Metallic'])
self.connect_nodes(phongexponenttexture_r_invert_node.outputs['Color'], shader.inputs['Roughness'])
| 49.190104 | 124 | 0.586373 |
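The shader loader above (heroes_hair.py) repeats the same colour handling in its color, color2, envmaptint and phongtint properties: divide integer components by 255, expand a single channel to RGB, then pad to RGBA. A minimal standalone sketch of that logic as one helper follows; parse_vmt_color is a hypothetical name and not part of the SourceIO API.

# Standalone sketch (hypothetical helper, not part of SourceIO) of the colour parsing
# repeated by the color/color2/envmaptint/phongtint properties above.
def parse_vmt_color(color_value, value_type, default_alpha=1.0):
    if color_value is None:
        return None
    divider = 255 if value_type is int else 1
    color_value = [component / divider for component in color_value]
    if len(color_value) == 1:
        color_value = color_value * 3       # single channel -> grey RGB
    while len(color_value) < 4:
        color_value.append(default_alpha)   # pad to RGBA
    return color_value[:4]


# Example: an integer tint of [128, 64, 255] becomes roughly [0.5, 0.25, 1.0, 1.0].
print(parse_vmt_color([128, 64, 255], int))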
75402423b596bfef433802a6f7c11cdc78954eed | 4,426 | py | Python | yatube/posts/views.py | kotofey97/yatube_project_finale | 01a11edd771035f961eecfd8dbf0dc004cee537d | ["MIT"] | 3 | 2021-10-07T14:11:32.000Z | 2021-12-07T12:23:53.000Z | yatube/posts/views.py | kotofey97/yatube_project_finale | 01a11edd771035f961eecfd8dbf0dc004cee537d | ["MIT"] | null | null | null | yatube/posts/views.py | kotofey97/yatube_project_finale | 01a11edd771035f961eecfd8dbf0dc004cee537d | ["MIT"] | null | null | null |
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404, redirect, render
from django.utils import timezone
from .forms import CommentForm, PostForm
from .models import Follow, Group, Post, User
PER_PAGE = 10
def index(request):
post_list = Post.objects.all()
paginator = Paginator(post_list, PER_PAGE)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
template = 'posts/index.html'
context = {
'page_obj': page_obj,
}
return render(request, template, context)
def group_posts(request, slug):
group = get_object_or_404(Group, slug=slug)
post_list = group.posts.all()
paginator = Paginator(post_list, PER_PAGE)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
template = 'posts/group_list.html'
context = {
'group': group,
'page_obj': page_obj,
}
return render(request, template, context)
def profile(request, username):
author = get_object_or_404(User, username=username)
post_list = Post.objects.filter(author__username=username)
paginator = Paginator(post_list, PER_PAGE)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
following = Follow.objects.filter(user__username=request.user,
author=author)
context = {
'author': author,
'page_obj': page_obj,
'following': following,
}
return render(request, 'posts/profile.html', context)
def post_detail(request, post_id):
post = get_object_or_404(Post, id=post_id)
comment_form = CommentForm(request.POST or None)
comments = post.comments.all()
author = post.author
following = Follow.objects.filter(user__username=request.user,
author=author)
context = {
'author': author,
'post': post,
'form': comment_form,
'following': following,
'comments': comments,
}
return render(request, 'posts/post_detail.html', context)
@login_required
def post_create(request):
form = PostForm(request.POST or None, files=request.FILES or None)
if request.method == "POST" and form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('posts:profile', username=post.author)
context = {
'form': form
}
return render(request, 'posts/create_post.html', context)
@login_required
def post_edit(request, post_id):
post = get_object_or_404(Post, pk=post_id)
if request.user != post.author:
return redirect('posts:post_detail', post_id=post_id)
form = PostForm(
request.POST or None,
files=request.FILES or None,
instance=post)
if request.method == "POST" and form.is_valid():
form.save()
return redirect('posts:post_detail', post_id=post_id)
context = {
'form': form,
'is_edit': True,
}
return render(request, 'posts/create_post.html', context)
@login_required
def add_comment(request, post_id):
post = get_object_or_404(Post, pk=post_id)
form = CommentForm(request.POST or None)
if form.is_valid():
comment = form.save(commit=False)
comment.author = request.user
comment.post = post
comment.save()
return redirect('posts:post_detail', post_id=post_id)
@login_required
def follow_index(request):
post_list = Post.objects.filter(author__following__user=request.user)
paginator = Paginator(post_list, PER_PAGE)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
context = {
'page_obj': page_obj,
}
return render(request, 'posts/follow.html', context)
@login_required
def profile_follow(request, username):
user = get_object_or_404(User, username=username)
Follow.objects.get_or_create(user=request.user, author=user)
return redirect('posts:profile', username)
@login_required
def profile_unfollow(request, username):
user = get_object_or_404(User, username=username)
follow, created = Follow.objects.get_or_create(
user=request.user, author=user)
follow.delete()
return redirect('posts:profile', username)
| 30.736111 | 73 | 0.676005 |
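Every view in posts/views.py above repeats the same three Paginator lines. A minimal sketch of how that could be factored into a helper follows; paginate is a hypothetical function, not part of the project, and it assumes the same PER_PAGE constant.

from django.core.paginator import Paginator

PER_PAGE = 10


def paginate(request, queryset, per_page=PER_PAGE):
    """Return the Page object for the current ?page= query parameter."""
    paginator = Paginator(queryset, per_page)
    page_number = request.GET.get('page')
    return paginator.get_page(page_number)


# Hypothetical refactor of index() above:
#     page_obj = paginate(request, Post.objects.all())
#     return render(request, 'posts/index.html', {'page_obj': page_obj})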
b41b794f2b4f981437188ede73fb8dbc1cf341cd | 1,758 | py | Python | localstack/services/secretsmanager/secretsmanager_listener.py | doytsujin/localstack | 46ffd646af553f381cc567e4a7a06f604640c1c7 | ["Apache-2.0"] | 31,928 | 2017-07-04T03:06:28.000Z | 2022-03-31T22:33:27.000Z | localstack/services/secretsmanager/secretsmanager_listener.py | doytsujin/localstack | 46ffd646af553f381cc567e4a7a06f604640c1c7 | ["Apache-2.0"] | 5,216 | 2017-07-04T11:45:41.000Z | 2022-03-31T22:02:14.000Z | localstack/services/secretsmanager/secretsmanager_listener.py | lambdafunc/localstack | 6285b43bec57435a2179310a8de2af8d8d8cf8dd | ["Apache-2.0"] | 3,056 | 2017-06-05T13:29:11.000Z | 2022-03-31T20:54:43.000Z |
import json
import logging
from requests.models import Request
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_responses import MessageConversion
from localstack.utils.common import to_str
from localstack.utils.persistence import PersistingProxyListener
LOG = logging.getLogger(__name__)
class ProxyListenerSecretsManager(PersistingProxyListener):
def api_name(self):
return "secretsmanager"
def forward_request(self, method, path, data, headers):
data = json.loads(to_str(data or "{}"))
secret_id = data.get("SecretId") or ""
if ":" in secret_id:
parts = secret_id.split(":")
if parts[3] != aws_stack.get_region():
LOG.info(
'Unexpected request region %s for secret "%s"',
aws_stack.get_region(),
secret_id,
)
# secret ARN ends with "-<randomId>" which we remove in the request for upstream compatibility
# if the full arn is being sent then we remove the string in the end
if parts[-1][-7] == "-":
data["SecretId"] = parts[-1][: len(parts[-1]) - 7]
elif parts[-1][-1] != "-":
data["SecretId"] = data["SecretId"] + "-"
data = json.dumps(data)
return Request(data=data, headers=headers, method=method)
return True
def return_response(self, method, path, data, headers, response):
super(ProxyListenerSecretsManager, self).return_response(
method, path, data, headers, response
)
if response.content:
return MessageConversion.fix_account_id(response)
UPDATE_SECRETSMANAGER = ProxyListenerSecretsManager()
| 35.877551 | 106 | 0.624005 |
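The forward_request comment above notes that a full secret ARN ends with a dash plus a short random suffix, which the listener strips for upstream compatibility. A small worked example of that string handling follows; the ARN is made up.

# Worked example of the suffix handling in forward_request above; the ARN is made up.
secret_id = "arn:aws:secretsmanager:us-east-1:000000000000:secret:my-secret-AbC123"
parts = secret_id.split(":")
last = parts[-1]                      # "my-secret-AbC123"
stripped = last
if last[-7] == "-":                   # trailing "-<6 random chars>"
    stripped = last[: len(last) - 7]
print(stripped)                       # "my-secret"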
c977c3c23897ff6268eb207936bb3dfe5f62c1da | 796 | py | Python | migrations/versions/0c4d3d7722d9_.py | CapitalD/taplist | 08a113f721f0cba2e33aa6cee00c97db1af24b4e | ["MIT"] | null | null | null | migrations/versions/0c4d3d7722d9_.py | CapitalD/taplist | 08a113f721f0cba2e33aa6cee00c97db1af24b4e | ["MIT"] | 34 | 2017-03-30T22:14:02.000Z | 2017-09-13T03:18:47.000Z | migrations/versions/0c4d3d7722d9_.py | CapitalD/taplist | 08a113f721f0cba2e33aa6cee00c97db1af24b4e | ["MIT"] | null | null | null |
"""empty message
Revision ID: 0c4d3d7722d9
Revises: 602fa0cf66f9
Create Date: 2017-08-23 23:28:01.689046
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0c4d3d7722d9'
down_revision = '602fa0cf66f9'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('location', sa.Column('short_name', sa.String(length=255), nullable=True))
op.create_unique_constraint(None, 'location', ['short_name'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'location', type_='unique')
op.drop_column('location', 'short_name')
# ### end Alembic commands ###
| 25.677419 | 92 | 0.701005 |
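In the migration above, Alembic's autogenerate left the unique constraint name as None, which drop_constraint typically needs an explicit name (or a configured naming convention) to resolve. A hedged sketch of the same pair of operations with an explicit name follows; uq_location_short_name is an assumed name, not taken from the taplist project.

# Sketch with an explicit constraint name; "uq_location_short_name" is an assumed
# name, not taken from the project.
from alembic import op
import sqlalchemy as sa


def upgrade():
    op.add_column('location', sa.Column('short_name', sa.String(length=255), nullable=True))
    op.create_unique_constraint('uq_location_short_name', 'location', ['short_name'])


def downgrade():
    op.drop_constraint('uq_location_short_name', 'location', type_='unique')
    op.drop_column('location', 'short_name')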
181926489308f2f48beee4b3b398ba427a2197f7 | 731 | py | Python | neutron/version.py | SnabbCo/neutron | a657c06d10f2171149c6b1863df36522bdc11cd7 | ["Apache-2.0"] | 1 | 2016-04-23T21:33:31.000Z | 2016-04-23T21:33:31.000Z | neutron/version.py | SnabbCo/neutron | a657c06d10f2171149c6b1863df36522bdc11cd7 | ["Apache-2.0"] | null | null | null | neutron/version.py | SnabbCo/neutron | a657c06d10f2171149c6b1863df36522bdc11cd7 | ["Apache-2.0"] | 4 | 2015-04-14T10:06:51.000Z | 2019-10-02T01:28:34.000Z |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
version_info = pbr.version.VersionInfo('neutron')
| 36.55 | 78 | 0.735978 |
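neutron/version.py above only instantiates pbr's VersionInfo. A minimal usage sketch of that object follows; it assumes the neutron package metadata (or its git checkout) is visible to pbr, and the printed values are illustrative.

# Minimal usage sketch of the VersionInfo object defined above; works only where the
# 'neutron' package metadata (or its git tree) is available to pbr.
import pbr.version

version_info = pbr.version.VersionInfo('neutron')
print(version_info.version_string())   # release number, e.g. a "2014.1"-style string
print(version_info.release_string())   # full release string, including any dev suffix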
26aa5ab25b1f17baacf04ac7b8d6876f3b300fd7 | 5,765 | py | Python | env/Lib/site-packages/pandas_datareader/tests/test_quandl.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | ["MIT"] | null | null | null | env/Lib/site-packages/pandas_datareader/tests/test_quandl.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | ["MIT"] | null | null | null | env/Lib/site-packages/pandas_datareader/tests/test_quandl.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | ["MIT"] | null | null | null |
import os
import pandas as pd
import pytest
from pandas_datareader import data as web
from pandas_datareader.compat import assert_frame_equal
TEST_API_KEY = os.getenv("QUANDL_API_KEY")
# Ensure blank TEST_API_KEY not used in pull request
TEST_API_KEY = None if not TEST_API_KEY else TEST_API_KEY
pytestmark = [
pytest.mark.requires_api_key,
pytest.mark.quandl,
pytest.mark.skipif(TEST_API_KEY is None, reason="QUANDL_API_KEY not set"),
]
class TestQuandl(object):
# we test data from 10 years back where possible, 2 years otherwise, or...
start10 = "2007-01-01" # over ten years back
end10 = "2007-01-05"
day10 = "2007-01-04"
start2 = "2015-01-01" # over two years back
end2 = "2015-01-05"
day2 = "2015-01-02"
def check_headers(self, df, expected_cols):
expected_cols = frozenset(expected_cols)
act_cols = frozenset(df.columns.tolist())
assert expected_cols == act_cols, "unexpected cols: " + str(act_cols)
def test_db_wiki_us(self):
df = web.DataReader(
"F", "quandl", self.start10, self.end10, api_key=TEST_API_KEY
)
self.check_headers(
df,
[
"Open",
"High",
"Low",
"Close",
"Volume",
"ExDividend",
"SplitRatio",
"AdjOpen",
"AdjHigh",
"AdjLow",
"AdjClose",
"AdjVolume",
],
)
assert df.Close.at[pd.to_datetime(self.day10)] == 7.70
def test_db_fse_frankfurt(self):
# ALV_X: Allianz SE
df = web.DataReader(
"FSE/ALV_X", "quandl", self.start10, self.end10, api_key=TEST_API_KEY
)
self.check_headers(
df,
[
"Open",
"High",
"Low",
"Close",
"Change",
"TradedVolume",
"Turnover",
"LastPriceoftheDay",
"DailyTradedUnits",
"DailyTurnover",
],
)
assert df.Close.at[pd.to_datetime(self.day10)] == 159.45
def test_fse_eon(self):
# EON_X: E.on Se
df = web.DataReader(
"FSE/EON_X", "quandl", self.start2, self.end2, api_key=TEST_API_KEY
)
self.check_headers(
df,
[
"Low",
"LastPriceoftheDay",
"Turnover",
"Open",
"DailyTurnover",
"TradedVolume",
"Change",
"DailyTradedUnits",
"Close",
"High",
],
)
assert df.Close.at[pd.to_datetime(self.day2)] == 14.03
def test_db_euronext_be_fr_nl_pt(self):
# FP: Total SA
# as of 2017-06-11, some datasets end a few months after their start,
# e.g. ALVD, BASD
df = web.DataReader(
"EURONEXT/FP", "quandl", self.start2, self.end2, api_key=TEST_API_KEY
)
self.check_headers(df, ["Open", "High", "Low", "Last", "Turnover", "Volume"])
assert df.Last.at[pd.to_datetime(self.day2)] == 42.525
df2 = web.DataReader("FP.FR", "quandl", self.start2, self.end2)
assert (df.Last == df2.Last).all()
def test_hk_hsbc_uk(self):
# 00005: HSBC
df = web.DataReader(
"HKEX/00005", "quandl", self.start2, self.end2, api_key=TEST_API_KEY
)
self.check_headers(
df,
[
"NominalPrice",
"NetChange",
"Change",
"Bid",
"Ask",
"PEx",
"High",
"Low",
"PreviousClose",
"ShareVolume000",
"Turnover000",
"LotSize",
],
)
# as of 2017-06-11, Price == LastClose, all others are NaN
assert df.NominalPrice.at[pd.to_datetime(self.day2)] == 74.0
def test_db_nse_in(self):
        # TCS: Tata Consultancy Services
df = web.DataReader(
"NSE/TCS", "quandl", self.start10, self.end10, api_key=TEST_API_KEY
)
self.check_headers(
df,
[
"Open",
"High",
"Low",
"Last",
"Close",
"TotalTradeQuantity",
"TurnoverLacs",
],
)
assert df.Close.at[pd.to_datetime(self.day10)] == 1259.05
def test_db_tse_jp(self):
# TSE/6758: Sony Corp.
df = web.DataReader(
"TSE/6758", "quandl", self.start10, self.end10, api_key=TEST_API_KEY
)
self.check_headers(df, ["Open", "High", "Low", "Close", "Volume"])
assert df.Close.at[pd.to_datetime(self.day10)] == 5190.0
df2 = web.get_data_quandl(
"TSE/6758", self.start10, self.end10, api_key=TEST_API_KEY
)
assert_frame_equal(df, df2)
def test_db_hkex_cn(self):
# HKEX/00941: China Mobile
df = web.DataReader(
"HKEX/00941", "quandl", self.start2, self.end2, api_key=TEST_API_KEY
)
self.check_headers(
df,
[
"NominalPrice",
"NetChange",
"Change",
"Bid",
"Ask",
"PEx",
"High",
"Low",
"PreviousClose",
"ShareVolume000",
"Turnover000",
"LotSize",
],
)
assert df.High.at[pd.to_datetime(self.day2)] == 91.9
| 29.870466 | 85 | 0.485343 |
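The test class above drives pandas-datareader's Quandl reader through pytest. A standalone sketch of the same call follows; it assumes QUANDL_API_KEY is set in the environment and that the Quandl service is reachable.

# Standalone sketch of the DataReader call exercised by the tests above; requires a
# valid QUANDL_API_KEY in the environment and network access to Quandl.
import os

from pandas_datareader import data as web

api_key = os.getenv("QUANDL_API_KEY")
df = web.DataReader("F", "quandl", "2007-01-01", "2007-01-05", api_key=api_key)
print(df.Close.head())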
798fc879f847c774d6bceb4e144f59b443a5b8ec | 1,825 | py | Python | src/arch/x86/X86NativeTrace.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | ["BSD-3-Clause"] | 135 | 2016-10-21T03:31:49.000Z | 2022-03-25T01:22:20.000Z | src/arch/x86/X86NativeTrace.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | ["BSD-3-Clause"] | 35 | 2017-03-10T17:57:46.000Z | 2022-02-18T17:34:16.000Z | src/arch/x86/X86NativeTrace.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | ["BSD-3-Clause"] | 48 | 2016-12-08T12:03:13.000Z | 2022-02-16T09:16:13.000Z |
# Copyright (c) 2009 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.SimObject import SimObject
from m5.params import *
from m5.objects.CPUTracers import NativeTrace
class X86NativeTrace(NativeTrace):
type = 'X86NativeTrace'
cxx_class = 'Trace::X86NativeTrace'
cxx_header = 'arch/x86/nativetrace.hh'
| 48.026316 | 72 | 0.790685 |
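X86NativeTrace.py above only declares the SimObject wrapper for the C++ native tracer. A hedged sketch of how such a tracer is attached in a gem5 configuration script follows; it assumes a gem5 build whose m5.objects exposes AtomicSimpleCPU and X86NativeTrace, and it uses the standard BaseCPU tracer parameter.

# Hedged sketch of attaching the tracer in a gem5 config script; only runs inside
# gem5's Python environment, where m5.objects provides these classes.
from m5.objects import AtomicSimpleCPU, X86NativeTrace

cpu = AtomicSimpleCPU()
cpu.tracer = X86NativeTrace()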
0ca87652837f1df905884621ab95c9489b490585 | 416 | py | Python | utils.py | satyenrajpal/yeet-sparsh | a39c7b23bb68ff60f3536b91f01238ff7f7500e9 | ["MIT"] | null | null | null | utils.py | satyenrajpal/yeet-sparsh | a39c7b23bb68ff60f3536b91f01238ff7f7500e9 | ["MIT"] | 1 | 2020-10-26T22:05:24.000Z | 2020-11-09T01:06:41.000Z | utils.py | satyenrajpal/yeet-sparsh | a39c7b23bb68ff60f3536b91f01238ff7f7500e9 | ["MIT"] | null | null | null |
import numpy as np
def average(frame):
return np.average(frame)
# Average tiles in row major format
def average_tiles(frame,coords, max_val, scale = 1):
avgs = []
for section in coords:
for top_left_x, top_left_y, bottom_right_x, bottom_right_y in section:
avgs.append(min(average(frame[top_left_x:bottom_right_x, top_left_y:bottom_right_y]) * scale /max_val, 1.0))
return avgs
| 32 | 120 | 0.713942 |
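In utils.py above, coords is a nested structure: a list of sections, each holding (top_left_x, top_left_y, bottom_right_x, bottom_right_y) boxes in row-major order. A small usage sketch follows; the 8-bit random frame and max_val of 255 are example values, not taken from the project.

# Usage sketch for average_tiles above; the frame and max_val here are example values.
import numpy as np

frame = np.random.randint(0, 256, size=(120, 160))
coords = [
    [(0, 0, 60, 80), (0, 80, 60, 160)],      # top-left and top-right tiles
    [(60, 0, 120, 80), (60, 80, 120, 160)],  # bottom-left and bottom-right tiles
]
print(average_tiles(frame, coords, max_val=255))  # four averages scaled into [0, 1]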
6b5203f80d18674044dc835e6474dc5c2e2cf143 | 6,166 | py | Python | lib/tests/test_wallet_vertical.py | GetAywa/electrum-aywa | 07a548bd14cdf563da49c1f1e52644b833ca972e | ["MIT"] | null | null | null | lib/tests/test_wallet_vertical.py | GetAywa/electrum-aywa | 07a548bd14cdf563da49c1f1e52644b833ca972e | ["MIT"] | null | null | null | lib/tests/test_wallet_vertical.py | GetAywa/electrum-aywa | 07a548bd14cdf563da49c1f1e52644b833ca972e | ["MIT"] | null | null | null |
import unittest
from unittest import mock
import lib.bitcoin as bitcoin
import lib.keystore as keystore
import lib.storage as storage
import lib.wallet as wallet
# TODO: 2fa
class TestWalletKeystoreAddressIntegrity(unittest.TestCase):
gap_limit = 1 # make tests run faster
def _check_seeded_keystore_sanity(self, ks):
self.assertTrue (ks.is_deterministic())
self.assertFalse(ks.is_watching_only())
self.assertFalse(ks.can_import())
self.assertTrue (ks.has_seed())
def _check_xpub_keystore_sanity(self, ks):
self.assertTrue (ks.is_deterministic())
self.assertTrue (ks.is_watching_only())
self.assertFalse(ks.can_import())
self.assertFalse(ks.has_seed())
def _create_standard_wallet(self, ks):
store = storage.WalletStorage('if_this_exists_mocking_failed_648151893')
store.put('keystore', ks.dump())
store.put('gap_limit', self.gap_limit)
w = wallet.Standard_Wallet(store)
w.synchronize()
return w
def _create_multisig_wallet(self, ks1, ks2):
store = storage.WalletStorage('if_this_exists_mocking_failed_648151893')
multisig_type = "%dof%d" % (2, 2)
store.put('wallet_type', multisig_type)
store.put('x%d/' % 1, ks1.dump())
store.put('x%d/' % 2, ks2.dump())
store.put('gap_limit', self.gap_limit)
w = wallet.Multisig_Wallet(store)
w.synchronize()
return w
@mock.patch.object(storage.WalletStorage, '_write')
def test_electrum_seed_standard(self, mock_write):
seed_words = 'cycle rocket west magnet parrot shuffle foot correct salt library feed song'
self.assertEqual(bitcoin.seed_type(seed_words), 'standard')
ks = keystore.from_seed(seed_words, '', False)
self._check_seeded_keystore_sanity(ks)
self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
self.assertEqual(ks.xpub, 'xpub661MyMwAqRbcFWohJWt7PHsFEJfZAvw9ZxwQoDa4SoMgsDDM1T7WK3u9E4edkC4ugRnZ8E4xDZRpk8Rnts3Nbt97dPwT52CwBdDWroaZf8U')
w = self._create_standard_wallet(ks)
self.assertEqual(w.get_receiving_addresses()[0], 'Xx4bj9RuWdhrnmn5vGjKrTHqQHcy99RsGV')
self.assertEqual(w.get_change_addresses()[0], 'Xu8Vpo1b81a6zCC574LXjEETvV4Wq58z24')
@mock.patch.object(storage.WalletStorage, '_write')
def test_electrum_seed_old(self, mock_write):
seed_words = 'powerful random nobody notice nothing important anyway look away hidden message over'
self.assertEqual(bitcoin.seed_type(seed_words), 'old')
ks = keystore.from_seed(seed_words, '', False)
self._check_seeded_keystore_sanity(ks)
self.assertTrue(isinstance(ks, keystore.Old_KeyStore))
self.assertEqual(ks.mpk, 'e9d4b7866dd1e91c862aebf62a49548c7dbf7bcc6e4b7b8c9da820c7737968df9c09d5a3e271dc814a29981f81b3faaf2737b551ef5dcc6189cf0f8252c442b3')
w = self._create_standard_wallet(ks)
self.assertEqual(w.get_receiving_addresses()[0], 'Xpz54Rncf6aC9od2cE64teK5okqgWxVpTk')
self.assertEqual(w.get_change_addresses()[0], 'Xu7Ly4vzExW9r4ijM79KWm1icCTTBoVCFE')
@mock.patch.object(storage.WalletStorage, '_write')
def test_bip39_seed_bip44_standard(self, mock_write):
seed_words = 'treat dwarf wealth gasp brass outside high rent blood crowd make initial'
self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
ks = keystore.from_bip39_seed(seed_words, '', "m/44'/0'/0'")
self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
self.assertEqual(ks.xpub, 'xpub6DFh1smUsyqmYD4obDX6ngaxhd53Zx7aeFjoobebm7vbkT6f9awJWFuGzBT9FQJEWFBL7UyhMXtYzRcwDuVbcxtv9Ce2W9eMm4KXLdvdbjv')
w = self._create_standard_wallet(ks)
self.assertEqual(w.get_receiving_addresses()[0], 'XgQx46PwWrSDcZnU9VWiC74CoUw9ibZm8n')
self.assertEqual(w.get_change_addresses()[0], 'XqwvRkJQdt2fgShtC68vjAgrEumkqA7qcK')
@mock.patch.object(storage.WalletStorage, '_write')
def test_electrum_multisig_seed_standard(self, mock_write):
seed_words = 'blast uniform dragon fiscal ensure vast young utility dinosaur abandon rookie sure'
self.assertEqual(bitcoin.seed_type(seed_words), 'standard')
ks1 = keystore.from_seed(seed_words, '', True)
self._check_seeded_keystore_sanity(ks1)
self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
self.assertEqual(ks1.xpub, 'xpub661MyMwAqRbcGNEPu3aJQqXTydqR9t49Tkwb4Esrj112kw8xLthv8uybxvaki4Ygt9xiwZUQGeFTG7T2TUzR3eA4Zp3aq5RXsABHFBUrq4c')
ks2 = keystore.from_xpub('xpub661MyMwAqRbcGfCPEkkyo5WmcrhTq8mi3xuBS7VEZ3LYvsgY1cCFDbenT33bdD12axvrmXhuX3xkAbKci3yZY9ZEk8vhLic7KNhLjqdh5ec')
self._check_xpub_keystore_sanity(ks2)
self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
w = self._create_multisig_wallet(ks1, ks2)
self.assertEqual(w.get_receiving_addresses()[0], '7TTLsc2LVWUd6ZnkhHFGJb3dnZMuSQooiu')
self.assertEqual(w.get_change_addresses()[0], '7XF9mRa2fUHynUGGLyWzpem8DEYDzN7Bew')
@mock.patch.object(storage.WalletStorage, '_write')
def test_bip39_multisig_seed_bip45_standard(self, mock_write):
seed_words = 'treat dwarf wealth gasp brass outside high rent blood crowd make initial'
self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
ks1 = keystore.from_bip39_seed(seed_words, '', "m/45'/0")
self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
self.assertEqual(ks1.xpub, 'xpub69xafV4YxC6o8Yiga5EiGLAtqR7rgNgNUGiYgw3S9g9pp6XYUne1KxdcfYtxwmA3eBrzMFuYcNQKfqsXCygCo4GxQFHfywxpUbKNfYvGJka')
ks2 = keystore.from_xpub('xpub6Bco9vrgo8rNUSi8Bjomn8xLA41DwPXeuPcgJamNRhTTyGVHsp8fZXaGzp9ypHoei16J6X3pumMAP1u3Dy4jTSWjm4GZowL7Dcn9u4uZC9W')
self._check_xpub_keystore_sanity(ks2)
self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
w = self._create_multisig_wallet(ks1, ks2)
self.assertEqual(w.get_receiving_addresses()[0], '7hmMoMUPGKPuCnxK1Yz4wawUZ6PinbtLHj')
self.assertEqual(w.get_change_addresses()[0], '7SRcVV8vWMqMPLLy9jUGJTBvJ6fpavXGvy')
| 46.360902 | 164 | 0.746027 |
8377659567809ecc3b87166715a255799a9e6378 | 1,536 | py | Python | examples/multiple_files.py | vBLFTePebWNi6c/Flask-Shell2HTTP | 369cb6a605eca9434e4b6d63f2533dce995dde47 | ["BSD-3-Clause"] | null | null | null | examples/multiple_files.py | vBLFTePebWNi6c/Flask-Shell2HTTP | 369cb6a605eca9434e4b6d63f2533dce995dde47 | ["BSD-3-Clause"] | null | null | null | examples/multiple_files.py | vBLFTePebWNi6c/Flask-Shell2HTTP | 369cb6a605eca9434e4b6d63f2533dce995dde47 | ["BSD-3-Clause"] | null | null | null |
# system imports
import requests
import tempfile
import json
# web imports
from flask import Flask
from flask_executor import Executor
from flask_shell2http import Shell2HTTP
# Flask application instance
app = Flask(__name__)
# application factory
executor = Executor()
executor.init_app(app)
shell2http = Shell2HTTP(base_url_prefix="/cmd/")
shell2http.init_app(app, executor)
ENDPOINT = "catthisformeplease"
shell2http.register_command(endpoint=ENDPOINT, command_name="strings")
@app.route("/")
def test():
"""
Prefix each filename with @ in arguments.\n
Files are stored in temporary directories which are flushed on command completion.\n
The final executed command becomes:
```bash
$ strings /tmp/inputfile /tmp/someotherfile
```
"""
url = f"http://localhost:4000/cmd/{ENDPOINT}"
# create and read dummy data from temporary files
with tempfile.TemporaryFile() as fp:
fp.write(b"Hello world!")
fp.seek(0)
f = fp.read()
    # the key should be `request_json` only.
form_data = {"args": ["@inputfile", "@someotherfile"]}
req_data = {"request_json": json.dumps(form_data)}
req_files = {"inputfile": f, "someotherfile": f}
resp = requests.post(url=url, files=req_files, data=req_data)
resp_data = resp.json()
print(resp_data)
key = resp_data["key"]
if key:
report = requests.get(f"{url}?key={key}")
return report.json()
return resp_data
# Application Runner
if __name__ == "__main__":
app.run(port=4000)
| 26.482759 | 88 | 0.690755 |
68cbb7e0ed4271f18b5db9e30135fcd547736435 | 25,025 | py | Python | DeepLearning UK Lottery -Euro-Hotpick-Latest.py | chad-38/EuroHotpicks_Prediction | d4ae18ef7e08e14e8bc6e360f46edfa5653f11e7 | ["MIT"] | 1 | 2021-02-11T20:28:31.000Z | 2021-02-11T20:28:31.000Z | DeepLearning UK Lottery -Euro-Hotpick-Latest.py | chad-38/EuroHotpicks_Prediction | d4ae18ef7e08e14e8bc6e360f46edfa5653f11e7 | ["MIT"] | null | null | null | DeepLearning UK Lottery -Euro-Hotpick-Latest.py | chad-38/EuroHotpicks_Prediction | d4ae18ef7e08e14e8bc6e360f46edfa5653f11e7 | ["MIT"] | 1 | 2021-06-25T15:49:35.000Z | 2021-06-25T15:49:35.000Z |
#!/usr/bin/env python
# coding: utf-8
# In[3]:
# -*- coding: utf-8 -*-
"""
Created on 10th Feb 2021
@author: Chaid Daud
"""
#importing libraries
import tensorflow as tf #tensorflow lib for defining the architecture of neural network
import pandas as pd #pandas lib we will use to load the dataset from excel (.csv) file
import numpy as np # numpy array to deal with arrays
import random # random lib to generate random numbers
#%%
'''
Create input data set according to output numbers
'''
numbers = [list(range(1, 51)) for _ in range(113)]  # 113 identical rows, each the numbers 1-50 (equivalent to the original literal list)
#%%
Data = pd.read_csv('hotpic.csv')  # read the data from the file
'''
Separate the five output values into five different variables
so that we can pass them while training our machine learning models.
'''
num1Output = Data['Ball 1']
num2Output = Data['Ball 2']
num3Output = Data['Ball 3']
num4Output = Data['Ball 4']
num5Output = Data['Ball 5']
numbers = np.array(numbers)
numbers.shape  # check the shape of our training data set
#%%
'''
Define the NN architecture for number 1.
The model has 5 dense layers with 64, 128, 256, 512 and 60 neurons respectively.
We use ReLU as the activation function in all layers except the last one,
where softmax is used for classification.
We use Adam as the optimizer with a learning rate of 0.0005.
'''
num1 = tf.keras.Sequential()
num1.add(tf.keras.layers.Dense(units=64, activation=tf.nn.relu, input_dim=50))
num1.add(tf.keras.layers.Dense(units=128, activation=tf.nn.relu))
num1.add(tf.keras.layers.Dense(units=256, activation=tf.nn.relu))
num1.add(tf.keras.layers.Dense(units=512, activation=tf.nn.relu))
num1.add(tf.keras.layers.Dense(units=60, activation=tf.nn.softmax))
num1.compile(loss=tf.keras.losses.sparse_categorical_crossentropy, optimizer=tf.keras.optimizers.Adam(0.0005), metrics=['accuracy'])
num1.fit(numbers, num1Output, epochs=10000)
#%%
'''
Define the NN architecture for number 2.
The model has 5 dense layers with 64, 128, 256, 512 and 60 neurons respectively.
We use ReLU as the activation function in all layers except the last one,
where softmax is used for classification.
We use Adam as the optimizer with a learning rate of 0.0005.
'''
num2 = tf.keras.Sequential()
num2.add(tf.keras.layers.Dense(units=64, activation=tf.nn.relu, input_dim=50))
num2.add(tf.keras.layers.Dense(units=128, activation=tf.nn.relu))
num2.add(tf.keras.layers.Dense(units=256, activation=tf.nn.relu))
num2.add(tf.keras.layers.Dense(units=512, activation=tf.nn.relu))
num2.add(tf.keras.layers.Dense(units=60, activation=tf.nn.softmax))
num2.compile(loss=tf.keras.losses.sparse_categorical_crossentropy, optimizer=tf.keras.optimizers.Adam(0.0005), metrics=['accuracy'])
num2.fit(numbers, num2Output, epochs=10000)
#%%
'''
Define the NN architecture for number 3.
The model has 5 dense layers with 64, 128, 256, 512 and 60 neurons respectively.
We use ReLU as the activation function in all layers except the last one,
where softmax is used for classification.
We use Adam as the optimizer with a learning rate of 0.0005.
'''
num3 = tf.keras.Sequential()
num3.add(tf.keras.layers.Dense(units=64, activation=tf.nn.relu, input_dim=50))
num3.add(tf.keras.layers.Dense(units=128, activation=tf.nn.relu))
num3.add(tf.keras.layers.Dense(units=256, activation=tf.nn.relu))
num3.add(tf.keras.layers.Dense(units=512, activation=tf.nn.relu))
num3.add(tf.keras.layers.Dense(units=60, activation=tf.nn.softmax))
num3.compile(loss=tf.keras.losses.sparse_categorical_crossentropy, optimizer=tf.keras.optimizers.Adam(0.0005), metrics=['accuracy'])
num3.fit(numbers, num3Output, epochs=10000)
#%%
'''
Define the NN architecture for number 4.
The model has 5 dense layers with 64, 128, 256, 512 and 60 neurons respectively.
We use ReLU as the activation function in all layers except the last one,
where softmax is used for classification.
We use Adam as the optimizer with a learning rate of 0.0005.
'''
num4 = tf.keras.Sequential()
num4.add(tf.keras.layers.Dense(units=64, activation=tf.nn.relu, input_dim=50))
num4.add(tf.keras.layers.Dense(units=128, activation=tf.nn.relu))
num4.add(tf.keras.layers.Dense(units=256, activation=tf.nn.relu))
num4.add(tf.keras.layers.Dense(units=512, activation=tf.nn.relu))
num4.add(tf.keras.layers.Dense(units=60, activation=tf.nn.softmax))
num4.compile(loss=tf.keras.losses.sparse_categorical_crossentropy, optimizer=tf.keras.optimizers.Adam(0.0005), metrics=['accuracy'])
num4.fit(numbers, num4Output, epochs=10000)
#%%
'''
Define the NN architecture for number 5.
The model has 5 dense layers with 64, 128, 256, 512 and 60 neurons respectively.
We use ReLU as the activation function in all layers except the last one,
where softmax is used for classification.
We use Adam as the optimizer with a learning rate of 0.0005.
'''
num5 = tf.keras.Sequential()
num5.add(tf.keras.layers.Dense(units=64, activation=tf.nn.relu, input_dim=50))
num5.add(tf.keras.layers.Dense(units=128, activation=tf.nn.relu))
num5.add(tf.keras.layers.Dense(units=256, activation=tf.nn.relu))
num5.add(tf.keras.layers.Dense(units=512, activation=tf.nn.relu))
num5.add(tf.keras.layers.Dense(units=60, activation=tf.nn.softmax))
num5.compile(loss=tf.keras.losses.sparse_categorical_crossentropy, optimizer=tf.keras.optimizers.Adam(0.0005), metrics=['accuracy'])
num5.fit(numbers, num5Output, epochs=10000)
#%%
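# Editor's note: the five training blocks above are identical except for the target
# column they fit on. A helper like the sketch below (an assumption added here, not
# part of the original script) captures that shared architecture once; it assumes
# tensorflow has already been imported as `tf` earlier in the script.
def build_ball_model(input_dim=50, n_classes=60, learning_rate=0.0005):
    """Build one softmax classifier with the same architecture as num1..num5."""
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(64, activation=tf.nn.relu, input_dim=input_dim),
        tf.keras.layers.Dense(128, activation=tf.nn.relu),
        tf.keras.layers.Dense(256, activation=tf.nn.relu),
        tf.keras.layers.Dense(512, activation=tf.nn.relu),
        tf.keras.layers.Dense(n_classes, activation=tf.nn.softmax),
    ])
    model.compile(loss=tf.keras.losses.sparse_categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adam(learning_rate),
                  metrics=['accuracy'])
    return model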
# build a random permutation of the numbers 1..50 to use as a single test input row
y = []
while True:
    num = random.randint(1, 50)
    if num not in y:
        y.append(num)
    if len(y) == 50:
        break
test = np.array([y])
p1 = num1.predict(test)
p2 = num2.predict(test)
p3 = num3.predict(test)
p4 = num4.predict(test)
p5 = num5.predict(test)
#%%
print('Num1 Maybe: ', np.argmax(p1) + 1)
print('Num2 Maybe: ', np.argmax(p2) + 1)
print('Num3 Maybe: ', np.argmax(p3) + 1)
print('Num4 Maybe: ', np.argmax(p4) + 1)
print('Num5 Maybe: ', np.argmax(p5) + 1)
# In[ ]:
#Prediction with LR 0.001
#Num1 Maybe: 2
#Num2 Maybe: 16
#Num3 Maybe: 17
#Num4 Maybe: 42
#Num5 Maybe: 51
#Prediction with LR 0.0005
#Num1 Maybe: 2
#Num2 Maybe: 20
#Num3 Maybe: 30
#Num4 Maybe: 40
#Num5 Maybe: 51
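# Editor's note: with 60 output units, np.argmax(p) + 1 can range from 1 to 60, which
# is why the recorded predictions above include an impossible value such as 51 even
# though the balls only go up to 50; using 50 output units (or clipping) would avoid this.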
| 83.97651
| 165
| 0.597682
|
f68f6f5910489f5867b781b7161c0f4b37db14a0
| 5,782
|
py
|
Python
|
metaflow/plugins/kfp/tests/run_integration_tests.py
|
talebzeghmi/metaflow
|
3c56e09c10deca88ebc5b572eac25aa605df1bc1
|
[
"Apache-2.0"
] | null | null | null |
metaflow/plugins/kfp/tests/run_integration_tests.py
|
talebzeghmi/metaflow
|
3c56e09c10deca88ebc5b572eac25aa605df1bc1
|
[
"Apache-2.0"
] | null | null | null |
metaflow/plugins/kfp/tests/run_integration_tests.py
|
talebzeghmi/metaflow
|
3c56e09c10deca88ebc5b572eac25aa605df1bc1
|
[
"Apache-2.0"
] | null | null | null |
from os import listdir
from os.path import isfile, join
from subprocess import run, PIPE
from typing import List, Dict
from .... import R
import kfp
import pytest
import yaml
import tempfile
"""
To run these tests from your terminal, go to the tests directory and run:
`python -m pytest -s -n 3 run_integration_tests.py`
This script runs all the flows in the `flows` directory. It creates
each kfp run, waits for the run to fully complete, and prints whether
or not the run was successful. It also checks to make sure the logging
functionality works.
More specifically, the tests spawn KFP runs and ensure the spawning processes
have a returncode of 0. If any test fails within KFP, an exception
is raised, the test fails, and the user can access the run link to the failed
KFP run.
Parameters:
-n: specifies the number of parallel processes used by PyTest.
Sometimes, the tests may fail on KFP due to resource quota issues. If they do,
try reducing -n (the number of parallel processes) so that fewer simultaneous
KFP runs will be scheduled.
"""
def _python():
if R.use_r():
return "python3"
else:
return "python"
def obtain_flow_file_paths(flow_dir_path: str) -> List[str]:
file_paths = [
file_name
for file_name in listdir(flow_dir_path)
if isfile(join(flow_dir_path, file_name))
and not file_name.startswith(".")
and not "raise_error_flow" in file_name
and not "accelerator_flow" in file_name
]
return file_paths
# this test ensures the integration tests fail correctly
def test_raise_failure_flow(pytestconfig) -> None:
test_cmd = (
f"{_python()} flows/raise_error_flow.py --datastore=s3 kfp run "
f"--wait-for-completion --workflow-timeout 1800 "
f"--max-parallelism 3 --experiment metaflow_test --tag test_t1 "
)
if pytestconfig.getoption("image"):
test_cmd += (
f"--no-s3-code-package --base-image {pytestconfig.getoption('image')}"
)
run_and_wait_process = run(
test_cmd,
universal_newlines=True,
stdout=PIPE,
shell=True,
)
# this ensures the integration testing framework correctly catches a failing flow
# and reports the error
assert run_and_wait_process.returncode == 1
return
def exists_nvidia_accelerator(node_selector_term: Dict) -> bool:
for affinity_match_expression in node_selector_term["matchExpressions"]:
if (
affinity_match_expression["key"] == "k8s.amazonaws.com/accelerator"
and affinity_match_expression["operator"] == "In"
and "nvidia-tesla-v100" in affinity_match_expression["values"]
):
return True
return False
def is_nvidia_accelerator_noschedule(toleration: Dict) -> bool:
if (
toleration["effect"] == "NoSchedule"
and toleration["key"] == "k8s.amazonaws.com/accelerator"
and toleration["operator"] == "Equal"
and toleration["value"] == "nvidia-tesla-v100"
):
return True
return False
def test_compile_only_accelerator_test() -> None:
with tempfile.TemporaryDirectory() as yaml_tmp_dir:
yaml_file_path = join(yaml_tmp_dir, "accelerator_flow.yaml")
compile_to_yaml_cmd = (
f"{_python()} flows/accelerator_flow.py --datastore=s3 kfp run "
f" --no-s3-code-package --yaml-only --pipeline-path {yaml_file_path}"
)
compile_to_yaml_process = run(
compile_to_yaml_cmd,
universal_newlines=True,
stdout=PIPE,
shell=True,
)
assert compile_to_yaml_process.returncode == 0
with open(f"{yaml_file_path}", "r") as stream:
try:
flow_yaml = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
for step in flow_yaml["spec"]["templates"]:
if step["name"] == "start":
start_step = step
break
affinity_found = False
for node_selector_term in start_step["affinity"]["nodeAffinity"][
"requiredDuringSchedulingIgnoredDuringExecution"
]["nodeSelectorTerms"]:
if exists_nvidia_accelerator(node_selector_term):
affinity_found = True
break
assert affinity_found
toleration_found = False
for toleration in start_step["tolerations"]:
if is_nvidia_accelerator_noschedule(toleration):
toleration_found = True
break
assert toleration_found
@pytest.mark.parametrize("flow_file_path", obtain_flow_file_paths("flows"))
def test_flows(pytestconfig, flow_file_path: str) -> None:
full_path = join("flows", flow_file_path)
    # In the process below, stdout=PIPE because we only want to capture stdout.
    # The reason is that the click echo function prints to stderr, and stderr contains
    # the main logs (run link, graph validation, package uploading, etc.). We
    # want to ensure these logs are visible to users and not captured.
    # We use the print function in kfp_cli.py to print a magic token containing the
    # run id and capture this to correctly test logging. See the
    # `check_valid_logs_process` process.
test_cmd = (
f"{_python()} {full_path} --datastore=s3 kfp run "
f"--wait-for-completion --workflow-timeout 1800 "
f"--max-parallelism 3 --experiment metaflow_test --tag test_t1 "
)
if pytestconfig.getoption("image"):
test_cmd += (
f"--no-s3-code-package --base-image {pytestconfig.getoption('image')}"
)
run_and_wait_process = run(
test_cmd,
universal_newlines=True,
stdout=PIPE,
shell=True,
)
assert run_and_wait_process.returncode == 0
return
| 32.301676
| 85
| 0.666378
|
eed727887d12726278ecc0642eb1b262bd22a096
| 2,641
|
py
|
Python
|
carbon_black_protection/komand_carbon_black_protection/triggers/new_approval_request/trigger.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2020-03-18T09:14:55.000Z
|
2020-03-18T09:14:55.000Z
|
carbon_black_protection/komand_carbon_black_protection/triggers/new_approval_request/trigger.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2021-02-23T23:57:37.000Z
|
2021-02-23T23:57:37.000Z
|
carbon_black_protection/komand_carbon_black_protection/triggers/new_approval_request/trigger.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
import komand
import time
from .schema import NewApprovalRequestInput, NewApprovalRequestOutput
# Custom imports below
class NewApprovalRequest(komand.Trigger):
starting_id = 0
def __init__(self):
super(self.__class__, self).__init__(
name='new_approval_request',
description='Triggers when a new approval request is created',
input=NewApprovalRequestInput(),
output=NewApprovalRequestOutput())
def run(self, params={}):
poll_rate = params.get("poll_rate", 10)
self.logger.info("Looking for new approval requests...")
url = self.connection.host + "/api/bit9platform/v1/approvalRequest?q=status:1&q=id>{id}" # status:1 = submitted
while True:
with komand.helper.open_cachefile("cb_protection_new_approval_request") as cache_file:
cache_file.seek(0)
temporary_id = cache_file.readline().strip()
                if temporary_id != '':
                    self.starting_id = int(temporary_id)
try:
request = self.connection.session.get(url=url.format(id=self.starting_id), verify=self.connection.verify)
results = request.json()
# Clean all the results before we do anything with them
results = komand.helper.clean(results)
for request in results:
self.logger.info("New approval request found, triggering...")
self.send({"approval_request": request})
# Write to cache as soon as we have it in case the trigger is killed mid-parse. This will prevent duplicate triggering
with komand.helper.open_cachefile("cb_protection_new_approval_request") as cache_file:
cache_file.seek(0)
cache_file.write(str(request["id"]))
self.starting_id = request["id"]
            except ValueError as e:
                raise e
            except BaseException as e:
                raise Exception("Error occurred: %s" % e)
else:
self.logger.info("Sleeping for %d seconds..." % poll_rate)
time.sleep(poll_rate)
def test(self):
url = self.connection.host + "/api/bit9platform/v1/approvalRequest?limit=-1" # -1 returns just the count (lightweight call)
request = self.connection.session.get(url=url, verify=self.connection.verify)
try:
request.raise_for_status()
except:
raise Exception('Run: HTTPError: %s' % request.text)
return {}
| 39.41791
| 138
| 0.599016
|
48026e0e1631456c4e7fe64de7c449923e533449
| 7,684
|
py
|
Python
|
vnpy/trader/object.py
|
jerryhe26/vnpy
|
d7607da780c57a2ab182688f4e52bc3a300acfda
|
[
"MIT"
] | 2
|
2020-04-17T03:10:17.000Z
|
2020-04-17T03:15:10.000Z
|
vnpy/trader/object.py
|
jerryhe26/vnpy
|
d7607da780c57a2ab182688f4e52bc3a300acfda
|
[
"MIT"
] | 1
|
2020-04-29T01:42:22.000Z
|
2020-04-29T01:42:22.000Z
|
vnpy/trader/object.py
|
jerryhe26/vnpy
|
d7607da780c57a2ab182688f4e52bc3a300acfda
|
[
"MIT"
] | 1
|
2021-02-19T07:25:22.000Z
|
2021-02-19T07:25:22.000Z
|
"""
Basic data structure used for general trading function in VN Trader.
"""
from dataclasses import dataclass
from datetime import datetime
from logging import INFO
from .constant import Direction, Exchange, Interval, Offset, Status, Product, OptionType, OrderType
ACTIVE_STATUSES = set([Status.SUBMITTING, Status.NOTTRADED, Status.PARTTRADED])
@dataclass
class BaseData:
"""
Any data object needs a gateway_name as source
and should inherit base data.
"""
gateway_name: str
@dataclass
class TickData(BaseData):
"""
Tick data contains information about:
* last trade in market
* orderbook snapshot
* intraday market statistics.
"""
symbol: str
exchange: Exchange
datetime: datetime
name: str = ""
volume: float = 0
open_interest: float = 0
last_price: float = 0
last_volume: float = 0
limit_up: float = 0
limit_down: float = 0
open_price: float = 0
high_price: float = 0
low_price: float = 0
pre_close: float = 0
bid_price_1: float = 0
bid_price_2: float = 0
bid_price_3: float = 0
bid_price_4: float = 0
bid_price_5: float = 0
ask_price_1: float = 0
ask_price_2: float = 0
ask_price_3: float = 0
ask_price_4: float = 0
ask_price_5: float = 0
bid_volume_1: float = 0
bid_volume_2: float = 0
bid_volume_3: float = 0
bid_volume_4: float = 0
bid_volume_5: float = 0
ask_volume_1: float = 0
ask_volume_2: float = 0
ask_volume_3: float = 0
ask_volume_4: float = 0
ask_volume_5: float = 0
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class BarData(BaseData):
"""
Candlestick bar data of a certain trading period.
"""
symbol: str
exchange: Exchange
datetime: datetime
interval: Interval = None
volume: float = 0
open_interest: float = 0
open_price: float = 0
high_price: float = 0
low_price: float = 0
close_price: float = 0
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class OrderData(BaseData):
"""
    Order data contains information for tracking the latest status
of a specific order.
"""
symbol: str
exchange: Exchange
orderid: str
type: OrderType = OrderType.LIMIT
direction: Direction = ""
offset: Offset = Offset.NONE
price: float = 0
volume: float = 0
traded: float = 0
status: Status = Status.SUBMITTING
time: str = ""
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
self.vt_orderid = f"{self.gateway_name}.{self.orderid}"
def is_active(self) -> bool:
"""
Check if the order is active.
"""
if self.status in ACTIVE_STATUSES:
return True
else:
return False
def create_cancel_request(self) -> "CancelRequest":
"""
Create cancel request object from order.
"""
req = CancelRequest(
orderid=self.orderid, symbol=self.symbol, exchange=self.exchange
)
return req
@dataclass
class TradeData(BaseData):
"""
Trade data contains information of a fill of an order. One order
can have several trade fills.
"""
symbol: str
exchange: Exchange
orderid: str
tradeid: str
direction: Direction = ""
offset: Offset = Offset.NONE
price: float = 0
volume: float = 0
time: str = ""
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
self.vt_orderid = f"{self.gateway_name}.{self.orderid}"
self.vt_tradeid = f"{self.gateway_name}.{self.tradeid}"
@dataclass
class PositionData(BaseData):
"""
    Position data is used for tracking each individual position holding.
"""
symbol: str
exchange: Exchange
direction: Direction
volume: float = 0
frozen: float = 0
price: float = 0
pnl: float = 0
yd_volume: float = 0
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
self.vt_positionid = f"{self.vt_symbol}.{self.direction.value}"
@dataclass
class AccountData(BaseData):
"""
Account data contains information about balance, frozen and
available.
"""
accountid: str
balance: float = 0
frozen: float = 0
def __post_init__(self):
""""""
self.available = self.balance - self.frozen
self.vt_accountid = f"{self.gateway_name}.{self.accountid}"
@dataclass
class LogData(BaseData):
"""
Log data is used for recording log messages on GUI or in log files.
"""
msg: str
level: int = INFO
def __post_init__(self):
""""""
self.time = datetime.now()
@dataclass
class ContractData(BaseData):
"""
Contract data contains basic information about each contract traded.
"""
symbol: str
exchange: Exchange
name: str
product: Product
size: int
pricetick: float
min_volume: float = 1 # minimum trading volume of the contract
stop_supported: bool = False # whether server supports stop order
net_position: bool = False # whether gateway uses net position volume
history_data: bool = False # whether gateway provides bar history data
option_strike: float = 0
option_underlying: str = "" # vt_symbol of underlying contract
option_type: OptionType = None
option_expiry: datetime = None
option_portfolio: str = ""
option_index: str = "" # for identifying options with same strike price
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class SubscribeRequest:
"""
    Request sent to a specific gateway to subscribe to tick data updates.
"""
symbol: str
exchange: Exchange
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class OrderRequest:
"""
    Request sent to a specific gateway to create a new order.
"""
symbol: str
exchange: Exchange
direction: Direction
type: OrderType
volume: float
price: float = 0
offset: Offset = Offset.NONE
reference: str = ""
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
def create_order_data(self, orderid: str, gateway_name: str) -> OrderData:
"""
Create order data from request.
"""
order = OrderData(
symbol=self.symbol,
exchange=self.exchange,
orderid=orderid,
type=self.type,
direction=self.direction,
offset=self.offset,
price=self.price,
volume=self.volume,
gateway_name=gateway_name,
)
return order
@dataclass
class CancelRequest:
"""
    Request sent to a specific gateway to cancel an existing order.
"""
orderid: str
symbol: str
exchange: Exchange
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class HistoryRequest:
"""
    Request sent to a specific gateway to query history data.
"""
symbol: str
exchange: Exchange
start: datetime
end: datetime = None
interval: Interval = None
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
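# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal example of turning an OrderRequest into an OrderData via
# create_order_data(). The enum members used here (Exchange.SHFE, Direction.LONG)
# are assumptions about .constant; OrderType.LIMIT is the default used above.
if __name__ == "__main__":
    req = OrderRequest(
        symbol="rb2110",
        exchange=Exchange.SHFE,
        direction=Direction.LONG,
        type=OrderType.LIMIT,
        volume=1,
        price=5000.0,
    )
    order = req.create_order_data(orderid="1", gateway_name="CTP")
    print(order.vt_symbol, order.vt_orderid, order.is_active())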
| 22.869048
| 99
| 0.621812
|
7891952c419424673afcfae34a388416d6230adb
| 41
|
py
|
Python
|
suppliers/utils/__init__.py
|
dazeus/dazeus-plugin-refter
|
0769fe49bbfbe06273fd26500059108cc6fd13ee
|
[
"BSD-3-Clause"
] | null | null | null |
suppliers/utils/__init__.py
|
dazeus/dazeus-plugin-refter
|
0769fe49bbfbe06273fd26500059108cc6fd13ee
|
[
"BSD-3-Clause"
] | null | null | null |
suppliers/utils/__init__.py
|
dazeus/dazeus-plugin-refter
|
0769fe49bbfbe06273fd26500059108cc6fd13ee
|
[
"BSD-3-Clause"
] | null | null | null |
from .dateformatter import DateFormatter
| 20.5
| 40
| 0.878049
|
0089fd326f628491bf8f8f3002813089b4bae8bc
| 14,888
|
py
|
Python
|
lib/galaxy/datatypes/isa.py
|
innovate-invent/galaxy
|
10aa953a40e171246bdd1804c74e8019da8e8200
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/datatypes/isa.py
|
innovate-invent/galaxy
|
10aa953a40e171246bdd1804c74e8019da8e8200
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/datatypes/isa.py
|
innovate-invent/galaxy
|
10aa953a40e171246bdd1804c74e8019da8e8200
|
[
"CC-BY-3.0"
] | null | null | null |
"""
ISA datatype
See https://github.com/ISA-tools
"""
from __future__ import print_function
import io
import json
import logging
import os
import os.path
import re
import shutil
import sys
import tempfile
from cgi import escape
# Imports isatab after turning off warnings inside logger settings to avoid pandas warning making uploads fail.
logging.getLogger("isatools.isatab").setLevel(logging.ERROR)
from isatools import (
isajson,
isatab_meta
)
from galaxy import util
from galaxy.datatypes import data
from galaxy.util.compression_utils import CompressedFile
from galaxy.util.sanitize_html import sanitize_html
# CONSTANTS {{{1
################################################################
# Main files regex
JSON_FILE_REGEX = re.compile(r"^.*\.json$", flags=re.IGNORECASE)
INVESTIGATION_FILE_REGEX = re.compile(r"^i_\w+\.txt$", flags=re.IGNORECASE)
# The name of the ISA archive (compressed file) as saved inside Galaxy
ISA_ARCHIVE_NAME = "archive"
# Set max number of lines of the history peek
_MAX_LINES_HISTORY_PEEK = 11
# Configure logger {{{1
################################################################
logger = logging.getLogger(__name__)
# Function for opening correctly a CSV file for csv.reader() for both Python 2 and 3 {{{1
################################################################
def utf8_text_file_open(path):
if sys.version_info[0] < 3:
fp = open(path, 'rb')
else:
fp = open(path, 'r', newline='', encoding='utf8')
return fp
# ISA class {{{1
################################################################
class _Isa(data.Data):
""" Base class for implementing ISA datatypes """
composite_type = 'auto_primary_file'
allow_datatype_change = False
is_binary = True
_main_file_regex = None
# Make investigation instance {{{2
################################################################
def _make_investigation_instance(self, filename):
raise NotImplementedError()
# Constructor {{{2
################################################################
def __init__(self, main_file_regex, **kwd):
super(_Isa, self).__init__(**kwd)
self._main_file_regex = main_file_regex
# Add the archive file as the only composite file
self.add_composite_file(ISA_ARCHIVE_NAME, is_binary=True, optional=True)
# Get ISA folder path {{{2
################################################################
def _get_isa_folder_path(self, dataset):
isa_folder = dataset.extra_files_path
if not isa_folder:
            raise Exception('Invalid dataset object, or no extra files path found for this dataset.')
return isa_folder
# Get main file {{{2
################################################################
def _get_main_file(self, dataset):
"""Get the main file of the ISA archive. Either the investigation file i_*.txt for ISA-Tab, or the JSON file for ISA-JSON."""
main_file = None
isa_folder = self._get_isa_folder_path(dataset)
if os.path.exists(isa_folder):
            # List the files in the ISA archive folder
isa_files = os.listdir(isa_folder)
# Try to find main file
main_file = self._find_main_file_in_archive(isa_files)
if main_file is None:
raise Exception('Invalid ISA archive. No main file found.')
# Make full path
main_file = os.path.join(isa_folder, main_file)
return main_file
# Get investigation {{{2
################################################################
def _get_investigation(self, dataset):
"""Create a contained instance specific to the exact ISA type (Tab or Json).
We will use it to parse and access information from the archive."""
investigation = None
main_file = self._get_main_file(dataset)
if main_file is not None:
investigation = self._make_investigation_instance(main_file)
return investigation
# Find main file in archive {{{2
################################################################
def _find_main_file_in_archive(self, files_list):
"""Find the main file inside the ISA archive."""
found_file = None
for f in files_list:
match = self._main_file_regex.match(f)
if match:
if found_file is None:
found_file = match.group()
else:
                    raise Exception('More than one file matches the pattern "%s" used to identify the investigation file' % str(self._main_file_regex))
return found_file
# Set peek {{{2
################################################################
def set_peek(self, dataset, is_multi_byte=False):
"""Set the peek and blurb text. Get first lines of the main file and set it as the peek."""
main_file = self._get_main_file(dataset)
if main_file is None:
raise RuntimeError("Unable to find the main file within the 'files_path' folder")
# Read first lines of main file
with io.open(main_file, encoding='utf-8') as f:
data = []
for line in f:
if len(data) < _MAX_LINES_HISTORY_PEEK:
data.append(line)
else:
break
if not dataset.dataset.purged and data:
dataset.peek = json.dumps({"data": data})
dataset.blurb = 'data'
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
# Display peek {{{2
################################################################
def display_peek(self, dataset):
"""Create the HTML table used for displaying peek, from the peek text found by set_peek() method."""
out = ['<table cellspacing="0" cellpadding="3">']
try:
if not dataset.peek:
dataset.set_peek()
json_data = json.loads(dataset.peek)
for line in json_data["data"]:
line = line.strip()
if not line:
continue
out.append('<tr><td>%s</td></tr>' % escape(util.unicodify(line, 'utf-8')))
out.append('</table>')
out = "".join(out)
except Exception as exc:
out = "Can't create peek %s" % str(exc)
return out
# Generate primary file {{{2
################################################################
def generate_primary_file(self, dataset=None):
"""Generate the primary file. It is an HTML file containing description of the composite dataset
as well as a list of the composite files that it contains."""
if dataset:
rval = ['<html><head><title>ISA Dataset </title></head><p/>']
if hasattr(dataset, "extra_files_path"):
rval.append('<div>ISA Dataset composed of the following files:<p/><ul>')
for cmp_file in os.listdir(dataset.extra_files_path):
rval.append('<li><a href="%s" type="text/plain">%s</a></li>' % (cmp_file, escape(cmp_file)))
rval.append('</ul></div></html>')
else:
rval.append('<div>ISA Dataset is empty!<p/><ul>')
return "\n".join(rval)
return "<div>No dataset available</div>"
# Dataset content needs grooming {{{2
################################################################
def dataset_content_needs_grooming(self, file_name):
"""This function is called on an output dataset file after the content is initially generated."""
return os.path.basename(file_name) == ISA_ARCHIVE_NAME
# Groom dataset content {{{2
################################################################
def groom_dataset_content(self, file_name):
"""This method is called by Galaxy to extract files contained in a composite data type."""
# XXX Is the right place to extract files? Should this step not be a cleaning step instead?
# Could extracting be done earlier and composite files declared as files contained inside the archive
# instead of the archive itself?
# extract basename and folder of the current file whose content has to be groomed
basename = os.path.basename(file_name)
output_path = os.path.dirname(file_name)
# extract archive if the file corresponds to the ISA archive
if basename == ISA_ARCHIVE_NAME:
# perform extraction
            # For some ZIP files CompressedFile::extract() extracts the file inside <output_folder>/<file_name> instead of outputting it inside <output_folder>. So we first create a temporary folder, extract inside it, and move the content to the final destination.
temp_folder = tempfile.mkdtemp()
CompressedFile(file_name).extract(temp_folder)
shutil.rmtree(output_path)
extracted_files = os.listdir(temp_folder)
logger.debug(' '.join(extracted_files))
if len(extracted_files) == 0:
os.makedirs(output_path)
shutil.rmtree(temp_folder)
elif len(extracted_files) == 1 and os.path.isdir(os.path.join(temp_folder, extracted_files[0])):
shutil.move(os.path.join(temp_folder, extracted_files[0]), output_path)
shutil.rmtree(temp_folder)
else:
shutil.move(temp_folder, output_path)
# Display data {{{2
################################################################
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, offset=None, ck_size=None, **kwd):
"""Downloads the ISA dataset if `preview` is `False`;
        if `preview` is `True`, it returns a preview of the ISA dataset as an HTML page.
The preview is triggered when user clicks on the eye icon of the composite dataset."""
# if it is not required a preview use the default behaviour of `display_data`
if not preview:
return super(_Isa, self).display_data(trans, dataset, preview, filename, to_ext, **kwd)
# prepare the preview of the ISA dataset
investigation = self._get_investigation(dataset)
if investigation is None:
html = """<html><header><title>Error while reading ISA archive.</title></header>
<body>
            <h1>An error occurred while reading the content of the ISA archive.</h1>
<p>If you have tried to load your archive with the uploader by selecting isa-tab as composite data type, then try to load it again with isa-json instead. Conversely, if you have tried to load your archive with the uploader by selecting isa-json as composite data type, then try isa-tab instead.</p>
<p>You may also try to look into your zip file in order to find out if this is a proper ISA archive. If you see a file i_Investigation.txt inside, then it is an ISA-Tab archive. If you see a file with extension .json inside, then it is an ISA-JSON archive. If you see nothing like that, then either your ISA archive is corrupted, or it is not an ISA archive.</p>
</body></html>"""
else:
html = '<html><body>'
html += '<h1>{0} {1}</h1>'.format(investigation.title, investigation.identifier)
# Loop on all studies
for study in investigation.studies:
html += '<h2>Study %s</h2>' % study.identifier
html += '<h3>%s</h3>' % study.title
html += '<p>%s</p>' % study.description
html += '<p>Submitted the %s</p>' % study.submission_date
html += '<p>Released on %s</p>' % study.public_release_date
html += '<p>Experimental factors used: %s</p>' % ', '.join([x.name for x in study.factors])
# Loop on all assays of this study
for assay in study.assays:
html += '<h3>Assay %s</h3>' % assay.filename
html += '<p>Measurement type: %s</p>' % assay.measurement_type.term # OntologyAnnotation
html += '<p>Technology type: %s</p>' % assay.technology_type.term # OntologyAnnotation
html += '<p>Technology platform: %s</p>' % assay.technology_platform
if assay.data_files is not None:
html += '<p>Data files:</p>'
html += '<ul>'
for data_file in assay.data_files:
if data_file.filename != '':
html += '<li>' + escape(util.unicodify(str(data_file.filename), 'utf-8')) + ' - ' + escape(util.unicodify(str(data_file.label), 'utf-8')) + '</li>'
html += '</ul>'
html += '</body></html>'
# Set mime type
mime = 'text/html'
self._clean_and_set_mime_type(trans, mime)
return sanitize_html(html).encode('utf-8')
# ISA-Tab class {{{1
################################################################
class IsaTab(_Isa):
file_ext = "isa-tab"
# Constructor {{{2
################################################################
def __init__(self, **kwd):
super(IsaTab, self).__init__(main_file_regex=INVESTIGATION_FILE_REGEX, **kwd)
# Make investigation instance {{{2
################################################################
def _make_investigation_instance(self, filename):
# Parse ISA-Tab investigation file
parser = isatab_meta.InvestigationParser()
isa_dir = os.path.dirname(filename)
fp = utf8_text_file_open(filename)
parser.parse(fp)
for study in parser.isa.studies:
s_parser = isatab_meta.LazyStudySampleTableParser(parser.isa)
s_parser.parse(os.path.join(isa_dir, study.filename))
for assay in study.assays:
a_parser = isatab_meta.LazyAssayTableParser(parser.isa)
a_parser.parse(os.path.join(isa_dir, assay.filename))
isa = parser.isa
return isa
# ISA-JSON class {{{1
################################################################
class IsaJson(_Isa):
file_ext = "isa-json"
# Constructor {{{2
################################################################
def __init__(self, **kwd):
super(IsaJson, self).__init__(main_file_regex=JSON_FILE_REGEX, **kwd)
# Make investigation instance {{{2
################################################################
def _make_investigation_instance(self, filename):
# Parse JSON file
fp = utf8_text_file_open(filename)
isa = isajson.load(fp)
return isa
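# --- Illustrative check (editor's sketch, not part of the original module) ---
# The ISA-Tab main file is the investigation file (i_*.txt) while the ISA-JSON main
# file is any *.json file; both regexes above are case-insensitive.
if __name__ == "__main__":
    assert INVESTIGATION_FILE_REGEX.match("i_Investigation.txt")
    assert JSON_FILE_REGEX.match("isa.json")
    assert not INVESTIGATION_FILE_REGEX.match("s_study.txt")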
| 40.237838
| 386
| 0.556354
|
4155167e48b90597e7530b454f1dbf6cf9eedd27
| 1,024
|
py
|
Python
|
test/transfer_action_test.py
|
mvdbeek/pulsar
|
0ae163325868c575e23ca71e559904d2de981b78
|
[
"Apache-2.0"
] | null | null | null |
test/transfer_action_test.py
|
mvdbeek/pulsar
|
0ae163325868c575e23ca71e559904d2de981b78
|
[
"Apache-2.0"
] | null | null | null |
test/transfer_action_test.py
|
mvdbeek/pulsar
|
0ae163325868c575e23ca71e559904d2de981b78
|
[
"Apache-2.0"
] | null | null | null |
import os
from .test_utils import files_server
from pulsar.client.action_mapper import RemoteTransferAction
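# Editor's note: these tests exercise RemoteTransferAction against a local files
# server: write_to_path() downloads the remote path into a local file, while
# write_from_path() uploads a local file to the remote path.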
def test_write_to_file():
with files_server() as (server, directory):
from_path = os.path.join(directory, "remote_get")
open(from_path, "wb").write(b"123456")
to_path = os.path.join(directory, "local_get")
url = server.application_url + "?path=%s" % from_path
RemoteTransferAction(to_path, url=url).write_to_path(to_path)
assert open(to_path, "rb").read() == b"123456"
def test_write_from_file():
with files_server() as (server, directory):
from_path = os.path.join(directory, "local_post")
open(from_path, "wb").write(b"123456")
to_path = os.path.join(directory, "remote_post")
url = server.application_url + "?path=%s" % to_path
RemoteTransferAction(to_path, url=url).write_from_path(from_path)
posted_contents = open(to_path, "rb").read()
assert posted_contents == b"123456", posted_contents
| 34.133333
| 73
| 0.679688
|
859505897f03d3735f285d5afe178bf8d1b63b9e
| 4,896
|
py
|
Python
|
matches/tests/test_views.py
|
asyler/betleague
|
2ae43ae26d6a6c8582a831bc56c2144ed3134202
|
[
"MIT"
] | null | null | null |
matches/tests/test_views.py
|
asyler/betleague
|
2ae43ae26d6a6c8582a831bc56c2144ed3134202
|
[
"MIT"
] | 1
|
2017-12-14T07:42:02.000Z
|
2017-12-14T10:22:19.000Z
|
matches/tests/test_views.py
|
asyler/betleague
|
2ae43ae26d6a6c8582a831bc56c2144ed3134202
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from accounts.factories import UserFactory
from matches.factories import PastMatchFactory, BetFactory, FutureMatchFactory
from matches.models import Match, Bet, WRONG_BET_FORMAT_ERROR
class UserBetsPageTest(TestCase):
url = reverse('user_bets')
# fixtures = ['accounts/fixtures/users.json', 'matches/fixtures/data.json']
@classmethod
def setUpTestData(cls):
        cls.future_match1 = FutureMatchFactory.create(datetime=timezone.now() + timezone.timedelta(days=2))
        cls.future_match2 = FutureMatchFactory.create(datetime=timezone.now() + timezone.timedelta(days=1))
        cls.future_match3 = FutureMatchFactory.create(datetime=timezone.now() + timezone.timedelta(days=3))
cls.user = UserFactory.create()
def setUp(self):
self.client.login(username=self.user.username, password=self.user.raw_password)
def test_uses_league_template(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'user_bets.html')
def test_passes_matches_to_template(self):
response = self.client.get(self.url)
self.assertIn('matches', response.context)
def test_matches_contain_bets(self):
BetFactory.create(match=self.future_match1, user=self.user, home_score=2, away_score=1)
response = self.client.get(self.url)
self.assertEqual('2 - 1', str(response.context['matches'][1].bet))
def test_POST_redirects_to_same_page(self):
response = self.client.post(self.url)
self.assertRedirects(response, self.url)
def test_POST_saves_bets(self):
self.client.post(self.url, data={
'match_1': '2 - 0'
})
bet = Bet.objects.filter(user=self.user, match=self.future_match1).first()
self.assertEqual(str(bet), '2 - 0')
def test_POST_edit_bets(self):
self.client.post(self.url, data={
'match_1': '2 - 0'
})
self.client.post(self.url, data={
'match_1': '3 - 0'
})
bet = Bet.objects.filter(user=self.user, match=self.future_match1).first()
self.assertEqual(str(bet), '3 - 0')
def test_for_invalid_input_invalid_input_not_saves(self):
self.client.post(self.url, data={
'match_1': '2 0'
})
bet = Bet.objects.filter(user=self.user, match=self.future_match1).first()
self.assertEqual(bet, None)
def test_many_inputs_saves(self):
self.client.post(self.url, data={
'match_1': '2 - 0',
'match_2': '3 - 0',
})
bet1 = Bet.objects.filter(user=self.user, match=self.future_match1).first()
bet2 = Bet.objects.filter(user=self.user, match=self.future_match2).first()
self.assertEqual(str(bet1), '2 - 0')
self.assertEqual(str(bet2), '3 - 0')
def test_for_invalid_input_other_input_saves(self):
self.client.post(self.url, data={
'match_1': '2 0',
'match_2': '3 - 0',
})
bet1 = Bet.objects.filter(user=self.user, match=self.future_match1).first()
bet2 = Bet.objects.filter(user=self.user, match=self.future_match2).first()
self.assertEqual(bet1, None)
self.assertEqual(str(bet2), '3 - 0')
def test_for_invalid_input_show_errors(self):
response = self.client.post(self.url, data={
'match_1': '2 0'
}, follow=True)
self.assertIn(WRONG_BET_FORMAT_ERROR, list(response.context.get('messages'))[0].message)
def test_for_past_matches_is_not_valid(self):
past_match = PastMatchFactory.create()
self.client.post(self.url, data={
f'match_{past_match.id}': '2 - 0'
})
bet = Bet.objects.filter(user=self.user, match=past_match).first()
self.assertEqual(bet, None)
def test_for_empty_input_nor_saving_nor_raising_error(self):
response = self.client.post(self.url, data={
'match_1': ''
}, follow=True)
bet = Bet.objects.filter(user=self.user, match=self.future_match1).first()
self.assertEqual(bet, None)
self.assertEqual(1, len(response.context.get('messages'))) # one for saved successful
def test_matches_are_sorted_by_datetime(self):
response = self.client.get(self.url)
response_matches = response.context['matches']
self.assertEqual(response_matches[0],self.future_match2)
self.assertEqual(response_matches[1],self.future_match1)
self.assertEqual(response_matches[2],self.future_match3)
class UserBetsPageUnauthorized(TestCase):
def test_page_view_redirects_unauthorized_user(self):
url = reverse('user_bets')
response = self.client.get(url)
self.assertIn('/accounts/login', response.url)
| 40.46281
| 109
| 0.664011
|
57964349548021949962e674a636d2d4cd7f4b72
| 5,070
|
py
|
Python
|
generalized_rates/datasets/load_compas.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
generalized_rates/datasets/load_compas.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
generalized_rates/datasets/load_compas.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads COMPAS dataset, processes it and saves it to output folder.
Adapted from code provided by Heinrich Jiang (heinrichj@google.com). The COMPAS
data file can be downloaded from:
https://github.com/propublica/compas-analysis/blob/master/compas-scores-two-years.csv
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import pandas as pd
from sklearn import model_selection
flags.DEFINE_float("test_fraction", 1.0 / 3.0,
"Fraction of overall dataset that constitutes the test set.")
flags.DEFINE_float("vali_fraction", 1.0 / 3.0,
"Fraction of train set that constitutes the validation set.")
flags.DEFINE_string("data_file", "compas-scores-two-years.csv",
"Path to COMPAS dataset csv file.")
flags.DEFINE_string("output_directory", "datasets/",
"Path to store processed dataset.")
FLAGS = flags.FLAGS
def load_data():
"""Load and process dataset from provided data path."""
df = pd.read_csv(FLAGS.data_file)
# Filter relevant features.
features = [
"age", "c_charge_degree", "race", "score_text", "sex", "priors_count",
"days_b_screening_arrest", "decile_score", "is_recid", "two_year_recid"]
df = df[features]
df = df[df.days_b_screening_arrest <= 30]
df = df[df.days_b_screening_arrest >= -30]
df = df[df.is_recid != -1]
df = df[df.c_charge_degree != "O"]
df = df[df.score_text != "N/A"]
# Divide features into: continuous, categorical and those that are continuous,
# but need to be converted to categorical.
categorical_features = ["c_charge_degree", "race", "score_text", "sex"]
continuous_to_categorical_features = ["age", "decile_score", "priors_count"]
# Bucketize features in continuous_to_categorical_features.
for feature in continuous_to_categorical_features:
if feature == "priors_count":
bins = list(np.percentile(df[feature], [0, 50, 70, 80, 90, 100]))
else:
bins = [0] + list(np.percentile(df[feature], [20, 40, 60, 80, 90, 100]))
df[feature] = pd.cut(df[feature], bins, labels=False)
# Binarize all categorical features (including the ones bucketized above).
df = pd.get_dummies(df, columns=categorical_features +
continuous_to_categorical_features)
# Fill values for decile scores and prior counts feature buckets.
to_fill = [u"decile_score_0", u"decile_score_1", u"decile_score_2",
u"decile_score_3", u"decile_score_4", u"decile_score_5"]
for i in range(len(to_fill) - 1):
df[to_fill[i]] = df[to_fill[i:]].max(axis=1)
to_fill = [u"priors_count_0.0", u"priors_count_1.0", u"priors_count_2.0",
u"priors_count_3.0", u"priors_count_4.0"]
for i in range(len(to_fill) - 1):
df[to_fill[i]] = df[to_fill[i:]].max(axis=1)
# Get the labels (two year recidivism) and groups (female defendants).
labels = df["two_year_recid"]
groups = df["sex_Female"]
# Retain all features other than "two_year_recid" and "is_recid".
df.drop(columns=["two_year_recid", "is_recid"], inplace=True)
return df.to_numpy(), labels.to_numpy(), groups.to_numpy()
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
# Load and pre-process COMPAS dataset.
features, labels, groups = load_data()
# Split dataset indices into train and test.
train_indices, test_indices = model_selection.train_test_split(
np.arange(features.shape[0]), test_size=FLAGS.test_fraction)
# Split train indices further into train and validation,
train_indices, vali_indices = model_selection.train_test_split(
train_indices, test_size=FLAGS.vali_fraction)
# Split features, labels and groups for train, test and validation sets,
x_train = features[train_indices, :]
y_train = labels[train_indices]
z_train = groups[train_indices]
x_test = features[test_indices, :]
y_test = labels[test_indices]
z_test = groups[test_indices]
x_vali = features[vali_indices, :]
y_vali = labels[vali_indices]
z_vali = groups[vali_indices]
train_set = x_train, y_train, z_train
vali_set = x_vali, y_vali, z_vali
test_set = x_test, y_test, z_test
# Save processed dataset.
with open(FLAGS.output_directory + "COMPAS.npy", "wb") as f:
np.save(f, (train_set, vali_set, test_set), allow_pickle=True)
if __name__ == "__main__":
app.run(main)
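# Example invocation (editor's note; the flags are defined at the top of this file):
#   python load_compas.py --data_file=compas-scores-two-years.csv \
#     --output_directory=datasets/ --test_fraction=0.333 --vali_fraction=0.333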
| 37.007299
| 85
| 0.714596
|
d5ed5ed80ab7c9bf39e1cd6a629e68fd4018b45c
| 1,991
|
py
|
Python
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/hbase_object_dataset.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/hbase_object_dataset.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/hbase_object_dataset.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .dataset import Dataset
class HBaseObjectDataset(Dataset):
"""HBase server dataset.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param description: Dataset description.
:type description: str
:param structure: Columns that define the structure of the dataset. Type:
array (or Expression with resultType array), itemType: DatasetDataElement.
:type structure: object
:param linked_service_name: Linked service reference.
:type linked_service_name:
~azure.mgmt.datafactory.models.LinkedServiceReference
:param parameters: Parameters for dataset.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param annotations: List of tags that can be used for describing the
Dataset.
:type annotations: list[object]
:param type: Constant filled by server.
:type type: str
"""
_validation = {
'linked_service_name': {'required': True},
'type': {'required': True},
}
def __init__(self, linked_service_name, additional_properties=None, description=None, structure=None, parameters=None, annotations=None):
super(HBaseObjectDataset, self).__init__(additional_properties=additional_properties, description=description, structure=structure, linked_service_name=linked_service_name, parameters=parameters, annotations=annotations)
self.type = 'HBaseObject'
| 42.361702
| 228
| 0.69111
|
3fc4012062df8999ba2c526655d85d6efe6ff627
| 4,196
|
py
|
Python
|
TIDALDL-PY/tidal_dl/lang/french.py
|
tahder/Tidal-Media-Downloader
|
6192be9726fc75fd707517f7a8c0f776f66976ba
|
[
"Apache-2.0"
] | 2
|
2020-11-01T09:41:14.000Z
|
2021-12-15T22:51:37.000Z
|
TIDALDL-PY/tidal_dl/lang/french.py
|
tahder/Tidal-Media-Downloader
|
6192be9726fc75fd707517f7a8c0f776f66976ba
|
[
"Apache-2.0"
] | null | null | null |
TIDALDL-PY/tidal_dl/lang/french.py
|
tahder/Tidal-Media-Downloader
|
6192be9726fc75fd707517f7a8c0f776f66976ba
|
[
"Apache-2.0"
] | 1
|
2020-11-03T10:14:02.000Z
|
2020-11-03T10:14:02.000Z
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : french.py
@Time : 2020/09/07
@Author : flamme-demon
@Version : 0.2
@Contact :
@Desc :
'''
class LangFrench(object):
SETTING = "RÉGLAGES"
VALUE = "VALEUR"
SETTING_DOWNLOAD_PATH = "Emplacement des téléchargements"
SETTING_ONLY_M4A = "Convertir mp4 en m4a"
SETTING_ADD_EXPLICIT_TAG = "Ajout du tag Explicit - Dossier"
SETTING_ADD_HYPHEN = "Ajouter un trait d'union"
    SETTING_ADD_YEAR = "Ajouter l'année avant le nom de l'album - Dossier"
SETTING_USE_TRACK_NUM = "Add user track number"
SETTING_AUDIO_QUALITY = "Qualité Audio"
SETTING_VIDEO_QUALITY = "Qualité Video"
SETTING_CHECK_EXIST = "Vérifier l'existence"
SETTING_ARTIST_BEFORE_TITLE = "Nom de l'artiste avant le titre du morceau - Fichier"
SETTING_ALBUMID_BEFORE_FOLDER = "Id avant le nom d'album - Dossier"
SETTING_INCLUDE_EP = "Inclure les single&ep"
SETTING_SAVE_COVERS = "Sauvegarder les couvertures"
SETTING_LANGUAGE = "Langue"
SETTING_USE_PLAYLIST_FOLDER = "Use playlist folder"
SETTING_MULITHREAD_DOWNLOAD = "Multi thread download"
SETTING_ALBUM_FOLDER_FORMAT = "Album folder format"
SETTING_TRACK_FILE_FORMAT = "Track file format"
SETTING_SHOW_PROGRESS = "Show progress"
CHOICE = "CHOIX"
FUNCTION = "FONCTION"
CHOICE_ENTER = "Saisir"
CHOICE_ENTER_URLID = "Saisir 'Url/ID':"
CHOICE_EXIT = "Quitter"
CHOICE_LOGIN = "Login"
CHOICE_SETTINGS = "Réglages"
CHOICE_SET_ACCESS_TOKEN = "Définir AccessToken"
CHOICE_DOWNLOAD_BY_URL = "Téléchargement par url ou id"
PRINT_ERR = "[ERR]"
PRINT_INFO = "[INFO]"
PRINT_SUCCESS = "[SUCCESS]"
PRINT_ENTER_CHOICE = "Saisir le choix:"
PRINT_LATEST_VERSION = "Dernière version:"
PRINT_USERNAME = "username:"
PRINT_PASSWORD = "password:"
CHANGE_START_SETTINGS = "Commencer les règlages ('0'-Retour,'1'-Oui):"
CHANGE_DOWNLOAD_PATH = "Emplacement des téléchargements('0' ne pas modifier):"
CHANGE_AUDIO_QUALITY = "Qualité audio('0'-Normal,'1'-High,'2'-HiFi,'3'-Master):"
CHANGE_VIDEO_QUALITY = "Qualité Video('0'-1080,'1'-720,'2'-480,'3'-360):"
CHANGE_ONLYM4A = "Convertir mp4 en m4a('0'-Non,'1'-Oui):"
CHANGE_ADD_EXPLICIT_TAG = "Ajout du tag Explicit - Fichier('0'-Non,'1'-Oui):"
CHANGE_ADD_HYPHEN = "Utilisez des traits d'union au lieu d'espaces dans les noms de fichiers('0'-Non,'1'-Oui):"
CHANGE_ADD_YEAR = "Ajouter l'année aux noms des dossiers des albums('0'-Non,'1'-Oui):"
CHANGE_USE_TRACK_NUM = "Ajouter le numéro de piste avant le nom des fichiers('0'-Non,'1'-Oui):"
CHANGE_CHECK_EXIST = "Vérifier l'existence du fichier avant le téléchargement('0'-Non,'1'-Oui):"
CHANGE_ARTIST_BEFORE_TITLE = "Ajouter le nom de l'artiste avant le titre de la piste('0'-Non,'1'-Oui):"
CHANGE_INCLUDE_EP = "Inclure les singles et les EPs lors du téléchargement des albums d'un artiste('0'-Non,'1'-Oui):"
CHANGE_ALBUMID_BEFORE_FOLDER = "Ajouter un identifiant avant le dossier album('0'-Non,'1'-Oui):"
CHANGE_SAVE_COVERS = "Sauvegarder les couvertures('0'-Non,'1'-Oui):"
CHANGE_LANGUAGE = "Sélectionnez une langue"
CHANGE_ALBUM_FOLDER_FORMAT = "Album folder format('0' not modify):"
CHANGE_TRACK_FILE_FORMAT = "Track file format('0' not modify):"
CHANGE_SHOW_PROGRESS = "Show progress('0'-No,'1'-Yes):"
MSG_INVAILD_ACCESSTOKEN = "Jeton d'accès disponible ! Veuillez recommencer."
MSG_PATH_ERR = "L'emplacement est faux"
MSG_INPUT_ERR = "Erreur de saisie !"
MODEL_ALBUM_PROPERTY = "ALBUM-PROPERTY"
MODEL_TRACK_PROPERTY = "TRACK-PROPERTY"
MODEL_VIDEO_PROPERTY = "VIDEO-PROPERTY"
MODEL_ARTIST_PROPERTY = "ARTIST-PROPERTY"
MODEL_PLAYLIST_PROPERTY = "PLAYLIST-PROPERTY"
MODEL_TITLE = 'Titre'
MODEL_TRACK_NUMBER = 'Numéro de piste'
MODEL_VIDEO_NUMBER = 'Numéro de la vidéo'
MODEL_RELEASE_DATE = 'Date de publication'
MODEL_VERSION = 'Version'
MODEL_EXPLICIT = 'Explicit'
MODEL_ALBUM = 'Album'
MODEL_ID = 'ID'
MODEL_NAME = 'Nom'
MODEL_TYPE = 'Type'
| 45.11828
| 122
| 0.68756
|
7614dfef66c7d2c381c366571a6863f7890e5f55
| 3,022
|
py
|
Python
|
refs/initial_Python_port/covariances.py
|
avlab/manifolder
|
9f1aea7a1bd1bafca1dcc6ab32f347f013f9c19d
|
[
"MIT"
] | 1
|
2020-07-12T22:22:15.000Z
|
2020-07-12T22:22:15.000Z
|
refs/initial_Python_port/covariances.py
|
avlab/manifolder
|
9f1aea7a1bd1bafca1dcc6ab32f347f013f9c19d
|
[
"MIT"
] | 7
|
2020-06-24T20:00:46.000Z
|
2020-07-01T21:15:51.000Z
|
refs/initial_Python_port/covariances.py
|
avlab/manifolder
|
9f1aea7a1bd1bafca1dcc6ab32f347f013f9c19d
|
[
"MIT"
] | 1
|
2020-06-26T17:50:32.000Z
|
2020-06-26T17:50:32.000Z
|
### covariances
print('computing local covariances ', end='')
# from scipy.linalg import svd
import manifolder_helper as mh
from numpy.linalg import inv
import numpy as np  # np is used below (zeros_like, cov, mean)
# Estimate local covariance matrices
# ***************************************************************@
## Configuration
# ncov = 10 # (previous value) size of neighborhood for covariance
ncov = 40 # size of neighborhood for covariance
## Covariance estimation
z_mean = np.zeros_like(z_hist) # Store the mean histogram in each local neighborhood
# NOTE, original matlab call should have used N * nbins ... length(hist_bins) works fine in MATLAB,
# but in python hist_bins has one more element than nbins, since it defines the boundaries ...
# inv_c = zeros(N*length(hist_bins), N*length(hist_bins), length(z_hist)) # Store the inverse covariance matrix of histograms in each local neighborhood
inv_c = np.zeros((N * nbins, N * nbins, z_hist.shape[1]))
# precalculate the values over which i will range ...
# this is like 40 to 17485 (inclusive) in python
# 41 to 17488 in MATLAB ... (check?)
irange = range(ncov, z_hist.shape[1] - ncov - 1)
# instead of waitbar, print .......... to the screen during processing
waitbar_increments = int(irange[-1] / 10)
for i in irange:
    if i % waitbar_increments == 0:
        print('.', end='')
    # not sure of the final number boundary for the loop ...
    # win = z_hist(:, i-ncov:i+ncov-1)
    # TODO - Alex, is this the right range in MATLAB?
    win = z_hist[:, i - ncov:i + ncov]  # python, brackets do not include end, in MATLAB () includes end
    ###
    ### IMPORTANT - the input to the cov() call in MATLAB is TRANSPOSED compared to numpy
    ###   cov(win.T) <=> np.cov(win)
    ###
    #
    # # Python example
    # A = np.array([[0, 1, 2], [3, 4, 5]])
    # print(A)
    # print(np.cov(A.T))
    #
    # % MATLAB example
    # >> A = [[0 1 2];[3 4 5]]
    # >> cov(A)
    #
    # TODO - lol, don't use 40x40, use a different number of bins, etc.
    c = np.cov(win)
    # # Denoise via projection on "known" # of dimensions
    # [U S V] = svd(c);  # matlab
    # python SVD looks very similar to MATLAB:
    #   https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.svd.html
    #   factors a such that a == U @ S @ Vh
    U, S, V = mh.svd_like_matlab(c)
    # inverse also works the same in Python as MATLAB ...
    # matlab:
    #   >> X = [1 0 2; -1 5 0; 0 3 -9]
    #   >> Y = inv(X)
    #
    #       0.8824   -0.1176    0.1961
    #       0.1765    0.1765    0.0392
    #       0.0588    0.0588   -0.0980
    #
    # Python:
    #   X = np.array([[1, 0, 2], [-1, 5, 0], [0, 3, -9]])
    #   Y = inv(X)
    #
    #   [[ 0.8824 -0.1176  0.1961]
    #    [ 0.1765  0.1765  0.0392]
    #    [ 0.0588  0.0588 -0.098 ]]
    # inv_c(:,:,i) = U(:,1:Dim) * inv(S(1:Dim,1:Dim)) * V(:,1:Dim)'  # matlab
    inv_c[:, :, i] = U[:, :Dim] @ inv(S[:Dim, :Dim]) @ V[:, :Dim].T  # NICE!
    # z_mean(:, i) = mean(win, 2);  # matlab
    z_mean[:, i] = np.mean(win, 1)
print(' done')
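# The core of the loop above is a rank-truncated inverse assembled from the SVD.
# A minimal standalone sketch of that construction (illustrative names only: 'dim'
# stands in for Dim, and a random symmetric matrix stands in for the local covariance c):
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((6, 6))
c_demo = A @ A.T                          # symmetric positive semi-definite, like a covariance
dim = 3

U_d, s_d, _ = np.linalg.svd(c_demo)       # numpy returns the singular values as a 1-D array
c_trunc = U_d[:, :dim] @ np.diag(s_d[:dim]) @ U_d[:, :dim].T
inv_c_trunc = U_d[:, :dim] @ np.diag(1.0 / s_d[:dim]) @ U_d[:, :dim].T

# The truncated inverse satisfies the Moore-Penrose identities for the truncated matrix,
# which is what U[:, :Dim] @ inv(S[:Dim, :Dim]) @ V[:, :Dim].T computes for a symmetric c.
print(np.allclose(inv_c_trunc @ c_trunc @ inv_c_trunc, inv_c_trunc))   # True
print(np.allclose(c_trunc @ inv_c_trunc @ c_trunc, c_trunc))           # True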
| 32.847826
| 152
| 0.577101
|
0f75a1c016f0fa73d86874e9563897528d750abd
| 293
|
py
|
Python
|
03-Template-Basics/03-Template-Control-Flow.py
|
saidulislam/flask-bootcamp-1
|
590bcac5a242b0f1f1e7540019bc3fc3e109c9b9
|
[
"Apache-2.0"
] | 1
|
2019-08-07T08:05:32.000Z
|
2019-08-07T08:05:32.000Z
|
Flask/03-Template-Basics/03-Template-Control-Flow.py
|
Sandy1811/demandforecasting
|
fdb6878d93502773ba8da809c2de1b33c96fb9a0
|
[
"Apache-2.0"
] | 8
|
2021-02-08T20:32:03.000Z
|
2022-03-11T23:56:31.000Z
|
Flask/03-Template-Basics/03-Template-Control-Flow.py
|
Sandy1811/demandforecasting
|
fdb6878d93502773ba8da809c2de1b33c96fb9a0
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, render_template

app = Flask(__name__)

@app.route('/')
def index():
    puppies = ['Fluffy', 'Rufus', 'Spike']
    return render_template('03-Template-Control-Flow.html',
                           puppies=puppies)

if __name__ == '__main__':
    app.run(debug=True)
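# The HTML file referenced above is not included in this dump; a minimal sketch of the
# kind of Jinja2 control flow it presumably contains (assumed, not the course's actual
# template), exercised via render_template_string so no separate file is needed:
from flask import Flask, render_template_string

demo_app = Flask(__name__)
DEMO_TEMPLATE = """
<ul>
{% for pup in puppies %}
  <li>{{ pup }}</li>
{% endfor %}
</ul>
{% if puppies|length > 2 %}
  <p>That is a lot of puppies!</p>
{% endif %}
"""

with demo_app.app_context():
    print(render_template_string(DEMO_TEMPLATE, puppies=['Fluffy', 'Rufus', 'Spike']))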
| 19.533333
| 59
| 0.624573
|
9a94a4799fa69e98763b03be6814cf01e5ae67b7
| 25,222
|
py
|
Python
|
models/client.py
|
RichardTZX/FL-Reweighing
|
63125837ccb86a81088d74a1aa324f5191e094a4
|
[
"BSD-2-Clause"
] | null | null | null |
models/client.py
|
RichardTZX/FL-Reweighing
|
63125837ccb86a81088d74a1aa324f5191e094a4
|
[
"BSD-2-Clause"
] | null | null | null |
models/client.py
|
RichardTZX/FL-Reweighing
|
63125837ccb86a81088d74a1aa324f5191e094a4
|
[
"BSD-2-Clause"
] | null | null | null |
import random
import warnings
import timeout_decorator
import sys
import numpy as np
import json
from utils.logger import Logger
from device import Device
from timer import Timer
from grad_compress.grad_drop import GDropUpdate
from grad_compress.sign_sgd import SignSGDUpdate
from comm_effi import StructuredUpdate
L = Logger()
logger = L.get_logger()
class Client:
d = None
try:
with open('../data/state_traces.json', 'r', encoding='utf-8') as f:
d = json.load(f)
except FileNotFoundError as e:
d = None
logger.warn('no user behavior trace was found, running in no-trace mode')
def __init__(self, client_id, group=None, train_data={'x' : [],'y' : []}, eval_data={'x' : [],'y' : []}, model=None, device=None, cfg=None):
self._model = model
self.id = client_id # integer
self.group = group
self.train_data = train_data
self.eval_data = eval_data
self.deadline = 1 # < 0 for unlimited
self.cfg = cfg
self.compressor = None
if self.cfg.compress_algo:
if self.cfg.compress_algo == 'sign_sgd':
self.compressor = SignSGDUpdate()
elif self.cfg.compress_algo == 'grad_drop':
self.compressor = GDropUpdate(client_id,cfg)
else:
logger.error("compress algorithm is not defined")
self.structured_updater = None
if self.cfg.structure_k:
self.structured_updater = StructuredUpdate(self.cfg.structure_k, self.cfg.seed)
self.device = device # if device == none, it will use real time as train time, and set upload/download time as 0
if self.device == None:
logger.warn('client {} with no device init, upload time will be set as 0 and speed will be the gpu speed'.format(self.id))
self.upload_time = 0
# timer
d = Client.d
if d == None:
cfg.behav_hete = False
# uid = random.randint(0, len(d))
if cfg.behav_hete:
if cfg.real_world == False:
uid = random.sample(list(d.keys()), 1)[0]
self.timer = Timer(ubt=d[str(uid)], google=True)
while self.timer.isSuccess != True:
uid = random.sample(list(d.keys()), 1)[0]
self.timer = Timer(ubt=d[str(uid)], google=True)
else:
uid = self.id
self.timer = Timer(ubt=d[str(uid)], google=True)
else:
# no behavior heterogeneity, always available
self.timer = Timer(None)
self.deadline = sys.maxsize # deadline is meaningless without user trace
real_device_model = self.timer.model
if not self.device:
self.device = Device(cfg, 0.0)
if self.cfg.hard_hete:
self.device.set_device_model(real_device_model)
else:
self.device.set_device_model("Redmi Note 8")
def train(self, start_t=None, num_epochs=1, batch_size=10, minibatch=None):
"""Trains on self.model using the client's train_data.
Args:
num_epochs: Number of epochs to train. Unsupported if minibatch is provided (minibatch has only 1 epoch)
batch_size: Size of training batches.
minibatch: fraction of client's data to apply minibatch sgd,
None to use FedAvg
start_t: start time of the training, only used in train_with_simulate_time
Return:
comp: number of FLOPs executed in training process
num_samples: number of samples used in training
update: set of weights
acc, loss, grad, update_size
"""
def train_with_simulate_time(self, start_t, num_epochs=1, batch_size=10, minibatch=None):
if minibatch is None:
num_data = min(len(self.train_data["x"]), self.cfg.max_sample)
else :
frac = min(1.0, minibatch)
num_data = max(1, int(frac*len(self.train_data["x"])))
train_time = self.device.get_train_time(num_data, batch_size, num_epochs)
logger.debug('client {}: num data:{}'.format(self.id, num_data))
logger.debug('client {}: train time:{}'.format(self.id, train_time))
# compute num_data
if minibatch is None:
num_data = min(len(self.train_data["x"]), self.cfg.max_sample)
xs, ys = zip(*random.sample(list(zip(self.train_data["x"], self.train_data["y"])), num_data))
data = {'x': xs, 'y': ys}
else:
frac = min(1.0, minibatch)
num_data = max(1, int(frac*len(self.train_data["x"])))
xs, ys = zip(*random.sample(list(zip(self.train_data["x"], self.train_data["y"])), num_data))
data = {'x': xs, 'y': ys}
download_time = self.device.get_download_time()
upload_time = self.device.get_upload_time(self.model.size) # will be re-calculated after training
down_end_time = self.timer.get_future_time(start_t, download_time)
logger.debug("client {} download-time-need={}, download-time-cost={} end at {}, "
.format(self.id, download_time, down_end_time-start_t, down_end_time))
train_end_time = self.timer.get_future_time(down_end_time, train_time)
logger.debug("client {} train-time-need={}, train-time-cost={} end at {}, "
.format(self.id, train_time, train_end_time-down_end_time, train_end_time))
up_end_time = self.timer.get_future_time(train_end_time, upload_time)
logger.debug("client {} upload-time-need={}, upload-time-cost={} end at {}, "
.format(self.id, upload_time, up_end_time-train_end_time, up_end_time))
# total_cost = up_end_time - start_t
# logger.debug("client {} task-time-need={}, task-time-cost={}"
# .format(self.id, download_time+train_time+upload_time, total_cost))
self.ori_download_time = download_time # original
self.ori_train_time = train_time
self.before_comp_upload_time = upload_time
self.ori_upload_time = upload_time
self.act_download_time = down_end_time-start_t # actual
self.act_train_time = train_end_time-down_end_time
self.act_upload_time = up_end_time-train_end_time # may decrease when a compression algorithm is used
self.update_size = self.model.size
'''
if not self.timer.check_comm_suc(start_t, download_time):
self.actual_comp = 0.0
download_available_time = self.timer.get_available_time(start_t, download_time)
failed_reason = 'download interruption: download_time({}) > download_available_time({})'.format(download_time, download_available_time)
raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
if train_time > train_time_limit:
# data sampling
comp = self.model.get_comp(data, num_epochs, batch_size)
self.actual_comp = int(comp*available_time/train_time) # will be used in get_actual_comp
failed_reason = 'out of deadline: download_time({}) + train_time({}) + upload_time({}) > deadline({})'.format(download_time, train_time, upload_time, self.deadline)
raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
elif train_time > available_time:
# client interruption
comp = self.model.get_comp(data, num_epochs, batch_size)
self.actual_comp = int(comp*available_time/train_time) # will be used in get_actual_comp
failed_reason = 'client interruption: train_time({}) > available_time({})'.format(train_time, available_time)
raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
if not self.timer.check_comm_suc(start_t + download_time + train_time, upload_time):
comp = self.model.get_comp(data, num_epochs, batch_size)
self.actual_comp = comp
upload_available_time = self.timer.get_available_time(start_t + download_time + train_time, upload_time)
failed_reason = 'upload interruption: upload_time({}) > upload_available_time({})'.format(upload_time, upload_available_time)
raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
'''
if (down_end_time-start_t) > self.deadline:
# download too long
self.actual_comp = 0.0
self.update_size = 0
failed_reason = 'failed when downloading'
raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
elif (train_end_time-start_t) > self.deadline:
# failed when training
train_time_limit = self.deadline - self.act_download_time
if train_time_limit <= 0:
train_time_limit = 0.001
available_time = self.timer.get_available_time(start_t + self.act_download_time, train_time_limit)
comp = self.model.get_comp(data, num_epochs, batch_size)
self.actual_comp = int(comp*available_time/train_time) # will be used in get_actual_comp
self.update_size = 0
if self.cfg.fedprox:
ne = -1
for i in range(1, num_epochs):
et = self.timer.get_future_time(down_end_time, train_time*ne/num_epochs + upload_time)
if et - start_t <= self.deadline:
ne = i
if self.cfg.no_training:
comp = self.model.get_comp(data, num_epochs, batch_size)
update, acc, loss, grad, loss_old = -1,-1,-1,-1,-1
elif self.cfg.fedprox and ne != -1:
comp, update, acc, loss, grad, loss_old = self.model.train(data, ne, batch_size)
train_time *= ne / num_epochs
else:
failed_reason = 'failed when training'
raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
else:
failed_reason = 'failed when training'
raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
elif (up_end_time-start_t) > self.deadline:
self.actual_comp = self.model.get_comp(data, num_epochs, batch_size)
if self.cfg.fedprox:
ne = -1
for i in range(1, num_epochs):
et = self.timer.get_future_time(down_end_time, train_time*ne/num_epochs + upload_time)
if et - start_t <= self.deadline:
ne = i
if self.cfg.no_training:
comp = self.model.get_comp(data, num_epochs, batch_size)
update, acc, loss, grad, loss_old = -1,-1,-1,-1,-1
elif self.cfg.fedprox and ne != -1:
comp, update, acc, loss, grad, loss_old = self.model.train(data, ne, batch_size)
train_time *= ne / num_epochs
else:
failed_reason = 'failed when uploading'
raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
else:
failed_reason = 'failed when uploading'
raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
else :
if self.cfg.sensitive_attribute: #In this section, we add the Disparate Impact measure
if minibatch is None:
if self.cfg.no_training:
comp = self.model.get_comp(data, num_epochs, batch_size)
update, acc, loss, disp_imp, grad, loss_old = -1,-1,-1,-1,-1,-1
else:
comp, update, acc, loss, disp_imp, grad, loss_old = self.model.train(data, num_epochs, batch_size)
else:
num_epochs = 1
if self.cfg.no_training:
comp = self.model.get_comp(data, num_epochs, num_data)
update, acc, loss, disp_imp, grad, loss_old = -1,-1,-1,-1,-1,-1
else:
comp, update, acc, loss, disp_imp, grad, loss_old = self.model.train(data, num_epochs, batch_size)
else:
if minibatch is None:
if self.cfg.no_training:
comp = self.model.get_comp(data, num_epochs, batch_size)
update, acc, loss, grad, loss_old = -1,-1,-1,-1,-1
else:
comp, update, acc, loss, grad, loss_old = self.model.train(data, num_epochs, batch_size)
else:
num_epochs = 1
if self.cfg.no_training:
comp = self.model.get_comp(data, num_epochs, num_data)
update, acc, loss, grad, loss_old = -1,-1,-1,-1,-1
else:
comp, update, acc, loss, grad, loss_old = self.model.train(data, num_epochs, batch_size)
num_train_samples = len(data['y'])
simulate_time_c = train_time + upload_time
self.actual_comp = comp
# gradient compression and Federated Learning Strategies are mutually exclusive
# gradient compression
if self.compressor != None and not self.cfg.no_training:
grad, size_old, size_new = self.compressor.GradientCompress(grad)
# logger.info('compression ratio: {}'.format(size_new/size_old))
self.update_size = self.update_size*size_new/size_old
# re-calculate upload_time
upload_time = self.device.get_upload_time(self.update_size)
self.ori_upload_time = upload_time
up_end_time = self.timer.get_future_time(train_end_time, upload_time)
self.act_upload_time = up_end_time-train_end_time
# Federated Learning Strategies for Improving Communication Efficiency
seed = None
shape_old = None
if self.structured_updater and not self.cfg.no_training:
seed, shape_old, grad = self.structured_updater.struc_update(grad)
# logger.info('compression ratio: {}'.format(sum([np.prod(g.shape) for g in grad]) / sum([np.prod(s) for s in shape_old])))
self.update_size *= sum([np.prod(g.shape) for g in grad]) / sum([np.prod(s) for s in shape_old])
# re-calculate upload_time
upload_time = self.device.get_upload_time(self.update_size)
self.ori_upload_time = upload_time
up_end_time = self.timer.get_future_time(train_end_time, upload_time)
self.act_upload_time = up_end_time-train_end_time
total_cost = self.act_download_time + self.act_train_time + self.act_upload_time
if total_cost > self.deadline:
# failed when uploading
self.actual_comp = self.model.get_comp(data, num_epochs, batch_size)
failed_reason = 'failed when uploading'
# Note that, to simplify, we did not change update_size here; the actual update size is smaller.
raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
# if self.cfg.fedprox:
# print("client {} finish train task".format(self.id))
if self.cfg.sensitive_attribute:
return simulate_time_c, comp, num_train_samples, update, acc, loss, disp_imp, grad, self.update_size, seed, shape_old, loss_old
else:
return simulate_time_c, comp, num_train_samples, update, acc, loss, grad, self.update_size, seed, shape_old, loss_old
'''
# Deprecated
@timeout_decorator.timeout(train_time_limit)
def train_with_real_time_limit(self, num_epochs=1, batch_size=10, minibatch=None):
logger.warn('call train_with_real_time_limit()')
start_time = time.time()
if minibatch is None:
# data = self.train_data
num_data = min(len(self.train_data["x"]), self.cfg.max_sample)
xs, ys = zip(*random.sample(list(zip(self.train_data["x"], self.train_data["y"])), num_data))
data = {'x': xs, 'y': ys}
if self.cfg.no_training:
comp, update, acc, loss, grad = -1,-1,-1,-1,-1
else:
comp, update, acc, loss, grad = self.model.train(data, num_epochs, batch_size)
else:
frac = min(1.0, minibatch)
num_data = max(1, int(frac*len(self.train_data["x"])))
xs, ys = zip(*random.sample(list(zip(self.train_data["x"], self.train_data["y"])), num_data))
data = {'x': xs, 'y': ys}
# Minibatch trains for only 1 epoch - multiple local epochs don't make sense!
num_epochs = 1
if self.cfg.no_training:
comp, update, acc, loss, grad = -1,-1,-1,-1,-1
else:
comp, update, acc, loss, grad = self.model.train(data, num_epochs, num_data)
num_train_samples = len(data['y'])
simulate_time_c = time.time() - start_time
self.ori_download_time = 0 # original
self.ori_train_time = simulate_time_c
self.ori_upload_time = 0
self.act_download_time = 0 # actual
self.act_train_time = simulate_time_c
self.act_upload_time = 0
# gradient compression
update_size = self.model.size
if grad != -1 and self.cfg.compress_algo:
if self.cfg.compress_algo == 'sign_sgd':
grad, size_old, size_new = sign_sgd_updater.GradientCompress(grad)
update_size = update_size*size_new/size_old
elif self.cfg.compress_algo == 'grad_drop':
grad, size_old, size_new = grad_drop_updater.GradientCompress(grad)
update_size = update_size*size_new/size_old
else:
logger.error("compress algorithm is not defined")
return simulate_time_c, comp, num_train_samples, update, acc, loss, grad, update_size
'''
return train_with_simulate_time(self, start_t, num_epochs, batch_size, minibatch)
'''
if self.device == None:
return train_with_real_time_limit(self, num_epochs, batch_size, minibatch)
else:
return train_with_simulate_time(self, start_t, num_epochs, batch_size, minibatch)
'''
def test(self, set_to_use='test'):
"""Tests self.model on self.test_data.
Args:
set_to_use: the set to test on. Should be in ['train', 'test', 'val'].
Return:
dict of metrics returned by the model.
"""
assert set_to_use in ['train', 'test', 'val']
if set_to_use == 'train':
data = self.train_data
elif set_to_use == 'test' or set_to_use == 'val':
data = self.eval_data
return self.model.test(data)
@property
def num_test_samples(self):
"""Number of test samples for this client.
Return:
int: Number of test samples for this client
"""
if self.eval_data is None:
return 0
return len(self.eval_data['y'])
@property
def num_train_samples(self):
"""Number of train samples for this client.
Return:
int: Number of train samples for this client
"""
if self.train_data is None:
return 0
return len(self.train_data['y'])
@property
def num_samples(self):
"""Number samples for this client.
Return:
int: Number of samples for this client
"""
train_size = 0
if self.train_data is not None:
train_size = len(self.train_data['y'])
test_size = 0
if self.eval_data is not None:
test_size = len(self.eval_data['y'])
return train_size + test_size
@property
def model(self):
"""Returns this client reference to model being trained"""
return self._model
@model.setter
def model(self, model):
warnings.warn('The current implementation shares the model among all clients. '
'Setting it on one client will effectively modify all clients.')
self._model = model
def set_deadline(self, deadline = -1):
if deadline < 0 or not self.cfg.behav_hete:
self.deadline = sys.maxsize
else:
self.deadline = deadline
logger.debug('client {}\'s deadline is set to {}'.format(self.id, self.deadline))
'''
def set_upload_time(self, upload_time):
if upload_time > 0:
self.upload_time = upload_time
else:
logger.error('invalid upload time: {}'.format(upload_time))
assert False
logger.debug('client {}\'s upload_time is set to {}'.format(self.id, self.upload_time))
def get_train_time_limit(self):
if self.device != None:
self.upload_time = self.device.get_upload_time()
logger.debug('client {} upload time: {}'.format(self.id, self.upload_time))
if self.upload_time < self.deadline :
# logger.info('deadline: {}'.format(self.deadline))
return self.deadline - self.upload_time
else:
return 0.01
'''
def upload_suc(self, start_t, num_epochs=1, batch_size=10, minibatch=None):
"""Test if this client will upload successfully
Args:
num_epochs: Number of epochs to train. Unsupported if minibatch is provided (minibatch has only 1 epoch)
batch_size: Size of training batches.
minibatch: fraction of client's data to apply minibatch sgd,
None to use FedAvg
start_t: start time of the training, only used in train_with_simulate_time
Return:
result: test result(True or False)
"""
if minibatch is None:
num_data = min(len(self.train_data["x"]), self.cfg.max_sample)
else :
frac = min(1.0, minibatch)
num_data = max(1, int(frac*len(self.train_data["x"])))
if self.device == None:
download_time = 0.0
upload_time = 0.0
else:
download_time = self.device.get_download_time()
upload_time = self.device.get_upload_time()
train_time = self.device.get_train_time(num_data, batch_size, num_epochs)
train_time_limit = self.deadline - download_time - upload_time
if train_time_limit < 0:
train_time_limit = 0.001
available_time = self.timer.get_available_time(start_t + download_time, train_time_limit)
logger.debug('client {}: train time:{}'.format(self.id, train_time))
logger.debug('client {}: available time:{}'.format(self.id, available_time))
# compute num_data
if minibatch is None:
num_data = min(len(self.train_data["x"]), self.cfg.max_sample)
xs, ys = zip(*random.sample(list(zip(self.train_data["x"], self.train_data["y"])), num_data))
data = {'x': xs, 'y': ys}
else:
frac = min(1.0, minibatch)
num_data = max(1, int(frac*len(self.train_data["x"])))
xs, ys = zip(*random.sample(list(zip(self.train_data["x"], self.train_data["y"])), num_data))
data = {'x': xs, 'y': ys}
if not self.timer.check_comm_suc(start_t, download_time):
return False
if train_time > train_time_limit:
return False
elif train_time > available_time:
return False
if not self.timer.check_comm_suc(start_t + download_time + train_time, upload_time):
return False
else :
return True
def get_device_model(self):
if self.device == None:
return 'None'
return self.device.device_model
def get_actual_comp(self):
'''
get the actual computation in the training process
'''
return self.actual_comp
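# The deadline handling in train_with_simulate_time above reduces to comparing the
# cumulative end times of the download / train / upload phases against self.deadline.
# A minimal sketch with made-up numbers (it ignores the availability gaps that
# Timer.get_future_time would insert, so it is illustrative only):
def phase_end_times(start_t, download_time, train_time, upload_time):
    down_end = start_t + download_time
    train_end = down_end + train_time
    up_end = train_end + upload_time
    return down_end, train_end, up_end

down_end, train_end, up_end = phase_end_times(0.0, 5.0, 40.0, 8.0)
deadline = 50.0
for phase, end in [('downloading', down_end), ('training', train_end), ('uploading', up_end)]:
    if end > deadline:
        print('failed when ' + phase)   # mirrors the TimeoutError branches above
        break
else:
    print('round fits within the deadline')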
| 48.041905
| 180
| 0.577472
|
0d36e3985a92a90b5c1fe42c799646c1467b96ed
| 4,909
|
py
|
Python
|
tests/test_sqlalchemy_without_db.py
|
lucafaggianelli/layabase
|
90733c6b9efd56051dfce5c3d89bd4e657ce7b3f
|
[
"MIT"
] | 3
|
2019-12-02T23:29:44.000Z
|
2019-12-31T00:55:01.000Z
|
tests/test_sqlalchemy_without_db.py
|
lucafaggianelli/layabase
|
90733c6b9efd56051dfce5c3d89bd4e657ce7b3f
|
[
"MIT"
] | 29
|
2019-12-02T16:12:45.000Z
|
2022-02-17T16:01:55.000Z
|
tests/test_sqlalchemy_without_db.py
|
lucafaggianelli/layabase
|
90733c6b9efd56051dfce5c3d89bd4e657ce7b3f
|
[
"MIT"
] | 3
|
2020-01-02T10:58:47.000Z
|
2022-02-17T10:55:18.000Z
|
import sqlalchemy
import pytest
import layabase
from layabase.testing import mock_sqlalchemy_health_datetime
@pytest.fixture
def controller():
class TestTable:
__tablename__ = "test"
key = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
return layabase.CRUDController(TestTable)
@pytest.fixture
def disconnected_database(controller: layabase.CRUDController):
_db = layabase.load("sqlite:///:memory:", [controller])
_db.metadata.bind.dispose()
yield _db
def test_get_all_when_db_down(
disconnected_database, controller: layabase.CRUDController
):
with pytest.raises(Exception) as exception_info:
controller.get({})
assert (
str(exception_info.value)
== """A error occurred while querying database: (sqlite3.OperationalError) no such table: test\n[SQL: SELECT test."key" AS test_key \nFROM test]\n(Background on this error at: http://sqlalche.me/e/13/e3q8)"""
)
def test_get_when_db_down(disconnected_database, controller: layabase.CRUDController):
with pytest.raises(Exception) as exception_info:
controller.get_one({})
assert (
str(exception_info.value)
== """A error occurred while querying database: (sqlite3.OperationalError) no such table: test\n[SQL: SELECT test."key" AS test_key \nFROM test]\n(Background on this error at: http://sqlalche.me/e/13/e3q8)"""
)
def test_add_when_db_down(disconnected_database, controller: layabase.CRUDController):
with pytest.raises(Exception) as exception_info:
controller.post({"key": "my_key1", "mandatory": 1, "optional": "my_value1"})
assert (
str(exception_info.value)
== """A error occurred while querying database: (sqlite3.OperationalError) no such table: test\n[SQL: SELECT test."key" AS test_key \nFROM test \nWHERE test."key" = ?\n LIMIT ? OFFSET ?]\n[parameters: ('my_key1', 1, 0)]\n(Background on this error at: http://sqlalche.me/e/13/e3q8)"""
)
def test_post_many_when_db_down(
disconnected_database, controller: layabase.CRUDController
):
with pytest.raises(Exception) as exception_info:
controller.post_many(
[
{"key": "my_key1", "mandatory": 1, "optional": "my_value1"},
{"key": "my_key2", "mandatory": 1, "optional": "my_value1"},
]
)
assert (
str(exception_info.value)
== """A error occurred while querying database: (sqlite3.OperationalError) no such table: test\n[SQL: SELECT test."key" AS test_key \nFROM test \nWHERE test."key" = ?\n LIMIT ? OFFSET ?]\n[parameters: ('my_key1', 1, 0)]\n(Background on this error at: http://sqlalche.me/e/13/e3q8)"""
)
def test_update_when_db_down(
disconnected_database, controller: layabase.CRUDController
):
with pytest.raises(Exception) as exception_info:
controller.put({"key": "my_key1", "mandatory": 1, "optional": "my_value1"})
assert (
str(exception_info.value)
== """A error occurred while querying database: (sqlite3.OperationalError) no such table: test\n[SQL: SELECT test."key" AS test_key \nFROM test \nWHERE test."key" = ?\n LIMIT ? OFFSET ?]\n[parameters: ('my_key1', 1, 0)]\n(Background on this error at: http://sqlalche.me/e/13/e3q8)"""
)
def test_remove_when_db_down(
disconnected_database, controller: layabase.CRUDController
):
with pytest.raises(Exception) as exception_info:
controller.delete({})
assert (
str(exception_info.value)
== """A error occurred while querying database: (sqlite3.OperationalError) no such table: test\n[SQL: SELECT test."key" AS test_key \nFROM test]\n(Background on this error at: http://sqlalche.me/e/13/e3q8)"""
)
def test_health_details_failure(
disconnected_database, mock_sqlalchemy_health_datetime, monkeypatch
):
monkeypatch.setattr(
disconnected_database.metadata.bind.dialect, "do_ping", lambda x: False
)
assert layabase.check(disconnected_database) == (
"fail",
{
"sqlite:select": {
"componentType": "datastore",
"status": "fail",
"time": "2018-10-11T15:05:05.663979",
"output": "Unable to ping database.",
}
},
)
def test_health_details_failure_due_to_exception(
disconnected_database, mock_sqlalchemy_health_datetime, monkeypatch
):
def raise_exception(*args):
raise Exception("This is the error")
monkeypatch.setattr(
disconnected_database.metadata.bind.dialect, "do_ping", raise_exception
)
assert layabase.check(disconnected_database) == (
"fail",
{
"sqlite:select": {
"componentType": "datastore",
"status": "fail",
"time": "2018-10-11T15:05:05.663979",
"output": "This is the error",
}
},
)
| 37.473282
| 291
| 0.662253
|
ed9688ae2a7376784e869510361d45a5cf1dd8c4
| 290
|
py
|
Python
|
Codeforces sheet solutions/Sheet 1/S.py
|
failedconnection/hack_the_fest
|
494a283f4cc52279487b4a57055225ceea95f4d3
|
[
"MIT"
] | 1
|
2020-10-01T16:47:59.000Z
|
2020-10-01T16:47:59.000Z
|
Codeforces sheet solutions/Sheet 1/S.py
|
failedconnection/hack_the_fest
|
494a283f4cc52279487b4a57055225ceea95f4d3
|
[
"MIT"
] | 4
|
2020-10-01T14:41:07.000Z
|
2020-10-04T06:08:04.000Z
|
Codeforces sheet solutions/Sheet 1/S.py
|
failedconnection/hack_the_fest
|
494a283f4cc52279487b4a57055225ceea95f4d3
|
[
"MIT"
] | 15
|
2020-10-01T08:34:34.000Z
|
2020-10-22T18:25:05.000Z
|
x, y = input().split()
x = float(x)
y = float(y)
if (x == 0 and y == 0):
    print("Origem")
elif (y == 0):
    print("Eixo X")
elif (x == 0):
    print("Eixo Y")
elif (x > 0 and y > 0):
    print("Q1")
elif (x < 0 and y > 0):
    print("Q2")
elif (x < 0 and y < 0):
    print("Q3")
else:
    print("Q4")
| 18.125
| 21
| 0.493103
|
d58593dd8c4983a7591a6fe7bb9bbad92fdd2c9f
| 15,661
|
py
|
Python
|
python/ccxt/async_support/base/exchange.py
|
bitzlato/ccxt
|
fe514678d84dfe7c13b7f927707fc0b31bdb6ceb
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/base/exchange.py
|
bitzlato/ccxt
|
fe514678d84dfe7c13b7f927707fc0b31bdb6ceb
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/base/exchange.py
|
bitzlato/ccxt
|
fe514678d84dfe7c13b7f927707fc0b31bdb6ceb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.58.6'
# -----------------------------------------------------------------------------
import asyncio
import concurrent.futures
import socket
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttler import Throttler
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
from ccxt.base.errors import BadSymbol
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
self.verify = config.get('verify', self.verify)
self.own_session = 'session' not in config
self.cafile = config.get('cafile', certifi.where())
super(Exchange, self).__init__(config)
self.throttle = None
self.init_rest_rate_limiter()
self.markets_loading = None
self.reloading_markets = False
def init_rest_rate_limiter(self):
self.throttle = Throttler(self.tokenBucket, self.asyncio_loop)
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
def open(self):
if self.own_session and self.session is None:
# Create our SSL context object with our CA cert file
context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
# Pass this SSL context to aiohttp and create a TCPConnector
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop, enable_cleanup_closed=True)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
cost = self.calculate_rate_limiter_cost(api, method, path, params, config, context)
# insert cost into here...
await self.throttle(cost)
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
self.log("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
request_body = body
encoded_body = body.encode() if body else None
self.open()
session_method = getattr(self.session, method.lower())
http_response = None
http_status_code = None
http_status_text = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text()
# CIMultiDictProxy
raw_headers = response.headers
headers = {}
for header in raw_headers:
if header in headers:
headers[header] = headers[header] + ', ' + raw_headers[header]
else:
headers[header] = raw_headers[header]
http_status_code = response.status
http_status_text = response.reason
http_response = self.on_rest_response(http_status_code, http_status_text, url, method, headers, http_response, request_headers, request_body)
json_response = self.parse_json(http_response)
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
self.log("\nResponse:", method, url, http_status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
except socket.gaierror as e:
details = ' '.join([self.id, method, url])
raise ExchangeNotAvailable(details) from e
except (concurrent.futures.TimeoutError, asyncio.TimeoutError) as e:
details = ' '.join([self.id, method, url])
raise RequestTimeout(details) from e
except aiohttp.ClientConnectionError as e:
details = ' '.join([self.id, method, url])
raise ExchangeNotAvailable(details) from e
except aiohttp.ClientError as e: # base exception class
details = ' '.join([self.id, method, url])
raise ExchangeError(details) from e
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_http_status_code(http_status_code, http_status_text, url, method, http_response)
if json_response is not None:
return json_response
if self.is_text_response(headers):
return http_response
return response.content
async def load_markets_helper(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def load_markets(self, reload=False, params={}):
if (reload and not self.reloading_markets) or not self.markets_loading:
self.reloading_markets = True
coroutine = self.load_markets_helper(reload, params)
# coroutines can only be awaited once so we wrap it in a task
self.markets_loading = asyncio.ensure_future(coroutine)
try:
result = await self.markets_loading
except Exception as e:
self.reloading_markets = False
self.markets_loading = None
raise e
self.reloading_markets = False
return result
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcvc(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcvc(trades, timeframe, since, limit)
async def fetchOHLCVC(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
ohlcvs = await self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
return [ohlcv[0:-1] for ohlcv in ohlcvs]
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def fetch_balance(self, params={}):
raise NotSupported('fetch_balance() not supported yet')
async def create_order(self, symbol, type, side, amount, price=None, params={}):
raise NotSupported('create_order() not supported yet')
async def cancel_order(self, id, symbol=None, params={}):
raise NotSupported('cancel_order() not supported yet')
async def fetch_trading_fees(self, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_ticker(self, symbol, params={}):
if self.has['fetchTickers']:
tickers = await self.fetch_tickers([symbol], params)
ticker = self.safe_value(tickers, symbol)
if ticker is None:
raise BadSymbol(self.id + ' fetchTickers could not find a ticker for ' + symbol)
else:
return ticker
else:
raise NotSupported(self.id + ' fetchTicker not supported yet')
async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_transactions() is not supported yet')
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_deposits() is not supported yet')
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_withdrawals() is not supported yet')
async def fetch_deposit_address(self, code, params={}):
if self.has['fetchDepositAddresses']:
deposit_addresses = await self.fetch_deposit_addresses([code], params)
deposit_address = self.safe_value(deposit_addresses, code)
if deposit_address is None:
raise NotSupported(self.id + ' fetch_deposit_address could not find a deposit address for ' + code + ', make sure you have created a corresponding deposit address in your wallet on the exchange website')
else:
return deposit_address
else:
raise NotSupported(self.id + ' fetchDepositAddress not supported yet')
async def sleep(self, milliseconds):
return await asyncio.sleep(milliseconds / 1000)
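# A minimal usage sketch for the async base class above (assumes a concrete exchange
# such as ccxt.async_support.binance and working network access - illustrative only):
import asyncio
import ccxt.async_support as ccxt

async def main():
    exchange = ccxt.binance()
    try:
        await exchange.load_markets()
        ticker = await exchange.fetch_ticker('BTC/USDT')
        print(ticker['last'])
    finally:
        # always release the aiohttp session, as the __del__ warning above insists
        await exchange.close()

asyncio.run(main())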
| 43.868347
| 355
| 0.618224
|
1aa45aaaaabace3fd2b3e99b1d5924e8b40b5248
| 434
|
py
|
Python
|
python-basic-grammer/python-basic/02-python-variables-and-string/type_of_variable.py
|
jinrunheng/base-of-python
|
595bdbc8bfaf2136d8f1f9ea82c03b84aeaf0a39
|
[
"Apache-2.0"
] | null | null | null |
python-basic-grammer/python-basic/02-python-variables-and-string/type_of_variable.py
|
jinrunheng/base-of-python
|
595bdbc8bfaf2136d8f1f9ea82c03b84aeaf0a39
|
[
"Apache-2.0"
] | null | null | null |
python-basic-grammer/python-basic/02-python-variables-and-string/type_of_variable.py
|
jinrunheng/base-of-python
|
595bdbc8bfaf2136d8f1f9ea82c03b84aeaf0a39
|
[
"Apache-2.0"
] | null | null | null |
# string type
name = "Kim"
# integer type
age = 28
# floating-point type
weight = 143.5
# boolean type
is_boy = True

print(name)
print(age)
print(weight)
print(is_boy)

# the type() function returns the data type of a variable
type_of_name = type(name)
type_of_age = type(age)
type_of_weight = type(weight)
type_of_is_boy = type(is_boy)

print(type_of_name)
print(type_of_age)
print(type_of_weight)
print(type_of_is_boy)

# the type() call can also be used directly inside print()
print(type(name))
print(type(age))
print(type(weight))
print(type(is_boy))
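# A small follow-up sketch (not part of the original lesson file): type() reports a
# variable's type, while isinstance() is the usual way to test it.
print(isinstance(age, int))               # True
print(isinstance(weight, (int, float)))   # True
print(isinstance(is_boy, bool))           # True - note that bool is a subclass of int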
| 14.466667
| 29
| 0.751152
|
060d8338ac7c4565298ce69c455c6f13e28433dd
| 1,839
|
py
|
Python
|
kmeans/compile.py
|
oprecomp/flexfloat-benchmarks
|
0acb1a53c57901e873ee7ba69e4f2bf9c2fbe348
|
[
"Apache-2.0"
] | 2
|
2019-02-06T15:34:56.000Z
|
2020-02-20T23:44:27.000Z
|
kmeans/compile.py
|
oprecomp/flexfloat-benchmarks
|
0acb1a53c57901e873ee7ba69e4f2bf9c2fbe348
|
[
"Apache-2.0"
] | null | null | null |
kmeans/compile.py
|
oprecomp/flexfloat-benchmarks
|
0acb1a53c57901e873ee7ba69e4f2bf9c2fbe348
|
[
"Apache-2.0"
] | null | null | null |
import sys
import csv
import subprocess

# Pulp with SmallFloat extensions
def set_coefficient_bits(prec):
    if(prec <= 3): # float8
        return 5
    elif(prec > 3 and prec <= 8): # float16ext
        return 8
    elif(prec > 8 and prec <= 11): # float16
        return 5
    elif(prec > 11 and prec <= 24): # float32
        return 8
    elif(prec > 24 and prec <= 53): # float64
        return 11
    else:
        raise Exception

def init_params(config_vals):
    result = []
    result.append(" -DFRAC_OBJECT=%d" % (config_vals[0] - 1))
    result.append(" -DFRAC_CLUSTER=%d" % (config_vals[1] - 1))
    result.append(" -DFRAC_DIST=%d" % (config_vals[2] - 1))
    result.append(" -DFRAC_TEMP1=%d" % (config_vals[3] - 1))
    result.append(" -DFRAC_TEMP2=%d" % (config_vals[4] - 1))
    result.append(" -DEXP_OBJECT=%d" % set_coefficient_bits(config_vals[0]))
    result.append(" -DEXP_CLUSTER=%d" % set_coefficient_bits(config_vals[1]))
    result.append(" -DEXP_DIST=%d" % set_coefficient_bits(config_vals[2]))
    result.append(" -DEXP_TEMP1=%d" % set_coefficient_bits(config_vals[3]))
    result.append(" -DEXP_TEMP2=%d" % set_coefficient_bits(config_vals[4]))
    return "".join(result)

with open(sys.argv[1], 'r') as config_file:
    reader = csv.reader(config_file)
    row = next(reader)
    if row[-1] == '':
        del row[-1]
    config_vals = [int(x) for x in row]

ext_cflags = init_params(config_vals)
make_process = subprocess.Popen(
    "make clean all CONF_MODE=file EXT_CFLAGS=\"" +
    ext_cflags + "\" OUTPUT_DIR=\"" + sys.argv[2] + "\" ",
    shell=True, stderr=subprocess.STDOUT)
make_process.wait()
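# Quick illustrative check of the flag generation (hypothetical precision row, not
# taken from the repository's data files): each mantissa precision becomes a
# -DFRAC_* define, and set_coefficient_bits() maps it to an exponent width.
example_row = [11, 24, 24, 8, 53]
print(init_params(example_row))
# -> " -DFRAC_OBJECT=10 -DFRAC_CLUSTER=23 ... -DEXP_OBJECT=5 -DEXP_CLUSTER=8 ... -DEXP_TEMP2=11"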
| 36.058824
| 164
| 0.568787
|
8989d97c5fe9cec2c566e970332c640a4c321dd0
| 1,588
|
py
|
Python
|
api/app/services/serveQrcode.py
|
JexPY/filemanager-fastapi
|
da830fe6d9a3d515e0d04e6e690ff366225ec251
|
[
"MIT"
] | 24
|
2020-09-15T11:59:55.000Z
|
2022-03-13T19:58:02.000Z
|
api/app/services/serveQrcode.py
|
JexPY/filemanager-fastapi
|
da830fe6d9a3d515e0d04e6e690ff366225ec251
|
[
"MIT"
] | null | null | null |
api/app/services/serveQrcode.py
|
JexPY/filemanager-fastapi
|
da830fe6d9a3d515e0d04e6e690ff366225ec251
|
[
"MIT"
] | 5
|
2020-10-11T08:41:29.000Z
|
2022-03-10T07:23:55.000Z
|
import os
import copy
import _thread
from fastapi import HTTPException, status
from .helpers.alena import local_savings
from .images.generateQr import qr_code_image
from .storage.googleCloud import upload_image_file_to_google_storage
from .storage.s3 import upload_image_file_to_s3_storage

def handle_qr_code(text: str, with_logo: bool):
    try:
        local_savings(qrCodes=True)
        qrCodePaths = qr_code_image(text, with_logo)
        if os.environ.get('PREFERED_STORAGE') == 'google':
            _thread.start_new_thread(upload_image_file_to_google_storage, (copy.deepcopy(qrCodePaths),))
            qrCodePaths['qrImage'] = os.environ.get('GOOGLE_BUCKET_URL') + os.environ.get('QR_IMAGE_GOOGLE_CLOUD_PATH') + qrCodePaths['qrImage'] if qrCodePaths.get('qrImage') else None
        elif os.environ.get('PREFERED_STORAGE') == 's3':
            _thread.start_new_thread(upload_image_file_to_s3_storage, (copy.deepcopy(qrCodePaths),))
            qrCodePaths['qrImage'] = os.environ.get('AWS_BUCKET_URL') + os.environ.get('QR_IMAGE_S3_CLOUD_PATH') + qrCodePaths['qrImage'] if qrCodePaths.get('qrImage') else None
        elif os.environ.get('PREFERED_STORAGE') == 'local':
            qrCodePaths['qrImage'] = os.environ.get('API_URL') + os.environ.get('QR_IMAGE_LOCAL_PATH') + qrCodePaths['qrImage'] if qrCodePaths.get('qrImage') else None
        qrCodePaths['storage'] = os.environ.get('PREFERED_STORAGE')
        return qrCodePaths
    except:
        raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail='The file format is not supported')
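# A sketch of how this service might be exposed as an endpoint (hypothetical route
# and module layout - the actual project wires its own routers):
from fastapi import APIRouter

router = APIRouter()

@router.get('/qr')
def create_qr(text: str, with_logo: bool = False):
    # handle_qr_code already raises HTTPException on failure, so the route can
    # simply return its dict of generated paths.
    return handle_qr_code(text, with_logo)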
| 51.225806
| 184
| 0.731108
|
f0ea6c5944d61f5ed47e5caa708ef2cde38647d1
| 6,057
|
py
|
Python
|
CZUR/head_data_preprocess/split_hollywood_train_val.py
|
lhwcv/mmdet
|
eaa05547305441539ba49a08d39bf364c033e9f1
|
[
"Apache-2.0"
] | 1
|
2020-12-14T05:32:10.000Z
|
2020-12-14T05:32:10.000Z
|
CZUR/head_data_preprocess/split_hollywood_train_val.py
|
lhwcv/mmdet
|
eaa05547305441539ba49a08d39bf364c033e9f1
|
[
"Apache-2.0"
] | null | null | null |
CZUR/head_data_preprocess/split_hollywood_train_val.py
|
lhwcv/mmdet
|
eaa05547305441539ba49a08d39bf364c033e9f1
|
[
"Apache-2.0"
] | null | null | null |
import os
import argparse
import glob
import tqdm
import xml.etree.ElementTree as ET
from CZUR.head_data_preprocess.quantize_bbox_hollywood_head import quantize_head_anno
CLASS_NAMES=[
'head'
]
#MIN_SIZE_RATIO = [1/10.0 , 1/ 10.0]
MIN_SIZE_RATIO = [0.2 , 0.2]
MAX_HEAD_N = 4
def get_args():
p = argparse.ArgumentParser()
p.add_argument("--data_dir", type=str,
default='/home/lhw/m2_disk/data/HollyWoodHead/')
p.add_argument("--train_file_save", type=str,
default='/home/lhw/m2_disk/data/HollyWoodHead/train.txt')
p.add_argument("--val_file_save", type=str,
default='/home/lhw/m2_disk/data/HollyWoodHead/val.txt')
p.add_argument("--train_ratio", type=float,
default=0.7)
p.add_argument("--quantize_label", type=bool,
default=True)
return p.parse_args()
def get_movie_frame_names_dict(img_data_dir, anno_dir):
'''
:param img_data_dir: JPEGImages
:param anno_dir: Annotations
:return: {
'mov_001': [file1.jpg, file2.jpg .....],
'mov_002': [file1.jpg, file2.jpg .....]
}
'''
data_dict = {}
filenames = [f for f in os.listdir(img_data_dir) if 'mov' in f]
print('hollywood head frames total: ', len(filenames))
valid_n = 0
for fn in tqdm.tqdm(filenames):
xml_name = fn.replace('.jpeg','.xml')
ann_fn = os.path.join(anno_dir,xml_name )
if os.path.exists(ann_fn):
has_obj = False
tree = ET.parse(ann_fn)
root = tree.getroot()
for obj in root.findall('object'):
name = obj.find('name')
if name is None:
break
name = name.text
if name in CLASS_NAMES:
has_obj = True
break
if has_obj:
### check, if all object is small, break
size = root.find('size')
dw = int(size.find('width').text)
dh = int(size.find('height').text)
min_size = dw * dh * MIN_SIZE_RATIO[0] * MIN_SIZE_RATIO[1]
size_valid = False
n_obj = 0
for obj in root.iter('object'):
n_obj += 1
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
float(xmlbox.find('ymin').text),
float(xmlbox.find('ymax').text))
w = abs(b[1] - b[0])
h = abs(b[3] - b[2])
s = w * h
if s > min_size:
size_valid = True
break
if size_valid and n_obj<=MAX_HEAD_N :
valid_n+=1
movie_id = xml_name.split('_')[1]
fn = os.path.join(img_data_dir,fn)
if movie_id not in data_dict.keys():
data_dict[movie_id] = [fn]
else:
data_dict[movie_id].append(fn)
print('valid frames: ', valid_n)
print('movies: ', len(data_dict.keys()) )
return data_dict
def main():
args = get_args()
data_dir = args.data_dir
train_file_save = args.train_file_save
val_file_save = args.val_file_save
train_ratio = args.train_ratio
print('min size: ', MIN_SIZE_RATIO)
movie_frame_names_dict = get_movie_frame_names_dict(
data_dir + '/JPEGImages',
data_dir + '/Annotations',
)
movie_names = list(movie_frame_names_dict.keys() )
N = int(len(movie_names) * train_ratio)
train_movies = movie_names[:N]
val_movies = movie_names[N:]
train_files = []
for n in train_movies:
train_files.extend(movie_frame_names_dict[n])
val_files = []
for n in val_movies:
val_files.extend(movie_frame_names_dict[n])
print('train files n: ', len(train_files))
print('val files n: ', len(val_files))
with open(train_file_save,'w') as f:
for fn in train_files:
f.write(fn+'\n')
with open(val_file_save,'w') as f:
for fn in val_files:
f.write(fn+'\n')
if args.quantize_label:
quantize_anno_save_dir = data_dir + '/Annotations_Quantize/'
if not os.path.exists(quantize_anno_save_dir):
os.mkdir(quantize_anno_save_dir)
for fn in train_files:
fn = fn.split('/')[-1]
fn = fn.replace('.jpeg', '.xml')
anno_in = data_dir + '/Annotations/'+ fn
anno_save = quantize_anno_save_dir+'/'+fn
quantize_head_anno(anno_in, anno_save)
for fn in val_files:
fn = fn.split('/')[-1]
fn = fn.replace('.jpeg', '.xml')
anno_in = data_dir + '/Annotations/'+ fn
anno_save = quantize_anno_save_dir+'/'+fn
quantize_head_anno(anno_in, anno_save)
###write to ImageSets
with open(data_dir+'/ImageSets/Main/trainval.txt', 'w') as f:
for fn in train_files:
fn = fn.split('/')[-1].split('.')[0]
f.write(fn + '\n')
with open(data_dir+'/ImageSets/Main/train.txt', 'w') as f:
for fn in train_files:
fn = fn.split('/')[-1].split('.')[0]
f.write(fn + '\n')
with open(data_dir+'/ImageSets/Main/val.txt', 'w') as f:
for fn in val_files:
fn = fn.split('/')[-1].split('.')[0]
f.write(fn + '\n')
with open(data_dir+'/ImageSets/Main/test.txt', 'w') as f:
for fn in val_files:
fn = fn.split('/')[-1].split('.')[0]
f.write(fn + '\n')
if __name__ == '__main__':
main()
| 35.421053
| 91
| 0.511309
|
ee55f5e3467fb4f98fbb57fe11c424259ed56963
| 886
|
py
|
Python
|
country_date&time.py
|
Adel-Charef/scripts
|
0d80784f74e0e4cb6d38c23d50ba96cf036a3692
|
[
"MIT"
] | null | null | null |
country_date&time.py
|
Adel-Charef/scripts
|
0d80784f74e0e4cb6d38c23d50ba96cf036a3692
|
[
"MIT"
] | null | null | null |
country_date&time.py
|
Adel-Charef/scripts
|
0d80784f74e0e4cb6d38c23d50ba96cf036a3692
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import pytz

color = {
    'RED': '\033[1;91m',
    'UNDERLINE_PURPLE': '\033[4;34m',
    'GREEN': '\033[1;92m',
    'YELLOW': '\033[1;33m',
    'CYAN': '\033[0;36m',
    'PURPLE': '\033[0;34m',
    'MAGENTA': '\033[0;35m',
    'DEFAULT': '\033[0m',
    'TWITTER_BLUE': '\033[38;5;33m',
}

country_zones = ["Africa/Algiers", "Africa/Casablanca",
                 "Africa/Tunis", "Africa/Cairo", "Africa/Khartoum"]
country_time_zones = []
for country_time_zone in country_zones:
    country_time_zones.append(pytz.timezone(country_time_zone))

for i in range(len(country_time_zones)):
    country_time = datetime.now(country_time_zones[i])
    print(f"{color['DEFAULT']}The date in {color['RED']}'{country_zones[i]}': {color['GREEN']}{country_time.strftime('%d-%m-%y')} {color['DEFAULT']}and The Time: {color['PURPLE']}{country_time.strftime('%H:%M')}\n")
| 34.076923
| 215
| 0.638826
|
45fa7cd96afab58bf6c542cd659223859c844cce
| 626
|
py
|
Python
|
bubble_sort.py
|
dlenhart/algorithms
|
23649abfb5750fbf71ef28259534e9840761342b
|
[
"MIT"
] | null | null | null |
bubble_sort.py
|
dlenhart/algorithms
|
23649abfb5750fbf71ef28259534e9840761342b
|
[
"MIT"
] | null | null | null |
bubble_sort.py
|
dlenhart/algorithms
|
23649abfb5750fbf71ef28259534e9840761342b
|
[
"MIT"
] | null | null | null |
# Author: Drew D. Lenhart
#
# Desc: Sorting algorithm, repeatedly swapping adjacent elements if they
#       are in the wrong order ( repeatedly moves the higher value to the
#       right until all numbers are in sorted order. )
#
# Date: Aug 25th 2021
#
#
def bubble_sort(list):
    swapped = True
    while swapped:
        swapped = False
        for i in range(len(list) - 1):
            if list[i] > list[i + 1]:
                swapped = True
                list[i], list[i + 1] = list[i + 1], list[i]
    return list

sort = [2, 44, 6, 8, 15, 33, 21, 77, 82, 99, 101]
print(bubble_sort(sort))
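# A quick sanity check against Python's built-in sorted(), on a throwaway random
# list (illustrative only):
import random

data = [random.randint(0, 100) for _ in range(20)]
assert bubble_sort(list(data)) == sorted(data)
print("bubble_sort matches sorted()")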
| 25.04
| 77
| 0.565495
|
9b0a0e7ccd9fbc5cfa860ad9d21af3574784d7b9
| 1,330
|
py
|
Python
|
ato/ia_rep/urls.py
|
jefke-glider/gliding
|
22ebc03879be7d62ec64a0990cd4d3e1b263409b
|
[
"MIT"
] | null | null | null |
ato/ia_rep/urls.py
|
jefke-glider/gliding
|
22ebc03879be7d62ec64a0990cd4d3e1b263409b
|
[
"MIT"
] | 4
|
2017-05-03T15:10:07.000Z
|
2017-05-05T13:41:18.000Z
|
ato/ia_rep/urls.py
|
jefke-glider/gliding
|
22ebc03879be7d62ec64a0990cd4d3e1b263409b
|
[
"MIT"
] | null | null | null |
"""ia_rep URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.conf.urls.static import static
import os
from . import views
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login/$', auth_views.login, {'template_name': 'login.html'}, name='login'),
url(r'^accounts/logout/$', auth_views.logout, {'template_name': 'logout.html'}, name='logout'),
url(r'^report_ia/', include('report_ia.urls', namespace='report_ia')),
url(r'^$', views.home),
]
if os.uname()[1] != 'gimli':
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 39.117647
| 95
| 0.718797
|
050557669bf9df5e0fd199d89b5e3904a413f15f
| 42,086
|
py
|
Python
|
mmnrm/models.py
|
T-Almeida/mmnrm
|
f67441a4e2cb0a8335b5e96f3ea9ea0a0eba080a
|
[
"MIT"
] | 1
|
2021-04-19T10:10:43.000Z
|
2021-04-19T10:10:43.000Z
|
mmnrm/models.py
|
T-Almeida/mmnrm
|
f67441a4e2cb0a8335b5e96f3ea9ea0a0eba080a
|
[
"MIT"
] | 1
|
2020-09-30T11:34:01.000Z
|
2020-09-30T11:34:01.000Z
|
mmnrm/models.py
|
T-Almeida/mmnrm
|
f67441a4e2cb0a8335b5e96f3ea9ea0a0eba080a
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras import backend as K
from mmnrm.layers.interaction import SemanticInteractions, ExactInteractions
from mmnrm.layers.local_relevance import MultipleNgramConvs, MaskedSoftmax
from mmnrm.layers.transformations import *
from mmnrm.layers.aggregation import *
def build_PACRR(max_q_length,
max_d_length,
emb_matrix = None,
learn_context = False,
trainable_embeddings = False,
learn_term_weights = False,
dense_hidden_units = None,
max_ngram = 3,
k_max = 2,
activation="relu",
out_put_dim = 1,
return_embeddings=False,
shuffle_query_terms = False,
k_polling_avg = None, # do k_polling avg after convolution
polling_avg = False, # do avg polling after convolution
use_mask = True,
filters=32, # can be a list or a function of input features and n-gram
name_model = None,
**kwargs):
prefix_name = ""
# init layers
input_query = tf.keras.layers.Input((max_q_length,), dtype="int32")
input_query_idf = tf.keras.layers.Input((max_q_length,), dtype="float32")
input_sentence = tf.keras.layers.Input((max_d_length,), dtype="int32")
if emb_matrix is None:
interaction = ExactInteractions()
else:
interaction = SemanticInteractions(emb_matrix,
learn_term_weights=learn_term_weights,
trainable_embeddings=trainable_embeddings,
learn_context=learn_context,
use_mask=True,
return_embeddings=return_embeddings)
ngram_convs = MultipleNgramConvs(max_ngram=max_ngram,
k_max=k_max,
k_polling_avg=k_polling_avg,
polling_avg=polling_avg,
use_mask=use_mask,
filters=filters,
activation=activation)
softmax_IDF = MaskedSoftmax()
if use_mask:
concatenate = MaskedConcatenate(0)
else:
concatenate = tf.keras.layers.Concatenate()
if dense_hidden_units is None:
aggregation_layer = tf.keras.layers.LSTM(out_put_dim,
dropout=0.0,
recurrent_regularizer=None,
recurrent_dropout=0.0,
unit_forget_bias=True,
recurrent_activation="hard_sigmoid",
bias_regularizer=None,
activation=activation,
recurrent_initializer="orthogonal",
kernel_regularizer=None,
kernel_initializer="glorot_uniform",
unroll=True) # speed UP!!!
elif isinstance(dense_hidden_units, list) :
def _network(x):
x = tf.keras.layers.Flatten()(x)
for i,h in enumerate(dense_hidden_units):
x = tf.keras.layers.Dense(h, activation="relu", name="aggregation_dense_"+str(i))(x)
dout = tf.keras.layers.Dense(1, name="aggregation_output")(x)
return dout
aggregation_layer = _network
else:
raise RuntimeError("dense_hidden_units must be a list with the hidden size per layer")
# build layers
norm_idf = K.expand_dims(softmax_IDF(input_query_idf))
if return_embeddings:
_out = interaction([input_query, input_sentence])
x = _out[0]
embeddings = _out[1]
else:
x = interaction([input_query, input_sentence])
x = ngram_convs(x)
x = concatenate([x, norm_idf])
if shuffle_query_terms:
shuffle = ShuffleRows()
prefix_name += "S"
x = shuffle(x)
x = aggregation_layer(x)
if name_model is None:
name_model = (prefix_name+"_" if prefix_name != "" else "") + "PACRR"
if return_embeddings:
return tf.keras.models.Model(inputs=[input_query, input_sentence, input_query_idf], outputs=[x, embeddings])
else:
return tf.keras.models.Model(inputs=[input_query, input_sentence, input_query_idf], outputs=x)
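# A minimal, hypothetical usage sketch for build_PACRR; the vocabulary size, embedding
# dimension and hyper-parameter values below are assumptions for illustration only.
def _example_build_pacrr():
    import numpy as np
    emb = np.random.uniform(-0.2, 0.2, size=(30000, 200)).astype("float32")  # hypothetical embedding matrix
    model = build_PACRR(max_q_length=30,
                        max_d_length=200,
                        emb_matrix=emb,
                        filters=32,
                        dense_hidden_units=[16, 8])
    # the resulting model takes [query token ids, document token ids, query idf] as inputs
    return model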
def sentence_PACRR(pacrr, sentence_per_doc, type_combination=0, activation="relu"):
"""
type_combination - 0: use MLP
1: use WeightedCombination + MLP
2: GRU
"""
max_q_length = pacrr.input[0].shape[1]
max_d_length = pacrr.input[1].shape[1]
input_query = tf.keras.layers.Input((max_q_length,), dtype="int32") # (None, Q)
input_query_idf = tf.keras.layers.Input((max_q_length,), dtype="float32") # (None, Q)
input_doc = tf.keras.layers.Input((sentence_per_doc, max_d_length), dtype="int32") # (None, P, S)
#aggregate = tf.keras.layers.GRU(1, activation="relu")
#aggregate = WeightedCombination()
def aggregate(x):
#x = tf.keras.layers.Dense(25, activation="relu")(x)
x = KmaxAggregation(k=5)(x)
#x = tf.squeeze(x, axis=-1)
x = tf.keras.layers.Dense(6, activation="selu")(x)
return tf.keras.layers.Dense(1, activation=None)(x)
#def aggregate(x):
#x = tf.keras.layers.Dense(25, activation="relu")(x)
# return K.max(tf.squeeze(x, axis=-1), axis=-1, keepdims=True)
sentences = tf.unstack(input_doc, axis=1) #[(None,S), (None,S), ..., (None,S)]
pacrr_sentences = []
for sentence in sentences:
pacrr_sentences.append(pacrr([input_query, sentence, input_query_idf]))
pacrr_sentences = tf.stack(pacrr_sentences, axis=1)
#pacrr_sentences = tf.squeeze(pacrr_sentences, axis=-1)
score = aggregate(pacrr_sentences)
return tf.keras.models.Model(inputs=[input_query, input_doc, input_query_idf], outputs=score)
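# A minimal, hypothetical usage sketch: wrap a base PACRR model so it scores a fixed number
# of sentences per document; the sizes used here are assumptions for illustration only.
def _example_sentence_pacrr():
    base = build_PACRR(max_q_length=30, max_d_length=80, dense_hidden_units=[8])  # exact-match PACRR (no emb_matrix)
    return sentence_PACRR(base, sentence_per_doc=10)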
def semantic_exact_PACRR(semantic_pacrr_args,
exact_pacrr_args,
type_combination=0,
semantic_filter_threshold=None,
dense_hidden_units=[4]):
"""
type_combination - 0: use MLP
1: use WeightedCombination + MLP
2: use Sum over score
3: use self-attention over query + dense + attend
4: use RareWordFreqCombine
"""
assert(semantic_pacrr_args["max_q_length"]==exact_pacrr_args["max_q_length"])
assert(semantic_pacrr_args["max_d_length"]==exact_pacrr_args["max_d_length"])
return_embeddings = type_combination in [3]
max_q_length = semantic_pacrr_args["max_q_length"]
max_d_length = semantic_pacrr_args["max_d_length"]
# init layers
input_query = tf.keras.layers.Input((max_q_length,), dtype="int32")
input_query_idf = tf.keras.layers.Input((max_q_length,), dtype="float32")
input_sentence = tf.keras.layers.Input((max_d_length,), dtype="int32")
# build
semantic_pacrr = build_PACRR(**semantic_pacrr_args)
exact_pacrr = build_PACRR(**exact_pacrr_args)
def _aggregate(x):
if type_combination==0:
return tf.keras.layers.Concatenate(axis=-1)(x)
elif type_combination==1:
x = tf.keras.layers.Lambda(lambda x: K.concatenate(list(map(lambda y: K.expand_dims(y), x))) )(x)
return WeightedCombination()(x)
elif type_combination==2:
x = tf.keras.layers.Concatenate(axis=-1)(x)
return K.sum(x, axis=-1, keepdims=True)
elif type_combination==3:
query_attn = SelfAttention()(embeddings, mask=_mask)
score_query = tf.keras.layers.Dense(1, activation="sigmoid")(query_attn)
return x[0]*score_query + (1-score_query)*x[1]
elif type_combination==4:
return RareWordFreqCombine(semantic_filter_threshold)([x[1], x[0], input_query])
else:
raise RuntimeError("invalid type_combination")
def _score(x):
if type_combination in [2,3,4]:
return x # identity
for i,h in enumerate(dense_hidden_units):
x = tf.keras.layers.Dense(h, activation="relu")(x)
return tf.keras.layers.Dense(1, activation="relu")(x)
# build layers
if semantic_filter_threshold is not None:
semantic_filter_mask = ReplaceValuesByThreashold(semantic_filter_threshold, return_filter_mask=True)
semantic_filter = ReplaceValuesByThreashold(semantic_filter_threshold, return_filter_mask=False)
semantic_filter_idf = ReplaceValuesByMask()
input_query_filter, _mask = semantic_filter_mask(input_query)
input_sentence_filter = semantic_filter(input_sentence)
        input_query_idf_filter = semantic_filter_idf([input_query_idf, _mask])
    else:
        # no rare-word filtering requested: use the unfiltered inputs directly,
        # so the filtered variable names used below are always defined
        input_query_filter = input_query
        input_sentence_filter = input_sentence
        input_query_idf_filter = input_query_idf
# semantic pacrr with embeddings
if return_embeddings:
semantic_repr, embeddings = semantic_pacrr([input_query_filter, input_sentence_filter, input_query_idf_filter])
else:
semantic_repr = semantic_pacrr([input_query_filter, input_sentence_filter, input_query_idf_filter])
exact_repr = exact_pacrr([input_query, input_sentence, input_query_idf])
combined = _aggregate([semantic_repr, exact_repr])
score = _score(combined)
return tf.keras.models.Model(inputs=[input_query, input_sentence, input_query_idf], outputs=score)
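# A minimal, hypothetical usage sketch: both argument dicts must agree on max_q_length and
# max_d_length (see the asserts above); all other values, including the zero embedding
# matrix, are assumptions for illustration only.
def _example_semantic_exact_pacrr():
    import numpy as np
    emb = np.zeros((30000, 200), dtype="float32")  # hypothetical embedding matrix
    semantic_args = {"max_q_length": 30, "max_d_length": 200, "emb_matrix": emb, "filters": 16}
    exact_args = {"max_q_length": 30, "max_d_length": 200, "filters": 16}
    return semantic_exact_PACRR(semantic_args, exact_args, type_combination=0)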
def experimental_semantic_exact_PACRR(semantic_pacrr_args,
exact_pacrr_args,
semantic_filter_threshold=None,
filters=16,
activation="relu"):
assert(semantic_pacrr_args["max_q_length"]==exact_pacrr_args["max_q_length"])
assert(semantic_pacrr_args["max_d_length"]==exact_pacrr_args["max_d_length"])
max_q_length = semantic_pacrr_args["max_q_length"]
max_d_length = semantic_pacrr_args["max_d_length"]
# init layers
input_query = tf.keras.layers.Input((max_q_length,), dtype="int32")
input_query_idf = tf.keras.layers.Input((max_q_length,), dtype="float32")
input_sentence = tf.keras.layers.Input((max_d_length,), dtype="int32")
# exact pacrr
e_interaction = ExactInteractions()
e_ngram_convs = MultipleNgramConvs(filters=filters,
activation=activation,
max_ngram=3,
k_max=2,
k_polling_avg=None,
polling_avg=False)
e_softmax_IDF = MaskedSoftmax()
e_concatenate = MaskedConcatenate(0)
e_aggregation_layer = tf.keras.layers.LSTM(1,
dropout=0.0,
recurrent_regularizer=None,
recurrent_dropout=0.0,
unit_forget_bias=True,
recurrent_activation="hard_sigmoid",
bias_regularizer=None,
activation=activation,
recurrent_initializer="orthogonal",
kernel_regularizer=None,
kernel_initializer="glorot_uniform",
unroll=True) # speed UP!!!
# build layers
e_norm_idf = K.expand_dims(e_softmax_IDF(input_query_idf))
e_out_interaction = e_interaction([input_query, input_sentence])
e_x = e_ngram_convs(e_out_interaction)
e_x = e_concatenate([e_x, e_norm_idf])
e_x = e_aggregation_layer(e_x)
# semantic pacrr
semantic_filter_mask = ReplaceValuesByThreashold(semantic_filter_threshold, return_filter_mask=True)
semantic_filter = ReplaceValuesByThreashold(semantic_filter_threshold, return_filter_mask=False)
semantic_filter_idf = ReplaceValuesByMask()
s_interaction = SemanticInteractions(semantic_pacrr_args["emb_matrix"])
s_combined_interaction = tf.keras.layers.Lambda(lambda x: x[0]-x[1], mask=lambda x,m:m[0])
s_ngram_convs = MultipleNgramConvs(filters=filters,
activation=activation,
max_ngram=3,
k_max=2,
k_polling_avg=None,
polling_avg=False)
s_softmax_IDF = MaskedSoftmax()
s_concatenate = MaskedConcatenate(0)
s_aggregation_layer = tf.keras.layers.LSTM(1,
dropout=0.0,
recurrent_regularizer=None,
recurrent_dropout=0.0,
unit_forget_bias=True,
recurrent_activation="hard_sigmoid",
bias_regularizer=None,
activation=activation,
recurrent_initializer="orthogonal",
kernel_regularizer=None,
kernel_initializer="glorot_uniform",
unroll=True) # speed UP!!!
# build layers
input_query_filter, _mask = semantic_filter_mask(input_query)
input_sentence_filter = semantic_filter(input_sentence)
input_query_idf_filter = semantic_filter_idf([input_query_idf, _mask])
s_out_interaction = s_interaction([input_query_filter, input_sentence_filter])
s_out_interaction = s_combined_interaction([s_out_interaction, e_out_interaction])
s_norm_idf = K.expand_dims(s_softmax_IDF(input_query_idf_filter))
s_x = s_ngram_convs(s_out_interaction)
s_x = s_concatenate([s_x, s_norm_idf])
s_x = s_aggregation_layer(s_x)
## Agregation
score = RareWordFreqCombine(semantic_filter_threshold)([s_x, e_x, input_query])
return tf.keras.models.Model(inputs=[input_query, input_sentence, input_query_idf], outputs=score)
def simple_sentence_match(max_q_length,
max_s_length,
max_s_per_doc,
emb_matrix,
learn_context = False,
trainable_embeddings = False,
learn_term_weights = False,
use_mask=True,
matching_extraction_mode=0,
q_terms_aggregation = 0,
hidden_term_aggregation = 6,
max_ngram = 3,
k_max = 2,
filters = 16,
activation="relu"):
"""
q_terms_aggregation: 0 - bidirectional lstm
1 - self-attention
matching_extraction_mode: 0 - multiple convs with k_max 2
1 - conv with global max pooling
"""
input_query = tf.keras.layers.Input((max_q_length,), dtype="int32") # (None, Q)
input_doc = tf.keras.layers.Input((max_s_per_doc, max_s_length), dtype="int32") # (None, P, S)
input_query_idf = tf.keras.layers.Input((max_q_length,), dtype="float32")
softmax_IDF = MaskedSoftmax()
concatenate = MaskedConcatenate(0)
interaction = SemanticInteractions(emb_matrix,
learn_term_weights=learn_term_weights,
trainable_embeddings=trainable_embeddings,
learn_context=learn_context,
use_mask=use_mask,
return_embeddings=True)
# convolutions
ngram_convs = MultipleNgramConvs(max_ngram=max_ngram,
k_max=k_max,
k_polling_avg=None,
polling_avg=False,
use_mask=use_mask,
filters=filters,
activation=activation)
if q_terms_aggregation==0:
sentence_signal = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(hidden_term_aggregation, activation=activation))
elif q_terms_aggregation==1:
sentence_signal = SelfAttention(attention_dimension=hidden_term_aggregation)
else:
raise RuntimeError("invalid value for q_terms_aggregation")
def aggregate(x):
x = KmaxAggregation(k=2)(x)
# x = tf.squeeze(x, axis=-1)
x = tf.keras.layers.Dense(6, activation=activation)(x)
return tf.keras.layers.Dense(1, activation=None)(x)
input_sentences = tf.unstack(input_doc, axis=1) # [(None,S), (None,S), ..., (None,S)]
sentences_hidden = []
for input_sentence in input_sentences:
_out = interaction([input_query, input_sentence])
x = _out[0]
query_embedding = _out[1]
x = ngram_convs(x)
norm_idf = K.expand_dims(softmax_IDF(input_query_idf))
x = concatenate([x, norm_idf])
x = sentence_signal(x)
sentences_hidden.append(x)
sentences_hidden = tf.keras.layers.Lambda(lambda x: tf.stack(x, axis=1))(sentences_hidden)
combined = aggregate(sentences_hidden)
return tf.keras.models.Model(inputs=[input_query, input_doc, input_query_idf], outputs=combined)
def deep_rank(max_q_length,
max_s_length,
max_s_per_q_term,
emb_matrix,
filters=16,
gru=16,
q_term_weight_mode=0,
aggregation_mode=0,
extraction_mode=0):
"""
q_term_weight_mode: 0 - use term aggregation with embeddings
1 - use term aggregation with idf
aggregation_mode: 0 - use Bidirectional GRU
1 - use Bidirectional GRU + sig for sentence score follow another Bidirectional GRU for aggregation
2 - use Bidirectional GRU + sig for sentence score
3 - compute score independently + sig for sentence score
extraction_mode: 0 - use CNN + GlobalMaxPool
1 - use CNN + [GlobalMaxPool, GlobalAvgPool]
2 - use CNN + [GlobalMaxPool, GlobalAvgPool, GlobalK-maxAvgPool]
3 - use CNN + [GlobalMaxPool, GlobalK-maxAvgPool]
4 - use CNN + GlobalKmaxPool
"""
initialized_vars = locals()
return_embeddings = q_term_weight_mode==0
input_query = tf.keras.layers.Input((max_q_length,), dtype="int32") # (None, Q)
input_doc = tf.keras.layers.Input((max_q_length, max_s_per_q_term, max_s_length), dtype="int32") # (None, P, S)
input_query_idf = tf.keras.layers.Input((max_q_length,), dtype="float32")
interactions = SemanticInteractions(emb_matrix, return_embeddings=return_embeddings)
if extraction_mode==0:
conv = tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3), activation="selu")
pool = tf.keras.layers.GlobalMaxPool2D()
def extract(x):
if return_embeddings:
x, query_embeddings, _ = interactions(x)
else:
x = interactions(x)
query_embeddings = K.expand_dims(input_query_idf, axis=-1)
x = conv(x)
x = pool(x)
return x, query_embeddings
elif extraction_mode in [1, 2, 3]:
conv = tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3),padding="SAME", activation="selu")
max_pool = tf.keras.layers.GlobalMaxPool2D()
avg_pool = tf.keras.layers.GlobalAveragePooling2D()
masked_avg_pool = GlobalMaskedAvgPooling2D()
kmax_avg_pool = GlobalKmaxAvgPooling2D(kmax=5)
concatenate = tf.keras.layers.Concatenate(axis=-1)
def extract(x):
if return_embeddings:
x_interaction, query_embeddings, _ = interactions(x)
else:
x_interaction = interactions(x)
query_embeddings = K.expand_dims(input_query_idf, axis=-1)
x = conv(x_interaction)
max_x = max_pool(x)
_concat = [max_x]
if extraction_mode in [1, 2]:
avg_x = avg_pool(x)
_concat.append(avg_x)
if extraction_mode in [2, 3]:  # not elif: mode 2 adds both the avg and the k-max pools (see docstring)
kmax_x = kmax_avg_pool(x)
_concat.append(kmax_x)
x = concatenate(_concat)
return x, query_embeddings
elif extraction_mode==4:
conv = tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3),padding="SAME", activation="selu")
kmax_pool = GlobalKmax2D()
def extract(x):
if return_embeddings:
x_interaction, query_embeddings, _ = interactions(x)
else:
x_interaction = interactions(x)
query_embeddings = K.expand_dims(input_query_idf, axis=-1)
x = conv(x_interaction)
x = kmax_pool(x)
return x, query_embeddings
else:
raise RuntimeError("invalid extraction_mode")
if aggregation_mode==0:
aggregation_senteces = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(gru))
elif aggregation_mode==1:
l1 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(1, return_sequences=True), merge_mode="sum")
l2 = tf.keras.layers.Activation('sigmoid')
l3 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(gru), merge_mode="sum")
def aggregation_senteces(x):
x = l1(x)
x = l2(x)
x = l3(x)
return x
elif aggregation_mode==2:
l1_a = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(1, return_sequences=True), merge_mode="sum")
l2_a = tf.keras.layers.Activation('sigmoid')
l3_a = tf.keras.layers.Lambda(lambda y: tf.squeeze(y, axis=-1))
def aggregation_senteces(x):
x = l1_a(x)
x = l2_a(x)
x = l3_a(x)
return x
elif aggregation_mode==3:
l1_a = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(1, activation="sigmoid"))
l2_a = tf.keras.layers.Lambda(lambda y: tf.squeeze(y, axis=-1))
def aggregation_senteces(x):
x = l1_a(x)
x = l2_a(x)
return x
else:
raise RuntimeError("invalid aggregation_mode")
aggregation = TermAggregation()
output_score = tf.keras.layers.Dense(1)
input_doc_unstack = tf.unstack(input_doc, axis=1)
output_i = []
for input_i in input_doc_unstack:
input_i_unstack = tf.unstack(input_i, axis=1)
output_j = []
for input_j in input_i_unstack:
_out, query_embeddings = extract([input_query, input_j])
output_j.append(_out) # [None, FM]
output_j_stack = tf.stack(output_j, axis=1) # [None, P_Q, FM]
output_i.append(aggregation_senteces(output_j_stack)) # [None, FM]
output_i_stack = tf.stack(output_i, axis=1) # [None, Q, FM]
# aggregation
doc_vector = aggregation([output_i_stack, query_embeddings])
# score
score = output_score(doc_vector)
return tf.keras.models.Model(inputs=[input_query, input_doc, input_query_idf], outputs=score), output_i_stack
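# A minimal, hypothetical usage sketch: deep_rank expects each document as a
# (query terms, sentences per term, sentence length) tensor of token ids; the sizes and the
# zero embedding matrix below are assumptions for illustration only.
def _example_deep_rank():
    import numpy as np
    emb = np.zeros((30000, 200), dtype="float32")  # hypothetical embedding matrix
    model, _ = deep_rank(max_q_length=13, max_s_length=30, max_s_per_q_term=5,
                         emb_matrix=emb, filters=16, gru=16)
    # prediction inputs: [query ids (None, 13), doc ids (None, 13, 5, 30), query idf (None, 13)]
    return model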
def deep_snippet_ranker(max_q_length,
max_s_length,
max_s_per_doc,
emb_matrix,
filters=16,
gru=16,
conditional_speed_up=False,
q_term_weight_mode=0,
aggregation_mode=0,
score_mode=0,
extract_mode=0):
"""
q_term_weight_mode: 0 - use idf
1 - use softmax(idf)
2 - use softmax(embedings)
aggregation_mode: 0 - use Bidirectional GRU + sig for sentence score follow another Bidirectional GRU for aggregation
1 - use Bidirectional GRU + sig for sentence score follow GRU to score
extract_mode: 0 - Convolution + global max pool
1 - MultipleNgramConvs + idf + flat
score_mode: 0 - dense
1 - linear
2 - mlp
"""
return_embeddings=False
input_query = tf.keras.layers.Input((max_q_length,), dtype="int32") # (None, Q)
input_doc = tf.keras.layers.Input((max_s_per_doc, max_s_length), dtype="int32") # (None, P, S)
input_query_idf = tf.keras.layers.Input((max_q_length,), dtype="float32")
#softmax_IDF(x[1])
if q_term_weight_mode==0:
def q_term_weight_fn(x):
interaction_weighted = tf.squeeze(x[0], axis=-1)*K.expand_dims(x[1], axis=-1)
return K.expand_dims(interaction_weighted, axis=-1)
elif q_term_weight_mode==1:
softmax_IDF = MaskedSoftmax()
def q_term_weight_fn(x):
interaction_weighted = tf.squeeze(x[0], axis=-1)*K.expand_dims(softmax_IDF(x[1]), axis=-1)
return K.expand_dims(interaction_weighted, axis=-1)
elif q_term_weight_mode==2:
return_embeddings=True
q_term_aggregation = TermAggregation(aggregate=False)
def q_term_weight_fn(x):
interaction_m = tf.squeeze(x[0], axis=-1)
return K.expand_dims(q_term_aggregation([interaction_m, x[1]]), axis=-1)
else:
raise RuntimeError("invalid q_term_weight_mode")
if extract_mode==0:
interactions = SemanticInteractions(emb_matrix, return_embeddings=return_embeddings)
softmax_IDF = MaskedSoftmax()
normalize_interactions_idf = tf.keras.layers.Lambda(q_term_weight_fn, mask=lambda x,mask=None: x[0])
conv = tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3))
pool = tf.keras.layers.GlobalMaxPool2D()
def extract(x):
if return_embeddings:
out, q_embedding, _ = interactions([x[0], x[1]])
x[2] = q_embedding
else:
out = interactions([x[0], x[1]])
x = normalize_interactions_idf([out, x[2]])
x = conv(x)
x = pool(x)
return x
elif extract_mode==1:
interactions = SemanticInteractions(emb_matrix, return_embeddings=return_embeddings)
ngrams_conv = MultipleNgramConvs(3,
2,
k_polling_avg = None, # do k_polling avg after convolution
polling_avg = False, # do avg polling after convolution
use_mask = True,
filters=filters, # can be a list or a function of input features and n-gram
activation="selu")
softmax_IDF = MaskedSoftmax()
m_concatenate = MaskedConcatenate(0)
flat = tf.keras.layers.Flatten()
def extract(x):
if return_embeddings:
raise RuntimeError("extract_mode 1 does not support")
else:
out = interactions([x[0], x[1]])
idf_norm = K.expand_dims(softmax_IDF(x[2]))
x = ngrams_conv(out)
x = m_concatenate([idf_norm, x])
x = flat(x)
return x
elif extract_mode==2:
interactions = SemanticInteractions(emb_matrix, return_embeddings=return_embeddings)
normalize_interactions_idf = tf.keras.layers.Lambda(q_term_weight_fn, mask=lambda x,mask=None: x[0])
conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3))
pool_1 = tf.keras.layers.MaxPool2D()
conv_2 = tf.keras.layers.Conv2D(filters=filters*2, kernel_size=(3,3))
pool_2 = tf.keras.layers.MaxPool2D()
pool_3 = tf.keras.layers.MaxPool2D()
flatten_3 = tf.keras.layers.Flatten()
def extract(x):
if return_embeddings:
out, q_embedding, _ = interactions([x[0], x[1]])
x[2] = q_embedding
else:
out = interactions([x[0], x[1]])
x = normalize_interactions_idf([out, x[2]])
x = conv_1(x)
x = pool_1(x)
x = conv_2(x)
x = pool_2(x)
x = pool_3(x)
x = flatten_3(x)
return x
else:
raise RuntimeError("invalid extract_mode")
if aggregation_mode==0:
l1_a = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(1, return_sequences=True), merge_mode="sum")
l2_a = tf.keras.layers.Activation('sigmoid')
l3_a = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(gru), merge_mode="sum")
def aggregation_senteces(x):
x = l1_a(x)
x = l2_a(x)
x = l3_a(x)
return x # tf.keras.layers.Activation('relu')(x)
elif aggregation_mode==1:
l1_a = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(1, return_sequences=True), merge_mode="sum")
l2_a = tf.keras.layers.Activation('selu')
l3_a = tf.keras.layers.Lambda(lambda y: tf.squeeze(y, axis=-1))
def aggregation_senteces(x):
x = l1_a(x)
x = l2_a(x)
x = l3_a(x)
return x
else:
raise RuntimeError("invalid aggregation_mode")
if score_mode==0:
output_score = tf.keras.layers.Dense(1)
elif score_mode==1:
output_score = lambda x:x # identity
elif score_mode==2:
l1_s = tf.keras.layers.Dense(max_s_per_doc, activation="selu")
l2_s = tf.keras.layers.Dense(1, activation="selu")
def output_score(x):
x = l1_s(x)
x = l2_s(x)
return x
else:
raise RuntimeError("invalid score_mode")
input_sentences = tf.unstack(input_doc, axis=1)
sentences_features = []
for input_sentence in input_sentences:
# (None, S)
sentences_features.append(extract([input_query, input_sentence, input_query_idf]))
sentences_features_stack = tf.stack(sentences_features, axis=1)
document_dense = aggregation_senteces(sentences_features_stack)
#print(document_dense)
score = output_score(document_dense)
return tf.keras.models.Model(inputs=[input_query, input_doc, input_query_idf], outputs=score)
def q_aware_sentence_ranker(max_q_length,
max_s_length,
max_s_per_doc,
emb_matrix,
filters=16):
input_query = tf.keras.layers.Input((max_q_length,), dtype="int32") # (None, Q)
input_doc = tf.keras.layers.Input((max_s_per_doc, max_s_length), dtype="int32") # (None, P, S)
input_query_idf = tf.keras.layers.Input((max_q_length,), dtype="float32")
semantic_interactions = SemanticInteractions(emb_matrix)
exact_interactions = ExactInteractions()
softmax_IDF = MaskedSoftmax()
semantic_conv_layer = tf.keras.layers.Conv2D(filters,
(3,3),
activation="tanh",
padding="SAME",
dtype="float32")
exact_conv_layer = tf.keras.layers.Conv2D(filters,
(3,3),
activation="tanh",
padding="SAME",
dtype="float32")
channels_pool_layer = tf.keras.layers.Lambda(lambda x:K.max(x, axis=-1), name="channels_pool_layer")
squeeze_layer = tf.keras.layers.Lambda(lambda x:tf.squeeze(x, axis=-1), name="squeeze_pool_layer")
concatenate_layer = tf.keras.layers.Concatenate()
max_by_row_layer = tf.keras.layers.Lambda(lambda x:K.max(x, axis=-1, keepdims=True), name="max_by_row_layer")
avg_by_row_layer = tf.keras.layers.Lambda(lambda x:K.mean(x, axis=-1, keepdims=True), name="avg_by_row_layer")
s_l1 = tf.keras.layers.Dense(8, activation="selu", name="mlp_l1")
s_l2 = tf.keras.layers.Dense(1, name="mlp_l2")
def mlp_sentences(x):
x = s_l1(x)
x = s_l2(x)
return x
def setence_model(x):
# connections
semantic_matrix = semantic_interactions([x[0], x[1]])
exact_matrix = exact_interactions([x[0], x[1]])
# print(semantic_matrix._keras_mask)
semantic_feature_maps = semantic_conv_layer(semantic_matrix)
semantic_feature_map = channels_pool_layer(semantic_feature_maps)
semantic_feature_map_max = max_by_row_layer(semantic_feature_map)
semantic_feature_map_avg = avg_by_row_layer(semantic_feature_map)
exact_feature_maps = exact_conv_layer(exact_matrix)
exact_feature_map = channels_pool_layer(exact_feature_maps)
exact_feature_map_max = max_by_row_layer(exact_feature_map)
exact_feature_map_avg = avg_by_row_layer(exact_feature_map)
semantic_matrix = squeeze_layer(semantic_matrix)
semantic_max = max_by_row_layer(semantic_matrix)
semantic_avg = avg_by_row_layer(semantic_matrix)
exact_matrix = squeeze_layer(exact_matrix)
exact_max = max_by_row_layer(exact_matrix)
exact_avg = avg_by_row_layer(exact_matrix)
features_concat = concatenate_layer([semantic_max, semantic_avg, semantic_feature_map_max, semantic_feature_map_avg,
exact_max, exact_avg, exact_feature_map_max, exact_feature_map_avg])
# custom layer to apply the mlp and do the masking stuff
features_by_q_term = tf.unstack(features_concat, axis=1)
features_q_term_score = []
for f_q_term in features_by_q_term:
features_q_term_score.append(mlp_sentences(f_q_term))
score_by_q_term = tf.stack(features_q_term_score, axis=1)
# compute idf importance
idf_importance = K.expand_dims(softmax_IDF(x[2]))
return tf.squeeze(tf.linalg.matmul(score_by_q_term, idf_importance, transpose_a=True), axis=-1)
# normal flow model
input_sentences = tf.unstack(input_doc, axis=1)
sentences_features = []
for input_sentence in input_sentences:
# (None, S)
sentences_features.append(setence_model([input_query, input_sentence, input_query_idf]))
sentences_features_stack = tf.squeeze(tf.stack(sentences_features, axis=1), axis=-1)
print(sentences_features_stack)
best_sentences_scores, _ = tf.math.top_k(sentences_features_stack, k=3)
score = tf.keras.layers.Dense(1)(best_sentences_scores)
return tf.keras.models.Model(inputs=[input_query, input_doc, input_query_idf], outputs=score)
def deep_rank_extra_features(max_q_length,
max_s_length,
max_s_per_q_term,
emb_matrix,
filters=16,
gru=16,
q_term_weight_mode=0,
aggregation_mode=0,
extraction_mode=0):
"""
q_term_weight_mode: 0 - use term aggregation with embeddings
1 - use term aggregation with idf
aggregation_mode: 0 - use Bidirectional GRU
1 - use Bidirectional GRU + sig for sentence score follow another Bidirectional GRU for aggregation
2 - use Bidirectional GRU + sig for sentence score
3 - use GRU + sig for sentence score
extraction_mode: 0 - use CNN + GlobalMaxPool
1 - use CNN + [GlobalMaxPool, GlobalAvgPool]
2 - use CNN + [GlobalMaxPool, GlobalAvgPool, GlobalK-maxAvgPool]
3 - use CNN + [GlobalMaxPool, GlobalK-maxAvgPool]
"""
return_embeddings = q_term_weight_mode==0
input_query = tf.keras.layers.Input((max_q_length,), dtype="int32") # (None, Q)
input_doc = tf.keras.layers.Input((max_q_length, max_s_per_q_term, max_s_length), dtype="int32") # (None, P, S)
input_query_idf = tf.keras.layers.Input((max_q_length,), dtype="float32")
input_extra_features = tf.keras.layers.Input((4,), dtype="float32") # (None, 4)
interactions = SemanticInteractions(emb_matrix, return_embeddings=return_embeddings)
if extraction_mode==0:
conv = tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3), padding="SAME", activation="selu")
pool = tf.keras.layers.GlobalMaxPool2D()
def extract(x):
if return_embeddings:
x, query_embeddings, _ = interactions(x)
else:
x = interactions(x)
query_embeddings = K.expand_dims(input_query_idf, axis=-1)
x = conv(x)
x = pool(x)
return x, query_embeddings
elif extraction_mode in [1, 2, 3]:
conv = tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3),padding="SAME", activation="selu")
max_pool = tf.keras.layers.GlobalMaxPool2D()
avg_pool = tf.keras.layers.GlobalAveragePooling2D()
masked_avg_pool = GlobalMaskedAvgPooling2D()
kmax_avg_pool = GlobalKmaxAvgPooling2D(kmax=5)
concatenate = tf.keras.layers.Concatenate(axis=-1)
def extract(x):
if return_embeddings:
x_interaction, query_embeddings, _ = interactions(x)
else:
x_interaction = interactions(x)
query_embeddings = K.expand_dims(input_query_idf, axis=-1)
x = conv(x_interaction)
max_x = max_pool(x)
_concat = [max_x]
if extraction_mode in [1, 2]:
avg_x = avg_pool(x)
_concat.append(avg_x)
if extraction_mode in [2, 3]:  # not elif: mode 2 adds both the avg and the k-max pools (see docstring)
kmax_x = kmax_avg_pool(x)
_concat.append(kmax_x)
x = concatenate(_concat)
return x, query_embeddings
if aggregation_mode==0:
aggregation_senteces = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(gru))
elif aggregation_mode==1:
l1 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(1, return_sequences=True), merge_mode="sum")
l2 = tf.keras.layers.Activation('sigmoid')
l3 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(gru), merge_mode="sum")
def aggregation_senteces(x):
x = l1(x)
x = l2(x)
x = l3(x)
return x
elif aggregation_mode==2:
l1_a = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(1, return_sequences=True), merge_mode="sum")
l2_a = tf.keras.layers.Activation('sigmoid')
l3_a = tf.keras.layers.Lambda(lambda y: tf.squeeze(y, axis=-1))
def aggregation_senteces(x):
x = l1_a(x)
x = l2_a(x)
x = l3_a(x)
return x
elif aggregation_mode==3:
l1_a = tf.keras.layers.GRU(1, return_sequences=True, activation="sigmoid")
l3_a = tf.keras.layers.Lambda(lambda y: tf.squeeze(y, axis=-1))
def aggregation_senteces(x):
x = l1_a(x)
x = l3_a(x)
return x
else:
raise RuntimeError("invalid aggregation_mode")
aggregation = TermAggregation()
def output_score(x):
tf.keras.layers.Dense(8, activation="selu")(x)
return tf.keras.layers.Dense(1, activation="relu")(x)
input_doc_unstack = tf.unstack(input_doc, axis=1)
output_i = []
for input_i in input_doc_unstack:
input_i_unstack = tf.unstack(input_i, axis=1)
output_j = []
for input_j in input_i_unstack:
_out, query_embeddings = extract([input_query, input_j])
output_j.append(_out) # [None, FM]
output_j_stack = tf.stack(output_j, axis=1) # [None, P_Q, FM]
output_i.append(aggregation_senteces(output_j_stack)) # [None, FM]
output_i_stack = tf.stack(output_i, axis=1) # [None, Q, FM]
# aggregation
doc_vector = aggregation([output_i_stack, query_embeddings])
# score
score = output_score(doc_vector)
# concat extra features
score_more_features = tf.keras.layers.Concatenate(axis=-1)([score, input_extra_features])
score_more_features = tf.keras.layers.Dense(8, activation="selu")(score_more_features)
score_more_features = tf.keras.layers.Dense(1, activation="selu")(score_more_features)
return tf.keras.models.Model(inputs=[input_query, input_doc, input_query_idf, input_extra_features], outputs=score_more_features)
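# A minimal, hypothetical usage sketch: this variant additionally takes a vector of 4
# hand-crafted features per query-document pair; the sizes and the zero embedding matrix
# below are assumptions for illustration only.
def _example_deep_rank_extra_features():
    import numpy as np
    emb = np.zeros((30000, 200), dtype="float32")  # hypothetical embedding matrix
    model = deep_rank_extra_features(max_q_length=13, max_s_length=30, max_s_per_q_term=5,
                                     emb_matrix=emb)
    # prediction inputs: [query ids, doc ids (None, 13, 5, 30), query idf, extra features (None, 4)]
    return model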
| 40.623552
| 133
| 0.578102
|
52dd20adf2be63c6e164f907551674b2f8190f86
| 1,003
|
py
|
Python
|
setup.py
|
HolmesNL/lir
|
f8a2a66c4bb7b1f1687c5bdca3d39c4b712f7ea4
|
[
"Apache-2.0"
] | 4
|
2020-04-27T11:31:41.000Z
|
2020-12-16T12:33:42.000Z
|
setup.py
|
HolmesNL/lir
|
f8a2a66c4bb7b1f1687c5bdca3d39c4b712f7ea4
|
[
"Apache-2.0"
] | 6
|
2020-03-23T18:01:03.000Z
|
2020-07-28T09:00:15.000Z
|
setup.py
|
HolmesNL/lir
|
f8a2a66c4bb7b1f1687c5bdca3d39c4b712f7ea4
|
[
"Apache-2.0"
] | 1
|
2020-10-02T13:46:53.000Z
|
2020-10-02T13:46:53.000Z
|
from setuptools import setup, find_packages
with open("README.md") as f:
long_description = f.read()
setup(
name="lir",
version="0.1.12",
description="scripts for calculating likelihood ratios",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/NetherlandsForensicInstitute/lir",
author="Netherlands Forensic Institute",
author_email="fbda@nfi.nl",
packages=find_packages(),
setup_requires=["nose"],
test_suite="nose.collector",
install_requires=["matplotlib", "numpy", "scipy", "scikit-learn", "tqdm"],
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
)
| 35.821429
| 78
| 0.652044
|
bb3de622271b59defbcb7cb7962426ef261c1839
| 340
|
py
|
Python
|
Tuple.py
|
satz2000/Python-practiced-notes
|
30aac786bc75f85ca841a25aa82c9f3d0f4721b2
|
[
"Apache-2.0"
] | null | null | null |
Tuple.py
|
satz2000/Python-practiced-notes
|
30aac786bc75f85ca841a25aa82c9f3d0f4721b2
|
[
"Apache-2.0"
] | null | null | null |
Tuple.py
|
satz2000/Python-practiced-notes
|
30aac786bc75f85ca841a25aa82c9f3d0f4721b2
|
[
"Apache-2.0"
] | null | null | null |
"""
# What is Tuple?
A tuple is the same as a list, but it is immutable (unchangeable)
() is used to represent a tuple
"""
nums = (12, 23, 5, 32, 19)
# A tuple has only 2 methods: count() and index()
print(nums.count(19)) # count(value): how many times the value is present in the tuple
print(nums.index(5)) # index(value): finds the index of the first occurrence of the value
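# A small sketch of the immutability mentioned above: assigning to an element raises a TypeError
try:
    nums[0] = 99
except TypeError as error:
    print(error)  # 'tuple' object does not support item assignment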
| 26.153846
| 80
| 0.629412
|
e121e0bd3126ab74672e98ffaf528aa1ca8d61a1
| 2,135
|
py
|
Python
|
lib/smisk/test/mvc/test_matter.py
|
rsms/smisk
|
f12a5606dfff49a15fa91448ff36652d60add4c0
|
[
"MIT"
] | 4
|
2015-11-05T11:51:12.000Z
|
2020-12-30T18:55:58.000Z
|
lib/smisk/test/mvc/test_matter.py
|
rsms/smisk
|
f12a5606dfff49a15fa91448ff36652d60add4c0
|
[
"MIT"
] | 5
|
2021-11-16T17:21:51.000Z
|
2021-11-16T17:22:09.000Z
|
lib/smisk/test/mvc/test_matter.py
|
rsms/smisk
|
f12a5606dfff49a15fa91448ff36652d60add4c0
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
'''MVC test matter
'''
from smisk.mvc import *
# A controller tree
class root(Controller):
def func_on_root(self): return '/func_on_root'
@expose(delegates=True)
def delegating_func_on_root(self): return '/delegating_func_on_root'
def __call__(self, *va, **kw): return '/'
def one_named_arg1(self, foo=None, *args, **kwargs): return '/one_named_arg1?foo=%s' % foo
def one_named_arg2(self, foo=None, **kwargs): return '/one_named_arg2?foo=%s' % foo
def one_named_arg3(self, foo=None, *args): return '/one_named_arg3?foo=%s' % foo
def one_named_arg4(self, foo=None): return '/one_named_arg4?foo=%s' % foo
def three_named_args(self, one=1, two=2, three=3):
return '/three_named_args?one=%s&two=%s&three=%s' % (one, two, three)
class level2(root):
def __call__(self): return '/level2'
#func_on_level2 = root
def func_on_level2(self, *va, **kw): return '/level2/func_on_level2'
def show_user(self, user, *va, **kw): return '/level2/show_user'
def level3(self):
'''never reached from outside, because it's shadowed by subclass level3.
However, it can still be reached internally, for example through
control.path_to().
'''
return 'shadowed'
@expose('foo-bar')
def foo_bar(self): return '/level2/foo-bar'
class level3(level2):
def __call__(self): return '/level2/level3'
@hide
def hidden_method_on_level3(self): pass
def func_on_level3(self, *va): return '/level2/level3/func_on_level3'
def func_on_level3_wonlykwa(self, **kva): return '/level2/level3/func_on_level3_wonlykwa'
class level3B(level2):
slug = 'level-3-b'
def func_on_level3B(self): return '/level2/level-3-b/func_on_level3B'
class PostsController(level3):
def list(self, *va, **kw): return '/level2/level3/posts/list'
# For testing method_origin and alike:
class Animal(object):
def name(self): pass
class Fish(Animal):
def color(self): pass
class Bass(Fish):
def eats(self): pass
def sleeps(self): pass
class SpanishBass(Bass):
def on_fiesta(self): pass
def sleeps(self): pass
class EnglishBass(Bass):
def on_fiesta(self): return False
def cheese(self): pass
| 31.397059
| 92
| 0.712412
|
9677e94a394b30ba2eeb317974a669d2f0f03d86
| 10,999
|
py
|
Python
|
froi/gui/component/unused/labeldialog.py
|
zhouguangfu/FreeROI
|
0605c2a0fe2457e3703a4a7548299fc2c1e9aca0
|
[
"BSD-3-Clause"
] | 13
|
2016-02-12T05:10:23.000Z
|
2021-01-13T01:40:12.000Z
|
froi/gui/component/unused/labeldialog.py
|
zhouguangfu/FreeROI
|
0605c2a0fe2457e3703a4a7548299fc2c1e9aca0
|
[
"BSD-3-Clause"
] | 14
|
2015-05-04T05:56:45.000Z
|
2021-01-24T11:49:13.000Z
|
froi/gui/component/unused/labeldialog.py
|
zhouguangfu/FreeROI
|
0605c2a0fe2457e3703a4a7548299fc2c1e9aca0
|
[
"BSD-3-Clause"
] | 8
|
2016-03-07T06:29:51.000Z
|
2017-10-30T13:59:27.000Z
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from drawsettings import DrawSettings
class LabelDialog(QDialog, DrawSettings):
"""
A dialog window for label selection.
"""
color_changed = pyqtSignal()
def __init__(self, model, parent=None):
"""
Initialize a dialog widget.
"""
super(LabelDialog, self).__init__(parent)
self._model = model
self._label_config = model.get_current_label_config()
self._label_config_center = model.get_label_config_center()
self.setWindowModality(Qt.NonModal)
self.setWindowFlags(Qt.Tool | Qt.CustomizeWindowHint | Qt.WindowTitleHint)
self._init_gui()
self._create_actions()
#self._update_items()
def _init_gui(self):
"""
Initialize GUI.
"""
self.setWindowTitle('Select a label')
name_label = QLabel('Label:')
self.combobox = QComboBox()
self._update_combobox()
color_label = QLabel('Color:')
label = str(self.combobox.currentText())
if label:
self.color_button = ColorButton(self._label_config.get_label_color(label))
else:
self.color_button = ColorButton()
size_label = QLabel('Size:')
self.size_edit = QSpinBox()
self.size_edit.setRange(1, 10)
self.size_edit.setValue(4)
self.add_label = QPushButton('Add')
self.del_label = QPushButton('Delete')
self.save_label = QPushButton('Save')
grid_layout = QGridLayout()
grid_layout.addWidget(name_label, 0, 0)
grid_layout.addWidget(self.combobox, 0 ,1)
grid_layout.addWidget(color_label, 1, 0)
grid_layout.addWidget(self.color_button, 1, 1)
grid_layout.addWidget(size_label, 2, 0)
grid_layout.addWidget(self.size_edit, 2, 1)
hbox_layout = QHBoxLayout()
hbox_layout.addWidget(self.add_label)
hbox_layout.addWidget(self.del_label)
hbox_layout.addWidget(self.save_label)
vbox_layout = QVBoxLayout()
vbox_layout.addLayout(grid_layout)
vbox_layout.addLayout(hbox_layout)
self.setLayout(vbox_layout)
def _update_combobox(self):
self.combobox.clear()
label_list = self._label_config.get_label_list()
self.combobox.addItems(QStringList(label_list))
def _create_actions(self):
"""
Create some actions.
"""
#self._model.dataChanged.connect(self._update_items)
# --
# select a label and change current pen value
# --
self.combobox.currentIndexChanged[QString].connect(self._update_color)
self.color_button.color_changed.connect(self._update_label_color)
self.add_label.clicked.connect(self._add_label)
self.del_label.clicked.connect(self._del_label)
self.save_label.clicked.connect(self._save_label)
def _update_items(self):
"""
Add items for combo box.
"""
index = self._model.currentIndex()
label_pairs = self._model.data(index, Qt.UserRole + 4)
label_names = label_pairs.keys()
self.combobox.clear()
self.combobox.addItems(label_names)
def _update_color(self, s):
s = str(s)
if s:
self.color_button.set_current_color(self._label_config.get_label_color(s))
def _update_label_color(self, color):
label = str(self.combobox.currentText())
if label:
self._label_config.update_label_color(label, color)
self.color_changed.emit()
def _add_label(self):
"""
Add a new label.
"""
#name, name_ok = QInputDialog.getText(self, '', 'Input a label name:')
#if name_ok and not name.isEmpty():
# index = self._model.currentIndex()
# if name in self._model.data(index, Qt.UserRole + 4):
# QMessageBox.warning(self, 'Warning',
# 'Label name you input exists!')
# else:
# idx, idx_ok = QInputDialog.getText(self, '', 'Input a value:')
# if idx_ok and not idx.isEmpty():
# temp = self._model.data(index, Qt.UserRole + 4)
# if int(idx) in temp.values():
# QMessageBox.warning(self, 'Warning',
# 'Label value you input exits!')
# else:
# x = (str(name), int(idx))
# self._model.setData(index, Qt.UserRole + 4, x)
add_dialog = AddLabelDialog(self._label_config)
add_dialog.exec_()
self._update_combobox()
def _del_label(self):
"""
Delete an existing label.
"""
label = self.combobox.currentText()
if label:
button = QMessageBox.warning(self, "Delete label",
"Are you sure that you want to delete label %s ?" % label,
QMessageBox.Yes,
QMessageBox.No)
if button == QMessageBox.Yes:
self._label_config.remove_label(str(label))
self._update_combobox()
def _save_label(self):
self._label_config.save()
def is_valid_label(self):
return self.combobox.currentText()
def get_current_label(self):
if self.is_valid_label():
return str(self.combobox.currentText())
raise ValueError, "Current label invalid"
def get_current_index(self):
if self.is_valid_label():
return self._label_config.get_label_index(self.get_current_label())
raise ValueError, "Current label invalid"
def get_current_color(self):
if self.is_valid_label():
return self._label_config.get_label_color(self.get_current_label())
def get_current_size(self):
if self.is_valid_label():
return self.size_edit.value()
# For DrawSettings
def is_brush(self):
return True
def is_drawing_valid(self):
return self.is_valid_label()
def get_drawing_value(self):
return self.get_current_index()
def get_drawing_size(self):
return self.get_current_size()
def get_drawing_color(self):
return self.get_current_color()
class AddLabelDialog(QDialog):
"""
A dialog for adding a new label.
"""
def __init__(self, label_config, parent=None):
super(AddLabelDialog, self).__init__(parent)
self._label_config = label_config
self._init_gui()
self._create_actions()
def _init_gui(self):
self.setWindowTitle("Add a new label")
label_label = QLabel("Label")
self.label_edit = QLineEdit()
index_label = QLabel("Index")
self.index_edit = QLineEdit()
self.index_edit.setText(str(self._label_config.new_index()))
color_label = QLabel("Color")
self.color_button = ColorButton()
grid_layout = QGridLayout()
grid_layout.addWidget(label_label, 0, 0)
grid_layout.addWidget(self.label_edit, 0, 1)
grid_layout.addWidget(index_label, 1, 0)
grid_layout.addWidget(self.index_edit, 1, 1)
grid_layout.addWidget(color_label, 2, 0)
grid_layout.addWidget(self.color_button, 2, 1)
self.add_button = QPushButton("Add")
self.cancel_button = QPushButton("Cancel")
hbox_layout = QHBoxLayout()
hbox_layout.addWidget(self.add_button)
hbox_layout.addWidget(self.cancel_button)
vbox_layout = QVBoxLayout()
vbox_layout.addLayout(grid_layout)
vbox_layout.addLayout(hbox_layout)
self.setLayout(vbox_layout)
def _create_actions(self):
self.add_button.clicked.connect(self._add_label)
self.cancel_button.clicked.connect(self.done)
def _add_label(self):
label = str(self.label_edit.text())
if not label:
QMessageBox.critical(self, "No label name",
"Please speicify your label name.")
return
index = self.index_edit.text()
if not index:
QMessageBox.critical(self, "No index",
"Please specify a index for your label.")
return
index = int(str(self.index_edit.text()))
color = self.color_button.get_current_color()
if not color.isValid():
QMessageBox.critical(self, "Color invalid",
"Please choose a valid color for your label.")
return
if self._label_config.has_label(label):
button = QMessageBox.warning(self, "Label exists!",
"The label you input already exists, if you change its "
"index, the old voxel valuse you've write won't be "
"updated. Are you sure that you "
"want to overwrite its settings?",
QMessageBox.Yes,
QMessageBox.No)
if button != QMessageBox.Yes:
self.label_edit.setFocus()
return
self._label_config.remove_label(label)
if self._label_config.has_index(index):
QMessageBox.critical(self, "Index exists!",
"The index you input already exists, you must choose"
" another index!")
self.index_edit.setFocus()
return
self._label_config.add_label(label, index, color)
self.done(0)
class ColorButton(QPushButton):
"""
Button to choose color from a color dialog.
"""
default_color = QColor(255, 0, 0)
icon_size = QSize(32, 32)
color_changed = pyqtSignal(QColor)
def __init__(self, init_color=None, parent=None):
super(ColorButton, self).__init__(parent)
if init_color is None:
init_color = self.default_color
self.current_color = init_color
self._update_icon()
self.clicked.connect(self._choose_color)
def _update_icon(self):
icon_image = QImage(self.icon_size, QImage.Format_RGB888)
icon_image.fill(self.current_color.rgb())
icon_image = icon_image.rgbSwapped()
icon_pm = QPixmap.fromImage(icon_image)
self.setIcon(QIcon(icon_pm))
def _choose_color(self):
color = QColorDialog.getColor(self.current_color, self)
if color.isValid():
self._set_current_color(color)
def _set_current_color(self, color):
self.set_current_color(color)
self.color_changed.emit(color)
def set_current_color(self, color):
self.current_color = color
self._update_icon()
def get_current_color(self):
return self.current_color
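# A minimal, hypothetical sketch of trying the ColorButton widget on its own; it assumes a
# working PyQt4 installation and is for illustration only.
def _example_color_button():
    import sys
    app = QApplication(sys.argv)
    button = ColorButton()  # starts with the default red color
    button.show()
    return app.exec_()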
| 33.431611
| 86
| 0.603509
|
985681cee8f196579fceadbf8db3e58a3ed10d6b
| 469
|
py
|
Python
|
tests/test_prebuilt.py
|
GlobalMaksimum/sadedegel
|
8e28dbeabc3bf0d6f2222089ac5e3a849f9d3a6b
|
[
"MIT"
] | 100
|
2020-07-06T05:50:49.000Z
|
2022-03-21T21:56:55.000Z
|
tests/test_prebuilt.py
|
LyotardPostmodernizm/sadedegel
|
8e28dbeabc3bf0d6f2222089ac5e3a849f9d3a6b
|
[
"MIT"
] | 244
|
2020-07-06T06:31:01.000Z
|
2022-02-26T10:40:17.000Z
|
tests/test_prebuilt.py
|
LyotardPostmodernizm/sadedegel
|
8e28dbeabc3bf0d6f2222089ac5e3a849f9d3a6b
|
[
"MIT"
] | 23
|
2020-07-27T16:32:48.000Z
|
2022-03-18T11:13:07.000Z
|
# flake8: noqa
from pathlib import Path # pylint: disable=unused-import
from os.path import expanduser # pylint: disable=unused-import
import pytest
from .context import tweet_sentiment
@pytest.mark.skip()
@pytest.mark.skipif('not Path(expanduser("~/.sadedegel_data")).exists()')
def test_cv():
tweet_sentiment.cv(k=5)
@pytest.mark.skip()
@pytest.mark.skipif('not Path(expanduser("~/.sadedegel_data")).exists()')
def test_build():
tweet_sentiment.build()
| 26.055556
| 73
| 0.739872
|
482ab7a89e2cddf92847911ee667cf0bd2a111fa
| 2,615
|
py
|
Python
|
get_images.py
|
kvswim/vision_proj_2017
|
9b791e77ab15a3a69637ee50eb1b65cc9f2a2156
|
[
"MIT"
] | null | null | null |
get_images.py
|
kvswim/vision_proj_2017
|
9b791e77ab15a3a69637ee50eb1b65cc9f2a2156
|
[
"MIT"
] | null | null | null |
get_images.py
|
kvswim/vision_proj_2017
|
9b791e77ab15a3a69637ee50eb1b65cc9f2a2156
|
[
"MIT"
] | null | null | null |
#Kyle Verdeyen
#Computer Vision 600.431 Final Project
#get_images.py: captures images for use in training known user database
#to be run on a Raspberry Pi with camera device, not for PC use
#point rpi at subject and run this, will capture approximately 1000 images and save to ./subjectimages
#usage: python get_images.py <subjectnumber>
import os
import sys
import io
import time
import picamera
import shutil
#Takes advantage of the "Magic numbers" in JPEG image files to
#split live feed into many different images
#Since all JPEGs start with FF D8 and end with FF D9, we can use this
#as a delimiter
class SplitFrames(object):
def __init__(self):
#init sets 0 frames taken, null output
self.frame_num = 0
self.output = None
def write(self, buffer):
if buffer.startswith(b'\xff\xd8'): #if buffer starts with the JPEG SOI marker bytes FF D8
if self.output:
self.output.close()
#increment frame counter
self.frame_num += 1
#set output stream to name image based on frame number
#USER MUST SET SUBJECT NUMBER AT COMMAND LINE
#e.g. python get_images.py 1 --> subject1.imagexx.jpg
self.output = io.open('subject%s.image%02d.jpg' % (sys.argv[1], self.frame_num), 'wb')
self.output.write(buffer) #write the buffer to the current output file
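# A small, hypothetical sketch of the SOI check the class relies on: every JPEG frame starts
# with the marker bytes FF D8, so a buffer beginning with them marks a new frame.
def _example_soi_check():
    fake_buffer = b'\xff\xd8\xff\xe0' + b'\x00' * 16  # made-up start of a JPEG frame
    return fake_buffer.startswith(b'\xff\xd8')  # True -> a new frame begins here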
#record and split
print("Starting camera...")
#1080p at 30 fps is plenty of resolution at a framerate that is fast for capture without being too dark
with picamera.PiCamera(resolution = '1080p', framerate = 30) as camera:
#uncomment this if you are running the Pi on a TV or monitor, allows you to see preview to get subject in frame
#camera.start_preview()
time.sleep(2) #wait for the pi to settle its white balance
output = SplitFrames()
print("Starting subject capture, please wait (approx 20 seconds)...")
start = time.time()
camera.start_recording(output, format = 'mjpeg') #sliceable format
camera.wait_recording(40) #oh boy
camera.stop_recording()
finish = time.time()
print("Capture done.")
print("Captured %d frames in %d seconds at %.2f fps." % (output.frame_num, (finish-start), output.frame_num/(finish - start)))
#now to move all the jpg's into the dest folder since SplitFrames doesn't allow us to set destination directory
print("Placing files in destination directory...")
sourcepath = os.getcwd()
source = os.listdir(sourcepath)
destpath = sourcepath + "/subjectimages"
for files in source:
if files.endswith(".jpg"):
shutil.move(os.path.join(sourcepath, files), os.path.join(destpath, files))
print("Finished.")
| 41.507937
| 126
| 0.702103
|
e744aa178d0253de74edefebb12e1886964c4ae6
| 31,086
|
py
|
Python
|
owllook/config/rules.py
|
cklx0719/owllook
|
263cbc5fc40478da140ec0a54684205f92b5403a
|
[
"Apache-2.0"
] | 2,344
|
2017-05-05T00:16:05.000Z
|
2022-03-31T15:46:06.000Z
|
owllook/config/rules.py
|
cklx0719/owllook
|
263cbc5fc40478da140ec0a54684205f92b5403a
|
[
"Apache-2.0"
] | 91
|
2017-05-27T12:43:14.000Z
|
2022-03-20T04:51:35.000Z
|
owllook/config/rules.py
|
cklx0719/owllook
|
263cbc5fc40478da140ec0a54684205f92b5403a
|
[
"Apache-2.0"
] | 811
|
2017-05-05T03:01:25.000Z
|
2022-03-22T02:09:37.000Z
|
#!/usr/bin/env python
from collections import namedtuple
####################################### Rules ###########################################
# DOMAIN
BLACK_DOMAIN = ['www.17k.com', 'mm.17k.com', 'www.xs8.cn', 'www.zongheng.com', 'yunqi.qq.com', 'chuangshi.qq.com',
'book.qidian.com', 'www.soduso.com', 'pages.book.qq.com', 'book.km.com', 'www.lread.net',
'www.0dsw.com', 'www.5200xsb.com', 'www.80txt.com', 'www.sodu.tw', 'www.shuquge.com',
'www.shenmanhua.com', 'xiaoshuo.sogou.com', 'www.999wx.com', 'zetianji8.com', 'www.bookso.net',
'm.23us.com', 'www.qbxsw.com', 'www.zhuzhudao.com', 'www.shengyan.org', 'www.360doc.com',
'www.ishuo.cn', 'read.qidian.com', 'www.yunlaige.com', 'www.qidian.com', 'www.sodu888.com',
'www.siluke.cc', 'read.10086.cn', 'www.pbtxt.com', 'c4txt.com', 'www.bokon.net', 'www.sikushu.net',
'www.is028.cn', 'www.tadu.com', 'www.kudu8.com', 'www.bmwen.com', 'www.5858xs.com', 'www.yiwan.com',
'www.x81zw.com', 'www.123du.cc', 'www.chashu.cc', '20xs.com', 'www.haxwx.net', 'www.dushiwenxue.com',
"www.yxdown.com", 'www.jingcaiyuedu.com', 'www.zhetian.org', 'www.xiaoshuo02.com', 'www.xiaoshuo77.com',
'www.868xh.com', 'dp.changyou.com', 'www.iyouman.com', 'www.qq717.com', 'www.yznn.com', "www.69w.cc",
"www.doupocangqiong1.com", "www.manhuatai.com", "www.5wxs.com", "www.ggshuji.com", "www.msxf.net",
"www.mianhuatang.la", "www.boluoxs.com", "www.lbiquge.top", "www.69shu.com", "www.qingkan520.com",
"book.douban.com", "movie.douban.com", "www.txshuku.com", "lz.book.sohu.com", "www.3gsc.com.cn",
"www.txtshu365.com", "www.517yuedu.com", "www.baike.com", "read.jd.com", "www.zhihu.com", "wshuyi.com",
"www.19lou.tw", "www.chenwangbook.com", "www.aqtxt.com", "book.114la.com", "www.niepo.net",
"me.qidian.com", "www.gengd.com", "www.77l.com", "www.geilwx.com", "www.97xiao.com", "www.anqu.com",
"www.wuxiaxs.com", "yuedu.163.com", "b.faloo.com", "bbs.qidian.com", "jingji.qidian.com", "www.sodu.cc",
"forum.qdmm.com", "www.qdmm.com", "game.91.com", "www.11773.com", "mt.sohu.com", "book.dajianet.com",
"haokan.17k.com", "www.qmdsj.com", "www.jjwxc.net", "ishare.iask.sina.com.cn", "www.cmread.com",
"www.52ranwen.net", "www.dingdianzw.com", "www.topber.com", "www.391k.com", "www.qqxzb.com",
"www.zojpw.com", "www.pp8.com", "www.bxwx.org", "www.hrsxb.com", "www.497.com", "www.d8qu.com",
"www.duwanjuan.com", "www.05935.com", "book.zongheng.com", "www.55x.cn", "www.freexs.cn",
"xiaoshuo.360.cn", "www.3kw.cc", "www.gzbpi.com", "book.sina.com.cn", "www.vodtw.com", "wenda.so.com",
"product.dangdang.com", "www.chuiyao.com", "novel.slieny.com", "www.bilibili.com", "donghua.dmzj.com",
"www.yaojingweiba.com", "www.qb5200.com", "www.520tingshu.com", "www.567zw.com", "www.zjrxz.com",
"v.qq.com", "blog.sina.com.cn", "www.hackhome.com", "news.fznews.com.cn", "www.jingyu.com",
"news.so.com", "www.sodu3.com", "vipreader.qidian.com", "www.mozhua9.com", "www.iqiyi.com",
"xs.sogou.com"]
# For some sites the address returned by search differs from the real chapter-list address, so it is replaced
REPLACE_RULES = {
"www.miaobige.com": {
'old': 'miaobige.com/book/',
'new': 'miaobige.com/read/'
},
"www.5ccc.net": {
'old': '5ccc.net/wksz_info/',
'new': '5ccc.net/xiaoshuo/'
},
"www.7kankan.com": {
'old': '7kankan.com/files/article/info/',
'new': '7kankan.com/files/article/html/'
},
"www.xqingdou.net": {
'old': 'xqingdou.net/book_',
'new': 'xqingdou.net/chapter_'
},
"www.wuyanxia.net": {
'old': 'wuyanxia.net/book/',
'new': 'wuyanxia.net/read/'
},
"www.263zw.com": {
'old': '263zw.com/402770/',
'new': '263zw.com/402770/list/'
},
}
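# A hypothetical sketch of how a REPLACE_RULES entry could be applied to turn a retrieved
# URL into the real chapter-list URL; the helper name is illustrative only.
def _example_apply_replace_rule(url, netloc):
    rule = REPLACE_RULES.get(netloc)
    if rule:
        return url.replace(rule['old'], rule['new'])
    return url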
# Search engine priority for retrieval
ENGINE_PRIORITY = ['360', 'baidu', 'bing', 'duck_go']
# Rules
Rules = namedtuple('Rules', 'content_url chapter_selector content_selector')
LatestRules = namedtuple('LatestRules', 'plan meta_value selector')
# Rules for fetching the latest chapter of a novel
PLAN_01 = LatestRules(
True,
{'latest_chapter_name': 'og:novel:latest_chapter_name', 'latest_chapter_url': 'og:novel:latest_chapter_url'},
None,
)
LATEST_RULES = {
"www.biqugex.com": PLAN_01,
"www.x23us.com": PLAN_01,
"www.23us.la": PLAN_01,
"www.sqsxs.com": PLAN_01,
"www.nuomi9.com": PLAN_01,
"www.biquge.info": PLAN_01,
"www.biquge.tw": PLAN_01,
"www.qu.la": PLAN_01,
"www.ybdu.com": PLAN_01,
"www.wenxuemi.com": PLAN_01,
"www.biquge.com": PLAN_01,
"www.23us.cc": PLAN_01,
"www.xs222.com": PLAN_01,
"www.lewen8.com": PLAN_01,
"www.bqg5200.com": PLAN_01,
"www.vodtw.com": PLAN_01,
"www.6mao.com": PLAN_01,
"www.biquge.sh": PLAN_01,
"www.touxiang.la": PLAN_01,
"www.bxquge.com": PLAN_01,
"www.beidouxin.com": PLAN_01,
"www.biquge.lu": PLAN_01,
"www.263zw.com": PLAN_01,
"www.3qzone.com": PLAN_01,
"wwww.yooread.com": PLAN_01,
# "www.suimeng.la": PLAN_01,
"www.bequge.com": PLAN_01,
"www.biquku.co": PLAN_01,
"www.xbqge.com": PLAN_01,
"www.aiquxs.com": PLAN_01,
"www.23us.com": PLAN_01,
"www.biqiuge.com": PLAN_01,
"www.ddbiquge.com": PLAN_01,
"www.abocms.cn": PLAN_01,
"www.a306.com": PLAN_01,
"www.liewen.cc": PLAN_01,
"www.8535.org": PLAN_01,
"www.dingdianzw.com": PLAN_01,
"www.biquge.cc": PLAN_01,
"www.111bz.org": PLAN_01,
"www.biqugebook.com": PLAN_01,
"www.e8zw.com": PLAN_01,
"www.xqqxs.com": PLAN_01,
"tianyibook.la": PLAN_01,
"www.lingdianksw.com": PLAN_01,
"www.qb5.tw": PLAN_01,
"www.quanben.com": PLAN_01,
"www.58xs.com": PLAN_01,
"www.biqukan.com": PLAN_01,
"www.yssm.org": PLAN_01,
"www.81zw.com": PLAN_01,
"www.ymoxuan.com": PLAN_01,
"www.mytxt.cc": PLAN_01,
"www.woquge.com": PLAN_01,
"www.biquguo.com": PLAN_01,
"www.8jzw.cc": PLAN_01,
"www.biquge.tv": PLAN_01,
"www.biquge5200.com": PLAN_01,
"www.8jzw.com": PLAN_01,
"www.23xsw.cc": PLAN_01,
"www.miaobige.com": PLAN_01,
"www.xs.la": PLAN_01,
"www.44pq.co": PLAN_01,
"www.50zw.la": PLAN_01,
"www.33xs.com": PLAN_01,
"www.zwdu.com": PLAN_01,
"www.ttzw.com": PLAN_01,
"www.zanghaihuatxt.com": PLAN_01,
"www.kuxiaoshuo.com": PLAN_01,
"www.biqudu.com": PLAN_01,
"www.biqugeg.com": PLAN_01,
"www.23txt.com": PLAN_01,
"www.baquge.tw": PLAN_01,
"www.23qb.com": PLAN_01,
"www.lread.cc": PLAN_01,
"www.biqudao.com": PLAN_01,
"www.laidudu.com": PLAN_01,
"www.kxs7.com": PLAN_01,
"www.biquguan.com": PLAN_01,
"www.biquta.com": PLAN_01,
"www.xs98.com": PLAN_01,
"www.bqge.org": PLAN_01,
"www.58xs.tw": PLAN_01,
"www.187ks.com": PLAN_01,
"www.yikanxiaoshuo.com": PLAN_01,
"www.23zw.me": PLAN_01,
"www.37zw.net": PLAN_01,
"www.biquge.cm": PLAN_01,
"www.kanshu58.com": PLAN_01,
"www.biqumo.com": PLAN_01,
"www.mpxiaoshuo.com": PLAN_01,
"www.23wx.cm": PLAN_01,
"www.biquge.jp": PLAN_01,
"www.biqugexsw.com": PLAN_01,
"www.biqu6.com": PLAN_01,
"www.xiuxs.com": PLAN_01,
"www.booktxt.net": PLAN_01,
"www.biqule.com": PLAN_01,
"www.biquzi.com": PLAN_01,
"www.biquku.la": PLAN_01,
"www.00ksw.org": PLAN_01,
"www.bqg.cc": PLAN_01,
"www.biqugezw.com": PLAN_01,
"www.bbiquge.com": PLAN_01,
"www.aikantxt.la": PLAN_01,
"www.biquge.com.tw": PLAN_01,
"www.xxbiquge.com": PLAN_01,
"www.biquwo.com": PLAN_01,
"www.ixs.cc": PLAN_01,
# other rules
# "www.50331.net": LatestRules(
# False,
# None,
# {'content_url': "http://www.50331.net/", 'tag': 'span.zzjie a'}
# )
}
RULES = {
# demo 'name': Rules('content_url', {chapter_selector}, {content_selector})
# content_url=1 means the chapter links are already full URLs and need no joining
# content_url=0 means the chapter page URL must be joined with the current page URL
# 'www.biqule.com': Rules('www.biqule.com', {'class': 'box_con'},{}),
# 'www.lingdiankanshu.com': Rules('www.lingdiankanshu.com', {'class': 'box_con'}, {}),
# 'www.hhlwx.com': Rules('www.hhlwx.co', {'class': 'chapterlist'},{}),
'www.biquwu.cc': Rules('https://www.biquwu.cc/', {'class': 'box_con'}, {'id': 'content'}),
'www.quyuege.com': Rules('0', {'class': 'mod-article-list'}, {'class': 'page-content'}),
# parsed
'www.biqugex.com': Rules('http://www.biqugex.com/', {'class': 'listmain'}, {'id': 'content'}),
# parsed
'www.bbiquge.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biquge.info': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.37zw.net': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biquku.la': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biquge.sh': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biquge.co': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.00ksw.org': Rules('0', {'class': 'ml_list'}, {'id': 'articlecontent'}),
# parsed
'www.bqge.org': Rules('http://www.bqge.org/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.aikantxt.la': Rules('http://www.aikantxt.la/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biquzi.com': Rules('http://www.biquzi.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.bqg.cc': Rules('http://www.bqg.cc/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biquge.jp': Rules('0', {'id': 'list'}, {'id': 'content'}),
# parsed
'www.vipzw.com': Rules('http://www.vipzw.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biquge5200.com': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.zanghaihuatxt.com': Rules('http://www.zanghaihuatxt.com/', {'class': 'listmain'}, {'id': 'content'}),
# parsed
'www.xiuxs.com': Rules('http://www.xiuxs.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.1biquge.com': Rules('http://www.1biquge.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.xiaoshuowan.com': Rules('http://www.xiaoshuowan.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biqugela.com': Rules('http://www.biqugela.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biqu6.com': Rules('http://www.biqu6.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.zhuaji.org': Rules('0', {'tag': 'dd'}, {'id': 'content'}),
# parsed
'www.sqsxs.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biquge.tv': Rules('http://www.biquge.tv/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biquta.com': Rules('https://www.biquta.com/', {'class': 'box_con'}, {'id': 'content'}),
# # parsed
'www.xbiquge.la': Rules('http://www.xbiquge.la/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biquge.cm': Rules('http://www.biquge.cm/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.23qb.com': Rules('https://www.23qb.com/', {'id': 'chapterList'}, {'id': 'TextContent'}),
# parsed
# 'www.txtwan.com': Rules('http://www.txtwan.com/', {'id': 'chapterList'}, {'id': 'txt'}),
# parsed
'www.biqugexsw.com': Rules('http://www.biqugexsw.com/', {'class': 'listmain'}, {'id': 'content'}),
# parsed
'www.kuxiaoshuo.com': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.laidudu.com': Rules('http://www.laidudu.com/', {'class': 'listmain'}, {'id': 'content'}),
# parsed
'www.kanshu58.com': Rules('0', {'class': 'chapters'}, {'id': 'content'}),
# parsed
'www.mpxiaoshuo.com': Rules('0', {'class': 'mulu_list'}, {'id': 'htmlContent'}),
# parsed
'www.23zw.me': Rules('0', {'id': 'chapter_list'}, {'id': 'text_area'}),
# parsed
'www.187ks.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.58xs.tw': Rules('http://www.58xs.tw/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biquguan.com': Rules('http://www.biquguan.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.xs98.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.kxs7.com': Rules('http://www.kxs7.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biqudao.com': Rules('https://www.biqudao.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.nuomi9.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'book.sfacg.com': Rules('http://book.sfacg.com/', {'class': 'story-catalog'}, {'tag': 'p'}),
# parsed
'www.7kshu.com': Rules('0', {'id': 'chapterlist'}, {'id': 'content'}),
# parsed
'www.lread.cc': Rules('http://www.lread.cc/', {'class': 'box_con'}, {'id': 'booktext'}),
# parsed
'www.baquge.tw': Rules('http://www.baquge.tw/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biqudu.com': Rules('https://www.biqudu.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.biqugeg.com': Rules('http://www.biqugeg.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.23txt.com': Rules('http://www.23txt.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.ttzw.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.zwdu.com': Rules('http://www.zwdu.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.33xs.com': Rules('http://www.33xs.com/', {'class': 'box_con'}, {'id': 'content'}),
# parsed
'www.50zw.la': Rules('0', {'class': 'chapterlist'}, {'id': 'htmlContent'}),
# parsed
'www.44pq.co': Rules('0', {'class': 'chapterlist'}, {'id': 'BookText'}),
# parsed
'www.wddsnxn.org': Rules('1', {'class': 'booklist'}, {'id': 'BookText'}),
# parsed
'mianzhuan.wddsnxn.org': Rules('1', {'class': 'booklist'}, {'id': 'BookText'}),
# 已解析
'www.a306.com': Rules('http://www.a306.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.xs52.com': Rules('0', {'id': 'chapter_list'}, {'id': 'text_c'}),
# 已解析
'www.xs.la': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.23xsw.cc': Rules('http://www.23xsw.cc/', {'class': 'listmain'}, {'id': 'content'}),
# 已解析
'www.8jzw.com': Rules('http://www.8jzw.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.biquguo.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.woquge.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.zhonghuawuxia.com': Rules('1', {'class': 'list'}, {'id': 'content'}),
# 已解析
'www.mytxt.cc': Rules('http://www.mytxt.cc/', {'class': 'story_list_m62topxs'}, {'class': 'detail_con_m62topxs'}),
# 已解析
'www.136txt.com': Rules('1', {'class': 'directory_con'}, {'id': 'chapterContent'}),
# 已解析
'www.xs74.com': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.yssm.org': Rules('0', {'class': 'chapterlist'}, {'id': 'content'}),
# 已解析
'www.luoxia.com': Rules('1', {'class': 'book-list'}, {'tag': 'p'}),
# 已解析
'www.sbkk88.com': Rules('http://www.sbkk88.com/', {'class': 'leftList'}, {'id': 'f_article'}),
# 已解析
'www.dxsxs.com': Rules('http://www.dxsxs.com/', {'id': 'yuedu'}, {'class': 'zw'}),
# 已解析
'www.wenku8.com': Rules('0', {'class': 'css'}, {'id': 'content'}),
# 已解析
'www.xqingdou.net': Rules('http://www.xqingdou.net/', {'class': 'dirconone'}, {'id': 'chapter_content'}),
# 已解析
'www.zuowe.com': Rules('http://www.zuowe.com/', {'class': 'book_list'}, {'id': 'htmlContent'}),
# 已解析
'www.biqugek.com': Rules('1', {'class': 'book_list'}, {'id': 'htmlContent'}),
# 已解析
'www.wuyanxia.net': Rules('http://www.wuyanxia.net/', {'class': 'zjlist4'}, {'id': 'htmlContent'}),
# 已解析
'www.50331.net': Rules('http://www.50331.net/', {'class': 'listmain'}, {'id': 'content'}),
# 已解析
'www.wenxuemi.com': Rules('http://www.wenxuemi.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.xs222.com': Rules('http://www.xs222.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.lewen8.com': Rules('http://www.lewen8.com/', {'id': 'chapterlist'}, {'id': 'content'}),
# 已解析
'www.5ccc.net': Rules('0', {'class': 'acss'}, {'id': 'content'}),
# 已解析
# 'www.suimeng.la': Rules('0', {'class': 'acss'}, {'id': 'ccontent'}),
# 已解析
'www.bqg5200.com': Rules('http://www.bqg5200.com/', {'id': 'readerlist'}, {'id': 'content'}),
# 已解析
'www.vodtw.com': Rules('0', {'class': 'insert_list'}, {'class': 'contentbox'}),
# 已解析
'www.6mao.com': Rules('http://www.6mao.com/', {'class': 'liebiao_bottom'}, {'id': 'neirong'}),
# 已解析
'www.touxiang.la': Rules('http://www.touxiang.la/', {'class': 'listmain'}, {'id': 'content'}),
# 已解析
'www.7kankan.com': Rules('0', {'class': 'uclist'}, {'id': 'content'}),
# 已解析
'www.biqugetw.com': Rules('http://www.biqugetw.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'gdbzkz.com': Rules('1', {'class': 'mulu'}, {'class': 'content-body'}),
# 已解析
'www.gdbzkz.com': Rules('1', {'class': 'mulu'}, {'class': 'content-body'}),
# 已解析
'www.freexs.cn': Rules('0', {'class': 'readout'}, {'class': 'shuneirong'}),
# 已解析
'www.bxquge.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.beidouxin.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.3qzone.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.97xs.net': Rules('1', {'class': 'box'}, {'id': 'htmlContent'}),
# 已解析
'www.7dsw.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.263zw.com': Rules('1', {'class': 'chapter'}, {'id': 'chapterContent'}),
# 已解析
'www.biquge5.com': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.yooread.com': Rules('http://www.yooread.com', {'id': 'chapterList'}, {'tag': 'p'}),
# 已解析
'www.xs82.com': Rules('0', {'class': 'chapterlist'}, {'id': 'content'}),
# 已解析
'www.kanshuhai.com': Rules('0', {'id': 'book'}, {'id': 'content'}),
# 已解析
'www.bequge.com': Rules('https://www.bequge.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed; requests to this site fail
# 'www.biquge5200.com': Rules('1', {'id': 'list'}, {'id': 'content'}),
# 已解析
'www.biquku.co': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.xbqge.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.aiquxs.com': Rules('0', {'id': 'list'}, {'id': 'booktext'}),
# 已解析
# 'www.piaotian.com': Rules('0', {'class': 'centent'}, {'class': 'fonts_mesne'}),
# 已解析
'www.ttshu.com': Rules('http://www.ttshu.com', {'class': 'border'}, {'id': 'content'}),
# 已解析
'www.23us.com': Rules('0', {'id': 'at'}, {'id': 'contents'}),
# 已解析
'www.x23us.com': Rules('0', {'id': 'at'}, {'id': 'contents'}),
# 已解析
'www.23wx.cc': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.23wx.cm': Rules('0', {'class': 'book_list'}, {'id': 'htmlContent'}),
# 已解析
'www.ddbiquge.com': Rules('http://www.ddbiquge.com', {'class': 'listmain'}, {'id': 'content'}),
# 已解析
'www.abocms.cn': Rules('http://www.abocms.cn/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.liewen.cc': Rules('https://www.liewen.cc/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.heiyange.com': Rules('http://www.heiyange.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.8535.org': Rules('0', {'class': 'booklist'}, {'class': 'txtc'}),
# 已解析
'www.dingdianzw.com': Rules('http://www.dingdianzw.com/', {'id': 'bgdiv'}, {'id': 'content'}),
# 已解析
'www.biquge.cc': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.lewenxiaoshuo.com': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.111bz.org': Rules('http://www.111bz.org/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.biqugebook.com': Rules('http://www.biqugebook.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.e8zw.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.xqqxs.com': Rules('0', {'class': 'box_con'}, {'class': 'content'}),
# 已解析
'www.139book.com': Rules('http://www.139book.com/', {'class': 'list_box'}, {'class': 'box_box'}),
# 已解析
'www.jcdf99.com': Rules('0', {'class': 'list_box'}, {'id': 'content'}),
# 已解析
'www.tianzeba.com': Rules('http://www.tianzeba.com/', {'class': 'chapterlist'}, {'id': 'BookText'}),
# 已解析
'www.kanshuwangzhan.com': Rules('0', {'id': 'chapterlist'}, {'id': 'booktext'}),
# 已解析
'tianyibook.la': Rules('http://tianyibook.la/', {'class': 'chapterlist'}, {'id': 'BookText'}),
# 已解析
'www.quanben.net': Rules('http://www.quanben.net/', {'class': 'chapterlist'}, {'id': 'BookText'}),
# 已解析
# 'www.zhetian.org': Rules('http://www.zhetian.org', {'class': 'body '}, {'class': 'content'}),
# 已解析
'www.lingdianksw.com': Rules('0', {'class': 'acss'}, {'id': 'ccontent'}),
# 已解析
'www.qb5.tw': Rules('http://www.qb5.tw/', {'class': 'zjbox'}, {'id': 'content'}),
# 已解析
'www.ybdu.com': Rules('0', {'class': 'mulu_list'}, {'id': 'htmlContent'}),
# 已解析
'www.quanben.com': Rules('0', {'class': 'mulu_list'}, {'id': 'htmlContent'}),
# 已解析
'www.fhxs.com': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.biquge.biz': Rules('http://www.biquge.biz/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.58xs.com': Rules('http://www.58xs.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.biqukan.com': Rules('http://www.biqukan.com/', {'class': 'listmain'}, {'id': 'content'}),
# 已解析
'www.shuyuelou.com': Rules('http://www.shuyuelou.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.mangg.com': Rules('http://www.mangg.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.50zw.com': Rules('0', {'class': 'chapterlist'}, {'id': 'htmlContent'}),
# 已解析
'www.lingdiankanshu.co': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.biqiku.com': Rules('http://www.biqiku.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.duilianku.com': Rules('http://www.duilianku.com/', {'id': 'list'}, {'class': 'chapter'}),
# 已解析
'www.5xiaxiaoshuo.com': Rules('http://www.5xiaxiaoshuo.com/', {'class': 'art_listmain_main'}, {'id': 'content'}),
# 已解析
'www.81xsw.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.wxguan.com': Rules('http://www.wxguan.com/', {'class': 'listmain'}, {'id': 'content'}),
# 已解析
'www.qb5200.tw': Rules('http://www.qb5200.tw/', {'class': 'listmain'}, {'id': 'content'}),
# 已解析
'www.fox2008.cn': Rules('http://www.fox2008.cn/', {'class': 'book'}, {'id': 'chapterContent'}),
# 已解析
'www.22zw.com': Rules('0', {'class': 'acss'}, {'id': 'content'}),
# 已解析
'www.k6uk.com': Rules('0', {'class': 'acss'}, {'id': 'content'}),
# 已解析
'www.126shu.com': Rules('http://www.126shu.com/', {'id': 'list'}, {'id': 'content'}),
# 已解析
'www.kooxs.com': Rules('0', {'class': 'list'}, {'id': 'content'}),
# 已解析
'www.shubaotxt.com': Rules('0', {'class': 'list'}, {'id': 'content'}),
# 已解析
'www.muyuge.com': Rules('1', {'id': 'xslist'}, {'id': 'content'}),
# 已解析
# 'www.daizhuzai.com': Rules('http://www.daizhuzai.com', {'class': 'dirlist'}, {'class': 'content'}),
# 已解析
'www.biqu.la': Rules('0', {'class': 'book_list'}, {'id': 'htmlContent'}),
# 已解析
'shushu.com.cn': Rules('http://shushu.com.cn/', {'id': 'dirsort01'}, {'id': 'content'}),
# 已解析
'www.shuhai.com': Rules('0', {'class': 'box_chap'}, {'id': 'readcon'}),
# 已解析
'www.37yue.com': Rules('0', {'class': 'list-chapter'}, {'class': 'chapter'}),
# 已解析
'www.35zw.com': Rules('0', {'class': 'book_list'}, {'id': 'htmlContent'}),
# 已解析
'www.xinshu.in': Rules('http://www.xinshu.in/', {'class': 'list_box'}, {'class': 'box_box'}),
# 已解析
'www.lwxs520.com': Rules('0', {'class': 'dccss'}, {'id': 'content'}),
# 已解析
'www.lwxs.la': Rules('http://www.lwxs.la/', {'id': 'defaulthtml4'}, {'id': 'content'}),
# 已解析
'www.biqule.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.33yq.com': Rules('1', {'class': 'box_con'}, {'class': 'zhangjieTXT'}),
# 已解析
'www.dishuge.com': Rules('1', {'class': 'update'}, {'tag': 'p'}),
# 已解析
'www.qu.la': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
    'www.shuge.net': Rules('http://www.shuge.net/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.daomengren.com': Rules('http://www.daomengren.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.81zw.net': Rules('http://www.81zw.net/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.09xs.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.fhxiaoshuo.com': Rules('1', {'class': 'box_con'}, {'class': 'zhangjieTXT'}),
# 已解析
'www.yikanxiaoshuo.com': Rules('http://www.yikanxiaoshuo.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.1xiaoshuo.com': Rules('http://www.1xiaoshuo.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.kanshu.la': Rules('http://www.kanshu.la/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.kbiquge.com': Rules('http://www.kbiquge.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.00ksw.net': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.booktxt.net': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'wanmeishijiexiaoshuo.org': Rules('1', {'class': 'bg'}, {'class': 'content'}),
# 已解析
'www.sosoxiaoshuo.cc': Rules('http://www.sosoxiaoshuo.cc/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.ciluke.com': Rules('0', {'id': 'list'}, {'id': 'content'}),
# 已解析
'www.81zw.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.cilook.net': Rules('0', {'id': 'cl_content'}, {'id': 'content'}),
# 已解析
'www.baoliny.com': Rules('http://www.baoliny.com/', {'class': 'readerListShow'}, {'id': 'content'}),
# 已解析
'www.biquge.tw': Rules('http://www.biquge.tw/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.7788xs.net': Rules('http://www.7788xs.net/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.06sy.com': Rules('http://www.06sy.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.biqumo.com': Rules('https://www.biqumo.com/', {'class': 'listmain'}, {'id': 'content'}),
# 已解析
'www.kanshuzhe.com': Rules('http://www.kanshuzhe.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.biqiuge.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.lwxs.com': Rules('0', {'class': 'box_con'}, {'id': 'TXT'}),
# 已解析
'www.biqugezw.com': Rules('http://www.biqugezw.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed; this site frequently redirects to unpredictable destinations, so it is disabled
# 'www.is028.cn': Rules('http://www.biquge.com.tw', {'class': 'box_con'}, {'id': 'content'}),
    # www.is028.cn redirects to http://www.biquge.com.tw
'www.biquge.com.tw': Rules('http://www.biquge.com.tw/', {'class': 'box_con'}, {'id': 'content'}),
# 'www.xs82.com': Rules('-1', {'class': 'chapterlist'}, {'id': 'content'}),
# 已解析
'www.shuqizw.com': Rules('http://www.shuqizw.com/', {'class': 'article_texttitleb'}, {'id': 'book_text'}),
# 已解析
'read.ixdzs.com': Rules('0', {'class': 'catalog'}, {'class': 'content'}),
# 已解析
'www.shumilou.net': Rules('0', {'class': 'chapterlist'}, {'id': 'BookText'}),
# 已解析
'www.8shuw.com': Rules('1', {'class': 'chapterlist'}, {'id': 'readtext'}),
# 已解析
# 'www.ttshu.com': Rules('http://www.ttshu.com', {'class': 'border'}, {'id': 'content'}),
# 已解析
'www.heiyan.la': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.bbsa5.com': Rules('1', {'class': 'panel'}, {'class': 'content-body'}),
# 已解析
'www.tycqxs.com': Rules('http://www.tycqxs.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.miaobige.com': Rules('https://www.miaobige.com/', {'id': 'readerlists'}, {'id': 'content'}),
# 已解析
'www.dashubao.net': Rules('0', {'class': 'ml_main'}, {'class': 'yd_text2'}),
# 已解析
'www.23zw.com': Rules('0', {'id': 'chapter_list'}, {'id': 'text_area'}),
# 已解析
'www.23us.la': Rules('http://www.23us.la/', {'class': 'inner'}, {'id': 'content'}),
# 已解析
'www.2952.cc': Rules('0', {'class': 'inner'}, {'id': 'content'}),
# 已解析
'www.23us.cc': Rules('0', {'class': 'inner'}, {'id': 'content'}),
# 已解析
'www.13xs.com': Rules('0', {'class': 'box_con'}, {'id': 'booktext'}),
# 已解析
'www.tsxsw.com': Rules('0', {'class': 'bdsub'}, {'id': 'contents'}),
# 已解析
'www.ymoxuan.com': Rules('1', {'class': 'mulu'}, {'id': 'content'}),
'www.ixs.cc': Rules('https:', {'class': 'mulu'}, {'id': 'content'}),
# 已解析
'zetianjiba.net': Rules('1', {'class': 'bg'}, {'class': 'content'}),
# 已解析
'www.37zw.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.555zw.com': Rules('0', {'class': 'dir'}, {'id': 'content'}),
# 已解析
'www.jueshitangmen.info': Rules('1', {'class': 'bg'}, {'class': 'content'}),
# 已解析
'www.bxwx9.org': Rules('0', {'class': 'TabCss'}, {'id': 'content'}),
# 已解析
'www.xxbiquge.com': Rules('https://www.xxbiquge.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.biquwo.com': Rules('https://www.biquwo.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.fs23.com': Rules('http://www.fs23.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.longtengx.com': Rules('http://www.longtengx.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.lingyu.org': Rules('http://www.lingyu.org/', {'class': 'mt10'}, {'id': 'htmlContent'}),
# 已解析
'www.aszw8.com': Rules('0', {'id': 'at'}, {'id': 'contents'}),
# 已解析
'www.23us.so': Rules('1', {'id': 'at'}, {'id': 'contents'}),
# 已解析
'www.biquge.lu': Rules('http://www.biquge.lu/', {'class': 'listmain'}, {'id': 'content'}),
# 已解析
'www.3zm.net': Rules('http://www.3zm.net/', {'class': 'listmain'}, {'id': 'content'}),
# 已解析
'www.biquge.com': Rules('http://www.biquge.com/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
'www.kanshuzhong.com': Rules('0', {'class': 'bookcontent'}, {'class': 'textcontent'}),
# 已解析
'www.siluke.tw': Rules('http://www.siluke.tw/', {'class': 'box_con'}, {'id': 'content'}),
# 已解析
# 'www.ttshu.com': Rules('http://www.ttshu.com', {'class': 'border'}, {'id': 'content'}),
}
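# --- Illustrative usage sketch (not part of the original rule tables) ---
# The comments above RULES only define the '0' and '1' values of content_url
# ('0': chapter hrefs are relative and must be joined with the current page
# URL, '1': hrefs are already complete). Many entries instead carry a site
# base URL; treating any other value as a base to join against is an
# assumption of this sketch, as is the helper's name.
try:
    from urllib.parse import urljoin  # Python 3
except ImportError:
    from urlparse import urljoin  # Python 2

def resolve_chapter_url(content_url, page_url, href):
    """Resolve a chapter href according to the content_url convention above."""
    if content_url == '1':
        return href  # already a complete URL
    if content_url == '0':
        return urljoin(page_url, href)  # relative to the current catalogue page
    return urljoin(content_url, href)  # assume content_url is the site base URL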
| 46.258929
| 120
| 0.540533
|
25c84e4f06773911f2cbd28c87c4c9f60b952cae
| 61
|
py
|
Python
|
Test/import_test.py
|
Spiderz11/ACLTracker
|
fe833ac4bfebf94c8f4f212d849d26f41ebba9f7
|
[
"MIT"
] | 1
|
2016-09-01T02:37:27.000Z
|
2016-09-01T02:37:27.000Z
|
Test/import_test.py
|
Spiderz11/ACLTracker
|
fe833ac4bfebf94c8f4f212d849d26f41ebba9f7
|
[
"MIT"
] | null | null | null |
Test/import_test.py
|
Spiderz11/ACLTracker
|
fe833ac4bfebf94c8f4f212d849d26f41ebba9f7
|
[
"MIT"
] | null | null | null |
import sys
from acltracker import *
print('Passed')
sys.mod
| 10.166667
| 24
| 0.754098
|
257011af524dedb7be6a8a5e4bba6b454cd4146a
| 1,370
|
py
|
Python
|
Fig3-cmm/cmmlib/inout/off.py
|
YuePengUSTC/AADR
|
ed19730fc56f5d019089dbfd7544eeb35ba9c9a2
|
[
"BSD-3-Clause"
] | 7
|
2020-07-01T09:30:18.000Z
|
2022-01-18T04:19:50.000Z
|
Fig3-cmm/cmmlib/inout/off.py
|
YuePengUSTC/AADR
|
ed19730fc56f5d019089dbfd7544eeb35ba9c9a2
|
[
"BSD-3-Clause"
] | null | null | null |
Fig3-cmm/cmmlib/inout/off.py
|
YuePengUSTC/AADR
|
ed19730fc56f5d019089dbfd7544eeb35ba9c9a2
|
[
"BSD-3-Clause"
] | 1
|
2021-01-19T03:09:38.000Z
|
2021-01-19T03:09:38.000Z
|
import numpy as np
from io import StringIO
def save_mesh(filename, vertices=None, faces=None):
if vertices is None:
vertices = []
if faces is None:
faces = []
with open(filename, 'w') as f:
f.write("OFF\n%d %d 0\n" % (len(vertices), len(faces)))
if len(vertices) > 1:
np.savetxt(f, vertices, fmt="%f %f %f")
if len(faces) > 1:
for face in faces:
fmt = " ".join(["%d"] * (len(face) + 1)) + "\n"
f.write(fmt % ((len(face),) + tuple(map(int, face))))
def read_mesh(filename, no_colors=False):
lines = open(filename).readlines()
lines = [line for line in lines if line.strip() != '' and line[0] != '#']
assert lines[0].strip() in ['OFF', 'COFF'], 'OFF header missing'
has_colors = lines[0].strip() == 'COFF'
n_verts, n_faces, _ = map(int, lines[1].split())
vertex_data = np.loadtxt(
StringIO(''.join(lines[2:2 + n_verts])),
        dtype=float)
if n_faces > 0:
        faces = np.loadtxt(StringIO(''.join(lines[2+n_verts:])), dtype=int)[:,1:]
else:
faces = None
if has_colors:
colors = vertex_data[:,3:].astype(np.uint8)
vertex_data = vertex_data[:,:3]
else:
colors = None
if no_colors:
return vertex_data, faces
else:
return vertex_data, colors, faces
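# --- Illustrative usage sketch (not part of the original module) ---
# Round-trips a small two-triangle mesh through save_mesh/read_mesh. The file
# name is hypothetical, and the block only runs when the module is executed
# directly, so importing the module is unaffected.
if __name__ == '__main__':
    demo_vertices = np.array([[0.0, 0.0, 0.0],
                              [1.0, 0.0, 0.0],
                              [1.0, 1.0, 0.0],
                              [0.0, 1.0, 0.0]])
    demo_faces = np.array([[0, 1, 2], [0, 2, 3]])
    save_mesh('demo_quad.off', demo_vertices, demo_faces)
    verts, colors, faces = read_mesh('demo_quad.off')
    # expect (4, 3) vertices, no color block, and the two faces back
    print(verts.shape, colors, faces)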
| 34.25
| 84
| 0.554015
|
8c047fad2474a52e691a27d18bcd68866e48fdc3
| 6,396
|
py
|
Python
|
src/hmm_diabimmune.py
|
aametwally/FoodAllergyPredictor
|
2ce44590670f579434480a9286d05e002a53e0ba
|
[
"MIT"
] | 8
|
2019-04-30T03:56:47.000Z
|
2021-02-15T04:36:32.000Z
|
src/hmm_diabimmune.py
|
aametwally/FoodAllergyPredictor
|
2ce44590670f579434480a9286d05e002a53e0ba
|
[
"MIT"
] | null | null | null |
src/hmm_diabimmune.py
|
aametwally/FoodAllergyPredictor
|
2ce44590670f579434480a9286d05e002a53e0ba
|
[
"MIT"
] | 2
|
2020-01-01T15:21:59.000Z
|
2021-06-17T01:57:10.000Z
|
#!/usr/bin/env python -W ignore::DeprecationWarning
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
# Third-party libraries
import numpy as np
import os
import sys
import struct
import argparse
from array import array as pyarray
from seqlearn.hmm import MultinomialHMM
from seqlearn.evaluation import whole_sequence_accuracy
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit, StratifiedKFold
from sklearn.metrics import matthews_corrcoef, accuracy_score, roc_auc_score
import pandas as pd
from math import floor
from hmmlearn import hmm
import warnings
def fxn():
warnings.warn("deprecated", DeprecationWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = argparse.ArgumentParser(description='RF on data')
parser.add_argument("--data", help="raw or latent")
args = parser.parse_args()
if __name__ == '__main__':
if args.data == None:
print("Please specify raw or latent for data flag")
else:
dataset = args.data
hmm_ws_accuracy = []
hmm_last_accuracy =[]
hmm_ws_mcc = []
hmm_last_mcc = []
hmm_pred = []
hmm_prob = []
hmm_auc = []
fp = pd.read_csv("diabimmune_metadata_allcountries_allergy_noQuotes.csv", index_col=3)
allergy = fp["allergy"]
allergy = pd.factorize(allergy)
subject = fp["subjectID"]
labels = allergy[1]
allergy = allergy[0]
subject_data = {'ID': subject, 'label': allergy}
split_df = pd.DataFrame(data=subject_data)
lengths = split_df.groupby("ID").count()
ids = np.unique(split_df["ID"])
split_lab = np.array(split_df.groupby("ID").median()[["label"]]).reshape(-1)
if dataset == "latent":
data = pd.read_csv("diabimmune_embeddeddata_50_addedHeader.csv")
elif dataset == "raw":
data = pd.read_csv("diabimmune_taxa_genus_allcountries.csv", index_col=0)
elif dataset == "latent_25":
data = pd.read_csv("diabimmune_taxa_genus_allcountries_selected_" + str(dataset) + ".csv")
else:
data = pd.read_csv("diabimmune_taxa_genus_allcountries_selected_" + str(dataset) + ".csv", index_col=0)
        data = data.transpose().values
for set in range(0,10):
skf = StratifiedKFold(n_splits=10, shuffle=True)
cv = 0
for id_train_index, id_test_index in skf.split(ids, split_lab):
train_index = []
test_index = []
for i in range(0, len(subject)):
if subject[i] in ids[id_train_index]:
train_index.append(i)
else:
test_index.append(i)
train_lengths = []
test_lengths = []
lengths = pd.DataFrame(lengths)
for i in ids[id_train_index]:
train_lengths.append(lengths.loc[i]["label"])
for i in ids[id_test_index]:
test_lengths.append(lengths.loc[i]["label"])
num_features = np.array(data).shape[-1]
x = data[train_index]
y = allergy[train_index]
tx = data[test_index]
ty = allergy[test_index]
print("Running fold %d for set %d" % (cv, set))
clf=hmm.GMMHMM(n_components=2,n_mix=4,n_iter=100)
clf.fit(x, train_lengths)
pred = [row for row in clf.predict(tx, test_lengths)]
pred_last = []
ty_last = []
length_count = 0
for i in range(0, len(test_lengths)):
length_count += test_lengths[i]
pred_last.append(pred[length_count - 1])
ty_last.append(ty[length_count-1])
hmm_pred.append(pred)
acc_ws_0 = whole_sequence_accuracy(ty, pred, test_lengths)
acc_last_0 = accuracy_score(ty_last, pred_last)
mcc_ws_0 = matthews_corrcoef(ty, pred)
mcc_last_0 = matthews_corrcoef(ty_last, pred_last)
acc_ws_1 = whole_sequence_accuracy([(z + 1)%2 for z in ty], pred, test_lengths)
acc_last_1 = accuracy_score([(z + 1)%2 for z in ty_last], pred_last)
mcc_ws_1 = matthews_corrcoef([(z + 1)%2 for z in ty], pred)
mcc_last_1 = matthews_corrcoef([(z + 1)%2 for z in ty_last], pred_last)
if acc_last_0 > acc_last_1:
acc_ws = acc_ws_0
acc_last = acc_last_0
mcc_ws = mcc_ws_0
mcc_last = mcc_last_0
prob = [row[1] for row in clf.predict_proba(tx)]
else:
acc_ws = acc_ws_1
acc_last = acc_last_1
mcc_ws = mcc_ws_1
mcc_last = mcc_last_1
prob = [row[0] for row in clf.predict_proba(tx)]
roc = roc_auc_score(ty, prob)
hmm_prob.append(prob)
print(acc_last)
print(roc)
print(mcc_last)
hmm_ws_accuracy.append(acc_ws)
hmm_last_accuracy.append(acc_last)
hmm_ws_mcc.append(mcc_ws)
hmm_last_mcc.append(mcc_last)
hmm_auc.append(roc)
cv += 1
print("Whole Sequence Accuracy = " + str(np.mean(hmm_ws_accuracy)) + " (" + str(np.std(hmm_ws_accuracy)) + ")\n")
print(hmm_ws_accuracy)
print("\n\nLast Position Accuracy = " + str(np.mean(hmm_last_accuracy)) + " (" + str(np.std(hmm_last_accuracy)) + ")\n")
print(hmm_last_accuracy)
print("\n\nROC AUC = " + str(np.mean(hmm_auc)) + " (" + str(np.std(hmm_auc)) + ")\n")
print(hmm_auc)
print("\n\nWhole Sequence MCC = " + str(np.mean(hmm_ws_mcc)) + " (" + str(np.std(hmm_ws_mcc)) + ")\n")
print(hmm_ws_mcc)
print("\n\nLast Position MCC = " + str(np.mean(hmm_last_mcc)) + " (" + str(np.std(hmm_last_mcc)) + ")\n")
print(hmm_last_mcc)
f = open(dataset + "_hmm.txt", 'w')
f.write("Mean Whole Sequence Accuracy: " + str(np.mean(hmm_ws_accuracy)) + " (" + str(np.std(hmm_ws_accuracy))+ ")\n")
f.write(str(hmm_ws_accuracy) + "\n")
f.write("\Last Position Accuracy: " + str(np.mean(hmm_last_accuracy)) + " (" + str(np.std(hmm_last_accuracy))+ ")\n")
f.write(str(hmm_last_accuracy) + "\n")
f.write("\nMean ROC: " + str(np.mean(hmm_auc)) + " (" + str(np.std(hmm_auc))+ ")\n")
f.write(str(hmm_auc) + "\n")
f.write("\Whole Sequence MCC: " + str(np.mean(hmm_ws_mcc)) + " (" + str(np.std(hmm_ws_mcc))+ ")\n")
f.write(str(hmm_ws_mcc) + "\n")
f.write("\nLast Position MCC: " + str(np.mean(hmm_last_mcc)) + " (" + str(np.std(hmm_last_mcc))+ ")\n")
f.write(str(hmm_last_mcc) + "\n")
for i in range(0,100):
f.write("\nPredictions for " + str(i) + "\n")
f.write("\n" + str(hmm_pred[i]) + "\n")
f.close()
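# --- Illustrative sketch (not part of the original script) ---
# Because the GMM-HMM assigns arbitrary state indices, the loop above scores
# both possible state-to-label mappings (original and flipped) and keeps the
# better one. The helper below restates that idea in isolation; its name is
# hypothetical and it is not called anywhere in this script.
def _align_binary_state_labels(pred, truth):
    """Return pred or its label-flipped version, whichever matches truth better."""
    flipped = [(p + 1) % 2 for p in pred]
    same = sum(int(p == t) for p, t in zip(pred, truth))
    diff = sum(int(f == t) for f, t in zip(flipped, truth))
    return pred if same >= diff else flipped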
| 33.3125
| 169
| 0.635397
|
aeca2829df54ee11711b00c5bdcc58b723dbff73
| 6,371
|
py
|
Python
|
dummy_data/graphsage/model.py
|
SaeelPai/GraphVizards2
|
f734991a40d30ff6cb89cc42e01b3180e001dc2e
|
[
"MIT"
] | null | null | null |
dummy_data/graphsage/model.py
|
SaeelPai/GraphVizards2
|
f734991a40d30ff6cb89cc42e01b3180e001dc2e
|
[
"MIT"
] | null | null | null |
dummy_data/graphsage/model.py
|
SaeelPai/GraphVizards2
|
f734991a40d30ff6cb89cc42e01b3180e001dc2e
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.nn import init
from torch.autograd import Variable
import numpy as np
import time
import random
from sklearn.metrics import f1_score
from collections import defaultdict
from graphsage.encoders import Encoder
from graphsage.aggregators import MeanAggregator
"""
Simple supervised GraphSAGE model as well as examples running the model
on the Cora and Pubmed datasets.
"""
class SupervisedGraphSage(nn.Module):
def __init__(self, num_classes, enc):
super(SupervisedGraphSage, self).__init__()
self.enc = enc
self.xent = nn.CrossEntropyLoss()
self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))
init.xavier_uniform(self.weight)
def forward(self, nodes):
embeds = self.enc(nodes)
scores = self.weight.mm(embeds)
return scores.t()
def loss(self, nodes, labels):
scores = self.forward(nodes)
return self.xent(scores, labels.squeeze())
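# --- Illustrative smoke test (not part of the original example script) ---
# SupervisedGraphSage only needs an encoder exposing `embed_dim` and returning
# embeddings of shape (embed_dim, num_nodes), so a stub encoder is enough to
# exercise forward() and loss() without the Cora/Pubmed data files. The stub
# class and function names below are hypothetical.
class _StubEncoder(nn.Module):
    def __init__(self, embed_dim=16):
        super(_StubEncoder, self).__init__()
        self.embed_dim = embed_dim

    def forward(self, nodes):
        # random embeddings, shape (embed_dim, num_nodes)
        return torch.randn(self.embed_dim, len(nodes))

def _smoke_test_supervised_graphsage():
    model = SupervisedGraphSage(num_classes=7, enc=_StubEncoder())
    nodes = [0, 1, 2, 3]
    scores = model.forward(nodes)  # shape (num_nodes, num_classes) = (4, 7)
    loss = model.loss(nodes, Variable(torch.LongTensor([0, 1, 2, 3])))
    return scores.shape, loss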
def load_cora():
num_nodes = 2708
num_feats = 1433
feat_data = np.zeros((num_nodes, num_feats))
labels = np.empty((num_nodes,1), dtype=np.int64)
node_map = {}
label_map = {}
with open("cora/cora.content") as fp:
for i,line in enumerate(fp):
info = line.strip().split()
            feat_data[i,:] = list(map(float, info[1:-1]))
node_map[info[0]] = i
if not info[-1] in label_map:
label_map[info[-1]] = len(label_map)
labels[i] = label_map[info[-1]]
adj_lists = defaultdict(set)
with open("cora/cora.cites") as fp:
for i,line in enumerate(fp):
info = line.strip().split()
paper1 = node_map[info[0]]
paper2 = node_map[info[1]]
adj_lists[paper1].add(paper2)
adj_lists[paper2].add(paper1)
return feat_data, labels, adj_lists
def run_cora():
np.random.seed(1)
random.seed(1)
num_nodes = 2708
feat_data, labels, adj_lists = load_cora()
features = nn.Embedding(2708, 1433)
features.weight = nn.Parameter(torch.FloatTensor(feat_data), requires_grad=False)
# features.cuda()
agg1 = MeanAggregator(features, cuda=True)
enc1 = Encoder(features, 1433, 128, adj_lists, agg1, gcn=True, cuda=False)
agg2 = MeanAggregator(lambda nodes : enc1(nodes).t(), cuda=False)
enc2 = Encoder(lambda nodes : enc1(nodes).t(), enc1.embed_dim, 128, adj_lists, agg2,
base_model=enc1, gcn=True, cuda=False)
enc1.num_samples = 5
enc2.num_samples = 5
graphsage = SupervisedGraphSage(7, enc2)
# graphsage.cuda()
rand_indices = np.random.permutation(num_nodes)
test = rand_indices[:1000]
val = rand_indices[1000:1500]
train = list(rand_indices[1500:])
optimizer = torch.optim.SGD(filter(lambda p : p.requires_grad, graphsage.parameters()), lr=0.7)
times = []
for batch in range(100):
batch_nodes = train[:256]
random.shuffle(train)
start_time = time.time()
optimizer.zero_grad()
loss = graphsage.loss(batch_nodes,
Variable(torch.LongTensor(labels[np.array(batch_nodes)])))
loss.backward()
optimizer.step()
end_time = time.time()
times.append(end_time-start_time)
        print(batch, loss.item())
val_output = graphsage.forward(val)
print "Validation F1:", f1_score(labels[val], val_output.data.numpy().argmax(axis=1), average="micro")
print "Average batch time:", np.mean(times)
def load_pubmed():
#hardcoded for simplicity...
num_nodes = 19717
num_feats = 500
feat_data = np.zeros((num_nodes, num_feats))
labels = np.empty((num_nodes, 1), dtype=np.int64)
node_map = {}
with open("pubmed-data/Pubmed-Diabetes.NODE.paper.tab") as fp:
fp.readline()
feat_map = {entry.split(":")[1]:i-1 for i,entry in enumerate(fp.readline().split("\t"))}
for i, line in enumerate(fp):
info = line.split("\t")
node_map[info[0]] = i
labels[i] = int(info[1].split("=")[1])-1
for word_info in info[2:-1]:
word_info = word_info.split("=")
feat_data[i][feat_map[word_info[0]]] = float(word_info[1])
adj_lists = defaultdict(set)
with open("pubmed-data/Pubmed-Diabetes.DIRECTED.cites.tab") as fp:
fp.readline()
fp.readline()
for line in fp:
info = line.strip().split("\t")
paper1 = node_map[info[1].split(":")[1]]
paper2 = node_map[info[-1].split(":")[1]]
adj_lists[paper1].add(paper2)
adj_lists[paper2].add(paper1)
return feat_data, labels, adj_lists
def run_pubmed():
np.random.seed(1)
random.seed(1)
num_nodes = 19717
feat_data, labels, adj_lists = load_pubmed()
features = nn.Embedding(19717, 500)
features.weight = nn.Parameter(torch.FloatTensor(feat_data), requires_grad=False)
# features.cuda()
agg1 = MeanAggregator(features, cuda=True)
enc1 = Encoder(features, 500, 128, adj_lists, agg1, gcn=True, cuda=False)
agg2 = MeanAggregator(lambda nodes : enc1(nodes).t(), cuda=False)
enc2 = Encoder(lambda nodes : enc1(nodes).t(), enc1.embed_dim, 128, adj_lists, agg2,
base_model=enc1, gcn=True, cuda=False)
enc1.num_samples = 10
enc2.num_samples = 25
graphsage = SupervisedGraphSage(3, enc2)
# graphsage.cuda()
rand_indices = np.random.permutation(num_nodes)
test = rand_indices[:1000]
val = rand_indices[1000:1500]
train = list(rand_indices[1500:])
optimizer = torch.optim.SGD(filter(lambda p : p.requires_grad, graphsage.parameters()), lr=0.7)
times = []
for batch in range(200):
batch_nodes = train[:1024]
random.shuffle(train)
start_time = time.time()
optimizer.zero_grad()
loss = graphsage.loss(batch_nodes,
Variable(torch.LongTensor(labels[np.array(batch_nodes)])))
loss.backward()
optimizer.step()
end_time = time.time()
times.append(end_time-start_time)
        print(batch, loss.item())
val_output = graphsage.forward(val)
print "Validation F1:", f1_score(labels[val], val_output.data.numpy().argmax(axis=1), average="micro")
print "Average batch time:", np.mean(times)
if __name__ == "__main__":
run_cora()
| 35.005495
| 106
| 0.636635
|
ab9b2b659cf8ed87373e260f33182f3e23ba36f8
| 18,840
|
py
|
Python
|
mkt/files/tests/test_views.py
|
spasovski/zamboni
|
c7f4714029e3b2dc918ddfc2103f8e051193c14d
|
[
"BSD-3-Clause"
] | 1
|
2021-07-29T00:51:09.000Z
|
2021-07-29T00:51:09.000Z
|
mkt/files/tests/test_views.py
|
imclab/olympia
|
35bc9c484e384bafab520ca8b5d5b0f8da5b62c0
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/files/tests/test_views.py
|
imclab/olympia
|
35bc9c484e384bafab520ca8b5d5b0f8da5b62c0
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import os
import shutil
import urlparse
from django.conf import settings
from django.core.cache import cache
from django.utils.http import http_date
from mock import patch
from nose import SkipTest
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.utils import Message
from amo.urlresolvers import reverse
from files.helpers import FileViewer, DiffHelper
from files.models import File
from mkt.webapps.models import Webapp
from users.models import UserProfile
packaged_app = 'mkt/submit/tests/packaged/full-tpa.zip'
not_binary = 'manifest.webapp'
binary = 'icons/256.png'
class FilesBase(object):
def login_as_editor(self):
assert self.client.login(username='editor@mozilla.com',
password='password')
def setUp(self):
self.app = Webapp.objects.get(pk=337141)
self.app.update(is_packaged=True, status=amo.WEBAPPS_UNREVIEWED_STATUS)
self.dev = self.app.authors.all()[0]
self.regular = UserProfile.objects.get(pk=999)
self.version = self.app.versions.latest()
self.file = self.version.all_files[0]
self.versions = [self.version,
self.app.versions.create(
version='%s.1' % self.version.version)]
self.files = [self.file,
File.objects.create(version=self.versions[1],
filename='webapp.zip')]
self.login_as_editor()
for file_obj in self.files:
src = os.path.join(settings.ROOT, packaged_app)
try:
os.makedirs(os.path.dirname(file_obj.file_path))
except OSError:
pass
shutil.copyfile(src, file_obj.file_path)
self.file_viewer = FileViewer(self.file, is_webapp=True)
# Setting this to True, so we are delaying the extraction of files,
# in the tests, the files won't be extracted.
# Most of these tests extract as needed to.
self.create_switch(name='delay-file-viewer')
def tearDown(self):
self.file_viewer.cleanup()
def files_redirect(self, file):
return reverse('mkt.files.redirect', args=[self.file.pk, file])
def files_serve(self, file):
return reverse('mkt.files.serve', args=[self.file.pk, file])
def test_view_access_anon(self):
self.client.logout()
self.check_urls(403)
def test_view_access_anon_view_unreviewed_source(self):
self.app.update(view_source=True)
self.file_viewer.extract()
self.client.logout()
self.check_urls(403)
def test_view_access_anon_view_source(self):
self.app.update(view_source=True, status=amo.STATUS_PUBLIC)
self.file_viewer.extract()
self.client.logout()
self.check_urls(200)
def test_view_access_editor(self):
self.file_viewer.extract()
self.check_urls(200)
def test_view_access_editor_view_source(self):
self.app.update(view_source=True)
self.file_viewer.extract()
self.check_urls(200)
def test_view_access_developer(self):
self.client.logout()
assert self.client.login(username=self.dev.email, password='password')
self.file_viewer.extract()
self.check_urls(200)
def test_view_access_reviewed(self):
self.app.update(view_source=True)
self.file_viewer.extract()
self.client.logout()
for status in amo.UNREVIEWED_STATUSES:
self.app.update(status=status)
self.check_urls(403)
for status in amo.REVIEWED_STATUSES:
self.app.update(status=status)
self.check_urls(200)
def test_view_access_developer_view_source(self):
self.client.logout()
assert self.client.login(username=self.dev.email, password='password')
self.app.update(view_source=True)
self.file_viewer.extract()
self.check_urls(200)
def test_view_access_another_developer(self):
self.client.logout()
assert self.client.login(username=self.regular.email,
password='password')
self.file_viewer.extract()
self.check_urls(403)
def test_view_access_another_developer_view_source(self):
self.client.logout()
assert self.client.login(username=self.regular.email,
password='password')
self.app.update(view_source=True, status=amo.STATUS_PUBLIC)
self.file_viewer.extract()
self.check_urls(200)
def test_poll_extracted(self):
self.file_viewer.extract()
res = self.client.get(self.poll_url())
eq_(res.status_code, 200)
eq_(json.loads(res.content)['status'], True)
def test_poll_not_extracted(self):
res = self.client.get(self.poll_url())
eq_(res.status_code, 200)
eq_(json.loads(res.content)['status'], False)
def test_poll_extracted_anon(self):
self.client.logout()
res = self.client.get(self.poll_url())
eq_(res.status_code, 403)
def test_content_headers(self):
self.file_viewer.extract()
res = self.client.get(self.file_url('manifest.webapp'))
assert 'etag' in res._headers
assert 'last-modified' in res._headers
def test_content_headers_etag(self):
self.file_viewer.extract()
self.file_viewer.select('manifest.webapp')
obj = getattr(self.file_viewer, 'left', self.file_viewer)
etag = obj.selected.get('md5')
res = self.client.get(self.file_url('manifest.webapp'),
HTTP_IF_NONE_MATCH=etag)
eq_(res.status_code, 304)
def test_content_headers_if_modified(self):
self.file_viewer.extract()
self.file_viewer.select('manifest.webapp')
obj = getattr(self.file_viewer, 'left', self.file_viewer)
date = http_date(obj.selected.get('modified'))
res = self.client.get(self.file_url('manifest.webapp'),
HTTP_IF_MODIFIED_SINCE=date)
eq_(res.status_code, 304)
def test_file_header(self):
self.file_viewer.extract()
res = self.client.get(self.file_url(not_binary))
eq_(res.status_code, 200)
url = res.context['file_link']['url']
eq_(url, reverse('reviewers.apps.review', args=[self.app.app_slug]))
def test_file_header_anon(self):
self.client.logout()
self.file_viewer.extract()
self.app.update(view_source=True, status=amo.STATUS_PUBLIC)
res = self.client.get(self.file_url(not_binary))
eq_(res.status_code, 200)
url = res.context['file_link']['url']
eq_(url, reverse('detail', args=[self.app.pk]))
def test_content_no_file(self):
self.file_viewer.extract()
res = self.client.get(self.file_url())
doc = pq(res.content)
eq_(len(doc('#content')), 0)
def test_no_files(self):
res = self.client.get(self.file_url())
eq_(res.status_code, 200)
assert 'files' not in res.context
@patch('waffle.switch_is_active')
def test_no_files_switch(self, switch_is_active):
switch_is_active.side_effect = lambda x: x != 'delay-file-viewer'
# By setting the switch to False, we are not delaying the file
# extraction. The files will be extracted and there will be
# files in context.
res = self.client.get(self.file_url())
eq_(res.status_code, 200)
assert 'files' in res.context
def test_files(self):
self.file_viewer.extract()
res = self.client.get(self.file_url())
eq_(res.status_code, 200)
assert 'files' in res.context
def test_files_anon(self):
self.client.logout()
res = self.client.get(self.file_url())
eq_(res.status_code, 403)
def test_files_file(self):
self.file_viewer.extract()
res = self.client.get(self.file_url(not_binary))
eq_(res.status_code, 200)
assert 'selected' in res.context
def test_files_back_link(self):
self.file_viewer.extract()
res = self.client.get(self.file_url(not_binary))
doc = pq(res.content)
eq_(doc('#commands td:last').text(), 'Back to review')
def test_files_back_link_anon(self):
self.file_viewer.extract()
self.client.logout()
self.app.update(view_source=True, status=amo.STATUS_PUBLIC)
res = self.client.get(self.file_url(not_binary))
eq_(res.status_code, 200)
doc = pq(res.content)
eq_(doc('#commands td:last').text(), 'Back to app')
def test_diff_redirect(self):
ids = self.files[0].id, self.files[1].id
res = self.client.post(self.file_url(),
{'left': ids[0], 'right': ids[1]})
eq_(res.status_code, 302)
self.assert3xx(res, reverse('mkt.files.compare', args=ids))
def test_browse_redirect(self):
ids = self.files[0].id,
res = self.client.post(self.file_url(), {'left': ids[0]})
eq_(res.status_code, 302)
self.assert3xx(res, reverse('mkt.files.list', args=ids))
def test_file_chooser(self):
res = self.client.get(self.file_url())
doc = pq(res.content)
left = doc('#id_left')
eq_(len(left), 1)
vers = left('option')
eq_(len(vers), 3)
# Only one file per version on Marketplace for the time being.
eq_(vers.eq(0).text(), '')
f = self.versions[1].all_files[0]
eq_(vers.eq(1).text(), '%s (%s)' % (self.versions[1].version,
amo.STATUS_CHOICES_API[f.status]))
f = self.versions[0].all_files[0]
eq_(vers.eq(2).text(), '%s (%s)' % (self.versions[0].version,
amo.STATUS_CHOICES_API[f.status]))
class TestFileViewer(FilesBase, amo.tests.WebappTestCase):
fixtures = ['base/apps', 'base/users', 'webapps/337141-steamcube']
def poll_url(self):
return reverse('mkt.files.poll', args=[self.file.pk])
def file_url(self, file=None):
args = [self.file.pk]
if file:
args.extend(['file', file])
return reverse('mkt.files.list', args=args)
def check_urls(self, status):
for url in [self.poll_url(), self.file_url()]:
status_code = self.client.get(url).status_code
assert status_code == status, (
'Request to %s returned status code %d (expected %d)' %
(url, status_code, status))
def add_file(self, name, contents):
dest = os.path.join(self.file_viewer.dest, name)
open(dest, 'w').write(contents)
def test_files_xss(self):
self.file_viewer.extract()
self.add_file('<script>alert("foo")', '')
res = self.client.get(self.file_url())
eq_(res.status_code, 200)
doc = pq(res.content)
# Note: this is text, not a DOM element, so escaped correctly.
assert '<script>alert("' in doc('#files li a').text()
def test_content_file(self):
self.file_viewer.extract()
res = self.client.get(self.file_url('manifest.webapp'))
doc = pq(res.content)
eq_(len(doc('#content')), 1)
def test_content_no_file(self):
self.file_viewer.extract()
res = self.client.get(self.file_url())
doc = pq(res.content)
eq_(len(doc('#content')), 1)
eq_(res.context['key'], 'manifest.webapp')
def test_content_xss(self):
self.file_viewer.extract()
for name in ['file.txt', 'file.html', 'file.htm']:
# If you are adding files, you need to clear out the memcache
# file listing.
cache.clear()
self.add_file(name, '<script>alert("foo")</script>')
res = self.client.get(self.file_url(name))
doc = pq(res.content)
# Note: this is text, not a DOM element, so escaped correctly.
assert doc('#content').text().startswith('<script')
def test_binary(self):
self.file_viewer.extract()
self.add_file('file.php', '<script>alert("foo")</script>')
res = self.client.get(self.file_url('file.php'))
eq_(res.status_code, 200)
assert self.file_viewer.get_files()['file.php']['md5'] in res.content
def test_tree_no_file(self):
self.file_viewer.extract()
res = self.client.get(self.file_url('doesnotexist.js'))
eq_(res.status_code, 404)
def test_directory(self):
self.file_viewer.extract()
res = self.client.get(self.file_url('doesnotexist.js'))
eq_(res.status_code, 404)
def test_serve_no_token(self):
self.file_viewer.extract()
res = self.client.get(self.files_serve(binary))
eq_(res.status_code, 403)
def test_serve_fake_token(self):
self.file_viewer.extract()
res = self.client.get(self.files_serve(binary) + '?token=aasd')
eq_(res.status_code, 403)
def test_serve_bad_token(self):
self.file_viewer.extract()
res = self.client.get(self.files_serve(binary) + '?token=a asd')
eq_(res.status_code, 403)
def test_serve_get_token(self):
self.file_viewer.extract()
res = self.client.get(self.files_redirect(binary))
eq_(res.status_code, 302)
url = res['Location']
assert url.startswith(settings.STATIC_URL)
assert urlparse.urlparse(url).query.startswith('token=')
def test_memcache_goes_bye_bye(self):
self.file_viewer.extract()
res = self.client.get(self.files_redirect(binary))
url = res['Location'][len(settings.STATIC_URL) - 1:]
cache.clear()
res = self.client.get(url)
eq_(res.status_code, 403)
def test_bounce(self):
# Don't run this test if the server has x-sendfile turned off.
if not settings.XSENDFILE:
raise SkipTest()
self.file_viewer.extract()
res = self.client.get(self.files_redirect(binary), follow=True)
eq_(res.status_code, 200)
eq_(res[settings.XSENDFILE_HEADER],
self.file_viewer.get_files().get(binary)['full'])
@patch.object(settings, 'FILE_VIEWER_SIZE_LIMIT', 5)
def test_file_size(self):
self.file_viewer.extract()
res = self.client.get(self.file_url(not_binary))
doc = pq(res.content)
assert doc('.error').text().startswith('File size is')
def test_poll_failed(self):
msg = Message('file-viewer:%s' % self.file_viewer)
msg.save('I like cheese.')
res = self.client.get(self.poll_url())
eq_(res.status_code, 200)
data = json.loads(res.content)
eq_(data['status'], False)
eq_(data['msg'], ['I like cheese.'])
def test_file_chooser_selection(self):
res = self.client.get(self.file_url())
doc = pq(res.content)
eq_(doc('#id_left option[selected]').attr('value'),
str(self.files[0].id))
eq_(len(doc('#id_right option[value][selected]')), 0)
class TestDiffViewer(FilesBase, amo.tests.WebappTestCase):
fixtures = ['base/apps', 'base/users', 'webapps/337141-steamcube']
def setUp(self):
super(TestDiffViewer, self).setUp()
self.file_viewer = DiffHelper(self.files[0], self.files[1],
is_webapp=True)
def poll_url(self):
return reverse('mkt.files.compare.poll', args=[self.files[0].pk,
self.files[1].pk])
def add_file(self, file_obj, name, contents):
dest = os.path.join(file_obj.dest, name)
open(dest, 'w').write(contents)
def file_url(self, file=None):
args = [self.files[0].pk, self.files[1].pk]
if file:
args.extend(['file', file])
return reverse('mkt.files.compare', args=args)
def check_urls(self, status):
for url in [self.poll_url(), self.file_url()]:
status_code = self.client.get(url).status_code
assert status_code == status, (
'Request to %s returned status code %d (expected %d)' %
(url, status_code, status))
def test_tree_no_file(self):
self.file_viewer.extract()
res = self.client.get(self.file_url('doesnotexist.js'))
eq_(res.status_code, 404)
def test_content_file(self):
self.file_viewer.extract()
res = self.client.get(self.file_url(not_binary))
doc = pq(res.content)
eq_(len(doc('pre')), 3)
def test_binary_serve_links(self):
self.file_viewer.extract()
res = self.client.get(self.file_url(binary))
doc = pq(res.content)
node = doc('#content-wrapper a')
eq_(len(node), 2)
assert node[0].text.startswith('Download 256.png')
def test_view_both_present(self):
self.file_viewer.extract()
res = self.client.get(self.file_url(not_binary))
doc = pq(res.content)
eq_(len(doc('pre')), 3)
eq_(len(doc('#content-wrapper p')), 4)
def test_view_one_missing(self):
self.file_viewer.extract()
os.remove(os.path.join(self.file_viewer.right.dest, 'manifest.webapp'))
res = self.client.get(self.file_url(not_binary))
doc = pq(res.content)
eq_(len(doc('pre')), 3)
eq_(len(doc('#content-wrapper p')), 2)
def test_view_left_binary(self):
self.file_viewer.extract()
filename = os.path.join(self.file_viewer.left.dest, 'manifest.webapp')
open(filename, 'w').write('MZ')
res = self.client.get(self.file_url(not_binary))
assert 'This file is not viewable online' in res.content
def test_view_right_binary(self):
self.file_viewer.extract()
filename = os.path.join(self.file_viewer.right.dest, 'manifest.webapp')
open(filename, 'w').write('MZ')
assert not self.file_viewer.is_diffable()
res = self.client.get(self.file_url(not_binary))
assert 'This file is not viewable online' in res.content
def test_different_tree(self):
self.file_viewer.extract()
os.remove(os.path.join(self.file_viewer.left.dest, not_binary))
res = self.client.get(self.file_url(not_binary))
doc = pq(res.content)
eq_(doc('h4:last').text(), 'Deleted files:')
eq_(len(doc('ul.root')), 2)
def test_file_chooser_selection(self):
res = self.client.get(self.file_url())
doc = pq(res.content)
eq_(doc('#id_left option[selected]').attr('value'),
str(self.files[0].id))
eq_(doc('#id_right option[selected]').attr('value'),
str(self.files[1].id))
| 36.091954
| 79
| 0.618312
|
12155082692473c88c0892a1f8213dfd73e85b32
| 7,296
|
py
|
Python
|
opentelemetry-sdk/tests/context/propagation/test_b3_format.py
|
Jamim/opentelemetry-python
|
6d1cd1f8f826bd7f36baaee949ff66f3c8007243
|
[
"Apache-2.0"
] | 1
|
2020-01-15T06:58:27.000Z
|
2020-01-15T06:58:27.000Z
|
opentelemetry-sdk/tests/context/propagation/test_b3_format.py
|
willingc/opentelemetry-python
|
c24b3b5032cd27dd4912348e633abcd2bb48d53a
|
[
"Apache-2.0"
] | null | null | null |
opentelemetry-sdk/tests/context/propagation/test_b3_format.py
|
willingc/opentelemetry-python
|
c24b3b5032cd27dd4912348e633abcd2bb48d53a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import opentelemetry.sdk.context.propagation.b3_format as b3_format
import opentelemetry.sdk.trace as trace
import opentelemetry.trace as trace_api
FORMAT = b3_format.B3Format()
def get_as_list(dict_object, key):
value = dict_object.get(key)
return [value] if value is not None else []
class TestB3Format(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.serialized_trace_id = b3_format.format_trace_id(
trace.generate_trace_id()
)
cls.serialized_span_id = b3_format.format_span_id(
trace.generate_span_id()
)
def test_extract_multi_header(self):
"""Test the extraction of B3 headers."""
carrier = {
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.SAMPLED_KEY: "1",
}
span_context = FORMAT.extract(get_as_list, carrier)
new_carrier = {}
FORMAT.inject(span_context, dict.__setitem__, new_carrier)
self.assertEqual(
new_carrier[FORMAT.TRACE_ID_KEY], self.serialized_trace_id
)
self.assertEqual(
new_carrier[FORMAT.SPAN_ID_KEY], self.serialized_span_id
)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1")
def test_extract_single_header(self):
"""Test the extraction from a single b3 header."""
carrier = {
FORMAT.SINGLE_HEADER_KEY: "{}-{}".format(
self.serialized_trace_id, self.serialized_span_id
)
}
span_context = FORMAT.extract(get_as_list, carrier)
new_carrier = {}
FORMAT.inject(span_context, dict.__setitem__, new_carrier)
self.assertEqual(
new_carrier[FORMAT.TRACE_ID_KEY], self.serialized_trace_id
)
self.assertEqual(
new_carrier[FORMAT.SPAN_ID_KEY], self.serialized_span_id
)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1")
def test_extract_header_precedence(self):
"""A single b3 header should take precedence over multiple
headers.
"""
single_header_trace_id = self.serialized_trace_id[:-3] + "123"
carrier = {
FORMAT.SINGLE_HEADER_KEY: "{}-{}".format(
single_header_trace_id, self.serialized_span_id
),
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.SAMPLED_KEY: "1",
}
span_context = FORMAT.extract(get_as_list, carrier)
new_carrier = {}
FORMAT.inject(span_context, dict.__setitem__, new_carrier)
self.assertEqual(
new_carrier[FORMAT.TRACE_ID_KEY], single_header_trace_id
)
def test_enabled_sampling(self):
"""Test b3 sample key variants that turn on sampling."""
for variant in ["1", "True", "true", "d"]:
carrier = {
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.SAMPLED_KEY: variant,
}
span_context = FORMAT.extract(get_as_list, carrier)
new_carrier = {}
FORMAT.inject(span_context, dict.__setitem__, new_carrier)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1")
def test_disabled_sampling(self):
"""Test b3 sample key variants that turn off sampling."""
for variant in ["0", "False", "false", None]:
carrier = {
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.SAMPLED_KEY: variant,
}
span_context = FORMAT.extract(get_as_list, carrier)
new_carrier = {}
FORMAT.inject(span_context, dict.__setitem__, new_carrier)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "0")
def test_flags(self):
"""x-b3-flags set to "1" should result in propagation."""
carrier = {
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.FLAGS_KEY: "1",
}
span_context = FORMAT.extract(get_as_list, carrier)
new_carrier = {}
FORMAT.inject(span_context, dict.__setitem__, new_carrier)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1")
def test_flags_and_sampling(self):
"""Propagate if b3 flags and sampling are set."""
carrier = {
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.FLAGS_KEY: "1",
}
span_context = FORMAT.extract(get_as_list, carrier)
new_carrier = {}
FORMAT.inject(span_context, dict.__setitem__, new_carrier)
self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1")
def test_64bit_trace_id(self):
"""64 bit trace ids should be padded to 128 bit trace ids."""
trace_id_64_bit = self.serialized_trace_id[:16]
carrier = {
FORMAT.TRACE_ID_KEY: trace_id_64_bit,
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.FLAGS_KEY: "1",
}
span_context = FORMAT.extract(get_as_list, carrier)
new_carrier = {}
FORMAT.inject(span_context, dict.__setitem__, new_carrier)
self.assertEqual(
new_carrier[FORMAT.TRACE_ID_KEY], "0" * 16 + trace_id_64_bit
)
def test_invalid_single_header(self):
"""If an invalid single header is passed, return an
invalid SpanContext.
"""
carrier = {FORMAT.SINGLE_HEADER_KEY: "0-1-2-3-4-5-6-7"}
span_context = FORMAT.extract(get_as_list, carrier)
self.assertEqual(span_context.trace_id, trace_api.INVALID_TRACE_ID)
self.assertEqual(span_context.span_id, trace_api.INVALID_SPAN_ID)
def test_missing_trace_id(self):
"""If a trace id is missing, populate an invalid trace id."""
carrier = {
FORMAT.SPAN_ID_KEY: self.serialized_span_id,
FORMAT.FLAGS_KEY: "1",
}
span_context = FORMAT.extract(get_as_list, carrier)
self.assertEqual(span_context.trace_id, trace_api.INVALID_TRACE_ID)
def test_missing_span_id(self):
"""If a trace id is missing, populate an invalid trace id."""
carrier = {
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.FLAGS_KEY: "1",
}
span_context = FORMAT.extract(get_as_list, carrier)
self.assertEqual(span_context.span_id, trace_api.INVALID_SPAN_ID)
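# --- Illustrative sketch (not part of the original test module) ---
# test_64bit_trace_id above checks that the propagator left-pads a
# 16-hex-character (64 bit) trace id to 32 hex characters (128 bit). The tiny
# helper below only restates that padding rule; its name is hypothetical and
# it is not an OpenTelemetry API.
def _pad_trace_id_to_128_bit(trace_id_hex):
    """Left-pad a hex trace id string with zeros to 32 characters."""
    return trace_id_hex.zfill(32)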
| 39.225806
| 75
| 0.645833
|
b6c67ed9e7213a27b6a611c1fb51b0c21b2bca3c
| 5,344
|
py
|
Python
|
code/data_wrangling.py
|
dirac-institute/TESSHacks
|
a6b08102cc30f61e48394bab391001a1cc571046
|
[
"MIT"
] | 1
|
2018-12-06T19:29:53.000Z
|
2018-12-06T19:29:53.000Z
|
code/data_wrangling.py
|
dirac-institute/TESSHacks
|
a6b08102cc30f61e48394bab391001a1cc571046
|
[
"MIT"
] | null | null | null |
code/data_wrangling.py
|
dirac-institute/TESSHacks
|
a6b08102cc30f61e48394bab391001a1cc571046
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import glob
import astropy.io.fits as fits
def read_tess_info(datadir):
"""
Traverse the directory structure of TESS light curve
files and get out some useful information, like
the TESS object ID, the subdirectory, file name
and cadence.
Parameters
----------
datadir: str
The path to the top level directory where the
TESS data is located.
Returns
-------
tess_info : pd.DataFrame
A DataFrame with some useful information about the
TESS light curves.
Columns:
* subdir: the sub-directory where the file is located
* filename: the actual file name of the data file
* daterange: the date range of the data, taken from the file name
* sector_no : the sector number, taken from the file name
* tess_id: the Tess Object Identifier, taken from the file name
* cadence: the cadence of the light curve, taken from the file name
"""
# glob all the data files
datafiles = glob.glob(datadir+"**/*.fits", recursive=True)
tess_info = []
# loop over all data files
for f in datafiles:
# make an empty dictionary for the information
single_info = {}
# split the path into folders
fsplit = f.split("/")
# get out the sub-directory with the file
single_info["subdir"] = fsplit[-2]
# get the filename
single_info["filename"] = fsplit[-1]
# split the filename
fname_split = single_info["filename"].split("-")
# split out information in the filename
single_info["daterange"] = fname_split[0]
single_info["sector_no"] = fname_split[1]
single_info["tess_id"] = np.int(fname_split[2])
single_info["cadence"] = fname_split[3]
# append to list
tess_info.append(single_info)
# make a data frame
tess_info = pd.DataFrame(tess_info)
return tess_info
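# Illustrative usage sketch; the data directory below is a placeholder path, not
# something defined in this repository:
#
#     tess_info = read_tess_info("/data/tess/sector01/")
#     print(tess_info[["tess_id", "cadence", "subdir"]].head())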
def crossmatch_gaia(tess_info, gaia_files):
"""
Compute a cross-matched table with the GAIA information.
Parameters
----------
tess_info: pd.DataFrame
A data frame e.g. as made by `read_tess_info` above.
gaia_files: list of str
A list of paths to CSV files that cross-match Gaia on the
TESS object IDs. The relevant column in those files with the
TIC must be called `ticid`, in `tess_info` must be called `tess_id`
Return
------
tess_gaia : pd.DataFrame
Merged data frame.
"""
gaiamatch = []
for gf in gaia_files:
gm = pd.read_csv(gf)
gaiamatch.append(gm)
# concatenate all the individual cross-matches
gaia_merged = pd.concat(gaiamatch, ignore_index=True, sort=True)
# merge the TESS and Gaia data frames
    tess_gaia = pd.merge(tess_info, gaia_merged, left_on="tess_id", right_on="ticid")
return tess_gaia
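# Illustrative usage sketch; the CSV paths are placeholders, and the cross-match files
# are assumed to contain a `ticid` column as described in the docstring above:
#
#     gaia_files = glob.glob("/data/gaia/xmatch_*.csv")
#     tess_gaia = crossmatch_gaia(tess_info, gaia_files)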
def read_tess_lightcurve(filename, pdc=True, quality_flag=0):
"""
Read out a TESS light curve out of a light curve file.
Parameters
----------
filename : str
The path and file name of the FITS file to be read
pdc : bool, default True
If True, use the Pre-search Data Conditioning (PDC)
corrected light curve (`PDCSAP_FLUX`). If False,
then read out the uncorrected light curves (`SAP_FLUX`).
Note: the uncorrected light curves have lots of instrumental
effects, but the corrected ones might have smoothed out
some periodic structure.
    quality_flag : int, default 0
The quality flag to denote "good" data, default is 0
"""
    hdulist = fits.open(filename)
# get out some header information
data = {}
data["tstart"] = hdulist[0].header["TSTART"]
data["tstop"] = hdulist[0].header["TSTOP"]
data["date_obs"] = hdulist[0].header["DATE-OBS"]
data["date_end"] = hdulist[0].header["DATE-END"]
data["ticid"] = hdulist[0].header["TICID"]
data["ra"] = hdulist[0].header["RA_OBJ"]
data["dec"] = hdulist[0].header["DEC_OBJ"]
data["pmra"] = hdulist[0].header["PMRA"]
data["pmdec"] = hdulist[0].header["PMDEC"]
data["pmtotal"] = hdulist[0].header["PMTOTAL"]
data["tessmag"] = hdulist[0].header["TESSMAG"]
data["teff"] = hdulist[0].header["TEFF"]
data["log_g"] = hdulist[0].header["LOGG"]
data["mh"] = hdulist[0].header["MH"]
data["radius"] = hdulist[0].header["RADIUS"]
# set the correct key for reading out the flux
flux_key = "SAP_FLUX"
flux_err_key = "SAP_FLUX_ERR"
if pdc:
flux_key = "PDC%s"%flux_key
flux_err_key = "PDC%s"%flux_err_key
# read out the actual data
# note: first data point in flux and flux_err seems
# to be NaN, so I'm going to exclude it:
time = hdulist[1].data.field("TIME")[1:]
    flux = hdulist[1].data.field(flux_key)[1:]
    flux_err = hdulist[1].data.field(flux_err_key)[1:]
quality = hdulist[1].data.field("QUALITY")[1:]
hdulist.close()
# get out good quality data and point that are
# not NaN or inf
mask = (quality == quality_flag) & (np.isfinite(flux))
data["time"] = time[mask]
data["flux"] = flux[mask]
data["flux_err"] = flux_err[mask]
return data
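# Illustrative usage sketch; the FITS path is a placeholder:
#
#     lc = read_tess_lightcurve("/data/tess/sector01/tess_lc.fits", pdc=True)
#     # lc["time"], lc["flux"] and lc["flux_err"] hold the quality-filtered light curve,
#     # alongside the header metadata (TIC id, coordinates, stellar parameters).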
| 31.809524
| 82
| 0.622942
|
d4ad464bcfce2d9a45fa5fb431148c2eb7a99e24
| 2,448
|
py
|
Python
|
backend/voting/tests/test_schema.py
|
patrick91/pycon
|
9d7e15f540adcf0eaceb61fdbf67206d6aef73ec
|
[
"MIT"
] | 2
|
2017-07-18T21:51:25.000Z
|
2017-12-23T11:08:39.000Z
|
backend/voting/tests/test_schema.py
|
patrick91/pycon
|
9d7e15f540adcf0eaceb61fdbf67206d6aef73ec
|
[
"MIT"
] | 23
|
2017-07-18T20:22:38.000Z
|
2018-01-05T05:45:15.000Z
|
backend/voting/tests/test_schema.py
|
patrick91/pycon
|
9d7e15f540adcf0eaceb61fdbf67206d6aef73ec
|
[
"MIT"
] | 2
|
2017-07-18T21:27:33.000Z
|
2017-07-18T22:07:03.000Z
|
from pytest import mark
@mark.django_db
def test_get_logged_user_vote_on_a_submission(
graphql_client, user, vote_factory, settings, requests_mock
):
vote = vote_factory(user_id=user.id, value=1)
conference = vote.submission.conference
requests_mock.post(
f"{settings.PRETIX_API}organizers/{conference.pretix_organizer_id}/events/{conference.pretix_event_id}/tickets/attendee-has-ticket/",
json={"user_has_admission_ticket": True},
)
graphql_client.force_login(user)
response = graphql_client.query(
"""query MyVote($conference: String!) {
conference(code: $conference) {
submissions {
myVote {
value
}
}
}
}
""",
variables={"conference": conference.code},
)
assert response["data"]["conference"]["submissions"][0]["myVote"]["value"] == 1
@mark.django_db
def test_cannot_get_my_vote_as_unlogged(graphql_client, user, vote_factory):
vote = vote_factory(user_id=user.id)
response = graphql_client.query(
"""query MyVote($conference: String!) {
conference(code: $conference) {
submissions {
myVote {
value
}
}
}
}
""",
variables={"conference": vote.submission.conference.code},
)
assert (
response["errors"][0]["message"]
== "You need to have a ticket to see submissions"
)
assert response["errors"][0]["path"] == ["conference", "submissions"]
@mark.django_db
def test_get_my_vote_when_the_user_never_voted(
graphql_client, user, submission_factory, requests_mock, settings
):
submission = submission_factory()
conference = submission.conference
requests_mock.post(
f"{settings.PRETIX_API}organizers/{conference.pretix_organizer_id}/events/{conference.pretix_event_id}/tickets/attendee-has-ticket/",
json={"user_has_admission_ticket": True},
)
graphql_client.force_login(user)
response = graphql_client.query(
"""query MyVote($conference: String!) {
conference(code: $conference) {
submissions {
myVote {
value
}
}
}
}
""",
variables={"conference": conference.code},
)
assert response["data"]["conference"]["submissions"][0]["myVote"] is None
| 27.505618
| 141
| 0.609069
|
11d11ab3a01afb04daee15180e6ad7df6279911c
| 3,686
|
py
|
Python
|
airflow/kubernetes/kube_client.py
|
codejunction/airflow
|
04614841c77154cae64df175252a3bcf64d4e6ea
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2019-11-18T22:51:10.000Z
|
2019-11-18T22:51:10.000Z
|
airflow/kubernetes/kube_client.py
|
codejunction/airflow
|
04614841c77154cae64df175252a3bcf64d4e6ea
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2018-03-23T10:16:45.000Z
|
2018-09-09T11:47:07.000Z
|
airflow/kubernetes/kube_client.py
|
codejunction/airflow
|
04614841c77154cae64df175252a3bcf64d4e6ea
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Client for kubernetes communication"""
from typing import Optional
from airflow.configuration import conf
try:
from kubernetes import config, client
from kubernetes.client.rest import ApiException # pylint: disable=unused-import
from kubernetes.client.api_client import ApiClient
from kubernetes.client import Configuration
from airflow.kubernetes.refresh_config import ( # pylint: disable=ungrouped-imports
load_kube_config,
RefreshConfiguration,
)
has_kubernetes = True
def _get_kube_config(in_cluster: bool,
cluster_context: Optional[str],
config_file: Optional[str]) -> Optional[Configuration]:
if in_cluster:
# load_incluster_config set default configuration with config populated by k8s
config.load_incluster_config()
return None
else:
# this block can be replaced with just config.load_kube_config once
# refresh_config module is replaced with upstream fix
cfg = RefreshConfiguration()
load_kube_config(
client_configuration=cfg, config_file=config_file, context=cluster_context)
return cfg
def _get_client_with_patched_configuration(cfg: Optional[Configuration]) -> client.CoreV1Api:
'''
This is a workaround for supporting api token refresh in k8s client.
The function can be replace with `return client.CoreV1Api()` once the
upstream client supports token refresh.
'''
if cfg:
return client.CoreV1Api(api_client=ApiClient(configuration=cfg))
else:
return client.CoreV1Api()
except ImportError as e:
# We need an exception class to be able to use it in ``except`` elsewhere
# in the code base
ApiException = BaseException
has_kubernetes = False
_import_err = e
def get_kube_client(in_cluster: bool = conf.getboolean('kubernetes', 'in_cluster'),
cluster_context: Optional[str] = None,
config_file: Optional[str] = None):
"""
Retrieves Kubernetes client
:param in_cluster: whether we are in cluster
:type in_cluster: bool
:param cluster_context: context of the cluster
:type cluster_context: str
:param config_file: configuration file
:type config_file: str
    :return: kubernetes client
    :rtype: client.CoreV1Api
"""
if not has_kubernetes:
raise _import_err
if not in_cluster:
if cluster_context is None:
cluster_context = conf.get('kubernetes', 'cluster_context', fallback=None)
if config_file is None:
config_file = conf.get('kubernetes', 'config_file', fallback=None)
client_conf = _get_kube_config(in_cluster, cluster_context, config_file)
return _get_client_with_patched_configuration(client_conf)
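# Illustrative usage sketch (namespace and kubeconfig path are placeholders, not part
# of this module); list_namespaced_pod is the standard kubernetes CoreV1Api call:
#
#     core_v1 = get_kube_client(in_cluster=False, config_file="~/.kube/config")
#     pods = core_v1.list_namespaced_pod(namespace="default")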
| 38.395833
| 97
| 0.698589
|
c428eacd1e7bfc6bd588dbe783564cc3a1d7417f
| 26,889
|
py
|
Python
|
UQLibrary/gsa.py
|
HarleyHanes/UQLibrary-CourseProject
|
d66b73b12a68f144be468e6cda109ca0e12a7f9c
|
[
"MIT"
] | null | null | null |
UQLibrary/gsa.py
|
HarleyHanes/UQLibrary-CourseProject
|
d66b73b12a68f144be468e6cda109ca0e12a7f9c
|
[
"MIT"
] | null | null | null |
UQLibrary/gsa.py
|
HarleyHanes/UQLibrary-CourseProject
|
d66b73b12a68f144be468e6cda109ca0e12a7f9c
|
[
"MIT"
] | 2
|
2022-01-25T18:03:53.000Z
|
2022-01-25T18:05:21.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 18 14:03:35 2022
@author: USER
"""
#3rd party Modules
import numpy as np
#import sys
import warnings
from scipy.stats import qmc
import scipy.stats as sct
import mpi4py.MPI as MPI
class GsaOptions:
def __init__(self, run = True, run_sobol=True, run_morris=True, n_samp_sobol=100000, \
n_samp_morris=4, l_morris=3):
self.run = run
if self.run == False:
self.run_sobol = False
self.run_morris = False
else:
self.run_sobol=run_sobol #Whether to run Sobol (True or False)
self.run_morris=run_morris #Whether to run Morris (True or False)
self.n_samp_sobol = n_samp_sobol #Number of samples to be generated for GSA
self.n_samp_morris = n_samp_morris
self.l_morris=l_morris
pass
class GsaResults:
#
def __init__(self,sobol_base=np.nan, sobol_tot=np.nan, f_a=np.nan, f_b=np.nan, f_d=np.nan, f_ab=np.nan, \
samp_d=np.nan, morris_std=np.nan, morris_mean_abs=np.nan, morris_mean=np.nan):
self.sobol_base=sobol_base
self.sobol_tot=sobol_tot
self.f_a=f_a
self.f_b=f_b
self.f_d=f_d
self.f_ab=f_ab
self.samp_d=samp_d
self.morris_mean_abs=morris_mean_abs
self.morris_mean = morris_mean
self.morris_std=morris_std
pass
##--------------------------------------GSA-----------------------------------------------------
def run_gsa(model, gsa_options, logging = False):
"""Implements global sensitivity analysis using Morris or Sobol analysis.
Parameters
----------
    model : Model
        Contains simulation information.
    gsa_options : GsaOptions
        Contains run settings.
Returns
-------
GsaResults
Holds all run results
"""
    #GSA implements the following global sensitivity analysis methods on the "model" object
# 1) Gets sampling distribution (used only for internal calculations)
# 2) Calculates Sobol Indices
# 3) Performs Morris Screenings (not yet implemented)
# 4) Produces histogram plots for QOI values (not yet implemented)
# Required Inputs: Object of class "model" and object of class "options"
# Outputs: Object of class gsa with fisher and sobol elements
#Load mpi details to keep track of thread number
mpi_comm = MPI.COMM_WORLD
mpi_rank = mpi_comm.Get_rank()
mpi_size = mpi_comm.Get_size()
# Initialize gsa_results in all threads
gsa_results = GsaResults()
#Morris Screening
if gsa_options.run_morris:
#Set non-biased perturbation distance for even l
        #Source: Smith, R. 2011. Uncertainty Quantification. p.333
pert_distance = gsa_options.l_morris/ (2*(gsa_options.l_morris-1))
#Create parameter sample only on thread 0 since it need not be parallelized
# initialize memory location on all threads
morris_samp = np.zeros((gsa_options.n_samp_morris*(model.n_poi+1), model.n_poi),dtype = float)
if logging > 1:
print("initialized morris_samp of size: " + str(morris_samp.shape))
if mpi_rank == 0:
if logging:
print("Generating Morris Sample")
morris_samp = get_morris_poi_sample(model.sample_fcn, gsa_options.n_samp_morris,\
model.n_poi, pert_distance)
mpi_comm.Bcast([morris_samp,MPI.DOUBLE], root = 0)
morris_mean_abs, morris_mean, morris_std = calculate_morris(\
model.eval_fcn, morris_samp, \
pert_distance, logging = logging)
gsa_results.morris_mean_abs=morris_mean_abs
gsa_results.morris_mean = morris_mean
gsa_results.morris_std=morris_std
#Sobol Analysis Un parallelized for now
if gsa_options.run_sobol and mpi_rank == 0:
if logging:
print("Generating Sobol Sample")
#Make Distribution Samples and Calculate model results
[f_a, f_b, f_ab, f_d, samp_d] = get_sobol_sample(model, gsa_options)
#Calculate Sobol Indices
if logging:
print("Calculating Sobol Sample")
[sobol_base, sobol_tot]=calculate_sobol(f_a, f_b, f_ab, f_d)
gsa_results.f_d=f_d
gsa_results.f_a=f_a
gsa_results.f_b=f_b
gsa_results.f_ab=f_ab
gsa_results.samp_d=samp_d
gsa_results.sobol_base=sobol_base
gsa_results.sobol_tot=sobol_tot
#------------broadcast gsa results to other threads--------------------
return gsa_results
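# Illustrative call sketch. It assumes a UQLibrary ``Model`` object named ``model``
# (with eval_fcn, sample_fcn, n_poi and n_qoi configured elsewhere); none of these
# names are defined in this module:
#
#     options = GsaOptions(run_sobol=True, run_morris=True, n_samp_sobol=2**12)
#     results = run_gsa(model, options, logging=1)
#     results.sobol_tot        # n_qoi x n_poi total Sobol indices
#     results.morris_mean_abs  # mean absolute elementary effects per POI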
###----------------------------------------------------------------------------------------------
###-------------------------------------Support Functions----------------------------------------
###----------------------------------------------------------------------------------------------
def get_sobol_sample(model,gsa_options):
"""Constructs and evaluates sobol samples using predefined sampling distributions.
Currently only function for uniform or saltelli normal
Parameters
----------
model : Model
Contaings simulation information.
gsa_options : GSAOptions
Contains run settings
Returns
-------
np.ndarray
n_samp_sobol x n_qoi array of evaluations of Sobol sample part a
np.ndarray
n_samp_sobol x n_qoi array of evaluations of Sobol sample part b
np.ndarray
n_samp_sobol x n_qoi array x n_poi array of evaluations of mixed Sobol sample ab
np.ndarray
2*n_samp_sobol x n_qoi array of concatenated evaluations of part a and b
np.ndarray
2*n_samp_sobol x n_poi array of concatenated POI samples of part a and b
"""
n_samp_sobol = gsa_options.n_samp_sobol
# Make 2 POI sample matrices with n_samp_sobol samples each
# if np.all(model.dist_type!=np.array(["satelli normal", "satelli uniform"])):
# warnings.warn("Non-satelli sampling algorithm used for Sobol analysis."\
# + " Suggested distribution types are satelli normal "+\
# "and satelli uniform.")
sample_compact = model.sample_fcn(2*n_samp_sobol)
f_compact = model.eval_fcn(sample_compact)
    # Separate sample into a and b for the algorithm
samp_a = sample_compact[:n_samp_sobol]
samp_b = sample_compact[n_samp_sobol:]
f_a = f_compact[:n_samp_sobol]
f_b = f_compact[n_samp_sobol:] # n_samp_sobol x nQOI out matrix from B
# Stack the output matrices into a single matrix
f_d = np.concatenate((f_a.copy(), f_b.copy()), axis=0)
# Initialize combined QOI sample matrices
if model.n_qoi == 1:
f_ab = np.empty([n_samp_sobol, model.n_poi])
else:
f_ab = np.empty([n_samp_sobol, model.n_poi, model.n_qoi])
for i_param in range(0, model.n_poi):
# Define sampC to be A with the ith parameter in B
samp_ab = samp_a.copy()
samp_ab[:, i_param] = samp_b[:, i_param].copy()
if model.n_qoi == 1:
f_ab[:, i_param] = model.eval_fcn(samp_ab).squeeze()
else:
f_ab[:, i_param, :] = model.eval_fcn(samp_ab) # n_samp_sobol x nPOI x nQOI tensor
del samp_ab
return f_a, f_b, f_ab, f_d, sample_compact
def calculate_sobol(f_a, f_b, f_ab, f_d):
"""Calculates 1st order and total sobol indices using Saltelli approximation formula.
Parameters
----------
f_a : np.ndarray
n_samp_sobol x n_qoi array of evaluations of Sobol sample part a
f_b : np.ndarray
n_samp_sobol x n_qoi array of evaluations of Sobol sample part b
f_ab : np.ndarray
n_samp_sobol x n_qoi array x n_poi array of evaluations of mixed Sobol sample ab
f_d : np.ndarray
2*n_samp_sobol x n_qoi array of concatenated evaluations of part a and b
Returns
-------
np.ndarray
n_qoi x n_poi array of 1st order Sobol indices
np.ndarray
n_qoi x n_poi array of total Sobol indices
"""
    #Calculates Sobol indices using the Saltelli approximation method
    #Inputs: model object (with eval_fcn, sample, and nParams)
    #        sobolOptions object
    #Determine number of samples, QOIs, and POIs based on inputs
if f_ab.ndim==1:
n_qoi=1
n_poi=1
elif f_ab.ndim==2:
n_qoi=1
n_poi=f_ab.shape[1]
elif f_ab.ndim==3:
n_poi=f_ab.shape[1]
n_qoi=f_ab.shape[2]
else:
        raise(Exception('f_ab has more than 3 dimensions, make sure f_ab is ' \
                        'the squeezed form of n_samp_sobol x nPOI x nQOI'))
#QOI variance
fDvar=np.var(f_d, axis=0)
sobol_base=np.empty((n_qoi, n_poi))
sobol_tot=np.empty((n_qoi, n_poi))
if n_qoi==1:
#Calculate 1st order parameter effects
sobol_base=np.mean(f_b*(f_ab-f_a), axis=0)/(fDvar)
        #Calculate total-order parameter effects
sobol_tot=np.mean((f_a-f_ab)**2, axis=0)/(2*fDvar)
else:
for iQOI in range(0,n_qoi):
#Calculate 1st order parameter effects
sobol_base[iQOI,:]=np.mean(f_b[:,[iQOI]]*(f_ab[:,:,iQOI]-f_a[:,[iQOI]]),axis=0)/fDvar[iQOI]
            #Calculate total-order parameter effects
sobol_tot[iQOI,:]= np.mean((f_a[:,[iQOI]]-f_ab[:,:,iQOI])**2,axis=0)/(2*fDvar[iQOI])
return sobol_base, sobol_tot
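def _sobol_toy_example():
    """Minimal self-contained sketch of the estimator above, not part of the library API.
    Uses an assumed toy model f(x) = x1 + 2*x2 with independent U(0,1) inputs, whose
    analytical first-order indices are 0.2 and 0.8; the total indices are identical
    because the model has no interactions."""
    n, n_poi = 2 ** 12, 2
    base = saltelli_sample(2 * n, n_poi)     # 2n x n_poi low-discrepancy sample in [0,1]
    samp_a, samp_b = base[:n], base[n:]
    toy = lambda x: (x[:, 0] + 2.0 * x[:, 1]).reshape(-1, 1)
    f_a, f_b = toy(samp_a), toy(samp_b)
    f_d = np.concatenate((f_a, f_b), axis=0)
    f_ab = np.empty((n, n_poi))
    for i_poi in range(n_poi):
        # replace one column of A with the corresponding column of B
        samp_ab = samp_a.copy()
        samp_ab[:, i_poi] = samp_b[:, i_poi]
        f_ab[:, i_poi] = toy(samp_ab).squeeze()
    return calculate_sobol(f_a, f_b, f_ab, f_d)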
#==============================================================================
#----------------------------------Morris Sampling-----------------------------
#==============================================================================
##--------------------------------calculate_morris-----------------------------
def calculate_morris(eval_fcn, morris_samp, pert_distance, logging = False):
"""Calculates morris samples using information from Model and GsaOptions objects.
Parameters
----------
model : Model
Contaings simulation information.
gsa_options : GSAOptions
Contains run settings
Returns
-------
np.ndarray
n_qoi x n_poi array of morris sensitivity mean indices
np.ndarray
n_qoi x n_poi array of morris sensitivity variance indices
"""
#Evaluate Sample
#Load mpi details to keep track of thread number
mpi_comm = MPI.COMM_WORLD
mpi_rank = mpi_comm.Get_rank()
mpi_size = mpi_comm.Get_size()
if logging and mpi_rank == 0:
print("Evaulating Morris Sample")
if mpi_size == 1:
f_eval_compact = eval_fcn(morris_samp)
else:
f_eval_compact = parallel_eval(eval_fcn, morris_samp, logging = logging)
#Make sure all threads finish collecting f_eval_compact before continuing
mpi_comm.Barrier()
if logging > 1 and mpi_rank == 0:
print("f_eval_compact: " + str(f_eval_compact))
# Initialize Morris indices so that the memory is reserved when broadcasting
if f_eval_compact.ndim == 2:
morris_mean_abs = np.zeros((morris_samp.shape[1], f_eval_compact.shape[1]), dtype = float) # n_poi x n_qoi
morris_mean = np.zeros(morris_mean_abs.shape, dtype = float)
morris_std = np.zeros(morris_mean_abs.shape, dtype = float) # n_poi x n_qoi
else :
morris_mean_abs = np.zeros((morris_samp.shape[1]), dtype = float) # n_poi x n_qoi
morris_mean = np.zeros(morris_mean_abs.shape, dtype = float)
morris_std = np.zeros(morris_mean_abs.shape, dtype = float) # n_poi x n_qoi
# Perform morris calculation only on base thread
if mpi_rank == 0:
        #Compute # of POIs, QOIs and samples to ensure consistency
if morris_samp.ndim == 1:
n_poi = 1
elif morris_samp.ndim == 2:
n_poi = morris_samp.shape[1]
else:
raise Exception("More than 2 dimensions in morris_samp")
#Convert to int so it can be used in indexing
n_samp = int(morris_samp.shape[0]/(n_poi+1))
if f_eval_compact.ndim == 2:
n_qoi = f_eval_compact.shape[1]
elif f_eval_compact.ndim ==1:
n_qoi = 1
else:
raise Exception("More than 2 dimensions in f_eval")
#Uncompact Samples
f_eval_seperated = morris_seperate(f_eval_compact, n_samp, n_poi, n_qoi)
morris_samp_seperated = morris_seperate(morris_samp, n_samp, n_poi, n_poi)
if logging > 1:
print("morris samp seperated: " + str(morris_samp_seperated))
#Get which sample perturbs which poi
poi_pert_location = get_poi_pert_location(morris_samp_seperated)
if logging > 1:
print("poi_pert_location: " + str(poi_pert_location))
#initialize data storage arrays with 1 dimension lower if n_qoi =1
if n_qoi > 1:
deriv_approx = np.empty((n_samp, n_poi, n_qoi)) # n_samp x n_poi x n_qoi
else:
deriv_approx = np.empty((n_samp, n_poi)) # n_samp x n_poi
if logging >1:
print("QOIs : " + str(f_eval_seperated))
#Apply finite difference formula
        #Source: Smith, R. 2011, Uncertainty Quantification. p.333
if logging > 0:
print("Calculating Morris indices")
for i_samp in range(n_samp):
for i_pert in range(n_poi):
i_poi = poi_pert_location[i_samp, i_pert]
deriv_approx[i_samp,i_poi] = (f_eval_seperated[i_samp,i_pert+1] - \
f_eval_seperated[i_samp,i_pert])/ pert_distance
# for i_poi in range(n_poi):
# deriv_approx[:,i_poi] = f_eval_seperated[:,i_poi+1] - f_eval_seperated[:,i_poi]
if logging > 1:
print("deriv approx: " + str(deriv_approx))
#Apply Morris Index formulas
        #Source: Smith, R. 2011, Uncertainty Quantification. p.332
morris_mean_abs = np.mean(np.abs(deriv_approx),axis = 0) # n_poi x n_qoi
morris_mean = np.mean(deriv_approx, axis = 0)
morris_std=np.sqrt(np.var(deriv_approx, axis=0)) # n_poi x n_qoi
if logging > 1:
print("morris mean abs: " + str(morris_mean_abs))
print("morris mean abs: " + str(morris_mean))
print("morris st: " + str(morris_std))
if logging and mpi_size > 1:
print("Broadcasting Morris Indices")
#Send out finished morris indices to all threads
mpi_comm.Bcast([morris_mean_abs, MPI.DOUBLE], root = 0)
mpi_comm.Bcast([morris_mean, MPI.DOUBLE], root = 0)
mpi_comm.Bcast([morris_std, MPI.DOUBLE], root = 0)
if logging:
print("Finished broadcasting (thread " + str(mpi_rank) + ")")
return morris_mean_abs, morris_mean, morris_std
def morris_seperate(qoi_compact, n_samp, n_poi, n_qoi):
if n_qoi > 1:
qoi_seperated = np.empty((n_samp, n_poi+1, n_qoi))
else:
qoi_seperated = np.empty((n_samp, n_poi+1))
#Seperate each parameter search for ease of computation
for i_samp in range(n_samp):
qoi_seperated[i_samp] = qoi_compact[i_samp*(n_poi+1):(i_samp+1)*(n_poi+1)].squeeze()
return qoi_seperated
def get_poi_pert_location(morris_samp_seperate):
n_samp = morris_samp_seperate.shape[0]
n_poi = morris_samp_seperate.shape[2]
poi_pert_location = np.empty((n_samp, n_poi))
for i_samp in range(n_samp):
for i_poi in range(n_poi):
poi_pert_location[i_samp,i_poi] = np.argmax(np.abs(morris_samp_seperate[i_samp,i_poi+1] \
- morris_samp_seperate[i_samp,i_poi]))
return poi_pert_location.astype(int)
##---------------------------get_morris_poi_sample-----------------------------
def get_morris_poi_sample(param_dist, n_samp, n_poi, pert_distance, random = False):
#Use sobol distributions for low discrepancy
#Generate n_samp_morris samples
random_samp = param_dist(n_samp)
#Define Sampling matrices that are constant
J=np.ones((n_poi+1,n_poi))
B = (np.tril(np.ones(J.shape), -1))
morris_samp_compact = np.empty((n_samp*(n_poi+1), n_poi))
for i_samp in range(n_samp):
jTheta=random_samp[i_samp,]*J
#Calculate Morris Sample matrix
#Source: Smith, R. 2011. Uncertainty Quantification. p.334
if random == True:
#Define Random Sampling matrices
#D=np.diag(np.random.choice(np.array([1,-1]), size=(n_poi,)))
#NOTE: using non-random step direction to keep denominator in deriv approx
# equal to delta rather than -delta for some samples. Random form is
# kept above in comments
D=np.diag(np.random.choice(np.array([1,1]), size=(n_poi,)))
P=np.identity(n_poi)
np.random.shuffle(P)
samp_mat = np.matmul(jTheta+pert_distance/2*(np.matmul((2*B-J),D)+J),P)
elif random == False:
#Define non-random Sampling matrices
D=np.diag(np.random.choice(np.array([1,1]), size=(n_poi,)))
P=np.identity(n_poi)
np.random.shuffle(P)
# Only use non-random formulations for testing matrix generation
samp_mat = jTheta+pert_distance/2*(np.matmul((2*B-J),D)+J)
        #Stack each grid search so that a single eval_fcn call is required
morris_samp_compact[i_samp*(n_poi+1):(i_samp+1)*(n_poi+1),:] = samp_mat
return morris_samp_compact
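# Illustrative shape check (the uniform sampler and the sizes below are made-up values):
#
#     samp = get_morris_poi_sample(lambda n: np.random.rand(n, 3),
#                                  n_samp=2, n_poi=3, pert_distance=0.75)
#     samp.shape == (2 * (3 + 1), 3)
#
# Each block of n_poi+1 consecutive rows is one trajectory that perturbs a single POI
# per step by pert_distance.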
#======================================================================================================
#----------------------------Parallelization Support---------------------------------------------------
#======================================================================================================
def parallel_eval(eval_fcn, poi_sample, logging = False):
""" Seperates samples and parallelizes model computations
Parameters
----------
eval_fcn : function
User defined function that maps POIs to QOIs
poi_sample : np.ndarray
n_samp x n_poi array of POI samples
Returns
-------
qoi_samp : np.ndarray
n_samp x n_qoi array of QOIs from each POI sample
"""
mpi_comm = MPI.COMM_WORLD
mpi_rank = mpi_comm.Get_rank()
mpi_size = mpi_comm.Get_size()
    # Separate POI samples into one subsample per thread
if mpi_rank == 0:
if logging > 1:
print("poi_sample in thread " + str(mpi_rank) + ": " + str(poi_sample))
for i_rank in range(mpi_size):
if mpi_rank == 0:
samp_per_subsample = int(np.round(poi_sample.shape[0]/mpi_size))
if i_rank == 0:
data = poi_sample[0:samp_per_subsample]
else:
if i_rank == mpi_size-1:
data_broadcast = poi_sample[(i_rank*samp_per_subsample):]
else:
data_broadcast = poi_sample[(i_rank*samp_per_subsample):((i_rank+1)*samp_per_subsample)]
mpi_comm.send(data_broadcast.shape, dest = i_rank, tag = 0)
mpi_comm.Send([data_broadcast,MPI.DOUBLE],dest = i_rank, tag = 1)
#print("poi_subsample sent to thread " + str(i_rank) + ": " + str(data_broadcast))
else:
data_shape = mpi_comm.recv(source = 0, tag = 0)
data = np.empty(data_shape)
mpi_comm.Recv(data,source=0, tag=1)
    # Evaluate each subsample
qoi_subsample = eval_fcn(data)
mpi_comm.Barrier()
if qoi_subsample.ndim == 2:
qoi_sample = np.zeros((poi_sample.shape[0], qoi_subsample.shape[1]), dtype = float)
else:
qoi_sample = np.zeros((poi_sample.shape[0]), dtype = float)
#print(poi_reconstructed)
if mpi_rank > 0:
mpi_comm.send(qoi_subsample.shape, dest = 0, tag = 0)
mpi_comm.Send([qoi_subsample, MPI.DOUBLE], dest = 0, tag =1)
#print("sending data from thread " + str(mpi_rank) + ": " + str(data))
elif mpi_rank ==0 :
total_samp=0
for i_rank in range(mpi_size):
if i_rank > 0:
subsample_shape = mpi_comm.recv(source = i_rank, tag = 0)
#print("receiving data from thread " + str(i_rank) + " of shape: " + str(data_shape))
else :
subsample_shape = qoi_subsample.shape
n_samp = subsample_shape[0]
if i_rank > 0:
#print("poi_reconstructed before receiving: " + str(poi_reconstructed))
mpi_comm.Recv(qoi_sample[total_samp:(total_samp+n_samp)], source = i_rank, tag=1)
else :
qoi_sample[total_samp:(total_samp+n_samp)] = qoi_subsample
if logging > 1:
print("qoi_reconstructed after receiving thread " + str(i_rank) + ": " + str(qoi_sample))
total_samp += n_samp
# Send back out qoi_sample so all threads have a return
mpi_comm.Bcast([qoi_sample, MPI.DOUBLE], root = 0)
return qoi_sample
#======================================================================================================
#-----------------------------------Sampling-----------------------------------------------------------
#======================================================================================================
##--------------------------------------GetSampDist----------------------------------------------------
def get_samp_dist(dist_type, dist_param, n_poi, fcn_inverse_cdf = np.nan):
"""Adds sampling function sample to model for drawing of low-discrepency
from given distribution type.
Parameters
----------
model : Model
Contaings simulation information.
gsa_options : GSAOptions
Contains run settings
Returns
-------
Model
model object with added sample function
"""
# Determine Sample Function- Currently only 1 distribution type can be defined for all parameters
if dist_type == 'normal': # Normal Distribution
sample_fcn = lambda n_samp_sobol: np.random.randn(n_samp_sobol, n_poi)*\
np.sqrt(dist_param[[1], :]) + dist_param[[0], :]
elif dist_type == 'saltelli normal':
sample_fcn = lambda n_samp_sobol: saltelli_normal(n_samp_sobol, dist_param)
elif dist_type == 'uniform': # uniform distribution
# doubleParms=np.concatenate(model.dist_param, model.dist_param, axis=1)
sample_fcn = lambda n_samp_sobol: np.random.rand(n_samp_sobol, n_poi)*\
(dist_param[[1], :]-dist_param[[0],:]) + dist_param[[0], :]
elif dist_type == 'saltelli uniform': # uniform distribution
# doubleParms=np.concatenate(model.dist_param, model.dist_param, axis=1)
sample_fcn = lambda n_samp_sobol: saltelli_uniform(n_samp_sobol, dist_param)
elif dist_type == 'exponential': # exponential distribution
sample_fcn = lambda n_samp_sobol: np.random.exponential(dist_param,size=(n_samp_sobol, n_poi))
elif dist_type == 'beta': # beta distribution
sample_fcn = lambda n_samp_sobol:np.random.beta(dist_param[[0],:], dist_param[[1],:],\
size=(n_samp_sobol, n_poi))
elif dist_type == 'InverseCDF': #Arbitrary distribution given by inverse cdf
        # note: "== np.nan" is always False, so check for a usable callable instead
        if not callable(fcn_inverse_cdf):
raise Exception("InverseCDF distribution selected but no function provided.")
sample_fcn = lambda n_samp_sobol: fcn_inverse_cdf(np.random.rand(n_samp_sobol, n_poi))
else:
raise Exception("Invalid value for model.dist_type. Supported distributions are normal, uniform, exponential, beta, \
and InverseCDF") # Raise Exception if invalide distribution is entered
return sample_fcn
def saltelli_sample(n_samp, n_poi):
"""Constructs a uniform [0,1] low discrepency saltelli sample for use in Sobol
index approximation
Parameters
----------
n_samp_sobol : int
Number of samples to take
dist_param : np.ndarray
2 x n_poi array of min and max sampling bounds for each parameter
Returns
-------
np.ndarray
Low discrepancy POI sample of uniform distribution on [0,1] constructed
using satelli's alrogrithm
"""
#Add .5 to n_samp/2 so that if n_samp is odd, an extra sample is generated
sampler = qmc.Sobol(d= n_poi*2, scramble = True)
#Use the smallest log2 sample size at least as large as n_samp to keep
# quadrature balance
# (see https://scipy.github.io/devdocs/reference/generated/scipy.stats.qmc.Sobol.html )
base_sample = sampler.random_base2(m=int(np.ceil(np.log2(n_samp/2))))
#Add .5 to n_samp/2 so that if n_samp is odd, an extra sample is generated
base_sample=base_sample[:int(n_samp/2+.5),:]
sample = np.empty((n_samp, n_poi))
    #Separate and stack half the samples in the 2nd dimension for Saltelli's
    # algorithm
if n_samp%2==0:
sample[:int((n_samp)/2),:]=base_sample[:,0:n_poi]
sample[int((n_samp)/2):,:]=base_sample[:,n_poi:]
else :
sample[:int((n_samp+.5)/2),:] = base_sample[:,0:n_poi]
sample[int((n_samp+.5)/2):-1,:] = base_sample[:,n_poi:]
return sample
def saltelli_uniform(n_samp, dist_param):
"""Constructs a uniform low discrepency saltelli sample for use in Sobol
index approximation
Parameters
----------
n_samp: int
Number of samples to take
dist_param : np.ndarray
2 x n_poi array of mean and variance for each parameter
Returns
-------
np.ndarray
Low discrepancy POI sample of uniform distribution constructed using
satelli's alrogrithm
"""
n_poi=dist_param.shape[1]
sample_base = saltelli_sample(n_samp,n_poi)
sample_transformed = dist_param[[0],:]+(dist_param[[1],:]-dist_param[[0],:])*sample_base
return sample_transformed
def saltelli_normal(n_samp, dist_param):
"""Constructs a normal low discrepency saltelli sample for use in Sobol
index approximation
Parameters
----------
n_samp: int
Number of samples to take
dist_param : np.ndarray
2 x n_poi array of mean and variance for each parameter
Returns
-------
np.ndarray
        Low-discrepancy POI sample of the normal distribution constructed using
        Saltelli's algorithm
"""
n_poi=dist_param.shape[1]
sample_base = saltelli_sample(n_samp,n_poi)
sample_transform=sct.norm.ppf(sample_base)*np.sqrt(dist_param[[1], :]) \
+ dist_param[[0], :]
return sample_transform
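# Illustrative sampling sketch (the distribution parameters are made-up values):
#
#     dist_param = np.array([[0.0, 1.0],    # means of two POIs
#                            [1.0, 4.0]])   # variances of two POIs
#     sampler = get_samp_dist('saltelli normal', dist_param, n_poi=2)
#     x = sampler(1024)                     # 1024 x 2 low-discrepancy normal sample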
| 41.051908
| 125
| 0.597791
|
c8f01d179cf66c3677d8b84350596fab5d895f50
| 13,991
|
py
|
Python
|
conans/client/rest/rest_client_v1.py
|
chris-se/conan
|
812c8ec8185e24b2bc41fb6e855d35c925526670
|
[
"MIT"
] | null | null | null |
conans/client/rest/rest_client_v1.py
|
chris-se/conan
|
812c8ec8185e24b2bc41fb6e855d35c925526670
|
[
"MIT"
] | null | null | null |
conans/client/rest/rest_client_v1.py
|
chris-se/conan
|
812c8ec8185e24b2bc41fb6e855d35c925526670
|
[
"MIT"
] | null | null | null |
import os
import time
import traceback
from six.moves.urllib.parse import parse_qs, urljoin, urlparse, urlsplit
from conans.client.remote_manager import check_compressed_files
from conans.client.rest.client_routes import ClientV1Router
from conans.client.rest.rest_client_common import RestCommonMethods, handle_return_deserializer
from conans.client.rest.uploader_downloader import FileDownloader, FileUploader
from conans.errors import ConanException, NotFoundException, NoRestV2Available, \
PackageNotFoundException
from conans.model.info import ConanInfo
from conans.model.manifest import FileTreeManifest
from conans.paths import CONANINFO, CONAN_MANIFEST, EXPORT_SOURCES_TGZ_NAME, EXPORT_TGZ_NAME, \
PACKAGE_TGZ_NAME
from conans.util.files import decode_text
from conans.util.log import logger
def complete_url(base_url, url):
""" Ensures that an url is absolute by completing relative urls with
the remote url. urls that are already absolute are not modified.
"""
if bool(urlparse(url).netloc):
return url
return urljoin(base_url, url)
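# For example (illustrative values only):
#   complete_url("https://conan.example.com/v1/", "files/pkg.tgz")
#       -> "https://conan.example.com/v1/files/pkg.tgz"
#   complete_url("https://conan.example.com/v1/", "https://cdn.example.com/pkg.tgz")
#       -> "https://cdn.example.com/pkg.tgz"   (already absolute, returned unchanged)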
class RestV1Methods(RestCommonMethods):
@property
def router(self):
return ClientV1Router(self.remote_url.rstrip("/"), self._artifacts_properties,
self._matrix_params)
def _download_files(self, file_urls):
"""
:param: file_urls is a dict with {filename: url}
Its a generator, so it yields elements for memory performance
"""
downloader = FileDownloader(self.requester, None, self.verify_ssl, self._config)
        # Take advantage of the filename ordering: with the reversed sort, conan_package.tgz
        # and conan_export.tgz come after conanfile/conaninfo, so smaller files are downloaded first
for filename, resource_url in sorted(file_urls.items(), reverse=True):
auth, _ = self._file_server_capabilities(resource_url)
contents = downloader.download(resource_url, auth=auth)
yield os.path.normpath(filename), contents
def _file_server_capabilities(self, resource_url):
auth = None
dedup = False
urltokens = urlsplit(resource_url)
query_string = urltokens[3]
parsed_string_dict = parse_qs(query_string)
if "signature" not in parsed_string_dict and "Signature" not in parsed_string_dict:
# If monolithic server, we can use same auth, and server understand dedup
auth = self.auth
dedup = True
return auth, dedup
def get_recipe_manifest(self, ref):
"""Gets a FileTreeManifest from conans"""
# Obtain the URLs
url = self.router.recipe_manifest(ref)
urls = self._get_file_to_url_dict(url)
# Get the digest
contents = self._download_files(urls)
# Unroll generator and decode shas (plain text)
contents = {key: decode_text(value) for key, value in dict(contents).items()}
return FileTreeManifest.loads(contents[CONAN_MANIFEST])
def get_package_manifest(self, pref):
"""Gets a FileTreeManifest from a package"""
pref = pref.copy_with_revs(None, None)
# Obtain the URLs
url = self.router.package_manifest(pref)
urls = self._get_file_to_url_dict(url)
# Get the digest
contents = self._download_files(urls)
try:
# Unroll generator and decode shas (plain text)
content = dict(contents)[CONAN_MANIFEST]
return FileTreeManifest.loads(decode_text(content))
except Exception as e:
msg = "Error retrieving manifest file for package " \
"'{}' from remote ({}): '{}'".format(repr(pref), self.remote_url, e)
logger.error(msg)
logger.error(traceback.format_exc())
raise ConanException(msg)
def get_package_info(self, pref):
"""Gets a ConanInfo file from a package"""
pref = pref.copy_with_revs(None, None)
url = self.router.package_download_urls(pref)
urls = self._get_file_to_url_dict(url)
if not urls:
raise PackageNotFoundException(pref)
if CONANINFO not in urls:
raise NotFoundException("Package %s doesn't have the %s file!" % (pref,
CONANINFO))
# Get the info (in memory)
contents = self._download_files({CONANINFO: urls[CONANINFO]})
# Unroll generator and decode shas (plain text)
contents = {key: decode_text(value) for key, value in dict(contents).items()}
return ConanInfo.loads(contents[CONANINFO])
def _get_file_to_url_dict(self, url, data=None):
"""Call to url and decode the json returning a dict of {filepath: url} dict
converting the url to a complete url when needed"""
urls = self.get_json(url, data=data)
return {filepath: complete_url(self.remote_url, url) for filepath, url in urls.items()}
def _upload_recipe(self, ref, files_to_upload, retry, retry_wait):
# Get the upload urls and then upload files
url = self.router.recipe_upload_urls(ref)
file_sizes = {filename.replace("\\", "/"): os.stat(abs_path).st_size
for filename, abs_path in files_to_upload.items()}
urls = self._get_file_to_url_dict(url, data=file_sizes)
if self._matrix_params:
urls = self.router.add_matrix_params(urls)
self._upload_files(urls, files_to_upload, self._output, retry, retry_wait,
display_name=str(ref))
def _upload_package(self, pref, files_to_upload, retry, retry_wait):
# Get the upload urls and then upload files
url = self.router.package_upload_urls(pref)
file_sizes = {filename: os.stat(abs_path).st_size for filename,
abs_path in files_to_upload.items()}
logger.debug("Requesting upload urls...")
urls = self._get_file_to_url_dict(url, data=file_sizes)
if self._matrix_params:
urls = self.router.add_matrix_params(urls)
logger.debug("Requesting upload urls...Done!")
short_pref_name = "%s:%s" % (pref.ref, pref.id[0:4])
self._upload_files(urls, files_to_upload, self._output, retry, retry_wait,
display_name=short_pref_name)
def _upload_files(self, file_urls, files, output, retry, retry_wait, display_name=None):
t1 = time.time()
failed = []
uploader = FileUploader(self.requester, output, self.verify_ssl, self._config)
# conan_package.tgz and conan_export.tgz are uploaded first to avoid uploading conaninfo.txt
        # or conanmanifest.txt with missing files due to a network failure
for filename, resource_url in sorted(file_urls.items()):
if output and not output.is_terminal:
msg = "Uploading: %s" % filename if not display_name else (
"Uploading %s -> %s" % (filename, display_name))
output.writeln(msg)
auth, dedup = self._file_server_capabilities(resource_url)
try:
headers = self._artifacts_properties if not self._matrix_params else {}
uploader.upload(resource_url, files[filename], auth=auth, dedup=dedup,
retry=retry, retry_wait=retry_wait,
headers=headers, display_name=display_name)
except Exception as exc:
output.error("\nError uploading file: %s, '%s'" % (filename, exc))
failed.append(filename)
if failed:
raise ConanException("Execute upload again to retry upload the failed files: %s"
% ", ".join(failed))
else:
logger.debug("UPLOAD: \nAll uploaded! Total time: %s\n" % str(time.time() - t1))
def _download_files_to_folder(self, file_urls, to_folder):
"""
:param: file_urls is a dict with {filename: abs_path}
It writes downloaded files to disk (appending to file, only keeps chunks in memory)
"""
downloader = FileDownloader(self.requester, self._output, self.verify_ssl, self._config)
ret = {}
        # Take advantage of the filename ordering: with the reversed sort, conan_package.tgz
        # and conan_export.tgz come after conanfile/conaninfo, so smaller files are downloaded first
for filename, resource_url in sorted(file_urls.items(), reverse=True):
if self._output and not self._output.is_terminal:
self._output.writeln("Downloading %s" % filename)
auth, _ = self._file_server_capabilities(resource_url)
abs_path = os.path.join(to_folder, filename)
downloader.download(resource_url, abs_path, auth=auth)
ret[filename] = abs_path
return ret
def get_recipe(self, ref, dest_folder):
urls = self._get_recipe_urls(ref)
urls.pop(EXPORT_SOURCES_TGZ_NAME, None)
check_compressed_files(EXPORT_TGZ_NAME, urls)
zipped_files = self._download_files_to_folder(urls, dest_folder)
return zipped_files
def get_recipe_sources(self, ref, dest_folder):
urls = self._get_recipe_urls(ref)
check_compressed_files(EXPORT_SOURCES_TGZ_NAME, urls)
if EXPORT_SOURCES_TGZ_NAME not in urls:
return None
urls = {EXPORT_SOURCES_TGZ_NAME: urls[EXPORT_SOURCES_TGZ_NAME]}
zipped_files = self._download_files_to_folder(urls, dest_folder)
return zipped_files
def _get_recipe_urls(self, ref):
"""Gets a dict of filename:contents from conans"""
# Get the conanfile snapshot first
url = self.router.recipe_download_urls(ref)
urls = self._get_file_to_url_dict(url)
return urls
def get_package(self, pref, dest_folder):
urls = self._get_package_urls(pref)
check_compressed_files(PACKAGE_TGZ_NAME, urls)
zipped_files = self._download_files_to_folder(urls, dest_folder)
return zipped_files
def _get_package_urls(self, pref):
"""Gets a dict of filename:contents from package"""
url = self.router.package_download_urls(pref)
urls = self._get_file_to_url_dict(url)
if not urls:
raise PackageNotFoundException(pref)
return urls
def get_recipe_path(self, ref, path):
url = self.router.recipe_download_urls(ref)
return self._get_path(url, path)
def get_package_path(self, pref, path):
"""Gets a file content or a directory list"""
url = self.router.package_download_urls(pref)
return self._get_path(url, path)
def _get_path(self, url, path):
urls = self._get_file_to_url_dict(url)
def is_dir(the_path):
if the_path == ".":
return True
for _the_file in urls:
if the_path == _the_file:
return False
elif _the_file.startswith(the_path):
return True
raise NotFoundException("The specified path doesn't exist")
if is_dir(path):
ret = []
for the_file in urls:
if path == "." or the_file.startswith(path):
tmp = the_file[len(path) - 1:].split("/", 1)[0]
if tmp not in ret:
ret.append(tmp)
return sorted(ret)
else:
downloader = FileDownloader(self.requester, None, self.verify_ssl, self._config)
auth, _ = self._file_server_capabilities(urls[path])
content = downloader.download(urls[path], auth=auth)
return decode_text(content)
def _get_snapshot(self, url):
try:
snapshot = self.get_json(url)
snapshot = {os.path.normpath(filename): the_md5
for filename, the_md5 in snapshot.items()}
except NotFoundException:
snapshot = []
return snapshot
@handle_return_deserializer()
def _remove_conanfile_files(self, ref, files):
self.check_credentials()
payload = {"files": [filename.replace("\\", "/") for filename in files]}
url = self.router.remove_recipe_files(ref)
return self._post_json(url, payload)
@handle_return_deserializer()
def remove_packages(self, ref, package_ids=None):
""" Remove any packages specified by package_ids"""
self.check_credentials()
payload = {"package_ids": package_ids}
url = self.router.remove_packages(ref)
return self._post_json(url, payload)
@handle_return_deserializer()
def remove_conanfile(self, ref):
""" Remove a recipe and packages """
self.check_credentials()
url = self.router.remove_recipe(ref)
logger.debug("REST: remove: %s" % url)
response = self.requester.delete(url, auth=self.auth, headers=self.custom_headers,
verify=self.verify_ssl)
return response
def get_recipe_revisions(self, ref):
raise NoRestV2Available("The remote doesn't support revisions")
def get_package_revisions(self, pref):
raise NoRestV2Available("The remote doesn't support revisions")
def get_latest_recipe_revision(self, ref):
raise NoRestV2Available("The remote doesn't support revisions")
def get_latest_package_revision(self, pref):
raise NoRestV2Available("The remote doesn't support revisions")
def _post_json(self, url, payload):
logger.debug("REST: post: %s" % url)
response = self.requester.post(url,
auth=self.auth,
headers=self.custom_headers,
verify=self.verify_ssl,
json=payload)
return response
| 43.996855
| 100
| 0.638839
|
31861e0bb469aba8441b4ba66af3c3dc36dd3764
| 1,499
|
py
|
Python
|
profiles_api/serializers.py
|
makimovsky/profiles-rest-api
|
488d9d709e8e821350dd57b317e7b9a4e91cfe6d
|
[
"MIT"
] | null | null | null |
profiles_api/serializers.py
|
makimovsky/profiles-rest-api
|
488d9d709e8e821350dd57b317e7b9a4e91cfe6d
|
[
"MIT"
] | null | null | null |
profiles_api/serializers.py
|
makimovsky/profiles-rest-api
|
488d9d709e8e821350dd57b317e7b9a4e91cfe6d
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
"""Serializes a name field for testing our APIView"""
name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
"""Serializers a user profile object"""
class Meta:
model = models.UserProfile
fields = ('id', 'email', 'name', 'password')
extra_kwargs = {
'password': {
'write_only': True,
'style': {'input_type': 'password'}
}
}
def create(self, validated_data):
"""Create and return a new user"""
user = models.UserProfile.objects.create_user(
email=validated_data['email'],
name=validated_data['name'],
password=validated_data['password']
)
return user
def update(self, instance, validated_data):
"""Handle updating user account"""
if 'password' in validated_data:
password = validated_data.pop('password')
instance.set_password(password)
return super().update(instance, validated_data)
class ProfileFeedItemSerializer(serializers.ModelSerializer):
"""Serializes profile feed items"""
class Meta:
model = models.ProfileFeedItem
fields = ('id', 'user_profile', 'status_text', 'created_on')
extra_kwargs = {
'user_profile': {'read_only': True}
}
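# Illustrative usage sketch (field values are placeholders). With no instance bound,
# serializer.save() dispatches to create() above; with an instance it calls update():
#
#     serializer = UserProfileSerializer(data={
#         "email": "user@example.com", "name": "Test User", "password": "secret123",
#     })
#     if serializer.is_valid():
#         user = serializer.save()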
| 28.826923
| 68
| 0.620414
|
651541cacd9a3fa21ac8a451224aa34b7da28c73
| 85,073
|
py
|
Python
|
autosar/behavior.py
|
Warchant/autosar
|
beaae0205345a6d695982a670d621ace4e4c6aab
|
[
"MIT"
] | 199
|
2016-07-27T17:14:43.000Z
|
2022-03-30T12:28:02.000Z
|
autosar/behavior.py
|
Warchant/autosar
|
beaae0205345a6d695982a670d621ace4e4c6aab
|
[
"MIT"
] | 50
|
2017-10-10T08:19:21.000Z
|
2022-03-27T18:43:29.000Z
|
autosar/behavior.py
|
Warchant/autosar
|
beaae0205345a6d695982a670d621ace4e4c6aab
|
[
"MIT"
] | 125
|
2016-07-27T17:16:08.000Z
|
2022-03-30T17:03:28.000Z
|
import copy
import autosar.component
import autosar.portinterface
import autosar.base
from autosar.element import Element, DataElement
import collections
###################################### Events ###########################################
class Event(Element):
def __init__(self,name,startOnEventRef=None, parent=None):
super().__init__(name,parent)
self.startOnEventRef = startOnEventRef
self.modeDependency=None #for AUTOSAR3
self.disabledInModes=None #for AUTOSAR4
class ModeSwitchEvent(Event):
def __init__(self,name,startOnEventRef=None, activationType='ENTRY', parent=None, version=3.0):
super().__init__(name, startOnEventRef, parent)
self.modeInstRef=None
if version < 4.0:
if (activationType!='ENTRY') and (activationType != 'EXIT'):
raise ValueError('activationType argument must be either "ENTRY" or "EXIT"')
elif version >= 4.0:
if (activationType=='ENTRY'): activationType='ON-ENTRY'
if (activationType=='EXIT'): activationType='ON-EXIT'
if (activationType!='ON-ENTRY') and (activationType != 'ON-EXIT'):
raise ValueError('activationType argument must be either "ON-ENTRY" or "ON-EXIT"')
self.activationType = activationType
def tag(self,version): return 'SWC-MODE-SWITCH-EVENT' if version >= 4.0 else 'MODE-SWITCH-EVENT'
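# For example (the event name is illustrative): in an AUTOSAR 4 workspace,
# ModeSwitchEvent('EcuM_Run_Entered', activationType='ENTRY', version=4.0) stores
# activationType 'ON-ENTRY', while the same call with version=3.0 keeps 'ENTRY'.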
class TimingEvent(Event):
def __init__(self,name,startOnEventRef=None, period=0, parent=None):
super().__init__(name, startOnEventRef, parent)
self.period=int(period)
def tag(self, version=None):
return 'TIMING-EVENT'
class DataReceivedEvent(Event):
def __init__(self, name, startOnEventRef=None, parent=None):
super().__init__(name, startOnEventRef, parent)
self.dataInstanceRef=None
self.swDataDefsProps=[]
def tag(self, version=None):
return "DATA-RECEIVED-EVENT"
class OperationInvokedEvent(Event):
def __init__(self, name, startOnEventRef=None, parent=None):
super().__init__(name, startOnEventRef, parent)
self.operationInstanceRef=None
self.swDataDefsProps=[]
def tag(self, version=None):
return "OPERATION-INVOKED-EVENT"
class InitEvent(Event):
def __init__(self,name,startOnEventRef=None, parent=None):
super().__init__(name, startOnEventRef, parent)
def tag(self, version=None):
return 'INIT-EVENT'
class ModeSwitchAckEvent(Event):
"""
Represents <MODE-SWITCHED-ACK-EVENT> (AUTOSAR 4)
"""
def __init__(self, name, startOnEventRef=None, eventSourceRef = None, parent=None):
super().__init__(name, startOnEventRef, parent)
self.eventSourceRef = eventSourceRef
def tag(self, version=None):
return 'MODE-SWITCHED-ACK-EVENT'
####################################################################################################
class ModeDependency(object):
def __init__(self):
self.modeInstanceRefs=[]
def asdict(self):
data={'type': self.__class__.__name__,'modeInstanceRefs':[]}
for modeInstanceRef in self.modeInstanceRefs:
data['modeInstanceRefs'].append(modeInstanceRef.asdict())
      if len(data['modeInstanceRefs'])==0: del data['modeInstanceRefs']
      return data
def append(self, item):
if isinstance(item, ModeInstanceRef) or isinstance(item, ModeDependencyRef):
self.modeInstanceRefs.append(item)
else:
raise ValueError('invalid type: '+str(type(item)))
class ModeInstanceRef:
"""
Implementation of MODE-IREF (AUTOSAR3)
"""
def __init__(self,modeDeclarationRef,modeDeclarationGroupPrototypeRef=None,requirePortPrototypeRef=None):
self.modeDeclarationRef=modeDeclarationRef #MODE-DECLARATION-REF
self.modeDeclarationGroupPrototypeRef=modeDeclarationGroupPrototypeRef #MODE-DECLARATION-GROUP-PROTOTYPE-REF
self.requirePortPrototypeRef=requirePortPrototypeRef #R-PORT-PROTOTYPE-REF
def asdict(self):
data={'type': self.__class__.__name__}
for key, value in self.__dict__.items():
data[key]=value
return data
def tag(self,version=None):
return 'MODE-IREF'
class ModeDependencyRef:
def __init__(self,modeDeclarationRef,modeDeclarationGroupPrototypeRef=None,requirePortPrototypeRef=None):
self.modeDeclarationRef=modeDeclarationRef #MODE-DECLARATION-REF
self.modeDeclarationGroupPrototypeRef=modeDeclarationGroupPrototypeRef #MODE-DECLARATION-GROUP-PROTOTYPE-REF
self.requirePortPrototypeRef=requirePortPrototypeRef #R-PORT-PROTOTYPE-REF
def asdict(self):
data={'type': self.__class__.__name__}
for key, value in self.__dict__.items():
data[key]=value
return data
def tag(self,version=None):
return 'DEPENDENT-ON-MODE-IREF'
class DisabledModeInstanceRef(object):
def __init__(self,modeDeclarationRef,modeDeclarationGroupPrototypeRef=None,requirePortPrototypeRef=None):
self.modeDeclarationRef=modeDeclarationRef #MODE-DECLARATION-REF
self.modeDeclarationGroupPrototypeRef=modeDeclarationGroupPrototypeRef #MODE-DECLARATION-GROUP-PROTOTYPE-REF
self.requirePortPrototypeRef=requirePortPrototypeRef #R-PORT-PROTOTYPE-REF
def asdict(self):
data={'type': self.__class__.__name__}
for key, value in self.__dict__.items():
data[key]=value
return data
def tag(self,version=None):
return 'DISABLED-MODE-IREF'
class ModeGroupInstanceRef:
"""
Base class for RequireModeGroupInstanceRef and ProvideModeGroupInstanceRef
"""
def __init__(self, modeGroupRef, parent = None):
"""
This is a very sneaky XML element. Depending on where it is used in the XML schema (XSD)
it needs to use different XML tags. We solve this by looking at the parent object.
"""
self.modeGroupRef = modeGroupRef
self.parent = parent
class RequireModeGroupInstanceRef(ModeGroupInstanceRef):
def __init__(self, requirePortRef, modeGroupRef):
super().__init__(modeGroupRef)
self.requirePortRef = requirePortRef
def tag(self, version):
if self.parent is None:
raise RuntimeError("self.parent cannot be None")
if version >= 4.0:
if isinstance(self.parent, ModeAccessPoint):
return 'R-MODE-GROUP-IN-ATOMIC-SWC-INSTANCE-REF'
else:
return 'MODE-GROUP-IREF'
else:
raise RuntimeError('Not supported in v%.1f'%version)
class ProvideModeGroupInstanceRef(ModeGroupInstanceRef):
def __init__(self, providePortRef, modeGroupRef):
super().__init__(modeGroupRef)
self.providePortRef = providePortRef
def tag(self, version):
if self.parent is None:
raise RuntimeError("self.parent cannot be None")
if version >= 4.0:
if isinstance(self.parent, ModeAccessPoint):
return 'P-MODE-GROUP-IN-ATOMIC-SWC-INSTANCE-REF'
else:
return 'MODE-GROUP-IREF'
else:
raise RuntimeError('Not supported in v%.1f'%version)
class PortAPIOption():
def __init__(self,portRef,takeAddress=False,indirectAPI=False):
self.portRef = portRef
self.takeAddress = bool(takeAddress)
self.indirectAPI = bool(indirectAPI)
def asdict(self):
data={'type': self.__class__.__name__,'takeAddress':self.takeAddress, 'indirectAPI':self.indirectAPI, 'portRef':self.portRef}
return data
def tag(self,version=None): return "PORT-API-OPTION"
class DataReceivePoint:
def __init__(self,portRef,dataElemRef=None,name=None,parent=None):
self.portRef=portRef
self.dataElemRef=dataElemRef
self.name=name
self.parent=parent
def tag(self,version): return "VARIABLE-ACCESS" if version >= 4.0 else "DATA-RECEIVE-POINT"
class DataSendPoint:
def __init__(self,portRef,dataElemRef=None,name=None,parent=None):
self.portRef=portRef
self.dataElemRef=dataElemRef
self.name=name
self.parent=parent
def tag(self,version): return "VARIABLE-ACCESS" if version >= 4.0 else "DATA-SEND-POINT"
class RunnableEntity(Element):
def __init__(self, name, invokeConcurrently=False, symbol=None, parent=None, adminData=None):
super().__init__(name,parent,adminData)
self.invokeConcurrently = invokeConcurrently
self.minStartInterval = None
if symbol is None:
self.symbol=name
else:
self.symbol = symbol
self.dataReceivePoints=[]
self.dataSendPoints=[]
self.serverCallPoints=[]
self.exclusiveAreaRefs=[]
self.modeAccessPoints=[] #AUTOSAR4 only
self.modeSwitchPoints=[] #AUTOSAR4 only
self.parameterAccessPoints = [] #AUTOSAR4 only
def tag(self,version=None):
return 'RUNNABLE-ENTITY'
def find(self, ref):
if ref is None: return None
if ref[0]=='/': ref=ref[1:] #removes initial '/' if it exists
ref=ref.partition('/')
name=ref[0]
foundElem = None
for elem in self.modeAccessPoints + self.modeSwitchPoints + self.parameterAccessPoints:
if elem.name == name:
foundElem = elem
break
if foundElem is not None:
if len(ref[2])>0:
return foundElem.find(ref[2])
else:
return foundElem
return None
def append(self,elem):
if isinstance(elem, autosar.behavior.DataReceivePoint):
dataReceivePoint=self._verifyDataReceivePoint(copy.copy(elem))
self.dataReceivePoints.append(dataReceivePoint)
dataReceivePoint.parent=self
elif isinstance(elem, autosar.behavior.DataSendPoint):
dataSendPoint=self._verifyDataSendPoint(copy.copy(elem))
self.dataSendPoints.append(dataSendPoint)
dataSendPoint.parent=self
else:
raise NotImplementedError(str(type(elem)))
def _verifyDataReceivePoint(self,dataReceivePoint):
ws=self.rootWS()
assert(ws is not None)
assert(dataReceivePoint.portRef is not None)
if isinstance(dataReceivePoint.portRef,autosar.port.Port):
dataReceivePoint.portRef=dataReceivePoint.portRef.ref
if isinstance(dataReceivePoint.portRef,str):
port=ws.find(dataReceivePoint.portRef)
if dataReceivePoint.dataElemRef is None:
#default rule: set dataElemRef to ref of first dataElement in the portinterface
portInterface=ws.find(port.portInterfaceRef)
assert(portInterface is not None)
if isinstance(portInterface,(autosar.portinterface.SenderReceiverInterface,autosar.portinterface.ParameterInterface)):
dataReceivePoint.dataElemRef=portInterface.dataElements[0].ref
else:
raise ValueError('invalid interface type:%s'%(str(type(portInterface))))
assert(isinstance(dataReceivePoint.dataElemRef,str))
dataElement = ws.find(dataReceivePoint.dataElemRef)
if dataReceivePoint.name is None:
#default rule: set the name to REC_<port.name>_<dataElement.name>
dataReceivePoint.name="REC_{0.name}_{1.name}".format(port,dataElement)
else:
raise ValueError('%s: portRef must be of type string'%self.ref)
return dataReceivePoint
def _verifyDataSendPoint(self,dataSendPoint):
ws=self.rootWS()
assert(ws is not None)
assert(dataSendPoint.portRef is not None)
if isinstance(dataSendPoint.portRef,autosar.port.Port):
dataSendPoint.portRef=dataSendPoint.portRef.ref
if isinstance(dataSendPoint.portRef,str):
port=ws.find(dataSendPoint.portRef)
if dataSendPoint.dataElemRef is None:
#default rule: set dataElemRef to ref of first dataElement in the portinterface
portInterface=ws.find(port.portInterfaceRef)
assert(portInterface is not None)
if isinstance(portInterface,(autosar.portinterface.SenderReceiverInterface,autosar.portinterface.ParameterInterface)):
dataSendPoint.dataElemRef=portInterface.dataElements[0].ref
else:
raise ValueError('invalid interface type:%s'%(str(type(portInterface))))
assert(isinstance(dataSendPoint.dataElemRef,str))
dataElement = ws.find(dataSendPoint.dataElemRef)
if dataSendPoint.name is None:
#default rule: set the name to SEND_<port.name>_<dataElement.name>
dataSendPoint.name="SEND_{0.name}_{1.name}".format(port,dataElement)
else:
raise ValueError('%s: portRef must be of type string'%self.ref)
return dataSendPoint
def rootWS(self):
if self.parent is None:
return autosar.getCurrentWS()
else:
return self.parent.rootWS()
@property
def ref(self):
if self.parent is not None:
return self.parent.ref+'/%s'%self.name
else:
return None
class DataElementInstanceRef(object):
"""
<DATA-ELEMENT-IREF>
Note: This object seems to be identical to a <DATA-IREF>
Note 2: Observe that there are multiple <DATA-ELEMENT-IREF> definitions in the AUTOSAR XSD (used for different purposes)
"""
def __init__(self,portRef,dataElemRef):
self.portRef = portRef
self.dataElemRef = dataElemRef
def asdict(self):
data={'type': self.__class__.__name__,'portRef':self.portRef, 'dataElemRef':self.dataElemRef}
return data
def tag(self, version=None):
return 'DATA-ELEMENT-IREF'
class DataInstanceRef(object):
"""
<DATA-IREF>
Note: This object seems to be identical to a <DATA-ELEMENT-IREF>
"""
def __init__(self,portRef,dataElemRef):
self.portRef = portRef
self.dataElemRef = dataElemRef
def asdict(self):
data={'type': self.__class__.__name__,'portRef':self.portRef, 'dataElemRef':self.dataElemRef}
return data
def tag(self, version=None):
return 'DATA-IREF'
class OperationInstanceRef(object):
"""
<OPERATION-IREF>
"""
def __init__(self,portRef,operationRef):
self.portRef = portRef
self.operationRef = operationRef
def asdict(self):
data={'type': self.__class__.__name__,'portRef':self.portRef, 'operationRef':self.operationRef}
return data
def tag(self, version=None):
return 'OPERATION-IREF'
class PerInstanceMemory(Element):
"""
AUTOSAR 3 <PER-INSTANCE-MEMORY>
Note: I don't know why this XML object has both <TYPE> and <TYPE-DEFINITION> where a simple TYPE-TREF should suffice.
Internally use a typeRef for PerInstanceMemory. We can transform it back to <TYPE> and <TYPE-DEFINITION> when serializing to XML
"""
def __init__(self, name, typeRef, parent=None):
super().__init__(name, parent)
self.typeRef=typeRef
def asdict(self):
data={'type': self.__class__.__name__,'name':self.name, 'typeRef':self.typeRef}
return data
def tag(self, version = None):
return 'PER-INSTANCE-MEMORY'
class SwcNvBlockNeeds(object):
"""
AUTOSAR 3 representation of SWC-NV-BLOCK-NEEDS
"""
def __init__(self,name,numberOfDataSets,readOnly,reliability,resistantToChangedSW,
restoreAtStart,writeOnlyOnce,writingFrequency,writingPriority,
defaultBlockRef,mirrorBlockRef):
self.name=name
self.numberOfDataSets=numberOfDataSets
assert(isinstance(readOnly,bool))
self.readOnly=readOnly
self.reliability=reliability
assert(isinstance(resistantToChangedSW,bool))
self.resistantToChangedSW=resistantToChangedSW
assert(isinstance(restoreAtStart,bool))
self.restoreAtStart=restoreAtStart
assert(isinstance(writeOnlyOnce,bool))
self.writeOnlyOnce=writeOnlyOnce
self.writingFrequency=writingFrequency
self.writingPriority=writingPriority
self.defaultBlockRef=defaultBlockRef
self.mirrorBlockRef=mirrorBlockRef
self.serviceCallPorts=[]
def asdict(self):
data={'type': self.__class__.__name__,'serviceCallPorts':[]}
for key, value in self.__dict__.items():
if key=='serviceCallPorts':
data['serviceCallPorts'] = [x.asdict() for x in value]
else:
data[key]=value
if len(data['serviceCallPorts'])==0: del data['serviceCallPorts']
return data
def tag(self, version=None):
return 'SWC-NV-BLOCK-NEEDS'
class NvmBlockConfig:
"""
Represents NVM block config, used inside an NvmBlockNeeds object.
All options default to None, which means "default configuration".
In practice a None value means that no XML will be generated for that option.
Option List:
- numberOfDataSets: None or int
- numberOfRomBlocks: None or int
- ramBlockStatusControl: None or str ('NV-RAM-MANAGER', 'API')
- reliability: None or str('NO-PROTECTION', 'ERROR-DETECTION', 'ERROR-CORRECTION')
- writingPriority: None or str ('LOW', 'MEDIUM', 'HIGH')
- writingFrequency: None or int
- calcRamBlockCrc: None or bool
- checkStaticBlockId: None or bool
- readOnly: None or bool
- resistantToChangedSw: None or bool
- restoreAtStartup: None or bool
- storeAtShutdown: None or bool
- writeVerification: None or bool
- writeOnlyOnce: None or bool
- autoValidationAtShutdown: None or bool
- useCrcCompMechanism: None or bool
- storeEmergency: None or bool
- storeImmediate: None or bool
- storeCyclic: None or bool
- cyclicWritePeriod: None or float
"""
def __init__(self, numberOfDataSets = None,
numberOfRomBlocks = None,
ramBlockStatusControl = None,
reliability = None,
writingPriority = None,
writingFrequency = None,
calcRamBlockCrc = None,
checkStaticBlockId = None,
readOnly = None,
resistantToChangedSw = None,
restoreAtStartup = None,
storeAtShutdown = None,
writeVerification = None,
writeOnlyOnce = None,
autoValidationAtShutdown = None,
useCrcCompMechanism = None,
storeEmergency = None,
storeImmediate = None,
storeCyclic = None,
cyclicWritePeriod = None,
check_input = True):
self.numberOfDataSets = numberOfDataSets
self.numberOfRomBlocks = numberOfRomBlocks
self.ramBlockStatusControl = ramBlockStatusControl
self.reliability = reliability
self.writingPriority = writingPriority
self.writingFrequency = writingFrequency
self.calcRamBlockCrc = calcRamBlockCrc
self.checkStaticBlockId = checkStaticBlockId
self.readOnly = readOnly
self.resistantToChangedSw = resistantToChangedSw
self.restoreAtStartup = restoreAtStartup
self.storeAtShutdown = storeAtShutdown
self.writeVerification = writeVerification
self.writeOnlyOnce = writeOnlyOnce
self.autoValidationAtShutdown = autoValidationAtShutdown
self.useCrcCompMechanism = useCrcCompMechanism
self.storeEmergency = storeEmergency
self.storeImmediate = storeImmediate
self.storeCyclic = storeCyclic
self.cyclicWritePeriod = cyclicWritePeriod
if isinstance(self.cyclicWritePeriod, int):
self.cyclicWritePeriod = float(self.cyclicWritePeriod)
if check_input:
self.check()
def check(self):
if not (self.numberOfDataSets is None or isinstance(self.numberOfDataSets, int) ):
raise ValueError('numberOfDataSets is incorrectly formatted (None or int expected)')
if not (self.numberOfRomBlocks is None or isinstance(self.numberOfRomBlocks, int) ):
raise ValueError('numberOfRomBlocks is incorrectly formatted (None or int expected)')
if not (self.ramBlockStatusControl is None or isinstance(self.ramBlockStatusControl, str) ):
raise ValueError('ramBlockStatusControl is incorrectly formatted (None or str expected)')
if not (self.reliability is None or isinstance(self.reliability, str) ):
raise ValueError('reliability is incorrectly formatted (None or str expected)')
if not (self.writingPriority is None or isinstance(self.writingPriority, str) ):
raise ValueError('writingPriority is incorrectly formatted (None or str expected)')
if not (self.writingFrequency is None or isinstance(self.writingFrequency, int) ):
raise ValueError('writingFrequency is incorrectly formatted (None or int expected)')
if not (self.calcRamBlockCrc is None or isinstance(self.calcRamBlockCrc, bool) ):
raise ValueError('calcRamBlockCrc is incorrectly formatted (None or bool expected)')
if not (self.checkStaticBlockId is None or isinstance(self.checkStaticBlockId, bool) ):
raise ValueError('checkStaticBlockId is incorrectly formatted (None or bool expected)')
if not (self.readOnly is None or isinstance(self.readOnly, bool) ):
raise ValueError('readOnly is incorrectly formatted (None or bool expected)')
if not (self.resistantToChangedSw is None or isinstance(self.resistantToChangedSw, bool) ):
raise ValueError('resistantToChangedSw is incorrectly formatted (None or bool expected)')
if not (self.restoreAtStartup is None or isinstance(self.restoreAtStartup, bool) ):
raise ValueError('restoreAtStartup is incorrectly formatted (None or bool expected)')
if not (self.storeAtShutdown is None or isinstance(self.storeAtShutdown, bool) ):
raise ValueError('storeAtShutdown is incorrectly formatted (None or bool expected)')
if not (self.writeVerification is None or isinstance(self.writeVerification, bool) ):
raise ValueError('writeVerification is incorrectly formatted (None or bool expected)')
if not (self.writeOnlyOnce is None or isinstance(self.writeOnlyOnce, bool) ):
raise ValueError('writeOnlyOnce is incorrectly formatted (None or bool expected)')
if not (self.autoValidationAtShutdown is None or isinstance(self.autoValidationAtShutdown, bool) ):
raise ValueError('autoValidationAtShutdown is incorrectly formatted (None or bool expected)')
if not (self.useCrcCompMechanism is None or isinstance(self.useCrcCompMechanism, bool) ):
raise ValueError('useCrcCompMechanism is incorrectly formatted (None or bool expected)')
if not (self.storeEmergency is None or isinstance(self.storeEmergency, bool) ):
raise ValueError('storeEmergency is incorrectly formatted (None or bool expected)')
if not (self.storeImmediate is None or isinstance(self.storeImmediate, bool) ):
raise ValueError('storeImmediate is incorrectly formatted (None or bool expected)')
if not (self.storeCyclic is None or isinstance(self.storeCyclic, bool) ):
raise ValueError('storeCyclic is incorrectly formatted (None or bool expected)')
if not (self.cyclicWritePeriod is None or isinstance(self.cyclicWritePeriod, float) ):
raise ValueError('cyclicWritePeriod is incorrectly formatted (None or float expected)')
class NvmBlockNeeds(Element):
"""
AUTOSAR 4 representation of NV-BLOCK-NEEDS
The second argument to the constructor should be an instance of a (previously configured) NvmBlockConfig.
"""
def __init__(self, name, blockConfig = None, parent=None, adminData=None):
super().__init__(name, parent, adminData)
assert(blockConfig is None or isinstance(blockConfig, NvmBlockConfig))
if blockConfig is None:
blockConfig = NvmBlockConfig() #create a default configuration
self.cfg = blockConfig
def tag(self, version): return 'NV-BLOCK-NEEDS'
class RoleBasedRPortAssignment(object):
def __init__(self,portRef,role):
self.portRef=portRef
self.role=role
def asdict(self):
data={'type': self.__class__.__name__}
for key, value in self.__dict__.items():
data[key]=value
return data
def tag(self, version=None):
return 'ROLE-BASED-R-PORT-ASSIGNMENT'
class CalPrmElemPrototype(Element):
"""
<CALPRM-ELEMENT-PROTOTYPE>
"""
def __init__(self,name, typeRef, parent=None, adminData=None):
super().__init__(name, parent, adminData)
self.typeRef=typeRef
self.swDataDefsProps=[]
def asdict(self):
data={'type': self.__class__.__name__,'name':self.name,'typeRef':self.typeRef,'swDataDefsProps':[]}
if self.adminData is not None:
data['adminData']=self.adminData.asdict()
for elem in self.swDataDefsProps:
data['swDataDefsProps'].append(elem)
return data
def tag(self, version=None):
return 'CALPRM-ELEMENT-PROTOTYPE'
class ExclusiveArea(Element):
def __init__(self, name, parent=None, adminData=None):
super().__init__(name,parent,adminData)
def asdict(self):
data={'type': self.__class__.__name__,'name':self.name}
return data
def tag(self, version=None):
return 'EXCLUSIVE-AREA'
class SyncServerCallPoint(object):
"""
<SYNCHRONOUS-SERVER-CALL-POINT>
"""
def __init__(self, name, timeout=0.0):
self.name=name
self.timeout=timeout
self.operationInstanceRefs=[]
def asdict(self):
data={'type': self.__class__.__name__,'name':self.name,'timeout':self.timeout}
data['operationInstanceRefs'] = [x.asdict() for x in self.operationInstanceRefs]
if len(data['operationInstanceRefs'])==0: del data['operationInstanceRefs']
return data
class InternalBehaviorCommon(Element):
"""
Base class for InternalBehavior (AUTOSAR 3) and SwcInternalBehavior (AUTOSAR 4)
"""
def __init__(self, name, componentRef, multipleInstance=False, parent=None, adminData=None):
super().__init__(name, parent, adminData)
if not isinstance(componentRef,str): #this is a helper, in case the user called the function with obj instead of obj.ref
if hasattr(componentRef,'ref'):
componentRef=componentRef.ref
if (componentRef is None) or (not isinstance(componentRef,str)):
raise ValueError('componentRef: invalid reference')
self.componentRef=str(componentRef)
self.multipleInstance = bool(multipleInstance)
self.events = []
self.portAPIOptions = []
self.autoCreatePortAPIOptions = False
self.runnables = []
self.exclusiveAreas=[]
self.perInstanceMemories = []
self.swc = None
def createPortAPIOptionDefaults(self):
self.portAPIOptions = []
self._initSWC()
ws = self.rootWS()
tmp = self.swc.providePorts+self.swc.requirePorts
for port in sorted(tmp,key=lambda x: x.name.lower()):
self.portAPIOptions.append(PortAPIOption(port.ref))
def _initSWC(self):
"""
sets up self.swc if not already setup
"""
if self.swc is None:
ws = self.rootWS()
assert(ws is not None)
self.swc = ws.find(self.componentRef)
assert(self.swc is not None)
def find(self,ref):
if ref is None: return None
if ref[0]=='/': ref=ref[1:] #removes initial '/' if it exists
ref=ref.partition('/')
name=ref[0]
foundElem = None
for elem in self.runnables + self.perInstanceMemories + self.exclusiveAreas + self.events:
if elem.name == name:
foundElem = elem
break
if foundElem is not None:
if len(ref[2])>0:
return foundElem.find(ref[2])
else:
return foundElem
return None
def createRunnable(self, name, portAccess=None, symbol=None, concurrent=False, exclusiveAreas=None, modeSwitchPoint = None, minStartInterval = 0, adminData=None):
"""
Creates a new runnable and appends it to this InternalBehavior instance
Parameters:
* name: <SHORT-NAME> (str)
* portAccess: List of strings containing port names or "port-name/element" where element can be data-element or an operation (list(str))
* symbol: Optional symbol name (str). Default is to use self.name string
* concurrent: Enable/Disable if this runnable can run concurrently (bool).
* exclusiveAreas: List of strings containing which exclusive areas this runnable will access.
Note: For mode ports you will at best get read access. If you want to set new modes use modeSwitchPoints.
* modeSwitchPoint: List of strings containing port names that this runnable will explicitly use for setting modes.
* minStartInterval: Specifies the time in milliseconds by which two consecutive starts of an ExecutableEntity are guaranteed to be separated.
* adminData: Optional adminData
"""
runnable = RunnableEntity(name, concurrent, symbol, self, adminData)
runnable.minStartInterval = minStartInterval
self.runnables.append(runnable)
self._initSWC()
ws = self.rootWS()
if portAccess is not None:
if isinstance(portAccess, str):
portAccess = [portAccess]
assert (ws is not None)
for elem in portAccess:
ref = elem.partition('/')
if len(ref[1])==0:
#this section is for portAccess where only the port name is mentioned.
#This method only works if the port interface has only 1 data element,
# i.e. no ambiguity as to what data element is meant
port = self.swc.find(ref[0])
if port is None:
raise ValueError('invalid port reference: '+str(elem))
portInterface = ws.find(port.portInterfaceRef, role='PortInterface')
if portInterface is None:
raise ValueError('invalid portinterface reference: '+str(port.portInterfaceRef))
if isinstance(portInterface, (autosar.portinterface.SenderReceiverInterface, autosar.portinterface.NvDataInterface)):
if len(portInterface.dataElements)==0:
continue
elif len(portInterface.dataElements)==1:
dataElem=portInterface.dataElements[0]
self._createSendReceivePoint(port,dataElem,runnable)
else:
raise NotImplementedError('port interfaces with multiple data elements not supported')
elif isinstance(portInterface, autosar.portinterface.ModeSwitchInterface):
modeGroup = portInterface.modeGroup
self._createModeAccessPoint(port, modeGroup, runnable)
else:
raise NotImplementedError(type(portInterface))
else:
#this section is for portAccess where both the port name and the data element are given as "portName/dataElementName"
port = self.swc.find(ref[0])
if port is None:
raise ValueError('invalid port reference: '+str(elem))
portInterface = ws.find(port.portInterfaceRef)
if portInterface is None:
raise ValueError('invalid portinterface reference: '+str(port.portInterfaceRef))
if isinstance(portInterface, (autosar.portinterface.SenderReceiverInterface, autosar.portinterface.NvDataInterface)):
dataElem=portInterface.find(ref[2])
if dataElem is None:
raise ValueError('invalid data element reference: '+str(elem))
self._createSendReceivePoint(port,dataElem,runnable)
elif isinstance(portInterface, autosar.portinterface.ClientServerInterface):
operation=portInterface.find(ref[2])
if operation is None:
raise ValueError('invalid operation reference: '+str(elem))
self._createSyncServerCallPoint(port,operation,runnable)
else:
raise NotImplementedError(type(portInterface))
if exclusiveAreas is not None:
if isinstance(exclusiveAreas, str):
exclusiveAreas =[exclusiveAreas]
if isinstance(exclusiveAreas, collections.Iterable):
for exclusiveAreaName in exclusiveAreas:
found = False
for exclusiveArea in self.exclusiveAreas:
if exclusiveArea.name == exclusiveAreaName:
found = True
runnable.exclusiveAreaRefs.append(exclusiveArea.ref)
break
if not found:
raise ValueError('invalid exclusive area name: '+exclusiveAreaName)
else:
raise ValueError('exclusiveAreas must be either string or list')
if modeSwitchPoint is not None:
if isinstance(modeSwitchPoint, str):
modeSwitchPoint = [modeSwitchPoint]
assert (ws is not None)
for portName in modeSwitchPoint:
port = self.swc.find(portName)
if port is None:
raise ValueError('invalid port reference: '+str(portName))
portInterface = ws.find(port.portInterfaceRef, role='PortInterface')
if portInterface is None:
raise ValueError('invalid portinterface reference: '+str(port.portInterfaceRef))
if isinstance(portInterface, autosar.portinterface.ModeSwitchInterface):
modeGroup = portInterface.modeGroup
self._createModeSwitchPoint(port, modeGroup, runnable)
else:
raise NotImplementedError(str(type(portInterface)))
return runnable
def _createSendReceivePoint(self,port,dataElement,runnable):
"""
Internal function that creates a DataReceivePoint if the port is a require port or
a DataSendPoint if the port is a provide port.
"""
if isinstance(port,autosar.port.RequirePort):
receivePoint=DataReceivePoint(port.ref,dataElement.ref,'REC_{0.name}_{1.name}'.format(port,dataElement),runnable)
runnable.dataReceivePoints.append(receivePoint)
elif isinstance(port,autosar.port.ProvidePort):
sendPoint=DataSendPoint(port.ref,dataElement.ref,'SEND_{0.name}_{1.name}'.format(port,dataElement),runnable)
runnable.dataSendPoints.append(sendPoint)
else:
raise ValueError('unexpected type: '+str(type(port)))
def _createSyncServerCallPoint(self,port,operation,runnable):
"""
Internal function that creates a SyncServerCallPoint if the port is a require port.
Provide ports are not supported and will raise a ValueError.
"""
if isinstance(port,autosar.port.RequirePort):
callPoint=SyncServerCallPoint('SC_{0.name}_{1.name}'.format(port,operation))
callPoint.operationInstanceRefs.append(OperationInstanceRef(port.ref, operation.ref))
runnable.serverCallPoints.append(callPoint)
else:
raise ValueError('unexpected type: '+str(type(port)))
def _calcModeInstanceComponentsForRequirePort(self, portName, modeValue):
self._initSWC()
ws = self.rootWS()
port = self.swc.find(portName)
if port is None:
raise ValueError('%s: Invalid port name: %s'%(self.swc.name, portName))
if not isinstance(port, autosar.port.RequirePort):
raise ValueError('%s: port must be a require-port: %s'%(self.swc.name, portName))
portInterface = ws.find(port.portInterfaceRef, role='PortInterface')
if (portInterface is None):
raise ValueError('invalid port interface reference: '+port.portInterfaceRef)
if isinstance(portInterface, autosar.portinterface.SenderReceiverInterface):
if (portInterface.modeGroups is None) or (len(portInterface.modeGroups)==0):
raise ValueError('port interface %s has no valid mode groups'%portInterface.name)
if len(portInterface.modeGroups)>1:
raise NotImplementedError('Only port interfaces with exactly one mode group are currently supported')
modeGroup = portInterface.modeGroups[0]
elif isinstance(portInterface, autosar.portinterface.ModeSwitchInterface):
modeGroup = portInterface.modeGroup
else:
raise NotImplementedError(type(portInterface))
assert(modeGroup is not None)
dataType = ws.find(modeGroup.typeRef)
if (dataType is None):
raise ValueError('%s has invalid typeRef: %s'%(modeGroup.name, modeGroup.typeRef))
assert(isinstance(dataType,autosar.mode.ModeDeclarationGroup))
modeDeclarationRef = None
modeDeclarationGroupRef = modeGroup.ref
for modeDeclaration in dataType.modeDeclarations:
if modeDeclaration.name == modeValue:
modeDeclarationRef = modeDeclaration.ref
return (modeDeclarationRef,modeDeclarationGroupRef,port.ref)
raise ValueError('"%s" did not match any of the mode declarations in %s'%(modeValue,dataType.ref))
def _createModeAccessPoint(self, port, modeGroup, runnable):
if isinstance(port, autosar.port.ProvidePort):
modeGroupInstanceRef = ProvideModeGroupInstanceRef(port.ref, modeGroup.ref)
else:
assert isinstance(port, autosar.port.RequirePort)
modeGroupInstanceRef = RequireModeGroupInstanceRef(port.ref, modeGroup.ref)
name = None #TODO: support user-controlled name?
modeAccessPoint = ModeAccessPoint(name, modeGroupInstanceRef)
runnable.modeAccessPoints.append(modeAccessPoint)
def _createModeSwitchPoint(self, port, modeGroup, runnable):
if isinstance(port, autosar.port.ProvidePort):
modeGroupInstanceRef = ProvideModeGroupInstanceRef(port.ref, modeGroup.ref)
else:
assert isinstance(port, autosar.port.RequirePort)
modeGroupInstanceRef = RequireModeGroupInstanceRef(port.ref, modeGroup.ref)
baseName='SWITCH_{0.name}_{1.name}'.format(port, modeGroup)
name = autosar.base.findUniqueNameInList(runnable.modeSwitchPoints, baseName)
modeSwitchPoint = ModeSwitchPoint(name, modeGroupInstanceRef, runnable)
runnable.modeSwitchPoints.append(modeSwitchPoint)
def createModeSwitchEvent(self, runnableName, modeRef, activationType='ENTRY', name=None):
self._initSWC()
ws = self.rootWS()
runnable=self.find(runnableName)
if runnable is None:
raise ValueError('invalid runnable name: '+runnableName)
assert(isinstance(runnable, autosar.behavior.RunnableEntity))
eventName=name
if eventName is None:
baseName = "MST_"+runnable.name
eventName = self._findEventName(baseName)
result = modeRef.partition('/')
if result[1]!='/':
raise ValueError('invalid modeRef, expected "portName/modeValue", got "%s"'%modeRef)
portName = result[0]
modeValue = result[2]
event = autosar.behavior.ModeSwitchEvent(eventName,runnable.ref, activationType, version=ws.version)
(modeDeclarationRef,modeDeclarationGroupRef,portRef) = self._calcModeInstanceComponentsForRequirePort(portName,modeValue)
event.modeInstRef = ModeInstanceRef(modeDeclarationRef, modeDeclarationGroupRef, portRef)
assert(isinstance(event.modeInstRef, autosar.behavior.ModeInstanceRef))
self.events.append(event)
return event
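# Usage sketch (illustrative names): trigger runnable 'Run_OnRunMode' when mode 'RUN' is entered
# on require mode port 'EcuMode'; modeRef uses the "portName/modeValue" pattern checked above.
#   behavior.createModeSwitchEvent('Run_OnRunMode', 'EcuMode/RUN', activationType='ENTRY')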
def createTimerEvent(self, runnableName, period, modeDependency=None, name=None ):
self._initSWC()
ws = self.rootWS()
runnable=self.find(runnableName)
if runnable is None:
raise ValueError('invalid runnable name: '+runnableName)
assert(isinstance(runnable, autosar.behavior.RunnableEntity))
eventName=name
if eventName is None:
#try to find a suitable name for the event
baseName = "TMT_"+runnable.name
eventName = self._findEventName(baseName)
event = autosar.behavior.TimingEvent(eventName,runnable.ref,period,self)
if modeDependency is not None:
self._processModeDependency(event, modeDependency, ws.version)
self.events.append(event)
return event
def createTimingEvent(self, runnableName, period, modeDependency=None, name=None):
"""
alias for createTimerEvent
"""
return self.createTimerEvent(runnableName, period, modeDependency, name)
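# Usage sketch (illustrative names): periodic triggering of a runnable; optional modeDependency
# entries use the "portName/modeValue" pattern and disable the event in those modes (AUTOSAR 4)
# or add a mode dependency (AUTOSAR 3).
#   behavior.createTimerEvent('Run_MyTask', period=20)
#   behavior.createTimerEvent('Run_MyTask', 20, modeDependency=['EcuMode/OFF'])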
def createOperationInvokedEvent(self, runnableName, operationRef, modeDependency=None, name=None ):
"""
creates a new OperationInvokedEvent
runnableName: name of the runnable to call (runnable must already exist)
operationRef: string using the format 'portName/operationName'
name: optional event name override (default is to generate a name automatically)
"""
self._initSWC()
ws = self.rootWS()
runnable=self.find(runnableName)
if runnable is None:
raise ValueError('invalid runnable name: '+runnableName)
assert(isinstance(runnable, autosar.behavior.RunnableEntity))
if not isinstance(operationRef, str):
raise ValueError("expected operationRef to be string of the format 'portName/operationName' ")
parts = autosar.base.splitRef(operationRef)
if len(parts)!=2:
raise ValueError("expected operationRef to be string of the format 'portName/operationName' ")
portName,operationName=parts[0],parts[1]
eventName=name
port = self.swc.find(portName)
if (port is None) or not isinstance(port, autosar.port.Port):
raise ValueError('invalid port name: '+portName)
portInterface = ws.find(port.portInterfaceRef)
if portInterface is None:
raise ValueError('invalid reference: '+port.portInterfaceRef)
if not isinstance(portInterface, autosar.portinterface.ClientServerInterface):
raise ValueError('The referenced port "%s" does not have a ClientServerInterface'%(port.name))
operation = portInterface.find(operationName)
if (operation is None) or not isinstance(operation, autosar.portinterface.Operation):
raise ValueError('invalid operation name: '+operationName)
if eventName is None:
eventName=self._findEventName('OIT_%s_%s_%s'%(runnable.name, port.name, operation.name))
event = OperationInvokedEvent(eventName, runnable.ref, self)
event.operationInstanceRef=OperationInstanceRef(port.ref, operation.ref)
if modeDependency is not None:
self._processModeDependency(event, modeDependency, ws.version)
self.events.append(event)
return event
def createDataReceivedEvent(self, runnableName, dataElementRef, modeDependency=None, name=None ):
"""
creates a new DataReceivedEvent
runnableName: name of the runnable to call (runnable must already exist)
dataElementRef: string using the format 'portName/dataElementName'. Using 'portName' only is also OK as long as the interface only has one element
name: optional event name override (default is to generate a name automatically)
"""
self._initSWC()
ws = self.rootWS()
runnable=self.find(runnableName)
if runnable is None:
raise autosar.base.InvalidRunnableRef(runnableName)
assert(isinstance(runnable, autosar.behavior.RunnableEntity))
if not isinstance(dataElementRef, str):
raise autosar.base.InvalidDataElementRef("expected dataElementRef to be string of the format 'portName' or 'portName/dataElementName' ")
parts = autosar.base.splitRef(dataElementRef)
if len(parts)==2:
portName, dataElementName = parts[0], parts[1]
elif len(parts)==1:
portName, dataElementName = parts[0], None
else:
raise autosar.base.InvalidDataElementRef("expected dataElementRef to be string of the format 'portName' or 'portName/dataElementName' ")
eventName=name
port = self.swc.find(portName)
if (port is None) or not isinstance(port, autosar.port.Port):
raise autosar.base.InvalidPortRef(portName)
portInterface = ws.find(port.portInterfaceRef)
if portInterface is None:
raise autosar.base.InvalidPortInterfaceRef('invalid reference: {}'.format(port.portInterfaceRef))
if isinstance(portInterface, autosar.portinterface.SenderReceiverInterface):
if dataElementName is None:
if len(portInterface.dataElements) == 1:
dataElement = portInterface.dataElements[0]
elif len(portInterface.dataElements) > 1:
raise autosar.base.InvalidDataElementRef("expected dataElementRef to be string of the format 'portName/dataElementName' ")
else:
raise autosar.base.InvalidDataElementRef('portInterface "{}" has no data elements'.format(portInterface.name))
else:
dataElement = portInterface.find(dataElementName)
if dataElement is None:
raise autosar.base.InvalidDataElementRef('portInterface "{}" has no data element {}'.format(portInterface.name, dataElementName))
elif not isinstance(dataElement, autosar.element.DataElement):
raise autosar.base.InvalidDataElementRef(dataElementName)
elif isinstance(portInterface, autosar.portinterface.NvDataInterface):
if dataElementName is None:
if len(portInterface.nvDatas) == 1:
dataElement = portInterface.nvDatas[0]
elif len(portInterface.nvDatas) > 1:
raise autosar.base.InvalidDataElementRef("expected dataElementRef to be string of the format 'portName/dataElementName' ")
else:
raise autosar.base.InvalidDataElementRef('portInterface "{}" has no nvdata elements'.format(portInterface.name))
else:
dataElement = portInterface.find(dataElementName)
if dataElement is None:
raise autosar.base.InvalidDataElementRef('portInterface "{}" has no nvdata {}'.format(portInterface.name, dataElementName))
elif not isinstance(dataElement, autosar.element.DataElement):
raise autosar.base.InvalidDataElementRef(dataElementName)
else:
raise autosar.base.InvalidPortRef('The referenced port "{}" does not have a SenderReceiverInterface or NvDataInterface'.format(port.name))
if eventName is None:
eventName=self._findEventName('DRT_{}_{}_{}'.format(runnable.name, port.name, dataElement.name))
event = DataReceivedEvent(eventName, runnable.ref, self)
event.dataInstanceRef = DataInstanceRef(port.ref, dataElement.ref)
if modeDependency is not None:
self._processModeDependency(event, modeDependency, ws.version)
self.events.append(event)
return event
def _findEventName(self, baseName):
return autosar.base.findUniqueNameInList(self.events, baseName)
def _processModeDependency(self, event, modeDependencyList, version):
for dependency in list(modeDependencyList):
result = dependency.partition('/')
if result[1]=='/':
portName=result[0]
modeValue=result[2]
(modeDeclarationRef,modeDeclarationGroupPrototypeRef,portRef) = self._calcModeInstanceComponentsForRequirePort(portName,modeValue)
else:
raise ValueError('invalid modeRef, expected "portName/modeValue", got "%s"'%dependency)
if version >= 4.0:
if event.disabledInModes is None:
event.disabledInModes = []
event.disabledInModes.append(DisabledModeInstanceRef(modeDeclarationRef, modeDeclarationGroupPrototypeRef, portRef))
else:
if event.modeDependency is None:
event.modeDependency = ModeDependency()
event.modeDependency.append(ModeDependencyRef(modeDeclarationRef, modeDeclarationGroupPrototypeRef, portRef))
def createExclusiveArea(self, name):
"""
creates a new ExclusiveArea
"""
self._initSWC()
ws = self.rootWS()
exclusiveArea = ExclusiveArea(str(name), self)
self.exclusiveAreas.append(exclusiveArea)
return exclusiveArea
class InternalBehavior(InternalBehaviorCommon):
""" InternalBehavior class (AUTOSAR 3)"""
def __init__(self,name, componentRef, multipleInstance=False,parent=None):
super().__init__(name, componentRef,multipleInstance, parent)
self.swcNvBlockNeeds = []
self.sharedCalParams=[]
def tag(self, version): return 'INTERNAL-BEHAVIOR'
def append(self,elem):
if isinstance(elem,RunnableEntity):
self.runnables.append(elem)
elem.parent=self
else:
raise NotImplementedError(str(type(elem)))
def find(self, ref):
if ref is None: return None
result = super().find(ref)
if result is None:
if ref[0]=='/': ref=ref[1:] #removes initial '/' if it exists
ref=ref.partition('/')
name=ref[0]
for elem in self.sharedCalParams:
if elem.name == name: return elem
else:
return result
return None
def __getitem__(self,key):
return self.find(key)
def createPerInstanceMemory(self, name, typeRef):
"""
creates a new PerInstanceMemory object
name: name of the object (str)
typeRef: dataType reference (str)
"""
self._initSWC()
ws = self.rootWS()
dataType = ws.find(typeRef, role='DataType')
if dataType is None:
raise ValueError('invalid reference: '+typeRef)
perInstanceMemory = PerInstanceMemory(name, dataType.ref, self)
self.perInstanceMemories.append(perInstanceMemory)
return perInstanceMemory
def createSharedCalParam(self, name, typeRef, SwAddrMethodRef, adminData=None):
self._initSWC()
ws = self.rootWS()
dataType = ws.find(typeRef, role='DataType')
if dataType is None:
raise ValueError('invalid reference: '+typeRef)
elem = CalPrmElemPrototype(name, dataType.ref, self, adminData)
elem.swDataDefsProps.append(SwAddrMethodRef)
self.sharedCalParams.append(elem)
return elem
def createNvmBlock(self, name, blockParams):
"""
creates a new SwcNvBlockNeeds object
name: name of the object (str)
blockParams: dict containing additional parameters
"""
self._initSWC()
ws = self.rootWS()
numberOfDataSets= int(blockParams['numberOfDataSets'])
readOnly= bool(blockParams['readOnly'])
reliability= str(blockParams['reliability'])
resistantToChangedSW= bool(blockParams['resistantToChangedSW'])
restoreAtStart= bool(blockParams['restoreAtStart'])
writeOnlyOnce= bool(blockParams['writeOnlyOnce'])
writingFrequency= str(blockParams['writingFrequency'])
writingPriority= str(blockParams['writingPriority'])
defaultBlockRef=None
mirrorBlockRef=None
#defaultBlockRef
defaultBlock = blockParams['defaultBlock']
if '/' in defaultBlock:
defaultBlockRef = defaultBlock #use as is
else:
for sharedCalParam in self.sharedCalParams:
if sharedCalParam.name == defaultBlock:
defaultBlockRef=sharedCalParam.ref
break
if defaultBlockRef is None:
raise ValueError('no SharedCalParam found with name: ' +defaultBlock)
#mirrorBlockref
mirrorBlock = blockParams['mirrorBlock']
if '/' in mirrorBlock:
mirrorBlockRef = mirrorBlock #use as is
else:
for perInstanceMemory in self.perInstanceMemories:
if perInstanceMemory.name == mirrorBlock:
mirrorBlockRef=perInstanceMemory.ref
break
if mirrorBlockRef is None:
raise ValueError('no PerInstanceMemory found with name: ' +mirrorBlock)
elem = SwcNvBlockNeeds(name, numberOfDataSets, readOnly, reliability, resistantToChangedSW, restoreAtStart,
writeOnlyOnce, writingFrequency, writingPriority, defaultBlockRef, mirrorBlockRef)
#serviceCallPorts
if isinstance(blockParams['serviceCallPorts'],str):
serviceCallPorts=[blockParams['serviceCallPorts']]
else:
serviceCallPorts = blockParams['serviceCallPorts']
if isinstance(serviceCallPorts, collections.Iterable):
for data in serviceCallPorts:
parts = autosar.base.splitRef(data)
if len(parts)!=2:
raise ValueError('serviceCallPorts must be either string or list of string of the format "portName/operationName"')
portName,operationName = parts[0],parts[1]
port = self.swc.find(portName)
if not isinstance(port, autosar.port.Port):
raise ValueError("'%s' is not a valid port name"%portName)
elem.serviceCallPorts.append(RoleBasedRPortAssignment(port.ref,operationName))
else:
raise ValueError('serviceCallPorts must be either a string or a list of strings of the format "portName/operationName"')
self.swcNvBlockNeeds.append(elem)
return elem
class SwcInternalBehavior(InternalBehaviorCommon):
"""
AUTOSAR 4 Internal Behavior
"""
def __init__(self,name, componentRef, multipleInstance=False,parent=None):
super().__init__(name, componentRef, multipleInstance, parent)
self.serviceDependencies = [] #list of SwcServiceDependency objects
self.parameterDataPrototype = [] #list of ParameterDataPrototype objects
self.dataTypeMappingRefs = [] #list of strings
def tag(self, version): return "SWC-INTERNAL-BEHAVIOR"
def find(self, ref):
if ref is None: return None
result = super().find(ref)
if result is None:
if ref[0]=='/': ref=ref[1:] #removes initial '/' if it exists
ref=ref.partition('/')
name=ref[0]
foundElem = None
for elem in self.parameterDataPrototype:
if elem.name == name:
foundElem = elem
break
if foundElem is not None:
if len(ref[2])>0:
return foundElem.find(ref[2])
else:
return foundElem
else:
return result
def createPerInstanceMemory(self, name, implementationTypeRef, swAddressMethodRef = None, swCalibrationAccess = None):
"""
AUTOSAR4: Creates a DataElement object and appends it to the internal perInstanceMemories list
name: name of the object (str)
implementationTypeRef: dataType reference (str)
swAddressMethodRef: Software address method reference (str)
swCalibrationAccess: software calibration access (str)
"""
self._initSWC()
ws = self.rootWS()
dataType = ws.find(implementationTypeRef, role='DataType')
if dataType is None:
raise ValueError('invalid reference: '+implementationTypeRef)
dataElement = DataElement(name, dataType.ref, swAddressMethodRef = swAddressMethodRef, swCalibrationAccess=swCalibrationAccess, parent=self)
self.perInstanceMemories.append(dataElement)
return dataElement
def createSharedDataParameter(self, name, implementationTypeRef, swAddressMethodRef = None, swCalibrationAccess = None, initValue = None):
"""
AUTOSAR4: Creates a ParameterDataPrototype object and appends it to the internal parameterDataPrototype list
"""
self._initSWC()
ws = self.rootWS()
dataType = ws.find(implementationTypeRef, role='DataType')
if dataType is None:
raise ValueError('invalid reference: '+implementationTypeRef)
parameter = autosar.element.ParameterDataPrototype(name, dataType.ref, swAddressMethodRef = swAddressMethodRef, swCalibrationAccess=swCalibrationAccess, initValue=initValue, parent=self)
self.parameterDataPrototype.append(parameter)
return parameter
def createNvmBlock(self, name, portName, perInstanceMemoryName, nvmBlockConfig = None, defaultValueName = None, perInstanceMemoryRole='ramBlock', defaultValueRole = 'defaultValue', blockAdminData = None):
"""
AUTOSAR 4: Creates a SwcServiceDependency object (wrapping an NvmBlockServiceNeeds) and appends it to the internal serviceDependencies list.
This assumes the needed service is the NVRAM manager (NvM).
"""
self._initSWC()
ws = self.rootWS()
if nvmBlockConfig is None:
nvmBlockConfig = NvmBlockConfig()
else:
assert(isinstance(nvmBlockConfig, NvmBlockConfig))
nvmBlockNeeds = NvmBlockNeeds(name, nvmBlockConfig, adminData = blockAdminData)
nvmBlockServiceNeeds = NvmBlockServiceNeeds(name, nvmBlockNeeds)
serviceDependency = SwcServiceDependency(name, nvmBlockServiceNeeds)
for port in self.swc.requirePorts:
if port.name == portName:
serviceDependency.roleBasedPortAssignments.append(RoleBasedPortAssignment(port.ref))
break
else:
raise ValueError('%s: No require port found with name "%s"'%(self.swc.name, portName))
for pim in self.perInstanceMemories:
if pim.name == perInstanceMemoryName:
serviceDependency.roleBasedDataAssignments.append(RoleBasedDataAssignment(perInstanceMemoryRole, localVariableRef = pim.ref))
break
else:
raise ValueError('%s: No per-instance-memory found with name "%s"'%(self.swc.name, perInstanceMemoryName))
if defaultValueName is not None:
for param in self.parameterDataPrototype:
if param.name == defaultValueName:
serviceDependency.roleBasedDataAssignments.append(RoleBasedDataAssignment(defaultValueRole, localParameterRef = param.ref))
break
else:
raise ValueError('%s: No shared data parameter found with name "%s"'%(self.swc.name, defaultValueName))
self.serviceDependencies.append(serviceDependency)
return serviceDependency
def createInitEvent(self, runnableName, modeDependency=None, name=None ):
self._initSWC()
ws = self.rootWS()
runnable=self.find(runnableName)
if runnable is None:
raise ValueError('invalid runnable name: '+runnableName)
assert(isinstance(runnable, autosar.behavior.RunnableEntity))
eventName=name
if eventName is None:
baseName = "IT_"+runnable.name
eventName = self._findEventName(baseName)
event = autosar.behavior.InitEvent(eventName, runnable.ref)
if modeDependency is not None:
self._processModeDependency(event, modeDependency, ws.version)
self.events.append(event)
return event
def createModeSwitchAckEvent(self, runnableName, modeSwitchSource, modeDependency=None, name=None ):
"""
Creates a new ModeSwitchAckEvent or <MODE-SWITCHED-ACK-EVENT> (AUTOSAR 4)
Parameters:
* runnableName: Name of the runnable to trigger on this event (str)
* modeSwitchSource: Name of the runnable that has the mode switch point. (str)
If the source runnable has multiple mode switch points, use the pattern "RunnableName/ModePortName"
to select the correct source point.
* modeDependency: Modes this runnable shall be disabled in (list(str))
* name: Event name override (str). Default is to create a name automatically.
"""
self._initSWC()
ws = self.rootWS()
triggerRunnable = self.find(runnableName)
if triggerRunnable is None:
raise ValueError('Invalid runnable name: '+runnableName)
if not isinstance(triggerRunnable, autosar.behavior.RunnableEntity):
raise ValueError('Element with name {} is not a runnable'.format(runnableName))
baseName = 'MSAT_'+triggerRunnable.name
eventName = autosar.base.findUniqueNameInList(self.events, baseName)
ref = modeSwitchSource.partition('/')
sourceRunnableName = ref[0]
sourceModeSwitchPoint = None
sourceRunnable = self.find(sourceRunnableName)
if sourceRunnable is None:
raise ValueError('Invalid runnable name: '+sourceRunnableName)
if not isinstance(sourceRunnable, autosar.behavior.RunnableEntity):
raise ValueError('Element with name {} is not a runnable'.format(sourceRunnableName))
if len(sourceRunnable.modeSwitchPoints) == 0:
raise RuntimeError('Runnable {0.name} must have at least one mode switch point'.format(sourceRunnable))
if len(ref[1])==0:
#No '/' delimiter was used. This is OK only when the source runnable has only one modeSwitchPoint (no ambiguity)
if len(sourceRunnable.modeSwitchPoints) > 1:
raise ValueError('Ambiguous use of modeSwitchSource "{}". Please use pattern "RunnableName/PortName" in modeSwitchSource argument'.format(modeSwitchSource))
sourceModeSwitchPoint = sourceRunnable.modeSwitchPoints[0]
else:
#Search through all modeSwitchPoints to find port name that matches second half of the partition split
modePortName = ref[2]
for elem in sourceRunnable.modeSwitchPoints:
port = ws.find(elem.modeGroupInstanceRef.providePortRef)
if port is None:
raise autosar.base.InvalidPortRef(elem.modeGroupInstanceRef.providePortRef)
if port.name == modePortName:
sourceModeSwitchPoint = elem
break
else:
raise ValueError('Invalid modeSwitchSource argument "{0}": Unable to find a ModeSwitchPoint containing the port name in that runnable'.format(modeSwitchSource))
assert(sourceModeSwitchPoint is not None)
#Now that we have collected all the pieces we need we can finally create the event
assert (triggerRunnable.ref is not None) and (sourceModeSwitchPoint.ref is not None)
event = ModeSwitchAckEvent(eventName, triggerRunnable.ref, sourceModeSwitchPoint.ref)
if modeDependency is not None:
self._processModeDependency(event, modeDependency, ws.version)
self.events.append(event)
return event
class VariableAccess(Element):
def __init__(self, name, portPrototypeRef, targetDataPrototypeRef, parent=None):
super().__init__(name, parent)
self.portPrototypeRef=portPrototypeRef
self.targetDataPrototypeRef = targetDataPrototypeRef
def tag(self, version=None):
return 'VARIABLE-ACCESS'
class ServiceNeeds(Element):
"""
Represents <SERVICE-NEEDS> (AUTOSAR 4)
This is a base class, it is expected that different service needs derive from this class
"""
def tag(self, version): return 'SERVICE-NEEDS'
def __init__(self, name = None, nvmBlockNeeds = None, parent=None, adminData = None):
super().__init__(name, parent, adminData)
self.nvmBlockNeeds = nvmBlockNeeds
class NvmBlockServiceNeeds(ServiceNeeds):
def __init__(self, name, nvmBlockNeeds = None, parent=None, adminData = None):
super().__init__(name, parent, adminData)
assert(nvmBlockNeeds is None or isinstance(nvmBlockNeeds, NvmBlockNeeds))
self.nvmBlockNeeds = nvmBlockNeeds
class SwcServiceDependency(Element):
"""
Represents <SWC-SERVICE-DEPENDENCY> (AUTOSAR 4)
"""
def tag(self, version): return 'SWC-SERVICE-DEPENDENCY'
def __init__(self, name=None, serviceNeeds = None, parent=None, adminData = None):
super().__init__(name, parent, adminData)
self._serviceNeeds = None #None or ServiceNeeds object
self.roleBasedDataAssignments = []
self.roleBasedPortAssignments = []
if serviceNeeds is not None:
assert(isinstance(serviceNeeds, ServiceNeeds))
self.serviceNeeds = serviceNeeds #this uses the setter method
@property
def serviceNeeds(self):
return self._serviceNeeds
@serviceNeeds.setter
def serviceNeeds(self, elem):
elem.parent = self
self._serviceNeeds = elem
class RoleBasedDataAssignment:
"""
Represents <ROLE-BASED-DATA-ASSIGNMENT> (AUTOSAR 4)
"""
def __init__(self, role, localVariableRef=None, localParameterRef=None):
assert(isinstance(role, str))
assert(localVariableRef is None or isinstance(localVariableRef, str))
assert(localParameterRef is None or isinstance(localParameterRef, autosar.behavior.LocalParameterRef) or isinstance(localParameterRef, str))
self.role = role
self.localVariableRef = localVariableRef
self.localParameterRef = localParameterRef
def tag(self, version): return 'ROLE-BASED-DATA-ASSIGNMENT'
class RoleBasedPortAssignment:
"""
Represents <ROLE-BASED-PORT-ASSIGNMENT> (AUTOSAR 4)
"""
def __init__(self, portRef, role = None):
assert(isinstance(portRef, str))
self.portRef = portRef
self.role = role
def tag(self, version): return 'ROLE-BASED-PORT-ASSIGNMENT'
class ParameterDataPrototype(Element):
"""
Represents <PARAMETER-DATA-PROTOTYPE> (AUTOSAR 4)
"""
def __init__(self, name, typeRef, swAddressMethodRef=None, swCalibrationAccess=None, initValue = None, initValueRef = None, parent=None, adminData=None):
super().__init__(name, parent, adminData)
self.typeRef = typeRef
self.swAddressMethodRef = swAddressMethodRef
self.swCalibrationAccess = swCalibrationAccess
self.initValue = initValue
self.initValueRef = initValueRef
def tag(self, version): return 'PARAMETER-DATA-PROTOTYPE'
class ParameterInstanceRef:
"""
Represents <AUTOSAR-PARAMETER-IREF> (AUTOSAR 4)
"""
def __init__(self, portRef, parameterDataRef):
self.portRef = portRef
self.parameterDataRef = parameterDataRef
def tag(self, version): return 'AUTOSAR-PARAMETER-IREF'
class LocalParameterRef:
"""
Represents <LOCAL-PARAMETER-REF> (AUTOSAR 4)
"""
def __init__(self, parameterDataRef):
self.parameterDataRef = parameterDataRef
def tag(self, version): return 'LOCAL-PARAMETER-REF'
class ParameterAccessPoint(Element):
"""
Represents <PARAMETER-ACCESS> (AUTOSAR 4)
"""
def __init__(self, name, accessedParameter = None, parent = None, adminData = None):
super().__init__(name, parent, adminData)
self.accessedParameter = accessedParameter #this can be NoneType or LocalParameterRef or ParameterInstanceRef
def tag(self, version): return 'PARAMETER-ACCESS'
class ModeAccessPoint:
"""
Represents <MODE-ACCESS-POINT> (AUTOSAR 4)
In the XSD this is not a first-class element.
Therefore we do not inherit from Element but instead allow <SHORT-NAME> only as (optional) identifier
"""
def __init__(self, name = None, modeGroupInstanceRef = None):
"""
Arguments:
* name: <SHORT-NAME> (None or str)
* modeGroupInstanceRef: <MODE-GROUP-IREF> (None or (class derived from) ModeGroupInstanceRef)
"""
self.name = str(name) if name is not None else None
self.modeGroupInstanceRef = modeGroupInstanceRef
def tag(self, version):
return 'MODE-ACCESS-POINT'
@property
def modeGroupInstanceRef(self):
return self._modeGroupInstanceRef
@modeGroupInstanceRef.setter
def modeGroupInstanceRef(self, value):
if value is not None:
if not isinstance(value, ModeGroupInstanceRef):
raise ValueError("Value must be None or an instance of ModeGroupInstanceRef")
self._modeGroupInstanceRef = value
value.parent = self
else:
self._modeGroupInstanceRef = None
class ModeSwitchPoint(Element):
"""
Represents <MODE-SWITCH-POINT> (AUTOSAR 4)
"""
def __init__(self, name, modeGroupInstanceRef = None, parent=None, adminData=None):
super().__init__(name, parent, adminData)
self.modeGroupInstanceRef = modeGroupInstanceRef
def tag(self, version):
return 'MODE-SWITCH-POINT'
@property
def modeGroupInstanceRef(self):
return self._modeGroupInstanceRef
@modeGroupInstanceRef.setter
def modeGroupInstanceRef(self, value):
if value is not None:
if not isinstance(value, ModeGroupInstanceRef):
raise ValueError("Value must be None or an instance of ModeGroupInstanceRef")
self._modeGroupInstanceRef = value
value.parent = self
else:
self._modeGroupInstanceRef = None
# Behavior parts of NvBlockComponent.
def createNvBlockDescriptor(parent, portAccess, **kwargs):
"""
Creates a new NvBlockDescriptor object.
* parent: NvBlockComponent to create descriptor in.
* portAccess: String containing port names or "port-name/element" where element is Nvdata (str)
* baseName: override of the default baseName of the object (str).
"""
descriptor = None
nvData = None
ws = parent.rootWS()
assert (ws is not None)
assert (isinstance(portAccess, str))
adminData = kwargs.get('adminData', None)
baseName = kwargs.get('baseName', None)
if baseName is None:
baseName = 'NvBlckDescr'
ref = portAccess.partition('/')
port = parent.find(ref[0])
if port is None:
raise ValueError('invalid port reference: '+str(portAccess))
portInterface = ws.find(port.portInterfaceRef)
if portInterface is None:
raise ValueError('invalid portinterface reference: '+str(port.portInterfaceRef))
if isinstance(portInterface, autosar.portinterface.NvDataInterface):
if len(ref[1]) == 0:
#this section is for portAccess where only the port name is mentioned.
#This method only works if the port interface has only 1 data element,
# i.e. no ambiguity as to what data element is meant
if len(portInterface.nvDatas)==1:
nvData=portInterface.nvDatas[0]
descriptor = NvBlockDescriptor('{0}_{1.name}_{2.name}'.format(baseName, port, nvData), parent, adminData)
else:
raise NotImplementedError('port interfaces with multiple data elements not supported')
else:
#this section is for portAccess where both the port name and the data element are given as "portName/dataElementName"
if isinstance(portInterface, autosar.portinterface.NvDataInterface):
nvData=portInterface.find(ref[2])
if nvData is None:
raise ValueError('invalid data element reference: '+str(portAccess))
descriptor = NvBlockDescriptor('{0}_{1.name}_{2.name}'.format(baseName, port, nvData), parent, adminData)
else:
raise autosar.base.InvalidPortInterfaceRef(type(portInterface))
if descriptor is not None:
dataTypeMappingRefs = kwargs.get('dataTypeMappingRefs', None)
nvmBlockConfig = kwargs.get('NvmBlockConfig', None)
timingEventRef = kwargs.get('timingEventRef', None)
swCalibrationAccess = kwargs.get('swCalibrationAccess', None)
supportDirtyFlag = kwargs.get('supportDirtyFlag', False)
ramBlockAdminData = kwargs.get('ramBlockAdminData', None)
romBlockAdminData = kwargs.get('romBlockAdminData', None)
romBlockDesc = kwargs.get('romBlockDesc', None)
romBlockLongName = kwargs.get('romBlockLongName', None)
romBlockInitValueRef = kwargs.get('romBlockInitValueRef', None)
rawRomBlockInitValue = kwargs.get('romBlockInitValue', None)
romBlockInitValue = None
if nvmBlockConfig is None or not isinstance(nvmBlockConfig, autosar.behavior.NvmBlockConfig):
raise autosar.base.InvalidDataTypeRef('NvmBlockConfig is missing or is not an autosar.behavior.NvmBlockConfig')
descriptor.nvBlockNeeds = autosar.behavior.NvmBlockNeeds('NvmBlockNeed', nvmBlockConfig, parent)
if dataTypeMappingRefs is not None:
if isinstance(dataTypeMappingRefs, str):
dataTypeMappingRefs = [dataTypeMappingRefs]
descriptor.dataTypeMappingRefs.extend(dataTypeMappingRefs)
if not isinstance(supportDirtyFlag, bool):
raise ValueError('supportDirtyFlag must be of bool type: '+str(type(supportDirtyFlag)))
descriptor.supportDirtyFlag = supportDirtyFlag
if isinstance(timingEventRef, str):
timingEvent = parent.behavior.find(timingEventRef)
if timingEvent is None:
raise ValueError('invalid timing event reference: '+str(timingEventRef))
descriptor.timingEventRef = timingEvent.name
#verify compatibility of romBlockInitValueRef
if romBlockInitValueRef is not None:
initValueTmp = ws.find(romBlockInitValueRef, role='Constant')
if initValueTmp is None:
raise autosar.base.InvalidInitValueRef(str(romBlockInitValueRef))
if isinstance(initValueTmp,autosar.constant.Constant):
romBlockInitValueRef=initValueTmp.ref
elif isinstance(initValueTmp,autosar.constant.Value):
romBlockInitValueRef=initValueTmp.ref
else:
raise ValueError("reference is not a Constant or Value object: '%s'"%romBlockInitValueRef)
if rawRomBlockInitValue is not None:
if isinstance(rawRomBlockInitValue, autosar.constant.ValueAR4):
romBlockInitValue = rawRomBlockInitValue
elif isinstance(rawRomBlockInitValue, (int, float, str)):
dataType = ws.find(nvData.typeRef, role='DataType')
if dataType is None:
raise autosar.base.InvalidDataTypeRef(nvData.typeRef)
valueBuilder = autosar.builder.ValueBuilder()
romBlockInitValue = valueBuilder.buildFromDataType(dataType, rawRomBlockInitValue)
else:
raise ValueError('romBlockInitValue must be an instance of (autosar.constant.ValueAR4, int, float, str)')
dataType = ws.find(nvData.typeRef, role='DataType')
if dataType is None:
raise ValueError('invalid reference: '+nvData.typeRef)
descriptor.romBlock = NvBlockRomBlock('ParameterDataPt', dataType.ref,
swCalibrationAccess=swCalibrationAccess,
initValue=romBlockInitValue,
initValueRef=romBlockInitValueRef,
parent=descriptor,
adminData=romBlockAdminData)
if romBlockDesc is not None:
descriptor.romBlock.desc = romBlockDesc
if romBlockLongName is not None:
descriptor.romBlock.longName = romBlockLongName
descriptor.ramBlock = NvBlockRamBlock('VariableDataPt', dataType.ref, parent = descriptor, adminData = ramBlockAdminData)
nvBlockDataMapping = NvBlockDataMapping(descriptor)
nvBlockDataMapping.nvRamBlockElement = NvRamBlockElement(parent=nvBlockDataMapping, localVariableRef=descriptor.ramBlock)
if isinstance(port, autosar.port.RequirePort):
nvBlockDataMapping.writtenNvData = WrittenNvData(parent=nvBlockDataMapping, autosarVariablePortRef=port, autosarVariableElementRef=nvData)
if isinstance(port, autosar.port.ProvidePort):
nvBlockDataMapping.readNvData = ReadNvData(parent=nvBlockDataMapping, autosarVariablePortRef=port, autosarVariableElementRef=nvData)
descriptor.nvBlockDataMappings.append(nvBlockDataMapping)
parent.nvBlockDescriptors.append(descriptor)
return descriptor
class NvBlockRamBlock(autosar.element.DataElement):
"""
<RAM-BLOCK>
"""
def __init__(self, name, typeRef, isQueued=False, swAddressMethodRef=None, swCalibrationAccess=None, swImplPolicy = None, category = None, parent=None, adminData=None):
super().__init__(name, typeRef, isQueued, swAddressMethodRef, swCalibrationAccess, swImplPolicy, category, parent, adminData)
@classmethod
def cast(cls, ramBlock: autosar.element.DataElement):
"""Cast an autosar.element.DataElement into a NvBlockRamBlock."""
assert isinstance(ramBlock, autosar.element.DataElement)
ramBlock.__class__ = cls
assert isinstance(ramBlock, NvBlockRamBlock)
return ramBlock
def tag(self, version):
return 'RAM-BLOCK'
class NvBlockRomBlock(ParameterDataPrototype):
"""
Represents <ROM-BLOCK>
"""
def __init__(self, name, typeRef, swAddressMethodRef=None, swCalibrationAccess=None, initValue = None, initValueRef = None, parent=None, adminData=None):
super().__init__(name=name, parent=parent, typeRef=typeRef, swAddressMethodRef=swAddressMethodRef, swCalibrationAccess=swCalibrationAccess, initValue=initValue, initValueRef=initValueRef, adminData=adminData)
@classmethod
def cast(cls, romBlock: ParameterDataPrototype):
"""Cast an ParameterDataPrototype into a NvBlockRomBlock."""
assert isinstance(romBlock, ParameterDataPrototype)
romBlock.__class__ = cls
assert isinstance(romBlock, NvBlockRomBlock)
return romBlock
def tag(self, version): return 'ROM-BLOCK'
class NvBlockDescriptor(Element):
"""
<NV-BLOCK-DESCRIPTOR>
"""
def __init__(self, name, parent=None, adminData = None):
super().__init__(name, parent, adminData)
self.dataTypeMappingRefs = []
self.nvBlockDataMappings = []
self.nvBlockNeeds = None
self.ramBlock = None
self.romBlock = None
self.supportDirtyFlag = False
self.timingEventRef = None
def find(self, ref):
parts=ref.partition('/')
for elem in self.ramBlock, self.romBlock:
if elem.name == parts[0]:
return elem
return None
def tag(self, version):
return 'NV-BLOCK-DESCRIPTOR'
class NvBlockDataMapping(object):
"""
<NV-BLOCK-DATA-MAPPING>
"""
def __init__(self, parent=None):
self.parent = parent
self.nvRamBlockElement = None
self.readNvData = None
self.writtenNvData = None
self.writtenReadNvData = None
def tag(self, version):
return 'NV-BLOCK-DATA-MAPPING'
class AutosarVariableRef(object):
"""
Base class for type AUTOSAR-VARIABLE-REF
* localVariableRef: This reference is used if the variable is local to the current component.
* autosarVariablePortRef: Port part of the autosarVariableRef.
* autosarVariableElementRef: Element part of the autosarVariableRef.
"""
def tag(self,version):
return "AUTOSAR-VARIABLE-REF"
def __init__(self, parent=None, localVariableRef=None, autosarVariablePortRef=None, autosarVariableElementRef=None):
self.parent = parent
if isinstance(localVariableRef,str):
self.localVariableRef=localVariableRef
elif hasattr(localVariableRef,'ref'):
assert(isinstance(localVariableRef.ref,str))
self.localVariableRef=localVariableRef.ref
else:
self.localVariableRef=None
if isinstance(autosarVariablePortRef,str):
self.autosarVariablePortRef=autosarVariablePortRef
elif hasattr(autosarVariablePortRef,'ref'):
assert(isinstance(autosarVariablePortRef.ref,str))
self.autosarVariablePortRef=autosarVariablePortRef.ref
else:
self.autosarVariablePortRef=None
if isinstance(autosarVariableElementRef,str):
self.autosarVariableElementRef=autosarVariableElementRef
elif hasattr(autosarVariableElementRef,'ref'):
assert(isinstance(autosarVariableElementRef.ref,str))
self.autosarVariableElementRef=autosarVariableElementRef.ref
else:
self.autosarVariableElementRef=None
class NvRamBlockElement(AutosarVariableRef):
def __init__(self, parent=None, localVariableRef=None):
super().__init__(parent=parent, localVariableRef=localVariableRef)
def tag(self,version):
return "NV-RAM-BLOCK-ELEMENT"
class ReadNvData(AutosarVariableRef):
def __init__(self, parent=None, autosarVariablePortRef=None, autosarVariableElementRef=None):
super().__init__(parent=parent, autosarVariablePortRef=autosarVariablePortRef, autosarVariableElementRef=autosarVariableElementRef)
def tag(self,version):
return "READ-NV-DATA"
class WrittenNvData(AutosarVariableRef):
def __init__(self, parent=None, autosarVariablePortRef=None, autosarVariableElementRef=None):
super().__init__(parent=parent, autosarVariablePortRef=autosarVariablePortRef, autosarVariableElementRef=autosarVariableElementRef)
def tag(self,version):
return "WRITTEN-NV-DATA"
class WrittenReadNvData(AutosarVariableRef):
def __init__(self, parent=None, autosarVariablePortRef=None, autosarVariableElementRef=None):
super().__init__(parent=parent, autosarVariablePortRef=autosarVariablePortRef, autosarVariableElementRef=autosarVariableElementRef)
def tag(self,version):
return "WRITTEN-READ-NV-DATA"
| 46.361308
| 217
| 0.64936
|
77f4aa392774eaf05a823f7c86050cfbd234e463
| 4,162
|
py
|
Python
|
pywikibot/families/wikisource_family.py
|
5j9/pywikibot-core
|
e4af37f1034e63e7027278b81c4c78e80ab4b370
|
[
"MIT"
] | null | null | null |
pywikibot/families/wikisource_family.py
|
5j9/pywikibot-core
|
e4af37f1034e63e7027278b81c4c78e80ab4b370
|
[
"MIT"
] | null | null | null |
pywikibot/families/wikisource_family.py
|
5j9/pywikibot-core
|
e4af37f1034e63e7027278b81c4c78e80ab4b370
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Family module for Wikisource."""
#
# (C) Pywikibot team, 2004-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
from pywikibot import family
from pywikibot.tools import classproperty
# The Wikimedia family that is known as Wikisource
class Family(family.SubdomainFamily, family.WikimediaFamily):
"""Family class for Wikisource."""
name = 'wikisource'
closed_wikis = [
# https://noc.wikimedia.org/conf/highlight.php?file=dblists/closed.dblist # noqa
'ang', 'ht',
]
removed_wikis = [
# https://noc.wikimedia.org/conf/highlight.php?file=dblists/deleted.dblist # noqa
'tokipona',
]
languages_by_size = [
'en', 'pl', 'ru', 'de', 'fr', 'zh', 'he', 'it', 'es', 'ar', 'cs', 'pt',
'www', 'fa', 'gu', 'hu', 'ml', 'sv', 'ko', 'bn', 'sr', 'sa', 'uk',
'hy', 'sl', 'te', 'el', 'th', 'ro', 'fi', 'ja', 'vi', 'az', 'ta', 'ca',
'br', 'nl', 'kn', 'hr', 'la', 'no', 'is', 'eo', 'vec', 'tr', 'pms',
'et', 'be', 'da', 'mk', 'id', 'yi', 'bg', 'li', 'as', 'mr', 'or', 'gl',
'bs', 'sah', 'lt', 'sk', 'pa', 'eu', 'cy', 'zh-min-nan', 'fo', 'nap',
]
# Sites we want to edit but not count as real languages
test_codes = ['beta']
category_redirect_templates = {
'_default': (),
'ar': ('قالب:تحويل تصنيف',),
'bn': ('বিষয়শ্রেণী পুনর্নির্দেশ',),
'en': ('Category redirect',),
'ro': ('Redirect categorie',),
'zh': ('分類重定向',),
}
# All requests to 'mul.wikisource.org/*' are redirected to
# the main page, so using 'wikisource.org'
@classproperty
def langs(cls):
cls.langs = super(Family, cls).langs
cls.langs['mul'] = cls.domain
cls.langs['beta'] = 'en.wikisource.beta.wmflabs.org'
return cls.langs
# Need to explicitly inject the beta domain
@classproperty
def domains(cls):
cls.domains = super(Family, cls).domains
cls.domains.append(cls.langs['beta'])
return cls.domains
languages_by_size.append('mul')
# Global bot allowed languages on
# https://meta.wikimedia.org/wiki/BPI#Current_implementation
cross_allowed = [
'ca', 'el', 'fa', 'it', 'ko', 'no', 'pl', 'vi', 'zh',
]
authornamespaces = {
'_default': [0],
'ar': [102],
'be': [102],
'bn': [100],
'bg': [100],
'ca': [106],
'cs': [100],
'da': [102],
'en': [102],
'eo': [102],
'et': [106],
'fa': [102],
'fr': [102],
'he': [108],
'hr': [100],
'hu': [100],
'hy': [100],
'it': [102],
'ko': [100],
'la': [102],
'nl': [102],
'no': [102],
'pl': [104],
'pt': [102],
'ro': [102],
'sv': [106],
'tr': [100],
'vi': [102],
'zh': [102],
'beta': [102],
}
# Subpages for documentation.
# TODO: List is incomplete, to be completed for missing languages.
# TODO: Remove comments for appropriate pages
doc_subpages = {
'_default': (('/doc', ),
['ar', 'as', 'az', 'bn', 'en', 'es',
'et', 'gu', 'hu', 'it', 'ja', 'kn', 'ml',
'mk', 'mr', 'pt', 'ro', 'sa', 'sah', 'ta',
'te', 'th', 'vi']
),
'be': ('/Дакументацыя', ),
'bn': ('/নথি', ),
'br': ('/diellerezh', ),
'de': ('/Doku', '/Meta'),
'el': ('/τεκμηρίωση', ),
'eo': ('/dokumentado', ),
# 'fa': ('/صفحه الگو', ),
# 'fa': ('/فضاینام توضیحات', ),
# 'fa': ('/آغاز جعبه', ),
# 'fa': ('/پایان جعبه۲', ),
# 'fa': ('/آغاز جعبه۲', ),
# 'fa': ('/پایان جعبه', ),
# 'fa': ('/توضیحات', ),
'fr': ('/documentation', ),
'id': ('/dok', ),
'ko': ('/설명문서', ),
'no': ('/dok', ),
'ru': ('/Документация', ),
'sl': ('/dok', ),
'sv': ('/dok', ),
'uk': ('/документація', ),
}
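# Quick illustration of the classproperties above (assumption: the values shown are
# derived from this file's definitions, not queried from a live wiki):
#   Family.langs['mul'] == 'wikisource.org' (cls.domain; see the redirect note above `langs`)
#   Family.langs['beta'] == 'en.wikisource.beta.wmflabs.org'
#   Family.domains includes the beta domain because `domains` appends langs['beta'].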
| 29.728571
| 90
| 0.451946
|
236953c1621726b582d2f00802ac072dc27b6181
| 11,489
|
py
|
Python
|
tempest/cmd/run.py
|
HybridF5/tempest-jacket
|
7066346b3de76e024df6f1230b8a829b53840d40
|
[
"Apache-2.0"
] | null | null | null |
tempest/cmd/run.py
|
HybridF5/tempest-jacket
|
7066346b3de76e024df6f1230b8a829b53840d40
|
[
"Apache-2.0"
] | null | null | null |
tempest/cmd/run.py
|
HybridF5/tempest-jacket
|
7066346b3de76e024df6f1230b8a829b53840d40
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Runs tempest tests
This command is used for running the tempest tests
Test Selection
==============
Tempest run has several options:
* **--regex/-r**: This is a selection regex like what testr uses. It will run
any tests that match on re.match() with the regex
* **--smoke**: Run all the tests tagged as smoke
There are also the **--blacklist-file** and **--whitelist-file** options that
let you pass a filepath to tempest run. The file format is one regex per
line, with '#' used to signify the start of a comment on a line.
For example::
# Regex file
^regex1 # Match these tests
.*regex2 # Match those tests
The blacklist file will be used to construct a negative lookahead regex and
the whitelist file will simply OR all the regexes in the file. The whitelist
and blacklist file options are mutually exclusive so you can't use them
together. However, you can combine either with a normal regex or the *--smoke*
flag. When used with a blacklist file the generated regex will be combined to
something like::
^((?!black_regex1|black_regex2).)*$cli_regex1
When combined with a whitelist file all the regexes from the file and the CLI
regexes will be ORed.
You can also use the **--list-tests** option in conjunction with selection
arguments to list which tests will be run.
Test Execution
==============
There are several options to control how the tests are executed. By default
tempest will run in parallel with a worker for each CPU present on the machine.
If you want to adjust the number of workers use the **--concurrency** option
and if you want to run tests serially use **--serial**
Running with Workspaces
-----------------------
Tempest run enables you to run your tempest tests from any tempest workspace
you have set up; it relies on you having created a workspace with either the
``tempest init`` or ``tempest workspace`` commands. Then, using the
``--workspace`` CLI option, you can specify which one of your workspaces you
want to run tempest from. With this option you don't have to run Tempest
with your current working directory set to the workspace; Tempest will
take care of managing everything to be executed from there.
Running from Anywhere
---------------------
Tempest run provides you with an option to execute tempest from anywhere on
your system. You are required to provide a config file in this case with the
``--config-file`` option. When run, tempest will create a .testrepository
directory and a .testr.conf file in your current working directory. This way
you can use testr commands directly to inspect the state of the previous run.
Test Output
===========
By default, tempest run's output to STDOUT is generated using the
subunit-trace output filter. If you would prefer that a subunit v2 stream be
output to STDOUT instead, use the **--subunit** flag.
"""
import io
import os
import sys
import threading
from cliff import command
from os_testr import regex_builder
from os_testr import subunit_trace
from testrepository.commands import run_argv
from tempest.cmd import init
from tempest.cmd import workspace
from tempest import config
CONF = config.CONF
class TempestRun(command.Command):
def _set_env(self, config_file=None):
if config_file:
CONF.set_config_path(os.path.abspath(config_file))
# NOTE(mtreinish): This is needed so that testr doesn't gobble up any
# stacktraces on failure.
if 'TESTR_PDB' in os.environ:
return
else:
os.environ["TESTR_PDB"] = ""
def _create_testrepository(self):
if not os.path.isdir('.testrepository'):
returncode = run_argv(['testr', 'init'], sys.stdin, sys.stdout,
sys.stderr)
if returncode:
sys.exit(returncode)
def _create_testr_conf(self):
top_level_path = os.path.dirname(os.path.dirname(__file__))
discover_path = os.path.join(top_level_path, 'test_discover')
file_contents = init.TESTR_CONF % (top_level_path, discover_path)
with open('.testr.conf', 'w+') as testr_conf_file:
testr_conf_file.write(file_contents)
def take_action(self, parsed_args):
returncode = 0
if parsed_args.config_file:
self._set_env(parsed_args.config_file)
else:
self._set_env()
# Workspace execution mode
if parsed_args.workspace:
workspace_mgr = workspace.WorkspaceManager(
parsed_args.workspace_path)
path = workspace_mgr.get_workspace(parsed_args.workspace)
os.chdir(path)
# NOTE(mtreinish): tempest init should create a .testrepository dir
# but since workspaces can be imported let's sanity check and
# ensure that one is created
self._create_testrepository()
# Local execution mode
elif os.path.isfile('.testr.conf'):
# If you're running in local execution mode and there is not a
# testrepository dir create one
self._create_testrepository()
# local execution with config file mode
elif parsed_args.config_file:
self._create_testr_conf()
self._create_testrepository()
else:
print("No .testr.conf file was found for local execution")
sys.exit(2)
regex = self._build_regex(parsed_args)
if parsed_args.list_tests:
argv = ['tempest', 'list-tests', regex]
returncode = run_argv(argv, sys.stdin, sys.stdout, sys.stderr)
else:
options = self._build_options(parsed_args)
returncode = self._run(regex, options)
sys.exit(returncode)
def get_description(self):
return 'Run tempest'
def get_parser(self, prog_name):
parser = super(TempestRun, self).get_parser(prog_name)
parser = self._add_args(parser)
return parser
def _add_args(self, parser):
# workspace args
parser.add_argument('--workspace', default=None,
help='Name of tempest workspace to use for running'
' tests. You can see a list of workspaces '
'with tempest workspace list')
parser.add_argument('--workspace-path', default=None,
dest='workspace_path',
help="The path to the workspace file, the default "
"is ~/.tempest/workspace.yaml")
# Configuration flags
parser.add_argument('--config-file', default=None, dest='config_file',
help='Configuration file to run tempest with')
# test selection args
regex = parser.add_mutually_exclusive_group()
regex.add_argument('--smoke', action='store_true',
help="Run the smoke tests only")
regex.add_argument('--regex', '-r', default='',
help='A normal testr selection regex used to '
'specify a subset of tests to run')
list_selector = parser.add_mutually_exclusive_group()
list_selector.add_argument('--whitelist-file', '--whitelist_file',
help="Path to a whitelist file, this file "
"contains a separate regex on each "
"newline.")
list_selector.add_argument('--blacklist-file', '--blacklist_file',
help='Path to a blacklist file, this file '
'contains a separate regex exclude on '
'each newline')
# list only args
parser.add_argument('--list-tests', '-l', action='store_true',
help='List tests',
default=False)
# execution args
parser.add_argument('--concurrency', '-w',
help="The number of workers to use, defaults to "
"the number of cpus")
parallel = parser.add_mutually_exclusive_group()
parallel.add_argument('--parallel', dest='parallel',
action='store_true',
help='Run tests in parallel (this is the'
' default)')
parallel.add_argument('--serial', dest='parallel',
action='store_false',
help='Run tests serially')
# output args
parser.add_argument("--subunit", action='store_true',
help='Enable subunit v2 output')
parser.set_defaults(parallel=True)
return parser
def _build_regex(self, parsed_args):
regex = ''
if parsed_args.smoke:
regex = 'smoke'
elif parsed_args.regex:
regex = parsed_args.regex
if parsed_args.whitelist_file or parsed_args.blacklist_file:
regex = regex_builder.construct_regex(parsed_args.blacklist_file,
parsed_args.whitelist_file,
regex, False)
return regex
def _build_options(self, parsed_args):
options = []
if parsed_args.subunit:
options.append("--subunit")
if parsed_args.parallel:
options.append("--parallel")
if parsed_args.concurrency:
options.append("--concurrency=%s" % parsed_args.concurrency)
return options
def _run(self, regex, options):
returncode = 0
argv = ['tempest', 'run', regex] + options
if '--subunit' in options:
returncode = run_argv(argv, sys.stdin, sys.stdout, sys.stderr)
else:
argv.append('--subunit')
stdin = io.StringIO()
stdout_r, stdout_w = os.pipe()
subunit_w = os.fdopen(stdout_w, 'wt')
subunit_r = os.fdopen(stdout_r)
returncodes = {}
def run_argv_thread():
returncodes['testr'] = run_argv(argv, stdin, subunit_w,
sys.stderr)
subunit_w.close()
run_thread = threading.Thread(target=run_argv_thread)
run_thread.start()
returncodes['subunit-trace'] = subunit_trace.trace(
subunit_r, sys.stdout, post_fails=True, print_failures=True)
run_thread.join()
subunit_r.close()
# python version of pipefail
if returncodes['testr']:
returncode = returncodes['testr']
elif returncodes['subunit-trace']:
returncode = returncodes['subunit-trace']
return returncode
| 41.476534
| 79
| 0.612499
|
6a4bcc8d86878d26a3a90c5375675a6f371d0851
| 7,084
|
py
|
Python
|
vtkPointCloud.py
|
nakawang/py_Pointcloud_viewer_example
|
31918ba354ce99d9c86402ac112d8c08a570bb1c
|
[
"Apache-2.0"
] | null | null | null |
vtkPointCloud.py
|
nakawang/py_Pointcloud_viewer_example
|
31918ba354ce99d9c86402ac112d8c08a570bb1c
|
[
"Apache-2.0"
] | null | null | null |
vtkPointCloud.py
|
nakawang/py_Pointcloud_viewer_example
|
31918ba354ce99d9c86402ac112d8c08a570bb1c
|
[
"Apache-2.0"
] | null | null | null |
import vtk,sys
import numpy as np
from numpy import random
from vtk.util import numpy_support
class VtkPointCloud:
def __init__(self, zMin=-100.0, zMax=100.0, maxNumPoints=5e9):
self.maxNumPoints = maxNumPoints
self.vtkPolyData = vtk.vtkPolyData()
self.clearPoints()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(self.vtkPolyData)
mapper.SetColorModeToDefault()
#mapper.SetColorModeToDirectScalars()
mapper.SetScalarRange(zMin, zMax)
mapper.SetScalarVisibility(1)
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.667,0)
#lut.SetTableRange(0,1)
lut.SetAboveRangeColor(255,255,255,1)
lut.SetBelowRangeColor(0,0,0,0)
lut.UseBelowRangeColorOn()
lut.UseAboveRangeColorOn()
lut.Build()
mapper.SetLookupTable(lut)
mapper.SetUseLookupTableScalarRange(True)
self.mapper=mapper
self.vtkActor = vtk.vtkActor()
self.vtkActor.SetMapper(mapper)
self.lut = lut
def addPoint(self, point):
if self.vtkPoints.GetNumberOfPoints() <self.maxNumPoints:
pointId = self.vtkPoints.InsertNextPoint(point[:])
self.vtkDepth.InsertNextValue(point[2])
self.vtkCells.InsertNextCell(1)
self.vtkCells.InsertCellPoint(pointId)
else:
r = random.randint(0, self.maxNumPoints)
self.vtkPoints.SetPoint(r, point[:])
self.vtkCells.Modified()
self.vtkPoints.Modified()
self.vtkDepth.Modified()
def setPoints(self,a,b,c,d):
print("1")
#self.vtkPolyData.SetPoints(vtkVerts)
print("1")
#self.vtkPolyData.SetVerts(vtkCellArray)
print("1")
self.vtkPoints.SetData(a)
self.vtkCells.SetCells(b,c)
self.vtkCells.Modified()
self.vtkPoints.Modified()
print("1")
self.vtkPolyData.GetPointData().SetScalars(d)
def getPointIndex(self):
return ind
def clearPoints(self):
self.vtkPoints = vtk.vtkPoints()
self.vtkCells = vtk.vtkCellArray()
self.vtkDepth = vtk.vtkDoubleArray()
self.vtkDepth.SetName('DepthArray')
self.vtkPolyData.SetPoints(self.vtkPoints)
self.vtkPolyData.SetVerts(self.vtkCells)
self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth)
self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray')
def getBounds(self):
return self.vtkPoints.GetBounds()
def setRGBColor(self,colorArr):
self.vtkPolyData.GetPointData().SetScalars(colorArr)
print("Set color: ",colorArr)
def getLUT(self):
return self.lut
def setLUTRange(self,min,max):
self.lut.SetTableRange(min,max)
def delaunay2D(self):
print("start generate mesh")
boundary = vtk.vtkPolyData()
boundary.SetPoints(self.vtkPolyData.GetPoints())
aCellArray=vtk.vtkCellArray()
boundary.SetPolys(aCellArray)
delaunay=vtk.vtkDelaunay2D()
delaunay.SetSourceData(boundary)
delaunay.SetInputData(self.vtkPolyData)
delaunay.Update()
print("finish delaunay")
meshMapper=vtk.vtkPolyDataMapper()
meshMapper.SetInputConnection(delaunay.GetOutputPort())
meshMapper.SetLookupTable(self.lut)
meshMapper.SetScalarVisibility(1)
meshMapper.SetUseLookupTableScalarRange(True)
self.vtkActor = vtk.vtkActor()
self.vtkActor.SetMapper(meshMapper)
self.vtkActor.GetProperty().SetEdgeColor(0, 0, 1)
self.vtkActor.GetProperty().SetInterpolationToFlat()
self.vtkActor.GetProperty().SetRepresentationToWireframe()
boundaryMapper = vtk.vtkPolyDataMapper()
boundaryMapper.SetInputData(boundary)
boundaryActor = vtk.vtkActor()
boundaryActor.SetMapper(boundaryMapper)
boundaryActor.GetProperty().SetColor(1, 0, 0)
self.boundaryActor = vtk.vtkActor()
def delaunay3D(self):
delny = vtk.vtkDelaunay3D()
delny.SetInputData(self.vtkPolyData)
delny.SetTolerance(0.01)
delny.SetAlpha(0.2)
delny.BoundingTriangulationOff()
#shrink = vtk.vtkShrinkFilter()
#shrink.SetInputConnection(delny.GetOutputPort())
#shrink.SetShrinkFactor(0.9)
mapper=vtk.vtkDataSetMapper()
mapper.SetInputConnection(delny.GetOutputPort())
triangulation=vtk.vtkActor()
triangulation.SetMapper(mapper)
triangulation.GetProperty().SetColor(1,0,0)
return triangulation
def surfaceRecon(self):
pointSource=vtk.vtkProgrammableSource()
def readPoints():
output = pointSource.GetPolyDataOutput()
#points = vtk.vtkPoints()
output.SetPoints(self.vtkPoints)
pointSource.SetExecuteMethod(readPoints)
surf = vtk.vtkSurfaceReconstructionFilter()
surf.SetInputConnection(pointSource.GetOutputPort())
print(surf)
contour = vtk.vtkContourFilter()
contour.SetInputConnection(surf.GetOutputPort())
contour.SetValue(0,0.0)
print(contour)
reverse = vtk.vtkReverseSense()
reverse.SetInputConnection(contour.GetOutputPort())
reverse.ReverseCellsOn()
reverse.ReverseNormalsOn()
contourMapper=vtk.vtkPolyDataMapper()
contourMapper.SetInputConnection(reverse.GetOutputPort())
contourMapper.ScalarVisibilityOff()
print(contourMapper)
contourActor=vtk.vtkActor()
contourActor.SetMapper(contourMapper)
print(contourActor)
return contourActor
def setRTFilter(self,rt):
print("ori:",self.vtkPolyData)
if 0:
rt=vtk.vtkTransform()
rt.RotateX(90)
rt.RotateY(5)
rtFilter=vtk.vtkTransformPolyDataFilter()
rtFilter.SetInputData(self.vtkPolyData)
rtFilter.SetTransform(rt)
rtFilter.Update()
#self.mapper.SetInputConnection(rtFilter.GetOutputPort())
self.vtkPolyData=rtFilter.GetOutput()
print("abc:",self.vtkPolyData)
points=self.vtkPolyData.GetPoints()
print("new data points:",points,points.GetNumberOfPoints())
#for i in range(points.GetNumberOfPoints()):
#print(points.GetPoint(i))
print(self.vtkPolyData.GetPointData().GetArray(0))
#x=points.GetNumberOfPoints()+1
#self.vtkPolyData.GetPointData().GetArray(0)
#get numpy array of vtk array
np_points=numpy_support.vtk_to_numpy(points.GetData())
depth = np_points[:,2]
print(np_points)
print("new depth",depth)
self.vtkPoints=points
self.vtkDepth = numpy_support.numpy_to_vtk(depth)
self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth)
self.mapper.SetInputData(self.vtkPolyData)
print("rt done")
| 40.712644
| 71
| 0.641022
|
0e1faa26329c43e0a7ef6507d29cd11d340a5972
| 602
|
py
|
Python
|
setup.py
|
gytdau/pto-thingy
|
d7c0ef109591d2d0c155f8f728ffbbd9123cb863
|
[
"MIT"
] | 1
|
2020-05-02T16:41:38.000Z
|
2020-05-02T16:41:38.000Z
|
setup.py
|
gytdau/holiday-thingy
|
d7c0ef109591d2d0c155f8f728ffbbd9123cb863
|
[
"MIT"
] | null | null | null |
setup.py
|
gytdau/holiday-thingy
|
d7c0ef109591d2d0c155f8f728ffbbd9123cb863
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for holiday_bot.
This file was generated with PyScaffold 2.5.7, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import sys
from setuptools import setup
def setup_package():
needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
sphinx = ['sphinx'] if needs_sphinx else []
setup(setup_requires=['six', 'pyscaffold>=2.5a0,<2.6a0'] + sphinx,
use_pyscaffold=True)
if __name__ == "__main__":
setup_package()
| 26.173913
| 73
| 0.679402
|
7ca2f46a5690cee08e979481846ca4f405aedf2e
| 3,240
|
py
|
Python
|
ml-agents/mlagents/trainers/components/reward_signals/reward_signal.py
|
peitaozhao/ml-agents
|
f344f10d723120c31c992a57ca8bd1b9f0fa836a
|
[
"Apache-2.0"
] | 1
|
2020-01-29T22:26:49.000Z
|
2020-01-29T22:26:49.000Z
|
ml-agents/mlagents/trainers/components/reward_signals/reward_signal.py
|
isehd/ml-agents
|
d11229820f3d7f090341c76edea7bad83fc0eeec
|
[
"Apache-2.0"
] | 1
|
2020-04-27T01:52:49.000Z
|
2020-04-27T01:52:49.000Z
|
ml-agents/mlagents/trainers/components/reward_signals/reward_signal.py
|
isehd/ml-agents
|
d11229820f3d7f090341c76edea7bad83fc0eeec
|
[
"Apache-2.0"
] | 1
|
2020-04-26T00:17:25.000Z
|
2020-04-26T00:17:25.000Z
|
import logging
from typing import Any, Dict, List
from collections import namedtuple
import numpy as np
import abc
import tensorflow as tf
from mlagents.envs.brain import BrainInfo
from mlagents.trainers.trainer import UnityTrainerException
from mlagents.trainers.tf_policy import TFPolicy
from mlagents.trainers.buffer import Buffer
logger = logging.getLogger("mlagents.trainers")
RewardSignalResult = namedtuple(
"RewardSignalResult", ["scaled_reward", "unscaled_reward"]
)
class RewardSignal(abc.ABC):
def __init__(self, policy: TFPolicy, strength: float, gamma: float):
"""
Initializes a reward signal. At minimum, you must pass in the policy it is being applied to,
the reward strength, and the gamma (discount factor.)
:param policy: The Policy object (e.g. PPOPolicy) that this Reward Signal will apply to.
:param strength: The strength of the reward. The reward's raw value will be multiplied by this value.
:param gamma: The time discounting factor used for this reward.
:return: A RewardSignal object.
"""
class_name = self.__class__.__name__
short_name = class_name.replace("RewardSignal", "")
self.stat_name = f"Policy/{short_name} Reward"
self.value_name = f"Policy/{short_name} Value Estimate"
# Terminate discounted reward computation at Done. Can disable to mitigate positive bias in rewards with
# no natural end, e.g. GAIL or Curiosity
self.use_terminal_states = True
self.gamma = gamma
self.policy = policy
self.strength = strength
def evaluate(
self, current_info: BrainInfo, next_info: BrainInfo
) -> RewardSignalResult:
"""
Evaluates the reward for the agents present in current_info given the next_info
:param current_info: The current BrainInfo.
:param next_info: The BrainInfo from the next timestep.
:return: a RewardSignalResult of (scaled intrinsic reward, unscaled intrinsic reward) provided by the generator
"""
return RewardSignalResult(
self.strength * np.zeros(len(current_info.agents)),
np.zeros(len(current_info.agents)),
)
def update(self, update_buffer: Buffer, num_sequences: int) -> Dict[str, float]:
"""
If the reward signal has an internal model (e.g. GAIL or Curiosity), update that model.
:param update_buffer: An AgentBuffer that contains the live data from which to update.
        :param num_sequences: The number of sequences in the training buffer.
:return: A dict of {"Stat Name": stat} to be added to Tensorboard
"""
return {}
@classmethod
def check_config(
cls, config_dict: Dict[str, Any], param_keys: List[str] = None
) -> None:
"""
Check the config dict, and throw an error if there are missing hyperparameters.
"""
param_keys = param_keys or []
for k in param_keys:
if k not in config_dict:
raise UnityTrainerException(
"The hyper-parameter {0} could not be found for {1}.".format(
k, cls.__name__
)
)
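class ConstantRewardSignal(RewardSignal):
    """Minimal sketch of a custom signal built on the base class above.

    Assumption: this class is illustrative only and is not part of ml-agents.
    """

    def evaluate(
        self, current_info: BrainInfo, next_info: BrainInfo
    ) -> RewardSignalResult:
        # Give every agent a fixed unscaled reward of 1.0, scaled by `strength`.
        unscaled_reward = np.ones(len(current_info.agents))
        return RewardSignalResult(self.strength * unscaled_reward, unscaled_reward)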
| 40.5
| 119
| 0.662037
|
9e8ca99dece1a3dea8aae3b530f640fe35a9f462
| 472
|
py
|
Python
|
mla/tests/test_random_forest.py
|
qianlv/MachineLearningAlgorithm
|
c66d37bc9c0c1bebf97cdc142213b96cb6ceb989
|
[
"MIT"
] | null | null | null |
mla/tests/test_random_forest.py
|
qianlv/MachineLearningAlgorithm
|
c66d37bc9c0c1bebf97cdc142213b96cb6ceb989
|
[
"MIT"
] | null | null | null |
mla/tests/test_random_forest.py
|
qianlv/MachineLearningAlgorithm
|
c66d37bc9c0c1bebf97cdc142213b96cb6ceb989
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from sklearn import datasets
from mla.ensemble import RandomForest
from mla.ensemble import DecisionTreeClassifier
from mla.base import plot_decision_bounary
def test_random_forest():
iris = datasets.load_iris()
model = RandomForest(
DecisionTreeClassifier, model_params=(None, 1, 4), max_features=1)
X = iris.data[:, [0, 2]]
y = iris.target
model.train_fit(X, y, max_iters=100)
plot_decision_bounary(X, y, model)
| 29.5
| 74
| 0.71822
|
c1ab07b73b8278c0dac5c257a0c8dbe23fe304ef
| 331
|
py
|
Python
|
September 2020/05-Functions-Advanced/Exercises/04-Negative-vs-Positive.py
|
eclipse-ib/Software-University-Professional-Advanced-Module
|
636385f9e5521840f680644824d725d074b93c9a
|
[
"MIT"
] | null | null | null |
September 2020/05-Functions-Advanced/Exercises/04-Negative-vs-Positive.py
|
eclipse-ib/Software-University-Professional-Advanced-Module
|
636385f9e5521840f680644824d725d074b93c9a
|
[
"MIT"
] | null | null | null |
September 2020/05-Functions-Advanced/Exercises/04-Negative-vs-Positive.py
|
eclipse-ib/Software-University-Professional-Advanced-Module
|
636385f9e5521840f680644824d725d074b93c9a
|
[
"MIT"
] | null | null | null |
numbers = [int(i) for i in input().split()]
negative = sum(filter(lambda x: x < 0, numbers))
positive = sum(filter(lambda x: x >= 0, numbers))
print(f"{negative}\n{positive}")
if abs(negative) > positive:
print(f"The negatives are stronger than the positives")
else:
print(f"The positives are stronger than the negatives")
| 36.777778
| 59
| 0.697885
|
ea212ee19ec0d8da4d891bfb4948533b40096438
| 1,424
|
py
|
Python
|
gridpath/objective/system/reserve_violation_penalties/spinning_reserves.py
|
anamileva/gridpath
|
e55eacb88ca5e6c034a90b18819e17cbd6f43854
|
[
"Apache-2.0"
] | 44
|
2020-10-27T19:05:44.000Z
|
2022-03-22T17:17:37.000Z
|
gridpath/objective/system/reserve_violation_penalties/spinning_reserves.py
|
anamileva/gridpath
|
e55eacb88ca5e6c034a90b18819e17cbd6f43854
|
[
"Apache-2.0"
] | 67
|
2020-10-08T22:36:53.000Z
|
2022-03-22T22:58:33.000Z
|
gridpath/objective/system/reserve_violation_penalties/spinning_reserves.py
|
anamileva/gridpath
|
e55eacb88ca5e6c034a90b18819e17cbd6f43854
|
[
"Apache-2.0"
] | 21
|
2020-10-08T23:23:48.000Z
|
2022-03-28T01:21:21.000Z
|
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .aggregate_reserve_violation_penalties import \
generic_record_dynamic_components, generic_add_model_components
def add_model_components(m, d, scenario_directory, subproblem, stage):
"""
:param m:
:param d:
:return:
"""
generic_add_model_components(
m,
d,
scenario_directory, subproblem, stage,
"SPINNING_RESERVES_ZONES",
"Spinning_Reserves_Violation_MW_Expression",
"spinning_reserves_violation_penalty_per_mw",
"Spinning_Reserves_Penalty_Costs"
)
record_dynamic_components(dynamic_components=d)
def record_dynamic_components(dynamic_components):
generic_record_dynamic_components(dynamic_components,
"Spinning_Reserves_Penalty_Costs")
| 30.956522
| 74
| 0.734551
|
6e723a1dbf82e18f51eff58dac46f9b0fe8f40a7
| 187
|
py
|
Python
|
Scripts/pip-script.py
|
alvin-leong/real-python-test
|
b23671f9b8d9fc25e79f31305fe40942f078898b
|
[
"PSF-2.0"
] | null | null | null |
Scripts/pip-script.py
|
alvin-leong/real-python-test
|
b23671f9b8d9fc25e79f31305fe40942f078898b
|
[
"PSF-2.0"
] | null | null | null |
Scripts/pip-script.py
|
alvin-leong/real-python-test
|
b23671f9b8d9fc25e79f31305fe40942f078898b
|
[
"PSF-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| 17
| 69
| 0.572193
|
434e97f7ab24857225177625755b3e88b6d0a526
| 8,590
|
py
|
Python
|
copy/routes.py
|
sunilswain/Raspberry
|
54193a3ba50341ff627d88f8c66c69bf6834b7e6
|
[
"MIT"
] | 2
|
2021-05-24T06:30:15.000Z
|
2021-07-28T18:01:17.000Z
|
copy/routes.py
|
sunilswain/Raspberry
|
54193a3ba50341ff627d88f8c66c69bf6834b7e6
|
[
"MIT"
] | null | null | null |
copy/routes.py
|
sunilswain/Raspberry
|
54193a3ba50341ff627d88f8c66c69bf6834b7e6
|
[
"MIT"
] | null | null | null |
import os, secrets
from PIL import Image
from flask import render_template,url_for, flash , redirect, request, abort
from flaskblog import app, db, bcrypt,mail
from flaskblog.forms import( ResistrationForm , LogInForm, UpdateAccountForm,
PostForm, RequestResetForm, ResetPasswordForm)
from flaskblog.models import User, Post
from flask_login import login_user, current_user, logout_user, login_required
from flask_mail import Message
@app.route("/")
@app.route("/home")
def home_page():
page = request.args.get('page',1,type = int)
posts = Post.query.order_by(Post.date_posted.desc()).paginate(page = page ,per_page = 7)
return render_template('home.html',posts = posts)
@app.route("/about")
def about_page():
return render_template('about.html')
@app.route("/login", methods = ["GET", "POST"])
def login():
if current_user.is_authenticated:
return redirect(url_for('main.home_page'))
form = LogInForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember = form.remember.data)
next_page = request.args.get('next')
view_post = request.args.get('view_post')
if view_post:
return redirect(url_for('posts.post'))
elif next_page:
return redirect(url_for('users.account'))
else:
return redirect(url_for('main.home_page'))
# return redirect(url_for('posts.post')) if view_post else redirect(url_for('main.home_page'))
# return redirect(url_for('users.account')) if next_page else redirect(url_for('main.home_page'))
else:
flash("Unsuccessful login , please check your email and password. " , "warning" )
return render_template('log_in.html', title = "login" , form = form)
@app.route("/register", methods = ["GET", "POST"])
def register():
if current_user.is_authenticated:
return redirect(url_for('main.home_page'))
form = ResistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user1 = User(username=form.username.data, email=form.email.data, password=hashed_password)
db.session.add(user1)
db.session.commit()
        flash(f'Hello {form.username.data}, your account has been created. You can log in now.', "success")
return redirect(url_for('main.home_page'))
return render_template('signUp.html', title = "signup" , form = form)
@app.route("/logout", methods = ["GET", "POST"])
def logout():
logout_user()
return redirect(url_for('main.home_page'))
def SavePicture(form_picture, prev_pfp):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
    prev_pfp_path = os.path.join(app.root_path, 'static', 'profile_picture', prev_pfp)
if prev_pfp != "default.png":
os.remove(prev_pfp_path)
    picture_path = os.path.join(app.root_path, 'static', 'profile_picture', picture_fn)
pfp_size = (125,125)
i = Image.open(form_picture)
i.thumbnail(pfp_size)
i.save(picture_path)
return picture_fn
@app.route("/account", methods = ["GET", "POST"])
@login_required
def account():
# return redirect(url_for('users.account'))
form = UpdateAccountForm()
if form.validate_on_submit():
if form.profile_picture.data:
picture_file = SavePicture(form.profile_picture.data,current_user.pfp)
current_user.pfp = picture_file
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('your account has been updated successfully','success')
return redirect(url_for('users.account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
image_file = url_for('static', filename = 'profile_picture/'+ current_user.pfp )
return render_template('account.html', title = 'account', image_file = image_file, form = form)
@app.route("/post/new", methods = ["GET", "POST"])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(title = form.title.data , content = form.content.data ,bg = form.bg.data, author = current_user)
db.session.add(post)
db.session.commit()
flash('your post has successfully posted.', 'success')
return redirect(url_for('posts.new_post'))
return render_template('create_post.html', title = 'new_post', form = form , legend ='create post')
@app.route("/post/<int:post_id>" )
@login_required
def post(post_id):
post = Post.query.get_or_404(post_id)
return render_template('post.html', title = post.title, post = post )
@app.route("/post/<int:post_id>/update" , methods = ["GET","POST"])
@login_required
def update_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
# post.bg = form.bg.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('posts.post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('create_post.html', title='Update Post',
form=form, legend='Update Post')
@app.route("/post/<int:post_id>/delete" , methods = ["POST"])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted successfully !', 'success')
return redirect(url_for('main.home_page'))
@app.route("/user/<string:username>")
def user_posts(username):
page = request.args.get('page',1,type = int)
user = User.query.filter_by(username=username).first_or_404()
posts = Post.query.filter_by(author = user).\
order_by(Post.date_posted.desc()).\
paginate(page = page ,per_page = 7)
return render_template('user_posts.html',posts = posts,user=user)
def send_reset_email(user):
token = user.get_reset_token()
msg = Message('Reset Password Request',
sender = 'Admin@bluberry.com' ,
recipients = [user.email])
msg.body = f'''To reset your password, visit the following link:
{url_for('users.reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg)
@app.route("/reset_password", methods = ["GET", "POST"])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('main.home_page'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
send_reset_email(user)
        flash('An email has been sent to your account with proper instructions.', 'success')
return redirect(url_for('users.login'))
return render_template('reset_request.html',title = 'request reset password ', form = form)
@app.route("/reset_password/<token>", methods = ["GET", "POST"])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('main.home_page'))
user = User.verify_reset_token(token)
if user is None:
flash('This token is invalid or expired !' , 'warning')
return redirect(url_for('users.reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user.password = hashed_password
db.session.commit()
        flash('Your password has been updated successfully. You can log in now.', "success")
return redirect(url_for('main.home_page'))
return render_template('reset_token.html',title = 'request reset password ', form = form)
| 40.904762
| 117
| 0.651804
|
32aab2d5992ef7dc83cb381cc8d824c43e7efa0f
| 2,901
|
py
|
Python
|
pycn_lite/lib/suite/ccnx2015_enc.py
|
cn-uofbasel/pycn-lite
|
2c2bdbe76b60cd36ebb1f1b51d72cbde553b9351
|
[
"BSD-3-Clause"
] | 6
|
2018-02-13T13:49:15.000Z
|
2020-04-17T22:07:05.000Z
|
pycn_lite/lib/suite/ccnx2015_enc.py
|
cn-uofbasel/pycn-lite
|
2c2bdbe76b60cd36ebb1f1b51d72cbde553b9351
|
[
"BSD-3-Clause"
] | null | null | null |
pycn_lite/lib/suite/ccnx2015_enc.py
|
cn-uofbasel/pycn-lite
|
2c2bdbe76b60cd36ebb1f1b51d72cbde553b9351
|
[
"BSD-3-Clause"
] | 2
|
2020-01-10T05:03:23.000Z
|
2020-02-11T01:39:53.000Z
|
# pycn_lite/lib/suite/ccnx2015.py
# (c) 2018-02-01 <christian.tschudin@unibas.ch>
# encoder for CCNX packets
# this module is not needed for forwarding and repo, saves memory for ESP8266
try:
from uhashlib import sha256
import ustruct as struct
except:
from hashlib import sha256
import struct
import pycn_lite.lib.suite.ccnx2015 as ccnx
# ----------------------------------------------------------------------
# creating TLVs
# def mk_name_tlv(comps):
def prepend_int(buf, start, v): # returns new start
while True:
start -= 1
buf[start] = v & 0x0ff
v >>= 8
if v == 0:
break
return start
def prepend_blob(buf, start, blob): # returns new start
start = start - len(blob)
buf[start:start + len(blob)] = blob
return start
def prepend_tl(buf, start, t, l): # returns new start
if start < 4:
raise IOError
struct.pack_into('>H', buf, start-2, l)
struct.pack_into('>H', buf, start-4, t)
return start - 4
def prepend_name(buf, start, comps): # returns new start
end = start
for i in range(len(comps)-1, -1, -1):
start = prepend_blob(buf, start, comps[i])
start = prepend_tl(buf, start, ccnx.CCNX_TLV_M_Name, end - start)
return start
#def name_components_to_wirebytes(comps):
# n = b''
# for c in comps:
# n += c
# tl = bytearray(4)
# struct.pack_into('>H', tl, 2, len(n))
# struct.pack_into('>H', tl, 0, CCNX_TLV_M_Name)
# return tl + n
# buf = bytearray(3000)
# offs = prepend_name(buf, len(buf), comps)
# buf = buf[offs:]
# return buf
# ----------------------------------------------------------------------
def encode_interest_wirebytes(comps, hashId = None):
buf = bytearray(ccnx.MAX_CHUNK_SIZE)
start = len(buf)
if hashId:
start = prepend_blob(buf, start, hashId)
start = prepend_tl(buf, start, ccnx.CCNX_TLV_M_ObjHashRestriction,
len(hashId))
start = prepend_name(buf, start, comps)
start = prepend_tl(buf, start, ccnx.CCNX_TLV_TL_Interest, len(buf) - start)
hdr = bytearray(b'\x01\x00 \x10\x00\x00\x08')
struct.pack_into('>H', hdr, 2, len(buf) - start + len(hdr))
return hdr + buf[start:]
def encode_data_wirebytes(comps, blob):
buf = bytearray(ccnx.MAX_CHUNK_SIZE)
start = len(buf)
start = prepend_blob(buf, start, blob)
start = prepend_tl(buf, start, ccnx.CCNX_TLV_M_Payload, len(blob))
start = prepend_name(buf, start, comps)
start = prepend_tl(buf, start, ccnx.CCNX_TLV_TL_Object, len(buf) - start)
h = sha256()
h.update(buf[start:])
hdr = bytearray(b'\x01\x01 \x10\x00\x00\x08')
struct.pack_into('>H', hdr, 2, len(buf) - start + len(hdr))
return (hdr + buf[start:], h.digest())
def encode_nack_wirebytes(comps, blob=None):
    # ccnx does not have an app-level nack?
return None
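if __name__ == '__main__':
    # Minimal sketch (assumption: the name components and payload are examples only).
    name = [b'pycn', b'demo', b'hello']
    interest_bytes = encode_interest_wirebytes(name)
    data_bytes, obj_hash = encode_data_wirebytes(name, b'hello world')
    print(len(interest_bytes), len(data_bytes), len(obj_hash))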
# eof
| 29.907216
| 79
| 0.606343
|
5235e10bcd2685997a08a1cd238a57f3a3152cfe
| 3,799
|
py
|
Python
|
pypy/interpreter/test/test_exceptcomp.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/interpreter/test/test_exceptcomp.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | null | null | null |
pypy/interpreter/test/test_exceptcomp.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
"""Test comparisons of Exceptions in except clauses.
New for PyPy - Could be incorporated into CPython regression tests.
"""
class AppTestExceptionComp:
    ### XXX - String exceptions deprecated?
## def test_string(self):
## string = "string"
## try:
## raise string
## except string:
## pass
## except:
## self.fail("Identical string exceptions do not match.")
##
## def test_stringfail(self):
## string1 = "string1"
## string1_ = "string" + "1"
## assert string1 is not string1_
## try:
## raise string1
## except "string2":
## self.fail("Different string exceptions match.")
## except string1_:
## self.fail("Non Identical string exceptions match.")
## except string1:
## pass
## except:
## self.fail("Unknown value for variable raise.")
def test_exception(self):
try:
raise TypeError, "nothing"
except TypeError:
pass
except:
self.fail("Identical exceptions do not match.")
def test_exceptionfail(self):
try:
raise TypeError, "nothing"
except KeyError:
self.fail("Different exceptions match.")
except TypeError:
pass
except:
self.fail("Unanticipated value for exception raise.")
def test_called(self):
try:
raise SyntaxError("Invalid")
except SyntaxError:
pass
except:
self.fail("Instantiated exception does not match parent class.")
def test_calledfail(self):
try:
raise SyntaxError("Invalid")
except ZeroDivisionError:
self.fail("Instantiated exception matches different parent class.")
except SyntaxError:
pass
except:
self.fail("Unanticpated value for exception raise.")
def test_userclass(self):
class UserExcept(Exception):
pass
try:
raise UserExcept, "nothing"
except UserExcept:
pass
except:
self.fail("User defined class exceptions do not match.")
def test_subclass(self):
try:
raise KeyError("key")
except LookupError:
pass
except:
self.fail("Exception does not match parent class.")
def test_deepsubclass(self):
try:
raise FloatingPointError("1.2r")
except Exception:
pass
except:
self.fail("Exception does not match grandparent class.")
def test_tuple(self):
try:
raise ArithmeticError("2+jack")
except (ZeroDivisionError, ArithmeticError):
pass
except:
self.fail("Exception does not match self in tuple.")
def test_parenttuple(self):
try:
raise ZeroDivisionError("0")
except (StandardError, SystemExit):
pass
except:
self.fail("Exception does not match parent in tuple.")
def test_nestedtuples(self):
try:
raise AssertionError("0")
except (SystemExit, (KeyboardInterrupt, AssertionError)):
pass
except:
self.fail("Exception does not match self in nested tuple.")
def test_deeptuples(self):
try:
raise IOError
except (FloatingPointError,(OSError,
(SyntaxError,IOError,ZeroDivisionError)),
(MemoryError, NotImplementedError)):
pass
except:
self.fail("Exception does not match self in deeply nested tuple.")
| 29
| 80
| 0.549618
|
5c5ae4bc8e1abbf941bf140312e3c51186462645
| 605
|
py
|
Python
|
main.py
|
enigma10245/discordpy-startup
|
2f139f521f8ffb36d4c3c7a962bed6f1869eb78c
|
[
"MIT"
] | null | null | null |
main.py
|
enigma10245/discordpy-startup
|
2f139f521f8ffb36d4c3c7a962bed6f1869eb78c
|
[
"MIT"
] | null | null | null |
main.py
|
enigma10245/discordpy-startup
|
2f139f521f8ffb36d4c3c7a962bed6f1869eb78c
|
[
"MIT"
] | null | null | null |
import discord
client = discord.Client()
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
@client.event
async def on_message(message):
    # Check whether the message starts with "おはよう" ("good morning")
if message.content.startswith("おはよう"):
        # Don't respond if the sender is the bot itself
if client.user != message.author:
            # Compose the reply message
m = "おはようございます" + message.author.name + "さん!"
            # Send the reply to the channel the message came from
await message.channel.send(m)
client.run("NjQ2MTk5MjU3MDE5MTg3MjMz.XdSpjg.WwH8iNnJEzWhAraU2WIjg1eWPdo")
| 26.304348
| 73
| 0.654545
|
3be80da4c38fc3e590b84d25be5177010290f8a9
| 675
|
py
|
Python
|
h2o-bindings/bin/custom/R/gen_generic.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2
|
2020-09-23T14:23:55.000Z
|
2020-09-23T19:26:30.000Z
|
h2o-bindings/bin/custom/R/gen_generic.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 1
|
2021-04-06T13:07:04.000Z
|
2021-04-06T13:07:04.000Z
|
h2o-bindings/bin/custom/R/gen_generic.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 1
|
2021-09-09T03:47:11.000Z
|
2021-09-09T03:47:11.000Z
|
extensions = dict(
required_params=[], # empty to override defaults in gen_defaults
validate_required_params="""
# Required args: either model_key or path
if (is.null(model_key) && is.null(path)) stop("argument 'model_key' or 'path' must be provided")
""",
set_required_params="",
)
doc = dict(
preamble="""
    Imports a generic model into H2O. Such a model can then be used for scoring and obtaining
    additional information about the model. The imported model has to be supported by H2O.
""",
examples="""
# library(h2o)
# h2o.init()
# generic_model <- h2o.genericModel("/path/to/model.zip")
# predictions <- h2o.predict(generic_model, dataset)
"""
)
| 28.125
| 96
| 0.70963
|
652288cad4a32557b51dea4e4b93eebd722ba27a
| 1,030
|
py
|
Python
|
courses/admin.py
|
melodyPereira05/EdZone
|
6180fa4b58d43bcaa4e3c7a7518ce87bc7ce8aaf
|
[
"MIT"
] | null | null | null |
courses/admin.py
|
melodyPereira05/EdZone
|
6180fa4b58d43bcaa4e3c7a7518ce87bc7ce8aaf
|
[
"MIT"
] | null | null | null |
courses/admin.py
|
melodyPereira05/EdZone
|
6180fa4b58d43bcaa4e3c7a7518ce87bc7ce8aaf
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Subject, Course, Module, Wishlist, Contact
@admin.register(Subject)
class SubjectAdmin(admin.ModelAdmin):
list_display = ['title', 'slug']
prepopulated_fields = {'slug': ('title',)}
class ModuleInline(admin.StackedInline):
model = Module
@admin.register(Course)
class CourseAdmin(admin.ModelAdmin):
list_display = ['title', 'subject', 'created']
list_filter = ['created', 'subject']
search_fields = ['title', 'overview']
prepopulated_fields = {'slug': ('title',)}
inlines = [ModuleInline]
@admin.register(Wishlist)
class WishlistAdmin(admin.ModelAdmin):
list_display = ['course', 'name', 'wishlisted_date']
list_filter = ['course', 'name']
prepopulated_fields = {'slug': ('course',)}
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
list_display = ['course', 'name', 'email']
list_filter = ['email', 'name']
prepopulated_fields = {'slug': ('course',)}
| 24.52381
| 60
| 0.653398
|
6e1cc169845c0d95ca67f3f19455dee9d7760f95
| 87,991
|
py
|
Python
|
nilearn/plotting/displays.py
|
lemiceterieux/nilearn
|
bb3c9bf0aaf82ae99a5c425d17ce4bd174ec7056
|
[
"BSD-2-Clause"
] | null | null | null |
nilearn/plotting/displays.py
|
lemiceterieux/nilearn
|
bb3c9bf0aaf82ae99a5c425d17ce4bd174ec7056
|
[
"BSD-2-Clause"
] | null | null | null |
nilearn/plotting/displays.py
|
lemiceterieux/nilearn
|
bb3c9bf0aaf82ae99a5c425d17ce4bd174ec7056
|
[
"BSD-2-Clause"
] | null | null | null |
"""
The Slicer classes.
The main purpose of these classes is to have auto adjust of axes size to
the data with different layout of cuts.
"""
import collections.abc
import numbers
from distutils.version import LooseVersion
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import warnings
from matplotlib import cm as mpl_cm
from matplotlib import (colors,
lines,
transforms,
)
from matplotlib.colorbar import ColorbarBase
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyArrow
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
from scipy import sparse, stats
from . import cm, glass_brain
from .edge_detect import _edge_map
from .find_cuts import find_xyz_cut_coords, find_cut_slices
from .. import _utils
from ..image import new_img_like
from ..image.resampling import (get_bounds, reorder_img, coord_transform,
get_mask_bounds)
from nilearn.image import get_data
###############################################################################
# class BaseAxes
###############################################################################
class BaseAxes(object):
""" An MPL axis-like object that displays a 2D view of 3D volumes
"""
def __init__(self, ax, direction, coord):
""" An MPL axis-like object that displays a cut of 3D volumes
Parameters
----------
ax : A MPL axes instance
The axes in which the plots will be drawn.
direction : {'x', 'y', 'z'}
The directions of the view.
coord : float
The coordinate along the direction of the cut.
"""
self.ax = ax
self.direction = direction
self.coord = coord
self._object_bounds = list()
self.shape = None
def transform_to_2d(self, data, affine):
raise NotImplementedError("'transform_to_2d' needs to be implemented "
"in derived classes'")
def add_object_bounds(self, bounds):
"""Ensures that axes get rescaled when adding object bounds
"""
old_object_bounds = self.get_object_bounds()
self._object_bounds.append(bounds)
new_object_bounds = self.get_object_bounds()
if new_object_bounds != old_object_bounds:
self.ax.axis(self.get_object_bounds())
def draw_2d(self, data_2d, data_bounds, bounding_box,
type='imshow', **kwargs):
# kwargs messaging
kwargs['origin'] = 'upper'
if self.direction == 'y':
(xmin, xmax), (_, _), (zmin, zmax) = data_bounds
(xmin_, xmax_), (_, _), (zmin_, zmax_) = bounding_box
elif self.direction in 'xlr':
(_, _), (xmin, xmax), (zmin, zmax) = data_bounds
(_, _), (xmin_, xmax_), (zmin_, zmax_) = bounding_box
elif self.direction == 'z':
(xmin, xmax), (zmin, zmax), (_, _) = data_bounds
(xmin_, xmax_), (zmin_, zmax_), (_, _) = bounding_box
else:
raise ValueError('Invalid value for direction %s' %
self.direction)
ax = self.ax
# Here we need to do a copy to avoid having the image changing as
# we change the data
im = getattr(ax, type)(data_2d.copy(),
extent=(xmin, xmax, zmin, zmax),
**kwargs)
self.add_object_bounds((xmin_, xmax_, zmin_, zmax_))
self.shape = data_2d.T.shape
# The bounds of the object do not take into account a possible
# inversion of the axis. As such, we check that the axis is properly
# inverted when direction is left
if self.direction == 'l' and not (ax.get_xlim()[0] > ax.get_xlim()[1]):
ax.invert_xaxis()
return im
def get_object_bounds(self):
""" Return the bounds of the objects on this axes.
"""
if len(self._object_bounds) == 0:
# Nothing plotted yet
return -.01, .01, -.01, .01
xmins, xmaxs, ymins, ymaxs = np.array(self._object_bounds).T
xmax = max(xmaxs.max(), xmins.max())
xmin = min(xmins.min(), xmaxs.min())
ymax = max(ymaxs.max(), ymins.max())
ymin = min(ymins.min(), ymaxs.min())
return xmin, xmax, ymin, ymax
def draw_left_right(self, size, bg_color, **kwargs):
if self.direction in 'xlr':
return
ax = self.ax
ax.text(.1, .95, 'L',
transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='top',
size=size,
bbox=dict(boxstyle="square,pad=0",
ec=bg_color, fc=bg_color, alpha=1),
**kwargs)
ax.text(.9, .95, 'R',
transform=ax.transAxes,
horizontalalignment='right',
verticalalignment='top',
size=size,
bbox=dict(boxstyle="square,pad=0", ec=bg_color, fc=bg_color),
**kwargs)
def draw_scale_bar(self, bg_color, size=5.0, units='cm',
fontproperties=None, frameon=False, loc=4, pad=.1,
borderpad=.5, sep=5, size_vertical=0, label_top=False,
color='black', fontsize=None, **kwargs):
""" Adds a scale bar annotation to the display
Parameters
----------
        bg_color : matplotlib color: str or (r, g, b) value
The background color of the scale bar annotation.
size : float, optional
Horizontal length of the scale bar, given in `units`.
Default=5.0.
units : str, optional
Physical units of the scale bar (`'cm'` or `'mm'`).
Default='cm'.
fontproperties : ``matplotlib.font_manager.FontProperties`` or dict, optional
Font properties for the label text.
frameon : Boolean, optional
Whether the scale bar is plotted with a border. Default=False.
loc : int, optional
Location of this scale bar. Valid location codes are documented
`here <https://matplotlib.org/mpl_toolkits/axes_grid/\
api/anchored_artists_api.html#mpl_toolkits.axes_grid1.\
anchored_artists.AnchoredSizeBar>`__.
Default=4.
        pad : int or float, optional
Padding around the label and scale bar, in fraction of the font
size. Default=0.1.
borderpad : int or float, optional
Border padding, in fraction of the font size. Default=0.5.
sep : int or float, optional
Separation between the label and the scale bar, in points.
Default=5.
size_vertical : int or float, optional
Vertical length of the size bar, given in `units`. Default=0.
label_top : bool, optional
If True, the label will be over the scale bar. Default=False.
color : str, optional
Color for the scale bar and label. Default='black'.
fontsize : int, optional
Label font size (overwrites the size passed in through the
``fontproperties`` argument).
**kwargs :
Keyworded arguments to pass to
``matplotlib.offsetbox.AnchoredOffsetbox``.
"""
axis = self.ax
fontproperties = fontproperties or FontProperties()
if fontsize:
fontproperties.set_size(fontsize)
width_mm = size
if units == 'cm':
width_mm *= 10
anchor_size_bar = AnchoredSizeBar(
axis.transData,
width_mm,
'%g%s' % (size, units),
fontproperties=fontproperties,
frameon=frameon,
loc=loc,
pad=pad,
borderpad=borderpad,
sep=sep,
size_vertical=size_vertical,
label_top=label_top,
color=color,
**kwargs)
if frameon:
anchor_size_bar.patch.set_facecolor(bg_color)
anchor_size_bar.patch.set_edgecolor('none')
axis.add_artist(anchor_size_bar)
def draw_position(self, size, bg_color, **kwargs):
raise NotImplementedError("'draw_position' should be implemented "
"in derived classes")
###############################################################################
# class CutAxes
###############################################################################
class CutAxes(BaseAxes):
""" An MPL axis-like object that displays a cut of 3D volumes
"""
def transform_to_2d(self, data, affine):
""" Cut the 3D volume into a 2D slice
Parameters
----------
data : 3D ndarray
The 3D volume to cut.
affine : 4x4 ndarray
The affine of the volume.
"""
coords = [0, 0, 0]
coords['xyz'.index(self.direction)] = self.coord
x_map, y_map, z_map = [int(np.round(c)) for c in
coord_transform(coords[0],
coords[1],
coords[2],
np.linalg.inv(affine))]
if self.direction == 'y':
cut = np.rot90(data[:, y_map, :])
elif self.direction == 'x':
cut = np.rot90(data[x_map, :, :])
elif self.direction == 'z':
cut = np.rot90(data[:, :, z_map])
else:
raise ValueError('Invalid value for direction %s' %
self.direction)
return cut
def draw_position(self, size, bg_color, decimals=False, **kwargs):
if decimals:
text = '%s=%.{}f'.format(decimals)
coord = float(self.coord)
else:
text = '%s=%i'
coord = self.coord
ax = self.ax
ax.text(0, 0, text % (self.direction, coord),
transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='bottom',
size=size,
bbox=dict(boxstyle="square,pad=0",
ec=bg_color, fc=bg_color, alpha=1),
**kwargs)
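# A minimal sketch (not part of the original module) of how CutAxes turns a
# 3D volume into a 2D slice. The toy volume, the identity affine and the
# helper name `_demo_cut_axes_slice` are assumptions made purely for
# illustration; the sketch relies on the numpy import already present in this
# module and on CutAxes accepting ax=None, as the dummy-axes pattern used
# later in the file suggests.
def _demo_cut_axes_slice():
    toy_volume = np.arange(4 * 4 * 4, dtype=float).reshape(4, 4, 4)
    identity_affine = np.eye(4)
    # Cut at world coordinate z=1; with an identity affine this is voxel
    # index 1 along the last axis, and the slice is rotated for display.
    cut_axes = CutAxes(None, 'z', 1)
    return cut_axes.transform_to_2d(toy_volume, identity_affine)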
def _get_index_from_direction(direction):
"""Returns numerical index from direction
"""
directions = ['x', 'y', 'z']
try:
# l and r are subcases of x
if direction in 'lr':
index = 0
else:
index = directions.index(direction)
except ValueError:
message = (
'{0} is not a valid direction. '
"Allowed values are 'l', 'r', 'x', 'y' and 'z'").format(direction)
raise ValueError(message)
return index
def _coords_3d_to_2d(coords_3d, direction, return_direction=False):
"""Project 3d coordinates into 2d ones given the direction of a cut
"""
index = _get_index_from_direction(direction)
dimensions = [0, 1, 2]
dimensions.pop(index)
if return_direction:
return coords_3d[:, dimensions], coords_3d[:, index]
return coords_3d[:, dimensions]
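# A minimal sketch (hypothetical helper, for illustration only) of the two
# projection helpers above: the cut direction selects which coordinate column
# is dropped, and `return_direction=True` also hands back the dropped column.
def _demo_project_coords():
    coords_3d = np.array([[10., -20., 30.],
                          [-15., 5., 0.]])
    # For a 'z' cut, columns 0 and 1 (x and y) are kept ...
    xy = _coords_3d_to_2d(coords_3d, 'z')
    # ... and the dropped z column can be returned alongside them.
    xy_same, z_values = _coords_3d_to_2d(coords_3d, 'z', return_direction=True)
    return xy, xy_same, z_values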
###############################################################################
# class GlassBrainAxes
###############################################################################
class GlassBrainAxes(BaseAxes):
"""An MPL axis-like object that displays a 2D projection of 3D
volumes with a schematic view of the brain.
"""
def __init__(self, ax, direction, coord, plot_abs=True, **kwargs):
super(GlassBrainAxes, self).__init__(ax, direction, coord)
self._plot_abs = plot_abs
if ax is not None:
object_bounds = glass_brain.plot_brain_schematics(ax,
direction,
**kwargs)
self.add_object_bounds(object_bounds)
def transform_to_2d(self, data, affine):
""" Returns the maximum of the absolute value of the 3D volume
along an axis.
Parameters
----------
data : 3D ndarray
The 3D volume.
affine : 4x4 ndarray
The affine of the volume.
"""
if self.direction in 'xlr':
max_axis = 0
else:
max_axis = '.yz'.index(self.direction)
# set unselected brain hemisphere activations to 0
if self.direction == 'l':
x_center, _, _, _ = np.dot(np.linalg.inv(affine),
np.array([0, 0, 0, 1]))
data_selection = data[:int(x_center), :, :]
elif self.direction == 'r':
x_center, _, _, _ = np.dot(np.linalg.inv(affine),
np.array([0, 0, 0, 1]))
data_selection = data[int(x_center):, :, :]
else:
data_selection = data
# We need to make sure data_selection is not empty in the x axis
# This should be the case since we expect images in MNI space
if data_selection.shape[0] == 0:
data_selection = data
if not self._plot_abs:
# get the shape of the array we are projecting to
new_shape = list(data.shape)
del new_shape[max_axis]
# generate a 3D indexing array that points to max abs value in the
# current projection
a1, a2 = np.indices(new_shape)
inds = [a1, a2]
inds.insert(max_axis, np.abs(data_selection).argmax(axis=max_axis))
# take the values where the absolute value of the projection
# is the highest
maximum_intensity_data = data_selection[tuple(inds)]
else:
maximum_intensity_data = np.abs(data_selection).max(axis=max_axis)
        # This workaround can be removed when requiring matplotlib > 2.1.0.
        # See nilearn issue #1815 for the reason it was introduced.
if self.direction == 'l' and data_selection.min() is np.ma.masked and \
not (self.ax.get_xlim()[0] > self.ax.get_xlim()[1]):
self.ax.invert_xaxis()
return np.rot90(maximum_intensity_data)
def draw_position(self, size, bg_color, **kwargs):
# It does not make sense to draw crosses for the position of
# the cuts since we are taking the max along one axis
pass
def _add_markers(self, marker_coords, marker_color, marker_size, **kwargs):
"""Plot markers
        In the case of 'l' and 'r' directions (for hemispheric projections),
        markers at coordinate x == 0 are included in both hemispheres.
"""
marker_coords_2d = _coords_3d_to_2d(marker_coords, self.direction)
xdata, ydata = marker_coords_2d.T
# Allow markers only in their respective hemisphere when appropriate
if self.direction in 'lr':
if not isinstance(marker_color, str) and \
not isinstance(marker_color, np.ndarray):
marker_color = np.asarray(marker_color)
relevant_coords = []
xcoords, ycoords, zcoords = marker_coords.T
for cidx, xc in enumerate(xcoords):
if self.direction == 'r' and xc >= 0:
relevant_coords.append(cidx)
elif self.direction == 'l' and xc <= 0:
relevant_coords.append(cidx)
xdata = xdata[relevant_coords]
ydata = ydata[relevant_coords]
# if marker_color is string for example 'red' or 'blue', then
# we pass marker_color as it is to matplotlib scatter without
# making any selection in 'l' or 'r' color.
# More likely that user wants to display all nodes to be in
# same color.
if not isinstance(marker_color, str) and \
len(marker_color) != 1:
marker_color = marker_color[relevant_coords]
if not isinstance(marker_size, numbers.Number):
marker_size = np.asarray(marker_size)[relevant_coords]
defaults = {'marker': 'o',
'zorder': 1000}
for k, v in defaults.items():
kwargs.setdefault(k, v)
self.ax.scatter(xdata, ydata, s=marker_size,
c=marker_color, **kwargs)
def _add_lines(self, line_coords, line_values, cmap,
vmin=None, vmax=None, directed=False, **kwargs):
"""Plot lines
Parameters
----------
line_coords : list of numpy arrays of shape (2, 3)
3d coordinates of lines start points and end points.
line_values : array_like
Values of the lines.
cmap : colormap
Colormap used to map line_values to a color.
vmin, vmax : float, optional
            If not None, either or both of these values will be used as
            the minimum and maximum values to color lines. If neither is
            supplied, the maximum absolute value within the given threshold
            will be used as the maximum coloring level and its negation
            as the minimum.
directed : boolean, optional
Add arrows instead of lines if set to True. Use this when plotting
directed graphs for example. Default=False.
kwargs : dict
Additional arguments to pass to matplotlib Line2D.
"""
# colormap for colorbar
self.cmap = cmap
if vmin is None and vmax is None:
abs_line_values_max = np.abs(line_values).max()
vmin = -abs_line_values_max
vmax = abs_line_values_max
elif vmin is None:
if vmax > 0:
vmin = -vmax
else:
raise ValueError(
"If vmax is set to a non-positive number "
"then vmin needs to be specified"
)
elif vmax is None:
if vmin < 0:
vmax = -vmin
else:
raise ValueError(
"If vmin is set to a non-negative number "
"then vmax needs to be specified"
)
norm = colors.Normalize(vmin=vmin,
vmax=vmax)
# normalization useful for colorbar
self.norm = norm
abs_norm = colors.Normalize(vmin=0,
vmax=vmax)
value_to_color = plt.cm.ScalarMappable(norm=norm, cmap=cmap).to_rgba
# Allow lines only in their respective hemisphere when appropriate
if self.direction in 'lr':
relevant_lines = []
for lidx, line in enumerate(line_coords):
if self.direction == 'r':
if line[0, 0] >= 0 and line[1, 0] >= 0:
relevant_lines.append(lidx)
elif self.direction == 'l':
if line[0, 0] < 0 and line[1, 0] < 0:
relevant_lines.append(lidx)
line_coords = np.array(line_coords)[relevant_lines]
line_values = line_values[relevant_lines]
for start_end_point_3d, line_value in zip(
line_coords, line_values):
start_end_point_2d = _coords_3d_to_2d(start_end_point_3d,
self.direction)
color = value_to_color(line_value)
abs_line_value = abs(line_value)
linewidth = 1 + 2 * abs_norm(abs_line_value)
# Hacky way to put the strongest connections on top of the weakest
# note sign does not matter hence using 'abs'
zorder = 10 + 10 * abs_norm(abs_line_value)
this_kwargs = {'color': color, 'linewidth': linewidth,
'zorder': zorder}
# kwargs should have priority over this_kwargs so that the
# user can override the default logic
this_kwargs.update(kwargs)
xdata, ydata = start_end_point_2d.T
# If directed is True, add an arrow
if directed:
dx = xdata[1] - xdata[0]
dy = ydata[1] - ydata[0]
# Hack to avoid empty arrows to crash with
# matplotlib versions older than 3.1
# This can be removed once support for
# matplotlib pre 3.1 has been dropped.
if dx == 0 and dy == 0:
arrow = FancyArrow(xdata[0], ydata[0],
dx, dy)
else:
arrow = FancyArrow(xdata[0], ydata[0],
dx, dy,
length_includes_head=True,
width=linewidth,
head_width=3*linewidth,
**this_kwargs)
self.ax.add_patch(arrow)
# Otherwise a line
else:
line = lines.Line2D(xdata, ydata, **this_kwargs)
self.ax.add_line(line)
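# A minimal sketch (hypothetical helper) that reproduces the symmetric color
# normalization _add_lines applies to edge values, so the value-to-color
# mapping can be inspected without building a full glass-brain display. It
# relies on the numpy / matplotlib imports already used in this module; the
# default colormap choice (plt.cm.bwr) is an assumption for illustration.
def _demo_edge_color_mapping(line_values, cmap=None):
    line_values = np.asarray(line_values, dtype=float)
    cmap = plt.cm.bwr if cmap is None else cmap
    vmax = np.abs(line_values).max()
    # Symmetric limits around zero, exactly as in _add_lines when vmin and
    # vmax are both left unset.
    norm = colors.Normalize(vmin=-vmax, vmax=vmax)
    value_to_color = plt.cm.ScalarMappable(norm=norm, cmap=cmap).to_rgba
    return [value_to_color(value) for value in line_values]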
###############################################################################
# class BaseSlicer
###############################################################################
class BaseSlicer(object):
""" The main purpose of these class is to have auto adjust of axes size
to the data with different layout of cuts.
"""
    # This actually encodes the figsize for only a single axes
_default_figsize = [2.2, 2.6]
_axes_class = CutAxes
def __init__(self, cut_coords, axes=None, black_bg=False,
brain_color=(0.5, 0.5, 0.5), **kwargs):
""" Create 3 linked axes for plotting orthogonal cuts.
Parameters
----------
cut_coords : 3 tuple of ints
The cut position, in world space.
axes : matplotlib axes object, optional
The axes that will be subdivided in 3.
black_bg : boolean, optional
If True, the background of the figure will be put to
black. If you wish to save figures with a black background,
you will need to pass "facecolor='k', edgecolor='k'"
to matplotlib.pyplot.savefig. Default=False.
brain_color : tuple, optional
The brain color to use as the background color (e.g., for
transparent colorbars).
Default=(0.5, 0.5, 0.5)
"""
self.cut_coords = cut_coords
if axes is None:
axes = plt.axes((0., 0., 1., 1.))
axes.axis('off')
self.frame_axes = axes
axes.set_zorder(1)
bb = axes.get_position()
self.rect = (bb.x0, bb.y0, bb.x1, bb.y1)
self._black_bg = black_bg
self._brain_color = brain_color
self._colorbar = False
self._colorbar_width = 0.05 * bb.width
self._colorbar_margin = dict(left=0.25 * bb.width,
right=0.02 * bb.width,
top=0.05 * bb.height,
bottom=0.05 * bb.height)
self._init_axes(**kwargs)
@staticmethod
def find_cut_coords(img=None, threshold=None, cut_coords=None):
# Implement this as a staticmethod or a classmethod when
# subclassing
raise NotImplementedError
@classmethod
def init_with_figure(cls, img, threshold=None,
cut_coords=None, figure=None, axes=None,
black_bg=False, leave_space=False, colorbar=False,
brain_color=(0.5, 0.5, 0.5), **kwargs):
"Initialize the slicer with an image"
# deal with "fake" 4D images
if img is not None and img is not False:
img = _utils.check_niimg_3d(img)
cut_coords = cls.find_cut_coords(img, threshold, cut_coords)
if isinstance(axes, plt.Axes) and figure is None:
figure = axes.figure
if not isinstance(figure, plt.Figure):
# Make sure that we have a figure
figsize = cls._default_figsize[:]
# Adjust for the number of axes
figsize[0] *= len(cut_coords)
# Make space for the colorbar
if colorbar:
figsize[0] += .7
facecolor = 'k' if black_bg else 'w'
if leave_space:
figsize[0] += 3.4
figure = plt.figure(figure, figsize=figsize,
facecolor=facecolor)
if isinstance(axes, plt.Axes):
assert axes.figure is figure, ("The axes passed are not "
"in the figure")
if axes is None:
axes = [0., 0., 1., 1.]
if leave_space:
axes = [0.3, 0, .7, 1.]
if isinstance(axes, collections.abc.Sequence):
axes = figure.add_axes(axes)
# People forget to turn their axis off, or to set the zorder, and
# then they cannot see their slicer
axes.axis('off')
return cls(cut_coords, axes, black_bg, brain_color, **kwargs)
def title(self, text, x=0.01, y=0.99, size=15, color=None, bgcolor=None,
alpha=1, **kwargs):
""" Write a title to the view.
Parameters
----------
text : string
The text of the title.
x : float, optional
The horizontal position of the title on the frame in
fraction of the frame width. Default=0.01.
y : float, optional
The vertical position of the title on the frame in
fraction of the frame height. Default=0.99.
size : integer, optional
The size of the title text. Default=15.
color : matplotlib color specifier, optional
The color of the font of the title.
bgcolor : matplotlib color specifier, optional
The color of the background of the title.
alpha : float, optional
The alpha value for the background. Default=1.
kwargs :
Extra keyword arguments are passed to matplotlib's text
function.
"""
if color is None:
color = 'k' if self._black_bg else 'w'
if bgcolor is None:
bgcolor = 'w' if self._black_bg else 'k'
if hasattr(self, '_cut_displayed'):
# Adapt to the case of mosaic plotting
if isinstance(self.cut_coords, dict):
first_axe = self._cut_displayed[-1]
first_axe = (first_axe, self.cut_coords[first_axe][0])
else:
first_axe = self._cut_displayed[0]
else:
first_axe = self.cut_coords[0]
ax = self.axes[first_axe].ax
ax.text(x, y, text,
transform=self.frame_axes.transAxes,
horizontalalignment='left',
verticalalignment='top',
size=size, color=color,
bbox=dict(boxstyle="square,pad=.3",
ec=bgcolor, fc=bgcolor, alpha=alpha),
zorder=1000,
**kwargs)
ax.set_zorder(1000)
def add_overlay(self, img, threshold=1e-6, colorbar=False, **kwargs):
""" Plot a 3D map in all the views.
Parameters
-----------
img : Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html
If it is a masked array, only the non-masked part will be plotted.
threshold : Int or Float or None, optional
If None is given, the maps are not thresholded.
If a number is given, it is used to threshold the maps:
values below the threshold (in absolute value) are
plotted as transparent. Default=1e-6.
colorbar : boolean, optional
If True, display a colorbar on the right of the plots.
Default=False.
kwargs :
Extra keyword arguments are passed to imshow.
"""
if colorbar and self._colorbar:
raise ValueError("This figure already has an overlay with a "
"colorbar.")
else:
self._colorbar = colorbar
img = _utils.check_niimg_3d(img)
# Make sure that add_overlay shows consistent default behavior
# with plot_stat_map
kwargs.setdefault('interpolation', 'nearest')
ims = self._map_show(img, type='imshow', threshold=threshold, **kwargs)
# `ims` can be empty in some corner cases, look at test_img_plotting.test_outlier_cut_coords.
if colorbar and ims:
self._show_colorbar(ims[0].cmap, ims[0].norm, threshold)
plt.draw_if_interactive()
def add_contours(self, img, threshold=1e-6, filled=False, **kwargs):
""" Contour a 3D map in all the views.
Parameters
-----------
img : Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html
Provides image to plot.
threshold : Int or Float or None, optional
If None is given, the maps are not thresholded.
If a number is given, it is used to threshold the maps,
values below the threshold (in absolute value) are plotted
as transparent. Default=1e-6.
filled : boolean, optional
If filled=True, contours are displayed with color fillings.
Default=False.
kwargs :
Extra keyword arguments are passed to contour, see the
documentation of pylab.contour and see pylab.contourf documentation
for arguments related to contours with fillings.
            Useful arguments are typically "levels", which is a
            list of values to use for plotting contours or contour
            fillings (if filled=True), and
            "colors", which is one color or a list of colors for
            these contours.
Notes
-----
If colors are not specified, default coloring choices
(from matplotlib) for contours and contour_fillings can be
different.
"""
if not filled:
threshold = None
self._map_show(img, type='contour', threshold=threshold, **kwargs)
if filled:
if 'levels' in kwargs:
levels = kwargs['levels']
if len(levels) <= 1:
# contour fillings levels should be given as (lower, upper).
levels.append(np.inf)
self._map_show(img, type='contourf', threshold=threshold, **kwargs)
plt.draw_if_interactive()
def _map_show(self, img, type='imshow',
resampling_interpolation='continuous',
threshold=None, **kwargs):
# In the special case where the affine of img is not diagonal,
# the function `reorder_img` will trigger a resampling
# of the provided image with a continuous interpolation
# since this is the default value here. In the special
# case where this image is binary, such as when this function
# is called from `add_contours`, continuous interpolation
# does not make sense and we turn to nearest interpolation instead.
if _utils.niimg._is_binary_niimg(img):
img = reorder_img(img, resample='nearest')
else:
img = reorder_img(img, resample=resampling_interpolation)
threshold = float(threshold) if threshold is not None else None
if threshold is not None:
data = _utils.niimg._safe_get_data(img, ensure_finite=True)
if threshold == 0:
data = np.ma.masked_equal(data, 0, copy=False)
else:
data = np.ma.masked_inside(data, -threshold, threshold,
copy=False)
img = new_img_like(img, data, img.affine)
affine = img.affine
data = _utils.niimg._safe_get_data(img, ensure_finite=True)
data_bounds = get_bounds(data.shape, affine)
(xmin, xmax), (ymin, ymax), (zmin, zmax) = data_bounds
xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \
xmin, xmax, ymin, ymax, zmin, zmax
# Compute tight bounds
if type in ('contour', 'contourf'):
# Define a pseudo threshold to have a tight bounding box
if 'levels' in kwargs:
thr = 0.9 * np.min(np.abs(kwargs['levels']))
else:
thr = 1e-6
not_mask = np.logical_or(data > thr, data < -thr)
xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \
get_mask_bounds(new_img_like(img, not_mask, affine))
elif hasattr(data, 'mask') and isinstance(data.mask, np.ndarray):
not_mask = np.logical_not(data.mask)
xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \
get_mask_bounds(new_img_like(img, not_mask, affine))
data_2d_list = []
for display_ax in self.axes.values():
try:
data_2d = display_ax.transform_to_2d(data, affine)
except IndexError:
# We are cutting outside the indices of the data
data_2d = None
data_2d_list.append(data_2d)
if kwargs.get('vmin') is None:
kwargs['vmin'] = np.ma.min([d.min() for d in data_2d_list
if d is not None])
if kwargs.get('vmax') is None:
kwargs['vmax'] = np.ma.max([d.max() for d in data_2d_list
if d is not None])
bounding_box = (xmin_, xmax_), (ymin_, ymax_), (zmin_, zmax_)
ims = []
to_iterate_over = zip(self.axes.values(), data_2d_list)
for display_ax, data_2d in to_iterate_over:
if data_2d is not None and data_2d.min() is not np.ma.masked:
# If data_2d is completely masked, then there is nothing to
# plot. Hence, no point to do imshow(). Moreover, we see
# problem came up with matplotlib 2.1.0 (issue #9280) when
# data is completely masked or with numpy < 1.14
# (issue #4595). This work around can be removed when bumping
# matplotlib version above 2.1.0
im = display_ax.draw_2d(data_2d, data_bounds, bounding_box,
type=type, **kwargs)
ims.append(im)
return ims
def _show_colorbar(self, cmap, norm, threshold=None):
"""Displays the colorbar.
Parameters
----------
cmap : a matplotlib colormap
The colormap used.
norm : a matplotlib.colors.Normalize object
This object is typically found as the 'norm' attribute of an
matplotlib.image.AxesImage.
threshold : float or None, optional
The absolute value at which the colorbar is thresholded.
"""
if threshold is None:
offset = 0
else:
offset = threshold
if offset > norm.vmax:
offset = norm.vmax
# create new axis for the colorbar
figure = self.frame_axes.figure
_, y0, x1, y1 = self.rect
height = y1 - y0
x_adjusted_width = self._colorbar_width / len(self.axes)
x_adjusted_margin = self._colorbar_margin['right'] / len(self.axes)
lt_wid_top_ht = [x1 - (x_adjusted_width + x_adjusted_margin),
y0 + self._colorbar_margin['top'],
x_adjusted_width,
height - (self._colorbar_margin['top'] +
self._colorbar_margin['bottom'])]
self._colorbar_ax = figure.add_axes(lt_wid_top_ht)
if LooseVersion(matplotlib.__version__) >= LooseVersion("1.6"):
self._colorbar_ax.set_facecolor('w')
else:
self._colorbar_ax.set_axis_bgcolor('w')
our_cmap = mpl_cm.get_cmap(cmap)
# edge case where the data has a single value
# yields a cryptic matplotlib error message
# when trying to plot the color bar
nb_ticks = 5 if norm.vmin != norm.vmax else 1
ticks = np.linspace(norm.vmin, norm.vmax, nb_ticks)
bounds = np.linspace(norm.vmin, norm.vmax, our_cmap.N)
# some colormap hacking
cmaplist = [our_cmap(i) for i in range(our_cmap.N)]
transparent_start = int(norm(-offset, clip=True) * (our_cmap.N - 1))
transparent_stop = int(norm(offset, clip=True) * (our_cmap.N - 1))
for i in range(transparent_start, transparent_stop):
cmaplist[i] = self._brain_color + (0.,) # transparent
if norm.vmin == norm.vmax: # len(np.unique(data)) == 1 ?
return
else:
our_cmap = colors.LinearSegmentedColormap.from_list(
'Custom cmap', cmaplist, our_cmap.N)
self._cbar = ColorbarBase(
self._colorbar_ax, ticks=ticks, norm=norm,
orientation='vertical', cmap=our_cmap, boundaries=bounds,
spacing='proportional', format='%.2g')
self._cbar.ax.set_facecolor(self._brain_color)
self._colorbar_ax.yaxis.tick_left()
tick_color = 'w' if self._black_bg else 'k'
outline_color = 'w' if self._black_bg else 'k'
for tick in self._colorbar_ax.yaxis.get_ticklabels():
tick.set_color(tick_color)
self._colorbar_ax.yaxis.set_tick_params(width=0)
self._cbar.outline.set_edgecolor(outline_color)
def add_edges(self, img, color='r'):
""" Plot the edges of a 3D map in all the views.
Parameters
----------
img : Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html
The 3D map to be plotted.
If it is a masked array, only the non-masked part will be plotted.
color : matplotlib color: string or (r, g, b) value
The color used to display the edge map.
Default='r'.
"""
img = reorder_img(img, resample='continuous')
data = get_data(img)
affine = img.affine
single_color_cmap = colors.ListedColormap([color])
data_bounds = get_bounds(data.shape, img.affine)
# For each ax, cut the data and plot it
for display_ax in self.axes.values():
try:
data_2d = display_ax.transform_to_2d(data, affine)
edge_mask = _edge_map(data_2d)
except IndexError:
# We are cutting outside the indices of the data
continue
display_ax.draw_2d(edge_mask, data_bounds, data_bounds,
type='imshow', cmap=single_color_cmap)
plt.draw_if_interactive()
def add_markers(self, marker_coords, marker_color='r', marker_size=30,
**kwargs):
"""Add markers to the plot.
Parameters
----------
marker_coords : array of size (n_markers, 3)
            Coordinates of the markers to plot. For each slice, only markers
            that are within 2 millimeters of the slice are plotted.
marker_color : pyplot compatible color or list of shape (n_markers,), optional
List of colors for each marker that can be string or matplotlib colors.
Default='r'.
marker_size : single float or list of shape (n_markers,), optional
Size in pixel for each marker. Default=30.
"""
defaults = {'marker': 'o',
'zorder': 1000}
marker_coords = np.asanyarray(marker_coords)
for k, v in defaults.items():
kwargs.setdefault(k, v)
for display_ax in self.axes.values():
direction = display_ax.direction
coord = display_ax.coord
marker_coords_2d, third_d = _coords_3d_to_2d(
marker_coords, direction, return_direction=True)
xdata, ydata = marker_coords_2d.T
# Allow markers only in their respective hemisphere when appropriate
marker_color_ = marker_color
            if direction in 'lr':
if (not isinstance(marker_color, str) and
not isinstance(marker_color, np.ndarray)):
marker_color_ = np.asarray(marker_color)
xcoords, ycoords, zcoords = marker_coords.T
if direction == 'r':
relevant_coords = (xcoords >= 0)
elif direction == 'l':
relevant_coords = (xcoords <= 0)
xdata = xdata[relevant_coords]
ydata = ydata[relevant_coords]
if (not isinstance(marker_color, str) and
len(marker_color) != 1):
marker_color_ = marker_color_[relevant_coords]
            # Check whether coord is a number, i.e. represents an actual cut
            # in this direction, before applying the heuristic below. If no
            # foreground image is given the coordinate is empty or None,
            # which is valid when plotting markers on a glass brain without
            # any foreground image.
if isinstance(coord, numbers.Number):
# Heuristic that plots only markers that are 2mm away
# from the current slice.
# XXX: should we keep this heuristic?
mask = np.abs(third_d - coord) <= 2.
xdata = xdata[mask]
ydata = ydata[mask]
display_ax.ax.scatter(xdata, ydata, s=marker_size,
c=marker_color_, **kwargs)
def annotate(self, left_right=True, positions=True, scalebar=False,
size=12, scale_size=5.0, scale_units='cm', scale_loc=4,
decimals=0, **kwargs):
"""Add annotations to the plot.
Parameters
----------
left_right : boolean, optional
If left_right is True, annotations indicating which side
is left and which side is right are drawn. Default=True.
positions : boolean, optional
If positions is True, annotations indicating the
positions of the cuts are drawn. Default=True.
scalebar : boolean, optional
If ``True``, cuts are annotated with a reference scale bar.
For finer control of the scale bar, please check out
the draw_scale_bar method on the axes in "axes" attribute of
this object. Default=False.
size : integer, optional
The size of the text used. Default=12.
scale_size : number, optional
The length of the scalebar, in units of scale_units.
Default=5.0.
scale_units : {'cm', 'mm'}, optional
The units for the scalebar. Default='cm'.
scale_loc : integer, optional
The positioning for the scalebar. Default=4.
Valid location codes are:
- 'upper right' : 1
- 'upper left' : 2
- 'lower left' : 3
- 'lower right' : 4
- 'right' : 5
- 'center left' : 6
- 'center right' : 7
- 'lower center' : 8
- 'upper center' : 9
- 'center' : 10
decimals : integer, optional
            Number of decimal places on slice position annotation. If zero,
            the slice position is shown as an integer without a decimal point.
Default=0.
kwargs :
Extra keyword arguments are passed to matplotlib's text
function.
"""
kwargs = kwargs.copy()
if 'color' not in kwargs:
if self._black_bg:
kwargs['color'] = 'w'
else:
kwargs['color'] = 'k'
bg_color = ('k' if self._black_bg else 'w')
if left_right:
for display_axis in self.axes.values():
display_axis.draw_left_right(size=size, bg_color=bg_color,
**kwargs)
if positions:
for display_axis in self.axes.values():
display_axis.draw_position(size=size, bg_color=bg_color,
decimals=decimals,
**kwargs)
if scalebar:
axes = self.axes.values()
for display_axis in axes:
display_axis.draw_scale_bar(bg_color=bg_color,
fontsize=size,
size=scale_size,
units=scale_units,
loc=scale_loc,
**kwargs)
def close(self):
""" Close the figure. This is necessary to avoid leaking memory.
"""
plt.close(self.frame_axes.figure.number)
def savefig(self, filename, dpi=None):
""" Save the figure to a file
Parameters
----------
filename : string
The file name to save to. Its extension determines the
file type, typically '.png', '.svg' or '.pdf'.
dpi : None or scalar, optional
The resolution in dots per inch.
"""
facecolor = edgecolor = 'k' if self._black_bg else 'w'
self.frame_axes.figure.savefig(filename, dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor)
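# A minimal end-to-end sketch (hypothetical helper) of the BaseSlicer life
# cycle, using only methods defined above. `slicer_cls` is assumed to be a
# concrete subclass such as OrthoSlicer, `img` a 3D Niimg-like object, and
# the output filename is an arbitrary example.
def _demo_slicer_workflow(slicer_cls, img, filename='demo_cuts.png'):
    display = slicer_cls.init_with_figure(img=img, colorbar=True)
    display.add_overlay(img, threshold=1e-6, colorbar=True)
    display.annotate(scalebar=True)
    display.title('slicer workflow sketch')
    display.savefig(filename)
    display.close()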
###############################################################################
# class OrthoSlicer
###############################################################################
class OrthoSlicer(BaseSlicer):
""" A class to create 3 linked axes for plotting orthogonal
cuts of 3D maps.
Attributes
----------
axes : dictionary of axes
The 3 axes used to plot each view.
frame_axes : axes
The axes framing the whole set of views.
Notes
-----
    The extents of the different axes are adjusted to best fit the data
    in the viewing area.
"""
_cut_displayed = 'yxz'
_axes_class = CutAxes
@classmethod
def find_cut_coords(cls, img=None, threshold=None, cut_coords=None):
"Instantiate the slicer and find cut coordinates"
if cut_coords is None:
if img is None or img is False:
cut_coords = (0, 0, 0)
else:
cut_coords = find_xyz_cut_coords(
img, activation_threshold=threshold)
cut_coords = [cut_coords['xyz'.find(c)]
for c in sorted(cls._cut_displayed)]
return cut_coords
def _init_axes(self, **kwargs):
cut_coords = self.cut_coords
if len(cut_coords) != len(self._cut_displayed):
            raise ValueError('The number of cut_coords passed does not'
                             ' match the display_mode')
x0, y0, x1, y1 = self.rect
facecolor = 'k' if self._black_bg else 'w'
# Create our axes:
self.axes = dict()
for index, direction in enumerate(self._cut_displayed):
fh = self.frame_axes.get_figure()
ax = fh.add_axes([0.3 * index * (x1 - x0) + x0, y0,
.3 * (x1 - x0), y1 - y0], aspect='equal')
if LooseVersion(matplotlib.__version__) >= LooseVersion("1.6"):
ax.set_facecolor(facecolor)
else:
ax.set_axis_bgcolor(facecolor)
ax.axis('off')
coord = self.cut_coords[
sorted(self._cut_displayed).index(direction)]
display_ax = self._axes_class(ax, direction, coord, **kwargs)
self.axes[direction] = display_ax
ax.set_axes_locator(self._locator)
if self._black_bg:
for ax in self.axes.values():
ax.ax.imshow(np.zeros((2, 2, 3)),
extent=[-5000, 5000, -5000, 5000],
zorder=-500, aspect='equal')
# To have a black background in PDF, we need to create a
# patch in black for the background
self.frame_axes.imshow(np.zeros((2, 2, 3)),
extent=[-5000, 5000, -5000, 5000],
zorder=-500, aspect='auto')
self.frame_axes.set_zorder(-1000)
def _locator(self, axes, renderer):
""" The locator function used by matplotlib to position axes.
Here we put the logic used to adjust the size of the axes.
"""
x0, y0, x1, y1 = self.rect
width_dict = dict()
# A dummy axes, for the situation in which we are not plotting
# all three (x, y, z) cuts
dummy_ax = self._axes_class(None, None, None)
width_dict[dummy_ax.ax] = 0
display_ax_dict = self.axes
if self._colorbar:
adjusted_width = self._colorbar_width / len(self.axes)
right_margin = self._colorbar_margin['right'] / len(self.axes)
ticks_margin = self._colorbar_margin['left'] / len(self.axes)
x1 = x1 - (adjusted_width + ticks_margin + right_margin)
for display_ax in display_ax_dict.values():
bounds = display_ax.get_object_bounds()
if not bounds:
# This happens if the call to _map_show was not
# successful. As it happens asynchronously (during a
# refresh of the figure) we capture the problem and
# ignore it: it only adds a non informative traceback
bounds = [0, 1, 0, 1]
xmin, xmax, ymin, ymax = bounds
width_dict[display_ax.ax] = (xmax - xmin)
total_width = float(sum(width_dict.values()))
for ax, width in width_dict.items():
width_dict[ax] = width / total_width * (x1 - x0)
direction_ax = []
for d in self._cut_displayed:
direction_ax.append(display_ax_dict.get(d, dummy_ax).ax)
left_dict = dict()
for idx, ax in enumerate(direction_ax):
left_dict[ax] = x0
for prev_ax in direction_ax[:idx]:
left_dict[ax] += width_dict[prev_ax]
return transforms.Bbox([[left_dict[axes], y0],
[left_dict[axes] + width_dict[axes], y1]])
def draw_cross(self, cut_coords=None, **kwargs):
""" Draw a crossbar on the plot to show where the cut is
performed.
Parameters
----------
cut_coords : 3-tuple of floats, optional
The position of the cross to draw. If none is passed, the
ortho_slicer's cut coordinates are used.
kwargs :
Extra keyword arguments are passed to axhline
"""
if cut_coords is None:
cut_coords = self.cut_coords
coords = dict()
for direction in 'xyz':
coord = None
if direction in self._cut_displayed:
coord = cut_coords[
sorted(self._cut_displayed).index(direction)]
coords[direction] = coord
x, y, z = coords['x'], coords['y'], coords['z']
kwargs = kwargs.copy()
if 'color' not in kwargs:
if self._black_bg:
kwargs['color'] = '.8'
else:
kwargs['color'] = 'k'
if 'y' in self.axes:
ax = self.axes['y'].ax
if x is not None:
ax.axvline(x, ymin=.05, ymax=.95, **kwargs)
if z is not None:
ax.axhline(z, **kwargs)
if 'x' in self.axes:
ax = self.axes['x'].ax
if y is not None:
ax.axvline(y, ymin=.05, ymax=.95, **kwargs)
if z is not None:
ax.axhline(z, xmax=.95, **kwargs)
if 'z' in self.axes:
ax = self.axes['z'].ax
if x is not None:
ax.axvline(x, ymin=.05, ymax=.95, **kwargs)
if y is not None:
ax.axhline(y, **kwargs)
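# A minimal sketch (hypothetical usage) of OrthoSlicer with explicit cut
# coordinates: `stat_img` is assumed to be a 3D Niimg-like statistical map
# and the (x, y, z) coordinates are made-up example positions.
def _demo_ortho_cuts(stat_img, cut_coords=(0, -52, 18)):
    display = OrthoSlicer.init_with_figure(img=stat_img, cut_coords=cut_coords)
    display.add_overlay(stat_img, threshold=3.0)
    # Mark where the three cuts intersect.
    display.draw_cross(cut_coords=cut_coords)
    display.annotate(left_right=True, positions=True)
    return display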
###############################################################################
# class TiledSlicer
###############################################################################
class TiledSlicer(BaseSlicer):
""" A class to create 3 axes for plotting orthogonal
cuts of 3D maps, organized in a 2x2 grid.
Attributes
----------
axes : dictionary of axes
The 3 axes used to plot each view.
frame_axes : axes
The axes framing the whole set of views.
Notes
-----
    The extents of the different axes are adjusted to best fit the data
    in the viewing area.
"""
_cut_displayed = 'yxz'
_axes_class = CutAxes
_default_figsize = [2.0, 6.0]
@classmethod
def find_cut_coords(cls, img=None, threshold=None, cut_coords=None):
"""Instantiate the slicer and find cut coordinates.
Parameters
----------
img : 3D Nifti1Image
The brain map.
threshold : float, optional
The lower threshold to the positive activation. If None, the
activation threshold is computed using the 80% percentile of
the absolute value of the map.
cut_coords : list of float, optional
xyz world coordinates of cuts.
Returns
-------
cut_coords : list of float
xyz world coordinates of cuts.
"""
if cut_coords is None:
if img is None or img is False:
cut_coords = (0, 0, 0)
else:
cut_coords = find_xyz_cut_coords(
img, activation_threshold=threshold)
cut_coords = [cut_coords['xyz'.find(c)]
for c in sorted(cls._cut_displayed)]
return cut_coords
def _find_initial_axes_coord(self, index):
"""Find coordinates for initial axes placement for xyz cuts.
Parameters
----------
index : int
Index corresponding to current cut 'x', 'y' or 'z'.
Returns
-------
[coord1, coord2, coord3, coord4] : list of int
x0, y0, x1, y1 coordinates used by matplotlib
to position axes in figure.
"""
rect_x0, rect_y0, rect_x1, rect_y1 = self.rect
if index == 0:
coord1 = rect_x1 - rect_x0
coord2 = 0.5 * (rect_y1 - rect_y0) + rect_y0
coord3 = 0.5 * (rect_x1 - rect_x0) + rect_x0
coord4 = rect_y1 - rect_y0
elif index == 1:
coord1 = 0.5 * (rect_x1 - rect_x0) + rect_x0
coord2 = 0.5 * (rect_y1 - rect_y0) + rect_y0
coord3 = rect_x1 - rect_x0
coord4 = rect_y1 - rect_y0
elif index == 2:
coord1 = rect_x1 - rect_x0
coord2 = rect_y1 - rect_y0
coord3 = 0.5 * (rect_x1 - rect_x0) + rect_x0
coord4 = 0.5 * (rect_y1 - rect_y0) + rect_y0
return [coord1, coord2, coord3, coord4]
def _init_axes(self, **kwargs):
"""Initializes and places axes for display of 'xyz' cuts.
Parameters
----------
kwargs :
additional arguments to pass to self._axes_class
"""
cut_coords = self.cut_coords
if len(cut_coords) != len(self._cut_displayed):
            raise ValueError('The number of cut_coords passed does not'
                             ' match the display_mode')
facecolor = 'k' if self._black_bg else 'w'
self.axes = dict()
for index, direction in enumerate(self._cut_displayed):
fh = self.frame_axes.get_figure()
axes_coords = self._find_initial_axes_coord(index)
ax = fh.add_axes(axes_coords, aspect='equal')
if LooseVersion(matplotlib.__version__) >= LooseVersion("1.6"):
ax.set_facecolor(facecolor)
else:
ax.set_axis_bgcolor(facecolor)
ax.axis('off')
coord = self.cut_coords[
sorted(self._cut_displayed).index(direction)]
display_ax = self._axes_class(ax, direction, coord, **kwargs)
self.axes[direction] = display_ax
ax.set_axes_locator(self._locator)
def _adjust_width_height(self, width_dict, height_dict,
rect_x0, rect_y0, rect_x1, rect_y1):
"""Adjusts absolute image width and height to ratios.
Parameters
----------
width_dict : dict
Width of image cuts displayed in axes.
height_dict : dict
Height of image cuts displayed in axes.
rect_x0, rect_y0, rect_x1, rect_y1 : float
Matplotlib figure boundaries.
Returns
-------
width_dict : dict
Width ratios of image cuts for optimal positioning of axes.
height_dict : dict
Height ratios of image cuts for optimal positioning of axes.
"""
total_height = 0
total_width = 0
if 'y' in self.axes:
ax = self.axes['y'].ax
total_height = total_height + height_dict[ax]
total_width = total_width + width_dict[ax]
if 'x' in self.axes:
ax = self.axes['x'].ax
total_width = total_width + width_dict[ax]
if 'z' in self.axes:
ax = self.axes['z'].ax
total_height = total_height + height_dict[ax]
for ax, width in width_dict.items():
width_dict[ax] = width / total_width * (rect_x1 - rect_x0)
for ax, height in height_dict.items():
height_dict[ax] = height / total_height * (rect_y1 - rect_y0)
return (width_dict, height_dict)
def _find_axes_coord(self, rel_width_dict, rel_height_dict,
rect_x0, rect_y0, rect_x1, rect_y1):
""""Find coordinates for initial axes placement for xyz cuts.
Parameters
----------
rel_width_dict : dict
Width ratios of image cuts for optimal positioning of axes.
rel_height_dict : dict
Height ratios of image cuts for optimal positioning of axes.
rect_x0, rect_y0, rect_x1, rect_y1 : float
Matplotlib figure boundaries.
Returns
-------
coord1, coord2, coord3, coord4 : dict
x0, y0, x1, y1 coordinates per axes used by matplotlib
to position axes in figure.
"""
coord1 = dict()
coord2 = dict()
coord3 = dict()
coord4 = dict()
if 'y' in self.axes:
ax = self.axes['y'].ax
coord1[ax] = rect_x0
coord2[ax] = (rect_y1) - rel_height_dict[ax]
coord3[ax] = rect_x0 + rel_width_dict[ax]
coord4[ax] = rect_y1
if 'x' in self.axes:
ax = self.axes['x'].ax
coord1[ax] = (rect_x1) - rel_width_dict[ax]
coord2[ax] = (rect_y1) - rel_height_dict[ax]
coord3[ax] = rect_x1
coord4[ax] = rect_y1
if 'z' in self.axes:
ax = self.axes['z'].ax
coord1[ax] = rect_x0
coord2[ax] = rect_y0
coord3[ax] = rect_x0 + rel_width_dict[ax]
coord4[ax] = rect_y0 + rel_height_dict[ax]
        return (coord1, coord2, coord3, coord4)
def _locator(self, axes, renderer):
""" The locator function used by matplotlib to position axes.
Here we put the logic used to adjust the size of the axes.
"""
rect_x0, rect_y0, rect_x1, rect_y1 = self.rect
# image width and height
width_dict = dict()
height_dict = dict()
# A dummy axes, for the situation in which we are not plotting
# all three (x, y, z) cuts
dummy_ax = self._axes_class(None, None, None)
width_dict[dummy_ax.ax] = 0
height_dict[dummy_ax.ax] = 0
display_ax_dict = self.axes
if self._colorbar:
adjusted_width = self._colorbar_width / len(self.axes)
right_margin = self._colorbar_margin['right'] / len(self.axes)
ticks_margin = self._colorbar_margin['left'] / len(self.axes)
rect_x1 = rect_x1 - (adjusted_width + ticks_margin + right_margin)
for display_ax in display_ax_dict.values():
bounds = display_ax.get_object_bounds()
if not bounds:
# This happens if the call to _map_show was not
# successful. As it happens asynchronously (during a
# refresh of the figure) we capture the problem and
# ignore it: it only adds a non informative traceback
bounds = [0, 1, 0, 1]
xmin, xmax, ymin, ymax = bounds
width_dict[display_ax.ax] = (xmax - xmin)
height_dict[display_ax.ax] = (ymax - ymin)
# relative image height and width
rel_width_dict, rel_height_dict = self._adjust_width_height(
width_dict, height_dict,
rect_x0, rect_y0, rect_x1, rect_y1)
direction_ax = []
for d in self._cut_displayed:
direction_ax.append(display_ax_dict.get(d, dummy_ax).ax)
coord1, coord2, coord3, coord4 = self._find_axes_coord(
rel_width_dict, rel_height_dict,
rect_x0, rect_y0, rect_x1, rect_y1)
return transforms.Bbox([[coord1[axes], coord2[axes]],
[coord3[axes], coord4[axes]]])
def draw_cross(self, cut_coords=None, **kwargs):
"""Draw a crossbar on the plot to show where the cut is performed.
Parameters
----------
cut_coords : 3-tuple of floats, optional
The position of the cross to draw. If none is passed, the
ortho_slicer's cut coordinates are used.
kwargs :
Extra keyword arguments are passed to axhline
"""
if cut_coords is None:
cut_coords = self.cut_coords
coords = dict()
for direction in 'xyz':
coord_ = None
if direction in self._cut_displayed:
sorted_cuts = sorted(self._cut_displayed)
index = sorted_cuts.index(direction)
coord_ = cut_coords[index]
coords[direction] = coord_
x, y, z = coords['x'], coords['y'], coords['z']
kwargs = kwargs.copy()
        if 'color' not in kwargs:
            kwargs['color'] = '.8' if self._black_bg else 'k'
if 'y' in self.axes:
ax = self.axes['y'].ax
if x is not None:
ax.axvline(x, **kwargs)
if z is not None:
ax.axhline(z, **kwargs)
if 'x' in self.axes:
ax = self.axes['x'].ax
if y is not None:
ax.axvline(y, **kwargs)
if z is not None:
ax.axhline(z, **kwargs)
if 'z' in self.axes:
ax = self.axes['z'].ax
if x is not None:
ax.axvline(x, **kwargs)
if y is not None:
ax.axhline(y, **kwargs)
###############################################################################
# class BaseStackedSlicer
###############################################################################
class BaseStackedSlicer(BaseSlicer):
""" A class to create linked axes for plotting stacked
cuts of 2D maps.
Attributes
----------
axes : dictionary of axes
The axes used to plot each view.
frame_axes : axes
The axes framing the whole set of views.
Notes
-----
    The extents of the different axes are adjusted to best fit the data
    in the viewing area.
"""
@classmethod
def find_cut_coords(cls, img=None, threshold=None, cut_coords=None):
"Instantiate the slicer and find cut coordinates"
if cut_coords is None:
cut_coords = 7
if img is None or img is False:
bounds = ((-40, 40), (-30, 30), (-30, 75))
lower, upper = bounds['xyz'.index(cls._direction)]
cut_coords = np.linspace(lower, upper, cut_coords).tolist()
else:
if (not isinstance(cut_coords, collections.abc.Sequence) and
isinstance(cut_coords, numbers.Number)):
cut_coords = find_cut_slices(img,
direction=cls._direction,
n_cuts=cut_coords)
return cut_coords
def _init_axes(self, **kwargs):
x0, y0, x1, y1 = self.rect
# Create our axes:
self.axes = dict()
fraction = 1. / len(self.cut_coords)
for index, coord in enumerate(self.cut_coords):
coord = float(coord)
fh = self.frame_axes.get_figure()
ax = fh.add_axes([fraction * index * (x1 - x0) + x0, y0,
fraction * (x1 - x0), y1 - y0])
ax.axis('off')
display_ax = self._axes_class(ax, self._direction,
coord, **kwargs)
self.axes[coord] = display_ax
ax.set_axes_locator(self._locator)
if self._black_bg:
for ax in self.axes.values():
ax.ax.imshow(np.zeros((2, 2, 3)),
extent=[-5000, 5000, -5000, 5000],
zorder=-500, aspect='equal')
# To have a black background in PDF, we need to create a
# patch in black for the background
self.frame_axes.imshow(np.zeros((2, 2, 3)),
extent=[-5000, 5000, -5000, 5000],
zorder=-500, aspect='auto')
self.frame_axes.set_zorder(-1000)
def _locator(self, axes, renderer):
""" The locator function used by matplotlib to position axes.
Here we put the logic used to adjust the size of the axes.
"""
x0, y0, x1, y1 = self.rect
width_dict = dict()
display_ax_dict = self.axes
if self._colorbar:
adjusted_width = self._colorbar_width / len(self.axes)
right_margin = self._colorbar_margin['right'] / len(self.axes)
ticks_margin = self._colorbar_margin['left'] / len(self.axes)
x1 = x1 - (adjusted_width + right_margin + ticks_margin)
for display_ax in display_ax_dict.values():
bounds = display_ax.get_object_bounds()
if not bounds:
# This happens if the call to _map_show was not
# successful. As it happens asynchronously (during a
# refresh of the figure) we capture the problem and
# ignore it: it only adds a non informative traceback
bounds = [0, 1, 0, 1]
xmin, xmax, ymin, ymax = bounds
width_dict[display_ax.ax] = (xmax - xmin)
total_width = float(sum(width_dict.values()))
for ax, width in width_dict.items():
width_dict[ax] = width / total_width * (x1 - x0)
left_dict = dict()
left = float(x0)
for coord, display_ax in display_ax_dict.items():
left_dict[display_ax.ax] = left
this_width = width_dict[display_ax.ax]
left += this_width
return transforms.Bbox([[left_dict[axes], y0],
[left_dict[axes] + width_dict[axes], y1]])
def draw_cross(self, cut_coords=None, **kwargs):
""" Draw a crossbar on the plot to show where the cut is
performed.
Parameters
----------
cut_coords : 3-tuple of floats, optional
The position of the cross to draw. If none is passed, the
ortho_slicer's cut coordinates are used.
kwargs :
Extra keyword arguments are passed to axhline
"""
return
class XSlicer(BaseStackedSlicer):
_direction = 'x'
_default_figsize = [2.6, 2.3]
class YSlicer(BaseStackedSlicer):
_direction = 'y'
_default_figsize = [2.2, 2.3]
class ZSlicer(BaseStackedSlicer):
_direction = 'z'
_default_figsize = [2.2, 2.3]
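# A minimal sketch (hypothetical usage) of a stacked display: passing an
# integer as cut_coords lets find_cut_coords pick that many slice positions,
# from the image when one is given or from default bounds otherwise. `img`
# is assumed to be a 3D Niimg-like object or None.
def _demo_axial_stack(img=None, n_cuts=7):
    display = ZSlicer.init_with_figure(img=img, cut_coords=n_cuts)
    if img is not None:
        display.add_overlay(img, threshold=1e-6)
    return display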
class XZSlicer(OrthoSlicer):
_cut_displayed = 'xz'
class YXSlicer(OrthoSlicer):
_cut_displayed = 'yx'
class YZSlicer(OrthoSlicer):
_cut_displayed = 'yz'
class MosaicSlicer(BaseSlicer):
""" A class to create 3 axes for plotting cuts of 3D maps,
in multiple rows and columns.
Attributes
----------
axes : dictionary of axes
The 3 axes used to plot multiple views.
frame_axes : axes
The axes framing the whole set of views.
"""
_cut_displayed = 'yxz'
_axes_class = CutAxes
_default_figsize = [11.1, 7.2]
@classmethod
def find_cut_coords(cls, img=None, threshold=None, cut_coords=None):
"""Instantiate the slicer and find cut coordinates for mosaic plotting.
Parameters
----------
img : 3D Nifti1Image, optional
The brain image.
threshold : float, optional
The lower threshold to the positive activation. If None, the
activation threshold is computed using the 80% percentile of
the absolute value of the map.
        cut_coords : list/tuple of 3 integers or a single integer, optional
            Number of cuts to perform in each sectional direction. If
            cut_coords is not provided, 7 cuts per direction are
            automatically calculated.
Returns
-------
cut_coords : dict
xyz world coordinates of cuts in a direction. Each key
denotes the direction.
"""
if cut_coords is None:
cut_coords = 7
if (not isinstance(cut_coords, collections.abc.Sequence) and
isinstance(cut_coords, numbers.Number)):
cut_coords = [cut_coords] * 3
cut_coords = cls._find_cut_coords(img, cut_coords,
cls._cut_displayed)
else:
if len(cut_coords) != len(cls._cut_displayed):
                raise ValueError('The number of cut_coords passed does not'
                                 ' match the display_mode. Mosaic plotting '
                                 'expects a tuple of length 3.')
cut_coords = [cut_coords['xyz'.find(c)]
for c in sorted(cls._cut_displayed)]
cut_coords = cls._find_cut_coords(img, cut_coords,
cls._cut_displayed)
return cut_coords
@staticmethod
def _find_cut_coords(img, cut_coords, cut_displayed):
""" Find slicing positions along a given axis.
Helper function to find_cut_coords.
Parameters
----------
img : 3D Nifti1Image
The brain image.
        cut_coords : list/tuple of 3 integers
            Number of cuts to compute for each sectional direction.
cut_displayed : str
Sectional directions 'yxz'
Returns
-------
        cut_coords : dict
            The computed cut coordinates: one list of slice positions per
            sectional direction.
"""
coords = dict()
if img is None or img is False:
bounds = ((-40, 40), (-30, 30), (-30, 75))
for direction, n_cuts in zip(sorted(cut_displayed),
cut_coords):
lower, upper = bounds['xyz'.index(direction)]
coords[direction] = np.linspace(lower, upper,
n_cuts).tolist()
else:
for direction, n_cuts in zip(sorted(cut_displayed),
cut_coords):
coords[direction] = find_cut_slices(img, direction=direction,
n_cuts=n_cuts)
return coords
def _init_axes(self, **kwargs):
"""Initializes and places axes for display of 'xyz' multiple cuts.
Parameters
----------
kwargs:
additional arguments to pass to self._axes_class
"""
if not isinstance(self.cut_coords, dict):
self.cut_coords = self.find_cut_coords(cut_coords=self.cut_coords)
if len(self.cut_coords) != len(self._cut_displayed):
            raise ValueError('The number of cut_coords passed does not'
                             ' match the mosaic mode')
x0, y0, x1, y1 = self.rect
# Create our axes:
self.axes = dict()
# portions for main axes
fraction = y1 / len(self.cut_coords)
height = fraction
for index, direction in enumerate(self._cut_displayed):
coords = self.cut_coords[direction]
# portions allotment for each of 'x', 'y', 'z' coordinate
fraction_c = 1. / len(coords)
fh = self.frame_axes.get_figure()
indices = [x0, fraction * index * (y1 - y0) + y0,
x1, fraction * (y1 - y0)]
ax = fh.add_axes(indices)
ax.axis('off')
this_x0, this_y0, this_x1, this_y1 = indices
for index_c, coord in enumerate(coords):
coord = float(coord)
fh_c = ax.get_figure()
# indices for each sub axes within main axes
indices = [fraction_c * index_c * (this_x1 - this_x0) + this_x0,
this_y0,
fraction_c * (this_x1 - this_x0),
height]
ax = fh_c.add_axes(indices)
ax.axis('off')
display_ax = self._axes_class(ax, direction,
coord, **kwargs)
self.axes[(direction, coord)] = display_ax
ax.set_axes_locator(self._locator)
def _locator(self, axes, renderer):
""" The locator function used by matplotlib to position axes.
Here we put the logic used to adjust the size of the axes.
"""
x0, y0, x1, y1 = self.rect
display_ax_dict = self.axes
if self._colorbar:
adjusted_width = self._colorbar_width / len(self.axes)
right_margin = self._colorbar_margin['right'] / len(self.axes)
ticks_margin = self._colorbar_margin['left'] / len(self.axes)
x1 = x1 - (adjusted_width + right_margin + ticks_margin)
# capture widths for each axes for anchoring Bbox
width_dict = dict()
for direction in self._cut_displayed:
this_width = dict()
for display_ax in display_ax_dict.values():
if direction == display_ax.direction:
bounds = display_ax.get_object_bounds()
if not bounds:
# This happens if the call to _map_show was not
# successful. As it happens asynchronously (during a
# refresh of the figure) we capture the problem and
# ignore it: it only adds a non informative traceback
bounds = [0, 1, 0, 1]
xmin, xmax, ymin, ymax = bounds
this_width[display_ax.ax] = (xmax - xmin)
total_width = float(sum(this_width.values()))
for ax, w in this_width.items():
width_dict[ax] = w / total_width * (x1 - x0)
left_dict = dict()
# bottom positions in Bbox according to cuts
bottom_dict = dict()
        # the vertical extent is divided evenly among the cut directions 'y', 'x', 'z'
fraction = y1 / len(self._cut_displayed)
height_dict = dict()
for index, direction in enumerate(self._cut_displayed):
left = float(x0)
this_height = fraction + fraction * index
for coord, display_ax in display_ax_dict.items():
if direction == display_ax.direction:
left_dict[display_ax.ax] = left
this_width = width_dict[display_ax.ax]
left += this_width
bottom_dict[display_ax.ax] = fraction * index * (y1 - y0)
height_dict[display_ax.ax] = this_height
return transforms.Bbox([[left_dict[axes], bottom_dict[axes]],
[left_dict[axes] + width_dict[axes],
height_dict[axes]]])
def draw_cross(self, cut_coords=None, **kwargs):
""" Draw a crossbar on the plot to show where the cut is
performed.
Parameters
----------
cut_coords: 3-tuple of floats, optional
The position of the cross to draw. If none is passed, the
ortho_slicer's cut coordinates are used.
kwargs:
Extra keyword arguments are passed to axhline
"""
return
SLICERS = dict(ortho=OrthoSlicer,
tiled=TiledSlicer,
mosaic=MosaicSlicer,
xz=XZSlicer,
yz=YZSlicer,
yx=YXSlicer,
x=XSlicer,
y=YSlicer,
z=ZSlicer)
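# A minimal sketch (hypothetical helper) of how the SLICERS registry is meant
# to be used: a display_mode string selects the slicer class, whose
# init_with_figure classmethod builds the display. `img` is assumed to be a
# 3D Niimg-like object.
def _demo_pick_slicer(img, display_mode='ortho'):
    slicer_cls = SLICERS[display_mode]   # e.g. 'mosaic' -> MosaicSlicer
    return slicer_cls.init_with_figure(img=img)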
class OrthoProjector(OrthoSlicer):
"""A class to create linked axes for plotting orthogonal projections
of 3D maps.
"""
_axes_class = GlassBrainAxes
@classmethod
def find_cut_coords(cls, img=None, threshold=None, cut_coords=None):
return (None, ) * len(cls._cut_displayed)
def draw_cross(self, cut_coords=None, **kwargs):
# It does not make sense to draw crosses for the position of
# the cuts since we are taking the max along one axis
pass
def add_graph(self, adjacency_matrix, node_coords,
node_color='auto', node_size=50,
edge_cmap=cm.bwr,
edge_vmin=None, edge_vmax=None,
edge_threshold=None,
edge_kwargs=None, node_kwargs=None, colorbar=False,
):
"""Plot undirected graph on each of the axes
Parameters
----------
adjacency_matrix : numpy array of shape (n, n)
Represents the edges strengths of the graph.
The matrix can be symmetric which will result in
an undirected graph, or not symmetric which will
result in a directed graph.
node_coords : numpy array_like of shape (n, 3)
3d coordinates of the graph nodes in world space.
node_color : color or sequence of colors, optional
Color(s) of the nodes. Default='auto'.
node_size : scalar or array_like, optional
Size(s) of the nodes in points^2. Default=50.
edge_cmap : colormap, optional
Colormap used for representing the strength of the edges.
Default=cm.bwr.
edge_vmin, edge_vmax : float, optional
            If not None, either or both of these values will be used as
            the minimum and maximum values to color edges. If neither is
            supplied, the maximum absolute value within the given threshold
            will be used as the maximum coloring level and its negation
            as the minimum.
edge_threshold : str or number, optional
If it is a number only the edges with a value greater than
edge_threshold will be shown.
If it is a string it must finish with a percent sign,
e.g. "25.3%", and only the edges with a abs(value) above
the given percentile will be shown.
edge_kwargs : dict, optional
            Will be passed as kwargs for each edge matplotlib Line2D.
node_kwargs : dict
Will be passed as kwargs to the plt.scatter call that plots all
the nodes in one go.
"""
# set defaults
if edge_kwargs is None:
edge_kwargs = {}
if node_kwargs is None:
node_kwargs = {}
if isinstance(node_color, str) and node_color == 'auto':
nb_nodes = len(node_coords)
node_color = mpl_cm.Set2(np.linspace(0, 1, nb_nodes))
node_coords = np.asarray(node_coords)
# decompress input matrix if sparse
if sparse.issparse(adjacency_matrix):
adjacency_matrix = adjacency_matrix.toarray()
# make the lines below well-behaved
adjacency_matrix = np.nan_to_num(adjacency_matrix)
# safety checks
if 's' in node_kwargs:
raise ValueError("Please use 'node_size' and not 'node_kwargs' "
"to specify node sizes")
if 'c' in node_kwargs:
raise ValueError("Please use 'node_color' and not 'node_kwargs' "
"to specify node colors")
adjacency_matrix_shape = adjacency_matrix.shape
if (len(adjacency_matrix_shape) != 2 or
adjacency_matrix_shape[0] != adjacency_matrix_shape[1]):
raise ValueError(
"'adjacency_matrix' is supposed to have shape (n, n)."
' Its shape was {0}'.format(adjacency_matrix_shape))
node_coords_shape = node_coords.shape
if len(node_coords_shape) != 2 or node_coords_shape[1] != 3:
message = (
"Invalid shape for 'node_coords'. You passed an "
"'adjacency_matrix' of shape {0} therefore "
"'node_coords' should be a array with shape ({0[0]}, 3) "
'while its shape was {1}').format(adjacency_matrix_shape,
node_coords_shape)
raise ValueError(message)
if isinstance(node_color, (list, np.ndarray)) and len(node_color) != 1:
if len(node_color) != node_coords_shape[0]:
raise ValueError(
"Mismatch between the number of nodes ({0}) "
"and and the number of node colors ({1})."
.format(node_coords_shape[0], len(node_color)))
if node_coords_shape[0] != adjacency_matrix_shape[0]:
raise ValueError(
"Shape mismatch between 'adjacency_matrix' "
"and 'node_coords'"
"'adjacency_matrix' shape is {0}, 'node_coords' shape is {1}"
.format(adjacency_matrix_shape, node_coords_shape))
# If the adjacency matrix is not symmetric, give a warning
symmetric = True
if not np.allclose(adjacency_matrix, adjacency_matrix.T, rtol=1e-3):
symmetric = False
warnings.warn(("'adjacency_matrix' is not symmetric. "
"A directed graph will be plotted."))
# For a masked array, masked values are replaced with zeros
if hasattr(adjacency_matrix, 'mask'):
if not (adjacency_matrix.mask == adjacency_matrix.mask.T).all():
symmetric = False
warnings.warn(("'adjacency_matrix' was masked with "
"a non symmetric mask. A directed "
"graph will be plotted."))
adjacency_matrix = adjacency_matrix.filled(0)
if edge_threshold is not None:
if symmetric:
# Keep a percentile of edges with the highest absolute
# values, so only need to look at the covariance
# coefficients below the diagonal
lower_diagonal_indices = np.tril_indices_from(adjacency_matrix,
k=-1)
lower_diagonal_values = adjacency_matrix[
lower_diagonal_indices]
edge_threshold = _utils.param_validation.check_threshold(
edge_threshold, np.abs(lower_diagonal_values),
stats.scoreatpercentile, 'edge_threshold')
else:
edge_threshold = _utils.param_validation.check_threshold(
edge_threshold, np.abs(adjacency_matrix.ravel()),
stats.scoreatpercentile, 'edge_threshold')
adjacency_matrix = adjacency_matrix.copy()
threshold_mask = np.abs(adjacency_matrix) < edge_threshold
adjacency_matrix[threshold_mask] = 0
if symmetric:
lower_triangular_adjacency_matrix = np.tril(adjacency_matrix, k=-1)
non_zero_indices = lower_triangular_adjacency_matrix.nonzero()
else:
non_zero_indices = adjacency_matrix.nonzero()
line_coords = [node_coords[list(index)]
for index in zip(*non_zero_indices)]
adjacency_matrix_values = adjacency_matrix[non_zero_indices]
for ax in self.axes.values():
ax._add_markers(node_coords, node_color, node_size, **node_kwargs)
if line_coords:
ax._add_lines(line_coords, adjacency_matrix_values, edge_cmap,
vmin=edge_vmin, vmax=edge_vmax, directed=(not symmetric),
**edge_kwargs)
# To obtain the brain left view, we simply invert the x axis
if ax.direction == 'l' and not (ax.ax.get_xlim()[0] > ax.ax.get_xlim()[1]):
ax.ax.invert_xaxis()
if colorbar:
self._colorbar = colorbar
self._show_colorbar(ax.cmap, ax.norm, threshold=edge_threshold)
plt.draw_if_interactive()
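# A minimal sketch (hypothetical data) of add_graph on a glass-brain
# projector: a tiny symmetric adjacency matrix and three made-up MNI node
# positions, thresholded so that only the strongest edges remain.
def _demo_connectome():
    adjacency = np.array([[0.0, 0.8, 0.0],
                          [0.8, 0.0, -0.5],
                          [0.0, -0.5, 0.0]])
    node_coords = np.array([[-40.0, -20.0, 30.0],
                            [40.0, -20.0, 30.0],
                            [0.0, 50.0, 10.0]])
    display = OrthoProjector.init_with_figure(img=None)
    display.add_graph(adjacency, node_coords,
                      edge_threshold='10%', colorbar=True)
    return display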
class XProjector(OrthoProjector):
_cut_displayed = 'x'
_default_figsize = [2.6, 2.3]
class YProjector(OrthoProjector):
_cut_displayed = 'y'
_default_figsize = [2.2, 2.3]
class ZProjector(OrthoProjector):
_cut_displayed = 'z'
_default_figsize = [2.2, 2.3]
class XZProjector(OrthoProjector):
_cut_displayed = 'xz'
class YXProjector(OrthoProjector):
_cut_displayed = 'yx'
class YZProjector(OrthoProjector):
_cut_displayed = 'yz'
class LYRZProjector(OrthoProjector):
_cut_displayed = 'lyrz'
class LZRYProjector(OrthoProjector):
_cut_displayed = 'lzry'
class LZRProjector(OrthoProjector):
_cut_displayed = 'lzr'
class LYRProjector(OrthoProjector):
_cut_displayed = 'lyr'
class LRProjector(OrthoProjector):
_cut_displayed = 'lr'
class LProjector(OrthoProjector):
_cut_displayed = 'l'
_default_figsize = [2.6, 2.3]
class RProjector(OrthoProjector):
_cut_displayed = 'r'
_default_figsize = [2.6, 2.3]
PROJECTORS = dict(ortho=OrthoProjector,
xz=XZProjector,
yz=YZProjector,
yx=YXProjector,
x=XProjector,
y=YProjector,
z=ZProjector,
lzry=LZRYProjector,
lyrz=LYRZProjector,
lyr=LYRProjector,
lzr=LZRProjector,
lr=LRProjector,
l=LProjector,
r=RProjector)
def get_create_display_fun(display_mode, class_dict):
try:
return class_dict[display_mode].init_with_figure
except KeyError:
message = ('{0} is not a valid display_mode. '
'Valid options are {1}').format(
display_mode, sorted(class_dict.keys()))
raise ValueError(message)
def get_slicer(display_mode):
"Internal function to retrieve a slicer"
return get_create_display_fun(display_mode, SLICERS)
def get_projector(display_mode):
"Internal function to retrieve a projector"
return get_create_display_fun(display_mode, PROJECTORS)
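# --- Hedged illustration (not part of the original module) ---
# SLICERS/PROJECTORS implement a plain string-to-class dispatch: the lookup
# returns the class's init_with_figure factory and an unknown key becomes a
# ValueError listing the valid options. _DemoDisplay and _DEMO_MODES are
# illustrative stand-ins, not real display classes.
class _DemoDisplay(object):
    @classmethod
    def init_with_figure(cls, **kwargs):
        return cls()
_DEMO_MODES = dict(ortho=_DemoDisplay, x=_DemoDisplay)
def _demo_get_display(display_mode):
    return get_create_display_fun(display_mode, _DEMO_MODES)
# _demo_get_display('x')() builds a _DemoDisplay instance, while
# _demo_get_display('nope') raises ValueError naming ['ortho', 'x'].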
| 37.347623
| 101
| 0.549965
|
71f1e5ce9af7a05b5bdb4cfdc322061724be5db4
| 5,482
|
py
|
Python
|
python/GafferUI/NodeMenu.py
|
PaulDoessel/gaffer-play
|
8b72dabb388e12424c230acfb0bd209049b01bd6
|
[
"BSD-3-Clause"
] | 1
|
2016-07-31T09:55:09.000Z
|
2016-07-31T09:55:09.000Z
|
python/GafferUI/NodeMenu.py
|
Kthulhu/gaffer
|
8995d579d07231988abc92c3ac2788c15c8bc75c
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferUI/NodeMenu.py
|
Kthulhu/gaffer
|
8995d579d07231988abc92c3ac2788c15c8bc75c
|
[
"BSD-3-Clause"
] | 1
|
2020-02-15T16:15:54.000Z
|
2020-02-15T16:15:54.000Z
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import re
import fnmatch
import inspect
import IECore
import Gaffer
import GafferUI
## The NodeMenu class provides a menu for the creation of new nodes. To allow
# different applications to coexist happily in the same process, separate node
# menus are maintained per application, and NodeMenu.acquire() is used to
# obtain the appropriate menu.
class NodeMenu( object ) :
## Chances are you want to use acquire() to get the NodeMenu for a
# specific application, rather than construct one directly.
def __init__( self ) :
self.__definition = IECore.MenuDefinition()
## Acquires the NodeMenu for the specified application.
@staticmethod
def acquire( applicationOrApplicationRoot ) :
if isinstance( applicationOrApplicationRoot, Gaffer.Application ) :
applicationRoot = applicationOrApplicationRoot.root()
else :
assert( isinstance( applicationOrApplicationRoot, Gaffer.ApplicationRoot ) )
applicationRoot = applicationOrApplicationRoot
nodeMenu = getattr( applicationRoot, "_nodeMenu", None )
if nodeMenu :
return nodeMenu
nodeMenu = NodeMenu()
applicationRoot._nodeMenu = nodeMenu
return nodeMenu
## Returns a menu definition used for the creation of nodes. This is
# initially empty but is expected to be populated during the gaffer
# startup routine.
def definition( self ) :
return self.__definition
## Utility function to append a menu item to definition.
# nodeCreator must be a callable that returns a Gaffer.Node.
def append( self, path, nodeCreator, plugValues={}, postCreator=None, **kw ) :
item = IECore.MenuItemDefinition( command = self.nodeCreatorWrapper( nodeCreator=nodeCreator, plugValues=plugValues, postCreator=postCreator ), **kw )
self.definition().append( path, item )
## Utility function which takes a callable that creates a node, and returns a new
# callable which will add the node to the graph.
@staticmethod
def nodeCreatorWrapper( nodeCreator, plugValues={}, postCreator=None ) :
def f( menu ) :
nodeGraph = menu.ancestor( GafferUI.NodeGraph )
assert( nodeGraph is not None )
gadgetWidget = nodeGraph.graphGadgetWidget()
graphGadget = nodeGraph.graphGadget()
script = nodeGraph.scriptNode()
commandArgs = []
with IECore.IgnoredExceptions( TypeError ) :
commandArgs = inspect.getargspec( nodeCreator )[0]
with Gaffer.UndoContext( script ) :
if "menu" in commandArgs :
node = nodeCreator( menu = menu )
else :
node = nodeCreator()
if node is None :
return
Gaffer.NodeAlgo.applyUserDefaults( node )
for plugName, plugValue in plugValues.items() :
node.descendant( plugName ).setValue( plugValue )
if node.parent() is None :
graphGadget.getRoot().addChild( node )
graphGadget.getLayout().connectNode( graphGadget, node, script.selection() )
# if no connections were made, we can't expect the graph layout to
# know where to put the node, so we'll position it based on
# the click location that opened the menu.
menuPosition = menu.popupPosition( relativeTo = gadgetWidget )
fallbackPosition = gadgetWidget.getViewportGadget().rasterToGadgetSpace(
IECore.V2f( menuPosition.x, menuPosition.y ),
gadget = graphGadget
).p0
fallbackPosition = IECore.V2f( fallbackPosition.x, fallbackPosition.y )
graphGadget.getLayout().positionNode( graphGadget, node, fallbackPosition )
script.selection().clear()
script.selection().add( node )
nodeGraph.frame( [ node ], extend = True )
if postCreator is not None :
postCreator( node, menu )
return f
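# --- Hedged illustration (not part of the original module) ---
# acquire() above is a lazy per-ApplicationRoot cache : the first call builds
# a NodeMenu and stashes it as _nodeMenu, and later calls return that same
# object, so multiple startup files can keep extending one definition(), e.g.
# NodeMenu.acquire( application ).append( "/Custom/MyNode", MyModule.MyNode ),
# where MyModule.MyNode is a hypothetical node class. The minimal classes
# below reproduce only the caching pattern and are illustrative stand-ins.
class _DemoRoot( object ) :
	pass
class _DemoMenu( object ) :
	@staticmethod
	def acquire( root ) :
		menu = getattr( root, "_demoMenu", None )
		if menu is None :
			menu = _DemoMenu()
			root._demoMenu = menu
		return menu
# _DemoMenu.acquire( root ) is _DemoMenu.acquire( root ) for any given root.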
| 35.830065
| 152
| 0.718898
|
aa31d84de1bcf21e1c9dc6d3060b7cb447b0ce5e
| 178
|
py
|
Python
|
src/ui_macro.py
|
vincentX3/TrainingRecords
|
1863761356887b0d20448fc46cf57d00dbba77b2
|
[
"MIT"
] | null | null | null |
src/ui_macro.py
|
vincentX3/TrainingRecords
|
1863761356887b0d20448fc46cf57d00dbba77b2
|
[
"MIT"
] | null | null | null |
src/ui_macro.py
|
vincentX3/TrainingRecords
|
1863761356887b0d20448fc46cf57d00dbba77b2
|
[
"MIT"
] | null | null | null |
HEIGHT = 900
WIDTH = 1300
SIDEBAR_WIDTH = 200
SIDEBAR_MARGIN = 10
PAGE_WIDTH = 1000
PAGE_TAB_HEIGHT = 800
PAGE_TABLE_HEIGHT = 785
PAGE_MARGIN = 0
DB_PATH = "../db/TRecords_ui.db"
| 19.777778
| 32
| 0.764045
|
55875bd5fd5bcf14b426aed32824a234fd44f18c
| 26,634
|
py
|
Python
|
pandadoc_client/api/document_attachments_api.py
|
PandaDoc/pandadoc-api-python-client
|
a707c540e788eee485cc338f29ca363acca4973e
|
[
"MIT"
] | 27
|
2021-11-16T11:30:13.000Z
|
2022-03-17T08:56:18.000Z
|
pandadoc_client/api/document_attachments_api.py
|
PandaDoc/pandadoc-api-python-client
|
a707c540e788eee485cc338f29ca363acca4973e
|
[
"MIT"
] | null | null | null |
pandadoc_client/api/document_attachments_api.py
|
PandaDoc/pandadoc-api-python-client
|
a707c540e788eee485cc338f29ca363acca4973e
|
[
"MIT"
] | 2
|
2021-12-16T13:38:15.000Z
|
2022-01-09T00:38:00.000Z
|
"""
PandaDoc Public API
PandaDoc Public API documentation # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from pandadoc_client.api_client import ApiClient, Endpoint as _Endpoint
from pandadoc_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from pandadoc_client.model.document_attachment_response import DocumentAttachmentResponse
class DocumentAttachmentsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.create_document_attachment_endpoint = _Endpoint(
settings={
'response_type': (DocumentAttachmentResponse,),
'auth': [
'apiKey',
'oauth2'
],
'endpoint_path': '/public/v1/documents/{id}/attachments',
'operation_id': 'create_document_attachment',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'id',
'file',
'source',
'name',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'file':
(file_type,),
'source':
(str,),
'name':
(str,),
},
'attribute_map': {
'id': 'id',
'file': 'file',
'source': 'source',
'name': 'name',
},
'location_map': {
'id': 'path',
'file': 'form',
'source': 'form',
'name': 'form',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'multipart/form-data'
]
},
api_client=api_client
)
self.delete_document_attachment_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKey',
'oauth2'
],
'endpoint_path': '/public/v1/documents/{id}/attachments/{attachment_id}',
'operation_id': 'delete_document_attachment',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'id',
'attachment_id',
],
'required': [
'id',
'attachment_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'attachment_id':
(str,),
},
'attribute_map': {
'id': 'id',
'attachment_id': 'attachment_id',
},
'location_map': {
'id': 'path',
'attachment_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.details_document_attachment_endpoint = _Endpoint(
settings={
'response_type': (DocumentAttachmentResponse,),
'auth': [
'apiKey',
'oauth2'
],
'endpoint_path': '/public/v1/documents/{id}/attachments/{attachment_id}',
'operation_id': 'details_document_attachment',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
'attachment_id',
],
'required': [
'id',
'attachment_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'attachment_id':
(str,),
},
'attribute_map': {
'id': 'id',
'attachment_id': 'attachment_id',
},
'location_map': {
'id': 'path',
'attachment_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.download_document_attachment_endpoint = _Endpoint(
settings={
'response_type': (file_type,),
'auth': [
'apiKey',
'oauth2'
],
'endpoint_path': '/public/v1/documents/{id}/attachments/{attachment_id}/download',
'operation_id': 'download_document_attachment',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
'attachment_id',
],
'required': [
'id',
'attachment_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'attachment_id':
(str,),
},
'attribute_map': {
'id': 'id',
'attachment_id': 'attachment_id',
},
'location_map': {
'id': 'path',
'attachment_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/*',
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_document_attachments_endpoint = _Endpoint(
settings={
'response_type': ([DocumentAttachmentResponse],),
'auth': [
'apiKey',
'oauth2'
],
'endpoint_path': '/public/v1/documents/{id}/attachments',
'operation_id': 'list_document_attachments',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
def create_document_attachment(
self,
id,
**kwargs
):
"""Document Attachment Create # noqa: E501
Creates an attachment for a particular document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_document_attachment(id, async_req=True)
>>> result = thread.get()
Args:
id (str): Document UUID
Keyword Args:
file (file_type): Binary file to be attached to a document. [optional]
source (str): URL link to the file to be attached to a document. [optional]
name (str): Optional name to set for uploaded file. [optional]
            _return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
DocumentAttachmentResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.create_document_attachment_endpoint.call_with_http_info(**kwargs)
def delete_document_attachment(
self,
id,
attachment_id,
**kwargs
):
"""Document Attachment Delete # noqa: E501
Deletes specific document's attachment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_document_attachment(id, attachment_id, async_req=True)
>>> result = thread.get()
Args:
id (str): Document UUID
attachment_id (str): Attachment UUID
Keyword Args:
            _return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['attachment_id'] = \
attachment_id
return self.delete_document_attachment_endpoint.call_with_http_info(**kwargs)
def details_document_attachment(
self,
id,
attachment_id,
**kwargs
):
"""Document Attachment Details # noqa: E501
Returns details of the specific document's attachment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.details_document_attachment(id, attachment_id, async_req=True)
>>> result = thread.get()
Args:
id (str): Document UUID
attachment_id (str): Attachment UUID
Keyword Args:
            _return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
DocumentAttachmentResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['attachment_id'] = \
attachment_id
return self.details_document_attachment_endpoint.call_with_http_info(**kwargs)
def download_document_attachment(
self,
id,
attachment_id,
**kwargs
):
"""Document Attachment Download # noqa: E501
Returns document attachment file for download # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.download_document_attachment(id, attachment_id, async_req=True)
>>> result = thread.get()
Args:
id (str): Document UUID
attachment_id (str): Attachment UUID
Keyword Args:
            _return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
file_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['attachment_id'] = \
attachment_id
return self.download_document_attachment_endpoint.call_with_http_info(**kwargs)
def list_document_attachments(
self,
id,
**kwargs
):
"""Document Attachment List # noqa: E501
        Return a list of objects attached to a particular document  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_document_attachments(id, async_req=True)
>>> result = thread.get()
Args:
id (str): Document UUID
Keyword Args:
            _return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[DocumentAttachmentResponse]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.list_document_attachments_endpoint.call_with_http_info(**kwargs)
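# --- Hedged usage sketch (not part of the generated module) ---
# How the endpoints defined above are typically called. Assumptions: the
# pandadoc_client package is importable, the default ApiClient has been given
# valid credentials elsewhere (authentication setup is not shown in this
# file), and the document UUID below is a placeholder.
if __name__ == "__main__":
    api = DocumentAttachmentsApi()  # builds a default ApiClient internally
    document_id = "REPLACE_WITH_DOCUMENT_UUID"
    # Attach a file by URL, then list everything attached to the document.
    api.create_document_attachment(
        document_id, source="https://example.com/contract.pdf", name="contract.pdf"
    )
    print(api.list_document_attachments(document_id))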
| 36.040595
| 98
| 0.495344
|
6714d65c94390efea6bd184fca69066cb41aa97a
| 4,713
|
py
|
Python
|
src/ZServer/Zope2/App/tests/test_schema.py
|
gogobd/ZServer
|
d85912680297f67138ebe38409fcb36068b37c3d
|
[
"ZPL-2.1"
] | 4
|
2017-08-01T15:06:47.000Z
|
2020-04-19T05:22:13.000Z
|
src/ZServer/Zope2/App/tests/test_schema.py
|
gogobd/ZServer
|
d85912680297f67138ebe38409fcb36068b37c3d
|
[
"ZPL-2.1"
] | 12
|
2017-06-21T03:56:04.000Z
|
2021-03-29T05:47:10.000Z
|
src/ZServer/Zope2/App/tests/test_schema.py
|
gogobd/ZServer
|
d85912680297f67138ebe38409fcb36068b37c3d
|
[
"ZPL-2.1"
] | 7
|
2017-05-12T07:30:54.000Z
|
2020-10-08T01:51:50.000Z
|
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from __future__ import absolute_import
import os
import cStringIO
import tempfile
import unittest
import ZConfig
import Products
from ZServer.Zope2.Startup import datatypes
from ZServer.Zope2.Startup.options import ZopeOptions
_SCHEMA = {}
TEMPNAME = tempfile.mktemp()
TEMPPRODUCTS = os.path.join(TEMPNAME, "Products")
TEMPVAR = os.path.join(TEMPNAME, "var")
def getSchema(schemafile):
global _SCHEMA
if schemafile not in _SCHEMA:
opts = ZopeOptions()
opts.schemafile = schemafile
opts.load_schema()
_SCHEMA[schemafile] = opts.schema
return _SCHEMA[schemafile]
class ZServerStartupTestCase(unittest.TestCase):
def tearDown(self):
Products.__path__ = [d for d in Products.__path__
if os.path.exists(d)]
@property
def schema(self):
return getSchema('zopeschema.xml')
def load_config_text(self, text):
# We have to create a directory of our own since the existence
        # of the directory is checked. This handles it in a
# platform-independent way.
schema = self.schema
sio = cStringIO.StringIO(
text.replace("<<INSTANCE_HOME>>", TEMPNAME))
os.mkdir(TEMPNAME)
os.mkdir(TEMPPRODUCTS)
os.mkdir(TEMPVAR)
try:
conf, handler = ZConfig.loadConfigFile(schema, sio)
finally:
os.rmdir(TEMPPRODUCTS)
os.rmdir(TEMPVAR)
os.rmdir(TEMPNAME)
self.assertEqual(conf.instancehome, TEMPNAME)
return conf, handler
def test_cgi_environment(self):
conf, handler = self.load_config_text("""\
# instancehome is here since it's required
instancehome <<INSTANCE_HOME>>
<cgi-environment>
HEADER value
ANOTHER value2
</cgi-environment>
""")
items = conf.cgi_environment.items()
items.sort()
self.assertEqual(
items, [("ANOTHER", "value2"), ("HEADER", "value")])
def test_ms_public_header(self):
from ZServer.Zope2.Startup import config
from ZServer.Zope2.Startup.handlers import handleConfig
default_setting = config.ZSERVER_ENABLE_MS_PUBLIC_HEADER
try:
conf, handler = self.load_config_text("""\
instancehome <<INSTANCE_HOME>>
enable-ms-public-header true
""")
handleConfig(None, handler)
self.assertTrue(config.ZSERVER_ENABLE_MS_PUBLIC_HEADER)
conf, handler = self.load_config_text("""\
instancehome <<INSTANCE_HOME>>
enable-ms-public-header false
""")
handleConfig(None, handler)
self.assertFalse(config.ZSERVER_ENABLE_MS_PUBLIC_HEADER)
finally:
config.ZSERVER_ENABLE_MS_PUBLIC_HEADER = default_setting
def test_path(self):
p1 = tempfile.mktemp()
p2 = tempfile.mktemp()
try:
os.mkdir(p1)
os.mkdir(p2)
conf, handler = self.load_config_text("""\
# instancehome is here since it's required
instancehome <<INSTANCE_HOME>>
path %s
path %s
""" % (p1, p2))
items = conf.path
self.assertEqual(items, [p1, p2])
finally:
if os.path.exists(p1):
os.rmdir(p1)
if os.path.exists(p2):
os.rmdir(p2)
def test_access_and_trace_logs(self):
fn = tempfile.mktemp()
conf, handler = self.load_config_text("""
instancehome <<INSTANCE_HOME>>
<logger access>
<logfile>
path %s
</logfile>
</logger>
""" % fn)
self.assert_(isinstance(conf.access, datatypes.LoggerFactory))
self.assertEqual(conf.access.name, "access")
self.assertEqual(conf.access.handler_factories[0].section.path, fn)
self.assert_(conf.trace is None)
| 32.958042
| 78
| 0.586039
|
60c418804e867bc504132c5735db91ad01488a36
| 1,000
|
py
|
Python
|
bosm2015/registration/migrations/0003_auto_20150811_1241.py
|
dvm-bitspilani/BITS-BOSM-2015
|
df3e69ee6ee9b179a2d6cd6cad61423c177dbe0a
|
[
"MIT"
] | 1
|
2015-09-15T17:19:30.000Z
|
2015-09-15T17:19:30.000Z
|
bosm2015/registration/migrations/0003_auto_20150811_1241.py
|
DVM-BITS-Pilani/BITS-BOSM-2015
|
df3e69ee6ee9b179a2d6cd6cad61423c177dbe0a
|
[
"MIT"
] | null | null | null |
bosm2015/registration/migrations/0003_auto_20150811_1241.py
|
DVM-BITS-Pilani/BITS-BOSM-2015
|
df3e69ee6ee9b179a2d6cd6cad61423c177dbe0a
|
[
"MIT"
] | 1
|
2016-03-28T19:44:41.000Z
|
2016-03-28T19:44:41.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('registration', '0002_auto_20150731_2332'),
]
operations = [
migrations.AlterField(
model_name='participant',
name='coach',
field=models.NullBooleanField(default=False),
),
migrations.AlterField(
model_name='participant',
name='gender',
field=models.CharField(max_length=10, choices=[(b'M', b'Male'), (b'F', b'Female')]),
),
migrations.AlterField(
model_name='userprofile',
name='firstname',
field=models.CharField(max_length=200, verbose_name=b'First Name'),
),
migrations.AlterField(
model_name='userprofile',
name='lastname',
field=models.CharField(max_length=200, verbose_name=b'Last Name'),
),
]
| 28.571429
| 96
| 0.581
|
7765c29ee59c8b2419b3fc661434d9582c9c34af
| 77,967
|
py
|
Python
|
pandas/tests/arithmetic/test_timedelta64.py
|
IamJasonBian/pandas
|
21024d5a8e05f611d0fef5ddf884ffa237643772
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/arithmetic/test_timedelta64.py
|
IamJasonBian/pandas
|
21024d5a8e05f611d0fef5ddf884ffa237643772
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/arithmetic/test_timedelta64.py
|
IamJasonBian/pandas
|
21024d5a8e05f611d0fef5ddf884ffa237643772
|
[
"BSD-3-Clause"
] | null | null | null |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas.errors import (
OutOfBoundsDatetime,
PerformanceWarning,
)
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
NaT,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.api import (
Float64Index,
Int64Index,
UInt64Index,
)
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
def assert_dtype(obj, expected_dtype):
"""
Helper to check the dtype for a Series, Index, or single-column DataFrame.
"""
dtype = tm.get_dtype(obj)
assert dtype == expected_dtype
def get_expected_name(box, names):
if box is DataFrame:
# Since we are operating with a DataFrame and a non-DataFrame,
# the non-DataFrame is cast to Series and its name ignored.
exname = names[0]
elif box in [tm.to_array, pd.array]:
exname = names[1]
else:
exname = names[2]
return exname
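# --- Hedged illustration (not part of the original test module) ---
# A tiny, self-contained demonstration of the name-propagation rules that
# get_expected_name encodes: a binary op between two Series keeps the name
# only when both operands agree on it, otherwise the result is unnamed (and a
# DataFrame ignores the other operand's name entirely). Illustrative only.
def _demo_result_name_propagation():
    left = Series([1, 2], name="shared")
    assert (left + Series([3, 4], name="shared")).name == "shared"
    assert (left + Series([3, 4], name="other")).name is None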
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = (
box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
)
tdi = timedelta_range("2H", periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
msg = "Invalid comparison between dtype"
with pytest.raises(TypeError, match=msg):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
@pytest.mark.parametrize(
"td_scalar",
[
timedelta(days=1),
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
offsets.Hour(24),
],
)
def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar):
# regression test for GH#5963
box = box_with_array
xbox = box if box not in [pd.Index, pd.array] else np.ndarray
ser = Series([timedelta(days=1), timedelta(days=2)])
ser = tm.box_expected(ser, box)
actual = ser > td_scalar
expected = Series([False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(actual, expected)
@pytest.mark.parametrize(
"invalid",
[
345600000000000,
"a",
Timestamp.now(),
Timestamp.now("UTC"),
Timestamp.now().to_datetime64(),
Timestamp.now().to_pydatetime(),
Timestamp.now().date(),
],
)
def test_td64_comparisons_invalid(self, box_with_array, invalid):
# GH#13624 for str
box = box_with_array
rng = timedelta_range("1 days", periods=10)
obj = tm.box_expected(rng, box)
assert_invalid_comparison(obj, invalid, box)
@pytest.mark.parametrize(
"other",
[
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.date_range("1970-01-01", periods=10, tz="UTC").array,
np.array(pd.date_range("1970-01-01", periods=10)),
list(pd.date_range("1970-01-01", periods=10)),
pd.date_range("1970-01-01", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_td64arr_cmp_arraylike_invalid(self, other):
# We don't parametrize this over box_with_array because listlike
# other plays poorly with assert_invalid_comparison reversed checks
rng = timedelta_range("1 days", periods=10)._data
assert_invalid_comparison(rng, other, tm.to_array)
def test_td64arr_cmp_mixed_invalid(self):
rng = timedelta_range("1 days", periods=5)._data
other = np.array([0, 1, 2, rng[3], Timestamp.now()])
result = rng == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = rng != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
rng < other
with pytest.raises(TypeError, match=msg):
rng > other
with pytest.raises(TypeError, match=msg):
rng <= other
with pytest.raises(TypeError, match=msg):
rng >= other
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
@pytest.mark.parametrize("dtype", [None, object])
def test_comp_nat(self, dtype):
left = TimedeltaIndex([Timedelta("1 days"), NaT, Timedelta("3 days")])
right = TimedeltaIndex([NaT, NaT, Timedelta("3 days")])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == NaT, expected)
tm.assert_numpy_array_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != NaT, expected)
tm.assert_numpy_array_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < NaT, expected)
tm.assert_numpy_array_equal(NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = TimedeltaIndex(
[
"1 day",
NaT,
"1 day 00:00:01",
NaT,
"1 day 00:00:01",
"5 day 00:00:03",
]
)
tdidx2 = TimedeltaIndex(
["2 day", "2 day", NaT, NaT, "1 day 00:00:02", "5 days 00:00:03"]
)
tdarr = np.array(
[
np.timedelta64(2, "D"),
np.timedelta64(2, "D"),
np.timedelta64("nat"),
np.timedelta64("nat"),
np.timedelta64(1, "D") + np.timedelta64(2, "s"),
np.timedelta64(5, "D") + np.timedelta64(3, "s"),
]
)
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check that pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range("1 days", periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
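# --- Hedged illustration (not part of the original test module) ---
# A short, self-contained reminder of the NaT comparison semantics the tests
# above depend on: equality and ordered comparisons against NaT are always
# False and only "!=" is True, mirroring np.nan. Illustrative only.
def _demo_nat_comparison_semantics():
    td = Timedelta("1 days")
    assert not (td == NaT) and not (td < NaT) and not (td > NaT)
    assert td != NaT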
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(["2H", "4H", "6H", "8H", "10H"], freq="2H", name="x")
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(["4H", "8H", "12H", "16H", "20H"], freq="4H", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "4H"
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(["1H", "2H", "3H", "4H", "5H"], freq="H", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "H"
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(
["-2H", "-4H", "-6H", "-8H", "-10H"], freq="-2H", name="x"
)
tm.assert_index_equal(result, exp)
assert result.freq == "-2H"
idx = TimedeltaIndex(["-2H", "-1H", "0H", "1H", "2H"], freq="H", name="x")
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(["2H", "1H", "0H", "1H", "2H"], freq=None, name="x")
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo")
dti = pd.date_range("20130101", periods=3, name="bar")
td = Timedelta("1 days")
dt = Timestamp("20130101")
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = r"unsupported operand type\(s\) for -"
with pytest.raises(TypeError, match=msg):
td - dt
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"], name="bar")
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(["0 days", "1 days", "2 days"], name="bar")
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(["0 days", NaT, "1 days"], name="foo")
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(["0 days", NaT, "-1 days"], name="foo")
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
["20121231", "20130101", "20130102"], freq="D", name="bar"
)
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(["20121231", NaT, "20121230"], name="foo")
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range("20130101", periods=3)
ts = Timestamp("20130101")
dt = ts.to_pydatetime()
dti_tz = pd.date_range("20130101", periods=3).tz_localize("US/Eastern")
ts_tz = Timestamp("20130101").tz_localize("US/Eastern")
ts_tz2 = Timestamp("20130101").tz_localize("CET")
dt_tz = ts_tz.to_pydatetime()
td = Timedelta("1 days")
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta("0 days")
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta("0 days")
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta("0 days")
_check(result, expected)
# tz mismatches
msg = "Timestamp subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = "Timestamp subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = "Timestamp subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta("0 days")
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(["20121231", "20130101", "20130102"], tz="US/Eastern")
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo")
dti = pd.date_range("20130101", periods=3, name="bar")
result = tdi - tdi
expected = TimedeltaIndex(["0 days", NaT, "0 days"], name="foo")
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(["2 days", NaT, "4 days"], name="foo")
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(["20121231", NaT, "20130101"])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo")
dti = pd.date_range("20130101", periods=3, name="bar")
td = Timedelta("1 days")
dt = Timestamp("20130101")
result = tdi + dt
expected = DatetimeIndex(["20130102", NaT, "20130103"], name="foo")
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(["20130102", NaT, "20130103"], name="foo")
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(["2 days", NaT, "3 days"], name="foo")
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(["2 days", NaT, "3 days"], name="foo")
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
msg = "Addition/subtraction of integers and integer-arrays"
with pytest.raises(TypeError, match=msg):
tdi + Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(["20130102", NaT, "20130105"])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(["20130102", NaT, "20130105"])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp("20130102")
assert result == expected
result = td + dt
expected = Timestamp("20130102")
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize("freq", ["D", "B"])
def test_timedelta(self, freq):
index = pd.date_range("1/1/2000", periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
back = back._with_freq("infer")
tm.assert_index_equal(index, back)
if freq == "D":
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range("2013", "2014")
s = Series(rng)
result1 = rng - offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - offsets.Hour(1))
assert result1.freq == rng.freq
result1 = result1._with_freq(None)
tm.assert_index_equal(result1, result4)
assert result3.freq == rng.freq
result3 = result3._with_freq(None)
tm.assert_index_equal(result2, result3)
def test_tda_add_sub_index(self):
# Check that TimedeltaArray defers to Index on arithmetic ops
tdi = TimedeltaIndex(["1 days", NaT, "2 days"])
tda = tdi.array
dti = pd.date_range("1999-12-31", periods=3, freq="D")
result = tda + dti
expected = tdi + dti
tm.assert_index_equal(result, expected)
result = tda + tdi
expected = tdi + tdi
tm.assert_index_equal(result, expected)
result = tda - tdi
expected = tdi - tdi
tm.assert_index_equal(result, expected)
def test_tda_add_dt64_object_array(self, box_with_array, tz_naive_fixture):
# Result should be cast back to DatetimeArray
box = box_with_array
dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
dti = dti._with_freq(None)
tdi = dti - dti
obj = tm.box_expected(tdi, box)
other = tm.box_expected(dti, box)
with tm.assert_produces_warning(PerformanceWarning):
result = obj + other.astype(object)
tm.assert_equal(result, other)
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
def test_tdi_iadd_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
orig_rng = rng
rng += two_hours
tm.assert_equal(rng, expected)
if box_with_array is not pd.Index:
# Check that operation is actually inplace
tm.assert_equal(orig_rng, expected)
def test_tdi_isub_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
orig_rng = rng
rng -= two_hours
tm.assert_equal(rng, expected)
if box_with_array is not pd.Index:
# Check that operation is actually inplace
tm.assert_equal(orig_rng, expected)
# -------------------------------------------------------------
def test_tdi_ops_attributes(self):
rng = timedelta_range("2 days", periods=5, freq="2D", name="x")
result = rng + 1 * rng.freq
exp = timedelta_range("4 days", periods=5, freq="2D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
result = rng - 2 * rng.freq
exp = timedelta_range("-2 days", periods=5, freq="2D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
result = rng * 2
exp = timedelta_range("4 days", periods=5, freq="4D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "4D"
result = rng / 2
exp = timedelta_range("1 days", periods=5, freq="D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "D"
result = -rng
exp = timedelta_range("-2 days", periods=5, freq="-2D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "-2D"
rng = timedelta_range("-2 days", periods=5, freq="D", name="x")
result = abs(rng)
exp = TimedeltaIndex(
["2 days", "1 days", "0 days", "1 days", "2 days"], name="x"
)
tm.assert_index_equal(result, exp)
assert result.freq is None
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(["24658 days 11:15:00", "NaT"])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
# TODO: Make raised error message more informative and test
with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"):
pd.to_timedelta(106580, "D") + Timestamp("2000")
with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"):
Timestamp("2000") + pd.to_timedelta(106580, "D")
_NaT = NaT.value + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], "D") + Timestamp("2000")
with pytest.raises(OverflowError, match=msg):
Timestamp("2000") + pd.to_timedelta([106580], "D")
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta("1 days")
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(["5 days", _NaT]) - Timedelta("1 days")
with pytest.raises(OverflowError, match=msg):
(
pd.to_timedelta([_NaT, "5 days", "1 hours"])
- pd.to_timedelta(["7 seconds", _NaT, "4 hours"])
)
# These should not overflow!
exp = TimedeltaIndex([NaT])
result = pd.to_timedelta([NaT]) - Timedelta("1 days")
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(["4 days", NaT])
result = pd.to_timedelta(["5 days", NaT]) - Timedelta("1 days")
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([NaT, NaT, "5 hours"])
result = pd.to_timedelta([NaT, "5 days", "1 hours"]) + pd.to_timedelta(
["7 seconds", NaT, "4 hours"]
)
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(["00:00:01"]))
s2 = pd.to_timedelta(Series(["00:00:02"]))
msg = r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]"
with pytest.raises(TypeError, match=msg):
# Passing datetime64-dtype data to TimedeltaIndex is no longer
# supported GH#29794
pd.to_timedelta(Series([NaT]))
sn = pd.to_timedelta(Series([NaT], dtype="m8[ns]"))
df1 = DataFrame(["00:00:01"]).apply(pd.to_timedelta)
df2 = DataFrame(["00:00:02"]).apply(pd.to_timedelta)
with pytest.raises(TypeError, match=msg):
# Passing datetime64-dtype data to TimedeltaIndex is no longer
# supported GH#29794
DataFrame([NaT]).apply(pd.to_timedelta)
dfn = DataFrame([NaT.value]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta("00:00:01")
scalar2 = pd.to_timedelta("00:00:02")
timedelta_NaT = pd.to_timedelta("NaT")
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
s1 + np.nan
with pytest.raises(TypeError, match=msg):
np.nan + s1
with pytest.raises(TypeError, match=msg):
s1 - np.nan
with pytest.raises(TypeError, match=msg):
-np.nan + s1
actual = s1 + NaT
tm.assert_series_equal(actual, sn)
actual = s2 - NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
msg = "cannot subtract a datelike from|unsupported operand type"
with pytest.raises(TypeError, match=msg):
df1 + np.nan
with pytest.raises(TypeError, match=msg):
df1 - np.nan
actual = df1 + NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range("2012-1-1", periods=3, freq="D")
v2 = pd.date_range("2012-1-2", periods=3, freq="D")
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24, rs.index).astype("int64").astype("timedelta64[ns]")
tm.assert_series_equal(rs, xp)
assert rs.dtype == "timedelta64[ns]"
df = DataFrame({"A": v1})
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
# series on the rhs
result = df["A"] - df["A"].shift()
assert result.dtype == "timedelta64[ns]"
result = df["A"] + td
assert result.dtype == "M8[ns]"
# scalar Timestamp on rhs
maxa = df["A"].max()
assert isinstance(maxa, Timestamp)
resultb = df["A"] - df["A"].max()
assert resultb.dtype == "timedelta64[ns]"
# timestamp on lhs
result = resultb + df["A"]
values = [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
expected = Series(values, name="A")
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df["A"] - datetime(2001, 1, 1)
expected = Series([timedelta(days=4017 + i) for i in range(3)], name="A")
tm.assert_series_equal(result, expected)
assert result.dtype == "m8[ns]"
d = datetime(2001, 1, 1, 3, 4)
resulta = df["A"] - d
assert resulta.dtype == "m8[ns]"
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df["A"], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df["A"] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df["A"])
assert resultb.dtype == "M8[ns]"
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df["A"] + td
resultb = resulta - td
tm.assert_series_equal(df["A"], resultb)
assert resultb.dtype == "M8[ns]"
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta("1s")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series, nat_series_dtype_timedelta)
tm.assert_series_equal(
timedelta_series - single_nat_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
)
# addition
tm.assert_series_equal(
nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
)
tm.assert_series_equal(
NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
tm.assert_series_equal(
timedelta_series + single_nat_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
)
tm.assert_series_equal(
NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timedelta,
nat_series_dtype_timedelta,
)
# multiplication
tm.assert_series_equal(
nat_series_dtype_timedelta * 1.0, nat_series_dtype_timedelta
)
tm.assert_series_equal(
1.0 * nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5, Series([NaT, Timedelta("1.5s")]))
tm.assert_series_equal(1.5 * timedelta_series, Series([NaT, Timedelta("1.5s")]))
tm.assert_series_equal(timedelta_series * np.nan, nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series, nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2, Series([NaT, Timedelta("0.5s")]))
tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta("0.5s")]))
tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(["1 day", "2 day"])
idx = tm.box_expected(idx, box_with_array)
msg = (
"cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation"
)
with pytest.raises(TypeError, match=msg):
idx - Timestamp("2011-01-01")
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp("2011-01-01", tz=tz)
idx = TimedeltaIndex(["1 day", "2 day"])
expected = DatetimeIndex(["2011-01-02", "2011-01-03"], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"ts",
[
Timestamp("2012-01-01"),
Timestamp("2012-01-01").to_pydatetime(),
Timestamp("2012-01-01").to_datetime64(),
],
)
def test_td64arr_add_sub_datetimelike_scalar(self, ts, box_with_array):
# GH#11925, GH#29558
tdi = timedelta_range("1 day", periods=3)
expected = pd.date_range("2012-01-02", periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range("2011-12-31", periods=3, freq="-1D")
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
msg = "cannot subtract a datelike from"
with pytest.raises(TypeError, match=msg):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64("NaT")
tdi = timedelta_range("1 day", periods=3)
expected = DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Invalid __add__/__sub__ operations
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("tdi_freq", [None, "H"])
def test_td64arr_sub_periodlike(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
dti = Timestamp("2018-03-07 17:16:40") + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
msg = "cannot subtract|unsupported operand type"
with pytest.raises(TypeError, match=msg):
tdi - pi
# GH#13078 subtraction of Period scalar not supported
with pytest.raises(TypeError, match=msg):
tdi - pi[0]
@pytest.mark.parametrize(
"other",
[
# GH#12624 for str case
"a",
# GH#19123
1,
1.5,
np.array(2),
],
)
def test_td64arr_addsub_numeric_scalar_invalid(self, box_with_array, other):
# vector-like others are tested in test_td64arr_add_sub_numeric_arr_invalid
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdarr = tm.box_expected(tdser, box_with_array)
assert_invalid_addsub_type(tdarr, other)
@pytest.mark.parametrize(
"vec",
[
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3]),
DataFrame([[1, 2, 3]]),
],
ids=lambda x: type(x).__name__,
)
def test_td64arr_addsub_numeric_arr_invalid(
self, box_with_array, vec, any_real_numpy_dtype
):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdarr = tm.box_expected(tdser, box_with_array)
vector = vec.astype(any_real_numpy_dtype)
assert_invalid_addsub_type(tdarr, vector)
def test_td64arr_add_sub_int(self, box_with_array, one):
# Variants of `one` for #19012, deprecated GH#22535
rng = timedelta_range("1 days 09:00:00", freq="H", periods=10)
tdarr = tm.box_expected(rng, box_with_array)
msg = "Addition/subtraction of integers"
assert_invalid_addsub_type(tdarr, one, msg)
# TODO: get inplace ops into assert_invalid_addsub_type
with pytest.raises(TypeError, match=msg):
tdarr += one
with pytest.raises(TypeError, match=msg):
tdarr -= one
def test_td64arr_add_sub_integer_array(self, box_with_array):
# GH#19959, deprecated GH#22535
# GH#22696 for DataFrame case, check that we don't dispatch to numpy
# implementation, which treats int64 as m8[ns]
box = box_with_array
xbox = np.ndarray if box is pd.array else box
rng = timedelta_range("1 days 09:00:00", freq="H", periods=3)
tdarr = tm.box_expected(rng, box)
other = tm.box_expected([4, 3, 2], xbox)
msg = "Addition/subtraction of integers and integer-arrays"
assert_invalid_addsub_type(tdarr, other, msg)
def test_td64arr_addsub_integer_array_no_freq(self, box_with_array):
# GH#19959
box = box_with_array
xbox = np.ndarray if box is pd.array else box
tdi = TimedeltaIndex(["1 Day", "NaT", "3 Hours"])
tdarr = tm.box_expected(tdi, box)
other = tm.box_expected([14, -1, 16], xbox)
msg = "Addition/subtraction of integers"
assert_invalid_addsub_type(tdarr, other, msg)
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_sub_td64_array(self, box_with_array):
box = box_with_array
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
expected_sub = 0 * tdi
result = tdi - tdarr
tm.assert_equal(result, expected_sub)
result = tdarr - tdi
tm.assert_equal(result, expected_sub)
def test_td64arr_add_sub_tdi(self, box_with_array, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
box = box_with_array
exname = get_expected_name(box, names)
tdi = TimedeltaIndex(["0 days", "1 day"], name=names[1])
tdi = np.array(tdi) if box in [tm.to_array, pd.array] else tdi
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[0])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], name=exname)
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
assert_dtype(result, "timedelta64[ns]")
result = ser + tdi
tm.assert_equal(result, expected)
assert_dtype(result, "timedelta64[ns]")
expected = Series(
[Timedelta(hours=-3), Timedelta(days=1, hours=-4)], name=exname
)
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
assert_dtype(result, "timedelta64[ns]")
result = ser - tdi
tm.assert_equal(result, -expected)
assert_dtype(result, "timedelta64[ns]")
@pytest.mark.parametrize("tdnat", [np.timedelta64("NaT"), NaT])
def test_td64arr_add_sub_td64_nat(self, box_with_array, tdnat):
# GH#18808, GH#23320 special handling for timedelta64("NaT")
box = box_with_array
tdi = TimedeltaIndex([NaT, Timedelta("1s")])
expected = TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + tdnat
tm.assert_equal(result, expected)
result = tdnat + obj
tm.assert_equal(result, expected)
result = obj - tdnat
tm.assert_equal(result, expected)
result = tdnat - obj
tm.assert_equal(result, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as + is now numeric
# GH#10699 for Tick cases
box = box_with_array
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
result = two_hours + rng
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as - is now numeric
# GH#10699 for Tick cases
box = box_with_array
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
result = two_hours - rng
tm.assert_equal(result, -expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
def test_td64arr_add_sub_offset_index(self, names, box_with_array):
# GH#18849, GH#19744
box = box_with_array
exname = get_expected_name(box, names)
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = pd.Index([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1])
other = np.array(other) if box in [tm.to_array, pd.array] else other
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=exname
)
expected_sub = TimedeltaIndex(
[tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
expected_sub = tm.box_expected(expected_sub, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
with tm.assert_produces_warning(PerformanceWarning):
res_sub = tdi - other
tm.assert_equal(res_sub, expected_sub)
def test_td64arr_add_sub_offset_array(self, box_with_array):
# GH#18849, GH#18824
box = box_with_array
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
other = np.array([offsets.Hour(n=1), offsets.Minute(n=-2)])
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer"
)
expected_sub = TimedeltaIndex(
[tdi[n] - other[n] for n in range(len(tdi))], freq="infer"
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
expected_sub = tm.box_expected(expected_sub, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res_sub = tdi - other
tm.assert_equal(res_sub, expected_sub)
def test_td64arr_with_offset_series(self, names, box_with_array):
# GH#18849
box = box_with_array
box2 = Series if box in [pd.Index, tm.to_array, pd.array] else box
exname = get_expected_name(box, names)
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = Series([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))], name=exname)
obj = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = obj + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + obj
tm.assert_equal(res2, expected_add)
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))], name=exname)
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = obj - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize("obox", [np.array, pd.Index, Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array):
# GH#18824
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([offsets.MonthEnd(), offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
msg = "has incorrect type|cannot add the type MonthEnd"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
# ------------------------------------------------------------------
# Unsorted
def test_td64arr_add_sub_object_array(self, box_with_array):
box = box_with_array
xbox = np.ndarray if box is pd.array else box
tdi = timedelta_range("1 day", periods=3, freq="D")
tdarr = tm.box_expected(tdi, box)
other = np.array([Timedelta(days=1), offsets.Day(2), Timestamp("2000-01-04")])
with tm.assert_produces_warning(PerformanceWarning):
result = tdarr + other
expected = pd.Index(
[Timedelta(days=2), Timedelta(days=4), Timestamp("2000-01-07")]
)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
msg = "unsupported operand type|cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
tdarr - other
with tm.assert_produces_warning(PerformanceWarning):
result = other - tdarr
expected = pd.Index([Timedelta(0), Timedelta(0), Timestamp("2000-01-01")])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range("1 days", "10 days", name="foo")
rng = tm.box_expected(rng, box_with_array)
msg = "argument must be an integer|cannot use operands with types dtype"
with pytest.raises(TypeError, match=msg):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype="int64")
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype="int64")
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype="int64")
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = Series if box in [pd.Index, tm.to_array, pd.array] else box
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
expected = TimedeltaIndex(np.arange(5, dtype="int64") ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * Series(np.arange(5, dtype="int64"))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = Series if box in [pd.Index, tm.to_array, pd.array] else box
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype="float64")
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize(
"other",
[
np.arange(1, 11),
Int64Index(range(1, 11)),
UInt64Index(range(1, 11)),
Float64Index(range(1, 11)),
pd.RangeIndex(1, 11),
],
ids=lambda x: type(x).__name__,
)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(["1 Day"] * 10)
expected = timedelta_range("1 days", "10 days")
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range("1 days", "10 days", name="foo")
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError, match="unsupported operand type"):
rng / NaT
with pytest.raises(TypeError, match="Cannot divide NaTType by"):
NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
box = box_with_array
xbox = np.ndarray if box is pd.array else box
rng = timedelta_range("1 days", "10 days")
rng = tm.box_expected(rng, box)
other = np.timedelta64("NaT")
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, xbox)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match="Cannot divide"):
# GH#23829
1 / idx
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
box = box_with_array
xbox = np.ndarray if box is pd.array else box
rng = timedelta_range("1 days", "10 days", name="foo")
expected = Float64Index((np.arange(10) + 1) * 12, name="foo")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, xbox)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
def test_td64arr_div_td64_scalar(self, m, unit, box_with_array):
box = box_with_array
xbox = np.ndarray if box is pd.array else box
startdate = Series(pd.date_range("2013-01-01", "2013-01-03"))
enddate = Series(pd.date_range("2013-03-01", "2013-03-03"))
ser = enddate - startdate
ser[2] = np.nan
flat = ser
ser = tm.box_expected(ser, box)
# op
expected = Series([x / np.timedelta64(m, unit) for x in flat])
expected = tm.box_expected(expected, xbox)
result = ser / np.timedelta64(m, unit)
tm.assert_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in flat])
expected = tm.box_expected(expected, xbox)
result = np.timedelta64(m, unit) / ser
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array):
box = box_with_array
xbox = np.ndarray if box is pd.array else box
rng = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo")
expected = Float64Index([12, np.nan, 24], name="foo")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, xbox)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
box = box_with_array
xbox = np.ndarray if box is pd.array else box
rng = TimedeltaIndex(["1 days", NaT, "2 days"])
expected = Float64Index([12, np.nan, 24])
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, xbox)
other = np.array([2, 4, 2], dtype="m8[h]")
result = rng / other
tm.assert_equal(result, expected)
result = rng / tm.box_expected(other, box)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
tm.assert_equal(result, expected)
result = rng / list(other)
tm.assert_equal(result, expected)
# reversed op
expected = 1 / expected
result = other / rng
tm.assert_equal(result, expected)
result = tm.box_expected(other, box) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
tm.assert_equal(result, expected)
result = list(other) / rng
tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(["1 days", NaT, "2 days"])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
msg = "Cannot divide vectors|Unable to coerce to Series"
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError, match=msg):
rng / other
with pytest.raises(ValueError, match=msg):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_td64arr_with_nat(
self, box_with_array, using_array_manager
):
# GH#35529
box = box_with_array
xbox = np.ndarray if box is pd.array else box
left = Series([1000, 222330, 30], dtype="timedelta64[ns]")
right = Series([1000, 222330, None], dtype="timedelta64[ns]")
left = tm.box_expected(left, box)
right = tm.box_expected(right, box)
expected = np.array([1.0, 1.0, np.nan], dtype=np.float64)
expected = tm.box_expected(expected, xbox)
if box is DataFrame and using_array_manager:
            # INFO(ArrayManager) floordiv returns integer, and ArrayManager
# performs ops column-wise and thus preserves int64 dtype for
# columns without missing values
expected[[0, 1]] = expected[[0, 1]].astype("int64")
result = left // right
tm.assert_equal(result, expected)
# case that goes through __rfloordiv__ with arraylike
result = np.asarray(left) // right
tm.assert_equal(result, expected)
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
box = box_with_array
xbox = np.ndarray if box is pd.array else box
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box, transpose=False)
expected = tm.box_expected(expected, xbox, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
box = box_with_array
xbox = np.ndarray if box is pd.array else box
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box, transpose=False)
expected = tm.box_expected(expected, xbox, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array, scalar_td):
# GH#18831
box = box_with_array
xbox = np.ndarray if box is pd.array else box
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box, transpose=False)
expected = tm.box_expected(expected, xbox, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = "floor_divide cannot use operands|Cannot divide int by Timedelta*"
with pytest.raises(TypeError, match=pattern):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
box = box_with_array
xbox = np.ndarray if box is pd.array else box
tdi = timedelta_range("1 days", "10 days", name="foo")
expected = Int64Index((np.arange(10) + 1) * 12, name="foo")
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = tdi // two_hours
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize(
"scalar_td",
[
timedelta(minutes=10, seconds=7),
Timedelta("10m7s"),
Timedelta("10m7s").to_timedelta64(),
],
ids=lambda x: type(x).__name__,
)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
box = box_with_array
xbox = np.ndarray if box_with_array is pd.array else box_with_array
tdi = TimedeltaIndex(["00:05:03", "00:05:03", NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box, transpose=False)
expected = tm.box_expected(expected, xbox, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, xbox, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range("1 Day", "9 days")
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(["1 Day", "2 Days", "0 Days"] * 3)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % three_days
tm.assert_equal(result, expected)
warn = None
if box_with_array is DataFrame and isinstance(three_days, pd.DateOffset):
warn = PerformanceWarning
with tm.assert_produces_warning(warn):
result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
def test_td64arr_mod_int(self, box_with_array):
tdi = timedelta_range("1 ns", "10 ns", periods=10)
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(["1 ns", "0 ns"] * 5)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % 2
tm.assert_equal(result, expected)
msg = "Cannot divide int by"
with pytest.raises(TypeError, match=msg):
2 % tdarr
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range("1 Day", "9 days")
tdarr = tm.box_expected(tdi, box_with_array)
expected = ["0 Days", "1 Day", "0 Days"] + ["3 Days"] * 6
expected = TimedeltaIndex(expected)
expected = tm.box_expected(expected, box_with_array)
result = three_days % tdarr
tm.assert_equal(result, expected)
result = divmod(three_days, tdarr)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others
def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = "operate|unsupported|cannot|not supported"
with pytest.raises(TypeError, match=pattern):
td1 * scalar_td
with pytest.raises(TypeError, match=pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
msg = "|".join(
[
"cannot use operands with types dtype",
"Cannot multiply with unequal lengths",
"Unable to coerce to Series",
]
)
with pytest.raises(TypeError, match=msg):
# length check before dtype check
idx * idx[:3]
with pytest.raises(ValueError, match=msg):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
msg = "cannot use operands with types dtype"
with pytest.raises(TypeError, match=msg):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
expected = Series(["-59 Days", "-59 Days", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(["118 Days", "118 Days", "NaT"], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box_with_array, two):
# GH#4521
# divide/multiply by integers
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser / two
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match="Cannot divide"):
two / tdser
@pytest.mark.parametrize(
"vector",
[np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
ids=lambda x: type(x).__name__,
)
def test_td64arr_rmul_numeric_array(
self,
box_with_array,
vector,
any_real_numpy_dtype,
):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
vector = vector.astype(any_real_numpy_dtype)
expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser * vector
tm.assert_equal(result, expected)
result = vector * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"vector",
[np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
ids=lambda x: type(x).__name__,
)
def test_td64arr_div_numeric_array(
self, box_with_array, vector, any_real_numpy_dtype, using_array_manager
):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
vector = vector.astype(any_real_numpy_dtype)
expected = Series(["2.95D", "1D 23H 12m", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser / vector
tm.assert_equal(result, expected)
pattern = "|".join(
[
"true_divide'? cannot use operands",
"cannot perform __div__",
"cannot perform __truediv__",
"unsupported operand",
"Cannot divide",
]
)
with pytest.raises(TypeError, match=pattern):
vector / tdser
if not isinstance(vector, pd.Index):
# Index.__rdiv__ won't try to operate elementwise, just raises
result = tdser / vector.astype(object)
if box_with_array is DataFrame:
expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))]
else:
expected = [tdser[n] / vector[n] for n in range(len(tdser))]
expected = pd.Index(expected) # do dtype inference
expected = tm.box_expected(expected, xbox)
assert tm.get_dtype(expected) == "m8[ns]"
if using_array_manager and box_with_array is DataFrame:
# TODO the behaviour is buggy here (third column with all-NaT
# as result doesn't get preserved as timedelta64 dtype).
# Reported at https://github.com/pandas-dev/pandas/issues/39750
# Changing the expected instead of xfailing to continue to test
# the correct behaviour for the other columns
expected[2] = Series([NaT, NaT], dtype=object)
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match=pattern):
vector.astype(object) / tdser
def test_td64arr_mul_int_series(self, box_with_array, names, request):
# GH#19042 test for correct name attachment
box = box_with_array
exname = get_expected_name(box, names)
tdi = TimedeltaIndex(
["0days", "1day", "2days", "3days", "4days"], name=names[0]
)
# TODO: Should we be parametrizing over types for `ser` too?
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(
["0days", "1day", "4days", "9days", "16days"],
dtype="timedelta64[ns]",
name=exname,
)
tdi = tm.box_expected(tdi, box)
xbox = get_upcast_box(box, ser)
expected = tm.box_expected(expected, xbox)
result = ser * tdi
tm.assert_equal(result, expected)
# The direct operation tdi * ser still needs to be fixed.
result = ser.__rmul__(tdi)
if box is DataFrame:
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
# TODO: Should we be parametrizing over types for `ser` too?
def test_float_series_rdiv_td64arr(self, box_with_array, names):
# GH#19042 test for correct name attachment
# TODO: the direct operation TimedeltaIndex / Series still
# needs to be fixed.
box = box_with_array
tdi = TimedeltaIndex(
["0days", "1day", "2days", "3days", "4days"], name=names[0]
)
ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
xname = names[2] if box not in [tm.to_array, pd.array] else names[1]
expected = Series(
[tdi[n] / ser[n] for n in range(len(ser))],
dtype="timedelta64[ns]",
name=xname,
)
xbox = get_upcast_box(box, ser)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = ser.__rtruediv__(tdi)
if box is DataFrame:
# TODO: Should we skip this case sooner or test something else?
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
class TestTimedelta64ArrayLikeArithmetic:
# Arithmetic tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all arithmetic
# tests will eventually end up here.
def test_td64arr_pow_invalid(self, scalar_td, box_with_array):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = "operate|unsupported|cannot|not supported"
with pytest.raises(TypeError, match=pattern):
scalar_td ** td1
with pytest.raises(TypeError, match=pattern):
td1 ** scalar_td
def test_add_timestamp_to_timedelta():
# GH: 35897
timestamp = Timestamp.now()
result = timestamp + timedelta_range("0s", "1s", periods=31)
expected = DatetimeIndex(
[
timestamp
+ (
pd.to_timedelta("0.033333333s") * i
+ pd.to_timedelta("0.000000001s") * divmod(i, 3)[0]
)
for i in range(31)
]
)
tm.assert_index_equal(result, expected)
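# --- illustrative sketch (added; not part of the original pandas test module) ---
# The test above relies on a rule exercised throughout this file: adding a
# Timestamp to a timedelta64 array is commutative and produces a datetime64
# result. A minimal standalone check using only names already imported here:
def _example_timestamp_plus_timedelta_range():
    base = Timestamp("2021-01-01")
    deltas = timedelta_range("0s", "2s", periods=3)
    forward = base + deltas   # DatetimeIndex starting at 2021-01-01 00:00:00
    backward = deltas + base  # same values; the addition is commutative
    assert isinstance(forward, DatetimeIndex)
    tm.assert_index_equal(forward, backward)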
| avg_line_length: 35.797521 | max_line_length: 88 | alphanum_fraction: 0.601165 |

| hexsha: ddb38bb70289ed91d23e73f36f410690be82bffd | size: 1515 | ext: py | lang: Python |
| repo_path: rx/operators/observable/range.py | repo_name: yutiansut/RxPY | repo_head_hexsha: c3bbba77f9ebd7706c949141725e220096deabd4 | licenses: ["ECL-2.0", "Apache-2.0"] |
| max_stars_count: 1 (2018-11-16T09:07:13.000Z to 2018-11-16T09:07:13.000Z) | max_issues_count: null | max_forks_count: 1 (2020-05-08T08:23:08.000Z to 2020-05-08T08:23:08.000Z) |
from rx.core import ObservableBase, AnonymousObservable
from rx.concurrency import current_thread_scheduler
from rx.disposables import MultipleAssignmentDisposable
def from_range(start: int, stop: int = None, step: int = None) -> ObservableBase:
"""Generates an observable sequence of integral numbers within a
specified range, using the specified scheduler to send out observer
messages.
1 - res = Rx.Observable.range(10)
2 - res = Rx.Observable.range(0, 10)
3 - res = Rx.Observable.range(0, 10, 1)
    Keyword arguments:
    start -- First integer in the sequence, or the exclusive upper bound when
        it is the only argument given (mirroring the built-in range()).
    stop -- Exclusive upper bound of the sequence.
    step -- Difference between consecutive integers (defaults to 1).
Returns an observable sequence that contains a range of sequential
integral numbers.
"""
if step is None and stop is None:
range_t = range(start)
elif step is None:
range_t = range(start, stop)
else:
range_t = range(start, stop, step)
def subscribe(observer, scheduler=None):
nonlocal range_t
scheduler = scheduler or current_thread_scheduler
sd = MultipleAssignmentDisposable()
def action(scheduler, iterator):
try:
observer.on_next(next(iterator))
sd.disposable = scheduler.schedule(action, state=iterator)
except StopIteration:
observer.on_completed()
sd.disposable = scheduler.schedule(action, iter(range_t))
return sd
return AnonymousObservable(subscribe)
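# --- usage sketch (added for illustration; not part of the original module) ---
# from_range mirrors Python's built-in range(): one argument is an exclusive
# upper bound, two arguments are start/stop, three add a step. Values are
# emitted one-by-one on the trampoline (current_thread) scheduler via the
# recursive `action` above. The observer wiring below is an assumption about
# this development branch's subscribe() signature, not documented API.
if __name__ == '__main__':
    class _PrintObserver:
        def on_next(self, value):
            print('on_next:', value)

        def on_error(self, error):
            print('on_error:', error)

        def on_completed(self):
            print('on_completed')

    # Emits 0, 2, 4, 6, 8 and then completes.
    from_range(0, 10, 2).subscribe(_PrintObserver())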
| avg_line_length: 32.934783 | max_line_length: 81 | alphanum_fraction: 0.678548 |

| hexsha: be128cdfc3da41e8fc3d2ef1700e48ca52be4c1a | size: 5885 | ext: py | lang: Python |
| repo_path: code/visuals_test_cartopy_netcdf_20211202.py | repo_name: goldford/NEMO-Salish-Sea-2021 | repo_head_hexsha: c6a78956293f1741ff9537747dd02ce37c20a4d3 | licenses: ["MIT"] |
| max_stars_count: null | max_issues_count: null | max_forks_count: null |
# created Dec 1 2021 by GO
# - issues with applying projections
# - following http://tech.weatherforce.org/blog/ecmwf-data-animation/index.html
# - cartopy background layers (land, ocean, coastline) require declaring a projection, but declaring a projection doesn't work with this lat/lon data
import xarray as xr
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeat
import matplotlib.animation as animation
import netCDF4 as nc
import numpy as np
from netCDF4 import Dataset
### ANIMATE CODE FOR NEMO (using xarray)
def make_figure_NEMO(xlim, ylim):
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return fig, ax
def draw_NEMO(frame, add_colorbar):
field_at_time = field[frame]
contour = field_at_time.plot(ax=ax, add_colorbar=add_colorbar, vmin=min_value, vmax=max_value)
#title = u"%s — %s" % ("field name here", str(ds.time_counter[frame].values)[:19])
#ax.set_title(title)
return contour
def animate_NEMO(frame):
return draw_NEMO(frame, add_colorbar=False)
def init_NEMO():
return draw_NEMO(0, add_colorbar=True)
### ANIMATE CODE FOR ERA (using netcdf4)
def make_figure_ERA():
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(1, 1, 1)
return fig, ax
def init_ERA():
return draw_ERA(0, add_colorbar=True)
def draw_ERA(frame, add_colorbar):
field_at_time = field[frame]
ax.pcolormesh(field_at_time)
#title = u"%s — %s" % ("field name here", str(ds.time_counter[frame].values)[:19])
#ax.set_title(title)
return ax
def animate_ERA(frame):
return draw_ERA(frame, add_colorbar=False)
# original code
def draw_orig(frame, add_colorbar):
field_at_time = ds.mldkz5[frame]
contour = field_at_time.plot(ax=ax, transform=ccrs.PlateCarree(),
add_colorbar=add_colorbar, vmin=min_value, vmax=max_value)
title = u"%s — %s" % ("field name here", str(ds.time_counter[frame].values)[:19])
ax.set_title(title)
return contour
def make_figure():
fig = plt.figure(figsize=(8, 3))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
# generate a basemap with country borders, oceans and coastlines
ax.add_feature(cfeat.LAND)
ax.add_feature(cfeat.OCEAN)
ax.add_feature(cfeat.COASTLINE)
ax.add_feature(cfeat.BORDERS, linestyle='dotted')
return fig, ax
make_figure();
#############################
######## NEMO Outputs #######
#############################
nc_file_path = "..//data//temp//"
nc_file = "SalishSea1500-RUN201_MonthlyMeans_grid_T_2D_1981.nc"
ds = xr.open_mfdataset(nc_file_path + nc_file) #open_mfdataset and open_dataset are different
field = ds.mldkz5 # turbocline
max_lat = field.nav_lat.values.max()
min_lat = field.nav_lat.values.min()
max_lon = field.nav_lon.values.max()
min_lon = field.nav_lon.values.min()
print(max_lon)
print("turbocline data retrieved")
print(field.shape)
xlim = (min_lon, max_lon)
ylim = (min_lat, max_lat)
_, ax = make_figure_NEMO(xlim, ylim)
#field_at_time.plot(ax=ax, transform=ccrs.PlateCarree(), vmin=min_value, vmax=max_value)
field_at_time = field[3]
print(field_at_time.shape)
field_at_time.plot(ax = ax)
plt.savefig('nemo_visual_test_mldkz5.png')
plt.close()
ax.cla()
print("printed NEMO turbocline (mldkz5) image")
############ animation ###############
#frames = ds.time.size
##frames = ds.time_counter.size # Number of frames
#print("# of frames" + str(frames))
#ax.cla()
#fig, ax = make_figure_GO(xlim, ylim)
#print("generating animation")
#ani = animation.FuncAnimation(fig, animate, frames, interval=0.01, blit=False,
# init_func=init, repeat=False)
#ani._init_draw()
#print("saving animation")
#fps_go=2
#ani.save('test.mp4', writer=animation.FFMpegWriter(fps=fps_go))
#print("success")
#plt.close(fig)
##################################
######## REANALYSIS DATA #########
##################################
# annual file hourly data trimmed to Salish Sea
#nc_file_path = "C://Users//Greig//Sync//6. SSMSP Model//Model Greig//Data//29. Oceanographic Atmospheric//ECMWF ERA5//adjusted//"
#nc_file="/ERA5_SalishSea_fixed_1981.nc"
# monthly means
nc_file_path = r"C:/Users/Greig/Sync/For Joe/"
nc_file="ERA5_NEMOgrid_light_monthly_1981.nc"
#nc_file="ERA5_NEMOgrid_light_daily_2019.nc"
with nc.Dataset(nc_file_path + nc_file) as ncf:
nlat = ncf.variables['latitude'][0,...]
nlon = ncf.variables['longitude'][0,...]
msdwswrf = ncf.variables['msdwswrf'][:,...] # incoming light
time1 = ncf.variables['time'][:,...]
print("Light data retrieved from reanalysis product")
print(msdwswrf.shape)
field = msdwswrf
max_lat = nlat.max()
min_lat = nlat.min()
max_lon = nlon.max()
min_lon = nlon.min()
print(min_lon)
xlim = (min_lon, max_lon)
ylim = (min_lat, max_lat)
_, ax = make_figure_ERA()
#field_at_time.plot(ax=ax, transform=ccrs.PlateCarree(), vmin=min_value, vmax=max_value)
#print(np.squeeze(field[0]).shape)
field_at_time = field[0]
ax.pcolormesh(field_at_time)
plt.savefig('nemo_visual_test_msdwswrf.png')
plt.close()
ax.cla()
print("printed ERA shortwave (msdwswrf) image")
#======================================
# ============= animation =============
frames = len(time1)
ax.cla()
fig, ax = make_figure_ERA()
print("generating animation")
print("length: " + str(frames))
ani = animation.FuncAnimation(fig, animate_ERA, frames, interval=0.01, blit=False,
init_func=init_ERA, repeat=False)
#ani._init_draw()
print("saving animation")
fps_go=1
ani.save('era_animation_test.mp4', writer=animation.FFMpegWriter(fps=fps_go))
print("success")
plt.close(fig)
#======================================
##########################################
########### ECOSPACE ASC DATA ############
##########################################
| avg_line_length: 28.02381 | max_line_length: 142 | alphanum_fraction: 0.663042 |

| hexsha: 2402e54f1f504d7fed82bc94c15c82b8d79aef99 | size: 3863 | ext: py | lang: Python |
| repo_path: deep_sort/deep_sort/deep_sort.py | repo_name: rohitkuk/yolox_deepsort | repo_head_hexsha: d593ab48757a87e2267fb11b35ebf5c4b73d1f89 | licenses: ["Apache-2.0"] |
| max_stars_count: null | max_issues_count: 1 (2022-02-04T09:49:10.000Z to 2022-02-09T09:20:48.000Z) | max_forks_count: null |
import numpy as np
import torch
from .deep.feature_extractor import Extractor
from .sort.nn_matching import NearestNeighborDistanceMetric
from .sort.detection import Detection
from .sort.tracker import Tracker
__all__ = ['DeepSort']
class DeepSort(object):
def __init__(self, model_path, max_dist=0.2, min_confidence=0.3, nms_max_overlap=1.0, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100, use_cuda=True):
self.min_confidence = min_confidence
self.nms_max_overlap = nms_max_overlap
self.extractor = Extractor(model_path, use_cuda=use_cuda)
max_cosine_distance = max_dist
metric = NearestNeighborDistanceMetric(
"cosine", max_cosine_distance, nn_budget)
self.tracker = Tracker(
metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)
def update(self, bbox_xywh, confidences, oids, ori_img):
self.height, self.width = ori_img.shape[:2]
# generate detections
features = self._get_features(bbox_xywh, ori_img)
bbox_tlwh = self._xywh_to_tlwh(bbox_xywh)
detections = [Detection(bbox_tlwh[i], conf, features[i],oid) for i, (conf,oid) in enumerate(zip(confidences,oids)) if conf > self.min_confidence]
        # prepare boxes and scores for non-maximum suppression
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
# update tracker
self.tracker.predict()
self.tracker.update(detections)
# output bbox identities
outputs = []
for track in self.tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
box = track.to_tlwh()
x1, y1, x2, y2 = self._tlwh_to_xyxy(box)
track_id = track.track_id
track_oid = track.oid
            outputs.append(np.array([x1, y1, x2, y2, track_id, track_oid], dtype=int))  # np.int is removed in modern NumPy
if len(outputs) > 0:
outputs = np.stack(outputs, axis=0)
return outputs
"""
TODO:
Convert bbox from xc_yc_w_h to xtl_ytl_w_h
Thanks JieChen91@github.com for reporting this bug!
"""
@staticmethod
def _xywh_to_tlwh(bbox_xywh):
if isinstance(bbox_xywh, np.ndarray):
bbox_tlwh = bbox_xywh.copy()
elif isinstance(bbox_xywh, torch.Tensor):
bbox_tlwh = bbox_xywh.clone()
bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2.
bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2.
return bbox_tlwh
def _xywh_to_xyxy(self, bbox_xywh):
x, y, w, h = bbox_xywh
x1 = max(int(x - w / 2), 0)
x2 = min(int(x + w / 2), self.width - 1)
y1 = max(int(y - h / 2), 0)
y2 = min(int(y + h / 2), self.height - 1)
return x1, y1, x2, y2
def _tlwh_to_xyxy(self, bbox_tlwh):
"""
        Convert bbox from xtl_ytl_w_h to xmin_ymin_xmax_ymax, clipped to the
        image bounds.
"""
x, y, w, h = bbox_tlwh
x1 = max(int(x), 0)
x2 = min(int(x+w), self.width - 1)
y1 = max(int(y), 0)
y2 = min(int(y+h), self.height - 1)
return x1, y1, x2, y2
def increment_ages(self):
self.tracker.increment_ages()
def _xyxy_to_tlwh(self, bbox_xyxy):
x1, y1, x2, y2 = bbox_xyxy
t = x1
l = y1
w = int(x2 - x1)
h = int(y2 - y1)
return t, l, w, h
def _get_features(self, bbox_xywh, ori_img):
im_crops = []
for box in bbox_xywh:
x1, y1, x2, y2 = self._xywh_to_xyxy(box)
im = ori_img[y1:y2, x1:x2]
im_crops.append(im)
if im_crops:
features = self.extractor(im_crops)
else:
features = np.array([])
return features
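# --- usage sketch (added for illustration; not part of the original module) ---
# update() takes center-format boxes (xc, yc, w, h), per-box confidences,
# per-box class ids and the original frame, and returns rows of
# [x1, y1, x2, y2, track_id, class_id] for confirmed tracks. The checkpoint
# path below is a placeholder, not a file shipped with this repository, and
# the first few frames return nothing until a track survives `n_init` updates.
if __name__ == '__main__':
    tracker = DeepSort(model_path='checkpoints/ckpt.t7', use_cuda=False)
    frame = np.zeros((480, 640, 3), dtype=np.uint8)       # dummy BGR frame
    bbox_xywh = np.array([[320.0, 240.0, 80.0, 120.0]])   # one detection
    confidences = np.array([0.9])
    oids = np.array([0])                                  # object class id
    outputs = tracker.update(bbox_xywh, confidences, oids, frame)
    print(outputs)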
| avg_line_length: 33.885965 | max_line_length: 164 | alphanum_fraction: 0.598757 |

| hexsha: 810cdf162da4245e3a7ad18ff4274731958df3a6 | size: 4054 | ext: py | lang: Python |
| repo_path: dungeon_maps/demos/ego_flow/run.py | repo_name: Ending2015a/dungeon_maps | repo_head_hexsha: c4f269c7fe67694d9b8a693f1902e70859e5ab25 | licenses: ["MIT"] |
| max_stars_count: 1 (2022-03-26T06:18:26.000Z to 2022-03-26T06:18:26.000Z) | max_issues_count: null | max_forks_count: null |
# --- built in ---
import os
import math
# --- 3rd party ---
import numpy as np
import torch
import cv2
# --- my module ---
import dungeon_maps as dmap
# import simulators, so that one can use dmap.sim.make()
# to create simulators.
import dungeon_maps.sim
from dungeon_maps.demos.ego_flow import vis
# Some constants
WIDTH, HEIGHT = 800, 600
HFOV = math.radians(70)
CAM_PITCH = math.radians(-10)
CAM_HEIGHT = 0.88 # meter
MIN_DEPTH = 0.1 # meter
MAX_DEPTH = 10.0 # meter
def subtract_pose(p1, p2):
"""Caulate delta pose from p1 -> p2"""
x1, y1, o1 = p1[...,0], p1[...,1], p1[...,2]
x2, y2, o2 = p2[...,0], p2[...,1], p2[...,2]
r = ((x1-x2)**2.0 + (y1-y2)**2.0)**0.5 # distance
  p = np.arctan2(y2-y1, x2-x1) - o1  # bearing of p2, relative to p1's heading
do = o2 - o1
  do = np.arctan2(np.sin(do), np.cos(do))  # wrap to (-pi, pi]
dx = r * np.cos(p)
dy = r * np.sin(p)
return np.stack([dx, dy, do], axis=-1) # (batch, 3)
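# Worked example for subtract_pose (added for illustration; values chosen by
# hand, not produced by the simulator):
def _example_subtract_pose():
  """Moving from pose (x=0, y=0, yaw=0) to (x=1, y=1, yaw=pi/2) gives a delta
  expressed in the first pose's frame: dx=1, dy=1 and a +pi/2 heading change.
  """
  p1 = np.array([0., 0., 0.], dtype=np.float32)
  p2 = np.array([1., 1., math.pi/2], dtype=np.float32)
  delta = subtract_pose(p1, p2)
  assert np.allclose(delta, [1., 1., math.pi/2], atol=1e-5)
  return delta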
def denormalize(depth_map):
"""Denormalize depth map, from [0, 1] to [MIN_DEPTH, MAX_DEPTH]"""
return depth_map * (MAX_DEPTH - MIN_DEPTH) + MIN_DEPTH
def create_simulator():
"""Returns environment and MapProjector"""
# Create simulator
env = dmap.sim.make(
'forest',
width = WIDTH,
height = HEIGHT,
hfov = HFOV,
cam_pitch = CAM_PITCH,
cam_height = CAM_HEIGHT,
min_depth = MIN_DEPTH,
max_depth = MAX_DEPTH
)
proj = dmap.MapProjector(
width = WIDTH,
height = HEIGHT,
hfov = HFOV,
vfov = None,
cam_pose = [0., 0., 0.],
width_offset = 0.,
height_offset = 0.,
cam_pitch = CAM_PITCH,
cam_height = CAM_HEIGHT,
map_res = 0.03,
map_width = 600,
map_height = 600,
trunc_depth_min = 0.15,
trunc_depth_max = 5.05,
clip_border = 50,
fill_value = dmap.NINF,
to_global = True
)
return env, proj
def compute_ego_flow(proj, depth, trans_pose):
# Compute egocentric motion flow
depth_map = np.transpose(denormalize(depth), (2, 0, 1)) # (1, h, w)
depth_map = torch.tensor(depth_map, device='cuda')
grid = proj.camera_affine_grid(depth_map, trans_pose)
x, y = dmap.utils.generate_image_coords(
depth_map.shape,
dtype = torch.float32,
device = 'cuda'
)
coords = torch.stack((x, y), dim=-1)
flow = coords - grid
flow[..., 0] /= grid.shape[1]
flow[..., 1] /= grid.shape[0]
flow[..., 1] = -flow[..., 1] # flip y
return flow[0, 0] # (h, w, 2)
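# Note (added): `grid` gives, for each output pixel, the source coordinate it
# samples from under the camera motion, so `coords - grid` is the apparent
# per-pixel displacement; it is then scaled by the grid's spatial size and the
# vertical component is flipped for visualization.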
def render_scene(rgb, depth, trans_pose, proj):
bgr_image = rgb[...,::-1].astype(np.uint8) # (h, w, 3)
depth_image = np.concatenate((depth,)*3, axis=-1) # (h, w, 3)
depth_image = (depth_image*255.).astype(np.uint8)
scene = np.concatenate((bgr_image, depth_image), axis=1)
# Render egocentric motion flow
flow = compute_ego_flow(proj, depth, trans_pose)
flow_bgr = vis.draw_flow(flow)
# padding to same size
pad_num = np.abs(flow_bgr.shape[1] - scene.shape[1])
left_pad = pad_num//2
right_pad = pad_num - left_pad
if scene.shape[1] < flow_bgr.shape[1]:
scene = np.pad(scene, ((0, 0), (left_pad, right_pad), (0, 0)),
mode='constant', constant_values=0)
elif scene.shape[1] > flow_bgr.shape[1]:
flow_bgr = np.pad(flow_bgr, ((0, 0), (left_pad, right_pad), (0, 0)),
mode='constant', constant_values=0)
scene = np.concatenate((scene, flow_bgr), axis=0)
return scene
def run_example():
env, proj = create_simulator()
observations = env.reset()
last_pose = np.array((0., 0., 0.), dtype=np.float32)
while True:
# RGB image (h, w, 3), torch.uint8
rgb = observations['rgb']
# Depth image (h, w, 1), torch.float32
depth = observations['depth']
# Ground truth camera pose [x, z, yaw] in world coordinate
cam_pose = observations['pose_gt'].astype(np.float32)
trans_pose = subtract_pose(last_pose, cam_pose)
last_pose = cam_pose
# render scene
scene = render_scene(rgb, depth, trans_pose, proj)
cv2.imshow('Ego motion flow', scene)
key = cv2.waitKey(10)
if key == ord('q'):
print('Quit')
exit()
observations = env.step()
if __name__ == '__main__':
run_example()
| avg_line_length: 29.591241 | max_line_length: 72 | alphanum_fraction: 0.630982 |

| hexsha: d91e0f0e5edda0b3fe3140ce21fd03e3925e95ae | size: 29765 | ext: py | lang: Python |
| repo_path: test/tool_shed/functional/test_1080_advanced_circular_dependency_installation.py | repo_name: blankenberg/galaxy-data-resource | repo_head_hexsha: ca32a1aafd64948f489a4e5cf88096f32391b1d9 | licenses: ["CC-BY-3.0"] |
| max_stars_count: null | max_issues_count: 1 (2015-02-21T18:48:19.000Z to 2015-02-27T15:50:32.000Z) | max_forks_count: 3 (2015-02-22T13:34:16.000Z to 2020-10-01T01:28:04.000Z) |
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os, logging
column_repository_name = 'column_maker_0080'
column_repository_description = "Add column"
column_repository_long_description = "Compute an expression on every row"
convert_repository_name = 'convert_chars_0080'
convert_repository_description = "Convert delimiters"
convert_repository_long_description = "Convert delimiters to tab"
category_name = 'Test 0080 Advanced Circular Dependencies'
category_description = 'Test circular dependency features'
log = logging.getLogger( __name__ )
running_standalone = False
class TestRepositoryDependencies( ShedTwillTestCase ):
'''Testing uninstalling and reinstalling repository dependencies, and setting tool panel sections.'''
def test_0000_create_or_login_admin_user( self ):
"""Create necessary user accounts and login as an admin user."""
self.galaxy_logout()
self.galaxy_login( email=common.admin_email, username=common.admin_username )
galaxy_admin_user = self.test_db_util.get_galaxy_user( common.admin_email )
assert galaxy_admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
galaxy_admin_user_private_role = self.test_db_util.get_galaxy_private_role( galaxy_admin_user )
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
        assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
test_user_1_private_role = self.test_db_util.get_private_role( test_user_1 )
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = self.test_db_util.get_user( common.admin_email )
        assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
admin_user_private_role = self.test_db_util.get_private_role( admin_user )
def test_0005_create_and_populate_column_repository( self ):
"""Create the category for this test suite, then create and populate column_maker."""
category = self.create_category( name=category_name, description=category_description )
global running_standalone
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
repository = self.get_or_create_repository( name=column_repository_name,
description=column_repository_description,
long_description=column_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
self.upload_file( repository,
filename='column_maker/column_maker.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded column_maker tarball.',
strings_displayed=[],
strings_not_displayed=[] )
running_standalone = True
def test_0010_create_and_populate_convert_repository( self ):
'''Create and populate the convert_chars repository.'''
global running_standalone
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
category = self.create_category( name=category_name, description=category_description )
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
repository = self.get_or_create_repository( name=convert_repository_name,
description=convert_repository_description,
long_description=convert_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
self.upload_file( repository,
filename='convert_chars/convert_chars.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded convert_chars tarball.',
strings_displayed=[],
strings_not_displayed=[] )
running_standalone = True
def test_0015_upload_dependency_xml_if_needed( self ):
'''If this test is being run by itself, it will not have repository dependencies configured yet.'''
global running_standalone
if running_standalone:
convert_repository = self.test_db_util.get_repository_by_name_and_owner( convert_repository_name, common.test_user_1_name )
column_repository = self.test_db_util.get_repository_by_name_and_owner( column_repository_name, common.test_user_1_name )
repository_dependencies_path = self.generate_temp_path( 'test_1080', additional_paths=[ 'convert' ] )
repository_tuple = ( self.url, convert_repository.name, convert_repository.user.username, self.get_repository_tip( convert_repository ) )
self.create_repository_dependency( repository=column_repository, repository_tuples=[ repository_tuple ], filepath=repository_dependencies_path )
repository_tuple = ( self.url, column_repository.name, column_repository.user.username, self.get_repository_tip( column_repository ) )
self.create_repository_dependency( repository=convert_repository, repository_tuples=[ repository_tuple ], filepath=repository_dependencies_path )
def test_0020_install_convert_repository( self ):
'''Install convert_chars without repository dependencies into convert_chars tool panel section.'''
self.galaxy_logout()
self.galaxy_login( email=common.admin_email, username=common.admin_username )
self.install_repository( convert_repository_name,
common.test_user_1_name,
category_name,
install_tool_dependencies=False,
install_repository_dependencies=False,
new_tool_panel_section_label='convert_chars' )
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
browse_strings_displayed = [ 'convert_chars_0080',
'Convert delimiters',
self.url.replace( 'http://', '' ),
installed_convert_repository.installed_changeset_revision ]
strings_displayed = [ 'convert_chars_0080',
'Convert delimiters',
self.url.replace( 'http://', '' ),
installed_convert_repository.installed_changeset_revision,
'column_maker_0080',
installed_column_repository.installed_changeset_revision,
'Missing repository dependencies' ]
self.display_galaxy_browse_repositories_page( strings_displayed=browse_strings_displayed )
self.display_installed_repository_manage_page( installed_convert_repository,
strings_displayed=strings_displayed )
def test_0025_install_column_repository( self ):
'''Install column maker with repository dependencies into column_maker tool panel section.'''
self.install_repository( column_repository_name,
common.test_user_1_name,
category_name,
install_repository_dependencies=True,
new_tool_panel_section_label='column_maker',
strings_displayed=[ 'install_repository_dependencies' ] )
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
browse_strings_displayed = [ 'convert_chars_0080',
'Convert delimiters',
self.url.replace( 'http://', '' ),
installed_convert_repository.installed_changeset_revision,
'column_maker_0080',
'Add column',
installed_column_repository.installed_changeset_revision ]
strings_displayed = [ 'column_maker_0080',
'Add column',
self.url.replace( 'http://', '' ),
installed_column_repository.installed_changeset_revision,
'convert_chars_0080',
installed_convert_repository.installed_changeset_revision,
'Installed repository dependencies' ]
self.display_galaxy_browse_repositories_page( strings_displayed=browse_strings_displayed )
self.display_installed_repository_manage_page( installed_column_repository,
strings_displayed=strings_displayed )
def test_0030_deactivate_convert_repository( self ):
'''Deactivate convert_chars, verify that column_maker is installed and missing repository dependencies.'''
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
self.deactivate_repository( installed_convert_repository )
strings_displayed = [ 'column_maker_0080',
'Add column',
self.url.replace( 'http://', '' ),
installed_column_repository.installed_changeset_revision,
'convert_chars_0080',
installed_convert_repository.installed_changeset_revision,
'Missing repository dependencies',
'Deactivated' ]
self.display_installed_repository_manage_page( installed_column_repository,
strings_displayed=strings_displayed )
def test_0035_reactivate_convert_repository( self ):
'''Reactivate convert_chars, both convert_chars and column_maker should now show as installed.'''
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
self.reactivate_repository( installed_convert_repository )
strings_displayed = [ 'convert_chars_0080',
'Compute',
'an expression on every row',
'1.1.0',
'column_maker_0080',
'Installed repository dependencies',
self.url.replace( 'http://', '' ),
installed_column_repository.installed_changeset_revision,
installed_convert_repository.installed_changeset_revision ]
self.display_installed_repository_manage_page( installed_column_repository,
strings_displayed=strings_displayed )
def test_0040_deactivate_column_repository( self ):
'''Deactivate column_maker, verify that convert_chars is installed and missing repository dependencies.'''
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
self.deactivate_repository( installed_column_repository )
strings_displayed = [ 'convert_chars_0080',
'Convert delimiters',
self.url.replace( 'http://', '' ),
installed_convert_repository.installed_changeset_revision,
'column_maker_0080',
installed_column_repository.installed_changeset_revision,
'Missing repository dependencies',
'Deactivated' ]
self.display_installed_repository_manage_page( installed_convert_repository,
strings_displayed=strings_displayed )
def test_0045_deactivate_convert_repository( self ):
'''Deactivate convert_chars, verify that both convert_chars and column_maker are deactivated.'''
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
self.deactivate_repository( installed_convert_repository )
strings_not_displayed = [ 'column_maker_0080',
installed_column_repository.installed_changeset_revision,
'convert_chars_0080',
installed_convert_repository.installed_changeset_revision ]
self.display_galaxy_browse_repositories_page( strings_not_displayed=strings_not_displayed )
def test_0050_reactivate_column_repository( self ):
'''Reactivate column_maker. This should not automatically reactivate convert_chars, so column_maker should be displayed as installed but missing repository dependencies.'''
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
self.reactivate_repository( installed_column_repository )
strings_displayed = [ 'column_maker_0080',
'Add column',
self.url.replace( 'http://', '' ),
installed_column_repository.installed_changeset_revision,
'convert_chars_0080',
installed_convert_repository.installed_changeset_revision,
'Missing repository dependencies',
'Deactivated' ]
self.display_installed_repository_manage_page( installed_column_repository,
strings_displayed=strings_displayed )
def test_0055_reactivate_convert_repository( self ):
'''Activate convert_chars. Both convert_chars and column_maker should now show as installed.'''
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
self.reactivate_repository( installed_convert_repository )
strings_displayed = [ 'column_maker_0080',
'Add column',
self.url.replace( 'http://', '' ),
installed_column_repository.installed_changeset_revision,
'convert_chars_0080',
installed_convert_repository.installed_changeset_revision,
'Installed repository dependencies' ]
self.display_installed_repository_manage_page( installed_column_repository,
strings_displayed=strings_displayed )
strings_displayed = [ 'convert_chars_0080',
'Convert delimiters',
self.url.replace( 'http://', '' ),
installed_convert_repository.installed_changeset_revision,
'column_maker_0080',
installed_column_repository.installed_changeset_revision,
'Installed repository dependencies' ]
self.display_installed_repository_manage_page( installed_convert_repository,
strings_displayed=strings_displayed )
def test_0060_uninstall_column_repository( self ):
'''Uninstall column_maker. Verify that convert_chars is installed and missing repository dependencies, and column_maker was in the right tool panel section.'''
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
self.uninstall_repository( installed_column_repository )
strings_displayed = [ 'convert_chars_0080',
'Convert delimiters',
self.url.replace( 'http://', '' ),
installed_convert_repository.installed_changeset_revision,
'column_maker_0080',
installed_column_repository.installed_changeset_revision,
'Missing repository dependencies',
'Uninstalled' ]
self.display_installed_repository_manage_page( installed_convert_repository,
strings_displayed=strings_displayed )
self.test_db_util.install_session.refresh( installed_column_repository )
self.check_galaxy_repository_tool_panel_section( installed_column_repository, 'column_maker' )
def test_0065_reinstall_column_repository( self ):
'''Reinstall column_maker without repository dependencies, verify both convert_chars and column_maker are installed.'''
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
self.reinstall_repository( installed_column_repository, install_repository_dependencies=False )
strings_displayed = [ 'column_maker_0080',
'Add column',
self.url.replace( 'http://', '' ),
installed_column_repository.installed_changeset_revision,
'convert_chars_0080',
installed_convert_repository.installed_changeset_revision,
'Installed repository dependencies' ]
self.display_installed_repository_manage_page( installed_column_repository,
strings_displayed=strings_displayed )
strings_displayed = [ 'convert_chars_0080',
'Convert delimiters',
self.url.replace( 'http://', '' ),
installed_convert_repository.installed_changeset_revision,
'column_maker_0080',
installed_column_repository.installed_changeset_revision,
'Installed repository dependencies' ]
self.display_installed_repository_manage_page( installed_convert_repository,
strings_displayed=strings_displayed )
def test_0070_uninstall_convert_repository( self ):
'''Uninstall convert_chars, verify column_maker installed but missing repository dependencies.'''
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
self.deactivate_repository( installed_convert_repository )
strings_displayed = [ 'column_maker_0080',
'Add column',
self.url.replace( 'http://', '' ),
installed_column_repository.installed_changeset_revision,
'convert_chars_0080',
installed_convert_repository.installed_changeset_revision,
'Missing repository dependencies',
'Deactivated' ]
self.display_installed_repository_manage_page( installed_column_repository,
strings_displayed=strings_displayed )
self.test_db_util.install_session.refresh( installed_convert_repository )
self.check_galaxy_repository_tool_panel_section( installed_convert_repository, 'convert_chars' )
def test_0075_uninstall_column_repository( self ):
'''Uninstall column_maker, verify that both convert_chars and column_maker are uninstalled.'''
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
self.deactivate_repository( installed_column_repository )
strings_displayed = [ 'convert_chars_0080',
'Convert delimiters',
self.url.replace( 'http://', '' ),
installed_convert_repository.installed_changeset_revision,
'column_maker_0080',
installed_column_repository.installed_changeset_revision,
'Missing repository dependencies',
'Activate or reinstall repository',
'Deactivated' ]
self.display_installed_repository_manage_page( installed_convert_repository,
strings_displayed=strings_displayed )
def test_0080_reinstall_convert_repository( self ):
'''Reinstall convert_chars with repository dependencies, verify that this installs both convert_chars and column_maker.'''
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
self.reinstall_repository( installed_convert_repository,
install_repository_dependencies=True,
no_changes=False )
strings_displayed = [ 'column_maker_0080',
'Add column',
self.url.replace( 'http://', '' ),
installed_column_repository.installed_changeset_revision,
'convert_chars_0080',
installed_convert_repository.installed_changeset_revision,
'Installed repository dependencies' ]
self.display_installed_repository_manage_page( installed_column_repository,
strings_displayed=strings_displayed )
strings_displayed = [ 'convert_chars_0080',
'Convert delimiters',
self.url.replace( 'http://', '' ),
installed_convert_repository.installed_changeset_revision,
'column_maker_0080',
installed_column_repository.installed_changeset_revision,
'Installed repository dependencies' ]
self.display_installed_repository_manage_page( installed_convert_repository,
strings_displayed=strings_displayed )
def test_0085_uninstall_all_repositories( self ):
'''Uninstall convert_chars and column_maker to verify that they are in the right tool panel sections.'''
installed_convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name,
common.test_user_1_name )
installed_column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name,
common.test_user_1_name )
self.deactivate_repository( installed_column_repository )
self.deactivate_repository( installed_convert_repository )
self.test_db_util.install_session.refresh( installed_column_repository )
self.test_db_util.install_session.refresh( installed_convert_repository )
self.check_galaxy_repository_tool_panel_section( installed_column_repository, '' )
self.check_galaxy_repository_tool_panel_section( installed_convert_repository, 'convert_chars' )
| 74.974811
| 180
| 0.582631
|
b7cf11209490dcd07e44a3134c3ea9e84df9da06
| 3,289
|
py
|
Python
|
homeassistant/components/shelly/device_trigger.py
|
amatas/home-assistant-core
|
bdbb4f939f34682b2eca993bb041cfb21214015c
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/shelly/device_trigger.py
|
amatas/home-assistant-core
|
bdbb4f939f34682b2eca993bb041cfb21214015c
|
[
"Apache-2.0"
] | 60
|
2020-07-06T15:10:30.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/shelly/device_trigger.py
|
jotunacorn/home-assistant
|
1a13728cd0241a9e26c687dbd031fd144a748f4b
|
[
"Apache-2.0"
] | null | null | null |
"""Provides device triggers for Shelly."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.homeassistant.triggers import event as event_trigger
from homeassistant.const import (
ATTR_DEVICE_ID,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_EVENT,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.typing import ConfigType
from .const import (
ATTR_CHANNEL,
ATTR_CLICK_TYPE,
CONF_SUBTYPE,
DOMAIN,
EVENT_SHELLY_CLICK,
INPUTS_EVENTS_SUBTYPES,
SUPPORTED_INPUTS_EVENTS_TYPES,
)
from .utils import get_device_wrapper, get_input_triggers
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(SUPPORTED_INPUTS_EVENTS_TYPES),
vol.Required(CONF_SUBTYPE): vol.In(INPUTS_EVENTS_SUBTYPES),
}
)
async def async_validate_trigger_config(hass, config):
"""Validate config."""
config = TRIGGER_SCHEMA(config)
# if device is available verify parameters against device capabilities
wrapper = get_device_wrapper(hass, config[CONF_DEVICE_ID])
if not wrapper:
return config
trigger = (config[CONF_TYPE], config[CONF_SUBTYPE])
for block in wrapper.device.blocks:
input_triggers = get_input_triggers(wrapper.device, block)
if trigger in input_triggers:
return config
raise InvalidDeviceAutomationConfig(
f"Invalid ({CONF_TYPE},{CONF_SUBTYPE}): {trigger}"
)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> list[dict]:
"""List device triggers for Shelly devices."""
triggers = []
wrapper = get_device_wrapper(hass, device_id)
if not wrapper:
raise InvalidDeviceAutomationConfig(f"Device not found: {device_id}")
for block in wrapper.device.blocks:
input_triggers = get_input_triggers(wrapper.device, block)
for trigger, subtype in input_triggers:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_TYPE: trigger,
CONF_SUBTYPE: subtype,
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
event_config = {
event_trigger.CONF_PLATFORM: CONF_EVENT,
event_trigger.CONF_EVENT_TYPE: EVENT_SHELLY_CLICK,
event_trigger.CONF_EVENT_DATA: {
ATTR_DEVICE_ID: config[CONF_DEVICE_ID],
ATTR_CHANNEL: INPUTS_EVENTS_SUBTYPES[config[CONF_SUBTYPE]],
ATTR_CLICK_TYPE: config[CONF_TYPE],
},
}
event_config = event_trigger.TRIGGER_SCHEMA(event_config)
return await event_trigger.async_attach_trigger(
hass, event_config, action, automation_info, platform_type="device"
)
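# Illustrative only: the shape of a device-trigger configuration accepted by
# TRIGGER_SCHEMA above. The concrete "single"/"button1" values are assumptions
# and must be members of SUPPORTED_INPUTS_EVENTS_TYPES / INPUTS_EVENTS_SUBTYPES
# for validation to succeed; the device id is a made-up registry id.
example_trigger_config = {
    CONF_PLATFORM: "device",
    CONF_DOMAIN: DOMAIN,
    CONF_DEVICE_ID: "0123456789abcdef",
    CONF_TYPE: "single",
    CONF_SUBTYPE: "button1",
}
# async_validate_trigger_config(hass, example_trigger_config) would then check
# these values against the input triggers reported by the device wrapper.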
| 30.453704
| 82
| 0.703557
|
5fdd540635150cd516c917aa63d20999692fe91f
| 5,809
|
py
|
Python
|
intersight/models/cep_initiate_cluster_bringup_ref.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 21
|
2018-03-29T14:20:35.000Z
|
2021-10-13T05:11:41.000Z
|
intersight/models/cep_initiate_cluster_bringup_ref.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 14
|
2018-01-30T15:45:46.000Z
|
2022-02-23T14:23:21.000Z
|
intersight/models/cep_initiate_cluster_bringup_ref.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 18
|
2018-01-03T15:09:56.000Z
|
2021-07-16T02:21:54.000Z
|
# coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CepInitiateClusterBringupRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_type': 'str',
'moid': 'str',
'selector': 'str'
}
attribute_map = {
'object_type': 'ObjectType',
'moid': 'Moid',
'selector': 'Selector'
}
def __init__(self, object_type=None, moid=None, selector=None):
"""
CepInitiateClusterBringupRef - a model defined in Swagger
"""
self._object_type = None
self._moid = None
self._selector = None
if object_type is not None:
self.object_type = object_type
if moid is not None:
self.moid = moid
if selector is not None:
self.selector = selector
@property
def object_type(self):
"""
Gets the object_type of this CepInitiateClusterBringupRef.
The Object Type of the referenced REST resource.
:return: The object_type of this CepInitiateClusterBringupRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this CepInitiateClusterBringupRef.
The Object Type of the referenced REST resource.
:param object_type: The object_type of this CepInitiateClusterBringupRef.
:type: str
"""
self._object_type = object_type
@property
def moid(self):
"""
Gets the moid of this CepInitiateClusterBringupRef.
The Moid of the referenced REST resource.
:return: The moid of this CepInitiateClusterBringupRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this CepInitiateClusterBringupRef.
The Moid of the referenced REST resource.
:param moid: The moid of this CepInitiateClusterBringupRef.
:type: str
"""
self._moid = moid
@property
def selector(self):
"""
Gets the selector of this CepInitiateClusterBringupRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:return: The selector of this CepInitiateClusterBringupRef.
:rtype: str
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this CepInitiateClusterBringupRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:param selector: The selector of this CepInitiateClusterBringupRef.
:type: str
"""
self._selector = selector
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CepInitiateClusterBringupRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
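# Illustrative usage sketch only; the ObjectType value is made up and the
# selector reuses the example filter string from the docstrings above.
example_ref = CepInitiateClusterBringupRef(
    object_type="cep.InitiateClusterBringup",
    selector="Serial eq '3AA8B7T11'",
)
print(example_ref.to_dict())
# -> {'object_type': 'cep.InitiateClusterBringup', 'moid': None,
#     'selector': "Serial eq '3AA8B7T11'"}
print(example_ref.to_str() == repr(example_ref))  # True: __repr__ delegates to to_str()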
| 31.917582
| 576
| 0.612842
|
ffddbf104ab8878cec070af62f448a249f9c1dd2
| 667
|
py
|
Python
|
estore_project/users/tests/test_drf_urls.py
|
Jawayria/estore_project
|
ad0c2f30b9a4d8a02b37ea29dfb2259635c55517
|
[
"MIT"
] | null | null | null |
estore_project/users/tests/test_drf_urls.py
|
Jawayria/estore_project
|
ad0c2f30b9a4d8a02b37ea29dfb2259635c55517
|
[
"MIT"
] | 1
|
2021-05-19T06:41:50.000Z
|
2021-05-19T06:41:50.000Z
|
estore_project/users/tests/test_drf_urls.py
|
Jawayria/estore_project
|
ad0c2f30b9a4d8a02b37ea29dfb2259635c55517
|
[
"MIT"
] | null | null | null |
import pytest
from django.urls import resolve, reverse
from estore_project.users.models import User
pytestmark = pytest.mark.django_db
def test_user_detail(user: User):
assert (
reverse("api:user-detail", kwargs={"username": user.username})
== f"/api/users/{user.username}/"
)
assert resolve(f"/api/users/{user.username}/").view_name == "api:user-detail"
def test_user_list():
assert reverse("api:user-list") == "/api/users/"
assert resolve("/api/users/").view_name == "api:user-list"
def test_user_me():
assert reverse("api:user-me") == "/api/users/me/"
assert resolve("/api/users/me/").view_name == "api:user-me"
| 26.68
| 81
| 0.671664
|
0d18de38d64771d358d1c78783858e003e51010f
| 3,414
|
py
|
Python
|
tests/models/validators/v2_2_1/jsd_f7abdb7ab46a5918a74e839488ff6ae0.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 32
|
2019-09-05T05:16:56.000Z
|
2022-03-22T09:50:38.000Z
|
tests/models/validators/v2_2_1/jsd_f7abdb7ab46a5918a74e839488ff6ae0.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 35
|
2019-09-07T18:58:54.000Z
|
2022-03-24T19:29:36.000Z
|
tests/models/validators/v2_2_1/jsd_f7abdb7ab46a5918a74e839488ff6ae0.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 18
|
2019-09-09T11:07:21.000Z
|
2022-03-25T08:49:59.000Z
|
# -*- coding: utf-8 -*-
"""Cisco DNA Center getSiteTopology data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorF7Abdb7AB46A5918A74E839488Ff6Ae0(object):
"""getSiteTopology request schema definition."""
def __init__(self):
super(JSONSchemaValidatorF7Abdb7AB46A5918A74E839488Ff6Ae0, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"response": {
"properties": {
"sites": {
"items": {
"properties": {
"displayName": {
"type": "string"
},
"groupNameHierarchy": {
"type": "string"
},
"id": {
"type": "string"
},
"latitude": {
"type": "string"
},
"locationAddress": {
"type": "string"
},
"locationCountry": {
"type": "string"
},
"locationType": {
"type": "string"
},
"longitude": {
"type": "string"
},
"name": {
"type": "string"
},
"parentId": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
}
},
"type": "object"
},
"version": {
"type": "string"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
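# Illustrative only: exercising the compiled schema with a minimal, made-up
# payload. Every property in the schema above is an optional string, so this
# call returns silently; a non-string value such as "latitude": 46.9 would
# raise MalformedRequest instead.
validator = JSONSchemaValidatorF7Abdb7AB46A5918A74E839488Ff6Ae0()
validator.validate({
    "response": {
        "sites": [
            {"id": "site-1", "name": "HQ", "parentId": "global", "latitude": "46.9"}
        ]
    },
    "version": "1.0",
})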
| 32.207547
| 83
| 0.529291
|
98ffbfb45ad2ec2ab4bef8a5048c72e8d38818b6
| 749
|
py
|
Python
|
routingpolicy/dotenv.py
|
48ix/routingpolicy
|
fd3e9547a5c54bd78ee2144786f6b30fdf41d7ef
|
[
"MIT"
] | 1
|
2020-09-22T19:06:01.000Z
|
2020-09-22T19:06:01.000Z
|
routingpolicy/dotenv.py
|
48ix/routingpolicy
|
fd3e9547a5c54bd78ee2144786f6b30fdf41d7ef
|
[
"MIT"
] | null | null | null |
routingpolicy/dotenv.py
|
48ix/routingpolicy
|
fd3e9547a5c54bd78ee2144786f6b30fdf41d7ef
|
[
"MIT"
] | null | null | null |
"""Import Environment Variables from Local Files."""
# Standard Library
import os
import re
from typing import Generator
from pathlib import Path
ENV_FILES = (".env", ".env.local")
REPO = Path(__file__).parent.parent
def find_env() -> Generator[Path, None, None]:
"""Locate environment files."""
for file in ENV_FILES:
path = REPO / file
if path.exists():
yield path
def load_env() -> None:
"""Parse env files & set env variables."""
for file in find_env():
with file.open("r") as f:
for line in f.readlines():
key, value = line.strip().split("=", 1)
key = re.sub(r"[^A-Za-z0-9_]", "_", key).upper()
os.environ[key] = value
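# Hypothetical usage sketch: assuming a ".env" file at the repository root
# containing the two lines
#
#   api-key=s3cr3t
#   log.level=debug
#
# load_env() uppercases each key, replaces non-alphanumeric characters with
# underscores, and exports the values:
if __name__ == "__main__":
    load_env()
    print(os.environ.get("API_KEY"))    # -> "s3cr3t"
    print(os.environ.get("LOG_LEVEL"))  # -> "debug"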
| 24.966667
| 64
| 0.580774
|
84c0f3b15ea070858b04ac552c3c962173c282b3
| 1,276
|
py
|
Python
|
src/cobra/apps/organization/summary/app.py
|
lyoniionly/django-cobra
|
2427e5cf74b7739115b1224da3306986b3ee345c
|
[
"Apache-2.0"
] | 1
|
2015-01-27T08:56:46.000Z
|
2015-01-27T08:56:46.000Z
|
src/cobra/apps/organization/summary/app.py
|
lyoniionly/django-cobra
|
2427e5cf74b7739115b1224da3306986b3ee345c
|
[
"Apache-2.0"
] | null | null | null |
src/cobra/apps/organization/summary/app.py
|
lyoniionly/django-cobra
|
2427e5cf74b7739115b1224da3306986b3ee345c
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from cobra.core.application import Application
from cobra.core.loading import get_class
class SummaryOrganizationApplication(Application):
name = 'summary'
summary_report_view = get_class('organization.summary.views', 'SummaryReportView')
# ajax
ajax_view = get_class('organization.summary.ajax', 'AjaxView')
ajax_workreport_view = get_class('organization.summary.ajax', 'AjaxWorkreportView')
ajax_update_workreport_view = get_class('organization.summary.ajax', 'AjaxUpdateWorkreportView')
def get_urls(self):
urls = [
url(r'^(?P<username>[\@\.\w-]+)/$', login_required(self.summary_report_view.as_view()), name='summary-member'),
# ajax
url(r'^ajax/view.json$', login_required(self.ajax_view.as_view()), name='ajax-summary-view'),
url(r'^ajax/workreport.json$', login_required(self.ajax_workreport_view.as_view()), name='ajax-workreport-view'),
url(r'^ajax/(?P<pk>[0-9]+).json$', login_required(self.ajax_update_workreport_view.as_view()), name='ajax-update-workreport-view'),
]
return self.post_process_urls(urls)
application = SummaryOrganizationApplication()
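# Hypothetical wiring sketch for a project urls.py. It assumes the cobra
# Application base class exposes a django-oscar style `urls` property and that
# this module is importable as cobra.apps.organization.summary.app; adjust both
# if the real project layout differs.
#
#   from django.conf.urls import include, url
#   from cobra.apps.organization.summary.app import application
#
#   urlpatterns = [
#       url(r'^summary/', include(application.urls)),
#   ]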
| 39.875
| 143
| 0.71395
|
7fbca2f83522bf8df4118f64e509ef242d448078
| 7,660
|
py
|
Python
|
sklearn/semi_supervised/tests/test_label_propagation.py
|
huzq/scikit-learn
|
f862129f36786acbae3d9f2d161bbb72d77b87ec
|
[
"BSD-3-Clause"
] | 1
|
2020-12-03T16:35:55.000Z
|
2020-12-03T16:35:55.000Z
|
sklearn/semi_supervised/tests/test_label_propagation.py
|
huzq/scikit-learn
|
f862129f36786acbae3d9f2d161bbb72d77b87ec
|
[
"BSD-3-Clause"
] | null | null | null |
sklearn/semi_supervised/tests/test_label_propagation.py
|
huzq/scikit-learn
|
f862129f36786acbae3d9f2d161bbb72d77b87ec
|
[
"BSD-3-Clause"
] | 1
|
2020-02-16T05:40:12.000Z
|
2020-02-16T05:40:12.000Z
|
""" test the label propagation module """
import numpy as np
import pytest
import warnings
from scipy.sparse import issparse
from sklearn.semi_supervised import _label_propagation as label_propagation
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors
from sklearn.datasets import make_classification
from sklearn.exceptions import ConvergenceWarning
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {"kernel": "rbf"}),
(label_propagation.LabelPropagation, {"kernel": "knn", "n_neighbors": 2}),
(
label_propagation.LabelPropagation,
{"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)},
),
(label_propagation.LabelSpreading, {"kernel": "rbf"}),
(label_propagation.LabelSpreading, {"kernel": "knn", "n_neighbors": 2}),
(
label_propagation.LabelSpreading,
{"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)},
),
]
def test_fit_transduction():
samples = [[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert clf.transduction_[2] == 1
def test_distribution():
samples = [[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters["kernel"] == "knn":
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(
clf.predict_proba([[1.0, 0.0]]), np.array([[1.0, 0.0]]), 2
)
else:
assert_array_almost_equal(
np.asarray(clf.label_distributions_[2]), np.array([0.5, 0.5]), 2
)
def test_predict():
samples = [[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(
clf.predict_proba([[1.0, 1.0]]), np.array([[0.5, 0.5]])
)
def test_label_spreading_closed_form():
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
y[::3] = -1
clf = label_propagation.LabelSpreading().fit(X, y)
# adopting notation from Zhou et al (2004):
S = clf._build_graph()
Y = np.zeros((len(y), n_classes + 1))
Y[np.arange(len(y)), y] = 1
Y = Y[:, :-1]
for alpha in [0.1, 0.3, 0.5, 0.7, 0.9]:
expected = np.dot(np.linalg.inv(np.eye(len(S)) - alpha * S), Y)
expected /= expected.sum(axis=1)[:, np.newaxis]
clf = label_propagation.LabelSpreading(max_iter=10000, alpha=alpha)
clf.fit(X, y)
assert_array_almost_equal(expected, clf.label_distributions_, 4)
def test_label_propagation_closed_form():
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
y[::3] = -1
Y = np.zeros((len(y), n_classes + 1))
Y[np.arange(len(y)), y] = 1
unlabelled_idx = Y[:, (-1,)].nonzero()[0]
labelled_idx = (Y[:, (-1,)] == 0).nonzero()[0]
clf = label_propagation.LabelPropagation(max_iter=10000, gamma=0.1)
clf.fit(X, y)
# adopting notation from Zhu et al 2002
T_bar = clf._build_graph()
Tuu = T_bar[tuple(np.meshgrid(unlabelled_idx, unlabelled_idx, indexing="ij"))]
Tul = T_bar[tuple(np.meshgrid(unlabelled_idx, labelled_idx, indexing="ij"))]
Y = Y[:, :-1]
Y_l = Y[labelled_idx, :]
Y_u = np.dot(np.dot(np.linalg.inv(np.eye(Tuu.shape[0]) - Tuu), Tul), Y_l)
expected = Y.copy()
expected[unlabelled_idx, :] = Y_u
expected /= expected.sum(axis=1)[:, np.newaxis]
assert_array_almost_equal(expected, clf.label_distributions_, 4)
def test_valid_alpha():
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
for alpha in [-0.1, 0, 1, 1.1, None]:
with pytest.raises(ValueError):
label_propagation.LabelSpreading(alpha=alpha).fit(X, y)
def test_convergence_speed():
# This is a non-regression test for #5774
X = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]])
y = np.array([0, 1, -1])
mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=5000)
mdl.fit(X, y)
# this should converge quickly:
assert mdl.n_iter_ < 10
assert_array_equal(mdl.predict(X), [0, 1, 1])
def test_convergence_warning():
# This is a non-regression test for #5774
X = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]])
y = np.array([0, 1, -1])
mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=1)
warn_msg = "max_iter=1 was reached without convergence."
with pytest.warns(ConvergenceWarning, match=warn_msg):
mdl.fit(X, y)
assert mdl.n_iter_ == mdl.max_iter
mdl = label_propagation.LabelPropagation(kernel="rbf", max_iter=1)
with pytest.warns(ConvergenceWarning, match=warn_msg):
mdl.fit(X, y)
assert mdl.n_iter_ == mdl.max_iter
mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=500)
with warnings.catch_warnings():
warnings.simplefilter("error", ConvergenceWarning)
mdl.fit(X, y)
mdl = label_propagation.LabelPropagation(kernel="rbf", max_iter=500)
with warnings.catch_warnings():
warnings.simplefilter("error", ConvergenceWarning)
mdl.fit(X, y)
@pytest.mark.parametrize(
"LabelPropagationCls",
[label_propagation.LabelSpreading, label_propagation.LabelPropagation],
)
def test_label_propagation_non_zero_normalizer(LabelPropagationCls):
# check that we don't divide by zero in case of null normalizer
# non-regression test for
# https://github.com/scikit-learn/scikit-learn/pull/15946
# https://github.com/scikit-learn/scikit-learn/issues/9292
X = np.array([[100.0, 100.0], [100.0, 100.0], [0.0, 0.0], [0.0, 0.0]])
y = np.array([0, 1, -1, -1])
mdl = LabelPropagationCls(kernel="knn", max_iter=100, n_neighbors=1)
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
mdl.fit(X, y)
def test_predict_sparse_callable_kernel():
# This is a non-regression test for #15866
# Custom sparse kernel (top-K RBF)
def topk_rbf(X, Y=None, n_neighbors=10, gamma=1e-5):
nn = NearestNeighbors(n_neighbors=10, metric="euclidean", n_jobs=2)
nn.fit(X)
W = -1 * nn.kneighbors_graph(Y, mode="distance").power(2) * gamma
np.exp(W.data, out=W.data)
assert issparse(W)
return W.T
n_classes = 4
n_samples = 500
n_test = 10
X, y = make_classification(
n_classes=n_classes,
n_samples=n_samples,
n_features=20,
n_informative=20,
n_redundant=0,
n_repeated=0,
random_state=0,
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=n_test, random_state=0
)
model = label_propagation.LabelSpreading(kernel=topk_rbf)
model.fit(X_train, y_train)
assert model.score(X_test, y_test) >= 0.9
model = label_propagation.LabelPropagation(kernel=topk_rbf)
model.fit(X_train, y_train)
assert model.score(X_test, y_test) >= 0.9
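For reference, the closed forms exercised by test_label_spreading_closed_form and
test_label_propagation_closed_form above are, in the notation already used by the
tests (S and T_bar the normalized graphs, Y the one-hot label matrix, alpha the
clamping parameter; no symbols beyond those in the code):

    F^{*} = (I - \alpha S)^{-1} Y          (Zhou et al., 2004), rows then normalized to sum to one
    Y_u = (I - T_{uu})^{-1} T_{ul} Y_l     (Zhu et al., 2002), solved for the unlabelled block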
| 35.137615
| 82
| 0.648042
|
7822553bb31de99d7698ed2b7b281913cfa0850f
| 416
|
py
|
Python
|
python/helpers/rest_runners/sphinx_runner.py
|
tgodzik/intellij-community
|
f5ef4191fc30b69db945633951fb160c1cfb7b6f
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/helpers/rest_runners/sphinx_runner.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2022-02-19T09:45:05.000Z
|
2022-02-27T20:32:55.000Z
|
python/helpers/rest_runners/sphinx_runner.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
if __name__ == "__main__":
import sys
try:
import sphinx
except ImportError:
raise NameError("Cannot find sphinx in selected interpreter.")
version = sphinx.version_info
if (version[0] >= 1 and version[1] >= 7) or version[0] >= 2:
from sphinx.cmd import build
build.main(sys.argv[1:])
else:
from sphinx import cmdline
cmdline.main(sys.argv)
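# Hypothetical invocation sketch; the documentation source and build paths are
# made up. The helper simply forwards sphinx-build style arguments, so this is
# equivalent to sphinx.cmd.build.main(["-b", "html", "docs", "docs/_build/html"])
# on Sphinx >= 1.7.
import subprocess
import sys

subprocess.run(
    [sys.executable, "sphinx_runner.py", "-b", "html", "docs", "docs/_build/html"],
    check=True,
)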
| 24.470588
| 70
| 0.612981
|
14575b14abd4a1642bfe6d5d64a25a4b96ae82f7
| 717
|
py
|
Python
|
client/tests/terminal_test.py
|
ekmixon/pyre-check
|
bd2475cc0cf4bef941f0aea6df10d674e6c907ab
|
[
"MIT"
] | 5,975
|
2018-05-11T15:54:32.000Z
|
2022-03-31T14:38:00.000Z
|
client/tests/terminal_test.py
|
ekmixon/pyre-check
|
bd2475cc0cf4bef941f0aea6df10d674e6c907ab
|
[
"MIT"
] | 524
|
2018-05-11T16:47:20.000Z
|
2022-03-22T17:43:06.000Z
|
client/tests/terminal_test.py
|
ekmixon/pyre-check
|
bd2475cc0cf4bef941f0aea6df10d674e6c907ab
|
[
"MIT"
] | 501
|
2018-05-11T16:46:29.000Z
|
2022-03-19T04:41:57.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import unittest
from unittest.mock import MagicMock, patch
from .. import terminal
class PyreTest(unittest.TestCase):
def test_is_capable_terminal(self) -> None:
with patch("os.isatty", side_effect=lambda x: x), patch(
"os.getenv", return_value="vim"
):
file = MagicMock()
file.fileno = lambda: True
self.assertEqual(terminal.is_capable(file), True)
file.fileno = lambda: False
self.assertEqual(terminal.is_capable(file), False)
| 29.875
| 65
| 0.665272
|
50cbd80edfa4181e2c3e758293b49eb0ab722b23
| 6,165
|
py
|
Python
|
frappe-bench/env/lib/python2.7/site-packages/faker/providers/person/fr_CH/__init__.py
|
ibrahmm22/library-management
|
b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506
|
[
"MIT"
] | 412
|
2020-04-16T08:11:58.000Z
|
2022-02-02T19:49:53.000Z
|
frappe-bench/env/lib/python2.7/site-packages/faker/providers/person/fr_CH/__init__.py
|
ibrahmm22/library-management
|
b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506
|
[
"MIT"
] | 1
|
2020-04-16T14:03:46.000Z
|
2020-04-17T03:41:18.000Z
|
frappe-bench/env/lib/python2.7/site-packages/faker/providers/person/fr_CH/__init__.py
|
ibrahmm22/library-management
|
b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506
|
[
"MIT"
] | 33
|
2020-04-16T08:48:53.000Z
|
2021-10-20T04:39:29.000Z
|
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_female = (
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}-{{last_name}}',
)
formats_male = (
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}-{{last_name}}',
)
formats = formats_male + formats_female
# source:
# http://www.bfs.admin.ch/bfs/portal/fr/index/news/publikationen.html?publicationID=6704
first_names_male = [
'Alain',
'Albert',
'Alexandre',
'André',
'Antonio',
'Arthur',
'Bernard',
'Bruno',
'Charles',
'Christian',
'Christophe',
'Claude',
'Daniel',
'David',
'Eric',
'Ethan',
'Florian',
'François',
'Frédéric',
'Gabriel',
'Georges',
'Gilbert',
'Guillaume',
'Gérard',
'Henri',
'Hugo',
'Jacques',
'Jean',
'Jean-Claude',
'Jean-Pierre',
'Jonathan',
'José',
'Julien',
'Kevin',
'Laurent',
'Louis',
'Loïc',
'Luca',
'Lucas',
'Léo',
'Manuel',
'Marcel',
'Mathieu',
'Matteo',
'Maurice',
'Maxime',
'Michael',
'Michel',
'Nathan',
'Nicolas',
'Noah',
'Nolan',
'Olivier',
'Pascal',
'Patrick',
'Paul',
'Philippe',
'Pierre',
'Raymond',
'René',
'Robert',
'Roger',
'Roland',
'Romain',
'Samuel',
'Stéphane',
'Sébastien',
'Thierry',
'Thomas',
'Théo',
'Vincent']
first_names_female = [
'Alice',
'Alicia',
'Ana',
'Anna',
'Anne',
'Aurélie',
'Camille',
'Caroline',
'Catherine',
'Chantal',
'Charlotte',
'Chloé',
'Christiane',
'Christine',
'Clara',
'Claudine',
'Corinne',
'Céline',
'Danielle',
'Denise',
'Eliane',
'Elisa',
'Elisabeth',
'Elodie',
'Emilie',
'Emma',
'Eva',
'Fabienne',
'Françoise',
'Georgette',
'Germaine',
'Hélène',
'Isabelle',
'Jacqueline',
'Jeanne',
'Jessica',
'Josiane',
'Julie',
'Laetitia',
'Lara',
'Laura',
'Laurence',
'Liliane',
'Lisa',
'Lucie',
'Léa',
'Madeleine',
'Manon',
'Marcelle',
'Marguerite',
'Maria',
'Marianne',
'Marie',
'Mathilde',
'Monique',
'Mélanie',
'Nathalie',
'Nelly',
'Nicole',
'Odette',
'Patricia',
'Sandra',
'Sandrine',
'Sara',
'Sarah',
'Simone',
'Sophie',
'Stéphanie',
'Suzanne',
'Sylvie',
'Thérèse',
'Valérie',
'Vanessa',
'Véronique',
'Yvette',
'Yvonne',
'Zoé']
first_names = first_names_male + first_names_female
# source = http://kunden.eye.ch/swissgen/rsr/index.html
last_names = ['Aebi', 'Aeby', 'Alber', 'Babey', 'Badan', 'Badel', 'Bahon',
'Balmat', 'Barbey', 'Barillon', 'Barman', 'Bavaud', 'Beguin',
'Berberat', 'Bernasconi', 'Besançon', 'Besençon', 'Besse', 'Beuchat',
'Beuret', 'Beurret', 'Blanc', 'Bochud', 'Boechat', 'Boichat', 'Boillat',
'Bonvin', 'Bonvini', 'Botteron', 'Bourquard', 'Bourquin', 'Bouvier',
'Bovet', 'Brahier', 'Brandt', 'Broquet', 'Bugnon', 'Bujard', 'Béguelin',
'Candaux', 'Carraud', 'Carraux', 'Carron', 'Cattin', 'Chappuis',
'Chapuis', 'Charpié', 'Chatriand', 'Chatriant', 'Chaudet', 'Chenaux',
'Chevalley', 'Chevrolet', 'Chopard', 'Coigny', 'Comman', 'Comment',
'Comte', 'Conrad', 'Corbat', 'Corboz', 'Cornut', 'Cornuz', 'Corpataux',
'Cosandey', 'Cosendey', 'Cossy', 'Courvoisier', 'Cousin', 'Cretton',
'Crevoisier', 'Crivelli', 'Curdy', 'de Dardel', 'Deladoëy', 'Delèze',
'Deshusses', 'Diesbach', 'Droz', 'Dubey', 'Duroux', 'Duvanel', 'Délèze',
'Evéquoz', 'Fonjallaz', 'Francillon', 'Galland', 'Georges', 'Gillièron',
'Gilliéron', 'Godet', 'Grand', 'Grojean', 'Grosjean', 'Gubéran',
'Humbert', 'Isella', 'Jacot-Descombes', 'Jacot-Guillarmod', 'Joly',
'Jomini', 'Joye', 'Julliard', 'Maire', 'Marti', 'Martin', 'Marty',
'Masseron', 'Matile', 'Mayor', 'Menthonnex', 'Mercier', 'Meyer',
'Monnard', 'Monnet', 'Monnet', 'Monney', 'Montandon', 'Morand',
'Morard', 'Mottet', 'Mottiez', 'Muriset', 'Musy', 'Müller', 'Niquille',
'Nusslé', 'Nüsslin', 'Paccot', 'Pachoud', 'Paschoud', 'Pasquier',
'Peitrequin', 'Pellet', 'Piccand', 'Polla', 'Privet', 'Quartier',
'Rapin', 'Rappaz', 'Rapraz', 'Rey', 'Robadey', 'Robert', 'Romanens',
'Rosselat', 'Rosselet', 'Rossellat', 'Sandoz', 'Sansonnens', 'Saudan',
'Thorens', 'Théraulaz', 'Tinguely', 'Treboux', 'Uldry', 'Vallélian',
'Vermeil', 'Vienne', 'Vonlanthen', 'Vuille', 'Wicht',
]
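# Hypothetical usage sketch: the provider above is normally consumed through a
# locale-aware generator rather than instantiated directly. The commented values
# are only examples of the kind of output drawn from the name lists above.
from faker import Factory

fake = Factory.create("fr_CH")   # newer Faker releases also accept Faker("fr_CH")
print(fake.first_name_female())  # e.g. "Chloé"
print(fake.last_name())          # e.g. "Chevalley"
print(fake.name())               # e.g. "Julien Broquet-Rappaz"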
| 28.808411
| 92
| 0.454826
|
edb4f7398f1d3401c899855ffc443ec2c68fa01e
| 516
|
py
|
Python
|
entity/cards/BARL_007H/LETL_340.py
|
x014/lushi_script
|
edab2b88e3f0de8139de2541ab2daa331f777c0e
|
[
"MIT"
] | 102
|
2021-10-20T09:06:39.000Z
|
2022-03-28T13:35:11.000Z
|
entity/cards/BARL_007H/LETL_340.py
|
x014/lushi_script
|
edab2b88e3f0de8139de2541ab2daa331f777c0e
|
[
"MIT"
] | 98
|
2021-10-19T16:13:27.000Z
|
2022-03-27T13:27:49.000Z
|
entity/cards/BARL_007H/LETL_340.py
|
x014/lushi_script
|
edab2b88e3f0de8139de2541ab2daa331f777c0e
|
[
"MIT"
] | 55
|
2021-10-19T03:56:50.000Z
|
2022-03-25T08:25:26.000Z
|
# -*- coding: utf-8 -*-
from hearthstone.entities import Entity
from entity.spell_entity import SpellEntity
class LETL_340(SpellEntity):
"""
Serpent Bite 5 (毒蛇噬咬5)
Deal $6 damage to a character. At the end of this turn, it takes $6 damage. 0 Deal $7 damage to a character. At the end of this turn, it takes $7 damage. 0 Deal $8 damage to a character. At the end of this turn, it takes $8 damage. 0 Deal $9 damage to a character. At the end of this turn, it takes $9 damage. 0 Deal $10 damage to a character. At the end of this turn, it takes $10 damage.
"""
def __init__(self, entity: Entity):
super().__init__(entity)
self.damage = 0
self.range = 0
def play(self, game, hero, target):
pass
| 24.571429
| 169
| 0.668605
|
0cbf19bf7c40e1193b50f0a588730ca20c909a26
| 2,990
|
py
|
Python
|
tests/emukit/benchmarking/test_metrics.py
|
EmuKit/Emukit
|
2df951e42c82400192220eb18af428f3eb764f6c
|
[
"Apache-2.0"
] | 272
|
2018-09-18T11:56:37.000Z
|
2020-10-10T22:21:25.000Z
|
tests/emukit/benchmarking/test_metrics.py
|
EmuKit/Emukit
|
2df951e42c82400192220eb18af428f3eb764f6c
|
[
"Apache-2.0"
] | 278
|
2018-09-19T15:38:50.000Z
|
2020-10-14T13:45:24.000Z
|
tests/emukit/benchmarking/test_metrics.py
|
EmuKit/Emukit
|
2df951e42c82400192220eb18af428f3eb764f6c
|
[
"Apache-2.0"
] | 88
|
2018-09-18T11:56:48.000Z
|
2020-09-23T13:33:55.000Z
|
import mock
import numpy as np
from emukit.benchmarking.loop_benchmarking.metrics import (
CumulativeCostMetric,
MeanSquaredErrorMetric,
MinimumObservedValueMetric,
TimeMetric,
)
from emukit.core.interfaces import IModel
from emukit.core.loop import LoopState, ModelUpdater, OuterLoop
from emukit.core.loop.loop_state import create_loop_state
def test_mean_squared_error_metric():
x_test = np.random.rand(50, 2)
y_test = np.random.rand(50, 2)
mock_model = mock.create_autospec(IModel)
mock_model.predict.return_value = (y_test, y_test * 10)
model_updater_mock = mock.create_autospec(ModelUpdater)
model_updater_mock.model = mock_model
mock_loop = mock.create_autospec(OuterLoop)
mock_loop.model_updaters = [model_updater_mock]
loop_state = LoopState([])
loop_state.metrics = dict()
mse = MeanSquaredErrorMetric(x_test, y_test)
metric_value = mse.evaluate(mock_loop, loop_state)
assert metric_value.shape == (2,)
def test_minimum_observed_value_metric():
x_observations = np.random.rand(50, 2)
y_observations = np.random.rand(50, 2)
mock_model = mock.create_autospec(IModel)
model_updater_mock = mock.create_autospec(ModelUpdater)
model_updater_mock.model = mock_model
mock_loop = mock.create_autospec(OuterLoop)
mock_loop.model_updaters = [model_updater_mock]
loop_state = create_loop_state(x_observations, y_observations)
loop_state.metrics = dict()
metric = MinimumObservedValueMetric()
metric_value = metric.evaluate(mock_loop, loop_state)
assert metric_value.shape == (2,)
def test_time_metric():
x_observations = np.random.rand(50, 2)
y_observations = np.random.rand(50, 2)
mock_model = mock.create_autospec(IModel)
model_updater_mock = mock.create_autospec(ModelUpdater)
model_updater_mock.model = mock_model
mock_loop = mock.create_autospec(OuterLoop)
mock_loop.model_updater = model_updater_mock
loop_state = create_loop_state(x_observations, y_observations)
loop_state.metrics = dict()
name = "time"
metric = TimeMetric(name)
metric.reset()
metric_value = metric.evaluate(mock_loop, loop_state)
assert metric_value.shape == (1,)
def test_cumulative_costs():
x_observations = np.random.rand(50, 2)
y_observations = np.random.rand(50, 2)
c_observations = np.random.rand(50, 1)
mock_model = mock.create_autospec(IModel)
model_updater_mock = mock.create_autospec(ModelUpdater)
model_updater_mock.model = mock_model
mock_loop = mock.create_autospec(OuterLoop)
mock_loop.model_updater = model_updater_mock
loop_state = create_loop_state(x_observations, y_observations, cost=c_observations)
loop_state.metrics = dict()
name = "cost"
metric = CumulativeCostMetric(name)
metric.reset()
metric_value = metric.evaluate(mock_loop, loop_state)
assert metric_value == np.cumsum(c_observations)[-1]
assert metric_value.shape == (1,)
| 30.510204
| 87
| 0.746488
|
899ea79b0c84ce11030a02bfec469836f3e53b9b
| 6,959
|
py
|
Python
|
azure-iot-device/tests/iothub/client_fixtures.py
|
nextdynamic/azure-iot-sdk-python
|
217853005ea507a5a415e8ca9ca4f6adb7284b7a
|
[
"MIT"
] | 1
|
2019-02-06T06:52:44.000Z
|
2019-02-06T06:52:44.000Z
|
azure-iot-device/tests/iothub/client_fixtures.py
|
nextdynamic/azure-iot-sdk-python
|
217853005ea507a5a415e8ca9ca4f6adb7284b7a
|
[
"MIT"
] | null | null | null |
azure-iot-device/tests/iothub/client_fixtures.py
|
nextdynamic/azure-iot-sdk-python
|
217853005ea507a5a415e8ca9ca4f6adb7284b7a
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from azure.iot.device.iothub.pipeline import constant
from azure.iot.device.iothub.models import Message, MethodResponse, MethodRequest
from azure.iot.device.common.models.x509 import X509
from azure.iot.device.iothub.auth import (
SymmetricKeyAuthenticationProvider,
SharedAccessSignatureAuthenticationProvider,
IoTEdgeAuthenticationProvider,
X509AuthenticationProvider,
)
"""---Constants---"""
shared_access_key = "Zm9vYmFy"
hostname = "beauxbatons.academy-net"
device_id = "MyPensieve"
module_id = "Divination"
gateway_hostname = "EnchantedCeiling"
signature = "IsolemnlySwearThatIamuUptoNogood" # does this need to be something else?
expiry = "1539043658"
fake_x509_cert_value = "fantastic_beasts"
fake_x509_cert_key = "where_to_find_them"
fake_pass_phrase = "alohomora"
"""----Shared model fixtures----"""
@pytest.fixture
def message():
return Message("Wingardium Leviosa")
@pytest.fixture
def method_response():
return MethodResponse(request_id="1", status=200, payload={"key": "value"})
@pytest.fixture
def method_request():
return MethodRequest(request_id="1", name="some_method", payload={"key": "value"})
"""----Shared Twin fixtures----"""
@pytest.fixture
def twin_patch_desired():
return {"properties": {"desired": {"foo": 1}}}
@pytest.fixture
def twin_patch_reported():
return {"properties": {"reported": {"bar": 2}}}
"""----Shared connection string fixtures----"""
device_connection_string_format = (
"HostName={hostname};DeviceId={device_id};SharedAccessKey={shared_access_key}"
)
device_connection_string_gateway_format = "HostName={hostname};DeviceId={device_id};SharedAccessKey={shared_access_key};GatewayHostName={gateway_hostname}"
module_connection_string_format = "HostName={hostname};DeviceId={device_id};ModuleId={module_id};SharedAccessKey={shared_access_key}"
module_connection_string_gateway_format = "HostName={hostname};DeviceId={device_id};ModuleId={module_id};SharedAccessKey={shared_access_key};GatewayHostName={gateway_hostname}"
@pytest.fixture(params=["Device Connection String", "Device Connection String w/ Protocol Gateway"])
def device_connection_string(request):
string_type = request.param
if string_type == "Device Connection String":
return device_connection_string_format.format(
hostname=hostname, device_id=device_id, shared_access_key=shared_access_key
)
else:
return device_connection_string_gateway_format.format(
hostname=hostname,
device_id=device_id,
shared_access_key=shared_access_key,
gateway_hostname=gateway_hostname,
)
@pytest.fixture(params=["Module Connection String", "Module Connection String w/ Protocol Gateway"])
def module_connection_string(request):
string_type = request.param
if string_type == "Module Connection String":
return module_connection_string_format.format(
hostname=hostname,
device_id=device_id,
module_id=module_id,
shared_access_key=shared_access_key,
)
else:
return module_connection_string_gateway_format.format(
hostname=hostname,
device_id=device_id,
module_id=module_id,
shared_access_key=shared_access_key,
gateway_hostname=gateway_hostname,
)
"""----Shared sas token fixtures---"""
sas_token_format = "SharedAccessSignature sr={uri}&sig={signature}&se={expiry}"
# when to use the skn format?
sas_token_skn_format = (
"SharedAccessSignature sr={uri}&sig={signature}&se={expiry}&skn={shared_access_key_name}"
)
# what about variant input with different ordered attributes
# SharedAccessSignature sig={signature-string}&se={expiry}&skn={policyName}&sr={URL-encoded-resourceURI}
@pytest.fixture()
def device_sas_token_string():
uri = hostname + "/devices/" + device_id
return sas_token_format.format(uri=uri, signature=signature, expiry=expiry)
@pytest.fixture()
def module_sas_token_string():
uri = hostname + "/devices/" + device_id + "/modules/" + module_id
return sas_token_format.format(uri=uri, signature=signature, expiry=expiry)
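# Worked example (not part of the original fixtures module): the concrete token
# string the device SAS fixture above produces with the module constants,
# written as a runnable check.
def _example_device_sas_token():
    uri = hostname + "/devices/" + device_id
    expected = (
        "SharedAccessSignature sr=beauxbatons.academy-net/devices/MyPensieve"
        "&sig=IsolemnlySwearThatIamuUptoNogood&se=1539043658"
    )
    assert sas_token_format.format(uri=uri, signature=signature, expiry=expiry) == expected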
"""----Shared certificate fixtures----"""
@pytest.fixture()
def x509():
return X509(fake_x509_cert_value, fake_x509_cert_key, fake_pass_phrase)
"""----Shared Edge Container configuration---"""
@pytest.fixture()
def edge_container_environment():
return {
"IOTEDGE_MODULEID": "__FAKE_MODULE_ID__",
"IOTEDGE_DEVICEID": "__FAKE_DEVICE_ID__",
"IOTEDGE_IOTHUBHOSTNAME": "__FAKE_HOSTNAME__",
"IOTEDGE_GATEWAYHOSTNAME": "__FAKE_GATEWAY_HOSTNAME__",
"IOTEDGE_APIVERSION": "__FAKE_API_VERSION__",
"IOTEDGE_MODULEGENERATIONID": "__FAKE_MODULE_GENERATION_ID__",
"IOTEDGE_WORKLOADURI": "http://__FAKE_WORKLOAD_URI__/",
}
@pytest.fixture()
def edge_local_debug_environment():
cs = module_connection_string_gateway_format.format(
hostname=hostname,
device_id=device_id,
module_id=module_id,
shared_access_key=shared_access_key,
gateway_hostname=gateway_hostname,
)
return {"EdgeHubConnectionString": cs, "EdgeModuleCACertificateFile": "__FAKE_CA_CERTIFICATE__"}
"""----Shared mock pipeline fixture----"""
class FakeIoTHubPipeline:
def __init__(self):
self.feature_enabled = {} # This just has to be here for the spec
def connect(self, callback):
callback()
def disconnect(self, callback):
callback()
def enable_feature(self, feature_name, callback):
callback()
def disable_feature(self, feature_name, callback):
callback()
def send_message(self, event, callback):
callback()
def send_output_event(self, event, callback):
callback()
def send_method_response(self, method_response, callback):
callback()
def get_twin(self, callback):
callback(twin={})
def patch_twin_reported_properties(self, patch, callback):
callback()
@pytest.fixture
def iothub_pipeline(mocker):
"""This fixture will automatically handle callbacks and should be
used in the majority of tests.
"""
return mocker.MagicMock(wraps=FakeIoTHubPipeline())
@pytest.fixture
def iothub_pipeline_manual_cb(mocker):
"""This fixture is for use in tests where manual triggering of a
callback is required
"""
return mocker.MagicMock()
@pytest.fixture
def edge_pipeline(mocker):
return mocker.MagicMock() # TODO: change this to wrap a pipeline object
@pytest.fixture
def fake_twin():
return {"fake_twin": True}
| 30.388646
| 176
| 0.706854
|
33d5f42296dbd0cc6c26d7097141f7881fd9ddfb
| 3,048
|
py
|
Python
|
web/transiq/fms/helper.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | null | null | null |
web/transiq/fms/helper.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | 14
|
2020-06-05T23:06:45.000Z
|
2022-03-12T00:00:18.000Z
|
web/transiq/fms/helper.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, print_function, division
from api.utils import get_or_none
from broker.models import BrokerVehicle, Broker
from driver.models import WaytrackerGPSDevice, TempoGoGPSDevice, SecuGPSDevice, MahindraGPSDevice
from owner.models import Vehicle
from owner.vehicle_util import compare_format
def vehicles_data_gps(vehicles):
data = []
gps_devices = WaytrackerGPSDevice.objects.filter(vehicle_number__in=vehicles).exclude(is_active=False).exclude(
latitude=None)
for gps_device in gps_devices:
vehicle = get_or_none(Vehicle, vehicle_number=compare_format(gps_device.vehicle_number))
data.append({
'bearing': 0.0, 'vehicle_id': vehicle.id if isinstance(vehicle, Vehicle) else '',
'vehicle_status': u'unloaded',
'vehicle_number': gps_device.vehicle_number,
'driver': gps_device.driver_json(), 'location': gps_device.location(), 'path': []
})
gps_devices = SecuGPSDevice.objects.filter(vehicle_number__in=vehicles).exclude(is_active=False).exclude(
latitude=None)
for gps_device in gps_devices:
vehicle = get_or_none(Vehicle, vehicle_number=compare_format(gps_device.vehicle_number))
data.append({
'bearing': 0.0, 'vehicle_id': vehicle.id if isinstance(vehicle, Vehicle) else '',
'vehicle_status': u'unloaded',
'vehicle_number': gps_device.vehicle_number,
'driver': gps_device.driver_json(), 'location': gps_device.location(), 'path': []
})
gps_devices = TempoGoGPSDevice.objects.filter(vehicle_number__in=vehicles).exclude(is_active=False).exclude(
latitude=None)
for gps_device in gps_devices:
vehicle = get_or_none(Vehicle, vehicle_number=compare_format(gps_device.vehicle_number))
data.append({
'bearing': 0.0, 'vehicle_id': vehicle.id if isinstance(vehicle, Vehicle) else '',
'vehicle_status': u'unloaded',
'vehicle_number': gps_device.vehicle_number,
'driver': gps_device.driver_json(), 'location': gps_device.location(), 'path': []
})
gps_devices = MahindraGPSDevice.objects.filter(vehicle_number__in=vehicles).exclude(is_active=False).exclude(
latitude=None)
for gps_device in gps_devices:
vehicle = get_or_none(Vehicle, vehicle_number=compare_format(gps_device.vehicle_number))
data.append({
'bearing': 0.0, 'vehicle_id': vehicle.id if isinstance(vehicle, Vehicle) else '',
'vehicle_status': u'unloaded',
'vehicle_number': gps_device.vehicle_number,
'driver': gps_device.driver_json(), 'location': gps_device.location(), 'path': []
})
return data
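# Hedged alternative sketch (not in the original module): the four query blocks
# above differ only in the GPS-device model, so they can be collapsed into one
# loop. This assumes all four models expose the same fields and methods used
# above, which the original code shows they do.
def vehicles_data_gps_compact(vehicles):
    data = []
    device_models = (WaytrackerGPSDevice, SecuGPSDevice, TempoGoGPSDevice, MahindraGPSDevice)
    for model in device_models:
        gps_devices = model.objects.filter(vehicle_number__in=vehicles).exclude(
            is_active=False).exclude(latitude=None)
        for gps_device in gps_devices:
            vehicle = get_or_none(Vehicle, vehicle_number=compare_format(gps_device.vehicle_number))
            data.append({
                'bearing': 0.0, 'vehicle_id': vehicle.id if isinstance(vehicle, Vehicle) else '',
                'vehicle_status': u'unloaded',
                'vehicle_number': gps_device.vehicle_number,
                'driver': gps_device.driver_json(), 'location': gps_device.location(), 'path': []
            })
    return data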
def vehicles_gps_data():
vehicle_ids = list(
BrokerVehicle.objects.filter(broker=Broker.objects.get(id=439)).values_list('vehicle_id', flat=True))
vehicles = Vehicle.objects.filter(id__in=vehicle_ids).values_list('vehicle_number', flat=True)
print(vehicles_data_gps(vehicles))
| 49.16129
| 115
| 0.699147
|
cf15880eadfa00f5ca7f6eae412ce02dbd6896ca
| 28,235
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/operations/_network_virtual_appliances_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/operations/_network_virtual_appliances_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/operations/_network_virtual_appliances_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkVirtualAppliancesOperations(object):
"""NetworkVirtualAppliancesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified Network Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual Appliance.
:type network_virtual_appliance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_virtual_appliance_name=network_virtual_appliance_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkVirtualAppliance"
"""Gets the specified Network Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual Appliance.
:type network_virtual_appliance_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkVirtualAppliance, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.NetworkVirtualAppliance
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkVirtualAppliance"
"""Updates a Network Virtual Appliance.
:param resource_group_name: The resource group name of Network Virtual Appliance.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual Appliance being updated.
:type network_virtual_appliance_name: str
:param parameters: Parameters supplied to Update Network Virtual Appliance Tags.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkVirtualAppliance, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.NetworkVirtualAppliance
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
parameters, # type: "_models.NetworkVirtualAppliance"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkVirtualAppliance"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkVirtualAppliance')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
parameters, # type: "_models.NetworkVirtualAppliance"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.NetworkVirtualAppliance"]
"""Creates or updates the specified Network Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual Appliance.
:type network_virtual_appliance_name: str
:param parameters: Parameters supplied to the create or update Network Virtual Appliance.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.NetworkVirtualAppliance
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkVirtualAppliance or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_02_01.models.NetworkVirtualAppliance]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_virtual_appliance_name=network_virtual_appliance_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkVirtualApplianceListResult"]
"""Lists all Network Virtual Appliances in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkVirtualApplianceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.NetworkVirtualApplianceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualApplianceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkVirtualApplianceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkVirtualApplianceListResult"]
"""Gets all Network Virtual Appliances in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkVirtualApplianceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.NetworkVirtualApplianceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualApplianceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkVirtualApplianceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualAppliances'} # type: ignore
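# Illustrative usage sketch (hypothetical resource names, not part of the
# generated module): as the class docstring notes, these operations are reached
# through a client instance rather than by instantiating this class directly.
def _example_delete_network_virtual_appliance():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    poller = client.network_virtual_appliances.begin_delete("my-rg", "my-nva")
    return poller.result()  # block until the long-running delete completes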
| 50.600358
| 217
| 0.67349
|
491932453d5475050d6eb0276ce1ae6d7e8ea881
| 25,857
|
py
|
Python
|
delphin/itsdb.py
|
matichorvat/pydelphin
|
7e3a3fd15f33255dd4a11ebe716cec3ef0ca4558
|
[
"MIT"
] | null | null | null |
delphin/itsdb.py
|
matichorvat/pydelphin
|
7e3a3fd15f33255dd4a11ebe716cec3ef0ca4558
|
[
"MIT"
] | null | null | null |
delphin/itsdb.py
|
matichorvat/pydelphin
|
7e3a3fd15f33255dd4a11ebe716cec3ef0ca4558
|
[
"MIT"
] | null | null | null |
"""
The itsdb module makes it easy to work with [incr tsdb()] profiles.
The ItsdbProfile class works with whole profiles, but it generally
relies on the module-level functions to do its work (such as
get_relations() or decode_row()). Queries over profiles can be
customized through the use of filters (see filter_rows()), applicators
(see apply_rows()), and selectors (see select_rows()). In addition,
one can create a new skeleton using the make_skeleton() function.
"""
import os
import re
from gzip import open as gzopen
import logging
from io import TextIOWrapper, BufferedReader
from collections import defaultdict, namedtuple, OrderedDict
from itertools import chain
from contextlib import contextmanager
from delphin.exceptions import ItsdbError
from delphin.util import safe_int
##############################################################################
# Module variables
_relations_filename = 'relations'
_field_delimiter = '@'
_default_datatype_values = {
':integer': '-1'
}
_default_field_values = {
'i-wf': '1'
}
_primary_keys = [
["i-id", "item"],
["p-id", "phenomenon"],
["ip-id", "item-phenomenon"],
["s-id", "set"],
["run-id", "run"],
["parse-id", "parse"],
["e-id", "edge"],
["f-id", "fold"]
]
##############################################################################
# Non-class (i.e. static) functions
Field = namedtuple('Field', ['name', 'datatype', 'key', 'other', 'comment'])
'''
A tuple describing a column in an [incr tsdb()] profile.
Args:
name: the column name
datatype: e.g. ":string" or ":integer"
key: True if the column is a key in the database
other: any other non-datatype, non-key attributes (like ":partial")
comment: a description of the column
'''
def get_relations(path):
"""
Parse the relations file and return a dictionary describing the database
structure.
Args:
path: The path of the relations file.
Returns:
A dictionary mapping a table name to a list of Field tuples.
"""
relations = OrderedDict()
table_re = re.compile(r'^(?P<table>\w.+):$')
field_re = re.compile(r'\s*(?P<name>\S+)'
r'(\s+(?P<props>[^#]+))?'
r'(\s*#\s*(?P<comment>.*)$)?')
f = open(path)
current_table = None
for line in f:
table_match = table_re.search(line)
field_match = field_re.search(line)
if table_match is not None:
current_table = table_match.group('table')
if current_table not in relations:
relations[current_table] = list()
elif current_table is not None and field_match is not None:
name = field_match.group('name')
props = field_match.group('props').split()
comment = field_match.group('comment')
key = False
if len(props) > 0:
datatype = props.pop(0)
if ':key' in props:
key = True
props.remove(':key')
relations[current_table].append(
Field(name, datatype, key, props, comment)
)
f.close()
return relations
data_specifier_re = re.compile(r'(?P<table>[^:]+)?(:(?P<cols>.+))?$')
def get_data_specifier(string):
"""
Return a tuple (table, col) for some [incr tsdb()] data specifier.
For example::
item -> ('item', None)
item:i-input -> ('item', ['i-input'])
item:i-input@i-wf -> ('item', ['i-input', 'i-wf'])
:i-input -> (None, ['i-input'])
(otherwise) -> (None, None)
"""
match = data_specifier_re.match(string)
if match is None:
return (None, None)
table = match.group('table')
if table is not None:
table = table.strip()
cols = match.group('cols')
if cols is not None:
cols = list(map(str.strip, cols.split('@')))
return (table, cols)
def decode_row(line):
"""
Decode a raw line from a profile into a list of column values.
Decoding involves splitting the line by the field delimiter ('@' by
default) and unescaping special characters.
Args:
line: a raw line from a [incr tsdb()] profile.
Returns:
A list of column values.
"""
fields = line.rstrip('\n').split(_field_delimiter)
return list(map(unescape, fields))
def encode_row(fields):
"""
Encode a list of column values into a [incr tsdb()] profile line.
Encoding involves escaping special characters for each value, then
joining the values into a single string with the field delimiter
('@' by default). It does not fill in default values (see
make_row()).
Args:
fields: a list of column values
Returns:
A [incr tsdb()]-encoded string
"""
return _field_delimiter.join(map(escape, map(str, fields)))
_character_escapes = {
_field_delimiter: '\\s',
'\n': '\\n',
'\\': '\\\\'
}
def _escape(m):
return _character_escapes[m.group(0)]
def escape(string):
"""
Replace any special characters with their [incr tsdb()] escape
sequences. Default sequences are::
@ -> \s
(newline) -> \\n
\\ -> \\\\
Also see unescape()
Args:
string: the string to escape
Returns:
The escaped string
"""
    return re.sub(r'(@|\n|\\)', _escape, string, flags=re.UNICODE)  # re.UNICODE belongs in flags; the fourth positional argument of re.sub is count
_character_unescapes = {'\\s': _field_delimiter, '\\n': '\n', '\\\\': '\\'}
def _unescape(m):
return _character_unescapes[m.group(0)]
def unescape(string):
"""
Replace [incr tsdb()] escape sequences with the regular equivalents.
See escape().
Args:
string: the escaped string
Returns:
The string with escape sequences replaced
"""
    return re.sub(r'(\\s|\\n|\\\\)', _unescape, string, flags=re.UNICODE)  # re.UNICODE belongs in flags; the fourth positional argument of re.sub is count
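# Illustrative sketch (not part of the original module): a round trip through
# escape()/unescape() using the escape table documented above.
def _example_escape_roundtrip():
    raw = 'a@b\nc\\d'
    escaped = escape(raw)  # '@' -> '\s', newline -> '\n', backslash -> '\\'
    assert unescape(escaped) == raw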
@contextmanager
def _open_table(tbl_filename):
if tbl_filename.endswith('.gz'):
gz_filename = tbl_filename
tbl_filename = tbl_filename[:-3]
else:
gz_filename = tbl_filename + '.gz'
if os.path.exists(tbl_filename) and os.path.exists(gz_filename):
logging.warning(
'Both gzipped and plaintext files were found; attempting to '
'use the plaintext one.'
)
if os.path.exists(tbl_filename):
with open(tbl_filename) as f:
yield f
elif os.path.exists(gz_filename):
# text mode only from py3.3; until then use TextIOWrapper
with TextIOWrapper(
            BufferedReader(gzopen(gz_filename, mode='r'))
) as f:
yield f
else:
raise ItsdbError(
'Table does not exist at {}(.gz)'
.format(tbl_filename)
)
def _write_table(profile_dir, table_name, rows, fields,
append=False, gzip=False):
# don't gzip if empty
rows = iter(rows)
try:
first_row = next(rows)
except StopIteration:
gzip = False
else:
rows = chain([first_row], rows)
if gzip and append:
logging.warning('Appending to a gzip file may result in '
'inefficient compression.')
if not os.path.exists(profile_dir):
raise ItsdbError('Profile directory does not exist: {}'
.format(profile_dir))
tbl_filename = os.path.join(profile_dir, table_name)
mode = 'a' if append else 'w'
if gzip:
# text mode only from py3.3; until then use TextIOWrapper
#mode += 't' # text mode for gzip
f = TextIOWrapper(gzopen(tbl_filename + '.gz', mode=mode))
else:
f = open(tbl_filename, mode=mode)
for row in rows:
f.write(make_row(row, fields) + '\n')
f.close()
def make_row(row, fields):
"""
Encode a mapping of column name to values into a [incr tsdb()]
profile line. The *fields* parameter determines what columns are
used, and default values are provided if a column is missing from
the mapping.
Args:
row: a dictionary mapping column names to values
fields: an iterable of [Field] objects
Returns:
A [incr tsdb()]-encoded string
"""
row_fields = [row.get(f.name, str(default_value(f.name, f.datatype)))
for f in fields]
return encode_row(row_fields)
def default_value(fieldname, datatype):
"""
Return the default value for a column.
If the column name (e.g. *i-wf*) is defined to have an idiosyncratic
value, that value is returned. Otherwise the default value for the
column's datatype is returned.
Args:
fieldname: the column name (e.g. `i-wf`)
datatype: the datatype of the column (e.g. `:integer`)
Returns:
The default value for the column.
"""
if fieldname in _default_field_values:
return _default_field_values[fieldname]
else:
return _default_datatype_values.get(datatype, '')
def filter_rows(filters, rows):
"""
Yield rows matching all applicable filters.
Filter functions have binary arity (e.g. `filter(row, col)`) where
the first parameter is the dictionary of row data, and the second
parameter is the data at one particular column.
Args:
filters: a tuple of (cols, filter_func) where filter_func will
be tested (filter_func(row, col)) for each col in cols where
col exists in the row
rows: an iterable of rows to filter
Yields:
Rows matching all applicable filters
"""
for row in rows:
if all(condition(row, row.get(col))
for (cols, condition) in filters
for col in cols
if col is None or col in row):
yield row
def apply_rows(applicators, rows):
"""
Yield rows after applying the applicator functions to them.
Applicators are simple unary functions that return a value, and that
value is stored in the yielded row. E.g.
`row[col] = applicator(row[col])`. These are useful to, e.g., cast
strings to numeric datatypes, to convert formats stored in a cell,
extract features for machine learning, and so on.
Args:
applicators: a tuple of (cols, applicator) where the applicator
will be applied to each col in cols
rows: an iterable of rows for applicators to be called on
Yields:
Rows with specified column values replaced with the results of
the applicators
"""
for row in rows:
for (cols, function) in applicators:
for col in (cols or []):
value = row.get(col, '')
row[col] = function(row, value)
yield row
def select_rows(cols, rows, mode='list'):
"""
Yield data selected from rows.
It is sometimes useful to select a subset of data from a profile.
This function selects the data in *cols* from *rows* and yields it
in a form specified by *mode*. Possible values of *mode* are:
| mode | description | example `['i-id', 'i-wf']` |
| -------------- | ----------------- | -------------------------- |
| list (default) | a list of values | `[10, 1]` |
| dict | col to value map | `{'i-id':'10','i-wf':'1'}` |
| row | [incr tsdb()] row | `'10@1'` |
Args:
cols: an iterable of column names to select data for
rows: the rows to select column data from
mode: the form yielded data should take
Yields:
Selected data in the form specified by *mode*.
"""
mode = mode.lower()
if mode == 'list':
cast = lambda cols, data: data
elif mode == 'dict':
cast = lambda cols, data: dict(zip(cols, data))
elif mode == 'row':
cast = lambda cols, data: encode_row(data)
else:
raise ItsdbError('Invalid mode for select operation: {}\n'
' Valid options include: list, dict, row'
.format(mode))
for row in rows:
data = [row.get(c) for c in cols]
yield cast(cols, data)
def match_rows(rows1, rows2, key, sort_keys=True):
"""
Yield triples of (value, left_rows, right_rows) where `left_rows`
and `right_rows` are lists of rows that share the same column
value for *key*.
"""
matched = OrderedDict()
for i, rows in enumerate([rows1, rows2]):
for row in rows:
val = row[key]
try:
data = matched[val]
except KeyError:
matched[val] = ([], [])
data = matched[val]
data[i].append(row)
vals = matched.keys()
if sort_keys:
vals = sorted(vals, key=safe_int)
for val in vals:
left, right = matched[val]
yield (val, left, right)
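# Illustrative sketch (not part of the original module): grouping two row lists
# by a shared key with match_rows().
def _example_match_rows():
    items = [{'i-id': '10', 'i-input': 'the dog barks'}]
    parses = [{'i-id': '10', 'parse-id': '1'}, {'i-id': '20', 'parse-id': '2'}]
    groups = list(match_rows(items, parses, 'i-id'))
    assert groups[0][0] == '10' and len(groups[0][1]) == 1 and len(groups[0][2]) == 1
    assert groups[1][0] == '20' and groups[1][1] == []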
def make_skeleton(path, relations, item_rows, gzip=False):
"""
Instantiate a new profile skeleton (only the relations file and
item file) from an existing relations file and a list of rows
for the item table. For standard relations files, it is suggested
to have, as a minimum, the `i-id` and `i-input` fields in the
item rows.
Args:
path: the destination directory of the skeleton---must not
already exist, as it will be created
relations: the path to the relations file
item_rows: the rows to use for the item file
gzip: if `True`, the item file will be compressed
Returns:
An ItsdbProfile containing the skeleton data (but the profile
data will already have been written to disk).
Raises:
ItsdbError if the destination directory could not be created.
"""
try:
os.makedirs(path)
except OSError:
raise ItsdbError('Path already exists: {}.'.format(path))
import shutil
shutil.copyfile(relations, os.path.join(path, _relations_filename))
prof = ItsdbProfile(path, index=False)
prof.write_table('item', item_rows, gzip=gzip)
return prof
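# Illustrative sketch (hypothetical paths, not part of the original module):
# building a minimal skeleton from an existing relations file and a couple of
# item rows, as described in the docstring above.
def _example_make_skeleton():
    item_rows = [
        {'i-id': '10', 'i-input': 'The dog barks.'},
        {'i-id': '20', 'i-input': 'The cat sleeps.'},
    ]
    return make_skeleton('new-skeleton', 'existing-profile/relations', item_rows)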
##############################################################################
# Profile class
class ItsdbProfile(object):
"""
A [incr tsdb()] profile, analyzed and ready for reading or writing.
"""
# _tables is a list of table names to consider (for indexing, writing,
# etc.). If `None`, all present in the relations file and on disk are
# considered. Otherwise, only those present in the list are considered.
_tables = None
def __init__(self, path, filters=None, applicators=None, index=True):
"""
Only the *path* parameter is required.
Args:
path: The path of the directory containing the profile
filters: A list of tuples [(table, cols, condition)] such
that only rows in table where condition(row, row[col])
evaluates to a non-false value are returned; filters are
tested in order for a table.
applicators: A list of tuples [(table, cols, function)]
which will be used when reading rows from a table---the
function will be applied to the contents of the column
cell in the table. For each table, each column-function
pair will be applied in order. Applicators apply after
the filters.
index: If `True`, indices are created based on the keys of
each table.
"""
self.root = path
self.relations = get_relations(
os.path.join(self.root, _relations_filename)
)
if self._tables is None:
self._tables = list(self.relations.keys())
self.filters = defaultdict(list)
self.applicators = defaultdict(list)
self._index = dict()
for (table, cols, condition) in (filters or []):
self.add_filter(table, cols, condition)
for (table, cols, function) in (applicators or []):
self.add_applicator(table, cols, function)
if index:
self._build_index()
def add_filter(self, table, cols, condition):
"""
Add a filter. When reading *table*, rows in *table* will be
filtered by filter_rows().
Args:
table: The table the filter applies to.
cols: The columns in *table* to filter on.
condition: The filter function.
"""
if table is not None and table not in self.relations:
raise ItsdbError('Cannot add filter; table "{}" is not defined '
'by the relations file.'
.format(table))
# this is a hack, though perhaps well-motivated
if cols is None:
cols = [None]
self.filters[table].append((cols, condition))
def add_applicator(self, table, cols, function):
"""
Add an applicator. When reading *table*, rows in *table* will be
modified by apply_rows().
Args:
table: The table to apply the function to.
cols: The columns in *table* to apply the function on.
function: The applicator function.
"""
if table not in self.relations:
raise ItsdbError('Cannot add applicator; table "{}" is not '
'defined by the relations file.'
.format(table))
if cols is None:
raise ItsdbError('Cannot add applicator; columns not specified.')
fields = set(f.name for f in self.relations[table])
for col in cols:
if col not in fields:
raise ItsdbError('Cannot add applicator; column "{}" not '
'defined by the relations file.'
.format(col))
self.applicators[table].append((cols, function))
def _build_index(self):
self._index = {key: None for key, _ in _primary_keys}
tables = self._tables
if tables is not None:
tables = set(tables)
for (keyname, table) in _primary_keys:
if table in tables:
ids = set()
try:
for row in self.read_table(table):
key = row[keyname]
ids.add(key)
except ItsdbError:
logging.info('Failed to index {}.'.format(table))
self._index[keyname] = ids
def table_relations(self, table):
if table not in self.relations:
raise ItsdbError(
                "Table {} is not defined in the profile's relations."
.format(table)
)
return self.relations[table]
def read_raw_table(self, table):
"""
Yield rows in the [incr tsdb()] *table*. A row is a dictionary
mapping column names to values. Data from a profile is decoded
by decode_row(). No filters or applicators are used.
"""
field_names = [f.name for f in self.table_relations(table)]
field_len = len(field_names)
with _open_table(os.path.join(self.root, table)) as tbl:
for line in tbl:
fields = decode_row(line)
if len(fields) != field_len:
# should this throw an exception instead?
logging.error('Number of stored fields ({}) '
                                  'differs from the expected number ({}); '
'fields may be misaligned!'
.format(len(fields), field_len))
row = OrderedDict(zip(field_names, fields))
yield row
def read_table(self, table, key_filter=True):
"""
Yield rows in the [incr tsdb()] *table* that pass any defined
filters, and with values changed by any applicators. If no
filters or applicators are defined, the result is the same as
from ItsdbProfile.read_raw_table().
"""
filters = self.filters[None] + self.filters[table]
if key_filter:
for f in self.relations[table]:
key = f.name
if f.key and (self._index.get(key) is not None):
ids = self._index[key]
# Can't keep local variables (like ids) in the scope of
# the lambda expression, so make it a default argument.
# Source: http://stackoverflow.com/a/938493/1441112
function = lambda r, x, ids=ids: x in ids
filters.append(([key], function))
applicators = self.applicators[table]
rows = self.read_raw_table(table)
return filter_rows(filters, apply_rows(applicators, rows))
def select(self, table, cols, mode='list', key_filter=True):
"""
Yield selected rows from *table*. This method just calls
select_rows() on the rows read from *table*.
"""
if cols is None:
cols = [c.name for c in self.relations[table]]
rows = self.read_table(table, key_filter=key_filter)
for row in select_rows(cols, rows, mode=mode):
yield row
def join(self, table1, table2, key_filter=True):
"""
Yield rows from a table built by joining *table1* and *table2*.
The column names in the rows have the original table name
prepended and separated by a colon. For example, joining tables
'item' and 'parse' will result in column names like
'item:i-input' and 'parse:parse-id'.
"""
get_keys = lambda t: (f.name for f in self.relations[t] if f.key)
keys = set(get_keys(table1)).intersection(get_keys(table2))
if not keys:
raise ItsdbError(
'Cannot join tables "{}" and "{}"; no shared key exists.'
.format(table1, table2)
)
key = keys.pop()
# this join method stores the whole of table2 in memory, but it is
# MUCH faster than a nested loop method. Most profiles will fit in
# memory anyway, so it's a decent tradeoff
table2_data = defaultdict(list)
for row in self.read_table(table2, key_filter=key_filter):
table2_data[row[key]].append(row)
for row1 in self.read_table(table1, key_filter=key_filter):
for row2 in table2_data.get(row1[key], []):
joinedrow = OrderedDict(
[('{}:{}'.format(table1, k), v)
for k, v in row1.items()] +
[('{}:{}'.format(table2, k), v)
for k, v in row2.items()]
)
yield joinedrow
def write_table(self, table, rows, append=False, gzip=False):
"""
Encode and write out *table* to the profile directory.
Args:
table: The name of the table to write
rows: The rows to write to the table
append: If `True`, append the encoded rows to any existing
data.
gzip: If `True`, compress the resulting table with `gzip`.
The table's filename will have `.gz` appended.
"""
_write_table(self.root,
table,
rows,
self.table_relations(table),
append=append,
gzip=gzip)
def write_profile(self, profile_directory, relations_filename=None,
key_filter=True,
append=False, gzip=None):
"""
Write all tables (as specified by the relations) to a profile.
Args:
profile_directory: The directory of the output profile
relations_filename: If given, read and use the relations
at this path instead of the current profile's
relations
key_filter: If True, filter the rows by keys in the index
append: If `True`, append profile data to existing tables
in the output profile directory
gzip: If `True`, compress tables using `gzip`. Table
filenames will have `.gz` appended. If `False`, only
write out text files. If `None`, use whatever the
original file was.
"""
import shutil
if relations_filename:
relations = get_relations(relations_filename)
else:
relations_filename = os.path.join(self.root, _relations_filename)
relations = self.relations
shutil.copyfile(relations_filename,
os.path.join(profile_directory, _relations_filename))
tables = self._tables
if tables is not None:
tables = set(tables)
for table, fields in relations.items():
fn = os.path.join(self.root, table)
if tables is None or table in tables:
if os.path.exists(fn):
pass
elif os.path.exists(fn + '.gz'):
fn += '.gz'
else:
logging.warning(
'Could not write "{}"; table doesn\'t exist.'
.format(table)
)
continue
_gzip = gzip if gzip is not None else fn.endswith('.gz')
rows = self.read_table(table, key_filter=key_filter)
_write_table(profile_directory, table, rows, fields,
append=append, gzip=_gzip)
elif os.path.exists(fn) or os.path.exists(fn + '.gz'):
logging.info('Ignoring "{}" table.'.format(table))
class ItsdbSkeleton(ItsdbProfile):
"""
A [incr tsdb()] skeleton, analyzed and ready for reading or writing.
"""
_tables = ['item']
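# Illustrative sketch (hypothetical profile path, not part of the original
# module): reading a profile with one filter and one applicator, matching the
# signatures documented in ItsdbProfile.__init__ above.
def _example_read_profile():
    prof = ItsdbProfile(
        'some-profile',
        filters=[('item', ['i-wf'], lambda row, x: x == '1')],
        applicators=[('item', ['i-id'], lambda row, x: int(x))],
    )
    return list(prof.select('item', ['i-id', 'i-input']))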
| 35.179592
| 78
| 0.572379
|
994ccffcc823c454a34d4e923d97979cd5beafba
| 4,547
|
py
|
Python
|
figuras/PycharmKayStatisticalReport/bayesian_gaussian_pdf_properties.py
|
bor9/estudiando_el_kay
|
6e07908b8b0b5a5166dadce30001e6100e8304c3
|
[
"MIT"
] | null | null | null |
figuras/PycharmKayStatisticalReport/bayesian_gaussian_pdf_properties.py
|
bor9/estudiando_el_kay
|
6e07908b8b0b5a5166dadce30001e6100e8304c3
|
[
"MIT"
] | null | null | null |
figuras/PycharmKayStatisticalReport/bayesian_gaussian_pdf_properties.py
|
bor9/estudiando_el_kay
|
6e07908b8b0b5a5166dadce30001e6100e8304c3
|
[
"MIT"
] | 1
|
2021-11-02T05:27:27.000Z
|
2021-11-02T05:27:27.000Z
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import multivariate_normal
from mpl_toolkits.mplot3d import proj3d
from matplotlib.patches import FancyArrowPatch
from matplotlib import cm
from matplotlib import rc
__author__ = 'ernesto'
# whether to use LaTeX or mathtext for text rendering
rc('text', usetex=True)
rc('mathtext', fontset='cm')
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
#####################################
# PARAMETERS - This can be modified #
#####################################
# mean
mu_x = 0
mu_y = 0
# covariance
rho = 0.5
var_x = 1
var_y = 1
x0 = 0.8
#####################
# END OF PARAMETERS #
#####################
# mean vector
mu = [mu_x, mu_y]
# covariance matrix
C = [[var_x, rho], [rho, var_y]]
xmax = 3
ymax = xmax
x, y = np.mgrid[-xmax:xmax:.01, -ymax:ymax:.01]
pos = np.dstack((x, y))
rv = multivariate_normal(mu, C)
x0_idx = np.where(np.abs(x[:, 0] - x0) < 1e-5)
x0_idx = x0_idx[0][0]
pdf = rv.pdf(pos)
zmax = np.amax(pdf)
zmin = -0.18
nlevels = 16
levels = np.linspace(0.005, zmax, nlevels)
fig = plt.figure(0)
ax = fig.add_subplot(111)
ax.contourf(x, y, pdf)
plt.axis('equal')
x_c = x[x0_idx, :]
y_c = y[x0_idx, :]
pos_c = np.dstack((x_c, y_c))
p_cond = rv.pdf(pos_c)
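# Added note (not in the original script): for this bivariate Gaussian with
# unit variances and correlation rho, the slice p_cond is proportional to the
# conditional pdf p(y|x0), which is Gaussian with the moments computed here
# (mean 0.4 and variance 0.75 for the parameter values above).
cond_mean = mu_y + rho * (x0 - mu_x) / var_x
cond_var = var_y - rho ** 2 / var_x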
fig = plt.figure(1)
ax = fig.add_subplot(111)
plt.plot(y_c, p_cond)
fontsize = 12
dx = 0.5
xmin_ax = -xmax-1
xmax_ax = xmax+dx
ymin_ax = -ymax-dx
ymax_ax = ymax+dx
fig = plt.figure(2, figsize=(10, 6), frameon=False)
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim(xmin_ax, xmax_ax)
ax.set_ylim(ymin_ax, ymax_ax)  # y-axis limits (the z-axis limits are set on the next line)
ax.set_zlim(zmin, zmax)
ax.view_init(elev=31, azim=-61)
# axis arrows
arw = Arrow3D([xmin_ax, xmax_ax], [0, 0], [zmin, zmin], arrowstyle="-|>, head_width=0.5, head_length=1",
lw=1, mutation_scale=5, facecolor='black', zorder=100)
ax.add_artist(arw)
arw = Arrow3D([0, 0], [ymin_ax, ymax_ax], [zmin, zmin], arrowstyle="-|>, head_width=0.5, head_length=1",
lw=1, mutation_scale=5, facecolor='black', zorder=100)
ax.add_artist(arw)
arw = Arrow3D([xmin_ax, xmin_ax], [ymin_ax, ymax_ax], [0, 0], arrowstyle="-|>, head_width=0.5, head_length=1",
lw=1, mutation_scale=5, facecolor='black', zorder=100)
ax.add_artist(arw)
arw = Arrow3D([xmin_ax, xmin_ax], [0, 0], [-0.03, zmax], arrowstyle="-|>, head_width=0.5, head_length=1",
lw=1, mutation_scale=5, facecolor='black', zorder=100)
ax.add_artist(arw)
plt.contour(x, y, pdf, cmap=cm.coolwarm, levels=levels, zorder=-1)
plt.contour(x, y, pdf, cmap=cm.coolwarm, offset=zmin, levels=levels, zorder=-1)
plt.plot(x_c, y_c, p_cond, 'k', zorder=100, lw=2)
plt.plot([-xmax, xmax], [-rho*xmax, rho*xmax], [zmin, zmin], 'k', zorder=100)
plt.plot([x0, x0], [-ymax, ymax], [zmin, zmin], 'k', zorder=100)
plt.plot([x0, x0], [rho*x0, rho*x0], [zmin, rv.pdf([x0, rho*x0])], 'k--', zorder=100, lw=1)
plt.plot(xmin_ax*np.ones(x_c.shape), y_c, p_cond, 'k', zorder=100, lw=2)
plt.plot([xmin_ax, x0], [rho*x0, rho*x0], [rv.pdf([x0, rho*x0]), rv.pdf([x0, rho*x0])], 'k--', zorder=100, lw=1)
plt.plot([xmin_ax, x0], [rho*x0, rho*x0], [rv.pdf([x0, rho*x0]), rv.pdf([x0, rho*x0])], 'k--', zorder=100, lw=1)
plt.plot([xmin_ax, xmin_ax], [rho*x0, rho*x0], [0, rv.pdf([x0, rho*x0])], 'k--', zorder=100, lw=1)
# axis labels
ax.text(xmax_ax, -0.55, zmin, '$x$', fontsize=fontsize, ha='center', va='baseline')
ax.text(-0.3, ymax_ax, zmin, '$y$', fontsize=fontsize, ha='right', va='center')
ax.text(xmin_ax, 0.15, zmax, '$p(y|x_0)$', fontsize=fontsize, ha='left', va='center')
ax.text(xmin_ax, ymax_ax, -0.025, '$y$', fontsize=fontsize, ha='right', va='center')
# line labels
ax.text(x0, ymin_ax+0.3, zmin, '$x_0$', fontsize=fontsize, ha='center', va='top')
ax.text(xmax, rho*xmax_ax, zmin, '$y=\\rho x$', fontsize=fontsize, ha='left', va='top')
#ax.text(xmin_ax, rho*x0, 0.01, '$\\hat{y}=\\rho x_0$', fontsize=fontsize, ha='left', va='baseline', zdir=(1, 1, 0.037))
ax.text(xmin_ax, rho*x0, -0.025, '$\\hat{y}=\\rho x_0$', fontsize=fontsize, ha='left', va='baseline')
# Distance view. Default is 10.
ax.dist = 8
plt.axis('off')
# save as pdf image
plt.savefig('bayesian_gaussian_pdf_properties.pdf', bbox_inches='tight')
plt.show()
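# Optional sanity check (illustrative addition, not part of the original script):
# after normalizing the slice numerically, it should agree with the analytic
# conditional N(rho*x0, 1 - rho^2) that holds for unit marginal variances.
from scipy.stats import norm  # assumed available alongside multivariate_normal
dy = y_c[1] - y_c[0]
p_cond_norm = p_cond / (p_cond.sum() * dy)  # Riemann-sum normalization over the slice
print('max abs deviation from analytic conditional:',
      np.max(np.abs(p_cond_norm - norm.pdf(y_c, loc=rho*x0, scale=np.sqrt(1 - rho**2)))))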
| 32.248227
| 119
| 0.632945
|
701a3a59daf08b397b94d8c4e1994629d7ae2419
| 17,493
|
py
|
Python
|
intersight/model/policy_abstract_config_result.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/policy_abstract_config_result.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/policy_abstract_config_result.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.chassis_config_result import ChassisConfigResult
from intersight.model.display_names import DisplayNames
from intersight.model.fabric_config_result import FabricConfigResult
from intersight.model.hyperflex_config_result import HyperflexConfigResult
from intersight.model.kubernetes_config_result import KubernetesConfigResult
from intersight.model.mo_base_mo import MoBaseMo
from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
from intersight.model.mo_tag import MoTag
from intersight.model.mo_version_context import MoVersionContext
from intersight.model.policy_abstract_config_result_all_of import PolicyAbstractConfigResultAllOf
from intersight.model.recovery_config_result import RecoveryConfigResult
from intersight.model.server_config_result import ServerConfigResult
globals()['ChassisConfigResult'] = ChassisConfigResult
globals()['DisplayNames'] = DisplayNames
globals()['FabricConfigResult'] = FabricConfigResult
globals()['HyperflexConfigResult'] = HyperflexConfigResult
globals()['KubernetesConfigResult'] = KubernetesConfigResult
globals()['MoBaseMo'] = MoBaseMo
globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
globals()['MoTag'] = MoTag
globals()['MoVersionContext'] = MoVersionContext
globals()['PolicyAbstractConfigResultAllOf'] = PolicyAbstractConfigResultAllOf
globals()['RecoveryConfigResult'] = RecoveryConfigResult
globals()['ServerConfigResult'] = ServerConfigResult
class PolicyAbstractConfigResult(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'CHASSIS.CONFIGRESULT': "chassis.ConfigResult",
'FABRIC.CONFIGRESULT': "fabric.ConfigResult",
'HYPERFLEX.CONFIGRESULT': "hyperflex.ConfigResult",
'KUBERNETES.CONFIGRESULT': "kubernetes.ConfigResult",
'RECOVERY.CONFIGRESULT': "recovery.ConfigResult",
'SERVER.CONFIGRESULT': "server.ConfigResult",
},
('object_type',): {
'CHASSIS.CONFIGRESULT': "chassis.ConfigResult",
'FABRIC.CONFIGRESULT': "fabric.ConfigResult",
'HYPERFLEX.CONFIGRESULT': "hyperflex.ConfigResult",
'KUBERNETES.CONFIGRESULT': "kubernetes.ConfigResult",
'RECOVERY.CONFIGRESULT': "recovery.ConfigResult",
'SERVER.CONFIGRESULT': "server.ConfigResult",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'config_stage': (str,), # noqa: E501
'config_state': (str,), # noqa: E501
'validation_state': (str,), # noqa: E501
'account_moid': (str,), # noqa: E501
'create_time': (datetime,), # noqa: E501
'domain_group_moid': (str,), # noqa: E501
'mod_time': (datetime,), # noqa: E501
'moid': (str,), # noqa: E501
'owners': ([str], none_type,), # noqa: E501
'shared_scope': (str,), # noqa: E501
'tags': ([MoTag], none_type,), # noqa: E501
'version_context': (MoVersionContext,), # noqa: E501
'ancestors': ([MoBaseMoRelationship], none_type,), # noqa: E501
'parent': (MoBaseMoRelationship,), # noqa: E501
'permission_resources': ([MoBaseMoRelationship], none_type,), # noqa: E501
'display_names': (DisplayNames,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'chassis.ConfigResult': ChassisConfigResult,
'fabric.ConfigResult': FabricConfigResult,
'hyperflex.ConfigResult': HyperflexConfigResult,
'kubernetes.ConfigResult': KubernetesConfigResult,
'recovery.ConfigResult': RecoveryConfigResult,
'server.ConfigResult': ServerConfigResult,
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'config_stage': 'ConfigStage', # noqa: E501
'config_state': 'ConfigState', # noqa: E501
'validation_state': 'ValidationState', # noqa: E501
'account_moid': 'AccountMoid', # noqa: E501
'create_time': 'CreateTime', # noqa: E501
'domain_group_moid': 'DomainGroupMoid', # noqa: E501
'mod_time': 'ModTime', # noqa: E501
'moid': 'Moid', # noqa: E501
'owners': 'Owners', # noqa: E501
'shared_scope': 'SharedScope', # noqa: E501
'tags': 'Tags', # noqa: E501
'version_context': 'VersionContext', # noqa: E501
'ancestors': 'Ancestors', # noqa: E501
'parent': 'Parent', # noqa: E501
'permission_resources': 'PermissionResources', # noqa: E501
'display_names': 'DisplayNames', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, class_id, object_type, *args, **kwargs): # noqa: E501
"""PolicyAbstractConfigResult - a model defined in OpenAPI
Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data. The enum values provide the list of concrete types that can be instantiated from this abstract type.
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property. The enum values provide the list of concrete types that can be instantiated from this abstract type.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
config_stage (str): The current running stage of the configuration or workflow.. [optional] # noqa: E501
config_state (str): Indicates overall configuration state for applying the configuration to the end point. Values -- Ok, Ok-with-warning, Errored.. [optional] # noqa: E501
validation_state (str): Indicates overall state for logical model validation. Values -- Ok, Ok-with-warning, Errored.. [optional] # noqa: E501
account_moid (str): The Account ID for this managed object.. [optional] # noqa: E501
create_time (datetime): The time when this managed object was created.. [optional] # noqa: E501
domain_group_moid (str): The DomainGroup ID for this managed object.. [optional] # noqa: E501
mod_time (datetime): The time when this managed object was last modified.. [optional] # noqa: E501
moid (str): The unique identifier of this Managed Object instance.. [optional] # noqa: E501
owners ([str], none_type): [optional] # noqa: E501
shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. [optional] # noqa: E501
tags ([MoTag], none_type): [optional] # noqa: E501
version_context (MoVersionContext): [optional] # noqa: E501
ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
parent (MoBaseMoRelationship): [optional] # noqa: E501
permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
display_names (DisplayNames): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
MoBaseMo,
PolicyAbstractConfigResultAllOf,
],
'oneOf': [
],
}
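# PolicyAbstractConfigResult is an abstract, discriminated type: the 'class_id' /
# 'object_type' values listed in allowed_values map to the concrete subclasses
# (e.g. ServerConfigResult) through the discriminator defined above.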
| 53.659509
| 1,678
| 0.64763
|